author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 10:05:51 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 10:05:51 +0000
commit     5d1646d90e1f2cceb9f0828f4b28318cd0ec7744 (patch)
tree       a94efe259b9009378be6d90eb30d2b019d95c194  /drivers/net/ethernet/amd
parent     Initial commit. (diff)
Adding upstream version 5.10.209. (tag: upstream/5.10.209)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/net/ethernet/amd')
-rw-r--r--  drivers/net/ethernet/amd/7990.c               |  670
-rw-r--r--  drivers/net/ethernet/amd/7990.h               |  251
-rw-r--r--  drivers/net/ethernet/amd/Kconfig              |  205
-rw-r--r--  drivers/net/ethernet/amd/Makefile             |   21
-rw-r--r--  drivers/net/ethernet/amd/a2065.c              |  782
-rw-r--r--  drivers/net/ethernet/amd/a2065.h              |  173
-rw-r--r--  drivers/net/ethernet/amd/am79c961a.c          |  763
-rw-r--r--  drivers/net/ethernet/amd/am79c961a.h          |  143
-rw-r--r--  drivers/net/ethernet/amd/amd8111e.c           | 1929
-rw-r--r--  drivers/net/ethernet/amd/amd8111e.h           |  802
-rw-r--r--  drivers/net/ethernet/amd/ariadne.c            |  791
-rw-r--r--  drivers/net/ethernet/amd/ariadne.h            |  415
-rw-r--r--  drivers/net/ethernet/amd/atarilance.c         | 1166
-rw-r--r--  drivers/net/ethernet/amd/au1000_eth.c         | 1376
-rw-r--r--  drivers/net/ethernet/amd/au1000_eth.h         |  115
-rw-r--r--  drivers/net/ethernet/amd/declance.c           | 1380
-rw-r--r--  drivers/net/ethernet/amd/hplance.c            |  232
-rw-r--r--  drivers/net/ethernet/amd/hplance.h            |   27
-rw-r--r--  drivers/net/ethernet/amd/lance.c              | 1311
-rw-r--r--  drivers/net/ethernet/amd/mvme147.c            |  200
-rw-r--r--  drivers/net/ethernet/amd/ni65.c               | 1247
-rw-r--r--  drivers/net/ethernet/amd/ni65.h               |  121
-rw-r--r--  drivers/net/ethernet/amd/nmclan_cs.c          | 1507
-rw-r--r--  drivers/net/ethernet/amd/pcnet32.c            | 3039
-rw-r--r--  drivers/net/ethernet/amd/sun3lance.c          |  947
-rw-r--r--  drivers/net/ethernet/amd/sunlance.c           | 1524
-rw-r--r--  drivers/net/ethernet/amd/xgbe/Makefile        |   12
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-common.h   | 1723
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-dcb.c      |  293
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c  |  525
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-desc.c     |  676
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-dev.c      | 3654
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-drv.c      | 2822
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c  |  853
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-i2c.c      |  514
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-main.c     |  490
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-mdio.c     | 1677
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-pci.c      |  526
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-phy-v1.c   |  853
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c   | 3461
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-platform.c |  629
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-ptp.c      |  279
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe.h          | 1364
43 files changed, 41488 insertions, 0 deletions
diff --git a/drivers/net/ethernet/amd/7990.c b/drivers/net/ethernet/amd/7990.c
new file mode 100644
index 000000000..ef512cf89
--- /dev/null
+++ b/drivers/net/ethernet/amd/7990.c
@@ -0,0 +1,670 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * 7990.c -- LANCE ethernet IC generic routines.
+ * This is an attempt to separate out the bits of various ethernet
+ * drivers that are common because they all use the AMD 7990 LANCE
+ * (Local Area Network Controller for Ethernet) chip.
+ *
+ * Copyright (C) 05/1998 Peter Maydell <pmaydell@chiark.greenend.org.uk>
+ *
+ * Most of this stuff was obtained by looking at other LANCE drivers,
+ * in particular a2065.[ch]. The AMD C-LANCE datasheet was also helpful.
+ * NB: this was made easy by the fact that Jes Sorensen had cleaned up
+ * most of a2065 and sunlance with the aim of merging them, so the
+ * common code was pretty obvious.
+ */
+#include <linux/crc32.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/route.h>
+#include <linux/string.h>
+#include <linux/skbuff.h>
+#include <linux/pgtable.h>
+#include <asm/irq.h>
+/* Used for the temporary inet entries and routing */
+#include <linux/socket.h>
+#include <linux/bitops.h>
+
+#include <asm/io.h>
+#include <asm/dma.h>
+#ifdef CONFIG_HP300
+#include <asm/blinken.h>
+#endif
+
+#include "7990.h"
+
+#define WRITERAP(lp, x) out_be16(lp->base + LANCE_RAP, (x))
+#define WRITERDP(lp, x) out_be16(lp->base + LANCE_RDP, (x))
+#define READRDP(lp) in_be16(lp->base + LANCE_RDP)
+
+#if IS_ENABLED(CONFIG_HPLANCE)
+#include "hplance.h"
+
+#undef WRITERAP
+#undef WRITERDP
+#undef READRDP
+
+#if IS_ENABLED(CONFIG_MVME147_NET)
+
+/* Lossage Factor Nine, Mr Sulu. */
+#define WRITERAP(lp, x) (lp->writerap(lp, x))
+#define WRITERDP(lp, x) (lp->writerdp(lp, x))
+#define READRDP(lp) (lp->readrdp(lp))
+
+#else
+
+/* These inlines can be used if only CONFIG_HPLANCE is defined */
+static inline void WRITERAP(struct lance_private *lp, __u16 value)
+{
+ do {
+ out_be16(lp->base + HPLANCE_REGOFF + LANCE_RAP, value);
+ } while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0);
+}
+
+static inline void WRITERDP(struct lance_private *lp, __u16 value)
+{
+ do {
+ out_be16(lp->base + HPLANCE_REGOFF + LANCE_RDP, value);
+ } while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0);
+}
+
+static inline __u16 READRDP(struct lance_private *lp)
+{
+ __u16 value;
+ do {
+ value = in_be16(lp->base + HPLANCE_REGOFF + LANCE_RDP);
+ } while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0);
+ return value;
+}
+
+#endif
+#endif /* IS_ENABLED(CONFIG_HPLANCE) */
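
Everything the host does with the 7990 funnels through this RAP/RDP pair: write a CSR index to the address port, then move data through the data port. A minimal sketch of the pattern using the macros above (lance_read_csr() is a hypothetical helper, not part of the driver):

static inline __u16 lance_read_csr(struct lance_private *lp, __u16 csr)
{
	WRITERAP(lp, csr);	/* select the CSR through the address port */
	return READRDP(lp);	/* then read its value through the data port */
}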
+
+/* debugging output macros, various flavours */
+/* #define TEST_HITS */
+#ifdef UNDEF
+#define PRINT_RINGS() \
+do { \
+ int t; \
+ for (t = 0; t < RX_RING_SIZE; t++) { \
+ printk("R%d: @(%02X %04X) len %04X, mblen %04X, bits %02X\n", \
+ t, ib->brx_ring[t].rmd1_hadr, ib->brx_ring[t].rmd0, \
+ ib->brx_ring[t].length, \
+ ib->brx_ring[t].mblength, ib->brx_ring[t].rmd1_bits); \
+ } \
+ for (t = 0; t < TX_RING_SIZE; t++) { \
+ printk("T%d: @(%02X %04X) len %04X, misc %04X, bits %02X\n", \
+ t, ib->btx_ring[t].tmd1_hadr, ib->btx_ring[t].tmd0, \
+ ib->btx_ring[t].length, \
+ ib->btx_ring[t].misc, ib->btx_ring[t].tmd1_bits); \
+ } \
+} while (0)
+#else
+#define PRINT_RINGS()
+#endif
+
+/* Load the CSR registers. The LANCE has to be STOPped when we do this! */
+static void load_csrs(struct lance_private *lp)
+{
+ volatile struct lance_init_block *aib = lp->lance_init_block;
+ int leptr;
+
+ leptr = LANCE_ADDR(aib);
+
+ WRITERAP(lp, LE_CSR1); /* load address of init block */
+ WRITERDP(lp, leptr & 0xFFFF);
+ WRITERAP(lp, LE_CSR2);
+ WRITERDP(lp, leptr >> 16);
+ WRITERAP(lp, LE_CSR3);
+ WRITERDP(lp, lp->busmaster_regval); /* set byteswap/ALEctrl/byte ctrl */
+
+ /* Point back to csr0 */
+ WRITERAP(lp, LE_CSR0);
+}
+
+/* #define to 0 or 1 appropriately */
+#define DEBUG_IRING 0
+/* Set up the Lance Rx and Tx rings and the init block */
+static void lance_init_ring(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_init_block *ib = lp->init_block;
+ volatile struct lance_init_block *aib; /* for LANCE_ADDR computations */
+ int leptr;
+ int i;
+
+ aib = lp->lance_init_block;
+
+ lp->rx_new = lp->tx_new = 0;
+ lp->rx_old = lp->tx_old = 0;
+
+ ib->mode = LE_MO_PROM; /* start promiscuous; lance_set_multicast() drops LE_MO_PROM again unless IFF_PROMISC is set */
+
+ /* Copy the ethernet address to the lance init block
+ * Notice that we do a byteswap if we're big endian.
+ * [I think this is the right criterion; at least, sunlance,
+ * a2065 and atarilance do the byteswap and lance.c (PC) doesn't.
+ * However, the datasheet says that the BSWAP bit doesn't affect
+ * the init block, so surely it should be low byte first for
+ * everybody? Um.]
+ * We could define the ib->physaddr as three 16bit values and
+ * use (addr[1] << 8) | addr[0] & co, but this is more efficient.
+ */
+#ifdef __BIG_ENDIAN
+ ib->phys_addr[0] = dev->dev_addr[1];
+ ib->phys_addr[1] = dev->dev_addr[0];
+ ib->phys_addr[2] = dev->dev_addr[3];
+ ib->phys_addr[3] = dev->dev_addr[2];
+ ib->phys_addr[4] = dev->dev_addr[5];
+ ib->phys_addr[5] = dev->dev_addr[4];
+#else
+ for (i = 0; i < 6; i++)
+ ib->phys_addr[i] = dev->dev_addr[i];
+#endif
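
The swap above exchanges bytes within each 16-bit word of the address, so on a big-endian host a hypothetical address lands in the init block as:

/* Hypothetical example for the big-endian branch above:
 *   dev_addr[]  = 00:11:22:33:44:55
 *   phys_addr[] = 11:00:33:22:55:44  (bytes swapped within each word)
 */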
+
+ if (DEBUG_IRING)
+ printk("TX rings:\n");
+
+ lp->tx_full = 0;
+ /* Setup the Tx ring entries */
+ for (i = 0; i < (1 << lp->lance_log_tx_bufs); i++) {
+ leptr = LANCE_ADDR(&aib->tx_buf[i][0]);
+ ib->btx_ring[i].tmd0 = leptr;
+ ib->btx_ring[i].tmd1_hadr = leptr >> 16;
+ ib->btx_ring[i].tmd1_bits = 0;
+ ib->btx_ring[i].length = 0xf000; /* The ones required by tmd2 */
+ ib->btx_ring[i].misc = 0;
+ if (DEBUG_IRING)
+ printk("%d: 0x%8.8x\n", i, leptr);
+ }
+
+ /* Setup the Rx ring entries */
+ if (DEBUG_IRING)
+ printk("RX rings:\n");
+ for (i = 0; i < (1 << lp->lance_log_rx_bufs); i++) {
+ leptr = LANCE_ADDR(&aib->rx_buf[i][0]);
+
+ ib->brx_ring[i].rmd0 = leptr;
+ ib->brx_ring[i].rmd1_hadr = leptr >> 16;
+ ib->brx_ring[i].rmd1_bits = LE_R1_OWN;
+ /* 0xf000 == bits that must be one (reserved, presumably) */
+ ib->brx_ring[i].length = -RX_BUFF_SIZE | 0xf000;
+ ib->brx_ring[i].mblength = 0;
+ if (DEBUG_IRING)
+ printk("%d: 0x%8.8x\n", i, leptr);
+ }
+
+ /* Setup the initialization block */
+
+ /* Setup rx descriptor pointer */
+ leptr = LANCE_ADDR(&aib->brx_ring);
+ ib->rx_len = (lp->lance_log_rx_bufs << 13) | (leptr >> 16);
+ ib->rx_ptr = leptr;
+ if (DEBUG_IRING)
+ printk("RX ptr: %8.8x\n", leptr);
+
+ /* Setup tx descriptor pointer */
+ leptr = LANCE_ADDR(&aib->btx_ring);
+ ib->tx_len = (lp->lance_log_tx_bufs << 13) | (leptr >> 16);
+ ib->tx_ptr = leptr;
+ if (DEBUG_IRING)
+ printk("TX ptr: %8.8x\n", leptr);
+
+ /* Clear the multicast filter */
+ ib->filter[0] = 0;
+ ib->filter[1] = 0;
+ PRINT_RINGS();
+}
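
Two encodings in this routine are easy to misread: buffer lengths are stored as 16-bit two's-complement negatives with the reserved top bits forced high, and rx_len/tx_len pack the ring size exponent into bits 15:13 above the high address byte. A small host-side sketch of the arithmetic, using the default values from 7990.h (the sample address byte 0x12 is arbitrary):

#include <stdio.h>

int main(void)
{
	unsigned short rmd2 = (-1544) | 0xf000;   /* -RX_BUFF_SIZE, reserved bits high */
	unsigned short rx_len = (3 << 13) | 0x12; /* RLEN=3 (8 buffers) | high addr byte */

	printf("rmd2=%04x rx_len=%04x\n", rmd2, rx_len); /* prints f9f8 6012 */
	return 0;
}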
+
+/* LANCE must be STOPped before we do this, too... */
+static int init_restart_lance(struct lance_private *lp)
+{
+ int i;
+
+ WRITERAP(lp, LE_CSR0);
+ WRITERDP(lp, LE_C0_INIT);
+
+ /* Need a hook here for sunlance ledma stuff */
+
+ /* Wait for the lance to complete initialization */
+ for (i = 0; (i < 100) && !(READRDP(lp) & (LE_C0_ERR | LE_C0_IDON)); i++)
+ barrier();
+ if ((i == 100) || (READRDP(lp) & LE_C0_ERR)) {
+ printk("LANCE unopened after %d ticks, csr0=%4.4x.\n", i, READRDP(lp));
+ return -1;
+ }
+
+ /* Clear IDON by writing a "1", enable interrupts and start lance */
+ WRITERDP(lp, LE_C0_IDON);
+ WRITERDP(lp, LE_C0_INEA | LE_C0_STRT);
+
+ return 0;
+}
+
+static int lance_reset(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ int status;
+
+ /* Stop the lance */
+ WRITERAP(lp, LE_CSR0);
+ WRITERDP(lp, LE_C0_STOP);
+
+ load_csrs(lp);
+ lance_init_ring(dev);
+ netif_trans_update(dev); /* prevent tx timeout */
+ status = init_restart_lance(lp);
+#ifdef DEBUG_DRIVER
+ printk("Lance restart=%d\n", status);
+#endif
+ return status;
+}
+
+static int lance_rx(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_init_block *ib = lp->init_block;
+ volatile struct lance_rx_desc *rd;
+ unsigned char bits;
+#ifdef TEST_HITS
+ int i;
+#endif
+
+#ifdef TEST_HITS
+ printk("[");
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ if (i == lp->rx_new)
+ printk("%s",
+ ib->brx_ring[i].rmd1_bits & LE_R1_OWN ? "_" : "X");
+ else
+ printk("%s",
+ ib->brx_ring[i].rmd1_bits & LE_R1_OWN ? "." : "1");
+ }
+ printk("]");
+#endif
+#ifdef CONFIG_HP300
+ blinken_leds(0x40, 0);
+#endif
+ WRITERDP(lp, LE_C0_RINT | LE_C0_INEA); /* ack Rx int, reenable ints */
+ for (rd = &ib->brx_ring[lp->rx_new]; /* For each Rx ring we own... */
+ !((bits = rd->rmd1_bits) & LE_R1_OWN);
+ rd = &ib->brx_ring[lp->rx_new]) {
+
+ /* We got an incomplete frame? */
+ if ((bits & LE_R1_POK) != LE_R1_POK) {
+ dev->stats.rx_over_errors++;
+ dev->stats.rx_errors++;
+ continue;
+ } else if (bits & LE_R1_ERR) {
+ /* Count only the end frame as a rx error,
+ * not the beginning
+ */
+ if (bits & LE_R1_BUF)
+ dev->stats.rx_fifo_errors++;
+ if (bits & LE_R1_CRC)
+ dev->stats.rx_crc_errors++;
+ if (bits & LE_R1_OFL)
+ dev->stats.rx_over_errors++;
+ if (bits & LE_R1_FRA)
+ dev->stats.rx_frame_errors++;
+ if (bits & LE_R1_EOP)
+ dev->stats.rx_errors++;
+ } else {
+ int len = (rd->mblength & 0xfff) - 4;
+ struct sk_buff *skb = netdev_alloc_skb(dev, len + 2);
+
+ if (!skb) {
+ dev->stats.rx_dropped++;
+ rd->mblength = 0;
+ rd->rmd1_bits = LE_R1_OWN;
+ lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask;
+ return 0;
+ }
+
+ skb_reserve(skb, 2); /* 16 byte align */
+ skb_put(skb, len); /* make room */
+ skb_copy_to_linear_data(skb,
+ (unsigned char *)&(ib->rx_buf[lp->rx_new][0]),
+ len);
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += len;
+ }
+
+ /* Return the packet to the pool */
+ rd->mblength = 0;
+ rd->rmd1_bits = LE_R1_OWN;
+ lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask;
+ }
+ return 0;
+}
+
+static int lance_tx(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_init_block *ib = lp->init_block;
+ volatile struct lance_tx_desc *td;
+ int i, j;
+ int status;
+
+#ifdef CONFIG_HP300
+ blinken_leds(0x80, 0);
+#endif
+ /* csr0 is 2f3 */
+ WRITERDP(lp, LE_C0_TINT | LE_C0_INEA);
+ /* csr0 is 73 */
+
+ j = lp->tx_old;
+ for (i = j; i != lp->tx_new; i = j) {
+ td = &ib->btx_ring[i];
+
+ /* If we hit a packet not owned by us, stop */
+ if (td->tmd1_bits & LE_T1_OWN)
+ break;
+
+ if (td->tmd1_bits & LE_T1_ERR) {
+ status = td->misc;
+
+ dev->stats.tx_errors++;
+ if (status & LE_T3_RTY)
+ dev->stats.tx_aborted_errors++;
+ if (status & LE_T3_LCOL)
+ dev->stats.tx_window_errors++;
+
+ if (status & LE_T3_CLOS) {
+ dev->stats.tx_carrier_errors++;
+ if (lp->auto_select) {
+ lp->tpe = 1 - lp->tpe;
+ printk("%s: Carrier Lost, trying %s\n",
+ dev->name,
+ lp->tpe ? "TPE" : "AUI");
+ /* Stop the lance */
+ WRITERAP(lp, LE_CSR0);
+ WRITERDP(lp, LE_C0_STOP);
+ lance_init_ring(dev);
+ load_csrs(lp);
+ init_restart_lance(lp);
+ return 0;
+ }
+ }
+
+ /* buffer errors and underflows turn off the transmitter */
+ /* Restart the adapter */
+ if (status & (LE_T3_BUF|LE_T3_UFL)) {
+ dev->stats.tx_fifo_errors++;
+
+ printk("%s: Tx: ERR_BUF|ERR_UFL, restarting\n",
+ dev->name);
+ /* Stop the lance */
+ WRITERAP(lp, LE_CSR0);
+ WRITERDP(lp, LE_C0_STOP);
+ lance_init_ring(dev);
+ load_csrs(lp);
+ init_restart_lance(lp);
+ return 0;
+ }
+ } else if ((td->tmd1_bits & LE_T1_POK) == LE_T1_POK) {
+ /*
+ * So we don't count the packet more than once.
+ */
+ td->tmd1_bits &= ~(LE_T1_POK);
+
+ /* One collision before packet was sent. */
+ if (td->tmd1_bits & LE_T1_EONE)
+ dev->stats.collisions++;
+
+ /* More than one collision, be optimistic. */
+ if (td->tmd1_bits & LE_T1_EMORE)
+ dev->stats.collisions += 2;
+
+ dev->stats.tx_packets++;
+ }
+
+ j = (j + 1) & lp->tx_ring_mod_mask;
+ }
+ lp->tx_old = j;
+ WRITERDP(lp, LE_C0_TINT | LE_C0_INEA);
+ return 0;
+}
+
+static irqreturn_t
+lance_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct lance_private *lp = netdev_priv(dev);
+ int csr0;
+
+ spin_lock(&lp->devlock);
+
+ WRITERAP(lp, LE_CSR0); /* LANCE Controller Status */
+ csr0 = READRDP(lp);
+
+ PRINT_RINGS();
+
+ if (!(csr0 & LE_C0_INTR)) { /* Check if any interrupt has */
+ spin_unlock(&lp->devlock);
+ return IRQ_NONE; /* been generated by the Lance. */
+ }
+
+ /* Acknowledge all the interrupt sources ASAP */
+ WRITERDP(lp, csr0 & ~(LE_C0_INEA|LE_C0_TDMD|LE_C0_STOP|LE_C0_STRT|LE_C0_INIT));
+
+ if ((csr0 & LE_C0_ERR)) {
+ /* Clear the error condition */
+ WRITERDP(lp, LE_C0_BABL|LE_C0_ERR|LE_C0_MISS|LE_C0_INEA);
+ }
+
+ if (csr0 & LE_C0_RINT)
+ lance_rx(dev);
+
+ if (csr0 & LE_C0_TINT)
+ lance_tx(dev);
+
+ /* Log misc errors. */
+ if (csr0 & LE_C0_BABL)
+ dev->stats.tx_errors++; /* Tx babble. */
+ if (csr0 & LE_C0_MISS)
+ dev->stats.rx_errors++; /* Missed a Rx frame. */
+ if (csr0 & LE_C0_MERR) {
+ printk("%s: Bus master arbitration failure, status %4.4x.\n",
+ dev->name, csr0);
+ /* Restart the chip. */
+ WRITERDP(lp, LE_C0_STRT);
+ }
+
+ if (lp->tx_full && netif_queue_stopped(dev) && (TX_BUFFS_AVAIL >= 0)) {
+ lp->tx_full = 0;
+ netif_wake_queue(dev);
+ }
+
+ WRITERAP(lp, LE_CSR0);
+ WRITERDP(lp, LE_C0_BABL|LE_C0_CERR|LE_C0_MISS|LE_C0_MERR|LE_C0_IDON|LE_C0_INEA);
+
+ spin_unlock(&lp->devlock);
+ return IRQ_HANDLED;
+}
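
CSR0 status bits are write-one-to-clear, which is what the acknowledge write near the top of the handler relies on. A worked value, purely illustrative:

/* If csr0 reads 0x04c0 (LE_C0_RINT | LE_C0_INTR | LE_C0_INEA), the ack
 * write stores 0x0480: RINT is cleared by the "1" written to it, and
 * INEA is written as 0, keeping the chip quiet until the final
 * WRITERDP(..., ... | LE_C0_INEA) re-enables interrupts.
 */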
+
+int lance_open(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ int res;
+
+ /* Install the Interrupt handler. Or we could shunt this out to specific drivers? */
+ if (request_irq(lp->irq, lance_interrupt, IRQF_SHARED, lp->name, dev))
+ return -EAGAIN;
+
+ res = lance_reset(dev);
+ spin_lock_init(&lp->devlock);
+ netif_start_queue(dev);
+
+ return res;
+}
+EXPORT_SYMBOL_GPL(lance_open);
+
+int lance_close(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+
+ netif_stop_queue(dev);
+
+ /* Stop the LANCE */
+ WRITERAP(lp, LE_CSR0);
+ WRITERDP(lp, LE_C0_STOP);
+
+ free_irq(lp->irq, dev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(lance_close);
+
+void lance_tx_timeout(struct net_device *dev, unsigned int txqueue)
+{
+ printk("lance_tx_timeout\n");
+ lance_reset(dev);
+ netif_trans_update(dev); /* prevent tx timeout */
+ netif_wake_queue(dev);
+}
+EXPORT_SYMBOL_GPL(lance_tx_timeout);
+
+netdev_tx_t lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_init_block *ib = lp->init_block;
+ int entry, skblen, len;
+ static int outs;
+ unsigned long flags;
+
+ netif_stop_queue(dev);
+
+ if (!TX_BUFFS_AVAIL) {
+ dev_consume_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ skblen = skb->len;
+
+#ifdef DEBUG_DRIVER
+ /* dump the packet */
+ {
+ int i;
+
+ for (i = 0; i < 64; i++) {
+ if ((i % 16) == 0)
+ printk("\n");
+ printk("%2.2x ", skb->data[i]);
+ }
+ }
+#endif
+ len = (skblen <= ETH_ZLEN) ? ETH_ZLEN : skblen;
+ entry = lp->tx_new & lp->tx_ring_mod_mask;
+ ib->btx_ring[entry].length = (-len) | 0xf000;
+ ib->btx_ring[entry].misc = 0;
+
+ if (skb->len < ETH_ZLEN)
+ memset((void *)&ib->tx_buf[entry][0], 0, ETH_ZLEN);
+ skb_copy_from_linear_data(skb, (void *)&ib->tx_buf[entry][0], skblen);
+
+ /* Now, give the packet to the lance */
+ ib->btx_ring[entry].tmd1_bits = (LE_T1_POK|LE_T1_OWN);
+ lp->tx_new = (lp->tx_new + 1) & lp->tx_ring_mod_mask;
+
+ outs++;
+ /* Kick the lance: transmit now */
+ WRITERDP(lp, LE_C0_INEA | LE_C0_TDMD);
+ dev_consume_skb_any(skb);
+
+ spin_lock_irqsave(&lp->devlock, flags);
+ if (TX_BUFFS_AVAIL)
+ netif_start_queue(dev);
+ else
+ lp->tx_full = 1;
+ spin_unlock_irqrestore(&lp->devlock, flags);
+
+ return NETDEV_TX_OK;
+}
+EXPORT_SYMBOL_GPL(lance_start_xmit);
+
+/* taken from the depca driver via a2065.c */
+static void lance_load_multicast(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_init_block *ib = lp->init_block;
+ volatile u16 *mcast_table = (u16 *)&ib->filter;
+ struct netdev_hw_addr *ha;
+ u32 crc;
+
+ /* set all multicast bits */
+ if (dev->flags & IFF_ALLMULTI) {
+ ib->filter[0] = 0xffffffff;
+ ib->filter[1] = 0xffffffff;
+ return;
+ }
+ /* clear the multicast filter */
+ ib->filter[0] = 0;
+ ib->filter[1] = 0;
+
+ /* Add addresses */
+ netdev_for_each_mc_addr(ha, dev) {
+ crc = ether_crc_le(6, ha->addr);
+ crc = crc >> 26;
+ mcast_table[crc >> 4] |= 1 << (crc & 0xf);
+ }
+}
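
The hash takes the top six bits of the little-endian CRC-32 of the address and uses them to index one of 64 filter bits, arranged as four 16-bit words. With a hypothetical CRC value:

/* Suppose ether_crc_le() returned 0xd4000000 for some address. Then:
 *   crc >> 26      = 53      top six bits pick filter bit 53
 *   53 >> 4        = 3       16-bit word 3 of mcast_table
 *   1 << (53 & 15) = 1 << 5  bit 5 within that word
 */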
+
+
+void lance_set_multicast(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_init_block *ib = lp->init_block;
+ int stopped;
+
+ stopped = netif_queue_stopped(dev);
+ if (!stopped)
+ netif_stop_queue(dev);
+
+ while (lp->tx_old != lp->tx_new)
+ schedule();
+
+ WRITERAP(lp, LE_CSR0);
+ WRITERDP(lp, LE_C0_STOP);
+ lance_init_ring(dev);
+
+ if (dev->flags & IFF_PROMISC) {
+ ib->mode |= LE_MO_PROM;
+ } else {
+ ib->mode &= ~LE_MO_PROM;
+ lance_load_multicast(dev);
+ }
+ load_csrs(lp);
+ init_restart_lance(lp);
+
+ if (!stopped)
+ netif_start_queue(dev);
+}
+EXPORT_SYMBOL_GPL(lance_set_multicast);
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+void lance_poll(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+
+ spin_lock(&lp->devlock);
+ WRITERAP(lp, LE_CSR0);
+ WRITERDP(lp, LE_C0_STRT);
+ spin_unlock(&lp->devlock);
+ lance_interrupt(dev->irq, dev);
+}
+EXPORT_SYMBOL_GPL(lance_poll);
+#endif
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/amd/7990.h b/drivers/net/ethernet/amd/7990.h
new file mode 100644
index 000000000..e53551dae
--- /dev/null
+++ b/drivers/net/ethernet/amd/7990.h
@@ -0,0 +1,251 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * 7990.h -- LANCE ethernet IC generic routines.
+ * This is an attempt to separate out the bits of various ethernet
+ * drivers that are common because they all use the AMD 7990 LANCE
+ * (Local Area Network Controller for Ethernet) chip.
+ *
+ * Copyright (C) 05/1998 Peter Maydell <pmaydell@chiark.greenend.org.uk>
+ *
+ * Most of this stuff was obtained by looking at other LANCE drivers,
+ * in particular a2065.[ch]. The AMD C-LANCE datasheet was also helpful.
+ */
+
+#ifndef _7990_H
+#define _7990_H
+
+/* The lance only has two register locations. We communicate mostly via memory. */
+#define LANCE_RDP 0 /* Register Data Port */
+#define LANCE_RAP 2 /* Register Address Port */
+
+/* Transmit/receive ring definitions.
+ * We allow the specific drivers to override these defaults if they want to.
+ * NB: according to lance.c, increasing the number of buffers is a waste
+ * of space and reduces the chance that an upper layer will be able to
+ * reorder queued Tx packets based on priority. [Clearly there is a minimum
+ * limit too: too small and we drop rx packets and can't tx at full speed.]
+ * 4+4 seems to be the usual setting; the atarilance driver uses 3 and 5.
+ */
+
+/* Blast! This won't work. The problem is that we can't specify a default
+ * setting because that would cause the lance_init_block struct to be
+ * too long (and overflow the RAM on shared-memory cards like the HP LANCE).
+ */
+#ifndef LANCE_LOG_TX_BUFFERS
+#define LANCE_LOG_TX_BUFFERS 1
+#define LANCE_LOG_RX_BUFFERS 3
+#endif
+
+#define TX_RING_SIZE (1 << LANCE_LOG_TX_BUFFERS)
+#define RX_RING_SIZE (1 << LANCE_LOG_RX_BUFFERS)
+#define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
+#define RX_RING_MOD_MASK (RX_RING_SIZE - 1)
+#define TX_RING_LEN_BITS ((LANCE_LOG_TX_BUFFERS) << 29)
+#define RX_RING_LEN_BITS ((LANCE_LOG_RX_BUFFERS) << 29)
+#define PKT_BUFF_SIZE (1544)
+#define RX_BUFF_SIZE PKT_BUFF_SIZE
+#define TX_BUFF_SIZE PKT_BUFF_SIZE
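
The "Blast!" note above is a size constraint, and the defaults only just satisfy it. A quick tally against the structures defined below (each descriptor is 8 bytes):

/* Size tally for the defaults (2 Tx + 8 Rx buffers):
 *   init-block header: 2 + 6 + 8 + 4*2 =    24 bytes
 *   descriptors:       (2 + 8) * 8     =    80 bytes
 *   packet buffers:    (2 + 8) * 1544  = 15440 bytes
 *   total                              = 15544 bytes
 * which only just fits a 16 KiB shared-memory window, hence the
 * warning against raising the defaults.
 */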
+
+/* Each receive buffer is described by a receive message descriptor (RMD) */
+struct lance_rx_desc {
+ volatile unsigned short rmd0; /* low address of packet */
+ volatile unsigned char rmd1_bits; /* descriptor bits */
+ volatile unsigned char rmd1_hadr; /* high address of packet */
+ volatile short length; /* This length is 2s complement (negative)!
+ * Buffer length */
+ volatile unsigned short mblength; /* Actual number of bytes received */
+};
+
+/* Ditto for TMD: */
+struct lance_tx_desc {
+ volatile unsigned short tmd0; /* low address of packet */
+ volatile unsigned char tmd1_bits; /* descriptor bits */
+ volatile unsigned char tmd1_hadr; /* high address of packet */
+ volatile short length; /* Length is 2s complement (negative)! */
+ volatile unsigned short misc;
+};
+
+/* There are three memory structures accessed by the LANCE:
+ * the initialization block, the receive and transmit descriptor rings,
+ * and the data buffers themselves. In fact we might as well put the
+ * init block,the Tx and Rx rings and the buffers together in memory:
+ */
+struct lance_init_block {
+ volatile unsigned short mode; /* Pre-set mode (reg. 15) */
+ volatile unsigned char phys_addr[6]; /* Physical ethernet address */
+ volatile unsigned filter[2]; /* Multicast filter (64 bits) */
+
+ /* Receive and transmit ring base, along with extra bits. */
+ volatile unsigned short rx_ptr; /* receive descriptor addr */
+ volatile unsigned short rx_len; /* receive len and high addr */
+ volatile unsigned short tx_ptr; /* transmit descriptor addr */
+ volatile unsigned short tx_len; /* transmit len and high addr */
+
+ /* The Tx and Rx ring entries must be aligned on 8-byte boundaries.
+ * This will be true if this whole struct is 8-byte aligned.
+ */
+ volatile struct lance_tx_desc btx_ring[TX_RING_SIZE];
+ volatile struct lance_rx_desc brx_ring[RX_RING_SIZE];
+
+ volatile char tx_buf[TX_RING_SIZE][TX_BUFF_SIZE];
+ volatile char rx_buf[RX_RING_SIZE][RX_BUFF_SIZE];
+ /* we use this just to make the struct big enough that we can move its startaddr
+ * in order to force alignment to an eight byte boundary.
+ */
+};
+
+/* This is where we keep all the stuff the driver needs to know about.
+ * I'm definitely unhappy about the mechanism for allowing specific
+ * drivers to add things...
+ */
+struct lance_private {
+ const char *name;
+ unsigned long base;
+ volatile struct lance_init_block *init_block; /* CPU address of RAM */
+ volatile struct lance_init_block *lance_init_block; /* LANCE address of RAM */
+
+ int rx_new, tx_new;
+ int rx_old, tx_old;
+
+ int lance_log_rx_bufs, lance_log_tx_bufs;
+ int rx_ring_mod_mask, tx_ring_mod_mask;
+
+ int tpe; /* TPE is selected */
+ int auto_select; /* cable-selection is by carrier */
+ unsigned short busmaster_regval;
+
+ unsigned int irq; /* IRQ to register */
+
+ /* This is because the HP LANCE is disgusting and you have to check
+ * a DIO-specific register every time you read/write the LANCE regs :-<
+ * [could we get away with making these some sort of macro?]
+ */
+ void (*writerap)(void *, unsigned short);
+ void (*writerdp)(void *, unsigned short);
+ unsigned short (*readrdp)(void *);
+ spinlock_t devlock;
+ char tx_full;
+};
+
+/*
+ * Am7990 Control and Status Registers
+ */
+#define LE_CSR0 0x0000 /* LANCE Controller Status */
+#define LE_CSR1 0x0001 /* IADR[15:0] (bit0==0 ie word aligned) */
+#define LE_CSR2 0x0002 /* IADR[23:16] (high bits reserved) */
+#define LE_CSR3 0x0003 /* Misc */
+
+/*
+ * Bit definitions for CSR0 (LANCE Controller Status)
+ */
+#define LE_C0_ERR 0x8000 /* Error = BABL | CERR | MISS | MERR */
+#define LE_C0_BABL 0x4000 /* Babble: Transmitted too many bits */
+#define LE_C0_CERR 0x2000 /* No Heartbeat (10BASE-T) */
+#define LE_C0_MISS 0x1000 /* Missed Frame (no rx buffer to put it in) */
+#define LE_C0_MERR 0x0800 /* Memory Error */
+#define LE_C0_RINT 0x0400 /* Receive Interrupt */
+#define LE_C0_TINT 0x0200 /* Transmit Interrupt */
+#define LE_C0_IDON 0x0100 /* Initialization Done */
+#define LE_C0_INTR 0x0080 /* Interrupt Flag
+ = BABL | MISS | MERR | RINT | TINT | IDON */
+#define LE_C0_INEA 0x0040 /* Interrupt Enable */
+#define LE_C0_RXON 0x0020 /* Receive On */
+#define LE_C0_TXON 0x0010 /* Transmit On */
+#define LE_C0_TDMD 0x0008 /* Transmit Demand */
+#define LE_C0_STOP 0x0004 /* Stop */
+#define LE_C0_STRT 0x0002 /* Start */
+#define LE_C0_INIT 0x0001 /* Initialize */
+
+
+/*
+ * Bit definitions for CSR3
+ */
+#define LE_C3_BSWP 0x0004 /* Byte Swap (on for big endian byte order) */
+#define LE_C3_ACON 0x0002 /* ALE Control (on for active low ALE) */
+#define LE_C3_BCON 0x0001 /* Byte Control */
+
+
+/*
+ * Mode Flags
+ */
+#define LE_MO_PROM 0x8000 /* Promiscuous Mode */
+/* these next ones 0x4000 -- 0x0080 are not available on the LANCE 7990,
+ * but they are in NetBSD's am7990.h, presumably for backwards-compatible chips
+ */
+#define LE_MO_DRCVBC 0x4000 /* disable receive broadcast */
+#define LE_MO_DRCVPA 0x2000 /* disable physical address detection */
+#define LE_MO_DLNKTST 0x1000 /* disable link status */
+#define LE_MO_DAPC 0x0800 /* disable automatic polarity correction */
+#define LE_MO_MENDECL 0x0400 /* MENDEC loopback mode */
+#define LE_MO_LRTTSEL 0x0200 /* lower RX threshold / TX mode selection */
+#define LE_MO_PSEL1 0x0100 /* port selection bit1 */
+#define LE_MO_PSEL0 0x0080 /* port selection bit0 */
+/* and this one is from the C-LANCE data sheet... */
+#define LE_MO_EMBA 0x0080 /* Enable Modified Backoff Algorithm
+ (C-LANCE, not original LANCE) */
+#define LE_MO_INTL 0x0040 /* Internal Loopback */
+#define LE_MO_DRTY 0x0020 /* Disable Retry */
+#define LE_MO_FCOLL 0x0010 /* Force Collision */
+#define LE_MO_DXMTFCS 0x0008 /* Disable Transmit CRC */
+#define LE_MO_LOOP 0x0004 /* Loopback Enable */
+#define LE_MO_DTX 0x0002 /* Disable Transmitter */
+#define LE_MO_DRX 0x0001 /* Disable Receiver */
+
+
+/*
+ * Receive Flags
+ */
+#define LE_R1_OWN 0x80 /* LANCE owns the descriptor */
+#define LE_R1_ERR 0x40 /* Error */
+#define LE_R1_FRA 0x20 /* Framing Error */
+#define LE_R1_OFL 0x10 /* Overflow Error */
+#define LE_R1_CRC 0x08 /* CRC Error */
+#define LE_R1_BUF 0x04 /* Buffer Error */
+#define LE_R1_SOP 0x02 /* Start of Packet */
+#define LE_R1_EOP 0x01 /* End of Packet */
+#define LE_R1_POK 0x03 /* Packet is complete: SOP + EOP */
+
+
+/*
+ * Transmit Flags
+ */
+#define LE_T1_OWN 0x80 /* LANCE owns the descriptor */
+#define LE_T1_ERR 0x40 /* Error */
+#define LE_T1_RES 0x20 /* Reserved, LANCE writes this with a zero */
+#define LE_T1_EMORE 0x10 /* More than one retry needed */
+#define LE_T1_EONE 0x08 /* One retry needed */
+#define LE_T1_EDEF 0x04 /* Deferred */
+#define LE_T1_SOP 0x02 /* Start of Packet */
+#define LE_T1_EOP 0x01 /* End of Packet */
+#define LE_T1_POK 0x03 /* Packet is complete: SOP + EOP */
+
+/*
+ * Error Flags
+ */
+#define LE_T3_BUF 0x8000 /* Buffer Error */
+#define LE_T3_UFL 0x4000 /* Underflow Error */
+#define LE_T3_LCOL 0x1000 /* Late Collision */
+#define LE_T3_CLOS 0x0800 /* Loss of Carrier */
+#define LE_T3_RTY 0x0400 /* Retry Error */
+#define LE_T3_TDR 0x03ff /* Time Domain Reflectometry */
+
+/* Miscellaneous useful macros */
+
+#define TX_BUFFS_AVAIL ((lp->tx_old <= lp->tx_new) ? \
+ lp->tx_old + lp->tx_ring_mod_mask - lp->tx_new : \
+ lp->tx_old - lp->tx_new - 1)
+
+/* The LANCE only uses 24 bit addresses. This does the obvious thing. */
+#define LANCE_ADDR(x) ((int)(x) & ~0xff000000)
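
TX_BUFFS_AVAIL is the usual one-slot-reserved ring arithmetic; a2065.c later in this patch carries the same logic as the function lance_tx_buffs_avail(). In function form, with a worked case:

/* Equivalent function form of TX_BUFFS_AVAIL. For a 4-entry ring
 * (mask 3): tx_old == tx_new gives 3 free (one slot is always kept
 * unused, so "full" never looks like "empty"); tx_old=2, tx_new=0
 * gives 2 - 0 - 1 = 1 free.
 */
static inline int tx_buffs_avail(const struct lance_private *lp)
{
	if (lp->tx_old <= lp->tx_new)
		return lp->tx_old + lp->tx_ring_mod_mask - lp->tx_new;
	return lp->tx_old - lp->tx_new - 1;
}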
+
+/* Now the prototypes we export */
+int lance_open(struct net_device *dev);
+int lance_close(struct net_device *dev);
+netdev_tx_t lance_start_xmit(struct sk_buff *skb, struct net_device *dev);
+void lance_set_multicast(struct net_device *dev);
+void lance_tx_timeout(struct net_device *dev, unsigned int txqueue);
+#ifdef CONFIG_NET_POLL_CONTROLLER
+void lance_poll(struct net_device *dev);
+#endif
+
+#endif /* ndef _7990_H */
diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig
new file mode 100644
index 000000000..db7d956a9
--- /dev/null
+++ b/drivers/net/ethernet/amd/Kconfig
@@ -0,0 +1,205 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# AMD network device configuration
+#
+
+config NET_VENDOR_AMD
+ bool "AMD devices"
+ default y
+ depends on DIO || MACH_DECSTATION || MVME147 || ATARI || SUN3 || \
+ SUN3X || SBUS || PCI || ZORRO || (ISA && ISA_DMA_API) || \
+ (ARM && ARCH_EBSA110) || ISA || EISA || PCMCIA || ARM64
+ help
+ If you have a network (Ethernet) chipset belonging to this class,
+ say Y.
+
+ Note that the answer to this question does not directly affect
+ the kernel: saying N will just cause the configurator to skip all
+ the questions regarding AMD chipsets. If you say Y, you will be asked
+ for your specific chipset/driver in the following questions.
+
+if NET_VENDOR_AMD
+
+config A2065
+ tristate "A2065 support"
+ depends on ZORRO
+ select CRC32
+ help
+ If you have a Commodore A2065 Ethernet adapter, say Y. Otherwise,
+ say N.
+
+ To compile this driver as a module, choose M here: the module
+ will be called a2065.
+
+config AMD8111_ETH
+ tristate "AMD 8111 (new PCI LANCE) support"
+ depends on PCI
+ select CRC32
+ select MII
+ help
+ If you have an AMD 8111-based PCI LANCE ethernet card,
+ answer Y here.
+
+ To compile this driver as a module, choose M here. The module
+ will be called amd8111e.
+
+config LANCE
+ tristate "AMD LANCE and PCnet (AT1500 and NE2100) support"
+ depends on ISA && ISA_DMA_API && !ARM && !PPC32
+ help
+ If you have a network (Ethernet) card of this type, say Y here.
+ Some LinkSys cards are of this type.
+
+ To compile this driver as a module, choose M here: the module
+ will be called lance. This is recommended.
+
+config PCNET32
+ tristate "AMD PCnet32 PCI support"
+ depends on PCI
+ select CRC32
+ select MII
+ help
+ If you have a PCnet32 or PCnetPCI based network (Ethernet) card,
+ answer Y here.
+
+ To compile this driver as a module, choose M here. The module
+ will be called pcnet32.
+
+config ARIADNE
+ tristate "Ariadne support"
+ depends on ZORRO
+ help
+ If you have a Village Tronic Ariadne Ethernet adapter, say Y.
+ Otherwise, say N.
+
+ To compile this driver as a module, choose M here: the module
+ will be called ariadne.
+
+config ARM_AM79C961A
+ bool "ARM EBSA110 AM79C961A support"
+ depends on ARM && ARCH_EBSA110
+ select CRC32
+ help
+ If you wish to compile a kernel for the EBSA-110, then you should
+ always answer Y to this.
+
+config ATARILANCE
+ tristate "Atari LANCE support"
+ depends on ATARI
+ help
+ Say Y to include support for several Atari Ethernet adapters based
+ on the AMD LANCE chipset: RieblCard (with or without battery), or
+ PAMCard VME (also the version by Rhotron, with different addresses).
+
+config DECLANCE
+ tristate "DEC LANCE ethernet controller support"
+ depends on MACH_DECSTATION
+ select CRC32
+ help
+ This driver is for the series of Ethernet controllers produced by
+ DEC (now Compaq) based on the AMD LANCE chipset, including the
+ DEPCA series. (This chipset is better known via the NE2100 cards.)
+
+config HPLANCE
+ tristate "HP on-board LANCE support"
+ depends on DIO
+ select CRC32
+ help
+ If you want to use the builtin "LANCE" Ethernet controller on an
+ HP300 machine, say Y here.
+
+config MIPS_AU1X00_ENET
+ tristate "MIPS AU1000 Ethernet support"
+ depends on MIPS_ALCHEMY
+ select PHYLIB
+ select CRC32
+ help
+ If you have an Alchemy Semi AU1X00 based system
+ say Y. Otherwise, say N.
+
+config MVME147_NET
+ tristate "MVME147 (LANCE) Ethernet support"
+ depends on MVME147
+ select CRC32
+ help
+ Support for the on-board Ethernet interface on the Motorola MVME147
+ single-board computer. Say Y here to include the
+ driver for this chip in your kernel.
+ To compile this driver as a module, choose M here.
+
+config PCMCIA_NMCLAN
+ tristate "New Media PCMCIA support"
+ depends on PCMCIA
+ help
+ Say Y here if you intend to attach a New Media Ethernet or LiveWire
+ PCMCIA (PC-card) Ethernet card to your computer.
+
+ To compile this driver as a module, choose M here: the module will be
+ called nmclan_cs. If unsure, say N.
+
+config NI65
+ tristate "NI6510 support"
+ depends on ISA && ISA_DMA_API && !ARM && !PPC32
+ help
+ If you have a network (Ethernet) card of this type, say Y here.
+
+ To compile this driver as a module, choose M here. The module
+ will be called ni65.
+
+config SUN3LANCE
+ tristate "Sun3/Sun3x on-board LANCE support"
+ depends on (SUN3 || SUN3X)
+ help
+ Most Sun3 and Sun3x motherboards (including the 3/50, 3/60 and 3/80)
+ featured an AMD LANCE 10Mbit Ethernet controller on board; say Y
+ here to compile in the Linux driver for this and enable Ethernet.
+ General Linux information on the Sun 3 and 3x series (now
+ discontinued) is at
+ <http://www.angelfire.com/ca2/tech68k/sun3.html>.
+
+ If you're not building a kernel for a Sun 3, say N.
+
+config SUNLANCE
+ tristate "Sun LANCE support"
+ depends on SBUS
+ select CRC32
+ help
+ This driver supports the "le" interface present on all 32-bit Sparc
+ systems, on some older Ultra systems and as an Sbus option. These
+ cards are based on the AMD LANCE chipset, which is better known
+ via the NE2100 cards.
+
+ To compile this driver as a module, choose M here: the module
+ will be called sunlance.
+
+config AMD_XGBE
+ tristate "AMD 10GbE Ethernet driver"
+ depends on ((OF_NET && OF_ADDRESS) || ACPI || PCI) && HAS_IOMEM
+ depends on X86 || ARM64 || COMPILE_TEST
+ select BITREVERSE
+ select CRC32
+ select PHYLIB
+ select AMD_XGBE_HAVE_ECC if X86
+ imply PTP_1588_CLOCK
+ help
+ This driver supports the AMD 10GbE Ethernet device found on an
+ AMD SoC.
+
+ To compile this driver as a module, choose M here: the module
+ will be called amd-xgbe.
+
+config AMD_XGBE_DCB
+ bool "Data Center Bridging (DCB) support"
+ default n
+ depends on AMD_XGBE && DCB
+ help
+ Say Y here to enable Data Center Bridging (DCB) support in the
+ driver.
+
+ If unsure, say N.
+
+config AMD_XGBE_HAVE_ECC
+ bool
+ default n
+
+endif # NET_VENDOR_AMD
diff --git a/drivers/net/ethernet/amd/Makefile b/drivers/net/ethernet/amd/Makefile
new file mode 100644
index 000000000..45f86822a
--- /dev/null
+++ b/drivers/net/ethernet/amd/Makefile
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the AMD network device drivers.
+#
+
+obj-$(CONFIG_A2065) += a2065.o
+obj-$(CONFIG_AMD8111_ETH) += amd8111e.o
+obj-$(CONFIG_ARM_AM79C961A) += am79c961a.o
+obj-$(CONFIG_ARIADNE) += ariadne.o
+obj-$(CONFIG_ATARILANCE) += atarilance.o
+obj-$(CONFIG_DECLANCE) += declance.o
+obj-$(CONFIG_HPLANCE) += hplance.o 7990.o
+obj-$(CONFIG_LANCE) += lance.o
+obj-$(CONFIG_MIPS_AU1X00_ENET) += au1000_eth.o
+obj-$(CONFIG_MVME147_NET) += mvme147.o 7990.o
+obj-$(CONFIG_PCMCIA_NMCLAN) += nmclan_cs.o
+obj-$(CONFIG_NI65) += ni65.o
+obj-$(CONFIG_PCNET32) += pcnet32.o
+obj-$(CONFIG_SUN3LANCE) += sun3lance.o
+obj-$(CONFIG_SUNLANCE) += sunlance.o
+obj-$(CONFIG_AMD_XGBE) += xgbe/
diff --git a/drivers/net/ethernet/amd/a2065.c b/drivers/net/ethernet/amd/a2065.c
new file mode 100644
index 000000000..2f808dbc8
--- /dev/null
+++ b/drivers/net/ethernet/amd/a2065.c
@@ -0,0 +1,782 @@
+/*
+ * Amiga Linux/68k A2065 Ethernet Driver
+ *
+ * (C) Copyright 1995-2003 by Geert Uytterhoeven <geert@linux-m68k.org>
+ *
+ * Fixes and tips by:
+ * - Janos Farkas (CHEXUM@sparta.banki.hu)
+ * - Jes Degn Soerensen (jds@kom.auc.dk)
+ * - Matt Domsch (Matt_Domsch@dell.com)
+ *
+ * ----------------------------------------------------------------------------
+ *
+ * This program is based on
+ *
+ * ariadne.?: Amiga Linux/68k Ariadne Ethernet Driver
+ * (C) Copyright 1995 by Geert Uytterhoeven,
+ * Peter De Schrijver
+ *
+ * lance.c: An AMD LANCE ethernet driver for linux.
+ * Written 1993-94 by Donald Becker.
+ *
+ * Am79C960: PCnet(tm)-ISA Single-Chip Ethernet Controller
+ * Advanced Micro Devices
+ * Publication #16907, Rev. B, Amendment/0, May 1994
+ *
+ * ----------------------------------------------------------------------------
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of the Linux
+ * distribution for more details.
+ *
+ * ----------------------------------------------------------------------------
+ *
+ * The A2065 is a Zorro-II board made by Commodore/Ameristar. It contains:
+ *
+ * - an Am7990 Local Area Network Controller for Ethernet (LANCE) with
+ * both 10BASE-2 (thin coax) and AUI (DB-15) connectors
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+/*#define DEBUG*/
+/*#define TEST_HITS*/
+
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/module.h>
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/skbuff.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/crc32.h>
+#include <linux/zorro.h>
+#include <linux/bitops.h>
+
+#include <asm/byteorder.h>
+#include <asm/irq.h>
+#include <asm/amigaints.h>
+#include <asm/amigahw.h>
+
+#include "a2065.h"
+
+/* Transmit/Receive Ring Definitions */
+
+#define LANCE_LOG_TX_BUFFERS (2)
+#define LANCE_LOG_RX_BUFFERS (4)
+
+#define TX_RING_SIZE (1 << LANCE_LOG_TX_BUFFERS)
+#define RX_RING_SIZE (1 << LANCE_LOG_RX_BUFFERS)
+
+#define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
+#define RX_RING_MOD_MASK (RX_RING_SIZE - 1)
+
+#define PKT_BUF_SIZE (1544)
+#define RX_BUFF_SIZE PKT_BUF_SIZE
+#define TX_BUFF_SIZE PKT_BUF_SIZE
+
+/* Layout of the Lance's RAM Buffer */
+
+struct lance_init_block {
+ unsigned short mode; /* Pre-set mode (reg. 15) */
+ unsigned char phys_addr[6]; /* Physical ethernet address */
+ unsigned filter[2]; /* Multicast filter. */
+
+ /* Receive and transmit ring base, along with extra bits. */
+ unsigned short rx_ptr; /* receive descriptor addr */
+ unsigned short rx_len; /* receive len and high addr */
+ unsigned short tx_ptr; /* transmit descriptor addr */
+ unsigned short tx_len; /* transmit len and high addr */
+
+ /* The Tx and Rx ring entries must be aligned on 8-byte boundaries. */
+ struct lance_rx_desc brx_ring[RX_RING_SIZE];
+ struct lance_tx_desc btx_ring[TX_RING_SIZE];
+
+ char rx_buf[RX_RING_SIZE][RX_BUFF_SIZE];
+ char tx_buf[TX_RING_SIZE][TX_BUFF_SIZE];
+};
+
+/* Private Device Data */
+
+struct lance_private {
+ char *name;
+ volatile struct lance_regs *ll;
+ volatile struct lance_init_block *init_block; /* Hosts view */
+ volatile struct lance_init_block *lance_init_block; /* Lance view */
+
+ int rx_new, tx_new;
+ int rx_old, tx_old;
+
+ int lance_log_rx_bufs, lance_log_tx_bufs;
+ int rx_ring_mod_mask, tx_ring_mod_mask;
+
+ int tpe; /* cable-selection is TPE */
+ int auto_select; /* cable-selection by carrier */
+ unsigned short busmaster_regval;
+
+ struct timer_list multicast_timer;
+ struct net_device *dev;
+};
+
+#define LANCE_ADDR(x) ((int)(x) & ~0xff000000)
+
+/* Load the CSR registers */
+static void load_csrs(struct lance_private *lp)
+{
+ volatile struct lance_regs *ll = lp->ll;
+ volatile struct lance_init_block *aib = lp->lance_init_block;
+ int leptr = LANCE_ADDR(aib);
+
+ ll->rap = LE_CSR1;
+ ll->rdp = (leptr & 0xFFFF);
+ ll->rap = LE_CSR2;
+ ll->rdp = leptr >> 16;
+ ll->rap = LE_CSR3;
+ ll->rdp = lp->busmaster_regval;
+
+ /* Point back to csr0 */
+ ll->rap = LE_CSR0;
+}
+
+/* Setup the Lance Rx and Tx rings */
+static void lance_init_ring(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_init_block *ib = lp->init_block;
+ volatile struct lance_init_block *aib = lp->lance_init_block;
+ /* for LANCE_ADDR computations */
+ int leptr;
+ int i;
+
+ /* Lock out other processes while setting up hardware */
+ netif_stop_queue(dev);
+ lp->rx_new = lp->tx_new = 0;
+ lp->rx_old = lp->tx_old = 0;
+
+ ib->mode = 0;
+
+ /* Copy the ethernet address to the lance init block
+ * Note that on the sparc you need to swap the ethernet address.
+ */
+ ib->phys_addr[0] = dev->dev_addr[1];
+ ib->phys_addr[1] = dev->dev_addr[0];
+ ib->phys_addr[2] = dev->dev_addr[3];
+ ib->phys_addr[3] = dev->dev_addr[2];
+ ib->phys_addr[4] = dev->dev_addr[5];
+ ib->phys_addr[5] = dev->dev_addr[4];
+
+ /* Setup the Tx ring entries */
+ netdev_dbg(dev, "TX rings:\n");
+ for (i = 0; i < 1 << lp->lance_log_tx_bufs; i++) {
+ leptr = LANCE_ADDR(&aib->tx_buf[i][0]);
+ ib->btx_ring[i].tmd0 = leptr;
+ ib->btx_ring[i].tmd1_hadr = leptr >> 16;
+ ib->btx_ring[i].tmd1_bits = 0;
+ ib->btx_ring[i].length = 0xf000; /* The ones required by tmd2 */
+ ib->btx_ring[i].misc = 0;
+ if (i < 3)
+ netdev_dbg(dev, "%d: 0x%08x\n", i, leptr);
+ }
+
+ /* Setup the Rx ring entries */
+ netdev_dbg(dev, "RX rings:\n");
+ for (i = 0; i < 1 << lp->lance_log_rx_bufs; i++) {
+ leptr = LANCE_ADDR(&aib->rx_buf[i][0]);
+
+ ib->brx_ring[i].rmd0 = leptr;
+ ib->brx_ring[i].rmd1_hadr = leptr >> 16;
+ ib->brx_ring[i].rmd1_bits = LE_R1_OWN;
+ ib->brx_ring[i].length = -RX_BUFF_SIZE | 0xf000;
+ ib->brx_ring[i].mblength = 0;
+ if (i < 3)
+ netdev_dbg(dev, "%d: 0x%08x\n", i, leptr);
+ }
+
+ /* Setup the initialization block */
+
+ /* Setup rx descriptor pointer */
+ leptr = LANCE_ADDR(&aib->brx_ring);
+ ib->rx_len = (lp->lance_log_rx_bufs << 13) | (leptr >> 16);
+ ib->rx_ptr = leptr;
+ netdev_dbg(dev, "RX ptr: %08x\n", leptr);
+
+ /* Setup tx descriptor pointer */
+ leptr = LANCE_ADDR(&aib->btx_ring);
+ ib->tx_len = (lp->lance_log_tx_bufs << 13) | (leptr >> 16);
+ ib->tx_ptr = leptr;
+ netdev_dbg(dev, "TX ptr: %08x\n", leptr);
+
+ /* Clear the multicast filter */
+ ib->filter[0] = 0;
+ ib->filter[1] = 0;
+}
+
+static int init_restart_lance(struct lance_private *lp)
+{
+ volatile struct lance_regs *ll = lp->ll;
+ int i;
+
+ ll->rap = LE_CSR0;
+ ll->rdp = LE_C0_INIT;
+
+ /* Wait for the lance to complete initialization */
+ for (i = 0; (i < 100) && !(ll->rdp & (LE_C0_ERR | LE_C0_IDON)); i++)
+ barrier();
+ if ((i == 100) || (ll->rdp & LE_C0_ERR)) {
+ pr_err("unopened after %d ticks, csr0=%04x\n", i, ll->rdp);
+ return -EIO;
+ }
+
+ /* Clear IDON by writing a "1", enable interrupts and start lance */
+ ll->rdp = LE_C0_IDON;
+ ll->rdp = LE_C0_INEA | LE_C0_STRT;
+
+ return 0;
+}
+
+static int lance_rx(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_init_block *ib = lp->init_block;
+ volatile struct lance_regs *ll = lp->ll;
+ volatile struct lance_rx_desc *rd;
+ unsigned char bits;
+
+#ifdef TEST_HITS
+ int i;
+ char buf[RX_RING_SIZE + 1];
+
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ char r1_own = ib->brx_ring[i].rmd1_bits & LE_R1_OWN;
+ if (i == lp->rx_new)
+ buf[i] = r1_own ? '_' : 'X';
+ else
+ buf[i] = r1_own ? '.' : '1';
+ }
+ buf[RX_RING_SIZE] = 0;
+
+ pr_debug("RxRing TestHits: [%s]\n", buf);
+#endif
+
+ ll->rdp = LE_C0_RINT | LE_C0_INEA;
+ for (rd = &ib->brx_ring[lp->rx_new];
+ !((bits = rd->rmd1_bits) & LE_R1_OWN);
+ rd = &ib->brx_ring[lp->rx_new]) {
+
+ /* We got an incomplete frame? */
+ if ((bits & LE_R1_POK) != LE_R1_POK) {
+ dev->stats.rx_over_errors++;
+ dev->stats.rx_errors++;
+ continue;
+ } else if (bits & LE_R1_ERR) {
+ /* Count only the end frame as a rx error,
+ * not the beginning
+ */
+ if (bits & LE_R1_BUF)
+ dev->stats.rx_fifo_errors++;
+ if (bits & LE_R1_CRC)
+ dev->stats.rx_crc_errors++;
+ if (bits & LE_R1_OFL)
+ dev->stats.rx_over_errors++;
+ if (bits & LE_R1_FRA)
+ dev->stats.rx_frame_errors++;
+ if (bits & LE_R1_EOP)
+ dev->stats.rx_errors++;
+ } else {
+ int len = (rd->mblength & 0xfff) - 4;
+ struct sk_buff *skb = netdev_alloc_skb(dev, len + 2);
+
+ if (!skb) {
+ dev->stats.rx_dropped++;
+ rd->mblength = 0;
+ rd->rmd1_bits = LE_R1_OWN;
+ lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask;
+ return 0;
+ }
+
+ skb_reserve(skb, 2); /* 16 byte align */
+ skb_put(skb, len); /* make room */
+ skb_copy_to_linear_data(skb,
+ (unsigned char *)&ib->rx_buf[lp->rx_new][0],
+ len);
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += len;
+ }
+
+ /* Return the packet to the pool */
+ rd->mblength = 0;
+ rd->rmd1_bits = LE_R1_OWN;
+ lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask;
+ }
+ return 0;
+}
+
+static int lance_tx(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_init_block *ib = lp->init_block;
+ volatile struct lance_regs *ll = lp->ll;
+ volatile struct lance_tx_desc *td;
+ int i, j;
+ int status;
+
+ /* csr0 is 2f3 */
+ ll->rdp = LE_C0_TINT | LE_C0_INEA;
+ /* csr0 is 73 */
+
+ j = lp->tx_old;
+ for (i = j; i != lp->tx_new; i = j) {
+ td = &ib->btx_ring[i];
+
+ /* If we hit a packet not owned by us, stop */
+ if (td->tmd1_bits & LE_T1_OWN)
+ break;
+
+ if (td->tmd1_bits & LE_T1_ERR) {
+ status = td->misc;
+
+ dev->stats.tx_errors++;
+ if (status & LE_T3_RTY)
+ dev->stats.tx_aborted_errors++;
+ if (status & LE_T3_LCOL)
+ dev->stats.tx_window_errors++;
+
+ if (status & LE_T3_CLOS) {
+ dev->stats.tx_carrier_errors++;
+ if (lp->auto_select) {
+ lp->tpe = 1 - lp->tpe;
+ netdev_err(dev, "Carrier Lost, trying %s\n",
+ lp->tpe ? "TPE" : "AUI");
+ /* Stop the lance */
+ ll->rap = LE_CSR0;
+ ll->rdp = LE_C0_STOP;
+ lance_init_ring(dev);
+ load_csrs(lp);
+ init_restart_lance(lp);
+ return 0;
+ }
+ }
+
+ /* buffer errors and underflows turn off
+ * the transmitter, so restart the adapter
+ */
+ if (status & (LE_T3_BUF | LE_T3_UFL)) {
+ dev->stats.tx_fifo_errors++;
+
+ netdev_err(dev, "Tx: ERR_BUF|ERR_UFL, restarting\n");
+ /* Stop the lance */
+ ll->rap = LE_CSR0;
+ ll->rdp = LE_C0_STOP;
+ lance_init_ring(dev);
+ load_csrs(lp);
+ init_restart_lance(lp);
+ return 0;
+ }
+ } else if ((td->tmd1_bits & LE_T1_POK) == LE_T1_POK) {
+ /* So we don't count the packet more than once. */
+ td->tmd1_bits &= ~(LE_T1_POK);
+
+ /* One collision before packet was sent. */
+ if (td->tmd1_bits & LE_T1_EONE)
+ dev->stats.collisions++;
+
+ /* More than one collision, be optimistic. */
+ if (td->tmd1_bits & LE_T1_EMORE)
+ dev->stats.collisions += 2;
+
+ dev->stats.tx_packets++;
+ }
+
+ j = (j + 1) & lp->tx_ring_mod_mask;
+ }
+ lp->tx_old = j;
+ ll->rdp = LE_C0_TINT | LE_C0_INEA;
+ return 0;
+}
+
+static int lance_tx_buffs_avail(struct lance_private *lp)
+{
+ if (lp->tx_old <= lp->tx_new)
+ return lp->tx_old + lp->tx_ring_mod_mask - lp->tx_new;
+ return lp->tx_old - lp->tx_new - 1;
+}
+
+static irqreturn_t lance_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_regs *ll = lp->ll;
+ int csr0;
+
+ ll->rap = LE_CSR0; /* LANCE Controller Status */
+ csr0 = ll->rdp;
+
+ if (!(csr0 & LE_C0_INTR)) /* Check if any interrupt has */
+ return IRQ_NONE; /* been generated by the Lance. */
+
+ /* Acknowledge all the interrupt sources ASAP */
+ ll->rdp = csr0 & ~(LE_C0_INEA | LE_C0_TDMD | LE_C0_STOP | LE_C0_STRT |
+ LE_C0_INIT);
+
+ if (csr0 & LE_C0_ERR) {
+ /* Clear the error condition */
+ ll->rdp = LE_C0_BABL | LE_C0_ERR | LE_C0_MISS | LE_C0_INEA;
+ }
+
+ if (csr0 & LE_C0_RINT)
+ lance_rx(dev);
+
+ if (csr0 & LE_C0_TINT)
+ lance_tx(dev);
+
+ /* Log misc errors. */
+ if (csr0 & LE_C0_BABL)
+ dev->stats.tx_errors++; /* Tx babble. */
+ if (csr0 & LE_C0_MISS)
+ dev->stats.rx_errors++; /* Missed a Rx frame. */
+ if (csr0 & LE_C0_MERR) {
+ netdev_err(dev, "Bus master arbitration failure, status %04x\n",
+ csr0);
+ /* Restart the chip. */
+ ll->rdp = LE_C0_STRT;
+ }
+
+ if (netif_queue_stopped(dev) && lance_tx_buffs_avail(lp) > 0)
+ netif_wake_queue(dev);
+
+ ll->rap = LE_CSR0;
+ ll->rdp = (LE_C0_BABL | LE_C0_CERR | LE_C0_MISS | LE_C0_MERR |
+ LE_C0_IDON | LE_C0_INEA);
+ return IRQ_HANDLED;
+}
+
+static int lance_open(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_regs *ll = lp->ll;
+ int ret;
+
+ /* Stop the Lance */
+ ll->rap = LE_CSR0;
+ ll->rdp = LE_C0_STOP;
+
+ /* Install the Interrupt handler */
+ ret = request_irq(IRQ_AMIGA_PORTS, lance_interrupt, IRQF_SHARED,
+ dev->name, dev);
+ if (ret)
+ return ret;
+
+ load_csrs(lp);
+ lance_init_ring(dev);
+
+ netif_start_queue(dev);
+
+ return init_restart_lance(lp);
+}
+
+static int lance_close(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_regs *ll = lp->ll;
+
+ netif_stop_queue(dev);
+ del_timer_sync(&lp->multicast_timer);
+
+ /* Stop the card */
+ ll->rap = LE_CSR0;
+ ll->rdp = LE_C0_STOP;
+
+ free_irq(IRQ_AMIGA_PORTS, dev);
+ return 0;
+}
+
+static inline int lance_reset(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_regs *ll = lp->ll;
+ int status;
+
+ /* Stop the lance */
+ ll->rap = LE_CSR0;
+ ll->rdp = LE_C0_STOP;
+
+ load_csrs(lp);
+
+ lance_init_ring(dev);
+ netif_trans_update(dev); /* prevent tx timeout */
+ netif_start_queue(dev);
+
+ status = init_restart_lance(lp);
+ netdev_dbg(dev, "Lance restart=%d\n", status);
+
+ return status;
+}
+
+static void lance_tx_timeout(struct net_device *dev, unsigned int txqueue)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_regs *ll = lp->ll;
+
+ netdev_err(dev, "transmit timed out, status %04x, reset\n", ll->rdp);
+ lance_reset(dev);
+ netif_wake_queue(dev);
+}
+
+static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_regs *ll = lp->ll;
+ volatile struct lance_init_block *ib = lp->init_block;
+ int entry, skblen;
+ int status = NETDEV_TX_OK;
+ unsigned long flags;
+
+ if (skb_padto(skb, ETH_ZLEN))
+ return NETDEV_TX_OK;
+ skblen = max_t(unsigned, skb->len, ETH_ZLEN);
+
+ local_irq_save(flags);
+
+ if (!lance_tx_buffs_avail(lp))
+ goto out_free;
+
+ /* dump the packet */
+ print_hex_dump_debug("skb->data: ", DUMP_PREFIX_NONE, 16, 1, skb->data,
+ 64, true);
+
+ entry = lp->tx_new & lp->tx_ring_mod_mask;
+ ib->btx_ring[entry].length = (-skblen) | 0xf000;
+ ib->btx_ring[entry].misc = 0;
+
+ skb_copy_from_linear_data(skb, (void *)&ib->tx_buf[entry][0], skblen);
+
+ /* Now, give the packet to the lance */
+ ib->btx_ring[entry].tmd1_bits = (LE_T1_POK | LE_T1_OWN);
+ lp->tx_new = (lp->tx_new+1) & lp->tx_ring_mod_mask;
+ dev->stats.tx_bytes += skblen;
+
+ if (lance_tx_buffs_avail(lp) <= 0)
+ netif_stop_queue(dev);
+
+ /* Kick the lance: transmit now */
+ ll->rdp = LE_C0_INEA | LE_C0_TDMD;
+ out_free:
+ dev_kfree_skb(skb);
+
+ local_irq_restore(flags);
+
+ return status;
+}
+
+/* taken from the depca driver */
+static void lance_load_multicast(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_init_block *ib = lp->init_block;
+ volatile u16 *mcast_table = (u16 *)&ib->filter;
+ struct netdev_hw_addr *ha;
+ u32 crc;
+
+ /* set all multicast bits */
+ if (dev->flags & IFF_ALLMULTI) {
+ ib->filter[0] = 0xffffffff;
+ ib->filter[1] = 0xffffffff;
+ return;
+ }
+ /* clear the multicast filter */
+ ib->filter[0] = 0;
+ ib->filter[1] = 0;
+
+ /* Add addresses */
+ netdev_for_each_mc_addr(ha, dev) {
+ crc = ether_crc_le(6, ha->addr);
+ crc = crc >> 26;
+ mcast_table[crc >> 4] |= 1 << (crc & 0xf);
+ }
+}
+
+static void lance_set_multicast(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_init_block *ib = lp->init_block;
+ volatile struct lance_regs *ll = lp->ll;
+
+ if (!netif_running(dev))
+ return;
+
+ if (lp->tx_old != lp->tx_new) {
+ mod_timer(&lp->multicast_timer, jiffies + 4);
+ netif_wake_queue(dev);
+ return;
+ }
+
+ netif_stop_queue(dev);
+
+ ll->rap = LE_CSR0;
+ ll->rdp = LE_C0_STOP;
+ lance_init_ring(dev);
+
+ if (dev->flags & IFF_PROMISC) {
+ ib->mode |= LE_MO_PROM;
+ } else {
+ ib->mode &= ~LE_MO_PROM;
+ lance_load_multicast(dev);
+ }
+ load_csrs(lp);
+ init_restart_lance(lp);
+ netif_wake_queue(dev);
+}
+
+static void lance_set_multicast_retry(struct timer_list *t)
+{
+ struct lance_private *lp = from_timer(lp, t, multicast_timer);
+
+ lance_set_multicast(lp->dev);
+}
+
+static int a2065_init_one(struct zorro_dev *z,
+ const struct zorro_device_id *ent);
+static void a2065_remove_one(struct zorro_dev *z);
+
+
+static const struct zorro_device_id a2065_zorro_tbl[] = {
+ { ZORRO_PROD_CBM_A2065_1 },
+ { ZORRO_PROD_CBM_A2065_2 },
+ { ZORRO_PROD_AMERISTAR_A2065 },
+ { 0 }
+};
+MODULE_DEVICE_TABLE(zorro, a2065_zorro_tbl);
+
+static struct zorro_driver a2065_driver = {
+ .name = "a2065",
+ .id_table = a2065_zorro_tbl,
+ .probe = a2065_init_one,
+ .remove = a2065_remove_one,
+};
+
+static const struct net_device_ops lance_netdev_ops = {
+ .ndo_open = lance_open,
+ .ndo_stop = lance_close,
+ .ndo_start_xmit = lance_start_xmit,
+ .ndo_tx_timeout = lance_tx_timeout,
+ .ndo_set_rx_mode = lance_set_multicast,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+};
+
+static int a2065_init_one(struct zorro_dev *z,
+ const struct zorro_device_id *ent)
+{
+ struct net_device *dev;
+ struct lance_private *priv;
+ unsigned long board = z->resource.start;
+ unsigned long base_addr = board + A2065_LANCE;
+ unsigned long mem_start = board + A2065_RAM;
+ struct resource *r1, *r2;
+ u32 serial;
+ int err;
+
+ r1 = request_mem_region(base_addr, sizeof(struct lance_regs),
+ "Am7990");
+ if (!r1)
+ return -EBUSY;
+ r2 = request_mem_region(mem_start, A2065_RAM_SIZE, "RAM");
+ if (!r2) {
+ release_mem_region(base_addr, sizeof(struct lance_regs));
+ return -EBUSY;
+ }
+
+ dev = alloc_etherdev(sizeof(struct lance_private));
+ if (dev == NULL) {
+ release_mem_region(base_addr, sizeof(struct lance_regs));
+ release_mem_region(mem_start, A2065_RAM_SIZE);
+ return -ENOMEM;
+ }
+
+ priv = netdev_priv(dev);
+
+ r1->name = dev->name;
+ r2->name = dev->name;
+
+ serial = be32_to_cpu(z->rom.er_SerialNumber);
+ dev->dev_addr[0] = 0x00;
+ if (z->id != ZORRO_PROD_AMERISTAR_A2065) { /* Commodore */
+ dev->dev_addr[1] = 0x80;
+ dev->dev_addr[2] = 0x10;
+ } else { /* Ameristar */
+ dev->dev_addr[1] = 0x00;
+ dev->dev_addr[2] = 0x9f;
+ }
+ dev->dev_addr[3] = (serial >> 16) & 0xff;
+ dev->dev_addr[4] = (serial >> 8) & 0xff;
+ dev->dev_addr[5] = serial & 0xff;
+ dev->base_addr = (unsigned long)ZTWO_VADDR(base_addr);
+ dev->mem_start = (unsigned long)ZTWO_VADDR(mem_start);
+ dev->mem_end = dev->mem_start + A2065_RAM_SIZE;
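
The station address is the vendor OUI plus the low 24 bits of the board's Zorro serial number. A hypothetical example:

/* A Commodore A2065 (OUI 00:80:10) whose ROM reports
 * er_SerialNumber 0x00012345 gets dev_addr = 00:80:10:01:23:45.
 */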
+
+ priv->ll = (volatile struct lance_regs *)dev->base_addr;
+ priv->init_block = (struct lance_init_block *)dev->mem_start;
+ priv->lance_init_block = (struct lance_init_block *)A2065_RAM;
+ priv->auto_select = 0;
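+	/* The m68k host is big-endian, so run the LANCE with
+	 * byte-swapping enabled (CSR3 BSWP).
+	 */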
+ priv->busmaster_regval = LE_C3_BSWP;
+
+ priv->lance_log_rx_bufs = LANCE_LOG_RX_BUFFERS;
+ priv->lance_log_tx_bufs = LANCE_LOG_TX_BUFFERS;
+ priv->rx_ring_mod_mask = RX_RING_MOD_MASK;
+ priv->tx_ring_mod_mask = TX_RING_MOD_MASK;
+ priv->dev = dev;
+
+ dev->netdev_ops = &lance_netdev_ops;
+ dev->watchdog_timeo = 5*HZ;
+ dev->dma = 0;
+
+ timer_setup(&priv->multicast_timer, lance_set_multicast_retry, 0);
+
+ err = register_netdev(dev);
+ if (err) {
+ release_mem_region(base_addr, sizeof(struct lance_regs));
+ release_mem_region(mem_start, A2065_RAM_SIZE);
+ free_netdev(dev);
+ return err;
+ }
+ zorro_set_drvdata(z, dev);
+
+ netdev_info(dev, "A2065 at 0x%08lx, Ethernet Address %pM\n",
+ board, dev->dev_addr);
+
+ return 0;
+}
+
+
+static void a2065_remove_one(struct zorro_dev *z)
+{
+ struct net_device *dev = zorro_get_drvdata(z);
+
+ unregister_netdev(dev);
+ release_mem_region(ZTWO_PADDR(dev->base_addr),
+ sizeof(struct lance_regs));
+ release_mem_region(ZTWO_PADDR(dev->mem_start), A2065_RAM_SIZE);
+ free_netdev(dev);
+}
+
+static int __init a2065_init_module(void)
+{
+ return zorro_register_driver(&a2065_driver);
+}
+
+static void __exit a2065_cleanup_module(void)
+{
+ zorro_unregister_driver(&a2065_driver);
+}
+
+module_init(a2065_init_module);
+module_exit(a2065_cleanup_module);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/amd/a2065.h b/drivers/net/ethernet/amd/a2065.h
new file mode 100644
index 000000000..5117759d4
--- /dev/null
+++ b/drivers/net/ethernet/amd/a2065.h
@@ -0,0 +1,173 @@
+/*
+ * Amiga Linux/68k A2065 Ethernet Driver
+ *
+ * (C) Copyright 1995 by Geert Uytterhoeven <geert@linux-m68k.org>
+ *
+ * ---------------------------------------------------------------------------
+ *
+ * This program is based on
+ *
+ * ariadne.?: Amiga Linux/68k Ariadne Ethernet Driver
+ * (C) Copyright 1995 by Geert Uytterhoeven,
+ * Peter De Schrijver
+ *
+ * lance.c: An AMD LANCE ethernet driver for linux.
+ * Written 1993-94 by Donald Becker.
+ *
+ * Am79C960: PCnet(tm)-ISA Single-Chip Ethernet Controller
+ * Advanced Micro Devices
+ * Publication #16907, Rev. B, Amendment/0, May 1994
+ *
+ * ---------------------------------------------------------------------------
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of the Linux
+ * distribution for more details.
+ *
+ * ---------------------------------------------------------------------------
+ *
+ * The A2065 is a Zorro-II board made by Commodore/Ameristar. It contains:
+ *
+ * - an Am7990 Local Area Network Controller for Ethernet (LANCE) with
+ * both 10BASE-2 (thin coax) and AUI (DB-15) connectors
+ */
+
+
+/*
+ * Am7990 Local Area Network Controller for Ethernet (LANCE)
+ */
+
+struct lance_regs {
+ unsigned short rdp; /* Register Data Port */
+ unsigned short rap; /* Register Address Port */
+};
+
+
+/*
+ * Am7990 Control and Status Registers
+ */
+
+#define LE_CSR0 0x0000 /* LANCE Controller Status */
+#define LE_CSR1 0x0001 /* IADR[15:0] */
+#define LE_CSR2 0x0002 /* IADR[23:16] */
+#define LE_CSR3 0x0003 /* Misc */
+
+
+/*
+ * Bit definitions for CSR0 (LANCE Controller Status)
+ */
+
+#define LE_C0_ERR 0x8000 /* Error */
+#define LE_C0_BABL 0x4000 /* Babble: Transmitted too many bits */
+#define LE_C0_CERR 0x2000 /* No Heartbeat (10BASE-T) */
+#define LE_C0_MISS 0x1000 /* Missed Frame */
+#define LE_C0_MERR 0x0800 /* Memory Error */
+#define LE_C0_RINT 0x0400 /* Receive Interrupt */
+#define LE_C0_TINT 0x0200 /* Transmit Interrupt */
+#define LE_C0_IDON 0x0100 /* Initialization Done */
+#define LE_C0_INTR 0x0080 /* Interrupt Flag */
+#define LE_C0_INEA 0x0040 /* Interrupt Enable */
+#define LE_C0_RXON 0x0020 /* Receive On */
+#define LE_C0_TXON 0x0010 /* Transmit On */
+#define LE_C0_TDMD 0x0008 /* Transmit Demand */
+#define LE_C0_STOP 0x0004 /* Stop */
+#define LE_C0_STRT 0x0002 /* Start */
+#define LE_C0_INIT 0x0001 /* Initialize */
+
+
+/*
+ * Bit definitions for CSR3
+ */
+
+#define LE_C3_BSWP 0x0004 /* Byte Swap
+ (on for big endian byte order) */
+#define LE_C3_ACON 0x0002 /* ALE Control
+ (on for active low ALE) */
+#define LE_C3_BCON 0x0001 /* Byte Control */
+
+
+/*
+ * Mode Flags
+ */
+
+#define LE_MO_PROM 0x8000 /* Promiscuous Mode */
+#define LE_MO_INTL 0x0040 /* Internal Loopback */
+#define LE_MO_DRTY 0x0020 /* Disable Retry */
+#define LE_MO_FCOLL 0x0010 /* Force Collision */
+#define LE_MO_DXMTFCS 0x0008 /* Disable Transmit CRC */
+#define LE_MO_LOOP 0x0004 /* Loopback Enable */
+#define LE_MO_DTX 0x0002 /* Disable Transmitter */
+#define LE_MO_DRX 0x0001 /* Disable Receiver */
+
+
+struct lance_rx_desc {
+ unsigned short rmd0; /* low address of packet */
+ unsigned char rmd1_bits; /* descriptor bits */
+ unsigned char rmd1_hadr; /* high address of packet */
+ short length; /* This length is 2s complement (negative)!
+ * Buffer length
+ */
+	unsigned short mblength;	/* Actual number of bytes received */
+};
+
+struct lance_tx_desc {
+ unsigned short tmd0; /* low address of packet */
+ unsigned char tmd1_bits; /* descriptor bits */
+ unsigned char tmd1_hadr; /* high address of packet */
+ short length; /* Length is 2s complement (negative)! */
+ unsigned short misc;
+};
+
+
+/*
+ * Receive Flags
+ */
+
+#define LE_R1_OWN 0x80 /* LANCE owns the descriptor */
+#define LE_R1_ERR 0x40 /* Error */
+#define LE_R1_FRA 0x20 /* Framing Error */
+#define LE_R1_OFL 0x10 /* Overflow Error */
+#define LE_R1_CRC 0x08 /* CRC Error */
+#define LE_R1_BUF 0x04 /* Buffer Error */
+#define LE_R1_SOP 0x02 /* Start of Packet */
+#define LE_R1_EOP 0x01 /* End of Packet */
+#define LE_R1_POK 0x03 /* Packet is complete: SOP + EOP */
+
+
+/*
+ * Transmit Flags
+ */
+
+#define LE_T1_OWN 0x80 /* LANCE owns the descriptor */
+#define LE_T1_ERR 0x40 /* Error */
+#define LE_T1_RES 0x20 /* Reserved,
+ LANCE writes this with a zero */
+#define LE_T1_EMORE 0x10 /* More than one retry needed */
+#define LE_T1_EONE 0x08 /* One retry needed */
+#define LE_T1_EDEF 0x04 /* Deferred */
+#define LE_T1_SOP 0x02 /* Start of Packet */
+#define LE_T1_EOP 0x01 /* End of Packet */
+#define LE_T1_POK 0x03 /* Packet is complete: SOP + EOP */
+
+
+/*
+ * Error Flags
+ */
+
+#define LE_T3_BUF 0x8000 /* Buffer Error */
+#define LE_T3_UFL 0x4000 /* Underflow Error */
+#define LE_T3_LCOL 0x1000 /* Late Collision */
+#define LE_T3_CLOS 0x0800 /* Loss of Carrier */
+#define LE_T3_RTY 0x0400 /* Retry Error */
+#define LE_T3_TDR 0x03ff /* Time Domain Reflectometry */
+
+
+/*
+ * A2065 Expansion Board Structure
+ */
+
+#define A2065_LANCE 0x4000
+
+#define A2065_RAM 0x8000
+#define A2065_RAM_SIZE 0x8000
+
diff --git a/drivers/net/ethernet/amd/am79c961a.c b/drivers/net/ethernet/amd/am79c961a.c
new file mode 100644
index 000000000..1c53408f5
--- /dev/null
+++ b/drivers/net/ethernet/amd/am79c961a.c
@@ -0,0 +1,763 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * linux/drivers/net/ethernet/amd/am79c961a.c
+ *
+ * by Russell King <rmk@arm.linux.org.uk> 1995-2001.
+ *
+ * Derived from various things including skeleton.c
+ *
+ * This is a special driver for the am79c961A Lance chip used in the
+ * Intel (formerly Digital Equipment Corp) EBSA110 platform. Please
+ * note that this cannot be built as a module (it doesn't make sense).
+ */
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/crc32.h>
+#include <linux/bitops.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+
+#include <mach/hardware.h>
+
+#define TX_BUFFERS 15
+#define RX_BUFFERS 25
+
+#include "am79c961a.h"
+
+static irqreturn_t
+am79c961_interrupt (int irq, void *dev_id);
+
+static unsigned int net_debug = NET_DEBUG;
+
+static const char version[] =
+ "am79c961 ethernet driver (C) 1995-2001 Russell King v0.04\n";
+
+/* --------------------------------------------------------------------------- */
+
+#ifdef __arm__
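+/* The am79c961's registers are accessed indirectly: the register index
+ * is written to the Register Address Port (RAP) and the data is then
+ * transferred through the Register Data Port (4 bytes below RAP in the
+ * mapped ISA I/O space) or the ISACSR Data Port (8 bytes above it).
+ */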
+static void write_rreg(u_long base, u_int reg, u_int val)
+{
+ asm volatile(
+ "strh %1, [%2] @ NET_RAP\n\t"
+ "strh %0, [%2, #-4] @ NET_RDP"
+ :
+ : "r" (val), "r" (reg), "r" (ISAIO_BASE + 0x0464));
+}
+
+static inline unsigned short read_rreg(u_long base_addr, u_int reg)
+{
+ unsigned short v;
+ asm volatile(
+ "strh %1, [%2] @ NET_RAP\n\t"
+ "ldrh %0, [%2, #-4] @ NET_RDP"
+ : "=r" (v)
+ : "r" (reg), "r" (ISAIO_BASE + 0x0464));
+ return v;
+}
+
+static inline void write_ireg(u_long base, u_int reg, u_int val)
+{
+ asm volatile(
+ "strh %1, [%2] @ NET_RAP\n\t"
+ "strh %0, [%2, #8] @ NET_IDP"
+ :
+ : "r" (val), "r" (reg), "r" (ISAIO_BASE + 0x0464));
+}
+
+static inline unsigned short read_ireg(u_long base_addr, u_int reg)
+{
+ u_short v;
+	asm volatile(
+	"strh %1, [%2] @ NET_RAP\n\t"
+	"ldrh %0, [%2, #8] @ NET_IDP"
+ : "=r" (v)
+ : "r" (reg), "r" (ISAIO_BASE + 0x0464));
+ return v;
+}
+
+#define am_writeword(dev,off,val) __raw_writew(val, ISAMEM_BASE + ((off) << 1))
+#define am_readword(dev,off) __raw_readw(ISAMEM_BASE + ((off) << 1))
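+
+/* Each 16-bit word of board RAM occupies a 32-bit slot in the host's
+ * ISA memory mapping, hence the offset << 1 above and the 4-byte
+ * strides in the copy loops below.
+ */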
+
+static void
+am_writebuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigned int length)
+{
+ offset = ISAMEM_BASE + (offset << 1);
+ length = (length + 1) & ~1;
+ if ((int)buf & 2) {
+ asm volatile("strh %2, [%0], #4"
+ : "=&r" (offset) : "0" (offset), "r" (buf[0] | (buf[1] << 8)));
+ buf += 2;
+ length -= 2;
+ }
+ while (length > 8) {
+ register unsigned int tmp asm("r2"), tmp2 asm("r3");
+ asm volatile(
+ "ldmia %0!, {%1, %2}"
+ : "+r" (buf), "=&r" (tmp), "=&r" (tmp2));
+ length -= 8;
+ asm volatile(
+ "strh %1, [%0], #4\n\t"
+ "mov %1, %1, lsr #16\n\t"
+ "strh %1, [%0], #4\n\t"
+ "strh %2, [%0], #4\n\t"
+ "mov %2, %2, lsr #16\n\t"
+ "strh %2, [%0], #4"
+ : "+r" (offset), "=&r" (tmp), "=&r" (tmp2));
+ }
+ while (length > 0) {
+ asm volatile("strh %2, [%0], #4"
+ : "=&r" (offset) : "0" (offset), "r" (buf[0] | (buf[1] << 8)));
+ buf += 2;
+ length -= 2;
+ }
+}
+
+static void
+am_readbuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigned int length)
+{
+ offset = ISAMEM_BASE + (offset << 1);
+ length = (length + 1) & ~1;
+ if ((int)buf & 2) {
+ unsigned int tmp;
+ asm volatile(
+ "ldrh %2, [%0], #4\n\t"
+ "strb %2, [%1], #1\n\t"
+ "mov %2, %2, lsr #8\n\t"
+ "strb %2, [%1], #1"
+ : "=&r" (offset), "=&r" (buf), "=r" (tmp): "0" (offset), "1" (buf));
+ length -= 2;
+ }
+ while (length > 8) {
+ register unsigned int tmp asm("r2"), tmp2 asm("r3"), tmp3;
+ asm volatile(
+ "ldrh %2, [%0], #4\n\t"
+ "ldrh %4, [%0], #4\n\t"
+ "ldrh %3, [%0], #4\n\t"
+ "orr %2, %2, %4, lsl #16\n\t"
+ "ldrh %4, [%0], #4\n\t"
+ "orr %3, %3, %4, lsl #16\n\t"
+ "stmia %1!, {%2, %3}"
+ : "=&r" (offset), "=&r" (buf), "=r" (tmp), "=r" (tmp2), "=r" (tmp3)
+ : "0" (offset), "1" (buf));
+ length -= 8;
+ }
+ while (length > 0) {
+ unsigned int tmp;
+ asm volatile(
+ "ldrh %2, [%0], #4\n\t"
+ "strb %2, [%1], #1\n\t"
+ "mov %2, %2, lsr #8\n\t"
+ "strb %2, [%1], #1"
+ : "=&r" (offset), "=&r" (buf), "=r" (tmp) : "0" (offset), "1" (buf));
+ length -= 2;
+ }
+}
+#else
+#error Not compatible
+#endif
+
+static int
+am79c961_ramtest(struct net_device *dev, unsigned int val)
+{
+ unsigned char *buffer = kmalloc (65536, GFP_KERNEL);
+ int i, error = 0, errorcount = 0;
+
+ if (!buffer)
+ return 0;
+ memset (buffer, val, 65536);
+ am_writebuffer(dev, 0, buffer, 65536);
+ memset (buffer, val ^ 255, 65536);
+ am_readbuffer(dev, 0, buffer, 65536);
+ for (i = 0; i < 65536; i++) {
+ if (buffer[i] != val && !error) {
+ printk ("%s: buffer error (%02X %02X) %05X - ", dev->name, val, buffer[i], i);
+ error = 1;
+ errorcount ++;
+ } else if (error && buffer[i] == val) {
+ printk ("%05X\n", i);
+ error = 0;
+ }
+ }
+ if (error)
+ printk ("10000\n");
+ kfree (buffer);
+ return errorcount;
+}
+
+static void am79c961_mc_hash(char *addr, u16 *hash)
+{
+ int idx, bit;
+ u32 crc;
+
+ crc = ether_crc_le(ETH_ALEN, addr);
+
+ idx = crc >> 30;
+ bit = (crc >> 26) & 15;
+
+ hash[idx] |= 1 << bit;
+}
+
+static unsigned int am79c961_get_rx_mode(struct net_device *dev, u16 *hash)
+{
+ unsigned int mode = MODE_PORT_10BT;
+
+ if (dev->flags & IFF_PROMISC) {
+ mode |= MODE_PROMISC;
+ memset(hash, 0xff, 4 * sizeof(*hash));
+ } else if (dev->flags & IFF_ALLMULTI) {
+ memset(hash, 0xff, 4 * sizeof(*hash));
+ } else {
+ struct netdev_hw_addr *ha;
+
+ memset(hash, 0, 4 * sizeof(*hash));
+
+ netdev_for_each_mc_addr(ha, dev)
+ am79c961_mc_hash(ha->addr, hash);
+ }
+
+ return mode;
+}
+
+static void
+am79c961_init_for_open(struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ unsigned long flags;
+ unsigned char *p;
+ u_int hdr_addr, first_free_addr;
+ u16 multi_hash[4], mode = am79c961_get_rx_mode(dev, multi_hash);
+ int i;
+
+ /*
+ * Stop the chip.
+ */
+ spin_lock_irqsave(&priv->chip_lock, flags);
+ write_rreg (dev->base_addr, CSR0, CSR0_BABL|CSR0_CERR|CSR0_MISS|CSR0_MERR|CSR0_TINT|CSR0_RINT|CSR0_STOP);
+ spin_unlock_irqrestore(&priv->chip_lock, flags);
+
+ write_ireg (dev->base_addr, 5, 0x00a0); /* Receive address LED */
+ write_ireg (dev->base_addr, 6, 0x0081); /* Collision LED */
+ write_ireg (dev->base_addr, 7, 0x0090); /* XMIT LED */
+ write_ireg (dev->base_addr, 2, 0x0000); /* MODE register selects media */
+
+ for (i = LADRL; i <= LADRH; i++)
+ write_rreg (dev->base_addr, i, multi_hash[i - LADRL]);
+
+ for (i = PADRL, p = dev->dev_addr; i <= PADRH; i++, p += 2)
+ write_rreg (dev->base_addr, i, p[0] | (p[1] << 8));
+
+ write_rreg (dev->base_addr, MODE, mode);
+ write_rreg (dev->base_addr, POLLINT, 0);
+ write_rreg (dev->base_addr, SIZERXR, -RX_BUFFERS);
+ write_rreg (dev->base_addr, SIZETXR, -TX_BUFFERS);
+
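+	/* Lay out the board RAM: the 8-byte rx and tx descriptors come
+	 * first (plus 16 bytes of slack), followed by a 1600-byte
+	 * buffer for every ring slot.
+	 */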
+ first_free_addr = RX_BUFFERS * 8 + TX_BUFFERS * 8 + 16;
+ hdr_addr = 0;
+
+ priv->rxhead = 0;
+ priv->rxtail = 0;
+ priv->rxhdr = hdr_addr;
+
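+	/* Each descriptor is four 16-bit words: buffer address, flags
+	 * (including the OWN bit), buffer length as a two's-complement
+	 * negative value, and a word the chip fills with the received
+	 * length (rx) or error status (tx).
+	 */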
+ for (i = 0; i < RX_BUFFERS; i++) {
+ priv->rxbuffer[i] = first_free_addr;
+ am_writeword (dev, hdr_addr, first_free_addr);
+ am_writeword (dev, hdr_addr + 2, RMD_OWN);
+ am_writeword (dev, hdr_addr + 4, (-1600));
+ am_writeword (dev, hdr_addr + 6, 0);
+ first_free_addr += 1600;
+ hdr_addr += 8;
+ }
+ priv->txhead = 0;
+ priv->txtail = 0;
+ priv->txhdr = hdr_addr;
+ for (i = 0; i < TX_BUFFERS; i++) {
+ priv->txbuffer[i] = first_free_addr;
+ am_writeword (dev, hdr_addr, first_free_addr);
+ am_writeword (dev, hdr_addr + 2, TMD_STP|TMD_ENP);
+ am_writeword (dev, hdr_addr + 4, 0xf000);
+ am_writeword (dev, hdr_addr + 6, 0);
+ first_free_addr += 1600;
+ hdr_addr += 8;
+ }
+
+ write_rreg (dev->base_addr, BASERXL, priv->rxhdr);
+ write_rreg (dev->base_addr, BASERXH, 0);
+ write_rreg (dev->base_addr, BASETXL, priv->txhdr);
+	write_rreg (dev->base_addr, BASETXH, 0);
+ write_rreg (dev->base_addr, CSR0, CSR0_STOP);
+ write_rreg (dev->base_addr, CSR3, CSR3_IDONM|CSR3_BABLM|CSR3_DXSUFLO);
+ write_rreg (dev->base_addr, CSR4, CSR4_APAD_XMIT|CSR4_MFCOM|CSR4_RCVCCOM|CSR4_TXSTRTM|CSR4_JABM);
+ write_rreg (dev->base_addr, CSR0, CSR0_IENA|CSR0_STRT);
+}
+
+static void am79c961_timer(struct timer_list *t)
+{
+ struct dev_priv *priv = from_timer(priv, t, timer);
+ struct net_device *dev = priv->dev;
+ unsigned int lnkstat, carrier;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->chip_lock, flags);
+ lnkstat = read_ireg(dev->base_addr, ISALED0) & ISALED0_LNKST;
+ spin_unlock_irqrestore(&priv->chip_lock, flags);
+ carrier = netif_carrier_ok(dev);
+
+ if (lnkstat && !carrier) {
+ netif_carrier_on(dev);
+ printk("%s: link up\n", dev->name);
+ } else if (!lnkstat && carrier) {
+ netif_carrier_off(dev);
+ printk("%s: link down\n", dev->name);
+ }
+
+ mod_timer(&priv->timer, jiffies + msecs_to_jiffies(500));
+}
+
+/*
+ * Open/initialize the board.
+ */
+static int
+am79c961_open(struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ int ret;
+
+ ret = request_irq(dev->irq, am79c961_interrupt, 0, dev->name, dev);
+ if (ret)
+ return ret;
+
+ am79c961_init_for_open(dev);
+
+ netif_carrier_off(dev);
+
+ priv->timer.expires = jiffies;
+ add_timer(&priv->timer);
+
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+/*
+ * The inverse routine to am79c961_open().
+ */
+static int
+am79c961_close(struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ unsigned long flags;
+
+ del_timer_sync(&priv->timer);
+
+ netif_stop_queue(dev);
+ netif_carrier_off(dev);
+
+ spin_lock_irqsave(&priv->chip_lock, flags);
+ write_rreg (dev->base_addr, CSR0, CSR0_STOP);
+ write_rreg (dev->base_addr, CSR3, CSR3_MASKALL);
+ spin_unlock_irqrestore(&priv->chip_lock, flags);
+
+ free_irq (dev->irq, dev);
+
+ return 0;
+}
+
+/*
+ * Set or clear promiscuous/multicast mode filter for this adapter.
+ */
+static void am79c961_setmulticastlist (struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ unsigned long flags;
+ u16 multi_hash[4], mode = am79c961_get_rx_mode(dev, multi_hash);
+ int i, stopped;
+
+ spin_lock_irqsave(&priv->chip_lock, flags);
+
+ stopped = read_rreg(dev->base_addr, CSR0) & CSR0_STOP;
+
+ if (!stopped) {
+ /*
+ * Put the chip into suspend mode
+ */
+ write_rreg(dev->base_addr, CTRL1, CTRL1_SPND);
+
+ /*
+ * Spin waiting for chip to report suspend mode
+ */
+ while ((read_rreg(dev->base_addr, CTRL1) & CTRL1_SPND) == 0) {
+ spin_unlock_irqrestore(&priv->chip_lock, flags);
+ nop();
+ spin_lock_irqsave(&priv->chip_lock, flags);
+ }
+ }
+
+ /*
+ * Update the multicast hash table
+ */
+ for (i = 0; i < ARRAY_SIZE(multi_hash); i++)
+ write_rreg(dev->base_addr, i + LADRL, multi_hash[i]);
+
+ /*
+ * Write the mode register
+ */
+ write_rreg(dev->base_addr, MODE, mode);
+
+ if (!stopped) {
+ /*
+ * Put the chip back into running mode
+ */
+ write_rreg(dev->base_addr, CTRL1, 0);
+ }
+
+ spin_unlock_irqrestore(&priv->chip_lock, flags);
+}
+
+static void am79c961_timeout(struct net_device *dev, unsigned int txqueue)
+{
+ printk(KERN_WARNING "%s: transmit timed out, network cable problem?\n",
+ dev->name);
+
+ /*
+ * ought to do some setup of the tx side here
+ */
+
+ netif_wake_queue(dev);
+}
+
+/*
+ * Transmit a packet
+ */
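+/* The buffer is handed to the chip by setting TMD_OWN; if the slot
+ * after the new head is still owned by the chip the ring is full, and
+ * the queue is stopped until am79c961_tx() reclaims descriptors.
+ */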
+static netdev_tx_t
+am79c961_sendpacket(struct sk_buff *skb, struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ unsigned int hdraddr, bufaddr;
+ unsigned int head;
+ unsigned long flags;
+
+ head = priv->txhead;
+ hdraddr = priv->txhdr + (head << 3);
+ bufaddr = priv->txbuffer[head];
+ head += 1;
+ if (head >= TX_BUFFERS)
+ head = 0;
+
+ am_writebuffer (dev, bufaddr, skb->data, skb->len);
+ am_writeword (dev, hdraddr + 4, -skb->len);
+ am_writeword (dev, hdraddr + 2, TMD_OWN|TMD_STP|TMD_ENP);
+ priv->txhead = head;
+
+ spin_lock_irqsave(&priv->chip_lock, flags);
+ write_rreg (dev->base_addr, CSR0, CSR0_TDMD|CSR0_IENA);
+ spin_unlock_irqrestore(&priv->chip_lock, flags);
+
+ /*
+ * If the next packet is owned by the ethernet device,
+ * then the tx ring is full and we can't add another
+ * packet.
+ */
+ if (am_readword(dev, priv->txhdr + (priv->txhead << 3) + 2) & TMD_OWN)
+ netif_stop_queue(dev);
+
+ dev_consume_skb_any(skb);
+
+ return NETDEV_TX_OK;
+}
+
+/*
+ * If we have a good packet(s), get it/them out of the buffers.
+ */
+static void
+am79c961_rx(struct net_device *dev, struct dev_priv *priv)
+{
+ do {
+ struct sk_buff *skb;
+ u_int hdraddr;
+ u_int pktaddr;
+ u_int status;
+ int len;
+
+ hdraddr = priv->rxhdr + (priv->rxtail << 3);
+ pktaddr = priv->rxbuffer[priv->rxtail];
+
+ status = am_readword (dev, hdraddr + 2);
+ if (status & RMD_OWN) /* do we own it? */
+ break;
+
+ priv->rxtail ++;
+ if (priv->rxtail >= RX_BUFFERS)
+ priv->rxtail = 0;
+
+ if ((status & (RMD_ERR|RMD_STP|RMD_ENP)) != (RMD_STP|RMD_ENP)) {
+ am_writeword (dev, hdraddr + 2, RMD_OWN);
+ dev->stats.rx_errors++;
+ if (status & RMD_ERR) {
+ if (status & RMD_FRAM)
+ dev->stats.rx_frame_errors++;
+ if (status & RMD_CRC)
+ dev->stats.rx_crc_errors++;
+ } else if (status & RMD_STP)
+ dev->stats.rx_length_errors++;
+ continue;
+ }
+
+ len = am_readword(dev, hdraddr + 6);
+ skb = netdev_alloc_skb(dev, len + 2);
+
+ if (skb) {
+ skb_reserve(skb, 2);
+
+ am_readbuffer(dev, pktaddr, skb_put(skb, len), len);
+ am_writeword(dev, hdraddr + 2, RMD_OWN);
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->stats.rx_bytes += len;
+ dev->stats.rx_packets++;
+ } else {
+ am_writeword (dev, hdraddr + 2, RMD_OWN);
+ dev->stats.rx_dropped++;
+ break;
+ }
+ } while (1);
+}
+
+/*
+ * Update stats for the transmitted packet
+ */
+static void
+am79c961_tx(struct net_device *dev, struct dev_priv *priv)
+{
+ do {
+ short len;
+ u_int hdraddr;
+ u_int status;
+
+ hdraddr = priv->txhdr + (priv->txtail << 3);
+ status = am_readword (dev, hdraddr + 2);
+ if (status & TMD_OWN)
+ break;
+
+ priv->txtail ++;
+ if (priv->txtail >= TX_BUFFERS)
+ priv->txtail = 0;
+
+ if (status & TMD_ERR) {
+ u_int status2;
+
+ dev->stats.tx_errors++;
+
+ status2 = am_readword (dev, hdraddr + 6);
+
+ /*
+ * Clear the error byte
+ */
+ am_writeword (dev, hdraddr + 6, 0);
+
+ if (status2 & TST_RTRY)
+ dev->stats.collisions += 16;
+ if (status2 & TST_LCOL)
+ dev->stats.tx_window_errors++;
+ if (status2 & TST_LCAR)
+ dev->stats.tx_carrier_errors++;
+ if (status2 & TST_UFLO)
+ dev->stats.tx_fifo_errors++;
+ continue;
+ }
+ dev->stats.tx_packets++;
+ len = am_readword (dev, hdraddr + 4);
+ dev->stats.tx_bytes += -len;
+ } while (priv->txtail != priv->txhead);
+
+ netif_wake_queue(dev);
+}
+
+static irqreturn_t
+am79c961_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct dev_priv *priv = netdev_priv(dev);
+ u_int status, n = 100;
+ int handled = 0;
+
+ do {
+ status = read_rreg(dev->base_addr, CSR0);
+ write_rreg(dev->base_addr, CSR0, status &
+ (CSR0_IENA|CSR0_TINT|CSR0_RINT|
+ CSR0_MERR|CSR0_MISS|CSR0_CERR|CSR0_BABL));
+
+ if (status & CSR0_RINT) {
+ handled = 1;
+ am79c961_rx(dev, priv);
+ }
+ if (status & CSR0_TINT) {
+ handled = 1;
+ am79c961_tx(dev, priv);
+ }
+ if (status & CSR0_MISS) {
+ handled = 1;
+ dev->stats.rx_dropped++;
+ }
+ if (status & CSR0_CERR) {
+ handled = 1;
+ mod_timer(&priv->timer, jiffies);
+ }
+ } while (--n && status & (CSR0_RINT | CSR0_TINT));
+
+ return IRQ_RETVAL(handled);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void am79c961_poll_controller(struct net_device *dev)
+{
+ unsigned long flags;
+ local_irq_save(flags);
+ am79c961_interrupt(dev->irq, dev);
+ local_irq_restore(flags);
+}
+#endif
+
+/*
+ * Initialise the chip. Note that we always expect
+ * to be entered with interrupts enabled.
+ */
+static int
+am79c961_hw_init(struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+
+ spin_lock_irq(&priv->chip_lock);
+ write_rreg (dev->base_addr, CSR0, CSR0_STOP);
+ write_rreg (dev->base_addr, CSR3, CSR3_MASKALL);
+ spin_unlock_irq(&priv->chip_lock);
+
+ am79c961_ramtest(dev, 0x66);
+ am79c961_ramtest(dev, 0x99);
+
+ return 0;
+}
+
+static void __init am79c961_banner(void)
+{
+ static unsigned version_printed;
+
+ if (net_debug && version_printed++ == 0)
+ printk(KERN_INFO "%s", version);
+}
+static const struct net_device_ops am79c961_netdev_ops = {
+ .ndo_open = am79c961_open,
+ .ndo_stop = am79c961_close,
+ .ndo_start_xmit = am79c961_sendpacket,
+ .ndo_set_rx_mode = am79c961_setmulticastlist,
+ .ndo_tx_timeout = am79c961_timeout,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = am79c961_poll_controller,
+#endif
+};
+
+static int am79c961_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct net_device *dev;
+ struct dev_priv *priv;
+ int i, ret;
+
+ res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+ if (!res)
+ return -ENODEV;
+
+ dev = alloc_etherdev(sizeof(struct dev_priv));
+ ret = -ENOMEM;
+ if (!dev)
+ goto out;
+
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ priv = netdev_priv(dev);
+
+ /*
+ * Fixed address and IRQ lines here.
+ * The PNP initialisation should have been
+ * done by the ether bootp loader.
+ */
+ dev->base_addr = res->start;
+ ret = platform_get_irq(pdev, 0);
+
+ if (ret < 0) {
+ ret = -ENODEV;
+ goto nodev;
+ }
+ dev->irq = ret;
+
+ ret = -ENODEV;
+ if (!request_region(dev->base_addr, 0x18, dev->name))
+ goto nodev;
+
+ /*
+ * Reset the device.
+ */
+ inb(dev->base_addr + NET_RESET);
+ udelay(5);
+
+ /*
+ * Check the manufacturer part of the
+ * ether address.
+ */
+ if (inb(dev->base_addr) != 0x08 ||
+ inb(dev->base_addr + 2) != 0x00 ||
+ inb(dev->base_addr + 4) != 0x2b)
+ goto release;
+
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = inb(dev->base_addr + i * 2) & 0xff;
+
+ am79c961_banner();
+
+ spin_lock_init(&priv->chip_lock);
+ priv->dev = dev;
+ timer_setup(&priv->timer, am79c961_timer, 0);
+
+ if (am79c961_hw_init(dev))
+ goto release;
+
+ dev->netdev_ops = &am79c961_netdev_ops;
+
+ ret = register_netdev(dev);
+ if (ret == 0) {
+ printk(KERN_INFO "%s: ether address %pM\n",
+ dev->name, dev->dev_addr);
+ return 0;
+ }
+
+release:
+ release_region(dev->base_addr, 0x18);
+nodev:
+ free_netdev(dev);
+out:
+ return ret;
+}
+
+static struct platform_driver am79c961_driver = {
+ .probe = am79c961_probe,
+ .driver = {
+ .name = "am79c961",
+ },
+};
+
+static int __init am79c961_init(void)
+{
+ return platform_driver_register(&am79c961_driver);
+}
+
+__initcall(am79c961_init);
diff --git a/drivers/net/ethernet/amd/am79c961a.h b/drivers/net/ethernet/amd/am79c961a.h
new file mode 100644
index 000000000..73679e053
--- /dev/null
+++ b/drivers/net/ethernet/amd/am79c961a.h
@@ -0,0 +1,143 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * linux/drivers/net/ethernet/amd/am79c961a.h
+ */
+
+#ifndef _LINUX_am79c961a_H
+#define _LINUX_am79c961a_H
+
+/* use 0 for production, 1 for verification, >2 for debug. debug flags: */
+#define DEBUG_TX 2
+#define DEBUG_RX 4
+#define DEBUG_INT 8
+#define DEBUG_IC 16
+#ifndef NET_DEBUG
+#define NET_DEBUG 0
+#endif
+
+#define NET_UID 0
+#define NET_RDP 0x10
+#define NET_RAP 0x12
+#define NET_RESET 0x14
+#define NET_IDP 0x16
+
+/*
+ * RAP registers
+ */
+#define CSR0 0
+#define CSR0_INIT 0x0001
+#define CSR0_STRT 0x0002
+#define CSR0_STOP 0x0004
+#define CSR0_TDMD 0x0008
+#define CSR0_TXON 0x0010
+#define CSR0_RXON 0x0020
+#define CSR0_IENA 0x0040
+#define CSR0_INTR 0x0080
+#define CSR0_IDON 0x0100
+#define CSR0_TINT 0x0200
+#define CSR0_RINT 0x0400
+#define CSR0_MERR 0x0800
+#define CSR0_MISS 0x1000
+#define CSR0_CERR 0x2000
+#define CSR0_BABL 0x4000
+#define CSR0_ERR 0x8000
+
+#define CSR3 3
+#define CSR3_EMBA 0x0008
+#define CSR3_DXMT2PD 0x0010
+#define CSR3_LAPPEN 0x0020
+#define CSR3_DXSUFLO 0x0040
+#define CSR3_IDONM 0x0100
+#define CSR3_TINTM 0x0200
+#define CSR3_RINTM 0x0400
+#define CSR3_MERRM 0x0800
+#define CSR3_MISSM 0x1000
+#define CSR3_BABLM 0x4000
+#define CSR3_MASKALL 0x5F00
+
+#define CSR4 4
+#define CSR4_JABM 0x0001
+#define CSR4_JAB 0x0002
+#define CSR4_TXSTRTM 0x0004
+#define CSR4_TXSTRT 0x0008
+#define CSR4_RCVCCOM 0x0010
+#define CSR4_RCVCCO 0x0020
+#define CSR4_MFCOM 0x0100
+#define CSR4_MFCO 0x0200
+#define CSR4_ASTRP_RCV 0x0400
+#define CSR4_APAD_XMIT 0x0800
+
+#define CTRL1 5
+#define CTRL1_SPND 0x0001
+
+#define LADRL 8
+#define LADRM1 9
+#define LADRM2 10
+#define LADRH 11
+#define PADRL 12
+#define PADRM 13
+#define PADRH 14
+
+#define MODE 15
+#define MODE_DISRX 0x0001
+#define MODE_DISTX 0x0002
+#define MODE_LOOP 0x0004
+#define MODE_DTCRC 0x0008
+#define MODE_COLL 0x0010
+#define MODE_DRETRY 0x0020
+#define MODE_INTLOOP 0x0040
+#define MODE_PORT_AUI 0x0000
+#define MODE_PORT_10BT 0x0080
+#define MODE_DRXPA 0x2000
+#define MODE_DRXBA 0x4000
+#define MODE_PROMISC 0x8000
+
+#define BASERXL 24
+#define BASERXH 25
+#define BASETXL 30
+#define BASETXH 31
+
+#define POLLINT 47
+
+#define SIZERXR 76
+#define SIZETXR 78
+
+#define CSR_MFC 112
+
+#define RMD_ENP 0x0100
+#define RMD_STP 0x0200
+#define RMD_CRC 0x0800
+#define RMD_FRAM 0x2000
+#define RMD_ERR 0x4000
+#define RMD_OWN 0x8000
+
+#define TMD_ENP 0x0100
+#define TMD_STP 0x0200
+#define TMD_MORE 0x1000
+#define TMD_ERR 0x4000
+#define TMD_OWN 0x8000
+
+#define TST_RTRY 0x0400
+#define TST_LCAR 0x0800
+#define TST_LCOL 0x1000
+#define TST_UFLO 0x4000
+#define TST_BUFF 0x8000
+
+#define ISALED0 0x0004
+#define ISALED0_LNKST 0x8000
+
+struct dev_priv {
+ unsigned long rxbuffer[RX_BUFFERS];
+ unsigned long txbuffer[TX_BUFFERS];
+ unsigned char txhead;
+ unsigned char txtail;
+ unsigned char rxhead;
+ unsigned char rxtail;
+ unsigned long rxhdr;
+ unsigned long txhdr;
+ spinlock_t chip_lock;
+ struct timer_list timer;
+ struct net_device *dev;
+};
+
+#endif
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
new file mode 100644
index 000000000..960d483e8
--- /dev/null
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -0,0 +1,1929 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+/* Advanced Micro Devices Inc. AMD8111E Linux Network Driver
+ * Copyright (C) 2004 Advanced Micro Devices
+ *
+ * Copyright 2001,2002 Jeff Garzik <jgarzik@mandrakesoft.com> [ 8139cp.c,tg3.c ]
+ * Copyright (C) 2001, 2002 David S. Miller (davem@redhat.com)[ tg3.c]
+ * Copyright 1996-1999 Thomas Bogendoerfer [ pcnet32.c ]
+ * Derived from the lance driver written 1993,1994,1995 by Donald Becker.
+ * Copyright 1993 United States Government as represented by the
+ * Director, National Security Agency.[ pcnet32.c ]
+ * Carsten Langgaard, carstenl@mips.com [ pcnet32.c ]
+ * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
+ *
+
+Module Name:
+
+ amd8111e.c
+
+Abstract:
+
+ AMD8111 based 10/100 Ethernet Controller Driver.
+
+Environment:
+
+ Kernel Mode
+
+Revision History:
+ 3.0.0
+ Initial Revision.
+ 3.0.1
+ 1. Dynamic interrupt coalescing.
+ 2. Removed prev_stats.
+ 3. MII support.
+ 4. Dynamic IPG support
+ 3.0.2 05/29/2003
+ 1. Bug fix: Fixed failure to send jumbo packets larger than 4k.
+ 2. Bug fix: Fixed VLAN support failure.
+ 3. Bug fix: Fixed receive interrupt coalescing bug.
+ 4. Dynamic IPG support is disabled by default.
+ 3.0.3 06/05/2003
+ 1. Bug fix: Fixed failure to close the interface if SMP is enabled.
+ 3.0.4 12/09/2003
+ 1. Added set_mac_address routine for bonding driver support.
+ 2. Tested the driver for bonding support
+	3. Bug fix: Fixed mismatch in actual receive buffer length and length
+	indicated to the h/w.
+ 4. Modified amd8111e_rx() routine to receive all the received packets
+ in the first interrupt.
+ 5. Bug fix: Corrected rx_errors reported in get_stats() function.
+ 3.0.5 03/22/2004
+ 1. Added NAPI support
+
+*/
+
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/compiler.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/if_vlan.h>
+#include <linux/ctype.h>
+#include <linux/crc32.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/io.h>
+#include <asm/byteorder.h>
+#include <linux/uaccess.h>
+
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
+#define AMD8111E_VLAN_TAG_USED 1
+#else
+#define AMD8111E_VLAN_TAG_USED 0
+#endif
+
+#include "amd8111e.h"
+#define MODULE_NAME "amd8111e"
+MODULE_AUTHOR("Advanced Micro Devices, Inc.");
+MODULE_DESCRIPTION("AMD8111 based 10/100 Ethernet Controller.");
+MODULE_LICENSE("GPL");
+module_param_array(speed_duplex, int, NULL, 0);
+MODULE_PARM_DESC(speed_duplex, "Set device speed and duplex modes, 0: Auto Negotiate, 1: 10Mbps Half Duplex, 2: 10Mbps Full Duplex, 3: 100Mbps Half Duplex, 4: 100Mbps Full Duplex");
+module_param_array(coalesce, bool, NULL, 0);
+MODULE_PARM_DESC(coalesce, "Enable or Disable interrupt coalescing, 1: Enable, 0: Disable");
+module_param_array(dynamic_ipg, bool, NULL, 0);
+MODULE_PARM_DESC(dynamic_ipg, "Enable or Disable dynamic IPG, 1: Enable, 0: Disable");
+
+/* This function will read the PHY registers. */
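+/* A PHY access is a handshake on the PHY_ACCESS register: wait for any
+ * previous command to finish, issue the command with the PHY address in
+ * bits 25:21 and the register number in bits 20:16, then poll until
+ * PHY_CMD_ACTIVE clears (each access takes about 30 us).
+ */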
+static int amd8111e_read_phy(struct amd8111e_priv *lp,
+ int phy_id, int reg, u32 *val)
+{
+ void __iomem *mmio = lp->mmio;
+ unsigned int reg_val;
+ unsigned int repeat= REPEAT_CNT;
+
+ reg_val = readl(mmio + PHY_ACCESS);
+ while (reg_val & PHY_CMD_ACTIVE)
+ reg_val = readl( mmio + PHY_ACCESS );
+
+ writel( PHY_RD_CMD | ((phy_id & 0x1f) << 21) |
+ ((reg & 0x1f) << 16), mmio +PHY_ACCESS);
+ do{
+ reg_val = readl(mmio + PHY_ACCESS);
+ udelay(30); /* It takes 30 us to read/write data */
+ } while (--repeat && (reg_val & PHY_CMD_ACTIVE));
+ if(reg_val & PHY_RD_ERR)
+ goto err_phy_read;
+
+ *val = reg_val & 0xffff;
+ return 0;
+err_phy_read:
+ *val = 0;
+ return -EINVAL;
+
+}
+
+/* This function will write into PHY registers. */
+static int amd8111e_write_phy(struct amd8111e_priv *lp,
+ int phy_id, int reg, u32 val)
+{
+ unsigned int repeat = REPEAT_CNT;
+ void __iomem *mmio = lp->mmio;
+ unsigned int reg_val;
+
+ reg_val = readl(mmio + PHY_ACCESS);
+ while (reg_val & PHY_CMD_ACTIVE)
+ reg_val = readl( mmio + PHY_ACCESS );
+
+ writel( PHY_WR_CMD | ((phy_id & 0x1f) << 21) |
+ ((reg & 0x1f) << 16)|val, mmio + PHY_ACCESS);
+
+ do{
+ reg_val = readl(mmio + PHY_ACCESS);
+ udelay(30); /* It takes 30 us to read/write the data */
+ } while (--repeat && (reg_val & PHY_CMD_ACTIVE));
+
+ if(reg_val & PHY_RD_ERR)
+ goto err_phy_write;
+
+ return 0;
+
+err_phy_write:
+ return -EINVAL;
+
+}
+
+/* This is the mii register read function provided to the mii interface. */
+static int amd8111e_mdio_read(struct net_device *dev, int phy_id, int reg_num)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ unsigned int reg_val;
+
+ amd8111e_read_phy(lp,phy_id,reg_num,&reg_val);
+ return reg_val;
+
+}
+
+/* This is the mii register write function provided to the mii interface. */
+static void amd8111e_mdio_write(struct net_device *dev,
+ int phy_id, int reg_num, int val)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+
+ amd8111e_write_phy(lp, phy_id, reg_num, val);
+}
+
+/* This function will set the PHY speed. During initialization it sets
+ * the speed to 100 Mbps full duplex.
+ */
+static void amd8111e_set_ext_phy(struct net_device *dev)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ u32 bmcr,advert,tmp;
+
+ /* Determine mii register values to set the speed */
+ advert = amd8111e_mdio_read(dev, lp->ext_phy_addr, MII_ADVERTISE);
+ tmp = advert & ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
+ switch (lp->ext_phy_option){
+
+ default:
+ case SPEED_AUTONEG: /* advertise all values */
+ tmp |= ( ADVERTISE_10HALF|ADVERTISE_10FULL|
+ ADVERTISE_100HALF|ADVERTISE_100FULL) ;
+ break;
+ case SPEED10_HALF:
+ tmp |= ADVERTISE_10HALF;
+ break;
+ case SPEED10_FULL:
+ tmp |= ADVERTISE_10FULL;
+ break;
+ case SPEED100_HALF:
+ tmp |= ADVERTISE_100HALF;
+ break;
+ case SPEED100_FULL:
+ tmp |= ADVERTISE_100FULL;
+ break;
+ }
+
+ if(advert != tmp)
+ amd8111e_mdio_write(dev, lp->ext_phy_addr, MII_ADVERTISE, tmp);
+ /* Restart auto negotiation */
+ bmcr = amd8111e_mdio_read(dev, lp->ext_phy_addr, MII_BMCR);
+ bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
+ amd8111e_mdio_write(dev, lp->ext_phy_addr, MII_BMCR, bmcr);
+
+}
+
+/* This function will unmap skb->data space and will free
+ * all transmit and receive skbuffs.
+ */
+static int amd8111e_free_skbs(struct net_device *dev)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ struct sk_buff *rx_skbuff;
+ int i;
+
+ /* Freeing transmit skbs */
+ for(i = 0; i < NUM_TX_BUFFERS; i++){
+ if(lp->tx_skbuff[i]){
+ dma_unmap_single(&lp->pci_dev->dev,
+ lp->tx_dma_addr[i],
+ lp->tx_skbuff[i]->len, DMA_TO_DEVICE);
+ dev_kfree_skb (lp->tx_skbuff[i]);
+ lp->tx_skbuff[i] = NULL;
+ lp->tx_dma_addr[i] = 0;
+ }
+ }
+ /* Freeing previously allocated receive buffers */
+ for (i = 0; i < NUM_RX_BUFFERS; i++){
+ rx_skbuff = lp->rx_skbuff[i];
+ if(rx_skbuff != NULL){
+ dma_unmap_single(&lp->pci_dev->dev,
+ lp->rx_dma_addr[i],
+ lp->rx_buff_len - 2, DMA_FROM_DEVICE);
+ dev_kfree_skb(lp->rx_skbuff[i]);
+ lp->rx_skbuff[i] = NULL;
+ lp->rx_dma_addr[i] = 0;
+ }
+ }
+
+ return 0;
+}
+
+/* This will set the receive buffer length corresponding
+ * to the MTU size of the network interface.
+ */
+static inline void amd8111e_set_rx_buff_len(struct net_device *dev)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ unsigned int mtu = dev->mtu;
+
+ if (mtu > ETH_DATA_LEN){
+ /* MTU + ethernet header + FCS
+ * + optional VLAN tag + skb reserve space 2
+ */
+ lp->rx_buff_len = mtu + ETH_HLEN + 10;
+ lp->options |= OPTION_JUMBO_ENABLE;
+ } else{
+ lp->rx_buff_len = PKT_BUFF_SZ;
+ lp->options &= ~OPTION_JUMBO_ENABLE;
+ }
+}
+
+/* This function will free all the previously allocated buffers,
+ * determine new receive buffer length and will allocate new receive buffers.
+ * This function also allocates and initializes both the transmitter
+ * and receive hardware descriptors.
+ */
+static int amd8111e_init_ring(struct net_device *dev)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ int i;
+
+ lp->rx_idx = lp->tx_idx = 0;
+ lp->tx_complete_idx = 0;
+ lp->tx_ring_idx = 0;
+
+
+ if(lp->opened)
+ /* Free previously allocated transmit and receive skbs */
+ amd8111e_free_skbs(dev);
+
+ else{
+ /* allocate the tx and rx descriptors */
+ lp->tx_ring = dma_alloc_coherent(&lp->pci_dev->dev,
+ sizeof(struct amd8111e_tx_dr) * NUM_TX_RING_DR,
+ &lp->tx_ring_dma_addr, GFP_ATOMIC);
+ if (!lp->tx_ring)
+ goto err_no_mem;
+
+ lp->rx_ring = dma_alloc_coherent(&lp->pci_dev->dev,
+ sizeof(struct amd8111e_rx_dr) * NUM_RX_RING_DR,
+ &lp->rx_ring_dma_addr, GFP_ATOMIC);
+ if (!lp->rx_ring)
+ goto err_free_tx_ring;
+ }
+
+ /* Set new receive buff size */
+ amd8111e_set_rx_buff_len(dev);
+
+ /* Allocating receive skbs */
+ for (i = 0; i < NUM_RX_BUFFERS; i++) {
+
+ lp->rx_skbuff[i] = netdev_alloc_skb(dev, lp->rx_buff_len);
+ if (!lp->rx_skbuff[i]) {
+			/* Release previously allocated skbs */
+ for(--i; i >= 0 ;i--)
+ dev_kfree_skb(lp->rx_skbuff[i]);
+ goto err_free_rx_ring;
+ }
+ skb_reserve(lp->rx_skbuff[i],2);
+ }
+	/* Initializing receive descriptors */
+ for (i = 0; i < NUM_RX_BUFFERS; i++) {
+ lp->rx_dma_addr[i] = dma_map_single(&lp->pci_dev->dev,
+ lp->rx_skbuff[i]->data,
+ lp->rx_buff_len - 2,
+ DMA_FROM_DEVICE);
+
+ lp->rx_ring[i].buff_phy_addr = cpu_to_le32(lp->rx_dma_addr[i]);
+ lp->rx_ring[i].buff_count = cpu_to_le16(lp->rx_buff_len-2);
+ wmb();
+ lp->rx_ring[i].rx_flags = cpu_to_le16(OWN_BIT);
+ }
+
+ /* Initializing transmit descriptors */
+ for (i = 0; i < NUM_TX_RING_DR; i++) {
+ lp->tx_ring[i].buff_phy_addr = 0;
+ lp->tx_ring[i].tx_flags = 0;
+ lp->tx_ring[i].buff_count = 0;
+ }
+
+ return 0;
+
+err_free_rx_ring:
+
+ dma_free_coherent(&lp->pci_dev->dev,
+ sizeof(struct amd8111e_rx_dr) * NUM_RX_RING_DR,
+ lp->rx_ring, lp->rx_ring_dma_addr);
+
+err_free_tx_ring:
+
+ dma_free_coherent(&lp->pci_dev->dev,
+ sizeof(struct amd8111e_tx_dr) * NUM_TX_RING_DR,
+ lp->tx_ring, lp->tx_ring_dma_addr);
+
+err_no_mem:
+ return -ENOMEM;
+}
+
+/* This function will set the interrupt coalescing according
+ * to the input arguments
+ */
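+/* The delayed-interrupt registers DLY_INT_A/B pack the event count
+ * above bit 16 and the timeout (scaled by DELAY_TIMER_CONV) into the
+ * low bits, so a single interrupt can cover a burst of packets instead
+ * of one interrupt per packet.
+ */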
+static int amd8111e_set_coalesce(struct net_device *dev, enum coal_mode cmod)
+{
+ unsigned int timeout;
+ unsigned int event_count;
+
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ void __iomem *mmio = lp->mmio;
+ struct amd8111e_coalesce_conf *coal_conf = &lp->coal_conf;
+
+
+ switch(cmod)
+ {
+ case RX_INTR_COAL :
+ timeout = coal_conf->rx_timeout;
+ event_count = coal_conf->rx_event_count;
+ if( timeout > MAX_TIMEOUT ||
+ event_count > MAX_EVENT_COUNT )
+ return -EINVAL;
+
+ timeout = timeout * DELAY_TIMER_CONV;
+ writel(VAL0|STINTEN, mmio+INTEN0);
+ writel((u32)DLY_INT_A_R0|( event_count<< 16 )|timeout,
+ mmio+DLY_INT_A);
+ break;
+
+ case TX_INTR_COAL :
+ timeout = coal_conf->tx_timeout;
+ event_count = coal_conf->tx_event_count;
+ if( timeout > MAX_TIMEOUT ||
+ event_count > MAX_EVENT_COUNT )
+ return -EINVAL;
+
+
+ timeout = timeout * DELAY_TIMER_CONV;
+ writel(VAL0|STINTEN,mmio+INTEN0);
+ writel((u32)DLY_INT_B_T0|( event_count<< 16 )|timeout,
+ mmio+DLY_INT_B);
+ break;
+
+ case DISABLE_COAL:
+ writel(0,mmio+STVAL);
+ writel(STINTEN, mmio+INTEN0);
+ writel(0, mmio +DLY_INT_B);
+ writel(0, mmio+DLY_INT_A);
+ break;
+ case ENABLE_COAL:
+ /* Start the timer */
+ writel((u32)SOFT_TIMER_FREQ, mmio+STVAL); /* 0.5 sec */
+ writel(VAL0|STINTEN, mmio+INTEN0);
+ break;
+ default:
+ break;
+
+ }
+ return 0;
+
+}
+
+/* This function initializes the device registers and starts the device. */
+static int amd8111e_restart(struct net_device *dev)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ void __iomem *mmio = lp->mmio;
+ int i,reg_val;
+
+ /* stop the chip */
+ writel(RUN, mmio + CMD0);
+
+ if(amd8111e_init_ring(dev))
+ return -ENOMEM;
+
+ /* enable the port manager and set auto negotiation always */
+ writel((u32) VAL1|EN_PMGR, mmio + CMD3 );
+ writel((u32)XPHYANE|XPHYRST , mmio + CTRL2);
+
+ amd8111e_set_ext_phy(dev);
+
+ /* set control registers */
+ reg_val = readl(mmio + CTRL1);
+ reg_val &= ~XMTSP_MASK;
+ writel( reg_val| XMTSP_128 | CACHE_ALIGN, mmio + CTRL1 );
+
+ /* enable interrupt */
+ writel( APINT5EN | APINT4EN | APINT3EN | APINT2EN | APINT1EN |
+ APINT0EN | MIIPDTINTEN | MCCIINTEN | MCCINTEN | MREINTEN |
+ SPNDINTEN | MPINTEN | SINTEN | STINTEN, mmio + INTEN0);
+
+ writel(VAL3 | LCINTEN | VAL1 | TINTEN0 | VAL0 | RINTEN0, mmio + INTEN0);
+
+ /* initialize tx and rx ring base addresses */
+ writel((u32)lp->tx_ring_dma_addr,mmio + XMT_RING_BASE_ADDR0);
+ writel((u32)lp->rx_ring_dma_addr,mmio+ RCV_RING_BASE_ADDR0);
+
+ writew((u32)NUM_TX_RING_DR, mmio + XMT_RING_LEN0);
+ writew((u16)NUM_RX_RING_DR, mmio + RCV_RING_LEN0);
+
+ /* set default IPG to 96 */
+ writew((u32)DEFAULT_IPG,mmio+IPG);
+ writew((u32)(DEFAULT_IPG-IFS1_DELTA), mmio + IFS1);
+
+ if(lp->options & OPTION_JUMBO_ENABLE){
+ writel((u32)VAL2|JUMBO, mmio + CMD3);
+ /* Reset REX_UFLO */
+ writel( REX_UFLO, mmio + CMD2);
+ /* Should not set REX_UFLO for jumbo frames */
+ writel( VAL0 | APAD_XMT|REX_RTRY , mmio + CMD2);
+ }else{
+ writel( VAL0 | APAD_XMT | REX_RTRY|REX_UFLO, mmio + CMD2);
+ writel((u32)JUMBO, mmio + CMD3);
+ }
+
+#if AMD8111E_VLAN_TAG_USED
+ writel((u32) VAL2|VSIZE|VL_TAG_DEL, mmio + CMD3);
+#endif
+ writel( VAL0 | APAD_XMT | REX_RTRY, mmio + CMD2 );
+
+ /* Setting the MAC address to the device */
+ for (i = 0; i < ETH_ALEN; i++)
+ writeb( dev->dev_addr[i], mmio + PADR + i );
+
+ /* Enable interrupt coalesce */
+ if(lp->options & OPTION_INTR_COAL_ENABLE){
+ netdev_info(dev, "Interrupt Coalescing Enabled.\n");
+ amd8111e_set_coalesce(dev,ENABLE_COAL);
+ }
+
+ /* set RUN bit to start the chip */
+ writel(VAL2 | RDMD0, mmio + CMD0);
+ writel(VAL0 | INTREN | RUN, mmio + CMD0);
+
+ /* To avoid PCI posting bug */
+ readl(mmio+CMD0);
+ return 0;
+}
+
+/* This function clears the necessary device registers. */
+static void amd8111e_init_hw_default(struct amd8111e_priv *lp)
+{
+ unsigned int reg_val;
+ unsigned int logic_filter[2] ={0,};
+ void __iomem *mmio = lp->mmio;
+
+
+ /* stop the chip */
+ writel(RUN, mmio + CMD0);
+
+	/* AUTOPOLL0 Register */ /* TBD: default value is 8100 in FPS */
+ writew( 0x8100 | lp->ext_phy_addr, mmio + AUTOPOLL0);
+
+ /* Clear RCV_RING_BASE_ADDR */
+ writel(0, mmio + RCV_RING_BASE_ADDR0);
+
+ /* Clear XMT_RING_BASE_ADDR */
+ writel(0, mmio + XMT_RING_BASE_ADDR0);
+ writel(0, mmio + XMT_RING_BASE_ADDR1);
+ writel(0, mmio + XMT_RING_BASE_ADDR2);
+ writel(0, mmio + XMT_RING_BASE_ADDR3);
+
+ /* Clear CMD0 */
+ writel(CMD0_CLEAR,mmio + CMD0);
+
+ /* Clear CMD2 */
+ writel(CMD2_CLEAR, mmio +CMD2);
+
+ /* Clear CMD7 */
+ writel(CMD7_CLEAR , mmio + CMD7);
+
+ /* Clear DLY_INT_A and DLY_INT_B */
+ writel(0x0, mmio + DLY_INT_A);
+ writel(0x0, mmio + DLY_INT_B);
+
+ /* Clear FLOW_CONTROL */
+ writel(0x0, mmio + FLOW_CONTROL);
+
+ /* Clear INT0 write 1 to clear register */
+ reg_val = readl(mmio + INT0);
+ writel(reg_val, mmio + INT0);
+
+ /* Clear STVAL */
+ writel(0x0, mmio + STVAL);
+
+ /* Clear INTEN0 */
+ writel( INTEN0_CLEAR, mmio + INTEN0);
+
+ /* Clear LADRF */
+ writel(0x0 , mmio + LADRF);
+
+ /* Set SRAM_SIZE & SRAM_BOUNDARY registers */
+ writel( 0x80010,mmio + SRAM_SIZE);
+
+ /* Clear RCV_RING0_LEN */
+ writel(0x0, mmio + RCV_RING_LEN0);
+
+ /* Clear XMT_RING0/1/2/3_LEN */
+ writel(0x0, mmio + XMT_RING_LEN0);
+ writel(0x0, mmio + XMT_RING_LEN1);
+ writel(0x0, mmio + XMT_RING_LEN2);
+ writel(0x0, mmio + XMT_RING_LEN3);
+
+ /* Clear XMT_RING_LIMIT */
+ writel(0x0, mmio + XMT_RING_LIMIT);
+
+ /* Clear MIB */
+ writew(MIB_CLEAR, mmio + MIB_ADDR);
+
+	/* Clear LADRF */
+ amd8111e_writeq(*(u64 *)logic_filter, mmio + LADRF);
+
+ /* SRAM_SIZE register */
+ reg_val = readl(mmio + SRAM_SIZE);
+
+ if(lp->options & OPTION_JUMBO_ENABLE)
+ writel( VAL2|JUMBO, mmio + CMD3);
+#if AMD8111E_VLAN_TAG_USED
+ writel(VAL2|VSIZE|VL_TAG_DEL, mmio + CMD3 );
+#endif
+ /* Set default value to CTRL1 Register */
+ writel(CTRL1_DEFAULT, mmio + CTRL1);
+
+ /* To avoid PCI posting bug */
+ readl(mmio + CMD2);
+
+}
+
+/* This function disables the interrupt and clears all the pending
+ * interrupts in INT0
+ */
+static void amd8111e_disable_interrupt(struct amd8111e_priv *lp)
+{
+ u32 intr0;
+
+ /* Disable interrupt */
+ writel(INTREN, lp->mmio + CMD0);
+
+ /* Clear INT0 */
+ intr0 = readl(lp->mmio + INT0);
+ writel(intr0, lp->mmio + INT0);
+
+ /* To avoid PCI posting bug */
+ readl(lp->mmio + INT0);
+
+}
+
+/* This function stops the chip. */
+static void amd8111e_stop_chip(struct amd8111e_priv *lp)
+{
+ writel(RUN, lp->mmio + CMD0);
+
+ /* To avoid PCI posting bug */
+ readl(lp->mmio + CMD0);
+}
+
+/* This function frees the transmitter and receiver descriptor rings. */
+static void amd8111e_free_ring(struct amd8111e_priv *lp)
+{
+ /* Free transmit and receive descriptor rings */
+ if(lp->rx_ring){
+ dma_free_coherent(&lp->pci_dev->dev,
+ sizeof(struct amd8111e_rx_dr) * NUM_RX_RING_DR,
+ lp->rx_ring, lp->rx_ring_dma_addr);
+ lp->rx_ring = NULL;
+ }
+
+ if(lp->tx_ring){
+ dma_free_coherent(&lp->pci_dev->dev,
+ sizeof(struct amd8111e_tx_dr) * NUM_TX_RING_DR,
+ lp->tx_ring, lp->tx_ring_dma_addr);
+
+ lp->tx_ring = NULL;
+ }
+
+}
+
+/* This function will free all the transmit skbs that are actually
+ * transmitted by the device. It will check the ownership of the
+ * skb before freeing the skb.
+ */
+static int amd8111e_tx(struct net_device *dev)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ int tx_index;
+ int status;
+	/* Complete all the transmitted packets */
+ while (lp->tx_complete_idx != lp->tx_idx){
+ tx_index = lp->tx_complete_idx & TX_RING_DR_MOD_MASK;
+ status = le16_to_cpu(lp->tx_ring[tx_index].tx_flags);
+
+ if(status & OWN_BIT)
+ break; /* It still hasn't been Txed */
+
+ lp->tx_ring[tx_index].buff_phy_addr = 0;
+
+ /* We must free the original skb */
+ if (lp->tx_skbuff[tx_index]) {
+ dma_unmap_single(&lp->pci_dev->dev,
+ lp->tx_dma_addr[tx_index],
+ lp->tx_skbuff[tx_index]->len,
+ DMA_TO_DEVICE);
+ dev_consume_skb_irq(lp->tx_skbuff[tx_index]);
+ lp->tx_skbuff[tx_index] = NULL;
+ lp->tx_dma_addr[tx_index] = 0;
+ }
+ lp->tx_complete_idx++;
+		/* COAL update tx coalescing parameters */
+ lp->coal_conf.tx_packets++;
+ lp->coal_conf.tx_bytes +=
+ le16_to_cpu(lp->tx_ring[tx_index].buff_count);
+
+ if (netif_queue_stopped(dev) &&
+ lp->tx_complete_idx > lp->tx_idx - NUM_TX_BUFFERS +2){
+ /* The ring is no longer full, clear tbusy. */
+ /* lp->tx_full = 0; */
+ netif_wake_queue (dev);
+ }
+ }
+ return 0;
+}
+
+/* This function handles the driver receive operation in polling mode */
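+/* Ownership of each rx descriptor is tracked by the OWN bit: the poll
+ * loop consumes descriptors the MAC has released, replaces the skb,
+ * and finally sets OWN again to hand the buffer back to the hardware.
+ */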
+static int amd8111e_rx_poll(struct napi_struct *napi, int budget)
+{
+ struct amd8111e_priv *lp = container_of(napi, struct amd8111e_priv, napi);
+ struct net_device *dev = lp->amd8111e_net_dev;
+ int rx_index = lp->rx_idx & RX_RING_DR_MOD_MASK;
+ void __iomem *mmio = lp->mmio;
+ struct sk_buff *skb,*new_skb;
+ int min_pkt_len, status;
+ int num_rx_pkt = 0;
+ short pkt_len;
+#if AMD8111E_VLAN_TAG_USED
+ short vtag;
+#endif
+
+ while (num_rx_pkt < budget) {
+ status = le16_to_cpu(lp->rx_ring[rx_index].rx_flags);
+ if (status & OWN_BIT)
+ break;
+
+		/* There is a tricky error noted by John Murphy,
+		 * <murf@perftech.com> to Russ Nelson: Even with
+		 * full-sized buffers it's possible for a jabber
+		 * packet to use two buffers, with only the last
+		 * correctly noting the error.
+		 */
+ if (status & ERR_BIT) {
+ /* resetting flags */
+ lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
+ goto err_next_pkt;
+ }
+ /* check for STP and ENP */
+ if (!((status & STP_BIT) && (status & ENP_BIT))){
+ /* resetting flags */
+ lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
+ goto err_next_pkt;
+ }
+ pkt_len = le16_to_cpu(lp->rx_ring[rx_index].msg_count) - 4;
+
+#if AMD8111E_VLAN_TAG_USED
+ vtag = status & TT_MASK;
+ /* MAC will strip vlan tag */
+ if (vtag != 0)
+ min_pkt_len = MIN_PKT_LEN - 4;
+ else
+#endif
+ min_pkt_len = MIN_PKT_LEN;
+
+ if (pkt_len < min_pkt_len) {
+ lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
+ lp->drv_rx_errors++;
+ goto err_next_pkt;
+ }
+ new_skb = netdev_alloc_skb(dev, lp->rx_buff_len);
+ if (!new_skb) {
+			/* if allocation fails,
+			 * ignore that packet and go to the next one
+			 */
+ lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
+ lp->drv_rx_errors++;
+ goto err_next_pkt;
+ }
+
+ skb_reserve(new_skb, 2);
+ skb = lp->rx_skbuff[rx_index];
+ dma_unmap_single(&lp->pci_dev->dev, lp->rx_dma_addr[rx_index],
+ lp->rx_buff_len - 2, DMA_FROM_DEVICE);
+ skb_put(skb, pkt_len);
+ lp->rx_skbuff[rx_index] = new_skb;
+ lp->rx_dma_addr[rx_index] = dma_map_single(&lp->pci_dev->dev,
+ new_skb->data,
+ lp->rx_buff_len - 2,
+ DMA_FROM_DEVICE);
+
+ skb->protocol = eth_type_trans(skb, dev);
+
+#if AMD8111E_VLAN_TAG_USED
+ if (vtag == TT_VLAN_TAGGED){
+ u16 vlan_tag = le16_to_cpu(lp->rx_ring[rx_index].tag_ctrl_info);
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
+ }
+#endif
+ napi_gro_receive(napi, skb);
+ /* COAL update rx coalescing parameters */
+ lp->coal_conf.rx_packets++;
+ lp->coal_conf.rx_bytes += pkt_len;
+ num_rx_pkt++;
+
+err_next_pkt:
+ lp->rx_ring[rx_index].buff_phy_addr
+ = cpu_to_le32(lp->rx_dma_addr[rx_index]);
+ lp->rx_ring[rx_index].buff_count =
+ cpu_to_le16(lp->rx_buff_len-2);
+ wmb();
+ lp->rx_ring[rx_index].rx_flags |= cpu_to_le16(OWN_BIT);
+ rx_index = (++lp->rx_idx) & RX_RING_DR_MOD_MASK;
+ }
+
+ if (num_rx_pkt < budget && napi_complete_done(napi, num_rx_pkt)) {
+ unsigned long flags;
+
+ /* Receive descriptor is empty now */
+ spin_lock_irqsave(&lp->lock, flags);
+ writel(VAL0|RINTEN0, mmio + INTEN0);
+ writel(VAL2 | RDMD0, mmio + CMD0);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ }
+
+ return num_rx_pkt;
+}
+
+/* This function will indicate the link status to the kernel. */
+static int amd8111e_link_change(struct net_device *dev)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ int status0,speed;
+
+ /* read the link change */
+ status0 = readl(lp->mmio + STAT0);
+
+ if(status0 & LINK_STATS){
+ if(status0 & AUTONEG_COMPLETE)
+ lp->link_config.autoneg = AUTONEG_ENABLE;
+ else
+ lp->link_config.autoneg = AUTONEG_DISABLE;
+
+ if(status0 & FULL_DPLX)
+ lp->link_config.duplex = DUPLEX_FULL;
+ else
+ lp->link_config.duplex = DUPLEX_HALF;
+ speed = (status0 & SPEED_MASK) >> 7;
+ if(speed == PHY_SPEED_10)
+ lp->link_config.speed = SPEED_10;
+ else if(speed == PHY_SPEED_100)
+ lp->link_config.speed = SPEED_100;
+
+ netdev_info(dev, "Link is Up. Speed is %s Mbps %s Duplex\n",
+ (lp->link_config.speed == SPEED_100) ?
+ "100" : "10",
+ (lp->link_config.duplex == DUPLEX_FULL) ?
+ "Full" : "Half");
+
+ netif_carrier_on(dev);
+ }
+ else{
+ lp->link_config.speed = SPEED_INVALID;
+ lp->link_config.duplex = DUPLEX_INVALID;
+ lp->link_config.autoneg = AUTONEG_INVALID;
+ netdev_info(dev, "Link is Down.\n");
+ netif_carrier_off(dev);
+ }
+
+ return 0;
+}
+
+/* This function reads the mib counters. */
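+/* The counters are read indirectly: write MIB_RD_CMD plus the counter
+ * index to MIB_ADDR, poll until MIB_CMD_ACTIVE clears, then fetch the
+ * value from MIB_DATA.
+ */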
+static int amd8111e_read_mib(void __iomem *mmio, u8 MIB_COUNTER)
+{
+ unsigned int status;
+ unsigned int data;
+ unsigned int repeat = REPEAT_CNT;
+
+ writew( MIB_RD_CMD | MIB_COUNTER, mmio + MIB_ADDR);
+ do {
+ status = readw(mmio + MIB_ADDR);
+ udelay(2); /* controller takes MAX 2 us to get mib data */
+ }
+ while (--repeat && (status & MIB_CMD_ACTIVE));
+
+ data = readl(mmio + MIB_DATA);
+ return data;
+}
+
+/* This function reads the mib registers and returns the hardware statistics.
+ * It updates previous internal driver statistics with new values.
+ */
+static struct net_device_stats *amd8111e_get_stats(struct net_device *dev)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ void __iomem *mmio = lp->mmio;
+ unsigned long flags;
+ struct net_device_stats *new_stats = &dev->stats;
+
+ if (!lp->opened)
+ return new_stats;
+ spin_lock_irqsave (&lp->lock, flags);
+
+ /* stats.rx_packets */
+ new_stats->rx_packets = amd8111e_read_mib(mmio, rcv_broadcast_pkts)+
+ amd8111e_read_mib(mmio, rcv_multicast_pkts)+
+ amd8111e_read_mib(mmio, rcv_unicast_pkts);
+
+ /* stats.tx_packets */
+ new_stats->tx_packets = amd8111e_read_mib(mmio, xmt_packets);
+
+ /*stats.rx_bytes */
+ new_stats->rx_bytes = amd8111e_read_mib(mmio, rcv_octets);
+
+ /* stats.tx_bytes */
+ new_stats->tx_bytes = amd8111e_read_mib(mmio, xmt_octets);
+
+ /* stats.rx_errors */
+ /* hw errors + errors driver reported */
+ new_stats->rx_errors = amd8111e_read_mib(mmio, rcv_undersize_pkts)+
+ amd8111e_read_mib(mmio, rcv_fragments)+
+ amd8111e_read_mib(mmio, rcv_jabbers)+
+ amd8111e_read_mib(mmio, rcv_alignment_errors)+
+ amd8111e_read_mib(mmio, rcv_fcs_errors)+
+ amd8111e_read_mib(mmio, rcv_miss_pkts)+
+ lp->drv_rx_errors;
+
+ /* stats.tx_errors */
+ new_stats->tx_errors = amd8111e_read_mib(mmio, xmt_underrun_pkts);
+
+ /* stats.rx_dropped*/
+ new_stats->rx_dropped = amd8111e_read_mib(mmio, rcv_miss_pkts);
+
+ /* stats.tx_dropped*/
+ new_stats->tx_dropped = amd8111e_read_mib(mmio, xmt_underrun_pkts);
+
+ /* stats.multicast*/
+ new_stats->multicast = amd8111e_read_mib(mmio, rcv_multicast_pkts);
+
+ /* stats.collisions*/
+ new_stats->collisions = amd8111e_read_mib(mmio, xmt_collisions);
+
+ /* stats.rx_length_errors*/
+ new_stats->rx_length_errors =
+ amd8111e_read_mib(mmio, rcv_undersize_pkts)+
+ amd8111e_read_mib(mmio, rcv_oversize_pkts);
+
+ /* stats.rx_over_errors*/
+ new_stats->rx_over_errors = amd8111e_read_mib(mmio, rcv_miss_pkts);
+
+ /* stats.rx_crc_errors*/
+ new_stats->rx_crc_errors = amd8111e_read_mib(mmio, rcv_fcs_errors);
+
+ /* stats.rx_frame_errors*/
+ new_stats->rx_frame_errors =
+ amd8111e_read_mib(mmio, rcv_alignment_errors);
+
+ /* stats.rx_fifo_errors */
+ new_stats->rx_fifo_errors = amd8111e_read_mib(mmio, rcv_miss_pkts);
+
+ /* stats.rx_missed_errors */
+ new_stats->rx_missed_errors = amd8111e_read_mib(mmio, rcv_miss_pkts);
+
+ /* stats.tx_aborted_errors*/
+ new_stats->tx_aborted_errors =
+ amd8111e_read_mib(mmio, xmt_excessive_collision);
+
+ /* stats.tx_carrier_errors*/
+ new_stats->tx_carrier_errors =
+ amd8111e_read_mib(mmio, xmt_loss_carrier);
+
+ /* stats.tx_fifo_errors*/
+ new_stats->tx_fifo_errors = amd8111e_read_mib(mmio, xmt_underrun_pkts);
+
+ /* stats.tx_window_errors*/
+ new_stats->tx_window_errors =
+ amd8111e_read_mib(mmio, xmt_late_collision);
+
+ /* Reset the mibs for collecting new statistics */
+ /* writew(MIB_CLEAR, mmio + MIB_ADDR);*/
+
+ spin_unlock_irqrestore (&lp->lock, flags);
+
+ return new_stats;
+}
+
+/* This function recalculates the interrupt coalescing mode on every
+ * interrupt according to the data rate and the packet rate.
+ */
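+/* Heuristic: on each soft-timer tick, derive packet and byte rates from
+ * the deltas since the previous tick. Below 800 packets/s coalescing is
+ * switched off; otherwise the average packet size selects one of four
+ * tiers (<128, 128-511, 512-1023, >=1024 bytes) with progressively
+ * longer timeouts and larger event counts.
+ */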
+static int amd8111e_calc_coalesce(struct net_device *dev)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ struct amd8111e_coalesce_conf *coal_conf = &lp->coal_conf;
+ int tx_pkt_rate;
+ int rx_pkt_rate;
+ int tx_data_rate;
+ int rx_data_rate;
+ int rx_pkt_size;
+ int tx_pkt_size;
+
+ tx_pkt_rate = coal_conf->tx_packets - coal_conf->tx_prev_packets;
+ coal_conf->tx_prev_packets = coal_conf->tx_packets;
+
+ tx_data_rate = coal_conf->tx_bytes - coal_conf->tx_prev_bytes;
+ coal_conf->tx_prev_bytes = coal_conf->tx_bytes;
+
+ rx_pkt_rate = coal_conf->rx_packets - coal_conf->rx_prev_packets;
+ coal_conf->rx_prev_packets = coal_conf->rx_packets;
+
+ rx_data_rate = coal_conf->rx_bytes - coal_conf->rx_prev_bytes;
+ coal_conf->rx_prev_bytes = coal_conf->rx_bytes;
+
+	if (rx_pkt_rate < 800) {
+		if (coal_conf->rx_coal_type != NO_COALESCE) {
+			coal_conf->rx_timeout = 0x0;
+			coal_conf->rx_event_count = 0;
+			amd8111e_set_coalesce(dev, RX_INTR_COAL);
+			coal_conf->rx_coal_type = NO_COALESCE;
+		}
+	} else {
+		rx_pkt_size = rx_data_rate / rx_pkt_rate;
+		if (rx_pkt_size < 128) {
+			if (coal_conf->rx_coal_type != NO_COALESCE) {
+				coal_conf->rx_timeout = 0;
+				coal_conf->rx_event_count = 0;
+				amd8111e_set_coalesce(dev, RX_INTR_COAL);
+				coal_conf->rx_coal_type = NO_COALESCE;
+			}
+		} else if (rx_pkt_size < 512) {
+			if (coal_conf->rx_coal_type != LOW_COALESCE) {
+				coal_conf->rx_timeout = 1;
+				coal_conf->rx_event_count = 4;
+				amd8111e_set_coalesce(dev, RX_INTR_COAL);
+				coal_conf->rx_coal_type = LOW_COALESCE;
+			}
+		} else if (rx_pkt_size < 1024) {
+			if (coal_conf->rx_coal_type != MEDIUM_COALESCE) {
+				coal_conf->rx_timeout = 1;
+				coal_conf->rx_event_count = 4;
+				amd8111e_set_coalesce(dev, RX_INTR_COAL);
+				coal_conf->rx_coal_type = MEDIUM_COALESCE;
+			}
+		} else {	/* rx_pkt_size >= 1024 */
+			if (coal_conf->rx_coal_type != HIGH_COALESCE) {
+				coal_conf->rx_timeout = 2;
+				coal_conf->rx_event_count = 3;
+				amd8111e_set_coalesce(dev, RX_INTR_COAL);
+				coal_conf->rx_coal_type = HIGH_COALESCE;
+			}
+		}
+	}
+	/* Now for TX interrupt coalescing */
+	if (tx_pkt_rate < 800) {
+		if (coal_conf->tx_coal_type != NO_COALESCE) {
+			coal_conf->tx_timeout = 0x0;
+			coal_conf->tx_event_count = 0;
+			amd8111e_set_coalesce(dev, TX_INTR_COAL);
+			coal_conf->tx_coal_type = NO_COALESCE;
+		}
+	} else {
+		tx_pkt_size = tx_data_rate / tx_pkt_rate;
+		if (tx_pkt_size < 128) {
+			if (coal_conf->tx_coal_type != NO_COALESCE) {
+				coal_conf->tx_timeout = 0;
+				coal_conf->tx_event_count = 0;
+				amd8111e_set_coalesce(dev, TX_INTR_COAL);
+				coal_conf->tx_coal_type = NO_COALESCE;
+			}
+		} else if (tx_pkt_size < 512) {
+			if (coal_conf->tx_coal_type != LOW_COALESCE) {
+				coal_conf->tx_timeout = 1;
+				coal_conf->tx_event_count = 2;
+				amd8111e_set_coalesce(dev, TX_INTR_COAL);
+				coal_conf->tx_coal_type = LOW_COALESCE;
+			}
+		} else if (tx_pkt_size < 1024) {
+			if (coal_conf->tx_coal_type != MEDIUM_COALESCE) {
+				coal_conf->tx_timeout = 2;
+				coal_conf->tx_event_count = 5;
+				amd8111e_set_coalesce(dev, TX_INTR_COAL);
+				coal_conf->tx_coal_type = MEDIUM_COALESCE;
+			}
+		} else {	/* tx_pkt_size >= 1024 */
+			if (coal_conf->tx_coal_type != HIGH_COALESCE) {
+				coal_conf->tx_timeout = 4;
+				coal_conf->tx_event_count = 8;
+				amd8111e_set_coalesce(dev, TX_INTR_COAL);
+				coal_conf->tx_coal_type = HIGH_COALESCE;
+			}
+		}
+	}
+	return 0;
+}
+
+/* This is the device interrupt function. It handles transmit,
+ * receive, link change and hardware timer interrupts.
+ */
+static irqreturn_t amd8111e_interrupt(int irq, void *dev_id)
+{
+	struct net_device *dev = (struct net_device *)dev_id;
+	struct amd8111e_priv *lp = netdev_priv(dev);
+	void __iomem *mmio = lp->mmio;
+	unsigned int intr0, intren0;
+	unsigned int handled = 1;
+
+	if (unlikely(!dev))
+		return IRQ_NONE;
+
+ spin_lock(&lp->lock);
+
+	/* Disable interrupts while we service this one */
+ writel(INTREN, mmio + CMD0);
+
+ /* Read interrupt status */
+ intr0 = readl(mmio + INT0);
+ intren0 = readl(mmio + INTEN0);
+
+	/* The INTR bit is set as long as there are INT events to
+	 * process; if it is clear, this interrupt is not ours.
+	 */
+	if (!(intr0 & INTR)) {
+		handled = 0;
+		goto err_no_interrupt;
+	}
+
+	/* The current driver processes four interrupts: RINT, TINT, LCINT and STINT */
+ writel(intr0, mmio + INT0);
+
+ /* Check if Receive Interrupt has occurred. */
+ if (intr0 & RINT0) {
+ if (napi_schedule_prep(&lp->napi)) {
+			/* Disable receive interrupts */
+ writel(RINTEN0, mmio + INTEN0);
+ /* Schedule a polling routine */
+ __napi_schedule(&lp->napi);
+ } else if (intren0 & RINTEN0) {
+ netdev_dbg(dev, "************Driver bug! interrupt while in poll\n");
+			/* Fix by disabling receive interrupts */
+ writel(RINTEN0, mmio + INTEN0);
+ }
+ }
+
+ /* Check if Transmit Interrupt has occurred. */
+ if (intr0 & TINT0)
+ amd8111e_tx(dev);
+
+ /* Check if Link Change Interrupt has occurred. */
+ if (intr0 & LCINT)
+ amd8111e_link_change(dev);
+
+ /* Check if Hardware Timer Interrupt has occurred. */
+ if (intr0 & STINT)
+ amd8111e_calc_coalesce(dev);
+
+err_no_interrupt:
+	writel(VAL0 | INTREN, mmio + CMD0);
+
+ spin_unlock(&lp->lock);
+
+ return IRQ_RETVAL(handled);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void amd8111e_poll(struct net_device *dev)
+{
+ unsigned long flags;
+ local_irq_save(flags);
+ amd8111e_interrupt(0, dev);
+ local_irq_restore(flags);
+}
+#endif
+
+
+/* This function closes the network interface and updates
+ * the statistics so that the most recent statistics will be
+ * available after the interface is down.
+ */
+static int amd8111e_close(struct net_device *dev)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ netif_stop_queue(dev);
+
+ napi_disable(&lp->napi);
+
+ spin_lock_irq(&lp->lock);
+
+ amd8111e_disable_interrupt(lp);
+ amd8111e_stop_chip(lp);
+
+ /* Free transmit and receive skbs */
+ amd8111e_free_skbs(lp->amd8111e_net_dev);
+
+ netif_carrier_off(lp->amd8111e_net_dev);
+
+	/* Delete ipg timer */
+	if (lp->options & OPTION_DYN_IPG_ENABLE)
+		del_timer_sync(&lp->ipg_data.ipg_timer);
+
+ spin_unlock_irq(&lp->lock);
+ free_irq(dev->irq, dev);
+ amd8111e_free_ring(lp);
+
+ /* Update the statistics before closing */
+ amd8111e_get_stats(dev);
+ lp->opened = 0;
+ return 0;
+}
+
+/* This function opens a new interface. It requests the IRQ for the
+ * device, initializes the device, buffers and descriptors, and starts
+ * the device.
+ */
+static int amd8111e_open(struct net_device *dev)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+
+	if (dev->irq == 0 || request_irq(dev->irq, amd8111e_interrupt,
+					 IRQF_SHARED, dev->name, dev))
+		return -EAGAIN;
+
+ napi_enable(&lp->napi);
+
+ spin_lock_irq(&lp->lock);
+
+ amd8111e_init_hw_default(lp);
+
+	if (amd8111e_restart(dev)) {
+ spin_unlock_irq(&lp->lock);
+ napi_disable(&lp->napi);
+ if (dev->irq)
+ free_irq(dev->irq, dev);
+ return -ENOMEM;
+ }
+	/* Start ipg timer */
+	if (lp->options & OPTION_DYN_IPG_ENABLE) {
+ add_timer(&lp->ipg_data.ipg_timer);
+ netdev_info(dev, "Dynamic IPG Enabled\n");
+ }
+
+ lp->opened = 1;
+
+ spin_unlock_irq(&lp->lock);
+
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+/* This function checks if there are any transmit descriptors
+ * available to queue more packets.
+ */
+static int amd8111e_tx_queue_avail(struct amd8111e_priv *lp)
+{
+	int tx_index = lp->tx_idx & TX_BUFF_MOD_MASK;
+
+	if (lp->tx_skbuff[tx_index])
+		return -1;
+	else
+		return 0;
+}
+
+/* This function will queue the transmit packets to the
+ * descriptors and will trigger the send operation. It also
+ * initializes the transmit descriptors with buffer physical address,
+ * byte count, ownership to hardware etc.
+ */
+static netdev_tx_t amd8111e_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ int tx_index;
+ unsigned long flags;
+
+ spin_lock_irqsave(&lp->lock, flags);
+
+ tx_index = lp->tx_idx & TX_RING_DR_MOD_MASK;
+
+ lp->tx_ring[tx_index].buff_count = cpu_to_le16(skb->len);
+
+ lp->tx_skbuff[tx_index] = skb;
+ lp->tx_ring[tx_index].tx_flags = 0;
+
+#if AMD8111E_VLAN_TAG_USED
+ if (skb_vlan_tag_present(skb)) {
+ lp->tx_ring[tx_index].tag_ctrl_cmd |=
+ cpu_to_le16(TCC_VLAN_INSERT);
+ lp->tx_ring[tx_index].tag_ctrl_info =
+ cpu_to_le16(skb_vlan_tag_get(skb));
+
+ }
+#endif
+ lp->tx_dma_addr[tx_index] =
+ dma_map_single(&lp->pci_dev->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ lp->tx_ring[tx_index].buff_phy_addr =
+ cpu_to_le32(lp->tx_dma_addr[tx_index]);
+
+	/* Ensure the buffer address and byte count are visible before
+	 * ownership (together with the FCS and LTINT bits) is handed
+	 * to the hardware.
+	 */
+	wmb();
+	lp->tx_ring[tx_index].tx_flags |=
+		cpu_to_le16(OWN_BIT | STP_BIT | ENP_BIT | ADD_FCS_BIT | LTINT_BIT);
+
+ lp->tx_idx++;
+
+	/* Trigger an immediate send poll */
+	writel(VAL1 | TDMD0, lp->mmio + CMD0);
+	writel(VAL2 | RDMD0, lp->mmio + CMD0);
+
+	if (amd8111e_tx_queue_avail(lp) < 0)
+		netif_stop_queue(dev);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ return NETDEV_TX_OK;
+}
+
+/* This function reads the memory-mapped device registers needed for
+ * the ethtool register dump (13 words; see AMD8111E_REG_DUMP_LEN).
+ */
+static void amd8111e_read_regs(struct amd8111e_priv *lp, u32 *buf)
+{
+ void __iomem *mmio = lp->mmio;
+ /* Read only necessary registers */
+ buf[0] = readl(mmio + XMT_RING_BASE_ADDR0);
+ buf[1] = readl(mmio + XMT_RING_LEN0);
+ buf[2] = readl(mmio + RCV_RING_BASE_ADDR0);
+ buf[3] = readl(mmio + RCV_RING_LEN0);
+ buf[4] = readl(mmio + CMD0);
+ buf[5] = readl(mmio + CMD2);
+ buf[6] = readl(mmio + CMD3);
+ buf[7] = readl(mmio + CMD7);
+ buf[8] = readl(mmio + INT0);
+ buf[9] = readl(mmio + INTEN0);
+ buf[10] = readl(mmio + LADRF);
+ buf[11] = readl(mmio + LADRF+4);
+ buf[12] = readl(mmio + STAT0);
+}
+
+
+/* This function sets promiscuous mode, all-multi mode or the multicast
+ * address list on the device.
+ */
+static void amd8111e_set_multicast_list(struct net_device *dev)
+{
+ struct netdev_hw_addr *ha;
+ struct amd8111e_priv *lp = netdev_priv(dev);
+	u32 mc_filter[2];
+ int bit_num;
+
+	if (dev->flags & IFF_PROMISC) {
+		writel(VAL2 | PROM, lp->mmio + CMD2);
+		return;
+	}
+	/* Not in promiscuous mode */
+	writel(PROM, lp->mmio + CMD2);
+	if (dev->flags & IFF_ALLMULTI ||
+	    netdev_mc_count(dev) > MAX_FILTER_SIZE) {
+		/* Receive all multicast packets */
+		mc_filter[1] = mc_filter[0] = 0xffffffff;
+ lp->options |= OPTION_MULTICAST_ENABLE;
+ amd8111e_writeq(*(u64 *)mc_filter, lp->mmio + LADRF);
+ return;
+ }
+	if (netdev_mc_empty(dev)) {
+		/* Receive only packets destined for this station */
+		mc_filter[1] = mc_filter[0] = 0;
+ lp->options &= ~OPTION_MULTICAST_ENABLE;
+ amd8111e_writeq(*(u64 *)mc_filter, lp->mmio + LADRF);
+ /* disable promiscuous mode */
+ writel(PROM, lp->mmio + CMD2);
+ return;
+ }
+ /* load all the multicast addresses in the logic filter */
+ lp->options |= OPTION_MULTICAST_ENABLE;
+ mc_filter[1] = mc_filter[0] = 0;
+ netdev_for_each_mc_addr(ha, dev) {
+ bit_num = (ether_crc_le(ETH_ALEN, ha->addr) >> 26) & 0x3f;
+ mc_filter[bit_num >> 5] |= 1 << (bit_num & 31);
+ }
+ amd8111e_writeq(*(u64 *)mc_filter, lp->mmio + LADRF);
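+
+	/* The 64-bit logical address filter is a hash table with one bit
+	 * per 6-bit hash: bit_num selects u32 word bit_num >> 5 and bit
+	 * (bit_num & 31) within it. The same computation as a helper
+	 * (hypothetical, for illustration only):
+	 *
+	 *	static void ladrf_set_bit(u32 *filter, const u8 *addr)
+	 *	{
+	 *		int bit = (ether_crc_le(ETH_ALEN, addr) >> 26) & 0x3f;
+	 *
+	 *		filter[bit >> 5] |= 1 << (bit & 31);
+	 *	}
+	 */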
+
+ /* To eliminate PCI posting bug */
+ readl(lp->mmio + CMD2);
+
+}
+
+static void amd8111e_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ struct pci_dev *pci_dev = lp->pci_dev;
+ strlcpy(info->driver, MODULE_NAME, sizeof(info->driver));
+ snprintf(info->fw_version, sizeof(info->fw_version),
+ "%u", chip_version);
+ strlcpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info));
+}
+
+static int amd8111e_get_regs_len(struct net_device *dev)
+{
+ return AMD8111E_REG_DUMP_LEN;
+}
+
+static void amd8111e_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ regs->version = 0;
+ amd8111e_read_regs(lp, buf);
+}
+
+static int amd8111e_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ spin_lock_irq(&lp->lock);
+ mii_ethtool_get_link_ksettings(&lp->mii_if, cmd);
+ spin_unlock_irq(&lp->lock);
+ return 0;
+}
+
+static int amd8111e_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ int res;
+ spin_lock_irq(&lp->lock);
+ res = mii_ethtool_set_link_ksettings(&lp->mii_if, cmd);
+ spin_unlock_irq(&lp->lock);
+ return res;
+}
+
+static int amd8111e_nway_reset(struct net_device *dev)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ return mii_nway_restart(&lp->mii_if);
+}
+
+static u32 amd8111e_get_link(struct net_device *dev)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ return mii_link_ok(&lp->mii_if);
+}
+
+static void amd8111e_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol_info)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ wol_info->supported = WAKE_MAGIC|WAKE_PHY;
+ if (lp->options & OPTION_WOL_ENABLE)
+ wol_info->wolopts = WAKE_MAGIC;
+}
+
+static int amd8111e_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol_info)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ if (wol_info->wolopts & ~(WAKE_MAGIC|WAKE_PHY))
+ return -EINVAL;
+ spin_lock_irq(&lp->lock);
+ if (wol_info->wolopts & WAKE_MAGIC)
+ lp->options |=
+ (OPTION_WOL_ENABLE | OPTION_WAKE_MAGIC_ENABLE);
+	else if (wol_info->wolopts & WAKE_PHY)
+ lp->options |=
+ (OPTION_WOL_ENABLE | OPTION_WAKE_PHY_ENABLE);
+ else
+ lp->options &= ~OPTION_WOL_ENABLE;
+ spin_unlock_irq(&lp->lock);
+ return 0;
+}
+
+static const struct ethtool_ops ops = {
+ .get_drvinfo = amd8111e_get_drvinfo,
+ .get_regs_len = amd8111e_get_regs_len,
+ .get_regs = amd8111e_get_regs,
+ .nway_reset = amd8111e_nway_reset,
+ .get_link = amd8111e_get_link,
+ .get_wol = amd8111e_get_wol,
+ .set_wol = amd8111e_set_wol,
+ .get_link_ksettings = amd8111e_get_link_ksettings,
+ .set_link_ksettings = amd8111e_set_link_ksettings,
+};
+
+/* This function handles the MII ioctls. It reads PHY registers on
+ * behalf of user space (SIOCGMIIPHY/SIOCGMIIREG) and writes them
+ * (SIOCSMIIREG); ethtool operations are handled via ethtool_ops above.
+ */
+static int amd8111e_ioctl(struct net_device *dev , struct ifreq *ifr, int cmd)
+{
+ struct mii_ioctl_data *data = if_mii(ifr);
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ int err;
+ u32 mii_regval;
+
+	switch (cmd) {
+ case SIOCGMIIPHY:
+ data->phy_id = lp->ext_phy_addr;
+
+ fallthrough;
+ case SIOCGMIIREG:
+
+ spin_lock_irq(&lp->lock);
+ err = amd8111e_read_phy(lp, data->phy_id,
+ data->reg_num & PHY_REG_ADDR_MASK, &mii_regval);
+ spin_unlock_irq(&lp->lock);
+
+ data->val_out = mii_regval;
+ return err;
+
+ case SIOCSMIIREG:
+
+ spin_lock_irq(&lp->lock);
+ err = amd8111e_write_phy(lp, data->phy_id,
+ data->reg_num & PHY_REG_ADDR_MASK, data->val_in);
+ spin_unlock_irq(&lp->lock);
+
+ return err;
+
+ default:
+ /* do nothing */
+ break;
+ }
+ return -EOPNOTSUPP;
+}
+
+static int amd8111e_set_mac_address(struct net_device *dev, void *p)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ int i;
+ struct sockaddr *addr = p;
+
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ spin_lock_irq(&lp->lock);
+	/* Write the MAC address to the device */
+	for (i = 0; i < ETH_ALEN; i++)
+		writeb(dev->dev_addr[i], lp->mmio + PADR + i);
+
+ spin_unlock_irq(&lp->lock);
+
+ return 0;
+}
+
+/* This function changes the MTU of the device. It restarts the device to
+ * reinitialize the descriptors with new receive buffers.
+ */
+static int amd8111e_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ int err;
+
+	if (!netif_running(dev)) {
+		/* new_mtu will be used
+		 * when the device starts next time
+		 */
+		dev->mtu = new_mtu;
+		return 0;
+	}
+
+ spin_lock_irq(&lp->lock);
+
+ /* stop the chip */
+ writel(RUN, lp->mmio + CMD0);
+
+ dev->mtu = new_mtu;
+
+ err = amd8111e_restart(dev);
+ spin_unlock_irq(&lp->lock);
+	if (!err)
+ netif_start_queue(dev);
+ return err;
+}
+
+static int amd8111e_enable_magicpkt(struct amd8111e_priv *lp)
+{
+	writel(VAL1 | MPPLBA, lp->mmio + CMD3);
+	writel(VAL0 | MPEN_SW, lp->mmio + CMD7);
+
+ /* To eliminate PCI posting bug */
+ readl(lp->mmio + CMD7);
+ return 0;
+}
+
+static int amd8111e_enable_link_change(struct amd8111e_priv *lp)
+{
+	/* The adapter is already stopped/suspended/interrupt-disabled */
+	writel(VAL0 | LCMODE_SW, lp->mmio + CMD7);
+
+ /* To eliminate PCI posting bug */
+ readl(lp->mmio + CMD7);
+ return 0;
+}
+
+/* This function is called when a packet transmission fails to complete
+ * within a reasonable period, on the assumption that an interrupt has
+ * been missed or the interface is locked up. This function will
+ * reinitialize the hardware.
+ */
+static void amd8111e_tx_timeout(struct net_device *dev, unsigned int txqueue)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ int err;
+
+ netdev_err(dev, "transmit timed out, resetting\n");
+
+ spin_lock_irq(&lp->lock);
+ err = amd8111e_restart(dev);
+ spin_unlock_irq(&lp->lock);
+	if (!err)
+ netif_wake_queue(dev);
+}
+
+static int __maybe_unused amd8111e_suspend(struct device *dev_d)
+{
+ struct net_device *dev = dev_get_drvdata(dev_d);
+ struct amd8111e_priv *lp = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return 0;
+
+ /* disable the interrupt */
+ spin_lock_irq(&lp->lock);
+ amd8111e_disable_interrupt(lp);
+ spin_unlock_irq(&lp->lock);
+
+ netif_device_detach(dev);
+
+ /* stop chip */
+ spin_lock_irq(&lp->lock);
+	if (lp->options & OPTION_DYN_IPG_ENABLE)
+ del_timer_sync(&lp->ipg_data.ipg_timer);
+ amd8111e_stop_chip(lp);
+ spin_unlock_irq(&lp->lock);
+
+	if (lp->options & OPTION_WOL_ENABLE) {
+		/* Enable wake on LAN */
+		if (lp->options & OPTION_WAKE_MAGIC_ENABLE)
+			amd8111e_enable_magicpkt(lp);
+		if (lp->options & OPTION_WAKE_PHY_ENABLE)
+			amd8111e_enable_link_change(lp);
+
+		device_set_wakeup_enable(dev_d, 1);
+	} else {
+		device_set_wakeup_enable(dev_d, 0);
+	}
+
+ return 0;
+}
+
+static int __maybe_unused amd8111e_resume(struct device *dev_d)
+{
+ struct net_device *dev = dev_get_drvdata(dev_d);
+ struct amd8111e_priv *lp = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return 0;
+
+ netif_device_attach(dev);
+
+ spin_lock_irq(&lp->lock);
+ amd8111e_restart(dev);
+ /* Restart ipg timer */
+	if (lp->options & OPTION_DYN_IPG_ENABLE)
+ mod_timer(&lp->ipg_data.ipg_timer,
+ jiffies + IPG_CONVERGE_JIFFIES);
+ spin_unlock_irq(&lp->lock);
+
+ return 0;
+}
+
+static void amd8111e_config_ipg(struct timer_list *t)
+{
+ struct amd8111e_priv *lp = from_timer(lp, t, ipg_data.ipg_timer);
+ struct ipg_info *ipg_data = &lp->ipg_data;
+ void __iomem *mmio = lp->mmio;
+ unsigned int prev_col_cnt = ipg_data->col_cnt;
+ unsigned int total_col_cnt;
+ unsigned int tmp_ipg;
+
+	if (lp->link_config.duplex == DUPLEX_FULL) {
+		ipg_data->ipg = DEFAULT_IPG;
+		return;
+	}
+
+	if (ipg_data->ipg_state == SSTATE) {
+		if (ipg_data->timer_tick == IPG_STABLE_TIME) {
+			ipg_data->timer_tick = 0;
+			ipg_data->ipg = MIN_IPG - IPG_STEP;
+			ipg_data->current_ipg = MIN_IPG;
+			ipg_data->diff_col_cnt = 0xFFFFFFFF;
+			ipg_data->ipg_state = CSTATE;
+		} else {
+			ipg_data->timer_tick++;
+		}
+	}
+
+	if (ipg_data->ipg_state == CSTATE) {
+		/* Get the current collision count */
+		total_col_cnt = ipg_data->col_cnt =
+			amd8111e_read_mib(mmio, xmt_collisions);
+
+		if ((total_col_cnt - prev_col_cnt) < (ipg_data->diff_col_cnt)) {
+			ipg_data->diff_col_cnt = total_col_cnt - prev_col_cnt;
+			ipg_data->ipg = ipg_data->current_ipg;
+		}
+
+		ipg_data->current_ipg += IPG_STEP;
+
+		if (ipg_data->current_ipg <= MAX_IPG) {
+			tmp_ipg = ipg_data->current_ipg;
+		} else {
+			tmp_ipg = ipg_data->ipg;
+			ipg_data->ipg_state = SSTATE;
+		}
+		writew((u32)tmp_ipg, mmio + IPG);
+		writew((u32)(tmp_ipg - IFS1_DELTA), mmio + IFS1);
+	}
+	mod_timer(&lp->ipg_data.ipg_timer, jiffies + IPG_CONVERGE_JIFFIES);
+}
+
+static void amd8111e_probe_ext_phy(struct net_device *dev)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ int i;
+
+ for (i = 0x1e; i >= 0; i--) {
+ u32 id1, id2;
+
+ if (amd8111e_read_phy(lp, i, MII_PHYSID1, &id1))
+ continue;
+ if (amd8111e_read_phy(lp, i, MII_PHYSID2, &id2))
+ continue;
+ lp->ext_phy_id = (id1 << 16) | id2;
+ lp->ext_phy_addr = i;
+ return;
+ }
+ lp->ext_phy_id = 0;
+ lp->ext_phy_addr = 1;
+}
+
+static const struct net_device_ops amd8111e_netdev_ops = {
+ .ndo_open = amd8111e_open,
+ .ndo_stop = amd8111e_close,
+ .ndo_start_xmit = amd8111e_start_xmit,
+ .ndo_tx_timeout = amd8111e_tx_timeout,
+ .ndo_get_stats = amd8111e_get_stats,
+ .ndo_set_rx_mode = amd8111e_set_multicast_list,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = amd8111e_set_mac_address,
+ .ndo_do_ioctl = amd8111e_ioctl,
+ .ndo_change_mtu = amd8111e_change_mtu,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = amd8111e_poll,
+#endif
+};
+
+static int amd8111e_probe_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ int err, i;
+	unsigned long reg_addr, reg_len;
+ struct amd8111e_priv *lp;
+ struct net_device *dev;
+
+ err = pci_enable_device(pdev);
+	if (err) {
+		dev_err(&pdev->dev, "Cannot enable new PCI device\n");
+		return err;
+	}
+
+	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+		dev_err(&pdev->dev, "Cannot find PCI base address\n");
+		err = -ENODEV;
+		goto err_disable_pdev;
+	}
+
+ err = pci_request_regions(pdev, MODULE_NAME);
+	if (err) {
+		dev_err(&pdev->dev, "Cannot obtain PCI resources\n");
+		goto err_disable_pdev;
+	}
+
+ pci_set_master(pdev);
+
+ /* Find power-management capability. */
+ if (!pdev->pm_cap) {
+ dev_err(&pdev->dev, "No Power Management capability\n");
+ err = -ENODEV;
+ goto err_free_reg;
+ }
+
+ /* Initialize DMA */
+ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) < 0) {
+ dev_err(&pdev->dev, "DMA not supported\n");
+ err = -ENODEV;
+ goto err_free_reg;
+ }
+
+ reg_addr = pci_resource_start(pdev, 0);
+ reg_len = pci_resource_len(pdev, 0);
+
+ dev = alloc_etherdev(sizeof(struct amd8111e_priv));
+ if (!dev) {
+ err = -ENOMEM;
+ goto err_free_reg;
+ }
+
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+#if AMD8111E_VLAN_TAG_USED
+	dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
+#endif
+
+ lp = netdev_priv(dev);
+ lp->pci_dev = pdev;
+ lp->amd8111e_net_dev = dev;
+ lp->pm_cap = pdev->pm_cap;
+
+ spin_lock_init(&lp->lock);
+
+ lp->mmio = devm_ioremap(&pdev->dev, reg_addr, reg_len);
+ if (!lp->mmio) {
+ dev_err(&pdev->dev, "Cannot map device registers\n");
+ err = -ENOMEM;
+ goto err_free_dev;
+ }
+
+ /* Initializing MAC address */
+ for (i = 0; i < ETH_ALEN; i++)
+ dev->dev_addr[i] = readb(lp->mmio + PADR + i);
+
+	/* Set user-defined parameters */
+	lp->ext_phy_option = speed_duplex[card_idx];
+	if (coalesce[card_idx])
+		lp->options |= OPTION_INTR_COAL_ENABLE;
+	if (dynamic_ipg[card_idx++])
+		lp->options |= OPTION_DYN_IPG_ENABLE;
+
+ /* Initialize driver entry points */
+ dev->netdev_ops = &amd8111e_netdev_ops;
+ dev->ethtool_ops = &ops;
+	dev->irq = pdev->irq;
+ dev->watchdog_timeo = AMD8111E_TX_TIMEOUT;
+ dev->min_mtu = AMD8111E_MIN_MTU;
+ dev->max_mtu = AMD8111E_MAX_MTU;
+ netif_napi_add(dev, &lp->napi, amd8111e_rx_poll, 32);
+
+ /* Probe the external PHY */
+ amd8111e_probe_ext_phy(dev);
+
+ /* setting mii default values */
+ lp->mii_if.dev = dev;
+ lp->mii_if.mdio_read = amd8111e_mdio_read;
+ lp->mii_if.mdio_write = amd8111e_mdio_write;
+ lp->mii_if.phy_id = lp->ext_phy_addr;
+
+ /* Set receive buffer length and set jumbo option*/
+ amd8111e_set_rx_buff_len(dev);
+
+
+ err = register_netdev(dev);
+ if (err) {
+ dev_err(&pdev->dev, "Cannot register net device\n");
+ goto err_free_dev;
+ }
+
+ pci_set_drvdata(pdev, dev);
+
+ /* Initialize software ipg timer */
+	if (lp->options & OPTION_DYN_IPG_ENABLE) {
+ timer_setup(&lp->ipg_data.ipg_timer, amd8111e_config_ipg, 0);
+ lp->ipg_data.ipg_timer.expires = jiffies +
+ IPG_CONVERGE_JIFFIES;
+ lp->ipg_data.ipg = DEFAULT_IPG;
+ lp->ipg_data.ipg_state = CSTATE;
+ }
+
+ /* display driver and device information */
+	chip_version = (readl(lp->mmio + CHIPID) & 0xf0000000) >> 28;
+ dev_info(&pdev->dev, "[ Rev %x ] PCI 10/100BaseT Ethernet %pM\n",
+ chip_version, dev->dev_addr);
+ if (lp->ext_phy_id)
+ dev_info(&pdev->dev, "Found MII PHY ID 0x%08x at address 0x%02x\n",
+ lp->ext_phy_id, lp->ext_phy_addr);
+ else
+ dev_info(&pdev->dev, "Couldn't detect MII PHY, assuming address 0x01\n");
+
+ return 0;
+
+err_free_dev:
+ free_netdev(dev);
+
+err_free_reg:
+ pci_release_regions(pdev);
+
+err_disable_pdev:
+ pci_disable_device(pdev);
+	return err;
+}
+
+static void amd8111e_remove_one(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+
+ if (dev) {
+ unregister_netdev(dev);
+ free_netdev(dev);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ }
+}
+
+static const struct pci_device_id amd8111e_pci_tbl[] = {
+ {
+ .vendor = PCI_VENDOR_ID_AMD,
+ .device = PCI_DEVICE_ID_AMD8111E_7462,
+ },
+ {
+ .vendor = 0,
+ }
+};
+MODULE_DEVICE_TABLE(pci, amd8111e_pci_tbl);
+
+static SIMPLE_DEV_PM_OPS(amd8111e_pm_ops, amd8111e_suspend, amd8111e_resume);
+
+static struct pci_driver amd8111e_driver = {
+ .name = MODULE_NAME,
+ .id_table = amd8111e_pci_tbl,
+ .probe = amd8111e_probe_one,
+ .remove = amd8111e_remove_one,
+ .driver.pm = &amd8111e_pm_ops
+};
+
+module_pci_driver(amd8111e_driver);
diff --git a/drivers/net/ethernet/amd/amd8111e.h b/drivers/net/ethernet/amd/amd8111e.h
new file mode 100644
index 000000000..493f154ec
--- /dev/null
+++ b/drivers/net/ethernet/amd/amd8111e.h
@@ -0,0 +1,802 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Advanced Micro Devices Inc. AMD8111E Linux Network Driver
+ * Copyright (C) 2003 Advanced Micro Devices
+ *
+
+Module Name:
+
+ amd8111e.h
+
+Abstract:
+
+ AMD8111 based 10/100 Ethernet Controller driver definitions.
+
+Environment:
+
+ Kernel Mode
+
+Revision History:
+ 3.0.0
+ Initial Revision.
+ 3.0.1
+*/
+
+#ifndef _AMD811E_H
+#define _AMD811E_H
+
+/* Command style register access
+ *
+ * Registers CMD0, CMD2, CMD3, CMD7 and INTEN0 use a write access
+ * technique called command style access. It allows writes to selected
+ * bits of a register without altering the bits that are not selected.
+ * Command style registers are divided into 4 bytes that can be written
+ * independently. The high-order bit of each byte is the value bit; it
+ * specifies the value that will be written into the selected bits of
+ * the register.
+ *
+ * E.g., if the value 10011010b is written into the least significant
+ * byte of a command style register, bits 1, 3 and 4 of the register
+ * will be set to 1, and the other bits will not be altered. If the
+ * value 00011010b is written into the same byte, bits 1, 3 and 4 will
+ * be cleared to 0 and the other bits will not be altered.
+ */
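+
+/* In the driver this shows up as, e.g., a single write that enables
+ * interrupts without disturbing the rest of CMD0, and a write without
+ * the value bit that disables them again:
+ *
+ *	writel(VAL0 | INTREN, mmio + CMD0);	// set INTREN (bit 1 of byte 0)
+ *	writel(INTREN, mmio + CMD0);		// clear INTREN
+ */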
+
+/* Offset for Memory Mapped Registers. */
+/* 32 bit registers */
+
+#define ASF_STAT 0x00 /* ASF status register */
+#define CHIPID 0x04 /* Chip ID register */
+#define MIB_DATA 0x10 /* MIB data register */
+#define MIB_ADDR 0x14 /* MIB address register */
+#define STAT0 0x30 /* Status0 register */
+#define INT0 0x38 /* Interrupt0 register */
+#define INTEN0 0x40 /* Interrupt0 enable register*/
+#define CMD0 0x48 /* Command0 register */
+#define CMD2 0x50 /* Command2 register */
+#define CMD3			0x54	/* Command3 register */
+#define CMD7 0x64 /* Command7 register */
+
+#define CTRL1 0x6C /* Control1 register */
+#define CTRL2 0x70 /* Control2 register */
+
+#define XMT_RING_LIMIT 0x7C /* Transmit ring limit register */
+
+#define AUTOPOLL0 0x88 /* Auto-poll0 register */
+#define AUTOPOLL1 0x8A /* Auto-poll1 register */
+#define AUTOPOLL2 0x8C /* Auto-poll2 register */
+#define AUTOPOLL3 0x8E /* Auto-poll3 register */
+#define AUTOPOLL4 0x90 /* Auto-poll4 register */
+#define AUTOPOLL5 0x92 /* Auto-poll5 register */
+
+#define AP_VALUE 0x98 /* Auto-poll value register */
+#define DLY_INT_A 0xA8 /* Group A delayed interrupt register */
+#define DLY_INT_B 0xAC /* Group B delayed interrupt register */
+
+#define FLOW_CONTROL 0xC8 /* Flow control register */
+#define PHY_ACCESS 0xD0 /* PHY access register */
+
+#define STVAL 0xD8 /* Software timer value register */
+
+#define XMT_RING_BASE_ADDR0 0x100 /* Transmit ring0 base addr register */
+#define XMT_RING_BASE_ADDR1 0x108 /* Transmit ring1 base addr register */
+#define XMT_RING_BASE_ADDR2 0x110 /* Transmit ring2 base addr register */
+#define XMT_RING_BASE_ADDR3	0x118	/* Transmit ring3 base addr register */
+
+#define RCV_RING_BASE_ADDR0	0x120	/* Receive ring0 base addr register */
+
+#define PMAT0 0x190 /* OnNow pattern register0 */
+#define PMAT1 0x194 /* OnNow pattern register1 */
+
+/* 16bit registers */
+
+#define XMT_RING_LEN0 0x140 /* Transmit Ring0 length register */
+#define XMT_RING_LEN1 0x144 /* Transmit Ring1 length register */
+#define XMT_RING_LEN2 0x148 /* Transmit Ring2 length register */
+#define XMT_RING_LEN3 0x14C /* Transmit Ring3 length register */
+
+#define RCV_RING_LEN0 0x150 /* Receive Ring0 length register */
+
+#define SRAM_SIZE 0x178 /* SRAM size register */
+#define SRAM_BOUNDARY 0x17A /* SRAM boundary register */
+
+/* 48bit register */
+
+#define PADR 0x160 /* Physical address register */
+
+#define IFS1 0x18C /* Inter-frame spacing Part1 register */
+#define IFS 0x18D /* Inter-frame spacing register */
+#define IPG 0x18E /* Inter-frame gap register */
+/* 64bit register */
+
+#define LADRF 0x168 /* Logical address filter register */
+
+
+/* Register Bit Definitions */
+typedef enum {
+
+ ASF_INIT_DONE = (1 << 1),
+ ASF_INIT_PRESENT = (1 << 0),
+
+}STAT_ASF_BITS;
+
+typedef enum {
+
+ MIB_CMD_ACTIVE = (1 << 15 ),
+ MIB_RD_CMD = (1 << 13 ),
+ MIB_CLEAR = (1 << 12 ),
+ MIB_ADDRESS = (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3)|
+ (1 << 4) | (1 << 5),
+}MIB_ADDR_BITS;
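+
+/* Reading a MIB counter follows a command/poll sequence. A sketch of
+ * the pattern (the driver's amd8111e_read_mib() helper in amd8111e.c
+ * implements it, bounding the poll by REPEAT_CNT iterations):
+ *
+ *	writew(MIB_RD_CMD | counter_offset, mmio + MIB_ADDR);
+ *	while (readw(mmio + MIB_ADDR) & MIB_CMD_ACTIVE)
+ *		;	// poll until the command completes
+ *	value = readl(mmio + MIB_DATA);
+ */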
+
+
+typedef enum {
+
+ PMAT_DET = (1 << 12),
+ MP_DET = (1 << 11),
+ LC_DET = (1 << 10),
+ SPEED_MASK = (1 << 9)|(1 << 8)|(1 << 7),
+ FULL_DPLX = (1 << 6),
+ LINK_STATS = (1 << 5),
+ AUTONEG_COMPLETE = (1 << 4),
+ MIIPD = (1 << 3),
+ RX_SUSPENDED = (1 << 2),
+ TX_SUSPENDED = (1 << 1),
+ RUNNING = (1 << 0),
+
+}STAT0_BITS;
+
+#define PHY_SPEED_10 0x2
+#define PHY_SPEED_100 0x3
+
+/* INT0 0x38, 32bit register */
+typedef enum {
+
+ INTR = (1 << 31),
+ PCSINT = (1 << 28),
+ LCINT = (1 << 27),
+ APINT5 = (1 << 26),
+ APINT4 = (1 << 25),
+ APINT3 = (1 << 24),
+ TINT_SUM = (1 << 23),
+ APINT2 = (1 << 22),
+ APINT1 = (1 << 21),
+ APINT0 = (1 << 20),
+ MIIPDTINT = (1 << 19),
+ MCCINT = (1 << 17),
+ MREINT = (1 << 16),
+ RINT_SUM = (1 << 15),
+ SPNDINT = (1 << 14),
+ MPINT = (1 << 13),
+ SINT = (1 << 12),
+ TINT3 = (1 << 11),
+ TINT2 = (1 << 10),
+ TINT1 = (1 << 9),
+ TINT0 = (1 << 8),
+ UINT = (1 << 7),
+ STINT = (1 << 4),
+ RINT0 = (1 << 0),
+
+}INT0_BITS;
+
+typedef enum {
+
+ VAL3 = (1 << 31), /* VAL bit for byte 3 */
+ VAL2 = (1 << 23), /* VAL bit for byte 2 */
+ VAL1 = (1 << 15), /* VAL bit for byte 1 */
+ VAL0 = (1 << 7), /* VAL bit for byte 0 */
+
+}VAL_BITS;
+
+typedef enum {
+
+ /* VAL3 */
+ LCINTEN = (1 << 27),
+ APINT5EN = (1 << 26),
+ APINT4EN = (1 << 25),
+ APINT3EN = (1 << 24),
+ /* VAL2 */
+ APINT2EN = (1 << 22),
+ APINT1EN = (1 << 21),
+ APINT0EN = (1 << 20),
+ MIIPDTINTEN = (1 << 19),
+ MCCIINTEN = (1 << 18),
+ MCCINTEN = (1 << 17),
+ MREINTEN = (1 << 16),
+ /* VAL1 */
+ SPNDINTEN = (1 << 14),
+ MPINTEN = (1 << 13),
+ TINTEN3 = (1 << 11),
+ SINTEN = (1 << 12),
+ TINTEN2 = (1 << 10),
+ TINTEN1 = (1 << 9),
+ TINTEN0 = (1 << 8),
+ /* VAL0 */
+ STINTEN = (1 << 4),
+ RINTEN0 = (1 << 0),
+
+ INTEN0_CLEAR = 0x1F7F7F1F, /* Command style register */
+
+}INTEN0_BITS;
+
+typedef enum {
+ /* VAL2 */
+ RDMD0 = (1 << 16),
+ /* VAL1 */
+ TDMD3 = (1 << 11),
+ TDMD2 = (1 << 10),
+ TDMD1 = (1 << 9),
+ TDMD0 = (1 << 8),
+ /* VAL0 */
+ UINTCMD = (1 << 6),
+ RX_FAST_SPND = (1 << 5),
+ TX_FAST_SPND = (1 << 4),
+ RX_SPND = (1 << 3),
+ TX_SPND = (1 << 2),
+ INTREN = (1 << 1),
+ RUN = (1 << 0),
+
+ CMD0_CLEAR = 0x000F0F7F, /* Command style register */
+
+}CMD0_BITS;
+
+typedef enum {
+
+ /* VAL3 */
+ CONDUIT_MODE = (1 << 29),
+ /* VAL2 */
+ RPA = (1 << 19),
+ DRCVPA = (1 << 18),
+ DRCVBC = (1 << 17),
+ PROM = (1 << 16),
+ /* VAL1 */
+ ASTRP_RCV = (1 << 13),
+ RCV_DROP0 = (1 << 12),
+ EMBA = (1 << 11),
+ DXMT2PD = (1 << 10),
+ LTINTEN = (1 << 9),
+ DXMTFCS = (1 << 8),
+ /* VAL0 */
+ APAD_XMT = (1 << 6),
+ DRTY = (1 << 5),
+ INLOOP = (1 << 4),
+ EXLOOP = (1 << 3),
+ REX_RTRY = (1 << 2),
+ REX_UFLO = (1 << 1),
+ REX_LCOL = (1 << 0),
+
+ CMD2_CLEAR = 0x3F7F3F7F, /* Command style register */
+
+}CMD2_BITS;
+
+typedef enum {
+
+ /* VAL3 */
+ ASF_INIT_DONE_ALIAS = (1 << 29),
+ /* VAL2 */
+ JUMBO = (1 << 21),
+ VSIZE = (1 << 20),
+ VLONLY = (1 << 19),
+ VL_TAG_DEL = (1 << 18),
+ /* VAL1 */
+ EN_PMGR = (1 << 14),
+ INTLEVEL = (1 << 13),
+ FORCE_FULL_DUPLEX = (1 << 12),
+ FORCE_LINK_STATUS = (1 << 11),
+ APEP = (1 << 10),
+ MPPLBA = (1 << 9),
+ /* VAL0 */
+ RESET_PHY_PULSE = (1 << 2),
+ RESET_PHY = (1 << 1),
+ PHY_RST_POL = (1 << 0),
+
+}CMD3_BITS;
+
+
+typedef enum {
+
+ /* VAL0 */
+ PMAT_SAVE_MATCH = (1 << 4),
+ PMAT_MODE = (1 << 3),
+ MPEN_SW = (1 << 1),
+ LCMODE_SW = (1 << 0),
+
+ CMD7_CLEAR = 0x0000001B /* Command style register */
+
+}CMD7_BITS;
+
+
+typedef enum {
+
+ RESET_PHY_WIDTH = (0xF << 16) | (0xF<< 20), /* 0x00FF0000 */
+ XMTSP_MASK = (1 << 9) | (1 << 8), /* 9:8 */
+ XMTSP_128 = (1 << 9), /* 9 */
+ XMTSP_64 = (1 << 8),
+ CACHE_ALIGN = (1 << 4),
+ BURST_LIMIT_MASK = (0xF << 0 ),
+ CTRL1_DEFAULT = 0x00010111,
+
+}CTRL1_BITS;
+
+typedef enum {
+
+ FMDC_MASK = (1 << 9)|(1 << 8), /* 9:8 */
+ XPHYRST = (1 << 7),
+ XPHYANE = (1 << 6),
+ XPHYFD = (1 << 5),
+ XPHYSP = (1 << 4) | (1 << 3), /* 4:3 */
+ APDW_MASK = (1 << 2) | (1 << 1) | (1 << 0), /* 2:0 */
+
+}CTRL2_BITS;
+
+/* XMT_RING_LIMIT 0x7C, 32bit register */
+typedef enum {
+
+ XMT_RING2_LIMIT = (0xFF << 16), /* 23:16 */
+ XMT_RING1_LIMIT = (0xFF << 8), /* 15:8 */
+ XMT_RING0_LIMIT = (0xFF << 0), /* 7:0 */
+
+}XMT_RING_LIMIT_BITS;
+
+typedef enum {
+
+ AP_REG0_EN = (1 << 15),
+ AP_REG0_ADDR_MASK = (0xF << 8) |(1 << 12),/* 12:8 */
+ AP_PHY0_ADDR_MASK = (0xF << 0) |(1 << 4),/* 4:0 */
+
+}AUTOPOLL0_BITS;
+
+/* AUTOPOLL1 0x8A, 16bit register */
+typedef enum {
+
+ AP_REG1_EN = (1 << 15),
+ AP_REG1_ADDR_MASK = (0xF << 8) |(1 << 12),/* 12:8 */
+ AP_PRE_SUP1 = (1 << 6),
+ AP_PHY1_DFLT = (1 << 5),
+ AP_PHY1_ADDR_MASK = (0xF << 0) |(1 << 4),/* 4:0 */
+
+}AUTOPOLL1_BITS;
+
+
+typedef enum {
+
+ AP_REG2_EN = (1 << 15),
+ AP_REG2_ADDR_MASK = (0xF << 8) |(1 << 12),/* 12:8 */
+ AP_PRE_SUP2 = (1 << 6),
+ AP_PHY2_DFLT = (1 << 5),
+ AP_PHY2_ADDR_MASK = (0xF << 0) |(1 << 4),/* 4:0 */
+
+}AUTOPOLL2_BITS;
+
+typedef enum {
+
+ AP_REG3_EN = (1 << 15),
+ AP_REG3_ADDR_MASK = (0xF << 8) |(1 << 12),/* 12:8 */
+ AP_PRE_SUP3 = (1 << 6),
+ AP_PHY3_DFLT = (1 << 5),
+ AP_PHY3_ADDR_MASK = (0xF << 0) |(1 << 4),/* 4:0 */
+
+}AUTOPOLL3_BITS;
+
+
+typedef enum {
+
+ AP_REG4_EN = (1 << 15),
+ AP_REG4_ADDR_MASK = (0xF << 8) |(1 << 12),/* 12:8 */
+ AP_PRE_SUP4 = (1 << 6),
+ AP_PHY4_DFLT = (1 << 5),
+ AP_PHY4_ADDR_MASK = (0xF << 0) |(1 << 4),/* 4:0 */
+
+}AUTOPOLL4_BITS;
+
+
+typedef enum {
+
+ AP_REG5_EN = (1 << 15),
+ AP_REG5_ADDR_MASK = (0xF << 8) |(1 << 12),/* 12:8 */
+ AP_PRE_SUP5 = (1 << 6),
+ AP_PHY5_DFLT = (1 << 5),
+ AP_PHY5_ADDR_MASK = (0xF << 0) |(1 << 4),/* 4:0 */
+
+}AUTOPOLL5_BITS;
+
+/* AP_VALUE 0x98, 32bit register */
+typedef enum {
+
+ AP_VAL_ACTIVE = (1 << 31),
+ AP_VAL_RD_CMD = ( 1 << 29),
+ AP_ADDR = (1 << 18)|(1 << 17)|(1 << 16), /* 18:16 */
+ AP_VAL = (0xF << 0) | (0xF << 4) |( 0xF << 8) |
+ (0xF << 12), /* 15:0 */
+
+}AP_VALUE_BITS;
+
+typedef enum {
+
+ DLY_INT_A_R3 = (1 << 31),
+ DLY_INT_A_R2 = (1 << 30),
+ DLY_INT_A_R1 = (1 << 29),
+ DLY_INT_A_R0 = (1 << 28),
+ DLY_INT_A_T3 = (1 << 27),
+ DLY_INT_A_T2 = (1 << 26),
+ DLY_INT_A_T1 = (1 << 25),
+ DLY_INT_A_T0 = ( 1 << 24),
+ EVENT_COUNT_A = (0xF << 16) | (0x1 << 20),/* 20:16 */
+ MAX_DELAY_TIME_A = (0xF << 0) | (0xF << 4) | (1 << 8)|
+ (1 << 9) | (1 << 10), /* 10:0 */
+
+}DLY_INT_A_BITS;
+
+typedef enum {
+
+ DLY_INT_B_R3 = (1 << 31),
+ DLY_INT_B_R2 = (1 << 30),
+ DLY_INT_B_R1 = (1 << 29),
+ DLY_INT_B_R0 = (1 << 28),
+ DLY_INT_B_T3 = (1 << 27),
+ DLY_INT_B_T2 = (1 << 26),
+ DLY_INT_B_T1 = (1 << 25),
+ DLY_INT_B_T0 = ( 1 << 24),
+ EVENT_COUNT_B = (0xF << 16) | (0x1 << 20),/* 20:16 */
+ MAX_DELAY_TIME_B = (0xF << 0) | (0xF << 4) | (1 << 8)|
+ (1 << 9) | (1 << 10), /* 10:0 */
+}DLY_INT_B_BITS;
+
+
+/* FLOW_CONTROL 0xC8, 32bit register */
+typedef enum {
+
+ PAUSE_LEN_CHG = (1 << 30),
+ FTPE = (1 << 22),
+ FRPE = (1 << 21),
+ NAPA = (1 << 20),
+ NPA = (1 << 19),
+ FIXP = ( 1 << 18),
+ FCCMD = ( 1 << 16),
+ PAUSE_LEN = (0xF << 0) | (0xF << 4) |( 0xF << 8) | (0xF << 12), /* 15:0 */
+
+}FLOW_CONTROL_BITS;
+
+/* PHY_ ACCESS 0xD0, 32bit register */
+typedef enum {
+
+ PHY_CMD_ACTIVE = (1 << 31),
+ PHY_WR_CMD = (1 << 30),
+ PHY_RD_CMD = (1 << 29),
+ PHY_RD_ERR = (1 << 28),
+ PHY_PRE_SUP = (1 << 27),
+ PHY_ADDR = (1 << 21) | (1 << 22) | (1 << 23)|
+ (1 << 24) |(1 << 25),/* 25:21 */
+ PHY_REG_ADDR = (1 << 16) | (1 << 17) | (1 << 18)| (1 << 19) | (1 << 20),/* 20:16 */
+ PHY_DATA = (0xF << 0)|(0xF << 4) |(0xF << 8)|
+ (0xF << 12),/* 15:0 */
+
+}PHY_ACCESS_BITS;
+
+
+/* PMAT0 0x190, 32bit register */
+typedef enum {
+ PMR_ACTIVE = (1 << 31),
+ PMR_WR_CMD = (1 << 30),
+ PMR_RD_CMD = (1 << 29),
+ PMR_BANK = (1 <<28),
+ PMR_ADDR = (0xF << 16)|(1 << 20)|(1 << 21)|
+ (1 << 22),/* 22:16 */
+ PMR_B4 = (0xF << 0) | (0xF << 4),/* 15:0 */
+}PMAT0_BITS;
+
+
+/* PMAT1 0x194, 32bit register */
+typedef enum {
+ PMR_B3 = (0xF << 24) | (0xF <<28),/* 31:24 */
+ PMR_B2 = (0xF << 16) |(0xF << 20),/* 23:16 */
+ PMR_B1 = (0xF << 8) | (0xF <<12), /* 15:8 */
+ PMR_B0 = (0xF << 0)|(0xF << 4),/* 7:0 */
+}PMAT1_BITS;
+
+/************************************************************************/
+/* */
+/* MIB counter definitions */
+/* */
+/************************************************************************/
+
+#define rcv_miss_pkts 0x00
+#define rcv_octets 0x01
+#define rcv_broadcast_pkts 0x02
+#define rcv_multicast_pkts 0x03
+#define rcv_undersize_pkts 0x04
+#define rcv_oversize_pkts 0x05
+#define rcv_fragments 0x06
+#define rcv_jabbers 0x07
+#define rcv_unicast_pkts 0x08
+#define rcv_alignment_errors 0x09
+#define rcv_fcs_errors 0x0A
+#define rcv_good_octets 0x0B
+#define rcv_mac_ctrl 0x0C
+#define rcv_flow_ctrl 0x0D
+#define rcv_pkts_64_octets 0x0E
+#define rcv_pkts_65to127_octets 0x0F
+#define rcv_pkts_128to255_octets 0x10
+#define rcv_pkts_256to511_octets 0x11
+#define rcv_pkts_512to1023_octets 0x12
+#define rcv_pkts_1024to1518_octets 0x13
+#define rcv_unsupported_opcode 0x14
+#define rcv_symbol_errors 0x15
+#define rcv_drop_pkts_ring1 0x16
+#define rcv_drop_pkts_ring2 0x17
+#define rcv_drop_pkts_ring3 0x18
+#define rcv_drop_pkts_ring4 0x19
+#define rcv_jumbo_pkts 0x1A
+
+#define xmt_underrun_pkts 0x20
+#define xmt_octets 0x21
+#define xmt_packets 0x22
+#define xmt_broadcast_pkts 0x23
+#define xmt_multicast_pkts 0x24
+#define xmt_collisions 0x25
+#define xmt_unicast_pkts 0x26
+#define xmt_one_collision 0x27
+#define xmt_multiple_collision 0x28
+#define xmt_deferred_transmit 0x29
+#define xmt_late_collision 0x2A
+#define xmt_excessive_defer 0x2B
+#define xmt_loss_carrier 0x2C
+#define xmt_excessive_collision 0x2D
+#define xmt_back_pressure 0x2E
+#define xmt_flow_ctrl 0x2F
+#define xmt_pkts_64_octets 0x30
+#define xmt_pkts_65to127_octets 0x31
+#define xmt_pkts_128to255_octets 0x32
+#define xmt_pkts_256to511_octets 0x33
+#define xmt_pkts_512to1023_octets 0x34
+#define xmt_pkts_1024to1518_octet 0x35
+#define xmt_oversize_pkts 0x36
+#define xmt_jumbo_pkts 0x37
+
+
+/* Driver definitions */
+
+#define PCI_VENDOR_ID_AMD 0x1022
+#define PCI_DEVICE_ID_AMD8111E_7462 0x7462
+
+#define MAX_UNITS 8 /* Maximum number of devices possible */
+
+#define NUM_TX_BUFFERS 32 /* Number of transmit buffers */
+#define NUM_RX_BUFFERS 32 /* Number of receive buffers */
+
+#define TX_BUFF_MOD_MASK 31 /* (NUM_TX_BUFFERS -1) */
+#define RX_BUFF_MOD_MASK 31 /* (NUM_RX_BUFFERS -1) */
+
+#define NUM_TX_RING_DR 32
+#define NUM_RX_RING_DR 32
+
+#define TX_RING_DR_MOD_MASK 31 /* (NUM_TX_RING_DR -1) */
+#define RX_RING_DR_MOD_MASK 31 /* (NUM_RX_RING_DR -1) */
+
+#define MAX_FILTER_SIZE 64 /* Maximum multicast address */
+#define AMD8111E_MIN_MTU 60
+#define AMD8111E_MAX_MTU 9000
+
+#define PKT_BUFF_SZ 1536
+#define MIN_PKT_LEN 60
+
+#define AMD8111E_TX_TIMEOUT (3 * HZ)/* 3 sec */
+#define SOFT_TIMER_FREQ 0xBEBC /* 0.5 sec */
+#define DELAY_TIMER_CONV 50 /* msec to 10 usec conversion.
+ Only 500 usec resolution */
+#define OPTION_VLAN_ENABLE 0x0001
+#define OPTION_JUMBO_ENABLE 0x0002
+#define OPTION_MULTICAST_ENABLE 0x0004
+#define OPTION_WOL_ENABLE 0x0008
+#define OPTION_WAKE_MAGIC_ENABLE 0x0010
+#define OPTION_WAKE_PHY_ENABLE 0x0020
+#define OPTION_INTR_COAL_ENABLE 0x0040
+#define OPTION_DYN_IPG_ENABLE 0x0080
+
+#define PHY_REG_ADDR_MASK 0x1f
+
+/* ipg parameters */
+#define DEFAULT_IPG 0x60
+#define IFS1_DELTA 36
+#define IPG_CONVERGE_JIFFIES (HZ/2)
+#define IPG_STABLE_TIME 5
+#define MIN_IPG 96
+#define MAX_IPG 255
+#define IPG_STEP 16
+#define CSTATE 1
+#define SSTATE 2
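+
+/* Dynamic IPG algorithm (see amd8111e_config_ipg() in amd8111e.c):
+ * in SSTATE the driver idles for IPG_STABLE_TIME timer ticks, then
+ * enters CSTATE and sweeps the IPG from MIN_IPG to MAX_IPG in IPG_STEP
+ * increments, remembering the value that produced the fewest new
+ * collisions. That value is finally programmed and the cycle restarts
+ * from SSTATE. Full duplex pins the IPG at DEFAULT_IPG instead.
+ */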
+
+/* Assume the controller gets data within 10 times the maximum processing time */
+#define REPEAT_CNT 10
+
+/* amd8111e descriptor flag definitions */
+typedef enum {
+
+ OWN_BIT = (1 << 15),
+ ADD_FCS_BIT = (1 << 13),
+ LTINT_BIT = (1 << 12),
+ STP_BIT = (1 << 9),
+ ENP_BIT = (1 << 8),
+ KILL_BIT = (1 << 6),
+ TCC_VLAN_INSERT = (1 << 1),
+ TCC_VLAN_REPLACE = (1 << 1) |( 1<< 0),
+
+}TX_FLAG_BITS;
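+
+/* The transmit path sets OWN_BIT | STP_BIT | ENP_BIT | ADD_FCS_BIT |
+ * LTINT_BIT in a single store: every packet occupies exactly one
+ * descriptor, so it is both the start (STP) and end (ENP) of packet.
+ */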
+
+typedef enum {
+ ERR_BIT = (1 << 14),
+ FRAM_BIT = (1 << 13),
+ OFLO_BIT = (1 << 12),
+ CRC_BIT = (1 << 11),
+ PAM_BIT = (1 << 6),
+ LAFM_BIT = (1 << 5),
+ BAM_BIT = (1 << 4),
+	TT_VLAN_TAGGED = (1 << 3) | (1 << 2),	/* 0x000c */
+	TT_PRTY_TAGGED = (1 << 3),		/* 0x0008 */
+
+}RX_FLAG_BITS;
+
+#define RESET_RX_FLAGS 0x0000
+#define TT_MASK 0x000c
+#define TCC_MASK 0x0003
+
+/* driver ioctl parameters */
+#define AMD8111E_REG_DUMP_LEN	(13 * sizeof(u32))
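+/* 13 is the number of registers captured by amd8111e_read_regs() */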
+
+/* amd8111e descriptor format */
+
+struct amd8111e_tx_dr{
+
+ __le16 buff_count; /* Size of the buffer pointed by this descriptor */
+
+ __le16 tx_flags;
+
+ __le16 tag_ctrl_info;
+
+ __le16 tag_ctrl_cmd;
+
+ __le32 buff_phy_addr;
+
+ __le32 reserved;
+};
+
+struct amd8111e_rx_dr{
+
+ __le32 reserved;
+
+ __le16 msg_count; /* Received message len */
+
+ __le16 tag_ctrl_info;
+
+ __le16 buff_count; /* Len of the buffer pointed by descriptor. */
+
+ __le16 rx_flags;
+
+ __le32 buff_phy_addr;
+
+};
+struct amd8111e_link_config{
+
+#define SPEED_INVALID 0xffff
+#define DUPLEX_INVALID 0xff
+#define AUTONEG_INVALID 0xff
+
+ unsigned long orig_phy_option;
+ u16 speed;
+ u8 duplex;
+ u8 autoneg;
+ u8 reserved; /* 32bit alignment */
+};
+
+enum coal_type{
+
+ NO_COALESCE,
+ LOW_COALESCE,
+ MEDIUM_COALESCE,
+ HIGH_COALESCE,
+
+};
+
+enum coal_mode{
+ RX_INTR_COAL,
+ TX_INTR_COAL,
+ DISABLE_COAL,
+ ENABLE_COAL,
+
+};
+#define MAX_TIMEOUT 40
+#define MAX_EVENT_COUNT 31
+struct amd8111e_coalesce_conf{
+
+ unsigned int rx_timeout;
+ unsigned int rx_event_count;
+ unsigned long rx_packets;
+ unsigned long rx_prev_packets;
+ unsigned long rx_bytes;
+ unsigned long rx_prev_bytes;
+ unsigned int rx_coal_type;
+
+ unsigned int tx_timeout;
+ unsigned int tx_event_count;
+ unsigned long tx_packets;
+ unsigned long tx_prev_packets;
+ unsigned long tx_bytes;
+ unsigned long tx_prev_bytes;
+ unsigned int tx_coal_type;
+
+};
+struct ipg_info{
+
+ unsigned int ipg_state;
+ unsigned int ipg;
+ unsigned int current_ipg;
+ unsigned int col_cnt;
+ unsigned int diff_col_cnt;
+ unsigned int timer_tick;
+ unsigned int prev_ipg;
+ struct timer_list ipg_timer;
+};
+
+struct amd8111e_priv{
+
+ struct amd8111e_tx_dr* tx_ring;
+ struct amd8111e_rx_dr* rx_ring;
+ dma_addr_t tx_ring_dma_addr; /* tx descriptor ring base address */
+ dma_addr_t rx_ring_dma_addr; /* rx descriptor ring base address */
+ const char *name;
+ struct pci_dev *pci_dev; /* Ptr to the associated pci_dev */
+ struct net_device* amd8111e_net_dev; /* ptr to associated net_device */
+ /* Transmit and receive skbs */
+ struct sk_buff *tx_skbuff[NUM_TX_BUFFERS];
+ struct sk_buff *rx_skbuff[NUM_RX_BUFFERS];
+ /* Transmit and receive dma mapped addr */
+ dma_addr_t tx_dma_addr[NUM_TX_BUFFERS];
+ dma_addr_t rx_dma_addr[NUM_RX_BUFFERS];
+ /* Reg memory mapped address */
+ void __iomem *mmio;
+
+ struct napi_struct napi;
+
+ spinlock_t lock; /* Guard lock */
+ unsigned long rx_idx, tx_idx; /* The next free ring entry */
+ unsigned long tx_complete_idx;
+ unsigned long tx_ring_complete_idx;
+ unsigned long tx_ring_idx;
+ unsigned int rx_buff_len; /* Buffer length of rx buffers */
+ int options; /* Options enabled/disabled for the device */
+
+ unsigned long ext_phy_option;
+ int ext_phy_addr;
+ u32 ext_phy_id;
+
+ struct amd8111e_link_config link_config;
+ int pm_cap;
+
+ struct net_device *next;
+ int mii;
+ struct mii_if_info mii_if;
+ char opened;
+ unsigned int drv_rx_errors;
+ struct amd8111e_coalesce_conf coal_conf;
+
+ struct ipg_info ipg_data;
+
+};
+
+/* The kernel-provided writeq does not write 64 bits into the amd8111e
+ * device register; instead it writes only the upper 32 bits of the data
+ * into the lower 32 bits of the register. BUG?
+ * So do two explicit 32-bit writes, low word first.
+ */
+#define amd8111e_writeq(_UlData, _memMap) \
+do { \
+	writel(*(u32 *)(&_UlData), _memMap); \
+	writel(*(u32 *)((u8 *)(&_UlData) + 4), (_memMap) + 4); \
+} while (0)
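+
+/* Used, e.g., to load the 64-bit logical address filter in one go:
+ *
+ *	amd8111e_writeq(*(u64 *)mc_filter, lp->mmio + LADRF);
+ *
+ * The do/while wrapper keeps the two writes together even in an
+ * unbraced if/else body.
+ */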
+
+/* maps the external speed options to internal value */
+typedef enum {
+ SPEED_AUTONEG,
+ SPEED10_HALF,
+ SPEED10_FULL,
+ SPEED100_HALF,
+ SPEED100_FULL,
+}EXT_PHY_OPTION;
+
+static int card_idx;
+static int speed_duplex[MAX_UNITS] = { 0, };
+static bool coalesce[MAX_UNITS] = { [ 0 ... MAX_UNITS-1] = true };
+static bool dynamic_ipg[MAX_UNITS] = { [ 0 ... MAX_UNITS-1] = false };
+static unsigned int chip_version;
+
+#endif /* _AMD8111E_H */
+
diff --git a/drivers/net/ethernet/amd/ariadne.c b/drivers/net/ethernet/amd/ariadne.c
new file mode 100644
index 000000000..5e0f645f5
--- /dev/null
+++ b/drivers/net/ethernet/amd/ariadne.c
@@ -0,0 +1,791 @@
+/*
+ * Amiga Linux/m68k Ariadne Ethernet Driver
+ *
+ * © Copyright 1995-2003 by Geert Uytterhoeven (geert@linux-m68k.org)
+ * Peter De Schrijver (p2@mind.be)
+ *
+ * ---------------------------------------------------------------------------
+ *
+ * This program is based on
+ *
+ * lance.c: An AMD LANCE ethernet driver for linux.
+ * Written 1993-94 by Donald Becker.
+ *
+ * Am79C960: PCnet(tm)-ISA Single-Chip Ethernet Controller
+ * Advanced Micro Devices
+ * Publication #16907, Rev. B, Amendment/0, May 1994
+ *
+ * MC68230: Parallel Interface/Timer (PI/T)
+ * Motorola Semiconductors, December, 1983
+ *
+ * ---------------------------------------------------------------------------
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of the Linux
+ * distribution for more details.
+ *
+ * ---------------------------------------------------------------------------
+ *
+ * The Ariadne is a Zorro-II board made by Village Tronic. It contains:
+ *
+ * - an Am79C960 PCnet-ISA Single-Chip Ethernet Controller with both
+ * 10BASE-2 (thin coax) and 10BASE-T (UTP) connectors
+ *
+ * - an MC68230 Parallel Interface/Timer configured as 2 parallel ports
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+/*#define DEBUG*/
+
+#include <linux/module.h>
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/interrupt.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/zorro.h>
+#include <linux/bitops.h>
+
+#include <asm/byteorder.h>
+#include <asm/amigaints.h>
+#include <asm/amigahw.h>
+#include <asm/irq.h>
+
+#include "ariadne.h"
+
+#ifdef ARIADNE_DEBUG
+int ariadne_debug = ARIADNE_DEBUG;
+#else
+int ariadne_debug = 1;
+#endif
+
+/* Macros to Fix Endianness problems */
+
+/* Swap the Bytes in a WORD */
+#define swapw(x) ((((x) >> 8) & 0x00ff) | (((x) << 8) & 0xff00))
+/* Get the Low BYTE in a WORD */
+#define lowb(x) ((x) & 0xff)
+/* Get the Swapped High WORD in a LONG */
+#define swhighw(x) ((((x) >> 8) & 0xff00) | (((x) >> 24) & 0x00ff))
+/* Get the Swapped Low WORD in a LONG */
+#define swloww(x) ((((x) << 8) & 0xff00) | (((x) >> 8) & 0x00ff))
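+
+/* Worked example: swapw(0x1234) = 0x3412; for the longword 0x11223344,
+ * swhighw() yields 0x2211 and swloww() yields 0x4433, i.e. the high
+ * and low 16-bit halves, each byte-swapped for the little-endian chip.
+ */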
+
+/* Transmit/Receive Ring Definitions */
+
+#define TX_RING_SIZE 5
+#define RX_RING_SIZE 16
+
+#define PKT_BUF_SIZE 1520
+
+/* Private Device Data */
+
+struct ariadne_private {
+ volatile struct TDRE *tx_ring[TX_RING_SIZE];
+ volatile struct RDRE *rx_ring[RX_RING_SIZE];
+ volatile u_short *tx_buff[TX_RING_SIZE];
+ volatile u_short *rx_buff[RX_RING_SIZE];
+ int cur_tx, cur_rx; /* The next free ring entry */
+ int dirty_tx; /* The ring entries to be free()ed */
+ char tx_full;
+};
+
+/* Structure Created in the Ariadne's RAM Buffer */
+
+struct lancedata {
+ struct TDRE tx_ring[TX_RING_SIZE];
+ struct RDRE rx_ring[RX_RING_SIZE];
+ u_short tx_buff[TX_RING_SIZE][PKT_BUF_SIZE / sizeof(u_short)];
+ u_short rx_buff[RX_RING_SIZE][PKT_BUF_SIZE / sizeof(u_short)];
+};
+
+static void memcpyw(volatile u_short *dest, u_short *src, int len)
+{
+ while (len >= 2) {
+ *(dest++) = *(src++);
+ len -= 2;
+ }
+ if (len == 1)
+ *dest = (*(u_char *)src) << 8;
+}
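+
+/* The board RAM is only ever accessed as 16-bit words (note the
+ * u_short buffer pointers throughout), so copies go a word at a time;
+ * a trailing odd byte lands in the high byte of the final word.
+ */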
+
+static void ariadne_init_ring(struct net_device *dev)
+{
+ struct ariadne_private *priv = netdev_priv(dev);
+ volatile struct lancedata *lancedata = (struct lancedata *)dev->mem_start;
+ int i;
+
+ netif_stop_queue(dev);
+
+ priv->tx_full = 0;
+ priv->cur_rx = priv->cur_tx = 0;
+ priv->dirty_tx = 0;
+
+ /* Set up TX Ring */
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ volatile struct TDRE *t = &lancedata->tx_ring[i];
+ t->TMD0 = swloww(ARIADNE_RAM +
+ offsetof(struct lancedata, tx_buff[i]));
+ t->TMD1 = swhighw(ARIADNE_RAM +
+ offsetof(struct lancedata, tx_buff[i])) |
+ TF_STP | TF_ENP;
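+		/* BCNT is the two's complement of the buffer length
+		 * (a LANCE convention), hence the negated PKT_BUF_SIZE
+		 */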
+ t->TMD2 = swapw((u_short)-PKT_BUF_SIZE);
+ t->TMD3 = 0;
+ priv->tx_ring[i] = &lancedata->tx_ring[i];
+ priv->tx_buff[i] = lancedata->tx_buff[i];
+ netdev_dbg(dev, "TX Entry %2d at %p, Buf at %p\n",
+ i, &lancedata->tx_ring[i], lancedata->tx_buff[i]);
+ }
+
+ /* Set up RX Ring */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ volatile struct RDRE *r = &lancedata->rx_ring[i];
+ r->RMD0 = swloww(ARIADNE_RAM +
+ offsetof(struct lancedata, rx_buff[i]));
+ r->RMD1 = swhighw(ARIADNE_RAM +
+ offsetof(struct lancedata, rx_buff[i])) |
+ RF_OWN;
+ r->RMD2 = swapw((u_short)-PKT_BUF_SIZE);
+ r->RMD3 = 0x0000;
+ priv->rx_ring[i] = &lancedata->rx_ring[i];
+ priv->rx_buff[i] = lancedata->rx_buff[i];
+ netdev_dbg(dev, "RX Entry %2d at %p, Buf at %p\n",
+ i, &lancedata->rx_ring[i], lancedata->rx_buff[i]);
+ }
+}
+
+static int ariadne_rx(struct net_device *dev)
+{
+ struct ariadne_private *priv = netdev_priv(dev);
+ int entry = priv->cur_rx % RX_RING_SIZE;
+ int i;
+
+ /* If we own the next entry, it's a new packet. Send it up */
+ while (!(lowb(priv->rx_ring[entry]->RMD1) & RF_OWN)) {
+ int status = lowb(priv->rx_ring[entry]->RMD1);
+
+ if (status != (RF_STP | RF_ENP)) { /* There was an error */
+ /* There is a tricky error noted by
+ * John Murphy <murf@perftech.com> to Russ Nelson:
+ * Even with full-sized buffers it's possible for a
+ * jabber packet to use two buffers, with only the
+ * last correctly noting the error
+ */
+ /* Only count a general error at the end of a packet */
+ if (status & RF_ENP)
+ dev->stats.rx_errors++;
+ if (status & RF_FRAM)
+ dev->stats.rx_frame_errors++;
+ if (status & RF_OFLO)
+ dev->stats.rx_over_errors++;
+ if (status & RF_CRC)
+ dev->stats.rx_crc_errors++;
+ if (status & RF_BUFF)
+ dev->stats.rx_fifo_errors++;
+ priv->rx_ring[entry]->RMD1 &= 0xff00 | RF_STP | RF_ENP;
+ } else {
+ /* Malloc up new buffer, compatible with net-3 */
+ short pkt_len = swapw(priv->rx_ring[entry]->RMD3);
+ struct sk_buff *skb;
+
+ skb = netdev_alloc_skb(dev, pkt_len + 2);
+ if (skb == NULL) {
+ for (i = 0; i < RX_RING_SIZE; i++)
+ if (lowb(priv->rx_ring[(entry + i) % RX_RING_SIZE]->RMD1) & RF_OWN)
+ break;
+
+ if (i > RX_RING_SIZE - 2) {
+ dev->stats.rx_dropped++;
+ priv->rx_ring[entry]->RMD1 |= RF_OWN;
+ priv->cur_rx++;
+ }
+ break;
+ }
+
+
+ skb_reserve(skb, 2); /* 16 byte align */
+ skb_put(skb, pkt_len); /* Make room */
+ skb_copy_to_linear_data(skb,
+ (const void *)priv->rx_buff[entry],
+ pkt_len);
+ skb->protocol = eth_type_trans(skb, dev);
+ netdev_dbg(dev, "RX pkt type 0x%04x from %pM to %pM data %p len %u\n",
+ ((u_short *)skb->data)[6],
+ skb->data + 6, skb->data,
+ skb->data, skb->len);
+
+ netif_rx(skb);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += pkt_len;
+ }
+
+ priv->rx_ring[entry]->RMD1 |= RF_OWN;
+ entry = (++priv->cur_rx) % RX_RING_SIZE;
+ }
+
+ priv->cur_rx = priv->cur_rx % RX_RING_SIZE;
+
+ /* We should check that at least two ring entries are free.
+ * If not, we should free one and mark stats->rx_dropped++
+ */
+
+ return 0;
+}
+
+static irqreturn_t ariadne_interrupt(int irq, void *data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ volatile struct Am79C960 *lance = (struct Am79C960 *)dev->base_addr;
+ struct ariadne_private *priv;
+ int csr0, boguscnt;
+ int handled = 0;
+
+ lance->RAP = CSR0; /* PCnet-ISA Controller Status */
+
+ if (!(lance->RDP & INTR)) /* Check if any interrupt has been */
+ return IRQ_NONE; /* generated by the board */
+
+ priv = netdev_priv(dev);
+
+ boguscnt = 10;
+ while ((csr0 = lance->RDP) & (ERR | RINT | TINT) && --boguscnt >= 0) {
+ /* Acknowledge all of the current interrupt sources ASAP */
+ lance->RDP = csr0 & ~(INEA | TDMD | STOP | STRT | INIT);
+
+#ifdef DEBUG
+ if (ariadne_debug > 5) {
+ netdev_dbg(dev, "interrupt csr0=%#02x new csr=%#02x [",
+ csr0, lance->RDP);
+ if (csr0 & INTR)
+ pr_cont(" INTR");
+ if (csr0 & INEA)
+ pr_cont(" INEA");
+ if (csr0 & RXON)
+ pr_cont(" RXON");
+ if (csr0 & TXON)
+ pr_cont(" TXON");
+ if (csr0 & TDMD)
+ pr_cont(" TDMD");
+ if (csr0 & STOP)
+ pr_cont(" STOP");
+ if (csr0 & STRT)
+ pr_cont(" STRT");
+ if (csr0 & INIT)
+ pr_cont(" INIT");
+ if (csr0 & ERR)
+ pr_cont(" ERR");
+ if (csr0 & BABL)
+ pr_cont(" BABL");
+ if (csr0 & CERR)
+ pr_cont(" CERR");
+ if (csr0 & MISS)
+ pr_cont(" MISS");
+ if (csr0 & MERR)
+ pr_cont(" MERR");
+ if (csr0 & RINT)
+ pr_cont(" RINT");
+ if (csr0 & TINT)
+ pr_cont(" TINT");
+ if (csr0 & IDON)
+ pr_cont(" IDON");
+ pr_cont(" ]\n");
+ }
+#endif
+
+ if (csr0 & RINT) { /* Rx interrupt */
+ handled = 1;
+ ariadne_rx(dev);
+ }
+
+ if (csr0 & TINT) { /* Tx-done interrupt */
+ int dirty_tx = priv->dirty_tx;
+
+ handled = 1;
+ while (dirty_tx < priv->cur_tx) {
+ int entry = dirty_tx % TX_RING_SIZE;
+ int status = lowb(priv->tx_ring[entry]->TMD1);
+
+ if (status & TF_OWN)
+ break; /* It still hasn't been Txed */
+
+ priv->tx_ring[entry]->TMD1 &= 0xff00;
+
+ if (status & TF_ERR) {
+					/* There was a major error; log it */
+ int err_status = priv->tx_ring[entry]->TMD3;
+ dev->stats.tx_errors++;
+ if (err_status & EF_RTRY)
+ dev->stats.tx_aborted_errors++;
+ if (err_status & EF_LCAR)
+ dev->stats.tx_carrier_errors++;
+ if (err_status & EF_LCOL)
+ dev->stats.tx_window_errors++;
+ if (err_status & EF_UFLO) {
+ /* Ackk! On FIFO errors the Tx unit is turned off! */
+ dev->stats.tx_fifo_errors++;
+ /* Remove this verbosity later! */
+ netdev_err(dev, "Tx FIFO error! Status %04x\n",
+ csr0);
+ /* Restart the chip */
+ lance->RDP = STRT;
+ }
+ } else {
+ if (status & (TF_MORE | TF_ONE))
+ dev->stats.collisions++;
+ dev->stats.tx_packets++;
+ }
+ dirty_tx++;
+ }
+
+#ifndef final_version
+ if (priv->cur_tx - dirty_tx >= TX_RING_SIZE) {
+ netdev_err(dev, "out-of-sync dirty pointer, %d vs. %d, full=%d\n",
+ dirty_tx, priv->cur_tx,
+ priv->tx_full);
+ dirty_tx += TX_RING_SIZE;
+ }
+#endif
+
+ if (priv->tx_full && netif_queue_stopped(dev) &&
+ dirty_tx > priv->cur_tx - TX_RING_SIZE + 2) {
+ /* The ring is no longer full */
+ priv->tx_full = 0;
+ netif_wake_queue(dev);
+ }
+
+ priv->dirty_tx = dirty_tx;
+ }
+
+ /* Log misc errors */
+ if (csr0 & BABL) {
+ handled = 1;
+ dev->stats.tx_errors++; /* Tx babble */
+ }
+ if (csr0 & MISS) {
+ handled = 1;
+ dev->stats.rx_errors++; /* Missed a Rx frame */
+ }
+ if (csr0 & MERR) {
+ handled = 1;
+ netdev_err(dev, "Bus master arbitration failure, status %04x\n",
+ csr0);
+ /* Restart the chip */
+ lance->RDP = STRT;
+ }
+ }
+
+ /* Clear any other interrupt, and set interrupt enable */
+ lance->RAP = CSR0; /* PCnet-ISA Controller Status */
+ lance->RDP = INEA | BABL | CERR | MISS | MERR | IDON;
+
+ if (ariadne_debug > 4)
+ netdev_dbg(dev, "exiting interrupt, csr%d=%#04x\n",
+ lance->RAP, lance->RDP);
+
+ return IRQ_RETVAL(handled);
+}
+
+static int ariadne_open(struct net_device *dev)
+{
+ volatile struct Am79C960 *lance = (struct Am79C960 *)dev->base_addr;
+ u_short in;
+ u_long version;
+ int i;
+
+ /* Reset the LANCE */
+ in = lance->Reset;
+
+ /* Stop the LANCE */
+ lance->RAP = CSR0; /* PCnet-ISA Controller Status */
+ lance->RDP = STOP;
+
+ /* Check the LANCE version */
+ lance->RAP = CSR88; /* Chip ID */
+ version = swapw(lance->RDP);
+ lance->RAP = CSR89; /* Chip ID */
+ version |= swapw(lance->RDP) << 16;
+ if ((version & 0x00000fff) != 0x00000003) {
+ pr_warn("Couldn't find AMD Ethernet Chip\n");
+ return -EAGAIN;
+ }
+ if ((version & 0x0ffff000) != 0x00003000) {
+ pr_warn("Couldn't find Am79C960 (Wrong part number = %ld)\n",
+ (version & 0x0ffff000) >> 12);
+ return -EAGAIN;
+ }
+
+ netdev_dbg(dev, "Am79C960 (PCnet-ISA) Revision %ld\n",
+ (version & 0xf0000000) >> 28);
+
+ ariadne_init_ring(dev);
+
+ /* Miscellaneous Stuff */
+ lance->RAP = CSR3; /* Interrupt Masks and Deferral Control */
+ lance->RDP = 0x0000;
+ lance->RAP = CSR4; /* Test and Features Control */
+ lance->RDP = DPOLL | APAD_XMT | MFCOM | RCVCCOM | TXSTRTM | JABM;
+
+ /* Set the Multicast Table */
+ lance->RAP = CSR8; /* Logical Address Filter, LADRF[15:0] */
+ lance->RDP = 0x0000;
+ lance->RAP = CSR9; /* Logical Address Filter, LADRF[31:16] */
+ lance->RDP = 0x0000;
+ lance->RAP = CSR10; /* Logical Address Filter, LADRF[47:32] */
+ lance->RDP = 0x0000;
+ lance->RAP = CSR11; /* Logical Address Filter, LADRF[63:48] */
+ lance->RDP = 0x0000;
+
+ /* Set the Ethernet Hardware Address */
+ lance->RAP = CSR12; /* Physical Address Register, PADR[15:0] */
+ lance->RDP = ((u_short *)&dev->dev_addr[0])[0];
+ lance->RAP = CSR13; /* Physical Address Register, PADR[31:16] */
+ lance->RDP = ((u_short *)&dev->dev_addr[0])[1];
+ lance->RAP = CSR14; /* Physical Address Register, PADR[47:32] */
+ lance->RDP = ((u_short *)&dev->dev_addr[0])[2];
+
+ /* Set the Init Block Mode */
+ lance->RAP = CSR15; /* Mode Register */
+ lance->RDP = 0x0000;
+
+ /* Set the Transmit Descriptor Ring Pointer */
+ lance->RAP = CSR30; /* Base Address of Transmit Ring */
+ lance->RDP = swloww(ARIADNE_RAM + offsetof(struct lancedata, tx_ring));
+	lance->RAP = CSR31;	/* Base Address of Transmit Ring */
+ lance->RDP = swhighw(ARIADNE_RAM + offsetof(struct lancedata, tx_ring));
+
+ /* Set the Receive Descriptor Ring Pointer */
+ lance->RAP = CSR24; /* Base Address of Receive Ring */
+ lance->RDP = swloww(ARIADNE_RAM + offsetof(struct lancedata, rx_ring));
+ lance->RAP = CSR25; /* Base Address of Receive Ring */
+ lance->RDP = swhighw(ARIADNE_RAM + offsetof(struct lancedata, rx_ring));
+
+ /* Set the Number of RX and TX Ring Entries */
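+	/* The ring length CSRs take the two's complement of the ring
+	 * size, byte-swapped like every CSR value on this board
+	 */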
+ lance->RAP = CSR76; /* Receive Ring Length */
+ lance->RDP = swapw(((u_short)-RX_RING_SIZE));
+ lance->RAP = CSR78; /* Transmit Ring Length */
+ lance->RDP = swapw(((u_short)-TX_RING_SIZE));
+
+ /* Enable Media Interface Port Auto Select (10BASE-2/10BASE-T) */
+ lance->RAP = ISACSR2; /* Miscellaneous Configuration */
+ lance->IDP = ASEL;
+
+ /* LED Control */
+ lance->RAP = ISACSR5; /* LED1 Status */
+ lance->IDP = PSE|XMTE;
+ lance->RAP = ISACSR6; /* LED2 Status */
+ lance->IDP = PSE|COLE;
+ lance->RAP = ISACSR7; /* LED3 Status */
+ lance->IDP = PSE|RCVE;
+
+ netif_start_queue(dev);
+
+ i = request_irq(IRQ_AMIGA_PORTS, ariadne_interrupt, IRQF_SHARED,
+ dev->name, dev);
+ if (i)
+ return i;
+
+ lance->RAP = CSR0; /* PCnet-ISA Controller Status */
+ lance->RDP = INEA | STRT;
+
+ return 0;
+}
+
+static int ariadne_close(struct net_device *dev)
+{
+ volatile struct Am79C960 *lance = (struct Am79C960 *)dev->base_addr;
+
+ netif_stop_queue(dev);
+
+ lance->RAP = CSR112; /* Missed Frame Count */
+ dev->stats.rx_missed_errors = swapw(lance->RDP);
+ lance->RAP = CSR0; /* PCnet-ISA Controller Status */
+
+ if (ariadne_debug > 1) {
+ netdev_dbg(dev, "Shutting down ethercard, status was %02x\n",
+ lance->RDP);
+ netdev_dbg(dev, "%lu packets missed\n",
+ dev->stats.rx_missed_errors);
+ }
+
+ /* We stop the LANCE here -- it occasionally polls memory if we don't */
+ lance->RDP = STOP;
+
+ free_irq(IRQ_AMIGA_PORTS, dev);
+
+ return 0;
+}
+
+static inline void ariadne_reset(struct net_device *dev)
+{
+ volatile struct Am79C960 *lance = (struct Am79C960 *)dev->base_addr;
+
+ lance->RAP = CSR0; /* PCnet-ISA Controller Status */
+ lance->RDP = STOP;
+ ariadne_init_ring(dev);
+ lance->RDP = INEA | STRT;
+ netif_start_queue(dev);
+}
+
+static void ariadne_tx_timeout(struct net_device *dev, unsigned int txqueue)
+{
+ volatile struct Am79C960 *lance = (struct Am79C960 *)dev->base_addr;
+
+ netdev_err(dev, "transmit timed out, status %04x, resetting\n",
+ lance->RDP);
+ ariadne_reset(dev);
+ netif_wake_queue(dev);
+}
+
+static netdev_tx_t ariadne_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct ariadne_private *priv = netdev_priv(dev);
+ volatile struct Am79C960 *lance = (struct Am79C960 *)dev->base_addr;
+ int entry;
+ unsigned long flags;
+ int len = skb->len;
+
+#if 0
+ if (ariadne_debug > 3) {
+ lance->RAP = CSR0; /* PCnet-ISA Controller Status */
+ netdev_dbg(dev, "%s: csr0 %04x\n", __func__, lance->RDP);
+ lance->RDP = 0x0000;
+ }
+#endif
+
+	/* FIXME: is the 79C960 new enough to do its own padding right? */
+ if (skb->len < ETH_ZLEN) {
+ if (skb_padto(skb, ETH_ZLEN))
+ return NETDEV_TX_OK;
+ len = ETH_ZLEN;
+ }
+
+ /* Fill in a Tx ring entry */
+
+ netdev_dbg(dev, "TX pkt type 0x%04x from %pM to %pM data %p len %u\n",
+ ((u_short *)skb->data)[6],
+ skb->data + 6, skb->data,
+ skb->data, skb->len);
+
+ local_irq_save(flags);
+
+ entry = priv->cur_tx % TX_RING_SIZE;
+
+ /* Caution: the write order is important here, set the base address with
+ the "ownership" bits last */
+
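+	/* TMD2 is the buffer byte count as a two's complement value
+	 * (see the descriptor layout in ariadne.h)
+	 */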
+ priv->tx_ring[entry]->TMD2 = swapw((u_short)-skb->len);
+ priv->tx_ring[entry]->TMD3 = 0x0000;
+ memcpyw(priv->tx_buff[entry], (u_short *)skb->data, len);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_DEBUG, "tx_buff: ", DUMP_PREFIX_OFFSET, 16, 1,
+ (void *)priv->tx_buff[entry],
+ skb->len > 64 ? 64 : skb->len, true);
+#endif
+
+ priv->tx_ring[entry]->TMD1 = (priv->tx_ring[entry]->TMD1 & 0xff00)
+ | TF_OWN | TF_STP | TF_ENP;
+
+ dev_kfree_skb(skb);
+
+ priv->cur_tx++;
+ if ((priv->cur_tx >= TX_RING_SIZE) &&
+ (priv->dirty_tx >= TX_RING_SIZE)) {
+
+ netdev_dbg(dev, "*** Subtracting TX_RING_SIZE from cur_tx (%d) and dirty_tx (%d)\n",
+ priv->cur_tx, priv->dirty_tx);
+
+ priv->cur_tx -= TX_RING_SIZE;
+ priv->dirty_tx -= TX_RING_SIZE;
+ }
+ dev->stats.tx_bytes += len;
+
+ /* Trigger an immediate send poll */
+ lance->RAP = CSR0; /* PCnet-ISA Controller Status */
+ lance->RDP = INEA | TDMD;
+
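+	/* If the flags byte of the next descriptor is still non-zero,
+	 * that entry hasn't been reclaimed yet and the ring is full
+	 */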
+ if (lowb(priv->tx_ring[(entry + 1) % TX_RING_SIZE]->TMD1) != 0) {
+ netif_stop_queue(dev);
+ priv->tx_full = 1;
+ }
+ local_irq_restore(flags);
+
+ return NETDEV_TX_OK;
+}
+
+static struct net_device_stats *ariadne_get_stats(struct net_device *dev)
+{
+ volatile struct Am79C960 *lance = (struct Am79C960 *)dev->base_addr;
+ short saved_addr;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ saved_addr = lance->RAP;
+ lance->RAP = CSR112; /* Missed Frame Count */
+ dev->stats.rx_missed_errors = swapw(lance->RDP);
+ lance->RAP = saved_addr;
+ local_irq_restore(flags);
+
+ return &dev->stats;
+}
+
+/* Set or clear the multicast filter for this adaptor.
+ * num_addrs == -1 Promiscuous mode, receive all packets
+ * num_addrs == 0 Normal mode, clear multicast list
+ * num_addrs > 0 Multicast mode, receive normal and MC packets,
+ * and do best-effort filtering.
+ */
+static void set_multicast_list(struct net_device *dev)
+{
+ volatile struct Am79C960 *lance = (struct Am79C960 *)dev->base_addr;
+
+ if (!netif_running(dev))
+ return;
+
+ netif_stop_queue(dev);
+
+ /* We take the simple way out and always enable promiscuous mode */
+ lance->RAP = CSR0; /* PCnet-ISA Controller Status */
+ lance->RDP = STOP; /* Temporarily stop the lance */
+ ariadne_init_ring(dev);
+
+ if (dev->flags & IFF_PROMISC) {
+ lance->RAP = CSR15; /* Mode Register */
+ lance->RDP = PROM; /* Set promiscuous mode */
+ } else {
+ short multicast_table[4];
+ int num_addrs = netdev_mc_count(dev);
+ int i;
+ /* We don't use the multicast table,
+ * but rely on upper-layer filtering
+ */
+ memset(multicast_table, (num_addrs == 0) ? 0 : -1,
+ sizeof(multicast_table));
+ for (i = 0; i < 4; i++) {
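+			/* CSR numbers are byte-swapped (CSR8 == 0x0800), so
+			 * stepping by i << 8 selects CSR8..CSR11 in turn
+			 */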
+ lance->RAP = CSR8 + (i << 8);
+ /* Logical Address Filter */
+ lance->RDP = swapw(multicast_table[i]);
+ }
+ lance->RAP = CSR15; /* Mode Register */
+ lance->RDP = 0x0000; /* Unset promiscuous mode */
+ }
+
+ lance->RAP = CSR0; /* PCnet-ISA Controller Status */
+ lance->RDP = INEA | STRT | IDON;/* Resume normal operation */
+
+ netif_wake_queue(dev);
+}
+
+
+static void ariadne_remove_one(struct zorro_dev *z)
+{
+ struct net_device *dev = zorro_get_drvdata(z);
+
+ unregister_netdev(dev);
+ release_mem_region(ZTWO_PADDR(dev->base_addr), sizeof(struct Am79C960));
+ release_mem_region(ZTWO_PADDR(dev->mem_start), ARIADNE_RAM_SIZE);
+ free_netdev(dev);
+}
+
+static const struct zorro_device_id ariadne_zorro_tbl[] = {
+ { ZORRO_PROD_VILLAGE_TRONIC_ARIADNE },
+ { 0 }
+};
+MODULE_DEVICE_TABLE(zorro, ariadne_zorro_tbl);
+
+static const struct net_device_ops ariadne_netdev_ops = {
+ .ndo_open = ariadne_open,
+ .ndo_stop = ariadne_close,
+ .ndo_start_xmit = ariadne_start_xmit,
+ .ndo_tx_timeout = ariadne_tx_timeout,
+ .ndo_get_stats = ariadne_get_stats,
+ .ndo_set_rx_mode = set_multicast_list,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+};
+
+static int ariadne_init_one(struct zorro_dev *z,
+ const struct zorro_device_id *ent)
+{
+ unsigned long board = z->resource.start;
+ unsigned long base_addr = board + ARIADNE_LANCE;
+ unsigned long mem_start = board + ARIADNE_RAM;
+ struct resource *r1, *r2;
+ struct net_device *dev;
+ u32 serial;
+ int err;
+
+ r1 = request_mem_region(base_addr, sizeof(struct Am79C960), "Am79C960");
+ if (!r1)
+ return -EBUSY;
+ r2 = request_mem_region(mem_start, ARIADNE_RAM_SIZE, "RAM");
+ if (!r2) {
+ release_mem_region(base_addr, sizeof(struct Am79C960));
+ return -EBUSY;
+ }
+
+ dev = alloc_etherdev(sizeof(struct ariadne_private));
+ if (dev == NULL) {
+ release_mem_region(base_addr, sizeof(struct Am79C960));
+ release_mem_region(mem_start, ARIADNE_RAM_SIZE);
+ return -ENOMEM;
+ }
+
+ r1->name = dev->name;
+ r2->name = dev->name;
+
+ serial = be32_to_cpu(z->rom.er_SerialNumber);
+ dev->dev_addr[0] = 0x00;
+ dev->dev_addr[1] = 0x60;
+ dev->dev_addr[2] = 0x30;
+ dev->dev_addr[3] = (serial >> 16) & 0xff;
+ dev->dev_addr[4] = (serial >> 8) & 0xff;
+ dev->dev_addr[5] = serial & 0xff;
+ dev->base_addr = (unsigned long)ZTWO_VADDR(base_addr);
+ dev->mem_start = (unsigned long)ZTWO_VADDR(mem_start);
+ dev->mem_end = dev->mem_start + ARIADNE_RAM_SIZE;
+
+ dev->netdev_ops = &ariadne_netdev_ops;
+ dev->watchdog_timeo = 5 * HZ;
+
+ err = register_netdev(dev);
+ if (err) {
+ release_mem_region(base_addr, sizeof(struct Am79C960));
+ release_mem_region(mem_start, ARIADNE_RAM_SIZE);
+ free_netdev(dev);
+ return err;
+ }
+ zorro_set_drvdata(z, dev);
+
+ netdev_info(dev, "Ariadne at 0x%08lx, Ethernet Address %pM\n",
+ board, dev->dev_addr);
+
+ return 0;
+}
+
+static struct zorro_driver ariadne_driver = {
+ .name = "ariadne",
+ .id_table = ariadne_zorro_tbl,
+ .probe = ariadne_init_one,
+ .remove = ariadne_remove_one,
+};
+
+static int __init ariadne_init_module(void)
+{
+ return zorro_register_driver(&ariadne_driver);
+}
+
+static void __exit ariadne_cleanup_module(void)
+{
+ zorro_unregister_driver(&ariadne_driver);
+}
+
+module_init(ariadne_init_module);
+module_exit(ariadne_cleanup_module);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/amd/ariadne.h b/drivers/net/ethernet/amd/ariadne.h
new file mode 100644
index 000000000..727be5cdd
--- /dev/null
+++ b/drivers/net/ethernet/amd/ariadne.h
@@ -0,0 +1,415 @@
+/*
+ * Amiga Linux/m68k Ariadne Ethernet Driver
+ *
+ * © Copyright 1995 by Geert Uytterhoeven (geert@linux-m68k.org)
+ * Peter De Schrijver
+ * (Peter.DeSchrijver@linux.cc.kuleuven.ac.be)
+ *
+ * ----------------------------------------------------------------------------------
+ *
+ * This program is based on
+ *
+ * lance.c: An AMD LANCE ethernet driver for linux.
+ * Written 1993-94 by Donald Becker.
+ *
+ * Am79C960: PCnet(tm)-ISA Single-Chip Ethernet Controller
+ * Advanced Micro Devices
+ * Publication #16907, Rev. B, Amendment/0, May 1994
+ *
+ * MC68230: Parallel Interface/Timer (PI/T)
+ * Motorola Semiconductors, December, 1983
+ *
+ * ----------------------------------------------------------------------------------
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of the Linux
+ * distribution for more details.
+ *
+ * ----------------------------------------------------------------------------------
+ *
+ * The Ariadne is a Zorro-II board made by Village Tronic. It contains:
+ *
+ * - an Am79C960 PCnet-ISA Single-Chip Ethernet Controller with both
+ * 10BASE-2 (thin coax) and 10BASE-T (UTP) connectors
+ *
+ * - an MC68230 Parallel Interface/Timer configured as 2 parallel ports
+ */
+
+
+ /*
+ * Am79C960 PCnet-ISA
+ */
+
+struct Am79C960 {
+ volatile u_short AddressPROM[8];
+ /* IEEE Address PROM (Unused in the Ariadne) */
+ volatile u_short RDP; /* Register Data Port */
+ volatile u_short RAP; /* Register Address Port */
+ volatile u_short Reset; /* Reset Chip on Read Access */
+ volatile u_short IDP; /* ISACSR Data Port */
+};
+
+
+ /*
+ * Am79C960 Control and Status Registers
+ *
+ * These values are already swap()ed!!
+ *
+ * Only registers marked with a `-' are intended for network software
+ * access
+ */
+
+#define CSR0 0x0000 /* - PCnet-ISA Controller Status */
+#define CSR1 0x0100 /* - IADR[15:0] */
+#define CSR2 0x0200 /* - IADR[23:16] */
+#define CSR3 0x0300 /* - Interrupt Masks and Deferral Control */
+#define CSR4 0x0400 /* - Test and Features Control */
+#define CSR6 0x0600 /* RCV/XMT Descriptor Table Length */
+#define CSR8 0x0800 /* - Logical Address Filter, LADRF[15:0] */
+#define CSR9 0x0900 /* - Logical Address Filter, LADRF[31:16] */
+#define CSR10 0x0a00 /* - Logical Address Filter, LADRF[47:32] */
+#define CSR11 0x0b00 /* - Logical Address Filter, LADRF[63:48] */
+#define CSR12 0x0c00 /* - Physical Address Register, PADR[15:0] */
+#define CSR13 0x0d00 /* - Physical Address Register, PADR[31:16] */
+#define CSR14 0x0e00 /* - Physical Address Register, PADR[47:32] */
+#define CSR15 0x0f00 /* - Mode Register */
+#define CSR16 0x1000 /* Initialization Block Address Lower */
+#define CSR17 0x1100 /* Initialization Block Address Upper */
+#define CSR18 0x1200 /* Current Receive Buffer Address */
+#define CSR19 0x1300 /* Current Receive Buffer Address */
+#define CSR20 0x1400 /* Current Transmit Buffer Address */
+#define CSR21 0x1500 /* Current Transmit Buffer Address */
+#define CSR22 0x1600 /* Next Receive Buffer Address */
+#define CSR23 0x1700 /* Next Receive Buffer Address */
+#define CSR24 0x1800 /* - Base Address of Receive Ring */
+#define CSR25 0x1900 /* - Base Address of Receive Ring */
+#define CSR26 0x1a00 /* Next Receive Descriptor Address */
+#define CSR27 0x1b00 /* Next Receive Descriptor Address */
+#define CSR28 0x1c00 /* Current Receive Descriptor Address */
+#define CSR29 0x1d00 /* Current Receive Descriptor Address */
+#define CSR30 0x1e00 /* - Base Address of Transmit Ring */
+#define CSR31 0x1f00 /* - Base Address of transmit Ring */
+#define CSR32 0x2000 /* Next Transmit Descriptor Address */
+#define CSR33 0x2100 /* Next Transmit Descriptor Address */
+#define CSR34 0x2200 /* Current Transmit Descriptor Address */
+#define CSR35 0x2300 /* Current Transmit Descriptor Address */
+#define CSR36 0x2400 /* Next Next Receive Descriptor Address */
+#define CSR37 0x2500 /* Next Next Receive Descriptor Address */
+#define CSR38 0x2600 /* Next Next Transmit Descriptor Address */
+#define CSR39 0x2700 /* Next Next Transmit Descriptor Address */
+#define CSR40 0x2800 /* Current Receive Status and Byte Count */
+#define CSR41 0x2900 /* Current Receive Status and Byte Count */
+#define CSR42 0x2a00 /* Current Transmit Status and Byte Count */
+#define CSR43 0x2b00 /* Current Transmit Status and Byte Count */
+#define CSR44 0x2c00 /* Next Receive Status and Byte Count */
+#define CSR45 0x2d00 /* Next Receive Status and Byte Count */
+#define CSR46 0x2e00 /* Poll Time Counter */
+#define CSR47 0x2f00 /* Polling Interval */
+#define CSR48 0x3000 /* Temporary Storage */
+#define CSR49 0x3100 /* Temporary Storage */
+#define CSR50 0x3200 /* Temporary Storage */
+#define CSR51 0x3300 /* Temporary Storage */
+#define CSR52 0x3400 /* Temporary Storage */
+#define CSR53 0x3500 /* Temporary Storage */
+#define CSR54 0x3600 /* Temporary Storage */
+#define CSR55 0x3700 /* Temporary Storage */
+#define CSR56 0x3800 /* Temporary Storage */
+#define CSR57 0x3900 /* Temporary Storage */
+#define CSR58 0x3a00 /* Temporary Storage */
+#define CSR59 0x3b00 /* Temporary Storage */
+#define CSR60 0x3c00 /* Previous Transmit Descriptor Address */
+#define CSR61 0x3d00 /* Previous Transmit Descriptor Address */
+#define CSR62 0x3e00 /* Previous Transmit Status and Byte Count */
+#define CSR63 0x3f00 /* Previous Transmit Status and Byte Count */
+#define CSR64 0x4000 /* Next Transmit Buffer Address */
+#define CSR65 0x4100 /* Next Transmit Buffer Address */
+#define CSR66 0x4200 /* Next Transmit Status and Byte Count */
+#define CSR67 0x4300 /* Next Transmit Status and Byte Count */
+#define CSR68 0x4400 /* Transmit Status Temporary Storage */
+#define CSR69 0x4500 /* Transmit Status Temporary Storage */
+#define CSR70 0x4600 /* Temporary Storage */
+#define CSR71 0x4700 /* Temporary Storage */
+#define CSR72 0x4800 /* Receive Ring Counter */
+#define CSR74 0x4a00 /* Transmit Ring Counter */
+#define CSR76 0x4c00 /* - Receive Ring Length */
+#define CSR78 0x4e00 /* - Transmit Ring Length */
+#define CSR80 0x5000 /* - Burst and FIFO Threshold Control */
+#define CSR82 0x5200 /* - Bus Activity Timer */
+#define CSR84 0x5400 /* DMA Address */
+#define CSR85 0x5500 /* DMA Address */
+#define CSR86 0x5600 /* Buffer Byte Counter */
+#define CSR88 0x5800 /* - Chip ID */
+#define CSR89 0x5900 /* - Chip ID */
+#define CSR92 0x5c00 /* Ring Length Conversion */
+#define CSR94 0x5e00 /* Transmit Time Domain Reflectometry Count */
+#define CSR96 0x6000 /* Bus Interface Scratch Register 0 */
+#define CSR97 0x6100 /* Bus Interface Scratch Register 0 */
+#define CSR98 0x6200 /* Bus Interface Scratch Register 1 */
+#define CSR99 0x6300 /* Bus Interface Scratch Register 1 */
+#define CSR104 0x6800 /* SWAP */
+#define CSR105 0x6900 /* SWAP */
+#define CSR108 0x6c00 /* Buffer Management Scratch */
+#define CSR109 0x6d00 /* Buffer Management Scratch */
+#define CSR112 0x7000 /* - Missed Frame Count */
+#define CSR114 0x7200 /* - Receive Collision Count */
+#define CSR124 0x7c00 /* - Buffer Management Unit Test */
+
+
+ /*
+ * Am79C960 ISA Control and Status Registers
+ *
+ * These values are already swap()ed!!
+ */
+
+#define ISACSR0 0x0000 /* Master Mode Read Active */
+#define ISACSR1 0x0100 /* Master Mode Write Active */
+#define ISACSR2 0x0200 /* Miscellaneous Configuration */
+#define ISACSR4 0x0400 /* LED0 Status (Link Integrity) */
+#define ISACSR5 0x0500 /* LED1 Status */
+#define ISACSR6 0x0600 /* LED2 Status */
+#define ISACSR7 0x0700 /* LED3 Status */
+
+
+ /*
+ * Bit definitions for CSR0 (PCnet-ISA Controller Status)
+ *
+ * These values are already swap()ed!!
+ */
+
+#define ERR 0x0080 /* Error */
+#define BABL 0x0040 /* Babble: Transmitted too many bits */
+#define CERR 0x0020 /* No Heartbeat (10BASE-T) */
+#define MISS 0x0010 /* Missed Frame */
+#define MERR 0x0008 /* Memory Error */
+#define RINT 0x0004 /* Receive Interrupt */
+#define TINT 0x0002 /* Transmit Interrupt */
+#define IDON 0x0001 /* Initialization Done */
+#define INTR 0x8000 /* Interrupt Flag */
+#define INEA 0x4000 /* Interrupt Enable */
+#define RXON 0x2000 /* Receive On */
+#define TXON 0x1000 /* Transmit On */
+#define TDMD 0x0800 /* Transmit Demand */
+#define STOP 0x0400 /* Stop */
+#define STRT 0x0200 /* Start */
+#define INIT 0x0100 /* Initialize */
+
+
+ /*
+ * Bit definitions for CSR3 (Interrupt Masks and Deferral Control)
+ *
+ * These values are already swap()ed!!
+ */
+
+#define BABLM 0x0040 /* Babble Mask */
+#define MISSM 0x0010 /* Missed Frame Mask */
+#define MERRM 0x0008 /* Memory Error Mask */
+#define RINTM 0x0004 /* Receive Interrupt Mask */
+#define TINTM 0x0002 /* Transmit Interrupt Mask */
+#define IDONM 0x0001 /* Initialization Done Mask */
+#define DXMT2PD 0x1000 /* Disable Transmit Two Part Deferral */
+#define EMBA 0x0800 /* Enable Modified Back-off Algorithm */
+
+
+ /*
+ * Bit definitions for CSR4 (Test and Features Control)
+ *
+ * These values are already swap()ed!!
+ */
+
+#define ENTST 0x0080 /* Enable Test Mode */
+#define DMAPLUS 0x0040 /* Disable Burst Transaction Counter */
+#define TIMER 0x0020 /* Timer Enable Register */
+#define DPOLL 0x0010 /* Disable Transmit Polling */
+#define APAD_XMT 0x0008 /* Auto Pad Transmit */
+#define ASTRP_RCV 0x0004 /* Auto Pad Stripping */
+#define MFCO 0x0002 /* Missed Frame Counter Overflow Interrupt */
+#define MFCOM 0x0001 /* Missed Frame Counter Overflow Mask */
+#define RCVCCO 0x2000 /* Receive Collision Counter Overflow Interrupt */
+#define RCVCCOM 0x1000 /* Receive Collision Counter Overflow Mask */
+#define TXSTRT 0x0800 /* Transmit Start Status */
+#define TXSTRTM 0x0400 /* Transmit Start Mask */
+#define JAB 0x0200 /* Jabber Error */
+#define JABM 0x0100 /* Jabber Error Mask */
+
+
+ /*
+ * Bit definitions for CSR15 (Mode Register)
+ *
+ * These values are already swap()ed!!
+ */
+
+#define PROM 0x0080 /* Promiscuous Mode */
+#define DRCVBC 0x0040 /* Disable Receive Broadcast */
+#define DRCVPA 0x0020 /* Disable Receive Physical Address */
+#define DLNKTST 0x0010 /* Disable Link Status */
+#define DAPC 0x0008 /* Disable Automatic Polarity Correction */
+#define MENDECL 0x0004 /* MENDEC Loopback Mode */
+#define LRTTSEL 0x0002 /* Low Receive Threshold/Transmit Mode Select */
+#define PORTSEL1 0x0001 /* Port Select Bits */
+#define PORTSEL2 0x8000 /* Port Select Bits */
+#define INTL 0x4000 /* Internal Loopback */
+#define DRTY 0x2000 /* Disable Retry */
+#define FCOLL 0x1000 /* Force Collision */
+#define DXMTFCS 0x0800 /* Disable Transmit CRC */
+#define LOOP 0x0400 /* Loopback Enable */
+#define DTX 0x0200 /* Disable Transmitter */
+#define DRX 0x0100 /* Disable Receiver */
+
+
+ /*
+ * Bit definitions for ISACSR2 (Miscellaneous Configuration)
+ *
+ * These values are already swap()ed!!
+ */
+
+#define ASEL 0x0200 /* Media Interface Port Auto Select */
+
+
+ /*
+ * Bit definitions for ISACSR5-7 (LED1-3 Status)
+ *
+ * These values are already swap()ed!!
+ */
+
+#define LEDOUT 0x0080 /* Current LED Status */
+#define PSE 0x8000 /* Pulse Stretcher Enable */
+#define XMTE 0x1000 /* Enable Transmit Status Signal */
+#define RVPOLE 0x0800 /* Enable Receive Polarity Signal */
+#define RCVE 0x0400 /* Enable Receive Status Signal */
+#define JABE 0x0200 /* Enable Jabber Signal */
+#define COLE 0x0100 /* Enable Collision Signal */
+
+
+ /*
+ * Receive Descriptor Ring Entry
+ */
+
+struct RDRE {
+ volatile u_short RMD0; /* LADR[15:0] */
+ volatile u_short RMD1; /* HADR[23:16] | Receive Flags */
+ volatile u_short RMD2; /* Buffer Byte Count (two's complement) */
+ volatile u_short RMD3; /* Message Byte Count */
+};
+
+
+ /*
+ * Transmit Descriptor Ring Entry
+ */
+
+struct TDRE {
+ volatile u_short TMD0; /* LADR[15:0] */
+ volatile u_short TMD1; /* HADR[23:16] | Transmit Flags */
+ volatile u_short TMD2; /* Buffer Byte Count (two's complement) */
+ volatile u_short TMD3; /* Error Flags */
+};
+
+
+ /*
+ * Receive Flags
+ */
+
+#define RF_OWN 0x0080 /* PCnet-ISA controller owns the descriptor */
+#define RF_ERR 0x0040 /* Error */
+#define RF_FRAM 0x0020 /* Framing Error */
+#define RF_OFLO 0x0010 /* Overflow Error */
+#define RF_CRC 0x0008 /* CRC Error */
+#define RF_BUFF 0x0004 /* Buffer Error */
+#define RF_STP 0x0002 /* Start of Packet */
+#define RF_ENP 0x0001 /* End of Packet */
+
+
+ /*
+ * Transmit Flags
+ */
+
+#define TF_OWN 0x0080 /* PCnet-ISA controller owns the descriptor */
+#define TF_ERR 0x0040 /* Error */
+#define TF_ADD_FCS 0x0020 /* Controls FCS Generation */
+#define TF_MORE 0x0010 /* More than one retry needed */
+#define TF_ONE 0x0008 /* One retry needed */
+#define TF_DEF 0x0004 /* Deferred */
+#define TF_STP 0x0002 /* Start of Packet */
+#define TF_ENP 0x0001 /* End of Packet */
+
+
+ /*
+ * Error Flags
+ */
+
+#define EF_BUFF 0x0080 /* Buffer Error */
+#define EF_UFLO 0x0040 /* Underflow Error */
+#define EF_LCOL 0x0010 /* Late Collision */
+#define EF_LCAR 0x0008 /* Loss of Carrier */
+#define EF_RTRY 0x0004 /* Retry Error */
+#define EF_TDR 0xff03 /* Time Domain Reflectometry */
+
+
+
+ /*
+ * MC68230 Parallel Interface/Timer
+ */
+
+struct MC68230 {
+ volatile u_char PGCR; /* Port General Control Register */
+ u_char Pad1[1];
+ volatile u_char PSRR; /* Port Service Request Register */
+ u_char Pad2[1];
+ volatile u_char PADDR; /* Port A Data Direction Register */
+ u_char Pad3[1];
+ volatile u_char PBDDR; /* Port B Data Direction Register */
+ u_char Pad4[1];
+ volatile u_char PCDDR; /* Port C Data Direction Register */
+ u_char Pad5[1];
+ volatile u_char PIVR; /* Port Interrupt Vector Register */
+ u_char Pad6[1];
+ volatile u_char PACR; /* Port A Control Register */
+ u_char Pad7[1];
+ volatile u_char PBCR; /* Port B Control Register */
+ u_char Pad8[1];
+ volatile u_char PADR; /* Port A Data Register */
+ u_char Pad9[1];
+ volatile u_char PBDR; /* Port B Data Register */
+ u_char Pad10[1];
+ volatile u_char PAAR; /* Port A Alternate Register */
+ u_char Pad11[1];
+ volatile u_char PBAR; /* Port B Alternate Register */
+ u_char Pad12[1];
+ volatile u_char PCDR; /* Port C Data Register */
+ u_char Pad13[1];
+ volatile u_char PSR; /* Port Status Register */
+ u_char Pad14[5];
+ volatile u_char TCR; /* Timer Control Register */
+ u_char Pad15[1];
+ volatile u_char TIVR; /* Timer Interrupt Vector Register */
+ u_char Pad16[3];
+ volatile u_char CPRH; /* Counter Preload Register (High) */
+ u_char Pad17[1];
+ volatile u_char CPRM; /* Counter Preload Register (Mid) */
+ u_char Pad18[1];
+ volatile u_char CPRL; /* Counter Preload Register (Low) */
+ u_char Pad19[3];
+ volatile u_char CNTRH; /* Count Register (High) */
+ u_char Pad20[1];
+ volatile u_char CNTRM; /* Count Register (Mid) */
+ u_char Pad21[1];
+ volatile u_char CNTRL; /* Count Register (Low) */
+ u_char Pad22[1];
+ volatile u_char TSR; /* Timer Status Register */
+ u_char Pad23[11];
+};
+
+
+ /*
+ * Ariadne Expansion Board Structure
+ */
+
+#define ARIADNE_LANCE 0x360
+
+#define ARIADNE_PIT 0x1000
+
+#define ARIADNE_BOOTPROM 0x4000 /* I guess it's here :-) */
+#define ARIADNE_BOOTPROM_SIZE 0x4000
+
+#define ARIADNE_RAM 0x8000 /* Always access WORDs!! */
+#define ARIADNE_RAM_SIZE 0x8000
+
diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c
new file mode 100644
index 000000000..5e8b72db8
--- /dev/null
+++ b/drivers/net/ethernet/amd/atarilance.c
@@ -0,0 +1,1166 @@
+/* atarilance.c: Ethernet driver for VME Lance cards on the Atari */
+/*
+ Written 1995/96 by Roman Hodek (Roman.Hodek@informatik.uni-erlangen.de)
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+	This driver was written with the following sources of reference:
+ - The driver for the Riebl Lance card by the TU Vienna.
+ - The modified TUW driver for PAM's VME cards
+ - The PC-Linux driver for Lance cards (but this is for bus master
+ cards, not the shared memory ones)
+ - The Amiga Ariadne driver
+
+ v1.0: (in 1.2.13pl4/0.9.13)
+ Initial version
+ v1.1: (in 1.2.13pl5)
+ more comments
+ deleted some debugging stuff
+ optimized register access (keep AREG pointing to CSR0)
+ following AMD, CSR0_STRT should be set only after IDON is detected
+ use memcpy() for data transfers, that also employs long word moves
+		use memcpy() for data transfers, which also employs long word moves
+ non-VME-RieblCards need extra delays in memcpy
+ must also do write test, since 0xfxe00000 may hit ROM
+ use 8/32 tx/rx buffers, which should give better NFS performance;
+ this is made possible by shifting the last packet buffer after the
+ RieblCard reserved area
+ v1.2: (in 1.2.13pl8)
+ again fixed probing for the Falcon; 0xfe01000 hits phys. 0x00010000
+ and thus RAM, in case of no Lance found all memory contents have to
+ be restored!
+ Now possible to compile as module.
+ v1.3: 03/30/96 Jes Sorensen, Roman (in 1.3)
+ Several little 1.3 adaptions
+ When the lance is stopped it jumps back into little-endian
+ mode. It is therefore necessary to put it back where it
+ belongs, in big endian mode, in order to make things work.
+ This might be the reason why multicast-mode didn't work
+ before, but I'm not able to test it as I only got an Amiga
+ (we had similar problems with the A2065 driver).
+
+*/
+
+static const char version[] = "atarilance.c: v1.3 04/04/96 "
+ "Roman.Hodek@informatik.uni-erlangen.de\n";
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/module.h>
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/skbuff.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+
+#include <asm/setup.h>
+#include <asm/irq.h>
+#include <asm/atarihw.h>
+#include <asm/atariints.h>
+#include <asm/io.h>
+
+/* Debug level:
+ * 0 = silent, print only serious errors
+ * 1 = normal, print error messages
+ * 2 = debug, print debug infos
+ * 3 = debug, print even more debug infos (packet data)
+ */
+
+#define LANCE_DEBUG 1
+
+#ifdef LANCE_DEBUG
+static int lance_debug = LANCE_DEBUG;
+#else
+static int lance_debug = 1;
+#endif
+module_param(lance_debug, int, 0);
+MODULE_PARM_DESC(lance_debug, "atarilance debug level (0-3)");
+MODULE_LICENSE("GPL");
+
+/* Print debug messages on probing? */
+#undef LANCE_DEBUG_PROBE
+
+#define DPRINTK(n,a) \
+ do { \
+ if (lance_debug >= n) \
+ printk a; \
+ } while( 0 )
+
+#ifdef LANCE_DEBUG_PROBE
+# define PROBE_PRINT(a) printk a
+#else
+# define PROBE_PRINT(a)
+#endif
+
+/* These define the number of Rx and Tx buffers as log2. (Only powers
+ * of two are valid)
+ * Many more rx buffers (32) are reserved than tx buffers (8), since receiving
+ * is more time critical than sending, and packets may have to remain in the
+ * board's memory when main memory is low.
+ */
+
+#define TX_LOG_RING_SIZE 3
+#define RX_LOG_RING_SIZE 5
+
+/* These are the derived values */
+
+#define TX_RING_SIZE (1 << TX_LOG_RING_SIZE)
+#define TX_RING_LEN_BITS (TX_LOG_RING_SIZE << 5)
+#define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
+
+#define RX_RING_SIZE (1 << RX_LOG_RING_SIZE)
+#define RX_RING_LEN_BITS (RX_LOG_RING_SIZE << 5)
+#define RX_RING_MOD_MASK (RX_RING_SIZE - 1)
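+
+/* E.g. TX_LOG_RING_SIZE == 3 gives 8 tx buffers, and TX_RING_LEN_BITS
+ * is then 3 << 5: the log2 ring length lands in bits 5..7 of the 'len'
+ * byte of the init-block ring descriptor (struct ringdesc below).
+ */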
+
+#define TX_TIMEOUT (HZ/5)
+
+/* The LANCE Rx and Tx ring descriptors. */
+struct lance_rx_head {
+ unsigned short base; /* Low word of base addr */
+ volatile unsigned char flag;
+ unsigned char base_hi; /* High word of base addr (unused) */
+ short buf_length; /* This length is 2s complement! */
+ volatile short msg_length; /* This length is "normal". */
+};
+
+struct lance_tx_head {
+ unsigned short base; /* Low word of base addr */
+ volatile unsigned char flag;
+ unsigned char base_hi; /* High word of base addr (unused) */
+ short length; /* Length is 2s complement! */
+ volatile short misc;
+};
+
+struct ringdesc {
+ unsigned short adr_lo; /* Low 16 bits of address */
+ unsigned char len; /* Length bits */
+ unsigned char adr_hi; /* High 8 bits of address (unused) */
+};
+
+/* The LANCE initialization block, described in databook. */
+struct lance_init_block {
+ unsigned short mode; /* Pre-set mode */
+ unsigned char hwaddr[6]; /* Physical ethernet address */
+ unsigned filter[2]; /* Multicast filter (unused). */
+ /* Receive and transmit ring base, along with length bits. */
+ struct ringdesc rx_ring;
+ struct ringdesc tx_ring;
+};
+
+/* The whole layout of the Lance shared memory */
+struct lance_memory {
+ struct lance_init_block init;
+ struct lance_tx_head tx_head[TX_RING_SIZE];
+ struct lance_rx_head rx_head[RX_RING_SIZE];
+ char packet_area[]; /* packet data follow after the
+ * init block and the ring
+ * descriptors and are located
+ * at runtime */
+};
+
+/* RieblCard specifics:
+ * The original TOS driver for these cards reserves the area from offset
+ * 0xee70 to 0xeebb for storing configuration data. Of interest to us is the
+ * Ethernet address there, and the magic for verifying the data's validity.
+ * The reserved area isn't touched by packet buffers. Furthermore, offset 0xfffe
+ * is reserved for the interrupt vector number.
+ */
+#define RIEBL_RSVD_START 0xee70
+#define RIEBL_RSVD_END 0xeec0
+#define RIEBL_MAGIC 0x09051990
+#define RIEBL_MAGIC_ADDR ((unsigned long *)(((char *)MEM) + 0xee8a))
+#define RIEBL_HWADDR_ADDR ((unsigned char *)(((char *)MEM) + 0xee8e))
+#define RIEBL_IVEC_ADDR ((unsigned short *)(((char *)MEM) + 0xfffe))
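+
+/* Note that the magic and the hardware address above live inside the
+ * reserved 0xee70..0xeec0 window, while the interrupt vector at 0xfffe
+ * sits outside it.
+ */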
+
+/* This is a default address for the old RieblCards without a battery
+ * that have no ethernet address at boot time. 00:00:36:04 is the
+ * prefix for Riebl cards, the 00:00 at the end is arbitrary.
+ */
+
+static unsigned char OldRieblDefHwaddr[6] = {
+ 0x00, 0x00, 0x36, 0x04, 0x00, 0x00
+};
+
+
+/* I/O registers of the Lance chip */
+
+struct lance_ioreg {
+/* base+0x0 */ volatile unsigned short data;
+/* base+0x2 */ volatile unsigned short addr;
+ unsigned char _dummy1[3];
+/* base+0x7 */ volatile unsigned char ivec;
+ unsigned char _dummy2[5];
+/* base+0xd */ volatile unsigned char eeprom;
+ unsigned char _dummy3;
+/* base+0xf */ volatile unsigned char mem;
+};
+
+/* Types of boards this driver supports */
+
+enum lance_type {
+ OLD_RIEBL, /* old Riebl card without battery */
+ NEW_RIEBL, /* new Riebl card with battery */
+ PAM_CARD /* PAM card with EEPROM */
+};
+
+static char *lance_names[] = {
+ "Riebl-Card (without battery)",
+ "Riebl-Card (with battery)",
+ "PAM intern card"
+};
+
+/* The driver's private device structure */
+
+struct lance_private {
+ enum lance_type cardtype;
+ struct lance_ioreg *iobase;
+ struct lance_memory *mem;
+ int cur_rx, cur_tx; /* The next free ring entry */
+ int dirty_tx; /* Ring entries to be freed. */
+ /* copy function */
+ void *(*memcpy_f)( void *, const void *, size_t );
+/* This must be long for set_bit() */
+ long tx_full;
+ spinlock_t devlock;
+};
+
+/* I/O register access macros */
+
+#define MEM lp->mem
+#define DREG IO->data
+#define AREG IO->addr
+#define REGA(a) (*( AREG = (a), &DREG ))
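+
+/* REGA(a) is a comma expression: it first selects CSR a through the
+ * address port, then yields the data port lvalue, so e.g.
+ * REGA(CSR0) = CSR0_STOP writes CSR0 and leaves AREG pointing at it.
+ */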
+
+/* Definitions for packet buffer access: */
+#define PKT_BUF_SZ 1544
+/* Get the address of a packet buffer corresponding to a given buffer head */
+#define PKTBUF_ADDR(head) (((unsigned char *)(MEM)) + (head)->base)
+
+/* Possible memory/IO addresses for probing */
+
+static struct lance_addr {
+ unsigned long memaddr;
+ unsigned long ioaddr;
+ int slow_flag;
+} lance_addr_list[] = {
+ { 0xfe010000, 0xfe00fff0, 0 }, /* RieblCard VME in TT */
+ { 0xffc10000, 0xffc0fff0, 0 }, /* RieblCard VME in MegaSTE
+ (highest byte stripped) */
+ { 0xffe00000, 0xffff7000, 1 }, /* RieblCard in ST
+ (highest byte stripped) */
+ { 0xffd00000, 0xffff7000, 1 }, /* RieblCard in ST with hw modif. to
+ avoid conflict with ROM
+ (highest byte stripped) */
+ { 0xffcf0000, 0xffcffff0, 0 }, /* PAMCard VME in TT and MSTE
+ (highest byte stripped) */
+ { 0xfecf0000, 0xfecffff0, 0 }, /* Rhotron's PAMCard VME in TT and MSTE
+ (highest byte stripped) */
+};
+
+#define N_LANCE_ADDR ARRAY_SIZE(lance_addr_list)
+
+
+/* Definitions for the Lance */
+
+/* tx_head flags */
+#define TMD1_ENP 0x01 /* end of packet */
+#define TMD1_STP 0x02 /* start of packet */
+#define TMD1_DEF 0x04 /* deferred */
+#define TMD1_ONE 0x08 /* one retry needed */
+#define TMD1_MORE 0x10 /* more than one retry needed */
+#define TMD1_ERR 0x40 /* error summary */
+#define TMD1_OWN 0x80 /* ownership (set: chip owns) */
+
+#define TMD1_OWN_CHIP TMD1_OWN
+#define TMD1_OWN_HOST 0
+
+/* tx_head misc field */
+#define TMD3_TDR 0x03FF /* Time Domain Reflectometry counter */
+#define TMD3_RTRY 0x0400 /* failed after 16 retries */
+#define TMD3_LCAR 0x0800 /* carrier lost */
+#define TMD3_LCOL 0x1000 /* late collision */
+#define TMD3_UFLO 0x4000 /* underflow (late memory) */
+#define TMD3_BUFF 0x8000 /* buffering error (no ENP) */
+
+/* rx_head flags */
+#define RMD1_ENP 0x01 /* end of packet */
+#define RMD1_STP 0x02 /* start of packet */
+#define RMD1_BUFF 0x04 /* buffer error */
+#define RMD1_CRC 0x08 /* CRC error */
+#define RMD1_OFLO 0x10 /* overflow */
+#define RMD1_FRAM 0x20 /* framing error */
+#define RMD1_ERR 0x40 /* error summary */
+#define RMD1_OWN	0x80	/* ownership (set: chip owns) */
+
+#define RMD1_OWN_CHIP RMD1_OWN
+#define RMD1_OWN_HOST 0
+
+/* register names */
+#define CSR0 0 /* mode/status */
+#define CSR1 1 /* init block addr (low) */
+#define CSR2 2 /* init block addr (high) */
+#define CSR3 3 /* misc */
+#define CSR8 8 /* address filter */
+#define CSR15 15 /* promiscuous mode */
+
+/* CSR0 */
+/* (R=readable, W=writeable, S=set on write, C=clear on write) */
+#define CSR0_INIT 0x0001 /* initialize (RS) */
+#define CSR0_STRT 0x0002 /* start (RS) */
+#define CSR0_STOP 0x0004 /* stop (RS) */
+#define CSR0_TDMD 0x0008 /* transmit demand (RS) */
+#define CSR0_TXON 0x0010 /* transmitter on (R) */
+#define CSR0_RXON 0x0020 /* receiver on (R) */
+#define CSR0_INEA 0x0040 /* interrupt enable (RW) */
+#define CSR0_INTR 0x0080 /* interrupt active (R) */
+#define CSR0_IDON 0x0100 /* initialization done (RC) */
+#define CSR0_TINT 0x0200 /* transmitter interrupt (RC) */
+#define CSR0_RINT 0x0400 /* receiver interrupt (RC) */
+#define CSR0_MERR 0x0800 /* memory error (RC) */
+#define CSR0_MISS 0x1000 /* missed frame (RC) */
+#define CSR0_CERR 0x2000 /* carrier error (no heartbeat :-) (RC) */
+#define CSR0_BABL 0x4000 /* babble: tx-ed too many bits (RC) */
+#define CSR0_ERR 0x8000 /* error (RC) */
+
+/* CSR3 */
+#define CSR3_BCON 0x0001 /* byte control */
+#define CSR3_ACON 0x0002 /* ALE control */
+#define CSR3_BSWP 0x0004 /* byte swap (1=big endian) */
+
+
+
+/***************************** Prototypes *****************************/
+
+static unsigned long lance_probe1( struct net_device *dev, struct lance_addr
+ *init_rec );
+static int lance_open( struct net_device *dev );
+static void lance_init_ring( struct net_device *dev );
+static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
+ struct net_device *dev);
+static irqreturn_t lance_interrupt( int irq, void *dev_id );
+static int lance_rx( struct net_device *dev );
+static int lance_close( struct net_device *dev );
+static void set_multicast_list( struct net_device *dev );
+static int lance_set_mac_address( struct net_device *dev, void *addr );
+static void lance_tx_timeout (struct net_device *dev, unsigned int txqueue);
+
+/************************* End of Prototypes **************************/
+
+
+
+
+
+static void *slow_memcpy( void *dst, const void *src, size_t len )
+
+{ char *cto = dst;
+ const char *cfrom = src;
+
+ while( len-- ) {
+ *cto++ = *cfrom++;
+ MFPDELAY();
+ }
+ return dst;
+}
+
+
+struct net_device * __init atarilance_probe(int unit)
+{
+ int i;
+ static int found;
+ struct net_device *dev;
+ int err = -ENODEV;
+
+ if (!MACH_IS_ATARI || found)
+ /* Assume there's only one board possible... That seems true, since
+ * the Riebl/PAM board's address cannot be changed. */
+ return ERR_PTR(-ENODEV);
+
+ dev = alloc_etherdev(sizeof(struct lance_private));
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+ if (unit >= 0) {
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+ }
+
+ for( i = 0; i < N_LANCE_ADDR; ++i ) {
+ if (lance_probe1( dev, &lance_addr_list[i] )) {
+ found = 1;
+ err = register_netdev(dev);
+ if (!err)
+ return dev;
+ free_irq(dev->irq, dev);
+ break;
+ }
+ }
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+
+
+/* Derived from hwreg_present() in atari/config.c: */
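+/* The asm below temporarily points the m68k bus error vector (vbr[2])
+ * at the local label Lberr, so that probing a nonexistent address
+ * bounces back here instead of taking a fatal exception; the saved
+ * vector is restored afterwards.
+ */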
+
+static noinline int __init addr_accessible(volatile void *regp, int wordflag,
+ int writeflag)
+{
+ int ret;
+ unsigned long flags;
+ long *vbr, save_berr;
+
+ local_irq_save(flags);
+
+ __asm__ __volatile__ ( "movec %/vbr,%0" : "=r" (vbr) : );
+ save_berr = vbr[2];
+
+ __asm__ __volatile__
+ ( "movel %/sp,%/d1\n\t"
+ "movel #Lberr,%2@\n\t"
+ "moveq #0,%0\n\t"
+ "tstl %3\n\t"
+ "bne 1f\n\t"
+ "moveb %1@,%/d0\n\t"
+ "nop \n\t"
+ "bra 2f\n"
+"1: movew %1@,%/d0\n\t"
+ "nop \n"
+"2: tstl %4\n\t"
+ "beq 2f\n\t"
+ "tstl %3\n\t"
+ "bne 1f\n\t"
+ "clrb %1@\n\t"
+ "nop \n\t"
+ "moveb %/d0,%1@\n\t"
+ "nop \n\t"
+ "bra 2f\n"
+"1: clrw %1@\n\t"
+ "nop \n\t"
+ "movew %/d0,%1@\n\t"
+ "nop \n"
+"2: moveq #1,%0\n"
+"Lberr: movel %/d1,%/sp"
+ : "=&d" (ret)
+ : "a" (regp), "a" (&vbr[2]), "rm" (wordflag), "rm" (writeflag)
+ : "d0", "d1", "memory"
+ );
+
+ vbr[2] = save_berr;
+ local_irq_restore(flags);
+
+ return ret;
+}
+
+static const struct net_device_ops lance_netdev_ops = {
+ .ndo_open = lance_open,
+ .ndo_stop = lance_close,
+ .ndo_start_xmit = lance_start_xmit,
+ .ndo_set_rx_mode = set_multicast_list,
+ .ndo_set_mac_address = lance_set_mac_address,
+ .ndo_tx_timeout = lance_tx_timeout,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+static unsigned long __init lance_probe1( struct net_device *dev,
+ struct lance_addr *init_rec )
+{
+ volatile unsigned short *memaddr =
+ (volatile unsigned short *)init_rec->memaddr;
+ volatile unsigned short *ioaddr =
+ (volatile unsigned short *)init_rec->ioaddr;
+ struct lance_private *lp;
+ struct lance_ioreg *IO;
+ int i;
+ static int did_version;
+ unsigned short save1, save2;
+
+ PROBE_PRINT(( "Probing for Lance card at mem %#lx io %#lx\n",
+ (long)memaddr, (long)ioaddr ));
+
+ /* Test whether memory readable and writable */
+ PROBE_PRINT(( "lance_probe1: testing memory to be accessible\n" ));
+ if (!addr_accessible( memaddr, 1, 1 )) goto probe_fail;
+
+ /* Written values should come back... */
+ PROBE_PRINT(( "lance_probe1: testing memory to be writable (1)\n" ));
+ save1 = *memaddr;
+ *memaddr = 0x0001;
+ if (*memaddr != 0x0001) goto probe_fail;
+ PROBE_PRINT(( "lance_probe1: testing memory to be writable (2)\n" ));
+ *memaddr = 0x0000;
+ if (*memaddr != 0x0000) goto probe_fail;
+ *memaddr = save1;
+
+ /* First port should be readable and writable */
+ PROBE_PRINT(( "lance_probe1: testing ioport to be accessible\n" ));
+ if (!addr_accessible( ioaddr, 1, 1 )) goto probe_fail;
+
+ /* and written values should be readable */
+ PROBE_PRINT(( "lance_probe1: testing ioport to be writeable\n" ));
+ save2 = ioaddr[1];
+ ioaddr[1] = 0x0001;
+ if (ioaddr[1] != 0x0001) goto probe_fail;
+
+ /* The CSR0_INIT bit should not be readable */
+ PROBE_PRINT(( "lance_probe1: testing CSR0 register function (1)\n" ));
+ save1 = ioaddr[0];
+ ioaddr[1] = CSR0;
+ ioaddr[0] = CSR0_INIT | CSR0_STOP;
+ if (ioaddr[0] != CSR0_STOP) {
+ ioaddr[0] = save1;
+ ioaddr[1] = save2;
+ goto probe_fail;
+ }
+ PROBE_PRINT(( "lance_probe1: testing CSR0 register function (2)\n" ));
+ ioaddr[0] = CSR0_STOP;
+ if (ioaddr[0] != CSR0_STOP) {
+ ioaddr[0] = save1;
+ ioaddr[1] = save2;
+ goto probe_fail;
+ }
+
+ /* Now ok... */
+ PROBE_PRINT(( "lance_probe1: Lance card detected\n" ));
+ goto probe_ok;
+
+ probe_fail:
+ return 0;
+
+ probe_ok:
+ lp = netdev_priv(dev);
+ MEM = (struct lance_memory *)memaddr;
+ IO = lp->iobase = (struct lance_ioreg *)ioaddr;
+ dev->base_addr = (unsigned long)ioaddr; /* informational only */
+ lp->memcpy_f = init_rec->slow_flag ? slow_memcpy : memcpy;
+
+ REGA( CSR0 ) = CSR0_STOP;
+
+ /* Now test for type: If the eeprom I/O port is readable, it is a
+ * PAM card */
+ if (addr_accessible( &(IO->eeprom), 0, 0 )) {
+ /* Switch back to Ram */
+ i = IO->mem;
+ lp->cardtype = PAM_CARD;
+ }
+ else if (*RIEBL_MAGIC_ADDR == RIEBL_MAGIC) {
+ lp->cardtype = NEW_RIEBL;
+ }
+ else
+ lp->cardtype = OLD_RIEBL;
+
+ if (lp->cardtype == PAM_CARD ||
+ memaddr == (unsigned short *)0xffe00000) {
+ /* PAMs card and Riebl on ST use level 5 autovector */
+ if (request_irq(IRQ_AUTO_5, lance_interrupt, 0,
+ "PAM,Riebl-ST Ethernet", dev)) {
+ printk( "Lance: request for irq %d failed\n", IRQ_AUTO_5 );
+ return 0;
+ }
+ dev->irq = IRQ_AUTO_5;
+ }
+ else {
+ /* For VME-RieblCards, request a free VME int */
+ unsigned int irq = atari_register_vme_int();
+ if (!irq) {
+ printk( "Lance: request for VME interrupt failed\n" );
+ return 0;
+ }
+ if (request_irq(irq, lance_interrupt, 0, "Riebl-VME Ethernet",
+ dev)) {
+ printk( "Lance: request for irq %u failed\n", irq );
+ return 0;
+ }
+ dev->irq = irq;
+ }
+
+ printk("%s: %s at io %#lx, mem %#lx, irq %d%s, hwaddr ",
+ dev->name, lance_names[lp->cardtype],
+ (unsigned long)ioaddr,
+ (unsigned long)memaddr,
+ dev->irq,
+ init_rec->slow_flag ? " (slow memcpy)" : "" );
+
+ /* Get the ethernet address */
+ switch( lp->cardtype ) {
+ case OLD_RIEBL:
+ /* No ethernet address! (Set some default address) */
+ memcpy(dev->dev_addr, OldRieblDefHwaddr, ETH_ALEN);
+ break;
+ case NEW_RIEBL:
+ lp->memcpy_f(dev->dev_addr, RIEBL_HWADDR_ADDR, ETH_ALEN);
+ break;
+ case PAM_CARD:
+ i = IO->eeprom;
+ for( i = 0; i < 6; ++i )
+ dev->dev_addr[i] =
+ ((((unsigned short *)MEM)[i*2] & 0x0f) << 4) |
+ ((((unsigned short *)MEM)[i*2+1] & 0x0f));
+ i = IO->mem;
+ break;
+ }
+ printk("%pM\n", dev->dev_addr);
+ if (lp->cardtype == OLD_RIEBL) {
+ printk( "%s: Warning: This is a default ethernet address!\n",
+ dev->name );
+ printk( " Use \"ifconfig hw ether ...\" to set the address.\n" );
+ }
+
+ spin_lock_init(&lp->devlock);
+
+ MEM->init.mode = 0x0000; /* Disable Rx and Tx. */
+ for( i = 0; i < 6; i++ )
+ MEM->init.hwaddr[i] = dev->dev_addr[i^1]; /* <- 16 bit swap! */
+ MEM->init.filter[0] = 0x00000000;
+ MEM->init.filter[1] = 0x00000000;
+ MEM->init.rx_ring.adr_lo = offsetof( struct lance_memory, rx_head );
+ MEM->init.rx_ring.adr_hi = 0;
+ MEM->init.rx_ring.len = RX_RING_LEN_BITS;
+ MEM->init.tx_ring.adr_lo = offsetof( struct lance_memory, tx_head );
+ MEM->init.tx_ring.adr_hi = 0;
+ MEM->init.tx_ring.len = TX_RING_LEN_BITS;
+
+ if (lp->cardtype == PAM_CARD)
+ IO->ivec = IRQ_SOURCE_TO_VECTOR(dev->irq);
+ else
+ *RIEBL_IVEC_ADDR = IRQ_SOURCE_TO_VECTOR(dev->irq);
+
+ if (did_version++ == 0)
+ DPRINTK( 1, ( version ));
+
+ dev->netdev_ops = &lance_netdev_ops;
+
+ /* XXX MSch */
+ dev->watchdog_timeo = TX_TIMEOUT;
+
+ return 1;
+}
+
+
+static int lance_open( struct net_device *dev )
+{
+ struct lance_private *lp = netdev_priv(dev);
+ struct lance_ioreg *IO = lp->iobase;
+ int i;
+
+ DPRINTK( 2, ( "%s: lance_open()\n", dev->name ));
+
+ lance_init_ring(dev);
+ /* Re-initialize the LANCE, and start it when done. */
+
+ REGA( CSR3 ) = CSR3_BSWP | (lp->cardtype == PAM_CARD ? CSR3_ACON : 0);
+ REGA( CSR2 ) = 0;
+ REGA( CSR1 ) = 0;
+ REGA( CSR0 ) = CSR0_INIT;
+ /* From now on, AREG is kept to point to CSR0 */
+
+ i = 1000000;
+ while (--i > 0)
+ if (DREG & CSR0_IDON)
+ break;
+ if (i <= 0 || (DREG & CSR0_ERR)) {
+ DPRINTK( 2, ( "lance_open(): opening %s failed, i=%d, csr0=%04x\n",
+ dev->name, i, DREG ));
+ DREG = CSR0_STOP;
+ return -EIO;
+ }
+ DREG = CSR0_IDON;
+ DREG = CSR0_STRT;
+ DREG = CSR0_INEA;
+
+ netif_start_queue (dev);
+
+ DPRINTK( 2, ( "%s: LANCE is open, csr0 %04x\n", dev->name, DREG ));
+
+ return 0;
+}
+
+
+/* Initialize the LANCE Rx and Tx rings. */
+
+static void lance_init_ring( struct net_device *dev )
+{
+ struct lance_private *lp = netdev_priv(dev);
+ int i;
+ unsigned offset;
+
+ lp->tx_full = 0;
+ lp->cur_rx = lp->cur_tx = 0;
+ lp->dirty_tx = 0;
+
+ offset = offsetof( struct lance_memory, packet_area );
+
+/* If the packet buffer at offset 'o' would conflict with the reserved area
+ * of RieblCards, advance it */
+#define CHECK_OFFSET(o) \
+ do { \
+ if (lp->cardtype == OLD_RIEBL || lp->cardtype == NEW_RIEBL) { \
+ if (((o) < RIEBL_RSVD_START) ? (o)+PKT_BUF_SZ > RIEBL_RSVD_START \
+ : (o) < RIEBL_RSVD_END) \
+ (o) = RIEBL_RSVD_END; \
+ } \
+ } while(0)
+
+ for( i = 0; i < TX_RING_SIZE; i++ ) {
+ CHECK_OFFSET(offset);
+ MEM->tx_head[i].base = offset;
+ MEM->tx_head[i].flag = TMD1_OWN_HOST;
+ MEM->tx_head[i].base_hi = 0;
+ MEM->tx_head[i].length = 0;
+ MEM->tx_head[i].misc = 0;
+ offset += PKT_BUF_SZ;
+ }
+
+ for( i = 0; i < RX_RING_SIZE; i++ ) {
+ CHECK_OFFSET(offset);
+ MEM->rx_head[i].base = offset;
+ MEM->rx_head[i].flag = TMD1_OWN_CHIP;
+ MEM->rx_head[i].base_hi = 0;
+ MEM->rx_head[i].buf_length = -PKT_BUF_SZ;
+ MEM->rx_head[i].msg_length = 0;
+ offset += PKT_BUF_SZ;
+ }
+}
+
+
+/* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */
+
+
+static void lance_tx_timeout (struct net_device *dev, unsigned int txqueue)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ struct lance_ioreg *IO = lp->iobase;
+
+ AREG = CSR0;
+ DPRINTK( 1, ( "%s: transmit timed out, status %04x, resetting.\n",
+ dev->name, DREG ));
+ DREG = CSR0_STOP;
+ /*
+ * Always set BSWP after a STOP as STOP puts it back into
+ * little endian mode.
+ */
+ REGA( CSR3 ) = CSR3_BSWP | (lp->cardtype == PAM_CARD ? CSR3_ACON : 0);
+ dev->stats.tx_errors++;
+#ifndef final_version
+ { int i;
+ DPRINTK( 2, ( "Ring data: dirty_tx %d cur_tx %d%s cur_rx %d\n",
+ lp->dirty_tx, lp->cur_tx,
+ lp->tx_full ? " (full)" : "",
+ lp->cur_rx ));
+ for( i = 0 ; i < RX_RING_SIZE; i++ )
+ DPRINTK( 2, ( "rx #%d: base=%04x blen=%04x mlen=%04x\n",
+ i, MEM->rx_head[i].base,
+ -MEM->rx_head[i].buf_length,
+ MEM->rx_head[i].msg_length ));
+ for( i = 0 ; i < TX_RING_SIZE; i++ )
+ DPRINTK( 2, ( "tx #%d: base=%04x len=%04x misc=%04x\n",
+ i, MEM->tx_head[i].base,
+ -MEM->tx_head[i].length,
+ MEM->tx_head[i].misc ));
+ }
+#endif
+ /* XXX MSch: maybe purge/reinit ring here */
+ /* lance_restart, essentially */
+ lance_init_ring(dev);
+ REGA( CSR0 ) = CSR0_INEA | CSR0_INIT | CSR0_STRT;
+ netif_trans_update(dev); /* prevent tx timeout */
+ netif_wake_queue(dev);
+}
+
+/* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */
+
+static netdev_tx_t
+lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ struct lance_ioreg *IO = lp->iobase;
+ int entry, len;
+ struct lance_tx_head *head;
+ unsigned long flags;
+
+ DPRINTK( 2, ( "%s: lance_start_xmit() called, csr0 %4.4x.\n",
+ dev->name, DREG ));
+
+
+	/* The old LANCE chips don't automatically pad buffers to the minimum size. */
+ len = skb->len;
+ if (len < ETH_ZLEN)
+ len = ETH_ZLEN;
+	/* PAM-Card has a bug: Can only send packets with an even number of bytes! */
+ else if (lp->cardtype == PAM_CARD && (len & 1))
+ ++len;
+
+ if (len > skb->len) {
+ if (skb_padto(skb, len))
+ return NETDEV_TX_OK;
+ }
+
+ netif_stop_queue (dev);
+
+ /* Fill in a Tx ring entry */
+ if (lance_debug >= 3) {
+ printk( "%s: TX pkt type 0x%04x from %pM to %pM"
+ " data at 0x%08x len %d\n",
+ dev->name, ((u_short *)skb->data)[6],
+ &skb->data[6], skb->data,
+ (int)skb->data, (int)skb->len );
+ }
+
+ /* We're not prepared for the int until the last flags are set/reset. And
+ * the int may happen already after setting the OWN_CHIP... */
+ spin_lock_irqsave (&lp->devlock, flags);
+
+ /* Mask to ring buffer boundary. */
+ entry = lp->cur_tx & TX_RING_MOD_MASK;
+ head = &(MEM->tx_head[entry]);
+
+ /* Caution: the write order is important here, set the "ownership" bits
+ * last.
+ */
+
+
+ head->length = -len;
+ head->misc = 0;
+ lp->memcpy_f( PKTBUF_ADDR(head), (void *)skb->data, skb->len );
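+	/* OWN hands the descriptor to the chip; STP|ENP mark the frame
+	 * as fully contained in this single buffer. */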
+ head->flag = TMD1_OWN_CHIP | TMD1_ENP | TMD1_STP;
+ dev->stats.tx_bytes += skb->len;
+ dev_consume_skb_irq(skb);
+ lp->cur_tx++;
+ while( lp->cur_tx >= TX_RING_SIZE && lp->dirty_tx >= TX_RING_SIZE ) {
+ lp->cur_tx -= TX_RING_SIZE;
+ lp->dirty_tx -= TX_RING_SIZE;
+ }
+
+ /* Trigger an immediate send poll. */
+ DREG = CSR0_INEA | CSR0_TDMD;
+
+ if ((MEM->tx_head[(entry+1) & TX_RING_MOD_MASK].flag & TMD1_OWN) ==
+ TMD1_OWN_HOST)
+ netif_start_queue (dev);
+ else
+ lp->tx_full = 1;
+ spin_unlock_irqrestore (&lp->devlock, flags);
+
+ return NETDEV_TX_OK;
+}
+
+/* The LANCE interrupt handler. */
+
+static irqreturn_t lance_interrupt( int irq, void *dev_id )
+{
+ struct net_device *dev = dev_id;
+ struct lance_private *lp;
+ struct lance_ioreg *IO;
+ int csr0, boguscnt = 10;
+ int handled = 0;
+
+ if (dev == NULL) {
+ DPRINTK( 1, ( "lance_interrupt(): interrupt for unknown device.\n" ));
+ return IRQ_NONE;
+ }
+
+ lp = netdev_priv(dev);
+ IO = lp->iobase;
+ spin_lock (&lp->devlock);
+
+ AREG = CSR0;
+
+ while( ((csr0 = DREG) & (CSR0_ERR | CSR0_TINT | CSR0_RINT)) &&
+ --boguscnt >= 0) {
+ handled = 1;
+ /* Acknowledge all of the current interrupt sources ASAP. */
+ DREG = csr0 & ~(CSR0_INIT | CSR0_STRT | CSR0_STOP |
+ CSR0_TDMD | CSR0_INEA);
+
+ DPRINTK( 2, ( "%s: interrupt csr0=%04x new csr=%04x.\n",
+ dev->name, csr0, DREG ));
+
+ if (csr0 & CSR0_RINT) /* Rx interrupt */
+ lance_rx( dev );
+
+ if (csr0 & CSR0_TINT) { /* Tx-done interrupt */
+ int dirty_tx = lp->dirty_tx;
+
+ while( dirty_tx < lp->cur_tx) {
+ int entry = dirty_tx & TX_RING_MOD_MASK;
+ int status = MEM->tx_head[entry].flag;
+
+ if (status & TMD1_OWN_CHIP)
+ break; /* It still hasn't been Txed */
+
+ MEM->tx_head[entry].flag = 0;
+
+ if (status & TMD1_ERR) {
+					/* There was a major error; log it. */
+ int err_status = MEM->tx_head[entry].misc;
+ dev->stats.tx_errors++;
+ if (err_status & TMD3_RTRY) dev->stats.tx_aborted_errors++;
+ if (err_status & TMD3_LCAR) dev->stats.tx_carrier_errors++;
+ if (err_status & TMD3_LCOL) dev->stats.tx_window_errors++;
+ if (err_status & TMD3_UFLO) {
+ /* Ackk! On FIFO errors the Tx unit is turned off! */
+ dev->stats.tx_fifo_errors++;
+ /* Remove this verbosity later! */
+ DPRINTK( 1, ( "%s: Tx FIFO error! Status %04x\n",
+ dev->name, csr0 ));
+ /* Restart the chip. */
+ DREG = CSR0_STRT;
+ }
+ } else {
+ if (status & (TMD1_MORE | TMD1_ONE | TMD1_DEF))
+ dev->stats.collisions++;
+ dev->stats.tx_packets++;
+ }
+
+ /* XXX MSch: free skb?? */
+ dirty_tx++;
+ }
+
+#ifndef final_version
+ if (lp->cur_tx - dirty_tx >= TX_RING_SIZE) {
+ DPRINTK( 0, ( "out-of-sync dirty pointer,"
+ " %d vs. %d, full=%ld.\n",
+ dirty_tx, lp->cur_tx, lp->tx_full ));
+ dirty_tx += TX_RING_SIZE;
+ }
+#endif
+
+ if (lp->tx_full && (netif_queue_stopped(dev)) &&
+ dirty_tx > lp->cur_tx - TX_RING_SIZE + 2) {
+ /* The ring is no longer full, clear tbusy. */
+ lp->tx_full = 0;
+ netif_wake_queue (dev);
+ }
+
+ lp->dirty_tx = dirty_tx;
+ }
+
+ /* Log misc errors. */
+ if (csr0 & CSR0_BABL) dev->stats.tx_errors++; /* Tx babble. */
+ if (csr0 & CSR0_MISS) dev->stats.rx_errors++; /* Missed a Rx frame. */
+ if (csr0 & CSR0_MERR) {
+ DPRINTK( 1, ( "%s: Bus master arbitration failure (?!?), "
+ "status %04x.\n", dev->name, csr0 ));
+ /* Restart the chip. */
+ DREG = CSR0_STRT;
+ }
+ }
+
+ /* Clear any other interrupt, and set interrupt enable. */
+ DREG = CSR0_BABL | CSR0_CERR | CSR0_MISS | CSR0_MERR |
+ CSR0_IDON | CSR0_INEA;
+
+ DPRINTK( 2, ( "%s: exiting interrupt, csr0=%#04x.\n",
+ dev->name, DREG ));
+
+ spin_unlock (&lp->devlock);
+ return IRQ_RETVAL(handled);
+}
+
+
+static int lance_rx( struct net_device *dev )
+{
+ struct lance_private *lp = netdev_priv(dev);
+ int entry = lp->cur_rx & RX_RING_MOD_MASK;
+ int i;
+
+ DPRINTK( 2, ( "%s: rx int, flag=%04x\n", dev->name,
+ MEM->rx_head[entry].flag ));
+
+ /* If we own the next entry, it's a new packet. Send it up. */
+ while( (MEM->rx_head[entry].flag & RMD1_OWN) == RMD1_OWN_HOST ) {
+ struct lance_rx_head *head = &(MEM->rx_head[entry]);
+ int status = head->flag;
+
+ if (status != (RMD1_ENP|RMD1_STP)) { /* There was an error. */
+ /* There is a tricky error noted by John Murphy,
+ <murf@perftech.com> to Russ Nelson: Even with full-sized
+ buffers it's possible for a jabber packet to use two
+ buffers, with only the last correctly noting the error. */
+ if (status & RMD1_ENP) /* Only count a general error at the */
+ dev->stats.rx_errors++; /* end of a packet.*/
+ if (status & RMD1_FRAM) dev->stats.rx_frame_errors++;
+ if (status & RMD1_OFLO) dev->stats.rx_over_errors++;
+ if (status & RMD1_CRC) dev->stats.rx_crc_errors++;
+ if (status & RMD1_BUFF) dev->stats.rx_fifo_errors++;
+ head->flag &= (RMD1_ENP|RMD1_STP);
+ } else {
+ /* Malloc up new buffer, compatible with net-3. */
+ short pkt_len = head->msg_length & 0xfff;
+ struct sk_buff *skb;
+
+ if (pkt_len < 60) {
+ printk( "%s: Runt packet!\n", dev->name );
+ dev->stats.rx_errors++;
+ }
+ else {
+ skb = netdev_alloc_skb(dev, pkt_len + 2);
+ if (skb == NULL) {
+ for( i = 0; i < RX_RING_SIZE; i++ )
+ if (MEM->rx_head[(entry+i) & RX_RING_MOD_MASK].flag &
+ RMD1_OWN_CHIP)
+ break;
+
+ if (i > RX_RING_SIZE - 2) {
+ dev->stats.rx_dropped++;
+ head->flag |= RMD1_OWN_CHIP;
+ lp->cur_rx++;
+ }
+ break;
+ }
+
+ if (lance_debug >= 3) {
+ u_char *data = PKTBUF_ADDR(head);
+
+ printk(KERN_DEBUG "%s: RX pkt type 0x%04x from %pM to %pM "
+ "data %8ph len %d\n",
+ dev->name, ((u_short *)data)[6],
+ &data[6], data, &data[15], pkt_len);
+ }
+
+ skb_reserve( skb, 2 ); /* 16 byte align */
+ skb_put( skb, pkt_len ); /* Make room */
+ lp->memcpy_f( skb->data, PKTBUF_ADDR(head), pkt_len );
+ skb->protocol = eth_type_trans( skb, dev );
+ netif_rx( skb );
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += pkt_len;
+ }
+ }
+
+ head->flag |= RMD1_OWN_CHIP;
+ entry = (++lp->cur_rx) & RX_RING_MOD_MASK;
+ }
+ lp->cur_rx &= RX_RING_MOD_MASK;
+
+ /* From lance.c (Donald Becker): */
+ /* We should check that at least two ring entries are free. If not,
+ we should free one and mark stats->rx_dropped++. */
+
+ return 0;
+}
+
+
+static int lance_close( struct net_device *dev )
+{
+ struct lance_private *lp = netdev_priv(dev);
+ struct lance_ioreg *IO = lp->iobase;
+
+ netif_stop_queue (dev);
+
+ AREG = CSR0;
+
+ DPRINTK( 2, ( "%s: Shutting down ethercard, status was %2.2x.\n",
+ dev->name, DREG ));
+
+ /* We stop the LANCE here -- it occasionally polls
+ memory if we don't. */
+ DREG = CSR0_STOP;
+
+ return 0;
+}
+
+
+/* Set or clear the multicast filter for this adaptor.
+ num_addrs == -1 Promiscuous mode, receive all packets
+ num_addrs == 0 Normal mode, clear multicast list
+ num_addrs > 0 Multicast mode, receive normal and MC packets, and do
+ best-effort filtering.
+ */
+
+static void set_multicast_list( struct net_device *dev )
+{
+ struct lance_private *lp = netdev_priv(dev);
+ struct lance_ioreg *IO = lp->iobase;
+
+	if (!netif_running(dev))
+ /* Only possible if board is already started */
+ return;
+
+ /* We take the simple way out and always enable promiscuous mode. */
+ DREG = CSR0_STOP; /* Temporarily stop the lance. */
+
+ if (dev->flags & IFF_PROMISC) {
+ /* Log any net taps. */
+ DPRINTK( 2, ( "%s: Promiscuous mode enabled.\n", dev->name ));
+ REGA( CSR15 ) = 0x8000; /* Set promiscuous mode */
+ } else {
+ short multicast_table[4];
+ int num_addrs = netdev_mc_count(dev);
+ int i;
+ /* We don't use the multicast table, but rely on upper-layer
+ * filtering. */
+ memset( multicast_table, (num_addrs == 0) ? 0 : -1,
+ sizeof(multicast_table) );
+ for( i = 0; i < 4; i++ )
+ REGA( CSR8+i ) = multicast_table[i];
+ REGA( CSR15 ) = 0; /* Unset promiscuous mode */
+ }
+
+ /*
+ * Always set BSWP after a STOP as STOP puts it back into
+ * little endian mode.
+ */
+ REGA( CSR3 ) = CSR3_BSWP | (lp->cardtype == PAM_CARD ? CSR3_ACON : 0);
+
+ /* Resume normal operation and reset AREG to CSR0 */
+ REGA( CSR0 ) = CSR0_IDON | CSR0_INEA | CSR0_STRT;
+}
+
+
+/* This is needed for old RieblCards and possibly for new RieblCards */
+
+static int lance_set_mac_address( struct net_device *dev, void *addr )
+{
+ struct lance_private *lp = netdev_priv(dev);
+ struct sockaddr *saddr = addr;
+ int i;
+
+ if (lp->cardtype != OLD_RIEBL && lp->cardtype != NEW_RIEBL)
+ return -EOPNOTSUPP;
+
+ if (netif_running(dev)) {
+ /* Only possible while card isn't started */
+ DPRINTK( 1, ( "%s: hwaddr can be set only while card isn't open.\n",
+ dev->name ));
+ return -EIO;
+ }
+
+ memcpy( dev->dev_addr, saddr->sa_data, dev->addr_len );
+ for( i = 0; i < 6; i++ )
+ MEM->init.hwaddr[i] = dev->dev_addr[i^1]; /* <- 16 bit swap! */
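+ /* The LANCE fetches the init block as 16-bit words, so on
+ this big-endian machine the two bytes of each word must be
+ stored swapped (hence the i^1 index). */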
+ lp->memcpy_f( RIEBL_HWADDR_ADDR, dev->dev_addr, 6 );
+ /* set also the magic for future sessions */
+ *RIEBL_MAGIC_ADDR = RIEBL_MAGIC;
+
+ return 0;
+}
+
+
+#ifdef MODULE
+static struct net_device *atarilance_dev;
+
+static int __init atarilance_module_init(void)
+{
+ atarilance_dev = atarilance_probe(-1);
+ return PTR_ERR_OR_ZERO(atarilance_dev);
+}
+
+static void __exit atarilance_module_exit(void)
+{
+ unregister_netdev(atarilance_dev);
+ free_irq(atarilance_dev->irq, atarilance_dev);
+ free_netdev(atarilance_dev);
+}
+module_init(atarilance_module_init);
+module_exit(atarilance_module_exit);
+#endif /* MODULE */
+
+
+/*
+ * Local variables:
+ * c-indent-level: 4
+ * tab-width: 4
+ * End:
+ */
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
new file mode 100644
index 000000000..19e195420
--- /dev/null
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -0,0 +1,1376 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ *
+ * Alchemy Au1x00 ethernet driver
+ *
+ * Copyright 2001-2003, 2006 MontaVista Software Inc.
+ * Copyright 2002 TimeSys Corp.
+ * Added ethtool/mii-tool support,
+ * Copyright 2004 Matt Porter <mporter@kernel.crashing.org>
+ * Update: 2004 Bjoern Riemer, riemer@fokus.fraunhofer.de
+ * or riemer@riemer-nt.de: fixed the link beat detection with
+ * ioctls (SIOCGMIIPHY)
+ * Copyright 2006 Herbert Valerio Riedel <hvr@gnu.org>
+ * converted to use linux-2.6.x's PHY framework
+ *
+ * Author: MontaVista Software, Inc.
+ * ppopov@mvista.com or source@mvista.com
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/capability.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/in.h>
+#include <linux/ioport.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+#include <linux/crc32.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/cpu.h>
+#include <linux/io.h>
+
+#include <asm/mipsregs.h>
+#include <asm/irq.h>
+#include <asm/processor.h>
+
+#include <au1000.h>
+#include <au1xxx_eth.h>
+#include <prom.h>
+
+#include "au1000_eth.h"
+
+#ifdef AU1000_ETH_DEBUG
+static int au1000_debug = 5;
+#else
+static int au1000_debug = 3;
+#endif
+
+#define AU1000_DEF_MSG_ENABLE (NETIF_MSG_DRV | \
+ NETIF_MSG_PROBE | \
+ NETIF_MSG_LINK)
+
+#define DRV_NAME "au1000_eth"
+#define DRV_AUTHOR "Pete Popov <ppopov@embeddedalley.com>"
+#define DRV_DESC "Au1xxx on-chip Ethernet driver"
+
+MODULE_AUTHOR(DRV_AUTHOR);
+MODULE_DESCRIPTION(DRV_DESC);
+MODULE_LICENSE("GPL");
+
+/* AU1000 MAC registers and bits */
+#define MAC_CONTROL 0x0
+# define MAC_RX_ENABLE (1 << 2)
+# define MAC_TX_ENABLE (1 << 3)
+# define MAC_DEF_CHECK (1 << 5)
+# define MAC_SET_BL(X) (((X) & 0x3) << 6)
+# define MAC_AUTO_PAD (1 << 8)
+# define MAC_DISABLE_RETRY (1 << 10)
+# define MAC_DISABLE_BCAST (1 << 11)
+# define MAC_LATE_COL (1 << 12)
+# define MAC_HASH_MODE (1 << 13)
+# define MAC_HASH_ONLY (1 << 15)
+# define MAC_PASS_ALL (1 << 16)
+# define MAC_INVERSE_FILTER (1 << 17)
+# define MAC_PROMISCUOUS (1 << 18)
+# define MAC_PASS_ALL_MULTI (1 << 19)
+# define MAC_FULL_DUPLEX (1 << 20)
+# define MAC_NORMAL_MODE 0
+# define MAC_INT_LOOPBACK (1 << 21)
+# define MAC_EXT_LOOPBACK (1 << 22)
+# define MAC_DISABLE_RX_OWN (1 << 23)
+# define MAC_BIG_ENDIAN (1 << 30)
+# define MAC_RX_ALL (1 << 31)
+#define MAC_ADDRESS_HIGH 0x4
+#define MAC_ADDRESS_LOW 0x8
+#define MAC_MCAST_HIGH 0xC
+#define MAC_MCAST_LOW 0x10
+#define MAC_MII_CNTRL 0x14
+# define MAC_MII_BUSY (1 << 0)
+# define MAC_MII_READ 0
+# define MAC_MII_WRITE (1 << 1)
+# define MAC_SET_MII_SELECT_REG(X) (((X) & 0x1f) << 6)
+# define MAC_SET_MII_SELECT_PHY(X) (((X) & 0x1f) << 11)
+#define MAC_MII_DATA 0x18
+#define MAC_FLOW_CNTRL 0x1C
+# define MAC_FLOW_CNTRL_BUSY (1 << 0)
+# define MAC_FLOW_CNTRL_ENABLE (1 << 1)
+# define MAC_PASS_CONTROL (1 << 2)
+# define MAC_SET_PAUSE(X) (((X) & 0xffff) << 16)
+#define MAC_VLAN1_TAG 0x20
+#define MAC_VLAN2_TAG 0x24
+
+/* Ethernet Controller Enable */
+# define MAC_EN_CLOCK_ENABLE (1 << 0)
+# define MAC_EN_RESET0 (1 << 1)
+# define MAC_EN_TOSS (0 << 2)
+# define MAC_EN_CACHEABLE (1 << 3)
+# define MAC_EN_RESET1 (1 << 4)
+# define MAC_EN_RESET2 (1 << 5)
+# define MAC_DMA_RESET (1 << 6)
+
+/* Ethernet Controller DMA Channels */
+/* offsets from MAC_TX_RING_ADDR address */
+#define MAC_TX_BUFF0_STATUS 0x0
+# define TX_FRAME_ABORTED (1 << 0)
+# define TX_JAB_TIMEOUT (1 << 1)
+# define TX_NO_CARRIER (1 << 2)
+# define TX_LOSS_CARRIER (1 << 3)
+# define TX_EXC_DEF (1 << 4)
+# define TX_LATE_COLL_ABORT (1 << 5)
+# define TX_EXC_COLL (1 << 6)
+# define TX_UNDERRUN (1 << 7)
+# define TX_DEFERRED (1 << 8)
+# define TX_LATE_COLL (1 << 9)
+# define TX_COLL_CNT_MASK (0xF << 10)
+# define TX_PKT_RETRY (1 << 31)
+#define MAC_TX_BUFF0_ADDR 0x4
+# define TX_DMA_ENABLE (1 << 0)
+# define TX_T_DONE (1 << 1)
+# define TX_GET_DMA_BUFFER(X) (((X) >> 2) & 0x3)
+#define MAC_TX_BUFF0_LEN 0x8
+#define MAC_TX_BUFF1_STATUS 0x10
+#define MAC_TX_BUFF1_ADDR 0x14
+#define MAC_TX_BUFF1_LEN 0x18
+#define MAC_TX_BUFF2_STATUS 0x20
+#define MAC_TX_BUFF2_ADDR 0x24
+#define MAC_TX_BUFF2_LEN 0x28
+#define MAC_TX_BUFF3_STATUS 0x30
+#define MAC_TX_BUFF3_ADDR 0x34
+#define MAC_TX_BUFF3_LEN 0x38
+
+/* offsets from MAC_RX_RING_ADDR */
+#define MAC_RX_BUFF0_STATUS 0x0
+# define RX_FRAME_LEN_MASK 0x3fff
+# define RX_WDOG_TIMER (1 << 14)
+# define RX_RUNT (1 << 15)
+# define RX_OVERLEN (1 << 16)
+# define RX_COLL (1 << 17)
+# define RX_ETHER (1 << 18)
+# define RX_MII_ERROR (1 << 19)
+# define RX_DRIBBLING (1 << 20)
+# define RX_CRC_ERROR (1 << 21)
+# define RX_VLAN1 (1 << 22)
+# define RX_VLAN2 (1 << 23)
+# define RX_LEN_ERROR (1 << 24)
+# define RX_CNTRL_FRAME (1 << 25)
+# define RX_U_CNTRL_FRAME (1 << 26)
+# define RX_MCAST_FRAME (1 << 27)
+# define RX_BCAST_FRAME (1 << 28)
+# define RX_FILTER_FAIL (1 << 29)
+# define RX_PACKET_FILTER (1 << 30)
+# define RX_MISSED_FRAME (1 << 31)
+
+# define RX_ERROR (RX_WDOG_TIMER | RX_RUNT | RX_OVERLEN | \
+ RX_COLL | RX_MII_ERROR | RX_CRC_ERROR | \
+ RX_LEN_ERROR | RX_U_CNTRL_FRAME | RX_MISSED_FRAME)
+#define MAC_RX_BUFF0_ADDR 0x4
+# define RX_DMA_ENABLE (1 << 0)
+# define RX_T_DONE (1 << 1)
+# define RX_GET_DMA_BUFFER(X) (((X) >> 2) & 0x3)
+# define RX_SET_BUFF_ADDR(X) ((X) & 0xffffffc0)
+#define MAC_RX_BUFF1_STATUS 0x10
+#define MAC_RX_BUFF1_ADDR 0x14
+#define MAC_RX_BUFF2_STATUS 0x20
+#define MAC_RX_BUFF2_ADDR 0x24
+#define MAC_RX_BUFF3_STATUS 0x30
+#define MAC_RX_BUFF3_ADDR 0x34
+
+/*
+ * Theory of operation
+ *
+ * The Au1000 MACs use a simple rx and tx descriptor ring scheme.
+ * There are four receive and four transmit descriptors. These
+ * descriptors are not in memory; rather, they are just a set of
+ * hardware registers.
+ *
+ * Since the Au1000 has a coherent data cache, the receive and
+ * transmit buffers are allocated from the KSEG0 segment. The
+ * hardware registers, however, are still mapped at KSEG1 to
+ * make sure there are no out-of-order writes, and that all writes
+ * complete immediately.
+ */
+
+/*
+ * board-specific configurations
+ *
+ * PHY detection algorithm
+ *
+ * If phy_static_config is undefined, the PHY setup is
+ * autodetected:
+ *
+ * mii_probe() first searches the current MAC's MII bus for a PHY,
+ * selecting the first (or last, if phy_search_highest_addr is
+ * defined) PHY address not already claimed by another netdev.
+ *
+ * If nothing was found that way when searching for the 2nd ethernet
+ * controller's PHY and phy1_search_mac0 is defined, then
+ * the first MII bus is searched as well for an unclaimed PHY; this is
+ * needed in case of a dual-PHY accessible only through the MAC0's MII
+ * bus.
+ *
+ * Finally, if no PHY is found, then the corresponding ethernet
+ * controller is not registered to the network subsystem.
+ */
+
+/* autodetection defaults: phy1_search_mac0 */
+
+/* static PHY setup
+ *
+ * most boards' PHY setup should be detected properly by the
+ * autodetection algorithm in mii_probe(), but in some cases (e.g. if
+ * you have a switch attached, or want to use the PHY's interrupt
+ * notification capabilities) you can provide a static PHY
+ * configuration here
+ *
+ * An IRQ may only be set if a PHY address was configured; if a PHY
+ * address is given, a bus id must be set as well
+ *
+ * PS: make sure the IRQs used are configured properly in the
+ * board-specific irq map
+ */
+static void au1000_enable_mac(struct net_device *dev, int force_reset)
+{
+ unsigned long flags;
+ struct au1000_private *aup = netdev_priv(dev);
+
+ spin_lock_irqsave(&aup->lock, flags);
+
+ if (force_reset || (!aup->mac_enabled)) {
+ writel(MAC_EN_CLOCK_ENABLE, aup->enable);
+ wmb(); /* drain writebuffer */
+ mdelay(2);
+ writel((MAC_EN_RESET0 | MAC_EN_RESET1 | MAC_EN_RESET2
+ | MAC_EN_CLOCK_ENABLE), aup->enable);
+ wmb(); /* drain writebuffer */
+ mdelay(2);
+
+ aup->mac_enabled = 1;
+ }
+
+ spin_unlock_irqrestore(&aup->lock, flags);
+}
+
+/*
+ * MII operations
+ */
+static int au1000_mdio_read(struct net_device *dev, int phy_addr, int reg)
+{
+ struct au1000_private *aup = netdev_priv(dev);
+ u32 *const mii_control_reg = &aup->mac->mii_control;
+ u32 *const mii_data_reg = &aup->mac->mii_data;
+ u32 timedout = 20;
+ u32 mii_control;
+
+ while (readl(mii_control_reg) & MAC_MII_BUSY) {
+ mdelay(1);
+ if (--timedout == 0) {
+ netdev_err(dev, "read_MII busy timeout!!\n");
+ return -1;
+ }
+ }
+
+ mii_control = MAC_SET_MII_SELECT_REG(reg) |
+ MAC_SET_MII_SELECT_PHY(phy_addr) | MAC_MII_READ;
+
+ writel(mii_control, mii_control_reg);
+
+ timedout = 20;
+ while (readl(mii_control_reg) & MAC_MII_BUSY) {
+ mdelay(1);
+ if (--timedout == 0) {
+ netdev_err(dev, "mdio_read busy timeout!!\n");
+ return -1;
+ }
+ }
+ return readl(mii_data_reg);
+}
+
+static void au1000_mdio_write(struct net_device *dev, int phy_addr,
+ int reg, u16 value)
+{
+ struct au1000_private *aup = netdev_priv(dev);
+ u32 *const mii_control_reg = &aup->mac->mii_control;
+ u32 *const mii_data_reg = &aup->mac->mii_data;
+ u32 timedout = 20;
+ u32 mii_control;
+
+ while (readl(mii_control_reg) & MAC_MII_BUSY) {
+ mdelay(1);
+ if (--timedout == 0) {
+ netdev_err(dev, "mdio_write busy timeout!!\n");
+ return;
+ }
+ }
+
+ mii_control = MAC_SET_MII_SELECT_REG(reg) |
+ MAC_SET_MII_SELECT_PHY(phy_addr) | MAC_MII_WRITE;
+
+ writel(value, mii_data_reg);
+ writel(mii_control, mii_control_reg);
+}
+
+static int au1000_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum)
+{
+ struct net_device *const dev = bus->priv;
+
+ /* make sure the MAC associated with this
+ * mii_bus is enabled
+ */
+ au1000_enable_mac(dev, 0);
+
+ return au1000_mdio_read(dev, phy_addr, regnum);
+}
+
+static int au1000_mdiobus_write(struct mii_bus *bus, int phy_addr, int regnum,
+ u16 value)
+{
+ struct net_device *const dev = bus->priv;
+
+ /* make sure the MAC associated with this
+ * mii_bus is enabled
+ */
+ au1000_enable_mac(dev, 0);
+
+ au1000_mdio_write(dev, phy_addr, regnum, value);
+ return 0;
+}
+
+static int au1000_mdiobus_reset(struct mii_bus *bus)
+{
+ struct net_device *const dev = bus->priv;
+
+ /* make sure the MAC associated with this
+ * mii_bus is enabled
+ */
+ au1000_enable_mac(dev, 0);
+
+ return 0;
+}
+
+static void au1000_hard_stop(struct net_device *dev)
+{
+ struct au1000_private *aup = netdev_priv(dev);
+ u32 reg;
+
+ netif_dbg(aup, drv, dev, "hard stop\n");
+
+ reg = readl(&aup->mac->control);
+ reg &= ~(MAC_RX_ENABLE | MAC_TX_ENABLE);
+ writel(reg, &aup->mac->control);
+ wmb(); /* drain writebuffer */
+ mdelay(10);
+}
+
+static void au1000_enable_rx_tx(struct net_device *dev)
+{
+ struct au1000_private *aup = netdev_priv(dev);
+ u32 reg;
+
+ netif_dbg(aup, hw, dev, "enable_rx_tx\n");
+
+ reg = readl(&aup->mac->control);
+ reg |= (MAC_RX_ENABLE | MAC_TX_ENABLE);
+ writel(reg, &aup->mac->control);
+ wmb(); /* drain writebuffer */
+ mdelay(10);
+}
+
+static void
+au1000_adjust_link(struct net_device *dev)
+{
+ struct au1000_private *aup = netdev_priv(dev);
+ struct phy_device *phydev = dev->phydev;
+ unsigned long flags;
+ u32 reg;
+
+ int status_change = 0;
+
+ BUG_ON(!phydev);
+
+ spin_lock_irqsave(&aup->lock, flags);
+
+ if (phydev->link && (aup->old_speed != phydev->speed)) {
+ /* speed changed */
+
+ switch (phydev->speed) {
+ case SPEED_10:
+ case SPEED_100:
+ break;
+ default:
+ netdev_warn(dev, "Speed (%d) is not 10/100 ???\n",
+ phydev->speed);
+ break;
+ }
+
+ aup->old_speed = phydev->speed;
+
+ status_change = 1;
+ }
+
+ if (phydev->link && (aup->old_duplex != phydev->duplex)) {
+ /* duplex mode changed */
+
+ /* switching duplex mode requires to disable rx and tx! */
+ au1000_hard_stop(dev);
+
+ reg = readl(&aup->mac->control);
+ if (DUPLEX_FULL == phydev->duplex) {
+ reg |= MAC_FULL_DUPLEX;
+ reg &= ~MAC_DISABLE_RX_OWN;
+ } else {
+ reg &= ~MAC_FULL_DUPLEX;
+ reg |= MAC_DISABLE_RX_OWN;
+ }
+ writel(reg, &aup->mac->control);
+ wmb(); /* drain writebuffer */
+ mdelay(1);
+
+ au1000_enable_rx_tx(dev);
+ aup->old_duplex = phydev->duplex;
+
+ status_change = 1;
+ }
+
+ if (phydev->link != aup->old_link) {
+ /* link state changed */
+
+ if (!phydev->link) {
+ /* link went down */
+ aup->old_speed = 0;
+ aup->old_duplex = -1;
+ }
+
+ aup->old_link = phydev->link;
+ status_change = 1;
+ }
+
+ spin_unlock_irqrestore(&aup->lock, flags);
+
+ if (status_change) {
+ if (phydev->link)
+ netdev_info(dev, "link up (%d/%s)\n",
+ phydev->speed,
+ DUPLEX_FULL == phydev->duplex ? "Full" : "Half");
+ else
+ netdev_info(dev, "link down\n");
+ }
+}
+
+static int au1000_mii_probe(struct net_device *dev)
+{
+ struct au1000_private *const aup = netdev_priv(dev);
+ struct phy_device *phydev = NULL;
+ int phy_addr;
+
+ if (aup->phy_static_config) {
+ BUG_ON(aup->mac_id < 0 || aup->mac_id > 1);
+
+ if (aup->phy_addr)
+ phydev = mdiobus_get_phy(aup->mii_bus, aup->phy_addr);
+ else
+ netdev_info(dev, "using PHY-less setup\n");
+ return 0;
+ }
+
+ /* find the first (lowest address) PHY
+ * on the current MAC's MII bus
+ */
+ for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++)
+ if (mdiobus_get_phy(aup->mii_bus, phy_addr)) {
+ phydev = mdiobus_get_phy(aup->mii_bus, phy_addr);
+ if (!aup->phy_search_highest_addr)
+ /* break out with first one found */
+ break;
+ }
+
+ if (aup->phy1_search_mac0) {
+ /* try harder to find a PHY */
+ if (!phydev && (aup->mac_id == 1)) {
+ /* no PHY found, maybe we have a dual PHY? */
+ dev_info(&dev->dev, ": no PHY found on MAC1, "
+ "let's see if it's attached to MAC0...\n");
+
+ /* find the first (lowest address) non-attached
+ * PHY on the MAC0 MII bus
+ */
+ for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
+ struct phy_device *const tmp_phydev =
+ mdiobus_get_phy(aup->mii_bus,
+ phy_addr);
+
+ if (aup->mac_id == 1)
+ break;
+
+ /* no PHY here... */
+ if (!tmp_phydev)
+ continue;
+
+ /* already claimed by MAC0 */
+ if (tmp_phydev->attached_dev)
+ continue;
+
+ phydev = tmp_phydev;
+ break; /* found it */
+ }
+ }
+ }
+
+ if (!phydev) {
+ netdev_err(dev, "no PHY found\n");
+ return -1;
+ }
+
+ /* now we are supposed to have a proper phydev, to attach to... */
+ BUG_ON(phydev->attached_dev);
+
+ phydev = phy_connect(dev, phydev_name(phydev),
+ &au1000_adjust_link, PHY_INTERFACE_MODE_MII);
+
+ if (IS_ERR(phydev)) {
+ netdev_err(dev, "Could not attach to PHY\n");
+ return PTR_ERR(phydev);
+ }
+
+ phy_set_max_speed(phydev, SPEED_100);
+
+ aup->old_link = 0;
+ aup->old_speed = 0;
+ aup->old_duplex = -1;
+
+ phy_attached_info(phydev);
+
+ return 0;
+}
+
+/*
+ * Buffer allocation/deallocation routines. The buffer descriptor returned
+ * has the virtual and dma address of a buffer suitable for
+ * both receive and transmit operations.
+ */
+static struct db_dest *au1000_GetFreeDB(struct au1000_private *aup)
+{
+ struct db_dest *pDB;
+ pDB = aup->pDBfree;
+
+ if (pDB)
+ aup->pDBfree = pDB->pnext;
+
+ return pDB;
+}
+
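+/* Put a buffer descriptor back on the free list. */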
+void au1000_ReleaseDB(struct au1000_private *aup, struct db_dest *pDB)
+{
+ struct db_dest *pDBfree = aup->pDBfree;
+ if (pDBfree)
+ pDBfree->pnext = pDB;
+ aup->pDBfree = pDB;
+}
+
+static void au1000_reset_mac_unlocked(struct net_device *dev)
+{
+ struct au1000_private *const aup = netdev_priv(dev);
+ int i;
+
+ au1000_hard_stop(dev);
+
+ writel(MAC_EN_CLOCK_ENABLE, aup->enable);
+ wmb(); /* drain writebuffer */
+ mdelay(2);
+ writel(0, aup->enable);
+ wmb(); /* drain writebuffer */
+ mdelay(2);
+
+ aup->tx_full = 0;
+ for (i = 0; i < NUM_RX_DMA; i++) {
+ /* reset control bits */
+ aup->rx_dma_ring[i]->buff_stat &= ~0xf;
+ }
+ for (i = 0; i < NUM_TX_DMA; i++) {
+ /* reset control bits */
+ aup->tx_dma_ring[i]->buff_stat &= ~0xf;
+ }
+
+ aup->mac_enabled = 0;
+
+}
+
+static void au1000_reset_mac(struct net_device *dev)
+{
+ struct au1000_private *const aup = netdev_priv(dev);
+ unsigned long flags;
+
+ netif_dbg(aup, hw, dev, "reset mac, aup %x\n",
+ (unsigned)aup);
+
+ spin_lock_irqsave(&aup->lock, flags);
+
+ au1000_reset_mac_unlocked(dev);
+
+ spin_unlock_irqrestore(&aup->lock, flags);
+}
+
+/*
+ * Setup the receive and transmit "rings". These pointers are the addresses
+ * of the rx and tx MAC DMA registers so they are fixed by the hardware --
+ * these are not descriptors sitting in memory.
+ */
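+/* Note: the rx descriptor registers start at offset 0x100 above the
+ * tx block within the MACDMA region. */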
+static void
+au1000_setup_hw_rings(struct au1000_private *aup, void __iomem *tx_base)
+{
+ int i;
+
+ for (i = 0; i < NUM_RX_DMA; i++) {
+ aup->rx_dma_ring[i] = (struct rx_dma *)
+ (tx_base + 0x100 + sizeof(struct rx_dma) * i);
+ }
+ for (i = 0; i < NUM_TX_DMA; i++) {
+ aup->tx_dma_ring[i] = (struct tx_dma *)
+ (tx_base + sizeof(struct tx_dma) * i);
+ }
+}
+
+/*
+ * ethtool operations
+ */
+static void
+au1000_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ struct au1000_private *aup = netdev_priv(dev);
+
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ snprintf(info->bus_info, sizeof(info->bus_info), "%s %d", DRV_NAME,
+ aup->mac_id);
+}
+
+static void au1000_set_msglevel(struct net_device *dev, u32 value)
+{
+ struct au1000_private *aup = netdev_priv(dev);
+ aup->msg_enable = value;
+}
+
+static u32 au1000_get_msglevel(struct net_device *dev)
+{
+ struct au1000_private *aup = netdev_priv(dev);
+ return aup->msg_enable;
+}
+
+static const struct ethtool_ops au1000_ethtool_ops = {
+ .get_drvinfo = au1000_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_msglevel = au1000_get_msglevel,
+ .set_msglevel = au1000_set_msglevel,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
+};
+
+/*
+ * Initialize the interface.
+ *
+ * When the device powers up, the clocks are disabled and the
+ * mac is in reset state. When the interface is closed, we
+ * do the same -- reset the device and disable the clocks to
+ * conserve power. Thus, whenever au1000_init() is called,
+ * the device should already be in reset state.
+ */
+static int au1000_init(struct net_device *dev)
+{
+ struct au1000_private *aup = netdev_priv(dev);
+ unsigned long flags;
+ int i;
+ u32 control;
+
+ netif_dbg(aup, hw, dev, "au1000_init\n");
+
+ /* bring the device out of reset */
+ au1000_enable_mac(dev, 1);
+
+ spin_lock_irqsave(&aup->lock, flags);
+
+ writel(0, &aup->mac->control);
+ aup->tx_head = (aup->tx_dma_ring[0]->buff_stat & 0xC) >> 2;
+ aup->tx_tail = aup->tx_head;
+ aup->rx_head = (aup->rx_dma_ring[0]->buff_stat & 0xC) >> 2;
+
+ writel(dev->dev_addr[5]<<8 | dev->dev_addr[4],
+ &aup->mac->mac_addr_high);
+ writel(dev->dev_addr[3]<<24 | dev->dev_addr[2]<<16 |
+ dev->dev_addr[1]<<8 | dev->dev_addr[0],
+ &aup->mac->mac_addr_low);
+
+
+ for (i = 0; i < NUM_RX_DMA; i++)
+ aup->rx_dma_ring[i]->buff_stat |= RX_DMA_ENABLE;
+
+ wmb(); /* drain writebuffer */
+
+ control = MAC_RX_ENABLE | MAC_TX_ENABLE;
+#ifndef CONFIG_CPU_LITTLE_ENDIAN
+ control |= MAC_BIG_ENDIAN;
+#endif
+ if (dev->phydev) {
+ if (dev->phydev->link && (DUPLEX_FULL == dev->phydev->duplex))
+ control |= MAC_FULL_DUPLEX;
+ else
+ control |= MAC_DISABLE_RX_OWN;
+ } else { /* PHY-less op, assume full-duplex */
+ control |= MAC_FULL_DUPLEX;
+ }
+
+ writel(control, &aup->mac->control);
+ writel(0x8100, &aup->mac->vlan1_tag); /* activate vlan support */
+ wmb(); /* drain writebuffer */
+
+ spin_unlock_irqrestore(&aup->lock, flags);
+ return 0;
+}
+
+static inline void au1000_update_rx_stats(struct net_device *dev, u32 status)
+{
+ struct net_device_stats *ps = &dev->stats;
+
+ ps->rx_packets++;
+ if (status & RX_MCAST_FRAME)
+ ps->multicast++;
+
+ if (status & RX_ERROR) {
+ ps->rx_errors++;
+ if (status & RX_MISSED_FRAME)
+ ps->rx_missed_errors++;
+ if (status & (RX_OVERLEN | RX_RUNT | RX_LEN_ERROR))
+ ps->rx_length_errors++;
+ if (status & RX_CRC_ERROR)
+ ps->rx_crc_errors++;
+ if (status & RX_COLL)
+ ps->collisions++;
+ } else
+ ps->rx_bytes += status & RX_FRAME_LEN_MASK;
+
+}
+
+/*
+ * Au1000 receive routine.
+ */
+static int au1000_rx(struct net_device *dev)
+{
+ struct au1000_private *aup = netdev_priv(dev);
+ struct sk_buff *skb;
+ struct rx_dma *prxd;
+ u32 buff_stat, status;
+ struct db_dest *pDB;
+ u32 frmlen;
+
+ netif_dbg(aup, rx_status, dev, "au1000_rx head %d\n", aup->rx_head);
+
+ prxd = aup->rx_dma_ring[aup->rx_head];
+ buff_stat = prxd->buff_stat;
+ while (buff_stat & RX_T_DONE) {
+ status = prxd->status;
+ pDB = aup->rx_db_inuse[aup->rx_head];
+ au1000_update_rx_stats(dev, status);
+ if (!(status & RX_ERROR)) {
+
+ /* good frame */
+ frmlen = (status & RX_FRAME_LEN_MASK);
+ frmlen -= 4; /* Remove FCS */
+ skb = netdev_alloc_skb(dev, frmlen + 2);
+ if (skb == NULL) {
+ dev->stats.rx_dropped++;
+ continue;
+ }
+ skb_reserve(skb, 2); /* 16 byte IP header align */
+ skb_copy_to_linear_data(skb,
+ (unsigned char *)pDB->vaddr, frmlen);
+ skb_put(skb, frmlen);
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb); /* pass the packet to upper layers */
+ } else {
+ if (au1000_debug > 4) {
+ pr_err("rx_error(s):");
+ if (status & RX_MISSED_FRAME)
+ pr_cont(" miss");
+ if (status & RX_WDOG_TIMER)
+ pr_cont(" wdog");
+ if (status & RX_RUNT)
+ pr_cont(" runt");
+ if (status & RX_OVERLEN)
+ pr_cont(" overlen");
+ if (status & RX_COLL)
+ pr_cont(" coll");
+ if (status & RX_MII_ERROR)
+ pr_cont(" mii error");
+ if (status & RX_CRC_ERROR)
+ pr_cont(" crc error");
+ if (status & RX_LEN_ERROR)
+ pr_cont(" len error");
+ if (status & RX_U_CNTRL_FRAME)
+ pr_cont(" u control frame");
+ pr_cont("\n");
+ }
+ }
+ prxd->buff_stat = (u32)(pDB->dma_addr | RX_DMA_ENABLE);
+ aup->rx_head = (aup->rx_head + 1) & (NUM_RX_DMA - 1);
+ wmb(); /* drain writebuffer */
+
+ /* next descriptor */
+ prxd = aup->rx_dma_ring[aup->rx_head];
+ buff_stat = prxd->buff_stat;
+ }
+ return 0;
+}
+
+static void au1000_update_tx_stats(struct net_device *dev, u32 status)
+{
+ struct net_device_stats *ps = &dev->stats;
+
+ if (status & TX_FRAME_ABORTED) {
+ if (!dev->phydev || (DUPLEX_FULL == dev->phydev->duplex)) {
+ if (status & (TX_JAB_TIMEOUT | TX_UNDERRUN)) {
+ /* any other tx errors are only valid
+ * in half duplex mode
+ */
+ ps->tx_errors++;
+ ps->tx_aborted_errors++;
+ }
+ } else {
+ ps->tx_errors++;
+ ps->tx_aborted_errors++;
+ if (status & (TX_NO_CARRIER | TX_LOSS_CARRIER))
+ ps->tx_carrier_errors++;
+ }
+ }
+}
+
+/*
+ * Called from the interrupt service routine to acknowledge
+ * the TX DONE bits. This is a must if the irq is setup as
+ * edge triggered.
+ */
+static void au1000_tx_ack(struct net_device *dev)
+{
+ struct au1000_private *aup = netdev_priv(dev);
+ struct tx_dma *ptxd;
+
+ ptxd = aup->tx_dma_ring[aup->tx_tail];
+
+ while (ptxd->buff_stat & TX_T_DONE) {
+ au1000_update_tx_stats(dev, ptxd->status);
+ ptxd->buff_stat &= ~TX_T_DONE;
+ ptxd->len = 0;
+ wmb(); /* drain writebuffer */
+
+ aup->tx_tail = (aup->tx_tail + 1) & (NUM_TX_DMA - 1);
+ ptxd = aup->tx_dma_ring[aup->tx_tail];
+
+ if (aup->tx_full) {
+ aup->tx_full = 0;
+ netif_wake_queue(dev);
+ }
+ }
+}
+
+/*
+ * Au1000 interrupt service routine.
+ */
+static irqreturn_t au1000_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+
+ /* Handle RX interrupts first to minimize chance of overrun */
+
+ au1000_rx(dev);
+ au1000_tx_ack(dev);
+ return IRQ_RETVAL(1);
+}
+
+static int au1000_open(struct net_device *dev)
+{
+ int retval;
+ struct au1000_private *aup = netdev_priv(dev);
+
+ netif_dbg(aup, drv, dev, "open: dev=%p\n", dev);
+
+ retval = request_irq(dev->irq, au1000_interrupt, 0,
+ dev->name, dev);
+ if (retval) {
+ netdev_err(dev, "unable to get IRQ %d\n", dev->irq);
+ return retval;
+ }
+
+ retval = au1000_init(dev);
+ if (retval) {
+ netdev_err(dev, "error in au1000_init\n");
+ free_irq(dev->irq, dev);
+ return retval;
+ }
+
+ if (dev->phydev)
+ phy_start(dev->phydev);
+
+ netif_start_queue(dev);
+
+ netif_dbg(aup, drv, dev, "open: Initialization done.\n");
+
+ return 0;
+}
+
+static int au1000_close(struct net_device *dev)
+{
+ unsigned long flags;
+ struct au1000_private *const aup = netdev_priv(dev);
+
+ netif_dbg(aup, drv, dev, "close: dev=%p\n", dev);
+
+ if (dev->phydev)
+ phy_stop(dev->phydev);
+
+ spin_lock_irqsave(&aup->lock, flags);
+
+ au1000_reset_mac_unlocked(dev);
+
+ /* stop the device */
+ netif_stop_queue(dev);
+
+ /* disable the interrupt */
+ free_irq(dev->irq, dev);
+ spin_unlock_irqrestore(&aup->lock, flags);
+
+ return 0;
+}
+
+/*
+ * Au1000 transmit routine.
+ */
+static netdev_tx_t au1000_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ struct au1000_private *aup = netdev_priv(dev);
+ struct net_device_stats *ps = &dev->stats;
+ struct tx_dma *ptxd;
+ u32 buff_stat;
+ struct db_dest *pDB;
+ int i;
+
+ netif_dbg(aup, tx_queued, dev, "tx: aup %x len=%d, data=%p, head %d\n",
+ (unsigned)aup, skb->len,
+ skb->data, aup->tx_head);
+
+ ptxd = aup->tx_dma_ring[aup->tx_head];
+ buff_stat = ptxd->buff_stat;
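+ /* If DMA enable is still set, the 4-entry ring has wrapped
+ onto a packet the hardware hasn't finished sending yet. */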
+ if (buff_stat & TX_DMA_ENABLE) {
+ /* We've wrapped around and the transmitter is still busy */
+ netif_stop_queue(dev);
+ aup->tx_full = 1;
+ return NETDEV_TX_BUSY;
+ } else if (buff_stat & TX_T_DONE) {
+ au1000_update_tx_stats(dev, ptxd->status);
+ ptxd->len = 0;
+ }
+
+ if (aup->tx_full) {
+ aup->tx_full = 0;
+ netif_wake_queue(dev);
+ }
+
+ pDB = aup->tx_db_inuse[aup->tx_head];
+ skb_copy_from_linear_data(skb, (void *)pDB->vaddr, skb->len);
+ if (skb->len < ETH_ZLEN) {
+ for (i = skb->len; i < ETH_ZLEN; i++)
+ ((char *)pDB->vaddr)[i] = 0;
+
+ ptxd->len = ETH_ZLEN;
+ } else
+ ptxd->len = skb->len;
+
+ ps->tx_packets++;
+ ps->tx_bytes += ptxd->len;
+
+ ptxd->buff_stat = pDB->dma_addr | TX_DMA_ENABLE;
+ wmb(); /* drain writebuffer */
+ dev_kfree_skb(skb);
+ aup->tx_head = (aup->tx_head + 1) & (NUM_TX_DMA - 1);
+ return NETDEV_TX_OK;
+}
+
+/*
+ * The Tx ring has been full longer than the watchdog timeout
+ * value; the transmitter is probably hung.
+ */
+static void au1000_tx_timeout(struct net_device *dev, unsigned int txqueue)
+{
+ netdev_err(dev, "au1000_tx_timeout: dev=%p\n", dev);
+ au1000_reset_mac(dev);
+ au1000_init(dev);
+ netif_trans_update(dev); /* prevent tx timeout */
+ netif_wake_queue(dev);
+}
+
+static void au1000_multicast_list(struct net_device *dev)
+{
+ struct au1000_private *aup = netdev_priv(dev);
+ u32 reg;
+
+ netif_dbg(aup, drv, dev, "%s: flags=%x\n", __func__, dev->flags);
+ reg = readl(&aup->mac->control);
+ if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
+ reg |= MAC_PROMISCUOUS;
+ } else if ((dev->flags & IFF_ALLMULTI) ||
+ netdev_mc_count(dev) > MULTICAST_FILTER_LIMIT) {
+ reg |= MAC_PASS_ALL_MULTI;
+ reg &= ~MAC_PROMISCUOUS;
+ netdev_info(dev, "Pass all multicast\n");
+ } else {
+ struct netdev_hw_addr *ha;
+ u32 mc_filter[2]; /* Multicast hash filter */
+
+ mc_filter[1] = mc_filter[0] = 0;
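+ /* The top 6 bits of each address's CRC select one of the
+ 64 bits spread across the two 32-bit hash words. */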
+ netdev_for_each_mc_addr(ha, dev)
+ set_bit(ether_crc(ETH_ALEN, ha->addr)>>26,
+ (long *)mc_filter);
+ writel(mc_filter[1], &aup->mac->multi_hash_high);
+ writel(mc_filter[0], &aup->mac->multi_hash_low);
+ reg &= ~MAC_PROMISCUOUS;
+ reg |= MAC_HASH_MODE;
+ }
+ writel(reg, &aup->mac->control);
+}
+
+static const struct net_device_ops au1000_netdev_ops = {
+ .ndo_open = au1000_open,
+ .ndo_stop = au1000_close,
+ .ndo_start_xmit = au1000_tx,
+ .ndo_set_rx_mode = au1000_multicast_list,
+ .ndo_do_ioctl = phy_do_ioctl_running,
+ .ndo_tx_timeout = au1000_tx_timeout,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+static int au1000_probe(struct platform_device *pdev)
+{
+ struct au1000_private *aup = NULL;
+ struct au1000_eth_platform_data *pd;
+ struct net_device *dev = NULL;
+ struct db_dest *pDB, *pDBfree;
+ int irq, i, err = 0;
+ struct resource *base, *macen, *macdma;
+
+ base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!base) {
+ dev_err(&pdev->dev, "failed to retrieve base register\n");
+ err = -ENODEV;
+ goto out;
+ }
+
+ macen = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!macen) {
+ dev_err(&pdev->dev, "failed to retrieve MAC Enable register\n");
+ err = -ENODEV;
+ goto out;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ err = -ENODEV;
+ goto out;
+ }
+
+ macdma = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+ if (!macdma) {
+ dev_err(&pdev->dev, "failed to retrieve MACDMA registers\n");
+ err = -ENODEV;
+ goto out;
+ }
+
+ if (!request_mem_region(base->start, resource_size(base),
+ pdev->name)) {
+ dev_err(&pdev->dev, "failed to request memory region for base registers\n");
+ err = -ENXIO;
+ goto out;
+ }
+
+ if (!request_mem_region(macen->start, resource_size(macen),
+ pdev->name)) {
+ dev_err(&pdev->dev, "failed to request memory region for MAC enable register\n");
+ err = -ENXIO;
+ goto err_request;
+ }
+
+ if (!request_mem_region(macdma->start, resource_size(macdma),
+ pdev->name)) {
+ dev_err(&pdev->dev, "failed to request MACDMA memory region\n");
+ err = -ENXIO;
+ goto err_macdma;
+ }
+
+ dev = alloc_etherdev(sizeof(struct au1000_private));
+ if (!dev) {
+ err = -ENOMEM;
+ goto err_alloc;
+ }
+
+ SET_NETDEV_DEV(dev, &pdev->dev);
+ platform_set_drvdata(pdev, dev);
+ aup = netdev_priv(dev);
+
+ spin_lock_init(&aup->lock);
+ aup->msg_enable = (au1000_debug < 4 ?
+ AU1000_DEF_MSG_ENABLE : au1000_debug);
+
+ /* Allocate the data buffers
+ * Snooping works fine with eth on all au1xxx
+ */
+ aup->vaddr = (u32)dma_alloc_coherent(&pdev->dev, MAX_BUF_SIZE *
+ (NUM_TX_BUFFS + NUM_RX_BUFFS),
+ &aup->dma_addr, 0);
+ if (!aup->vaddr) {
+ dev_err(&pdev->dev, "failed to allocate data buffers\n");
+ err = -ENOMEM;
+ goto err_vaddr;
+ }
+
+ /* aup->mac is the base address of the MAC's registers */
+ aup->mac = (struct mac_reg *)
+ ioremap(base->start, resource_size(base));
+ if (!aup->mac) {
+ dev_err(&pdev->dev, "failed to ioremap MAC registers\n");
+ err = -ENXIO;
+ goto err_remap1;
+ }
+
+ /* Setup some variables for quick register address access */
+ aup->enable = (u32 *)ioremap(macen->start,
+ resource_size(macen));
+ if (!aup->enable) {
+ dev_err(&pdev->dev, "failed to ioremap MAC enable register\n");
+ err = -ENXIO;
+ goto err_remap2;
+ }
+ aup->mac_id = pdev->id;
+
+ aup->macdma = ioremap(macdma->start, resource_size(macdma));
+ if (!aup->macdma) {
+ dev_err(&pdev->dev, "failed to ioremap MACDMA registers\n");
+ err = -ENXIO;
+ goto err_remap3;
+ }
+
+ au1000_setup_hw_rings(aup, aup->macdma);
+
+ writel(0, aup->enable);
+ aup->mac_enabled = 0;
+
+ pd = dev_get_platdata(&pdev->dev);
+ if (!pd) {
+ dev_info(&pdev->dev, "no platform_data passed,"
+ " PHY search on MAC0\n");
+ aup->phy1_search_mac0 = 1;
+ } else {
+ if (is_valid_ether_addr(pd->mac)) {
+ memcpy(dev->dev_addr, pd->mac, ETH_ALEN);
+ } else {
+ /* Set a random MAC since platform_data provided no valid one. */
+ eth_hw_addr_random(dev);
+ }
+
+ aup->phy_static_config = pd->phy_static_config;
+ aup->phy_search_highest_addr = pd->phy_search_highest_addr;
+ aup->phy1_search_mac0 = pd->phy1_search_mac0;
+ aup->phy_addr = pd->phy_addr;
+ aup->phy_busid = pd->phy_busid;
+ aup->phy_irq = pd->phy_irq;
+ }
+
+ if (aup->phy_busid > 0) {
+ dev_err(&pdev->dev, "MAC0-associated PHY attached 2nd MACs MII bus not supported yet\n");
+ err = -ENODEV;
+ goto err_mdiobus_alloc;
+ }
+
+ aup->mii_bus = mdiobus_alloc();
+ if (aup->mii_bus == NULL) {
+ dev_err(&pdev->dev, "failed to allocate mdiobus structure\n");
+ err = -ENOMEM;
+ goto err_mdiobus_alloc;
+ }
+
+ aup->mii_bus->priv = dev;
+ aup->mii_bus->read = au1000_mdiobus_read;
+ aup->mii_bus->write = au1000_mdiobus_write;
+ aup->mii_bus->reset = au1000_mdiobus_reset;
+ aup->mii_bus->name = "au1000_eth_mii";
+ snprintf(aup->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
+ pdev->name, aup->mac_id);
+
+ /* if known, set corresponding PHY IRQs */
+ if (aup->phy_static_config)
+ if (aup->phy_irq && aup->phy_busid == aup->mac_id)
+ aup->mii_bus->irq[aup->phy_addr] = aup->phy_irq;
+
+ err = mdiobus_register(aup->mii_bus);
+ if (err) {
+ dev_err(&pdev->dev, "failed to register MDIO bus\n");
+ goto err_mdiobus_reg;
+ }
+
+ err = au1000_mii_probe(dev);
+ if (err != 0)
+ goto err_out;
+
+ pDBfree = NULL;
+ /* setup the data buffer descriptors and attach a buffer to each one */
+ pDB = aup->db;
+ for (i = 0; i < (NUM_TX_BUFFS+NUM_RX_BUFFS); i++) {
+ pDB->pnext = pDBfree;
+ pDBfree = pDB;
+ pDB->vaddr = (u32 *)((unsigned)aup->vaddr + MAX_BUF_SIZE*i);
+ pDB->dma_addr = (dma_addr_t)virt_to_bus(pDB->vaddr);
+ pDB++;
+ }
+ aup->pDBfree = pDBfree;
+
+ err = -ENODEV;
+ for (i = 0; i < NUM_RX_DMA; i++) {
+ pDB = au1000_GetFreeDB(aup);
+ if (!pDB)
+ goto err_out;
+
+ aup->rx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
+ aup->rx_db_inuse[i] = pDB;
+ }
+
+ for (i = 0; i < NUM_TX_DMA; i++) {
+ pDB = au1000_GetFreeDB(aup);
+ if (!pDB)
+ goto err_out;
+
+ aup->tx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
+ aup->tx_dma_ring[i]->len = 0;
+ aup->tx_db_inuse[i] = pDB;
+ }
+
+ dev->base_addr = base->start;
+ dev->irq = irq;
+ dev->netdev_ops = &au1000_netdev_ops;
+ dev->ethtool_ops = &au1000_ethtool_ops;
+ dev->watchdog_timeo = ETH_TX_TIMEOUT;
+
+ /*
+ * The boot code uses the ethernet controller, so reset it to start
+ * fresh. au1000_init() expects that the device is in reset state.
+ */
+ au1000_reset_mac(dev);
+
+ err = register_netdev(dev);
+ if (err) {
+ netdev_err(dev, "Cannot register net device, aborting.\n");
+ goto err_out;
+ }
+
+ netdev_info(dev, "Au1xx0 Ethernet found at 0x%lx, irq %d\n",
+ (unsigned long)base->start, irq);
+
+ return 0;
+
+err_out:
+ if (aup->mii_bus != NULL)
+ mdiobus_unregister(aup->mii_bus);
+
+ /* here we should have a valid dev plus aup-> register addresses
+ * so we can reset the mac properly.
+ */
+ au1000_reset_mac(dev);
+
+ for (i = 0; i < NUM_RX_DMA; i++) {
+ if (aup->rx_db_inuse[i])
+ au1000_ReleaseDB(aup, aup->rx_db_inuse[i]);
+ }
+ for (i = 0; i < NUM_TX_DMA; i++) {
+ if (aup->tx_db_inuse[i])
+ au1000_ReleaseDB(aup, aup->tx_db_inuse[i]);
+ }
+err_mdiobus_reg:
+ mdiobus_free(aup->mii_bus);
+err_mdiobus_alloc:
+ iounmap(aup->macdma);
+err_remap3:
+ iounmap(aup->enable);
+err_remap2:
+ iounmap(aup->mac);
+err_remap1:
+ dma_free_coherent(&pdev->dev, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS),
+ (void *)aup->vaddr, aup->dma_addr);
+err_vaddr:
+ free_netdev(dev);
+err_alloc:
+ release_mem_region(macdma->start, resource_size(macdma));
+err_macdma:
+ release_mem_region(macen->start, resource_size(macen));
+err_request:
+ release_mem_region(base->start, resource_size(base));
+out:
+ return err;
+}
+
+static int au1000_remove(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct au1000_private *aup = netdev_priv(dev);
+ int i;
+ struct resource *base, *macen;
+
+ unregister_netdev(dev);
+ mdiobus_unregister(aup->mii_bus);
+ mdiobus_free(aup->mii_bus);
+
+ for (i = 0; i < NUM_RX_DMA; i++)
+ if (aup->rx_db_inuse[i])
+ au1000_ReleaseDB(aup, aup->rx_db_inuse[i]);
+
+ for (i = 0; i < NUM_TX_DMA; i++)
+ if (aup->tx_db_inuse[i])
+ au1000_ReleaseDB(aup, aup->tx_db_inuse[i]);
+
+ dma_free_coherent(&pdev->dev, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS),
+ (void *)aup->vaddr, aup->dma_addr);
+
+ iounmap(aup->macdma);
+ iounmap(aup->mac);
+ iounmap(aup->enable);
+
+ base = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+ release_mem_region(base->start, resource_size(base));
+
+ base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(base->start, resource_size(base));
+
+ macen = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ release_mem_region(macen->start, resource_size(macen));
+
+ free_netdev(dev);
+
+ return 0;
+}
+
+static struct platform_driver au1000_eth_driver = {
+ .probe = au1000_probe,
+ .remove = au1000_remove,
+ .driver = {
+ .name = "au1000-eth",
+ },
+};
+
+module_platform_driver(au1000_eth_driver);
+
+MODULE_ALIAS("platform:au1000-eth");
diff --git a/drivers/net/ethernet/amd/au1000_eth.h b/drivers/net/ethernet/amd/au1000_eth.h
new file mode 100644
index 000000000..e3a3ed29d
--- /dev/null
+++ b/drivers/net/ethernet/amd/au1000_eth.h
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ *
+ * Alchemy Au1x00 ethernet driver include file
+ *
+ * Author: Pete Popov <ppopov@mvista.com>
+ *
+ * Copyright 2001 MontaVista Software Inc.
+ */
+
+
+#define MAC_IOSIZE 0x10000
+#define NUM_RX_DMA 4 /* Au1x00 has 4 rx hardware descriptors */
+#define NUM_TX_DMA 4 /* Au1x00 has 4 tx hardware descriptors */
+
+#define NUM_RX_BUFFS 4
+#define NUM_TX_BUFFS 4
+#define MAX_BUF_SIZE 2048
+
+#define ETH_TX_TIMEOUT (HZ/4)
+#define MAC_MIN_PKT_SIZE 64
+
+#define MULTICAST_FILTER_LIMIT 64
+
+/*
+ * Data Buffer Descriptor. Data buffers must be aligned on 32 byte
+ * boundary for both receive and transmit.
+ */
+struct db_dest {
+ struct db_dest *pnext;
+ u32 *vaddr;
+ dma_addr_t dma_addr;
+};
+
+/*
+ * The transmit and receive descriptors are memory
+ * mapped registers.
+ */
+struct tx_dma {
+ u32 status;
+ u32 buff_stat;
+ u32 len;
+ u32 pad;
+};
+
+struct rx_dma {
+ u32 status;
+ u32 buff_stat;
+ u32 pad[2];
+};
+
+
+/*
+ * MAC control registers, memory mapped.
+ */
+struct mac_reg {
+ u32 control;
+ u32 mac_addr_high;
+ u32 mac_addr_low;
+ u32 multi_hash_high;
+ u32 multi_hash_low;
+ u32 mii_control;
+ u32 mii_data;
+ u32 flow_control;
+ u32 vlan1_tag;
+ u32 vlan2_tag;
+};
+
+
+struct au1000_private {
+ struct db_dest *pDBfree;
+ struct db_dest db[NUM_RX_BUFFS+NUM_TX_BUFFS];
+ struct rx_dma *rx_dma_ring[NUM_RX_DMA];
+ struct tx_dma *tx_dma_ring[NUM_TX_DMA];
+ struct db_dest *rx_db_inuse[NUM_RX_DMA];
+ struct db_dest *tx_db_inuse[NUM_TX_DMA];
+ u32 rx_head;
+ u32 tx_head;
+ u32 tx_tail;
+ u32 tx_full;
+
+ int mac_id;
+
+ int mac_enabled; /* whether MAC is currently enabled and running
+ * (req. for mdio)
+ */
+
+ int old_link; /* used by au1000_adjust_link */
+ int old_speed;
+ int old_duplex;
+
+ struct mii_bus *mii_bus;
+
+ /* PHY configuration */
+ int phy_static_config;
+ int phy_search_highest_addr;
+ int phy1_search_mac0;
+
+ int phy_addr;
+ int phy_busid;
+ int phy_irq;
+
+ /* These variables are just for quick access
+ * to certain regs addresses.
+ */
+ struct mac_reg *mac; /* mac registers */
+ u32 *enable; /* address of MAC Enable Register */
+ void __iomem *macdma; /* base of MAC DMA port */
+ u32 vaddr; /* virtual address of rx/tx buffers */
+ dma_addr_t dma_addr; /* dma address of rx/tx buffers */
+
+ spinlock_t lock; /* Serialise access to device */
+
+ u32 msg_enable;
+};
diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c
new file mode 100644
index 000000000..7282ce55f
--- /dev/null
+++ b/drivers/net/ethernet/amd/declance.c
@@ -0,0 +1,1380 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Lance ethernet driver for the MIPS processor based
+ * DECstation family
+ *
+ *
+ * adapted from sunlance.c by Richard van den Berg
+ *
+ * Copyright (C) 2002, 2003, 2005, 2006 Maciej W. Rozycki
+ *
+ * additional sources:
+ * - PMAD-AA TURBOchannel Ethernet Module Functional Specification,
+ * Revision 1.2
+ *
+ * History:
+ *
+ * v0.001: The kernel accepts the code and it shows the hardware address.
+ *
+ * v0.002: Removed most sparc stuff, left only some module and dma stuff.
+ *
+ * v0.003: Enhanced base address calculation from proposals by
+ * Harald Koerfgen and Thomas Riemer.
+ *
+ * v0.004: lance-regs is pointing at the right addresses, added prom
+ * check. First start of address mapping and DMA.
+ *
+ * v0.005: started to play around with LANCE-DMA. This driver will not
+ * work for non IOASIC lances. HK
+ *
+ * v0.006: added pointer arrays to lance_private and setup routine for
+ * them in dec_lance_init. HK
+ *
+ * v0.007: Big shit. The LANCE seems to use a different DMA mechanism to
+ * access the init block. This looks like one (short) word at a
+ * time, but the smallest amount the IOASIC can transfer is a
+ * (long) word. So we have a 2-2 padding here. Changed
+ * lance_init_block accordingly. The 16-16 padding for the buffers
+ * seems to be correct. HK
+ *
+ * v0.008: mods to make PMAX_LANCE work. 01/09/1999 triemer
+ *
+ * v0.009: Module support fixes, multiple interfaces support, various
+ * bits. macro
+ *
+ * v0.010: Fixes for the PMAD mapping of the LANCE buffer and for the
+ * PMAX requirement to only use halfword accesses to the
+ * buffer. macro
+ *
+ * v0.011: Converted the PMAD to the driver model. macro
+ */
+
+#include <linux/crc32.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/if_ether.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/spinlock.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <linux/tc.h>
+#include <linux/types.h>
+
+#include <asm/addrspace.h>
+
+#include <asm/dec/interrupts.h>
+#include <asm/dec/ioasic.h>
+#include <asm/dec/ioasic_addrs.h>
+#include <asm/dec/kn01.h>
+#include <asm/dec/machtype.h>
+#include <asm/dec/system.h>
+
+static const char version[] =
+"declance.c: v0.011 by Linux MIPS DECstation task force\n";
+
+MODULE_AUTHOR("Linux MIPS DECstation task force");
+MODULE_DESCRIPTION("DEC LANCE (DECstation onboard, PMAD-xx) driver");
+MODULE_LICENSE("GPL");
+
+#define __unused __attribute__ ((unused))
+
+/*
+ * card types
+ */
+#define ASIC_LANCE 1
+#define PMAD_LANCE 2
+#define PMAX_LANCE 3
+
+
+#define LE_CSR0 0
+#define LE_CSR1 1
+#define LE_CSR2 2
+#define LE_CSR3 3
+
+#define LE_MO_PROM 0x8000 /* Enable promiscuous mode */
+
+#define LE_C0_ERR 0x8000 /* Error: set if BAB, SQE, MISS or ME is set */
+#define LE_C0_BABL 0x4000 /* BAB: Babble: tx timeout. */
+#define LE_C0_CERR 0x2000 /* SQE: Signal quality error */
+#define LE_C0_MISS 0x1000 /* MISS: Missed a packet */
+#define LE_C0_MERR 0x0800 /* ME: Memory error */
+#define LE_C0_RINT 0x0400 /* Received interrupt */
+#define LE_C0_TINT 0x0200 /* Transmitter Interrupt */
+#define LE_C0_IDON 0x0100 /* IFIN: Init finished. */
+#define LE_C0_INTR 0x0080 /* Interrupt or error */
+#define LE_C0_INEA 0x0040 /* Interrupt enable */
+#define LE_C0_RXON 0x0020 /* Receiver on */
+#define LE_C0_TXON 0x0010 /* Transmitter on */
+#define LE_C0_TDMD 0x0008 /* Transmitter demand */
+#define LE_C0_STOP 0x0004 /* Stop the card */
+#define LE_C0_STRT 0x0002 /* Start the card */
+#define LE_C0_INIT 0x0001 /* Init the card */
+
+#define LE_C3_BSWP 0x4 /* SWAP */
+#define LE_C3_ACON 0x2 /* ALE Control */
+#define LE_C3_BCON 0x1 /* Byte control */
+
+/* Receive message descriptor 1 */
+#define LE_R1_OWN 0x8000 /* Who owns the entry */
+#define LE_R1_ERR 0x4000 /* Error: if FRA, OFL, CRC or BUF is set */
+#define LE_R1_FRA 0x2000 /* FRA: Frame error */
+#define LE_R1_OFL 0x1000 /* OFL: Frame overflow */
+#define LE_R1_CRC 0x0800 /* CRC error */
+#define LE_R1_BUF 0x0400 /* BUF: Buffer error */
+#define LE_R1_SOP 0x0200 /* Start of packet */
+#define LE_R1_EOP 0x0100 /* End of packet */
+#define LE_R1_POK 0x0300 /* Packet is complete: SOP + EOP */
+
+/* Transmit message descriptor 1 */
+#define LE_T1_OWN 0x8000 /* Lance owns the packet */
+#define LE_T1_ERR 0x4000 /* Error summary */
+#define LE_T1_EMORE 0x1000 /* Error: more than one retry needed */
+#define LE_T1_EONE 0x0800 /* Error: one retry needed */
+#define LE_T1_EDEF 0x0400 /* Error: deferred */
+#define LE_T1_SOP 0x0200 /* Start of packet */
+#define LE_T1_EOP 0x0100 /* End of packet */
+#define LE_T1_POK 0x0300 /* Packet is complete: SOP + EOP */
+
+#define LE_T3_BUF 0x8000 /* Buffer error */
+#define LE_T3_UFL 0x4000 /* Error underflow */
+#define LE_T3_LCOL 0x1000 /* Error late collision */
+#define LE_T3_CLOS 0x0800 /* Error carrier loss */
+#define LE_T3_RTY 0x0400 /* Error retry */
+#define LE_T3_TDR 0x03ff /* Time Domain Reflectometry counter */
+
+/* Define: 2^4 Tx buffers and 2^4 Rx buffers */
+
+#ifndef LANCE_LOG_TX_BUFFERS
+#define LANCE_LOG_TX_BUFFERS 4
+#define LANCE_LOG_RX_BUFFERS 4
+#endif
+
+#define TX_RING_SIZE (1 << (LANCE_LOG_TX_BUFFERS))
+#define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
+
+#define RX_RING_SIZE (1 << (LANCE_LOG_RX_BUFFERS))
+#define RX_RING_MOD_MASK (RX_RING_SIZE - 1)
+
+#define PKT_BUF_SZ 1536
+#define RX_BUFF_SIZE PKT_BUF_SZ
+#define TX_BUFF_SIZE PKT_BUF_SZ
+
+#undef TEST_HITS
+#define ZERO 0
+
+/*
+ * The DS2100/3100 have a linear 64 kB buffer which supports halfword
+ * accesses only. Each halfword of the buffer is word-aligned in the
+ * CPU address space.
+ *
+ * The PMAD-AA has a 128 kB buffer on-board.
+ *
+ * The IOASIC LANCE devices use a shared memory region. This region
+ * as seen from the CPU is (max) 128 kB long and has to be on a 128 kB
+ * boundary. The LANCE sees this as a 64 kB long contiguous memory
+ * region.
+ *
+ * The LANCE's DMA address is used as an index in this buffer and DMA
+ * takes place in bursts of eight 16-bit words which are packed into
+ * four 32-bit words by the IOASIC. This leads to a strange padding:
+ * 16 bytes of valid data followed by a 16 byte gap :-(.
+ */
+
+struct lance_rx_desc {
+ unsigned short rmd0; /* low address of packet */
+ unsigned short rmd1; /* high address of packet
+ and descriptor bits */
+ short length; /* 2s complement (negative!)
+ of buffer length */
+ unsigned short mblength; /* actual number of bytes received */
+};
+
+struct lance_tx_desc {
+ unsigned short tmd0; /* low address of packet */
+ unsigned short tmd1; /* high address of packet
+ and descriptor bits */
+ short length; /* 2s complement (negative!)
+ of buffer length */
+ unsigned short misc;
+};
+
+
+/* First part of the LANCE initialization block, described in databook. */
+struct lance_init_block {
+ unsigned short mode; /* pre-set mode (reg. 15) */
+
+ unsigned short phys_addr[3]; /* physical ethernet address */
+ unsigned short filter[4]; /* multicast filter */
+
+ /* Receive and transmit ring base, along with extra bits. */
+ unsigned short rx_ptr; /* receive descriptor addr */
+ unsigned short rx_len; /* receive len and high addr */
+ unsigned short tx_ptr; /* transmit descriptor addr */
+ unsigned short tx_len; /* transmit len and high addr */
+
+ short gap[4];
+
+ /* The buffer descriptors */
+ struct lance_rx_desc brx_ring[RX_RING_SIZE];
+ struct lance_tx_desc btx_ring[TX_RING_SIZE];
+};
+
+#define BUF_OFFSET_CPU sizeof(struct lance_init_block)
+#define BUF_OFFSET_LNC sizeof(struct lance_init_block)
+
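+/*
+ * On the ASIC and PMAX LANCEs each 16-bit datum occupies 32 bits of
+ * CPU address space (see the padding note above), so native structure
+ * offsets have to be doubled before use.
+ */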
+#define shift_off(off, type) \
+ (type == ASIC_LANCE || type == PMAX_LANCE ? off << 1 : off)
+
+#define lib_off(rt, type) \
+ shift_off(offsetof(struct lance_init_block, rt), type)
+
+#define lib_ptr(ib, rt, type) \
+ ((volatile u16 *)((u8 *)(ib) + lib_off(rt, type)))
+
+#define rds_off(rt, type) \
+ shift_off(offsetof(struct lance_rx_desc, rt), type)
+
+#define rds_ptr(rd, rt, type) \
+ ((volatile u16 *)((u8 *)(rd) + rds_off(rt, type)))
+
+#define tds_off(rt, type) \
+ shift_off(offsetof(struct lance_tx_desc, rt), type)
+
+#define tds_ptr(td, rt, type) \
+ ((volatile u16 *)((u8 *)(td) + tds_off(rt, type)))
+
+struct lance_private {
+ struct net_device *next;
+ int type;
+ int dma_irq;
+ volatile struct lance_regs *ll;
+
+ spinlock_t lock;
+
+ int rx_new, tx_new;
+ int rx_old, tx_old;
+
+ unsigned short busmaster_regval;
+
+ struct timer_list multicast_timer;
+ struct net_device *dev;
+
+ /* Pointers to the ring buffers as seen from the CPU */
+ char *rx_buf_ptr_cpu[RX_RING_SIZE];
+ char *tx_buf_ptr_cpu[TX_RING_SIZE];
+
+ /* Pointers to the ring buffers as seen from the LANCE */
+ uint rx_buf_ptr_lnc[RX_RING_SIZE];
+ uint tx_buf_ptr_lnc[TX_RING_SIZE];
+};
+
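+/* Free tx ring slots; one entry is always kept open so that a full
+ * ring can be distinguished from an empty one. */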
+#define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\
+ lp->tx_old+TX_RING_MOD_MASK-lp->tx_new:\
+ lp->tx_old - lp->tx_new-1)
+
+/* The lance control ports are at an absolute address, machine and tc-slot
+ * dependent.
+ * DECstations do only 32-bit accesses and the LANCE uses 16-bit addresses,
+ * so we have to give the structure an extra pad member so that rap points
+ * at the right address
+ */
+struct lance_regs {
+ volatile unsigned short rdp; /* register data port */
+ unsigned short pad;
+ volatile unsigned short rap; /* register address port */
+};
+
+int dec_lance_debug = 2;
+
+static struct tc_driver dec_lance_tc_driver;
+static struct net_device *root_lance_dev;
+
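+/* Write a LANCE register and flush the write with an I/O barrier. */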
+static inline void writereg(volatile unsigned short *regptr, short value)
+{
+ *regptr = value;
+ iob();
+}
+
+/* Load the CSR registers */
+static void load_csrs(struct lance_private *lp)
+{
+ volatile struct lance_regs *ll = lp->ll;
+ uint leptr;
+
+ /* The address space as seen from the LANCE
+ * begins at address 0. HK
+ */
+ leptr = 0;
+
+ writereg(&ll->rap, LE_CSR1);
+ writereg(&ll->rdp, (leptr & 0xFFFF));
+ writereg(&ll->rap, LE_CSR2);
+ writereg(&ll->rdp, leptr >> 16);
+ writereg(&ll->rap, LE_CSR3);
+ writereg(&ll->rdp, lp->busmaster_regval);
+
+ /* Point back to csr0 */
+ writereg(&ll->rap, LE_CSR0);
+}
+
+/*
+ * Our specialized copy routines
+ *
+ */
+static void cp_to_buf(const int type, void *to, const void *from, int len)
+{
+ unsigned short *tp;
+ const unsigned short *fp;
+ unsigned short clen;
+ unsigned char *rtp;
+ const unsigned char *rfp;
+
+ if (type == PMAD_LANCE) {
+ memcpy(to, from, len);
+ } else if (type == PMAX_LANCE) {
+ clen = len >> 1;
+ tp = to;
+ fp = from;
+
+ while (clen--) {
+ *tp++ = *fp++;
+ tp++;
+ }
+
+ clen = len & 1;
+ rtp = (unsigned char *)tp;
+ rfp = (const unsigned char *)fp;
+ while (clen--) {
+ *rtp++ = *rfp++;
+ }
+ } else {
+ /*
+ * copy 16 Byte chunks
+ */
+ clen = len >> 4;
+ tp = to;
+ fp = from;
+ while (clen--) {
+ *tp++ = *fp++;
+ *tp++ = *fp++;
+ *tp++ = *fp++;
+ *tp++ = *fp++;
+ *tp++ = *fp++;
+ *tp++ = *fp++;
+ *tp++ = *fp++;
+ *tp++ = *fp++;
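+ /* skip the 16-byte gap the IOASIC leaves
+ after each 16 bytes of valid data */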
+ tp += 8;
+ }
+
+ /*
+ * do the rest, if any.
+ */
+ clen = len & 15;
+ rtp = (unsigned char *)tp;
+ rfp = (const unsigned char *)fp;
+ while (clen--) {
+ *rtp++ = *rfp++;
+ }
+ }
+
+ iob();
+}
+
+static void cp_from_buf(const int type, void *to, const void *from, int len)
+{
+ unsigned short *tp;
+ const unsigned short *fp;
+ unsigned short clen;
+ unsigned char *rtp;
+ const unsigned char *rfp;
+
+ if (type == PMAD_LANCE) {
+ memcpy(to, from, len);
+ } else if (type == PMAX_LANCE) {
+ clen = len >> 1;
+ tp = to;
+ fp = from;
+ while (clen--) {
+ *tp++ = *fp++;
+ fp++;
+ }
+
+ clen = len & 1;
+
+ rtp = (unsigned char *)tp;
+ rfp = (const unsigned char *)fp;
+
+ while (clen--) {
+ *rtp++ = *rfp++;
+ }
+ } else {
+
+ /*
+ * copy 16 Byte chunks
+ */
+ clen = len >> 4;
+ tp = to;
+ fp = from;
+ while (clen--) {
+ *tp++ = *fp++;
+ *tp++ = *fp++;
+ *tp++ = *fp++;
+ *tp++ = *fp++;
+ *tp++ = *fp++;
+ *tp++ = *fp++;
+ *tp++ = *fp++;
+ *tp++ = *fp++;
+ fp += 8;
+ }
+
+ /*
+ * do the rest, if any.
+ */
+ clen = len & 15;
+ rtp = (unsigned char *)tp;
+ rfp = (const unsigned char *)fp;
+ while (clen--) {
+ *rtp++ = *rfp++;
+ }
+
+
+ }
+
+}
+
+/* Setup the Lance Rx and Tx rings */
+static void lance_init_ring(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile u16 *ib = (volatile u16 *)dev->mem_start;
+ uint leptr;
+ int i;
+
+ /* Lock out other processes while setting up hardware */
+ netif_stop_queue(dev);
+ lp->rx_new = lp->tx_new = 0;
+ lp->rx_old = lp->tx_old = 0;
+
+ /* Copy the ethernet address to the lance init block.
+ * XXX bit 0 of the physical address registers has to be zero
+ */
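+ /* Each init-block word holds two address bytes, low byte first. */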
+ *lib_ptr(ib, phys_addr[0], lp->type) = (dev->dev_addr[1] << 8) |
+ dev->dev_addr[0];
+ *lib_ptr(ib, phys_addr[1], lp->type) = (dev->dev_addr[3] << 8) |
+ dev->dev_addr[2];
+ *lib_ptr(ib, phys_addr[2], lp->type) = (dev->dev_addr[5] << 8) |
+ dev->dev_addr[4];
+ /* Setup the initialization block */
+
+ /* Setup rx descriptor pointer */
+ leptr = offsetof(struct lance_init_block, brx_ring);
+ *lib_ptr(ib, rx_len, lp->type) = (LANCE_LOG_RX_BUFFERS << 13) |
+ (leptr >> 16);
+ *lib_ptr(ib, rx_ptr, lp->type) = leptr;
+ if (ZERO)
+ printk("RX ptr: %8.8x(%8.8x)\n",
+ leptr, (uint)lib_off(brx_ring, lp->type));
+
+ /* Setup tx descriptor pointer */
+ leptr = offsetof(struct lance_init_block, btx_ring);
+ *lib_ptr(ib, tx_len, lp->type) = (LANCE_LOG_TX_BUFFERS << 13) |
+ (leptr >> 16);
+ *lib_ptr(ib, tx_ptr, lp->type) = leptr;
+ if (ZERO)
+ printk("TX ptr: %8.8x(%8.8x)\n",
+ leptr, (uint)lib_off(btx_ring, lp->type));
+
+ if (ZERO)
+ printk("TX rings:\n");
+
+ /* Setup the Tx ring entries */
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ leptr = lp->tx_buf_ptr_lnc[i];
+ *lib_ptr(ib, btx_ring[i].tmd0, lp->type) = leptr;
+ *lib_ptr(ib, btx_ring[i].tmd1, lp->type) = (leptr >> 16) &
+ 0xff;
+ *lib_ptr(ib, btx_ring[i].length, lp->type) = 0xf000;
+ /* The ones required by tmd2 */
+ *lib_ptr(ib, btx_ring[i].misc, lp->type) = 0;
+ if (i < 3 && ZERO)
+ printk("%d: %8.8x(%p)\n",
+ i, leptr, lp->tx_buf_ptr_cpu[i]);
+ }
+
+ /* Setup the Rx ring entries */
+ if (ZERO)
+ printk("RX rings:\n");
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ leptr = lp->rx_buf_ptr_lnc[i];
+ *lib_ptr(ib, brx_ring[i].rmd0, lp->type) = leptr;
+ *lib_ptr(ib, brx_ring[i].rmd1, lp->type) = ((leptr >> 16) &
+ 0xff) |
+ LE_R1_OWN;
+ *lib_ptr(ib, brx_ring[i].length, lp->type) = -RX_BUFF_SIZE |
+ 0xf000;
+ *lib_ptr(ib, brx_ring[i].mblength, lp->type) = 0;
+ if (i < 3 && ZERO)
+ printk("%d: %8.8x(%p)\n",
+ i, leptr, lp->rx_buf_ptr_cpu[i]);
+ }
+ iob();
+}
+
+static int init_restart_lance(struct lance_private *lp)
+{
+ volatile struct lance_regs *ll = lp->ll;
+ int i;
+
+ writereg(&ll->rap, LE_CSR0);
+ writereg(&ll->rdp, LE_C0_INIT);
+
+ /* Wait for the lance to complete initialization */
+ for (i = 0; (i < 100) && !(ll->rdp & LE_C0_IDON); i++) {
+ udelay(10);
+ }
+ if ((i == 100) || (ll->rdp & LE_C0_ERR)) {
+ printk("LANCE unopened after %d ticks, csr0=%4.4x.\n",
+ i, ll->rdp);
+ return -1;
+ }
+ if ((ll->rdp & LE_C0_ERR)) {
+ printk("LANCE unopened after %d ticks, csr0=%4.4x.\n",
+ i, ll->rdp);
+ return -1;
+ }
+ writereg(&ll->rdp, LE_C0_IDON);
+ writereg(&ll->rdp, LE_C0_STRT);
+ writereg(&ll->rdp, LE_C0_INEA);
+
+ return 0;
+}
+
+static int lance_rx(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile u16 *ib = (volatile u16 *)dev->mem_start;
+ volatile u16 *rd;
+ unsigned short bits;
+ int entry, len;
+ struct sk_buff *skb;
+
+#ifdef TEST_HITS
+ {
+ int i;
+
+ printk("[");
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ if (i == lp->rx_new)
+ printk("%s", *lib_ptr(ib, brx_ring[i].rmd1,
+ lp->type) &
+ LE_R1_OWN ? "_" : "X");
+ else
+ printk("%s", *lib_ptr(ib, brx_ring[i].rmd1,
+ lp->type) &
+ LE_R1_OWN ? "." : "1");
+ }
+ printk("]");
+ }
+#endif
+
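+ /* Walk the ring until we reach a descriptor the LANCE still owns. */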
+ for (rd = lib_ptr(ib, brx_ring[lp->rx_new], lp->type);
+ !((bits = *rds_ptr(rd, rmd1, lp->type)) & LE_R1_OWN);
+ rd = lib_ptr(ib, brx_ring[lp->rx_new], lp->type)) {
+ entry = lp->rx_new;
+
+ /* We got an incomplete frame? */
+ if ((bits & LE_R1_POK) != LE_R1_POK) {
+ dev->stats.rx_over_errors++;
+ dev->stats.rx_errors++;
+ } else if (bits & LE_R1_ERR) {
+ /* Count only the end frame as a rx error,
+ * not the beginning
+ */
+ if (bits & LE_R1_BUF)
+ dev->stats.rx_fifo_errors++;
+ if (bits & LE_R1_CRC)
+ dev->stats.rx_crc_errors++;
+ if (bits & LE_R1_OFL)
+ dev->stats.rx_over_errors++;
+ if (bits & LE_R1_FRA)
+ dev->stats.rx_frame_errors++;
+ if (bits & LE_R1_EOP)
+ dev->stats.rx_errors++;
+ } else {
+ len = (*rds_ptr(rd, mblength, lp->type) & 0xfff) - 4;
+ skb = netdev_alloc_skb(dev, len + 2);
+
+ if (!skb) {
+ dev->stats.rx_dropped++;
+ *rds_ptr(rd, mblength, lp->type) = 0;
+ *rds_ptr(rd, rmd1, lp->type) =
+ ((lp->rx_buf_ptr_lnc[entry] >> 16) &
+ 0xff) | LE_R1_OWN;
+ lp->rx_new = (entry + 1) & RX_RING_MOD_MASK;
+ return 0;
+ }
+ dev->stats.rx_bytes += len;
+
+ skb_reserve(skb, 2); /* 16 byte align */
+ skb_put(skb, len); /* make room */
+
+ cp_from_buf(lp->type, skb->data,
+ lp->rx_buf_ptr_cpu[entry], len);
+
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->stats.rx_packets++;
+ }
+
+ /* Return the packet to the pool */
+ *rds_ptr(rd, mblength, lp->type) = 0;
+ *rds_ptr(rd, length, lp->type) = -RX_BUFF_SIZE | 0xf000;
+ *rds_ptr(rd, rmd1, lp->type) =
+ ((lp->rx_buf_ptr_lnc[entry] >> 16) & 0xff) | LE_R1_OWN;
+ lp->rx_new = (entry + 1) & RX_RING_MOD_MASK;
+ }
+ return 0;
+}
+
+static void lance_tx(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile u16 *ib = (volatile u16 *)dev->mem_start;
+ volatile struct lance_regs *ll = lp->ll;
+ volatile u16 *td;
+ int i, j;
+ int status;
+
+ j = lp->tx_old;
+
+ spin_lock(&lp->lock);
+
+ for (i = j; i != lp->tx_new; i = j) {
+ td = lib_ptr(ib, btx_ring[i], lp->type);
+ /* If we hit a packet not owned by us, stop */
+ if (*tds_ptr(td, tmd1, lp->type) & LE_T1_OWN)
+ break;
+
+ if (*tds_ptr(td, tmd1, lp->type) & LE_T1_ERR) {
+ status = *tds_ptr(td, misc, lp->type);
+
+ dev->stats.tx_errors++;
+ if (status & LE_T3_RTY)
+ dev->stats.tx_aborted_errors++;
+ if (status & LE_T3_LCOL)
+ dev->stats.tx_window_errors++;
+
+ if (status & LE_T3_CLOS) {
+ dev->stats.tx_carrier_errors++;
+ printk("%s: Carrier Lost\n", dev->name);
+ /* Stop the lance */
+ writereg(&ll->rap, LE_CSR0);
+ writereg(&ll->rdp, LE_C0_STOP);
+ lance_init_ring(dev);
+ load_csrs(lp);
+ init_restart_lance(lp);
+ goto out;
+ }
+ /* Buffer errors and underflows turn off the
+			 * transmitter; restart the adapter.
+ */
+ if (status & (LE_T3_BUF | LE_T3_UFL)) {
+ dev->stats.tx_fifo_errors++;
+
+ printk("%s: Tx: ERR_BUF|ERR_UFL, restarting\n",
+ dev->name);
+ /* Stop the lance */
+ writereg(&ll->rap, LE_CSR0);
+ writereg(&ll->rdp, LE_C0_STOP);
+ lance_init_ring(dev);
+ load_csrs(lp);
+ init_restart_lance(lp);
+ goto out;
+ }
+ } else if ((*tds_ptr(td, tmd1, lp->type) & LE_T1_POK) ==
+ LE_T1_POK) {
+ /*
+ * So we don't count the packet more than once.
+ */
+ *tds_ptr(td, tmd1, lp->type) &= ~(LE_T1_POK);
+
+ /* One collision before packet was sent. */
+ if (*tds_ptr(td, tmd1, lp->type) & LE_T1_EONE)
+ dev->stats.collisions++;
+
+ /* More than one collision, be optimistic. */
+ if (*tds_ptr(td, tmd1, lp->type) & LE_T1_EMORE)
+ dev->stats.collisions += 2;
+
+ dev->stats.tx_packets++;
+ }
+ j = (j + 1) & TX_RING_MOD_MASK;
+ }
+ lp->tx_old = j;
+out:
+ if (netif_queue_stopped(dev) &&
+ TX_BUFFS_AVAIL > 0)
+ netif_wake_queue(dev);
+
+ spin_unlock(&lp->lock);
+}
+
+static irqreturn_t lance_dma_merr_int(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+
+ printk(KERN_ERR "%s: DMA error\n", dev->name);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t lance_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_regs *ll = lp->ll;
+ int csr0;
+
+ writereg(&ll->rap, LE_CSR0);
+ csr0 = ll->rdp;
+
+ /* Acknowledge all the interrupt sources ASAP */
+ writereg(&ll->rdp, csr0 & (LE_C0_INTR | LE_C0_TINT | LE_C0_RINT));
+
+ if ((csr0 & LE_C0_ERR)) {
+ /* Clear the error condition */
+ writereg(&ll->rdp, LE_C0_BABL | LE_C0_ERR | LE_C0_MISS |
+ LE_C0_CERR | LE_C0_MERR);
+ }
+ if (csr0 & LE_C0_RINT)
+ lance_rx(dev);
+
+ if (csr0 & LE_C0_TINT)
+ lance_tx(dev);
+
+ if (csr0 & LE_C0_BABL)
+ dev->stats.tx_errors++;
+
+ if (csr0 & LE_C0_MISS)
+ dev->stats.rx_errors++;
+
+ if (csr0 & LE_C0_MERR) {
+ printk("%s: Memory error, status %04x\n", dev->name, csr0);
+
+ writereg(&ll->rdp, LE_C0_STOP);
+
+ lance_init_ring(dev);
+ load_csrs(lp);
+ init_restart_lance(lp);
+ netif_wake_queue(dev);
+ }
+
+ writereg(&ll->rdp, LE_C0_INEA);
+ writereg(&ll->rdp, LE_C0_INEA);
+ return IRQ_HANDLED;
+}
+
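+/* Aside, not driver code: the interrupt bits in CSR0 are cleared by
+ * writing a one back to them, which is why lance_interrupt() above writes
+ * the masked csr0 value straight back to RDP before servicing TINT/RINT.
+ * A minimal sketch of the idiom, with hypothetical names:
+ */
+#if 0
+static inline void example_ack_csr0(volatile struct lance_regs *ll, int csr0)
+{
+	writereg(&ll->rap, LE_CSR0);	/* select CSR0 */
+	writereg(&ll->rdp, csr0 & (LE_C0_TINT | LE_C0_RINT)); /* W1C ack */
+}
+#endif
+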
+static int lance_open(struct net_device *dev)
+{
+ volatile u16 *ib = (volatile u16 *)dev->mem_start;
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_regs *ll = lp->ll;
+ int status = 0;
+
+ /* Stop the Lance */
+ writereg(&ll->rap, LE_CSR0);
+ writereg(&ll->rdp, LE_C0_STOP);
+
+	/* Set the mode and clear the multicast filter only at device open,
+	 * so that lance_init_ring() called from any error path will not
+	 * forget the multicast filters.
+	 *
+	 * BTW it is a common bug in all lance drivers! --ANK
+	 */
+ *lib_ptr(ib, mode, lp->type) = 0;
+ *lib_ptr(ib, filter[0], lp->type) = 0;
+ *lib_ptr(ib, filter[1], lp->type) = 0;
+ *lib_ptr(ib, filter[2], lp->type) = 0;
+ *lib_ptr(ib, filter[3], lp->type) = 0;
+
+ lance_init_ring(dev);
+ load_csrs(lp);
+
+ netif_start_queue(dev);
+
+ /* Associate IRQ with lance_interrupt */
+ if (request_irq(dev->irq, lance_interrupt, 0, "lance", dev)) {
+ printk("%s: Can't get IRQ %d\n", dev->name, dev->irq);
+ return -EAGAIN;
+ }
+ if (lp->dma_irq >= 0) {
+ unsigned long flags;
+
+ if (request_irq(lp->dma_irq, lance_dma_merr_int, IRQF_ONESHOT,
+ "lance error", dev)) {
+ free_irq(dev->irq, dev);
+ printk("%s: Can't get DMA IRQ %d\n", dev->name,
+ lp->dma_irq);
+ return -EAGAIN;
+ }
+
+ spin_lock_irqsave(&ioasic_ssr_lock, flags);
+
+ fast_mb();
+ /* Enable I/O ASIC LANCE DMA. */
+ ioasic_write(IO_REG_SSR,
+ ioasic_read(IO_REG_SSR) | IO_SSR_LANCE_DMA_EN);
+
+ fast_mb();
+ spin_unlock_irqrestore(&ioasic_ssr_lock, flags);
+ }
+
+ status = init_restart_lance(lp);
+ return status;
+}
+
+static int lance_close(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_regs *ll = lp->ll;
+
+ netif_stop_queue(dev);
+ del_timer_sync(&lp->multicast_timer);
+
+ /* Stop the card */
+ writereg(&ll->rap, LE_CSR0);
+ writereg(&ll->rdp, LE_C0_STOP);
+
+ if (lp->dma_irq >= 0) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioasic_ssr_lock, flags);
+
+ fast_mb();
+ /* Disable I/O ASIC LANCE DMA. */
+ ioasic_write(IO_REG_SSR,
+ ioasic_read(IO_REG_SSR) & ~IO_SSR_LANCE_DMA_EN);
+
+ fast_iob();
+ spin_unlock_irqrestore(&ioasic_ssr_lock, flags);
+
+ free_irq(lp->dma_irq, dev);
+ }
+ free_irq(dev->irq, dev);
+ return 0;
+}
+
+static inline int lance_reset(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_regs *ll = lp->ll;
+ int status;
+
+ /* Stop the lance */
+ writereg(&ll->rap, LE_CSR0);
+ writereg(&ll->rdp, LE_C0_STOP);
+
+ lance_init_ring(dev);
+ load_csrs(lp);
+ netif_trans_update(dev); /* prevent tx timeout */
+ status = init_restart_lance(lp);
+ return status;
+}
+
+static void lance_tx_timeout(struct net_device *dev, unsigned int txqueue)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_regs *ll = lp->ll;
+
+ printk(KERN_ERR "%s: transmit timed out, status %04x, reset\n",
+ dev->name, ll->rdp);
+ lance_reset(dev);
+ netif_wake_queue(dev);
+}
+
+static netdev_tx_t lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_regs *ll = lp->ll;
+ volatile u16 *ib = (volatile u16 *)dev->mem_start;
+ unsigned long flags;
+ int entry, len;
+
+ len = skb->len;
+
+ if (len < ETH_ZLEN) {
+ if (skb_padto(skb, ETH_ZLEN))
+ return NETDEV_TX_OK;
+ len = ETH_ZLEN;
+ }
+
+ dev->stats.tx_bytes += len;
+
+ spin_lock_irqsave(&lp->lock, flags);
+
+ entry = lp->tx_new;
+ *lib_ptr(ib, btx_ring[entry].length, lp->type) = (-len);
+ *lib_ptr(ib, btx_ring[entry].misc, lp->type) = 0;
+
+ cp_to_buf(lp->type, lp->tx_buf_ptr_cpu[entry], skb->data, len);
+
+ /* Now, give the packet to the lance */
+ *lib_ptr(ib, btx_ring[entry].tmd1, lp->type) =
+ ((lp->tx_buf_ptr_lnc[entry] >> 16) & 0xff) |
+ (LE_T1_POK | LE_T1_OWN);
+ lp->tx_new = (entry + 1) & TX_RING_MOD_MASK;
+
+ if (TX_BUFFS_AVAIL <= 0)
+ netif_stop_queue(dev);
+
+ /* Kick the lance: transmit now */
+ writereg(&ll->rdp, LE_C0_INEA | LE_C0_TDMD);
+
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ dev_kfree_skb(skb);
+
+ return NETDEV_TX_OK;
+}
+
+static void lance_load_multicast(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile u16 *ib = (volatile u16 *)dev->mem_start;
+ struct netdev_hw_addr *ha;
+ u32 crc;
+
+ /* set all multicast bits */
+ if (dev->flags & IFF_ALLMULTI) {
+ *lib_ptr(ib, filter[0], lp->type) = 0xffff;
+ *lib_ptr(ib, filter[1], lp->type) = 0xffff;
+ *lib_ptr(ib, filter[2], lp->type) = 0xffff;
+ *lib_ptr(ib, filter[3], lp->type) = 0xffff;
+ return;
+ }
+ /* clear the multicast filter */
+ *lib_ptr(ib, filter[0], lp->type) = 0;
+ *lib_ptr(ib, filter[1], lp->type) = 0;
+ *lib_ptr(ib, filter[2], lp->type) = 0;
+ *lib_ptr(ib, filter[3], lp->type) = 0;
+
+ /* Add addresses */
+ netdev_for_each_mc_addr(ha, dev) {
+ crc = ether_crc_le(ETH_ALEN, ha->addr);
+ crc = crc >> 26;
+ *lib_ptr(ib, filter[crc >> 4], lp->type) |= 1 << (crc & 0xf);
+ }
+}
+
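+/* Worked example, not part of the driver: how the loop above maps a
+ * multicast address to one of the 64 bits in the logical-address filter.
+ * The top six bits of the little-endian CRC-32 of the address select a
+ * bit in the filter, stored as four 16-bit words.  Hypothetical helper:
+ */
+#if 0
+static inline void example_set_mc_bit(volatile u16 *filter, const u8 *addr)
+{
+	u32 crc = ether_crc_le(ETH_ALEN, addr);	/* 32-bit LE CRC of the MAC */
+	u32 bit = crc >> 26;			/* top 6 bits: 0..63 */
+
+	filter[bit >> 4] |= 1 << (bit & 0xf);	/* word 0..3, bit 0..15 */
+}
+#endif
+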
+static void lance_set_multicast(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile u16 *ib = (volatile u16 *)dev->mem_start;
+ volatile struct lance_regs *ll = lp->ll;
+
+ if (!netif_running(dev))
+ return;
+
+ if (lp->tx_old != lp->tx_new) {
+ mod_timer(&lp->multicast_timer, jiffies + 4 * HZ/100);
+ netif_wake_queue(dev);
+ return;
+ }
+
+ netif_stop_queue(dev);
+
+ writereg(&ll->rap, LE_CSR0);
+ writereg(&ll->rdp, LE_C0_STOP);
+
+ lance_init_ring(dev);
+
+ if (dev->flags & IFF_PROMISC) {
+ *lib_ptr(ib, mode, lp->type) |= LE_MO_PROM;
+ } else {
+ *lib_ptr(ib, mode, lp->type) &= ~LE_MO_PROM;
+ lance_load_multicast(dev);
+ }
+ load_csrs(lp);
+ init_restart_lance(lp);
+ netif_wake_queue(dev);
+}
+
+static void lance_set_multicast_retry(struct timer_list *t)
+{
+ struct lance_private *lp = from_timer(lp, t, multicast_timer);
+ struct net_device *dev = lp->dev;
+
+ lance_set_multicast(dev);
+}
+
+static const struct net_device_ops lance_netdev_ops = {
+ .ndo_open = lance_open,
+ .ndo_stop = lance_close,
+ .ndo_start_xmit = lance_start_xmit,
+ .ndo_tx_timeout = lance_tx_timeout,
+ .ndo_set_rx_mode = lance_set_multicast,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+};
+
+static int dec_lance_probe(struct device *bdev, const int type)
+{
+ static unsigned version_printed;
+ static const char fmt[] = "declance%d";
+ char name[10];
+ struct net_device *dev;
+ struct lance_private *lp;
+ volatile struct lance_regs *ll;
+ resource_size_t start = 0, len = 0;
+ int i, ret;
+ unsigned long esar_base;
+ unsigned char *esar;
+ const char *desc;
+
+ if (dec_lance_debug && version_printed++ == 0)
+ printk(version);
+
+ if (bdev)
+ snprintf(name, sizeof(name), "%s", dev_name(bdev));
+ else {
+ i = 0;
+ dev = root_lance_dev;
+ while (dev) {
+ i++;
+ lp = netdev_priv(dev);
+ dev = lp->next;
+ }
+ snprintf(name, sizeof(name), fmt, i);
+ }
+
+ dev = alloc_etherdev(sizeof(struct lance_private));
+ if (!dev) {
+ ret = -ENOMEM;
+ goto err_out;
+ }
+
+ /*
+ * alloc_etherdev ensures the data structures used by the LANCE
+ * are aligned.
+ */
+ lp = netdev_priv(dev);
+ spin_lock_init(&lp->lock);
+
+ lp->type = type;
+ switch (type) {
+ case ASIC_LANCE:
+ dev->base_addr = CKSEG1ADDR(dec_kn_slot_base + IOASIC_LANCE);
+
+ /* buffer space for the on-board LANCE shared memory */
+ /*
+ * FIXME: ugly hack!
+ */
+ dev->mem_start = CKSEG1ADDR(0x00020000);
+ dev->mem_end = dev->mem_start + 0x00020000;
+ dev->irq = dec_interrupt[DEC_IRQ_LANCE];
+ esar_base = CKSEG1ADDR(dec_kn_slot_base + IOASIC_ESAR);
+
+		/* Work around a crash when booting KN04 2.1k from disk. */
+ memset((void *)dev->mem_start, 0,
+ dev->mem_end - dev->mem_start);
+
+ /*
+ * setup the pointer arrays, this sucks [tm] :-(
+ */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ lp->rx_buf_ptr_cpu[i] =
+ (char *)(dev->mem_start + 2 * BUF_OFFSET_CPU +
+ 2 * i * RX_BUFF_SIZE);
+ lp->rx_buf_ptr_lnc[i] =
+ (BUF_OFFSET_LNC + i * RX_BUFF_SIZE);
+ }
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ lp->tx_buf_ptr_cpu[i] =
+ (char *)(dev->mem_start + 2 * BUF_OFFSET_CPU +
+ 2 * RX_RING_SIZE * RX_BUFF_SIZE +
+ 2 * i * TX_BUFF_SIZE);
+ lp->tx_buf_ptr_lnc[i] =
+ (BUF_OFFSET_LNC +
+ RX_RING_SIZE * RX_BUFF_SIZE +
+ i * TX_BUFF_SIZE);
+ }
+
+ /* Setup I/O ASIC LANCE DMA. */
+ lp->dma_irq = dec_interrupt[DEC_IRQ_LANCE_MERR];
+ ioasic_write(IO_REG_LANCE_DMA_P,
+ CPHYSADDR(dev->mem_start) << 3);
+
+ break;
+#ifdef CONFIG_TC
+ case PMAD_LANCE:
+ dev_set_drvdata(bdev, dev);
+
+ start = to_tc_dev(bdev)->resource.start;
+ len = to_tc_dev(bdev)->resource.end - start + 1;
+ if (!request_mem_region(start, len, dev_name(bdev))) {
+ printk(KERN_ERR
+ "%s: Unable to reserve MMIO resource\n",
+ dev_name(bdev));
+ ret = -EBUSY;
+ goto err_out_dev;
+ }
+
+ dev->mem_start = CKSEG1ADDR(start);
+ dev->mem_end = dev->mem_start + 0x100000;
+ dev->base_addr = dev->mem_start + 0x100000;
+ dev->irq = to_tc_dev(bdev)->interrupt;
+ esar_base = dev->mem_start + 0x1c0002;
+ lp->dma_irq = -1;
+
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ lp->rx_buf_ptr_cpu[i] =
+ (char *)(dev->mem_start + BUF_OFFSET_CPU +
+ i * RX_BUFF_SIZE);
+ lp->rx_buf_ptr_lnc[i] =
+ (BUF_OFFSET_LNC + i * RX_BUFF_SIZE);
+ }
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ lp->tx_buf_ptr_cpu[i] =
+ (char *)(dev->mem_start + BUF_OFFSET_CPU +
+ RX_RING_SIZE * RX_BUFF_SIZE +
+ i * TX_BUFF_SIZE);
+ lp->tx_buf_ptr_lnc[i] =
+ (BUF_OFFSET_LNC +
+ RX_RING_SIZE * RX_BUFF_SIZE +
+ i * TX_BUFF_SIZE);
+ }
+
+ break;
+#endif
+ case PMAX_LANCE:
+ dev->irq = dec_interrupt[DEC_IRQ_LANCE];
+ dev->base_addr = CKSEG1ADDR(KN01_SLOT_BASE + KN01_LANCE);
+ dev->mem_start = CKSEG1ADDR(KN01_SLOT_BASE + KN01_LANCE_MEM);
+ dev->mem_end = dev->mem_start + KN01_SLOT_SIZE;
+ esar_base = CKSEG1ADDR(KN01_SLOT_BASE + KN01_ESAR + 1);
+ lp->dma_irq = -1;
+
+ /*
+ * setup the pointer arrays, this sucks [tm] :-(
+ */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ lp->rx_buf_ptr_cpu[i] =
+ (char *)(dev->mem_start + 2 * BUF_OFFSET_CPU +
+ 2 * i * RX_BUFF_SIZE);
+ lp->rx_buf_ptr_lnc[i] =
+ (BUF_OFFSET_LNC + i * RX_BUFF_SIZE);
+ }
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ lp->tx_buf_ptr_cpu[i] =
+ (char *)(dev->mem_start + 2 * BUF_OFFSET_CPU +
+ 2 * RX_RING_SIZE * RX_BUFF_SIZE +
+ 2 * i * TX_BUFF_SIZE);
+ lp->tx_buf_ptr_lnc[i] =
+ (BUF_OFFSET_LNC +
+ RX_RING_SIZE * RX_BUFF_SIZE +
+ i * TX_BUFF_SIZE);
+ }
+
+ break;
+
+ default:
+ printk(KERN_ERR "%s: declance_init called with unknown type\n",
+ name);
+ ret = -ENODEV;
+ goto err_out_dev;
+ }
+
+ ll = (struct lance_regs *) dev->base_addr;
+ esar = (unsigned char *) esar_base;
+
+ /* prom checks */
+ /* First, check for test pattern */
+ if (esar[0x60] != 0xff && esar[0x64] != 0x00 &&
+ esar[0x68] != 0x55 && esar[0x6c] != 0xaa) {
+ printk(KERN_ERR
+ "%s: Ethernet station address prom not found!\n",
+ name);
+ ret = -ENODEV;
+ goto err_out_resource;
+ }
+ /* Check the prom contents */
+ for (i = 0; i < 8; i++) {
+ if (esar[i * 4] != esar[0x3c - i * 4] &&
+ esar[i * 4] != esar[0x40 + i * 4] &&
+ esar[0x3c - i * 4] != esar[0x40 + i * 4]) {
+ printk(KERN_ERR "%s: Something is wrong with the "
+ "ethernet station address prom!\n", name);
+ ret = -ENODEV;
+ goto err_out_resource;
+ }
+ }
+
+ /* Copy the ethernet address to the device structure, later to the
+ * lance initialization block so the lance gets it every time it's
+ * (re)initialized.
+ */
+ switch (type) {
+ case ASIC_LANCE:
+ desc = "IOASIC onboard LANCE";
+ break;
+ case PMAD_LANCE:
+ desc = "PMAD-AA";
+ break;
+ case PMAX_LANCE:
+ desc = "PMAX onboard LANCE";
+ break;
+ }
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = esar[i * 4];
+
+ printk("%s: %s, addr = %pM, irq = %d\n",
+ name, desc, dev->dev_addr, dev->irq);
+
+ dev->netdev_ops = &lance_netdev_ops;
+ dev->watchdog_timeo = 5*HZ;
+
+ /* lp->ll is the location of the registers for lance card */
+ lp->ll = ll;
+
+ /* busmaster_regval (CSR3) should be zero according to the PMAD-AA
+ * specification.
+ */
+ lp->busmaster_regval = 0;
+
+ dev->dma = 0;
+
+ /* We cannot sleep if the chip is busy during a
+ * multicast list update event, because such events
+ * can occur from interrupts (ex. IPv6). So we
+ * use a timer to try again later when necessary. -DaveM
+ */
+ lp->dev = dev;
+ timer_setup(&lp->multicast_timer, lance_set_multicast_retry, 0);
+
+
+ ret = register_netdev(dev);
+ if (ret) {
+ printk(KERN_ERR
+ "%s: Unable to register netdev, aborting.\n", name);
+ goto err_out_resource;
+ }
+
+ if (!bdev) {
+ lp->next = root_lance_dev;
+ root_lance_dev = dev;
+ }
+
+ printk("%s: registered as %s.\n", name, dev->name);
+ return 0;
+
+err_out_resource:
+ if (bdev)
+ release_mem_region(start, len);
+
+err_out_dev:
+ free_netdev(dev);
+
+err_out:
+ return ret;
+}
+
+/* Find all the lance cards on the system and initialize them */
+static int __init dec_lance_platform_probe(void)
+{
+ int count = 0;
+
+ if (dec_interrupt[DEC_IRQ_LANCE] >= 0) {
+ if (dec_interrupt[DEC_IRQ_LANCE_MERR] >= 0) {
+ if (dec_lance_probe(NULL, ASIC_LANCE) >= 0)
+ count++;
+ } else if (!TURBOCHANNEL) {
+ if (dec_lance_probe(NULL, PMAX_LANCE) >= 0)
+ count++;
+ }
+ }
+
+ return (count > 0) ? 0 : -ENODEV;
+}
+
+static void __exit dec_lance_platform_remove(void)
+{
+ while (root_lance_dev) {
+ struct net_device *dev = root_lance_dev;
+ struct lance_private *lp = netdev_priv(dev);
+
+ unregister_netdev(dev);
+ root_lance_dev = lp->next;
+ free_netdev(dev);
+ }
+}
+
+#ifdef CONFIG_TC
+static int dec_lance_tc_probe(struct device *dev);
+static int dec_lance_tc_remove(struct device *dev);
+
+static const struct tc_device_id dec_lance_tc_table[] = {
+ { "DEC ", "PMAD-AA " },
+ { }
+};
+MODULE_DEVICE_TABLE(tc, dec_lance_tc_table);
+
+static struct tc_driver dec_lance_tc_driver = {
+ .id_table = dec_lance_tc_table,
+ .driver = {
+ .name = "declance",
+ .bus = &tc_bus_type,
+ .probe = dec_lance_tc_probe,
+ .remove = dec_lance_tc_remove,
+ },
+};
+
+static int dec_lance_tc_probe(struct device *dev)
+{
+ int status = dec_lance_probe(dev, PMAD_LANCE);
+ if (!status)
+ get_device(dev);
+ return status;
+}
+
+static void dec_lance_remove(struct device *bdev)
+{
+ struct net_device *dev = dev_get_drvdata(bdev);
+ resource_size_t start, len;
+
+ unregister_netdev(dev);
+ start = to_tc_dev(bdev)->resource.start;
+ len = to_tc_dev(bdev)->resource.end - start + 1;
+ release_mem_region(start, len);
+ free_netdev(dev);
+}
+
+static int dec_lance_tc_remove(struct device *dev)
+{
+ put_device(dev);
+ dec_lance_remove(dev);
+ return 0;
+}
+#endif
+
+static int __init dec_lance_init(void)
+{
+ int status;
+
+ status = tc_register_driver(&dec_lance_tc_driver);
+ if (!status)
+ dec_lance_platform_probe();
+ return status;
+}
+
+static void __exit dec_lance_exit(void)
+{
+ dec_lance_platform_remove();
+ tc_unregister_driver(&dec_lance_tc_driver);
+}
+
+
+module_init(dec_lance_init);
+module_exit(dec_lance_exit);
diff --git a/drivers/net/ethernet/amd/hplance.c b/drivers/net/ethernet/amd/hplance.c
new file mode 100644
index 000000000..e10aceb2b
--- /dev/null
+++ b/drivers/net/ethernet/amd/hplance.c
@@ -0,0 +1,232 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* hplance.c : the Linux/hp300/lance ethernet driver
+ *
+ * Copyright (C) 05/1998 Peter Maydell <pmaydell@chiark.greenend.org.uk>
+ * Based on the Sun Lance driver and the NetBSD HP Lance driver
+ * Uses the generic 7990.c LANCE code.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/pgtable.h>
+/* Used for the temporary inet entries and routing */
+#include <linux/socket.h>
+#include <linux/route.h>
+#include <linux/dio.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#include <asm/io.h>
+
+#include "hplance.h"
+
+/* We have 16392 bytes of RAM for the init block and buffers. This places
+ * an upper limit on the number of buffers we can use. NetBSD uses 8 Rx
+ * buffers and 2 Tx buffers; that takes (8 + 2) * 1544 bytes.
+ */
+#define LANCE_LOG_TX_BUFFERS 1
+#define LANCE_LOG_RX_BUFFERS 3
+
+#include "7990.h" /* use generic LANCE code */
+
+/* Our private data structure */
+struct hplance_private {
+ struct lance_private lance;
+};
+
+/* function prototypes... This is easy because all the grot is in the
+ * generic LANCE support. All we have to support is probing for boards,
+ * plus board-specific init, open and close actions.
+ * Oh, and we need to tell the generic code how to read and write LANCE registers...
+ */
+static int hplance_init_one(struct dio_dev *d, const struct dio_device_id *ent);
+static void hplance_init(struct net_device *dev, struct dio_dev *d);
+static void hplance_remove_one(struct dio_dev *d);
+static void hplance_writerap(void *priv, unsigned short value);
+static void hplance_writerdp(void *priv, unsigned short value);
+static unsigned short hplance_readrdp(void *priv);
+static int hplance_open(struct net_device *dev);
+static int hplance_close(struct net_device *dev);
+
+static struct dio_device_id hplance_dio_tbl[] = {
+ { DIO_ID_LAN },
+ { 0 }
+};
+
+static struct dio_driver hplance_driver = {
+ .name = "hplance",
+ .id_table = hplance_dio_tbl,
+ .probe = hplance_init_one,
+ .remove = hplance_remove_one,
+};
+
+static const struct net_device_ops hplance_netdev_ops = {
+ .ndo_open = hplance_open,
+ .ndo_stop = hplance_close,
+ .ndo_start_xmit = lance_start_xmit,
+ .ndo_set_rx_mode = lance_set_multicast,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = lance_poll,
+#endif
+};
+
+/* Find all the HP Lance boards and initialise them... */
+static int hplance_init_one(struct dio_dev *d, const struct dio_device_id *ent)
+{
+ struct net_device *dev;
+ int err = -ENOMEM;
+
+ dev = alloc_etherdev(sizeof(struct hplance_private));
+ if (!dev)
+ goto out;
+
+ err = -EBUSY;
+ if (!request_mem_region(dio_resource_start(d),
+ dio_resource_len(d), d->name))
+ goto out_free_netdev;
+
+ hplance_init(dev, d);
+ err = register_netdev(dev);
+ if (err)
+ goto out_release_mem_region;
+
+ dio_set_drvdata(d, dev);
+
+ printk(KERN_INFO "%s: %s; select code %d, addr %pM, irq %d\n",
+ dev->name, d->name, d->scode, dev->dev_addr, d->ipl);
+
+ return 0;
+
+ out_release_mem_region:
+ release_mem_region(dio_resource_start(d), dio_resource_len(d));
+ out_free_netdev:
+ free_netdev(dev);
+ out:
+ return err;
+}
+
+static void hplance_remove_one(struct dio_dev *d)
+{
+ struct net_device *dev = dio_get_drvdata(d);
+
+ unregister_netdev(dev);
+ release_mem_region(dio_resource_start(d), dio_resource_len(d));
+ free_netdev(dev);
+}
+
+/* Initialise a single lance board at the given DIO device */
+static void hplance_init(struct net_device *dev, struct dio_dev *d)
+{
+ unsigned long va = (d->resource.start + DIO_VIRADDRBASE);
+ struct hplance_private *lp;
+ int i;
+
+ /* reset the board */
+ out_8(va + DIO_IDOFF, 0xff);
+ udelay(100); /* ariba! ariba! udelay! udelay! */
+
+ /* Fill the dev fields */
+ dev->base_addr = va;
+ dev->netdev_ops = &hplance_netdev_ops;
+ dev->dma = 0;
+
+ for (i = 0; i < 6; i++) {
+ /* The NVRAM holds our ethernet address, one nibble per byte,
+ * at bytes NVRAMOFF+1,3,5,7,9...
+ */
+ dev->dev_addr[i] = ((in_8(va + HPLANCE_NVRAMOFF + i*4 + 1) & 0xF) << 4)
+ | (in_8(va + HPLANCE_NVRAMOFF + i*4 + 3) & 0xF);
+ }
+
+ lp = netdev_priv(dev);
+ lp->lance.name = d->name;
+ lp->lance.base = va;
+ lp->lance.init_block = (struct lance_init_block *)(va + HPLANCE_MEMOFF); /* CPU addr */
+ lp->lance.lance_init_block = NULL; /* LANCE addr of same RAM */
+ lp->lance.busmaster_regval = LE_C3_BSWP; /* we're bigendian */
+ lp->lance.irq = d->ipl;
+ lp->lance.writerap = hplance_writerap;
+ lp->lance.writerdp = hplance_writerdp;
+ lp->lance.readrdp = hplance_readrdp;
+ lp->lance.lance_log_rx_bufs = LANCE_LOG_RX_BUFFERS;
+ lp->lance.lance_log_tx_bufs = LANCE_LOG_TX_BUFFERS;
+ lp->lance.rx_ring_mod_mask = RX_RING_MOD_MASK;
+ lp->lance.tx_ring_mod_mask = TX_RING_MOD_MASK;
+}
+
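+/* Worked example of the NVRAM decode above: each address byte is stored
+ * as two nibbles, one per NVRAM byte, at offsets NVRAMOFF + 4*i + 1 (high
+ * nibble) and NVRAMOFF + 4*i + 3 (low nibble).  For an address byte 0x4C
+ * the NVRAM therefore holds 0x?4 and 0x?C at those offsets, and the loop
+ * reassembles it as (0x4 << 4) | 0xC.
+ */
+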
+/* This is disgusting. We have to check the DIO status register for ack every
+ * time we read or write the LANCE registers.
+ */
+static void hplance_writerap(void *priv, unsigned short value)
+{
+ struct lance_private *lp = (struct lance_private *)priv;
+ do {
+ out_be16(lp->base + HPLANCE_REGOFF + LANCE_RAP, value);
+ } while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0);
+}
+
+static void hplance_writerdp(void *priv, unsigned short value)
+{
+ struct lance_private *lp = (struct lance_private *)priv;
+ do {
+ out_be16(lp->base + HPLANCE_REGOFF + LANCE_RDP, value);
+ } while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0);
+}
+
+static unsigned short hplance_readrdp(void *priv)
+{
+ struct lance_private *lp = (struct lance_private *)priv;
+ __u16 value;
+ do {
+ value = in_be16(lp->base + HPLANCE_REGOFF + LANCE_RDP);
+ } while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0);
+ return value;
+}
+
+static int hplance_open(struct net_device *dev)
+{
+ int status;
+ struct lance_private *lp = netdev_priv(dev);
+
+ status = lance_open(dev); /* call generic lance open code */
+ if (status)
+ return status;
+ /* enable interrupts at board level. */
+ out_8(lp->base + HPLANCE_STATUS, LE_IE);
+
+ return 0;
+}
+
+static int hplance_close(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+
+	out_8(lp->base + HPLANCE_STATUS, 0);	/* disable interrupts at board level */
+ lance_close(dev);
+ return 0;
+}
+
+static int __init hplance_init_module(void)
+{
+ return dio_register_driver(&hplance_driver);
+}
+
+static void __exit hplance_cleanup_module(void)
+{
+ dio_unregister_driver(&hplance_driver);
+}
+
+module_init(hplance_init_module);
+module_exit(hplance_cleanup_module);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/amd/hplance.h b/drivers/net/ethernet/amd/hplance.h
new file mode 100644
index 000000000..bc845a2c6
--- /dev/null
+++ b/drivers/net/ethernet/amd/hplance.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Random defines and structures for the HP Lance driver.
+ * Copyright (C) 05/1998 Peter Maydell <pmaydell@chiark.greenend.org.uk>
+ * Based on the Sun Lance driver and the NetBSD HP Lance driver
+ */
+
+/* Registers */
+#define HPLANCE_ID 0x01 /* DIO register: ID byte */
+#define HPLANCE_STATUS 0x03 /* DIO register: interrupt enable/status */
+
+/* Control and status bits for the status register */
+#define LE_IE 0x80 /* interrupt enable */
+#define LE_IR 0x40 /* interrupt requested */
+#define LE_LOCK 0x08 /* lock status register */
+#define LE_ACK 0x04 /* ack of lock */
+#define LE_JAB 0x02 /* loss of tx clock (???) */
+/* We can also extract the IPL from the status register with the standard
+ * DIO_IPL(hplance) macro, or using dio_scodetoipl()
+ */
+
+/* These are the offsets for the DIO regs (hplance_reg), lance_ioreg,
+ * memory and NVRAM:
+ */
+#define HPLANCE_IDOFF 0 /* board baseaddr */
+#define HPLANCE_REGOFF 0x4000 /* lance registers */
+#define HPLANCE_MEMOFF 0x8000 /* struct lance_init_block */
+#define HPLANCE_NVRAMOFF 0xC008 /* etheraddress as one *nibble* per byte */
diff --git a/drivers/net/ethernet/amd/lance.c b/drivers/net/ethernet/amd/lance.c
new file mode 100644
index 000000000..9dae225b7
--- /dev/null
+++ b/drivers/net/ethernet/amd/lance.c
@@ -0,0 +1,1311 @@
+/* lance.c: An AMD LANCE/PCnet ethernet driver for Linux. */
+/*
+ Written/copyright 1993-1998 by Donald Becker.
+
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency.
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ This driver is for the Allied Telesis AT1500 and HP J2405A, and should work
+ with most other LANCE-based bus-master (NE2100/NE2500) ethercards.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+ Andrey V. Savochkin:
+ - alignment problem with 1.3.* kernel and some minor changes.
+ Thomas Bogendoerfer (tsbogend@bigbug.franken.de):
+ - added support for Linux/Alpha, but removed most of it, because
+ it worked only for the PCI chip.
+ - added hook for the 32bit lance driver
+ - added PCnetPCI II (79C970A) to chip table
+ Paul Gortmaker (gpg109@rsphy1.anu.edu.au):
+ - hopefully fix above so Linux/Alpha can use ISA cards too.
+ 8/20/96 Fixed 7990 autoIRQ failure and reversed unneeded alignment -djb
+ v1.12 10/27/97 Module support -djb
+ v1.14 2/3/98 Module support modified, made PCI support optional -djb
+ v1.15 5/27/99 Fixed bug in the cleanup_module(). dev->priv was freed
+ before unregister_netdev() which caused NULL pointer
+ reference later in the chain (in rtnetlink_fill_ifinfo())
+ -- Mika Kuoppala <miku@iki.fi>
+
+ Forward ported v1.14 to 2.1.129, merged the PCI and misc changes from
+ the 2.1 version of the old driver - Alan Cox
+
+ Get rid of check_region, check kmalloc return in lance_probe1
+ Arnaldo Carvalho de Melo <acme@conectiva.com.br> - 11/01/2001
+
+ Reworked detection, added support for Racal InterLan EtherBlaster cards
+ Vesselin Kostadinov <vesok at yahoo dot com > - 22/4/2004
+*/
+
+static const char version[] = "lance.c:v1.16 2006/11/09 dplatt@3do.com, becker@cesdis.gsfc.nasa.gov\n";
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/mm.h>
+#include <linux/bitops.h>
+
+#include <asm/io.h>
+#include <asm/dma.h>
+
+static unsigned int lance_portlist[] __initdata = { 0x300, 0x320, 0x340, 0x360, 0};
+static int lance_probe1(struct net_device *dev, int ioaddr, int irq, int options);
+static int __init do_lance_probe(struct net_device *dev);
+
+
+static struct card {
+ char id_offset14;
+ char id_offset15;
+} cards[] = {
+ { //"normal"
+ .id_offset14 = 0x57,
+ .id_offset15 = 0x57,
+ },
+ { //NI6510EB
+ .id_offset14 = 0x52,
+ .id_offset15 = 0x44,
+ },
+ { //Racal InterLan EtherBlaster
+ .id_offset14 = 0x52,
+ .id_offset15 = 0x49,
+ },
+};
+#define NUM_CARDS 3
+
+#ifdef LANCE_DEBUG
+static int lance_debug = LANCE_DEBUG;
+#else
+static int lance_debug = 1;
+#endif
+
+/*
+ Theory of Operation
+
+I. Board Compatibility
+
+This device driver is designed for the AMD 79C960, the "PCnet-ISA
+single-chip ethernet controller for ISA". This chip is used in a wide
+variety of boards from vendors such as Allied Telesis, HP, Kingston,
+and Boca. This driver is also intended to work with older AMD 7990
+designs, such as the NE1500 and NE2100, and newer 79C961. For convenience,
+I use the name LANCE to refer to all of the AMD chips, even though it properly
+refers only to the original 7990.
+
+II. Board-specific settings
+
+The driver is designed to work with the boards that use the faster
+bus-master mode, rather than in shared memory mode. (Only older designs
+have on-board buffer memory needed to support the slower shared memory mode.)
+
+Most ISA boards have jumpered settings for the I/O base, IRQ line, and DMA
+channel. This driver probes the likely base addresses:
+{0x300, 0x320, 0x340, 0x360}.
+After the board is found it generates a DMA-timeout interrupt and uses
+autoIRQ to find the IRQ line. The DMA channel can be set with the low bits
+of the otherwise-unused dev->mem_start value (aka PARAM1). If unset it is
+probed for by enabling each free DMA channel in turn and checking if
+initialization succeeds.
+
+The HP-J2405A board is an exception: with this board it is easy to read the
+EEPROM-set values for the base, IRQ, and DMA. (Of course you must already
+_know_ the base address -- that field is for writing the EEPROM.)
+
+III. Driver operation
+
+IIIa. Ring buffers
+The LANCE uses ring buffers of Tx and Rx descriptors. Each entry describes
+the base and length of the data buffer, along with status bits. The number
+of these buffers is set by LANCE_LOG_{RX,TX}_BUFFERS, which is log_2() of
+the buffer count (rather than being directly the count) for
+implementation ease. The current values are 4 (Tx) and 4 (Rx), which leads to
+ring sizes of 16 (Tx) and 16 (Rx). Increasing the number of ring entries
+needlessly uses extra space and reduces the chance that an upper layer will
+be able to reorder queued Tx packets based on priority. Decreasing the number
+of entries makes it more difficult to achieve back-to-back packet transmission
+and increases the chance that the Rx ring will overflow. (Consider the worst case
+of receiving back-to-back minimum-sized packets.)
+
+The LANCE has the capability to "chain" both Rx and Tx buffers, but this driver
+statically allocates full-sized (slightly oversized -- PKT_BUF_SZ) buffers to
+avoid the administrative overhead. For the Rx side this avoids dynamically
+allocating full-sized buffers "just in case", at the expense of a
+memory-to-memory data copy for each packet received. For most systems this
+is a good tradeoff: the Rx buffer will always be in low memory, the copy
+is inexpensive, and it primes the cache for later packet processing. For Tx
+the buffers are only used when needed as low-memory bounce buffers.
+
+IIIB. 16M memory limitations.
+For the ISA bus master mode all structures used directly by the LANCE,
+the initialization block, Rx and Tx rings, and data buffers, must be
+accessible from the ISA bus, i.e. in the lower 16M of real memory.
+This is a problem for current Linux kernels on >16M machines. The network
+devices are initialized after memory initialization, and the kernel doles out
+memory from the top of memory downward. The current solution is to have a
+special network initialization routine that's called before memory
+initialization; this will eventually be generalized for all network devices.
+As mentioned before, low-memory "bounce-buffers" are used when needed.
+
+IIIC. Synchronization
+The driver runs as two independent, single-threaded flows of control. One
+is the send-packet routine, which enforces single-threaded use by the
+dev->tbusy flag. The other thread is the interrupt handler, which is single
+threaded by the hardware and other software.
+
+The send packet thread has partial control over the Tx ring and 'dev->tbusy'
+flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
+queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
+the 'lp->tx_full' flag.
+
+The interrupt handler has exclusive control over the Rx ring and records stats
+from the Tx ring. (The Tx-done interrupt can't be selectively turned off, so
+we can't avoid the interrupt overhead by having the Tx routine reap the Tx
+stats.) After reaping the stats, it marks the queue entry as empty by setting
+the 'base' to zero. Iff the 'lp->tx_full' flag is set, it clears both the
+tx_full and tbusy flags.
+
+*/
+
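+/* A minimal sketch of the 16M test described in section IIIB above; the
+ * driver open-codes this in lance_start_xmit() rather than using a helper.
+ * An ISA bus master has only 24 address lines, so any buffer extending
+ * past physical 16M must be bounced through low memory first.
+ */
+#if 0
+static inline int example_needs_bounce(void *buf, unsigned int len)
+{
+	/* true if any byte of buf lies at or above the 16M ISA limit */
+	return (u32)isa_virt_to_bus(buf) + len > 0x01000000;
+}
+#endif
+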
+/* Set the number of Tx and Rx buffers, using Log_2(# buffers).
+   Reasonable default values are 16 Tx buffers and 16 Rx buffers.
+   That translates to 4 and 4 (16 == 2^4).
+ This is a compile-time option for efficiency.
+ */
+#ifndef LANCE_LOG_TX_BUFFERS
+#define LANCE_LOG_TX_BUFFERS 4
+#define LANCE_LOG_RX_BUFFERS 4
+#endif
+
+#define TX_RING_SIZE (1 << (LANCE_LOG_TX_BUFFERS))
+#define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
+#define TX_RING_LEN_BITS ((LANCE_LOG_TX_BUFFERS) << 29)
+
+#define RX_RING_SIZE (1 << (LANCE_LOG_RX_BUFFERS))
+#define RX_RING_MOD_MASK (RX_RING_SIZE - 1)
+#define RX_RING_LEN_BITS ((LANCE_LOG_RX_BUFFERS) << 29)
+
+#define PKT_BUF_SZ 1544
+
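+/* Worked example of the ring-length encoding above: each init-block ring
+ * pointer dword carries the 24-bit bus address of the ring in its low
+ * bits and log2(ring size) in bits 29-31.  With LANCE_LOG_RX_BUFFERS == 4,
+ * RX_RING_LEN_BITS == 4 << 29 == 0x80000000, so a ring at bus address
+ * 0x123456 (a made-up value) is described as
+ * (0x123456 & 0xffffff) | RX_RING_LEN_BITS == 0x80123456.
+ */
+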
+/* Offsets from base I/O address. */
+#define LANCE_DATA 0x10
+#define LANCE_ADDR 0x12
+#define LANCE_RESET 0x14
+#define LANCE_BUS_IF 0x16
+#define LANCE_TOTAL_SIZE 0x18
+
+#define TX_TIMEOUT (HZ/5)
+
+/* The LANCE Rx and Tx ring descriptors. */
+struct lance_rx_head {
+ s32 base;
+ s16 buf_length; /* This length is 2s complement (negative)! */
+ s16 msg_length; /* This length is "normal". */
+};
+
+struct lance_tx_head {
+ s32 base;
+ s16 length; /* Length is 2s complement (negative)! */
+ s16 misc;
+};
+
+/* The LANCE initialization block, described in databook. */
+struct lance_init_block {
+ u16 mode; /* Pre-set mode (reg. 15) */
+ u8 phys_addr[6]; /* Physical ethernet address */
+ u32 filter[2]; /* Multicast filter (unused). */
+ /* Receive and transmit ring base, along with extra bits. */
+ u32 rx_ring; /* Tx and Rx ring base pointers */
+ u32 tx_ring;
+};
+
+struct lance_private {
+ /* The Tx and Rx ring entries must be aligned on 8-byte boundaries. */
+ struct lance_rx_head rx_ring[RX_RING_SIZE];
+ struct lance_tx_head tx_ring[TX_RING_SIZE];
+ struct lance_init_block init_block;
+ const char *name;
+ /* The saved address of a sent-in-place packet/buffer, for skfree(). */
+ struct sk_buff* tx_skbuff[TX_RING_SIZE];
+ /* The addresses of receive-in-place skbuffs. */
+ struct sk_buff* rx_skbuff[RX_RING_SIZE];
+ unsigned long rx_buffs; /* Address of Rx and Tx buffers. */
+ /* Tx low-memory "bounce buffer" address. */
+ char (*tx_bounce_buffs)[PKT_BUF_SZ];
+ int cur_rx, cur_tx; /* The next free ring entry */
+ int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */
+ int dma;
+ unsigned char chip_version; /* See lance_chip_type. */
+ spinlock_t devlock;
+};
+
+#define LANCE_MUST_PAD 0x00000001
+#define LANCE_ENABLE_AUTOSELECT 0x00000002
+#define LANCE_MUST_REINIT_RING 0x00000004
+#define LANCE_MUST_UNRESET 0x00000008
+#define LANCE_HAS_MISSED_FRAME 0x00000010
+
+/* A mapping from the chip ID number to the part number and features.
+ These are from the datasheets -- in real life the '970 version
+ reportedly has the same ID as the '965. */
+static struct lance_chip_type {
+ int id_number;
+ const char *name;
+ int flags;
+} chip_table[] = {
+ {0x0000, "LANCE 7990", /* Ancient lance chip. */
+ LANCE_MUST_PAD + LANCE_MUST_UNRESET},
+ {0x0003, "PCnet/ISA 79C960", /* 79C960 PCnet/ISA. */
+ LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
+ LANCE_HAS_MISSED_FRAME},
+ {0x2260, "PCnet/ISA+ 79C961", /* 79C961 PCnet/ISA+, Plug-n-Play. */
+ LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
+ LANCE_HAS_MISSED_FRAME},
+ {0x2420, "PCnet/PCI 79C970", /* 79C970 or 79C974 PCnet-SCSI, PCI. */
+ LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
+ LANCE_HAS_MISSED_FRAME},
+ /* Bug: the PCnet/PCI actually uses the PCnet/VLB ID number, so just call
+ it the PCnet32. */
+ {0x2430, "PCnet32", /* 79C965 PCnet for VL bus. */
+ LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
+ LANCE_HAS_MISSED_FRAME},
+	{0x2621, "PCnet/PCI-II 79C970A",	/* 79C970A PCnet/PCI II. */
+ LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
+ LANCE_HAS_MISSED_FRAME},
+ {0x0, "PCnet (unknown)",
+ LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
+ LANCE_HAS_MISSED_FRAME},
+};
+
+enum {OLD_LANCE = 0, PCNET_ISA=1, PCNET_ISAP=2, PCNET_PCI=3, PCNET_VLB=4, PCNET_PCI_II=5, LANCE_UNKNOWN=6};
+
+
+/* Non-zero if lance_probe1() needs to allocate low-memory bounce buffers.
+ Assume yes until we know the memory size. */
+static unsigned char lance_need_isa_bounce_buffers = 1;
+
+static int lance_open(struct net_device *dev);
+static void lance_init_ring(struct net_device *dev, gfp_t mode);
+static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
+ struct net_device *dev);
+static int lance_rx(struct net_device *dev);
+static irqreturn_t lance_interrupt(int irq, void *dev_id);
+static int lance_close(struct net_device *dev);
+static struct net_device_stats *lance_get_stats(struct net_device *dev);
+static void set_multicast_list(struct net_device *dev);
+static void lance_tx_timeout (struct net_device *dev, unsigned int txqueue);
+
+
+
+#ifdef MODULE
+#define MAX_CARDS 8 /* Max number of interfaces (cards) per module */
+
+static struct net_device *dev_lance[MAX_CARDS];
+static int io[MAX_CARDS];
+static int dma[MAX_CARDS];
+static int irq[MAX_CARDS];
+
+module_param_hw_array(io, int, ioport, NULL, 0);
+module_param_hw_array(dma, int, dma, NULL, 0);
+module_param_hw_array(irq, int, irq, NULL, 0);
+module_param(lance_debug, int, 0);
+MODULE_PARM_DESC(io, "LANCE/PCnet I/O base address(es), required");
+MODULE_PARM_DESC(dma, "LANCE/PCnet ISA DMA channel (ignored for some devices)");
+MODULE_PARM_DESC(irq, "LANCE/PCnet IRQ number (ignored for some devices)");
+MODULE_PARM_DESC(lance_debug, "LANCE/PCnet debug level (0-7)");
+
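+/* Usage note: when built as a module the driver refuses to autoprobe, so
+ * at least one I/O base must be supplied (see init_module() below).  A
+ * hypothetical invocation for a jumpered NE2100-class card might be:
+ *
+ *	modprobe lance io=0x300 irq=5 dma=5
+ *
+ * irq and dma may be omitted on boards that support auto-detection.
+ */
+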
+int __init init_module(void)
+{
+ struct net_device *dev;
+ int this_dev, found = 0;
+
+ for (this_dev = 0; this_dev < MAX_CARDS; this_dev++) {
+ if (io[this_dev] == 0) {
+ if (this_dev != 0) /* only complain once */
+ break;
+ printk(KERN_NOTICE "lance.c: Module autoprobing not allowed. Append \"io=0xNNN\" value(s).\n");
+ return -EPERM;
+ }
+ dev = alloc_etherdev(0);
+ if (!dev)
+ break;
+ dev->irq = irq[this_dev];
+ dev->base_addr = io[this_dev];
+ dev->dma = dma[this_dev];
+ if (do_lance_probe(dev) == 0) {
+ dev_lance[found++] = dev;
+ continue;
+ }
+ free_netdev(dev);
+ break;
+ }
+ if (found != 0)
+ return 0;
+ return -ENXIO;
+}
+
+static void cleanup_card(struct net_device *dev)
+{
+ struct lance_private *lp = dev->ml_priv;
+ if (dev->dma != 4)
+ free_dma(dev->dma);
+ release_region(dev->base_addr, LANCE_TOTAL_SIZE);
+ kfree(lp->tx_bounce_buffs);
+ kfree((void*)lp->rx_buffs);
+ kfree(lp);
+}
+
+void __exit cleanup_module(void)
+{
+ int this_dev;
+
+ for (this_dev = 0; this_dev < MAX_CARDS; this_dev++) {
+ struct net_device *dev = dev_lance[this_dev];
+ if (dev) {
+ unregister_netdev(dev);
+ cleanup_card(dev);
+ free_netdev(dev);
+ }
+ }
+}
+#endif /* MODULE */
+MODULE_LICENSE("GPL");
+
+
+/* Starting in v2.1.*, the LANCE/PCnet probe is similar to the other
+ board probes now that kmalloc() can allocate ISA DMA-able regions.
+ This also allows the LANCE driver to be used as a module.
+ */
+static int __init do_lance_probe(struct net_device *dev)
+{
+ unsigned int *port;
+ int result;
+
+ if (high_memory <= phys_to_virt(16*1024*1024))
+ lance_need_isa_bounce_buffers = 0;
+
+ for (port = lance_portlist; *port; port++) {
+ int ioaddr = *port;
+ struct resource *r = request_region(ioaddr, LANCE_TOTAL_SIZE,
+ "lance-probe");
+
+ if (r) {
+ /* Detect the card with minimal I/O reads */
+ char offset14 = inb(ioaddr + 14);
+ int card;
+ for (card = 0; card < NUM_CARDS; ++card)
+ if (cards[card].id_offset14 == offset14)
+ break;
+ if (card < NUM_CARDS) {/*yes, the first byte matches*/
+ char offset15 = inb(ioaddr + 15);
+ for (card = 0; card < NUM_CARDS; ++card)
+ if ((cards[card].id_offset14 == offset14) &&
+ (cards[card].id_offset15 == offset15))
+ break;
+ }
+ if (card < NUM_CARDS) { /*Signature OK*/
+ result = lance_probe1(dev, ioaddr, 0, 0);
+ if (!result) {
+ struct lance_private *lp = dev->ml_priv;
+ int ver = lp->chip_version;
+
+ r->name = chip_table[ver].name;
+ return 0;
+ }
+ }
+ release_region(ioaddr, LANCE_TOTAL_SIZE);
+ }
+ }
+ return -ENODEV;
+}
+
+#ifndef MODULE
+struct net_device * __init lance_probe(int unit)
+{
+ struct net_device *dev = alloc_etherdev(0);
+ int err;
+
+ if (!dev)
+ return ERR_PTR(-ENODEV);
+
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+
+ err = do_lance_probe(dev);
+ if (err)
+ goto out;
+ return dev;
+out:
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+#endif
+
+static const struct net_device_ops lance_netdev_ops = {
+ .ndo_open = lance_open,
+ .ndo_start_xmit = lance_start_xmit,
+ .ndo_stop = lance_close,
+ .ndo_get_stats = lance_get_stats,
+ .ndo_set_rx_mode = set_multicast_list,
+ .ndo_tx_timeout = lance_tx_timeout,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+static int __init lance_probe1(struct net_device *dev, int ioaddr, int irq, int options)
+{
+ struct lance_private *lp;
+ unsigned long dma_channels; /* Mark spuriously-busy DMA channels */
+ int i, reset_val, lance_version;
+ const char *chipname;
+ /* Flags for specific chips or boards. */
+ unsigned char hpJ2405A = 0; /* HP ISA adaptor */
+ int hp_builtin = 0; /* HP on-board ethernet. */
+ static int did_version; /* Already printed version info. */
+ unsigned long flags;
+ int err = -ENOMEM;
+ void __iomem *bios;
+
+ /* First we look for special cases.
+ Check for HP's on-board ethernet by looking for 'HP' in the BIOS.
+	   There are two HP versions; check the BIOS for the configuration port.
+ This method provided by L. Julliard, Laurent_Julliard@grenoble.hp.com.
+ */
+ bios = ioremap(0xf00f0, 0x14);
+ if (!bios)
+ return -ENOMEM;
+ if (readw(bios + 0x12) == 0x5048) {
+ static const short ioaddr_table[] = { 0x300, 0x320, 0x340, 0x360};
+ int hp_port = (readl(bios + 1) & 1) ? 0x499 : 0x99;
+ /* We can have boards other than the built-in! Verify this is on-board. */
+ if ((inb(hp_port) & 0xc0) == 0x80 &&
+ ioaddr_table[inb(hp_port) & 3] == ioaddr)
+ hp_builtin = hp_port;
+ }
+ iounmap(bios);
+ /* We also recognize the HP Vectra on-board here, but check below. */
+ hpJ2405A = (inb(ioaddr) == 0x08 && inb(ioaddr+1) == 0x00 &&
+ inb(ioaddr+2) == 0x09);
+
+ /* Reset the LANCE. */
+ reset_val = inw(ioaddr+LANCE_RESET); /* Reset the LANCE */
+
+	/* The Un-Reset is only needed for the real NE2100, and will
+ confuse the HP board. */
+ if (!hpJ2405A)
+ outw(reset_val, ioaddr+LANCE_RESET);
+
+ outw(0x0000, ioaddr+LANCE_ADDR); /* Switch to window 0 */
+ if (inw(ioaddr+LANCE_DATA) != 0x0004)
+ return -ENODEV;
+
+ /* Get the version of the chip. */
+ outw(88, ioaddr+LANCE_ADDR);
+ if (inw(ioaddr+LANCE_ADDR) != 88) {
+ lance_version = 0;
+ } else { /* Good, it's a newer chip. */
+ int chip_version = inw(ioaddr+LANCE_DATA);
+ outw(89, ioaddr+LANCE_ADDR);
+ chip_version |= inw(ioaddr+LANCE_DATA) << 16;
+ if (lance_debug > 2)
+ printk(" LANCE chip version is %#x.\n", chip_version);
+ if ((chip_version & 0xfff) != 0x003)
+ return -ENODEV;
+ chip_version = (chip_version >> 12) & 0xffff;
+ for (lance_version = 1; chip_table[lance_version].id_number; lance_version++) {
+ if (chip_table[lance_version].id_number == chip_version)
+ break;
+ }
+ }
+
+ /* We can't allocate private data from alloc_etherdev() because it must
+	   be in an ISA DMA-able region. */
+ chipname = chip_table[lance_version].name;
+ printk("%s: %s at %#3x, ", dev->name, chipname, ioaddr);
+
+ /* There is a 16 byte station address PROM at the base address.
+ The first six bytes are the station address. */
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = inb(ioaddr + i);
+ printk("%pM", dev->dev_addr);
+
+ dev->base_addr = ioaddr;
+ /* Make certain the data structures used by the LANCE are aligned and DMAble. */
+
+ lp = kzalloc(sizeof(*lp), GFP_DMA | GFP_KERNEL);
+ if (!lp)
+ return -ENOMEM;
+ if (lance_debug > 6) printk(" (#0x%05lx)", (unsigned long)lp);
+ dev->ml_priv = lp;
+ lp->name = chipname;
+ lp->rx_buffs = (unsigned long)kmalloc_array(RX_RING_SIZE, PKT_BUF_SZ,
+ GFP_DMA | GFP_KERNEL);
+ if (!lp->rx_buffs)
+ goto out_lp;
+ if (lance_need_isa_bounce_buffers) {
+ lp->tx_bounce_buffs = kmalloc_array(TX_RING_SIZE, PKT_BUF_SZ,
+ GFP_DMA | GFP_KERNEL);
+ if (!lp->tx_bounce_buffs)
+ goto out_rx;
+ } else
+ lp->tx_bounce_buffs = NULL;
+
+ lp->chip_version = lance_version;
+ spin_lock_init(&lp->devlock);
+
+ lp->init_block.mode = 0x0003; /* Disable Rx and Tx. */
+ for (i = 0; i < 6; i++)
+ lp->init_block.phys_addr[i] = dev->dev_addr[i];
+ lp->init_block.filter[0] = 0x00000000;
+ lp->init_block.filter[1] = 0x00000000;
+ lp->init_block.rx_ring = ((u32)isa_virt_to_bus(lp->rx_ring) & 0xffffff) | RX_RING_LEN_BITS;
+ lp->init_block.tx_ring = ((u32)isa_virt_to_bus(lp->tx_ring) & 0xffffff) | TX_RING_LEN_BITS;
+
+ outw(0x0001, ioaddr+LANCE_ADDR);
+ inw(ioaddr+LANCE_ADDR);
+ outw((short) (u32) isa_virt_to_bus(&lp->init_block), ioaddr+LANCE_DATA);
+ outw(0x0002, ioaddr+LANCE_ADDR);
+ inw(ioaddr+LANCE_ADDR);
+ outw(((u32)isa_virt_to_bus(&lp->init_block)) >> 16, ioaddr+LANCE_DATA);
+ outw(0x0000, ioaddr+LANCE_ADDR);
+ inw(ioaddr+LANCE_ADDR);
+
+ if (irq) { /* Set iff PCI card. */
+ dev->dma = 4; /* Native bus-master, no DMA channel needed. */
+ dev->irq = irq;
+ } else if (hp_builtin) {
+ static const char dma_tbl[4] = {3, 5, 6, 0};
+ static const char irq_tbl[4] = {3, 4, 5, 9};
+ unsigned char port_val = inb(hp_builtin);
+ dev->dma = dma_tbl[(port_val >> 4) & 3];
+ dev->irq = irq_tbl[(port_val >> 2) & 3];
+ printk(" HP Vectra IRQ %d DMA %d.\n", dev->irq, dev->dma);
+ } else if (hpJ2405A) {
+ static const char dma_tbl[4] = {3, 5, 6, 7};
+ static const char irq_tbl[8] = {3, 4, 5, 9, 10, 11, 12, 15};
+ short reset_val = inw(ioaddr+LANCE_RESET);
+ dev->dma = dma_tbl[(reset_val >> 2) & 3];
+ dev->irq = irq_tbl[(reset_val >> 4) & 7];
+ printk(" HP J2405A IRQ %d DMA %d.\n", dev->irq, dev->dma);
+ } else if (lance_version == PCNET_ISAP) { /* The plug-n-play version. */
+ short bus_info;
+ outw(8, ioaddr+LANCE_ADDR);
+ bus_info = inw(ioaddr+LANCE_BUS_IF);
+ dev->dma = bus_info & 0x07;
+ dev->irq = (bus_info >> 4) & 0x0F;
+ } else {
+ /* The DMA channel may be passed in PARAM1. */
+ if (dev->mem_start & 0x07)
+ dev->dma = dev->mem_start & 0x07;
+ }
+
+ if (dev->dma == 0) {
+ /* Read the DMA channel status register, so that we can avoid
+ stuck DMA channels in the DMA detection below. */
+ dma_channels = ((inb(DMA1_STAT_REG) >> 4) & 0x0f) |
+ (inb(DMA2_STAT_REG) & 0xf0);
+ }
+ err = -ENODEV;
+ if (dev->irq >= 2)
+ printk(" assigned IRQ %d", dev->irq);
+ else if (lance_version != 0) { /* 7990 boards need DMA detection first. */
+ unsigned long irq_mask;
+
+ /* To auto-IRQ we enable the initialization-done and DMA error
+ interrupts. For ISA boards we get a DMA error, but VLB and PCI
+ boards will work. */
+ irq_mask = probe_irq_on();
+
+ /* Trigger an initialization just for the interrupt. */
+ outw(0x0041, ioaddr+LANCE_DATA);
+
+ mdelay(20);
+ dev->irq = probe_irq_off(irq_mask);
+ if (dev->irq)
+ printk(", probed IRQ %d", dev->irq);
+ else {
+ printk(", failed to detect IRQ line.\n");
+ goto out_tx;
+ }
+
+ /* Check for the initialization done bit, 0x0100, which means
+ that we don't need a DMA channel. */
+ if (inw(ioaddr+LANCE_DATA) & 0x0100)
+ dev->dma = 4;
+ }
+
+ if (dev->dma == 4) {
+ printk(", no DMA needed.\n");
+ } else if (dev->dma) {
+ if (request_dma(dev->dma, chipname)) {
+ printk("DMA %d allocation failed.\n", dev->dma);
+ goto out_tx;
+ } else
+ printk(", assigned DMA %d.\n", dev->dma);
+ } else { /* OK, we have to auto-DMA. */
+ for (i = 0; i < 4; i++) {
+ static const char dmas[] = { 5, 6, 7, 3 };
+ int dma = dmas[i];
+ int boguscnt;
+
+ /* Don't enable a permanently busy DMA channel, or the machine
+ will hang. */
+ if (test_bit(dma, &dma_channels))
+ continue;
+ outw(0x7f04, ioaddr+LANCE_DATA); /* Clear the memory error bits. */
+ if (request_dma(dma, chipname))
+ continue;
+
+ flags=claim_dma_lock();
+ set_dma_mode(dma, DMA_MODE_CASCADE);
+ enable_dma(dma);
+ release_dma_lock(flags);
+
+ /* Trigger an initialization. */
+ outw(0x0001, ioaddr+LANCE_DATA);
+ for (boguscnt = 100; boguscnt > 0; --boguscnt)
+ if (inw(ioaddr+LANCE_DATA) & 0x0900)
+ break;
+ if (inw(ioaddr+LANCE_DATA) & 0x0100) {
+ dev->dma = dma;
+ printk(", DMA %d.\n", dev->dma);
+ break;
+ } else {
+ flags=claim_dma_lock();
+ disable_dma(dma);
+ release_dma_lock(flags);
+ free_dma(dma);
+ }
+ }
+ if (i == 4) { /* Failure: bail. */
+ printk("DMA detection failed.\n");
+ goto out_tx;
+ }
+ }
+
+ if (lance_version == 0 && dev->irq == 0) {
+ /* We may auto-IRQ now that we have a DMA channel. */
+ /* Trigger an initialization just for the interrupt. */
+ unsigned long irq_mask;
+
+ irq_mask = probe_irq_on();
+ outw(0x0041, ioaddr+LANCE_DATA);
+
+ mdelay(40);
+ dev->irq = probe_irq_off(irq_mask);
+ if (dev->irq == 0) {
+ printk(" Failed to detect the 7990 IRQ line.\n");
+ goto out_dma;
+ }
+ printk(" Auto-IRQ detected IRQ%d.\n", dev->irq);
+ }
+
+ if (chip_table[lp->chip_version].flags & LANCE_ENABLE_AUTOSELECT) {
+ /* Turn on auto-select of media (10baseT or BNC) so that the user
+ can watch the LEDs even if the board isn't opened. */
+ outw(0x0002, ioaddr+LANCE_ADDR);
+ /* Don't touch 10base2 power bit. */
+ outw(inw(ioaddr+LANCE_BUS_IF) | 0x0002, ioaddr+LANCE_BUS_IF);
+ }
+
+ if (lance_debug > 0 && did_version++ == 0)
+ printk(version);
+
+ /* The LANCE-specific entries in the device structure. */
+ dev->netdev_ops = &lance_netdev_ops;
+ dev->watchdog_timeo = TX_TIMEOUT;
+
+ err = register_netdev(dev);
+ if (err)
+ goto out_dma;
+ return 0;
+out_dma:
+ if (dev->dma != 4)
+ free_dma(dev->dma);
+out_tx:
+ kfree(lp->tx_bounce_buffs);
+out_rx:
+ kfree((void*)lp->rx_buffs);
+out_lp:
+ kfree(lp);
+ return err;
+}
+
+
+static int
+lance_open(struct net_device *dev)
+{
+ struct lance_private *lp = dev->ml_priv;
+ int ioaddr = dev->base_addr;
+ int i;
+
+ if (dev->irq == 0 ||
+ request_irq(dev->irq, lance_interrupt, 0, dev->name, dev)) {
+ return -EAGAIN;
+ }
+
+ /* We used to allocate DMA here, but that was silly.
+ DMA lines can't be shared! We now permanently allocate them. */
+
+ /* Reset the LANCE */
+ inw(ioaddr+LANCE_RESET);
+
+ /* The DMA controller is used as a no-operation slave, "cascade mode". */
+ if (dev->dma != 4) {
+ unsigned long flags=claim_dma_lock();
+ enable_dma(dev->dma);
+ set_dma_mode(dev->dma, DMA_MODE_CASCADE);
+ release_dma_lock(flags);
+ }
+
+ /* Un-Reset the LANCE, needed only for the NE2100. */
+ if (chip_table[lp->chip_version].flags & LANCE_MUST_UNRESET)
+ outw(0, ioaddr+LANCE_RESET);
+
+ if (chip_table[lp->chip_version].flags & LANCE_ENABLE_AUTOSELECT) {
+ /* This is 79C960-specific: Turn on auto-select of media (AUI, BNC). */
+ outw(0x0002, ioaddr+LANCE_ADDR);
+ /* Only touch autoselect bit. */
+ outw(inw(ioaddr+LANCE_BUS_IF) | 0x0002, ioaddr+LANCE_BUS_IF);
+ }
+
+ if (lance_debug > 1)
+ printk("%s: lance_open() irq %d dma %d tx/rx rings %#x/%#x init %#x.\n",
+ dev->name, dev->irq, dev->dma,
+ (u32) isa_virt_to_bus(lp->tx_ring),
+ (u32) isa_virt_to_bus(lp->rx_ring),
+ (u32) isa_virt_to_bus(&lp->init_block));
+
+ lance_init_ring(dev, GFP_KERNEL);
+ /* Re-initialize the LANCE, and start it when done. */
+ outw(0x0001, ioaddr+LANCE_ADDR);
+ outw((short) (u32) isa_virt_to_bus(&lp->init_block), ioaddr+LANCE_DATA);
+ outw(0x0002, ioaddr+LANCE_ADDR);
+ outw(((u32)isa_virt_to_bus(&lp->init_block)) >> 16, ioaddr+LANCE_DATA);
+
+ outw(0x0004, ioaddr+LANCE_ADDR);
+ outw(0x0915, ioaddr+LANCE_DATA);
+
+ outw(0x0000, ioaddr+LANCE_ADDR);
+ outw(0x0001, ioaddr+LANCE_DATA);
+
+ netif_start_queue (dev);
+
+ i = 0;
+ while (i++ < 100)
+ if (inw(ioaddr+LANCE_DATA) & 0x0100)
+ break;
+ /*
+ * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
+ * reports that doing so triggers a bug in the '974.
+ */
+ outw(0x0042, ioaddr+LANCE_DATA);
+
+ if (lance_debug > 2)
+ printk("%s: LANCE open after %d ticks, init block %#x csr0 %4.4x.\n",
+ dev->name, i, (u32) isa_virt_to_bus(&lp->init_block), inw(ioaddr+LANCE_DATA));
+
+ return 0; /* Always succeed */
+}
+
+/* The LANCE has been halted for one reason or another (busmaster memory
+ arbitration error, Tx FIFO underflow, driver stopped it to reconfigure,
+ etc.). Modern LANCE variants always reload their ring-buffer
+ configuration when restarted, so we must reinitialize our ring
+ context before restarting. As part of this reinitialization,
+ find all packets still on the Tx ring and pretend that they had been
+ sent (in effect, drop the packets on the floor) - the higher-level
+ protocols will time out and retransmit. It'd be better to shuffle
+ these skbs to a temp list and then actually re-Tx them after
+ restarting the chip, but I'm too lazy to do so right now. dplatt@3do.com
+*/
+
+static void
+lance_purge_ring(struct net_device *dev)
+{
+ struct lance_private *lp = dev->ml_priv;
+ int i;
+
+ /* Free all the skbuffs in the Rx and Tx queues. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct sk_buff *skb = lp->rx_skbuff[i];
+ lp->rx_skbuff[i] = NULL;
+ lp->rx_ring[i].base = 0; /* Not owned by LANCE chip. */
+ if (skb)
+ dev_kfree_skb_any(skb);
+ }
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ if (lp->tx_skbuff[i]) {
+ dev_kfree_skb_any(lp->tx_skbuff[i]);
+ lp->tx_skbuff[i] = NULL;
+ }
+ }
+}
+
+
+/* Initialize the LANCE Rx and Tx rings. */
+static void
+lance_init_ring(struct net_device *dev, gfp_t gfp)
+{
+ struct lance_private *lp = dev->ml_priv;
+ int i;
+
+ lp->cur_rx = lp->cur_tx = 0;
+ lp->dirty_rx = lp->dirty_tx = 0;
+
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct sk_buff *skb;
+ void *rx_buff;
+
+ skb = alloc_skb(PKT_BUF_SZ, GFP_DMA | gfp);
+ lp->rx_skbuff[i] = skb;
+ if (skb)
+ rx_buff = skb->data;
+ else
+ rx_buff = kmalloc(PKT_BUF_SZ, GFP_DMA | gfp);
+ if (rx_buff == NULL)
+ lp->rx_ring[i].base = 0;
+ else
+ lp->rx_ring[i].base = (u32)isa_virt_to_bus(rx_buff) | 0x80000000;
+ lp->rx_ring[i].buf_length = -PKT_BUF_SZ;
+ }
+ /* The Tx buffer address is filled in as needed, but we do need to clear
+ the upper ownership bit. */
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ lp->tx_skbuff[i] = NULL;
+ lp->tx_ring[i].base = 0;
+ }
+
+ lp->init_block.mode = 0x0000;
+ for (i = 0; i < 6; i++)
+ lp->init_block.phys_addr[i] = dev->dev_addr[i];
+ lp->init_block.filter[0] = 0x00000000;
+ lp->init_block.filter[1] = 0x00000000;
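+	/* Each ring pointer packs a 24-bit bus address with the ring-length code (log2 of the entry count) in the top three bits. */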
+ lp->init_block.rx_ring = ((u32)isa_virt_to_bus(lp->rx_ring) & 0xffffff) | RX_RING_LEN_BITS;
+ lp->init_block.tx_ring = ((u32)isa_virt_to_bus(lp->tx_ring) & 0xffffff) | TX_RING_LEN_BITS;
+}
+
+static void
+lance_restart(struct net_device *dev, unsigned int csr0_bits, int must_reinit)
+{
+ struct lance_private *lp = dev->ml_priv;
+
+ if (must_reinit ||
+ (chip_table[lp->chip_version].flags & LANCE_MUST_REINIT_RING)) {
+ lance_purge_ring(dev);
+ lance_init_ring(dev, GFP_ATOMIC);
+ }
+ outw(0x0000, dev->base_addr + LANCE_ADDR);
+ outw(csr0_bits, dev->base_addr + LANCE_DATA);
+}
+
+
+static void lance_tx_timeout (struct net_device *dev, unsigned int txqueue)
+{
+ struct lance_private *lp = (struct lance_private *) dev->ml_priv;
+ int ioaddr = dev->base_addr;
+
+ outw (0, ioaddr + LANCE_ADDR);
+ printk ("%s: transmit timed out, status %4.4x, resetting.\n",
+ dev->name, inw (ioaddr + LANCE_DATA));
+ outw (0x0004, ioaddr + LANCE_DATA);
+ dev->stats.tx_errors++;
+#ifndef final_version
+ if (lance_debug > 3) {
+ int i;
+ printk (" Ring data dump: dirty_tx %d cur_tx %d%s cur_rx %d.",
+ lp->dirty_tx, lp->cur_tx, netif_queue_stopped(dev) ? " (full)" : "",
+ lp->cur_rx);
+ for (i = 0; i < RX_RING_SIZE; i++)
+ printk ("%s %08x %04x %04x", i & 0x3 ? "" : "\n ",
+ lp->rx_ring[i].base, -lp->rx_ring[i].buf_length,
+ lp->rx_ring[i].msg_length);
+ for (i = 0; i < TX_RING_SIZE; i++)
+ printk ("%s %08x %04x %04x", i & 0x3 ? "" : "\n ",
+ lp->tx_ring[i].base, -lp->tx_ring[i].length,
+ lp->tx_ring[i].misc);
+ printk ("\n");
+ }
+#endif
+ lance_restart (dev, 0x0043, 1);
+
+ netif_trans_update(dev); /* prevent tx timeout */
+ netif_wake_queue (dev);
+}
+
+
+static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct lance_private *lp = dev->ml_priv;
+ int ioaddr = dev->base_addr;
+ int entry;
+ unsigned long flags;
+
+ spin_lock_irqsave(&lp->devlock, flags);
+
+ if (lance_debug > 3) {
+ outw(0x0000, ioaddr+LANCE_ADDR);
+ printk("%s: lance_start_xmit() called, csr0 %4.4x.\n", dev->name,
+ inw(ioaddr+LANCE_DATA));
+ outw(0x0000, ioaddr+LANCE_DATA);
+ }
+
+ /* Fill in a Tx ring entry */
+
+ /* Mask to ring buffer boundary. */
+ entry = lp->cur_tx & TX_RING_MOD_MASK;
+
+ /* Caution: the write order is important here, set the base address
+ with the "ownership" bits last. */
+
+	/* The old LANCE chips don't automatically pad buffers to the minimum size. */
+ if (chip_table[lp->chip_version].flags & LANCE_MUST_PAD) {
+ if (skb->len < ETH_ZLEN) {
+ if (skb_padto(skb, ETH_ZLEN))
+ goto out;
+ lp->tx_ring[entry].length = -ETH_ZLEN;
+ }
+ else
+ lp->tx_ring[entry].length = -skb->len;
+ } else
+ lp->tx_ring[entry].length = -skb->len;
+
+ lp->tx_ring[entry].misc = 0x0000;
+
+ dev->stats.tx_bytes += skb->len;
+
+ /* If any part of this buffer is >16M we must copy it to a low-memory
+ buffer. */
+ if ((u32)isa_virt_to_bus(skb->data) + skb->len > 0x01000000) {
+ if (lance_debug > 5)
+ printk("%s: bouncing a high-memory packet (%#x).\n",
+ dev->name, (u32)isa_virt_to_bus(skb->data));
+ skb_copy_from_linear_data(skb, &lp->tx_bounce_buffs[entry], skb->len);
+ lp->tx_ring[entry].base =
+ ((u32)isa_virt_to_bus((lp->tx_bounce_buffs + entry)) & 0xffffff) | 0x83000000;
+ dev_consume_skb_irq(skb);
+ } else {
+ lp->tx_skbuff[entry] = skb;
+ lp->tx_ring[entry].base = ((u32)isa_virt_to_bus(skb->data) & 0xffffff) | 0x83000000;
+ }
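+	/* Both paths set 0x83000000: OWN | STP | ENP in the status byte, handing the chip a complete frame. */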
+ lp->cur_tx++;
+
+ /* Trigger an immediate send poll. */
+ outw(0x0000, ioaddr+LANCE_ADDR);
+ outw(0x0048, ioaddr+LANCE_DATA);
+
+ if ((lp->cur_tx - lp->dirty_tx) >= TX_RING_SIZE)
+ netif_stop_queue(dev);
+
+out:
+ spin_unlock_irqrestore(&lp->devlock, flags);
+ return NETDEV_TX_OK;
+}
+
+/* The LANCE interrupt handler. */
+static irqreturn_t lance_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct lance_private *lp;
+ int csr0, ioaddr, boguscnt=10;
+ int must_restart;
+
+ ioaddr = dev->base_addr;
+ lp = dev->ml_priv;
+
+ spin_lock (&lp->devlock);
+
+ outw(0x00, dev->base_addr + LANCE_ADDR);
+ while ((csr0 = inw(dev->base_addr + LANCE_DATA)) & 0x8600 &&
+ --boguscnt >= 0) {
+ /* Acknowledge all of the current interrupt sources ASAP. */
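+		/* Writing the set status bits back clears them; ~0x004f masks INEA/TDMD/STOP/STRT/INIT so only interrupt sources are acked. */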
+ outw(csr0 & ~0x004f, dev->base_addr + LANCE_DATA);
+
+ must_restart = 0;
+
+ if (lance_debug > 5)
+ printk("%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n",
+ dev->name, csr0, inw(dev->base_addr + LANCE_DATA));
+
+ if (csr0 & 0x0400) /* Rx interrupt */
+ lance_rx(dev);
+
+ if (csr0 & 0x0200) { /* Tx-done interrupt */
+ int dirty_tx = lp->dirty_tx;
+
+ while (dirty_tx < lp->cur_tx) {
+ int entry = dirty_tx & TX_RING_MOD_MASK;
+ int status = lp->tx_ring[entry].base;
+
+ if (status < 0)
+ break; /* It still hasn't been Txed */
+
+ lp->tx_ring[entry].base = 0;
+
+ if (status & 0x40000000) {
+					/* There was a major error, log it. */
+ int err_status = lp->tx_ring[entry].misc;
+ dev->stats.tx_errors++;
+ if (err_status & 0x0400)
+ dev->stats.tx_aborted_errors++;
+ if (err_status & 0x0800)
+ dev->stats.tx_carrier_errors++;
+ if (err_status & 0x1000)
+ dev->stats.tx_window_errors++;
+ if (err_status & 0x4000) {
+ /* Ackk! On FIFO errors the Tx unit is turned off! */
+ dev->stats.tx_fifo_errors++;
+ /* Remove this verbosity later! */
+ printk("%s: Tx FIFO error! Status %4.4x.\n",
+ dev->name, csr0);
+ /* Restart the chip. */
+ must_restart = 1;
+ }
+ } else {
+ if (status & 0x18000000)
+ dev->stats.collisions++;
+ dev->stats.tx_packets++;
+ }
+
+ /* We must free the original skb if it's not a data-only copy
+ in the bounce buffer. */
+ if (lp->tx_skbuff[entry]) {
+ dev_consume_skb_irq(lp->tx_skbuff[entry]);
+ lp->tx_skbuff[entry] = NULL;
+ }
+ dirty_tx++;
+ }
+
+#ifndef final_version
+ if (lp->cur_tx - dirty_tx >= TX_RING_SIZE) {
+ printk("out-of-sync dirty pointer, %d vs. %d, full=%s.\n",
+ dirty_tx, lp->cur_tx,
+ netif_queue_stopped(dev) ? "yes" : "no");
+ dirty_tx += TX_RING_SIZE;
+ }
+#endif
+
+ /* if the ring is no longer full, accept more packets */
+ if (netif_queue_stopped(dev) &&
+ dirty_tx > lp->cur_tx - TX_RING_SIZE + 2)
+ netif_wake_queue (dev);
+
+ lp->dirty_tx = dirty_tx;
+ }
+
+ /* Log misc errors. */
+ if (csr0 & 0x4000)
+ dev->stats.tx_errors++; /* Tx babble. */
+ if (csr0 & 0x1000)
+ dev->stats.rx_errors++; /* Missed a Rx frame. */
+ if (csr0 & 0x0800) {
+ printk("%s: Bus master arbitration failure, status %4.4x.\n",
+ dev->name, csr0);
+ /* Restart the chip. */
+ must_restart = 1;
+ }
+
+ if (must_restart) {
+ /* stop the chip to clear the error condition, then restart */
+ outw(0x0000, dev->base_addr + LANCE_ADDR);
+ outw(0x0004, dev->base_addr + LANCE_DATA);
+ lance_restart(dev, 0x0002, 0);
+ }
+ }
+
+ /* Clear any other interrupt, and set interrupt enable. */
+ outw(0x0000, dev->base_addr + LANCE_ADDR);
+ outw(0x7940, dev->base_addr + LANCE_DATA);
+
+ if (lance_debug > 4)
+ printk("%s: exiting interrupt, csr%d=%#4.4x.\n",
+ dev->name, inw(ioaddr + LANCE_ADDR),
+ inw(dev->base_addr + LANCE_DATA));
+
+ spin_unlock (&lp->devlock);
+ return IRQ_HANDLED;
+}
+
+static int
+lance_rx(struct net_device *dev)
+{
+ struct lance_private *lp = dev->ml_priv;
+ int entry = lp->cur_rx & RX_RING_MOD_MASK;
+ int i;
+
+ /* If we own the next entry, it's a new packet. Send it up. */
+ while (lp->rx_ring[entry].base >= 0) {
+ int status = lp->rx_ring[entry].base >> 24;
+
+ if (status != 0x03) { /* There was an error. */
+ /* There is a tricky error noted by John Murphy,
+ <murf@perftech.com> to Russ Nelson: Even with full-sized
+ buffers it's possible for a jabber packet to use two
+ buffers, with only the last correctly noting the error. */
+ if (status & 0x01) /* Only count a general error at the */
+ dev->stats.rx_errors++; /* end of a packet.*/
+ if (status & 0x20)
+ dev->stats.rx_frame_errors++;
+ if (status & 0x10)
+ dev->stats.rx_over_errors++;
+ if (status & 0x08)
+ dev->stats.rx_crc_errors++;
+ if (status & 0x04)
+ dev->stats.rx_fifo_errors++;
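+			/* Clear OWN and the error bits, keeping STP/ENP and the buffer address. */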
+ lp->rx_ring[entry].base &= 0x03ffffff;
+ }
+ else
+ {
+ /* Malloc up new buffer, compatible with net3. */
+ short pkt_len = (lp->rx_ring[entry].msg_length & 0xfff)-4;
+ struct sk_buff *skb;
+
+ if(pkt_len<60)
+ {
+ printk("%s: Runt packet!\n",dev->name);
+ dev->stats.rx_errors++;
+ }
+ else
+ {
+ skb = dev_alloc_skb(pkt_len+2);
+ if (skb == NULL)
+ {
+ printk("%s: Memory squeeze, deferring packet.\n", dev->name);
+ for (i=0; i < RX_RING_SIZE; i++)
+ if (lp->rx_ring[(entry+i) & RX_RING_MOD_MASK].base < 0)
+ break;
+
+ if (i > RX_RING_SIZE -2)
+ {
+ dev->stats.rx_dropped++;
+ lp->rx_ring[entry].base |= 0x80000000;
+ lp->cur_rx++;
+ }
+ break;
+ }
+ skb_reserve(skb,2); /* 16 byte align */
+ skb_put(skb,pkt_len); /* Make room */
+ skb_copy_to_linear_data(skb,
+ (unsigned char *)isa_bus_to_virt((lp->rx_ring[entry].base & 0x00ffffff)),
+ pkt_len);
+ skb->protocol=eth_type_trans(skb,dev);
+ netif_rx(skb);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += pkt_len;
+ }
+ }
+ /* The docs say that the buffer length isn't touched, but Andrew Boyd
+ of QNX reports that some revs of the 79C965 clear it. */
+ lp->rx_ring[entry].buf_length = -PKT_BUF_SZ;
+ lp->rx_ring[entry].base |= 0x80000000;
+ entry = (++lp->cur_rx) & RX_RING_MOD_MASK;
+ }
+
+ /* We should check that at least two ring entries are free. If not,
+ we should free one and mark stats->rx_dropped++. */
+
+ return 0;
+}
+
+static int
+lance_close(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr;
+ struct lance_private *lp = dev->ml_priv;
+
+ netif_stop_queue (dev);
+
+ if (chip_table[lp->chip_version].flags & LANCE_HAS_MISSED_FRAME) {
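+		/* CSR112 holds the missed-frame count on chips that support it. */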
+ outw(112, ioaddr+LANCE_ADDR);
+ dev->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA);
+ }
+ outw(0, ioaddr+LANCE_ADDR);
+
+ if (lance_debug > 1)
+ printk("%s: Shutting down ethercard, status was %2.2x.\n",
+ dev->name, inw(ioaddr+LANCE_DATA));
+
+ /* We stop the LANCE here -- it occasionally polls
+ memory if we don't. */
+ outw(0x0004, ioaddr+LANCE_DATA);
+
+ if (dev->dma != 4)
+ {
+ unsigned long flags=claim_dma_lock();
+ disable_dma(dev->dma);
+ release_dma_lock(flags);
+ }
+ free_irq(dev->irq, dev);
+
+ lance_purge_ring(dev);
+
+ return 0;
+}
+
+static struct net_device_stats *lance_get_stats(struct net_device *dev)
+{
+ struct lance_private *lp = dev->ml_priv;
+
+ if (chip_table[lp->chip_version].flags & LANCE_HAS_MISSED_FRAME) {
+ short ioaddr = dev->base_addr;
+ short saved_addr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&lp->devlock, flags);
+ saved_addr = inw(ioaddr+LANCE_ADDR);
+ outw(112, ioaddr+LANCE_ADDR);
+ dev->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA);
+ outw(saved_addr, ioaddr+LANCE_ADDR);
+ spin_unlock_irqrestore(&lp->devlock, flags);
+ }
+
+ return &dev->stats;
+}
+
+/* Set or clear the multicast filter for this adaptor.
+ */
+
+static void set_multicast_list(struct net_device *dev)
+{
+ short ioaddr = dev->base_addr;
+
+ outw(0, ioaddr+LANCE_ADDR);
+ outw(0x0004, ioaddr+LANCE_DATA); /* Temporarily stop the lance. */
+
+ if (dev->flags&IFF_PROMISC) {
+ outw(15, ioaddr+LANCE_ADDR);
+ outw(0x8000, ioaddr+LANCE_DATA); /* Set promiscuous mode */
+ } else {
+ short multicast_table[4];
+ int i;
+ int num_addrs=netdev_mc_count(dev);
+ if(dev->flags&IFF_ALLMULTI)
+ num_addrs=1;
+		/* FIXME: We don't use the multicast table, but rely on upper-layer filtering. */
+ memset(multicast_table, (num_addrs == 0) ? 0 : -1, sizeof(multicast_table));
+ for (i = 0; i < 4; i++) {
+ outw(8 + i, ioaddr+LANCE_ADDR);
+ outw(multicast_table[i], ioaddr+LANCE_DATA);
+ }
+ outw(15, ioaddr+LANCE_ADDR);
+ outw(0x0000, ioaddr+LANCE_DATA); /* Unset promiscuous mode */
+ }
+
+ lance_restart(dev, 0x0142, 0); /* Resume normal operation */
+
+}
+
diff --git a/drivers/net/ethernet/amd/mvme147.c b/drivers/net/ethernet/amd/mvme147.c
new file mode 100644
index 000000000..3f2e4cdd0
--- /dev/null
+++ b/drivers/net/ethernet/amd/mvme147.c
@@ -0,0 +1,200 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* mvme147.c : the Linux/mvme147/lance ethernet driver
+ *
+ * Copyright (C) 05/1998 Peter Maydell <pmaydell@chiark.greenend.org.uk>
+ * Based on the Sun Lance driver and the NetBSD HP Lance driver
+ * Uses the generic 7990.c LANCE code.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/gfp.h>
+#include <linux/pgtable.h>
+/* Used for the temporary inet entries and routing */
+#include <linux/socket.h>
+#include <linux/route.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#include <asm/io.h>
+#include <asm/mvme147hw.h>
+
+/* We have 32K of RAM for the init block and buffers. This places
+ * an upper limit on the number of buffers we can use. NetBSD uses 8 Rx
+ * buffers and 2 Tx buffers, which takes (8 + 2) * 1544 bytes.
+ */
+#define LANCE_LOG_TX_BUFFERS 1
+#define LANCE_LOG_RX_BUFFERS 3
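+/* These give 2 Tx and 8 Rx buffers, matching the NetBSD layout described above. */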
+
+#include "7990.h" /* use generic LANCE code */
+
+/* Our private data structure */
+struct m147lance_private {
+ struct lance_private lance;
+ unsigned long ram;
+};
+
+/* function prototypes... This is easy because all the grot is in the
+ * generic LANCE support. All we have to support is probing for boards,
+ * plus board-specific init, open and close actions.
+ * Oh, and we need to tell the generic code how to read and write LANCE registers...
+ */
+static int m147lance_open(struct net_device *dev);
+static int m147lance_close(struct net_device *dev);
+static void m147lance_writerap(struct lance_private *lp, unsigned short value);
+static void m147lance_writerdp(struct lance_private *lp, unsigned short value);
+static unsigned short m147lance_readrdp(struct lance_private *lp);
+
+typedef void (*writerap_t)(void *, unsigned short);
+typedef void (*writerdp_t)(void *, unsigned short);
+typedef unsigned short (*readrdp_t)(void *);
+
+static const struct net_device_ops lance_netdev_ops = {
+ .ndo_open = m147lance_open,
+ .ndo_stop = m147lance_close,
+ .ndo_start_xmit = lance_start_xmit,
+ .ndo_set_rx_mode = lance_set_multicast,
+ .ndo_tx_timeout = lance_tx_timeout,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+};
+
+/* Initialise the one and only on-board 7990 */
+struct net_device * __init mvme147lance_probe(int unit)
+{
+ struct net_device *dev;
+ static int called;
+ static const char name[] = "MVME147 LANCE";
+ struct m147lance_private *lp;
+ u_long *addr;
+ u_long address;
+ int err;
+
+ if (!MACH_IS_MVME147 || called)
+ return ERR_PTR(-ENODEV);
+ called++;
+
+ dev = alloc_etherdev(sizeof(struct m147lance_private));
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ if (unit >= 0)
+ sprintf(dev->name, "eth%d", unit);
+
+ /* Fill the dev fields */
+ dev->base_addr = (unsigned long)MVME147_LANCE_BASE;
+ dev->netdev_ops = &lance_netdev_ops;
+ dev->dma = 0;
+
+ addr = (u_long *)ETHERNET_ADDRESS;
+ address = *addr;
+ dev->dev_addr[0] = 0x08;
+ dev->dev_addr[1] = 0x00;
+ dev->dev_addr[2] = 0x3e;
+ address = address >> 8;
+ dev->dev_addr[5] = address&0xff;
+ address = address >> 8;
+ dev->dev_addr[4] = address&0xff;
+ address = address >> 8;
+ dev->dev_addr[3] = address&0xff;
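+	/* The ROM word holds a serial/address value; the MAC is Motorola's 08:00:3e OUI followed by three bytes unpacked from it. */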
+
+ printk("%s: MVME147 at 0x%08lx, irq %d, Hardware Address %pM\n",
+ dev->name, dev->base_addr, MVME147_LANCE_IRQ,
+ dev->dev_addr);
+
+ lp = netdev_priv(dev);
+ lp->ram = __get_dma_pages(GFP_ATOMIC, 3); /* 32K */
+ if (!lp->ram) {
+ printk("%s: No memory for LANCE buffers\n", dev->name);
+ free_netdev(dev);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ lp->lance.name = name;
+ lp->lance.base = dev->base_addr;
+ lp->lance.init_block = (struct lance_init_block *)(lp->ram); /* CPU addr */
+ lp->lance.lance_init_block = (struct lance_init_block *)(lp->ram); /* LANCE addr of same RAM */
+ lp->lance.busmaster_regval = LE_C3_BSWP; /* we're bigendian */
+ lp->lance.irq = MVME147_LANCE_IRQ;
+ lp->lance.writerap = (writerap_t)m147lance_writerap;
+ lp->lance.writerdp = (writerdp_t)m147lance_writerdp;
+ lp->lance.readrdp = (readrdp_t)m147lance_readrdp;
+ lp->lance.lance_log_rx_bufs = LANCE_LOG_RX_BUFFERS;
+ lp->lance.lance_log_tx_bufs = LANCE_LOG_TX_BUFFERS;
+ lp->lance.rx_ring_mod_mask = RX_RING_MOD_MASK;
+ lp->lance.tx_ring_mod_mask = TX_RING_MOD_MASK;
+
+ err = register_netdev(dev);
+ if (err) {
+ free_pages(lp->ram, 3);
+ free_netdev(dev);
+ return ERR_PTR(err);
+ }
+
+ return dev;
+}
+
+static void m147lance_writerap(struct lance_private *lp, unsigned short value)
+{
+ out_be16(lp->base + LANCE_RAP, value);
+}
+
+static void m147lance_writerdp(struct lance_private *lp, unsigned short value)
+{
+ out_be16(lp->base + LANCE_RDP, value);
+}
+
+static unsigned short m147lance_readrdp(struct lance_private *lp)
+{
+ return in_be16(lp->base + LANCE_RDP);
+}
+
+static int m147lance_open(struct net_device *dev)
+{
+ int status;
+
+ status = lance_open(dev); /* call generic lance open code */
+ if (status)
+ return status;
+ /* enable interrupts at board level. */
+ m147_pcc->lan_cntrl = 0; /* clear the interrupts (if any) */
+ m147_pcc->lan_cntrl = 0x08 | 0x04; /* Enable irq 4 */
+
+ return 0;
+}
+
+static int m147lance_close(struct net_device *dev)
+{
+	/* disable interrupts at board level */
+ m147_pcc->lan_cntrl = 0x0; /* disable interrupts */
+ lance_close(dev);
+ return 0;
+}
+
+#ifdef MODULE
+MODULE_LICENSE("GPL");
+
+static struct net_device *dev_mvme147_lance;
+int __init init_module(void)
+{
+ dev_mvme147_lance = mvme147lance_probe(-1);
+ return PTR_ERR_OR_ZERO(dev_mvme147_lance);
+}
+
+void __exit cleanup_module(void)
+{
+ struct m147lance_private *lp = netdev_priv(dev_mvme147_lance);
+ unregister_netdev(dev_mvme147_lance);
+ free_pages(lp->ram, 3);
+ free_netdev(dev_mvme147_lance);
+}
+
+#endif /* MODULE */
diff --git a/drivers/net/ethernet/amd/ni65.c b/drivers/net/ethernet/amd/ni65.c
new file mode 100644
index 000000000..c38edf6f0
--- /dev/null
+++ b/drivers/net/ethernet/amd/ni65.c
@@ -0,0 +1,1247 @@
+/*
+ * ni6510 (am7990 'lance' chip) driver for Linux-net-3
+ * BETAcode v0.71 (96/09/29) for 2.0.0 (or later)
+ * copyrights (c) 1994,1995,1996 by M.Hipp
+ *
+ * This driver can handle the old ni6510 board and the newer ni6510
+ * EtherBlaster. (It probably also works with any fully NE2100-
+ * compatible card.)
+ *
+ * driver probes: io: 0x360,0x300,0x320,0x340 / dma: 3,5,6,7
+ *
+ * This is an extension to the Linux operating system, and is covered by the
+ * same GNU General Public License that covers the Linux-kernel.
+ *
+ * comments/bugs/suggestions can be sent to:
+ * Michael Hipp
+ * email: hippm@informatik.uni-tuebingen.de
+ *
+ * sources:
+ * some things are from the 'ni6510-packet-driver for dos by Russ Nelson'
+ * and from the original drivers by D.Becker
+ *
+ * known problems:
+ * - on some PCI boards (including my own) the card/board/ISA-bridge has
+ * problems with bus master DMA. This results in lots of overruns.
+ * It may help to '#define RCV_PARANOIA_CHECK' or to #undef
+ * the XMT_ and RCV_VIA_SKB options .. this reduces driver performance.
+ * Or just play with your BIOS options to optimize ISA-DMA access.
+ * You may also want to play with the LOW_PERFORMANCE and MID_PERFORMANCE
+ * defines -> please report your experience to me
+ * - Harald reported for ASUS SP3G mainboards, that you should use
+ * the 'optimal settings' from the user's manual on page 3-12!
+ *
+ * credits:
+ * thanx to Jason Sullivan for sending me a ni6510 card!
+ * lot of debug runs with ASUS SP3G Boards (Intel Saturn) by Harald Koenig
+ *
+ * simple performance test: (486DX-33/Ni6510-EB receives from 486DX4-100/Ni6510-EB)
+ * average: FTP -> 8384421 bytes received in 8.5 seconds
+ * (no RCV_VIA_SKB,no XMT_VIA_SKB,PARANOIA_CHECK,4 XMIT BUFS, 8 RCV_BUFFS)
+ * peak: FTP -> 8384421 bytes received in 7.5 seconds
+ * (RCV_VIA_SKB,XMT_VIA_SKB,no PARANOIA_CHECK,1(!) XMIT BUF, 16 RCV BUFFS)
+ */
+
+/*
+ * 99.Jun.8: added support for /proc/net/dev byte count for xosview (HK)
+ * 96.Sept.29: virt_to_bus stuff added for new memory model
+ * 96.April.29: Added Harald Koenig's Patches (MH)
+ * 96.April.13: enhanced error handling .. more tests (MH)
+ * 96.April.5/6: a lot of performance tests. Got it stable now (hopefully) (MH)
+ * 96.April.1: (no joke ;) .. added EtherBlaster and Module support (MH)
+ * 96.Feb.19: fixed a few bugs .. cleanups .. tested for 1.3.66 (MH)
+ * hopefully no more 16MB limit
+ *
+ * 95.Nov.18: multicast tweaked (AC).
+ *
+ * 94.Aug.22: changes in xmit_intr (ack more than one xmitted-packet), ni65_send_packet (p->lock) (MH)
+ *
+ * 94.July.16: fixed bugs in recv_skb and skb-alloc stuff (MH)
+ */
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/module.h>
+#include <linux/bitops.h>
+
+#include <asm/io.h>
+#include <asm/dma.h>
+
+#include "ni65.h"
+
+/*
+ * the current settings allow acceptable performance.
+ * For 'RCV_PARANOIA_CHECK' read the 'known problems' part in
+ * the header of this file.
+ * 'Invert' the defines for maximum performance. This may cause DMA problems
+ * on some boards (e.g. on my ASUS SP3G)
+ */
+#undef XMT_VIA_SKB
+#undef RCV_VIA_SKB
+#define RCV_PARANOIA_CHECK
+
+#define MID_PERFORMANCE
+
+#if defined( LOW_PERFORMANCE )
+ static int isa0=7,isa1=7,csr80=0x0c10;
+#elif defined( MID_PERFORMANCE )
+ static int isa0=5,isa1=5,csr80=0x2810;
+#else /* high performance */
+ static int isa0=4,isa1=4,csr80=0x0017;
+#endif
+
+/*
+ * a few card/vendor specific defines
+ */
+#define NI65_ID0 0x00
+#define NI65_ID1 0x55
+#define NI65_EB_ID0 0x52
+#define NI65_EB_ID1 0x44
+#define NE2100_ID0 0x57
+#define NE2100_ID1 0x57
+
+#define PORT p->cmdr_addr
+
+/*
+ * buffer configuration
+ */
+#if 1
+#define RMDNUM 16
+#define RMDNUMMASK 0x80000000
+#else
+#define RMDNUM 8
+#define RMDNUMMASK 0x60000000 /* log2(RMDNUM)<<29 */
+#endif
+
+#if 0
+#define TMDNUM 1
+#define TMDNUMMASK 0x00000000
+#else
+#define TMDNUM 4
+#define TMDNUMMASK 0x40000000 /* log2(TMDNUM)<<29 */
+#endif
+
+/* slightly oversized */
+#define R_BUF_SIZE 1544
+#define T_BUF_SIZE 1544
+
+/*
+ * lance register defines
+ */
+#define L_DATAREG 0x00
+#define L_ADDRREG 0x02
+#define L_RESET 0x04
+#define L_CONFIG 0x05
+#define L_BUSIF 0x06
+
+/*
+ * to access the lance/am7990-regs, you have to write
+ * reg-number into L_ADDRREG, then you can access it using L_DATAREG
+ */
+#define CSR0 0x00
+#define CSR1 0x01
+#define CSR2 0x02
+#define CSR3 0x03
+
+#define INIT_RING_BEFORE_START 0x1
+#define FULL_RESET_ON_ERROR 0x2
+
+#if 0
+#define writereg(val,reg) {outw(reg,PORT+L_ADDRREG);inw(PORT+L_ADDRREG); \
+ outw(val,PORT+L_DATAREG);inw(PORT+L_DATAREG);}
+#define readreg(reg) (outw(reg,PORT+L_ADDRREG),inw(PORT+L_ADDRREG),\
+ inw(PORT+L_DATAREG))
+#if 0
+#define writedatareg(val) {outw(val,PORT+L_DATAREG);inw(PORT+L_DATAREG);}
+#else
+#define writedatareg(val) { writereg(val,CSR0); }
+#endif
+#else
+#define writereg(val,reg) {outw(reg,PORT+L_ADDRREG);outw(val,PORT+L_DATAREG);}
+#define readreg(reg) (outw(reg,PORT+L_ADDRREG),inw(PORT+L_DATAREG))
+#define writedatareg(val) { writereg(val,CSR0); }
+#endif
+
+static unsigned char ni_vendor[] = { 0x02,0x07,0x01 };
+
+static struct card {
+ unsigned char id0,id1;
+ short id_offset;
+ short total_size;
+ short cmd_offset;
+ short addr_offset;
+ unsigned char *vendor_id;
+ char *cardname;
+ unsigned long config;
+} cards[] = {
+ {
+ .id0 = NI65_ID0,
+ .id1 = NI65_ID1,
+ .id_offset = 0x0e,
+ .total_size = 0x10,
+ .cmd_offset = 0x0,
+ .addr_offset = 0x8,
+ .vendor_id = ni_vendor,
+ .cardname = "ni6510",
+ .config = 0x1,
+ },
+ {
+ .id0 = NI65_EB_ID0,
+ .id1 = NI65_EB_ID1,
+ .id_offset = 0x0e,
+ .total_size = 0x18,
+ .cmd_offset = 0x10,
+ .addr_offset = 0x0,
+ .vendor_id = ni_vendor,
+ .cardname = "ni6510 EtherBlaster",
+ .config = 0x2,
+ },
+ {
+ .id0 = NE2100_ID0,
+ .id1 = NE2100_ID1,
+ .id_offset = 0x0e,
+ .total_size = 0x18,
+ .cmd_offset = 0x10,
+ .addr_offset = 0x0,
+ .vendor_id = NULL,
+ .cardname = "generic NE2100",
+ .config = 0x0,
+ },
+};
+#define NUM_CARDS 3
+
+struct priv
+{
+ struct rmd rmdhead[RMDNUM];
+ struct tmd tmdhead[TMDNUM];
+ struct init_block ib;
+ int rmdnum;
+ int tmdnum,tmdlast;
+#ifdef RCV_VIA_SKB
+ struct sk_buff *recv_skb[RMDNUM];
+#else
+ void *recvbounce[RMDNUM];
+#endif
+#ifdef XMT_VIA_SKB
+ struct sk_buff *tmd_skb[TMDNUM];
+#endif
+ void *tmdbounce[TMDNUM];
+ int tmdbouncenum;
+ int lock,xmit_queued;
+
+ void *self;
+ int cmdr_addr;
+ int cardno;
+ int features;
+ spinlock_t ring_lock;
+};
+
+static int ni65_probe1(struct net_device *dev,int);
+static irqreturn_t ni65_interrupt(int irq, void * dev_id);
+static void ni65_recv_intr(struct net_device *dev,int);
+static void ni65_xmit_intr(struct net_device *dev,int);
+static int ni65_open(struct net_device *dev);
+static int ni65_lance_reinit(struct net_device *dev);
+static void ni65_init_lance(struct priv *p,unsigned char*,int,int);
+static netdev_tx_t ni65_send_packet(struct sk_buff *skb,
+ struct net_device *dev);
+static void ni65_timeout(struct net_device *dev, unsigned int txqueue);
+static int ni65_close(struct net_device *dev);
+static int ni65_alloc_buffer(struct net_device *dev);
+static void ni65_free_buffer(struct priv *p);
+static void set_multicast_list(struct net_device *dev);
+
+static int irqtab[] __initdata = { 9,12,15,5 }; /* irq config-translate */
+static int dmatab[] __initdata = { 0,3,5,6,7 }; /* dma config-translate and autodetect */
+
+static int debuglevel = 1;
+
+/*
+ * set 'performance' registers .. we must STOP lance for that
+ */
+static void ni65_set_performance(struct priv *p)
+{
+ writereg(CSR0_STOP | CSR0_CLRALL,CSR0); /* STOP */
+
+ if( !(cards[p->cardno].config & 0x02) )
+ return;
+
+ outw(80,PORT+L_ADDRREG);
+ if(inw(PORT+L_ADDRREG) != 80)
+ return;
+
+ writereg( (csr80 & 0x3fff) ,80); /* FIFO watermarks */
+ outw(0,PORT+L_ADDRREG);
+ outw((short)isa0,PORT+L_BUSIF); /* write ISA 0: DMA_R : isa0 * 50ns */
+ outw(1,PORT+L_ADDRREG);
+ outw((short)isa1,PORT+L_BUSIF); /* write ISA 1: DMA_W : isa1 * 50ns */
+
+ outw(CSR0,PORT+L_ADDRREG); /* switch back to CSR0 */
+}
+
+/*
+ * open interface (up)
+ */
+static int ni65_open(struct net_device *dev)
+{
+ struct priv *p = dev->ml_priv;
+ int irqval = request_irq(dev->irq, ni65_interrupt,0,
+ cards[p->cardno].cardname,dev);
+ if (irqval) {
+ printk(KERN_ERR "%s: unable to get IRQ %d (irqval=%d).\n",
+ dev->name,dev->irq, irqval);
+ return -EAGAIN;
+ }
+
+ if(ni65_lance_reinit(dev))
+ {
+ netif_start_queue(dev);
+ return 0;
+ }
+ else
+ {
+ free_irq(dev->irq,dev);
+ return -EAGAIN;
+ }
+}
+
+/*
+ * close interface (down)
+ */
+static int ni65_close(struct net_device *dev)
+{
+ struct priv *p = dev->ml_priv;
+
+ netif_stop_queue(dev);
+
+ outw(inw(PORT+L_RESET),PORT+L_RESET); /* that's the hard way */
+
+#ifdef XMT_VIA_SKB
+ {
+ int i;
+ for(i=0;i<TMDNUM;i++)
+ {
+ if(p->tmd_skb[i]) {
+ dev_kfree_skb(p->tmd_skb[i]);
+ p->tmd_skb[i] = NULL;
+ }
+ }
+ }
+#endif
+ free_irq(dev->irq,dev);
+ return 0;
+}
+
+static void cleanup_card(struct net_device *dev)
+{
+ struct priv *p = dev->ml_priv;
+ disable_dma(dev->dma);
+ free_dma(dev->dma);
+ release_region(dev->base_addr, cards[p->cardno].total_size);
+ ni65_free_buffer(p);
+}
+
+/* set: io,irq,dma or set it when calling insmod */
+static int irq;
+static int io;
+static int dma;
+
+/*
+ * Probe The Card (not the lance-chip)
+ */
+struct net_device * __init ni65_probe(int unit)
+{
+ struct net_device *dev = alloc_etherdev(0);
+ static const int ports[] = { 0x360, 0x300, 0x320, 0x340, 0 };
+ const int *port;
+ int err = 0;
+
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ if (unit >= 0) {
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+ irq = dev->irq;
+ dma = dev->dma;
+ } else {
+ dev->base_addr = io;
+ }
+
+ if (dev->base_addr > 0x1ff) { /* Check a single specified location. */
+ err = ni65_probe1(dev, dev->base_addr);
+ } else if (dev->base_addr > 0) { /* Don't probe at all. */
+ err = -ENXIO;
+ } else {
+ for (port = ports; *port && ni65_probe1(dev, *port); port++)
+ ;
+ if (!*port)
+ err = -ENODEV;
+ }
+ if (err)
+ goto out;
+
+ err = register_netdev(dev);
+ if (err)
+ goto out1;
+ return dev;
+out1:
+ cleanup_card(dev);
+out:
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+
+static const struct net_device_ops ni65_netdev_ops = {
+ .ndo_open = ni65_open,
+ .ndo_stop = ni65_close,
+ .ndo_start_xmit = ni65_send_packet,
+ .ndo_tx_timeout = ni65_timeout,
+ .ndo_set_rx_mode = set_multicast_list,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+/*
+ * this is the real card probe ..
+ */
+static int __init ni65_probe1(struct net_device *dev,int ioaddr)
+{
+ int i,j;
+ struct priv *p;
+ unsigned long flags;
+
+ dev->irq = irq;
+ dev->dma = dma;
+
+ for(i=0;i<NUM_CARDS;i++) {
+ if(!request_region(ioaddr, cards[i].total_size, cards[i].cardname))
+ continue;
+ if(cards[i].id_offset >= 0) {
+ if(inb(ioaddr+cards[i].id_offset+0) != cards[i].id0 ||
+ inb(ioaddr+cards[i].id_offset+1) != cards[i].id1) {
+ release_region(ioaddr, cards[i].total_size);
+ continue;
+ }
+ }
+		if(cards[i].vendor_id) {
+			for(j=0;j<3;j++)
+				if(inb(ioaddr+cards[i].addr_offset+j) != cards[i].vendor_id[j])
+					break;
+			/* vendor ID mismatch: release the region and try the next card type */
+			if(j < 3) {
+				release_region(ioaddr, cards[i].total_size);
+				continue;
+			}
+		}
+ break;
+ }
+ if(i == NUM_CARDS)
+ return -ENODEV;
+
+ for(j=0;j<6;j++)
+ dev->dev_addr[j] = inb(ioaddr+cards[i].addr_offset+j);
+
+ if( (j=ni65_alloc_buffer(dev)) < 0) {
+ release_region(ioaddr, cards[i].total_size);
+ return j;
+ }
+ p = dev->ml_priv;
+ p->cmdr_addr = ioaddr + cards[i].cmd_offset;
+ p->cardno = i;
+ spin_lock_init(&p->ring_lock);
+
+ printk(KERN_INFO "%s: %s found at %#3x, ", dev->name, cards[p->cardno].cardname , ioaddr);
+
+ outw(inw(PORT+L_RESET),PORT+L_RESET); /* first: reset the card */
+ if( (j=readreg(CSR0)) != 0x4) {
+ printk("failed.\n");
+ printk(KERN_ERR "%s: Can't RESET card: %04x\n", dev->name, j);
+ ni65_free_buffer(p);
+ release_region(ioaddr, cards[p->cardno].total_size);
+ return -EAGAIN;
+ }
+
+ outw(88,PORT+L_ADDRREG);
+ if(inw(PORT+L_ADDRREG) == 88) {
+ unsigned long v;
+ v = inw(PORT+L_DATAREG);
+ v <<= 16;
+ outw(89,PORT+L_ADDRREG);
+ v |= inw(PORT+L_DATAREG);
+ printk("Version %#08lx, ",v);
+ p->features = INIT_RING_BEFORE_START;
+ }
+ else {
+ printk("ancient LANCE, ");
+ p->features = 0x0;
+ }
+
+ if(test_bit(0,&cards[i].config)) {
+ dev->irq = irqtab[(inw(ioaddr+L_CONFIG)>>2)&3];
+ dev->dma = dmatab[inw(ioaddr+L_CONFIG)&3];
+ printk("IRQ %d (from card), DMA %d (from card).\n",dev->irq,dev->dma);
+ }
+ else {
+ if(dev->dma == 0) {
+ /* 'stuck test' from lance.c */
+ unsigned long dma_channels =
+ ((inb(DMA1_STAT_REG) >> 4) & 0x0f)
+ | (inb(DMA2_STAT_REG) & 0xf0);
+ for(i=1;i<5;i++) {
+ int dma = dmatab[i];
+ if(test_bit(dma,&dma_channels) || request_dma(dma,"ni6510"))
+ continue;
+
+ flags=claim_dma_lock();
+ disable_dma(dma);
+ set_dma_mode(dma,DMA_MODE_CASCADE);
+ enable_dma(dma);
+ release_dma_lock(flags);
+
+ ni65_init_lance(p,dev->dev_addr,0,0); /* trigger memory access */
+
+ flags=claim_dma_lock();
+ disable_dma(dma);
+ free_dma(dma);
+ release_dma_lock(flags);
+
+ if(readreg(CSR0) & CSR0_IDON)
+ break;
+ }
+ if(i == 5) {
+ printk("failed.\n");
+ printk(KERN_ERR "%s: Can't detect DMA channel!\n", dev->name);
+ ni65_free_buffer(p);
+ release_region(ioaddr, cards[p->cardno].total_size);
+ return -EAGAIN;
+ }
+ dev->dma = dmatab[i];
+ printk("DMA %d (autodetected), ",dev->dma);
+ }
+ else
+ printk("DMA %d (assigned), ",dev->dma);
+
+ if(dev->irq < 2)
+ {
+ unsigned long irq_mask;
+
+ ni65_init_lance(p,dev->dev_addr,0,0);
+ irq_mask = probe_irq_on();
+ writereg(CSR0_INIT|CSR0_INEA,CSR0); /* trigger interrupt */
+ msleep(20);
+ dev->irq = probe_irq_off(irq_mask);
+ if(!dev->irq)
+ {
+ printk("Failed to detect IRQ line!\n");
+ ni65_free_buffer(p);
+ release_region(ioaddr, cards[p->cardno].total_size);
+ return -EAGAIN;
+ }
+ printk("IRQ %d (autodetected).\n",dev->irq);
+ }
+ else
+ printk("IRQ %d (assigned).\n",dev->irq);
+ }
+
+ if(request_dma(dev->dma, cards[p->cardno].cardname ) != 0)
+ {
+ printk(KERN_ERR "%s: Can't request dma-channel %d\n",dev->name,(int) dev->dma);
+ ni65_free_buffer(p);
+ release_region(ioaddr, cards[p->cardno].total_size);
+ return -EAGAIN;
+ }
+
+ dev->base_addr = ioaddr;
+ dev->netdev_ops = &ni65_netdev_ops;
+ dev->watchdog_timeo = HZ/2;
+
+ return 0; /* everything is OK */
+}
+
+/*
+ * set lance register and trigger init
+ */
+static void ni65_init_lance(struct priv *p,unsigned char *daddr,int filter,int mode)
+{
+ int i;
+ u32 pib;
+
+ writereg(CSR0_CLRALL|CSR0_STOP,CSR0);
+
+ for(i=0;i<6;i++)
+ p->ib.eaddr[i] = daddr[i];
+
+ for(i=0;i<8;i++)
+ p->ib.filter[i] = filter;
+ p->ib.mode = mode;
+
+ p->ib.trp = (u32) isa_virt_to_bus(p->tmdhead) | TMDNUMMASK;
+ p->ib.rrp = (u32) isa_virt_to_bus(p->rmdhead) | RMDNUMMASK;
+ writereg(0,CSR3); /* busmaster/no word-swap */
+ pib = (u32) isa_virt_to_bus(&p->ib);
+ writereg(pib & 0xffff,CSR1);
+ writereg(pib >> 16,CSR2);
+
+ writereg(CSR0_INIT,CSR0); /* this changes L_ADDRREG to CSR0 */
+
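+	/* Poll up to ~128 ms (32 x 4 ms) for IDON (init done) or MERR (memory error). */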
+ for(i=0;i<32;i++)
+ {
+ mdelay(4);
+ if(inw(PORT+L_DATAREG) & (CSR0_IDON | CSR0_MERR) )
+ break; /* init ok ? */
+ }
+}
+
+/*
+ * allocate memory area and check the 16MB border
+ */
+static void *ni65_alloc_mem(struct net_device *dev,char *what,int size,int type)
+{
+ struct sk_buff *skb=NULL;
+ unsigned char *ptr;
+ void *ret;
+
+ if(type) {
+ ret = skb = alloc_skb(2+16+size,GFP_KERNEL|GFP_DMA);
+ if(!skb) {
+ printk(KERN_WARNING "%s: unable to allocate %s memory.\n",dev->name,what);
+ return NULL;
+ }
+ skb_reserve(skb,2+16);
+ skb_put(skb,R_BUF_SIZE); /* grab the whole space .. (not necessary) */
+ ptr = skb->data;
+ }
+ else {
+ ret = ptr = kmalloc(T_BUF_SIZE,GFP_KERNEL | GFP_DMA);
+ if(!ret)
+ return NULL;
+ }
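+	/* ISA bus-master DMA can address only the first 16 MB of physical memory. */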
+ if( (u32) virt_to_phys(ptr+size) > 0x1000000) {
+ printk(KERN_WARNING "%s: unable to allocate %s memory in lower 16MB!\n",dev->name,what);
+ if(type)
+ kfree_skb(skb);
+ else
+ kfree(ptr);
+ return NULL;
+ }
+ return ret;
+}
+
+/*
+ * allocate all memory structures .. send/recv buffers etc ...
+ */
+static int ni65_alloc_buffer(struct net_device *dev)
+{
+ unsigned char *ptr;
+ struct priv *p;
+ int i;
+
+ /*
+	 * we need 8-byte aligned memory ..
+ */
+ ptr = ni65_alloc_mem(dev,"BUFFER",sizeof(struct priv)+8,0);
+ if(!ptr)
+ return -ENOMEM;
+
+ p = dev->ml_priv = (struct priv *) (((unsigned long) ptr + 7) & ~0x7);
+ memset((char *)p, 0, sizeof(struct priv));
+ p->self = ptr;
+
+ for(i=0;i<TMDNUM;i++)
+ {
+#ifdef XMT_VIA_SKB
+ p->tmd_skb[i] = NULL;
+#endif
+ p->tmdbounce[i] = ni65_alloc_mem(dev,"XMIT",T_BUF_SIZE,0);
+ if(!p->tmdbounce[i]) {
+ ni65_free_buffer(p);
+ return -ENOMEM;
+ }
+ }
+
+ for(i=0;i<RMDNUM;i++)
+ {
+#ifdef RCV_VIA_SKB
+ p->recv_skb[i] = ni65_alloc_mem(dev,"RECV",R_BUF_SIZE,1);
+ if(!p->recv_skb[i]) {
+ ni65_free_buffer(p);
+ return -ENOMEM;
+ }
+#else
+ p->recvbounce[i] = ni65_alloc_mem(dev,"RECV",R_BUF_SIZE,0);
+ if(!p->recvbounce[i]) {
+ ni65_free_buffer(p);
+ return -ENOMEM;
+ }
+#endif
+ }
+
+ return 0; /* everything is OK */
+}
+
+/*
+ * free buffers and private struct
+ */
+static void ni65_free_buffer(struct priv *p)
+{
+ int i;
+
+ if(!p)
+ return;
+
+ for(i=0;i<TMDNUM;i++) {
+ kfree(p->tmdbounce[i]);
+#ifdef XMT_VIA_SKB
+ dev_kfree_skb(p->tmd_skb[i]);
+#endif
+ }
+
+ for(i=0;i<RMDNUM;i++)
+ {
+#ifdef RCV_VIA_SKB
+ dev_kfree_skb(p->recv_skb[i]);
+#else
+ kfree(p->recvbounce[i]);
+#endif
+ }
+ kfree(p->self);
+}
+
+
+/*
+ * stop and (re)start lance .. e.g after an error
+ */
+static void ni65_stop_start(struct net_device *dev,struct priv *p)
+{
+ int csr0 = CSR0_INEA;
+
+ writedatareg(CSR0_STOP);
+
+ if(debuglevel > 1)
+ printk(KERN_DEBUG "ni65_stop_start\n");
+
+ if(p->features & INIT_RING_BEFORE_START) {
+ int i;
+#ifdef XMT_VIA_SKB
+ struct sk_buff *skb_save[TMDNUM];
+#endif
+ unsigned long buffer[TMDNUM];
+ short blen[TMDNUM];
+
+ if(p->xmit_queued) {
+ while(1) {
+ if((p->tmdhead[p->tmdlast].u.s.status & XMIT_OWN))
+ break;
+ p->tmdlast = (p->tmdlast + 1) & (TMDNUM-1);
+ if(p->tmdlast == p->tmdnum)
+ break;
+ }
+ }
+
+ for(i=0;i<TMDNUM;i++) {
+ struct tmd *tmdp = p->tmdhead + i;
+#ifdef XMT_VIA_SKB
+ skb_save[i] = p->tmd_skb[i];
+#endif
+ buffer[i] = (u32) isa_bus_to_virt(tmdp->u.buffer);
+ blen[i] = tmdp->blen;
+ tmdp->u.s.status = 0x0;
+ }
+
+ for(i=0;i<RMDNUM;i++) {
+ struct rmd *rmdp = p->rmdhead + i;
+ rmdp->u.s.status = RCV_OWN;
+ }
+ p->tmdnum = p->xmit_queued = 0;
+ writedatareg(CSR0_STRT | csr0);
+
+ for(i=0;i<TMDNUM;i++) {
+ int num = (i + p->tmdlast) & (TMDNUM-1);
+ p->tmdhead[i].u.buffer = (u32) isa_virt_to_bus((char *)buffer[num]); /* status is part of buffer field */
+ p->tmdhead[i].blen = blen[num];
+ if(p->tmdhead[i].u.s.status & XMIT_OWN) {
+ p->tmdnum = (p->tmdnum + 1) & (TMDNUM-1);
+ p->xmit_queued = 1;
+ writedatareg(CSR0_TDMD | CSR0_INEA | csr0);
+ }
+#ifdef XMT_VIA_SKB
+ p->tmd_skb[i] = skb_save[num];
+#endif
+ }
+ p->rmdnum = p->tmdlast = 0;
+ if(!p->lock)
+ if (p->tmdnum || !p->xmit_queued)
+ netif_wake_queue(dev);
+ netif_trans_update(dev); /* prevent tx timeout */
+ }
+ else
+ writedatareg(CSR0_STRT | csr0);
+}
+
+/*
+ * init lance (write init-values .. init-buffers) (open-helper)
+ */
+static int ni65_lance_reinit(struct net_device *dev)
+{
+ int i;
+ struct priv *p = dev->ml_priv;
+ unsigned long flags;
+
+ p->lock = 0;
+ p->xmit_queued = 0;
+
+ flags=claim_dma_lock();
+ disable_dma(dev->dma); /* I've never worked with dma, but we do it like the packetdriver */
+ set_dma_mode(dev->dma,DMA_MODE_CASCADE);
+ enable_dma(dev->dma);
+ release_dma_lock(flags);
+
+ outw(inw(PORT+L_RESET),PORT+L_RESET); /* first: reset the card */
+ if( (i=readreg(CSR0) ) != 0x4)
+ {
+ printk(KERN_ERR "%s: can't RESET %s card: %04x\n",dev->name,
+ cards[p->cardno].cardname,(int) i);
+ flags=claim_dma_lock();
+ disable_dma(dev->dma);
+ release_dma_lock(flags);
+ return 0;
+ }
+
+ p->rmdnum = p->tmdnum = p->tmdlast = p->tmdbouncenum = 0;
+ for(i=0;i<TMDNUM;i++)
+ {
+ struct tmd *tmdp = p->tmdhead + i;
+#ifdef XMT_VIA_SKB
+ if(p->tmd_skb[i]) {
+ dev_kfree_skb(p->tmd_skb[i]);
+ p->tmd_skb[i] = NULL;
+ }
+#endif
+ tmdp->u.buffer = 0x0;
+ tmdp->u.s.status = XMIT_START | XMIT_END;
+ tmdp->blen = tmdp->status2 = 0;
+ }
+
+ for(i=0;i<RMDNUM;i++)
+ {
+ struct rmd *rmdp = p->rmdhead + i;
+#ifdef RCV_VIA_SKB
+ rmdp->u.buffer = (u32) isa_virt_to_bus(p->recv_skb[i]->data);
+#else
+ rmdp->u.buffer = (u32) isa_virt_to_bus(p->recvbounce[i]);
+#endif
+ rmdp->blen = -(R_BUF_SIZE-8);
+ rmdp->mlen = 0;
+ rmdp->u.s.status = RCV_OWN;
+ }
+
+ if(dev->flags & IFF_PROMISC)
+ ni65_init_lance(p,dev->dev_addr,0x00,M_PROM);
+ else if (netdev_mc_count(dev) || dev->flags & IFF_ALLMULTI)
+ ni65_init_lance(p,dev->dev_addr,0xff,0x0);
+ else
+ ni65_init_lance(p,dev->dev_addr,0x00,0x00);
+
+ /*
+	 * ni65_init_lance() leaves L_ADDRREG pointing at CSR0.
+	 * From now on we never change L_ADDRREG, so CSR0 is always selected.
+ */
+
+ if(inw(PORT+L_DATAREG) & CSR0_IDON) {
+ ni65_set_performance(p);
+ /* init OK: start lance , enable interrupts */
+ writedatareg(CSR0_CLRALL | CSR0_INEA | CSR0_STRT);
+ return 1; /* ->OK */
+ }
+ printk(KERN_ERR "%s: can't init lance, status: %04x\n",dev->name,(int) inw(PORT+L_DATAREG));
+ flags=claim_dma_lock();
+ disable_dma(dev->dma);
+ release_dma_lock(flags);
+ return 0; /* ->Error */
+}
+
+/*
+ * interrupt handler
+ */
+static irqreturn_t ni65_interrupt(int irq, void * dev_id)
+{
+ int csr0 = 0;
+ struct net_device *dev = dev_id;
+ struct priv *p;
+ int bcnt = 32;
+
+ p = dev->ml_priv;
+
+ spin_lock(&p->ring_lock);
+
+ while(--bcnt) {
+ csr0 = inw(PORT+L_DATAREG);
+
+#if 0
+ writedatareg( (csr0 & CSR0_CLRALL) ); /* ack interrupts, disable int. */
+#else
+ writedatareg( (csr0 & CSR0_CLRALL) | CSR0_INEA ); /* ack interrupts, interrupts enabled */
+#endif
+
+ if(!(csr0 & (CSR0_ERR | CSR0_RINT | CSR0_TINT)))
+ break;
+
+ if(csr0 & CSR0_RINT) /* RECV-int? */
+ ni65_recv_intr(dev,csr0);
+ if(csr0 & CSR0_TINT) /* XMIT-int? */
+ ni65_xmit_intr(dev,csr0);
+
+ if(csr0 & CSR0_ERR)
+ {
+ if(debuglevel > 1)
+ printk(KERN_ERR "%s: general error: %04x.\n",dev->name,csr0);
+ if(csr0 & CSR0_BABL)
+ dev->stats.tx_errors++;
+ if(csr0 & CSR0_MISS) {
+ int i;
+ for(i=0;i<RMDNUM;i++)
+ printk("%02x ",p->rmdhead[i].u.s.status);
+ printk("\n");
+ dev->stats.rx_errors++;
+ }
+ if(csr0 & CSR0_MERR) {
+ if(debuglevel > 1)
+ printk(KERN_ERR "%s: Ooops .. memory error: %04x.\n",dev->name,csr0);
+ ni65_stop_start(dev,p);
+ }
+ }
+ }
+
+#ifdef RCV_PARANOIA_CHECK
+{
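+	/*
+	 * Heuristic resync of the receive ring: if a descriptor behind
+	 * p->rmdnum is already host-owned, the driver's ring index has
+	 * drifted (a symptom of the DMA problems described in the header);
+	 * jump to the first host-owned entry and drain from there.
+	 */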
+ int j;
+ for(j=0;j<RMDNUM;j++)
+ {
+ int i, num2;
+ for(i=RMDNUM-1;i>0;i--) {
+ num2 = (p->rmdnum + i) & (RMDNUM-1);
+ if(!(p->rmdhead[num2].u.s.status & RCV_OWN))
+ break;
+ }
+
+ if(i) {
+ int k, num1;
+ for(k=0;k<RMDNUM;k++) {
+ num1 = (p->rmdnum + k) & (RMDNUM-1);
+ if(!(p->rmdhead[num1].u.s.status & RCV_OWN))
+ break;
+ }
+ if(!k)
+ break;
+
+ if(debuglevel > 0)
+ {
+ char buf[256],*buf1;
+ buf1 = buf;
+ for(k=0;k<RMDNUM;k++) {
+ sprintf(buf1,"%02x ",(p->rmdhead[k].u.s.status)); /* & RCV_OWN) ); */
+ buf1 += 3;
+ }
+ *buf1 = 0;
+ printk(KERN_ERR "%s: Ooops, receive ring corrupted %2d %2d | %s\n",dev->name,p->rmdnum,i,buf);
+ }
+
+ p->rmdnum = num1;
+ ni65_recv_intr(dev,csr0);
+ if((p->rmdhead[num2].u.s.status & RCV_OWN))
+ break; /* ok, we are 'in sync' again */
+ }
+ else
+ break;
+ }
+}
+#endif
+
+ if( (csr0 & (CSR0_RXON | CSR0_TXON)) != (CSR0_RXON | CSR0_TXON) ) {
+ printk(KERN_DEBUG "%s: RX or TX was offline -> restart\n",dev->name);
+ ni65_stop_start(dev,p);
+ }
+ else
+ writedatareg(CSR0_INEA);
+
+ spin_unlock(&p->ring_lock);
+ return IRQ_HANDLED;
+}
+
+/*
+ * We have received an Xmit-Interrupt ..
+ * send a new packet if necessary
+ */
+static void ni65_xmit_intr(struct net_device *dev,int csr0)
+{
+ struct priv *p = dev->ml_priv;
+
+ while(p->xmit_queued)
+ {
+ struct tmd *tmdp = p->tmdhead + p->tmdlast;
+ int tmdstat = tmdp->u.s.status;
+
+ if(tmdstat & XMIT_OWN)
+ break;
+
+ if(tmdstat & XMIT_ERR)
+ {
+#if 0
+ if(tmdp->status2 & XMIT_TDRMASK && debuglevel > 3)
+ printk(KERN_ERR "%s: tdr-problems (e.g. no resistor)\n",dev->name);
+#endif
+ /* checking some errors */
+ if(tmdp->status2 & XMIT_RTRY)
+ dev->stats.tx_aborted_errors++;
+ if(tmdp->status2 & XMIT_LCAR)
+ dev->stats.tx_carrier_errors++;
+ if(tmdp->status2 & (XMIT_BUFF | XMIT_UFLO )) {
+ /* this stops the xmitter */
+ dev->stats.tx_fifo_errors++;
+ if(debuglevel > 0)
+ printk(KERN_ERR "%s: Xmit FIFO/BUFF error\n",dev->name);
+ if(p->features & INIT_RING_BEFORE_START) {
+ tmdp->u.s.status = XMIT_OWN | XMIT_START | XMIT_END; /* test: resend this frame */
+ ni65_stop_start(dev,p);
+ break; /* no more Xmit processing .. */
+ }
+ else
+ ni65_stop_start(dev,p);
+ }
+ if(debuglevel > 2)
+ printk(KERN_ERR "%s: xmit-error: %04x %02x-%04x\n",dev->name,csr0,(int) tmdstat,(int) tmdp->status2);
+ if(!(csr0 & CSR0_BABL)) /* don't count errors twice */
+ dev->stats.tx_errors++;
+ tmdp->status2 = 0;
+ }
+ else {
+ dev->stats.tx_bytes -= (short)(tmdp->blen);
+ dev->stats.tx_packets++;
+ }
+
+#ifdef XMT_VIA_SKB
+ if(p->tmd_skb[p->tmdlast]) {
+ dev_consume_skb_irq(p->tmd_skb[p->tmdlast]);
+ p->tmd_skb[p->tmdlast] = NULL;
+ }
+#endif
+
+ p->tmdlast = (p->tmdlast + 1) & (TMDNUM-1);
+ if(p->tmdlast == p->tmdnum)
+ p->xmit_queued = 0;
+ }
+ netif_wake_queue(dev);
+}
+
+/*
+ * We have received a packet
+ */
+static void ni65_recv_intr(struct net_device *dev,int csr0)
+{
+ struct rmd *rmdp;
+ int rmdstat,len;
+ int cnt=0;
+ struct priv *p = dev->ml_priv;
+
+ rmdp = p->rmdhead + p->rmdnum;
+ while(!( (rmdstat = rmdp->u.s.status) & RCV_OWN))
+ {
+ cnt++;
+ if( (rmdstat & (RCV_START | RCV_END | RCV_ERR)) != (RCV_START | RCV_END) ) /* error or oversized? */
+ {
+ if(!(rmdstat & RCV_ERR)) {
+ if(rmdstat & RCV_START)
+ {
+ dev->stats.rx_length_errors++;
+ printk(KERN_ERR "%s: recv, packet too long: %d\n",dev->name,rmdp->mlen & 0x0fff);
+ }
+ }
+ else {
+ if(debuglevel > 2)
+ printk(KERN_ERR "%s: receive-error: %04x, lance-status: %04x/%04x\n",
+ dev->name,(int) rmdstat,csr0,(int) inw(PORT+L_DATAREG) );
+ if(rmdstat & RCV_FRAM)
+ dev->stats.rx_frame_errors++;
+ if(rmdstat & RCV_OFLO)
+ dev->stats.rx_over_errors++;
+ if(rmdstat & RCV_CRC)
+ dev->stats.rx_crc_errors++;
+ if(rmdstat & RCV_BUF_ERR)
+ dev->stats.rx_fifo_errors++;
+ }
+ if(!(csr0 & CSR0_MISS)) /* don't count errors twice */
+ dev->stats.rx_errors++;
+ }
+ else if( (len = (rmdp->mlen & 0x0fff) - 4) >= 60)
+ {
+#ifdef RCV_VIA_SKB
+ struct sk_buff *skb = alloc_skb(R_BUF_SIZE+2+16,GFP_ATOMIC);
+ if (skb)
+ skb_reserve(skb,16);
+#else
+ struct sk_buff *skb = netdev_alloc_skb(dev, len + 2);
+#endif
+ if(skb)
+ {
+ skb_reserve(skb,2);
+#ifdef RCV_VIA_SKB
+ if( (unsigned long) (skb->data + R_BUF_SIZE) > 0x1000000) {
+ skb_put(skb,len);
+ skb_copy_to_linear_data(skb, (unsigned char *)(p->recv_skb[p->rmdnum]->data),len);
+ }
+ else {
+ struct sk_buff *skb1 = p->recv_skb[p->rmdnum];
+ skb_put(skb,R_BUF_SIZE);
+ p->recv_skb[p->rmdnum] = skb;
+ rmdp->u.buffer = (u32) isa_virt_to_bus(skb->data);
+ skb = skb1;
+ skb_trim(skb,len);
+ }
+#else
+ skb_put(skb,len);
+ skb_copy_to_linear_data(skb, (unsigned char *) p->recvbounce[p->rmdnum],len);
+#endif
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += len;
+ skb->protocol=eth_type_trans(skb,dev);
+ netif_rx(skb);
+ }
+ else
+ {
+ printk(KERN_ERR "%s: can't alloc new sk_buff\n",dev->name);
+ dev->stats.rx_dropped++;
+ }
+ }
+ else {
+ printk(KERN_INFO "%s: received runt packet\n",dev->name);
+ dev->stats.rx_errors++;
+ }
+ rmdp->blen = -(R_BUF_SIZE-8);
+ rmdp->mlen = 0;
+ rmdp->u.s.status = RCV_OWN; /* change owner */
+ p->rmdnum = (p->rmdnum + 1) & (RMDNUM-1);
+ rmdp = p->rmdhead + p->rmdnum;
+ }
+}
+
+/*
+ * kick xmitter ..
+ */
+
+static void ni65_timeout(struct net_device *dev, unsigned int txqueue)
+{
+ int i;
+ struct priv *p = dev->ml_priv;
+
+	printk(KERN_ERR "%s: xmitter timed out, trying to restart!\n",dev->name);
+ for(i=0;i<TMDNUM;i++)
+ printk("%02x ",p->tmdhead[i].u.s.status);
+ printk("\n");
+ ni65_lance_reinit(dev);
+ netif_trans_update(dev); /* prevent tx timeout */
+ netif_wake_queue(dev);
+}
+
+/*
+ * Send a packet
+ */
+
+static netdev_tx_t ni65_send_packet(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct priv *p = dev->ml_priv;
+
+ netif_stop_queue(dev);
+
+ if (test_and_set_bit(0, (void*)&p->lock)) {
+ printk(KERN_ERR "%s: Queue was locked.\n", dev->name);
+ return NETDEV_TX_BUSY;
+ }
+
+ {
+ short len = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
+ struct tmd *tmdp;
+ unsigned long flags;
+
+#ifdef XMT_VIA_SKB
+ if( (unsigned long) (skb->data + skb->len) > 0x1000000) {
+#endif
+
+ skb_copy_from_linear_data(skb, p->tmdbounce[p->tmdbouncenum],
+ skb->len > T_BUF_SIZE ? T_BUF_SIZE :
+ skb->len);
+ if (len > skb->len)
+ memset((char *)p->tmdbounce[p->tmdbouncenum]+skb->len, 0, len-skb->len);
+ dev_kfree_skb (skb);
+
+ spin_lock_irqsave(&p->ring_lock, flags);
+ tmdp = p->tmdhead + p->tmdnum;
+ tmdp->u.buffer = (u32) isa_virt_to_bus(p->tmdbounce[p->tmdbouncenum]);
+ p->tmdbouncenum = (p->tmdbouncenum + 1) & (TMDNUM - 1);
+
+#ifdef XMT_VIA_SKB
+ }
+ else {
+ spin_lock_irqsave(&p->ring_lock, flags);
+
+ tmdp = p->tmdhead + p->tmdnum;
+ tmdp->u.buffer = (u32) isa_virt_to_bus(skb->data);
+ p->tmd_skb[p->tmdnum] = skb;
+ }
+#endif
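+		/* Again, the LANCE wants the length as a negative two's-complement value. */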
+ tmdp->blen = -len;
+
+ tmdp->u.s.status = XMIT_OWN | XMIT_START | XMIT_END;
+ writedatareg(CSR0_TDMD | CSR0_INEA); /* enable xmit & interrupt */
+
+ p->xmit_queued = 1;
+ p->tmdnum = (p->tmdnum + 1) & (TMDNUM-1);
+
+ if(p->tmdnum != p->tmdlast)
+ netif_wake_queue(dev);
+
+ p->lock = 0;
+
+ spin_unlock_irqrestore(&p->ring_lock, flags);
+ }
+
+ return NETDEV_TX_OK;
+}
+
+static void set_multicast_list(struct net_device *dev)
+{
+ if(!ni65_lance_reinit(dev))
+ printk(KERN_ERR "%s: Can't switch card into MC mode!\n",dev->name);
+ netif_wake_queue(dev);
+}
+
+#ifdef MODULE
+static struct net_device *dev_ni65;
+
+module_param_hw(irq, int, irq, 0);
+module_param_hw(io, int, ioport, 0);
+module_param_hw(dma, int, dma, 0);
+MODULE_PARM_DESC(irq, "ni6510 IRQ number (ignored for some cards)");
+MODULE_PARM_DESC(io, "ni6510 I/O base address");
+MODULE_PARM_DESC(dma, "ni6510 ISA DMA channel (ignored for some cards)");
+
+int __init init_module(void)
+{
+ dev_ni65 = ni65_probe(-1);
+ return PTR_ERR_OR_ZERO(dev_ni65);
+}
+
+void __exit cleanup_module(void)
+{
+ unregister_netdev(dev_ni65);
+ cleanup_card(dev_ni65);
+ free_netdev(dev_ni65);
+}
+#endif /* MODULE */
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/amd/ni65.h b/drivers/net/ethernet/amd/ni65.h
new file mode 100644
index 000000000..e6217e35e
--- /dev/null
+++ b/drivers/net/ethernet/amd/ni65.h
@@ -0,0 +1,121 @@
+/* am7990 (lance) definitions
+ *
+ * This is an extension to the Linux operating system, and is covered by
+ * the same GNU General Public License that covers that work.
+ *
+ * Michael Hipp
+ * email: mhipp@student.uni-tuebingen.de
+ *
+ * sources: (mail me or ask archie if you need them)
+ * crynwr-packet-driver
+ */
+
+/*
+ * Control and Status Register 0 (CSR0) bit definitions
+ * (R=Readable) (W=Writeable) (S=Set on write) (C=Clear on write)
+ *
+ */
+
+#define CSR0_ERR 0x8000 /* Error summary (R) */
+#define CSR0_BABL 0x4000 /* Babble transmitter timeout error (RC) */
+#define CSR0_CERR 0x2000 /* Collision Error (RC) */
+#define CSR0_MISS 0x1000 /* Missed packet (RC) */
+#define CSR0_MERR 0x0800 /* Memory Error (RC) */
+#define CSR0_RINT 0x0400 /* Receiver Interrupt (RC) */
+#define CSR0_TINT 0x0200 /* Transmit Interrupt (RC) */
+#define CSR0_IDON 0x0100 /* Initialization Done (RC) */
+#define CSR0_INTR 0x0080 /* Interrupt Flag (R) */
+#define CSR0_INEA 0x0040 /* Interrupt Enable (RW) */
+#define CSR0_RXON 0x0020 /* Receiver on (R) */
+#define CSR0_TXON 0x0010 /* Transmitter on (R) */
+#define CSR0_TDMD 0x0008 /* Transmit Demand (RS) */
+#define CSR0_STOP 0x0004 /* Stop (RS) */
+#define CSR0_STRT 0x0002 /* Start (RS) */
+#define CSR0_INIT 0x0001 /* Initialize (RS) */
+
+#define CSR0_CLRALL 0x7f00 /* mask for all clearable bits */
+/*
+ * Initialization Block Mode operation Bit Definitions.
+ */
+
+#define M_PROM 0x8000 /* Promiscuous Mode */
+#define M_INTL 0x0040 /* Internal Loopback */
+#define M_DRTY 0x0020 /* Disable Retry */
+#define M_COLL 0x0010 /* Force Collision */
+#define M_DTCR 0x0008 /* Disable Transmit CRC */
+#define M_LOOP 0x0004 /* Loopback */
+#define M_DTX 0x0002 /* Disable the Transmitter */
+#define M_DRX 0x0001 /* Disable the Receiver */
+
+
+/*
+ * Receive message descriptor bit definitions.
+ */
+
+#define RCV_OWN 0x80 /* owner bit 0 = host, 1 = lance */
+#define RCV_ERR 0x40 /* Error Summary */
+#define RCV_FRAM 0x20 /* Framing Error */
+#define RCV_OFLO 0x10 /* Overflow Error */
+#define RCV_CRC 0x08 /* CRC Error */
+#define RCV_BUF_ERR 0x04 /* Buffer Error */
+#define RCV_START 0x02 /* Start of Packet */
+#define RCV_END 0x01 /* End of Packet */
+
+
+/*
+ * Transmit message descriptor bit definitions.
+ */
+
+#define XMIT_OWN 0x80 /* owner bit 0 = host, 1 = lance */
+#define XMIT_ERR 0x40 /* Error Summary */
+#define XMIT_RETRY 0x10 /* more than 1 retry needed to Xmit */
+#define XMIT_1_RETRY 0x08 /* one retry needed to Xmit */
+#define XMIT_DEF 0x04 /* Deferred */
+#define XMIT_START 0x02 /* Start of Packet */
+#define XMIT_END 0x01 /* End of Packet */
+
+/*
+ * transmit status (2) (valid if XMIT_ERR == 1)
+ */
+
+#define XMIT_TDRMASK 0x03ff /* time-domain-reflectometer-value */
+#define XMIT_RTRY 0x0400 /* Failed after 16 retransmissions */
+#define XMIT_LCAR 0x0800 /* Loss of Carrier */
+#define XMIT_LCOL 0x1000 /* Late collision */
+#define XMIT_RESERV 0x2000 /* Reserved */
+#define XMIT_UFLO 0x4000 /* Underflow (late memory) */
+#define XMIT_BUFF 0x8000 /* Buffering error (no ENP) */
+
+struct init_block {
+ unsigned short mode;
+ unsigned char eaddr[6];
+ unsigned char filter[8];
+ /* bit 29-31: number of rmd's (power of 2) */
+ u32 rrp; /* receive ring pointer (align 8) */
+ /* bit 29-31: number of tmd's (power of 2) */
+ u32 trp; /* transmit ring pointer (align 8) */
+};
+
+struct rmd { /* Receive Message Descriptor */
+ union {
+ volatile u32 buffer;
+ struct {
+ volatile unsigned char dummy[3];
+ volatile unsigned char status;
+ } s;
+ } u;
+ volatile short blen;
+ volatile unsigned short mlen;
+};
+
+struct tmd {
+ union {
+ volatile u32 buffer;
+ struct {
+ volatile unsigned char dummy[3];
+ volatile unsigned char status;
+ } s;
+ } u;
+ volatile unsigned short blen;
+ volatile unsigned short status2;
+};
diff --git a/drivers/net/ethernet/amd/nmclan_cs.c b/drivers/net/ethernet/amd/nmclan_cs.c
new file mode 100644
index 000000000..f34881637
--- /dev/null
+++ b/drivers/net/ethernet/amd/nmclan_cs.c
@@ -0,0 +1,1507 @@
+/* ----------------------------------------------------------------------------
+Linux PCMCIA ethernet adapter driver for the New Media Ethernet LAN.
+ nmclan_cs.c,v 0.16 1995/07/01 06:42:17 rpao Exp rpao
+
+ The Ethernet LAN uses the Advanced Micro Devices (AMD) Am79C940 Media
+ Access Controller for Ethernet (MACE). It is essentially the Am2150
+ PCMCIA Ethernet card contained in the Am2150 Demo Kit.
+
+Written by Roger C. Pao <rpao@paonet.org>
+ Copyright 1995 Roger C. Pao
+ Linux 2.5 cleanups Copyright Red Hat 2003
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License.
+
+Ported to Linux 1.3.* network driver environment by
+ Matti Aarnio <mea@utu.fi>
+
+References
+
+ Am2150 Technical Reference Manual, Revision 1.0, August 17, 1993
+ Am79C940 (MACE) Data Sheet, 1994
+ Am79C90 (C-LANCE) Data Sheet, 1994
+ Linux PCMCIA Programmer's Guide v1.17
+ /usr/src/linux/net/inet/dev.c, Linux kernel 1.2.8
+
+ Eric Mears, New Media Corporation
+ Tom Pollard, New Media Corporation
+ Dean Siasoyco, New Media Corporation
+ Ken Lesniak, Silicon Graphics, Inc. <lesniak@boston.sgi.com>
+ Donald Becker <becker@scyld.com>
+ David Hinds <dahinds@users.sourceforge.net>
+
+ The Linux client driver is based on the 3c589_cs.c client driver by
+ David Hinds.
+
+ The Linux network driver outline is based on the 3c589_cs.c driver,
+ the 8390.c driver, and the example skeleton.c kernel code, which are
+ by Donald Becker.
+
+ The Am2150 network driver hardware interface code is based on the
+ OS/9000 driver for the New Media Ethernet LAN by Eric Mears.
+
+ Special thanks for testing and help in debugging this driver goes
+ to Ken Lesniak.
+
+-------------------------------------------------------------------------------
+Driver Notes and Issues
+-------------------------------------------------------------------------------
+
+1. Developed on a Dell 320SLi
+ PCMCIA Card Services 2.6.2
+ Linux dell 1.2.10 #1 Thu Jun 29 20:23:41 PDT 1995 i386
+
+2. rc.pcmcia may require loading pcmcia_core with io_speed=300:
+ 'insmod pcmcia_core.o io_speed=300'.
+   This avoids problems with fast systems which cause rx_framecnt
+   to return random values.
+
+3. If hot extraction does not work for you, use 'ifconfig eth0 down'
+ before extraction.
+
+4. There is a bad slow-down problem in this driver.
+
+5. Future: Multicast processing. In the meantime, do _not_ compile your
+ kernel with multicast ip enabled.
+
+-------------------------------------------------------------------------------
+History
+-------------------------------------------------------------------------------
+Log: nmclan_cs.c,v
+ * 2.5.75-ac1 2003/07/11 Alan Cox <alan@lxorguk.ukuu.org.uk>
+ * Fixed hang on card eject as we probe it
+ * Cleaned up to use new style locking.
+ *
+ * Revision 0.16 1995/07/01 06:42:17 rpao
+ * Bug fix: nmclan_reset() called CardServices incorrectly.
+ *
+ * Revision 0.15 1995/05/24 08:09:47 rpao
+ * Re-implement MULTI_TX dev->tbusy handling.
+ *
+ * Revision 0.14 1995/05/23 03:19:30 rpao
+ * Added, in nmclan_config(), "tuple.Attributes = 0;".
+ * Modified MACE ID check to ignore chip revision level.
+ * Avoid tx_free_frames race condition between _start_xmit and _interrupt.
+ *
+ * Revision 0.13 1995/05/18 05:56:34 rpao
+ * Statistics changes.
+ * Bug fix: nmclan_reset did not enable TX and RX: call restore_multicast_list.
+ * Bug fix: mace_interrupt checks ~MACE_IMR_DEFAULT. Fixes driver lockup.
+ *
+ * Revision 0.12 1995/05/14 00:12:23 rpao
+ * Statistics overhaul.
+ *
+
+95/05/13 rpao V0.10a
+ Bug fix: MACE statistics counters used wrong I/O ports.
+ Bug fix: mace_interrupt() needed to allow statistics to be
+ processed without RX or TX interrupts pending.
+95/05/11 rpao V0.10
+ Multiple transmit request processing.
+ Modified statistics to use MACE counters where possible.
+95/05/10 rpao V0.09 Bug fix: Must use IO_DATA_PATH_WIDTH_AUTO.
+ *Released
+95/05/10 rpao V0.08
+ Bug fix: Make all non-exported functions private by using
+ static keyword.
+ Bug fix: Test IntrCnt _before_ reading MACE_IR.
+95/05/10 rpao V0.07 Statistics.
+95/05/09 rpao V0.06 Fix rx_framecnt problem by addition of PCIC wait states.
+
+---------------------------------------------------------------------------- */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#define DRV_NAME "nmclan_cs"
+
+/* ----------------------------------------------------------------------------
+Conditional Compilation Options
+---------------------------------------------------------------------------- */
+
+#define MULTI_TX 0
+#define RESET_ON_TIMEOUT 1
+#define TX_INTERRUPTABLE 1
+#define RESET_XILINX 0
+
+/* ----------------------------------------------------------------------------
+Include Files
+---------------------------------------------------------------------------- */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/interrupt.h>
+#include <linux/in.h>
+#include <linux/delay.h>
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/if_arp.h>
+#include <linux/ioport.h>
+#include <linux/bitops.h>
+
+#include <pcmcia/cisreg.h>
+#include <pcmcia/cistpl.h>
+#include <pcmcia/ds.h>
+
+#include <linux/uaccess.h>
+#include <asm/io.h>
+
+/* ----------------------------------------------------------------------------
+Defines
+---------------------------------------------------------------------------- */
+
+#define MACE_LADRF_LEN 8
+ /* 8 bytes in Logical Address Filter */
+
+/* Loop Control Defines */
+#define MACE_MAX_IR_ITERATIONS 10
+#define MACE_MAX_RX_ITERATIONS 12
+ /*
+ TBD: Dean brought this up, and I assumed the hardware would
+ handle it:
+
+ If MACE_MAX_RX_ITERATIONS is > 1, rx_framecnt may still be
+ non-zero when the isr exits. We may not get another interrupt
+ to process the remaining packets for some time.
+ */
+
+/*
+The Am2150 has a Xilinx XC3042 field programmable gate array (FPGA)
+which manages the interface between the MACE and the PCMCIA bus. It
+also includes buffer management for the 32K x 8 SRAM to control up to
+four transmit and 12 receive frames at a time.
+*/
+#define AM2150_MAX_TX_FRAMES 4
+#define AM2150_MAX_RX_FRAMES 12
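+
+/*
+ * Sizing sketch, assuming one 2 KB slot per maximum-size frame (the
+ * slot size is not stated above): (4 + 12) * 2048 = 32768 bytes,
+ * which exactly fills the 32K x 8 SRAM.
+ */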
+
+/* Am2150 Ethernet Card I/O Mapping */
+#define AM2150_RCV 0x00
+#define AM2150_XMT 0x04
+#define AM2150_XMT_SKIP 0x09
+#define AM2150_RCV_NEXT 0x0A
+#define AM2150_RCV_FRAME_COUNT 0x0B
+#define AM2150_MACE_BANK 0x0C
+#define AM2150_MACE_BASE 0x10
+
+/* MACE Registers */
+#define MACE_RCVFIFO 0
+#define MACE_XMTFIFO 1
+#define MACE_XMTFC 2
+#define MACE_XMTFS 3
+#define MACE_XMTRC 4
+#define MACE_RCVFC 5
+#define MACE_RCVFS 6
+#define MACE_FIFOFC 7
+#define MACE_IR 8
+#define MACE_IMR 9
+#define MACE_PR 10
+#define MACE_BIUCC 11
+#define MACE_FIFOCC 12
+#define MACE_MACCC 13
+#define MACE_PLSCC 14
+#define MACE_PHYCC 15
+#define MACE_CHIPIDL 16
+#define MACE_CHIPIDH 17
+#define MACE_IAC 18
+/* Reserved */
+#define MACE_LADRF 20
+#define MACE_PADR 21
+/* Reserved */
+/* Reserved */
+#define MACE_MPC 24
+/* Reserved */
+#define MACE_RNTPC 26
+#define MACE_RCVCC 27
+/* Reserved */
+#define MACE_UTR 29
+#define MACE_RTR1 30
+#define MACE_RTR2 31
+
+/* MACE Bit Masks */
+#define MACE_XMTRC_EXDEF 0x80
+#define MACE_XMTRC_XMTRC 0x0F
+
+#define MACE_XMTFS_XMTSV 0x80
+#define MACE_XMTFS_UFLO 0x40
+#define MACE_XMTFS_LCOL 0x20
+#define MACE_XMTFS_MORE 0x10
+#define MACE_XMTFS_ONE 0x08
+#define MACE_XMTFS_DEFER 0x04
+#define MACE_XMTFS_LCAR 0x02
+#define MACE_XMTFS_RTRY 0x01
+
+#define MACE_RCVFS_RCVSTS 0xF000
+#define MACE_RCVFS_OFLO 0x8000
+#define MACE_RCVFS_CLSN 0x4000
+#define MACE_RCVFS_FRAM 0x2000
+#define MACE_RCVFS_FCS 0x1000
+
+#define MACE_FIFOFC_RCVFC 0xF0
+#define MACE_FIFOFC_XMTFC 0x0F
+
+#define MACE_IR_JAB 0x80
+#define MACE_IR_BABL 0x40
+#define MACE_IR_CERR 0x20
+#define MACE_IR_RCVCCO 0x10
+#define MACE_IR_RNTPCO 0x08
+#define MACE_IR_MPCO 0x04
+#define MACE_IR_RCVINT 0x02
+#define MACE_IR_XMTINT 0x01
+
+#define MACE_MACCC_PROM 0x80
+#define MACE_MACCC_DXMT2PD 0x40
+#define MACE_MACCC_EMBA 0x20
+#define MACE_MACCC_RESERVED 0x10
+#define MACE_MACCC_DRCVPA 0x08
+#define MACE_MACCC_DRCVBC 0x04
+#define MACE_MACCC_ENXMT 0x02
+#define MACE_MACCC_ENRCV 0x01
+
+#define MACE_PHYCC_LNKFL 0x80
+#define MACE_PHYCC_DLNKTST 0x40
+#define MACE_PHYCC_REVPOL 0x20
+#define MACE_PHYCC_DAPC 0x10
+#define MACE_PHYCC_LRT 0x08
+#define MACE_PHYCC_ASEL 0x04
+#define MACE_PHYCC_RWAKE 0x02
+#define MACE_PHYCC_AWAKE 0x01
+
+#define MACE_IAC_ADDRCHG 0x80
+#define MACE_IAC_PHYADDR 0x04
+#define MACE_IAC_LOGADDR 0x02
+
+#define MACE_UTR_RTRE 0x80
+#define MACE_UTR_RTRD 0x40
+#define MACE_UTR_RPA 0x20
+#define MACE_UTR_FCOLL 0x10
+#define MACE_UTR_RCVFCSE 0x08
+#define MACE_UTR_LOOP_INCL_MENDEC 0x06
+#define MACE_UTR_LOOP_NO_MENDEC 0x04
+#define MACE_UTR_LOOP_EXTERNAL 0x02
+#define MACE_UTR_LOOP_NONE 0x00
+#define MACE_UTR_RESERVED 0x01
+
+/* Switch MACE register bank (only 0 and 1 are valid) */
+#define MACEBANK(win_num) outb((win_num), ioaddr + AM2150_MACE_BANK)
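+
+/*
+ * Minimal usage sketch for MACEBANK (this mirrors mace_read() and
+ * mace_write() below; the lock keeps the ISR from finding the chip
+ * off bank 0):
+ *
+ *   spin_lock_irqsave(&lp->bank_lock, flags);
+ *   MACEBANK(1);
+ *   data = inb(ioaddr + AM2150_MACE_BASE + (MACE_MPC & 0x0F));
+ *   MACEBANK(0);
+ *   spin_unlock_irqrestore(&lp->bank_lock, flags);
+ */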
+
+#define MACE_IMR_DEFAULT \
+ (0xFF - \
+ ( \
+ MACE_IR_CERR | \
+ MACE_IR_RCVCCO | \
+ MACE_IR_RNTPCO | \
+ MACE_IR_MPCO | \
+ MACE_IR_RCVINT | \
+ MACE_IR_XMTINT \
+ ) \
+ )
+#undef MACE_IMR_DEFAULT
+#define MACE_IMR_DEFAULT 0x00 /* New statistics handling: grab everything */
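+
+/*
+ * For reference, the masked-out definition above evaluates to
+ *   0xFF - (0x20|0x10|0x08|0x04|0x02|0x01) = 0xFF - 0x3F = 0xC0,
+ * i.e. it left only JAB (0x80) and BABL (0x40) masked; the 0x00
+ * override unmasks those two as well so their counts can be kept.
+ */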
+
+#define TX_TIMEOUT ((400*HZ)/1000)
+
+/* ----------------------------------------------------------------------------
+Type Definitions
+---------------------------------------------------------------------------- */
+
+typedef struct _mace_statistics {
+ /* MACE_XMTFS */
+ int xmtsv;
+ int uflo;
+ int lcol;
+ int more;
+ int one;
+ int defer;
+ int lcar;
+ int rtry;
+
+ /* MACE_XMTRC */
+ int exdef;
+ int xmtrc;
+
+ /* RFS1--Receive Status (RCVSTS) */
+ int oflo;
+ int clsn;
+ int fram;
+ int fcs;
+
+ /* RFS2--Runt Packet Count (RNTPC) */
+ int rfs_rntpc;
+
+ /* RFS3--Receive Collision Count (RCVCC) */
+ int rfs_rcvcc;
+
+ /* MACE_IR */
+ int jab;
+ int babl;
+ int cerr;
+ int rcvcco;
+ int rntpco;
+ int mpco;
+
+ /* MACE_MPC */
+ int mpc;
+
+ /* MACE_RNTPC */
+ int rntpc;
+
+ /* MACE_RCVCC */
+ int rcvcc;
+} mace_statistics;
+
+typedef struct _mace_private {
+ struct pcmcia_device *p_dev;
+ mace_statistics mace_stats; /* MACE chip statistics counters */
+
+ /* restore_multicast_list() state variables */
+ int multicast_ladrf[MACE_LADRF_LEN]; /* Logical address filter */
+ int multicast_num_addrs;
+
+ char tx_free_frames; /* Number of free transmit frame buffers */
+ char tx_irq_disabled; /* MACE TX interrupt disabled */
+
+ spinlock_t bank_lock; /* Must be held if you step off bank 0 */
+} mace_private;
+
+/* ----------------------------------------------------------------------------
+Private Global Variables
+---------------------------------------------------------------------------- */
+
+static const char *if_names[]={
+ "Auto", "10baseT", "BNC",
+};
+
+/* ----------------------------------------------------------------------------
+Parameters
+ These are the parameters that can be set during loading with
+ 'insmod'.
+---------------------------------------------------------------------------- */
+
+MODULE_DESCRIPTION("New Media PCMCIA ethernet driver");
+MODULE_LICENSE("GPL");
+
+#define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0)
+
+/* 0=auto, 1=10baseT, 2 = 10base2, default=auto */
+INT_MODULE_PARM(if_port, 0);
+
+
+/* ----------------------------------------------------------------------------
+Function Prototypes
+---------------------------------------------------------------------------- */
+
+static int nmclan_config(struct pcmcia_device *link);
+static void nmclan_release(struct pcmcia_device *link);
+
+static void nmclan_reset(struct net_device *dev);
+static int mace_config(struct net_device *dev, struct ifmap *map);
+static int mace_open(struct net_device *dev);
+static int mace_close(struct net_device *dev);
+static netdev_tx_t mace_start_xmit(struct sk_buff *skb,
+ struct net_device *dev);
+static void mace_tx_timeout(struct net_device *dev, unsigned int txqueue);
+static irqreturn_t mace_interrupt(int irq, void *dev_id);
+static struct net_device_stats *mace_get_stats(struct net_device *dev);
+static int mace_rx(struct net_device *dev, unsigned char RxCnt);
+static void restore_multicast_list(struct net_device *dev);
+static void set_multicast_list(struct net_device *dev);
+static const struct ethtool_ops netdev_ethtool_ops;
+
+
+static void nmclan_detach(struct pcmcia_device *p_dev);
+
+static const struct net_device_ops mace_netdev_ops = {
+ .ndo_open = mace_open,
+ .ndo_stop = mace_close,
+ .ndo_start_xmit = mace_start_xmit,
+ .ndo_tx_timeout = mace_tx_timeout,
+ .ndo_set_config = mace_config,
+ .ndo_get_stats = mace_get_stats,
+ .ndo_set_rx_mode = set_multicast_list,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+static int nmclan_probe(struct pcmcia_device *link)
+{
+ mace_private *lp;
+ struct net_device *dev;
+
+ dev_dbg(&link->dev, "nmclan_attach()\n");
+
+ /* Create new ethernet device */
+ dev = alloc_etherdev(sizeof(mace_private));
+ if (!dev)
+ return -ENOMEM;
+ lp = netdev_priv(dev);
+ lp->p_dev = link;
+ link->priv = dev;
+
+ spin_lock_init(&lp->bank_lock);
+ link->resource[0]->end = 32;
+ link->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO;
+ link->config_flags |= CONF_ENABLE_IRQ;
+ link->config_index = 1;
+ link->config_regs = PRESENT_OPTION;
+
+ lp->tx_free_frames=AM2150_MAX_TX_FRAMES;
+
+ dev->netdev_ops = &mace_netdev_ops;
+ dev->ethtool_ops = &netdev_ethtool_ops;
+ dev->watchdog_timeo = TX_TIMEOUT;
+
+ return nmclan_config(link);
+} /* nmclan_attach */
+
+static void nmclan_detach(struct pcmcia_device *link)
+{
+ struct net_device *dev = link->priv;
+
+ dev_dbg(&link->dev, "nmclan_detach\n");
+
+ unregister_netdev(dev);
+
+ nmclan_release(link);
+
+ free_netdev(dev);
+} /* nmclan_detach */
+
+/* ----------------------------------------------------------------------------
+mace_read
+ Reads a MACE register. This is bank independent; however, the
+ caller must ensure that this call cannot be interrupted. We are
+ assuming that during normal operation, the MACE is always in
+ bank 0.
+---------------------------------------------------------------------------- */
+static int mace_read(mace_private *lp, unsigned int ioaddr, int reg)
+{
+ int data = 0xFF;
+ unsigned long flags;
+
+ switch (reg >> 4) {
+ case 0: /* register 0-15 */
+ data = inb(ioaddr + AM2150_MACE_BASE + reg);
+ break;
+ case 1: /* register 16-31 */
+ spin_lock_irqsave(&lp->bank_lock, flags);
+ MACEBANK(1);
+ data = inb(ioaddr + AM2150_MACE_BASE + (reg & 0x0F));
+ MACEBANK(0);
+ spin_unlock_irqrestore(&lp->bank_lock, flags);
+ break;
+ }
+ return data & 0xFF;
+} /* mace_read */
+
+/* ----------------------------------------------------------------------------
+mace_write
+ Writes to a MACE register. This is bank independent; however,
+ the caller must ensure that this call cannot be interrupted. We
+ are assuming that during normal operation, the MACE is always in
+ bank 0.
+---------------------------------------------------------------------------- */
+static void mace_write(mace_private *lp, unsigned int ioaddr, int reg,
+ int data)
+{
+ unsigned long flags;
+
+ switch (reg >> 4) {
+ case 0: /* register 0-15 */
+ outb(data & 0xFF, ioaddr + AM2150_MACE_BASE + reg);
+ break;
+ case 1: /* register 16-31 */
+ spin_lock_irqsave(&lp->bank_lock, flags);
+ MACEBANK(1);
+ outb(data & 0xFF, ioaddr + AM2150_MACE_BASE + (reg & 0x0F));
+ MACEBANK(0);
+ spin_unlock_irqrestore(&lp->bank_lock, flags);
+ break;
+ }
+} /* mace_write */
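+
+/*
+ * Bank decoding sketch for the two helpers above: 'reg >> 4' selects
+ * the bank and 'reg & 0x0F' the offset within it. MACE_IAC (18), for
+ * example, decodes as bank 1, offset 2, so it is reached at
+ * ioaddr + AM2150_MACE_BASE + 2 after MACEBANK(1).
+ */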
+
+/* ----------------------------------------------------------------------------
+mace_init
+ Resets the MACE chip.
+---------------------------------------------------------------------------- */
+static int mace_init(mace_private *lp, unsigned int ioaddr, char *enet_addr)
+{
+ int i;
+ int ct = 0;
+
+ /* MACE Software reset */
+ mace_write(lp, ioaddr, MACE_BIUCC, 1);
+ while (mace_read(lp, ioaddr, MACE_BIUCC) & 0x01) {
+    /* Wait for reset bit to be cleared automatically after <= 200ns */
+ if(++ct > 500)
+ {
+ pr_err("reset failed, card removed?\n");
+ return -1;
+ }
+ udelay(1);
+ }
+ mace_write(lp, ioaddr, MACE_BIUCC, 0);
+
+ /* The Am2150 requires that the MACE FIFOs operate in burst mode. */
+ mace_write(lp, ioaddr, MACE_FIFOCC, 0x0F);
+
+ mace_write(lp,ioaddr, MACE_RCVFC, 0); /* Disable Auto Strip Receive */
+ mace_write(lp, ioaddr, MACE_IMR, 0xFF); /* Disable all interrupts until _open */
+
+ /*
+ * Bit 2-1 PORTSEL[1-0] Port Select.
+ * 00 AUI/10Base-2
+ * 01 10Base-T
+ * 10 DAI Port (reserved in Am2150)
+ * 11 GPSI
+ * For this card, only the first two are valid.
+ * So, PLSCC should be set to
+ * 0x00 for 10Base-2
+ * 0x02 for 10Base-T
+ * Or just set ASEL in PHYCC below!
+ */
+ switch (if_port) {
+ case 1:
+ mace_write(lp, ioaddr, MACE_PLSCC, 0x02);
+ break;
+ case 2:
+ mace_write(lp, ioaddr, MACE_PLSCC, 0x00);
+ break;
+ default:
+ mace_write(lp, ioaddr, MACE_PHYCC, /* ASEL */ 4);
+ /* ASEL Auto Select. When set, the PORTSEL[1-0] bits are overridden,
+ and the MACE device will automatically select the operating media
+ interface port. */
+ break;
+ }
+
+ mace_write(lp, ioaddr, MACE_IAC, MACE_IAC_ADDRCHG | MACE_IAC_PHYADDR);
+ /* Poll ADDRCHG bit */
+ ct = 0;
+ while (mace_read(lp, ioaddr, MACE_IAC) & MACE_IAC_ADDRCHG)
+ {
+    if (++ct > 500)
+ {
+ pr_err("ADDRCHG timeout, card removed?\n");
+ return -1;
+ }
+ }
+ /* Set PADR register */
+ for (i = 0; i < ETH_ALEN; i++)
+ mace_write(lp, ioaddr, MACE_PADR, enet_addr[i]);
+
+ /* MAC Configuration Control Register should be written last */
+ /* Let set_multicast_list set this. */
+ /* mace_write(lp, ioaddr, MACE_MACCC, MACE_MACCC_ENXMT | MACE_MACCC_ENRCV); */
+ mace_write(lp, ioaddr, MACE_MACCC, 0x00);
+ return 0;
+} /* mace_init */
+
+static int nmclan_config(struct pcmcia_device *link)
+{
+ struct net_device *dev = link->priv;
+ mace_private *lp = netdev_priv(dev);
+ u8 *buf;
+ size_t len;
+ int i, ret;
+ unsigned int ioaddr;
+
+ dev_dbg(&link->dev, "nmclan_config\n");
+
+ link->io_lines = 5;
+ ret = pcmcia_request_io(link);
+ if (ret)
+ goto failed;
+ ret = pcmcia_request_irq(link, mace_interrupt);
+ if (ret)
+ goto failed;
+ ret = pcmcia_enable_device(link);
+ if (ret)
+ goto failed;
+
+ dev->irq = link->irq;
+ dev->base_addr = link->resource[0]->start;
+
+ ioaddr = dev->base_addr;
+
+ /* Read the ethernet address from the CIS. */
+ len = pcmcia_get_tuple(link, 0x80, &buf);
+ if (!buf || len < ETH_ALEN) {
+ kfree(buf);
+ goto failed;
+ }
+ memcpy(dev->dev_addr, buf, ETH_ALEN);
+ kfree(buf);
+
+ /* Verify configuration by reading the MACE ID. */
+ {
+ char sig[2];
+
+ sig[0] = mace_read(lp, ioaddr, MACE_CHIPIDL);
+ sig[1] = mace_read(lp, ioaddr, MACE_CHIPIDH);
+ if ((sig[0] == 0x40) && ((sig[1] & 0x0F) == 0x09)) {
+ dev_dbg(&link->dev, "nmclan_cs configured: mace id=%x %x\n",
+ sig[0], sig[1]);
+ } else {
+ pr_notice("mace id not found: %x %x should be 0x40 0x?9\n",
+ sig[0], sig[1]);
+ goto failed;
+ }
+ }
+
+ if(mace_init(lp, ioaddr, dev->dev_addr) == -1)
+ goto failed;
+
+ /* The if_port symbol can be set when the module is loaded */
+ if (if_port <= 2)
+ dev->if_port = if_port;
+ else
+ pr_notice("invalid if_port requested\n");
+
+ SET_NETDEV_DEV(dev, &link->dev);
+
+ i = register_netdev(dev);
+ if (i != 0) {
+ pr_notice("register_netdev() failed\n");
+ goto failed;
+ }
+
+ netdev_info(dev, "nmclan: port %#3lx, irq %d, %s port, hw_addr %pM\n",
+ dev->base_addr, dev->irq, if_names[dev->if_port], dev->dev_addr);
+ return 0;
+
+failed:
+ nmclan_release(link);
+ return -ENODEV;
+} /* nmclan_config */
+
+static void nmclan_release(struct pcmcia_device *link)
+{
+ dev_dbg(&link->dev, "nmclan_release\n");
+ pcmcia_disable_device(link);
+}
+
+static int nmclan_suspend(struct pcmcia_device *link)
+{
+ struct net_device *dev = link->priv;
+
+ if (link->open)
+ netif_device_detach(dev);
+
+ return 0;
+}
+
+static int nmclan_resume(struct pcmcia_device *link)
+{
+ struct net_device *dev = link->priv;
+
+ if (link->open) {
+ nmclan_reset(dev);
+ netif_device_attach(dev);
+ }
+
+ return 0;
+}
+
+
+/* ----------------------------------------------------------------------------
+nmclan_reset
+ Reset and restore all of the Xilinx and MACE registers.
+---------------------------------------------------------------------------- */
+static void nmclan_reset(struct net_device *dev)
+{
+ mace_private *lp = netdev_priv(dev);
+
+#if RESET_XILINX
+  struct pcmcia_device *link = lp->p_dev;
+ u8 OrigCorValue;
+
+ /* Save original COR value */
+ pcmcia_read_config_byte(link, CISREG_COR, &OrigCorValue);
+
+ /* Reset Xilinx */
+ dev_dbg(&link->dev, "nmclan_reset: OrigCorValue=0x%x, resetting...\n",
+ OrigCorValue);
+  pcmcia_write_config_byte(link, CISREG_COR, COR_SOFT_RESET);
+  msleep(20); /* PCMCIA needs 20 ms to finish the reset. */
+
+ /* Restore original COR configuration index */
+ pcmcia_write_config_byte(link, CISREG_COR,
+ (COR_LEVEL_REQ | (OrigCorValue & COR_CONFIG_MASK)));
+
+#endif /* #if RESET_XILINX */
+
+ /* Xilinx is now completely reset along with the MACE chip. */
+ lp->tx_free_frames=AM2150_MAX_TX_FRAMES;
+
+ /* Reinitialize the MACE chip for operation. */
+ mace_init(lp, dev->base_addr, dev->dev_addr);
+ mace_write(lp, dev->base_addr, MACE_IMR, MACE_IMR_DEFAULT);
+
+ /* Restore the multicast list and enable TX and RX. */
+ restore_multicast_list(dev);
+} /* nmclan_reset */
+
+/* ----------------------------------------------------------------------------
+mace_config
+ [Someone tell me what this is supposed to do? Is if_port a defined
+ standard? If so, there should be defines to indicate 1=10Base-T,
+ 2=10Base-2, etc. including limited automatic detection.]
+---------------------------------------------------------------------------- */
+static int mace_config(struct net_device *dev, struct ifmap *map)
+{
+ if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) {
+ if (map->port <= 2) {
+ dev->if_port = map->port;
+ netdev_info(dev, "switched to %s port\n", if_names[dev->if_port]);
+ } else
+ return -EINVAL;
+ }
+ return 0;
+} /* mace_config */
+
+/* ----------------------------------------------------------------------------
+mace_open
+ Open device driver.
+---------------------------------------------------------------------------- */
+static int mace_open(struct net_device *dev)
+{
+ unsigned int ioaddr = dev->base_addr;
+ mace_private *lp = netdev_priv(dev);
+ struct pcmcia_device *link = lp->p_dev;
+
+ if (!pcmcia_dev_present(link))
+ return -ENODEV;
+
+ link->open++;
+
+ MACEBANK(0);
+
+ netif_start_queue(dev);
+ nmclan_reset(dev);
+
+ return 0; /* Always succeed */
+} /* mace_open */
+
+/* ----------------------------------------------------------------------------
+mace_close
+ Closes device driver.
+---------------------------------------------------------------------------- */
+static int mace_close(struct net_device *dev)
+{
+ unsigned int ioaddr = dev->base_addr;
+ mace_private *lp = netdev_priv(dev);
+ struct pcmcia_device *link = lp->p_dev;
+
+ dev_dbg(&link->dev, "%s: shutting down ethercard.\n", dev->name);
+
+ /* Mask off all interrupts from the MACE chip. */
+ outb(0xFF, ioaddr + AM2150_MACE_BASE + MACE_IMR);
+
+ link->open--;
+ netif_stop_queue(dev);
+
+ return 0;
+} /* mace_close */
+
+static void netdev_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ snprintf(info->bus_info, sizeof(info->bus_info),
+ "PCMCIA 0x%lx", dev->base_addr);
+}
+
+static const struct ethtool_ops netdev_ethtool_ops = {
+ .get_drvinfo = netdev_get_drvinfo,
+};
+
+/* ----------------------------------------------------------------------------
+mace_start_xmit
+ This routine begins the packet transmit function. When completed,
+ it will generate a transmit interrupt.
+
+ According to /usr/src/linux/net/inet/dev.c, if _start_xmit
+ returns 0, the "packet is now solely the responsibility of the
+ driver." If _start_xmit returns non-zero, the "transmission
+ failed, put skb back into a list."
+---------------------------------------------------------------------------- */
+
+static void mace_tx_timeout(struct net_device *dev, unsigned int txqueue)
+{
+ mace_private *lp = netdev_priv(dev);
+ struct pcmcia_device *link = lp->p_dev;
+
+ netdev_notice(dev, "transmit timed out -- ");
+#if RESET_ON_TIMEOUT
+ pr_cont("resetting card\n");
+ pcmcia_reset_card(link->socket);
+#else /* #if RESET_ON_TIMEOUT */
+ pr_cont("NOT resetting card\n");
+#endif /* #if RESET_ON_TIMEOUT */
+ netif_trans_update(dev); /* prevent tx timeout */
+ netif_wake_queue(dev);
+}
+
+static netdev_tx_t mace_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ mace_private *lp = netdev_priv(dev);
+ unsigned int ioaddr = dev->base_addr;
+
+ netif_stop_queue(dev);
+
+ pr_debug("%s: mace_start_xmit(length = %ld) called.\n",
+ dev->name, (long)skb->len);
+
+#if (!TX_INTERRUPTABLE)
+ /* Disable MACE TX interrupts. */
+ outb(MACE_IMR_DEFAULT | MACE_IR_XMTINT,
+ ioaddr + AM2150_MACE_BASE + MACE_IMR);
+ lp->tx_irq_disabled=1;
+#endif /* #if (!TX_INTERRUPTABLE) */
+
+ {
+ /* This block must not be interrupted by another transmit request!
+ mace_tx_timeout will take care of timer-based retransmissions from
+ the upper layers. The interrupt handler is guaranteed never to
+ service a transmit interrupt while we are in here.
+ */
+
+ dev->stats.tx_bytes += skb->len;
+ lp->tx_free_frames--;
+
+ /* WARNING: Write the _exact_ number of bytes written in the header! */
+ /* Put out the word header [must be an outw()] . . . */
+ outw(skb->len, ioaddr + AM2150_XMT);
+ /* . . . and the packet [may be any combination of outw() and outb()] */
+ outsw(ioaddr + AM2150_XMT, skb->data, skb->len >> 1);
+ if (skb->len & 1) {
+ /* Odd byte transfer */
+ outb(skb->data[skb->len-1], ioaddr + AM2150_XMT);
+ }
+
+#if MULTI_TX
+ if (lp->tx_free_frames > 0)
+ netif_start_queue(dev);
+#endif /* #if MULTI_TX */
+ }
+
+#if (!TX_INTERRUPTABLE)
+ /* Re-enable MACE TX interrupts. */
+ lp->tx_irq_disabled=0;
+ outb(MACE_IMR_DEFAULT, ioaddr + AM2150_MACE_BASE + MACE_IMR);
+#endif /* #if (!TX_INTERRUPTABLE) */
+
+ dev_kfree_skb(skb);
+
+ return NETDEV_TX_OK;
+} /* mace_start_xmit */
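+
+/*
+ * Transfer-size sketch for the word/odd-byte split above: a 61-byte
+ * frame goes out as outw(61) for the header, outsw() of 61 >> 1 == 30
+ * words (60 bytes), and one trailing outb() for the odd byte.
+ */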
+
+/* ----------------------------------------------------------------------------
+mace_interrupt
+ The interrupt handler.
+---------------------------------------------------------------------------- */
+static irqreturn_t mace_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = (struct net_device *) dev_id;
+ mace_private *lp = netdev_priv(dev);
+ unsigned int ioaddr;
+ int status;
+ int IntrCnt = MACE_MAX_IR_ITERATIONS;
+
+ if (dev == NULL) {
+ pr_debug("mace_interrupt(): irq 0x%X for unknown device.\n",
+ irq);
+ return IRQ_NONE;
+ }
+
+ ioaddr = dev->base_addr;
+
+ if (lp->tx_irq_disabled) {
+    netdev_notice(dev,
+                  "Interrupt with tx_irq_disabled [isr=%02X, imr=%02X]\n",
+ inb(ioaddr + AM2150_MACE_BASE + MACE_IR),
+ inb(ioaddr + AM2150_MACE_BASE + MACE_IMR));
+ /* WARNING: MACE_IR has been read! */
+ return IRQ_NONE;
+ }
+
+ if (!netif_device_present(dev)) {
+ netdev_dbg(dev, "interrupt from dead card\n");
+ return IRQ_NONE;
+ }
+
+ do {
+ /* WARNING: MACE_IR is a READ/CLEAR port! */
+ status = inb(ioaddr + AM2150_MACE_BASE + MACE_IR);
+ if (!(status & ~MACE_IMR_DEFAULT) && IntrCnt == MACE_MAX_IR_ITERATIONS)
+ return IRQ_NONE;
+
+ pr_debug("mace_interrupt: irq 0x%X status 0x%X.\n", irq, status);
+
+ if (status & MACE_IR_RCVINT) {
+ mace_rx(dev, MACE_MAX_RX_ITERATIONS);
+ }
+
+ if (status & MACE_IR_XMTINT) {
+ unsigned char fifofc;
+ unsigned char xmtrc;
+ unsigned char xmtfs;
+
+ fifofc = inb(ioaddr + AM2150_MACE_BASE + MACE_FIFOFC);
+ if ((fifofc & MACE_FIFOFC_XMTFC)==0) {
+ dev->stats.tx_errors++;
+ outb(0xFF, ioaddr + AM2150_XMT_SKIP);
+ }
+
+ /* Transmit Retry Count (XMTRC, reg 4) */
+ xmtrc = inb(ioaddr + AM2150_MACE_BASE + MACE_XMTRC);
+ if (xmtrc & MACE_XMTRC_EXDEF) lp->mace_stats.exdef++;
+ lp->mace_stats.xmtrc += (xmtrc & MACE_XMTRC_XMTRC);
+
+ if (
+ (xmtfs = inb(ioaddr + AM2150_MACE_BASE + MACE_XMTFS)) &
+ MACE_XMTFS_XMTSV /* Transmit Status Valid */
+ ) {
+ lp->mace_stats.xmtsv++;
+
+ if (xmtfs & ~MACE_XMTFS_XMTSV) {
+ if (xmtfs & MACE_XMTFS_UFLO) {
+ /* Underflow. Indicates that the Transmit FIFO emptied before
+ the end of frame was reached. */
+ lp->mace_stats.uflo++;
+ }
+ if (xmtfs & MACE_XMTFS_LCOL) {
+ /* Late Collision */
+ lp->mace_stats.lcol++;
+ }
+ if (xmtfs & MACE_XMTFS_MORE) {
+ /* MORE than one retry was needed */
+ lp->mace_stats.more++;
+ }
+ if (xmtfs & MACE_XMTFS_ONE) {
+ /* Exactly ONE retry occurred */
+ lp->mace_stats.one++;
+ }
+ if (xmtfs & MACE_XMTFS_DEFER) {
+          /* Transmission was deferred */
+ lp->mace_stats.defer++;
+ }
+ if (xmtfs & MACE_XMTFS_LCAR) {
+ /* Loss of carrier */
+ lp->mace_stats.lcar++;
+ }
+ if (xmtfs & MACE_XMTFS_RTRY) {
+ /* Retry error: transmit aborted after 16 attempts */
+ lp->mace_stats.rtry++;
+ }
+ } /* if (xmtfs & ~MACE_XMTFS_XMTSV) */
+
+ } /* if (xmtfs & MACE_XMTFS_XMTSV) */
+
+ dev->stats.tx_packets++;
+ lp->tx_free_frames++;
+ netif_wake_queue(dev);
+ } /* if (status & MACE_IR_XMTINT) */
+
+ if (status & ~MACE_IMR_DEFAULT & ~MACE_IR_RCVINT & ~MACE_IR_XMTINT) {
+ if (status & MACE_IR_JAB) {
+ /* Jabber Error. Excessive transmit duration (20-150ms). */
+ lp->mace_stats.jab++;
+ }
+ if (status & MACE_IR_BABL) {
+ /* Babble Error. >1518 bytes transmitted. */
+ lp->mace_stats.babl++;
+ }
+ if (status & MACE_IR_CERR) {
+ /* Collision Error. CERR indicates the absence of the
+ Signal Quality Error Test message after a packet
+ transmission. */
+ lp->mace_stats.cerr++;
+ }
+ if (status & MACE_IR_RCVCCO) {
+ /* Receive Collision Count Overflow; */
+ lp->mace_stats.rcvcco++;
+ }
+ if (status & MACE_IR_RNTPCO) {
+ /* Runt Packet Count Overflow */
+ lp->mace_stats.rntpco++;
+ }
+ if (status & MACE_IR_MPCO) {
+ /* Missed Packet Count Overflow */
+ lp->mace_stats.mpco++;
+ }
+ } /* if (status & ~MACE_IMR_DEFAULT & ~MACE_IR_RCVINT & ~MACE_IR_XMTINT) */
+
+ } while ((status & ~MACE_IMR_DEFAULT) && (--IntrCnt));
+
+ return IRQ_HANDLED;
+} /* mace_interrupt */
+
+/* ----------------------------------------------------------------------------
+mace_rx
+ Receives packets.
+---------------------------------------------------------------------------- */
+static int mace_rx(struct net_device *dev, unsigned char RxCnt)
+{
+ mace_private *lp = netdev_priv(dev);
+ unsigned int ioaddr = dev->base_addr;
+ unsigned char rx_framecnt;
+ unsigned short rx_status;
+
+ while (
+ ((rx_framecnt = inb(ioaddr + AM2150_RCV_FRAME_COUNT)) > 0) &&
+ (rx_framecnt <= 12) && /* rx_framecnt==0xFF if card is extracted. */
+ (RxCnt--)
+ ) {
+ rx_status = inw(ioaddr + AM2150_RCV);
+
+ pr_debug("%s: in mace_rx(), framecnt 0x%X, rx_status"
+ " 0x%X.\n", dev->name, rx_framecnt, rx_status);
+
+ if (rx_status & MACE_RCVFS_RCVSTS) { /* Error, update stats. */
+ dev->stats.rx_errors++;
+ if (rx_status & MACE_RCVFS_OFLO) {
+ lp->mace_stats.oflo++;
+ }
+ if (rx_status & MACE_RCVFS_CLSN) {
+ lp->mace_stats.clsn++;
+ }
+ if (rx_status & MACE_RCVFS_FRAM) {
+ lp->mace_stats.fram++;
+ }
+ if (rx_status & MACE_RCVFS_FCS) {
+ lp->mace_stats.fcs++;
+ }
+ } else {
+ short pkt_len = (rx_status & ~MACE_RCVFS_RCVSTS) - 4;
+ /* Auto Strip is off, always subtract 4 */
+ struct sk_buff *skb;
+
+ lp->mace_stats.rfs_rntpc += inb(ioaddr + AM2150_RCV);
+ /* runt packet count */
+ lp->mace_stats.rfs_rcvcc += inb(ioaddr + AM2150_RCV);
+ /* rcv collision count */
+
+ pr_debug(" receiving packet size 0x%X rx_status"
+ " 0x%X.\n", pkt_len, rx_status);
+
+ skb = netdev_alloc_skb(dev, pkt_len + 2);
+
+ if (skb != NULL) {
+ skb_reserve(skb, 2);
+ insw(ioaddr + AM2150_RCV, skb_put(skb, pkt_len), pkt_len>>1);
+ if (pkt_len & 1)
+ *(skb_tail_pointer(skb) - 1) = inb(ioaddr + AM2150_RCV);
+ skb->protocol = eth_type_trans(skb, dev);
+
+ netif_rx(skb); /* Send the packet to the upper (protocol) layers. */
+
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += pkt_len;
+ outb(0xFF, ioaddr + AM2150_RCV_NEXT); /* skip to next frame */
+ continue;
+ } else {
+ pr_debug("%s: couldn't allocate a sk_buff of size"
+ " %d.\n", dev->name, pkt_len);
+ dev->stats.rx_dropped++;
+ }
+ }
+ outb(0xFF, ioaddr + AM2150_RCV_NEXT); /* skip to next frame */
+ } /* while */
+
+ return 0;
+} /* mace_rx */
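+
+/*
+ * RFS0 decode sketch for mace_rx(): the top nibble of rx_status holds
+ * the RCVSTS error flags and the low 12 bits the received byte count,
+ * so an error-free 64-byte frame (FCS included) reads as 0x0040 and
+ *   pkt_len = (0x0040 & ~MACE_RCVFS_RCVSTS) - 4 = 60
+ * once the FCS is subtracted (Auto Strip is off).
+ */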
+
+/* ----------------------------------------------------------------------------
+pr_linux_stats
+---------------------------------------------------------------------------- */
+static void pr_linux_stats(struct net_device_stats *pstats)
+{
+ pr_debug("pr_linux_stats\n");
+ pr_debug(" rx_packets=%-7ld tx_packets=%ld\n",
+ (long)pstats->rx_packets, (long)pstats->tx_packets);
+ pr_debug(" rx_errors=%-7ld tx_errors=%ld\n",
+ (long)pstats->rx_errors, (long)pstats->tx_errors);
+ pr_debug(" rx_dropped=%-7ld tx_dropped=%ld\n",
+ (long)pstats->rx_dropped, (long)pstats->tx_dropped);
+ pr_debug(" multicast=%-7ld collisions=%ld\n",
+ (long)pstats->multicast, (long)pstats->collisions);
+
+ pr_debug(" rx_length_errors=%-7ld rx_over_errors=%ld\n",
+ (long)pstats->rx_length_errors, (long)pstats->rx_over_errors);
+ pr_debug(" rx_crc_errors=%-7ld rx_frame_errors=%ld\n",
+ (long)pstats->rx_crc_errors, (long)pstats->rx_frame_errors);
+ pr_debug(" rx_fifo_errors=%-7ld rx_missed_errors=%ld\n",
+ (long)pstats->rx_fifo_errors, (long)pstats->rx_missed_errors);
+
+ pr_debug(" tx_aborted_errors=%-7ld tx_carrier_errors=%ld\n",
+ (long)pstats->tx_aborted_errors, (long)pstats->tx_carrier_errors);
+ pr_debug(" tx_fifo_errors=%-7ld tx_heartbeat_errors=%ld\n",
+ (long)pstats->tx_fifo_errors, (long)pstats->tx_heartbeat_errors);
+ pr_debug(" tx_window_errors=%ld\n",
+ (long)pstats->tx_window_errors);
+} /* pr_linux_stats */
+
+/* ----------------------------------------------------------------------------
+pr_mace_stats
+---------------------------------------------------------------------------- */
+static void pr_mace_stats(mace_statistics *pstats)
+{
+ pr_debug("pr_mace_stats\n");
+
+ pr_debug(" xmtsv=%-7d uflo=%d\n",
+ pstats->xmtsv, pstats->uflo);
+ pr_debug(" lcol=%-7d more=%d\n",
+ pstats->lcol, pstats->more);
+ pr_debug(" one=%-7d defer=%d\n",
+ pstats->one, pstats->defer);
+ pr_debug(" lcar=%-7d rtry=%d\n",
+ pstats->lcar, pstats->rtry);
+
+ /* MACE_XMTRC */
+ pr_debug(" exdef=%-7d xmtrc=%d\n",
+ pstats->exdef, pstats->xmtrc);
+
+ /* RFS1--Receive Status (RCVSTS) */
+ pr_debug(" oflo=%-7d clsn=%d\n",
+ pstats->oflo, pstats->clsn);
+ pr_debug(" fram=%-7d fcs=%d\n",
+ pstats->fram, pstats->fcs);
+
+ /* RFS2--Runt Packet Count (RNTPC) */
+ /* RFS3--Receive Collision Count (RCVCC) */
+ pr_debug(" rfs_rntpc=%-7d rfs_rcvcc=%d\n",
+ pstats->rfs_rntpc, pstats->rfs_rcvcc);
+
+ /* MACE_IR */
+ pr_debug(" jab=%-7d babl=%d\n",
+ pstats->jab, pstats->babl);
+ pr_debug(" cerr=%-7d rcvcco=%d\n",
+ pstats->cerr, pstats->rcvcco);
+ pr_debug(" rntpco=%-7d mpco=%d\n",
+ pstats->rntpco, pstats->mpco);
+
+ /* MACE_MPC */
+ pr_debug(" mpc=%d\n", pstats->mpc);
+
+ /* MACE_RNTPC */
+ pr_debug(" rntpc=%d\n", pstats->rntpc);
+
+ /* MACE_RCVCC */
+ pr_debug(" rcvcc=%d\n", pstats->rcvcc);
+
+} /* pr_mace_stats */
+
+/* ----------------------------------------------------------------------------
+update_stats
+ Update statistics. We change to register window 1, so this
+ should be run single-threaded if the device is active. This is
+ expected to be a rare operation, and it's simpler for the rest
+ of the driver to assume that window 0 is always valid rather
+ than use a special window-state variable.
+
+ oflo & uflo should _never_ occur since it would mean the Xilinx
+ was not able to transfer data between the MACE FIFO and the
+ card's SRAM fast enough. If this happens, something is
+ seriously wrong with the hardware.
+---------------------------------------------------------------------------- */
+static void update_stats(unsigned int ioaddr, struct net_device *dev)
+{
+ mace_private *lp = netdev_priv(dev);
+
+ lp->mace_stats.rcvcc += mace_read(lp, ioaddr, MACE_RCVCC);
+ lp->mace_stats.rntpc += mace_read(lp, ioaddr, MACE_RNTPC);
+ lp->mace_stats.mpc += mace_read(lp, ioaddr, MACE_MPC);
+ /* At this point, mace_stats is fully updated for this call.
+ We may now update the netdev stats. */
+
+  /* The MACE has no equivalent for the netdev stats fields that are
+     commented out. */
+
+ /* dev->stats.multicast; */
+ dev->stats.collisions =
+ lp->mace_stats.rcvcco * 256 + lp->mace_stats.rcvcc;
+ /* Collision: The MACE may retry sending a packet 15 times
+ before giving up. The retry count is in XMTRC.
+ Does each retry constitute a collision?
+ If so, why doesn't the RCVCC record these collisions? */
+
+ /* detailed rx_errors: */
+ dev->stats.rx_length_errors =
+ lp->mace_stats.rntpco * 256 + lp->mace_stats.rntpc;
+ /* dev->stats.rx_over_errors */
+ dev->stats.rx_crc_errors = lp->mace_stats.fcs;
+ dev->stats.rx_frame_errors = lp->mace_stats.fram;
+ dev->stats.rx_fifo_errors = lp->mace_stats.oflo;
+ dev->stats.rx_missed_errors =
+ lp->mace_stats.mpco * 256 + lp->mace_stats.mpc;
+
+ /* detailed tx_errors */
+ dev->stats.tx_aborted_errors = lp->mace_stats.rtry;
+ dev->stats.tx_carrier_errors = lp->mace_stats.lcar;
+ /* LCAR usually results from bad cabling. */
+ dev->stats.tx_fifo_errors = lp->mace_stats.uflo;
+ dev->stats.tx_heartbeat_errors = lp->mace_stats.cerr;
+ /* dev->stats.tx_window_errors; */
+} /* update_stats */
+
+/* ----------------------------------------------------------------------------
+mace_get_stats
+ Gathers ethernet statistics from the MACE chip.
+---------------------------------------------------------------------------- */
+static struct net_device_stats *mace_get_stats(struct net_device *dev)
+{
+ mace_private *lp = netdev_priv(dev);
+
+ update_stats(dev->base_addr, dev);
+
+ pr_debug("%s: updating the statistics.\n", dev->name);
+ pr_linux_stats(&dev->stats);
+ pr_mace_stats(&lp->mace_stats);
+
+ return &dev->stats;
+} /* mace_get_stats */
+
+/* ----------------------------------------------------------------------------
+updateCRC
+ Modified from Am79C90 data sheet.
+---------------------------------------------------------------------------- */
+
+#ifdef BROKEN_MULTICAST
+
+static void updateCRC(int *CRC, int bit)
+{
+ static const int poly[]={
+ 1,1,1,0, 1,1,0,1,
+ 1,0,1,1, 1,0,0,0,
+ 1,0,0,0, 0,0,1,1,
+ 0,0,1,0, 0,0,0,0
+ }; /* CRC polynomial. poly[n] = coefficient of the x**n term of the
+ CRC generator polynomial. */
+
+ int j;
+
+ /* shift CRC and control bit (CRC[32]) */
+ for (j = 32; j > 0; j--)
+ CRC[j] = CRC[j-1];
+ CRC[0] = 0;
+
+ /* If bit XOR(control bit) = 1, set CRC = CRC XOR polynomial. */
+ if (bit ^ CRC[32])
+ for (j = 0; j < 32; j++)
+ CRC[j] ^= poly[j];
+} /* updateCRC */
+
+/* ----------------------------------------------------------------------------
+BuildLAF
+ Build logical address filter.
+ Modified from Am79C90 data sheet.
+
+Input
+ ladrf: logical address filter (contents initialized to 0)
+ adr: ethernet address
+---------------------------------------------------------------------------- */
+static void BuildLAF(int *ladrf, u8 *adr)
+{
+ int CRC[33]={1}; /* CRC register, 1 word/bit + extra control bit */
+
+ int i, byte; /* temporary array indices */
+ int hashcode; /* the output object */
+
+ CRC[32]=0;
+
+ for (byte = 0; byte < 6; byte++)
+ for (i = 0; i < 8; i++)
+ updateCRC(CRC, (adr[byte] >> i) & 1);
+
+ hashcode = 0;
+ for (i = 0; i < 6; i++)
+ hashcode = (hashcode << 1) + CRC[i];
+
+ byte = hashcode >> 3;
+ ladrf[byte] |= (1 << (hashcode & 7));
+
+#ifdef PCMCIA_DEBUG
+ if (0)
+ printk(KERN_DEBUG " adr =%pM\n", adr);
+ printk(KERN_DEBUG " hashcode = %d(decimal), ladrf[0:63] =", hashcode);
+ for (i = 0; i < 8; i++)
+ pr_cont(" %02X", ladrf[i]);
+ pr_cont("\n");
+#endif
+} /* BuildLAF */
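+
+/*
+ * A shorter equivalent is possible with the kernel CRC helpers (a
+ * sketch, assuming the same hash-bit ordering that the kernel's other
+ * MACE drivers get from ether_crc_le() in <linux/crc32.h>):
+ *
+ *   u32 hash = ether_crc_le(ETH_ALEN, addr) >> 26;
+ *   ladrf[hash >> 3] |= 1 << (hash & 7);
+ */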
+
+/* ----------------------------------------------------------------------------
+restore_multicast_list
+ Restores the multicast filter for MACE chip to the last
+ set_multicast_list() call.
+
+Input
+ multicast_num_addrs
+ multicast_ladrf[]
+---------------------------------------------------------------------------- */
+static void restore_multicast_list(struct net_device *dev)
+{
+ mace_private *lp = netdev_priv(dev);
+ int num_addrs = lp->multicast_num_addrs;
+ int *ladrf = lp->multicast_ladrf;
+ unsigned int ioaddr = dev->base_addr;
+ int i;
+
+ pr_debug("%s: restoring Rx mode to %d addresses.\n",
+ dev->name, num_addrs);
+
+ if (num_addrs > 0) {
+
+ pr_debug("Attempt to restore multicast list detected.\n");
+
+ mace_write(lp, ioaddr, MACE_IAC, MACE_IAC_ADDRCHG | MACE_IAC_LOGADDR);
+ /* Poll ADDRCHG bit */
+ while (mace_read(lp, ioaddr, MACE_IAC) & MACE_IAC_ADDRCHG)
+ ;
+ /* Set LADRF register */
+ for (i = 0; i < MACE_LADRF_LEN; i++)
+ mace_write(lp, ioaddr, MACE_LADRF, ladrf[i]);
+
+ mace_write(lp, ioaddr, MACE_UTR, MACE_UTR_RCVFCSE | MACE_UTR_LOOP_EXTERNAL);
+ mace_write(lp, ioaddr, MACE_MACCC, MACE_MACCC_ENXMT | MACE_MACCC_ENRCV);
+
+ } else if (num_addrs < 0) {
+
+ /* Promiscuous mode: receive all packets */
+ mace_write(lp, ioaddr, MACE_UTR, MACE_UTR_LOOP_EXTERNAL);
+ mace_write(lp, ioaddr, MACE_MACCC,
+ MACE_MACCC_PROM | MACE_MACCC_ENXMT | MACE_MACCC_ENRCV
+ );
+
+ } else {
+
+ /* Normal mode */
+ mace_write(lp, ioaddr, MACE_UTR, MACE_UTR_LOOP_EXTERNAL);
+ mace_write(lp, ioaddr, MACE_MACCC, MACE_MACCC_ENXMT | MACE_MACCC_ENRCV);
+
+ }
+} /* restore_multicast_list */
+
+/* ----------------------------------------------------------------------------
+set_multicast_list
+ Set or clear the multicast filter for this adaptor.
+
+Input
+ num_addrs == -1 Promiscuous mode, receive all packets
+ num_addrs == 0 Normal mode, clear multicast list
+ num_addrs > 0 Multicast mode, receive normal and MC packets, and do
+ best-effort filtering.
+Output
+ multicast_num_addrs
+ multicast_ladrf[]
+---------------------------------------------------------------------------- */
+
+static void set_multicast_list(struct net_device *dev)
+{
+ mace_private *lp = netdev_priv(dev);
+  u8 adr[ETH_ALEN] = {0}; /* Ethernet address */
+ struct netdev_hw_addr *ha;
+
+#ifdef PCMCIA_DEBUG
+ {
+ static int old;
+ if (netdev_mc_count(dev) != old) {
+ old = netdev_mc_count(dev);
+ pr_debug("%s: setting Rx mode to %d addresses.\n",
+ dev->name, old);
+ }
+ }
+#endif
+
+ /* Set multicast_num_addrs. */
+ lp->multicast_num_addrs = netdev_mc_count(dev);
+
+ /* Set multicast_ladrf. */
+  if (lp->multicast_num_addrs > 0) {
+ /* Calculate multicast logical address filter */
+    memset(lp->multicast_ladrf, 0, sizeof(lp->multicast_ladrf));
+ netdev_for_each_mc_addr(ha, dev) {
+ memcpy(adr, ha->addr, ETH_ALEN);
+ BuildLAF(lp->multicast_ladrf, adr);
+ }
+ }
+
+ restore_multicast_list(dev);
+
+} /* set_multicast_list */
+
+#endif /* BROKEN_MULTICAST */
+
+static void restore_multicast_list(struct net_device *dev)
+{
+ unsigned int ioaddr = dev->base_addr;
+ mace_private *lp = netdev_priv(dev);
+
+ pr_debug("%s: restoring Rx mode to %d addresses.\n", dev->name,
+ lp->multicast_num_addrs);
+
+ if (dev->flags & IFF_PROMISC) {
+ /* Promiscuous mode: receive all packets */
+ mace_write(lp,ioaddr, MACE_UTR, MACE_UTR_LOOP_EXTERNAL);
+ mace_write(lp, ioaddr, MACE_MACCC,
+ MACE_MACCC_PROM | MACE_MACCC_ENXMT | MACE_MACCC_ENRCV
+ );
+ } else {
+ /* Normal mode */
+ mace_write(lp, ioaddr, MACE_UTR, MACE_UTR_LOOP_EXTERNAL);
+ mace_write(lp, ioaddr, MACE_MACCC, MACE_MACCC_ENXMT | MACE_MACCC_ENRCV);
+ }
+} /* restore_multicast_list */
+
+static void set_multicast_list(struct net_device *dev)
+{
+ mace_private *lp = netdev_priv(dev);
+
+#ifdef PCMCIA_DEBUG
+ {
+ static int old;
+ if (netdev_mc_count(dev) != old) {
+ old = netdev_mc_count(dev);
+ pr_debug("%s: setting Rx mode to %d addresses.\n",
+ dev->name, old);
+ }
+ }
+#endif
+
+ lp->multicast_num_addrs = netdev_mc_count(dev);
+ restore_multicast_list(dev);
+
+} /* set_multicast_list */
+
+static const struct pcmcia_device_id nmclan_ids[] = {
+ PCMCIA_DEVICE_PROD_ID12("New Media Corporation", "Ethernet", 0x085a850b, 0x00b2e941),
+ PCMCIA_DEVICE_PROD_ID12("Portable Add-ons", "Ethernet+", 0xebf1d60, 0xad673aaf),
+ PCMCIA_DEVICE_NULL,
+};
+MODULE_DEVICE_TABLE(pcmcia, nmclan_ids);
+
+static struct pcmcia_driver nmclan_cs_driver = {
+ .owner = THIS_MODULE,
+ .name = "nmclan_cs",
+ .probe = nmclan_probe,
+ .remove = nmclan_detach,
+ .id_table = nmclan_ids,
+ .suspend = nmclan_suspend,
+ .resume = nmclan_resume,
+};
+module_pcmcia_driver(nmclan_cs_driver);
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
new file mode 100644
index 000000000..f78daba60
--- /dev/null
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -0,0 +1,3039 @@
+/* pcnet32.c: An AMD PCnet32 ethernet driver for linux. */
+/*
+ * Copyright 1996-1999 Thomas Bogendoerfer
+ *
+ * Derived from the lance driver written 1993,1994,1995 by Donald Becker.
+ *
+ * Copyright 1993 United States Government as represented by the
+ * Director, National Security Agency.
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License, incorporated herein by reference.
+ *
+ * This driver is for PCnet32 and PCnetPCI based ethercards
+ */
+/**************************************************************************
+ * 23 Oct, 2000.
+ * Fixed a few bugs, related to running the controller in 32bit mode.
+ *
+ * Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
+ *
+ *************************************************************************/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#define DRV_NAME "pcnet32"
+#define DRV_RELDATE "21.Apr.2008"
+#define PFX DRV_NAME ": "
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/crc32.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/moduleparam.h>
+#include <linux/bitops.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+
+#include <asm/dma.h>
+#include <asm/irq.h>
+
+/*
+ * PCI device identifiers for "new style" Linux PCI Device Drivers
+ */
+static const struct pci_device_id pcnet32_pci_tbl[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE_HOME), },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE), },
+
+ /*
+ * Adapters that were sold with IBM's RS/6000 or pSeries hardware have
+ * the incorrect vendor id.
+ */
+ { PCI_DEVICE(PCI_VENDOR_ID_TRIDENT, PCI_DEVICE_ID_AMD_LANCE),
+ .class = (PCI_CLASS_NETWORK_ETHERNET << 8), .class_mask = 0xffff00, },
+
+ { } /* terminate list */
+};
+
+MODULE_DEVICE_TABLE(pci, pcnet32_pci_tbl);
+
+static int cards_found;
+
+/*
+ * VLB I/O addresses
+ */
+static unsigned int pcnet32_portlist[] =
+ { 0x300, 0x320, 0x340, 0x360, 0 };
+
+static int pcnet32_debug;
+static int tx_start = 1; /* Mapping -- 0:20, 1:64, 2:128, 3:~220 (depends on chip version) */
+static int pcnet32vlb; /* check for VLB cards ? */
+
+static struct net_device *pcnet32_dev;
+
+static int max_interrupt_work = 2;
+static int rx_copybreak = 200;
+
+#define PCNET32_PORT_AUI 0x00
+#define PCNET32_PORT_10BT 0x01
+#define PCNET32_PORT_GPSI 0x02
+#define PCNET32_PORT_MII 0x03
+
+#define PCNET32_PORT_PORTSEL 0x03
+#define PCNET32_PORT_ASEL 0x04
+#define PCNET32_PORT_100 0x40
+#define PCNET32_PORT_FD 0x80
+
+#define PCNET32_DMA_MASK 0xffffffff
+
+#define PCNET32_WATCHDOG_TIMEOUT (jiffies + (2 * HZ))
+#define PCNET32_BLINK_TIMEOUT (jiffies + (HZ/4))
+
+/*
+ * table to translate option values from tulip
+ * to internal options
+ */
+static const unsigned char options_mapping[] = {
+ PCNET32_PORT_ASEL, /* 0 Auto-select */
+ PCNET32_PORT_AUI, /* 1 BNC/AUI */
+ PCNET32_PORT_AUI, /* 2 AUI/BNC */
+ PCNET32_PORT_ASEL, /* 3 not supported */
+ PCNET32_PORT_10BT | PCNET32_PORT_FD, /* 4 10baseT-FD */
+ PCNET32_PORT_ASEL, /* 5 not supported */
+ PCNET32_PORT_ASEL, /* 6 not supported */
+ PCNET32_PORT_ASEL, /* 7 not supported */
+ PCNET32_PORT_ASEL, /* 8 not supported */
+ PCNET32_PORT_MII, /* 9 MII 10baseT */
+ PCNET32_PORT_MII | PCNET32_PORT_FD, /* 10 MII 10baseT-FD */
+ PCNET32_PORT_MII, /* 11 MII (autosel) */
+ PCNET32_PORT_10BT, /* 12 10BaseT */
+ PCNET32_PORT_MII | PCNET32_PORT_100, /* 13 MII 100BaseTx */
+ /* 14 MII 100BaseTx-FD */
+ PCNET32_PORT_MII | PCNET32_PORT_100 | PCNET32_PORT_FD,
+ PCNET32_PORT_ASEL /* 15 not supported */
+};
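+
+/*
+ * Example: loading the module with options=4 selects, via this table,
+ * PCNET32_PORT_10BT | PCNET32_PORT_FD, i.e. forced 10baseT at full
+ * duplex.
+ */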
+
+static const char pcnet32_gstrings_test[][ETH_GSTRING_LEN] = {
+ "Loopback test (offline)"
+};
+
+#define PCNET32_TEST_LEN ARRAY_SIZE(pcnet32_gstrings_test)
+
+#define PCNET32_NUM_REGS 136
+
+#define MAX_UNITS 8 /* More are supported, limit only on options */
+static int options[MAX_UNITS];
+static int full_duplex[MAX_UNITS];
+static int homepna[MAX_UNITS];
+
+/*
+ * Theory of Operation
+ *
+ * This driver uses the same software structure as the normal lance
+ * driver. So look for a verbose description in lance.c. The differences
+ * to the normal lance driver is the use of the 32bit mode of PCnet32
+ * and PCnetPCI chips. Because these chips are 32bit chips, there is no
+ * 16MB limitation and we don't need bounce buffers.
+ */
+
+/*
+ * Set the number of Tx and Rx buffers, using Log_2(# buffers).
+ * The defaults below are 16 Tx buffers (log size 4) and 32 Rx buffers
+ * (log size 5); the hard maximum is 512 (2^9) buffers of each.
+ */
+#ifndef PCNET32_LOG_TX_BUFFERS
+#define PCNET32_LOG_TX_BUFFERS 4
+#define PCNET32_LOG_RX_BUFFERS 5
+#define PCNET32_LOG_MAX_TX_BUFFERS 9 /* 2^9 == 512 */
+#define PCNET32_LOG_MAX_RX_BUFFERS 9
+#endif
+
+#define TX_RING_SIZE (1 << (PCNET32_LOG_TX_BUFFERS))
+#define TX_MAX_RING_SIZE (1 << (PCNET32_LOG_MAX_TX_BUFFERS))
+
+#define RX_RING_SIZE (1 << (PCNET32_LOG_RX_BUFFERS))
+#define RX_MAX_RING_SIZE (1 << (PCNET32_LOG_MAX_RX_BUFFERS))
+
+#define PKT_BUF_SKB 1544
+/* actual buffer length after being aligned */
+#define PKT_BUF_SIZE (PKT_BUF_SKB - NET_IP_ALIGN)
+/* chip wants twos complement of the (aligned) buffer length */
+#define NEG_BUF_SIZE (NET_IP_ALIGN - PKT_BUF_SKB)
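+
+/*
+ * Worked example with NET_IP_ALIGN == 2: PKT_BUF_SIZE is 1542, so
+ * NEG_BUF_SIZE is -1542, which a descriptor stores as the 16-bit
+ * two's complement value 0xF9FA.
+ */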
+
+/* Offsets from base I/O address. */
+#define PCNET32_WIO_RDP 0x10
+#define PCNET32_WIO_RAP 0x12
+#define PCNET32_WIO_RESET 0x14
+#define PCNET32_WIO_BDP 0x16
+
+#define PCNET32_DWIO_RDP 0x10
+#define PCNET32_DWIO_RAP 0x14
+#define PCNET32_DWIO_RESET 0x18
+#define PCNET32_DWIO_BDP 0x1C
+
+#define PCNET32_TOTAL_SIZE 0x20
+
+#define CSR0 0
+#define CSR0_INIT 0x1
+#define CSR0_START 0x2
+#define CSR0_STOP 0x4
+#define CSR0_TXPOLL 0x8
+#define CSR0_INTEN 0x40
+#define CSR0_IDON 0x0100
+#define CSR0_NORMAL (CSR0_START | CSR0_INTEN)
+#define PCNET32_INIT_LOW 1
+#define PCNET32_INIT_HIGH 2
+#define CSR3 3
+#define CSR4 4
+#define CSR5 5
+#define CSR5_SUSPEND 0x0001
+#define CSR15 15
+#define PCNET32_MC_FILTER 8
+
+#define PCNET32_79C970A 0x2621
+
+/* The PCNET32 Rx and Tx ring descriptors. */
+struct pcnet32_rx_head {
+ __le32 base;
+	__le16	buf_length;	/* two's complement of length */
+ __le16 status;
+ __le32 msg_length;
+ __le32 reserved;
+};
+
+struct pcnet32_tx_head {
+ __le32 base;
+	__le16	length;		/* two's complement of length */
+ __le16 status;
+ __le32 misc;
+ __le32 reserved;
+};
+
+/* The PCNET32 32-Bit initialization block, described in databook. */
+struct pcnet32_init_block {
+ __le16 mode;
+ __le16 tlen_rlen;
+ u8 phys_addr[6];
+ __le16 reserved;
+ __le32 filter[2];
+ /* Receive and transmit ring base, along with extra bits. */
+ __le32 rx_ring;
+ __le32 tx_ring;
+};
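+
+/*
+ * tlen_rlen packs the two log2 ring sizes into one word: TX length
+ * bits in bits 15-12 and RX length bits in bits 7-4 (matching
+ * tx_len_bits = size << 12 and rx_len_bits = size << 4 below).
+ * A sketch for the default 16/32 rings:
+ *
+ *   lp->init_block->tlen_rlen = cpu_to_le16((4 << 12) | (5 << 4));
+ */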
+
+/* PCnet32 access functions */
+struct pcnet32_access {
+ u16 (*read_csr) (unsigned long, int);
+ void (*write_csr) (unsigned long, int, u16);
+ u16 (*read_bcr) (unsigned long, int);
+ void (*write_bcr) (unsigned long, int, u16);
+ u16 (*read_rap) (unsigned long);
+ void (*write_rap) (unsigned long, u16);
+ void (*reset) (unsigned long);
+};
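+
+/*
+ * All register access is funneled through this table so the same code
+ * paths can drive the chip in either 16-bit or 32-bit I/O mode, e.g.:
+ *
+ *   lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
+ *   csr0 = lp->a->read_csr(ioaddr, CSR0);
+ */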
+
+/*
+ * The first field of pcnet32_private is read by the ethernet device
+ * so the structure should be allocated using dma_alloc_coherent().
+ */
+struct pcnet32_private {
+ struct pcnet32_init_block *init_block;
+ /* The Tx and Rx ring entries must be aligned on 16-byte boundaries in 32bit mode. */
+ struct pcnet32_rx_head *rx_ring;
+ struct pcnet32_tx_head *tx_ring;
+ dma_addr_t init_dma_addr;/* DMA address of beginning of the init block,
+ returned by dma_alloc_coherent */
+ struct pci_dev *pci_dev;
+ const char *name;
+ /* The saved address of a sent-in-place packet/buffer, for skfree(). */
+ struct sk_buff **tx_skbuff;
+ struct sk_buff **rx_skbuff;
+ dma_addr_t *tx_dma_addr;
+ dma_addr_t *rx_dma_addr;
+ const struct pcnet32_access *a;
+ spinlock_t lock; /* Guard lock */
+ unsigned int cur_rx, cur_tx; /* The next free ring entry */
+ unsigned int rx_ring_size; /* current rx ring size */
+ unsigned int tx_ring_size; /* current tx ring size */
+ unsigned int rx_mod_mask; /* rx ring modular mask */
+ unsigned int tx_mod_mask; /* tx ring modular mask */
+ unsigned short rx_len_bits;
+ unsigned short tx_len_bits;
+ dma_addr_t rx_ring_dma_addr;
+ dma_addr_t tx_ring_dma_addr;
+ unsigned int dirty_rx, /* ring entries to be freed. */
+ dirty_tx;
+
+ struct net_device *dev;
+ struct napi_struct napi;
+ char tx_full;
+ char phycount; /* number of phys found */
+ int options;
+ unsigned int shared_irq:1, /* shared irq possible */
+ dxsuflo:1, /* disable transmit stop on uflo */
+ mii:1, /* mii port available */
+ autoneg:1, /* autoneg enabled */
+ port_tp:1, /* port set to TP */
+ fdx:1; /* full duplex enabled */
+ struct net_device *next;
+ struct mii_if_info mii_if;
+ struct timer_list watchdog_timer;
+ u32 msg_enable; /* debug message level */
+
+ /* each bit indicates an available PHY */
+ u32 phymask;
+ unsigned short chip_version; /* which variant this is */
+
+ /* saved registers during ethtool blink */
+ u16 save_regs[4];
+};
+
+static int pcnet32_probe_pci(struct pci_dev *, const struct pci_device_id *);
+static int pcnet32_probe1(unsigned long, int, struct pci_dev *);
+static int pcnet32_open(struct net_device *);
+static int pcnet32_init_ring(struct net_device *);
+static netdev_tx_t pcnet32_start_xmit(struct sk_buff *,
+ struct net_device *);
+static void pcnet32_tx_timeout(struct net_device *dev, unsigned int txqueue);
+static irqreturn_t pcnet32_interrupt(int, void *);
+static int pcnet32_close(struct net_device *);
+static struct net_device_stats *pcnet32_get_stats(struct net_device *);
+static void pcnet32_load_multicast(struct net_device *dev);
+static void pcnet32_set_multicast_list(struct net_device *);
+static int pcnet32_ioctl(struct net_device *, struct ifreq *, int);
+static void pcnet32_watchdog(struct timer_list *);
+static int mdio_read(struct net_device *dev, int phy_id, int reg_num);
+static void mdio_write(struct net_device *dev, int phy_id, int reg_num,
+ int val);
+static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits);
+static void pcnet32_ethtool_test(struct net_device *dev,
+ struct ethtool_test *eth_test, u64 * data);
+static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1);
+static int pcnet32_get_regs_len(struct net_device *dev);
+static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+ void *ptr);
+static void pcnet32_purge_tx_ring(struct net_device *dev);
+static int pcnet32_alloc_ring(struct net_device *dev, const char *name);
+static void pcnet32_free_ring(struct net_device *dev);
+static void pcnet32_check_media(struct net_device *dev, int verbose);
+
+static u16 pcnet32_wio_read_csr(unsigned long addr, int index)
+{
+ outw(index, addr + PCNET32_WIO_RAP);
+ return inw(addr + PCNET32_WIO_RDP);
+}
+
+static void pcnet32_wio_write_csr(unsigned long addr, int index, u16 val)
+{
+ outw(index, addr + PCNET32_WIO_RAP);
+ outw(val, addr + PCNET32_WIO_RDP);
+}
+
+static u16 pcnet32_wio_read_bcr(unsigned long addr, int index)
+{
+ outw(index, addr + PCNET32_WIO_RAP);
+ return inw(addr + PCNET32_WIO_BDP);
+}
+
+static void pcnet32_wio_write_bcr(unsigned long addr, int index, u16 val)
+{
+ outw(index, addr + PCNET32_WIO_RAP);
+ outw(val, addr + PCNET32_WIO_BDP);
+}
+
+static u16 pcnet32_wio_read_rap(unsigned long addr)
+{
+ return inw(addr + PCNET32_WIO_RAP);
+}
+
+static void pcnet32_wio_write_rap(unsigned long addr, u16 val)
+{
+ outw(val, addr + PCNET32_WIO_RAP);
+}
+
+static void pcnet32_wio_reset(unsigned long addr)
+{
+ inw(addr + PCNET32_WIO_RESET);
+}
+
+static int pcnet32_wio_check(unsigned long addr)
+{
+ outw(88, addr + PCNET32_WIO_RAP);
+ return inw(addr + PCNET32_WIO_RAP) == 88;
+}
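+
+/*
+ * Probe trick: RAP is readable, so writing an arbitrary register index
+ * (88, which also happens to be the chip-version CSR) and reading it
+ * back shows whether the chip currently answers 16-bit word I/O at
+ * this address; pcnet32_dwio_check() below plays the same trick for
+ * 32-bit I/O.
+ */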
+
+static const struct pcnet32_access pcnet32_wio = {
+ .read_csr = pcnet32_wio_read_csr,
+ .write_csr = pcnet32_wio_write_csr,
+ .read_bcr = pcnet32_wio_read_bcr,
+ .write_bcr = pcnet32_wio_write_bcr,
+ .read_rap = pcnet32_wio_read_rap,
+ .write_rap = pcnet32_wio_write_rap,
+ .reset = pcnet32_wio_reset
+};
+
+static u16 pcnet32_dwio_read_csr(unsigned long addr, int index)
+{
+ outl(index, addr + PCNET32_DWIO_RAP);
+ return inl(addr + PCNET32_DWIO_RDP) & 0xffff;
+}
+
+static void pcnet32_dwio_write_csr(unsigned long addr, int index, u16 val)
+{
+ outl(index, addr + PCNET32_DWIO_RAP);
+ outl(val, addr + PCNET32_DWIO_RDP);
+}
+
+static u16 pcnet32_dwio_read_bcr(unsigned long addr, int index)
+{
+ outl(index, addr + PCNET32_DWIO_RAP);
+ return inl(addr + PCNET32_DWIO_BDP) & 0xffff;
+}
+
+static void pcnet32_dwio_write_bcr(unsigned long addr, int index, u16 val)
+{
+ outl(index, addr + PCNET32_DWIO_RAP);
+ outl(val, addr + PCNET32_DWIO_BDP);
+}
+
+static u16 pcnet32_dwio_read_rap(unsigned long addr)
+{
+ return inl(addr + PCNET32_DWIO_RAP) & 0xffff;
+}
+
+static void pcnet32_dwio_write_rap(unsigned long addr, u16 val)
+{
+ outl(val, addr + PCNET32_DWIO_RAP);
+}
+
+static void pcnet32_dwio_reset(unsigned long addr)
+{
+ inl(addr + PCNET32_DWIO_RESET);
+}
+
+static int pcnet32_dwio_check(unsigned long addr)
+{
+ outl(88, addr + PCNET32_DWIO_RAP);
+ return (inl(addr + PCNET32_DWIO_RAP) & 0xffff) == 88;
+}
+
+static const struct pcnet32_access pcnet32_dwio = {
+ .read_csr = pcnet32_dwio_read_csr,
+ .write_csr = pcnet32_dwio_write_csr,
+ .read_bcr = pcnet32_dwio_read_bcr,
+ .write_bcr = pcnet32_dwio_write_bcr,
+ .read_rap = pcnet32_dwio_read_rap,
+ .write_rap = pcnet32_dwio_write_rap,
+ .reset = pcnet32_dwio_reset
+};
+
+static void pcnet32_netif_stop(struct net_device *dev)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+
+ netif_trans_update(dev); /* prevent tx timeout */
+ napi_disable(&lp->napi);
+ netif_tx_disable(dev);
+}
+
+static void pcnet32_netif_start(struct net_device *dev)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ ulong ioaddr = dev->base_addr;
+ u16 val;
+
+ netif_wake_queue(dev);
+ val = lp->a->read_csr(ioaddr, CSR3);
+ val &= 0x00ff;
+ lp->a->write_csr(ioaddr, CSR3, val);
+ napi_enable(&lp->napi);
+}
+
+/*
+ * Allocate space for the new sized tx ring.
+ * Free old resources
+ * Save new resources.
+ * Any failure keeps old resources.
+ * Must be called with lp->lock held.
+ */
+static void pcnet32_realloc_tx_ring(struct net_device *dev,
+ struct pcnet32_private *lp,
+ unsigned int size)
+{
+ dma_addr_t new_ring_dma_addr;
+ dma_addr_t *new_dma_addr_list;
+ struct pcnet32_tx_head *new_tx_ring;
+ struct sk_buff **new_skb_list;
+ unsigned int entries = BIT(size);
+
+ pcnet32_purge_tx_ring(dev);
+
+ new_tx_ring =
+ dma_alloc_coherent(&lp->pci_dev->dev,
+ sizeof(struct pcnet32_tx_head) * entries,
+ &new_ring_dma_addr, GFP_ATOMIC);
+ if (new_tx_ring == NULL)
+ return;
+
+ new_dma_addr_list = kcalloc(entries, sizeof(dma_addr_t), GFP_ATOMIC);
+ if (!new_dma_addr_list)
+ goto free_new_tx_ring;
+
+ new_skb_list = kcalloc(entries, sizeof(struct sk_buff *), GFP_ATOMIC);
+ if (!new_skb_list)
+ goto free_new_lists;
+
+ kfree(lp->tx_skbuff);
+ kfree(lp->tx_dma_addr);
+ dma_free_coherent(&lp->pci_dev->dev,
+ sizeof(struct pcnet32_tx_head) * lp->tx_ring_size,
+ lp->tx_ring, lp->tx_ring_dma_addr);
+
+ lp->tx_ring_size = entries;
+ lp->tx_mod_mask = lp->tx_ring_size - 1;
+ lp->tx_len_bits = (size << 12);
+ lp->tx_ring = new_tx_ring;
+ lp->tx_ring_dma_addr = new_ring_dma_addr;
+ lp->tx_dma_addr = new_dma_addr_list;
+ lp->tx_skbuff = new_skb_list;
+ return;
+
+free_new_lists:
+ kfree(new_dma_addr_list);
+free_new_tx_ring:
+ dma_free_coherent(&lp->pci_dev->dev,
+ sizeof(struct pcnet32_tx_head) * entries,
+ new_tx_ring, new_ring_dma_addr);
+}
+
+/*
+ * Allocate space for the new sized rx ring.
+ * Re-use old receive buffers.
+ * alloc extra buffers
+ *   free unneeded buffers
+ * Save new resources.
+ * Any failure keeps old resources.
+ * Must be called with lp->lock held.
+ */
+static void pcnet32_realloc_rx_ring(struct net_device *dev,
+ struct pcnet32_private *lp,
+ unsigned int size)
+{
+ dma_addr_t new_ring_dma_addr;
+ dma_addr_t *new_dma_addr_list;
+ struct pcnet32_rx_head *new_rx_ring;
+ struct sk_buff **new_skb_list;
+ int new, overlap;
+ unsigned int entries = BIT(size);
+
+ new_rx_ring =
+ dma_alloc_coherent(&lp->pci_dev->dev,
+ sizeof(struct pcnet32_rx_head) * entries,
+ &new_ring_dma_addr, GFP_ATOMIC);
+ if (new_rx_ring == NULL)
+ return;
+
+ new_dma_addr_list = kcalloc(entries, sizeof(dma_addr_t), GFP_ATOMIC);
+ if (!new_dma_addr_list)
+ goto free_new_rx_ring;
+
+ new_skb_list = kcalloc(entries, sizeof(struct sk_buff *), GFP_ATOMIC);
+ if (!new_skb_list)
+ goto free_new_lists;
+
+ /* first copy the current receive buffers */
+ overlap = min(entries, lp->rx_ring_size);
+ for (new = 0; new < overlap; new++) {
+ new_rx_ring[new] = lp->rx_ring[new];
+ new_dma_addr_list[new] = lp->rx_dma_addr[new];
+ new_skb_list[new] = lp->rx_skbuff[new];
+ }
+ /* now allocate any new buffers needed */
+ for (; new < entries; new++) {
+ struct sk_buff *rx_skbuff;
+ new_skb_list[new] = netdev_alloc_skb(dev, PKT_BUF_SKB);
+ rx_skbuff = new_skb_list[new];
+ if (!rx_skbuff) {
+ /* keep the original lists and buffers */
+ netif_err(lp, drv, dev, "%s netdev_alloc_skb failed\n",
+ __func__);
+ goto free_all_new;
+ }
+ skb_reserve(rx_skbuff, NET_IP_ALIGN);
+
+ new_dma_addr_list[new] =
+ dma_map_single(&lp->pci_dev->dev, rx_skbuff->data,
+ PKT_BUF_SIZE, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&lp->pci_dev->dev, new_dma_addr_list[new])) {
+ netif_err(lp, drv, dev, "%s dma mapping failed\n",
+ __func__);
+ dev_kfree_skb(new_skb_list[new]);
+ goto free_all_new;
+ }
+ new_rx_ring[new].base = cpu_to_le32(new_dma_addr_list[new]);
+ new_rx_ring[new].buf_length = cpu_to_le16(NEG_BUF_SIZE);
+ new_rx_ring[new].status = cpu_to_le16(0x8000);
+ }
+ /* and free any unneeded buffers */
+ for (; new < lp->rx_ring_size; new++) {
+ if (lp->rx_skbuff[new]) {
+ if (!dma_mapping_error(&lp->pci_dev->dev, lp->rx_dma_addr[new]))
+ dma_unmap_single(&lp->pci_dev->dev,
+ lp->rx_dma_addr[new],
+ PKT_BUF_SIZE,
+ DMA_FROM_DEVICE);
+ dev_kfree_skb(lp->rx_skbuff[new]);
+ }
+ }
+
+ kfree(lp->rx_skbuff);
+ kfree(lp->rx_dma_addr);
+ dma_free_coherent(&lp->pci_dev->dev,
+ sizeof(struct pcnet32_rx_head) * lp->rx_ring_size,
+ lp->rx_ring, lp->rx_ring_dma_addr);
+
+ lp->rx_ring_size = entries;
+ lp->rx_mod_mask = lp->rx_ring_size - 1;
+ lp->rx_len_bits = (size << 4);
+ lp->rx_ring = new_rx_ring;
+ lp->rx_ring_dma_addr = new_ring_dma_addr;
+ lp->rx_dma_addr = new_dma_addr_list;
+ lp->rx_skbuff = new_skb_list;
+ return;
+
+free_all_new:
+ while (--new >= lp->rx_ring_size) {
+ if (new_skb_list[new]) {
+ if (!dma_mapping_error(&lp->pci_dev->dev, new_dma_addr_list[new]))
+ dma_unmap_single(&lp->pci_dev->dev,
+ new_dma_addr_list[new],
+ PKT_BUF_SIZE,
+ DMA_FROM_DEVICE);
+ dev_kfree_skb(new_skb_list[new]);
+ }
+ }
+ kfree(new_skb_list);
+free_new_lists:
+ kfree(new_dma_addr_list);
+free_new_rx_ring:
+ dma_free_coherent(&lp->pci_dev->dev,
+ sizeof(struct pcnet32_rx_head) * entries,
+ new_rx_ring, new_ring_dma_addr);
+}
+
+static void pcnet32_purge_rx_ring(struct net_device *dev)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ int i;
+
+ /* free all allocated skbuffs */
+ for (i = 0; i < lp->rx_ring_size; i++) {
+ lp->rx_ring[i].status = 0; /* CPU owns buffer */
+ wmb(); /* Make sure adapter sees owner change */
+ if (lp->rx_skbuff[i]) {
+ if (!dma_mapping_error(&lp->pci_dev->dev, lp->rx_dma_addr[i]))
+ dma_unmap_single(&lp->pci_dev->dev,
+ lp->rx_dma_addr[i],
+ PKT_BUF_SIZE,
+ DMA_FROM_DEVICE);
+ dev_kfree_skb_any(lp->rx_skbuff[i]);
+ }
+ lp->rx_skbuff[i] = NULL;
+ lp->rx_dma_addr[i] = 0;
+ }
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void pcnet32_poll_controller(struct net_device *dev)
+{
+ disable_irq(dev->irq);
+ pcnet32_interrupt(0, dev);
+ enable_irq(dev->irq);
+}
+#endif
+
+/*
+ * lp->lock must be held.
+ */
+static int pcnet32_suspend(struct net_device *dev, unsigned long *flags,
+ int can_sleep)
+{
+ int csr5;
+ struct pcnet32_private *lp = netdev_priv(dev);
+ const struct pcnet32_access *a = lp->a;
+ ulong ioaddr = dev->base_addr;
+ int ticks;
+
+ /* really old chips have to be stopped. */
+ if (lp->chip_version < PCNET32_79C970A)
+ return 0;
+
+ /* set SUSPEND (SPND) - CSR5 bit 0 */
+ csr5 = a->read_csr(ioaddr, CSR5);
+ a->write_csr(ioaddr, CSR5, csr5 | CSR5_SUSPEND);
+
+ /* poll waiting for bit to be set */
+ ticks = 0;
+ while (!(a->read_csr(ioaddr, CSR5) & CSR5_SUSPEND)) {
+ spin_unlock_irqrestore(&lp->lock, *flags);
+ if (can_sleep)
+ msleep(1);
+ else
+ mdelay(1);
+ spin_lock_irqsave(&lp->lock, *flags);
+ ticks++;
+ if (ticks > 200) {
+ netif_printk(lp, hw, KERN_DEBUG, dev,
+ "Error getting into suspend!\n");
+ return 0;
+ }
+ }
+ return 1;
+}
+
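+/*
+ * A sketch of the caller pattern around pcnet32_suspend() (see
+ * pcnet32_set_link_ksettings() below, which follows it):
+ *
+ *	spin_lock_irqsave(&lp->lock, flags);
+ *	suspended = pcnet32_suspend(dev, &flags, 0);
+ *	... poke registers ...
+ *	if (suspended)
+ *		pcnet32_clr_suspend(lp, ioaddr);
+ *	else if (netif_running(dev))
+ *		pcnet32_restart(dev, CSR0_NORMAL);
+ *	spin_unlock_irqrestore(&lp->lock, flags);
+ */
+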
+static void pcnet32_clr_suspend(struct pcnet32_private *lp, ulong ioaddr)
+{
+ int csr5 = lp->a->read_csr(ioaddr, CSR5);
+ /* clear SUSPEND (SPND) - CSR5 bit 0 */
+ lp->a->write_csr(ioaddr, CSR5, csr5 & ~CSR5_SUSPEND);
+}
+
+static int pcnet32_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&lp->lock, flags);
+ if (lp->mii) {
+ mii_ethtool_get_link_ksettings(&lp->mii_if, cmd);
+ } else if (lp->chip_version == PCNET32_79C970A) {
+ if (lp->autoneg) {
+ cmd->base.autoneg = AUTONEG_ENABLE;
+ if (lp->a->read_bcr(dev->base_addr, 4) == 0xc0)
+ cmd->base.port = PORT_AUI;
+ else
+ cmd->base.port = PORT_TP;
+ } else {
+ cmd->base.autoneg = AUTONEG_DISABLE;
+ cmd->base.port = lp->port_tp ? PORT_TP : PORT_AUI;
+ }
+ cmd->base.duplex = lp->fdx ? DUPLEX_FULL : DUPLEX_HALF;
+ cmd->base.speed = SPEED_10;
+ ethtool_convert_legacy_u32_to_link_mode(
+ cmd->link_modes.supported,
+ SUPPORTED_TP | SUPPORTED_AUI);
+ }
+ spin_unlock_irqrestore(&lp->lock, flags);
+ return 0;
+}
+
+static int pcnet32_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ ulong ioaddr = dev->base_addr;
+ unsigned long flags;
+ int r = -EOPNOTSUPP;
+ int suspended, bcr2, bcr9, csr15;
+
+ spin_lock_irqsave(&lp->lock, flags);
+ if (lp->mii) {
+ r = mii_ethtool_set_link_ksettings(&lp->mii_if, cmd);
+ } else if (lp->chip_version == PCNET32_79C970A) {
+ suspended = pcnet32_suspend(dev, &flags, 0);
+ if (!suspended)
+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
+
+ lp->autoneg = cmd->base.autoneg == AUTONEG_ENABLE;
+ bcr2 = lp->a->read_bcr(ioaddr, 2);
+ if (cmd->base.autoneg == AUTONEG_ENABLE) {
+ lp->a->write_bcr(ioaddr, 2, bcr2 | 0x0002);
+ } else {
+ lp->a->write_bcr(ioaddr, 2, bcr2 & ~0x0002);
+
+ lp->port_tp = cmd->base.port == PORT_TP;
+ csr15 = lp->a->read_csr(ioaddr, CSR15) & ~0x0180;
+ if (cmd->base.port == PORT_TP)
+ csr15 |= 0x0080;
+ lp->a->write_csr(ioaddr, CSR15, csr15);
+ lp->init_block->mode = cpu_to_le16(csr15);
+
+ lp->fdx = cmd->base.duplex == DUPLEX_FULL;
+ bcr9 = lp->a->read_bcr(ioaddr, 9) & ~0x0003;
+ if (cmd->base.duplex == DUPLEX_FULL)
+ bcr9 |= 0x0003;
+ lp->a->write_bcr(ioaddr, 9, bcr9);
+ }
+ if (suspended)
+ pcnet32_clr_suspend(lp, ioaddr);
+ else if (netif_running(dev))
+ pcnet32_restart(dev, CSR0_NORMAL);
+ r = 0;
+ }
+ spin_unlock_irqrestore(&lp->lock, flags);
+ return r;
+}
+
+static void pcnet32_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ if (lp->pci_dev)
+ strlcpy(info->bus_info, pci_name(lp->pci_dev),
+ sizeof(info->bus_info));
+ else
+ snprintf(info->bus_info, sizeof(info->bus_info),
+ "VLB 0x%lx", dev->base_addr);
+}
+
+static u32 pcnet32_get_link(struct net_device *dev)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ unsigned long flags;
+ int r;
+
+ spin_lock_irqsave(&lp->lock, flags);
+ if (lp->mii) {
+ r = mii_link_ok(&lp->mii_if);
+ } else if (lp->chip_version == PCNET32_79C970A) {
+ ulong ioaddr = dev->base_addr; /* card base I/O address */
+ /* only read link if port is set to TP */
+ if (!lp->autoneg && lp->port_tp)
+ r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
+ else /* link always up for AUI port or port auto select */
+ r = 1;
+ } else if (lp->chip_version > PCNET32_79C970A) {
+ ulong ioaddr = dev->base_addr; /* card base I/O address */
+ r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
+ } else { /* can not detect link on really old chips */
+ r = 1;
+ }
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ return r;
+}
+
+static u32 pcnet32_get_msglevel(struct net_device *dev)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ return lp->msg_enable;
+}
+
+static void pcnet32_set_msglevel(struct net_device *dev, u32 value)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ lp->msg_enable = value;
+}
+
+static int pcnet32_nway_reset(struct net_device *dev)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ unsigned long flags;
+ int r = -EOPNOTSUPP;
+
+ if (lp->mii) {
+ spin_lock_irqsave(&lp->lock, flags);
+ r = mii_nway_restart(&lp->mii_if);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ }
+ return r;
+}
+
+static void pcnet32_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ering)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+
+ ering->tx_max_pending = TX_MAX_RING_SIZE;
+ ering->tx_pending = lp->tx_ring_size;
+ ering->rx_max_pending = RX_MAX_RING_SIZE;
+ ering->rx_pending = lp->rx_ring_size;
+}
+
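+/*
+ * The mod masks and the init block length fields require power-of-two
+ * rings, so the requested counts below are rounded up to the next power
+ * of two: e.g. a requested tx_pending of 100 yields a 128-entry ring.
+ */
+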
+static int pcnet32_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ering)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ unsigned long flags;
+ unsigned int size;
+ ulong ioaddr = dev->base_addr;
+ int i;
+
+ if (ering->rx_mini_pending || ering->rx_jumbo_pending)
+ return -EINVAL;
+
+ if (netif_running(dev))
+ pcnet32_netif_stop(dev);
+
+ spin_lock_irqsave(&lp->lock, flags);
+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
+
+ size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
+
+ /* set the minimum ring size to 4, to allow the loopback test to work
+ * unchanged.
+ */
+ for (i = 2; i <= PCNET32_LOG_MAX_TX_BUFFERS; i++) {
+ if (size <= (1 << i))
+ break;
+ }
+ if ((1 << i) != lp->tx_ring_size)
+ pcnet32_realloc_tx_ring(dev, lp, i);
+
+ size = min(ering->rx_pending, (unsigned int)RX_MAX_RING_SIZE);
+ for (i = 2; i <= PCNET32_LOG_MAX_RX_BUFFERS; i++) {
+ if (size <= (1 << i))
+ break;
+ }
+ if ((1 << i) != lp->rx_ring_size)
+ pcnet32_realloc_rx_ring(dev, lp, i);
+
+ lp->napi.weight = lp->rx_ring_size / 2;
+
+ if (netif_running(dev)) {
+ pcnet32_netif_start(dev);
+ pcnet32_restart(dev, CSR0_NORMAL);
+ }
+
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ netif_info(lp, drv, dev, "Ring Param Settings: RX: %d, TX: %d\n",
+ lp->rx_ring_size, lp->tx_ring_size);
+
+ return 0;
+}
+
+static void pcnet32_get_strings(struct net_device *dev, u32 stringset,
+ u8 *data)
+{
+ memcpy(data, pcnet32_gstrings_test, sizeof(pcnet32_gstrings_test));
+}
+
+static int pcnet32_get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_TEST:
+ return PCNET32_TEST_LEN;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void pcnet32_ethtool_test(struct net_device *dev,
+ struct ethtool_test *test, u64 * data)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ int rc;
+
+ if (test->flags == ETH_TEST_FL_OFFLINE) {
+ rc = pcnet32_loopback_test(dev, data);
+ if (rc) {
+ netif_printk(lp, hw, KERN_DEBUG, dev,
+ "Loopback test failed\n");
+ test->flags |= ETH_TEST_FL_FAILED;
+ } else
+ netif_printk(lp, hw, KERN_DEBUG, dev,
+ "Loopback test passed\n");
+ } else
+ netif_printk(lp, hw, KERN_DEBUG, dev,
+ "No tests to run (specify 'Offline' on ethtool)\n");
+} /* end pcnet32_ethtool_test */
+
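+/*
+ * The loopback test below transmits numbuffs frames and compares each
+ * received buffer byte-for-byte with the original.  The fill loop
+ * produces this layout:
+ *
+ *	bytes  0..5	destination MAC (= dev->dev_addr)
+ *	bytes  6..11	source MAC	(= dev->dev_addr)
+ *	bytes 12..13	type 0x0806
+ *	byte  14	packet number
+ *	bytes 15..74	incrementing data pattern
+ */
+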
+static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ const struct pcnet32_access *a = lp->a; /* access to registers */
+ ulong ioaddr = dev->base_addr; /* card base I/O address */
+ struct sk_buff *skb; /* sk buff */
+ int x, i; /* counters */
+ int numbuffs = 4; /* number of TX/RX buffers and descs */
+ u16 status = 0x8300; /* TX ring status */
+ __le16 teststatus; /* test of ring status */
+ int rc; /* return code */
+ int size; /* size of packets */
+ unsigned char *packet; /* source packet data */
+ static const int data_len = 60; /* length of source packets */
+ unsigned long flags;
+ unsigned long ticks;
+
+ rc = 1; /* default to fail */
+
+ if (netif_running(dev))
+ pcnet32_netif_stop(dev);
+
+ spin_lock_irqsave(&lp->lock, flags);
+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
+
+ numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));
+
+ /* Reset the PCNET32 */
+ lp->a->reset(ioaddr);
+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
+
+ /* switch pcnet32 to 32bit mode */
+ lp->a->write_bcr(ioaddr, 20, 2);
+
+ /* purge & init rings but don't actually restart */
+ pcnet32_restart(dev, 0x0000);
+
+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
+
+ /* Initialize Transmit buffers. */
+ size = data_len + 15;
+ for (x = 0; x < numbuffs; x++) {
+ skb = netdev_alloc_skb(dev, size);
+ if (!skb) {
+ netif_printk(lp, hw, KERN_DEBUG, dev,
+ "Cannot allocate skb at line: %d!\n",
+ __LINE__);
+ goto clean_up;
+ }
+ packet = skb->data;
+ skb_put(skb, size); /* create space for data */
+ lp->tx_skbuff[x] = skb;
+ lp->tx_ring[x].length = cpu_to_le16(-skb->len);
+ lp->tx_ring[x].misc = 0;
+
+ /* put DA and SA into the skb */
+ for (i = 0; i < 6; i++)
+ *packet++ = dev->dev_addr[i];
+ for (i = 0; i < 6; i++)
+ *packet++ = dev->dev_addr[i];
+ /* type */
+ *packet++ = 0x08;
+ *packet++ = 0x06;
+ /* packet number */
+ *packet++ = x;
+ /* fill packet with data */
+ for (i = 0; i < data_len; i++)
+ *packet++ = i;
+
+ lp->tx_dma_addr[x] =
+ dma_map_single(&lp->pci_dev->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&lp->pci_dev->dev, lp->tx_dma_addr[x])) {
+ netif_printk(lp, hw, KERN_DEBUG, dev,
+ "DMA mapping error at line: %d!\n",
+ __LINE__);
+ goto clean_up;
+ }
+ lp->tx_ring[x].base = cpu_to_le32(lp->tx_dma_addr[x]);
+ wmb(); /* Make sure owner changes after all others are visible */
+ lp->tx_ring[x].status = cpu_to_le16(status);
+ }
+
+ x = a->read_bcr(ioaddr, 32); /* set internal loopback in BCR32 */
+ a->write_bcr(ioaddr, 32, x | 0x0002);
+
+ /* set int loopback in CSR15 */
+ x = a->read_csr(ioaddr, CSR15) & 0xfffc;
+ lp->a->write_csr(ioaddr, CSR15, x | 0x0044);
+
+ teststatus = cpu_to_le16(0x8000);
+ lp->a->write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
+
+ /* Check status of descriptors */
+ for (x = 0; x < numbuffs; x++) {
+ ticks = 0;
+ rmb();
+ while ((lp->rx_ring[x].status & teststatus) && (ticks < 200)) {
+ spin_unlock_irqrestore(&lp->lock, flags);
+ msleep(1);
+ spin_lock_irqsave(&lp->lock, flags);
+ rmb();
+ ticks++;
+ }
+ if (ticks == 200) {
+ netif_err(lp, hw, dev, "Desc %d failed to reset!\n", x);
+ break;
+ }
+ }
+
+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
+ wmb();
+ if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
+ netdev_printk(KERN_DEBUG, dev, "RX loopback packets:\n");
+
+ for (x = 0; x < numbuffs; x++) {
+ netdev_printk(KERN_DEBUG, dev, "Packet %d: ", x);
+ skb = lp->rx_skbuff[x];
+ for (i = 0; i < size; i++)
+ pr_cont(" %02x", *(skb->data + i));
+ pr_cont("\n");
+ }
+ }
+
+ x = 0;
+ rc = 0;
+ while (x < numbuffs && !rc) {
+ skb = lp->rx_skbuff[x];
+ packet = lp->tx_skbuff[x]->data;
+ for (i = 0; i < size; i++) {
+ if (*(skb->data + i) != packet[i]) {
+ netif_printk(lp, hw, KERN_DEBUG, dev,
+ "Error in compare! %2x - %02x %02x\n",
+ i, *(skb->data + i), packet[i]);
+ rc = 1;
+ break;
+ }
+ }
+ x++;
+ }
+
+clean_up:
+ *data1 = rc;
+ pcnet32_purge_tx_ring(dev);
+
+ x = a->read_csr(ioaddr, CSR15);
+ a->write_csr(ioaddr, CSR15, (x & ~0x0044)); /* reset bits 6 and 2 */
+
+ x = a->read_bcr(ioaddr, 32); /* reset internal loopback */
+ a->write_bcr(ioaddr, 32, (x & ~0x0002));
+
+ if (netif_running(dev)) {
+ pcnet32_netif_start(dev);
+ pcnet32_restart(dev, CSR0_NORMAL);
+ } else {
+ pcnet32_purge_rx_ring(dev);
+ lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
+ }
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ return rc;
+} /* end pcnet32_loopback_test */
+
+static int pcnet32_set_phys_id(struct net_device *dev,
+ enum ethtool_phys_id_state state)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ const struct pcnet32_access *a = lp->a;
+ ulong ioaddr = dev->base_addr;
+ unsigned long flags;
+ int i;
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ /* Save the current value of the bcrs */
+ spin_lock_irqsave(&lp->lock, flags);
+ for (i = 4; i < 8; i++)
+ lp->save_regs[i - 4] = a->read_bcr(ioaddr, i);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ return 2; /* cycle on/off twice per second */
+
+ case ETHTOOL_ID_ON:
+ case ETHTOOL_ID_OFF:
+ /* Blink the led */
+ spin_lock_irqsave(&lp->lock, flags);
+ for (i = 4; i < 8; i++)
+ a->write_bcr(ioaddr, i, a->read_bcr(ioaddr, i) ^ 0x4000);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ break;
+
+ case ETHTOOL_ID_INACTIVE:
+ /* Restore the original value of the bcrs */
+ spin_lock_irqsave(&lp->lock, flags);
+ for (i = 4; i < 8; i++)
+ a->write_bcr(ioaddr, i, lp->save_regs[i - 4]);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ }
+ return 0;
+}
+
+/*
+ * process one receive descriptor entry
+ */
+
+static void pcnet32_rx_entry(struct net_device *dev,
+ struct pcnet32_private *lp,
+ struct pcnet32_rx_head *rxp,
+ int entry)
+{
+ int status = (short)le16_to_cpu(rxp->status) >> 8;
+ int rx_in_place = 0;
+ struct sk_buff *skb;
+ short pkt_len;
+
+ if (status != 0x03) { /* There was an error. */
+ /*
+ * There is a tricky error noted by John Murphy,
+ * <murf@perftech.com> to Russ Nelson: Even with full-sized
+ * buffers it's possible for a jabber packet to use two
+ * buffers, with only the last correctly noting the error.
+ */
+ if (status & 0x01) /* Only count a general error at the */
+ dev->stats.rx_errors++; /* end of a packet. */
+ if (status & 0x20)
+ dev->stats.rx_frame_errors++;
+ if (status & 0x10)
+ dev->stats.rx_over_errors++;
+ if (status & 0x08)
+ dev->stats.rx_crc_errors++;
+ if (status & 0x04)
+ dev->stats.rx_fifo_errors++;
+ return;
+ }
+
+ pkt_len = (le32_to_cpu(rxp->msg_length) & 0xfff) - 4;
+
+ /* Discard oversize frames. */
+ if (unlikely(pkt_len > PKT_BUF_SIZE)) {
+ netif_err(lp, drv, dev, "Impossible packet size %d!\n",
+ pkt_len);
+ dev->stats.rx_errors++;
+ return;
+ }
+ if (pkt_len < 60) {
+ netif_err(lp, rx_err, dev, "Runt packet!\n");
+ dev->stats.rx_errors++;
+ return;
+ }
+
+ if (pkt_len > rx_copybreak) {
+ struct sk_buff *newskb;
+ dma_addr_t new_dma_addr;
+
+ newskb = netdev_alloc_skb(dev, PKT_BUF_SKB);
+		/*
+		 * Map the new buffer; if mapping fails, drop the packet
+		 * and reuse the old buffer.
+		 */
+ if (newskb) {
+ skb_reserve(newskb, NET_IP_ALIGN);
+ new_dma_addr = dma_map_single(&lp->pci_dev->dev,
+ newskb->data,
+ PKT_BUF_SIZE,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&lp->pci_dev->dev, new_dma_addr)) {
+ netif_err(lp, rx_err, dev,
+ "DMA mapping error.\n");
+ dev_kfree_skb(newskb);
+ skb = NULL;
+ } else {
+ skb = lp->rx_skbuff[entry];
+ dma_unmap_single(&lp->pci_dev->dev,
+ lp->rx_dma_addr[entry],
+ PKT_BUF_SIZE,
+ DMA_FROM_DEVICE);
+ skb_put(skb, pkt_len);
+ lp->rx_skbuff[entry] = newskb;
+ lp->rx_dma_addr[entry] = new_dma_addr;
+ rxp->base = cpu_to_le32(new_dma_addr);
+ rx_in_place = 1;
+ }
+ } else
+ skb = NULL;
+ } else
+ skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN);
+
+ if (skb == NULL) {
+ dev->stats.rx_dropped++;
+ return;
+ }
+ if (!rx_in_place) {
+ skb_reserve(skb, NET_IP_ALIGN);
+ skb_put(skb, pkt_len); /* Make room */
+ dma_sync_single_for_cpu(&lp->pci_dev->dev,
+ lp->rx_dma_addr[entry], pkt_len,
+ DMA_FROM_DEVICE);
+ skb_copy_to_linear_data(skb,
+ (unsigned char *)(lp->rx_skbuff[entry]->data),
+ pkt_len);
+ dma_sync_single_for_device(&lp->pci_dev->dev,
+ lp->rx_dma_addr[entry], pkt_len,
+ DMA_FROM_DEVICE);
+ }
+ dev->stats.rx_bytes += skb->len;
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_receive_skb(skb);
+ dev->stats.rx_packets++;
+}
+
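+/*
+ * rx_copybreak (a module parameter defined earlier in this file) selects
+ * between the two receive strategies above: packets larger than the
+ * threshold are handed up in place and the ring slot is refilled with a
+ * freshly mapped skb, while smaller packets are copied into a right-sized
+ * skb so the original ring buffer can be reused immediately.
+ */
+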
+static int pcnet32_rx(struct net_device *dev, int budget)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ int entry = lp->cur_rx & lp->rx_mod_mask;
+ struct pcnet32_rx_head *rxp = &lp->rx_ring[entry];
+ int npackets = 0;
+
+ /* If we own the next entry, it's a new packet. Send it up. */
+ while (npackets < budget && (short)le16_to_cpu(rxp->status) >= 0) {
+ pcnet32_rx_entry(dev, lp, rxp, entry);
+ npackets += 1;
+ /*
+ * The docs say that the buffer length isn't touched, but Andrew
+ * Boyd of QNX reports that some revs of the 79C965 clear it.
+ */
+ rxp->buf_length = cpu_to_le16(NEG_BUF_SIZE);
+ wmb(); /* Make sure owner changes after others are visible */
+ rxp->status = cpu_to_le16(0x8000);
+ entry = (++lp->cur_rx) & lp->rx_mod_mask;
+ rxp = &lp->rx_ring[entry];
+ }
+
+ return npackets;
+}
+
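+/*
+ * Reclaim tx descriptors the chip has completed, from dirty_tx up to
+ * cur_tx.  Returns nonzero when a Tx FIFO error has shut the transmitter
+ * off and the caller must reset and restart the chip.
+ */
+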
+static int pcnet32_tx(struct net_device *dev)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ unsigned int dirty_tx = lp->dirty_tx;
+ int delta;
+ int must_restart = 0;
+
+ while (dirty_tx != lp->cur_tx) {
+ int entry = dirty_tx & lp->tx_mod_mask;
+ int status = (short)le16_to_cpu(lp->tx_ring[entry].status);
+
+ if (status < 0)
+ break; /* It still hasn't been Txed */
+
+ lp->tx_ring[entry].base = 0;
+
+ if (status & 0x4000) {
+ /* There was a major error, log it. */
+ int err_status = le32_to_cpu(lp->tx_ring[entry].misc);
+ dev->stats.tx_errors++;
+ netif_err(lp, tx_err, dev,
+ "Tx error status=%04x err_status=%08x\n",
+ status, err_status);
+ if (err_status & 0x04000000)
+ dev->stats.tx_aborted_errors++;
+ if (err_status & 0x08000000)
+ dev->stats.tx_carrier_errors++;
+ if (err_status & 0x10000000)
+ dev->stats.tx_window_errors++;
+#ifndef DO_DXSUFLO
+ if (err_status & 0x40000000) {
+ dev->stats.tx_fifo_errors++;
+ /* Ackk! On FIFO errors the Tx unit is turned off! */
+ /* Remove this verbosity later! */
+ netif_err(lp, tx_err, dev, "Tx FIFO error!\n");
+ must_restart = 1;
+ }
+#else
+ if (err_status & 0x40000000) {
+ dev->stats.tx_fifo_errors++;
+ if (!lp->dxsuflo) { /* If controller doesn't recover ... */
+ /* Ackk! On FIFO errors the Tx unit is turned off! */
+ /* Remove this verbosity later! */
+ netif_err(lp, tx_err, dev, "Tx FIFO error!\n");
+ must_restart = 1;
+ }
+ }
+#endif
+ } else {
+ if (status & 0x1800)
+ dev->stats.collisions++;
+ dev->stats.tx_packets++;
+ }
+
+ /* We must free the original skb */
+ if (lp->tx_skbuff[entry]) {
+ dma_unmap_single(&lp->pci_dev->dev,
+ lp->tx_dma_addr[entry],
+ lp->tx_skbuff[entry]->len,
+ DMA_TO_DEVICE);
+ dev_kfree_skb_any(lp->tx_skbuff[entry]);
+ lp->tx_skbuff[entry] = NULL;
+ lp->tx_dma_addr[entry] = 0;
+ }
+ dirty_tx++;
+ }
+
+ delta = (lp->cur_tx - dirty_tx) & (lp->tx_mod_mask + lp->tx_ring_size);
+ if (delta > lp->tx_ring_size) {
+ netif_err(lp, drv, dev, "out-of-sync dirty pointer, %d vs. %d, full=%d\n",
+ dirty_tx, lp->cur_tx, lp->tx_full);
+ dirty_tx += lp->tx_ring_size;
+ delta -= lp->tx_ring_size;
+ }
+
+ if (lp->tx_full &&
+ netif_queue_stopped(dev) &&
+ delta < lp->tx_ring_size - 2) {
+ /* The ring is no longer full, clear tbusy. */
+ lp->tx_full = 0;
+ netif_wake_queue(dev);
+ }
+ lp->dirty_tx = dirty_tx;
+
+ return must_restart;
+}
+
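+/*
+ * NAPI poll handler: receive up to budget packets and reclaim the tx
+ * ring.  Interrupts are re-enabled (CSR3 masks cleared, CSR0_INTEN set)
+ * only after napi_complete_done() accepts completion.
+ */
+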
+static int pcnet32_poll(struct napi_struct *napi, int budget)
+{
+ struct pcnet32_private *lp = container_of(napi, struct pcnet32_private, napi);
+ struct net_device *dev = lp->dev;
+ unsigned long ioaddr = dev->base_addr;
+ unsigned long flags;
+ int work_done;
+ u16 val;
+
+ work_done = pcnet32_rx(dev, budget);
+
+ spin_lock_irqsave(&lp->lock, flags);
+ if (pcnet32_tx(dev)) {
+ /* reset the chip to clear the error condition, then restart */
+ lp->a->reset(ioaddr);
+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
+ pcnet32_restart(dev, CSR0_START);
+ netif_wake_queue(dev);
+ }
+
+ if (work_done < budget && napi_complete_done(napi, work_done)) {
+ /* clear interrupt masks */
+ val = lp->a->read_csr(ioaddr, CSR3);
+ val &= 0x00ff;
+ lp->a->write_csr(ioaddr, CSR3, val);
+
+ /* Set interrupt enable. */
+ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);
+ }
+
+ spin_unlock_irqrestore(&lp->lock, flags);
+ return work_done;
+}
+
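+/*
+ * Layout of the register dump produced by pcnet32_get_regs() below, as
+ * u16 words: 8 words of address PROM, CSR0-89, CSR112, CSR114, BCR0-29,
+ * a zero placeholder for BCR30, BCR31-35, then 32 registers for each
+ * discovered PHY.
+ */
+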
+#define PCNET32_REGS_PER_PHY 32
+#define PCNET32_MAX_PHYS 32
+static int pcnet32_get_regs_len(struct net_device *dev)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ int j = lp->phycount * PCNET32_REGS_PER_PHY;
+
+ return (PCNET32_NUM_REGS + j) * sizeof(u16);
+}
+
+static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+ void *ptr)
+{
+ int i, csr0;
+ u16 *buff = ptr;
+ struct pcnet32_private *lp = netdev_priv(dev);
+ const struct pcnet32_access *a = lp->a;
+ ulong ioaddr = dev->base_addr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&lp->lock, flags);
+
+ csr0 = a->read_csr(ioaddr, CSR0);
+ if (!(csr0 & CSR0_STOP)) /* If not stopped */
+ pcnet32_suspend(dev, &flags, 1);
+
+ /* read address PROM */
+ for (i = 0; i < 16; i += 2)
+ *buff++ = inw(ioaddr + i);
+
+ /* read control and status registers */
+ for (i = 0; i < 90; i++)
+ *buff++ = a->read_csr(ioaddr, i);
+
+ *buff++ = a->read_csr(ioaddr, 112);
+ *buff++ = a->read_csr(ioaddr, 114);
+
+ /* read bus configuration registers */
+ for (i = 0; i < 30; i++)
+ *buff++ = a->read_bcr(ioaddr, i);
+
+ *buff++ = 0; /* skip bcr30 so as not to hang 79C976 */
+
+ for (i = 31; i < 36; i++)
+ *buff++ = a->read_bcr(ioaddr, i);
+
+ /* read mii phy registers */
+ if (lp->mii) {
+ int j;
+ for (j = 0; j < PCNET32_MAX_PHYS; j++) {
+ if (lp->phymask & (1 << j)) {
+ for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
+ lp->a->write_bcr(ioaddr, 33,
+ (j << 5) | i);
+ *buff++ = lp->a->read_bcr(ioaddr, 34);
+ }
+ }
+ }
+ }
+
+ if (!(csr0 & CSR0_STOP)) /* If not stopped */
+ pcnet32_clr_suspend(lp, ioaddr);
+
+ spin_unlock_irqrestore(&lp->lock, flags);
+}
+
+static const struct ethtool_ops pcnet32_ethtool_ops = {
+ .get_drvinfo = pcnet32_get_drvinfo,
+ .get_msglevel = pcnet32_get_msglevel,
+ .set_msglevel = pcnet32_set_msglevel,
+ .nway_reset = pcnet32_nway_reset,
+ .get_link = pcnet32_get_link,
+ .get_ringparam = pcnet32_get_ringparam,
+ .set_ringparam = pcnet32_set_ringparam,
+ .get_strings = pcnet32_get_strings,
+ .self_test = pcnet32_ethtool_test,
+ .set_phys_id = pcnet32_set_phys_id,
+ .get_regs_len = pcnet32_get_regs_len,
+ .get_regs = pcnet32_get_regs,
+ .get_sset_count = pcnet32_get_sset_count,
+ .get_link_ksettings = pcnet32_get_link_ksettings,
+ .set_link_ksettings = pcnet32_set_link_ksettings,
+};
+
+/* only probes for non-PCI devices, the rest are handled by
+ * pci_register_driver via pcnet32_probe_pci */
+
+static void pcnet32_probe_vlbus(unsigned int *pcnet32_portlist)
+{
+ unsigned int *port, ioaddr;
+
+ /* search for PCnet32 VLB cards at known addresses */
+ for (port = pcnet32_portlist; (ioaddr = *port); port++) {
+ if (request_region
+ (ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_vlbus")) {
+			/* check the PROM signature bytes (0x57, 0x57) to see
+			 * if there is really a pcnet chip on that ioaddr */
+ if ((inb(ioaddr + 14) == 0x57) &&
+ (inb(ioaddr + 15) == 0x57)) {
+ pcnet32_probe1(ioaddr, 0, NULL);
+ } else {
+ release_region(ioaddr, PCNET32_TOTAL_SIZE);
+ }
+ }
+ }
+}
+
+static int
+pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ unsigned long ioaddr;
+ int err;
+
+ err = pci_enable_device(pdev);
+ if (err < 0) {
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ pr_err("failed to enable device -- err=%d\n", err);
+ return err;
+ }
+ pci_set_master(pdev);
+
+ if (!pci_resource_len(pdev, 0)) {
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ pr_err("card has no PCI IO resources, aborting\n");
+ err = -ENODEV;
+ goto err_disable_dev;
+ }
+
+ err = dma_set_mask(&pdev->dev, PCNET32_DMA_MASK);
+ if (err) {
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ pr_err("architecture does not support 32bit PCI busmaster DMA\n");
+ goto err_disable_dev;
+ }
+
+ ioaddr = pci_resource_start(pdev, 0);
+ if (!request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci")) {
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ pr_err("io address range already allocated\n");
+ err = -EBUSY;
+ goto err_disable_dev;
+ }
+
+ err = pcnet32_probe1(ioaddr, 1, pdev);
+
+err_disable_dev:
+ if (err < 0)
+ pci_disable_device(pdev);
+
+ return err;
+}
+
+static const struct net_device_ops pcnet32_netdev_ops = {
+ .ndo_open = pcnet32_open,
+ .ndo_stop = pcnet32_close,
+ .ndo_start_xmit = pcnet32_start_xmit,
+ .ndo_tx_timeout = pcnet32_tx_timeout,
+ .ndo_get_stats = pcnet32_get_stats,
+ .ndo_set_rx_mode = pcnet32_set_multicast_list,
+ .ndo_do_ioctl = pcnet32_ioctl,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = pcnet32_poll_controller,
+#endif
+};
+
+/* pcnet32_probe1
+ * Called from both pcnet32_probe_vlbus and pcnet32_probe_pci.
+ * pdev will be NULL when called from pcnet32_probe_vlbus.
+ */
+static int
+pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
+{
+ struct pcnet32_private *lp;
+ int i, media;
+ int fdx, mii, fset, dxsuflo, sram;
+ int chip_version;
+ char *chipname;
+ struct net_device *dev;
+ const struct pcnet32_access *a = NULL;
+ u8 promaddr[ETH_ALEN];
+ int ret = -ENODEV;
+
+ /* reset the chip */
+ pcnet32_wio_reset(ioaddr);
+
+ /* NOTE: 16-bit check is first, otherwise some older PCnet chips fail */
+ if (pcnet32_wio_read_csr(ioaddr, 0) == 4 && pcnet32_wio_check(ioaddr)) {
+ a = &pcnet32_wio;
+ } else {
+ pcnet32_dwio_reset(ioaddr);
+ if (pcnet32_dwio_read_csr(ioaddr, 0) == 4 &&
+ pcnet32_dwio_check(ioaddr)) {
+ a = &pcnet32_dwio;
+ } else {
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ pr_err("No access methods\n");
+ goto err_release_region;
+ }
+ }
+
+ chip_version =
+ a->read_csr(ioaddr, 88) | (a->read_csr(ioaddr, 89) << 16);
+ if ((pcnet32_debug & NETIF_MSG_PROBE) && (pcnet32_debug & NETIF_MSG_HW))
+ pr_info(" PCnet chip version is %#x\n", chip_version);
+ if ((chip_version & 0xfff) != 0x003) {
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ pr_info("Unsupported chip version\n");
+ goto err_release_region;
+ }
+
+ /* initialize variables */
+ fdx = mii = fset = dxsuflo = sram = 0;
+ chip_version = (chip_version >> 12) & 0xffff;
+
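+	/*
+	 * For example, a raw CSR89:CSR88 value of 0x02621003 has the
+	 * required part ID 0x003 in its low 12 bits, and the shift above
+	 * leaves version 0x2621 (a 79C970A) for the switch below.
+	 */
+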
+ switch (chip_version) {
+ case 0x2420:
+ chipname = "PCnet/PCI 79C970"; /* PCI */
+ break;
+ case 0x2430:
+ if (shared)
+ chipname = "PCnet/PCI 79C970"; /* 970 gives the wrong chip id back */
+ else
+ chipname = "PCnet/32 79C965"; /* 486/VL bus */
+ break;
+ case 0x2621:
+ chipname = "PCnet/PCI II 79C970A"; /* PCI */
+ fdx = 1;
+ break;
+ case 0x2623:
+ chipname = "PCnet/FAST 79C971"; /* PCI */
+ fdx = 1;
+ mii = 1;
+ fset = 1;
+ break;
+ case 0x2624:
+ chipname = "PCnet/FAST+ 79C972"; /* PCI */
+ fdx = 1;
+ mii = 1;
+ fset = 1;
+ break;
+ case 0x2625:
+ chipname = "PCnet/FAST III 79C973"; /* PCI */
+ fdx = 1;
+ mii = 1;
+ sram = 1;
+ break;
+ case 0x2626:
+ chipname = "PCnet/Home 79C978"; /* PCI */
+ fdx = 1;
+ /*
+ * This is based on specs published at www.amd.com. This section
+ * assumes that a card with a 79C978 wants to go into standard
+ * ethernet mode. The 79C978 can also go into 1Mb HomePNA mode,
+ * and the module option homepna=1 can select this instead.
+ */
+ media = a->read_bcr(ioaddr, 49);
+ media &= ~3; /* default to 10Mb ethernet */
+ if (cards_found < MAX_UNITS && homepna[cards_found])
+ media |= 1; /* switch to home wiring mode */
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ printk(KERN_DEBUG PFX "media set to %sMbit mode\n",
+ (media & 1) ? "1" : "10");
+ a->write_bcr(ioaddr, 49, media);
+ break;
+ case 0x2627:
+ chipname = "PCnet/FAST III 79C975"; /* PCI */
+ fdx = 1;
+ mii = 1;
+ sram = 1;
+ break;
+ case 0x2628:
+ chipname = "PCnet/PRO 79C976";
+ fdx = 1;
+ mii = 1;
+ break;
+ default:
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ pr_info("PCnet version %#x, no PCnet32 chip\n",
+ chip_version);
+ goto err_release_region;
+ }
+
+ /*
+ * On selected chips turn on the BCR18:NOUFLO bit. This stops transmit
+ * starting until the packet is loaded. Strike one for reliability, lose
+ * one for latency - although on PCI this isn't a big loss. Older chips
+ * have FIFOs smaller than a packet, so you can't do this.
+ * Turn on BCR18:BurstRdEn and BCR18:BurstWrEn.
+ */
+
+ if (fset) {
+ a->write_bcr(ioaddr, 18, (a->read_bcr(ioaddr, 18) | 0x0860));
+ a->write_csr(ioaddr, 80,
+ (a->read_csr(ioaddr, 80) & 0x0C00) | 0x0c00);
+ dxsuflo = 1;
+ }
+
+ /*
+ * The Am79C973/Am79C975 controllers come with 12K of SRAM
+ * which we can use for the Tx/Rx buffers but most importantly,
+ * the use of SRAM allow us to use the BCR18:NOUFLO bit to avoid
+ * Tx fifo underflows.
+ */
+ if (sram) {
+ /*
+ * The SRAM is being configured in two steps. First we
+ * set the SRAM size in the BCR25:SRAM_SIZE bits. According
+ * to the datasheet, each bit corresponds to a 512-byte
+ * page so we can have at most 24 pages. The SRAM_SIZE
+ * holds the value of the upper 8 bits of the 16-bit SRAM size.
+ * The low 8-bits start at 0x00 and end at 0xff. So the
+ * address range is from 0x0000 up to 0x17ff. Therefore,
+ * the SRAM_SIZE is set to 0x17. The next step is to set
+ * the BCR26:SRAM_BND midway through so the Tx and Rx
+ * buffers can share the SRAM equally.
+ */
+ a->write_bcr(ioaddr, 25, 0x17);
+ a->write_bcr(ioaddr, 26, 0xc);
+ /* And finally enable the NOUFLO bit */
+ a->write_bcr(ioaddr, 18, a->read_bcr(ioaddr, 18) | (1 << 11));
+ }
+
+ dev = alloc_etherdev(sizeof(*lp));
+ if (!dev) {
+ ret = -ENOMEM;
+ goto err_release_region;
+ }
+
+ if (pdev)
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ pr_info("%s at %#3lx,", chipname, ioaddr);
+
+ /* In most chips, after a chip reset, the ethernet address is read from the
+ * station address PROM at the base address and programmed into the
+ * "Physical Address Registers" CSR12-14.
+ * As a precautionary measure, we read the PROM values and complain if
+ * they disagree with the CSRs. If they miscompare, and the PROM addr
+ * is valid, then the PROM addr is used.
+ */
+ for (i = 0; i < 3; i++) {
+ unsigned int val;
+ val = a->read_csr(ioaddr, i + 12) & 0x0ffff;
+ /* There may be endianness issues here. */
+ dev->dev_addr[2 * i] = val & 0x0ff;
+ dev->dev_addr[2 * i + 1] = (val >> 8) & 0x0ff;
+ }
+
+ /* read PROM address and compare with CSR address */
+ for (i = 0; i < ETH_ALEN; i++)
+ promaddr[i] = inb(ioaddr + i);
+
+ if (!ether_addr_equal(promaddr, dev->dev_addr) ||
+ !is_valid_ether_addr(dev->dev_addr)) {
+ if (is_valid_ether_addr(promaddr)) {
+ if (pcnet32_debug & NETIF_MSG_PROBE) {
+ pr_cont(" warning: CSR address invalid,\n");
+ pr_info(" using instead PROM address of");
+ }
+ memcpy(dev->dev_addr, promaddr, ETH_ALEN);
+ }
+ }
+
+ /* if the ethernet address is not valid, force to 00:00:00:00:00:00 */
+ if (!is_valid_ether_addr(dev->dev_addr))
+ eth_zero_addr(dev->dev_addr);
+
+ if (pcnet32_debug & NETIF_MSG_PROBE) {
+ pr_cont(" %pM", dev->dev_addr);
+
+ /* Version 0x2623 and 0x2624 */
+ if (((chip_version + 1) & 0xfffe) == 0x2624) {
+ i = a->read_csr(ioaddr, 80) & 0x0C00; /* Check tx_start_pt */
+ pr_info(" tx_start_pt(0x%04x):", i);
+ switch (i >> 10) {
+ case 0:
+ pr_cont(" 20 bytes,");
+ break;
+ case 1:
+ pr_cont(" 64 bytes,");
+ break;
+ case 2:
+ pr_cont(" 128 bytes,");
+ break;
+ case 3:
+ pr_cont("~220 bytes,");
+ break;
+ }
+ i = a->read_bcr(ioaddr, 18); /* Check Burst/Bus control */
+ pr_cont(" BCR18(%x):", i & 0xffff);
+ if (i & (1 << 5))
+ pr_cont("BurstWrEn ");
+ if (i & (1 << 6))
+ pr_cont("BurstRdEn ");
+ if (i & (1 << 7))
+ pr_cont("DWordIO ");
+ if (i & (1 << 11))
+ pr_cont("NoUFlow ");
+ i = a->read_bcr(ioaddr, 25);
+ pr_info(" SRAMSIZE=0x%04x,", i << 8);
+ i = a->read_bcr(ioaddr, 26);
+ pr_cont(" SRAM_BND=0x%04x,", i << 8);
+ i = a->read_bcr(ioaddr, 27);
+ if (i & (1 << 14))
+ pr_cont("LowLatRx");
+ }
+ }
+
+ dev->base_addr = ioaddr;
+ lp = netdev_priv(dev);
+ /* dma_alloc_coherent returns page-aligned memory, so we do not have to check the alignment */
+ lp->init_block = dma_alloc_coherent(&pdev->dev,
+ sizeof(*lp->init_block),
+ &lp->init_dma_addr, GFP_KERNEL);
+ if (!lp->init_block) {
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ pr_err("Coherent memory allocation failed\n");
+ ret = -ENOMEM;
+ goto err_free_netdev;
+ }
+ lp->pci_dev = pdev;
+
+ lp->dev = dev;
+
+ spin_lock_init(&lp->lock);
+
+ lp->name = chipname;
+ lp->shared_irq = shared;
+ lp->tx_ring_size = TX_RING_SIZE; /* default tx ring size */
+ lp->rx_ring_size = RX_RING_SIZE; /* default rx ring size */
+ lp->tx_mod_mask = lp->tx_ring_size - 1;
+ lp->rx_mod_mask = lp->rx_ring_size - 1;
+ lp->tx_len_bits = (PCNET32_LOG_TX_BUFFERS << 12);
+ lp->rx_len_bits = (PCNET32_LOG_RX_BUFFERS << 4);
+ lp->mii_if.full_duplex = fdx;
+ lp->mii_if.phy_id_mask = 0x1f;
+ lp->mii_if.reg_num_mask = 0x1f;
+ lp->dxsuflo = dxsuflo;
+ lp->mii = mii;
+ lp->chip_version = chip_version;
+ lp->msg_enable = pcnet32_debug;
+ if ((cards_found >= MAX_UNITS) ||
+ (options[cards_found] >= sizeof(options_mapping)))
+ lp->options = PCNET32_PORT_ASEL;
+ else
+ lp->options = options_mapping[options[cards_found]];
+ /* force default port to TP on 79C970A so link detection can work */
+ if (lp->chip_version == PCNET32_79C970A)
+ lp->options = PCNET32_PORT_10BT;
+ lp->mii_if.dev = dev;
+ lp->mii_if.mdio_read = mdio_read;
+ lp->mii_if.mdio_write = mdio_write;
+
+ /* napi.weight is used in both the napi and non-napi cases */
+ lp->napi.weight = lp->rx_ring_size / 2;
+
+ netif_napi_add(dev, &lp->napi, pcnet32_poll, lp->rx_ring_size / 2);
+
+ if (fdx && !(lp->options & PCNET32_PORT_ASEL) &&
+ ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
+ lp->options |= PCNET32_PORT_FD;
+
+ lp->a = a;
+
+ /* prior to register_netdev, dev->name is not yet correct */
+ if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
+ ret = -ENOMEM;
+ goto err_free_ring;
+ }
+	/* detect special T1/E1 WAN card by checking its MAC address prefix */
+ if (dev->dev_addr[0] == 0x00 && dev->dev_addr[1] == 0xe0 &&
+ dev->dev_addr[2] == 0x75)
+ lp->options = PCNET32_PORT_FD | PCNET32_PORT_GPSI;
+
+ lp->init_block->mode = cpu_to_le16(0x0003); /* Disable Rx and Tx. */
+ lp->init_block->tlen_rlen =
+ cpu_to_le16(lp->tx_len_bits | lp->rx_len_bits);
+ for (i = 0; i < 6; i++)
+ lp->init_block->phys_addr[i] = dev->dev_addr[i];
+ lp->init_block->filter[0] = 0x00000000;
+ lp->init_block->filter[1] = 0x00000000;
+ lp->init_block->rx_ring = cpu_to_le32(lp->rx_ring_dma_addr);
+ lp->init_block->tx_ring = cpu_to_le32(lp->tx_ring_dma_addr);
+
+ /* switch pcnet32 to 32bit mode */
+ a->write_bcr(ioaddr, 20, 2);
+
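+	/* CSR1/CSR2 take the low/high 16 bits of the init block bus address */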
+ a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
+ a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
+
+ if (pdev) { /* use the IRQ provided by PCI */
+ dev->irq = pdev->irq;
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ pr_cont(" assigned IRQ %d\n", dev->irq);
+ } else {
+ unsigned long irq_mask = probe_irq_on();
+
+ /*
+ * To auto-IRQ we enable the initialization-done and DMA error
+ * interrupts. For ISA boards we get a DMA error, but VLB and PCI
+ * boards will work.
+ */
+ /* Trigger an initialization just for the interrupt. */
+ a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_INIT);
+ mdelay(1);
+
+ dev->irq = probe_irq_off(irq_mask);
+ if (!dev->irq) {
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ pr_cont(", failed to detect IRQ line\n");
+ ret = -ENODEV;
+ goto err_free_ring;
+ }
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ pr_cont(", probed IRQ %d\n", dev->irq);
+ }
+
+ /* Set the mii phy_id so that we can query the link state */
+ if (lp->mii) {
+		/* lp->phycount and lp->phymask start out 0 (zeroed by alloc_etherdev) */
+
+ lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f;
+ /* scan for PHYs */
+ for (i = 0; i < PCNET32_MAX_PHYS; i++) {
+ unsigned short id1, id2;
+
+ id1 = mdio_read(dev, i, MII_PHYSID1);
+ if (id1 == 0xffff)
+ continue;
+ id2 = mdio_read(dev, i, MII_PHYSID2);
+ if (id2 == 0xffff)
+ continue;
+ if (i == 31 && ((chip_version + 1) & 0xfffe) == 0x2624)
+ continue; /* 79C971 & 79C972 have phantom phy at id 31 */
+ lp->phycount++;
+ lp->phymask |= (1 << i);
+ lp->mii_if.phy_id = i;
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ pr_info("Found PHY %04x:%04x at address %d\n",
+ id1, id2, i);
+ }
+ lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
+ if (lp->phycount > 1)
+ lp->options |= PCNET32_PORT_MII;
+ }
+
+ timer_setup(&lp->watchdog_timer, pcnet32_watchdog, 0);
+
+ /* The PCNET32-specific entries in the device structure. */
+ dev->netdev_ops = &pcnet32_netdev_ops;
+ dev->ethtool_ops = &pcnet32_ethtool_ops;
+ dev->watchdog_timeo = (5 * HZ);
+
+ /* Fill in the generic fields of the device structure. */
+ if (register_netdev(dev))
+ goto err_free_ring;
+
+ if (pdev) {
+ pci_set_drvdata(pdev, dev);
+ } else {
+ lp->next = pcnet32_dev;
+ pcnet32_dev = dev;
+ }
+
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ pr_info("%s: registered as %s\n", dev->name, lp->name);
+ cards_found++;
+
+ /* enable LED writes */
+ a->write_bcr(ioaddr, 2, a->read_bcr(ioaddr, 2) | 0x1000);
+
+ return 0;
+
+err_free_ring:
+ pcnet32_free_ring(dev);
+ dma_free_coherent(&lp->pci_dev->dev, sizeof(*lp->init_block),
+ lp->init_block, lp->init_dma_addr);
+err_free_netdev:
+ free_netdev(dev);
+err_release_region:
+ release_region(ioaddr, PCNET32_TOTAL_SIZE);
+ return ret;
+}
+
+/* if any allocation fails, caller must also call pcnet32_free_ring */
+static int pcnet32_alloc_ring(struct net_device *dev, const char *name)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+
+ lp->tx_ring = dma_alloc_coherent(&lp->pci_dev->dev,
+ sizeof(struct pcnet32_tx_head) * lp->tx_ring_size,
+ &lp->tx_ring_dma_addr, GFP_KERNEL);
+ if (lp->tx_ring == NULL) {
+ netif_err(lp, drv, dev, "Coherent memory allocation failed\n");
+ return -ENOMEM;
+ }
+
+ lp->rx_ring = dma_alloc_coherent(&lp->pci_dev->dev,
+ sizeof(struct pcnet32_rx_head) * lp->rx_ring_size,
+ &lp->rx_ring_dma_addr, GFP_KERNEL);
+ if (lp->rx_ring == NULL) {
+ netif_err(lp, drv, dev, "Coherent memory allocation failed\n");
+ return -ENOMEM;
+ }
+
+ lp->tx_dma_addr = kcalloc(lp->tx_ring_size, sizeof(dma_addr_t),
+ GFP_KERNEL);
+ if (!lp->tx_dma_addr)
+ return -ENOMEM;
+
+ lp->rx_dma_addr = kcalloc(lp->rx_ring_size, sizeof(dma_addr_t),
+ GFP_KERNEL);
+ if (!lp->rx_dma_addr)
+ return -ENOMEM;
+
+ lp->tx_skbuff = kcalloc(lp->tx_ring_size, sizeof(struct sk_buff *),
+ GFP_KERNEL);
+ if (!lp->tx_skbuff)
+ return -ENOMEM;
+
+ lp->rx_skbuff = kcalloc(lp->rx_ring_size, sizeof(struct sk_buff *),
+ GFP_KERNEL);
+ if (!lp->rx_skbuff)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void pcnet32_free_ring(struct net_device *dev)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+
+ kfree(lp->tx_skbuff);
+ lp->tx_skbuff = NULL;
+
+ kfree(lp->rx_skbuff);
+ lp->rx_skbuff = NULL;
+
+ kfree(lp->tx_dma_addr);
+ lp->tx_dma_addr = NULL;
+
+ kfree(lp->rx_dma_addr);
+ lp->rx_dma_addr = NULL;
+
+ if (lp->tx_ring) {
+ dma_free_coherent(&lp->pci_dev->dev,
+ sizeof(struct pcnet32_tx_head) * lp->tx_ring_size,
+ lp->tx_ring, lp->tx_ring_dma_addr);
+ lp->tx_ring = NULL;
+ }
+
+ if (lp->rx_ring) {
+ dma_free_coherent(&lp->pci_dev->dev,
+ sizeof(struct pcnet32_rx_head) * lp->rx_ring_size,
+ lp->rx_ring, lp->rx_ring_dma_addr);
+ lp->rx_ring = NULL;
+ }
+}
+
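+/*
+ * Bring the interface up: request the IRQ, reset the chip into 32-bit
+ * mode, program the port/duplex selection from lp->options, point
+ * CSR1/CSR2 at the init block, and wait for IDON before entering normal
+ * operation.
+ */
+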
+static int pcnet32_open(struct net_device *dev)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ struct pci_dev *pdev = lp->pci_dev;
+ unsigned long ioaddr = dev->base_addr;
+ u16 val;
+ int i;
+ int rc;
+ unsigned long flags;
+
+ if (request_irq(dev->irq, pcnet32_interrupt,
+ lp->shared_irq ? IRQF_SHARED : 0, dev->name,
+ (void *)dev)) {
+ return -EAGAIN;
+ }
+
+ spin_lock_irqsave(&lp->lock, flags);
+ /* Check for a valid station address */
+ if (!is_valid_ether_addr(dev->dev_addr)) {
+ rc = -EINVAL;
+ goto err_free_irq;
+ }
+
+ /* Reset the PCNET32 */
+ lp->a->reset(ioaddr);
+
+ /* switch pcnet32 to 32bit mode */
+ lp->a->write_bcr(ioaddr, 20, 2);
+
+ netif_printk(lp, ifup, KERN_DEBUG, dev,
+ "%s() irq %d tx/rx rings %#x/%#x init %#x\n",
+ __func__, dev->irq, (u32) (lp->tx_ring_dma_addr),
+ (u32) (lp->rx_ring_dma_addr),
+ (u32) (lp->init_dma_addr));
+
+ lp->autoneg = !!(lp->options & PCNET32_PORT_ASEL);
+ lp->port_tp = !!(lp->options & PCNET32_PORT_10BT);
+ lp->fdx = !!(lp->options & PCNET32_PORT_FD);
+
+ /* set/reset autoselect bit */
+ val = lp->a->read_bcr(ioaddr, 2) & ~2;
+ if (lp->options & PCNET32_PORT_ASEL)
+ val |= 2;
+ lp->a->write_bcr(ioaddr, 2, val);
+
+ /* handle full duplex setting */
+ if (lp->mii_if.full_duplex) {
+ val = lp->a->read_bcr(ioaddr, 9) & ~3;
+ if (lp->options & PCNET32_PORT_FD) {
+ val |= 1;
+ if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
+ val |= 2;
+ } else if (lp->options & PCNET32_PORT_ASEL) {
+			/* workaround for xSeries 250, turn on for 79C975 only */
+ if (lp->chip_version == 0x2627)
+ val |= 3;
+ }
+ lp->a->write_bcr(ioaddr, 9, val);
+ }
+
+ /* set/reset GPSI bit in test register */
+ val = lp->a->read_csr(ioaddr, 124) & ~0x10;
+ if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
+ val |= 0x10;
+ lp->a->write_csr(ioaddr, 124, val);
+
+ /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
+ if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT &&
+ (pdev->subsystem_device == PCI_SUBDEVICE_ID_AT_2700FX ||
+ pdev->subsystem_device == PCI_SUBDEVICE_ID_AT_2701FX)) {
+ if (lp->options & PCNET32_PORT_ASEL) {
+ lp->options = PCNET32_PORT_FD | PCNET32_PORT_100;
+ netif_printk(lp, link, KERN_DEBUG, dev,
+ "Setting 100Mb-Full Duplex\n");
+ }
+ }
+ if (lp->phycount < 2) {
+ /*
+		 * 24 Jun 2004, according to AMD: in order to change the PHY,
+ * DANAS (or DISPM for 79C976) must be set; then select the speed,
+ * duplex, and/or enable auto negotiation, and clear DANAS
+ */
+ if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
+ lp->a->write_bcr(ioaddr, 32,
+ lp->a->read_bcr(ioaddr, 32) | 0x0080);
+			/* disable Auto Negotiation, set 10Mbps, HD */
+ val = lp->a->read_bcr(ioaddr, 32) & ~0xb8;
+ if (lp->options & PCNET32_PORT_FD)
+ val |= 0x10;
+ if (lp->options & PCNET32_PORT_100)
+ val |= 0x08;
+ lp->a->write_bcr(ioaddr, 32, val);
+ } else {
+ if (lp->options & PCNET32_PORT_ASEL) {
+ lp->a->write_bcr(ioaddr, 32,
+ lp->a->read_bcr(ioaddr,
+ 32) | 0x0080);
+ /* enable auto negotiate, setup, disable fd */
+ val = lp->a->read_bcr(ioaddr, 32) & ~0x98;
+ val |= 0x20;
+ lp->a->write_bcr(ioaddr, 32, val);
+ }
+ }
+ } else {
+ int first_phy = -1;
+ u16 bmcr;
+ u32 bcr9;
+ struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
+
+ /*
+		 * There is really no other good way to handle multiple PHYs
+		 * than turning off all the automatics
+ */
+ val = lp->a->read_bcr(ioaddr, 2);
+ lp->a->write_bcr(ioaddr, 2, val & ~2);
+ val = lp->a->read_bcr(ioaddr, 32);
+ lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
+
+ if (!(lp->options & PCNET32_PORT_ASEL)) {
+ /* setup ecmd */
+ ecmd.port = PORT_MII;
+ ecmd.transceiver = XCVR_INTERNAL;
+ ecmd.autoneg = AUTONEG_DISABLE;
+ ethtool_cmd_speed_set(&ecmd,
+ (lp->options & PCNET32_PORT_100) ?
+ SPEED_100 : SPEED_10);
+ bcr9 = lp->a->read_bcr(ioaddr, 9);
+
+ if (lp->options & PCNET32_PORT_FD) {
+ ecmd.duplex = DUPLEX_FULL;
+ bcr9 |= (1 << 0);
+ } else {
+ ecmd.duplex = DUPLEX_HALF;
+				bcr9 &= ~(1 << 0);
+ }
+ lp->a->write_bcr(ioaddr, 9, bcr9);
+ }
+
+ for (i = 0; i < PCNET32_MAX_PHYS; i++) {
+ if (lp->phymask & (1 << i)) {
+ /* isolate all but the first PHY */
+ bmcr = mdio_read(dev, i, MII_BMCR);
+ if (first_phy == -1) {
+ first_phy = i;
+ mdio_write(dev, i, MII_BMCR,
+ bmcr & ~BMCR_ISOLATE);
+ } else {
+ mdio_write(dev, i, MII_BMCR,
+ bmcr | BMCR_ISOLATE);
+ }
+ /* use mii_ethtool_sset to setup PHY */
+ lp->mii_if.phy_id = i;
+ ecmd.phy_address = i;
+ if (lp->options & PCNET32_PORT_ASEL) {
+ mii_ethtool_gset(&lp->mii_if, &ecmd);
+ ecmd.autoneg = AUTONEG_ENABLE;
+ }
+ mii_ethtool_sset(&lp->mii_if, &ecmd);
+ }
+ }
+ lp->mii_if.phy_id = first_phy;
+ netif_info(lp, link, dev, "Using PHY number %d\n", first_phy);
+ }
+
+#ifdef DO_DXSUFLO
+ if (lp->dxsuflo) { /* Disable transmit stop on underflow */
+ val = lp->a->read_csr(ioaddr, CSR3);
+ val |= 0x40;
+ lp->a->write_csr(ioaddr, CSR3, val);
+ }
+#endif
+
+ lp->init_block->mode =
+ cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
+ pcnet32_load_multicast(dev);
+
+ if (pcnet32_init_ring(dev)) {
+ rc = -ENOMEM;
+ goto err_free_ring;
+ }
+
+ napi_enable(&lp->napi);
+
+ /* Re-initialize the PCNET32, and start it when done. */
+ lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
+ lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
+
+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
+ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
+
+ netif_start_queue(dev);
+
+ if (lp->chip_version >= PCNET32_79C970A) {
+ /* Print the link status and start the watchdog */
+ pcnet32_check_media(dev, 1);
+ mod_timer(&lp->watchdog_timer, PCNET32_WATCHDOG_TIMEOUT);
+ }
+
+ i = 0;
+ while (i++ < 100)
+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
+ break;
+ /*
+ * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
+ * reports that doing so triggers a bug in the '974.
+ */
+ lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL);
+
+ netif_printk(lp, ifup, KERN_DEBUG, dev,
+ "pcnet32 open after %d ticks, init block %#x csr0 %4.4x\n",
+ i,
+ (u32) (lp->init_dma_addr),
+ lp->a->read_csr(ioaddr, CSR0));
+
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ return 0; /* Always succeed */
+
+err_free_ring:
+ /* free any allocated skbuffs */
+ pcnet32_purge_rx_ring(dev);
+
+ /*
+ * Switch back to 16bit mode to avoid problems with dumb
+ * DOS packet driver after a warm reboot
+ */
+ lp->a->write_bcr(ioaddr, 20, 4);
+
+err_free_irq:
+ spin_unlock_irqrestore(&lp->lock, flags);
+ free_irq(dev->irq, dev);
+ return rc;
+}
+
+/*
+ * The LANCE has been halted for one reason or another (busmaster memory
+ * arbitration error, Tx FIFO underflow, driver stopped it to reconfigure,
+ * etc.). Modern LANCE variants always reload their ring-buffer
+ * configuration when restarted, so we must reinitialize our ring
+ * context before restarting. As part of this reinitialization,
+ * find all packets still on the Tx ring and pretend that they had been
+ * sent (in effect, drop the packets on the floor) - the higher-level
+ * protocols will time out and retransmit. It'd be better to shuffle
+ * these skbs to a temp list and then actually re-Tx them after
+ * restarting the chip, but I'm too lazy to do so right now. dplatt@3do.com
+ */
+
+static void pcnet32_purge_tx_ring(struct net_device *dev)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ int i;
+
+ for (i = 0; i < lp->tx_ring_size; i++) {
+ lp->tx_ring[i].status = 0; /* CPU owns buffer */
+ wmb(); /* Make sure adapter sees owner change */
+ if (lp->tx_skbuff[i]) {
+ if (!dma_mapping_error(&lp->pci_dev->dev, lp->tx_dma_addr[i]))
+ dma_unmap_single(&lp->pci_dev->dev,
+ lp->tx_dma_addr[i],
+ lp->tx_skbuff[i]->len,
+ DMA_TO_DEVICE);
+ dev_kfree_skb_any(lp->tx_skbuff[i]);
+ }
+ lp->tx_skbuff[i] = NULL;
+ lp->tx_dma_addr[i] = 0;
+ }
+}
+
+/* Initialize the PCNET32 Rx and Tx rings. */
+static int pcnet32_init_ring(struct net_device *dev)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ int i;
+
+ lp->tx_full = 0;
+ lp->cur_rx = lp->cur_tx = 0;
+ lp->dirty_rx = lp->dirty_tx = 0;
+
+ for (i = 0; i < lp->rx_ring_size; i++) {
+ struct sk_buff *rx_skbuff = lp->rx_skbuff[i];
+ if (rx_skbuff == NULL) {
+ lp->rx_skbuff[i] = netdev_alloc_skb(dev, PKT_BUF_SKB);
+ rx_skbuff = lp->rx_skbuff[i];
+ if (!rx_skbuff) {
+ /* there is not much we can do at this point */
+ netif_err(lp, drv, dev, "%s netdev_alloc_skb failed\n",
+ __func__);
+ return -1;
+ }
+ skb_reserve(rx_skbuff, NET_IP_ALIGN);
+ }
+
+ rmb();
+ if (lp->rx_dma_addr[i] == 0) {
+ lp->rx_dma_addr[i] =
+ dma_map_single(&lp->pci_dev->dev, rx_skbuff->data,
+ PKT_BUF_SIZE, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&lp->pci_dev->dev, lp->rx_dma_addr[i])) {
+ /* there is not much we can do at this point */
+ netif_err(lp, drv, dev,
+ "%s pci dma mapping error\n",
+ __func__);
+ return -1;
+ }
+ }
+ lp->rx_ring[i].base = cpu_to_le32(lp->rx_dma_addr[i]);
+ lp->rx_ring[i].buf_length = cpu_to_le16(NEG_BUF_SIZE);
+ wmb(); /* Make sure owner changes after all others are visible */
+ lp->rx_ring[i].status = cpu_to_le16(0x8000);
+ }
+ /* The Tx buffer address is filled in as needed, but we do need to clear
+ * the upper ownership bit. */
+ for (i = 0; i < lp->tx_ring_size; i++) {
+ lp->tx_ring[i].status = 0; /* CPU owns buffer */
+ wmb(); /* Make sure adapter sees owner change */
+ lp->tx_ring[i].base = 0;
+ lp->tx_dma_addr[i] = 0;
+ }
+
+ lp->init_block->tlen_rlen =
+ cpu_to_le16(lp->tx_len_bits | lp->rx_len_bits);
+ for (i = 0; i < 6; i++)
+ lp->init_block->phys_addr[i] = dev->dev_addr[i];
+ lp->init_block->rx_ring = cpu_to_le32(lp->rx_ring_dma_addr);
+ lp->init_block->tx_ring = cpu_to_le32(lp->tx_ring_dma_addr);
+ wmb(); /* Make sure all changes are visible */
+ return 0;
+}
+
+/* the pcnet32 has been issued a stop or reset. Wait for the stop bit
+ * then flush the pending transmit operations, re-initialize the ring,
+ * and tell the chip to initialize.
+ */
+static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ unsigned long ioaddr = dev->base_addr;
+ int i;
+
+ /* wait for stop */
+ for (i = 0; i < 100; i++)
+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_STOP)
+ break;
+
+ if (i >= 100)
+ netif_err(lp, drv, dev, "%s timed out waiting for stop\n",
+ __func__);
+
+ pcnet32_purge_tx_ring(dev);
+ if (pcnet32_init_ring(dev))
+ return;
+
+ /* ReInit Ring */
+ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
+ i = 0;
+ while (i++ < 1000)
+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
+ break;
+
+ lp->a->write_csr(ioaddr, CSR0, csr0_bits);
+}
+
+static void pcnet32_tx_timeout(struct net_device *dev, unsigned int txqueue)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ unsigned long ioaddr = dev->base_addr, flags;
+
+ spin_lock_irqsave(&lp->lock, flags);
+ /* Transmitter timeout, serious problems. */
+ if (pcnet32_debug & NETIF_MSG_DRV)
+ pr_err("%s: transmit timed out, status %4.4x, resetting\n",
+ dev->name, lp->a->read_csr(ioaddr, CSR0));
+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
+ dev->stats.tx_errors++;
+ if (netif_msg_tx_err(lp)) {
+ int i;
+ printk(KERN_DEBUG
+ " Ring data dump: dirty_tx %d cur_tx %d%s cur_rx %d.",
+ lp->dirty_tx, lp->cur_tx, lp->tx_full ? " (full)" : "",
+ lp->cur_rx);
+ for (i = 0; i < lp->rx_ring_size; i++)
+ printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ",
+ le32_to_cpu(lp->rx_ring[i].base),
+ (-le16_to_cpu(lp->rx_ring[i].buf_length)) &
+ 0xffff, le32_to_cpu(lp->rx_ring[i].msg_length),
+ le16_to_cpu(lp->rx_ring[i].status));
+ for (i = 0; i < lp->tx_ring_size; i++)
+ printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ",
+ le32_to_cpu(lp->tx_ring[i].base),
+ (-le16_to_cpu(lp->tx_ring[i].length)) & 0xffff,
+ le32_to_cpu(lp->tx_ring[i].misc),
+ le16_to_cpu(lp->tx_ring[i].status));
+ printk("\n");
+ }
+ pcnet32_restart(dev, CSR0_NORMAL);
+
+ netif_trans_update(dev); /* prevent tx timeout */
+ netif_wake_queue(dev);
+
+ spin_unlock_irqrestore(&lp->lock, flags);
+}
+
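+/*
+ * Queue one skb for transmission.  The descriptor length field holds the
+ * two's complement of the buffer length (hence cpu_to_le16(-skb->len)),
+ * and ownership passes to the chip only with the final status write.
+ */
+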
+static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ unsigned long ioaddr = dev->base_addr;
+ u16 status;
+ int entry;
+ unsigned long flags;
+
+ spin_lock_irqsave(&lp->lock, flags);
+
+ netif_printk(lp, tx_queued, KERN_DEBUG, dev,
+ "%s() called, csr0 %4.4x\n",
+ __func__, lp->a->read_csr(ioaddr, CSR0));
+
+ /* Default status -- will not enable Successful-TxDone
+ * interrupt when that option is available to us.
+ */
+ status = 0x8300;
+
+ /* Fill in a Tx ring entry */
+
+ /* Mask to ring buffer boundary. */
+ entry = lp->cur_tx & lp->tx_mod_mask;
+
+ /* Caution: the write order is important here, set the status
+ * with the "ownership" bits last. */
+
+ lp->tx_ring[entry].length = cpu_to_le16(-skb->len);
+
+ lp->tx_ring[entry].misc = 0x00000000;
+
+ lp->tx_dma_addr[entry] =
+ dma_map_single(&lp->pci_dev->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&lp->pci_dev->dev, lp->tx_dma_addr[entry])) {
+ dev_kfree_skb_any(skb);
+ dev->stats.tx_dropped++;
+ goto drop_packet;
+ }
+ lp->tx_skbuff[entry] = skb;
+ lp->tx_ring[entry].base = cpu_to_le32(lp->tx_dma_addr[entry]);
+ wmb(); /* Make sure owner changes after all others are visible */
+ lp->tx_ring[entry].status = cpu_to_le16(status);
+
+ lp->cur_tx++;
+ dev->stats.tx_bytes += skb->len;
+
+ /* Trigger an immediate send poll. */
+ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
+
+ if (lp->tx_ring[(entry + 1) & lp->tx_mod_mask].base != 0) {
+ lp->tx_full = 1;
+ netif_stop_queue(dev);
+ }
+drop_packet:
+ spin_unlock_irqrestore(&lp->lock, flags);
+ return NETDEV_TX_OK;
+}
+
+/* The PCNET32 interrupt handler. */
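+/*
+ * Writing the pending status bits back to CSR0 acknowledges them; masking
+ * off the low command bits (~0x004f) keeps the write from issuing
+ * INIT/STRT/STOP/TDMD or re-enabling INEA.  Rx/Tx completion work is
+ * deferred to the NAPI poll routine.
+ */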
+static irqreturn_t
+pcnet32_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct pcnet32_private *lp;
+ unsigned long ioaddr;
+ u16 csr0;
+ int boguscnt = max_interrupt_work;
+
+ ioaddr = dev->base_addr;
+ lp = netdev_priv(dev);
+
+ spin_lock(&lp->lock);
+
+ csr0 = lp->a->read_csr(ioaddr, CSR0);
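+	/* 0x8f00 = ERR | MERR | RINT | TINT | IDON: loop while any of
+	 * these sources is pending.
+	 */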
+ while ((csr0 & 0x8f00) && --boguscnt >= 0) {
+ if (csr0 == 0xffff)
+ break; /* PCMCIA remove happened */
+ /* Acknowledge all of the current interrupt sources ASAP. */
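+		/* Writing the set status bits back clears them; masking
+		 * with ~0x004f avoids setting INIT/STRT/STOP/TDMD and
+		 * writes INEA as 0, so the acknowledge neither restarts
+		 * the chip nor re-enables interrupts.
+		 */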
+ lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f);
+
+ netif_printk(lp, intr, KERN_DEBUG, dev,
+			     "interrupt csr0=%#4.4x new csr=%#4.4x\n",
+ csr0, lp->a->read_csr(ioaddr, CSR0));
+
+ /* Log misc errors. */
+ if (csr0 & 0x4000)
+ dev->stats.tx_errors++; /* Tx babble. */
+ if (csr0 & 0x1000) {
+ /*
+ * This happens when our receive ring is full. This
+ * shouldn't be a problem as we will see normal rx
+ * interrupts for the frames in the receive ring. But
+ * there are some PCI chipsets (I can reproduce this
+			 * on SP3G with Intel saturn chipset) which
+			 * sometimes have problems and will fill up the
+			 * receive ring with error descriptors. In this
+			 * situation we don't get an rx interrupt, but a
+			 * missed frame interrupt sooner or later.
+ */
+ dev->stats.rx_errors++; /* Missed a Rx frame. */
+ }
+ if (csr0 & 0x0800) {
+ netif_err(lp, drv, dev, "Bus master arbitration failure, status %4.4x\n",
+ csr0);
+ /* unlike for the lance, there is no restart needed */
+ }
+ if (napi_schedule_prep(&lp->napi)) {
+ u16 val;
+ /* set interrupt masks */
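+			/* 0x5f00 = BABLM | MISSM | MERRM | RINTM | TINTM |
+			 * IDONM: mask every source until the NAPI poll
+			 * re-enables them.
+			 */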
+ val = lp->a->read_csr(ioaddr, CSR3);
+ val |= 0x5f00;
+ lp->a->write_csr(ioaddr, CSR3, val);
+
+ __napi_schedule(&lp->napi);
+ break;
+ }
+ csr0 = lp->a->read_csr(ioaddr, CSR0);
+ }
+
+ netif_printk(lp, intr, KERN_DEBUG, dev,
+ "exiting interrupt, csr0=%#4.4x\n",
+ lp->a->read_csr(ioaddr, CSR0));
+
+ spin_unlock(&lp->lock);
+
+ return IRQ_HANDLED;
+}
+
+static int pcnet32_close(struct net_device *dev)
+{
+ unsigned long ioaddr = dev->base_addr;
+ struct pcnet32_private *lp = netdev_priv(dev);
+ unsigned long flags;
+
+ del_timer_sync(&lp->watchdog_timer);
+
+ netif_stop_queue(dev);
+ napi_disable(&lp->napi);
+
+ spin_lock_irqsave(&lp->lock, flags);
+
+ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
+
+ netif_printk(lp, ifdown, KERN_DEBUG, dev,
+ "Shutting down ethercard, status was %2.2x\n",
+ lp->a->read_csr(ioaddr, CSR0));
+
+ /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
+
+ /*
+ * Switch back to 16bit mode to avoid problems with dumb
+ * DOS packet driver after a warm reboot
+ */
+ lp->a->write_bcr(ioaddr, 20, 4);
+
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ free_irq(dev->irq, dev);
+
+ spin_lock_irqsave(&lp->lock, flags);
+
+ pcnet32_purge_rx_ring(dev);
+ pcnet32_purge_tx_ring(dev);
+
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ return 0;
+}
+
+static struct net_device_stats *pcnet32_get_stats(struct net_device *dev)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ unsigned long ioaddr = dev->base_addr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&lp->lock, flags);
+ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ return &dev->stats;
+}
+
+/* taken from the sunlance driver, which it took from the depca driver */
+static void pcnet32_load_multicast(struct net_device *dev)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ volatile struct pcnet32_init_block *ib = lp->init_block;
+ volatile __le16 *mcast_table = (__le16 *)ib->filter;
+ struct netdev_hw_addr *ha;
+ unsigned long ioaddr = dev->base_addr;
+ int i;
+ u32 crc;
+
+ /* set all multicast bits */
+ if (dev->flags & IFF_ALLMULTI) {
+ ib->filter[0] = cpu_to_le32(~0U);
+ ib->filter[1] = cpu_to_le32(~0U);
+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
+ return;
+ }
+ /* clear the multicast filter */
+ ib->filter[0] = 0;
+ ib->filter[1] = 0;
+
+ /* Add addresses */
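+	/* Hash with the little-endian CRC-32: the top six bits select one
+	 * of the 64 filter bits (bits 5:4 pick the 16-bit word, bits 3:0
+	 * the bit within it).
+	 */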
+ netdev_for_each_mc_addr(ha, dev) {
+ crc = ether_crc_le(6, ha->addr);
+ crc = crc >> 26;
+ mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf));
+ }
+ for (i = 0; i < 4; i++)
+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER + i,
+ le16_to_cpu(mcast_table[i]));
+}
+
+/*
+ * Set or clear the multicast filter for this adaptor.
+ */
+static void pcnet32_set_multicast_list(struct net_device *dev)
+{
+ unsigned long ioaddr = dev->base_addr, flags;
+ struct pcnet32_private *lp = netdev_priv(dev);
+ int csr15, suspended;
+
+ spin_lock_irqsave(&lp->lock, flags);
+ suspended = pcnet32_suspend(dev, &flags, 0);
+ csr15 = lp->a->read_csr(ioaddr, CSR15);
+ if (dev->flags & IFF_PROMISC) {
+ /* Log any net taps. */
+ netif_info(lp, hw, dev, "Promiscuous mode enabled\n");
+ lp->init_block->mode =
+ cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
+ 7);
+ lp->a->write_csr(ioaddr, CSR15, csr15 | 0x8000);
+ } else {
+ lp->init_block->mode =
+ cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
+ lp->a->write_csr(ioaddr, CSR15, csr15 & 0x7fff);
+ pcnet32_load_multicast(dev);
+ }
+
+ if (suspended) {
+ pcnet32_clr_suspend(lp, ioaddr);
+ } else {
+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
+ pcnet32_restart(dev, CSR0_NORMAL);
+ netif_wake_queue(dev);
+ }
+
+ spin_unlock_irqrestore(&lp->lock, flags);
+}
+
+/* This routine assumes that the lp->lock is held */
+static int mdio_read(struct net_device *dev, int phy_id, int reg_num)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ unsigned long ioaddr = dev->base_addr;
+ u16 val_out;
+
+ if (!lp->mii)
+ return 0;
+
+ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
+ val_out = lp->a->read_bcr(ioaddr, 34);
+
+ return val_out;
+}
+
+/* This routine assumes that the lp->lock is held */
+static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ unsigned long ioaddr = dev->base_addr;
+
+ if (!lp->mii)
+ return;
+
+ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
+ lp->a->write_bcr(ioaddr, 34, val);
+}
+
+static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ int rc;
+ unsigned long flags;
+
+ /* SIOC[GS]MIIxxx ioctls */
+ if (lp->mii) {
+ spin_lock_irqsave(&lp->lock, flags);
+ rc = generic_mii_ioctl(&lp->mii_if, if_mii(rq), cmd, NULL);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ } else {
+ rc = -EOPNOTSUPP;
+ }
+
+ return rc;
+}
+
+static int pcnet32_check_otherphy(struct net_device *dev)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ struct mii_if_info mii = lp->mii_if;
+ u16 bmcr;
+ int i;
+
+ for (i = 0; i < PCNET32_MAX_PHYS; i++) {
+ if (i == lp->mii_if.phy_id)
+ continue; /* skip active phy */
+ if (lp->phymask & (1 << i)) {
+ mii.phy_id = i;
+ if (mii_link_ok(&mii)) {
+ /* found PHY with active link */
+ netif_info(lp, link, dev, "Using PHY number %d\n",
+ i);
+
+ /* isolate inactive phy */
+ bmcr =
+ mdio_read(dev, lp->mii_if.phy_id, MII_BMCR);
+ mdio_write(dev, lp->mii_if.phy_id, MII_BMCR,
+ bmcr | BMCR_ISOLATE);
+
+ /* de-isolate new phy */
+ bmcr = mdio_read(dev, i, MII_BMCR);
+ mdio_write(dev, i, MII_BMCR,
+ bmcr & ~BMCR_ISOLATE);
+
+ /* set new phy address */
+ lp->mii_if.phy_id = i;
+ return 1;
+ }
+ }
+ }
+ return 0;
+}
+
+/*
+ * Show the status of the media. Similar to mii_check_media however it
+ * correctly shows the link speed for all (tested) pcnet32 variants.
+ * Devices with no mii just report link state without speed.
+ *
+ * Caller is assumed to hold and release the lp->lock.
+ */
+
+static void pcnet32_check_media(struct net_device *dev, int verbose)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ int curr_link;
+ int prev_link = netif_carrier_ok(dev) ? 1 : 0;
+ u32 bcr9;
+
+ if (lp->mii) {
+ curr_link = mii_link_ok(&lp->mii_if);
+ } else if (lp->chip_version == PCNET32_79C970A) {
+ ulong ioaddr = dev->base_addr; /* card base I/O address */
+ /* only read link if port is set to TP */
+ if (!lp->autoneg && lp->port_tp)
+ curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
+ else /* link always up for AUI port or port auto select */
+ curr_link = 1;
+ } else {
+ ulong ioaddr = dev->base_addr; /* card base I/O address */
+ curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
+ }
+ if (!curr_link) {
+ if (prev_link || verbose) {
+ netif_carrier_off(dev);
+ netif_info(lp, link, dev, "link down\n");
+ }
+ if (lp->phycount > 1) {
+ curr_link = pcnet32_check_otherphy(dev);
+ prev_link = 0;
+ }
+ } else if (verbose || !prev_link) {
+ netif_carrier_on(dev);
+ if (lp->mii) {
+ if (netif_msg_link(lp)) {
+ struct ethtool_cmd ecmd = {
+ .cmd = ETHTOOL_GSET };
+ mii_ethtool_gset(&lp->mii_if, &ecmd);
+ netdev_info(dev, "link up, %uMbps, %s-duplex\n",
+ ethtool_cmd_speed(&ecmd),
+ (ecmd.duplex == DUPLEX_FULL)
+ ? "full" : "half");
+ }
+ bcr9 = lp->a->read_bcr(dev->base_addr, 9);
+ if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
+ if (lp->mii_if.full_duplex)
+ bcr9 |= (1 << 0);
+ else
+ bcr9 &= ~(1 << 0);
+ lp->a->write_bcr(dev->base_addr, 9, bcr9);
+ }
+ } else {
+ netif_info(lp, link, dev, "link up\n");
+ }
+ }
+}
+
+/*
+ * Check for loss of link and link establishment.
+ * Could possibly be changed to use mii_check_media instead.
+ */
+
+static void pcnet32_watchdog(struct timer_list *t)
+{
+ struct pcnet32_private *lp = from_timer(lp, t, watchdog_timer);
+ struct net_device *dev = lp->dev;
+ unsigned long flags;
+
+ /* Print the link status if it has changed */
+ spin_lock_irqsave(&lp->lock, flags);
+ pcnet32_check_media(dev, 0);
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ mod_timer(&lp->watchdog_timer, round_jiffies(PCNET32_WATCHDOG_TIMEOUT));
+}
+
+static int __maybe_unused pcnet32_pm_suspend(struct device *device_d)
+{
+ struct net_device *dev = dev_get_drvdata(device_d);
+
+ if (netif_running(dev)) {
+ netif_device_detach(dev);
+ pcnet32_close(dev);
+ }
+
+ return 0;
+}
+
+static int __maybe_unused pcnet32_pm_resume(struct device *device_d)
+{
+ struct net_device *dev = dev_get_drvdata(device_d);
+
+ if (netif_running(dev)) {
+ pcnet32_open(dev);
+ netif_device_attach(dev);
+ }
+
+ return 0;
+}
+
+static void pcnet32_remove_one(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+
+ if (dev) {
+ struct pcnet32_private *lp = netdev_priv(dev);
+
+ unregister_netdev(dev);
+ pcnet32_free_ring(dev);
+ release_region(dev->base_addr, PCNET32_TOTAL_SIZE);
+ dma_free_coherent(&lp->pci_dev->dev, sizeof(*lp->init_block),
+ lp->init_block, lp->init_dma_addr);
+ free_netdev(dev);
+ pci_disable_device(pdev);
+ }
+}
+
+static SIMPLE_DEV_PM_OPS(pcnet32_pm_ops, pcnet32_pm_suspend, pcnet32_pm_resume);
+
+static struct pci_driver pcnet32_driver = {
+ .name = DRV_NAME,
+ .probe = pcnet32_probe_pci,
+ .remove = pcnet32_remove_one,
+ .id_table = pcnet32_pci_tbl,
+ .driver = {
+ .pm = &pcnet32_pm_ops,
+ },
+};
+
+/* An additional parameter that may be passed in... */
+static int debug = -1;
+static int tx_start_pt = -1;
+static int pcnet32_have_pci;
+
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, DRV_NAME " debug level");
+module_param(max_interrupt_work, int, 0);
+MODULE_PARM_DESC(max_interrupt_work,
+ DRV_NAME " maximum events handled per interrupt");
+module_param(rx_copybreak, int, 0);
+MODULE_PARM_DESC(rx_copybreak,
+ DRV_NAME " copy breakpoint for copy-only-tiny-frames");
+module_param(tx_start_pt, int, 0);
+MODULE_PARM_DESC(tx_start_pt, DRV_NAME " transmit start point (0-3)");
+module_param(pcnet32vlb, int, 0);
+MODULE_PARM_DESC(pcnet32vlb, DRV_NAME " Vesa local bus (VLB) support (0/1)");
+module_param_array(options, int, NULL, 0);
+MODULE_PARM_DESC(options, DRV_NAME " initial option setting(s) (0-15)");
+module_param_array(full_duplex, int, NULL, 0);
+MODULE_PARM_DESC(full_duplex, DRV_NAME " full duplex setting(s) (1)");
+/* Module Parameter for HomePNA cards added by Patrick Simmons, 2004 */
+module_param_array(homepna, int, NULL, 0);
+MODULE_PARM_DESC(homepna,
+ DRV_NAME
+		 " mode for 79C978 cards (1 for HomePNA, 0 for Ethernet, default Ethernet)");
+
+MODULE_AUTHOR("Thomas Bogendoerfer");
+MODULE_DESCRIPTION("Driver for PCnet32 and PCnetPCI based ethercards");
+MODULE_LICENSE("GPL");
+
+#define PCNET32_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
+
+static int __init pcnet32_init_module(void)
+{
+ pcnet32_debug = netif_msg_init(debug, PCNET32_MSG_DEFAULT);
+
+ if ((tx_start_pt >= 0) && (tx_start_pt <= 3))
+ tx_start = tx_start_pt;
+
+ /* find the PCI devices */
+ if (!pci_register_driver(&pcnet32_driver))
+ pcnet32_have_pci = 1;
+
+ /* should we find any remaining VLbus devices ? */
+ if (pcnet32vlb)
+ pcnet32_probe_vlbus(pcnet32_portlist);
+
+ if (cards_found && (pcnet32_debug & NETIF_MSG_PROBE))
+		pr_info("%d cards found\n", cards_found);
+
+ return (pcnet32_have_pci + cards_found) ? 0 : -ENODEV;
+}
+
+static void __exit pcnet32_cleanup_module(void)
+{
+ struct net_device *next_dev;
+
+ while (pcnet32_dev) {
+ struct pcnet32_private *lp = netdev_priv(pcnet32_dev);
+ next_dev = lp->next;
+ unregister_netdev(pcnet32_dev);
+ pcnet32_free_ring(pcnet32_dev);
+ release_region(pcnet32_dev->base_addr, PCNET32_TOTAL_SIZE);
+ dma_free_coherent(&lp->pci_dev->dev, sizeof(*lp->init_block),
+ lp->init_block, lp->init_dma_addr);
+ free_netdev(pcnet32_dev);
+ pcnet32_dev = next_dev;
+ }
+
+ if (pcnet32_have_pci)
+ pci_unregister_driver(&pcnet32_driver);
+}
+
+module_init(pcnet32_init_module);
+module_exit(pcnet32_cleanup_module);
+
+/*
+ * Local variables:
+ * c-indent-level: 4
+ * tab-width: 8
+ * End:
+ */
diff --git a/drivers/net/ethernet/amd/sun3lance.c b/drivers/net/ethernet/amd/sun3lance.c
new file mode 100644
index 000000000..00ae10812
--- /dev/null
+++ b/drivers/net/ethernet/amd/sun3lance.c
@@ -0,0 +1,947 @@
+/* sun3lance.c: Ethernet driver for SUN3 Lance chip */
+/*
+
+ Sun3 Lance ethernet driver, by Sam Creasey (sammy@users.qual.net).
+ This driver is a part of the linux kernel, and is thus distributed
+ under the GNU General Public License.
+
+ The values used in LANCE_OBIO and LANCE_IRQ appear, empirically, to be
+ the correct IRQ and address of the lance registers. They
+ have not been widely tested, however. What we probably need is a
+ "proper" way to search for a device in the sun3's prom, but, alas,
+ linux has no such thing.
+
+ This driver is largely based on atarilance.c, by Roman Hodek. Other
+ sources of inspiration were the NetBSD sun3 am7990 driver, and the
+ linux sparc lance driver (sunlance.c).
+
+ There are more assumptions made throughout this driver; it almost
+ certainly still needs work, but it does work at least for RARP/BOOTP and
+ mounting the root NFS filesystem.
+
+*/
+
+static const char version[] =
+"sun3lance.c: v1.2 1/12/2001 Sam Creasey (sammy@sammy.net)\n";
+
+#include <linux/module.h>
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/bitops.h>
+#include <linux/pgtable.h>
+
+#include <asm/cacheflush.h>
+#include <asm/setup.h>
+#include <asm/irq.h>
+#include <asm/io.h>
+#include <asm/dvma.h>
+#include <asm/idprom.h>
+#include <asm/machines.h>
+
+#ifdef CONFIG_SUN3
+#include <asm/sun3mmu.h>
+#else
+#include <asm/sun3xprom.h>
+#endif
+
+/* sun3/60 addr/irq for the lance chip. If your sun is different,
+ change this. */
+#define LANCE_OBIO 0x120000
+#define LANCE_IRQ IRQ_AUTO_3
+
+/* Debug level:
+ * 0 = silent, print only serious errors
+ * 1 = normal, print error messages
+ * 2 = debug, print debug infos
+ * 3 = debug, print even more debug infos (packet data)
+ */
+
+#define LANCE_DEBUG 0
+
+#ifdef LANCE_DEBUG
+static int lance_debug = LANCE_DEBUG;
+#else
+static int lance_debug = 1;
+#endif
+module_param(lance_debug, int, 0);
+MODULE_PARM_DESC(lance_debug, "SUN3 Lance debug level (0-3)");
+MODULE_LICENSE("GPL");
+
+#define DPRINTK(n,a) \
+ do { \
+ if (lance_debug >= n) \
+ printk a; \
+ } while( 0 )
+
+
+/* Ring sizes, expressed as log2 below: 2^3 = 8 TX buffers and
+   2^5 = 32 RX buffers, all carved from the shared DVMA memory block. */
+
+#define TX_LOG_RING_SIZE 3
+#define RX_LOG_RING_SIZE 5
+
+/* These are the derived values */
+
+#define TX_RING_SIZE (1 << TX_LOG_RING_SIZE)
+#define TX_RING_LEN_BITS (TX_LOG_RING_SIZE << 5)
+#define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
+
+#define RX_RING_SIZE (1 << RX_LOG_RING_SIZE)
+#define RX_RING_LEN_BITS (RX_LOG_RING_SIZE << 5)
+#define RX_RING_MOD_MASK (RX_RING_SIZE - 1)
+
+/* Definitions for packet buffer access: */
+#define PKT_BUF_SZ 1544
+
+/* Get the address of a packet buffer corresponding to a given buffer head */
+#define PKTBUF_ADDR(head) (void *)((unsigned long)(MEM) | (head)->base)
+
+
+/* The LANCE Rx and Tx ring descriptors. */
+struct lance_rx_head {
+ unsigned short base; /* Low word of base addr */
+ volatile unsigned char flag;
+ unsigned char base_hi; /* High word of base addr (unused) */
+ short buf_length; /* This length is 2s complement! */
+ volatile short msg_length; /* This length is "normal". */
+};
+
+struct lance_tx_head {
+ unsigned short base; /* Low word of base addr */
+ volatile unsigned char flag;
+ unsigned char base_hi; /* High word of base addr (unused) */
+ short length; /* Length is 2s complement! */
+ volatile short misc;
+};
+
+/* The LANCE initialization block, described in databook. */
+struct lance_init_block {
+ unsigned short mode; /* Pre-set mode */
+ unsigned char hwaddr[6]; /* Physical ethernet address */
+ unsigned int filter[2]; /* Multicast filter (unused). */
+ /* Receive and transmit ring base, along with length bits. */
+ unsigned short rdra;
+ unsigned short rlen;
+ unsigned short tdra;
+ unsigned short tlen;
+	unsigned short pad[4]; /* is this needed? */
+};
+
+/* The whole layout of the Lance shared memory */
+struct lance_memory {
+ struct lance_init_block init;
+ struct lance_tx_head tx_head[TX_RING_SIZE];
+ struct lance_rx_head rx_head[RX_RING_SIZE];
+ char rx_data[RX_RING_SIZE][PKT_BUF_SZ];
+ char tx_data[TX_RING_SIZE][PKT_BUF_SZ];
+};
+
+/* The driver's private device structure */
+
+struct lance_private {
+ volatile unsigned short *iobase;
+ struct lance_memory *mem;
+ int new_rx, new_tx; /* The next free ring entry */
+ int old_tx, old_rx; /* ring entry to be processed */
+/* These two must be longs for set_bit() */
+ long tx_full;
+ long lock;
+};
+
+/* I/O register access macros */
+
+#define MEM lp->mem
+#define DREG lp->iobase[0]
+#define AREG lp->iobase[1]
+#define REGA(a) (*( AREG = (a), &DREG ))
+
+/* Definitions for the Lance */
+
+/* tx_head flags */
+#define TMD1_ENP 0x01 /* end of packet */
+#define TMD1_STP 0x02 /* start of packet */
+#define TMD1_DEF 0x04 /* deferred */
+#define TMD1_ONE 0x08 /* one retry needed */
+#define TMD1_MORE 0x10 /* more than one retry needed */
+#define TMD1_ERR 0x40 /* error summary */
+#define TMD1_OWN 0x80 /* ownership (set: chip owns) */
+
+#define TMD1_OWN_CHIP TMD1_OWN
+#define TMD1_OWN_HOST 0
+
+/* tx_head misc field */
+#define TMD3_TDR 0x03FF /* Time Domain Reflectometry counter */
+#define TMD3_RTRY 0x0400 /* failed after 16 retries */
+#define TMD3_LCAR 0x0800 /* carrier lost */
+#define TMD3_LCOL 0x1000 /* late collision */
+#define TMD3_UFLO 0x4000 /* underflow (late memory) */
+#define TMD3_BUFF 0x8000 /* buffering error (no ENP) */
+
+/* rx_head flags */
+#define RMD1_ENP 0x01 /* end of packet */
+#define RMD1_STP 0x02 /* start of packet */
+#define RMD1_BUFF 0x04 /* buffer error */
+#define RMD1_CRC 0x08 /* CRC error */
+#define RMD1_OFLO 0x10 /* overflow */
+#define RMD1_FRAM 0x20 /* framing error */
+#define RMD1_ERR 0x40 /* error summary */
+#define RMD1_OWN	0x80	/* ownership (set: chip owns) */
+
+#define RMD1_OWN_CHIP RMD1_OWN
+#define RMD1_OWN_HOST 0
+
+/* register names */
+#define CSR0 0 /* mode/status */
+#define CSR1 1 /* init block addr (low) */
+#define CSR2 2 /* init block addr (high) */
+#define CSR3 3 /* misc */
+#define CSR8 8 /* address filter */
+#define CSR15 15 /* promiscuous mode */
+
+/* CSR0 */
+/* (R=readable, W=writeable, S=set on write, C=clear on write) */
+#define CSR0_INIT 0x0001 /* initialize (RS) */
+#define CSR0_STRT 0x0002 /* start (RS) */
+#define CSR0_STOP 0x0004 /* stop (RS) */
+#define CSR0_TDMD 0x0008 /* transmit demand (RS) */
+#define CSR0_TXON 0x0010 /* transmitter on (R) */
+#define CSR0_RXON 0x0020 /* receiver on (R) */
+#define CSR0_INEA 0x0040 /* interrupt enable (RW) */
+#define CSR0_INTR 0x0080 /* interrupt active (R) */
+#define CSR0_IDON 0x0100 /* initialization done (RC) */
+#define CSR0_TINT 0x0200 /* transmitter interrupt (RC) */
+#define CSR0_RINT 0x0400 /* receiver interrupt (RC) */
+#define CSR0_MERR 0x0800 /* memory error (RC) */
+#define CSR0_MISS 0x1000 /* missed frame (RC) */
+#define CSR0_CERR 0x2000 /* carrier error (no heartbeat :-) (RC) */
+#define CSR0_BABL 0x4000 /* babble: tx-ed too many bits (RC) */
+#define CSR0_ERR 0x8000 /* error (RC) */
+
+/* CSR3 */
+#define CSR3_BCON 0x0001 /* byte control */
+#define CSR3_ACON 0x0002 /* ALE control */
+#define CSR3_BSWP 0x0004 /* byte swap (1=big endian) */
+
+/***************************** Prototypes *****************************/
+
+static int lance_probe( struct net_device *dev);
+static int lance_open( struct net_device *dev );
+static void lance_init_ring( struct net_device *dev );
+static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
+ struct net_device *dev);
+static irqreturn_t lance_interrupt( int irq, void *dev_id);
+static int lance_rx( struct net_device *dev );
+static int lance_close( struct net_device *dev );
+static void set_multicast_list( struct net_device *dev );
+
+/************************* End of Prototypes **************************/
+
+struct net_device * __init sun3lance_probe(int unit)
+{
+ struct net_device *dev;
+ static int found;
+ int err = -ENODEV;
+
+ if (!MACH_IS_SUN3 && !MACH_IS_SUN3X)
+ return ERR_PTR(-ENODEV);
+
+ /* check that this machine has an onboard lance */
+ switch(idprom->id_machtype) {
+ case SM_SUN3|SM_3_50:
+ case SM_SUN3|SM_3_60:
+ case SM_SUN3X|SM_3_80:
+ /* these machines have lance */
+ break;
+
+ default:
+ return ERR_PTR(-ENODEV);
+ }
+
+ if (found)
+ return ERR_PTR(-ENODEV);
+
+ dev = alloc_etherdev(sizeof(struct lance_private));
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+ if (unit >= 0) {
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+ }
+
+ if (!lance_probe(dev))
+ goto out;
+
+ err = register_netdev(dev);
+ if (err)
+ goto out1;
+ found = 1;
+ return dev;
+
+out1:
+#ifdef CONFIG_SUN3
+ iounmap((void __iomem *)dev->base_addr);
+#endif
+out:
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+
+static const struct net_device_ops lance_netdev_ops = {
+ .ndo_open = lance_open,
+ .ndo_stop = lance_close,
+ .ndo_start_xmit = lance_start_xmit,
+ .ndo_set_rx_mode = set_multicast_list,
+ .ndo_set_mac_address = NULL,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+static int __init lance_probe( struct net_device *dev)
+{
+ unsigned long ioaddr;
+
+ struct lance_private *lp;
+ int i;
+ static int did_version;
+ volatile unsigned short *ioaddr_probe;
+ unsigned short tmp1, tmp2;
+
+#ifdef CONFIG_SUN3
+ ioaddr = (unsigned long)ioremap(LANCE_OBIO, PAGE_SIZE);
+ if (!ioaddr)
+ return 0;
+#else
+ ioaddr = SUN3X_LANCE;
+#endif
+
+ /* test to see if there's really a lance here */
+	/* (CSR0_INIT shouldn't be readable) */
+
+ ioaddr_probe = (volatile unsigned short *)ioaddr;
+ tmp1 = ioaddr_probe[0];
+ tmp2 = ioaddr_probe[1];
+
+ ioaddr_probe[1] = CSR0;
+ ioaddr_probe[0] = CSR0_INIT | CSR0_STOP;
+
+ if(ioaddr_probe[0] != CSR0_STOP) {
+ ioaddr_probe[0] = tmp1;
+ ioaddr_probe[1] = tmp2;
+
+#ifdef CONFIG_SUN3
+ iounmap((void __iomem *)ioaddr);
+#endif
+ return 0;
+ }
+
+ lp = netdev_priv(dev);
+
+ /* XXX - leak? */
+ MEM = dvma_malloc_align(sizeof(struct lance_memory), 0x10000);
+ if (MEM == NULL) {
+#ifdef CONFIG_SUN3
+ iounmap((void __iomem *)ioaddr);
+#endif
+ printk(KERN_WARNING "SUN3 Lance couldn't allocate DVMA memory\n");
+ return 0;
+ }
+
+ lp->iobase = (volatile unsigned short *)ioaddr;
+ dev->base_addr = (unsigned long)ioaddr; /* informational only */
+
+ REGA(CSR0) = CSR0_STOP;
+
+ if (request_irq(LANCE_IRQ, lance_interrupt, 0, "SUN3 Lance", dev) < 0) {
+#ifdef CONFIG_SUN3
+ iounmap((void __iomem *)ioaddr);
+#endif
+ dvma_free((void *)MEM);
+ printk(KERN_WARNING "SUN3 Lance unable to allocate IRQ\n");
+ return 0;
+ }
+ dev->irq = (unsigned short)LANCE_IRQ;
+
+
+ printk("%s: SUN3 Lance at io %#lx, mem %#lx, irq %d, hwaddr ",
+ dev->name,
+ (unsigned long)ioaddr,
+ (unsigned long)MEM,
+ dev->irq);
+
+ /* copy in the ethernet address from the prom */
+ for(i = 0; i < 6 ; i++)
+ dev->dev_addr[i] = idprom->id_ethaddr[i];
+
+	/* tell the card its ethernet address, bytes swapped */
+ MEM->init.hwaddr[0] = dev->dev_addr[1];
+ MEM->init.hwaddr[1] = dev->dev_addr[0];
+ MEM->init.hwaddr[2] = dev->dev_addr[3];
+ MEM->init.hwaddr[3] = dev->dev_addr[2];
+ MEM->init.hwaddr[4] = dev->dev_addr[5];
+ MEM->init.hwaddr[5] = dev->dev_addr[4];
+
+ printk("%pM\n", dev->dev_addr);
+
+ MEM->init.mode = 0x0000;
+ MEM->init.filter[0] = 0x00000000;
+ MEM->init.filter[1] = 0x00000000;
+ MEM->init.rdra = dvma_vtob(MEM->rx_head);
+ MEM->init.rlen = (RX_LOG_RING_SIZE << 13) |
+ (dvma_vtob(MEM->rx_head) >> 16);
+ MEM->init.tdra = dvma_vtob(MEM->tx_head);
+ MEM->init.tlen = (TX_LOG_RING_SIZE << 13) |
+ (dvma_vtob(MEM->tx_head) >> 16);
+
+ DPRINTK(2, ("initaddr: %08lx rx_ring: %08lx tx_ring: %08lx\n",
+ dvma_vtob(&(MEM->init)), dvma_vtob(MEM->rx_head),
+ (dvma_vtob(MEM->tx_head))));
+
+ if (did_version++ == 0)
+ printk( version );
+
+ dev->netdev_ops = &lance_netdev_ops;
+// KLUDGE -- REMOVE ME
+ set_bit(__LINK_STATE_PRESENT, &dev->state);
+
+
+ return 1;
+}
+
+static int lance_open( struct net_device *dev )
+{
+ struct lance_private *lp = netdev_priv(dev);
+ int i;
+
+ DPRINTK( 2, ( "%s: lance_open()\n", dev->name ));
+
+ REGA(CSR0) = CSR0_STOP;
+
+ lance_init_ring(dev);
+
+ /* From now on, AREG is kept to point to CSR0 */
+ REGA(CSR0) = CSR0_INIT;
+
+ i = 1000000;
+ while (--i > 0)
+ if (DREG & CSR0_IDON)
+ break;
+ if (i <= 0 || (DREG & CSR0_ERR)) {
+ DPRINTK( 2, ( "lance_open(): opening %s failed, i=%d, csr0=%04x\n",
+ dev->name, i, DREG ));
+ DREG = CSR0_STOP;
+ return -EIO;
+ }
+
+ DREG = CSR0_IDON | CSR0_STRT | CSR0_INEA;
+
+ netif_start_queue(dev);
+
+ DPRINTK( 2, ( "%s: LANCE is open, csr0 %04x\n", dev->name, DREG ));
+
+ return 0;
+}
+
+
+/* Initialize the LANCE Rx and Tx rings. */
+
+static void lance_init_ring( struct net_device *dev )
+{
+ struct lance_private *lp = netdev_priv(dev);
+ int i;
+
+ lp->lock = 0;
+ lp->tx_full = 0;
+ lp->new_rx = lp->new_tx = 0;
+ lp->old_rx = lp->old_tx = 0;
+
+ for( i = 0; i < TX_RING_SIZE; i++ ) {
+ MEM->tx_head[i].base = dvma_vtob(MEM->tx_data[i]);
+ MEM->tx_head[i].flag = 0;
+ MEM->tx_head[i].base_hi =
+ (dvma_vtob(MEM->tx_data[i])) >>16;
+ MEM->tx_head[i].length = 0;
+ MEM->tx_head[i].misc = 0;
+ }
+
+ for( i = 0; i < RX_RING_SIZE; i++ ) {
+ MEM->rx_head[i].base = dvma_vtob(MEM->rx_data[i]);
+ MEM->rx_head[i].flag = RMD1_OWN_CHIP;
+ MEM->rx_head[i].base_hi =
+ (dvma_vtob(MEM->rx_data[i])) >> 16;
+ MEM->rx_head[i].buf_length = -PKT_BUF_SZ | 0xf000;
+ MEM->rx_head[i].msg_length = 0;
+ }
+
+	/* tell the card its ethernet address, bytes swapped */
+ MEM->init.hwaddr[0] = dev->dev_addr[1];
+ MEM->init.hwaddr[1] = dev->dev_addr[0];
+ MEM->init.hwaddr[2] = dev->dev_addr[3];
+ MEM->init.hwaddr[3] = dev->dev_addr[2];
+ MEM->init.hwaddr[4] = dev->dev_addr[5];
+ MEM->init.hwaddr[5] = dev->dev_addr[4];
+
+ MEM->init.mode = 0x0000;
+ MEM->init.filter[0] = 0x00000000;
+ MEM->init.filter[1] = 0x00000000;
+ MEM->init.rdra = dvma_vtob(MEM->rx_head);
+ MEM->init.rlen = (RX_LOG_RING_SIZE << 13) |
+ (dvma_vtob(MEM->rx_head) >> 16);
+ MEM->init.tdra = dvma_vtob(MEM->tx_head);
+ MEM->init.tlen = (TX_LOG_RING_SIZE << 13) |
+ (dvma_vtob(MEM->tx_head) >> 16);
+
+
+ /* tell the lance the address of its init block */
+ REGA(CSR1) = dvma_vtob(&(MEM->init));
+ REGA(CSR2) = dvma_vtob(&(MEM->init)) >> 16;
+
+#ifdef CONFIG_SUN3X
+ REGA(CSR3) = CSR3_BSWP | CSR3_ACON | CSR3_BCON;
+#else
+ REGA(CSR3) = CSR3_BSWP;
+#endif
+
+}
+
+
+static netdev_tx_t
+lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ int entry, len;
+ struct lance_tx_head *head;
+ unsigned long flags;
+
+ DPRINTK( 1, ( "%s: transmit start.\n",
+ dev->name));
+
+ /* Transmitter timeout, serious problems. */
+ if (netif_queue_stopped(dev)) {
+ int tickssofar = jiffies - dev_trans_start(dev);
+ if (tickssofar < HZ/5)
+ return NETDEV_TX_BUSY;
+
+ DPRINTK( 1, ( "%s: transmit timed out, status %04x, resetting.\n",
+ dev->name, DREG ));
+ DREG = CSR0_STOP;
+ /*
+ * Always set BSWP after a STOP as STOP puts it back into
+ * little endian mode.
+ */
+ REGA(CSR3) = CSR3_BSWP;
+ dev->stats.tx_errors++;
+
+ if(lance_debug >= 2) {
+ int i;
+ printk("Ring data: old_tx %d new_tx %d%s new_rx %d\n",
+ lp->old_tx, lp->new_tx,
+ lp->tx_full ? " (full)" : "",
+ lp->new_rx );
+ for( i = 0 ; i < RX_RING_SIZE; i++ )
+ printk( "rx #%d: base=%04x blen=%04x mlen=%04x\n",
+ i, MEM->rx_head[i].base,
+ -MEM->rx_head[i].buf_length,
+ MEM->rx_head[i].msg_length);
+ for( i = 0 ; i < TX_RING_SIZE; i++ )
+ printk("tx #%d: base=%04x len=%04x misc=%04x\n",
+ i, MEM->tx_head[i].base,
+ -MEM->tx_head[i].length,
+ MEM->tx_head[i].misc );
+ }
+
+ lance_init_ring(dev);
+ REGA( CSR0 ) = CSR0_INEA | CSR0_INIT | CSR0_STRT;
+
+ netif_start_queue(dev);
+
+ return NETDEV_TX_OK;
+ }
+
+
+	/* Block a timer-based transmit from overlapping with us by
+	   stopping the queue for a bit... */
+
+ netif_stop_queue(dev);
+
+ if (test_and_set_bit( 0, (void*)&lp->lock ) != 0) {
+		printk("%s: tx queue lock!\n", dev->name);
+ /* don't clear dev->tbusy flag. */
+ return NETDEV_TX_BUSY;
+ }
+
+ AREG = CSR0;
+ DPRINTK( 2, ( "%s: lance_start_xmit() called, csr0 %4.4x.\n",
+ dev->name, DREG ));
+
+#ifdef CONFIG_SUN3X
+ /* this weirdness doesn't appear on sun3... */
+ if(!(DREG & CSR0_INIT)) {
+ DPRINTK( 1, ("INIT not set, reinitializing...\n"));
+ REGA( CSR0 ) = CSR0_STOP;
+ lance_init_ring(dev);
+ REGA( CSR0 ) = CSR0_INIT | CSR0_STRT;
+ }
+#endif
+
+ /* Fill in a Tx ring entry */
+#if 0
+ if (lance_debug >= 2) {
+ printk( "%s: TX pkt %d type 0x%04x"
+ " from %s to %s"
+ " data at 0x%08x len %d\n",
+ dev->name, lp->new_tx, ((u_short *)skb->data)[6],
+ DEV_ADDR(&skb->data[6]), DEV_ADDR(skb->data),
+ (int)skb->data, (int)skb->len );
+ }
+#endif
+ /* We're not prepared for the int until the last flags are set/reset.
+ * And the int may happen already after setting the OWN_CHIP... */
+ local_irq_save(flags);
+
+ /* Mask to ring buffer boundary. */
+ entry = lp->new_tx;
+ head = &(MEM->tx_head[entry]);
+
+ /* Caution: the write order is important here, set the "ownership" bits
+ * last.
+ */
+
+	/* the sun3's lance needs its buffers padded to the
+	   minimum packet size */
+ len = (ETH_ZLEN < skb->len) ? skb->len : ETH_ZLEN;
+
+	/* Negative (2s complement) length; the top four bits must be ones. */
+	head->length = (-len) | 0xf000;
+ head->misc = 0;
+
+ skb_copy_from_linear_data(skb, PKTBUF_ADDR(head), skb->len);
+ if (len != skb->len)
+ memset(PKTBUF_ADDR(head) + skb->len, 0, len-skb->len);
+
+ head->flag = TMD1_OWN_CHIP | TMD1_ENP | TMD1_STP;
+ lp->new_tx = (lp->new_tx + 1) & TX_RING_MOD_MASK;
+ dev->stats.tx_bytes += skb->len;
+
+ /* Trigger an immediate send poll. */
+ REGA(CSR0) = CSR0_INEA | CSR0_TDMD | CSR0_STRT;
+ AREG = CSR0;
+ DPRINTK( 2, ( "%s: lance_start_xmit() exiting, csr0 %4.4x.\n",
+ dev->name, DREG ));
+ dev_kfree_skb(skb);
+
+ lp->lock = 0;
+ if ((MEM->tx_head[(entry+1) & TX_RING_MOD_MASK].flag & TMD1_OWN) ==
+ TMD1_OWN_HOST)
+ netif_start_queue(dev);
+
+ local_irq_restore(flags);
+
+ return NETDEV_TX_OK;
+}
+
+/* The LANCE interrupt handler. */
+
+static irqreturn_t lance_interrupt( int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct lance_private *lp = netdev_priv(dev);
+ int csr0;
+
+ still_more:
+ flush_cache_all();
+
+ AREG = CSR0;
+ csr0 = DREG;
+
+ /* ack interrupts */
+ DREG = csr0 & (CSR0_TINT | CSR0_RINT | CSR0_IDON);
+
+ /* clear errors */
+ if(csr0 & CSR0_ERR)
+ DREG = CSR0_BABL | CSR0_MERR | CSR0_CERR | CSR0_MISS;
+
+
+ DPRINTK( 2, ( "%s: interrupt csr0=%04x new csr=%04x.\n",
+ dev->name, csr0, DREG ));
+
+ if (csr0 & CSR0_TINT) { /* Tx-done interrupt */
+ int old_tx = lp->old_tx;
+
+// if(lance_debug >= 3) {
+// int i;
+//
+// printk("%s: tx int\n", dev->name);
+//
+// for(i = 0; i < TX_RING_SIZE; i++)
+// printk("ring %d flag=%04x\n", i,
+// MEM->tx_head[i].flag);
+// }
+
+ while( old_tx != lp->new_tx) {
+ struct lance_tx_head *head = &(MEM->tx_head[old_tx]);
+
+ DPRINTK(3, ("on tx_ring %d\n", old_tx));
+
+ if (head->flag & TMD1_OWN_CHIP)
+ break; /* It still hasn't been Txed */
+
+ if (head->flag & TMD1_ERR) {
+ int status = head->misc;
+ dev->stats.tx_errors++;
+ if (status & TMD3_RTRY) dev->stats.tx_aborted_errors++;
+ if (status & TMD3_LCAR) dev->stats.tx_carrier_errors++;
+ if (status & TMD3_LCOL) dev->stats.tx_window_errors++;
+ if (status & (TMD3_UFLO | TMD3_BUFF)) {
+ dev->stats.tx_fifo_errors++;
+ printk("%s: Tx FIFO error\n",
+ dev->name);
+ REGA(CSR0) = CSR0_STOP;
+ REGA(CSR3) = CSR3_BSWP;
+ lance_init_ring(dev);
+ REGA(CSR0) = CSR0_STRT | CSR0_INEA;
+ return IRQ_HANDLED;
+ }
+ } else if(head->flag & (TMD1_ENP | TMD1_STP)) {
+
+ head->flag &= ~(TMD1_ENP | TMD1_STP);
+ if(head->flag & (TMD1_ONE | TMD1_MORE))
+ dev->stats.collisions++;
+
+ dev->stats.tx_packets++;
+ DPRINTK(3, ("cleared tx ring %d\n", old_tx));
+ }
+ old_tx = (old_tx +1) & TX_RING_MOD_MASK;
+ }
+
+ lp->old_tx = old_tx;
+ }
+
+
+ if (netif_queue_stopped(dev)) {
+ /* The ring is no longer full, clear tbusy. */
+ netif_start_queue(dev);
+ netif_wake_queue(dev);
+ }
+
+ if (csr0 & CSR0_RINT) /* Rx interrupt */
+ lance_rx( dev );
+
+ /* Log misc errors. */
+ if (csr0 & CSR0_BABL) dev->stats.tx_errors++; /* Tx babble. */
+ if (csr0 & CSR0_MISS) dev->stats.rx_errors++; /* Missed a Rx frame. */
+ if (csr0 & CSR0_MERR) {
+ DPRINTK( 1, ( "%s: Bus master arbitration failure (?!?), "
+ "status %04x.\n", dev->name, csr0 ));
+ /* Restart the chip. */
+ REGA(CSR0) = CSR0_STOP;
+ REGA(CSR3) = CSR3_BSWP;
+ lance_init_ring(dev);
+ REGA(CSR0) = CSR0_STRT | CSR0_INEA;
+ }
+
+
+ /* Clear any other interrupt, and set interrupt enable. */
+// DREG = CSR0_BABL | CSR0_CERR | CSR0_MISS | CSR0_MERR |
+// CSR0_IDON | CSR0_INEA;
+
+ REGA(CSR0) = CSR0_INEA;
+
+ if(DREG & (CSR0_RINT | CSR0_TINT)) {
+ DPRINTK(2, ("restarting interrupt, csr0=%#04x\n", DREG));
+ goto still_more;
+ }
+
+ DPRINTK( 2, ( "%s: exiting interrupt, csr0=%#04x.\n",
+ dev->name, DREG ));
+ return IRQ_HANDLED;
+}
+
+/* get packet, toss into skbuff */
+static int lance_rx( struct net_device *dev )
+{
+ struct lance_private *lp = netdev_priv(dev);
+ int entry = lp->new_rx;
+
+ /* If we own the next entry, it's a new packet. Send it up. */
+ while( (MEM->rx_head[entry].flag & RMD1_OWN) == RMD1_OWN_HOST ) {
+ struct lance_rx_head *head = &(MEM->rx_head[entry]);
+ int status = head->flag;
+
+ if (status != (RMD1_ENP|RMD1_STP)) { /* There was an error. */
+ /* There is a tricky error noted by John Murphy,
+ <murf@perftech.com> to Russ Nelson: Even with
+ full-sized buffers it's possible for a jabber packet to use two
+ buffers, with only the last correctly noting the error. */
+ if (status & RMD1_ENP) /* Only count a general error at the */
+ dev->stats.rx_errors++; /* end of a packet.*/
+ if (status & RMD1_FRAM) dev->stats.rx_frame_errors++;
+ if (status & RMD1_OFLO) dev->stats.rx_over_errors++;
+ if (status & RMD1_CRC) dev->stats.rx_crc_errors++;
+ if (status & RMD1_BUFF) dev->stats.rx_fifo_errors++;
+ head->flag &= (RMD1_ENP|RMD1_STP);
+ } else {
+ /* Malloc up new buffer, compatible with net-3. */
+// short pkt_len = head->msg_length;// & 0xfff;
+ short pkt_len = (head->msg_length & 0xfff) - 4;
+ struct sk_buff *skb;
+
+ if (pkt_len < 60) {
+ printk( "%s: Runt packet!\n", dev->name );
+ dev->stats.rx_errors++;
+ }
+ else {
+ skb = netdev_alloc_skb(dev, pkt_len + 2);
+ if (skb == NULL) {
+ dev->stats.rx_dropped++;
+ head->msg_length = 0;
+ head->flag |= RMD1_OWN_CHIP;
+					lp->new_rx = (lp->new_rx+1) &
+						RX_RING_MOD_MASK;
+					/* skb is NULL here; bail out before
+					 * the code below dereferences it.
+					 */
+					return 0;
+				}
+
+#if 0
+ if (lance_debug >= 3) {
+ u_char *data = PKTBUF_ADDR(head);
+ printk("%s: RX pkt %d type 0x%04x"
+ " from %pM to %pM",
+ dev->name, lp->new_tx, ((u_short *)data)[6],
+ &data[6], data);
+
+ printk(" data %02x %02x %02x %02x %02x %02x %02x %02x "
+ "len %d at %08x\n",
+ data[15], data[16], data[17], data[18],
+ data[19], data[20], data[21], data[22],
+ pkt_len, data);
+ }
+#endif
+ if (lance_debug >= 3) {
+ u_char *data = PKTBUF_ADDR(head);
+				printk("%s: RX pkt %d type 0x%04x len %d\n",
+				       dev->name, entry,
+				       ((u_short *)data)[6], pkt_len);
+ }
+
+
+ skb_reserve( skb, 2 ); /* 16 byte align */
+ skb_put( skb, pkt_len ); /* Make room */
+ skb_copy_to_linear_data(skb,
+ PKTBUF_ADDR(head),
+ pkt_len);
+
+ skb->protocol = eth_type_trans( skb, dev );
+ netif_rx( skb );
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += pkt_len;
+ }
+ }
+
+// head->buf_length = -PKT_BUF_SZ | 0xf000;
+ head->msg_length = 0;
+ head->flag = RMD1_OWN_CHIP;
+
+ entry = lp->new_rx = (lp->new_rx +1) & RX_RING_MOD_MASK;
+ }
+
+ /* From lance.c (Donald Becker): */
+ /* We should check that at least two ring entries are free.
+ If not, we should free one and mark stats->rx_dropped++. */
+
+ return 0;
+}
+
+
+static int lance_close( struct net_device *dev )
+{
+ struct lance_private *lp = netdev_priv(dev);
+
+ netif_stop_queue(dev);
+
+ AREG = CSR0;
+
+ DPRINTK( 2, ( "%s: Shutting down ethercard, status was %2.2x.\n",
+ dev->name, DREG ));
+
+ /* We stop the LANCE here -- it occasionally polls
+ memory if we don't. */
+ DREG = CSR0_STOP;
+ return 0;
+}
+
+
+/* Set or clear the multicast filter for this adaptor.
+ num_addrs == -1 Promiscuous mode, receive all packets
+ num_addrs == 0 Normal mode, clear multicast list
+ num_addrs > 0 Multicast mode, receive normal and MC packets, and do
+ best-effort filtering.
+ */
+
+/* completely untested on a sun3 */
+static void set_multicast_list( struct net_device *dev )
+{
+ struct lance_private *lp = netdev_priv(dev);
+
+ if(netif_queue_stopped(dev))
+ /* Only possible if board is already started */
+ return;
+
+	/* Temporarily stop the lance while the filter is updated. */
+	DREG = CSR0_STOP;
+
+ if (dev->flags & IFF_PROMISC) {
+ /* Log any net taps. */
+ DPRINTK( 3, ( "%s: Promiscuous mode enabled.\n", dev->name ));
+ REGA( CSR15 ) = 0x8000; /* Set promiscuous mode */
+ } else {
+ short multicast_table[4];
+ int num_addrs = netdev_mc_count(dev);
+ int i;
+ /* We don't use the multicast table, but rely on upper-layer
+ * filtering. */
+ memset( multicast_table, (num_addrs == 0) ? 0 : -1,
+ sizeof(multicast_table) );
+ for( i = 0; i < 4; i++ )
+ REGA( CSR8+i ) = multicast_table[i];
+ REGA( CSR15 ) = 0; /* Unset promiscuous mode */
+ }
+
+ /*
+ * Always set BSWP after a STOP as STOP puts it back into
+ * little endian mode.
+ */
+ REGA( CSR3 ) = CSR3_BSWP;
+
+ /* Resume normal operation and reset AREG to CSR0 */
+ REGA( CSR0 ) = CSR0_IDON | CSR0_INEA | CSR0_STRT;
+}
+
+
+#ifdef MODULE
+
+static struct net_device *sun3lance_dev;
+
+int __init init_module(void)
+{
+ sun3lance_dev = sun3lance_probe(-1);
+ return PTR_ERR_OR_ZERO(sun3lance_dev);
+}
+
+void __exit cleanup_module(void)
+{
+ unregister_netdev(sun3lance_dev);
+#ifdef CONFIG_SUN3
+ iounmap((void __iomem *)sun3lance_dev->base_addr);
+#endif
+ free_netdev(sun3lance_dev);
+}
+
+#endif /* MODULE */
+
diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c
new file mode 100644
index 000000000..ddece276a
--- /dev/null
+++ b/drivers/net/ethernet/amd/sunlance.c
@@ -0,0 +1,1524 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* $Id: sunlance.c,v 1.112 2002/01/15 06:48:55 davem Exp $
+ * lance.c: Linux/Sparc/Lance driver
+ *
+ * Written 1995, 1996 by Miguel de Icaza
+ * Sources:
+ * The Linux depca driver
+ * The Linux lance driver.
+ * The Linux skeleton driver.
+ * The NetBSD Sparc/Lance driver.
+ * Theo de Raadt (deraadt@openbsd.org)
+ * NCR92C990 Lan Controller manual
+ *
+ * 1.4:
+ * Added support to run with a ledma on the Sun4m
+ *
+ * 1.5:
+ * Added multiple card detection.
+ *
+ * 4/17/96: Burst sizes and tpe selection on sun4m by Eddie C. Dost
+ * (ecd@skynet.be)
+ *
+ * 5/15/96: auto carrier detection on sun4m by Eddie C. Dost
+ * (ecd@skynet.be)
+ *
+ * 5/17/96: lebuffer on scsi/ether cards now work David S. Miller
+ * (davem@caip.rutgers.edu)
+ *
+ * 5/29/96: override option 'tpe-link-test?', if it is 'false', as
+ * this disables auto carrier detection on sun4m. Eddie C. Dost
+ * (ecd@skynet.be)
+ *
+ * 1.7:
+ * 6/26/96: Bug fix for multiple ledmas, miguel.
+ *
+ * 1.8:
+ * Stole multicast code from depca.c, fixed lance_tx.
+ *
+ * 1.9:
+ * 8/21/96: Fixed the multicast code (Pedro Roque)
+ *
+ * 8/28/96: Send fake packet in lance_open() if auto_select is true,
+ * so we can detect the carrier loss condition in time.
+ * Eddie C. Dost (ecd@skynet.be)
+ *
+ * 9/15/96: Align rx_buf so that eth_copy_and_sum() won't cause an
+ * MNA trap during chksum_partial_copy(). (ecd@skynet.be)
+ *
+ * 11/17/96: Handle LE_C0_MERR in lance_interrupt(). (ecd@skynet.be)
+ *
+ * 12/22/96: Don't loop forever in lance_rx() on incomplete packets.
+ * This was the sun4c killer. Shit, stupid bug.
+ * (ecd@skynet.be)
+ *
+ * 1.10:
+ * 1/26/97: Modularize driver. (ecd@skynet.be)
+ *
+ * 1.11:
+ * 12/27/97: Added sun4d support. (jj@sunsite.mff.cuni.cz)
+ *
+ * 1.12:
+ * 11/3/99: Fixed SMP race in lance_start_xmit found by davem.
+ * Anton Blanchard (anton@progsoc.uts.edu.au)
+ * 2.00: 11/9/99: Massive overhaul and port to new SBUS driver interfaces.
+ * David S. Miller (davem@redhat.com)
+ * 2.01:
+ * 11/08/01: Use library crc32 functions (Matt_Domsch@dell.com)
+ *
+ */
+
+#undef DEBUG_DRIVER
+
+static char lancestr[] = "LANCE";
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/crc32.h>
+#include <linux/errno.h>
+#include <linux/socket.h> /* Used for the temporary inet entries and routing */
+#include <linux/route.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/ethtool.h>
+#include <linux/bitops.h>
+#include <linux/dma-mapping.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/gfp.h>
+#include <linux/pgtable.h>
+
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/byteorder.h> /* Used by the checksum routines */
+#include <asm/idprom.h>
+#include <asm/prom.h>
+#include <asm/auxio.h> /* For tpe-link-test? setting */
+#include <asm/irq.h>
+
+#define DRV_NAME "sunlance"
+#define DRV_RELDATE "8/24/03"
+#define DRV_AUTHOR "Miguel de Icaza (miguel@nuclecu.unam.mx)"
+
+MODULE_AUTHOR(DRV_AUTHOR);
+MODULE_DESCRIPTION("Sun Lance ethernet driver");
+MODULE_LICENSE("GPL");
+
+/* Define: 2^4 Tx buffers and 2^4 Rx buffers */
+#ifndef LANCE_LOG_TX_BUFFERS
+#define LANCE_LOG_TX_BUFFERS 4
+#define LANCE_LOG_RX_BUFFERS 4
+#endif
+
+#define LE_CSR0 0
+#define LE_CSR1 1
+#define LE_CSR2 2
+#define LE_CSR3 3
+
+#define LE_MO_PROM 0x8000 /* Enable promiscuous mode */
+
+#define LE_C0_ERR 0x8000 /* Error: set if BAB, SQE, MISS or ME is set */
+#define LE_C0_BABL 0x4000 /* BAB: Babble: tx timeout. */
+#define LE_C0_CERR 0x2000 /* SQE: Signal quality error */
+#define LE_C0_MISS 0x1000 /* MISS: Missed a packet */
+#define LE_C0_MERR 0x0800 /* ME: Memory error */
+#define LE_C0_RINT 0x0400 /* Received interrupt */
+#define LE_C0_TINT 0x0200 /* Transmitter Interrupt */
+#define LE_C0_IDON 0x0100 /* IFIN: Init finished. */
+#define LE_C0_INTR 0x0080 /* Interrupt or error */
+#define LE_C0_INEA 0x0040 /* Interrupt enable */
+#define LE_C0_RXON 0x0020 /* Receiver on */
+#define LE_C0_TXON 0x0010 /* Transmitter on */
+#define LE_C0_TDMD 0x0008 /* Transmitter demand */
+#define LE_C0_STOP 0x0004 /* Stop the card */
+#define LE_C0_STRT 0x0002 /* Start the card */
+#define LE_C0_INIT 0x0001 /* Init the card */
+
+#define LE_C3_BSWP 0x4 /* SWAP */
+#define LE_C3_ACON 0x2 /* ALE Control */
+#define LE_C3_BCON 0x1 /* Byte control */
+
+/* Receive message descriptor 1 */
+#define LE_R1_OWN 0x80 /* Who owns the entry */
+#define LE_R1_ERR 0x40 /* Error: if FRA, OFL, CRC or BUF is set */
+#define LE_R1_FRA 0x20 /* FRA: Frame error */
+#define LE_R1_OFL 0x10 /* OFL: Frame overflow */
+#define LE_R1_CRC 0x08 /* CRC error */
+#define LE_R1_BUF 0x04 /* BUF: Buffer error */
+#define LE_R1_SOP 0x02 /* Start of packet */
+#define LE_R1_EOP 0x01 /* End of packet */
+#define LE_R1_POK 0x03 /* Packet is complete: SOP + EOP */
+
+#define LE_T1_OWN 0x80 /* Lance owns the packet */
+#define LE_T1_ERR 0x40 /* Error summary */
+#define LE_T1_EMORE 0x10 /* Error: more than one retry needed */
+#define LE_T1_EONE 0x08 /* Error: one retry needed */
+#define LE_T1_EDEF 0x04 /* Error: deferred */
+#define LE_T1_SOP 0x02 /* Start of packet */
+#define LE_T1_EOP 0x01 /* End of packet */
+#define LE_T1_POK 0x03 /* Packet is complete: SOP + EOP */
+
+#define LE_T3_BUF 0x8000 /* Buffer error */
+#define LE_T3_UFL 0x4000 /* Error underflow */
+#define LE_T3_LCOL 0x1000 /* Error late collision */
+#define LE_T3_CLOS 0x0800 /* Error carrier loss */
+#define LE_T3_RTY 0x0400 /* Error retry */
+#define LE_T3_TDR 0x03ff /* Time Domain Reflectometry counter */
+
+#define TX_RING_SIZE (1 << (LANCE_LOG_TX_BUFFERS))
+#define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
+#define TX_RING_LEN_BITS ((LANCE_LOG_TX_BUFFERS) << 29)
+#define TX_NEXT(__x) (((__x)+1) & TX_RING_MOD_MASK)
+
+#define RX_RING_SIZE (1 << (LANCE_LOG_RX_BUFFERS))
+#define RX_RING_MOD_MASK (RX_RING_SIZE - 1)
+#define RX_RING_LEN_BITS ((LANCE_LOG_RX_BUFFERS) << 29)
+#define RX_NEXT(__x) (((__x)+1) & RX_RING_MOD_MASK)
+
+#define PKT_BUF_SZ 1544
+#define RX_BUFF_SIZE PKT_BUF_SZ
+#define TX_BUFF_SIZE PKT_BUF_SZ
+
+struct lance_rx_desc {
+ u16 rmd0; /* low address of packet */
+ u8 rmd1_bits; /* descriptor bits */
+ u8 rmd1_hadr; /* high address of packet */
+ s16 length; /* This length is 2s complement (negative)!
+ * Buffer length
+ */
+ u16 mblength; /* This is the actual number of bytes received */
+};
+
+struct lance_tx_desc {
+ u16 tmd0; /* low address of packet */
+ u8 tmd1_bits; /* descriptor bits */
+ u8 tmd1_hadr; /* high address of packet */
+ s16 length; /* Length is 2s complement (negative)! */
+ u16 misc;
+};
+
+/* The LANCE initialization block, described in databook. */
+/* On the Sparc, this block should be on a DMA region */
+struct lance_init_block {
+ u16 mode; /* Pre-set mode (reg. 15) */
+ u8 phys_addr[6]; /* Physical ethernet address */
+ u32 filter[2]; /* Multicast filter. */
+
+ /* Receive and transmit ring base, along with extra bits. */
+ u16 rx_ptr; /* receive descriptor addr */
+ u16 rx_len; /* receive len and high addr */
+ u16 tx_ptr; /* transmit descriptor addr */
+ u16 tx_len; /* transmit len and high addr */
+
+	/* The Tx and Rx ring entries must be aligned on 8-byte boundaries. */
+ struct lance_rx_desc brx_ring[RX_RING_SIZE];
+ struct lance_tx_desc btx_ring[TX_RING_SIZE];
+
+ u8 tx_buf [TX_RING_SIZE][TX_BUFF_SIZE];
+ u8 pad[2]; /* align rx_buf for copy_and_sum(). */
+ u8 rx_buf [RX_RING_SIZE][RX_BUFF_SIZE];
+};
+
+#define libdesc_offset(rt, elem) \
+((__u32)(((unsigned long)(&(((struct lance_init_block *)0)->rt[elem])))))
+
+#define libbuff_offset(rt, elem) \
+((__u32)(((unsigned long)(&(((struct lance_init_block *)0)->rt[elem][0])))))
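+
+/* Both macros above are open-coded offsetof(): they yield the byte offset
+ * of a descriptor or buffer within the init block, from which its DVMA or
+ * PIO address is formed.
+ */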
+
+struct lance_private {
+ void __iomem *lregs; /* Lance RAP/RDP regs. */
+ void __iomem *dregs; /* DMA controller regs. */
+ struct lance_init_block __iomem *init_block_iomem;
+ struct lance_init_block *init_block_mem;
+
+ spinlock_t lock;
+
+ int rx_new, tx_new;
+ int rx_old, tx_old;
+
+ struct platform_device *ledma; /* If set this points to ledma */
+ char tpe; /* cable-selection is TPE */
+ char auto_select; /* cable-selection by carrier */
+ char burst_sizes; /* ledma SBus burst sizes */
+ char pio_buffer; /* init block in PIO space? */
+
+ unsigned short busmaster_regval;
+
+ void (*init_ring)(struct net_device *);
+ void (*rx)(struct net_device *);
+ void (*tx)(struct net_device *);
+
+ char *name;
+ dma_addr_t init_block_dvma;
+ struct net_device *dev; /* Backpointer */
+ struct platform_device *op;
+ struct platform_device *lebuffer;
+ struct timer_list multicast_timer;
+};
+
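+/* Number of free Tx slots; one slot is always left unused so that a full
+ * ring can be distinguished from an empty one.
+ */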
+#define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\
+ lp->tx_old+TX_RING_MOD_MASK-lp->tx_new:\
+ lp->tx_old - lp->tx_new-1)
+
+/* Lance registers. */
+#define RDP 0x00UL /* register data port */
+#define RAP 0x02UL /* register address port */
+#define LANCE_REG_SIZE 0x04UL
+
+#define STOP_LANCE(__lp) \
+do { void __iomem *__base = (__lp)->lregs; \
+ sbus_writew(LE_CSR0, __base + RAP); \
+ sbus_writew(LE_C0_STOP, __base + RDP); \
+} while (0)
+
+int sparc_lance_debug = 2;
+
+/* The Lance uses 24 bit addresses */
+/* On the Sun4c the DVMA will provide the remaining bytes for us */
+/* On the Sun4m we have to instruct the ledma to provide them */
+/* Even worse, on scsi/ether SBUS cards, the init block and the
+ * transmit/receive buffers are addressed as offsets from absolute
+ * zero on the lebuffer PIO area. -DaveM
+ */
+
+#define LANCE_ADDR(x) ((long)(x) & ~0xff000000)
+
+/* Load the CSR registers */
+static void load_csrs(struct lance_private *lp)
+{
+ u32 leptr;
+
+ if (lp->pio_buffer)
+ leptr = 0;
+ else
+ leptr = LANCE_ADDR(lp->init_block_dvma);
+
+ sbus_writew(LE_CSR1, lp->lregs + RAP);
+ sbus_writew(leptr & 0xffff, lp->lregs + RDP);
+ sbus_writew(LE_CSR2, lp->lregs + RAP);
+ sbus_writew(leptr >> 16, lp->lregs + RDP);
+ sbus_writew(LE_CSR3, lp->lregs + RAP);
+ sbus_writew(lp->busmaster_regval, lp->lregs + RDP);
+
+ /* Point back to csr0 */
+ sbus_writew(LE_CSR0, lp->lregs + RAP);
+}
+
+/* Setup the Lance Rx and Tx rings */
+static void lance_init_ring_dvma(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ struct lance_init_block *ib = lp->init_block_mem;
+ dma_addr_t aib = lp->init_block_dvma;
+ __u32 leptr;
+ int i;
+
+ /* Lock out other processes while setting up hardware */
+ netif_stop_queue(dev);
+ lp->rx_new = lp->tx_new = 0;
+ lp->rx_old = lp->tx_old = 0;
+
+ /* Copy the ethernet address to the lance init block
+ * Note that on the sparc you need to swap the ethernet address.
+ */
+ ib->phys_addr [0] = dev->dev_addr [1];
+ ib->phys_addr [1] = dev->dev_addr [0];
+ ib->phys_addr [2] = dev->dev_addr [3];
+ ib->phys_addr [3] = dev->dev_addr [2];
+ ib->phys_addr [4] = dev->dev_addr [5];
+ ib->phys_addr [5] = dev->dev_addr [4];
+
+ /* Setup the Tx ring entries */
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ leptr = LANCE_ADDR(aib + libbuff_offset(tx_buf, i));
+ ib->btx_ring [i].tmd0 = leptr;
+ ib->btx_ring [i].tmd1_hadr = leptr >> 16;
+ ib->btx_ring [i].tmd1_bits = 0;
+ ib->btx_ring [i].length = 0xf000; /* The ones required by tmd2 */
+ ib->btx_ring [i].misc = 0;
+ }
+
+ /* Setup the Rx ring entries */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ leptr = LANCE_ADDR(aib + libbuff_offset(rx_buf, i));
+
+ ib->brx_ring [i].rmd0 = leptr;
+ ib->brx_ring [i].rmd1_hadr = leptr >> 16;
+ ib->brx_ring [i].rmd1_bits = LE_R1_OWN;
+ ib->brx_ring [i].length = -RX_BUFF_SIZE | 0xf000;
+ ib->brx_ring [i].mblength = 0;
+ }
+
+ /* Setup the initialization block */
+
+ /* Setup rx descriptor pointer */
+ leptr = LANCE_ADDR(aib + libdesc_offset(brx_ring, 0));
+ ib->rx_len = (LANCE_LOG_RX_BUFFERS << 13) | (leptr >> 16);
+ ib->rx_ptr = leptr;
+
+ /* Setup tx descriptor pointer */
+ leptr = LANCE_ADDR(aib + libdesc_offset(btx_ring, 0));
+ ib->tx_len = (LANCE_LOG_TX_BUFFERS << 13) | (leptr >> 16);
+ ib->tx_ptr = leptr;
+}
+
+static void lance_init_ring_pio(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ struct lance_init_block __iomem *ib = lp->init_block_iomem;
+ u32 leptr;
+ int i;
+
+ /* Lock out other processes while setting up hardware */
+ netif_stop_queue(dev);
+ lp->rx_new = lp->tx_new = 0;
+ lp->rx_old = lp->tx_old = 0;
+
+ /* Copy the ethernet address to the lance init block
+ * Note that on the sparc you need to swap the ethernet address.
+ */
+ sbus_writeb(dev->dev_addr[1], &ib->phys_addr[0]);
+ sbus_writeb(dev->dev_addr[0], &ib->phys_addr[1]);
+ sbus_writeb(dev->dev_addr[3], &ib->phys_addr[2]);
+ sbus_writeb(dev->dev_addr[2], &ib->phys_addr[3]);
+ sbus_writeb(dev->dev_addr[5], &ib->phys_addr[4]);
+ sbus_writeb(dev->dev_addr[4], &ib->phys_addr[5]);
+
+ /* Setup the Tx ring entries */
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ leptr = libbuff_offset(tx_buf, i);
+ sbus_writew(leptr, &ib->btx_ring [i].tmd0);
+ sbus_writeb(leptr >> 16,&ib->btx_ring [i].tmd1_hadr);
+ sbus_writeb(0, &ib->btx_ring [i].tmd1_bits);
+
+ /* The ones required by tmd2 */
+ sbus_writew(0xf000, &ib->btx_ring [i].length);
+ sbus_writew(0, &ib->btx_ring [i].misc);
+ }
+
+ /* Setup the Rx ring entries */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ leptr = libbuff_offset(rx_buf, i);
+
+ sbus_writew(leptr, &ib->brx_ring [i].rmd0);
+ sbus_writeb(leptr >> 16,&ib->brx_ring [i].rmd1_hadr);
+ sbus_writeb(LE_R1_OWN, &ib->brx_ring [i].rmd1_bits);
+ sbus_writew(-RX_BUFF_SIZE|0xf000,
+ &ib->brx_ring [i].length);
+ sbus_writew(0, &ib->brx_ring [i].mblength);
+ }
+
+ /* Setup the initialization block */
+
+ /* Setup rx descriptor pointer */
+ leptr = libdesc_offset(brx_ring, 0);
+ sbus_writew((LANCE_LOG_RX_BUFFERS << 13) | (leptr >> 16),
+ &ib->rx_len);
+ sbus_writew(leptr, &ib->rx_ptr);
+
+ /* Setup tx descriptor pointer */
+ leptr = libdesc_offset(btx_ring, 0);
+ sbus_writew((LANCE_LOG_TX_BUFFERS << 13) | (leptr >> 16),
+ &ib->tx_len);
+ sbus_writew(leptr, &ib->tx_ptr);
+}
+
+static void init_restart_ledma(struct lance_private *lp)
+{
+ u32 csr = sbus_readl(lp->dregs + DMA_CSR);
+
+ if (!(csr & DMA_HNDL_ERROR)) {
+ /* E-Cache draining */
+ while (sbus_readl(lp->dregs + DMA_CSR) & DMA_FIFO_ISDRAIN)
+ barrier();
+ }
+
+ csr = sbus_readl(lp->dregs + DMA_CSR);
+ csr &= ~DMA_E_BURSTS;
+ if (lp->burst_sizes & DMA_BURST32)
+ csr |= DMA_E_BURST32;
+ else
+ csr |= DMA_E_BURST16;
+
+ csr |= (DMA_DSBL_RD_DRN | DMA_DSBL_WR_INV | DMA_FIFO_INV);
+
+ if (lp->tpe)
+ csr |= DMA_EN_ENETAUI;
+ else
+ csr &= ~DMA_EN_ENETAUI;
+ udelay(20);
+ sbus_writel(csr, lp->dregs + DMA_CSR);
+ udelay(200);
+}
+
+static int init_restart_lance(struct lance_private *lp)
+{
+ u16 regval = 0;
+ int i;
+
+ if (lp->dregs)
+ init_restart_ledma(lp);
+
+ sbus_writew(LE_CSR0, lp->lregs + RAP);
+ sbus_writew(LE_C0_INIT, lp->lregs + RDP);
+
+ /* Wait for the lance to complete initialization */
+ for (i = 0; i < 100; i++) {
+ regval = sbus_readw(lp->lregs + RDP);
+
+ if (regval & (LE_C0_ERR | LE_C0_IDON))
+ break;
+ barrier();
+ }
+ if (i == 100 || (regval & LE_C0_ERR)) {
+ printk(KERN_ERR "LANCE unopened after %d ticks, csr0=%4.4x.\n",
+ i, regval);
+ if (lp->dregs)
+ printk("dcsr=%8.8x\n", sbus_readl(lp->dregs + DMA_CSR));
+ return -1;
+ }
+
+ /* Clear IDON by writing a "1", enable interrupts and start lance */
+ sbus_writew(LE_C0_IDON, lp->lregs + RDP);
+ sbus_writew(LE_C0_INEA | LE_C0_STRT, lp->lregs + RDP);
+
+ if (lp->dregs) {
+ u32 csr = sbus_readl(lp->dregs + DMA_CSR);
+
+ csr |= DMA_INT_ENAB;
+ sbus_writel(csr, lp->dregs + DMA_CSR);
+ }
+
+ return 0;
+}
+
+static void lance_rx_dvma(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ struct lance_init_block *ib = lp->init_block_mem;
+ struct lance_rx_desc *rd;
+ u8 bits;
+ int len, entry = lp->rx_new;
+ struct sk_buff *skb;
+
+ for (rd = &ib->brx_ring [entry];
+ !((bits = rd->rmd1_bits) & LE_R1_OWN);
+ rd = &ib->brx_ring [entry]) {
+
+ /* We got an incomplete frame? */
+ if ((bits & LE_R1_POK) != LE_R1_POK) {
+ dev->stats.rx_over_errors++;
+ dev->stats.rx_errors++;
+ } else if (bits & LE_R1_ERR) {
+ /* Count only the end frame as a rx error,
+ * not the beginning
+ */
+ if (bits & LE_R1_BUF) dev->stats.rx_fifo_errors++;
+ if (bits & LE_R1_CRC) dev->stats.rx_crc_errors++;
+ if (bits & LE_R1_OFL) dev->stats.rx_over_errors++;
+ if (bits & LE_R1_FRA) dev->stats.rx_frame_errors++;
+ if (bits & LE_R1_EOP) dev->stats.rx_errors++;
+ } else {
+ len = (rd->mblength & 0xfff) - 4;
+ skb = netdev_alloc_skb(dev, len + 2);
+
+ if (skb == NULL) {
+ dev->stats.rx_dropped++;
+ rd->mblength = 0;
+ rd->rmd1_bits = LE_R1_OWN;
+ lp->rx_new = RX_NEXT(entry);
+ return;
+ }
+
+ dev->stats.rx_bytes += len;
+
+ skb_reserve(skb, 2); /* 16 byte align */
+ skb_put(skb, len); /* make room */
+ skb_copy_to_linear_data(skb,
+ (unsigned char *)&(ib->rx_buf [entry][0]),
+ len);
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->stats.rx_packets++;
+ }
+
+ /* Return the packet to the pool */
+ rd->mblength = 0;
+ rd->rmd1_bits = LE_R1_OWN;
+ entry = RX_NEXT(entry);
+ }
+
+ lp->rx_new = entry;
+}
+
+static void lance_tx_dvma(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ struct lance_init_block *ib = lp->init_block_mem;
+ int i, j;
+
+ spin_lock(&lp->lock);
+
+ j = lp->tx_old;
+ for (i = j; i != lp->tx_new; i = j) {
+ struct lance_tx_desc *td = &ib->btx_ring [i];
+ u8 bits = td->tmd1_bits;
+
+ /* If we hit a packet not owned by us, stop */
+ if (bits & LE_T1_OWN)
+ break;
+
+ if (bits & LE_T1_ERR) {
+ u16 status = td->misc;
+
+ dev->stats.tx_errors++;
+ if (status & LE_T3_RTY) dev->stats.tx_aborted_errors++;
+ if (status & LE_T3_LCOL) dev->stats.tx_window_errors++;
+
+ if (status & LE_T3_CLOS) {
+ dev->stats.tx_carrier_errors++;
+ if (lp->auto_select) {
+ lp->tpe = 1 - lp->tpe;
+ printk(KERN_NOTICE "%s: Carrier Lost, trying %s\n",
+ dev->name, lp->tpe?"TPE":"AUI");
+ STOP_LANCE(lp);
+ lp->init_ring(dev);
+ load_csrs(lp);
+ init_restart_lance(lp);
+ goto out;
+ }
+ }
+
+ /* Buffer errors and underflows turn off the
+ * transmitter, restart the adapter.
+ */
+ if (status & (LE_T3_BUF|LE_T3_UFL)) {
+ dev->stats.tx_fifo_errors++;
+
+ printk(KERN_ERR "%s: Tx: ERR_BUF|ERR_UFL, restarting\n",
+ dev->name);
+ STOP_LANCE(lp);
+ lp->init_ring(dev);
+ load_csrs(lp);
+ init_restart_lance(lp);
+ goto out;
+ }
+ } else if ((bits & LE_T1_POK) == LE_T1_POK) {
+ /*
+ * So we don't count the packet more than once.
+ */
+ td->tmd1_bits = bits & ~(LE_T1_POK);
+
+ /* One collision before packet was sent. */
+ if (bits & LE_T1_EONE)
+ dev->stats.collisions++;
+
+ /* More than one collision, be optimistic. */
+ if (bits & LE_T1_EMORE)
+ dev->stats.collisions += 2;
+
+ dev->stats.tx_packets++;
+ }
+
+ j = TX_NEXT(j);
+ }
+ lp->tx_old = j;
+out:
+ if (netif_queue_stopped(dev) &&
+ TX_BUFFS_AVAIL > 0)
+ netif_wake_queue(dev);
+
+ spin_unlock(&lp->lock);
+}
+
+static void lance_piocopy_to_skb(struct sk_buff *skb, void __iomem *piobuf, int len)
+{
+ u16 *p16 = (u16 *) skb->data;
+ u32 *p32;
+ u8 *p8;
+ void __iomem *pbuf = piobuf;
+
+ /* We know here that both src and dest are on a 16bit boundary. */
+ *p16++ = sbus_readw(pbuf);
+ p32 = (u32 *) p16;
+ pbuf += 2;
+ len -= 2;
+
+ while (len >= 4) {
+ *p32++ = sbus_readl(pbuf);
+ pbuf += 4;
+ len -= 4;
+ }
+ p8 = (u8 *) p32;
+ if (len >= 2) {
+ p16 = (u16 *) p32;
+ *p16++ = sbus_readw(pbuf);
+ pbuf += 2;
+ len -= 2;
+ p8 = (u8 *) p16;
+ }
+ if (len >= 1)
+ *p8 = sbus_readb(pbuf);
+}
+
+static void lance_rx_pio(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ struct lance_init_block __iomem *ib = lp->init_block_iomem;
+ struct lance_rx_desc __iomem *rd;
+ unsigned char bits;
+ int len, entry;
+ struct sk_buff *skb;
+
+ entry = lp->rx_new;
+ for (rd = &ib->brx_ring [entry];
+ !((bits = sbus_readb(&rd->rmd1_bits)) & LE_R1_OWN);
+ rd = &ib->brx_ring [entry]) {
+
+		/* Did we get an incomplete frame? */
+ if ((bits & LE_R1_POK) != LE_R1_POK) {
+ dev->stats.rx_over_errors++;
+ dev->stats.rx_errors++;
+ } else if (bits & LE_R1_ERR) {
+ /* Count only the end frame as a rx error,
+ * not the beginning
+ */
+ if (bits & LE_R1_BUF) dev->stats.rx_fifo_errors++;
+ if (bits & LE_R1_CRC) dev->stats.rx_crc_errors++;
+ if (bits & LE_R1_OFL) dev->stats.rx_over_errors++;
+ if (bits & LE_R1_FRA) dev->stats.rx_frame_errors++;
+ if (bits & LE_R1_EOP) dev->stats.rx_errors++;
+ } else {
+ len = (sbus_readw(&rd->mblength) & 0xfff) - 4;
+ skb = netdev_alloc_skb(dev, len + 2);
+
+ if (skb == NULL) {
+ dev->stats.rx_dropped++;
+ sbus_writew(0, &rd->mblength);
+ sbus_writeb(LE_R1_OWN, &rd->rmd1_bits);
+ lp->rx_new = RX_NEXT(entry);
+ return;
+ }
+
+ dev->stats.rx_bytes += len;
+
+			skb_reserve(skb, 2);		/* 16 byte align */
+ skb_put(skb, len); /* make room */
+ lance_piocopy_to_skb(skb, &(ib->rx_buf[entry][0]), len);
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->stats.rx_packets++;
+ }
+
+ /* Return the packet to the pool */
+ sbus_writew(0, &rd->mblength);
+ sbus_writeb(LE_R1_OWN, &rd->rmd1_bits);
+ entry = RX_NEXT(entry);
+ }
+
+ lp->rx_new = entry;
+}
+
+static void lance_tx_pio(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ struct lance_init_block __iomem *ib = lp->init_block_iomem;
+ int i, j;
+
+ spin_lock(&lp->lock);
+
+ j = lp->tx_old;
+ for (i = j; i != lp->tx_new; i = j) {
+ struct lance_tx_desc __iomem *td = &ib->btx_ring [i];
+ u8 bits = sbus_readb(&td->tmd1_bits);
+
+ /* If we hit a packet not owned by us, stop */
+ if (bits & LE_T1_OWN)
+ break;
+
+ if (bits & LE_T1_ERR) {
+ u16 status = sbus_readw(&td->misc);
+
+ dev->stats.tx_errors++;
+ if (status & LE_T3_RTY) dev->stats.tx_aborted_errors++;
+ if (status & LE_T3_LCOL) dev->stats.tx_window_errors++;
+
+ if (status & LE_T3_CLOS) {
+ dev->stats.tx_carrier_errors++;
+ if (lp->auto_select) {
+ lp->tpe = 1 - lp->tpe;
+ printk(KERN_NOTICE "%s: Carrier Lost, trying %s\n",
+ dev->name, lp->tpe?"TPE":"AUI");
+ STOP_LANCE(lp);
+ lp->init_ring(dev);
+ load_csrs(lp);
+ init_restart_lance(lp);
+ goto out;
+ }
+ }
+
+ /* Buffer errors and underflows turn off the
+ * transmitter, restart the adapter.
+ */
+ if (status & (LE_T3_BUF|LE_T3_UFL)) {
+ dev->stats.tx_fifo_errors++;
+
+ printk(KERN_ERR "%s: Tx: ERR_BUF|ERR_UFL, restarting\n",
+ dev->name);
+ STOP_LANCE(lp);
+ lp->init_ring(dev);
+ load_csrs(lp);
+ init_restart_lance(lp);
+ goto out;
+ }
+ } else if ((bits & LE_T1_POK) == LE_T1_POK) {
+ /*
+ * So we don't count the packet more than once.
+ */
+ sbus_writeb(bits & ~(LE_T1_POK), &td->tmd1_bits);
+
+ /* One collision before packet was sent. */
+ if (bits & LE_T1_EONE)
+ dev->stats.collisions++;
+
+ /* More than one collision, be optimistic. */
+ if (bits & LE_T1_EMORE)
+ dev->stats.collisions += 2;
+
+ dev->stats.tx_packets++;
+ }
+
+ j = TX_NEXT(j);
+ }
+	lp->tx_old = j;
+out:
+	if (netif_queue_stopped(dev) &&
+	    TX_BUFFS_AVAIL > 0)
+		netif_wake_queue(dev);
+
+	spin_unlock(&lp->lock);
+}
+
+static irqreturn_t lance_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct lance_private *lp = netdev_priv(dev);
+ int csr0;
+
+ sbus_writew(LE_CSR0, lp->lregs + RAP);
+ csr0 = sbus_readw(lp->lregs + RDP);
+
+ /* Acknowledge all the interrupt sources ASAP */
+ sbus_writew(csr0 & (LE_C0_INTR | LE_C0_TINT | LE_C0_RINT),
+ lp->lregs + RDP);
+
+ if ((csr0 & LE_C0_ERR) != 0) {
+ /* Clear the error condition */
+ sbus_writew((LE_C0_BABL | LE_C0_ERR | LE_C0_MISS |
+ LE_C0_CERR | LE_C0_MERR),
+ lp->lregs + RDP);
+ }
+
+ if (csr0 & LE_C0_RINT)
+ lp->rx(dev);
+
+ if (csr0 & LE_C0_TINT)
+ lp->tx(dev);
+
+ if (csr0 & LE_C0_BABL)
+ dev->stats.tx_errors++;
+
+ if (csr0 & LE_C0_MISS)
+ dev->stats.rx_errors++;
+
+ if (csr0 & LE_C0_MERR) {
+ if (lp->dregs) {
+ u32 addr = sbus_readl(lp->dregs + DMA_ADDR);
+
+ printk(KERN_ERR "%s: Memory error, status %04x, addr %06x\n",
+ dev->name, csr0, addr & 0xffffff);
+ } else {
+ printk(KERN_ERR "%s: Memory error, status %04x\n",
+ dev->name, csr0);
+ }
+
+ sbus_writew(LE_C0_STOP, lp->lregs + RDP);
+
+ if (lp->dregs) {
+ u32 dma_csr = sbus_readl(lp->dregs + DMA_CSR);
+
+ dma_csr |= DMA_FIFO_INV;
+ sbus_writel(dma_csr, lp->dregs + DMA_CSR);
+ }
+
+ lp->init_ring(dev);
+ load_csrs(lp);
+ init_restart_lance(lp);
+ netif_wake_queue(dev);
+ }
+
+ sbus_writew(LE_C0_INEA, lp->lregs + RDP);
+
+ return IRQ_HANDLED;
+}
+
+/* Build a fake network packet and send it to ourselves.  This is used
+ * when automatic carrier selection is enabled: lance_open() queues the
+ * packet so that a carrier-loss error shows up immediately, letting the
+ * driver switch between TPE and AUI.
+ */
+static void build_fake_packet(struct lance_private *lp)
+{
+ struct net_device *dev = lp->dev;
+ int i, entry;
+
+ entry = lp->tx_new & TX_RING_MOD_MASK;
+ if (lp->pio_buffer) {
+ struct lance_init_block __iomem *ib = lp->init_block_iomem;
+ u16 __iomem *packet = (u16 __iomem *) &(ib->tx_buf[entry][0]);
+ struct ethhdr __iomem *eth = (struct ethhdr __iomem *) packet;
+ for (i = 0; i < (ETH_ZLEN / sizeof(u16)); i++)
+ sbus_writew(0, &packet[i]);
+ for (i = 0; i < 6; i++) {
+ sbus_writeb(dev->dev_addr[i], &eth->h_dest[i]);
+ sbus_writeb(dev->dev_addr[i], &eth->h_source[i]);
+ }
+ sbus_writew((-ETH_ZLEN) | 0xf000, &ib->btx_ring[entry].length);
+ sbus_writew(0, &ib->btx_ring[entry].misc);
+ sbus_writeb(LE_T1_POK|LE_T1_OWN, &ib->btx_ring[entry].tmd1_bits);
+ } else {
+ struct lance_init_block *ib = lp->init_block_mem;
+ u16 *packet = (u16 *) &(ib->tx_buf[entry][0]);
+ struct ethhdr *eth = (struct ethhdr *) packet;
+ memset(packet, 0, ETH_ZLEN);
+ for (i = 0; i < 6; i++) {
+ eth->h_dest[i] = dev->dev_addr[i];
+ eth->h_source[i] = dev->dev_addr[i];
+ }
+ ib->btx_ring[entry].length = (-ETH_ZLEN) | 0xf000;
+ ib->btx_ring[entry].misc = 0;
+ ib->btx_ring[entry].tmd1_bits = (LE_T1_POK|LE_T1_OWN);
+ }
+ lp->tx_new = TX_NEXT(entry);
+}
+
+static int lance_open(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ int status = 0;
+
+ STOP_LANCE(lp);
+
+ if (request_irq(dev->irq, lance_interrupt, IRQF_SHARED,
+ lancestr, (void *) dev)) {
+ printk(KERN_ERR "Lance: Can't get irq %d\n", dev->irq);
+ return -EAGAIN;
+ }
+
+ /* On the 4m, setup the ledma to provide the upper bits for buffers */
+ if (lp->dregs) {
+ u32 regval = lp->init_block_dvma & 0xff000000;
+
+ sbus_writel(regval, lp->dregs + DMA_TEST);
+ }
+
+ /* Set mode and clear multicast filter only at device open,
+ * so that lance_init_ring() called at any error will not
+ * forget multicast filters.
+ *
+	 * BTW this is a common bug in all lance drivers! --ANK
+ */
+ if (lp->pio_buffer) {
+ struct lance_init_block __iomem *ib = lp->init_block_iomem;
+ sbus_writew(0, &ib->mode);
+ sbus_writel(0, &ib->filter[0]);
+ sbus_writel(0, &ib->filter[1]);
+ } else {
+ struct lance_init_block *ib = lp->init_block_mem;
+ ib->mode = 0;
+ ib->filter [0] = 0;
+ ib->filter [1] = 0;
+ }
+
+ lp->init_ring(dev);
+ load_csrs(lp);
+
+ netif_start_queue(dev);
+
+ status = init_restart_lance(lp);
+ if (!status && lp->auto_select) {
+ build_fake_packet(lp);
+ sbus_writew(LE_C0_INEA | LE_C0_TDMD, lp->lregs + RDP);
+ }
+
+ return status;
+}
+
+static int lance_close(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+
+ netif_stop_queue(dev);
+ del_timer_sync(&lp->multicast_timer);
+
+ STOP_LANCE(lp);
+
+ free_irq(dev->irq, (void *) dev);
+ return 0;
+}
+
+static int lance_reset(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ int status;
+
+ STOP_LANCE(lp);
+
+ /* On the 4m, reset the dma too */
+ if (lp->dregs) {
+ u32 csr, addr;
+
+ printk(KERN_ERR "resetting ledma\n");
+ csr = sbus_readl(lp->dregs + DMA_CSR);
+ sbus_writel(csr | DMA_RST_ENET, lp->dregs + DMA_CSR);
+ udelay(200);
+ sbus_writel(csr & ~DMA_RST_ENET, lp->dregs + DMA_CSR);
+
+ addr = lp->init_block_dvma & 0xff000000;
+ sbus_writel(addr, lp->dregs + DMA_TEST);
+ }
+ lp->init_ring(dev);
+ load_csrs(lp);
+ netif_trans_update(dev); /* prevent tx timeout */
+ status = init_restart_lance(lp);
+ return status;
+}
+
+static void lance_piocopy_from_skb(void __iomem *dest, unsigned char *src, int len)
+{
+ void __iomem *piobuf = dest;
+ u32 *p32;
+ u16 *p16;
+ u8 *p8;
+
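+	/* The skb data may sit at any alignment, so assemble aligned
+	 * 32-bit words (big-endian, as on SPARC) before issuing the
+	 * word-sized PIO stores below.
+	 */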
+ switch ((unsigned long)src & 0x3) {
+ case 0:
+ p32 = (u32 *) src;
+ while (len >= 4) {
+ sbus_writel(*p32, piobuf);
+ p32++;
+ piobuf += 4;
+ len -= 4;
+ }
+ src = (char *) p32;
+ break;
+ case 1:
+ case 3:
+ p8 = (u8 *) src;
+ while (len >= 4) {
+ u32 val;
+
+ val = p8[0] << 24;
+ val |= p8[1] << 16;
+ val |= p8[2] << 8;
+ val |= p8[3];
+ sbus_writel(val, piobuf);
+ p8 += 4;
+ piobuf += 4;
+ len -= 4;
+ }
+ src = (char *) p8;
+ break;
+ case 2:
+ p16 = (u16 *) src;
+ while (len >= 4) {
+ u32 val = p16[0]<<16 | p16[1];
+ sbus_writel(val, piobuf);
+ p16 += 2;
+ piobuf += 4;
+ len -= 4;
+ }
+ src = (char *) p16;
+ break;
+ }
+ if (len >= 2) {
+ u16 val = src[0] << 8 | src[1];
+ sbus_writew(val, piobuf);
+ src += 2;
+ piobuf += 2;
+ len -= 2;
+ }
+ if (len >= 1)
+ sbus_writeb(src[0], piobuf);
+}
+
+static void lance_piozero(void __iomem *dest, int len)
+{
+ void __iomem *piobuf = dest;
+
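+	/* Clear any leading odd byte/halfword first so the bulk of the
+	 * buffer can be zeroed with word-sized PIO stores.
+	 */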
+ if ((unsigned long)piobuf & 1) {
+ sbus_writeb(0, piobuf);
+ piobuf += 1;
+ len -= 1;
+ if (len == 0)
+ return;
+ }
+ if (len == 1) {
+ sbus_writeb(0, piobuf);
+ return;
+ }
+ if ((unsigned long)piobuf & 2) {
+ sbus_writew(0, piobuf);
+ piobuf += 2;
+ len -= 2;
+ if (len == 0)
+ return;
+ }
+ while (len >= 4) {
+ sbus_writel(0, piobuf);
+ piobuf += 4;
+ len -= 4;
+ }
+ if (len >= 2) {
+ sbus_writew(0, piobuf);
+ piobuf += 2;
+ len -= 2;
+ }
+ if (len >= 1)
+ sbus_writeb(0, piobuf);
+}
+
+static void lance_tx_timeout(struct net_device *dev, unsigned int txqueue)
+{
+ struct lance_private *lp = netdev_priv(dev);
+
+ printk(KERN_ERR "%s: transmit timed out, status %04x, reset\n",
+ dev->name, sbus_readw(lp->lregs + RDP));
+ lance_reset(dev);
+ netif_wake_queue(dev);
+}
+
+static netdev_tx_t lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ int entry, skblen, len;
+
+ skblen = skb->len;
+
+ len = (skblen <= ETH_ZLEN) ? ETH_ZLEN : skblen;
+
+ spin_lock_irq(&lp->lock);
+
+ dev->stats.tx_bytes += len;
+
+ entry = lp->tx_new & TX_RING_MOD_MASK;
+ if (lp->pio_buffer) {
+ struct lance_init_block __iomem *ib = lp->init_block_iomem;
+ sbus_writew((-len) | 0xf000, &ib->btx_ring[entry].length);
+ sbus_writew(0, &ib->btx_ring[entry].misc);
+ lance_piocopy_from_skb(&ib->tx_buf[entry][0], skb->data, skblen);
+ if (len != skblen)
+ lance_piozero(&ib->tx_buf[entry][skblen], len - skblen);
+ sbus_writeb(LE_T1_POK | LE_T1_OWN, &ib->btx_ring[entry].tmd1_bits);
+ } else {
+ struct lance_init_block *ib = lp->init_block_mem;
+ ib->btx_ring [entry].length = (-len) | 0xf000;
+ ib->btx_ring [entry].misc = 0;
+ skb_copy_from_linear_data(skb, &ib->tx_buf [entry][0], skblen);
+ if (len != skblen)
+ memset((char *) &ib->tx_buf [entry][skblen], 0, len - skblen);
+ ib->btx_ring [entry].tmd1_bits = (LE_T1_POK | LE_T1_OWN);
+ }
+
+ lp->tx_new = TX_NEXT(entry);
+
+ if (TX_BUFFS_AVAIL <= 0)
+ netif_stop_queue(dev);
+
+ /* Kick the lance: transmit now */
+ sbus_writew(LE_C0_INEA | LE_C0_TDMD, lp->lregs + RDP);
+
+	/* Read back CSR to invalidate the E-Cache.
+	 * This is needed because DMA_DSBL_WR_INV is set.
+	 */
+ if (lp->dregs)
+ sbus_readw(lp->lregs + RDP);
+
+ spin_unlock_irq(&lp->lock);
+
+ dev_kfree_skb(skb);
+
+ return NETDEV_TX_OK;
+}
+
+/* taken from the depca driver */
+static void lance_load_multicast(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ struct netdev_hw_addr *ha;
+ u32 crc;
+ u32 val;
+
+ /* set all multicast bits */
+ if (dev->flags & IFF_ALLMULTI)
+ val = ~0;
+ else
+ val = 0;
+
+ if (lp->pio_buffer) {
+ struct lance_init_block __iomem *ib = lp->init_block_iomem;
+ sbus_writel(val, &ib->filter[0]);
+ sbus_writel(val, &ib->filter[1]);
+ } else {
+ struct lance_init_block *ib = lp->init_block_mem;
+ ib->filter [0] = val;
+ ib->filter [1] = val;
+ }
+
+ if (dev->flags & IFF_ALLMULTI)
+ return;
+
+ /* Add addresses */
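+	/* The LANCE hashes each address using the CRC-32 of its six
+	 * bytes: the top six bits of the little-endian CRC select one of
+	 * the 64 filter bits (bits 5..4 pick the 16-bit word, bits 3..0
+	 * the bit within that word).
+	 */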
+ netdev_for_each_mc_addr(ha, dev) {
+ crc = ether_crc_le(6, ha->addr);
+ crc = crc >> 26;
+ if (lp->pio_buffer) {
+ struct lance_init_block __iomem *ib = lp->init_block_iomem;
+ u16 __iomem *mcast_table = (u16 __iomem *) &ib->filter;
+ u16 tmp = sbus_readw(&mcast_table[crc>>4]);
+ tmp |= 1 << (crc & 0xf);
+ sbus_writew(tmp, &mcast_table[crc>>4]);
+ } else {
+ struct lance_init_block *ib = lp->init_block_mem;
+ u16 *mcast_table = (u16 *) &ib->filter;
+ mcast_table [crc >> 4] |= 1 << (crc & 0xf);
+ }
+ }
+}
+
+static void lance_set_multicast(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ struct lance_init_block *ib_mem = lp->init_block_mem;
+ struct lance_init_block __iomem *ib_iomem = lp->init_block_iomem;
+ u16 mode;
+
+ if (!netif_running(dev))
+ return;
+
+ if (lp->tx_old != lp->tx_new) {
+ mod_timer(&lp->multicast_timer, jiffies + 4);
+ netif_wake_queue(dev);
+ return;
+ }
+
+ netif_stop_queue(dev);
+
+ STOP_LANCE(lp);
+ lp->init_ring(dev);
+
+ if (lp->pio_buffer)
+ mode = sbus_readw(&ib_iomem->mode);
+ else
+ mode = ib_mem->mode;
+ if (dev->flags & IFF_PROMISC) {
+ mode |= LE_MO_PROM;
+ if (lp->pio_buffer)
+ sbus_writew(mode, &ib_iomem->mode);
+ else
+ ib_mem->mode = mode;
+ } else {
+ mode &= ~LE_MO_PROM;
+ if (lp->pio_buffer)
+ sbus_writew(mode, &ib_iomem->mode);
+ else
+ ib_mem->mode = mode;
+ lance_load_multicast(dev);
+ }
+ load_csrs(lp);
+ init_restart_lance(lp);
+ netif_wake_queue(dev);
+}
+
+static void lance_set_multicast_retry(struct timer_list *t)
+{
+ struct lance_private *lp = from_timer(lp, t, multicast_timer);
+ struct net_device *dev = lp->dev;
+
+ lance_set_multicast(dev);
+}
+
+static void lance_free_hwresources(struct lance_private *lp)
+{
+ if (lp->lregs)
+ of_iounmap(&lp->op->resource[0], lp->lregs, LANCE_REG_SIZE);
+ if (lp->dregs) {
+ struct platform_device *ledma = lp->ledma;
+
+ of_iounmap(&ledma->resource[0], lp->dregs,
+ resource_size(&ledma->resource[0]));
+ }
+ if (lp->init_block_iomem) {
+ of_iounmap(&lp->lebuffer->resource[0], lp->init_block_iomem,
+ sizeof(struct lance_init_block));
+ } else if (lp->init_block_mem) {
+ dma_free_coherent(&lp->op->dev,
+ sizeof(struct lance_init_block),
+ lp->init_block_mem,
+ lp->init_block_dvma);
+ }
+}
+
+/* Ethtool support... */
+static void sparc_lance_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ strlcpy(info->driver, "sunlance", sizeof(info->driver));
+}
+
+static const struct ethtool_ops sparc_lance_ethtool_ops = {
+ .get_drvinfo = sparc_lance_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+};
+
+static const struct net_device_ops sparc_lance_ops = {
+ .ndo_open = lance_open,
+ .ndo_stop = lance_close,
+ .ndo_start_xmit = lance_start_xmit,
+ .ndo_set_rx_mode = lance_set_multicast,
+ .ndo_tx_timeout = lance_tx_timeout,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+static int sparc_lance_probe_one(struct platform_device *op,
+ struct platform_device *ledma,
+ struct platform_device *lebuffer)
+{
+ struct device_node *dp = op->dev.of_node;
+ struct lance_private *lp;
+ struct net_device *dev;
+ int i;
+
+ dev = alloc_etherdev(sizeof(struct lance_private) + 8);
+ if (!dev)
+ return -ENOMEM;
+
+ lp = netdev_priv(dev);
+
+ spin_lock_init(&lp->lock);
+
+	/* Copy the IDPROM ethernet address to the device structure;
+	 * later we will copy it from the device structure into the
+	 * LANCE initialization block.
+	 */
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = idprom->id_ethaddr[i];
+
+ /* Get the IO region */
+ lp->lregs = of_ioremap(&op->resource[0], 0,
+ LANCE_REG_SIZE, lancestr);
+ if (!lp->lregs) {
+ printk(KERN_ERR "SunLance: Cannot map registers.\n");
+ goto fail;
+ }
+
+ lp->ledma = ledma;
+ if (lp->ledma) {
+ lp->dregs = of_ioremap(&ledma->resource[0], 0,
+ resource_size(&ledma->resource[0]),
+ "ledma");
+ if (!lp->dregs) {
+ printk(KERN_ERR "SunLance: Cannot map "
+ "ledma registers.\n");
+ goto fail;
+ }
+ }
+
+ lp->op = op;
+ lp->lebuffer = lebuffer;
+ if (lebuffer) {
+ /* sanity check */
+ if (lebuffer->resource[0].start & 7) {
+ printk(KERN_ERR "SunLance: ERROR: Rx and Tx rings not on even boundary.\n");
+ goto fail;
+ }
+ lp->init_block_iomem =
+ of_ioremap(&lebuffer->resource[0], 0,
+ sizeof(struct lance_init_block), "lebuffer");
+ if (!lp->init_block_iomem) {
+ printk(KERN_ERR "SunLance: Cannot map PIO buffer.\n");
+ goto fail;
+ }
+ lp->init_block_dvma = 0;
+ lp->pio_buffer = 1;
+ lp->init_ring = lance_init_ring_pio;
+ lp->rx = lance_rx_pio;
+ lp->tx = lance_tx_pio;
+ } else {
+ lp->init_block_mem =
+ dma_alloc_coherent(&op->dev,
+ sizeof(struct lance_init_block),
+ &lp->init_block_dvma, GFP_ATOMIC);
+ if (!lp->init_block_mem)
+ goto fail;
+
+ lp->pio_buffer = 0;
+ lp->init_ring = lance_init_ring_dvma;
+ lp->rx = lance_rx_dvma;
+ lp->tx = lance_tx_dvma;
+ }
+ lp->busmaster_regval = of_getintprop_default(dp, "busmaster-regval",
+ (LE_C3_BSWP |
+ LE_C3_ACON |
+ LE_C3_BCON));
+
+ lp->name = lancestr;
+
+ lp->burst_sizes = 0;
+ if (lp->ledma) {
+ struct device_node *ledma_dp = ledma->dev.of_node;
+ struct device_node *sbus_dp;
+ unsigned int sbmask;
+ const char *prop;
+ u32 csr;
+
+ /* Find burst-size property for ledma */
+ lp->burst_sizes = of_getintprop_default(ledma_dp,
+ "burst-sizes", 0);
+
+ /* ledma may be capable of fast bursts, but sbus may not. */
+ sbus_dp = ledma_dp->parent;
+ sbmask = of_getintprop_default(sbus_dp, "burst-sizes",
+ DMA_BURSTBITS);
+ lp->burst_sizes &= sbmask;
+
+ /* Get the cable-selection property */
+ prop = of_get_property(ledma_dp, "cable-selection", NULL);
+ if (!prop || prop[0] == '\0') {
+ struct device_node *nd;
+
+ printk(KERN_INFO "SunLance: using "
+ "auto-carrier-detection.\n");
+
+ nd = of_find_node_by_path("/options");
+ if (!nd)
+ goto no_link_test;
+
+ prop = of_get_property(nd, "tpe-link-test?", NULL);
+ if (!prop)
+ goto node_put;
+
+ if (strcmp(prop, "true")) {
+ printk(KERN_NOTICE "SunLance: warning: overriding option "
+ "'tpe-link-test?'\n");
+ printk(KERN_NOTICE "SunLance: warning: mail any problems "
+ "to ecd@skynet.be\n");
+ auxio_set_lte(AUXIO_LTE_ON);
+ }
+node_put:
+ of_node_put(nd);
+no_link_test:
+ lp->auto_select = 1;
+ lp->tpe = 0;
+ } else if (!strcmp(prop, "aui")) {
+ lp->auto_select = 0;
+ lp->tpe = 0;
+ } else {
+ lp->auto_select = 0;
+ lp->tpe = 1;
+ }
+
+ /* Reset ledma */
+ csr = sbus_readl(lp->dregs + DMA_CSR);
+ sbus_writel(csr | DMA_RST_ENET, lp->dregs + DMA_CSR);
+ udelay(200);
+ sbus_writel(csr & ~DMA_RST_ENET, lp->dregs + DMA_CSR);
+ } else
+ lp->dregs = NULL;
+
+ lp->dev = dev;
+ SET_NETDEV_DEV(dev, &op->dev);
+ dev->watchdog_timeo = 5*HZ;
+ dev->ethtool_ops = &sparc_lance_ethtool_ops;
+ dev->netdev_ops = &sparc_lance_ops;
+
+ dev->irq = op->archdata.irqs[0];
+
+ /* We cannot sleep if the chip is busy during a
+ * multicast list update event, because such events
+ * can occur from interrupts (ex. IPv6). So we
+ * use a timer to try again later when necessary. -DaveM
+ */
+ timer_setup(&lp->multicast_timer, lance_set_multicast_retry, 0);
+
+ if (register_netdev(dev)) {
+ printk(KERN_ERR "SunLance: Cannot register device.\n");
+ goto fail;
+ }
+
+ platform_set_drvdata(op, lp);
+
+ printk(KERN_INFO "%s: LANCE %pM\n",
+ dev->name, dev->dev_addr);
+
+ return 0;
+
+fail:
+ lance_free_hwresources(lp);
+ free_netdev(dev);
+ return -ENODEV;
+}
+
+static int sunlance_sbus_probe(struct platform_device *op)
+{
+ struct platform_device *parent = to_platform_device(op->dev.parent);
+ struct device_node *parent_dp = parent->dev.of_node;
+ int err;
+
+ if (of_node_name_eq(parent_dp, "ledma")) {
+ err = sparc_lance_probe_one(op, parent, NULL);
+ } else if (of_node_name_eq(parent_dp, "lebuffer")) {
+ err = sparc_lance_probe_one(op, NULL, parent);
+ } else
+ err = sparc_lance_probe_one(op, NULL, NULL);
+
+ return err;
+}
+
+static int sunlance_sbus_remove(struct platform_device *op)
+{
+ struct lance_private *lp = platform_get_drvdata(op);
+ struct net_device *net_dev = lp->dev;
+
+ unregister_netdev(net_dev);
+
+ lance_free_hwresources(lp);
+
+ free_netdev(net_dev);
+
+ return 0;
+}
+
+static const struct of_device_id sunlance_sbus_match[] = {
+ {
+ .name = "le",
+ },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, sunlance_sbus_match);
+
+static struct platform_driver sunlance_sbus_driver = {
+ .driver = {
+ .name = "sunlance",
+ .of_match_table = sunlance_sbus_match,
+ },
+ .probe = sunlance_sbus_probe,
+ .remove = sunlance_sbus_remove,
+};
+
+module_platform_driver(sunlance_sbus_driver);
diff --git a/drivers/net/ethernet/amd/xgbe/Makefile b/drivers/net/ethernet/amd/xgbe/Makefile
new file mode 100644
index 000000000..620785ffb
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/Makefile
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_AMD_XGBE) += amd-xgbe.o
+
+amd-xgbe-objs := xgbe-main.o xgbe-drv.o xgbe-dev.o \
+ xgbe-desc.o xgbe-ethtool.o xgbe-mdio.o \
+ xgbe-ptp.o \
+ xgbe-i2c.o xgbe-phy-v1.o xgbe-phy-v2.o \
+ xgbe-platform.o
+
+amd-xgbe-$(CONFIG_PCI) += xgbe-pci.o
+amd-xgbe-$(CONFIG_AMD_XGBE_DCB) += xgbe-dcb.o
+amd-xgbe-$(CONFIG_DEBUG_FS) += xgbe-debugfs.o
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
new file mode 100644
index 000000000..533b8519e
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -0,0 +1,1723 @@
+/*
+ * AMD 10Gb Ethernet driver
+ *
+ * This file is available to you under your choice of the following two
+ * licenses:
+ *
+ * License 1: GPLv2
+ *
+ * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
+ *
+ * This file is free software; you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * License 2: Modified BSD
+ *
+ * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Advanced Micro Devices, Inc. nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __XGBE_COMMON_H__
+#define __XGBE_COMMON_H__
+
+/* DMA register offsets */
+#define DMA_MR 0x3000
+#define DMA_SBMR 0x3004
+#define DMA_ISR 0x3008
+#define DMA_AXIARCR 0x3010
+#define DMA_AXIAWCR 0x3018
+#define DMA_AXIAWARCR 0x301c
+#define DMA_DSR0 0x3020
+#define DMA_DSR1 0x3024
+#define DMA_TXEDMACR 0x3040
+#define DMA_RXEDMACR 0x3044
+
+/* DMA register entry bit positions and sizes */
+#define DMA_ISR_MACIS_INDEX 17
+#define DMA_ISR_MACIS_WIDTH 1
+#define DMA_ISR_MTLIS_INDEX 16
+#define DMA_ISR_MTLIS_WIDTH 1
+#define DMA_MR_INTM_INDEX 12
+#define DMA_MR_INTM_WIDTH 2
+#define DMA_MR_SWR_INDEX 0
+#define DMA_MR_SWR_WIDTH 1
+#define DMA_RXEDMACR_RDPS_INDEX 0
+#define DMA_RXEDMACR_RDPS_WIDTH 3
+#define DMA_SBMR_AAL_INDEX 12
+#define DMA_SBMR_AAL_WIDTH 1
+#define DMA_SBMR_EAME_INDEX 11
+#define DMA_SBMR_EAME_WIDTH 1
+#define DMA_SBMR_BLEN_INDEX 1
+#define DMA_SBMR_BLEN_WIDTH 7
+#define DMA_SBMR_RD_OSR_LMT_INDEX 16
+#define DMA_SBMR_RD_OSR_LMT_WIDTH 6
+#define DMA_SBMR_UNDEF_INDEX 0
+#define DMA_SBMR_UNDEF_WIDTH 1
+#define DMA_SBMR_WR_OSR_LMT_INDEX 24
+#define DMA_SBMR_WR_OSR_LMT_WIDTH 6
+#define DMA_TXEDMACR_TDPS_INDEX 0
+#define DMA_TXEDMACR_TDPS_WIDTH 3
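+
+/* Field access sketch (illustrative only): each _INDEX/_WIDTH pair above
+ * describes a bit field that is read with the usual shift-and-mask idiom
+ * (the driver's accessor macros live in xgbe.h), e.g.
+ *
+ *	blen = (sbmr >> DMA_SBMR_BLEN_INDEX) &
+ *	       ((1 << DMA_SBMR_BLEN_WIDTH) - 1);
+ */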
+
+/* DMA register values */
+#define DMA_SBMR_BLEN_256 256
+#define DMA_SBMR_BLEN_128 128
+#define DMA_SBMR_BLEN_64 64
+#define DMA_SBMR_BLEN_32 32
+#define DMA_SBMR_BLEN_16 16
+#define DMA_SBMR_BLEN_8 8
+#define DMA_SBMR_BLEN_4 4
+#define DMA_DSR_RPS_WIDTH 4
+#define DMA_DSR_TPS_WIDTH 4
+#define DMA_DSR_Q_WIDTH (DMA_DSR_RPS_WIDTH + DMA_DSR_TPS_WIDTH)
+#define DMA_DSR0_RPS_START 8
+#define DMA_DSR0_TPS_START 12
+#define DMA_DSRX_FIRST_QUEUE 3
+#define DMA_DSRX_INC 4
+#define DMA_DSRX_QPR 4
+#define DMA_DSRX_RPS_START 0
+#define DMA_DSRX_TPS_START 4
+#define DMA_TPS_STOPPED 0x00
+#define DMA_TPS_SUSPENDED 0x06
+
+/* DMA channel register offsets
+ * Multiple channels can be active. The first channel has registers
+ * that begin at 0x3100. Each subsequent channel has registers that
+ * are accessed using an offset of 0x80 from the previous channel.
+ */
+#define DMA_CH_BASE 0x3100
+#define DMA_CH_INC 0x80
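+
+/* Illustrative sketch (not part of the driver): a per-channel register
+ * address follows from the base and stride above, e.g. with a
+ * hypothetical helper
+ *
+ *	static inline unsigned int dma_ch_reg(unsigned int channel,
+ *					      unsigned int reg)
+ *	{
+ *		return DMA_CH_BASE + (channel * DMA_CH_INC) + reg;
+ *	}
+ *
+ * channel 2's interrupt enable register lands at
+ * 0x3100 + 2 * 0x80 + DMA_CH_IER = 0x3238.
+ */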
+
+#define DMA_CH_CR 0x00
+#define DMA_CH_TCR 0x04
+#define DMA_CH_RCR 0x08
+#define DMA_CH_TDLR_HI 0x10
+#define DMA_CH_TDLR_LO 0x14
+#define DMA_CH_RDLR_HI 0x18
+#define DMA_CH_RDLR_LO 0x1c
+#define DMA_CH_TDTR_LO 0x24
+#define DMA_CH_RDTR_LO 0x2c
+#define DMA_CH_TDRLR 0x30
+#define DMA_CH_RDRLR 0x34
+#define DMA_CH_IER 0x38
+#define DMA_CH_RIWT 0x3c
+#define DMA_CH_CATDR_LO 0x44
+#define DMA_CH_CARDR_LO 0x4c
+#define DMA_CH_CATBR_HI 0x50
+#define DMA_CH_CATBR_LO 0x54
+#define DMA_CH_CARBR_HI 0x58
+#define DMA_CH_CARBR_LO 0x5c
+#define DMA_CH_SR 0x60
+
+/* DMA channel register entry bit positions and sizes */
+#define DMA_CH_CR_PBLX8_INDEX 16
+#define DMA_CH_CR_PBLX8_WIDTH 1
+#define DMA_CH_CR_SPH_INDEX 24
+#define DMA_CH_CR_SPH_WIDTH 1
+#define DMA_CH_IER_AIE20_INDEX 15
+#define DMA_CH_IER_AIE20_WIDTH 1
+#define DMA_CH_IER_AIE_INDEX 14
+#define DMA_CH_IER_AIE_WIDTH 1
+#define DMA_CH_IER_FBEE_INDEX 12
+#define DMA_CH_IER_FBEE_WIDTH 1
+#define DMA_CH_IER_NIE20_INDEX 16
+#define DMA_CH_IER_NIE20_WIDTH 1
+#define DMA_CH_IER_NIE_INDEX 15
+#define DMA_CH_IER_NIE_WIDTH 1
+#define DMA_CH_IER_RBUE_INDEX 7
+#define DMA_CH_IER_RBUE_WIDTH 1
+#define DMA_CH_IER_RIE_INDEX 6
+#define DMA_CH_IER_RIE_WIDTH 1
+#define DMA_CH_IER_RSE_INDEX 8
+#define DMA_CH_IER_RSE_WIDTH 1
+#define DMA_CH_IER_TBUE_INDEX 2
+#define DMA_CH_IER_TBUE_WIDTH 1
+#define DMA_CH_IER_TIE_INDEX 0
+#define DMA_CH_IER_TIE_WIDTH 1
+#define DMA_CH_IER_TXSE_INDEX 1
+#define DMA_CH_IER_TXSE_WIDTH 1
+#define DMA_CH_RCR_PBL_INDEX 16
+#define DMA_CH_RCR_PBL_WIDTH 6
+#define DMA_CH_RCR_RBSZ_INDEX 1
+#define DMA_CH_RCR_RBSZ_WIDTH 14
+#define DMA_CH_RCR_SR_INDEX 0
+#define DMA_CH_RCR_SR_WIDTH 1
+#define DMA_CH_RIWT_RWT_INDEX 0
+#define DMA_CH_RIWT_RWT_WIDTH 8
+#define DMA_CH_SR_FBE_INDEX 12
+#define DMA_CH_SR_FBE_WIDTH 1
+#define DMA_CH_SR_RBU_INDEX 7
+#define DMA_CH_SR_RBU_WIDTH 1
+#define DMA_CH_SR_RI_INDEX 6
+#define DMA_CH_SR_RI_WIDTH 1
+#define DMA_CH_SR_RPS_INDEX 8
+#define DMA_CH_SR_RPS_WIDTH 1
+#define DMA_CH_SR_TBU_INDEX 2
+#define DMA_CH_SR_TBU_WIDTH 1
+#define DMA_CH_SR_TI_INDEX 0
+#define DMA_CH_SR_TI_WIDTH 1
+#define DMA_CH_SR_TPS_INDEX 1
+#define DMA_CH_SR_TPS_WIDTH 1
+#define DMA_CH_TCR_OSP_INDEX 4
+#define DMA_CH_TCR_OSP_WIDTH 1
+#define DMA_CH_TCR_PBL_INDEX 16
+#define DMA_CH_TCR_PBL_WIDTH 6
+#define DMA_CH_TCR_ST_INDEX 0
+#define DMA_CH_TCR_ST_WIDTH 1
+#define DMA_CH_TCR_TSE_INDEX 12
+#define DMA_CH_TCR_TSE_WIDTH 1
+
+/* DMA channel register values */
+#define DMA_OSP_DISABLE 0x00
+#define DMA_OSP_ENABLE 0x01
+#define DMA_PBL_1 1
+#define DMA_PBL_2 2
+#define DMA_PBL_4 4
+#define DMA_PBL_8 8
+#define DMA_PBL_16 16
+#define DMA_PBL_32 32
+#define DMA_PBL_64 64 /* 8 x 8 */
+#define DMA_PBL_128 128 /* 8 x 16 */
+#define DMA_PBL_256 256 /* 8 x 32 */
+#define DMA_PBL_X8_DISABLE 0x00
+#define DMA_PBL_X8_ENABLE 0x01
+
+/* MAC register offsets */
+#define MAC_TCR 0x0000
+#define MAC_RCR 0x0004
+#define MAC_PFR 0x0008
+#define MAC_WTR 0x000c
+#define MAC_HTR0 0x0010
+#define MAC_VLANTR 0x0050
+#define MAC_VLANHTR 0x0058
+#define MAC_VLANIR 0x0060
+#define MAC_IVLANIR 0x0064
+#define MAC_RETMR 0x006c
+#define MAC_Q0TFCR 0x0070
+#define MAC_RFCR 0x0090
+#define MAC_RQC0R 0x00a0
+#define MAC_RQC1R 0x00a4
+#define MAC_RQC2R 0x00a8
+#define MAC_RQC3R 0x00ac
+#define MAC_ISR 0x00b0
+#define MAC_IER 0x00b4
+#define MAC_RTSR 0x00b8
+#define MAC_PMTCSR 0x00c0
+#define MAC_RWKPFR 0x00c4
+#define MAC_LPICSR 0x00d0
+#define MAC_LPITCR 0x00d4
+#define MAC_TIR 0x00e0
+#define MAC_VR 0x0110
+#define MAC_DR 0x0114
+#define MAC_HWF0R 0x011c
+#define MAC_HWF1R 0x0120
+#define MAC_HWF2R 0x0124
+#define MAC_MDIOSCAR 0x0200
+#define MAC_MDIOSCCDR 0x0204
+#define MAC_MDIOISR 0x0214
+#define MAC_MDIOIER 0x0218
+#define MAC_MDIOCL22R 0x0220
+#define MAC_GPIOCR 0x0278
+#define MAC_GPIOSR 0x027c
+#define MAC_MACA0HR 0x0300
+#define MAC_MACA0LR 0x0304
+#define MAC_MACA1HR 0x0308
+#define MAC_MACA1LR 0x030c
+#define MAC_RSSCR 0x0c80
+#define MAC_RSSAR 0x0c88
+#define MAC_RSSDR 0x0c8c
+#define MAC_TSCR 0x0d00
+#define MAC_SSIR 0x0d04
+#define MAC_STSR 0x0d08
+#define MAC_STNR 0x0d0c
+#define MAC_STSUR 0x0d10
+#define MAC_STNUR 0x0d14
+#define MAC_TSAR 0x0d18
+#define MAC_TSSR 0x0d20
+#define MAC_TXSNR 0x0d30
+#define MAC_TXSSR 0x0d34
+
+#define MAC_QTFCR_INC 4
+#define MAC_MACA_INC 4
+#define MAC_HTR_INC 4
+
+#define MAC_RQC2_INC 4
+#define MAC_RQC2_Q_PER_REG 4
+
+/* MAC register entry bit positions and sizes */
+#define MAC_HWF0R_ADDMACADRSEL_INDEX 18
+#define MAC_HWF0R_ADDMACADRSEL_WIDTH 5
+#define MAC_HWF0R_ARPOFFSEL_INDEX 9
+#define MAC_HWF0R_ARPOFFSEL_WIDTH 1
+#define MAC_HWF0R_EEESEL_INDEX 13
+#define MAC_HWF0R_EEESEL_WIDTH 1
+#define MAC_HWF0R_GMIISEL_INDEX 1
+#define MAC_HWF0R_GMIISEL_WIDTH 1
+#define MAC_HWF0R_MGKSEL_INDEX 7
+#define MAC_HWF0R_MGKSEL_WIDTH 1
+#define MAC_HWF0R_MMCSEL_INDEX 8
+#define MAC_HWF0R_MMCSEL_WIDTH 1
+#define MAC_HWF0R_RWKSEL_INDEX 6
+#define MAC_HWF0R_RWKSEL_WIDTH 1
+#define MAC_HWF0R_RXCOESEL_INDEX 16
+#define MAC_HWF0R_RXCOESEL_WIDTH 1
+#define MAC_HWF0R_SAVLANINS_INDEX 27
+#define MAC_HWF0R_SAVLANINS_WIDTH 1
+#define MAC_HWF0R_SMASEL_INDEX 5
+#define MAC_HWF0R_SMASEL_WIDTH 1
+#define MAC_HWF0R_TSSEL_INDEX 12
+#define MAC_HWF0R_TSSEL_WIDTH 1
+#define MAC_HWF0R_TSSTSSEL_INDEX 25
+#define MAC_HWF0R_TSSTSSEL_WIDTH 2
+#define MAC_HWF0R_TXCOESEL_INDEX 14
+#define MAC_HWF0R_TXCOESEL_WIDTH 1
+#define MAC_HWF0R_VLHASH_INDEX 4
+#define MAC_HWF0R_VLHASH_WIDTH 1
+#define MAC_HWF0R_VXN_INDEX 29
+#define MAC_HWF0R_VXN_WIDTH 1
+#define MAC_HWF1R_ADDR64_INDEX 14
+#define MAC_HWF1R_ADDR64_WIDTH 2
+#define MAC_HWF1R_ADVTHWORD_INDEX 13
+#define MAC_HWF1R_ADVTHWORD_WIDTH 1
+#define MAC_HWF1R_DBGMEMA_INDEX 19
+#define MAC_HWF1R_DBGMEMA_WIDTH 1
+#define MAC_HWF1R_DCBEN_INDEX 16
+#define MAC_HWF1R_DCBEN_WIDTH 1
+#define MAC_HWF1R_HASHTBLSZ_INDEX 24
+#define MAC_HWF1R_HASHTBLSZ_WIDTH 3
+#define MAC_HWF1R_L3L4FNUM_INDEX 27
+#define MAC_HWF1R_L3L4FNUM_WIDTH 4
+#define MAC_HWF1R_NUMTC_INDEX 21
+#define MAC_HWF1R_NUMTC_WIDTH 3
+#define MAC_HWF1R_RSSEN_INDEX 20
+#define MAC_HWF1R_RSSEN_WIDTH 1
+#define MAC_HWF1R_RXFIFOSIZE_INDEX 0
+#define MAC_HWF1R_RXFIFOSIZE_WIDTH 5
+#define MAC_HWF1R_SPHEN_INDEX 17
+#define MAC_HWF1R_SPHEN_WIDTH 1
+#define MAC_HWF1R_TSOEN_INDEX 18
+#define MAC_HWF1R_TSOEN_WIDTH 1
+#define MAC_HWF1R_TXFIFOSIZE_INDEX 6
+#define MAC_HWF1R_TXFIFOSIZE_WIDTH 5
+#define MAC_HWF2R_AUXSNAPNUM_INDEX 28
+#define MAC_HWF2R_AUXSNAPNUM_WIDTH 3
+#define MAC_HWF2R_PPSOUTNUM_INDEX 24
+#define MAC_HWF2R_PPSOUTNUM_WIDTH 3
+#define MAC_HWF2R_RXCHCNT_INDEX 12
+#define MAC_HWF2R_RXCHCNT_WIDTH 4
+#define MAC_HWF2R_RXQCNT_INDEX 0
+#define MAC_HWF2R_RXQCNT_WIDTH 4
+#define MAC_HWF2R_TXCHCNT_INDEX 18
+#define MAC_HWF2R_TXCHCNT_WIDTH 4
+#define MAC_HWF2R_TXQCNT_INDEX 6
+#define MAC_HWF2R_TXQCNT_WIDTH 4
+#define MAC_IER_TSIE_INDEX 12
+#define MAC_IER_TSIE_WIDTH 1
+#define MAC_ISR_MMCRXIS_INDEX 9
+#define MAC_ISR_MMCRXIS_WIDTH 1
+#define MAC_ISR_MMCTXIS_INDEX 10
+#define MAC_ISR_MMCTXIS_WIDTH 1
+#define MAC_ISR_PMTIS_INDEX 4
+#define MAC_ISR_PMTIS_WIDTH 1
+#define MAC_ISR_SMI_INDEX 1
+#define MAC_ISR_SMI_WIDTH 1
+#define MAC_ISR_TSIS_INDEX 12
+#define MAC_ISR_TSIS_WIDTH 1
+#define MAC_MACA1HR_AE_INDEX 31
+#define MAC_MACA1HR_AE_WIDTH 1
+#define MAC_MDIOIER_SNGLCOMPIE_INDEX 12
+#define MAC_MDIOIER_SNGLCOMPIE_WIDTH 1
+#define MAC_MDIOISR_SNGLCOMPINT_INDEX 12
+#define MAC_MDIOISR_SNGLCOMPINT_WIDTH 1
+#define MAC_MDIOSCAR_DA_INDEX 21
+#define MAC_MDIOSCAR_DA_WIDTH 5
+#define MAC_MDIOSCAR_PA_INDEX 16
+#define MAC_MDIOSCAR_PA_WIDTH 5
+#define MAC_MDIOSCAR_RA_INDEX 0
+#define MAC_MDIOSCAR_RA_WIDTH 16
+#define MAC_MDIOSCCDR_BUSY_INDEX 22
+#define MAC_MDIOSCCDR_BUSY_WIDTH 1
+#define MAC_MDIOSCCDR_CMD_INDEX 16
+#define MAC_MDIOSCCDR_CMD_WIDTH 2
+#define MAC_MDIOSCCDR_CR_INDEX 19
+#define MAC_MDIOSCCDR_CR_WIDTH 3
+#define MAC_MDIOSCCDR_DATA_INDEX 0
+#define MAC_MDIOSCCDR_DATA_WIDTH 16
+#define MAC_MDIOSCCDR_SADDR_INDEX 18
+#define MAC_MDIOSCCDR_SADDR_WIDTH 1
+#define MAC_PFR_HMC_INDEX 2
+#define MAC_PFR_HMC_WIDTH 1
+#define MAC_PFR_HPF_INDEX 10
+#define MAC_PFR_HPF_WIDTH 1
+#define MAC_PFR_HUC_INDEX 1
+#define MAC_PFR_HUC_WIDTH 1
+#define MAC_PFR_PM_INDEX 4
+#define MAC_PFR_PM_WIDTH 1
+#define MAC_PFR_PR_INDEX 0
+#define MAC_PFR_PR_WIDTH 1
+#define MAC_PFR_VTFE_INDEX 16
+#define MAC_PFR_VTFE_WIDTH 1
+#define MAC_PFR_VUCC_INDEX 22
+#define MAC_PFR_VUCC_WIDTH 1
+#define MAC_PMTCSR_MGKPKTEN_INDEX 1
+#define MAC_PMTCSR_MGKPKTEN_WIDTH 1
+#define MAC_PMTCSR_PWRDWN_INDEX 0
+#define MAC_PMTCSR_PWRDWN_WIDTH 1
+#define MAC_PMTCSR_RWKFILTRST_INDEX 31
+#define MAC_PMTCSR_RWKFILTRST_WIDTH 1
+#define MAC_PMTCSR_RWKPKTEN_INDEX 2
+#define MAC_PMTCSR_RWKPKTEN_WIDTH 1
+#define MAC_Q0TFCR_PT_INDEX 16
+#define MAC_Q0TFCR_PT_WIDTH 16
+#define MAC_Q0TFCR_TFE_INDEX 1
+#define MAC_Q0TFCR_TFE_WIDTH 1
+#define MAC_RCR_ACS_INDEX 1
+#define MAC_RCR_ACS_WIDTH 1
+#define MAC_RCR_CST_INDEX 2
+#define MAC_RCR_CST_WIDTH 1
+#define MAC_RCR_DCRCC_INDEX 3
+#define MAC_RCR_DCRCC_WIDTH 1
+#define MAC_RCR_HDSMS_INDEX 12
+#define MAC_RCR_HDSMS_WIDTH 3
+#define MAC_RCR_IPC_INDEX 9
+#define MAC_RCR_IPC_WIDTH 1
+#define MAC_RCR_JE_INDEX 8
+#define MAC_RCR_JE_WIDTH 1
+#define MAC_RCR_LM_INDEX 10
+#define MAC_RCR_LM_WIDTH 1
+#define MAC_RCR_RE_INDEX 0
+#define MAC_RCR_RE_WIDTH 1
+#define MAC_RFCR_PFCE_INDEX 8
+#define MAC_RFCR_PFCE_WIDTH 1
+#define MAC_RFCR_RFE_INDEX 0
+#define MAC_RFCR_RFE_WIDTH 1
+#define MAC_RFCR_UP_INDEX 1
+#define MAC_RFCR_UP_WIDTH 1
+#define MAC_RQC0R_RXQ0EN_INDEX 0
+#define MAC_RQC0R_RXQ0EN_WIDTH 2
+#define MAC_RSSAR_ADDRT_INDEX 2
+#define MAC_RSSAR_ADDRT_WIDTH 1
+#define MAC_RSSAR_CT_INDEX 1
+#define MAC_RSSAR_CT_WIDTH 1
+#define MAC_RSSAR_OB_INDEX 0
+#define MAC_RSSAR_OB_WIDTH 1
+#define MAC_RSSAR_RSSIA_INDEX 8
+#define MAC_RSSAR_RSSIA_WIDTH 8
+#define MAC_RSSCR_IP2TE_INDEX 1
+#define MAC_RSSCR_IP2TE_WIDTH 1
+#define MAC_RSSCR_RSSE_INDEX 0
+#define MAC_RSSCR_RSSE_WIDTH 1
+#define MAC_RSSCR_TCP4TE_INDEX 2
+#define MAC_RSSCR_TCP4TE_WIDTH 1
+#define MAC_RSSCR_UDP4TE_INDEX 3
+#define MAC_RSSCR_UDP4TE_WIDTH 1
+#define MAC_RSSDR_DMCH_INDEX 0
+#define MAC_RSSDR_DMCH_WIDTH 4
+#define MAC_SSIR_SNSINC_INDEX 8
+#define MAC_SSIR_SNSINC_WIDTH 8
+#define MAC_SSIR_SSINC_INDEX 16
+#define MAC_SSIR_SSINC_WIDTH 8
+#define MAC_TCR_SS_INDEX 29
+#define MAC_TCR_SS_WIDTH 2
+#define MAC_TCR_TE_INDEX 0
+#define MAC_TCR_TE_WIDTH 1
+#define MAC_TCR_VNE_INDEX 24
+#define MAC_TCR_VNE_WIDTH 1
+#define MAC_TCR_VNM_INDEX 25
+#define MAC_TCR_VNM_WIDTH 1
+#define MAC_TIR_TNID_INDEX 0
+#define MAC_TIR_TNID_WIDTH 16
+#define MAC_TSCR_AV8021ASMEN_INDEX 28
+#define MAC_TSCR_AV8021ASMEN_WIDTH 1
+#define MAC_TSCR_SNAPTYPSEL_INDEX 16
+#define MAC_TSCR_SNAPTYPSEL_WIDTH 2
+#define MAC_TSCR_TSADDREG_INDEX 5
+#define MAC_TSCR_TSADDREG_WIDTH 1
+#define MAC_TSCR_TSCFUPDT_INDEX 1
+#define MAC_TSCR_TSCFUPDT_WIDTH 1
+#define MAC_TSCR_TSCTRLSSR_INDEX 9
+#define MAC_TSCR_TSCTRLSSR_WIDTH 1
+#define MAC_TSCR_TSENA_INDEX 0
+#define MAC_TSCR_TSENA_WIDTH 1
+#define MAC_TSCR_TSENALL_INDEX 8
+#define MAC_TSCR_TSENALL_WIDTH 1
+#define MAC_TSCR_TSEVNTENA_INDEX 14
+#define MAC_TSCR_TSEVNTENA_WIDTH 1
+#define MAC_TSCR_TSINIT_INDEX 2
+#define MAC_TSCR_TSINIT_WIDTH 1
+#define MAC_TSCR_TSIPENA_INDEX 11
+#define MAC_TSCR_TSIPENA_WIDTH 1
+#define MAC_TSCR_TSIPV4ENA_INDEX 13
+#define MAC_TSCR_TSIPV4ENA_WIDTH 1
+#define MAC_TSCR_TSIPV6ENA_INDEX 12
+#define MAC_TSCR_TSIPV6ENA_WIDTH 1
+#define MAC_TSCR_TSMSTRENA_INDEX 15
+#define MAC_TSCR_TSMSTRENA_WIDTH 1
+#define MAC_TSCR_TSVER2ENA_INDEX 10
+#define MAC_TSCR_TSVER2ENA_WIDTH 1
+#define MAC_TSCR_TXTSSTSM_INDEX 24
+#define MAC_TSCR_TXTSSTSM_WIDTH 1
+#define MAC_TSSR_TXTSC_INDEX 15
+#define MAC_TSSR_TXTSC_WIDTH 1
+#define MAC_TXSNR_TXTSSTSMIS_INDEX 31
+#define MAC_TXSNR_TXTSSTSMIS_WIDTH 1
+#define MAC_VLANHTR_VLHT_INDEX 0
+#define MAC_VLANHTR_VLHT_WIDTH 16
+#define MAC_VLANIR_VLTI_INDEX 20
+#define MAC_VLANIR_VLTI_WIDTH 1
+#define MAC_VLANIR_CSVL_INDEX 19
+#define MAC_VLANIR_CSVL_WIDTH 1
+#define MAC_VLANTR_DOVLTC_INDEX 20
+#define MAC_VLANTR_DOVLTC_WIDTH 1
+#define MAC_VLANTR_ERSVLM_INDEX 19
+#define MAC_VLANTR_ERSVLM_WIDTH 1
+#define MAC_VLANTR_ESVL_INDEX 18
+#define MAC_VLANTR_ESVL_WIDTH 1
+#define MAC_VLANTR_ETV_INDEX 16
+#define MAC_VLANTR_ETV_WIDTH 1
+#define MAC_VLANTR_EVLS_INDEX 21
+#define MAC_VLANTR_EVLS_WIDTH 2
+#define MAC_VLANTR_EVLRXS_INDEX 24
+#define MAC_VLANTR_EVLRXS_WIDTH 1
+#define MAC_VLANTR_VL_INDEX 0
+#define MAC_VLANTR_VL_WIDTH 16
+#define MAC_VLANTR_VTHM_INDEX 25
+#define MAC_VLANTR_VTHM_WIDTH 1
+#define MAC_VLANTR_VTIM_INDEX 17
+#define MAC_VLANTR_VTIM_WIDTH 1
+#define MAC_VR_DEVID_INDEX 8
+#define MAC_VR_DEVID_WIDTH 8
+#define MAC_VR_SNPSVER_INDEX 0
+#define MAC_VR_SNPSVER_WIDTH 8
+#define MAC_VR_USERVER_INDEX 16
+#define MAC_VR_USERVER_WIDTH 8
+
+/* MMC register offsets */
+#define MMC_CR 0x0800
+#define MMC_RISR 0x0804
+#define MMC_TISR 0x0808
+#define MMC_RIER 0x080c
+#define MMC_TIER 0x0810
+#define MMC_TXOCTETCOUNT_GB_LO 0x0814
+#define MMC_TXOCTETCOUNT_GB_HI 0x0818
+#define MMC_TXFRAMECOUNT_GB_LO 0x081c
+#define MMC_TXFRAMECOUNT_GB_HI 0x0820
+#define MMC_TXBROADCASTFRAMES_G_LO 0x0824
+#define MMC_TXBROADCASTFRAMES_G_HI 0x0828
+#define MMC_TXMULTICASTFRAMES_G_LO 0x082c
+#define MMC_TXMULTICASTFRAMES_G_HI 0x0830
+#define MMC_TX64OCTETS_GB_LO 0x0834
+#define MMC_TX64OCTETS_GB_HI 0x0838
+#define MMC_TX65TO127OCTETS_GB_LO 0x083c
+#define MMC_TX65TO127OCTETS_GB_HI 0x0840
+#define MMC_TX128TO255OCTETS_GB_LO 0x0844
+#define MMC_TX128TO255OCTETS_GB_HI 0x0848
+#define MMC_TX256TO511OCTETS_GB_LO 0x084c
+#define MMC_TX256TO511OCTETS_GB_HI 0x0850
+#define MMC_TX512TO1023OCTETS_GB_LO 0x0854
+#define MMC_TX512TO1023OCTETS_GB_HI 0x0858
+#define MMC_TX1024TOMAXOCTETS_GB_LO 0x085c
+#define MMC_TX1024TOMAXOCTETS_GB_HI 0x0860
+#define MMC_TXUNICASTFRAMES_GB_LO 0x0864
+#define MMC_TXUNICASTFRAMES_GB_HI 0x0868
+#define MMC_TXMULTICASTFRAMES_GB_LO 0x086c
+#define MMC_TXMULTICASTFRAMES_GB_HI 0x0870
+#define MMC_TXBROADCASTFRAMES_GB_LO 0x0874
+#define MMC_TXBROADCASTFRAMES_GB_HI 0x0878
+#define MMC_TXUNDERFLOWERROR_LO 0x087c
+#define MMC_TXUNDERFLOWERROR_HI 0x0880
+#define MMC_TXOCTETCOUNT_G_LO 0x0884
+#define MMC_TXOCTETCOUNT_G_HI 0x0888
+#define MMC_TXFRAMECOUNT_G_LO 0x088c
+#define MMC_TXFRAMECOUNT_G_HI 0x0890
+#define MMC_TXPAUSEFRAMES_LO 0x0894
+#define MMC_TXPAUSEFRAMES_HI 0x0898
+#define MMC_TXVLANFRAMES_G_LO 0x089c
+#define MMC_TXVLANFRAMES_G_HI 0x08a0
+#define MMC_RXFRAMECOUNT_GB_LO 0x0900
+#define MMC_RXFRAMECOUNT_GB_HI 0x0904
+#define MMC_RXOCTETCOUNT_GB_LO 0x0908
+#define MMC_RXOCTETCOUNT_GB_HI 0x090c
+#define MMC_RXOCTETCOUNT_G_LO 0x0910
+#define MMC_RXOCTETCOUNT_G_HI 0x0914
+#define MMC_RXBROADCASTFRAMES_G_LO 0x0918
+#define MMC_RXBROADCASTFRAMES_G_HI 0x091c
+#define MMC_RXMULTICASTFRAMES_G_LO 0x0920
+#define MMC_RXMULTICASTFRAMES_G_HI 0x0924
+#define MMC_RXCRCERROR_LO 0x0928
+#define MMC_RXCRCERROR_HI 0x092c
+#define MMC_RXRUNTERROR 0x0930
+#define MMC_RXJABBERERROR 0x0934
+#define MMC_RXUNDERSIZE_G 0x0938
+#define MMC_RXOVERSIZE_G 0x093c
+#define MMC_RX64OCTETS_GB_LO 0x0940
+#define MMC_RX64OCTETS_GB_HI 0x0944
+#define MMC_RX65TO127OCTETS_GB_LO 0x0948
+#define MMC_RX65TO127OCTETS_GB_HI 0x094c
+#define MMC_RX128TO255OCTETS_GB_LO 0x0950
+#define MMC_RX128TO255OCTETS_GB_HI 0x0954
+#define MMC_RX256TO511OCTETS_GB_LO 0x0958
+#define MMC_RX256TO511OCTETS_GB_HI 0x095c
+#define MMC_RX512TO1023OCTETS_GB_LO 0x0960
+#define MMC_RX512TO1023OCTETS_GB_HI 0x0964
+#define MMC_RX1024TOMAXOCTETS_GB_LO 0x0968
+#define MMC_RX1024TOMAXOCTETS_GB_HI 0x096c
+#define MMC_RXUNICASTFRAMES_G_LO 0x0970
+#define MMC_RXUNICASTFRAMES_G_HI 0x0974
+#define MMC_RXLENGTHERROR_LO 0x0978
+#define MMC_RXLENGTHERROR_HI 0x097c
+#define MMC_RXOUTOFRANGETYPE_LO 0x0980
+#define MMC_RXOUTOFRANGETYPE_HI 0x0984
+#define MMC_RXPAUSEFRAMES_LO 0x0988
+#define MMC_RXPAUSEFRAMES_HI 0x098c
+#define MMC_RXFIFOOVERFLOW_LO 0x0990
+#define MMC_RXFIFOOVERFLOW_HI 0x0994
+#define MMC_RXVLANFRAMES_GB_LO 0x0998
+#define MMC_RXVLANFRAMES_GB_HI 0x099c
+#define MMC_RXWATCHDOGERROR 0x09a0
+
+/* MMC register entry bit positions and sizes */
+#define MMC_CR_CR_INDEX 0
+#define MMC_CR_CR_WIDTH 1
+#define MMC_CR_CSR_INDEX 1
+#define MMC_CR_CSR_WIDTH 1
+#define MMC_CR_ROR_INDEX 2
+#define MMC_CR_ROR_WIDTH 1
+#define MMC_CR_MCF_INDEX 3
+#define MMC_CR_MCF_WIDTH 1
+#define MMC_CR_MCT_INDEX 4
+#define MMC_CR_MCT_WIDTH 2
+#define MMC_RIER_ALL_INTERRUPTS_INDEX 0
+#define MMC_RIER_ALL_INTERRUPTS_WIDTH 23
+#define MMC_RISR_RXFRAMECOUNT_GB_INDEX 0
+#define MMC_RISR_RXFRAMECOUNT_GB_WIDTH 1
+#define MMC_RISR_RXOCTETCOUNT_GB_INDEX 1
+#define MMC_RISR_RXOCTETCOUNT_GB_WIDTH 1
+#define MMC_RISR_RXOCTETCOUNT_G_INDEX 2
+#define MMC_RISR_RXOCTETCOUNT_G_WIDTH 1
+#define MMC_RISR_RXBROADCASTFRAMES_G_INDEX 3
+#define MMC_RISR_RXBROADCASTFRAMES_G_WIDTH 1
+#define MMC_RISR_RXMULTICASTFRAMES_G_INDEX 4
+#define MMC_RISR_RXMULTICASTFRAMES_G_WIDTH 1
+#define MMC_RISR_RXCRCERROR_INDEX 5
+#define MMC_RISR_RXCRCERROR_WIDTH 1
+#define MMC_RISR_RXRUNTERROR_INDEX 6
+#define MMC_RISR_RXRUNTERROR_WIDTH 1
+#define MMC_RISR_RXJABBERERROR_INDEX 7
+#define MMC_RISR_RXJABBERERROR_WIDTH 1
+#define MMC_RISR_RXUNDERSIZE_G_INDEX 8
+#define MMC_RISR_RXUNDERSIZE_G_WIDTH 1
+#define MMC_RISR_RXOVERSIZE_G_INDEX 9
+#define MMC_RISR_RXOVERSIZE_G_WIDTH 1
+#define MMC_RISR_RX64OCTETS_GB_INDEX 10
+#define MMC_RISR_RX64OCTETS_GB_WIDTH 1
+#define MMC_RISR_RX65TO127OCTETS_GB_INDEX 11
+#define MMC_RISR_RX65TO127OCTETS_GB_WIDTH 1
+#define MMC_RISR_RX128TO255OCTETS_GB_INDEX 12
+#define MMC_RISR_RX128TO255OCTETS_GB_WIDTH 1
+#define MMC_RISR_RX256TO511OCTETS_GB_INDEX 13
+#define MMC_RISR_RX256TO511OCTETS_GB_WIDTH 1
+#define MMC_RISR_RX512TO1023OCTETS_GB_INDEX 14
+#define MMC_RISR_RX512TO1023OCTETS_GB_WIDTH 1
+#define MMC_RISR_RX1024TOMAXOCTETS_GB_INDEX 15
+#define MMC_RISR_RX1024TOMAXOCTETS_GB_WIDTH 1
+#define MMC_RISR_RXUNICASTFRAMES_G_INDEX 16
+#define MMC_RISR_RXUNICASTFRAMES_G_WIDTH 1
+#define MMC_RISR_RXLENGTHERROR_INDEX 17
+#define MMC_RISR_RXLENGTHERROR_WIDTH 1
+#define MMC_RISR_RXOUTOFRANGETYPE_INDEX 18
+#define MMC_RISR_RXOUTOFRANGETYPE_WIDTH 1
+#define MMC_RISR_RXPAUSEFRAMES_INDEX 19
+#define MMC_RISR_RXPAUSEFRAMES_WIDTH 1
+#define MMC_RISR_RXFIFOOVERFLOW_INDEX 20
+#define MMC_RISR_RXFIFOOVERFLOW_WIDTH 1
+#define MMC_RISR_RXVLANFRAMES_GB_INDEX 21
+#define MMC_RISR_RXVLANFRAMES_GB_WIDTH 1
+#define MMC_RISR_RXWATCHDOGERROR_INDEX 22
+#define MMC_RISR_RXWATCHDOGERROR_WIDTH 1
+#define MMC_TIER_ALL_INTERRUPTS_INDEX 0
+#define MMC_TIER_ALL_INTERRUPTS_WIDTH 18
+#define MMC_TISR_TXOCTETCOUNT_GB_INDEX 0
+#define MMC_TISR_TXOCTETCOUNT_GB_WIDTH 1
+#define MMC_TISR_TXFRAMECOUNT_GB_INDEX 1
+#define MMC_TISR_TXFRAMECOUNT_GB_WIDTH 1
+#define MMC_TISR_TXBROADCASTFRAMES_G_INDEX 2
+#define MMC_TISR_TXBROADCASTFRAMES_G_WIDTH 1
+#define MMC_TISR_TXMULTICASTFRAMES_G_INDEX 3
+#define MMC_TISR_TXMULTICASTFRAMES_G_WIDTH 1
+#define MMC_TISR_TX64OCTETS_GB_INDEX 4
+#define MMC_TISR_TX64OCTETS_GB_WIDTH 1
+#define MMC_TISR_TX65TO127OCTETS_GB_INDEX 5
+#define MMC_TISR_TX65TO127OCTETS_GB_WIDTH 1
+#define MMC_TISR_TX128TO255OCTETS_GB_INDEX 6
+#define MMC_TISR_TX128TO255OCTETS_GB_WIDTH 1
+#define MMC_TISR_TX256TO511OCTETS_GB_INDEX 7
+#define MMC_TISR_TX256TO511OCTETS_GB_WIDTH 1
+#define MMC_TISR_TX512TO1023OCTETS_GB_INDEX 8
+#define MMC_TISR_TX512TO1023OCTETS_GB_WIDTH 1
+#define MMC_TISR_TX1024TOMAXOCTETS_GB_INDEX 9
+#define MMC_TISR_TX1024TOMAXOCTETS_GB_WIDTH 1
+#define MMC_TISR_TXUNICASTFRAMES_GB_INDEX 10
+#define MMC_TISR_TXUNICASTFRAMES_GB_WIDTH 1
+#define MMC_TISR_TXMULTICASTFRAMES_GB_INDEX 11
+#define MMC_TISR_TXMULTICASTFRAMES_GB_WIDTH 1
+#define MMC_TISR_TXBROADCASTFRAMES_GB_INDEX 12
+#define MMC_TISR_TXBROADCASTFRAMES_GB_WIDTH 1
+#define MMC_TISR_TXUNDERFLOWERROR_INDEX 13
+#define MMC_TISR_TXUNDERFLOWERROR_WIDTH 1
+#define MMC_TISR_TXOCTETCOUNT_G_INDEX 14
+#define MMC_TISR_TXOCTETCOUNT_G_WIDTH 1
+#define MMC_TISR_TXFRAMECOUNT_G_INDEX 15
+#define MMC_TISR_TXFRAMECOUNT_G_WIDTH 1
+#define MMC_TISR_TXPAUSEFRAMES_INDEX 16
+#define MMC_TISR_TXPAUSEFRAMES_WIDTH 1
+#define MMC_TISR_TXVLANFRAMES_G_INDEX 17
+#define MMC_TISR_TXVLANFRAMES_G_WIDTH 1
+
+/* MTL register offsets */
+#define MTL_OMR 0x1000
+#define MTL_FDCR 0x1008
+#define MTL_FDSR 0x100c
+#define MTL_FDDR 0x1010
+#define MTL_ISR 0x1020
+#define MTL_RQDCM0R 0x1030
+#define MTL_TCPM0R 0x1040
+#define MTL_TCPM1R 0x1044
+
+#define MTL_RQDCM_INC 4
+#define MTL_RQDCM_Q_PER_REG 4
+#define MTL_TCPM_INC 4
+#define MTL_TCPM_TC_PER_REG 4
+
+/* MTL register entry bit positions and sizes */
+#define MTL_OMR_ETSALG_INDEX 5
+#define MTL_OMR_ETSALG_WIDTH 2
+#define MTL_OMR_RAA_INDEX 2
+#define MTL_OMR_RAA_WIDTH 1
+
+/* MTL queue register offsets
+ * Multiple queues can be active. The first queue has registers
+ * that begin at 0x1100. Each subsequent queue has registers that
+ * are accessed using an offset of 0x80 from the previous queue.
+ */
+#define MTL_Q_BASE 0x1100
+#define MTL_Q_INC 0x80
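+
+/* Illustrative only: queue registers follow the same base-plus-stride
+ * scheme as the DMA channels, e.g. queue 3's Tx operation mode register
+ * sits at MTL_Q_BASE + 3 * MTL_Q_INC + MTL_Q_TQOMR = 0x1280.
+ */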
+
+#define MTL_Q_TQOMR 0x00
+#define MTL_Q_TQUR 0x04
+#define MTL_Q_TQDR 0x08
+#define MTL_Q_RQOMR 0x40
+#define MTL_Q_RQMPOCR 0x44
+#define MTL_Q_RQDR 0x48
+#define MTL_Q_RQFCR 0x50
+#define MTL_Q_IER 0x70
+#define MTL_Q_ISR 0x74
+
+/* MTL queue register entry bit positions and sizes */
+#define MTL_Q_RQDR_PRXQ_INDEX 16
+#define MTL_Q_RQDR_PRXQ_WIDTH 14
+#define MTL_Q_RQDR_RXQSTS_INDEX 4
+#define MTL_Q_RQDR_RXQSTS_WIDTH 2
+#define MTL_Q_RQFCR_RFA_INDEX 1
+#define MTL_Q_RQFCR_RFA_WIDTH 6
+#define MTL_Q_RQFCR_RFD_INDEX 17
+#define MTL_Q_RQFCR_RFD_WIDTH 6
+#define MTL_Q_RQOMR_EHFC_INDEX 7
+#define MTL_Q_RQOMR_EHFC_WIDTH 1
+#define MTL_Q_RQOMR_RQS_INDEX 16
+#define MTL_Q_RQOMR_RQS_WIDTH 9
+#define MTL_Q_RQOMR_RSF_INDEX 5
+#define MTL_Q_RQOMR_RSF_WIDTH 1
+#define MTL_Q_RQOMR_RTC_INDEX 0
+#define MTL_Q_RQOMR_RTC_WIDTH 2
+#define MTL_Q_TQDR_TRCSTS_INDEX 1
+#define MTL_Q_TQDR_TRCSTS_WIDTH 2
+#define MTL_Q_TQDR_TXQSTS_INDEX 4
+#define MTL_Q_TQDR_TXQSTS_WIDTH 1
+#define MTL_Q_TQOMR_FTQ_INDEX 0
+#define MTL_Q_TQOMR_FTQ_WIDTH 1
+#define MTL_Q_TQOMR_Q2TCMAP_INDEX 8
+#define MTL_Q_TQOMR_Q2TCMAP_WIDTH 3
+#define MTL_Q_TQOMR_TQS_INDEX 16
+#define MTL_Q_TQOMR_TQS_WIDTH 10
+#define MTL_Q_TQOMR_TSF_INDEX 1
+#define MTL_Q_TQOMR_TSF_WIDTH 1
+#define MTL_Q_TQOMR_TTC_INDEX 4
+#define MTL_Q_TQOMR_TTC_WIDTH 3
+#define MTL_Q_TQOMR_TXQEN_INDEX 2
+#define MTL_Q_TQOMR_TXQEN_WIDTH 2
+
+/* MTL queue register value */
+#define MTL_RSF_DISABLE 0x00
+#define MTL_RSF_ENABLE 0x01
+#define MTL_TSF_DISABLE 0x00
+#define MTL_TSF_ENABLE 0x01
+
+#define MTL_RX_THRESHOLD_64 0x00
+#define MTL_RX_THRESHOLD_96 0x02
+#define MTL_RX_THRESHOLD_128 0x03
+#define MTL_TX_THRESHOLD_32 0x01
+#define MTL_TX_THRESHOLD_64 0x00
+#define MTL_TX_THRESHOLD_96 0x02
+#define MTL_TX_THRESHOLD_128 0x03
+#define MTL_TX_THRESHOLD_192 0x04
+#define MTL_TX_THRESHOLD_256 0x05
+#define MTL_TX_THRESHOLD_384 0x06
+#define MTL_TX_THRESHOLD_512 0x07
+
+#define MTL_ETSALG_WRR 0x00
+#define MTL_ETSALG_WFQ 0x01
+#define MTL_ETSALG_DWRR 0x02
+#define MTL_RAA_SP 0x00
+#define MTL_RAA_WSP 0x01
+
+#define MTL_Q_DISABLED 0x00
+#define MTL_Q_ENABLED 0x02
+
+/* MTL traffic class register offsets
+ * Multiple traffic classes can be active. The first class has registers
+ * that begin at 0x1100. Each subsequent traffic class has registers that
+ * are accessed using an offset of 0x80 from the previous traffic class.
+ */
+#define MTL_TC_BASE MTL_Q_BASE
+#define MTL_TC_INC MTL_Q_INC
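+
+/* Illustrative only: since the traffic class base and stride alias the
+ * queue values, traffic class n's ETS control register sits at
+ * MTL_TC_BASE + n * MTL_TC_INC + MTL_TC_ETSCR (0x1110 for class 0).
+ */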
+
+#define MTL_TC_ETSCR 0x10
+#define MTL_TC_ETSSR 0x14
+#define MTL_TC_QWR 0x18
+
+/* MTL traffic class register entry bit positions and sizes */
+#define MTL_TC_ETSCR_TSA_INDEX 0
+#define MTL_TC_ETSCR_TSA_WIDTH 2
+#define MTL_TC_QWR_QW_INDEX 0
+#define MTL_TC_QWR_QW_WIDTH 21
+
+/* MTL traffic class register value */
+#define MTL_TSA_SP 0x00
+#define MTL_TSA_ETS 0x02
+
+/* PCS register offsets */
+#define PCS_V1_WINDOW_SELECT 0x03fc
+#define PCS_V2_WINDOW_DEF 0x9060
+#define PCS_V2_WINDOW_SELECT 0x9064
+#define PCS_V2_RV_WINDOW_DEF 0x1060
+#define PCS_V2_RV_WINDOW_SELECT 0x1064
+
+/* PCS register entry bit positions and sizes */
+#define PCS_V2_WINDOW_DEF_OFFSET_INDEX 6
+#define PCS_V2_WINDOW_DEF_OFFSET_WIDTH 14
+#define PCS_V2_WINDOW_DEF_SIZE_INDEX 2
+#define PCS_V2_WINDOW_DEF_SIZE_WIDTH 4
+
+/* SerDes integration register offsets */
+#define SIR0_KR_RT_1 0x002c
+#define SIR0_STATUS 0x0040
+#define SIR1_SPEED 0x0000
+
+/* SerDes integration register entry bit positions and sizes */
+#define SIR0_KR_RT_1_RESET_INDEX 11
+#define SIR0_KR_RT_1_RESET_WIDTH 1
+#define SIR0_STATUS_RX_READY_INDEX 0
+#define SIR0_STATUS_RX_READY_WIDTH 1
+#define SIR0_STATUS_TX_READY_INDEX 8
+#define SIR0_STATUS_TX_READY_WIDTH 1
+#define SIR1_SPEED_CDR_RATE_INDEX 12
+#define SIR1_SPEED_CDR_RATE_WIDTH 4
+#define SIR1_SPEED_DATARATE_INDEX 4
+#define SIR1_SPEED_DATARATE_WIDTH 2
+#define SIR1_SPEED_PLLSEL_INDEX 3
+#define SIR1_SPEED_PLLSEL_WIDTH 1
+#define SIR1_SPEED_RATECHANGE_INDEX 6
+#define SIR1_SPEED_RATECHANGE_WIDTH 1
+#define SIR1_SPEED_TXAMP_INDEX 8
+#define SIR1_SPEED_TXAMP_WIDTH 4
+#define SIR1_SPEED_WORDMODE_INDEX 0
+#define SIR1_SPEED_WORDMODE_WIDTH 3
+
+/* SerDes RxTx register offsets */
+#define RXTX_REG6 0x0018
+#define RXTX_REG20 0x0050
+#define RXTX_REG22 0x0058
+#define RXTX_REG114 0x01c8
+#define RXTX_REG129 0x0204
+
+/* SerDes RxTx register entry bit positions and sizes */
+#define RXTX_REG6_RESETB_RXD_INDEX 8
+#define RXTX_REG6_RESETB_RXD_WIDTH 1
+#define RXTX_REG20_BLWC_ENA_INDEX 2
+#define RXTX_REG20_BLWC_ENA_WIDTH 1
+#define RXTX_REG114_PQ_REG_INDEX 9
+#define RXTX_REG114_PQ_REG_WIDTH 7
+#define RXTX_REG129_RXDFE_CONFIG_INDEX 14
+#define RXTX_REG129_RXDFE_CONFIG_WIDTH 2
+
+/* MAC Control register offsets */
+#define XP_PROP_0 0x0000
+#define XP_PROP_1 0x0004
+#define XP_PROP_2 0x0008
+#define XP_PROP_3 0x000c
+#define XP_PROP_4 0x0010
+#define XP_PROP_5 0x0014
+#define XP_MAC_ADDR_LO 0x0020
+#define XP_MAC_ADDR_HI 0x0024
+#define XP_ECC_ISR 0x0030
+#define XP_ECC_IER 0x0034
+#define XP_ECC_CNT0 0x003c
+#define XP_ECC_CNT1 0x0040
+#define XP_DRIVER_INT_REQ 0x0060
+#define XP_DRIVER_INT_RO 0x0064
+#define XP_DRIVER_SCRATCH_0 0x0068
+#define XP_DRIVER_SCRATCH_1 0x006c
+#define XP_INT_REISSUE_EN 0x0074
+#define XP_INT_EN 0x0078
+#define XP_I2C_MUTEX 0x0080
+#define XP_MDIO_MUTEX 0x0084
+
+/* MAC Control register entry bit positions and sizes */
+#define XP_DRIVER_INT_REQ_REQUEST_INDEX 0
+#define XP_DRIVER_INT_REQ_REQUEST_WIDTH 1
+#define XP_DRIVER_INT_RO_STATUS_INDEX 0
+#define XP_DRIVER_INT_RO_STATUS_WIDTH 1
+#define XP_DRIVER_SCRATCH_0_COMMAND_INDEX 0
+#define XP_DRIVER_SCRATCH_0_COMMAND_WIDTH 8
+#define XP_DRIVER_SCRATCH_0_SUB_COMMAND_INDEX 8
+#define XP_DRIVER_SCRATCH_0_SUB_COMMAND_WIDTH 8
+#define XP_ECC_CNT0_RX_DED_INDEX 24
+#define XP_ECC_CNT0_RX_DED_WIDTH 8
+#define XP_ECC_CNT0_RX_SEC_INDEX 16
+#define XP_ECC_CNT0_RX_SEC_WIDTH 8
+#define XP_ECC_CNT0_TX_DED_INDEX 8
+#define XP_ECC_CNT0_TX_DED_WIDTH 8
+#define XP_ECC_CNT0_TX_SEC_INDEX 0
+#define XP_ECC_CNT0_TX_SEC_WIDTH 8
+#define XP_ECC_CNT1_DESC_DED_INDEX 8
+#define XP_ECC_CNT1_DESC_DED_WIDTH 8
+#define XP_ECC_CNT1_DESC_SEC_INDEX 0
+#define XP_ECC_CNT1_DESC_SEC_WIDTH 8
+#define XP_ECC_IER_DESC_DED_INDEX 5
+#define XP_ECC_IER_DESC_DED_WIDTH 1
+#define XP_ECC_IER_DESC_SEC_INDEX 4
+#define XP_ECC_IER_DESC_SEC_WIDTH 1
+#define XP_ECC_IER_RX_DED_INDEX 3
+#define XP_ECC_IER_RX_DED_WIDTH 1
+#define XP_ECC_IER_RX_SEC_INDEX 2
+#define XP_ECC_IER_RX_SEC_WIDTH 1
+#define XP_ECC_IER_TX_DED_INDEX 1
+#define XP_ECC_IER_TX_DED_WIDTH 1
+#define XP_ECC_IER_TX_SEC_INDEX 0
+#define XP_ECC_IER_TX_SEC_WIDTH 1
+#define XP_ECC_ISR_DESC_DED_INDEX 5
+#define XP_ECC_ISR_DESC_DED_WIDTH 1
+#define XP_ECC_ISR_DESC_SEC_INDEX 4
+#define XP_ECC_ISR_DESC_SEC_WIDTH 1
+#define XP_ECC_ISR_RX_DED_INDEX 3
+#define XP_ECC_ISR_RX_DED_WIDTH 1
+#define XP_ECC_ISR_RX_SEC_INDEX 2
+#define XP_ECC_ISR_RX_SEC_WIDTH 1
+#define XP_ECC_ISR_TX_DED_INDEX 1
+#define XP_ECC_ISR_TX_DED_WIDTH 1
+#define XP_ECC_ISR_TX_SEC_INDEX 0
+#define XP_ECC_ISR_TX_SEC_WIDTH 1
+#define XP_I2C_MUTEX_BUSY_INDEX 31
+#define XP_I2C_MUTEX_BUSY_WIDTH 1
+#define XP_I2C_MUTEX_ID_INDEX 29
+#define XP_I2C_MUTEX_ID_WIDTH 2
+#define XP_I2C_MUTEX_ACTIVE_INDEX 0
+#define XP_I2C_MUTEX_ACTIVE_WIDTH 1
+#define XP_MAC_ADDR_HI_VALID_INDEX 31
+#define XP_MAC_ADDR_HI_VALID_WIDTH 1
+#define XP_PROP_0_CONN_TYPE_INDEX 28
+#define XP_PROP_0_CONN_TYPE_WIDTH 3
+#define XP_PROP_0_MDIO_ADDR_INDEX 16
+#define XP_PROP_0_MDIO_ADDR_WIDTH 5
+#define XP_PROP_0_PORT_ID_INDEX 0
+#define XP_PROP_0_PORT_ID_WIDTH 8
+#define XP_PROP_0_PORT_MODE_INDEX 8
+#define XP_PROP_0_PORT_MODE_WIDTH 4
+#define XP_PROP_0_PORT_SPEEDS_INDEX 23
+#define XP_PROP_0_PORT_SPEEDS_WIDTH 4
+#define XP_PROP_1_MAX_RX_DMA_INDEX 24
+#define XP_PROP_1_MAX_RX_DMA_WIDTH 5
+#define XP_PROP_1_MAX_RX_QUEUES_INDEX 8
+#define XP_PROP_1_MAX_RX_QUEUES_WIDTH 5
+#define XP_PROP_1_MAX_TX_DMA_INDEX 16
+#define XP_PROP_1_MAX_TX_DMA_WIDTH 5
+#define XP_PROP_1_MAX_TX_QUEUES_INDEX 0
+#define XP_PROP_1_MAX_TX_QUEUES_WIDTH 5
+#define XP_PROP_2_RX_FIFO_SIZE_INDEX 16
+#define XP_PROP_2_RX_FIFO_SIZE_WIDTH 16
+#define XP_PROP_2_TX_FIFO_SIZE_INDEX 0
+#define XP_PROP_2_TX_FIFO_SIZE_WIDTH 16
+#define XP_PROP_3_GPIO_MASK_INDEX 28
+#define XP_PROP_3_GPIO_MASK_WIDTH 4
+#define XP_PROP_3_GPIO_MOD_ABS_INDEX 20
+#define XP_PROP_3_GPIO_MOD_ABS_WIDTH 4
+#define XP_PROP_3_GPIO_RATE_SELECT_INDEX 16
+#define XP_PROP_3_GPIO_RATE_SELECT_WIDTH 4
+#define XP_PROP_3_GPIO_RX_LOS_INDEX 24
+#define XP_PROP_3_GPIO_RX_LOS_WIDTH 4
+#define XP_PROP_3_GPIO_TX_FAULT_INDEX 12
+#define XP_PROP_3_GPIO_TX_FAULT_WIDTH 4
+#define XP_PROP_3_GPIO_ADDR_INDEX 8
+#define XP_PROP_3_GPIO_ADDR_WIDTH 3
+#define XP_PROP_3_MDIO_RESET_INDEX 0
+#define XP_PROP_3_MDIO_RESET_WIDTH 2
+#define XP_PROP_3_MDIO_RESET_I2C_ADDR_INDEX 8
+#define XP_PROP_3_MDIO_RESET_I2C_ADDR_WIDTH 3
+#define XP_PROP_3_MDIO_RESET_I2C_GPIO_INDEX 12
+#define XP_PROP_3_MDIO_RESET_I2C_GPIO_WIDTH 4
+#define XP_PROP_3_MDIO_RESET_INT_GPIO_INDEX 4
+#define XP_PROP_3_MDIO_RESET_INT_GPIO_WIDTH 2
+#define XP_PROP_4_MUX_ADDR_HI_INDEX 8
+#define XP_PROP_4_MUX_ADDR_HI_WIDTH 5
+#define XP_PROP_4_MUX_ADDR_LO_INDEX 0
+#define XP_PROP_4_MUX_ADDR_LO_WIDTH 3
+#define XP_PROP_4_MUX_CHAN_INDEX 4
+#define XP_PROP_4_MUX_CHAN_WIDTH 3
+#define XP_PROP_4_REDRV_ADDR_INDEX 16
+#define XP_PROP_4_REDRV_ADDR_WIDTH 7
+#define XP_PROP_4_REDRV_IF_INDEX 23
+#define XP_PROP_4_REDRV_IF_WIDTH 1
+#define XP_PROP_4_REDRV_LANE_INDEX 24
+#define XP_PROP_4_REDRV_LANE_WIDTH 3
+#define XP_PROP_4_REDRV_MODEL_INDEX 28
+#define XP_PROP_4_REDRV_MODEL_WIDTH 3
+#define XP_PROP_4_REDRV_PRESENT_INDEX 31
+#define XP_PROP_4_REDRV_PRESENT_WIDTH 1
+
+/* I2C Control register offsets */
+#define IC_CON 0x0000
+#define IC_TAR 0x0004
+#define IC_DATA_CMD 0x0010
+#define IC_INTR_STAT 0x002c
+#define IC_INTR_MASK 0x0030
+#define IC_RAW_INTR_STAT 0x0034
+#define IC_CLR_INTR 0x0040
+#define IC_CLR_TX_ABRT 0x0054
+#define IC_CLR_STOP_DET 0x0060
+#define IC_ENABLE 0x006c
+#define IC_TXFLR 0x0074
+#define IC_RXFLR 0x0078
+#define IC_TX_ABRT_SOURCE 0x0080
+#define IC_ENABLE_STATUS 0x009c
+#define IC_COMP_PARAM_1 0x00f4
+
+/* I2C Control register entry bit positions and sizes */
+#define IC_COMP_PARAM_1_MAX_SPEED_MODE_INDEX 2
+#define IC_COMP_PARAM_1_MAX_SPEED_MODE_WIDTH 2
+#define IC_COMP_PARAM_1_RX_BUFFER_DEPTH_INDEX 8
+#define IC_COMP_PARAM_1_RX_BUFFER_DEPTH_WIDTH 8
+#define IC_COMP_PARAM_1_TX_BUFFER_DEPTH_INDEX 16
+#define IC_COMP_PARAM_1_TX_BUFFER_DEPTH_WIDTH 8
+#define IC_CON_MASTER_MODE_INDEX 0
+#define IC_CON_MASTER_MODE_WIDTH 1
+#define IC_CON_RESTART_EN_INDEX 5
+#define IC_CON_RESTART_EN_WIDTH 1
+#define IC_CON_RX_FIFO_FULL_HOLD_INDEX 9
+#define IC_CON_RX_FIFO_FULL_HOLD_WIDTH 1
+#define IC_CON_SLAVE_DISABLE_INDEX 6
+#define IC_CON_SLAVE_DISABLE_WIDTH 1
+#define IC_CON_SPEED_INDEX 1
+#define IC_CON_SPEED_WIDTH 2
+#define IC_DATA_CMD_CMD_INDEX 8
+#define IC_DATA_CMD_CMD_WIDTH 1
+#define IC_DATA_CMD_STOP_INDEX 9
+#define IC_DATA_CMD_STOP_WIDTH 1
+#define IC_ENABLE_ABORT_INDEX 1
+#define IC_ENABLE_ABORT_WIDTH 1
+#define IC_ENABLE_EN_INDEX 0
+#define IC_ENABLE_EN_WIDTH 1
+#define IC_ENABLE_STATUS_EN_INDEX 0
+#define IC_ENABLE_STATUS_EN_WIDTH 1
+#define IC_INTR_MASK_TX_EMPTY_INDEX 4
+#define IC_INTR_MASK_TX_EMPTY_WIDTH 1
+#define IC_RAW_INTR_STAT_RX_FULL_INDEX 2
+#define IC_RAW_INTR_STAT_RX_FULL_WIDTH 1
+#define IC_RAW_INTR_STAT_STOP_DET_INDEX 9
+#define IC_RAW_INTR_STAT_STOP_DET_WIDTH 1
+#define IC_RAW_INTR_STAT_TX_ABRT_INDEX 6
+#define IC_RAW_INTR_STAT_TX_ABRT_WIDTH 1
+#define IC_RAW_INTR_STAT_TX_EMPTY_INDEX 4
+#define IC_RAW_INTR_STAT_TX_EMPTY_WIDTH 1
+
+/* I2C Control register value */
+#define IC_TX_ABRT_7B_ADDR_NOACK 0x0001
+#define IC_TX_ABRT_ARB_LOST 0x1000
+
+/* Descriptor/Packet entry bit positions and sizes */
+#define RX_PACKET_ERRORS_CRC_INDEX 2
+#define RX_PACKET_ERRORS_CRC_WIDTH 1
+#define RX_PACKET_ERRORS_FRAME_INDEX 3
+#define RX_PACKET_ERRORS_FRAME_WIDTH 1
+#define RX_PACKET_ERRORS_LENGTH_INDEX 0
+#define RX_PACKET_ERRORS_LENGTH_WIDTH 1
+#define RX_PACKET_ERRORS_OVERRUN_INDEX 1
+#define RX_PACKET_ERRORS_OVERRUN_WIDTH 1
+
+#define RX_PACKET_ATTRIBUTES_CSUM_DONE_INDEX 0
+#define RX_PACKET_ATTRIBUTES_CSUM_DONE_WIDTH 1
+#define RX_PACKET_ATTRIBUTES_VLAN_CTAG_INDEX 1
+#define RX_PACKET_ATTRIBUTES_VLAN_CTAG_WIDTH 1
+#define RX_PACKET_ATTRIBUTES_LAST_INDEX 2
+#define RX_PACKET_ATTRIBUTES_LAST_WIDTH 1
+#define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_INDEX 3
+#define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_WIDTH 1
+#define RX_PACKET_ATTRIBUTES_CONTEXT_INDEX 4
+#define RX_PACKET_ATTRIBUTES_CONTEXT_WIDTH 1
+#define RX_PACKET_ATTRIBUTES_RX_TSTAMP_INDEX 5
+#define RX_PACKET_ATTRIBUTES_RX_TSTAMP_WIDTH 1
+#define RX_PACKET_ATTRIBUTES_RSS_HASH_INDEX 6
+#define RX_PACKET_ATTRIBUTES_RSS_HASH_WIDTH 1
+#define RX_PACKET_ATTRIBUTES_FIRST_INDEX 7
+#define RX_PACKET_ATTRIBUTES_FIRST_WIDTH 1
+#define RX_PACKET_ATTRIBUTES_TNP_INDEX 8
+#define RX_PACKET_ATTRIBUTES_TNP_WIDTH 1
+#define RX_PACKET_ATTRIBUTES_TNPCSUM_DONE_INDEX 9
+#define RX_PACKET_ATTRIBUTES_TNPCSUM_DONE_WIDTH 1
+
+#define RX_NORMAL_DESC0_OVT_INDEX 0
+#define RX_NORMAL_DESC0_OVT_WIDTH 16
+#define RX_NORMAL_DESC2_HL_INDEX 0
+#define RX_NORMAL_DESC2_HL_WIDTH 10
+#define RX_NORMAL_DESC2_TNP_INDEX 11
+#define RX_NORMAL_DESC2_TNP_WIDTH 1
+#define RX_NORMAL_DESC3_CDA_INDEX 27
+#define RX_NORMAL_DESC3_CDA_WIDTH 1
+#define RX_NORMAL_DESC3_CTXT_INDEX 30
+#define RX_NORMAL_DESC3_CTXT_WIDTH 1
+#define RX_NORMAL_DESC3_ES_INDEX 15
+#define RX_NORMAL_DESC3_ES_WIDTH 1
+#define RX_NORMAL_DESC3_ETLT_INDEX 16
+#define RX_NORMAL_DESC3_ETLT_WIDTH 4
+#define RX_NORMAL_DESC3_FD_INDEX 29
+#define RX_NORMAL_DESC3_FD_WIDTH 1
+#define RX_NORMAL_DESC3_INTE_INDEX 30
+#define RX_NORMAL_DESC3_INTE_WIDTH 1
+#define RX_NORMAL_DESC3_L34T_INDEX 20
+#define RX_NORMAL_DESC3_L34T_WIDTH 4
+#define RX_NORMAL_DESC3_LD_INDEX 28
+#define RX_NORMAL_DESC3_LD_WIDTH 1
+#define RX_NORMAL_DESC3_OWN_INDEX 31
+#define RX_NORMAL_DESC3_OWN_WIDTH 1
+#define RX_NORMAL_DESC3_PL_INDEX 0
+#define RX_NORMAL_DESC3_PL_WIDTH 14
+#define RX_NORMAL_DESC3_RSV_INDEX 26
+#define RX_NORMAL_DESC3_RSV_WIDTH 1
+
+#define RX_DESC3_L34T_IPV4_TCP 1
+#define RX_DESC3_L34T_IPV4_UDP 2
+#define RX_DESC3_L34T_IPV4_ICMP 3
+#define RX_DESC3_L34T_IPV4_UNKNOWN 7
+#define RX_DESC3_L34T_IPV6_TCP 9
+#define RX_DESC3_L34T_IPV6_UDP 10
+#define RX_DESC3_L34T_IPV6_ICMP 11
+#define RX_DESC3_L34T_IPV6_UNKNOWN 15
+
+#define RX_CONTEXT_DESC3_TSA_INDEX 4
+#define RX_CONTEXT_DESC3_TSA_WIDTH 1
+#define RX_CONTEXT_DESC3_TSD_INDEX 6
+#define RX_CONTEXT_DESC3_TSD_WIDTH 1
+
+#define TX_PACKET_ATTRIBUTES_CSUM_ENABLE_INDEX 0
+#define TX_PACKET_ATTRIBUTES_CSUM_ENABLE_WIDTH 1
+#define TX_PACKET_ATTRIBUTES_TSO_ENABLE_INDEX 1
+#define TX_PACKET_ATTRIBUTES_TSO_ENABLE_WIDTH 1
+#define TX_PACKET_ATTRIBUTES_VLAN_CTAG_INDEX 2
+#define TX_PACKET_ATTRIBUTES_VLAN_CTAG_WIDTH 1
+#define TX_PACKET_ATTRIBUTES_PTP_INDEX 3
+#define TX_PACKET_ATTRIBUTES_PTP_WIDTH 1
+#define TX_PACKET_ATTRIBUTES_VXLAN_INDEX 4
+#define TX_PACKET_ATTRIBUTES_VXLAN_WIDTH 1
+
+#define TX_CONTEXT_DESC2_MSS_INDEX 0
+#define TX_CONTEXT_DESC2_MSS_WIDTH 15
+#define TX_CONTEXT_DESC3_CTXT_INDEX 30
+#define TX_CONTEXT_DESC3_CTXT_WIDTH 1
+#define TX_CONTEXT_DESC3_TCMSSV_INDEX 26
+#define TX_CONTEXT_DESC3_TCMSSV_WIDTH 1
+#define TX_CONTEXT_DESC3_VLTV_INDEX 16
+#define TX_CONTEXT_DESC3_VLTV_WIDTH 1
+#define TX_CONTEXT_DESC3_VT_INDEX 0
+#define TX_CONTEXT_DESC3_VT_WIDTH 16
+
+#define TX_NORMAL_DESC2_HL_B1L_INDEX 0
+#define TX_NORMAL_DESC2_HL_B1L_WIDTH 14
+#define TX_NORMAL_DESC2_IC_INDEX 31
+#define TX_NORMAL_DESC2_IC_WIDTH 1
+#define TX_NORMAL_DESC2_TTSE_INDEX 30
+#define TX_NORMAL_DESC2_TTSE_WIDTH 1
+#define TX_NORMAL_DESC2_VTIR_INDEX 14
+#define TX_NORMAL_DESC2_VTIR_WIDTH 2
+#define TX_NORMAL_DESC3_CIC_INDEX 16
+#define TX_NORMAL_DESC3_CIC_WIDTH 2
+#define TX_NORMAL_DESC3_CPC_INDEX 26
+#define TX_NORMAL_DESC3_CPC_WIDTH 2
+#define TX_NORMAL_DESC3_CTXT_INDEX 30
+#define TX_NORMAL_DESC3_CTXT_WIDTH 1
+#define TX_NORMAL_DESC3_FD_INDEX 29
+#define TX_NORMAL_DESC3_FD_WIDTH 1
+#define TX_NORMAL_DESC3_FL_INDEX 0
+#define TX_NORMAL_DESC3_FL_WIDTH 15
+#define TX_NORMAL_DESC3_LD_INDEX 28
+#define TX_NORMAL_DESC3_LD_WIDTH 1
+#define TX_NORMAL_DESC3_OWN_INDEX 31
+#define TX_NORMAL_DESC3_OWN_WIDTH 1
+#define TX_NORMAL_DESC3_TCPHDRLEN_INDEX 19
+#define TX_NORMAL_DESC3_TCPHDRLEN_WIDTH 4
+#define TX_NORMAL_DESC3_TCPPL_INDEX 0
+#define TX_NORMAL_DESC3_TCPPL_WIDTH 18
+#define TX_NORMAL_DESC3_TSE_INDEX 18
+#define TX_NORMAL_DESC3_TSE_WIDTH 1
+#define TX_NORMAL_DESC3_VNP_INDEX 23
+#define TX_NORMAL_DESC3_VNP_WIDTH 3
+
+#define TX_NORMAL_DESC2_VLAN_INSERT 0x2
+#define TX_NORMAL_DESC3_VXLAN_PACKET 0x3
+
+/* MDIO undefined or vendor specific registers */
+#ifndef MDIO_PMA_10GBR_PMD_CTRL
+#define MDIO_PMA_10GBR_PMD_CTRL 0x0096
+#endif
+
+#ifndef MDIO_PMA_10GBR_FECCTRL
+#define MDIO_PMA_10GBR_FECCTRL 0x00ab
+#endif
+
+#ifndef MDIO_PMA_RX_CTRL1
+#define MDIO_PMA_RX_CTRL1 0x8051
+#endif
+
+#ifndef MDIO_PCS_DIG_CTRL
+#define MDIO_PCS_DIG_CTRL 0x8000
+#endif
+
+#ifndef MDIO_PCS_DIGITAL_STAT
+#define MDIO_PCS_DIGITAL_STAT 0x8010
+#endif
+
+#ifndef MDIO_AN_XNP
+#define MDIO_AN_XNP 0x0016
+#endif
+
+#ifndef MDIO_AN_LPX
+#define MDIO_AN_LPX 0x0019
+#endif
+
+#ifndef MDIO_AN_COMP_STAT
+#define MDIO_AN_COMP_STAT 0x0030
+#endif
+
+#ifndef MDIO_AN_INTMASK
+#define MDIO_AN_INTMASK 0x8001
+#endif
+
+#ifndef MDIO_AN_INT
+#define MDIO_AN_INT 0x8002
+#endif
+
+#ifndef MDIO_VEND2_AN_ADVERTISE
+#define MDIO_VEND2_AN_ADVERTISE 0x0004
+#endif
+
+#ifndef MDIO_VEND2_AN_LP_ABILITY
+#define MDIO_VEND2_AN_LP_ABILITY 0x0005
+#endif
+
+#ifndef MDIO_VEND2_AN_CTRL
+#define MDIO_VEND2_AN_CTRL 0x8001
+#endif
+
+#ifndef MDIO_VEND2_AN_STAT
+#define MDIO_VEND2_AN_STAT 0x8002
+#endif
+
+#ifndef MDIO_VEND2_PMA_CDR_CONTROL
+#define MDIO_VEND2_PMA_CDR_CONTROL 0x8056
+#endif
+
+#ifndef MDIO_VEND2_PMA_MISC_CTRL0
+#define MDIO_VEND2_PMA_MISC_CTRL0 0x8090
+#endif
+
+#ifndef MDIO_CTRL1_SPEED1G
+#define MDIO_CTRL1_SPEED1G (MDIO_CTRL1_SPEED10G & ~BMCR_SPEED100)
+#endif
+
+#ifndef MDIO_VEND2_CTRL1_AN_ENABLE
+#define MDIO_VEND2_CTRL1_AN_ENABLE BIT(12)
+#endif
+
+#ifndef MDIO_VEND2_CTRL1_AN_RESTART
+#define MDIO_VEND2_CTRL1_AN_RESTART BIT(9)
+#endif
+
+#ifndef MDIO_VEND2_CTRL1_SS6
+#define MDIO_VEND2_CTRL1_SS6 BIT(6)
+#endif
+
+#ifndef MDIO_VEND2_CTRL1_SS13
+#define MDIO_VEND2_CTRL1_SS13 BIT(13)
+#endif
+
+/* MDIO mask values */
+#define XGBE_AN_CL73_INT_CMPLT BIT(0)
+#define XGBE_AN_CL73_INC_LINK BIT(1)
+#define XGBE_AN_CL73_PG_RCV BIT(2)
+#define XGBE_AN_CL73_INT_MASK 0x07
+
+#define XGBE_XNP_MCF_NULL_MESSAGE 0x001
+#define XGBE_XNP_ACK_PROCESSED BIT(12)
+#define XGBE_XNP_MP_FORMATTED BIT(13)
+#define XGBE_XNP_NP_EXCHANGE BIT(15)
+
+#define XGBE_KR_TRAINING_START BIT(0)
+#define XGBE_KR_TRAINING_ENABLE BIT(1)
+
+#define XGBE_PCS_CL37_BP BIT(12)
+#define XGBE_PCS_PSEQ_STATE_MASK 0x1c
+#define XGBE_PCS_PSEQ_STATE_POWER_GOOD 0x10
+
+#define XGBE_AN_CL37_INT_CMPLT BIT(0)
+#define XGBE_AN_CL37_INT_MASK 0x01
+
+#define XGBE_AN_CL37_HD_MASK 0x40
+#define XGBE_AN_CL37_FD_MASK 0x20
+
+#define XGBE_AN_CL37_PCS_MODE_MASK 0x06
+#define XGBE_AN_CL37_PCS_MODE_BASEX 0x00
+#define XGBE_AN_CL37_PCS_MODE_SGMII 0x04
+#define XGBE_AN_CL37_TX_CONFIG_MASK 0x08
+#define XGBE_AN_CL37_MII_CTRL_8BIT 0x0100
+
+#define XGBE_PMA_CDR_TRACK_EN_MASK 0x01
+#define XGBE_PMA_CDR_TRACK_EN_OFF 0x00
+#define XGBE_PMA_CDR_TRACK_EN_ON 0x01
+
+#define XGBE_PMA_RX_RST_0_MASK BIT(4)
+#define XGBE_PMA_RX_RST_0_RESET_ON 0x10
+#define XGBE_PMA_RX_RST_0_RESET_OFF 0x00
+
+#define XGBE_PMA_PLL_CTRL_MASK BIT(15)
+#define XGBE_PMA_PLL_CTRL_ENABLE BIT(15)
+#define XGBE_PMA_PLL_CTRL_DISABLE 0x0000
+
+/* Bit setting and getting macros
+ * The get macro will extract the current bit field value from within
+ * the variable
+ *
+ * The set macro will clear the current bit field value within the
+ * variable and then set the bit field of the variable to the
+ * specified value
+ */
+#define GET_BITS(_var, _index, _width) \
+ (((_var) >> (_index)) & ((0x1 << (_width)) - 1))
+
+#define SET_BITS(_var, _index, _width, _val) \
+do { \
+ (_var) &= ~(((0x1 << (_width)) - 1) << (_index)); \
+ (_var) |= (((_val) & ((0x1 << (_width)) - 1)) << (_index)); \
+} while (0)
+
+#define GET_BITS_LE(_var, _index, _width) \
+ ((le32_to_cpu((_var)) >> (_index)) & ((0x1 << (_width)) - 1))
+
+#define SET_BITS_LE(_var, _index, _width, _val) \
+do { \
+ (_var) &= cpu_to_le32(~(((0x1 << (_width)) - 1) << (_index))); \
+ (_var) |= cpu_to_le32((((_val) & \
+ ((0x1 << (_width)) - 1)) << (_index))); \
+} while (0)
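+
+/* Example (a sketch, with an illustrative variable and value): extracting
+ * and updating the 2-bit receive threshold (RTC) field of a cached RQOMR
+ * register value using the definitions above:
+ *
+ *   u32 rqomr = 0x00000002;
+ *   unsigned int rtc;
+ *
+ *   rtc = GET_BITS(rqomr, MTL_Q_RQOMR_RTC_INDEX,
+ *                  MTL_Q_RQOMR_RTC_WIDTH);              // rtc == 2
+ *   SET_BITS(rqomr, MTL_Q_RQOMR_RTC_INDEX,
+ *            MTL_Q_RQOMR_RTC_WIDTH, MTL_RX_THRESHOLD_128);
+ *                                                       // rqomr == 0x00000003
+ */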
+
+/* Bit setting and getting macros based on register fields
+ * The get macro uses the bit field definitions formed using the input
+ * names to extract the current bit field value from within the
+ * variable
+ *
+ * The set macro uses the bit field definitions formed using the input
+ * names to set the bit field of the variable to the specified value
+ */
+#define XGMAC_GET_BITS(_var, _prefix, _field) \
+ GET_BITS((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH)
+
+#define XGMAC_SET_BITS(_var, _prefix, _field, _val) \
+ SET_BITS((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH, (_val))
+
+#define XGMAC_GET_BITS_LE(_var, _prefix, _field) \
+ GET_BITS_LE((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH)
+
+#define XGMAC_SET_BITS_LE(_var, _prefix, _field, _val) \
+ SET_BITS_LE((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH, (_val))
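+
+/* Example (a sketch): the _prefix and _field arguments are pasted together
+ * to form the _INDEX and _WIDTH names, so the call below expands to
+ * SET_BITS(desc3, TX_NORMAL_DESC3_OWN_INDEX, TX_NORMAL_DESC3_OWN_WIDTH, 1)
+ * and sets bit 31. The variable name is illustrative only:
+ *
+ *   u32 desc3 = 0;
+ *
+ *   XGMAC_SET_BITS(desc3, TX_NORMAL_DESC3, OWN, 1);  // desc3 == 0x80000000
+ */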
+
+/* Macros for reading or writing registers
+ * The ioread macros will get bit fields or full values using the
+ * register definitions formed using the input names
+ *
+ * The iowrite macros will set bit fields or full values using the
+ * register definitions formed using the input names
+ */
+#define XGMAC_IOREAD(_pdata, _reg) \
+ ioread32((_pdata)->xgmac_regs + _reg)
+
+#define XGMAC_IOREAD_BITS(_pdata, _reg, _field) \
+ GET_BITS(XGMAC_IOREAD((_pdata), _reg), \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH)
+
+#define XGMAC_IOWRITE(_pdata, _reg, _val) \
+ iowrite32((_val), (_pdata)->xgmac_regs + _reg)
+
+#define XGMAC_IOWRITE_BITS(_pdata, _reg, _field, _val) \
+do { \
+ u32 reg_val = XGMAC_IOREAD((_pdata), _reg); \
+ SET_BITS(reg_val, \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH, (_val)); \
+ XGMAC_IOWRITE((_pdata), _reg, reg_val); \
+} while (0)
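+
+/* Example (a sketch): XGMAC_IOWRITE_BITS is a read-modify-write, so only
+ * the named field changes. Assuming the MAC_RCR register and its ACS
+ * field defined earlier in this header:
+ *
+ *   XGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 1);
+ *
+ * reads MAC_RCR, updates only the MAC_RCR_ACS bit range via SET_BITS() and
+ * writes the result back.
+ */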
+
+/* Macros for reading or writing MTL queue or traffic class registers
+ * Similar to the standard read and write macros except that the
+ * base register value is calculated by the queue or traffic class number
+ */
+#define XGMAC_MTL_IOREAD(_pdata, _n, _reg) \
+ ioread32((_pdata)->xgmac_regs + \
+ MTL_Q_BASE + ((_n) * MTL_Q_INC) + _reg)
+
+#define XGMAC_MTL_IOREAD_BITS(_pdata, _n, _reg, _field) \
+ GET_BITS(XGMAC_MTL_IOREAD((_pdata), (_n), _reg), \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH)
+
+#define XGMAC_MTL_IOWRITE(_pdata, _n, _reg, _val) \
+ iowrite32((_val), (_pdata)->xgmac_regs + \
+ MTL_Q_BASE + ((_n) * MTL_Q_INC) + _reg)
+
+#define XGMAC_MTL_IOWRITE_BITS(_pdata, _n, _reg, _field, _val) \
+do { \
+ u32 reg_val = XGMAC_MTL_IOREAD((_pdata), (_n), _reg); \
+ SET_BITS(reg_val, \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH, (_val)); \
+ XGMAC_MTL_IOWRITE((_pdata), (_n), _reg, reg_val); \
+} while (0)
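+
+/* Example (a sketch): the MTL macros compute the register address from the
+ * queue (or traffic class) number, so reading TQOMR for queue 2 accesses
+ * offset MTL_Q_BASE + (2 * MTL_Q_INC) + MTL_Q_TQOMR = 0x1200:
+ *
+ *   u32 tqomr = XGMAC_MTL_IOREAD(pdata, 2, MTL_Q_TQOMR);
+ */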
+
+/* Macros for reading or writing DMA channel registers
+ * Similar to the standard read and write macros except that the
+ * base register value is obtained from the ring
+ */
+#define XGMAC_DMA_IOREAD(_channel, _reg) \
+ ioread32((_channel)->dma_regs + _reg)
+
+#define XGMAC_DMA_IOREAD_BITS(_channel, _reg, _field) \
+ GET_BITS(XGMAC_DMA_IOREAD((_channel), _reg), \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH)
+
+#define XGMAC_DMA_IOWRITE(_channel, _reg, _val) \
+ iowrite32((_val), (_channel)->dma_regs + _reg)
+
+#define XGMAC_DMA_IOWRITE_BITS(_channel, _reg, _field, _val) \
+do { \
+ u32 reg_val = XGMAC_DMA_IOREAD((_channel), _reg); \
+ SET_BITS(reg_val, \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH, (_val)); \
+ XGMAC_DMA_IOWRITE((_channel), _reg, reg_val); \
+} while (0)
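+
+/* Example (a sketch): the DMA macros are channel-relative; dma_regs already
+ * points at the channel's register block, so no channel number is needed.
+ * Assuming the DMA_CH_SR register defined earlier in this header:
+ *
+ *   u32 status = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
+ */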
+
+/* Macros for building, reading or writing register values or bits
+ * within the register values of XPCS registers.
+ */
+#define XPCS_GET_BITS(_var, _prefix, _field) \
+ GET_BITS((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH)
+
+#define XPCS_SET_BITS(_var, _prefix, _field, _val) \
+ SET_BITS((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH, (_val))
+
+#define XPCS32_IOWRITE(_pdata, _off, _val) \
+ iowrite32(_val, (_pdata)->xpcs_regs + (_off))
+
+#define XPCS32_IOREAD(_pdata, _off) \
+ ioread32((_pdata)->xpcs_regs + (_off))
+
+#define XPCS16_IOWRITE(_pdata, _off, _val) \
+ iowrite16(_val, (_pdata)->xpcs_regs + (_off))
+
+#define XPCS16_IOREAD(_pdata, _off) \
+ ioread16((_pdata)->xpcs_regs + (_off))
+
+/* Macros for building, reading or writing register values or bits
+ * within the register values of SerDes integration registers.
+ */
+#define XSIR_GET_BITS(_var, _prefix, _field) \
+ GET_BITS((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH)
+
+#define XSIR_SET_BITS(_var, _prefix, _field, _val) \
+ SET_BITS((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH, (_val))
+
+#define XSIR0_IOREAD(_pdata, _reg) \
+ ioread16((_pdata)->sir0_regs + _reg)
+
+#define XSIR0_IOREAD_BITS(_pdata, _reg, _field) \
+ GET_BITS(XSIR0_IOREAD((_pdata), _reg), \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH)
+
+#define XSIR0_IOWRITE(_pdata, _reg, _val) \
+ iowrite16((_val), (_pdata)->sir0_regs + _reg)
+
+#define XSIR0_IOWRITE_BITS(_pdata, _reg, _field, _val) \
+do { \
+ u16 reg_val = XSIR0_IOREAD((_pdata), _reg); \
+ SET_BITS(reg_val, \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH, (_val)); \
+ XSIR0_IOWRITE((_pdata), _reg, reg_val); \
+} while (0)
+
+#define XSIR1_IOREAD(_pdata, _reg) \
+ ioread16((_pdata)->sir1_regs + _reg)
+
+#define XSIR1_IOREAD_BITS(_pdata, _reg, _field) \
+ GET_BITS(XSIR1_IOREAD((_pdata), _reg), \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH)
+
+#define XSIR1_IOWRITE(_pdata, _reg, _val) \
+ iowrite16((_val), (_pdata)->sir1_regs + _reg)
+
+#define XSIR1_IOWRITE_BITS(_pdata, _reg, _field, _val) \
+do { \
+ u16 reg_val = XSIR1_IOREAD((_pdata), _reg); \
+ SET_BITS(reg_val, \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH, (_val)); \
+ XSIR1_IOWRITE((_pdata), _reg, reg_val); \
+} while (0)
+
+/* Macros for building, reading or writing register values or bits
+ * within the register values of SerDes RxTx registers.
+ */
+#define XRXTX_IOREAD(_pdata, _reg) \
+ ioread16((_pdata)->rxtx_regs + _reg)
+
+#define XRXTX_IOREAD_BITS(_pdata, _reg, _field) \
+ GET_BITS(XRXTX_IOREAD((_pdata), _reg), \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH)
+
+#define XRXTX_IOWRITE(_pdata, _reg, _val) \
+ iowrite16((_val), (_pdata)->rxtx_regs + _reg)
+
+#define XRXTX_IOWRITE_BITS(_pdata, _reg, _field, _val) \
+do { \
+ u16 reg_val = XRXTX_IOREAD((_pdata), _reg); \
+ SET_BITS(reg_val, \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH, (_val)); \
+ XRXTX_IOWRITE((_pdata), _reg, reg_val); \
+} while (0)
+
+/* Macros for building, reading or writing register values or bits
+ * within the register values of MAC Control registers.
+ */
+#define XP_GET_BITS(_var, _prefix, _field) \
+ GET_BITS((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH)
+
+#define XP_SET_BITS(_var, _prefix, _field, _val) \
+ SET_BITS((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH, (_val))
+
+#define XP_IOREAD(_pdata, _reg) \
+ ioread32((_pdata)->xprop_regs + (_reg))
+
+#define XP_IOREAD_BITS(_pdata, _reg, _field) \
+ GET_BITS(XP_IOREAD((_pdata), (_reg)), \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH)
+
+#define XP_IOWRITE(_pdata, _reg, _val) \
+ iowrite32((_val), (_pdata)->xprop_regs + (_reg))
+
+#define XP_IOWRITE_BITS(_pdata, _reg, _field, _val) \
+do { \
+ u32 reg_val = XP_IOREAD((_pdata), (_reg)); \
+ SET_BITS(reg_val, \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH, (_val)); \
+ XP_IOWRITE((_pdata), (_reg), reg_val); \
+} while (0)
+
+/* Macros for building, reading or writing register values or bits
+ * within the register values of I2C Control registers.
+ */
+#define XI2C_GET_BITS(_var, _prefix, _field) \
+ GET_BITS((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH)
+
+#define XI2C_SET_BITS(_var, _prefix, _field, _val) \
+ SET_BITS((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH, (_val))
+
+#define XI2C_IOREAD(_pdata, _reg) \
+ ioread32((_pdata)->xi2c_regs + (_reg))
+
+#define XI2C_IOREAD_BITS(_pdata, _reg, _field) \
+ GET_BITS(XI2C_IOREAD((_pdata), (_reg)), \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH)
+
+#define XI2C_IOWRITE(_pdata, _reg, _val) \
+ iowrite32((_val), (_pdata)->xi2c_regs + (_reg))
+
+#define XI2C_IOWRITE_BITS(_pdata, _reg, _field, _val) \
+do { \
+ u32 reg_val = XI2C_IOREAD((_pdata), (_reg)); \
+ SET_BITS(reg_val, \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH, (_val)); \
+ XI2C_IOWRITE((_pdata), (_reg), reg_val); \
+} while (0)
+
+/* Macros for building, reading or writing register values or bits
+ * using MDIO. Different from above because of the use of standardized
+ * Linux include values. No shifting is performed with the bit
+ * operations, everything works on mask values.
+ */
+#define XMDIO_READ(_pdata, _mmd, _reg) \
+ ((_pdata)->hw_if.read_mmd_regs((_pdata), 0, \
+ MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff)))
+
+#define XMDIO_READ_BITS(_pdata, _mmd, _reg, _mask) \
+ (XMDIO_READ((_pdata), _mmd, _reg) & _mask)
+
+#define XMDIO_WRITE(_pdata, _mmd, _reg, _val) \
+ ((_pdata)->hw_if.write_mmd_regs((_pdata), 0, \
+ MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff), (_val)))
+
+#define XMDIO_WRITE_BITS(_pdata, _mmd, _reg, _mask, _val) \
+do { \
+ u32 mmd_val = XMDIO_READ((_pdata), _mmd, _reg); \
+ mmd_val &= ~_mask; \
+ mmd_val |= (_val); \
+ XMDIO_WRITE((_pdata), _mmd, _reg, mmd_val); \
+} while (0)
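+
+/* Example (a sketch): the MDIO macros combine the MMD number and the 16-bit
+ * register address into a clause 45 address, and the _BITS variants operate
+ * on mask values rather than index/width pairs. Selecting the CL37 SGMII
+ * PCS mode, for instance:
+ *
+ *   XMDIO_WRITE_BITS(pdata, MDIO_MMD_AN, MDIO_VEND2_AN_CTRL,
+ *                    XGBE_AN_CL37_PCS_MODE_MASK,
+ *                    XGBE_AN_CL37_PCS_MODE_SGMII);
+ *
+ * clears the two PCS mode bits and then sets the SGMII encoding.
+ */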
+
+#endif
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
new file mode 100644
index 000000000..895d35639
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
@@ -0,0 +1,293 @@
+/*
+ * AMD 10Gb Ethernet driver
+ *
+ * This file is available to you under your choice of the following two
+ * licenses:
+ *
+ * License 1: GPLv2
+ *
+ * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
+ *
+ * This file is free software; you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * License 2: Modified BSD
+ *
+ * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Advanced Micro Devices, Inc. nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/netdevice.h>
+#include <net/dcbnl.h>
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+static int xgbe_dcb_ieee_getets(struct net_device *netdev,
+ struct ieee_ets *ets)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+ /* Set number of supported traffic classes */
+ ets->ets_cap = pdata->hw_feat.tc_cnt;
+
+ if (pdata->ets) {
+ ets->cbs = pdata->ets->cbs;
+ memcpy(ets->tc_tx_bw, pdata->ets->tc_tx_bw,
+ sizeof(ets->tc_tx_bw));
+ memcpy(ets->tc_tsa, pdata->ets->tc_tsa,
+ sizeof(ets->tc_tsa));
+ memcpy(ets->prio_tc, pdata->ets->prio_tc,
+ sizeof(ets->prio_tc));
+ }
+
+ return 0;
+}
+
+static int xgbe_dcb_ieee_setets(struct net_device *netdev,
+ struct ieee_ets *ets)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ unsigned int i, tc_ets, tc_ets_weight;
+ u8 max_tc = 0;
+
+ tc_ets = 0;
+ tc_ets_weight = 0;
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ netif_dbg(pdata, drv, netdev,
+ "TC%u: tx_bw=%hhu, rx_bw=%hhu, tsa=%hhu\n", i,
+ ets->tc_tx_bw[i], ets->tc_rx_bw[i],
+ ets->tc_tsa[i]);
+ netif_dbg(pdata, drv, netdev, "PRIO%u: TC=%hhu\n", i,
+ ets->prio_tc[i]);
+
+ max_tc = max_t(u8, max_tc, ets->prio_tc[i]);
+ if ((ets->tc_tx_bw[i] || ets->tc_tsa[i]))
+ max_tc = max_t(u8, max_tc, i);
+
+ switch (ets->tc_tsa[i]) {
+ case IEEE_8021QAZ_TSA_STRICT:
+ break;
+ case IEEE_8021QAZ_TSA_ETS:
+ tc_ets = 1;
+ tc_ets_weight += ets->tc_tx_bw[i];
+ break;
+ default:
+ netif_err(pdata, drv, netdev,
+ "unsupported TSA algorithm (%hhu)\n",
+ ets->tc_tsa[i]);
+ return -EINVAL;
+ }
+ }
+
+ /* Check maximum traffic class requested */
+ if (max_tc >= pdata->hw_feat.tc_cnt) {
+ netif_err(pdata, drv, netdev,
+ "exceeded number of supported traffic classes\n");
+ return -EINVAL;
+ }
+
+ /* Weights must add up to 100% */
+ if (tc_ets && (tc_ets_weight != 100)) {
+ netif_err(pdata, drv, netdev,
+ "sum of ETS algorithm weights is not 100 (%u)\n",
+ tc_ets_weight);
+ return -EINVAL;
+ }
+
+ if (!pdata->ets) {
+ pdata->ets = devm_kzalloc(pdata->dev, sizeof(*pdata->ets),
+ GFP_KERNEL);
+ if (!pdata->ets)
+ return -ENOMEM;
+ }
+
+ pdata->num_tcs = max_tc + 1;
+ memcpy(pdata->ets, ets, sizeof(*pdata->ets));
+
+ pdata->hw_if.config_dcb_tc(pdata);
+
+ return 0;
+}
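+
+/* Example (a sketch): with two supported traffic classes, a valid request
+ * would set tc_tsa[0] = tc_tsa[1] = IEEE_8021QAZ_TSA_ETS with
+ * tc_tx_bw[0] = 60 and tc_tx_bw[1] = 40 (weights summing to 100); classes
+ * using IEEE_8021QAZ_TSA_STRICT are excluded from the 100% check above.
+ */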
+
+static int xgbe_dcb_ieee_getpfc(struct net_device *netdev,
+ struct ieee_pfc *pfc)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+ /* Set number of supported PFC traffic classes */
+ pfc->pfc_cap = pdata->hw_feat.tc_cnt;
+
+ if (pdata->pfc) {
+ pfc->pfc_en = pdata->pfc->pfc_en;
+ pfc->mbc = pdata->pfc->mbc;
+ pfc->delay = pdata->pfc->delay;
+ }
+
+ return 0;
+}
+
+static int xgbe_dcb_ieee_setpfc(struct net_device *netdev,
+ struct ieee_pfc *pfc)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+ netif_dbg(pdata, drv, netdev,
+ "cap=%hhu, en=%#hhx, mbc=%hhu, delay=%hhu\n",
+ pfc->pfc_cap, pfc->pfc_en, pfc->mbc, pfc->delay);
+
+ /* Check PFC for supported number of traffic classes */
+ if (pfc->pfc_en & ~((1 << pdata->hw_feat.tc_cnt) - 1)) {
+ netif_err(pdata, drv, netdev,
+ "PFC requested for unsupported traffic class\n");
+ return -EINVAL;
+ }
+
+ if (!pdata->pfc) {
+ pdata->pfc = devm_kzalloc(pdata->dev, sizeof(*pdata->pfc),
+ GFP_KERNEL);
+ if (!pdata->pfc)
+ return -ENOMEM;
+ }
+
+ memcpy(pdata->pfc, pfc, sizeof(*pdata->pfc));
+
+ pdata->hw_if.config_dcb_pfc(pdata);
+
+ return 0;
+}
+
+static u8 xgbe_dcb_getdcbx(struct net_device *netdev)
+{
+ return DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
+}
+
+static u8 xgbe_dcb_setdcbx(struct net_device *netdev, u8 dcbx)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ u8 support = xgbe_dcb_getdcbx(netdev);
+
+ netif_dbg(pdata, drv, netdev, "DCBX=%#hhx\n", dcbx);
+
+ if (dcbx & ~support)
+ return 1;
+
+ if ((dcbx & support) != support)
+ return 1;
+
+ return 0;
+}
+
+static const struct dcbnl_rtnl_ops xgbe_dcbnl_ops = {
+ /* IEEE 802.1Qaz std */
+ .ieee_getets = xgbe_dcb_ieee_getets,
+ .ieee_setets = xgbe_dcb_ieee_setets,
+ .ieee_getpfc = xgbe_dcb_ieee_getpfc,
+ .ieee_setpfc = xgbe_dcb_ieee_setpfc,
+
+ /* DCBX configuration */
+ .getdcbx = xgbe_dcb_getdcbx,
+ .setdcbx = xgbe_dcb_setdcbx,
+};
+
+const struct dcbnl_rtnl_ops *xgbe_get_dcbnl_ops(void)
+{
+ return &xgbe_dcbnl_ops;
+}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
new file mode 100644
index 000000000..b0a6c96b6
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
@@ -0,0 +1,525 @@
+/*
+ * AMD 10Gb Ethernet driver
+ *
+ * This file is available to you under your choice of the following two
+ * licenses:
+ *
+ * License 1: GPLv2
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ *
+ * This file is free software; you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * License 2: Modified BSD
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Advanced Micro Devices, Inc. nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+static ssize_t xgbe_common_read(char __user *buffer, size_t count,
+ loff_t *ppos, unsigned int value)
+{
+ char *buf;
+ ssize_t len;
+
+ if (*ppos != 0)
+ return 0;
+
+ buf = kasprintf(GFP_KERNEL, "0x%08x\n", value);
+ if (!buf)
+ return -ENOMEM;
+
+ if (count < strlen(buf)) {
+ kfree(buf);
+ return -ENOSPC;
+ }
+
+ len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
+ kfree(buf);
+
+ return len;
+}
+
+static ssize_t xgbe_common_write(const char __user *buffer, size_t count,
+ loff_t *ppos, unsigned int *value)
+{
+ char workarea[32];
+ ssize_t len;
+ int ret;
+
+ if (*ppos != 0)
+ return -EINVAL;
+
+ if (count >= sizeof(workarea))
+ return -ENOSPC;
+
+ len = simple_write_to_buffer(workarea, sizeof(workarea) - 1, ppos,
+ buffer, count);
+ if (len < 0)
+ return len;
+
+ workarea[len] = '\0';
+ ret = kstrtouint(workarea, 16, value);
+ if (ret)
+ return -EIO;
+
+ return len;
+}
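+
+/* Note: kstrtouint() is called with base 16, so input such as "1100" or
+ * "0x1100" is accepted and both mean the same value.
+ */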
+
+static ssize_t xgmac_reg_addr_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct xgbe_prv_data *pdata = filp->private_data;
+
+ return xgbe_common_read(buffer, count, ppos, pdata->debugfs_xgmac_reg);
+}
+
+static ssize_t xgmac_reg_addr_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct xgbe_prv_data *pdata = filp->private_data;
+
+ return xgbe_common_write(buffer, count, ppos,
+ &pdata->debugfs_xgmac_reg);
+}
+
+static ssize_t xgmac_reg_value_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct xgbe_prv_data *pdata = filp->private_data;
+ unsigned int value;
+
+ value = XGMAC_IOREAD(pdata, pdata->debugfs_xgmac_reg);
+
+ return xgbe_common_read(buffer, count, ppos, value);
+}
+
+static ssize_t xgmac_reg_value_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct xgbe_prv_data *pdata = filp->private_data;
+ unsigned int value;
+ ssize_t len;
+
+ len = xgbe_common_write(buffer, count, ppos, &value);
+ if (len < 0)
+ return len;
+
+ XGMAC_IOWRITE(pdata, pdata->debugfs_xgmac_reg, value);
+
+ return len;
+}
+
+static const struct file_operations xgmac_reg_addr_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = xgmac_reg_addr_read,
+ .write = xgmac_reg_addr_write,
+};
+
+static const struct file_operations xgmac_reg_value_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = xgmac_reg_value_read,
+ .write = xgmac_reg_value_write,
+};
+
+static ssize_t xpcs_mmd_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct xgbe_prv_data *pdata = filp->private_data;
+
+ return xgbe_common_read(buffer, count, ppos, pdata->debugfs_xpcs_mmd);
+}
+
+static ssize_t xpcs_mmd_write(struct file *filp, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct xgbe_prv_data *pdata = filp->private_data;
+
+ return xgbe_common_write(buffer, count, ppos,
+ &pdata->debugfs_xpcs_mmd);
+}
+
+static ssize_t xpcs_reg_addr_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct xgbe_prv_data *pdata = filp->private_data;
+
+ return xgbe_common_read(buffer, count, ppos, pdata->debugfs_xpcs_reg);
+}
+
+static ssize_t xpcs_reg_addr_write(struct file *filp, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct xgbe_prv_data *pdata = filp->private_data;
+
+ return xgbe_common_write(buffer, count, ppos,
+ &pdata->debugfs_xpcs_reg);
+}
+
+static ssize_t xpcs_reg_value_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct xgbe_prv_data *pdata = filp->private_data;
+ unsigned int value;
+
+ value = XMDIO_READ(pdata, pdata->debugfs_xpcs_mmd,
+ pdata->debugfs_xpcs_reg);
+
+ return xgbe_common_read(buffer, count, ppos, value);
+}
+
+static ssize_t xpcs_reg_value_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct xgbe_prv_data *pdata = filp->private_data;
+ unsigned int value;
+ ssize_t len;
+
+ len = xgbe_common_write(buffer, count, ppos, &value);
+ if (len < 0)
+ return len;
+
+ XMDIO_WRITE(pdata, pdata->debugfs_xpcs_mmd, pdata->debugfs_xpcs_reg,
+ value);
+
+ return len;
+}
+
+static const struct file_operations xpcs_mmd_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = xpcs_mmd_read,
+ .write = xpcs_mmd_write,
+};
+
+static const struct file_operations xpcs_reg_addr_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = xpcs_reg_addr_read,
+ .write = xpcs_reg_addr_write,
+};
+
+static const struct file_operations xpcs_reg_value_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = xpcs_reg_value_read,
+ .write = xpcs_reg_value_write,
+};
+
+static ssize_t xprop_reg_addr_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct xgbe_prv_data *pdata = filp->private_data;
+
+ return xgbe_common_read(buffer, count, ppos, pdata->debugfs_xprop_reg);
+}
+
+static ssize_t xprop_reg_addr_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct xgbe_prv_data *pdata = filp->private_data;
+
+ return xgbe_common_write(buffer, count, ppos,
+ &pdata->debugfs_xprop_reg);
+}
+
+static ssize_t xprop_reg_value_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct xgbe_prv_data *pdata = filp->private_data;
+ unsigned int value;
+
+ value = XP_IOREAD(pdata, pdata->debugfs_xprop_reg);
+
+ return xgbe_common_read(buffer, count, ppos, value);
+}
+
+static ssize_t xprop_reg_value_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct xgbe_prv_data *pdata = filp->private_data;
+ unsigned int value;
+ ssize_t len;
+
+ len = xgbe_common_write(buffer, count, ppos, &value);
+ if (len < 0)
+ return len;
+
+ XP_IOWRITE(pdata, pdata->debugfs_xprop_reg, value);
+
+ return len;
+}
+
+static const struct file_operations xprop_reg_addr_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = xprop_reg_addr_read,
+ .write = xprop_reg_addr_write,
+};
+
+static const struct file_operations xprop_reg_value_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = xprop_reg_value_read,
+ .write = xprop_reg_value_write,
+};
+
+static ssize_t xi2c_reg_addr_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct xgbe_prv_data *pdata = filp->private_data;
+
+ return xgbe_common_read(buffer, count, ppos, pdata->debugfs_xi2c_reg);
+}
+
+static ssize_t xi2c_reg_addr_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct xgbe_prv_data *pdata = filp->private_data;
+
+ return xgbe_common_write(buffer, count, ppos,
+ &pdata->debugfs_xi2c_reg);
+}
+
+static ssize_t xi2c_reg_value_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct xgbe_prv_data *pdata = filp->private_data;
+ unsigned int value;
+
+ value = XI2C_IOREAD(pdata, pdata->debugfs_xi2c_reg);
+
+ return xgbe_common_read(buffer, count, ppos, value);
+}
+
+static ssize_t xi2c_reg_value_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct xgbe_prv_data *pdata = filp->private_data;
+ unsigned int value;
+ ssize_t len;
+
+ len = xgbe_common_write(buffer, count, ppos, &value);
+ if (len < 0)
+ return len;
+
+ XI2C_IOWRITE(pdata, pdata->debugfs_xi2c_reg, value);
+
+ return len;
+}
+
+static const struct file_operations xi2c_reg_addr_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = xi2c_reg_addr_read,
+ .write = xi2c_reg_addr_write,
+};
+
+static const struct file_operations xi2c_reg_value_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = xi2c_reg_value_read,
+ .write = xi2c_reg_value_write,
+};
+
+void xgbe_debugfs_init(struct xgbe_prv_data *pdata)
+{
+ char *buf;
+
+ /* Set defaults */
+ pdata->debugfs_xgmac_reg = 0;
+ pdata->debugfs_xpcs_mmd = 1;
+ pdata->debugfs_xpcs_reg = 0;
+
+ buf = kasprintf(GFP_KERNEL, "amd-xgbe-%s", pdata->netdev->name);
+ if (!buf)
+ return;
+
+ pdata->xgbe_debugfs = debugfs_create_dir(buf, NULL);
+
+ debugfs_create_file("xgmac_register", 0600, pdata->xgbe_debugfs, pdata,
+ &xgmac_reg_addr_fops);
+
+ debugfs_create_file("xgmac_register_value", 0600, pdata->xgbe_debugfs,
+ pdata, &xgmac_reg_value_fops);
+
+ debugfs_create_file("xpcs_mmd", 0600, pdata->xgbe_debugfs, pdata,
+ &xpcs_mmd_fops);
+
+ debugfs_create_file("xpcs_register", 0600, pdata->xgbe_debugfs, pdata,
+ &xpcs_reg_addr_fops);
+
+ debugfs_create_file("xpcs_register_value", 0600, pdata->xgbe_debugfs,
+ pdata, &xpcs_reg_value_fops);
+
+ if (pdata->xprop_regs) {
+ debugfs_create_file("xprop_register", 0600, pdata->xgbe_debugfs,
+ pdata, &xprop_reg_addr_fops);
+
+ debugfs_create_file("xprop_register_value", 0600,
+ pdata->xgbe_debugfs, pdata,
+ &xprop_reg_value_fops);
+ }
+
+ if (pdata->xi2c_regs) {
+ debugfs_create_file("xi2c_register", 0600, pdata->xgbe_debugfs,
+ pdata, &xi2c_reg_addr_fops);
+
+ debugfs_create_file("xi2c_register_value", 0600,
+ pdata->xgbe_debugfs, pdata,
+ &xi2c_reg_value_fops);
+ }
+
+ if (pdata->vdata->an_cdr_workaround) {
+ debugfs_create_bool("an_cdr_workaround", 0600,
+ pdata->xgbe_debugfs,
+ &pdata->debugfs_an_cdr_workaround);
+
+ debugfs_create_bool("an_cdr_track_early", 0600,
+ pdata->xgbe_debugfs,
+ &pdata->debugfs_an_cdr_track_early);
+ }
+
+ kfree(buf);
+}
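+
+/* Example usage (a sketch, assuming debugfs is mounted at /sys/kernel/debug
+ * and the netdev is named eth0): each address/value file pair acts as an
+ * indirect register window, e.g. from a shell:
+ *
+ *   echo 0x1100 > /sys/kernel/debug/amd-xgbe-eth0/xgmac_register
+ *   cat /sys/kernel/debug/amd-xgbe-eth0/xgmac_register_value
+ */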
+
+void xgbe_debugfs_exit(struct xgbe_prv_data *pdata)
+{
+ debugfs_remove_recursive(pdata->xgbe_debugfs);
+ pdata->xgbe_debugfs = NULL;
+}
+
+void xgbe_debugfs_rename(struct xgbe_prv_data *pdata)
+{
+ char *buf;
+
+ if (!pdata->xgbe_debugfs)
+ return;
+
+ buf = kasprintf(GFP_KERNEL, "amd-xgbe-%s", pdata->netdev->name);
+ if (!buf)
+ return;
+
+ if (!strcmp(pdata->xgbe_debugfs->d_name.name, buf))
+ goto out;
+
+ debugfs_rename(pdata->xgbe_debugfs->d_parent, pdata->xgbe_debugfs,
+ pdata->xgbe_debugfs->d_parent, buf);
+
+out:
+ kfree(buf);
+}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
new file mode 100644
index 000000000..230726d7b
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
@@ -0,0 +1,676 @@
+/*
+ * AMD 10Gb Ethernet driver
+ *
+ * This file is available to you under your choice of the following two
+ * licenses:
+ *
+ * License 1: GPLv2
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ *
+ * This file is free software; you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * License 2: Modified BSD
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Advanced Micro Devices, Inc. nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+static void xgbe_unmap_rdata(struct xgbe_prv_data *, struct xgbe_ring_data *);
+
+static void xgbe_free_ring(struct xgbe_prv_data *pdata,
+ struct xgbe_ring *ring)
+{
+ struct xgbe_ring_data *rdata;
+ unsigned int i;
+
+ if (!ring)
+ return;
+
+ if (ring->rdata) {
+ for (i = 0; i < ring->rdesc_count; i++) {
+ rdata = XGBE_GET_DESC_DATA(ring, i);
+ xgbe_unmap_rdata(pdata, rdata);
+ }
+
+ kfree(ring->rdata);
+ ring->rdata = NULL;
+ }
+
+ if (ring->rx_hdr_pa.pages) {
+ dma_unmap_page(pdata->dev, ring->rx_hdr_pa.pages_dma,
+ ring->rx_hdr_pa.pages_len, DMA_FROM_DEVICE);
+ put_page(ring->rx_hdr_pa.pages);
+
+ ring->rx_hdr_pa.pages = NULL;
+ ring->rx_hdr_pa.pages_len = 0;
+ ring->rx_hdr_pa.pages_offset = 0;
+ ring->rx_hdr_pa.pages_dma = 0;
+ }
+
+ if (ring->rx_buf_pa.pages) {
+ dma_unmap_page(pdata->dev, ring->rx_buf_pa.pages_dma,
+ ring->rx_buf_pa.pages_len, DMA_FROM_DEVICE);
+ put_page(ring->rx_buf_pa.pages);
+
+ ring->rx_buf_pa.pages = NULL;
+ ring->rx_buf_pa.pages_len = 0;
+ ring->rx_buf_pa.pages_offset = 0;
+ ring->rx_buf_pa.pages_dma = 0;
+ }
+
+ if (ring->rdesc) {
+ dma_free_coherent(pdata->dev,
+ (sizeof(struct xgbe_ring_desc) *
+ ring->rdesc_count),
+ ring->rdesc, ring->rdesc_dma);
+ ring->rdesc = NULL;
+ }
+}
+
+static void xgbe_free_ring_resources(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ DBGPR("-->xgbe_free_ring_resources\n");
+
+ for (i = 0; i < pdata->channel_count; i++) {
+ channel = pdata->channel[i];
+ xgbe_free_ring(pdata, channel->tx_ring);
+ xgbe_free_ring(pdata, channel->rx_ring);
+ }
+
+ DBGPR("<--xgbe_free_ring_resources\n");
+}
+
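+/* Allocate zeroed memory from a specific NUMA node, falling back to an
+ * allocation from any node if the node-local attempt fails.
+ */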
+static void *xgbe_alloc_node(size_t size, int node)
+{
+ void *mem;
+
+ mem = kzalloc_node(size, GFP_KERNEL, node);
+ if (!mem)
+ mem = kzalloc(size, GFP_KERNEL);
+
+ return mem;
+}
+
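+/* Allocate coherent DMA memory from a specific NUMA node by temporarily
+ * overriding the device's node, falling back to the device's default
+ * node if the node-local attempt fails.
+ */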
+static void *xgbe_dma_alloc_node(struct device *dev, size_t size,
+ dma_addr_t *dma, int node)
+{
+ void *mem;
+ int cur_node = dev_to_node(dev);
+
+ set_dev_node(dev, node);
+ mem = dma_alloc_coherent(dev, size, dma, GFP_KERNEL);
+ set_dev_node(dev, cur_node);
+
+ if (!mem)
+ mem = dma_alloc_coherent(dev, size, dma, GFP_KERNEL);
+
+ return mem;
+}
+
+static int xgbe_init_ring(struct xgbe_prv_data *pdata,
+ struct xgbe_ring *ring, unsigned int rdesc_count)
+{
+ size_t size;
+
+ if (!ring)
+ return 0;
+
+ /* Descriptors */
+ size = rdesc_count * sizeof(struct xgbe_ring_desc);
+
+ ring->rdesc_count = rdesc_count;
+ ring->rdesc = xgbe_dma_alloc_node(pdata->dev, size, &ring->rdesc_dma,
+ ring->node);
+ if (!ring->rdesc)
+ return -ENOMEM;
+
+ /* Descriptor information */
+ size = rdesc_count * sizeof(struct xgbe_ring_data);
+
+ ring->rdata = xgbe_alloc_node(size, ring->node);
+ if (!ring->rdata)
+ return -ENOMEM;
+
+ netif_dbg(pdata, drv, pdata->netdev,
+ "rdesc=%p, rdesc_dma=%pad, rdata=%p, node=%d\n",
+ ring->rdesc, &ring->rdesc_dma, ring->rdata, ring->node);
+
+ return 0;
+}
+
+static int xgbe_alloc_ring_resources(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < pdata->channel_count; i++) {
+ channel = pdata->channel[i];
+ netif_dbg(pdata, drv, pdata->netdev, "%s - Tx ring:\n",
+ channel->name);
+
+ ret = xgbe_init_ring(pdata, channel->tx_ring,
+ pdata->tx_desc_count);
+ if (ret) {
+ netdev_alert(pdata->netdev,
+ "error initializing Tx ring\n");
+ goto err_ring;
+ }
+
+ netif_dbg(pdata, drv, pdata->netdev, "%s - Rx ring:\n",
+ channel->name);
+
+ ret = xgbe_init_ring(pdata, channel->rx_ring,
+ pdata->rx_desc_count);
+ if (ret) {
+ netdev_alert(pdata->netdev,
+ "error initializing Rx ring\n");
+ goto err_ring;
+ }
+ }
+
+ return 0;
+
+err_ring:
+ xgbe_free_ring_resources(pdata);
+
+ return ret;
+}
+
+static int xgbe_alloc_pages(struct xgbe_prv_data *pdata,
+ struct xgbe_page_alloc *pa, int alloc_order,
+ int node)
+{
+ struct page *pages = NULL;
+ dma_addr_t pages_dma;
+ gfp_t gfp;
+ int order;
+
+again:
+ order = alloc_order;
+
+ /* Try to obtain pages, decreasing order if necessary */
+ gfp = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN;
+ while (order >= 0) {
+ pages = alloc_pages_node(node, gfp, order);
+ if (pages)
+ break;
+
+ order--;
+ }
+
+ /* If we couldn't get local pages, try getting from anywhere */
+ if (!pages && (node != NUMA_NO_NODE)) {
+ node = NUMA_NO_NODE;
+ goto again;
+ }
+
+ if (!pages)
+ return -ENOMEM;
+
+ /* Map the pages */
+ pages_dma = dma_map_page(pdata->dev, pages, 0,
+ PAGE_SIZE << order, DMA_FROM_DEVICE);
+ if (dma_mapping_error(pdata->dev, pages_dma)) {
+ put_page(pages);
+ return -ENOMEM;
+ }
+
+ pa->pages = pages;
+ pa->pages_len = PAGE_SIZE << order;
+ pa->pages_offset = 0;
+ pa->pages_dma = pages_dma;
+
+ return 0;
+}
+
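+/* Carve a len-sized buffer out of the current page allocation, taking a
+ * page reference for the descriptor. Once the allocation cannot satisfy
+ * another buffer of this size, make this descriptor responsible for the
+ * final unmap and force a fresh allocation on the next call.
+ */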
+static void xgbe_set_buffer_data(struct xgbe_buffer_data *bd,
+ struct xgbe_page_alloc *pa,
+ unsigned int len)
+{
+ get_page(pa->pages);
+ bd->pa = *pa;
+
+ bd->dma_base = pa->pages_dma;
+ bd->dma_off = pa->pages_offset;
+ bd->dma_len = len;
+
+ pa->pages_offset += len;
+ if ((pa->pages_offset + len) > pa->pages_len) {
+ /* This data descriptor is responsible for unmapping page(s) */
+ bd->pa_unmap = *pa;
+
+ /* Get a new allocation next time */
+ pa->pages = NULL;
+ pa->pages_len = 0;
+ pa->pages_offset = 0;
+ pa->pages_dma = 0;
+ }
+}
+
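+/* Each Rx ring draws from two page pools: order-0 pages for packet
+ * headers and higher-order pages for packet data. Replenish whichever
+ * pool is exhausted, then assign the descriptor's header and data
+ * buffers from them.
+ */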
+static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
+ struct xgbe_ring *ring,
+ struct xgbe_ring_data *rdata)
+{
+ int ret;
+
+ if (!ring->rx_hdr_pa.pages) {
+ ret = xgbe_alloc_pages(pdata, &ring->rx_hdr_pa, 0, ring->node);
+ if (ret)
+ return ret;
+ }
+
+ if (!ring->rx_buf_pa.pages) {
+ ret = xgbe_alloc_pages(pdata, &ring->rx_buf_pa,
+ PAGE_ALLOC_COSTLY_ORDER, ring->node);
+ if (ret)
+ return ret;
+ }
+
+ /* Set up the header page info */
+ xgbe_set_buffer_data(&rdata->rx.hdr, &ring->rx_hdr_pa,
+ XGBE_SKB_ALLOC_SIZE);
+
+ /* Set up the buffer page info */
+ xgbe_set_buffer_data(&rdata->rx.buf, &ring->rx_buf_pa,
+ pdata->rx_buf_size);
+
+ return 0;
+}
+
+static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_channel *channel;
+ struct xgbe_ring *ring;
+ struct xgbe_ring_data *rdata;
+ struct xgbe_ring_desc *rdesc;
+ dma_addr_t rdesc_dma;
+ unsigned int i, j;
+
+ DBGPR("-->xgbe_wrapper_tx_descriptor_init\n");
+
+ for (i = 0; i < pdata->channel_count; i++) {
+ channel = pdata->channel[i];
+ ring = channel->tx_ring;
+ if (!ring)
+ break;
+
+ rdesc = ring->rdesc;
+ rdesc_dma = ring->rdesc_dma;
+
+ for (j = 0; j < ring->rdesc_count; j++) {
+ rdata = XGBE_GET_DESC_DATA(ring, j);
+
+ rdata->rdesc = rdesc;
+ rdata->rdesc_dma = rdesc_dma;
+
+ rdesc++;
+ rdesc_dma += sizeof(struct xgbe_ring_desc);
+ }
+
+ ring->cur = 0;
+ ring->dirty = 0;
+ memset(&ring->tx, 0, sizeof(ring->tx));
+
+ hw_if->tx_desc_init(channel);
+ }
+
+ DBGPR("<--xgbe_wrapper_tx_descriptor_init\n");
+}
+
+static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_channel *channel;
+ struct xgbe_ring *ring;
+ struct xgbe_ring_desc *rdesc;
+ struct xgbe_ring_data *rdata;
+ dma_addr_t rdesc_dma;
+ unsigned int i, j;
+
+ DBGPR("-->xgbe_wrapper_rx_descriptor_init\n");
+
+ for (i = 0; i < pdata->channel_count; i++) {
+ channel = pdata->channel[i];
+ ring = channel->rx_ring;
+ if (!ring)
+ break;
+
+ rdesc = ring->rdesc;
+ rdesc_dma = ring->rdesc_dma;
+
+ for (j = 0; j < ring->rdesc_count; j++) {
+ rdata = XGBE_GET_DESC_DATA(ring, j);
+
+ rdata->rdesc = rdesc;
+ rdata->rdesc_dma = rdesc_dma;
+
+ if (xgbe_map_rx_buffer(pdata, ring, rdata))
+ break;
+
+ rdesc++;
+ rdesc_dma += sizeof(struct xgbe_ring_desc);
+ }
+
+ ring->cur = 0;
+ ring->dirty = 0;
+
+ hw_if->rx_desc_init(channel);
+ }
+
+ DBGPR("<--xgbe_wrapper_rx_descriptor_init\n");
+}
+
+static void xgbe_unmap_rdata(struct xgbe_prv_data *pdata,
+ struct xgbe_ring_data *rdata)
+{
+ if (rdata->skb_dma) {
+ if (rdata->mapped_as_page) {
+ dma_unmap_page(pdata->dev, rdata->skb_dma,
+ rdata->skb_dma_len, DMA_TO_DEVICE);
+ } else {
+ dma_unmap_single(pdata->dev, rdata->skb_dma,
+ rdata->skb_dma_len, DMA_TO_DEVICE);
+ }
+ rdata->skb_dma = 0;
+ rdata->skb_dma_len = 0;
+ }
+
+ if (rdata->skb) {
+ dev_kfree_skb_any(rdata->skb);
+ rdata->skb = NULL;
+ }
+
+ if (rdata->rx.hdr.pa.pages)
+ put_page(rdata->rx.hdr.pa.pages);
+
+ if (rdata->rx.hdr.pa_unmap.pages) {
+ dma_unmap_page(pdata->dev, rdata->rx.hdr.pa_unmap.pages_dma,
+ rdata->rx.hdr.pa_unmap.pages_len,
+ DMA_FROM_DEVICE);
+ put_page(rdata->rx.hdr.pa_unmap.pages);
+ }
+
+ if (rdata->rx.buf.pa.pages)
+ put_page(rdata->rx.buf.pa.pages);
+
+ if (rdata->rx.buf.pa_unmap.pages) {
+ dma_unmap_page(pdata->dev, rdata->rx.buf.pa_unmap.pages_dma,
+ rdata->rx.buf.pa_unmap.pages_len,
+ DMA_FROM_DEVICE);
+ put_page(rdata->rx.buf.pa_unmap.pages);
+ }
+
+ memset(&rdata->tx, 0, sizeof(rdata->tx));
+ memset(&rdata->rx, 0, sizeof(rdata->rx));
+
+ rdata->mapped_as_page = 0;
+
+ if (rdata->state_saved) {
+ rdata->state_saved = 0;
+ rdata->state.skb = NULL;
+ rdata->state.len = 0;
+ rdata->state.error = 0;
+ }
+}
+
+static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
+{
+ struct xgbe_prv_data *pdata = channel->pdata;
+ struct xgbe_ring *ring = channel->tx_ring;
+ struct xgbe_ring_data *rdata;
+ struct xgbe_packet_data *packet;
+ skb_frag_t *frag;
+ dma_addr_t skb_dma;
+ unsigned int start_index, cur_index;
+ unsigned int offset, tso, vlan, datalen, len;
+ unsigned int i;
+
+ DBGPR("-->xgbe_map_tx_skb: cur = %d\n", ring->cur);
+
+ offset = 0;
+ start_index = ring->cur;
+ cur_index = ring->cur;
+
+ packet = &ring->packet_data;
+ packet->rdesc_count = 0;
+ packet->length = 0;
+
+ tso = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
+ TSO_ENABLE);
+ vlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
+ VLAN_CTAG);
+
+ /* Save space for a context descriptor if needed */
+ if ((tso && (packet->mss != ring->tx.cur_mss)) ||
+ (vlan && (packet->vlan_ctag != ring->tx.cur_vlan_ctag)))
+ cur_index++;
+ rdata = XGBE_GET_DESC_DATA(ring, cur_index);
+
+ if (tso) {
+ /* Map the TSO header */
+ skb_dma = dma_map_single(pdata->dev, skb->data,
+ packet->header_len, DMA_TO_DEVICE);
+ if (dma_mapping_error(pdata->dev, skb_dma)) {
+ netdev_alert(pdata->netdev, "dma_map_single failed\n");
+ goto err_out;
+ }
+ rdata->skb_dma = skb_dma;
+ rdata->skb_dma_len = packet->header_len;
+ netif_dbg(pdata, tx_queued, pdata->netdev,
+ "skb header: index=%u, dma=%pad, len=%u\n",
+ cur_index, &skb_dma, packet->header_len);
+
+ offset = packet->header_len;
+
+ packet->length += packet->header_len;
+
+ cur_index++;
+ rdata = XGBE_GET_DESC_DATA(ring, cur_index);
+ }
+
+ /* Map the (remainder of the) packet */
+ for (datalen = skb_headlen(skb) - offset; datalen; ) {
+ len = min_t(unsigned int, datalen, XGBE_TX_MAX_BUF_SIZE);
+
+ skb_dma = dma_map_single(pdata->dev, skb->data + offset, len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(pdata->dev, skb_dma)) {
+ netdev_alert(pdata->netdev, "dma_map_single failed\n");
+ goto err_out;
+ }
+ rdata->skb_dma = skb_dma;
+ rdata->skb_dma_len = len;
+ netif_dbg(pdata, tx_queued, pdata->netdev,
+ "skb data: index=%u, dma=%pad, len=%u\n",
+ cur_index, &skb_dma, len);
+
+ datalen -= len;
+ offset += len;
+
+ packet->length += len;
+
+ cur_index++;
+ rdata = XGBE_GET_DESC_DATA(ring, cur_index);
+ }
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ netif_dbg(pdata, tx_queued, pdata->netdev,
+ "mapping frag %u\n", i);
+
+ frag = &skb_shinfo(skb)->frags[i];
+ offset = 0;
+
+ for (datalen = skb_frag_size(frag); datalen; ) {
+ len = min_t(unsigned int, datalen,
+ XGBE_TX_MAX_BUF_SIZE);
+
+ skb_dma = skb_frag_dma_map(pdata->dev, frag, offset,
+ len, DMA_TO_DEVICE);
+ if (dma_mapping_error(pdata->dev, skb_dma)) {
+ netdev_alert(pdata->netdev,
+ "skb_frag_dma_map failed\n");
+ goto err_out;
+ }
+ rdata->skb_dma = skb_dma;
+ rdata->skb_dma_len = len;
+ rdata->mapped_as_page = 1;
+ netif_dbg(pdata, tx_queued, pdata->netdev,
+ "skb frag: index=%u, dma=%pad, len=%u\n",
+ cur_index, &skb_dma, len);
+
+ datalen -= len;
+ offset += len;
+
+ packet->length += len;
+
+ cur_index++;
+ rdata = XGBE_GET_DESC_DATA(ring, cur_index);
+ }
+ }
+
+ /* Save the skb address in the last entry. We always have some data
+ * that has been mapped so rdata is always advanced past the last
+ * piece of mapped data - use the entry pointed to by cur_index - 1.
+ */
+ rdata = XGBE_GET_DESC_DATA(ring, cur_index - 1);
+ rdata->skb = skb;
+
+ /* Save the number of descriptor entries used */
+ packet->rdesc_count = cur_index - start_index;
+
+ DBGPR("<--xgbe_map_tx_skb: count=%u\n", packet->rdesc_count);
+
+ return packet->rdesc_count;
+
+err_out:
+ while (start_index < cur_index) {
+ rdata = XGBE_GET_DESC_DATA(ring, start_index++);
+ xgbe_unmap_rdata(pdata, rdata);
+ }
+
+ DBGPR("<--xgbe_map_tx_skb: count=0\n");
+
+ return 0;
+}
+
+void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
+{
+ DBGPR("-->xgbe_init_function_ptrs_desc\n");
+
+ desc_if->alloc_ring_resources = xgbe_alloc_ring_resources;
+ desc_if->free_ring_resources = xgbe_free_ring_resources;
+ desc_if->map_tx_skb = xgbe_map_tx_skb;
+ desc_if->map_rx_buffer = xgbe_map_rx_buffer;
+ desc_if->unmap_rdata = xgbe_unmap_rdata;
+ desc_if->wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init;
+ desc_if->wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init;
+
+ DBGPR("<--xgbe_init_function_ptrs_desc\n");
+}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
new file mode 100644
index 000000000..decc1c09a
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -0,0 +1,3654 @@
+/*
+ * AMD 10Gb Ethernet driver
+ *
+ * This file is available to you under your choice of the following two
+ * licenses:
+ *
+ * License 1: GPLv2
+ *
+ * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
+ *
+ * This file is free software; you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * License 2: Modified BSD
+ *
+ * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Advanced Micro Devices, Inc. nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/phy.h>
+#include <linux/mdio.h>
+#include <linux/clk.h>
+#include <linux/bitrev.h>
+#include <linux/crc32.h>
+#include <linux/crc32poly.h>
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+static inline unsigned int xgbe_get_max_frame(struct xgbe_prv_data *pdata)
+{
+ return pdata->netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+}
+
+static unsigned int xgbe_usec_to_riwt(struct xgbe_prv_data *pdata,
+ unsigned int usec)
+{
+ unsigned long rate;
+ unsigned int ret;
+
+ DBGPR("-->xgbe_usec_to_riwt\n");
+
+ rate = pdata->sysclk_rate;
+
+ /*
+ * Convert the input usec value to the watchdog timer value. Each
+ * watchdog timer value is equivalent to 256 clock cycles.
+ * Calculate the required value as:
+ *   ( usec * ( system_clock_hz / 10^6 ) ) / 256
+ */
+ ret = (usec * (rate / 1000000)) / 256;
+
+ DBGPR("<--xgbe_usec_to_riwt\n");
+
+ return ret;
+}
+
+static unsigned int xgbe_riwt_to_usec(struct xgbe_prv_data *pdata,
+ unsigned int riwt)
+{
+ unsigned long rate;
+ unsigned int ret;
+
+ DBGPR("-->xgbe_riwt_to_usec\n");
+
+ rate = pdata->sysclk_rate;
+
+ /*
+ * Convert the input watchdog timer value to the usec value. Each
+ * watchdog timer value is equivalent to 256 clock cycles.
+ * Calculate the required value as:
+ *   ( riwt * 256 ) / ( system_clock_hz / 10^6 )
+ */
+ ret = (riwt * 256) / (rate / 1000000);
+
+ DBGPR("<--xgbe_riwt_to_usec\n");
+
+ return ret;
+}
+
+static int xgbe_config_pbl_val(struct xgbe_prv_data *pdata)
+{
+ unsigned int pblx8, pbl;
+ unsigned int i;
+
+ pblx8 = DMA_PBL_X8_DISABLE;
+ pbl = pdata->pbl;
+
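+ /* PBL values above 32 require x8 mode: program pbl/8 and let the
+ * hardware multiply the burst length by 8
+ */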
+ if (pdata->pbl > 32) {
+ pblx8 = DMA_PBL_X8_ENABLE;
+ pbl >>= 3;
+ }
+
+ for (i = 0; i < pdata->channel_count; i++) {
+ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_CR, PBLX8,
+ pblx8);
+
+ if (pdata->channel[i]->tx_ring)
+ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR,
+ PBL, pbl);
+
+ if (pdata->channel[i]->rx_ring)
+ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR,
+ PBL, pbl);
+ }
+
+ return 0;
+}
+
+static int xgbe_config_osp_mode(struct xgbe_prv_data *pdata)
+{
+ unsigned int i;
+
+ for (i = 0; i < pdata->channel_count; i++) {
+ if (!pdata->channel[i]->tx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, OSP,
+ pdata->tx_osp_mode);
+ }
+
+ return 0;
+}
+
+static int xgbe_config_rsf_mode(struct xgbe_prv_data *pdata, unsigned int val)
+{
+ unsigned int i;
+
+ for (i = 0; i < pdata->rx_q_count; i++)
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RSF, val);
+
+ return 0;
+}
+
+static int xgbe_config_tsf_mode(struct xgbe_prv_data *pdata, unsigned int val)
+{
+ unsigned int i;
+
+ for (i = 0; i < pdata->tx_q_count; i++)
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TSF, val);
+
+ return 0;
+}
+
+static int xgbe_config_rx_threshold(struct xgbe_prv_data *pdata,
+ unsigned int val)
+{
+ unsigned int i;
+
+ for (i = 0; i < pdata->rx_q_count; i++)
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RTC, val);
+
+ return 0;
+}
+
+static int xgbe_config_tx_threshold(struct xgbe_prv_data *pdata,
+ unsigned int val)
+{
+ unsigned int i;
+
+ for (i = 0; i < pdata->tx_q_count; i++)
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TTC, val);
+
+ return 0;
+}
+
+static int xgbe_config_rx_coalesce(struct xgbe_prv_data *pdata)
+{
+ unsigned int i;
+
+ for (i = 0; i < pdata->channel_count; i++) {
+ if (!pdata->channel[i]->rx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RIWT, RWT,
+ pdata->rx_riwt);
+ }
+
+ return 0;
+}
+
+static int xgbe_config_tx_coalesce(struct xgbe_prv_data *pdata)
+{
+ return 0;
+}
+
+static void xgbe_config_rx_buffer_size(struct xgbe_prv_data *pdata)
+{
+ unsigned int i;
+
+ for (i = 0; i < pdata->channel_count; i++) {
+ if (!pdata->channel[i]->rx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, RBSZ,
+ pdata->rx_buf_size);
+ }
+}
+
+static void xgbe_config_tso_mode(struct xgbe_prv_data *pdata)
+{
+ unsigned int i;
+
+ for (i = 0; i < pdata->channel_count; i++) {
+ if (!pdata->channel[i]->tx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, TSE, 1);
+ }
+}
+
+static void xgbe_config_sph_mode(struct xgbe_prv_data *pdata)
+{
+ unsigned int i;
+
+ for (i = 0; i < pdata->channel_count; i++) {
+ if (!pdata->channel[i]->rx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_CR, SPH, 1);
+ }
+
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, HDSMS, XGBE_SPH_HDSMS_SIZE);
+}
+
+static int xgbe_write_rss_reg(struct xgbe_prv_data *pdata, unsigned int type,
+ unsigned int index, unsigned int val)
+{
+ unsigned int wait;
+ int ret = 0;
+
+ mutex_lock(&pdata->rss_mutex);
+
+ if (XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB)) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ XGMAC_IOWRITE(pdata, MAC_RSSDR, val);
+
+ XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, RSSIA, index);
+ XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, ADDRT, type);
+ XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, CT, 0);
+ XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, OB, 1);
+
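+ /* Poll for the hardware to clear the OB (operation busy) bit */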
+ wait = 1000;
+ while (wait--) {
+ if (!XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB))
+ goto unlock;
+
+ usleep_range(1000, 1500);
+ }
+
+ ret = -EBUSY;
+
+unlock:
+ mutex_unlock(&pdata->rss_mutex);
+
+ return ret;
+}
+
+static int xgbe_write_rss_hash_key(struct xgbe_prv_data *pdata)
+{
+ unsigned int key_regs = sizeof(pdata->rss_key) / sizeof(u32);
+ unsigned int *key = (unsigned int *)&pdata->rss_key;
+ int ret;
+
+ while (key_regs--) {
+ ret = xgbe_write_rss_reg(pdata, XGBE_RSS_HASH_KEY_TYPE,
+ key_regs, *key++);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int xgbe_write_rss_lookup_table(struct xgbe_prv_data *pdata)
+{
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) {
+ ret = xgbe_write_rss_reg(pdata,
+ XGBE_RSS_LOOKUP_TABLE_TYPE, i,
+ pdata->rss_table[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int xgbe_set_rss_hash_key(struct xgbe_prv_data *pdata, const u8 *key)
+{
+ memcpy(pdata->rss_key, key, sizeof(pdata->rss_key));
+
+ return xgbe_write_rss_hash_key(pdata);
+}
+
+static int xgbe_set_rss_lookup_table(struct xgbe_prv_data *pdata,
+ const u32 *table)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++)
+ XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH, table[i]);
+
+ return xgbe_write_rss_lookup_table(pdata);
+}
+
+static int xgbe_enable_rss(struct xgbe_prv_data *pdata)
+{
+ int ret;
+
+ if (!pdata->hw_feat.rss)
+ return -EOPNOTSUPP;
+
+ /* Program the hash key */
+ ret = xgbe_write_rss_hash_key(pdata);
+ if (ret)
+ return ret;
+
+ /* Program the lookup table */
+ ret = xgbe_write_rss_lookup_table(pdata);
+ if (ret)
+ return ret;
+
+ /* Set the RSS options */
+ XGMAC_IOWRITE(pdata, MAC_RSSCR, pdata->rss_options);
+
+ /* Enable RSS */
+ XGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 1);
+
+ return 0;
+}
+
+static int xgbe_disable_rss(struct xgbe_prv_data *pdata)
+{
+ if (!pdata->hw_feat.rss)
+ return -EOPNOTSUPP;
+
+ XGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 0);
+
+ return 0;
+}
+
+static void xgbe_config_rss(struct xgbe_prv_data *pdata)
+{
+ int ret;
+
+ if (!pdata->hw_feat.rss)
+ return;
+
+ if (pdata->netdev->features & NETIF_F_RXHASH)
+ ret = xgbe_enable_rss(pdata);
+ else
+ ret = xgbe_disable_rss(pdata);
+
+ if (ret)
+ netdev_err(pdata->netdev,
+ "error configuring RSS, RSS disabled\n");
+}
+
+static bool xgbe_is_pfc_queue(struct xgbe_prv_data *pdata,
+ unsigned int queue)
+{
+ unsigned int prio, tc;
+
+ for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) {
+ /* Does this queue handle the priority? */
+ if (pdata->prio2q_map[prio] != queue)
+ continue;
+
+ /* Get the Traffic Class for this priority */
+ tc = pdata->ets->prio_tc[prio];
+
+ /* Check if PFC is enabled for this traffic class */
+ if (pdata->pfc->pfc_en & (1 << tc))
+ return true;
+ }
+
+ return false;
+}
+
+static void xgbe_set_vxlan_id(struct xgbe_prv_data *pdata)
+{
+ /* Program the VXLAN port */
+ XGMAC_IOWRITE_BITS(pdata, MAC_TIR, TNID, pdata->vxlan_port);
+
+ netif_dbg(pdata, drv, pdata->netdev, "VXLAN tunnel id set to %hx\n",
+ pdata->vxlan_port);
+}
+
+static void xgbe_enable_vxlan(struct xgbe_prv_data *pdata)
+{
+ if (!pdata->hw_feat.vxn)
+ return;
+
+ /* Program the VXLAN port */
+ xgbe_set_vxlan_id(pdata);
+
+ /* Allow for IPv6/UDP zero-checksum VXLAN packets */
+ XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VUCC, 1);
+
+ /* Enable VXLAN tunneling mode */
+ XGMAC_IOWRITE_BITS(pdata, MAC_TCR, VNM, 0);
+ XGMAC_IOWRITE_BITS(pdata, MAC_TCR, VNE, 1);
+
+ netif_dbg(pdata, drv, pdata->netdev, "VXLAN acceleration enabled\n");
+}
+
+static void xgbe_disable_vxlan(struct xgbe_prv_data *pdata)
+{
+ if (!pdata->hw_feat.vxn)
+ return;
+
+ /* Disable tunneling mode */
+ XGMAC_IOWRITE_BITS(pdata, MAC_TCR, VNE, 0);
+
+ /* Clear IPv6/UDP zero-checksum VXLAN packets setting */
+ XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VUCC, 0);
+
+ /* Clear the VXLAN port */
+ XGMAC_IOWRITE_BITS(pdata, MAC_TIR, TNID, 0);
+
+ netif_dbg(pdata, drv, pdata->netdev, "VXLAN acceleration disabled\n");
+}
+
+static unsigned int xgbe_get_fc_queue_count(struct xgbe_prv_data *pdata)
+{
+ unsigned int max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
+
+ /* From MAC version 30H, the TFCR is per priority instead of per queue */
+ if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) >= 0x30)
+ return max_q_count;
+ else
+ return min_t(unsigned int, pdata->tx_q_count, max_q_count);
+}
+
+static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata)
+{
+ unsigned int reg, reg_val;
+ unsigned int i, q_count;
+
+ /* Clear MTL flow control */
+ for (i = 0; i < pdata->rx_q_count; i++)
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0);
+
+ /* Clear MAC flow control */
+ q_count = xgbe_get_fc_queue_count(pdata);
+ reg = MAC_Q0TFCR;
+ for (i = 0; i < q_count; i++) {
+ reg_val = XGMAC_IOREAD(pdata, reg);
+ XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 0);
+ XGMAC_IOWRITE(pdata, reg, reg_val);
+
+ reg += MAC_QTFCR_INC;
+ }
+
+ return 0;
+}
+
+static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata)
+{
+ struct ieee_pfc *pfc = pdata->pfc;
+ struct ieee_ets *ets = pdata->ets;
+ unsigned int reg, reg_val;
+ unsigned int i, q_count;
+
+ /* Set MTL flow control */
+ for (i = 0; i < pdata->rx_q_count; i++) {
+ unsigned int ehfc = 0;
+
+ if (pdata->rx_rfd[i]) {
+ /* Flow control thresholds are established */
+ if (pfc && ets) {
+ if (xgbe_is_pfc_queue(pdata, i))
+ ehfc = 1;
+ } else {
+ ehfc = 1;
+ }
+ }
+
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, ehfc);
+
+ netif_dbg(pdata, drv, pdata->netdev,
+ "flow control %s for RXq%u\n",
+ ehfc ? "enabled" : "disabled", i);
+ }
+
+ /* Set MAC flow control */
+ q_count = xgbe_get_fc_queue_count(pdata);
+ reg = MAC_Q0TFCR;
+ for (i = 0; i < q_count; i++) {
+ reg_val = XGMAC_IOREAD(pdata, reg);
+
+ /* Enable transmit flow control */
+ XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 1);
+ /* Set pause time */
+ XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, PT, 0xffff);
+
+ XGMAC_IOWRITE(pdata, reg, reg_val);
+
+ reg += MAC_QTFCR_INC;
+ }
+
+ return 0;
+}
+
+static int xgbe_disable_rx_flow_control(struct xgbe_prv_data *pdata)
+{
+ XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 0);
+
+ return 0;
+}
+
+static int xgbe_enable_rx_flow_control(struct xgbe_prv_data *pdata)
+{
+ XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 1);
+
+ return 0;
+}
+
+static int xgbe_config_tx_flow_control(struct xgbe_prv_data *pdata)
+{
+ struct ieee_pfc *pfc = pdata->pfc;
+
+ if (pdata->tx_pause || (pfc && pfc->pfc_en))
+ xgbe_enable_tx_flow_control(pdata);
+ else
+ xgbe_disable_tx_flow_control(pdata);
+
+ return 0;
+}
+
+static int xgbe_config_rx_flow_control(struct xgbe_prv_data *pdata)
+{
+ struct ieee_pfc *pfc = pdata->pfc;
+
+ if (pdata->rx_pause || (pfc && pfc->pfc_en))
+ xgbe_enable_rx_flow_control(pdata);
+ else
+ xgbe_disable_rx_flow_control(pdata);
+
+ return 0;
+}
+
+static void xgbe_config_flow_control(struct xgbe_prv_data *pdata)
+{
+ struct ieee_pfc *pfc = pdata->pfc;
+
+ xgbe_config_tx_flow_control(pdata);
+ xgbe_config_rx_flow_control(pdata);
+
+ XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE,
+ (pfc && pfc->pfc_en) ? 1 : 0);
+}
+
+static void xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i, ver;
+
+ /* Set the interrupt mode if supported */
+ if (pdata->channel_irq_mode)
+ XGMAC_IOWRITE_BITS(pdata, DMA_MR, INTM,
+ pdata->channel_irq_mode);
+
+ ver = XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER);
+
+ for (i = 0; i < pdata->channel_count; i++) {
+ channel = pdata->channel[i];
+
+ /* Clear all the interrupts which are set */
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_SR,
+ XGMAC_DMA_IOREAD(channel, DMA_CH_SR));
+
+ /* Clear all interrupt enable bits */
+ channel->curr_ier = 0;
+
+ /* Enable the following interrupts
+ * NIE - Normal Interrupt Summary Enable
+ * AIE - Abnormal Interrupt Summary Enable
+ * FBEE - Fatal Bus Error Enable
+ */
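+ /* The NIE and AIE bit positions differ prior to MAC version 2.1 */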
+ if (ver < 0x21) {
+ XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, NIE20, 1);
+ XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, AIE20, 1);
+ } else {
+ XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, NIE, 1);
+ XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, AIE, 1);
+ }
+ XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, FBEE, 1);
+
+ if (channel->tx_ring) {
+ /* Enable the following Tx interrupts
+ * TIE - Transmit Interrupt Enable (unless using
+ * per channel interrupts in edge triggered
+ * mode)
+ */
+ if (!pdata->per_channel_irq || pdata->channel_irq_mode)
+ XGMAC_SET_BITS(channel->curr_ier,
+ DMA_CH_IER, TIE, 1);
+ }
+ if (channel->rx_ring) {
+ /* Enable the following Rx interrupts
+ * RBUE - Receive Buffer Unavailable Enable
+ * RIE - Receive Interrupt Enable (unless using
+ * per channel interrupts in edge triggered
+ * mode)
+ */
+ XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RBUE, 1);
+ if (!pdata->per_channel_irq || pdata->channel_irq_mode)
+ XGMAC_SET_BITS(channel->curr_ier,
+ DMA_CH_IER, RIE, 1);
+ }
+
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, channel->curr_ier);
+ }
+}
+
+static void xgbe_enable_mtl_interrupts(struct xgbe_prv_data *pdata)
+{
+ unsigned int mtl_q_isr;
+ unsigned int q_count, i;
+
+ q_count = max(pdata->hw_feat.tx_q_cnt, pdata->hw_feat.rx_q_cnt);
+ for (i = 0; i < q_count; i++) {
+ /* Clear all the interrupts which are set */
+ mtl_q_isr = XGMAC_MTL_IOREAD(pdata, i, MTL_Q_ISR);
+ XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_ISR, mtl_q_isr);
+
+ /* No MTL interrupts to be enabled */
+ XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_IER, 0);
+ }
+}
+
+static void xgbe_enable_mac_interrupts(struct xgbe_prv_data *pdata)
+{
+ unsigned int mac_ier = 0;
+
+ /* Enable Timestamp interrupt */
+ XGMAC_SET_BITS(mac_ier, MAC_IER, TSIE, 1);
+
+ XGMAC_IOWRITE(pdata, MAC_IER, mac_ier);
+
+ /* Enable all counter interrupts */
+ XGMAC_IOWRITE_BITS(pdata, MMC_RIER, ALL_INTERRUPTS, 0xffffffff);
+ XGMAC_IOWRITE_BITS(pdata, MMC_TIER, ALL_INTERRUPTS, 0xffffffff);
+
+ /* Enable MDIO single command completion interrupt */
+ XGMAC_IOWRITE_BITS(pdata, MAC_MDIOIER, SNGLCOMPIE, 1);
+}
+
+static void xgbe_enable_ecc_interrupts(struct xgbe_prv_data *pdata)
+{
+ unsigned int ecc_isr, ecc_ier = 0;
+
+ if (!pdata->vdata->ecc_support)
+ return;
+
+ /* Clear all the interrupts which are set */
+ ecc_isr = XP_IOREAD(pdata, XP_ECC_ISR);
+ XP_IOWRITE(pdata, XP_ECC_ISR, ecc_isr);
+
+ /* Enable ECC interrupts */
+ XP_SET_BITS(ecc_ier, XP_ECC_IER, TX_DED, 1);
+ XP_SET_BITS(ecc_ier, XP_ECC_IER, TX_SEC, 1);
+ XP_SET_BITS(ecc_ier, XP_ECC_IER, RX_DED, 1);
+ XP_SET_BITS(ecc_ier, XP_ECC_IER, RX_SEC, 1);
+ XP_SET_BITS(ecc_ier, XP_ECC_IER, DESC_DED, 1);
+ XP_SET_BITS(ecc_ier, XP_ECC_IER, DESC_SEC, 1);
+
+ XP_IOWRITE(pdata, XP_ECC_IER, ecc_ier);
+}
+
+static void xgbe_disable_ecc_ded(struct xgbe_prv_data *pdata)
+{
+ unsigned int ecc_ier;
+
+ ecc_ier = XP_IOREAD(pdata, XP_ECC_IER);
+
+ /* Disable ECC DED interrupts */
+ XP_SET_BITS(ecc_ier, XP_ECC_IER, TX_DED, 0);
+ XP_SET_BITS(ecc_ier, XP_ECC_IER, RX_DED, 0);
+ XP_SET_BITS(ecc_ier, XP_ECC_IER, DESC_DED, 0);
+
+ XP_IOWRITE(pdata, XP_ECC_IER, ecc_ier);
+}
+
+static void xgbe_disable_ecc_sec(struct xgbe_prv_data *pdata,
+ enum xgbe_ecc_sec sec)
+{
+ unsigned int ecc_ier;
+
+ ecc_ier = XP_IOREAD(pdata, XP_ECC_IER);
+
+ /* Disable ECC SEC interrupt */
+ switch (sec) {
+ case XGBE_ECC_SEC_TX:
+ XP_SET_BITS(ecc_ier, XP_ECC_IER, TX_SEC, 0);
+ break;
+ case XGBE_ECC_SEC_RX:
+ XP_SET_BITS(ecc_ier, XP_ECC_IER, RX_SEC, 0);
+ break;
+ case XGBE_ECC_SEC_DESC:
+ XP_SET_BITS(ecc_ier, XP_ECC_IER, DESC_SEC, 0);
+ break;
+ }
+
+ XP_IOWRITE(pdata, XP_ECC_IER, ecc_ier);
+}
+
+static int xgbe_set_speed(struct xgbe_prv_data *pdata, int speed)
+{
+ unsigned int ss;
+
+ switch (speed) {
+ case SPEED_1000:
+ ss = 0x03;
+ break;
+ case SPEED_2500:
+ ss = 0x02;
+ break;
+ case SPEED_10000:
+ ss = 0x00;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (XGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) != ss)
+ XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, ss);
+
+ return 0;
+}
+
+static int xgbe_enable_rx_vlan_stripping(struct xgbe_prv_data *pdata)
+{
+ /* Put the VLAN tag in the Rx descriptor */
+ XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLRXS, 1);
+
+ /* Don't check the VLAN type */
+ XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, DOVLTC, 1);
+
+ /* Check only C-TAG (0x8100) packets */
+ XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ERSVLM, 0);
+
+ /* Don't consider an S-TAG (0x88A8) packet as a VLAN packet */
+ XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ESVL, 0);
+
+ /* Enable VLAN tag stripping */
+ XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0x3);
+
+ return 0;
+}
+
+static int xgbe_disable_rx_vlan_stripping(struct xgbe_prv_data *pdata)
+{
+ XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0);
+
+ return 0;
+}
+
+static int xgbe_enable_rx_vlan_filtering(struct xgbe_prv_data *pdata)
+{
+ /* Enable VLAN filtering */
+ XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 1);
+
+ /* Enable VLAN Hash Table filtering */
+ XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTHM, 1);
+
+ /* Disable VLAN tag inverse matching */
+ XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTIM, 0);
+
+ /* Only filter on the lower 12-bits of the VLAN tag */
+ XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ETV, 1);
+
+ /* In order for the VLAN Hash Table filtering to be effective,
+ * the VLAN tag identifier in the VLAN Tag Register must not
+ * be zero. Set the VLAN tag identifier to "1" to enable the
+ * VLAN Hash Table filtering. This implies that a VLAN tag of
+ * 1 will always pass filtering.
+ */
+ XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VL, 1);
+
+ return 0;
+}
+
+static int xgbe_disable_rx_vlan_filtering(struct xgbe_prv_data *pdata)
+{
+ /* Disable VLAN filtering */
+ XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 0);
+
+ return 0;
+}
+
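+/* Compute the little-endian CRC32 of the low 12 bits of the VLAN ID,
+ * one bit at a time; the bit-reversed result indexes the 16-bit VLAN
+ * hash table built below.
+ */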
+static u32 xgbe_vid_crc32_le(__le16 vid_le)
+{
+ u32 crc = ~0;
+ u32 temp = 0;
+ unsigned char *data = (unsigned char *)&vid_le;
+ unsigned char data_byte = 0;
+ int i, bits;
+
+ bits = get_bitmask_order(VLAN_VID_MASK);
+ for (i = 0; i < bits; i++) {
+ if ((i % 8) == 0)
+ data_byte = data[i / 8];
+
+ temp = ((crc & 1) ^ data_byte) & 1;
+ crc >>= 1;
+ data_byte >>= 1;
+
+ if (temp)
+ crc ^= CRC32_POLY_LE;
+ }
+
+ return crc;
+}
+
+static int xgbe_update_vlan_hash_table(struct xgbe_prv_data *pdata)
+{
+ u32 crc;
+ u16 vid;
+ __le16 vid_le;
+ u16 vlan_hash_table = 0;
+
+ /* Generate the VLAN Hash Table value */
+ for_each_set_bit(vid, pdata->active_vlans, VLAN_N_VID) {
+ /* Get the CRC32 value of the VLAN ID */
+ vid_le = cpu_to_le16(vid);
+ crc = bitrev32(~xgbe_vid_crc32_le(vid_le)) >> 28;
+
+ vlan_hash_table |= (1 << crc);
+ }
+
+ /* Set the VLAN Hash Table filtering register */
+ XGMAC_IOWRITE_BITS(pdata, MAC_VLANHTR, VLHT, vlan_hash_table);
+
+ return 0;
+}
+
+static int xgbe_set_promiscuous_mode(struct xgbe_prv_data *pdata,
+ unsigned int enable)
+{
+ unsigned int val = enable ? 1 : 0;
+
+ if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PR) == val)
+ return 0;
+
+ netif_dbg(pdata, drv, pdata->netdev, "%s promiscuous mode\n",
+ enable ? "entering" : "leaving");
+ XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, val);
+
+ /* Hardware will still perform VLAN filtering in promiscuous mode */
+ if (enable) {
+ xgbe_disable_rx_vlan_filtering(pdata);
+ } else {
+ if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
+ xgbe_enable_rx_vlan_filtering(pdata);
+ }
+
+ return 0;
+}
+
+static int xgbe_set_all_multicast_mode(struct xgbe_prv_data *pdata,
+ unsigned int enable)
+{
+ unsigned int val = enable ? 1 : 0;
+
+ if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PM) == val)
+ return 0;
+
+ netif_dbg(pdata, drv, pdata->netdev, "%s allmulti mode\n",
+ enable ? "entering" : "leaving");
+ XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, val);
+
+ return 0;
+}
+
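+/* Program one MAC address register pair (high word, then low word) and
+ * advance the register offset; a NULL ha clears the entry.
+ */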
+static void xgbe_set_mac_reg(struct xgbe_prv_data *pdata,
+ struct netdev_hw_addr *ha, unsigned int *mac_reg)
+{
+ unsigned int mac_addr_hi, mac_addr_lo;
+ u8 *mac_addr;
+
+ mac_addr_lo = 0;
+ mac_addr_hi = 0;
+
+ if (ha) {
+ mac_addr = (u8 *)&mac_addr_lo;
+ mac_addr[0] = ha->addr[0];
+ mac_addr[1] = ha->addr[1];
+ mac_addr[2] = ha->addr[2];
+ mac_addr[3] = ha->addr[3];
+ mac_addr = (u8 *)&mac_addr_hi;
+ mac_addr[0] = ha->addr[4];
+ mac_addr[1] = ha->addr[5];
+
+ netif_dbg(pdata, drv, pdata->netdev,
+ "adding mac address %pM at %#x\n",
+ ha->addr, *mac_reg);
+
+ XGMAC_SET_BITS(mac_addr_hi, MAC_MACA1HR, AE, 1);
+ }
+
+ XGMAC_IOWRITE(pdata, *mac_reg, mac_addr_hi);
+ *mac_reg += MAC_MACA_INC;
+ XGMAC_IOWRITE(pdata, *mac_reg, mac_addr_lo);
+ *mac_reg += MAC_MACA_INC;
+}
+
+static void xgbe_set_mac_addn_addrs(struct xgbe_prv_data *pdata)
+{
+ struct net_device *netdev = pdata->netdev;
+ struct netdev_hw_addr *ha;
+ unsigned int mac_reg;
+ unsigned int addn_macs;
+
+ mac_reg = MAC_MACA1HR;
+ addn_macs = pdata->hw_feat.addn_mac;
+
+ if (netdev_uc_count(netdev) > addn_macs) {
+ xgbe_set_promiscuous_mode(pdata, 1);
+ } else {
+ netdev_for_each_uc_addr(ha, netdev) {
+ xgbe_set_mac_reg(pdata, ha, &mac_reg);
+ addn_macs--;
+ }
+
+ if (netdev_mc_count(netdev) > addn_macs) {
+ xgbe_set_all_multicast_mode(pdata, 1);
+ } else {
+ netdev_for_each_mc_addr(ha, netdev) {
+ xgbe_set_mac_reg(pdata, ha, &mac_reg);
+ addn_macs--;
+ }
+ }
+ }
+
+ /* Clear remaining additional MAC address entries */
+ while (addn_macs--)
+ xgbe_set_mac_reg(pdata, NULL, &mac_reg);
+}
+
+static void xgbe_set_mac_hash_table(struct xgbe_prv_data *pdata)
+{
+ struct net_device *netdev = pdata->netdev;
+ struct netdev_hw_addr *ha;
+ unsigned int hash_reg;
+ unsigned int hash_table_shift, hash_table_count;
+ u32 hash_table[XGBE_MAC_HASH_TABLE_SIZE];
+ u32 crc;
+ unsigned int i;
+
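+ /* The hash is the high bits of the bit-reversed CRC32 of the MAC
+ * address: 6 bits for a 64-entry table, 7 for 128, 8 for 256
+ */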
+ hash_table_shift = 26 - (pdata->hw_feat.hash_table_size >> 7);
+ hash_table_count = pdata->hw_feat.hash_table_size / 32;
+ memset(hash_table, 0, sizeof(hash_table));
+
+ /* Build the MAC Hash Table register values */
+ netdev_for_each_uc_addr(ha, netdev) {
+ crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN));
+ crc >>= hash_table_shift;
+ hash_table[crc >> 5] |= (1 << (crc & 0x1f));
+ }
+
+ netdev_for_each_mc_addr(ha, netdev) {
+ crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN));
+ crc >>= hash_table_shift;
+ hash_table[crc >> 5] |= (1 << (crc & 0x1f));
+ }
+
+ /* Set the MAC Hash Table registers */
+ hash_reg = MAC_HTR0;
+ for (i = 0; i < hash_table_count; i++) {
+ XGMAC_IOWRITE(pdata, hash_reg, hash_table[i]);
+ hash_reg += MAC_HTR_INC;
+ }
+}
+
+static int xgbe_add_mac_addresses(struct xgbe_prv_data *pdata)
+{
+ if (pdata->hw_feat.hash_table_size)
+ xgbe_set_mac_hash_table(pdata);
+ else
+ xgbe_set_mac_addn_addrs(pdata);
+
+ return 0;
+}
+
+static int xgbe_set_mac_address(struct xgbe_prv_data *pdata, u8 *addr)
+{
+ unsigned int mac_addr_hi, mac_addr_lo;
+
+ mac_addr_hi = (addr[5] << 8) | (addr[4] << 0);
+ mac_addr_lo = (addr[3] << 24) | (addr[2] << 16) |
+ (addr[1] << 8) | (addr[0] << 0);
+
+ XGMAC_IOWRITE(pdata, MAC_MACA0HR, mac_addr_hi);
+ XGMAC_IOWRITE(pdata, MAC_MACA0LR, mac_addr_lo);
+
+ return 0;
+}
+
+static int xgbe_config_rx_mode(struct xgbe_prv_data *pdata)
+{
+ struct net_device *netdev = pdata->netdev;
+ unsigned int pr_mode, am_mode;
+
+ pr_mode = ((netdev->flags & IFF_PROMISC) != 0);
+ am_mode = ((netdev->flags & IFF_ALLMULTI) != 0);
+
+ xgbe_set_promiscuous_mode(pdata, pr_mode);
+ xgbe_set_all_multicast_mode(pdata, am_mode);
+
+ xgbe_add_mac_addresses(pdata);
+
+ return 0;
+}
+
+static int xgbe_clr_gpio(struct xgbe_prv_data *pdata, unsigned int gpio)
+{
+ unsigned int reg;
+
+ if (gpio > 15)
+ return -EINVAL;
+
+ reg = XGMAC_IOREAD(pdata, MAC_GPIOSR);
+
+ reg &= ~(1 << (gpio + 16));
+ XGMAC_IOWRITE(pdata, MAC_GPIOSR, reg);
+
+ return 0;
+}
+
+static int xgbe_set_gpio(struct xgbe_prv_data *pdata, unsigned int gpio)
+{
+ unsigned int reg;
+
+ if (gpio > 15)
+ return -EINVAL;
+
+ reg = XGMAC_IOREAD(pdata, MAC_GPIOSR);
+
+ reg |= (1 << (gpio + 16));
+ XGMAC_IOWRITE(pdata, MAC_GPIOSR, reg);
+
+ return 0;
+}
+
+static int xgbe_read_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
+ int mmd_reg)
+{
+ unsigned long flags;
+ unsigned int mmd_address, index, offset;
+ int mmd_data;
+
+ if (mmd_reg & MII_ADDR_C45)
+ mmd_address = mmd_reg & ~MII_ADDR_C45;
+ else
+ mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
+
+ /* The PCS registers are accessed using mmio. The underlying
+ * management interface uses indirect addressing to access the MMD
+ * register sets. This requires accessing of the PCS register in two
+ * phases, an address phase and a data phase.
+ *
+ * The mmio interface is based on 16-bit offsets and values. All
+ * register offsets must therefore be adjusted by left shifting the
+ * offset 1 bit and reading 16 bits of data.
+ */
+ mmd_address <<= 1;
+ index = mmd_address & ~pdata->xpcs_window_mask;
+ offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);
+
+ spin_lock_irqsave(&pdata->xpcs_lock, flags);
+ XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
+ mmd_data = XPCS16_IOREAD(pdata, offset);
+ spin_unlock_irqrestore(&pdata->xpcs_lock, flags);
+
+ return mmd_data;
+}
+
+static void xgbe_write_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
+ int mmd_reg, int mmd_data)
+{
+ unsigned long flags;
+ unsigned int mmd_address, index, offset;
+
+ if (mmd_reg & MII_ADDR_C45)
+ mmd_address = mmd_reg & ~MII_ADDR_C45;
+ else
+ mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
+
+ /* The PCS registers are accessed using mmio. The underlying
+ * management interface uses indirect addressing to access the MMD
+ * register sets. This requires accessing of the PCS register in two
+ * phases, an address phase and a data phase.
+ *
+ * The mmio interface is based on 16-bit offsets and values. All
+ * register offsets must therefore be adjusted by left shifting the
+ * offset 1 bit and writing 16 bits of data.
+ */
+ mmd_address <<= 1;
+ index = mmd_address & ~pdata->xpcs_window_mask;
+ offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);
+
+ spin_lock_irqsave(&pdata->xpcs_lock, flags);
+ XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
+ XPCS16_IOWRITE(pdata, offset, mmd_data);
+ spin_unlock_irqrestore(&pdata->xpcs_lock, flags);
+}
+
+static int xgbe_read_mmd_regs_v1(struct xgbe_prv_data *pdata, int prtad,
+ int mmd_reg)
+{
+ unsigned long flags;
+ unsigned int mmd_address;
+ int mmd_data;
+
+ if (mmd_reg & MII_ADDR_C45)
+ mmd_address = mmd_reg & ~MII_ADDR_C45;
+ else
+ mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
+
+ /* The PCS registers are accessed using mmio. The underlying APB3
+ * management interface uses indirect addressing to access the MMD
+ * register sets. This requires accessing of the PCS register in two
+ * phases, an address phase and a data phase.
+ *
+ * The mmio interface is based on 32-bit offsets and values. All
+ * register offsets must therefore be adjusted by left shifting the
+ * offset 2 bits and reading 32 bits of data.
+ */
+ spin_lock_irqsave(&pdata->xpcs_lock, flags);
+ XPCS32_IOWRITE(pdata, PCS_V1_WINDOW_SELECT, mmd_address >> 8);
+ mmd_data = XPCS32_IOREAD(pdata, (mmd_address & 0xff) << 2);
+ spin_unlock_irqrestore(&pdata->xpcs_lock, flags);
+
+ return mmd_data;
+}
+
+static void xgbe_write_mmd_regs_v1(struct xgbe_prv_data *pdata, int prtad,
+ int mmd_reg, int mmd_data)
+{
+ unsigned int mmd_address;
+ unsigned long flags;
+
+ if (mmd_reg & MII_ADDR_C45)
+ mmd_address = mmd_reg & ~MII_ADDR_C45;
+ else
+ mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
+
+ /* The PCS registers are accessed using mmio. The underlying APB3
+ * management interface uses indirect addressing to access the MMD
+ * register sets. This requires accessing of the PCS register in two
+ * phases, an address phase and a data phase.
+ *
+ * The mmio interface is based on 32-bit offsets and values. All
+ * register offsets must therefore be adjusted by left shifting the
+ * offset 2 bits and writing 32 bits of data.
+ */
+ spin_lock_irqsave(&pdata->xpcs_lock, flags);
+ XPCS32_IOWRITE(pdata, PCS_V1_WINDOW_SELECT, mmd_address >> 8);
+ XPCS32_IOWRITE(pdata, (mmd_address & 0xff) << 2, mmd_data);
+ spin_unlock_irqrestore(&pdata->xpcs_lock, flags);
+}
+
+static int xgbe_read_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
+ int mmd_reg)
+{
+ switch (pdata->vdata->xpcs_access) {
+ case XGBE_XPCS_ACCESS_V1:
+ return xgbe_read_mmd_regs_v1(pdata, prtad, mmd_reg);
+
+ case XGBE_XPCS_ACCESS_V2:
+ default:
+ return xgbe_read_mmd_regs_v2(pdata, prtad, mmd_reg);
+ }
+}
+
+static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
+ int mmd_reg, int mmd_data)
+{
+ switch (pdata->vdata->xpcs_access) {
+ case XGBE_XPCS_ACCESS_V1:
+ return xgbe_write_mmd_regs_v1(pdata, prtad, mmd_reg, mmd_data);
+
+ case XGBE_XPCS_ACCESS_V2:
+ default:
+ return xgbe_write_mmd_regs_v2(pdata, prtad, mmd_reg, mmd_data);
+ }
+}
+
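+/* Build the MDIO single command address value; clause-45 register
+ * numbers carry the device address in their upper 16 bits.
+ */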
+static unsigned int xgbe_create_mdio_sca(int port, int reg)
+{
+ unsigned int mdio_sca, da;
+
+ da = (reg & MII_ADDR_C45) ? reg >> 16 : 0;
+
+ mdio_sca = 0;
+ XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, RA, reg);
+ XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, PA, port);
+ XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, da);
+
+ return mdio_sca;
+}
+
+static int xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata, int addr,
+ int reg, u16 val)
+{
+ unsigned int mdio_sca, mdio_sccd;
+
+ reinit_completion(&pdata->mdio_complete);
+
+ mdio_sca = xgbe_create_mdio_sca(addr, reg);
+ XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);
+
+ mdio_sccd = 0;
+ XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, DATA, val);
+ XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, CMD, 1);
+ XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, BUSY, 1);
+ XGMAC_IOWRITE(pdata, MAC_MDIOSCCDR, mdio_sccd);
+
+ if (!wait_for_completion_timeout(&pdata->mdio_complete, HZ)) {
+ netdev_err(pdata->netdev, "mdio write operation timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int xgbe_read_ext_mii_regs(struct xgbe_prv_data *pdata, int addr,
+ int reg)
+{
+ unsigned int mdio_sca, mdio_sccd;
+
+ reinit_completion(&pdata->mdio_complete);
+
+ mdio_sca = xgbe_create_mdio_sca(addr, reg);
+ XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);
+
+ mdio_sccd = 0;
+ XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, CMD, 3);
+ XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, BUSY, 1);
+ XGMAC_IOWRITE(pdata, MAC_MDIOSCCDR, mdio_sccd);
+
+ if (!wait_for_completion_timeout(&pdata->mdio_complete, HZ)) {
+ netdev_err(pdata->netdev, "mdio read operation timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ return XGMAC_IOREAD_BITS(pdata, MAC_MDIOSCCDR, DATA);
+}
+
+static int xgbe_set_ext_mii_mode(struct xgbe_prv_data *pdata, unsigned int port,
+ enum xgbe_mdio_mode mode)
+{
+ unsigned int reg_val = XGMAC_IOREAD(pdata, MAC_MDIOCL22R);
+
+ switch (mode) {
+ case XGBE_MDIO_MODE_CL22:
+ if (port > XGMAC_MAX_C22_PORT)
+ return -EINVAL;
+ reg_val |= (1 << port);
+ break;
+ case XGBE_MDIO_MODE_CL45:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ XGMAC_IOWRITE(pdata, MAC_MDIOCL22R, reg_val);
+
+ return 0;
+}
+
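+/* A Tx descriptor is complete once the hardware has cleared its OWN bit */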
+static int xgbe_tx_complete(struct xgbe_ring_desc *rdesc)
+{
+ return !XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN);
+}
+
+static int xgbe_disable_rx_csum(struct xgbe_prv_data *pdata)
+{
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 0);
+
+ return 0;
+}
+
+static int xgbe_enable_rx_csum(struct xgbe_prv_data *pdata)
+{
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 1);
+
+ return 0;
+}
+
+static void xgbe_tx_desc_reset(struct xgbe_ring_data *rdata)
+{
+ struct xgbe_ring_desc *rdesc = rdata->rdesc;
+
+ /* Reset the Tx descriptor
+ * Set buffer 1 (lo) address to zero
+ * Set buffer 1 (hi) address to zero
+ * Reset all other control bits (IC, TTSE, B2L & B1L)
+ * Reset all other control bits (OWN, CTXT, FD, LD, CPC, CIC, etc)
+ */
+ rdesc->desc0 = 0;
+ rdesc->desc1 = 0;
+ rdesc->desc2 = 0;
+ rdesc->desc3 = 0;
+
+ /* Make sure ownership is written to the descriptor */
+ dma_wmb();
+}
+
+static void xgbe_tx_desc_init(struct xgbe_channel *channel)
+{
+ struct xgbe_ring *ring = channel->tx_ring;
+ struct xgbe_ring_data *rdata;
+ int i;
+ int start_index = ring->cur;
+
+ DBGPR("-->tx_desc_init\n");
+
+ /* Initialize all descriptors */
+ for (i = 0; i < ring->rdesc_count; i++) {
+ rdata = XGBE_GET_DESC_DATA(ring, i);
+
+ /* Initialize Tx descriptor */
+ xgbe_tx_desc_reset(rdata);
+ }
+
+ /* Update the total number of Tx descriptors */
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_TDRLR, ring->rdesc_count - 1);
+
+ /* Update the starting address of descriptor ring */
+ rdata = XGBE_GET_DESC_DATA(ring, start_index);
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_HI,
+ upper_32_bits(rdata->rdesc_dma));
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_LO,
+ lower_32_bits(rdata->rdesc_dma));
+
+ DBGPR("<--tx_desc_init\n");
+}
+
+static void xgbe_rx_desc_reset(struct xgbe_prv_data *pdata,
+ struct xgbe_ring_data *rdata, unsigned int index)
+{
+ struct xgbe_ring_desc *rdesc = rdata->rdesc;
+ unsigned int rx_usecs = pdata->rx_usecs;
+ unsigned int rx_frames = pdata->rx_frames;
+ unsigned int inte;
+ dma_addr_t hdr_dma, buf_dma;
+
+ if (!rx_usecs && !rx_frames) {
+ /* No coalescing, interrupt for every descriptor */
+ inte = 1;
+ } else {
+ /* Set interrupt based on Rx frame coalescing setting */
+ if (rx_frames && !((index + 1) % rx_frames))
+ inte = 1;
+ else
+ inte = 0;
+ }
+
+ /* Reset the Rx descriptor
+ * Set buffer 1 (lo) address to header dma address (lo)
+ * Set buffer 1 (hi) address to header dma address (hi)
+ * Set buffer 2 (lo) address to buffer dma address (lo)
+ * Set buffer 2 (hi) address to buffer dma address (hi) and
+ * set control bits OWN and INTE
+ */
+ hdr_dma = rdata->rx.hdr.dma_base + rdata->rx.hdr.dma_off;
+ buf_dma = rdata->rx.buf.dma_base + rdata->rx.buf.dma_off;
+ rdesc->desc0 = cpu_to_le32(lower_32_bits(hdr_dma));
+ rdesc->desc1 = cpu_to_le32(upper_32_bits(hdr_dma));
+ rdesc->desc2 = cpu_to_le32(lower_32_bits(buf_dma));
+ rdesc->desc3 = cpu_to_le32(upper_32_bits(buf_dma));
+
+ XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, inte);
+
+ /* Since the Rx DMA engine is likely running, make sure everything
+ * is written to the descriptor(s) before setting the OWN bit
+ * for the descriptor
+ */
+ dma_wmb();
+
+ XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN, 1);
+
+ /* Make sure ownership is written to the descriptor */
+ dma_wmb();
+}
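+
+/* The two dma_wmb() barriers above are distinct: the first orders the
+ * buffer address and INTE writes ahead of handing OWN to the hardware,
+ * the second makes the OWN write itself visible before the caller
+ * advances the ring.  With Rx frame coalescing, e.g. rx_frames = 16,
+ * INTE is set on every 16th descriptor only.
+ */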
+
+static void xgbe_rx_desc_init(struct xgbe_channel *channel)
+{
+ struct xgbe_prv_data *pdata = channel->pdata;
+ struct xgbe_ring *ring = channel->rx_ring;
+ struct xgbe_ring_data *rdata;
+ unsigned int start_index = ring->cur;
+ unsigned int i;
+
+ DBGPR("-->rx_desc_init\n");
+
+ /* Initialize all descriptors */
+ for (i = 0; i < ring->rdesc_count; i++) {
+ rdata = XGBE_GET_DESC_DATA(ring, i);
+
+ /* Initialize Rx descriptor */
+ xgbe_rx_desc_reset(pdata, rdata, i);
+ }
+
+ /* Update the total number of Rx descriptors */
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_RDRLR, ring->rdesc_count - 1);
+
+ /* Update the starting address of descriptor ring */
+ rdata = XGBE_GET_DESC_DATA(ring, start_index);
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_HI,
+ upper_32_bits(rdata->rdesc_dma));
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_LO,
+ lower_32_bits(rdata->rdesc_dma));
+
+ /* Update the Rx Descriptor Tail Pointer */
+ rdata = XGBE_GET_DESC_DATA(ring, start_index + ring->rdesc_count - 1);
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
+ lower_32_bits(rdata->rdesc_dma));
+
+ DBGPR("<--rx_desc_init\n");
+}
+
+static void xgbe_update_tstamp_addend(struct xgbe_prv_data *pdata,
+ unsigned int addend)
+{
+ unsigned int count = 10000;
+
+ /* Set the addend register value and tell the device */
+ XGMAC_IOWRITE(pdata, MAC_TSAR, addend);
+ XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSADDREG, 1);
+
+ /* Wait for addend update to complete */
+ while (--count && XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSADDREG))
+ udelay(5);
+
+ if (!count)
+ netdev_err(pdata->netdev,
+ "timed out updating timestamp addend register\n");
+}
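+
+/* In fine update mode the hardware adds the addend to a 32-bit
+ * accumulator every PTP clock cycle and bumps the sub-second time by
+ * SSINC on each accumulator rollover, so the effective update rate is
+ * roughly ptp_clk * addend / 2^32.  Frequency adjustment is then just
+ * a matter of rescaling the addend (see the ptp code), which is why
+ * only this register needs rewriting here.
+ */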
+
+static void xgbe_set_tstamp_time(struct xgbe_prv_data *pdata, unsigned int sec,
+ unsigned int nsec)
+{
+ unsigned int count = 10000;
+
+ /* Set the time values and tell the device */
+ XGMAC_IOWRITE(pdata, MAC_STSUR, sec);
+ XGMAC_IOWRITE(pdata, MAC_STNUR, nsec);
+ XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSINIT, 1);
+
+ /* Wait for time update to complete */
+ while (--count && XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSINIT))
+ udelay(5);
+
+ if (!count)
+ netdev_err(pdata->netdev, "timed out initializing timestamp\n");
+}
+
+static u64 xgbe_get_tstamp_time(struct xgbe_prv_data *pdata)
+{
+ u64 nsec;
+
+ nsec = XGMAC_IOREAD(pdata, MAC_STSR);
+ nsec *= NSEC_PER_SEC;
+ nsec += XGMAC_IOREAD(pdata, MAC_STNR);
+
+ return nsec;
+}
+
+static u64 xgbe_get_tx_tstamp(struct xgbe_prv_data *pdata)
+{
+ unsigned int tx_snr, tx_ssr;
+ u64 nsec;
+
+ if (pdata->vdata->tx_tstamp_workaround) {
+ tx_snr = XGMAC_IOREAD(pdata, MAC_TXSNR);
+ tx_ssr = XGMAC_IOREAD(pdata, MAC_TXSSR);
+ } else {
+ tx_ssr = XGMAC_IOREAD(pdata, MAC_TXSSR);
+ tx_snr = XGMAC_IOREAD(pdata, MAC_TXSNR);
+ }
+
+ if (XGMAC_GET_BITS(tx_snr, MAC_TXSNR, TXTSSTSMIS))
+ return 0;
+
+ nsec = tx_ssr;
+ nsec *= NSEC_PER_SEC;
+ nsec += tx_snr;
+
+ return nsec;
+}
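+
+/* The read order of the two snapshot registers depends on the hardware
+ * revision (the tx_tstamp_workaround version flag).  In either case a
+ * set TXTSSTSMIS bit means the timestamp snapshot was missed, so 0 is
+ * returned and the caller skips reporting a Tx timestamp.
+ */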
+
+static void xgbe_get_rx_tstamp(struct xgbe_packet_data *packet,
+ struct xgbe_ring_desc *rdesc)
+{
+ u64 nsec;
+
+ if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_CONTEXT_DESC3, TSA) &&
+ !XGMAC_GET_BITS_LE(rdesc->desc3, RX_CONTEXT_DESC3, TSD)) {
+ nsec = le32_to_cpu(rdesc->desc1);
+ nsec <<= 32;
+ nsec |= le32_to_cpu(rdesc->desc0);
+ if (nsec != 0xffffffffffffffffULL) {
+ packet->rx_tstamp = nsec;
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ RX_TSTAMP, 1);
+ }
+ }
+}
+
+static int xgbe_config_tstamp(struct xgbe_prv_data *pdata,
+ unsigned int mac_tscr)
+{
+	/* Set one nanosecond accuracy */
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCTRLSSR, 1);
+
+ /* Set fine timestamp update */
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCFUPDT, 1);
+
+ /* Overwrite earlier timestamps */
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TXTSSTSM, 1);
+
+ XGMAC_IOWRITE(pdata, MAC_TSCR, mac_tscr);
+
+ /* Exit if timestamping is not enabled */
+ if (!XGMAC_GET_BITS(mac_tscr, MAC_TSCR, TSENA))
+ return 0;
+
+ /* Initialize time registers */
+ XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SSINC, XGBE_TSTAMP_SSINC);
+ XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SNSINC, XGBE_TSTAMP_SNSINC);
+ xgbe_update_tstamp_addend(pdata, pdata->tstamp_addend);
+ xgbe_set_tstamp_time(pdata, 0, 0);
+
+ /* Initialize the timecounter */
+ timecounter_init(&pdata->tstamp_tc, &pdata->tstamp_cc,
+ ktime_to_ns(ktime_get_real()));
+
+ return 0;
+}
+
+static void xgbe_tx_start_xmit(struct xgbe_channel *channel,
+ struct xgbe_ring *ring)
+{
+ struct xgbe_prv_data *pdata = channel->pdata;
+ struct xgbe_ring_data *rdata;
+
+ /* Make sure everything is written before the register write */
+ wmb();
+
+	/* Issue a poll command to Tx DMA by writing the address
+	 * of the next immediate free descriptor
+	 */
+ rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_TDTR_LO,
+ lower_32_bits(rdata->rdesc_dma));
+
+ /* Start the Tx timer */
+ if (pdata->tx_usecs && !channel->tx_timer_active) {
+ channel->tx_timer_active = 1;
+ mod_timer(&channel->tx_timer,
+ jiffies + usecs_to_jiffies(pdata->tx_usecs));
+ }
+
+ ring->tx.xmit_more = 0;
+}
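+
+/* Writing the tail pointer (DMA_CH_TDTR_LO) acts as the doorbell that
+ * restarts descriptor polling in the Tx DMA engine.  The Tx timer is a
+ * fallback: when interrupt coalescing withholds the IC bit, the timer
+ * guarantees completion processing still runs within tx_usecs.
+ */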
+
+static void xgbe_dev_xmit(struct xgbe_channel *channel)
+{
+ struct xgbe_prv_data *pdata = channel->pdata;
+ struct xgbe_ring *ring = channel->tx_ring;
+ struct xgbe_ring_data *rdata;
+ struct xgbe_ring_desc *rdesc;
+ struct xgbe_packet_data *packet = &ring->packet_data;
+ unsigned int tx_packets, tx_bytes;
+ unsigned int csum, tso, vlan, vxlan;
+ unsigned int tso_context, vlan_context;
+ unsigned int tx_set_ic;
+ int start_index = ring->cur;
+ int cur_index = ring->cur;
+ int i;
+
+ DBGPR("-->xgbe_dev_xmit\n");
+
+ tx_packets = packet->tx_packets;
+ tx_bytes = packet->tx_bytes;
+
+ csum = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
+ CSUM_ENABLE);
+ tso = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
+ TSO_ENABLE);
+ vlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
+ VLAN_CTAG);
+ vxlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
+ VXLAN);
+
+ if (tso && (packet->mss != ring->tx.cur_mss))
+ tso_context = 1;
+ else
+ tso_context = 0;
+
+ if (vlan && (packet->vlan_ctag != ring->tx.cur_vlan_ctag))
+ vlan_context = 1;
+ else
+ vlan_context = 0;
+
+ /* Determine if an interrupt should be generated for this Tx:
+ * Interrupt:
+ * - Tx frame count exceeds the frame count setting
+ * - Addition of Tx frame count to the frame count since the
+ * last interrupt was set exceeds the frame count setting
+ * No interrupt:
+ * - No frame count setting specified (ethtool -C ethX tx-frames 0)
+ * - Addition of Tx frame count to the frame count since the
+ * last interrupt was set does not exceed the frame count setting
+ */
+ ring->coalesce_count += tx_packets;
+ if (!pdata->tx_frames)
+ tx_set_ic = 0;
+ else if (tx_packets > pdata->tx_frames)
+ tx_set_ic = 1;
+ else if ((ring->coalesce_count % pdata->tx_frames) < tx_packets)
+ tx_set_ic = 1;
+ else
+ tx_set_ic = 0;
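+
+	/* The modulus test above fires when coalesce_count has just
+	 * crossed a multiple of tx_frames: e.g. with tx_frames = 25 and
+	 * coalesce_count moving from 24 to 30 (tx_packets = 6),
+	 * 30 % 25 = 5 < 6, so an interrupt is requested.
+	 */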
+
+ rdata = XGBE_GET_DESC_DATA(ring, cur_index);
+ rdesc = rdata->rdesc;
+
+ /* Create a context descriptor if this is a TSO packet */
+ if (tso_context || vlan_context) {
+ if (tso_context) {
+ netif_dbg(pdata, tx_queued, pdata->netdev,
+ "TSO context descriptor, mss=%u\n",
+ packet->mss);
+
+ /* Set the MSS size */
+ XGMAC_SET_BITS_LE(rdesc->desc2, TX_CONTEXT_DESC2,
+ MSS, packet->mss);
+
+ /* Mark it as a CONTEXT descriptor */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
+ CTXT, 1);
+
+ /* Indicate this descriptor contains the MSS */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
+ TCMSSV, 1);
+
+ ring->tx.cur_mss = packet->mss;
+ }
+
+ if (vlan_context) {
+ netif_dbg(pdata, tx_queued, pdata->netdev,
+ "VLAN context descriptor, ctag=%u\n",
+ packet->vlan_ctag);
+
+ /* Mark it as a CONTEXT descriptor */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
+ CTXT, 1);
+
+ /* Set the VLAN tag */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
+ VT, packet->vlan_ctag);
+
+ /* Indicate this descriptor contains the VLAN tag */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
+ VLTV, 1);
+
+ ring->tx.cur_vlan_ctag = packet->vlan_ctag;
+ }
+
+ cur_index++;
+ rdata = XGBE_GET_DESC_DATA(ring, cur_index);
+ rdesc = rdata->rdesc;
+ }
+
+ /* Update buffer address (for TSO this is the header) */
+ rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
+ rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));
+
+ /* Update the buffer length */
+ XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L,
+ rdata->skb_dma_len);
+
+ /* VLAN tag insertion check */
+ if (vlan)
+ XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, VTIR,
+ TX_NORMAL_DESC2_VLAN_INSERT);
+
+ /* Timestamp enablement check */
+ if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP))
+ XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, TTSE, 1);
+
+ /* Mark it as First Descriptor */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FD, 1);
+
+ /* Mark it as a NORMAL descriptor */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0);
+
+ /* Set OWN bit if not the first descriptor */
+ if (cur_index != start_index)
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);
+
+ if (tso) {
+ /* Enable TSO */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TSE, 1);
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPPL,
+ packet->tcp_payload_len);
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPHDRLEN,
+ packet->tcp_header_len / 4);
+
+ pdata->ext_stats.tx_tso_packets += tx_packets;
+ } else {
+ /* Enable CRC and Pad Insertion */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CPC, 0);
+
+ /* Enable HW CSUM */
+ if (csum)
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3,
+ CIC, 0x3);
+
+ /* Set the total length to be transmitted */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FL,
+ packet->length);
+ }
+
+ if (vxlan) {
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, VNP,
+ TX_NORMAL_DESC3_VXLAN_PACKET);
+
+ pdata->ext_stats.tx_vxlan_packets += packet->tx_packets;
+ }
+
+ for (i = cur_index - start_index + 1; i < packet->rdesc_count; i++) {
+ cur_index++;
+ rdata = XGBE_GET_DESC_DATA(ring, cur_index);
+ rdesc = rdata->rdesc;
+
+ /* Update buffer address */
+ rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
+ rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));
+
+ /* Update the buffer length */
+ XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L,
+ rdata->skb_dma_len);
+
+ /* Set OWN bit */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);
+
+ /* Mark it as NORMAL descriptor */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0);
+
+ /* Enable HW CSUM */
+ if (csum)
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3,
+ CIC, 0x3);
+ }
+
+ /* Set LAST bit for the last descriptor */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD, 1);
+
+ /* Set IC bit based on Tx coalescing settings */
+ if (tx_set_ic)
+ XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 1);
+
+ /* Save the Tx info to report back during cleanup */
+ rdata->tx.packets = tx_packets;
+ rdata->tx.bytes = tx_bytes;
+
+ pdata->ext_stats.txq_packets[channel->queue_index] += tx_packets;
+ pdata->ext_stats.txq_bytes[channel->queue_index] += tx_bytes;
+
+ /* In case the Tx DMA engine is running, make sure everything
+ * is written to the descriptor(s) before setting the OWN bit
+ * for the first descriptor
+ */
+ dma_wmb();
+
+ /* Set OWN bit for the first descriptor */
+ rdata = XGBE_GET_DESC_DATA(ring, start_index);
+ rdesc = rdata->rdesc;
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);
+
+ if (netif_msg_tx_queued(pdata))
+ xgbe_dump_tx_desc(pdata, ring, start_index,
+ packet->rdesc_count, 1);
+
+ /* Make sure ownership is written to the descriptor */
+ smp_wmb();
+
+ ring->cur = cur_index + 1;
+ if (!netdev_xmit_more() ||
+ netif_xmit_stopped(netdev_get_tx_queue(pdata->netdev,
+ channel->queue_index)))
+ xgbe_tx_start_xmit(channel, ring);
+ else
+ ring->tx.xmit_more = 1;
+
+ DBGPR(" %s: descriptors %u to %u written\n",
+ channel->name, start_index & (ring->rdesc_count - 1),
+ (ring->cur - 1) & (ring->rdesc_count - 1));
+
+ DBGPR("<--xgbe_dev_xmit\n");
+}
+
+static int xgbe_dev_read(struct xgbe_channel *channel)
+{
+ struct xgbe_prv_data *pdata = channel->pdata;
+ struct xgbe_ring *ring = channel->rx_ring;
+ struct xgbe_ring_data *rdata;
+ struct xgbe_ring_desc *rdesc;
+ struct xgbe_packet_data *packet = &ring->packet_data;
+ struct net_device *netdev = pdata->netdev;
+ unsigned int err, etlt, l34t;
+
+ DBGPR("-->xgbe_dev_read: cur = %d\n", ring->cur);
+
+ rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
+ rdesc = rdata->rdesc;
+
+ /* Check for data availability */
+ if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN))
+ return 1;
+
+ /* Make sure descriptor fields are read after reading the OWN bit */
+ dma_rmb();
+
+ if (netif_msg_rx_status(pdata))
+ xgbe_dump_rx_desc(pdata, ring, ring->cur);
+
+ if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CTXT)) {
+ /* Timestamp Context Descriptor */
+ xgbe_get_rx_tstamp(packet, rdesc);
+
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ CONTEXT, 1);
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ CONTEXT_NEXT, 0);
+ return 0;
+ }
+
+ /* Normal Descriptor, be sure Context Descriptor bit is off */
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, CONTEXT, 0);
+
+ /* Indicate if a Context Descriptor is next */
+ if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CDA))
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ CONTEXT_NEXT, 1);
+
+ /* Get the header length */
+ if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, FD)) {
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ FIRST, 1);
+ rdata->rx.hdr_len = XGMAC_GET_BITS_LE(rdesc->desc2,
+ RX_NORMAL_DESC2, HL);
+ if (rdata->rx.hdr_len)
+ pdata->ext_stats.rx_split_header_packets++;
+ } else {
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ FIRST, 0);
+ }
+
+ /* Get the RSS hash */
+ if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, RSV)) {
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ RSS_HASH, 1);
+
+ packet->rss_hash = le32_to_cpu(rdesc->desc1);
+
+ l34t = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, L34T);
+ switch (l34t) {
+ case RX_DESC3_L34T_IPV4_TCP:
+ case RX_DESC3_L34T_IPV4_UDP:
+ case RX_DESC3_L34T_IPV6_TCP:
+ case RX_DESC3_L34T_IPV6_UDP:
+ packet->rss_hash_type = PKT_HASH_TYPE_L4;
+ break;
+ default:
+ packet->rss_hash_type = PKT_HASH_TYPE_L3;
+ }
+ }
+
+ /* Not all the data has been transferred for this packet */
+ if (!XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD))
+ return 0;
+
+ /* This is the last of the data for this packet */
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ LAST, 1);
+
+ /* Get the packet length */
+ rdata->rx.len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL);
+
+ /* Set checksum done indicator as appropriate */
+ if (netdev->features & NETIF_F_RXCSUM) {
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ CSUM_DONE, 1);
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ TNPCSUM_DONE, 1);
+ }
+
+ /* Set the tunneled packet indicator */
+ if (XGMAC_GET_BITS_LE(rdesc->desc2, RX_NORMAL_DESC2, TNP)) {
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ TNP, 1);
+ pdata->ext_stats.rx_vxlan_packets++;
+
+ l34t = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, L34T);
+ switch (l34t) {
+ case RX_DESC3_L34T_IPV4_UNKNOWN:
+ case RX_DESC3_L34T_IPV6_UNKNOWN:
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ TNPCSUM_DONE, 0);
+ break;
+ }
+ }
+
+ /* Check for errors (only valid in last descriptor) */
+ err = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ES);
+ etlt = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ETLT);
+ netif_dbg(pdata, rx_status, netdev, "err=%u, etlt=%#x\n", err, etlt);
+
+ if (!err || !etlt) {
+ /* No error if err is 0 or etlt is 0 */
+ if ((etlt == 0x09) &&
+ (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ VLAN_CTAG, 1);
+ packet->vlan_ctag = XGMAC_GET_BITS_LE(rdesc->desc0,
+ RX_NORMAL_DESC0,
+ OVT);
+ netif_dbg(pdata, rx_status, netdev, "vlan-ctag=%#06x\n",
+ packet->vlan_ctag);
+ }
+ } else {
+ unsigned int tnp = XGMAC_GET_BITS(packet->attributes,
+ RX_PACKET_ATTRIBUTES, TNP);
+
+ if ((etlt == 0x05) || (etlt == 0x06)) {
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ CSUM_DONE, 0);
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ TNPCSUM_DONE, 0);
+ pdata->ext_stats.rx_csum_errors++;
+ } else if (tnp && ((etlt == 0x09) || (etlt == 0x0a))) {
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ CSUM_DONE, 0);
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ TNPCSUM_DONE, 0);
+ pdata->ext_stats.rx_vxlan_csum_errors++;
+ } else {
+ XGMAC_SET_BITS(packet->errors, RX_PACKET_ERRORS,
+ FRAME, 1);
+ }
+ }
+
+ pdata->ext_stats.rxq_packets[channel->queue_index]++;
+ pdata->ext_stats.rxq_bytes[channel->queue_index] += rdata->rx.len;
+
+ DBGPR("<--xgbe_dev_read: %s - descriptor=%u (cur=%d)\n", channel->name,
+ ring->cur & (ring->rdesc_count - 1), ring->cur);
+
+ return 0;
+}
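+
+/* Return semantics for xgbe_dev_read(): 1 means the descriptor is still
+ * owned by the hardware (nothing to do), 0 means a descriptor was
+ * consumed; the packet attribute bits (FIRST/LAST/CONTEXT) tell the
+ * caller how to stitch multi-descriptor packets back together.
+ */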
+
+static int xgbe_is_context_desc(struct xgbe_ring_desc *rdesc)
+{
+ /* Rx and Tx share CTXT bit, so check TDES3.CTXT bit */
+ return XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT);
+}
+
+static int xgbe_is_last_desc(struct xgbe_ring_desc *rdesc)
+{
+ /* Rx and Tx share LD bit, so check TDES3.LD bit */
+ return XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD);
+}
+
+static int xgbe_enable_int(struct xgbe_channel *channel,
+ enum xgbe_int int_id)
+{
+ switch (int_id) {
+ case XGMAC_INT_DMA_CH_SR_TI:
+ XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 1);
+ break;
+ case XGMAC_INT_DMA_CH_SR_TPS:
+ XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TXSE, 1);
+ break;
+ case XGMAC_INT_DMA_CH_SR_TBU:
+ XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TBUE, 1);
+ break;
+ case XGMAC_INT_DMA_CH_SR_RI:
+ XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 1);
+ break;
+ case XGMAC_INT_DMA_CH_SR_RBU:
+ XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RBUE, 1);
+ break;
+ case XGMAC_INT_DMA_CH_SR_RPS:
+ XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RSE, 1);
+ break;
+ case XGMAC_INT_DMA_CH_SR_TI_RI:
+ XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 1);
+ XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 1);
+ break;
+ case XGMAC_INT_DMA_CH_SR_FBE:
+ XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, FBEE, 1);
+ break;
+ case XGMAC_INT_DMA_ALL:
+ channel->curr_ier |= channel->saved_ier;
+ break;
+ default:
+ return -1;
+ }
+
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, channel->curr_ier);
+
+ return 0;
+}
+
+static int xgbe_disable_int(struct xgbe_channel *channel,
+ enum xgbe_int int_id)
+{
+ switch (int_id) {
+ case XGMAC_INT_DMA_CH_SR_TI:
+ XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 0);
+ break;
+ case XGMAC_INT_DMA_CH_SR_TPS:
+ XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TXSE, 0);
+ break;
+ case XGMAC_INT_DMA_CH_SR_TBU:
+ XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TBUE, 0);
+ break;
+ case XGMAC_INT_DMA_CH_SR_RI:
+ XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 0);
+ break;
+ case XGMAC_INT_DMA_CH_SR_RBU:
+ XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RBUE, 0);
+ break;
+ case XGMAC_INT_DMA_CH_SR_RPS:
+ XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RSE, 0);
+ break;
+ case XGMAC_INT_DMA_CH_SR_TI_RI:
+ XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 0);
+ XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 0);
+ break;
+ case XGMAC_INT_DMA_CH_SR_FBE:
+ XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, FBEE, 0);
+ break;
+ case XGMAC_INT_DMA_ALL:
+ channel->saved_ier = channel->curr_ier;
+ channel->curr_ier = 0;
+ break;
+ default:
+ return -1;
+ }
+
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, channel->curr_ier);
+
+ return 0;
+}
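+
+/* Both interrupt routines above work on the cached curr_ier value so
+ * that DMA_CH_IER never has to be read back, and XGMAC_INT_DMA_ALL
+ * gives a save/restore pair: disable stashes the mask in saved_ier and
+ * clears it, enable ORs the stash back in.
+ */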
+
+static int __xgbe_exit(struct xgbe_prv_data *pdata)
+{
+ unsigned int count = 2000;
+
+ DBGPR("-->xgbe_exit\n");
+
+ /* Issue a software reset */
+ XGMAC_IOWRITE_BITS(pdata, DMA_MR, SWR, 1);
+ usleep_range(10, 15);
+
+ /* Poll Until Poll Condition */
+ while (--count && XGMAC_IOREAD_BITS(pdata, DMA_MR, SWR))
+ usleep_range(500, 600);
+
+ if (!count)
+ return -EBUSY;
+
+ DBGPR("<--xgbe_exit\n");
+
+ return 0;
+}
+
+static int xgbe_exit(struct xgbe_prv_data *pdata)
+{
+ int ret;
+
+ /* To guard against possible incorrectly generated interrupts,
+ * issue the software reset twice.
+ */
+ ret = __xgbe_exit(pdata);
+ if (ret)
+ return ret;
+
+ return __xgbe_exit(pdata);
+}
+
+static int xgbe_flush_tx_queues(struct xgbe_prv_data *pdata)
+{
+ unsigned int i, count;
+
+ if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) < 0x21)
+ return 0;
+
+ for (i = 0; i < pdata->tx_q_count; i++)
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, FTQ, 1);
+
+ /* Poll Until Poll Condition */
+ for (i = 0; i < pdata->tx_q_count; i++) {
+ count = 2000;
+ while (--count && XGMAC_MTL_IOREAD_BITS(pdata, i,
+ MTL_Q_TQOMR, FTQ))
+ usleep_range(500, 600);
+
+ if (!count)
+ return -EBUSY;
+ }
+
+ return 0;
+}
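+
+/* The FTQ (flush Tx queue) bit is self-clearing, so the poll loop above
+ * simply waits for the hardware to finish the flush.  Devices older
+ * than version 2.1 skip the flush entirely.
+ */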
+
+static void xgbe_config_dma_bus(struct xgbe_prv_data *pdata)
+{
+ unsigned int sbmr;
+
+ sbmr = XGMAC_IOREAD(pdata, DMA_SBMR);
+
+ /* Set enhanced addressing mode */
+ XGMAC_SET_BITS(sbmr, DMA_SBMR, EAME, 1);
+
+ /* Set the System Bus mode */
+ XGMAC_SET_BITS(sbmr, DMA_SBMR, UNDEF, 1);
+ XGMAC_SET_BITS(sbmr, DMA_SBMR, BLEN, pdata->blen >> 2);
+ XGMAC_SET_BITS(sbmr, DMA_SBMR, AAL, pdata->aal);
+ XGMAC_SET_BITS(sbmr, DMA_SBMR, RD_OSR_LMT, pdata->rd_osr_limit - 1);
+ XGMAC_SET_BITS(sbmr, DMA_SBMR, WR_OSR_LMT, pdata->wr_osr_limit - 1);
+
+ XGMAC_IOWRITE(pdata, DMA_SBMR, sbmr);
+
+ /* Set descriptor fetching threshold */
+ if (pdata->vdata->tx_desc_prefetch)
+ XGMAC_IOWRITE_BITS(pdata, DMA_TXEDMACR, TDPS,
+ pdata->vdata->tx_desc_prefetch);
+
+ if (pdata->vdata->rx_desc_prefetch)
+ XGMAC_IOWRITE_BITS(pdata, DMA_RXEDMACR, RDPS,
+ pdata->vdata->rx_desc_prefetch);
+}
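+
+/* A note on the BLEN programming above: pdata->blen holds the burst
+ * length in bytes and, assuming the DMA_SBMR BLEN field is a one-hot
+ * enable mask starting at BLEN4, blen >> 2 lands the value on the
+ * matching enable bit, e.g. 256 >> 2 = 0x40 selects BLEN256.
+ */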
+
+static void xgbe_config_dma_cache(struct xgbe_prv_data *pdata)
+{
+ XGMAC_IOWRITE(pdata, DMA_AXIARCR, pdata->arcr);
+ XGMAC_IOWRITE(pdata, DMA_AXIAWCR, pdata->awcr);
+ if (pdata->awarcr)
+ XGMAC_IOWRITE(pdata, DMA_AXIAWARCR, pdata->awarcr);
+}
+
+static void xgbe_config_mtl_mode(struct xgbe_prv_data *pdata)
+{
+ unsigned int i;
+
+ /* Set Tx to weighted round robin scheduling algorithm */
+ XGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_WRR);
+
+ /* Set Tx traffic classes to use WRR algorithm with equal weights */
+ for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
+ MTL_TSA_ETS);
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW, 1);
+ }
+
+ /* Set Rx to strict priority algorithm */
+ XGMAC_IOWRITE_BITS(pdata, MTL_OMR, RAA, MTL_RAA_SP);
+}
+
+static void xgbe_queue_flow_control_threshold(struct xgbe_prv_data *pdata,
+ unsigned int queue,
+ unsigned int q_fifo_size)
+{
+ unsigned int frame_fifo_size;
+ unsigned int rfa, rfd;
+
+ frame_fifo_size = XGMAC_FLOW_CONTROL_ALIGN(xgbe_get_max_frame(pdata));
+
+ if (pdata->pfcq[queue] && (q_fifo_size > pdata->pfc_rfa)) {
+ /* PFC is active for this queue */
+ rfa = pdata->pfc_rfa;
+ rfd = rfa + frame_fifo_size;
+ if (rfd > XGMAC_FLOW_CONTROL_MAX)
+ rfd = XGMAC_FLOW_CONTROL_MAX;
+ if (rfa >= XGMAC_FLOW_CONTROL_MAX)
+ rfa = XGMAC_FLOW_CONTROL_MAX - XGMAC_FLOW_CONTROL_UNIT;
+ } else {
+ /* This path deals with just maximum frame sizes which are
+ * limited to a jumbo frame of 9,000 (plus headers, etc.)
+ * so we can never exceed the maximum allowable RFA/RFD
+ * values.
+ */
+ if (q_fifo_size <= 2048) {
+			/* rx_rfa/rx_rfd of zero signals no flow control */
+ pdata->rx_rfa[queue] = 0;
+ pdata->rx_rfd[queue] = 0;
+ return;
+ }
+
+ if (q_fifo_size <= 4096) {
+ /* Between 2048 and 4096 */
+ pdata->rx_rfa[queue] = 0; /* Full - 1024 bytes */
+ pdata->rx_rfd[queue] = 1; /* Full - 1536 bytes */
+ return;
+ }
+
+ if (q_fifo_size <= frame_fifo_size) {
+ /* Between 4096 and max-frame */
+ pdata->rx_rfa[queue] = 2; /* Full - 2048 bytes */
+ pdata->rx_rfd[queue] = 5; /* Full - 3584 bytes */
+ return;
+ }
+
+ if (q_fifo_size <= (frame_fifo_size * 3)) {
+ /* Between max-frame and 3 max-frames,
+ * trigger if we get just over a frame of data and
+ * resume when we have just under half a frame left.
+ */
+ rfa = q_fifo_size - frame_fifo_size;
+ rfd = rfa + (frame_fifo_size / 2);
+ } else {
+ /* Above 3 max-frames - trigger when just over
+ * 2 frames of space available
+ */
+ rfa = frame_fifo_size * 2;
+ rfa += XGMAC_FLOW_CONTROL_UNIT;
+ rfd = rfa + frame_fifo_size;
+ }
+ }
+
+ pdata->rx_rfa[queue] = XGMAC_FLOW_CONTROL_VALUE(rfa);
+ pdata->rx_rfd[queue] = XGMAC_FLOW_CONTROL_VALUE(rfd);
+}
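+
+/* Worked example, assuming XGMAC_FLOW_CONTROL_UNIT is 512 bytes and
+ * XGMAC_FLOW_CONTROL_VALUE() encodes (bytes / 512) - 2: an rfa of
+ * 3072 bytes becomes a register value of 4, i.e. assert flow control
+ * at "fifo full minus 3072".  Under that encoding the hard-coded
+ * 0/1 and 2/5 values in the small-fifo paths above correspond to
+ * full-minus-1024/1536 and full-minus-2048/3584 bytes, matching their
+ * inline comments.
+ */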
+
+static void xgbe_calculate_flow_control_threshold(struct xgbe_prv_data *pdata,
+ unsigned int *fifo)
+{
+ unsigned int q_fifo_size;
+ unsigned int i;
+
+ for (i = 0; i < pdata->rx_q_count; i++) {
+ q_fifo_size = (fifo[i] + 1) * XGMAC_FIFO_UNIT;
+
+ xgbe_queue_flow_control_threshold(pdata, i, q_fifo_size);
+ }
+}
+
+static void xgbe_config_flow_control_threshold(struct xgbe_prv_data *pdata)
+{
+ unsigned int i;
+
+ for (i = 0; i < pdata->rx_q_count; i++) {
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFA,
+ pdata->rx_rfa[i]);
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFD,
+ pdata->rx_rfd[i]);
+ }
+}
+
+static unsigned int xgbe_get_tx_fifo_size(struct xgbe_prv_data *pdata)
+{
+ /* The configured value may not be the actual amount of fifo RAM */
+ return min_t(unsigned int, pdata->tx_max_fifo_size,
+ pdata->hw_feat.tx_fifo_size);
+}
+
+static unsigned int xgbe_get_rx_fifo_size(struct xgbe_prv_data *pdata)
+{
+ /* The configured value may not be the actual amount of fifo RAM */
+ return min_t(unsigned int, pdata->rx_max_fifo_size,
+ pdata->hw_feat.rx_fifo_size);
+}
+
+static void xgbe_calculate_equal_fifo(unsigned int fifo_size,
+ unsigned int queue_count,
+ unsigned int *fifo)
+{
+ unsigned int q_fifo_size;
+ unsigned int p_fifo;
+ unsigned int i;
+
+ q_fifo_size = fifo_size / queue_count;
+
+ /* Calculate the fifo setting by dividing the queue's fifo size
+ * by the fifo allocation increment (with 0 representing the
+ * base allocation increment so decrement the result by 1).
+ */
+ p_fifo = q_fifo_size / XGMAC_FIFO_UNIT;
+ if (p_fifo)
+ p_fifo--;
+
+ /* Distribute the fifo equally amongst the queues */
+ for (i = 0; i < queue_count; i++)
+ fifo[i] = p_fifo;
+}
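+
+/* Example, assuming XGMAC_FIFO_UNIT is 256 bytes: a 65536 byte fifo
+ * split across 8 queues gives q_fifo_size = 8192, so each queue gets a
+ * TQS/RQS setting of (8192 / 256) - 1 = 31.
+ */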
+
+static unsigned int xgbe_set_nonprio_fifos(unsigned int fifo_size,
+ unsigned int queue_count,
+ unsigned int *fifo)
+{
+ unsigned int i;
+
+ BUILD_BUG_ON_NOT_POWER_OF_2(XGMAC_FIFO_MIN_ALLOC);
+
+ if (queue_count <= IEEE_8021QAZ_MAX_TCS)
+ return fifo_size;
+
+	/* Rx queues beyond the first IEEE_8021QAZ_MAX_TCS queues are for
+	 * specialized packets, such as PTP or DCB control packets, and
+	 * don't require a large fifo
+	 */
+ for (i = IEEE_8021QAZ_MAX_TCS; i < queue_count; i++) {
+ fifo[i] = (XGMAC_FIFO_MIN_ALLOC / XGMAC_FIFO_UNIT) - 1;
+ fifo_size -= XGMAC_FIFO_MIN_ALLOC;
+ }
+
+ return fifo_size;
+}
+
+static unsigned int xgbe_get_pfc_delay(struct xgbe_prv_data *pdata)
+{
+ unsigned int delay;
+
+ /* If a delay has been provided, use that */
+ if (pdata->pfc->delay)
+ return pdata->pfc->delay / 8;
+
+ /* Allow for two maximum size frames */
+ delay = xgbe_get_max_frame(pdata);
+ delay += XGMAC_ETH_PREAMBLE;
+ delay *= 2;
+
+ /* Allow for PFC frame */
+ delay += XGMAC_PFC_DATA_LEN;
+ delay += ETH_HLEN + ETH_FCS_LEN;
+ delay += XGMAC_ETH_PREAMBLE;
+
+ /* Allow for miscellaneous delays (LPI exit, cable, etc.) */
+ delay += XGMAC_PFC_DELAYS;
+
+ return delay;
+}
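+
+/* pdata->pfc->delay is specified in bit times (hence the divide by 8
+ * to get bytes).  The computed fallback allows for two full-size frames
+ * already in flight plus one PFC frame, with some slack for LPI exit
+ * and cable delays, before the peer must react.
+ */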
+
+static unsigned int xgbe_get_pfc_queues(struct xgbe_prv_data *pdata)
+{
+ unsigned int count, prio_queues;
+ unsigned int i;
+
+ if (!pdata->pfc->pfc_en)
+ return 0;
+
+ count = 0;
+ prio_queues = XGMAC_PRIO_QUEUES(pdata->rx_q_count);
+ for (i = 0; i < prio_queues; i++) {
+ if (!xgbe_is_pfc_queue(pdata, i))
+ continue;
+
+ pdata->pfcq[i] = 1;
+ count++;
+ }
+
+ return count;
+}
+
+static void xgbe_calculate_dcb_fifo(struct xgbe_prv_data *pdata,
+ unsigned int fifo_size,
+ unsigned int *fifo)
+{
+ unsigned int q_fifo_size, rem_fifo, addn_fifo;
+ unsigned int prio_queues;
+ unsigned int pfc_count;
+ unsigned int i;
+
+ q_fifo_size = XGMAC_FIFO_ALIGN(xgbe_get_max_frame(pdata));
+ prio_queues = XGMAC_PRIO_QUEUES(pdata->rx_q_count);
+ pfc_count = xgbe_get_pfc_queues(pdata);
+
+ if (!pfc_count || ((q_fifo_size * prio_queues) > fifo_size)) {
+ /* No traffic classes with PFC enabled or can't do lossless */
+ xgbe_calculate_equal_fifo(fifo_size, prio_queues, fifo);
+ return;
+ }
+
+ /* Calculate how much fifo we have to play with */
+ rem_fifo = fifo_size - (q_fifo_size * prio_queues);
+
+ /* Calculate how much more than base fifo PFC needs, which also
+ * becomes the threshold activation point (RFA)
+ */
+ pdata->pfc_rfa = xgbe_get_pfc_delay(pdata);
+ pdata->pfc_rfa = XGMAC_FLOW_CONTROL_ALIGN(pdata->pfc_rfa);
+
+ if (pdata->pfc_rfa > q_fifo_size) {
+ addn_fifo = pdata->pfc_rfa - q_fifo_size;
+ addn_fifo = XGMAC_FIFO_ALIGN(addn_fifo);
+ } else {
+ addn_fifo = 0;
+ }
+
+ /* Calculate DCB fifo settings:
+ * - distribute remaining fifo between the VLAN priority
+ * queues based on traffic class PFC enablement and overall
+ * priority (0 is lowest priority, so start at highest)
+ */
+ i = prio_queues;
+ while (i > 0) {
+ i--;
+
+ fifo[i] = (q_fifo_size / XGMAC_FIFO_UNIT) - 1;
+
+ if (!pdata->pfcq[i] || !addn_fifo)
+ continue;
+
+ if (addn_fifo > rem_fifo) {
+ netdev_warn(pdata->netdev,
+ "RXq%u cannot set needed fifo size\n", i);
+ if (!rem_fifo)
+ continue;
+
+ addn_fifo = rem_fifo;
+ }
+
+ fifo[i] += (addn_fifo / XGMAC_FIFO_UNIT);
+ rem_fifo -= addn_fifo;
+ }
+
+ if (rem_fifo) {
+ unsigned int inc_fifo = rem_fifo / prio_queues;
+
+ /* Distribute remaining fifo across queues */
+ for (i = 0; i < prio_queues; i++)
+ fifo[i] += (inc_fifo / XGMAC_FIFO_UNIT);
+ }
+}
+
+static void xgbe_config_tx_fifo_size(struct xgbe_prv_data *pdata)
+{
+ unsigned int fifo_size;
+ unsigned int fifo[XGBE_MAX_QUEUES];
+ unsigned int i;
+
+ fifo_size = xgbe_get_tx_fifo_size(pdata);
+
+ xgbe_calculate_equal_fifo(fifo_size, pdata->tx_q_count, fifo);
+
+ for (i = 0; i < pdata->tx_q_count; i++)
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, fifo[i]);
+
+ netif_info(pdata, drv, pdata->netdev,
+ "%d Tx hardware queues, %d byte fifo per queue\n",
+ pdata->tx_q_count, ((fifo[0] + 1) * XGMAC_FIFO_UNIT));
+}
+
+static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata)
+{
+ unsigned int fifo_size;
+ unsigned int fifo[XGBE_MAX_QUEUES];
+ unsigned int prio_queues;
+ unsigned int i;
+
+ /* Clear any DCB related fifo/queue information */
+ memset(pdata->pfcq, 0, sizeof(pdata->pfcq));
+ pdata->pfc_rfa = 0;
+
+ fifo_size = xgbe_get_rx_fifo_size(pdata);
+ prio_queues = XGMAC_PRIO_QUEUES(pdata->rx_q_count);
+
+ /* Assign a minimum fifo to the non-VLAN priority queues */
+ fifo_size = xgbe_set_nonprio_fifos(fifo_size, pdata->rx_q_count, fifo);
+
+ if (pdata->pfc && pdata->ets)
+ xgbe_calculate_dcb_fifo(pdata, fifo_size, fifo);
+ else
+ xgbe_calculate_equal_fifo(fifo_size, prio_queues, fifo);
+
+ for (i = 0; i < pdata->rx_q_count; i++)
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, fifo[i]);
+
+ xgbe_calculate_flow_control_threshold(pdata, fifo);
+ xgbe_config_flow_control_threshold(pdata);
+
+ if (pdata->pfc && pdata->ets && pdata->pfc->pfc_en) {
+ netif_info(pdata, drv, pdata->netdev,
+ "%u Rx hardware queues\n", pdata->rx_q_count);
+ for (i = 0; i < pdata->rx_q_count; i++)
+ netif_info(pdata, drv, pdata->netdev,
+ "RxQ%u, %u byte fifo queue\n", i,
+ ((fifo[i] + 1) * XGMAC_FIFO_UNIT));
+ } else {
+ netif_info(pdata, drv, pdata->netdev,
+ "%u Rx hardware queues, %u byte fifo per queue\n",
+ pdata->rx_q_count,
+ ((fifo[0] + 1) * XGMAC_FIFO_UNIT));
+ }
+}
+
+static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata)
+{
+ unsigned int qptc, qptc_extra, queue;
+ unsigned int prio_queues;
+ unsigned int ppq, ppq_extra, prio;
+ unsigned int mask;
+ unsigned int i, j, reg, reg_val;
+
+ /* Map the MTL Tx Queues to Traffic Classes
+ * Note: Tx Queues >= Traffic Classes
+ */
+ qptc = pdata->tx_q_count / pdata->hw_feat.tc_cnt;
+ qptc_extra = pdata->tx_q_count % pdata->hw_feat.tc_cnt;
+
+ for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) {
+ for (j = 0; j < qptc; j++) {
+ netif_dbg(pdata, drv, pdata->netdev,
+ "TXq%u mapped to TC%u\n", queue, i);
+ XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
+ Q2TCMAP, i);
+ pdata->q2tc_map[queue++] = i;
+ }
+
+ if (i < qptc_extra) {
+ netif_dbg(pdata, drv, pdata->netdev,
+ "TXq%u mapped to TC%u\n", queue, i);
+ XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
+ Q2TCMAP, i);
+ pdata->q2tc_map[queue++] = i;
+ }
+ }
+
+ /* Map the 8 VLAN priority values to available MTL Rx queues */
+ prio_queues = XGMAC_PRIO_QUEUES(pdata->rx_q_count);
+ ppq = IEEE_8021QAZ_MAX_TCS / prio_queues;
+ ppq_extra = IEEE_8021QAZ_MAX_TCS % prio_queues;
+
+ reg = MAC_RQC2R;
+ reg_val = 0;
+ for (i = 0, prio = 0; i < prio_queues;) {
+ mask = 0;
+ for (j = 0; j < ppq; j++) {
+ netif_dbg(pdata, drv, pdata->netdev,
+ "PRIO%u mapped to RXq%u\n", prio, i);
+ mask |= (1 << prio);
+ pdata->prio2q_map[prio++] = i;
+ }
+
+ if (i < ppq_extra) {
+ netif_dbg(pdata, drv, pdata->netdev,
+ "PRIO%u mapped to RXq%u\n", prio, i);
+ mask |= (1 << prio);
+ pdata->prio2q_map[prio++] = i;
+ }
+
+ reg_val |= (mask << ((i++ % MAC_RQC2_Q_PER_REG) << 3));
+
+ if ((i % MAC_RQC2_Q_PER_REG) && (i != prio_queues))
+ continue;
+
+ XGMAC_IOWRITE(pdata, reg, reg_val);
+ reg += MAC_RQC2_INC;
+ reg_val = 0;
+ }
+
+ /* Select dynamic mapping of MTL Rx queue to DMA Rx channel */
+ reg = MTL_RQDCM0R;
+ reg_val = 0;
+ for (i = 0; i < pdata->rx_q_count;) {
+ reg_val |= (0x80 << ((i++ % MTL_RQDCM_Q_PER_REG) << 3));
+
+ if ((i % MTL_RQDCM_Q_PER_REG) && (i != pdata->rx_q_count))
+ continue;
+
+ XGMAC_IOWRITE(pdata, reg, reg_val);
+
+ reg += MTL_RQDCM_INC;
+ reg_val = 0;
+ }
+}
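+
+/* Register packing notes for the loops above: MAC_RQC2R and friends
+ * hold one 8-bit priority mask per queue, four queues per 32-bit
+ * register (the "<< 3" converts a queue slot to a byte offset), and
+ * the MTL_RQDCM registers are packed the same way, with 0x80 per byte
+ * selecting dynamic DMA channel mapping for that queue.
+ */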
+
+static void xgbe_config_tc(struct xgbe_prv_data *pdata)
+{
+ unsigned int offset, queue, prio;
+ u8 i;
+
+ netdev_reset_tc(pdata->netdev);
+ if (!pdata->num_tcs)
+ return;
+
+ netdev_set_num_tc(pdata->netdev, pdata->num_tcs);
+
+ for (i = 0, queue = 0, offset = 0; i < pdata->num_tcs; i++) {
+ while ((queue < pdata->tx_q_count) &&
+ (pdata->q2tc_map[queue] == i))
+ queue++;
+
+ netif_dbg(pdata, drv, pdata->netdev, "TC%u using TXq%u-%u\n",
+ i, offset, queue - 1);
+ netdev_set_tc_queue(pdata->netdev, i, queue - offset, offset);
+ offset = queue;
+ }
+
+ if (!pdata->ets)
+ return;
+
+ for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++)
+ netdev_set_prio_tc_map(pdata->netdev, prio,
+ pdata->ets->prio_tc[prio]);
+}
+
+static void xgbe_config_dcb_tc(struct xgbe_prv_data *pdata)
+{
+ struct ieee_ets *ets = pdata->ets;
+ unsigned int total_weight, min_weight, weight;
+ unsigned int mask, reg, reg_val;
+ unsigned int i, prio;
+
+ if (!ets)
+ return;
+
+ /* Set Tx to deficit weighted round robin scheduling algorithm (when
+ * traffic class is using ETS algorithm)
+ */
+ XGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_DWRR);
+
+ /* Set Traffic Class algorithms */
+ total_weight = pdata->netdev->mtu * pdata->hw_feat.tc_cnt;
+ min_weight = total_weight / 100;
+ if (!min_weight)
+ min_weight = 1;
+
+ for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
+ /* Map the priorities to the traffic class */
+ mask = 0;
+ for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) {
+ if (ets->prio_tc[prio] == i)
+ mask |= (1 << prio);
+ }
+ mask &= 0xff;
+
+ netif_dbg(pdata, drv, pdata->netdev, "TC%u PRIO mask=%#x\n",
+ i, mask);
+ reg = MTL_TCPM0R + (MTL_TCPM_INC * (i / MTL_TCPM_TC_PER_REG));
+ reg_val = XGMAC_IOREAD(pdata, reg);
+
+ reg_val &= ~(0xff << ((i % MTL_TCPM_TC_PER_REG) << 3));
+ reg_val |= (mask << ((i % MTL_TCPM_TC_PER_REG) << 3));
+
+ XGMAC_IOWRITE(pdata, reg, reg_val);
+
+ /* Set the traffic class algorithm */
+ switch (ets->tc_tsa[i]) {
+ case IEEE_8021QAZ_TSA_STRICT:
+ netif_dbg(pdata, drv, pdata->netdev,
+ "TC%u using SP\n", i);
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
+ MTL_TSA_SP);
+ break;
+ case IEEE_8021QAZ_TSA_ETS:
+ weight = total_weight * ets->tc_tx_bw[i] / 100;
+ weight = clamp(weight, min_weight, total_weight);
+
+ netif_dbg(pdata, drv, pdata->netdev,
+ "TC%u using DWRR (weight %u)\n", i, weight);
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
+ MTL_TSA_ETS);
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW,
+ weight);
+ break;
+ }
+ }
+
+ xgbe_config_tc(pdata);
+}
+
+static void xgbe_config_dcb_pfc(struct xgbe_prv_data *pdata)
+{
+ if (!test_bit(XGBE_DOWN, &pdata->dev_state)) {
+ /* Just stop the Tx queues while Rx fifo is changed */
+ netif_tx_stop_all_queues(pdata->netdev);
+
+ /* Suspend Rx so that fifo's can be adjusted */
+ pdata->hw_if.disable_rx(pdata);
+ }
+
+ xgbe_config_rx_fifo_size(pdata);
+ xgbe_config_flow_control(pdata);
+
+ if (!test_bit(XGBE_DOWN, &pdata->dev_state)) {
+ /* Resume Rx */
+ pdata->hw_if.enable_rx(pdata);
+
+ /* Resume Tx queues */
+ netif_tx_start_all_queues(pdata->netdev);
+ }
+}
+
+static void xgbe_config_mac_address(struct xgbe_prv_data *pdata)
+{
+ xgbe_set_mac_address(pdata, pdata->netdev->dev_addr);
+
+ /* Filtering is done using perfect filtering and hash filtering */
+ if (pdata->hw_feat.hash_table_size) {
+ XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 1);
+ XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 1);
+ XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HMC, 1);
+ }
+}
+
+static void xgbe_config_jumbo_enable(struct xgbe_prv_data *pdata)
+{
+ unsigned int val;
+
+ val = (pdata->netdev->mtu > XGMAC_STD_PACKET_MTU) ? 1 : 0;
+
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val);
+}
+
+static void xgbe_config_mac_speed(struct xgbe_prv_data *pdata)
+{
+ xgbe_set_speed(pdata, pdata->phy_speed);
+}
+
+static void xgbe_config_checksum_offload(struct xgbe_prv_data *pdata)
+{
+ if (pdata->netdev->features & NETIF_F_RXCSUM)
+ xgbe_enable_rx_csum(pdata);
+ else
+ xgbe_disable_rx_csum(pdata);
+}
+
+static void xgbe_config_vlan_support(struct xgbe_prv_data *pdata)
+{
+ /* Indicate that VLAN Tx CTAGs come from context descriptors */
+ XGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, CSVL, 0);
+ XGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, VLTI, 1);
+
+ /* Set the current VLAN Hash Table register value */
+ xgbe_update_vlan_hash_table(pdata);
+
+ if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
+ xgbe_enable_rx_vlan_filtering(pdata);
+ else
+ xgbe_disable_rx_vlan_filtering(pdata);
+
+ if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
+ xgbe_enable_rx_vlan_stripping(pdata);
+ else
+ xgbe_disable_rx_vlan_stripping(pdata);
+}
+
+static u64 xgbe_mmc_read(struct xgbe_prv_data *pdata, unsigned int reg_lo)
+{
+ bool read_hi;
+ u64 val;
+
+ if (pdata->vdata->mmc_64bit) {
+ switch (reg_lo) {
+ /* These registers are always 32 bit */
+ case MMC_RXRUNTERROR:
+ case MMC_RXJABBERERROR:
+ case MMC_RXUNDERSIZE_G:
+ case MMC_RXOVERSIZE_G:
+ case MMC_RXWATCHDOGERROR:
+ read_hi = false;
+ break;
+
+ default:
+ read_hi = true;
+ }
+ } else {
+ switch (reg_lo) {
+ /* These registers are always 64 bit */
+ case MMC_TXOCTETCOUNT_GB_LO:
+ case MMC_TXOCTETCOUNT_G_LO:
+ case MMC_RXOCTETCOUNT_GB_LO:
+ case MMC_RXOCTETCOUNT_G_LO:
+ read_hi = true;
+ break;
+
+ default:
+ read_hi = false;
+ }
+ }
+
+ val = XGMAC_IOREAD(pdata, reg_lo);
+
+ if (read_hi)
+ val |= ((u64)XGMAC_IOREAD(pdata, reg_lo + 4) << 32);
+
+ return val;
+}
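+
+/* For 64-bit counters the hardware exposes a _LO/_HI register pair at
+ * reg_lo and reg_lo + 4; which counters are 64 bit differs between
+ * hardware generations, hence the mmc_64bit flag driving the two
+ * switch statements above.
+ */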
+
+static void xgbe_tx_mmc_int(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_mmc_stats *stats = &pdata->mmc_stats;
+ unsigned int mmc_isr = XGMAC_IOREAD(pdata, MMC_TISR);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_GB))
+ stats->txoctetcount_gb +=
+ xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_GB))
+ stats->txframecount_gb +=
+ xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_G))
+ stats->txbroadcastframes_g +=
+ xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_G))
+ stats->txmulticastframes_g +=
+ xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX64OCTETS_GB))
+ stats->tx64octets_gb +=
+ xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX65TO127OCTETS_GB))
+ stats->tx65to127octets_gb +=
+ xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX128TO255OCTETS_GB))
+ stats->tx128to255octets_gb +=
+ xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX256TO511OCTETS_GB))
+ stats->tx256to511octets_gb +=
+ xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX512TO1023OCTETS_GB))
+ stats->tx512to1023octets_gb +=
+ xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX1024TOMAXOCTETS_GB))
+ stats->tx1024tomaxoctets_gb +=
+ xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNICASTFRAMES_GB))
+ stats->txunicastframes_gb +=
+ xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_GB))
+ stats->txmulticastframes_gb +=
+ xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_GB))
+ stats->txbroadcastframes_g +=
+ xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNDERFLOWERROR))
+ stats->txunderflowerror +=
+ xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_G))
+ stats->txoctetcount_g +=
+ xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_G))
+ stats->txframecount_g +=
+ xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXPAUSEFRAMES))
+ stats->txpauseframes +=
+ xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXVLANFRAMES_G))
+ stats->txvlanframes_g +=
+ xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO);
+}
+
+static void xgbe_rx_mmc_int(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_mmc_stats *stats = &pdata->mmc_stats;
+ unsigned int mmc_isr = XGMAC_IOREAD(pdata, MMC_RISR);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFRAMECOUNT_GB))
+ stats->rxframecount_gb +=
+ xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_GB))
+ stats->rxoctetcount_gb +=
+ xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_G))
+ stats->rxoctetcount_g +=
+ xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXBROADCASTFRAMES_G))
+ stats->rxbroadcastframes_g +=
+ xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXMULTICASTFRAMES_G))
+ stats->rxmulticastframes_g +=
+ xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXCRCERROR))
+ stats->rxcrcerror +=
+ xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXRUNTERROR))
+ stats->rxrunterror +=
+ xgbe_mmc_read(pdata, MMC_RXRUNTERROR);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXJABBERERROR))
+ stats->rxjabbererror +=
+ xgbe_mmc_read(pdata, MMC_RXJABBERERROR);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNDERSIZE_G))
+ stats->rxundersize_g +=
+ xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOVERSIZE_G))
+ stats->rxoversize_g +=
+ xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX64OCTETS_GB))
+ stats->rx64octets_gb +=
+ xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX65TO127OCTETS_GB))
+ stats->rx65to127octets_gb +=
+ xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX128TO255OCTETS_GB))
+ stats->rx128to255octets_gb +=
+ xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX256TO511OCTETS_GB))
+ stats->rx256to511octets_gb +=
+ xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX512TO1023OCTETS_GB))
+ stats->rx512to1023octets_gb +=
+ xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX1024TOMAXOCTETS_GB))
+ stats->rx1024tomaxoctets_gb +=
+ xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNICASTFRAMES_G))
+ stats->rxunicastframes_g +=
+ xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXLENGTHERROR))
+ stats->rxlengtherror +=
+ xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOUTOFRANGETYPE))
+ stats->rxoutofrangetype +=
+ xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXPAUSEFRAMES))
+ stats->rxpauseframes +=
+ xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFIFOOVERFLOW))
+ stats->rxfifooverflow +=
+ xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXVLANFRAMES_GB))
+ stats->rxvlanframes_gb +=
+ xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXWATCHDOGERROR))
+ stats->rxwatchdogerror +=
+ xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR);
+}
+
+static void xgbe_read_mmc_stats(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_mmc_stats *stats = &pdata->mmc_stats;
+
+ /* Freeze counters */
+ XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 1);
+
+ stats->txoctetcount_gb +=
+ xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO);
+
+ stats->txframecount_gb +=
+ xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO);
+
+ stats->txbroadcastframes_g +=
+ xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO);
+
+ stats->txmulticastframes_g +=
+ xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO);
+
+ stats->tx64octets_gb +=
+ xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO);
+
+ stats->tx65to127octets_gb +=
+ xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO);
+
+ stats->tx128to255octets_gb +=
+ xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO);
+
+ stats->tx256to511octets_gb +=
+ xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO);
+
+ stats->tx512to1023octets_gb +=
+ xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO);
+
+ stats->tx1024tomaxoctets_gb +=
+ xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
+
+ stats->txunicastframes_gb +=
+ xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO);
+
+ stats->txmulticastframes_gb +=
+ xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
+
+ stats->txbroadcastframes_g +=
+ xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
+
+ stats->txunderflowerror +=
+ xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO);
+
+ stats->txoctetcount_g +=
+ xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO);
+
+ stats->txframecount_g +=
+ xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO);
+
+ stats->txpauseframes +=
+ xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO);
+
+ stats->txvlanframes_g +=
+ xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO);
+
+ stats->rxframecount_gb +=
+ xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO);
+
+ stats->rxoctetcount_gb +=
+ xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO);
+
+ stats->rxoctetcount_g +=
+ xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO);
+
+ stats->rxbroadcastframes_g +=
+ xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO);
+
+ stats->rxmulticastframes_g +=
+ xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO);
+
+ stats->rxcrcerror +=
+ xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO);
+
+ stats->rxrunterror +=
+ xgbe_mmc_read(pdata, MMC_RXRUNTERROR);
+
+ stats->rxjabbererror +=
+ xgbe_mmc_read(pdata, MMC_RXJABBERERROR);
+
+ stats->rxundersize_g +=
+ xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G);
+
+ stats->rxoversize_g +=
+ xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G);
+
+ stats->rx64octets_gb +=
+ xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO);
+
+ stats->rx65to127octets_gb +=
+ xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO);
+
+ stats->rx128to255octets_gb +=
+ xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO);
+
+ stats->rx256to511octets_gb +=
+ xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO);
+
+ stats->rx512to1023octets_gb +=
+ xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO);
+
+ stats->rx1024tomaxoctets_gb +=
+ xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
+
+ stats->rxunicastframes_g +=
+ xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO);
+
+ stats->rxlengtherror +=
+ xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO);
+
+ stats->rxoutofrangetype +=
+ xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO);
+
+ stats->rxpauseframes +=
+ xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO);
+
+ stats->rxfifooverflow +=
+ xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO);
+
+ stats->rxvlanframes_gb +=
+ xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO);
+
+ stats->rxwatchdogerror +=
+ xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR);
+
+ /* Un-freeze counters */
+ XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 0);
+}
+
+static void xgbe_config_mmc(struct xgbe_prv_data *pdata)
+{
+ /* Set counters to reset on read */
+ XGMAC_IOWRITE_BITS(pdata, MMC_CR, ROR, 1);
+
+ /* Reset the counters */
+ XGMAC_IOWRITE_BITS(pdata, MMC_CR, CR, 1);
+}
+
+static void xgbe_txq_prepare_tx_stop(struct xgbe_prv_data *pdata,
+ unsigned int queue)
+{
+ unsigned int tx_status;
+ unsigned long tx_timeout;
+
+ /* The Tx engine cannot be stopped if it is actively processing
+ * packets. Wait for the Tx queue to empty the Tx fifo. Don't
+ * wait forever though...
+ */
+ tx_timeout = jiffies + (XGBE_DMA_STOP_TIMEOUT * HZ);
+ while (time_before(jiffies, tx_timeout)) {
+ tx_status = XGMAC_MTL_IOREAD(pdata, queue, MTL_Q_TQDR);
+ if ((XGMAC_GET_BITS(tx_status, MTL_Q_TQDR, TRCSTS) != 1) &&
+ (XGMAC_GET_BITS(tx_status, MTL_Q_TQDR, TXQSTS) == 0))
+ break;
+
+ usleep_range(500, 1000);
+ }
+
+ if (!time_before(jiffies, tx_timeout))
+ netdev_info(pdata->netdev,
+ "timed out waiting for Tx queue %u to empty\n",
+ queue);
+}
+
+static void xgbe_prepare_tx_stop(struct xgbe_prv_data *pdata,
+ unsigned int queue)
+{
+ unsigned int tx_dsr, tx_pos, tx_qidx;
+ unsigned int tx_status;
+ unsigned long tx_timeout;
+
+ if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) > 0x20)
+ return xgbe_txq_prepare_tx_stop(pdata, queue);
+
+ /* Calculate the status register to read and the position within */
+ if (queue < DMA_DSRX_FIRST_QUEUE) {
+ tx_dsr = DMA_DSR0;
+ tx_pos = (queue * DMA_DSR_Q_WIDTH) + DMA_DSR0_TPS_START;
+ } else {
+ tx_qidx = queue - DMA_DSRX_FIRST_QUEUE;
+
+ tx_dsr = DMA_DSR1 + ((tx_qidx / DMA_DSRX_QPR) * DMA_DSRX_INC);
+ tx_pos = ((tx_qidx % DMA_DSRX_QPR) * DMA_DSR_Q_WIDTH) +
+ DMA_DSRX_TPS_START;
+ }
+
+ /* The Tx engine cannot be stopped if it is actively processing
+ * descriptors. Wait for the Tx engine to enter the stopped or
+ * suspended state. Don't wait forever though...
+ */
+ tx_timeout = jiffies + (XGBE_DMA_STOP_TIMEOUT * HZ);
+ while (time_before(jiffies, tx_timeout)) {
+ tx_status = XGMAC_IOREAD(pdata, tx_dsr);
+ tx_status = GET_BITS(tx_status, tx_pos, DMA_DSR_TPS_WIDTH);
+ if ((tx_status == DMA_TPS_STOPPED) ||
+ (tx_status == DMA_TPS_SUSPENDED))
+ break;
+
+ usleep_range(500, 1000);
+ }
+
+ if (!time_before(jiffies, tx_timeout))
+ netdev_info(pdata->netdev,
+ "timed out waiting for Tx DMA channel %u to stop\n",
+ queue);
+}
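+
+/* For older (version 2.0 and earlier) hardware the Tx channel status
+ * lives in the DMA debug status registers: DMA_DSR0 covers the first
+ * DMA_DSRX_FIRST_QUEUE channels and DMA_DSR1 onwards pack DMA_DSRX_QPR
+ * channels per register, which is what the index arithmetic above
+ * computes.
+ */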
+
+static void xgbe_enable_tx(struct xgbe_prv_data *pdata)
+{
+ unsigned int i;
+
+ /* Enable each Tx DMA channel */
+ for (i = 0; i < pdata->channel_count; i++) {
+ if (!pdata->channel[i]->tx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 1);
+ }
+
+ /* Enable each Tx queue */
+ for (i = 0; i < pdata->tx_q_count; i++)
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN,
+ MTL_Q_ENABLED);
+
+ /* Enable MAC Tx */
+ XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1);
+}
+
+static void xgbe_disable_tx(struct xgbe_prv_data *pdata)
+{
+ unsigned int i;
+
+ /* Prepare for Tx DMA channel stop */
+ for (i = 0; i < pdata->tx_q_count; i++)
+ xgbe_prepare_tx_stop(pdata, i);
+
+ /* Disable MAC Tx */
+ XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);
+
+ /* Disable each Tx queue */
+ for (i = 0; i < pdata->tx_q_count; i++)
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN, 0);
+
+ /* Disable each Tx DMA channel */
+ for (i = 0; i < pdata->channel_count; i++) {
+ if (!pdata->channel[i]->tx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 0);
+ }
+}
+
+static void xgbe_prepare_rx_stop(struct xgbe_prv_data *pdata,
+ unsigned int queue)
+{
+ unsigned int rx_status;
+ unsigned long rx_timeout;
+
+ /* The Rx engine cannot be stopped if it is actively processing
+ * packets. Wait for the Rx queue to empty the Rx fifo. Don't
+ * wait forever though...
+ */
+ rx_timeout = jiffies + (XGBE_DMA_STOP_TIMEOUT * HZ);
+ while (time_before(jiffies, rx_timeout)) {
+ rx_status = XGMAC_MTL_IOREAD(pdata, queue, MTL_Q_RQDR);
+ if ((XGMAC_GET_BITS(rx_status, MTL_Q_RQDR, PRXQ) == 0) &&
+ (XGMAC_GET_BITS(rx_status, MTL_Q_RQDR, RXQSTS) == 0))
+ break;
+
+ usleep_range(500, 1000);
+ }
+
+ if (!time_before(jiffies, rx_timeout))
+ netdev_info(pdata->netdev,
+ "timed out waiting for Rx queue %u to empty\n",
+ queue);
+}
+
+static void xgbe_enable_rx(struct xgbe_prv_data *pdata)
+{
+ unsigned int reg_val, i;
+
+ /* Enable each Rx DMA channel */
+ for (i = 0; i < pdata->channel_count; i++) {
+ if (!pdata->channel[i]->rx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 1);
+ }
+
+ /* Enable each Rx queue */
+ reg_val = 0;
+ for (i = 0; i < pdata->rx_q_count; i++)
+ reg_val |= (0x02 << (i << 1));
+ XGMAC_IOWRITE(pdata, MAC_RQC0R, reg_val);
+
+ /* Enable MAC Rx */
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 1);
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 1);
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 1);
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 1);
+}
+
+static void xgbe_disable_rx(struct xgbe_prv_data *pdata)
+{
+ unsigned int i;
+
+ /* Disable MAC Rx */
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 0);
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 0);
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 0);
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 0);
+
+ /* Prepare for Rx DMA channel stop */
+ for (i = 0; i < pdata->rx_q_count; i++)
+ xgbe_prepare_rx_stop(pdata, i);
+
+ /* Disable each Rx queue */
+ XGMAC_IOWRITE(pdata, MAC_RQC0R, 0);
+
+ /* Disable each Rx DMA channel */
+ for (i = 0; i < pdata->channel_count; i++) {
+ if (!pdata->channel[i]->rx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 0);
+ }
+}
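+
+/* Note the deliberate asymmetry: enable brings up DMA channels, then
+ * queues, then the MAC; disable reverses the flow and stops the MAC
+ * first so no new frames enter the MTL queues while the Rx queues and
+ * DMA channels drain and stop.
+ */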
+
+static void xgbe_powerup_tx(struct xgbe_prv_data *pdata)
+{
+ unsigned int i;
+
+ /* Enable each Tx DMA channel */
+ for (i = 0; i < pdata->channel_count; i++) {
+ if (!pdata->channel[i]->tx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 1);
+ }
+
+ /* Enable MAC Tx */
+ XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1);
+}
+
+static void xgbe_powerdown_tx(struct xgbe_prv_data *pdata)
+{
+ unsigned int i;
+
+ /* Prepare for Tx DMA channel stop */
+ for (i = 0; i < pdata->tx_q_count; i++)
+ xgbe_prepare_tx_stop(pdata, i);
+
+ /* Disable MAC Tx */
+ XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);
+
+ /* Disable each Tx DMA channel */
+ for (i = 0; i < pdata->channel_count; i++) {
+ if (!pdata->channel[i]->tx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 0);
+ }
+}
+
+static void xgbe_powerup_rx(struct xgbe_prv_data *pdata)
+{
+ unsigned int i;
+
+ /* Enable each Rx DMA channel */
+ for (i = 0; i < pdata->channel_count; i++) {
+ if (!pdata->channel[i]->rx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 1);
+ }
+}
+
+static void xgbe_powerdown_rx(struct xgbe_prv_data *pdata)
+{
+ unsigned int i;
+
+ /* Disable each Rx DMA channel */
+ for (i = 0; i < pdata->channel_count; i++) {
+ if (!pdata->channel[i]->rx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 0);
+ }
+}
+
+static int xgbe_init(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_desc_if *desc_if = &pdata->desc_if;
+ int ret;
+
+ DBGPR("-->xgbe_init\n");
+
+ /* Flush Tx queues */
+ ret = xgbe_flush_tx_queues(pdata);
+ if (ret) {
+ netdev_err(pdata->netdev, "error flushing TX queues\n");
+ return ret;
+ }
+
+ /*
+ * Initialize DMA related features
+ */
+ xgbe_config_dma_bus(pdata);
+ xgbe_config_dma_cache(pdata);
+ xgbe_config_osp_mode(pdata);
+ xgbe_config_pbl_val(pdata);
+ xgbe_config_rx_coalesce(pdata);
+ xgbe_config_tx_coalesce(pdata);
+ xgbe_config_rx_buffer_size(pdata);
+ xgbe_config_tso_mode(pdata);
+ xgbe_config_sph_mode(pdata);
+ xgbe_config_rss(pdata);
+ desc_if->wrapper_tx_desc_init(pdata);
+ desc_if->wrapper_rx_desc_init(pdata);
+ xgbe_enable_dma_interrupts(pdata);
+
+ /*
+ * Initialize MTL related features
+ */
+ xgbe_config_mtl_mode(pdata);
+ xgbe_config_queue_mapping(pdata);
+ xgbe_config_tsf_mode(pdata, pdata->tx_sf_mode);
+ xgbe_config_rsf_mode(pdata, pdata->rx_sf_mode);
+ xgbe_config_tx_threshold(pdata, pdata->tx_threshold);
+ xgbe_config_rx_threshold(pdata, pdata->rx_threshold);
+ xgbe_config_tx_fifo_size(pdata);
+ xgbe_config_rx_fifo_size(pdata);
+ /* TODO: enable Error Packet and undersized good Packet forwarding
+ * (FEP and FUP)
+ */
+ xgbe_config_dcb_tc(pdata);
+ xgbe_enable_mtl_interrupts(pdata);
+
+ /*
+ * Initialize MAC related features
+ */
+ xgbe_config_mac_address(pdata);
+ xgbe_config_rx_mode(pdata);
+ xgbe_config_jumbo_enable(pdata);
+ xgbe_config_flow_control(pdata);
+ xgbe_config_mac_speed(pdata);
+ xgbe_config_checksum_offload(pdata);
+ xgbe_config_vlan_support(pdata);
+ xgbe_config_mmc(pdata);
+ xgbe_enable_mac_interrupts(pdata);
+
+ /*
+ * Initialize ECC related features
+ */
+ xgbe_enable_ecc_interrupts(pdata);
+
+ DBGPR("<--xgbe_init\n");
+
+ return 0;
+}
+
+void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
+{
+ DBGPR("-->xgbe_init_function_ptrs\n");
+
+ hw_if->tx_complete = xgbe_tx_complete;
+
+ hw_if->set_mac_address = xgbe_set_mac_address;
+ hw_if->config_rx_mode = xgbe_config_rx_mode;
+
+ hw_if->enable_rx_csum = xgbe_enable_rx_csum;
+ hw_if->disable_rx_csum = xgbe_disable_rx_csum;
+
+ hw_if->enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping;
+ hw_if->disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping;
+ hw_if->enable_rx_vlan_filtering = xgbe_enable_rx_vlan_filtering;
+ hw_if->disable_rx_vlan_filtering = xgbe_disable_rx_vlan_filtering;
+ hw_if->update_vlan_hash_table = xgbe_update_vlan_hash_table;
+
+ hw_if->read_mmd_regs = xgbe_read_mmd_regs;
+ hw_if->write_mmd_regs = xgbe_write_mmd_regs;
+
+ hw_if->set_speed = xgbe_set_speed;
+
+ hw_if->set_ext_mii_mode = xgbe_set_ext_mii_mode;
+ hw_if->read_ext_mii_regs = xgbe_read_ext_mii_regs;
+ hw_if->write_ext_mii_regs = xgbe_write_ext_mii_regs;
+
+ hw_if->set_gpio = xgbe_set_gpio;
+ hw_if->clr_gpio = xgbe_clr_gpio;
+
+ hw_if->enable_tx = xgbe_enable_tx;
+ hw_if->disable_tx = xgbe_disable_tx;
+ hw_if->enable_rx = xgbe_enable_rx;
+ hw_if->disable_rx = xgbe_disable_rx;
+
+ hw_if->powerup_tx = xgbe_powerup_tx;
+ hw_if->powerdown_tx = xgbe_powerdown_tx;
+ hw_if->powerup_rx = xgbe_powerup_rx;
+ hw_if->powerdown_rx = xgbe_powerdown_rx;
+
+ hw_if->dev_xmit = xgbe_dev_xmit;
+ hw_if->dev_read = xgbe_dev_read;
+ hw_if->enable_int = xgbe_enable_int;
+ hw_if->disable_int = xgbe_disable_int;
+ hw_if->init = xgbe_init;
+ hw_if->exit = xgbe_exit;
+
+ /* Descriptor related Sequences have to be initialized here */
+ hw_if->tx_desc_init = xgbe_tx_desc_init;
+ hw_if->rx_desc_init = xgbe_rx_desc_init;
+ hw_if->tx_desc_reset = xgbe_tx_desc_reset;
+ hw_if->rx_desc_reset = xgbe_rx_desc_reset;
+ hw_if->is_last_desc = xgbe_is_last_desc;
+ hw_if->is_context_desc = xgbe_is_context_desc;
+ hw_if->tx_start_xmit = xgbe_tx_start_xmit;
+
+ /* For FLOW ctrl */
+ hw_if->config_tx_flow_control = xgbe_config_tx_flow_control;
+ hw_if->config_rx_flow_control = xgbe_config_rx_flow_control;
+
+ /* For RX coalescing */
+ hw_if->config_rx_coalesce = xgbe_config_rx_coalesce;
+ hw_if->config_tx_coalesce = xgbe_config_tx_coalesce;
+ hw_if->usec_to_riwt = xgbe_usec_to_riwt;
+ hw_if->riwt_to_usec = xgbe_riwt_to_usec;
+
+ /* For RX and TX threshold config */
+ hw_if->config_rx_threshold = xgbe_config_rx_threshold;
+ hw_if->config_tx_threshold = xgbe_config_tx_threshold;
+
+ /* For RX and TX Store and Forward Mode config */
+ hw_if->config_rsf_mode = xgbe_config_rsf_mode;
+ hw_if->config_tsf_mode = xgbe_config_tsf_mode;
+
+ /* For TX DMA Operating on Second Frame config */
+ hw_if->config_osp_mode = xgbe_config_osp_mode;
+
+ /* For MMC statistics support */
+ hw_if->tx_mmc_int = xgbe_tx_mmc_int;
+ hw_if->rx_mmc_int = xgbe_rx_mmc_int;
+ hw_if->read_mmc_stats = xgbe_read_mmc_stats;
+
+ /* For PTP config */
+ hw_if->config_tstamp = xgbe_config_tstamp;
+ hw_if->update_tstamp_addend = xgbe_update_tstamp_addend;
+ hw_if->set_tstamp_time = xgbe_set_tstamp_time;
+ hw_if->get_tstamp_time = xgbe_get_tstamp_time;
+ hw_if->get_tx_tstamp = xgbe_get_tx_tstamp;
+
+ /* For Data Center Bridging config */
+ hw_if->config_tc = xgbe_config_tc;
+ hw_if->config_dcb_tc = xgbe_config_dcb_tc;
+ hw_if->config_dcb_pfc = xgbe_config_dcb_pfc;
+
+ /* For Receive Side Scaling */
+ hw_if->enable_rss = xgbe_enable_rss;
+ hw_if->disable_rss = xgbe_disable_rss;
+ hw_if->set_rss_hash_key = xgbe_set_rss_hash_key;
+ hw_if->set_rss_lookup_table = xgbe_set_rss_lookup_table;
+
+ /* For ECC */
+ hw_if->disable_ecc_ded = xgbe_disable_ecc_ded;
+ hw_if->disable_ecc_sec = xgbe_disable_ecc_sec;
+
+ /* For VXLAN */
+ hw_if->enable_vxlan = xgbe_enable_vxlan;
+ hw_if->disable_vxlan = xgbe_disable_vxlan;
+ hw_if->set_vxlan_id = xgbe_set_vxlan_id;
+
+ DBGPR("<--xgbe_init_function_ptrs\n");
+}
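+
+/* Everything above is reached only through the hw_if ops table filled in
+ * by xgbe_init_function_ptrs_dev(); the rest of the driver goes through
+ * the function pointers rather than these statics. A minimal usage
+ * sketch, mirroring what xgbe_start() in xgbe-drv.c does:
+ *
+ *   struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ *
+ *   ret = hw_if->init(pdata);      // dispatches to xgbe_init()
+ *   if (!ret) {
+ *           hw_if->enable_tx(pdata);
+ *           hw_if->enable_rx(pdata);
+ *   }
+ */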
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
new file mode 100644
index 000000000..23401958b
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -0,0 +1,2822 @@
+/*
+ * AMD 10Gb Ethernet driver
+ *
+ * This file is available to you under your choice of the following two
+ * licenses:
+ *
+ * License 1: GPLv2
+ *
+ * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
+ *
+ * This file is free software; you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * License 2: Modified BSD
+ *
+ * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Advanced Micro Devices, Inc. nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/tcp.h>
+#include <linux/if_vlan.h>
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/if_ether.h>
+#include <linux/net_tstamp.h>
+#include <linux/phy.h>
+#include <net/vxlan.h>
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+static unsigned int ecc_sec_info_threshold = 10;
+static unsigned int ecc_sec_warn_threshold = 10000;
+static unsigned int ecc_sec_period = 600;
+static unsigned int ecc_ded_threshold = 2;
+static unsigned int ecc_ded_period = 600;
+
+#ifdef CONFIG_AMD_XGBE_HAVE_ECC
+/* Only expose the ECC parameters if supported */
+module_param(ecc_sec_info_threshold, uint, 0644);
+MODULE_PARM_DESC(ecc_sec_info_threshold,
+ " ECC corrected error informational threshold setting");
+
+module_param(ecc_sec_warn_threshold, uint, 0644);
+MODULE_PARM_DESC(ecc_sec_warn_threshold,
+ " ECC corrected error warning threshold setting");
+
+module_param(ecc_sec_period, uint, 0644);
+MODULE_PARM_DESC(ecc_sec_period, " ECC corrected error period (in seconds)");
+
+module_param(ecc_ded_threshold, uint, 0644);
+MODULE_PARM_DESC(ecc_ded_threshold, " ECC detected error threshold setting");
+
+module_param(ecc_ded_period, uint, 0644);
+MODULE_PARM_DESC(ecc_ded_period, " ECC detected error period (in seconds)");
+#endif
+
+static int xgbe_one_poll(struct napi_struct *, int);
+static int xgbe_all_poll(struct napi_struct *, int);
+static void xgbe_stop(struct xgbe_prv_data *);
+
+static void *xgbe_alloc_node(size_t size, int node)
+{
+ void *mem;
+
+ mem = kzalloc_node(size, GFP_KERNEL, node);
+ if (!mem)
+ mem = kzalloc(size, GFP_KERNEL);
+
+ return mem;
+}
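+
+/* xgbe_alloc_node() prefers memory on the device's NUMA node so channel
+ * state and rings stay local to the CPU that services them; if the
+ * node-local allocation fails it falls back to an unconstrained
+ * kzalloc() rather than failing outright.
+ */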
+
+static void xgbe_free_channels(struct xgbe_prv_data *pdata)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(pdata->channel); i++) {
+ if (!pdata->channel[i])
+ continue;
+
+ kfree(pdata->channel[i]->rx_ring);
+ kfree(pdata->channel[i]->tx_ring);
+ kfree(pdata->channel[i]);
+
+ pdata->channel[i] = NULL;
+ }
+
+ pdata->channel_count = 0;
+}
+
+static int xgbe_alloc_channels(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ struct xgbe_ring *ring;
+ unsigned int count, i;
+ unsigned int cpu;
+ int node;
+
+ count = max_t(unsigned int, pdata->tx_ring_count, pdata->rx_ring_count);
+ for (i = 0; i < count; i++) {
+ /* Attempt to use a CPU on the node the device is on */
+ cpu = cpumask_local_spread(i, dev_to_node(pdata->dev));
+
+ /* Set the allocation node based on the returned CPU */
+ node = cpu_to_node(cpu);
+
+ channel = xgbe_alloc_node(sizeof(*channel), node);
+ if (!channel)
+ goto err_mem;
+ pdata->channel[i] = channel;
+
+ snprintf(channel->name, sizeof(channel->name), "channel-%u", i);
+ channel->pdata = pdata;
+ channel->queue_index = i;
+ channel->dma_regs = pdata->xgmac_regs + DMA_CH_BASE +
+ (DMA_CH_INC * i);
+ channel->node = node;
+ cpumask_set_cpu(cpu, &channel->affinity_mask);
+
+ if (pdata->per_channel_irq)
+ channel->dma_irq = pdata->channel_irq[i];
+
+ if (i < pdata->tx_ring_count) {
+ ring = xgbe_alloc_node(sizeof(*ring), node);
+ if (!ring)
+ goto err_mem;
+
+ spin_lock_init(&ring->lock);
+ ring->node = node;
+
+ channel->tx_ring = ring;
+ }
+
+ if (i < pdata->rx_ring_count) {
+ ring = xgbe_alloc_node(sizeof(*ring), node);
+ if (!ring)
+ goto err_mem;
+
+ spin_lock_init(&ring->lock);
+ ring->node = node;
+
+ channel->rx_ring = ring;
+ }
+
+ netif_dbg(pdata, drv, pdata->netdev,
+ "%s: cpu=%u, node=%d\n", channel->name, cpu, node);
+
+ netif_dbg(pdata, drv, pdata->netdev,
+ "%s: dma_regs=%p, dma_irq=%d, tx=%p, rx=%p\n",
+ channel->name, channel->dma_regs, channel->dma_irq,
+ channel->tx_ring, channel->rx_ring);
+ }
+
+ pdata->channel_count = count;
+
+ return 0;
+
+err_mem:
+ xgbe_free_channels(pdata);
+
+ return -ENOMEM;
+}
+
+static inline unsigned int xgbe_tx_avail_desc(struct xgbe_ring *ring)
+{
+ return (ring->rdesc_count - (ring->cur - ring->dirty));
+}
+
+static inline unsigned int xgbe_rx_dirty_desc(struct xgbe_ring *ring)
+{
+ return (ring->cur - ring->dirty);
+}
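+
+/* The two helpers above rely on 'cur' and 'dirty' being free-running
+ * unsigned counters, so the subtraction is correct even across 32-bit
+ * wraparound. A worked example, assuming rdesc_count = 512:
+ *
+ *   cur = 0xfffffffe, dirty = 0xfffffff0
+ *   dirty descriptors = cur - dirty = 14 (unsigned wrap is well defined)
+ *   available Tx descriptors = 512 - 14 = 498
+ */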
+
+static int xgbe_maybe_stop_tx_queue(struct xgbe_channel *channel,
+ struct xgbe_ring *ring, unsigned int count)
+{
+ struct xgbe_prv_data *pdata = channel->pdata;
+
+ if (count > xgbe_tx_avail_desc(ring)) {
+ netif_info(pdata, drv, pdata->netdev,
+ "Tx queue stopped, not enough descriptors available\n");
+ netif_stop_subqueue(pdata->netdev, channel->queue_index);
+ ring->tx.queue_stopped = 1;
+
+ /* If we haven't notified the hardware because of xmit_more
+ * support, tell it now
+ */
+ if (ring->tx.xmit_more)
+ pdata->hw_if.tx_start_xmit(channel, ring);
+
+ return NETDEV_TX_BUSY;
+ }
+
+ return 0;
+}
+
+static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
+{
+ unsigned int rx_buf_size;
+
+ rx_buf_size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+ rx_buf_size = clamp_val(rx_buf_size, XGBE_RX_MIN_BUF_SIZE, PAGE_SIZE);
+
+ rx_buf_size = (rx_buf_size + XGBE_RX_BUF_ALIGN - 1) &
+ ~(XGBE_RX_BUF_ALIGN - 1);
+
+ return rx_buf_size;
+}
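+
+/* The final statement above is the usual power-of-two align-up idiom,
+ * (size + align - 1) & ~(align - 1). A worked example, assuming a 1500
+ * byte MTU and an XGBE_RX_BUF_ALIGN of 64:
+ *
+ *   1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) + 4 (VLAN_HLEN) = 1522
+ *   (1522 + 63) & ~63 = 1536
+ *
+ * i.e. each Rx buffer is rounded up to a 64-byte aligned length.
+ */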
+
+static void xgbe_enable_rx_tx_int(struct xgbe_prv_data *pdata,
+ struct xgbe_channel *channel)
+{
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ enum xgbe_int int_id;
+
+ if (channel->tx_ring && channel->rx_ring)
+ int_id = XGMAC_INT_DMA_CH_SR_TI_RI;
+ else if (channel->tx_ring)
+ int_id = XGMAC_INT_DMA_CH_SR_TI;
+ else if (channel->rx_ring)
+ int_id = XGMAC_INT_DMA_CH_SR_RI;
+ else
+ return;
+
+ hw_if->enable_int(channel, int_id);
+}
+
+static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
+{
+ unsigned int i;
+
+ for (i = 0; i < pdata->channel_count; i++)
+ xgbe_enable_rx_tx_int(pdata, pdata->channel[i]);
+}
+
+static void xgbe_disable_rx_tx_int(struct xgbe_prv_data *pdata,
+ struct xgbe_channel *channel)
+{
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ enum xgbe_int int_id;
+
+ if (channel->tx_ring && channel->rx_ring)
+ int_id = XGMAC_INT_DMA_CH_SR_TI_RI;
+ else if (channel->tx_ring)
+ int_id = XGMAC_INT_DMA_CH_SR_TI;
+ else if (channel->rx_ring)
+ int_id = XGMAC_INT_DMA_CH_SR_RI;
+ else
+ return;
+
+ hw_if->disable_int(channel, int_id);
+}
+
+static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
+{
+ unsigned int i;
+
+ for (i = 0; i < pdata->channel_count; i++)
+ xgbe_disable_rx_tx_int(pdata, pdata->channel[i]);
+}
+
+static bool xgbe_ecc_sec(struct xgbe_prv_data *pdata, unsigned long *period,
+ unsigned int *count, const char *area)
+{
+ if (time_before(jiffies, *period)) {
+ (*count)++;
+ } else {
+ *period = jiffies + (ecc_sec_period * HZ);
+ *count = 1;
+ }
+
+ if (*count > ecc_sec_info_threshold)
+ dev_warn_once(pdata->dev,
+ "%s ECC corrected errors exceed informational threshold\n",
+ area);
+
+ if (*count > ecc_sec_warn_threshold) {
+ dev_warn_once(pdata->dev,
+ "%s ECC corrected errors exceed warning threshold\n",
+ area);
+ return true;
+ }
+
+ return false;
+}
+
+static bool xgbe_ecc_ded(struct xgbe_prv_data *pdata, unsigned long *period,
+ unsigned int *count, const char *area)
+{
+ if (time_before(jiffies, *period)) {
+ (*count)++;
+ } else {
+ *period = jiffies + (ecc_ded_period * HZ);
+ *count = 1;
+ }
+
+ if (*count > ecc_ded_threshold) {
+ netdev_alert(pdata->netdev,
+ "%s ECC detected errors exceed threshold\n",
+ area);
+ return true;
+ }
+
+ return false;
+}
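+
+/* Both helpers above implement a simple windowed rate check: errors are
+ * counted until 'period' expires, then the window restarts at a count of
+ * one. With the default ecc_ded_period of 600 seconds and
+ * ecc_ded_threshold of 2, for example, a third uncorrectable error inside
+ * a 10 minute window exceeds the threshold and the caller schedules a
+ * device stop.
+ */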
+
+static void xgbe_ecc_isr_task(struct tasklet_struct *t)
+{
+ struct xgbe_prv_data *pdata = from_tasklet(pdata, t, tasklet_ecc);
+ unsigned int ecc_isr;
+ bool stop = false;
+
+ /* Mask status with only the interrupts we care about */
+ ecc_isr = XP_IOREAD(pdata, XP_ECC_ISR);
+ ecc_isr &= XP_IOREAD(pdata, XP_ECC_IER);
+ netif_dbg(pdata, intr, pdata->netdev, "ECC_ISR=%#010x\n", ecc_isr);
+
+ if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, TX_DED)) {
+ stop |= xgbe_ecc_ded(pdata, &pdata->tx_ded_period,
+ &pdata->tx_ded_count, "TX fifo");
+ }
+
+ if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, RX_DED)) {
+ stop |= xgbe_ecc_ded(pdata, &pdata->rx_ded_period,
+ &pdata->rx_ded_count, "RX fifo");
+ }
+
+ if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, DESC_DED)) {
+ stop |= xgbe_ecc_ded(pdata, &pdata->desc_ded_period,
+ &pdata->desc_ded_count,
+ "descriptor cache");
+ }
+
+ if (stop) {
+ pdata->hw_if.disable_ecc_ded(pdata);
+ schedule_work(&pdata->stopdev_work);
+ goto out;
+ }
+
+ if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, TX_SEC)) {
+ if (xgbe_ecc_sec(pdata, &pdata->tx_sec_period,
+ &pdata->tx_sec_count, "TX fifo"))
+ pdata->hw_if.disable_ecc_sec(pdata, XGBE_ECC_SEC_TX);
+ }
+
+ if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, RX_SEC))
+ if (xgbe_ecc_sec(pdata, &pdata->rx_sec_period,
+ &pdata->rx_sec_count, "RX fifo"))
+ pdata->hw_if.disable_ecc_sec(pdata, XGBE_ECC_SEC_RX);
+
+ if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, DESC_SEC))
+ if (xgbe_ecc_sec(pdata, &pdata->desc_sec_period,
+ &pdata->desc_sec_count, "descriptor cache"))
+ pdata->hw_if.disable_ecc_sec(pdata, XGBE_ECC_SEC_DESC);
+
+out:
+ /* Clear all ECC interrupts */
+ XP_IOWRITE(pdata, XP_ECC_ISR, ecc_isr);
+
+ /* Reissue interrupt if status is not clear */
+ if (pdata->vdata->irq_reissue_support)
+ XP_IOWRITE(pdata, XP_INT_REISSUE_EN, 1 << 1);
+}
+
+static irqreturn_t xgbe_ecc_isr(int irq, void *data)
+{
+ struct xgbe_prv_data *pdata = data;
+
+ if (pdata->isr_as_tasklet)
+ tasklet_schedule(&pdata->tasklet_ecc);
+ else
+ xgbe_ecc_isr_task(&pdata->tasklet_ecc);
+
+ return IRQ_HANDLED;
+}
+
+static void xgbe_isr_task(struct tasklet_struct *t)
+{
+ struct xgbe_prv_data *pdata = from_tasklet(pdata, t, tasklet_dev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_channel *channel;
+ unsigned int dma_isr, dma_ch_isr;
+ unsigned int mac_isr, mac_tssr, mac_mdioisr;
+ unsigned int i;
+
+ /* The DMA interrupt status register also reports MAC and MTL
+ * interrupts, so for polling mode we only need to check whether
+ * this register is non-zero.
+ */
+ dma_isr = XGMAC_IOREAD(pdata, DMA_ISR);
+ if (!dma_isr)
+ goto isr_done;
+
+ netif_dbg(pdata, intr, pdata->netdev, "DMA_ISR=%#010x\n", dma_isr);
+
+ for (i = 0; i < pdata->channel_count; i++) {
+ if (!(dma_isr & (1 << i)))
+ continue;
+
+ channel = pdata->channel[i];
+
+ dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
+ netif_dbg(pdata, intr, pdata->netdev, "DMA_CH%u_ISR=%#010x\n",
+ i, dma_ch_isr);
+
+ /* The TI or RI interrupt bits may still be set even if using
+ * per channel DMA interrupts. Check to be sure those are not
+ * enabled before using the private data napi structure.
+ */
+ if (!pdata->per_channel_irq &&
+ (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, TI) ||
+ XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RI))) {
+ if (napi_schedule_prep(&pdata->napi)) {
+ /* Disable Tx and Rx interrupts */
+ xgbe_disable_rx_tx_ints(pdata);
+
+ /* Turn on polling */
+ __napi_schedule(&pdata->napi);
+ }
+ } else {
+ /* Don't clear Rx/Tx status if doing per channel DMA
+ * interrupts, these will be cleared by the ISR for
+ * per channel DMA interrupts.
+ */
+ XGMAC_SET_BITS(dma_ch_isr, DMA_CH_SR, TI, 0);
+ XGMAC_SET_BITS(dma_ch_isr, DMA_CH_SR, RI, 0);
+ }
+
+ if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RBU))
+ pdata->ext_stats.rx_buffer_unavailable++;
+
+ /* Restart the device on a Fatal Bus Error */
+ if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, FBE))
+ schedule_work(&pdata->restart_work);
+
+ /* Clear interrupt signals */
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr);
+ }
+
+ if (XGMAC_GET_BITS(dma_isr, DMA_ISR, MACIS)) {
+ mac_isr = XGMAC_IOREAD(pdata, MAC_ISR);
+
+ netif_dbg(pdata, intr, pdata->netdev, "MAC_ISR=%#010x\n",
+ mac_isr);
+
+ if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCTXIS))
+ hw_if->tx_mmc_int(pdata);
+
+ if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCRXIS))
+ hw_if->rx_mmc_int(pdata);
+
+ if (XGMAC_GET_BITS(mac_isr, MAC_ISR, TSIS)) {
+ mac_tssr = XGMAC_IOREAD(pdata, MAC_TSSR);
+
+ netif_dbg(pdata, intr, pdata->netdev,
+ "MAC_TSSR=%#010x\n", mac_tssr);
+
+ if (XGMAC_GET_BITS(mac_tssr, MAC_TSSR, TXTSC)) {
+ /* Read Tx Timestamp to clear interrupt */
+ pdata->tx_tstamp =
+ hw_if->get_tx_tstamp(pdata);
+ queue_work(pdata->dev_workqueue,
+ &pdata->tx_tstamp_work);
+ }
+ }
+
+ if (XGMAC_GET_BITS(mac_isr, MAC_ISR, SMI)) {
+ mac_mdioisr = XGMAC_IOREAD(pdata, MAC_MDIOISR);
+
+ netif_dbg(pdata, intr, pdata->netdev,
+ "MAC_MDIOISR=%#010x\n", mac_mdioisr);
+
+ if (XGMAC_GET_BITS(mac_mdioisr, MAC_MDIOISR,
+ SNGLCOMPINT))
+ complete(&pdata->mdio_complete);
+ }
+ }
+
+isr_done:
+ /* If there is not a separate AN irq, handle it here */
+ if (pdata->dev_irq == pdata->an_irq)
+ pdata->phy_if.an_isr(pdata);
+
+ /* If there is not a separate ECC irq, handle it here */
+ if (pdata->vdata->ecc_support && (pdata->dev_irq == pdata->ecc_irq))
+ xgbe_ecc_isr_task(&pdata->tasklet_ecc);
+
+ /* If there is not a separate I2C irq, handle it here */
+ if (pdata->vdata->i2c_support && (pdata->dev_irq == pdata->i2c_irq))
+ pdata->i2c_if.i2c_isr(pdata);
+
+ /* Reissue interrupt if status is not clear */
+ if (pdata->vdata->irq_reissue_support) {
+ unsigned int reissue_mask;
+
+ reissue_mask = 1 << 0;
+ if (!pdata->per_channel_irq)
+ reissue_mask |= 0xffff << 4;
+
+ XP_IOWRITE(pdata, XP_INT_REISSUE_EN, reissue_mask);
+ }
+}
+
+static irqreturn_t xgbe_isr(int irq, void *data)
+{
+ struct xgbe_prv_data *pdata = data;
+
+ if (pdata->isr_as_tasklet)
+ tasklet_schedule(&pdata->tasklet_dev);
+ else
+ xgbe_isr_task(&pdata->tasklet_dev);
+
+ return IRQ_HANDLED;
+}
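+
+/* Whether the hard IRQ handler above runs the work inline or defers it
+ * depends on isr_as_tasklet: when set (presumably for the MSI/MSI-X
+ * case), the handler only schedules tasklet_dev; otherwise it is safe to
+ * execute xgbe_isr_task() synchronously in interrupt context.
+ */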
+
+static irqreturn_t xgbe_dma_isr(int irq, void *data)
+{
+ struct xgbe_channel *channel = data;
+ struct xgbe_prv_data *pdata = channel->pdata;
+ unsigned int dma_status;
+
+ /* Per channel DMA interrupts are enabled, so we use the per
+ * channel napi structure and not the private data napi structure
+ */
+ if (napi_schedule_prep(&channel->napi)) {
+ /* Disable Tx and Rx interrupts */
+ if (pdata->channel_irq_mode)
+ xgbe_disable_rx_tx_int(pdata, channel);
+ else
+ disable_irq_nosync(channel->dma_irq);
+
+ /* Turn on polling */
+ __napi_schedule_irqoff(&channel->napi);
+ }
+
+ /* Clear Tx/Rx signals */
+ dma_status = 0;
+ XGMAC_SET_BITS(dma_status, DMA_CH_SR, TI, 1);
+ XGMAC_SET_BITS(dma_status, DMA_CH_SR, RI, 1);
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_status);
+
+ return IRQ_HANDLED;
+}
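+
+/* The DMA ISR above follows the classic NAPI hand-off: mask the channel
+ * interrupt (or disable its IRQ line) before __napi_schedule_irqoff(),
+ * so the channel runs polled until the poll routine drains the ring and
+ * re-enables interrupts.
+ */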
+
+static void xgbe_tx_timer(struct timer_list *t)
+{
+ struct xgbe_channel *channel = from_timer(channel, t, tx_timer);
+ struct xgbe_prv_data *pdata = channel->pdata;
+ struct napi_struct *napi;
+
+ DBGPR("-->xgbe_tx_timer\n");
+
+ napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;
+
+ if (napi_schedule_prep(napi)) {
+ /* Disable Tx and Rx interrupts */
+ if (pdata->per_channel_irq) {
+ if (pdata->channel_irq_mode)
+ xgbe_disable_rx_tx_int(pdata, channel);
+ else
+ disable_irq_nosync(channel->dma_irq);
+ } else {
+ xgbe_disable_rx_tx_ints(pdata);
+ }
+
+ /* Turn on polling */
+ __napi_schedule(napi);
+ }
+
+ channel->tx_timer_active = 0;
+
+ DBGPR("<--xgbe_tx_timer\n");
+}
+
+static void xgbe_service(struct work_struct *work)
+{
+ struct xgbe_prv_data *pdata = container_of(work,
+ struct xgbe_prv_data,
+ service_work);
+
+ pdata->phy_if.phy_status(pdata);
+}
+
+static void xgbe_service_timer(struct timer_list *t)
+{
+ struct xgbe_prv_data *pdata = from_timer(pdata, t, service_timer);
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ queue_work(pdata->dev_workqueue, &pdata->service_work);
+
+ mod_timer(&pdata->service_timer, jiffies + HZ);
+
+ if (!pdata->tx_usecs)
+ return;
+
+ for (i = 0; i < pdata->channel_count; i++) {
+ channel = pdata->channel[i];
+ if (!channel->tx_ring || channel->tx_timer_active)
+ break;
+ channel->tx_timer_active = 1;
+ mod_timer(&channel->tx_timer,
+ jiffies + usecs_to_jiffies(pdata->tx_usecs));
+ }
+}
+
+static void xgbe_init_timers(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ timer_setup(&pdata->service_timer, xgbe_service_timer, 0);
+
+ for (i = 0; i < pdata->channel_count; i++) {
+ channel = pdata->channel[i];
+ if (!channel->tx_ring)
+ break;
+
+ timer_setup(&channel->tx_timer, xgbe_tx_timer, 0);
+ }
+}
+
+static void xgbe_start_timers(struct xgbe_prv_data *pdata)
+{
+ mod_timer(&pdata->service_timer, jiffies + HZ);
+}
+
+static void xgbe_stop_timers(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ del_timer_sync(&pdata->service_timer);
+
+ for (i = 0; i < pdata->channel_count; i++) {
+ channel = pdata->channel[i];
+ if (!channel->tx_ring)
+ break;
+
+ /* Deactivate the Tx timer */
+ del_timer_sync(&channel->tx_timer);
+ channel->tx_timer_active = 0;
+ }
+}
+
+void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
+{
+ unsigned int mac_hfr0, mac_hfr1, mac_hfr2;
+ struct xgbe_hw_features *hw_feat = &pdata->hw_feat;
+
+ mac_hfr0 = XGMAC_IOREAD(pdata, MAC_HWF0R);
+ mac_hfr1 = XGMAC_IOREAD(pdata, MAC_HWF1R);
+ mac_hfr2 = XGMAC_IOREAD(pdata, MAC_HWF2R);
+
+ memset(hw_feat, 0, sizeof(*hw_feat));
+
+ hw_feat->version = XGMAC_IOREAD(pdata, MAC_VR);
+
+ /* Hardware feature register 0 */
+ hw_feat->gmii = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL);
+ hw_feat->vlhash = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH);
+ hw_feat->sma = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SMASEL);
+ hw_feat->rwk = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RWKSEL);
+ hw_feat->mgk = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MGKSEL);
+ hw_feat->mmc = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MMCSEL);
+ hw_feat->aoe = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, ARPOFFSEL);
+ hw_feat->ts = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSEL);
+ hw_feat->eee = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, EEESEL);
+ hw_feat->tx_coe = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TXCOESEL);
+ hw_feat->rx_coe = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RXCOESEL);
+ hw_feat->addn_mac = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R,
+ ADDMACADRSEL);
+ hw_feat->ts_src = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL);
+ hw_feat->sa_vlan_ins = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SAVLANINS);
+ hw_feat->vxn = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VXN);
+
+ /* Hardware feature register 1 */
+ hw_feat->rx_fifo_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
+ RXFIFOSIZE);
+ hw_feat->tx_fifo_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
+ TXFIFOSIZE);
+ hw_feat->adv_ts_hi = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADVTHWORD);
+ hw_feat->dma_width = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADDR64);
+ hw_feat->dcb = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN);
+ hw_feat->sph = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
+ hw_feat->tso = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
+ hw_feat->dma_debug = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA);
+ hw_feat->rss = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, RSSEN);
+ hw_feat->tc_cnt = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC);
+ hw_feat->hash_table_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
+ HASHTBLSZ);
+ hw_feat->l3l4_filter_num = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
+ L3L4FNUM);
+
+ /* Hardware feature register 2 */
+ hw_feat->rx_q_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXQCNT);
+ hw_feat->tx_q_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXQCNT);
+ hw_feat->rx_ch_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXCHCNT);
+ hw_feat->tx_ch_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXCHCNT);
+ hw_feat->pps_out_num = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM);
+ hw_feat->aux_snap_num = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, AUXSNAPNUM);
+
+ /* Translate the Hash Table size into actual number */
+ switch (hw_feat->hash_table_size) {
+ case 0:
+ break;
+ case 1:
+ hw_feat->hash_table_size = 64;
+ break;
+ case 2:
+ hw_feat->hash_table_size = 128;
+ break;
+ case 3:
+ hw_feat->hash_table_size = 256;
+ break;
+ }
+
+ /* Translate the address width setting into actual number */
+ switch (hw_feat->dma_width) {
+ case 0:
+ hw_feat->dma_width = 32;
+ break;
+ case 1:
+ hw_feat->dma_width = 40;
+ break;
+ case 2:
+ hw_feat->dma_width = 48;
+ break;
+ default:
+ hw_feat->dma_width = 32;
+ }
+
+ /* The Queue, Channel and TC counts are zero based so increment them
+ * to get the actual number
+ */
+ hw_feat->rx_q_cnt++;
+ hw_feat->tx_q_cnt++;
+ hw_feat->rx_ch_cnt++;
+ hw_feat->tx_ch_cnt++;
+ hw_feat->tc_cnt++;
+
+ /* Translate the fifo sizes into actual numbers */
+ hw_feat->rx_fifo_size = 1 << (hw_feat->rx_fifo_size + 7);
+ hw_feat->tx_fifo_size = 1 << (hw_feat->tx_fifo_size + 7);
+
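+ /* A worked decode of the fifo size translation above: the hardware
+ * reports an exponent n and the fifo holds 1 << (n + 7) bytes, so an
+ * encoded value of 7 decodes to 1 << 14 = 16384 bytes (16 KB).
+ */
+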
+ if (netif_msg_probe(pdata)) {
+ dev_dbg(pdata->dev, "Hardware features:\n");
+
+ /* Hardware feature register 0 */
+ dev_dbg(pdata->dev, " 1GbE support : %s\n",
+ hw_feat->gmii ? "yes" : "no");
+ dev_dbg(pdata->dev, " VLAN hash filter : %s\n",
+ hw_feat->vlhash ? "yes" : "no");
+ dev_dbg(pdata->dev, " MDIO interface : %s\n",
+ hw_feat->sma ? "yes" : "no");
+ dev_dbg(pdata->dev, " Wake-up packet support : %s\n",
+ hw_feat->rwk ? "yes" : "no");
+ dev_dbg(pdata->dev, " Magic packet support : %s\n",
+ hw_feat->mgk ? "yes" : "no");
+ dev_dbg(pdata->dev, " Management counters : %s\n",
+ hw_feat->mmc ? "yes" : "no");
+ dev_dbg(pdata->dev, " ARP offload : %s\n",
+ hw_feat->aoe ? "yes" : "no");
+ dev_dbg(pdata->dev, " IEEE 1588-2008 Timestamp : %s\n",
+ hw_feat->ts ? "yes" : "no");
+ dev_dbg(pdata->dev, " Energy Efficient Ethernet : %s\n",
+ hw_feat->eee ? "yes" : "no");
+ dev_dbg(pdata->dev, " TX checksum offload : %s\n",
+ hw_feat->tx_coe ? "yes" : "no");
+ dev_dbg(pdata->dev, " RX checksum offload : %s\n",
+ hw_feat->rx_coe ? "yes" : "no");
+ dev_dbg(pdata->dev, " Additional MAC addresses : %u\n",
+ hw_feat->addn_mac);
+ dev_dbg(pdata->dev, " Timestamp source : %s\n",
+ (hw_feat->ts_src == 1) ? "internal" :
+ (hw_feat->ts_src == 2) ? "external" :
+ (hw_feat->ts_src == 3) ? "internal/external" : "n/a");
+ dev_dbg(pdata->dev, " SA/VLAN insertion : %s\n",
+ hw_feat->sa_vlan_ins ? "yes" : "no");
+ dev_dbg(pdata->dev, " VXLAN/NVGRE support : %s\n",
+ hw_feat->vxn ? "yes" : "no");
+
+ /* Hardware feature register 1 */
+ dev_dbg(pdata->dev, " RX fifo size : %u\n",
+ hw_feat->rx_fifo_size);
+ dev_dbg(pdata->dev, " TX fifo size : %u\n",
+ hw_feat->tx_fifo_size);
+ dev_dbg(pdata->dev, " IEEE 1588 high word : %s\n",
+ hw_feat->adv_ts_hi ? "yes" : "no");
+ dev_dbg(pdata->dev, " DMA width : %u\n",
+ hw_feat->dma_width);
+ dev_dbg(pdata->dev, " Data Center Bridging : %s\n",
+ hw_feat->dcb ? "yes" : "no");
+ dev_dbg(pdata->dev, " Split header : %s\n",
+ hw_feat->sph ? "yes" : "no");
+ dev_dbg(pdata->dev, " TCP Segmentation Offload : %s\n",
+ hw_feat->tso ? "yes" : "no");
+ dev_dbg(pdata->dev, " Debug memory interface : %s\n",
+ hw_feat->dma_debug ? "yes" : "no");
+ dev_dbg(pdata->dev, " Receive Side Scaling : %s\n",
+ hw_feat->rss ? "yes" : "no");
+ dev_dbg(pdata->dev, " Traffic Class count : %u\n",
+ hw_feat->tc_cnt);
+ dev_dbg(pdata->dev, " Hash table size : %u\n",
+ hw_feat->hash_table_size);
+ dev_dbg(pdata->dev, " L3/L4 Filters : %u\n",
+ hw_feat->l3l4_filter_num);
+
+ /* Hardware feature register 2 */
+ dev_dbg(pdata->dev, " RX queue count : %u\n",
+ hw_feat->rx_q_cnt);
+ dev_dbg(pdata->dev, " TX queue count : %u\n",
+ hw_feat->tx_q_cnt);
+ dev_dbg(pdata->dev, " RX DMA channel count : %u\n",
+ hw_feat->rx_ch_cnt);
+ dev_dbg(pdata->dev, " TX DMA channel count : %u\n",
+ hw_feat->tx_ch_cnt);
+ dev_dbg(pdata->dev, " PPS outputs : %u\n",
+ hw_feat->pps_out_num);
+ dev_dbg(pdata->dev, " Auxiliary snapshot inputs : %u\n",
+ hw_feat->aux_snap_num);
+ }
+}
+
+static int xgbe_vxlan_set_port(struct net_device *netdev, unsigned int table,
+ unsigned int entry, struct udp_tunnel_info *ti)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+ pdata->vxlan_port = be16_to_cpu(ti->port);
+ pdata->hw_if.enable_vxlan(pdata);
+
+ return 0;
+}
+
+static int xgbe_vxlan_unset_port(struct net_device *netdev, unsigned int table,
+ unsigned int entry, struct udp_tunnel_info *ti)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+ pdata->hw_if.disable_vxlan(pdata);
+ pdata->vxlan_port = 0;
+
+ return 0;
+}
+
+static const struct udp_tunnel_nic_info xgbe_udp_tunnels = {
+ .set_port = xgbe_vxlan_set_port,
+ .unset_port = xgbe_vxlan_unset_port,
+ .flags = UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
+ .tables = {
+ { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
+ },
+};
+
+const struct udp_tunnel_nic_info *xgbe_get_udp_tunnel_info(void)
+{
+ return &xgbe_udp_tunnels;
+}
+
+static void xgbe_napi_enable(struct xgbe_prv_data *pdata, unsigned int add)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ if (pdata->per_channel_irq) {
+ for (i = 0; i < pdata->channel_count; i++) {
+ channel = pdata->channel[i];
+ if (add)
+ netif_napi_add(pdata->netdev, &channel->napi,
+ xgbe_one_poll, NAPI_POLL_WEIGHT);
+
+ napi_enable(&channel->napi);
+ }
+ } else {
+ if (add)
+ netif_napi_add(pdata->netdev, &pdata->napi,
+ xgbe_all_poll, NAPI_POLL_WEIGHT);
+
+ napi_enable(&pdata->napi);
+ }
+}
+
+static void xgbe_napi_disable(struct xgbe_prv_data *pdata, unsigned int del)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ if (pdata->per_channel_irq) {
+ for (i = 0; i < pdata->channel_count; i++) {
+ channel = pdata->channel[i];
+ napi_disable(&channel->napi);
+
+ if (del)
+ netif_napi_del(&channel->napi);
+ }
+ } else {
+ napi_disable(&pdata->napi);
+
+ if (del)
+ netif_napi_del(&pdata->napi);
+ }
+}
+
+static int xgbe_request_irqs(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ struct net_device *netdev = pdata->netdev;
+ unsigned int i;
+ int ret;
+
+ tasklet_setup(&pdata->tasklet_dev, xgbe_isr_task);
+ tasklet_setup(&pdata->tasklet_ecc, xgbe_ecc_isr_task);
+
+ ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0,
+ netdev_name(netdev), pdata);
+ if (ret) {
+ netdev_alert(netdev, "error requesting irq %d\n",
+ pdata->dev_irq);
+ return ret;
+ }
+
+ if (pdata->vdata->ecc_support && (pdata->dev_irq != pdata->ecc_irq)) {
+ ret = devm_request_irq(pdata->dev, pdata->ecc_irq, xgbe_ecc_isr,
+ 0, pdata->ecc_name, pdata);
+ if (ret) {
+ netdev_alert(netdev, "error requesting ecc irq %d\n",
+ pdata->ecc_irq);
+ goto err_dev_irq;
+ }
+ }
+
+ if (!pdata->per_channel_irq)
+ return 0;
+
+ for (i = 0; i < pdata->channel_count; i++) {
+ channel = pdata->channel[i];
+ snprintf(channel->dma_irq_name,
+ sizeof(channel->dma_irq_name) - 1,
+ "%s-TxRx-%u", netdev_name(netdev),
+ channel->queue_index);
+
+ ret = devm_request_irq(pdata->dev, channel->dma_irq,
+ xgbe_dma_isr, 0,
+ channel->dma_irq_name, channel);
+ if (ret) {
+ netdev_alert(netdev, "error requesting irq %d\n",
+ channel->dma_irq);
+ goto err_dma_irq;
+ }
+
+ irq_set_affinity_hint(channel->dma_irq,
+ &channel->affinity_mask);
+ }
+
+ return 0;
+
+err_dma_irq:
+ /* 'i' is unsigned, so decrementing past zero wraps to UINT_MAX,
+ * which fails the loop condition and ends the unwind
+ */
+ for (i--; i < pdata->channel_count; i--) {
+ channel = pdata->channel[i];
+
+ irq_set_affinity_hint(channel->dma_irq, NULL);
+ devm_free_irq(pdata->dev, channel->dma_irq, channel);
+ }
+
+ if (pdata->vdata->ecc_support && (pdata->dev_irq != pdata->ecc_irq))
+ devm_free_irq(pdata->dev, pdata->ecc_irq, pdata);
+
+err_dev_irq:
+ devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
+
+ return ret;
+}
+
+static void xgbe_free_irqs(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
+
+ tasklet_kill(&pdata->tasklet_dev);
+ tasklet_kill(&pdata->tasklet_ecc);
+
+ if (pdata->vdata->ecc_support && (pdata->dev_irq != pdata->ecc_irq))
+ devm_free_irq(pdata->dev, pdata->ecc_irq, pdata);
+
+ if (!pdata->per_channel_irq)
+ return;
+
+ for (i = 0; i < pdata->channel_count; i++) {
+ channel = pdata->channel[i];
+
+ irq_set_affinity_hint(channel->dma_irq, NULL);
+ devm_free_irq(pdata->dev, channel->dma_irq, channel);
+ }
+}
+
+void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+
+ DBGPR("-->xgbe_init_tx_coalesce\n");
+
+ pdata->tx_usecs = XGMAC_INIT_DMA_TX_USECS;
+ pdata->tx_frames = XGMAC_INIT_DMA_TX_FRAMES;
+
+ hw_if->config_tx_coalesce(pdata);
+
+ DBGPR("<--xgbe_init_tx_coalesce\n");
+}
+
+void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+
+ DBGPR("-->xgbe_init_rx_coalesce\n");
+
+ pdata->rx_riwt = hw_if->usec_to_riwt(pdata, XGMAC_INIT_DMA_RX_USECS);
+ pdata->rx_usecs = XGMAC_INIT_DMA_RX_USECS;
+ pdata->rx_frames = XGMAC_INIT_DMA_RX_FRAMES;
+
+ hw_if->config_rx_coalesce(pdata);
+
+ DBGPR("<--xgbe_init_rx_coalesce\n");
+}
+
+static void xgbe_free_tx_data(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_desc_if *desc_if = &pdata->desc_if;
+ struct xgbe_ring *ring;
+ struct xgbe_ring_data *rdata;
+ unsigned int i, j;
+
+ DBGPR("-->xgbe_free_tx_data\n");
+
+ for (i = 0; i < pdata->channel_count; i++) {
+ ring = pdata->channel[i]->tx_ring;
+ if (!ring)
+ break;
+
+ for (j = 0; j < ring->rdesc_count; j++) {
+ rdata = XGBE_GET_DESC_DATA(ring, j);
+ desc_if->unmap_rdata(pdata, rdata);
+ }
+ }
+
+ DBGPR("<--xgbe_free_tx_data\n");
+}
+
+static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_desc_if *desc_if = &pdata->desc_if;
+ struct xgbe_ring *ring;
+ struct xgbe_ring_data *rdata;
+ unsigned int i, j;
+
+ DBGPR("-->xgbe_free_rx_data\n");
+
+ for (i = 0; i < pdata->channel_count; i++) {
+ ring = pdata->channel[i]->rx_ring;
+ if (!ring)
+ break;
+
+ for (j = 0; j < ring->rdesc_count; j++) {
+ rdata = XGBE_GET_DESC_DATA(ring, j);
+ desc_if->unmap_rdata(pdata, rdata);
+ }
+ }
+
+ DBGPR("<--xgbe_free_rx_data\n");
+}
+
+static int xgbe_phy_reset(struct xgbe_prv_data *pdata)
+{
+ pdata->phy_link = -1;
+ pdata->phy_speed = SPEED_UNKNOWN;
+
+ return pdata->phy_if.phy_reset(pdata);
+}
+
+int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ unsigned long flags;
+
+ DBGPR("-->xgbe_powerdown\n");
+
+ if (!netif_running(netdev) ||
+ (caller == XGMAC_IOCTL_CONTEXT && pdata->power_down)) {
+ netdev_alert(netdev, "Device is already powered down\n");
+ DBGPR("<--xgbe_powerdown\n");
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&pdata->lock, flags);
+
+ if (caller == XGMAC_DRIVER_CONTEXT)
+ netif_device_detach(netdev);
+
+ netif_tx_stop_all_queues(netdev);
+
+ xgbe_stop_timers(pdata);
+ flush_workqueue(pdata->dev_workqueue);
+
+ hw_if->powerdown_tx(pdata);
+ hw_if->powerdown_rx(pdata);
+
+ xgbe_napi_disable(pdata, 0);
+
+ pdata->power_down = 1;
+
+ spin_unlock_irqrestore(&pdata->lock, flags);
+
+ DBGPR("<--xgbe_powerdown\n");
+
+ return 0;
+}
+
+int xgbe_powerup(struct net_device *netdev, unsigned int caller)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ unsigned long flags;
+
+ DBGPR("-->xgbe_powerup\n");
+
+ if (!netif_running(netdev) ||
+ (caller == XGMAC_IOCTL_CONTEXT && !pdata->power_down)) {
+ netdev_alert(netdev, "Device is already powered up\n");
+ DBGPR("<--xgbe_powerup\n");
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&pdata->lock, flags);
+
+ pdata->power_down = 0;
+
+ xgbe_napi_enable(pdata, 0);
+
+ hw_if->powerup_tx(pdata);
+ hw_if->powerup_rx(pdata);
+
+ if (caller == XGMAC_DRIVER_CONTEXT)
+ netif_device_attach(netdev);
+
+ netif_tx_start_all_queues(netdev);
+
+ xgbe_start_timers(pdata);
+
+ spin_unlock_irqrestore(&pdata->lock, flags);
+
+ DBGPR("<--xgbe_powerup\n");
+
+ return 0;
+}
+
+static void xgbe_free_memory(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_desc_if *desc_if = &pdata->desc_if;
+
+ /* Free the ring descriptors and buffers */
+ desc_if->free_ring_resources(pdata);
+
+ /* Free the channel and ring structures */
+ xgbe_free_channels(pdata);
+}
+
+static int xgbe_alloc_memory(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_desc_if *desc_if = &pdata->desc_if;
+ struct net_device *netdev = pdata->netdev;
+ int ret;
+
+ if (pdata->new_tx_ring_count) {
+ pdata->tx_ring_count = pdata->new_tx_ring_count;
+ pdata->tx_q_count = pdata->tx_ring_count;
+ pdata->new_tx_ring_count = 0;
+ }
+
+ if (pdata->new_rx_ring_count) {
+ pdata->rx_ring_count = pdata->new_rx_ring_count;
+ pdata->new_rx_ring_count = 0;
+ }
+
+ /* Calculate the Rx buffer size before allocating rings */
+ pdata->rx_buf_size = xgbe_calc_rx_buf_size(netdev, netdev->mtu);
+
+ /* Allocate the channel and ring structures */
+ ret = xgbe_alloc_channels(pdata);
+ if (ret)
+ return ret;
+
+ /* Allocate the ring descriptors and buffers */
+ ret = desc_if->alloc_ring_resources(pdata);
+ if (ret)
+ goto err_channels;
+
+ /* Initialize the service and Tx timers */
+ xgbe_init_timers(pdata);
+
+ return 0;
+
+err_channels:
+ xgbe_free_memory(pdata);
+
+ return ret;
+}
+
+static int xgbe_start(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_phy_if *phy_if = &pdata->phy_if;
+ struct net_device *netdev = pdata->netdev;
+ unsigned int i;
+ int ret;
+
+ /* Set the number of queues */
+ ret = netif_set_real_num_tx_queues(netdev, pdata->tx_ring_count);
+ if (ret) {
+ netdev_err(netdev, "error setting real tx queue count\n");
+ return ret;
+ }
+
+ ret = netif_set_real_num_rx_queues(netdev, pdata->rx_ring_count);
+ if (ret) {
+ netdev_err(netdev, "error setting real rx queue count\n");
+ return ret;
+ }
+
+ /* Set RSS lookup table data for programming */
+ for (i = 0; i < XGBE_RSS_MAX_TABLE_SIZE; i++)
+ XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH,
+ i % pdata->rx_ring_count);
+
+ ret = hw_if->init(pdata);
+ if (ret)
+ return ret;
+
+ xgbe_napi_enable(pdata, 1);
+
+ ret = xgbe_request_irqs(pdata);
+ if (ret)
+ goto err_napi;
+
+ ret = phy_if->phy_start(pdata);
+ if (ret)
+ goto err_irqs;
+
+ hw_if->enable_tx(pdata);
+ hw_if->enable_rx(pdata);
+
+ udp_tunnel_nic_reset_ntf(netdev);
+
+ netif_tx_start_all_queues(netdev);
+
+ xgbe_start_timers(pdata);
+ queue_work(pdata->dev_workqueue, &pdata->service_work);
+
+ clear_bit(XGBE_STOPPED, &pdata->dev_state);
+
+ return 0;
+
+err_irqs:
+ xgbe_free_irqs(pdata);
+
+err_napi:
+ xgbe_napi_disable(pdata, 1);
+
+ hw_if->exit(pdata);
+
+ return ret;
+}
+
+static void xgbe_stop(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_phy_if *phy_if = &pdata->phy_if;
+ struct xgbe_channel *channel;
+ struct net_device *netdev = pdata->netdev;
+ struct netdev_queue *txq;
+ unsigned int i;
+
+ DBGPR("-->xgbe_stop\n");
+
+ if (test_bit(XGBE_STOPPED, &pdata->dev_state))
+ return;
+
+ netif_tx_stop_all_queues(netdev);
+ netif_carrier_off(pdata->netdev);
+
+ xgbe_stop_timers(pdata);
+ flush_workqueue(pdata->dev_workqueue);
+
+ xgbe_vxlan_unset_port(netdev, 0, 0, NULL);
+
+ hw_if->disable_tx(pdata);
+ hw_if->disable_rx(pdata);
+
+ phy_if->phy_stop(pdata);
+
+ xgbe_free_irqs(pdata);
+
+ xgbe_napi_disable(pdata, 1);
+
+ hw_if->exit(pdata);
+
+ for (i = 0; i < pdata->channel_count; i++) {
+ channel = pdata->channel[i];
+ if (!channel->tx_ring)
+ continue;
+
+ txq = netdev_get_tx_queue(netdev, channel->queue_index);
+ netdev_tx_reset_queue(txq);
+ }
+
+ set_bit(XGBE_STOPPED, &pdata->dev_state);
+
+ DBGPR("<--xgbe_stop\n");
+}
+
+static void xgbe_stopdev(struct work_struct *work)
+{
+ struct xgbe_prv_data *pdata = container_of(work,
+ struct xgbe_prv_data,
+ stopdev_work);
+
+ rtnl_lock();
+
+ xgbe_stop(pdata);
+
+ xgbe_free_tx_data(pdata);
+ xgbe_free_rx_data(pdata);
+
+ rtnl_unlock();
+
+ netdev_alert(pdata->netdev, "device stopped\n");
+}
+
+void xgbe_full_restart_dev(struct xgbe_prv_data *pdata)
+{
+ /* If not running, "restart" will happen on open */
+ if (!netif_running(pdata->netdev))
+ return;
+
+ xgbe_stop(pdata);
+
+ xgbe_free_memory(pdata);
+ xgbe_alloc_memory(pdata);
+
+ xgbe_start(pdata);
+}
+
+void xgbe_restart_dev(struct xgbe_prv_data *pdata)
+{
+ /* If not running, "restart" will happen on open */
+ if (!netif_running(pdata->netdev))
+ return;
+
+ xgbe_stop(pdata);
+
+ xgbe_free_tx_data(pdata);
+ xgbe_free_rx_data(pdata);
+
+ xgbe_start(pdata);
+}
+
+static void xgbe_restart(struct work_struct *work)
+{
+ struct xgbe_prv_data *pdata = container_of(work,
+ struct xgbe_prv_data,
+ restart_work);
+
+ rtnl_lock();
+
+ xgbe_restart_dev(pdata);
+
+ rtnl_unlock();
+}
+
+static void xgbe_tx_tstamp(struct work_struct *work)
+{
+ struct xgbe_prv_data *pdata = container_of(work,
+ struct xgbe_prv_data,
+ tx_tstamp_work);
+ struct skb_shared_hwtstamps hwtstamps;
+ u64 nsec;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdata->tstamp_lock, flags);
+ if (!pdata->tx_tstamp_skb)
+ goto unlock;
+
+ if (pdata->tx_tstamp) {
+ nsec = timecounter_cyc2time(&pdata->tstamp_tc,
+ pdata->tx_tstamp);
+
+ memset(&hwtstamps, 0, sizeof(hwtstamps));
+ hwtstamps.hwtstamp = ns_to_ktime(nsec);
+ skb_tstamp_tx(pdata->tx_tstamp_skb, &hwtstamps);
+ }
+
+ dev_kfree_skb_any(pdata->tx_tstamp_skb);
+
+ pdata->tx_tstamp_skb = NULL;
+
+unlock:
+ spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
+}
+
+static int xgbe_get_hwtstamp_settings(struct xgbe_prv_data *pdata,
+ struct ifreq *ifreq)
+{
+ if (copy_to_user(ifreq->ifr_data, &pdata->tstamp_config,
+ sizeof(pdata->tstamp_config)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata,
+ struct ifreq *ifreq)
+{
+ struct hwtstamp_config config;
+ unsigned int mac_tscr;
+
+ if (copy_from_user(&config, ifreq->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ if (config.flags)
+ return -EINVAL;
+
+ mac_tscr = 0;
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ break;
+
+ case HWTSTAMP_TX_ON:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+ default:
+ return -ERANGE;
+ }
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ break;
+
+ case HWTSTAMP_FILTER_NTP_ALL:
+ case HWTSTAMP_FILTER_ALL:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENALL, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+ /* PTP v2, UDP, any kind of event packet */
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
+ fallthrough; /* to PTP v1, UDP, any kind of event packet */
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+ /* PTP v2, UDP, Sync packet */
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
+ fallthrough; /* to PTP v1, UDP, Sync packet */
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+ /* PTP v2, UDP, Delay_req packet */
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
+ fallthrough; /* to PTP v1, UDP, Delay_req packet */
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+ /* IEEE 802.1AS, Ethernet, any kind of event packet */
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+ /* IEEE 802.1AS, Ethernet, Sync packet */
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+ /* IEEE 802.1AS, Ethernet, Delay_req packet */
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+ /* PTP v2/IEEE 802.1AS, any layer, any kind of event packet */
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+ /* PTP v2/IEEE 802.1AS, any layer, Sync packet */
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+ /* PTP v2/IEEE 802.1AS, any layer, Delay_req packet */
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+ default:
+ return -ERANGE;
+ }
+
+ pdata->hw_if.config_tstamp(pdata, mac_tscr);
+
+ memcpy(&pdata->tstamp_config, &config, sizeof(config));
+
+ return 0;
+}
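+
+/* The filter ladder above composes MAC_TSCR bits cumulatively: each PTP
+ * v2 case sets TSVER2ENA and then falls through to the matching PTP v1
+ * case so the shared bits are written once. For example,
+ * HWTSTAMP_FILTER_PTP_V2_L4_SYNC ends up enabling TSVER2ENA, TSIPV4ENA,
+ * TSIPV6ENA, TSEVNTENA and TSENA in a single config_tstamp() call.
+ */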
+
+static void xgbe_prep_tx_tstamp(struct xgbe_prv_data *pdata,
+ struct sk_buff *skb,
+ struct xgbe_packet_data *packet)
+{
+ unsigned long flags;
+
+ if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP)) {
+ spin_lock_irqsave(&pdata->tstamp_lock, flags);
+ if (pdata->tx_tstamp_skb) {
+ /* Another timestamp in progress, ignore this one */
+ XGMAC_SET_BITS(packet->attributes,
+ TX_PACKET_ATTRIBUTES, PTP, 0);
+ } else {
+ pdata->tx_tstamp_skb = skb_get(skb);
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ }
+ spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
+ }
+
+ skb_tx_timestamp(skb);
+}
+
+static void xgbe_prep_vlan(struct sk_buff *skb, struct xgbe_packet_data *packet)
+{
+ if (skb_vlan_tag_present(skb))
+ packet->vlan_ctag = skb_vlan_tag_get(skb);
+}
+
+static int xgbe_prep_tso(struct sk_buff *skb, struct xgbe_packet_data *packet)
+{
+ int ret;
+
+ if (!XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
+ TSO_ENABLE))
+ return 0;
+
+ ret = skb_cow_head(skb, 0);
+ if (ret)
+ return ret;
+
+ if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, VXLAN)) {
+ packet->header_len = skb_inner_transport_offset(skb) +
+ inner_tcp_hdrlen(skb);
+ packet->tcp_header_len = inner_tcp_hdrlen(skb);
+ } else {
+ packet->header_len = skb_transport_offset(skb) +
+ tcp_hdrlen(skb);
+ packet->tcp_header_len = tcp_hdrlen(skb);
+ }
+ packet->tcp_payload_len = skb->len - packet->header_len;
+ packet->mss = skb_shinfo(skb)->gso_size;
+
+ DBGPR(" packet->header_len=%u\n", packet->header_len);
+ DBGPR(" packet->tcp_header_len=%u, packet->tcp_payload_len=%u\n",
+ packet->tcp_header_len, packet->tcp_payload_len);
+ DBGPR(" packet->mss=%u\n", packet->mss);
+
+ /* Update the number of packets that will ultimately be transmitted
+ * along with the extra bytes for each extra packet
+ */
+ packet->tx_packets = skb_shinfo(skb)->gso_segs;
+ packet->tx_bytes += (packet->tx_packets - 1) * packet->header_len;
+
+ return 0;
+}
+
+static bool xgbe_is_vxlan(struct sk_buff *skb)
+{
+ if (!skb->encapsulation)
+ return false;
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return false;
+
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+ if (ip_hdr(skb)->protocol != IPPROTO_UDP)
+ return false;
+ break;
+
+ case htons(ETH_P_IPV6):
+ if (ipv6_hdr(skb)->nexthdr != IPPROTO_UDP)
+ return false;
+ break;
+
+ default:
+ return false;
+ }
+
+ if (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
+ skb->inner_protocol != htons(ETH_P_TEB) ||
+ (skb_inner_mac_header(skb) - skb_transport_header(skb) !=
+ sizeof(struct udphdr) + sizeof(struct vxlanhdr)))
+ return false;
+
+ return true;
+}
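+
+/* The offset check above pins down the only encapsulation layout the
+ * offload path accepts, a canonical VXLAN frame:
+ *
+ *   outer Eth | outer IP | UDP (8 bytes) | VXLAN (8 bytes) | inner Eth
+ *
+ * so skb_inner_mac_header() - skb_transport_header() must equal
+ * sizeof(struct udphdr) + sizeof(struct vxlanhdr) = 8 + 8 = 16 bytes.
+ */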
+
+static int xgbe_is_tso(struct sk_buff *skb)
+{
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
+
+ if (!skb_is_gso(skb))
+ return 0;
+
+ DBGPR(" TSO packet to be processed\n");
+
+ return 1;
+}
+
+static void xgbe_packet_info(struct xgbe_prv_data *pdata,
+ struct xgbe_ring *ring, struct sk_buff *skb,
+ struct xgbe_packet_data *packet)
+{
+ skb_frag_t *frag;
+ unsigned int context_desc;
+ unsigned int len;
+ unsigned int i;
+
+ packet->skb = skb;
+
+ context_desc = 0;
+ packet->rdesc_count = 0;
+
+ packet->tx_packets = 1;
+ packet->tx_bytes = skb->len;
+
+ if (xgbe_is_tso(skb)) {
+ /* TSO requires an extra descriptor if mss is different */
+ if (skb_shinfo(skb)->gso_size != ring->tx.cur_mss) {
+ context_desc = 1;
+ packet->rdesc_count++;
+ }
+
+ /* TSO requires an extra descriptor for TSO header */
+ packet->rdesc_count++;
+
+ XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
+ TSO_ENABLE, 1);
+ XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
+ CSUM_ENABLE, 1);
+ } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
+ CSUM_ENABLE, 1);
+ }
+
+ if (xgbe_is_vxlan(skb))
+ XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
+ VXLAN, 1);
+
+ if (skb_vlan_tag_present(skb)) {
+		/* VLAN requires an extra context descriptor if the tag differs
+		 * from that of the previous packet
+		 */
+ if (skb_vlan_tag_get(skb) != ring->tx.cur_vlan_ctag)
+ /* We can share with the TSO context descriptor */
+ if (!context_desc) {
+ context_desc = 1;
+ packet->rdesc_count++;
+ }
+
+ XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
+ VLAN_CTAG, 1);
+ }
+
+ if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ (pdata->tstamp_config.tx_type == HWTSTAMP_TX_ON))
+ XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
+ PTP, 1);
+
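+	/* Account for one descriptor per XGBE_TX_MAX_BUF_SIZE chunk of the
+	 * linear data
+	 */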
+ for (len = skb_headlen(skb); len;) {
+ packet->rdesc_count++;
+ len -= min_t(unsigned int, len, XGBE_TX_MAX_BUF_SIZE);
+ }
+
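+	/* Likewise, account for one descriptor per chunk of each fragment */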
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ frag = &skb_shinfo(skb)->frags[i];
+ for (len = skb_frag_size(frag); len; ) {
+ packet->rdesc_count++;
+ len -= min_t(unsigned int, len, XGBE_TX_MAX_BUF_SIZE);
+ }
+ }
+}
+
+static int xgbe_open(struct net_device *netdev)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ int ret;
+
+ /* Create the various names based on netdev name */
+ snprintf(pdata->an_name, sizeof(pdata->an_name) - 1, "%s-pcs",
+ netdev_name(netdev));
+
+ snprintf(pdata->ecc_name, sizeof(pdata->ecc_name) - 1, "%s-ecc",
+ netdev_name(netdev));
+
+ snprintf(pdata->i2c_name, sizeof(pdata->i2c_name) - 1, "%s-i2c",
+ netdev_name(netdev));
+
+ /* Create workqueues */
+ pdata->dev_workqueue =
+ create_singlethread_workqueue(netdev_name(netdev));
+ if (!pdata->dev_workqueue) {
+ netdev_err(netdev, "device workqueue creation failed\n");
+ return -ENOMEM;
+ }
+
+ pdata->an_workqueue =
+ create_singlethread_workqueue(pdata->an_name);
+ if (!pdata->an_workqueue) {
+ netdev_err(netdev, "phy workqueue creation failed\n");
+ ret = -ENOMEM;
+ goto err_dev_wq;
+ }
+
+ /* Reset the phy settings */
+ ret = xgbe_phy_reset(pdata);
+ if (ret)
+ goto err_an_wq;
+
+ /* Enable the clocks */
+ ret = clk_prepare_enable(pdata->sysclk);
+ if (ret) {
+ netdev_alert(netdev, "dma clk_prepare_enable failed\n");
+ goto err_an_wq;
+ }
+
+ ret = clk_prepare_enable(pdata->ptpclk);
+ if (ret) {
+ netdev_alert(netdev, "ptp clk_prepare_enable failed\n");
+ goto err_sysclk;
+ }
+
+ INIT_WORK(&pdata->service_work, xgbe_service);
+ INIT_WORK(&pdata->restart_work, xgbe_restart);
+ INIT_WORK(&pdata->stopdev_work, xgbe_stopdev);
+ INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp);
+
+ ret = xgbe_alloc_memory(pdata);
+ if (ret)
+ goto err_ptpclk;
+
+ ret = xgbe_start(pdata);
+ if (ret)
+ goto err_mem;
+
+ clear_bit(XGBE_DOWN, &pdata->dev_state);
+
+ return 0;
+
+err_mem:
+ xgbe_free_memory(pdata);
+
+err_ptpclk:
+ clk_disable_unprepare(pdata->ptpclk);
+
+err_sysclk:
+ clk_disable_unprepare(pdata->sysclk);
+
+err_an_wq:
+ destroy_workqueue(pdata->an_workqueue);
+
+err_dev_wq:
+ destroy_workqueue(pdata->dev_workqueue);
+
+ return ret;
+}
+
+static int xgbe_close(struct net_device *netdev)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+ /* Stop the device */
+ xgbe_stop(pdata);
+
+ xgbe_free_memory(pdata);
+
+ /* Disable the clocks */
+ clk_disable_unprepare(pdata->ptpclk);
+ clk_disable_unprepare(pdata->sysclk);
+
+ flush_workqueue(pdata->an_workqueue);
+ destroy_workqueue(pdata->an_workqueue);
+
+ flush_workqueue(pdata->dev_workqueue);
+ destroy_workqueue(pdata->dev_workqueue);
+
+ set_bit(XGBE_DOWN, &pdata->dev_state);
+
+ return 0;
+}
+
+static netdev_tx_t xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_desc_if *desc_if = &pdata->desc_if;
+ struct xgbe_channel *channel;
+ struct xgbe_ring *ring;
+ struct xgbe_packet_data *packet;
+ struct netdev_queue *txq;
+ netdev_tx_t ret;
+
+	DBGPR("-->xgbe_xmit: skb->len = %u\n", skb->len);
+
+ channel = pdata->channel[skb->queue_mapping];
+ txq = netdev_get_tx_queue(netdev, channel->queue_index);
+ ring = channel->tx_ring;
+ packet = &ring->packet_data;
+
+ ret = NETDEV_TX_OK;
+
+ if (skb->len == 0) {
+ netif_err(pdata, tx_err, netdev,
+ "empty skb received from stack\n");
+ dev_kfree_skb_any(skb);
+ goto tx_netdev_return;
+ }
+
+ /* Calculate preliminary packet info */
+ memset(packet, 0, sizeof(*packet));
+ xgbe_packet_info(pdata, ring, skb, packet);
+
+ /* Check that there are enough descriptors available */
+ ret = xgbe_maybe_stop_tx_queue(channel, ring, packet->rdesc_count);
+ if (ret)
+ goto tx_netdev_return;
+
+ ret = xgbe_prep_tso(skb, packet);
+ if (ret) {
+ netif_err(pdata, tx_err, netdev,
+ "error processing TSO packet\n");
+ dev_kfree_skb_any(skb);
+ goto tx_netdev_return;
+ }
+ xgbe_prep_vlan(skb, packet);
+
+ if (!desc_if->map_tx_skb(channel, skb)) {
+ dev_kfree_skb_any(skb);
+ goto tx_netdev_return;
+ }
+
+ xgbe_prep_tx_tstamp(pdata, skb, packet);
+
+ /* Report on the actual number of bytes (to be) sent */
+ netdev_tx_sent_queue(txq, packet->tx_bytes);
+
+ /* Configure required descriptor fields for transmission */
+ hw_if->dev_xmit(channel);
+
+ if (netif_msg_pktdata(pdata))
+ xgbe_print_pkt(netdev, skb, true);
+
+ /* Stop the queue in advance if there may not be enough descriptors */
+ xgbe_maybe_stop_tx_queue(channel, ring, XGBE_TX_MAX_DESCS);
+
+ ret = NETDEV_TX_OK;
+
+tx_netdev_return:
+ return ret;
+}
+
+static void xgbe_set_rx_mode(struct net_device *netdev)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+
+ DBGPR("-->xgbe_set_rx_mode\n");
+
+ hw_if->config_rx_mode(pdata);
+
+ DBGPR("<--xgbe_set_rx_mode\n");
+}
+
+static int xgbe_set_mac_address(struct net_device *netdev, void *addr)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct sockaddr *saddr = addr;
+
+ DBGPR("-->xgbe_set_mac_address\n");
+
+ if (!is_valid_ether_addr(saddr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(netdev->dev_addr, saddr->sa_data, netdev->addr_len);
+
+ hw_if->set_mac_address(pdata, netdev->dev_addr);
+
+ DBGPR("<--xgbe_set_mac_address\n");
+
+ return 0;
+}
+
+static int xgbe_ioctl(struct net_device *netdev, struct ifreq *ifreq, int cmd)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ int ret;
+
+ switch (cmd) {
+ case SIOCGHWTSTAMP:
+ ret = xgbe_get_hwtstamp_settings(pdata, ifreq);
+ break;
+
+ case SIOCSHWTSTAMP:
+ ret = xgbe_set_hwtstamp_settings(pdata, ifreq);
+ break;
+
+ default:
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+static int xgbe_change_mtu(struct net_device *netdev, int mtu)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ int ret;
+
+ DBGPR("-->xgbe_change_mtu\n");
+
+ ret = xgbe_calc_rx_buf_size(netdev, mtu);
+ if (ret < 0)
+ return ret;
+
+ pdata->rx_buf_size = ret;
+ netdev->mtu = mtu;
+
+ xgbe_restart_dev(pdata);
+
+ DBGPR("<--xgbe_change_mtu\n");
+
+ return 0;
+}
+
+static void xgbe_tx_timeout(struct net_device *netdev, unsigned int txqueue)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+ netdev_warn(netdev, "tx timeout, device restarting\n");
+ schedule_work(&pdata->restart_work);
+}
+
+static void xgbe_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *s)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_mmc_stats *pstats = &pdata->mmc_stats;
+
+ DBGPR("-->%s\n", __func__);
+
+ pdata->hw_if.read_mmc_stats(pdata);
+
+ s->rx_packets = pstats->rxframecount_gb;
+ s->rx_bytes = pstats->rxoctetcount_gb;
+ s->rx_errors = pstats->rxframecount_gb -
+ pstats->rxbroadcastframes_g -
+ pstats->rxmulticastframes_g -
+ pstats->rxunicastframes_g;
+ s->multicast = pstats->rxmulticastframes_g;
+ s->rx_length_errors = pstats->rxlengtherror;
+ s->rx_crc_errors = pstats->rxcrcerror;
+ s->rx_fifo_errors = pstats->rxfifooverflow;
+
+ s->tx_packets = pstats->txframecount_gb;
+ s->tx_bytes = pstats->txoctetcount_gb;
+ s->tx_errors = pstats->txframecount_gb - pstats->txframecount_g;
+ s->tx_dropped = netdev->stats.tx_dropped;
+
+ DBGPR("<--%s\n", __func__);
+}
+
+static int xgbe_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
+ u16 vid)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+
+ DBGPR("-->%s\n", __func__);
+
+ set_bit(vid, pdata->active_vlans);
+ hw_if->update_vlan_hash_table(pdata);
+
+ DBGPR("<--%s\n", __func__);
+
+ return 0;
+}
+
+static int xgbe_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
+ u16 vid)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+
+ DBGPR("-->%s\n", __func__);
+
+ clear_bit(vid, pdata->active_vlans);
+ hw_if->update_vlan_hash_table(pdata);
+
+ DBGPR("<--%s\n", __func__);
+
+ return 0;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void xgbe_poll_controller(struct net_device *netdev)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ DBGPR("-->xgbe_poll_controller\n");
+
+ if (pdata->per_channel_irq) {
+ for (i = 0; i < pdata->channel_count; i++) {
+ channel = pdata->channel[i];
+ xgbe_dma_isr(channel->dma_irq, channel);
+ }
+ } else {
+ disable_irq(pdata->dev_irq);
+ xgbe_isr(pdata->dev_irq, pdata);
+ enable_irq(pdata->dev_irq);
+ }
+
+ DBGPR("<--xgbe_poll_controller\n");
+}
+#endif /* End CONFIG_NET_POLL_CONTROLLER */
+
+static int xgbe_setup_tc(struct net_device *netdev, enum tc_setup_type type,
+ void *type_data)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct tc_mqprio_qopt *mqprio = type_data;
+ u8 tc;
+
+ if (type != TC_SETUP_QDISC_MQPRIO)
+ return -EOPNOTSUPP;
+
+ mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+ tc = mqprio->num_tc;
+
+ if (tc > pdata->hw_feat.tc_cnt)
+ return -EINVAL;
+
+ pdata->num_tcs = tc;
+ pdata->hw_if.config_tc(pdata);
+
+ return 0;
+}
+
+static netdev_features_t xgbe_fix_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ netdev_features_t vxlan_base;
+
+ vxlan_base = NETIF_F_GSO_UDP_TUNNEL | NETIF_F_RX_UDP_TUNNEL_PORT;
+
+ if (!pdata->hw_feat.vxn)
+ return features;
+
+ /* VXLAN CSUM requires VXLAN base */
+ if ((features & NETIF_F_GSO_UDP_TUNNEL_CSUM) &&
+ !(features & NETIF_F_GSO_UDP_TUNNEL)) {
+ netdev_notice(netdev,
+ "forcing tx udp tunnel support\n");
+ features |= NETIF_F_GSO_UDP_TUNNEL;
+ }
+
+ /* Can't do one without doing the other */
+ if ((features & vxlan_base) != vxlan_base) {
+ netdev_notice(netdev,
+ "forcing both tx and rx udp tunnel support\n");
+ features |= vxlan_base;
+ }
+
+ if (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
+ if (!(features & NETIF_F_GSO_UDP_TUNNEL_CSUM)) {
+ netdev_notice(netdev,
+ "forcing tx udp tunnel checksumming on\n");
+ features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
+ }
+ } else {
+ if (features & NETIF_F_GSO_UDP_TUNNEL_CSUM) {
+ netdev_notice(netdev,
+ "forcing tx udp tunnel checksumming off\n");
+ features &= ~NETIF_F_GSO_UDP_TUNNEL_CSUM;
+ }
+ }
+
+ return features;
+}
+
+static int xgbe_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ netdev_features_t rxhash, rxcsum, rxvlan, rxvlan_filter;
+ int ret = 0;
+
+ rxhash = pdata->netdev_features & NETIF_F_RXHASH;
+ rxcsum = pdata->netdev_features & NETIF_F_RXCSUM;
+ rxvlan = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX;
+ rxvlan_filter = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_FILTER;
+
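+	/* Toggle hardware features only where the requested state changed */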
+ if ((features & NETIF_F_RXHASH) && !rxhash)
+ ret = hw_if->enable_rss(pdata);
+ else if (!(features & NETIF_F_RXHASH) && rxhash)
+ ret = hw_if->disable_rss(pdata);
+ if (ret)
+ return ret;
+
+ if ((features & NETIF_F_RXCSUM) && !rxcsum)
+ hw_if->enable_rx_csum(pdata);
+ else if (!(features & NETIF_F_RXCSUM) && rxcsum)
+ hw_if->disable_rx_csum(pdata);
+
+ if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !rxvlan)
+ hw_if->enable_rx_vlan_stripping(pdata);
+ else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) && rxvlan)
+ hw_if->disable_rx_vlan_stripping(pdata);
+
+ if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) && !rxvlan_filter)
+ hw_if->enable_rx_vlan_filtering(pdata);
+ else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) && rxvlan_filter)
+ hw_if->disable_rx_vlan_filtering(pdata);
+
+ pdata->netdev_features = features;
+
+ DBGPR("<--xgbe_set_features\n");
+
+ return 0;
+}
+
+static netdev_features_t xgbe_features_check(struct sk_buff *skb,
+ struct net_device *netdev,
+ netdev_features_t features)
+{
+ features = vlan_features_check(skb, features);
+ features = vxlan_features_check(skb, features);
+
+ return features;
+}
+
+static const struct net_device_ops xgbe_netdev_ops = {
+ .ndo_open = xgbe_open,
+ .ndo_stop = xgbe_close,
+ .ndo_start_xmit = xgbe_xmit,
+ .ndo_set_rx_mode = xgbe_set_rx_mode,
+ .ndo_set_mac_address = xgbe_set_mac_address,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_do_ioctl = xgbe_ioctl,
+ .ndo_change_mtu = xgbe_change_mtu,
+ .ndo_tx_timeout = xgbe_tx_timeout,
+ .ndo_get_stats64 = xgbe_get_stats64,
+ .ndo_vlan_rx_add_vid = xgbe_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = xgbe_vlan_rx_kill_vid,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = xgbe_poll_controller,
+#endif
+ .ndo_setup_tc = xgbe_setup_tc,
+ .ndo_fix_features = xgbe_fix_features,
+ .ndo_set_features = xgbe_set_features,
+ .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
+ .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
+ .ndo_features_check = xgbe_features_check,
+};
+
+const struct net_device_ops *xgbe_get_netdev_ops(void)
+{
+ return &xgbe_netdev_ops;
+}
+
+static void xgbe_rx_refresh(struct xgbe_channel *channel)
+{
+ struct xgbe_prv_data *pdata = channel->pdata;
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_desc_if *desc_if = &pdata->desc_if;
+ struct xgbe_ring *ring = channel->rx_ring;
+ struct xgbe_ring_data *rdata;
+
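+	/* Re-arm each dirty descriptor with a fresh Rx buffer */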
+ while (ring->dirty != ring->cur) {
+ rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);
+
+ /* Reset rdata values */
+ desc_if->unmap_rdata(pdata, rdata);
+
+ if (desc_if->map_rx_buffer(pdata, ring, rdata))
+ break;
+
+ hw_if->rx_desc_reset(pdata, rdata, ring->dirty);
+
+ ring->dirty++;
+ }
+
+ /* Make sure everything is written before the register write */
+ wmb();
+
+	/* Update the Rx Tail Pointer Register with address of
+	 * the last cleaned entry
+	 */
+ rdata = XGBE_GET_DESC_DATA(ring, ring->dirty - 1);
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
+ lower_32_bits(rdata->rdesc_dma));
+}
+
+static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
+ struct napi_struct *napi,
+ struct xgbe_ring_data *rdata,
+ unsigned int len)
+{
+ struct sk_buff *skb;
+ u8 *packet;
+
+ skb = napi_alloc_skb(napi, rdata->rx.hdr.dma_len);
+ if (!skb)
+ return NULL;
+
+	/* Pull in the header buffer, which may contain just the header
+	 * or the header plus data
+	 */
+ dma_sync_single_range_for_cpu(pdata->dev, rdata->rx.hdr.dma_base,
+ rdata->rx.hdr.dma_off,
+ rdata->rx.hdr.dma_len, DMA_FROM_DEVICE);
+
+ packet = page_address(rdata->rx.hdr.pa.pages) +
+ rdata->rx.hdr.pa.pages_offset;
+ skb_copy_to_linear_data(skb, packet, len);
+ skb_put(skb, len);
+
+ return skb;
+}
+
+static unsigned int xgbe_rx_buf1_len(struct xgbe_ring_data *rdata,
+ struct xgbe_packet_data *packet)
+{
+ /* Always zero if not the first descriptor */
+ if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, FIRST))
+ return 0;
+
+ /* First descriptor with split header, return header length */
+ if (rdata->rx.hdr_len)
+ return rdata->rx.hdr_len;
+
+ /* First descriptor but not the last descriptor and no split header,
+ * so the full buffer was used
+ */
+ if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST))
+ return rdata->rx.hdr.dma_len;
+
+ /* First descriptor and last descriptor and no split header, so
+ * calculate how much of the buffer was used
+ */
+ return min_t(unsigned int, rdata->rx.hdr.dma_len, rdata->rx.len);
+}
+
+static unsigned int xgbe_rx_buf2_len(struct xgbe_ring_data *rdata,
+ struct xgbe_packet_data *packet,
+ unsigned int len)
+{
+ /* Always the full buffer if not the last descriptor */
+ if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST))
+ return rdata->rx.buf.dma_len;
+
+ /* Last descriptor so calculate how much of the buffer was used
+ * for the last bit of data
+ */
+ return rdata->rx.len - len;
+}
+
+static int xgbe_tx_poll(struct xgbe_channel *channel)
+{
+ struct xgbe_prv_data *pdata = channel->pdata;
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_desc_if *desc_if = &pdata->desc_if;
+ struct xgbe_ring *ring = channel->tx_ring;
+ struct xgbe_ring_data *rdata;
+ struct xgbe_ring_desc *rdesc;
+ struct net_device *netdev = pdata->netdev;
+ struct netdev_queue *txq;
+ int processed = 0;
+ unsigned int tx_packets = 0, tx_bytes = 0;
+ unsigned int cur;
+
+ DBGPR("-->xgbe_tx_poll\n");
+
+ /* Nothing to do if there isn't a Tx ring for this channel */
+ if (!ring)
+ return 0;
+
+ cur = ring->cur;
+
+ /* Be sure we get ring->cur before accessing descriptor data */
+ smp_rmb();
+
+ txq = netdev_get_tx_queue(netdev, channel->queue_index);
+
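+	/* Process completed descriptors, bounded by XGBE_TX_DESC_MAX_PROC
+	 * per invocation
+	 */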
+ while ((processed < XGBE_TX_DESC_MAX_PROC) &&
+ (ring->dirty != cur)) {
+ rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);
+ rdesc = rdata->rdesc;
+
+ if (!hw_if->tx_complete(rdesc))
+ break;
+
+		/* Make sure descriptor fields are read after reading
+		 * the OWN bit
+		 */
+ dma_rmb();
+
+ if (netif_msg_tx_done(pdata))
+ xgbe_dump_tx_desc(pdata, ring, ring->dirty, 1, 0);
+
+ if (hw_if->is_last_desc(rdesc)) {
+ tx_packets += rdata->tx.packets;
+ tx_bytes += rdata->tx.bytes;
+ }
+
+ /* Free the SKB and reset the descriptor for re-use */
+ desc_if->unmap_rdata(pdata, rdata);
+ hw_if->tx_desc_reset(rdata);
+
+ processed++;
+ ring->dirty++;
+ }
+
+ if (!processed)
+ return 0;
+
+ netdev_tx_completed_queue(txq, tx_packets, tx_bytes);
+
+ if ((ring->tx.queue_stopped == 1) &&
+ (xgbe_tx_avail_desc(ring) > XGBE_TX_DESC_MIN_FREE)) {
+ ring->tx.queue_stopped = 0;
+ netif_tx_wake_queue(txq);
+ }
+
+ DBGPR("<--xgbe_tx_poll: processed=%d\n", processed);
+
+ return processed;
+}
+
+static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
+{
+ struct xgbe_prv_data *pdata = channel->pdata;
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_ring *ring = channel->rx_ring;
+ struct xgbe_ring_data *rdata;
+ struct xgbe_packet_data *packet;
+ struct net_device *netdev = pdata->netdev;
+ struct napi_struct *napi;
+ struct sk_buff *skb;
+ struct skb_shared_hwtstamps *hwtstamps;
+ unsigned int last, error, context_next, context;
+ unsigned int len, buf1_len, buf2_len, max_len;
+ unsigned int received = 0;
+ int packet_count = 0;
+
+ DBGPR("-->xgbe_rx_poll: budget=%d\n", budget);
+
+ /* Nothing to do if there isn't a Rx ring for this channel */
+ if (!ring)
+ return 0;
+
+ last = 0;
+ context_next = 0;
+
+ napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;
+
+ rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
+ packet = &ring->packet_data;
+ while (packet_count < budget) {
+		DBGPR(" cur = %u\n", ring->cur);
+
+ /* First time in loop see if we need to restore state */
+ if (!received && rdata->state_saved) {
+ skb = rdata->state.skb;
+ error = rdata->state.error;
+ len = rdata->state.len;
+ } else {
+ memset(packet, 0, sizeof(*packet));
+ skb = NULL;
+ error = 0;
+ len = 0;
+ }
+
+read_again:
+ rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
+
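+		/* Refresh Rx buffers once more than 1/8 of the ring is dirty */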
+ if (xgbe_rx_dirty_desc(ring) > (XGBE_RX_DESC_CNT >> 3))
+ xgbe_rx_refresh(channel);
+
+ if (hw_if->dev_read(channel))
+ break;
+
+ received++;
+ ring->cur++;
+
+ last = XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ LAST);
+ context_next = XGMAC_GET_BITS(packet->attributes,
+ RX_PACKET_ATTRIBUTES,
+ CONTEXT_NEXT);
+ context = XGMAC_GET_BITS(packet->attributes,
+ RX_PACKET_ATTRIBUTES,
+ CONTEXT);
+
+ /* Earlier error, just drain the remaining data */
+ if ((!last || context_next) && error)
+ goto read_again;
+
+ if (error || packet->errors) {
+ if (packet->errors)
+ netif_err(pdata, rx_err, netdev,
+ "error in received packet\n");
+ dev_kfree_skb(skb);
+ goto next_packet;
+ }
+
+ if (!context) {
+ /* Get the data length in the descriptor buffers */
+ buf1_len = xgbe_rx_buf1_len(rdata, packet);
+ len += buf1_len;
+ buf2_len = xgbe_rx_buf2_len(rdata, packet, len);
+ len += buf2_len;
+
+ if (buf2_len > rdata->rx.buf.dma_len) {
+ /* Hardware inconsistency within the descriptors
+ * that has resulted in a length underflow.
+ */
+ error = 1;
+ goto skip_data;
+ }
+
+ if (!skb) {
+ skb = xgbe_create_skb(pdata, napi, rdata,
+ buf1_len);
+ if (!skb) {
+ error = 1;
+ goto skip_data;
+ }
+ }
+
+ if (buf2_len) {
+ dma_sync_single_range_for_cpu(pdata->dev,
+ rdata->rx.buf.dma_base,
+ rdata->rx.buf.dma_off,
+ rdata->rx.buf.dma_len,
+ DMA_FROM_DEVICE);
+
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ rdata->rx.buf.pa.pages,
+ rdata->rx.buf.pa.pages_offset,
+ buf2_len,
+ rdata->rx.buf.dma_len);
+ rdata->rx.buf.pa.pages = NULL;
+ }
+ }
+
+skip_data:
+ if (!last || context_next)
+ goto read_again;
+
+ if (!skb || error) {
+ dev_kfree_skb(skb);
+ goto next_packet;
+ }
+
+ /* Be sure we don't exceed the configured MTU */
+ max_len = netdev->mtu + ETH_HLEN;
+ if (!(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
+ (skb->protocol == htons(ETH_P_8021Q)))
+ max_len += VLAN_HLEN;
+
+ if (skb->len > max_len) {
+ netif_err(pdata, rx_err, netdev,
+ "packet length exceeds configured MTU\n");
+ dev_kfree_skb(skb);
+ goto next_packet;
+ }
+
+ if (netif_msg_pktdata(pdata))
+ xgbe_print_pkt(netdev, skb, false);
+
+ skb_checksum_none_assert(skb);
+ if (XGMAC_GET_BITS(packet->attributes,
+ RX_PACKET_ATTRIBUTES, CSUM_DONE))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ if (XGMAC_GET_BITS(packet->attributes,
+ RX_PACKET_ATTRIBUTES, TNP)) {
+ skb->encapsulation = 1;
+
+ if (XGMAC_GET_BITS(packet->attributes,
+ RX_PACKET_ATTRIBUTES, TNPCSUM_DONE))
+ skb->csum_level = 1;
+ }
+
+ if (XGMAC_GET_BITS(packet->attributes,
+ RX_PACKET_ATTRIBUTES, VLAN_CTAG))
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ packet->vlan_ctag);
+
+ if (XGMAC_GET_BITS(packet->attributes,
+ RX_PACKET_ATTRIBUTES, RX_TSTAMP)) {
+ u64 nsec;
+
+ nsec = timecounter_cyc2time(&pdata->tstamp_tc,
+ packet->rx_tstamp);
+ hwtstamps = skb_hwtstamps(skb);
+ hwtstamps->hwtstamp = ns_to_ktime(nsec);
+ }
+
+ if (XGMAC_GET_BITS(packet->attributes,
+ RX_PACKET_ATTRIBUTES, RSS_HASH))
+ skb_set_hash(skb, packet->rss_hash,
+ packet->rss_hash_type);
+
+ skb->dev = netdev;
+ skb->protocol = eth_type_trans(skb, netdev);
+ skb_record_rx_queue(skb, channel->queue_index);
+
+ napi_gro_receive(napi, skb);
+
+next_packet:
+ packet_count++;
+ }
+
+ /* Check if we need to save state before leaving */
+ if (received && (!last || context_next)) {
+ rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
+ rdata->state_saved = 1;
+ rdata->state.skb = skb;
+ rdata->state.len = len;
+ rdata->state.error = error;
+ }
+
+ DBGPR("<--xgbe_rx_poll: packet_count = %d\n", packet_count);
+
+ return packet_count;
+}
+
+static int xgbe_one_poll(struct napi_struct *napi, int budget)
+{
+ struct xgbe_channel *channel = container_of(napi, struct xgbe_channel,
+ napi);
+ struct xgbe_prv_data *pdata = channel->pdata;
+ int processed = 0;
+
+ DBGPR("-->xgbe_one_poll: budget=%d\n", budget);
+
+ /* Cleanup Tx ring first */
+ xgbe_tx_poll(channel);
+
+ /* Process Rx ring next */
+ processed = xgbe_rx_poll(channel, budget);
+
+ /* If we processed everything, we are done */
+ if ((processed < budget) && napi_complete_done(napi, processed)) {
+ /* Enable Tx and Rx interrupts */
+ if (pdata->channel_irq_mode)
+ xgbe_enable_rx_tx_int(pdata, channel);
+ else
+ enable_irq(channel->dma_irq);
+ }
+
+ DBGPR("<--xgbe_one_poll: received = %d\n", processed);
+
+ return processed;
+}
+
+static int xgbe_all_poll(struct napi_struct *napi, int budget)
+{
+ struct xgbe_prv_data *pdata = container_of(napi, struct xgbe_prv_data,
+ napi);
+ struct xgbe_channel *channel;
+ int ring_budget;
+ int processed, last_processed;
+ unsigned int i;
+
+ DBGPR("-->xgbe_all_poll: budget=%d\n", budget);
+
+ processed = 0;
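+	/* Divide the budget evenly across the Rx rings */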
+ ring_budget = budget / pdata->rx_ring_count;
+ do {
+ last_processed = processed;
+
+ for (i = 0; i < pdata->channel_count; i++) {
+ channel = pdata->channel[i];
+
+ /* Cleanup Tx ring first */
+ xgbe_tx_poll(channel);
+
+ /* Process Rx ring next */
+ if (ring_budget > (budget - processed))
+ ring_budget = budget - processed;
+ processed += xgbe_rx_poll(channel, ring_budget);
+ }
+ } while ((processed < budget) && (processed != last_processed));
+
+ /* If we processed everything, we are done */
+ if ((processed < budget) && napi_complete_done(napi, processed)) {
+ /* Enable Tx and Rx interrupts */
+ xgbe_enable_rx_tx_ints(pdata);
+ }
+
+ DBGPR("<--xgbe_all_poll: received = %d\n", processed);
+
+ return processed;
+}
+
+void xgbe_dump_tx_desc(struct xgbe_prv_data *pdata, struct xgbe_ring *ring,
+ unsigned int idx, unsigned int count, unsigned int flag)
+{
+ struct xgbe_ring_data *rdata;
+ struct xgbe_ring_desc *rdesc;
+
+ while (count--) {
+ rdata = XGBE_GET_DESC_DATA(ring, idx);
+ rdesc = rdata->rdesc;
+ netdev_dbg(pdata->netdev,
+			   "TX_NORMAL_DESC[%u %s] = %08x:%08x:%08x:%08x\n", idx,
+ (flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE",
+ le32_to_cpu(rdesc->desc0),
+ le32_to_cpu(rdesc->desc1),
+ le32_to_cpu(rdesc->desc2),
+ le32_to_cpu(rdesc->desc3));
+ idx++;
+ }
+}
+
+void xgbe_dump_rx_desc(struct xgbe_prv_data *pdata, struct xgbe_ring *ring,
+ unsigned int idx)
+{
+ struct xgbe_ring_data *rdata;
+ struct xgbe_ring_desc *rdesc;
+
+ rdata = XGBE_GET_DESC_DATA(ring, idx);
+ rdesc = rdata->rdesc;
+ netdev_dbg(pdata->netdev,
+		   "RX_NORMAL_DESC[%u RX BY DEVICE] = %08x:%08x:%08x:%08x\n",
+ idx, le32_to_cpu(rdesc->desc0), le32_to_cpu(rdesc->desc1),
+ le32_to_cpu(rdesc->desc2), le32_to_cpu(rdesc->desc3));
+}
+
+void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
+{
+ struct ethhdr *eth = (struct ethhdr *)skb->data;
+ unsigned char buffer[128];
+ unsigned int i;
+
+ netdev_dbg(netdev, "\n************** SKB dump ****************\n");
+
+ netdev_dbg(netdev, "%s packet of %d bytes\n",
+ (tx_rx ? "TX" : "RX"), skb->len);
+
+ netdev_dbg(netdev, "Dst MAC addr: %pM\n", eth->h_dest);
+ netdev_dbg(netdev, "Src MAC addr: %pM\n", eth->h_source);
+ netdev_dbg(netdev, "Protocol: %#06hx\n", ntohs(eth->h_proto));
+
+ for (i = 0; i < skb->len; i += 32) {
+ unsigned int len = min(skb->len - i, 32U);
+
+ hex_dump_to_buffer(&skb->data[i], len, 32, 1,
+ buffer, sizeof(buffer), false);
+ netdev_dbg(netdev, " %#06x: %s\n", i, buffer);
+ }
+
+ netdev_dbg(netdev, "\n************** SKB dump ****************\n");
+}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
new file mode 100644
index 000000000..64aebf922
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
@@ -0,0 +1,853 @@
+/*
+ * AMD 10Gb Ethernet driver
+ *
+ * This file is available to you under your choice of the following two
+ * licenses:
+ *
+ * License 1: GPLv2
+ *
+ * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
+ *
+ * This file is free software; you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * License 2: Modified BSD
+ *
+ * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Advanced Micro Devices, Inc. nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/spinlock.h>
+#include <linux/phy.h>
+#include <linux/net_tstamp.h>
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+struct xgbe_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int stat_size;
+ int stat_offset;
+};
+
+#define XGMAC_MMC_STAT(_string, _var) \
+ { _string, \
+ sizeof_field(struct xgbe_mmc_stats, _var), \
+ offsetof(struct xgbe_prv_data, mmc_stats._var), \
+ }
+
+#define XGMAC_EXT_STAT(_string, _var) \
+ { _string, \
+ sizeof_field(struct xgbe_ext_stats, _var), \
+ offsetof(struct xgbe_prv_data, ext_stats._var), \
+ }
+
+static const struct xgbe_stats xgbe_gstring_stats[] = {
+ XGMAC_MMC_STAT("tx_bytes", txoctetcount_gb),
+ XGMAC_MMC_STAT("tx_packets", txframecount_gb),
+ XGMAC_MMC_STAT("tx_unicast_packets", txunicastframes_gb),
+ XGMAC_MMC_STAT("tx_broadcast_packets", txbroadcastframes_gb),
+ XGMAC_MMC_STAT("tx_multicast_packets", txmulticastframes_gb),
+ XGMAC_MMC_STAT("tx_vlan_packets", txvlanframes_g),
+ XGMAC_EXT_STAT("tx_vxlan_packets", tx_vxlan_packets),
+ XGMAC_EXT_STAT("tx_tso_packets", tx_tso_packets),
+ XGMAC_MMC_STAT("tx_64_byte_packets", tx64octets_gb),
+ XGMAC_MMC_STAT("tx_65_to_127_byte_packets", tx65to127octets_gb),
+ XGMAC_MMC_STAT("tx_128_to_255_byte_packets", tx128to255octets_gb),
+ XGMAC_MMC_STAT("tx_256_to_511_byte_packets", tx256to511octets_gb),
+ XGMAC_MMC_STAT("tx_512_to_1023_byte_packets", tx512to1023octets_gb),
+ XGMAC_MMC_STAT("tx_1024_to_max_byte_packets", tx1024tomaxoctets_gb),
+ XGMAC_MMC_STAT("tx_underflow_errors", txunderflowerror),
+ XGMAC_MMC_STAT("tx_pause_frames", txpauseframes),
+
+ XGMAC_MMC_STAT("rx_bytes", rxoctetcount_gb),
+ XGMAC_MMC_STAT("rx_packets", rxframecount_gb),
+ XGMAC_MMC_STAT("rx_unicast_packets", rxunicastframes_g),
+ XGMAC_MMC_STAT("rx_broadcast_packets", rxbroadcastframes_g),
+ XGMAC_MMC_STAT("rx_multicast_packets", rxmulticastframes_g),
+ XGMAC_MMC_STAT("rx_vlan_packets", rxvlanframes_gb),
+ XGMAC_EXT_STAT("rx_vxlan_packets", rx_vxlan_packets),
+ XGMAC_MMC_STAT("rx_64_byte_packets", rx64octets_gb),
+ XGMAC_MMC_STAT("rx_65_to_127_byte_packets", rx65to127octets_gb),
+ XGMAC_MMC_STAT("rx_128_to_255_byte_packets", rx128to255octets_gb),
+ XGMAC_MMC_STAT("rx_256_to_511_byte_packets", rx256to511octets_gb),
+ XGMAC_MMC_STAT("rx_512_to_1023_byte_packets", rx512to1023octets_gb),
+ XGMAC_MMC_STAT("rx_1024_to_max_byte_packets", rx1024tomaxoctets_gb),
+ XGMAC_MMC_STAT("rx_undersize_packets", rxundersize_g),
+ XGMAC_MMC_STAT("rx_oversize_packets", rxoversize_g),
+ XGMAC_MMC_STAT("rx_crc_errors", rxcrcerror),
+ XGMAC_MMC_STAT("rx_crc_errors_small_packets", rxrunterror),
+ XGMAC_MMC_STAT("rx_crc_errors_giant_packets", rxjabbererror),
+ XGMAC_MMC_STAT("rx_length_errors", rxlengtherror),
+ XGMAC_MMC_STAT("rx_out_of_range_errors", rxoutofrangetype),
+ XGMAC_MMC_STAT("rx_fifo_overflow_errors", rxfifooverflow),
+ XGMAC_MMC_STAT("rx_watchdog_errors", rxwatchdogerror),
+ XGMAC_EXT_STAT("rx_csum_errors", rx_csum_errors),
+ XGMAC_EXT_STAT("rx_vxlan_csum_errors", rx_vxlan_csum_errors),
+ XGMAC_MMC_STAT("rx_pause_frames", rxpauseframes),
+ XGMAC_EXT_STAT("rx_split_header_packets", rx_split_header_packets),
+ XGMAC_EXT_STAT("rx_buffer_unavailable", rx_buffer_unavailable),
+};
+
+#define XGBE_STATS_COUNT ARRAY_SIZE(xgbe_gstring_stats)
+
+static void xgbe_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < XGBE_STATS_COUNT; i++) {
+ memcpy(data, xgbe_gstring_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < pdata->tx_ring_count; i++) {
+ sprintf(data, "txq_%u_packets", i);
+ data += ETH_GSTRING_LEN;
+ sprintf(data, "txq_%u_bytes", i);
+ data += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < pdata->rx_ring_count; i++) {
+ sprintf(data, "rxq_%u_packets", i);
+ data += ETH_GSTRING_LEN;
+ sprintf(data, "rxq_%u_bytes", i);
+ data += ETH_GSTRING_LEN;
+ }
+ break;
+ }
+}
+
+static void xgbe_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ u8 *stat;
+ int i;
+
+ pdata->hw_if.read_mmc_stats(pdata);
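+	/* Gather each stat from the private data using its recorded offset */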
+ for (i = 0; i < XGBE_STATS_COUNT; i++) {
+ stat = (u8 *)pdata + xgbe_gstring_stats[i].stat_offset;
+ *data++ = *(u64 *)stat;
+ }
+ for (i = 0; i < pdata->tx_ring_count; i++) {
+ *data++ = pdata->ext_stats.txq_packets[i];
+ *data++ = pdata->ext_stats.txq_bytes[i];
+ }
+ for (i = 0; i < pdata->rx_ring_count; i++) {
+ *data++ = pdata->ext_stats.rxq_packets[i];
+ *data++ = pdata->ext_stats.rxq_bytes[i];
+ }
+}
+
+static int xgbe_get_sset_count(struct net_device *netdev, int stringset)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ int ret;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ ret = XGBE_STATS_COUNT +
+ (pdata->tx_ring_count * 2) +
+ (pdata->rx_ring_count * 2);
+ break;
+
+ default:
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+static void xgbe_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+ pause->autoneg = pdata->phy.pause_autoneg;
+ pause->tx_pause = pdata->phy.tx_pause;
+ pause->rx_pause = pdata->phy.rx_pause;
+}
+
+static int xgbe_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct ethtool_link_ksettings *lks = &pdata->phy.lks;
+ int ret = 0;
+
+ if (pause->autoneg && (pdata->phy.autoneg != AUTONEG_ENABLE)) {
+ netdev_err(netdev,
+ "autoneg disabled, pause autoneg not available\n");
+ return -EINVAL;
+ }
+
+ pdata->phy.pause_autoneg = pause->autoneg;
+ pdata->phy.tx_pause = pause->tx_pause;
+ pdata->phy.rx_pause = pause->rx_pause;
+
+ XGBE_CLR_ADV(lks, Pause);
+ XGBE_CLR_ADV(lks, Asym_Pause);
+
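+	/* Rx pause advertises both symmetric and asymmetric pause support */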
+ if (pause->rx_pause) {
+ XGBE_SET_ADV(lks, Pause);
+ XGBE_SET_ADV(lks, Asym_Pause);
+ }
+
+ if (pause->tx_pause) {
+ /* Equivalent to XOR of Asym_Pause */
+ if (XGBE_ADV(lks, Asym_Pause))
+ XGBE_CLR_ADV(lks, Asym_Pause);
+ else
+ XGBE_SET_ADV(lks, Asym_Pause);
+ }
+
+ if (netif_running(netdev))
+ ret = pdata->phy_if.phy_config_aneg(pdata);
+
+ return ret;
+}
+
+static int xgbe_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct ethtool_link_ksettings *lks = &pdata->phy.lks;
+
+ cmd->base.phy_address = pdata->phy.address;
+
+ if (netif_carrier_ok(netdev)) {
+ cmd->base.speed = pdata->phy.speed;
+ cmd->base.duplex = pdata->phy.duplex;
+ } else {
+ cmd->base.speed = SPEED_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
+ }
+
+ cmd->base.autoneg = pdata->phy.autoneg;
+ cmd->base.port = PORT_NONE;
+
+ XGBE_LM_COPY(cmd, supported, lks, supported);
+ XGBE_LM_COPY(cmd, advertising, lks, advertising);
+ XGBE_LM_COPY(cmd, lp_advertising, lks, lp_advertising);
+
+ return 0;
+}
+
+static int xgbe_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct ethtool_link_ksettings *lks = &pdata->phy.lks;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
+ u32 speed;
+ int ret;
+
+ speed = cmd->base.speed;
+
+ if (cmd->base.phy_address != pdata->phy.address) {
+ netdev_err(netdev, "invalid phy address %hhu\n",
+ cmd->base.phy_address);
+ return -EINVAL;
+ }
+
+ if ((cmd->base.autoneg != AUTONEG_ENABLE) &&
+ (cmd->base.autoneg != AUTONEG_DISABLE)) {
+ netdev_err(netdev, "unsupported autoneg %hhu\n",
+ cmd->base.autoneg);
+ return -EINVAL;
+ }
+
+ if (cmd->base.autoneg == AUTONEG_DISABLE) {
+ if (!pdata->phy_if.phy_valid_speed(pdata, speed)) {
+ netdev_err(netdev, "unsupported speed %u\n", speed);
+ return -EINVAL;
+ }
+
+ if (cmd->base.duplex != DUPLEX_FULL) {
+ netdev_err(netdev, "unsupported duplex %hhu\n",
+ cmd->base.duplex);
+ return -EINVAL;
+ }
+ }
+
+ netif_dbg(pdata, link, netdev,
+ "requested advertisement 0x%*pb, phy supported 0x%*pb\n",
+ __ETHTOOL_LINK_MODE_MASK_NBITS, cmd->link_modes.advertising,
+ __ETHTOOL_LINK_MODE_MASK_NBITS, lks->link_modes.supported);
+
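+	/* Restrict the requested advertisement to modes the PHY supports */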
+ bitmap_and(advertising,
+ cmd->link_modes.advertising, lks->link_modes.supported,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+
+ if ((cmd->base.autoneg == AUTONEG_ENABLE) &&
+ bitmap_empty(advertising, __ETHTOOL_LINK_MODE_MASK_NBITS)) {
+ netdev_err(netdev,
+ "unsupported requested advertisement\n");
+ return -EINVAL;
+ }
+
+ ret = 0;
+ pdata->phy.autoneg = cmd->base.autoneg;
+ pdata->phy.speed = speed;
+ pdata->phy.duplex = cmd->base.duplex;
+ bitmap_copy(lks->link_modes.advertising, advertising,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+
+ if (cmd->base.autoneg == AUTONEG_ENABLE)
+ XGBE_SET_ADV(lks, Autoneg);
+ else
+ XGBE_CLR_ADV(lks, Autoneg);
+
+ if (netif_running(netdev))
+ ret = pdata->phy_if.phy_config_aneg(pdata);
+
+ return ret;
+}
+
+static void xgbe_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_features *hw_feat = &pdata->hw_feat;
+
+ strlcpy(drvinfo->driver, XGBE_DRV_NAME, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->bus_info, dev_name(pdata->dev),
+ sizeof(drvinfo->bus_info));
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d.%d.%d",
+ XGMAC_GET_BITS(hw_feat->version, MAC_VR, USERVER),
+ XGMAC_GET_BITS(hw_feat->version, MAC_VR, DEVID),
+ XGMAC_GET_BITS(hw_feat->version, MAC_VR, SNPSVER));
+}
+
+static u32 xgbe_get_msglevel(struct net_device *netdev)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+ return pdata->msg_enable;
+}
+
+static void xgbe_set_msglevel(struct net_device *netdev, u32 msglevel)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+ pdata->msg_enable = msglevel;
+}
+
+static int xgbe_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+ memset(ec, 0, sizeof(struct ethtool_coalesce));
+
+ ec->rx_coalesce_usecs = pdata->rx_usecs;
+ ec->rx_max_coalesced_frames = pdata->rx_frames;
+
+ ec->tx_max_coalesced_frames = pdata->tx_frames;
+
+ return 0;
+}
+
+static int xgbe_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ unsigned int rx_frames, rx_riwt, rx_usecs;
+ unsigned int tx_frames;
+
+ rx_riwt = hw_if->usec_to_riwt(pdata, ec->rx_coalesce_usecs);
+ rx_usecs = ec->rx_coalesce_usecs;
+ rx_frames = ec->rx_max_coalesced_frames;
+
+ /* Use smallest possible value if conversion resulted in zero */
+ if (rx_usecs && !rx_riwt)
+ rx_riwt = 1;
+
+ /* Check the bounds of values for Rx */
+ if (rx_riwt > XGMAC_MAX_DMA_RIWT) {
+ netdev_err(netdev, "rx-usec is limited to %d usecs\n",
+ hw_if->riwt_to_usec(pdata, XGMAC_MAX_DMA_RIWT));
+ return -EINVAL;
+ }
+ if (rx_frames > pdata->rx_desc_count) {
+ netdev_err(netdev, "rx-frames is limited to %d frames\n",
+ pdata->rx_desc_count);
+ return -EINVAL;
+ }
+
+ tx_frames = ec->tx_max_coalesced_frames;
+
+ /* Check the bounds of values for Tx */
+ if (tx_frames > pdata->tx_desc_count) {
+ netdev_err(netdev, "tx-frames is limited to %d frames\n",
+ pdata->tx_desc_count);
+ return -EINVAL;
+ }
+
+ pdata->rx_riwt = rx_riwt;
+ pdata->rx_usecs = rx_usecs;
+ pdata->rx_frames = rx_frames;
+ hw_if->config_rx_coalesce(pdata);
+
+ pdata->tx_frames = tx_frames;
+ hw_if->config_tx_coalesce(pdata);
+
+ return 0;
+}
+
+static int xgbe_get_rxnfc(struct net_device *netdev,
+ struct ethtool_rxnfc *rxnfc, u32 *rule_locs)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+ switch (rxnfc->cmd) {
+ case ETHTOOL_GRXRINGS:
+ rxnfc->data = pdata->rx_ring_count;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static u32 xgbe_get_rxfh_key_size(struct net_device *netdev)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+ return sizeof(pdata->rss_key);
+}
+
+static u32 xgbe_get_rxfh_indir_size(struct net_device *netdev)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+ return ARRAY_SIZE(pdata->rss_table);
+}
+
+static int xgbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
+ u8 *hfunc)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ unsigned int i;
+
+ if (indir) {
+ for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++)
+ indir[i] = XGMAC_GET_BITS(pdata->rss_table[i],
+ MAC_RSSDR, DMCH);
+ }
+
+ if (key)
+ memcpy(key, pdata->rss_key, sizeof(pdata->rss_key));
+
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+
+ return 0;
+}
+
+static int xgbe_set_rxfh(struct net_device *netdev, const u32 *indir,
+ const u8 *key, const u8 hfunc)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+	int ret;
+
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) {
+ netdev_err(netdev, "unsupported hash function\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (indir) {
+ ret = hw_if->set_rss_lookup_table(pdata, indir);
+ if (ret)
+ return ret;
+ }
+
+ if (key) {
+ ret = hw_if->set_rss_hash_key(pdata, key);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int xgbe_get_ts_info(struct net_device *netdev,
+ struct ethtool_ts_info *ts_info)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+ ts_info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ if (pdata->ptp_clock)
+ ts_info->phc_index = ptp_clock_index(pdata->ptp_clock);
+ else
+ ts_info->phc_index = -1;
+
+ ts_info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
+ ts_info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_ALL);
+
+ return 0;
+}
+
+static int xgbe_get_module_info(struct net_device *netdev,
+ struct ethtool_modinfo *modinfo)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+ return pdata->phy_if.module_info(pdata, modinfo);
+}
+
+static int xgbe_get_module_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+ return pdata->phy_if.module_eeprom(pdata, eeprom, data);
+}
+
+static void xgbe_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ringparam)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+ ringparam->rx_max_pending = XGBE_RX_DESC_CNT_MAX;
+ ringparam->tx_max_pending = XGBE_TX_DESC_CNT_MAX;
+ ringparam->rx_pending = pdata->rx_desc_count;
+ ringparam->tx_pending = pdata->tx_desc_count;
+}
+
+static int xgbe_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ringparam)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ unsigned int rx, tx;
+
+ if (ringparam->rx_mini_pending || ringparam->rx_jumbo_pending) {
+ netdev_err(netdev, "unsupported ring parameter\n");
+ return -EINVAL;
+ }
+
+ if ((ringparam->rx_pending < XGBE_RX_DESC_CNT_MIN) ||
+ (ringparam->rx_pending > XGBE_RX_DESC_CNT_MAX)) {
+ netdev_err(netdev,
+ "rx ring parameter must be between %u and %u\n",
+ XGBE_RX_DESC_CNT_MIN, XGBE_RX_DESC_CNT_MAX);
+ return -EINVAL;
+ }
+
+ if ((ringparam->tx_pending < XGBE_TX_DESC_CNT_MIN) ||
+ (ringparam->tx_pending > XGBE_TX_DESC_CNT_MAX)) {
+ netdev_err(netdev,
+ "tx ring parameter must be between %u and %u\n",
+ XGBE_TX_DESC_CNT_MIN, XGBE_TX_DESC_CNT_MAX);
+ return -EINVAL;
+ }
+
+ rx = __rounddown_pow_of_two(ringparam->rx_pending);
+ if (rx != ringparam->rx_pending)
+ netdev_notice(netdev,
+ "rx ring parameter rounded to power of two: %u\n",
+ rx);
+
+ tx = __rounddown_pow_of_two(ringparam->tx_pending);
+ if (tx != ringparam->tx_pending)
+ netdev_notice(netdev,
+ "tx ring parameter rounded to power of two: %u\n",
+ tx);
+
+ if ((rx == pdata->rx_desc_count) &&
+ (tx == pdata->tx_desc_count))
+ goto out;
+
+ pdata->rx_desc_count = rx;
+ pdata->tx_desc_count = tx;
+
+ xgbe_restart_dev(pdata);
+
+out:
+ return 0;
+}
+
+static void xgbe_get_channels(struct net_device *netdev,
+ struct ethtool_channels *channels)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ unsigned int rx, tx, combined;
+
+ /* Calculate maximums allowed:
+ * - Take into account the number of available IRQs
+ * - Do not take into account the number of online CPUs so that
+ * the user can over-subscribe if desired
+ * - Tx is additionally limited by the number of hardware queues
+ */
+ rx = min(pdata->hw_feat.rx_ch_cnt, pdata->rx_max_channel_count);
+ rx = min(rx, pdata->channel_irq_count);
+ tx = min(pdata->hw_feat.tx_ch_cnt, pdata->tx_max_channel_count);
+ tx = min(tx, pdata->channel_irq_count);
+ tx = min(tx, pdata->tx_max_q_count);
+
+ combined = min(rx, tx);
+
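+	/* At least one channel must be combined, so the Rx-only and Tx-only
+	 * maximums are one less than the totals
+	 */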
+ channels->max_combined = combined;
+ channels->max_rx = rx ? rx - 1 : 0;
+ channels->max_tx = tx ? tx - 1 : 0;
+
+ /* Get current settings based on device state */
+ rx = pdata->new_rx_ring_count ? : pdata->rx_ring_count;
+ tx = pdata->new_tx_ring_count ? : pdata->tx_ring_count;
+
+ combined = min(rx, tx);
+ rx -= combined;
+ tx -= combined;
+
+ channels->combined_count = combined;
+ channels->rx_count = rx;
+ channels->tx_count = tx;
+}
+
+static void xgbe_print_set_channels_input(struct net_device *netdev,
+ struct ethtool_channels *channels)
+{
+ netdev_err(netdev, "channel inputs: combined=%u, rx-only=%u, tx-only=%u\n",
+ channels->combined_count, channels->rx_count,
+ channels->tx_count);
+}
+
+static int xgbe_set_channels(struct net_device *netdev,
+ struct ethtool_channels *channels)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ unsigned int rx, rx_curr, tx, tx_curr, combined;
+
+ /* Calculate maximums allowed:
+ * - Take into account the number of available IRQs
+ * - Do not take into account the number of online CPUs so that
+ * the user can over-subscribe if desired
+ * - Tx is additionally limited by the number of hardware queues
+ */
+ rx = min(pdata->hw_feat.rx_ch_cnt, pdata->rx_max_channel_count);
+ rx = min(rx, pdata->channel_irq_count);
+ tx = min(pdata->hw_feat.tx_ch_cnt, pdata->tx_max_channel_count);
+ tx = min(tx, pdata->tx_max_q_count);
+ tx = min(tx, pdata->channel_irq_count);
+
+ combined = min(rx, tx);
+
+ /* Should not be setting other count */
+ if (channels->other_count) {
+ netdev_err(netdev,
+ "other channel count must be zero\n");
+ return -EINVAL;
+ }
+
+ /* Require at least one Combined (Rx and Tx) channel */
+ if (!channels->combined_count) {
+ netdev_err(netdev,
+ "at least one combined Rx/Tx channel is required\n");
+ xgbe_print_set_channels_input(netdev, channels);
+ return -EINVAL;
+ }
+
+ /* Check combined channels */
+ if (channels->combined_count > combined) {
+ netdev_err(netdev,
+ "combined channel count cannot exceed %u\n",
+ combined);
+ xgbe_print_set_channels_input(netdev, channels);
+ return -EINVAL;
+ }
+
+ /* Can have some Rx-only or Tx-only channels, but not both */
+ if (channels->rx_count && channels->tx_count) {
+ netdev_err(netdev,
+ "cannot specify both Rx-only and Tx-only channels\n");
+ xgbe_print_set_channels_input(netdev, channels);
+ return -EINVAL;
+ }
+
+ /* Check that we don't exceed the maximum number of channels */
+ if ((channels->combined_count + channels->rx_count) > rx) {
+ netdev_err(netdev,
+ "total Rx channels (%u) requested exceeds maximum available (%u)\n",
+ channels->combined_count + channels->rx_count, rx);
+ xgbe_print_set_channels_input(netdev, channels);
+ return -EINVAL;
+ }
+
+ if ((channels->combined_count + channels->tx_count) > tx) {
+ netdev_err(netdev,
+ "total Tx channels (%u) requested exceeds maximum available (%u)\n",
+ channels->combined_count + channels->tx_count, tx);
+ xgbe_print_set_channels_input(netdev, channels);
+ return -EINVAL;
+ }
+
+ rx = channels->combined_count + channels->rx_count;
+ tx = channels->combined_count + channels->tx_count;
+
+ rx_curr = pdata->new_rx_ring_count ? : pdata->rx_ring_count;
+ tx_curr = pdata->new_tx_ring_count ? : pdata->tx_ring_count;
+
+ if ((rx == rx_curr) && (tx == tx_curr))
+ goto out;
+
+ pdata->new_rx_ring_count = rx;
+ pdata->new_tx_ring_count = tx;
+
+ xgbe_full_restart_dev(pdata);
+
+out:
+ return 0;
+}
+
+static const struct ethtool_ops xgbe_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES,
+ .get_drvinfo = xgbe_get_drvinfo,
+ .get_msglevel = xgbe_get_msglevel,
+ .set_msglevel = xgbe_set_msglevel,
+ .get_link = ethtool_op_get_link,
+ .get_coalesce = xgbe_get_coalesce,
+ .set_coalesce = xgbe_set_coalesce,
+ .get_pauseparam = xgbe_get_pauseparam,
+ .set_pauseparam = xgbe_set_pauseparam,
+ .get_strings = xgbe_get_strings,
+ .get_ethtool_stats = xgbe_get_ethtool_stats,
+ .get_sset_count = xgbe_get_sset_count,
+ .get_rxnfc = xgbe_get_rxnfc,
+ .get_rxfh_key_size = xgbe_get_rxfh_key_size,
+ .get_rxfh_indir_size = xgbe_get_rxfh_indir_size,
+ .get_rxfh = xgbe_get_rxfh,
+ .set_rxfh = xgbe_set_rxfh,
+ .get_ts_info = xgbe_get_ts_info,
+ .get_link_ksettings = xgbe_get_link_ksettings,
+ .set_link_ksettings = xgbe_set_link_ksettings,
+ .get_module_info = xgbe_get_module_info,
+ .get_module_eeprom = xgbe_get_module_eeprom,
+ .get_ringparam = xgbe_get_ringparam,
+ .set_ringparam = xgbe_set_ringparam,
+ .get_channels = xgbe_get_channels,
+ .set_channels = xgbe_set_channels,
+};
+
+const struct ethtool_ops *xgbe_get_ethtool_ops(void)
+{
+ return &xgbe_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c b/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
new file mode 100644
index 000000000..a9ccc4258
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
@@ -0,0 +1,514 @@
+/*
+ * AMD 10Gb Ethernet driver
+ *
+ * This file is available to you under your choice of the following two
+ * licenses:
+ *
+ * License 1: GPLv2
+ *
+ * Copyright (c) 2016 Advanced Micro Devices, Inc.
+ *
+ * This file is free software; you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * License 2: Modified BSD
+ *
+ * Copyright (c) 2016 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Advanced Micro Devices, Inc. nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/kmod.h>
+#include <linux/delay.h>
+#include <linux/completion.h>
+#include <linux/mutex.h>
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+#define XGBE_ABORT_COUNT 500
+#define XGBE_DISABLE_COUNT 1000
+
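+/* IC_CON.SPEED encoding on the DesignWare I2C core programmed here:
+ * a value of 1 selects standard-mode (100 kHz) operation.
+ */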
+#define XGBE_STD_SPEED 1
+
+#define XGBE_INTR_RX_FULL BIT(IC_RAW_INTR_STAT_RX_FULL_INDEX)
+#define XGBE_INTR_TX_EMPTY BIT(IC_RAW_INTR_STAT_TX_EMPTY_INDEX)
+#define XGBE_INTR_TX_ABRT BIT(IC_RAW_INTR_STAT_TX_ABRT_INDEX)
+#define XGBE_INTR_STOP_DET BIT(IC_RAW_INTR_STAT_STOP_DET_INDEX)
+#define XGBE_DEFAULT_INT_MASK (XGBE_INTR_RX_FULL | \
+ XGBE_INTR_TX_EMPTY | \
+ XGBE_INTR_TX_ABRT | \
+ XGBE_INTR_STOP_DET)
+
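+/* IC_DATA_CMD flags: bit 8 marks a FIFO entry as a read transfer and
+ * bit 9 requests a STOP condition after that byte (see
+ * xgbe_i2c_write()).
+ */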
+#define XGBE_I2C_READ BIT(8)
+#define XGBE_I2C_STOP BIT(9)
+
+static int xgbe_i2c_abort(struct xgbe_prv_data *pdata)
+{
+ unsigned int wait = XGBE_ABORT_COUNT;
+
+ /* Must be enabled to recognize the abort request */
+ XI2C_IOWRITE_BITS(pdata, IC_ENABLE, EN, 1);
+
+ /* Issue the abort */
+ XI2C_IOWRITE_BITS(pdata, IC_ENABLE, ABORT, 1);
+
+ while (wait--) {
+ if (!XI2C_IOREAD_BITS(pdata, IC_ENABLE, ABORT))
+ return 0;
+
+ usleep_range(500, 600);
+ }
+
+ return -EBUSY;
+}
+
+static int xgbe_i2c_set_enable(struct xgbe_prv_data *pdata, bool enable)
+{
+ unsigned int wait = XGBE_DISABLE_COUNT;
+ unsigned int mode = enable ? 1 : 0;
+
+ while (wait--) {
+ XI2C_IOWRITE_BITS(pdata, IC_ENABLE, EN, mode);
+ if (XI2C_IOREAD_BITS(pdata, IC_ENABLE_STATUS, EN) == mode)
+ return 0;
+
+ usleep_range(100, 110);
+ }
+
+ return -EBUSY;
+}
+
+static int xgbe_i2c_disable(struct xgbe_prv_data *pdata)
+{
+	int ret;
+
+ ret = xgbe_i2c_set_enable(pdata, false);
+ if (ret) {
+ /* Disable failed, try an abort */
+ ret = xgbe_i2c_abort(pdata);
+ if (ret)
+ return ret;
+
+ /* Abort succeeded, try to disable again */
+ ret = xgbe_i2c_set_enable(pdata, false);
+ }
+
+ return ret;
+}
+
+static int xgbe_i2c_enable(struct xgbe_prv_data *pdata)
+{
+ return xgbe_i2c_set_enable(pdata, true);
+}
+
+static void xgbe_i2c_clear_all_interrupts(struct xgbe_prv_data *pdata)
+{
+ XI2C_IOREAD(pdata, IC_CLR_INTR);
+}
+
+static void xgbe_i2c_disable_interrupts(struct xgbe_prv_data *pdata)
+{
+ XI2C_IOWRITE(pdata, IC_INTR_MASK, 0);
+}
+
+static void xgbe_i2c_enable_interrupts(struct xgbe_prv_data *pdata)
+{
+ XI2C_IOWRITE(pdata, IC_INTR_MASK, XGBE_DEFAULT_INT_MASK);
+}
+
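+/* Queue as many command slots as the Tx FIFO has room for.  For a read
+ * operation each slot is an XGBE_I2C_READ command that clocks one byte
+ * in (drained later by xgbe_i2c_read()); for a write it is the next
+ * data byte.  The STOP flag rides on the last slot of the transfer.
+ */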
+static void xgbe_i2c_write(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_i2c_op_state *state = &pdata->i2c.op_state;
+ unsigned int tx_slots;
+ unsigned int cmd;
+
+ /* Configured to never receive Rx overflows, so fill up Tx fifo */
+ tx_slots = pdata->i2c.tx_fifo_size - XI2C_IOREAD(pdata, IC_TXFLR);
+ while (tx_slots && state->tx_len) {
+ if (state->op->cmd == XGBE_I2C_CMD_READ)
+ cmd = XGBE_I2C_READ;
+ else
+ cmd = *state->tx_buf++;
+
+ if (state->tx_len == 1)
+ XI2C_SET_BITS(cmd, IC_DATA_CMD, STOP, 1);
+
+ XI2C_IOWRITE(pdata, IC_DATA_CMD, cmd);
+
+ tx_slots--;
+ state->tx_len--;
+ }
+
+ /* No more Tx operations, so ignore TX_EMPTY and return */
+ if (!state->tx_len)
+ XI2C_IOWRITE_BITS(pdata, IC_INTR_MASK, TX_EMPTY, 0);
+}
+
+static void xgbe_i2c_read(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_i2c_op_state *state = &pdata->i2c.op_state;
+ unsigned int rx_slots;
+
+ /* Anything to be read? */
+ if (state->op->cmd != XGBE_I2C_CMD_READ)
+ return;
+
+ rx_slots = XI2C_IOREAD(pdata, IC_RXFLR);
+ while (rx_slots && state->rx_len) {
+ *state->rx_buf++ = XI2C_IOREAD(pdata, IC_DATA_CMD);
+ state->rx_len--;
+ rx_slots--;
+ }
+}
+
+static void xgbe_i2c_clear_isr_interrupts(struct xgbe_prv_data *pdata,
+ unsigned int isr)
+{
+ struct xgbe_i2c_op_state *state = &pdata->i2c.op_state;
+
+ if (isr & XGBE_INTR_TX_ABRT) {
+ state->tx_abort_source = XI2C_IOREAD(pdata, IC_TX_ABRT_SOURCE);
+ XI2C_IOREAD(pdata, IC_CLR_TX_ABRT);
+ }
+
+ if (isr & XGBE_INTR_STOP_DET)
+ XI2C_IOREAD(pdata, IC_CLR_STOP_DET);
+}
+
+static void xgbe_i2c_isr_task(struct tasklet_struct *t)
+{
+ struct xgbe_prv_data *pdata = from_tasklet(pdata, t, tasklet_i2c);
+ struct xgbe_i2c_op_state *state = &pdata->i2c.op_state;
+ unsigned int isr;
+
+ isr = XI2C_IOREAD(pdata, IC_RAW_INTR_STAT);
+ if (!isr)
+ goto reissue_check;
+
+ netif_dbg(pdata, intr, pdata->netdev,
+ "I2C interrupt received: status=%#010x\n", isr);
+
+ xgbe_i2c_clear_isr_interrupts(pdata, isr);
+
+ if (isr & XGBE_INTR_TX_ABRT) {
+ netif_dbg(pdata, link, pdata->netdev,
+ "I2C TX_ABRT received (%#010x) for target %#04x\n",
+ state->tx_abort_source, state->op->target);
+
+ xgbe_i2c_disable_interrupts(pdata);
+
+ state->ret = -EIO;
+ goto out;
+ }
+
+ /* Check for data in the Rx fifo */
+ xgbe_i2c_read(pdata);
+
+ /* Fill up the Tx fifo next */
+ xgbe_i2c_write(pdata);
+
+out:
+ /* Complete on an error or STOP condition */
+ if (state->ret || XI2C_GET_BITS(isr, IC_RAW_INTR_STAT, STOP_DET))
+ complete(&pdata->i2c_complete);
+
+reissue_check:
+ /* Reissue interrupt if status is not clear */
+ if (pdata->vdata->irq_reissue_support)
+ XP_IOWRITE(pdata, XP_INT_REISSUE_EN, 1 << 2);
+}
+
+static irqreturn_t xgbe_i2c_isr(int irq, void *data)
+{
+ struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;
+
+ if (pdata->isr_as_tasklet)
+ tasklet_schedule(&pdata->tasklet_i2c);
+ else
+ xgbe_i2c_isr_task(&pdata->tasklet_i2c);
+
+ return IRQ_HANDLED;
+}
+
+static void xgbe_i2c_set_mode(struct xgbe_prv_data *pdata)
+{
+ unsigned int reg;
+
+ reg = XI2C_IOREAD(pdata, IC_CON);
+ XI2C_SET_BITS(reg, IC_CON, MASTER_MODE, 1);
+ XI2C_SET_BITS(reg, IC_CON, SLAVE_DISABLE, 1);
+ XI2C_SET_BITS(reg, IC_CON, RESTART_EN, 1);
+ XI2C_SET_BITS(reg, IC_CON, SPEED, XGBE_STD_SPEED);
+ XI2C_SET_BITS(reg, IC_CON, RX_FIFO_FULL_HOLD, 1);
+ XI2C_IOWRITE(pdata, IC_CON, reg);
+}
+
+static void xgbe_i2c_get_features(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_i2c *i2c = &pdata->i2c;
+ unsigned int reg;
+
+ reg = XI2C_IOREAD(pdata, IC_COMP_PARAM_1);
+ i2c->max_speed_mode = XI2C_GET_BITS(reg, IC_COMP_PARAM_1,
+ MAX_SPEED_MODE);
+ i2c->rx_fifo_size = XI2C_GET_BITS(reg, IC_COMP_PARAM_1,
+ RX_BUFFER_DEPTH);
+ i2c->tx_fifo_size = XI2C_GET_BITS(reg, IC_COMP_PARAM_1,
+ TX_BUFFER_DEPTH);
+
+ if (netif_msg_probe(pdata))
+ dev_dbg(pdata->dev, "I2C features: %s=%u, %s=%u, %s=%u\n",
+ "MAX_SPEED_MODE", i2c->max_speed_mode,
+ "RX_BUFFER_DEPTH", i2c->rx_fifo_size,
+ "TX_BUFFER_DEPTH", i2c->tx_fifo_size);
+}
+
+static void xgbe_i2c_set_target(struct xgbe_prv_data *pdata, unsigned int addr)
+{
+ XI2C_IOWRITE(pdata, IC_TAR, addr);
+}
+
+static irqreturn_t xgbe_i2c_combined_isr(struct xgbe_prv_data *pdata)
+{
+ xgbe_i2c_isr_task(&pdata->tasklet_i2c);
+
+ return IRQ_HANDLED;
+}
+
+static int xgbe_i2c_xfer(struct xgbe_prv_data *pdata, struct xgbe_i2c_op *op)
+{
+ struct xgbe_i2c_op_state *state = &pdata->i2c.op_state;
+ int ret;
+
+ mutex_lock(&pdata->i2c_mutex);
+
+ reinit_completion(&pdata->i2c_complete);
+
+ ret = xgbe_i2c_disable(pdata);
+ if (ret) {
+ netdev_err(pdata->netdev, "failed to disable i2c master\n");
+ goto unlock;
+ }
+
+ xgbe_i2c_set_target(pdata, op->target);
+
+ memset(state, 0, sizeof(*state));
+ state->op = op;
+ state->tx_len = op->len;
+ state->tx_buf = op->buf;
+ state->rx_len = op->len;
+ state->rx_buf = op->buf;
+
+ xgbe_i2c_clear_all_interrupts(pdata);
+ ret = xgbe_i2c_enable(pdata);
+ if (ret) {
+ netdev_err(pdata->netdev, "failed to enable i2c master\n");
+ goto unlock;
+ }
+
+ /* Enabling the interrupts will cause the TX FIFO empty interrupt to
+ * fire and begin to process the command via the ISR.
+ */
+ xgbe_i2c_enable_interrupts(pdata);
+
+ if (!wait_for_completion_timeout(&pdata->i2c_complete, HZ)) {
+ netdev_err(pdata->netdev, "i2c operation timed out\n");
+ ret = -ETIMEDOUT;
+ goto disable;
+ }
+
+ ret = state->ret;
+ if (ret) {
+ if (state->tx_abort_source & IC_TX_ABRT_7B_ADDR_NOACK)
+ ret = -ENOTCONN;
+ else if (state->tx_abort_source & IC_TX_ABRT_ARB_LOST)
+ ret = -EAGAIN;
+ }
+
+disable:
+ xgbe_i2c_disable_interrupts(pdata);
+ xgbe_i2c_disable(pdata);
+
+unlock:
+ mutex_unlock(&pdata->i2c_mutex);
+
+ return ret;
+}
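+
+/* Illustrative only: a caller holding a valid pdata could read two
+ * bytes from a device at I2C address 0x50 (typical of SFP EEPROMs)
+ * through the exported ops roughly like this:
+ *
+ *	u8 buf[2];
+ *	struct xgbe_i2c_op op = {
+ *		.cmd	= XGBE_I2C_CMD_READ,
+ *		.target	= 0x50,
+ *		.buf	= buf,
+ *		.len	= sizeof(buf),
+ *	};
+ *	int ret = pdata->i2c_if.i2c_xfer(pdata, &op);
+ */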
+
+static void xgbe_i2c_stop(struct xgbe_prv_data *pdata)
+{
+ if (!pdata->i2c.started)
+ return;
+
+ netif_dbg(pdata, link, pdata->netdev, "stopping I2C\n");
+
+ pdata->i2c.started = 0;
+
+ xgbe_i2c_disable_interrupts(pdata);
+ xgbe_i2c_disable(pdata);
+ xgbe_i2c_clear_all_interrupts(pdata);
+
+ if (pdata->dev_irq != pdata->i2c_irq) {
+ devm_free_irq(pdata->dev, pdata->i2c_irq, pdata);
+ tasklet_kill(&pdata->tasklet_i2c);
+ }
+}
+
+static int xgbe_i2c_start(struct xgbe_prv_data *pdata)
+{
+ int ret;
+
+ if (pdata->i2c.started)
+ return 0;
+
+ netif_dbg(pdata, link, pdata->netdev, "starting I2C\n");
+
+ /* If we have a separate I2C irq, enable it */
+ if (pdata->dev_irq != pdata->i2c_irq) {
+ tasklet_setup(&pdata->tasklet_i2c, xgbe_i2c_isr_task);
+
+ ret = devm_request_irq(pdata->dev, pdata->i2c_irq,
+ xgbe_i2c_isr, 0, pdata->i2c_name,
+ pdata);
+ if (ret) {
+ netdev_err(pdata->netdev, "i2c irq request failed\n");
+ return ret;
+ }
+ }
+
+ pdata->i2c.started = 1;
+
+ return 0;
+}
+
+static int xgbe_i2c_init(struct xgbe_prv_data *pdata)
+{
+ int ret;
+
+ xgbe_i2c_disable_interrupts(pdata);
+
+ ret = xgbe_i2c_disable(pdata);
+ if (ret) {
+ dev_err(pdata->dev, "failed to disable i2c master\n");
+ return ret;
+ }
+
+ xgbe_i2c_get_features(pdata);
+
+ xgbe_i2c_set_mode(pdata);
+
+ xgbe_i2c_clear_all_interrupts(pdata);
+
+ return 0;
+}
+
+void xgbe_init_function_ptrs_i2c(struct xgbe_i2c_if *i2c_if)
+{
+ i2c_if->i2c_init = xgbe_i2c_init;
+
+ i2c_if->i2c_start = xgbe_i2c_start;
+ i2c_if->i2c_stop = xgbe_i2c_stop;
+
+ i2c_if->i2c_xfer = xgbe_i2c_xfer;
+
+ i2c_if->i2c_isr = xgbe_i2c_combined_isr;
+}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
new file mode 100644
index 000000000..a218dc6f2
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
@@ -0,0 +1,490 @@
+/*
+ * AMD 10Gb Ethernet driver
+ *
+ * This file is available to you under your choice of the following two
+ * licenses:
+ *
+ * License 1: GPLv2
+ *
+ * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
+ *
+ * This file is free software; you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * License 2: Modified BSD
+ *
+ * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Advanced Micro Devices, Inc. nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/spinlock.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/io.h>
+#include <linux/notifier.h>
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION(XGBE_DRV_DESC);
+
+static int debug = -1;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "Network interface message level setting");
+
+static const u32 default_msg_level = (NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
+ NETIF_MSG_IFUP);
+
+static void xgbe_default_config(struct xgbe_prv_data *pdata)
+{
+ DBGPR("-->xgbe_default_config\n");
+
+ pdata->blen = DMA_SBMR_BLEN_64;
+ pdata->pbl = DMA_PBL_128;
+ pdata->aal = 1;
+ pdata->rd_osr_limit = 8;
+ pdata->wr_osr_limit = 8;
+ pdata->tx_sf_mode = MTL_TSF_ENABLE;
+ pdata->tx_threshold = MTL_TX_THRESHOLD_64;
+ pdata->tx_osp_mode = DMA_OSP_ENABLE;
+ pdata->rx_sf_mode = MTL_RSF_DISABLE;
+ pdata->rx_threshold = MTL_RX_THRESHOLD_64;
+ pdata->pause_autoneg = 1;
+ pdata->tx_pause = 1;
+ pdata->rx_pause = 1;
+ pdata->phy_speed = SPEED_UNKNOWN;
+ pdata->power_down = 0;
+
+ DBGPR("<--xgbe_default_config\n");
+}
+
+static void xgbe_init_all_fptrs(struct xgbe_prv_data *pdata)
+{
+ xgbe_init_function_ptrs_dev(&pdata->hw_if);
+ xgbe_init_function_ptrs_phy(&pdata->phy_if);
+ xgbe_init_function_ptrs_i2c(&pdata->i2c_if);
+ xgbe_init_function_ptrs_desc(&pdata->desc_if);
+
+ pdata->vdata->init_function_ptrs_phy_impl(&pdata->phy_if);
+}
+
+struct xgbe_prv_data *xgbe_alloc_pdata(struct device *dev)
+{
+ struct xgbe_prv_data *pdata;
+ struct net_device *netdev;
+
+ netdev = alloc_etherdev_mq(sizeof(struct xgbe_prv_data),
+ XGBE_MAX_DMA_CHANNELS);
+ if (!netdev) {
+ dev_err(dev, "alloc_etherdev_mq failed\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ SET_NETDEV_DEV(netdev, dev);
+ pdata = netdev_priv(netdev);
+ pdata->netdev = netdev;
+ pdata->dev = dev;
+
+ spin_lock_init(&pdata->lock);
+ spin_lock_init(&pdata->xpcs_lock);
+ mutex_init(&pdata->rss_mutex);
+ spin_lock_init(&pdata->tstamp_lock);
+ mutex_init(&pdata->i2c_mutex);
+ init_completion(&pdata->i2c_complete);
+ init_completion(&pdata->mdio_complete);
+
+ pdata->msg_enable = netif_msg_init(debug, default_msg_level);
+
+ set_bit(XGBE_DOWN, &pdata->dev_state);
+ set_bit(XGBE_STOPPED, &pdata->dev_state);
+
+ return pdata;
+}
+
+void xgbe_free_pdata(struct xgbe_prv_data *pdata)
+{
+ struct net_device *netdev = pdata->netdev;
+
+ free_netdev(netdev);
+}
+
+void xgbe_set_counts(struct xgbe_prv_data *pdata)
+{
+ /* Set all the function pointers */
+ xgbe_init_all_fptrs(pdata);
+
+ /* Populate the hardware features */
+ xgbe_get_all_hw_features(pdata);
+
+ /* Set default max values if not provided */
+ if (!pdata->tx_max_channel_count)
+ pdata->tx_max_channel_count = pdata->hw_feat.tx_ch_cnt;
+ if (!pdata->rx_max_channel_count)
+ pdata->rx_max_channel_count = pdata->hw_feat.rx_ch_cnt;
+
+ if (!pdata->tx_max_q_count)
+ pdata->tx_max_q_count = pdata->hw_feat.tx_q_cnt;
+ if (!pdata->rx_max_q_count)
+ pdata->rx_max_q_count = pdata->hw_feat.rx_q_cnt;
+
+ /* Calculate the number of Tx and Rx rings to be created
+ * -Tx (DMA) Channels map 1-to-1 to Tx Queues so set
+ * the number of Tx queues to the number of Tx channels
+ * enabled
+ * -Rx (DMA) Channels do not map 1-to-1 so use the actual
+ * number of Rx queues or maximum allowed
+ */
+ pdata->tx_ring_count = min_t(unsigned int, num_online_cpus(),
+ pdata->hw_feat.tx_ch_cnt);
+ pdata->tx_ring_count = min_t(unsigned int, pdata->tx_ring_count,
+ pdata->tx_max_channel_count);
+ pdata->tx_ring_count = min_t(unsigned int, pdata->tx_ring_count,
+ pdata->tx_max_q_count);
+
+ pdata->tx_q_count = pdata->tx_ring_count;
+
+ pdata->rx_ring_count = min_t(unsigned int, num_online_cpus(),
+ pdata->hw_feat.rx_ch_cnt);
+ pdata->rx_ring_count = min_t(unsigned int, pdata->rx_ring_count,
+ pdata->rx_max_channel_count);
+
+ pdata->rx_q_count = min_t(unsigned int, pdata->hw_feat.rx_q_cnt,
+ pdata->rx_max_q_count);
+
+ if (netif_msg_probe(pdata)) {
+ dev_dbg(pdata->dev, "TX/RX DMA channel count = %u/%u\n",
+ pdata->tx_ring_count, pdata->rx_ring_count);
+ dev_dbg(pdata->dev, "TX/RX hardware queue count = %u/%u\n",
+ pdata->tx_q_count, pdata->rx_q_count);
+ }
+}
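+
+/* Worked example with hypothetical numbers: on an 8-CPU system where
+ * the hardware reports tx_ch_cnt = 4, tx_q_cnt = 4, rx_ch_cnt = 4 and
+ * rx_q_cnt = 8 (and no platform maximums are set), this yields
+ * tx_ring_count = tx_q_count = 4 and rx_ring_count = 4, while
+ * rx_q_count = 8 since Rx queues need not map 1-to-1 onto Rx DMA
+ * channels.
+ */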
+
+int xgbe_config_netdev(struct xgbe_prv_data *pdata)
+{
+ struct net_device *netdev = pdata->netdev;
+ struct device *dev = pdata->dev;
+ int ret;
+
+ netdev->irq = pdata->dev_irq;
+ netdev->base_addr = (unsigned long)pdata->xgmac_regs;
+ memcpy(netdev->dev_addr, pdata->mac_addr, netdev->addr_len);
+
+ /* Initialize ECC timestamps */
+ pdata->tx_sec_period = jiffies;
+ pdata->tx_ded_period = jiffies;
+ pdata->rx_sec_period = jiffies;
+ pdata->rx_ded_period = jiffies;
+ pdata->desc_sec_period = jiffies;
+ pdata->desc_ded_period = jiffies;
+
+ /* Issue software reset to device */
+ ret = pdata->hw_if.exit(pdata);
+ if (ret) {
+ dev_err(dev, "software reset failed\n");
+ return ret;
+ }
+
+ /* Set default configuration data */
+ xgbe_default_config(pdata);
+
+ /* Set the DMA mask */
+ ret = dma_set_mask_and_coherent(dev,
+ DMA_BIT_MASK(pdata->hw_feat.dma_width));
+ if (ret) {
+ dev_err(dev, "dma_set_mask_and_coherent failed\n");
+ return ret;
+ }
+
+ /* Set default max values if not provided */
+ if (!pdata->tx_max_fifo_size)
+ pdata->tx_max_fifo_size = pdata->hw_feat.tx_fifo_size;
+ if (!pdata->rx_max_fifo_size)
+ pdata->rx_max_fifo_size = pdata->hw_feat.rx_fifo_size;
+
+ /* Set and validate the number of descriptors for a ring */
+ BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_TX_DESC_CNT);
+ pdata->tx_desc_count = XGBE_TX_DESC_CNT;
+
+ BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_RX_DESC_CNT);
+ pdata->rx_desc_count = XGBE_RX_DESC_CNT;
+
+ /* Adjust the number of queues based on interrupts assigned */
+ if (pdata->channel_irq_count) {
+ pdata->tx_ring_count = min_t(unsigned int, pdata->tx_ring_count,
+ pdata->channel_irq_count);
+ pdata->rx_ring_count = min_t(unsigned int, pdata->rx_ring_count,
+ pdata->channel_irq_count);
+
+ if (netif_msg_probe(pdata))
+ dev_dbg(pdata->dev,
+ "adjusted TX/RX DMA channel count = %u/%u\n",
+ pdata->tx_ring_count, pdata->rx_ring_count);
+ }
+
+ /* Initialize RSS hash key */
+ netdev_rss_key_fill(pdata->rss_key, sizeof(pdata->rss_key));
+
+ XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1);
+ XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
+ XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);
+
+ /* Call MDIO/PHY initialization routine */
+ pdata->debugfs_an_cdr_workaround = pdata->vdata->an_cdr_workaround;
+ ret = pdata->phy_if.phy_init(pdata);
+ if (ret)
+ return ret;
+
+ /* Set device operations */
+ netdev->netdev_ops = xgbe_get_netdev_ops();
+ netdev->ethtool_ops = xgbe_get_ethtool_ops();
+#ifdef CONFIG_AMD_XGBE_DCB
+ netdev->dcbnl_ops = xgbe_get_dcbnl_ops();
+#endif
+
+ /* Set device features */
+ netdev->hw_features = NETIF_F_SG |
+ NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM |
+ NETIF_F_RXCSUM |
+ NETIF_F_TSO |
+ NETIF_F_TSO6 |
+ NETIF_F_GRO |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_FILTER;
+
+ if (pdata->hw_feat.rss)
+ netdev->hw_features |= NETIF_F_RXHASH;
+
+ if (pdata->hw_feat.vxn) {
+ netdev->hw_enc_features = NETIF_F_SG |
+ NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM |
+ NETIF_F_RXCSUM |
+ NETIF_F_TSO |
+ NETIF_F_TSO6 |
+ NETIF_F_GRO |
+ NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM;
+
+ netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM;
+
+ netdev->udp_tunnel_nic_info = xgbe_get_udp_tunnel_info();
+ }
+
+ netdev->vlan_features |= NETIF_F_SG |
+ NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM |
+ NETIF_F_TSO |
+ NETIF_F_TSO6;
+
+ netdev->features |= netdev->hw_features;
+ pdata->netdev_features = netdev->features;
+
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+ netdev->min_mtu = 0;
+ netdev->max_mtu = XGMAC_JUMBO_PACKET_MTU;
+
+ /* Use default watchdog timeout */
+ netdev->watchdog_timeo = 0;
+
+ xgbe_init_rx_coalesce(pdata);
+ xgbe_init_tx_coalesce(pdata);
+
+ netif_carrier_off(netdev);
+ ret = register_netdev(netdev);
+ if (ret) {
+ dev_err(dev, "net device registration failed\n");
+ return ret;
+ }
+
+ if (IS_REACHABLE(CONFIG_PTP_1588_CLOCK))
+ xgbe_ptp_register(pdata);
+
+ xgbe_debugfs_init(pdata);
+
+ netif_dbg(pdata, drv, pdata->netdev, "%u Tx software queues\n",
+ pdata->tx_ring_count);
+ netif_dbg(pdata, drv, pdata->netdev, "%u Rx software queues\n",
+ pdata->rx_ring_count);
+
+ return 0;
+}
+
+void xgbe_deconfig_netdev(struct xgbe_prv_data *pdata)
+{
+ struct net_device *netdev = pdata->netdev;
+
+ xgbe_debugfs_exit(pdata);
+
+ if (IS_REACHABLE(CONFIG_PTP_1588_CLOCK))
+ xgbe_ptp_unregister(pdata);
+
+ unregister_netdev(netdev);
+
+ pdata->phy_if.phy_exit(pdata);
+}
+
+static int xgbe_netdev_event(struct notifier_block *nb, unsigned long event,
+ void *data)
+{
+ struct net_device *netdev = netdev_notifier_info_to_dev(data);
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+ if (netdev->netdev_ops != xgbe_get_netdev_ops())
+ goto out;
+
+ switch (event) {
+ case NETDEV_CHANGENAME:
+ xgbe_debugfs_rename(pdata);
+ break;
+
+ default:
+ break;
+ }
+
+out:
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block xgbe_netdev_notifier = {
+ .notifier_call = xgbe_netdev_event,
+};
+
+static int __init xgbe_mod_init(void)
+{
+ int ret;
+
+ ret = register_netdevice_notifier(&xgbe_netdev_notifier);
+ if (ret)
+ return ret;
+
+ ret = xgbe_platform_init();
+ if (ret)
+ goto err_platform_init;
+
+ ret = xgbe_pci_init();
+ if (ret)
+ goto err_pci_init;
+
+ return 0;
+
+err_pci_init:
+ xgbe_platform_exit();
+err_platform_init:
+ unregister_netdevice_notifier(&xgbe_netdev_notifier);
+ return ret;
+}
+
+static void __exit xgbe_mod_exit(void)
+{
+ xgbe_pci_exit();
+
+ xgbe_platform_exit();
+
+ unregister_netdevice_notifier(&xgbe_netdev_notifier);
+}
+
+module_init(xgbe_mod_init);
+module_exit(xgbe_mod_exit);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
new file mode 100644
index 000000000..60be836b2
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
@@ -0,0 +1,1677 @@
+/*
+ * AMD 10Gb Ethernet driver
+ *
+ * This file is available to you under your choice of the following two
+ * licenses:
+ *
+ * License 1: GPLv2
+ *
+ * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
+ *
+ * This file is free software; you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * License 2: Modified BSD
+ *
+ * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Advanced Micro Devices, Inc. nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/kmod.h>
+#include <linux/mdio.h>
+#include <linux/phy.h>
+#include <linux/of.h>
+#include <linux/bitops.h>
+#include <linux/jiffies.h>
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+static int xgbe_phy_module_eeprom(struct xgbe_prv_data *pdata,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ if (!pdata->phy_if.phy_impl.module_eeprom)
+ return -ENXIO;
+
+ return pdata->phy_if.phy_impl.module_eeprom(pdata, eeprom, data);
+}
+
+static int xgbe_phy_module_info(struct xgbe_prv_data *pdata,
+ struct ethtool_modinfo *modinfo)
+{
+ if (!pdata->phy_if.phy_impl.module_info)
+ return -ENXIO;
+
+ return pdata->phy_if.phy_impl.module_info(pdata, modinfo);
+}
+
+static void xgbe_an37_clear_interrupts(struct xgbe_prv_data *pdata)
+{
+ int reg;
+
+ reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_STAT);
+ reg &= ~XGBE_AN_CL37_INT_MASK;
+ XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_STAT, reg);
+}
+
+static void xgbe_an37_disable_interrupts(struct xgbe_prv_data *pdata)
+{
+ int reg;
+
+ reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_CTRL);
+ reg &= ~XGBE_AN_CL37_INT_MASK;
+ XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_CTRL, reg);
+
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_PCS_DIG_CTRL);
+ reg &= ~XGBE_PCS_CL37_BP;
+ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_PCS_DIG_CTRL, reg);
+}
+
+static void xgbe_an37_enable_interrupts(struct xgbe_prv_data *pdata)
+{
+ int reg;
+
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_PCS_DIG_CTRL);
+ reg |= XGBE_PCS_CL37_BP;
+ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_PCS_DIG_CTRL, reg);
+
+ reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_CTRL);
+ reg |= XGBE_AN_CL37_INT_MASK;
+ XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_CTRL, reg);
+}
+
+static void xgbe_an73_clear_interrupts(struct xgbe_prv_data *pdata)
+{
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, 0);
+}
+
+static void xgbe_an73_disable_interrupts(struct xgbe_prv_data *pdata)
+{
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, 0);
+}
+
+static void xgbe_an73_enable_interrupts(struct xgbe_prv_data *pdata)
+{
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, XGBE_AN_CL73_INT_MASK);
+}
+
+static void xgbe_an_enable_interrupts(struct xgbe_prv_data *pdata)
+{
+ switch (pdata->an_mode) {
+ case XGBE_AN_MODE_CL73:
+ case XGBE_AN_MODE_CL73_REDRV:
+ xgbe_an73_enable_interrupts(pdata);
+ break;
+ case XGBE_AN_MODE_CL37:
+ case XGBE_AN_MODE_CL37_SGMII:
+ xgbe_an37_enable_interrupts(pdata);
+ break;
+ default:
+ break;
+ }
+}
+
+static void xgbe_an_clear_interrupts_all(struct xgbe_prv_data *pdata)
+{
+ xgbe_an73_clear_interrupts(pdata);
+ xgbe_an37_clear_interrupts(pdata);
+}
+
+static void xgbe_kr_mode(struct xgbe_prv_data *pdata)
+{
+ /* Set MAC to 10G speed */
+ pdata->hw_if.set_speed(pdata, SPEED_10000);
+
+ /* Call PHY implementation support to complete rate change */
+ pdata->phy_if.phy_impl.set_mode(pdata, XGBE_MODE_KR);
+}
+
+static void xgbe_kx_2500_mode(struct xgbe_prv_data *pdata)
+{
+ /* Set MAC to 2.5G speed */
+ pdata->hw_if.set_speed(pdata, SPEED_2500);
+
+ /* Call PHY implementation support to complete rate change */
+ pdata->phy_if.phy_impl.set_mode(pdata, XGBE_MODE_KX_2500);
+}
+
+static void xgbe_kx_1000_mode(struct xgbe_prv_data *pdata)
+{
+ /* Set MAC to 1G speed */
+ pdata->hw_if.set_speed(pdata, SPEED_1000);
+
+ /* Call PHY implementation support to complete rate change */
+ pdata->phy_if.phy_impl.set_mode(pdata, XGBE_MODE_KX_1000);
+}
+
+static void xgbe_sfi_mode(struct xgbe_prv_data *pdata)
+{
+ /* If a KR re-driver is present, change to KR mode instead */
+ if (pdata->kr_redrv)
+ return xgbe_kr_mode(pdata);
+
+ /* Set MAC to 10G speed */
+ pdata->hw_if.set_speed(pdata, SPEED_10000);
+
+ /* Call PHY implementation support to complete rate change */
+ pdata->phy_if.phy_impl.set_mode(pdata, XGBE_MODE_SFI);
+}
+
+static void xgbe_x_mode(struct xgbe_prv_data *pdata)
+{
+ /* Set MAC to 1G speed */
+ pdata->hw_if.set_speed(pdata, SPEED_1000);
+
+ /* Call PHY implementation support to complete rate change */
+ pdata->phy_if.phy_impl.set_mode(pdata, XGBE_MODE_X);
+}
+
+static void xgbe_sgmii_1000_mode(struct xgbe_prv_data *pdata)
+{
+ /* Set MAC to 1G speed */
+ pdata->hw_if.set_speed(pdata, SPEED_1000);
+
+ /* Call PHY implementation support to complete rate change */
+ pdata->phy_if.phy_impl.set_mode(pdata, XGBE_MODE_SGMII_1000);
+}
+
+static void xgbe_sgmii_100_mode(struct xgbe_prv_data *pdata)
+{
+ /* Set MAC to 1G speed */
+ pdata->hw_if.set_speed(pdata, SPEED_1000);
+
+ /* Call PHY implementation support to complete rate change */
+ pdata->phy_if.phy_impl.set_mode(pdata, XGBE_MODE_SGMII_100);
+}
+
+static enum xgbe_mode xgbe_cur_mode(struct xgbe_prv_data *pdata)
+{
+ return pdata->phy_if.phy_impl.cur_mode(pdata);
+}
+
+static bool xgbe_in_kr_mode(struct xgbe_prv_data *pdata)
+{
+ return (xgbe_cur_mode(pdata) == XGBE_MODE_KR);
+}
+
+static void xgbe_change_mode(struct xgbe_prv_data *pdata,
+ enum xgbe_mode mode)
+{
+ switch (mode) {
+ case XGBE_MODE_KX_1000:
+ xgbe_kx_1000_mode(pdata);
+ break;
+ case XGBE_MODE_KX_2500:
+ xgbe_kx_2500_mode(pdata);
+ break;
+ case XGBE_MODE_KR:
+ xgbe_kr_mode(pdata);
+ break;
+ case XGBE_MODE_SGMII_100:
+ xgbe_sgmii_100_mode(pdata);
+ break;
+ case XGBE_MODE_SGMII_1000:
+ xgbe_sgmii_1000_mode(pdata);
+ break;
+ case XGBE_MODE_X:
+ xgbe_x_mode(pdata);
+ break;
+ case XGBE_MODE_SFI:
+ xgbe_sfi_mode(pdata);
+ break;
+ case XGBE_MODE_UNKNOWN:
+ break;
+ default:
+ netif_dbg(pdata, link, pdata->netdev,
+ "invalid operation mode requested (%u)\n", mode);
+ }
+}
+
+static void xgbe_switch_mode(struct xgbe_prv_data *pdata)
+{
+ xgbe_change_mode(pdata, pdata->phy_if.phy_impl.switch_mode(pdata));
+}
+
+static bool xgbe_set_mode(struct xgbe_prv_data *pdata,
+ enum xgbe_mode mode)
+{
+ if (mode == xgbe_cur_mode(pdata))
+ return false;
+
+ xgbe_change_mode(pdata, mode);
+
+ return true;
+}
+
+static bool xgbe_use_mode(struct xgbe_prv_data *pdata,
+ enum xgbe_mode mode)
+{
+ return pdata->phy_if.phy_impl.use_mode(pdata, mode);
+}
+
+static void xgbe_an37_set(struct xgbe_prv_data *pdata, bool enable,
+ bool restart)
+{
+ unsigned int reg;
+
+ reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_CTRL1);
+ reg &= ~MDIO_VEND2_CTRL1_AN_ENABLE;
+
+ if (enable)
+ reg |= MDIO_VEND2_CTRL1_AN_ENABLE;
+
+ if (restart)
+ reg |= MDIO_VEND2_CTRL1_AN_RESTART;
+
+ XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_CTRL1, reg);
+}
+
+static void xgbe_an37_restart(struct xgbe_prv_data *pdata)
+{
+ xgbe_an37_enable_interrupts(pdata);
+ xgbe_an37_set(pdata, true, true);
+
+ netif_dbg(pdata, link, pdata->netdev, "CL37 AN enabled/restarted\n");
+}
+
+static void xgbe_an37_disable(struct xgbe_prv_data *pdata)
+{
+ xgbe_an37_set(pdata, false, false);
+ xgbe_an37_disable_interrupts(pdata);
+
+ netif_dbg(pdata, link, pdata->netdev, "CL37 AN disabled\n");
+}
+
+static void xgbe_an73_set(struct xgbe_prv_data *pdata, bool enable,
+ bool restart)
+{
+ unsigned int reg;
+
+ /* Disable KR training for now */
+ reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
+ reg &= ~XGBE_KR_TRAINING_ENABLE;
+ XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg);
+
+ /* Update AN settings */
+ reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_CTRL1);
+ reg &= ~MDIO_AN_CTRL1_ENABLE;
+
+ if (enable)
+ reg |= MDIO_AN_CTRL1_ENABLE;
+
+ if (restart)
+ reg |= MDIO_AN_CTRL1_RESTART;
+
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_CTRL1, reg);
+}
+
+static void xgbe_an73_restart(struct xgbe_prv_data *pdata)
+{
+ xgbe_an73_enable_interrupts(pdata);
+ xgbe_an73_set(pdata, true, true);
+
+ netif_dbg(pdata, link, pdata->netdev, "CL73 AN enabled/restarted\n");
+}
+
+static void xgbe_an73_disable(struct xgbe_prv_data *pdata)
+{
+ xgbe_an73_set(pdata, false, false);
+ xgbe_an73_disable_interrupts(pdata);
+
+ pdata->an_start = 0;
+
+ netif_dbg(pdata, link, pdata->netdev, "CL73 AN disabled\n");
+}
+
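+/* Give the PHY implementation a chance to prepare before the
+ * mode-specific restart (an_pre); xgbe_an_disable() mirrors this with
+ * an_post.
+ */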
+static void xgbe_an_restart(struct xgbe_prv_data *pdata)
+{
+ if (pdata->phy_if.phy_impl.an_pre)
+ pdata->phy_if.phy_impl.an_pre(pdata);
+
+ switch (pdata->an_mode) {
+ case XGBE_AN_MODE_CL73:
+ case XGBE_AN_MODE_CL73_REDRV:
+ xgbe_an73_restart(pdata);
+ break;
+ case XGBE_AN_MODE_CL37:
+ case XGBE_AN_MODE_CL37_SGMII:
+ xgbe_an37_restart(pdata);
+ break;
+ default:
+ break;
+ }
+}
+
+static void xgbe_an_disable(struct xgbe_prv_data *pdata)
+{
+ if (pdata->phy_if.phy_impl.an_post)
+ pdata->phy_if.phy_impl.an_post(pdata);
+
+ switch (pdata->an_mode) {
+ case XGBE_AN_MODE_CL73:
+ case XGBE_AN_MODE_CL73_REDRV:
+ xgbe_an73_disable(pdata);
+ break;
+ case XGBE_AN_MODE_CL37:
+ case XGBE_AN_MODE_CL37_SGMII:
+ xgbe_an37_disable(pdata);
+ break;
+ default:
+ break;
+ }
+}
+
+static void xgbe_an_disable_all(struct xgbe_prv_data *pdata)
+{
+ xgbe_an73_disable(pdata);
+ xgbe_an37_disable(pdata);
+}
+
+static enum xgbe_an xgbe_an73_tx_training(struct xgbe_prv_data *pdata,
+ enum xgbe_rx *state)
+{
+ unsigned int ad_reg, lp_reg, reg;
+
+ *state = XGBE_RX_COMPLETE;
+
+ /* If we're not in KR mode then we're done */
+ if (!xgbe_in_kr_mode(pdata))
+ return XGBE_AN_PAGE_RECEIVED;
+
+ /* Enable/Disable FEC */
+ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
+ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 2);
+
+ reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FECCTRL);
+ reg &= ~(MDIO_PMA_10GBR_FECABLE_ABLE | MDIO_PMA_10GBR_FECABLE_ERRABLE);
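+	/* 0xc000 covers the clause 73 FEC ability/request bits (D47:46
+	 * of the base page): only enable FEC when both link partners
+	 * advertise it.
+	 */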
+ if ((ad_reg & 0xc000) && (lp_reg & 0xc000))
+ reg |= pdata->fec_ability;
+
+ XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FECCTRL, reg);
+
+ /* Start KR training */
+ if (pdata->phy_if.phy_impl.kr_training_pre)
+ pdata->phy_if.phy_impl.kr_training_pre(pdata);
+
+ reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
+ reg |= XGBE_KR_TRAINING_ENABLE;
+ reg |= XGBE_KR_TRAINING_START;
+ XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg);
+ pdata->kr_start_time = jiffies;
+
+ netif_dbg(pdata, link, pdata->netdev,
+ "KR training initiated\n");
+
+ if (pdata->phy_if.phy_impl.kr_training_post)
+ pdata->phy_if.phy_impl.kr_training_post(pdata);
+
+ return XGBE_AN_PAGE_RECEIVED;
+}
+
+static enum xgbe_an xgbe_an73_tx_xnp(struct xgbe_prv_data *pdata,
+ enum xgbe_rx *state)
+{
+ u16 msg;
+
+ *state = XGBE_RX_XNP;
+
+ msg = XGBE_XNP_MCF_NULL_MESSAGE;
+ msg |= XGBE_XNP_MP_FORMATTED;
+
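+	/* As with the advertisement registers, write the upper next-page
+	 * words first and the base word last.
+	 */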
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_XNP + 2, 0);
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_XNP + 1, 0);
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_XNP, msg);
+
+ return XGBE_AN_PAGE_RECEIVED;
+}
+
+static enum xgbe_an xgbe_an73_rx_bpa(struct xgbe_prv_data *pdata,
+ enum xgbe_rx *state)
+{
+ unsigned int link_support;
+ unsigned int reg, ad_reg, lp_reg;
+
+ /* Read Base Ability register 2 first */
+ reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 1);
+
+ /* Check for a supported mode, otherwise restart in a different one */
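+	/* (0x80 and 0x20 are the 10GBASE-KR and 1000BASE-KX technology
+	 * ability bits in the link partner's second base page word.)
+	 */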
+ link_support = xgbe_in_kr_mode(pdata) ? 0x80 : 0x20;
+ if (!(reg & link_support))
+ return XGBE_AN_INCOMPAT_LINK;
+
+ /* Check Extended Next Page support */
+ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
+ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA);
+
+ return ((ad_reg & XGBE_XNP_NP_EXCHANGE) ||
+ (lp_reg & XGBE_XNP_NP_EXCHANGE))
+ ? xgbe_an73_tx_xnp(pdata, state)
+ : xgbe_an73_tx_training(pdata, state);
+}
+
+static enum xgbe_an xgbe_an73_rx_xnp(struct xgbe_prv_data *pdata,
+ enum xgbe_rx *state)
+{
+ unsigned int ad_reg, lp_reg;
+
+ /* Check Extended Next Page support */
+ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_XNP);
+ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPX);
+
+ return ((ad_reg & XGBE_XNP_NP_EXCHANGE) ||
+ (lp_reg & XGBE_XNP_NP_EXCHANGE))
+ ? xgbe_an73_tx_xnp(pdata, state)
+ : xgbe_an73_tx_training(pdata, state);
+}
+
+static enum xgbe_an xgbe_an73_page_received(struct xgbe_prv_data *pdata)
+{
+ enum xgbe_rx *state;
+ unsigned long an_timeout;
+ enum xgbe_an ret;
+
+ if (!pdata->an_start) {
+ pdata->an_start = jiffies;
+ } else {
+ an_timeout = pdata->an_start +
+ msecs_to_jiffies(XGBE_AN_MS_TIMEOUT);
+ if (time_after(jiffies, an_timeout)) {
+ /* Auto-negotiation timed out, reset state */
+ pdata->kr_state = XGBE_RX_BPA;
+ pdata->kx_state = XGBE_RX_BPA;
+
+ pdata->an_start = jiffies;
+
+ netif_dbg(pdata, link, pdata->netdev,
+ "CL73 AN timed out, resetting state\n");
+ }
+ }
+
+ state = xgbe_in_kr_mode(pdata) ? &pdata->kr_state
+ : &pdata->kx_state;
+
+ switch (*state) {
+ case XGBE_RX_BPA:
+ ret = xgbe_an73_rx_bpa(pdata, state);
+ break;
+
+ case XGBE_RX_XNP:
+ ret = xgbe_an73_rx_xnp(pdata, state);
+ break;
+
+ default:
+ ret = XGBE_AN_ERROR;
+ }
+
+ return ret;
+}
+
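+/* The link partner advertised nothing usable in the current mode: mark
+ * this mode's state as errored and, unless the alternate mode has
+ * already failed too or is not being advertised locally, switch between
+ * KR and KX/X operation and restart auto-negotiation there.
+ */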
+static enum xgbe_an xgbe_an73_incompat_link(struct xgbe_prv_data *pdata)
+{
+ struct ethtool_link_ksettings *lks = &pdata->phy.lks;
+
+ /* Be sure we aren't looping trying to negotiate */
+ if (xgbe_in_kr_mode(pdata)) {
+ pdata->kr_state = XGBE_RX_ERROR;
+
+ if (!XGBE_ADV(lks, 1000baseKX_Full) &&
+ !XGBE_ADV(lks, 2500baseX_Full))
+ return XGBE_AN_NO_LINK;
+
+ if (pdata->kx_state != XGBE_RX_BPA)
+ return XGBE_AN_NO_LINK;
+ } else {
+ pdata->kx_state = XGBE_RX_ERROR;
+
+ if (!XGBE_ADV(lks, 10000baseKR_Full))
+ return XGBE_AN_NO_LINK;
+
+ if (pdata->kr_state != XGBE_RX_BPA)
+ return XGBE_AN_NO_LINK;
+ }
+
+ xgbe_an_disable(pdata);
+
+ xgbe_switch_mode(pdata);
+
+ pdata->an_result = XGBE_AN_READY;
+
+ xgbe_an_restart(pdata);
+
+ return XGBE_AN_INCOMPAT_LINK;
+}
+
+static void xgbe_an37_isr(struct xgbe_prv_data *pdata)
+{
+ unsigned int reg;
+
+ /* Disable AN interrupts */
+ xgbe_an37_disable_interrupts(pdata);
+
+ /* Save the interrupt(s) that fired */
+ reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_STAT);
+ pdata->an_int = reg & XGBE_AN_CL37_INT_MASK;
+ pdata->an_status = reg & ~XGBE_AN_CL37_INT_MASK;
+
+ if (pdata->an_int) {
+ /* Clear the interrupt(s) that fired and process them */
+ reg &= ~XGBE_AN_CL37_INT_MASK;
+ XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_STAT, reg);
+
+ queue_work(pdata->an_workqueue, &pdata->an_irq_work);
+ } else {
+ /* Enable AN interrupts */
+ xgbe_an37_enable_interrupts(pdata);
+
+ /* Reissue interrupt if status is not clear */
+ if (pdata->vdata->irq_reissue_support)
+ XP_IOWRITE(pdata, XP_INT_REISSUE_EN, 1 << 3);
+ }
+}
+
+static void xgbe_an73_isr(struct xgbe_prv_data *pdata)
+{
+ /* Disable AN interrupts */
+ xgbe_an73_disable_interrupts(pdata);
+
+ /* Save the interrupt(s) that fired */
+ pdata->an_int = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_INT);
+
+ if (pdata->an_int) {
+ /* Clear the interrupt(s) that fired and process them */
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, ~pdata->an_int);
+
+ queue_work(pdata->an_workqueue, &pdata->an_irq_work);
+ } else {
+ /* Enable AN interrupts */
+ xgbe_an73_enable_interrupts(pdata);
+
+ /* Reissue interrupt if status is not clear */
+ if (pdata->vdata->irq_reissue_support)
+ XP_IOWRITE(pdata, XP_INT_REISSUE_EN, 1 << 3);
+ }
+}
+
+static void xgbe_an_isr_task(struct tasklet_struct *t)
+{
+ struct xgbe_prv_data *pdata = from_tasklet(pdata, t, tasklet_an);
+
+ netif_dbg(pdata, intr, pdata->netdev, "AN interrupt received\n");
+
+ switch (pdata->an_mode) {
+ case XGBE_AN_MODE_CL73:
+ case XGBE_AN_MODE_CL73_REDRV:
+ xgbe_an73_isr(pdata);
+ break;
+ case XGBE_AN_MODE_CL37:
+ case XGBE_AN_MODE_CL37_SGMII:
+ xgbe_an37_isr(pdata);
+ break;
+ default:
+ break;
+ }
+}
+
+static irqreturn_t xgbe_an_isr(int irq, void *data)
+{
+ struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;
+
+ if (pdata->isr_as_tasklet)
+ tasklet_schedule(&pdata->tasklet_an);
+ else
+ xgbe_an_isr_task(&pdata->tasklet_an);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t xgbe_an_combined_isr(struct xgbe_prv_data *pdata)
+{
+ xgbe_an_isr_task(&pdata->tasklet_an);
+
+ return IRQ_HANDLED;
+}
+
+static void xgbe_an_irq_work(struct work_struct *work)
+{
+ struct xgbe_prv_data *pdata = container_of(work,
+ struct xgbe_prv_data,
+ an_irq_work);
+
+ /* Avoid a race between enabling the IRQ and exiting the work by
+ * waiting for the work to finish and then queueing it
+ */
+ flush_work(&pdata->an_work);
+ queue_work(pdata->an_workqueue, &pdata->an_work);
+}
+
+static const char *xgbe_state_as_string(enum xgbe_an state)
+{
+ switch (state) {
+ case XGBE_AN_READY:
+ return "Ready";
+ case XGBE_AN_PAGE_RECEIVED:
+ return "Page-Received";
+ case XGBE_AN_INCOMPAT_LINK:
+ return "Incompatible-Link";
+ case XGBE_AN_COMPLETE:
+ return "Complete";
+ case XGBE_AN_NO_LINK:
+ return "No-Link";
+ case XGBE_AN_ERROR:
+ return "Error";
+ default:
+ return "Undefined";
+ }
+}
+
+static void xgbe_an37_state_machine(struct xgbe_prv_data *pdata)
+{
+ enum xgbe_an cur_state = pdata->an_state;
+
+ if (!pdata->an_int)
+ return;
+
+ if (pdata->an_int & XGBE_AN_CL37_INT_CMPLT) {
+ pdata->an_state = XGBE_AN_COMPLETE;
+ pdata->an_int &= ~XGBE_AN_CL37_INT_CMPLT;
+
+ /* If SGMII is enabled, check the link status */
+ if ((pdata->an_mode == XGBE_AN_MODE_CL37_SGMII) &&
+ !(pdata->an_status & XGBE_SGMII_AN_LINK_STATUS))
+ pdata->an_state = XGBE_AN_NO_LINK;
+ }
+
+ netif_dbg(pdata, link, pdata->netdev, "CL37 AN %s\n",
+ xgbe_state_as_string(pdata->an_state));
+
+ cur_state = pdata->an_state;
+
+ switch (pdata->an_state) {
+ case XGBE_AN_READY:
+ break;
+
+ case XGBE_AN_COMPLETE:
+ netif_dbg(pdata, link, pdata->netdev,
+ "Auto negotiation successful\n");
+ break;
+
+ case XGBE_AN_NO_LINK:
+ break;
+
+ default:
+ pdata->an_state = XGBE_AN_ERROR;
+ }
+
+ if (pdata->an_state == XGBE_AN_ERROR) {
+ netdev_err(pdata->netdev,
+ "error during auto-negotiation, state=%u\n",
+ cur_state);
+
+ pdata->an_int = 0;
+ xgbe_an37_clear_interrupts(pdata);
+ }
+
+ if (pdata->an_state >= XGBE_AN_COMPLETE) {
+ pdata->an_result = pdata->an_state;
+ pdata->an_state = XGBE_AN_READY;
+
+ if (pdata->phy_if.phy_impl.an_post)
+ pdata->phy_if.phy_impl.an_post(pdata);
+
+ netif_dbg(pdata, link, pdata->netdev, "CL37 AN result: %s\n",
+ xgbe_state_as_string(pdata->an_result));
+ }
+
+ xgbe_an37_enable_interrupts(pdata);
+}
+
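+/* Drain pdata->an_int one event at a time: each pass maps a pending
+ * bit (page received, incompatible link, complete) onto an AN state
+ * and runs its handler, looping via "again" while handlers keep
+ * changing the state and via "next_int" while interrupt bits remain.
+ * Interrupts are re-enabled only once both loops settle.
+ */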
+static void xgbe_an73_state_machine(struct xgbe_prv_data *pdata)
+{
+ enum xgbe_an cur_state = pdata->an_state;
+
+ if (!pdata->an_int)
+ return;
+
+next_int:
+ if (pdata->an_int & XGBE_AN_CL73_PG_RCV) {
+ pdata->an_state = XGBE_AN_PAGE_RECEIVED;
+ pdata->an_int &= ~XGBE_AN_CL73_PG_RCV;
+ } else if (pdata->an_int & XGBE_AN_CL73_INC_LINK) {
+ pdata->an_state = XGBE_AN_INCOMPAT_LINK;
+ pdata->an_int &= ~XGBE_AN_CL73_INC_LINK;
+ } else if (pdata->an_int & XGBE_AN_CL73_INT_CMPLT) {
+ pdata->an_state = XGBE_AN_COMPLETE;
+ pdata->an_int &= ~XGBE_AN_CL73_INT_CMPLT;
+ } else {
+ pdata->an_state = XGBE_AN_ERROR;
+ }
+
+again:
+ netif_dbg(pdata, link, pdata->netdev, "CL73 AN %s\n",
+ xgbe_state_as_string(pdata->an_state));
+
+ cur_state = pdata->an_state;
+
+ switch (pdata->an_state) {
+ case XGBE_AN_READY:
+ pdata->an_supported = 0;
+ break;
+
+ case XGBE_AN_PAGE_RECEIVED:
+ pdata->an_state = xgbe_an73_page_received(pdata);
+ pdata->an_supported++;
+ break;
+
+ case XGBE_AN_INCOMPAT_LINK:
+ pdata->an_supported = 0;
+ pdata->parallel_detect = 0;
+ pdata->an_state = xgbe_an73_incompat_link(pdata);
+ break;
+
+ case XGBE_AN_COMPLETE:
+ pdata->parallel_detect = pdata->an_supported ? 0 : 1;
+ netif_dbg(pdata, link, pdata->netdev, "%s successful\n",
+ pdata->an_supported ? "Auto negotiation"
+ : "Parallel detection");
+ break;
+
+ case XGBE_AN_NO_LINK:
+ break;
+
+ default:
+ pdata->an_state = XGBE_AN_ERROR;
+ }
+
+ if (pdata->an_state == XGBE_AN_NO_LINK) {
+ pdata->an_int = 0;
+ xgbe_an73_clear_interrupts(pdata);
+ } else if (pdata->an_state == XGBE_AN_ERROR) {
+ netdev_err(pdata->netdev,
+ "error during auto-negotiation, state=%u\n",
+ cur_state);
+
+ pdata->an_int = 0;
+ xgbe_an73_clear_interrupts(pdata);
+ }
+
+ if (pdata->an_state >= XGBE_AN_COMPLETE) {
+ pdata->an_result = pdata->an_state;
+ pdata->an_state = XGBE_AN_READY;
+ pdata->kr_state = XGBE_RX_BPA;
+ pdata->kx_state = XGBE_RX_BPA;
+ pdata->an_start = 0;
+
+ if (pdata->phy_if.phy_impl.an_post)
+ pdata->phy_if.phy_impl.an_post(pdata);
+
+ netif_dbg(pdata, link, pdata->netdev, "CL73 AN result: %s\n",
+ xgbe_state_as_string(pdata->an_result));
+ }
+
+ if (cur_state != pdata->an_state)
+ goto again;
+
+ if (pdata->an_int)
+ goto next_int;
+
+ xgbe_an73_enable_interrupts(pdata);
+}
+
+static void xgbe_an_state_machine(struct work_struct *work)
+{
+ struct xgbe_prv_data *pdata = container_of(work,
+ struct xgbe_prv_data,
+ an_work);
+
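+	/* Runs from the AN work queue; an_mutex serializes the state
+	 * machines against concurrent PHY reconfiguration.
+	 */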
+ mutex_lock(&pdata->an_mutex);
+
+ switch (pdata->an_mode) {
+ case XGBE_AN_MODE_CL73:
+ case XGBE_AN_MODE_CL73_REDRV:
+ xgbe_an73_state_machine(pdata);
+ break;
+ case XGBE_AN_MODE_CL37:
+ case XGBE_AN_MODE_CL37_SGMII:
+ xgbe_an37_state_machine(pdata);
+ break;
+ default:
+ break;
+ }
+
+ /* Reissue interrupt if status is not clear */
+ if (pdata->vdata->irq_reissue_support)
+ XP_IOWRITE(pdata, XP_INT_REISSUE_EN, 1 << 3);
+
+ mutex_unlock(&pdata->an_mutex);
+}
+
+static void xgbe_an37_init(struct xgbe_prv_data *pdata)
+{
+ struct ethtool_link_ksettings lks;
+ unsigned int reg;
+
+ pdata->phy_if.phy_impl.an_advertising(pdata, &lks);
+
+ /* Set up Advertisement register */
+ reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_ADVERTISE);
+ if (XGBE_ADV(&lks, Pause))
+ reg |= 0x100;
+ else
+ reg &= ~0x100;
+
+ if (XGBE_ADV(&lks, Asym_Pause))
+ reg |= 0x80;
+ else
+ reg &= ~0x80;
+
+ /* Full duplex, but not half */
+ reg |= XGBE_AN_CL37_FD_MASK;
+ reg &= ~XGBE_AN_CL37_HD_MASK;
+
+ XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_ADVERTISE, reg);
+
+ /* Set up the Control register */
+ reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_CTRL);
+ reg &= ~XGBE_AN_CL37_TX_CONFIG_MASK;
+ reg &= ~XGBE_AN_CL37_PCS_MODE_MASK;
+
+ switch (pdata->an_mode) {
+ case XGBE_AN_MODE_CL37:
+ reg |= XGBE_AN_CL37_PCS_MODE_BASEX;
+ break;
+ case XGBE_AN_MODE_CL37_SGMII:
+ reg |= XGBE_AN_CL37_PCS_MODE_SGMII;
+ break;
+ default:
+ break;
+ }
+
+ reg |= XGBE_AN_CL37_MII_CTRL_8BIT;
+
+ XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_CTRL, reg);
+
+ netif_dbg(pdata, link, pdata->netdev, "CL37 AN (%s) initialized\n",
+ (pdata->an_mode == XGBE_AN_MODE_CL37) ? "BaseX" : "SGMII");
+}
+
+static void xgbe_an73_init(struct xgbe_prv_data *pdata)
+{
+ struct ethtool_link_ksettings lks;
+ unsigned int reg;
+
+ pdata->phy_if.phy_impl.an_advertising(pdata, &lks);
+
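+	/* The CL73 base page spans three MDIO registers: the base word
+	 * (pause bits), the technology ability field (KR/KX) and the
+	 * FEC capability word. The base word is written last, presumably
+	 * so the advertisement is published as a consistent whole.
+	 */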
+ /* Set up Advertisement register 3 first */
+ reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
+ if (XGBE_ADV(&lks, 10000baseR_FEC))
+ reg |= 0xc000;
+ else
+ reg &= ~0xc000;
+
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2, reg);
+
+ /* Set up Advertisement register 2 next */
+ reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1);
+ if (XGBE_ADV(&lks, 10000baseKR_Full))
+ reg |= 0x80;
+ else
+ reg &= ~0x80;
+
+ if (XGBE_ADV(&lks, 1000baseKX_Full) ||
+ XGBE_ADV(&lks, 2500baseX_Full))
+ reg |= 0x20;
+ else
+ reg &= ~0x20;
+
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1, reg);
+
+ /* Set up Advertisement register 1 last */
+ reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
+ if (XGBE_ADV(&lks, Pause))
+ reg |= 0x400;
+ else
+ reg &= ~0x400;
+
+ if (XGBE_ADV(&lks, Asym_Pause))
+ reg |= 0x800;
+ else
+ reg &= ~0x800;
+
+ /* We don't intend to perform XNP */
+ reg &= ~XGBE_XNP_NP_EXCHANGE;
+
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE, reg);
+
+ netif_dbg(pdata, link, pdata->netdev, "CL73 AN initialized\n");
+}
+
+static void xgbe_an_init(struct xgbe_prv_data *pdata)
+{
+ /* Set up advertisement registers based on current settings */
+ pdata->an_mode = pdata->phy_if.phy_impl.an_mode(pdata);
+ switch (pdata->an_mode) {
+ case XGBE_AN_MODE_CL73:
+ case XGBE_AN_MODE_CL73_REDRV:
+ xgbe_an73_init(pdata);
+ break;
+ case XGBE_AN_MODE_CL37:
+ case XGBE_AN_MODE_CL37_SGMII:
+ xgbe_an37_init(pdata);
+ break;
+ default:
+ break;
+ }
+}
+
+static const char *xgbe_phy_fc_string(struct xgbe_prv_data *pdata)
+{
+ if (pdata->tx_pause && pdata->rx_pause)
+ return "rx/tx";
+ else if (pdata->rx_pause)
+ return "rx";
+ else if (pdata->tx_pause)
+ return "tx";
+ else
+ return "off";
+}
+
+static const char *xgbe_phy_speed_string(int speed)
+{
+ switch (speed) {
+ case SPEED_100:
+ return "100Mbps";
+ case SPEED_1000:
+ return "1Gbps";
+ case SPEED_2500:
+ return "2.5Gbps";
+ case SPEED_10000:
+ return "10Gbps";
+ case SPEED_UNKNOWN:
+ return "Unknown";
+ default:
+ return "Unsupported";
+ }
+}
+
+static void xgbe_phy_print_status(struct xgbe_prv_data *pdata)
+{
+ if (pdata->phy.link)
+ netdev_info(pdata->netdev,
+ "Link is Up - %s/%s - flow control %s\n",
+ xgbe_phy_speed_string(pdata->phy.speed),
+ pdata->phy.duplex == DUPLEX_FULL ? "Full" : "Half",
+ xgbe_phy_fc_string(pdata));
+ else
+ netdev_info(pdata->netdev, "Link is Down\n");
+}
+
+static void xgbe_phy_adjust_link(struct xgbe_prv_data *pdata)
+{
+ int new_state = 0;
+
+ if (pdata->phy.link) {
+ /* Flow control support */
+ pdata->pause_autoneg = pdata->phy.pause_autoneg;
+
+ if (pdata->tx_pause != pdata->phy.tx_pause) {
+ new_state = 1;
+ pdata->tx_pause = pdata->phy.tx_pause;
+ pdata->hw_if.config_tx_flow_control(pdata);
+ }
+
+ if (pdata->rx_pause != pdata->phy.rx_pause) {
+ new_state = 1;
+ pdata->rx_pause = pdata->phy.rx_pause;
+ pdata->hw_if.config_rx_flow_control(pdata);
+ }
+
+ /* Speed support */
+ if (pdata->phy_speed != pdata->phy.speed) {
+ new_state = 1;
+ pdata->phy_speed = pdata->phy.speed;
+ }
+
+ if (pdata->phy_link != pdata->phy.link) {
+ new_state = 1;
+ pdata->phy_link = pdata->phy.link;
+ }
+ } else if (pdata->phy_link) {
+ new_state = 1;
+ pdata->phy_link = 0;
+ pdata->phy_speed = SPEED_UNKNOWN;
+ }
+
+ if (new_state && netif_msg_link(pdata))
+ xgbe_phy_print_status(pdata);
+}
+
+static bool xgbe_phy_valid_speed(struct xgbe_prv_data *pdata, int speed)
+{
+ return pdata->phy_if.phy_impl.valid_speed(pdata, speed);
+}
+
+static int xgbe_phy_config_fixed(struct xgbe_prv_data *pdata)
+{
+ enum xgbe_mode mode;
+
+ netif_dbg(pdata, link, pdata->netdev, "fixed PHY configuration\n");
+
+ /* Disable auto-negotiation */
+ xgbe_an_disable(pdata);
+
+ /* Set specified mode for specified speed */
+ mode = pdata->phy_if.phy_impl.get_mode(pdata, pdata->phy.speed);
+ switch (mode) {
+ case XGBE_MODE_KX_1000:
+ case XGBE_MODE_KX_2500:
+ case XGBE_MODE_KR:
+ case XGBE_MODE_SGMII_100:
+ case XGBE_MODE_SGMII_1000:
+ case XGBE_MODE_X:
+ case XGBE_MODE_SFI:
+ break;
+ case XGBE_MODE_UNKNOWN:
+ default:
+ return -EINVAL;
+ }
+
+ /* Validate duplex mode */
+ if (pdata->phy.duplex != DUPLEX_FULL)
+ return -EINVAL;
+
+	/* Force the mode change for SFI in fixed PHY configuration.
+	 * Fixed PHY configurations need the PLL to be enabled while
+	 * doing the mode set. When no SFP module is connected during
+	 * boot, the driver assumes AN is on and attempts
+	 * auto-negotiation. However, if the connected SFP comes up in a
+	 * fixed PHY configuration, the link will not come up because
+	 * the PLL isn't enabled when the initial mode set command is
+	 * issued. So, force the mode change for SFI in fixed PHY
+	 * configuration to fix such link issues.
+	 */
+ if (mode == XGBE_MODE_SFI)
+ xgbe_change_mode(pdata, mode);
+ else
+ xgbe_set_mode(pdata, mode);
+
+ return 0;
+}
+
+static int __xgbe_phy_config_aneg(struct xgbe_prv_data *pdata, bool set_mode)
+{
+ int ret;
+
+ mutex_lock(&pdata->an_mutex);
+
+ set_bit(XGBE_LINK_INIT, &pdata->dev_state);
+ pdata->link_check = jiffies;
+
+ ret = pdata->phy_if.phy_impl.an_config(pdata);
+ if (ret)
+ goto out;
+
+ if (pdata->phy.autoneg != AUTONEG_ENABLE) {
+ ret = xgbe_phy_config_fixed(pdata);
+ if (ret || !pdata->kr_redrv)
+ goto out;
+
+ netif_dbg(pdata, link, pdata->netdev, "AN redriver support\n");
+ } else {
+ netif_dbg(pdata, link, pdata->netdev, "AN PHY configuration\n");
+ }
+
+	/* Disable the auto-negotiation interrupt so a stale interrupt
+	 * cannot race the mode and state reconfiguration below
+	 */
+ disable_irq(pdata->an_irq);
+
+ if (set_mode) {
+ /* Start auto-negotiation in a supported mode */
+ if (xgbe_use_mode(pdata, XGBE_MODE_KR)) {
+ xgbe_set_mode(pdata, XGBE_MODE_KR);
+ } else if (xgbe_use_mode(pdata, XGBE_MODE_KX_2500)) {
+ xgbe_set_mode(pdata, XGBE_MODE_KX_2500);
+ } else if (xgbe_use_mode(pdata, XGBE_MODE_KX_1000)) {
+ xgbe_set_mode(pdata, XGBE_MODE_KX_1000);
+ } else if (xgbe_use_mode(pdata, XGBE_MODE_SFI)) {
+ xgbe_set_mode(pdata, XGBE_MODE_SFI);
+ } else if (xgbe_use_mode(pdata, XGBE_MODE_X)) {
+ xgbe_set_mode(pdata, XGBE_MODE_X);
+ } else if (xgbe_use_mode(pdata, XGBE_MODE_SGMII_1000)) {
+ xgbe_set_mode(pdata, XGBE_MODE_SGMII_1000);
+ } else if (xgbe_use_mode(pdata, XGBE_MODE_SGMII_100)) {
+ xgbe_set_mode(pdata, XGBE_MODE_SGMII_100);
+ } else {
+ enable_irq(pdata->an_irq);
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+	/* Disable and stop any in-progress auto-negotiation */
+ xgbe_an_disable_all(pdata);
+
+	/* Clear any auto-negotiation interrupts */
+ xgbe_an_clear_interrupts_all(pdata);
+
+ pdata->an_result = XGBE_AN_READY;
+ pdata->an_state = XGBE_AN_READY;
+ pdata->kr_state = XGBE_RX_BPA;
+ pdata->kx_state = XGBE_RX_BPA;
+
+ /* Re-enable auto-negotiation interrupt */
+ enable_irq(pdata->an_irq);
+
+ xgbe_an_init(pdata);
+ xgbe_an_restart(pdata);
+
+out:
+ if (ret)
+ set_bit(XGBE_LINK_ERR, &pdata->dev_state);
+ else
+ clear_bit(XGBE_LINK_ERR, &pdata->dev_state);
+
+ mutex_unlock(&pdata->an_mutex);
+
+ return ret;
+}
+
+static int xgbe_phy_config_aneg(struct xgbe_prv_data *pdata)
+{
+ return __xgbe_phy_config_aneg(pdata, true);
+}
+
+static int xgbe_phy_reconfig_aneg(struct xgbe_prv_data *pdata)
+{
+ return __xgbe_phy_config_aneg(pdata, false);
+}
+
+static bool xgbe_phy_aneg_done(struct xgbe_prv_data *pdata)
+{
+ return (pdata->an_result == XGBE_AN_COMPLETE);
+}
+
+static void xgbe_check_link_timeout(struct xgbe_prv_data *pdata)
+{
+ unsigned long link_timeout;
+ unsigned long kr_time;
+ int wait;
+
+ link_timeout = pdata->link_check + (XGBE_LINK_TIMEOUT * HZ);
+ if (time_after(jiffies, link_timeout)) {
+ if ((xgbe_cur_mode(pdata) == XGBE_MODE_KR) &&
+ pdata->phy.autoneg == AUTONEG_ENABLE) {
+			/* An AN restart should not happen while KR training
+			 * is in progress. The loop below waits up to 500ms
+			 * and triggers an AN restart only if KR training has
+			 * failed.
+			 */
+ wait = XGBE_KR_TRAINING_WAIT_ITER;
+ while (wait--) {
+ kr_time = pdata->kr_start_time +
+ msecs_to_jiffies(XGBE_AN_MS_TIMEOUT);
+ if (time_after(jiffies, kr_time))
+ break;
+				/* An AN restart is not required if the AN result is COMPLETE */
+ if (pdata->an_result == XGBE_AN_COMPLETE)
+ return;
+ usleep_range(10000, 11000);
+ }
+ }
+ netif_dbg(pdata, link, pdata->netdev, "AN link timeout\n");
+ xgbe_phy_config_aneg(pdata);
+ }
+}
+
+static enum xgbe_mode xgbe_phy_status_aneg(struct xgbe_prv_data *pdata)
+{
+ return pdata->phy_if.phy_impl.an_outcome(pdata);
+}
+
+static bool xgbe_phy_status_result(struct xgbe_prv_data *pdata)
+{
+ struct ethtool_link_ksettings *lks = &pdata->phy.lks;
+ enum xgbe_mode mode;
+
+ XGBE_ZERO_LP_ADV(lks);
+
+ if ((pdata->phy.autoneg != AUTONEG_ENABLE) || pdata->parallel_detect)
+ mode = xgbe_cur_mode(pdata);
+ else
+ mode = xgbe_phy_status_aneg(pdata);
+
+ switch (mode) {
+ case XGBE_MODE_SGMII_100:
+ pdata->phy.speed = SPEED_100;
+ break;
+ case XGBE_MODE_X:
+ case XGBE_MODE_KX_1000:
+ case XGBE_MODE_SGMII_1000:
+ pdata->phy.speed = SPEED_1000;
+ break;
+ case XGBE_MODE_KX_2500:
+ pdata->phy.speed = SPEED_2500;
+ break;
+ case XGBE_MODE_KR:
+ case XGBE_MODE_SFI:
+ pdata->phy.speed = SPEED_10000;
+ break;
+ case XGBE_MODE_UNKNOWN:
+ default:
+ pdata->phy.speed = SPEED_UNKNOWN;
+ }
+
+ pdata->phy.duplex = DUPLEX_FULL;
+
+ if (!xgbe_set_mode(pdata, mode))
+ return false;
+
+ if (pdata->an_again)
+ xgbe_phy_reconfig_aneg(pdata);
+
+ return true;
+}
+
+static void xgbe_phy_status(struct xgbe_prv_data *pdata)
+{
+ unsigned int link_aneg;
+ int an_restart;
+
+ if (test_bit(XGBE_LINK_ERR, &pdata->dev_state)) {
+ netif_carrier_off(pdata->netdev);
+
+ pdata->phy.link = 0;
+ goto adjust_link;
+ }
+
+ link_aneg = (pdata->phy.autoneg == AUTONEG_ENABLE);
+
+ pdata->phy.link = pdata->phy_if.phy_impl.link_status(pdata,
+ &an_restart);
+ if (an_restart) {
+ xgbe_phy_config_aneg(pdata);
+ goto adjust_link;
+ }
+
+ if (pdata->phy.link) {
+ if (link_aneg && !xgbe_phy_aneg_done(pdata)) {
+ xgbe_check_link_timeout(pdata);
+ return;
+ }
+
+ if (xgbe_phy_status_result(pdata))
+ return;
+
+ if (test_bit(XGBE_LINK_INIT, &pdata->dev_state))
+ clear_bit(XGBE_LINK_INIT, &pdata->dev_state);
+
+ netif_carrier_on(pdata->netdev);
+ } else {
+ if (test_bit(XGBE_LINK_INIT, &pdata->dev_state)) {
+ xgbe_check_link_timeout(pdata);
+
+ if (link_aneg)
+ return;
+ }
+
+ xgbe_phy_status_result(pdata);
+
+ netif_carrier_off(pdata->netdev);
+ }
+
+adjust_link:
+ xgbe_phy_adjust_link(pdata);
+}
+
+static void xgbe_phy_stop(struct xgbe_prv_data *pdata)
+{
+ netif_dbg(pdata, link, pdata->netdev, "stopping PHY\n");
+
+ if (!pdata->phy_started)
+ return;
+
+ /* Indicate the PHY is down */
+ pdata->phy_started = 0;
+
+ /* Disable auto-negotiation */
+ xgbe_an_disable_all(pdata);
+
+ if (pdata->dev_irq != pdata->an_irq) {
+ devm_free_irq(pdata->dev, pdata->an_irq, pdata);
+ tasklet_kill(&pdata->tasklet_an);
+ }
+
+ pdata->phy_if.phy_impl.stop(pdata);
+
+ pdata->phy.link = 0;
+
+ xgbe_phy_adjust_link(pdata);
+}
+
+static int xgbe_phy_start(struct xgbe_prv_data *pdata)
+{
+ struct net_device *netdev = pdata->netdev;
+ int ret;
+
+ netif_dbg(pdata, link, pdata->netdev, "starting PHY\n");
+
+ ret = pdata->phy_if.phy_impl.start(pdata);
+ if (ret)
+ return ret;
+
+ /* If we have a separate AN irq, enable it */
+ if (pdata->dev_irq != pdata->an_irq) {
+ tasklet_setup(&pdata->tasklet_an, xgbe_an_isr_task);
+
+ ret = devm_request_irq(pdata->dev, pdata->an_irq,
+ xgbe_an_isr, 0, pdata->an_name,
+ pdata);
+ if (ret) {
+ netdev_err(netdev, "phy irq request failed\n");
+ goto err_stop;
+ }
+ }
+
+ /* Set initial mode - call the mode setting routines
+	 * directly to ensure we are properly configured
+ */
+ if (xgbe_use_mode(pdata, XGBE_MODE_KR)) {
+ xgbe_kr_mode(pdata);
+ } else if (xgbe_use_mode(pdata, XGBE_MODE_KX_2500)) {
+ xgbe_kx_2500_mode(pdata);
+ } else if (xgbe_use_mode(pdata, XGBE_MODE_KX_1000)) {
+ xgbe_kx_1000_mode(pdata);
+ } else if (xgbe_use_mode(pdata, XGBE_MODE_SFI)) {
+ xgbe_sfi_mode(pdata);
+ } else if (xgbe_use_mode(pdata, XGBE_MODE_X)) {
+ xgbe_x_mode(pdata);
+ } else if (xgbe_use_mode(pdata, XGBE_MODE_SGMII_1000)) {
+ xgbe_sgmii_1000_mode(pdata);
+ } else if (xgbe_use_mode(pdata, XGBE_MODE_SGMII_100)) {
+ xgbe_sgmii_100_mode(pdata);
+ } else {
+ ret = -EINVAL;
+ goto err_irq;
+ }
+
+ /* Indicate the PHY is up and running */
+ pdata->phy_started = 1;
+
+ xgbe_an_init(pdata);
+ xgbe_an_enable_interrupts(pdata);
+
+ return xgbe_phy_config_aneg(pdata);
+
+err_irq:
+ if (pdata->dev_irq != pdata->an_irq)
+ devm_free_irq(pdata->dev, pdata->an_irq, pdata);
+
+err_stop:
+ pdata->phy_if.phy_impl.stop(pdata);
+
+ return ret;
+}
+
+static int xgbe_phy_reset(struct xgbe_prv_data *pdata)
+{
+ int ret;
+
+ ret = pdata->phy_if.phy_impl.reset(pdata);
+ if (ret)
+ return ret;
+
+ /* Disable auto-negotiation for now */
+ xgbe_an_disable_all(pdata);
+
+ /* Clear auto-negotiation interrupts */
+ xgbe_an_clear_interrupts_all(pdata);
+
+ return 0;
+}
+
+static void xgbe_dump_phy_registers(struct xgbe_prv_data *pdata)
+{
+ struct device *dev = pdata->dev;
+
+ dev_dbg(dev, "\n************* PHY Reg dump **********************\n");
+
+ dev_dbg(dev, "PCS Control Reg (%#06x) = %#06x\n", MDIO_CTRL1,
+ XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1));
+ dev_dbg(dev, "PCS Status Reg (%#06x) = %#06x\n", MDIO_STAT1,
+ XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1));
+ dev_dbg(dev, "Phy Id (PHYS ID 1 %#06x)= %#06x\n", MDIO_DEVID1,
+ XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVID1));
+ dev_dbg(dev, "Phy Id (PHYS ID 2 %#06x)= %#06x\n", MDIO_DEVID2,
+ XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVID2));
+ dev_dbg(dev, "Devices in Package (%#06x)= %#06x\n", MDIO_DEVS1,
+ XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVS1));
+ dev_dbg(dev, "Devices in Package (%#06x)= %#06x\n", MDIO_DEVS2,
+ XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVS2));
+
+ dev_dbg(dev, "Auto-Neg Control Reg (%#06x) = %#06x\n", MDIO_CTRL1,
+ XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_CTRL1));
+ dev_dbg(dev, "Auto-Neg Status Reg (%#06x) = %#06x\n", MDIO_STAT1,
+ XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_STAT1));
+ dev_dbg(dev, "Auto-Neg Ad Reg 1 (%#06x) = %#06x\n",
+ MDIO_AN_ADVERTISE,
+ XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE));
+ dev_dbg(dev, "Auto-Neg Ad Reg 2 (%#06x) = %#06x\n",
+ MDIO_AN_ADVERTISE + 1,
+ XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1));
+ dev_dbg(dev, "Auto-Neg Ad Reg 3 (%#06x) = %#06x\n",
+ MDIO_AN_ADVERTISE + 2,
+ XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2));
+ dev_dbg(dev, "Auto-Neg Completion Reg (%#06x) = %#06x\n",
+ MDIO_AN_COMP_STAT,
+ XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_COMP_STAT));
+
+ dev_dbg(dev, "\n*************************************************\n");
+}
+
+static int xgbe_phy_best_advertised_speed(struct xgbe_prv_data *pdata)
+{
+ struct ethtool_link_ksettings *lks = &pdata->phy.lks;
+
+ if (XGBE_ADV(lks, 10000baseKR_Full))
+ return SPEED_10000;
+ else if (XGBE_ADV(lks, 10000baseT_Full))
+ return SPEED_10000;
+ else if (XGBE_ADV(lks, 2500baseX_Full))
+ return SPEED_2500;
+ else if (XGBE_ADV(lks, 2500baseT_Full))
+ return SPEED_2500;
+ else if (XGBE_ADV(lks, 1000baseKX_Full))
+ return SPEED_1000;
+ else if (XGBE_ADV(lks, 1000baseT_Full))
+ return SPEED_1000;
+ else if (XGBE_ADV(lks, 100baseT_Full))
+ return SPEED_100;
+
+ return SPEED_UNKNOWN;
+}
+
+static void xgbe_phy_exit(struct xgbe_prv_data *pdata)
+{
+ pdata->phy_if.phy_impl.exit(pdata);
+}
+
+static int xgbe_phy_init(struct xgbe_prv_data *pdata)
+{
+ struct ethtool_link_ksettings *lks = &pdata->phy.lks;
+ int ret;
+
+ mutex_init(&pdata->an_mutex);
+ INIT_WORK(&pdata->an_irq_work, xgbe_an_irq_work);
+ INIT_WORK(&pdata->an_work, xgbe_an_state_machine);
+ pdata->mdio_mmd = MDIO_MMD_PCS;
+
+ /* Check for FEC support */
+ pdata->fec_ability = XMDIO_READ(pdata, MDIO_MMD_PMAPMD,
+ MDIO_PMA_10GBR_FECABLE);
+ pdata->fec_ability &= (MDIO_PMA_10GBR_FECABLE_ABLE |
+ MDIO_PMA_10GBR_FECABLE_ERRABLE);
+
+ /* Setup the phy (including supported features) */
+ ret = pdata->phy_if.phy_impl.init(pdata);
+ if (ret)
+ return ret;
+
+ /* Copy supported link modes to advertising link modes */
+ XGBE_LM_COPY(lks, advertising, lks, supported);
+
+ pdata->phy.address = 0;
+
+ if (XGBE_ADV(lks, Autoneg)) {
+ pdata->phy.autoneg = AUTONEG_ENABLE;
+ pdata->phy.speed = SPEED_UNKNOWN;
+ pdata->phy.duplex = DUPLEX_UNKNOWN;
+ } else {
+ pdata->phy.autoneg = AUTONEG_DISABLE;
+ pdata->phy.speed = xgbe_phy_best_advertised_speed(pdata);
+ pdata->phy.duplex = DUPLEX_FULL;
+ }
+
+ pdata->phy.link = 0;
+
+ pdata->phy.pause_autoneg = pdata->pause_autoneg;
+ pdata->phy.tx_pause = pdata->tx_pause;
+ pdata->phy.rx_pause = pdata->rx_pause;
+
+ /* Fix up Flow Control advertising */
+ XGBE_CLR_ADV(lks, Pause);
+ XGBE_CLR_ADV(lks, Asym_Pause);
+
+ if (pdata->rx_pause) {
+ XGBE_SET_ADV(lks, Pause);
+ XGBE_SET_ADV(lks, Asym_Pause);
+ }
+
+ if (pdata->tx_pause) {
+ /* Equivalent to XOR of Asym_Pause */
+ if (XGBE_ADV(lks, Asym_Pause))
+ XGBE_CLR_ADV(lks, Asym_Pause);
+ else
+ XGBE_SET_ADV(lks, Asym_Pause);
+ }
+
+ if (netif_msg_drv(pdata))
+ xgbe_dump_phy_registers(pdata);
+
+ return 0;
+}
+
+void xgbe_init_function_ptrs_phy(struct xgbe_phy_if *phy_if)
+{
+ phy_if->phy_init = xgbe_phy_init;
+ phy_if->phy_exit = xgbe_phy_exit;
+
+ phy_if->phy_reset = xgbe_phy_reset;
+ phy_if->phy_start = xgbe_phy_start;
+ phy_if->phy_stop = xgbe_phy_stop;
+
+ phy_if->phy_status = xgbe_phy_status;
+ phy_if->phy_config_aneg = xgbe_phy_config_aneg;
+
+ phy_if->phy_valid_speed = xgbe_phy_valid_speed;
+
+ phy_if->an_isr = xgbe_an_combined_isr;
+
+ phy_if->module_info = xgbe_phy_module_info;
+ phy_if->module_eeprom = xgbe_phy_module_eeprom;
+}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
new file mode 100644
index 000000000..014513ce0
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
@@ -0,0 +1,526 @@
+/*
+ * AMD 10Gb Ethernet driver
+ *
+ * This file is available to you under your choice of the following two
+ * licenses:
+ *
+ * License 1: GPLv2
+ *
+ * Copyright (c) 2016 Advanced Micro Devices, Inc.
+ *
+ * This file is free software; you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * License 2: Modified BSD
+ *
+ * Copyright (c) 2016 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Advanced Micro Devices, Inc. nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/log2.h>
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+static int xgbe_config_multi_msi(struct xgbe_prv_data *pdata)
+{
+ unsigned int vector_count;
+ unsigned int i, j;
+ int ret;
+
+ vector_count = XGBE_MSI_BASE_COUNT;
+ vector_count += max(pdata->rx_ring_count,
+ pdata->tx_ring_count);
+
+ ret = pci_alloc_irq_vectors(pdata->pcidev, XGBE_MSI_MIN_COUNT,
+ vector_count, PCI_IRQ_MSI | PCI_IRQ_MSIX);
+ if (ret < 0) {
+ dev_info(pdata->dev, "multi MSI/MSI-X enablement failed\n");
+ return ret;
+ }
+
+ pdata->isr_as_tasklet = 1;
+ pdata->irq_count = ret;
+
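+	/* The first XGBE_MSI_BASE_COUNT vectors have fixed purposes
+	 * (device, ECC, I2C and AN); every remaining vector is handed
+	 * to a DMA channel below.
+	 */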
+ pdata->dev_irq = pci_irq_vector(pdata->pcidev, 0);
+ pdata->ecc_irq = pci_irq_vector(pdata->pcidev, 1);
+ pdata->i2c_irq = pci_irq_vector(pdata->pcidev, 2);
+ pdata->an_irq = pci_irq_vector(pdata->pcidev, 3);
+
+ for (i = XGBE_MSI_BASE_COUNT, j = 0; i < ret; i++, j++)
+ pdata->channel_irq[j] = pci_irq_vector(pdata->pcidev, i);
+ pdata->channel_irq_count = j;
+
+ pdata->per_channel_irq = 1;
+ pdata->channel_irq_mode = XGBE_IRQ_MODE_LEVEL;
+
+ if (netif_msg_probe(pdata))
+ dev_dbg(pdata->dev, "multi %s interrupts enabled\n",
+ pdata->pcidev->msix_enabled ? "MSI-X" : "MSI");
+
+ return 0;
+}
+
+static int xgbe_config_irqs(struct xgbe_prv_data *pdata)
+{
+ int ret;
+
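+	/* Prefer multiple MSI/MSI-X vectors; fall back to a single
+	 * shared vector (legacy or MSI) if that fails.
+	 */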
+ ret = xgbe_config_multi_msi(pdata);
+ if (!ret)
+ goto out;
+
+ ret = pci_alloc_irq_vectors(pdata->pcidev, 1, 1,
+ PCI_IRQ_LEGACY | PCI_IRQ_MSI);
+ if (ret < 0) {
+ dev_info(pdata->dev, "single IRQ enablement failed\n");
+ return ret;
+ }
+
+ pdata->isr_as_tasklet = pdata->pcidev->msi_enabled ? 1 : 0;
+ pdata->irq_count = 1;
+ pdata->channel_irq_count = 1;
+
+ pdata->dev_irq = pci_irq_vector(pdata->pcidev, 0);
+ pdata->ecc_irq = pci_irq_vector(pdata->pcidev, 0);
+ pdata->i2c_irq = pci_irq_vector(pdata->pcidev, 0);
+ pdata->an_irq = pci_irq_vector(pdata->pcidev, 0);
+
+ if (netif_msg_probe(pdata))
+ dev_dbg(pdata->dev, "single %s interrupt enabled\n",
+ pdata->pcidev->msi_enabled ? "MSI" : "legacy");
+
+out:
+ if (netif_msg_probe(pdata)) {
+ unsigned int i;
+
+ dev_dbg(pdata->dev, " dev irq=%d\n", pdata->dev_irq);
+ dev_dbg(pdata->dev, " ecc irq=%d\n", pdata->ecc_irq);
+ dev_dbg(pdata->dev, " i2c irq=%d\n", pdata->i2c_irq);
+ dev_dbg(pdata->dev, " an irq=%d\n", pdata->an_irq);
+ for (i = 0; i < pdata->channel_irq_count; i++)
+ dev_dbg(pdata->dev, " dma%u irq=%d\n",
+ i, pdata->channel_irq[i]);
+ }
+
+ return 0;
+}
+
+static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct xgbe_prv_data *pdata;
+ struct device *dev = &pdev->dev;
+ void __iomem * const *iomap_table;
+ struct pci_dev *rdev;
+ unsigned int ma_lo, ma_hi;
+ unsigned int reg;
+ int bar_mask;
+ int ret;
+
+ pdata = xgbe_alloc_pdata(dev);
+ if (IS_ERR(pdata)) {
+ ret = PTR_ERR(pdata);
+ goto err_alloc;
+ }
+
+ pdata->pcidev = pdev;
+ pci_set_drvdata(pdev, pdata);
+
+ /* Get the version data */
+ pdata->vdata = (struct xgbe_version_data *)id->driver_data;
+
+ ret = pcim_enable_device(pdev);
+ if (ret) {
+ dev_err(dev, "pcim_enable_device failed\n");
+ goto err_pci_enable;
+ }
+
+ /* Obtain the mmio areas for the device */
+ bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
+ ret = pcim_iomap_regions(pdev, bar_mask, XGBE_DRV_NAME);
+ if (ret) {
+ dev_err(dev, "pcim_iomap_regions failed\n");
+ goto err_pci_enable;
+ }
+
+ iomap_table = pcim_iomap_table(pdev);
+ if (!iomap_table) {
+ dev_err(dev, "pcim_iomap_table failed\n");
+ ret = -ENOMEM;
+ goto err_pci_enable;
+ }
+
+ pdata->xgmac_regs = iomap_table[XGBE_XGMAC_BAR];
+ if (!pdata->xgmac_regs) {
+ dev_err(dev, "xgmac ioremap failed\n");
+ ret = -ENOMEM;
+ goto err_pci_enable;
+ }
+ pdata->xprop_regs = pdata->xgmac_regs + XGBE_MAC_PROP_OFFSET;
+ pdata->xi2c_regs = pdata->xgmac_regs + XGBE_I2C_CTRL_OFFSET;
+ if (netif_msg_probe(pdata)) {
+ dev_dbg(dev, "xgmac_regs = %p\n", pdata->xgmac_regs);
+ dev_dbg(dev, "xprop_regs = %p\n", pdata->xprop_regs);
+ dev_dbg(dev, "xi2c_regs = %p\n", pdata->xi2c_regs);
+ }
+
+ pdata->xpcs_regs = iomap_table[XGBE_XPCS_BAR];
+ if (!pdata->xpcs_regs) {
+ dev_err(dev, "xpcs ioremap failed\n");
+ ret = -ENOMEM;
+ goto err_pci_enable;
+ }
+ if (netif_msg_probe(pdata))
+ dev_dbg(dev, "xpcs_regs = %p\n", pdata->xpcs_regs);
+
+ /* Set the PCS indirect addressing definition registers */
+ rdev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
+ if (rdev &&
+ (rdev->vendor == PCI_VENDOR_ID_AMD) && (rdev->device == 0x15d0)) {
+ pdata->xpcs_window_def_reg = PCS_V2_RV_WINDOW_DEF;
+ pdata->xpcs_window_sel_reg = PCS_V2_RV_WINDOW_SELECT;
+ } else {
+ pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF;
+ pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT;
+ }
+ pci_dev_put(rdev);
+
+ /* Configure the PCS indirect addressing support */
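+	/* The window OFFSET field is in 64-byte units (hence the shift
+	 * by 6) and SIZE encodes a power-of-two window length of at
+	 * least 128 bytes, i.e. 1 << (SIZE + 7).
+	 */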
+ reg = XPCS32_IOREAD(pdata, pdata->xpcs_window_def_reg);
+ pdata->xpcs_window = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, OFFSET);
+ pdata->xpcs_window <<= 6;
+ pdata->xpcs_window_size = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, SIZE);
+ pdata->xpcs_window_size = 1 << (pdata->xpcs_window_size + 7);
+ pdata->xpcs_window_mask = pdata->xpcs_window_size - 1;
+ if (netif_msg_probe(pdata)) {
+ dev_dbg(dev, "xpcs window def = %#010x\n",
+ pdata->xpcs_window_def_reg);
+ dev_dbg(dev, "xpcs window sel = %#010x\n",
+ pdata->xpcs_window_sel_reg);
+ dev_dbg(dev, "xpcs window = %#010x\n",
+ pdata->xpcs_window);
+ dev_dbg(dev, "xpcs window size = %#010x\n",
+ pdata->xpcs_window_size);
+ dev_dbg(dev, "xpcs window mask = %#010x\n",
+ pdata->xpcs_window_mask);
+ }
+
+ pci_set_master(pdev);
+
+ /* Enable all interrupts in the hardware */
+ XP_IOWRITE(pdata, XP_INT_EN, 0x1fffff);
+
+ /* Retrieve the MAC address */
+ ma_lo = XP_IOREAD(pdata, XP_MAC_ADDR_LO);
+ ma_hi = XP_IOREAD(pdata, XP_MAC_ADDR_HI);
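+	/* The MAC address is packed little-endian across the LO/HI
+	 * registers; the HI register also carries a VALID flag.
+	 */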
+ pdata->mac_addr[0] = ma_lo & 0xff;
+ pdata->mac_addr[1] = (ma_lo >> 8) & 0xff;
+ pdata->mac_addr[2] = (ma_lo >> 16) & 0xff;
+ pdata->mac_addr[3] = (ma_lo >> 24) & 0xff;
+ pdata->mac_addr[4] = ma_hi & 0xff;
+ pdata->mac_addr[5] = (ma_hi >> 8) & 0xff;
+ if (!XP_GET_BITS(ma_hi, XP_MAC_ADDR_HI, VALID) ||
+ !is_valid_ether_addr(pdata->mac_addr)) {
+ dev_err(dev, "invalid mac address\n");
+ ret = -EINVAL;
+ goto err_pci_enable;
+ }
+
+ /* Clock settings */
+ pdata->sysclk_rate = XGBE_V2_DMA_CLOCK_FREQ;
+ pdata->ptpclk_rate = XGBE_V2_PTP_CLOCK_FREQ;
+
+ /* Set the DMA coherency values */
+ pdata->coherent = 1;
+ pdata->arcr = XGBE_DMA_PCI_ARCR;
+ pdata->awcr = XGBE_DMA_PCI_AWCR;
+ pdata->awarcr = XGBE_DMA_PCI_AWARCR;
+
+ /* Read the port property registers */
+ pdata->pp0 = XP_IOREAD(pdata, XP_PROP_0);
+ pdata->pp1 = XP_IOREAD(pdata, XP_PROP_1);
+ pdata->pp2 = XP_IOREAD(pdata, XP_PROP_2);
+ pdata->pp3 = XP_IOREAD(pdata, XP_PROP_3);
+ pdata->pp4 = XP_IOREAD(pdata, XP_PROP_4);
+ if (netif_msg_probe(pdata)) {
+ dev_dbg(dev, "port property 0 = %#010x\n", pdata->pp0);
+ dev_dbg(dev, "port property 1 = %#010x\n", pdata->pp1);
+ dev_dbg(dev, "port property 2 = %#010x\n", pdata->pp2);
+ dev_dbg(dev, "port property 3 = %#010x\n", pdata->pp3);
+ dev_dbg(dev, "port property 4 = %#010x\n", pdata->pp4);
+ }
+
+ /* Set the maximum channels and queues */
+ pdata->tx_max_channel_count = XP_GET_BITS(pdata->pp1, XP_PROP_1,
+ MAX_TX_DMA);
+ pdata->rx_max_channel_count = XP_GET_BITS(pdata->pp1, XP_PROP_1,
+ MAX_RX_DMA);
+ pdata->tx_max_q_count = XP_GET_BITS(pdata->pp1, XP_PROP_1,
+ MAX_TX_QUEUES);
+ pdata->rx_max_q_count = XP_GET_BITS(pdata->pp1, XP_PROP_1,
+ MAX_RX_QUEUES);
+ if (netif_msg_probe(pdata)) {
+ dev_dbg(dev, "max tx/rx channel count = %u/%u\n",
+ pdata->tx_max_channel_count,
+ pdata->rx_max_channel_count);
+ dev_dbg(dev, "max tx/rx hw queue count = %u/%u\n",
+ pdata->tx_max_q_count, pdata->rx_max_q_count);
+ }
+
+ /* Set the hardware channel and queue counts */
+ xgbe_set_counts(pdata);
+
+ /* Set the maximum fifo amounts */
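+	/* TX_FIFO_SIZE and RX_FIFO_SIZE are reported in 16KB units and
+	 * are clamped to the per-version hardware maximums.
+	 */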
+ pdata->tx_max_fifo_size = XP_GET_BITS(pdata->pp2, XP_PROP_2,
+ TX_FIFO_SIZE);
+ pdata->tx_max_fifo_size *= 16384;
+ pdata->tx_max_fifo_size = min(pdata->tx_max_fifo_size,
+ pdata->vdata->tx_max_fifo_size);
+ pdata->rx_max_fifo_size = XP_GET_BITS(pdata->pp2, XP_PROP_2,
+ RX_FIFO_SIZE);
+ pdata->rx_max_fifo_size *= 16384;
+ pdata->rx_max_fifo_size = min(pdata->rx_max_fifo_size,
+ pdata->vdata->rx_max_fifo_size);
+ if (netif_msg_probe(pdata))
+ dev_dbg(dev, "max tx/rx max fifo size = %u/%u\n",
+ pdata->tx_max_fifo_size, pdata->rx_max_fifo_size);
+
+ /* Configure interrupt support */
+ ret = xgbe_config_irqs(pdata);
+ if (ret)
+ goto err_pci_enable;
+
+ /* Configure the netdev resource */
+ ret = xgbe_config_netdev(pdata);
+ if (ret)
+ goto err_irq_vectors;
+
+ netdev_notice(pdata->netdev, "net device enabled\n");
+
+ return 0;
+
+err_irq_vectors:
+ pci_free_irq_vectors(pdata->pcidev);
+
+err_pci_enable:
+ xgbe_free_pdata(pdata);
+
+err_alloc:
+ dev_notice(dev, "net device not enabled\n");
+
+ return ret;
+}
+
+static void xgbe_pci_remove(struct pci_dev *pdev)
+{
+ struct xgbe_prv_data *pdata = pci_get_drvdata(pdev);
+
+ xgbe_deconfig_netdev(pdata);
+
+ pci_free_irq_vectors(pdata->pcidev);
+
+ /* Disable all interrupts in the hardware */
+ XP_IOWRITE(pdata, XP_INT_EN, 0x0);
+
+ xgbe_free_pdata(pdata);
+}
+
+static int __maybe_unused xgbe_pci_suspend(struct device *dev)
+{
+ struct xgbe_prv_data *pdata = dev_get_drvdata(dev);
+ struct net_device *netdev = pdata->netdev;
+ int ret = 0;
+
+ if (netif_running(netdev))
+ ret = xgbe_powerdown(netdev, XGMAC_DRIVER_CONTEXT);
+
+ pdata->lpm_ctrl = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
+ pdata->lpm_ctrl |= MDIO_CTRL1_LPOWER;
+ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, pdata->lpm_ctrl);
+
+ return ret;
+}
+
+static int __maybe_unused xgbe_pci_resume(struct device *dev)
+{
+ struct xgbe_prv_data *pdata = dev_get_drvdata(dev);
+ struct net_device *netdev = pdata->netdev;
+ int ret = 0;
+
+ XP_IOWRITE(pdata, XP_INT_EN, 0x1fffff);
+
+ pdata->lpm_ctrl &= ~MDIO_CTRL1_LPOWER;
+ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, pdata->lpm_ctrl);
+
+ if (netif_running(netdev)) {
+ ret = xgbe_powerup(netdev, XGMAC_DRIVER_CONTEXT);
+
+ /* Schedule a restart in case the link or phy state changed
+ * while we were powered down.
+ */
+ schedule_work(&pdata->restart_work);
+ }
+
+ return ret;
+}
+
+static const struct xgbe_version_data xgbe_v2a = {
+ .init_function_ptrs_phy_impl = xgbe_init_function_ptrs_phy_v2,
+ .xpcs_access = XGBE_XPCS_ACCESS_V2,
+ .mmc_64bit = 1,
+ .tx_max_fifo_size = 229376,
+ .rx_max_fifo_size = 229376,
+ .tx_tstamp_workaround = 1,
+ .ecc_support = 1,
+ .i2c_support = 1,
+ .irq_reissue_support = 1,
+ .tx_desc_prefetch = 5,
+ .rx_desc_prefetch = 5,
+ .an_cdr_workaround = 1,
+};
+
+static const struct xgbe_version_data xgbe_v2b = {
+ .init_function_ptrs_phy_impl = xgbe_init_function_ptrs_phy_v2,
+ .xpcs_access = XGBE_XPCS_ACCESS_V2,
+ .mmc_64bit = 1,
+ .tx_max_fifo_size = 65536,
+ .rx_max_fifo_size = 65536,
+ .tx_tstamp_workaround = 1,
+ .ecc_support = 1,
+ .i2c_support = 1,
+ .irq_reissue_support = 1,
+ .tx_desc_prefetch = 5,
+ .rx_desc_prefetch = 5,
+ .an_cdr_workaround = 1,
+};
+
+static const struct pci_device_id xgbe_pci_table[] = {
+ { PCI_VDEVICE(AMD, 0x1458),
+ .driver_data = (kernel_ulong_t)&xgbe_v2a },
+ { PCI_VDEVICE(AMD, 0x1459),
+ .driver_data = (kernel_ulong_t)&xgbe_v2b },
+ /* Last entry must be zero */
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, xgbe_pci_table);
+
+static SIMPLE_DEV_PM_OPS(xgbe_pci_pm_ops, xgbe_pci_suspend, xgbe_pci_resume);
+
+static struct pci_driver xgbe_driver = {
+ .name = XGBE_DRV_NAME,
+ .id_table = xgbe_pci_table,
+ .probe = xgbe_pci_probe,
+ .remove = xgbe_pci_remove,
+ .driver = {
+ .pm = &xgbe_pci_pm_ops,
+ }
+};
+
+int xgbe_pci_init(void)
+{
+ return pci_register_driver(&xgbe_driver);
+}
+
+void xgbe_pci_exit(void)
+{
+ pci_unregister_driver(&xgbe_driver);
+}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v1.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v1.c
new file mode 100644
index 000000000..d16eae415
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v1.c
@@ -0,0 +1,853 @@
+/*
+ * AMD 10Gb Ethernet driver
+ *
+ * This file is available to you under your choice of the following two
+ * licenses:
+ *
+ * License 1: GPLv2
+ *
+ * Copyright (c) 2016 Advanced Micro Devices, Inc.
+ *
+ * This file is free software; you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * License 2: Modified BSD
+ *
+ * Copyright (c) 2016 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Advanced Micro Devices, Inc. nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/module.h>
+#include <linux/kmod.h>
+#include <linux/device.h>
+#include <linux/property.h>
+#include <linux/mdio.h>
+#include <linux/phy.h>
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+#define XGBE_BLWC_PROPERTY "amd,serdes-blwc"
+#define XGBE_CDR_RATE_PROPERTY "amd,serdes-cdr-rate"
+#define XGBE_PQ_SKEW_PROPERTY "amd,serdes-pq-skew"
+#define XGBE_TX_AMP_PROPERTY "amd,serdes-tx-amp"
+#define XGBE_DFE_CFG_PROPERTY "amd,serdes-dfe-tap-config"
+#define XGBE_DFE_ENA_PROPERTY "amd,serdes-dfe-tap-enable"
+
+/* Default SerDes settings */
+#define XGBE_SPEED_1000_BLWC 1
+#define XGBE_SPEED_1000_CDR 0x2
+#define XGBE_SPEED_1000_PLL 0x0
+#define XGBE_SPEED_1000_PQ 0xa
+#define XGBE_SPEED_1000_RATE 0x3
+#define XGBE_SPEED_1000_TXAMP 0xf
+#define XGBE_SPEED_1000_WORD 0x1
+#define XGBE_SPEED_1000_DFE_TAP_CONFIG 0x3
+#define XGBE_SPEED_1000_DFE_TAP_ENABLE 0x0
+
+#define XGBE_SPEED_2500_BLWC 1
+#define XGBE_SPEED_2500_CDR 0x2
+#define XGBE_SPEED_2500_PLL 0x0
+#define XGBE_SPEED_2500_PQ 0xa
+#define XGBE_SPEED_2500_RATE 0x1
+#define XGBE_SPEED_2500_TXAMP 0xf
+#define XGBE_SPEED_2500_WORD 0x1
+#define XGBE_SPEED_2500_DFE_TAP_CONFIG 0x3
+#define XGBE_SPEED_2500_DFE_TAP_ENABLE 0x0
+
+#define XGBE_SPEED_10000_BLWC 0
+#define XGBE_SPEED_10000_CDR 0x7
+#define XGBE_SPEED_10000_PLL 0x1
+#define XGBE_SPEED_10000_PQ 0x12
+#define XGBE_SPEED_10000_RATE 0x0
+#define XGBE_SPEED_10000_TXAMP 0xa
+#define XGBE_SPEED_10000_WORD 0x7
+#define XGBE_SPEED_10000_DFE_TAP_CONFIG 0x1
+#define XGBE_SPEED_10000_DFE_TAP_ENABLE 0x7f
+
+/* Rate-change complete wait/retry count */
+#define XGBE_RATECHANGE_COUNT 500
+
+static const u32 xgbe_phy_blwc[] = {
+ XGBE_SPEED_1000_BLWC,
+ XGBE_SPEED_2500_BLWC,
+ XGBE_SPEED_10000_BLWC,
+};
+
+static const u32 xgbe_phy_cdr_rate[] = {
+ XGBE_SPEED_1000_CDR,
+ XGBE_SPEED_2500_CDR,
+ XGBE_SPEED_10000_CDR,
+};
+
+static const u32 xgbe_phy_pq_skew[] = {
+ XGBE_SPEED_1000_PQ,
+ XGBE_SPEED_2500_PQ,
+ XGBE_SPEED_10000_PQ,
+};
+
+static const u32 xgbe_phy_tx_amp[] = {
+ XGBE_SPEED_1000_TXAMP,
+ XGBE_SPEED_2500_TXAMP,
+ XGBE_SPEED_10000_TXAMP,
+};
+
+static const u32 xgbe_phy_dfe_tap_cfg[] = {
+ XGBE_SPEED_1000_DFE_TAP_CONFIG,
+ XGBE_SPEED_2500_DFE_TAP_CONFIG,
+ XGBE_SPEED_10000_DFE_TAP_CONFIG,
+};
+
+static const u32 xgbe_phy_dfe_tap_ena[] = {
+ XGBE_SPEED_1000_DFE_TAP_ENABLE,
+ XGBE_SPEED_2500_DFE_TAP_ENABLE,
+ XGBE_SPEED_10000_DFE_TAP_ENABLE,
+};
+
+struct xgbe_phy_data {
+ /* 1000/10000 vs 2500/10000 indicator */
+ unsigned int speed_set;
+
+ /* SerDes UEFI configurable settings.
+ * Switching between modes/speeds requires new values for some
+ * SerDes settings. The values can be supplied as device
+	 * properties in array format. The first array entry is for
+	 * 1GbE, the second for 2.5GbE and the third for 10GbE.
+ */
+ u32 blwc[XGBE_SPEEDS];
+ u32 cdr_rate[XGBE_SPEEDS];
+ u32 pq_skew[XGBE_SPEEDS];
+ u32 tx_amp[XGBE_SPEEDS];
+ u32 dfe_tap_cfg[XGBE_SPEEDS];
+ u32 dfe_tap_ena[XGBE_SPEEDS];
+};
+
+static void xgbe_phy_kr_training_pre(struct xgbe_prv_data *pdata)
+{
+ XSIR0_IOWRITE_BITS(pdata, SIR0_KR_RT_1, RESET, 1);
+}
+
+static void xgbe_phy_kr_training_post(struct xgbe_prv_data *pdata)
+{
+ XSIR0_IOWRITE_BITS(pdata, SIR0_KR_RT_1, RESET, 0);
+}
+
+static enum xgbe_mode xgbe_phy_an_outcome(struct xgbe_prv_data *pdata)
+{
+ struct ethtool_link_ksettings *lks = &pdata->phy.lks;
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ enum xgbe_mode mode;
+ unsigned int ad_reg, lp_reg;
+
+ XGBE_SET_LP_ADV(lks, Autoneg);
+ XGBE_SET_LP_ADV(lks, Backplane);
+
+ /* Compare Advertisement and Link Partner register 1 */
+ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
+ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA);
+ if (lp_reg & 0x400)
+ XGBE_SET_LP_ADV(lks, Pause);
+ if (lp_reg & 0x800)
+ XGBE_SET_LP_ADV(lks, Asym_Pause);
+
+ if (pdata->phy.pause_autoneg) {
+ /* Set flow control based on auto-negotiation result */
+ pdata->phy.tx_pause = 0;
+ pdata->phy.rx_pause = 0;
+
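+		/* Standard CL73 pause resolution: symmetric pause when
+		 * both ends advertise Pause; otherwise Asym_Pause decides
+		 * which single direction, if any, is enabled.
+		 */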
+ if (ad_reg & lp_reg & 0x400) {
+ pdata->phy.tx_pause = 1;
+ pdata->phy.rx_pause = 1;
+ } else if (ad_reg & lp_reg & 0x800) {
+ if (ad_reg & 0x400)
+ pdata->phy.rx_pause = 1;
+ else if (lp_reg & 0x400)
+ pdata->phy.tx_pause = 1;
+ }
+ }
+
+ /* Compare Advertisement and Link Partner register 2 */
+ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1);
+ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 1);
+ if (lp_reg & 0x80)
+ XGBE_SET_LP_ADV(lks, 10000baseKR_Full);
+ if (lp_reg & 0x20) {
+ if (phy_data->speed_set == XGBE_SPEEDSET_2500_10000)
+ XGBE_SET_LP_ADV(lks, 2500baseX_Full);
+ else
+ XGBE_SET_LP_ADV(lks, 1000baseKX_Full);
+ }
+
+ ad_reg &= lp_reg;
+ if (ad_reg & 0x80) {
+ mode = XGBE_MODE_KR;
+ } else if (ad_reg & 0x20) {
+ if (phy_data->speed_set == XGBE_SPEEDSET_2500_10000)
+ mode = XGBE_MODE_KX_2500;
+ else
+ mode = XGBE_MODE_KX_1000;
+ } else {
+ mode = XGBE_MODE_UNKNOWN;
+ }
+
+ /* Compare Advertisement and Link Partner register 3 */
+ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
+ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 2);
+ if (lp_reg & 0xc000)
+ XGBE_SET_LP_ADV(lks, 10000baseR_FEC);
+
+ return mode;
+}
+
+static void xgbe_phy_an_advertising(struct xgbe_prv_data *pdata,
+ struct ethtool_link_ksettings *dlks)
+{
+ struct ethtool_link_ksettings *slks = &pdata->phy.lks;
+
+ XGBE_LM_COPY(dlks, advertising, slks, advertising);
+}
+
+static int xgbe_phy_an_config(struct xgbe_prv_data *pdata)
+{
+	/* Nothing uniquely required for AN configuration */
+ return 0;
+}
+
+static enum xgbe_an_mode xgbe_phy_an_mode(struct xgbe_prv_data *pdata)
+{
+ return XGBE_AN_MODE_CL73;
+}
+
+static void xgbe_phy_pcs_power_cycle(struct xgbe_prv_data *pdata)
+{
+ unsigned int reg;
+
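+	/* Toggle the low-power bit to power cycle the PCS; the short
+	 * delay gives it time to settle before being powered back up
+	 */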
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
+
+ reg |= MDIO_CTRL1_LPOWER;
+ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
+
+ usleep_range(75, 100);
+
+ reg &= ~MDIO_CTRL1_LPOWER;
+ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
+}
+
+static void xgbe_phy_start_ratechange(struct xgbe_prv_data *pdata)
+{
+ /* Assert Rx and Tx ratechange */
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, RATECHANGE, 1);
+}
+
+static void xgbe_phy_complete_ratechange(struct xgbe_prv_data *pdata)
+{
+ unsigned int wait;
+ u16 status;
+
+ /* Release Rx and Tx ratechange */
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, RATECHANGE, 0);
+
+ /* Wait for Rx and Tx ready */
+ wait = XGBE_RATECHANGE_COUNT;
+ while (wait--) {
+ usleep_range(50, 75);
+
+ status = XSIR0_IOREAD(pdata, SIR0_STATUS);
+ if (XSIR_GET_BITS(status, SIR0_STATUS, RX_READY) &&
+ XSIR_GET_BITS(status, SIR0_STATUS, TX_READY))
+ goto rx_reset;
+ }
+
+ netif_dbg(pdata, link, pdata->netdev, "SerDes rx/tx not ready (%#hx)\n",
+ status);
+
+rx_reset:
+ /* Perform Rx reset for the DFE changes */
+ XRXTX_IOWRITE_BITS(pdata, RXTX_REG6, RESETB_RXD, 0);
+ XRXTX_IOWRITE_BITS(pdata, RXTX_REG6, RESETB_RXD, 1);
+}
+
+static void xgbe_phy_kr_mode(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int reg;
+
+ /* Set PCS to KR/10G speed */
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL2);
+ reg &= ~MDIO_PCS_CTRL2_TYPE;
+ reg |= MDIO_PCS_CTRL2_10GBR;
+ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL2, reg);
+
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
+ reg &= ~MDIO_CTRL1_SPEEDSEL;
+ reg |= MDIO_CTRL1_SPEED10G;
+ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
+
+ xgbe_phy_pcs_power_cycle(pdata);
+
+ /* Set SerDes to 10G speed */
+ xgbe_phy_start_ratechange(pdata);
+
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, DATARATE, XGBE_SPEED_10000_RATE);
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, WORDMODE, XGBE_SPEED_10000_WORD);
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, PLLSEL, XGBE_SPEED_10000_PLL);
+
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, CDR_RATE,
+ phy_data->cdr_rate[XGBE_SPEED_10000]);
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, TXAMP,
+ phy_data->tx_amp[XGBE_SPEED_10000]);
+ XRXTX_IOWRITE_BITS(pdata, RXTX_REG20, BLWC_ENA,
+ phy_data->blwc[XGBE_SPEED_10000]);
+ XRXTX_IOWRITE_BITS(pdata, RXTX_REG114, PQ_REG,
+ phy_data->pq_skew[XGBE_SPEED_10000]);
+ XRXTX_IOWRITE_BITS(pdata, RXTX_REG129, RXDFE_CONFIG,
+ phy_data->dfe_tap_cfg[XGBE_SPEED_10000]);
+ XRXTX_IOWRITE(pdata, RXTX_REG22,
+ phy_data->dfe_tap_ena[XGBE_SPEED_10000]);
+
+ xgbe_phy_complete_ratechange(pdata);
+
+ netif_dbg(pdata, link, pdata->netdev, "10GbE KR mode set\n");
+}
+
+static void xgbe_phy_kx_2500_mode(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int reg;
+
+ /* Set PCS to KX/1G speed */
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL2);
+ reg &= ~MDIO_PCS_CTRL2_TYPE;
+ reg |= MDIO_PCS_CTRL2_10GBX;
+ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL2, reg);
+
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
+ reg &= ~MDIO_CTRL1_SPEEDSEL;
+ reg |= MDIO_CTRL1_SPEED1G;
+ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
+
+ xgbe_phy_pcs_power_cycle(pdata);
+
+ /* Set SerDes to 2.5G speed */
+ xgbe_phy_start_ratechange(pdata);
+
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, DATARATE, XGBE_SPEED_2500_RATE);
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, WORDMODE, XGBE_SPEED_2500_WORD);
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, PLLSEL, XGBE_SPEED_2500_PLL);
+
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, CDR_RATE,
+ phy_data->cdr_rate[XGBE_SPEED_2500]);
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, TXAMP,
+ phy_data->tx_amp[XGBE_SPEED_2500]);
+ XRXTX_IOWRITE_BITS(pdata, RXTX_REG20, BLWC_ENA,
+ phy_data->blwc[XGBE_SPEED_2500]);
+ XRXTX_IOWRITE_BITS(pdata, RXTX_REG114, PQ_REG,
+ phy_data->pq_skew[XGBE_SPEED_2500]);
+ XRXTX_IOWRITE_BITS(pdata, RXTX_REG129, RXDFE_CONFIG,
+ phy_data->dfe_tap_cfg[XGBE_SPEED_2500]);
+ XRXTX_IOWRITE(pdata, RXTX_REG22,
+ phy_data->dfe_tap_ena[XGBE_SPEED_2500]);
+
+ xgbe_phy_complete_ratechange(pdata);
+
+ netif_dbg(pdata, link, pdata->netdev, "2.5GbE KX mode set\n");
+}
+
+static void xgbe_phy_kx_1000_mode(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int reg;
+
+ /* Set PCS to KX/1G speed */
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL2);
+ reg &= ~MDIO_PCS_CTRL2_TYPE;
+ reg |= MDIO_PCS_CTRL2_10GBX;
+ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL2, reg);
+
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
+ reg &= ~MDIO_CTRL1_SPEEDSEL;
+ reg |= MDIO_CTRL1_SPEED1G;
+ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
+
+ xgbe_phy_pcs_power_cycle(pdata);
+
+ /* Set SerDes to 1G speed */
+ xgbe_phy_start_ratechange(pdata);
+
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, DATARATE, XGBE_SPEED_1000_RATE);
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, WORDMODE, XGBE_SPEED_1000_WORD);
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, PLLSEL, XGBE_SPEED_1000_PLL);
+
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, CDR_RATE,
+ phy_data->cdr_rate[XGBE_SPEED_1000]);
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, TXAMP,
+ phy_data->tx_amp[XGBE_SPEED_1000]);
+ XRXTX_IOWRITE_BITS(pdata, RXTX_REG20, BLWC_ENA,
+ phy_data->blwc[XGBE_SPEED_1000]);
+ XRXTX_IOWRITE_BITS(pdata, RXTX_REG114, PQ_REG,
+ phy_data->pq_skew[XGBE_SPEED_1000]);
+ XRXTX_IOWRITE_BITS(pdata, RXTX_REG129, RXDFE_CONFIG,
+ phy_data->dfe_tap_cfg[XGBE_SPEED_1000]);
+ XRXTX_IOWRITE(pdata, RXTX_REG22,
+ phy_data->dfe_tap_ena[XGBE_SPEED_1000]);
+
+ xgbe_phy_complete_ratechange(pdata);
+
+ netif_dbg(pdata, link, pdata->netdev, "1GbE KX mode set\n");
+}
+
+static enum xgbe_mode xgbe_phy_cur_mode(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ enum xgbe_mode mode;
+ unsigned int reg;
+
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL2);
+ reg &= MDIO_PCS_CTRL2_TYPE;
+
+ if (reg == MDIO_PCS_CTRL2_10GBR) {
+ mode = XGBE_MODE_KR;
+ } else {
+ if (phy_data->speed_set == XGBE_SPEEDSET_2500_10000)
+ mode = XGBE_MODE_KX_2500;
+ else
+ mode = XGBE_MODE_KX_1000;
+ }
+
+ return mode;
+}
+
+static enum xgbe_mode xgbe_phy_switch_mode(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ enum xgbe_mode mode;
+
+ /* If we are in KR switch to KX, and vice-versa */
+ if (xgbe_phy_cur_mode(pdata) == XGBE_MODE_KR) {
+ if (phy_data->speed_set == XGBE_SPEEDSET_2500_10000)
+ mode = XGBE_MODE_KX_2500;
+ else
+ mode = XGBE_MODE_KX_1000;
+ } else {
+ mode = XGBE_MODE_KR;
+ }
+
+ return mode;
+}
+
+static enum xgbe_mode xgbe_phy_get_mode(struct xgbe_prv_data *pdata,
+ int speed)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ switch (speed) {
+ case SPEED_1000:
+ return (phy_data->speed_set == XGBE_SPEEDSET_1000_10000)
+ ? XGBE_MODE_KX_1000 : XGBE_MODE_UNKNOWN;
+ case SPEED_2500:
+ return (phy_data->speed_set == XGBE_SPEEDSET_2500_10000)
+ ? XGBE_MODE_KX_2500 : XGBE_MODE_UNKNOWN;
+ case SPEED_10000:
+ return XGBE_MODE_KR;
+ default:
+ return XGBE_MODE_UNKNOWN;
+ }
+}
+
+static void xgbe_phy_set_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode)
+{
+ switch (mode) {
+ case XGBE_MODE_KX_1000:
+ xgbe_phy_kx_1000_mode(pdata);
+ break;
+ case XGBE_MODE_KX_2500:
+ xgbe_phy_kx_2500_mode(pdata);
+ break;
+ case XGBE_MODE_KR:
+ xgbe_phy_kr_mode(pdata);
+ break;
+ default:
+ break;
+ }
+}
+
+static bool xgbe_phy_check_mode(struct xgbe_prv_data *pdata,
+ enum xgbe_mode mode, bool advert)
+{
+ if (pdata->phy.autoneg == AUTONEG_ENABLE) {
+ return advert;
+ } else {
+ enum xgbe_mode cur_mode;
+
+ cur_mode = xgbe_phy_get_mode(pdata, pdata->phy.speed);
+ if (cur_mode == mode)
+ return true;
+ }
+
+ return false;
+}
+
+static bool xgbe_phy_use_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode)
+{
+ struct ethtool_link_ksettings *lks = &pdata->phy.lks;
+
+ switch (mode) {
+ case XGBE_MODE_KX_1000:
+ return xgbe_phy_check_mode(pdata, mode,
+ XGBE_ADV(lks, 1000baseKX_Full));
+ case XGBE_MODE_KX_2500:
+ return xgbe_phy_check_mode(pdata, mode,
+ XGBE_ADV(lks, 2500baseX_Full));
+ case XGBE_MODE_KR:
+ return xgbe_phy_check_mode(pdata, mode,
+ XGBE_ADV(lks, 10000baseKR_Full));
+ default:
+ return false;
+ }
+}
+
+static bool xgbe_phy_valid_speed(struct xgbe_prv_data *pdata, int speed)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ switch (speed) {
+ case SPEED_1000:
+ if (phy_data->speed_set != XGBE_SPEEDSET_1000_10000)
+ return false;
+ return true;
+ case SPEED_2500:
+ if (phy_data->speed_set != XGBE_SPEEDSET_2500_10000)
+ return false;
+ return true;
+ case SPEED_10000:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart)
+{
+ unsigned int reg;
+
+ *an_restart = 0;
+
+ /* Link status is latched low, so read once to clear
+ * and then read again to get current state
+ */
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
+
+ return (reg & MDIO_STAT1_LSTATUS) ? 1 : 0;
+}
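+
+/* Editor's note (illustrative, not part of the upstream patch): latched-low
+ * status bits per IEEE 802.3 keep reporting a past link drop until read, so
+ * the first read clears history and the second reflects the present state.
+ * A minimal sketch of the idiom, with mdio_read() as an assumed accessor:
+ *
+ *	(void)mdio_read(pdata, MDIO_MMD_PCS, MDIO_STAT1); // clear the latch
+ *	reg = mdio_read(pdata, MDIO_MMD_PCS, MDIO_STAT1); // current state
+ *	link_up = !!(reg & MDIO_STAT1_LSTATUS);
+ */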
+
+static void xgbe_phy_stop(struct xgbe_prv_data *pdata)
+{
+ /* Nothing uniquely required for stop */
+}
+
+static int xgbe_phy_start(struct xgbe_prv_data *pdata)
+{
+ /* Nothing uniquely required for start */
+ return 0;
+}
+
+static int xgbe_phy_reset(struct xgbe_prv_data *pdata)
+{
+ unsigned int reg, count;
+
+ /* Perform a software reset of the PCS */
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
+ reg |= MDIO_CTRL1_RESET;
+ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
+
+ count = 50;
+ do {
+ msleep(20);
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
+ } while ((reg & MDIO_CTRL1_RESET) && --count);
+
+ if (reg & MDIO_CTRL1_RESET)
+ return -ETIMEDOUT;
+
+ return 0;
+}
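+
+/* Editor's note: the loop above polls the self-clearing reset bit for up to
+ * 50 * 20 ms = 1 s before giving up with -ETIMEDOUT; sleeping before the
+ * first read gives the PCS at least one 20 ms settle period.
+ */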
+
+static void xgbe_phy_exit(struct xgbe_prv_data *pdata)
+{
+ /* Nothing uniquely required for exit */
+}
+
+static int xgbe_phy_init(struct xgbe_prv_data *pdata)
+{
+ struct ethtool_link_ksettings *lks = &pdata->phy.lks;
+ struct xgbe_phy_data *phy_data;
+ int ret;
+
+ phy_data = devm_kzalloc(pdata->dev, sizeof(*phy_data), GFP_KERNEL);
+ if (!phy_data)
+ return -ENOMEM;
+
+ /* Retrieve the PHY speedset */
+ ret = device_property_read_u32(pdata->phy_dev, XGBE_SPEEDSET_PROPERTY,
+ &phy_data->speed_set);
+ if (ret) {
+ dev_err(pdata->dev, "invalid %s property\n",
+ XGBE_SPEEDSET_PROPERTY);
+ return ret;
+ }
+
+ switch (phy_data->speed_set) {
+ case XGBE_SPEEDSET_1000_10000:
+ case XGBE_SPEEDSET_2500_10000:
+ break;
+ default:
+ dev_err(pdata->dev, "invalid %s property\n",
+ XGBE_SPEEDSET_PROPERTY);
+ return -EINVAL;
+ }
+
+ /* Retrieve the PHY configuration properties */
+ if (device_property_present(pdata->phy_dev, XGBE_BLWC_PROPERTY)) {
+ ret = device_property_read_u32_array(pdata->phy_dev,
+ XGBE_BLWC_PROPERTY,
+ phy_data->blwc,
+ XGBE_SPEEDS);
+ if (ret) {
+ dev_err(pdata->dev, "invalid %s property\n",
+ XGBE_BLWC_PROPERTY);
+ return ret;
+ }
+ } else {
+ memcpy(phy_data->blwc, xgbe_phy_blwc,
+ sizeof(phy_data->blwc));
+ }
+
+ if (device_property_present(pdata->phy_dev, XGBE_CDR_RATE_PROPERTY)) {
+ ret = device_property_read_u32_array(pdata->phy_dev,
+ XGBE_CDR_RATE_PROPERTY,
+ phy_data->cdr_rate,
+ XGBE_SPEEDS);
+ if (ret) {
+ dev_err(pdata->dev, "invalid %s property\n",
+ XGBE_CDR_RATE_PROPERTY);
+ return ret;
+ }
+ } else {
+ memcpy(phy_data->cdr_rate, xgbe_phy_cdr_rate,
+ sizeof(phy_data->cdr_rate));
+ }
+
+ if (device_property_present(pdata->phy_dev, XGBE_PQ_SKEW_PROPERTY)) {
+ ret = device_property_read_u32_array(pdata->phy_dev,
+ XGBE_PQ_SKEW_PROPERTY,
+ phy_data->pq_skew,
+ XGBE_SPEEDS);
+ if (ret) {
+ dev_err(pdata->dev, "invalid %s property\n",
+ XGBE_PQ_SKEW_PROPERTY);
+ return ret;
+ }
+ } else {
+ memcpy(phy_data->pq_skew, xgbe_phy_pq_skew,
+ sizeof(phy_data->pq_skew));
+ }
+
+ if (device_property_present(pdata->phy_dev, XGBE_TX_AMP_PROPERTY)) {
+ ret = device_property_read_u32_array(pdata->phy_dev,
+ XGBE_TX_AMP_PROPERTY,
+ phy_data->tx_amp,
+ XGBE_SPEEDS);
+ if (ret) {
+ dev_err(pdata->dev, "invalid %s property\n",
+ XGBE_TX_AMP_PROPERTY);
+ return ret;
+ }
+ } else {
+ memcpy(phy_data->tx_amp, xgbe_phy_tx_amp,
+ sizeof(phy_data->tx_amp));
+ }
+
+ if (device_property_present(pdata->phy_dev, XGBE_DFE_CFG_PROPERTY)) {
+ ret = device_property_read_u32_array(pdata->phy_dev,
+ XGBE_DFE_CFG_PROPERTY,
+ phy_data->dfe_tap_cfg,
+ XGBE_SPEEDS);
+ if (ret) {
+ dev_err(pdata->dev, "invalid %s property\n",
+ XGBE_DFE_CFG_PROPERTY);
+ return ret;
+ }
+ } else {
+ memcpy(phy_data->dfe_tap_cfg, xgbe_phy_dfe_tap_cfg,
+ sizeof(phy_data->dfe_tap_cfg));
+ }
+
+ if (device_property_present(pdata->phy_dev, XGBE_DFE_ENA_PROPERTY)) {
+ ret = device_property_read_u32_array(pdata->phy_dev,
+ XGBE_DFE_ENA_PROPERTY,
+ phy_data->dfe_tap_ena,
+ XGBE_SPEEDS);
+ if (ret) {
+ dev_err(pdata->dev, "invalid %s property\n",
+ XGBE_DFE_ENA_PROPERTY);
+ return ret;
+ }
+ } else {
+ memcpy(phy_data->dfe_tap_ena, xgbe_phy_dfe_tap_ena,
+ sizeof(phy_data->dfe_tap_ena));
+ }
+
+ /* Initialize supported features */
+ XGBE_ZERO_SUP(lks);
+ XGBE_SET_SUP(lks, Autoneg);
+ XGBE_SET_SUP(lks, Pause);
+ XGBE_SET_SUP(lks, Asym_Pause);
+ XGBE_SET_SUP(lks, Backplane);
+ XGBE_SET_SUP(lks, 10000baseKR_Full);
+ switch (phy_data->speed_set) {
+ case XGBE_SPEEDSET_1000_10000:
+ XGBE_SET_SUP(lks, 1000baseKX_Full);
+ break;
+ case XGBE_SPEEDSET_2500_10000:
+ XGBE_SET_SUP(lks, 2500baseX_Full);
+ break;
+ }
+
+ if (pdata->fec_ability & MDIO_PMA_10GBR_FECABLE_ABLE)
+ XGBE_SET_SUP(lks, 10000baseR_FEC);
+
+ pdata->phy_data = phy_data;
+
+ return 0;
+}
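+
+/* Editor's note (sketch, not upstream code): each SerDes tuning property
+ * above follows the same "use the firmware-provided per-speed array, else
+ * fall back to the driver defaults" pattern. Generalized, with dst/defaults
+ * assumed:
+ *
+ *	if (device_property_present(dev, prop)) {
+ *		ret = device_property_read_u32_array(dev, prop, dst,
+ *						     XGBE_SPEEDS);
+ *		if (ret)
+ *			return ret;	// property present but malformed
+ *	} else {
+ *		memcpy(dst, defaults, XGBE_SPEEDS * sizeof(u32));
+ *	}
+ */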
+
+void xgbe_init_function_ptrs_phy_v1(struct xgbe_phy_if *phy_if)
+{
+ struct xgbe_phy_impl_if *phy_impl = &phy_if->phy_impl;
+
+ phy_impl->init = xgbe_phy_init;
+ phy_impl->exit = xgbe_phy_exit;
+
+ phy_impl->reset = xgbe_phy_reset;
+ phy_impl->start = xgbe_phy_start;
+ phy_impl->stop = xgbe_phy_stop;
+
+ phy_impl->link_status = xgbe_phy_link_status;
+
+ phy_impl->valid_speed = xgbe_phy_valid_speed;
+
+ phy_impl->use_mode = xgbe_phy_use_mode;
+ phy_impl->set_mode = xgbe_phy_set_mode;
+ phy_impl->get_mode = xgbe_phy_get_mode;
+ phy_impl->switch_mode = xgbe_phy_switch_mode;
+ phy_impl->cur_mode = xgbe_phy_cur_mode;
+
+ phy_impl->an_mode = xgbe_phy_an_mode;
+
+ phy_impl->an_config = xgbe_phy_an_config;
+
+ phy_impl->an_advertising = xgbe_phy_an_advertising;
+
+ phy_impl->an_outcome = xgbe_phy_an_outcome;
+
+ phy_impl->kr_training_pre = xgbe_phy_kr_training_pre;
+ phy_impl->kr_training_post = xgbe_phy_kr_training_post;
+}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
new file mode 100644
index 000000000..97e32c049
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
@@ -0,0 +1,3461 @@
+/*
+ * AMD 10Gb Ethernet driver
+ *
+ * This file is available to you under your choice of the following two
+ * licenses:
+ *
+ * License 1: GPLv2
+ *
+ * Copyright (c) 2016 Advanced Micro Devices, Inc.
+ *
+ * This file is free software; you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * License 2: Modified BSD
+ *
+ * Copyright (c) 2016 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Advanced Micro Devices, Inc. nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/kmod.h>
+#include <linux/mdio.h>
+#include <linux/phy.h>
+#include <linux/ethtool.h>
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+#define XGBE_PHY_PORT_SPEED_100 BIT(0)
+#define XGBE_PHY_PORT_SPEED_1000 BIT(1)
+#define XGBE_PHY_PORT_SPEED_2500 BIT(2)
+#define XGBE_PHY_PORT_SPEED_10000 BIT(3)
+
+#define XGBE_MUTEX_RELEASE 0x80000000
+
+#define XGBE_SFP_DIRECT 7
+
+/* I2C target addresses */
+#define XGBE_SFP_SERIAL_ID_ADDRESS 0x50
+#define XGBE_SFP_DIAG_INFO_ADDRESS 0x51
+#define XGBE_SFP_PHY_ADDRESS 0x56
+#define XGBE_GPIO_ADDRESS_PCA9555 0x20
+
+/* SFP sideband signal indicators */
+#define XGBE_GPIO_NO_TX_FAULT BIT(0)
+#define XGBE_GPIO_NO_RATE_SELECT BIT(1)
+#define XGBE_GPIO_NO_MOD_ABSENT BIT(2)
+#define XGBE_GPIO_NO_RX_LOS BIT(3)
+
+/* Rate-change complete wait/retry count */
+#define XGBE_RATECHANGE_COUNT 500
+
+/* CDR delay values for KR support (in usec) */
+#define XGBE_CDR_DELAY_INIT 10000
+#define XGBE_CDR_DELAY_INC 10000
+#define XGBE_CDR_DELAY_MAX 100000
+
+/* RRC frequency during link status check */
+#define XGBE_RRC_FREQUENCY 10
+
+enum xgbe_port_mode {
+ XGBE_PORT_MODE_RSVD = 0,
+ XGBE_PORT_MODE_BACKPLANE,
+ XGBE_PORT_MODE_BACKPLANE_2500,
+ XGBE_PORT_MODE_1000BASE_T,
+ XGBE_PORT_MODE_1000BASE_X,
+ XGBE_PORT_MODE_NBASE_T,
+ XGBE_PORT_MODE_10GBASE_T,
+ XGBE_PORT_MODE_10GBASE_R,
+ XGBE_PORT_MODE_SFP,
+ XGBE_PORT_MODE_BACKPLANE_NO_AUTONEG,
+ XGBE_PORT_MODE_MAX,
+};
+
+enum xgbe_conn_type {
+ XGBE_CONN_TYPE_NONE = 0,
+ XGBE_CONN_TYPE_SFP,
+ XGBE_CONN_TYPE_MDIO,
+ XGBE_CONN_TYPE_RSVD1,
+ XGBE_CONN_TYPE_BACKPLANE,
+ XGBE_CONN_TYPE_MAX,
+};
+
+/* SFP/SFP+ related definitions */
+enum xgbe_sfp_comm {
+ XGBE_SFP_COMM_DIRECT = 0,
+ XGBE_SFP_COMM_PCA9545,
+};
+
+enum xgbe_sfp_cable {
+ XGBE_SFP_CABLE_UNKNOWN = 0,
+ XGBE_SFP_CABLE_ACTIVE,
+ XGBE_SFP_CABLE_PASSIVE,
+ XGBE_SFP_CABLE_FIBER,
+};
+
+enum xgbe_sfp_base {
+ XGBE_SFP_BASE_UNKNOWN = 0,
+ XGBE_SFP_BASE_1000_T,
+ XGBE_SFP_BASE_1000_SX,
+ XGBE_SFP_BASE_1000_LX,
+ XGBE_SFP_BASE_1000_CX,
+ XGBE_SFP_BASE_10000_SR,
+ XGBE_SFP_BASE_10000_LR,
+ XGBE_SFP_BASE_10000_LRM,
+ XGBE_SFP_BASE_10000_ER,
+ XGBE_SFP_BASE_10000_CR,
+};
+
+enum xgbe_sfp_speed {
+ XGBE_SFP_SPEED_UNKNOWN = 0,
+ XGBE_SFP_SPEED_100_1000,
+ XGBE_SFP_SPEED_1000,
+ XGBE_SFP_SPEED_10000,
+};
+
+/* SFP Serial ID Base ID values relative to an offset of 0 */
+#define XGBE_SFP_BASE_ID 0
+#define XGBE_SFP_ID_SFP 0x03
+
+#define XGBE_SFP_BASE_EXT_ID 1
+#define XGBE_SFP_EXT_ID_SFP 0x04
+
+#define XGBE_SFP_BASE_10GBE_CC 3
+#define XGBE_SFP_BASE_10GBE_CC_SR BIT(4)
+#define XGBE_SFP_BASE_10GBE_CC_LR BIT(5)
+#define XGBE_SFP_BASE_10GBE_CC_LRM BIT(6)
+#define XGBE_SFP_BASE_10GBE_CC_ER BIT(7)
+
+#define XGBE_SFP_BASE_1GBE_CC 6
+#define XGBE_SFP_BASE_1GBE_CC_SX BIT(0)
+#define XGBE_SFP_BASE_1GBE_CC_LX BIT(1)
+#define XGBE_SFP_BASE_1GBE_CC_CX BIT(2)
+#define XGBE_SFP_BASE_1GBE_CC_T BIT(3)
+
+#define XGBE_SFP_BASE_CABLE 8
+#define XGBE_SFP_BASE_CABLE_PASSIVE BIT(2)
+#define XGBE_SFP_BASE_CABLE_ACTIVE BIT(3)
+
+#define XGBE_SFP_BASE_BR 12
+#define XGBE_SFP_BASE_BR_1GBE_MIN 0x0a
+#define XGBE_SFP_BASE_BR_10GBE_MIN 0x64
+
+#define XGBE_SFP_BASE_CU_CABLE_LEN 18
+
+#define XGBE_SFP_BASE_VENDOR_NAME 20
+#define XGBE_SFP_BASE_VENDOR_NAME_LEN 16
+#define XGBE_SFP_BASE_VENDOR_PN 40
+#define XGBE_SFP_BASE_VENDOR_PN_LEN 16
+#define XGBE_SFP_BASE_VENDOR_REV 56
+#define XGBE_SFP_BASE_VENDOR_REV_LEN 4
+
+#define XGBE_SFP_BASE_CC 63
+
+/* SFP Serial ID Extended ID values relative to an offset of 64 */
+#define XGBE_SFP_BASE_VENDOR_SN 4
+#define XGBE_SFP_BASE_VENDOR_SN_LEN 16
+
+#define XGBE_SFP_EXTD_OPT1 1
+#define XGBE_SFP_EXTD_OPT1_RX_LOS BIT(1)
+#define XGBE_SFP_EXTD_OPT1_TX_FAULT BIT(3)
+
+#define XGBE_SFP_EXTD_DIAG 28
+#define XGBE_SFP_EXTD_DIAG_ADDR_CHANGE BIT(2)
+
+#define XGBE_SFP_EXTD_SFF_8472 30
+
+#define XGBE_SFP_EXTD_CC 31
+
+struct xgbe_sfp_eeprom {
+ u8 base[64];
+ u8 extd[32];
+ u8 vendor[32];
+};
+
+#define XGBE_SFP_DIAGS_SUPPORTED(_x) \
+ ((_x)->extd[XGBE_SFP_EXTD_SFF_8472] && \
+ !((_x)->extd[XGBE_SFP_EXTD_DIAG] & XGBE_SFP_EXTD_DIAG_ADDR_CHANGE))
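+
+/* Editor's note: in the macro above, extd[30] is EEPROM byte 94 (the
+ * SFF-8472 compliance byte, non-zero when the A2h diagnostic page is
+ * implemented) and bit 2 of extd[28] (byte 92, Diagnostic Monitoring Type)
+ * flags modules that require an address-change sequence to reach it; this
+ * driver does not perform that sequence, so such modules are treated as
+ * having no usable diagnostics.
+ */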
+
+#define XGBE_SFP_EEPROM_BASE_LEN 256
+#define XGBE_SFP_EEPROM_DIAG_LEN 256
+#define XGBE_SFP_EEPROM_MAX (XGBE_SFP_EEPROM_BASE_LEN + \
+ XGBE_SFP_EEPROM_DIAG_LEN)
+
+#define XGBE_BEL_FUSE_VENDOR "BEL-FUSE "
+#define XGBE_BEL_FUSE_PARTNO "1GBT-SFP06 "
+
+#define XGBE_MOLEX_VENDOR "Molex Inc. "
+
+struct xgbe_sfp_ascii {
+ union {
+ char vendor[XGBE_SFP_BASE_VENDOR_NAME_LEN + 1];
+ char partno[XGBE_SFP_BASE_VENDOR_PN_LEN + 1];
+ char rev[XGBE_SFP_BASE_VENDOR_REV_LEN + 1];
+ char serno[XGBE_SFP_BASE_VENDOR_SN_LEN + 1];
+ } u;
+};
+
+/* MDIO PHY reset types */
+enum xgbe_mdio_reset {
+ XGBE_MDIO_RESET_NONE = 0,
+ XGBE_MDIO_RESET_I2C_GPIO,
+ XGBE_MDIO_RESET_INT_GPIO,
+ XGBE_MDIO_RESET_MAX,
+};
+
+/* Re-driver related definitions */
+enum xgbe_phy_redrv_if {
+ XGBE_PHY_REDRV_IF_MDIO = 0,
+ XGBE_PHY_REDRV_IF_I2C,
+ XGBE_PHY_REDRV_IF_MAX,
+};
+
+enum xgbe_phy_redrv_model {
+ XGBE_PHY_REDRV_MODEL_4223 = 0,
+ XGBE_PHY_REDRV_MODEL_4227,
+ XGBE_PHY_REDRV_MODEL_MAX,
+};
+
+enum xgbe_phy_redrv_mode {
+ XGBE_PHY_REDRV_MODE_CX = 5,
+ XGBE_PHY_REDRV_MODE_SR = 9,
+};
+
+#define XGBE_PHY_REDRV_MODE_REG 0x12b0
+
+/* PHY related configuration information */
+struct xgbe_phy_data {
+ enum xgbe_port_mode port_mode;
+
+ unsigned int port_id;
+
+ unsigned int port_speeds;
+
+ enum xgbe_conn_type conn_type;
+
+ enum xgbe_mode cur_mode;
+ enum xgbe_mode start_mode;
+
+ unsigned int rrc_count;
+
+ unsigned int mdio_addr;
+
+ /* SFP Support */
+ enum xgbe_sfp_comm sfp_comm;
+ unsigned int sfp_mux_address;
+ unsigned int sfp_mux_channel;
+
+ unsigned int sfp_gpio_address;
+ unsigned int sfp_gpio_mask;
+ unsigned int sfp_gpio_inputs;
+ unsigned int sfp_gpio_rx_los;
+ unsigned int sfp_gpio_tx_fault;
+ unsigned int sfp_gpio_mod_absent;
+ unsigned int sfp_gpio_rate_select;
+
+ unsigned int sfp_rx_los;
+ unsigned int sfp_tx_fault;
+ unsigned int sfp_mod_absent;
+ unsigned int sfp_changed;
+ unsigned int sfp_phy_avail;
+ unsigned int sfp_cable_len;
+ enum xgbe_sfp_base sfp_base;
+ enum xgbe_sfp_cable sfp_cable;
+ enum xgbe_sfp_speed sfp_speed;
+ struct xgbe_sfp_eeprom sfp_eeprom;
+
+ /* External PHY support */
+ enum xgbe_mdio_mode phydev_mode;
+ struct mii_bus *mii;
+ struct phy_device *phydev;
+ enum xgbe_mdio_reset mdio_reset;
+ unsigned int mdio_reset_addr;
+ unsigned int mdio_reset_gpio;
+
+ /* Re-driver support */
+ unsigned int redrv;
+ unsigned int redrv_if;
+ unsigned int redrv_addr;
+ unsigned int redrv_lane;
+ unsigned int redrv_model;
+
+ /* KR AN support */
+ unsigned int phy_cdr_notrack;
+ unsigned int phy_cdr_delay;
+};
+
+/* I2C, MDIO and GPIO lines are muxed, so only one device at a time */
+static DEFINE_MUTEX(xgbe_phy_comm_lock);
+
+static enum xgbe_an_mode xgbe_phy_an_mode(struct xgbe_prv_data *pdata);
+
+static int xgbe_phy_i2c_xfer(struct xgbe_prv_data *pdata,
+ struct xgbe_i2c_op *i2c_op)
+{
+ return pdata->i2c_if.i2c_xfer(pdata, i2c_op);
+}
+
+static int xgbe_phy_redrv_write(struct xgbe_prv_data *pdata, unsigned int reg,
+ unsigned int val)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ struct xgbe_i2c_op i2c_op;
+ __be16 *redrv_val;
+ u8 redrv_data[5], csum;
+ unsigned int i, retry;
+ int ret;
+
+ /* High byte of register contains read/write indicator */
+ redrv_data[0] = ((reg >> 8) & 0xff) << 1;
+ redrv_data[1] = reg & 0xff;
+ redrv_val = (__be16 *)&redrv_data[2];
+ *redrv_val = cpu_to_be16(val);
+
+ /* Calculate 1 byte checksum */
+ csum = 0;
+ for (i = 0; i < 4; i++) {
+ csum += redrv_data[i];
+ if (redrv_data[i] > csum)
+ csum++;
+ }
+ redrv_data[4] = ~csum;
+
+ retry = 1;
+again1:
+ i2c_op.cmd = XGBE_I2C_CMD_WRITE;
+ i2c_op.target = phy_data->redrv_addr;
+ i2c_op.len = sizeof(redrv_data);
+ i2c_op.buf = redrv_data;
+ ret = xgbe_phy_i2c_xfer(pdata, &i2c_op);
+ if (ret) {
+ if ((ret == -EAGAIN) && retry--)
+ goto again1;
+
+ return ret;
+ }
+
+ retry = 1;
+again2:
+ i2c_op.cmd = XGBE_I2C_CMD_READ;
+ i2c_op.target = phy_data->redrv_addr;
+ i2c_op.len = 1;
+ i2c_op.buf = redrv_data;
+ ret = xgbe_phy_i2c_xfer(pdata, &i2c_op);
+ if (ret) {
+ if ((ret == -EAGAIN) && retry--)
+ goto again2;
+
+ return ret;
+ }
+
+ if (redrv_data[0] != 0xff) {
+ netif_dbg(pdata, drv, pdata->netdev,
+ "Redriver write checksum error\n");
+ ret = -EIO;
+ }
+
+ return ret;
+}
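+
+/* Editor's note (illustrative sketch): the checksum loop above is an 8-bit
+ * sum with end-around carry; "data[i] > csum" after the addition detects
+ * that the u8 sum wrapped past 0xff. The complement is sent as the final
+ * byte. Extracted as a hypothetical helper:
+ *
+ *	static u8 redrv_csum(const u8 *data, size_t len)
+ *	{
+ *		u8 csum = 0;
+ *		size_t i;
+ *
+ *		for (i = 0; i < len; i++) {
+ *			csum += data[i];
+ *			if (data[i] > csum)	// addition wrapped past 0xff
+ *				csum++;		// end-around carry
+ *		}
+ *		return ~csum;
+ *	}
+ */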
+
+static int xgbe_phy_i2c_write(struct xgbe_prv_data *pdata, unsigned int target,
+ void *val, unsigned int val_len)
+{
+ struct xgbe_i2c_op i2c_op;
+ int retry, ret;
+
+ retry = 1;
+again:
+ /* Write the specified register */
+ i2c_op.cmd = XGBE_I2C_CMD_WRITE;
+ i2c_op.target = target;
+ i2c_op.len = val_len;
+ i2c_op.buf = val;
+ ret = xgbe_phy_i2c_xfer(pdata, &i2c_op);
+ if ((ret == -EAGAIN) && retry--)
+ goto again;
+
+ return ret;
+}
+
+static int xgbe_phy_i2c_read(struct xgbe_prv_data *pdata, unsigned int target,
+ void *reg, unsigned int reg_len,
+ void *val, unsigned int val_len)
+{
+ struct xgbe_i2c_op i2c_op;
+ int retry, ret;
+
+ retry = 1;
+again1:
+ /* Set the specified register to read */
+ i2c_op.cmd = XGBE_I2C_CMD_WRITE;
+ i2c_op.target = target;
+ i2c_op.len = reg_len;
+ i2c_op.buf = reg;
+ ret = xgbe_phy_i2c_xfer(pdata, &i2c_op);
+ if (ret) {
+ if ((ret == -EAGAIN) && retry--)
+ goto again1;
+
+ return ret;
+ }
+
+ retry = 1;
+again2:
+ /* Read the specified register */
+ i2c_op.cmd = XGBE_I2C_CMD_READ;
+ i2c_op.target = target;
+ i2c_op.len = val_len;
+ i2c_op.buf = val;
+ ret = xgbe_phy_i2c_xfer(pdata, &i2c_op);
+ if ((ret == -EAGAIN) && retry--)
+ goto again2;
+
+ return ret;
+}
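+
+/* Editor's note (sketch, accessor names assumed): SFP-style I2C register
+ * access is a two-step transaction, which is why both halves above retry
+ * once on -EAGAIN (lost arbitration on the shared bus):
+ *
+ *	i2c_write(target, &reg_addr, reg_len);	// set the register pointer
+ *	i2c_read(target, val, val_len);		// then read from it
+ */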
+
+static int xgbe_phy_sfp_put_mux(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ struct xgbe_i2c_op i2c_op;
+ u8 mux_channel;
+
+ if (phy_data->sfp_comm == XGBE_SFP_COMM_DIRECT)
+ return 0;
+
+ /* Select no mux channels */
+ mux_channel = 0;
+ i2c_op.cmd = XGBE_I2C_CMD_WRITE;
+ i2c_op.target = phy_data->sfp_mux_address;
+ i2c_op.len = sizeof(mux_channel);
+ i2c_op.buf = &mux_channel;
+
+ return xgbe_phy_i2c_xfer(pdata, &i2c_op);
+}
+
+static int xgbe_phy_sfp_get_mux(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ struct xgbe_i2c_op i2c_op;
+ u8 mux_channel;
+
+ if (phy_data->sfp_comm == XGBE_SFP_COMM_DIRECT)
+ return 0;
+
+ /* Select desired mux channel */
+ mux_channel = 1 << phy_data->sfp_mux_channel;
+ i2c_op.cmd = XGBE_I2C_CMD_WRITE;
+ i2c_op.target = phy_data->sfp_mux_address;
+ i2c_op.len = sizeof(mux_channel);
+ i2c_op.buf = &mux_channel;
+
+ return xgbe_phy_i2c_xfer(pdata, &i2c_op);
+}
+
+static void xgbe_phy_put_comm_ownership(struct xgbe_prv_data *pdata)
+{
+ mutex_unlock(&xgbe_phy_comm_lock);
+}
+
+static int xgbe_phy_get_comm_ownership(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned long timeout;
+ unsigned int mutex_id;
+
+ /* The I2C and MDIO/GPIO buses are multiplexed between multiple devices,
+ * so the driver needs to take the software mutex and then the hardware
+ * mutexes before being able to use the buses.
+ */
+ mutex_lock(&xgbe_phy_comm_lock);
+
+ /* Clear the mutexes */
+ XP_IOWRITE(pdata, XP_I2C_MUTEX, XGBE_MUTEX_RELEASE);
+ XP_IOWRITE(pdata, XP_MDIO_MUTEX, XGBE_MUTEX_RELEASE);
+
+ /* Mutex formats are the same for I2C and MDIO/GPIO */
+ mutex_id = 0;
+ XP_SET_BITS(mutex_id, XP_I2C_MUTEX, ID, phy_data->port_id);
+ XP_SET_BITS(mutex_id, XP_I2C_MUTEX, ACTIVE, 1);
+
+ timeout = jiffies + (5 * HZ);
+ while (time_before(jiffies, timeout)) {
+ /* Must be all zeroes in order to obtain the mutex */
+ if (XP_IOREAD(pdata, XP_I2C_MUTEX) ||
+ XP_IOREAD(pdata, XP_MDIO_MUTEX)) {
+ usleep_range(100, 200);
+ continue;
+ }
+
+ /* Obtain the mutex */
+ XP_IOWRITE(pdata, XP_I2C_MUTEX, mutex_id);
+ XP_IOWRITE(pdata, XP_MDIO_MUTEX, mutex_id);
+
+ return 0;
+ }
+
+ mutex_unlock(&xgbe_phy_comm_lock);
+
+ netdev_err(pdata->netdev, "unable to obtain hardware mutexes\n");
+
+ return -ETIMEDOUT;
+}
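+
+/* Editor's note (usage sketch): callers in this file bracket every
+ * multiplexed-bus access with these ownership helpers, e.g.:
+ *
+ *	ret = xgbe_phy_get_comm_ownership(pdata);
+ *	if (ret)
+ *		return ret;
+ *	// ... I2C/MDIO/GPIO traffic ...
+ *	xgbe_phy_put_comm_ownership(pdata);
+ */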
+
+static int xgbe_phy_mdio_mii_write(struct xgbe_prv_data *pdata, int addr,
+ int reg, u16 val)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ if (reg & MII_ADDR_C45) {
+ if (phy_data->phydev_mode != XGBE_MDIO_MODE_CL45)
+ return -ENOTSUPP;
+ } else {
+ if (phy_data->phydev_mode != XGBE_MDIO_MODE_CL22)
+ return -ENOTSUPP;
+ }
+
+ return pdata->hw_if.write_ext_mii_regs(pdata, addr, reg, val);
+}
+
+static int xgbe_phy_i2c_mii_write(struct xgbe_prv_data *pdata, int reg, u16 val)
+{
+ __be16 *mii_val;
+ u8 mii_data[3];
+ int ret;
+
+ ret = xgbe_phy_sfp_get_mux(pdata);
+ if (ret)
+ return ret;
+
+ mii_data[0] = reg & 0xff;
+ mii_val = (__be16 *)&mii_data[1];
+ *mii_val = cpu_to_be16(val);
+
+ ret = xgbe_phy_i2c_write(pdata, XGBE_SFP_PHY_ADDRESS,
+ mii_data, sizeof(mii_data));
+
+ xgbe_phy_sfp_put_mux(pdata);
+
+ return ret;
+}
+
+static int xgbe_phy_mii_write(struct mii_bus *mii, int addr, int reg, u16 val)
+{
+ struct xgbe_prv_data *pdata = mii->priv;
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ int ret;
+
+ ret = xgbe_phy_get_comm_ownership(pdata);
+ if (ret)
+ return ret;
+
+ if (phy_data->conn_type == XGBE_CONN_TYPE_SFP)
+ ret = xgbe_phy_i2c_mii_write(pdata, reg, val);
+ else if (phy_data->conn_type & XGBE_CONN_TYPE_MDIO)
+ ret = xgbe_phy_mdio_mii_write(pdata, addr, reg, val);
+ else
+ ret = -ENOTSUPP;
+
+ xgbe_phy_put_comm_ownership(pdata);
+
+ return ret;
+}
+
+static int xgbe_phy_mdio_mii_read(struct xgbe_prv_data *pdata, int addr,
+ int reg)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ if (reg & MII_ADDR_C45) {
+ if (phy_data->phydev_mode != XGBE_MDIO_MODE_CL45)
+ return -ENOTSUPP;
+ } else {
+ if (phy_data->phydev_mode != XGBE_MDIO_MODE_CL22)
+ return -ENOTSUPP;
+ }
+
+ return pdata->hw_if.read_ext_mii_regs(pdata, addr, reg);
+}
+
+static int xgbe_phy_i2c_mii_read(struct xgbe_prv_data *pdata, int reg)
+{
+ __be16 mii_val;
+ u8 mii_reg;
+ int ret;
+
+ ret = xgbe_phy_sfp_get_mux(pdata);
+ if (ret)
+ return ret;
+
+ mii_reg = reg;
+ ret = xgbe_phy_i2c_read(pdata, XGBE_SFP_PHY_ADDRESS,
+ &mii_reg, sizeof(mii_reg),
+ &mii_val, sizeof(mii_val));
+ if (!ret)
+ ret = be16_to_cpu(mii_val);
+
+ xgbe_phy_sfp_put_mux(pdata);
+
+ return ret;
+}
+
+static int xgbe_phy_mii_read(struct mii_bus *mii, int addr, int reg)
+{
+ struct xgbe_prv_data *pdata = mii->priv;
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ int ret;
+
+ ret = xgbe_phy_get_comm_ownership(pdata);
+ if (ret)
+ return ret;
+
+ if (phy_data->conn_type == XGBE_CONN_TYPE_SFP)
+ ret = xgbe_phy_i2c_mii_read(pdata, reg);
+ else if (phy_data->conn_type & XGBE_CONN_TYPE_MDIO)
+ ret = xgbe_phy_mdio_mii_read(pdata, addr, reg);
+ else
+ ret = -ENOTSUPP;
+
+ xgbe_phy_put_comm_ownership(pdata);
+
+ return ret;
+}
+
+static void xgbe_phy_sfp_phy_settings(struct xgbe_prv_data *pdata)
+{
+ struct ethtool_link_ksettings *lks = &pdata->phy.lks;
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ if (!phy_data->sfp_mod_absent && !phy_data->sfp_changed)
+ return;
+
+ XGBE_ZERO_SUP(lks);
+
+ if (phy_data->sfp_mod_absent) {
+ pdata->phy.speed = SPEED_UNKNOWN;
+ pdata->phy.duplex = DUPLEX_UNKNOWN;
+ pdata->phy.autoneg = AUTONEG_ENABLE;
+ pdata->phy.pause_autoneg = AUTONEG_ENABLE;
+
+ XGBE_SET_SUP(lks, Autoneg);
+ XGBE_SET_SUP(lks, Pause);
+ XGBE_SET_SUP(lks, Asym_Pause);
+ XGBE_SET_SUP(lks, TP);
+ XGBE_SET_SUP(lks, FIBRE);
+
+ XGBE_LM_COPY(lks, advertising, lks, supported);
+
+ return;
+ }
+
+ switch (phy_data->sfp_base) {
+ case XGBE_SFP_BASE_1000_T:
+ case XGBE_SFP_BASE_1000_SX:
+ case XGBE_SFP_BASE_1000_LX:
+ case XGBE_SFP_BASE_1000_CX:
+ pdata->phy.speed = SPEED_UNKNOWN;
+ pdata->phy.duplex = DUPLEX_UNKNOWN;
+ pdata->phy.autoneg = AUTONEG_ENABLE;
+ pdata->phy.pause_autoneg = AUTONEG_ENABLE;
+ XGBE_SET_SUP(lks, Autoneg);
+ XGBE_SET_SUP(lks, Pause);
+ XGBE_SET_SUP(lks, Asym_Pause);
+ if (phy_data->sfp_base == XGBE_SFP_BASE_1000_T) {
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100)
+ XGBE_SET_SUP(lks, 100baseT_Full);
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000)
+ XGBE_SET_SUP(lks, 1000baseT_Full);
+ } else {
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000)
+ XGBE_SET_SUP(lks, 1000baseX_Full);
+ }
+ break;
+ case XGBE_SFP_BASE_10000_SR:
+ case XGBE_SFP_BASE_10000_LR:
+ case XGBE_SFP_BASE_10000_LRM:
+ case XGBE_SFP_BASE_10000_ER:
+ case XGBE_SFP_BASE_10000_CR:
+ pdata->phy.speed = SPEED_10000;
+ pdata->phy.duplex = DUPLEX_FULL;
+ pdata->phy.autoneg = AUTONEG_DISABLE;
+ pdata->phy.pause_autoneg = AUTONEG_DISABLE;
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000) {
+ switch (phy_data->sfp_base) {
+ case XGBE_SFP_BASE_10000_SR:
+ XGBE_SET_SUP(lks, 10000baseSR_Full);
+ break;
+ case XGBE_SFP_BASE_10000_LR:
+ XGBE_SET_SUP(lks, 10000baseLR_Full);
+ break;
+ case XGBE_SFP_BASE_10000_LRM:
+ XGBE_SET_SUP(lks, 10000baseLRM_Full);
+ break;
+ case XGBE_SFP_BASE_10000_ER:
+ XGBE_SET_SUP(lks, 10000baseER_Full);
+ break;
+ case XGBE_SFP_BASE_10000_CR:
+ XGBE_SET_SUP(lks, 10000baseCR_Full);
+ break;
+ default:
+ break;
+ }
+ }
+ break;
+ default:
+ pdata->phy.speed = SPEED_UNKNOWN;
+ pdata->phy.duplex = DUPLEX_UNKNOWN;
+ pdata->phy.autoneg = AUTONEG_DISABLE;
+ pdata->phy.pause_autoneg = AUTONEG_DISABLE;
+ break;
+ }
+
+ switch (phy_data->sfp_base) {
+ case XGBE_SFP_BASE_1000_T:
+ case XGBE_SFP_BASE_1000_CX:
+ case XGBE_SFP_BASE_10000_CR:
+ XGBE_SET_SUP(lks, TP);
+ break;
+ default:
+ XGBE_SET_SUP(lks, FIBRE);
+ break;
+ }
+
+ XGBE_LM_COPY(lks, advertising, lks, supported);
+}
+
+static bool xgbe_phy_sfp_bit_rate(struct xgbe_sfp_eeprom *sfp_eeprom,
+ enum xgbe_sfp_speed sfp_speed)
+{
+ u8 *sfp_base, min;
+
+ sfp_base = sfp_eeprom->base;
+
+ switch (sfp_speed) {
+ case XGBE_SFP_SPEED_1000:
+ min = XGBE_SFP_BASE_BR_1GBE_MIN;
+ break;
+ case XGBE_SFP_SPEED_10000:
+ min = XGBE_SFP_BASE_BR_10GBE_MIN;
+ break;
+ default:
+ return false;
+ }
+
+ return sfp_base[XGBE_SFP_BASE_BR] >= min;
+}
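+
+/* Editor's note: the nominal bit rate at XGBE_SFP_BASE_BR (EEPROM byte 12)
+ * is coded in units of 100 Mb/s, so the minimums above work out to
+ * 0x0a * 100 Mb/s = 1 Gb/s and 0x64 * 100 Mb/s = 10 Gb/s.
+ */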
+
+static void xgbe_phy_free_phy_device(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ if (phy_data->phydev) {
+ phy_detach(phy_data->phydev);
+ phy_device_remove(phy_data->phydev);
+ phy_device_free(phy_data->phydev);
+ phy_data->phydev = NULL;
+ }
+}
+
+static bool xgbe_phy_finisar_phy_quirks(struct xgbe_prv_data *pdata)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = { 0, };
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int phy_id = phy_data->phydev->phy_id;
+
+ if (phy_data->port_mode != XGBE_PORT_MODE_SFP)
+ return false;
+
+ if ((phy_id & 0xfffffff0) != 0x01ff0cc0)
+ return false;
+
+ /* Enable Base-T AN */
+ phy_write(phy_data->phydev, 0x16, 0x0001);
+ phy_write(phy_data->phydev, 0x00, 0x9140);
+ phy_write(phy_data->phydev, 0x16, 0x0000);
+
+ /* Enable SGMII at 100Base-T/1000Base-T Full Duplex */
+ phy_write(phy_data->phydev, 0x1b, 0x9084);
+ phy_write(phy_data->phydev, 0x09, 0x0e00);
+ phy_write(phy_data->phydev, 0x00, 0x8140);
+ phy_write(phy_data->phydev, 0x04, 0x0d01);
+ phy_write(phy_data->phydev, 0x00, 0x9140);
+
+ linkmode_set_bit_array(phy_10_100_features_array,
+ ARRAY_SIZE(phy_10_100_features_array),
+ supported);
+ linkmode_set_bit_array(phy_gbit_features_array,
+ ARRAY_SIZE(phy_gbit_features_array),
+ supported);
+
+ linkmode_copy(phy_data->phydev->supported, supported);
+
+ phy_support_asym_pause(phy_data->phydev);
+
+ netif_dbg(pdata, drv, pdata->netdev,
+ "Finisar PHY quirk in place\n");
+
+ return true;
+}
+
+static bool xgbe_phy_belfuse_phy_quirks(struct xgbe_prv_data *pdata)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = { 0, };
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ struct xgbe_sfp_eeprom *sfp_eeprom = &phy_data->sfp_eeprom;
+ unsigned int phy_id = phy_data->phydev->phy_id;
+ int reg;
+
+ if (phy_data->port_mode != XGBE_PORT_MODE_SFP)
+ return false;
+
+ if (memcmp(&sfp_eeprom->base[XGBE_SFP_BASE_VENDOR_NAME],
+ XGBE_BEL_FUSE_VENDOR, XGBE_SFP_BASE_VENDOR_NAME_LEN))
+ return false;
+
+ /* For Bel-Fuse, use the extra AN flag */
+ pdata->an_again = 1;
+
+ if (memcmp(&sfp_eeprom->base[XGBE_SFP_BASE_VENDOR_PN],
+ XGBE_BEL_FUSE_PARTNO, XGBE_SFP_BASE_VENDOR_PN_LEN))
+ return false;
+
+ if ((phy_id & 0xfffffff0) != 0x03625d10)
+ return false;
+
+ /* Reset PHY - wait for self-clearing reset bit to clear */
+ genphy_soft_reset(phy_data->phydev);
+
+ /* Disable RGMII mode */
+ phy_write(phy_data->phydev, 0x18, 0x7007);
+ reg = phy_read(phy_data->phydev, 0x18);
+ phy_write(phy_data->phydev, 0x18, reg & ~0x0080);
+
+ /* Enable fiber register bank */
+ phy_write(phy_data->phydev, 0x1c, 0x7c00);
+ reg = phy_read(phy_data->phydev, 0x1c);
+ reg &= 0x03ff;
+ reg &= ~0x0001;
+ phy_write(phy_data->phydev, 0x1c, 0x8000 | 0x7c00 | reg | 0x0001);
+
+ /* Power down SerDes */
+ reg = phy_read(phy_data->phydev, 0x00);
+ phy_write(phy_data->phydev, 0x00, reg | 0x00800);
+
+ /* Configure SGMII-to-Copper mode */
+ phy_write(phy_data->phydev, 0x1c, 0x7c00);
+ reg = phy_read(phy_data->phydev, 0x1c);
+ reg &= 0x03ff;
+ reg &= ~0x0006;
+ phy_write(phy_data->phydev, 0x1c, 0x8000 | 0x7c00 | reg | 0x0004);
+
+ /* Power up SerDes */
+ reg = phy_read(phy_data->phydev, 0x00);
+ phy_write(phy_data->phydev, 0x00, reg & ~0x00800);
+
+ /* Enable copper register bank */
+ phy_write(phy_data->phydev, 0x1c, 0x7c00);
+ reg = phy_read(phy_data->phydev, 0x1c);
+ reg &= 0x03ff;
+ reg &= ~0x0001;
+ phy_write(phy_data->phydev, 0x1c, 0x8000 | 0x7c00 | reg);
+
+ /* Power up SerDes */
+ reg = phy_read(phy_data->phydev, 0x00);
+ phy_write(phy_data->phydev, 0x00, reg & ~0x00800);
+
+ linkmode_set_bit_array(phy_10_100_features_array,
+ ARRAY_SIZE(phy_10_100_features_array),
+ supported);
+ linkmode_set_bit_array(phy_gbit_features_array,
+ ARRAY_SIZE(phy_gbit_features_array),
+ supported);
+ linkmode_copy(phy_data->phydev->supported, supported);
+ phy_support_asym_pause(phy_data->phydev);
+
+ netif_dbg(pdata, drv, pdata->netdev,
+ "BelFuse PHY quirk in place\n");
+
+ return true;
+}
+
+static void xgbe_phy_external_phy_quirks(struct xgbe_prv_data *pdata)
+{
+ if (xgbe_phy_belfuse_phy_quirks(pdata))
+ return;
+
+ if (xgbe_phy_finisar_phy_quirks(pdata))
+ return;
+}
+
+static int xgbe_phy_find_phy_device(struct xgbe_prv_data *pdata)
+{
+ struct ethtool_link_ksettings *lks = &pdata->phy.lks;
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ struct phy_device *phydev;
+ int ret;
+
+ /* If we already have a PHY, just return */
+ if (phy_data->phydev)
+ return 0;
+
+ /* Clear the extra AN flag */
+ pdata->an_again = 0;
+
+ /* Check for the use of an external PHY */
+ if (phy_data->phydev_mode == XGBE_MDIO_MODE_NONE)
+ return 0;
+
+ /* For SFP, only use an external PHY if available */
+ if ((phy_data->port_mode == XGBE_PORT_MODE_SFP) &&
+ !phy_data->sfp_phy_avail)
+ return 0;
+
+ /* Set the proper MDIO mode for the PHY */
+ ret = pdata->hw_if.set_ext_mii_mode(pdata, phy_data->mdio_addr,
+ phy_data->phydev_mode);
+ if (ret) {
+ netdev_err(pdata->netdev,
+ "mdio port/clause not compatible (%u/%u)\n",
+ phy_data->mdio_addr, phy_data->phydev_mode);
+ return ret;
+ }
+
+ /* Create and connect to the PHY device */
+ phydev = get_phy_device(phy_data->mii, phy_data->mdio_addr,
+ (phy_data->phydev_mode == XGBE_MDIO_MODE_CL45));
+ if (IS_ERR(phydev)) {
+ netdev_err(pdata->netdev, "get_phy_device failed\n");
+ return -ENODEV;
+ }
+ netif_dbg(pdata, drv, pdata->netdev, "external PHY id is %#010x\n",
+ phydev->phy_id);
+
+ /* TODO: If c45, add request_module based on one of the MMD ids? */
+
+ ret = phy_device_register(phydev);
+ if (ret) {
+ netdev_err(pdata->netdev, "phy_device_register failed\n");
+ phy_device_free(phydev);
+ return ret;
+ }
+
+ ret = phy_attach_direct(pdata->netdev, phydev, phydev->dev_flags,
+ PHY_INTERFACE_MODE_SGMII);
+ if (ret) {
+ netdev_err(pdata->netdev, "phy_attach_direct failed\n");
+ phy_device_remove(phydev);
+ phy_device_free(phydev);
+ return ret;
+ }
+ phy_data->phydev = phydev;
+
+ xgbe_phy_external_phy_quirks(pdata);
+
+ linkmode_and(phydev->advertising, phydev->advertising,
+ lks->link_modes.advertising);
+
+ phy_start_aneg(phy_data->phydev);
+
+ return 0;
+}
+
+static void xgbe_phy_sfp_external_phy(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ int ret;
+
+ if (!phy_data->sfp_changed)
+ return;
+
+ phy_data->sfp_phy_avail = 0;
+
+ if (phy_data->sfp_base != XGBE_SFP_BASE_1000_T)
+ return;
+
+ /* Check access to the PHY by reading the control register (BMCR) */
+ ret = xgbe_phy_i2c_mii_read(pdata, MII_BMCR);
+ if (ret < 0)
+ return;
+
+ /* Successfully accessed the PHY */
+ phy_data->sfp_phy_avail = 1;
+}
+
+static bool xgbe_phy_check_sfp_rx_los(struct xgbe_phy_data *phy_data)
+{
+ u8 *sfp_extd = phy_data->sfp_eeprom.extd;
+
+ if (!(sfp_extd[XGBE_SFP_EXTD_OPT1] & XGBE_SFP_EXTD_OPT1_RX_LOS))
+ return false;
+
+ if (phy_data->sfp_gpio_mask & XGBE_GPIO_NO_RX_LOS)
+ return false;
+
+ if (phy_data->sfp_gpio_inputs & (1 << phy_data->sfp_gpio_rx_los))
+ return true;
+
+ return false;
+}
+
+static bool xgbe_phy_check_sfp_tx_fault(struct xgbe_phy_data *phy_data)
+{
+ u8 *sfp_extd = phy_data->sfp_eeprom.extd;
+
+ if (!(sfp_extd[XGBE_SFP_EXTD_OPT1] & XGBE_SFP_EXTD_OPT1_TX_FAULT))
+ return false;
+
+ if (phy_data->sfp_gpio_mask & XGBE_GPIO_NO_TX_FAULT)
+ return false;
+
+ if (phy_data->sfp_gpio_inputs & (1 << phy_data->sfp_gpio_tx_fault))
+ return true;
+
+ return false;
+}
+
+static bool xgbe_phy_check_sfp_mod_absent(struct xgbe_phy_data *phy_data)
+{
+ if (phy_data->sfp_gpio_mask & XGBE_GPIO_NO_MOD_ABSENT)
+ return false;
+
+ if (phy_data->sfp_gpio_inputs & (1 << phy_data->sfp_gpio_mod_absent))
+ return true;
+
+ return false;
+}
+
+static void xgbe_phy_sfp_parse_eeprom(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ struct xgbe_sfp_eeprom *sfp_eeprom = &phy_data->sfp_eeprom;
+ u8 *sfp_base;
+
+ sfp_base = sfp_eeprom->base;
+
+ if (sfp_base[XGBE_SFP_BASE_ID] != XGBE_SFP_ID_SFP)
+ return;
+
+ if (sfp_base[XGBE_SFP_BASE_EXT_ID] != XGBE_SFP_EXT_ID_SFP)
+ return;
+
+ /* Update transceiver signals (eeprom extd/options) */
+ phy_data->sfp_tx_fault = xgbe_phy_check_sfp_tx_fault(phy_data);
+ phy_data->sfp_rx_los = xgbe_phy_check_sfp_rx_los(phy_data);
+
+ /* Assume FIBER cable unless told otherwise */
+ if (sfp_base[XGBE_SFP_BASE_CABLE] & XGBE_SFP_BASE_CABLE_PASSIVE) {
+ phy_data->sfp_cable = XGBE_SFP_CABLE_PASSIVE;
+ phy_data->sfp_cable_len = sfp_base[XGBE_SFP_BASE_CU_CABLE_LEN];
+ } else if (sfp_base[XGBE_SFP_BASE_CABLE] & XGBE_SFP_BASE_CABLE_ACTIVE) {
+ phy_data->sfp_cable = XGBE_SFP_CABLE_ACTIVE;
+ } else {
+ phy_data->sfp_cable = XGBE_SFP_CABLE_FIBER;
+ }
+
+ /* Determine the type of SFP */
+ if (phy_data->sfp_cable != XGBE_SFP_CABLE_FIBER &&
+ xgbe_phy_sfp_bit_rate(sfp_eeprom, XGBE_SFP_SPEED_10000))
+ phy_data->sfp_base = XGBE_SFP_BASE_10000_CR;
+ else if (sfp_base[XGBE_SFP_BASE_10GBE_CC] & XGBE_SFP_BASE_10GBE_CC_SR)
+ phy_data->sfp_base = XGBE_SFP_BASE_10000_SR;
+ else if (sfp_base[XGBE_SFP_BASE_10GBE_CC] & XGBE_SFP_BASE_10GBE_CC_LR)
+ phy_data->sfp_base = XGBE_SFP_BASE_10000_LR;
+ else if (sfp_base[XGBE_SFP_BASE_10GBE_CC] & XGBE_SFP_BASE_10GBE_CC_LRM)
+ phy_data->sfp_base = XGBE_SFP_BASE_10000_LRM;
+ else if (sfp_base[XGBE_SFP_BASE_10GBE_CC] & XGBE_SFP_BASE_10GBE_CC_ER)
+ phy_data->sfp_base = XGBE_SFP_BASE_10000_ER;
+ else if (sfp_base[XGBE_SFP_BASE_1GBE_CC] & XGBE_SFP_BASE_1GBE_CC_SX)
+ phy_data->sfp_base = XGBE_SFP_BASE_1000_SX;
+ else if (sfp_base[XGBE_SFP_BASE_1GBE_CC] & XGBE_SFP_BASE_1GBE_CC_LX)
+ phy_data->sfp_base = XGBE_SFP_BASE_1000_LX;
+ else if (sfp_base[XGBE_SFP_BASE_1GBE_CC] & XGBE_SFP_BASE_1GBE_CC_CX)
+ phy_data->sfp_base = XGBE_SFP_BASE_1000_CX;
+ else if (sfp_base[XGBE_SFP_BASE_1GBE_CC] & XGBE_SFP_BASE_1GBE_CC_T)
+ phy_data->sfp_base = XGBE_SFP_BASE_1000_T;
+
+ switch (phy_data->sfp_base) {
+ case XGBE_SFP_BASE_1000_T:
+ phy_data->sfp_speed = XGBE_SFP_SPEED_100_1000;
+ break;
+ case XGBE_SFP_BASE_1000_SX:
+ case XGBE_SFP_BASE_1000_LX:
+ case XGBE_SFP_BASE_1000_CX:
+ phy_data->sfp_speed = XGBE_SFP_SPEED_1000;
+ break;
+ case XGBE_SFP_BASE_10000_SR:
+ case XGBE_SFP_BASE_10000_LR:
+ case XGBE_SFP_BASE_10000_LRM:
+ case XGBE_SFP_BASE_10000_ER:
+ case XGBE_SFP_BASE_10000_CR:
+ phy_data->sfp_speed = XGBE_SFP_SPEED_10000;
+ break;
+ default:
+ break;
+ }
+}
+
+static void xgbe_phy_sfp_eeprom_info(struct xgbe_prv_data *pdata,
+ struct xgbe_sfp_eeprom *sfp_eeprom)
+{
+ struct xgbe_sfp_ascii sfp_ascii;
+ char *sfp_data = (char *)&sfp_ascii;
+
+ netif_dbg(pdata, drv, pdata->netdev, "SFP detected:\n");
+ memcpy(sfp_data, &sfp_eeprom->base[XGBE_SFP_BASE_VENDOR_NAME],
+ XGBE_SFP_BASE_VENDOR_NAME_LEN);
+ sfp_data[XGBE_SFP_BASE_VENDOR_NAME_LEN] = '\0';
+ netif_dbg(pdata, drv, pdata->netdev, " vendor: %s\n",
+ sfp_data);
+
+ memcpy(sfp_data, &sfp_eeprom->base[XGBE_SFP_BASE_VENDOR_PN],
+ XGBE_SFP_BASE_VENDOR_PN_LEN);
+ sfp_data[XGBE_SFP_BASE_VENDOR_PN_LEN] = '\0';
+ netif_dbg(pdata, drv, pdata->netdev, " part number: %s\n",
+ sfp_data);
+
+ memcpy(sfp_data, &sfp_eeprom->base[XGBE_SFP_BASE_VENDOR_REV],
+ XGBE_SFP_BASE_VENDOR_REV_LEN);
+ sfp_data[XGBE_SFP_BASE_VENDOR_REV_LEN] = '\0';
+ netif_dbg(pdata, drv, pdata->netdev, " revision level: %s\n",
+ sfp_data);
+
+ memcpy(sfp_data, &sfp_eeprom->extd[XGBE_SFP_BASE_VENDOR_SN],
+ XGBE_SFP_BASE_VENDOR_SN_LEN);
+ sfp_data[XGBE_SFP_BASE_VENDOR_SN_LEN] = '\0';
+ netif_dbg(pdata, drv, pdata->netdev, " serial number: %s\n",
+ sfp_data);
+}
+
+static bool xgbe_phy_sfp_verify_eeprom(u8 cc_in, u8 *buf, unsigned int len)
+{
+ u8 cc;
+
+ for (cc = 0; len; buf++, len--)
+ cc += *buf;
+
+ return cc == cc_in;
+}
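+
+/* Editor's note: this is the SFF-8472 check code scheme: the stored byte is
+ * the low 8 bits of the sum of all preceding bytes in the region (e.g.
+ * CC_BASE at byte 63 covers bytes 0-62), and the u8 accumulator above
+ * supplies the mod-256 for free.
+ */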
+
+static int xgbe_phy_sfp_read_eeprom(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ struct xgbe_sfp_eeprom sfp_eeprom;
+ u8 eeprom_addr;
+ int ret;
+
+ ret = xgbe_phy_sfp_get_mux(pdata);
+ if (ret) {
+ dev_err_once(pdata->dev, "%s: I2C error setting SFP MUX\n",
+ netdev_name(pdata->netdev));
+ return ret;
+ }
+
+ /* Read the SFP serial ID eeprom */
+ eeprom_addr = 0;
+ ret = xgbe_phy_i2c_read(pdata, XGBE_SFP_SERIAL_ID_ADDRESS,
+ &eeprom_addr, sizeof(eeprom_addr),
+ &sfp_eeprom, sizeof(sfp_eeprom));
+ if (ret) {
+ dev_err_once(pdata->dev, "%s: I2C error reading SFP EEPROM\n",
+ netdev_name(pdata->netdev));
+ goto put;
+ }
+
+ /* Validate the contents read */
+ if (!xgbe_phy_sfp_verify_eeprom(sfp_eeprom.base[XGBE_SFP_BASE_CC],
+ sfp_eeprom.base,
+ sizeof(sfp_eeprom.base) - 1)) {
+ ret = -EINVAL;
+ goto put;
+ }
+
+ if (!xgbe_phy_sfp_verify_eeprom(sfp_eeprom.extd[XGBE_SFP_EXTD_CC],
+ sfp_eeprom.extd,
+ sizeof(sfp_eeprom.extd) - 1)) {
+ ret = -EINVAL;
+ goto put;
+ }
+
+ /* Check for an added or changed SFP */
+ if (memcmp(&phy_data->sfp_eeprom, &sfp_eeprom, sizeof(sfp_eeprom))) {
+ phy_data->sfp_changed = 1;
+
+ if (netif_msg_drv(pdata))
+ xgbe_phy_sfp_eeprom_info(pdata, &sfp_eeprom);
+
+ memcpy(&phy_data->sfp_eeprom, &sfp_eeprom, sizeof(sfp_eeprom));
+
+ xgbe_phy_free_phy_device(pdata);
+ } else {
+ phy_data->sfp_changed = 0;
+ }
+
+put:
+ xgbe_phy_sfp_put_mux(pdata);
+
+ return ret;
+}
+
+static void xgbe_phy_sfp_signals(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ u8 gpio_reg, gpio_ports[2];
+ int ret;
+
+ /* Read the input port registers */
+ gpio_reg = 0;
+ ret = xgbe_phy_i2c_read(pdata, phy_data->sfp_gpio_address,
+ &gpio_reg, sizeof(gpio_reg),
+ gpio_ports, sizeof(gpio_ports));
+ if (ret) {
+ dev_err_once(pdata->dev, "%s: I2C error reading SFP GPIOs\n",
+ netdev_name(pdata->netdev));
+ return;
+ }
+
+ phy_data->sfp_gpio_inputs = (gpio_ports[1] << 8) | gpio_ports[0];
+
+ phy_data->sfp_mod_absent = xgbe_phy_check_sfp_mod_absent(phy_data);
+}
+
+static void xgbe_phy_sfp_mod_absent(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ xgbe_phy_free_phy_device(pdata);
+
+ phy_data->sfp_mod_absent = 1;
+ phy_data->sfp_phy_avail = 0;
+ memset(&phy_data->sfp_eeprom, 0, sizeof(phy_data->sfp_eeprom));
+}
+
+static void xgbe_phy_sfp_reset(struct xgbe_phy_data *phy_data)
+{
+ phy_data->sfp_rx_los = 0;
+ phy_data->sfp_tx_fault = 0;
+ phy_data->sfp_mod_absent = 1;
+ phy_data->sfp_base = XGBE_SFP_BASE_UNKNOWN;
+ phy_data->sfp_cable = XGBE_SFP_CABLE_UNKNOWN;
+ phy_data->sfp_speed = XGBE_SFP_SPEED_UNKNOWN;
+}
+
+static void xgbe_phy_sfp_detect(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ int ret;
+
+ /* Reset the SFP signals and info */
+ xgbe_phy_sfp_reset(phy_data);
+
+ ret = xgbe_phy_get_comm_ownership(pdata);
+ if (ret)
+ return;
+
+ /* Read the SFP signals and check for module presence */
+ xgbe_phy_sfp_signals(pdata);
+ if (phy_data->sfp_mod_absent) {
+ xgbe_phy_sfp_mod_absent(pdata);
+ goto put;
+ }
+
+ ret = xgbe_phy_sfp_read_eeprom(pdata);
+ if (ret) {
+ /* Treat any error as if there isn't an SFP plugged in */
+ xgbe_phy_sfp_reset(phy_data);
+ xgbe_phy_sfp_mod_absent(pdata);
+ goto put;
+ }
+
+ xgbe_phy_sfp_parse_eeprom(pdata);
+
+ xgbe_phy_sfp_external_phy(pdata);
+
+put:
+ xgbe_phy_sfp_phy_settings(pdata);
+
+ xgbe_phy_put_comm_ownership(pdata);
+}
+
+static int xgbe_phy_module_eeprom(struct xgbe_prv_data *pdata,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ u8 eeprom_addr, eeprom_data[XGBE_SFP_EEPROM_MAX];
+ struct xgbe_sfp_eeprom *sfp_eeprom;
+ unsigned int i, j, rem;
+ int ret;
+
+ rem = eeprom->len;
+
+ if (!eeprom->len) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ if ((eeprom->offset + eeprom->len) > XGBE_SFP_EEPROM_MAX) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ if (phy_data->port_mode != XGBE_PORT_MODE_SFP) {
+ ret = -ENXIO;
+ goto done;
+ }
+
+ if (!netif_running(pdata->netdev)) {
+ ret = -EIO;
+ goto done;
+ }
+
+ if (phy_data->sfp_mod_absent) {
+ ret = -EIO;
+ goto done;
+ }
+
+ ret = xgbe_phy_get_comm_ownership(pdata);
+ if (ret) {
+ ret = -EIO;
+ goto done;
+ }
+
+ ret = xgbe_phy_sfp_get_mux(pdata);
+ if (ret) {
+ netdev_err(pdata->netdev, "I2C error setting SFP MUX\n");
+ ret = -EIO;
+ goto put_own;
+ }
+
+ /* Read the SFP serial ID eeprom */
+ eeprom_addr = 0;
+ ret = xgbe_phy_i2c_read(pdata, XGBE_SFP_SERIAL_ID_ADDRESS,
+ &eeprom_addr, sizeof(eeprom_addr),
+ eeprom_data, XGBE_SFP_EEPROM_BASE_LEN);
+ if (ret) {
+ netdev_err(pdata->netdev,
+ "I2C error reading SFP EEPROM\n");
+ ret = -EIO;
+ goto put_mux;
+ }
+
+ sfp_eeprom = (struct xgbe_sfp_eeprom *)eeprom_data;
+
+ if (XGBE_SFP_DIAGS_SUPPORTED(sfp_eeprom)) {
+ /* Read the SFP diagnostic eeprom */
+ eeprom_addr = 0;
+ ret = xgbe_phy_i2c_read(pdata, XGBE_SFP_DIAG_INFO_ADDRESS,
+ &eeprom_addr, sizeof(eeprom_addr),
+ eeprom_data + XGBE_SFP_EEPROM_BASE_LEN,
+ XGBE_SFP_EEPROM_DIAG_LEN);
+ if (ret) {
+ netdev_err(pdata->netdev,
+ "I2C error reading SFP DIAGS\n");
+ ret = -EIO;
+ goto put_mux;
+ }
+ }
+
+ for (i = 0, j = eeprom->offset; i < eeprom->len; i++, j++) {
+ if ((j >= XGBE_SFP_EEPROM_BASE_LEN) &&
+ !XGBE_SFP_DIAGS_SUPPORTED(sfp_eeprom))
+ break;
+
+ data[i] = eeprom_data[j];
+ rem--;
+ }
+
+put_mux:
+ xgbe_phy_sfp_put_mux(pdata);
+
+put_own:
+ xgbe_phy_put_comm_ownership(pdata);
+
+done:
+ eeprom->len -= rem;
+
+ return ret;
+}
+
+static int xgbe_phy_module_info(struct xgbe_prv_data *pdata,
+ struct ethtool_modinfo *modinfo)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ if (phy_data->port_mode != XGBE_PORT_MODE_SFP)
+ return -ENXIO;
+
+ if (!netif_running(pdata->netdev))
+ return -EIO;
+
+ if (phy_data->sfp_mod_absent)
+ return -EIO;
+
+ if (XGBE_SFP_DIAGS_SUPPORTED(&phy_data->sfp_eeprom)) {
+ modinfo->type = ETH_MODULE_SFF_8472;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+ } else {
+ modinfo->type = ETH_MODULE_SFF_8079;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
+ }
+
+ return 0;
+}
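+
+/* Editor's note: these map to the standard ethtool module EEPROM sizes:
+ * ETH_MODULE_SFF_8079_LEN is 256 bytes (the A0h page only) and
+ * ETH_MODULE_SFF_8472_LEN is 512 bytes (A0h plus the A2h diagnostic page),
+ * matching XGBE_SFP_EEPROM_MAX above.
+ */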
+
+static void xgbe_phy_phydev_flowctrl(struct xgbe_prv_data *pdata)
+{
+ struct ethtool_link_ksettings *lks = &pdata->phy.lks;
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ u16 lcl_adv = 0, rmt_adv = 0;
+ u8 fc;
+
+ pdata->phy.tx_pause = 0;
+ pdata->phy.rx_pause = 0;
+
+ if (!phy_data->phydev)
+ return;
+
+ lcl_adv = linkmode_adv_to_lcl_adv_t(phy_data->phydev->advertising);
+
+ if (phy_data->phydev->pause) {
+ XGBE_SET_LP_ADV(lks, Pause);
+ rmt_adv |= LPA_PAUSE_CAP;
+ }
+ if (phy_data->phydev->asym_pause) {
+ XGBE_SET_LP_ADV(lks, Asym_Pause);
+ rmt_adv |= LPA_PAUSE_ASYM;
+ }
+
+ fc = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
+ if (fc & FLOW_CTRL_TX)
+ pdata->phy.tx_pause = 1;
+ if (fc & FLOW_CTRL_RX)
+ pdata->phy.rx_pause = 1;
+}
+
+static enum xgbe_mode xgbe_phy_an37_sgmii_outcome(struct xgbe_prv_data *pdata)
+{
+ struct ethtool_link_ksettings *lks = &pdata->phy.lks;
+ enum xgbe_mode mode;
+
+ XGBE_SET_LP_ADV(lks, Autoneg);
+ XGBE_SET_LP_ADV(lks, TP);
+
+ /* Use external PHY to determine flow control */
+ if (pdata->phy.pause_autoneg)
+ xgbe_phy_phydev_flowctrl(pdata);
+
+ switch (pdata->an_status & XGBE_SGMII_AN_LINK_SPEED) {
+ case XGBE_SGMII_AN_LINK_SPEED_100:
+ if (pdata->an_status & XGBE_SGMII_AN_LINK_DUPLEX) {
+ XGBE_SET_LP_ADV(lks, 100baseT_Full);
+ mode = XGBE_MODE_SGMII_100;
+ } else {
+ /* Half-duplex not supported */
+ XGBE_SET_LP_ADV(lks, 100baseT_Half);
+ mode = XGBE_MODE_UNKNOWN;
+ }
+ break;
+ case XGBE_SGMII_AN_LINK_SPEED_1000:
+ if (pdata->an_status & XGBE_SGMII_AN_LINK_DUPLEX) {
+ XGBE_SET_LP_ADV(lks, 1000baseT_Full);
+ mode = XGBE_MODE_SGMII_1000;
+ } else {
+ /* Half-duplex not supported */
+ XGBE_SET_LP_ADV(lks, 1000baseT_Half);
+ mode = XGBE_MODE_UNKNOWN;
+ }
+ break;
+ default:
+ mode = XGBE_MODE_UNKNOWN;
+ }
+
+ return mode;
+}
+
+static enum xgbe_mode xgbe_phy_an37_outcome(struct xgbe_prv_data *pdata)
+{
+ struct ethtool_link_ksettings *lks = &pdata->phy.lks;
+ enum xgbe_mode mode;
+ unsigned int ad_reg, lp_reg;
+
+ XGBE_SET_LP_ADV(lks, Autoneg);
+ XGBE_SET_LP_ADV(lks, FIBRE);
+
+ /* Compare Advertisement and Link Partner register */
+ ad_reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_ADVERTISE);
+ lp_reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_LP_ABILITY);
+ if (lp_reg & 0x100)
+ XGBE_SET_LP_ADV(lks, Pause);
+ if (lp_reg & 0x80)
+ XGBE_SET_LP_ADV(lks, Asym_Pause);
+
+ if (pdata->phy.pause_autoneg) {
+ /* Set flow control based on auto-negotiation result */
+ pdata->phy.tx_pause = 0;
+ pdata->phy.rx_pause = 0;
+
+ if (ad_reg & lp_reg & 0x100) {
+ pdata->phy.tx_pause = 1;
+ pdata->phy.rx_pause = 1;
+ } else if (ad_reg & lp_reg & 0x80) {
+ if (ad_reg & 0x100)
+ pdata->phy.rx_pause = 1;
+ else if (lp_reg & 0x100)
+ pdata->phy.tx_pause = 1;
+ }
+ }
+
+ if (lp_reg & 0x20)
+ XGBE_SET_LP_ADV(lks, 1000baseX_Full);
+
+ /* Half duplex is not supported */
+ ad_reg &= lp_reg;
+ mode = (ad_reg & 0x20) ? XGBE_MODE_X : XGBE_MODE_UNKNOWN;
+
+ return mode;
+}
+
+static enum xgbe_mode xgbe_phy_an73_redrv_outcome(struct xgbe_prv_data *pdata)
+{
+ struct ethtool_link_ksettings *lks = &pdata->phy.lks;
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ enum xgbe_mode mode;
+ unsigned int ad_reg, lp_reg;
+
+ XGBE_SET_LP_ADV(lks, Autoneg);
+ XGBE_SET_LP_ADV(lks, Backplane);
+
+ /* Use external PHY to determine flow control */
+ if (pdata->phy.pause_autoneg)
+ xgbe_phy_phydev_flowctrl(pdata);
+
+ /* Compare Advertisement and Link Partner register 2 */
+ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1);
+ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 1);
+ if (lp_reg & 0x80)
+ XGBE_SET_LP_ADV(lks, 10000baseKR_Full);
+ if (lp_reg & 0x20)
+ XGBE_SET_LP_ADV(lks, 1000baseKX_Full);
+
+ ad_reg &= lp_reg;
+ if (ad_reg & 0x80) {
+ switch (phy_data->port_mode) {
+ case XGBE_PORT_MODE_BACKPLANE:
+ case XGBE_PORT_MODE_BACKPLANE_NO_AUTONEG:
+ mode = XGBE_MODE_KR;
+ break;
+ default:
+ mode = XGBE_MODE_SFI;
+ break;
+ }
+ } else if (ad_reg & 0x20) {
+ switch (phy_data->port_mode) {
+ case XGBE_PORT_MODE_BACKPLANE:
+ case XGBE_PORT_MODE_BACKPLANE_NO_AUTONEG:
+ mode = XGBE_MODE_KX_1000;
+ break;
+ case XGBE_PORT_MODE_1000BASE_X:
+ mode = XGBE_MODE_X;
+ break;
+ case XGBE_PORT_MODE_SFP:
+ switch (phy_data->sfp_base) {
+ case XGBE_SFP_BASE_1000_T:
+ if (phy_data->phydev &&
+ (phy_data->phydev->speed == SPEED_100))
+ mode = XGBE_MODE_SGMII_100;
+ else
+ mode = XGBE_MODE_SGMII_1000;
+ break;
+ case XGBE_SFP_BASE_1000_SX:
+ case XGBE_SFP_BASE_1000_LX:
+ case XGBE_SFP_BASE_1000_CX:
+ default:
+ mode = XGBE_MODE_X;
+ break;
+ }
+ break;
+ default:
+ if (phy_data->phydev &&
+ (phy_data->phydev->speed == SPEED_100))
+ mode = XGBE_MODE_SGMII_100;
+ else
+ mode = XGBE_MODE_SGMII_1000;
+ break;
+ }
+ } else {
+ mode = XGBE_MODE_UNKNOWN;
+ }
+
+ /* Compare Advertisement and Link Partner register 3 */
+ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
+ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 2);
+ if (lp_reg & 0xc000)
+ XGBE_SET_LP_ADV(lks, 10000baseR_FEC);
+
+ return mode;
+}
+
+static enum xgbe_mode xgbe_phy_an73_outcome(struct xgbe_prv_data *pdata)
+{
+ struct ethtool_link_ksettings *lks = &pdata->phy.lks;
+ enum xgbe_mode mode;
+ unsigned int ad_reg, lp_reg;
+
+ XGBE_SET_LP_ADV(lks, Autoneg);
+ XGBE_SET_LP_ADV(lks, Backplane);
+
+ /* Compare Advertisement and Link Partner register 1 */
+ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
+ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA);
+ if (lp_reg & 0x400)
+ XGBE_SET_LP_ADV(lks, Pause);
+ if (lp_reg & 0x800)
+ XGBE_SET_LP_ADV(lks, Asym_Pause);
+
+ if (pdata->phy.pause_autoneg) {
+ /* Set flow control based on auto-negotiation result */
+ pdata->phy.tx_pause = 0;
+ pdata->phy.rx_pause = 0;
+
+ if (ad_reg & lp_reg & 0x400) {
+ pdata->phy.tx_pause = 1;
+ pdata->phy.rx_pause = 1;
+ } else if (ad_reg & lp_reg & 0x800) {
+ if (ad_reg & 0x400)
+ pdata->phy.rx_pause = 1;
+ else if (lp_reg & 0x400)
+ pdata->phy.tx_pause = 1;
+ }
+ }
+
+ /* Compare Advertisement and Link Partner register 2 */
+ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1);
+ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 1);
+ if (lp_reg & 0x80)
+ XGBE_SET_LP_ADV(lks, 10000baseKR_Full);
+ if (lp_reg & 0x20)
+ XGBE_SET_LP_ADV(lks, 1000baseKX_Full);
+
+ ad_reg &= lp_reg;
+ if (ad_reg & 0x80)
+ mode = XGBE_MODE_KR;
+ else if (ad_reg & 0x20)
+ mode = XGBE_MODE_KX_1000;
+ else
+ mode = XGBE_MODE_UNKNOWN;
+
+ /* Compare Advertisement and Link Partner register 3 */
+ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
+ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 2);
+ if (lp_reg & 0xc000)
+ XGBE_SET_LP_ADV(lks, 10000baseR_FEC);
+
+ return mode;
+}
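+
+/* Editor's note (assumed bit mapping): in the clause 73 base page compared
+ * above, bit 10 (0x400) is symmetric PAUSE (C0) and bit 11 (0x800) is the
+ * asymmetric direction bit (C1); resolution follows 802.3 Annex 28B: pause
+ * both ways when both ends advertise C0, otherwise asymmetric pause in the
+ * direction implied by whichever end advertised C0 alongside the shared C1.
+ */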
+
+static enum xgbe_mode xgbe_phy_an_outcome(struct xgbe_prv_data *pdata)
+{
+ switch (pdata->an_mode) {
+ case XGBE_AN_MODE_CL73:
+ return xgbe_phy_an73_outcome(pdata);
+ case XGBE_AN_MODE_CL73_REDRV:
+ return xgbe_phy_an73_redrv_outcome(pdata);
+ case XGBE_AN_MODE_CL37:
+ return xgbe_phy_an37_outcome(pdata);
+ case XGBE_AN_MODE_CL37_SGMII:
+ return xgbe_phy_an37_sgmii_outcome(pdata);
+ default:
+ return XGBE_MODE_UNKNOWN;
+ }
+}
+
+static void xgbe_phy_an_advertising(struct xgbe_prv_data *pdata,
+ struct ethtool_link_ksettings *dlks)
+{
+ struct ethtool_link_ksettings *slks = &pdata->phy.lks;
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ XGBE_LM_COPY(dlks, advertising, slks, advertising);
+
+ /* Without a re-driver, just return current advertising */
+ if (!phy_data->redrv)
+ return;
+
+ /* With the KR re-driver we need to advertise a single speed */
+ XGBE_CLR_ADV(dlks, 1000baseKX_Full);
+ XGBE_CLR_ADV(dlks, 10000baseKR_Full);
+
+	/* Advertise FEC support if present */
+ if (pdata->fec_ability & MDIO_PMA_10GBR_FECABLE_ABLE)
+ XGBE_SET_ADV(dlks, 10000baseR_FEC);
+
+ switch (phy_data->port_mode) {
+ case XGBE_PORT_MODE_BACKPLANE:
+ case XGBE_PORT_MODE_BACKPLANE_NO_AUTONEG:
+ XGBE_SET_ADV(dlks, 10000baseKR_Full);
+ break;
+ case XGBE_PORT_MODE_BACKPLANE_2500:
+ XGBE_SET_ADV(dlks, 1000baseKX_Full);
+ break;
+ case XGBE_PORT_MODE_1000BASE_T:
+ case XGBE_PORT_MODE_1000BASE_X:
+ case XGBE_PORT_MODE_NBASE_T:
+ XGBE_SET_ADV(dlks, 1000baseKX_Full);
+ break;
+ case XGBE_PORT_MODE_10GBASE_T:
+ if (phy_data->phydev &&
+ (phy_data->phydev->speed == SPEED_10000))
+ XGBE_SET_ADV(dlks, 10000baseKR_Full);
+ else
+ XGBE_SET_ADV(dlks, 1000baseKX_Full);
+ break;
+ case XGBE_PORT_MODE_10GBASE_R:
+ XGBE_SET_ADV(dlks, 10000baseKR_Full);
+ break;
+ case XGBE_PORT_MODE_SFP:
+ switch (phy_data->sfp_base) {
+ case XGBE_SFP_BASE_1000_T:
+ case XGBE_SFP_BASE_1000_SX:
+ case XGBE_SFP_BASE_1000_LX:
+ case XGBE_SFP_BASE_1000_CX:
+ XGBE_SET_ADV(dlks, 1000baseKX_Full);
+ break;
+ default:
+ XGBE_SET_ADV(dlks, 10000baseKR_Full);
+ break;
+ }
+ break;
+ default:
+ XGBE_SET_ADV(dlks, 10000baseKR_Full);
+ break;
+ }
+}
+
+static int xgbe_phy_an_config(struct xgbe_prv_data *pdata)
+{
+ struct ethtool_link_ksettings *lks = &pdata->phy.lks;
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ int ret;
+
+ ret = xgbe_phy_find_phy_device(pdata);
+ if (ret)
+ return ret;
+
+ if (!phy_data->phydev)
+ return 0;
+
+ phy_data->phydev->autoneg = pdata->phy.autoneg;
+ linkmode_and(phy_data->phydev->advertising,
+ phy_data->phydev->supported,
+ lks->link_modes.advertising);
+
+ if (pdata->phy.autoneg != AUTONEG_ENABLE) {
+ phy_data->phydev->speed = pdata->phy.speed;
+ phy_data->phydev->duplex = pdata->phy.duplex;
+ }
+
+ ret = phy_start_aneg(phy_data->phydev);
+
+ return ret;
+}
+
+static enum xgbe_an_mode xgbe_phy_an_sfp_mode(struct xgbe_phy_data *phy_data)
+{
+ switch (phy_data->sfp_base) {
+ case XGBE_SFP_BASE_1000_T:
+ return XGBE_AN_MODE_CL37_SGMII;
+ case XGBE_SFP_BASE_1000_SX:
+ case XGBE_SFP_BASE_1000_LX:
+ case XGBE_SFP_BASE_1000_CX:
+ return XGBE_AN_MODE_CL37;
+ default:
+ return XGBE_AN_MODE_NONE;
+ }
+}
+
+static enum xgbe_an_mode xgbe_phy_an_mode(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ /* A KR re-driver will always require CL73 AN */
+ if (phy_data->redrv)
+ return XGBE_AN_MODE_CL73_REDRV;
+
+ switch (phy_data->port_mode) {
+ case XGBE_PORT_MODE_BACKPLANE:
+ return XGBE_AN_MODE_CL73;
+ case XGBE_PORT_MODE_BACKPLANE_NO_AUTONEG:
+ case XGBE_PORT_MODE_BACKPLANE_2500:
+ return XGBE_AN_MODE_NONE;
+ case XGBE_PORT_MODE_1000BASE_T:
+ return XGBE_AN_MODE_CL37_SGMII;
+ case XGBE_PORT_MODE_1000BASE_X:
+ return XGBE_AN_MODE_CL37;
+ case XGBE_PORT_MODE_NBASE_T:
+ return XGBE_AN_MODE_CL37_SGMII;
+ case XGBE_PORT_MODE_10GBASE_T:
+ return XGBE_AN_MODE_CL73;
+ case XGBE_PORT_MODE_10GBASE_R:
+ return XGBE_AN_MODE_NONE;
+ case XGBE_PORT_MODE_SFP:
+ return xgbe_phy_an_sfp_mode(phy_data);
+ default:
+ return XGBE_AN_MODE_NONE;
+ }
+}
+
+static int xgbe_phy_set_redrv_mode_mdio(struct xgbe_prv_data *pdata,
+ enum xgbe_phy_redrv_mode mode)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ u16 redrv_reg, redrv_val;
+
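+	/* Each re-driver lane appears at a 0x1000 register stride */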
+ redrv_reg = XGBE_PHY_REDRV_MODE_REG + (phy_data->redrv_lane * 0x1000);
+ redrv_val = (u16)mode;
+
+ return pdata->hw_if.write_ext_mii_regs(pdata, phy_data->redrv_addr,
+ redrv_reg, redrv_val);
+}
+
+static int xgbe_phy_set_redrv_mode_i2c(struct xgbe_prv_data *pdata,
+ enum xgbe_phy_redrv_mode mode)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int redrv_reg;
+ int ret;
+
+ /* Calculate the register to write */
+ redrv_reg = XGBE_PHY_REDRV_MODE_REG + (phy_data->redrv_lane * 0x1000);
+
+ ret = xgbe_phy_redrv_write(pdata, redrv_reg, mode);
+
+ return ret;
+}
+
+static void xgbe_phy_set_redrv_mode(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ enum xgbe_phy_redrv_mode mode;
+ int ret;
+
+ if (!phy_data->redrv)
+ return;
+
+ mode = XGBE_PHY_REDRV_MODE_CX;
+ if ((phy_data->port_mode == XGBE_PORT_MODE_SFP) &&
+ (phy_data->sfp_base != XGBE_SFP_BASE_1000_CX) &&
+ (phy_data->sfp_base != XGBE_SFP_BASE_10000_CR))
+ mode = XGBE_PHY_REDRV_MODE_SR;
+
+ ret = xgbe_phy_get_comm_ownership(pdata);
+ if (ret)
+ return;
+
+ if (phy_data->redrv_if)
+ xgbe_phy_set_redrv_mode_i2c(pdata, mode);
+ else
+ xgbe_phy_set_redrv_mode_mdio(pdata, mode);
+
+ xgbe_phy_put_comm_ownership(pdata);
+}
+
+static void xgbe_phy_rx_reset(struct xgbe_prv_data *pdata)
+{
+ int reg;
+
+ reg = XMDIO_READ_BITS(pdata, MDIO_MMD_PCS, MDIO_PCS_DIGITAL_STAT,
+ XGBE_PCS_PSEQ_STATE_MASK);
+ if (reg == XGBE_PCS_PSEQ_STATE_POWER_GOOD) {
+		/* Mailbox command timed out, reset of RX block is required.
+		 * This can be done by asserting the reset bit and waiting for
+		 * its completion.
+		 */
+ XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_RX_CTRL1,
+ XGBE_PMA_RX_RST_0_MASK, XGBE_PMA_RX_RST_0_RESET_ON);
+ ndelay(20);
+ XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_RX_CTRL1,
+ XGBE_PMA_RX_RST_0_MASK, XGBE_PMA_RX_RST_0_RESET_OFF);
+ usleep_range(40, 50);
+ netif_err(pdata, link, pdata->netdev, "firmware mailbox reset performed\n");
+ }
+}
+
+static void xgbe_phy_pll_ctrl(struct xgbe_prv_data *pdata, bool enable)
+{
+ XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_VEND2_PMA_MISC_CTRL0,
+ XGBE_PMA_PLL_CTRL_MASK,
+ enable ? XGBE_PMA_PLL_CTRL_ENABLE
+ : XGBE_PMA_PLL_CTRL_DISABLE);
+
+ /* Wait for command to complete */
+ usleep_range(100, 200);
+}
+
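+/* Issue a rate-change request to the SerDes firmware through the
+ * mailbox scratch registers. Commands used by the callers below:
+ * 0 = power off, 1 = 100M/1G (sub-command selects SGMII/KX/X),
+ * 2 = 2.5G KX, 3 = 10G SFI, 4 = 10G KR, 5 = receiver reset cycle.
+ */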
+static void xgbe_phy_perform_ratechange(struct xgbe_prv_data *pdata,
+ unsigned int cmd, unsigned int sub_cmd)
+{
+ unsigned int s0 = 0;
+ unsigned int wait;
+
+ /* Disable PLL re-initialization during FW command processing */
+ xgbe_phy_pll_ctrl(pdata, false);
+
+ /* Log if a previous command did not complete */
+ if (XP_IOREAD_BITS(pdata, XP_DRIVER_INT_RO, STATUS)) {
+ netif_dbg(pdata, link, pdata->netdev,
+ "firmware mailbox not ready for command\n");
+ xgbe_phy_rx_reset(pdata);
+ }
+
+ /* Construct the command */
+ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, COMMAND, cmd);
+ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, sub_cmd);
+
+ /* Issue the command */
+ XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, s0);
+ XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0);
+ XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1);
+
+ /* Wait for command to complete */
+ wait = XGBE_RATECHANGE_COUNT;
+ while (wait--) {
+ if (!XP_IOREAD_BITS(pdata, XP_DRIVER_INT_RO, STATUS))
+ goto reenable_pll;
+
+ usleep_range(1000, 2000);
+ }
+
+ netif_dbg(pdata, link, pdata->netdev,
+ "firmware mailbox command did not complete\n");
+
+ /* Reset on error */
+ xgbe_phy_rx_reset(pdata);
+
+reenable_pll:
+ /* Enable PLL re-initialization */
+ xgbe_phy_pll_ctrl(pdata, true);
+}
+
+static void xgbe_phy_rrc(struct xgbe_prv_data *pdata)
+{
+ /* Receiver Reset Cycle */
+ xgbe_phy_perform_ratechange(pdata, 5, 0);
+
+ netif_dbg(pdata, link, pdata->netdev, "receiver reset complete\n");
+}
+
+static void xgbe_phy_power_off(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ /* Power off */
+ xgbe_phy_perform_ratechange(pdata, 0, 0);
+
+ phy_data->cur_mode = XGBE_MODE_UNKNOWN;
+
+ netif_dbg(pdata, link, pdata->netdev, "phy powered off\n");
+}
+
+static void xgbe_phy_sfi_mode(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ xgbe_phy_set_redrv_mode(pdata);
+
+ /* 10G/SFI */
+ if (phy_data->sfp_cable != XGBE_SFP_CABLE_PASSIVE) {
+ xgbe_phy_perform_ratechange(pdata, 3, 0);
+ } else {
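+		/* Passive DAC cable: select the sub-command by cable length
+		 * (<= 1m, <= 3m, longer)
+		 */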
+ if (phy_data->sfp_cable_len <= 1)
+ xgbe_phy_perform_ratechange(pdata, 3, 1);
+ else if (phy_data->sfp_cable_len <= 3)
+ xgbe_phy_perform_ratechange(pdata, 3, 2);
+ else
+ xgbe_phy_perform_ratechange(pdata, 3, 3);
+ }
+
+ phy_data->cur_mode = XGBE_MODE_SFI;
+
+ netif_dbg(pdata, link, pdata->netdev, "10GbE SFI mode set\n");
+}
+
+static void xgbe_phy_x_mode(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ xgbe_phy_set_redrv_mode(pdata);
+
+ /* 1G/X */
+ xgbe_phy_perform_ratechange(pdata, 1, 3);
+
+ phy_data->cur_mode = XGBE_MODE_X;
+
+ netif_dbg(pdata, link, pdata->netdev, "1GbE X mode set\n");
+}
+
+static void xgbe_phy_sgmii_1000_mode(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ xgbe_phy_set_redrv_mode(pdata);
+
+ /* 1G/SGMII */
+ xgbe_phy_perform_ratechange(pdata, 1, 2);
+
+ phy_data->cur_mode = XGBE_MODE_SGMII_1000;
+
+ netif_dbg(pdata, link, pdata->netdev, "1GbE SGMII mode set\n");
+}
+
+static void xgbe_phy_sgmii_100_mode(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ xgbe_phy_set_redrv_mode(pdata);
+
+ /* 100M/SGMII */
+ xgbe_phy_perform_ratechange(pdata, 1, 1);
+
+ phy_data->cur_mode = XGBE_MODE_SGMII_100;
+
+ netif_dbg(pdata, link, pdata->netdev, "100MbE SGMII mode set\n");
+}
+
+static void xgbe_phy_kr_mode(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ xgbe_phy_set_redrv_mode(pdata);
+
+ /* 10G/KR */
+ xgbe_phy_perform_ratechange(pdata, 4, 0);
+
+ phy_data->cur_mode = XGBE_MODE_KR;
+
+ netif_dbg(pdata, link, pdata->netdev, "10GbE KR mode set\n");
+}
+
+static void xgbe_phy_kx_2500_mode(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ xgbe_phy_set_redrv_mode(pdata);
+
+ /* 2.5G/KX */
+ xgbe_phy_perform_ratechange(pdata, 2, 0);
+
+ phy_data->cur_mode = XGBE_MODE_KX_2500;
+
+ netif_dbg(pdata, link, pdata->netdev, "2.5GbE KX mode set\n");
+}
+
+static void xgbe_phy_kx_1000_mode(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ xgbe_phy_set_redrv_mode(pdata);
+
+ /* 1G/KX */
+ xgbe_phy_perform_ratechange(pdata, 1, 3);
+
+ phy_data->cur_mode = XGBE_MODE_KX_1000;
+
+ netif_dbg(pdata, link, pdata->netdev, "1GbE KX mode set\n");
+}
+
+static enum xgbe_mode xgbe_phy_cur_mode(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ return phy_data->cur_mode;
+}
+
+static enum xgbe_mode xgbe_phy_switch_baset_mode(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ /* No switching if not 10GBase-T */
+ if (phy_data->port_mode != XGBE_PORT_MODE_10GBASE_T)
+ return xgbe_phy_cur_mode(pdata);
+
+ switch (xgbe_phy_cur_mode(pdata)) {
+ case XGBE_MODE_SGMII_100:
+ case XGBE_MODE_SGMII_1000:
+ return XGBE_MODE_KR;
+ case XGBE_MODE_KR:
+ default:
+ return XGBE_MODE_SGMII_1000;
+ }
+}
+
+static enum xgbe_mode xgbe_phy_switch_bp_2500_mode(struct xgbe_prv_data *pdata)
+{
+ return XGBE_MODE_KX_2500;
+}
+
+static enum xgbe_mode xgbe_phy_switch_bp_mode(struct xgbe_prv_data *pdata)
+{
+ /* If we are in KR switch to KX, and vice-versa */
+ switch (xgbe_phy_cur_mode(pdata)) {
+ case XGBE_MODE_KX_1000:
+ return XGBE_MODE_KR;
+ case XGBE_MODE_KR:
+ default:
+ return XGBE_MODE_KX_1000;
+ }
+}
+
+static enum xgbe_mode xgbe_phy_switch_mode(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ switch (phy_data->port_mode) {
+ case XGBE_PORT_MODE_BACKPLANE:
+ case XGBE_PORT_MODE_BACKPLANE_NO_AUTONEG:
+ return xgbe_phy_switch_bp_mode(pdata);
+ case XGBE_PORT_MODE_BACKPLANE_2500:
+ return xgbe_phy_switch_bp_2500_mode(pdata);
+ case XGBE_PORT_MODE_1000BASE_T:
+ case XGBE_PORT_MODE_NBASE_T:
+ case XGBE_PORT_MODE_10GBASE_T:
+ return xgbe_phy_switch_baset_mode(pdata);
+ case XGBE_PORT_MODE_1000BASE_X:
+ case XGBE_PORT_MODE_10GBASE_R:
+ case XGBE_PORT_MODE_SFP:
+ /* No switching, so just return current mode */
+ return xgbe_phy_cur_mode(pdata);
+ default:
+ return XGBE_MODE_UNKNOWN;
+ }
+}
+
+static enum xgbe_mode xgbe_phy_get_basex_mode(struct xgbe_phy_data *phy_data,
+ int speed)
+{
+ switch (speed) {
+ case SPEED_1000:
+ return XGBE_MODE_X;
+ case SPEED_10000:
+ return XGBE_MODE_KR;
+ default:
+ return XGBE_MODE_UNKNOWN;
+ }
+}
+
+static enum xgbe_mode xgbe_phy_get_baset_mode(struct xgbe_phy_data *phy_data,
+ int speed)
+{
+ switch (speed) {
+ case SPEED_100:
+ return XGBE_MODE_SGMII_100;
+ case SPEED_1000:
+ return XGBE_MODE_SGMII_1000;
+ case SPEED_2500:
+ return XGBE_MODE_KX_2500;
+ case SPEED_10000:
+ return XGBE_MODE_KR;
+ default:
+ return XGBE_MODE_UNKNOWN;
+ }
+}
+
+static enum xgbe_mode xgbe_phy_get_sfp_mode(struct xgbe_phy_data *phy_data,
+ int speed)
+{
+ switch (speed) {
+ case SPEED_100:
+ return XGBE_MODE_SGMII_100;
+ case SPEED_1000:
+ if (phy_data->sfp_base == XGBE_SFP_BASE_1000_T)
+ return XGBE_MODE_SGMII_1000;
+ else
+ return XGBE_MODE_X;
+ case SPEED_10000:
+ case SPEED_UNKNOWN:
+ return XGBE_MODE_SFI;
+ default:
+ return XGBE_MODE_UNKNOWN;
+ }
+}
+
+static enum xgbe_mode xgbe_phy_get_bp_2500_mode(int speed)
+{
+ switch (speed) {
+ case SPEED_2500:
+ return XGBE_MODE_KX_2500;
+ default:
+ return XGBE_MODE_UNKNOWN;
+ }
+}
+
+static enum xgbe_mode xgbe_phy_get_bp_mode(int speed)
+{
+ switch (speed) {
+ case SPEED_1000:
+ return XGBE_MODE_KX_1000;
+ case SPEED_10000:
+ return XGBE_MODE_KR;
+ default:
+ return XGBE_MODE_UNKNOWN;
+ }
+}
+
+static enum xgbe_mode xgbe_phy_get_mode(struct xgbe_prv_data *pdata,
+ int speed)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ switch (phy_data->port_mode) {
+ case XGBE_PORT_MODE_BACKPLANE:
+ case XGBE_PORT_MODE_BACKPLANE_NO_AUTONEG:
+ return xgbe_phy_get_bp_mode(speed);
+ case XGBE_PORT_MODE_BACKPLANE_2500:
+ return xgbe_phy_get_bp_2500_mode(speed);
+ case XGBE_PORT_MODE_1000BASE_T:
+ case XGBE_PORT_MODE_NBASE_T:
+ case XGBE_PORT_MODE_10GBASE_T:
+ return xgbe_phy_get_baset_mode(phy_data, speed);
+ case XGBE_PORT_MODE_1000BASE_X:
+ case XGBE_PORT_MODE_10GBASE_R:
+ return xgbe_phy_get_basex_mode(phy_data, speed);
+ case XGBE_PORT_MODE_SFP:
+ return xgbe_phy_get_sfp_mode(phy_data, speed);
+ default:
+ return XGBE_MODE_UNKNOWN;
+ }
+}
+
+static void xgbe_phy_set_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode)
+{
+ switch (mode) {
+ case XGBE_MODE_KX_1000:
+ xgbe_phy_kx_1000_mode(pdata);
+ break;
+ case XGBE_MODE_KX_2500:
+ xgbe_phy_kx_2500_mode(pdata);
+ break;
+ case XGBE_MODE_KR:
+ xgbe_phy_kr_mode(pdata);
+ break;
+ case XGBE_MODE_SGMII_100:
+ xgbe_phy_sgmii_100_mode(pdata);
+ break;
+ case XGBE_MODE_SGMII_1000:
+ xgbe_phy_sgmii_1000_mode(pdata);
+ break;
+ case XGBE_MODE_X:
+ xgbe_phy_x_mode(pdata);
+ break;
+ case XGBE_MODE_SFI:
+ xgbe_phy_sfi_mode(pdata);
+ break;
+ default:
+ break;
+ }
+}
+
+static bool xgbe_phy_check_mode(struct xgbe_prv_data *pdata,
+ enum xgbe_mode mode, bool advert)
+{
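+	/* With auto-negotiation a mode is usable if it is advertised; with
+	 * a fixed speed it must match the mode derived from that speed
+	 */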
+ if (pdata->phy.autoneg == AUTONEG_ENABLE) {
+ return advert;
+ } else {
+ enum xgbe_mode cur_mode;
+
+ cur_mode = xgbe_phy_get_mode(pdata, pdata->phy.speed);
+ if (cur_mode == mode)
+ return true;
+ }
+
+ return false;
+}
+
+static bool xgbe_phy_use_basex_mode(struct xgbe_prv_data *pdata,
+ enum xgbe_mode mode)
+{
+ struct ethtool_link_ksettings *lks = &pdata->phy.lks;
+
+ switch (mode) {
+ case XGBE_MODE_X:
+ return xgbe_phy_check_mode(pdata, mode,
+ XGBE_ADV(lks, 1000baseX_Full));
+ case XGBE_MODE_KR:
+ return xgbe_phy_check_mode(pdata, mode,
+ XGBE_ADV(lks, 10000baseKR_Full));
+ default:
+ return false;
+ }
+}
+
+static bool xgbe_phy_use_baset_mode(struct xgbe_prv_data *pdata,
+ enum xgbe_mode mode)
+{
+ struct ethtool_link_ksettings *lks = &pdata->phy.lks;
+
+ switch (mode) {
+ case XGBE_MODE_SGMII_100:
+ return xgbe_phy_check_mode(pdata, mode,
+ XGBE_ADV(lks, 100baseT_Full));
+ case XGBE_MODE_SGMII_1000:
+ return xgbe_phy_check_mode(pdata, mode,
+ XGBE_ADV(lks, 1000baseT_Full));
+ case XGBE_MODE_KX_2500:
+ return xgbe_phy_check_mode(pdata, mode,
+ XGBE_ADV(lks, 2500baseT_Full));
+ case XGBE_MODE_KR:
+ return xgbe_phy_check_mode(pdata, mode,
+ XGBE_ADV(lks, 10000baseT_Full));
+ default:
+ return false;
+ }
+}
+
+static bool xgbe_phy_use_sfp_mode(struct xgbe_prv_data *pdata,
+ enum xgbe_mode mode)
+{
+ struct ethtool_link_ksettings *lks = &pdata->phy.lks;
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ switch (mode) {
+ case XGBE_MODE_X:
+ if (phy_data->sfp_base == XGBE_SFP_BASE_1000_T)
+ return false;
+ return xgbe_phy_check_mode(pdata, mode,
+ XGBE_ADV(lks, 1000baseX_Full));
+ case XGBE_MODE_SGMII_100:
+ if (phy_data->sfp_base != XGBE_SFP_BASE_1000_T)
+ return false;
+ return xgbe_phy_check_mode(pdata, mode,
+ XGBE_ADV(lks, 100baseT_Full));
+ case XGBE_MODE_SGMII_1000:
+ if (phy_data->sfp_base != XGBE_SFP_BASE_1000_T)
+ return false;
+ return xgbe_phy_check_mode(pdata, mode,
+ XGBE_ADV(lks, 1000baseT_Full));
+ case XGBE_MODE_SFI:
+ if (phy_data->sfp_mod_absent)
+ return true;
+ return xgbe_phy_check_mode(pdata, mode,
+ XGBE_ADV(lks, 10000baseSR_Full) ||
+ XGBE_ADV(lks, 10000baseLR_Full) ||
+ XGBE_ADV(lks, 10000baseLRM_Full) ||
+ XGBE_ADV(lks, 10000baseER_Full) ||
+ XGBE_ADV(lks, 10000baseCR_Full));
+ default:
+ return false;
+ }
+}
+
+static bool xgbe_phy_use_bp_2500_mode(struct xgbe_prv_data *pdata,
+ enum xgbe_mode mode)
+{
+ struct ethtool_link_ksettings *lks = &pdata->phy.lks;
+
+ switch (mode) {
+ case XGBE_MODE_KX_2500:
+ return xgbe_phy_check_mode(pdata, mode,
+ XGBE_ADV(lks, 2500baseX_Full));
+ default:
+ return false;
+ }
+}
+
+static bool xgbe_phy_use_bp_mode(struct xgbe_prv_data *pdata,
+ enum xgbe_mode mode)
+{
+ struct ethtool_link_ksettings *lks = &pdata->phy.lks;
+
+ switch (mode) {
+ case XGBE_MODE_KX_1000:
+ return xgbe_phy_check_mode(pdata, mode,
+ XGBE_ADV(lks, 1000baseKX_Full));
+ case XGBE_MODE_KR:
+ return xgbe_phy_check_mode(pdata, mode,
+ XGBE_ADV(lks, 10000baseKR_Full));
+ default:
+ return false;
+ }
+}
+
+static bool xgbe_phy_use_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ switch (phy_data->port_mode) {
+ case XGBE_PORT_MODE_BACKPLANE:
+ case XGBE_PORT_MODE_BACKPLANE_NO_AUTONEG:
+ return xgbe_phy_use_bp_mode(pdata, mode);
+ case XGBE_PORT_MODE_BACKPLANE_2500:
+ return xgbe_phy_use_bp_2500_mode(pdata, mode);
+ case XGBE_PORT_MODE_1000BASE_T:
+ case XGBE_PORT_MODE_NBASE_T:
+ case XGBE_PORT_MODE_10GBASE_T:
+ return xgbe_phy_use_baset_mode(pdata, mode);
+ case XGBE_PORT_MODE_1000BASE_X:
+ case XGBE_PORT_MODE_10GBASE_R:
+ return xgbe_phy_use_basex_mode(pdata, mode);
+ case XGBE_PORT_MODE_SFP:
+ return xgbe_phy_use_sfp_mode(pdata, mode);
+ default:
+ return false;
+ }
+}
+
+static bool xgbe_phy_valid_speed_basex_mode(struct xgbe_phy_data *phy_data,
+ int speed)
+{
+ switch (speed) {
+ case SPEED_1000:
+ return (phy_data->port_mode == XGBE_PORT_MODE_1000BASE_X);
+ case SPEED_10000:
+ return (phy_data->port_mode == XGBE_PORT_MODE_10GBASE_R);
+ default:
+ return false;
+ }
+}
+
+static bool xgbe_phy_valid_speed_baset_mode(struct xgbe_phy_data *phy_data,
+ int speed)
+{
+ switch (speed) {
+ case SPEED_100:
+ case SPEED_1000:
+ return true;
+ case SPEED_2500:
+ return (phy_data->port_mode == XGBE_PORT_MODE_NBASE_T);
+ case SPEED_10000:
+ return (phy_data->port_mode == XGBE_PORT_MODE_10GBASE_T);
+ default:
+ return false;
+ }
+}
+
+static bool xgbe_phy_valid_speed_sfp_mode(struct xgbe_phy_data *phy_data,
+ int speed)
+{
+ switch (speed) {
+ case SPEED_100:
+ return (phy_data->sfp_speed == XGBE_SFP_SPEED_100_1000);
+ case SPEED_1000:
+ return ((phy_data->sfp_speed == XGBE_SFP_SPEED_100_1000) ||
+ (phy_data->sfp_speed == XGBE_SFP_SPEED_1000));
+ case SPEED_10000:
+ return (phy_data->sfp_speed == XGBE_SFP_SPEED_10000);
+ default:
+ return false;
+ }
+}
+
+static bool xgbe_phy_valid_speed_bp_2500_mode(int speed)
+{
+ switch (speed) {
+ case SPEED_2500:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool xgbe_phy_valid_speed_bp_mode(int speed)
+{
+ switch (speed) {
+ case SPEED_1000:
+ case SPEED_10000:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool xgbe_phy_valid_speed(struct xgbe_prv_data *pdata, int speed)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ switch (phy_data->port_mode) {
+ case XGBE_PORT_MODE_BACKPLANE:
+ case XGBE_PORT_MODE_BACKPLANE_NO_AUTONEG:
+ return xgbe_phy_valid_speed_bp_mode(speed);
+ case XGBE_PORT_MODE_BACKPLANE_2500:
+ return xgbe_phy_valid_speed_bp_2500_mode(speed);
+ case XGBE_PORT_MODE_1000BASE_T:
+ case XGBE_PORT_MODE_NBASE_T:
+ case XGBE_PORT_MODE_10GBASE_T:
+ return xgbe_phy_valid_speed_baset_mode(phy_data, speed);
+ case XGBE_PORT_MODE_1000BASE_X:
+ case XGBE_PORT_MODE_10GBASE_R:
+ return xgbe_phy_valid_speed_basex_mode(phy_data, speed);
+ case XGBE_PORT_MODE_SFP:
+ return xgbe_phy_valid_speed_sfp_mode(phy_data, speed);
+ default:
+ return false;
+ }
+}
+
+static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int reg;
+ int ret;
+
+ *an_restart = 0;
+
+ if (phy_data->port_mode == XGBE_PORT_MODE_SFP) {
+ /* Check SFP signals */
+ xgbe_phy_sfp_detect(pdata);
+
+ if (phy_data->sfp_changed) {
+ *an_restart = 1;
+ return 0;
+ }
+
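+		/* No module or RX loss-of-signal: report link down */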
+ if (phy_data->sfp_mod_absent || phy_data->sfp_rx_los)
+ return 0;
+ }
+
+ if (phy_data->phydev) {
+ /* Check external PHY */
+ ret = phy_read_status(phy_data->phydev);
+ if (ret < 0)
+ return 0;
+
+ if ((pdata->phy.autoneg == AUTONEG_ENABLE) &&
+ !phy_aneg_done(phy_data->phydev))
+ return 0;
+
+ if (!phy_data->phydev->link)
+ return 0;
+ }
+
+ /* Link status is latched low, so read once to clear
+ * and then read again to get current state
+ */
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
+ if (reg & MDIO_STAT1_LSTATUS)
+ return 1;
+
+ if (pdata->phy.autoneg == AUTONEG_ENABLE &&
+ phy_data->port_mode == XGBE_PORT_MODE_BACKPLANE) {
+ if (!test_bit(XGBE_LINK_INIT, &pdata->dev_state)) {
+ netif_carrier_off(pdata->netdev);
+ *an_restart = 1;
+ }
+ }
+
+ /* No link, attempt a receiver reset cycle */
+ if (phy_data->rrc_count++ > XGBE_RRC_FREQUENCY) {
+ phy_data->rrc_count = 0;
+ xgbe_phy_rrc(pdata);
+ }
+
+ return 0;
+}
+
+static void xgbe_phy_sfp_gpio_setup(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ phy_data->sfp_gpio_address = XGBE_GPIO_ADDRESS_PCA9555 +
+ XP_GET_BITS(pdata->pp3, XP_PROP_3,
+ GPIO_ADDR);
+
+ phy_data->sfp_gpio_mask = XP_GET_BITS(pdata->pp3, XP_PROP_3,
+ GPIO_MASK);
+
+ phy_data->sfp_gpio_rx_los = XP_GET_BITS(pdata->pp3, XP_PROP_3,
+ GPIO_RX_LOS);
+ phy_data->sfp_gpio_tx_fault = XP_GET_BITS(pdata->pp3, XP_PROP_3,
+ GPIO_TX_FAULT);
+ phy_data->sfp_gpio_mod_absent = XP_GET_BITS(pdata->pp3, XP_PROP_3,
+ GPIO_MOD_ABS);
+ phy_data->sfp_gpio_rate_select = XP_GET_BITS(pdata->pp3, XP_PROP_3,
+ GPIO_RATE_SELECT);
+
+ if (netif_msg_probe(pdata)) {
+ dev_dbg(pdata->dev, "SFP: gpio_address=%#x\n",
+ phy_data->sfp_gpio_address);
+ dev_dbg(pdata->dev, "SFP: gpio_mask=%#x\n",
+ phy_data->sfp_gpio_mask);
+ dev_dbg(pdata->dev, "SFP: gpio_rx_los=%u\n",
+ phy_data->sfp_gpio_rx_los);
+ dev_dbg(pdata->dev, "SFP: gpio_tx_fault=%u\n",
+ phy_data->sfp_gpio_tx_fault);
+ dev_dbg(pdata->dev, "SFP: gpio_mod_absent=%u\n",
+ phy_data->sfp_gpio_mod_absent);
+ dev_dbg(pdata->dev, "SFP: gpio_rate_select=%u\n",
+ phy_data->sfp_gpio_rate_select);
+ }
+}
+
+static void xgbe_phy_sfp_comm_setup(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int mux_addr_hi, mux_addr_lo;
+
+ mux_addr_hi = XP_GET_BITS(pdata->pp4, XP_PROP_4, MUX_ADDR_HI);
+ mux_addr_lo = XP_GET_BITS(pdata->pp4, XP_PROP_4, MUX_ADDR_LO);
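+	/* A low address of XGBE_SFP_DIRECT means the SFP is attached
+	 * directly, without an I2C mux
+	 */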
+ if (mux_addr_lo == XGBE_SFP_DIRECT)
+ return;
+
+ phy_data->sfp_comm = XGBE_SFP_COMM_PCA9545;
+ phy_data->sfp_mux_address = (mux_addr_hi << 2) + mux_addr_lo;
+ phy_data->sfp_mux_channel = XP_GET_BITS(pdata->pp4, XP_PROP_4,
+ MUX_CHAN);
+
+ if (netif_msg_probe(pdata)) {
+ dev_dbg(pdata->dev, "SFP: mux_address=%#x\n",
+ phy_data->sfp_mux_address);
+ dev_dbg(pdata->dev, "SFP: mux_channel=%u\n",
+ phy_data->sfp_mux_channel);
+ }
+}
+
+static void xgbe_phy_sfp_setup(struct xgbe_prv_data *pdata)
+{
+ xgbe_phy_sfp_comm_setup(pdata);
+ xgbe_phy_sfp_gpio_setup(pdata);
+}
+
+static int xgbe_phy_int_mdio_reset(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+	int ret;
+
+ ret = pdata->hw_if.set_gpio(pdata, phy_data->mdio_reset_gpio);
+ if (ret)
+ return ret;
+
+ ret = pdata->hw_if.clr_gpio(pdata, phy_data->mdio_reset_gpio);
+
+ return ret;
+}
+
+static int xgbe_phy_i2c_mdio_reset(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ u8 gpio_reg, gpio_ports[2], gpio_data[3];
+ int ret;
+
+ /* Read the output port registers */
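+	/* PCA9555: command 2 selects output port 0; two bytes cover both
+	 * output ports
+	 */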
+ gpio_reg = 2;
+ ret = xgbe_phy_i2c_read(pdata, phy_data->mdio_reset_addr,
+ &gpio_reg, sizeof(gpio_reg),
+ gpio_ports, sizeof(gpio_ports));
+ if (ret)
+ return ret;
+
+ /* Prepare to write the GPIO data */
+ gpio_data[0] = 2;
+ gpio_data[1] = gpio_ports[0];
+ gpio_data[2] = gpio_ports[1];
+
+ /* Set the GPIO pin */
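+	/* Pins 0-7 live in output port 0, pins 8-15 in output port 1 */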
+ if (phy_data->mdio_reset_gpio < 8)
+ gpio_data[1] |= (1 << (phy_data->mdio_reset_gpio % 8));
+ else
+ gpio_data[2] |= (1 << (phy_data->mdio_reset_gpio % 8));
+
+ /* Write the output port registers */
+ ret = xgbe_phy_i2c_write(pdata, phy_data->mdio_reset_addr,
+ gpio_data, sizeof(gpio_data));
+ if (ret)
+ return ret;
+
+ /* Clear the GPIO pin */
+ if (phy_data->mdio_reset_gpio < 8)
+ gpio_data[1] &= ~(1 << (phy_data->mdio_reset_gpio % 8));
+ else
+ gpio_data[2] &= ~(1 << (phy_data->mdio_reset_gpio % 8));
+
+ /* Write the output port registers */
+ ret = xgbe_phy_i2c_write(pdata, phy_data->mdio_reset_addr,
+ gpio_data, sizeof(gpio_data));
+
+ return ret;
+}
+
+static int xgbe_phy_mdio_reset(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ int ret;
+
+ if (phy_data->conn_type != XGBE_CONN_TYPE_MDIO)
+ return 0;
+
+ ret = xgbe_phy_get_comm_ownership(pdata);
+ if (ret)
+ return ret;
+
+ if (phy_data->mdio_reset == XGBE_MDIO_RESET_I2C_GPIO)
+ ret = xgbe_phy_i2c_mdio_reset(pdata);
+ else if (phy_data->mdio_reset == XGBE_MDIO_RESET_INT_GPIO)
+ ret = xgbe_phy_int_mdio_reset(pdata);
+
+ xgbe_phy_put_comm_ownership(pdata);
+
+ return ret;
+}
+
+static bool xgbe_phy_redrv_error(struct xgbe_phy_data *phy_data)
+{
+ if (!phy_data->redrv)
+ return false;
+
+ if (phy_data->redrv_if >= XGBE_PHY_REDRV_IF_MAX)
+ return true;
+
+ switch (phy_data->redrv_model) {
+ case XGBE_PHY_REDRV_MODEL_4223:
+ if (phy_data->redrv_lane > 3)
+ return true;
+ break;
+ case XGBE_PHY_REDRV_MODEL_4227:
+ if (phy_data->redrv_lane > 1)
+ return true;
+ break;
+ default:
+ return true;
+ }
+
+ return false;
+}
+
+static int xgbe_phy_mdio_reset_setup(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ if (phy_data->conn_type != XGBE_CONN_TYPE_MDIO)
+ return 0;
+
+ phy_data->mdio_reset = XP_GET_BITS(pdata->pp3, XP_PROP_3, MDIO_RESET);
+ switch (phy_data->mdio_reset) {
+ case XGBE_MDIO_RESET_NONE:
+ case XGBE_MDIO_RESET_I2C_GPIO:
+ case XGBE_MDIO_RESET_INT_GPIO:
+ break;
+ default:
+ dev_err(pdata->dev, "unsupported MDIO reset (%#x)\n",
+ phy_data->mdio_reset);
+ return -EINVAL;
+ }
+
+ if (phy_data->mdio_reset == XGBE_MDIO_RESET_I2C_GPIO) {
+ phy_data->mdio_reset_addr = XGBE_GPIO_ADDRESS_PCA9555 +
+ XP_GET_BITS(pdata->pp3, XP_PROP_3,
+ MDIO_RESET_I2C_ADDR);
+ phy_data->mdio_reset_gpio = XP_GET_BITS(pdata->pp3, XP_PROP_3,
+ MDIO_RESET_I2C_GPIO);
+ } else if (phy_data->mdio_reset == XGBE_MDIO_RESET_INT_GPIO) {
+ phy_data->mdio_reset_gpio = XP_GET_BITS(pdata->pp3, XP_PROP_3,
+ MDIO_RESET_INT_GPIO);
+ }
+
+ return 0;
+}
+
+static bool xgbe_phy_port_mode_mismatch(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
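+	/* Report a mismatch unless at least one configured port speed is
+	 * valid for the configured port mode
+	 */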
+ switch (phy_data->port_mode) {
+ case XGBE_PORT_MODE_BACKPLANE:
+ case XGBE_PORT_MODE_BACKPLANE_NO_AUTONEG:
+ if ((phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) ||
+ (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000))
+ return false;
+ break;
+ case XGBE_PORT_MODE_BACKPLANE_2500:
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_2500)
+ return false;
+ break;
+ case XGBE_PORT_MODE_1000BASE_T:
+ if ((phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) ||
+ (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000))
+ return false;
+ break;
+ case XGBE_PORT_MODE_1000BASE_X:
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000)
+ return false;
+ break;
+ case XGBE_PORT_MODE_NBASE_T:
+ if ((phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) ||
+ (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) ||
+ (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_2500))
+ return false;
+ break;
+ case XGBE_PORT_MODE_10GBASE_T:
+ if ((phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) ||
+ (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) ||
+ (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000))
+ return false;
+ break;
+ case XGBE_PORT_MODE_10GBASE_R:
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000)
+ return false;
+ break;
+ case XGBE_PORT_MODE_SFP:
+ if ((phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) ||
+ (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) ||
+ (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000))
+ return false;
+ break;
+ default:
+ break;
+ }
+
+ return true;
+}
+
+static bool xgbe_phy_conn_type_mismatch(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ switch (phy_data->port_mode) {
+ case XGBE_PORT_MODE_BACKPLANE:
+ case XGBE_PORT_MODE_BACKPLANE_NO_AUTONEG:
+ case XGBE_PORT_MODE_BACKPLANE_2500:
+ if (phy_data->conn_type == XGBE_CONN_TYPE_BACKPLANE)
+ return false;
+ break;
+ case XGBE_PORT_MODE_1000BASE_T:
+ case XGBE_PORT_MODE_1000BASE_X:
+ case XGBE_PORT_MODE_NBASE_T:
+ case XGBE_PORT_MODE_10GBASE_T:
+ case XGBE_PORT_MODE_10GBASE_R:
+ if (phy_data->conn_type == XGBE_CONN_TYPE_MDIO)
+ return false;
+ break;
+ case XGBE_PORT_MODE_SFP:
+ if (phy_data->conn_type == XGBE_CONN_TYPE_SFP)
+ return false;
+ break;
+ default:
+ break;
+ }
+
+ return true;
+}
+
+static bool xgbe_phy_port_enabled(struct xgbe_prv_data *pdata)
+{
+ if (!XP_GET_BITS(pdata->pp0, XP_PROP_0, PORT_SPEEDS))
+ return false;
+ if (!XP_GET_BITS(pdata->pp0, XP_PROP_0, CONN_TYPE))
+ return false;
+
+ return true;
+}
+
+static void xgbe_phy_cdr_track(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ if (!pdata->debugfs_an_cdr_workaround)
+ return;
+
+ if (!phy_data->phy_cdr_notrack)
+ return;
+
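+	/* Delay before re-enabling CDR tracking; xgbe_phy_an_post() adapts
+	 * this delay when AN fails
+	 */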
+ usleep_range(phy_data->phy_cdr_delay,
+ phy_data->phy_cdr_delay + 500);
+
+ XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_VEND2_PMA_CDR_CONTROL,
+ XGBE_PMA_CDR_TRACK_EN_MASK,
+ XGBE_PMA_CDR_TRACK_EN_ON);
+
+ phy_data->phy_cdr_notrack = 0;
+}
+
+static void xgbe_phy_cdr_notrack(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ if (!pdata->debugfs_an_cdr_workaround)
+ return;
+
+ if (phy_data->phy_cdr_notrack)
+ return;
+
+ XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_VEND2_PMA_CDR_CONTROL,
+ XGBE_PMA_CDR_TRACK_EN_MASK,
+ XGBE_PMA_CDR_TRACK_EN_OFF);
+
+ xgbe_phy_rrc(pdata);
+
+ phy_data->phy_cdr_notrack = 1;
+}
+
+static void xgbe_phy_kr_training_post(struct xgbe_prv_data *pdata)
+{
+ if (!pdata->debugfs_an_cdr_track_early)
+ xgbe_phy_cdr_track(pdata);
+}
+
+static void xgbe_phy_kr_training_pre(struct xgbe_prv_data *pdata)
+{
+ if (pdata->debugfs_an_cdr_track_early)
+ xgbe_phy_cdr_track(pdata);
+}
+
+static void xgbe_phy_an_post(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ switch (pdata->an_mode) {
+ case XGBE_AN_MODE_CL73:
+ case XGBE_AN_MODE_CL73_REDRV:
+ if (phy_data->cur_mode != XGBE_MODE_KR)
+ break;
+
+ xgbe_phy_cdr_track(pdata);
+
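+		/* If AN did not complete, grow the CDR re-enable delay up to
+		 * the maximum, then wrap back to the initial value
+		 */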
+ switch (pdata->an_result) {
+ case XGBE_AN_READY:
+ case XGBE_AN_COMPLETE:
+ break;
+ default:
+ if (phy_data->phy_cdr_delay < XGBE_CDR_DELAY_MAX)
+ phy_data->phy_cdr_delay += XGBE_CDR_DELAY_INC;
+ else
+ phy_data->phy_cdr_delay = XGBE_CDR_DELAY_INIT;
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static void xgbe_phy_an_pre(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ switch (pdata->an_mode) {
+ case XGBE_AN_MODE_CL73:
+ case XGBE_AN_MODE_CL73_REDRV:
+ if (phy_data->cur_mode != XGBE_MODE_KR)
+ break;
+
+ xgbe_phy_cdr_notrack(pdata);
+ break;
+ default:
+ break;
+ }
+}
+
+static void xgbe_phy_stop(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ /* If we have an external PHY, free it */
+ xgbe_phy_free_phy_device(pdata);
+
+ /* Reset SFP data */
+ xgbe_phy_sfp_reset(phy_data);
+ xgbe_phy_sfp_mod_absent(pdata);
+
+ /* Reset CDR support */
+ xgbe_phy_cdr_track(pdata);
+
+ /* Power off the PHY */
+ xgbe_phy_power_off(pdata);
+
+ /* Stop the I2C controller */
+ pdata->i2c_if.i2c_stop(pdata);
+}
+
+static int xgbe_phy_start(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ int ret;
+
+ /* Start the I2C controller */
+ ret = pdata->i2c_if.i2c_start(pdata);
+ if (ret)
+ return ret;
+
+ /* Set the proper MDIO mode for the re-driver */
+ if (phy_data->redrv && !phy_data->redrv_if) {
+ ret = pdata->hw_if.set_ext_mii_mode(pdata, phy_data->redrv_addr,
+ XGBE_MDIO_MODE_CL22);
+ if (ret) {
+ netdev_err(pdata->netdev,
+ "redriver mdio port not compatible (%u)\n",
+ phy_data->redrv_addr);
+ return ret;
+ }
+ }
+
+ /* Start in highest supported mode */
+ xgbe_phy_set_mode(pdata, phy_data->start_mode);
+
+ /* Reset CDR support */
+ xgbe_phy_cdr_track(pdata);
+
+ /* After starting the I2C controller, we can check for an SFP */
+ switch (phy_data->port_mode) {
+ case XGBE_PORT_MODE_SFP:
+ xgbe_phy_sfp_detect(pdata);
+ break;
+ default:
+ break;
+ }
+
+ /* If we have an external PHY, start it */
+ ret = xgbe_phy_find_phy_device(pdata);
+ if (ret)
+ goto err_i2c;
+
+ return 0;
+
+err_i2c:
+ pdata->i2c_if.i2c_stop(pdata);
+
+ return ret;
+}
+
+static int xgbe_phy_reset(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ enum xgbe_mode cur_mode;
+ int ret;
+
+ /* Reset by power cycling the PHY */
+ cur_mode = phy_data->cur_mode;
+ xgbe_phy_power_off(pdata);
+ xgbe_phy_set_mode(pdata, cur_mode);
+
+ if (!phy_data->phydev)
+ return 0;
+
+ /* Reset the external PHY */
+ ret = xgbe_phy_mdio_reset(pdata);
+ if (ret)
+ return ret;
+
+ return phy_init_hw(phy_data->phydev);
+}
+
+static void xgbe_phy_exit(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ /* Unregister for driving external PHYs */
+ mdiobus_unregister(phy_data->mii);
+}
+
+static int xgbe_phy_init(struct xgbe_prv_data *pdata)
+{
+ struct ethtool_link_ksettings *lks = &pdata->phy.lks;
+ struct xgbe_phy_data *phy_data;
+ struct mii_bus *mii;
+ int ret;
+
+ /* Check if enabled */
+ if (!xgbe_phy_port_enabled(pdata)) {
+ dev_info(pdata->dev, "device is not enabled\n");
+ return -ENODEV;
+ }
+
+ /* Initialize the I2C controller */
+ ret = pdata->i2c_if.i2c_init(pdata);
+ if (ret)
+ return ret;
+
+ phy_data = devm_kzalloc(pdata->dev, sizeof(*phy_data), GFP_KERNEL);
+ if (!phy_data)
+ return -ENOMEM;
+ pdata->phy_data = phy_data;
+
+ phy_data->port_mode = XP_GET_BITS(pdata->pp0, XP_PROP_0, PORT_MODE);
+ phy_data->port_id = XP_GET_BITS(pdata->pp0, XP_PROP_0, PORT_ID);
+ phy_data->port_speeds = XP_GET_BITS(pdata->pp0, XP_PROP_0, PORT_SPEEDS);
+ phy_data->conn_type = XP_GET_BITS(pdata->pp0, XP_PROP_0, CONN_TYPE);
+ phy_data->mdio_addr = XP_GET_BITS(pdata->pp0, XP_PROP_0, MDIO_ADDR);
+ if (netif_msg_probe(pdata)) {
+ dev_dbg(pdata->dev, "port mode=%u\n", phy_data->port_mode);
+ dev_dbg(pdata->dev, "port id=%u\n", phy_data->port_id);
+ dev_dbg(pdata->dev, "port speeds=%#x\n", phy_data->port_speeds);
+ dev_dbg(pdata->dev, "conn type=%u\n", phy_data->conn_type);
+ dev_dbg(pdata->dev, "mdio addr=%u\n", phy_data->mdio_addr);
+ }
+
+ phy_data->redrv = XP_GET_BITS(pdata->pp4, XP_PROP_4, REDRV_PRESENT);
+ phy_data->redrv_if = XP_GET_BITS(pdata->pp4, XP_PROP_4, REDRV_IF);
+ phy_data->redrv_addr = XP_GET_BITS(pdata->pp4, XP_PROP_4, REDRV_ADDR);
+ phy_data->redrv_lane = XP_GET_BITS(pdata->pp4, XP_PROP_4, REDRV_LANE);
+ phy_data->redrv_model = XP_GET_BITS(pdata->pp4, XP_PROP_4, REDRV_MODEL);
+ if (phy_data->redrv && netif_msg_probe(pdata)) {
+ dev_dbg(pdata->dev, "redrv present\n");
+ dev_dbg(pdata->dev, "redrv i/f=%u\n", phy_data->redrv_if);
+ dev_dbg(pdata->dev, "redrv addr=%#x\n", phy_data->redrv_addr);
+ dev_dbg(pdata->dev, "redrv lane=%u\n", phy_data->redrv_lane);
+ dev_dbg(pdata->dev, "redrv model=%u\n", phy_data->redrv_model);
+ }
+
+ /* Validate the connection requested */
+ if (xgbe_phy_conn_type_mismatch(pdata)) {
+ dev_err(pdata->dev, "phy mode/connection mismatch (%#x/%#x)\n",
+ phy_data->port_mode, phy_data->conn_type);
+ return -EINVAL;
+ }
+
+ /* Validate the mode requested */
+ if (xgbe_phy_port_mode_mismatch(pdata)) {
+ dev_err(pdata->dev, "phy mode/speed mismatch (%#x/%#x)\n",
+ phy_data->port_mode, phy_data->port_speeds);
+ return -EINVAL;
+ }
+
+ /* Check for and validate MDIO reset support */
+ ret = xgbe_phy_mdio_reset_setup(pdata);
+ if (ret)
+ return ret;
+
+ /* Validate the re-driver information */
+ if (xgbe_phy_redrv_error(phy_data)) {
+ dev_err(pdata->dev, "phy re-driver settings error\n");
+ return -EINVAL;
+ }
+ pdata->kr_redrv = phy_data->redrv;
+
+ /* Indicate current mode is unknown */
+ phy_data->cur_mode = XGBE_MODE_UNKNOWN;
+
+ /* Initialize supported features */
+ XGBE_ZERO_SUP(lks);
+
+ switch (phy_data->port_mode) {
+ /* Backplane support */
+ case XGBE_PORT_MODE_BACKPLANE:
+ XGBE_SET_SUP(lks, Autoneg);
+ fallthrough;
+ case XGBE_PORT_MODE_BACKPLANE_NO_AUTONEG:
+ XGBE_SET_SUP(lks, Pause);
+ XGBE_SET_SUP(lks, Asym_Pause);
+ XGBE_SET_SUP(lks, Backplane);
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) {
+ XGBE_SET_SUP(lks, 1000baseKX_Full);
+ phy_data->start_mode = XGBE_MODE_KX_1000;
+ }
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000) {
+ XGBE_SET_SUP(lks, 10000baseKR_Full);
+ if (pdata->fec_ability & MDIO_PMA_10GBR_FECABLE_ABLE)
+ XGBE_SET_SUP(lks, 10000baseR_FEC);
+ phy_data->start_mode = XGBE_MODE_KR;
+ }
+
+ phy_data->phydev_mode = XGBE_MDIO_MODE_NONE;
+ break;
+ case XGBE_PORT_MODE_BACKPLANE_2500:
+ XGBE_SET_SUP(lks, Pause);
+ XGBE_SET_SUP(lks, Asym_Pause);
+ XGBE_SET_SUP(lks, Backplane);
+ XGBE_SET_SUP(lks, 2500baseX_Full);
+ phy_data->start_mode = XGBE_MODE_KX_2500;
+
+ phy_data->phydev_mode = XGBE_MDIO_MODE_NONE;
+ break;
+
+ /* MDIO 1GBase-T support */
+ case XGBE_PORT_MODE_1000BASE_T:
+ XGBE_SET_SUP(lks, Autoneg);
+ XGBE_SET_SUP(lks, Pause);
+ XGBE_SET_SUP(lks, Asym_Pause);
+ XGBE_SET_SUP(lks, TP);
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) {
+ XGBE_SET_SUP(lks, 100baseT_Full);
+ phy_data->start_mode = XGBE_MODE_SGMII_100;
+ }
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) {
+ XGBE_SET_SUP(lks, 1000baseT_Full);
+ phy_data->start_mode = XGBE_MODE_SGMII_1000;
+ }
+
+ phy_data->phydev_mode = XGBE_MDIO_MODE_CL22;
+ break;
+
+ /* MDIO Base-X support */
+ case XGBE_PORT_MODE_1000BASE_X:
+ XGBE_SET_SUP(lks, Autoneg);
+ XGBE_SET_SUP(lks, Pause);
+ XGBE_SET_SUP(lks, Asym_Pause);
+ XGBE_SET_SUP(lks, FIBRE);
+ XGBE_SET_SUP(lks, 1000baseX_Full);
+ phy_data->start_mode = XGBE_MODE_X;
+
+ phy_data->phydev_mode = XGBE_MDIO_MODE_CL22;
+ break;
+
+ /* MDIO NBase-T support */
+ case XGBE_PORT_MODE_NBASE_T:
+ XGBE_SET_SUP(lks, Autoneg);
+ XGBE_SET_SUP(lks, Pause);
+ XGBE_SET_SUP(lks, Asym_Pause);
+ XGBE_SET_SUP(lks, TP);
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) {
+ XGBE_SET_SUP(lks, 100baseT_Full);
+ phy_data->start_mode = XGBE_MODE_SGMII_100;
+ }
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) {
+ XGBE_SET_SUP(lks, 1000baseT_Full);
+ phy_data->start_mode = XGBE_MODE_SGMII_1000;
+ }
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_2500) {
+ XGBE_SET_SUP(lks, 2500baseT_Full);
+ phy_data->start_mode = XGBE_MODE_KX_2500;
+ }
+
+ phy_data->phydev_mode = XGBE_MDIO_MODE_CL45;
+ break;
+
+ /* 10GBase-T support */
+ case XGBE_PORT_MODE_10GBASE_T:
+ XGBE_SET_SUP(lks, Autoneg);
+ XGBE_SET_SUP(lks, Pause);
+ XGBE_SET_SUP(lks, Asym_Pause);
+ XGBE_SET_SUP(lks, TP);
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) {
+ XGBE_SET_SUP(lks, 100baseT_Full);
+ phy_data->start_mode = XGBE_MODE_SGMII_100;
+ }
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) {
+ XGBE_SET_SUP(lks, 1000baseT_Full);
+ phy_data->start_mode = XGBE_MODE_SGMII_1000;
+ }
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000) {
+ XGBE_SET_SUP(lks, 10000baseT_Full);
+ phy_data->start_mode = XGBE_MODE_KR;
+ }
+
+ phy_data->phydev_mode = XGBE_MDIO_MODE_CL45;
+ break;
+
+ /* 10GBase-R support */
+ case XGBE_PORT_MODE_10GBASE_R:
+ XGBE_SET_SUP(lks, Autoneg);
+ XGBE_SET_SUP(lks, Pause);
+ XGBE_SET_SUP(lks, Asym_Pause);
+ XGBE_SET_SUP(lks, FIBRE);
+ XGBE_SET_SUP(lks, 10000baseSR_Full);
+ XGBE_SET_SUP(lks, 10000baseLR_Full);
+ XGBE_SET_SUP(lks, 10000baseLRM_Full);
+ XGBE_SET_SUP(lks, 10000baseER_Full);
+ if (pdata->fec_ability & MDIO_PMA_10GBR_FECABLE_ABLE)
+ XGBE_SET_SUP(lks, 10000baseR_FEC);
+ phy_data->start_mode = XGBE_MODE_SFI;
+
+ phy_data->phydev_mode = XGBE_MDIO_MODE_NONE;
+ break;
+
+ /* SFP support */
+ case XGBE_PORT_MODE_SFP:
+ XGBE_SET_SUP(lks, Autoneg);
+ XGBE_SET_SUP(lks, Pause);
+ XGBE_SET_SUP(lks, Asym_Pause);
+ XGBE_SET_SUP(lks, TP);
+ XGBE_SET_SUP(lks, FIBRE);
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100)
+ phy_data->start_mode = XGBE_MODE_SGMII_100;
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000)
+ phy_data->start_mode = XGBE_MODE_SGMII_1000;
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000)
+ phy_data->start_mode = XGBE_MODE_SFI;
+
+ phy_data->phydev_mode = XGBE_MDIO_MODE_CL22;
+
+ xgbe_phy_sfp_setup(pdata);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (netif_msg_probe(pdata))
+ dev_dbg(pdata->dev, "phy supported=0x%*pb\n",
+ __ETHTOOL_LINK_MODE_MASK_NBITS,
+ lks->link_modes.supported);
+
+ if ((phy_data->conn_type & XGBE_CONN_TYPE_MDIO) &&
+ (phy_data->phydev_mode != XGBE_MDIO_MODE_NONE)) {
+ ret = pdata->hw_if.set_ext_mii_mode(pdata, phy_data->mdio_addr,
+ phy_data->phydev_mode);
+ if (ret) {
+ dev_err(pdata->dev,
+ "mdio port/clause not compatible (%d/%u)\n",
+ phy_data->mdio_addr, phy_data->phydev_mode);
+ return -EINVAL;
+ }
+ }
+
+ if (phy_data->redrv && !phy_data->redrv_if) {
+ ret = pdata->hw_if.set_ext_mii_mode(pdata, phy_data->redrv_addr,
+ XGBE_MDIO_MODE_CL22);
+ if (ret) {
+ dev_err(pdata->dev,
+ "redriver mdio port not compatible (%u)\n",
+ phy_data->redrv_addr);
+ return -EINVAL;
+ }
+ }
+
+ phy_data->phy_cdr_delay = XGBE_CDR_DELAY_INIT;
+
+ /* Register for driving external PHYs */
+ mii = devm_mdiobus_alloc(pdata->dev);
+ if (!mii) {
+ dev_err(pdata->dev, "mdiobus_alloc failed\n");
+ return -ENOMEM;
+ }
+
+ mii->priv = pdata;
+ mii->name = "amd-xgbe-mii";
+ mii->read = xgbe_phy_mii_read;
+ mii->write = xgbe_phy_mii_write;
+ mii->parent = pdata->dev;
+ mii->phy_mask = ~0;
+ snprintf(mii->id, sizeof(mii->id), "%s", dev_name(pdata->dev));
+ ret = mdiobus_register(mii);
+ if (ret) {
+ dev_err(pdata->dev, "mdiobus_register failed\n");
+ return ret;
+ }
+ phy_data->mii = mii;
+
+ return 0;
+}
+
+void xgbe_init_function_ptrs_phy_v2(struct xgbe_phy_if *phy_if)
+{
+ struct xgbe_phy_impl_if *phy_impl = &phy_if->phy_impl;
+
+ phy_impl->init = xgbe_phy_init;
+ phy_impl->exit = xgbe_phy_exit;
+
+ phy_impl->reset = xgbe_phy_reset;
+ phy_impl->start = xgbe_phy_start;
+ phy_impl->stop = xgbe_phy_stop;
+
+ phy_impl->link_status = xgbe_phy_link_status;
+
+ phy_impl->valid_speed = xgbe_phy_valid_speed;
+
+ phy_impl->use_mode = xgbe_phy_use_mode;
+ phy_impl->set_mode = xgbe_phy_set_mode;
+ phy_impl->get_mode = xgbe_phy_get_mode;
+ phy_impl->switch_mode = xgbe_phy_switch_mode;
+ phy_impl->cur_mode = xgbe_phy_cur_mode;
+
+ phy_impl->an_mode = xgbe_phy_an_mode;
+
+ phy_impl->an_config = xgbe_phy_an_config;
+
+ phy_impl->an_advertising = xgbe_phy_an_advertising;
+
+ phy_impl->an_outcome = xgbe_phy_an_outcome;
+
+ phy_impl->an_pre = xgbe_phy_an_pre;
+ phy_impl->an_post = xgbe_phy_an_post;
+
+ phy_impl->kr_training_pre = xgbe_phy_kr_training_pre;
+ phy_impl->kr_training_post = xgbe_phy_kr_training_post;
+
+ phy_impl->module_info = xgbe_phy_module_info;
+ phy_impl->module_eeprom = xgbe_phy_module_eeprom;
+}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-platform.c b/drivers/net/ethernet/amd/xgbe/xgbe-platform.c
new file mode 100644
index 000000000..4ebd24101
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-platform.c
@@ -0,0 +1,629 @@
+/*
+ * AMD 10Gb Ethernet driver
+ *
+ * This file is available to you under your choice of the following two
+ * licenses:
+ *
+ * License 1: GPLv2
+ *
+ * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
+ *
+ * This file is free software; you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * License 2: Modified BSD
+ *
+ * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Advanced Micro Devices, Inc. nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/of_device.h>
+#include <linux/clk.h>
+#include <linux/property.h>
+#include <linux/acpi.h>
+#include <linux/mdio.h>
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id xgbe_acpi_match[];
+
+static struct xgbe_version_data *xgbe_acpi_vdata(struct xgbe_prv_data *pdata)
+{
+ const struct acpi_device_id *id;
+
+ id = acpi_match_device(xgbe_acpi_match, pdata->dev);
+
+ return id ? (struct xgbe_version_data *)id->driver_data : NULL;
+}
+
+static int xgbe_acpi_support(struct xgbe_prv_data *pdata)
+{
+ struct device *dev = pdata->dev;
+ u32 property;
+ int ret;
+
+ /* Obtain the system clock setting */
+ ret = device_property_read_u32(dev, XGBE_ACPI_DMA_FREQ, &property);
+ if (ret) {
+ dev_err(dev, "unable to obtain %s property\n",
+ XGBE_ACPI_DMA_FREQ);
+ return ret;
+ }
+ pdata->sysclk_rate = property;
+
+ /* Obtain the PTP clock setting */
+ ret = device_property_read_u32(dev, XGBE_ACPI_PTP_FREQ, &property);
+ if (ret) {
+ dev_err(dev, "unable to obtain %s property\n",
+ XGBE_ACPI_PTP_FREQ);
+ return ret;
+ }
+ pdata->ptpclk_rate = property;
+
+ return 0;
+}
+#else /* CONFIG_ACPI */
+static struct xgbe_version_data *xgbe_acpi_vdata(struct xgbe_prv_data *pdata)
+{
+ return NULL;
+}
+
+static int xgbe_acpi_support(struct xgbe_prv_data *pdata)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_ACPI */
+
+#ifdef CONFIG_OF
+static const struct of_device_id xgbe_of_match[];
+
+static struct xgbe_version_data *xgbe_of_vdata(struct xgbe_prv_data *pdata)
+{
+ const struct of_device_id *id;
+
+ id = of_match_device(xgbe_of_match, pdata->dev);
+
+ return id ? (struct xgbe_version_data *)id->data : NULL;
+}
+
+static int xgbe_of_support(struct xgbe_prv_data *pdata)
+{
+ struct device *dev = pdata->dev;
+
+ /* Obtain the system clock setting */
+ pdata->sysclk = devm_clk_get(dev, XGBE_DMA_CLOCK);
+ if (IS_ERR(pdata->sysclk)) {
+ dev_err(dev, "dma devm_clk_get failed\n");
+ return PTR_ERR(pdata->sysclk);
+ }
+ pdata->sysclk_rate = clk_get_rate(pdata->sysclk);
+
+ /* Obtain the PTP clock setting */
+ pdata->ptpclk = devm_clk_get(dev, XGBE_PTP_CLOCK);
+ if (IS_ERR(pdata->ptpclk)) {
+ dev_err(dev, "ptp devm_clk_get failed\n");
+ return PTR_ERR(pdata->ptpclk);
+ }
+ pdata->ptpclk_rate = clk_get_rate(pdata->ptpclk);
+
+ return 0;
+}
+
+static struct platform_device *xgbe_of_get_phy_pdev(struct xgbe_prv_data *pdata)
+{
+ struct device *dev = pdata->dev;
+ struct device_node *phy_node;
+ struct platform_device *phy_pdev;
+
+ phy_node = of_parse_phandle(dev->of_node, "phy-handle", 0);
+ if (phy_node) {
+ /* Old style device tree:
+ * The XGBE and PHY resources are separate
+ */
+ phy_pdev = of_find_device_by_node(phy_node);
+ of_node_put(phy_node);
+ } else {
+ /* New style device tree:
+ * The XGBE and PHY resources are grouped together with
+ * the PHY resources listed last
+ */
+ get_device(dev);
+ phy_pdev = pdata->platdev;
+ }
+
+ return phy_pdev;
+}
+#else /* CONFIG_OF */
+static struct xgbe_version_data *xgbe_of_vdata(struct xgbe_prv_data *pdata)
+{
+ return NULL;
+}
+
+static int xgbe_of_support(struct xgbe_prv_data *pdata)
+{
+ return -EINVAL;
+}
+
+static struct platform_device *xgbe_of_get_phy_pdev(struct xgbe_prv_data *pdata)
+{
+ return NULL;
+}
+#endif /* CONFIG_OF */
+
+static unsigned int xgbe_resource_count(struct platform_device *pdev,
+ unsigned int type)
+{
+ unsigned int count;
+ int i;
+
+ for (i = 0, count = 0; i < pdev->num_resources; i++) {
+ struct resource *res = &pdev->resource[i];
+
+ if (type == resource_type(res))
+ count++;
+ }
+
+ return count;
+}
+
+static struct platform_device *xgbe_get_phy_pdev(struct xgbe_prv_data *pdata)
+{
+ struct platform_device *phy_pdev;
+
+ if (pdata->use_acpi) {
+ get_device(pdata->dev);
+ phy_pdev = pdata->platdev;
+ } else {
+ phy_pdev = xgbe_of_get_phy_pdev(pdata);
+ }
+
+ return phy_pdev;
+}
+
+static struct xgbe_version_data *xgbe_get_vdata(struct xgbe_prv_data *pdata)
+{
+ return pdata->use_acpi ? xgbe_acpi_vdata(pdata)
+ : xgbe_of_vdata(pdata);
+}
+
+static int xgbe_platform_probe(struct platform_device *pdev)
+{
+ struct xgbe_prv_data *pdata;
+ struct device *dev = &pdev->dev;
+ struct platform_device *phy_pdev;
+ const char *phy_mode;
+ unsigned int phy_memnum, phy_irqnum;
+ unsigned int dma_irqnum, dma_irqend;
+ enum dev_dma_attr attr;
+ int ret;
+
+ pdata = xgbe_alloc_pdata(dev);
+ if (IS_ERR(pdata)) {
+ ret = PTR_ERR(pdata);
+ goto err_alloc;
+ }
+
+ pdata->platdev = pdev;
+ pdata->adev = ACPI_COMPANION(dev);
+ platform_set_drvdata(pdev, pdata);
+
+ /* Check if we should use ACPI or DT */
+ pdata->use_acpi = dev->of_node ? 0 : 1;
+
+ /* Get the version data */
+ pdata->vdata = xgbe_get_vdata(pdata);
+
+ phy_pdev = xgbe_get_phy_pdev(pdata);
+ if (!phy_pdev) {
+ dev_err(dev, "unable to obtain phy device\n");
+ ret = -EINVAL;
+ goto err_phydev;
+ }
+ pdata->phy_platdev = phy_pdev;
+ pdata->phy_dev = &phy_pdev->dev;
+
+ if (pdev == phy_pdev) {
+ /* New style device tree or ACPI:
+ * The XGBE and PHY resources are grouped together with
+ * the PHY resources listed last
+ */
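+		/* The last three MEM resources are the PHY blocks and the
+		 * last IRQ resource is the AN interrupt
+		 */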
+ phy_memnum = xgbe_resource_count(pdev, IORESOURCE_MEM) - 3;
+ phy_irqnum = xgbe_resource_count(pdev, IORESOURCE_IRQ) - 1;
+ dma_irqnum = 1;
+ dma_irqend = phy_irqnum;
+ } else {
+ /* Old style device tree:
+ * The XGBE and PHY resources are separate
+ */
+ phy_memnum = 0;
+ phy_irqnum = 0;
+ dma_irqnum = 1;
+ dma_irqend = xgbe_resource_count(pdev, IORESOURCE_IRQ);
+ }
+
+ /* Obtain the mmio areas for the device */
+ pdata->xgmac_regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(pdata->xgmac_regs)) {
+ dev_err(dev, "xgmac ioremap failed\n");
+ ret = PTR_ERR(pdata->xgmac_regs);
+ goto err_io;
+ }
+ if (netif_msg_probe(pdata))
+ dev_dbg(dev, "xgmac_regs = %p\n", pdata->xgmac_regs);
+
+ pdata->xpcs_regs = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(pdata->xpcs_regs)) {
+ dev_err(dev, "xpcs ioremap failed\n");
+ ret = PTR_ERR(pdata->xpcs_regs);
+ goto err_io;
+ }
+ if (netif_msg_probe(pdata))
+ dev_dbg(dev, "xpcs_regs = %p\n", pdata->xpcs_regs);
+
+ pdata->rxtx_regs = devm_platform_ioremap_resource(phy_pdev,
+ phy_memnum++);
+ if (IS_ERR(pdata->rxtx_regs)) {
+ dev_err(dev, "rxtx ioremap failed\n");
+ ret = PTR_ERR(pdata->rxtx_regs);
+ goto err_io;
+ }
+ if (netif_msg_probe(pdata))
+ dev_dbg(dev, "rxtx_regs = %p\n", pdata->rxtx_regs);
+
+ pdata->sir0_regs = devm_platform_ioremap_resource(phy_pdev,
+ phy_memnum++);
+ if (IS_ERR(pdata->sir0_regs)) {
+ dev_err(dev, "sir0 ioremap failed\n");
+ ret = PTR_ERR(pdata->sir0_regs);
+ goto err_io;
+ }
+ if (netif_msg_probe(pdata))
+ dev_dbg(dev, "sir0_regs = %p\n", pdata->sir0_regs);
+
+ pdata->sir1_regs = devm_platform_ioremap_resource(phy_pdev,
+ phy_memnum++);
+ if (IS_ERR(pdata->sir1_regs)) {
+ dev_err(dev, "sir1 ioremap failed\n");
+ ret = PTR_ERR(pdata->sir1_regs);
+ goto err_io;
+ }
+ if (netif_msg_probe(pdata))
+ dev_dbg(dev, "sir1_regs = %p\n", pdata->sir1_regs);
+
+ /* Retrieve the MAC address */
+ ret = device_property_read_u8_array(dev, XGBE_MAC_ADDR_PROPERTY,
+ pdata->mac_addr,
+ sizeof(pdata->mac_addr));
+ if (ret || !is_valid_ether_addr(pdata->mac_addr)) {
+ dev_err(dev, "invalid %s property\n", XGBE_MAC_ADDR_PROPERTY);
+ if (!ret)
+ ret = -EINVAL;
+ goto err_io;
+ }
+
+ /* Retrieve the PHY mode - it must be "xgmii" */
+ ret = device_property_read_string(dev, XGBE_PHY_MODE_PROPERTY,
+ &phy_mode);
+ if (ret || strcmp(phy_mode, phy_modes(PHY_INTERFACE_MODE_XGMII))) {
+ dev_err(dev, "invalid %s property\n", XGBE_PHY_MODE_PROPERTY);
+ if (!ret)
+ ret = -EINVAL;
+ goto err_io;
+ }
+ pdata->phy_mode = PHY_INTERFACE_MODE_XGMII;
+
+ /* Check for per channel interrupt support */
+ if (device_property_present(dev, XGBE_DMA_IRQS_PROPERTY)) {
+ pdata->per_channel_irq = 1;
+ pdata->channel_irq_mode = XGBE_IRQ_MODE_EDGE;
+ }
+
+ /* Obtain device settings unique to ACPI/OF */
+ if (pdata->use_acpi)
+ ret = xgbe_acpi_support(pdata);
+ else
+ ret = xgbe_of_support(pdata);
+ if (ret)
+ goto err_io;
+
+ /* Set the DMA coherency values */
+ attr = device_get_dma_attr(dev);
+ if (attr == DEV_DMA_NOT_SUPPORTED) {
+ dev_err(dev, "DMA is not supported");
+ ret = -ENODEV;
+ goto err_io;
+ }
+ pdata->coherent = (attr == DEV_DMA_COHERENT);
+ if (pdata->coherent) {
+ pdata->arcr = XGBE_DMA_OS_ARCR;
+ pdata->awcr = XGBE_DMA_OS_AWCR;
+ } else {
+ pdata->arcr = XGBE_DMA_SYS_ARCR;
+ pdata->awcr = XGBE_DMA_SYS_AWCR;
+ }
+
+ /* Set the maximum fifo amounts */
+ pdata->tx_max_fifo_size = pdata->vdata->tx_max_fifo_size;
+ pdata->rx_max_fifo_size = pdata->vdata->rx_max_fifo_size;
+
+ /* Set the hardware channel and queue counts */
+ xgbe_set_counts(pdata);
+
+ /* Always have XGMAC and XPCS (auto-negotiation) interrupts */
+ pdata->irq_count = 2;
+
+ /* Get the device interrupt */
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
+ goto err_io;
+ pdata->dev_irq = ret;
+
+ /* Get the per channel DMA interrupts */
+ if (pdata->per_channel_irq) {
+ unsigned int i, max = ARRAY_SIZE(pdata->channel_irq);
+
+ for (i = 0; (i < max) && (dma_irqnum < dma_irqend); i++) {
+ ret = platform_get_irq(pdata->platdev, dma_irqnum++);
+ if (ret < 0)
+ goto err_io;
+
+ pdata->channel_irq[i] = ret;
+ }
+
+ pdata->channel_irq_count = max;
+
+ pdata->irq_count += max;
+ }
+
+ /* Get the auto-negotiation interrupt */
+ ret = platform_get_irq(phy_pdev, phy_irqnum++);
+ if (ret < 0)
+ goto err_io;
+ pdata->an_irq = ret;
+
+ /* Configure the netdev resource */
+ ret = xgbe_config_netdev(pdata);
+ if (ret)
+ goto err_io;
+
+ netdev_notice(pdata->netdev, "net device enabled\n");
+
+ return 0;
+
+err_io:
+ platform_device_put(phy_pdev);
+
+err_phydev:
+ xgbe_free_pdata(pdata);
+
+err_alloc:
+ dev_notice(dev, "net device not enabled\n");
+
+ return ret;
+}
+
+static int xgbe_platform_remove(struct platform_device *pdev)
+{
+ struct xgbe_prv_data *pdata = platform_get_drvdata(pdev);
+
+ xgbe_deconfig_netdev(pdata);
+
+ platform_device_put(pdata->phy_platdev);
+
+ xgbe_free_pdata(pdata);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int xgbe_platform_suspend(struct device *dev)
+{
+ struct xgbe_prv_data *pdata = dev_get_drvdata(dev);
+ struct net_device *netdev = pdata->netdev;
+ int ret = 0;
+
+ DBGPR("-->xgbe_suspend\n");
+
+ if (netif_running(netdev))
+ ret = xgbe_powerdown(netdev, XGMAC_DRIVER_CONTEXT);
+
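+	/* Put the PCS into its low-power state for the duration of the
+	 * suspend; xgbe_platform_resume() below clears MDIO_CTRL1_LPOWER
+	 * again from the saved lpm_ctrl value.
+	 */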
+ pdata->lpm_ctrl = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
+ pdata->lpm_ctrl |= MDIO_CTRL1_LPOWER;
+ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, pdata->lpm_ctrl);
+
+ DBGPR("<--xgbe_suspend\n");
+
+ return ret;
+}
+
+static int xgbe_platform_resume(struct device *dev)
+{
+ struct xgbe_prv_data *pdata = dev_get_drvdata(dev);
+ struct net_device *netdev = pdata->netdev;
+ int ret = 0;
+
+ DBGPR("-->xgbe_resume\n");
+
+ pdata->lpm_ctrl &= ~MDIO_CTRL1_LPOWER;
+ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, pdata->lpm_ctrl);
+
+ if (netif_running(netdev)) {
+ ret = xgbe_powerup(netdev, XGMAC_DRIVER_CONTEXT);
+
+ /* Schedule a restart in case the link or phy state changed
+ * while we were powered down.
+ */
+ schedule_work(&pdata->restart_work);
+ }
+
+ DBGPR("<--xgbe_resume\n");
+
+ return ret;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct xgbe_version_data xgbe_v1 = {
+ .init_function_ptrs_phy_impl = xgbe_init_function_ptrs_phy_v1,
+ .xpcs_access = XGBE_XPCS_ACCESS_V1,
+ .tx_max_fifo_size = 81920,
+ .rx_max_fifo_size = 81920,
+ .tx_tstamp_workaround = 1,
+};
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id xgbe_acpi_match[] = {
+ { .id = "AMDI8001",
+ .driver_data = (kernel_ulong_t)&xgbe_v1 },
+ {},
+};
+
+MODULE_DEVICE_TABLE(acpi, xgbe_acpi_match);
+#endif
+
+#ifdef CONFIG_OF
+static const struct of_device_id xgbe_of_match[] = {
+ { .compatible = "amd,xgbe-seattle-v1a",
+ .data = &xgbe_v1 },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, xgbe_of_match);
+#endif
+
+static SIMPLE_DEV_PM_OPS(xgbe_platform_pm_ops,
+ xgbe_platform_suspend, xgbe_platform_resume);
+
+static struct platform_driver xgbe_driver = {
+ .driver = {
+ .name = XGBE_DRV_NAME,
+#ifdef CONFIG_ACPI
+ .acpi_match_table = xgbe_acpi_match,
+#endif
+#ifdef CONFIG_OF
+ .of_match_table = xgbe_of_match,
+#endif
+ .pm = &xgbe_platform_pm_ops,
+ },
+ .probe = xgbe_platform_probe,
+ .remove = xgbe_platform_remove,
+};
+
+int xgbe_platform_init(void)
+{
+ return platform_driver_register(&xgbe_driver);
+}
+
+void xgbe_platform_exit(void)
+{
+ platform_driver_unregister(&xgbe_driver);
+}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
new file mode 100644
index 000000000..d06d260cf
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
@@ -0,0 +1,279 @@
+/*
+ * AMD 10Gb Ethernet driver
+ *
+ * This file is available to you under your choice of the following two
+ * licenses:
+ *
+ * License 1: GPLv2
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ *
+ * This file is free software; you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * License 2: Modified BSD
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Advanced Micro Devices, Inc. nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/clk.h>
+#include <linux/clocksource.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/net_tstamp.h>
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+static u64 xgbe_cc_read(const struct cyclecounter *cc)
+{
+ struct xgbe_prv_data *pdata = container_of(cc,
+ struct xgbe_prv_data,
+ tstamp_cc);
+ u64 nsec;
+
+ nsec = pdata->hw_if.get_tstamp_time(pdata);
+
+ return nsec;
+}
+
+static int xgbe_adjfreq(struct ptp_clock_info *info, s32 delta)
+{
+ struct xgbe_prv_data *pdata = container_of(info,
+ struct xgbe_prv_data,
+ ptp_clock_info);
+ unsigned long flags;
+ u64 adjust;
+ u32 addend, diff;
+ unsigned int neg_adjust = 0;
+
+ if (delta < 0) {
+ neg_adjust = 1;
+ delta = -delta;
+ }
+
+ adjust = pdata->tstamp_addend;
+ adjust *= delta;
+ diff = div_u64(adjust, 1000000000UL);
+
+ addend = (neg_adjust) ? pdata->tstamp_addend - diff :
+ pdata->tstamp_addend + diff;
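+	/* Worked example with assumed values (not taken from the driver):
+	 * for tstamp_addend = 0x80000000 (2^31) and delta = +1000 ppb,
+	 * diff = (2^31 * 1000) / 10^9 ~= 2147, so the new addend is
+	 * 0x80000000 + 2147 and the clock runs ~1 ppm faster.
+	 */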
+
+ spin_lock_irqsave(&pdata->tstamp_lock, flags);
+
+ pdata->hw_if.update_tstamp_addend(pdata, addend);
+
+ spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
+
+ return 0;
+}
+
+static int xgbe_adjtime(struct ptp_clock_info *info, s64 delta)
+{
+ struct xgbe_prv_data *pdata = container_of(info,
+ struct xgbe_prv_data,
+ ptp_clock_info);
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdata->tstamp_lock, flags);
+ timecounter_adjtime(&pdata->tstamp_tc, delta);
+ spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
+
+ return 0;
+}
+
+static int xgbe_gettime(struct ptp_clock_info *info, struct timespec64 *ts)
+{
+ struct xgbe_prv_data *pdata = container_of(info,
+ struct xgbe_prv_data,
+ ptp_clock_info);
+ unsigned long flags;
+ u64 nsec;
+
+ spin_lock_irqsave(&pdata->tstamp_lock, flags);
+
+ nsec = timecounter_read(&pdata->tstamp_tc);
+
+ spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
+
+ *ts = ns_to_timespec64(nsec);
+
+ return 0;
+}
+
+static int xgbe_settime(struct ptp_clock_info *info,
+ const struct timespec64 *ts)
+{
+ struct xgbe_prv_data *pdata = container_of(info,
+ struct xgbe_prv_data,
+ ptp_clock_info);
+ unsigned long flags;
+ u64 nsec;
+
+ nsec = timespec64_to_ns(ts);
+
+ spin_lock_irqsave(&pdata->tstamp_lock, flags);
+
+ timecounter_init(&pdata->tstamp_tc, &pdata->tstamp_cc, nsec);
+
+ spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
+
+ return 0;
+}
+
+static int xgbe_enable(struct ptp_clock_info *info,
+ struct ptp_clock_request *request, int on)
+{
+ return -EOPNOTSUPP;
+}
+
+void xgbe_ptp_register(struct xgbe_prv_data *pdata)
+{
+ struct ptp_clock_info *info = &pdata->ptp_clock_info;
+ struct ptp_clock *clock;
+ struct cyclecounter *cc = &pdata->tstamp_cc;
+ u64 dividend;
+
+ snprintf(info->name, sizeof(info->name), "%s",
+ netdev_name(pdata->netdev));
+ info->owner = THIS_MODULE;
+ info->max_adj = pdata->ptpclk_rate;
+ info->adjfreq = xgbe_adjfreq;
+ info->adjtime = xgbe_adjtime;
+ info->gettime64 = xgbe_gettime;
+ info->settime64 = xgbe_settime;
+ info->enable = xgbe_enable;
+
+ clock = ptp_clock_register(info, pdata->dev);
+ if (IS_ERR(clock)) {
+ dev_err(pdata->dev, "ptp_clock_register failed\n");
+ return;
+ }
+
+ pdata->ptp_clock = clock;
+
+ /* Calculate the addend:
+	 * addend = 2^32 / (PTP ref clock / 50 MHz)
+	 * = (2^32 * 50 MHz) / PTP ref clock
+ */
+ dividend = 50000000;
+ dividend <<= 32;
+ pdata->tstamp_addend = div_u64(dividend, pdata->ptpclk_rate);
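+	/* For example, assuming a 125 MHz PTP reference clock:
+	 * addend = (2^32 * 50000000) / 125000000 = 0x66666666, i.e. the
+	 * 32-bit accumulator overflows at an effective 50 MHz rate.
+	 */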
+
+ /* Setup the timecounter */
+ cc->read = xgbe_cc_read;
+ cc->mask = CLOCKSOURCE_MASK(64);
+ cc->mult = 1;
+ cc->shift = 0;
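+	/* mult = 1 and shift = 0 assume the device timestamp is already in
+	 * nanoseconds (see xgbe_cc_read() above), so the cyclecounter does
+	 * no scaling of its own.
+	 */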
+
+ timecounter_init(&pdata->tstamp_tc, &pdata->tstamp_cc,
+ ktime_to_ns(ktime_get_real()));
+
+ /* Disable all timestamping to start */
+ XGMAC_IOWRITE(pdata, MAC_TSCR, 0);
+ pdata->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
+ pdata->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
+}
+
+void xgbe_ptp_unregister(struct xgbe_prv_data *pdata)
+{
+ if (pdata->ptp_clock)
+ ptp_clock_unregister(pdata->ptp_clock);
+}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
new file mode 100644
index 000000000..e0b8f3c4c
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -0,0 +1,1364 @@
+/*
+ * AMD 10Gb Ethernet driver
+ *
+ * This file is available to you under your choice of the following two
+ * licenses:
+ *
+ * License 1: GPLv2
+ *
+ * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
+ *
+ * This file is free software; you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * License 2: Modified BSD
+ *
+ * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Advanced Micro Devices, Inc. nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __XGBE_H__
+#define __XGBE_H__
+
+#include <linux/dma-mapping.h>
+#include <linux/netdevice.h>
+#include <linux/workqueue.h>
+#include <linux/phy.h>
+#include <linux/if_vlan.h>
+#include <linux/bitops.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/timecounter.h>
+#include <linux/net_tstamp.h>
+#include <net/dcbnl.h>
+#include <linux/completion.h>
+#include <linux/cpumask.h>
+#include <linux/interrupt.h>
+#include <linux/dcache.h>
+#include <linux/ethtool.h>
+#include <linux/list.h>
+
+#define XGBE_DRV_NAME "amd-xgbe"
+#define XGBE_DRV_DESC "AMD 10 Gigabit Ethernet Driver"
+
+/* Descriptor related defines */
+#define XGBE_TX_DESC_CNT 512
+#define XGBE_TX_DESC_MIN_FREE (XGBE_TX_DESC_CNT >> 3)
+#define XGBE_TX_DESC_MAX_PROC (XGBE_TX_DESC_CNT >> 1)
+#define XGBE_RX_DESC_CNT 512
+
+#define XGBE_TX_DESC_CNT_MIN 64
+#define XGBE_TX_DESC_CNT_MAX 4096
+#define XGBE_RX_DESC_CNT_MIN 64
+#define XGBE_RX_DESC_CNT_MAX 4096
+
+#define XGBE_TX_MAX_BUF_SIZE (0x3fff & ~(64 - 1))
+
+/* Descriptors required for maximum contiguous TSO/GSO packet */
+#define XGBE_TX_MAX_SPLIT ((GSO_MAX_SIZE / XGBE_TX_MAX_BUF_SIZE) + 1)
+
+/* Maximum possible descriptors needed for an SKB:
+ * - Maximum number of SKB frags
+ * - Maximum descriptors for contiguous TSO/GSO packet
+ * - Possible context descriptor
+ * - Possible TSO header descriptor
+ */
+#define XGBE_TX_MAX_DESCS (MAX_SKB_FRAGS + XGBE_TX_MAX_SPLIT + 2)
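+/* Illustrative sizing, assuming 4 KiB pages (MAX_SKB_FRAGS = 17) and
+ * GSO_MAX_SIZE = 64 KiB: XGBE_TX_MAX_BUF_SIZE = 16320, so
+ * XGBE_TX_MAX_SPLIT = (65536 / 16320) + 1 = 5 and
+ * XGBE_TX_MAX_DESCS = 17 + 5 + 2 = 24.
+ */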
+
+#define XGBE_RX_MIN_BUF_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)
+#define XGBE_RX_BUF_ALIGN 64
+#define XGBE_SKB_ALLOC_SIZE 256
+#define XGBE_SPH_HDSMS_SIZE	2	/* Keep in sync with XGBE_SKB_ALLOC_SIZE */
+
+#define XGBE_MAX_DMA_CHANNELS 16
+#define XGBE_MAX_QUEUES 16
+#define XGBE_PRIORITY_QUEUES 8
+#define XGBE_DMA_STOP_TIMEOUT 1
+
+/* DMA cache settings - Outer shareable, write-back, write-allocate */
+#define XGBE_DMA_OS_ARCR 0x002b2b2b
+#define XGBE_DMA_OS_AWCR 0x2f2f2f2f
+
+/* DMA cache settings - System, no caches used */
+#define XGBE_DMA_SYS_ARCR 0x00303030
+#define XGBE_DMA_SYS_AWCR 0x30303030
+
+/* DMA cache settings - PCI device */
+#define XGBE_DMA_PCI_ARCR 0x000f0f0f
+#define XGBE_DMA_PCI_AWCR 0x0f0f0f0f
+#define XGBE_DMA_PCI_AWARCR 0x00000f0f
+
+/* DMA channel interrupt modes */
+#define XGBE_IRQ_MODE_EDGE 0
+#define XGBE_IRQ_MODE_LEVEL 1
+
+#define XGMAC_MIN_PACKET 60
+#define XGMAC_STD_PACKET_MTU 1500
+#define XGMAC_MAX_STD_PACKET 1518
+#define XGMAC_JUMBO_PACKET_MTU 9000
+#define XGMAC_MAX_JUMBO_PACKET 9018
+#define XGMAC_ETH_PREAMBLE (12 + 8) /* Inter-frame gap + preamble */
+
+#define XGMAC_PFC_DATA_LEN 46
+#define XGMAC_PFC_DELAYS 14000
+
+#define XGMAC_PRIO_QUEUES(_cnt) \
+ min_t(unsigned int, IEEE_8021QAZ_MAX_TCS, (_cnt))
+
+/* Common property names */
+#define XGBE_MAC_ADDR_PROPERTY "mac-address"
+#define XGBE_PHY_MODE_PROPERTY "phy-mode"
+#define XGBE_DMA_IRQS_PROPERTY "amd,per-channel-interrupt"
+#define XGBE_SPEEDSET_PROPERTY "amd,speed-set"
+
+/* Device-tree clock names */
+#define XGBE_DMA_CLOCK "dma_clk"
+#define XGBE_PTP_CLOCK "ptp_clk"
+
+/* ACPI property names */
+#define XGBE_ACPI_DMA_FREQ "amd,dma-freq"
+#define XGBE_ACPI_PTP_FREQ "amd,ptp-freq"
+
+/* PCI BAR mapping */
+#define XGBE_XGMAC_BAR 0
+#define XGBE_XPCS_BAR 1
+#define XGBE_MAC_PROP_OFFSET 0x1d000
+#define XGBE_I2C_CTRL_OFFSET 0x1e000
+
+/* PCI MSI/MSIx support */
+#define XGBE_MSI_BASE_COUNT 4
+#define XGBE_MSI_MIN_COUNT (XGBE_MSI_BASE_COUNT + 1)
+
+/* PCI clock frequencies */
+#define XGBE_V2_DMA_CLOCK_FREQ 500000000 /* 500 MHz */
+#define XGBE_V2_PTP_CLOCK_FREQ 125000000 /* 125 MHz */
+
+/* Timestamp support - values based on a 50 MHz PTP clock
+ * 50 MHz => 20 nsec per tick
+ */
+#define XGBE_TSTAMP_SSINC 20
+#define XGBE_TSTAMP_SNSINC 0
+
+/* Driver PMT macros */
+#define XGMAC_DRIVER_CONTEXT 1
+#define XGMAC_IOCTL_CONTEXT 2
+
+#define XGMAC_FIFO_MIN_ALLOC 2048
+#define XGMAC_FIFO_UNIT 256
+#define XGMAC_FIFO_ALIGN(_x) \
+ (((_x) + XGMAC_FIFO_UNIT - 1) & ~(XGMAC_FIFO_UNIT - 1))
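+/* e.g. XGMAC_FIFO_ALIGN(3000) rounds up to the next 256-byte unit: 3072 */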
+#define XGMAC_FIFO_FC_OFF 2048
+#define XGMAC_FIFO_FC_MIN 4096
+
+#define XGBE_TC_MIN_QUANTUM 10
+
+/* Helper macro for descriptor handling
+ * Always use XGBE_GET_DESC_DATA to access the descriptor data
+ * since the index is free-running and needs to be AND-ed
+ * with (descriptor count - 1) of the ring to index the
+ * proper descriptor data.
+ */
+#define XGBE_GET_DESC_DATA(_ring, _idx) \
+ ((_ring)->rdata + \
+ ((_idx) & ((_ring)->rdesc_count - 1)))
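+/* For example, with rdesc_count = 512 a free-running index of 513 maps
+ * to rdata[513 & 511] = rdata[1]; this assumes a power-of-two descriptor
+ * count, as the XGBE_*_DESC_CNT defaults above imply.
+ */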
+
+/* Default coalescing parameters */
+#define XGMAC_INIT_DMA_TX_USECS 1000
+#define XGMAC_INIT_DMA_TX_FRAMES 25
+
+#define XGMAC_MAX_DMA_RIWT 0xff
+#define XGMAC_INIT_DMA_RX_USECS 30
+#define XGMAC_INIT_DMA_RX_FRAMES 25
+
+/* Flow control queue count */
+#define XGMAC_MAX_FLOW_CONTROL_QUEUES 8
+
+/* Flow control threshold units */
+#define XGMAC_FLOW_CONTROL_UNIT 512
+#define XGMAC_FLOW_CONTROL_ALIGN(_x) \
+ (((_x) + XGMAC_FLOW_CONTROL_UNIT - 1) & ~(XGMAC_FLOW_CONTROL_UNIT - 1))
+#define XGMAC_FLOW_CONTROL_VALUE(_x) \
+ (((_x) < 1024) ? 0 : ((_x) / XGMAC_FLOW_CONTROL_UNIT) - 2)
+#define XGMAC_FLOW_CONTROL_MAX 33280
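+/* For example, a 4096-byte threshold encodes as (4096 / 512) - 2 = 6,
+ * while anything below 1024 bytes encodes as 0.
+ */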
+
+/* Maximum MAC address hash table size (256 bits = 8 bytes) */
+#define XGBE_MAC_HASH_TABLE_SIZE 8
+
+/* Receive Side Scaling */
+#define XGBE_RSS_HASH_KEY_SIZE 40
+#define XGBE_RSS_MAX_TABLE_SIZE 256
+#define XGBE_RSS_LOOKUP_TABLE_TYPE 0
+#define XGBE_RSS_HASH_KEY_TYPE 1
+
+/* Auto-negotiation */
+#define XGBE_AN_MS_TIMEOUT 500
+#define XGBE_LINK_TIMEOUT 5
+#define XGBE_KR_TRAINING_WAIT_ITER 50
+
+#define XGBE_SGMII_AN_LINK_STATUS BIT(1)
+#define XGBE_SGMII_AN_LINK_SPEED (BIT(2) | BIT(3))
+#define XGBE_SGMII_AN_LINK_SPEED_100 0x04
+#define XGBE_SGMII_AN_LINK_SPEED_1000 0x08
+#define XGBE_SGMII_AN_LINK_DUPLEX BIT(4)
+
+/* ECC correctable error notification window (seconds) */
+#define XGBE_ECC_LIMIT 60
+
+/* MDIO port types */
+#define XGMAC_MAX_C22_PORT 3
+
+/* Link mode bit operations */
+#define XGBE_ZERO_SUP(_ls) \
+ ethtool_link_ksettings_zero_link_mode((_ls), supported)
+
+#define XGBE_SET_SUP(_ls, _mode) \
+ ethtool_link_ksettings_add_link_mode((_ls), supported, _mode)
+
+#define XGBE_CLR_SUP(_ls, _mode) \
+ ethtool_link_ksettings_del_link_mode((_ls), supported, _mode)
+
+#define XGBE_IS_SUP(_ls, _mode) \
+ ethtool_link_ksettings_test_link_mode((_ls), supported, _mode)
+
+#define XGBE_ZERO_ADV(_ls) \
+ ethtool_link_ksettings_zero_link_mode((_ls), advertising)
+
+#define XGBE_SET_ADV(_ls, _mode) \
+ ethtool_link_ksettings_add_link_mode((_ls), advertising, _mode)
+
+#define XGBE_CLR_ADV(_ls, _mode) \
+ ethtool_link_ksettings_del_link_mode((_ls), advertising, _mode)
+
+#define XGBE_ADV(_ls, _mode) \
+ ethtool_link_ksettings_test_link_mode((_ls), advertising, _mode)
+
+#define XGBE_ZERO_LP_ADV(_ls) \
+ ethtool_link_ksettings_zero_link_mode((_ls), lp_advertising)
+
+#define XGBE_SET_LP_ADV(_ls, _mode) \
+ ethtool_link_ksettings_add_link_mode((_ls), lp_advertising, _mode)
+
+#define XGBE_CLR_LP_ADV(_ls, _mode) \
+ ethtool_link_ksettings_del_link_mode((_ls), lp_advertising, _mode)
+
+#define XGBE_LP_ADV(_ls, _mode) \
+ ethtool_link_ksettings_test_link_mode((_ls), lp_advertising, _mode)
+
+#define XGBE_LM_COPY(_dst, _dname, _src, _sname) \
+ bitmap_copy((_dst)->link_modes._dname, \
+ (_src)->link_modes._sname, \
+ __ETHTOOL_LINK_MODE_MASK_NBITS)
+
+struct xgbe_prv_data;
+
+struct xgbe_packet_data {
+ struct sk_buff *skb;
+
+ unsigned int attributes;
+
+ unsigned int errors;
+
+ unsigned int rdesc_count;
+ unsigned int length;
+
+ unsigned int header_len;
+ unsigned int tcp_header_len;
+ unsigned int tcp_payload_len;
+ unsigned short mss;
+
+ unsigned short vlan_ctag;
+
+ u64 rx_tstamp;
+
+ u32 rss_hash;
+ enum pkt_hash_types rss_hash_type;
+
+ unsigned int tx_packets;
+ unsigned int tx_bytes;
+};
+
+/* Common Rx and Tx descriptor mapping */
+struct xgbe_ring_desc {
+ __le32 desc0;
+ __le32 desc1;
+ __le32 desc2;
+ __le32 desc3;
+};
+
+/* Page allocation related values */
+struct xgbe_page_alloc {
+ struct page *pages;
+ unsigned int pages_len;
+ unsigned int pages_offset;
+
+ dma_addr_t pages_dma;
+};
+
+/* Ring entry buffer data */
+struct xgbe_buffer_data {
+ struct xgbe_page_alloc pa;
+ struct xgbe_page_alloc pa_unmap;
+
+ dma_addr_t dma_base;
+ unsigned long dma_off;
+ unsigned int dma_len;
+};
+
+/* Tx-related ring data */
+struct xgbe_tx_ring_data {
+ unsigned int packets; /* BQL packet count */
+ unsigned int bytes; /* BQL byte count */
+};
+
+/* Rx-related ring data */
+struct xgbe_rx_ring_data {
+ struct xgbe_buffer_data hdr; /* Header locations */
+ struct xgbe_buffer_data buf; /* Payload locations */
+
+ unsigned short hdr_len; /* Length of received header */
+ unsigned short len; /* Length of received packet */
+};
+
+/* Structure used to hold information related to the descriptor
+ * and the packet associated with the descriptor (always use
+ * the XGBE_GET_DESC_DATA macro to access this data from the ring)
+ */
+struct xgbe_ring_data {
+ struct xgbe_ring_desc *rdesc; /* Virtual address of descriptor */
+ dma_addr_t rdesc_dma; /* DMA address of descriptor */
+
+ struct sk_buff *skb; /* Virtual address of SKB */
+ dma_addr_t skb_dma; /* DMA address of SKB data */
+ unsigned int skb_dma_len; /* Length of SKB DMA area */
+
+ struct xgbe_tx_ring_data tx; /* Tx-related data */
+ struct xgbe_rx_ring_data rx; /* Rx-related data */
+
+ unsigned int mapped_as_page;
+
+ /* Incomplete receive save location. If the budget is exhausted
+ * or the last descriptor (last normal descriptor or a following
+ * context descriptor) has not been DMA'd yet, the current state
+ * of the receive processing needs to be saved.
+ */
+ unsigned int state_saved;
+ struct {
+ struct sk_buff *skb;
+ unsigned int len;
+ unsigned int error;
+ } state;
+};
+
+struct xgbe_ring {
+ /* Ring lock - used just for TX rings at the moment */
+ spinlock_t lock;
+
+ /* Per packet related information */
+ struct xgbe_packet_data packet_data;
+
+ /* Virtual/DMA addresses and count of allocated descriptor memory */
+ struct xgbe_ring_desc *rdesc;
+ dma_addr_t rdesc_dma;
+ unsigned int rdesc_count;
+
+	/* Array of descriptor data corresponding to the descriptor memory
+ * (always use the XGBE_GET_DESC_DATA macro to access this data)
+ */
+ struct xgbe_ring_data *rdata;
+
+ /* Page allocation for RX buffers */
+ struct xgbe_page_alloc rx_hdr_pa;
+ struct xgbe_page_alloc rx_buf_pa;
+ int node;
+
+ /* Ring index values
+ * cur - Tx: index of descriptor to be used for current transfer
+ * Rx: index of descriptor to check for packet availability
+ * dirty - Tx: index of descriptor to check for transfer complete
+ * Rx: index of descriptor to check for buffer reallocation
+ */
+ unsigned int cur;
+ unsigned int dirty;
+
+ /* Coalesce frame count used for interrupt bit setting */
+ unsigned int coalesce_count;
+
+ union {
+ struct {
+ unsigned int queue_stopped;
+ unsigned int xmit_more;
+ unsigned short cur_mss;
+ unsigned short cur_vlan_ctag;
+ } tx;
+ };
+} ____cacheline_aligned;
+
+/* Structure used to describe the descriptor rings associated with
+ * a DMA channel.
+ */
+struct xgbe_channel {
+ char name[16];
+
+ /* Address of private data area for device */
+ struct xgbe_prv_data *pdata;
+
+ /* Queue index and base address of queue's DMA registers */
+ unsigned int queue_index;
+ void __iomem *dma_regs;
+
+ /* Per channel interrupt irq number */
+ int dma_irq;
+ char dma_irq_name[IFNAMSIZ + 32];
+
+ /* Netdev related settings */
+ struct napi_struct napi;
+
+ /* Per channel interrupt enablement tracker */
+ unsigned int curr_ier;
+ unsigned int saved_ier;
+
+ unsigned int tx_timer_active;
+ struct timer_list tx_timer;
+
+ struct xgbe_ring *tx_ring;
+ struct xgbe_ring *rx_ring;
+
+ int node;
+ cpumask_t affinity_mask;
+} ____cacheline_aligned;
+
+enum xgbe_state {
+ XGBE_DOWN,
+ XGBE_LINK_INIT,
+ XGBE_LINK_ERR,
+ XGBE_STOPPED,
+};
+
+enum xgbe_int {
+ XGMAC_INT_DMA_CH_SR_TI,
+ XGMAC_INT_DMA_CH_SR_TPS,
+ XGMAC_INT_DMA_CH_SR_TBU,
+ XGMAC_INT_DMA_CH_SR_RI,
+ XGMAC_INT_DMA_CH_SR_RBU,
+ XGMAC_INT_DMA_CH_SR_RPS,
+ XGMAC_INT_DMA_CH_SR_TI_RI,
+ XGMAC_INT_DMA_CH_SR_FBE,
+ XGMAC_INT_DMA_ALL,
+};
+
+enum xgbe_int_state {
+ XGMAC_INT_STATE_SAVE,
+ XGMAC_INT_STATE_RESTORE,
+};
+
+enum xgbe_ecc_sec {
+ XGBE_ECC_SEC_TX,
+ XGBE_ECC_SEC_RX,
+ XGBE_ECC_SEC_DESC,
+};
+
+enum xgbe_speed {
+ XGBE_SPEED_1000 = 0,
+ XGBE_SPEED_2500,
+ XGBE_SPEED_10000,
+ XGBE_SPEEDS,
+};
+
+enum xgbe_xpcs_access {
+ XGBE_XPCS_ACCESS_V1 = 0,
+ XGBE_XPCS_ACCESS_V2,
+};
+
+enum xgbe_an_mode {
+ XGBE_AN_MODE_CL73 = 0,
+ XGBE_AN_MODE_CL73_REDRV,
+ XGBE_AN_MODE_CL37,
+ XGBE_AN_MODE_CL37_SGMII,
+ XGBE_AN_MODE_NONE,
+};
+
+enum xgbe_an {
+ XGBE_AN_READY = 0,
+ XGBE_AN_PAGE_RECEIVED,
+ XGBE_AN_INCOMPAT_LINK,
+ XGBE_AN_COMPLETE,
+ XGBE_AN_NO_LINK,
+ XGBE_AN_ERROR,
+};
+
+enum xgbe_rx {
+ XGBE_RX_BPA = 0,
+ XGBE_RX_XNP,
+ XGBE_RX_COMPLETE,
+ XGBE_RX_ERROR,
+};
+
+enum xgbe_mode {
+ XGBE_MODE_KX_1000 = 0,
+ XGBE_MODE_KX_2500,
+ XGBE_MODE_KR,
+ XGBE_MODE_X,
+ XGBE_MODE_SGMII_100,
+ XGBE_MODE_SGMII_1000,
+ XGBE_MODE_SFI,
+ XGBE_MODE_UNKNOWN,
+};
+
+enum xgbe_speedset {
+ XGBE_SPEEDSET_1000_10000 = 0,
+ XGBE_SPEEDSET_2500_10000,
+};
+
+enum xgbe_mdio_mode {
+ XGBE_MDIO_MODE_NONE = 0,
+ XGBE_MDIO_MODE_CL22,
+ XGBE_MDIO_MODE_CL45,
+};
+
+struct xgbe_phy {
+ struct ethtool_link_ksettings lks;
+
+ int address;
+
+ int autoneg;
+ int speed;
+ int duplex;
+
+ int link;
+
+ int pause_autoneg;
+ int tx_pause;
+ int rx_pause;
+};
+
+enum xgbe_i2c_cmd {
+ XGBE_I2C_CMD_READ = 0,
+ XGBE_I2C_CMD_WRITE,
+};
+
+struct xgbe_i2c_op {
+ enum xgbe_i2c_cmd cmd;
+
+ unsigned int target;
+
+ void *buf;
+ unsigned int len;
+};
+
+struct xgbe_i2c_op_state {
+ struct xgbe_i2c_op *op;
+
+ unsigned int tx_len;
+ unsigned char *tx_buf;
+
+ unsigned int rx_len;
+ unsigned char *rx_buf;
+
+ unsigned int tx_abort_source;
+
+ int ret;
+};
+
+struct xgbe_i2c {
+ unsigned int started;
+ unsigned int max_speed_mode;
+ unsigned int rx_fifo_size;
+ unsigned int tx_fifo_size;
+
+ struct xgbe_i2c_op_state op_state;
+};
+
+struct xgbe_mmc_stats {
+ /* Tx Stats */
+ u64 txoctetcount_gb;
+ u64 txframecount_gb;
+ u64 txbroadcastframes_g;
+ u64 txmulticastframes_g;
+ u64 tx64octets_gb;
+ u64 tx65to127octets_gb;
+ u64 tx128to255octets_gb;
+ u64 tx256to511octets_gb;
+ u64 tx512to1023octets_gb;
+ u64 tx1024tomaxoctets_gb;
+ u64 txunicastframes_gb;
+ u64 txmulticastframes_gb;
+ u64 txbroadcastframes_gb;
+ u64 txunderflowerror;
+ u64 txoctetcount_g;
+ u64 txframecount_g;
+ u64 txpauseframes;
+ u64 txvlanframes_g;
+
+ /* Rx Stats */
+ u64 rxframecount_gb;
+ u64 rxoctetcount_gb;
+ u64 rxoctetcount_g;
+ u64 rxbroadcastframes_g;
+ u64 rxmulticastframes_g;
+ u64 rxcrcerror;
+ u64 rxrunterror;
+ u64 rxjabbererror;
+ u64 rxundersize_g;
+ u64 rxoversize_g;
+ u64 rx64octets_gb;
+ u64 rx65to127octets_gb;
+ u64 rx128to255octets_gb;
+ u64 rx256to511octets_gb;
+ u64 rx512to1023octets_gb;
+ u64 rx1024tomaxoctets_gb;
+ u64 rxunicastframes_g;
+ u64 rxlengtherror;
+ u64 rxoutofrangetype;
+ u64 rxpauseframes;
+ u64 rxfifooverflow;
+ u64 rxvlanframes_gb;
+ u64 rxwatchdogerror;
+};
+
+struct xgbe_ext_stats {
+ u64 tx_tso_packets;
+ u64 rx_split_header_packets;
+ u64 rx_buffer_unavailable;
+
+ u64 txq_packets[XGBE_MAX_DMA_CHANNELS];
+ u64 txq_bytes[XGBE_MAX_DMA_CHANNELS];
+ u64 rxq_packets[XGBE_MAX_DMA_CHANNELS];
+ u64 rxq_bytes[XGBE_MAX_DMA_CHANNELS];
+
+ u64 tx_vxlan_packets;
+ u64 rx_vxlan_packets;
+ u64 rx_csum_errors;
+ u64 rx_vxlan_csum_errors;
+};
+
+struct xgbe_hw_if {
+ int (*tx_complete)(struct xgbe_ring_desc *);
+
+ int (*set_mac_address)(struct xgbe_prv_data *, u8 *addr);
+ int (*config_rx_mode)(struct xgbe_prv_data *);
+
+ int (*enable_rx_csum)(struct xgbe_prv_data *);
+ int (*disable_rx_csum)(struct xgbe_prv_data *);
+
+ int (*enable_rx_vlan_stripping)(struct xgbe_prv_data *);
+ int (*disable_rx_vlan_stripping)(struct xgbe_prv_data *);
+ int (*enable_rx_vlan_filtering)(struct xgbe_prv_data *);
+ int (*disable_rx_vlan_filtering)(struct xgbe_prv_data *);
+ int (*update_vlan_hash_table)(struct xgbe_prv_data *);
+
+ int (*read_mmd_regs)(struct xgbe_prv_data *, int, int);
+ void (*write_mmd_regs)(struct xgbe_prv_data *, int, int, int);
+ int (*set_speed)(struct xgbe_prv_data *, int);
+
+ int (*set_ext_mii_mode)(struct xgbe_prv_data *, unsigned int,
+ enum xgbe_mdio_mode);
+ int (*read_ext_mii_regs)(struct xgbe_prv_data *, int, int);
+ int (*write_ext_mii_regs)(struct xgbe_prv_data *, int, int, u16);
+
+ int (*set_gpio)(struct xgbe_prv_data *, unsigned int);
+ int (*clr_gpio)(struct xgbe_prv_data *, unsigned int);
+
+ void (*enable_tx)(struct xgbe_prv_data *);
+ void (*disable_tx)(struct xgbe_prv_data *);
+ void (*enable_rx)(struct xgbe_prv_data *);
+ void (*disable_rx)(struct xgbe_prv_data *);
+
+ void (*powerup_tx)(struct xgbe_prv_data *);
+ void (*powerdown_tx)(struct xgbe_prv_data *);
+ void (*powerup_rx)(struct xgbe_prv_data *);
+ void (*powerdown_rx)(struct xgbe_prv_data *);
+
+ int (*init)(struct xgbe_prv_data *);
+ int (*exit)(struct xgbe_prv_data *);
+
+ int (*enable_int)(struct xgbe_channel *, enum xgbe_int);
+ int (*disable_int)(struct xgbe_channel *, enum xgbe_int);
+ void (*dev_xmit)(struct xgbe_channel *);
+ int (*dev_read)(struct xgbe_channel *);
+ void (*tx_desc_init)(struct xgbe_channel *);
+ void (*rx_desc_init)(struct xgbe_channel *);
+ void (*tx_desc_reset)(struct xgbe_ring_data *);
+ void (*rx_desc_reset)(struct xgbe_prv_data *, struct xgbe_ring_data *,
+ unsigned int);
+ int (*is_last_desc)(struct xgbe_ring_desc *);
+ int (*is_context_desc)(struct xgbe_ring_desc *);
+ void (*tx_start_xmit)(struct xgbe_channel *, struct xgbe_ring *);
+
+ /* For FLOW ctrl */
+ int (*config_tx_flow_control)(struct xgbe_prv_data *);
+ int (*config_rx_flow_control)(struct xgbe_prv_data *);
+
+ /* For RX coalescing */
+ int (*config_rx_coalesce)(struct xgbe_prv_data *);
+ int (*config_tx_coalesce)(struct xgbe_prv_data *);
+ unsigned int (*usec_to_riwt)(struct xgbe_prv_data *, unsigned int);
+ unsigned int (*riwt_to_usec)(struct xgbe_prv_data *, unsigned int);
+
+ /* For RX and TX threshold config */
+ int (*config_rx_threshold)(struct xgbe_prv_data *, unsigned int);
+ int (*config_tx_threshold)(struct xgbe_prv_data *, unsigned int);
+
+ /* For RX and TX Store and Forward Mode config */
+ int (*config_rsf_mode)(struct xgbe_prv_data *, unsigned int);
+ int (*config_tsf_mode)(struct xgbe_prv_data *, unsigned int);
+
+ /* For TX DMA Operate on Second Frame config */
+ int (*config_osp_mode)(struct xgbe_prv_data *);
+
+ /* For MMC statistics */
+ void (*rx_mmc_int)(struct xgbe_prv_data *);
+ void (*tx_mmc_int)(struct xgbe_prv_data *);
+ void (*read_mmc_stats)(struct xgbe_prv_data *);
+
+ /* For Timestamp config */
+ int (*config_tstamp)(struct xgbe_prv_data *, unsigned int);
+ void (*update_tstamp_addend)(struct xgbe_prv_data *, unsigned int);
+ void (*set_tstamp_time)(struct xgbe_prv_data *, unsigned int sec,
+ unsigned int nsec);
+ u64 (*get_tstamp_time)(struct xgbe_prv_data *);
+ u64 (*get_tx_tstamp)(struct xgbe_prv_data *);
+
+ /* For Data Center Bridging config */
+ void (*config_tc)(struct xgbe_prv_data *);
+ void (*config_dcb_tc)(struct xgbe_prv_data *);
+ void (*config_dcb_pfc)(struct xgbe_prv_data *);
+
+ /* For Receive Side Scaling */
+ int (*enable_rss)(struct xgbe_prv_data *);
+ int (*disable_rss)(struct xgbe_prv_data *);
+ int (*set_rss_hash_key)(struct xgbe_prv_data *, const u8 *);
+ int (*set_rss_lookup_table)(struct xgbe_prv_data *, const u32 *);
+
+ /* For ECC */
+ void (*disable_ecc_ded)(struct xgbe_prv_data *);
+ void (*disable_ecc_sec)(struct xgbe_prv_data *, enum xgbe_ecc_sec);
+
+ /* For VXLAN */
+ void (*enable_vxlan)(struct xgbe_prv_data *);
+ void (*disable_vxlan)(struct xgbe_prv_data *);
+ void (*set_vxlan_id)(struct xgbe_prv_data *);
+};
+
+/* This structure represents implementation-specific routines for an
+ * implementation of a PHY. All routines are required unless noted below.
+ * Optional routines:
+ * an_pre, an_post
+ * kr_training_pre, kr_training_post
+ * module_info, module_eeprom
+ */
+struct xgbe_phy_impl_if {
+ /* Perform Setup/teardown actions */
+ int (*init)(struct xgbe_prv_data *);
+ void (*exit)(struct xgbe_prv_data *);
+
+ /* Perform start/stop specific actions */
+ int (*reset)(struct xgbe_prv_data *);
+ int (*start)(struct xgbe_prv_data *);
+ void (*stop)(struct xgbe_prv_data *);
+
+ /* Return the link status */
+ int (*link_status)(struct xgbe_prv_data *, int *);
+
+ /* Indicate if a particular speed is valid */
+ bool (*valid_speed)(struct xgbe_prv_data *, int);
+
+ /* Check if the specified mode can/should be used */
+ bool (*use_mode)(struct xgbe_prv_data *, enum xgbe_mode);
+ /* Switch the PHY into various modes */
+ void (*set_mode)(struct xgbe_prv_data *, enum xgbe_mode);
+ /* Retrieve mode needed for a specific speed */
+ enum xgbe_mode (*get_mode)(struct xgbe_prv_data *, int);
+ /* Retrieve new/next mode when trying to auto-negotiate */
+ enum xgbe_mode (*switch_mode)(struct xgbe_prv_data *);
+ /* Retrieve current mode */
+ enum xgbe_mode (*cur_mode)(struct xgbe_prv_data *);
+
+ /* Retrieve current auto-negotiation mode */
+ enum xgbe_an_mode (*an_mode)(struct xgbe_prv_data *);
+
+ /* Configure auto-negotiation settings */
+ int (*an_config)(struct xgbe_prv_data *);
+
+ /* Set/override auto-negotiation advertisement settings */
+ void (*an_advertising)(struct xgbe_prv_data *,
+ struct ethtool_link_ksettings *);
+
+ /* Process results of auto-negotiation */
+ enum xgbe_mode (*an_outcome)(struct xgbe_prv_data *);
+
+ /* Pre/Post auto-negotiation support */
+ void (*an_pre)(struct xgbe_prv_data *);
+ void (*an_post)(struct xgbe_prv_data *);
+
+ /* Pre/Post KR training enablement support */
+ void (*kr_training_pre)(struct xgbe_prv_data *);
+ void (*kr_training_post)(struct xgbe_prv_data *);
+
+ /* SFP module related info */
+ int (*module_info)(struct xgbe_prv_data *pdata,
+ struct ethtool_modinfo *modinfo);
+ int (*module_eeprom)(struct xgbe_prv_data *pdata,
+ struct ethtool_eeprom *eeprom, u8 *data);
+};
+
+struct xgbe_phy_if {
+ /* For PHY setup/teardown */
+ int (*phy_init)(struct xgbe_prv_data *);
+ void (*phy_exit)(struct xgbe_prv_data *);
+
+ /* For PHY support when setting device up/down */
+ int (*phy_reset)(struct xgbe_prv_data *);
+ int (*phy_start)(struct xgbe_prv_data *);
+ void (*phy_stop)(struct xgbe_prv_data *);
+
+ /* For PHY support while device is up */
+ void (*phy_status)(struct xgbe_prv_data *);
+ int (*phy_config_aneg)(struct xgbe_prv_data *);
+
+ /* For PHY settings validation */
+ bool (*phy_valid_speed)(struct xgbe_prv_data *, int);
+
+ /* For single interrupt support */
+ irqreturn_t (*an_isr)(struct xgbe_prv_data *);
+
+ /* For ethtool PHY support */
+ int (*module_info)(struct xgbe_prv_data *pdata,
+ struct ethtool_modinfo *modinfo);
+ int (*module_eeprom)(struct xgbe_prv_data *pdata,
+ struct ethtool_eeprom *eeprom, u8 *data);
+
+	/* PHY implementation-specific services */
+ struct xgbe_phy_impl_if phy_impl;
+};
+
+struct xgbe_i2c_if {
+ /* For initial I2C setup */
+ int (*i2c_init)(struct xgbe_prv_data *);
+
+ /* For I2C support when setting device up/down */
+ int (*i2c_start)(struct xgbe_prv_data *);
+ void (*i2c_stop)(struct xgbe_prv_data *);
+
+ /* For performing I2C operations */
+ int (*i2c_xfer)(struct xgbe_prv_data *, struct xgbe_i2c_op *);
+
+ /* For single interrupt support */
+ irqreturn_t (*i2c_isr)(struct xgbe_prv_data *);
+};
+
+struct xgbe_desc_if {
+ int (*alloc_ring_resources)(struct xgbe_prv_data *);
+ void (*free_ring_resources)(struct xgbe_prv_data *);
+ int (*map_tx_skb)(struct xgbe_channel *, struct sk_buff *);
+ int (*map_rx_buffer)(struct xgbe_prv_data *, struct xgbe_ring *,
+ struct xgbe_ring_data *);
+ void (*unmap_rdata)(struct xgbe_prv_data *, struct xgbe_ring_data *);
+ void (*wrapper_tx_desc_init)(struct xgbe_prv_data *);
+ void (*wrapper_rx_desc_init)(struct xgbe_prv_data *);
+};
+
+/* This structure contains flags that indicate what hardware features
+ * or configurations are present in the device.
+ */
+struct xgbe_hw_features {
+ /* HW Version */
+ unsigned int version;
+
+ /* HW Feature Register0 */
+ unsigned int gmii; /* 1000 Mbps support */
+ unsigned int vlhash; /* VLAN Hash Filter */
+ unsigned int sma; /* SMA(MDIO) Interface */
+ unsigned int rwk; /* PMT remote wake-up packet */
+ unsigned int mgk; /* PMT magic packet */
+ unsigned int mmc; /* RMON module */
+ unsigned int aoe; /* ARP Offload */
+ unsigned int ts; /* IEEE 1588-2008 Advanced Timestamp */
+ unsigned int eee; /* Energy Efficient Ethernet */
+ unsigned int tx_coe; /* Tx Checksum Offload */
+ unsigned int rx_coe; /* Rx Checksum Offload */
+ unsigned int addn_mac; /* Additional MAC Addresses */
+ unsigned int ts_src; /* Timestamp Source */
+ unsigned int sa_vlan_ins; /* Source Address or VLAN Insertion */
+ unsigned int vxn; /* VXLAN/NVGRE */
+
+ /* HW Feature Register1 */
+ unsigned int rx_fifo_size; /* MTL Receive FIFO Size */
+ unsigned int tx_fifo_size; /* MTL Transmit FIFO Size */
+	unsigned int adv_ts_hi;	/* Advanced Timestamping High Word */
+ unsigned int dma_width; /* DMA width */
+ unsigned int dcb; /* DCB Feature */
+ unsigned int sph; /* Split Header Feature */
+ unsigned int tso; /* TCP Segmentation Offload */
+ unsigned int dma_debug; /* DMA Debug Registers */
+ unsigned int rss; /* Receive Side Scaling */
+ unsigned int tc_cnt; /* Number of Traffic Classes */
+ unsigned int hash_table_size; /* Hash Table Size */
+ unsigned int l3l4_filter_num; /* Number of L3-L4 Filters */
+
+ /* HW Feature Register2 */
+ unsigned int rx_q_cnt; /* Number of MTL Receive Queues */
+ unsigned int tx_q_cnt; /* Number of MTL Transmit Queues */
+ unsigned int rx_ch_cnt; /* Number of DMA Receive Channels */
+ unsigned int tx_ch_cnt; /* Number of DMA Transmit Channels */
+ unsigned int pps_out_num; /* Number of PPS outputs */
+ unsigned int aux_snap_num; /* Number of Aux snapshot inputs */
+};
+
+struct xgbe_version_data {
+ void (*init_function_ptrs_phy_impl)(struct xgbe_phy_if *);
+ enum xgbe_xpcs_access xpcs_access;
+ unsigned int mmc_64bit;
+ unsigned int tx_max_fifo_size;
+ unsigned int rx_max_fifo_size;
+ unsigned int tx_tstamp_workaround;
+ unsigned int ecc_support;
+ unsigned int i2c_support;
+ unsigned int irq_reissue_support;
+ unsigned int tx_desc_prefetch;
+ unsigned int rx_desc_prefetch;
+ unsigned int an_cdr_workaround;
+};
+
+struct xgbe_prv_data {
+ struct net_device *netdev;
+ struct pci_dev *pcidev;
+ struct platform_device *platdev;
+ struct acpi_device *adev;
+ struct device *dev;
+ struct platform_device *phy_platdev;
+ struct device *phy_dev;
+
+ /* Version related data */
+ struct xgbe_version_data *vdata;
+
+ /* ACPI or DT flag */
+ unsigned int use_acpi;
+
+ /* XGMAC/XPCS related mmio registers */
+ void __iomem *xgmac_regs; /* XGMAC CSRs */
+ void __iomem *xpcs_regs; /* XPCS MMD registers */
+ void __iomem *rxtx_regs; /* SerDes Rx/Tx CSRs */
+ void __iomem *sir0_regs; /* SerDes integration registers (1/2) */
+ void __iomem *sir1_regs; /* SerDes integration registers (2/2) */
+ void __iomem *xprop_regs; /* XGBE property registers */
+ void __iomem *xi2c_regs; /* XGBE I2C CSRs */
+
+ /* Port property registers */
+ unsigned int pp0;
+ unsigned int pp1;
+ unsigned int pp2;
+ unsigned int pp3;
+ unsigned int pp4;
+
+ /* Overall device lock */
+ spinlock_t lock;
+
+ /* XPCS indirect addressing lock */
+ spinlock_t xpcs_lock;
+ unsigned int xpcs_window_def_reg;
+ unsigned int xpcs_window_sel_reg;
+ unsigned int xpcs_window;
+ unsigned int xpcs_window_size;
+ unsigned int xpcs_window_mask;
+
+ /* RSS addressing mutex */
+ struct mutex rss_mutex;
+
+ /* Flags representing xgbe_state */
+ unsigned long dev_state;
+
+ /* ECC support */
+ unsigned long tx_sec_period;
+ unsigned long tx_ded_period;
+ unsigned long rx_sec_period;
+ unsigned long rx_ded_period;
+ unsigned long desc_sec_period;
+ unsigned long desc_ded_period;
+
+ unsigned int tx_sec_count;
+ unsigned int tx_ded_count;
+ unsigned int rx_sec_count;
+ unsigned int rx_ded_count;
+ unsigned int desc_ded_count;
+ unsigned int desc_sec_count;
+
+ int dev_irq;
+ int ecc_irq;
+ int i2c_irq;
+ int channel_irq[XGBE_MAX_DMA_CHANNELS];
+
+ unsigned int per_channel_irq;
+ unsigned int irq_count;
+ unsigned int channel_irq_count;
+ unsigned int channel_irq_mode;
+
+ char ecc_name[IFNAMSIZ + 32];
+
+ struct xgbe_hw_if hw_if;
+ struct xgbe_phy_if phy_if;
+ struct xgbe_desc_if desc_if;
+ struct xgbe_i2c_if i2c_if;
+
+ /* AXI DMA settings */
+ unsigned int coherent;
+ unsigned int arcr;
+ unsigned int awcr;
+ unsigned int awarcr;
+
+ /* Service routine support */
+ struct workqueue_struct *dev_workqueue;
+ struct work_struct service_work;
+ struct timer_list service_timer;
+
+ /* Rings for Tx/Rx on a DMA channel */
+ struct xgbe_channel *channel[XGBE_MAX_DMA_CHANNELS];
+ unsigned int tx_max_channel_count;
+ unsigned int rx_max_channel_count;
+ unsigned int channel_count;
+ unsigned int tx_ring_count;
+ unsigned int tx_desc_count;
+ unsigned int rx_ring_count;
+ unsigned int rx_desc_count;
+
+ unsigned int new_tx_ring_count;
+ unsigned int new_rx_ring_count;
+
+ unsigned int tx_max_q_count;
+ unsigned int rx_max_q_count;
+ unsigned int tx_q_count;
+ unsigned int rx_q_count;
+
+ /* Tx/Rx common settings */
+ unsigned int blen;
+ unsigned int pbl;
+ unsigned int aal;
+ unsigned int rd_osr_limit;
+ unsigned int wr_osr_limit;
+
+ /* Tx settings */
+ unsigned int tx_sf_mode;
+ unsigned int tx_threshold;
+ unsigned int tx_osp_mode;
+ unsigned int tx_max_fifo_size;
+
+ /* Rx settings */
+ unsigned int rx_sf_mode;
+ unsigned int rx_threshold;
+ unsigned int rx_max_fifo_size;
+
+ /* Tx coalescing settings */
+ unsigned int tx_usecs;
+ unsigned int tx_frames;
+
+ /* Rx coalescing settings */
+ unsigned int rx_riwt;
+ unsigned int rx_usecs;
+ unsigned int rx_frames;
+
+ /* Current Rx buffer size */
+ unsigned int rx_buf_size;
+
+ /* Flow control settings */
+ unsigned int pause_autoneg;
+ unsigned int tx_pause;
+ unsigned int rx_pause;
+ unsigned int rx_rfa[XGBE_MAX_QUEUES];
+ unsigned int rx_rfd[XGBE_MAX_QUEUES];
+
+ /* Receive Side Scaling settings */
+ u8 rss_key[XGBE_RSS_HASH_KEY_SIZE];
+ u32 rss_table[XGBE_RSS_MAX_TABLE_SIZE];
+ u32 rss_options;
+
+ /* VXLAN settings */
+ u16 vxlan_port;
+
+ /* Netdev related settings */
+ unsigned char mac_addr[ETH_ALEN];
+ netdev_features_t netdev_features;
+ struct napi_struct napi;
+ struct xgbe_mmc_stats mmc_stats;
+ struct xgbe_ext_stats ext_stats;
+
+ /* Filtering support */
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+
+ /* Device clocks */
+ struct clk *sysclk;
+ unsigned long sysclk_rate;
+ struct clk *ptpclk;
+ unsigned long ptpclk_rate;
+
+ /* Timestamp support */
+ spinlock_t tstamp_lock;
+ struct ptp_clock_info ptp_clock_info;
+ struct ptp_clock *ptp_clock;
+ struct hwtstamp_config tstamp_config;
+ struct cyclecounter tstamp_cc;
+ struct timecounter tstamp_tc;
+ unsigned int tstamp_addend;
+ struct work_struct tx_tstamp_work;
+ struct sk_buff *tx_tstamp_skb;
+ u64 tx_tstamp;
+
+ /* DCB support */
+ struct ieee_ets *ets;
+ struct ieee_pfc *pfc;
+ unsigned int q2tc_map[XGBE_MAX_QUEUES];
+ unsigned int prio2q_map[IEEE_8021QAZ_MAX_TCS];
+ unsigned int pfcq[XGBE_MAX_QUEUES];
+ unsigned int pfc_rfa;
+ u8 num_tcs;
+
+ /* Hardware features of the device */
+ struct xgbe_hw_features hw_feat;
+
+ /* Device work structures */
+ struct work_struct restart_work;
+ struct work_struct stopdev_work;
+
+ /* Keeps track of power mode */
+ unsigned int power_down;
+
+ /* Network interface message level setting */
+ u32 msg_enable;
+
+ /* Current PHY settings */
+ phy_interface_t phy_mode;
+ int phy_link;
+ int phy_speed;
+
+ /* MDIO/PHY related settings */
+ unsigned int phy_started;
+ void *phy_data;
+ struct xgbe_phy phy;
+ int mdio_mmd;
+ unsigned long link_check;
+ struct completion mdio_complete;
+
+ unsigned int kr_redrv;
+
+ char an_name[IFNAMSIZ + 32];
+ struct workqueue_struct *an_workqueue;
+
+ int an_irq;
+ struct work_struct an_irq_work;
+
+ /* Auto-negotiation state machine support */
+ unsigned int an_int;
+ unsigned int an_status;
+ struct mutex an_mutex;
+ enum xgbe_an an_result;
+ enum xgbe_an an_state;
+ enum xgbe_rx kr_state;
+ enum xgbe_rx kx_state;
+ struct work_struct an_work;
+ unsigned int an_again;
+ unsigned int an_supported;
+ unsigned int parallel_detect;
+ unsigned int fec_ability;
+ unsigned long an_start;
+ unsigned long kr_start_time;
+ enum xgbe_an_mode an_mode;
+
+ /* I2C support */
+ struct xgbe_i2c i2c;
+ struct mutex i2c_mutex;
+ struct completion i2c_complete;
+ char i2c_name[IFNAMSIZ + 32];
+
+ unsigned int lpm_ctrl; /* CTRL1 for resume */
+
+ unsigned int isr_as_tasklet;
+ struct tasklet_struct tasklet_dev;
+ struct tasklet_struct tasklet_ecc;
+ struct tasklet_struct tasklet_i2c;
+ struct tasklet_struct tasklet_an;
+
+ struct dentry *xgbe_debugfs;
+
+ unsigned int debugfs_xgmac_reg;
+
+ unsigned int debugfs_xpcs_mmd;
+ unsigned int debugfs_xpcs_reg;
+
+ unsigned int debugfs_xprop_reg;
+
+ unsigned int debugfs_xi2c_reg;
+
+ bool debugfs_an_cdr_workaround;
+ bool debugfs_an_cdr_track_early;
+};
+
+/* Function prototypes */
+struct xgbe_prv_data *xgbe_alloc_pdata(struct device *);
+void xgbe_free_pdata(struct xgbe_prv_data *);
+void xgbe_set_counts(struct xgbe_prv_data *);
+int xgbe_config_netdev(struct xgbe_prv_data *);
+void xgbe_deconfig_netdev(struct xgbe_prv_data *);
+
+int xgbe_platform_init(void);
+void xgbe_platform_exit(void);
+#ifdef CONFIG_PCI
+int xgbe_pci_init(void);
+void xgbe_pci_exit(void);
+#else
+static inline int xgbe_pci_init(void) { return 0; }
+static inline void xgbe_pci_exit(void) { }
+#endif
+
+void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *);
+void xgbe_init_function_ptrs_phy(struct xgbe_phy_if *);
+void xgbe_init_function_ptrs_phy_v1(struct xgbe_phy_if *);
+void xgbe_init_function_ptrs_phy_v2(struct xgbe_phy_if *);
+void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *);
+void xgbe_init_function_ptrs_i2c(struct xgbe_i2c_if *);
+const struct net_device_ops *xgbe_get_netdev_ops(void);
+const struct ethtool_ops *xgbe_get_ethtool_ops(void);
+const struct udp_tunnel_nic_info *xgbe_get_udp_tunnel_info(void);
+
+#ifdef CONFIG_AMD_XGBE_DCB
+const struct dcbnl_rtnl_ops *xgbe_get_dcbnl_ops(void);
+#endif
+
+void xgbe_ptp_register(struct xgbe_prv_data *);
+void xgbe_ptp_unregister(struct xgbe_prv_data *);
+void xgbe_dump_tx_desc(struct xgbe_prv_data *, struct xgbe_ring *,
+ unsigned int, unsigned int, unsigned int);
+void xgbe_dump_rx_desc(struct xgbe_prv_data *, struct xgbe_ring *,
+ unsigned int);
+void xgbe_print_pkt(struct net_device *, struct sk_buff *, bool);
+void xgbe_get_all_hw_features(struct xgbe_prv_data *);
+int xgbe_powerup(struct net_device *, unsigned int);
+int xgbe_powerdown(struct net_device *, unsigned int);
+void xgbe_init_rx_coalesce(struct xgbe_prv_data *);
+void xgbe_init_tx_coalesce(struct xgbe_prv_data *);
+void xgbe_restart_dev(struct xgbe_prv_data *pdata);
+void xgbe_full_restart_dev(struct xgbe_prv_data *pdata);
+
+#ifdef CONFIG_DEBUG_FS
+void xgbe_debugfs_init(struct xgbe_prv_data *);
+void xgbe_debugfs_exit(struct xgbe_prv_data *);
+void xgbe_debugfs_rename(struct xgbe_prv_data *pdata);
+#else
+static inline void xgbe_debugfs_init(struct xgbe_prv_data *pdata) {}
+static inline void xgbe_debugfs_exit(struct xgbe_prv_data *pdata) {}
+static inline void xgbe_debugfs_rename(struct xgbe_prv_data *pdata) {}
+#endif /* CONFIG_DEBUG_FS */
+
+/* NOTE: Uncomment for function trace log messages in KERNEL LOG */
+#if 0
+#define YDEBUG
+#define YDEBUG_MDIO
+#endif
+
+/* For debug prints */
+#ifdef YDEBUG
+#define DBGPR(x...) pr_alert(x)
+#else
+#define DBGPR(x...) do { } while (0)
+#endif
+
+#ifdef YDEBUG_MDIO
+#define DBGPR_MDIO(x...) pr_alert(x)
+#else
+#define DBGPR_MDIO(x...) do { } while (0)
+#endif
+
+#endif