author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-27 10:05:51 +0000
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-27 10:05:51 +0000
commit | 5d1646d90e1f2cceb9f0828f4b28318cd0ec7744 (patch)
tree | a94efe259b9009378be6d90eb30d2b019d95c194 /drivers/net/ethernet/sgi
parent | Initial commit. (diff)
download | linux-5d1646d90e1f2cceb9f0828f4b28318cd0ec7744.tar.xz, linux-5d1646d90e1f2cceb9f0828f4b28318cd0ec7744.zip
Adding upstream version 5.10.209. (tag: upstream/5.10.209)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/net/ethernet/sgi')
-rw-r--r-- | drivers/net/ethernet/sgi/Kconfig | 33
-rw-r--r-- | drivers/net/ethernet/sgi/Makefile | 7
-rw-r--r-- | drivers/net/ethernet/sgi/ioc3-eth.c | 1288
-rw-r--r-- | drivers/net/ethernet/sgi/meth.c | 880
-rw-r--r-- | drivers/net/ethernet/sgi/meth.h | 227
5 files changed, 2435 insertions, 0 deletions
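For orientation before the diff body: the new Kconfig introduces the NET_VENDOR_SGI guard symbol plus one option per driver, and the Makefile hooks both objects into kbuild. As a rough illustration (a sketch, not part of this commit), a .config fragment selecting these drivers on a platform that already satisfies their stated dependencies would look like:

    CONFIG_NET_VENDOR_SGI=y
    # SGI_IOC3_ETH is a bool, so the IOC3 driver can only be built in
    # (requires PCI and SGI_MFD_IOC3):
    CONFIG_SGI_IOC3_ETH=y
    # SGI_O2MACE_ETH is tristate, so =m builds the O2 MACE driver as
    # the "meth" module (requires SGI_IP32=y):
    CONFIG_SGI_O2MACE_ETH=m

Whether either option is visible at all is gated by the guard symbol's condition, (PCI && SGI_MFD_IOC3) || SGI_IP32, as defined in the Kconfig below.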
diff --git a/drivers/net/ethernet/sgi/Kconfig b/drivers/net/ethernet/sgi/Kconfig new file mode 100644 index 000000000..af66bb0a2 --- /dev/null +++ b/drivers/net/ethernet/sgi/Kconfig @@ -0,0 +1,33 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# SGI device configuration +# + +config NET_VENDOR_SGI + bool "SGI devices" + default y + depends on (PCI && SGI_MFD_IOC3) || SGI_IP32 + help + If you have a network (Ethernet) card belonging to this class, say Y. + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about SGI devices. If you say Y, you will be asked for + your specific card in the following questions. + +if NET_VENDOR_SGI + +config SGI_IOC3_ETH + bool "SGI IOC3 Ethernet" + depends on PCI && SGI_MFD_IOC3 + select CRC16 + select CRC32 + select MII + help + If you have a network (Ethernet) card of this type, say Y here. + +config SGI_O2MACE_ETH + tristate "SGI O2 MACE Fast Ethernet support" + depends on SGI_IP32=y + +endif # NET_VENDOR_SGI diff --git a/drivers/net/ethernet/sgi/Makefile b/drivers/net/ethernet/sgi/Makefile new file mode 100644 index 000000000..68eefbcf5 --- /dev/null +++ b/drivers/net/ethernet/sgi/Makefile @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Makefile for the SGI device drivers. +# + +obj-$(CONFIG_SGI_O2MACE_ETH) += meth.o +obj-$(CONFIG_SGI_IOC3_ETH) += ioc3-eth.o diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c new file mode 100644 index 000000000..2b29fd4cb --- /dev/null +++ b/drivers/net/ethernet/sgi/ioc3-eth.c @@ -0,0 +1,1288 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Driver for SGI's IOC3 based Ethernet cards as found in the PCI card. + * + * Copyright (C) 1999, 2000, 01, 03, 06 Ralf Baechle + * Copyright (C) 1995, 1999, 2000, 2001 by Silicon Graphics, Inc. + * + * References: + * o IOC3 ASIC specification 4.51, 1996-04-18 + * o IEEE 802.3 specification, 2000 edition + * o DP38840A Specification, National Semiconductor, March 1997 + * + * To do: + * + * o Use prefetching for large packets. What is a good lower limit for + * prefetching? + * o Use hardware checksums. + * o Which PHYs might possibly be attached to the IOC3 in real live, + * which workarounds are required for them? Do we ever have Lucent's? + * o For the 2.5 branch kill the mii-tool ioctls. + */ + +#define IOC3_NAME "ioc3-eth" +#define IOC3_VERSION "2.6.3-4" + +#include <linux/delay.h> +#include <linux/kernel.h> +#include <linux/mm.h> +#include <linux/errno.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/crc16.h> +#include <linux/crc32.h> +#include <linux/mii.h> +#include <linux/in.h> +#include <linux/io.h> +#include <linux/ip.h> +#include <linux/tcp.h> +#include <linux/udp.h> +#include <linux/gfp.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/ethtool.h> +#include <linux/skbuff.h> +#include <linux/dma-mapping.h> +#include <linux/platform_device.h> +#include <linux/nvmem-consumer.h> + +#include <net/ip.h> + +#include <asm/sn/ioc3.h> +#include <asm/pci/bridge.h> + +#define CRC16_INIT 0 +#define CRC16_VALID 0xb001 + +/* Number of RX buffers. This is tunable in the range of 16 <= x < 512. + * The value must be a power of two. 
+ */ +#define RX_BUFFS 64 +#define RX_RING_ENTRIES 512 /* fixed in hardware */ +#define RX_RING_MASK (RX_RING_ENTRIES - 1) +#define RX_RING_SIZE (RX_RING_ENTRIES * sizeof(u64)) + +/* 128 TX buffers (not tunable) */ +#define TX_RING_ENTRIES 128 +#define TX_RING_MASK (TX_RING_ENTRIES - 1) +#define TX_RING_SIZE (TX_RING_ENTRIES * sizeof(struct ioc3_etxd)) + +/* IOC3 does dma transfers in 128 byte blocks */ +#define IOC3_DMA_XFER_LEN 128UL + +/* Every RX buffer starts with 8 byte descriptor data */ +#define RX_OFFSET (sizeof(struct ioc3_erxbuf) + NET_IP_ALIGN) +#define RX_BUF_SIZE (13 * IOC3_DMA_XFER_LEN) + +#define ETCSR_FD ((21 << ETCSR_IPGR2_SHIFT) | (21 << ETCSR_IPGR1_SHIFT) | 21) +#define ETCSR_HD ((17 << ETCSR_IPGR2_SHIFT) | (11 << ETCSR_IPGR1_SHIFT) | 21) + +/* Private per NIC data of the driver. */ +struct ioc3_private { + struct ioc3_ethregs *regs; + struct device *dma_dev; + u32 *ssram; + unsigned long *rxr; /* pointer to receiver ring */ + void *tx_ring; + struct ioc3_etxd *txr; + dma_addr_t rxr_dma; + dma_addr_t txr_dma; + struct sk_buff *rx_skbs[RX_RING_ENTRIES]; + struct sk_buff *tx_skbs[TX_RING_ENTRIES]; + int rx_ci; /* RX consumer index */ + int rx_pi; /* RX producer index */ + int tx_ci; /* TX consumer index */ + int tx_pi; /* TX producer index */ + int txqlen; + u32 emcr, ehar_h, ehar_l; + spinlock_t ioc3_lock; + struct mii_if_info mii; + + /* Members used by autonegotiation */ + struct timer_list ioc3_timer; +}; + +static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); +static void ioc3_set_multicast_list(struct net_device *dev); +static netdev_tx_t ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev); +static void ioc3_timeout(struct net_device *dev, unsigned int txqueue); +static inline unsigned int ioc3_hash(const unsigned char *addr); +static void ioc3_start(struct ioc3_private *ip); +static inline void ioc3_stop(struct ioc3_private *ip); +static void ioc3_init(struct net_device *dev); +static int ioc3_alloc_rx_bufs(struct net_device *dev); +static void ioc3_free_rx_bufs(struct ioc3_private *ip); +static inline void ioc3_clean_tx_ring(struct ioc3_private *ip); + +static const struct ethtool_ops ioc3_ethtool_ops; + +static inline unsigned long aligned_rx_skb_addr(unsigned long addr) +{ + return (~addr + 1) & (IOC3_DMA_XFER_LEN - 1UL); +} + +static inline int ioc3_alloc_skb(struct ioc3_private *ip, struct sk_buff **skb, + struct ioc3_erxbuf **rxb, dma_addr_t *rxb_dma) +{ + struct sk_buff *new_skb; + dma_addr_t d; + int offset; + + new_skb = alloc_skb(RX_BUF_SIZE + IOC3_DMA_XFER_LEN - 1, GFP_ATOMIC); + if (!new_skb) + return -ENOMEM; + + /* ensure buffer is aligned to IOC3_DMA_XFER_LEN */ + offset = aligned_rx_skb_addr((unsigned long)new_skb->data); + if (offset) + skb_reserve(new_skb, offset); + + d = dma_map_single(ip->dma_dev, new_skb->data, + RX_BUF_SIZE, DMA_FROM_DEVICE); + + if (dma_mapping_error(ip->dma_dev, d)) { + dev_kfree_skb_any(new_skb); + return -ENOMEM; + } + *rxb_dma = d; + *rxb = (struct ioc3_erxbuf *)new_skb->data; + skb_reserve(new_skb, RX_OFFSET); + *skb = new_skb; + + return 0; +} + +#ifdef CONFIG_PCI_XTALK_BRIDGE +static inline unsigned long ioc3_map(dma_addr_t addr, unsigned long attr) +{ + return (addr & ~PCI64_ATTR_BAR) | attr; +} + +#define ERBAR_VAL (ERBAR_BARRIER_BIT << ERBAR_RXBARR_SHIFT) +#else +static inline unsigned long ioc3_map(dma_addr_t addr, unsigned long attr) +{ + return addr; +} + +#define ERBAR_VAL 0 +#endif + +static int ioc3eth_nvmem_match(struct device *dev, const void *data) +{ + const char *name = 
dev_name(dev); + const char *prefix = data; + int prefix_len; + + prefix_len = strlen(prefix); + if (strlen(name) < (prefix_len + 3)) + return 0; + + if (memcmp(prefix, name, prefix_len) != 0) + return 0; + + /* found nvmem device which is attached to our ioc3 + * now check for one wire family code 09, 89 and 91 + */ + if (memcmp(name + prefix_len, "09-", 3) == 0) + return 1; + if (memcmp(name + prefix_len, "89-", 3) == 0) + return 1; + if (memcmp(name + prefix_len, "91-", 3) == 0) + return 1; + + return 0; +} + +static int ioc3eth_get_mac_addr(struct resource *res, u8 mac_addr[6]) +{ + struct nvmem_device *nvmem; + char prefix[24]; + u8 prom[16]; + int ret; + int i; + + snprintf(prefix, sizeof(prefix), "ioc3-%012llx-", + res->start & ~0xffff); + + nvmem = nvmem_device_find(prefix, ioc3eth_nvmem_match); + if (IS_ERR(nvmem)) + return PTR_ERR(nvmem); + + ret = nvmem_device_read(nvmem, 0, 16, prom); + nvmem_device_put(nvmem); + if (ret < 0) + return ret; + + /* check, if content is valid */ + if (prom[0] != 0x0a || + crc16(CRC16_INIT, prom, 13) != CRC16_VALID) + return -EINVAL; + + for (i = 0; i < 6; i++) + mac_addr[i] = prom[10 - i]; + + return 0; +} + +static void __ioc3_set_mac_address(struct net_device *dev) +{ + struct ioc3_private *ip = netdev_priv(dev); + + writel((dev->dev_addr[5] << 8) | + dev->dev_addr[4], + &ip->regs->emar_h); + writel((dev->dev_addr[3] << 24) | + (dev->dev_addr[2] << 16) | + (dev->dev_addr[1] << 8) | + dev->dev_addr[0], + &ip->regs->emar_l); +} + +static int ioc3_set_mac_address(struct net_device *dev, void *addr) +{ + struct ioc3_private *ip = netdev_priv(dev); + struct sockaddr *sa = addr; + + memcpy(dev->dev_addr, sa->sa_data, dev->addr_len); + + spin_lock_irq(&ip->ioc3_lock); + __ioc3_set_mac_address(dev); + spin_unlock_irq(&ip->ioc3_lock); + + return 0; +} + +/* Caller must hold the ioc3_lock ever for MII readers. This is also + * used to protect the transmitter side but it's low contention. + */ +static int ioc3_mdio_read(struct net_device *dev, int phy, int reg) +{ + struct ioc3_private *ip = netdev_priv(dev); + struct ioc3_ethregs *regs = ip->regs; + + while (readl(®s->micr) & MICR_BUSY) + ; + writel((phy << MICR_PHYADDR_SHIFT) | reg | MICR_READTRIG, + ®s->micr); + while (readl(®s->micr) & MICR_BUSY) + ; + + return readl(®s->midr_r) & MIDR_DATA_MASK; +} + +static void ioc3_mdio_write(struct net_device *dev, int phy, int reg, int data) +{ + struct ioc3_private *ip = netdev_priv(dev); + struct ioc3_ethregs *regs = ip->regs; + + while (readl(®s->micr) & MICR_BUSY) + ; + writel(data, ®s->midr_w); + writel((phy << MICR_PHYADDR_SHIFT) | reg, ®s->micr); + while (readl(®s->micr) & MICR_BUSY) + ; +} + +static int ioc3_mii_init(struct ioc3_private *ip); + +static struct net_device_stats *ioc3_get_stats(struct net_device *dev) +{ + struct ioc3_private *ip = netdev_priv(dev); + struct ioc3_ethregs *regs = ip->regs; + + dev->stats.collisions += readl(®s->etcdc) & ETCDC_COLLCNT_MASK; + return &dev->stats; +} + +static void ioc3_tcpudp_checksum(struct sk_buff *skb, u32 hwsum, int len) +{ + struct ethhdr *eh = eth_hdr(skb); + unsigned int proto; + unsigned char *cp; + struct iphdr *ih; + u32 csum, ehsum; + u16 *ew; + + /* Did hardware handle the checksum at all? The cases we can handle + * are: + * + * - TCP and UDP checksums of IPv4 only. + * - IPv6 would be doable but we keep that for later ... + * - Only unfragmented packets. Did somebody already tell you + * fragmentation is evil? + * - don't care about packet size. 
Worst case when processing a + * malformed packet we'll try to access the packet at ip header + + * 64 bytes which is still inside the skb. Even in the unlikely + * case where the checksum is right the higher layers will still + * drop the packet as appropriate. + */ + if (eh->h_proto != htons(ETH_P_IP)) + return; + + ih = (struct iphdr *)((char *)eh + ETH_HLEN); + if (ip_is_fragment(ih)) + return; + + proto = ih->protocol; + if (proto != IPPROTO_TCP && proto != IPPROTO_UDP) + return; + + /* Same as tx - compute csum of pseudo header */ + csum = hwsum + + (ih->tot_len - (ih->ihl << 2)) + + htons((u16)ih->protocol) + + (ih->saddr >> 16) + (ih->saddr & 0xffff) + + (ih->daddr >> 16) + (ih->daddr & 0xffff); + + /* Sum up ethernet dest addr, src addr and protocol */ + ew = (u16 *)eh; + ehsum = ew[0] + ew[1] + ew[2] + ew[3] + ew[4] + ew[5] + ew[6]; + + ehsum = (ehsum & 0xffff) + (ehsum >> 16); + ehsum = (ehsum & 0xffff) + (ehsum >> 16); + + csum += 0xffff ^ ehsum; + + /* In the next step we also subtract the 1's complement + * checksum of the trailing ethernet CRC. + */ + cp = (char *)eh + len; /* points at trailing CRC */ + if (len & 1) { + csum += 0xffff ^ (u16)((cp[1] << 8) | cp[0]); + csum += 0xffff ^ (u16)((cp[3] << 8) | cp[2]); + } else { + csum += 0xffff ^ (u16)((cp[0] << 8) | cp[1]); + csum += 0xffff ^ (u16)((cp[2] << 8) | cp[3]); + } + + csum = (csum & 0xffff) + (csum >> 16); + csum = (csum & 0xffff) + (csum >> 16); + + if (csum == 0xffff) + skb->ip_summed = CHECKSUM_UNNECESSARY; +} + +static inline void ioc3_rx(struct net_device *dev) +{ + struct ioc3_private *ip = netdev_priv(dev); + struct sk_buff *skb, *new_skb; + int rx_entry, n_entry, len; + struct ioc3_erxbuf *rxb; + unsigned long *rxr; + dma_addr_t d; + u32 w0, err; + + rxr = ip->rxr; /* Ring base */ + rx_entry = ip->rx_ci; /* RX consume index */ + n_entry = ip->rx_pi; + + skb = ip->rx_skbs[rx_entry]; + rxb = (struct ioc3_erxbuf *)(skb->data - RX_OFFSET); + w0 = be32_to_cpu(rxb->w0); + + while (w0 & ERXBUF_V) { + err = be32_to_cpu(rxb->err); /* It's valid ... */ + if (err & ERXBUF_GOODPKT) { + len = ((w0 >> ERXBUF_BYTECNT_SHIFT) & 0x7ff) - 4; + skb_put(skb, len); + skb->protocol = eth_type_trans(skb, dev); + + if (ioc3_alloc_skb(ip, &new_skb, &rxb, &d)) { + /* Ouch, drop packet and just recycle packet + * to keep the ring filled. + */ + dev->stats.rx_dropped++; + new_skb = skb; + d = rxr[rx_entry]; + goto next; + } + + if (likely(dev->features & NETIF_F_RXCSUM)) + ioc3_tcpudp_checksum(skb, + w0 & ERXBUF_IPCKSUM_MASK, + len); + + dma_unmap_single(ip->dma_dev, rxr[rx_entry], + RX_BUF_SIZE, DMA_FROM_DEVICE); + + netif_rx(skb); + + ip->rx_skbs[rx_entry] = NULL; /* Poison */ + + dev->stats.rx_packets++; /* Statistics */ + dev->stats.rx_bytes += len; + } else { + /* The frame is invalid and the skb never + * reached the network layer so we can just + * recycle it. + */ + new_skb = skb; + d = rxr[rx_entry]; + dev->stats.rx_errors++; + } + if (err & ERXBUF_CRCERR) /* Statistics */ + dev->stats.rx_crc_errors++; + if (err & ERXBUF_FRAMERR) + dev->stats.rx_frame_errors++; + +next: + ip->rx_skbs[n_entry] = new_skb; + rxr[n_entry] = cpu_to_be64(ioc3_map(d, PCI64_ATTR_BAR)); + rxb->w0 = 0; /* Clear valid flag */ + n_entry = (n_entry + 1) & RX_RING_MASK; /* Update erpir */ + + /* Now go on to the next ring entry. 
*/ + rx_entry = (rx_entry + 1) & RX_RING_MASK; + skb = ip->rx_skbs[rx_entry]; + rxb = (struct ioc3_erxbuf *)(skb->data - RX_OFFSET); + w0 = be32_to_cpu(rxb->w0); + } + writel((n_entry << 3) | ERPIR_ARM, &ip->regs->erpir); + ip->rx_pi = n_entry; + ip->rx_ci = rx_entry; +} + +static inline void ioc3_tx(struct net_device *dev) +{ + struct ioc3_private *ip = netdev_priv(dev); + struct ioc3_ethregs *regs = ip->regs; + unsigned long packets, bytes; + int tx_entry, o_entry; + struct sk_buff *skb; + u32 etcir; + + spin_lock(&ip->ioc3_lock); + etcir = readl(®s->etcir); + + tx_entry = (etcir >> 7) & TX_RING_MASK; + o_entry = ip->tx_ci; + packets = 0; + bytes = 0; + + while (o_entry != tx_entry) { + packets++; + skb = ip->tx_skbs[o_entry]; + bytes += skb->len; + dev_consume_skb_irq(skb); + ip->tx_skbs[o_entry] = NULL; + + o_entry = (o_entry + 1) & TX_RING_MASK; /* Next */ + + etcir = readl(®s->etcir); /* More pkts sent? */ + tx_entry = (etcir >> 7) & TX_RING_MASK; + } + + dev->stats.tx_packets += packets; + dev->stats.tx_bytes += bytes; + ip->txqlen -= packets; + + if (netif_queue_stopped(dev) && ip->txqlen < TX_RING_ENTRIES) + netif_wake_queue(dev); + + ip->tx_ci = o_entry; + spin_unlock(&ip->ioc3_lock); +} + +/* Deal with fatal IOC3 errors. This condition might be caused by a hard or + * software problems, so we should try to recover + * more gracefully if this ever happens. In theory we might be flooded + * with such error interrupts if something really goes wrong, so we might + * also consider to take the interface down. + */ +static void ioc3_error(struct net_device *dev, u32 eisr) +{ + struct ioc3_private *ip = netdev_priv(dev); + + spin_lock(&ip->ioc3_lock); + + if (eisr & EISR_RXOFLO) + net_err_ratelimited("%s: RX overflow.\n", dev->name); + if (eisr & EISR_RXBUFOFLO) + net_err_ratelimited("%s: RX buffer overflow.\n", dev->name); + if (eisr & EISR_RXMEMERR) + net_err_ratelimited("%s: RX PCI error.\n", dev->name); + if (eisr & EISR_RXPARERR) + net_err_ratelimited("%s: RX SSRAM parity error.\n", dev->name); + if (eisr & EISR_TXBUFUFLO) + net_err_ratelimited("%s: TX buffer underflow.\n", dev->name); + if (eisr & EISR_TXMEMERR) + net_err_ratelimited("%s: TX PCI error.\n", dev->name); + + ioc3_stop(ip); + ioc3_free_rx_bufs(ip); + ioc3_clean_tx_ring(ip); + + ioc3_init(dev); + if (ioc3_alloc_rx_bufs(dev)) { + netdev_err(dev, "%s: rx buffer allocation failed\n", __func__); + spin_unlock(&ip->ioc3_lock); + return; + } + ioc3_start(ip); + ioc3_mii_init(ip); + + netif_wake_queue(dev); + + spin_unlock(&ip->ioc3_lock); +} + +/* The interrupt handler does all of the Rx thread work and cleans up + * after the Tx thread. 
+ */ +static irqreturn_t ioc3_interrupt(int irq, void *dev_id) +{ + struct ioc3_private *ip = netdev_priv(dev_id); + struct ioc3_ethregs *regs = ip->regs; + u32 eisr; + + eisr = readl(®s->eisr); + writel(eisr, ®s->eisr); + readl(®s->eisr); /* Flush */ + + if (eisr & (EISR_RXOFLO | EISR_RXBUFOFLO | EISR_RXMEMERR | + EISR_RXPARERR | EISR_TXBUFUFLO | EISR_TXMEMERR)) + ioc3_error(dev_id, eisr); + if (eisr & EISR_RXTIMERINT) + ioc3_rx(dev_id); + if (eisr & EISR_TXEXPLICIT) + ioc3_tx(dev_id); + + return IRQ_HANDLED; +} + +static inline void ioc3_setup_duplex(struct ioc3_private *ip) +{ + struct ioc3_ethregs *regs = ip->regs; + + spin_lock_irq(&ip->ioc3_lock); + + if (ip->mii.full_duplex) { + writel(ETCSR_FD, ®s->etcsr); + ip->emcr |= EMCR_DUPLEX; + } else { + writel(ETCSR_HD, ®s->etcsr); + ip->emcr &= ~EMCR_DUPLEX; + } + writel(ip->emcr, ®s->emcr); + + spin_unlock_irq(&ip->ioc3_lock); +} + +static void ioc3_timer(struct timer_list *t) +{ + struct ioc3_private *ip = from_timer(ip, t, ioc3_timer); + + /* Print the link status if it has changed */ + mii_check_media(&ip->mii, 1, 0); + ioc3_setup_duplex(ip); + + ip->ioc3_timer.expires = jiffies + ((12 * HZ) / 10); /* 1.2s */ + add_timer(&ip->ioc3_timer); +} + +/* Try to find a PHY. There is no apparent relation between the MII addresses + * in the SGI documentation and what we find in reality, so we simply probe + * for the PHY. + */ +static int ioc3_mii_init(struct ioc3_private *ip) +{ + u16 word; + int i; + + for (i = 0; i < 32; i++) { + word = ioc3_mdio_read(ip->mii.dev, i, MII_PHYSID1); + + if (word != 0xffff && word != 0x0000) { + ip->mii.phy_id = i; + return 0; + } + } + ip->mii.phy_id = -1; + return -ENODEV; +} + +static void ioc3_mii_start(struct ioc3_private *ip) +{ + ip->ioc3_timer.expires = jiffies + (12 * HZ) / 10; /* 1.2 sec. */ + add_timer(&ip->ioc3_timer); +} + +static inline void ioc3_tx_unmap(struct ioc3_private *ip, int entry) +{ + struct ioc3_etxd *desc; + u32 cmd, bufcnt, len; + + desc = &ip->txr[entry]; + cmd = be32_to_cpu(desc->cmd); + bufcnt = be32_to_cpu(desc->bufcnt); + if (cmd & ETXD_B1V) { + len = (bufcnt & ETXD_B1CNT_MASK) >> ETXD_B1CNT_SHIFT; + dma_unmap_single(ip->dma_dev, be64_to_cpu(desc->p1), + len, DMA_TO_DEVICE); + } + if (cmd & ETXD_B2V) { + len = (bufcnt & ETXD_B2CNT_MASK) >> ETXD_B2CNT_SHIFT; + dma_unmap_single(ip->dma_dev, be64_to_cpu(desc->p2), + len, DMA_TO_DEVICE); + } +} + +static inline void ioc3_clean_tx_ring(struct ioc3_private *ip) +{ + struct sk_buff *skb; + int i; + + for (i = 0; i < TX_RING_ENTRIES; i++) { + skb = ip->tx_skbs[i]; + if (skb) { + ioc3_tx_unmap(ip, i); + ip->tx_skbs[i] = NULL; + dev_kfree_skb_any(skb); + } + ip->txr[i].cmd = 0; + } + ip->tx_pi = 0; + ip->tx_ci = 0; +} + +static void ioc3_free_rx_bufs(struct ioc3_private *ip) +{ + int rx_entry, n_entry; + struct sk_buff *skb; + + n_entry = ip->rx_ci; + rx_entry = ip->rx_pi; + + while (n_entry != rx_entry) { + skb = ip->rx_skbs[n_entry]; + if (skb) { + dma_unmap_single(ip->dma_dev, + be64_to_cpu(ip->rxr[n_entry]), + RX_BUF_SIZE, DMA_FROM_DEVICE); + dev_kfree_skb_any(skb); + } + n_entry = (n_entry + 1) & RX_RING_MASK; + } +} + +static int ioc3_alloc_rx_bufs(struct net_device *dev) +{ + struct ioc3_private *ip = netdev_priv(dev); + struct ioc3_erxbuf *rxb; + dma_addr_t d; + int i; + + /* Now the rx buffers. The RX ring may be larger but + * we only allocate 16 buffers for now. Need to tune + * this for performance and memory later. 
+ */ + for (i = 0; i < RX_BUFFS; i++) { + if (ioc3_alloc_skb(ip, &ip->rx_skbs[i], &rxb, &d)) + return -ENOMEM; + + rxb->w0 = 0; /* Clear valid flag */ + ip->rxr[i] = cpu_to_be64(ioc3_map(d, PCI64_ATTR_BAR)); + } + ip->rx_ci = 0; + ip->rx_pi = RX_BUFFS; + + return 0; +} + +static inline void ioc3_ssram_disc(struct ioc3_private *ip) +{ + struct ioc3_ethregs *regs = ip->regs; + u32 *ssram0 = &ip->ssram[0x0000]; + u32 *ssram1 = &ip->ssram[0x4000]; + u32 pattern = 0x5555; + + /* Assume the larger size SSRAM and enable parity checking */ + writel(readl(®s->emcr) | (EMCR_BUFSIZ | EMCR_RAMPAR), ®s->emcr); + readl(®s->emcr); /* Flush */ + + writel(pattern, ssram0); + writel(~pattern & IOC3_SSRAM_DM, ssram1); + + if ((readl(ssram0) & IOC3_SSRAM_DM) != pattern || + (readl(ssram1) & IOC3_SSRAM_DM) != (~pattern & IOC3_SSRAM_DM)) { + /* set ssram size to 64 KB */ + ip->emcr |= EMCR_RAMPAR; + writel(readl(®s->emcr) & ~EMCR_BUFSIZ, ®s->emcr); + } else { + ip->emcr |= EMCR_BUFSIZ | EMCR_RAMPAR; + } +} + +static void ioc3_init(struct net_device *dev) +{ + struct ioc3_private *ip = netdev_priv(dev); + struct ioc3_ethregs *regs = ip->regs; + + del_timer_sync(&ip->ioc3_timer); /* Kill if running */ + + writel(EMCR_RST, ®s->emcr); /* Reset */ + readl(®s->emcr); /* Flush WB */ + udelay(4); /* Give it time ... */ + writel(0, ®s->emcr); + readl(®s->emcr); + + /* Misc registers */ + writel(ERBAR_VAL, ®s->erbar); + readl(®s->etcdc); /* Clear on read */ + writel(15, ®s->ercsr); /* RX low watermark */ + writel(0, ®s->ertr); /* Interrupt immediately */ + __ioc3_set_mac_address(dev); + writel(ip->ehar_h, ®s->ehar_h); + writel(ip->ehar_l, ®s->ehar_l); + writel(42, ®s->ersr); /* XXX should be random */ +} + +static void ioc3_start(struct ioc3_private *ip) +{ + struct ioc3_ethregs *regs = ip->regs; + unsigned long ring; + + /* Now the rx ring base, consume & produce registers. */ + ring = ioc3_map(ip->rxr_dma, PCI64_ATTR_PREC); + writel(ring >> 32, ®s->erbr_h); + writel(ring & 0xffffffff, ®s->erbr_l); + writel(ip->rx_ci << 3, ®s->ercir); + writel((ip->rx_pi << 3) | ERPIR_ARM, ®s->erpir); + + ring = ioc3_map(ip->txr_dma, PCI64_ATTR_PREC); + + ip->txqlen = 0; /* nothing queued */ + + /* Now the tx ring base, consume & produce registers. 
*/ + writel(ring >> 32, ®s->etbr_h); + writel(ring & 0xffffffff, ®s->etbr_l); + writel(ip->tx_pi << 7, ®s->etpir); + writel(ip->tx_ci << 7, ®s->etcir); + readl(®s->etcir); /* Flush */ + + ip->emcr |= ((RX_OFFSET / 2) << EMCR_RXOFF_SHIFT) | EMCR_TXDMAEN | + EMCR_TXEN | EMCR_RXDMAEN | EMCR_RXEN | EMCR_PADEN; + writel(ip->emcr, ®s->emcr); + writel(EISR_RXTIMERINT | EISR_RXOFLO | EISR_RXBUFOFLO | + EISR_RXMEMERR | EISR_RXPARERR | EISR_TXBUFUFLO | + EISR_TXEXPLICIT | EISR_TXMEMERR, ®s->eier); + readl(®s->eier); +} + +static inline void ioc3_stop(struct ioc3_private *ip) +{ + struct ioc3_ethregs *regs = ip->regs; + + writel(0, ®s->emcr); /* Shutup */ + writel(0, ®s->eier); /* Disable interrupts */ + readl(®s->eier); /* Flush */ +} + +static int ioc3_open(struct net_device *dev) +{ + struct ioc3_private *ip = netdev_priv(dev); + + ip->ehar_h = 0; + ip->ehar_l = 0; + + ioc3_init(dev); + if (ioc3_alloc_rx_bufs(dev)) { + netdev_err(dev, "%s: rx buffer allocation failed\n", __func__); + return -ENOMEM; + } + ioc3_start(ip); + ioc3_mii_start(ip); + + netif_start_queue(dev); + return 0; +} + +static int ioc3_close(struct net_device *dev) +{ + struct ioc3_private *ip = netdev_priv(dev); + + del_timer_sync(&ip->ioc3_timer); + + netif_stop_queue(dev); + + ioc3_stop(ip); + + ioc3_free_rx_bufs(ip); + ioc3_clean_tx_ring(ip); + + return 0; +} + +static const struct net_device_ops ioc3_netdev_ops = { + .ndo_open = ioc3_open, + .ndo_stop = ioc3_close, + .ndo_start_xmit = ioc3_start_xmit, + .ndo_tx_timeout = ioc3_timeout, + .ndo_get_stats = ioc3_get_stats, + .ndo_set_rx_mode = ioc3_set_multicast_list, + .ndo_do_ioctl = ioc3_ioctl, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = ioc3_set_mac_address, +}; + +static int ioc3eth_probe(struct platform_device *pdev) +{ + u32 sw_physid1, sw_physid2, vendor, model, rev; + struct ioc3_private *ip; + struct net_device *dev; + struct resource *regs; + u8 mac_addr[6]; + int err; + + regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!regs) { + dev_err(&pdev->dev, "Invalid resource\n"); + return -EINVAL; + } + /* get mac addr from one wire prom */ + if (ioc3eth_get_mac_addr(regs, mac_addr)) + return -EPROBE_DEFER; /* not available yet */ + + dev = alloc_etherdev(sizeof(struct ioc3_private)); + if (!dev) + return -ENOMEM; + + SET_NETDEV_DEV(dev, &pdev->dev); + + ip = netdev_priv(dev); + ip->dma_dev = pdev->dev.parent; + ip->regs = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(ip->regs)) { + err = PTR_ERR(ip->regs); + goto out_free; + } + + ip->ssram = devm_platform_ioremap_resource(pdev, 1); + if (IS_ERR(ip->ssram)) { + err = PTR_ERR(ip->ssram); + goto out_free; + } + + dev->irq = platform_get_irq(pdev, 0); + if (dev->irq < 0) { + err = dev->irq; + goto out_free; + } + + if (devm_request_irq(&pdev->dev, dev->irq, ioc3_interrupt, + IRQF_SHARED, "ioc3-eth", dev)) { + dev_err(&pdev->dev, "Can't get irq %d\n", dev->irq); + err = -ENODEV; + goto out_free; + } + + spin_lock_init(&ip->ioc3_lock); + timer_setup(&ip->ioc3_timer, ioc3_timer, 0); + + ioc3_stop(ip); + + /* Allocate rx ring. 4kb = 512 entries, must be 4kb aligned */ + ip->rxr = dma_alloc_coherent(ip->dma_dev, RX_RING_SIZE, &ip->rxr_dma, + GFP_KERNEL); + if (!ip->rxr) { + pr_err("ioc3-eth: rx ring allocation failed\n"); + err = -ENOMEM; + goto out_stop; + } + + /* Allocate tx rings. 
16kb = 128 bufs, must be 16kb aligned */ + ip->tx_ring = dma_alloc_coherent(ip->dma_dev, TX_RING_SIZE + SZ_16K - 1, + &ip->txr_dma, GFP_KERNEL); + if (!ip->tx_ring) { + pr_err("ioc3-eth: tx ring allocation failed\n"); + err = -ENOMEM; + goto out_stop; + } + /* Align TX ring */ + ip->txr = PTR_ALIGN(ip->tx_ring, SZ_16K); + ip->txr_dma = ALIGN(ip->txr_dma, SZ_16K); + + ioc3_init(dev); + + ip->mii.phy_id_mask = 0x1f; + ip->mii.reg_num_mask = 0x1f; + ip->mii.dev = dev; + ip->mii.mdio_read = ioc3_mdio_read; + ip->mii.mdio_write = ioc3_mdio_write; + + ioc3_mii_init(ip); + + if (ip->mii.phy_id == -1) { + netdev_err(dev, "Didn't find a PHY, goodbye.\n"); + err = -ENODEV; + goto out_stop; + } + + ioc3_mii_start(ip); + ioc3_ssram_disc(ip); + memcpy(dev->dev_addr, mac_addr, ETH_ALEN); + + /* The IOC3-specific entries in the device structure. */ + dev->watchdog_timeo = 5 * HZ; + dev->netdev_ops = &ioc3_netdev_ops; + dev->ethtool_ops = &ioc3_ethtool_ops; + dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM; + dev->features = NETIF_F_IP_CSUM | NETIF_F_HIGHDMA; + + sw_physid1 = ioc3_mdio_read(dev, ip->mii.phy_id, MII_PHYSID1); + sw_physid2 = ioc3_mdio_read(dev, ip->mii.phy_id, MII_PHYSID2); + + err = register_netdev(dev); + if (err) + goto out_stop; + + mii_check_media(&ip->mii, 1, 1); + ioc3_setup_duplex(ip); + + vendor = (sw_physid1 << 12) | (sw_physid2 >> 4); + model = (sw_physid2 >> 4) & 0x3f; + rev = sw_physid2 & 0xf; + netdev_info(dev, "Using PHY %d, vendor 0x%x, model %d, rev %d.\n", + ip->mii.phy_id, vendor, model, rev); + netdev_info(dev, "IOC3 SSRAM has %d kbyte.\n", + ip->emcr & EMCR_BUFSIZ ? 128 : 64); + + return 0; + +out_stop: + del_timer_sync(&ip->ioc3_timer); + if (ip->rxr) + dma_free_coherent(ip->dma_dev, RX_RING_SIZE, ip->rxr, + ip->rxr_dma); + if (ip->tx_ring) + dma_free_coherent(ip->dma_dev, TX_RING_SIZE + SZ_16K - 1, ip->tx_ring, + ip->txr_dma); +out_free: + free_netdev(dev); + return err; +} + +static int ioc3eth_remove(struct platform_device *pdev) +{ + struct net_device *dev = platform_get_drvdata(pdev); + struct ioc3_private *ip = netdev_priv(dev); + + dma_free_coherent(ip->dma_dev, RX_RING_SIZE, ip->rxr, ip->rxr_dma); + dma_free_coherent(ip->dma_dev, TX_RING_SIZE + SZ_16K - 1, ip->tx_ring, ip->txr_dma); + + unregister_netdev(dev); + del_timer_sync(&ip->ioc3_timer); + free_netdev(dev); + + return 0; +} + + +static netdev_tx_t ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct ioc3_private *ip = netdev_priv(dev); + struct ioc3_etxd *desc; + unsigned long data; + unsigned int len; + int produce; + u32 w0 = 0; + + /* IOC3 has a fairly simple minded checksumming hardware which simply + * adds up the 1's complement checksum for the entire packet and + * inserts it at an offset which can be specified in the descriptor + * into the transmit packet. This means we have to compensate for the + * MAC header which should not be summed and the TCP/UDP pseudo headers + * manually. + */ + if (skb->ip_summed == CHECKSUM_PARTIAL) { + const struct iphdr *ih = ip_hdr(skb); + const int proto = ntohs(ih->protocol); + unsigned int csoff; + u32 csum, ehsum; + u16 *eh; + + /* The MAC header. skb->mac seem the logic approach + * to find the MAC header - except it's a NULL pointer ... 
+ */ + eh = (u16 *)skb->data; + + /* Sum up dest addr, src addr and protocol */ + ehsum = eh[0] + eh[1] + eh[2] + eh[3] + eh[4] + eh[5] + eh[6]; + + /* Skip IP header; it's sum is always zero and was + * already filled in by ip_output.c + */ + csum = csum_tcpudp_nofold(ih->saddr, ih->daddr, + ih->tot_len - (ih->ihl << 2), + proto, csum_fold(ehsum)); + + csum = (csum & 0xffff) + (csum >> 16); /* Fold again */ + csum = (csum & 0xffff) + (csum >> 16); + + csoff = ETH_HLEN + (ih->ihl << 2); + if (proto == IPPROTO_UDP) { + csoff += offsetof(struct udphdr, check); + udp_hdr(skb)->check = csum; + } + if (proto == IPPROTO_TCP) { + csoff += offsetof(struct tcphdr, check); + tcp_hdr(skb)->check = csum; + } + + w0 = ETXD_DOCHECKSUM | (csoff << ETXD_CHKOFF_SHIFT); + } + + spin_lock_irq(&ip->ioc3_lock); + + data = (unsigned long)skb->data; + len = skb->len; + + produce = ip->tx_pi; + desc = &ip->txr[produce]; + + if (len <= 104) { + /* Short packet, let's copy it directly into the ring. */ + skb_copy_from_linear_data(skb, desc->data, skb->len); + if (len < ETH_ZLEN) { + /* Very short packet, pad with zeros at the end. */ + memset(desc->data + len, 0, ETH_ZLEN - len); + len = ETH_ZLEN; + } + desc->cmd = cpu_to_be32(len | ETXD_INTWHENDONE | ETXD_D0V | w0); + desc->bufcnt = cpu_to_be32(len); + } else if ((data ^ (data + len - 1)) & 0x4000) { + unsigned long b2 = (data | 0x3fffUL) + 1UL; + unsigned long s1 = b2 - data; + unsigned long s2 = data + len - b2; + dma_addr_t d1, d2; + + desc->cmd = cpu_to_be32(len | ETXD_INTWHENDONE | + ETXD_B1V | ETXD_B2V | w0); + desc->bufcnt = cpu_to_be32((s1 << ETXD_B1CNT_SHIFT) | + (s2 << ETXD_B2CNT_SHIFT)); + d1 = dma_map_single(ip->dma_dev, skb->data, s1, DMA_TO_DEVICE); + if (dma_mapping_error(ip->dma_dev, d1)) + goto drop_packet; + d2 = dma_map_single(ip->dma_dev, (void *)b2, s1, DMA_TO_DEVICE); + if (dma_mapping_error(ip->dma_dev, d2)) { + dma_unmap_single(ip->dma_dev, d1, len, DMA_TO_DEVICE); + goto drop_packet; + } + desc->p1 = cpu_to_be64(ioc3_map(d1, PCI64_ATTR_PREF)); + desc->p2 = cpu_to_be64(ioc3_map(d2, PCI64_ATTR_PREF)); + } else { + dma_addr_t d; + + /* Normal sized packet that doesn't cross a page boundary. */ + desc->cmd = cpu_to_be32(len | ETXD_INTWHENDONE | ETXD_B1V | w0); + desc->bufcnt = cpu_to_be32(len << ETXD_B1CNT_SHIFT); + d = dma_map_single(ip->dma_dev, skb->data, len, DMA_TO_DEVICE); + if (dma_mapping_error(ip->dma_dev, d)) + goto drop_packet; + desc->p1 = cpu_to_be64(ioc3_map(d, PCI64_ATTR_PREF)); + } + + mb(); /* make sure all descriptor changes are visible */ + + ip->tx_skbs[produce] = skb; /* Remember skb */ + produce = (produce + 1) & TX_RING_MASK; + ip->tx_pi = produce; + writel(produce << 7, &ip->regs->etpir); /* Fire ... 
*/ + + ip->txqlen++; + + if (ip->txqlen >= (TX_RING_ENTRIES - 1)) + netif_stop_queue(dev); + + spin_unlock_irq(&ip->ioc3_lock); + + return NETDEV_TX_OK; + +drop_packet: + dev_kfree_skb_any(skb); + dev->stats.tx_dropped++; + + spin_unlock_irq(&ip->ioc3_lock); + + return NETDEV_TX_OK; +} + +static void ioc3_timeout(struct net_device *dev, unsigned int txqueue) +{ + struct ioc3_private *ip = netdev_priv(dev); + + netdev_err(dev, "transmit timed out, resetting\n"); + + spin_lock_irq(&ip->ioc3_lock); + + ioc3_stop(ip); + ioc3_free_rx_bufs(ip); + ioc3_clean_tx_ring(ip); + + ioc3_init(dev); + if (ioc3_alloc_rx_bufs(dev)) { + netdev_err(dev, "%s: rx buffer allocation failed\n", __func__); + spin_unlock_irq(&ip->ioc3_lock); + return; + } + ioc3_start(ip); + ioc3_mii_init(ip); + ioc3_mii_start(ip); + + spin_unlock_irq(&ip->ioc3_lock); + + netif_wake_queue(dev); +} + +/* Given a multicast ethernet address, this routine calculates the + * address's bit index in the logical address filter mask + */ +static inline unsigned int ioc3_hash(const unsigned char *addr) +{ + unsigned int temp = 0; + int bits; + u32 crc; + + crc = ether_crc_le(ETH_ALEN, addr); + + crc &= 0x3f; /* bit reverse lowest 6 bits for hash index */ + for (bits = 6; --bits >= 0; ) { + temp <<= 1; + temp |= (crc & 0x1); + crc >>= 1; + } + + return temp; +} + +static void ioc3_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + strlcpy(info->driver, IOC3_NAME, sizeof(info->driver)); + strlcpy(info->version, IOC3_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(to_pci_dev(dev->dev.parent)), + sizeof(info->bus_info)); +} + +static int ioc3_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) +{ + struct ioc3_private *ip = netdev_priv(dev); + + spin_lock_irq(&ip->ioc3_lock); + mii_ethtool_get_link_ksettings(&ip->mii, cmd); + spin_unlock_irq(&ip->ioc3_lock); + + return 0; +} + +static int ioc3_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) +{ + struct ioc3_private *ip = netdev_priv(dev); + int rc; + + spin_lock_irq(&ip->ioc3_lock); + rc = mii_ethtool_set_link_ksettings(&ip->mii, cmd); + spin_unlock_irq(&ip->ioc3_lock); + + return rc; +} + +static int ioc3_nway_reset(struct net_device *dev) +{ + struct ioc3_private *ip = netdev_priv(dev); + int rc; + + spin_lock_irq(&ip->ioc3_lock); + rc = mii_nway_restart(&ip->mii); + spin_unlock_irq(&ip->ioc3_lock); + + return rc; +} + +static u32 ioc3_get_link(struct net_device *dev) +{ + struct ioc3_private *ip = netdev_priv(dev); + int rc; + + spin_lock_irq(&ip->ioc3_lock); + rc = mii_link_ok(&ip->mii); + spin_unlock_irq(&ip->ioc3_lock); + + return rc; +} + +static const struct ethtool_ops ioc3_ethtool_ops = { + .get_drvinfo = ioc3_get_drvinfo, + .nway_reset = ioc3_nway_reset, + .get_link = ioc3_get_link, + .get_link_ksettings = ioc3_get_link_ksettings, + .set_link_ksettings = ioc3_set_link_ksettings, +}; + +static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct ioc3_private *ip = netdev_priv(dev); + int rc; + + spin_lock_irq(&ip->ioc3_lock); + rc = generic_mii_ioctl(&ip->mii, if_mii(rq), cmd, NULL); + spin_unlock_irq(&ip->ioc3_lock); + + return rc; +} + +static void ioc3_set_multicast_list(struct net_device *dev) +{ + struct ioc3_private *ip = netdev_priv(dev); + struct ioc3_ethregs *regs = ip->regs; + struct netdev_hw_addr *ha; + u64 ehar = 0; + + spin_lock_irq(&ip->ioc3_lock); + + if (dev->flags & IFF_PROMISC) { /* Set promiscuous. 
*/ + ip->emcr |= EMCR_PROMISC; + writel(ip->emcr, ®s->emcr); + readl(®s->emcr); + } else { + ip->emcr &= ~EMCR_PROMISC; + writel(ip->emcr, ®s->emcr); /* Clear promiscuous. */ + readl(®s->emcr); + + if ((dev->flags & IFF_ALLMULTI) || + (netdev_mc_count(dev) > 64)) { + /* Too many for hashing to make sense or we want all + * multicast packets anyway, so skip computing all the + * hashes and just accept all packets. + */ + ip->ehar_h = 0xffffffff; + ip->ehar_l = 0xffffffff; + } else { + netdev_for_each_mc_addr(ha, dev) { + ehar |= (1UL << ioc3_hash(ha->addr)); + } + ip->ehar_h = ehar >> 32; + ip->ehar_l = ehar & 0xffffffff; + } + writel(ip->ehar_h, ®s->ehar_h); + writel(ip->ehar_l, ®s->ehar_l); + } + + spin_unlock_irq(&ip->ioc3_lock); +} + +static struct platform_driver ioc3eth_driver = { + .probe = ioc3eth_probe, + .remove = ioc3eth_remove, + .driver = { + .name = "ioc3-eth", + } +}; + +module_platform_driver(ioc3eth_driver); + +MODULE_AUTHOR("Ralf Baechle <ralf@linux-mips.org>"); +MODULE_DESCRIPTION("SGI IOC3 Ethernet driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c new file mode 100644 index 000000000..0c396ecd3 --- /dev/null +++ b/drivers/net/ethernet/sgi/meth.c @@ -0,0 +1,880 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * meth.c -- O2 Builtin 10/100 Ethernet driver + * + * Copyright (C) 2001-2003 Ilya Volynets + */ +#include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/errno.h> +#include <linux/types.h> +#include <linux/interrupt.h> + +#include <linux/in.h> +#include <linux/in6.h> +#include <linux/device.h> /* struct device, et al */ +#include <linux/netdevice.h> /* struct device, and other headers */ +#include <linux/etherdevice.h> /* eth_type_trans */ +#include <linux/ip.h> /* struct iphdr */ +#include <linux/tcp.h> /* struct tcphdr */ +#include <linux/skbuff.h> +#include <linux/mii.h> /* MII definitions */ +#include <linux/crc32.h> + +#include <asm/ip32/mace.h> +#include <asm/ip32/ip32_ints.h> + +#include <asm/io.h> + +#include "meth.h" + +#ifndef MFE_DEBUG +#define MFE_DEBUG 0 +#endif + +#if MFE_DEBUG>=1 +#define DPRINTK(str,args...) printk(KERN_DEBUG "meth: %s: " str, __func__ , ## args) +#define MFE_RX_DEBUG 2 +#else +#define DPRINTK(str,args...) +#define MFE_RX_DEBUG 0 +#endif + + +static const char *meth_str="SGI O2 Fast Ethernet"; + +/* The maximum time waited (in jiffies) before assuming a Tx failed. (400ms) */ +#define TX_TIMEOUT (400*HZ/1000) + +static int timeout = TX_TIMEOUT; +module_param(timeout, int, 0); + +/* + * Maximum number of multicast addresses to filter (vs. Rx-all-multicast). + * MACE Ethernet uses a 64 element hash table based on the Ethernet CRC. + */ +#define METH_MCF_LIMIT 32 + +/* + * This structure is private to each device. 
It is used to pass + * packets in and out, so there is place for a packet + */ +struct meth_private { + struct platform_device *pdev; + + /* in-memory copy of MAC Control register */ + u64 mac_ctrl; + + /* in-memory copy of DMA Control register */ + unsigned long dma_ctrl; + /* address of PHY, used by mdio_* functions, initialized in mdio_probe */ + unsigned long phy_addr; + tx_packet *tx_ring; + dma_addr_t tx_ring_dma; + struct sk_buff *tx_skbs[TX_RING_ENTRIES]; + dma_addr_t tx_skb_dmas[TX_RING_ENTRIES]; + unsigned long tx_read, tx_write, tx_count; + + rx_packet *rx_ring[RX_RING_ENTRIES]; + dma_addr_t rx_ring_dmas[RX_RING_ENTRIES]; + struct sk_buff *rx_skbs[RX_RING_ENTRIES]; + unsigned long rx_write; + + /* Multicast filter. */ + u64 mcast_filter; + + spinlock_t meth_lock; +}; + +static void meth_tx_timeout(struct net_device *dev, unsigned int txqueue); +static irqreturn_t meth_interrupt(int irq, void *dev_id); + +/* global, initialized in ip32-setup.c */ +char o2meth_eaddr[8]={0,0,0,0,0,0,0,0}; + +static inline void load_eaddr(struct net_device *dev) +{ + int i; + u64 macaddr; + + DPRINTK("Loading MAC Address: %pM\n", dev->dev_addr); + macaddr = 0; + for (i = 0; i < 6; i++) + macaddr |= (u64)dev->dev_addr[i] << ((5 - i) * 8); + + mace->eth.mac_addr = macaddr; +} + +/* + * Waits for BUSY status of mdio bus to clear + */ +#define WAIT_FOR_PHY(___rval) \ + while ((___rval = mace->eth.phy_data) & MDIO_BUSY) { \ + udelay(25); \ + } +/*read phy register, return value read */ +static unsigned long mdio_read(struct meth_private *priv, unsigned long phyreg) +{ + unsigned long rval; + WAIT_FOR_PHY(rval); + mace->eth.phy_regs = (priv->phy_addr << 5) | (phyreg & 0x1f); + udelay(25); + mace->eth.phy_trans_go = 1; + udelay(25); + WAIT_FOR_PHY(rval); + return rval & MDIO_DATA_MASK; +} + +static int mdio_probe(struct meth_private *priv) +{ + int i; + unsigned long p2, p3, flags; + /* check if phy is detected already */ + if(priv->phy_addr>=0&&priv->phy_addr<32) + return 0; + spin_lock_irqsave(&priv->meth_lock, flags); + for (i=0;i<32;++i){ + priv->phy_addr=i; + p2=mdio_read(priv,2); + p3=mdio_read(priv,3); +#if MFE_DEBUG>=2 + switch ((p2<<12)|(p3>>4)){ + case PHY_QS6612X: + DPRINTK("PHY is QS6612X\n"); + break; + case PHY_ICS1889: + DPRINTK("PHY is ICS1889\n"); + break; + case PHY_ICS1890: + DPRINTK("PHY is ICS1890\n"); + break; + case PHY_DP83840: + DPRINTK("PHY is DP83840\n"); + break; + } +#endif + if(p2!=0xffff&&p2!=0x0000){ + DPRINTK("PHY code: %x\n",(p2<<12)|(p3>>4)); + break; + } + } + spin_unlock_irqrestore(&priv->meth_lock, flags); + if(priv->phy_addr<32) { + return 0; + } + DPRINTK("Oopsie! PHY is not known!\n"); + priv->phy_addr=-1; + return -ENODEV; +} + +static void meth_check_link(struct net_device *dev) +{ + struct meth_private *priv = netdev_priv(dev); + unsigned long mii_advertising = mdio_read(priv, 4); + unsigned long mii_partner = mdio_read(priv, 5); + unsigned long negotiated = mii_advertising & mii_partner; + unsigned long duplex, speed; + + if (mii_partner == 0xffff) + return; + + speed = (negotiated & 0x0380) ? METH_100MBIT : 0; + duplex = ((negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040) ? + METH_PHY_FDX : 0; + + if ((priv->mac_ctrl & METH_PHY_FDX) ^ duplex) { + DPRINTK("Setting %s-duplex\n", duplex ? "full" : "half"); + if (duplex) + priv->mac_ctrl |= METH_PHY_FDX; + else + priv->mac_ctrl &= ~METH_PHY_FDX; + mace->eth.mac_ctrl = priv->mac_ctrl; + } + + if ((priv->mac_ctrl & METH_100MBIT) ^ speed) { + DPRINTK("Setting %dMbs mode\n", speed ? 
100 : 10); + if (duplex) + priv->mac_ctrl |= METH_100MBIT; + else + priv->mac_ctrl &= ~METH_100MBIT; + mace->eth.mac_ctrl = priv->mac_ctrl; + } +} + + +static int meth_init_tx_ring(struct meth_private *priv) +{ + /* Init TX ring */ + priv->tx_ring = dma_alloc_coherent(&priv->pdev->dev, + TX_RING_BUFFER_SIZE, &priv->tx_ring_dma, GFP_ATOMIC); + if (!priv->tx_ring) + return -ENOMEM; + + priv->tx_count = priv->tx_read = priv->tx_write = 0; + mace->eth.tx_ring_base = priv->tx_ring_dma; + /* Now init skb save area */ + memset(priv->tx_skbs, 0, sizeof(priv->tx_skbs)); + memset(priv->tx_skb_dmas, 0, sizeof(priv->tx_skb_dmas)); + return 0; +} + +static int meth_init_rx_ring(struct meth_private *priv) +{ + int i; + + for (i = 0; i < RX_RING_ENTRIES; i++) { + priv->rx_skbs[i] = alloc_skb(METH_RX_BUFF_SIZE, 0); + /* 8byte status vector + 3quad padding + 2byte padding, + * to put data on 64bit aligned boundary */ + skb_reserve(priv->rx_skbs[i],METH_RX_HEAD); + priv->rx_ring[i]=(rx_packet*)(priv->rx_skbs[i]->head); + /* I'll need to re-sync it after each RX */ + priv->rx_ring_dmas[i] = + dma_map_single(&priv->pdev->dev, priv->rx_ring[i], + METH_RX_BUFF_SIZE, DMA_FROM_DEVICE); + mace->eth.rx_fifo = priv->rx_ring_dmas[i]; + } + priv->rx_write = 0; + return 0; +} +static void meth_free_tx_ring(struct meth_private *priv) +{ + int i; + + /* Remove any pending skb */ + for (i = 0; i < TX_RING_ENTRIES; i++) { + dev_kfree_skb(priv->tx_skbs[i]); + priv->tx_skbs[i] = NULL; + } + dma_free_coherent(&priv->pdev->dev, TX_RING_BUFFER_SIZE, priv->tx_ring, + priv->tx_ring_dma); +} + +/* Presumes RX DMA engine is stopped, and RX fifo ring is reset */ +static void meth_free_rx_ring(struct meth_private *priv) +{ + int i; + + for (i = 0; i < RX_RING_ENTRIES; i++) { + dma_unmap_single(&priv->pdev->dev, priv->rx_ring_dmas[i], + METH_RX_BUFF_SIZE, DMA_FROM_DEVICE); + priv->rx_ring[i] = 0; + priv->rx_ring_dmas[i] = 0; + kfree_skb(priv->rx_skbs[i]); + } +} + +int meth_reset(struct net_device *dev) +{ + struct meth_private *priv = netdev_priv(dev); + + /* Reset card */ + mace->eth.mac_ctrl = SGI_MAC_RESET; + udelay(1); + mace->eth.mac_ctrl = 0; + udelay(25); + + /* Load ethernet address */ + load_eaddr(dev); + /* Should load some "errata", but later */ + + /* Check for device */ + if (mdio_probe(priv) < 0) { + DPRINTK("Unable to find PHY\n"); + return -ENODEV; + } + + /* Initial mode: 10 | Half-duplex | Accept normal packets */ + priv->mac_ctrl = METH_ACCEPT_MCAST | METH_DEFAULT_IPG; + if (dev->flags & IFF_PROMISC) + priv->mac_ctrl |= METH_PROMISC; + mace->eth.mac_ctrl = priv->mac_ctrl; + + /* Autonegotiate speed and duplex mode */ + meth_check_link(dev); + + /* Now set dma control, but don't enable DMA, yet */ + priv->dma_ctrl = (4 << METH_RX_OFFSET_SHIFT) | + (RX_RING_ENTRIES << METH_RX_DEPTH_SHIFT); + mace->eth.dma_ctrl = priv->dma_ctrl; + + return 0; +} + +/*============End Helper Routines=====================*/ + +/* + * Open and close + */ +static int meth_open(struct net_device *dev) +{ + struct meth_private *priv = netdev_priv(dev); + int ret; + + priv->phy_addr = -1; /* No PHY is known yet... 
*/ + + /* Initialize the hardware */ + ret = meth_reset(dev); + if (ret < 0) + return ret; + + /* Allocate the ring buffers */ + ret = meth_init_tx_ring(priv); + if (ret < 0) + return ret; + ret = meth_init_rx_ring(priv); + if (ret < 0) + goto out_free_tx_ring; + + ret = request_irq(dev->irq, meth_interrupt, 0, meth_str, dev); + if (ret) { + printk(KERN_ERR "%s: Can't get irq %d\n", dev->name, dev->irq); + goto out_free_rx_ring; + } + + /* Start DMA */ + priv->dma_ctrl |= METH_DMA_TX_EN | /*METH_DMA_TX_INT_EN |*/ + METH_DMA_RX_EN | METH_DMA_RX_INT_EN; + mace->eth.dma_ctrl = priv->dma_ctrl; + + DPRINTK("About to start queue\n"); + netif_start_queue(dev); + + return 0; + +out_free_rx_ring: + meth_free_rx_ring(priv); +out_free_tx_ring: + meth_free_tx_ring(priv); + + return ret; +} + +static int meth_release(struct net_device *dev) +{ + struct meth_private *priv = netdev_priv(dev); + + DPRINTK("Stopping queue\n"); + netif_stop_queue(dev); /* can't transmit any more */ + /* shut down DMA */ + priv->dma_ctrl &= ~(METH_DMA_TX_EN | METH_DMA_TX_INT_EN | + METH_DMA_RX_EN | METH_DMA_RX_INT_EN); + mace->eth.dma_ctrl = priv->dma_ctrl; + free_irq(dev->irq, dev); + meth_free_tx_ring(priv); + meth_free_rx_ring(priv); + + return 0; +} + +/* + * Receive a packet: retrieve, encapsulate and pass over to upper levels + */ +static void meth_rx(struct net_device* dev, unsigned long int_status) +{ + struct sk_buff *skb; + unsigned long status, flags; + struct meth_private *priv = netdev_priv(dev); + unsigned long fifo_rptr = (int_status & METH_INT_RX_RPTR_MASK) >> 8; + + spin_lock_irqsave(&priv->meth_lock, flags); + priv->dma_ctrl &= ~METH_DMA_RX_INT_EN; + mace->eth.dma_ctrl = priv->dma_ctrl; + spin_unlock_irqrestore(&priv->meth_lock, flags); + + if (int_status & METH_INT_RX_UNDERFLOW) { + fifo_rptr = (fifo_rptr - 1) & 0x0f; + } + while (priv->rx_write != fifo_rptr) { + dma_unmap_single(&priv->pdev->dev, + priv->rx_ring_dmas[priv->rx_write], + METH_RX_BUFF_SIZE, DMA_FROM_DEVICE); + status = priv->rx_ring[priv->rx_write]->status.raw; +#if MFE_DEBUG + if (!(status & METH_RX_ST_VALID)) { + DPRINTK("Not received? status=%016lx\n",status); + } +#endif + if ((!(status & METH_RX_STATUS_ERRORS)) && (status & METH_RX_ST_VALID)) { + int len = (status & 0xffff) - 4; /* omit CRC */ + /* length sanity check */ + if (len < 60 || len > 1518) { + printk(KERN_DEBUG "%s: bogus packet size: %ld, status=%#2Lx.\n", + dev->name, priv->rx_write, + priv->rx_ring[priv->rx_write]->status.raw); + dev->stats.rx_errors++; + dev->stats.rx_length_errors++; + skb = priv->rx_skbs[priv->rx_write]; + } else { + skb = alloc_skb(METH_RX_BUFF_SIZE, GFP_ATOMIC); + if (!skb) { + /* Ouch! No memory! 
Drop packet on the floor */ + DPRINTK("No mem: dropping packet\n"); + dev->stats.rx_dropped++; + skb = priv->rx_skbs[priv->rx_write]; + } else { + struct sk_buff *skb_c = priv->rx_skbs[priv->rx_write]; + /* 8byte status vector + 3quad padding + 2byte padding, + * to put data on 64bit aligned boundary */ + skb_reserve(skb, METH_RX_HEAD); + /* Write metadata, and then pass to the receive level */ + skb_put(skb_c, len); + priv->rx_skbs[priv->rx_write] = skb; + skb_c->protocol = eth_type_trans(skb_c, dev); + dev->stats.rx_packets++; + dev->stats.rx_bytes += len; + netif_rx(skb_c); + } + } + } else { + dev->stats.rx_errors++; + skb=priv->rx_skbs[priv->rx_write]; +#if MFE_DEBUG>0 + printk(KERN_WARNING "meth: RX error: status=0x%016lx\n",status); + if(status&METH_RX_ST_RCV_CODE_VIOLATION) + printk(KERN_WARNING "Receive Code Violation\n"); + if(status&METH_RX_ST_CRC_ERR) + printk(KERN_WARNING "CRC error\n"); + if(status&METH_RX_ST_INV_PREAMBLE_CTX) + printk(KERN_WARNING "Invalid Preamble Context\n"); + if(status&METH_RX_ST_LONG_EVT_SEEN) + printk(KERN_WARNING "Long Event Seen...\n"); + if(status&METH_RX_ST_BAD_PACKET) + printk(KERN_WARNING "Bad Packet\n"); + if(status&METH_RX_ST_CARRIER_EVT_SEEN) + printk(KERN_WARNING "Carrier Event Seen\n"); +#endif + } + priv->rx_ring[priv->rx_write] = (rx_packet*)skb->head; + priv->rx_ring[priv->rx_write]->status.raw = 0; + priv->rx_ring_dmas[priv->rx_write] = + dma_map_single(&priv->pdev->dev, + priv->rx_ring[priv->rx_write], + METH_RX_BUFF_SIZE, DMA_FROM_DEVICE); + mace->eth.rx_fifo = priv->rx_ring_dmas[priv->rx_write]; + ADVANCE_RX_PTR(priv->rx_write); + } + spin_lock_irqsave(&priv->meth_lock, flags); + /* In case there was underflow, and Rx DMA was disabled */ + priv->dma_ctrl |= METH_DMA_RX_INT_EN | METH_DMA_RX_EN; + mace->eth.dma_ctrl = priv->dma_ctrl; + mace->eth.int_stat = METH_INT_RX_THRESHOLD; + spin_unlock_irqrestore(&priv->meth_lock, flags); +} + +static int meth_tx_full(struct net_device *dev) +{ + struct meth_private *priv = netdev_priv(dev); + + return priv->tx_count >= TX_RING_ENTRIES - 1; +} + +static void meth_tx_cleanup(struct net_device* dev, unsigned long int_status) +{ + struct meth_private *priv = netdev_priv(dev); + unsigned long status, flags; + struct sk_buff *skb; + unsigned long rptr = (int_status&TX_INFO_RPTR) >> 16; + + spin_lock_irqsave(&priv->meth_lock, flags); + + /* Stop DMA notification */ + priv->dma_ctrl &= ~(METH_DMA_TX_INT_EN); + mace->eth.dma_ctrl = priv->dma_ctrl; + + while (priv->tx_read != rptr) { + skb = priv->tx_skbs[priv->tx_read]; + status = priv->tx_ring[priv->tx_read].header.raw; +#if MFE_DEBUG>=1 + if (priv->tx_read == priv->tx_write) + DPRINTK("Auchi! 
tx_read=%d,tx_write=%d,rptr=%d?\n", priv->tx_read, priv->tx_write,rptr); +#endif + if (status & METH_TX_ST_DONE) { + if (status & METH_TX_ST_SUCCESS){ + dev->stats.tx_packets++; + dev->stats.tx_bytes += skb->len; + } else { + dev->stats.tx_errors++; +#if MFE_DEBUG>=1 + DPRINTK("TX error: status=%016lx <",status); + if(status & METH_TX_ST_SUCCESS) + printk(" SUCCESS"); + if(status & METH_TX_ST_TOOLONG) + printk(" TOOLONG"); + if(status & METH_TX_ST_UNDERRUN) + printk(" UNDERRUN"); + if(status & METH_TX_ST_EXCCOLL) + printk(" EXCCOLL"); + if(status & METH_TX_ST_DEFER) + printk(" DEFER"); + if(status & METH_TX_ST_LATECOLL) + printk(" LATECOLL"); + printk(" >\n"); +#endif + } + } else { + DPRINTK("RPTR points us here, but packet not done?\n"); + break; + } + dev_consume_skb_irq(skb); + priv->tx_skbs[priv->tx_read] = NULL; + priv->tx_ring[priv->tx_read].header.raw = 0; + priv->tx_read = (priv->tx_read+1)&(TX_RING_ENTRIES-1); + priv->tx_count--; + } + + /* wake up queue if it was stopped */ + if (netif_queue_stopped(dev) && !meth_tx_full(dev)) { + netif_wake_queue(dev); + } + + mace->eth.int_stat = METH_INT_TX_EMPTY | METH_INT_TX_PKT; + spin_unlock_irqrestore(&priv->meth_lock, flags); +} + +static void meth_error(struct net_device* dev, unsigned status) +{ + struct meth_private *priv = netdev_priv(dev); + unsigned long flags; + + printk(KERN_WARNING "meth: error status: 0x%08x\n",status); + /* check for errors too... */ + if (status & (METH_INT_TX_LINK_FAIL)) + printk(KERN_WARNING "meth: link failure\n"); + /* Should I do full reset in this case? */ + if (status & (METH_INT_MEM_ERROR)) + printk(KERN_WARNING "meth: memory error\n"); + if (status & (METH_INT_TX_ABORT)) + printk(KERN_WARNING "meth: aborted\n"); + if (status & (METH_INT_RX_OVERFLOW)) + printk(KERN_WARNING "meth: Rx overflow\n"); + if (status & (METH_INT_RX_UNDERFLOW)) { + printk(KERN_WARNING "meth: Rx underflow\n"); + spin_lock_irqsave(&priv->meth_lock, flags); + mace->eth.int_stat = METH_INT_RX_UNDERFLOW; + /* more underflow interrupts will be delivered, + * effectively throwing us into an infinite loop. + * Thus I stop processing Rx in this case. */ + priv->dma_ctrl &= ~METH_DMA_RX_EN; + mace->eth.dma_ctrl = priv->dma_ctrl; + DPRINTK("Disabled meth Rx DMA temporarily\n"); + spin_unlock_irqrestore(&priv->meth_lock, flags); + } + mace->eth.int_stat = METH_INT_ERROR; +} + +/* + * The typical interrupt entry point + */ +static irqreturn_t meth_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = (struct net_device *)dev_id; + struct meth_private *priv = netdev_priv(dev); + unsigned long status; + + status = mace->eth.int_stat; + while (status & 0xff) { + /* First handle errors - if we get Rx underflow, + * Rx DMA will be disabled, and Rx handler will reenable + * it. I don't think it's possible to get Rx underflow, + * without getting Rx interrupt */ + if (status & METH_INT_ERROR) { + meth_error(dev, status); + } + if (status & (METH_INT_TX_EMPTY | METH_INT_TX_PKT)) { + /* a transmission is over: free the skb */ + meth_tx_cleanup(dev, status); + } + if (status & METH_INT_RX_THRESHOLD) { + if (!(priv->dma_ctrl & METH_DMA_RX_INT_EN)) + break; + /* send it to meth_rx for handling */ + meth_rx(dev, status); + } + status = mace->eth.int_stat; + } + + return IRQ_HANDLED; +} + +/* + * Transmits packets that fit into TX descriptor (are <=120B) + */ +static void meth_tx_short_prepare(struct meth_private *priv, + struct sk_buff *skb) +{ + tx_packet *desc = &priv->tx_ring[priv->tx_write]; + int len = (skb->len < ETH_ZLEN) ? 
ETH_ZLEN : skb->len; + + desc->header.raw = METH_TX_CMD_INT_EN | (len-1) | ((128-len) << 16); + /* maybe I should set whole thing to 0 first... */ + skb_copy_from_linear_data(skb, desc->data.dt + (120 - len), skb->len); + if (skb->len < len) + memset(desc->data.dt + 120 - len + skb->len, 0, len-skb->len); +} +#define TX_CATBUF1 BIT(25) +static void meth_tx_1page_prepare(struct meth_private *priv, + struct sk_buff *skb) +{ + tx_packet *desc = &priv->tx_ring[priv->tx_write]; + void *buffer_data = (void *)(((unsigned long)skb->data + 7) & ~7); + int unaligned_len = (int)((unsigned long)buffer_data - (unsigned long)skb->data); + int buffer_len = skb->len - unaligned_len; + dma_addr_t catbuf; + + desc->header.raw = METH_TX_CMD_INT_EN | TX_CATBUF1 | (skb->len - 1); + + /* unaligned part */ + if (unaligned_len) { + skb_copy_from_linear_data(skb, desc->data.dt + (120 - unaligned_len), + unaligned_len); + desc->header.raw |= (128 - unaligned_len) << 16; + } + + /* first page */ + catbuf = dma_map_single(&priv->pdev->dev, buffer_data, buffer_len, + DMA_TO_DEVICE); + desc->data.cat_buf[0].form.start_addr = catbuf >> 3; + desc->data.cat_buf[0].form.len = buffer_len - 1; +} +#define TX_CATBUF2 BIT(26) +static void meth_tx_2page_prepare(struct meth_private *priv, + struct sk_buff *skb) +{ + tx_packet *desc = &priv->tx_ring[priv->tx_write]; + void *buffer1_data = (void *)(((unsigned long)skb->data + 7) & ~7); + void *buffer2_data = (void *)PAGE_ALIGN((unsigned long)skb->data); + int unaligned_len = (int)((unsigned long)buffer1_data - (unsigned long)skb->data); + int buffer1_len = (int)((unsigned long)buffer2_data - (unsigned long)buffer1_data); + int buffer2_len = skb->len - buffer1_len - unaligned_len; + dma_addr_t catbuf1, catbuf2; + + desc->header.raw = METH_TX_CMD_INT_EN | TX_CATBUF1 | TX_CATBUF2| (skb->len - 1); + /* unaligned part */ + if (unaligned_len){ + skb_copy_from_linear_data(skb, desc->data.dt + (120 - unaligned_len), + unaligned_len); + desc->header.raw |= (128 - unaligned_len) << 16; + } + + /* first page */ + catbuf1 = dma_map_single(&priv->pdev->dev, buffer1_data, buffer1_len, + DMA_TO_DEVICE); + desc->data.cat_buf[0].form.start_addr = catbuf1 >> 3; + desc->data.cat_buf[0].form.len = buffer1_len - 1; + /* second page */ + catbuf2 = dma_map_single(&priv->pdev->dev, buffer2_data, buffer2_len, + DMA_TO_DEVICE); + desc->data.cat_buf[1].form.start_addr = catbuf2 >> 3; + desc->data.cat_buf[1].form.len = buffer2_len - 1; +} + +static void meth_add_to_tx_ring(struct meth_private *priv, struct sk_buff *skb) +{ + /* Remember the skb, so we can free it at interrupt time */ + priv->tx_skbs[priv->tx_write] = skb; + if (skb->len <= 120) { + /* Whole packet fits into descriptor */ + meth_tx_short_prepare(priv, skb); + } else if (PAGE_ALIGN((unsigned long)skb->data) != + PAGE_ALIGN((unsigned long)skb->data + skb->len - 1)) { + /* Packet crosses page boundary */ + meth_tx_2page_prepare(priv, skb); + } else { + /* Packet is in one page */ + meth_tx_1page_prepare(priv, skb); + } + priv->tx_write = (priv->tx_write + 1) & (TX_RING_ENTRIES - 1); + mace->eth.tx_info = priv->tx_write; + priv->tx_count++; +} + +/* + * Transmit a packet (called by the kernel) + */ +static netdev_tx_t meth_tx(struct sk_buff *skb, struct net_device *dev) +{ + struct meth_private *priv = netdev_priv(dev); + unsigned long flags; + + spin_lock_irqsave(&priv->meth_lock, flags); + /* Stop DMA notification */ + priv->dma_ctrl &= ~(METH_DMA_TX_INT_EN); + mace->eth.dma_ctrl = priv->dma_ctrl; + + meth_add_to_tx_ring(priv, skb); + 
+/*
+ * Transmit a packet (called by the kernel)
+ */
+static netdev_tx_t meth_tx(struct sk_buff *skb, struct net_device *dev)
+{
+	struct meth_private *priv = netdev_priv(dev);
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->meth_lock, flags);
+	/* Stop DMA notification */
+	priv->dma_ctrl &= ~(METH_DMA_TX_INT_EN);
+	mace->eth.dma_ctrl = priv->dma_ctrl;
+
+	meth_add_to_tx_ring(priv, skb);
+	netif_trans_update(dev); /* save the timestamp */
+
+	/* If the TX ring is full, tell the upper layer to stop sending packets */
+	if (meth_tx_full(dev)) {
+		printk(KERN_DEBUG "TX full: stopping\n");
+		netif_stop_queue(dev);
+	}
+
+	/* Restart DMA notification */
+	priv->dma_ctrl |= METH_DMA_TX_INT_EN;
+	mace->eth.dma_ctrl = priv->dma_ctrl;
+
+	spin_unlock_irqrestore(&priv->meth_lock, flags);
+
+	return NETDEV_TX_OK;
+}
+
+/*
+ * Deal with a transmit timeout.
+ */
+static void meth_tx_timeout(struct net_device *dev, unsigned int txqueue)
+{
+	struct meth_private *priv = netdev_priv(dev);
+	unsigned long flags;
+
+	printk(KERN_WARNING "%s: transmit timed out\n", dev->name);
+
+	/* Protect against concurrent rx interrupts */
+	spin_lock_irqsave(&priv->meth_lock, flags);
+
+	/* Try to reset the interface. */
+	meth_reset(dev);
+
+	dev->stats.tx_errors++;
+
+	/* Clear all rings */
+	meth_free_tx_ring(priv);
+	meth_free_rx_ring(priv);
+	meth_init_tx_ring(priv);
+	meth_init_rx_ring(priv);
+
+	/* Restart dma */
+	priv->dma_ctrl |= METH_DMA_TX_EN | METH_DMA_RX_EN | METH_DMA_RX_INT_EN;
+	mace->eth.dma_ctrl = priv->dma_ctrl;
+
+	/* Unlocking re-enables interrupts */
+	spin_unlock_irqrestore(&priv->meth_lock, flags);
+
+	netif_trans_update(dev); /* prevent tx timeout */
+	netif_wake_queue(dev);
+}
+
+/*
+ * Ioctl commands
+ */
+static int meth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+	/* XXX Not yet implemented */
+	switch (cmd) {
+	case SIOCGMIIPHY:
+	case SIOCGMIIREG:
+	case SIOCSMIIREG:
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static void meth_set_rx_mode(struct net_device *dev)
+{
+	struct meth_private *priv = netdev_priv(dev);
+	unsigned long flags;
+
+	netif_stop_queue(dev);
+	spin_lock_irqsave(&priv->meth_lock, flags);
+	priv->mac_ctrl &= ~METH_PROMISC;
+
+	if (dev->flags & IFF_PROMISC) {
+		priv->mac_ctrl |= METH_PROMISC;
+		priv->mcast_filter = 0xffffffffffffffffUL;
+	} else if ((netdev_mc_count(dev) > METH_MCF_LIMIT) ||
+		   (dev->flags & IFF_ALLMULTI)) {
+		priv->mac_ctrl |= METH_ACCEPT_AMCAST;
+		priv->mcast_filter = 0xffffffffffffffffUL;
+	} else {
+		struct netdev_hw_addr *ha;
+		priv->mac_ctrl |= METH_ACCEPT_MCAST;
+
+		/* the upper 6 bits of the address CRC select one of the
+		 * 64 hash-filter bits */
+		netdev_for_each_mc_addr(ha, dev)
+			set_bit((ether_crc(ETH_ALEN, ha->addr) >> 26),
+				(volatile unsigned long *)&priv->mcast_filter);
+	}
+
+	/* Write the changes to the chip registers. */
+	mace->eth.mac_ctrl = priv->mac_ctrl;
+	mace->eth.mcast_filter = priv->mcast_filter;
+
+	/* Done! */
+	spin_unlock_irqrestore(&priv->meth_lock, flags);
+	netif_wake_queue(dev);
+}
+
+static const struct net_device_ops meth_netdev_ops = {
+	.ndo_open		= meth_open,
+	.ndo_stop		= meth_release,
+	.ndo_start_xmit		= meth_tx,
+	.ndo_do_ioctl		= meth_ioctl,
+	.ndo_tx_timeout		= meth_tx_timeout,
+	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_set_mac_address	= eth_mac_addr,
+	.ndo_set_rx_mode	= meth_set_rx_mode,
+};
+
+/*
+ * The init function.
+ */
+static int meth_probe(struct platform_device *pdev)
+{
+	struct net_device *dev;
+	struct meth_private *priv;
+	int err;
+
+	dev = alloc_etherdev(sizeof(struct meth_private));
+	if (!dev)
+		return -ENOMEM;
+
+	dev->netdev_ops = &meth_netdev_ops;
+	dev->watchdog_timeo = timeout;
+	dev->irq = MACE_ETHERNET_IRQ;
+	dev->base_addr = (unsigned long)&mace->eth;
+	memcpy(dev->dev_addr, o2meth_eaddr, ETH_ALEN);
+
+	priv = netdev_priv(dev);
+	priv->pdev = pdev;
+	spin_lock_init(&priv->meth_lock);
+	SET_NETDEV_DEV(dev, &pdev->dev);
+
+	err = register_netdev(dev);
+	if (err) {
+		free_netdev(dev);
+		return err;
+	}
+
+	printk(KERN_INFO "%s: SGI MACE Ethernet rev. %d\n",
+	       dev->name, (unsigned int)(mace->eth.mac_ctrl >> METH_REV_SHIFT));
+	return 0;
+}
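+/* This is a platform driver: probing relies on a platform device named
+ * "meth", which is expected to be registered by the SGI IP32 platform
+ * setup code; MODULE_ALIAS("platform:meth") below lets module autoloading
+ * match this driver to that device.
+ */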
%d\n", + dev->name, (unsigned int)(mace->eth.mac_ctrl >> 29)); + return 0; +} + +static int meth_remove(struct platform_device *pdev) +{ + struct net_device *dev = platform_get_drvdata(pdev); + + unregister_netdev(dev); + free_netdev(dev); + + return 0; +} + +static struct platform_driver meth_driver = { + .probe = meth_probe, + .remove = meth_remove, + .driver = { + .name = "meth", + } +}; + +module_platform_driver(meth_driver); + +MODULE_AUTHOR("Ilya Volynets <ilya@theIlya.com>"); +MODULE_DESCRIPTION("SGI O2 Builtin Fast Ethernet driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:meth"); diff --git a/drivers/net/ethernet/sgi/meth.h b/drivers/net/ethernet/sgi/meth.h new file mode 100644 index 000000000..2ba15c263 --- /dev/null +++ b/drivers/net/ethernet/sgi/meth.h @@ -0,0 +1,227 @@ +/* version dependencies have been confined to a separate file */ + +/* Tunable parameters */ +#define TX_RING_ENTRIES 64 /* 64-512?*/ + +#define RX_RING_ENTRIES 16 /* Do not change */ +/* Internal constants */ +#define TX_RING_BUFFER_SIZE (TX_RING_ENTRIES*sizeof(tx_packet)) +#define RX_BUFFER_SIZE 1546 /* ethenet packet size */ +#define METH_RX_BUFF_SIZE 4096 +#define METH_RX_HEAD 34 /* status + 3 quad garbage-fill + 2 byte zero-pad */ +#define RX_BUFFER_OFFSET (sizeof(rx_status_vector)+2) /* staus vector + 2 bytes of padding */ +#define RX_BUCKET_SIZE 256 + +/* For more detailed explanations of what each field menas, + see Nick's great comments to #defines below (or docs, if + you are lucky enough toget hold of them :)*/ + +/* tx status vector is written over tx command header upon + dma completion. */ + +typedef struct tx_status_vector { + u64 sent:1; /* always set to 1...*/ + u64 pad0:34;/* always set to 0 */ + u64 flags:9; /*I'm too lazy to specify each one separately at the moment*/ + u64 col_retry_cnt:4; /*collision retry count*/ + u64 len:16; /*Transmit length in bytes*/ +} tx_status_vector; + +/* + * Each packet is 128 bytes long. + * It consists of header, 0-3 concatination + * buffer pointers and up to 120 data bytes. 
+typedef struct tx_packet_hdr {
+	u64	pad1:36; /* should be filled with 0 */
+	u64	cat_ptr3_valid:1, /* Concatenation pointer valid flags */
+		cat_ptr2_valid:1,
+		cat_ptr1_valid:1;
+	u64	tx_int_flag:1; /* Generate TX interrupt when packet has been sent */
+	u64	term_dma_flag:1; /* Terminate transmit DMA on transmit abort conditions */
+	u64	data_offset:7; /* Starting byte offset in ring data block */
+	u64	data_len:16; /* Length of valid data in bytes - 1 */
+} tx_packet_hdr;
+
+typedef union tx_cat_ptr {
+	struct {
+		u64	pad2:16; /* should be 0 */
+		u64	len:16; /* length of buffer data - 1 */
+		u64	start_addr:29; /* Physical starting address */
+		u64	pad1:3; /* should be zero */
+	} form;
+	u64 raw;
+} tx_cat_ptr;
+
+typedef struct tx_packet {
+	union {
+		tx_packet_hdr header;
+		tx_status_vector res;
+		u64 raw;
+	} header;
+	union {
+		tx_cat_ptr cat_buf[3];
+		char dt[120];
+	} data;
+} tx_packet;
+
+typedef union rx_status_vector {
+	volatile struct {
+		u64	pad1:1; /* fill it with ones */
+		u64	pad2:15; /* fill with 0 */
+		u64	ip_chk_sum:16;
+		u64	seq_num:5;
+		u64	mac_addr_match:1;
+		u64	mcast_addr_match:1;
+		u64	carrier_event_seen:1;
+		u64	bad_packet:1;
+		u64	long_event_seen:1;
+		u64	invalid_preamble:1;
+		u64	broadcast:1;
+		u64	multicast:1;
+		u64	crc_error:1;
+		u64	huh:1; /* ??? */
+		u64	rx_code_violation:1;
+		u64	rx_len:16;
+	} parsed;
+	volatile u64 raw;
+} rx_status_vector;
+
+typedef struct rx_packet {
+	rx_status_vector status;
+	u64 pad[3]; /* For whatever reason, there needs to be a 4 double-word offset */
+	u16 pad2;
+	char buf[METH_RX_BUFF_SIZE-sizeof(rx_status_vector)-3*sizeof(u64)-sizeof(u16)]; /* data */
+} rx_packet;
+
+#define TX_INFO_RPTR	0x00FF0000
+#define TX_INFO_WPTR	0x000000FF
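+/* Encoding note for tx_cat_ptr above, as used by the tx prepare routines
+ * in meth.c: start_addr holds the buffer's physical address shifted right
+ * by 3 (hence the 8-byte alignment of skb->data enforced there) and len
+ * holds length - 1. E.g. a 64-byte buffer at physical address 0x0a001f40
+ * (made up for illustration) encodes as start_addr = 0x14003e8, len = 63.
+ */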
+
+	/* Bits in METH_MAC */
+
+#define SGI_MAC_RESET		BIT(0)	/* 0: MAC110 active in run mode, 1: Global reset signal to MAC110 core is active */
+#define METH_PHY_FDX		BIT(1)	/* 0: Disable full duplex, 1: Enable full duplex */
+#define METH_PHY_LOOP		BIT(2)	/* 0: Normal operation, follows 10/100mbit and M10T/MII select,
+					   1: loops internal MII bus (selects ignored) */
+#define METH_100MBIT		BIT(3)	/* 0: 10meg mode, 1: 100meg mode */
+#define METH_PHY_MII		BIT(4)	/* 0: MII selected, 1: SIA selected
+					   Note: when loopback is set, this bit becomes collision control;
+					   setting it will cause a collision to be reported. */
+
+	/* Bits 5 and 6 are used to determine the Destination address filter mode */
+#define METH_ACCEPT_MY 0	/* 00: Accept PHY address only */
+#define METH_ACCEPT_MCAST 0x20	/* 01: Accept physical, broadcast, and multicast filter matches only */
+#define METH_ACCEPT_AMCAST 0x40	/* 10: Accept physical, broadcast, and all multicast packets */
+#define METH_PROMISC 0x60	/* 11: Promiscuous mode */
+
+#define METH_PHY_LINK_FAIL	BIT(7)	/* 0: Link failure detection disabled, 1: Hardware scans for link failure in PHY */
+
+#define METH_MAC_IPG	0x1ffff00
+
+#define METH_DEFAULT_IPG ((17<<15) | (11<<22) | (21<<8))
+				/* decodes to IPG = 21, IPGR1 = 17, IPGR2 = 11 */
+				/* 0x172e5c00 (23, 23, 23) */ /* 0x54A9500 (21, 21, 21) */
+	/* Bits 8 through 14 are used to determine the Inter-Packet Gap between "Back to Back" packets.
+	   The gap depends on the clock speed of the link: 80 ns per increment for 100BaseT,
+	   800 ns per increment for 10BaseT. */
+
+	/* Bits 15 through 21 are used to determine IPGR1 */
+
+	/* Bits 22 through 28 are used to determine IPGR2 */
+
+#define METH_REV_SHIFT 29	/* Bits 29 through 31 are used to determine the revision */
+				/* 000: Initial revision */
+				/* 001: First revision, Improved TX concatenation */
+
+/* DMA control bits */
+#define METH_RX_OFFSET_SHIFT 12	/* Bits 12:14 of DMA control register indicate starting offset of packet data for RX operation */
+#define METH_RX_DEPTH_SHIFT 4	/* Bits 8:4 define RX fifo depth -- when # of RX fifo entries != depth, an interrupt is generated */
+
+#define METH_DMA_TX_EN		BIT(1)	/* enable TX DMA */
+#define METH_DMA_TX_INT_EN	BIT(0)	/* enable TX Buffer Empty interrupt */
+#define METH_DMA_RX_EN		BIT(15)	/* Enable RX */
+#define METH_DMA_RX_INT_EN	BIT(9)	/* Enable interrupt on RX packet */
+
+/* RX FIFO MCL Info bits */
+#define METH_RX_FIFO_WPTR(x)	(((x)>>16)&0xf)
+#define METH_RX_FIFO_RPTR(x)	(((x)>>8)&0xf)
+#define METH_RX_FIFO_DEPTH(x)	((x)&0x1f)
+
+/* RX status bits */
+
+#define METH_RX_ST_VALID BIT(63)
+#define METH_RX_ST_RCV_CODE_VIOLATION BIT(16)
+#define METH_RX_ST_DRBL_NBL BIT(17)
+#define METH_RX_ST_CRC_ERR BIT(18)
+#define METH_RX_ST_MCAST_PKT BIT(19)
+#define METH_RX_ST_BCAST_PKT BIT(20)
+#define METH_RX_ST_INV_PREAMBLE_CTX BIT(21)
+#define METH_RX_ST_LONG_EVT_SEEN BIT(22)
+#define METH_RX_ST_BAD_PACKET BIT(23)
+#define METH_RX_ST_CARRIER_EVT_SEEN BIT(24)
+#define METH_RX_ST_MCAST_FILTER_MATCH BIT(25)
+#define METH_RX_ST_PHYS_ADDR_MATCH BIT(26)
+
+#define METH_RX_STATUS_ERRORS		\
+	(				\
+	METH_RX_ST_RCV_CODE_VIOLATION |	\
+	METH_RX_ST_CRC_ERR |		\
+	METH_RX_ST_INV_PREAMBLE_CTX |	\
+	METH_RX_ST_LONG_EVT_SEEN |	\
+	METH_RX_ST_BAD_PACKET |		\
+	METH_RX_ST_CARRIER_EVT_SEEN	\
+	)
+	/* Bits in METH_INT */
+	/* Write _1_ to the corresponding bit to clear it */
+#define METH_INT_TX_EMPTY	BIT(0)	/* 0: No interrupt pending, 1: The TX ring buffer is empty */
+#define METH_INT_TX_PKT		BIT(1)	/* 0: No interrupt pending,
+					   1: A TX message had the INT request bit set; the packet has been sent. */
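+/* Usage example from meth.c's TX cleanup path: both TX interrupt sources
+ * are acknowledged with a single write-one-to-clear store:
+ *	mace->eth.int_stat = METH_INT_TX_EMPTY | METH_INT_TX_PKT;
+ */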
+#define METH_INT_TX_LINK_FAIL	BIT(2)	/* 0: No interrupt pending, 1: PHY has reported a link failure */
+#define METH_INT_MEM_ERROR	BIT(3)	/* 0: No interrupt pending,
+					   1: A memory error occurred during DMA; DMA stopped; fatal */
+#define METH_INT_TX_ABORT	BIT(4)	/* 0: No interrupt pending, 1: The TX aborted operation, DMA stopped, FATAL */
+#define METH_INT_RX_THRESHOLD	BIT(5)	/* 0: No interrupt pending, 1: Selected receive threshold condition valid */
+#define METH_INT_RX_UNDERFLOW	BIT(6)	/* 0: No interrupt pending, 1: FIFO was empty, packet could not be queued */
+#define METH_INT_RX_OVERFLOW	BIT(7)	/* 0: No interrupt pending, 1: DMA FIFO Overflow, DMA stopped, FATAL */
+
+/* #define METH_INT_RX_RPTR_MASK 0x0001F00 */	/* Bits 8 through 12 alias of RX read-pointer */
+#define METH_INT_RX_RPTR_MASK 0x0000F00	/* Bits 8 through 11 alias of RX read-pointer - so, is the Rx FIFO 16 or 32 entries? */
+
+	/* Bits 13 through 15 are always 0. */
+
+#define METH_INT_TX_RPTR_MASK	0x1FF0000	/* Bits 16 through 24 alias of TX read-pointer */
+
+#define METH_INT_RX_SEQ_MASK	0x2E000000	/* Bits 25 through 29 are the starting seq number
+						   for the message at the top of the queue */
+
+#define METH_INT_ERROR	(METH_INT_TX_LINK_FAIL | \
+			 METH_INT_MEM_ERROR | \
+			 METH_INT_TX_ABORT | \
+			 METH_INT_RX_OVERFLOW | \
+			 METH_INT_RX_UNDERFLOW)
+
+#define METH_INT_MCAST_HASH	BIT(30)	/* If RX DMA is enabled, the hash select logic output is latched here */
+
+/* TX status bits */
+#define METH_TX_ST_DONE		BIT(63)	/* TX complete */
+#define METH_TX_ST_SUCCESS	BIT(23)	/* Packet was transmitted successfully */
+#define METH_TX_ST_TOOLONG	BIT(24)	/* TX abort due to excessive length */
+#define METH_TX_ST_UNDERRUN	BIT(25)	/* TX abort due to underrun (?) */
+#define METH_TX_ST_EXCCOLL	BIT(26)	/* TX abort due to excess collisions */
+#define METH_TX_ST_DEFER	BIT(27)	/* TX abort due to excess deferrals */
+#define METH_TX_ST_LATECOLL	BIT(28)	/* TX abort due to late collision */
+
+/* Tx command header bits */
+#define METH_TX_CMD_INT_EN	BIT(24)	/* Generate TX interrupt when packet is sent */
+
+/* Phy MDIO interface busy flag */
+#define MDIO_BUSY	BIT(16)
+#define MDIO_DATA_MASK	0xFFFF
+/* PHY defines */
+#define PHY_QS6612X	0x0181441	/* Quality TX */
+#define PHY_ICS1889	0x0015F41	/* ICS FX */
+#define PHY_ICS1890	0x0015F42	/* ICS TX */
+#define PHY_DP83840	0x20005C0	/* National TX */
+
+#define ADVANCE_RX_PTR(x)	((x) = ((x) + 1) & (RX_RING_ENTRIES - 1))
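+/* Usage sketch (illustrative): ADVANCE_RX_PTR(priv->rx_write) advances a
+ * ring index in place, wrapping at RX_RING_ENTRIES thanks to the
+ * power-of-two mask.
+ */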