author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-07 18:49:45 +0000 |
---|---|---|
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-07 18:49:45 +0000 |
commit | 2c3c1048746a4622d8c89a29670120dc8fab93c4 (patch) | |
tree | 848558de17fb3008cdf4d861b01ac7781903ce39 /drivers/net/wan | |
parent | Initial commit. (diff) | |
download | linux-2c3c1048746a4622d8c89a29670120dc8fab93c4.tar.xz linux-2c3c1048746a4622d8c89a29670120dc8fab93c4.zip |
Adding upstream version 6.1.76. (upstream/6.1.76)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/net/wan')
30 files changed, 16671 insertions, 0 deletions
diff --git a/drivers/net/wan/.gitignore b/drivers/net/wan/.gitignore new file mode 100644 index 000000000..247bfbf10 --- /dev/null +++ b/drivers/net/wan/.gitignore @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0-only +wanxlfw.inc diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig new file mode 100644 index 000000000..dcb069dde --- /dev/null +++ b/drivers/net/wan/Kconfig @@ -0,0 +1,248 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# wan devices configuration +# + +menuconfig WAN + bool "Wan interfaces support" + help + Wide Area Networks (WANs), such as X.25, Frame Relay and leased + lines, are used to interconnect Local Area Networks (LANs) over vast + distances with data transfer rates significantly higher than those + achievable with commonly used asynchronous modem connections. + + Usually, a quite expensive external device called a `WAN router' is + needed to connect to a WAN. As an alternative, a relatively + inexpensive WAN interface card can allow your Linux box to directly + connect to a WAN. + + If you have one of those cards and wish to use it under Linux, + say Y here and also to the WAN driver for your card. + + If unsure, say N. + +if WAN + +# Generic HDLC +config HDLC + tristate "Generic HDLC layer" + help + Say Y to this option if your Linux box contains a WAN (Wide Area + Network) card supported by this driver and you are planning to + connect the box to a WAN. + + You will need supporting software from + <http://www.kernel.org/pub/linux/utils/net/hdlc/>. + Generic HDLC driver currently supports raw HDLC, Cisco HDLC, Frame + Relay, synchronous Point-to-Point Protocol (PPP) and X.25. + + To compile this driver as a module, choose M here: the + module will be called hdlc. + + If unsure, say N. + +config HDLC_RAW + tristate "Raw HDLC support" + depends on HDLC + help + Generic HDLC driver supporting raw HDLC over WAN connections. + + If unsure, say N. + +config HDLC_RAW_ETH + tristate "Raw HDLC Ethernet device support" + depends on HDLC + help + Generic HDLC driver supporting raw HDLC Ethernet device emulation + over WAN connections. + + You will need it for Ethernet over HDLC bridges. + + If unsure, say N. + +config HDLC_CISCO + tristate "Cisco HDLC support" + depends on HDLC + help + Generic HDLC driver supporting Cisco HDLC over WAN connections. + + If unsure, say N. + +config HDLC_FR + tristate "Frame Relay support" + depends on HDLC + help + Generic HDLC driver supporting Frame Relay over WAN connections. + + If unsure, say N. + +config HDLC_PPP + tristate "Synchronous Point-to-Point Protocol (PPP) support" + depends on HDLC + help + Generic HDLC driver supporting PPP over WAN connections. + + If unsure, say N. + +config HDLC_X25 + tristate "X.25 protocol support" + depends on HDLC && (LAPB=m && HDLC=m || LAPB=y) + help + Generic HDLC driver supporting X.25 over WAN connections. + + If unsure, say N. + +comment "X.25/LAPB support is disabled" + depends on HDLC && (LAPB!=m || HDLC!=m) && LAPB!=y + +config PCI200SYN + tristate "Goramo PCI200SYN support" + depends on HDLC && PCI + help + Driver for PCI200SYN cards by Goramo sp. j. + + If you have such a card, say Y here and see + <http://www.kernel.org/pub/linux/utils/net/hdlc/>. + + To compile this as a module, choose M here: the + module will be called pci200syn. + + If unsure, say N. + +config WANXL + tristate "SBE Inc. wanXL support" + depends on HDLC && PCI + help + Driver for wanXL PCI cards by SBE Inc. 
+ + If you have such a card, say Y here and see + <http://www.kernel.org/pub/linux/utils/net/hdlc/>. + + To compile this as a module, choose M here: the + module will be called wanxl. + + If unsure, say N. + +config WANXL_BUILD_FIRMWARE + bool "rebuild wanXL firmware" + depends on WANXL && !PREVENT_FIRMWARE_BUILD + help + Allows you to rebuild firmware run by the QUICC processor. + It requires m68k toolchains and hexdump programs. + + You should never need this option, say N. + +config PC300TOO + tristate "Cyclades PC300 RSV/X21 alternative support" + depends on HDLC && PCI + help + Alternative driver for PC300 RSV/X21 PCI cards made by + Cyclades, Inc. If you have such a card, say Y here and see + <http://www.kernel.org/pub/linux/utils/net/hdlc/>. + + To compile this as a module, choose M here: the module + will be called pc300too. + + If unsure, say N here. + +config N2 + tristate "SDL RISCom/N2 support" + depends on HDLC && ISA + help + Driver for RISCom/N2 single or dual channel ISA cards by + SDL Communications Inc. + + If you have such a card, say Y here and see + <http://www.kernel.org/pub/linux/utils/net/hdlc/>. + + Note that N2csu and N2dds cards are not supported by this driver. + + To compile this driver as a module, choose M here: the module + will be called n2. + + If unsure, say N. + +config C101 + tristate "Moxa C101 support" + depends on HDLC && ISA + help + Driver for C101 SuperSync ISA cards by Moxa Technologies Co., Ltd. + + If you have such a card, say Y here and see + <http://www.kernel.org/pub/linux/utils/net/hdlc/>. + + To compile this driver as a module, choose M here: the + module will be called c101. + + If unsure, say N. + +config FARSYNC + tristate "FarSync T-Series support" + depends on HDLC && PCI + help + Support for the FarSync T-Series X.21 (and V.35/V.24) cards by + FarSite Communications Ltd. + + Synchronous communication is supported on all ports at speeds up to + 8Mb/s (128K on V.24) using synchronous PPP, Cisco HDLC, raw HDLC, + Frame Relay or X.25/LAPB. + + If you want the module to be automatically loaded when the interface + is referenced then you should add "alias hdlcX farsync" to a file + in /etc/modprobe.d/ for each interface, where X is 0, 1, 2, ..., or + simply use "alias hdlc* farsync" to indicate all of them. + + To compile this driver as a module, choose M here: the + module will be called farsync. + +config FSL_UCC_HDLC + tristate "Freescale QUICC Engine HDLC support" + depends on HDLC + depends on QUICC_ENGINE + help + Driver for Freescale QUICC Engine HDLC controller. The driver + supports HDLC in NMSI and TDM mode. + + To compile this driver as a module, choose M here: the + module will be called fsl_ucc_hdlc. + +config SLIC_DS26522 + tristate "Slic Maxim ds26522 card support" + depends on SPI + depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE || COMPILE_TEST + select BITREVERSE + help + This module initializes and configures the slic maxim card + in T1 or E1 mode. + + To compile this driver as a module, choose M here: the + module will be called slic_ds26522. + +config IXP4XX_HSS + tristate "Intel IXP4xx HSS (synchronous serial port) support" + depends on HDLC && IXP4XX_NPE && IXP4XX_QMGR + depends on ARCH_IXP4XX && OF + select MFD_SYSCON + help + Say Y here if you want to use built-in HSS ports + on IXP4xx processor. 
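The help texts above all point at the userspace tooling from <http://www.kernel.org/pub/linux/utils/net/hdlc/>; a protocol such as Cisco HDLC is attached to a generic-HDLC interface at runtime through the SIOCWANDEV ioctl. A minimal userspace sketch of that step (not part of this commit; the interface name "hdlc0" and the keepalive timings are illustrative):

```c
/* Minimal sketch: attach Cisco HDLC to a generic-HDLC interface via
 * SIOCWANDEV, roughly what the sethdlc tool does. Device name and
 * timer values are illustrative. */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/if.h>
#include <linux/hdlc/ioctl.h>
#include <linux/sockios.h>

int main(void)
{
	cisco_proto cisco = { .interval = 10, .timeout = 25 };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "hdlc0", IFNAMSIZ - 1);
	ifr.ifr_settings.type = IF_PROTO_CISCO;
	ifr.ifr_settings.size = sizeof(cisco);
	ifr.ifr_settings.ifs_ifsu.cisco = &cisco;

	if (ioctl(fd, SIOCWANDEV, &ifr) < 0)
		perror("SIOCWANDEV");
	return 0;
}
```

The same call with IF_PROTO_FR, IF_PROTO_PPP or IF_PROTO_X25 selects the other protocol modules configured above.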
+ +# X.25 network drivers +config LAPBETHER + tristate "LAPB over Ethernet driver" + depends on LAPB && X25 + help + Driver for a pseudo device (typically called /dev/lapb0) which allows + you to open an LAPB point-to-point connection to some other computer + on your Ethernet network. + + In order to do this, you need to say Y or M to the driver for your + Ethernet card as well as to "LAPB Data Link Driver". + + To compile this driver as a module, choose M here: the + module will be called lapbether. + + + If unsure, say N. + +endif # WAN diff --git a/drivers/net/wan/Makefile b/drivers/net/wan/Makefile new file mode 100644 index 000000000..5bec8fae4 --- /dev/null +++ b/drivers/net/wan/Makefile @@ -0,0 +1,62 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the Linux network (wan) device drivers. +# +# 3 Aug 2000, Christoph Hellwig <hch@infradead.org> +# Rewritten to use lists instead of if-statements. +# + +obj-$(CONFIG_HDLC) += hdlc.o +obj-$(CONFIG_HDLC_RAW) += hdlc_raw.o +obj-$(CONFIG_HDLC_RAW_ETH) += hdlc_raw_eth.o +obj-$(CONFIG_HDLC_CISCO) += hdlc_cisco.o +obj-$(CONFIG_HDLC_FR) += hdlc_fr.o +obj-$(CONFIG_HDLC_PPP) += hdlc_ppp.o +obj-$(CONFIG_HDLC_X25) += hdlc_x25.o + +obj-$(CONFIG_FARSYNC) += farsync.o + +obj-$(CONFIG_LAPBETHER) += lapbether.o +obj-$(CONFIG_N2) += n2.o +obj-$(CONFIG_C101) += c101.o +obj-$(CONFIG_WANXL) += wanxl.o +obj-$(CONFIG_PCI200SYN) += pci200syn.o +obj-$(CONFIG_PC300TOO) += pc300too.o +obj-$(CONFIG_IXP4XX_HSS) += ixp4xx_hss.o +obj-$(CONFIG_FSL_UCC_HDLC) += fsl_ucc_hdlc.o +obj-$(CONFIG_SLIC_DS26522) += slic_ds26522.o + +clean-files := wanxlfw.inc +$(obj)/wanxl.o: $(obj)/wanxlfw.inc + +CROSS_COMPILE_M68K = m68k-linux-gnu- + +ifeq ($(CONFIG_WANXL_BUILD_FIRMWARE),y) +ifeq ($(ARCH),m68k) + M68KCC = $(CC) + M68KLD = $(LD) +else + M68KCC = $(CROSS_COMPILE_M68K)gcc + M68KLD = $(CROSS_COMPILE_M68K)ld +endif + +quiet_cmd_build_wanxlfw = BLDFW $@ + cmd_build_wanxlfw = hexdump -ve '"\n" 16/1 "0x%02X,"' $< | \ + sed 's/0x ,//g;1s/^/static const u8 firmware[]={/;$$s/,$$/\n};\n/' > $@ + +$(obj)/wanxlfw.inc: $(obj)/wanxlfw.bin FORCE + $(call if_changed,build_wanxlfw) + +quiet_cmd_m68kld_bin_o = M68KLD $@ + cmd_m68kld_bin_o = $(M68KLD) --oformat binary -Ttext 0x1000 $< -o $@ + +$(obj)/wanxlfw.bin: $(obj)/wanxlfw.o FORCE + $(call if_changed,m68kld_bin_o) + +quiet_cmd_m68kas_o_S = M68KAS $@ + cmd_m68kas_o_S = $(M68KCC) -D__ASSEMBLY__ -Wp,-MD,$(depfile) -I$(srctree)/include/uapi -c -o $@ $< + +$(obj)/wanxlfw.o: $(src)/wanxlfw.S FORCE + $(call if_changed_dep,m68kas_o_S) +endif +targets += wanxlfw.inc wanxlfw.bin wanxlfw.o diff --git a/drivers/net/wan/c101.c b/drivers/net/wan/c101.c new file mode 100644 index 000000000..8dd14d916 --- /dev/null +++ b/drivers/net/wan/c101.c @@ -0,0 +1,440 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Moxa C101 synchronous serial card driver for Linux + * + * Copyright (C) 2000-2003 Krzysztof Halasa <khc@pm.waw.pl> + * + * For information see <https://www.kernel.org/pub/linux/utils/net/hdlc/> + * + * Sources of information: + * Hitachi HD64570 SCA User's Manual + * Moxa C101 User's Manual + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/capability.h> +#include <linux/slab.h> +#include <linux/types.h> +#include <linux/string.h> +#include <linux/errno.h> +#include <linux/init.h> +#include <linux/netdevice.h> +#include <linux/hdlc.h> +#include <linux/delay.h> +#include <asm/io.h> + +#include "hd64570.h" + +static const char *version = "Moxa C101 driver version: 1.15"; 
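Stepping back to the Makefile above: the build_wanxlfw rule turns the linked m68k image into wanxlfw.inc via hexdump and sed, i.e. into a C byte array that the wanXL driver includes when CONFIG_WANXL_BUILD_FIRMWARE=y. The sed expression strips the `0x ,` padding hexdump emits for a short final line, prepends the array declaration, and closes it after the last byte. Illustratively (these byte values are invented), the generated file has this shape:

```c
/* Illustrative shape of the generated wanxlfw.inc only; the byte
 * values here are made up, the real ones come from wanxlfw.bin. */
static const u8 firmware[] = {
	0x4E, 0x71, 0x4E, 0x71, 0x60, 0x00, 0x01, 0x00,
	/* ...one array element per byte of the m68k image... */
};
```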
+static const char *devname = "C101"; + +#undef DEBUG_PKT +#define DEBUG_RINGS + +#define C101_PAGE 0x1D00 +#define C101_DTR 0x1E00 +#define C101_SCA 0x1F00 +#define C101_WINDOW_SIZE 0x2000 +#define C101_MAPPED_RAM_SIZE 0x4000 + +#define RAM_SIZE (256 * 1024) +#define TX_RING_BUFFERS 10 +#define RX_RING_BUFFERS ((RAM_SIZE - C101_WINDOW_SIZE) / \ + (sizeof(pkt_desc) + HDLC_MAX_MRU) - TX_RING_BUFFERS) + +#define CLOCK_BASE 9830400 /* 9.8304 MHz */ +#define PAGE0_ALWAYS_MAPPED + +static char *hw; /* pointer to hw=xxx command line string */ + +typedef struct card_s { + struct net_device *dev; + spinlock_t lock; /* TX lock */ + u8 __iomem *win0base; /* ISA window base address */ + u32 phy_winbase; /* ISA physical base address */ + sync_serial_settings settings; + int rxpart; /* partial frame received, next frame invalid*/ + unsigned short encoding; + unsigned short parity; + u16 rx_ring_buffers; /* number of buffers in a ring */ + u16 tx_ring_buffers; + u16 buff_offset; /* offset of first buffer of first channel */ + u16 rxin; /* rx ring buffer 'in' pointer */ + u16 txin; /* tx ring buffer 'in' and 'last' pointers */ + u16 txlast; + u8 rxs, txs, tmc; /* SCA registers */ + u8 irq; /* IRQ (3-15) */ + u8 page; + + struct card_s *next_card; +} card_t; + +typedef card_t port_t; + +static card_t *first_card; +static card_t **new_card = &first_card; + +#define sca_in(reg, card) readb((card)->win0base + C101_SCA + (reg)) +#define sca_out(value, reg, card) writeb(value, (card)->win0base + C101_SCA + (reg)) +#define sca_inw(reg, card) readw((card)->win0base + C101_SCA + (reg)) + +/* EDA address register must be set in EDAL, EDAH order - 8 bit ISA bus */ +#define sca_outw(value, reg, card) do { \ + writeb(value & 0xFF, (card)->win0base + C101_SCA + (reg)); \ + writeb((value >> 8) & 0xFF, (card)->win0base + C101_SCA + (reg + 1));\ +} while (0) + +#define port_to_card(port) (port) +#define log_node(port) (0) +#define phy_node(port) (0) +#define winsize(card) (C101_WINDOW_SIZE) +#define win0base(card) ((card)->win0base) +#define winbase(card) ((card)->win0base + 0x2000) +#define get_port(card, port) (card) +static void sca_msci_intr(port_t *port); + +static inline u8 sca_get_page(card_t *card) +{ + return card->page; +} + +static inline void openwin(card_t *card, u8 page) +{ + card->page = page; + writeb(page, card->win0base + C101_PAGE); +} + +#include "hd64570.c" + +static inline void set_carrier(port_t *port) +{ + if (!(sca_in(MSCI1_OFFSET + ST3, port) & ST3_DCD)) + netif_carrier_on(port_to_dev(port)); + else + netif_carrier_off(port_to_dev(port)); +} + +static void sca_msci_intr(port_t *port) +{ + u8 stat = sca_in(MSCI0_OFFSET + ST1, port); /* read MSCI ST1 status */ + + /* Reset MSCI TX underrun and CDCD (ignored) status bit */ + sca_out(stat & (ST1_UDRN | ST1_CDCD), MSCI0_OFFSET + ST1, port); + + if (stat & ST1_UDRN) { + /* TX Underrun error detected */ + port_to_dev(port)->stats.tx_errors++; + port_to_dev(port)->stats.tx_fifo_errors++; + } + + stat = sca_in(MSCI1_OFFSET + ST1, port); /* read MSCI1 ST1 status */ + /* Reset MSCI CDCD status bit - uses ch#2 DCD input */ + sca_out(stat & ST1_CDCD, MSCI1_OFFSET + ST1, port); + + if (stat & ST1_CDCD) + set_carrier(port); +} + +static void c101_set_iface(port_t *port) +{ + u8 rxs = port->rxs & CLK_BRG_MASK; + u8 txs = port->txs & CLK_BRG_MASK; + + switch (port->settings.clock_type) { + case CLOCK_INT: + rxs |= CLK_BRG_RX; /* TX clock */ + txs |= CLK_RXCLK_TX; /* BRG output */ + break; + + case CLOCK_TXINT: + rxs |= CLK_LINE_RX; /* RXC input */ + txs |= 
CLK_BRG_TX; /* BRG output */ + break; + + case CLOCK_TXFROMRX: + rxs |= CLK_LINE_RX; /* RXC input */ + txs |= CLK_RXCLK_TX; /* RX clock */ + break; + + default: /* EXTernal clock */ + rxs |= CLK_LINE_RX; /* RXC input */ + txs |= CLK_LINE_TX; /* TXC input */ + } + + port->rxs = rxs; + port->txs = txs; + sca_out(rxs, MSCI1_OFFSET + RXS, port); + sca_out(txs, MSCI1_OFFSET + TXS, port); + sca_set_port(port); +} + +static int c101_open(struct net_device *dev) +{ + port_t *port = dev_to_port(dev); + int result; + + result = hdlc_open(dev); + if (result) + return result; + + writeb(1, port->win0base + C101_DTR); + sca_out(0, MSCI1_OFFSET + CTL, port); /* RTS uses ch#2 output */ + sca_open(dev); + /* DCD is connected to port 2 !@#$%^& - disable MSCI0 CDCD interrupt */ + sca_out(IE1_UDRN, MSCI0_OFFSET + IE1, port); + sca_out(IE0_TXINT, MSCI0_OFFSET + IE0, port); + + set_carrier(port); + + /* enable MSCI1 CDCD interrupt */ + sca_out(IE1_CDCD, MSCI1_OFFSET + IE1, port); + sca_out(IE0_RXINTA, MSCI1_OFFSET + IE0, port); + sca_out(0x48, IER0, port); /* TXINT #0 and RXINT #1 */ + c101_set_iface(port); + return 0; +} + +static int c101_close(struct net_device *dev) +{ + port_t *port = dev_to_port(dev); + + sca_close(dev); + writeb(0, port->win0base + C101_DTR); + sca_out(CTL_NORTS, MSCI1_OFFSET + CTL, port); + hdlc_close(dev); + return 0; +} + +static int c101_siocdevprivate(struct net_device *dev, struct ifreq *ifr, + void __user *data, int cmd) +{ +#ifdef DEBUG_RINGS + port_t *port = dev_to_port(dev); + + if (cmd == SIOCDEVPRIVATE) { + sca_dump_rings(dev); + printk(KERN_DEBUG "MSCI1: ST: %02x %02x %02x %02x\n", + sca_in(MSCI1_OFFSET + ST0, port), + sca_in(MSCI1_OFFSET + ST1, port), + sca_in(MSCI1_OFFSET + ST2, port), + sca_in(MSCI1_OFFSET + ST3, port)); + return 0; + } +#endif + + return -EOPNOTSUPP; +} + +static int c101_ioctl(struct net_device *dev, struct if_settings *ifs) +{ + const size_t size = sizeof(sync_serial_settings); + sync_serial_settings new_line; + sync_serial_settings __user *line = ifs->ifs_ifsu.sync; + port_t *port = dev_to_port(dev); + + switch (ifs->type) { + case IF_GET_IFACE: + ifs->type = IF_IFACE_SYNC_SERIAL; + if (ifs->size < size) { + ifs->size = size; /* data size wanted */ + return -ENOBUFS; + } + if (copy_to_user(line, &port->settings, size)) + return -EFAULT; + return 0; + + case IF_IFACE_SYNC_SERIAL: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + if (copy_from_user(&new_line, line, size)) + return -EFAULT; + + if (new_line.clock_type != CLOCK_EXT && + new_line.clock_type != CLOCK_TXFROMRX && + new_line.clock_type != CLOCK_INT && + new_line.clock_type != CLOCK_TXINT) + return -EINVAL; /* No such clock setting */ + + if (new_line.loopback != 0 && new_line.loopback != 1) + return -EINVAL; + + memcpy(&port->settings, &new_line, size); /* Update settings */ + c101_set_iface(port); + return 0; + + default: + return hdlc_ioctl(dev, ifs); + } +} + +static void c101_destroy_card(card_t *card) +{ + readb(card->win0base + C101_PAGE); /* Resets SCA? 
*/ + + if (card->irq) + free_irq(card->irq, card); + + if (card->win0base) { + iounmap(card->win0base); + release_mem_region(card->phy_winbase, C101_MAPPED_RAM_SIZE); + } + + free_netdev(card->dev); + + kfree(card); +} + +static const struct net_device_ops c101_ops = { + .ndo_open = c101_open, + .ndo_stop = c101_close, + .ndo_start_xmit = hdlc_start_xmit, + .ndo_siocwandev = c101_ioctl, + .ndo_siocdevprivate = c101_siocdevprivate, +}; + +static int __init c101_run(unsigned long irq, unsigned long winbase) +{ + struct net_device *dev; + hdlc_device *hdlc; + card_t *card; + int result; + + if (irq < 3 || irq > 15 || irq == 6) /* FIXME */ { + pr_err("invalid IRQ value\n"); + return -ENODEV; + } + + if (winbase < 0xC0000 || winbase > 0xDFFFF || (winbase & 0x3FFF) != 0) { + pr_err("invalid RAM value\n"); + return -ENODEV; + } + + card = kzalloc(sizeof(card_t), GFP_KERNEL); + if (!card) + return -ENOBUFS; + + card->dev = alloc_hdlcdev(card); + if (!card->dev) { + pr_err("unable to allocate memory\n"); + kfree(card); + return -ENOBUFS; + } + + if (request_irq(irq, sca_intr, 0, devname, card)) { + pr_err("could not allocate IRQ\n"); + c101_destroy_card(card); + return -EBUSY; + } + card->irq = irq; + + if (!request_mem_region(winbase, C101_MAPPED_RAM_SIZE, devname)) { + pr_err("could not request RAM window\n"); + c101_destroy_card(card); + return -EBUSY; + } + card->phy_winbase = winbase; + card->win0base = ioremap(winbase, C101_MAPPED_RAM_SIZE); + if (!card->win0base) { + pr_err("could not map I/O address\n"); + c101_destroy_card(card); + return -EFAULT; + } + + card->tx_ring_buffers = TX_RING_BUFFERS; + card->rx_ring_buffers = RX_RING_BUFFERS; + card->buff_offset = C101_WINDOW_SIZE; /* Bytes 1D00-1FFF reserved */ + + readb(card->win0base + C101_PAGE); /* Resets SCA? */ + udelay(100); + writeb(0, card->win0base + C101_PAGE); + writeb(0, card->win0base + C101_DTR); /* Power-up for RAM? */ + + sca_init(card, 0); + + dev = port_to_dev(card); + hdlc = dev_to_hdlc(dev); + + spin_lock_init(&card->lock); + dev->irq = irq; + dev->mem_start = winbase; + dev->mem_end = winbase + C101_MAPPED_RAM_SIZE - 1; + dev->tx_queue_len = 50; + dev->netdev_ops = &c101_ops; + hdlc->attach = sca_attach; + hdlc->xmit = sca_xmit; + card->settings.clock_type = CLOCK_EXT; + + result = register_hdlc_device(dev); + if (result) { + pr_warn("unable to register hdlc device\n"); + c101_destroy_card(card); + return result; + } + + sca_init_port(card); /* Set up C101 memory */ + set_carrier(card); + + netdev_info(dev, "Moxa C101 on IRQ%u, using %u TX + %u RX packets rings\n", + card->irq, card->tx_ring_buffers, card->rx_ring_buffers); + + *new_card = card; + new_card = &card->next_card; + return 0; +} + +static int __init c101_init(void) +{ + if (!hw) { +#ifdef MODULE + pr_info("no card initialized\n"); +#endif + return -EINVAL; /* no parameters specified, abort */ + } + + pr_info("%s\n", version); + + do { + unsigned long irq, ram; + + irq = simple_strtoul(hw, &hw, 0); + + if (*hw++ != ',') + break; + ram = simple_strtoul(hw, &hw, 0); + + if (*hw == ':' || *hw == '\x0') + c101_run(irq, ram); + + if (*hw == '\x0') + return first_card ? 0 : -EINVAL; + } while (*hw++ == ':'); + + pr_err("invalid hardware parameters\n"); + return first_card ? 
0 : -EINVAL; +} + +static void __exit c101_cleanup(void) +{ + card_t *card = first_card; + + while (card) { + card_t *ptr = card; + + card = card->next_card; + unregister_hdlc_device(port_to_dev(ptr)); + c101_destroy_card(ptr); + } +} + +module_init(c101_init); +module_exit(c101_cleanup); + +MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>"); +MODULE_DESCRIPTION("Moxa C101 serial port driver"); +MODULE_LICENSE("GPL v2"); +module_param(hw, charp, 0444); +MODULE_PARM_DESC(hw, "irq,ram:irq,..."); diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c new file mode 100644 index 000000000..5b01642ca --- /dev/null +++ b/drivers/net/wan/farsync.c @@ -0,0 +1,2596 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* FarSync WAN driver for Linux (2.6.x kernel version) + * + * Actually sync driver for X.21, V.35 and V.24 on FarSync T-series cards + * + * Copyright (C) 2001-2004 FarSite Communications Ltd. + * www.farsite.co.uk + * + * Author: R.J.Dunlop <bob.dunlop@farsite.co.uk> + * Maintainer: Kevin Curtis <kevin.curtis@farsite.co.uk> + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/version.h> +#include <linux/pci.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/ioport.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/delay.h> +#include <linux/if.h> +#include <linux/hdlc.h> +#include <asm/io.h> +#include <linux/uaccess.h> + +#include "farsync.h" + +/* Module info + */ +MODULE_AUTHOR("R.J.Dunlop <bob.dunlop@farsite.co.uk>"); +MODULE_DESCRIPTION("FarSync T-Series WAN driver. FarSite Communications Ltd."); +MODULE_LICENSE("GPL"); + +/* Driver configuration and global parameters + * ========================================== + */ + +/* Number of ports (per card) and cards supported + */ +#define FST_MAX_PORTS 4 +#define FST_MAX_CARDS 32 + +/* Default parameters for the link + */ +#define FST_TX_QUEUE_LEN 100 /* At 8Mbps a longer queue length is + * useful + */ +#define FST_TXQ_DEPTH 16 /* This one is for the buffering + * of frames on the way down to the card + * so that we can keep the card busy + * and maximise throughput + */ +#define FST_HIGH_WATER_MARK 12 /* Point at which we flow control + * network layer + */ +#define FST_LOW_WATER_MARK 8 /* Point at which we remove flow + * control from network layer + */ +#define FST_MAX_MTU 8000 /* Huge but possible */ +#define FST_DEF_MTU 1500 /* Common sane value */ + +#define FST_TX_TIMEOUT (2 * HZ) + +#ifdef ARPHRD_RAWHDLC +#define ARPHRD_MYTYPE ARPHRD_RAWHDLC /* Raw frames */ +#else +#define ARPHRD_MYTYPE ARPHRD_HDLC /* Cisco-HDLC (keepalives etc) */ +#endif + +/* Modules parameters and associated variables + */ +static int fst_txq_low = FST_LOW_WATER_MARK; +static int fst_txq_high = FST_HIGH_WATER_MARK; +static int fst_max_reads = 7; +static int fst_excluded_cards; +static int fst_excluded_list[FST_MAX_CARDS]; + +module_param(fst_txq_low, int, 0); +module_param(fst_txq_high, int, 0); +module_param(fst_max_reads, int, 0); +module_param(fst_excluded_cards, int, 0); +module_param_array(fst_excluded_list, int, NULL, 0); + +/* Card shared memory layout + * ========================= + */ +#pragma pack(1) + +/* This information is derived in part from the FarSite FarSync Smc.h + * file. Unfortunately various name clashes and the non-portability of the + * bit field declarations in that file have meant that I have chosen to + * recreate the information here. 
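A note on the `#pragma pack(1)` wrapping the shared-memory declarations above: these structs must match the card firmware's layout byte for byte, and the compiler's default alignment padding would silently shift every field offset. A stand-alone demonstration, independent of the driver:

```c
/* Demonstration (not driver code) of why pack(1) matters: natural
 * alignment inserts padding that would desynchronise host-side
 * offsets from the firmware's fixed layout. */
#include <stdio.h>
#include <stddef.h>

struct natural { unsigned char hadr; unsigned short bcnt; };
#pragma pack(1)
struct packed1 { unsigned char hadr; unsigned short bcnt; };
#pragma pack()

int main(void)
{
	/* Typically: natural is size 4 with bcnt at offset 2,
	 * packed is size 3 with bcnt at offset 1. */
	printf("natural: size %zu, bcnt at %zu\n",
	       sizeof(struct natural), offsetof(struct natural, bcnt));
	printf("packed:  size %zu, bcnt at %zu\n",
	       sizeof(struct packed1), offsetof(struct packed1, bcnt));
	return 0;
}
```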
+ * + * The SMC (Shared Memory Configuration) has a version number that is + * incremented every time there is a significant change. This number can + * be used to check that we have not got out of step with the firmware + * contained in the .CDE files. + */ +#define SMC_VERSION 24 + +#define FST_MEMSIZE 0x100000 /* Size of card memory (1Mb) */ + +#define SMC_BASE 0x00002000L /* Base offset of the shared memory window main + * configuration structure + */ +#define BFM_BASE 0x00010000L /* Base offset of the shared memory window DMA + * buffers + */ + +#define LEN_TX_BUFFER 8192 /* Size of packet buffers */ +#define LEN_RX_BUFFER 8192 + +#define LEN_SMALL_TX_BUFFER 256 /* Size of obsolete buffs used for DOS diags */ +#define LEN_SMALL_RX_BUFFER 256 + +#define NUM_TX_BUFFER 2 /* Must be power of 2. Fixed by firmware */ +#define NUM_RX_BUFFER 8 + +/* Interrupt retry time in milliseconds */ +#define INT_RETRY_TIME 2 + +/* The Am186CH/CC processors support a SmartDMA mode using circular pools + * of buffer descriptors. The structure is almost identical to that used + * in the LANCE Ethernet controllers. Details available as PDF from the + * AMD web site: https://www.amd.com/products/epd/processors/\ + * 2.16bitcont/3.am186cxfa/a21914/21914.pdf + */ +struct txdesc { /* Transmit descriptor */ + volatile u16 ladr; /* Low order address of packet. This is a + * linear address in the Am186 memory space + */ + volatile u8 hadr; /* High order address. Low 4 bits only, high 4 + * bits must be zero + */ + volatile u8 bits; /* Status and config */ + volatile u16 bcnt; /* 2s complement of packet size in low 15 bits. + * Transmit terminal count interrupt enable in + * top bit. + */ + u16 unused; /* Not used in Tx */ +}; + +struct rxdesc { /* Receive descriptor */ + volatile u16 ladr; /* Low order address of packet */ + volatile u8 hadr; /* High order address */ + volatile u8 bits; /* Status and config */ + volatile u16 bcnt; /* 2s complement of buffer size in low 15 bits. + * Receive terminal count interrupt enable in + * top bit. + */ + volatile u16 mcnt; /* Message byte count (15 bits) */ +}; + +/* Convert a length into the 15 bit 2's complement */ +/* #define cnv_bcnt(len) (( ~(len) + 1 ) & 0x7FFF ) */ +/* Since we need to set the high bit to enable the completion interrupt this + * can be made a lot simpler + */ +#define cnv_bcnt(len) (-(len)) + +/* Status and config bits for the above */ +#define DMA_OWN 0x80 /* SmartDMA owns the descriptor */ +#define TX_STP 0x02 /* Tx: start of packet */ +#define TX_ENP 0x01 /* Tx: end of packet */ +#define RX_ERR 0x40 /* Rx: error (OR of next 4 bits) */ +#define RX_FRAM 0x20 /* Rx: framing error */ +#define RX_OFLO 0x10 /* Rx: overflow error */ +#define RX_CRC 0x08 /* Rx: CRC error */ +#define RX_HBUF 0x04 /* Rx: buffer error */ +#define RX_STP 0x02 /* Rx: start of packet */ +#define RX_ENP 0x01 /* Rx: end of packet */ + +/* Interrupts from the card are caused by various events which are presented + * in a circular buffer as several events may be processed on one physical int + */ +#define MAX_CIRBUFF 32 + +struct cirbuff { + u8 rdindex; /* read, then increment and wrap */ + u8 wrindex; /* write, then increment and wrap */ + u8 evntbuff[MAX_CIRBUFF]; +}; + +/* Interrupt event codes. 
+ * Where appropriate the two low order bits indicate the port number + */ +#define CTLA_CHG 0x18 /* Control signal changed */ +#define CTLB_CHG 0x19 +#define CTLC_CHG 0x1A +#define CTLD_CHG 0x1B + +#define INIT_CPLT 0x20 /* Initialisation complete */ +#define INIT_FAIL 0x21 /* Initialisation failed */ + +#define ABTA_SENT 0x24 /* Abort sent */ +#define ABTB_SENT 0x25 +#define ABTC_SENT 0x26 +#define ABTD_SENT 0x27 + +#define TXA_UNDF 0x28 /* Transmission underflow */ +#define TXB_UNDF 0x29 +#define TXC_UNDF 0x2A +#define TXD_UNDF 0x2B + +#define F56_INT 0x2C +#define M32_INT 0x2D + +#define TE1_ALMA 0x30 + +/* Port physical configuration. See farsync.h for field values */ +struct port_cfg { + u16 lineInterface; /* Physical interface type */ + u8 x25op; /* Unused at present */ + u8 internalClock; /* 1 => internal clock, 0 => external */ + u8 transparentMode; /* 1 => on, 0 => off */ + u8 invertClock; /* 0 => normal, 1 => inverted */ + u8 padBytes[6]; /* Padding */ + u32 lineSpeed; /* Speed in bps */ +}; + +/* TE1 port physical configuration */ +struct su_config { + u32 dataRate; + u8 clocking; + u8 framing; + u8 structure; + u8 interface; + u8 coding; + u8 lineBuildOut; + u8 equalizer; + u8 transparentMode; + u8 loopMode; + u8 range; + u8 txBufferMode; + u8 rxBufferMode; + u8 startingSlot; + u8 losThreshold; + u8 enableIdleCode; + u8 idleCode; + u8 spare[44]; +}; + +/* TE1 Status */ +struct su_status { + u32 receiveBufferDelay; + u32 framingErrorCount; + u32 codeViolationCount; + u32 crcErrorCount; + u32 lineAttenuation; + u8 portStarted; + u8 lossOfSignal; + u8 receiveRemoteAlarm; + u8 alarmIndicationSignal; + u8 spare[40]; +}; + +/* Finally sling all the above together into the shared memory structure. + * Sorry it's a hodge podge of arrays, structures and unused bits, it's been + * evolving under NT for some time so I guess we're stuck with it. + * The structure starts at offset SMC_BASE. + * See farsync.h for some field values. + */ +struct fst_shared { + /* DMA descriptor rings */ + struct rxdesc rxDescrRing[FST_MAX_PORTS][NUM_RX_BUFFER]; + struct txdesc txDescrRing[FST_MAX_PORTS][NUM_TX_BUFFER]; + + /* Obsolete small buffers */ + u8 smallRxBuffer[FST_MAX_PORTS][NUM_RX_BUFFER][LEN_SMALL_RX_BUFFER]; + u8 smallTxBuffer[FST_MAX_PORTS][NUM_TX_BUFFER][LEN_SMALL_TX_BUFFER]; + + u8 taskStatus; /* 0x00 => initialising, 0x01 => running, + * 0xFF => halted + */ + + u8 interruptHandshake; /* Set to 0x01 by adapter to signal interrupt, + * set to 0xEE by host to acknowledge interrupt + */ + + u16 smcVersion; /* Must match SMC_VERSION */ + + u32 smcFirmwareVersion; /* 0xIIVVRRBB where II = product ID, VV = major + * version, RR = revision and BB = build + */ + + u16 txa_done; /* Obsolete completion flags */ + u16 rxa_done; + u16 txb_done; + u16 rxb_done; + u16 txc_done; + u16 rxc_done; + u16 txd_done; + u16 rxd_done; + + u16 mailbox[4]; /* Diagnostics mailbox. 
Not used */ + + struct cirbuff interruptEvent; /* interrupt causes */ + + u32 v24IpSts[FST_MAX_PORTS]; /* V.24 control input status */ + u32 v24OpSts[FST_MAX_PORTS]; /* V.24 control output status */ + + struct port_cfg portConfig[FST_MAX_PORTS]; + + u16 clockStatus[FST_MAX_PORTS]; /* lsb: 0=> present, 1=> absent */ + + u16 cableStatus; /* lsb: 0=> present, 1=> absent */ + + u16 txDescrIndex[FST_MAX_PORTS]; /* transmit descriptor ring index */ + u16 rxDescrIndex[FST_MAX_PORTS]; /* receive descriptor ring index */ + + u16 portMailbox[FST_MAX_PORTS][2]; /* command, modifier */ + u16 cardMailbox[4]; /* Not used */ + + /* Number of times the card thinks the host has + * missed an interrupt by not acknowledging + * within 2mS (I guess NT has problems) + */ + u32 interruptRetryCount; + + /* Driver private data used as an ID. We'll not + * use this as I'd rather keep such things + * in main memory rather than on the PCI bus + */ + u32 portHandle[FST_MAX_PORTS]; + + /* Count of Tx underflows for stats */ + u32 transmitBufferUnderflow[FST_MAX_PORTS]; + + /* Debounced V.24 control input status */ + u32 v24DebouncedSts[FST_MAX_PORTS]; + + /* Adapter debounce timers. Don't touch */ + u32 ctsTimer[FST_MAX_PORTS]; + u32 ctsTimerRun[FST_MAX_PORTS]; + u32 dcdTimer[FST_MAX_PORTS]; + u32 dcdTimerRun[FST_MAX_PORTS]; + + u32 numberOfPorts; /* Number of ports detected at startup */ + + u16 _reserved[64]; + + u16 cardMode; /* Bit-mask to enable features: + * Bit 0: 1 enables LED identify mode + */ + + u16 portScheduleOffset; + + struct su_config suConfig; /* TE1 Bits */ + struct su_status suStatus; + + u32 endOfSmcSignature; /* endOfSmcSignature MUST be the last member of + * the structure and marks the end of shared + * memory. Adapter code initializes it as + * END_SIG. + */ +}; + +/* endOfSmcSignature value */ +#define END_SIG 0x12345678 + +/* Mailbox values. 
(portMailbox) */ +#define NOP 0 /* No operation */ +#define ACK 1 /* Positive acknowledgement to PC driver */ +#define NAK 2 /* Negative acknowledgement to PC driver */ +#define STARTPORT 3 /* Start an HDLC port */ +#define STOPPORT 4 /* Stop an HDLC port */ +#define ABORTTX 5 /* Abort the transmitter for a port */ +#define SETV24O 6 /* Set V24 outputs */ + +/* PLX Chip Register Offsets */ +#define CNTRL_9052 0x50 /* Control Register */ +#define CNTRL_9054 0x6c /* Control Register */ + +#define INTCSR_9052 0x4c /* Interrupt control/status register */ +#define INTCSR_9054 0x68 /* Interrupt control/status register */ + +/* 9054 DMA Registers */ +/* Note that we will be using DMA Channel 0 for copying rx data + * and Channel 1 for copying tx data + */ +#define DMAMODE0 0x80 +#define DMAPADR0 0x84 +#define DMALADR0 0x88 +#define DMASIZ0 0x8c +#define DMADPR0 0x90 +#define DMAMODE1 0x94 +#define DMAPADR1 0x98 +#define DMALADR1 0x9c +#define DMASIZ1 0xa0 +#define DMADPR1 0xa4 +#define DMACSR0 0xa8 +#define DMACSR1 0xa9 +#define DMAARB 0xac +#define DMATHR 0xb0 +#define DMADAC0 0xb4 +#define DMADAC1 0xb8 +#define DMAMARBR 0xac + +#define FST_MIN_DMA_LEN 64 +#define FST_RX_DMA_INT 0x01 +#define FST_TX_DMA_INT 0x02 +#define FST_CARD_INT 0x04 + +/* Larger buffers are positioned in memory at offset BFM_BASE */ +struct buf_window { + u8 txBuffer[FST_MAX_PORTS][NUM_TX_BUFFER][LEN_TX_BUFFER]; + u8 rxBuffer[FST_MAX_PORTS][NUM_RX_BUFFER][LEN_RX_BUFFER]; +}; + +/* Calculate offset of a buffer object within the shared memory window */ +#define BUF_OFFSET(X) (BFM_BASE + offsetof(struct buf_window, X)) + +#pragma pack() + +/* Device driver private information + * ================================= + */ +/* Per port (line or channel) information + */ +struct fst_port_info { + struct net_device *dev; /* Device struct - must be first */ + struct fst_card_info *card; /* Card we're associated with */ + int index; /* Port index on the card */ + int hwif; /* Line hardware (lineInterface copy) */ + int run; /* Port is running */ + int mode; /* Normal or FarSync raw */ + int rxpos; /* Next Rx buffer to use */ + int txpos; /* Next Tx buffer to use */ + int txipos; /* Next Tx buffer to check for free */ + int start; /* Indication of start/stop to network */ + /* A sixteen entry transmit queue + */ + int txqs; /* index to get next buffer to tx */ + int txqe; /* index to queue next packet */ + struct sk_buff *txq[FST_TXQ_DEPTH]; /* The queue */ + int rxqdepth; +}; + +/* Per card information + */ +struct fst_card_info { + char __iomem *mem; /* Card memory mapped to kernel space */ + char __iomem *ctlmem; /* Control memory for PCI cards */ + unsigned int phys_mem; /* Physical memory window address */ + unsigned int phys_ctlmem; /* Physical control memory address */ + unsigned int irq; /* Interrupt request line number */ + unsigned int nports; /* Number of serial ports */ + unsigned int type; /* Type index of card */ + unsigned int state; /* State of card */ + spinlock_t card_lock; /* Lock for SMP access */ + unsigned short pci_conf; /* PCI card config in I/O space */ + /* Per port info */ + struct fst_port_info ports[FST_MAX_PORTS]; + struct pci_dev *device; /* Information about the pci device */ + int card_no; /* Inst of the card on the system */ + int family; /* TxP or TxU */ + int dmarx_in_progress; + int dmatx_in_progress; + unsigned long int_count; + unsigned long int_time_ave; + void *rx_dma_handle_host; + dma_addr_t rx_dma_handle_card; + void *tx_dma_handle_host; + dma_addr_t tx_dma_handle_card; + struct sk_buff 
*dma_skb_rx; + struct fst_port_info *dma_port_rx; + struct fst_port_info *dma_port_tx; + int dma_len_rx; + int dma_len_tx; + int dma_txpos; + int dma_rxpos; +}; + +/* Convert an HDLC device pointer into a port info pointer and similar */ +#define dev_to_port(D) (dev_to_hdlc(D)->priv) +#define port_to_dev(P) ((P)->dev) + +/* Shared memory window access macros + * + * We have a nice memory based structure above, which could be directly + * mapped on i386 but might not work on other architectures unless we use + * the readb,w,l and writeb,w,l macros. Unfortunately these macros take + * physical offsets so we have to convert. The only saving grace is that + * this should all collapse back to a simple indirection eventually. + */ +#define WIN_OFFSET(X) ((long)&(((struct fst_shared *)SMC_BASE)->X)) + +#define FST_RDB(C, E) (readb((C)->mem + WIN_OFFSET(E))) +#define FST_RDW(C, E) (readw((C)->mem + WIN_OFFSET(E))) +#define FST_RDL(C, E) (readl((C)->mem + WIN_OFFSET(E))) + +#define FST_WRB(C, E, B) (writeb((B), (C)->mem + WIN_OFFSET(E))) +#define FST_WRW(C, E, W) (writew((W), (C)->mem + WIN_OFFSET(E))) +#define FST_WRL(C, E, L) (writel((L), (C)->mem + WIN_OFFSET(E))) + +/* Debug support + */ +#if FST_DEBUG + +static int fst_debug_mask = { FST_DEBUG }; + +/* Most common debug activity is to print something if the corresponding bit + * is set in the debug mask. Note: this uses a non-ANSI extension in GCC to + * support variable numbers of macro parameters. The inverted if prevents us + * eating someone else's else clause. + */ +#define dbg(F, fmt, args...) \ +do { \ + if (fst_debug_mask & (F)) \ + printk(KERN_DEBUG pr_fmt(fmt), ##args); \ +} while (0) +#else +#define dbg(F, fmt, args...) \ +do { \ + if (0) \ + printk(KERN_DEBUG pr_fmt(fmt), ##args); \ +} while (0) +#endif + +/* PCI ID lookup table + */ +static const struct pci_device_id fst_pci_dev_id[] = { + {PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_T2P, PCI_ANY_ID, + PCI_ANY_ID, 0, 0, FST_TYPE_T2P}, + + {PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_T4P, PCI_ANY_ID, + PCI_ANY_ID, 0, 0, FST_TYPE_T4P}, + + {PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_T1U, PCI_ANY_ID, + PCI_ANY_ID, 0, 0, FST_TYPE_T1U}, + + {PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_T2U, PCI_ANY_ID, + PCI_ANY_ID, 0, 0, FST_TYPE_T2U}, + + {PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_T4U, PCI_ANY_ID, + PCI_ANY_ID, 0, 0, FST_TYPE_T4U}, + + {PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_TE1, PCI_ANY_ID, + PCI_ANY_ID, 0, 0, FST_TYPE_TE1}, + + {PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_TE1C, PCI_ANY_ID, + PCI_ANY_ID, 0, 0, FST_TYPE_TE1}, + {0,} /* End */ +}; + +MODULE_DEVICE_TABLE(pci, fst_pci_dev_id); + +/* Device Driver Work Queues + * + * So that we don't spend too much time processing events in the + * Interrupt Service routine, we will declare a work queue per Card + * and make the ISR schedule a task in the queue for later execution. + * In the 2.4 Kernel we used to use the immediate queue for BH's + * Now that they are gone, tasklets seem to be much better than work + * queues. 
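The FST_RDB/FST_RDW/FST_WRL macros above fold together two steps: WIN_OFFSET() computes a member's offset within struct fst_shared by pretending the struct sits at SMC_BASE, and the accessors apply that offset to the ioremap()ed card window. A restatement of the same trick in terms of offsetof() (hypothetical helper names, assuming the definitions from this file):

```c
/* Hypothetical restatement of WIN_OFFSET(X): taking the address of a
 * member of a struct "placed" at SMC_BASE is just offsetof() plus
 * that base, applied with the usual MMIO accessors. */
#define SHM_OFFSET(member) \
	(SMC_BASE + offsetof(struct fst_shared, member))

static inline u16 shm_read_smc_version(char __iomem *mem)
{
	return readw(mem + SHM_OFFSET(smcVersion));
}
```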
+ */ + +static void do_bottom_half_tx(struct fst_card_info *card); +static void do_bottom_half_rx(struct fst_card_info *card); +static void fst_process_tx_work_q(struct tasklet_struct *unused); +static void fst_process_int_work_q(struct tasklet_struct *unused); + +static DECLARE_TASKLET(fst_tx_task, fst_process_tx_work_q); +static DECLARE_TASKLET(fst_int_task, fst_process_int_work_q); + +static struct fst_card_info *fst_card_array[FST_MAX_CARDS]; +static DEFINE_SPINLOCK(fst_work_q_lock); +static u64 fst_work_txq; +static u64 fst_work_intq; + +static void +fst_q_work_item(u64 *queue, int card_index) +{ + unsigned long flags; + u64 mask; + + /* Grab the queue exclusively + */ + spin_lock_irqsave(&fst_work_q_lock, flags); + + /* Making an entry in the queue is simply a matter of setting + * a bit for the card indicating that there is work to do in the + * bottom half for the card. Note the limitation of 64 cards. + * That ought to be enough + */ + mask = (u64)1 << card_index; + *queue |= mask; + spin_unlock_irqrestore(&fst_work_q_lock, flags); +} + +static void +fst_process_tx_work_q(struct tasklet_struct *unused) +{ + unsigned long flags; + u64 work_txq; + int i; + + /* Grab the queue exclusively + */ + dbg(DBG_TX, "fst_process_tx_work_q\n"); + spin_lock_irqsave(&fst_work_q_lock, flags); + work_txq = fst_work_txq; + fst_work_txq = 0; + spin_unlock_irqrestore(&fst_work_q_lock, flags); + + /* Call the bottom half for each card with work waiting + */ + for (i = 0; i < FST_MAX_CARDS; i++) { + if (work_txq & 0x01) { + if (fst_card_array[i]) { + dbg(DBG_TX, "Calling tx bh for card %d\n", i); + do_bottom_half_tx(fst_card_array[i]); + } + } + work_txq = work_txq >> 1; + } +} + +static void +fst_process_int_work_q(struct tasklet_struct *unused) +{ + unsigned long flags; + u64 work_intq; + int i; + + /* Grab the queue exclusively + */ + dbg(DBG_INTR, "fst_process_int_work_q\n"); + spin_lock_irqsave(&fst_work_q_lock, flags); + work_intq = fst_work_intq; + fst_work_intq = 0; + spin_unlock_irqrestore(&fst_work_q_lock, flags); + + /* Call the bottom half for each card with work waiting + */ + for (i = 0; i < FST_MAX_CARDS; i++) { + if (work_intq & 0x01) { + if (fst_card_array[i]) { + dbg(DBG_INTR, + "Calling rx & tx bh for card %d\n", i); + do_bottom_half_rx(fst_card_array[i]); + do_bottom_half_tx(fst_card_array[i]); + } + } + work_intq = work_intq >> 1; + } +} + +/* Card control functions + * ====================== + */ +/* Place the processor in reset state + * + * Used to be a simple write to card control space but a glitch in the latest + * AMD Am186CH processor means that we now have to do it by asserting and de- + * asserting the PLX chip PCI Adapter Software Reset. Bit 30 in CNTRL register + * at offset 9052_CNTRL. Note the updates for the TXU. 
+ */ +static inline void +fst_cpureset(struct fst_card_info *card) +{ + unsigned char interrupt_line_register; + unsigned int regval; + + if (card->family == FST_FAMILY_TXU) { + if (pci_read_config_byte + (card->device, PCI_INTERRUPT_LINE, &interrupt_line_register)) { + dbg(DBG_ASS, + "Error in reading interrupt line register\n"); + } + /* Assert PLX software reset and Am186 hardware reset + * and then deassert the PLX software reset but 186 still in reset + */ + outw(0x440f, card->pci_conf + CNTRL_9054 + 2); + outw(0x040f, card->pci_conf + CNTRL_9054 + 2); + /* We are delaying here to allow the 9054 to reset itself + */ + usleep_range(10, 20); + outw(0x240f, card->pci_conf + CNTRL_9054 + 2); + /* We are delaying here to allow the 9054 to reload its eeprom + */ + usleep_range(10, 20); + outw(0x040f, card->pci_conf + CNTRL_9054 + 2); + + if (pci_write_config_byte + (card->device, PCI_INTERRUPT_LINE, interrupt_line_register)) { + dbg(DBG_ASS, + "Error in writing interrupt line register\n"); + } + + } else { + regval = inl(card->pci_conf + CNTRL_9052); + + outl(regval | 0x40000000, card->pci_conf + CNTRL_9052); + outl(regval & ~0x40000000, card->pci_conf + CNTRL_9052); + } +} + +/* Release the processor from reset + */ +static inline void +fst_cpurelease(struct fst_card_info *card) +{ + if (card->family == FST_FAMILY_TXU) { + /* Force posted writes to complete + */ + (void)readb(card->mem); + + /* Release LRESET DO = 1 + * Then release Local Hold, DO = 1 + */ + outw(0x040e, card->pci_conf + CNTRL_9054 + 2); + outw(0x040f, card->pci_conf + CNTRL_9054 + 2); + } else { + (void)readb(card->ctlmem); + } +} + +/* Clear the cards interrupt flag + */ +static inline void +fst_clear_intr(struct fst_card_info *card) +{ + if (card->family == FST_FAMILY_TXU) { + (void)readb(card->ctlmem); + } else { + /* Poke the appropriate PLX chip register (same as enabling interrupts) + */ + outw(0x0543, card->pci_conf + INTCSR_9052); + } +} + +/* Enable card interrupts + */ +static inline void +fst_enable_intr(struct fst_card_info *card) +{ + if (card->family == FST_FAMILY_TXU) + outl(0x0f0c0900, card->pci_conf + INTCSR_9054); + else + outw(0x0543, card->pci_conf + INTCSR_9052); +} + +/* Disable card interrupts + */ +static inline void +fst_disable_intr(struct fst_card_info *card) +{ + if (card->family == FST_FAMILY_TXU) + outl(0x00000000, card->pci_conf + INTCSR_9054); + else + outw(0x0000, card->pci_conf + INTCSR_9052); +} + +/* Process the result of trying to pass a received frame up the stack + */ +static void +fst_process_rx_status(int rx_status, char *name) +{ + switch (rx_status) { + case NET_RX_SUCCESS: + { + /* Nothing to do here + */ + break; + } + case NET_RX_DROP: + { + dbg(DBG_ASS, "%s: Received packet dropped\n", name); + break; + } + } +} + +/* Initilaise DMA for PLX 9054 + */ +static inline void +fst_init_dma(struct fst_card_info *card) +{ + /* This is only required for the PLX 9054 + */ + if (card->family == FST_FAMILY_TXU) { + pci_set_master(card->device); + outl(0x00020441, card->pci_conf + DMAMODE0); + outl(0x00020441, card->pci_conf + DMAMODE1); + outl(0x0, card->pci_conf + DMATHR); + } +} + +/* Tx dma complete interrupt + */ +static void +fst_tx_dma_complete(struct fst_card_info *card, struct fst_port_info *port, + int len, int txpos) +{ + struct net_device *dev = port_to_dev(port); + + /* Everything is now set, just tell the card to go + */ + dbg(DBG_TX, "fst_tx_dma_complete\n"); + FST_WRB(card, txDescrRing[port->index][txpos].bits, + DMA_OWN | TX_STP | TX_ENP); + dev->stats.tx_packets++; + 
dev->stats.tx_bytes += len; + netif_trans_update(dev); +} + +/* Mark it for our own raw sockets interface + */ +static __be16 farsync_type_trans(struct sk_buff *skb, struct net_device *dev) +{ + skb->dev = dev; + skb_reset_mac_header(skb); + skb->pkt_type = PACKET_HOST; + return htons(ETH_P_CUST); +} + +/* Rx dma complete interrupt + */ +static void +fst_rx_dma_complete(struct fst_card_info *card, struct fst_port_info *port, + int len, struct sk_buff *skb, int rxp) +{ + struct net_device *dev = port_to_dev(port); + int pi; + int rx_status; + + dbg(DBG_TX, "fst_rx_dma_complete\n"); + pi = port->index; + skb_put_data(skb, card->rx_dma_handle_host, len); + + /* Reset buffer descriptor */ + FST_WRB(card, rxDescrRing[pi][rxp].bits, DMA_OWN); + + /* Update stats */ + dev->stats.rx_packets++; + dev->stats.rx_bytes += len; + + /* Push upstream */ + dbg(DBG_RX, "Pushing the frame up the stack\n"); + if (port->mode == FST_RAW) + skb->protocol = farsync_type_trans(skb, dev); + else + skb->protocol = hdlc_type_trans(skb, dev); + rx_status = netif_rx(skb); + fst_process_rx_status(rx_status, port_to_dev(port)->name); + if (rx_status == NET_RX_DROP) + dev->stats.rx_dropped++; +} + +/* Receive a frame through the DMA + */ +static inline void +fst_rx_dma(struct fst_card_info *card, dma_addr_t dma, u32 mem, int len) +{ + /* This routine will setup the DMA and start it + */ + + dbg(DBG_RX, "In fst_rx_dma %x %x %d\n", (u32)dma, mem, len); + if (card->dmarx_in_progress) + dbg(DBG_ASS, "In fst_rx_dma while dma in progress\n"); + + outl(dma, card->pci_conf + DMAPADR0); /* Copy to here */ + outl(mem, card->pci_conf + DMALADR0); /* from here */ + outl(len, card->pci_conf + DMASIZ0); /* for this length */ + outl(0x00000000c, card->pci_conf + DMADPR0); /* In this direction */ + + /* We use the dmarx_in_progress flag to flag the channel as busy + */ + card->dmarx_in_progress = 1; + outb(0x03, card->pci_conf + DMACSR0); /* Start the transfer */ +} + +/* Send a frame through the DMA + */ +static inline void +fst_tx_dma(struct fst_card_info *card, dma_addr_t dma, u32 mem, int len) +{ + /* This routine will setup the DMA and start it. + */ + + dbg(DBG_TX, "In fst_tx_dma %x %x %d\n", (u32)dma, mem, len); + if (card->dmatx_in_progress) + dbg(DBG_ASS, "In fst_tx_dma while dma in progress\n"); + + outl(dma, card->pci_conf + DMAPADR1); /* Copy from here */ + outl(mem, card->pci_conf + DMALADR1); /* to here */ + outl(len, card->pci_conf + DMASIZ1); /* for this length */ + outl(0x000000004, card->pci_conf + DMADPR1); /* In this direction */ + + /* We use the dmatx_in_progress to flag the channel as busy + */ + card->dmatx_in_progress = 1; + outb(0x03, card->pci_conf + DMACSR1); /* Start the transfer */ +} + +/* Issue a Mailbox command for a port. + * Note we issue them on a fire and forget basis, not expecting to see an + * error and not waiting for completion. 
+ */ +static void +fst_issue_cmd(struct fst_port_info *port, unsigned short cmd) +{ + struct fst_card_info *card; + unsigned short mbval; + unsigned long flags; + int safety; + + card = port->card; + spin_lock_irqsave(&card->card_lock, flags); + mbval = FST_RDW(card, portMailbox[port->index][0]); + + safety = 0; + /* Wait for any previous command to complete */ + while (mbval > NAK) { + spin_unlock_irqrestore(&card->card_lock, flags); + schedule_timeout_uninterruptible(1); + spin_lock_irqsave(&card->card_lock, flags); + + if (++safety > 2000) { + pr_err("Mailbox safety timeout\n"); + break; + } + + mbval = FST_RDW(card, portMailbox[port->index][0]); + } + if (safety > 0) + dbg(DBG_CMD, "Mailbox clear after %d jiffies\n", safety); + + if (mbval == NAK) + dbg(DBG_CMD, "issue_cmd: previous command was NAK'd\n"); + + FST_WRW(card, portMailbox[port->index][0], cmd); + + if (cmd == ABORTTX || cmd == STARTPORT) { + port->txpos = 0; + port->txipos = 0; + port->start = 0; + } + + spin_unlock_irqrestore(&card->card_lock, flags); +} + +/* Port output signals control + */ +static inline void +fst_op_raise(struct fst_port_info *port, unsigned int outputs) +{ + outputs |= FST_RDL(port->card, v24OpSts[port->index]); + FST_WRL(port->card, v24OpSts[port->index], outputs); + + if (port->run) + fst_issue_cmd(port, SETV24O); +} + +static inline void +fst_op_lower(struct fst_port_info *port, unsigned int outputs) +{ + outputs = ~outputs & FST_RDL(port->card, v24OpSts[port->index]); + FST_WRL(port->card, v24OpSts[port->index], outputs); + + if (port->run) + fst_issue_cmd(port, SETV24O); +} + +/* Setup port Rx buffers + */ +static void +fst_rx_config(struct fst_port_info *port) +{ + int i; + int pi; + unsigned int offset; + unsigned long flags; + struct fst_card_info *card; + + pi = port->index; + card = port->card; + spin_lock_irqsave(&card->card_lock, flags); + for (i = 0; i < NUM_RX_BUFFER; i++) { + offset = BUF_OFFSET(rxBuffer[pi][i][0]); + + FST_WRW(card, rxDescrRing[pi][i].ladr, (u16)offset); + FST_WRB(card, rxDescrRing[pi][i].hadr, (u8)(offset >> 16)); + FST_WRW(card, rxDescrRing[pi][i].bcnt, cnv_bcnt(LEN_RX_BUFFER)); + FST_WRW(card, rxDescrRing[pi][i].mcnt, LEN_RX_BUFFER); + FST_WRB(card, rxDescrRing[pi][i].bits, DMA_OWN); + } + port->rxpos = 0; + spin_unlock_irqrestore(&card->card_lock, flags); +} + +/* Setup port Tx buffers + */ +static void +fst_tx_config(struct fst_port_info *port) +{ + int i; + int pi; + unsigned int offset; + unsigned long flags; + struct fst_card_info *card; + + pi = port->index; + card = port->card; + spin_lock_irqsave(&card->card_lock, flags); + for (i = 0; i < NUM_TX_BUFFER; i++) { + offset = BUF_OFFSET(txBuffer[pi][i][0]); + + FST_WRW(card, txDescrRing[pi][i].ladr, (u16)offset); + FST_WRB(card, txDescrRing[pi][i].hadr, (u8)(offset >> 16)); + FST_WRW(card, txDescrRing[pi][i].bcnt, 0); + FST_WRB(card, txDescrRing[pi][i].bits, 0); + } + port->txpos = 0; + port->txipos = 0; + port->start = 0; + spin_unlock_irqrestore(&card->card_lock, flags); +} + +/* TE1 Alarm change interrupt event + */ +static void +fst_intr_te1_alarm(struct fst_card_info *card, struct fst_port_info *port) +{ + u8 los; + u8 rra; + u8 ais; + + los = FST_RDB(card, suStatus.lossOfSignal); + rra = FST_RDB(card, suStatus.receiveRemoteAlarm); + ais = FST_RDB(card, suStatus.alarmIndicationSignal); + + if (los) { + /* Lost the link + */ + if (netif_carrier_ok(port_to_dev(port))) { + dbg(DBG_INTR, "Net carrier off\n"); + netif_carrier_off(port_to_dev(port)); + } + } else { + /* Link available + */ + if 
(!netif_carrier_ok(port_to_dev(port))) { + dbg(DBG_INTR, "Net carrier on\n"); + netif_carrier_on(port_to_dev(port)); + } + } + + if (los) + dbg(DBG_INTR, "Assert LOS Alarm\n"); + else + dbg(DBG_INTR, "De-assert LOS Alarm\n"); + if (rra) + dbg(DBG_INTR, "Assert RRA Alarm\n"); + else + dbg(DBG_INTR, "De-assert RRA Alarm\n"); + + if (ais) + dbg(DBG_INTR, "Assert AIS Alarm\n"); + else + dbg(DBG_INTR, "De-assert AIS Alarm\n"); +} + +/* Control signal change interrupt event + */ +static void +fst_intr_ctlchg(struct fst_card_info *card, struct fst_port_info *port) +{ + int signals; + + signals = FST_RDL(card, v24DebouncedSts[port->index]); + + if (signals & ((port->hwif == X21 || port->hwif == X21D) + ? IPSTS_INDICATE : IPSTS_DCD)) { + if (!netif_carrier_ok(port_to_dev(port))) { + dbg(DBG_INTR, "DCD active\n"); + netif_carrier_on(port_to_dev(port)); + } + } else { + if (netif_carrier_ok(port_to_dev(port))) { + dbg(DBG_INTR, "DCD lost\n"); + netif_carrier_off(port_to_dev(port)); + } + } +} + +/* Log Rx Errors + */ +static void +fst_log_rx_error(struct fst_card_info *card, struct fst_port_info *port, + unsigned char dmabits, int rxp, unsigned short len) +{ + struct net_device *dev = port_to_dev(port); + + /* Increment the appropriate error counter + */ + dev->stats.rx_errors++; + if (dmabits & RX_OFLO) { + dev->stats.rx_fifo_errors++; + dbg(DBG_ASS, "Rx fifo error on card %d port %d buffer %d\n", + card->card_no, port->index, rxp); + } + if (dmabits & RX_CRC) { + dev->stats.rx_crc_errors++; + dbg(DBG_ASS, "Rx crc error on card %d port %d\n", + card->card_no, port->index); + } + if (dmabits & RX_FRAM) { + dev->stats.rx_frame_errors++; + dbg(DBG_ASS, "Rx frame error on card %d port %d\n", + card->card_no, port->index); + } + if (dmabits == (RX_STP | RX_ENP)) { + dev->stats.rx_length_errors++; + dbg(DBG_ASS, "Rx length error (%d) on card %d port %d\n", + len, card->card_no, port->index); + } +} + +/* Rx Error Recovery + */ +static void +fst_recover_rx_error(struct fst_card_info *card, struct fst_port_info *port, + unsigned char dmabits, int rxp, unsigned short len) +{ + int i; + int pi; + + pi = port->index; + /* Discard buffer descriptors until we see the start of the + * next frame. Note that for long frames this could be in + * a subsequent interrupt. 
+ */ + i = 0; + while ((dmabits & (DMA_OWN | RX_STP)) == 0) { + FST_WRB(card, rxDescrRing[pi][rxp].bits, DMA_OWN); + rxp = (rxp + 1) % NUM_RX_BUFFER; + if (++i > NUM_RX_BUFFER) { + dbg(DBG_ASS, "intr_rx: Discarding more bufs" + " than we have\n"); + break; + } + dmabits = FST_RDB(card, rxDescrRing[pi][rxp].bits); + dbg(DBG_ASS, "DMA Bits of next buffer was %x\n", dmabits); + } + dbg(DBG_ASS, "There were %d subsequent buffers in error\n", i); + + /* Discard the terminal buffer */ + if (!(dmabits & DMA_OWN)) { + FST_WRB(card, rxDescrRing[pi][rxp].bits, DMA_OWN); + rxp = (rxp + 1) % NUM_RX_BUFFER; + } + port->rxpos = rxp; +} + +/* Rx complete interrupt + */ +static void +fst_intr_rx(struct fst_card_info *card, struct fst_port_info *port) +{ + unsigned char dmabits; + int pi; + int rxp; + int rx_status; + unsigned short len; + struct sk_buff *skb; + struct net_device *dev = port_to_dev(port); + + /* Check we have a buffer to process */ + pi = port->index; + rxp = port->rxpos; + dmabits = FST_RDB(card, rxDescrRing[pi][rxp].bits); + if (dmabits & DMA_OWN) { + dbg(DBG_RX | DBG_INTR, "intr_rx: No buffer port %d pos %d\n", + pi, rxp); + return; + } + if (card->dmarx_in_progress) + return; + + /* Get buffer length */ + len = FST_RDW(card, rxDescrRing[pi][rxp].mcnt); + /* Discard the CRC */ + len -= 2; + if (len == 0) { + /* This seems to happen on the TE1 interface sometimes + * so throw the frame away and log the event. + */ + pr_err("Frame received with 0 length. Card %d Port %d\n", + card->card_no, port->index); + /* Return descriptor to card */ + FST_WRB(card, rxDescrRing[pi][rxp].bits, DMA_OWN); + + rxp = (rxp + 1) % NUM_RX_BUFFER; + port->rxpos = rxp; + return; + } + + /* Check buffer length and for other errors. We insist on one packet + * in one buffer. This simplifies things greatly and since we've + * allocated 8K it shouldn't be a real world limitation + */ + dbg(DBG_RX, "intr_rx: %d,%d: flags %x len %d\n", pi, rxp, dmabits, len); + if (dmabits != (RX_STP | RX_ENP) || len > LEN_RX_BUFFER - 2) { + fst_log_rx_error(card, port, dmabits, rxp, len); + fst_recover_rx_error(card, port, dmabits, rxp, len); + return; + } + + /* Allocate SKB */ + skb = dev_alloc_skb(len); + if (!skb) { + dbg(DBG_RX, "intr_rx: can't allocate buffer\n"); + + dev->stats.rx_dropped++; + + /* Return descriptor to card */ + FST_WRB(card, rxDescrRing[pi][rxp].bits, DMA_OWN); + + rxp = (rxp + 1) % NUM_RX_BUFFER; + port->rxpos = rxp; + return; + } + + /* We know the length we need to receive, len. 
+ * It's not worth using the DMA for reads of less than + * FST_MIN_DMA_LEN + */ + + if (len < FST_MIN_DMA_LEN || card->family == FST_FAMILY_TXP) { + memcpy_fromio(skb_put(skb, len), + card->mem + BUF_OFFSET(rxBuffer[pi][rxp][0]), + len); + + /* Reset buffer descriptor */ + FST_WRB(card, rxDescrRing[pi][rxp].bits, DMA_OWN); + + /* Update stats */ + dev->stats.rx_packets++; + dev->stats.rx_bytes += len; + + /* Push upstream */ + dbg(DBG_RX, "Pushing frame up the stack\n"); + if (port->mode == FST_RAW) + skb->protocol = farsync_type_trans(skb, dev); + else + skb->protocol = hdlc_type_trans(skb, dev); + rx_status = netif_rx(skb); + fst_process_rx_status(rx_status, port_to_dev(port)->name); + if (rx_status == NET_RX_DROP) + dev->stats.rx_dropped++; + } else { + card->dma_skb_rx = skb; + card->dma_port_rx = port; + card->dma_len_rx = len; + card->dma_rxpos = rxp; + fst_rx_dma(card, card->rx_dma_handle_card, + BUF_OFFSET(rxBuffer[pi][rxp][0]), len); + } + if (rxp != port->rxpos) { + dbg(DBG_ASS, "About to increment rxpos by more than 1\n"); + dbg(DBG_ASS, "rxp = %d rxpos = %d\n", rxp, port->rxpos); + } + rxp = (rxp + 1) % NUM_RX_BUFFER; + port->rxpos = rxp; +} + +/* The bottom half to the ISR + * + */ + +static void +do_bottom_half_tx(struct fst_card_info *card) +{ + struct fst_port_info *port; + int pi; + int txq_length; + struct sk_buff *skb; + unsigned long flags; + struct net_device *dev; + + /* Find a free buffer for the transmit + * Step through each port on this card + */ + + dbg(DBG_TX, "do_bottom_half_tx\n"); + for (pi = 0, port = card->ports; pi < card->nports; pi++, port++) { + if (!port->run) + continue; + + dev = port_to_dev(port); + while (!(FST_RDB(card, txDescrRing[pi][port->txpos].bits) & + DMA_OWN) && + !(card->dmatx_in_progress)) { + /* There doesn't seem to be a txdone event per-se + * We seem to have to deduce it, by checking the DMA_OWN + * bit on the next buffer we think we can use + */ + spin_lock_irqsave(&card->card_lock, flags); + txq_length = port->txqe - port->txqs; + if (txq_length < 0) { + /* This is the case where one has wrapped and the + * maths gives us a negative number + */ + txq_length = txq_length + FST_TXQ_DEPTH; + } + spin_unlock_irqrestore(&card->card_lock, flags); + if (txq_length > 0) { + /* There is something to send + */ + spin_lock_irqsave(&card->card_lock, flags); + skb = port->txq[port->txqs]; + port->txqs++; + if (port->txqs == FST_TXQ_DEPTH) + port->txqs = 0; + + spin_unlock_irqrestore(&card->card_lock, flags); + /* copy the data and set the required indicators on the + * card. + */ + FST_WRW(card, txDescrRing[pi][port->txpos].bcnt, + cnv_bcnt(skb->len)); + if (skb->len < FST_MIN_DMA_LEN || + card->family == FST_FAMILY_TXP) { + /* Enqueue the packet with normal io */ + memcpy_toio(card->mem + + BUF_OFFSET(txBuffer[pi] + [port-> + txpos][0]), + skb->data, skb->len); + FST_WRB(card, + txDescrRing[pi][port->txpos]. + bits, + DMA_OWN | TX_STP | TX_ENP); + dev->stats.tx_packets++; + dev->stats.tx_bytes += skb->len; + netif_trans_update(dev); + } else { + /* Or do it through dma */ + memcpy(card->tx_dma_handle_host, + skb->data, skb->len); + card->dma_port_tx = port; + card->dma_len_tx = skb->len; + card->dma_txpos = port->txpos; + fst_tx_dma(card, + card->tx_dma_handle_card, + BUF_OFFSET(txBuffer[pi] + [port->txpos][0]), + skb->len); + } + if (++port->txpos >= NUM_TX_BUFFER) + port->txpos = 0; + /* If we have flow control on, can we now release it? 
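The txq watermarks defined near the top of farsync.c (fst_txq_high, fst_txq_low) drive the flow-control decision being made in this transmit path. A hedged distillation of the pattern, not the driver's literal code:

```c
/* Hedged distillation: stop the kernel's queue when the private tx
 * queue is filling, wake it once it has drained below the low mark. */
static void fst_txq_flow_control(struct net_device *dev, int txq_length)
{
	if (txq_length >= fst_txq_high)
		netif_stop_queue(dev);	/* assert back-pressure */
	else if (txq_length <= fst_txq_low)
		netif_wake_queue(dev);	/* release back-pressure */
}
```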
+				 */
+				if (port->start) {
+					if (txq_length < fst_txq_low) {
+						netif_wake_queue(port_to_dev(port));
+						port->start = 0;
+					}
+				}
+				dev_kfree_skb(skb);
+			} else {
+				/* Nothing to send so break out of the while loop
+				 */
+				break;
+			}
+		}
+	}
+}
+
+static void
+do_bottom_half_rx(struct fst_card_info *card)
+{
+	struct fst_port_info *port;
+	int pi;
+	int rx_count = 0;
+
+	/* Check for rx completions on all ports on this card */
+	dbg(DBG_RX, "do_bottom_half_rx\n");
+	for (pi = 0, port = card->ports; pi < card->nports; pi++, port++) {
+		if (!port->run)
+			continue;
+
+		while (!(FST_RDB(card, rxDescrRing[pi][port->rxpos].bits)
+			 & DMA_OWN) && !(card->dmarx_in_progress)) {
+			if (rx_count > fst_max_reads) {
+				/* Don't spend forever in receive processing
+				 * Schedule another event
+				 */
+				fst_q_work_item(&fst_work_intq, card->card_no);
+				tasklet_schedule(&fst_int_task);
+				break;	/* Leave the loop */
+			}
+			fst_intr_rx(card, port);
+			rx_count++;
+		}
+	}
+}
+
+/* The interrupt service routine
+ * Dev_id is our fst_card_info pointer
+ */
+static irqreturn_t
+fst_intr(int dummy, void *dev_id)
+{
+	struct fst_card_info *card = dev_id;
+	struct fst_port_info *port;
+	int rdidx;		/* Event buffer indices */
+	int wridx;
+	int event;		/* Actual event for processing */
+	unsigned int dma_intcsr = 0;
+	unsigned int do_card_interrupt;
+	unsigned int int_retry_count;
+
+	/* Check to see if the interrupt was for this card
+	 * return if not
+	 * Note that the call to clear the interrupt is important
+	 */
+	dbg(DBG_INTR, "intr: %d %p\n", card->irq, card);
+	if (card->state != FST_RUNNING) {
+		pr_err("Interrupt received for card %d in a non-running state (%d)\n",
+		       card->card_no, card->state);
+
+		/* It is possible to really be running, i.e. we have re-loaded
+		 * a running card
+		 * Clear and reprime the interrupt source
+		 */
+		fst_clear_intr(card);
+		return IRQ_HANDLED;
+	}
+
+	/* Clear and reprime the interrupt source */
+	fst_clear_intr(card);
+
+	/* Is the interrupt for this card (handshake == 1)
+	 */
+	do_card_interrupt = 0;
+	if (FST_RDB(card, interruptHandshake) == 1) {
+		do_card_interrupt += FST_CARD_INT;
+		/* Set the software acknowledge */
+		FST_WRB(card, interruptHandshake, 0xEE);
+	}
+	if (card->family == FST_FAMILY_TXU) {
+		/* Is it a DMA Interrupt
+		 */
+		dma_intcsr = inl(card->pci_conf + INTCSR_9054);
+		if (dma_intcsr & 0x00200000) {
+			/* DMA Channel 0 (Rx transfer complete)
+			 */
+			dbg(DBG_RX, "DMA Rx xfer complete\n");
+			outb(0x8, card->pci_conf + DMACSR0);
+			fst_rx_dma_complete(card, card->dma_port_rx,
+					    card->dma_len_rx, card->dma_skb_rx,
+					    card->dma_rxpos);
+			card->dmarx_in_progress = 0;
+			do_card_interrupt += FST_RX_DMA_INT;
+		}
+		if (dma_intcsr & 0x00400000) {
+			/* DMA Channel 1 (Tx transfer complete)
+			 */
+			dbg(DBG_TX, "DMA Tx xfer complete\n");
+			outb(0x8, card->pci_conf + DMACSR1);
+			fst_tx_dma_complete(card, card->dma_port_tx,
+					    card->dma_len_tx, card->dma_txpos);
+			card->dmatx_in_progress = 0;
+			do_card_interrupt += FST_TX_DMA_INT;
+		}
+	}
+
+	/* Have we been missing Interrupts
+	 */
+	int_retry_count = FST_RDL(card, interruptRetryCount);
+	if (int_retry_count) {
+		dbg(DBG_ASS, "Card %d int_retry_count is %d\n",
+		    card->card_no, int_retry_count);
+		FST_WRL(card, interruptRetryCount, 0);
+	}
+
+	if (!do_card_interrupt)
+		return IRQ_HANDLED;
+
+	/* Schedule the bottom half of the ISR */
+	fst_q_work_item(&fst_work_intq, card->card_no);
+	tasklet_schedule(&fst_int_task);
+
+	/* Drain the event queue */
+	rdidx = FST_RDB(card, interruptEvent.rdindex) & 0x1f;
+	wridx = FST_RDB(card,
interruptEvent.wrindex) & 0x1f; + while (rdidx != wridx) { + event = FST_RDB(card, interruptEvent.evntbuff[rdidx]); + port = &card->ports[event & 0x03]; + + dbg(DBG_INTR, "Processing Interrupt event: %x\n", event); + + switch (event) { + case TE1_ALMA: + dbg(DBG_INTR, "TE1 Alarm intr\n"); + if (port->run) + fst_intr_te1_alarm(card, port); + break; + + case CTLA_CHG: + case CTLB_CHG: + case CTLC_CHG: + case CTLD_CHG: + if (port->run) + fst_intr_ctlchg(card, port); + break; + + case ABTA_SENT: + case ABTB_SENT: + case ABTC_SENT: + case ABTD_SENT: + dbg(DBG_TX, "Abort complete port %d\n", port->index); + break; + + case TXA_UNDF: + case TXB_UNDF: + case TXC_UNDF: + case TXD_UNDF: + /* Difficult to see how we'd get this given that we + * always load up the entire packet for DMA. + */ + dbg(DBG_TX, "Tx underflow port %d\n", port->index); + port_to_dev(port)->stats.tx_errors++; + port_to_dev(port)->stats.tx_fifo_errors++; + dbg(DBG_ASS, "Tx underflow on card %d port %d\n", + card->card_no, port->index); + break; + + case INIT_CPLT: + dbg(DBG_INIT, "Card init OK intr\n"); + break; + + case INIT_FAIL: + dbg(DBG_INIT, "Card init FAILED intr\n"); + card->state = FST_IFAILED; + break; + + default: + pr_err("intr: unknown card event %d. ignored\n", event); + break; + } + + /* Bump and wrap the index */ + if (++rdidx >= MAX_CIRBUFF) + rdidx = 0; + } + FST_WRB(card, interruptEvent.rdindex, rdidx); + return IRQ_HANDLED; +} + +/* Check that the shared memory configuration is one that we can handle + * and that some basic parameters are correct + */ +static void +check_started_ok(struct fst_card_info *card) +{ + int i; + + /* Check structure version and end marker */ + if (FST_RDW(card, smcVersion) != SMC_VERSION) { + pr_err("Bad shared memory version %d expected %d\n", + FST_RDW(card, smcVersion), SMC_VERSION); + card->state = FST_BADVERSION; + return; + } + if (FST_RDL(card, endOfSmcSignature) != END_SIG) { + pr_err("Missing shared memory signature\n"); + card->state = FST_BADVERSION; + return; + } + /* Firmware status flag, 0x00 = initialising, 0x01 = OK, 0xFF = fail */ + i = FST_RDB(card, taskStatus); + if (i == 0x01) { + card->state = FST_RUNNING; + } else if (i == 0xFF) { + pr_err("Firmware initialisation failed. Card halted\n"); + card->state = FST_HALTED; + return; + } else if (i != 0x00) { + pr_err("Unknown firmware status 0x%x\n", i); + card->state = FST_HALTED; + return; + } + + /* Finally check the number of ports reported by firmware against the + * number we assumed at card detection. Should never happen with + * existing firmware etc so we just report it for the moment. + */ + if (FST_RDL(card, numberOfPorts) != card->nports) { + pr_warn("Port count mismatch on card %d. Firmware thinks %d we say %d\n", + card->card_no, + FST_RDL(card, numberOfPorts), card->nports); + } +} + +static int +set_conf_from_info(struct fst_card_info *card, struct fst_port_info *port, + struct fstioc_info *info) +{ + int err; + unsigned char my_framing; + + /* Set things according to the user set valid flags + * Several of the old options have been invalidated/replaced by the + * generic hdlc package. 
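+	 * Cable type and line speed now arrive through the standard
+	 * IF_IFACE_* path handled by fst_set_iface(), which is why
+	 * FSTVAL_CABLE and FSTVAL_SPEED are rejected with -EINVAL below.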
+ */ + err = 0; + if (info->valid & FSTVAL_PROTO) { + if (info->proto == FST_RAW) + port->mode = FST_RAW; + else + port->mode = FST_GEN_HDLC; + } + + if (info->valid & FSTVAL_CABLE) + err = -EINVAL; + + if (info->valid & FSTVAL_SPEED) + err = -EINVAL; + + if (info->valid & FSTVAL_PHASE) + FST_WRB(card, portConfig[port->index].invertClock, + info->invertClock); + if (info->valid & FSTVAL_MODE) + FST_WRW(card, cardMode, info->cardMode); + if (info->valid & FSTVAL_TE1) { + FST_WRL(card, suConfig.dataRate, info->lineSpeed); + FST_WRB(card, suConfig.clocking, info->clockSource); + my_framing = FRAMING_E1; + if (info->framing == E1) + my_framing = FRAMING_E1; + if (info->framing == T1) + my_framing = FRAMING_T1; + if (info->framing == J1) + my_framing = FRAMING_J1; + FST_WRB(card, suConfig.framing, my_framing); + FST_WRB(card, suConfig.structure, info->structure); + FST_WRB(card, suConfig.interface, info->interface); + FST_WRB(card, suConfig.coding, info->coding); + FST_WRB(card, suConfig.lineBuildOut, info->lineBuildOut); + FST_WRB(card, suConfig.equalizer, info->equalizer); + FST_WRB(card, suConfig.transparentMode, info->transparentMode); + FST_WRB(card, suConfig.loopMode, info->loopMode); + FST_WRB(card, suConfig.range, info->range); + FST_WRB(card, suConfig.txBufferMode, info->txBufferMode); + FST_WRB(card, suConfig.rxBufferMode, info->rxBufferMode); + FST_WRB(card, suConfig.startingSlot, info->startingSlot); + FST_WRB(card, suConfig.losThreshold, info->losThreshold); + if (info->idleCode) + FST_WRB(card, suConfig.enableIdleCode, 1); + else + FST_WRB(card, suConfig.enableIdleCode, 0); + FST_WRB(card, suConfig.idleCode, info->idleCode); +#if FST_DEBUG + if (info->valid & FSTVAL_TE1) { + printk("Setting TE1 data\n"); + printk("Line Speed = %d\n", info->lineSpeed); + printk("Start slot = %d\n", info->startingSlot); + printk("Clock source = %d\n", info->clockSource); + printk("Framing = %d\n", my_framing); + printk("Structure = %d\n", info->structure); + printk("interface = %d\n", info->interface); + printk("Coding = %d\n", info->coding); + printk("Line build out = %d\n", info->lineBuildOut); + printk("Equaliser = %d\n", info->equalizer); + printk("Transparent mode = %d\n", + info->transparentMode); + printk("Loop mode = %d\n", info->loopMode); + printk("Range = %d\n", info->range); + printk("Tx Buffer mode = %d\n", info->txBufferMode); + printk("Rx Buffer mode = %d\n", info->rxBufferMode); + printk("LOS Threshold = %d\n", info->losThreshold); + printk("Idle Code = %d\n", info->idleCode); + } +#endif + } +#if FST_DEBUG + if (info->valid & FSTVAL_DEBUG) + fst_debug_mask = info->debug; +#endif + + return err; +} + +static void +gather_conf_info(struct fst_card_info *card, struct fst_port_info *port, + struct fstioc_info *info) +{ + int i; + + memset(info, 0, sizeof(struct fstioc_info)); + + i = port->index; + info->kernelVersion = LINUX_VERSION_CODE; + info->nports = card->nports; + info->type = card->type; + info->state = card->state; + info->proto = FST_GEN_HDLC; + info->index = i; +#if FST_DEBUG + info->debug = fst_debug_mask; +#endif + + /* Only mark information as valid if card is running. + * Copy the data anyway in case it is useful for diagnostics + */ + info->valid = ((card->state == FST_RUNNING) ? 
FSTVAL_ALL : FSTVAL_CARD) +#if FST_DEBUG + | FSTVAL_DEBUG +#endif + ; + + info->lineInterface = FST_RDW(card, portConfig[i].lineInterface); + info->internalClock = FST_RDB(card, portConfig[i].internalClock); + info->lineSpeed = FST_RDL(card, portConfig[i].lineSpeed); + info->invertClock = FST_RDB(card, portConfig[i].invertClock); + info->v24IpSts = FST_RDL(card, v24IpSts[i]); + info->v24OpSts = FST_RDL(card, v24OpSts[i]); + info->clockStatus = FST_RDW(card, clockStatus[i]); + info->cableStatus = FST_RDW(card, cableStatus); + info->cardMode = FST_RDW(card, cardMode); + info->smcFirmwareVersion = FST_RDL(card, smcFirmwareVersion); + + /* The T2U can report cable presence for both A or B + * in bits 0 and 1 of cableStatus. See which port we are and + * do the mapping. + */ + if (card->family == FST_FAMILY_TXU) { + if (port->index == 0) { + /* Port A + */ + info->cableStatus = info->cableStatus & 1; + } else { + /* Port B + */ + info->cableStatus = info->cableStatus >> 1; + info->cableStatus = info->cableStatus & 1; + } + } + /* Some additional bits if we are TE1 + */ + if (card->type == FST_TYPE_TE1) { + info->lineSpeed = FST_RDL(card, suConfig.dataRate); + info->clockSource = FST_RDB(card, suConfig.clocking); + info->framing = FST_RDB(card, suConfig.framing); + info->structure = FST_RDB(card, suConfig.structure); + info->interface = FST_RDB(card, suConfig.interface); + info->coding = FST_RDB(card, suConfig.coding); + info->lineBuildOut = FST_RDB(card, suConfig.lineBuildOut); + info->equalizer = FST_RDB(card, suConfig.equalizer); + info->loopMode = FST_RDB(card, suConfig.loopMode); + info->range = FST_RDB(card, suConfig.range); + info->txBufferMode = FST_RDB(card, suConfig.txBufferMode); + info->rxBufferMode = FST_RDB(card, suConfig.rxBufferMode); + info->startingSlot = FST_RDB(card, suConfig.startingSlot); + info->losThreshold = FST_RDB(card, suConfig.losThreshold); + if (FST_RDB(card, suConfig.enableIdleCode)) + info->idleCode = FST_RDB(card, suConfig.idleCode); + else + info->idleCode = 0; + info->receiveBufferDelay = + FST_RDL(card, suStatus.receiveBufferDelay); + info->framingErrorCount = + FST_RDL(card, suStatus.framingErrorCount); + info->codeViolationCount = + FST_RDL(card, suStatus.codeViolationCount); + info->crcErrorCount = FST_RDL(card, suStatus.crcErrorCount); + info->lineAttenuation = FST_RDL(card, suStatus.lineAttenuation); + info->lossOfSignal = FST_RDB(card, suStatus.lossOfSignal); + info->receiveRemoteAlarm = + FST_RDB(card, suStatus.receiveRemoteAlarm); + info->alarmIndicationSignal = + FST_RDB(card, suStatus.alarmIndicationSignal); + } +} + +static int +fst_set_iface(struct fst_card_info *card, struct fst_port_info *port, + struct if_settings *ifs) +{ + sync_serial_settings sync; + int i; + + if (ifs->size != sizeof(sync)) + return -ENOMEM; + + if (copy_from_user(&sync, ifs->ifs_ifsu.sync, sizeof(sync))) + return -EFAULT; + + if (sync.loopback) + return -EINVAL; + + i = port->index; + + switch (ifs->type) { + case IF_IFACE_V35: + FST_WRW(card, portConfig[i].lineInterface, V35); + port->hwif = V35; + break; + + case IF_IFACE_V24: + FST_WRW(card, portConfig[i].lineInterface, V24); + port->hwif = V24; + break; + + case IF_IFACE_X21: + FST_WRW(card, portConfig[i].lineInterface, X21); + port->hwif = X21; + break; + + case IF_IFACE_X21D: + FST_WRW(card, portConfig[i].lineInterface, X21D); + port->hwif = X21D; + break; + + case IF_IFACE_T1: + FST_WRW(card, portConfig[i].lineInterface, T1); + port->hwif = T1; + break; + + case IF_IFACE_E1: + FST_WRW(card, 
portConfig[i].lineInterface, E1);
+		port->hwif = E1;
+		break;
+
+	case IF_IFACE_SYNC_SERIAL:
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	switch (sync.clock_type) {
+	case CLOCK_EXT:
+		FST_WRB(card, portConfig[i].internalClock, EXTCLK);
+		break;
+
+	case CLOCK_INT:
+		FST_WRB(card, portConfig[i].internalClock, INTCLK);
+		break;
+
+	default:
+		return -EINVAL;
+	}
+	FST_WRL(card, portConfig[i].lineSpeed, sync.clock_rate);
+	return 0;
+}
+
+static int
+fst_get_iface(struct fst_card_info *card, struct fst_port_info *port,
+	      struct if_settings *ifs)
+{
+	sync_serial_settings sync;
+	int i;
+
+	/* First check what line type is set; we'll default to reporting X.21
+	 * if nothing is set, as IF_IFACE_SYNC_SERIAL implies it can't be
+	 * changed
+	 */
+	switch (port->hwif) {
+	case E1:
+		ifs->type = IF_IFACE_E1;
+		break;
+	case T1:
+		ifs->type = IF_IFACE_T1;
+		break;
+	case V35:
+		ifs->type = IF_IFACE_V35;
+		break;
+	case V24:
+		ifs->type = IF_IFACE_V24;
+		break;
+	case X21D:
+		ifs->type = IF_IFACE_X21D;
+		break;
+	case X21:
+	default:
+		ifs->type = IF_IFACE_X21;
+		break;
+	}
+	if (!ifs->size)
+		return 0;	/* only type requested */
+
+	if (ifs->size < sizeof(sync))
+		return -ENOMEM;
+
+	i = port->index;
+	memset(&sync, 0, sizeof(sync));
+	sync.clock_rate = FST_RDL(card, portConfig[i].lineSpeed);
+	/* Luckily the card and Linux use the same encoding here */
+	sync.clock_type = FST_RDB(card, portConfig[i].internalClock) ==
+	    INTCLK ? CLOCK_INT : CLOCK_EXT;
+	sync.loopback = 0;
+
+	if (copy_to_user(ifs->ifs_ifsu.sync, &sync, sizeof(sync)))
+		return -EFAULT;
+
+	ifs->size = sizeof(sync);
+	return 0;
+}
+
+static int
+fst_siocdevprivate(struct net_device *dev, struct ifreq *ifr, void __user *data, int cmd)
+{
+	struct fst_card_info *card;
+	struct fst_port_info *port;
+	struct fstioc_write wrthdr;
+	struct fstioc_info info;
+	unsigned long flags;
+	void *buf;
+
+	dbg(DBG_IOCTL, "ioctl: %x, %p\n", cmd, data);
+
+	port = dev_to_port(dev);
+	card = port->card;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	switch (cmd) {
+	case FSTCPURESET:
+		fst_cpureset(card);
+		card->state = FST_RESET;
+		return 0;
+
+	case FSTCPURELEASE:
+		fst_cpurelease(card);
+		card->state = FST_STARTING;
+		return 0;
+
+	case FSTWRITE:	/* Code write (download) */
+
+		/* First copy in the header with the length and offset of data
+		 * to write
+		 */
+		if (!data)
+			return -EINVAL;
+
+		if (copy_from_user(&wrthdr, data, sizeof(struct fstioc_write)))
+			return -EFAULT;
+
+		/* Sanity check the parameters. We don't support partial writes
+		 * when going over the top
+		 */
+		if (wrthdr.size > FST_MEMSIZE || wrthdr.offset > FST_MEMSIZE ||
+		    wrthdr.size + wrthdr.offset > FST_MEMSIZE)
+			return -ENXIO;
+
+		/* Now copy the data to the card.
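+		 * The user buffer is duplicated into kernel memory with
+		 * memdup_user() and then written out with memcpy_toio();
+		 * copying directly from user space into I/O memory would
+		 * not be safe.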
*/ + + buf = memdup_user(data + sizeof(struct fstioc_write), + wrthdr.size); + if (IS_ERR(buf)) + return PTR_ERR(buf); + + memcpy_toio(card->mem + wrthdr.offset, buf, wrthdr.size); + kfree(buf); + + /* Writes to the memory of a card in the reset state constitute + * a download + */ + if (card->state == FST_RESET) + card->state = FST_DOWNLOAD; + + return 0; + + case FSTGETCONF: + + /* If card has just been started check the shared memory config + * version and marker + */ + if (card->state == FST_STARTING) { + check_started_ok(card); + + /* If everything checked out enable card interrupts */ + if (card->state == FST_RUNNING) { + spin_lock_irqsave(&card->card_lock, flags); + fst_enable_intr(card); + FST_WRB(card, interruptHandshake, 0xEE); + spin_unlock_irqrestore(&card->card_lock, flags); + } + } + + if (!data) + return -EINVAL; + + gather_conf_info(card, port, &info); + + if (copy_to_user(data, &info, sizeof(info))) + return -EFAULT; + + return 0; + + case FSTSETCONF: + /* Most of the settings have been moved to the generic ioctls + * this just covers debug and board ident now + */ + + if (card->state != FST_RUNNING) { + pr_err("Attempt to configure card %d in non-running state (%d)\n", + card->card_no, card->state); + return -EIO; + } + if (copy_from_user(&info, data, sizeof(info))) + return -EFAULT; + + return set_conf_from_info(card, port, &info); + default: + return -EINVAL; + } +} + +static int +fst_ioctl(struct net_device *dev, struct if_settings *ifs) +{ + struct fst_card_info *card; + struct fst_port_info *port; + + dbg(DBG_IOCTL, "SIOCDEVPRIVATE, %x\n", ifs->type); + + port = dev_to_port(dev); + card = port->card; + + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + switch (ifs->type) { + case IF_GET_IFACE: + return fst_get_iface(card, port, ifs); + + case IF_IFACE_SYNC_SERIAL: + case IF_IFACE_V35: + case IF_IFACE_V24: + case IF_IFACE_X21: + case IF_IFACE_X21D: + case IF_IFACE_T1: + case IF_IFACE_E1: + return fst_set_iface(card, port, ifs); + + case IF_PROTO_RAW: + port->mode = FST_RAW; + return 0; + + case IF_GET_PROTO: + if (port->mode == FST_RAW) { + ifs->type = IF_PROTO_RAW; + return 0; + } + return hdlc_ioctl(dev, ifs); + + default: + port->mode = FST_GEN_HDLC; + dbg(DBG_IOCTL, "Passing this type to hdlc %x\n", + ifs->type); + return hdlc_ioctl(dev, ifs); + } +} + +static void +fst_openport(struct fst_port_info *port) +{ + int signals; + + /* Only init things if card is actually running. This allows open to + * succeed for downloads etc. + */ + if (port->card->state == FST_RUNNING) { + if (port->run) { + dbg(DBG_OPEN, "open: found port already running\n"); + + fst_issue_cmd(port, STOPPORT); + port->run = 0; + } + + fst_rx_config(port); + fst_tx_config(port); + fst_op_raise(port, OPSTS_RTS | OPSTS_DTR); + + fst_issue_cmd(port, STARTPORT); + port->run = 1; + + signals = FST_RDL(port->card, v24DebouncedSts[port->index]); + if (signals & ((port->hwif == X21 || port->hwif == X21D) + ? 
IPSTS_INDICATE : IPSTS_DCD)) + netif_carrier_on(port_to_dev(port)); + else + netif_carrier_off(port_to_dev(port)); + + port->txqe = 0; + port->txqs = 0; + } +} + +static void +fst_closeport(struct fst_port_info *port) +{ + if (port->card->state == FST_RUNNING) { + if (port->run) { + port->run = 0; + fst_op_lower(port, OPSTS_RTS | OPSTS_DTR); + + fst_issue_cmd(port, STOPPORT); + } else { + dbg(DBG_OPEN, "close: port not running\n"); + } + } +} + +static int +fst_open(struct net_device *dev) +{ + int err; + struct fst_port_info *port; + + port = dev_to_port(dev); + if (!try_module_get(THIS_MODULE)) + return -EBUSY; + + if (port->mode != FST_RAW) { + err = hdlc_open(dev); + if (err) { + module_put(THIS_MODULE); + return err; + } + } + + fst_openport(port); + netif_wake_queue(dev); + return 0; +} + +static int +fst_close(struct net_device *dev) +{ + struct fst_port_info *port; + struct fst_card_info *card; + unsigned char tx_dma_done; + unsigned char rx_dma_done; + + port = dev_to_port(dev); + card = port->card; + + tx_dma_done = inb(card->pci_conf + DMACSR1); + rx_dma_done = inb(card->pci_conf + DMACSR0); + dbg(DBG_OPEN, + "Port Close: tx_dma_in_progress = %d (%x) rx_dma_in_progress = %d (%x)\n", + card->dmatx_in_progress, tx_dma_done, card->dmarx_in_progress, + rx_dma_done); + + netif_stop_queue(dev); + fst_closeport(dev_to_port(dev)); + if (port->mode != FST_RAW) + hdlc_close(dev); + + module_put(THIS_MODULE); + return 0; +} + +static int +fst_attach(struct net_device *dev, unsigned short encoding, unsigned short parity) +{ + /* Setting currently fixed in FarSync card so we check and forget + */ + if (encoding != ENCODING_NRZ || parity != PARITY_CRC16_PR1_CCITT) + return -EINVAL; + return 0; +} + +static void +fst_tx_timeout(struct net_device *dev, unsigned int txqueue) +{ + struct fst_port_info *port; + struct fst_card_info *card; + + port = dev_to_port(dev); + card = port->card; + dev->stats.tx_errors++; + dev->stats.tx_aborted_errors++; + dbg(DBG_ASS, "Tx timeout card %d port %d\n", + card->card_no, port->index); + fst_issue_cmd(port, ABORTTX); + + netif_trans_update(dev); + netif_wake_queue(dev); + port->start = 0; +} + +static netdev_tx_t +fst_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct fst_card_info *card; + struct fst_port_info *port; + unsigned long flags; + int txq_length; + + port = dev_to_port(dev); + card = port->card; + dbg(DBG_TX, "fst_start_xmit: length = %d\n", skb->len); + + /* Drop packet with error if we don't have carrier */ + if (!netif_carrier_ok(dev)) { + dev_kfree_skb(skb); + dev->stats.tx_errors++; + dev->stats.tx_carrier_errors++; + dbg(DBG_ASS, + "Tried to transmit but no carrier on card %d port %d\n", + card->card_no, port->index); + return NETDEV_TX_OK; + } + + /* Drop it if it's too big! MTU failure ? */ + if (skb->len > LEN_TX_BUFFER) { + dbg(DBG_ASS, "Packet too large %d vs %d\n", skb->len, + LEN_TX_BUFFER); + dev_kfree_skb(skb); + dev->stats.tx_errors++; + return NETDEV_TX_OK; + } + + /* We are always going to queue the packet + * so that the bottom half is the only place we tx from + * Check there is room in the port txq + */ + spin_lock_irqsave(&card->card_lock, flags); + txq_length = port->txqe - port->txqs; + if (txq_length < 0) { + /* This is the case where the next free has wrapped but the + * last used hasn't + */ + txq_length = txq_length + FST_TXQ_DEPTH; + } + spin_unlock_irqrestore(&card->card_lock, flags); + if (txq_length > fst_txq_high) { + /* We have got enough buffers in the pipeline. 
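+	 * (fst_txq_high and fst_txq_low give the queue some hysteresis
+	 * so it is not stopped and restarted on every frame.)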
Ask the network
+		 * layer to stop sending frames down */
+		netif_stop_queue(dev);
+		port->start = 1;	/* I'm using this to signal stop sent up */
+	}
+
+	if (txq_length == FST_TXQ_DEPTH - 1) {
+		/* This shouldn't have happened but such is life
+		 */
+		dev_kfree_skb(skb);
+		dev->stats.tx_errors++;
+		dbg(DBG_ASS, "Tx queue overflow card %d port %d\n",
+		    card->card_no, port->index);
+		return NETDEV_TX_OK;
+	}
+
+	/* queue the buffer
+	 */
+	spin_lock_irqsave(&card->card_lock, flags);
+	port->txq[port->txqe] = skb;
+	port->txqe++;
+	if (port->txqe == FST_TXQ_DEPTH)
+		port->txqe = 0;
+	spin_unlock_irqrestore(&card->card_lock, flags);
+
+	/* Schedule the bottom half which now does transmit processing */
+	fst_q_work_item(&fst_work_txq, card->card_no);
+	tasklet_schedule(&fst_tx_task);
+
+	return NETDEV_TX_OK;
+}
+
+/* Card setup having checked hardware resources.
+ * Should be pretty bizarre if we get an error here (kernel memory
+ * exhaustion is one possibility). If we do see a problem we report it
+ * via a printk and leave the corresponding interface and all that follow
+ * disabled.
+ */
+static char *type_strings[] = {
+	"no hardware",		/* Should never be seen */
+	"FarSync T2P",
+	"FarSync T4P",
+	"FarSync T1U",
+	"FarSync T2U",
+	"FarSync T4U",
+	"FarSync TE1"
+};
+
+static int
+fst_init_card(struct fst_card_info *card)
+{
+	int i;
+	int err;
+
+	/* We're working on a number of ports based on the card ID. If the
+	 * firmware detects something different later (should never happen)
+	 * we'll have to revise it in some way then.
+	 */
+	for (i = 0; i < card->nports; i++) {
+		err = register_hdlc_device(card->ports[i].dev);
+		if (err < 0) {
+			pr_err("Cannot register HDLC device for port %d (errno %d)\n",
+			       i, -err);
+			while (i--)
+				unregister_hdlc_device(card->ports[i].dev);
+			return err;
+		}
+	}
+
+	pr_info("%s-%s: %s IRQ%d, %d ports\n",
+		port_to_dev(&card->ports[0])->name,
+		port_to_dev(&card->ports[card->nports - 1])->name,
+		type_strings[card->type], card->irq, card->nports);
+	return 0;
+}
+
+static const struct net_device_ops fst_ops = {
+	.ndo_open = fst_open,
+	.ndo_stop = fst_close,
+	.ndo_start_xmit = hdlc_start_xmit,
+	.ndo_siocwandev = fst_ioctl,
+	.ndo_siocdevprivate = fst_siocdevprivate,
+	.ndo_tx_timeout = fst_tx_timeout,
+};
+
+/* Initialise card when detected.
+ * Returns 0 to indicate success, or errno otherwise.
+ */
+static int
+fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	static int no_of_cards_added;
+	struct fst_card_info *card;
+	int err = 0;
+	int i;
+
+	printk_once(KERN_INFO
+		    pr_fmt("FarSync WAN driver " FST_USER_VERSION
+			   " (c) 2001-2004 FarSite Communications Ltd.\n"));
+#if FST_DEBUG
+	dbg(DBG_ASS, "The value of debug mask is %x\n", fst_debug_mask);
+#endif
+	/* We are going to be clever and allow certain cards not to be
+	 * configured. An exclude list can be provided in /etc/modules.conf
+	 */
+	if (fst_excluded_cards != 0) {
+		/* There are cards to exclude
+		 */
+		for (i = 0; i < fst_excluded_cards; i++) {
+			if (pdev->devfn >> 3 == fst_excluded_list[i]) {
+				pr_info("FarSync PCI device %d not assigned\n",
+					(pdev->devfn) >> 3);
+				return -EBUSY;
+			}
+		}
+	}
+
+	/* Allocate driver private data */
+	card = kzalloc(sizeof(struct fst_card_info), GFP_KERNEL);
+	if (!card)
+		return -ENOMEM;
+
+	/* Try to enable the device */
+	err = pci_enable_device(pdev);
+	if (err) {
+		pr_err("Failed to enable card. Err %d\n", -err);
+		goto enable_fail;
+	}
+
+	err = pci_request_regions(pdev, "FarSync");
+	if (err) {
+		pr_err("Failed to allocate regions.
Err %d\n", -err); + goto regions_fail; + } + + /* Get virtual addresses of memory regions */ + card->pci_conf = pci_resource_start(pdev, 1); + card->phys_mem = pci_resource_start(pdev, 2); + card->phys_ctlmem = pci_resource_start(pdev, 3); + card->mem = ioremap(card->phys_mem, FST_MEMSIZE); + if (!card->mem) { + pr_err("Physical memory remap failed\n"); + err = -ENODEV; + goto ioremap_physmem_fail; + } + card->ctlmem = ioremap(card->phys_ctlmem, 0x10); + if (!card->ctlmem) { + pr_err("Control memory remap failed\n"); + err = -ENODEV; + goto ioremap_ctlmem_fail; + } + dbg(DBG_PCI, "kernel mem %p, ctlmem %p\n", card->mem, card->ctlmem); + + /* Register the interrupt handler */ + if (request_irq(pdev->irq, fst_intr, IRQF_SHARED, FST_DEV_NAME, card)) { + pr_err("Unable to register interrupt %d\n", card->irq); + err = -ENODEV; + goto irq_fail; + } + + /* Record info we need */ + card->irq = pdev->irq; + card->type = ent->driver_data; + card->family = ((ent->driver_data == FST_TYPE_T2P) || + (ent->driver_data == FST_TYPE_T4P)) + ? FST_FAMILY_TXP : FST_FAMILY_TXU; + if (ent->driver_data == FST_TYPE_T1U || + ent->driver_data == FST_TYPE_TE1) + card->nports = 1; + else + card->nports = ((ent->driver_data == FST_TYPE_T2P) || + (ent->driver_data == FST_TYPE_T2U)) ? 2 : 4; + + card->state = FST_UNINIT; + spin_lock_init(&card->card_lock); + + for (i = 0; i < card->nports; i++) { + struct net_device *dev = alloc_hdlcdev(&card->ports[i]); + hdlc_device *hdlc; + + if (!dev) { + while (i--) + free_netdev(card->ports[i].dev); + pr_err("FarSync: out of memory\n"); + err = -ENOMEM; + goto hdlcdev_fail; + } + card->ports[i].dev = dev; + card->ports[i].card = card; + card->ports[i].index = i; + card->ports[i].run = 0; + + hdlc = dev_to_hdlc(dev); + + /* Fill in the net device info */ + /* Since this is a PCI setup this is purely + * informational. Give them the buffer addresses + * and basic card I/O. 
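+		 * (mem_start/mem_end and base_addr are legacy fields shown by
+		 * tools such as ifconfig; the driver itself always uses
+		 * card->mem and card->pci_conf.)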
+ */ + dev->mem_start = card->phys_mem + + BUF_OFFSET(txBuffer[i][0][0]); + dev->mem_end = card->phys_mem + + BUF_OFFSET(txBuffer[i][NUM_TX_BUFFER - 1][LEN_RX_BUFFER - 1]); + dev->base_addr = card->pci_conf; + dev->irq = card->irq; + + dev->netdev_ops = &fst_ops; + dev->tx_queue_len = FST_TX_QUEUE_LEN; + dev->watchdog_timeo = FST_TX_TIMEOUT; + hdlc->attach = fst_attach; + hdlc->xmit = fst_start_xmit; + } + + card->device = pdev; + + dbg(DBG_PCI, "type %d nports %d irq %d\n", card->type, + card->nports, card->irq); + dbg(DBG_PCI, "conf %04x mem %08x ctlmem %08x\n", + card->pci_conf, card->phys_mem, card->phys_ctlmem); + + /* Reset the card's processor */ + fst_cpureset(card); + card->state = FST_RESET; + + /* Initialise DMA (if required) */ + fst_init_dma(card); + + /* Record driver data for later use */ + pci_set_drvdata(pdev, card); + + /* Remainder of card setup */ + if (no_of_cards_added >= FST_MAX_CARDS) { + pr_err("FarSync: too many cards\n"); + err = -ENOMEM; + goto card_array_fail; + } + fst_card_array[no_of_cards_added] = card; + card->card_no = no_of_cards_added++; /* Record instance and bump it */ + err = fst_init_card(card); + if (err) + goto init_card_fail; + if (card->family == FST_FAMILY_TXU) { + /* Allocate a dma buffer for transmit and receives + */ + card->rx_dma_handle_host = + dma_alloc_coherent(&card->device->dev, FST_MAX_MTU, + &card->rx_dma_handle_card, GFP_KERNEL); + if (!card->rx_dma_handle_host) { + pr_err("Could not allocate rx dma buffer\n"); + err = -ENOMEM; + goto rx_dma_fail; + } + card->tx_dma_handle_host = + dma_alloc_coherent(&card->device->dev, FST_MAX_MTU, + &card->tx_dma_handle_card, GFP_KERNEL); + if (!card->tx_dma_handle_host) { + pr_err("Could not allocate tx dma buffer\n"); + err = -ENOMEM; + goto tx_dma_fail; + } + } + return 0; /* Success */ + +tx_dma_fail: + dma_free_coherent(&card->device->dev, FST_MAX_MTU, + card->rx_dma_handle_host, card->rx_dma_handle_card); +rx_dma_fail: + fst_disable_intr(card); + for (i = 0 ; i < card->nports ; i++) + unregister_hdlc_device(card->ports[i].dev); +init_card_fail: + fst_card_array[card->card_no] = NULL; +card_array_fail: + for (i = 0 ; i < card->nports ; i++) + free_netdev(card->ports[i].dev); +hdlcdev_fail: + free_irq(card->irq, card); +irq_fail: + iounmap(card->ctlmem); +ioremap_ctlmem_fail: + iounmap(card->mem); +ioremap_physmem_fail: + pci_release_regions(pdev); +regions_fail: + pci_disable_device(pdev); +enable_fail: + kfree(card); + return err; +} + +/* Cleanup and close down a card + */ +static void +fst_remove_one(struct pci_dev *pdev) +{ + struct fst_card_info *card; + int i; + + card = pci_get_drvdata(pdev); + + for (i = 0; i < card->nports; i++) { + struct net_device *dev = port_to_dev(&card->ports[i]); + + unregister_hdlc_device(dev); + free_netdev(dev); + } + + fst_disable_intr(card); + free_irq(card->irq, card); + + iounmap(card->ctlmem); + iounmap(card->mem); + pci_release_regions(pdev); + if (card->family == FST_FAMILY_TXU) { + /* Free dma buffers + */ + dma_free_coherent(&card->device->dev, FST_MAX_MTU, + card->rx_dma_handle_host, + card->rx_dma_handle_card); + dma_free_coherent(&card->device->dev, FST_MAX_MTU, + card->tx_dma_handle_host, + card->tx_dma_handle_card); + } + fst_card_array[card->card_no] = NULL; + kfree(card); +} + +static struct pci_driver fst_driver = { + .name = FST_NAME, + .id_table = fst_pci_dev_id, + .probe = fst_add_one, + .remove = fst_remove_one, +}; + +static int __init +fst_init(void) +{ + int i; + + for (i = 0; i < FST_MAX_CARDS; i++) + fst_card_array[i] = NULL; + 
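	/* All per-card setup happens in fst_add_one() as the PCI core
+	 * probes each device; module init only clears the card array and
+	 * registers the driver.
+	 */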
+	return pci_register_driver(&fst_driver);
+}
+
+static void __exit
+fst_cleanup_module(void)
+{
+	pr_info("FarSync WAN driver unloading\n");
+	pci_unregister_driver(&fst_driver);
+}
+
+module_init(fst_init);
+module_exit(fst_cleanup_module);
diff --git a/drivers/net/wan/farsync.h b/drivers/net/wan/farsync.h
new file mode 100644
index 000000000..63908dbbb
--- /dev/null
+++ b/drivers/net/wan/farsync.h
@@ -0,0 +1,347 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * FarSync X21 driver for Linux
+ *
+ * Actually sync driver for X.21, V.35 and V.24 on FarSync T-series cards
+ *
+ * Copyright (C) 2001 FarSite Communications Ltd.
+ * www.farsite.co.uk
+ *
+ * Author: R.J.Dunlop <bob.dunlop@farsite.co.uk>
+ *
+ * For the most part this file only contains structures and information
+ * that is visible to applications outside the driver. Shared memory
+ * layout etc is internal to the driver and described within farsync.c.
+ * Overlap exists in that the values used for some fields within the
+ * ioctl interface extend into the card's firmware interface so values in
+ * this file may not be changed arbitrarily.
+ */
+
+/* What's in a name
+ *
+ * The project name for this driver is Oscar. The driver is intended to be
+ * used with the FarSite T-Series cards (T2P & T4P) running in the high
+ * speed frame shifter mode. This is sometimes referred to as X.21 mode
+ * which is a complete misnomer as the card continues to support V.24 and
+ * V.35 as well as X.21.
+ *
+ * A short common prefix is useful for routines within the driver to avoid
+ * conflict with other similar drivers and I have chosen to use "fst_" for
+ * this purpose (FarSite T-series).
+ *
+ * Finally the device driver needs a short network interface name. Since
+ * "hdlc" is already in use I've chosen the even less informative "sync"
+ * for the present.
+ */
+#define FST_NAME		"fst"		/* In debug/info etc */
+#define FST_NDEV_NAME		"sync"		/* For net interface */
+#define FST_DEV_NAME		"farsync"	/* For misc interfaces */
+
+
+/* User version number
+ *
+ * This version number is incremented with each official release of the
+ * package and is a simplified number for normal user reference.
+ * Individual files are tracked by the version control system and may
+ * have individual versions (or IDs) that move much faster than
+ * the release version as individual updates are tracked.
+ */
+#define FST_USER_VERSION	"1.04"
+
+
+/* Ioctl call command values
+ */
+#define FSTWRITE	(SIOCDEVPRIVATE+10)
+#define FSTCPURESET	(SIOCDEVPRIVATE+11)
+#define FSTCPURELEASE	(SIOCDEVPRIVATE+12)
+#define FSTGETCONF	(SIOCDEVPRIVATE+13)
+#define FSTSETCONF	(SIOCDEVPRIVATE+14)
+
+
+/* FSTWRITE
+ *
+ * Used to write a block of data (firmware etc) before the card is running
+ */
+struct fstioc_write {
+	unsigned int size;
+	unsigned int offset;
+	unsigned char data[];
+};
+
+
+/* FSTCPURESET and FSTCPURELEASE
+ *
+ * These take no additional data.
+ * FSTCPURESET forces the card's CPU into a reset state and holds it there.
+ * FSTCPURELEASE releases the CPU from this reset state allowing it to run,
+ * the reset vector should be set up before this ioctl is run.
+ */
+
+/* FSTGETCONF and FSTSETCONF
+ *
+ * Get and set a card/port's configuration.
+ * In order to allow selective setting of items and for the kernel to
+ * indicate a partial status response the first field "valid" is a bitmask
+ * indicating which other fields in the structure are valid.
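+ * A set request with only FSTVAL_MODE set, for example, updates
+ * cardMode and leaves every other setting untouched.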
+ * Many of the field names in this structure match those used in the
+ * firmware shared memory configuration interface and come originally from
+ * the NT header file Smc.h
+ *
+ * When used with FSTGETCONF this structure should be zeroed before use.
+ * This is to allow for possible future expansion when some of the fields
+ * might be used to indicate a different (expanded) structure.
+ */
+struct fstioc_info {
+	unsigned int   valid;		/* Bits of structure that are valid */
+	unsigned int   nports;		/* Number of serial ports */
+	unsigned int   type;		/* Type index of card */
+	unsigned int   state;		/* State of card */
+	unsigned int   index;		/* Index of port ioctl was issued on */
+	unsigned int   smcFirmwareVersion;
+	unsigned long  kernelVersion;	/* What kernel version we are working with */
+	unsigned short lineInterface;	/* Physical interface type */
+	unsigned char  proto;		/* Line protocol */
+	unsigned char  internalClock;	/* 1 => internal clock, 0 => external */
+	unsigned int   lineSpeed;	/* Speed in bps */
+	unsigned int   v24IpSts;	/* V.24 control input status */
+	unsigned int   v24OpSts;	/* V.24 control output status */
+	unsigned short clockStatus;	/* lsb: 0=> present, 1=> absent */
+	unsigned short cableStatus;	/* lsb: 0=> present, 1=> absent */
+	unsigned short cardMode;	/* lsb: LED id mode */
+	unsigned short debug;		/* Debug flags */
+	unsigned char  transparentMode;	/* Not used always 0 */
+	unsigned char  invertClock;	/* Invert clock feature for syncing */
+	unsigned char  startingSlot;	/* Time slot to use for start of tx */
+	unsigned char  clockSource;	/* External or internal */
+	unsigned char  framing;		/* E1, T1 or J1 */
+	unsigned char  structure;	/* unframed, double, crc4, f4, f12,
+					 * f24, f72 */
+	unsigned char  interface;	/* rj48c or bnc */
+	unsigned char  coding;		/* hdb3 b8zs */
+	unsigned char  lineBuildOut;	/* 0, -7.5, -15, -22 */
+	unsigned char  equalizer;	/* short or long haul settings */
+	unsigned char  loopMode;	/* various loopbacks */
+	unsigned char  range;		/* cable lengths */
+	unsigned char  txBufferMode;	/* tx elastic buffer depth */
+	unsigned char  rxBufferMode;	/* rx elastic buffer depth */
+	unsigned char  losThreshold;	/* Attenuation on LOS signal */
+	unsigned char  idleCode;	/* Value to send as idle timeslot */
+	unsigned int   receiveBufferDelay; /* delay through rx buffer timeslots */
+	unsigned int   framingErrorCount; /* framing errors */
+	unsigned int   codeViolationCount; /* code violations */
+	unsigned int   crcErrorCount;	/* CRC errors */
+	int            lineAttenuation;	/* in dB */
+	unsigned short lossOfSignal;
+	unsigned short receiveRemoteAlarm;
+	unsigned short alarmIndicationSignal;
+};
+
+/* "valid" bitmask */
+#define FSTVAL_NONE	0x00000000	/* Nothing valid (firmware not running).
+					 * Slight misnomer. In fact nports,
+					 * type, state and index will be set
+					 * based on hardware detected.
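+					 * (So a caller always gets enough
+					 * back from FSTGETCONF to identify
+					 * the card even before any firmware
+					 * has been downloaded.)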
+ */ +#define FSTVAL_OMODEM 0x0000001F /* First 5 bits correspond to the + * output status bits defined for + * v24OpSts + */ +#define FSTVAL_SPEED 0x00000020 /* internalClock, lineSpeed, clockStatus + */ +#define FSTVAL_CABLE 0x00000040 /* lineInterface, cableStatus */ +#define FSTVAL_IMODEM 0x00000080 /* v24IpSts */ +#define FSTVAL_CARD 0x00000100 /* nports, type, state, index, + * smcFirmwareVersion + */ +#define FSTVAL_PROTO 0x00000200 /* proto */ +#define FSTVAL_MODE 0x00000400 /* cardMode */ +#define FSTVAL_PHASE 0x00000800 /* Clock phase */ +#define FSTVAL_TE1 0x00001000 /* T1E1 Configuration */ +#define FSTVAL_DEBUG 0x80000000 /* debug */ +#define FSTVAL_ALL 0x00001FFF /* Note: does not include DEBUG flag */ + +/* "type" */ +#define FST_TYPE_NONE 0 /* Probably should never happen */ +#define FST_TYPE_T2P 1 /* T2P X21 2 port card */ +#define FST_TYPE_T4P 2 /* T4P X21 4 port card */ +#define FST_TYPE_T1U 3 /* T1U X21 1 port card */ +#define FST_TYPE_T2U 4 /* T2U X21 2 port card */ +#define FST_TYPE_T4U 5 /* T4U X21 4 port card */ +#define FST_TYPE_TE1 6 /* T1E1 X21 1 port card */ + +/* "family" */ +#define FST_FAMILY_TXP 0 /* T2P or T4P */ +#define FST_FAMILY_TXU 1 /* T1U or T2U or T4U */ + +/* "state" */ +#define FST_UNINIT 0 /* Raw uninitialised state following + * system startup */ +#define FST_RESET 1 /* Processor held in reset state */ +#define FST_DOWNLOAD 2 /* Card being downloaded */ +#define FST_STARTING 3 /* Released following download */ +#define FST_RUNNING 4 /* Processor running */ +#define FST_BADVERSION 5 /* Bad shared memory version detected */ +#define FST_HALTED 6 /* Processor flagged a halt */ +#define FST_IFAILED 7 /* Firmware issued initialisation failed + * interrupt + */ +/* "lineInterface" */ +#define V24 1 +#define X21 2 +#define V35 3 +#define X21D 4 +#define T1 5 +#define E1 6 +#define J1 7 + +/* "proto" */ +#define FST_RAW 4 /* Two way raw packets */ +#define FST_GEN_HDLC 5 /* Using "Generic HDLC" module */ + +/* "internalClock" */ +#define INTCLK 1 +#define EXTCLK 0 + +/* "v24IpSts" bitmask */ +#define IPSTS_CTS 0x00000001 /* Clear To Send (Indicate for X.21) */ +#define IPSTS_INDICATE IPSTS_CTS +#define IPSTS_DSR 0x00000002 /* Data Set Ready (T2P Port A) */ +#define IPSTS_DCD 0x00000004 /* Data Carrier Detect */ +#define IPSTS_RI 0x00000008 /* Ring Indicator (T2P Port A) */ +#define IPSTS_TMI 0x00000010 /* Test Mode Indicator (Not Supported)*/ + +/* "v24OpSts" bitmask */ +#define OPSTS_RTS 0x00000001 /* Request To Send (Control for X.21) */ +#define OPSTS_CONTROL OPSTS_RTS +#define OPSTS_DTR 0x00000002 /* Data Terminal Ready */ +#define OPSTS_DSRS 0x00000004 /* Data Signalling Rate Select (Not + * Supported) */ +#define OPSTS_SS 0x00000008 /* Select Standby (Not Supported) */ +#define OPSTS_LL 0x00000010 /* Maintenance Test (Not Supported) */ + +/* "cardMode" bitmask */ +#define CARD_MODE_IDENTIFY 0x0001 + +/* + * Constants for T1/E1 configuration + */ + +/* + * Clock source + */ +#define CLOCKING_SLAVE 0 +#define CLOCKING_MASTER 1 + +/* + * Framing + */ +#define FRAMING_E1 0 +#define FRAMING_J1 1 +#define FRAMING_T1 2 + +/* + * Structure + */ +#define STRUCTURE_UNFRAMED 0 +#define STRUCTURE_E1_DOUBLE 1 +#define STRUCTURE_E1_CRC4 2 +#define STRUCTURE_E1_CRC4M 3 +#define STRUCTURE_T1_4 4 +#define STRUCTURE_T1_12 5 +#define STRUCTURE_T1_24 6 +#define STRUCTURE_T1_72 7 + +/* + * Interface + */ +#define INTERFACE_RJ48C 0 +#define INTERFACE_BNC 1 + +/* + * Coding + */ + +#define CODING_HDB3 0 +#define CODING_NRZ 1 +#define CODING_CMI 2 +#define 
CODING_CMI_HDB3 3 +#define CODING_CMI_B8ZS 4 +#define CODING_AMI 5 +#define CODING_AMI_ZCS 6 +#define CODING_B8ZS 7 + +/* + * Line Build Out + */ +#define LBO_0dB 0 +#define LBO_7dB5 1 +#define LBO_15dB 2 +#define LBO_22dB5 3 + +/* + * Range for long haul t1 > 655ft + */ +#define RANGE_0_133_FT 0 +#define RANGE_0_40_M RANGE_0_133_FT +#define RANGE_133_266_FT 1 +#define RANGE_40_81_M RANGE_133_266_FT +#define RANGE_266_399_FT 2 +#define RANGE_81_122_M RANGE_266_399_FT +#define RANGE_399_533_FT 3 +#define RANGE_122_162_M RANGE_399_533_FT +#define RANGE_533_655_FT 4 +#define RANGE_162_200_M RANGE_533_655_FT +/* + * Receive Equaliser + */ +#define EQUALIZER_SHORT 0 +#define EQUALIZER_LONG 1 + +/* + * Loop modes + */ +#define LOOP_NONE 0 +#define LOOP_LOCAL 1 +#define LOOP_PAYLOAD_EXC_TS0 2 +#define LOOP_PAYLOAD_INC_TS0 3 +#define LOOP_REMOTE 4 + +/* + * Buffer modes + */ +#define BUFFER_2_FRAME 0 +#define BUFFER_1_FRAME 1 +#define BUFFER_96_BIT 2 +#define BUFFER_NONE 3 + +/* Debug support + * + * These should only be enabled for development kernels, production code + * should define FST_DEBUG=0 in order to exclude the code. + * Setting FST_DEBUG=1 will include all the debug code but in a disabled + * state, use the FSTSETCONF ioctl to enable specific debug actions, or + * FST_DEBUG can be set to prime the debug selection. + */ +#define FST_DEBUG 0x0000 +#if FST_DEBUG + +extern int fst_debug_mask; /* Bit mask of actions to debug, bits + * listed below. Note: Bit 0 is used + * to trigger the inclusion of this + * code, without enabling any actions. + */ +#define DBG_INIT 0x0002 /* Card detection and initialisation */ +#define DBG_OPEN 0x0004 /* Open and close sequences */ +#define DBG_PCI 0x0008 /* PCI config operations */ +#define DBG_IOCTL 0x0010 /* Ioctls and other config */ +#define DBG_INTR 0x0020 /* Interrupt routines (be careful) */ +#define DBG_TX 0x0040 /* Packet transmission */ +#define DBG_RX 0x0080 /* Packet reception */ +#define DBG_CMD 0x0100 /* Port command issuing */ + +#define DBG_ASS 0xFFFF /* Assert like statements. Code that + * should never be reached, if you see + * one of these then I've been an ass + */ +#endif /* FST_DEBUG */ + diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c new file mode 100644 index 000000000..5fec8abe8 --- /dev/null +++ b/drivers/net/wan/fsl_ucc_hdlc.c @@ -0,0 +1,1310 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* Freescale QUICC Engine HDLC Device Driver + * + * Copyright 2016 Freescale Semiconductor Inc. 
+ */
+
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/hdlc.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/stddef.h>
+#include <soc/fsl/qe/qe_tdm.h>
+#include <uapi/linux/if_arp.h>
+
+#include "fsl_ucc_hdlc.h"
+
+#define DRV_DESC "Freescale QE UCC HDLC Driver"
+#define DRV_NAME "ucc_hdlc"
+
+#define TDM_PPPOHT_SLIC_MAXIN
+#define RX_BD_ERRORS (R_CD_S | R_OV_S | R_CR_S | R_AB_S | R_NO_S | R_LG_S)
+
+static int uhdlc_close(struct net_device *dev);
+
+static struct ucc_tdm_info utdm_primary_info = {
+	.uf_info = {
+		.tsa = 0,
+		.cdp = 0,
+		.cds = 1,
+		.ctsp = 1,
+		.ctss = 1,
+		.revd = 0,
+		.urfs = 256,
+		.utfs = 256,
+		.urfet = 128,
+		.urfset = 192,
+		.utfet = 128,
+		.utftt = 0x40,
+		.ufpt = 256,
+		.mode = UCC_FAST_PROTOCOL_MODE_HDLC,
+		.ttx_trx = UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_NORMAL,
+		.tenc = UCC_FAST_TX_ENCODING_NRZ,
+		.renc = UCC_FAST_RX_ENCODING_NRZ,
+		.tcrc = UCC_FAST_16_BIT_CRC,
+		.synl = UCC_FAST_SYNC_LEN_NOT_USED,
+	},
+
+	.si_info = {
+#ifdef TDM_PPPOHT_SLIC_MAXIN
+		.simr_rfsd = 1,
+		.simr_tfsd = 2,
+#else
+		.simr_rfsd = 0,
+		.simr_tfsd = 0,
+#endif
+		.simr_crt = 0,
+		.simr_sl = 0,
+		.simr_ce = 1,
+		.simr_fe = 1,
+		.simr_gm = 0,
+	},
+};
+
+static struct ucc_tdm_info utdm_info[UCC_MAX_NUM];
+
+static int uhdlc_init(struct ucc_hdlc_private *priv)
+{
+	struct ucc_tdm_info *ut_info;
+	struct ucc_fast_info *uf_info;
+	u32 cecr_subblock;
+	u16 bd_status;
+	int ret, i;
+	void *bd_buffer;
+	dma_addr_t bd_dma_addr;
+	s32 riptr;
+	s32 tiptr;
+	u32 gumr;
+
+	ut_info = priv->ut_info;
+	uf_info = &ut_info->uf_info;
+
+	if (priv->tsa) {
+		uf_info->tsa = 1;
+		uf_info->ctsp = 1;
+		uf_info->cds = 1;
+		uf_info->ctss = 1;
+	} else {
+		uf_info->cds = 0;
+		uf_info->ctsp = 0;
+		uf_info->ctss = 0;
+	}
+
+	/* This sets the HPM register in the CMXUCR register, which
+	 * configures an open-drain connected HDLC bus
+	 */
+	if (priv->hdlc_bus)
+		uf_info->brkpt_support = 1;
+
+	uf_info->uccm_mask = ((UCC_HDLC_UCCE_RXB | UCC_HDLC_UCCE_RXF |
+			       UCC_HDLC_UCCE_TXB) << 16);
+
+	ret = ucc_fast_init(uf_info, &priv->uccf);
+	if (ret) {
+		dev_err(priv->dev, "Failed to init uccf.");
+		return ret;
+	}
+
+	priv->uf_regs = priv->uccf->uf_regs;
+	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);
+
+	/* Loopback mode */
+	if (priv->loopback) {
+		dev_info(priv->dev, "Loopback Mode\n");
+		/* use the same clock when working in loopback */
+		qe_setbrg(ut_info->uf_info.rx_clock, 20000000, 1);
+
+		gumr = ioread32be(&priv->uf_regs->gumr);
+		gumr |= (UCC_FAST_GUMR_LOOPBACK | UCC_FAST_GUMR_CDS |
+			 UCC_FAST_GUMR_TCI);
+		gumr &= ~(UCC_FAST_GUMR_CTSP | UCC_FAST_GUMR_RSYN);
+		iowrite32be(gumr, &priv->uf_regs->gumr);
+	}
+
+	/* Initialize SI */
+	if (priv->tsa)
+		ucc_tdm_init(priv->utdm, priv->ut_info);
+
+	/* Write to QE CECR, UCCx channel to Stop Transmission */
+	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
+	ret = qe_issue_cmd(QE_STOP_TX, cecr_subblock,
+			   QE_CR_PROTOCOL_UNSPECIFIED, 0);
+
+	/* Set UPSMR normal mode (needs fixing) */
+	iowrite32be(0, &priv->uf_regs->upsmr);
+
+	/* hdlc_bus mode */
+	if (priv->hdlc_bus) {
+		u32 upsmr;
+
+		dev_info(priv->dev, "HDLC bus Mode\n");
+		upsmr =
ioread32be(&priv->uf_regs->upsmr); + + /* bus mode and retransmit enable, with collision window + * set to 8 bytes + */ + upsmr |= UCC_HDLC_UPSMR_RTE | UCC_HDLC_UPSMR_BUS | + UCC_HDLC_UPSMR_CW8; + iowrite32be(upsmr, &priv->uf_regs->upsmr); + + /* explicitly disable CDS & CTSP */ + gumr = ioread32be(&priv->uf_regs->gumr); + gumr &= ~(UCC_FAST_GUMR_CDS | UCC_FAST_GUMR_CTSP); + /* set automatic sync to explicitly ignore CD signal */ + gumr |= UCC_FAST_GUMR_SYNL_AUTO; + iowrite32be(gumr, &priv->uf_regs->gumr); + } + + priv->rx_ring_size = RX_BD_RING_LEN; + priv->tx_ring_size = TX_BD_RING_LEN; + /* Alloc Rx BD */ + priv->rx_bd_base = dma_alloc_coherent(priv->dev, + RX_BD_RING_LEN * sizeof(struct qe_bd), + &priv->dma_rx_bd, GFP_KERNEL); + + if (!priv->rx_bd_base) { + dev_err(priv->dev, "Cannot allocate MURAM memory for RxBDs\n"); + ret = -ENOMEM; + goto free_uccf; + } + + /* Alloc Tx BD */ + priv->tx_bd_base = dma_alloc_coherent(priv->dev, + TX_BD_RING_LEN * sizeof(struct qe_bd), + &priv->dma_tx_bd, GFP_KERNEL); + + if (!priv->tx_bd_base) { + dev_err(priv->dev, "Cannot allocate MURAM memory for TxBDs\n"); + ret = -ENOMEM; + goto free_rx_bd; + } + + /* Alloc parameter ram for ucc hdlc */ + priv->ucc_pram_offset = qe_muram_alloc(sizeof(struct ucc_hdlc_param), + ALIGNMENT_OF_UCC_HDLC_PRAM); + + if (priv->ucc_pram_offset < 0) { + dev_err(priv->dev, "Can not allocate MURAM for hdlc parameter.\n"); + ret = -ENOMEM; + goto free_tx_bd; + } + + priv->rx_skbuff = kcalloc(priv->rx_ring_size, + sizeof(*priv->rx_skbuff), + GFP_KERNEL); + if (!priv->rx_skbuff) { + ret = -ENOMEM; + goto free_ucc_pram; + } + + priv->tx_skbuff = kcalloc(priv->tx_ring_size, + sizeof(*priv->tx_skbuff), + GFP_KERNEL); + if (!priv->tx_skbuff) { + ret = -ENOMEM; + goto free_rx_skbuff; + } + + priv->skb_curtx = 0; + priv->skb_dirtytx = 0; + priv->curtx_bd = priv->tx_bd_base; + priv->dirty_tx = priv->tx_bd_base; + priv->currx_bd = priv->rx_bd_base; + priv->currx_bdnum = 0; + + /* init parameter base */ + cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num); + ret = qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, cecr_subblock, + QE_CR_PROTOCOL_UNSPECIFIED, priv->ucc_pram_offset); + + priv->ucc_pram = (struct ucc_hdlc_param __iomem *) + qe_muram_addr(priv->ucc_pram_offset); + + /* Zero out parameter ram */ + memset_io(priv->ucc_pram, 0, sizeof(struct ucc_hdlc_param)); + + /* Alloc riptr, tiptr */ + riptr = qe_muram_alloc(32, 32); + if (riptr < 0) { + dev_err(priv->dev, "Cannot allocate MURAM mem for Receive internal temp data pointer\n"); + ret = -ENOMEM; + goto free_tx_skbuff; + } + + tiptr = qe_muram_alloc(32, 32); + if (tiptr < 0) { + dev_err(priv->dev, "Cannot allocate MURAM mem for Transmit internal temp data pointer\n"); + ret = -ENOMEM; + goto free_riptr; + } + if (riptr != (u16)riptr || tiptr != (u16)tiptr) { + dev_err(priv->dev, "MURAM allocation out of addressable range\n"); + ret = -ENOMEM; + goto free_tiptr; + } + + /* Set RIPTR, TIPTR */ + iowrite16be(riptr, &priv->ucc_pram->riptr); + iowrite16be(tiptr, &priv->ucc_pram->tiptr); + + /* Set MRBLR */ + iowrite16be(MAX_RX_BUF_LENGTH, &priv->ucc_pram->mrblr); + + /* Set RBASE, TBASE */ + iowrite32be(priv->dma_rx_bd, &priv->ucc_pram->rbase); + iowrite32be(priv->dma_tx_bd, &priv->ucc_pram->tbase); + + /* Set RSTATE, TSTATE */ + iowrite32be(BMR_GBL | BMR_BIG_ENDIAN, &priv->ucc_pram->rstate); + iowrite32be(BMR_GBL | BMR_BIG_ENDIAN, &priv->ucc_pram->tstate); + + /* Set C_MASK, C_PRES for 16bit CRC */ + iowrite32be(CRC_16BIT_MASK, &priv->ucc_pram->c_mask); + 
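	/* (C_PRES appears to be the CRC preset value and C_MASK the check
+	 * constant; both correspond to the 16-bit CRC selected by
+	 * UCC_FAST_16_BIT_CRC in utdm_primary_info.)
+	 */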
+	iowrite32be(CRC_16BIT_PRES, &priv->ucc_pram->c_pres);
+
+	iowrite16be(MAX_FRAME_LENGTH, &priv->ucc_pram->mflr);
+	iowrite16be(DEFAULT_RFTHR, &priv->ucc_pram->rfthr);
+	iowrite16be(DEFAULT_RFTHR, &priv->ucc_pram->rfcnt);
+	iowrite16be(priv->hmask, &priv->ucc_pram->hmask);
+	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr1);
+	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr2);
+	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr3);
+	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr4);
+
+	/* Get BD buffer */
+	bd_buffer = dma_alloc_coherent(priv->dev,
+				       (RX_BD_RING_LEN + TX_BD_RING_LEN) * MAX_RX_BUF_LENGTH,
+				       &bd_dma_addr, GFP_KERNEL);
+
+	if (!bd_buffer) {
+		dev_err(priv->dev, "Could not allocate buffer descriptors\n");
+		ret = -ENOMEM;
+		goto free_tiptr;
+	}
+
+	priv->rx_buffer = bd_buffer;
+	priv->tx_buffer = bd_buffer + RX_BD_RING_LEN * MAX_RX_BUF_LENGTH;
+
+	priv->dma_rx_addr = bd_dma_addr;
+	priv->dma_tx_addr = bd_dma_addr + RX_BD_RING_LEN * MAX_RX_BUF_LENGTH;
+
+	for (i = 0; i < RX_BD_RING_LEN; i++) {
+		if (i < (RX_BD_RING_LEN - 1))
+			bd_status = R_E_S | R_I_S;
+		else
+			bd_status = R_E_S | R_I_S | R_W_S;
+
+		priv->rx_bd_base[i].status = cpu_to_be16(bd_status);
+		priv->rx_bd_base[i].buf = cpu_to_be32(priv->dma_rx_addr + i * MAX_RX_BUF_LENGTH);
+	}
+
+	for (i = 0; i < TX_BD_RING_LEN; i++) {
+		if (i < (TX_BD_RING_LEN - 1))
+			bd_status = T_I_S | T_TC_S;
+		else
+			bd_status = T_I_S | T_TC_S | T_W_S;
+
+		priv->tx_bd_base[i].status = cpu_to_be16(bd_status);
+		priv->tx_bd_base[i].buf = cpu_to_be32(priv->dma_tx_addr + i * MAX_RX_BUF_LENGTH);
+	}
+	dma_wmb();
+
+	return 0;
+
+free_tiptr:
+	qe_muram_free(tiptr);
+free_riptr:
+	qe_muram_free(riptr);
+free_tx_skbuff:
+	kfree(priv->tx_skbuff);
+free_rx_skbuff:
+	kfree(priv->rx_skbuff);
+free_ucc_pram:
+	qe_muram_free(priv->ucc_pram_offset);
+free_tx_bd:
+	dma_free_coherent(priv->dev,
+			  TX_BD_RING_LEN * sizeof(struct qe_bd),
+			  priv->tx_bd_base, priv->dma_tx_bd);
+free_rx_bd:
+	dma_free_coherent(priv->dev,
+			  RX_BD_RING_LEN * sizeof(struct qe_bd),
+			  priv->rx_bd_base, priv->dma_rx_bd);
+free_uccf:
+	ucc_fast_free(priv->uccf);
+
+	return ret;
+}
+
+static netdev_tx_t ucc_hdlc_tx(struct sk_buff *skb, struct net_device *dev)
+{
+	hdlc_device *hdlc = dev_to_hdlc(dev);
+	struct ucc_hdlc_private *priv = (struct ucc_hdlc_private *)hdlc->priv;
+	struct qe_bd *bd;
+	u16 bd_status;
+	unsigned long flags;
+	__be16 *proto_head;
+
+	switch (dev->type) {
+	case ARPHRD_RAWHDLC:
+		if (skb_headroom(skb) < HDLC_HEAD_LEN) {
+			dev->stats.tx_dropped++;
+			dev_kfree_skb(skb);
+			netdev_err(dev, "Not enough space for hdlc head\n");
+			return -ENOMEM;
+		}
+
+		skb_push(skb, HDLC_HEAD_LEN);
+
+		proto_head = (__be16 *)skb->data;
+		*proto_head = htons(DEFAULT_HDLC_HEAD);
+
+		dev->stats.tx_bytes += skb->len;
+		break;
+
+	case ARPHRD_PPP:
+		proto_head = (__be16 *)skb->data;
+		if (*proto_head != htons(DEFAULT_PPP_HEAD)) {
+			dev->stats.tx_dropped++;
+			dev_kfree_skb(skb);
+			netdev_err(dev, "Wrong ppp header\n");
+			return -ENOMEM;
+		}
+
+		dev->stats.tx_bytes += skb->len;
+		break;
+
+	case ARPHRD_ETHER:
+		dev->stats.tx_bytes += skb->len;
+		break;
+
+	default:
+		dev->stats.tx_dropped++;
+		dev_kfree_skb(skb);
+		return -ENOMEM;
+	}
+	netdev_sent_queue(dev, skb->len);
+	spin_lock_irqsave(&priv->lock, flags);
+
+	dma_rmb();
+	/* Start from the next BD that should be filled */
+	bd = priv->curtx_bd;
+	bd_status = be16_to_cpu(bd->status);
+	/* Save the skb pointer so we can free it later */
+	priv->tx_skbuff[priv->skb_curtx] = skb;
+
+	/* Update the current skb pointer (wrapping
if this was the last) */ + priv->skb_curtx = + (priv->skb_curtx + 1) & TX_RING_MOD_MASK(TX_BD_RING_LEN); + + /* copy skb data to tx buffer for sdma processing */ + memcpy(priv->tx_buffer + (be32_to_cpu(bd->buf) - priv->dma_tx_addr), + skb->data, skb->len); + + /* set bd status and length */ + bd_status = (bd_status & T_W_S) | T_R_S | T_I_S | T_L_S | T_TC_S; + + bd->length = cpu_to_be16(skb->len); + bd->status = cpu_to_be16(bd_status); + + /* Move to next BD in the ring */ + if (!(bd_status & T_W_S)) + bd += 1; + else + bd = priv->tx_bd_base; + + if (bd == priv->dirty_tx) { + if (!netif_queue_stopped(dev)) + netif_stop_queue(dev); + } + + priv->curtx_bd = bd; + + spin_unlock_irqrestore(&priv->lock, flags); + + return NETDEV_TX_OK; +} + +static int hdlc_tx_restart(struct ucc_hdlc_private *priv) +{ + u32 cecr_subblock; + + cecr_subblock = + ucc_fast_get_qe_cr_subblock(priv->ut_info->uf_info.ucc_num); + + qe_issue_cmd(QE_RESTART_TX, cecr_subblock, + QE_CR_PROTOCOL_UNSPECIFIED, 0); + return 0; +} + +static int hdlc_tx_done(struct ucc_hdlc_private *priv) +{ + /* Start from the next BD that should be filled */ + struct net_device *dev = priv->ndev; + unsigned int bytes_sent = 0; + int howmany = 0; + struct qe_bd *bd; /* BD pointer */ + u16 bd_status; + int tx_restart = 0; + + dma_rmb(); + bd = priv->dirty_tx; + bd_status = be16_to_cpu(bd->status); + + /* Normal processing. */ + while ((bd_status & T_R_S) == 0) { + struct sk_buff *skb; + + if (bd_status & T_UN_S) { /* Underrun */ + dev->stats.tx_fifo_errors++; + tx_restart = 1; + } + if (bd_status & T_CT_S) { /* Carrier lost */ + dev->stats.tx_carrier_errors++; + tx_restart = 1; + } + + /* BD contains already transmitted buffer. */ + /* Handle the transmitted buffer and release */ + /* the BD to be used with the current frame */ + + skb = priv->tx_skbuff[priv->skb_dirtytx]; + if (!skb) + break; + howmany++; + bytes_sent += skb->len; + dev->stats.tx_packets++; + memset(priv->tx_buffer + + (be32_to_cpu(bd->buf) - priv->dma_tx_addr), + 0, skb->len); + dev_consume_skb_irq(skb); + + priv->tx_skbuff[priv->skb_dirtytx] = NULL; + priv->skb_dirtytx = + (priv->skb_dirtytx + + 1) & TX_RING_MOD_MASK(TX_BD_RING_LEN); + + /* We freed a buffer, so now we can restart transmission */ + if (netif_queue_stopped(dev)) + netif_wake_queue(dev); + + /* Advance the confirmation BD pointer */ + if (!(bd_status & T_W_S)) + bd += 1; + else + bd = priv->tx_bd_base; + bd_status = be16_to_cpu(bd->status); + } + priv->dirty_tx = bd; + + if (tx_restart) + hdlc_tx_restart(priv); + + netdev_completed_queue(dev, howmany, bytes_sent); + return 0; +} + +static int hdlc_rx_done(struct ucc_hdlc_private *priv, int rx_work_limit) +{ + struct net_device *dev = priv->ndev; + struct sk_buff *skb = NULL; + hdlc_device *hdlc = dev_to_hdlc(dev); + struct qe_bd *bd; + u16 bd_status; + u16 length, howmany = 0; + u8 *bdbuffer; + + dma_rmb(); + bd = priv->currx_bd; + bd_status = be16_to_cpu(bd->status); + + /* while there are received buffers and BD is full (~R_E) */ + while (!((bd_status & (R_E_S)) || (--rx_work_limit < 0))) { + if (bd_status & (RX_BD_ERRORS)) { + dev->stats.rx_errors++; + + if (bd_status & R_CD_S) + dev->stats.collisions++; + if (bd_status & R_OV_S) + dev->stats.rx_fifo_errors++; + if (bd_status & R_CR_S) + dev->stats.rx_crc_errors++; + if (bd_status & R_AB_S) + dev->stats.rx_over_errors++; + if (bd_status & R_NO_S) + dev->stats.rx_frame_errors++; + if (bd_status & R_LG_S) + dev->stats.rx_length_errors++; + + goto recycle; + } + bdbuffer = priv->rx_buffer + + (priv->currx_bdnum 
* MAX_RX_BUF_LENGTH); + length = be16_to_cpu(bd->length); + + switch (dev->type) { + case ARPHRD_RAWHDLC: + bdbuffer += HDLC_HEAD_LEN; + length -= (HDLC_HEAD_LEN + HDLC_CRC_SIZE); + + skb = dev_alloc_skb(length); + if (!skb) { + dev->stats.rx_dropped++; + return -ENOMEM; + } + + skb_put(skb, length); + skb->len = length; + skb->dev = dev; + memcpy(skb->data, bdbuffer, length); + break; + + case ARPHRD_PPP: + case ARPHRD_ETHER: + length -= HDLC_CRC_SIZE; + + skb = dev_alloc_skb(length); + if (!skb) { + dev->stats.rx_dropped++; + return -ENOMEM; + } + + skb_put(skb, length); + skb->len = length; + skb->dev = dev; + memcpy(skb->data, bdbuffer, length); + break; + } + + dev->stats.rx_packets++; + dev->stats.rx_bytes += skb->len; + howmany++; + if (hdlc->proto) + skb->protocol = hdlc_type_trans(skb, dev); + netif_receive_skb(skb); + +recycle: + bd->status = cpu_to_be16((bd_status & R_W_S) | R_E_S | R_I_S); + + /* update to point at the next bd */ + if (bd_status & R_W_S) { + priv->currx_bdnum = 0; + bd = priv->rx_bd_base; + } else { + if (priv->currx_bdnum < (RX_BD_RING_LEN - 1)) + priv->currx_bdnum += 1; + else + priv->currx_bdnum = RX_BD_RING_LEN - 1; + + bd += 1; + } + + bd_status = be16_to_cpu(bd->status); + } + dma_rmb(); + + priv->currx_bd = bd; + return howmany; +} + +static int ucc_hdlc_poll(struct napi_struct *napi, int budget) +{ + struct ucc_hdlc_private *priv = container_of(napi, + struct ucc_hdlc_private, + napi); + int howmany; + + /* Tx event processing */ + spin_lock(&priv->lock); + hdlc_tx_done(priv); + spin_unlock(&priv->lock); + + howmany = 0; + howmany += hdlc_rx_done(priv, budget - howmany); + + if (howmany < budget) { + napi_complete_done(napi, howmany); + qe_setbits_be32(priv->uccf->p_uccm, + (UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS) << 16); + } + + return howmany; +} + +static irqreturn_t ucc_hdlc_irq_handler(int irq, void *dev_id) +{ + struct ucc_hdlc_private *priv = (struct ucc_hdlc_private *)dev_id; + struct net_device *dev = priv->ndev; + struct ucc_fast_private *uccf; + u32 ucce; + u32 uccm; + + uccf = priv->uccf; + + ucce = ioread32be(uccf->p_ucce); + uccm = ioread32be(uccf->p_uccm); + ucce &= uccm; + iowrite32be(ucce, uccf->p_ucce); + if (!ucce) + return IRQ_NONE; + + if ((ucce >> 16) & (UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS)) { + if (napi_schedule_prep(&priv->napi)) { + uccm &= ~((UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS) + << 16); + iowrite32be(uccm, uccf->p_uccm); + __napi_schedule(&priv->napi); + } + } + + /* Errors and other events */ + if (ucce >> 16 & UCC_HDLC_UCCE_BSY) + dev->stats.rx_missed_errors++; + if (ucce >> 16 & UCC_HDLC_UCCE_TXE) + dev->stats.tx_errors++; + + return IRQ_HANDLED; +} + +static int uhdlc_ioctl(struct net_device *dev, struct if_settings *ifs) +{ + const size_t size = sizeof(te1_settings); + te1_settings line; + struct ucc_hdlc_private *priv = netdev_priv(dev); + + switch (ifs->type) { + case IF_GET_IFACE: + ifs->type = IF_IFACE_E1; + if (ifs->size < size) { + ifs->size = size; /* data size wanted */ + return -ENOBUFS; + } + memset(&line, 0, sizeof(line)); + line.clock_type = priv->clocking; + + if (copy_to_user(ifs->ifs_ifsu.sync, &line, size)) + return -EFAULT; + return 0; + + default: + return hdlc_ioctl(dev, ifs); + } +} + +static int uhdlc_open(struct net_device *dev) +{ + u32 cecr_subblock; + hdlc_device *hdlc = dev_to_hdlc(dev); + struct ucc_hdlc_private *priv = hdlc->priv; + struct ucc_tdm *utdm = priv->utdm; + int rc = 0; + + if (priv->hdlc_busy != 1) { + if (request_irq(priv->ut_info->uf_info.irq, + 
ucc_hdlc_irq_handler, 0, "hdlc", priv)) + return -ENODEV; + + cecr_subblock = ucc_fast_get_qe_cr_subblock( + priv->ut_info->uf_info.ucc_num); + + qe_issue_cmd(QE_INIT_TX_RX, cecr_subblock, + QE_CR_PROTOCOL_UNSPECIFIED, 0); + + ucc_fast_enable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX); + + /* Enable the TDM port */ + if (priv->tsa) + qe_setbits_8(&utdm->si_regs->siglmr1_h, 0x1 << utdm->tdm_port); + + priv->hdlc_busy = 1; + netif_device_attach(priv->ndev); + napi_enable(&priv->napi); + netdev_reset_queue(dev); + netif_start_queue(dev); + + rc = hdlc_open(dev); + if (rc) + uhdlc_close(dev); + } + + return rc; +} + +static void uhdlc_memclean(struct ucc_hdlc_private *priv) +{ + qe_muram_free(ioread16be(&priv->ucc_pram->riptr)); + qe_muram_free(ioread16be(&priv->ucc_pram->tiptr)); + + if (priv->rx_bd_base) { + dma_free_coherent(priv->dev, + RX_BD_RING_LEN * sizeof(struct qe_bd), + priv->rx_bd_base, priv->dma_rx_bd); + + priv->rx_bd_base = NULL; + priv->dma_rx_bd = 0; + } + + if (priv->tx_bd_base) { + dma_free_coherent(priv->dev, + TX_BD_RING_LEN * sizeof(struct qe_bd), + priv->tx_bd_base, priv->dma_tx_bd); + + priv->tx_bd_base = NULL; + priv->dma_tx_bd = 0; + } + + if (priv->ucc_pram) { + qe_muram_free(priv->ucc_pram_offset); + priv->ucc_pram = NULL; + priv->ucc_pram_offset = 0; + } + + kfree(priv->rx_skbuff); + priv->rx_skbuff = NULL; + + kfree(priv->tx_skbuff); + priv->tx_skbuff = NULL; + + if (priv->uf_regs) { + iounmap(priv->uf_regs); + priv->uf_regs = NULL; + } + + if (priv->uccf) { + ucc_fast_free(priv->uccf); + priv->uccf = NULL; + } + + if (priv->rx_buffer) { + dma_free_coherent(priv->dev, + RX_BD_RING_LEN * MAX_RX_BUF_LENGTH, + priv->rx_buffer, priv->dma_rx_addr); + priv->rx_buffer = NULL; + priv->dma_rx_addr = 0; + } + + if (priv->tx_buffer) { + dma_free_coherent(priv->dev, + TX_BD_RING_LEN * MAX_RX_BUF_LENGTH, + priv->tx_buffer, priv->dma_tx_addr); + priv->tx_buffer = NULL; + priv->dma_tx_addr = 0; + } +} + +static int uhdlc_close(struct net_device *dev) +{ + struct ucc_hdlc_private *priv = dev_to_hdlc(dev)->priv; + struct ucc_tdm *utdm = priv->utdm; + u32 cecr_subblock; + + napi_disable(&priv->napi); + cecr_subblock = ucc_fast_get_qe_cr_subblock( + priv->ut_info->uf_info.ucc_num); + + qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock, + (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0); + qe_issue_cmd(QE_CLOSE_RX_BD, cecr_subblock, + (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0); + + if (priv->tsa) + qe_clrbits_8(&utdm->si_regs->siglmr1_h, 0x1 << utdm->tdm_port); + + ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX); + + free_irq(priv->ut_info->uf_info.irq, priv); + netif_stop_queue(dev); + netdev_reset_queue(dev); + priv->hdlc_busy = 0; + + hdlc_close(dev); + + return 0; +} + +static int ucc_hdlc_attach(struct net_device *dev, unsigned short encoding, + unsigned short parity) +{ + struct ucc_hdlc_private *priv = dev_to_hdlc(dev)->priv; + + if (encoding != ENCODING_NRZ && + encoding != ENCODING_NRZI) + return -EINVAL; + + if (parity != PARITY_NONE && + parity != PARITY_CRC32_PR1_CCITT && + parity != PARITY_CRC16_PR0_CCITT && + parity != PARITY_CRC16_PR1_CCITT) + return -EINVAL; + + priv->encoding = encoding; + priv->parity = parity; + + return 0; +} + +#ifdef CONFIG_PM +static void store_clk_config(struct ucc_hdlc_private *priv) +{ + struct qe_mux __iomem *qe_mux_reg = &qe_immr->qmx; + + /* store si clk */ + priv->cmxsi1cr_h = ioread32be(&qe_mux_reg->cmxsi1cr_h); + priv->cmxsi1cr_l = ioread32be(&qe_mux_reg->cmxsi1cr_l); + + /* store si sync */ + priv->cmxsi1syr = ioread32be(&qe_mux_reg->cmxsi1syr); + + /* 
store ucc clk */ + memcpy_fromio(priv->cmxucr, qe_mux_reg->cmxucr, 4 * sizeof(u32)); +} + +static void resume_clk_config(struct ucc_hdlc_private *priv) +{ + struct qe_mux __iomem *qe_mux_reg = &qe_immr->qmx; + + memcpy_toio(qe_mux_reg->cmxucr, priv->cmxucr, 4 * sizeof(u32)); + + iowrite32be(priv->cmxsi1cr_h, &qe_mux_reg->cmxsi1cr_h); + iowrite32be(priv->cmxsi1cr_l, &qe_mux_reg->cmxsi1cr_l); + + iowrite32be(priv->cmxsi1syr, &qe_mux_reg->cmxsi1syr); +} + +static int uhdlc_suspend(struct device *dev) +{ + struct ucc_hdlc_private *priv = dev_get_drvdata(dev); + struct ucc_fast __iomem *uf_regs; + + if (!priv) + return -EINVAL; + + if (!netif_running(priv->ndev)) + return 0; + + netif_device_detach(priv->ndev); + napi_disable(&priv->napi); + + uf_regs = priv->uf_regs; + + /* backup gumr guemr*/ + priv->gumr = ioread32be(&uf_regs->gumr); + priv->guemr = ioread8(&uf_regs->guemr); + + priv->ucc_pram_bak = kmalloc(sizeof(*priv->ucc_pram_bak), + GFP_KERNEL); + if (!priv->ucc_pram_bak) + return -ENOMEM; + + /* backup HDLC parameter */ + memcpy_fromio(priv->ucc_pram_bak, priv->ucc_pram, + sizeof(struct ucc_hdlc_param)); + + /* store the clk configuration */ + store_clk_config(priv); + + /* save power */ + ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX); + + return 0; +} + +static int uhdlc_resume(struct device *dev) +{ + struct ucc_hdlc_private *priv = dev_get_drvdata(dev); + struct ucc_tdm *utdm; + struct ucc_tdm_info *ut_info; + struct ucc_fast __iomem *uf_regs; + struct ucc_fast_private *uccf; + struct ucc_fast_info *uf_info; + int i; + u32 cecr_subblock; + u16 bd_status; + + if (!priv) + return -EINVAL; + + if (!netif_running(priv->ndev)) + return 0; + + utdm = priv->utdm; + ut_info = priv->ut_info; + uf_info = &ut_info->uf_info; + uf_regs = priv->uf_regs; + uccf = priv->uccf; + + /* restore gumr guemr */ + iowrite8(priv->guemr, &uf_regs->guemr); + iowrite32be(priv->gumr, &uf_regs->gumr); + + /* Set Virtual Fifo registers */ + iowrite16be(uf_info->urfs, &uf_regs->urfs); + iowrite16be(uf_info->urfet, &uf_regs->urfet); + iowrite16be(uf_info->urfset, &uf_regs->urfset); + iowrite16be(uf_info->utfs, &uf_regs->utfs); + iowrite16be(uf_info->utfet, &uf_regs->utfet); + iowrite16be(uf_info->utftt, &uf_regs->utftt); + /* utfb, urfb are offsets from MURAM base */ + iowrite32be(uccf->ucc_fast_tx_virtual_fifo_base_offset, &uf_regs->utfb); + iowrite32be(uccf->ucc_fast_rx_virtual_fifo_base_offset, &uf_regs->urfb); + + /* Rx Tx and sync clock routing */ + resume_clk_config(priv); + + iowrite32be(uf_info->uccm_mask, &uf_regs->uccm); + iowrite32be(0xffffffff, &uf_regs->ucce); + + ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX); + + /* rebuild SIRAM */ + if (priv->tsa) + ucc_tdm_init(priv->utdm, priv->ut_info); + + /* Write to QE CECR, UCCx channel to Stop Transmission */ + cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num); + qe_issue_cmd(QE_STOP_TX, cecr_subblock, + (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0); + + /* Set UPSMR normal mode */ + iowrite32be(0, &uf_regs->upsmr); + + /* init parameter base */ + cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num); + qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, cecr_subblock, + QE_CR_PROTOCOL_UNSPECIFIED, priv->ucc_pram_offset); + + priv->ucc_pram = (struct ucc_hdlc_param __iomem *) + qe_muram_addr(priv->ucc_pram_offset); + + /* restore ucc parameter */ + memcpy_toio(priv->ucc_pram, priv->ucc_pram_bak, + sizeof(struct ucc_hdlc_param)); + kfree(priv->ucc_pram_bak); + + /* rebuild BD entry */ + for (i = 0; i < RX_BD_RING_LEN; i++) { + if (i < 
(RX_BD_RING_LEN - 1)) + bd_status = R_E_S | R_I_S; + else + bd_status = R_E_S | R_I_S | R_W_S; + + priv->rx_bd_base[i].status = cpu_to_be16(bd_status); + priv->rx_bd_base[i].buf = cpu_to_be32(priv->dma_rx_addr + i * MAX_RX_BUF_LENGTH); + } + + for (i = 0; i < TX_BD_RING_LEN; i++) { + if (i < (TX_BD_RING_LEN - 1)) + bd_status = T_I_S | T_TC_S; + else + bd_status = T_I_S | T_TC_S | T_W_S; + + priv->tx_bd_base[i].status = cpu_to_be16(bd_status); + priv->tx_bd_base[i].buf = cpu_to_be32(priv->dma_tx_addr + i * MAX_RX_BUF_LENGTH); + } + dma_wmb(); + + /* if hdlc is busy enable TX and RX */ + if (priv->hdlc_busy == 1) { + cecr_subblock = ucc_fast_get_qe_cr_subblock( + priv->ut_info->uf_info.ucc_num); + + qe_issue_cmd(QE_INIT_TX_RX, cecr_subblock, + (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0); + + ucc_fast_enable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX); + + /* Enable the TDM port */ + if (priv->tsa) + qe_setbits_8(&utdm->si_regs->siglmr1_h, 0x1 << utdm->tdm_port); + } + + napi_enable(&priv->napi); + netif_device_attach(priv->ndev); + + return 0; +} + +static const struct dev_pm_ops uhdlc_pm_ops = { + .suspend = uhdlc_suspend, + .resume = uhdlc_resume, + .freeze = uhdlc_suspend, + .thaw = uhdlc_resume, +}; + +#define HDLC_PM_OPS (&uhdlc_pm_ops) + +#else + +#define HDLC_PM_OPS NULL + +#endif +static void uhdlc_tx_timeout(struct net_device *ndev, unsigned int txqueue) +{ + netdev_err(ndev, "%s\n", __func__); +} + +static const struct net_device_ops uhdlc_ops = { + .ndo_open = uhdlc_open, + .ndo_stop = uhdlc_close, + .ndo_start_xmit = hdlc_start_xmit, + .ndo_siocwandev = uhdlc_ioctl, + .ndo_tx_timeout = uhdlc_tx_timeout, +}; + +static int hdlc_map_iomem(char *name, int init_flag, void __iomem **ptr) +{ + struct device_node *np; + struct platform_device *pdev; + struct resource *res; + static int siram_init_flag; + int ret = 0; + + np = of_find_compatible_node(NULL, NULL, name); + if (!np) + return -EINVAL; + + pdev = of_find_device_by_node(np); + if (!pdev) { + pr_err("%pOFn: failed to lookup pdev\n", np); + of_node_put(np); + return -EINVAL; + } + + of_node_put(np); + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + ret = -EINVAL; + goto error_put_device; + } + *ptr = ioremap(res->start, resource_size(res)); + if (!*ptr) { + ret = -ENOMEM; + goto error_put_device; + } + + /* We've remapped the addresses, and we don't need the device any + * more, so we should release it. 
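+ * (of_find_device_by_node() took a reference on the device; the + * ioremap() mapping itself does not pin the device, so the + * reference can safely be dropped here.)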
+ */ + put_device(&pdev->dev); + + if (init_flag && siram_init_flag == 0) { + memset_io(*ptr, 0, resource_size(res)); + siram_init_flag = 1; + } + return 0; + +error_put_device: + put_device(&pdev->dev); + + return ret; +} + +static int ucc_hdlc_probe(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct ucc_hdlc_private *uhdlc_priv = NULL; + struct ucc_tdm_info *ut_info; + struct ucc_tdm *utdm = NULL; + struct resource res; + struct net_device *dev; + hdlc_device *hdlc; + int ucc_num; + const char *sprop; + int ret; + u32 val; + + ret = of_property_read_u32_index(np, "cell-index", 0, &val); + if (ret) { + dev_err(&pdev->dev, "Invalid ucc property\n"); + return -ENODEV; + } + + ucc_num = val - 1; + if (ucc_num > (UCC_MAX_NUM - 1) || ucc_num < 0) { + dev_err(&pdev->dev, ": Invalid UCC num\n"); + return -EINVAL; + } + + memcpy(&utdm_info[ucc_num], &utdm_primary_info, + sizeof(utdm_primary_info)); + + ut_info = &utdm_info[ucc_num]; + ut_info->uf_info.ucc_num = ucc_num; + + sprop = of_get_property(np, "rx-clock-name", NULL); + if (sprop) { + ut_info->uf_info.rx_clock = qe_clock_source(sprop); + if ((ut_info->uf_info.rx_clock < QE_CLK_NONE) || + (ut_info->uf_info.rx_clock > QE_CLK24)) { + dev_err(&pdev->dev, "Invalid rx-clock-name property\n"); + return -EINVAL; + } + } else { + dev_err(&pdev->dev, "Invalid rx-clock-name property\n"); + return -EINVAL; + } + + sprop = of_get_property(np, "tx-clock-name", NULL); + if (sprop) { + ut_info->uf_info.tx_clock = qe_clock_source(sprop); + if ((ut_info->uf_info.tx_clock < QE_CLK_NONE) || + (ut_info->uf_info.tx_clock > QE_CLK24)) { + dev_err(&pdev->dev, "Invalid tx-clock-name property\n"); + return -EINVAL; + } + } else { + dev_err(&pdev->dev, "Invalid tx-clock-name property\n"); + return -EINVAL; + } + + ret = of_address_to_resource(np, 0, &res); + if (ret) + return -EINVAL; + + ut_info->uf_info.regs = res.start; + ut_info->uf_info.irq = irq_of_parse_and_map(np, 0); + + uhdlc_priv = kzalloc(sizeof(*uhdlc_priv), GFP_KERNEL); + if (!uhdlc_priv) + return -ENOMEM; + + dev_set_drvdata(&pdev->dev, uhdlc_priv); + uhdlc_priv->dev = &pdev->dev; + uhdlc_priv->ut_info = ut_info; + + if (of_get_property(np, "fsl,tdm-interface", NULL)) + uhdlc_priv->tsa = 1; + + if (of_get_property(np, "fsl,ucc-internal-loopback", NULL)) + uhdlc_priv->loopback = 1; + + if (of_get_property(np, "fsl,hdlc-bus", NULL)) + uhdlc_priv->hdlc_bus = 1; + + if (uhdlc_priv->tsa == 1) { + utdm = kzalloc(sizeof(*utdm), GFP_KERNEL); + if (!utdm) { + ret = -ENOMEM; + dev_err(&pdev->dev, "No mem to alloc ucc tdm data\n"); + goto free_uhdlc_priv; + } + uhdlc_priv->utdm = utdm; + ret = ucc_of_parse_tdm(np, utdm, ut_info); + if (ret) + goto free_utdm; + + ret = hdlc_map_iomem("fsl,t1040-qe-si", 0, + (void __iomem **)&utdm->si_regs); + if (ret) + goto free_utdm; + ret = hdlc_map_iomem("fsl,t1040-qe-siram", 1, + (void __iomem **)&utdm->siram); + if (ret) + goto unmap_si_regs; + } + + if (of_property_read_u16(np, "fsl,hmask", &uhdlc_priv->hmask)) + uhdlc_priv->hmask = DEFAULT_ADDR_MASK; + + ret = uhdlc_init(uhdlc_priv); + if (ret) { + dev_err(&pdev->dev, "Failed to init uhdlc\n"); + goto undo_uhdlc_init; + } + + dev = alloc_hdlcdev(uhdlc_priv); + if (!dev) { + ret = -ENOMEM; + pr_err("ucc_hdlc: unable to allocate memory\n"); + goto undo_uhdlc_init; + } + + uhdlc_priv->ndev = dev; + hdlc = dev_to_hdlc(dev); + dev->tx_queue_len = 16; + dev->netdev_ops = &uhdlc_ops; + dev->watchdog_timeo = 2 * HZ; + hdlc->attach = ucc_hdlc_attach; + hdlc->xmit = ucc_hdlc_tx; + 
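/* A rough sketch of a device-tree node satisfying the properties + * parsed above; the node name, unit address and "brg1" clock names + * are illustrative assumptions, not taken from a dts in the tree: + * + * ucc@2000 { + * compatible = "fsl,ucc-hdlc"; + * cell-index = <1>; + * rx-clock-name = "brg1"; + * tx-clock-name = "brg1"; + * fsl,tdm-interface; + * }; + */ + 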
netif_napi_add_weight(dev, &uhdlc_priv->napi, ucc_hdlc_poll, 32); + if (register_hdlc_device(dev)) { + ret = -ENOBUFS; + pr_err("ucc_hdlc: unable to register hdlc device\n"); + goto free_dev; + } + + return 0; + +free_dev: + free_netdev(dev); +undo_uhdlc_init: + if (utdm) + iounmap(utdm->siram); +unmap_si_regs: + if (utdm) + iounmap(utdm->si_regs); +free_utdm: + if (uhdlc_priv->tsa) + kfree(utdm); +free_uhdlc_priv: + kfree(uhdlc_priv); + return ret; +} + +static int ucc_hdlc_remove(struct platform_device *pdev) +{ + struct ucc_hdlc_private *priv = dev_get_drvdata(&pdev->dev); + + uhdlc_memclean(priv); + + if (priv->utdm->si_regs) { + iounmap(priv->utdm->si_regs); + priv->utdm->si_regs = NULL; + } + + if (priv->utdm->siram) { + iounmap(priv->utdm->siram); + priv->utdm->siram = NULL; + } + kfree(priv); + + dev_info(&pdev->dev, "UCC based hdlc module removed\n"); + + return 0; +} + +static const struct of_device_id fsl_ucc_hdlc_of_match[] = { + { + .compatible = "fsl,ucc-hdlc", + }, + {}, +}; + +MODULE_DEVICE_TABLE(of, fsl_ucc_hdlc_of_match); + +static struct platform_driver ucc_hdlc_driver = { + .probe = ucc_hdlc_probe, + .remove = ucc_hdlc_remove, + .driver = { + .name = DRV_NAME, + .pm = HDLC_PM_OPS, + .of_match_table = fsl_ucc_hdlc_of_match, + }, +}; + +module_platform_driver(ucc_hdlc_driver); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION(DRV_DESC); diff --git a/drivers/net/wan/fsl_ucc_hdlc.h b/drivers/net/wan/fsl_ucc_hdlc.h new file mode 100644 index 000000000..71d5ad0a7 --- /dev/null +++ b/drivers/net/wan/fsl_ucc_hdlc.h @@ -0,0 +1,145 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* Freescale QUICC Engine HDLC Device Driver + * + * Copyright 2014 Freescale Semiconductor Inc. + */ + +#ifndef _UCC_HDLC_H_ +#define _UCC_HDLC_H_ + +#include <linux/kernel.h> +#include <linux/list.h> + +#include <soc/fsl/qe/immap_qe.h> +#include <soc/fsl/qe/qe.h> + +#include <soc/fsl/qe/ucc.h> +#include <soc/fsl/qe/ucc_fast.h> + +/* UCC HDLC event register */ +#define UCCE_HDLC_RX_EVENTS \ +(UCC_HDLC_UCCE_RXF | UCC_HDLC_UCCE_RXB | UCC_HDLC_UCCE_BSY) +#define UCCE_HDLC_TX_EVENTS (UCC_HDLC_UCCE_TXB | UCC_HDLC_UCCE_TXE) + +struct ucc_hdlc_param { + __be16 riptr; + __be16 tiptr; + __be16 res0; + __be16 mrblr; + __be32 rstate; + __be32 rbase; + __be16 rbdstat; + __be16 rbdlen; + __be32 rdptr; + __be32 tstate; + __be32 tbase; + __be16 tbdstat; + __be16 tbdlen; + __be32 tdptr; + __be32 rbptr; + __be32 tbptr; + __be32 rcrc; + __be32 res1; + __be32 tcrc; + __be32 res2; + __be32 res3; + __be32 c_mask; + __be32 c_pres; + __be16 disfc; + __be16 crcec; + __be16 abtsc; + __be16 nmarc; + __be32 max_cnt; + __be16 mflr; + __be16 rfthr; + __be16 rfcnt; + __be16 hmask; + __be16 haddr1; + __be16 haddr2; + __be16 haddr3; + __be16 haddr4; + __be16 ts_tmp; + __be16 tmp_mb; +}; + +struct ucc_hdlc_private { + struct ucc_tdm *utdm; + struct ucc_tdm_info *ut_info; + struct ucc_fast_private *uccf; + struct device *dev; + struct net_device *ndev; + struct napi_struct napi; + struct ucc_fast __iomem *uf_regs; /* UCC Fast registers */ + struct ucc_hdlc_param __iomem *ucc_pram; + u16 tsa; + bool hdlc_busy; + bool loopback; + bool hdlc_bus; + + u8 *tx_buffer; + u8 *rx_buffer; + dma_addr_t dma_tx_addr; + dma_addr_t dma_rx_addr; + + struct qe_bd *tx_bd_base; + struct qe_bd *rx_bd_base; + dma_addr_t dma_tx_bd; + dma_addr_t dma_rx_bd; + struct qe_bd *curtx_bd; + struct qe_bd *currx_bd; + struct qe_bd *dirty_tx; + u16 currx_bdnum; + + struct sk_buff **tx_skbuff; + struct sk_buff **rx_skbuff; + u16 skb_curtx; + u16 skb_currx; + unsigned short 
skb_dirtytx; + + unsigned short tx_ring_size; + unsigned short rx_ring_size; + s32 ucc_pram_offset; + + unsigned short encoding; + unsigned short parity; + unsigned short hmask; + u32 clocking; + spinlock_t lock; /* lock for Tx BD and Tx buffer */ +#ifdef CONFIG_PM + struct ucc_hdlc_param *ucc_pram_bak; + u32 gumr; + u8 guemr; + u32 cmxsi1cr_l, cmxsi1cr_h; + u32 cmxsi1syr; + u32 cmxucr[4]; +#endif +}; + +#define TX_BD_RING_LEN 0x10 +#define RX_BD_RING_LEN 0x20 +#define RX_CLEAN_MAX 0x10 +#define NUM_OF_BUF 4 +#define MAX_RX_BUF_LENGTH (48 * 0x20) +#define MAX_FRAME_LENGTH (MAX_RX_BUF_LENGTH + 8) +#define ALIGNMENT_OF_UCC_HDLC_PRAM 64 +#define SI_BANK_SIZE 128 +#define MAX_HDLC_NUM 4 +#define HDLC_HEAD_LEN 2 +#define HDLC_CRC_SIZE 2 +#define TX_RING_MOD_MASK(size) (size - 1) +#define RX_RING_MOD_MASK(size) (size - 1) + +#define HDLC_HEAD_MASK 0x0000 +#define DEFAULT_HDLC_HEAD 0xff44 +#define DEFAULT_ADDR_MASK 0x00ff +#define DEFAULT_HDLC_ADDR 0x00ff + +#define BMR_GBL 0x20000000 +#define BMR_BIG_ENDIAN 0x10000000 +#define CRC_16BIT_MASK 0x0000F0B8 +#define CRC_16BIT_PRES 0x0000FFFF +#define DEFAULT_RFTHR 1 + +#define DEFAULT_PPP_HEAD 0xff03 + +#endif diff --git a/drivers/net/wan/hd64570.c b/drivers/net/wan/hd64570.c new file mode 100644 index 000000000..0d19e39fe --- /dev/null +++ b/drivers/net/wan/hd64570.c @@ -0,0 +1,724 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Hitachi SCA HD64570 driver for Linux + * + * Copyright (C) 1998-2003 Krzysztof Halasa <khc@pm.waw.pl> + * + * Source of information: Hitachi HD64570 SCA User's Manual + * + * We use the following SCA memory map: + * + * Packet buffer descriptor rings - starting from winbase or win0base: + * rx_ring_buffers * sizeof(pkt_desc) = logical channel #0 RX ring + * tx_ring_buffers * sizeof(pkt_desc) = logical channel #0 TX ring + * rx_ring_buffers * sizeof(pkt_desc) = logical channel #1 RX ring (if used) + * tx_ring_buffers * sizeof(pkt_desc) = logical channel #1 TX ring (if used) + * + * Packet data buffers - starting from winbase + buff_offset: + * rx_ring_buffers * HDLC_MAX_MRU = logical channel #0 RX buffers + * tx_ring_buffers * HDLC_MAX_MRU = logical channel #0 TX buffers + * rx_ring_buffers * HDLC_MAX_MRU = logical channel #1 RX buffers (if used) + * tx_ring_buffers * HDLC_MAX_MRU = logical channel #1 TX buffers (if used) + */ + +#include <linux/bitops.h> +#include <linux/errno.h> +#include <linux/fcntl.h> +#include <linux/hdlc.h> +#include <linux/in.h> +#include <linux/interrupt.h> +#include <linux/ioport.h> +#include <linux/jiffies.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/netdevice.h> +#include <linux/skbuff.h> +#include <linux/string.h> +#include <linux/types.h> +#include <asm/io.h> +#include <linux/uaccess.h> +#include "hd64570.h" + +#define get_msci(port) (phy_node(port) ? MSCI1_OFFSET : MSCI0_OFFSET) +#define get_dmac_rx(port) (phy_node(port) ? DMAC1RX_OFFSET : DMAC0RX_OFFSET) +#define get_dmac_tx(port) (phy_node(port) ? DMAC1TX_OFFSET : DMAC0TX_OFFSET) + +#define SCA_INTR_MSCI(node) (node ? 0x10 : 0x01) +#define SCA_INTR_DMAC_RX(node) (node ? 0x20 : 0x02) +#define SCA_INTR_DMAC_TX(node) (node ? 
0x40 : 0x04) + +static inline struct net_device *port_to_dev(port_t *port) +{ + return port->dev; +} + +static inline int sca_intr_status(card_t *card) +{ + u8 result = 0; + u8 isr0 = sca_in(ISR0, card); + u8 isr1 = sca_in(ISR1, card); + + if (isr1 & 0x03) + result |= SCA_INTR_DMAC_RX(0); + if (isr1 & 0x0C) + result |= SCA_INTR_DMAC_TX(0); + if (isr1 & 0x30) + result |= SCA_INTR_DMAC_RX(1); + if (isr1 & 0xC0) + result |= SCA_INTR_DMAC_TX(1); + if (isr0 & 0x0F) + result |= SCA_INTR_MSCI(0); + if (isr0 & 0xF0) + result |= SCA_INTR_MSCI(1); + + if (!(result & SCA_INTR_DMAC_TX(0))) + if (sca_in(DSR_TX(0), card) & DSR_EOM) + result |= SCA_INTR_DMAC_TX(0); + if (!(result & SCA_INTR_DMAC_TX(1))) + if (sca_in(DSR_TX(1), card) & DSR_EOM) + result |= SCA_INTR_DMAC_TX(1); + + return result; +} + +static inline port_t *dev_to_port(struct net_device *dev) +{ + return dev_to_hdlc(dev)->priv; +} + +static inline u16 next_desc(port_t *port, u16 desc, int transmit) +{ + return (desc + 1) % (transmit ? port_to_card(port)->tx_ring_buffers + : port_to_card(port)->rx_ring_buffers); +} + +static inline u16 desc_abs_number(port_t *port, u16 desc, int transmit) +{ + u16 rx_buffs = port_to_card(port)->rx_ring_buffers; + u16 tx_buffs = port_to_card(port)->tx_ring_buffers; + + desc %= (transmit ? tx_buffs : rx_buffs); // called with "X + 1" etc. + return log_node(port) * (rx_buffs + tx_buffs) + + transmit * rx_buffs + desc; +} + +static inline u16 desc_offset(port_t *port, u16 desc, int transmit) +{ + /* Descriptor offset always fits in 16 bits */ + return desc_abs_number(port, desc, transmit) * sizeof(pkt_desc); +} + +static inline pkt_desc __iomem *desc_address(port_t *port, u16 desc, + int transmit) +{ +#ifdef PAGE0_ALWAYS_MAPPED + return (pkt_desc __iomem *)(win0base(port_to_card(port)) + + desc_offset(port, desc, transmit)); +#else + return (pkt_desc __iomem *)(winbase(port_to_card(port)) + + desc_offset(port, desc, transmit)); +#endif +} + +static inline u32 buffer_offset(port_t *port, u16 desc, int transmit) +{ + return port_to_card(port)->buff_offset + + desc_abs_number(port, desc, transmit) * (u32)HDLC_MAX_MRU; +} + +static inline void sca_set_carrier(port_t *port) +{ + if (!(sca_in(get_msci(port) + ST3, port_to_card(port)) & ST3_DCD)) { +#ifdef DEBUG_LINK + printk(KERN_DEBUG "%s: sca_set_carrier on\n", + port_to_dev(port)->name); +#endif + netif_carrier_on(port_to_dev(port)); + } else { +#ifdef DEBUG_LINK + printk(KERN_DEBUG "%s: sca_set_carrier off\n", + port_to_dev(port)->name); +#endif + netif_carrier_off(port_to_dev(port)); + } +} + +static void sca_init_port(port_t *port) +{ + card_t *card = port_to_card(port); + int transmit, i; + + port->rxin = 0; + port->txin = 0; + port->txlast = 0; + +#ifndef PAGE0_ALWAYS_MAPPED + openwin(card, 0); +#endif + + for (transmit = 0; transmit < 2; transmit++) { + u16 dmac = transmit ? get_dmac_tx(port) : get_dmac_rx(port); + u16 buffs = transmit ? card->tx_ring_buffers + : card->rx_ring_buffers; + + for (i = 0; i < buffs; i++) { + pkt_desc __iomem *desc = desc_address(port, i, transmit); + u16 chain_off = desc_offset(port, i + 1, transmit); + u32 buff_off = buffer_offset(port, i, transmit); + + writew(chain_off, &desc->cp); + writel(buff_off, &desc->bp); + writew(0, &desc->len); + writeb(0, &desc->stat); + } + + /* DMA disable - to halt state */ + sca_out(0, transmit ? DSR_TX(phy_node(port)) : + DSR_RX(phy_node(port)), card); + /* software ABORT - to initial state */ + sca_out(DCR_ABORT, transmit ? 
DCR_TX(phy_node(port)) : + DCR_RX(phy_node(port)), card); + + /* current desc addr */ + sca_out(0, dmac + CPB, card); /* pointer base */ + sca_outw(desc_offset(port, 0, transmit), dmac + CDAL, card); + if (!transmit) + sca_outw(desc_offset(port, buffs - 1, transmit), + dmac + EDAL, card); + else + sca_outw(desc_offset(port, 0, transmit), dmac + EDAL, + card); + + /* clear frame end interrupt counter */ + sca_out(DCR_CLEAR_EOF, transmit ? DCR_TX(phy_node(port)) : + DCR_RX(phy_node(port)), card); + + if (!transmit) { /* Receive */ + /* set buffer length */ + sca_outw(HDLC_MAX_MRU, dmac + BFLL, card); + /* Chain mode, Multi-frame */ + sca_out(0x14, DMR_RX(phy_node(port)), card); + sca_out(DIR_EOME | DIR_BOFE, DIR_RX(phy_node(port)), + card); + /* DMA enable */ + sca_out(DSR_DE, DSR_RX(phy_node(port)), card); + } else { /* Transmit */ + /* Chain mode, Multi-frame */ + sca_out(0x14, DMR_TX(phy_node(port)), card); + /* enable underflow interrupts */ + sca_out(DIR_BOFE, DIR_TX(phy_node(port)), card); + } + } + sca_set_carrier(port); +} + +#ifdef NEED_SCA_MSCI_INTR +/* MSCI interrupt service */ +static inline void sca_msci_intr(port_t *port) +{ + u16 msci = get_msci(port); + card_t *card = port_to_card(port); + u8 stat = sca_in(msci + ST1, card); /* read MSCI ST1 status */ + + /* Reset MSCI TX underrun and CDCD status bit */ + sca_out(stat & (ST1_UDRN | ST1_CDCD), msci + ST1, card); + + if (stat & ST1_UDRN) { + /* TX Underrun error detected */ + port_to_dev(port)->stats.tx_errors++; + port_to_dev(port)->stats.tx_fifo_errors++; + } + + if (stat & ST1_CDCD) + sca_set_carrier(port); +} +#endif + +static inline void sca_rx(card_t *card, port_t *port, pkt_desc __iomem *desc, + u16 rxin) +{ + struct net_device *dev = port_to_dev(port); + struct sk_buff *skb; + u16 len; + u32 buff; + u32 maxlen; + u8 page; + + len = readw(&desc->len); + skb = dev_alloc_skb(len); + if (!skb) { + dev->stats.rx_dropped++; + return; + } + + buff = buffer_offset(port, rxin, 0); + page = buff / winsize(card); + buff = buff % winsize(card); + maxlen = winsize(card) - buff; + + openwin(card, page); + + if (len > maxlen) { + memcpy_fromio(skb->data, winbase(card) + buff, maxlen); + openwin(card, page + 1); + memcpy_fromio(skb->data + maxlen, winbase(card), len - maxlen); + } else { + memcpy_fromio(skb->data, winbase(card) + buff, len); + } + +#ifndef PAGE0_ALWAYS_MAPPED + openwin(card, 0); /* select pkt_desc table page back */ +#endif + skb_put(skb, len); +#ifdef DEBUG_PKT + printk(KERN_DEBUG "%s RX(%i):", dev->name, skb->len); + debug_frame(skb); +#endif + dev->stats.rx_packets++; + dev->stats.rx_bytes += skb->len; + skb->protocol = hdlc_type_trans(skb, dev); + netif_rx(skb); +} + +/* Receive DMA interrupt service */ +static inline void sca_rx_intr(port_t *port) +{ + struct net_device *dev = port_to_dev(port); + u16 dmac = get_dmac_rx(port); + card_t *card = port_to_card(port); + u8 stat = sca_in(DSR_RX(phy_node(port)), card); /* read DMA Status */ + + /* Reset DSR status bits */ + sca_out((stat & (DSR_EOT | DSR_EOM | DSR_BOF | DSR_COF)) | DSR_DWE, + DSR_RX(phy_node(port)), card); + + if (stat & DSR_BOF) + /* Dropped one or more frames */ + dev->stats.rx_over_errors++; + + while (1) { + u32 desc_off = desc_offset(port, port->rxin, 0); + pkt_desc __iomem *desc; + u32 cda = sca_inw(dmac + CDAL, card); + + if (cda >= desc_off && (cda < desc_off + sizeof(pkt_desc))) + break; /* No frame received */ + + desc = desc_address(port, port->rxin, 0); + stat = readb(&desc->stat); + if (!(stat & ST_RX_EOM)) + port->rxpart = 1; /* partial 
frame received */ + else if ((stat & ST_ERROR_MASK) || port->rxpart) { + dev->stats.rx_errors++; + if (stat & ST_RX_OVERRUN) + dev->stats.rx_fifo_errors++; + else if ((stat & (ST_RX_SHORT | ST_RX_ABORT | + ST_RX_RESBIT)) || port->rxpart) + dev->stats.rx_frame_errors++; + else if (stat & ST_RX_CRC) + dev->stats.rx_crc_errors++; + if (stat & ST_RX_EOM) + port->rxpart = 0; /* received last fragment */ + } else { + sca_rx(card, port, desc, port->rxin); + } + + /* Set new error descriptor address */ + sca_outw(desc_off, dmac + EDAL, card); + port->rxin = next_desc(port, port->rxin, 0); + } + + /* make sure RX DMA is enabled */ + sca_out(DSR_DE, DSR_RX(phy_node(port)), card); +} + +/* Transmit DMA interrupt service */ +static inline void sca_tx_intr(port_t *port) +{ + struct net_device *dev = port_to_dev(port); + u16 dmac = get_dmac_tx(port); + card_t *card = port_to_card(port); + u8 stat; + + spin_lock(&port->lock); + + stat = sca_in(DSR_TX(phy_node(port)), card); /* read DMA Status */ + + /* Reset DSR status bits */ + sca_out((stat & (DSR_EOT | DSR_EOM | DSR_BOF | DSR_COF)) | DSR_DWE, + DSR_TX(phy_node(port)), card); + + while (1) { + pkt_desc __iomem *desc; + + u32 desc_off = desc_offset(port, port->txlast, 1); + u32 cda = sca_inw(dmac + CDAL, card); + + if (cda >= desc_off && (cda < desc_off + sizeof(pkt_desc))) + break; /* Transmitter is/will_be sending this frame */ + + desc = desc_address(port, port->txlast, 1); + dev->stats.tx_packets++; + dev->stats.tx_bytes += readw(&desc->len); + writeb(0, &desc->stat); /* Free descriptor */ + port->txlast = next_desc(port, port->txlast, 1); + } + + netif_wake_queue(dev); + spin_unlock(&port->lock); +} + +static irqreturn_t sca_intr(int irq, void *dev_id) +{ + card_t *card = dev_id; + int i; + u8 stat; + int handled = 0; + u8 page = sca_get_page(card); + + while ((stat = sca_intr_status(card)) != 0) { + handled = 1; + for (i = 0; i < 2; i++) { + port_t *port = get_port(card, i); + + if (port) { + if (stat & SCA_INTR_MSCI(i)) + sca_msci_intr(port); + + if (stat & SCA_INTR_DMAC_RX(i)) + sca_rx_intr(port); + + if (stat & SCA_INTR_DMAC_TX(i)) + sca_tx_intr(port); + } + } + } + + openwin(card, page); /* Restore original page */ + return IRQ_RETVAL(handled); +} + +static void sca_set_port(port_t *port) +{ + card_t *card = port_to_card(port); + u16 msci = get_msci(port); + u8 md2 = sca_in(msci + MD2, card); + unsigned int tmc, br = 10, brv = 1024; + + if (port->settings.clock_rate > 0) { + /* Try lower br for better accuracy*/ + do { + br--; + brv >>= 1; /* brv = 2^9 = 512 max in specs */ + + /* Baud Rate = CLOCK_BASE / TMC / 2^BR */ + tmc = CLOCK_BASE / brv / port->settings.clock_rate; + } while (br > 1 && tmc <= 128); + + if (tmc < 1) { + tmc = 1; + br = 0; /* For baud=CLOCK_BASE we use tmc=1 br=0 */ + brv = 1; + } else if (tmc > 255) { + tmc = 256; /* tmc=0 means 256 - low baud rates */ + } + + port->settings.clock_rate = CLOCK_BASE / brv / tmc; + } else { + br = 9; /* Minimum clock rate */ + tmc = 256; /* 8bit = 0 */ + port->settings.clock_rate = CLOCK_BASE / (256 * 512); + } + + port->rxs = (port->rxs & ~CLK_BRG_MASK) | br; + port->txs = (port->txs & ~CLK_BRG_MASK) | br; + port->tmc = tmc; + + /* baud divisor - time constant*/ + sca_out(port->tmc, msci + TMC, card); + + /* Set BRG bits */ + sca_out(port->rxs, msci + RXS, card); + sca_out(port->txs, msci + TXS, card); + + if (port->settings.loopback) + md2 |= MD2_LOOPBACK; + else + md2 &= ~MD2_LOOPBACK; + + sca_out(md2, msci + MD2, card); +} + +static void sca_open(struct net_device *dev) +{ + port_t 
*port = dev_to_port(dev); + card_t *card = port_to_card(port); + u16 msci = get_msci(port); + u8 md0, md2; + + switch (port->encoding) { + case ENCODING_NRZ: + md2 = MD2_NRZ; + break; + case ENCODING_NRZI: + md2 = MD2_NRZI; + break; + case ENCODING_FM_MARK: + md2 = MD2_FM_MARK; + break; + case ENCODING_FM_SPACE: + md2 = MD2_FM_SPACE; + break; + default: + md2 = MD2_MANCHESTER; + } + + if (port->settings.loopback) + md2 |= MD2_LOOPBACK; + + switch (port->parity) { + case PARITY_CRC16_PR0: + md0 = MD0_HDLC | MD0_CRC_16_0; + break; + case PARITY_CRC16_PR1: + md0 = MD0_HDLC | MD0_CRC_16; + break; + case PARITY_CRC16_PR0_CCITT: + md0 = MD0_HDLC | MD0_CRC_ITU_0; + break; + case PARITY_CRC16_PR1_CCITT: + md0 = MD0_HDLC | MD0_CRC_ITU; + break; + default: + md0 = MD0_HDLC | MD0_CRC_NONE; + } + + sca_out(CMD_RESET, msci + CMD, card); + sca_out(md0, msci + MD0, card); + sca_out(0x00, msci + MD1, card); /* no address field check */ + sca_out(md2, msci + MD2, card); + sca_out(0x7E, msci + IDL, card); /* flag character 0x7E */ + sca_out(CTL_IDLE, msci + CTL, card); + + /* Allow at least 8 bytes before requesting RX DMA operation */ + /* TX with higher priority and possibly with shorter transfers */ + sca_out(0x07, msci + RRC, card); /* +1=RXRDY/DMA activation condition*/ + sca_out(0x10, msci + TRC0, card); /* = TXRDY/DMA activation condition*/ + sca_out(0x14, msci + TRC1, card); /* +1=TXRDY/DMA deactiv condition */ + +/* We're using the following interrupts: + * - TXINT (DMAC completed all transmissions, underrun or DCD change) + * - all DMA interrupts + */ + sca_set_carrier(port); + + /* MSCI TX INT and RX INT A IRQ enable */ + sca_out(IE0_TXINT | IE0_RXINTA, msci + IE0, card); + sca_out(IE1_UDRN | IE1_CDCD, msci + IE1, card); + sca_out(sca_in(IER0, card) | (phy_node(port) ? 0xC0 : 0x0C), + IER0, card); /* TXINT and RXINT */ + /* enable DMA IRQ */ + sca_out(sca_in(IER1, card) | (phy_node(port) ? 0xF0 : 0x0F), + IER1, card); + + sca_out(port->tmc, msci + TMC, card); /* Restore registers */ + sca_out(port->rxs, msci + RXS, card); + sca_out(port->txs, msci + TXS, card); + sca_out(CMD_TX_ENABLE, msci + CMD, card); + sca_out(CMD_RX_ENABLE, msci + CMD, card); + + netif_start_queue(dev); +} + +static void sca_close(struct net_device *dev) +{ + port_t *port = dev_to_port(dev); + card_t *card = port_to_card(port); + + /* reset channel */ + sca_out(CMD_RESET, get_msci(port) + CMD, port_to_card(port)); + /* disable MSCI interrupts */ + sca_out(sca_in(IER0, card) & (phy_node(port) ? 0x0F : 0xF0), + IER0, card); + /* disable DMA interrupts */ + sca_out(sca_in(IER1, card) & (phy_node(port) ? 
0x0F : 0xF0), + IER1, card); + + netif_stop_queue(dev); +} + +static int sca_attach(struct net_device *dev, unsigned short encoding, + unsigned short parity) +{ + if (encoding != ENCODING_NRZ && + encoding != ENCODING_NRZI && + encoding != ENCODING_FM_MARK && + encoding != ENCODING_FM_SPACE && + encoding != ENCODING_MANCHESTER) + return -EINVAL; + + if (parity != PARITY_NONE && + parity != PARITY_CRC16_PR0 && + parity != PARITY_CRC16_PR1 && + parity != PARITY_CRC16_PR0_CCITT && + parity != PARITY_CRC16_PR1_CCITT) + return -EINVAL; + + dev_to_port(dev)->encoding = encoding; + dev_to_port(dev)->parity = parity; + return 0; +} + +#ifdef DEBUG_RINGS +static void sca_dump_rings(struct net_device *dev) +{ + port_t *port = dev_to_port(dev); + card_t *card = port_to_card(port); + u16 cnt; +#ifndef PAGE0_ALWAYS_MAPPED + u8 page = sca_get_page(card); + + openwin(card, 0); +#endif + + printk(KERN_DEBUG "RX ring: CDA=%u EDA=%u DSR=%02X in=%u %sactive", + sca_inw(get_dmac_rx(port) + CDAL, card), + sca_inw(get_dmac_rx(port) + EDAL, card), + sca_in(DSR_RX(phy_node(port)), card), port->rxin, + sca_in(DSR_RX(phy_node(port)), card) & DSR_DE ? "" : "in"); + for (cnt = 0; cnt < port_to_card(port)->rx_ring_buffers; cnt++) + pr_cont(" %02X", readb(&(desc_address(port, cnt, 0)->stat))); + pr_cont("\n"); + + printk(KERN_DEBUG "TX ring: CDA=%u EDA=%u DSR=%02X in=%u " + "last=%u %sactive", + sca_inw(get_dmac_tx(port) + CDAL, card), + sca_inw(get_dmac_tx(port) + EDAL, card), + sca_in(DSR_TX(phy_node(port)), card), port->txin, port->txlast, + sca_in(DSR_TX(phy_node(port)), card) & DSR_DE ? "" : "in"); + + for (cnt = 0; cnt < port_to_card(port)->tx_ring_buffers; cnt++) + pr_cont(" %02X", readb(&(desc_address(port, cnt, 1)->stat))); + pr_cont("\n"); + + printk(KERN_DEBUG "MSCI: MD: %02x %02x %02x, ST: %02x %02x %02x %02x," + " FST: %02x CST: %02x %02x\n", + sca_in(get_msci(port) + MD0, card), + sca_in(get_msci(port) + MD1, card), + sca_in(get_msci(port) + MD2, card), + sca_in(get_msci(port) + ST0, card), + sca_in(get_msci(port) + ST1, card), + sca_in(get_msci(port) + ST2, card), + sca_in(get_msci(port) + ST3, card), + sca_in(get_msci(port) + FST, card), + sca_in(get_msci(port) + CST0, card), + sca_in(get_msci(port) + CST1, card)); + + printk(KERN_DEBUG "ISR: %02x %02x %02x\n", sca_in(ISR0, card), + sca_in(ISR1, card), sca_in(ISR2, card)); + +#ifndef PAGE0_ALWAYS_MAPPED + openwin(card, page); /* Restore original page */ +#endif +} +#endif /* DEBUG_RINGS */ + +static netdev_tx_t sca_xmit(struct sk_buff *skb, struct net_device *dev) +{ + port_t *port = dev_to_port(dev); + card_t *card = port_to_card(port); + pkt_desc __iomem *desc; + u32 buff, len; + u8 page; + u32 maxlen; + + spin_lock_irq(&port->lock); + + desc = desc_address(port, port->txin + 1, 1); + BUG_ON(readb(&desc->stat)); /* previous xmit should stop queue */ + +#ifdef DEBUG_PKT + printk(KERN_DEBUG "%s TX(%i):", dev->name, skb->len); + debug_frame(skb); +#endif + + desc = desc_address(port, port->txin, 1); + buff = buffer_offset(port, port->txin, 1); + len = skb->len; + page = buff / winsize(card); + buff = buff % winsize(card); + maxlen = winsize(card) - buff; + + openwin(card, page); + if (len > maxlen) { + memcpy_toio(winbase(card) + buff, skb->data, maxlen); + openwin(card, page + 1); + memcpy_toio(winbase(card), skb->data + maxlen, len - maxlen); + } else { + memcpy_toio(winbase(card) + buff, skb->data, len); + } + +#ifndef PAGE0_ALWAYS_MAPPED + openwin(card, 0); /* select pkt_desc table page back */ +#endif + writew(len, &desc->len); + writeb(ST_TX_EOM, 
&desc->stat); + + port->txin = next_desc(port, port->txin, 1); + sca_outw(desc_offset(port, port->txin, 1), + get_dmac_tx(port) + EDAL, card); + + sca_out(DSR_DE, DSR_TX(phy_node(port)), card); /* Enable TX DMA */ + + desc = desc_address(port, port->txin + 1, 1); + if (readb(&desc->stat)) /* allow 1 packet gap */ + netif_stop_queue(dev); + + spin_unlock_irq(&port->lock); + + dev_kfree_skb(skb); + return NETDEV_TX_OK; +} + +#ifdef NEED_DETECT_RAM +static u32 sca_detect_ram(card_t *card, u8 __iomem *rambase, u32 ramsize) +{ + /* Round RAM size to 32 bits, fill from end to start */ + u32 i = ramsize &= ~3; + u32 size = winsize(card); + + openwin(card, (i - 4) / size); /* select last window */ + + do { + i -= 4; + if ((i + 4) % size == 0) + openwin(card, i / size); + writel(i ^ 0x12345678, rambase + i % size); + } while (i > 0); + + for (i = 0; i < ramsize ; i += 4) { + if (i % size == 0) + openwin(card, i / size); + + if (readl(rambase + i % size) != (i ^ 0x12345678)) + break; + } + + return i; +} +#endif /* NEED_DETECT_RAM */ + +static void sca_init(card_t *card, int wait_states) +{ + sca_out(wait_states, WCRL, card); /* Wait Control */ + sca_out(wait_states, WCRM, card); + sca_out(wait_states, WCRH, card); + + sca_out(0, DMER, card); /* DMA Master disable */ + sca_out(0x03, PCR, card); /* DMA priority */ + sca_out(0, DSR_RX(0), card); /* DMA disable - to halt state */ + sca_out(0, DSR_TX(0), card); + sca_out(0, DSR_RX(1), card); + sca_out(0, DSR_TX(1), card); + sca_out(DMER_DME, DMER, card); /* DMA Master enable */ +} diff --git a/drivers/net/wan/hd64570.h b/drivers/net/wan/hd64570.h new file mode 100644 index 000000000..24529996c --- /dev/null +++ b/drivers/net/wan/hd64570.h @@ -0,0 +1,242 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __HD64570_H +#define __HD64570_H + +/* SCA HD64570 register definitions - all addresses for mode 0 (8086 MPU) + and 1 (64180 MPU). For modes 2 and 3, XOR the address with 0x01. 
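+ + Worked example: MSCI channel 0's MD0 register sits at MSCI0_OFFSET (0x20) + plus MD0 (0x0E), i.e. 0x2E, in modes 0 and 1; in modes 2 and 3 it is + addressed at 0x2E ^ 0x01 = 0x2F.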
+ + Source: HD64570 SCA User's Manual +*/ + + + +/* SCA Control Registers */ +#define LPR 0x00 /* Low Power */ + +/* Wait controller registers */ +#define PABR0 0x02 /* Physical Address Boundary 0 */ +#define PABR1 0x03 /* Physical Address Boundary 1 */ +#define WCRL 0x04 /* Wait Control L */ +#define WCRM 0x05 /* Wait Control M */ +#define WCRH 0x06 /* Wait Control H */ + +#define PCR 0x08 /* DMA Priority Control */ +#define DMER 0x09 /* DMA Master Enable */ + + +/* Interrupt registers */ +#define ISR0 0x10 /* Interrupt Status 0 */ +#define ISR1 0x11 /* Interrupt Status 1 */ +#define ISR2 0x12 /* Interrupt Status 2 */ + +#define IER0 0x14 /* Interrupt Enable 0 */ +#define IER1 0x15 /* Interrupt Enable 1 */ +#define IER2 0x16 /* Interrupt Enable 2 */ + +#define ITCR 0x18 /* Interrupt Control */ +#define IVR 0x1A /* Interrupt Vector */ +#define IMVR 0x1C /* Interrupt Modified Vector */ + + + +/* MSCI channel (port) 0 registers - offset 0x20 + MSCI channel (port) 1 registers - offset 0x40 */ + +#define MSCI0_OFFSET 0x20 +#define MSCI1_OFFSET 0x40 + +#define TRBL 0x00 /* TX/RX buffer L */ +#define TRBH 0x01 /* TX/RX buffer H */ +#define ST0 0x02 /* Status 0 */ +#define ST1 0x03 /* Status 1 */ +#define ST2 0x04 /* Status 2 */ +#define ST3 0x05 /* Status 3 */ +#define FST 0x06 /* Frame Status */ +#define IE0 0x08 /* Interrupt Enable 0 */ +#define IE1 0x09 /* Interrupt Enable 1 */ +#define IE2 0x0A /* Interrupt Enable 2 */ +#define FIE 0x0B /* Frame Interrupt Enable */ +#define CMD 0x0C /* Command */ +#define MD0 0x0E /* Mode 0 */ +#define MD1 0x0F /* Mode 1 */ +#define MD2 0x10 /* Mode 2 */ +#define CTL 0x11 /* Control */ +#define SA0 0x12 /* Sync/Address 0 */ +#define SA1 0x13 /* Sync/Address 1 */ +#define IDL 0x14 /* Idle Pattern */ +#define TMC 0x15 /* Time Constant */ +#define RXS 0x16 /* RX Clock Source */ +#define TXS 0x17 /* TX Clock Source */ +#define TRC0 0x18 /* TX Ready Control 0 */ +#define TRC1 0x19 /* TX Ready Control 1 */ +#define RRC 0x1A /* RX Ready Control */ +#define CST0 0x1C /* Current Status 0 */ +#define CST1 0x1D /* Current Status 1 */ + + +/* Timer channel 0 (port 0 RX) registers - offset 0x60 + Timer channel 1 (port 0 TX) registers - offset 0x68 + Timer channel 2 (port 1 RX) registers - offset 0x70 + Timer channel 3 (port 1 TX) registers - offset 0x78 +*/ + +#define TIMER0RX_OFFSET 0x60 +#define TIMER0TX_OFFSET 0x68 +#define TIMER1RX_OFFSET 0x70 +#define TIMER1TX_OFFSET 0x78 + +#define TCNTL 0x00 /* Up-counter L */ +#define TCNTH 0x01 /* Up-counter H */ +#define TCONRL 0x02 /* Constant L */ +#define TCONRH 0x03 /* Constant H */ +#define TCSR 0x04 /* Control/Status */ +#define TEPR 0x05 /* Expand Prescale */ + + + +/* DMA channel 0 (port 0 RX) registers - offset 0x80 + DMA channel 1 (port 0 TX) registers - offset 0xA0 + DMA channel 2 (port 1 RX) registers - offset 0xC0 + DMA channel 3 (port 1 TX) registers - offset 0xE0 +*/ + +#define DMAC0RX_OFFSET 0x80 +#define DMAC0TX_OFFSET 0xA0 +#define DMAC1RX_OFFSET 0xC0 +#define DMAC1TX_OFFSET 0xE0 + +#define BARL 0x00 /* Buffer Address L (chained block) */ +#define BARH 0x01 /* Buffer Address H (chained block) */ +#define BARB 0x02 /* Buffer Address B (chained block) */ + +#define DARL 0x00 /* RX Destination Addr L (single block) */ +#define DARH 0x01 /* RX Destination Addr H (single block) */ +#define DARB 0x02 /* RX Destination Addr B (single block) */ + +#define SARL 0x04 /* TX Source Address L (single block) */ +#define SARH 0x05 /* TX Source Address H (single block) */ +#define SARB 0x06 /* TX Source Address B (single 
block) */ + +#define CPB 0x06 /* Chain Pointer Base (chained block) */ + +#define CDAL 0x08 /* Current Descriptor Addr L (chained block) */ +#define CDAH 0x09 /* Current Descriptor Addr H (chained block) */ +#define EDAL 0x0A /* Error Descriptor Addr L (chained block) */ +#define EDAH 0x0B /* Error Descriptor Addr H (chained block) */ +#define BFLL 0x0C /* RX Receive Buffer Length L (chained block)*/ +#define BFLH 0x0D /* RX Receive Buffer Length H (chained block)*/ +#define BCRL 0x0E /* Byte Count L */ +#define BCRH 0x0F /* Byte Count H */ +#define DSR 0x10 /* DMA Status */ +#define DSR_RX(node) (DSR + (node ? DMAC1RX_OFFSET : DMAC0RX_OFFSET)) +#define DSR_TX(node) (DSR + (node ? DMAC1TX_OFFSET : DMAC0TX_OFFSET)) +#define DMR 0x11 /* DMA Mode */ +#define DMR_RX(node) (DMR + (node ? DMAC1RX_OFFSET : DMAC0RX_OFFSET)) +#define DMR_TX(node) (DMR + (node ? DMAC1TX_OFFSET : DMAC0TX_OFFSET)) +#define FCT 0x13 /* Frame End Interrupt Counter */ +#define FCT_RX(node) (FCT + (node ? DMAC1RX_OFFSET : DMAC0RX_OFFSET)) +#define FCT_TX(node) (FCT + (node ? DMAC1TX_OFFSET : DMAC0TX_OFFSET)) +#define DIR 0x14 /* DMA Interrupt Enable */ +#define DIR_RX(node) (DIR + (node ? DMAC1RX_OFFSET : DMAC0RX_OFFSET)) +#define DIR_TX(node) (DIR + (node ? DMAC1TX_OFFSET : DMAC0TX_OFFSET)) +#define DCR 0x15 /* DMA Command */ +#define DCR_RX(node) (DCR + (node ? DMAC1RX_OFFSET : DMAC0RX_OFFSET)) +#define DCR_TX(node) (DCR + (node ? DMAC1TX_OFFSET : DMAC0TX_OFFSET)) + + + + +/* Descriptor Structure */ + +typedef struct { + u16 cp; /* Chain Pointer */ + u32 bp; /* Buffer Pointer (24 bits) */ + u16 len; /* Data Length */ + u8 stat; /* Status */ + u8 unused; /* pads to 2-byte boundary */ +}__packed pkt_desc; + + +/* Packet Descriptor Status bits */ + +#define ST_TX_EOM 0x80 /* End of frame */ +#define ST_TX_EOT 0x01 /* End of transmission */ + +#define ST_RX_EOM 0x80 /* End of frame */ +#define ST_RX_SHORT 0x40 /* Short frame */ +#define ST_RX_ABORT 0x20 /* Abort */ +#define ST_RX_RESBIT 0x10 /* Residual bit */ +#define ST_RX_OVERRUN 0x08 /* Overrun */ +#define ST_RX_CRC 0x04 /* CRC */ + +#define ST_ERROR_MASK 0x7C + +#define DIR_EOTE 0x80 /* Transfer completed */ +#define DIR_EOME 0x40 /* Frame Transfer Completed (chained-block) */ +#define DIR_BOFE 0x20 /* Buffer Overflow/Underflow (chained-block)*/ +#define DIR_COFE 0x10 /* Counter Overflow (chained-block) */ + + +#define DSR_EOT 0x80 /* Transfer completed */ +#define DSR_EOM 0x40 /* Frame Transfer Completed (chained-block) */ +#define DSR_BOF 0x20 /* Buffer Overflow/Underflow (chained-block)*/ +#define DSR_COF 0x10 /* Counter Overflow (chained-block) */ +#define DSR_DE 0x02 /* DMA Enable */ +#define DSR_DWE 0x01 /* DMA Write Disable */ + +/* DMA Master Enable Register (DMER) bits */ +#define DMER_DME 0x80 /* DMA Master Enable */ + + +#define CMD_RESET 0x21 /* Reset Channel */ +#define CMD_TX_ENABLE 0x02 /* Start transmitter */ +#define CMD_RX_ENABLE 0x12 /* Start receiver */ + +#define MD0_HDLC 0x80 /* Bit-sync HDLC mode */ +#define MD0_CRC_ENA 0x04 /* Enable CRC code calculation */ +#define MD0_CRC_CCITT 0x02 /* CCITT CRC instead of CRC-16 */ +#define MD0_CRC_PR1 0x01 /* Initial all-ones instead of all-zeros */ + +#define MD0_CRC_NONE 0x00 +#define MD0_CRC_16_0 0x04 +#define MD0_CRC_16 0x05 +#define MD0_CRC_ITU_0 0x06 +#define MD0_CRC_ITU 0x07 + +#define MD2_NRZ 0x00 +#define MD2_NRZI 0x20 +#define MD2_MANCHESTER 0x80 +#define MD2_FM_MARK 0xA0 +#define MD2_FM_SPACE 0xC0 +#define MD2_LOOPBACK 0x03 /* Local data Loopback */ + +#define CTL_NORTS 0x01 +#define CTL_IDLE 0x10 
/* Transmit an idle pattern */ +#define CTL_UDRNC 0x20 /* Idle after CRC or FCS+flag transmission */ + +#define ST0_TXRDY 0x02 /* TX ready */ +#define ST0_RXRDY 0x01 /* RX ready */ + +#define ST1_UDRN 0x80 /* MSCI TX underrun */ +#define ST1_CDCD 0x04 /* DCD level changed */ + +#define ST3_CTS 0x08 /* modem input - /CTS */ +#define ST3_DCD 0x04 /* modem input - /DCD */ + +#define IE0_TXINT 0x80 /* TX INT MSCI interrupt enable */ +#define IE0_RXINTA 0x40 /* RX INT A MSCI interrupt enable */ +#define IE1_UDRN 0x80 /* TX underrun MSCI interrupt enable */ +#define IE1_CDCD 0x04 /* DCD level changed */ + +#define DCR_ABORT 0x01 /* Software abort command */ +#define DCR_CLEAR_EOF 0x02 /* Clear EOF interrupt */ + +/* TX and RX Clock Source - RXS and TXS */ +#define CLK_BRG_MASK 0x0F +#define CLK_LINE_RX 0x00 /* TX/RX clock line input */ +#define CLK_LINE_TX 0x00 /* TX/RX line input */ +#define CLK_BRG_RX 0x40 /* internal baud rate generator */ +#define CLK_BRG_TX 0x40 /* internal baud rate generator */ +#define CLK_RXCLK_TX 0x60 /* TX clock from RX clock */ + +#endif diff --git a/drivers/net/wan/hd64572.c b/drivers/net/wan/hd64572.c new file mode 100644 index 000000000..534369ffe --- /dev/null +++ b/drivers/net/wan/hd64572.c @@ -0,0 +1,636 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Hitachi (now Renesas) SCA-II HD64572 driver for Linux + * + * Copyright (C) 1998-2008 Krzysztof Halasa <khc@pm.waw.pl> + * + * Source of information: HD64572 SCA-II User's Manual + * + * We use the following SCA memory map: + * + * Packet buffer descriptor rings - starting from card->rambase: + * rx_ring_buffers * sizeof(pkt_desc) = logical channel #0 RX ring + * tx_ring_buffers * sizeof(pkt_desc) = logical channel #0 TX ring + * rx_ring_buffers * sizeof(pkt_desc) = logical channel #1 RX ring (if used) + * tx_ring_buffers * sizeof(pkt_desc) = logical channel #1 TX ring (if used) + * + * Packet data buffers - starting from card->rambase + buff_offset: + * rx_ring_buffers * HDLC_MAX_MRU = logical channel #0 RX buffers + * tx_ring_buffers * HDLC_MAX_MRU = logical channel #0 TX buffers + * rx_ring_buffers * HDLC_MAX_MRU = logical channel #1 RX buffers (if used) + * tx_ring_buffers * HDLC_MAX_MRU = logical channel #1 TX buffers (if used) + */ + +#include <linux/bitops.h> +#include <linux/errno.h> +#include <linux/fcntl.h> +#include <linux/hdlc.h> +#include <linux/in.h> +#include <linux/interrupt.h> +#include <linux/ioport.h> +#include <linux/jiffies.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/netdevice.h> +#include <linux/skbuff.h> +#include <linux/string.h> +#include <linux/types.h> +#include <asm/io.h> +#include <linux/uaccess.h> +#include "hd64572.h" + +#define NAPI_WEIGHT 16 + +#define get_msci(port) ((port)->chan ? MSCI1_OFFSET : MSCI0_OFFSET) +#define get_dmac_rx(port) ((port)->chan ? DMAC1RX_OFFSET : DMAC0RX_OFFSET) +#define get_dmac_tx(port) ((port)->chan ? 
DMAC1TX_OFFSET : DMAC0TX_OFFSET) + +#define sca_in(reg, card) readb((card)->scabase + (reg)) +#define sca_out(value, reg, card) writeb(value, (card)->scabase + (reg)) +#define sca_inw(reg, card) readw((card)->scabase + (reg)) +#define sca_outw(value, reg, card) writew(value, (card)->scabase + (reg)) +#define sca_inl(reg, card) readl((card)->scabase + (reg)) +#define sca_outl(value, reg, card) writel(value, (card)->scabase + (reg)) + +static int sca_poll(struct napi_struct *napi, int budget); + +static inline port_t *dev_to_port(struct net_device *dev) +{ + return dev_to_hdlc(dev)->priv; +} + +static inline void enable_intr(port_t *port) +{ + /* enable DMIB and MSCI RXINTA interrupts */ + sca_outl(sca_inl(IER0, port->card) | + (port->chan ? 0x08002200 : 0x00080022), IER0, port->card); +} + +static inline void disable_intr(port_t *port) +{ + sca_outl(sca_inl(IER0, port->card) & + (port->chan ? 0x00FF00FF : 0xFF00FF00), IER0, port->card); +} + +static inline u16 desc_abs_number(port_t *port, u16 desc, int transmit) +{ + u16 rx_buffs = port->card->rx_ring_buffers; + u16 tx_buffs = port->card->tx_ring_buffers; + + desc %= (transmit ? tx_buffs : rx_buffs); // called with "X + 1" etc. + return port->chan * (rx_buffs + tx_buffs) + transmit * rx_buffs + desc; +} + +static inline u16 desc_offset(port_t *port, u16 desc, int transmit) +{ + /* Descriptor offset always fits in 16 bits */ + return desc_abs_number(port, desc, transmit) * sizeof(pkt_desc); +} + +static inline pkt_desc __iomem *desc_address(port_t *port, u16 desc, + int transmit) +{ + return (pkt_desc __iomem *)(port->card->rambase + + desc_offset(port, desc, transmit)); +} + +static inline u32 buffer_offset(port_t *port, u16 desc, int transmit) +{ + return port->card->buff_offset + + desc_abs_number(port, desc, transmit) * (u32)HDLC_MAX_MRU; +} + +static inline void sca_set_carrier(port_t *port) +{ + if (!(sca_in(get_msci(port) + ST3, port->card) & ST3_DCD)) { +#ifdef DEBUG_LINK + printk(KERN_DEBUG "%s: sca_set_carrier on\n", + port->netdev.name); +#endif + netif_carrier_on(port->netdev); + } else { +#ifdef DEBUG_LINK + printk(KERN_DEBUG "%s: sca_set_carrier off\n", + port->netdev.name); +#endif + netif_carrier_off(port->netdev); + } +} + +static void sca_init_port(port_t *port) +{ + card_t *card = port->card; + u16 dmac_rx = get_dmac_rx(port), dmac_tx = get_dmac_tx(port); + int transmit, i; + + port->rxin = 0; + port->txin = 0; + port->txlast = 0; + + for (transmit = 0; transmit < 2; transmit++) { + u16 buffs = transmit ? 
card->tx_ring_buffers + : card->rx_ring_buffers; + + for (i = 0; i < buffs; i++) { + pkt_desc __iomem *desc = desc_address(port, i, transmit); + u16 chain_off = desc_offset(port, i + 1, transmit); + u32 buff_off = buffer_offset(port, i, transmit); + + writel(chain_off, &desc->cp); + writel(buff_off, &desc->bp); + writew(0, &desc->len); + writeb(0, &desc->stat); + } + } + + /* DMA disable - to halt state */ + sca_out(0, DSR_RX(port->chan), card); + sca_out(0, DSR_TX(port->chan), card); + + /* software ABORT - to initial state */ + sca_out(DCR_ABORT, DCR_RX(port->chan), card); + sca_out(DCR_ABORT, DCR_TX(port->chan), card); + + /* current desc addr */ + sca_outl(desc_offset(port, 0, 0), dmac_rx + CDAL, card); + sca_outl(desc_offset(port, card->tx_ring_buffers - 1, 0), + dmac_rx + EDAL, card); + sca_outl(desc_offset(port, 0, 1), dmac_tx + CDAL, card); + sca_outl(desc_offset(port, 0, 1), dmac_tx + EDAL, card); + + /* clear frame end interrupt counter */ + sca_out(DCR_CLEAR_EOF, DCR_RX(port->chan), card); + sca_out(DCR_CLEAR_EOF, DCR_TX(port->chan), card); + + /* Receive */ + sca_outw(HDLC_MAX_MRU, dmac_rx + BFLL, card); /* set buffer length */ + sca_out(0x14, DMR_RX(port->chan), card); /* Chain mode, Multi-frame */ + sca_out(DIR_EOME, DIR_RX(port->chan), card); /* enable interrupts */ + sca_out(DSR_DE, DSR_RX(port->chan), card); /* DMA enable */ + + /* Transmit */ + sca_out(0x14, DMR_TX(port->chan), card); /* Chain mode, Multi-frame */ + sca_out(DIR_EOME, DIR_TX(port->chan), card); /* enable interrupts */ + + sca_set_carrier(port); + netif_napi_add_weight(port->netdev, &port->napi, sca_poll, + NAPI_WEIGHT); +} + +/* MSCI interrupt service */ +static inline void sca_msci_intr(port_t *port) +{ + u16 msci = get_msci(port); + card_t *card = port->card; + + if (sca_in(msci + ST1, card) & ST1_CDCD) { + /* Reset MSCI CDCD status bit */ + sca_out(ST1_CDCD, msci + ST1, card); + sca_set_carrier(port); + } +} + +static inline void sca_rx(card_t *card, port_t *port, pkt_desc __iomem *desc, + u16 rxin) +{ + struct net_device *dev = port->netdev; + struct sk_buff *skb; + u16 len; + u32 buff; + + len = readw(&desc->len); + skb = dev_alloc_skb(len); + if (!skb) { + dev->stats.rx_dropped++; + return; + } + + buff = buffer_offset(port, rxin, 0); + memcpy_fromio(skb->data, card->rambase + buff, len); + + skb_put(skb, len); +#ifdef DEBUG_PKT + printk(KERN_DEBUG "%s RX(%i):", dev->name, skb->len); + debug_frame(skb); +#endif + dev->stats.rx_packets++; + dev->stats.rx_bytes += skb->len; + skb->protocol = hdlc_type_trans(skb, dev); + netif_receive_skb(skb); +} + +/* Receive DMA service */ +static inline int sca_rx_done(port_t *port, int budget) +{ + struct net_device *dev = port->netdev; + u16 dmac = get_dmac_rx(port); + card_t *card = port->card; + u8 stat = sca_in(DSR_RX(port->chan), card); /* read DMA Status */ + int received = 0; + + /* Reset DSR status bits */ + sca_out((stat & (DSR_EOT | DSR_EOM | DSR_BOF | DSR_COF)) | DSR_DWE, + DSR_RX(port->chan), card); + + if (stat & DSR_BOF) + /* Dropped one or more frames */ + dev->stats.rx_over_errors++; + + while (received < budget) { + u32 desc_off = desc_offset(port, port->rxin, 0); + pkt_desc __iomem *desc; + u32 cda = sca_inl(dmac + CDAL, card); + + if ((cda >= desc_off) && (cda < desc_off + sizeof(pkt_desc))) + break; /* No frame received */ + + desc = desc_address(port, port->rxin, 0); + stat = readb(&desc->stat); + if (!(stat & ST_RX_EOM)) + port->rxpart = 1; /* partial frame received */ + else if ((stat & ST_ERROR_MASK) || port->rxpart) { + 
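/* An error bit is set, or an earlier fragment of this frame was + * already bad (rxpart): count the error; the descriptor is + * recycled below either way. */ + 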
dev->stats.rx_errors++; + if (stat & ST_RX_OVERRUN) + dev->stats.rx_fifo_errors++; + else if ((stat & (ST_RX_SHORT | ST_RX_ABORT | + ST_RX_RESBIT)) || port->rxpart) + dev->stats.rx_frame_errors++; + else if (stat & ST_RX_CRC) + dev->stats.rx_crc_errors++; + if (stat & ST_RX_EOM) + port->rxpart = 0; /* received last fragment */ + } else { + sca_rx(card, port, desc, port->rxin); + received++; + } + + /* Set new error descriptor address */ + sca_outl(desc_off, dmac + EDAL, card); + port->rxin = (port->rxin + 1) % card->rx_ring_buffers; + } + + /* make sure RX DMA is enabled */ + sca_out(DSR_DE, DSR_RX(port->chan), card); + return received; +} + +/* Transmit DMA service */ +static inline void sca_tx_done(port_t *port) +{ + struct net_device *dev = port->netdev; + card_t *card = port->card; + u8 stat; + unsigned count = 0; + + spin_lock(&port->lock); + + stat = sca_in(DSR_TX(port->chan), card); /* read DMA Status */ + + /* Reset DSR status bits */ + sca_out((stat & (DSR_EOT | DSR_EOM | DSR_BOF | DSR_COF)) | DSR_DWE, + DSR_TX(port->chan), card); + + while (1) { + pkt_desc __iomem *desc = desc_address(port, port->txlast, 1); + u8 stat = readb(&desc->stat); + + if (!(stat & ST_TX_OWNRSHP)) + break; /* not yet transmitted */ + if (stat & ST_TX_UNDRRUN) { + dev->stats.tx_errors++; + dev->stats.tx_fifo_errors++; + } else { + dev->stats.tx_packets++; + dev->stats.tx_bytes += readw(&desc->len); + } + writeb(0, &desc->stat); /* Free descriptor */ + count++; + port->txlast = (port->txlast + 1) % card->tx_ring_buffers; + } + + if (count) + netif_wake_queue(dev); + spin_unlock(&port->lock); +} + +static int sca_poll(struct napi_struct *napi, int budget) +{ + port_t *port = container_of(napi, port_t, napi); + u32 isr0 = sca_inl(ISR0, port->card); + int received = 0; + + if (isr0 & (port->chan ? 0x08000000 : 0x00080000)) + sca_msci_intr(port); + + if (isr0 & (port->chan ? 0x00002000 : 0x00000020)) + sca_tx_done(port); + + if (isr0 & (port->chan ? 0x00000200 : 0x00000002)) + received = sca_rx_done(port, budget); + + if (received < budget) { + napi_complete_done(napi, received); + enable_intr(port); + } + + return received; +} + +static irqreturn_t sca_intr(int irq, void *dev_id) +{ + card_t *card = dev_id; + u32 isr0 = sca_inl(ISR0, card); + int i, handled = 0; + + for (i = 0; i < 2; i++) { + port_t *port = get_port(card, i); + if (port && (isr0 & (i ? 
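+				/* per-channel DMIB + MSCI RXINTA bits,
+				 * the same IER0 mask enable_intr() sets */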
0x08002200 : 0x00080022))) { + handled = 1; + disable_intr(port); + napi_schedule(&port->napi); + } + } + + return IRQ_RETVAL(handled); +} + +static void sca_set_port(port_t *port) +{ + card_t *card = port->card; + u16 msci = get_msci(port); + u8 md2 = sca_in(msci + MD2, card); + unsigned int tmc, br = 10, brv = 1024; + + if (port->settings.clock_rate > 0) { + /* Try lower br for better accuracy*/ + do { + br--; + brv >>= 1; /* brv = 2^9 = 512 max in specs */ + + /* Baud Rate = CLOCK_BASE / TMC / 2^BR */ + tmc = CLOCK_BASE / brv / port->settings.clock_rate; + } while (br > 1 && tmc <= 128); + + if (tmc < 1) { + tmc = 1; + br = 0; /* For baud=CLOCK_BASE we use tmc=1 br=0 */ + brv = 1; + } else if (tmc > 255) { + tmc = 256; /* tmc=0 means 256 - low baud rates */ + } + + port->settings.clock_rate = CLOCK_BASE / brv / tmc; + } else { + br = 9; /* Minimum clock rate */ + tmc = 256; /* 8bit = 0 */ + port->settings.clock_rate = CLOCK_BASE / (256 * 512); + } + + port->rxs = (port->rxs & ~CLK_BRG_MASK) | br; + port->txs = (port->txs & ~CLK_BRG_MASK) | br; + port->tmc = tmc; + + /* baud divisor - time constant*/ + sca_out(port->tmc, msci + TMCR, card); + sca_out(port->tmc, msci + TMCT, card); + + /* Set BRG bits */ + sca_out(port->rxs, msci + RXS, card); + sca_out(port->txs, msci + TXS, card); + + if (port->settings.loopback) + md2 |= MD2_LOOPBACK; + else + md2 &= ~MD2_LOOPBACK; + + sca_out(md2, msci + MD2, card); +} + +static void sca_open(struct net_device *dev) +{ + port_t *port = dev_to_port(dev); + card_t *card = port->card; + u16 msci = get_msci(port); + u8 md0, md2; + + switch (port->encoding) { + case ENCODING_NRZ: + md2 = MD2_NRZ; + break; + case ENCODING_NRZI: + md2 = MD2_NRZI; + break; + case ENCODING_FM_MARK: + md2 = MD2_FM_MARK; + break; + case ENCODING_FM_SPACE: + md2 = MD2_FM_SPACE; + break; + default: + md2 = MD2_MANCHESTER; + } + + if (port->settings.loopback) + md2 |= MD2_LOOPBACK; + + switch (port->parity) { + case PARITY_CRC16_PR0: + md0 = MD0_HDLC | MD0_CRC_16_0; + break; + case PARITY_CRC16_PR1: + md0 = MD0_HDLC | MD0_CRC_16; + break; + case PARITY_CRC32_PR1_CCITT: + md0 = MD0_HDLC | MD0_CRC_ITU32; + break; + case PARITY_CRC16_PR1_CCITT: + md0 = MD0_HDLC | MD0_CRC_ITU; + break; + default: + md0 = MD0_HDLC | MD0_CRC_NONE; + } + + sca_out(CMD_RESET, msci + CMD, card); + sca_out(md0, msci + MD0, card); + sca_out(0x00, msci + MD1, card); /* no address field check */ + sca_out(md2, msci + MD2, card); + sca_out(0x7E, msci + IDL, card); /* flag character 0x7E */ + /* Skip the rest of underrun frame */ + sca_out(CTL_IDLE | CTL_URCT | CTL_URSKP, msci + CTL, card); + sca_out(0x0F, msci + RNR, card); /* +1=RX DMA activation condition */ + sca_out(0x3C, msci + TFS, card); /* +1 = TX start */ + sca_out(0x38, msci + TCR, card); /* =Critical TX DMA activ condition */ + sca_out(0x38, msci + TNR0, card); /* =TX DMA activation condition */ + sca_out(0x3F, msci + TNR1, card); /* +1=TX DMA deactivation condition*/ + +/* We're using the following interrupts: + - RXINTA (DCD changes only) + - DMIB (EOM - single frame transfer complete) +*/ + sca_outl(IE0_RXINTA | IE0_CDCD, msci + IE0, card); + + sca_out(port->tmc, msci + TMCR, card); + sca_out(port->tmc, msci + TMCT, card); + sca_out(port->rxs, msci + RXS, card); + sca_out(port->txs, msci + TXS, card); + sca_out(CMD_TX_ENABLE, msci + CMD, card); + sca_out(CMD_RX_ENABLE, msci + CMD, card); + + sca_set_carrier(port); + enable_intr(port); + napi_enable(&port->napi); + netif_start_queue(dev); +} + +static void sca_close(struct net_device *dev) +{ + 
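+	/* Tear-down mirrors sca_open(): reset the MSCI channel so RX
+	 * and TX stop, mask this port's interrupts, then quiesce NAPI
+	 * and the TX queue.
+	 */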
port_t *port = dev_to_port(dev); + + /* reset channel */ + sca_out(CMD_RESET, get_msci(port) + CMD, port->card); + disable_intr(port); + napi_disable(&port->napi); + netif_stop_queue(dev); +} + +static int sca_attach(struct net_device *dev, unsigned short encoding, + unsigned short parity) +{ + if (encoding != ENCODING_NRZ && + encoding != ENCODING_NRZI && + encoding != ENCODING_FM_MARK && + encoding != ENCODING_FM_SPACE && + encoding != ENCODING_MANCHESTER) + return -EINVAL; + + if (parity != PARITY_NONE && + parity != PARITY_CRC16_PR0 && + parity != PARITY_CRC16_PR1 && + parity != PARITY_CRC32_PR1_CCITT && + parity != PARITY_CRC16_PR1_CCITT) + return -EINVAL; + + dev_to_port(dev)->encoding = encoding; + dev_to_port(dev)->parity = parity; + return 0; +} + +#ifdef DEBUG_RINGS +static void sca_dump_rings(struct net_device *dev) +{ + port_t *port = dev_to_port(dev); + card_t *card = port->card; + u16 cnt; + + printk(KERN_DEBUG "RX ring: CDA=%u EDA=%u DSR=%02X in=%u %sactive", + sca_inl(get_dmac_rx(port) + CDAL, card), + sca_inl(get_dmac_rx(port) + EDAL, card), + sca_in(DSR_RX(port->chan), card), port->rxin, + sca_in(DSR_RX(port->chan), card) & DSR_DE ? "" : "in"); + for (cnt = 0; cnt < port->card->rx_ring_buffers; cnt++) + pr_cont(" %02X", readb(&(desc_address(port, cnt, 0)->stat))); + pr_cont("\n"); + + printk(KERN_DEBUG "TX ring: CDA=%u EDA=%u DSR=%02X in=%u " + "last=%u %sactive", + sca_inl(get_dmac_tx(port) + CDAL, card), + sca_inl(get_dmac_tx(port) + EDAL, card), + sca_in(DSR_TX(port->chan), card), port->txin, port->txlast, + sca_in(DSR_TX(port->chan), card) & DSR_DE ? "" : "in"); + + for (cnt = 0; cnt < port->card->tx_ring_buffers; cnt++) + pr_cont(" %02X", readb(&(desc_address(port, cnt, 1)->stat))); + pr_cont("\n"); + + printk(KERN_DEBUG "MSCI: MD: %02x %02x %02x," + " ST: %02x %02x %02x %02x %02x, FST: %02x CST: %02x %02x\n", + sca_in(get_msci(port) + MD0, card), + sca_in(get_msci(port) + MD1, card), + sca_in(get_msci(port) + MD2, card), + sca_in(get_msci(port) + ST0, card), + sca_in(get_msci(port) + ST1, card), + sca_in(get_msci(port) + ST2, card), + sca_in(get_msci(port) + ST3, card), + sca_in(get_msci(port) + ST4, card), + sca_in(get_msci(port) + FST, card), + sca_in(get_msci(port) + CST0, card), + sca_in(get_msci(port) + CST1, card)); + + printk(KERN_DEBUG "ILAR: %02x ISR: %08x %08x\n", sca_in(ILAR, card), + sca_inl(ISR0, card), sca_inl(ISR1, card)); +} +#endif /* DEBUG_RINGS */ + +static netdev_tx_t sca_xmit(struct sk_buff *skb, struct net_device *dev) +{ + port_t *port = dev_to_port(dev); + card_t *card = port->card; + pkt_desc __iomem *desc; + u32 buff, len; + + spin_lock_irq(&port->lock); + + desc = desc_address(port, port->txin + 1, 1); + BUG_ON(readb(&desc->stat)); /* previous xmit should stop queue */ + +#ifdef DEBUG_PKT + printk(KERN_DEBUG "%s TX(%i):", dev->name, skb->len); + debug_frame(skb); +#endif + + desc = desc_address(port, port->txin, 1); + buff = buffer_offset(port, port->txin, 1); + len = skb->len; + memcpy_toio(card->rambase + buff, skb->data, len); + + writew(len, &desc->len); + writeb(ST_TX_EOM, &desc->stat); + + port->txin = (port->txin + 1) % card->tx_ring_buffers; + sca_outl(desc_offset(port, port->txin, 1), + get_dmac_tx(port) + EDAL, card); + + sca_out(DSR_DE, DSR_TX(port->chan), card); /* Enable TX DMA */ + + desc = desc_address(port, port->txin + 1, 1); + if (readb(&desc->stat)) /* allow 1 packet gap */ + netif_stop_queue(dev); + + spin_unlock_irq(&port->lock); + + dev_kfree_skb(skb); + return NETDEV_TX_OK; +} + +static u32 sca_detect_ram(card_t 
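+/* RAM size probe: fill the window (rounded down to a 32-bit multiple)
+ * from top to bottom with the address-derived pattern i ^ 0x12345678,
+ * then read it back from the bottom up; the first mismatch marks the
+ * end of usable, non-aliased RAM, and the returned byte count becomes
+ * the effective window size.
+ */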
*card, u8 __iomem *rambase, u32 ramsize) +{ + /* Round RAM size to 32 bits, fill from end to start */ + u32 i = ramsize &= ~3; + + do { + i -= 4; + writel(i ^ 0x12345678, rambase + i); + } while (i > 0); + + for (i = 0; i < ramsize ; i += 4) { + if (readl(rambase + i) != (i ^ 0x12345678)) + break; + } + + return i; +} + +static void sca_init(card_t *card, int wait_states) +{ + sca_out(wait_states, WCRL, card); /* Wait Control */ + sca_out(wait_states, WCRM, card); + sca_out(wait_states, WCRH, card); + + sca_out(0, DMER, card); /* DMA Master disable */ + sca_out(0x03, PCR, card); /* DMA priority */ + sca_out(0, DSR_RX(0), card); /* DMA disable - to halt state */ + sca_out(0, DSR_TX(0), card); + sca_out(0, DSR_RX(1), card); + sca_out(0, DSR_TX(1), card); + sca_out(DMER_DME, DMER, card); /* DMA Master enable */ +} diff --git a/drivers/net/wan/hd64572.h b/drivers/net/wan/hd64572.h new file mode 100644 index 000000000..9c87b8628 --- /dev/null +++ b/drivers/net/wan/hd64572.h @@ -0,0 +1,522 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * hd64572.h Description of the Hitachi HD64572 (SCA-II), valid for + * CPU modes 0 & 2. + * + * Author: Ivan Passos <ivan@cyclades.com> + * + * Copyright: (c) 2000-2001 Cyclades Corp. + * + * $Log: hd64572.h,v $ + * Revision 3.1 2001/06/15 12:41:10 regina + * upping major version number + * + * Revision 1.1.1.1 2001/06/13 20:24:49 daniela + * PC300 initial CVS version (3.4.0-pre1) + * + * Revision 1.0 2000/01/25 ivan + * Initial version. + */ + +#ifndef __HD64572_H +#define __HD64572_H + +/* Illegal Access Register */ +#define ILAR 0x00 + +/* Wait Controller Registers */ +#define PABR0L 0x20 /* Physical Addr Boundary Register 0 L */ +#define PABR0H 0x21 /* Physical Addr Boundary Register 0 H */ +#define PABR1L 0x22 /* Physical Addr Boundary Register 1 L */ +#define PABR1H 0x23 /* Physical Addr Boundary Register 1 H */ +#define WCRL 0x24 /* Wait Control Register L */ +#define WCRM 0x25 /* Wait Control Register M */ +#define WCRH 0x26 /* Wait Control Register H */ + +/* Interrupt Registers */ +#define IVR 0x60 /* Interrupt Vector Register */ +#define IMVR 0x64 /* Interrupt Modified Vector Register */ +#define ITCR 0x68 /* Interrupt Control Register */ +#define ISR0 0x6c /* Interrupt Status Register 0 */ +#define ISR1 0x70 /* Interrupt Status Register 1 */ +#define IER0 0x74 /* Interrupt Enable Register 0 */ +#define IER1 0x78 /* Interrupt Enable Register 1 */ + +/* Register Access Macros (chan is 0 or 1 in _any_ case) */ +#define M_REG(reg, chan) (reg + 0x80*chan) /* MSCI */ +#define DRX_REG(reg, chan) (reg + 0x40*chan) /* DMA Rx */ +#define DTX_REG(reg, chan) (reg + 0x20*(2*chan + 1)) /* DMA Tx */ +#define TRX_REG(reg, chan) (reg + 0x20*chan) /* Timer Rx */ +#define TTX_REG(reg, chan) (reg + 0x10*(2*chan + 1)) /* Timer Tx */ +#define ST_REG(reg, chan) (reg + 0x80*chan) /* Status Cnt */ +#define IR0_DRX(val, chan) ((val)<<(8*(chan))) /* Int DMA Rx */ +#define IR0_DTX(val, chan) ((val)<<(4*(2*chan + 1))) /* Int DMA Tx */ +#define IR0_M(val, chan) ((val)<<(8*(chan))) /* Int MSCI */ + +/* MSCI Channel Registers */ +#define MSCI0_OFFSET 0x00 +#define MSCI1_OFFSET 0x80 + +#define MD0 0x138 /* Mode reg 0 */ +#define MD1 0x139 /* Mode reg 1 */ +#define MD2 0x13a /* Mode reg 2 */ +#define MD3 0x13b /* Mode reg 3 */ +#define CTL 0x130 /* Control reg */ +#define RXS 0x13c /* RX clock source */ +#define TXS 0x13d /* TX clock source */ +#define EXS 0x13e /* External clock input selection */ +#define TMCT 0x144 /* Time constant (Tx) */ +#define TMCR 0x145 /* Time 
constant (Rx) */ +#define CMD 0x128 /* Command reg */ +#define ST0 0x118 /* Status reg 0 */ +#define ST1 0x119 /* Status reg 1 */ +#define ST2 0x11a /* Status reg 2 */ +#define ST3 0x11b /* Status reg 3 */ +#define ST4 0x11c /* Status reg 4 */ +#define FST 0x11d /* frame Status reg */ +#define IE0 0x120 /* Interrupt enable reg 0 */ +#define IE1 0x121 /* Interrupt enable reg 1 */ +#define IE2 0x122 /* Interrupt enable reg 2 */ +#define IE4 0x124 /* Interrupt enable reg 4 */ +#define FIE 0x125 /* Frame Interrupt enable reg */ +#define SA0 0x140 /* Syn Address reg 0 */ +#define SA1 0x141 /* Syn Address reg 1 */ +#define IDL 0x142 /* Idle register */ +#define TRBL 0x100 /* TX/RX buffer reg L */ +#define TRBK 0x101 /* TX/RX buffer reg K */ +#define TRBJ 0x102 /* TX/RX buffer reg J */ +#define TRBH 0x103 /* TX/RX buffer reg H */ +#define TRC0 0x148 /* TX Ready control reg 0 */ +#define TRC1 0x149 /* TX Ready control reg 1 */ +#define RRC 0x14a /* RX Ready control reg */ +#define CST0 0x108 /* Current Status Register 0 */ +#define CST1 0x109 /* Current Status Register 1 */ +#define CST2 0x10a /* Current Status Register 2 */ +#define CST3 0x10b /* Current Status Register 3 */ +#define GPO 0x131 /* General Purpose Output Pin Ctl Reg */ +#define TFS 0x14b /* Tx Start Threshold Ctl Reg */ +#define TFN 0x143 /* Inter-transmit-frame Time Fill Ctl Reg */ +#define TBN 0x110 /* Tx Buffer Number Reg */ +#define RBN 0x111 /* Rx Buffer Number Reg */ +#define TNR0 0x150 /* Tx DMA Request Ctl Reg 0 */ +#define TNR1 0x151 /* Tx DMA Request Ctl Reg 1 */ +#define TCR 0x152 /* Tx DMA Critical Request Reg */ +#define RNR 0x154 /* Rx DMA Request Ctl Reg */ +#define RCR 0x156 /* Rx DMA Critical Request Reg */ + +/* Timer Registers */ +#define TIMER0RX_OFFSET 0x00 +#define TIMER0TX_OFFSET 0x10 +#define TIMER1RX_OFFSET 0x20 +#define TIMER1TX_OFFSET 0x30 + +#define TCNTL 0x200 /* Timer Upcounter L */ +#define TCNTH 0x201 /* Timer Upcounter H */ +#define TCONRL 0x204 /* Timer Constant Register L */ +#define TCONRH 0x205 /* Timer Constant Register H */ +#define TCSR 0x206 /* Timer Control/Status Register */ +#define TEPR 0x207 /* Timer Expand Prescale Register */ + +/* DMA registers */ +#define PCR 0x40 /* DMA priority control reg */ +#define DRR 0x44 /* DMA reset reg */ +#define DMER 0x07 /* DMA Master Enable reg */ +#define BTCR 0x08 /* Burst Tx Ctl Reg */ +#define BOLR 0x0c /* Back-off Length Reg */ +#define DSR_RX(chan) (0x48 + 2*chan) /* DMA Status Reg (Rx) */ +#define DSR_TX(chan) (0x49 + 2*chan) /* DMA Status Reg (Tx) */ +#define DIR_RX(chan) (0x4c + 2*chan) /* DMA Interrupt Enable Reg (Rx) */ +#define DIR_TX(chan) (0x4d + 2*chan) /* DMA Interrupt Enable Reg (Tx) */ +#define FCT_RX(chan) (0x50 + 2*chan) /* Frame End Interrupt Counter (Rx) */ +#define FCT_TX(chan) (0x51 + 2*chan) /* Frame End Interrupt Counter (Tx) */ +#define DMR_RX(chan) (0x54 + 2*chan) /* DMA Mode Reg (Rx) */ +#define DMR_TX(chan) (0x55 + 2*chan) /* DMA Mode Reg (Tx) */ +#define DCR_RX(chan) (0x58 + 2*chan) /* DMA Command Reg (Rx) */ +#define DCR_TX(chan) (0x59 + 2*chan) /* DMA Command Reg (Tx) */ + +/* DMA Channel Registers */ +#define DMAC0RX_OFFSET 0x00 +#define DMAC0TX_OFFSET 0x20 +#define DMAC1RX_OFFSET 0x40 +#define DMAC1TX_OFFSET 0x60 + +#define DARL 0x80 /* Dest Addr Register L (single-block, RX only) */ +#define DARH 0x81 /* Dest Addr Register H (single-block, RX only) */ +#define DARB 0x82 /* Dest Addr Register B (single-block, RX only) */ +#define DARBH 0x83 /* Dest Addr Register BH (single-block, RX only) */ +#define SARL 0x80 /* 
Source Addr Register L (single-block, TX only) */ +#define SARH 0x81 /* Source Addr Register H (single-block, TX only) */ +#define SARB 0x82 /* Source Addr Register B (single-block, TX only) */ +#define DARBH 0x83 /* Source Addr Register BH (single-block, TX only) */ +#define BARL 0x80 /* Buffer Addr Register L (chained-block) */ +#define BARH 0x81 /* Buffer Addr Register H (chained-block) */ +#define BARB 0x82 /* Buffer Addr Register B (chained-block) */ +#define BARBH 0x83 /* Buffer Addr Register BH (chained-block) */ +#define CDAL 0x84 /* Current Descriptor Addr Register L */ +#define CDAH 0x85 /* Current Descriptor Addr Register H */ +#define CDAB 0x86 /* Current Descriptor Addr Register B */ +#define CDABH 0x87 /* Current Descriptor Addr Register BH */ +#define EDAL 0x88 /* Error Descriptor Addr Register L */ +#define EDAH 0x89 /* Error Descriptor Addr Register H */ +#define EDAB 0x8a /* Error Descriptor Addr Register B */ +#define EDABH 0x8b /* Error Descriptor Addr Register BH */ +#define BFLL 0x90 /* RX Buffer Length L (only RX) */ +#define BFLH 0x91 /* RX Buffer Length H (only RX) */ +#define BCRL 0x8c /* Byte Count Register L */ +#define BCRH 0x8d /* Byte Count Register H */ + +/* Block Descriptor Structure */ +typedef struct { + unsigned long next; /* pointer to next block descriptor */ + unsigned long ptbuf; /* buffer pointer */ + unsigned short len; /* data length */ + unsigned char status; /* status */ + unsigned char filler[5]; /* alignment filler (16 bytes) */ +} pcsca_bd_t; + +/* Block Descriptor Structure */ +typedef struct { + u32 cp; /* pointer to next block descriptor */ + u32 bp; /* buffer pointer */ + u16 len; /* data length */ + u8 stat; /* status */ + u8 unused; /* pads to 4-byte boundary */ +}pkt_desc; + + +/* + Descriptor Status definitions: + + Bit Transmission Reception + + 7 EOM EOM + 6 - Short Frame + 5 - Abort + 4 - Residual bit + 3 Underrun Overrun + 2 - CRC + 1 Ownership Ownership + 0 EOT - +*/ +#define DST_EOT 0x01 /* End of transmit command */ +#define DST_OSB 0x02 /* Ownership bit */ +#define DST_CRC 0x04 /* CRC Error */ +#define DST_OVR 0x08 /* Overrun */ +#define DST_UDR 0x08 /* Underrun */ +#define DST_RBIT 0x10 /* Residual bit */ +#define DST_ABT 0x20 /* Abort */ +#define DST_SHRT 0x40 /* Short Frame */ +#define DST_EOM 0x80 /* End of Message */ + +/* Packet Descriptor Status bits */ + +#define ST_TX_EOM 0x80 /* End of frame */ +#define ST_TX_UNDRRUN 0x08 +#define ST_TX_OWNRSHP 0x02 +#define ST_TX_EOT 0x01 /* End of transmission */ + +#define ST_RX_EOM 0x80 /* End of frame */ +#define ST_RX_SHORT 0x40 /* Short frame */ +#define ST_RX_ABORT 0x20 /* Abort */ +#define ST_RX_RESBIT 0x10 /* Residual bit */ +#define ST_RX_OVERRUN 0x08 /* Overrun */ +#define ST_RX_CRC 0x04 /* CRC */ +#define ST_RX_OWNRSHP 0x02 + +#define ST_ERROR_MASK 0x7C + +/* Status Counter Registers */ +#define CMCR 0x158 /* Counter Master Ctl Reg */ +#define TECNTL 0x160 /* Tx EOM Counter L */ +#define TECNTM 0x161 /* Tx EOM Counter M */ +#define TECNTH 0x162 /* Tx EOM Counter H */ +#define TECCR 0x163 /* Tx EOM Counter Ctl Reg */ +#define URCNTL 0x164 /* Underrun Counter L */ +#define URCNTH 0x165 /* Underrun Counter H */ +#define URCCR 0x167 /* Underrun Counter Ctl Reg */ +#define RECNTL 0x168 /* Rx EOM Counter L */ +#define RECNTM 0x169 /* Rx EOM Counter M */ +#define RECNTH 0x16a /* Rx EOM Counter H */ +#define RECCR 0x16b /* Rx EOM Counter Ctl Reg */ +#define ORCNTL 0x16c /* Overrun Counter L */ +#define ORCNTH 0x16d /* Overrun Counter H */ +#define ORCCR 0x16f /* Overrun Counter 
Ctl Reg */ +#define CECNTL 0x170 /* CRC Counter L */ +#define CECNTH 0x171 /* CRC Counter H */ +#define CECCR 0x173 /* CRC Counter Ctl Reg */ +#define ABCNTL 0x174 /* Abort frame Counter L */ +#define ABCNTH 0x175 /* Abort frame Counter H */ +#define ABCCR 0x177 /* Abort frame Counter Ctl Reg */ +#define SHCNTL 0x178 /* Short frame Counter L */ +#define SHCNTH 0x179 /* Short frame Counter H */ +#define SHCCR 0x17b /* Short frame Counter Ctl Reg */ +#define RSCNTL 0x17c /* Residual bit Counter L */ +#define RSCNTH 0x17d /* Residual bit Counter H */ +#define RSCCR 0x17f /* Residual bit Counter Ctl Reg */ + +/* Register Programming Constants */ + +#define IR0_DMIC 0x00000001 +#define IR0_DMIB 0x00000002 +#define IR0_DMIA 0x00000004 +#define IR0_EFT 0x00000008 +#define IR0_DMAREQ 0x00010000 +#define IR0_TXINT 0x00020000 +#define IR0_RXINTB 0x00040000 +#define IR0_RXINTA 0x00080000 +#define IR0_TXRDY 0x00100000 +#define IR0_RXRDY 0x00200000 + +#define MD0_CRC16_0 0x00 +#define MD0_CRC16_1 0x01 +#define MD0_CRC32 0x02 +#define MD0_CRC_CCITT 0x03 +#define MD0_CRCC0 0x04 +#define MD0_CRCC1 0x08 +#define MD0_AUTO_ENA 0x10 +#define MD0_ASYNC 0x00 +#define MD0_BY_MSYNC 0x20 +#define MD0_BY_BISYNC 0x40 +#define MD0_BY_EXT 0x60 +#define MD0_BIT_SYNC 0x80 +#define MD0_TRANSP 0xc0 + +#define MD0_HDLC 0x80 /* Bit-sync HDLC mode */ + +#define MD0_CRC_NONE 0x00 +#define MD0_CRC_16_0 0x04 +#define MD0_CRC_16 0x05 +#define MD0_CRC_ITU32 0x06 +#define MD0_CRC_ITU 0x07 + +#define MD1_NOADDR 0x00 +#define MD1_SADDR1 0x40 +#define MD1_SADDR2 0x80 +#define MD1_DADDR 0xc0 + +#define MD2_NRZI_IEEE 0x40 +#define MD2_MANCHESTER 0x80 +#define MD2_FM_MARK 0xA0 +#define MD2_FM_SPACE 0xC0 +#define MD2_LOOPBACK 0x03 /* Local data Loopback */ + +#define MD2_F_DUPLEX 0x00 +#define MD2_AUTO_ECHO 0x01 +#define MD2_LOOP_HI_Z 0x02 +#define MD2_LOOP_MIR 0x03 +#define MD2_ADPLL_X8 0x00 +#define MD2_ADPLL_X16 0x08 +#define MD2_ADPLL_X32 0x10 +#define MD2_NRZ 0x00 +#define MD2_NRZI 0x20 +#define MD2_NRZ_IEEE 0x40 +#define MD2_MANCH 0x00 +#define MD2_FM1 0x20 +#define MD2_FM0 0x40 +#define MD2_FM 0x80 + +#define CTL_RTS 0x01 +#define CTL_DTR 0x02 +#define CTL_SYN 0x04 +#define CTL_IDLC 0x10 +#define CTL_UDRNC 0x20 +#define CTL_URSKP 0x40 +#define CTL_URCT 0x80 + +#define CTL_NORTS 0x01 +#define CTL_NODTR 0x02 +#define CTL_IDLE 0x10 + +#define RXS_BR0 0x01 +#define RXS_BR1 0x02 +#define RXS_BR2 0x04 +#define RXS_BR3 0x08 +#define RXS_ECLK 0x00 +#define RXS_ECLK_NS 0x20 +#define RXS_IBRG 0x40 +#define RXS_PLL1 0x50 +#define RXS_PLL2 0x60 +#define RXS_PLL3 0x70 +#define RXS_DRTXC 0x80 + +#define TXS_BR0 0x01 +#define TXS_BR1 0x02 +#define TXS_BR2 0x04 +#define TXS_BR3 0x08 +#define TXS_ECLK 0x00 +#define TXS_IBRG 0x40 +#define TXS_RCLK 0x60 +#define TXS_DTRXC 0x80 + +#define EXS_RES0 0x01 +#define EXS_RES1 0x02 +#define EXS_RES2 0x04 +#define EXS_TES0 0x10 +#define EXS_TES1 0x20 +#define EXS_TES2 0x40 + +#define CLK_BRG_MASK 0x0F +#define CLK_PIN_OUT 0x80 +#define CLK_LINE 0x00 /* clock line input */ +#define CLK_BRG 0x40 /* internal baud rate generator */ +#define CLK_TX_RXCLK 0x60 /* TX clock from RX clock */ + +#define CMD_RX_RST 0x11 +#define CMD_RX_ENA 0x12 +#define CMD_RX_DIS 0x13 +#define CMD_RX_CRC_INIT 0x14 +#define CMD_RX_MSG_REJ 0x15 +#define CMD_RX_MP_SRCH 0x16 +#define CMD_RX_CRC_EXC 0x17 +#define CMD_RX_CRC_FRC 0x18 +#define CMD_TX_RST 0x01 +#define CMD_TX_ENA 0x02 +#define CMD_TX_DISA 0x03 +#define CMD_TX_CRC_INIT 0x04 +#define CMD_TX_CRC_EXC 0x05 +#define CMD_TX_EOM 0x06 +#define CMD_TX_ABORT 0x07 +#define CMD_TX_MP_ON 
0x08 +#define CMD_TX_BUF_CLR 0x09 +#define CMD_TX_DISB 0x0b +#define CMD_CH_RST 0x21 +#define CMD_SRCH_MODE 0x31 +#define CMD_NOP 0x00 + +#define CMD_RESET 0x21 +#define CMD_TX_ENABLE 0x02 +#define CMD_RX_ENABLE 0x12 + +#define ST0_RXRDY 0x01 +#define ST0_TXRDY 0x02 +#define ST0_RXINTB 0x20 +#define ST0_RXINTA 0x40 +#define ST0_TXINT 0x80 + +#define ST1_IDLE 0x01 +#define ST1_ABORT 0x02 +#define ST1_CDCD 0x04 +#define ST1_CCTS 0x08 +#define ST1_SYN_FLAG 0x10 +#define ST1_CLMD 0x20 +#define ST1_TXIDLE 0x40 +#define ST1_UDRN 0x80 + +#define ST2_CRCE 0x04 +#define ST2_ONRN 0x08 +#define ST2_RBIT 0x10 +#define ST2_ABORT 0x20 +#define ST2_SHORT 0x40 +#define ST2_EOM 0x80 + +#define ST3_RX_ENA 0x01 +#define ST3_TX_ENA 0x02 +#define ST3_DCD 0x04 +#define ST3_CTS 0x08 +#define ST3_SRCH_MODE 0x10 +#define ST3_SLOOP 0x20 +#define ST3_GPI 0x80 + +#define ST4_RDNR 0x01 +#define ST4_RDCR 0x02 +#define ST4_TDNR 0x04 +#define ST4_TDCR 0x08 +#define ST4_OCLM 0x20 +#define ST4_CFT 0x40 +#define ST4_CGPI 0x80 + +#define FST_CRCEF 0x04 +#define FST_OVRNF 0x08 +#define FST_RBIF 0x10 +#define FST_ABTF 0x20 +#define FST_SHRTF 0x40 +#define FST_EOMF 0x80 + +#define IE0_RXRDY 0x01 +#define IE0_TXRDY 0x02 +#define IE0_RXINTB 0x20 +#define IE0_RXINTA 0x40 +#define IE0_TXINT 0x80 +#define IE0_UDRN 0x00008000 /* TX underrun MSCI interrupt enable */ +#define IE0_CDCD 0x00000400 /* CD level change interrupt enable */ + +#define IE1_IDLD 0x01 +#define IE1_ABTD 0x02 +#define IE1_CDCD 0x04 +#define IE1_CCTS 0x08 +#define IE1_SYNCD 0x10 +#define IE1_CLMD 0x20 +#define IE1_IDL 0x40 +#define IE1_UDRN 0x80 + +#define IE2_CRCE 0x04 +#define IE2_OVRN 0x08 +#define IE2_RBIT 0x10 +#define IE2_ABT 0x20 +#define IE2_SHRT 0x40 +#define IE2_EOM 0x80 + +#define IE4_RDNR 0x01 +#define IE4_RDCR 0x02 +#define IE4_TDNR 0x04 +#define IE4_TDCR 0x08 +#define IE4_OCLM 0x20 +#define IE4_CFT 0x40 +#define IE4_CGPI 0x80 + +#define FIE_CRCEF 0x04 +#define FIE_OVRNF 0x08 +#define FIE_RBIF 0x10 +#define FIE_ABTF 0x20 +#define FIE_SHRTF 0x40 +#define FIE_EOMF 0x80 + +#define DSR_DWE 0x01 +#define DSR_DE 0x02 +#define DSR_REF 0x04 +#define DSR_UDRF 0x04 +#define DSR_COA 0x08 +#define DSR_COF 0x10 +#define DSR_BOF 0x20 +#define DSR_EOM 0x40 +#define DSR_EOT 0x80 + +#define DIR_REF 0x04 +#define DIR_UDRF 0x04 +#define DIR_COA 0x08 +#define DIR_COF 0x10 +#define DIR_BOF 0x20 +#define DIR_EOM 0x40 +#define DIR_EOT 0x80 + +#define DIR_REFE 0x04 +#define DIR_UDRFE 0x04 +#define DIR_COAE 0x08 +#define DIR_COFE 0x10 +#define DIR_BOFE 0x20 +#define DIR_EOME 0x40 +#define DIR_EOTE 0x80 + +#define DMR_CNTE 0x02 +#define DMR_NF 0x04 +#define DMR_SEOME 0x08 +#define DMR_TMOD 0x10 + +#define DMER_DME 0x80 /* DMA Master Enable */ + +#define DCR_SW_ABT 0x01 +#define DCR_FCT_CLR 0x02 + +#define DCR_ABORT 0x01 +#define DCR_CLEAR_EOF 0x02 + +#define PCR_COTE 0x80 +#define PCR_PR0 0x01 +#define PCR_PR1 0x02 +#define PCR_PR2 0x04 +#define PCR_CCC 0x08 +#define PCR_BRC 0x10 +#define PCR_OSB 0x40 +#define PCR_BURST 0x80 + +#endif /* (__HD64572_H) */ diff --git a/drivers/net/wan/hdlc.c b/drivers/net/wan/hdlc.c new file mode 100644 index 000000000..cbed10b1d --- /dev/null +++ b/drivers/net/wan/hdlc.c @@ -0,0 +1,379 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Generic HDLC support routines for Linux + * + * Copyright (C) 1999 - 2008 Krzysztof Halasa <khc@pm.waw.pl> + * + * Currently supported: + * * raw IP-in-HDLC + * * Cisco HDLC + * * Frame Relay with ANSI or CCITT LMI (both user and network side) + * * PPP + * * X.25 + * + * Use sethdlc utility to set line 
parameters, protocol and PVCs + * + * How does it work: + * - proto->open(), close(), start(), stop() calls are serialized. + * The order is: open, [ start, stop ... ] close ... + * - proto->start() and stop() are called with spin_lock_irq held. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/errno.h> +#include <linux/hdlc.h> +#include <linux/if_arp.h> +#include <linux/inetdevice.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/notifier.h> +#include <linux/pkt_sched.h> +#include <linux/poll.h> +#include <linux/rtnetlink.h> +#include <linux/skbuff.h> +#include <linux/slab.h> +#include <net/net_namespace.h> + +static const char *version = "HDLC support module revision 1.22"; + +#undef DEBUG_LINK + +static struct hdlc_proto *first_proto; + +static int hdlc_rcv(struct sk_buff *skb, struct net_device *dev, + struct packet_type *p, struct net_device *orig_dev) +{ + struct hdlc_device *hdlc; + + /* First make sure "dev" is an HDLC device */ + if (!(dev->priv_flags & IFF_WAN_HDLC)) { + kfree_skb(skb); + return NET_RX_SUCCESS; + } + + hdlc = dev_to_hdlc(dev); + + if (!net_eq(dev_net(dev), &init_net)) { + kfree_skb(skb); + return 0; + } + + BUG_ON(!hdlc->proto->netif_rx); + return hdlc->proto->netif_rx(skb); +} + +netdev_tx_t hdlc_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + hdlc_device *hdlc = dev_to_hdlc(dev); + + if (hdlc->proto->xmit) + return hdlc->proto->xmit(skb, dev); + + return hdlc->xmit(skb, dev); /* call hardware driver directly */ +} +EXPORT_SYMBOL(hdlc_start_xmit); + +static inline void hdlc_proto_start(struct net_device *dev) +{ + hdlc_device *hdlc = dev_to_hdlc(dev); + + if (hdlc->proto->start) + hdlc->proto->start(dev); +} + +static inline void hdlc_proto_stop(struct net_device *dev) +{ + hdlc_device *hdlc = dev_to_hdlc(dev); + + if (hdlc->proto->stop) + hdlc->proto->stop(dev); +} + +static int hdlc_device_event(struct notifier_block *this, unsigned long event, + void *ptr) +{ + struct net_device *dev = netdev_notifier_info_to_dev(ptr); + hdlc_device *hdlc; + unsigned long flags; + int on; + + if (!net_eq(dev_net(dev), &init_net)) + return NOTIFY_DONE; + + if (!(dev->priv_flags & IFF_WAN_HDLC)) + return NOTIFY_DONE; /* not an HDLC device */ + + if (event != NETDEV_CHANGE) + return NOTIFY_DONE; /* Only interested in carrier changes */ + + on = netif_carrier_ok(dev); + +#ifdef DEBUG_LINK + printk(KERN_DEBUG "%s: hdlc_device_event NETDEV_CHANGE, carrier %i\n", + dev->name, on); +#endif + + hdlc = dev_to_hdlc(dev); + spin_lock_irqsave(&hdlc->state_lock, flags); + + if (hdlc->carrier == on) + goto carrier_exit; /* no change in DCD line level */ + + hdlc->carrier = on; + + if (!hdlc->open) + goto carrier_exit; + + if (hdlc->carrier) { + netdev_info(dev, "Carrier detected\n"); + hdlc_proto_start(dev); + } else { + netdev_info(dev, "Carrier lost\n"); + hdlc_proto_stop(dev); + } + +carrier_exit: + spin_unlock_irqrestore(&hdlc->state_lock, flags); + return NOTIFY_DONE; +} + +/* Must be called by hardware driver when HDLC device is being opened */ +int hdlc_open(struct net_device *dev) +{ + hdlc_device *hdlc = dev_to_hdlc(dev); +#ifdef DEBUG_LINK + printk(KERN_DEBUG "%s: hdlc_open() carrier %i open %i\n", dev->name, + hdlc->carrier, hdlc->open); +#endif + + if (!hdlc->proto) + return -ENOSYS; /* no protocol attached */ + + if (hdlc->proto->open) { + int result = hdlc->proto->open(dev); + + if (result) + return result; + } + + spin_lock_irq(&hdlc->state_lock); + + if (hdlc->carrier) { + netdev_info(dev, 
"Carrier detected\n"); + hdlc_proto_start(dev); + } else { + netdev_info(dev, "No carrier\n"); + } + + hdlc->open = 1; + + spin_unlock_irq(&hdlc->state_lock); + return 0; +} +EXPORT_SYMBOL(hdlc_open); + +/* Must be called by hardware driver when HDLC device is being closed */ +void hdlc_close(struct net_device *dev) +{ + hdlc_device *hdlc = dev_to_hdlc(dev); +#ifdef DEBUG_LINK + printk(KERN_DEBUG "%s: hdlc_close() carrier %i open %i\n", dev->name, + hdlc->carrier, hdlc->open); +#endif + + spin_lock_irq(&hdlc->state_lock); + + hdlc->open = 0; + if (hdlc->carrier) + hdlc_proto_stop(dev); + + spin_unlock_irq(&hdlc->state_lock); + + if (hdlc->proto->close) + hdlc->proto->close(dev); +} +EXPORT_SYMBOL(hdlc_close); + +int hdlc_ioctl(struct net_device *dev, struct if_settings *ifs) +{ + struct hdlc_proto *proto = first_proto; + int result; + + if (dev_to_hdlc(dev)->proto) { + result = dev_to_hdlc(dev)->proto->ioctl(dev, ifs); + if (result != -EINVAL) + return result; + } + + /* Not handled by currently attached protocol (if any) */ + + while (proto) { + result = proto->ioctl(dev, ifs); + if (result != -EINVAL) + return result; + proto = proto->next; + } + return -EINVAL; +} +EXPORT_SYMBOL(hdlc_ioctl); + +static const struct header_ops hdlc_null_ops; + +static void hdlc_setup_dev(struct net_device *dev) +{ + /* Re-init all variables changed by HDLC protocol drivers, + * including ether_setup() called from hdlc_raw_eth.c. + */ + dev->flags = IFF_POINTOPOINT | IFF_NOARP; + dev->priv_flags = IFF_WAN_HDLC; + dev->mtu = HDLC_MAX_MTU; + dev->min_mtu = 68; + dev->max_mtu = HDLC_MAX_MTU; + dev->type = ARPHRD_RAWHDLC; + dev->hard_header_len = 0; + dev->needed_headroom = 0; + dev->addr_len = 0; + dev->header_ops = &hdlc_null_ops; +} + +static void hdlc_setup(struct net_device *dev) +{ + hdlc_device *hdlc = dev_to_hdlc(dev); + + hdlc_setup_dev(dev); + hdlc->carrier = 1; + hdlc->open = 0; + spin_lock_init(&hdlc->state_lock); +} + +struct net_device *alloc_hdlcdev(void *priv) +{ + struct net_device *dev; + + dev = alloc_netdev(sizeof(struct hdlc_device), "hdlc%d", + NET_NAME_UNKNOWN, hdlc_setup); + if (dev) + dev_to_hdlc(dev)->priv = priv; + return dev; +} +EXPORT_SYMBOL(alloc_hdlcdev); + +void unregister_hdlc_device(struct net_device *dev) +{ + rtnl_lock(); + detach_hdlc_protocol(dev); + unregister_netdevice(dev); + rtnl_unlock(); +} +EXPORT_SYMBOL(unregister_hdlc_device); + +int attach_hdlc_protocol(struct net_device *dev, struct hdlc_proto *proto, + size_t size) +{ + int err; + + err = detach_hdlc_protocol(dev); + if (err) + return err; + + if (!try_module_get(proto->module)) + return -ENOSYS; + + if (size) { + dev_to_hdlc(dev)->state = kmalloc(size, GFP_KERNEL); + if (!dev_to_hdlc(dev)->state) { + module_put(proto->module); + return -ENOBUFS; + } + } + dev_to_hdlc(dev)->proto = proto; + + return 0; +} +EXPORT_SYMBOL(attach_hdlc_protocol); + +int detach_hdlc_protocol(struct net_device *dev) +{ + hdlc_device *hdlc = dev_to_hdlc(dev); + int err; + + if (hdlc->proto) { + err = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE, dev); + err = notifier_to_errno(err); + if (err) { + netdev_err(dev, "Refused to change device type\n"); + return err; + } + + if (hdlc->proto->detach) + hdlc->proto->detach(dev); + module_put(hdlc->proto->module); + hdlc->proto = NULL; + } + kfree(hdlc->state); + hdlc->state = NULL; + hdlc_setup_dev(dev); + + return 0; +} +EXPORT_SYMBOL(detach_hdlc_protocol); + +void register_hdlc_protocol(struct hdlc_proto *proto) +{ + rtnl_lock(); + proto->next = first_proto; + first_proto = proto; + 
rtnl_unlock(); +} +EXPORT_SYMBOL(register_hdlc_protocol); + +void unregister_hdlc_protocol(struct hdlc_proto *proto) +{ + struct hdlc_proto **p; + + rtnl_lock(); + p = &first_proto; + while (*p != proto) { + BUG_ON(!*p); + p = &((*p)->next); + } + *p = proto->next; + rtnl_unlock(); +} +EXPORT_SYMBOL(unregister_hdlc_protocol); + +MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>"); +MODULE_DESCRIPTION("HDLC support module"); +MODULE_LICENSE("GPL v2"); + +static struct packet_type hdlc_packet_type __read_mostly = { + .type = cpu_to_be16(ETH_P_HDLC), + .func = hdlc_rcv, +}; + +static struct notifier_block hdlc_notifier = { + .notifier_call = hdlc_device_event, +}; + +static int __init hdlc_module_init(void) +{ + int result; + + pr_info("%s\n", version); + result = register_netdevice_notifier(&hdlc_notifier); + if (result) + return result; + dev_add_pack(&hdlc_packet_type); + return 0; +} + +static void __exit hdlc_module_exit(void) +{ + dev_remove_pack(&hdlc_packet_type); + unregister_netdevice_notifier(&hdlc_notifier); +} + +module_init(hdlc_module_init); +module_exit(hdlc_module_exit); diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c new file mode 100644 index 000000000..cdebe65a7 --- /dev/null +++ b/drivers/net/wan/hdlc_cisco.c @@ -0,0 +1,383 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Generic HDLC support routines for Linux + * Cisco HDLC support + * + * Copyright (C) 2000 - 2006 Krzysztof Halasa <khc@pm.waw.pl> + */ + +#include <linux/errno.h> +#include <linux/hdlc.h> +#include <linux/if_arp.h> +#include <linux/inetdevice.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pkt_sched.h> +#include <linux/poll.h> +#include <linux/rtnetlink.h> +#include <linux/skbuff.h> + +#undef DEBUG_HARD_HEADER + +#define CISCO_MULTICAST 0x8F /* Cisco multicast address */ +#define CISCO_UNICAST 0x0F /* Cisco unicast address */ +#define CISCO_KEEPALIVE 0x8035 /* Cisco keepalive protocol */ +#define CISCO_SYS_INFO 0x2000 /* Cisco interface/system info */ +#define CISCO_ADDR_REQ 0 /* Cisco address request */ +#define CISCO_ADDR_REPLY 1 /* Cisco address reply */ +#define CISCO_KEEPALIVE_REQ 2 /* Cisco keepalive request */ + +struct hdlc_header { + u8 address; + u8 control; + __be16 protocol; +} __packed; + +struct cisco_packet { + __be32 type; /* code */ + __be32 par1; + __be32 par2; + __be16 rel; /* reliability */ + __be32 time; +} __packed; +#define CISCO_PACKET_LEN 18 +#define CISCO_BIG_PACKET_LEN 20 + +struct cisco_state { + cisco_proto settings; + + struct timer_list timer; + struct net_device *dev; + spinlock_t lock; + unsigned long last_poll; + int up; + u32 txseq; /* TX sequence number, 0 = none */ + u32 rxseq; /* RX sequence number */ +}; + +static int cisco_ioctl(struct net_device *dev, struct if_settings *ifs); + +static inline struct cisco_state *state(hdlc_device *hdlc) +{ + return (struct cisco_state *)hdlc->state; +} + +static int cisco_hard_header(struct sk_buff *skb, struct net_device *dev, + u16 type, const void *daddr, const void *saddr, + unsigned int len) +{ + struct hdlc_header *data; +#ifdef DEBUG_HARD_HEADER + netdev_dbg(dev, "%s called\n", __func__); +#endif + + skb_push(skb, sizeof(struct hdlc_header)); + data = (struct hdlc_header *)skb->data; + if (type == CISCO_KEEPALIVE) + data->address = CISCO_MULTICAST; + else + data->address = CISCO_UNICAST; + data->control = 0; + data->protocol = htons(type); + + return sizeof(struct hdlc_header); +} + +static void cisco_keepalive_send(struct net_device *dev, u32 
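+/* Builds the 18-byte (CISCO_PACKET_LEN) keepalive body that follows
+ * the 4-byte HDLC header: type, par1/par2 (our TX sequence and the
+ * last seen RX sequence for keepalive requests), reliability 0xFFFF
+ * and an uptime timestamp in milliseconds.
+ */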
type, + __be32 par1, __be32 par2) +{ + struct sk_buff *skb; + struct cisco_packet *data; + + skb = dev_alloc_skb(sizeof(struct hdlc_header) + + sizeof(struct cisco_packet)); + if (!skb) + return; + + skb_reserve(skb, 4); + cisco_hard_header(skb, dev, CISCO_KEEPALIVE, NULL, NULL, 0); + data = (struct cisco_packet *)(skb->data + 4); + + data->type = htonl(type); + data->par1 = par1; + data->par2 = par2; + data->rel = cpu_to_be16(0xFFFF); + /* we will need do_div here if 1000 % HZ != 0 */ + data->time = htonl((jiffies - INITIAL_JIFFIES) * (1000 / HZ)); + + skb_put(skb, sizeof(struct cisco_packet)); + skb->priority = TC_PRIO_CONTROL; + skb->dev = dev; + skb->protocol = htons(ETH_P_HDLC); + skb_reset_network_header(skb); + + dev_queue_xmit(skb); +} + +static __be16 cisco_type_trans(struct sk_buff *skb, struct net_device *dev) +{ + struct hdlc_header *data = (struct hdlc_header *)skb->data; + + if (skb->len < sizeof(struct hdlc_header)) + return cpu_to_be16(ETH_P_HDLC); + + if (data->address != CISCO_MULTICAST && + data->address != CISCO_UNICAST) + return cpu_to_be16(ETH_P_HDLC); + + switch (data->protocol) { + case cpu_to_be16(ETH_P_IP): + case cpu_to_be16(ETH_P_IPX): + case cpu_to_be16(ETH_P_IPV6): + skb_pull(skb, sizeof(struct hdlc_header)); + return data->protocol; + default: + return cpu_to_be16(ETH_P_HDLC); + } +} + +static int cisco_rx(struct sk_buff *skb) +{ + struct net_device *dev = skb->dev; + hdlc_device *hdlc = dev_to_hdlc(dev); + struct cisco_state *st = state(hdlc); + struct hdlc_header *data = (struct hdlc_header *)skb->data; + struct cisco_packet *cisco_data; + struct in_device *in_dev; + __be32 addr, mask; + u32 ack; + + if (skb->len < sizeof(struct hdlc_header)) + goto rx_error; + + if (data->address != CISCO_MULTICAST && + data->address != CISCO_UNICAST) + goto rx_error; + + switch (ntohs(data->protocol)) { + case CISCO_SYS_INFO: + /* Packet is not needed, drop it. */ + dev_kfree_skb_any(skb); + return NET_RX_SUCCESS; + + case CISCO_KEEPALIVE: + if ((skb->len != sizeof(struct hdlc_header) + + CISCO_PACKET_LEN) && + (skb->len != sizeof(struct hdlc_header) + + CISCO_BIG_PACKET_LEN)) { + netdev_info(dev, "Invalid length of Cisco control packet (%d bytes)\n", + skb->len); + goto rx_error; + } + + cisco_data = (struct cisco_packet *)(skb->data + sizeof + (struct hdlc_header)); + + switch (ntohl(cisco_data->type)) { + case CISCO_ADDR_REQ: /* Stolen from syncppp.c :-) */ + rcu_read_lock(); + in_dev = __in_dev_get_rcu(dev); + addr = 0; + mask = ~cpu_to_be32(0); /* is the mask correct? 
*/ + + if (in_dev != NULL) { + const struct in_ifaddr *ifa; + + in_dev_for_each_ifa_rcu(ifa, in_dev) { + if (strcmp(dev->name, + ifa->ifa_label) == 0) { + addr = ifa->ifa_local; + mask = ifa->ifa_mask; + break; + } + } + + cisco_keepalive_send(dev, CISCO_ADDR_REPLY, + addr, mask); + } + rcu_read_unlock(); + dev_kfree_skb_any(skb); + return NET_RX_SUCCESS; + + case CISCO_ADDR_REPLY: + netdev_info(dev, "Unexpected Cisco IP address reply\n"); + goto rx_error; + + case CISCO_KEEPALIVE_REQ: + spin_lock(&st->lock); + st->rxseq = ntohl(cisco_data->par1); + ack = ntohl(cisco_data->par2); + if (ack && (ack == st->txseq || + /* our current REQ may be in transit */ + ack == st->txseq - 1)) { + st->last_poll = jiffies; + if (!st->up) { + u32 sec, min, hrs, days; + + sec = ntohl(cisco_data->time) / 1000; + min = sec / 60; sec -= min * 60; + hrs = min / 60; min -= hrs * 60; + days = hrs / 24; hrs -= days * 24; + netdev_info(dev, "Link up (peer uptime %ud%uh%um%us)\n", + days, hrs, min, sec); + netif_dormant_off(dev); + st->up = 1; + } + } + spin_unlock(&st->lock); + + dev_kfree_skb_any(skb); + return NET_RX_SUCCESS; + } /* switch (keepalive type) */ + } /* switch (protocol) */ + + netdev_info(dev, "Unsupported protocol %x\n", ntohs(data->protocol)); + dev_kfree_skb_any(skb); + return NET_RX_DROP; + +rx_error: + dev->stats.rx_errors++; /* Mark error */ + dev_kfree_skb_any(skb); + return NET_RX_DROP; +} + +static void cisco_timer(struct timer_list *t) +{ + struct cisco_state *st = from_timer(st, t, timer); + struct net_device *dev = st->dev; + + spin_lock(&st->lock); + if (st->up && + time_after(jiffies, st->last_poll + st->settings.timeout * HZ)) { + st->up = 0; + netdev_info(dev, "Link down\n"); + netif_dormant_on(dev); + } + + cisco_keepalive_send(dev, CISCO_KEEPALIVE_REQ, htonl(++st->txseq), + htonl(st->rxseq)); + spin_unlock(&st->lock); + + st->timer.expires = jiffies + st->settings.interval * HZ; + add_timer(&st->timer); +} + +static void cisco_start(struct net_device *dev) +{ + hdlc_device *hdlc = dev_to_hdlc(dev); + struct cisco_state *st = state(hdlc); + unsigned long flags; + + spin_lock_irqsave(&st->lock, flags); + st->up = st->txseq = st->rxseq = 0; + spin_unlock_irqrestore(&st->lock, flags); + + st->dev = dev; + timer_setup(&st->timer, cisco_timer, 0); + st->timer.expires = jiffies + HZ; /* First poll after 1 s */ + add_timer(&st->timer); +} + +static void cisco_stop(struct net_device *dev) +{ + hdlc_device *hdlc = dev_to_hdlc(dev); + struct cisco_state *st = state(hdlc); + unsigned long flags; + + del_timer_sync(&st->timer); + + spin_lock_irqsave(&st->lock, flags); + netif_dormant_on(dev); + st->up = st->txseq = 0; + spin_unlock_irqrestore(&st->lock, flags); +} + +static struct hdlc_proto proto = { + .start = cisco_start, + .stop = cisco_stop, + .type_trans = cisco_type_trans, + .ioctl = cisco_ioctl, + .netif_rx = cisco_rx, + .module = THIS_MODULE, +}; + +static const struct header_ops cisco_header_ops = { + .create = cisco_hard_header, +}; + +static int cisco_ioctl(struct net_device *dev, struct if_settings *ifs) +{ + cisco_proto __user *cisco_s = ifs->ifs_ifsu.cisco; + const size_t size = sizeof(cisco_proto); + cisco_proto new_settings; + hdlc_device *hdlc = dev_to_hdlc(dev); + int result; + + switch (ifs->type) { + case IF_GET_PROTO: + if (dev_to_hdlc(dev)->proto != &proto) + return -EINVAL; + ifs->type = IF_PROTO_CISCO; + if (ifs->size < size) { + ifs->size = size; /* data size wanted */ + return -ENOBUFS; + } + if (copy_to_user(cisco_s, &state(hdlc)->settings, size)) + return -EFAULT; + 
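+		/* Settings copied out.  Userspace may first call with
+		 * ifs->size too small, learn the required size from the
+		 * -ENOBUFS reply above, and retry with a buffer of at
+		 * least sizeof(cisco_proto).
+		 */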
return 0; + + case IF_PROTO_CISCO: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + if (dev->flags & IFF_UP) + return -EBUSY; + + if (copy_from_user(&new_settings, cisco_s, size)) + return -EFAULT; + + if (new_settings.interval < 1 || + new_settings.timeout < 2) + return -EINVAL; + + result = hdlc->attach(dev, ENCODING_NRZ, + PARITY_CRC16_PR1_CCITT); + if (result) + return result; + + result = attach_hdlc_protocol(dev, &proto, + sizeof(struct cisco_state)); + if (result) + return result; + + memcpy(&state(hdlc)->settings, &new_settings, size); + spin_lock_init(&state(hdlc)->lock); + dev->header_ops = &cisco_header_ops; + dev->hard_header_len = sizeof(struct hdlc_header); + dev->type = ARPHRD_CISCO; + call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev); + netif_dormant_on(dev); + return 0; + } + + return -EINVAL; +} + +static int __init hdlc_cisco_init(void) +{ + register_hdlc_protocol(&proto); + return 0; +} + +static void __exit hdlc_cisco_exit(void) +{ + unregister_hdlc_protocol(&proto); +} + +module_init(hdlc_cisco_init); +module_exit(hdlc_cisco_exit); + +MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>"); +MODULE_DESCRIPTION("Cisco HDLC protocol support for generic HDLC"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c new file mode 100644 index 000000000..81e72bc18 --- /dev/null +++ b/drivers/net/wan/hdlc_fr.c @@ -0,0 +1,1300 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Generic HDLC support routines for Linux + * Frame Relay support + * + * Copyright (C) 1999 - 2006 Krzysztof Halasa <khc@pm.waw.pl> + * + + Theory of PVC state + + DCE mode: + + (exist,new) -> 0,0 when "PVC create" or if "link unreliable" + 0,x -> 1,1 if "link reliable" when sending FULL STATUS + 1,1 -> 1,0 if received FULL STATUS ACK + + (active) -> 0 when "ifconfig PVC down" or "link unreliable" or "PVC create" + -> 1 when "PVC up" and (exist,new) = 1,0 + + DTE mode: + (exist,new,active) = FULL STATUS if "link reliable" + = 0, 0, 0 if "link unreliable" + No LMI: + active = open and "link reliable" + exist = new = not used + + CCITT LMI: ITU-T Q.933 Annex A + ANSI LMI: ANSI T1.617 Annex D + CISCO LMI: the original, aka "Gang of Four" LMI + +*/ + +#include <linux/errno.h> +#include <linux/etherdevice.h> +#include <linux/hdlc.h> +#include <linux/if_arp.h> +#include <linux/inetdevice.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pkt_sched.h> +#include <linux/poll.h> +#include <linux/rtnetlink.h> +#include <linux/skbuff.h> +#include <linux/slab.h> + +#undef DEBUG_PKT +#undef DEBUG_ECN +#undef DEBUG_LINK +#undef DEBUG_PROTO +#undef DEBUG_PVC + +#define FR_UI 0x03 +#define FR_PAD 0x00 + +#define NLPID_IP 0xCC +#define NLPID_IPV6 0x8E +#define NLPID_SNAP 0x80 +#define NLPID_PAD 0x00 +#define NLPID_CCITT_ANSI_LMI 0x08 +#define NLPID_CISCO_LMI 0x09 + +#define LMI_CCITT_ANSI_DLCI 0 /* LMI DLCI */ +#define LMI_CISCO_DLCI 1023 + +#define LMI_CALLREF 0x00 /* Call Reference */ +#define LMI_ANSI_LOCKSHIFT 0x95 /* ANSI locking shift */ +#define LMI_ANSI_CISCO_REPTYPE 0x01 /* report type */ +#define LMI_CCITT_REPTYPE 0x51 +#define LMI_ANSI_CISCO_ALIVE 0x03 /* keep alive */ +#define LMI_CCITT_ALIVE 0x53 +#define LMI_ANSI_CISCO_PVCSTAT 0x07 /* PVC status */ +#define LMI_CCITT_PVCSTAT 0x57 + +#define LMI_FULLREP 0x00 /* full report */ +#define LMI_INTEGRITY 0x01 /* link integrity report */ +#define LMI_SINGLE 0x02 /* single PVC report */ + +#define LMI_STATUS_ENQUIRY 0x75 +#define LMI_STATUS 0x7D /* reply */ + +#define 
LMI_REPT_LEN 1 /* report type element length */ +#define LMI_INTEG_LEN 2 /* link integrity element length */ + +#define LMI_CCITT_CISCO_LENGTH 13 /* LMI frame lengths */ +#define LMI_ANSI_LENGTH 14 + +struct fr_hdr { +#if defined(__LITTLE_ENDIAN_BITFIELD) + unsigned ea1: 1; + unsigned cr: 1; + unsigned dlcih: 6; + + unsigned ea2: 1; + unsigned de: 1; + unsigned becn: 1; + unsigned fecn: 1; + unsigned dlcil: 4; +#else + unsigned dlcih: 6; + unsigned cr: 1; + unsigned ea1: 1; + + unsigned dlcil: 4; + unsigned fecn: 1; + unsigned becn: 1; + unsigned de: 1; + unsigned ea2: 1; +#endif +} __packed; + +struct pvc_device { + struct net_device *frad; + struct net_device *main; + struct net_device *ether; /* bridged Ethernet interface */ + struct pvc_device *next; /* Sorted in ascending DLCI order */ + int dlci; + int open_count; + + struct { + unsigned int new: 1; + unsigned int active: 1; + unsigned int exist: 1; + unsigned int deleted: 1; + unsigned int fecn: 1; + unsigned int becn: 1; + unsigned int bandwidth; /* Cisco LMI reporting only */ + } state; +}; + +struct frad_state { + fr_proto settings; + struct pvc_device *first_pvc; + int dce_pvc_count; + + struct timer_list timer; + struct net_device *dev; + unsigned long last_poll; + int reliable; + int dce_changed; + int request; + int fullrep_sent; + u32 last_errors; /* last errors bit list */ + u8 n391cnt; + u8 txseq; /* TX sequence number */ + u8 rxseq; /* RX sequence number */ +}; + +static int fr_ioctl(struct net_device *dev, struct if_settings *ifs); + +static inline u16 q922_to_dlci(u8 *hdr) +{ + return ((hdr[0] & 0xFC) << 2) | ((hdr[1] & 0xF0) >> 4); +} + +static inline void dlci_to_q922(u8 *hdr, u16 dlci) +{ + hdr[0] = (dlci >> 2) & 0xFC; + hdr[1] = ((dlci << 4) & 0xF0) | 0x01; +} + +static inline struct frad_state *state(hdlc_device *hdlc) +{ + return (struct frad_state *)(hdlc->state); +} + +static inline struct pvc_device *find_pvc(hdlc_device *hdlc, u16 dlci) +{ + struct pvc_device *pvc = state(hdlc)->first_pvc; + + while (pvc) { + if (pvc->dlci == dlci) + return pvc; + if (pvc->dlci > dlci) + return NULL; /* the list is sorted */ + pvc = pvc->next; + } + + return NULL; +} + +static struct pvc_device *add_pvc(struct net_device *dev, u16 dlci) +{ + hdlc_device *hdlc = dev_to_hdlc(dev); + struct pvc_device *pvc, **pvc_p = &state(hdlc)->first_pvc; + + while (*pvc_p) { + if ((*pvc_p)->dlci == dlci) + return *pvc_p; + if ((*pvc_p)->dlci > dlci) + break; /* the list is sorted */ + pvc_p = &(*pvc_p)->next; + } + + pvc = kzalloc(sizeof(*pvc), GFP_ATOMIC); +#ifdef DEBUG_PVC + printk(KERN_DEBUG "add_pvc: allocated pvc %p, frad %p\n", pvc, dev); +#endif + if (!pvc) + return NULL; + + pvc->dlci = dlci; + pvc->frad = dev; + pvc->next = *pvc_p; /* Put it in the chain */ + *pvc_p = pvc; + return pvc; +} + +static inline int pvc_is_used(struct pvc_device *pvc) +{ + return pvc->main || pvc->ether; +} + +static inline void pvc_carrier(int on, struct pvc_device *pvc) +{ + if (on) { + if (pvc->main) + if (!netif_carrier_ok(pvc->main)) + netif_carrier_on(pvc->main); + if (pvc->ether) + if (!netif_carrier_ok(pvc->ether)) + netif_carrier_on(pvc->ether); + } else { + if (pvc->main) + if (netif_carrier_ok(pvc->main)) + netif_carrier_off(pvc->main); + if (pvc->ether) + if (netif_carrier_ok(pvc->ether)) + netif_carrier_off(pvc->ether); + } +} + +static inline void delete_unused_pvcs(hdlc_device *hdlc) +{ + struct pvc_device **pvc_p = &state(hdlc)->first_pvc; + + while (*pvc_p) { + if (!pvc_is_used(*pvc_p)) { + struct pvc_device *pvc = *pvc_p; +#ifdef 
DEBUG_PVC + printk(KERN_DEBUG "freeing unused pvc: %p\n", pvc); +#endif + *pvc_p = pvc->next; + kfree(pvc); + continue; + } + pvc_p = &(*pvc_p)->next; + } +} + +static inline struct net_device **get_dev_p(struct pvc_device *pvc, + int type) +{ + if (type == ARPHRD_ETHER) + return &pvc->ether; + else + return &pvc->main; +} + +static int fr_hard_header(struct sk_buff *skb, u16 dlci) +{ + if (!skb->dev) { /* Control packets */ + switch (dlci) { + case LMI_CCITT_ANSI_DLCI: + skb_push(skb, 4); + skb->data[3] = NLPID_CCITT_ANSI_LMI; + break; + + case LMI_CISCO_DLCI: + skb_push(skb, 4); + skb->data[3] = NLPID_CISCO_LMI; + break; + + default: + return -EINVAL; + } + + } else if (skb->dev->type == ARPHRD_DLCI) { + switch (skb->protocol) { + case htons(ETH_P_IP): + skb_push(skb, 4); + skb->data[3] = NLPID_IP; + break; + + case htons(ETH_P_IPV6): + skb_push(skb, 4); + skb->data[3] = NLPID_IPV6; + break; + + default: + skb_push(skb, 10); + skb->data[3] = FR_PAD; + skb->data[4] = NLPID_SNAP; + /* OUI 00-00-00 indicates an Ethertype follows */ + skb->data[5] = 0x00; + skb->data[6] = 0x00; + skb->data[7] = 0x00; + /* This should be an Ethertype: */ + *(__be16 *)(skb->data + 8) = skb->protocol; + } + + } else if (skb->dev->type == ARPHRD_ETHER) { + skb_push(skb, 10); + skb->data[3] = FR_PAD; + skb->data[4] = NLPID_SNAP; + /* OUI 00-80-C2 stands for the 802.1 organization */ + skb->data[5] = 0x00; + skb->data[6] = 0x80; + skb->data[7] = 0xC2; + /* PID 00-07 stands for Ethernet frames without FCS */ + skb->data[8] = 0x00; + skb->data[9] = 0x07; + + } else { + return -EINVAL; + } + + dlci_to_q922(skb->data, dlci); + skb->data[2] = FR_UI; + return 0; +} + +static int pvc_open(struct net_device *dev) +{ + struct pvc_device *pvc = dev->ml_priv; + + if ((pvc->frad->flags & IFF_UP) == 0) + return -EIO; /* Frad must be UP in order to activate PVC */ + + if (pvc->open_count++ == 0) { + hdlc_device *hdlc = dev_to_hdlc(pvc->frad); + + if (state(hdlc)->settings.lmi == LMI_NONE) + pvc->state.active = netif_carrier_ok(pvc->frad); + + pvc_carrier(pvc->state.active, pvc); + state(hdlc)->dce_changed = 1; + } + return 0; +} + +static int pvc_close(struct net_device *dev) +{ + struct pvc_device *pvc = dev->ml_priv; + + if (--pvc->open_count == 0) { + hdlc_device *hdlc = dev_to_hdlc(pvc->frad); + + if (state(hdlc)->settings.lmi == LMI_NONE) + pvc->state.active = 0; + + if (state(hdlc)->settings.dce) { + state(hdlc)->dce_changed = 1; + pvc->state.active = 0; + } + } + return 0; +} + +static int pvc_ioctl(struct net_device *dev, struct if_settings *ifs) +{ + struct pvc_device *pvc = dev->ml_priv; + fr_proto_pvc_info info; + + if (ifs->type == IF_GET_PROTO) { + if (dev->type == ARPHRD_ETHER) + ifs->type = IF_PROTO_FR_ETH_PVC; + else + ifs->type = IF_PROTO_FR_PVC; + + if (ifs->size < sizeof(info)) { + /* data size wanted */ + ifs->size = sizeof(info); + return -ENOBUFS; + } + + info.dlci = pvc->dlci; + memcpy(info.master, pvc->frad->name, IFNAMSIZ); + if (copy_to_user(ifs->ifs_ifsu.fr_pvc_info, + &info, sizeof(info))) + return -EFAULT; + return 0; + } + + return -EINVAL; +} + +static netdev_tx_t pvc_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct pvc_device *pvc = dev->ml_priv; + + if (!pvc->state.active) + goto drop; + + if (dev->type == ARPHRD_ETHER) { + int pad = ETH_ZLEN - skb->len; + + if (pad > 0) { /* Pad the frame with zeros */ + if (__skb_pad(skb, pad, false)) + goto drop; + skb_put(skb, pad); + } + } + + /* We already requested the header space with dev->needed_headroom. 
+ * So this is just a protection in case the upper layer didn't take + * dev->needed_headroom into consideration. + */ + if (skb_headroom(skb) < 10) { + struct sk_buff *skb2 = skb_realloc_headroom(skb, 10); + + if (!skb2) + goto drop; + dev_kfree_skb(skb); + skb = skb2; + } + + skb->dev = dev; + if (fr_hard_header(skb, pvc->dlci)) + goto drop; + + dev->stats.tx_bytes += skb->len; + dev->stats.tx_packets++; + if (pvc->state.fecn) /* TX Congestion counter */ + dev->stats.tx_compressed++; + skb->dev = pvc->frad; + skb->protocol = htons(ETH_P_HDLC); + skb_reset_network_header(skb); + dev_queue_xmit(skb); + return NETDEV_TX_OK; + +drop: + dev->stats.tx_dropped++; + kfree_skb(skb); + return NETDEV_TX_OK; +} + +static inline void fr_log_dlci_active(struct pvc_device *pvc) +{ + netdev_info(pvc->frad, "DLCI %d [%s%s%s]%s %s\n", + pvc->dlci, + pvc->main ? pvc->main->name : "", + pvc->main && pvc->ether ? " " : "", + pvc->ether ? pvc->ether->name : "", + pvc->state.new ? " new" : "", + !pvc->state.exist ? "deleted" : + pvc->state.active ? "active" : "inactive"); +} + +static inline u8 fr_lmi_nextseq(u8 x) +{ + x++; + return x ? x : 1; +} + +static void fr_lmi_send(struct net_device *dev, int fullrep) +{ + hdlc_device *hdlc = dev_to_hdlc(dev); + struct sk_buff *skb; + struct pvc_device *pvc = state(hdlc)->first_pvc; + int lmi = state(hdlc)->settings.lmi; + int dce = state(hdlc)->settings.dce; + int len = lmi == LMI_ANSI ? LMI_ANSI_LENGTH : LMI_CCITT_CISCO_LENGTH; + int stat_len = (lmi == LMI_CISCO) ? 6 : 3; + u8 *data; + int i = 0; + + if (dce && fullrep) { + len += state(hdlc)->dce_pvc_count * (2 + stat_len); + if (len > HDLC_MAX_MRU) { + netdev_warn(dev, "Too many PVCs while sending LMI full report\n"); + return; + } + } + + skb = dev_alloc_skb(len); + if (!skb) + return; + + memset(skb->data, 0, len); + skb_reserve(skb, 4); + if (lmi == LMI_CISCO) + fr_hard_header(skb, LMI_CISCO_DLCI); + else + fr_hard_header(skb, LMI_CCITT_ANSI_DLCI); + + data = skb_tail_pointer(skb); + data[i++] = LMI_CALLREF; + data[i++] = dce ? LMI_STATUS : LMI_STATUS_ENQUIRY; + if (lmi == LMI_ANSI) + data[i++] = LMI_ANSI_LOCKSHIFT; + data[i++] = lmi == LMI_CCITT ? LMI_CCITT_REPTYPE : + LMI_ANSI_CISCO_REPTYPE; + data[i++] = LMI_REPT_LEN; + data[i++] = fullrep ? LMI_FULLREP : LMI_INTEGRITY; + data[i++] = lmi == LMI_CCITT ? LMI_CCITT_ALIVE : LMI_ANSI_CISCO_ALIVE; + data[i++] = LMI_INTEG_LEN; + data[i++] = state(hdlc)->txseq = + fr_lmi_nextseq(state(hdlc)->txseq); + data[i++] = state(hdlc)->rxseq; + + if (dce && fullrep) { + while (pvc) { + data[i++] = lmi == LMI_CCITT ? 
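+			/* One status IE per PVC: the DLCI encoding
+			 * depends on the LMI flavour (see below), and
+			 * the status octet carries 0x08 = new,
+			 * 0x02 = active.
+			 */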
LMI_CCITT_PVCSTAT : + LMI_ANSI_CISCO_PVCSTAT; + data[i++] = stat_len; + + /* LMI start/restart */ + if (state(hdlc)->reliable && !pvc->state.exist) { + pvc->state.exist = pvc->state.new = 1; + fr_log_dlci_active(pvc); + } + + /* ifconfig PVC up */ + if (pvc->open_count && !pvc->state.active && + pvc->state.exist && !pvc->state.new) { + pvc_carrier(1, pvc); + pvc->state.active = 1; + fr_log_dlci_active(pvc); + } + + if (lmi == LMI_CISCO) { + data[i] = pvc->dlci >> 8; + data[i + 1] = pvc->dlci & 0xFF; + } else { + data[i] = (pvc->dlci >> 4) & 0x3F; + data[i + 1] = ((pvc->dlci << 3) & 0x78) | 0x80; + data[i + 2] = 0x80; + } + + if (pvc->state.new) + data[i + 2] |= 0x08; + else if (pvc->state.active) + data[i + 2] |= 0x02; + + i += stat_len; + pvc = pvc->next; + } + } + + skb_put(skb, i); + skb->priority = TC_PRIO_CONTROL; + skb->dev = dev; + skb->protocol = htons(ETH_P_HDLC); + skb_reset_network_header(skb); + + dev_queue_xmit(skb); +} + +static void fr_set_link_state(int reliable, struct net_device *dev) +{ + hdlc_device *hdlc = dev_to_hdlc(dev); + struct pvc_device *pvc = state(hdlc)->first_pvc; + + state(hdlc)->reliable = reliable; + if (reliable) { + netif_dormant_off(dev); + state(hdlc)->n391cnt = 0; /* Request full status */ + state(hdlc)->dce_changed = 1; + + if (state(hdlc)->settings.lmi == LMI_NONE) { + while (pvc) { /* Activate all PVCs */ + pvc_carrier(1, pvc); + pvc->state.exist = pvc->state.active = 1; + pvc->state.new = 0; + pvc = pvc->next; + } + } + } else { + netif_dormant_on(dev); + while (pvc) { /* Deactivate all PVCs */ + pvc_carrier(0, pvc); + pvc->state.exist = pvc->state.active = 0; + pvc->state.new = 0; + if (!state(hdlc)->settings.dce) + pvc->state.bandwidth = 0; + pvc = pvc->next; + } + } +} + +static void fr_timer(struct timer_list *t) +{ + struct frad_state *st = from_timer(st, t, timer); + struct net_device *dev = st->dev; + hdlc_device *hdlc = dev_to_hdlc(dev); + int i, cnt = 0, reliable; + u32 list; + + if (state(hdlc)->settings.dce) { + reliable = state(hdlc)->request && + time_before(jiffies, state(hdlc)->last_poll + + state(hdlc)->settings.t392 * HZ); + state(hdlc)->request = 0; + } else { + state(hdlc)->last_errors <<= 1; /* Shift the list */ + if (state(hdlc)->request) { + if (state(hdlc)->reliable) + netdev_info(dev, "No LMI status reply received\n"); + state(hdlc)->last_errors |= 1; + } + + list = state(hdlc)->last_errors; + for (i = 0; i < state(hdlc)->settings.n393; i++, list >>= 1) + cnt += (list & 1); /* errors count */ + + reliable = (cnt < state(hdlc)->settings.n392); + } + + if (state(hdlc)->reliable != reliable) { + netdev_info(dev, "Link %sreliable\n", reliable ? "" : "un"); + fr_set_link_state(reliable, dev); + } + + if (state(hdlc)->settings.dce) { + state(hdlc)->timer.expires = jiffies + + state(hdlc)->settings.t392 * HZ; + } else { + if (state(hdlc)->n391cnt) + state(hdlc)->n391cnt--; + + fr_lmi_send(dev, state(hdlc)->n391cnt == 0); + + state(hdlc)->last_poll = jiffies; + state(hdlc)->request = 1; + state(hdlc)->timer.expires = jiffies + + state(hdlc)->settings.t391 * HZ; + } + + add_timer(&state(hdlc)->timer); +} + +static int fr_lmi_recv(struct net_device *dev, struct sk_buff *skb) +{ + hdlc_device *hdlc = dev_to_hdlc(dev); + struct pvc_device *pvc; + u8 rxseq, txseq; + int lmi = state(hdlc)->settings.lmi; + int dce = state(hdlc)->settings.dce; + int stat_len = (lmi == LMI_CISCO) ? 6 : 3, reptype, error, no_ram, i; + + if (skb->len < (lmi == LMI_ANSI ? 
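+ /* Anything shorter than a status message with no PVC status IEs cannot be valid. */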
LMI_ANSI_LENGTH : + LMI_CCITT_CISCO_LENGTH)) { + netdev_info(dev, "Short LMI frame\n"); + return 1; + } + + if (skb->data[3] != (lmi == LMI_CISCO ? NLPID_CISCO_LMI : + NLPID_CCITT_ANSI_LMI)) { + netdev_info(dev, "Received non-LMI frame with LMI DLCI\n"); + return 1; + } + + if (skb->data[4] != LMI_CALLREF) { + netdev_info(dev, "Invalid LMI Call reference (0x%02X)\n", + skb->data[4]); + return 1; + } + + if (skb->data[5] != (dce ? LMI_STATUS_ENQUIRY : LMI_STATUS)) { + netdev_info(dev, "Invalid LMI Message type (0x%02X)\n", + skb->data[5]); + return 1; + } + + if (lmi == LMI_ANSI) { + if (skb->data[6] != LMI_ANSI_LOCKSHIFT) { + netdev_info(dev, "Not ANSI locking shift in LMI message (0x%02X)\n", + skb->data[6]); + return 1; + } + i = 7; + } else { + i = 6; + } + + if (skb->data[i] != (lmi == LMI_CCITT ? LMI_CCITT_REPTYPE : + LMI_ANSI_CISCO_REPTYPE)) { + netdev_info(dev, "Not an LMI Report type IE (0x%02X)\n", + skb->data[i]); + return 1; + } + + if (skb->data[++i] != LMI_REPT_LEN) { + netdev_info(dev, "Invalid LMI Report type IE length (%u)\n", + skb->data[i]); + return 1; + } + + reptype = skb->data[++i]; + if (reptype != LMI_INTEGRITY && reptype != LMI_FULLREP) { + netdev_info(dev, "Unsupported LMI Report type (0x%02X)\n", + reptype); + return 1; + } + + if (skb->data[++i] != (lmi == LMI_CCITT ? LMI_CCITT_ALIVE : + LMI_ANSI_CISCO_ALIVE)) { + netdev_info(dev, "Not an LMI Link integrity verification IE (0x%02X)\n", + skb->data[i]); + return 1; + } + + if (skb->data[++i] != LMI_INTEG_LEN) { + netdev_info(dev, "Invalid LMI Link integrity verification IE length (%u)\n", + skb->data[i]); + return 1; + } + i++; + + state(hdlc)->rxseq = skb->data[i++]; /* TX sequence from peer */ + rxseq = skb->data[i++]; /* Should confirm our sequence */ + + txseq = state(hdlc)->txseq; + + if (dce) + state(hdlc)->last_poll = jiffies; + + error = 0; + if (!state(hdlc)->reliable) + error = 1; + + if (rxseq == 0 || rxseq != txseq) { /* Ask for full report next time */ + state(hdlc)->n391cnt = 0; + error = 1; + } + + if (dce) { + if (state(hdlc)->fullrep_sent && !error) { +/* Stop sending full report - the last one has been confirmed by DTE */ + state(hdlc)->fullrep_sent = 0; + pvc = state(hdlc)->first_pvc; + while (pvc) { + if (pvc->state.new) { + pvc->state.new = 0; + +/* Tell DTE that new PVC is now active */ + state(hdlc)->dce_changed = 1; + } + pvc = pvc->next; + } + } + + if (state(hdlc)->dce_changed) { + reptype = LMI_FULLREP; + state(hdlc)->fullrep_sent = 1; + state(hdlc)->dce_changed = 0; + } + + state(hdlc)->request = 1; /* got request */ + fr_lmi_send(dev, reptype == LMI_FULLREP ? 1 : 0); + return 0; + } + + /* DTE */ + + state(hdlc)->request = 0; /* got response, no request pending */ + + if (error) + return 0; + + if (reptype != LMI_FULLREP) + return 0; + + pvc = state(hdlc)->first_pvc; + + while (pvc) { + pvc->state.deleted = 1; + pvc = pvc->next; + } + + no_ram = 0; + while (skb->len >= i + 2 + stat_len) { + u16 dlci; + u32 bw; + unsigned int active, new; + + if (skb->data[i] != (lmi == LMI_CCITT ? 
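+ /* Full status report: walk the PVC status IEs, each 2 + stat_len bytes long. */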
LMI_CCITT_PVCSTAT : + LMI_ANSI_CISCO_PVCSTAT)) { + netdev_info(dev, "Not an LMI PVC status IE (0x%02X)\n", + skb->data[i]); + return 1; + } + + if (skb->data[++i] != stat_len) { + netdev_info(dev, "Invalid LMI PVC status IE length (%u)\n", + skb->data[i]); + return 1; + } + i++; + + new = !!(skb->data[i + 2] & 0x08); + active = !!(skb->data[i + 2] & 0x02); + if (lmi == LMI_CISCO) { + dlci = (skb->data[i] << 8) | skb->data[i + 1]; + bw = (skb->data[i + 3] << 16) | + (skb->data[i + 4] << 8) | + (skb->data[i + 5]); + } else { + dlci = ((skb->data[i] & 0x3F) << 4) | + ((skb->data[i + 1] & 0x78) >> 3); + bw = 0; + } + + pvc = add_pvc(dev, dlci); + + if (!pvc && !no_ram) { + netdev_warn(dev, "Memory squeeze on fr_lmi_recv()\n"); + no_ram = 1; + } + + if (pvc) { + pvc->state.exist = 1; + pvc->state.deleted = 0; + if (active != pvc->state.active || + new != pvc->state.new || + bw != pvc->state.bandwidth || + !pvc->state.exist) { + pvc->state.new = new; + pvc->state.active = active; + pvc->state.bandwidth = bw; + pvc_carrier(active, pvc); + fr_log_dlci_active(pvc); + } + } + + i += stat_len; + } + + pvc = state(hdlc)->first_pvc; + + while (pvc) { + if (pvc->state.deleted && pvc->state.exist) { + pvc_carrier(0, pvc); + pvc->state.active = pvc->state.new = 0; + pvc->state.exist = 0; + pvc->state.bandwidth = 0; + fr_log_dlci_active(pvc); + } + pvc = pvc->next; + } + + /* Next full report after N391 polls */ + state(hdlc)->n391cnt = state(hdlc)->settings.n391; + + return 0; +} + +static int fr_snap_parse(struct sk_buff *skb, struct pvc_device *pvc) +{ + /* OUI 00-00-00 indicates an Ethertype follows */ + if (skb->data[0] == 0x00 && + skb->data[1] == 0x00 && + skb->data[2] == 0x00) { + if (!pvc->main) + return -1; + skb->dev = pvc->main; + skb->protocol = *(__be16 *)(skb->data + 3); /* Ethertype */ + skb_pull(skb, 5); + skb_reset_mac_header(skb); + return 0; + + /* OUI 00-80-C2 stands for the 802.1 organization */ + } else if (skb->data[0] == 0x00 && + skb->data[1] == 0x80 && + skb->data[2] == 0xC2) { + /* PID 00-07 stands for Ethernet frames without FCS */ + if (skb->data[3] == 0x00 && + skb->data[4] == 0x07) { + if (!pvc->ether) + return -1; + skb_pull(skb, 5); + if (skb->len < ETH_HLEN) + return -1; + skb->protocol = eth_type_trans(skb, pvc->ether); + return 0; + + /* PID unsupported */ + } else { + return -1; + } + + /* OUI unsupported */ + } else { + return -1; + } +} + +static int fr_rx(struct sk_buff *skb) +{ + struct net_device *frad = skb->dev; + hdlc_device *hdlc = dev_to_hdlc(frad); + struct fr_hdr *fh = (struct fr_hdr *)skb->data; + u8 *data = skb->data; + u16 dlci; + struct pvc_device *pvc; + struct net_device *dev; + + if (skb->len < 4 || fh->ea1 || !fh->ea2 || data[2] != FR_UI) + goto rx_error; + + dlci = q922_to_dlci(skb->data); + + if ((dlci == LMI_CCITT_ANSI_DLCI && + (state(hdlc)->settings.lmi == LMI_ANSI || + state(hdlc)->settings.lmi == LMI_CCITT)) || + (dlci == LMI_CISCO_DLCI && + state(hdlc)->settings.lmi == LMI_CISCO)) { + if (fr_lmi_recv(frad, skb)) + goto rx_error; + dev_kfree_skb_any(skb); + return NET_RX_SUCCESS; + } + + pvc = find_pvc(hdlc, dlci); + if (!pvc) { +#ifdef DEBUG_PKT + netdev_info(frad, "No PVC for received frame's DLCI %d\n", + dlci); +#endif + goto rx_drop; + } + + if (pvc->state.fecn != fh->fecn) { +#ifdef DEBUG_ECN + printk(KERN_DEBUG "%s: DLCI %d FECN O%s\n", frad->name, + dlci, fh->fecn ? 
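+ /* together with the "O%s" in the format string this prints "ON" or "OFF" */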
"N" : "FF"); +#endif + pvc->state.fecn ^= 1; + } + + if (pvc->state.becn != fh->becn) { +#ifdef DEBUG_ECN + printk(KERN_DEBUG "%s: DLCI %d BECN O%s\n", frad->name, + dlci, fh->becn ? "N" : "FF"); +#endif + pvc->state.becn ^= 1; + } + + skb = skb_share_check(skb, GFP_ATOMIC); + if (!skb) { + frad->stats.rx_dropped++; + return NET_RX_DROP; + } + + if (data[3] == NLPID_IP) { + if (!pvc->main) + goto rx_drop; + skb_pull(skb, 4); /* Remove 4-byte header (hdr, UI, NLPID) */ + skb->dev = pvc->main; + skb->protocol = htons(ETH_P_IP); + skb_reset_mac_header(skb); + + } else if (data[3] == NLPID_IPV6) { + if (!pvc->main) + goto rx_drop; + skb_pull(skb, 4); /* Remove 4-byte header (hdr, UI, NLPID) */ + skb->dev = pvc->main; + skb->protocol = htons(ETH_P_IPV6); + skb_reset_mac_header(skb); + + } else if (data[3] == FR_PAD) { + if (skb->len < 5) + goto rx_error; + if (data[4] == NLPID_SNAP) { /* A SNAP header follows */ + skb_pull(skb, 5); + if (skb->len < 5) /* Incomplete SNAP header */ + goto rx_error; + if (fr_snap_parse(skb, pvc)) + goto rx_drop; + } else { + goto rx_drop; + } + + } else { + netdev_info(frad, "Unsupported protocol, NLPID=%x length=%i\n", + data[3], skb->len); + goto rx_drop; + } + + dev = skb->dev; + dev->stats.rx_packets++; /* PVC traffic */ + dev->stats.rx_bytes += skb->len; + if (pvc->state.becn) + dev->stats.rx_compressed++; + netif_rx(skb); + return NET_RX_SUCCESS; + +rx_error: + frad->stats.rx_errors++; /* Mark error */ +rx_drop: + dev_kfree_skb_any(skb); + return NET_RX_DROP; +} + +static void fr_start(struct net_device *dev) +{ + hdlc_device *hdlc = dev_to_hdlc(dev); +#ifdef DEBUG_LINK + printk(KERN_DEBUG "fr_start\n"); +#endif + if (state(hdlc)->settings.lmi != LMI_NONE) { + state(hdlc)->reliable = 0; + state(hdlc)->dce_changed = 1; + state(hdlc)->request = 0; + state(hdlc)->fullrep_sent = 0; + state(hdlc)->last_errors = 0xFFFFFFFF; + state(hdlc)->n391cnt = 0; + state(hdlc)->txseq = state(hdlc)->rxseq = 0; + + state(hdlc)->dev = dev; + timer_setup(&state(hdlc)->timer, fr_timer, 0); + /* First poll after 1 s */ + state(hdlc)->timer.expires = jiffies + HZ; + add_timer(&state(hdlc)->timer); + } else { + fr_set_link_state(1, dev); + } +} + +static void fr_stop(struct net_device *dev) +{ + hdlc_device *hdlc = dev_to_hdlc(dev); +#ifdef DEBUG_LINK + printk(KERN_DEBUG "fr_stop\n"); +#endif + if (state(hdlc)->settings.lmi != LMI_NONE) + del_timer_sync(&state(hdlc)->timer); + fr_set_link_state(0, dev); +} + +static void fr_close(struct net_device *dev) +{ + hdlc_device *hdlc = dev_to_hdlc(dev); + struct pvc_device *pvc = state(hdlc)->first_pvc; + + while (pvc) { /* Shutdown all PVCs for this FRAD */ + if (pvc->main) + dev_close(pvc->main); + if (pvc->ether) + dev_close(pvc->ether); + pvc = pvc->next; + } +} + +static void pvc_setup(struct net_device *dev) +{ + dev->type = ARPHRD_DLCI; + dev->flags = IFF_POINTOPOINT; + dev->hard_header_len = 0; + dev->addr_len = 2; + netif_keep_dst(dev); +} + +static const struct net_device_ops pvc_ops = { + .ndo_open = pvc_open, + .ndo_stop = pvc_close, + .ndo_start_xmit = pvc_xmit, + .ndo_siocwandev = pvc_ioctl, +}; + +static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type) +{ + hdlc_device *hdlc = dev_to_hdlc(frad); + struct pvc_device *pvc; + struct net_device *dev; + int used; + + pvc = add_pvc(frad, dlci); + if (!pvc) { + netdev_warn(frad, "Memory squeeze on fr_add_pvc()\n"); + return -ENOBUFS; + } + + if (*get_dev_p(pvc, type)) + return -EEXIST; + + used = pvc_is_used(pvc); + + if (type == ARPHRD_ETHER) + dev = 
alloc_netdev(0, "pvceth%d", NET_NAME_UNKNOWN, + ether_setup); + else + dev = alloc_netdev(0, "pvc%d", NET_NAME_UNKNOWN, pvc_setup); + + if (!dev) { + netdev_warn(frad, "Memory squeeze on fr_pvc()\n"); + delete_unused_pvcs(hdlc); + return -ENOBUFS; + } + + if (type == ARPHRD_ETHER) { + dev->priv_flags &= ~IFF_TX_SKB_SHARING; + eth_hw_addr_random(dev); + } else { + __be16 addr = htons(dlci); + + dev_addr_set(dev, (u8 *)&addr); + dlci_to_q922(dev->broadcast, dlci); + } + dev->netdev_ops = &pvc_ops; + dev->mtu = HDLC_MAX_MTU; + dev->min_mtu = 68; + dev->max_mtu = HDLC_MAX_MTU; + dev->needed_headroom = 10; + dev->priv_flags |= IFF_NO_QUEUE; + dev->ml_priv = pvc; + + if (register_netdevice(dev) != 0) { + free_netdev(dev); + delete_unused_pvcs(hdlc); + return -EIO; + } + + dev->needs_free_netdev = true; + *get_dev_p(pvc, type) = dev; + if (!used) { + state(hdlc)->dce_changed = 1; + state(hdlc)->dce_pvc_count++; + } + return 0; +} + +static int fr_del_pvc(hdlc_device *hdlc, unsigned int dlci, int type) +{ + struct pvc_device *pvc; + struct net_device *dev; + + pvc = find_pvc(hdlc, dlci); + if (!pvc) + return -ENOENT; + + dev = *get_dev_p(pvc, type); + if (!dev) + return -ENOENT; + + if (dev->flags & IFF_UP) + return -EBUSY; /* PVC in use */ + + unregister_netdevice(dev); /* the destructor will free_netdev(dev) */ + *get_dev_p(pvc, type) = NULL; + + if (!pvc_is_used(pvc)) { + state(hdlc)->dce_pvc_count--; + state(hdlc)->dce_changed = 1; + } + delete_unused_pvcs(hdlc); + return 0; +} + +static void fr_destroy(struct net_device *frad) +{ + hdlc_device *hdlc = dev_to_hdlc(frad); + struct pvc_device *pvc = state(hdlc)->first_pvc; + + state(hdlc)->first_pvc = NULL; /* All PVCs destroyed */ + state(hdlc)->dce_pvc_count = 0; + state(hdlc)->dce_changed = 1; + + while (pvc) { + struct pvc_device *next = pvc->next; + /* destructors will free_netdev() main and ether */ + if (pvc->main) + unregister_netdevice(pvc->main); + + if (pvc->ether) + unregister_netdevice(pvc->ether); + + kfree(pvc); + pvc = next; + } +} + +static struct hdlc_proto proto = { + .close = fr_close, + .start = fr_start, + .stop = fr_stop, + .detach = fr_destroy, + .ioctl = fr_ioctl, + .netif_rx = fr_rx, + .module = THIS_MODULE, +}; + +static int fr_ioctl(struct net_device *dev, struct if_settings *ifs) +{ + fr_proto __user *fr_s = ifs->ifs_ifsu.fr; + const size_t size = sizeof(fr_proto); + fr_proto new_settings; + hdlc_device *hdlc = dev_to_hdlc(dev); + fr_proto_pvc pvc; + int result; + + switch (ifs->type) { + case IF_GET_PROTO: + if (dev_to_hdlc(dev)->proto != &proto) /* Different proto */ + return -EINVAL; + ifs->type = IF_PROTO_FR; + if (ifs->size < size) { + ifs->size = size; /* data size wanted */ + return -ENOBUFS; + } + if (copy_to_user(fr_s, &state(hdlc)->settings, size)) + return -EFAULT; + return 0; + + case IF_PROTO_FR: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + if (dev->flags & IFF_UP) + return -EBUSY; + + if (copy_from_user(&new_settings, fr_s, size)) + return -EFAULT; + + if (new_settings.lmi == LMI_DEFAULT) + new_settings.lmi = LMI_ANSI; + + if ((new_settings.lmi != LMI_NONE && + new_settings.lmi != LMI_ANSI && + new_settings.lmi != LMI_CCITT && + new_settings.lmi != LMI_CISCO) || + new_settings.t391 < 1 || + new_settings.t392 < 2 || + new_settings.n391 < 1 || + new_settings.n392 < 1 || + new_settings.n393 < new_settings.n392 || + new_settings.n393 > 32 || + (new_settings.dce != 0 && + new_settings.dce != 1)) + return -EINVAL; + + result = hdlc->attach(dev, ENCODING_NRZ, + PARITY_CRC16_PR1_CCITT); + if (result) + 
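+ /* hardware driver cannot provide NRZ encoding with 16-bit CCITT CRC */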
return result; + + if (dev_to_hdlc(dev)->proto != &proto) { /* Different proto */ + result = attach_hdlc_protocol(dev, &proto, + sizeof(struct frad_state)); + if (result) + return result; + state(hdlc)->first_pvc = NULL; + state(hdlc)->dce_pvc_count = 0; + } + memcpy(&state(hdlc)->settings, &new_settings, size); + dev->type = ARPHRD_FRAD; + call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev); + return 0; + + case IF_PROTO_FR_ADD_PVC: + case IF_PROTO_FR_DEL_PVC: + case IF_PROTO_FR_ADD_ETH_PVC: + case IF_PROTO_FR_DEL_ETH_PVC: + if (dev_to_hdlc(dev)->proto != &proto) /* Different proto */ + return -EINVAL; + + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + if (copy_from_user(&pvc, ifs->ifs_ifsu.fr_pvc, + sizeof(fr_proto_pvc))) + return -EFAULT; + + if (pvc.dlci <= 0 || pvc.dlci >= 1024) + return -EINVAL; /* Only 10 bits, DLCI 0 reserved */ + + if (ifs->type == IF_PROTO_FR_ADD_ETH_PVC || + ifs->type == IF_PROTO_FR_DEL_ETH_PVC) + result = ARPHRD_ETHER; /* bridged Ethernet device */ + else + result = ARPHRD_DLCI; + + if (ifs->type == IF_PROTO_FR_ADD_PVC || + ifs->type == IF_PROTO_FR_ADD_ETH_PVC) + return fr_add_pvc(dev, pvc.dlci, result); + else + return fr_del_pvc(hdlc, pvc.dlci, result); + } + + return -EINVAL; +} + +static int __init hdlc_fr_init(void) +{ + register_hdlc_protocol(&proto); + return 0; +} + +static void __exit hdlc_fr_exit(void) +{ + unregister_hdlc_protocol(&proto); +} + +module_init(hdlc_fr_init); +module_exit(hdlc_fr_exit); + +MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>"); +MODULE_DESCRIPTION("Frame-Relay protocol support for generic HDLC"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c new file mode 100644 index 000000000..37a3c989c --- /dev/null +++ b/drivers/net/wan/hdlc_ppp.c @@ -0,0 +1,725 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Generic HDLC support routines for Linux + * Point-to-point protocol support + * + * Copyright (C) 1999 - 2008 Krzysztof Halasa <khc@pm.waw.pl> + */ + +#include <linux/errno.h> +#include <linux/hdlc.h> +#include <linux/if_arp.h> +#include <linux/inetdevice.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pkt_sched.h> +#include <linux/poll.h> +#include <linux/skbuff.h> +#include <linux/slab.h> +#include <linux/spinlock.h> + +#define DEBUG_CP 0 /* also bytes# to dump */ +#define DEBUG_STATE 0 +#define DEBUG_HARD_HEADER 0 + +#define HDLC_ADDR_ALLSTATIONS 0xFF +#define HDLC_CTRL_UI 0x03 + +#define PID_LCP 0xC021 +#define PID_IP 0x0021 +#define PID_IPCP 0x8021 +#define PID_IPV6 0x0057 +#define PID_IPV6CP 0x8057 + +enum {IDX_LCP = 0, IDX_IPCP, IDX_IPV6CP, IDX_COUNT}; +enum {CP_CONF_REQ = 1, CP_CONF_ACK, CP_CONF_NAK, CP_CONF_REJ, CP_TERM_REQ, + CP_TERM_ACK, CP_CODE_REJ, LCP_PROTO_REJ, LCP_ECHO_REQ, LCP_ECHO_REPLY, + LCP_DISC_REQ, CP_CODES}; +#if DEBUG_CP +static const char *const code_names[CP_CODES] = { + "0", "ConfReq", "ConfAck", "ConfNak", "ConfRej", "TermReq", + "TermAck", "CodeRej", "ProtoRej", "EchoReq", "EchoReply", "Discard" +}; + +static char debug_buffer[64 + 3 * DEBUG_CP]; +#endif + +enum {LCP_OPTION_MRU = 1, LCP_OPTION_ACCM, LCP_OPTION_MAGIC = 5}; + +struct hdlc_header { + u8 address; + u8 control; + __be16 protocol; +}; + +struct cp_header { + u8 code; + u8 id; + __be16 len; +}; + +struct proto { + struct net_device *dev; + struct timer_list timer; + unsigned long timeout; + u16 pid; /* protocol ID */ + u8 state; + u8 cr_id; /* ID of last Configuration-Request */ + u8 restart_counter; +}; + +struct ppp { + struct proto 
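+ /* per-protocol control state machines: LCP, IPCP, IPV6CP */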
protos[IDX_COUNT]; + spinlock_t lock; + unsigned long last_pong; + unsigned int req_timeout, cr_retries, term_retries; + unsigned int keepalive_interval, keepalive_timeout; + u8 seq; /* local sequence number for requests */ + u8 echo_id; /* ID of last Echo-Request (LCP) */ +}; + +enum {CLOSED = 0, STOPPED, STOPPING, REQ_SENT, ACK_RECV, ACK_SENT, OPENED, + STATES, STATE_MASK = 0xF}; +enum {START = 0, STOP, TO_GOOD, TO_BAD, RCR_GOOD, RCR_BAD, RCA, RCN, RTR, RTA, + RUC, RXJ_GOOD, RXJ_BAD, EVENTS}; +enum {INV = 0x10, IRC = 0x20, ZRC = 0x40, SCR = 0x80, SCA = 0x100, + SCN = 0x200, STR = 0x400, STA = 0x800, SCJ = 0x1000}; + +#if DEBUG_STATE +static const char *const state_names[STATES] = { + "Closed", "Stopped", "Stopping", "ReqSent", "AckRecv", "AckSent", + "Opened" +}; + +static const char *const event_names[EVENTS] = { + "Start", "Stop", "TO+", "TO-", "RCR+", "RCR-", "RCA", "RCN", + "RTR", "RTA", "RUC", "RXJ+", "RXJ-" +}; +#endif + +static struct sk_buff_head tx_queue; /* used when holding the spin lock */ + +static int ppp_ioctl(struct net_device *dev, struct if_settings *ifs); + +static inline struct ppp *get_ppp(struct net_device *dev) +{ + return (struct ppp *)dev_to_hdlc(dev)->state; +} + +static inline struct proto *get_proto(struct net_device *dev, u16 pid) +{ + struct ppp *ppp = get_ppp(dev); + + switch (pid) { + case PID_LCP: + return &ppp->protos[IDX_LCP]; + case PID_IPCP: + return &ppp->protos[IDX_IPCP]; + case PID_IPV6CP: + return &ppp->protos[IDX_IPV6CP]; + default: + return NULL; + } +} + +static inline const char *proto_name(u16 pid) +{ + switch (pid) { + case PID_LCP: + return "LCP"; + case PID_IPCP: + return "IPCP"; + case PID_IPV6CP: + return "IPV6CP"; + default: + return NULL; + } +} + +static __be16 ppp_type_trans(struct sk_buff *skb, struct net_device *dev) +{ + struct hdlc_header *data = (struct hdlc_header *)skb->data; + + if (skb->len < sizeof(struct hdlc_header)) + return htons(ETH_P_HDLC); + if (data->address != HDLC_ADDR_ALLSTATIONS || + data->control != HDLC_CTRL_UI) + return htons(ETH_P_HDLC); + + switch (data->protocol) { + case cpu_to_be16(PID_IP): + skb_pull(skb, sizeof(struct hdlc_header)); + return htons(ETH_P_IP); + + case cpu_to_be16(PID_IPV6): + skb_pull(skb, sizeof(struct hdlc_header)); + return htons(ETH_P_IPV6); + + default: + return htons(ETH_P_HDLC); + } +} + +static int ppp_hard_header(struct sk_buff *skb, struct net_device *dev, + u16 type, const void *daddr, const void *saddr, + unsigned int len) +{ + struct hdlc_header *data; +#if DEBUG_HARD_HEADER + printk(KERN_DEBUG "%s: ppp_hard_header() called\n", dev->name); +#endif + + skb_push(skb, sizeof(struct hdlc_header)); + data = (struct hdlc_header *)skb->data; + + data->address = HDLC_ADDR_ALLSTATIONS; + data->control = HDLC_CTRL_UI; + switch (type) { + case ETH_P_IP: + data->protocol = htons(PID_IP); + break; + case ETH_P_IPV6: + data->protocol = htons(PID_IPV6); + break; + case PID_LCP: + case PID_IPCP: + case PID_IPV6CP: + data->protocol = htons(type); + break; + default: /* unknown protocol */ + data->protocol = 0; + } + return sizeof(struct hdlc_header); +} + +static void ppp_tx_flush(void) +{ + struct sk_buff *skb; + + while ((skb = skb_dequeue(&tx_queue)) != NULL) + dev_queue_xmit(skb); +} + +static void ppp_tx_cp(struct net_device *dev, u16 pid, u8 code, + u8 id, unsigned int len, const void *data) +{ + struct sk_buff *skb; + struct cp_header *cp; + unsigned int magic_len = 0; + static u32 magic; + +#if DEBUG_CP + int i; + char *ptr; +#endif + + if (pid == PID_LCP && (code == LCP_ECHO_REQ || 
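+ /* LCP Echo-Request/Reply carry a 4-byte magic number after the CP header */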
code == LCP_ECHO_REPLY)) + magic_len = sizeof(magic); + + skb = dev_alloc_skb(sizeof(struct hdlc_header) + + sizeof(struct cp_header) + magic_len + len); + if (!skb) + return; + + skb_reserve(skb, sizeof(struct hdlc_header)); + + cp = skb_put(skb, sizeof(struct cp_header)); + cp->code = code; + cp->id = id; + cp->len = htons(sizeof(struct cp_header) + magic_len + len); + + if (magic_len) + skb_put_data(skb, &magic, magic_len); + if (len) + skb_put_data(skb, data, len); + +#if DEBUG_CP + BUG_ON(code >= CP_CODES); + ptr = debug_buffer; + *ptr = '\x0'; + for (i = 0; i < min_t(unsigned int, magic_len + len, DEBUG_CP); i++) { + sprintf(ptr, " %02X", skb->data[sizeof(struct cp_header) + i]); + ptr += strlen(ptr); + } + printk(KERN_DEBUG "%s: TX %s [%s id 0x%X]%s\n", dev->name, + proto_name(pid), code_names[code], id, debug_buffer); +#endif + + ppp_hard_header(skb, dev, pid, NULL, NULL, 0); + + skb->priority = TC_PRIO_CONTROL; + skb->dev = dev; + skb->protocol = htons(ETH_P_HDLC); + skb_reset_network_header(skb); + skb_queue_tail(&tx_queue, skb); +} + +/* State transition table (compare STD-51) + Events Actions + TO+ = Timeout with counter > 0 irc = Initialize-Restart-Count + TO- = Timeout with counter expired zrc = Zero-Restart-Count + + RCR+ = Receive-Configure-Request (Good) scr = Send-Configure-Request + RCR- = Receive-Configure-Request (Bad) + RCA = Receive-Configure-Ack sca = Send-Configure-Ack + RCN = Receive-Configure-Nak/Rej scn = Send-Configure-Nak/Rej + + RTR = Receive-Terminate-Request str = Send-Terminate-Request + RTA = Receive-Terminate-Ack sta = Send-Terminate-Ack + + RUC = Receive-Unknown-Code scj = Send-Code-Reject + RXJ+ = Receive-Code-Reject (permitted) + or Receive-Protocol-Reject + RXJ- = Receive-Code-Reject (catastrophic) + or Receive-Protocol-Reject +*/ +static int cp_table[EVENTS][STATES] = { + /* CLOSED STOPPED STOPPING REQ_SENT ACK_RECV ACK_SENT OPENED + 0 1 2 3 4 5 6 */ + {IRC|SCR|3, INV , INV , INV , INV , INV , INV }, /* START */ + { INV , 0 , 0 , 0 , 0 , 0 , 0 }, /* STOP */ + { INV , INV ,STR|2, SCR|3 ,SCR|3, SCR|5 , INV }, /* TO+ */ + { INV , INV , 1 , 1 , 1 , 1 , INV }, /* TO- */ + { STA|0 ,IRC|SCR|SCA|5, 2 , SCA|5 ,SCA|6, SCA|5 ,SCR|SCA|5}, /* RCR+ */ + { STA|0 ,IRC|SCR|SCN|3, 2 , SCN|3 ,SCN|4, SCN|3 ,SCR|SCN|3}, /* RCR- */ + { STA|0 , STA|1 , 2 , IRC|4 ,SCR|3, 6 , SCR|3 }, /* RCA */ + { STA|0 , STA|1 , 2 ,IRC|SCR|3,SCR|3,IRC|SCR|5, SCR|3 }, /* RCN */ + { STA|0 , STA|1 ,STA|2, STA|3 ,STA|3, STA|3 ,ZRC|STA|2}, /* RTR */ + { 0 , 1 , 1 , 3 , 3 , 5 , SCR|3 }, /* RTA */ + { SCJ|0 , SCJ|1 ,SCJ|2, SCJ|3 ,SCJ|4, SCJ|5 , SCJ|6 }, /* RUC */ + { 0 , 1 , 2 , 3 , 3 , 5 , 6 }, /* RXJ+ */ + { 0 , 1 , 1 , 1 , 1 , 1 ,IRC|STR|2}, /* RXJ- */ +}; + +/* SCA: RCR+ must supply id, len and data + SCN: RCR- must supply code, id, len and data + STA: RTR must supply id + SCJ: RUC must supply CP packet len and data */ +static void ppp_cp_event(struct net_device *dev, u16 pid, u16 event, u8 code, + u8 id, unsigned int len, const void *data) +{ + int old_state, action; + struct ppp *ppp = get_ppp(dev); + struct proto *proto = get_proto(dev, pid); + + old_state = proto->state; + BUG_ON(old_state >= STATES); + BUG_ON(event >= EVENTS); + +#if DEBUG_STATE + printk(KERN_DEBUG "%s: %s ppp_cp_event(%s) %s ...\n", dev->name, + proto_name(pid), event_names[event], state_names[proto->state]); +#endif + + action = cp_table[event][old_state]; + + proto->state = action & STATE_MASK; + if (action & (SCR | STR)) /* set Configure-Req/Terminate-Req timer */ + mod_timer(&proto->timer, proto->timeout = + 
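+ /* (re)arm the STD-51 restart timer while a Configure/Terminate-Request is outstanding */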
jiffies + ppp->req_timeout * HZ); + if (action & ZRC) + proto->restart_counter = 0; + if (action & IRC) + proto->restart_counter = (proto->state == STOPPING) ? + ppp->term_retries : ppp->cr_retries; + + if (action & SCR) /* send Configure-Request */ + ppp_tx_cp(dev, pid, CP_CONF_REQ, proto->cr_id = ++ppp->seq, + 0, NULL); + if (action & SCA) /* send Configure-Ack */ + ppp_tx_cp(dev, pid, CP_CONF_ACK, id, len, data); + if (action & SCN) /* send Configure-Nak/Reject */ + ppp_tx_cp(dev, pid, code, id, len, data); + if (action & STR) /* send Terminate-Request */ + ppp_tx_cp(dev, pid, CP_TERM_REQ, ++ppp->seq, 0, NULL); + if (action & STA) /* send Terminate-Ack */ + ppp_tx_cp(dev, pid, CP_TERM_ACK, id, 0, NULL); + if (action & SCJ) /* send Code-Reject */ + ppp_tx_cp(dev, pid, CP_CODE_REJ, ++ppp->seq, len, data); + + if (old_state != OPENED && proto->state == OPENED) { + netdev_info(dev, "%s up\n", proto_name(pid)); + if (pid == PID_LCP) { + netif_dormant_off(dev); + ppp_cp_event(dev, PID_IPCP, START, 0, 0, 0, NULL); + ppp_cp_event(dev, PID_IPV6CP, START, 0, 0, 0, NULL); + ppp->last_pong = jiffies; + mod_timer(&proto->timer, proto->timeout = + jiffies + ppp->keepalive_interval * HZ); + } + } + if (old_state == OPENED && proto->state != OPENED) { + netdev_info(dev, "%s down\n", proto_name(pid)); + if (pid == PID_LCP) { + netif_dormant_on(dev); + ppp_cp_event(dev, PID_IPCP, STOP, 0, 0, 0, NULL); + ppp_cp_event(dev, PID_IPV6CP, STOP, 0, 0, 0, NULL); + } + } + if (old_state != CLOSED && proto->state == CLOSED) + del_timer(&proto->timer); + +#if DEBUG_STATE + printk(KERN_DEBUG "%s: %s ppp_cp_event(%s) ... %s\n", dev->name, + proto_name(pid), event_names[event], state_names[proto->state]); +#endif +} + +static void ppp_cp_parse_cr(struct net_device *dev, u16 pid, u8 id, + unsigned int req_len, const u8 *data) +{ + static u8 const valid_accm[6] = { LCP_OPTION_ACCM, 6, 0, 0, 0, 0 }; + const u8 *opt; + u8 *out; + unsigned int len = req_len, nak_len = 0, rej_len = 0; + + out = kmalloc(len, GFP_ATOMIC); + if (!out) { + dev->stats.rx_dropped++; + return; /* out of memory, ignore CR packet */ + } + + for (opt = data; len; len -= opt[1], opt += opt[1]) { + if (len < 2 || opt[1] < 2 || len < opt[1]) + goto err_out; + + if (pid == PID_LCP) + switch (opt[0]) { + case LCP_OPTION_MRU: + continue; /* MRU always OK and > 1500 bytes? 
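+ We accept whatever the peer proposes.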
*/ + + case LCP_OPTION_ACCM: /* async control character map */ + if (opt[1] < sizeof(valid_accm)) + goto err_out; + if (!memcmp(opt, valid_accm, + sizeof(valid_accm))) + continue; + if (!rej_len) { /* NAK it */ + memcpy(out + nak_len, valid_accm, + sizeof(valid_accm)); + nak_len += sizeof(valid_accm); + continue; + } + break; + case LCP_OPTION_MAGIC: + if (len < 6) + goto err_out; + if (opt[1] != 6 || (!opt[2] && !opt[3] && + !opt[4] && !opt[5])) + break; /* reject invalid magic number */ + continue; + } + /* reject this option */ + memcpy(out + rej_len, opt, opt[1]); + rej_len += opt[1]; + } + + if (rej_len) + ppp_cp_event(dev, pid, RCR_BAD, CP_CONF_REJ, id, rej_len, out); + else if (nak_len) + ppp_cp_event(dev, pid, RCR_BAD, CP_CONF_NAK, id, nak_len, out); + else + ppp_cp_event(dev, pid, RCR_GOOD, CP_CONF_ACK, id, req_len, data); + + kfree(out); + return; + +err_out: + dev->stats.rx_errors++; + kfree(out); +} + +static int ppp_rx(struct sk_buff *skb) +{ + struct hdlc_header *hdr = (struct hdlc_header *)skb->data; + struct net_device *dev = skb->dev; + struct ppp *ppp = get_ppp(dev); + struct proto *proto; + struct cp_header *cp; + unsigned long flags; + unsigned int len; + u16 pid; +#if DEBUG_CP + int i; + char *ptr; +#endif + + spin_lock_irqsave(&ppp->lock, flags); + /* Check HDLC header */ + if (skb->len < sizeof(struct hdlc_header)) + goto rx_error; + cp = skb_pull(skb, sizeof(struct hdlc_header)); + if (hdr->address != HDLC_ADDR_ALLSTATIONS || + hdr->control != HDLC_CTRL_UI) + goto rx_error; + + pid = ntohs(hdr->protocol); + proto = get_proto(dev, pid); + if (!proto) { + if (ppp->protos[IDX_LCP].state == OPENED) + ppp_tx_cp(dev, PID_LCP, LCP_PROTO_REJ, + ++ppp->seq, skb->len + 2, &hdr->protocol); + goto rx_error; + } + + len = ntohs(cp->len); + if (len < sizeof(struct cp_header) /* no complete CP header? */ || + skb->len < len /* truncated packet? 
*/) + goto rx_error; + skb_pull(skb, sizeof(struct cp_header)); + len -= sizeof(struct cp_header); + + /* HDLC and CP headers stripped from skb */ +#if DEBUG_CP + if (cp->code < CP_CODES) + sprintf(debug_buffer, "[%s id 0x%X]", code_names[cp->code], + cp->id); + else + sprintf(debug_buffer, "[code %u id 0x%X]", cp->code, cp->id); + ptr = debug_buffer + strlen(debug_buffer); + for (i = 0; i < min_t(unsigned int, len, DEBUG_CP); i++) { + sprintf(ptr, " %02X", skb->data[i]); + ptr += strlen(ptr); + } + printk(KERN_DEBUG "%s: RX %s %s\n", dev->name, proto_name(pid), + debug_buffer); +#endif + + /* LCP only */ + if (pid == PID_LCP) + switch (cp->code) { + case LCP_PROTO_REJ: + pid = ntohs(*(__be16 *)skb->data); + if (pid == PID_LCP || pid == PID_IPCP || + pid == PID_IPV6CP) + ppp_cp_event(dev, pid, RXJ_BAD, 0, 0, + 0, NULL); + goto out; + + case LCP_ECHO_REQ: /* send Echo-Reply */ + if (len >= 4 && proto->state == OPENED) + ppp_tx_cp(dev, PID_LCP, LCP_ECHO_REPLY, + cp->id, len - 4, skb->data + 4); + goto out; + + case LCP_ECHO_REPLY: + if (cp->id == ppp->echo_id) + ppp->last_pong = jiffies; + goto out; + + case LCP_DISC_REQ: /* discard */ + goto out; + } + + /* LCP, IPCP and IPV6CP */ + switch (cp->code) { + case CP_CONF_REQ: + ppp_cp_parse_cr(dev, pid, cp->id, len, skb->data); + break; + + case CP_CONF_ACK: + if (cp->id == proto->cr_id) + ppp_cp_event(dev, pid, RCA, 0, 0, 0, NULL); + break; + + case CP_CONF_REJ: + case CP_CONF_NAK: + if (cp->id == proto->cr_id) + ppp_cp_event(dev, pid, RCN, 0, 0, 0, NULL); + break; + + case CP_TERM_REQ: + ppp_cp_event(dev, pid, RTR, 0, cp->id, 0, NULL); + break; + + case CP_TERM_ACK: + ppp_cp_event(dev, pid, RTA, 0, 0, 0, NULL); + break; + + case CP_CODE_REJ: + ppp_cp_event(dev, pid, RXJ_BAD, 0, 0, 0, NULL); + break; + + default: + len += sizeof(struct cp_header); + if (len > dev->mtu) + len = dev->mtu; + ppp_cp_event(dev, pid, RUC, 0, 0, len, cp); + break; + } + goto out; + +rx_error: + dev->stats.rx_errors++; +out: + spin_unlock_irqrestore(&ppp->lock, flags); + dev_kfree_skb_any(skb); + ppp_tx_flush(); + return NET_RX_DROP; +} + +static void ppp_timer(struct timer_list *t) +{ + struct proto *proto = from_timer(proto, t, timer); + struct ppp *ppp = get_ppp(proto->dev); + unsigned long flags; + + spin_lock_irqsave(&ppp->lock, flags); + /* mod_timer could be called after we entered this function but + * before we got the lock. 
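+ * If the timer is pending again here, a newer event has already re-armed it and this expiry is stale, so it must be ignored.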
+ */ + if (timer_pending(&proto->timer)) { + spin_unlock_irqrestore(&ppp->lock, flags); + return; + } + switch (proto->state) { + case STOPPING: + case REQ_SENT: + case ACK_RECV: + case ACK_SENT: + if (proto->restart_counter) { + ppp_cp_event(proto->dev, proto->pid, TO_GOOD, 0, 0, + 0, NULL); + proto->restart_counter--; + } else if (netif_carrier_ok(proto->dev)) + ppp_cp_event(proto->dev, proto->pid, TO_GOOD, 0, 0, + 0, NULL); + else + ppp_cp_event(proto->dev, proto->pid, TO_BAD, 0, 0, + 0, NULL); + break; + + case OPENED: + if (proto->pid != PID_LCP) + break; + if (time_after(jiffies, ppp->last_pong + + ppp->keepalive_timeout * HZ)) { + netdev_info(proto->dev, "Link down\n"); + ppp_cp_event(proto->dev, PID_LCP, STOP, 0, 0, 0, NULL); + ppp_cp_event(proto->dev, PID_LCP, START, 0, 0, 0, NULL); + } else { /* send keep-alive packet */ + ppp->echo_id = ++ppp->seq; + ppp_tx_cp(proto->dev, PID_LCP, LCP_ECHO_REQ, + ppp->echo_id, 0, NULL); + proto->timer.expires = jiffies + + ppp->keepalive_interval * HZ; + add_timer(&proto->timer); + } + break; + } + spin_unlock_irqrestore(&ppp->lock, flags); + ppp_tx_flush(); +} + +static void ppp_start(struct net_device *dev) +{ + struct ppp *ppp = get_ppp(dev); + int i; + + for (i = 0; i < IDX_COUNT; i++) { + struct proto *proto = &ppp->protos[i]; + + proto->dev = dev; + timer_setup(&proto->timer, ppp_timer, 0); + proto->state = CLOSED; + } + ppp->protos[IDX_LCP].pid = PID_LCP; + ppp->protos[IDX_IPCP].pid = PID_IPCP; + ppp->protos[IDX_IPV6CP].pid = PID_IPV6CP; + + ppp_cp_event(dev, PID_LCP, START, 0, 0, 0, NULL); +} + +static void ppp_stop(struct net_device *dev) +{ + ppp_cp_event(dev, PID_LCP, STOP, 0, 0, 0, NULL); +} + +static void ppp_close(struct net_device *dev) +{ + ppp_tx_flush(); +} + +static struct hdlc_proto proto = { + .start = ppp_start, + .stop = ppp_stop, + .close = ppp_close, + .type_trans = ppp_type_trans, + .ioctl = ppp_ioctl, + .netif_rx = ppp_rx, + .module = THIS_MODULE, +}; + +static const struct header_ops ppp_header_ops = { + .create = ppp_hard_header, +}; + +static int ppp_ioctl(struct net_device *dev, struct if_settings *ifs) +{ + hdlc_device *hdlc = dev_to_hdlc(dev); + struct ppp *ppp; + int result; + + switch (ifs->type) { + case IF_GET_PROTO: + if (dev_to_hdlc(dev)->proto != &proto) + return -EINVAL; + ifs->type = IF_PROTO_PPP; + return 0; /* return protocol only, no settable parameters */ + + case IF_PROTO_PPP: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + if (dev->flags & IFF_UP) + return -EBUSY; + + /* no settable parameters */ + + result = hdlc->attach(dev, ENCODING_NRZ, + PARITY_CRC16_PR1_CCITT); + if (result) + return result; + + result = attach_hdlc_protocol(dev, &proto, sizeof(struct ppp)); + if (result) + return result; + + ppp = get_ppp(dev); + spin_lock_init(&ppp->lock); + ppp->req_timeout = 2; + ppp->cr_retries = 10; + ppp->term_retries = 2; + ppp->keepalive_interval = 10; + ppp->keepalive_timeout = 60; + + dev->hard_header_len = sizeof(struct hdlc_header); + dev->header_ops = &ppp_header_ops; + dev->type = ARPHRD_PPP; + call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev); + netif_dormant_on(dev); + return 0; + } + + return -EINVAL; +} + +static int __init hdlc_ppp_init(void) +{ + skb_queue_head_init(&tx_queue); + register_hdlc_protocol(&proto); + return 0; +} + +static void __exit hdlc_ppp_exit(void) +{ + unregister_hdlc_protocol(&proto); +} + +module_init(hdlc_ppp_init); +module_exit(hdlc_ppp_exit); + +MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>"); +MODULE_DESCRIPTION("PPP protocol support for generic 
HDLC"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/wan/hdlc_raw.c b/drivers/net/wan/hdlc_raw.c new file mode 100644 index 000000000..4a2f06872 --- /dev/null +++ b/drivers/net/wan/hdlc_raw.c @@ -0,0 +1,112 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Generic HDLC support routines for Linux + * HDLC support + * + * Copyright (C) 1999 - 2006 Krzysztof Halasa <khc@pm.waw.pl> + */ + +#include <linux/errno.h> +#include <linux/hdlc.h> +#include <linux/if_arp.h> +#include <linux/inetdevice.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pkt_sched.h> +#include <linux/poll.h> +#include <linux/rtnetlink.h> +#include <linux/skbuff.h> + + +static int raw_ioctl(struct net_device *dev, struct if_settings *ifs); + +static __be16 raw_type_trans(struct sk_buff *skb, struct net_device *dev) +{ + return cpu_to_be16(ETH_P_IP); +} + +static struct hdlc_proto proto = { + .type_trans = raw_type_trans, + .ioctl = raw_ioctl, + .module = THIS_MODULE, +}; + + +static int raw_ioctl(struct net_device *dev, struct if_settings *ifs) +{ + raw_hdlc_proto __user *raw_s = ifs->ifs_ifsu.raw_hdlc; + const size_t size = sizeof(raw_hdlc_proto); + raw_hdlc_proto new_settings; + hdlc_device *hdlc = dev_to_hdlc(dev); + int result; + + switch (ifs->type) { + case IF_GET_PROTO: + if (dev_to_hdlc(dev)->proto != &proto) + return -EINVAL; + ifs->type = IF_PROTO_HDLC; + if (ifs->size < size) { + ifs->size = size; /* data size wanted */ + return -ENOBUFS; + } + if (copy_to_user(raw_s, hdlc->state, size)) + return -EFAULT; + return 0; + + case IF_PROTO_HDLC: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + if (dev->flags & IFF_UP) + return -EBUSY; + + if (copy_from_user(&new_settings, raw_s, size)) + return -EFAULT; + + if (new_settings.encoding == ENCODING_DEFAULT) + new_settings.encoding = ENCODING_NRZ; + + if (new_settings.parity == PARITY_DEFAULT) + new_settings.parity = PARITY_CRC16_PR1_CCITT; + + result = hdlc->attach(dev, new_settings.encoding, + new_settings.parity); + if (result) + return result; + + result = attach_hdlc_protocol(dev, &proto, + sizeof(raw_hdlc_proto)); + if (result) + return result; + memcpy(hdlc->state, &new_settings, size); + dev->type = ARPHRD_RAWHDLC; + call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev); + netif_dormant_off(dev); + return 0; + } + + return -EINVAL; +} + + +static int __init hdlc_raw_init(void) +{ + register_hdlc_protocol(&proto); + return 0; +} + + + +static void __exit hdlc_raw_exit(void) +{ + unregister_hdlc_protocol(&proto); +} + + +module_init(hdlc_raw_init); +module_exit(hdlc_raw_exit); + +MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>"); +MODULE_DESCRIPTION("Raw HDLC protocol support for generic HDLC"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/wan/hdlc_raw_eth.c b/drivers/net/wan/hdlc_raw_eth.c new file mode 100644 index 000000000..0a66b7356 --- /dev/null +++ b/drivers/net/wan/hdlc_raw_eth.c @@ -0,0 +1,132 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Generic HDLC support routines for Linux + * HDLC Ethernet emulation support + * + * Copyright (C) 2002-2006 Krzysztof Halasa <khc@pm.waw.pl> + */ + +#include <linux/errno.h> +#include <linux/etherdevice.h> +#include <linux/gfp.h> +#include <linux/hdlc.h> +#include <linux/if_arp.h> +#include <linux/inetdevice.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pkt_sched.h> +#include <linux/poll.h> +#include <linux/rtnetlink.h> +#include <linux/skbuff.h> + +static int raw_eth_ioctl(struct net_device 
*dev, struct if_settings *ifs); + +static netdev_tx_t eth_tx(struct sk_buff *skb, struct net_device *dev) +{ + int pad = ETH_ZLEN - skb->len; + if (pad > 0) { /* Pad the frame with zeros */ + int len = skb->len; + if (skb_tailroom(skb) < pad) + if (pskb_expand_head(skb, 0, pad, GFP_ATOMIC)) { + dev->stats.tx_dropped++; + dev_kfree_skb(skb); + return 0; + } + skb_put(skb, pad); + memset(skb->data + len, 0, pad); + } + return dev_to_hdlc(dev)->xmit(skb, dev); +} + + +static struct hdlc_proto proto = { + .type_trans = eth_type_trans, + .xmit = eth_tx, + .ioctl = raw_eth_ioctl, + .module = THIS_MODULE, +}; + + +static int raw_eth_ioctl(struct net_device *dev, struct if_settings *ifs) +{ + raw_hdlc_proto __user *raw_s = ifs->ifs_ifsu.raw_hdlc; + const size_t size = sizeof(raw_hdlc_proto); + raw_hdlc_proto new_settings; + hdlc_device *hdlc = dev_to_hdlc(dev); + unsigned int old_qlen; + int result; + + switch (ifs->type) { + case IF_GET_PROTO: + if (dev_to_hdlc(dev)->proto != &proto) + return -EINVAL; + ifs->type = IF_PROTO_HDLC_ETH; + if (ifs->size < size) { + ifs->size = size; /* data size wanted */ + return -ENOBUFS; + } + if (copy_to_user(raw_s, hdlc->state, size)) + return -EFAULT; + return 0; + + case IF_PROTO_HDLC_ETH: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + if (dev->flags & IFF_UP) + return -EBUSY; + + if (copy_from_user(&new_settings, raw_s, size)) + return -EFAULT; + + if (new_settings.encoding == ENCODING_DEFAULT) + new_settings.encoding = ENCODING_NRZ; + + if (new_settings.parity == PARITY_DEFAULT) + new_settings.parity = PARITY_CRC16_PR1_CCITT; + + result = hdlc->attach(dev, new_settings.encoding, + new_settings.parity); + if (result) + return result; + + result = attach_hdlc_protocol(dev, &proto, + sizeof(raw_hdlc_proto)); + if (result) + return result; + memcpy(hdlc->state, &new_settings, size); + old_qlen = dev->tx_queue_len; + ether_setup(dev); + dev->tx_queue_len = old_qlen; + dev->priv_flags &= ~IFF_TX_SKB_SHARING; + eth_hw_addr_random(dev); + call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev); + netif_dormant_off(dev); + return 0; + } + + return -EINVAL; +} + + +static int __init hdlc_eth_init(void) +{ + register_hdlc_protocol(&proto); + return 0; +} + + + +static void __exit hdlc_eth_exit(void) +{ + unregister_hdlc_protocol(&proto); +} + + +module_init(hdlc_eth_init); +module_exit(hdlc_eth_exit); + +MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>"); +MODULE_DESCRIPTION("Ethernet encapsulation support for generic HDLC"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c new file mode 100644 index 000000000..f72c92c24 --- /dev/null +++ b/drivers/net/wan/hdlc_x25.c @@ -0,0 +1,384 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Generic HDLC support routines for Linux + * X.25 support + * + * Copyright (C) 1999 - 2006 Krzysztof Halasa <khc@pm.waw.pl> + */ + +#include <linux/errno.h> +#include <linux/gfp.h> +#include <linux/hdlc.h> +#include <linux/if_arp.h> +#include <linux/inetdevice.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/lapb.h> +#include <linux/module.h> +#include <linux/pkt_sched.h> +#include <linux/poll.h> +#include <linux/rtnetlink.h> +#include <linux/skbuff.h> +#include <net/x25device.h> + +struct x25_state { + x25_hdlc_proto settings; + bool up; + spinlock_t up_lock; /* Protects "up" */ + struct sk_buff_head rx_queue; + struct tasklet_struct rx_tasklet; +}; + +static int x25_ioctl(struct net_device *dev, struct if_settings *ifs); + +static struct x25_state *state(hdlc_device 
*hdlc) +{ + return hdlc->state; +} + +static void x25_rx_queue_kick(struct tasklet_struct *t) +{ + struct x25_state *x25st = from_tasklet(x25st, t, rx_tasklet); + struct sk_buff *skb = skb_dequeue(&x25st->rx_queue); + + while (skb) { + netif_receive_skb_core(skb); + skb = skb_dequeue(&x25st->rx_queue); + } +} + +/* These functions are callbacks called by LAPB layer */ + +static void x25_connect_disconnect(struct net_device *dev, int reason, int code) +{ + struct x25_state *x25st = state(dev_to_hdlc(dev)); + struct sk_buff *skb; + unsigned char *ptr; + + skb = __dev_alloc_skb(1, GFP_ATOMIC | __GFP_NOMEMALLOC); + if (!skb) + return; + + ptr = skb_put(skb, 1); + *ptr = code; + + skb->protocol = x25_type_trans(skb, dev); + + skb_queue_tail(&x25st->rx_queue, skb); + tasklet_schedule(&x25st->rx_tasklet); +} + +static void x25_connected(struct net_device *dev, int reason) +{ + x25_connect_disconnect(dev, reason, X25_IFACE_CONNECT); +} + +static void x25_disconnected(struct net_device *dev, int reason) +{ + x25_connect_disconnect(dev, reason, X25_IFACE_DISCONNECT); +} + +static int x25_data_indication(struct net_device *dev, struct sk_buff *skb) +{ + struct x25_state *x25st = state(dev_to_hdlc(dev)); + unsigned char *ptr; + + if (skb_cow(skb, 1)) { + kfree_skb(skb); + return NET_RX_DROP; + } + + skb_push(skb, 1); + + ptr = skb->data; + *ptr = X25_IFACE_DATA; + + skb->protocol = x25_type_trans(skb, dev); + + skb_queue_tail(&x25st->rx_queue, skb); + tasklet_schedule(&x25st->rx_tasklet); + return NET_RX_SUCCESS; +} + +static void x25_data_transmit(struct net_device *dev, struct sk_buff *skb) +{ + hdlc_device *hdlc = dev_to_hdlc(dev); + + skb_reset_network_header(skb); + skb->protocol = hdlc_type_trans(skb, dev); + + if (dev_nit_active(dev)) + dev_queue_xmit_nit(skb, dev); + + hdlc->xmit(skb, dev); /* Ignore return value :-( */ +} + +static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev) +{ + hdlc_device *hdlc = dev_to_hdlc(dev); + struct x25_state *x25st = state(hdlc); + int result; + + /* There should be a pseudo header of 1 byte added by upper layers. + * Check to make sure it is there before reading it. + */ + if (skb->len < 1) { + kfree_skb(skb); + return NETDEV_TX_OK; + } + + spin_lock_bh(&x25st->up_lock); + if (!x25st->up) { + spin_unlock_bh(&x25st->up_lock); + kfree_skb(skb); + return NETDEV_TX_OK; + } + + switch (skb->data[0]) { + case X25_IFACE_DATA: /* Data to be transmitted */ + skb_pull(skb, 1); + result = lapb_data_request(dev, skb); + if (result != LAPB_OK) + dev_kfree_skb(skb); + spin_unlock_bh(&x25st->up_lock); + return NETDEV_TX_OK; + + case X25_IFACE_CONNECT: + result = lapb_connect_request(dev); + if (result != LAPB_OK) { + if (result == LAPB_CONNECTED) + /* Send connect confirm. msg to level 3 */ + x25_connected(dev, 0); + else + netdev_err(dev, "LAPB connect request failed, error code = %i\n", + result); + } + break; + + case X25_IFACE_DISCONNECT: + result = lapb_disconnect_request(dev); + if (result != LAPB_OK) { + if (result == LAPB_NOTCONNECTED) + /* Send disconnect confirm. 
msg to level 3 */ + x25_disconnected(dev, 0); + else + netdev_err(dev, "LAPB disconnect request failed, error code = %i\n", + result); + } + break; + + default: /* to be defined */ + break; + } + + spin_unlock_bh(&x25st->up_lock); + dev_kfree_skb(skb); + return NETDEV_TX_OK; +} + +static int x25_open(struct net_device *dev) +{ + static const struct lapb_register_struct cb = { + .connect_confirmation = x25_connected, + .connect_indication = x25_connected, + .disconnect_confirmation = x25_disconnected, + .disconnect_indication = x25_disconnected, + .data_indication = x25_data_indication, + .data_transmit = x25_data_transmit, + }; + hdlc_device *hdlc = dev_to_hdlc(dev); + struct x25_state *x25st = state(hdlc); + struct lapb_parms_struct params; + int result; + + result = lapb_register(dev, &cb); + if (result != LAPB_OK) + return -ENOMEM; + + result = lapb_getparms(dev, ¶ms); + if (result != LAPB_OK) + return -EINVAL; + + if (state(hdlc)->settings.dce) + params.mode = params.mode | LAPB_DCE; + + if (state(hdlc)->settings.modulo == 128) + params.mode = params.mode | LAPB_EXTENDED; + + params.window = state(hdlc)->settings.window; + params.t1 = state(hdlc)->settings.t1; + params.t2 = state(hdlc)->settings.t2; + params.n2 = state(hdlc)->settings.n2; + + result = lapb_setparms(dev, ¶ms); + if (result != LAPB_OK) + return -EINVAL; + + spin_lock_bh(&x25st->up_lock); + x25st->up = true; + spin_unlock_bh(&x25st->up_lock); + + return 0; +} + +static void x25_close(struct net_device *dev) +{ + hdlc_device *hdlc = dev_to_hdlc(dev); + struct x25_state *x25st = state(hdlc); + + spin_lock_bh(&x25st->up_lock); + x25st->up = false; + spin_unlock_bh(&x25st->up_lock); + + lapb_unregister(dev); + tasklet_kill(&x25st->rx_tasklet); +} + +static int x25_rx(struct sk_buff *skb) +{ + struct net_device *dev = skb->dev; + hdlc_device *hdlc = dev_to_hdlc(dev); + struct x25_state *x25st = state(hdlc); + + skb = skb_share_check(skb, GFP_ATOMIC); + if (!skb) { + dev->stats.rx_dropped++; + return NET_RX_DROP; + } + + spin_lock_bh(&x25st->up_lock); + if (!x25st->up) { + spin_unlock_bh(&x25st->up_lock); + kfree_skb(skb); + dev->stats.rx_dropped++; + return NET_RX_DROP; + } + + if (lapb_data_received(dev, skb) == LAPB_OK) { + spin_unlock_bh(&x25st->up_lock); + return NET_RX_SUCCESS; + } + + spin_unlock_bh(&x25st->up_lock); + dev->stats.rx_errors++; + dev_kfree_skb_any(skb); + return NET_RX_DROP; +} + +static struct hdlc_proto proto = { + .open = x25_open, + .close = x25_close, + .ioctl = x25_ioctl, + .netif_rx = x25_rx, + .xmit = x25_xmit, + .module = THIS_MODULE, +}; + +static int x25_ioctl(struct net_device *dev, struct if_settings *ifs) +{ + x25_hdlc_proto __user *x25_s = ifs->ifs_ifsu.x25; + const size_t size = sizeof(x25_hdlc_proto); + hdlc_device *hdlc = dev_to_hdlc(dev); + x25_hdlc_proto new_settings; + int result; + + switch (ifs->type) { + case IF_GET_PROTO: + if (dev_to_hdlc(dev)->proto != &proto) + return -EINVAL; + ifs->type = IF_PROTO_X25; + if (ifs->size < size) { + ifs->size = size; /* data size wanted */ + return -ENOBUFS; + } + if (copy_to_user(x25_s, &state(hdlc)->settings, size)) + return -EFAULT; + return 0; + + case IF_PROTO_X25: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + if (dev->flags & IFF_UP) + return -EBUSY; + + /* backward compatibility */ + if (ifs->size == 0) { + new_settings.dce = 0; + new_settings.modulo = 8; + new_settings.window = 7; + new_settings.t1 = 3; + new_settings.t2 = 1; + new_settings.n2 = 10; + } else { + if (copy_from_user(&new_settings, x25_s, size)) + return -EFAULT; + + 
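+ /* Validate LAPB parameters: the window must fit the modulo (<= 7 for modulo-8, <= 127 for modulo-128); T1/T2 timers and the N2 retry count must be 1..255. */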
if ((new_settings.dce != 0 && + new_settings.dce != 1) || + (new_settings.modulo != 8 && + new_settings.modulo != 128) || + new_settings.window < 1 || + (new_settings.modulo == 8 && + new_settings.window > 7) || + (new_settings.modulo == 128 && + new_settings.window > 127) || + new_settings.t1 < 1 || + new_settings.t1 > 255 || + new_settings.t2 < 1 || + new_settings.t2 > 255 || + new_settings.n2 < 1 || + new_settings.n2 > 255) + return -EINVAL; + } + + result = hdlc->attach(dev, ENCODING_NRZ, + PARITY_CRC16_PR1_CCITT); + if (result) + return result; + + result = attach_hdlc_protocol(dev, &proto, + sizeof(struct x25_state)); + if (result) + return result; + + memcpy(&state(hdlc)->settings, &new_settings, size); + state(hdlc)->up = false; + spin_lock_init(&state(hdlc)->up_lock); + skb_queue_head_init(&state(hdlc)->rx_queue); + tasklet_setup(&state(hdlc)->rx_tasklet, x25_rx_queue_kick); + + /* There's no header_ops so hard_header_len should be 0. */ + dev->hard_header_len = 0; + /* When transmitting data: + * first we'll remove a pseudo header of 1 byte, + * then we'll prepend an LAPB header of at most 3 bytes. + */ + dev->needed_headroom = 3 - 1; + + dev->type = ARPHRD_X25; + call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev); + netif_dormant_off(dev); + return 0; + } + + return -EINVAL; +} + +static int __init hdlc_x25_init(void) +{ + register_hdlc_protocol(&proto); + return 0; +} + +static void __exit hdlc_x25_exit(void) +{ + unregister_hdlc_protocol(&proto); +} + +module_init(hdlc_x25_init); +module_exit(hdlc_x25_exit); + +MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>"); +MODULE_DESCRIPTION("X.25 protocol support for generic HDLC"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/wan/ixp4xx_hss.c b/drivers/net/wan/ixp4xx_hss.c new file mode 100644 index 000000000..e46b7f5ee --- /dev/null +++ b/drivers/net/wan/ixp4xx_hss.c @@ -0,0 +1,1545 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Intel IXP4xx HSS (synchronous serial port) driver for Linux + * + * Copyright (C) 2007-2008 Krzysztof HaÅ‚asa <khc@pm.waw.pl> + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/module.h> +#include <linux/bitops.h> +#include <linux/cdev.h> +#include <linux/dma-mapping.h> +#include <linux/dmapool.h> +#include <linux/fs.h> +#include <linux/hdlc.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/mfd/syscon.h> +#include <linux/platform_device.h> +#include <linux/poll.h> +#include <linux/regmap.h> +#include <linux/slab.h> +#include <linux/gpio/consumer.h> +#include <linux/of.h> +#include <linux/soc/ixp4xx/npe.h> +#include <linux/soc/ixp4xx/qmgr.h> +#include <linux/soc/ixp4xx/cpu.h> + +/* This is what all IXP4xx platforms we know uses, if more frequencies + * are needed, we need to migrate to the clock framework. 
+ */ +#define IXP4XX_TIMER_FREQ 66666000 + +#define DEBUG_DESC 0 +#define DEBUG_RX 0 +#define DEBUG_TX 0 +#define DEBUG_PKT_BYTES 0 +#define DEBUG_CLOSE 0 + +#define DRV_NAME "ixp4xx_hss" + +#define PKT_EXTRA_FLAGS 0 /* orig 1 */ +#define PKT_NUM_PIPES 1 /* 1, 2 or 4 */ +#define PKT_PIPE_FIFO_SIZEW 4 /* total 4 dwords per HSS */ + +#define RX_DESCS 16 /* also length of all RX queues */ +#define TX_DESCS 16 /* also length of all TX queues */ + +#define POOL_ALLOC_SIZE (sizeof(struct desc) * (RX_DESCS + TX_DESCS)) +#define RX_SIZE (HDLC_MAX_MRU + 4) /* NPE needs more space */ +#define MAX_CLOSE_WAIT 1000 /* microseconds */ +#define HSS_COUNT 2 +#define FRAME_SIZE 256 /* doesn't matter at this point */ +#define FRAME_OFFSET 0 +#define MAX_CHANNELS (FRAME_SIZE / 8) + +#define NAPI_WEIGHT 16 + +/* Queue IDs */ +#define HSS0_PKT_RX_QUEUE 13 /* orig size = 32 dwords */ +#define HSS0_PKT_TX0_QUEUE 14 /* orig size = 16 dwords */ +#define HSS0_PKT_TX1_QUEUE 15 +#define HSS0_PKT_TX2_QUEUE 16 +#define HSS0_PKT_TX3_QUEUE 17 +#define HSS0_PKT_RXFREE0_QUEUE 18 /* orig size = 16 dwords */ +#define HSS0_PKT_RXFREE1_QUEUE 19 +#define HSS0_PKT_RXFREE2_QUEUE 20 +#define HSS0_PKT_RXFREE3_QUEUE 21 +#define HSS0_PKT_TXDONE_QUEUE 22 /* orig size = 64 dwords */ + +#define HSS1_PKT_RX_QUEUE 0 +#define HSS1_PKT_TX0_QUEUE 5 +#define HSS1_PKT_TX1_QUEUE 6 +#define HSS1_PKT_TX2_QUEUE 7 +#define HSS1_PKT_TX3_QUEUE 8 +#define HSS1_PKT_RXFREE0_QUEUE 1 +#define HSS1_PKT_RXFREE1_QUEUE 2 +#define HSS1_PKT_RXFREE2_QUEUE 3 +#define HSS1_PKT_RXFREE3_QUEUE 4 +#define HSS1_PKT_TXDONE_QUEUE 9 + +#define NPE_PKT_MODE_HDLC 0 +#define NPE_PKT_MODE_RAW 1 +#define NPE_PKT_MODE_56KMODE 2 +#define NPE_PKT_MODE_56KENDIAN_MSB 4 + +/* PKT_PIPE_HDLC_CFG_WRITE flags */ +#define PKT_HDLC_IDLE_ONES 0x1 /* default = flags */ +#define PKT_HDLC_CRC_32 0x2 /* default = CRC-16 */ +#define PKT_HDLC_MSB_ENDIAN 0x4 /* default = LE */ + +/* hss_config, PCRs */ +/* Frame sync sampling, default = active low */ +#define PCR_FRM_SYNC_ACTIVE_HIGH 0x40000000 +#define PCR_FRM_SYNC_FALLINGEDGE 0x80000000 +#define PCR_FRM_SYNC_RISINGEDGE 0xC0000000 + +/* Frame sync pin: input (default) or output generated off a given clk edge */ +#define PCR_FRM_SYNC_OUTPUT_FALLING 0x20000000 +#define PCR_FRM_SYNC_OUTPUT_RISING 0x30000000 + +/* Frame and data clock sampling on edge, default = falling */ +#define PCR_FCLK_EDGE_RISING 0x08000000 +#define PCR_DCLK_EDGE_RISING 0x04000000 + +/* Clock direction, default = input */ +#define PCR_SYNC_CLK_DIR_OUTPUT 0x02000000 + +/* Generate/Receive frame pulses, default = enabled */ +#define PCR_FRM_PULSE_DISABLED 0x01000000 + + /* Data rate is full (default) or half the configured clk speed */ +#define PCR_HALF_CLK_RATE 0x00200000 + +/* Invert data between NPE and HSS FIFOs? (default = no) */ +#define PCR_DATA_POLARITY_INVERT 0x00100000 + +/* TX/RX endianness, default = LSB */ +#define PCR_MSB_ENDIAN 0x00080000 + +/* Normal (default) / open drain mode (TX only) */ +#define PCR_TX_PINS_OPEN_DRAIN 0x00040000 + +/* No framing bit transmitted and expected on RX? (default = framing bit) */ +#define PCR_SOF_NO_FBIT 0x00020000 + +/* Drive data pins? 
*/
+#define PCR_TX_DATA_ENABLE 0x00010000
+
+/* Voice 56k type: drive the data pins low (default), high, high Z */
+#define PCR_TX_V56K_HIGH 0x00002000
+#define PCR_TX_V56K_HIGH_IMP 0x00004000
+
+/* Unassigned type: drive the data pins low (default), high, high Z */
+#define PCR_TX_UNASS_HIGH 0x00000800
+#define PCR_TX_UNASS_HIGH_IMP 0x00001000
+
+/* T1 @ 1.544MHz only: Fbit dictated in FIFO (default) or high Z */
+#define PCR_TX_FB_HIGH_IMP 0x00000400
+
+/* 56k data endianness - which bit unused: high (default) or low */
+#define PCR_TX_56KE_BIT_0_UNUSED 0x00000200
+
+/* 56k data transmission type: 32/8 bit data (default) or 56K data */
+#define PCR_TX_56KS_56K_DATA 0x00000100
+
+/* hss_config, cCR */
+/* Number of packetized clients, default = 1 */
+#define CCR_NPE_HFIFO_2_HDLC 0x04000000
+#define CCR_NPE_HFIFO_3_OR_4HDLC 0x08000000
+
+/* default = no loopback */
+#define CCR_LOOPBACK 0x02000000
+
+/* HSS number, default = 0 (first) */
+#define CCR_SECOND_HSS 0x01000000
+
+/* hss_config, clkCR: main:10, num:10, denom:12 */
+#define CLK42X_SPEED_EXP ((0x3FF << 22) | (2 << 12) | 15) /* 65 kHz */
+
+#define CLK42X_SPEED_512KHZ ((130 << 22) | (2 << 12) | 15)
+#define CLK42X_SPEED_1536KHZ ((43 << 22) | (18 << 12) | 47)
+#define CLK42X_SPEED_1544KHZ ((43 << 22) | (33 << 12) | 192)
+#define CLK42X_SPEED_2048KHZ ((32 << 22) | (34 << 12) | 63)
+#define CLK42X_SPEED_4096KHZ ((16 << 22) | (34 << 12) | 127)
+#define CLK42X_SPEED_8192KHZ ((8 << 22) | (34 << 12) | 255)
+
+#define CLK46X_SPEED_512KHZ ((130 << 22) | (24 << 12) | 127)
+#define CLK46X_SPEED_1536KHZ ((43 << 22) | (152 << 12) | 383)
+#define CLK46X_SPEED_1544KHZ ((43 << 22) | (66 << 12) | 385)
+#define CLK46X_SPEED_2048KHZ ((32 << 22) | (280 << 12) | 511)
+#define CLK46X_SPEED_4096KHZ ((16 << 22) | (280 << 12) | 1023)
+#define CLK46X_SPEED_8192KHZ ((8 << 22) | (280 << 12) | 2047)
+
+/* HSS_CONFIG_CLOCK_CR register consists of 3 parts:
+ * A (10 bits), B (10 bits) and C (12 bits).
+ * IXP42x HSS clock generator operation (verified with an oscilloscope):
+ * Each clock bit takes 7.5 ns (1 / 133.xx MHz).
+ * The clock sequence consists of (C - B) states of 0s and 1s, each state is
+ * A bits wide. It's followed by (B + 1) states of 0s and 1s, each state is
+ * (A + 1) bits wide.
+ *
+ * The resulting average clock frequency (assuming 33.333 MHz oscillator) is:
+ * freq = 66.666 MHz / (A + (B + 1) / (C + 1))
+ * minimum freq = 66.666 MHz / (A + 1)
+ * maximum freq = 66.666 MHz / A
+ *
+ * Example: A = 2, B = 2, C = 7, CLOCK_CR register = 2 << 22 | 2 << 12 | 7
+ * freq = 66.666 MHz / (2 + (2 + 1) / (7 + 1)) = 28.07 MHz (Mb/s).
+ * The clock sequence is: 1100110011 (5 doubles) 000111000 (3 triples).
+ * The sequence takes (C - B) * A + (B + 1) * (A + 1) = 5 * 2 + 3 * 3 bits
+ * = 19 bits (each 7.5 ns long) = 142.5 ns (then the sequence repeats).
+ * The sequence consists of 4 complete clock periods, thus the average
+ * frequency (= clock rate) is 4 / 142.5 ns = 28.07 MHz (Mb/s).
+ * (max specified clock rate for IXP42x HSS is 8.192 Mb/s).
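+ *
+ * As a cross-check, the CLK42X_SPEED_2048KHZ value defined above has
+ * A = 32, B = 34, C = 63, so in integer form (the same computation
+ * check_clock() performs below):
+ * rate = 66666000 * (C + 1) / (A * (C + 1) + B + 1)
+ *      = 66666000 * 64 / 2083 = 2048307 Hz,
+ * i.e. the standard E1 rate of 2.048 Mb/s to within 0.02%.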
+ */ + +/* hss_config, LUT entries */ +#define TDMMAP_UNASSIGNED 0 +#define TDMMAP_HDLC 1 /* HDLC - packetized */ +#define TDMMAP_VOICE56K 2 /* Voice56K - 7-bit channelized */ +#define TDMMAP_VOICE64K 3 /* Voice64K - 8-bit channelized */ + +/* offsets into HSS config */ +#define HSS_CONFIG_TX_PCR 0x00 /* port configuration registers */ +#define HSS_CONFIG_RX_PCR 0x04 +#define HSS_CONFIG_CORE_CR 0x08 /* loopback control, HSS# */ +#define HSS_CONFIG_CLOCK_CR 0x0C /* clock generator control */ +#define HSS_CONFIG_TX_FCR 0x10 /* frame configuration registers */ +#define HSS_CONFIG_RX_FCR 0x14 +#define HSS_CONFIG_TX_LUT 0x18 /* channel look-up tables */ +#define HSS_CONFIG_RX_LUT 0x38 + +/* NPE command codes */ +/* writes the ConfigWord value to the location specified by offset */ +#define PORT_CONFIG_WRITE 0x40 + +/* triggers the NPE to load the contents of the configuration table */ +#define PORT_CONFIG_LOAD 0x41 + +/* triggers the NPE to return an HssErrorReadResponse message */ +#define PORT_ERROR_READ 0x42 + +/* triggers the NPE to reset internal status and enable the HssPacketized + * operation for the flow specified by pPipe + */ +#define PKT_PIPE_FLOW_ENABLE 0x50 +#define PKT_PIPE_FLOW_DISABLE 0x51 +#define PKT_NUM_PIPES_WRITE 0x52 +#define PKT_PIPE_FIFO_SIZEW_WRITE 0x53 +#define PKT_PIPE_HDLC_CFG_WRITE 0x54 +#define PKT_PIPE_IDLE_PATTERN_WRITE 0x55 +#define PKT_PIPE_RX_SIZE_WRITE 0x56 +#define PKT_PIPE_MODE_WRITE 0x57 + +/* HDLC packet status values - desc->status */ +#define ERR_SHUTDOWN 1 /* stop or shutdown occurrence */ +#define ERR_HDLC_ALIGN 2 /* HDLC alignment error */ +#define ERR_HDLC_FCS 3 /* HDLC Frame Check Sum error */ +#define ERR_RXFREE_Q_EMPTY 4 /* RX-free queue became empty while receiving + * this packet (if buf_len < pkt_len) + */ +#define ERR_HDLC_TOO_LONG 5 /* HDLC frame size too long */ +#define ERR_HDLC_ABORT 6 /* abort sequence received */ +#define ERR_DISCONNECTING 7 /* disconnect is in progress */ + +#ifdef __ARMEB__ +typedef struct sk_buff buffer_t; +#define free_buffer dev_kfree_skb +#define free_buffer_irq dev_consume_skb_irq +#else +typedef void buffer_t; +#define free_buffer kfree +#define free_buffer_irq kfree +#endif + +struct port { + struct device *dev; + struct npe *npe; + unsigned int txreadyq; + unsigned int rxtrigq; + unsigned int rxfreeq; + unsigned int rxq; + unsigned int txq; + unsigned int txdoneq; + struct gpio_desc *cts; + struct gpio_desc *rts; + struct gpio_desc *dcd; + struct gpio_desc *dtr; + struct gpio_desc *clk_internal; + struct net_device *netdev; + struct napi_struct napi; + buffer_t *rx_buff_tab[RX_DESCS], *tx_buff_tab[TX_DESCS]; + struct desc *desc_tab; /* coherent */ + dma_addr_t desc_tab_phys; + unsigned int id; + unsigned int clock_type, clock_rate, loopback; + unsigned int initialized, carrier; + u8 hdlc_cfg; + u32 clock_reg; +}; + +/* NPE message structure */ +struct msg { +#ifdef __ARMEB__ + u8 cmd, unused, hss_port, index; + union { + struct { u8 data8a, data8b, data8c, data8d; }; + struct { u16 data16a, data16b; }; + struct { u32 data32; }; + }; +#else + u8 index, hss_port, unused, cmd; + union { + struct { u8 data8d, data8c, data8b, data8a; }; + struct { u16 data16b, data16a; }; + struct { u32 data32; }; + }; +#endif +}; + +/* HDLC packet descriptor */ +struct desc { + u32 next; /* pointer to next buffer, unused */ + +#ifdef __ARMEB__ + u16 buf_len; /* buffer length */ + u16 pkt_len; /* packet length */ + u32 data; /* pointer to data buffer in RAM */ + u8 status; + u8 error_count; + u16 __reserved; +#else + u16 pkt_len; 
/* packet length */ + u16 buf_len; /* buffer length */ + u32 data; /* pointer to data buffer in RAM */ + u16 __reserved; + u8 error_count; + u8 status; +#endif + u32 __reserved1[4]; +}; + +#define rx_desc_phys(port, n) ((port)->desc_tab_phys + \ + (n) * sizeof(struct desc)) +#define rx_desc_ptr(port, n) (&(port)->desc_tab[n]) + +#define tx_desc_phys(port, n) ((port)->desc_tab_phys + \ + ((n) + RX_DESCS) * sizeof(struct desc)) +#define tx_desc_ptr(port, n) (&(port)->desc_tab[(n) + RX_DESCS]) + +/***************************************************************************** + * global variables + ****************************************************************************/ + +static int ports_open; +static struct dma_pool *dma_pool; +static DEFINE_SPINLOCK(npe_lock); + +/***************************************************************************** + * utility functions + ****************************************************************************/ + +static inline struct port *dev_to_port(struct net_device *dev) +{ + return dev_to_hdlc(dev)->priv; +} + +#ifndef __ARMEB__ +static inline void memcpy_swab32(u32 *dest, u32 *src, int cnt) +{ + int i; + + for (i = 0; i < cnt; i++) + dest[i] = swab32(src[i]); +} +#endif + +/***************************************************************************** + * HSS access + ****************************************************************************/ + +static void hss_npe_send(struct port *port, struct msg *msg, const char *what) +{ + u32 *val = (u32 *)msg; + + if (npe_send_message(port->npe, msg, what)) { + pr_crit("HSS-%i: unable to send command [%08X:%08X] to %s\n", + port->id, val[0], val[1], npe_name(port->npe)); + BUG(); + } +} + +static void hss_config_set_lut(struct port *port) +{ + struct msg msg; + int ch; + + memset(&msg, 0, sizeof(msg)); + msg.cmd = PORT_CONFIG_WRITE; + msg.hss_port = port->id; + + for (ch = 0; ch < MAX_CHANNELS; ch++) { + msg.data32 >>= 2; + msg.data32 |= TDMMAP_HDLC << 30; + + if (ch % 16 == 15) { + msg.index = HSS_CONFIG_TX_LUT + ((ch / 4) & ~3); + hss_npe_send(port, &msg, "HSS_SET_TX_LUT"); + + msg.index += HSS_CONFIG_RX_LUT - HSS_CONFIG_TX_LUT; + hss_npe_send(port, &msg, "HSS_SET_RX_LUT"); + } + } +} + +static void hss_config(struct port *port) +{ + struct msg msg; + + memset(&msg, 0, sizeof(msg)); + msg.cmd = PORT_CONFIG_WRITE; + msg.hss_port = port->id; + msg.index = HSS_CONFIG_TX_PCR; + msg.data32 = PCR_FRM_PULSE_DISABLED | PCR_MSB_ENDIAN | + PCR_TX_DATA_ENABLE | PCR_SOF_NO_FBIT; + if (port->clock_type == CLOCK_INT) + msg.data32 |= PCR_SYNC_CLK_DIR_OUTPUT; + hss_npe_send(port, &msg, "HSS_SET_TX_PCR"); + + msg.index = HSS_CONFIG_RX_PCR; + msg.data32 ^= PCR_TX_DATA_ENABLE | PCR_DCLK_EDGE_RISING; + hss_npe_send(port, &msg, "HSS_SET_RX_PCR"); + + memset(&msg, 0, sizeof(msg)); + msg.cmd = PORT_CONFIG_WRITE; + msg.hss_port = port->id; + msg.index = HSS_CONFIG_CORE_CR; + msg.data32 = (port->loopback ? CCR_LOOPBACK : 0) | + (port->id ? 
CCR_SECOND_HSS : 0); + hss_npe_send(port, &msg, "HSS_SET_CORE_CR"); + + memset(&msg, 0, sizeof(msg)); + msg.cmd = PORT_CONFIG_WRITE; + msg.hss_port = port->id; + msg.index = HSS_CONFIG_CLOCK_CR; + msg.data32 = port->clock_reg; + hss_npe_send(port, &msg, "HSS_SET_CLOCK_CR"); + + memset(&msg, 0, sizeof(msg)); + msg.cmd = PORT_CONFIG_WRITE; + msg.hss_port = port->id; + msg.index = HSS_CONFIG_TX_FCR; + msg.data16a = FRAME_OFFSET; + msg.data16b = FRAME_SIZE - 1; + hss_npe_send(port, &msg, "HSS_SET_TX_FCR"); + + memset(&msg, 0, sizeof(msg)); + msg.cmd = PORT_CONFIG_WRITE; + msg.hss_port = port->id; + msg.index = HSS_CONFIG_RX_FCR; + msg.data16a = FRAME_OFFSET; + msg.data16b = FRAME_SIZE - 1; + hss_npe_send(port, &msg, "HSS_SET_RX_FCR"); + + hss_config_set_lut(port); + + memset(&msg, 0, sizeof(msg)); + msg.cmd = PORT_CONFIG_LOAD; + msg.hss_port = port->id; + hss_npe_send(port, &msg, "HSS_LOAD_CONFIG"); + + if (npe_recv_message(port->npe, &msg, "HSS_LOAD_CONFIG") || + /* HSS_LOAD_CONFIG for port #1 returns port_id = #4 */ + msg.cmd != PORT_CONFIG_LOAD || msg.data32) { + pr_crit("HSS-%i: HSS_LOAD_CONFIG failed\n", port->id); + BUG(); + } + + /* HDLC may stop working without this - check FIXME */ + npe_recv_message(port->npe, &msg, "FLUSH_IT"); +} + +static void hss_set_hdlc_cfg(struct port *port) +{ + struct msg msg; + + memset(&msg, 0, sizeof(msg)); + msg.cmd = PKT_PIPE_HDLC_CFG_WRITE; + msg.hss_port = port->id; + msg.data8a = port->hdlc_cfg; /* rx_cfg */ + msg.data8b = port->hdlc_cfg | (PKT_EXTRA_FLAGS << 3); /* tx_cfg */ + hss_npe_send(port, &msg, "HSS_SET_HDLC_CFG"); +} + +static u32 hss_get_status(struct port *port) +{ + struct msg msg; + + memset(&msg, 0, sizeof(msg)); + msg.cmd = PORT_ERROR_READ; + msg.hss_port = port->id; + hss_npe_send(port, &msg, "PORT_ERROR_READ"); + if (npe_recv_message(port->npe, &msg, "PORT_ERROR_READ")) { + pr_crit("HSS-%i: unable to read HSS status\n", port->id); + BUG(); + } + + return msg.data32; +} + +static void hss_start_hdlc(struct port *port) +{ + struct msg msg; + + memset(&msg, 0, sizeof(msg)); + msg.cmd = PKT_PIPE_FLOW_ENABLE; + msg.hss_port = port->id; + msg.data32 = 0; + hss_npe_send(port, &msg, "HSS_ENABLE_PKT_PIPE"); +} + +static void hss_stop_hdlc(struct port *port) +{ + struct msg msg; + + memset(&msg, 0, sizeof(msg)); + msg.cmd = PKT_PIPE_FLOW_DISABLE; + msg.hss_port = port->id; + hss_npe_send(port, &msg, "HSS_DISABLE_PKT_PIPE"); + hss_get_status(port); /* make sure it's halted */ +} + +static int hss_load_firmware(struct port *port) +{ + struct msg msg; + int err; + + if (port->initialized) + return 0; + + if (!npe_running(port->npe)) { + err = npe_load_firmware(port->npe, npe_name(port->npe), + port->dev); + if (err) + return err; + } + + /* HDLC mode configuration */ + memset(&msg, 0, sizeof(msg)); + msg.cmd = PKT_NUM_PIPES_WRITE; + msg.hss_port = port->id; + msg.data8a = PKT_NUM_PIPES; + hss_npe_send(port, &msg, "HSS_SET_PKT_PIPES"); + + msg.cmd = PKT_PIPE_FIFO_SIZEW_WRITE; + msg.data8a = PKT_PIPE_FIFO_SIZEW; + hss_npe_send(port, &msg, "HSS_SET_PKT_FIFO"); + + msg.cmd = PKT_PIPE_MODE_WRITE; + msg.data8a = NPE_PKT_MODE_HDLC; + /* msg.data8b = inv_mask */ + /* msg.data8c = or_mask */ + hss_npe_send(port, &msg, "HSS_SET_PKT_MODE"); + + msg.cmd = PKT_PIPE_RX_SIZE_WRITE; + msg.data16a = HDLC_MAX_MRU; /* including CRC */ + hss_npe_send(port, &msg, "HSS_SET_PKT_RX_SIZE"); + + msg.cmd = PKT_PIPE_IDLE_PATTERN_WRITE; + msg.data32 = 0x7F7F7F7F; /* ??? 
FIXME */ + hss_npe_send(port, &msg, "HSS_SET_PKT_IDLE"); + + port->initialized = 1; + return 0; +} + +/***************************************************************************** + * packetized (HDLC) operation + ****************************************************************************/ + +static inline void debug_pkt(struct net_device *dev, const char *func, + u8 *data, int len) +{ +#if DEBUG_PKT_BYTES + int i; + + printk(KERN_DEBUG "%s: %s(%i)", dev->name, func, len); + for (i = 0; i < len; i++) { + if (i >= DEBUG_PKT_BYTES) + break; + printk("%s%02X", !(i % 4) ? " " : "", data[i]); + } + printk("\n"); +#endif +} + +static inline void debug_desc(u32 phys, struct desc *desc) +{ +#if DEBUG_DESC + printk(KERN_DEBUG "%X: %X %3X %3X %08X %X %X\n", + phys, desc->next, desc->buf_len, desc->pkt_len, + desc->data, desc->status, desc->error_count); +#endif +} + +static inline int queue_get_desc(unsigned int queue, struct port *port, + int is_tx) +{ + u32 phys, tab_phys, n_desc; + struct desc *tab; + + phys = qmgr_get_entry(queue); + if (!phys) + return -1; + + BUG_ON(phys & 0x1F); + tab_phys = is_tx ? tx_desc_phys(port, 0) : rx_desc_phys(port, 0); + tab = is_tx ? tx_desc_ptr(port, 0) : rx_desc_ptr(port, 0); + n_desc = (phys - tab_phys) / sizeof(struct desc); + BUG_ON(n_desc >= (is_tx ? TX_DESCS : RX_DESCS)); + debug_desc(phys, &tab[n_desc]); + BUG_ON(tab[n_desc].next); + return n_desc; +} + +static inline void queue_put_desc(unsigned int queue, u32 phys, + struct desc *desc) +{ + debug_desc(phys, desc); + BUG_ON(phys & 0x1F); + qmgr_put_entry(queue, phys); + /* Don't check for queue overflow here, we've allocated sufficient + * length and queues >= 32 don't support this check anyway. + */ +} + +static inline void dma_unmap_tx(struct port *port, struct desc *desc) +{ +#ifdef __ARMEB__ + dma_unmap_single(&port->netdev->dev, desc->data, + desc->buf_len, DMA_TO_DEVICE); +#else + dma_unmap_single(&port->netdev->dev, desc->data & ~3, + ALIGN((desc->data & 3) + desc->buf_len, 4), + DMA_TO_DEVICE); +#endif +} + +static void hss_hdlc_set_carrier(void *pdev, int carrier) +{ + struct net_device *netdev = pdev; + struct port *port = dev_to_port(netdev); + unsigned long flags; + + spin_lock_irqsave(&npe_lock, flags); + port->carrier = carrier; + if (!port->loopback) { + if (carrier) + netif_carrier_on(netdev); + else + netif_carrier_off(netdev); + } + spin_unlock_irqrestore(&npe_lock, flags); +} + +static void hss_hdlc_rx_irq(void *pdev) +{ + struct net_device *dev = pdev; + struct port *port = dev_to_port(dev); + +#if DEBUG_RX + printk(KERN_DEBUG "%s: hss_hdlc_rx_irq\n", dev->name); +#endif + qmgr_disable_irq(port->rxq); + napi_schedule(&port->napi); +} + +static int hss_hdlc_poll(struct napi_struct *napi, int budget) +{ + struct port *port = container_of(napi, struct port, napi); + struct net_device *dev = port->netdev; + unsigned int rxq = port->rxq; + unsigned int rxfreeq = port->rxfreeq; + int received = 0; + +#if DEBUG_RX + printk(KERN_DEBUG "%s: hss_hdlc_poll\n", dev->name); +#endif + + while (received < budget) { + struct sk_buff *skb; + struct desc *desc; + int n; +#ifdef __ARMEB__ + struct sk_buff *temp; + u32 phys; +#endif + + n = queue_get_desc(rxq, port, 0); + if (n < 0) { +#if DEBUG_RX + printk(KERN_DEBUG "%s: hss_hdlc_poll" + " napi_complete\n", dev->name); +#endif + napi_complete(napi); + qmgr_enable_irq(rxq); + if (!qmgr_stat_empty(rxq) && + napi_reschedule(napi)) { +#if DEBUG_RX + printk(KERN_DEBUG "%s: hss_hdlc_poll" + " napi_reschedule succeeded\n", + dev->name); +#endif + 
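+ /* A frame arrived between napi_complete() and
+ * qmgr_enable_irq(); mask the IRQ again and keep
+ * polling.
+ */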
qmgr_disable_irq(rxq); + continue; + } +#if DEBUG_RX + printk(KERN_DEBUG "%s: hss_hdlc_poll all done\n", + dev->name); +#endif + return received; /* all work done */ + } + + desc = rx_desc_ptr(port, n); +#if 0 /* FIXME - error_count counts modulo 256, perhaps we should use it */ + if (desc->error_count) + printk(KERN_DEBUG "%s: hss_hdlc_poll status 0x%02X" + " errors %u\n", dev->name, desc->status, + desc->error_count); +#endif + skb = NULL; + switch (desc->status) { + case 0: +#ifdef __ARMEB__ + skb = netdev_alloc_skb(dev, RX_SIZE); + if (skb) { + phys = dma_map_single(&dev->dev, skb->data, + RX_SIZE, + DMA_FROM_DEVICE); + if (dma_mapping_error(&dev->dev, phys)) { + dev_kfree_skb(skb); + skb = NULL; + } + } +#else + skb = netdev_alloc_skb(dev, desc->pkt_len); +#endif + if (!skb) + dev->stats.rx_dropped++; + break; + case ERR_HDLC_ALIGN: + case ERR_HDLC_ABORT: + dev->stats.rx_frame_errors++; + dev->stats.rx_errors++; + break; + case ERR_HDLC_FCS: + dev->stats.rx_crc_errors++; + dev->stats.rx_errors++; + break; + case ERR_HDLC_TOO_LONG: + dev->stats.rx_length_errors++; + dev->stats.rx_errors++; + break; + default: /* FIXME - remove printk */ + netdev_err(dev, "hss_hdlc_poll: status 0x%02X errors %u\n", + desc->status, desc->error_count); + dev->stats.rx_errors++; + } + + if (!skb) { + /* put the desc back on RX-ready queue */ + desc->buf_len = RX_SIZE; + desc->pkt_len = desc->status = 0; + queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc); + continue; + } + + /* process received frame */ +#ifdef __ARMEB__ + temp = skb; + skb = port->rx_buff_tab[n]; + dma_unmap_single(&dev->dev, desc->data, + RX_SIZE, DMA_FROM_DEVICE); +#else + dma_sync_single_for_cpu(&dev->dev, desc->data, + RX_SIZE, DMA_FROM_DEVICE); + memcpy_swab32((u32 *)skb->data, (u32 *)port->rx_buff_tab[n], + ALIGN(desc->pkt_len, 4) / 4); +#endif + skb_put(skb, desc->pkt_len); + + debug_pkt(dev, "hss_hdlc_poll", skb->data, skb->len); + + skb->protocol = hdlc_type_trans(skb, dev); + dev->stats.rx_packets++; + dev->stats.rx_bytes += skb->len; + netif_receive_skb(skb); + + /* put the new buffer on RX-free queue */ +#ifdef __ARMEB__ + port->rx_buff_tab[n] = temp; + desc->data = phys; +#endif + desc->buf_len = RX_SIZE; + desc->pkt_len = 0; + queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc); + received++; + } +#if DEBUG_RX + printk(KERN_DEBUG "hss_hdlc_poll: end, not all work done\n"); +#endif + return received; /* not all work done */ +} + +static void hss_hdlc_txdone_irq(void *pdev) +{ + struct net_device *dev = pdev; + struct port *port = dev_to_port(dev); + int n_desc; + +#if DEBUG_TX + printk(KERN_DEBUG DRV_NAME ": hss_hdlc_txdone_irq\n"); +#endif + while ((n_desc = queue_get_desc(port->txdoneq, + port, 1)) >= 0) { + struct desc *desc; + int start; + + desc = tx_desc_ptr(port, n_desc); + + dev->stats.tx_packets++; + dev->stats.tx_bytes += desc->pkt_len; + + dma_unmap_tx(port, desc); +#if DEBUG_TX + printk(KERN_DEBUG "%s: hss_hdlc_txdone_irq free %p\n", + dev->name, port->tx_buff_tab[n_desc]); +#endif + free_buffer_irq(port->tx_buff_tab[n_desc]); + port->tx_buff_tab[n_desc] = NULL; + + start = qmgr_stat_below_low_watermark(port->txreadyq); + queue_put_desc(port->txreadyq, + tx_desc_phys(port, n_desc), desc); + if (start) { /* TX-ready queue was empty */ +#if DEBUG_TX + printk(KERN_DEBUG "%s: hss_hdlc_txdone_irq xmit" + " ready\n", dev->name); +#endif + netif_wake_queue(dev); + } + } +} + +static int hss_hdlc_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct port *port = dev_to_port(dev); + unsigned int txreadyq = 
port->txreadyq; + int len, offset, bytes, n; + void *mem; + u32 phys; + struct desc *desc; + +#if DEBUG_TX + printk(KERN_DEBUG "%s: hss_hdlc_xmit\n", dev->name); +#endif + + if (unlikely(skb->len > HDLC_MAX_MRU)) { + dev_kfree_skb(skb); + dev->stats.tx_errors++; + return NETDEV_TX_OK; + } + + debug_pkt(dev, "hss_hdlc_xmit", skb->data, skb->len); + + len = skb->len; +#ifdef __ARMEB__ + offset = 0; /* no need to keep alignment */ + bytes = len; + mem = skb->data; +#else + offset = (int)skb->data & 3; /* keep 32-bit alignment */ + bytes = ALIGN(offset + len, 4); + mem = kmalloc(bytes, GFP_ATOMIC); + if (!mem) { + dev_kfree_skb(skb); + dev->stats.tx_dropped++; + return NETDEV_TX_OK; + } + memcpy_swab32(mem, (u32 *)((uintptr_t)skb->data & ~3), bytes / 4); + dev_kfree_skb(skb); +#endif + + phys = dma_map_single(&dev->dev, mem, bytes, DMA_TO_DEVICE); + if (dma_mapping_error(&dev->dev, phys)) { +#ifdef __ARMEB__ + dev_kfree_skb(skb); +#else + kfree(mem); +#endif + dev->stats.tx_dropped++; + return NETDEV_TX_OK; + } + + n = queue_get_desc(txreadyq, port, 1); + BUG_ON(n < 0); + desc = tx_desc_ptr(port, n); + +#ifdef __ARMEB__ + port->tx_buff_tab[n] = skb; +#else + port->tx_buff_tab[n] = mem; +#endif + desc->data = phys + offset; + desc->buf_len = desc->pkt_len = len; + + wmb(); + queue_put_desc(port->txq, tx_desc_phys(port, n), desc); + + if (qmgr_stat_below_low_watermark(txreadyq)) { /* empty */ +#if DEBUG_TX + printk(KERN_DEBUG "%s: hss_hdlc_xmit queue full\n", dev->name); +#endif + netif_stop_queue(dev); + /* we could miss TX ready interrupt */ + if (!qmgr_stat_below_low_watermark(txreadyq)) { +#if DEBUG_TX + printk(KERN_DEBUG "%s: hss_hdlc_xmit ready again\n", + dev->name); +#endif + netif_wake_queue(dev); + } + } + +#if DEBUG_TX + printk(KERN_DEBUG "%s: hss_hdlc_xmit end\n", dev->name); +#endif + return NETDEV_TX_OK; +} + +static int request_hdlc_queues(struct port *port) +{ + int err; + + err = qmgr_request_queue(port->rxfreeq, RX_DESCS, 0, 0, + "%s:RX-free", port->netdev->name); + if (err) + return err; + + err = qmgr_request_queue(port->rxq, RX_DESCS, 0, 0, + "%s:RX", port->netdev->name); + if (err) + goto rel_rxfree; + + err = qmgr_request_queue(port->txq, TX_DESCS, 0, 0, + "%s:TX", port->netdev->name); + if (err) + goto rel_rx; + + err = qmgr_request_queue(port->txreadyq, TX_DESCS, 0, 0, + "%s:TX-ready", port->netdev->name); + if (err) + goto rel_tx; + + err = qmgr_request_queue(port->txdoneq, TX_DESCS, 0, 0, + "%s:TX-done", port->netdev->name); + if (err) + goto rel_txready; + return 0; + +rel_txready: + qmgr_release_queue(port->txreadyq); +rel_tx: + qmgr_release_queue(port->txq); +rel_rx: + qmgr_release_queue(port->rxq); +rel_rxfree: + qmgr_release_queue(port->rxfreeq); + printk(KERN_DEBUG "%s: unable to request hardware queues\n", + port->netdev->name); + return err; +} + +static void release_hdlc_queues(struct port *port) +{ + qmgr_release_queue(port->rxfreeq); + qmgr_release_queue(port->rxq); + qmgr_release_queue(port->txdoneq); + qmgr_release_queue(port->txq); + qmgr_release_queue(port->txreadyq); +} + +static int init_hdlc_queues(struct port *port) +{ + int i; + + if (!ports_open) { + dma_pool = dma_pool_create(DRV_NAME, &port->netdev->dev, + POOL_ALLOC_SIZE, 32, 0); + if (!dma_pool) + return -ENOMEM; + } + + port->desc_tab = dma_pool_zalloc(dma_pool, GFP_KERNEL, + &port->desc_tab_phys); + if (!port->desc_tab) + return -ENOMEM; + memset(port->rx_buff_tab, 0, sizeof(port->rx_buff_tab)); /* tables */ + memset(port->tx_buff_tab, 0, sizeof(port->tx_buff_tab)); + + /* Setup RX buffers */ 
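+ /* Each RX descriptor gets a permanent RX_SIZE buffer: a real skb on
+ * big-endian CPUs (the NPE DMAs into it directly), or a kmalloc()
+ * bounce buffer on little-endian ones (hss_hdlc_poll() byte-swaps the
+ * data into a freshly allocated skb instead).
+ */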
+ for (i = 0; i < RX_DESCS; i++) { + struct desc *desc = rx_desc_ptr(port, i); + buffer_t *buff; + void *data; +#ifdef __ARMEB__ + buff = netdev_alloc_skb(port->netdev, RX_SIZE); + if (!buff) + return -ENOMEM; + data = buff->data; +#else + buff = kmalloc(RX_SIZE, GFP_KERNEL); + if (!buff) + return -ENOMEM; + data = buff; +#endif + desc->buf_len = RX_SIZE; + desc->data = dma_map_single(&port->netdev->dev, data, + RX_SIZE, DMA_FROM_DEVICE); + if (dma_mapping_error(&port->netdev->dev, desc->data)) { + free_buffer(buff); + return -EIO; + } + port->rx_buff_tab[i] = buff; + } + + return 0; +} + +static void destroy_hdlc_queues(struct port *port) +{ + int i; + + if (port->desc_tab) { + for (i = 0; i < RX_DESCS; i++) { + struct desc *desc = rx_desc_ptr(port, i); + buffer_t *buff = port->rx_buff_tab[i]; + + if (buff) { + dma_unmap_single(&port->netdev->dev, + desc->data, RX_SIZE, + DMA_FROM_DEVICE); + free_buffer(buff); + } + } + for (i = 0; i < TX_DESCS; i++) { + struct desc *desc = tx_desc_ptr(port, i); + buffer_t *buff = port->tx_buff_tab[i]; + + if (buff) { + dma_unmap_tx(port, desc); + free_buffer(buff); + } + } + dma_pool_free(dma_pool, port->desc_tab, port->desc_tab_phys); + port->desc_tab = NULL; + } + + if (!ports_open && dma_pool) { + dma_pool_destroy(dma_pool); + dma_pool = NULL; + } +} + +static irqreturn_t hss_hdlc_dcd_irq(int irq, void *data) +{ + struct net_device *dev = data; + struct port *port = dev_to_port(dev); + int val; + + val = gpiod_get_value(port->dcd); + hss_hdlc_set_carrier(dev, val); + + return IRQ_HANDLED; +} + +static int hss_hdlc_open(struct net_device *dev) +{ + struct port *port = dev_to_port(dev); + unsigned long flags; + int i, err = 0; + int val; + + err = hdlc_open(dev); + if (err) + return err; + + err = hss_load_firmware(port); + if (err) + goto err_hdlc_close; + + err = request_hdlc_queues(port); + if (err) + goto err_hdlc_close; + + err = init_hdlc_queues(port); + if (err) + goto err_destroy_queues; + + spin_lock_irqsave(&npe_lock, flags); + + /* Set the carrier, the GPIO is flagged active low so this will return + * 1 if DCD is asserted. 
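+ * gpiod_get_value() returns the logical level with the ACTIVE_LOW
+ * flag already applied by gpiolib, so no manual inversion is needed.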
+ */ + val = gpiod_get_value(port->dcd); + hss_hdlc_set_carrier(dev, val); + + /* Set up an IRQ for DCD */ + err = request_irq(gpiod_to_irq(port->dcd), hss_hdlc_dcd_irq, 0, "IXP4xx HSS", dev); + if (err) { + dev_err(&dev->dev, "ixp4xx_hss: failed to request DCD IRQ (%i)\n", err); + goto err_unlock; + } + + /* GPIOs are flagged active low so this asserts DTR and RTS */ + gpiod_set_value(port->dtr, 1); + gpiod_set_value(port->rts, 1); + + spin_unlock_irqrestore(&npe_lock, flags); + + /* Populate queues with buffers, no failure after this point */ + for (i = 0; i < TX_DESCS; i++) + queue_put_desc(port->txreadyq, + tx_desc_phys(port, i), tx_desc_ptr(port, i)); + + for (i = 0; i < RX_DESCS; i++) + queue_put_desc(port->rxfreeq, + rx_desc_phys(port, i), rx_desc_ptr(port, i)); + + napi_enable(&port->napi); + netif_start_queue(dev); + + qmgr_set_irq(port->rxq, QUEUE_IRQ_SRC_NOT_EMPTY, + hss_hdlc_rx_irq, dev); + + qmgr_set_irq(port->txdoneq, QUEUE_IRQ_SRC_NOT_EMPTY, + hss_hdlc_txdone_irq, dev); + qmgr_enable_irq(port->txdoneq); + + ports_open++; + + hss_set_hdlc_cfg(port); + hss_config(port); + + hss_start_hdlc(port); + + /* we may already have RX data, enables IRQ */ + napi_schedule(&port->napi); + return 0; + +err_unlock: + spin_unlock_irqrestore(&npe_lock, flags); +err_destroy_queues: + destroy_hdlc_queues(port); + release_hdlc_queues(port); +err_hdlc_close: + hdlc_close(dev); + return err; +} + +static int hss_hdlc_close(struct net_device *dev) +{ + struct port *port = dev_to_port(dev); + unsigned long flags; + int i, buffs = RX_DESCS; /* allocated RX buffers */ + + spin_lock_irqsave(&npe_lock, flags); + ports_open--; + qmgr_disable_irq(port->rxq); + netif_stop_queue(dev); + napi_disable(&port->napi); + + hss_stop_hdlc(port); + + while (queue_get_desc(port->rxfreeq, port, 0) >= 0) + buffs--; + while (queue_get_desc(port->rxq, port, 0) >= 0) + buffs--; + + if (buffs) + netdev_crit(dev, "unable to drain RX queue, %i buffer(s) left in NPE\n", + buffs); + + buffs = TX_DESCS; + while (queue_get_desc(port->txq, port, 1) >= 0) + buffs--; /* cancel TX */ + + i = 0; + do { + while (queue_get_desc(port->txreadyq, port, 1) >= 0) + buffs--; + if (!buffs) + break; + } while (++i < MAX_CLOSE_WAIT); + + if (buffs) + netdev_crit(dev, "unable to drain TX queue, %i buffer(s) left in NPE\n", + buffs); +#if DEBUG_CLOSE + if (!buffs) + printk(KERN_DEBUG "Draining TX queues took %i cycles\n", i); +#endif + qmgr_disable_irq(port->txdoneq); + + free_irq(gpiod_to_irq(port->dcd), dev); + /* GPIOs are flagged active low so this de-asserts DTR and RTS */ + gpiod_set_value(port->dtr, 0); + gpiod_set_value(port->rts, 0); + spin_unlock_irqrestore(&npe_lock, flags); + + destroy_hdlc_queues(port); + release_hdlc_queues(port); + hdlc_close(dev); + return 0; +} + +static int hss_hdlc_attach(struct net_device *dev, unsigned short encoding, + unsigned short parity) +{ + struct port *port = dev_to_port(dev); + + if (encoding != ENCODING_NRZ) + return -EINVAL; + + switch (parity) { + case PARITY_CRC16_PR1_CCITT: + port->hdlc_cfg = 0; + return 0; + + case PARITY_CRC32_PR1_CCITT: + port->hdlc_cfg = PKT_HDLC_CRC_32; + return 0; + + default: + return -EINVAL; + } +} + +static u32 check_clock(u32 timer_freq, u32 rate, u32 a, u32 b, u32 c, + u32 *best, u32 *best_diff, u32 *reg) +{ + /* a is 10-bit, b is 10-bit, c is 12-bit */ + u64 new_rate; + u32 new_diff; + + new_rate = timer_freq * (u64)(c + 1); + do_div(new_rate, a * (c + 1) + b + 1); + new_diff = abs((u32)new_rate - rate); + + if (new_diff < *best_diff) { + *best = new_rate; + 
*best_diff = new_diff; + *reg = (a << 22) | (b << 12) | c; + } + return new_diff; +} + +static void find_best_clock(u32 timer_freq, u32 rate, u32 *best, u32 *reg) +{ + u32 a, b, diff = 0xFFFFFFFF; + + a = timer_freq / rate; + + if (a > 0x3FF) { /* 10-bit value - we can go as slow as ca. 65 kb/s */ + check_clock(timer_freq, rate, 0x3FF, 1, 1, best, &diff, reg); + return; + } + if (a == 0) { /* > 66.666 MHz */ + a = 1; /* minimum divider is 1 (a = 0, b = 1, c = 1) */ + rate = timer_freq; + } + + if (rate * a == timer_freq) { /* don't divide by 0 later */ + check_clock(timer_freq, rate, a - 1, 1, 1, best, &diff, reg); + return; + } + + for (b = 0; b < 0x400; b++) { + u64 c = (b + 1) * (u64)rate; + + do_div(c, timer_freq - rate * a); + c--; + if (c >= 0xFFF) { /* 12-bit - no need to check more 'b's */ + if (b == 0 && /* also try a bit higher rate */ + !check_clock(timer_freq, rate, a - 1, 1, 1, best, + &diff, reg)) + return; + check_clock(timer_freq, rate, a, b, 0xFFF, best, + &diff, reg); + return; + } + if (!check_clock(timer_freq, rate, a, b, c, best, &diff, reg)) + return; + if (!check_clock(timer_freq, rate, a, b, c + 1, best, &diff, + reg)) + return; + } +} + +static int hss_hdlc_set_clock(struct port *port, unsigned int clock_type) +{ + switch (clock_type) { + case CLOCK_DEFAULT: + case CLOCK_EXT: + gpiod_set_value(port->clk_internal, 0); + return CLOCK_EXT; + case CLOCK_INT: + gpiod_set_value(port->clk_internal, 1); + return CLOCK_INT; + default: + return -EINVAL; + } +} + +static int hss_hdlc_ioctl(struct net_device *dev, struct if_settings *ifs) +{ + const size_t size = sizeof(sync_serial_settings); + sync_serial_settings new_line; + sync_serial_settings __user *line = ifs->ifs_ifsu.sync; + struct port *port = dev_to_port(dev); + unsigned long flags; + int clk; + + switch (ifs->type) { + case IF_GET_IFACE: + ifs->type = IF_IFACE_V35; + if (ifs->size < size) { + ifs->size = size; /* data size wanted */ + return -ENOBUFS; + } + memset(&new_line, 0, sizeof(new_line)); + new_line.clock_type = port->clock_type; + new_line.clock_rate = port->clock_rate; + new_line.loopback = port->loopback; + if (copy_to_user(line, &new_line, size)) + return -EFAULT; + return 0; + + case IF_IFACE_SYNC_SERIAL: + case IF_IFACE_V35: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + if (copy_from_user(&new_line, line, size)) + return -EFAULT; + + clk = new_line.clock_type; + hss_hdlc_set_clock(port, clk); + + if (clk != CLOCK_EXT && clk != CLOCK_INT) + return -EINVAL; /* No such clock setting */ + + if (new_line.loopback != 0 && new_line.loopback != 1) + return -EINVAL; + + port->clock_type = clk; /* Update settings */ + if (clk == CLOCK_INT) { + find_best_clock(IXP4XX_TIMER_FREQ, + new_line.clock_rate, + &port->clock_rate, &port->clock_reg); + } else { + port->clock_rate = 0; + port->clock_reg = CLK42X_SPEED_2048KHZ; + } + port->loopback = new_line.loopback; + + spin_lock_irqsave(&npe_lock, flags); + + if (dev->flags & IFF_UP) + hss_config(port); + + if (port->loopback || port->carrier) + netif_carrier_on(port->netdev); + else + netif_carrier_off(port->netdev); + spin_unlock_irqrestore(&npe_lock, flags); + + return 0; + + default: + return hdlc_ioctl(dev, ifs); + } +} + +/***************************************************************************** + * initialization + ****************************************************************************/ + +static const struct net_device_ops hss_hdlc_ops = { + .ndo_open = hss_hdlc_open, + .ndo_stop = hss_hdlc_close, + .ndo_start_xmit = hdlc_start_xmit, + 
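+ /* SIOCWANDEV requests (clock type/rate, loopback) end up here;
+ * anything else is passed on to the generic hdlc_ioctl() */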
.ndo_siocwandev = hss_hdlc_ioctl,
+};
+
+static int ixp4xx_hss_probe(struct platform_device *pdev)
+{
+ struct of_phandle_args queue_spec;
+ struct of_phandle_args npe_spec;
+ struct device *dev = &pdev->dev;
+ struct net_device *ndev;
+ struct device_node *np;
+ struct regmap *rmap;
+ struct port *port;
+ hdlc_device *hdlc;
+ int err;
+ u32 val;
+
+ /*
+ * Go into the syscon and check if we have the HSS and HDLC
+ * features available, else this will not work.
+ */
+ rmap = syscon_regmap_lookup_by_compatible("syscon");
+ if (IS_ERR(rmap))
+ return dev_err_probe(dev, PTR_ERR(rmap),
+ "failed to look up syscon\n");
+
+ val = cpu_ixp4xx_features(rmap);
+
+ if ((val & (IXP4XX_FEATURE_HDLC | IXP4XX_FEATURE_HSS)) !=
+ (IXP4XX_FEATURE_HDLC | IXP4XX_FEATURE_HSS)) {
+ dev_err(dev, "HDLC and HSS feature unavailable in platform\n");
+ return -ENODEV;
+ }
+
+ np = dev->of_node;
+
+ port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return -ENOMEM;
+
+ err = of_parse_phandle_with_fixed_args(np, "intel,npe-handle", 1, 0,
+ &npe_spec);
+ if (err)
+ return dev_err_probe(dev, err, "no NPE engine specified\n");
+ /* NPE ID 0x00, 0x10, 0x20... */
+ port->npe = npe_request(npe_spec.args[0] << 4);
+ if (!port->npe) {
+ dev_err(dev, "unable to obtain NPE instance\n");
+ return -ENODEV;
+ }
+
+ /* Get the TX ready queue as resource from queue manager */
+ err = of_parse_phandle_with_fixed_args(np, "intek,queue-chl-txready", 1, 0,
+ &queue_spec);
+ if (err)
+ return dev_err_probe(dev, err, "no txready queue phandle\n");
+ port->txreadyq = queue_spec.args[0];
+ /* Get the RX trig queue as resource from queue manager */
+ err = of_parse_phandle_with_fixed_args(np, "intek,queue-chl-rxtrig", 1, 0,
+ &queue_spec);
+ if (err)
+ return dev_err_probe(dev, err, "no rxtrig queue phandle\n");
+ port->rxtrigq = queue_spec.args[0];
+ /* Get the RX queue as resource from queue manager */
+ err = of_parse_phandle_with_fixed_args(np, "intek,queue-pkt-rx", 1, 0,
+ &queue_spec);
+ if (err)
+ return dev_err_probe(dev, err, "no RX queue phandle\n");
+ port->rxq = queue_spec.args[0];
+ /* Get the TX queue as resource from queue manager */
+ err = of_parse_phandle_with_fixed_args(np, "intek,queue-pkt-tx", 1, 0,
+ &queue_spec);
+ if (err)
+ return dev_err_probe(dev, err, "no TX queue phandle\n");
+ port->txq = queue_spec.args[0];
+ /* Get the RX free queue as resource from queue manager */
+ err = of_parse_phandle_with_fixed_args(np, "intek,queue-pkt-rxfree", 1, 0,
+ &queue_spec);
+ if (err)
+ return dev_err_probe(dev, err, "no RX free queue phandle\n");
+ port->rxfreeq = queue_spec.args[0];
+ /* Get the TX done queue as resource from queue manager */
+ err = of_parse_phandle_with_fixed_args(np, "intek,queue-pkt-txdone", 1, 0,
+ &queue_spec);
+ if (err)
+ return dev_err_probe(dev, err, "no TX done queue phandle\n");
+ port->txdoneq = queue_spec.args[0];
+
+ /* Obtain all the line control GPIOs */
+ port->cts = devm_gpiod_get(dev, "cts", GPIOD_OUT_LOW);
+ if (IS_ERR(port->cts))
+ return dev_err_probe(dev, PTR_ERR(port->cts), "unable to get CTS GPIO\n");
+ port->rts = devm_gpiod_get(dev, "rts", GPIOD_OUT_LOW);
+ if (IS_ERR(port->rts))
+ return dev_err_probe(dev, PTR_ERR(port->rts), "unable to get RTS GPIO\n");
+ port->dcd = devm_gpiod_get(dev, "dcd", GPIOD_IN);
+ if (IS_ERR(port->dcd))
+ return dev_err_probe(dev, PTR_ERR(port->dcd), "unable to get DCD GPIO\n");
+ port->dtr = devm_gpiod_get(dev, "dtr", GPIOD_OUT_LOW);
+ if (IS_ERR(port->dtr))
+ return dev_err_probe(dev, PTR_ERR(port->dtr), "unable to get DTR GPIO\n");
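+ /* GPIO selecting the internal (high) or external (low) clock,
+ * toggled by hss_hdlc_set_clock() */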
+ port->clk_internal = devm_gpiod_get(dev, "clk-internal", GPIOD_OUT_LOW);
+ if (IS_ERR(port->clk_internal))
+ return dev_err_probe(dev, PTR_ERR(port->clk_internal),
+ "unable to get CLK internal GPIO\n");
+
+ ndev = alloc_hdlcdev(port);
+ port->netdev = ndev;
+ if (!port->netdev) {
+ err = -ENOMEM;
+ goto err_plat;
+ }
+
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+ hdlc = dev_to_hdlc(ndev);
+ hdlc->attach = hss_hdlc_attach;
+ hdlc->xmit = hss_hdlc_xmit;
+ ndev->netdev_ops = &hss_hdlc_ops;
+ ndev->tx_queue_len = 100;
+ port->clock_type = CLOCK_EXT;
+ port->clock_rate = 0;
+ port->clock_reg = CLK42X_SPEED_2048KHZ;
+ port->id = pdev->id;
+ port->dev = &pdev->dev;
+ netif_napi_add_weight(ndev, &port->napi, hss_hdlc_poll, NAPI_WEIGHT);
+
+ err = register_hdlc_device(ndev);
+ if (err)
+ goto err_free_netdev;
+
+ platform_set_drvdata(pdev, port);
+
+ netdev_info(ndev, "initialized\n");
+ return 0;
+
+err_free_netdev:
+ free_netdev(ndev);
+err_plat:
+ npe_release(port->npe);
+ return err;
+}
+
+static int ixp4xx_hss_remove(struct platform_device *pdev)
+{
+ struct port *port = platform_get_drvdata(pdev);
+
+ unregister_hdlc_device(port->netdev);
+ free_netdev(port->netdev);
+ npe_release(port->npe);
+ return 0;
+}
+
+static struct platform_driver ixp4xx_hss_driver = {
+ .driver.name = DRV_NAME,
+ .probe = ixp4xx_hss_probe,
+ .remove = ixp4xx_hss_remove,
+};
+module_platform_driver(ixp4xx_hss_driver);
+
+MODULE_AUTHOR("Krzysztof Halasa");
+MODULE_DESCRIPTION("Intel IXP4xx HSS driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:ixp4xx_hss");
diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
new file mode 100644
index 000000000..56326f38f
--- /dev/null
+++ b/drivers/net/wan/lapbether.c
@@ -0,0 +1,526 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * "LAPB via ethernet" driver release 001
+ *
+ * This code REQUIRES 2.1.15 or higher/ NET3.038
+ *
+ * This is a "pseudo" network driver to allow LAPB over Ethernet.
+ *
+ * This driver can use any ethernet destination address, and can be
+ * limited to accept frames from one dedicated ethernet card only.
+ *
+ * History
+ * LAPBETH 001 Jonathan Naylor Cloned from bpqether.c
+ * 2000-10-29 Henner Eisen lapb_data_indication() return status.
+ * 2000-11-14 Henner Eisen dev_hold/put, NETDEV_GOING_DOWN support
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/socket.h>
+#include <linux/in.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/net.h>
+#include <linux/inet.h>
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>
+#include <linux/skbuff.h>
+#include <net/sock.h>
+#include <linux/uaccess.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/notifier.h>
+#include <linux/stat.h>
+#include <linux/module.h>
+#include <linux/lapb.h>
+#include <linux/init.h>
+
+#include <net/x25device.h>
+
+static const u8 bcast_addr[6] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
+
+/* If this number is made larger, check that the temporary string buffer
+ * in lapbeth_new_device is large enough to store the probe device name.
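+ * (With the current code the name is generated by alloc_netdev() from
+ * the "lapb%d" template, so it is bounded by IFNAMSIZ.)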
+ */ +#define MAXLAPBDEV 100 + +struct lapbethdev { + struct list_head node; + struct net_device *ethdev; /* link to ethernet device */ + struct net_device *axdev; /* lapbeth device (lapb#) */ + bool up; + spinlock_t up_lock; /* Protects "up" */ + struct sk_buff_head rx_queue; + struct napi_struct napi; +}; + +static LIST_HEAD(lapbeth_devices); + +static void lapbeth_connected(struct net_device *dev, int reason); +static void lapbeth_disconnected(struct net_device *dev, int reason); + +/* ------------------------------------------------------------------------ */ + +/* Get the LAPB device for the ethernet device + */ +static struct lapbethdev *lapbeth_get_x25_dev(struct net_device *dev) +{ + struct lapbethdev *lapbeth; + + list_for_each_entry_rcu(lapbeth, &lapbeth_devices, node, lockdep_rtnl_is_held()) { + if (lapbeth->ethdev == dev) + return lapbeth; + } + return NULL; +} + +static __inline__ int dev_is_ethdev(struct net_device *dev) +{ + return dev->type == ARPHRD_ETHER && strncmp(dev->name, "dummy", 5); +} + +/* ------------------------------------------------------------------------ */ + +static int lapbeth_napi_poll(struct napi_struct *napi, int budget) +{ + struct lapbethdev *lapbeth = container_of(napi, struct lapbethdev, + napi); + struct sk_buff *skb; + int processed = 0; + + for (; processed < budget; ++processed) { + skb = skb_dequeue(&lapbeth->rx_queue); + if (!skb) + break; + netif_receive_skb_core(skb); + } + + if (processed < budget) + napi_complete(napi); + + return processed; +} + +/* Receive a LAPB frame via an ethernet interface. + */ +static int lapbeth_rcv(struct sk_buff *skb, struct net_device *dev, + struct packet_type *ptype, struct net_device *orig_dev) +{ + int len, err; + struct lapbethdev *lapbeth; + + if (dev_net(dev) != &init_net) + goto drop; + + skb = skb_share_check(skb, GFP_ATOMIC); + if (!skb) + return NET_RX_DROP; + + if (!pskb_may_pull(skb, 2)) + goto drop; + + rcu_read_lock(); + lapbeth = lapbeth_get_x25_dev(dev); + if (!lapbeth) + goto drop_unlock_rcu; + spin_lock_bh(&lapbeth->up_lock); + if (!lapbeth->up) + goto drop_unlock; + + len = skb->data[0] + skb->data[1] * 256; + dev->stats.rx_packets++; + dev->stats.rx_bytes += len; + + skb_pull(skb, 2); /* Remove the length bytes */ + skb_trim(skb, len); /* Set the length of the data */ + + err = lapb_data_received(lapbeth->axdev, skb); + if (err != LAPB_OK) { + printk(KERN_DEBUG "lapbether: lapb_data_received err - %d\n", err); + goto drop_unlock; + } +out: + spin_unlock_bh(&lapbeth->up_lock); + rcu_read_unlock(); + return 0; +drop_unlock: + kfree_skb(skb); + goto out; +drop_unlock_rcu: + rcu_read_unlock(); +drop: + kfree_skb(skb); + return 0; +} + +static int lapbeth_data_indication(struct net_device *dev, struct sk_buff *skb) +{ + struct lapbethdev *lapbeth = netdev_priv(dev); + unsigned char *ptr; + + if (skb_cow(skb, 1)) { + kfree_skb(skb); + return NET_RX_DROP; + } + + skb_push(skb, 1); + + ptr = skb->data; + *ptr = X25_IFACE_DATA; + + skb->protocol = x25_type_trans(skb, dev); + + skb_queue_tail(&lapbeth->rx_queue, skb); + napi_schedule(&lapbeth->napi); + return NET_RX_SUCCESS; +} + +/* Send a LAPB frame via an ethernet interface + */ +static netdev_tx_t lapbeth_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct lapbethdev *lapbeth = netdev_priv(dev); + int err; + + spin_lock_bh(&lapbeth->up_lock); + if (!lapbeth->up) + goto drop; + + /* There should be a pseudo header of 1 byte added by upper layers. + * Check to make sure it is there before reading it. 
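+ * The pseudo header is the one-byte X25_IFACE_* control code that the
+ * switch statement below dispatches on.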
+ */ + if (skb->len < 1) + goto drop; + + switch (skb->data[0]) { + case X25_IFACE_DATA: + break; + case X25_IFACE_CONNECT: + err = lapb_connect_request(dev); + if (err == LAPB_CONNECTED) + lapbeth_connected(dev, LAPB_OK); + else if (err != LAPB_OK) + pr_err("lapb_connect_request error: %d\n", err); + goto drop; + case X25_IFACE_DISCONNECT: + err = lapb_disconnect_request(dev); + if (err == LAPB_NOTCONNECTED) + lapbeth_disconnected(dev, LAPB_OK); + else if (err != LAPB_OK) + pr_err("lapb_disconnect_request err: %d\n", err); + fallthrough; + default: + goto drop; + } + + skb_pull(skb, 1); + + err = lapb_data_request(dev, skb); + if (err != LAPB_OK) { + pr_err("lapb_data_request error - %d\n", err); + goto drop; + } +out: + spin_unlock_bh(&lapbeth->up_lock); + return NETDEV_TX_OK; +drop: + kfree_skb(skb); + goto out; +} + +static void lapbeth_data_transmit(struct net_device *ndev, struct sk_buff *skb) +{ + struct lapbethdev *lapbeth = netdev_priv(ndev); + unsigned char *ptr; + struct net_device *dev; + int size = skb->len; + + ptr = skb_push(skb, 2); + + *ptr++ = size % 256; + *ptr++ = size / 256; + + ndev->stats.tx_packets++; + ndev->stats.tx_bytes += size; + + skb->dev = dev = lapbeth->ethdev; + + skb->protocol = htons(ETH_P_DEC); + + skb_reset_network_header(skb); + + dev_hard_header(skb, dev, ETH_P_DEC, bcast_addr, NULL, 0); + + dev_queue_xmit(skb); +} + +static void lapbeth_connected(struct net_device *dev, int reason) +{ + struct lapbethdev *lapbeth = netdev_priv(dev); + unsigned char *ptr; + struct sk_buff *skb = __dev_alloc_skb(1, GFP_ATOMIC | __GFP_NOMEMALLOC); + + if (!skb) + return; + + ptr = skb_put(skb, 1); + *ptr = X25_IFACE_CONNECT; + + skb->protocol = x25_type_trans(skb, dev); + + skb_queue_tail(&lapbeth->rx_queue, skb); + napi_schedule(&lapbeth->napi); +} + +static void lapbeth_disconnected(struct net_device *dev, int reason) +{ + struct lapbethdev *lapbeth = netdev_priv(dev); + unsigned char *ptr; + struct sk_buff *skb = __dev_alloc_skb(1, GFP_ATOMIC | __GFP_NOMEMALLOC); + + if (!skb) + return; + + ptr = skb_put(skb, 1); + *ptr = X25_IFACE_DISCONNECT; + + skb->protocol = x25_type_trans(skb, dev); + + skb_queue_tail(&lapbeth->rx_queue, skb); + napi_schedule(&lapbeth->napi); +} + +/* Set AX.25 callsign + */ +static int lapbeth_set_mac_address(struct net_device *dev, void *addr) +{ + struct sockaddr *sa = addr; + + dev_addr_set(dev, sa->sa_data); + return 0; +} + +static const struct lapb_register_struct lapbeth_callbacks = { + .connect_confirmation = lapbeth_connected, + .connect_indication = lapbeth_connected, + .disconnect_confirmation = lapbeth_disconnected, + .disconnect_indication = lapbeth_disconnected, + .data_indication = lapbeth_data_indication, + .data_transmit = lapbeth_data_transmit, +}; + +/* open/close a device + */ +static int lapbeth_open(struct net_device *dev) +{ + struct lapbethdev *lapbeth = netdev_priv(dev); + int err; + + napi_enable(&lapbeth->napi); + + err = lapb_register(dev, &lapbeth_callbacks); + if (err != LAPB_OK) { + napi_disable(&lapbeth->napi); + pr_err("lapb_register error: %d\n", err); + return -ENODEV; + } + + spin_lock_bh(&lapbeth->up_lock); + lapbeth->up = true; + spin_unlock_bh(&lapbeth->up_lock); + + return 0; +} + +static int lapbeth_close(struct net_device *dev) +{ + struct lapbethdev *lapbeth = netdev_priv(dev); + int err; + + spin_lock_bh(&lapbeth->up_lock); + lapbeth->up = false; + spin_unlock_bh(&lapbeth->up_lock); + + err = lapb_unregister(dev); + if (err != LAPB_OK) + pr_err("lapb_unregister error: %d\n", err); + + 
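+ /* "up" is false now, so lapbeth_rcv() no longer queues frames or
+ * schedules NAPI; it is safe to disable it.
+ */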
napi_disable(&lapbeth->napi); + + return 0; +} + +/* ------------------------------------------------------------------------ */ + +static const struct net_device_ops lapbeth_netdev_ops = { + .ndo_open = lapbeth_open, + .ndo_stop = lapbeth_close, + .ndo_start_xmit = lapbeth_xmit, + .ndo_set_mac_address = lapbeth_set_mac_address, +}; + +static void lapbeth_setup(struct net_device *dev) +{ + dev->netdev_ops = &lapbeth_netdev_ops; + dev->needs_free_netdev = true; + dev->type = ARPHRD_X25; + dev->hard_header_len = 0; + dev->mtu = 1000; + dev->addr_len = 0; +} + +/* Setup a new device. + */ +static int lapbeth_new_device(struct net_device *dev) +{ + struct net_device *ndev; + struct lapbethdev *lapbeth; + int rc = -ENOMEM; + + ASSERT_RTNL(); + + if (dev->type != ARPHRD_ETHER) + return -EINVAL; + + ndev = alloc_netdev(sizeof(*lapbeth), "lapb%d", NET_NAME_UNKNOWN, + lapbeth_setup); + if (!ndev) + goto out; + + /* When transmitting data: + * first this driver removes a pseudo header of 1 byte, + * then the lapb module prepends an LAPB header of at most 3 bytes, + * then this driver prepends a length field of 2 bytes, + * then the underlying Ethernet device prepends its own header. + */ + ndev->needed_headroom = -1 + 3 + 2 + dev->hard_header_len + + dev->needed_headroom; + ndev->needed_tailroom = dev->needed_tailroom; + + lapbeth = netdev_priv(ndev); + lapbeth->axdev = ndev; + + dev_hold(dev); + lapbeth->ethdev = dev; + + lapbeth->up = false; + spin_lock_init(&lapbeth->up_lock); + + skb_queue_head_init(&lapbeth->rx_queue); + netif_napi_add_weight(ndev, &lapbeth->napi, lapbeth_napi_poll, 16); + + rc = -EIO; + if (register_netdevice(ndev)) + goto fail; + + list_add_rcu(&lapbeth->node, &lapbeth_devices); + rc = 0; +out: + return rc; +fail: + dev_put(dev); + free_netdev(ndev); + goto out; +} + +/* Free a lapb network device. + */ +static void lapbeth_free_device(struct lapbethdev *lapbeth) +{ + dev_put(lapbeth->ethdev); + list_del_rcu(&lapbeth->node); + unregister_netdevice(lapbeth->axdev); +} + +/* Handle device status changes. + * + * Called from notifier with RTNL held. 
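+ * Holding the RTNL is also what makes the unlocked
+ * lapbeth_get_x25_dev() calls below safe (note its
+ * lockdep_rtnl_is_held() annotation).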
+ */ +static int lapbeth_device_event(struct notifier_block *this, + unsigned long event, void *ptr) +{ + struct lapbethdev *lapbeth; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); + + if (dev_net(dev) != &init_net) + return NOTIFY_DONE; + + if (!dev_is_ethdev(dev) && !lapbeth_get_x25_dev(dev)) + return NOTIFY_DONE; + + switch (event) { + case NETDEV_UP: + /* New ethernet device -> new LAPB interface */ + if (!lapbeth_get_x25_dev(dev)) + lapbeth_new_device(dev); + break; + case NETDEV_GOING_DOWN: + /* ethernet device closes -> close LAPB interface */ + lapbeth = lapbeth_get_x25_dev(dev); + if (lapbeth) + dev_close(lapbeth->axdev); + break; + case NETDEV_UNREGISTER: + /* ethernet device disappears -> remove LAPB interface */ + lapbeth = lapbeth_get_x25_dev(dev); + if (lapbeth) + lapbeth_free_device(lapbeth); + break; + } + + return NOTIFY_DONE; +} + +/* ------------------------------------------------------------------------ */ + +static struct packet_type lapbeth_packet_type __read_mostly = { + .type = cpu_to_be16(ETH_P_DEC), + .func = lapbeth_rcv, +}; + +static struct notifier_block lapbeth_dev_notifier = { + .notifier_call = lapbeth_device_event, +}; + +static const char banner[] __initconst = + KERN_INFO "LAPB Ethernet driver version 0.02\n"; + +static int __init lapbeth_init_driver(void) +{ + dev_add_pack(&lapbeth_packet_type); + + register_netdevice_notifier(&lapbeth_dev_notifier); + + printk(banner); + + return 0; +} +module_init(lapbeth_init_driver); + +static void __exit lapbeth_cleanup_driver(void) +{ + struct lapbethdev *lapbeth; + struct list_head *entry, *tmp; + + dev_remove_pack(&lapbeth_packet_type); + unregister_netdevice_notifier(&lapbeth_dev_notifier); + + rtnl_lock(); + list_for_each_safe(entry, tmp, &lapbeth_devices) { + lapbeth = list_entry(entry, struct lapbethdev, node); + + dev_put(lapbeth->ethdev); + unregister_netdevice(lapbeth->axdev); + } + rtnl_unlock(); +} +module_exit(lapbeth_cleanup_driver); + +MODULE_AUTHOR("Jonathan Naylor <g4klx@g4klx.demon.co.uk>"); +MODULE_DESCRIPTION("The unofficial LAPB over Ethernet driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/wan/n2.c b/drivers/net/wan/n2.c new file mode 100644 index 000000000..f3e80722b --- /dev/null +++ b/drivers/net/wan/n2.c @@ -0,0 +1,546 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * SDL Inc. RISCom/N2 synchronous serial card driver for Linux + * + * Copyright (C) 1998-2003 Krzysztof Halasa <khc@pm.waw.pl> + * + * For information see <https://www.kernel.org/pub/linux/utils/net/hdlc/> + * + * Note: integrated CSU/DSU/DDS are not supported by this driver + * + * Sources of information: + * Hitachi HD64570 SCA User's Manual + * SDL Inc. 
PPP/HDLC/CISCO driver + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/capability.h> +#include <linux/slab.h> +#include <linux/types.h> +#include <linux/fcntl.h> +#include <linux/in.h> +#include <linux/string.h> +#include <linux/errno.h> +#include <linux/init.h> +#include <linux/ioport.h> +#include <linux/moduleparam.h> +#include <linux/netdevice.h> +#include <linux/hdlc.h> +#include <asm/io.h> +#include "hd64570.h" + +static const char *version = "SDL RISCom/N2 driver version: 1.15"; +static const char *devname = "RISCom/N2"; + +#undef DEBUG_PKT +#define DEBUG_RINGS + +#define USE_WINDOWSIZE 16384 +#define USE_BUS16BITS 1 +#define CLOCK_BASE 9830400 /* 9.8304 MHz */ +#define MAX_PAGES 16 /* 16 RAM pages at max */ +#define MAX_RAM_SIZE 0x80000 /* 512 KB */ +#if MAX_RAM_SIZE > MAX_PAGES * USE_WINDOWSIZE +#undef MAX_RAM_SIZE +#define MAX_RAM_SIZE (MAX_PAGES * USE_WINDOWSIZE) +#endif +#define N2_IOPORTS 0x10 +#define NEED_DETECT_RAM +#define NEED_SCA_MSCI_INTR +#define MAX_TX_BUFFERS 10 + +static char *hw; /* pointer to hw=xxx command line string */ + +/* RISCom/N2 Board Registers */ + +/* PC Control Register */ +#define N2_PCR 0 +#define PCR_RUNSCA 1 /* Run 64570 */ +#define PCR_VPM 2 /* Enable VPM - needed if using RAM above 1 MB */ +#define PCR_ENWIN 4 /* Open window */ +#define PCR_BUS16 8 /* 16-bit bus */ + +/* Memory Base Address Register */ +#define N2_BAR 2 + +/* Page Scan Register */ +#define N2_PSR 4 +#define WIN16K 0x00 +#define WIN32K 0x20 +#define WIN64K 0x40 +#define PSR_WINBITS 0x60 +#define PSR_DMAEN 0x80 +#define PSR_PAGEBITS 0x0F + +/* Modem Control Reg */ +#define N2_MCR 6 +#define CLOCK_OUT_PORT1 0x80 +#define CLOCK_OUT_PORT0 0x40 +#define TX422_PORT1 0x20 +#define TX422_PORT0 0x10 +#define DSR_PORT1 0x08 +#define DSR_PORT0 0x04 +#define DTR_PORT1 0x02 +#define DTR_PORT0 0x01 + +typedef struct port_s { + struct net_device *dev; + struct card_s *card; + spinlock_t lock; /* TX lock */ + sync_serial_settings settings; + int valid; /* port enabled */ + int rxpart; /* partial frame received, next frame invalid*/ + unsigned short encoding; + unsigned short parity; + u16 rxin; /* rx ring buffer 'in' pointer */ + u16 txin; /* tx ring buffer 'in' and 'last' pointers */ + u16 txlast; + u8 rxs, txs, tmc; /* SCA registers */ + u8 phy_node; /* physical port # - 0 or 1 */ + u8 log_node; /* logical port # */ +} port_t; + +typedef struct card_s { + u8 __iomem *winbase; /* ISA window base address */ + u32 phy_winbase; /* ISA physical base address */ + u32 ram_size; /* number of bytes */ + u16 io; /* IO Base address */ + u16 buff_offset; /* offset of first buffer of first channel */ + u16 rx_ring_buffers; /* number of buffers in a ring */ + u16 tx_ring_buffers; + u8 irq; /* IRQ (3-15) */ + + port_t ports[2]; + struct card_s *next_card; +} card_t; + +static card_t *first_card; +static card_t **new_card = &first_card; + +#define sca_reg(reg, card) (0x8000 | (card)->io | \ + ((reg) & 0x0F) | (((reg) & 0xF0) << 6)) +#define sca_in(reg, card) inb(sca_reg(reg, card)) +#define sca_out(value, reg, card) outb(value, sca_reg(reg, card)) +#define sca_inw(reg, card) inw(sca_reg(reg, card)) +#define sca_outw(value, reg, card) outw(value, sca_reg(reg, card)) + +#define port_to_card(port) ((port)->card) +#define log_node(port) ((port)->log_node) +#define phy_node(port) ((port)->phy_node) +#define winsize(card) (USE_WINDOWSIZE) +#define winbase(card) ((card)->winbase) +#define get_port(card, port) ((card)->ports[port].valid ? 
\ + &(card)->ports[port] : NULL) + +static __inline__ u8 sca_get_page(card_t *card) +{ + return inb(card->io + N2_PSR) & PSR_PAGEBITS; +} + +static __inline__ void openwin(card_t *card, u8 page) +{ + u8 psr = inb(card->io + N2_PSR); + + outb((psr & ~PSR_PAGEBITS) | page, card->io + N2_PSR); +} + +#include "hd64570.c" + +static void n2_set_iface(port_t *port) +{ + card_t *card = port->card; + int io = card->io; + u8 mcr = inb(io + N2_MCR); + u8 msci = get_msci(port); + u8 rxs = port->rxs & CLK_BRG_MASK; + u8 txs = port->txs & CLK_BRG_MASK; + + switch (port->settings.clock_type) { + case CLOCK_INT: + mcr |= port->phy_node ? CLOCK_OUT_PORT1 : CLOCK_OUT_PORT0; + rxs |= CLK_BRG_RX; /* BRG output */ + txs |= CLK_RXCLK_TX; /* RX clock */ + break; + + case CLOCK_TXINT: + mcr |= port->phy_node ? CLOCK_OUT_PORT1 : CLOCK_OUT_PORT0; + rxs |= CLK_LINE_RX; /* RXC input */ + txs |= CLK_BRG_TX; /* BRG output */ + break; + + case CLOCK_TXFROMRX: + mcr |= port->phy_node ? CLOCK_OUT_PORT1 : CLOCK_OUT_PORT0; + rxs |= CLK_LINE_RX; /* RXC input */ + txs |= CLK_RXCLK_TX; /* RX clock */ + break; + + default: /* Clock EXTernal */ + mcr &= port->phy_node ? ~CLOCK_OUT_PORT1 : ~CLOCK_OUT_PORT0; + rxs |= CLK_LINE_RX; /* RXC input */ + txs |= CLK_LINE_TX; /* TXC input */ + } + + outb(mcr, io + N2_MCR); + port->rxs = rxs; + port->txs = txs; + sca_out(rxs, msci + RXS, card); + sca_out(txs, msci + TXS, card); + sca_set_port(port); +} + +static int n2_open(struct net_device *dev) +{ + port_t *port = dev_to_port(dev); + int io = port->card->io; + u8 mcr = inb(io + N2_MCR) | + (port->phy_node ? TX422_PORT1 : TX422_PORT0); + int result; + + result = hdlc_open(dev); + if (result) + return result; + + mcr &= port->phy_node ? ~DTR_PORT1 : ~DTR_PORT0; /* set DTR ON */ + outb(mcr, io + N2_MCR); + + outb(inb(io + N2_PCR) | PCR_ENWIN, io + N2_PCR); /* open window */ + outb(inb(io + N2_PSR) | PSR_DMAEN, io + N2_PSR); /* enable dma */ + sca_open(dev); + n2_set_iface(port); + return 0; +} + +static int n2_close(struct net_device *dev) +{ + port_t *port = dev_to_port(dev); + int io = port->card->io; + u8 mcr = inb(io + N2_MCR) | + (port->phy_node ? TX422_PORT1 : TX422_PORT0); + + sca_close(dev); + mcr |= port->phy_node ? 
DTR_PORT1 : DTR_PORT0; /* set DTR OFF */ + outb(mcr, io + N2_MCR); + hdlc_close(dev); + return 0; +} + +static int n2_siocdevprivate(struct net_device *dev, struct ifreq *ifr, + void __user *data, int cmd) +{ +#ifdef DEBUG_RINGS + if (cmd == SIOCDEVPRIVATE) { + sca_dump_rings(dev); + return 0; + } +#endif + return -EOPNOTSUPP; +} + +static int n2_ioctl(struct net_device *dev, struct if_settings *ifs) +{ + const size_t size = sizeof(sync_serial_settings); + sync_serial_settings new_line; + sync_serial_settings __user *line = ifs->ifs_ifsu.sync; + port_t *port = dev_to_port(dev); + + switch (ifs->type) { + case IF_GET_IFACE: + ifs->type = IF_IFACE_SYNC_SERIAL; + if (ifs->size < size) { + ifs->size = size; /* data size wanted */ + return -ENOBUFS; + } + if (copy_to_user(line, &port->settings, size)) + return -EFAULT; + return 0; + + case IF_IFACE_SYNC_SERIAL: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + if (copy_from_user(&new_line, line, size)) + return -EFAULT; + + if (new_line.clock_type != CLOCK_EXT && + new_line.clock_type != CLOCK_TXFROMRX && + new_line.clock_type != CLOCK_INT && + new_line.clock_type != CLOCK_TXINT) + return -EINVAL; /* No such clock setting */ + + if (new_line.loopback != 0 && new_line.loopback != 1) + return -EINVAL; + + memcpy(&port->settings, &new_line, size); /* Update settings */ + n2_set_iface(port); + return 0; + + default: + return hdlc_ioctl(dev, ifs); + } +} + +static void n2_destroy_card(card_t *card) +{ + int cnt; + + for (cnt = 0; cnt < 2; cnt++) + if (card->ports[cnt].card) { + struct net_device *dev = port_to_dev(&card->ports[cnt]); + + unregister_hdlc_device(dev); + } + + if (card->irq) + free_irq(card->irq, card); + + if (card->winbase) { + iounmap(card->winbase); + release_mem_region(card->phy_winbase, USE_WINDOWSIZE); + } + + if (card->io) + release_region(card->io, N2_IOPORTS); + if (card->ports[0].dev) + free_netdev(card->ports[0].dev); + if (card->ports[1].dev) + free_netdev(card->ports[1].dev); + kfree(card); +} + +static const struct net_device_ops n2_ops = { + .ndo_open = n2_open, + .ndo_stop = n2_close, + .ndo_start_xmit = hdlc_start_xmit, + .ndo_siocwandev = n2_ioctl, + .ndo_siocdevprivate = n2_siocdevprivate, +}; + +static int __init n2_run(unsigned long io, unsigned long irq, + unsigned long winbase, long valid0, long valid1) +{ + card_t *card; + u8 cnt, pcr; + int i; + + if (io < 0x200 || io > 0x3FF || (io % N2_IOPORTS) != 0) { + pr_err("invalid I/O port value\n"); + return -ENODEV; + } + + if (irq < 3 || irq > 15 || irq == 6) /* FIXME */ { + pr_err("invalid IRQ value\n"); + return -ENODEV; + } + + if (winbase < 0xA0000 || winbase > 0xFFFFF || (winbase & 0xFFF) != 0) { + pr_err("invalid RAM value\n"); + return -ENODEV; + } + + card = kzalloc(sizeof(card_t), GFP_KERNEL); + if (!card) + return -ENOBUFS; + + card->ports[0].dev = alloc_hdlcdev(&card->ports[0]); + card->ports[1].dev = alloc_hdlcdev(&card->ports[1]); + if (!card->ports[0].dev || !card->ports[1].dev) { + pr_err("unable to allocate memory\n"); + n2_destroy_card(card); + return -ENOMEM; + } + + if (!request_region(io, N2_IOPORTS, devname)) { + pr_err("I/O port region in use\n"); + n2_destroy_card(card); + return -EBUSY; + } + card->io = io; + + if (request_irq(irq, sca_intr, 0, devname, card)) { + pr_err("could not allocate IRQ\n"); + n2_destroy_card(card); + return -EBUSY; + } + card->irq = irq; + + if (!request_mem_region(winbase, USE_WINDOWSIZE, devname)) { + pr_err("could not request RAM window\n"); + n2_destroy_card(card); + return -EBUSY; + } + card->phy_winbase = 
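Before touching any hardware, n2_run() rejects impossible io/irq/ram combinations: the I/O base must be a 16-byte-aligned port in 0x200-0x3FF, the IRQ one of 3-15 excluding 6, and the RAM window a 4 KB-aligned address in the ISA hole at 0xA0000-0xFFFFF. The same checks, lifted into a stand-alone validator:

#include <stdio.h>

#define N2_IOPORTS 0x10

/* Stand-alone copy of the sanity checks n2_run() applies to the
 * io/irq/ram parameters before probing the board.
 */
static int n2_params_ok(unsigned long io, unsigned long irq,
			unsigned long winbase)
{
	if (io < 0x200 || io > 0x3FF || (io % N2_IOPORTS) != 0)
		return 0;		/* invalid I/O port */
	if (irq < 3 || irq > 15 || irq == 6)
		return 0;		/* invalid IRQ */
	if (winbase < 0xA0000 || winbase > 0xFFFFF || (winbase & 0xFFF))
		return 0;		/* invalid RAM window */
	return 1;
}

int main(void)
{
	/* a typical ISA setup: I/O 0x300, IRQ 10, window at 0xD0000 */
	printf("0x300/10/0xD0000 -> %s\n",
	       n2_params_ok(0x300, 10, 0xD0000) ? "ok" : "rejected");
	printf("0x301/6/0xD0001 -> %s\n",
	       n2_params_ok(0x301, 6, 0xD0001) ? "ok" : "rejected");
	return 0;
}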
winbase; + card->winbase = ioremap(winbase, USE_WINDOWSIZE); + if (!card->winbase) { + pr_err("ioremap() failed\n"); + n2_destroy_card(card); + return -EFAULT; + } + + outb(0, io + N2_PCR); + outb(winbase >> 12, io + N2_BAR); + + switch (USE_WINDOWSIZE) { + case 16384: + outb(WIN16K, io + N2_PSR); + break; + + case 32768: + outb(WIN32K, io + N2_PSR); + break; + + case 65536: + outb(WIN64K, io + N2_PSR); + break; + + default: + pr_err("invalid window size\n"); + n2_destroy_card(card); + return -ENODEV; + } + + pcr = PCR_ENWIN | PCR_VPM | (USE_BUS16BITS ? PCR_BUS16 : 0); + outb(pcr, io + N2_PCR); + + card->ram_size = sca_detect_ram(card, card->winbase, MAX_RAM_SIZE); + + /* number of TX + RX buffers for one port */ + i = card->ram_size / ((valid0 + valid1) * (sizeof(pkt_desc) + + HDLC_MAX_MRU)); + + card->tx_ring_buffers = min(i / 2, MAX_TX_BUFFERS); + card->rx_ring_buffers = i - card->tx_ring_buffers; + + card->buff_offset = (valid0 + valid1) * sizeof(pkt_desc) * + (card->tx_ring_buffers + card->rx_ring_buffers); + + pr_info("RISCom/N2 %u KB RAM, IRQ%u, using %u TX + %u RX packets rings\n", + card->ram_size / 1024, card->irq, + card->tx_ring_buffers, card->rx_ring_buffers); + + if (card->tx_ring_buffers < 1) { + pr_err("RAM test failed\n"); + n2_destroy_card(card); + return -EIO; + } + + pcr |= PCR_RUNSCA; /* run SCA */ + outb(pcr, io + N2_PCR); + outb(0, io + N2_MCR); + + sca_init(card, 0); + for (cnt = 0; cnt < 2; cnt++) { + port_t *port = &card->ports[cnt]; + struct net_device *dev = port_to_dev(port); + hdlc_device *hdlc = dev_to_hdlc(dev); + + if ((cnt == 0 && !valid0) || (cnt == 1 && !valid1)) + continue; + + port->phy_node = cnt; + port->valid = 1; + + if ((cnt == 1) && valid0) + port->log_node = 1; + + spin_lock_init(&port->lock); + dev->irq = irq; + dev->mem_start = winbase; + dev->mem_end = winbase + USE_WINDOWSIZE - 1; + dev->tx_queue_len = 50; + dev->netdev_ops = &n2_ops; + hdlc->attach = sca_attach; + hdlc->xmit = sca_xmit; + port->settings.clock_type = CLOCK_EXT; + port->card = card; + + if (register_hdlc_device(dev)) { + pr_warn("unable to register hdlc device\n"); + port->card = NULL; + n2_destroy_card(card); + return -ENOBUFS; + } + sca_init_port(port); /* Set up SCA memory */ + + netdev_info(dev, "RISCom/N2 node %d\n", port->phy_node); + } + + *new_card = card; + new_card = &card->next_card; + + return 0; +} + +static int __init n2_init(void) +{ + if (!hw) { +#ifdef MODULE + pr_info("no card initialized\n"); +#endif + return -EINVAL; /* no parameters specified, abort */ + } + + pr_info("%s\n", version); + + do { + unsigned long io, irq, ram; + long valid[2] = { 0, 0 }; /* Default = both ports disabled */ + + io = simple_strtoul(hw, &hw, 0); + + if (*hw++ != ',') + break; + irq = simple_strtoul(hw, &hw, 0); + + if (*hw++ != ',') + break; + ram = simple_strtoul(hw, &hw, 0); + + if (*hw++ != ',') + break; + while (1) { + if (*hw == '0' && !valid[0]) + valid[0] = 1; /* Port 0 enabled */ + else if (*hw == '1' && !valid[1]) + valid[1] = 1; /* Port 1 enabled */ + else + break; + hw++; + } + + if (!valid[0] && !valid[1]) + break; /* at least one port must be used */ + + if (*hw == ':' || *hw == '\x0') + n2_run(io, irq, ram, valid[0], valid[1]); + + if (*hw == '\x0') + return first_card ? 0 : -EINVAL; + } while (*hw++ == ':'); + + pr_err("invalid hardware parameters\n"); + return first_card ? 
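n2_init() accepts colon-separated card descriptions of the form io,irq,ram,ports, where ports is some combination of the digits 0 and 1 (e.g. hw=0x300,10,0xD0000,01). A simplified user-space walk-through of the parse loop; unlike the driver it does not reject a repeated port digit:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* two hypothetical cards, in the driver's hw= syntax */
	char buf[] = "0x300,10,0xD0000,01:0x340,11,0xE0000,0";
	char *hw = buf;

	do {
		unsigned long io, irq, ram;
		int valid[2] = { 0, 0 };

		io = strtoul(hw, &hw, 0);
		if (*hw++ != ',')
			break;
		irq = strtoul(hw, &hw, 0);
		if (*hw++ != ',')
			break;
		ram = strtoul(hw, &hw, 0);
		if (*hw++ != ',')
			break;
		while (*hw == '0' || *hw == '1')
			valid[*hw++ - '0'] = 1;	/* enable listed ports */

		printf("card: io 0x%lx irq %lu ram 0x%lx ports%s%s\n",
		       io, irq, ram,
		       valid[0] ? " 0" : "", valid[1] ? " 1" : "");
	} while (*hw++ == ':');	/* next card follows a colon */
	return 0;
}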
0 : -EINVAL; +} + +static void __exit n2_cleanup(void) +{ + card_t *card = first_card; + + while (card) { + card_t *ptr = card; + + card = card->next_card; + n2_destroy_card(ptr); + } +} + +module_init(n2_init); +module_exit(n2_cleanup); + +MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>"); +MODULE_DESCRIPTION("RISCom/N2 serial port driver"); +MODULE_LICENSE("GPL v2"); +module_param(hw, charp, 0444); +MODULE_PARM_DESC(hw, "io,irq,ram,ports:io,irq,..."); diff --git a/drivers/net/wan/pc300too.c b/drivers/net/wan/pc300too.c new file mode 100644 index 000000000..4766446f0 --- /dev/null +++ b/drivers/net/wan/pc300too.c @@ -0,0 +1,513 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Cyclades PC300 synchronous serial card driver for Linux + * + * Copyright (C) 2000-2008 Krzysztof Halasa <khc@pm.waw.pl> + * + * For information see <https://www.kernel.org/pub/linux/utils/net/hdlc/>. + * + * Sources of information: + * Hitachi HD64572 SCA-II User's Manual + * Original Cyclades PC300 Linux driver + * + * This driver currently supports only PC300/RSV (V.24/V.35) and + * PC300/X21 cards. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/sched.h> +#include <linux/types.h> +#include <linux/fcntl.h> +#include <linux/in.h> +#include <linux/string.h> +#include <linux/errno.h> +#include <linux/init.h> +#include <linux/ioport.h> +#include <linux/moduleparam.h> +#include <linux/netdevice.h> +#include <linux/hdlc.h> +#include <linux/pci.h> +#include <linux/delay.h> +#include <asm/io.h> + +#include "hd64572.h" + +#undef DEBUG_PKT +#define DEBUG_RINGS + +#define PC300_PLX_SIZE 0x80 /* PLX control window size (128 B) */ +#define PC300_SCA_SIZE 0x400 /* SCA window size (1 KB) */ +#define MAX_TX_BUFFERS 10 + +static int pci_clock_freq = 33000000; +static int use_crystal_clock; +static unsigned int CLOCK_BASE; + +/* Masks to access the init_ctrl PLX register */ +#define PC300_CLKSEL_MASK (0x00000004UL) +#define PC300_CHMEDIA_MASK(port) (0x00000020UL << ((port) * 3)) +#define PC300_CTYPE_MASK (0x00000800UL) + +enum { PC300_RSV = 1, PC300_X21, PC300_TE }; /* card types */ + +/* PLX PCI9050-1 local configuration and shared runtime registers. + * This structure can be used to access 9050 registers (memory mapped). + */ +typedef struct { + u32 loc_addr_range[4]; /* 00-0Ch : Local Address Ranges */ + u32 loc_rom_range; /* 10h : Local ROM Range */ + u32 loc_addr_base[4]; /* 14-20h : Local Address Base Addrs */ + u32 loc_rom_base; /* 24h : Local ROM Base */ + u32 loc_bus_descr[4]; /* 28-34h : Local Bus Descriptors */ + u32 rom_bus_descr; /* 38h : ROM Bus Descriptor */ + u32 cs_base[4]; /* 3C-48h : Chip Select Base Addrs */ + u32 intr_ctrl_stat; /* 4Ch : Interrupt Control/Status */ + u32 init_ctrl; /* 50h : EEPROM ctrl, Init Ctrl, etc */ +} plx9050; + +typedef struct port_s { + struct napi_struct napi; + struct net_device *netdev; + struct card_s *card; + spinlock_t lock; /* TX lock */ + sync_serial_settings settings; + int rxpart; /* partial frame received, next frame invalid*/ + unsigned short encoding; + unsigned short parity; + unsigned int iface; + u16 rxin; /* rx ring buffer 'in' pointer */ + u16 txin; /* tx ring buffer 'in' and 'last' pointers */ + u16 txlast; + u8 rxs, txs, tmc; /* SCA registers */ + u8 chan; /* physical port # - 0 or 1 */ +} port_t; + +typedef struct card_s { + int type; /* RSV, X21, etc. 
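The plx9050 structure mirrors the PLX PCI9050's memory-mapped local configuration registers, so its field offsets must match the data book exactly. A quick offsetof() check for the two registers the driver pokes most, intr_ctrl_stat (4Ch) and init_ctrl (50h):

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Stand-alone copy of the driver's plx9050 layout (illustrative). */
typedef struct {
	uint32_t loc_addr_range[4];	/* 00-0Ch */
	uint32_t loc_rom_range;		/* 10h */
	uint32_t loc_addr_base[4];	/* 14-20h */
	uint32_t loc_rom_base;		/* 24h */
	uint32_t loc_bus_descr[4];	/* 28-34h */
	uint32_t rom_bus_descr;		/* 38h */
	uint32_t cs_base[4];		/* 3C-48h */
	uint32_t intr_ctrl_stat;	/* 4Ch */
	uint32_t init_ctrl;		/* 50h */
} plx9050;

int main(void)
{
	/* both should print the data book's offsets: 0x4c and 0x50 */
	printf("intr_ctrl_stat at 0x%02zx\n",
	       offsetof(plx9050, intr_ctrl_stat));
	printf("init_ctrl      at 0x%02zx\n",
	       offsetof(plx9050, init_ctrl));
	return 0;
}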
*/ + int n_ports; /* 1 or 2 ports */ + u8 __iomem *rambase; /* buffer memory base (virtual) */ + u8 __iomem *scabase; /* SCA memory base (virtual) */ + plx9050 __iomem *plxbase; /* PLX registers memory base (virtual) */ + u32 init_ctrl_value; /* Saved value - 9050 bug workaround */ + u16 rx_ring_buffers; /* number of buffers in a ring */ + u16 tx_ring_buffers; + u16 buff_offset; /* offset of first buffer of first channel */ + u8 irq; /* interrupt request level */ + + port_t ports[2]; +} card_t; + +#define get_port(card, port) ((port) < (card)->n_ports ? \ + (&(card)->ports[port]) : (NULL)) + +#include "hd64572.c" + +static void pc300_set_iface(port_t *port) +{ + card_t *card = port->card; + u32 __iomem *init_ctrl = &card->plxbase->init_ctrl; + u16 msci = get_msci(port); + u8 rxs = port->rxs & CLK_BRG_MASK; + u8 txs = port->txs & CLK_BRG_MASK; + + sca_out(EXS_TES1, (port->chan ? MSCI1_OFFSET : MSCI0_OFFSET) + EXS, + port->card); + switch (port->settings.clock_type) { + case CLOCK_INT: + rxs |= CLK_BRG; /* BRG output */ + txs |= CLK_PIN_OUT | CLK_TX_RXCLK; /* RX clock */ + break; + + case CLOCK_TXINT: + rxs |= CLK_LINE; /* RXC input */ + txs |= CLK_PIN_OUT | CLK_BRG; /* BRG output */ + break; + + case CLOCK_TXFROMRX: + rxs |= CLK_LINE; /* RXC input */ + txs |= CLK_PIN_OUT | CLK_TX_RXCLK; /* RX clock */ + break; + + default: /* EXTernal clock */ + rxs |= CLK_LINE; /* RXC input */ + txs |= CLK_PIN_OUT | CLK_LINE; /* TXC input */ + break; + } + + port->rxs = rxs; + port->txs = txs; + sca_out(rxs, msci + RXS, card); + sca_out(txs, msci + TXS, card); + sca_set_port(port); + + if (port->card->type == PC300_RSV) { + if (port->iface == IF_IFACE_V35) + writel(card->init_ctrl_value | + PC300_CHMEDIA_MASK(port->chan), init_ctrl); + else + writel(card->init_ctrl_value & + ~PC300_CHMEDIA_MASK(port->chan), init_ctrl); + } +} + +static int pc300_open(struct net_device *dev) +{ + port_t *port = dev_to_port(dev); + int result = hdlc_open(dev); + + if (result) + return result; + + sca_open(dev); + pc300_set_iface(port); + return 0; +} + +static int pc300_close(struct net_device *dev) +{ + sca_close(dev); + hdlc_close(dev); + return 0; +} + +static int pc300_siocdevprivate(struct net_device *dev, struct ifreq *ifr, + void __user *data, int cmd) +{ +#ifdef DEBUG_RINGS + if (cmd == SIOCDEVPRIVATE) { + sca_dump_rings(dev); + return 0; + } +#endif + return -EOPNOTSUPP; +} + +static int pc300_ioctl(struct net_device *dev, struct if_settings *ifs) +{ + const size_t size = sizeof(sync_serial_settings); + sync_serial_settings new_line; + sync_serial_settings __user *line = ifs->ifs_ifsu.sync; + int new_type; + port_t *port = dev_to_port(dev); + + if (ifs->type == IF_GET_IFACE) { + ifs->type = port->iface; + if (ifs->size < size) { + ifs->size = size; /* data size wanted */ + return -ENOBUFS; + } + if (copy_to_user(line, &port->settings, size)) + return -EFAULT; + return 0; + } + + if (port->card->type == PC300_X21 && + (ifs->type == IF_IFACE_SYNC_SERIAL || + ifs->type == IF_IFACE_X21)) + new_type = IF_IFACE_X21; + + else if (port->card->type == PC300_RSV && + (ifs->type == IF_IFACE_SYNC_SERIAL || + ifs->type == IF_IFACE_V35)) + new_type = IF_IFACE_V35; + + else if (port->card->type == PC300_RSV && + ifs->type == IF_IFACE_V24) + new_type = IF_IFACE_V24; + + else + return hdlc_ioctl(dev, ifs); + + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + if (copy_from_user(&new_line, line, size)) + return -EFAULT; + + if (new_line.clock_type != CLOCK_EXT && + new_line.clock_type != CLOCK_TXFROMRX && + new_line.clock_type != 
CLOCK_INT && + new_line.clock_type != CLOCK_TXINT) + return -EINVAL; /* No such clock setting */ + + if (new_line.loopback != 0 && new_line.loopback != 1) + return -EINVAL; + + memcpy(&port->settings, &new_line, size); /* Update settings */ + port->iface = new_type; + pc300_set_iface(port); + return 0; +} + +static void pc300_pci_remove_one(struct pci_dev *pdev) +{ + int i; + card_t *card = pci_get_drvdata(pdev); + + for (i = 0; i < 2; i++) + if (card->ports[i].card) + unregister_hdlc_device(card->ports[i].netdev); + + if (card->irq) + free_irq(card->irq, card); + + if (card->rambase) + iounmap(card->rambase); + if (card->scabase) + iounmap(card->scabase); + if (card->plxbase) + iounmap(card->plxbase); + + pci_release_regions(pdev); + pci_disable_device(pdev); + if (card->ports[0].netdev) + free_netdev(card->ports[0].netdev); + if (card->ports[1].netdev) + free_netdev(card->ports[1].netdev); + kfree(card); +} + +static const struct net_device_ops pc300_ops = { + .ndo_open = pc300_open, + .ndo_stop = pc300_close, + .ndo_start_xmit = hdlc_start_xmit, + .ndo_siocwandev = pc300_ioctl, + .ndo_siocdevprivate = pc300_siocdevprivate, +}; + +static int pc300_pci_init_one(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + card_t *card; + u32 __iomem *p; + int i; + u32 ramsize; + u32 ramphys; /* buffer memory base */ + u32 scaphys; /* SCA memory base */ + u32 plxphys; /* PLX registers memory base */ + + i = pci_enable_device(pdev); + if (i) + return i; + + i = pci_request_regions(pdev, "PC300"); + if (i) { + pci_disable_device(pdev); + return i; + } + + card = kzalloc(sizeof(card_t), GFP_KERNEL); + if (!card) { + pci_release_regions(pdev); + pci_disable_device(pdev); + return -ENOBUFS; + } + pci_set_drvdata(pdev, card); + + if (pci_resource_len(pdev, 0) != PC300_PLX_SIZE || + pci_resource_len(pdev, 2) != PC300_SCA_SIZE || + pci_resource_len(pdev, 3) < 16384) { + pr_err("invalid card EEPROM parameters\n"); + pc300_pci_remove_one(pdev); + return -EFAULT; + } + + plxphys = pci_resource_start(pdev, 0) & PCI_BASE_ADDRESS_MEM_MASK; + card->plxbase = ioremap(plxphys, PC300_PLX_SIZE); + + scaphys = pci_resource_start(pdev, 2) & PCI_BASE_ADDRESS_MEM_MASK; + card->scabase = ioremap(scaphys, PC300_SCA_SIZE); + + ramphys = pci_resource_start(pdev, 3) & PCI_BASE_ADDRESS_MEM_MASK; + card->rambase = pci_ioremap_bar(pdev, 3); + + if (!card->plxbase || !card->scabase || !card->rambase) { + pr_err("ioremap() failed\n"); + pc300_pci_remove_one(pdev); + return -ENOMEM; + } + + /* PLX PCI 9050 workaround for local configuration register read bug */ + pci_write_config_dword(pdev, PCI_BASE_ADDRESS_0, scaphys); + card->init_ctrl_value = readl(&((plx9050 __iomem *)card->scabase)->init_ctrl); + pci_write_config_dword(pdev, PCI_BASE_ADDRESS_0, plxphys); + + if (pdev->device == PCI_DEVICE_ID_PC300_TE_1 || + pdev->device == PCI_DEVICE_ID_PC300_TE_2) + card->type = PC300_TE; /* not fully supported */ + else if (card->init_ctrl_value & PC300_CTYPE_MASK) + card->type = PC300_X21; + else + card->type = PC300_RSV; + + if (pdev->device == PCI_DEVICE_ID_PC300_RX_1 || + pdev->device == PCI_DEVICE_ID_PC300_TE_1) + card->n_ports = 1; + else + card->n_ports = 2; + + for (i = 0; i < card->n_ports; i++) { + card->ports[i].netdev = alloc_hdlcdev(&card->ports[i]); + if (!card->ports[i].netdev) { + pr_err("unable to allocate memory\n"); + pc300_pci_remove_one(pdev); + return -ENOMEM; + } + } + + /* Reset PLX */ + p = &card->plxbase->init_ctrl; + writel(card->init_ctrl_value | 0x40000000, p); + readl(p); /* Flush the write - do not 
use sca_flush */ + udelay(1); + + writel(card->init_ctrl_value, p); + readl(p); /* Flush the write - do not use sca_flush */ + udelay(1); + + /* Reload Config. Registers from EEPROM */ + writel(card->init_ctrl_value | 0x20000000, p); + readl(p); /* Flush the write - do not use sca_flush */ + udelay(1); + + writel(card->init_ctrl_value, p); + readl(p); /* Flush the write - do not use sca_flush */ + udelay(1); + + ramsize = sca_detect_ram(card, card->rambase, + pci_resource_len(pdev, 3)); + + if (use_crystal_clock) + card->init_ctrl_value &= ~PC300_CLKSEL_MASK; + else + card->init_ctrl_value |= PC300_CLKSEL_MASK; + + writel(card->init_ctrl_value, &card->plxbase->init_ctrl); + /* number of TX + RX buffers for one port */ + i = ramsize / (card->n_ports * (sizeof(pkt_desc) + HDLC_MAX_MRU)); + card->tx_ring_buffers = min(i / 2, MAX_TX_BUFFERS); + card->rx_ring_buffers = i - card->tx_ring_buffers; + + card->buff_offset = card->n_ports * sizeof(pkt_desc) * + (card->tx_ring_buffers + card->rx_ring_buffers); + + pr_info("PC300/%s, %u KB RAM at 0x%x, IRQ%u, using %u TX + %u RX packets rings\n", + card->type == PC300_X21 ? "X21" : + card->type == PC300_TE ? "TE" : "RSV", + ramsize / 1024, ramphys, pdev->irq, + card->tx_ring_buffers, card->rx_ring_buffers); + + if (card->tx_ring_buffers < 1) { + pr_err("RAM test failed\n"); + pc300_pci_remove_one(pdev); + return -EFAULT; + } + + /* Enable interrupts on the PCI bridge, LINTi1 active low */ + writew(0x0041, &card->plxbase->intr_ctrl_stat); + + /* Allocate IRQ */ + if (request_irq(pdev->irq, sca_intr, IRQF_SHARED, "pc300", card)) { + pr_warn("could not allocate IRQ%d\n", pdev->irq); + pc300_pci_remove_one(pdev); + return -EBUSY; + } + card->irq = pdev->irq; + + sca_init(card, 0); + + // COTE not set - allows better TX DMA settings + // sca_out(sca_in(PCR, card) | PCR_COTE, PCR, card); + + sca_out(0x10, BTCR, card); + + for (i = 0; i < card->n_ports; i++) { + port_t *port = &card->ports[i]; + struct net_device *dev = port->netdev; + hdlc_device *hdlc = dev_to_hdlc(dev); + + port->chan = i; + + spin_lock_init(&port->lock); + dev->irq = card->irq; + dev->mem_start = ramphys; + dev->mem_end = ramphys + ramsize - 1; + dev->tx_queue_len = 50; + dev->netdev_ops = &pc300_ops; + hdlc->attach = sca_attach; + hdlc->xmit = sca_xmit; + port->settings.clock_type = CLOCK_EXT; + port->card = card; + if (card->type == PC300_X21) + port->iface = IF_IFACE_X21; + else + port->iface = IF_IFACE_V35; + + sca_init_port(port); + if (register_hdlc_device(dev)) { + pr_err("unable to register hdlc device\n"); + port->card = NULL; + pc300_pci_remove_one(pdev); + return -ENOBUFS; + } + + netdev_info(dev, "PC300 channel %d\n", port->chan); + } + return 0; +} + +static const struct pci_device_id pc300_pci_tbl[] = { + { PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_PC300_RX_1, PCI_ANY_ID, + PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_PC300_RX_2, PCI_ANY_ID, + PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_PC300_TE_1, PCI_ANY_ID, + PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_PC300_TE_2, PCI_ANY_ID, + PCI_ANY_ID, 0, 0, 0 }, + { 0, } +}; + +static struct pci_driver pc300_pci_driver = { + .name = "PC300", + .id_table = pc300_pci_tbl, + .probe = pc300_pci_init_one, + .remove = pc300_pci_remove_one, +}; + +static int __init pc300_init_module(void) +{ + if (pci_clock_freq < 1000000 || pci_clock_freq > 80000000) { + pr_err("Invalid PCI clock frequency\n"); + return -EINVAL; + } + if (use_crystal_clock != 0 && use_crystal_clock != 1) { + 
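The probe path splits the detected on-board RAM into per-port descriptor rings plus packet buffers, capping TX at MAX_TX_BUFFERS and giving the remainder to RX. A back-of-envelope version of that arithmetic; the descriptor size and MRU below are assumed stand-ins for sizeof(pkt_desc) and HDLC_MAX_MRU, whose real values come from hd64572.h and linux/hdlc.h:

#include <stdio.h>

#define MAX_TX_BUFFERS 10

int main(void)
{
	unsigned int ramsize = 131072;	/* e.g. 128 KB detected */
	unsigned int n_ports = 2;	/* dual-port card */
	unsigned int desc_sz = 8;	/* assumed sizeof(pkt_desc) */
	unsigned int mru = 1600;	/* assumed HDLC_MAX_MRU */

	/* buffers available per port, then the driver's TX/RX split */
	unsigned int i = ramsize / (n_ports * (desc_sz + mru));
	unsigned int tx = i / 2 < MAX_TX_BUFFERS ? i / 2 : MAX_TX_BUFFERS;
	unsigned int rx = i - tx;

	printf("%u TX + %u RX buffers per port\n", tx, rx);
	return 0;
}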
pr_err("Invalid 'use_crystal_clock' value\n"); + return -EINVAL; + } + + CLOCK_BASE = use_crystal_clock ? 24576000 : pci_clock_freq; + + return pci_register_driver(&pc300_pci_driver); +} + +static void __exit pc300_cleanup_module(void) +{ + pci_unregister_driver(&pc300_pci_driver); +} + +MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>"); +MODULE_DESCRIPTION("Cyclades PC300 serial port driver"); +MODULE_LICENSE("GPL v2"); +MODULE_DEVICE_TABLE(pci, pc300_pci_tbl); +module_param(pci_clock_freq, int, 0444); +MODULE_PARM_DESC(pci_clock_freq, "System PCI clock frequency in Hz"); +module_param(use_crystal_clock, int, 0444); +MODULE_PARM_DESC(use_crystal_clock, + "Use 24.576 MHz clock instead of PCI clock"); +module_init(pc300_init_module); +module_exit(pc300_cleanup_module); diff --git a/drivers/net/wan/pci200syn.c b/drivers/net/wan/pci200syn.c new file mode 100644 index 000000000..ea86c7035 --- /dev/null +++ b/drivers/net/wan/pci200syn.c @@ -0,0 +1,433 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Goramo PCI200SYN synchronous serial card driver for Linux + * + * Copyright (C) 2002-2008 Krzysztof Halasa <khc@pm.waw.pl> + * + * For information see <https://www.kernel.org/pub/linux/utils/net/hdlc/> + * + * Sources of information: + * Hitachi HD64572 SCA-II User's Manual + * PLX Technology Inc. PCI9052 Data Book + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/capability.h> +#include <linux/slab.h> +#include <linux/types.h> +#include <linux/fcntl.h> +#include <linux/in.h> +#include <linux/string.h> +#include <linux/errno.h> +#include <linux/init.h> +#include <linux/ioport.h> +#include <linux/netdevice.h> +#include <linux/hdlc.h> +#include <linux/pci.h> +#include <linux/delay.h> +#include <asm/io.h> + +#include "hd64572.h" + +#undef DEBUG_PKT +#define DEBUG_RINGS + +#define PCI200SYN_PLX_SIZE 0x80 /* PLX control window size (128b) */ +#define PCI200SYN_SCA_SIZE 0x400 /* SCA window size (1Kb) */ +#define MAX_TX_BUFFERS 10 + +static int pci_clock_freq = 33000000; +#define CLOCK_BASE pci_clock_freq + +/* PLX PCI9052 local configuration and shared runtime registers. + * This structure can be used to access 9052 registers (memory mapped). 
+ */ +typedef struct { + u32 loc_addr_range[4]; /* 00-0Ch : Local Address Ranges */ + u32 loc_rom_range; /* 10h : Local ROM Range */ + u32 loc_addr_base[4]; /* 14-20h : Local Address Base Addrs */ + u32 loc_rom_base; /* 24h : Local ROM Base */ + u32 loc_bus_descr[4]; /* 28-34h : Local Bus Descriptors */ + u32 rom_bus_descr; /* 38h : ROM Bus Descriptor */ + u32 cs_base[4]; /* 3C-48h : Chip Select Base Addrs */ + u32 intr_ctrl_stat; /* 4Ch : Interrupt Control/Status */ + u32 init_ctrl; /* 50h : EEPROM ctrl, Init Ctrl, etc */ +} plx9052; + +typedef struct port_s { + struct napi_struct napi; + struct net_device *netdev; + struct card_s *card; + spinlock_t lock; /* TX lock */ + sync_serial_settings settings; + int rxpart; /* partial frame received, next frame invalid*/ + unsigned short encoding; + unsigned short parity; + u16 rxin; /* rx ring buffer 'in' pointer */ + u16 txin; /* tx ring buffer 'in' and 'last' pointers */ + u16 txlast; + u8 rxs, txs, tmc; /* SCA registers */ + u8 chan; /* physical port # - 0 or 1 */ +} port_t; + +typedef struct card_s { + u8 __iomem *rambase; /* buffer memory base (virtual) */ + u8 __iomem *scabase; /* SCA memory base (virtual) */ + plx9052 __iomem *plxbase;/* PLX registers memory base (virtual) */ + u16 rx_ring_buffers; /* number of buffers in a ring */ + u16 tx_ring_buffers; + u16 buff_offset; /* offset of first buffer of first channel */ + u8 irq; /* interrupt request level */ + + port_t ports[2]; +} card_t; + +#define get_port(card, port) (&(card)->ports[port]) +#define sca_flush(card) (sca_in(IER0, card)) + +static inline void new_memcpy_toio(char __iomem *dest, char *src, int length) +{ + int len; + + do { + len = length > 256 ? 256 : length; + memcpy_toio(dest, src, len); + dest += len; + src += len; + length -= len; + readb(dest); + } while (len); +} + +#undef memcpy_toio +#define memcpy_toio new_memcpy_toio + +#include "hd64572.c" + +static void pci200_set_iface(port_t *port) +{ + card_t *card = port->card; + u16 msci = get_msci(port); + u8 rxs = port->rxs & CLK_BRG_MASK; + u8 txs = port->txs & CLK_BRG_MASK; + + sca_out(EXS_TES1, (port->chan ? 
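new_memcpy_toio() replaces memcpy_toio() with a chunked copy: at most 256 bytes per burst, with a read after each chunk so posted PCI writes are flushed before the next one. The control flow, modelled in plain user-space C (memcpy() stands in for memcpy_toio(), and the flushing readb() is only noted in a comment):

#include <stdio.h>
#include <string.h>

static void chunked_copy(char *dest, const char *src, int length)
{
	int len;

	do {
		len = length > 256 ? 256 : length;	/* cap the burst */
		memcpy(dest, src, len);
		dest += len;
		src += len;
		length -= len;
		/* the driver does readb(dest) here to flush the write */
	} while (len);
}

int main(void)
{
	char src[600], dst[600];

	memset(src, 0xAB, sizeof(src));
	chunked_copy(dst, src, sizeof(src));
	printf("copied %d bytes in <=256-byte chunks\n", (int)sizeof(dst));
	return 0;
}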
MSCI1_OFFSET : MSCI0_OFFSET) + EXS, + port->card); + switch (port->settings.clock_type) { + case CLOCK_INT: + rxs |= CLK_BRG; /* BRG output */ + txs |= CLK_PIN_OUT | CLK_TX_RXCLK; /* RX clock */ + break; + + case CLOCK_TXINT: + rxs |= CLK_LINE; /* RXC input */ + txs |= CLK_PIN_OUT | CLK_BRG; /* BRG output */ + break; + + case CLOCK_TXFROMRX: + rxs |= CLK_LINE; /* RXC input */ + txs |= CLK_PIN_OUT | CLK_TX_RXCLK; /* RX clock */ + break; + + default: /* EXTernal clock */ + rxs |= CLK_LINE; /* RXC input */ + txs |= CLK_PIN_OUT | CLK_LINE; /* TXC input */ + break; + } + + port->rxs = rxs; + port->txs = txs; + sca_out(rxs, msci + RXS, card); + sca_out(txs, msci + TXS, card); + sca_set_port(port); +} + +static int pci200_open(struct net_device *dev) +{ + port_t *port = dev_to_port(dev); + int result = hdlc_open(dev); + + if (result) + return result; + + sca_open(dev); + pci200_set_iface(port); + sca_flush(port->card); + return 0; +} + +static int pci200_close(struct net_device *dev) +{ + sca_close(dev); + sca_flush(dev_to_port(dev)->card); + hdlc_close(dev); + return 0; +} + +static int pci200_siocdevprivate(struct net_device *dev, struct ifreq *ifr, + void __user *data, int cmd) +{ +#ifdef DEBUG_RINGS + if (cmd == SIOCDEVPRIVATE) { + sca_dump_rings(dev); + return 0; + } +#endif + return -EOPNOTSUPP; +} + +static int pci200_ioctl(struct net_device *dev, struct if_settings *ifs) +{ + const size_t size = sizeof(sync_serial_settings); + sync_serial_settings new_line; + sync_serial_settings __user *line = ifs->ifs_ifsu.sync; + port_t *port = dev_to_port(dev); + + switch (ifs->type) { + case IF_GET_IFACE: + ifs->type = IF_IFACE_V35; + if (ifs->size < size) { + ifs->size = size; /* data size wanted */ + return -ENOBUFS; + } + if (copy_to_user(line, &port->settings, size)) + return -EFAULT; + return 0; + + case IF_IFACE_V35: + case IF_IFACE_SYNC_SERIAL: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + if (copy_from_user(&new_line, line, size)) + return -EFAULT; + + if (new_line.clock_type != CLOCK_EXT && + new_line.clock_type != CLOCK_TXFROMRX && + new_line.clock_type != CLOCK_INT && + new_line.clock_type != CLOCK_TXINT) + return -EINVAL; /* No such clock setting */ + + if (new_line.loopback != 0 && new_line.loopback != 1) + return -EINVAL; + + memcpy(&port->settings, &new_line, size); /* Update settings */ + pci200_set_iface(port); + sca_flush(port->card); + return 0; + + default: + return hdlc_ioctl(dev, ifs); + } +} + +static void pci200_pci_remove_one(struct pci_dev *pdev) +{ + int i; + card_t *card = pci_get_drvdata(pdev); + + for (i = 0; i < 2; i++) + if (card->ports[i].card) + unregister_hdlc_device(card->ports[i].netdev); + + if (card->irq) + free_irq(card->irq, card); + + if (card->rambase) + iounmap(card->rambase); + if (card->scabase) + iounmap(card->scabase); + if (card->plxbase) + iounmap(card->plxbase); + + pci_release_regions(pdev); + pci_disable_device(pdev); + if (card->ports[0].netdev) + free_netdev(card->ports[0].netdev); + if (card->ports[1].netdev) + free_netdev(card->ports[1].netdev); + kfree(card); +} + +static const struct net_device_ops pci200_ops = { + .ndo_open = pci200_open, + .ndo_stop = pci200_close, + .ndo_start_xmit = hdlc_start_xmit, + .ndo_siocwandev = pci200_ioctl, + .ndo_siocdevprivate = pci200_siocdevprivate, +}; + +static int pci200_pci_init_one(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + card_t *card; + u32 __iomem *p; + int i; + u32 ramsize; + u32 ramphys; /* buffer memory base */ + u32 scaphys; /* SCA memory base */ + u32 plxphys; /* PLX 
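All of these drivers implement IF_GET_IFACE with the same two-phase size negotiation: if the caller's buffer is smaller than sizeof(sync_serial_settings), the handler writes back the size it wants and fails with -ENOBUFS, and the caller retries with enough room. A small user-space model of that handshake (plain memcpy() and -1 stand in for copy_to_user() and -ENOBUFS):

#include <stdio.h>
#include <string.h>

struct settings { int clock_type, clock_rate, loopback; };

static int get_iface(void *buf, size_t *size, const struct settings *cur)
{
	if (*size < sizeof(*cur)) {
		*size = sizeof(*cur);	/* report the data size wanted */
		return -1;		/* stands in for -ENOBUFS */
	}
	memcpy(buf, cur, sizeof(*cur));
	return 0;
}

int main(void)
{
	struct settings cur = { 1, 0, 0 }, out;
	size_t size = 0;

	if (get_iface(&out, &size, &cur))	/* first call: learn size */
		get_iface(&out, &size, &cur);	/* retry with enough room */
	printf("clock_type=%d (size %zu)\n", out.clock_type, size);
	return 0;
}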
registers memory base */ + + i = pci_enable_device(pdev); + if (i) + return i; + + i = pci_request_regions(pdev, "PCI200SYN"); + if (i) { + pci_disable_device(pdev); + return i; + } + + card = kzalloc(sizeof(card_t), GFP_KERNEL); + if (!card) { + pci_release_regions(pdev); + pci_disable_device(pdev); + return -ENOBUFS; + } + pci_set_drvdata(pdev, card); + card->ports[0].netdev = alloc_hdlcdev(&card->ports[0]); + card->ports[1].netdev = alloc_hdlcdev(&card->ports[1]); + if (!card->ports[0].netdev || !card->ports[1].netdev) { + pr_err("unable to allocate memory\n"); + pci200_pci_remove_one(pdev); + return -ENOMEM; + } + + if (pci_resource_len(pdev, 0) != PCI200SYN_PLX_SIZE || + pci_resource_len(pdev, 2) != PCI200SYN_SCA_SIZE || + pci_resource_len(pdev, 3) < 16384) { + pr_err("invalid card EEPROM parameters\n"); + pci200_pci_remove_one(pdev); + return -EFAULT; + } + + plxphys = pci_resource_start(pdev, 0) & PCI_BASE_ADDRESS_MEM_MASK; + card->plxbase = ioremap(plxphys, PCI200SYN_PLX_SIZE); + + scaphys = pci_resource_start(pdev, 2) & PCI_BASE_ADDRESS_MEM_MASK; + card->scabase = ioremap(scaphys, PCI200SYN_SCA_SIZE); + + ramphys = pci_resource_start(pdev, 3) & PCI_BASE_ADDRESS_MEM_MASK; + card->rambase = pci_ioremap_bar(pdev, 3); + + if (!card->plxbase || !card->scabase || !card->rambase) { + pr_err("ioremap() failed\n"); + pci200_pci_remove_one(pdev); + return -EFAULT; + } + + /* Reset PLX */ + p = &card->plxbase->init_ctrl; + writel(readl(p) | 0x40000000, p); + readl(p); /* Flush the write - do not use sca_flush */ + udelay(1); + + writel(readl(p) & ~0x40000000, p); + readl(p); /* Flush the write - do not use sca_flush */ + udelay(1); + + ramsize = sca_detect_ram(card, card->rambase, + pci_resource_len(pdev, 3)); + + /* number of TX + RX buffers for one port - this is dual port card */ + i = ramsize / (2 * (sizeof(pkt_desc) + HDLC_MAX_MRU)); + card->tx_ring_buffers = min(i / 2, MAX_TX_BUFFERS); + card->rx_ring_buffers = i - card->tx_ring_buffers; + + card->buff_offset = 2 * sizeof(pkt_desc) * (card->tx_ring_buffers + + card->rx_ring_buffers); + + pr_info("%u KB RAM at 0x%x, IRQ%u, using %u TX + %u RX packets rings\n", + ramsize / 1024, ramphys, + pdev->irq, card->tx_ring_buffers, card->rx_ring_buffers); + + if (card->tx_ring_buffers < 1) { + pr_err("RAM test failed\n"); + pci200_pci_remove_one(pdev); + return -EFAULT; + } + + /* Enable interrupts on the PCI bridge */ + p = &card->plxbase->intr_ctrl_stat; + writew(readw(p) | 0x0040, p); + + /* Allocate IRQ */ + if (request_irq(pdev->irq, sca_intr, IRQF_SHARED, "pci200syn", card)) { + pr_warn("could not allocate IRQ%d\n", pdev->irq); + pci200_pci_remove_one(pdev); + return -EBUSY; + } + card->irq = pdev->irq; + + sca_init(card, 0); + + for (i = 0; i < 2; i++) { + port_t *port = &card->ports[i]; + struct net_device *dev = port->netdev; + hdlc_device *hdlc = dev_to_hdlc(dev); + + port->chan = i; + + spin_lock_init(&port->lock); + dev->irq = card->irq; + dev->mem_start = ramphys; + dev->mem_end = ramphys + ramsize - 1; + dev->tx_queue_len = 50; + dev->netdev_ops = &pci200_ops; + hdlc->attach = sca_attach; + hdlc->xmit = sca_xmit; + port->settings.clock_type = CLOCK_EXT; + port->card = card; + sca_init_port(port); + if (register_hdlc_device(dev)) { + pr_err("unable to register hdlc device\n"); + port->card = NULL; + pci200_pci_remove_one(pdev); + return -ENOBUFS; + } + + netdev_info(dev, "PCI200SYN channel %d\n", port->chan); + } + + sca_flush(card); + return 0; +} + +static const struct pci_device_id pci200_pci_tbl[] = { + { PCI_VENDOR_ID_PLX, 
PCI_DEVICE_ID_PLX_9050, PCI_VENDOR_ID_PLX, + PCI_DEVICE_ID_PLX_PCI200SYN, 0, 0, 0 }, + { 0, } +}; + +static struct pci_driver pci200_pci_driver = { + .name = "PCI200SYN", + .id_table = pci200_pci_tbl, + .probe = pci200_pci_init_one, + .remove = pci200_pci_remove_one, +}; + +static int __init pci200_init_module(void) +{ + if (pci_clock_freq < 1000000 || pci_clock_freq > 80000000) { + pr_err("Invalid PCI clock frequency\n"); + return -EINVAL; + } + return pci_register_driver(&pci200_pci_driver); +} + +static void __exit pci200_cleanup_module(void) +{ + pci_unregister_driver(&pci200_pci_driver); +} + +MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>"); +MODULE_DESCRIPTION("Goramo PCI200SYN serial port driver"); +MODULE_LICENSE("GPL v2"); +MODULE_DEVICE_TABLE(pci, pci200_pci_tbl); +module_param(pci_clock_freq, int, 0444); +MODULE_PARM_DESC(pci_clock_freq, "System PCI clock frequency in Hz"); +module_init(pci200_init_module); +module_exit(pci200_cleanup_module); diff --git a/drivers/net/wan/slic_ds26522.c b/drivers/net/wan/slic_ds26522.c new file mode 100644 index 000000000..6063552ce --- /dev/null +++ b/drivers/net/wan/slic_ds26522.c @@ -0,0 +1,244 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * drivers/net/wan/slic_ds26522.c + * + * Copyright (C) 2016 Freescale Semiconductor, Inc. + * + * Author:Zhao Qiang<qiang.zhao@nxp.com> + */ + +#include <linux/bitrev.h> +#include <linux/module.h> +#include <linux/device.h> +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/kthread.h> +#include <linux/spi/spi.h> +#include <linux/wait.h> +#include <linux/param.h> +#include <linux/delay.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/io.h> +#include "slic_ds26522.h" + +#define SLIC_TRANS_LEN 1 +#define SLIC_TWO_LEN 2 +#define SLIC_THREE_LEN 3 + +static struct spi_device *g_spi; + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Zhao Qiang<B45475@freescale.com>"); + +/* the read/write format of address is + * w/r|A13|A12|A11|A10|A9|A8|A7|A6|A5|A4|A3|A2|A1|A0|x + */ +static void slic_write(struct spi_device *spi, u16 addr, + u8 data) +{ + u8 temp[3]; + + addr = bitrev16(addr) >> 1; + data = bitrev8(data); + temp[0] = (u8)((addr >> 8) & 0x7f); + temp[1] = (u8)(addr & 0xfe); + temp[2] = data; + + /* write spi addr and value */ + spi_write(spi, &temp[0], SLIC_THREE_LEN); +} + +static u8 slic_read(struct spi_device *spi, u16 addr) +{ + u8 temp[2]; + u8 data; + + addr = bitrev16(addr) >> 1; + temp[0] = (u8)(((addr >> 8) & 0x7f) | 0x80); + temp[1] = (u8)(addr & 0xfe); + + spi_write_then_read(spi, &temp[0], SLIC_TWO_LEN, &data, + SLIC_TRANS_LEN); + + data = bitrev8(data); + return data; +} + +static bool get_slic_product_code(struct spi_device *spi) +{ + u8 device_id; + + device_id = slic_read(spi, DS26522_IDR_ADDR); + if ((device_id & 0xf8) == 0x68) + return true; + else + return false; +} + +static void ds26522_e1_spec_config(struct spi_device *spi) +{ + /* Receive E1 Mode, Framer Disabled */ + slic_write(spi, DS26522_RMMR_ADDR, DS26522_RMMR_E1); + + /* Transmit E1 Mode, Framer Disable */ + slic_write(spi, DS26522_TMMR_ADDR, DS26522_TMMR_E1); + + /* Receive E1 Mode Framer Enable */ + slic_write(spi, DS26522_RMMR_ADDR, + slic_read(spi, DS26522_RMMR_ADDR) | DS26522_RMMR_FRM_EN); + + /* Transmit E1 Mode Framer Enable */ + slic_write(spi, DS26522_TMMR_ADDR, + slic_read(spi, DS26522_TMMR_ADDR) | DS26522_TMMR_FRM_EN); + + /* RCR1, receive E1 B8zs & ESF */ + slic_write(spi, DS26522_RCR1_ADDR, + DS26522_RCR1_E1_HDB3 | DS26522_RCR1_E1_CCS); + + /* RSYSCLK=2.048MHz, RSYNC-Output 
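slic_write() and slic_read() hide the DS26522's unusual SPI framing: the 14-bit register address is sent bit-reversed, the top bit of the first byte carries the read/write flag (set for reads), and the data byte is bit-reversed as well. A stand-alone reconstruction of the write-frame construction, using portable bit-reversal helpers in place of the kernel's bitrev8()/bitrev16():

#include <stdio.h>
#include <stdint.h>

static uint8_t bitrev8(uint8_t x)
{
	uint8_t r = 0;

	for (int i = 0; i < 8; i++)
		r |= ((x >> i) & 1) << (7 - i);
	return r;
}

static uint16_t bitrev16(uint16_t x)
{
	return (uint16_t)(bitrev8(x & 0xff) << 8 | bitrev8(x >> 8));
}

int main(void)
{
	uint16_t addr = 0xf8;	/* DS26522_IDR_ADDR */
	uint8_t frame[3];

	addr = bitrev16(addr) >> 1;
	frame[0] = (addr >> 8) & 0x7f;	/* write: R/W bit clear */
	frame[1] = addr & 0xfe;
	frame[2] = bitrev8(0x00);	/* payload byte, also reversed */
	printf("SPI write frame: %02x %02x %02x\n",
	       frame[0], frame[1], frame[2]);
	return 0;
}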
*/ + slic_write(spi, DS26522_RIOCR_ADDR, + DS26522_RIOCR_2048KHZ | DS26522_RIOCR_RSIO_OUT); + + /* TCR1 Transmit E1 b8zs */ + slic_write(spi, DS26522_TCR1_ADDR, DS26522_TCR1_TB8ZS); + + /* TSYSCLK=2.048MHz, TSYNC-Output */ + slic_write(spi, DS26522_TIOCR_ADDR, + DS26522_TIOCR_2048KHZ | DS26522_TIOCR_TSIO_OUT); + + /* Set E1TAF */ + slic_write(spi, DS26522_E1TAF_ADDR, DS26522_E1TAF_DEFAULT); + + /* Set E1TNAF register */ + slic_write(spi, DS26522_E1TNAF_ADDR, DS26522_E1TNAF_DEFAULT); + + /* Receive E1 Mode Framer Enable & init Done */ + slic_write(spi, DS26522_RMMR_ADDR, slic_read(spi, DS26522_RMMR_ADDR) | + DS26522_RMMR_INIT_DONE); + + /* Transmit E1 Mode Framer Enable & init Done */ + slic_write(spi, DS26522_TMMR_ADDR, slic_read(spi, DS26522_TMMR_ADDR) | + DS26522_TMMR_INIT_DONE); + + /* Configure LIU E1 mode */ + slic_write(spi, DS26522_LTRCR_ADDR, DS26522_LTRCR_E1); + + /* E1 Mode default 75 ohm w/Transmit Impedance Matlinking */ + slic_write(spi, DS26522_LTITSR_ADDR, + DS26522_LTITSR_TLIS_75OHM | DS26522_LTITSR_LBOS_75OHM); + + /* E1 Mode default 75 ohm Long Haul w/Receive Impedance Matlinking */ + slic_write(spi, DS26522_LRISMR_ADDR, + DS26522_LRISMR_75OHM | DS26522_LRISMR_MAX); + + /* Enable Transmit output */ + slic_write(spi, DS26522_LMCR_ADDR, DS26522_LMCR_TE); +} + +static int slic_ds26522_init_configure(struct spi_device *spi) +{ + u16 addr; + + /* set clock */ + slic_write(spi, DS26522_GTCCR_ADDR, DS26522_GTCCR_BPREFSEL_REFCLKIN | + DS26522_GTCCR_BFREQSEL_2048KHZ | + DS26522_GTCCR_FREQSEL_2048KHZ); + slic_write(spi, DS26522_GTCR2_ADDR, DS26522_GTCR2_TSSYNCOUT); + slic_write(spi, DS26522_GFCR_ADDR, DS26522_GFCR_BPCLK_2048KHZ); + + /* set gtcr */ + slic_write(spi, DS26522_GTCR1_ADDR, DS26522_GTCR1); + + /* Global LIU Software Reset Register */ + slic_write(spi, DS26522_GLSRR_ADDR, DS26522_GLSRR_RESET); + + /* Global Framer and BERT Software Reset Register */ + slic_write(spi, DS26522_GFSRR_ADDR, DS26522_GFSRR_RESET); + + usleep_range(100, 120); + + slic_write(spi, DS26522_GLSRR_ADDR, DS26522_GLSRR_NORMAL); + slic_write(spi, DS26522_GFSRR_ADDR, DS26522_GFSRR_NORMAL); + + /* Perform RX/TX SRESET,Reset receiver */ + slic_write(spi, DS26522_RMMR_ADDR, DS26522_RMMR_SFTRST); + + /* Reset tranceiver */ + slic_write(spi, DS26522_TMMR_ADDR, DS26522_TMMR_SFTRST); + + usleep_range(100, 120); + + /* Zero all Framer Registers */ + for (addr = DS26522_RF_ADDR_START; addr <= DS26522_RF_ADDR_END; + addr++) + slic_write(spi, addr, 0); + + for (addr = DS26522_TF_ADDR_START; addr <= DS26522_TF_ADDR_END; + addr++) + slic_write(spi, addr, 0); + + for (addr = DS26522_LIU_ADDR_START; addr <= DS26522_LIU_ADDR_END; + addr++) + slic_write(spi, addr, 0); + + for (addr = DS26522_BERT_ADDR_START; addr <= DS26522_BERT_ADDR_END; + addr++) + slic_write(spi, addr, 0); + + /* setup ds26522 for E1 specification */ + ds26522_e1_spec_config(spi); + + slic_write(spi, DS26522_GTCR1_ADDR, 0x00); + + return 0; +} + +static void slic_ds26522_remove(struct spi_device *spi) +{ + pr_info("DS26522 module uninstalled\n"); +} + +static int slic_ds26522_probe(struct spi_device *spi) +{ + int ret = 0; + + g_spi = spi; + spi->bits_per_word = 8; + + if (!get_slic_product_code(spi)) + return ret; + + ret = slic_ds26522_init_configure(spi); + if (ret == 0) + pr_info("DS26522 cs%d configured\n", spi->chip_select); + + return ret; +} + +static const struct spi_device_id slic_ds26522_id[] = { + { .name = "ds26522" }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(spi, slic_ds26522_id); + +static const struct of_device_id 
slic_ds26522_match[] = { + { + .compatible = "maxim,ds26522", + }, + {}, +}; +MODULE_DEVICE_TABLE(of, slic_ds26522_match); + +static struct spi_driver slic_ds26522_driver = { + .driver = { + .name = "ds26522", + .bus = &spi_bus_type, + .of_match_table = slic_ds26522_match, + }, + .probe = slic_ds26522_probe, + .remove = slic_ds26522_remove, + .id_table = slic_ds26522_id, +}; + +module_spi_driver(slic_ds26522_driver); diff --git a/drivers/net/wan/slic_ds26522.h b/drivers/net/wan/slic_ds26522.h new file mode 100644 index 000000000..59f987e4f --- /dev/null +++ b/drivers/net/wan/slic_ds26522.h @@ -0,0 +1,130 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * drivers/tdm/line_ctrl/slic_ds26522.h + * + * Copyright 2016 Freescale Semiconductor, Inc. + * + * Author: Zhao Qiang <B45475@freescale.com> + */ + +#define DS26522_RF_ADDR_START 0x00 +#define DS26522_RF_ADDR_END 0xef +#define DS26522_GLB_ADDR_START 0xf0 +#define DS26522_GLB_ADDR_END 0xff +#define DS26522_TF_ADDR_START 0x100 +#define DS26522_TF_ADDR_END 0x1ef +#define DS26522_LIU_ADDR_START 0x1000 +#define DS26522_LIU_ADDR_END 0x101f +#define DS26522_TEST_ADDR_START 0x1008 +#define DS26522_TEST_ADDR_END 0x101f +#define DS26522_BERT_ADDR_START 0x1100 +#define DS26522_BERT_ADDR_END 0x110f + +#define DS26522_RMMR_ADDR 0x80 +#define DS26522_RCR1_ADDR 0x81 +#define DS26522_RCR3_ADDR 0x83 +#define DS26522_RIOCR_ADDR 0x84 + +#define DS26522_GTCR1_ADDR 0xf0 +#define DS26522_GFCR_ADDR 0xf1 +#define DS26522_GTCR2_ADDR 0xf2 +#define DS26522_GTCCR_ADDR 0xf3 +#define DS26522_GLSRR_ADDR 0xf5 +#define DS26522_GFSRR_ADDR 0xf6 +#define DS26522_IDR_ADDR 0xf8 + +#define DS26522_E1TAF_ADDR 0x164 +#define DS26522_E1TNAF_ADDR 0x165 +#define DS26522_TMMR_ADDR 0x180 +#define DS26522_TCR1_ADDR 0x181 +#define DS26522_TIOCR_ADDR 0x184 + +#define DS26522_LTRCR_ADDR 0x1000 +#define DS26522_LTITSR_ADDR 0x1001 +#define DS26522_LMCR_ADDR 0x1002 +#define DS26522_LRISMR_ADDR 0x1007 + +#define MAX_NUM_OF_CHANNELS 8 +#define PQ_MDS_8E1T1_BRD_REV 0x00 +#define PQ_MDS_8E1T1_PLD_REV 0x00 + +#define DS26522_GTCCR_BPREFSEL_REFCLKIN 0xa0 +#define DS26522_GTCCR_BFREQSEL_1544KHZ 0x08 +#define DS26522_GTCCR_FREQSEL_1544KHZ 0x04 +#define DS26522_GTCCR_BFREQSEL_2048KHZ 0x00 +#define DS26522_GTCCR_FREQSEL_2048KHZ 0x00 + +#define DS26522_GFCR_BPCLK_2048KHZ 0x00 + +#define DS26522_GTCR2_TSSYNCOUT 0x02 +#define DS26522_GTCR1 0x00 + +#define DS26522_GFSRR_RESET 0x01 +#define DS26522_GFSRR_NORMAL 0x00 + +#define DS26522_GLSRR_RESET 0x01 +#define DS26522_GLSRR_NORMAL 0x00 + +#define DS26522_RMMR_SFTRST 0x02 +#define DS26522_RMMR_FRM_EN 0x80 +#define DS26522_RMMR_INIT_DONE 0x40 +#define DS26522_RMMR_T1 0x00 +#define DS26522_RMMR_E1 0x01 + +#define DS26522_E1TAF_DEFAULT 0x1b +#define DS26522_E1TNAF_DEFAULT 0x40 + +#define DS26522_TMMR_SFTRST 0x02 +#define DS26522_TMMR_FRM_EN 0x80 +#define DS26522_TMMR_INIT_DONE 0x40 +#define DS26522_TMMR_T1 0x00 +#define DS26522_TMMR_E1 0x01 + +#define DS26522_RCR1_T1_SYNCT 0x80 +#define DS26522_RCR1_T1_RB8ZS 0x40 +#define DS26522_RCR1_T1_SYNCC 0x08 + +#define DS26522_RCR1_E1_HDB3 0x40 +#define DS26522_RCR1_E1_CCS 0x20 + +#define DS26522_RIOCR_1544KHZ 0x00 +#define DS26522_RIOCR_2048KHZ 0x10 +#define DS26522_RIOCR_RSIO_OUT 0x00 + +#define DS26522_RCR3_FLB 0x01 + +#define DS26522_TIOCR_1544KHZ 0x00 +#define DS26522_TIOCR_2048KHZ 0x10 +#define DS26522_TIOCR_TSIO_OUT 0x04 + +#define DS26522_TCR1_TB8ZS 0x04 + +#define DS26522_LTRCR_T1 0x02 +#define DS26522_LTRCR_E1 0x00 + +#define DS26522_LTITSR_TLIS_75OHM 0x00 +#define DS26522_LTITSR_LBOS_75OHM 0x00 
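The GTCCR constants encode the global clock setup for either line rate: a 1.544 MHz backplane for T1 or 2.048 MHz for E1 (this driver only ever programs the E1 case). A speculative little helper showing how the header's values would combine for each rate:

#include <stdio.h>
#include <stdint.h>

#define DS26522_GTCCR_BPREFSEL_REFCLKIN	0xa0
#define DS26522_GTCCR_BFREQSEL_1544KHZ	0x08
#define DS26522_GTCCR_FREQSEL_1544KHZ	0x04
#define DS26522_GTCCR_BFREQSEL_2048KHZ	0x00
#define DS26522_GTCCR_FREQSEL_2048KHZ	0x00

enum line_rate { LINE_RATE_T1, LINE_RATE_E1 };

static uint8_t gtccr_for(enum line_rate rate)
{
	if (rate == LINE_RATE_T1)
		return DS26522_GTCCR_BPREFSEL_REFCLKIN |
		       DS26522_GTCCR_BFREQSEL_1544KHZ |
		       DS26522_GTCCR_FREQSEL_1544KHZ;
	return DS26522_GTCCR_BPREFSEL_REFCLKIN |
	       DS26522_GTCCR_BFREQSEL_2048KHZ |
	       DS26522_GTCCR_FREQSEL_2048KHZ;
}

int main(void)
{
	printf("T1 GTCCR=0x%02x, E1 GTCCR=0x%02x\n",
	       gtccr_for(LINE_RATE_T1), gtccr_for(LINE_RATE_E1));
	return 0;
}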
+#define DS26522_LTITSR_TLIS_100OHM 0x10 +#define DS26522_LTITSR_TLIS_0DB_CSU 0x00 + +#define DS26522_LRISMR_75OHM 0x00 +#define DS26522_LRISMR_100OHM 0x10 +#define DS26522_LRISMR_MAX 0x03 + +#define DS26522_LMCR_TE 0x01 + +enum line_rate { + LINE_RATE_T1, /* T1 line rate (1.544 Mbps) */ + LINE_RATE_E1 /* E1 line rate (2.048 Mbps) */ +}; + +enum tdm_trans_mode { + NORMAL = 0, + FRAMER_LB +}; + +enum card_support_type { + LM_CARD = 0, + DS26522_CARD, + NO_CARD +}; diff --git a/drivers/net/wan/wanxl.c b/drivers/net/wan/wanxl.c new file mode 100644 index 000000000..5a9e26218 --- /dev/null +++ b/drivers/net/wan/wanxl.c @@ -0,0 +1,844 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * wanXL serial card driver for Linux + * host part + * + * Copyright (C) 2003 Krzysztof Halasa <khc@pm.waw.pl> + * + * Status: + * - Only DTE (external clock) support with NRZ and NRZI encodings + * - wanXL100 will require minor driver modifications, no access to hw + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/sched.h> +#include <linux/types.h> +#include <linux/fcntl.h> +#include <linux/string.h> +#include <linux/errno.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/ioport.h> +#include <linux/netdevice.h> +#include <linux/hdlc.h> +#include <linux/pci.h> +#include <linux/dma-mapping.h> +#include <linux/delay.h> +#include <asm/io.h> + +#include "wanxl.h" + +static const char *version = "wanXL serial card driver version: 0.48"; + +#define PLX_CTL_RESET 0x40000000 /* adapter reset */ + +#undef DEBUG_PKT +#undef DEBUG_PCI + +/* MAILBOX #1 - PUTS COMMANDS */ +#define MBX1_CMD_ABORTJ 0x85000000 /* Abort and Jump */ +#ifdef __LITTLE_ENDIAN +#define MBX1_CMD_BSWAP 0x8C000001 /* little-endian Byte Swap Mode */ +#else +#define MBX1_CMD_BSWAP 0x8C000000 /* big-endian Byte Swap Mode */ +#endif + +/* MAILBOX #2 - DRAM SIZE */ +#define MBX2_MEMSZ_MASK 0xFFFF0000 /* PUTS Memory Size Register mask */ + +struct port { + struct net_device *dev; + struct card *card; + spinlock_t lock; /* for wanxl_xmit */ + int node; /* physical port #0 - 3 */ + unsigned int clock_type; + int tx_in, tx_out; + struct sk_buff *tx_skbs[TX_BUFFERS]; +}; + +struct card_status { + desc_t rx_descs[RX_QUEUE_LENGTH]; + port_status_t port_status[4]; +}; + +struct card { + int n_ports; /* 1, 2 or 4 ports */ + u8 irq; + + u8 __iomem *plx; /* PLX PCI9060 virtual base address */ + struct pci_dev *pdev; /* for pci_name(pdev) */ + int rx_in; + struct sk_buff *rx_skbs[RX_QUEUE_LENGTH]; + struct card_status *status; /* shared between host and card */ + dma_addr_t status_address; + struct port ports[]; /* 1 - 4 port structures follow */ +}; + +static inline struct port *dev_to_port(struct net_device *dev) +{ + return (struct port *)dev_to_hdlc(dev)->priv; +} + +static inline port_status_t *get_status(struct port *port) +{ + return &port->card->status->port_status[port->node]; +} + +#ifdef DEBUG_PCI +static inline dma_addr_t pci_map_single_debug(struct pci_dev *pdev, void *ptr, + size_t size, int direction) +{ + dma_addr_t addr = dma_map_single(&pdev->dev, ptr, size, direction); + + if (addr + size > 0x100000000LL) + pr_crit("%s: pci_map_single() returned memory at 0x%llx!\n", + pci_name(pdev), (unsigned long long)addr); + return addr; +} + +#undef pci_map_single +#define pci_map_single pci_map_single_debug +#endif + +/* Cable and/or personality module change interrupt service */ +static inline void wanxl_cable_intr(struct port *port) +{ + 
u32 value = get_status(port)->cable; + int valid = 1; + const char *cable, *pm, *dte = "", *dsr = "", *dcd = ""; + + switch (value & 0x7) { + case STATUS_CABLE_V35: + cable = "V.35"; + break; + case STATUS_CABLE_X21: + cable = "X.21"; + break; + case STATUS_CABLE_V24: + cable = "V.24"; + break; + case STATUS_CABLE_EIA530: + cable = "EIA530"; + break; + case STATUS_CABLE_NONE: + cable = "no"; + break; + default: + cable = "invalid"; + } + + switch ((value >> STATUS_CABLE_PM_SHIFT) & 0x7) { + case STATUS_CABLE_V35: + pm = "V.35"; + break; + case STATUS_CABLE_X21: + pm = "X.21"; + break; + case STATUS_CABLE_V24: + pm = "V.24"; + break; + case STATUS_CABLE_EIA530: + pm = "EIA530"; + break; + case STATUS_CABLE_NONE: + pm = "no personality"; + valid = 0; + break; + default: + pm = "invalid personality"; + valid = 0; + } + + if (valid) { + if ((value & 7) == ((value >> STATUS_CABLE_PM_SHIFT) & 7)) { + dsr = (value & STATUS_CABLE_DSR) ? ", DSR ON" : + ", DSR off"; + dcd = (value & STATUS_CABLE_DCD) ? ", carrier ON" : + ", carrier off"; + } + dte = (value & STATUS_CABLE_DCE) ? " DCE" : " DTE"; + } + netdev_info(port->dev, "%s%s module, %s cable%s%s\n", + pm, dte, cable, dsr, dcd); + + if (value & STATUS_CABLE_DCD) + netif_carrier_on(port->dev); + else + netif_carrier_off(port->dev); +} + +/* Transmit complete interrupt service */ +static inline void wanxl_tx_intr(struct port *port) +{ + struct net_device *dev = port->dev; + + while (1) { + desc_t *desc = &get_status(port)->tx_descs[port->tx_in]; + struct sk_buff *skb = port->tx_skbs[port->tx_in]; + + switch (desc->stat) { + case PACKET_FULL: + case PACKET_EMPTY: + netif_wake_queue(dev); + return; + + case PACKET_UNDERRUN: + dev->stats.tx_errors++; + dev->stats.tx_fifo_errors++; + break; + + default: + dev->stats.tx_packets++; + dev->stats.tx_bytes += skb->len; + } + desc->stat = PACKET_EMPTY; /* Free descriptor */ + dma_unmap_single(&port->card->pdev->dev, desc->address, + skb->len, DMA_TO_DEVICE); + dev_consume_skb_irq(skb); + port->tx_in = (port->tx_in + 1) % TX_BUFFERS; + } +} + +/* Receive complete interrupt service */ +static inline void wanxl_rx_intr(struct card *card) +{ + desc_t *desc; + + while (desc = &card->status->rx_descs[card->rx_in], + desc->stat != PACKET_EMPTY) { + if ((desc->stat & PACKET_PORT_MASK) > card->n_ports) { + pr_crit("%s: received packet for nonexistent port\n", + pci_name(card->pdev)); + } else { + struct sk_buff *skb = card->rx_skbs[card->rx_in]; + struct port *port = &card->ports[desc->stat & + PACKET_PORT_MASK]; + struct net_device *dev = port->dev; + + if (!skb) { + dev->stats.rx_dropped++; + } else { + dma_unmap_single(&card->pdev->dev, + desc->address, BUFFER_LENGTH, + DMA_FROM_DEVICE); + skb_put(skb, desc->length); + +#ifdef DEBUG_PKT + printk(KERN_DEBUG "%s RX(%i):", dev->name, + skb->len); + debug_frame(skb); +#endif + dev->stats.rx_packets++; + dev->stats.rx_bytes += skb->len; + skb->protocol = hdlc_type_trans(skb, dev); + netif_rx(skb); + skb = NULL; + } + + if (!skb) { + skb = dev_alloc_skb(BUFFER_LENGTH); + desc->address = skb ? 
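wanxl_cable_intr() decodes a single status word: cable type in bits 0-2, personality-module type at STATUS_CABLE_PM_SHIFT, and DSR/DCD/DCE as flag bits (the constants come from wanxl.h, further down). The same decode as a stand-alone program fed a synthetic status value:

#include <stdio.h>
#include <stdint.h>

#define STATUS_CABLE_V35	2
#define STATUS_CABLE_X21	3
#define STATUS_CABLE_V24	4
#define STATUS_CABLE_EIA530	5
#define STATUS_CABLE_NONE	7
#define STATUS_CABLE_DCE	0x8000
#define STATUS_CABLE_DSR	0x0010
#define STATUS_CABLE_DCD	0x0008
#define STATUS_CABLE_PM_SHIFT	5

static const char *cable_name(unsigned int v)
{
	switch (v & 7) {
	case STATUS_CABLE_V35: return "V.35";
	case STATUS_CABLE_X21: return "X.21";
	case STATUS_CABLE_V24: return "V.24";
	case STATUS_CABLE_EIA530: return "EIA530";
	case STATUS_CABLE_NONE: return "no";
	default: return "invalid";
	}
}

int main(void)
{
	/* synthetic status: V.35 cable on a V.35 module, DSR+DCD up */
	uint32_t value = STATUS_CABLE_V35 |
			 (STATUS_CABLE_V35 << STATUS_CABLE_PM_SHIFT) |
			 STATUS_CABLE_DSR | STATUS_CABLE_DCD;

	printf("%s cable, %s module, DSR %s, DCD %s, %s\n",
	       cable_name(value),
	       cable_name(value >> STATUS_CABLE_PM_SHIFT),
	       value & STATUS_CABLE_DSR ? "on" : "off",
	       value & STATUS_CABLE_DCD ? "on" : "off",
	       value & STATUS_CABLE_DCE ? "DCE" : "DTE");
	return 0;
}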
+ dma_map_single(&card->pdev->dev, + skb->data, + BUFFER_LENGTH, + DMA_FROM_DEVICE) : 0; + card->rx_skbs[card->rx_in] = skb; + } + } + desc->stat = PACKET_EMPTY; /* Free descriptor */ + card->rx_in = (card->rx_in + 1) % RX_QUEUE_LENGTH; + } +} + +static irqreturn_t wanxl_intr(int irq, void *dev_id) +{ + struct card *card = dev_id; + int i; + u32 stat; + int handled = 0; + + while ((stat = readl(card->plx + PLX_DOORBELL_FROM_CARD)) != 0) { + handled = 1; + writel(stat, card->plx + PLX_DOORBELL_FROM_CARD); + + for (i = 0; i < card->n_ports; i++) { + if (stat & (1 << (DOORBELL_FROM_CARD_TX_0 + i))) + wanxl_tx_intr(&card->ports[i]); + if (stat & (1 << (DOORBELL_FROM_CARD_CABLE_0 + i))) + wanxl_cable_intr(&card->ports[i]); + } + if (stat & (1 << DOORBELL_FROM_CARD_RX)) + wanxl_rx_intr(card); + } + + return IRQ_RETVAL(handled); +} + +static netdev_tx_t wanxl_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct port *port = dev_to_port(dev); + desc_t *desc; + + spin_lock(&port->lock); + + desc = &get_status(port)->tx_descs[port->tx_out]; + if (desc->stat != PACKET_EMPTY) { + /* should never happen - previous xmit should stop queue */ +#ifdef DEBUG_PKT + printk(KERN_DEBUG "%s: transmitter buffer full\n", dev->name); +#endif + netif_stop_queue(dev); + spin_unlock(&port->lock); + return NETDEV_TX_BUSY; /* request packet to be queued */ + } + +#ifdef DEBUG_PKT + printk(KERN_DEBUG "%s TX(%i):", dev->name, skb->len); + debug_frame(skb); +#endif + + port->tx_skbs[port->tx_out] = skb; + desc->address = dma_map_single(&port->card->pdev->dev, skb->data, + skb->len, DMA_TO_DEVICE); + desc->length = skb->len; + desc->stat = PACKET_FULL; + writel(1 << (DOORBELL_TO_CARD_TX_0 + port->node), + port->card->plx + PLX_DOORBELL_TO_CARD); + + port->tx_out = (port->tx_out + 1) % TX_BUFFERS; + + if (get_status(port)->tx_descs[port->tx_out].stat != PACKET_EMPTY) { + netif_stop_queue(dev); +#ifdef DEBUG_PKT + printk(KERN_DEBUG "%s: transmitter buffer full\n", dev->name); +#endif + } + + spin_unlock(&port->lock); + return NETDEV_TX_OK; +} + +static int wanxl_attach(struct net_device *dev, unsigned short encoding, + unsigned short parity) +{ + struct port *port = dev_to_port(dev); + + if (encoding != ENCODING_NRZ && + encoding != ENCODING_NRZI) + return -EINVAL; + + if (parity != PARITY_NONE && + parity != PARITY_CRC32_PR1_CCITT && + parity != PARITY_CRC16_PR1_CCITT && + parity != PARITY_CRC32_PR0_CCITT && + parity != PARITY_CRC16_PR0_CCITT) + return -EINVAL; + + get_status(port)->encoding = encoding; + get_status(port)->parity = parity; + return 0; +} + +static int wanxl_ioctl(struct net_device *dev, struct if_settings *ifs) +{ + const size_t size = sizeof(sync_serial_settings); + sync_serial_settings line; + struct port *port = dev_to_port(dev); + + switch (ifs->type) { + case IF_GET_IFACE: + ifs->type = IF_IFACE_SYNC_SERIAL; + if (ifs->size < size) { + ifs->size = size; /* data size wanted */ + return -ENOBUFS; + } + memset(&line, 0, sizeof(line)); + line.clock_type = get_status(port)->clocking; + line.clock_rate = 0; + line.loopback = 0; + + if (copy_to_user(ifs->ifs_ifsu.sync, &line, size)) + return -EFAULT; + return 0; + + case IF_IFACE_SYNC_SERIAL: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + if (dev->flags & IFF_UP) + return -EBUSY; + + if (copy_from_user(&line, ifs->ifs_ifsu.sync, + size)) + return -EFAULT; + + if (line.clock_type != CLOCK_EXT && + line.clock_type != CLOCK_TXFROMRX) + return -EINVAL; /* No such clock setting */ + + if (line.loopback != 0) + return -EINVAL; + + 
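wanxl_xmit() throttles itself through the descriptor ring: a frame is handed to the card by flipping its descriptor to PACKET_FULL, and the queue is stopped as soon as the next descriptor is still owned by the card. A toy single-threaded model of that handshake, in which no card ever returns descriptors:

#include <stdio.h>

#define TX_BUFFERS	10
#define PACKET_EMPTY	0x00
#define PACKET_FULL	0x10

struct ring { int stat[TX_BUFFERS]; int tx_out; };

static int ring_xmit(struct ring *r)
{
	if (r->stat[r->tx_out] != PACKET_EMPTY)
		return -1;			/* busy: queue must stop */
	r->stat[r->tx_out] = PACKET_FULL;	/* hand frame to the card */
	r->tx_out = (r->tx_out + 1) % TX_BUFFERS;
	/* 1 means: frame accepted, but stop the queue now */
	return r->stat[r->tx_out] == PACKET_EMPTY ? 0 : 1;
}

int main(void)
{
	struct ring r = { { 0 }, 0 };
	int sent = 0;

	while (ring_xmit(&r) == 0)	/* fill until the ring says stop */
		sent++;
	printf("queued %d frames before stopping\n", sent + 1);
	return 0;
}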
get_status(port)->clocking = line.clock_type; + return 0; + + default: + return hdlc_ioctl(dev, ifs); + } +} + +static int wanxl_open(struct net_device *dev) +{ + struct port *port = dev_to_port(dev); + u8 __iomem *dbr = port->card->plx + PLX_DOORBELL_TO_CARD; + unsigned long timeout; + int i; + + if (get_status(port)->open) { + netdev_err(dev, "port already open\n"); + return -EIO; + } + + i = hdlc_open(dev); + if (i) + return i; + + port->tx_in = port->tx_out = 0; + for (i = 0; i < TX_BUFFERS; i++) + get_status(port)->tx_descs[i].stat = PACKET_EMPTY; + /* signal the card */ + writel(1 << (DOORBELL_TO_CARD_OPEN_0 + port->node), dbr); + + timeout = jiffies + HZ; + do { + if (get_status(port)->open) { + netif_start_queue(dev); + return 0; + } + } while (time_after(timeout, jiffies)); + + netdev_err(dev, "unable to open port\n"); + /* ask the card to close the port, should it be still alive */ + writel(1 << (DOORBELL_TO_CARD_CLOSE_0 + port->node), dbr); + return -EFAULT; +} + +static int wanxl_close(struct net_device *dev) +{ + struct port *port = dev_to_port(dev); + unsigned long timeout; + int i; + + hdlc_close(dev); + /* signal the card */ + writel(1 << (DOORBELL_TO_CARD_CLOSE_0 + port->node), + port->card->plx + PLX_DOORBELL_TO_CARD); + + timeout = jiffies + HZ; + do { + if (!get_status(port)->open) + break; + } while (time_after(timeout, jiffies)); + + if (get_status(port)->open) + netdev_err(dev, "unable to close port\n"); + + netif_stop_queue(dev); + + for (i = 0; i < TX_BUFFERS; i++) { + desc_t *desc = &get_status(port)->tx_descs[i]; + + if (desc->stat != PACKET_EMPTY) { + desc->stat = PACKET_EMPTY; + dma_unmap_single(&port->card->pdev->dev, + desc->address, port->tx_skbs[i]->len, + DMA_TO_DEVICE); + dev_kfree_skb(port->tx_skbs[i]); + } + } + return 0; +} + +static struct net_device_stats *wanxl_get_stats(struct net_device *dev) +{ + struct port *port = dev_to_port(dev); + + dev->stats.rx_over_errors = get_status(port)->rx_overruns; + dev->stats.rx_frame_errors = get_status(port)->rx_frame_errors; + dev->stats.rx_errors = dev->stats.rx_over_errors + + dev->stats.rx_frame_errors; + return &dev->stats; +} + +static int wanxl_puts_command(struct card *card, u32 cmd) +{ + unsigned long timeout = jiffies + 5 * HZ; + + writel(cmd, card->plx + PLX_MAILBOX_1); + do { + if (readl(card->plx + PLX_MAILBOX_1) == 0) + return 0; + + schedule(); + } while (time_after(timeout, jiffies)); + + return -1; +} + +static void wanxl_reset(struct card *card) +{ + u32 old_value = readl(card->plx + PLX_CONTROL) & ~PLX_CTL_RESET; + + writel(0x80, card->plx + PLX_MAILBOX_0); + writel(old_value | PLX_CTL_RESET, card->plx + PLX_CONTROL); + readl(card->plx + PLX_CONTROL); /* wait for posted write */ + udelay(1); + writel(old_value, card->plx + PLX_CONTROL); + readl(card->plx + PLX_CONTROL); /* wait for posted write */ +} + +static void wanxl_pci_remove_one(struct pci_dev *pdev) +{ + struct card *card = pci_get_drvdata(pdev); + int i; + + for (i = 0; i < card->n_ports; i++) { + unregister_hdlc_device(card->ports[i].dev); + free_netdev(card->ports[i].dev); + } + + /* unregister and free all host resources */ + if (card->irq) + free_irq(card->irq, card); + + wanxl_reset(card); + + for (i = 0; i < RX_QUEUE_LENGTH; i++) + if (card->rx_skbs[i]) { + dma_unmap_single(&card->pdev->dev, + card->status->rx_descs[i].address, + BUFFER_LENGTH, DMA_FROM_DEVICE); + dev_kfree_skb(card->rx_skbs[i]); + } + + if (card->plx) + iounmap(card->plx); + + if (card->status) + dma_free_coherent(&pdev->dev, sizeof(struct card_status), + 
card->status, card->status_address); + + pci_release_regions(pdev); + pci_disable_device(pdev); + kfree(card); +} + +#include "wanxlfw.inc" + +static const struct net_device_ops wanxl_ops = { + .ndo_open = wanxl_open, + .ndo_stop = wanxl_close, + .ndo_start_xmit = hdlc_start_xmit, + .ndo_siocwandev = wanxl_ioctl, + .ndo_get_stats = wanxl_get_stats, +}; + +static int wanxl_pci_init_one(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct card *card; + u32 ramsize, stat; + unsigned long timeout; + u32 plx_phy; /* PLX PCI base address */ + u32 mem_phy; /* memory PCI base addr */ + u8 __iomem *mem; /* memory virtual base addr */ + int i, ports; + +#ifndef MODULE + pr_info_once("%s\n", version); +#endif + + i = pci_enable_device(pdev); + if (i) + return i; + + /* QUICC can only access first 256 MB of host RAM directly, + * but PLX9060 DMA does 32-bits for actual packet data transfers + */ + + /* FIXME when PCI/DMA subsystems are fixed. + * We set both dma_mask and consistent_dma_mask to 28 bits + * and pray pci_alloc_consistent() will use this info. It should + * work on most platforms + */ + if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(28)) || + dma_set_mask(&pdev->dev, DMA_BIT_MASK(28))) { + pr_err("No usable DMA configuration\n"); + pci_disable_device(pdev); + return -EIO; + } + + i = pci_request_regions(pdev, "wanXL"); + if (i) { + pci_disable_device(pdev); + return i; + } + + switch (pdev->device) { + case PCI_DEVICE_ID_SBE_WANXL100: + ports = 1; + break; + case PCI_DEVICE_ID_SBE_WANXL200: + ports = 2; + break; + default: + ports = 4; + } + + card = kzalloc(struct_size(card, ports, ports), GFP_KERNEL); + if (!card) { + pci_release_regions(pdev); + pci_disable_device(pdev); + return -ENOBUFS; + } + + pci_set_drvdata(pdev, card); + card->pdev = pdev; + + card->status = dma_alloc_coherent(&pdev->dev, + sizeof(struct card_status), + &card->status_address, GFP_KERNEL); + if (!card->status) { + wanxl_pci_remove_one(pdev); + return -ENOBUFS; + } + +#ifdef DEBUG_PCI + printk(KERN_DEBUG "wanXL %s: pci_alloc_consistent() returned memory" + " at 0x%LX\n", pci_name(pdev), + (unsigned long long)card->status_address); +#endif + + /* FIXME when PCI/DMA subsystems are fixed. + * We set both dma_mask and consistent_dma_mask back to 32 bits + * to indicate the card can do 32-bit DMA addressing + */ + if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)) || + dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) { + pr_err("No usable DMA configuration\n"); + wanxl_pci_remove_one(pdev); + return -EIO; + } + + /* set up PLX mapping */ + plx_phy = pci_resource_start(pdev, 0); + + card->plx = ioremap(plx_phy, 0x70); + if (!card->plx) { + pr_err("ioremap() failed\n"); + wanxl_pci_remove_one(pdev); + return -EFAULT; + } + +#if RESET_WHILE_LOADING + wanxl_reset(card); +#endif + + timeout = jiffies + 20 * HZ; + while ((stat = readl(card->plx + PLX_MAILBOX_0)) != 0) { + if (time_before(timeout, jiffies)) { + pr_warn("%s: timeout waiting for PUTS to complete\n", + pci_name(pdev)); + wanxl_pci_remove_one(pdev); + return -ENODEV; + } + + switch (stat & 0xC0) { + case 0x00: /* hmm - PUTS completed with non-zero code? 
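struct card ends in a flexible array member, so a single struct_size() allocation covers the header plus all one to four port structures. The equivalent idiom in plain C, with calloc() standing in for kzalloc():

#include <stdio.h>
#include <stdlib.h>

/* User-space model of the driver's card allocation. */
struct port { int node; };
struct card {
	int n_ports;
	struct port ports[];	/* 1-4 port structures follow */
};

int main(void)
{
	int ports = 4;
	struct card *card = calloc(1, sizeof(*card) +
				      ports * sizeof(struct port));

	if (!card)
		return 1;
	for (int i = 0; i < ports; i++)
		card->ports[i].node = i;
	printf("allocated card with %d ports in one block\n", ports);
	free(card);
	return 0;
}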
*/ + case 0x80: /* PUTS still testing the hardware */ + break; + + default: + pr_warn("%s: PUTS test 0x%X failed\n", + pci_name(pdev), stat & 0x30); + wanxl_pci_remove_one(pdev); + return -ENODEV; + } + + schedule(); + } + + /* get on-board memory size (PUTS detects no more than 4 MB) */ + ramsize = readl(card->plx + PLX_MAILBOX_2) & MBX2_MEMSZ_MASK; + + /* set up on-board RAM mapping */ + mem_phy = pci_resource_start(pdev, 2); + + /* sanity check the board's reported memory size */ + if (ramsize < BUFFERS_ADDR + + (TX_BUFFERS + RX_BUFFERS) * BUFFER_LENGTH * ports) { + pr_warn("%s: no enough on-board RAM (%u bytes detected, %u bytes required)\n", + pci_name(pdev), ramsize, + BUFFERS_ADDR + + (TX_BUFFERS + RX_BUFFERS) * BUFFER_LENGTH * ports); + wanxl_pci_remove_one(pdev); + return -ENODEV; + } + + if (wanxl_puts_command(card, MBX1_CMD_BSWAP)) { + pr_warn("%s: unable to Set Byte Swap Mode\n", pci_name(pdev)); + wanxl_pci_remove_one(pdev); + return -ENODEV; + } + + for (i = 0; i < RX_QUEUE_LENGTH; i++) { + struct sk_buff *skb = dev_alloc_skb(BUFFER_LENGTH); + + card->rx_skbs[i] = skb; + if (skb) + card->status->rx_descs[i].address = + dma_map_single(&card->pdev->dev, skb->data, + BUFFER_LENGTH, DMA_FROM_DEVICE); + } + + mem = ioremap(mem_phy, PDM_OFFSET + sizeof(firmware)); + if (!mem) { + pr_err("ioremap() failed\n"); + wanxl_pci_remove_one(pdev); + return -EFAULT; + } + + for (i = 0; i < sizeof(firmware); i += 4) + writel(ntohl(*(__be32 *)(firmware + i)), mem + PDM_OFFSET + i); + + for (i = 0; i < ports; i++) + writel(card->status_address + + (void *)&card->status->port_status[i] - + (void *)card->status, mem + PDM_OFFSET + 4 + i * 4); + writel(card->status_address, mem + PDM_OFFSET + 20); + writel(PDM_OFFSET, mem); + iounmap(mem); + + writel(0, card->plx + PLX_MAILBOX_5); + + if (wanxl_puts_command(card, MBX1_CMD_ABORTJ)) { + pr_warn("%s: unable to Abort and Jump\n", pci_name(pdev)); + wanxl_pci_remove_one(pdev); + return -ENODEV; + } + + timeout = jiffies + 5 * HZ; + do { + stat = readl(card->plx + PLX_MAILBOX_5); + if (stat) + break; + schedule(); + } while (time_after(timeout, jiffies)); + + if (!stat) { + pr_warn("%s: timeout while initializing card firmware\n", + pci_name(pdev)); + wanxl_pci_remove_one(pdev); + return -ENODEV; + } + +#if DETECT_RAM + ramsize = stat; +#endif + + pr_info("%s: at 0x%X, %u KB of RAM at 0x%X, irq %u\n", + pci_name(pdev), plx_phy, ramsize / 1024, mem_phy, pdev->irq); + + /* Allocate IRQ */ + if (request_irq(pdev->irq, wanxl_intr, IRQF_SHARED, "wanXL", card)) { + pr_warn("%s: could not allocate IRQ%i\n", + pci_name(pdev), pdev->irq); + wanxl_pci_remove_one(pdev); + return -EBUSY; + } + card->irq = pdev->irq; + + for (i = 0; i < ports; i++) { + hdlc_device *hdlc; + struct port *port = &card->ports[i]; + struct net_device *dev = alloc_hdlcdev(port); + + if (!dev) { + pr_err("%s: unable to allocate memory\n", + pci_name(pdev)); + wanxl_pci_remove_one(pdev); + return -ENOMEM; + } + + port->dev = dev; + hdlc = dev_to_hdlc(dev); + spin_lock_init(&port->lock); + dev->tx_queue_len = 50; + dev->netdev_ops = &wanxl_ops; + hdlc->attach = wanxl_attach; + hdlc->xmit = wanxl_xmit; + port->card = card; + port->node = i; + get_status(port)->clocking = CLOCK_EXT; + if (register_hdlc_device(dev)) { + pr_err("%s: unable to register hdlc device\n", + pci_name(pdev)); + free_netdev(dev); + wanxl_pci_remove_one(pdev); + return -ENOBUFS; + } + card->n_ports++; + } + + pr_info("%s: port", pci_name(pdev)); + for (i = 0; i < ports; i++) + pr_cont("%s #%i: %s", + i ? 
"," : "", i, card->ports[i].dev->name); + pr_cont("\n"); + + for (i = 0; i < ports; i++) + wanxl_cable_intr(&card->ports[i]); /* get carrier status etc.*/ + + return 0; +} + +static const struct pci_device_id wanxl_pci_tbl[] = { + { PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_SBE_WANXL100, PCI_ANY_ID, + PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_SBE_WANXL200, PCI_ANY_ID, + PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_SBE_WANXL400, PCI_ANY_ID, + PCI_ANY_ID, 0, 0, 0 }, + { 0, } +}; + +static struct pci_driver wanxl_pci_driver = { + .name = "wanXL", + .id_table = wanxl_pci_tbl, + .probe = wanxl_pci_init_one, + .remove = wanxl_pci_remove_one, +}; + +static int __init wanxl_init_module(void) +{ +#ifdef MODULE + pr_info("%s\n", version); +#endif + return pci_register_driver(&wanxl_pci_driver); +} + +static void __exit wanxl_cleanup_module(void) +{ + pci_unregister_driver(&wanxl_pci_driver); +} + +MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>"); +MODULE_DESCRIPTION("SBE Inc. wanXL serial port driver"); +MODULE_LICENSE("GPL v2"); +MODULE_DEVICE_TABLE(pci, wanxl_pci_tbl); + +module_init(wanxl_init_module); +module_exit(wanxl_cleanup_module); diff --git a/drivers/net/wan/wanxl.h b/drivers/net/wan/wanxl.h new file mode 100644 index 000000000..0b0198bf2 --- /dev/null +++ b/drivers/net/wan/wanxl.h @@ -0,0 +1,149 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * wanXL serial card driver for Linux + * definitions common to host driver and card firmware + * + * Copyright (C) 2003 Krzysztof Halasa <khc@pm.waw.pl> + */ + +#define RESET_WHILE_LOADING 0 + +/* you must rebuild the firmware if any of the following is changed */ +#define DETECT_RAM 0 /* needed for > 4MB RAM, 16 MB maximum */ +#define QUICC_MEMCPY_USES_PLX 1 /* must be used if the host has > 256 MB RAM */ + + +#define STATUS_CABLE_V35 2 +#define STATUS_CABLE_X21 3 +#define STATUS_CABLE_V24 4 +#define STATUS_CABLE_EIA530 5 +#define STATUS_CABLE_INVALID 6 +#define STATUS_CABLE_NONE 7 + +#define STATUS_CABLE_DCE 0x8000 +#define STATUS_CABLE_DSR 0x0010 +#define STATUS_CABLE_DCD 0x0008 +#define STATUS_CABLE_PM_SHIFT 5 + +#define PDM_OFFSET 0x1000 + +#define TX_BUFFERS 10 /* per port */ +#define RX_BUFFERS 30 +#define RX_QUEUE_LENGTH 40 /* card->host queue length - per card */ + +#define PACKET_EMPTY 0x00 +#define PACKET_FULL 0x10 +#define PACKET_SENT 0x20 /* TX only */ +#define PACKET_UNDERRUN 0x30 /* TX only */ +#define PACKET_PORT_MASK 0x03 /* RX only */ + +/* bit numbers in PLX9060 doorbell registers */ +#define DOORBELL_FROM_CARD_TX_0 0 /* packet sent by the card */ +#define DOORBELL_FROM_CARD_TX_1 1 +#define DOORBELL_FROM_CARD_TX_2 2 +#define DOORBELL_FROM_CARD_TX_3 3 +#define DOORBELL_FROM_CARD_RX 4 +#define DOORBELL_FROM_CARD_CABLE_0 5 /* cable/PM/etc. 
changed */ +#define DOORBELL_FROM_CARD_CABLE_1 6 +#define DOORBELL_FROM_CARD_CABLE_2 7 +#define DOORBELL_FROM_CARD_CABLE_3 8 + +#define DOORBELL_TO_CARD_OPEN_0 0 +#define DOORBELL_TO_CARD_OPEN_1 1 +#define DOORBELL_TO_CARD_OPEN_2 2 +#define DOORBELL_TO_CARD_OPEN_3 3 +#define DOORBELL_TO_CARD_CLOSE_0 4 +#define DOORBELL_TO_CARD_CLOSE_1 5 +#define DOORBELL_TO_CARD_CLOSE_2 6 +#define DOORBELL_TO_CARD_CLOSE_3 7 +#define DOORBELL_TO_CARD_TX_0 8 /* outbound packet queued */ +#define DOORBELL_TO_CARD_TX_1 9 +#define DOORBELL_TO_CARD_TX_2 10 +#define DOORBELL_TO_CARD_TX_3 11 + +/* firmware-only status bits, starting from last DOORBELL_TO_CARD + 1 */ +#define TASK_SCC_0 12 +#define TASK_SCC_1 13 +#define TASK_SCC_2 14 +#define TASK_SCC_3 15 + +#define ALIGN32(x) (((x) + 3) & 0xFFFFFFFC) +#define BUFFER_LENGTH ALIGN32(HDLC_MAX_MRU + 4) /* 4 bytes for 32-bit CRC */ + +/* Address of TX and RX buffers in 68360 address space */ +#define BUFFERS_ADDR 0x4000 /* 16 KB */ + +#ifndef __ASSEMBLER__ +#define PLX_OFFSET 0 +#else +#define PLX_OFFSET PLX + 0x80 +#endif + +#define PLX_MAILBOX_0 (PLX_OFFSET + 0x40) +#define PLX_MAILBOX_1 (PLX_OFFSET + 0x44) +#define PLX_MAILBOX_2 (PLX_OFFSET + 0x48) +#define PLX_MAILBOX_3 (PLX_OFFSET + 0x4C) +#define PLX_MAILBOX_4 (PLX_OFFSET + 0x50) +#define PLX_MAILBOX_5 (PLX_OFFSET + 0x54) +#define PLX_MAILBOX_6 (PLX_OFFSET + 0x58) +#define PLX_MAILBOX_7 (PLX_OFFSET + 0x5C) +#define PLX_DOORBELL_TO_CARD (PLX_OFFSET + 0x60) +#define PLX_DOORBELL_FROM_CARD (PLX_OFFSET + 0x64) +#define PLX_INTERRUPT_CS (PLX_OFFSET + 0x68) +#define PLX_CONTROL (PLX_OFFSET + 0x6C) + +#ifdef __ASSEMBLER__ +#define PLX_DMA_0_MODE (PLX + 0x100) +#define PLX_DMA_0_PCI (PLX + 0x104) +#define PLX_DMA_0_LOCAL (PLX + 0x108) +#define PLX_DMA_0_LENGTH (PLX + 0x10C) +#define PLX_DMA_0_DESC (PLX + 0x110) +#define PLX_DMA_1_MODE (PLX + 0x114) +#define PLX_DMA_1_PCI (PLX + 0x118) +#define PLX_DMA_1_LOCAL (PLX + 0x11C) +#define PLX_DMA_1_LENGTH (PLX + 0x120) +#define PLX_DMA_1_DESC (PLX + 0x124) +#define PLX_DMA_CMD_STS (PLX + 0x128) +#define PLX_DMA_ARBITR_0 (PLX + 0x12C) +#define PLX_DMA_ARBITR_1 (PLX + 0x130) +#endif + +#define DESC_LENGTH 12 + +/* offsets from start of status_t */ +/* card to host */ +#define STATUS_OPEN 0 +#define STATUS_CABLE (STATUS_OPEN + 4) +#define STATUS_RX_OVERRUNS (STATUS_CABLE + 4) +#define STATUS_RX_FRAME_ERRORS (STATUS_RX_OVERRUNS + 4) + +/* host to card */ +#define STATUS_PARITY (STATUS_RX_FRAME_ERRORS + 4) +#define STATUS_ENCODING (STATUS_PARITY + 4) +#define STATUS_CLOCKING (STATUS_ENCODING + 4) +#define STATUS_TX_DESCS (STATUS_CLOCKING + 4) + +#ifndef __ASSEMBLER__ + +typedef struct { + volatile u32 stat; + u32 address; /* PCI address */ + volatile u32 length; +}desc_t; + + +typedef struct { +// Card to host + volatile u32 open; + volatile u32 cable; + volatile u32 rx_overruns; + volatile u32 rx_frame_errors; + +// Host to card + u32 parity; + u32 encoding; + u32 clocking; + desc_t tx_descs[TX_BUFFERS]; +}port_status_t; + +#endif /* __ASSEMBLER__ */ diff --git a/drivers/net/wan/wanxlfw.S b/drivers/net/wan/wanxlfw.S new file mode 100644 index 000000000..6c3735ac8 --- /dev/null +++ b/drivers/net/wan/wanxlfw.S @@ -0,0 +1,894 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +.psize 0 +/* + wanXL serial card driver for Linux + card firmware part + + Copyright (C) 2003 Krzysztof Halasa <khc@pm.waw.pl> + + + + + + DPRAM BDs: + 0x000 - 0x050 TX#0 0x050 - 0x140 RX#0 + 0x140 - 0x190 TX#1 0x190 - 0x280 RX#1 + 0x280 - 0x2D0 TX#2 0x2D0 - 0x3C0 RX#2 + 0x3C0 - 0x410 TX#3 0x410 - 0x500 RX#3 + + 
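+   (each TX BD area above is TX_BUFFERS * 8 = 0x50 bytes and each RX BD
+   area is RX_BUFFERS * 8 = 0xF0 bytes; a buffer descriptor is two
+   longwords: status/flags + length, then the buffer address)
+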
+ 000 5FF 1536 Bytes Dual-Port RAM User Data / BDs + 600 6FF 256 Bytes Dual-Port RAM User Data / BDs + 700 7FF 256 Bytes Dual-Port RAM User Data / BDs + C00 CBF 192 Bytes Dual-Port RAM Parameter RAM Page 1 + D00 DBF 192 Bytes Dual-Port RAM Parameter RAM Page 2 + E00 EBF 192 Bytes Dual-Port RAM Parameter RAM Page 3 + F00 FBF 192 Bytes Dual-Port RAM Parameter RAM Page 4 + + local interrupts level + NMI 7 + PIT timer, CPM (RX/TX complete) 4 + PCI9060 DMA and PCI doorbells 3 + Cable - not used 1 +*/ + +#include <linux/hdlc.h> +#include <linux/hdlc/ioctl.h> +#include "wanxl.h" + +/* memory addresses and offsets */ + +MAX_RAM_SIZE = 16 * 1024 * 1024 // max RAM supported by hardware + +PCI9060_VECTOR = 0x0000006C +CPM_IRQ_BASE = 0x40 +ERROR_VECTOR = CPM_IRQ_BASE * 4 +SCC1_VECTOR = (CPM_IRQ_BASE + 0x1E) * 4 +SCC2_VECTOR = (CPM_IRQ_BASE + 0x1D) * 4 +SCC3_VECTOR = (CPM_IRQ_BASE + 0x1C) * 4 +SCC4_VECTOR = (CPM_IRQ_BASE + 0x1B) * 4 +CPM_IRQ_LEVEL = 4 +TIMER_IRQ = 128 +TIMER_IRQ_LEVEL = 4 +PITR_CONST = 0x100 + 16 // 1 Hz timer + +MBAR = 0x0003FF00 + +VALUE_WINDOW = 0x40000000 +ORDER_WINDOW = 0xC0000000 + +PLX = 0xFFF90000 + +CSRA = 0xFFFB0000 +CSRB = 0xFFFB0002 +CSRC = 0xFFFB0004 +CSRD = 0xFFFB0006 +STATUS_CABLE_LL = 0x2000 +STATUS_CABLE_DTR = 0x1000 + +DPRBASE = 0xFFFC0000 + +SCC1_BASE = DPRBASE + 0xC00 +MISC_BASE = DPRBASE + 0xCB0 +SCC2_BASE = DPRBASE + 0xD00 +SCC3_BASE = DPRBASE + 0xE00 +SCC4_BASE = DPRBASE + 0xF00 + +// offset from SCCx_BASE +// SCC_xBASE contain offsets from DPRBASE and must be divisible by 8 +SCC_RBASE = 0 // 16-bit RxBD base address +SCC_TBASE = 2 // 16-bit TxBD base address +SCC_RFCR = 4 // 8-bit Rx function code +SCC_TFCR = 5 // 8-bit Tx function code +SCC_MRBLR = 6 // 16-bit maximum Rx buffer length +SCC_C_MASK = 0x34 // 32-bit CRC constant +SCC_C_PRES = 0x38 // 32-bit CRC preset +SCC_MFLR = 0x46 // 16-bit max Rx frame length (without flags) + +REGBASE = DPRBASE + 0x1000 +PICR = REGBASE + 0x026 // 16-bit periodic irq control +PITR = REGBASE + 0x02A // 16-bit periodic irq timing +OR1 = REGBASE + 0x064 // 32-bit RAM bank #1 options +CICR = REGBASE + 0x540 // 32(24)-bit CP interrupt config +CIMR = REGBASE + 0x548 // 32-bit CP interrupt mask +CISR = REGBASE + 0x54C // 32-bit CP interrupts in-service +PADIR = REGBASE + 0x550 // 16-bit PortA data direction bitmap +PAPAR = REGBASE + 0x552 // 16-bit PortA pin assignment bitmap +PAODR = REGBASE + 0x554 // 16-bit PortA open drain bitmap +PADAT = REGBASE + 0x556 // 16-bit PortA data register + +PCDIR = REGBASE + 0x560 // 16-bit PortC data direction bitmap +PCPAR = REGBASE + 0x562 // 16-bit PortC pin assignment bitmap +PCSO = REGBASE + 0x564 // 16-bit PortC special options +PCDAT = REGBASE + 0x566 // 16-bit PortC data register +PCINT = REGBASE + 0x568 // 16-bit PortC interrupt control +CR = REGBASE + 0x5C0 // 16-bit Command register + +SCC1_REGS = REGBASE + 0x600 +SCC2_REGS = REGBASE + 0x620 +SCC3_REGS = REGBASE + 0x640 +SCC4_REGS = REGBASE + 0x660 +SICR = REGBASE + 0x6EC // 32-bit SI clock route + +// offset from SCCx_REGS +SCC_GSMR_L = 0x00 // 32 bits +SCC_GSMR_H = 0x04 // 32 bits +SCC_PSMR = 0x08 // 16 bits +SCC_TODR = 0x0C // 16 bits +SCC_DSR = 0x0E // 16 bits +SCC_SCCE = 0x10 // 16 bits +SCC_SCCM = 0x14 // 16 bits +SCC_SCCS = 0x17 // 8 bits + +#if QUICC_MEMCPY_USES_PLX + .macro memcpy_from_pci src, dest, len // len must be < 8 MB + addl #3, \len + andl #0xFFFFFFFC, \len // always copy n * 4 bytes + movel \src, PLX_DMA_0_PCI + movel \dest, PLX_DMA_0_LOCAL + movel \len, PLX_DMA_0_LENGTH + movel #0x0103, PLX_DMA_CMD_STS // start 
channel 0 transfer + bsr memcpy_from_pci_run + .endm + + .macro memcpy_to_pci src, dest, len + addl #3, \len + andl #0xFFFFFFFC, \len // always copy n * 4 bytes + movel \src, PLX_DMA_1_LOCAL + movel \dest, PLX_DMA_1_PCI + movel \len, PLX_DMA_1_LENGTH + movel #0x0301, PLX_DMA_CMD_STS // start channel 1 transfer + bsr memcpy_to_pci_run + .endm + +#else + + .macro memcpy src, dest, len // len must be < 65536 bytes + movel %d7, -(%sp) // src and dest must be < 256 MB + movel \len, %d7 // bits 0 and 1 + lsrl #2, \len + andl \len, \len + beq 99f // only 0 - 3 bytes + subl #1, \len // for dbf +98: movel (\src)+, (\dest)+ + dbfw \len, 98b +99: movel %d7, \len + btstl #1, \len + beq 99f + movew (\src)+, (\dest)+ +99: btstl #0, \len + beq 99f + moveb (\src)+, (\dest)+ +99: + movel (%sp)+, %d7 + .endm + + .macro memcpy_from_pci src, dest, len + addl #VALUE_WINDOW, \src + memcpy \src, \dest, \len + .endm + + .macro memcpy_to_pci src, dest, len + addl #VALUE_WINDOW, \dest + memcpy \src, \dest, \len + .endm +#endif + + + .macro wait_for_command +99: btstl #0, CR + bne 99b + .endm + + + + +/****************************** card initialization *******************/ + .text + .global _start +_start: bra init + + .org _start + 4 +ch_status_addr: .long 0, 0, 0, 0 +rx_descs_addr: .long 0 + +init: +#if DETECT_RAM + movel OR1, %d0 + andl #0xF00007FF, %d0 // mask AMxx bits + orl #0xFFFF800 & ~(MAX_RAM_SIZE - 1), %d0 // update RAM bank size + movel %d0, OR1 +#endif + + addl #VALUE_WINDOW, rx_descs_addr // PCI addresses of shared data + clrl %d0 // D0 = 4 * port +init_1: tstl ch_status_addr(%d0) + beq init_2 + addl #VALUE_WINDOW, ch_status_addr(%d0) +init_2: addl #4, %d0 + cmpl #4 * 4, %d0 + bne init_1 + + movel #pci9060_interrupt, PCI9060_VECTOR + movel #error_interrupt, ERROR_VECTOR + movel #port_interrupt_1, SCC1_VECTOR + movel #port_interrupt_2, SCC2_VECTOR + movel #port_interrupt_3, SCC3_VECTOR + movel #port_interrupt_4, SCC4_VECTOR + movel #timer_interrupt, TIMER_IRQ * 4 + + movel #0x78000000, CIMR // only SCCx IRQs from CPM + movew #(TIMER_IRQ_LEVEL << 8) + TIMER_IRQ, PICR // interrupt from PIT + movew #PITR_CONST, PITR + + // SCC1=SCCa SCC2=SCCb SCC3=SCCc SCC4=SCCd prio=4 HP=-1 IRQ=64-79 + movel #0xD41F40 + (CPM_IRQ_LEVEL << 13), CICR + movel #0x543, PLX_DMA_0_MODE // 32-bit, Ready, Burst, IRQ + movel #0x543, PLX_DMA_1_MODE + movel #0x0, PLX_DMA_0_DESC // from PCI to local + movel #0x8, PLX_DMA_1_DESC // from local to PCI + movel #0x101, PLX_DMA_CMD_STS // enable both DMA channels + // enable local IRQ, DMA, doorbells and PCI IRQ + orl #0x000F0300, PLX_INTERRUPT_CS + +#if DETECT_RAM + bsr ram_test +#else + movel #1, PLX_MAILBOX_5 // non-zero value = init complete +#endif + bsr check_csr + + movew #0xFFFF, PAPAR // all pins are clocks/data + clrw PADIR // first function + clrw PCSO // CD and CTS always active + + +/****************************** main loop *****************************/ + +main: movel channel_stats, %d7 // D7 = doorbell + irq status + clrl channel_stats + + tstl %d7 + bne main_1 + // nothing to do - wait for next event + stop #0x2200 // supervisor + IRQ level 2 + movew #0x2700, %sr // disable IRQs again + bra main + +main_1: clrl %d0 // D0 = 4 * port + clrl %d6 // D6 = doorbell to host value + +main_l: btstl #DOORBELL_TO_CARD_CLOSE_0, %d7 + beq main_op + bclrl #DOORBELL_TO_CARD_OPEN_0, %d7 // in case both bits are set + bsr close_port +main_op: + btstl #DOORBELL_TO_CARD_OPEN_0, %d7 + beq main_cl + bsr open_port +main_cl: + btstl #DOORBELL_TO_CARD_TX_0, %d7 + beq main_txend + bsr tx +main_txend: 
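+// TASK_SCC_0 is bit 12, set for a port by its SCC interrupt handler;
+// the test below works for every port because %d7 is shifted right by
+// one at main_next, so port N's bits line up with port #0's positions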
+ btstl #TASK_SCC_0, %d7 + beq main_next + bsr tx_end + bsr rx + +main_next: + lsrl #1, %d7 // port status for next port + addl #4, %d0 // D0 = 4 * next port + cmpl #4 * 4, %d0 + bne main_l + movel %d6, PLX_DOORBELL_FROM_CARD // signal the host + bra main + + +/****************************** open port *****************************/ + +open_port: // D0 = 4 * port, D6 = doorbell to host + movel ch_status_addr(%d0), %a0 // A0 = port status address + tstl STATUS_OPEN(%a0) + bne open_port_ret // port already open + movel #1, STATUS_OPEN(%a0) // confirm the port is open +// setup BDs + clrl tx_in(%d0) + clrl tx_out(%d0) + clrl tx_count(%d0) + clrl rx_in(%d0) + + movel SICR, %d1 // D1 = clock settings in SICR + andl clocking_mask(%d0), %d1 + cmpl #CLOCK_TXFROMRX, STATUS_CLOCKING(%a0) + bne open_port_clock_ext + orl clocking_txfromrx(%d0), %d1 + bra open_port_set_clock + +open_port_clock_ext: + orl clocking_ext(%d0), %d1 +open_port_set_clock: + movel %d1, SICR // update clock settings in SICR + + orw #STATUS_CABLE_DTR, csr_output(%d0) // DTR on + bsr check_csr // call with disabled timer interrupt + +// Setup TX descriptors + movel first_buffer(%d0), %d1 // D1 = starting buffer address + movel tx_first_bd(%d0), %a1 // A1 = starting TX BD address + movel #TX_BUFFERS - 2, %d2 // D2 = TX_BUFFERS - 1 counter + movel #0x18000000, %d3 // D3 = initial TX BD flags: Int + Last + cmpl #PARITY_NONE, STATUS_PARITY(%a0) + beq open_port_tx_loop + bsetl #26, %d3 // TX BD flag: Transmit CRC +open_port_tx_loop: + movel %d3, (%a1)+ // TX flags + length + movel %d1, (%a1)+ // buffer address + addl #BUFFER_LENGTH, %d1 + dbfw %d2, open_port_tx_loop + + bsetl #29, %d3 // TX BD flag: Wrap (last BD) + movel %d3, (%a1)+ // Final TX flags + length + movel %d1, (%a1)+ // buffer address + +// Setup RX descriptors // A1 = starting RX BD address + movel #RX_BUFFERS - 2, %d2 // D2 = RX_BUFFERS - 1 counter +open_port_rx_loop: + movel #0x90000000, (%a1)+ // RX flags + length + movel %d1, (%a1)+ // buffer address + addl #BUFFER_LENGTH, %d1 + dbfw %d2, open_port_rx_loop + + movel #0xB0000000, (%a1)+ // Final RX flags + length + movel %d1, (%a1)+ // buffer address + +// Setup port parameters + movel scc_base_addr(%d0), %a1 // A1 = SCC_BASE address + movel scc_reg_addr(%d0), %a2 // A2 = SCC_REGS address + + movel #0xFFFF, SCC_SCCE(%a2) // clear status bits + movel #0x0000, SCC_SCCM(%a2) // interrupt mask + + movel tx_first_bd(%d0), %d1 + movew %d1, SCC_TBASE(%a1) // D1 = offset of first TxBD + addl #TX_BUFFERS * 8, %d1 + movew %d1, SCC_RBASE(%a1) // D1 = offset of first RxBD + moveb #0x8, SCC_RFCR(%a1) // Intel mode, 1000 + moveb #0x8, SCC_TFCR(%a1) + +// Parity settings + cmpl #PARITY_CRC16_PR1_CCITT, STATUS_PARITY(%a0) + bne open_port_parity_1 + clrw SCC_PSMR(%a2) // CRC16-CCITT + movel #0xF0B8, SCC_C_MASK(%a1) + movel #0xFFFF, SCC_C_PRES(%a1) + movew #HDLC_MAX_MRU + 2, SCC_MFLR(%a1) // 2 bytes for CRC + movew #2, parity_bytes(%d0) + bra open_port_2 + +open_port_parity_1: + cmpl #PARITY_CRC32_PR1_CCITT, STATUS_PARITY(%a0) + bne open_port_parity_2 + movew #0x0800, SCC_PSMR(%a2) // CRC32-CCITT + movel #0xDEBB20E3, SCC_C_MASK(%a1) + movel #0xFFFFFFFF, SCC_C_PRES(%a1) + movew #HDLC_MAX_MRU + 4, SCC_MFLR(%a1) // 4 bytes for CRC + movew #4, parity_bytes(%d0) + bra open_port_2 + +open_port_parity_2: + cmpl #PARITY_CRC16_PR0_CCITT, STATUS_PARITY(%a0) + bne open_port_parity_3 + clrw SCC_PSMR(%a2) // CRC16-CCITT preset 0 + movel #0xF0B8, SCC_C_MASK(%a1) + clrl SCC_C_PRES(%a1) + movew #HDLC_MAX_MRU + 2, SCC_MFLR(%a1) // 2 bytes for CRC + 
movew #2, parity_bytes(%d0) + bra open_port_2 + +open_port_parity_3: + cmpl #PARITY_CRC32_PR0_CCITT, STATUS_PARITY(%a0) + bne open_port_parity_4 + movew #0x0800, SCC_PSMR(%a2) // CRC32-CCITT preset 0 + movel #0xDEBB20E3, SCC_C_MASK(%a1) + clrl SCC_C_PRES(%a1) + movew #HDLC_MAX_MRU + 4, SCC_MFLR(%a1) // 4 bytes for CRC + movew #4, parity_bytes(%d0) + bra open_port_2 + +open_port_parity_4: + clrw SCC_PSMR(%a2) // no parity + movel #0xF0B8, SCC_C_MASK(%a1) + movel #0xFFFF, SCC_C_PRES(%a1) + movew #HDLC_MAX_MRU, SCC_MFLR(%a1) // 0 bytes for CRC + clrw parity_bytes(%d0) + +open_port_2: + movel #0x00000003, SCC_GSMR_H(%a2) // RTSM + cmpl #ENCODING_NRZI, STATUS_ENCODING(%a0) + bne open_port_nrz + movel #0x10040900, SCC_GSMR_L(%a2) // NRZI: TCI Tend RECN+TENC=1 + bra open_port_3 + +open_port_nrz: + movel #0x10040000, SCC_GSMR_L(%a2) // NRZ: TCI Tend RECN+TENC=0 +open_port_3: + movew #BUFFER_LENGTH, SCC_MRBLR(%a1) + movel %d0, %d1 + lsll #4, %d1 // D1 bits 7 and 6 = port + orl #1, %d1 + movew %d1, CR // Init SCC RX and TX params + wait_for_command + + // TCI Tend ENR ENT + movew #0x001F, SCC_SCCM(%a2) // TXE RXF BSY TXB RXB interrupts + orl #0x00000030, SCC_GSMR_L(%a2) // enable SCC +open_port_ret: + rts + + +/****************************** close port ****************************/ + +close_port: // D0 = 4 * port, D6 = doorbell to host + movel scc_reg_addr(%d0), %a0 // A0 = SCC_REGS address + clrw SCC_SCCM(%a0) // no SCC interrupts + andl #0xFFFFFFCF, SCC_GSMR_L(%a0) // Disable ENT and ENR + + andw #~STATUS_CABLE_DTR, csr_output(%d0) // DTR off + bsr check_csr // call with disabled timer interrupt + + movel ch_status_addr(%d0), %d1 + clrl STATUS_OPEN(%d1) // confirm the port is closed + rts + + +/****************************** transmit packet ***********************/ +// queue packets for transmission +tx: // D0 = 4 * port, D6 = doorbell to host + cmpl #TX_BUFFERS, tx_count(%d0) + beq tx_ret // all DB's = descs in use + + movel tx_out(%d0), %d1 + movel %d1, %d2 // D1 = D2 = tx_out BD# = desc# + mulul #DESC_LENGTH, %d2 // D2 = TX desc offset + addl ch_status_addr(%d0), %d2 + addl #STATUS_TX_DESCS, %d2 // D2 = TX desc address + cmpl #PACKET_FULL, (%d2) // desc status + bne tx_ret + +// queue it + movel 4(%d2), %a0 // PCI address + lsll #3, %d1 // BD is 8-bytes long + addl tx_first_bd(%d0), %d1 // D1 = current tx_out BD addr + + movel 4(%d1), %a1 // A1 = dest address + movel 8(%d2), %d2 // D2 = length + movew %d2, 2(%d1) // length into BD + memcpy_from_pci %a0, %a1, %d2 + bsetl #31, (%d1) // CP go ahead + +// update tx_out and tx_count + movel tx_out(%d0), %d1 + addl #1, %d1 + cmpl #TX_BUFFERS, %d1 + bne tx_1 + clrl %d1 +tx_1: movel %d1, tx_out(%d0) + + addl #1, tx_count(%d0) + bra tx + +tx_ret: rts + + +/****************************** packet received ***********************/ + +// Service receive buffers // D0 = 4 * port, D6 = doorbell to host +rx: movel rx_in(%d0), %d1 // D1 = rx_in BD# + lsll #3, %d1 // BD is 8-bytes long + addl rx_first_bd(%d0), %d1 // D1 = current rx_in BD address + movew (%d1), %d2 // D2 = RX BD flags + btstl #15, %d2 + bne rx_ret // BD still empty + + btstl #1, %d2 + bne rx_overrun + + tstw parity_bytes(%d0) + bne rx_parity + bclrl #2, %d2 // do not test for CRC errors +rx_parity: + andw #0x0CBC, %d2 // mask status bits + cmpw #0x0C00, %d2 // correct frame + bne rx_bad_frame + clrl %d3 + movew 2(%d1), %d3 + subw parity_bytes(%d0), %d3 // D3 = packet length + cmpw #HDLC_MAX_MRU, %d3 + bgt rx_bad_frame + +rx_good_frame: + movel rx_out, %d2 + mulul #DESC_LENGTH, %d2 + addl 
rx_descs_addr, %d2 // D2 = RX desc address + cmpl #PACKET_EMPTY, (%d2) // desc stat + bne rx_overrun + + movel %d3, 8(%d2) + movel 4(%d1), %a0 // A0 = source address + movel 4(%d2), %a1 + tstl %a1 + beq rx_ignore_data + memcpy_to_pci %a0, %a1, %d3 +rx_ignore_data: + movel packet_full(%d0), (%d2) // update desc stat + +// update D6 and rx_out + bsetl #DOORBELL_FROM_CARD_RX, %d6 // signal host that RX completed + movel rx_out, %d2 + addl #1, %d2 + cmpl #RX_QUEUE_LENGTH, %d2 + bne rx_1 + clrl %d2 +rx_1: movel %d2, rx_out + +rx_free_bd: + andw #0xF000, (%d1) // clear CM and error bits + bsetl #31, (%d1) // free BD +// update rx_in + movel rx_in(%d0), %d1 + addl #1, %d1 + cmpl #RX_BUFFERS, %d1 + bne rx_2 + clrl %d1 +rx_2: movel %d1, rx_in(%d0) + bra rx + +rx_overrun: + movel ch_status_addr(%d0), %d2 + addl #1, STATUS_RX_OVERRUNS(%d2) + bra rx_free_bd + +rx_bad_frame: + movel ch_status_addr(%d0), %d2 + addl #1, STATUS_RX_FRAME_ERRORS(%d2) + bra rx_free_bd + +rx_ret: rts + + +/****************************** packet transmitted ********************/ + +// Service transmit buffers // D0 = 4 * port, D6 = doorbell to host +tx_end: tstl tx_count(%d0) + beq tx_end_ret // TX buffers already empty + + movel tx_in(%d0), %d1 + movel %d1, %d2 // D1 = D2 = tx_in BD# = desc# + lsll #3, %d1 // BD is 8-bytes long + addl tx_first_bd(%d0), %d1 // D1 = current tx_in BD address + movew (%d1), %d3 // D3 = TX BD flags + btstl #15, %d3 + bne tx_end_ret // BD still being transmitted + +// update D6, tx_in and tx_count + orl bell_tx(%d0), %d6 // signal host that TX desc freed + subl #1, tx_count(%d0) + movel tx_in(%d0), %d1 + addl #1, %d1 + cmpl #TX_BUFFERS, %d1 + bne tx_end_1 + clrl %d1 +tx_end_1: + movel %d1, tx_in(%d0) + +// free host's descriptor + mulul #DESC_LENGTH, %d2 // D2 = TX desc offset + addl ch_status_addr(%d0), %d2 + addl #STATUS_TX_DESCS, %d2 // D2 = TX desc address + btstl #1, %d3 + bne tx_end_underrun + movel #PACKET_SENT, (%d2) + bra tx_end + +tx_end_underrun: + movel #PACKET_UNDERRUN, (%d2) + bra tx_end + +tx_end_ret: rts + + +/****************************** PLX PCI9060 DMA memcpy ****************/ + +#if QUICC_MEMCPY_USES_PLX +// called with interrupts disabled +memcpy_from_pci_run: + movel %d0, -(%sp) + movew %sr, -(%sp) +memcpy_1: + movel PLX_DMA_CMD_STS, %d0 // do not btst PLX register directly + btstl #4, %d0 // transfer done? + bne memcpy_end + stop #0x2200 // enable PCI9060 interrupts + movew #0x2700, %sr // disable interrupts again + bra memcpy_1 + +memcpy_to_pci_run: + movel %d0, -(%sp) + movew %sr, -(%sp) +memcpy_2: + movel PLX_DMA_CMD_STS, %d0 // do not btst PLX register directly + btstl #12, %d0 // transfer done? 
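+// (channel 0 polls bit 4 of PLX_DMA_CMD_STS, channel 1 polls bit 12;
+// while the transfer is still running, the stop below sleeps at IRQ
+// level 2 so the level-3 PCI9060 DMA interrupt can wake the CPU)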
+ bne memcpy_end + stop #0x2200 // enable PCI9060 interrupts + movew #0x2700, %sr // disable interrupts again + bra memcpy_2 + +memcpy_end: + movew (%sp)+, %sr + movel (%sp)+, %d0 + rts +#endif + + + + + + +/****************************** PLX PCI9060 interrupt *****************/ + +pci9060_interrupt: + movel %d0, -(%sp) + + movel PLX_DOORBELL_TO_CARD, %d0 + movel %d0, PLX_DOORBELL_TO_CARD // confirm all requests + orl %d0, channel_stats + + movel #0x0909, PLX_DMA_CMD_STS // clear DMA ch #0 and #1 interrupts + + movel (%sp)+, %d0 + rte + +/****************************** SCC interrupts ************************/ + +port_interrupt_1: + orl #0, SCC1_REGS + SCC_SCCE; // confirm SCC events + orl #1 << TASK_SCC_0, channel_stats + movel #0x40000000, CISR + rte + +port_interrupt_2: + orl #0, SCC2_REGS + SCC_SCCE; // confirm SCC events + orl #1 << TASK_SCC_1, channel_stats + movel #0x20000000, CISR + rte + +port_interrupt_3: + orl #0, SCC3_REGS + SCC_SCCE; // confirm SCC events + orl #1 << TASK_SCC_2, channel_stats + movel #0x10000000, CISR + rte + +port_interrupt_4: + orl #0, SCC4_REGS + SCC_SCCE; // confirm SCC events + orl #1 << TASK_SCC_3, channel_stats + movel #0x08000000, CISR + rte + +error_interrupt: + rte + + +/****************************** cable and PM routine ******************/ +// modified registers: none +check_csr: + movel %d0, -(%sp) + movel %d1, -(%sp) + movel %d2, -(%sp) + movel %a0, -(%sp) + movel %a1, -(%sp) + + clrl %d0 // D0 = 4 * port + movel #CSRA, %a0 // A0 = CSR address + +check_csr_loop: + movew (%a0), %d1 // D1 = CSR input bits + andl #0xE7, %d1 // PM and cable sense bits (no DCE bit) + cmpw #STATUS_CABLE_V35 * (1 + 1 << STATUS_CABLE_PM_SHIFT), %d1 + bne check_csr_1 + movew #0x0E08, %d1 + bra check_csr_valid + +check_csr_1: + cmpw #STATUS_CABLE_X21 * (1 + 1 << STATUS_CABLE_PM_SHIFT), %d1 + bne check_csr_2 + movew #0x0408, %d1 + bra check_csr_valid + +check_csr_2: + cmpw #STATUS_CABLE_V24 * (1 + 1 << STATUS_CABLE_PM_SHIFT), %d1 + bne check_csr_3 + movew #0x0208, %d1 + bra check_csr_valid + +check_csr_3: + cmpw #STATUS_CABLE_EIA530 * (1 + 1 << STATUS_CABLE_PM_SHIFT), %d1 + bne check_csr_disable + movew #0x0D08, %d1 + bra check_csr_valid + +check_csr_disable: + movew #0x0008, %d1 // D1 = disable everything + movew #0x80E7, %d2 // D2 = input mask: ignore DSR + bra check_csr_write + +check_csr_valid: // D1 = mode and IRQ bits + movew csr_output(%d0), %d2 + andw #0x3000, %d2 // D2 = requested LL and DTR bits + orw %d2, %d1 // D1 = all requested output bits + movew #0x80FF, %d2 // D2 = input mask: include DSR + +check_csr_write: + cmpw old_csr_output(%d0), %d1 + beq check_csr_input + movew %d1, old_csr_output(%d0) + movew %d1, (%a0) // Write CSR output bits + +check_csr_input: + movew (PCDAT), %d1 + andw dcd_mask(%d0), %d1 + beq check_csr_dcd_on // DCD and CTS signals are negated + movew (%a0), %d1 // D1 = CSR input bits + andw #~STATUS_CABLE_DCD, %d1 // DCD off + bra check_csr_previous + +check_csr_dcd_on: + movew (%a0), %d1 // D1 = CSR input bits + orw #STATUS_CABLE_DCD, %d1 // DCD on +check_csr_previous: + andw %d2, %d1 // input mask + movel ch_status_addr(%d0), %a1 + cmpl STATUS_CABLE(%a1), %d1 // check for change + beq check_csr_next + movel %d1, STATUS_CABLE(%a1) // update status + movel bell_cable(%d0), PLX_DOORBELL_FROM_CARD // signal the host + +check_csr_next: + addl #2, %a0 // next CSR register + addl #4, %d0 // D0 = 4 * next port + cmpl #4 * 4, %d0 + bne check_csr_loop + + movel (%sp)+, %a1 + movel (%sp)+, %a0 + movel (%sp)+, %d2 + movel (%sp)+, %d1 + movel (%sp)+, 
%d0 + rts + + +/****************************** timer interrupt ***********************/ + +timer_interrupt: + bsr check_csr + rte + + +/****************************** RAM sizing and test *******************/ +#if DETECT_RAM +ram_test: + movel #0x12345678, %d1 // D1 = test value + movel %d1, (128 * 1024 - 4) + movel #128 * 1024, %d0 // D0 = RAM size tested +ram_test_size: + cmpl #MAX_RAM_SIZE, %d0 + beq ram_test_size_found + movel %d0, %a0 + addl #128 * 1024 - 4, %a0 + cmpl (%a0), %d1 + beq ram_test_size_check +ram_test_next_size: + lsll #1, %d0 + bra ram_test_size + +ram_test_size_check: + eorl #0xFFFFFFFF, %d1 + movel %d1, (128 * 1024 - 4) + cmpl (%a0), %d1 + bne ram_test_next_size + +ram_test_size_found: // D0 = RAM size + movel %d0, %a0 // A0 = fill ptr + subl #firmware_end + 4, %d0 + lsrl #2, %d0 + movel %d0, %d1 // D1 = DBf counter +ram_test_fill: + movel %a0, -(%a0) + dbfw %d1, ram_test_fill + subl #0x10000, %d1 + cmpl #0xFFFFFFFF, %d1 + bne ram_test_fill + +ram_test_loop: // D0 = DBf counter + cmpl (%a0)+, %a0 + dbnew %d0, ram_test_loop + bne ram_test_found_bad + subl #0x10000, %d0 + cmpl #0xFFFFFFFF, %d0 + bne ram_test_loop + bra ram_test_all_ok + +ram_test_found_bad: + subl #4, %a0 +ram_test_all_ok: + movel %a0, PLX_MAILBOX_5 + rts +#endif + + +/****************************** constants *****************************/ + +scc_reg_addr: + .long SCC1_REGS, SCC2_REGS, SCC3_REGS, SCC4_REGS +scc_base_addr: + .long SCC1_BASE, SCC2_BASE, SCC3_BASE, SCC4_BASE + +tx_first_bd: + .long DPRBASE + .long DPRBASE + (TX_BUFFERS + RX_BUFFERS) * 8 + .long DPRBASE + (TX_BUFFERS + RX_BUFFERS) * 8 * 2 + .long DPRBASE + (TX_BUFFERS + RX_BUFFERS) * 8 * 3 + +rx_first_bd: + .long DPRBASE + TX_BUFFERS * 8 + .long DPRBASE + TX_BUFFERS * 8 + (TX_BUFFERS + RX_BUFFERS) * 8 + .long DPRBASE + TX_BUFFERS * 8 + (TX_BUFFERS + RX_BUFFERS) * 8 * 2 + .long DPRBASE + TX_BUFFERS * 8 + (TX_BUFFERS + RX_BUFFERS) * 8 * 3 + +first_buffer: + .long BUFFERS_ADDR + .long BUFFERS_ADDR + (TX_BUFFERS + RX_BUFFERS) * BUFFER_LENGTH + .long BUFFERS_ADDR + (TX_BUFFERS + RX_BUFFERS) * BUFFER_LENGTH * 2 + .long BUFFERS_ADDR + (TX_BUFFERS + RX_BUFFERS) * BUFFER_LENGTH * 3 + +bell_tx: + .long 1 << DOORBELL_FROM_CARD_TX_0, 1 << DOORBELL_FROM_CARD_TX_1 + .long 1 << DOORBELL_FROM_CARD_TX_2, 1 << DOORBELL_FROM_CARD_TX_3 + +bell_cable: + .long 1 << DOORBELL_FROM_CARD_CABLE_0, 1 << DOORBELL_FROM_CARD_CABLE_1 + .long 1 << DOORBELL_FROM_CARD_CABLE_2, 1 << DOORBELL_FROM_CARD_CABLE_3 + +packet_full: + .long PACKET_FULL, PACKET_FULL + 1, PACKET_FULL + 2, PACKET_FULL + 3 + +clocking_ext: + .long 0x0000002C, 0x00003E00, 0x002C0000, 0x3E000000 +clocking_txfromrx: + .long 0x0000002D, 0x00003F00, 0x002D0000, 0x3F000000 +clocking_mask: + .long 0x000000FF, 0x0000FF00, 0x00FF0000, 0xFF000000 +dcd_mask: + .word 0x020, 0, 0x080, 0, 0x200, 0, 0x800 + + .ascii "wanXL firmware\n" + .asciz "Copyright (C) 2003 Krzysztof Halasa <khc@pm.waw.pl>\n" + + +/****************************** variables *****************************/ + + .align 4 +channel_stats: .long 0 + +tx_in: .long 0, 0, 0, 0 // transmitted +tx_out: .long 0, 0, 0, 0 // received from host for transmission +tx_count: .long 0, 0, 0, 0 // currently in transmit queue + +rx_in: .long 0, 0, 0, 0 // received from port +rx_out: .long 0 // transmitted to host +parity_bytes: .word 0, 0, 0, 0, 0, 0, 0 // only 4 words are used + +csr_output: .word 0 +old_csr_output: .word 0, 0, 0, 0, 0, 0, 0 + .align 4 +firmware_end: // must be dword-aligned diff --git a/drivers/net/wan/wanxlfw.inc_shipped 
b/drivers/net/wan/wanxlfw.inc_shipped new file mode 100644 index 000000000..73da688f9 --- /dev/null +++ b/drivers/net/wan/wanxlfw.inc_shipped @@ -0,0 +1,158 @@ +static u8 firmware[]={ +0x60,0x00,0x00,0x16,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x06,0xB9,0x40,0x00,0x00,0x00,0x00,0x00, +0x10,0x14,0x42,0x80,0x4A,0xB0,0x09,0xB0,0x00,0x00,0x10,0x04,0x67,0x00,0x00,0x0E, +0x06,0xB0,0x40,0x00,0x00,0x00,0x09,0xB0,0x00,0x00,0x10,0x04,0x58,0x80,0x0C,0x80, +0x00,0x00,0x00,0x10,0x66,0x00,0xFF,0xDE,0x21,0xFC,0x00,0x00,0x16,0xBC,0x00,0x6C, +0x21,0xFC,0x00,0x00,0x17,0x5E,0x01,0x00,0x21,0xFC,0x00,0x00,0x16,0xDE,0x01,0x78, +0x21,0xFC,0x00,0x00,0x16,0xFE,0x01,0x74,0x21,0xFC,0x00,0x00,0x17,0x1E,0x01,0x70, +0x21,0xFC,0x00,0x00,0x17,0x3E,0x01,0x6C,0x21,0xFC,0x00,0x00,0x18,0x4C,0x02,0x00, +0x23,0xFC,0x78,0x00,0x00,0x00,0xFF,0xFC,0x15,0x48,0x33,0xFC,0x04,0x80,0xFF,0xFC, +0x10,0x26,0x33,0xFC,0x01,0x10,0xFF,0xFC,0x10,0x2A,0x23,0xFC,0x00,0xD4,0x9F,0x40, +0xFF,0xFC,0x15,0x40,0x23,0xFC,0x00,0x00,0x05,0x43,0xFF,0xF9,0x01,0x00,0x23,0xFC, +0x00,0x00,0x05,0x43,0xFF,0xF9,0x01,0x14,0x23,0xFC,0x00,0x00,0x00,0x00,0xFF,0xF9, +0x01,0x10,0x23,0xFC,0x00,0x00,0x00,0x08,0xFF,0xF9,0x01,0x24,0x23,0xFC,0x00,0x00, +0x01,0x01,0xFF,0xF9,0x01,0x28,0x00,0xB9,0x00,0x0F,0x03,0x00,0xFF,0xF9,0x00,0xE8, +0x23,0xFC,0x00,0x00,0x00,0x01,0xFF,0xF9,0x00,0xD4,0x61,0x00,0x06,0x74,0x33,0xFC, +0xFF,0xFF,0xFF,0xFC,0x15,0x52,0x42,0x79,0xFF,0xFC,0x15,0x50,0x42,0x79,0xFF,0xFC, +0x15,0x64,0x2E,0x3A,0x08,0x50,0x42,0xB9,0x00,0x00,0x19,0x54,0x4A,0x87,0x66,0x00, +0x00,0x0E,0x4E,0x72,0x22,0x00,0x46,0xFC,0x27,0x00,0x60,0x00,0xFF,0xE6,0x42,0x80, +0x42,0x86,0x08,0x07,0x00,0x04,0x67,0x00,0x00,0x0A,0x08,0x87,0x00,0x00,0x61,0x00, +0x02,0xA0,0x08,0x07,0x00,0x00,0x67,0x00,0x00,0x06,0x61,0x00,0x00,0x36,0x08,0x07, +0x00,0x08,0x67,0x00,0x00,0x06,0x61,0x00,0x02,0xB8,0x08,0x07,0x00,0x0C,0x67,0x00, +0x00,0x0A,0x61,0x00,0x04,0x94,0x61,0x00,0x03,0x60,0xE2,0x8F,0x58,0x80,0x0C,0x80, +0x00,0x00,0x00,0x10,0x66,0x00,0xFF,0xBC,0x23,0xC6,0xFF,0xF9,0x00,0xE4,0x60,0x00, +0xFF,0x92,0x20,0x70,0x09,0xB0,0x00,0x00,0x10,0x04,0x4A,0xA8,0x00,0x00,0x66,0x00, +0x02,0x4E,0x21,0x7C,0x00,0x00,0x00,0x01,0x00,0x00,0x42,0xB0,0x09,0xB0,0x00,0x00, +0x19,0x58,0x42,0xB0,0x09,0xB0,0x00,0x00,0x19,0x68,0x42,0xB0,0x09,0xB0,0x00,0x00, +0x19,0x78,0x42,0xB0,0x09,0xB0,0x00,0x00,0x19,0x88,0x22,0x39,0xFF,0xFC,0x16,0xEC, +0xC2,0xB0,0x09,0xB0,0x00,0x00,0x18,0xF2,0x0C,0xA8,0x00,0x00,0x00,0x04,0x00,0x18, +0x66,0x00,0x00,0x0E,0x82,0xB0,0x09,0xB0,0x00,0x00,0x18,0xE2,0x60,0x00,0x00,0x0A, +0x82,0xB0,0x09,0xB0,0x00,0x00,0x18,0xD2,0x23,0xC1,0xFF,0xFC,0x16,0xEC,0x00,0x70, +0x10,0x00,0x09,0xB0,0x00,0x00,0x19,0xAA,0x61,0x00,0x05,0x76,0x22,0x30,0x09,0xB0, +0x00,0x00,0x18,0x92,0x22,0x70,0x09,0xB0,0x00,0x00,0x18,0x72,0x74,0x08,0x26,0x3C, +0x18,0x00,0x00,0x00,0x0C,0xA8,0x00,0x00,0x00,0x01,0x00,0x10,0x67,0x00,0x00,0x06, +0x08,0xC3,0x00,0x1A,0x22,0xC3,0x22,0xC1,0x06,0x81,0x00,0x00,0x05,0xFC,0x51,0xCA, +0xFF,0xF4,0x08,0xC3,0x00,0x1D,0x22,0xC3,0x22,0xC1,0x74,0x1C,0x22,0xFC,0x90,0x00, +0x00,0x00,0x22,0xC1,0x06,0x81,0x00,0x00,0x05,0xFC,0x51,0xCA,0xFF,0xF0,0x22,0xFC, +0xB0,0x00,0x00,0x00,0x22,0xC1,0x22,0x70,0x09,0xB0,0x00,0x00,0x18,0x62,0x24,0x70, +0x09,0xB0,0x00,0x00,0x18,0x52,0x25,0x7C,0x00,0x00,0xFF,0xFF,0x00,0x10,0x25,0x7C, +0x00,0x00,0x00,0x00,0x00,0x14,0x22,0x30,0x09,0xB0,0x00,0x00,0x18,0x72,0x33,0x41, +0x00,0x02,0x06,0x81,0x00,0x00,0x00,0x50,0x33,0x41,0x00,0x00,0x13,0x7C,0x00,0x08, +0x00,0x04,0x13,0x7C,0x00,0x08,0x00,0x05,0x0C,0xA8,0x00,0x00,0x00,0x05,0x00,0x10, 
+0x66,0x00,0x00,0x2A,0x42,0x6A,0x00,0x08,0x23,0x7C,0x00,0x00,0xF0,0xB8,0x00,0x34, +0x23,0x7C,0x00,0x00,0xFF,0xFF,0x00,0x38,0x33,0x7C,0x05,0xFA,0x00,0x46,0x31,0xBC, +0x00,0x02,0x09,0xB0,0x00,0x00,0x19,0x9C,0x60,0x00,0x00,0xBC,0x0C,0xA8,0x00,0x00, +0x00,0x07,0x00,0x10,0x66,0x00,0x00,0x2C,0x35,0x7C,0x08,0x00,0x00,0x08,0x23,0x7C, +0xDE,0xBB,0x20,0xE3,0x00,0x34,0x23,0x7C,0xFF,0xFF,0xFF,0xFF,0x00,0x38,0x33,0x7C, +0x05,0xFC,0x00,0x46,0x31,0xBC,0x00,0x04,0x09,0xB0,0x00,0x00,0x19,0x9C,0x60,0x00, +0x00,0x86,0x0C,0xA8,0x00,0x00,0x00,0x04,0x00,0x10,0x66,0x00,0x00,0x26,0x42,0x6A, +0x00,0x08,0x23,0x7C,0x00,0x00,0xF0,0xB8,0x00,0x34,0x42,0xA9,0x00,0x38,0x33,0x7C, +0x05,0xFA,0x00,0x46,0x31,0xBC,0x00,0x02,0x09,0xB0,0x00,0x00,0x19,0x9C,0x60,0x00, +0x00,0x56,0x0C,0xA8,0x00,0x00,0x00,0x06,0x00,0x10,0x66,0x00,0x00,0x28,0x35,0x7C, +0x08,0x00,0x00,0x08,0x23,0x7C,0xDE,0xBB,0x20,0xE3,0x00,0x34,0x42,0xA9,0x00,0x38, +0x33,0x7C,0x05,0xFC,0x00,0x46,0x31,0xBC,0x00,0x04,0x09,0xB0,0x00,0x00,0x19,0x9C, +0x60,0x00,0x00,0x24,0x42,0x6A,0x00,0x08,0x23,0x7C,0x00,0x00,0xF0,0xB8,0x00,0x34, +0x23,0x7C,0x00,0x00,0xFF,0xFF,0x00,0x38,0x33,0x7C,0x05,0xF8,0x00,0x46,0x42,0x70, +0x09,0xB0,0x00,0x00,0x19,0x9C,0x25,0x7C,0x00,0x00,0x00,0x03,0x00,0x04,0x0C,0xA8, +0x00,0x00,0x00,0x02,0x00,0x14,0x66,0x00,0x00,0x0E,0x25,0x7C,0x10,0x04,0x09,0x00, +0x00,0x00,0x60,0x00,0x00,0x0A,0x25,0x7C,0x10,0x04,0x00,0x00,0x00,0x00,0x33,0x7C, +0x05,0xFC,0x00,0x06,0x22,0x00,0xE9,0x89,0x00,0x81,0x00,0x00,0x00,0x01,0x33,0xC1, +0xFF,0xFC,0x15,0xC0,0x08,0x39,0x00,0x00,0xFF,0xFC,0x15,0xC0,0x66,0x00,0xFF,0xF6, +0x35,0x7C,0x00,0x1F,0x00,0x14,0x00,0xAA,0x00,0x00,0x00,0x30,0x00,0x00,0x4E,0x75, +0x20,0x70,0x09,0xB0,0x00,0x00,0x18,0x52,0x42,0x68,0x00,0x14,0x02,0xA8,0xFF,0xFF, +0xFF,0xCF,0x00,0x00,0x02,0x70,0xEF,0xFF,0x09,0xB0,0x00,0x00,0x19,0xAA,0x61,0x00, +0x03,0x70,0x22,0x30,0x09,0xB0,0x00,0x00,0x10,0x04,0x42,0xB0,0x19,0x90,0x4E,0x75, +0x0C,0xB0,0x00,0x00,0x00,0x0A,0x09,0xB0,0x00,0x00,0x19,0x78,0x67,0x00,0x00,0xA8, +0x22,0x30,0x09,0xB0,0x00,0x00,0x19,0x68,0x24,0x01,0x4C,0x3C,0x20,0x00,0x00,0x00, +0x00,0x0C,0xD4,0xB0,0x09,0xB0,0x00,0x00,0x10,0x04,0x06,0x82,0x00,0x00,0x00,0x1C, +0x0C,0xB0,0x00,0x00,0x00,0x10,0x29,0x90,0x66,0x00,0x00,0x7C,0x20,0x70,0x29,0xA0, +0x00,0x04,0xE7,0x89,0xD2,0xB0,0x09,0xB0,0x00,0x00,0x18,0x72,0x22,0x70,0x19,0xA0, +0x00,0x04,0x24,0x30,0x29,0xA0,0x00,0x08,0x31,0x82,0x19,0xA0,0x00,0x02,0x56,0x82, +0x02,0x82,0xFF,0xFF,0xFF,0xFC,0x23,0xC8,0xFF,0xF9,0x01,0x04,0x23,0xC9,0xFF,0xF9, +0x01,0x08,0x23,0xC2,0xFF,0xF9,0x01,0x0C,0x23,0xFC,0x00,0x00,0x01,0x03,0xFF,0xF9, +0x01,0x28,0x61,0x00,0x01,0xF6,0x08,0xF0,0x00,0x1F,0x19,0x90,0x22,0x30,0x09,0xB0, +0x00,0x00,0x19,0x68,0x52,0x81,0x0C,0x81,0x00,0x00,0x00,0x0A,0x66,0x00,0x00,0x04, +0x42,0x81,0x21,0x81,0x09,0xB0,0x00,0x00,0x19,0x68,0x52,0xB0,0x09,0xB0,0x00,0x00, +0x19,0x78,0x60,0x00,0xFF,0x4C,0x4E,0x75,0x22,0x30,0x09,0xB0,0x00,0x00,0x19,0x88, +0xE7,0x89,0xD2,0xB0,0x09,0xB0,0x00,0x00,0x18,0x82,0x34,0x30,0x19,0x90,0x08,0x02, +0x00,0x0F,0x66,0x00,0x01,0x12,0x08,0x02,0x00,0x01,0x66,0x00,0x00,0xE6,0x4A,0x70, +0x09,0xB0,0x00,0x00,0x19,0x9C,0x66,0x00,0x00,0x06,0x08,0x82,0x00,0x02,0x02,0x42, +0x0C,0xBC,0x0C,0x42,0x0C,0x00,0x66,0x00,0x00,0xDC,0x42,0x83,0x36,0x30,0x19,0xA0, +0x00,0x02,0x96,0x70,0x09,0xB0,0x00,0x00,0x19,0x9C,0x0C,0x43,0x05,0xF8,0x6E,0x00, +0x00,0xC4,0x24,0x3A,0x04,0x84,0x4C,0x3C,0x20,0x00,0x00,0x00,0x00,0x0C,0xD4,0xBA, +0xFA,0xF4,0x0C,0xB0,0x00,0x00,0x00,0x00,0x29,0x90,0x66,0x00,0x00,0x96,0x21,0x83, +0x29,0xA0,0x00,0x08,0x20,0x70,0x19,0xA0,0x00,0x04,0x22,0x70,0x29,0xA0,0x00,0x04, 
+0x4A,0x89,0x67,0x00,0x00,0x2A,0x56,0x83,0x02,0x83,0xFF,0xFF,0xFF,0xFC,0x23,0xC8, +0xFF,0xF9,0x01,0x1C,0x23,0xC9,0xFF,0xF9,0x01,0x18,0x23,0xC3,0xFF,0xF9,0x01,0x20, +0x23,0xFC,0x00,0x00,0x03,0x01,0xFF,0xF9,0x01,0x28,0x61,0x00,0x01,0x2C,0x21,0xB0, +0x09,0xB0,0x00,0x00,0x18,0xC2,0x29,0x90,0x08,0xC6,0x00,0x04,0x24,0x3A,0x04,0x1A, +0x52,0x82,0x0C,0x82,0x00,0x00,0x00,0x28,0x66,0x00,0x00,0x04,0x42,0x82,0x23,0xC2, +0x00,0x00,0x19,0x98,0x02,0x70,0xF0,0x00,0x19,0x90,0x08,0xF0,0x00,0x1F,0x19,0x90, +0x22,0x30,0x09,0xB0,0x00,0x00,0x19,0x88,0x52,0x81,0x0C,0x81,0x00,0x00,0x00,0x1E, +0x66,0x00,0x00,0x04,0x42,0x81,0x21,0x81,0x09,0xB0,0x00,0x00,0x19,0x88,0x60,0x00, +0xFE,0xF8,0x24,0x30,0x09,0xB0,0x00,0x00,0x10,0x04,0x52,0xB0,0x29,0xA0,0x00,0x08, +0x60,0x00,0xFF,0xC2,0x24,0x30,0x09,0xB0,0x00,0x00,0x10,0x04,0x52,0xB0,0x29,0xA0, +0x00,0x0C,0x60,0x00,0xFF,0xB0,0x4E,0x75,0x4A,0xB0,0x09,0xB0,0x00,0x00,0x19,0x78, +0x67,0x00,0x00,0x86,0x22,0x30,0x09,0xB0,0x00,0x00,0x19,0x58,0x24,0x01,0xE7,0x89, +0xD2,0xB0,0x09,0xB0,0x00,0x00,0x18,0x72,0x36,0x30,0x19,0x90,0x08,0x03,0x00,0x0F, +0x66,0x00,0x00,0x66,0x8C,0xB0,0x09,0xB0,0x00,0x00,0x18,0xA2,0x53,0xB0,0x09,0xB0, +0x00,0x00,0x19,0x78,0x22,0x30,0x09,0xB0,0x00,0x00,0x19,0x58,0x52,0x81,0x0C,0x81, +0x00,0x00,0x00,0x0A,0x66,0x00,0x00,0x04,0x42,0x81,0x21,0x81,0x09,0xB0,0x00,0x00, +0x19,0x58,0x4C,0x3C,0x20,0x00,0x00,0x00,0x00,0x0C,0xD4,0xB0,0x09,0xB0,0x00,0x00, +0x10,0x04,0x06,0x82,0x00,0x00,0x00,0x1C,0x08,0x03,0x00,0x01,0x66,0x00,0x00,0x0E, +0x21,0xBC,0x00,0x00,0x00,0x20,0x29,0x90,0x60,0x00,0xFF,0x7E,0x21,0xBC,0x00,0x00, +0x00,0x30,0x29,0x90,0x60,0x00,0xFF,0x72,0x4E,0x75,0x2F,0x00,0x40,0xE7,0x20,0x39, +0xFF,0xF9,0x01,0x28,0x08,0x00,0x00,0x04,0x66,0x00,0x00,0x2C,0x4E,0x72,0x22,0x00, +0x46,0xFC,0x27,0x00,0x60,0x00,0xFF,0xE8,0x2F,0x00,0x40,0xE7,0x20,0x39,0xFF,0xF9, +0x01,0x28,0x08,0x00,0x00,0x0C,0x66,0x00,0x00,0x0E,0x4E,0x72,0x22,0x00,0x46,0xFC, +0x27,0x00,0x60,0x00,0xFF,0xE8,0x46,0xDF,0x20,0x1F,0x4E,0x75,0x2F,0x00,0x20,0x39, +0xFF,0xF9,0x00,0xE0,0x23,0xC0,0xFF,0xF9,0x00,0xE0,0x81,0xB9,0x00,0x00,0x19,0x54, +0x23,0xFC,0x00,0x00,0x09,0x09,0xFF,0xF9,0x01,0x28,0x20,0x1F,0x4E,0x73,0x00,0xB9, +0x00,0x00,0x00,0x00,0xFF,0xFC,0x16,0x10,0x00,0xB9,0x00,0x00,0x10,0x00,0x00,0x00, +0x19,0x54,0x23,0xFC,0x40,0x00,0x00,0x00,0xFF,0xFC,0x15,0x4C,0x4E,0x73,0x00,0xB9, +0x00,0x00,0x00,0x00,0xFF,0xFC,0x16,0x30,0x00,0xB9,0x00,0x00,0x20,0x00,0x00,0x00, +0x19,0x54,0x23,0xFC,0x20,0x00,0x00,0x00,0xFF,0xFC,0x15,0x4C,0x4E,0x73,0x00,0xB9, +0x00,0x00,0x00,0x00,0xFF,0xFC,0x16,0x50,0x00,0xB9,0x00,0x00,0x40,0x00,0x00,0x00, +0x19,0x54,0x23,0xFC,0x10,0x00,0x00,0x00,0xFF,0xFC,0x15,0x4C,0x4E,0x73,0x00,0xB9, +0x00,0x00,0x00,0x00,0xFF,0xFC,0x16,0x70,0x00,0xB9,0x00,0x00,0x80,0x00,0x00,0x00, +0x19,0x54,0x23,0xFC,0x08,0x00,0x00,0x00,0xFF,0xFC,0x15,0x4C,0x4E,0x73,0x4E,0x73, +0x2F,0x00,0x2F,0x01,0x2F,0x02,0x2F,0x08,0x2F,0x09,0x42,0x80,0x20,0x7C,0xFF,0xFB, +0x00,0x00,0x32,0x10,0x02,0x81,0x00,0x00,0x00,0xE7,0x0C,0x41,0x00,0x42,0x66,0x00, +0x00,0x0A,0x32,0x3C,0x0E,0x08,0x60,0x00,0x00,0x3E,0x0C,0x41,0x00,0x63,0x66,0x00, +0x00,0x0A,0x32,0x3C,0x04,0x08,0x60,0x00,0x00,0x2E,0x0C,0x41,0x00,0x84,0x66,0x00, +0x00,0x0A,0x32,0x3C,0x02,0x08,0x60,0x00,0x00,0x1E,0x0C,0x41,0x00,0xA5,0x66,0x00, +0x00,0x0A,0x32,0x3C,0x0D,0x08,0x60,0x00,0x00,0x0E,0x32,0x3C,0x00,0x08,0x34,0x3C, +0x80,0xE7,0x60,0x00,0x00,0x14,0x34,0x30,0x09,0xB0,0x00,0x00,0x19,0xAA,0x02,0x42, +0x30,0x00,0x82,0x42,0x34,0x3C,0x80,0xFF,0xB2,0x70,0x09,0xB0,0x00,0x00,0x19,0xAC, +0x67,0x00,0x00,0x0C,0x31,0x81,0x09,0xB0,0x00,0x00,0x19,0xAC,0x30,0x81,0x32,0x39, 
+0xFF,0xFC,0x15,0x66,0xC2,0x70,0x09,0xB0,0x00,0x00,0x19,0x02,0x67,0x00,0x00,0x0C,
+0x32,0x10,0x02,0x41,0xFF,0xF7,0x60,0x00,0x00,0x08,0x32,0x10,0x00,0x41,0x00,0x08,
+0xC2,0x42,0x22,0x70,0x09,0xB0,0x00,0x00,0x10,0x04,0xB2,0xA9,0x00,0x04,0x67,0x00,
+0x00,0x12,0x23,0x41,0x00,0x04,0x23,0xF0,0x09,0xB0,0x00,0x00,0x18,0xB2,0xFF,0xF9,
+0x00,0xE4,0x54,0x88,0x58,0x80,0x0C,0x80,0x00,0x00,0x00,0x10,0x66,0x00,0xFF,0x34,
+0x22,0x5F,0x20,0x5F,0x24,0x1F,0x22,0x1F,0x20,0x1F,0x4E,0x75,0x61,0x00,0xFF,0x12,
+0x4E,0x73,0xFF,0xFC,0x16,0x00,0xFF,0xFC,0x16,0x20,0xFF,0xFC,0x16,0x40,0xFF,0xFC,
+0x16,0x60,0xFF,0xFC,0x0C,0x00,0xFF,0xFC,0x0D,0x00,0xFF,0xFC,0x0E,0x00,0xFF,0xFC,
+0x0F,0x00,0xFF,0xFC,0x00,0x00,0xFF,0xFC,0x01,0x40,0xFF,0xFC,0x02,0x80,0xFF,0xFC,
+0x03,0xC0,0xFF,0xFC,0x00,0x50,0xFF,0xFC,0x01,0x90,0xFF,0xFC,0x02,0xD0,0xFF,0xFC,
+0x04,0x10,0x00,0x00,0x40,0x00,0x00,0x01,0x2F,0x60,0x00,0x02,0x1E,0xC0,0x00,0x03,
+0x0E,0x20,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x02,0x00,0x00,0x00,0x04,0x00,0x00,
+0x00,0x08,0x00,0x00,0x00,0x20,0x00,0x00,0x00,0x40,0x00,0x00,0x00,0x80,0x00,0x00,
+0x01,0x00,0x00,0x00,0x00,0x10,0x00,0x00,0x00,0x11,0x00,0x00,0x00,0x12,0x00,0x00,
+0x00,0x13,0x00,0x00,0x00,0x2C,0x00,0x00,0x3E,0x00,0x00,0x2C,0x00,0x00,0x3E,0x00,
+0x00,0x00,0x00,0x00,0x00,0x2D,0x00,0x00,0x3F,0x00,0x00,0x2D,0x00,0x00,0x3F,0x00,
+0x00,0x00,0x00,0x00,0x00,0xFF,0x00,0x00,0xFF,0x00,0x00,0xFF,0x00,0x00,0xFF,0x00,
+0x00,0x00,0x00,0x20,0x00,0x00,0x00,0x80,0x00,0x00,0x02,0x00,0x00,0x00,0x08,0x00,
+0x77,0x61,0x6E,0x58,0x4C,0x20,0x66,0x69,0x72,0x6D,0x77,0x61,0x72,0x65,0x0A,0x43,
+0x6F,0x70,0x79,0x72,0x69,0x67,0x68,0x74,0x20,0x28,0x43,0x29,0x20,0x32,0x30,0x30,
+0x33,0x20,0x4B,0x72,0x7A,0x79,0x73,0x7A,0x74,0x6F,0x66,0x20,0x48,0x61,0x6C,0x61,
+0x73,0x61,0x20,0x3C,0x6B,0x68,0x63,0x40,0x70,0x6D,0x2E,0x77,0x61,0x77,0x2E,0x70,
+0x6C,0x3E,0x0A,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+};
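+
+/* For reference: the host-side half of the TX descriptor handoff that the
+ * firmware's tx: routine consumes is sketched below. It roughly paraphrases
+ * wanxl_xmit() from wanxl.c (not part of this hunk), using only names
+ * visible in wanxl.h and wanxl.c above; locking, the queue-full path and
+ * the tx_out ring-index update are omitted:
+ *
+ *	desc_t *desc = &get_status(port)->tx_descs[port->tx_out];
+ *
+ *	port->tx_skbs[port->tx_out] = skb;
+ *	desc->address = dma_map_single(&port->card->pdev->dev, skb->data,
+ *				       skb->len, DMA_TO_DEVICE);
+ *	desc->length = skb->len;
+ *	desc->stat = PACKET_FULL;	// hand the buffer to the card
+ *	writel(1 << (DOORBELL_TO_CARD_TX_0 + port->node),
+ *	       port->card->plx + PLX_DOORBELL_TO_CARD);
+ */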