author     Daniel Baumann <daniel.baumann@progress-linux.org>    2024-04-07 18:49:45 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>    2024-04-07 18:49:45 +0000
commit     2c3c1048746a4622d8c89a29670120dc8fab93c4
tree       848558de17fb3008cdf4d861b01ac7781903ce39 /drivers/net/ethernet/microchip
parent     Initial commit.
Adding upstream version 6.1.76.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/net/ethernet/microchip')
61 files changed, 37090 insertions, 0 deletions
diff --git a/drivers/net/ethernet/microchip/Kconfig b/drivers/net/ethernet/microchip/Kconfig new file mode 100644 index 000000000..ed7a35c3c --- /dev/null +++ b/drivers/net/ethernet/microchip/Kconfig @@ -0,0 +1,61 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Microchip network device configuration +# + +config NET_VENDOR_MICROCHIP + bool "Microchip devices" + default y + help + If you have a network (Ethernet) card belonging to this class, say Y. + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about Microchip cards. If you say Y, you will be asked + for your specific card in the following questions. + +if NET_VENDOR_MICROCHIP + +config ENC28J60 + tristate "ENC28J60 support" + depends on SPI + select CRC32 + help + Support for the Microchip EN28J60 ethernet chip. + + To compile this driver as a module, choose M here. The module will be + called enc28j60. + +config ENC28J60_WRITEVERIFY + bool "Enable write verify" + depends on ENC28J60 + help + Enable the verify after the buffer write useful for debugging purpose. + If unsure, say N. + +config ENCX24J600 + tristate "ENCX24J600 support" + depends on SPI + help + Support for the Microchip ENC424J600/624J600 ethernet chip. + + To compile this driver as a module, choose M here. The module will be + called encx24j600. + +config LAN743X + tristate "LAN743x support" + depends on PCI + depends on PTP_1588_CLOCK_OPTIONAL + select PHYLIB + select CRC16 + select CRC32 + help + Support for the Microchip LAN743x PCI Express Gigabit Ethernet chip + + To compile this driver as a module, choose M here. The module will be + called lan743x. + +source "drivers/net/ethernet/microchip/lan966x/Kconfig" +source "drivers/net/ethernet/microchip/sparx5/Kconfig" + +endif # NET_VENDOR_MICROCHIP diff --git a/drivers/net/ethernet/microchip/Makefile b/drivers/net/ethernet/microchip/Makefile new file mode 100644 index 000000000..9faa41436 --- /dev/null +++ b/drivers/net/ethernet/microchip/Makefile @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Makefile for the Microchip network device drivers. 
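For reference, a minimal .config fragment (an illustrative sketch, not part of this commit) that enables the options declared in the Kconfig file above; the CONFIG_* symbol names are taken from that file:

        CONFIG_NET_VENDOR_MICROCHIP=y
        CONFIG_ENC28J60=m
        CONFIG_ENC28J60_WRITEVERIFY=y
        CONFIG_ENCX24J600=m
        CONFIG_LAN743X=m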
+# + +obj-$(CONFIG_ENC28J60) += enc28j60.o +obj-$(CONFIG_ENCX24J600) += encx24j600.o encx24j600-regmap.o +obj-$(CONFIG_LAN743X) += lan743x.o + +lan743x-objs := lan743x_main.o lan743x_ethtool.o lan743x_ptp.o + +obj-$(CONFIG_LAN966X_SWITCH) += lan966x/ +obj-$(CONFIG_SPARX5_SWITCH) += sparx5/ diff --git a/drivers/net/ethernet/microchip/enc28j60.c b/drivers/net/ethernet/microchip/enc28j60.c new file mode 100644 index 000000000..176efbeae --- /dev/null +++ b/drivers/net/ethernet/microchip/enc28j60.c @@ -0,0 +1,1645 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Microchip ENC28J60 ethernet driver (MAC + PHY) + * + * Copyright (C) 2007 Eurek srl + * Author: Claudio Lanconelli <lanconelli.claudio@eptar.com> + * based on enc28j60.c written by David Anders for 2.4 kernel version + * + * $Id: enc28j60.c,v 1.22 2007/12/20 10:47:01 claudio Exp $ + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/fcntl.h> +#include <linux/interrupt.h> +#include <linux/property.h> +#include <linux/string.h> +#include <linux/errno.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/ethtool.h> +#include <linux/tcp.h> +#include <linux/skbuff.h> +#include <linux/delay.h> +#include <linux/spi/spi.h> + +#include "enc28j60_hw.h" + +#define DRV_NAME "enc28j60" +#define DRV_VERSION "1.02" + +#define SPI_OPLEN 1 + +#define ENC28J60_MSG_DEFAULT \ + (NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN | NETIF_MSG_LINK) + +/* Buffer size required for the largest SPI transfer (i.e., reading a + * frame). + */ +#define SPI_TRANSFER_BUF_LEN (4 + MAX_FRAMELEN) + +#define TX_TIMEOUT (4 * HZ) + +/* Max TX retries in case of collision as suggested by errata datasheet */ +#define MAX_TX_RETRYCOUNT 16 + +enum { + RXFILTER_NORMAL, + RXFILTER_MULTI, + RXFILTER_PROMISC +}; + +/* Driver local data */ +struct enc28j60_net { + struct net_device *netdev; + struct spi_device *spi; + struct mutex lock; + struct sk_buff *tx_skb; + struct work_struct tx_work; + struct work_struct irq_work; + struct work_struct setrx_work; + struct work_struct restart_work; + u8 bank; /* current register bank selected */ + u16 next_pk_ptr; /* next packet pointer within FIFO */ + u16 max_pk_counter; /* statistics: max packet counter */ + u16 tx_retry_count; + bool hw_enable; + bool full_duplex; + int rxfilter; + u32 msg_enable; + u8 spi_transfer_buf[SPI_TRANSFER_BUF_LEN]; +}; + +/* use ethtool to change the level for any given device */ +static struct { + u32 msg_enable; +} debug = { -1 }; + +/* + * SPI read buffer + * Wait for the SPI transfer and copy received data to destination. 
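The buffer read implemented just below is a single SPI message: one ENC28J60_READ_BUF_MEM opcode byte clocked out, then len data bytes clocked in. An equivalent sketch of the same transaction using the kernel's spi_sync_transfer() helper (same names as in this driver; error handling omitted):

        u8 op = ENC28J60_READ_BUF_MEM;
        struct spi_transfer xfers[] = {
                { .tx_buf = &op,  .len = 1   },  /* read-buffer-memory opcode */
                { .rx_buf = data, .len = len },  /* frame bytes from chip SRAM */
        };

        ret = spi_sync_transfer(priv->spi, xfers, ARRAY_SIZE(xfers));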
+ */ +static int +spi_read_buf(struct enc28j60_net *priv, int len, u8 *data) +{ + struct device *dev = &priv->spi->dev; + u8 *rx_buf = priv->spi_transfer_buf + 4; + u8 *tx_buf = priv->spi_transfer_buf; + struct spi_transfer tx = { + .tx_buf = tx_buf, + .len = SPI_OPLEN, + }; + struct spi_transfer rx = { + .rx_buf = rx_buf, + .len = len, + }; + struct spi_message msg; + int ret; + + tx_buf[0] = ENC28J60_READ_BUF_MEM; + + spi_message_init(&msg); + spi_message_add_tail(&tx, &msg); + spi_message_add_tail(&rx, &msg); + + ret = spi_sync(priv->spi, &msg); + if (ret == 0) { + memcpy(data, rx_buf, len); + ret = msg.status; + } + if (ret && netif_msg_drv(priv)) + dev_printk(KERN_DEBUG, dev, "%s() failed: ret = %d\n", + __func__, ret); + + return ret; +} + +/* + * SPI write buffer + */ +static int spi_write_buf(struct enc28j60_net *priv, int len, const u8 *data) +{ + struct device *dev = &priv->spi->dev; + int ret; + + if (len > SPI_TRANSFER_BUF_LEN - 1 || len <= 0) + ret = -EINVAL; + else { + priv->spi_transfer_buf[0] = ENC28J60_WRITE_BUF_MEM; + memcpy(&priv->spi_transfer_buf[1], data, len); + ret = spi_write(priv->spi, priv->spi_transfer_buf, len + 1); + if (ret && netif_msg_drv(priv)) + dev_printk(KERN_DEBUG, dev, "%s() failed: ret = %d\n", + __func__, ret); + } + return ret; +} + +/* + * basic SPI read operation + */ +static u8 spi_read_op(struct enc28j60_net *priv, u8 op, u8 addr) +{ + struct device *dev = &priv->spi->dev; + u8 tx_buf[2]; + u8 rx_buf[4]; + u8 val = 0; + int ret; + int slen = SPI_OPLEN; + + /* do dummy read if needed */ + if (addr & SPRD_MASK) + slen++; + + tx_buf[0] = op | (addr & ADDR_MASK); + ret = spi_write_then_read(priv->spi, tx_buf, 1, rx_buf, slen); + if (ret) + dev_printk(KERN_DEBUG, dev, "%s() failed: ret = %d\n", + __func__, ret); + else + val = rx_buf[slen - 1]; + + return val; +} + +/* + * basic SPI write operation + */ +static int spi_write_op(struct enc28j60_net *priv, u8 op, u8 addr, u8 val) +{ + struct device *dev = &priv->spi->dev; + int ret; + + priv->spi_transfer_buf[0] = op | (addr & ADDR_MASK); + priv->spi_transfer_buf[1] = val; + ret = spi_write(priv->spi, priv->spi_transfer_buf, 2); + if (ret && netif_msg_drv(priv)) + dev_printk(KERN_DEBUG, dev, "%s() failed: ret = %d\n", + __func__, ret); + return ret; +} + +static void enc28j60_soft_reset(struct enc28j60_net *priv) +{ + spi_write_op(priv, ENC28J60_SOFT_RESET, 0, ENC28J60_SOFT_RESET); + /* Errata workaround #1, CLKRDY check is unreliable, + * delay at least 1 ms instead */ + udelay(2000); +} + +/* + * select the current register bank if necessary + */ +static void enc28j60_set_bank(struct enc28j60_net *priv, u8 addr) +{ + u8 b = (addr & BANK_MASK) >> 5; + + /* These registers (EIE, EIR, ESTAT, ECON2, ECON1) + * are present in all banks, no need to switch bank. + */ + if (addr >= EIE && addr <= ECON1) + return; + + /* Clear or set each bank selection bit as needed */ + if ((b & ECON1_BSEL0) != (priv->bank & ECON1_BSEL0)) { + if (b & ECON1_BSEL0) + spi_write_op(priv, ENC28J60_BIT_FIELD_SET, ECON1, + ECON1_BSEL0); + else + spi_write_op(priv, ENC28J60_BIT_FIELD_CLR, ECON1, + ECON1_BSEL0); + } + if ((b & ECON1_BSEL1) != (priv->bank & ECON1_BSEL1)) { + if (b & ECON1_BSEL1) + spi_write_op(priv, ENC28J60_BIT_FIELD_SET, ECON1, + ECON1_BSEL1); + else + spi_write_op(priv, ENC28J60_BIT_FIELD_CLR, ECON1, + ECON1_BSEL1); + } + priv->bank = b; +} + +/* + * Register access routines through the SPI bus. 
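The opcodes built by spi_read_op()/spi_write_op() above pack everything into one byte: bits 0-4 carry the register address, and registers flagged with SPRD_MASK in enc28j60_hw.h are MAC/MII registers whose reads return a dummy byte first. For example, MISTAT is defined there as (0x0A | 0x60 | SPRD_MASK), i.e. address 0x0A in bank 3, so a raw read of it looks roughly like:

        /* bank 3 must already be selected via enc28j60_set_bank() */
        tx_buf[0] = ENC28J60_READ_CTRL_REG | (MISTAT & ADDR_MASK);
        /* MAC/MII register: clock in two bytes, keep only the last one */
        spi_write_then_read(priv->spi, tx_buf, 1, rx_buf, 2);
        val = rx_buf[1];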
+ * Every register access comes in two flavours: + * - nolock_xxx: caller needs to invoke mutex_lock, usually to access + * atomically more than one register + * - locked_xxx: caller doesn't need to invoke mutex_lock, single access + * + * Some registers can be accessed through the bit field clear and + * bit field set to avoid a read modify write cycle. + */ + +/* + * Register bit field Set + */ +static void nolock_reg_bfset(struct enc28j60_net *priv, u8 addr, u8 mask) +{ + enc28j60_set_bank(priv, addr); + spi_write_op(priv, ENC28J60_BIT_FIELD_SET, addr, mask); +} + +static void locked_reg_bfset(struct enc28j60_net *priv, u8 addr, u8 mask) +{ + mutex_lock(&priv->lock); + nolock_reg_bfset(priv, addr, mask); + mutex_unlock(&priv->lock); +} + +/* + * Register bit field Clear + */ +static void nolock_reg_bfclr(struct enc28j60_net *priv, u8 addr, u8 mask) +{ + enc28j60_set_bank(priv, addr); + spi_write_op(priv, ENC28J60_BIT_FIELD_CLR, addr, mask); +} + +static void locked_reg_bfclr(struct enc28j60_net *priv, u8 addr, u8 mask) +{ + mutex_lock(&priv->lock); + nolock_reg_bfclr(priv, addr, mask); + mutex_unlock(&priv->lock); +} + +/* + * Register byte read + */ +static int nolock_regb_read(struct enc28j60_net *priv, u8 address) +{ + enc28j60_set_bank(priv, address); + return spi_read_op(priv, ENC28J60_READ_CTRL_REG, address); +} + +static int locked_regb_read(struct enc28j60_net *priv, u8 address) +{ + int ret; + + mutex_lock(&priv->lock); + ret = nolock_regb_read(priv, address); + mutex_unlock(&priv->lock); + + return ret; +} + +/* + * Register word read + */ +static int nolock_regw_read(struct enc28j60_net *priv, u8 address) +{ + int rl, rh; + + enc28j60_set_bank(priv, address); + rl = spi_read_op(priv, ENC28J60_READ_CTRL_REG, address); + rh = spi_read_op(priv, ENC28J60_READ_CTRL_REG, address + 1); + + return (rh << 8) | rl; +} + +static int locked_regw_read(struct enc28j60_net *priv, u8 address) +{ + int ret; + + mutex_lock(&priv->lock); + ret = nolock_regw_read(priv, address); + mutex_unlock(&priv->lock); + + return ret; +} + +/* + * Register byte write + */ +static void nolock_regb_write(struct enc28j60_net *priv, u8 address, u8 data) +{ + enc28j60_set_bank(priv, address); + spi_write_op(priv, ENC28J60_WRITE_CTRL_REG, address, data); +} + +static void locked_regb_write(struct enc28j60_net *priv, u8 address, u8 data) +{ + mutex_lock(&priv->lock); + nolock_regb_write(priv, address, data); + mutex_unlock(&priv->lock); +} + +/* + * Register word write + */ +static void nolock_regw_write(struct enc28j60_net *priv, u8 address, u16 data) +{ + enc28j60_set_bank(priv, address); + spi_write_op(priv, ENC28J60_WRITE_CTRL_REG, address, (u8) data); + spi_write_op(priv, ENC28J60_WRITE_CTRL_REG, address + 1, + (u8) (data >> 8)); +} + +static void locked_regw_write(struct enc28j60_net *priv, u8 address, u16 data) +{ + mutex_lock(&priv->lock); + nolock_regw_write(priv, address, data); + mutex_unlock(&priv->lock); +} + +/* + * Buffer memory read + * Select the starting address and execute a SPI buffer read. 
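A typical caller of the enc28j60_mem_read() helper that follows is the receive path further down in this file, which points it at the frame payload inside the chip's circular RX buffer and copies straight into a freshly allocated skb, roughly:

        skb_reserve(skb, NET_IP_ALIGN);
        enc28j60_mem_read(priv, rx_packet_start(priv->next_pk_ptr),
                          len, skb_put(skb, len));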
+ */ +static void enc28j60_mem_read(struct enc28j60_net *priv, u16 addr, int len, + u8 *data) +{ + mutex_lock(&priv->lock); + nolock_regw_write(priv, ERDPTL, addr); +#ifdef CONFIG_ENC28J60_WRITEVERIFY + if (netif_msg_drv(priv)) { + struct device *dev = &priv->spi->dev; + u16 reg; + + reg = nolock_regw_read(priv, ERDPTL); + if (reg != addr) + dev_printk(KERN_DEBUG, dev, + "%s() error writing ERDPT (0x%04x - 0x%04x)\n", + __func__, reg, addr); + } +#endif + spi_read_buf(priv, len, data); + mutex_unlock(&priv->lock); +} + +/* + * Write packet to enc28j60 TX buffer memory + */ +static void +enc28j60_packet_write(struct enc28j60_net *priv, int len, const u8 *data) +{ + struct device *dev = &priv->spi->dev; + + mutex_lock(&priv->lock); + /* Set the write pointer to start of transmit buffer area */ + nolock_regw_write(priv, EWRPTL, TXSTART_INIT); +#ifdef CONFIG_ENC28J60_WRITEVERIFY + if (netif_msg_drv(priv)) { + u16 reg; + reg = nolock_regw_read(priv, EWRPTL); + if (reg != TXSTART_INIT) + dev_printk(KERN_DEBUG, dev, + "%s() ERWPT:0x%04x != 0x%04x\n", + __func__, reg, TXSTART_INIT); + } +#endif + /* Set the TXND pointer to correspond to the packet size given */ + nolock_regw_write(priv, ETXNDL, TXSTART_INIT + len); + /* write per-packet control byte */ + spi_write_op(priv, ENC28J60_WRITE_BUF_MEM, 0, 0x00); + if (netif_msg_hw(priv)) + dev_printk(KERN_DEBUG, dev, + "%s() after control byte ERWPT:0x%04x\n", + __func__, nolock_regw_read(priv, EWRPTL)); + /* copy the packet into the transmit buffer */ + spi_write_buf(priv, len, data); + if (netif_msg_hw(priv)) + dev_printk(KERN_DEBUG, dev, + "%s() after write packet ERWPT:0x%04x, len=%d\n", + __func__, nolock_regw_read(priv, EWRPTL), len); + mutex_unlock(&priv->lock); +} + +static int poll_ready(struct enc28j60_net *priv, u8 reg, u8 mask, u8 val) +{ + struct device *dev = &priv->spi->dev; + unsigned long timeout = jiffies + msecs_to_jiffies(20); + + /* 20 msec timeout read */ + while ((nolock_regb_read(priv, reg) & mask) != val) { + if (time_after(jiffies, timeout)) { + if (netif_msg_drv(priv)) + dev_dbg(dev, "reg %02x ready timeout!\n", reg); + return -ETIMEDOUT; + } + cpu_relax(); + } + return 0; +} + +/* + * Wait until the PHY operation is complete. + */ +static int wait_phy_ready(struct enc28j60_net *priv) +{ + return poll_ready(priv, MISTAT, MISTAT_BUSY, 0) ? 0 : 1; +} + +/* + * PHY register read + * PHY registers are not accessed directly, but through the MII. + */ +static u16 enc28j60_phy_read(struct enc28j60_net *priv, u8 address) +{ + u16 ret; + + mutex_lock(&priv->lock); + /* set the PHY register address */ + nolock_regb_write(priv, MIREGADR, address); + /* start the register read operation */ + nolock_regb_write(priv, MICMD, MICMD_MIIRD); + /* wait until the PHY read completes */ + wait_phy_ready(priv); + /* quit reading */ + nolock_regb_write(priv, MICMD, 0x00); + /* return the data */ + ret = nolock_regw_read(priv, MIRDL); + mutex_unlock(&priv->lock); + + return ret; +} + +static int enc28j60_phy_write(struct enc28j60_net *priv, u8 address, u16 data) +{ + int ret; + + mutex_lock(&priv->lock); + /* set the PHY register address */ + nolock_regb_write(priv, MIREGADR, address); + /* write the PHY data */ + nolock_regw_write(priv, MIWRL, data); + /* wait until the PHY write completes and return */ + ret = wait_phy_ready(priv); + mutex_unlock(&priv->lock); + + return ret; +} + +/* + * Program the hardware MAC address from dev->dev_addr. 
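The PHY read/write helpers just above are what the rest of the driver uses for link management; for instance, current link state and negotiated duplex are taken from PHSTAT2, as enc28j60_check_link_status() does later in this file:

        u16 phstat2 = enc28j60_phy_read(priv, PHSTAT2);
        bool link_up     = phstat2 & PHSTAT2_LSTAT;
        bool full_duplex = phstat2 & PHSTAT2_DPXSTAT;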
+ */ +static int enc28j60_set_hw_macaddr(struct net_device *ndev) +{ + int ret; + struct enc28j60_net *priv = netdev_priv(ndev); + struct device *dev = &priv->spi->dev; + + mutex_lock(&priv->lock); + if (!priv->hw_enable) { + if (netif_msg_drv(priv)) + dev_info(dev, "%s: Setting MAC address to %pM\n", + ndev->name, ndev->dev_addr); + /* NOTE: MAC address in ENC28J60 is byte-backward */ + nolock_regb_write(priv, MAADR5, ndev->dev_addr[0]); + nolock_regb_write(priv, MAADR4, ndev->dev_addr[1]); + nolock_regb_write(priv, MAADR3, ndev->dev_addr[2]); + nolock_regb_write(priv, MAADR2, ndev->dev_addr[3]); + nolock_regb_write(priv, MAADR1, ndev->dev_addr[4]); + nolock_regb_write(priv, MAADR0, ndev->dev_addr[5]); + ret = 0; + } else { + if (netif_msg_drv(priv)) + dev_printk(KERN_DEBUG, dev, + "%s() Hardware must be disabled to set Mac address\n", + __func__); + ret = -EBUSY; + } + mutex_unlock(&priv->lock); + return ret; +} + +/* + * Store the new hardware address in dev->dev_addr, and update the MAC. + */ +static int enc28j60_set_mac_address(struct net_device *dev, void *addr) +{ + struct sockaddr *address = addr; + + if (netif_running(dev)) + return -EBUSY; + if (!is_valid_ether_addr(address->sa_data)) + return -EADDRNOTAVAIL; + + eth_hw_addr_set(dev, address->sa_data); + return enc28j60_set_hw_macaddr(dev); +} + +/* + * Debug routine to dump useful register contents + */ +static void enc28j60_dump_regs(struct enc28j60_net *priv, const char *msg) +{ + struct device *dev = &priv->spi->dev; + + mutex_lock(&priv->lock); + dev_printk(KERN_DEBUG, dev, + " %s\n" + "HwRevID: 0x%02x\n" + "Cntrl: ECON1 ECON2 ESTAT EIR EIE\n" + " 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x\n" + "MAC : MACON1 MACON3 MACON4\n" + " 0x%02x 0x%02x 0x%02x\n" + "Rx : ERXST ERXND ERXWRPT ERXRDPT ERXFCON EPKTCNT MAMXFL\n" + " 0x%04x 0x%04x 0x%04x 0x%04x " + "0x%02x 0x%02x 0x%04x\n" + "Tx : ETXST ETXND MACLCON1 MACLCON2 MAPHSUP\n" + " 0x%04x 0x%04x 0x%02x 0x%02x 0x%02x\n", + msg, nolock_regb_read(priv, EREVID), + nolock_regb_read(priv, ECON1), nolock_regb_read(priv, ECON2), + nolock_regb_read(priv, ESTAT), nolock_regb_read(priv, EIR), + nolock_regb_read(priv, EIE), nolock_regb_read(priv, MACON1), + nolock_regb_read(priv, MACON3), nolock_regb_read(priv, MACON4), + nolock_regw_read(priv, ERXSTL), nolock_regw_read(priv, ERXNDL), + nolock_regw_read(priv, ERXWRPTL), + nolock_regw_read(priv, ERXRDPTL), + nolock_regb_read(priv, ERXFCON), + nolock_regb_read(priv, EPKTCNT), + nolock_regw_read(priv, MAMXFLL), nolock_regw_read(priv, ETXSTL), + nolock_regw_read(priv, ETXNDL), + nolock_regb_read(priv, MACLCON1), + nolock_regb_read(priv, MACLCON2), + nolock_regb_read(priv, MAPHSUP)); + mutex_unlock(&priv->lock); +} + +/* + * ERXRDPT need to be set always at odd addresses, refer to errata datasheet + */ +static u16 erxrdpt_workaround(u16 next_packet_ptr, u16 start, u16 end) +{ + u16 erxrdpt; + + if ((next_packet_ptr - 1 < start) || (next_packet_ptr - 1 > end)) + erxrdpt = end; + else + erxrdpt = next_packet_ptr - 1; + + return erxrdpt; +} + +/* + * Calculate wrap around when reading beyond the end of the RX buffer + */ +static u16 rx_packet_start(u16 ptr) +{ + if (ptr + RSV_SIZE > RXEND_INIT) + return (ptr + RSV_SIZE) - (RXEND_INIT - RXSTART_INIT + 1); + else + return ptr + RSV_SIZE; +} + +static void nolock_rxfifo_init(struct enc28j60_net *priv, u16 start, u16 end) +{ + struct device *dev = &priv->spi->dev; + u16 erxrdpt; + + if (start > 0x1FFF || end > 0x1FFF || start > end) { + if (netif_msg_drv(priv)) + dev_err(dev, "%s(%d, %d) RXFIFO bad 
parameters!\n", + __func__, start, end); + return; + } + /* set receive buffer start + end */ + priv->next_pk_ptr = start; + nolock_regw_write(priv, ERXSTL, start); + erxrdpt = erxrdpt_workaround(priv->next_pk_ptr, start, end); + nolock_regw_write(priv, ERXRDPTL, erxrdpt); + nolock_regw_write(priv, ERXNDL, end); +} + +static void nolock_txfifo_init(struct enc28j60_net *priv, u16 start, u16 end) +{ + struct device *dev = &priv->spi->dev; + + if (start > 0x1FFF || end > 0x1FFF || start > end) { + if (netif_msg_drv(priv)) + dev_err(dev, "%s(%d, %d) TXFIFO bad parameters!\n", + __func__, start, end); + return; + } + /* set transmit buffer start + end */ + nolock_regw_write(priv, ETXSTL, start); + nolock_regw_write(priv, ETXNDL, end); +} + +/* + * Low power mode shrinks power consumption about 100x, so we'd like + * the chip to be in that mode whenever it's inactive. (However, we + * can't stay in low power mode during suspend with WOL active.) + */ +static void enc28j60_lowpower(struct enc28j60_net *priv, bool is_low) +{ + struct device *dev = &priv->spi->dev; + + if (netif_msg_drv(priv)) + dev_dbg(dev, "%s power...\n", is_low ? "low" : "high"); + + mutex_lock(&priv->lock); + if (is_low) { + nolock_reg_bfclr(priv, ECON1, ECON1_RXEN); + poll_ready(priv, ESTAT, ESTAT_RXBUSY, 0); + poll_ready(priv, ECON1, ECON1_TXRTS, 0); + /* ECON2_VRPS was set during initialization */ + nolock_reg_bfset(priv, ECON2, ECON2_PWRSV); + } else { + nolock_reg_bfclr(priv, ECON2, ECON2_PWRSV); + poll_ready(priv, ESTAT, ESTAT_CLKRDY, ESTAT_CLKRDY); + /* caller sets ECON1_RXEN */ + } + mutex_unlock(&priv->lock); +} + +static int enc28j60_hw_init(struct enc28j60_net *priv) +{ + struct device *dev = &priv->spi->dev; + u8 reg; + + if (netif_msg_drv(priv)) + dev_printk(KERN_DEBUG, dev, "%s() - %s\n", __func__, + priv->full_duplex ? "FullDuplex" : "HalfDuplex"); + + mutex_lock(&priv->lock); + /* first reset the chip */ + enc28j60_soft_reset(priv); + /* Clear ECON1 */ + spi_write_op(priv, ENC28J60_WRITE_CTRL_REG, ECON1, 0x00); + priv->bank = 0; + priv->hw_enable = false; + priv->tx_retry_count = 0; + priv->max_pk_counter = 0; + priv->rxfilter = RXFILTER_NORMAL; + /* enable address auto increment and voltage regulator powersave */ + nolock_regb_write(priv, ECON2, ECON2_AUTOINC | ECON2_VRPS); + + nolock_rxfifo_init(priv, RXSTART_INIT, RXEND_INIT); + nolock_txfifo_init(priv, TXSTART_INIT, TXEND_INIT); + mutex_unlock(&priv->lock); + + /* + * Check the RevID. + * If it's 0x00 or 0xFF probably the enc28j60 is not mounted or + * damaged. 
+ */ + reg = locked_regb_read(priv, EREVID); + if (netif_msg_drv(priv)) + dev_info(dev, "chip RevID: 0x%02x\n", reg); + if (reg == 0x00 || reg == 0xff) { + if (netif_msg_drv(priv)) + dev_printk(KERN_DEBUG, dev, "%s() Invalid RevId %d\n", + __func__, reg); + return 0; + } + + /* default filter mode: (unicast OR broadcast) AND crc valid */ + locked_regb_write(priv, ERXFCON, + ERXFCON_UCEN | ERXFCON_CRCEN | ERXFCON_BCEN); + + /* enable MAC receive */ + locked_regb_write(priv, MACON1, + MACON1_MARXEN | MACON1_TXPAUS | MACON1_RXPAUS); + /* enable automatic padding and CRC operations */ + if (priv->full_duplex) { + locked_regb_write(priv, MACON3, + MACON3_PADCFG0 | MACON3_TXCRCEN | + MACON3_FRMLNEN | MACON3_FULDPX); + /* set inter-frame gap (non-back-to-back) */ + locked_regb_write(priv, MAIPGL, 0x12); + /* set inter-frame gap (back-to-back) */ + locked_regb_write(priv, MABBIPG, 0x15); + } else { + locked_regb_write(priv, MACON3, + MACON3_PADCFG0 | MACON3_TXCRCEN | + MACON3_FRMLNEN); + locked_regb_write(priv, MACON4, 1 << 6); /* DEFER bit */ + /* set inter-frame gap (non-back-to-back) */ + locked_regw_write(priv, MAIPGL, 0x0C12); + /* set inter-frame gap (back-to-back) */ + locked_regb_write(priv, MABBIPG, 0x12); + } + /* + * MACLCON1 (default) + * MACLCON2 (default) + * Set the maximum packet size which the controller will accept. + */ + locked_regw_write(priv, MAMXFLL, MAX_FRAMELEN); + + /* Configure LEDs */ + if (!enc28j60_phy_write(priv, PHLCON, ENC28J60_LAMPS_MODE)) + return 0; + + if (priv->full_duplex) { + if (!enc28j60_phy_write(priv, PHCON1, PHCON1_PDPXMD)) + return 0; + if (!enc28j60_phy_write(priv, PHCON2, 0x00)) + return 0; + } else { + if (!enc28j60_phy_write(priv, PHCON1, 0x00)) + return 0; + if (!enc28j60_phy_write(priv, PHCON2, PHCON2_HDLDIS)) + return 0; + } + if (netif_msg_hw(priv)) + enc28j60_dump_regs(priv, "Hw initialized."); + + return 1; +} + +static void enc28j60_hw_enable(struct enc28j60_net *priv) +{ + struct device *dev = &priv->spi->dev; + + /* enable interrupts */ + if (netif_msg_hw(priv)) + dev_printk(KERN_DEBUG, dev, "%s() enabling interrupts.\n", + __func__); + + enc28j60_phy_write(priv, PHIE, PHIE_PGEIE | PHIE_PLNKIE); + + mutex_lock(&priv->lock); + nolock_reg_bfclr(priv, EIR, EIR_DMAIF | EIR_LINKIF | + EIR_TXIF | EIR_TXERIF | EIR_RXERIF | EIR_PKTIF); + nolock_regb_write(priv, EIE, EIE_INTIE | EIE_PKTIE | EIE_LINKIE | + EIE_TXIE | EIE_TXERIE | EIE_RXERIE); + + /* enable receive logic */ + nolock_reg_bfset(priv, ECON1, ECON1_RXEN); + priv->hw_enable = true; + mutex_unlock(&priv->lock); +} + +static void enc28j60_hw_disable(struct enc28j60_net *priv) +{ + mutex_lock(&priv->lock); + /* disable interrupts and packet reception */ + nolock_regb_write(priv, EIE, 0x00); + nolock_reg_bfclr(priv, ECON1, ECON1_RXEN); + priv->hw_enable = false; + mutex_unlock(&priv->lock); +} + +static int +enc28j60_setlink(struct net_device *ndev, u8 autoneg, u16 speed, u8 duplex) +{ + struct enc28j60_net *priv = netdev_priv(ndev); + int ret = 0; + + if (!priv->hw_enable) { + /* link is in low power mode now; duplex setting + * will take effect on next enc28j60_hw_init(). 
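In practice the only setting this accepts is forced 10 Mb/s with a chosen duplex, and only while the interface (and therefore the hardware) is down. From user space that corresponds to something like the following (illustrative; the interface name is an assumption):

        ip link set dev eth0 down
        ethtool -s eth0 autoneg off speed 10 duplex full
        ip link set dev eth0 up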
+ */ + if (autoneg == AUTONEG_DISABLE && speed == SPEED_10) + priv->full_duplex = (duplex == DUPLEX_FULL); + else { + if (netif_msg_link(priv)) + netdev_warn(ndev, "unsupported link setting\n"); + ret = -EOPNOTSUPP; + } + } else { + if (netif_msg_link(priv)) + netdev_warn(ndev, "Warning: hw must be disabled to set link mode\n"); + ret = -EBUSY; + } + return ret; +} + +/* + * Read the Transmit Status Vector + */ +static void enc28j60_read_tsv(struct enc28j60_net *priv, u8 tsv[TSV_SIZE]) +{ + struct device *dev = &priv->spi->dev; + int endptr; + + endptr = locked_regw_read(priv, ETXNDL); + if (netif_msg_hw(priv)) + dev_printk(KERN_DEBUG, dev, "reading TSV at addr:0x%04x\n", + endptr + 1); + enc28j60_mem_read(priv, endptr + 1, TSV_SIZE, tsv); +} + +static void enc28j60_dump_tsv(struct enc28j60_net *priv, const char *msg, + u8 tsv[TSV_SIZE]) +{ + struct device *dev = &priv->spi->dev; + u16 tmp1, tmp2; + + dev_printk(KERN_DEBUG, dev, "%s - TSV:\n", msg); + tmp1 = tsv[1]; + tmp1 <<= 8; + tmp1 |= tsv[0]; + + tmp2 = tsv[5]; + tmp2 <<= 8; + tmp2 |= tsv[4]; + + dev_printk(KERN_DEBUG, dev, + "ByteCount: %d, CollisionCount: %d, TotByteOnWire: %d\n", + tmp1, tsv[2] & 0x0f, tmp2); + dev_printk(KERN_DEBUG, dev, + "TxDone: %d, CRCErr:%d, LenChkErr: %d, LenOutOfRange: %d\n", + TSV_GETBIT(tsv, TSV_TXDONE), + TSV_GETBIT(tsv, TSV_TXCRCERROR), + TSV_GETBIT(tsv, TSV_TXLENCHKERROR), + TSV_GETBIT(tsv, TSV_TXLENOUTOFRANGE)); + dev_printk(KERN_DEBUG, dev, + "Multicast: %d, Broadcast: %d, PacketDefer: %d, ExDefer: %d\n", + TSV_GETBIT(tsv, TSV_TXMULTICAST), + TSV_GETBIT(tsv, TSV_TXBROADCAST), + TSV_GETBIT(tsv, TSV_TXPACKETDEFER), + TSV_GETBIT(tsv, TSV_TXEXDEFER)); + dev_printk(KERN_DEBUG, dev, + "ExCollision: %d, LateCollision: %d, Giant: %d, Underrun: %d\n", + TSV_GETBIT(tsv, TSV_TXEXCOLLISION), + TSV_GETBIT(tsv, TSV_TXLATECOLLISION), + TSV_GETBIT(tsv, TSV_TXGIANT), TSV_GETBIT(tsv, TSV_TXUNDERRUN)); + dev_printk(KERN_DEBUG, dev, + "ControlFrame: %d, PauseFrame: %d, BackPressApp: %d, VLanTagFrame: %d\n", + TSV_GETBIT(tsv, TSV_TXCONTROLFRAME), + TSV_GETBIT(tsv, TSV_TXPAUSEFRAME), + TSV_GETBIT(tsv, TSV_BACKPRESSUREAPP), + TSV_GETBIT(tsv, TSV_TXVLANTAGFRAME)); +} + +/* + * Receive Status vector + */ +static void enc28j60_dump_rsv(struct enc28j60_net *priv, const char *msg, + u16 pk_ptr, int len, u16 sts) +{ + struct device *dev = &priv->spi->dev; + + dev_printk(KERN_DEBUG, dev, "%s - NextPk: 0x%04x - RSV:\n", msg, pk_ptr); + dev_printk(KERN_DEBUG, dev, "ByteCount: %d, DribbleNibble: %d\n", + len, RSV_GETBIT(sts, RSV_DRIBBLENIBBLE)); + dev_printk(KERN_DEBUG, dev, + "RxOK: %d, CRCErr:%d, LenChkErr: %d, LenOutOfRange: %d\n", + RSV_GETBIT(sts, RSV_RXOK), + RSV_GETBIT(sts, RSV_CRCERROR), + RSV_GETBIT(sts, RSV_LENCHECKERR), + RSV_GETBIT(sts, RSV_LENOUTOFRANGE)); + dev_printk(KERN_DEBUG, dev, + "Multicast: %d, Broadcast: %d, LongDropEvent: %d, CarrierEvent: %d\n", + RSV_GETBIT(sts, RSV_RXMULTICAST), + RSV_GETBIT(sts, RSV_RXBROADCAST), + RSV_GETBIT(sts, RSV_RXLONGEVDROPEV), + RSV_GETBIT(sts, RSV_CARRIEREV)); + dev_printk(KERN_DEBUG, dev, + "ControlFrame: %d, PauseFrame: %d, UnknownOp: %d, VLanTagFrame: %d\n", + RSV_GETBIT(sts, RSV_RXCONTROLFRAME), + RSV_GETBIT(sts, RSV_RXPAUSEFRAME), + RSV_GETBIT(sts, RSV_RXUNKNOWNOPCODE), + RSV_GETBIT(sts, RSV_RXTYPEVLAN)); +} + +static void dump_packet(const char *msg, int len, const char *data) +{ + printk(KERN_DEBUG DRV_NAME ": %s - packet len:%d\n", msg, len); + print_hex_dump(KERN_DEBUG, "pk data: ", DUMP_PREFIX_OFFSET, 16, 1, + data, len, true); +} + +/* + * Hardware receive function. 
+ * Read the buffer memory, update the FIFO pointer to free the buffer, + * check the status vector and decrement the packet counter. + */ +static void enc28j60_hw_rx(struct net_device *ndev) +{ + struct enc28j60_net *priv = netdev_priv(ndev); + struct device *dev = &priv->spi->dev; + struct sk_buff *skb = NULL; + u16 erxrdpt, next_packet, rxstat; + u8 rsv[RSV_SIZE]; + int len; + + if (netif_msg_rx_status(priv)) + netdev_printk(KERN_DEBUG, ndev, "RX pk_addr:0x%04x\n", + priv->next_pk_ptr); + + if (unlikely(priv->next_pk_ptr > RXEND_INIT)) { + if (netif_msg_rx_err(priv)) + netdev_err(ndev, "%s() Invalid packet address!! 0x%04x\n", + __func__, priv->next_pk_ptr); + /* packet address corrupted: reset RX logic */ + mutex_lock(&priv->lock); + nolock_reg_bfclr(priv, ECON1, ECON1_RXEN); + nolock_reg_bfset(priv, ECON1, ECON1_RXRST); + nolock_reg_bfclr(priv, ECON1, ECON1_RXRST); + nolock_rxfifo_init(priv, RXSTART_INIT, RXEND_INIT); + nolock_reg_bfclr(priv, EIR, EIR_RXERIF); + nolock_reg_bfset(priv, ECON1, ECON1_RXEN); + mutex_unlock(&priv->lock); + ndev->stats.rx_errors++; + return; + } + /* Read next packet pointer and rx status vector */ + enc28j60_mem_read(priv, priv->next_pk_ptr, sizeof(rsv), rsv); + + next_packet = rsv[1]; + next_packet <<= 8; + next_packet |= rsv[0]; + + len = rsv[3]; + len <<= 8; + len |= rsv[2]; + + rxstat = rsv[5]; + rxstat <<= 8; + rxstat |= rsv[4]; + + if (netif_msg_rx_status(priv)) + enc28j60_dump_rsv(priv, __func__, next_packet, len, rxstat); + + if (!RSV_GETBIT(rxstat, RSV_RXOK) || len > MAX_FRAMELEN) { + if (netif_msg_rx_err(priv)) + netdev_err(ndev, "Rx Error (%04x)\n", rxstat); + ndev->stats.rx_errors++; + if (RSV_GETBIT(rxstat, RSV_CRCERROR)) + ndev->stats.rx_crc_errors++; + if (RSV_GETBIT(rxstat, RSV_LENCHECKERR)) + ndev->stats.rx_frame_errors++; + if (len > MAX_FRAMELEN) + ndev->stats.rx_over_errors++; + } else { + skb = netdev_alloc_skb(ndev, len + NET_IP_ALIGN); + if (!skb) { + if (netif_msg_rx_err(priv)) + netdev_err(ndev, "out of memory for Rx'd frame\n"); + ndev->stats.rx_dropped++; + } else { + skb_reserve(skb, NET_IP_ALIGN); + /* copy the packet from the receive buffer */ + enc28j60_mem_read(priv, + rx_packet_start(priv->next_pk_ptr), + len, skb_put(skb, len)); + if (netif_msg_pktdata(priv)) + dump_packet(__func__, skb->len, skb->data); + skb->protocol = eth_type_trans(skb, ndev); + /* update statistics */ + ndev->stats.rx_packets++; + ndev->stats.rx_bytes += len; + netif_rx(skb); + } + } + /* + * Move the RX read pointer to the start of the next + * received packet. + * This frees the memory we just read out. 
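Because of the errata, ERXRDPT must only ever be written with an odd address, which is what erxrdpt_workaround() guarantees by using next_packet - 1 and falling back to the buffer end when that value would land outside the FIFO. Two concrete cases with the RX FIFO used here (RXSTART_INIT = 0x0000, RXEND_INIT = 0x19FF):

        erxrdpt_workaround(0x0600, 0x0000, 0x19FF);  /* -> 0x05FF: odd, one byte below the frame */
        erxrdpt_workaround(0x0000, 0x0000, 0x19FF);  /* 0x0000 - 1 falls outside the FIFO -> 0x19FF */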
+ */ + erxrdpt = erxrdpt_workaround(next_packet, RXSTART_INIT, RXEND_INIT); + if (netif_msg_hw(priv)) + dev_printk(KERN_DEBUG, dev, "%s() ERXRDPT:0x%04x\n", + __func__, erxrdpt); + + mutex_lock(&priv->lock); + nolock_regw_write(priv, ERXRDPTL, erxrdpt); +#ifdef CONFIG_ENC28J60_WRITEVERIFY + if (netif_msg_drv(priv)) { + u16 reg; + reg = nolock_regw_read(priv, ERXRDPTL); + if (reg != erxrdpt) + dev_printk(KERN_DEBUG, dev, + "%s() ERXRDPT verify error (0x%04x - 0x%04x)\n", + __func__, reg, erxrdpt); + } +#endif + priv->next_pk_ptr = next_packet; + /* we are done with this packet, decrement the packet counter */ + nolock_reg_bfset(priv, ECON2, ECON2_PKTDEC); + mutex_unlock(&priv->lock); +} + +/* + * Calculate free space in RxFIFO + */ +static int enc28j60_get_free_rxfifo(struct enc28j60_net *priv) +{ + struct net_device *ndev = priv->netdev; + int epkcnt, erxst, erxnd, erxwr, erxrd; + int free_space; + + mutex_lock(&priv->lock); + epkcnt = nolock_regb_read(priv, EPKTCNT); + if (epkcnt >= 255) + free_space = -1; + else { + erxst = nolock_regw_read(priv, ERXSTL); + erxnd = nolock_regw_read(priv, ERXNDL); + erxwr = nolock_regw_read(priv, ERXWRPTL); + erxrd = nolock_regw_read(priv, ERXRDPTL); + + if (erxwr > erxrd) + free_space = (erxnd - erxst) - (erxwr - erxrd); + else if (erxwr == erxrd) + free_space = (erxnd - erxst); + else + free_space = erxrd - erxwr - 1; + } + mutex_unlock(&priv->lock); + if (netif_msg_rx_status(priv)) + netdev_printk(KERN_DEBUG, ndev, "%s() free_space = %d\n", + __func__, free_space); + return free_space; +} + +/* + * Access the PHY to determine link status + */ +static void enc28j60_check_link_status(struct net_device *ndev) +{ + struct enc28j60_net *priv = netdev_priv(ndev); + struct device *dev = &priv->spi->dev; + u16 reg; + int duplex; + + reg = enc28j60_phy_read(priv, PHSTAT2); + if (netif_msg_hw(priv)) + dev_printk(KERN_DEBUG, dev, + "%s() PHSTAT1: %04x, PHSTAT2: %04x\n", __func__, + enc28j60_phy_read(priv, PHSTAT1), reg); + duplex = reg & PHSTAT2_DPXSTAT; + + if (reg & PHSTAT2_LSTAT) { + netif_carrier_on(ndev); + if (netif_msg_ifup(priv)) + netdev_info(ndev, "link up - %s\n", + duplex ? "Full duplex" : "Half duplex"); + } else { + if (netif_msg_ifdown(priv)) + netdev_info(ndev, "link down\n"); + netif_carrier_off(ndev); + } +} + +static void enc28j60_tx_clear(struct net_device *ndev, bool err) +{ + struct enc28j60_net *priv = netdev_priv(ndev); + + if (err) + ndev->stats.tx_errors++; + else + ndev->stats.tx_packets++; + + if (priv->tx_skb) { + if (!err) + ndev->stats.tx_bytes += priv->tx_skb->len; + dev_kfree_skb(priv->tx_skb); + priv->tx_skb = NULL; + } + locked_reg_bfclr(priv, ECON1, ECON1_TXRTS); + netif_wake_queue(ndev); +} + +/* + * RX handler + * Ignore PKTIF because is unreliable! (Look at the errata datasheet) + * Check EPKTCNT is the suggested workaround. + * We don't need to clear interrupt flag, automatically done when + * enc28j60_hw_rx() decrements the packet counter. + * Returns how many packet processed. 
+ */ +static int enc28j60_rx_interrupt(struct net_device *ndev) +{ + struct enc28j60_net *priv = netdev_priv(ndev); + int pk_counter, ret; + + pk_counter = locked_regb_read(priv, EPKTCNT); + if (pk_counter && netif_msg_intr(priv)) + netdev_printk(KERN_DEBUG, ndev, "intRX, pk_cnt: %d\n", + pk_counter); + if (pk_counter > priv->max_pk_counter) { + /* update statistics */ + priv->max_pk_counter = pk_counter; + if (netif_msg_rx_status(priv) && priv->max_pk_counter > 1) + netdev_printk(KERN_DEBUG, ndev, "RX max_pk_cnt: %d\n", + priv->max_pk_counter); + } + ret = pk_counter; + while (pk_counter-- > 0) + enc28j60_hw_rx(ndev); + + return ret; +} + +static void enc28j60_irq_work_handler(struct work_struct *work) +{ + struct enc28j60_net *priv = + container_of(work, struct enc28j60_net, irq_work); + struct net_device *ndev = priv->netdev; + int intflags, loop; + + /* disable further interrupts */ + locked_reg_bfclr(priv, EIE, EIE_INTIE); + + do { + loop = 0; + intflags = locked_regb_read(priv, EIR); + /* DMA interrupt handler (not currently used) */ + if ((intflags & EIR_DMAIF) != 0) { + loop++; + if (netif_msg_intr(priv)) + netdev_printk(KERN_DEBUG, ndev, "intDMA(%d)\n", + loop); + locked_reg_bfclr(priv, EIR, EIR_DMAIF); + } + /* LINK changed handler */ + if ((intflags & EIR_LINKIF) != 0) { + loop++; + if (netif_msg_intr(priv)) + netdev_printk(KERN_DEBUG, ndev, "intLINK(%d)\n", + loop); + enc28j60_check_link_status(ndev); + /* read PHIR to clear the flag */ + enc28j60_phy_read(priv, PHIR); + } + /* TX complete handler */ + if (((intflags & EIR_TXIF) != 0) && + ((intflags & EIR_TXERIF) == 0)) { + bool err = false; + loop++; + if (netif_msg_intr(priv)) + netdev_printk(KERN_DEBUG, ndev, "intTX(%d)\n", + loop); + priv->tx_retry_count = 0; + if (locked_regb_read(priv, ESTAT) & ESTAT_TXABRT) { + if (netif_msg_tx_err(priv)) + netdev_err(ndev, "Tx Error (aborted)\n"); + err = true; + } + if (netif_msg_tx_done(priv)) { + u8 tsv[TSV_SIZE]; + enc28j60_read_tsv(priv, tsv); + enc28j60_dump_tsv(priv, "Tx Done", tsv); + } + enc28j60_tx_clear(ndev, err); + locked_reg_bfclr(priv, EIR, EIR_TXIF); + } + /* TX Error handler */ + if ((intflags & EIR_TXERIF) != 0) { + u8 tsv[TSV_SIZE]; + + loop++; + if (netif_msg_intr(priv)) + netdev_printk(KERN_DEBUG, ndev, "intTXErr(%d)\n", + loop); + locked_reg_bfclr(priv, ECON1, ECON1_TXRTS); + enc28j60_read_tsv(priv, tsv); + if (netif_msg_tx_err(priv)) + enc28j60_dump_tsv(priv, "Tx Error", tsv); + /* Reset TX logic */ + mutex_lock(&priv->lock); + nolock_reg_bfset(priv, ECON1, ECON1_TXRST); + nolock_reg_bfclr(priv, ECON1, ECON1_TXRST); + nolock_txfifo_init(priv, TXSTART_INIT, TXEND_INIT); + mutex_unlock(&priv->lock); + /* Transmit Late collision check for retransmit */ + if (TSV_GETBIT(tsv, TSV_TXLATECOLLISION)) { + if (netif_msg_tx_err(priv)) + netdev_printk(KERN_DEBUG, ndev, + "LateCollision TXErr (%d)\n", + priv->tx_retry_count); + if (priv->tx_retry_count++ < MAX_TX_RETRYCOUNT) + locked_reg_bfset(priv, ECON1, + ECON1_TXRTS); + else + enc28j60_tx_clear(ndev, true); + } else + enc28j60_tx_clear(ndev, true); + locked_reg_bfclr(priv, EIR, EIR_TXERIF | EIR_TXIF); + } + /* RX Error handler */ + if ((intflags & EIR_RXERIF) != 0) { + loop++; + if (netif_msg_intr(priv)) + netdev_printk(KERN_DEBUG, ndev, "intRXErr(%d)\n", + loop); + /* Check free FIFO space to flag RX overrun */ + if (enc28j60_get_free_rxfifo(priv) <= 0) { + if (netif_msg_rx_err(priv)) + netdev_printk(KERN_DEBUG, ndev, "RX Overrun\n"); + ndev->stats.rx_dropped++; + } + locked_reg_bfclr(priv, EIR, EIR_RXERIF); + } + /* RX 
handler */ + if (enc28j60_rx_interrupt(ndev)) + loop++; + } while (loop); + + /* re-enable interrupts */ + locked_reg_bfset(priv, EIE, EIE_INTIE); +} + +/* + * Hardware transmit function. + * Fill the buffer memory and send the contents of the transmit buffer + * onto the network + */ +static void enc28j60_hw_tx(struct enc28j60_net *priv) +{ + struct net_device *ndev = priv->netdev; + + BUG_ON(!priv->tx_skb); + + if (netif_msg_tx_queued(priv)) + netdev_printk(KERN_DEBUG, ndev, "Tx Packet Len:%d\n", + priv->tx_skb->len); + + if (netif_msg_pktdata(priv)) + dump_packet(__func__, + priv->tx_skb->len, priv->tx_skb->data); + enc28j60_packet_write(priv, priv->tx_skb->len, priv->tx_skb->data); + +#ifdef CONFIG_ENC28J60_WRITEVERIFY + /* readback and verify written data */ + if (netif_msg_drv(priv)) { + struct device *dev = &priv->spi->dev; + int test_len, k; + u8 test_buf[64]; /* limit the test to the first 64 bytes */ + int okflag; + + test_len = priv->tx_skb->len; + if (test_len > sizeof(test_buf)) + test_len = sizeof(test_buf); + + /* + 1 to skip control byte */ + enc28j60_mem_read(priv, TXSTART_INIT + 1, test_len, test_buf); + okflag = 1; + for (k = 0; k < test_len; k++) { + if (priv->tx_skb->data[k] != test_buf[k]) { + dev_printk(KERN_DEBUG, dev, + "Error, %d location differ: 0x%02x-0x%02x\n", + k, priv->tx_skb->data[k], test_buf[k]); + okflag = 0; + } + } + if (!okflag) + dev_printk(KERN_DEBUG, dev, "Tx write buffer, verify ERROR!\n"); + } +#endif + /* set TX request flag */ + locked_reg_bfset(priv, ECON1, ECON1_TXRTS); +} + +static netdev_tx_t enc28j60_send_packet(struct sk_buff *skb, + struct net_device *dev) +{ + struct enc28j60_net *priv = netdev_priv(dev); + + /* If some error occurs while trying to transmit this + * packet, you should return '1' from this function. + * In such a case you _may not_ do anything to the + * SKB, it is still owned by the network queueing + * layer when an error is returned. This means you + * may not modify any SKB fields, you may not free + * the SKB, etc. + */ + netif_stop_queue(dev); + + /* Remember the skb for deferred processing */ + priv->tx_skb = skb; + schedule_work(&priv->tx_work); + + return NETDEV_TX_OK; +} + +static void enc28j60_tx_work_handler(struct work_struct *work) +{ + struct enc28j60_net *priv = + container_of(work, struct enc28j60_net, tx_work); + + /* actual delivery of data */ + enc28j60_hw_tx(priv); +} + +static irqreturn_t enc28j60_irq(int irq, void *dev_id) +{ + struct enc28j60_net *priv = dev_id; + + /* + * Can't do anything in interrupt context because we need to + * block (spi_sync() is blocking) so fire of the interrupt + * handling workqueue. + * Remember that we access enc28j60 registers through SPI bus + * via spi_sync() call. + */ + schedule_work(&priv->irq_work); + + return IRQ_HANDLED; +} + +static void enc28j60_tx_timeout(struct net_device *ndev, unsigned int txqueue) +{ + struct enc28j60_net *priv = netdev_priv(ndev); + + if (netif_msg_timer(priv)) + netdev_err(ndev, "tx timeout\n"); + + ndev->stats.tx_errors++; + /* can't restart safely under softirq */ + schedule_work(&priv->restart_work); +} + +/* + * Open/initialize the board. This is called (in the current kernel) + * sometime after booting when the 'ifconfig' program is run. + * + * This routine should set everything up anew at each open, even + * registers that "should" only need to be set once at boot, so that + * there is non-reboot way to recover if something goes wrong. 
+ */ +static int enc28j60_net_open(struct net_device *dev) +{ + struct enc28j60_net *priv = netdev_priv(dev); + + if (!is_valid_ether_addr(dev->dev_addr)) { + if (netif_msg_ifup(priv)) + netdev_err(dev, "invalid MAC address %pM\n", dev->dev_addr); + return -EADDRNOTAVAIL; + } + /* Reset the hardware here (and take it out of low power mode) */ + enc28j60_lowpower(priv, false); + enc28j60_hw_disable(priv); + if (!enc28j60_hw_init(priv)) { + if (netif_msg_ifup(priv)) + netdev_err(dev, "hw_reset() failed\n"); + return -EINVAL; + } + /* Update the MAC address (in case user has changed it) */ + enc28j60_set_hw_macaddr(dev); + /* Enable interrupts */ + enc28j60_hw_enable(priv); + /* check link status */ + enc28j60_check_link_status(dev); + /* We are now ready to accept transmit requests from + * the queueing layer of the networking. + */ + netif_start_queue(dev); + + return 0; +} + +/* The inverse routine to net_open(). */ +static int enc28j60_net_close(struct net_device *dev) +{ + struct enc28j60_net *priv = netdev_priv(dev); + + enc28j60_hw_disable(priv); + enc28j60_lowpower(priv, true); + netif_stop_queue(dev); + + return 0; +} + +/* + * Set or clear the multicast filter for this adapter + * num_addrs == -1 Promiscuous mode, receive all packets + * num_addrs == 0 Normal mode, filter out multicast packets + * num_addrs > 0 Multicast mode, receive normal and MC packets + */ +static void enc28j60_set_multicast_list(struct net_device *dev) +{ + struct enc28j60_net *priv = netdev_priv(dev); + int oldfilter = priv->rxfilter; + + if (dev->flags & IFF_PROMISC) { + if (netif_msg_link(priv)) + netdev_info(dev, "promiscuous mode\n"); + priv->rxfilter = RXFILTER_PROMISC; + } else if ((dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev)) { + if (netif_msg_link(priv)) + netdev_info(dev, "%smulticast mode\n", + (dev->flags & IFF_ALLMULTI) ? "all-" : ""); + priv->rxfilter = RXFILTER_MULTI; + } else { + if (netif_msg_link(priv)) + netdev_info(dev, "normal mode\n"); + priv->rxfilter = RXFILTER_NORMAL; + } + + if (oldfilter != priv->rxfilter) + schedule_work(&priv->setrx_work); +} + +static void enc28j60_setrx_work_handler(struct work_struct *work) +{ + struct enc28j60_net *priv = + container_of(work, struct enc28j60_net, setrx_work); + struct device *dev = &priv->spi->dev; + + if (priv->rxfilter == RXFILTER_PROMISC) { + if (netif_msg_drv(priv)) + dev_printk(KERN_DEBUG, dev, "promiscuous mode\n"); + locked_regb_write(priv, ERXFCON, 0x00); + } else if (priv->rxfilter == RXFILTER_MULTI) { + if (netif_msg_drv(priv)) + dev_printk(KERN_DEBUG, dev, "multicast mode\n"); + locked_regb_write(priv, ERXFCON, + ERXFCON_UCEN | ERXFCON_CRCEN | + ERXFCON_BCEN | ERXFCON_MCEN); + } else { + if (netif_msg_drv(priv)) + dev_printk(KERN_DEBUG, dev, "normal mode\n"); + locked_regb_write(priv, ERXFCON, + ERXFCON_UCEN | ERXFCON_CRCEN | + ERXFCON_BCEN); + } +} + +static void enc28j60_restart_work_handler(struct work_struct *work) +{ + struct enc28j60_net *priv = + container_of(work, struct enc28j60_net, restart_work); + struct net_device *ndev = priv->netdev; + int ret; + + rtnl_lock(); + if (netif_running(ndev)) { + enc28j60_net_close(ndev); + ret = enc28j60_net_open(ndev); + if (unlikely(ret)) { + netdev_info(ndev, "could not restart %d\n", ret); + dev_close(ndev); + } + } + rtnl_unlock(); +} + +/* ......................... ETHTOOL SUPPORT ........................... 
*/ + +static void +enc28j60_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) +{ + strscpy(info->driver, DRV_NAME, sizeof(info->driver)); + strscpy(info->version, DRV_VERSION, sizeof(info->version)); + strscpy(info->bus_info, + dev_name(dev->dev.parent), sizeof(info->bus_info)); +} + +static int +enc28j60_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) +{ + struct enc28j60_net *priv = netdev_priv(dev); + + ethtool_link_ksettings_zero_link_mode(cmd, supported); + ethtool_link_ksettings_add_link_mode(cmd, supported, 10baseT_Half); + ethtool_link_ksettings_add_link_mode(cmd, supported, 10baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, supported, TP); + + cmd->base.speed = SPEED_10; + cmd->base.duplex = priv->full_duplex ? DUPLEX_FULL : DUPLEX_HALF; + cmd->base.port = PORT_TP; + cmd->base.autoneg = AUTONEG_DISABLE; + + return 0; +} + +static int +enc28j60_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) +{ + return enc28j60_setlink(dev, cmd->base.autoneg, + cmd->base.speed, cmd->base.duplex); +} + +static u32 enc28j60_get_msglevel(struct net_device *dev) +{ + struct enc28j60_net *priv = netdev_priv(dev); + return priv->msg_enable; +} + +static void enc28j60_set_msglevel(struct net_device *dev, u32 val) +{ + struct enc28j60_net *priv = netdev_priv(dev); + priv->msg_enable = val; +} + +static const struct ethtool_ops enc28j60_ethtool_ops = { + .get_drvinfo = enc28j60_get_drvinfo, + .get_msglevel = enc28j60_get_msglevel, + .set_msglevel = enc28j60_set_msglevel, + .get_link_ksettings = enc28j60_get_link_ksettings, + .set_link_ksettings = enc28j60_set_link_ksettings, +}; + +static int enc28j60_chipset_init(struct net_device *dev) +{ + struct enc28j60_net *priv = netdev_priv(dev); + + return enc28j60_hw_init(priv); +} + +static const struct net_device_ops enc28j60_netdev_ops = { + .ndo_open = enc28j60_net_open, + .ndo_stop = enc28j60_net_close, + .ndo_start_xmit = enc28j60_send_packet, + .ndo_set_rx_mode = enc28j60_set_multicast_list, + .ndo_set_mac_address = enc28j60_set_mac_address, + .ndo_tx_timeout = enc28j60_tx_timeout, + .ndo_validate_addr = eth_validate_addr, +}; + +static int enc28j60_probe(struct spi_device *spi) +{ + struct net_device *dev; + struct enc28j60_net *priv; + int ret = 0; + + if (netif_msg_drv(&debug)) + dev_info(&spi->dev, "Ethernet driver %s loaded\n", DRV_VERSION); + + dev = alloc_etherdev(sizeof(struct enc28j60_net)); + if (!dev) { + ret = -ENOMEM; + goto error_alloc; + } + priv = netdev_priv(dev); + + priv->netdev = dev; /* priv to netdev reference */ + priv->spi = spi; /* priv to spi reference */ + priv->msg_enable = netif_msg_init(debug.msg_enable, ENC28J60_MSG_DEFAULT); + mutex_init(&priv->lock); + INIT_WORK(&priv->tx_work, enc28j60_tx_work_handler); + INIT_WORK(&priv->setrx_work, enc28j60_setrx_work_handler); + INIT_WORK(&priv->irq_work, enc28j60_irq_work_handler); + INIT_WORK(&priv->restart_work, enc28j60_restart_work_handler); + spi_set_drvdata(spi, priv); /* spi to priv reference */ + SET_NETDEV_DEV(dev, &spi->dev); + + if (!enc28j60_chipset_init(dev)) { + if (netif_msg_probe(priv)) + dev_info(&spi->dev, "chip not found\n"); + ret = -EIO; + goto error_irq; + } + + if (device_get_ethdev_address(&spi->dev, dev)) + eth_hw_addr_random(dev); + enc28j60_set_hw_macaddr(dev); + + /* Board setup must set the relevant edge trigger type; + * level triggers won't currently work. 
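On device-tree based boards this usually means describing the chip as an SPI child node with an edge-triggered interrupt line. An illustrative node follows; only the compatible string comes from this driver (see enc28j60_dt_ids below), while the SPI bus, pin number and clock rate are assumptions made for the sketch:

        ethernet@0 {
                compatible = "microchip,enc28j60";
                reg = <0>;
                spi-max-frequency = <12000000>;
                interrupt-parent = <&gpio>;
                interrupts = <25 IRQ_TYPE_EDGE_FALLING>;
        };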
+ */ + ret = request_irq(spi->irq, enc28j60_irq, 0, DRV_NAME, priv); + if (ret < 0) { + if (netif_msg_probe(priv)) + dev_err(&spi->dev, "request irq %d failed (ret = %d)\n", + spi->irq, ret); + goto error_irq; + } + + dev->if_port = IF_PORT_10BASET; + dev->irq = spi->irq; + dev->netdev_ops = &enc28j60_netdev_ops; + dev->watchdog_timeo = TX_TIMEOUT; + dev->ethtool_ops = &enc28j60_ethtool_ops; + + enc28j60_lowpower(priv, true); + + ret = register_netdev(dev); + if (ret) { + if (netif_msg_probe(priv)) + dev_err(&spi->dev, "register netdev failed (ret = %d)\n", + ret); + goto error_register; + } + + return 0; + +error_register: + free_irq(spi->irq, priv); +error_irq: + free_netdev(dev); +error_alloc: + return ret; +} + +static void enc28j60_remove(struct spi_device *spi) +{ + struct enc28j60_net *priv = spi_get_drvdata(spi); + + unregister_netdev(priv->netdev); + free_irq(spi->irq, priv); + free_netdev(priv->netdev); +} + +static const struct of_device_id enc28j60_dt_ids[] = { + { .compatible = "microchip,enc28j60" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, enc28j60_dt_ids); + +static struct spi_driver enc28j60_driver = { + .driver = { + .name = DRV_NAME, + .of_match_table = enc28j60_dt_ids, + }, + .probe = enc28j60_probe, + .remove = enc28j60_remove, +}; +module_spi_driver(enc28j60_driver); + +MODULE_DESCRIPTION(DRV_NAME " ethernet driver"); +MODULE_AUTHOR("Claudio Lanconelli <lanconelli.claudio@eptar.com>"); +MODULE_LICENSE("GPL"); +module_param_named(debug, debug.msg_enable, int, 0); +MODULE_PARM_DESC(debug, "Debug verbosity level in amount of bits set (0=none, ..., 31=all)"); +MODULE_ALIAS("spi:" DRV_NAME); diff --git a/drivers/net/ethernet/microchip/enc28j60_hw.h b/drivers/net/ethernet/microchip/enc28j60_hw.h new file mode 100644 index 000000000..da4ab1725 --- /dev/null +++ b/drivers/net/ethernet/microchip/enc28j60_hw.h @@ -0,0 +1,310 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * enc28j60_hw.h: EDTP FrameThrower style enc28j60 registers + * + * $Id: enc28j60_hw.h,v 1.9 2007/12/14 11:59:16 claudio Exp $ + */ + +#ifndef _ENC28J60_HW_H +#define _ENC28J60_HW_H + +/* + * ENC28J60 Control Registers + * Control register definitions are a combination of address, + * bank number, and Ethernet/MAC/PHY indicator bits. 
+ * - Register address (bits 0-4) + * - Bank number (bits 5-6) + * - MAC/MII indicator (bit 7) + */ +#define ADDR_MASK 0x1F +#define BANK_MASK 0x60 +#define SPRD_MASK 0x80 +/* All-bank registers */ +#define EIE 0x1B +#define EIR 0x1C +#define ESTAT 0x1D +#define ECON2 0x1E +#define ECON1 0x1F +/* Bank 0 registers */ +#define ERDPTL (0x00|0x00) +#define ERDPTH (0x01|0x00) +#define EWRPTL (0x02|0x00) +#define EWRPTH (0x03|0x00) +#define ETXSTL (0x04|0x00) +#define ETXSTH (0x05|0x00) +#define ETXNDL (0x06|0x00) +#define ETXNDH (0x07|0x00) +#define ERXSTL (0x08|0x00) +#define ERXSTH (0x09|0x00) +#define ERXNDL (0x0A|0x00) +#define ERXNDH (0x0B|0x00) +#define ERXRDPTL (0x0C|0x00) +#define ERXRDPTH (0x0D|0x00) +#define ERXWRPTL (0x0E|0x00) +#define ERXWRPTH (0x0F|0x00) +#define EDMASTL (0x10|0x00) +#define EDMASTH (0x11|0x00) +#define EDMANDL (0x12|0x00) +#define EDMANDH (0x13|0x00) +#define EDMADSTL (0x14|0x00) +#define EDMADSTH (0x15|0x00) +#define EDMACSL (0x16|0x00) +#define EDMACSH (0x17|0x00) +/* Bank 1 registers */ +#define EHT0 (0x00|0x20) +#define EHT1 (0x01|0x20) +#define EHT2 (0x02|0x20) +#define EHT3 (0x03|0x20) +#define EHT4 (0x04|0x20) +#define EHT5 (0x05|0x20) +#define EHT6 (0x06|0x20) +#define EHT7 (0x07|0x20) +#define EPMM0 (0x08|0x20) +#define EPMM1 (0x09|0x20) +#define EPMM2 (0x0A|0x20) +#define EPMM3 (0x0B|0x20) +#define EPMM4 (0x0C|0x20) +#define EPMM5 (0x0D|0x20) +#define EPMM6 (0x0E|0x20) +#define EPMM7 (0x0F|0x20) +#define EPMCSL (0x10|0x20) +#define EPMCSH (0x11|0x20) +#define EPMOL (0x14|0x20) +#define EPMOH (0x15|0x20) +#define EWOLIE (0x16|0x20) +#define EWOLIR (0x17|0x20) +#define ERXFCON (0x18|0x20) +#define EPKTCNT (0x19|0x20) +/* Bank 2 registers */ +#define MACON1 (0x00|0x40|SPRD_MASK) +/* #define MACON2 (0x01|0x40|SPRD_MASK) */ +#define MACON3 (0x02|0x40|SPRD_MASK) +#define MACON4 (0x03|0x40|SPRD_MASK) +#define MABBIPG (0x04|0x40|SPRD_MASK) +#define MAIPGL (0x06|0x40|SPRD_MASK) +#define MAIPGH (0x07|0x40|SPRD_MASK) +#define MACLCON1 (0x08|0x40|SPRD_MASK) +#define MACLCON2 (0x09|0x40|SPRD_MASK) +#define MAMXFLL (0x0A|0x40|SPRD_MASK) +#define MAMXFLH (0x0B|0x40|SPRD_MASK) +#define MAPHSUP (0x0D|0x40|SPRD_MASK) +#define MICON (0x11|0x40|SPRD_MASK) +#define MICMD (0x12|0x40|SPRD_MASK) +#define MIREGADR (0x14|0x40|SPRD_MASK) +#define MIWRL (0x16|0x40|SPRD_MASK) +#define MIWRH (0x17|0x40|SPRD_MASK) +#define MIRDL (0x18|0x40|SPRD_MASK) +#define MIRDH (0x19|0x40|SPRD_MASK) +/* Bank 3 registers */ +#define MAADR1 (0x00|0x60|SPRD_MASK) +#define MAADR0 (0x01|0x60|SPRD_MASK) +#define MAADR3 (0x02|0x60|SPRD_MASK) +#define MAADR2 (0x03|0x60|SPRD_MASK) +#define MAADR5 (0x04|0x60|SPRD_MASK) +#define MAADR4 (0x05|0x60|SPRD_MASK) +#define EBSTSD (0x06|0x60) +#define EBSTCON (0x07|0x60) +#define EBSTCSL (0x08|0x60) +#define EBSTCSH (0x09|0x60) +#define MISTAT (0x0A|0x60|SPRD_MASK) +#define EREVID (0x12|0x60) +#define ECOCON (0x15|0x60) +#define EFLOCON (0x17|0x60) +#define EPAUSL (0x18|0x60) +#define EPAUSH (0x19|0x60) +/* PHY registers */ +#define PHCON1 0x00 +#define PHSTAT1 0x01 +#define PHHID1 0x02 +#define PHHID2 0x03 +#define PHCON2 0x10 +#define PHSTAT2 0x11 +#define PHIE 0x12 +#define PHIR 0x13 +#define PHLCON 0x14 + +/* ENC28J60 EIE Register Bit Definitions */ +#define EIE_INTIE 0x80 +#define EIE_PKTIE 0x40 +#define EIE_DMAIE 0x20 +#define EIE_LINKIE 0x10 +#define EIE_TXIE 0x08 +/* #define EIE_WOLIE 0x04 (reserved) */ +#define EIE_TXERIE 0x02 +#define EIE_RXERIE 0x01 +/* ENC28J60 EIR Register Bit Definitions */ +#define EIR_PKTIF 0x40 +#define EIR_DMAIF 0x20 +#define 
EIR_LINKIF 0x10 +#define EIR_TXIF 0x08 +/* #define EIR_WOLIF 0x04 (reserved) */ +#define EIR_TXERIF 0x02 +#define EIR_RXERIF 0x01 +/* ENC28J60 ESTAT Register Bit Definitions */ +#define ESTAT_INT 0x80 +#define ESTAT_LATECOL 0x10 +#define ESTAT_RXBUSY 0x04 +#define ESTAT_TXABRT 0x02 +#define ESTAT_CLKRDY 0x01 +/* ENC28J60 ECON2 Register Bit Definitions */ +#define ECON2_AUTOINC 0x80 +#define ECON2_PKTDEC 0x40 +#define ECON2_PWRSV 0x20 +#define ECON2_VRPS 0x08 +/* ENC28J60 ECON1 Register Bit Definitions */ +#define ECON1_TXRST 0x80 +#define ECON1_RXRST 0x40 +#define ECON1_DMAST 0x20 +#define ECON1_CSUMEN 0x10 +#define ECON1_TXRTS 0x08 +#define ECON1_RXEN 0x04 +#define ECON1_BSEL1 0x02 +#define ECON1_BSEL0 0x01 +/* ENC28J60 MACON1 Register Bit Definitions */ +#define MACON1_LOOPBK 0x10 +#define MACON1_TXPAUS 0x08 +#define MACON1_RXPAUS 0x04 +#define MACON1_PASSALL 0x02 +#define MACON1_MARXEN 0x01 +/* ENC28J60 MACON2 Register Bit Definitions */ +#define MACON2_MARST 0x80 +#define MACON2_RNDRST 0x40 +#define MACON2_MARXRST 0x08 +#define MACON2_RFUNRST 0x04 +#define MACON2_MATXRST 0x02 +#define MACON2_TFUNRST 0x01 +/* ENC28J60 MACON3 Register Bit Definitions */ +#define MACON3_PADCFG2 0x80 +#define MACON3_PADCFG1 0x40 +#define MACON3_PADCFG0 0x20 +#define MACON3_TXCRCEN 0x10 +#define MACON3_PHDRLEN 0x08 +#define MACON3_HFRMLEN 0x04 +#define MACON3_FRMLNEN 0x02 +#define MACON3_FULDPX 0x01 +/* ENC28J60 MICMD Register Bit Definitions */ +#define MICMD_MIISCAN 0x02 +#define MICMD_MIIRD 0x01 +/* ENC28J60 MISTAT Register Bit Definitions */ +#define MISTAT_NVALID 0x04 +#define MISTAT_SCAN 0x02 +#define MISTAT_BUSY 0x01 +/* ENC28J60 ERXFCON Register Bit Definitions */ +#define ERXFCON_UCEN 0x80 +#define ERXFCON_ANDOR 0x40 +#define ERXFCON_CRCEN 0x20 +#define ERXFCON_PMEN 0x10 +#define ERXFCON_MPEN 0x08 +#define ERXFCON_HTEN 0x04 +#define ERXFCON_MCEN 0x02 +#define ERXFCON_BCEN 0x01 + +/* ENC28J60 PHY PHCON1 Register Bit Definitions */ +#define PHCON1_PRST 0x8000 +#define PHCON1_PLOOPBK 0x4000 +#define PHCON1_PPWRSV 0x0800 +#define PHCON1_PDPXMD 0x0100 +/* ENC28J60 PHY PHSTAT1 Register Bit Definitions */ +#define PHSTAT1_PFDPX 0x1000 +#define PHSTAT1_PHDPX 0x0800 +#define PHSTAT1_LLSTAT 0x0004 +#define PHSTAT1_JBSTAT 0x0002 +/* ENC28J60 PHY PHSTAT2 Register Bit Definitions */ +#define PHSTAT2_TXSTAT (1 << 13) +#define PHSTAT2_RXSTAT (1 << 12) +#define PHSTAT2_COLSTAT (1 << 11) +#define PHSTAT2_LSTAT (1 << 10) +#define PHSTAT2_DPXSTAT (1 << 9) +#define PHSTAT2_PLRITY (1 << 5) +/* ENC28J60 PHY PHCON2 Register Bit Definitions */ +#define PHCON2_FRCLINK 0x4000 +#define PHCON2_TXDIS 0x2000 +#define PHCON2_JABBER 0x0400 +#define PHCON2_HDLDIS 0x0100 +/* ENC28J60 PHY PHIE Register Bit Definitions */ +#define PHIE_PLNKIE (1 << 4) +#define PHIE_PGEIE (1 << 1) +/* ENC28J60 PHY PHIR Register Bit Definitions */ +#define PHIR_PLNKIF (1 << 4) +#define PHIR_PGEIF (1 << 1) + +/* ENC28J60 Packet Control Byte Bit Definitions */ +#define PKTCTRL_PHUGEEN 0x08 +#define PKTCTRL_PPADEN 0x04 +#define PKTCTRL_PCRCEN 0x02 +#define PKTCTRL_POVERRIDE 0x01 + +/* ENC28J60 Transmit Status Vector */ +#define TSV_TXBYTECNT 0 +#define TSV_TXCOLLISIONCNT 16 +#define TSV_TXCRCERROR 20 +#define TSV_TXLENCHKERROR 21 +#define TSV_TXLENOUTOFRANGE 22 +#define TSV_TXDONE 23 +#define TSV_TXMULTICAST 24 +#define TSV_TXBROADCAST 25 +#define TSV_TXPACKETDEFER 26 +#define TSV_TXEXDEFER 27 +#define TSV_TXEXCOLLISION 28 +#define TSV_TXLATECOLLISION 29 +#define TSV_TXGIANT 30 +#define TSV_TXUNDERRUN 31 +#define TSV_TOTBYTETXONWIRE 32 +#define 
TSV_TXCONTROLFRAME 48 +#define TSV_TXPAUSEFRAME 49 +#define TSV_BACKPRESSUREAPP 50 +#define TSV_TXVLANTAGFRAME 51 + +#define TSV_SIZE 7 +#define TSV_BYTEOF(x) ((x) / 8) +#define TSV_BITMASK(x) (1 << ((x) % 8)) +#define TSV_GETBIT(x, y) (((x)[TSV_BYTEOF(y)] & TSV_BITMASK(y)) ? 1 : 0) + +/* ENC28J60 Receive Status Vector */ +#define RSV_RXLONGEVDROPEV 16 +#define RSV_CARRIEREV 18 +#define RSV_CRCERROR 20 +#define RSV_LENCHECKERR 21 +#define RSV_LENOUTOFRANGE 22 +#define RSV_RXOK 23 +#define RSV_RXMULTICAST 24 +#define RSV_RXBROADCAST 25 +#define RSV_DRIBBLENIBBLE 26 +#define RSV_RXCONTROLFRAME 27 +#define RSV_RXPAUSEFRAME 28 +#define RSV_RXUNKNOWNOPCODE 29 +#define RSV_RXTYPEVLAN 30 + +#define RSV_SIZE 6 +#define RSV_BITMASK(x) (1 << ((x) - 16)) +#define RSV_GETBIT(x, y) (((x) & RSV_BITMASK(y)) ? 1 : 0) + + +/* SPI operation codes */ +#define ENC28J60_READ_CTRL_REG 0x00 +#define ENC28J60_READ_BUF_MEM 0x3A +#define ENC28J60_WRITE_CTRL_REG 0x40 +#define ENC28J60_WRITE_BUF_MEM 0x7A +#define ENC28J60_BIT_FIELD_SET 0x80 +#define ENC28J60_BIT_FIELD_CLR 0xA0 +#define ENC28J60_SOFT_RESET 0xFF + + +/* buffer boundaries applied to internal 8K ram + * entire available packet buffer space is allocated. + * Give TX buffer space for one full ethernet frame (~1500 bytes) + * receive buffer gets the rest */ +#define TXSTART_INIT 0x1A00 +#define TXEND_INIT 0x1FFF + +/* Put RX buffer at 0 as suggested by the Errata datasheet */ +#define RXSTART_INIT 0x0000 +#define RXEND_INIT 0x19FF + +/* maximum ethernet frame length */ +#define MAX_FRAMELEN 1518 + +/* Preferred half duplex: LEDA: Link status LEDB: Rx/Tx activity */ +#define ENC28J60_LAMPS_MODE 0x3476 + +#endif diff --git a/drivers/net/ethernet/microchip/encx24j600-regmap.c b/drivers/net/ethernet/microchip/encx24j600-regmap.c new file mode 100644 index 000000000..5693784ee --- /dev/null +++ b/drivers/net/ethernet/microchip/encx24j600-regmap.c @@ -0,0 +1,516 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Register map access API - ENCX24J600 support + * + * Copyright 2015 Gridpoint + * + * Author: Jon Ringle <jringle@gridpoint.com> + */ + +#include <linux/delay.h> +#include <linux/errno.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/netdevice.h> +#include <linux/regmap.h> +#include <linux/spi/spi.h> + +#include "encx24j600_hw.h" + +static int encx24j600_switch_bank(struct encx24j600_context *ctx, + int bank) +{ + int ret = 0; + int bank_opcode = BANK_SELECT(bank); + + ret = spi_write(ctx->spi, &bank_opcode, 1); + if (ret == 0) + ctx->bank = bank; + + return ret; +} + +static int encx24j600_cmdn(struct encx24j600_context *ctx, u8 opcode, + const void *buf, size_t len) +{ + struct spi_message m; + struct spi_transfer t[2] = { { .tx_buf = &opcode, .len = 1, }, + { .tx_buf = buf, .len = len }, }; + spi_message_init(&m); + spi_message_add_tail(&t[0], &m); + spi_message_add_tail(&t[1], &m); + + return spi_sync(ctx->spi, &m); +} + +static void regmap_lock_mutex(void *context) +{ + struct encx24j600_context *ctx = context; + + mutex_lock(&ctx->mutex); +} + +static void regmap_unlock_mutex(void *context) +{ + struct encx24j600_context *ctx = context; + + mutex_unlock(&ctx->mutex); +} + +static int regmap_encx24j600_sfr_read(void *context, u8 reg, u8 *val, + size_t len) +{ + struct encx24j600_context *ctx = context; + u8 banked_reg = reg & ADDR_MASK; + u8 bank = ((reg & BANK_MASK) >> BANK_SHIFT); + u8 cmd = RCRU; + int ret = 0; + int i = 0; + u8 tx_buf[2]; + + if (reg < 0x80) { + cmd = RCRCODE | banked_reg; + if ((banked_reg < 0x16) && 
(ctx->bank != bank)) + ret = encx24j600_switch_bank(ctx, bank); + if (unlikely(ret)) + return ret; + } else { + /* Translate registers that are more effecient using + * 3-byte SPI commands + */ + switch (reg) { + case EGPRDPT: + cmd = RGPRDPT; break; + case EGPWRPT: + cmd = RGPWRPT; break; + case ERXRDPT: + cmd = RRXRDPT; break; + case ERXWRPT: + cmd = RRXWRPT; break; + case EUDARDPT: + cmd = RUDARDPT; break; + case EUDAWRPT: + cmd = RUDAWRPT; break; + case EGPDATA: + case ERXDATA: + case EUDADATA: + default: + return -EINVAL; + } + } + + tx_buf[i++] = cmd; + if (cmd == RCRU) + tx_buf[i++] = reg; + + ret = spi_write_then_read(ctx->spi, tx_buf, i, val, len); + + return ret; +} + +static int regmap_encx24j600_sfr_update(struct encx24j600_context *ctx, + u8 reg, u8 *val, size_t len, + u8 unbanked_cmd, u8 banked_code) +{ + u8 banked_reg = reg & ADDR_MASK; + u8 bank = ((reg & BANK_MASK) >> BANK_SHIFT); + u8 cmd = unbanked_cmd; + struct spi_message m; + struct spi_transfer t[3] = { { .tx_buf = &cmd, .len = sizeof(cmd), }, + { .tx_buf = ®, .len = sizeof(reg), }, + { .tx_buf = val, .len = len }, }; + + if (reg < 0x80) { + int ret = 0; + + cmd = banked_code | banked_reg; + if ((banked_reg < 0x16) && (ctx->bank != bank)) + ret = encx24j600_switch_bank(ctx, bank); + if (unlikely(ret)) + return ret; + } else { + /* Translate registers that are more effecient using + * 3-byte SPI commands + */ + switch (reg) { + case EGPRDPT: + cmd = WGPRDPT; break; + case EGPWRPT: + cmd = WGPWRPT; break; + case ERXRDPT: + cmd = WRXRDPT; break; + case ERXWRPT: + cmd = WRXWRPT; break; + case EUDARDPT: + cmd = WUDARDPT; break; + case EUDAWRPT: + cmd = WUDAWRPT; break; + case EGPDATA: + case ERXDATA: + case EUDADATA: + default: + return -EINVAL; + } + } + + spi_message_init(&m); + spi_message_add_tail(&t[0], &m); + + if (cmd == unbanked_cmd) { + t[1].tx_buf = ® + spi_message_add_tail(&t[1], &m); + } + + spi_message_add_tail(&t[2], &m); + return spi_sync(ctx->spi, &m); +} + +static int regmap_encx24j600_sfr_write(void *context, u8 reg, u8 *val, + size_t len) +{ + struct encx24j600_context *ctx = context; + + return regmap_encx24j600_sfr_update(ctx, reg, val, len, WCRU, WCRCODE); +} + +static int regmap_encx24j600_sfr_set_bits(struct encx24j600_context *ctx, + u8 reg, u8 val) +{ + return regmap_encx24j600_sfr_update(ctx, reg, &val, 1, BFSU, BFSCODE); +} + +static int regmap_encx24j600_sfr_clr_bits(struct encx24j600_context *ctx, + u8 reg, u8 val) +{ + return regmap_encx24j600_sfr_update(ctx, reg, &val, 1, BFCU, BFCCODE); +} + +static int regmap_encx24j600_reg_update_bits(void *context, unsigned int reg, + unsigned int mask, + unsigned int val) +{ + struct encx24j600_context *ctx = context; + + int ret = 0; + unsigned int set_mask = mask & val; + unsigned int clr_mask = mask & ~val; + + if ((reg >= 0x40 && reg < 0x6c) || reg >= 0x80) + return -EINVAL; + + if (set_mask & 0xff) + ret = regmap_encx24j600_sfr_set_bits(ctx, reg, set_mask); + + set_mask = (set_mask & 0xff00) >> 8; + + if ((set_mask & 0xff) && (ret == 0)) + ret = regmap_encx24j600_sfr_set_bits(ctx, reg + 1, set_mask); + + if ((clr_mask & 0xff) && (ret == 0)) + ret = regmap_encx24j600_sfr_clr_bits(ctx, reg, clr_mask); + + clr_mask = (clr_mask & 0xff00) >> 8; + + if ((clr_mask & 0xff) && (ret == 0)) + ret = regmap_encx24j600_sfr_clr_bits(ctx, reg + 1, clr_mask); + + return ret; +} + +int regmap_encx24j600_spi_write(void *context, u8 reg, const u8 *data, + size_t count) +{ + struct encx24j600_context *ctx = context; + + if (reg < 0xc0) + return encx24j600_cmdn(ctx, 
reg, data, count); + + /* SPI 1-byte command. Ignore data */ + return spi_write(ctx->spi, ®, 1); +} +EXPORT_SYMBOL_GPL(regmap_encx24j600_spi_write); + +int regmap_encx24j600_spi_read(void *context, u8 reg, u8 *data, size_t count) +{ + struct encx24j600_context *ctx = context; + + if (reg == RBSEL && count > 1) + count = 1; + + return spi_write_then_read(ctx->spi, ®, sizeof(reg), data, count); +} +EXPORT_SYMBOL_GPL(regmap_encx24j600_spi_read); + +static int regmap_encx24j600_write(void *context, const void *data, + size_t len) +{ + u8 *dout = (u8 *)data; + u8 reg = dout[0]; + ++dout; + --len; + + if (reg > 0xa0) + return regmap_encx24j600_spi_write(context, reg, dout, len); + + if (len > 2) + return -EINVAL; + + return regmap_encx24j600_sfr_write(context, reg, dout, len); +} + +static int regmap_encx24j600_read(void *context, + const void *reg_buf, size_t reg_size, + void *val, size_t val_size) +{ + u8 reg = *(const u8 *)reg_buf; + + if (reg_size != 1) { + pr_err("%s: reg=%02x reg_size=%zu\n", __func__, reg, reg_size); + return -EINVAL; + } + + if (reg > 0xa0) + return regmap_encx24j600_spi_read(context, reg, val, val_size); + + if (val_size > 2) { + pr_err("%s: reg=%02x val_size=%zu\n", __func__, reg, val_size); + return -EINVAL; + } + + return regmap_encx24j600_sfr_read(context, reg, val, val_size); +} + +static bool encx24j600_regmap_readable(struct device *dev, unsigned int reg) +{ + if ((reg < 0x36) || + ((reg >= 0x40) && (reg < 0x4c)) || + ((reg >= 0x52) && (reg < 0x56)) || + ((reg >= 0x60) && (reg < 0x66)) || + ((reg >= 0x68) && (reg < 0x80)) || + ((reg >= 0x86) && (reg < 0x92)) || + (reg == 0xc8)) + return true; + else + return false; +} + +static bool encx24j600_regmap_writeable(struct device *dev, unsigned int reg) +{ + if ((reg < 0x12) || + ((reg >= 0x14) && (reg < 0x1a)) || + ((reg >= 0x1c) && (reg < 0x36)) || + ((reg >= 0x40) && (reg < 0x4c)) || + ((reg >= 0x52) && (reg < 0x56)) || + ((reg >= 0x60) && (reg < 0x68)) || + ((reg >= 0x6c) && (reg < 0x80)) || + ((reg >= 0x86) && (reg < 0x92)) || + ((reg >= 0xc0) && (reg < 0xc8)) || + ((reg >= 0xca) && (reg < 0xf0))) + return true; + else + return false; +} + +static bool encx24j600_regmap_volatile(struct device *dev, unsigned int reg) +{ + switch (reg) { + case ERXHEAD: + case EDMACS: + case ETXSTAT: + case ETXWIRE: + case ECON1: /* Can be modified via single byte cmds */ + case ECON2: /* Can be modified via single byte cmds */ + case ESTAT: + case EIR: /* Can be modified via single byte cmds */ + case MIRD: + case MISTAT: + return true; + default: + break; + } + + return false; +} + +static bool encx24j600_regmap_precious(struct device *dev, unsigned int reg) +{ + /* single byte cmds are precious */ + if (((reg >= 0xc0) && (reg < 0xc8)) || + ((reg >= 0xca) && (reg < 0xf0))) + return true; + else + return false; +} + +static int regmap_encx24j600_phy_reg_read(void *context, unsigned int reg, + unsigned int *val) +{ + struct encx24j600_context *ctx = context; + int ret; + unsigned int mistat; + + reg = MIREGADR_VAL | (reg & PHREG_MASK); + ret = regmap_write(ctx->regmap, MIREGADR, reg); + if (unlikely(ret)) + goto err_out; + + ret = regmap_write(ctx->regmap, MICMD, MIIRD); + if (unlikely(ret)) + goto err_out; + + usleep_range(26, 100); + while (((ret = regmap_read(ctx->regmap, MISTAT, &mistat)) == 0) && + (mistat & BUSY)) + cpu_relax(); + + if (unlikely(ret)) + goto err_out; + + ret = regmap_write(ctx->regmap, MICMD, 0); + if (unlikely(ret)) + goto err_out; + + ret = regmap_read(ctx->regmap, MIRD, val); + +err_out: + if (ret) + 
pr_err("%s: error %d reading reg %02x\n", __func__, ret, + reg & PHREG_MASK); + + return ret; +} + +static int regmap_encx24j600_phy_reg_write(void *context, unsigned int reg, + unsigned int val) +{ + struct encx24j600_context *ctx = context; + int ret; + unsigned int mistat; + + reg = MIREGADR_VAL | (reg & PHREG_MASK); + ret = regmap_write(ctx->regmap, MIREGADR, reg); + if (unlikely(ret)) + goto err_out; + + ret = regmap_write(ctx->regmap, MIWR, val); + if (unlikely(ret)) + goto err_out; + + usleep_range(26, 100); + while (((ret = regmap_read(ctx->regmap, MISTAT, &mistat)) == 0) && + (mistat & BUSY)) + cpu_relax(); + +err_out: + if (ret) + pr_err("%s: error %d writing reg %02x=%04x\n", __func__, ret, + reg & PHREG_MASK, val); + + return ret; +} + +static bool encx24j600_phymap_readable(struct device *dev, unsigned int reg) +{ + switch (reg) { + case PHCON1: + case PHSTAT1: + case PHANA: + case PHANLPA: + case PHANE: + case PHCON2: + case PHSTAT2: + case PHSTAT3: + return true; + default: + return false; + } +} + +static bool encx24j600_phymap_writeable(struct device *dev, unsigned int reg) +{ + switch (reg) { + case PHCON1: + case PHCON2: + case PHANA: + return true; + case PHSTAT1: + case PHSTAT2: + case PHSTAT3: + case PHANLPA: + case PHANE: + default: + return false; + } +} + +static bool encx24j600_phymap_volatile(struct device *dev, unsigned int reg) +{ + switch (reg) { + case PHSTAT1: + case PHSTAT2: + case PHSTAT3: + case PHANLPA: + case PHANE: + case PHCON2: + return true; + default: + return false; + } +} + +static struct regmap_config regcfg = { + .name = "reg", + .reg_bits = 8, + .val_bits = 16, + .max_register = 0xee, + .reg_stride = 2, + .cache_type = REGCACHE_RBTREE, + .val_format_endian = REGMAP_ENDIAN_LITTLE, + .readable_reg = encx24j600_regmap_readable, + .writeable_reg = encx24j600_regmap_writeable, + .volatile_reg = encx24j600_regmap_volatile, + .precious_reg = encx24j600_regmap_precious, + .lock = regmap_lock_mutex, + .unlock = regmap_unlock_mutex, +}; + +static struct regmap_bus regmap_encx24j600 = { + .write = regmap_encx24j600_write, + .read = regmap_encx24j600_read, + .reg_update_bits = regmap_encx24j600_reg_update_bits, +}; + +static struct regmap_config phycfg = { + .name = "phy", + .reg_bits = 8, + .val_bits = 16, + .max_register = 0x1f, + .cache_type = REGCACHE_RBTREE, + .val_format_endian = REGMAP_ENDIAN_LITTLE, + .readable_reg = encx24j600_phymap_readable, + .writeable_reg = encx24j600_phymap_writeable, + .volatile_reg = encx24j600_phymap_volatile, +}; + +static struct regmap_bus phymap_encx24j600 = { + .reg_write = regmap_encx24j600_phy_reg_write, + .reg_read = regmap_encx24j600_phy_reg_read, +}; + +int devm_regmap_init_encx24j600(struct device *dev, + struct encx24j600_context *ctx) +{ + mutex_init(&ctx->mutex); + regcfg.lock_arg = ctx; + ctx->regmap = devm_regmap_init(dev, ®map_encx24j600, ctx, ®cfg); + if (IS_ERR(ctx->regmap)) + return PTR_ERR(ctx->regmap); + ctx->phymap = devm_regmap_init(dev, &phymap_encx24j600, ctx, &phycfg); + if (IS_ERR(ctx->phymap)) + return PTR_ERR(ctx->phymap); + + return 0; +} +EXPORT_SYMBOL_GPL(devm_regmap_init_encx24j600); + +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/microchip/encx24j600.c b/drivers/net/ethernet/microchip/encx24j600.c new file mode 100644 index 000000000..d7c8aa77e --- /dev/null +++ b/drivers/net/ethernet/microchip/encx24j600.c @@ -0,0 +1,1127 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Microchip ENCX24J600 ethernet driver + * + * Copyright (C) 2015 Gridpoint + * Author: Jon Ringle 
<jringle@gridpoint.com> + */ + +#include <linux/device.h> +#include <linux/errno.h> +#include <linux/etherdevice.h> +#include <linux/ethtool.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/netdevice.h> +#include <linux/regmap.h> +#include <linux/skbuff.h> +#include <linux/spi/spi.h> + +#include "encx24j600_hw.h" + +#define DRV_NAME "encx24j600" +#define DRV_VERSION "1.0" + +#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) +static int debug = -1; +module_param(debug, int, 0000); +MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); + +/* SRAM memory layout: + * + * 0x0000-0x05ff TX buffers 1.5KB (1*1536) reside in the GP area in SRAM + * 0x0600-0x5fff RX buffers 22.5KB (15*1536) reside in the RX area in SRAM + */ +#define ENC_TX_BUF_START 0x0000U +#define ENC_RX_BUF_START 0x0600U +#define ENC_RX_BUF_END 0x5fffU +#define ENC_SRAM_SIZE 0x6000U + +enum { + RXFILTER_NORMAL, + RXFILTER_MULTI, + RXFILTER_PROMISC +}; + +struct encx24j600_priv { + struct net_device *ndev; + struct mutex lock; /* device access lock */ + struct encx24j600_context ctx; + struct sk_buff *tx_skb; + struct task_struct *kworker_task; + struct kthread_worker kworker; + struct kthread_work tx_work; + struct kthread_work setrx_work; + u16 next_packet; + bool hw_enabled; + bool full_duplex; + bool autoneg; + u16 speed; + int rxfilter; + u32 msg_enable; +}; + +static void dump_packet(const char *msg, int len, const char *data) +{ + pr_debug(DRV_NAME ": %s - packet len:%d\n", msg, len); + print_hex_dump_bytes("pk data: ", DUMP_PREFIX_OFFSET, data, len); +} + +static void encx24j600_dump_rsv(struct encx24j600_priv *priv, const char *msg, + struct rsv *rsv) +{ + struct net_device *dev = priv->ndev; + + netdev_info(dev, "RX packet Len:%d\n", rsv->len); + netdev_dbg(dev, "%s - NextPk: 0x%04x\n", msg, + rsv->next_packet); + netdev_dbg(dev, "RxOK: %d, DribbleNibble: %d\n", + RSV_GETBIT(rsv->rxstat, RSV_RXOK), + RSV_GETBIT(rsv->rxstat, RSV_DRIBBLENIBBLE)); + netdev_dbg(dev, "CRCErr:%d, LenChkErr: %d, LenOutOfRange: %d\n", + RSV_GETBIT(rsv->rxstat, RSV_CRCERROR), + RSV_GETBIT(rsv->rxstat, RSV_LENCHECKERR), + RSV_GETBIT(rsv->rxstat, RSV_LENOUTOFRANGE)); + netdev_dbg(dev, "Multicast: %d, Broadcast: %d, LongDropEvent: %d, CarrierEvent: %d\n", + RSV_GETBIT(rsv->rxstat, RSV_RXMULTICAST), + RSV_GETBIT(rsv->rxstat, RSV_RXBROADCAST), + RSV_GETBIT(rsv->rxstat, RSV_RXLONGEVDROPEV), + RSV_GETBIT(rsv->rxstat, RSV_CARRIEREV)); + netdev_dbg(dev, "ControlFrame: %d, PauseFrame: %d, UnknownOp: %d, VLanTagFrame: %d\n", + RSV_GETBIT(rsv->rxstat, RSV_RXCONTROLFRAME), + RSV_GETBIT(rsv->rxstat, RSV_RXPAUSEFRAME), + RSV_GETBIT(rsv->rxstat, RSV_RXUNKNOWNOPCODE), + RSV_GETBIT(rsv->rxstat, RSV_RXTYPEVLAN)); +} + +static u16 encx24j600_read_reg(struct encx24j600_priv *priv, u8 reg) +{ + struct net_device *dev = priv->ndev; + unsigned int val = 0; + int ret = regmap_read(priv->ctx.regmap, reg, &val); + + if (unlikely(ret)) + netif_err(priv, drv, dev, "%s: error %d reading reg %02x\n", + __func__, ret, reg); + return val; +} + +static void encx24j600_write_reg(struct encx24j600_priv *priv, u8 reg, u16 val) +{ + struct net_device *dev = priv->ndev; + int ret = regmap_write(priv->ctx.regmap, reg, val); + + if (unlikely(ret)) + netif_err(priv, drv, dev, "%s: error %d writing reg %02x=%04x\n", + __func__, ret, reg, val); +} + +static void encx24j600_update_reg(struct encx24j600_priv *priv, u8 reg, + u16 mask, u16 val) +{ + struct net_device *dev = priv->ndev; + int ret = 
regmap_update_bits(priv->ctx.regmap, reg, mask, val); + + if (unlikely(ret)) + netif_err(priv, drv, dev, "%s: error %d updating reg %02x=%04x~%04x\n", + __func__, ret, reg, val, mask); +} + +static u16 encx24j600_read_phy(struct encx24j600_priv *priv, u8 reg) +{ + struct net_device *dev = priv->ndev; + unsigned int val = 0; + int ret = regmap_read(priv->ctx.phymap, reg, &val); + + if (unlikely(ret)) + netif_err(priv, drv, dev, "%s: error %d reading %02x\n", + __func__, ret, reg); + return val; +} + +static void encx24j600_write_phy(struct encx24j600_priv *priv, u8 reg, u16 val) +{ + struct net_device *dev = priv->ndev; + int ret = regmap_write(priv->ctx.phymap, reg, val); + + if (unlikely(ret)) + netif_err(priv, drv, dev, "%s: error %d writing reg %02x=%04x\n", + __func__, ret, reg, val); +} + +static void encx24j600_clr_bits(struct encx24j600_priv *priv, u8 reg, u16 mask) +{ + encx24j600_update_reg(priv, reg, mask, 0); +} + +static void encx24j600_set_bits(struct encx24j600_priv *priv, u8 reg, u16 mask) +{ + encx24j600_update_reg(priv, reg, mask, mask); +} + +static void encx24j600_cmd(struct encx24j600_priv *priv, u8 cmd) +{ + struct net_device *dev = priv->ndev; + int ret = regmap_write(priv->ctx.regmap, cmd, 0); + + if (unlikely(ret)) + netif_err(priv, drv, dev, "%s: error %d with cmd %02x\n", + __func__, ret, cmd); +} + +static int encx24j600_raw_read(struct encx24j600_priv *priv, u8 reg, u8 *data, + size_t count) +{ + int ret; + + mutex_lock(&priv->ctx.mutex); + ret = regmap_encx24j600_spi_read(&priv->ctx, reg, data, count); + mutex_unlock(&priv->ctx.mutex); + + return ret; +} + +static int encx24j600_raw_write(struct encx24j600_priv *priv, u8 reg, + const u8 *data, size_t count) +{ + int ret; + + mutex_lock(&priv->ctx.mutex); + ret = regmap_encx24j600_spi_write(&priv->ctx, reg, data, count); + mutex_unlock(&priv->ctx.mutex); + + return ret; +} + +static void encx24j600_update_phcon1(struct encx24j600_priv *priv) +{ + u16 phcon1 = encx24j600_read_phy(priv, PHCON1); + + if (priv->autoneg == AUTONEG_ENABLE) { + phcon1 |= ANEN | RENEG; + } else { + phcon1 &= ~ANEN; + if (priv->speed == SPEED_100) + phcon1 |= SPD100; + else + phcon1 &= ~SPD100; + + if (priv->full_duplex) + phcon1 |= PFULDPX; + else + phcon1 &= ~PFULDPX; + } + encx24j600_write_phy(priv, PHCON1, phcon1); +} + +/* Waits for autonegotiation to complete. */ +static int encx24j600_wait_for_autoneg(struct encx24j600_priv *priv) +{ + struct net_device *dev = priv->ndev; + unsigned long timeout = jiffies + msecs_to_jiffies(2000); + u16 phstat1; + u16 estat; + + phstat1 = encx24j600_read_phy(priv, PHSTAT1); + while ((phstat1 & ANDONE) == 0) { + if (time_after(jiffies, timeout)) { + u16 phstat3; + + netif_notice(priv, drv, dev, "timeout waiting for autoneg done\n"); + + priv->autoneg = AUTONEG_DISABLE; + phstat3 = encx24j600_read_phy(priv, PHSTAT3); + priv->speed = (phstat3 & PHY3SPD100) + ? SPEED_100 : SPEED_10; + priv->full_duplex = (phstat3 & PHY3DPX) ? 1 : 0; + encx24j600_update_phcon1(priv); + netif_notice(priv, drv, dev, "Using parallel detection: %s/%s", + priv->speed == SPEED_100 ? "100" : "10", + priv->full_duplex ? 
"Full" : "Half"); + + return -ETIMEDOUT; + } + cpu_relax(); + phstat1 = encx24j600_read_phy(priv, PHSTAT1); + } + + estat = encx24j600_read_reg(priv, ESTAT); + if (estat & PHYDPX) { + encx24j600_set_bits(priv, MACON2, FULDPX); + encx24j600_write_reg(priv, MABBIPG, 0x15); + } else { + encx24j600_clr_bits(priv, MACON2, FULDPX); + encx24j600_write_reg(priv, MABBIPG, 0x12); + /* Max retransmittions attempt */ + encx24j600_write_reg(priv, MACLCON, 0x370f); + } + + return 0; +} + +/* Access the PHY to determine link status */ +static void encx24j600_check_link_status(struct encx24j600_priv *priv) +{ + struct net_device *dev = priv->ndev; + u16 estat; + + estat = encx24j600_read_reg(priv, ESTAT); + + if (estat & PHYLNK) { + if (priv->autoneg == AUTONEG_ENABLE) + encx24j600_wait_for_autoneg(priv); + + netif_carrier_on(dev); + netif_info(priv, ifup, dev, "link up\n"); + } else { + netif_info(priv, ifdown, dev, "link down\n"); + + /* Re-enable autoneg since we won't know what we might be + * connected to when the link is brought back up again. + */ + priv->autoneg = AUTONEG_ENABLE; + priv->full_duplex = true; + priv->speed = SPEED_100; + netif_carrier_off(dev); + } +} + +static void encx24j600_int_link_handler(struct encx24j600_priv *priv) +{ + struct net_device *dev = priv->ndev; + + netif_dbg(priv, intr, dev, "%s", __func__); + encx24j600_check_link_status(priv); + encx24j600_clr_bits(priv, EIR, LINKIF); +} + +static void encx24j600_tx_complete(struct encx24j600_priv *priv, bool err) +{ + struct net_device *dev = priv->ndev; + + if (!priv->tx_skb) { + BUG(); + return; + } + + mutex_lock(&priv->lock); + + if (err) + dev->stats.tx_errors++; + else + dev->stats.tx_packets++; + + dev->stats.tx_bytes += priv->tx_skb->len; + + encx24j600_clr_bits(priv, EIR, TXIF | TXABTIF); + + netif_dbg(priv, tx_done, dev, "TX Done%s\n", err ? 
": Err" : ""); + + dev_kfree_skb(priv->tx_skb); + priv->tx_skb = NULL; + + netif_wake_queue(dev); + + mutex_unlock(&priv->lock); +} + +static int encx24j600_receive_packet(struct encx24j600_priv *priv, + struct rsv *rsv) +{ + struct net_device *dev = priv->ndev; + struct sk_buff *skb = netdev_alloc_skb(dev, rsv->len + NET_IP_ALIGN); + + if (!skb) { + pr_err_ratelimited("RX: OOM: packet dropped\n"); + dev->stats.rx_dropped++; + return -ENOMEM; + } + skb_reserve(skb, NET_IP_ALIGN); + encx24j600_raw_read(priv, RRXDATA, skb_put(skb, rsv->len), rsv->len); + + if (netif_msg_pktdata(priv)) + dump_packet("RX", skb->len, skb->data); + + skb->dev = dev; + skb->protocol = eth_type_trans(skb, dev); + skb->ip_summed = CHECKSUM_COMPLETE; + + /* Maintain stats */ + dev->stats.rx_packets++; + dev->stats.rx_bytes += rsv->len; + + netif_rx(skb); + + return 0; +} + +static void encx24j600_rx_packets(struct encx24j600_priv *priv, u8 packet_count) +{ + struct net_device *dev = priv->ndev; + + while (packet_count--) { + struct rsv rsv; + u16 newrxtail; + + encx24j600_write_reg(priv, ERXRDPT, priv->next_packet); + encx24j600_raw_read(priv, RRXDATA, (u8 *)&rsv, sizeof(rsv)); + + if (netif_msg_rx_status(priv)) + encx24j600_dump_rsv(priv, __func__, &rsv); + + if (!RSV_GETBIT(rsv.rxstat, RSV_RXOK) || + (rsv.len > MAX_FRAMELEN)) { + netif_err(priv, rx_err, dev, "RX Error %04x\n", + rsv.rxstat); + dev->stats.rx_errors++; + + if (RSV_GETBIT(rsv.rxstat, RSV_CRCERROR)) + dev->stats.rx_crc_errors++; + if (RSV_GETBIT(rsv.rxstat, RSV_LENCHECKERR)) + dev->stats.rx_frame_errors++; + if (rsv.len > MAX_FRAMELEN) + dev->stats.rx_over_errors++; + } else { + encx24j600_receive_packet(priv, &rsv); + } + + priv->next_packet = rsv.next_packet; + + newrxtail = priv->next_packet - 2; + if (newrxtail == ENC_RX_BUF_START) + newrxtail = SRAM_SIZE - 2; + + encx24j600_cmd(priv, SETPKTDEC); + encx24j600_write_reg(priv, ERXTAIL, newrxtail); + } +} + +static irqreturn_t encx24j600_isr(int irq, void *dev_id) +{ + struct encx24j600_priv *priv = dev_id; + struct net_device *dev = priv->ndev; + int eir; + + /* Clear interrupts */ + encx24j600_cmd(priv, CLREIE); + + eir = encx24j600_read_reg(priv, EIR); + + if (eir & LINKIF) + encx24j600_int_link_handler(priv); + + if (eir & TXIF) + encx24j600_tx_complete(priv, false); + + if (eir & TXABTIF) + encx24j600_tx_complete(priv, true); + + if (eir & RXABTIF) { + if (eir & PCFULIF) { + /* Packet counter is full */ + netif_err(priv, rx_err, dev, "Packet counter full\n"); + } + dev->stats.rx_dropped++; + encx24j600_clr_bits(priv, EIR, RXABTIF); + } + + if (eir & PKTIF) { + u8 packet_count; + + mutex_lock(&priv->lock); + + packet_count = encx24j600_read_reg(priv, ESTAT) & 0xff; + while (packet_count) { + encx24j600_rx_packets(priv, packet_count); + packet_count = encx24j600_read_reg(priv, ESTAT) & 0xff; + } + + mutex_unlock(&priv->lock); + } + + /* Enable interrupts */ + encx24j600_cmd(priv, SETEIE); + + return IRQ_HANDLED; +} + +static int encx24j600_soft_reset(struct encx24j600_priv *priv) +{ + int ret = 0; + int timeout; + u16 eudast; + + /* Write and verify a test value to EUDAST */ + regcache_cache_bypass(priv->ctx.regmap, true); + timeout = 10; + do { + encx24j600_write_reg(priv, EUDAST, EUDAST_TEST_VAL); + eudast = encx24j600_read_reg(priv, EUDAST); + usleep_range(25, 100); + } while ((eudast != EUDAST_TEST_VAL) && --timeout); + regcache_cache_bypass(priv->ctx.regmap, false); + + if (timeout == 0) { + ret = -ETIMEDOUT; + goto err_out; + } + + /* Wait for CLKRDY to become set */ + timeout = 10; + while 
(!(encx24j600_read_reg(priv, ESTAT) & CLKRDY) && --timeout) + usleep_range(25, 100); + + if (timeout == 0) { + ret = -ETIMEDOUT; + goto err_out; + } + + /* Issue a System Reset command */ + encx24j600_cmd(priv, SETETHRST); + usleep_range(25, 100); + + /* Confirm that EUDAST has 0000h after system reset */ + if (encx24j600_read_reg(priv, EUDAST) != 0) { + ret = -EINVAL; + goto err_out; + } + + /* Wait for PHY register and status bits to become available */ + usleep_range(256, 1000); + +err_out: + return ret; +} + +static int encx24j600_hw_reset(struct encx24j600_priv *priv) +{ + int ret; + + mutex_lock(&priv->lock); + ret = encx24j600_soft_reset(priv); + mutex_unlock(&priv->lock); + + return ret; +} + +static void encx24j600_reset_hw_tx(struct encx24j600_priv *priv) +{ + encx24j600_set_bits(priv, ECON2, TXRST); + encx24j600_clr_bits(priv, ECON2, TXRST); +} + +static void encx24j600_hw_init_tx(struct encx24j600_priv *priv) +{ + /* Reset TX */ + encx24j600_reset_hw_tx(priv); + + /* Clear the TXIF flag if were previously set */ + encx24j600_clr_bits(priv, EIR, TXIF | TXABTIF); + + /* Write the Tx Buffer pointer */ + encx24j600_write_reg(priv, EGPWRPT, ENC_TX_BUF_START); +} + +static void encx24j600_hw_init_rx(struct encx24j600_priv *priv) +{ + encx24j600_cmd(priv, DISABLERX); + + /* Set up RX packet start address in the SRAM */ + encx24j600_write_reg(priv, ERXST, ENC_RX_BUF_START); + + /* Preload the RX Data pointer to the beginning of the RX area */ + encx24j600_write_reg(priv, ERXRDPT, ENC_RX_BUF_START); + + priv->next_packet = ENC_RX_BUF_START; + + /* Set up RX end address in the SRAM */ + encx24j600_write_reg(priv, ERXTAIL, ENC_SRAM_SIZE - 2); + + /* Reset the user data pointers */ + encx24j600_write_reg(priv, EUDAST, ENC_SRAM_SIZE); + encx24j600_write_reg(priv, EUDAND, ENC_SRAM_SIZE + 1); + + /* Set Max Frame length */ + encx24j600_write_reg(priv, MAMXFL, MAX_FRAMELEN); +} + +static void encx24j600_dump_config(struct encx24j600_priv *priv, + const char *msg) +{ + pr_info(DRV_NAME ": %s\n", msg); + + /* CHIP configuration */ + pr_info(DRV_NAME " ECON1: %04X\n", encx24j600_read_reg(priv, ECON1)); + pr_info(DRV_NAME " ECON2: %04X\n", encx24j600_read_reg(priv, ECON2)); + pr_info(DRV_NAME " ERXFCON: %04X\n", encx24j600_read_reg(priv, + ERXFCON)); + pr_info(DRV_NAME " ESTAT: %04X\n", encx24j600_read_reg(priv, ESTAT)); + pr_info(DRV_NAME " EIR: %04X\n", encx24j600_read_reg(priv, EIR)); + pr_info(DRV_NAME " EIDLED: %04X\n", encx24j600_read_reg(priv, EIDLED)); + + /* MAC layer configuration */ + pr_info(DRV_NAME " MACON1: %04X\n", encx24j600_read_reg(priv, MACON1)); + pr_info(DRV_NAME " MACON2: %04X\n", encx24j600_read_reg(priv, MACON2)); + pr_info(DRV_NAME " MAIPG: %04X\n", encx24j600_read_reg(priv, MAIPG)); + pr_info(DRV_NAME " MACLCON: %04X\n", encx24j600_read_reg(priv, + MACLCON)); + pr_info(DRV_NAME " MABBIPG: %04X\n", encx24j600_read_reg(priv, + MABBIPG)); + + /* PHY configuation */ + pr_info(DRV_NAME " PHCON1: %04X\n", encx24j600_read_phy(priv, PHCON1)); + pr_info(DRV_NAME " PHCON2: %04X\n", encx24j600_read_phy(priv, PHCON2)); + pr_info(DRV_NAME " PHANA: %04X\n", encx24j600_read_phy(priv, PHANA)); + pr_info(DRV_NAME " PHANLPA: %04X\n", encx24j600_read_phy(priv, + PHANLPA)); + pr_info(DRV_NAME " PHANE: %04X\n", encx24j600_read_phy(priv, PHANE)); + pr_info(DRV_NAME " PHSTAT1: %04X\n", encx24j600_read_phy(priv, + PHSTAT1)); + pr_info(DRV_NAME " PHSTAT2: %04X\n", encx24j600_read_phy(priv, + PHSTAT2)); + pr_info(DRV_NAME " PHSTAT3: %04X\n", encx24j600_read_phy(priv, + PHSTAT3)); +} + +static 
void encx24j600_set_rxfilter_mode(struct encx24j600_priv *priv) +{ + switch (priv->rxfilter) { + case RXFILTER_PROMISC: + encx24j600_set_bits(priv, MACON1, PASSALL); + encx24j600_write_reg(priv, ERXFCON, UCEN | MCEN | NOTMEEN); + break; + case RXFILTER_MULTI: + encx24j600_clr_bits(priv, MACON1, PASSALL); + encx24j600_write_reg(priv, ERXFCON, UCEN | CRCEN | BCEN | MCEN); + break; + case RXFILTER_NORMAL: + default: + encx24j600_clr_bits(priv, MACON1, PASSALL); + encx24j600_write_reg(priv, ERXFCON, UCEN | CRCEN | BCEN); + break; + } +} + +static void encx24j600_hw_init(struct encx24j600_priv *priv) +{ + u16 macon2; + + priv->hw_enabled = false; + + /* PHY Leds: link status, + * LEDA: Link State + collision events + * LEDB: Link State + transmit/receive events + */ + encx24j600_update_reg(priv, EIDLED, 0xff00, 0xcb00); + + /* Loopback disabled */ + encx24j600_write_reg(priv, MACON1, 0x9); + + /* interpacket gap value */ + encx24j600_write_reg(priv, MAIPG, 0x0c12); + + /* Write the auto negotiation pattern */ + encx24j600_write_phy(priv, PHANA, PHANA_DEFAULT); + + encx24j600_update_phcon1(priv); + encx24j600_check_link_status(priv); + + macon2 = MACON2_RSV1 | TXCRCEN | PADCFG0 | PADCFG2 | MACON2_DEFER; + if ((priv->autoneg == AUTONEG_DISABLE) && priv->full_duplex) + macon2 |= FULDPX; + + encx24j600_set_bits(priv, MACON2, macon2); + + priv->rxfilter = RXFILTER_NORMAL; + encx24j600_set_rxfilter_mode(priv); + + /* Program the Maximum frame length */ + encx24j600_write_reg(priv, MAMXFL, MAX_FRAMELEN); + + /* Init Tx pointers */ + encx24j600_hw_init_tx(priv); + + /* Init Rx pointers */ + encx24j600_hw_init_rx(priv); + + if (netif_msg_hw(priv)) + encx24j600_dump_config(priv, "Hw is initialized"); +} + +static void encx24j600_hw_enable(struct encx24j600_priv *priv) +{ + /* Clear the interrupt flags in case was set */ + encx24j600_clr_bits(priv, EIR, (PCFULIF | RXABTIF | TXABTIF | TXIF | + PKTIF | LINKIF)); + + /* Enable the interrupts */ + encx24j600_write_reg(priv, EIE, (PCFULIE | RXABTIE | TXABTIE | TXIE | + PKTIE | LINKIE | INTIE)); + + /* Enable RX */ + encx24j600_cmd(priv, ENABLERX); + + priv->hw_enabled = true; +} + +static void encx24j600_hw_disable(struct encx24j600_priv *priv) +{ + /* Disable all interrupts */ + encx24j600_write_reg(priv, EIE, 0); + + /* Disable RX */ + encx24j600_cmd(priv, DISABLERX); + + priv->hw_enabled = false; +} + +static int encx24j600_setlink(struct net_device *dev, u8 autoneg, u16 speed, + u8 duplex) +{ + struct encx24j600_priv *priv = netdev_priv(dev); + int ret = 0; + + if (!priv->hw_enabled) { + /* link is in low power mode now; duplex setting + * will take effect on next encx24j600_hw_init() + */ + if (speed == SPEED_10 || speed == SPEED_100) { + priv->autoneg = (autoneg == AUTONEG_ENABLE); + priv->full_duplex = (duplex == DUPLEX_FULL); + priv->speed = (speed == SPEED_100); + } else { + netif_warn(priv, link, dev, "unsupported link speed setting\n"); + /*speeds other than SPEED_10 and SPEED_100 */ + /*are not supported by chip */ + ret = -EOPNOTSUPP; + } + } else { + netif_warn(priv, link, dev, "Warning: hw must be disabled to set link mode\n"); + ret = -EBUSY; + } + return ret; +} + +static void encx24j600_hw_get_macaddr(struct encx24j600_priv *priv, + unsigned char *ethaddr) +{ + unsigned short val; + + val = encx24j600_read_reg(priv, MAADR1); + + ethaddr[0] = val & 0x00ff; + ethaddr[1] = (val & 0xff00) >> 8; + + val = encx24j600_read_reg(priv, MAADR2); + + ethaddr[2] = val & 0x00ffU; + ethaddr[3] = (val & 0xff00U) >> 8; + + val = encx24j600_read_reg(priv, 
MAADR3); + + ethaddr[4] = val & 0x00ffU; + ethaddr[5] = (val & 0xff00U) >> 8; +} + +/* Program the hardware MAC address from dev->dev_addr.*/ +static int encx24j600_set_hw_macaddr(struct net_device *dev) +{ + struct encx24j600_priv *priv = netdev_priv(dev); + + if (priv->hw_enabled) { + netif_info(priv, drv, dev, "Hardware must be disabled to set Mac address\n"); + return -EBUSY; + } + + mutex_lock(&priv->lock); + + netif_info(priv, drv, dev, "%s: Setting MAC address to %pM\n", + dev->name, dev->dev_addr); + + encx24j600_write_reg(priv, MAADR3, (dev->dev_addr[4] | + dev->dev_addr[5] << 8)); + encx24j600_write_reg(priv, MAADR2, (dev->dev_addr[2] | + dev->dev_addr[3] << 8)); + encx24j600_write_reg(priv, MAADR1, (dev->dev_addr[0] | + dev->dev_addr[1] << 8)); + + mutex_unlock(&priv->lock); + + return 0; +} + +/* Store the new hardware address in dev->dev_addr, and update the MAC.*/ +static int encx24j600_set_mac_address(struct net_device *dev, void *addr) +{ + struct sockaddr *address = addr; + + if (netif_running(dev)) + return -EBUSY; + if (!is_valid_ether_addr(address->sa_data)) + return -EADDRNOTAVAIL; + + eth_hw_addr_set(dev, address->sa_data); + return encx24j600_set_hw_macaddr(dev); +} + +static int encx24j600_open(struct net_device *dev) +{ + struct encx24j600_priv *priv = netdev_priv(dev); + + int ret = request_threaded_irq(priv->ctx.spi->irq, NULL, encx24j600_isr, + IRQF_TRIGGER_FALLING | IRQF_ONESHOT, + DRV_NAME, priv); + if (unlikely(ret < 0)) { + netdev_err(dev, "request irq %d failed (ret = %d)\n", + priv->ctx.spi->irq, ret); + return ret; + } + + encx24j600_hw_disable(priv); + encx24j600_hw_init(priv); + encx24j600_hw_enable(priv); + netif_start_queue(dev); + + return 0; +} + +static int encx24j600_stop(struct net_device *dev) +{ + struct encx24j600_priv *priv = netdev_priv(dev); + + netif_stop_queue(dev); + free_irq(priv->ctx.spi->irq, priv); + return 0; +} + +static void encx24j600_setrx_proc(struct kthread_work *ws) +{ + struct encx24j600_priv *priv = + container_of(ws, struct encx24j600_priv, setrx_work); + + mutex_lock(&priv->lock); + encx24j600_set_rxfilter_mode(priv); + mutex_unlock(&priv->lock); +} + +static void encx24j600_set_multicast_list(struct net_device *dev) +{ + struct encx24j600_priv *priv = netdev_priv(dev); + int oldfilter = priv->rxfilter; + + if (dev->flags & IFF_PROMISC) { + netif_dbg(priv, link, dev, "promiscuous mode\n"); + priv->rxfilter = RXFILTER_PROMISC; + } else if ((dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev)) { + netif_dbg(priv, link, dev, "%smulticast mode\n", + (dev->flags & IFF_ALLMULTI) ? "all-" : ""); + priv->rxfilter = RXFILTER_MULTI; + } else { + netif_dbg(priv, link, dev, "normal mode\n"); + priv->rxfilter = RXFILTER_NORMAL; + } + + if (oldfilter != priv->rxfilter) + kthread_queue_work(&priv->kworker, &priv->setrx_work); +} + +static void encx24j600_hw_tx(struct encx24j600_priv *priv) +{ + struct net_device *dev = priv->ndev; + + netif_info(priv, tx_queued, dev, "TX Packet Len:%d\n", + priv->tx_skb->len); + + if (netif_msg_pktdata(priv)) + dump_packet("TX", priv->tx_skb->len, priv->tx_skb->data); + + if (encx24j600_read_reg(priv, EIR) & TXABTIF) + /* Last transmition aborted due to error. 
Reset TX interface */ + encx24j600_reset_hw_tx(priv); + + /* Clear the TXIF flag if were previously set */ + encx24j600_clr_bits(priv, EIR, TXIF); + + /* Set the data pointer to the TX buffer address in the SRAM */ + encx24j600_write_reg(priv, EGPWRPT, ENC_TX_BUF_START); + + /* Copy the packet into the SRAM */ + encx24j600_raw_write(priv, WGPDATA, (u8 *)priv->tx_skb->data, + priv->tx_skb->len); + + /* Program the Tx buffer start pointer */ + encx24j600_write_reg(priv, ETXST, ENC_TX_BUF_START); + + /* Program the packet length */ + encx24j600_write_reg(priv, ETXLEN, priv->tx_skb->len); + + /* Start the transmission */ + encx24j600_cmd(priv, SETTXRTS); +} + +static void encx24j600_tx_proc(struct kthread_work *ws) +{ + struct encx24j600_priv *priv = + container_of(ws, struct encx24j600_priv, tx_work); + + mutex_lock(&priv->lock); + encx24j600_hw_tx(priv); + mutex_unlock(&priv->lock); +} + +static netdev_tx_t encx24j600_tx(struct sk_buff *skb, struct net_device *dev) +{ + struct encx24j600_priv *priv = netdev_priv(dev); + + netif_stop_queue(dev); + + /* save the timestamp */ + netif_trans_update(dev); + + /* Remember the skb for deferred processing */ + priv->tx_skb = skb; + + kthread_queue_work(&priv->kworker, &priv->tx_work); + + return NETDEV_TX_OK; +} + +/* Deal with a transmit timeout */ +static void encx24j600_tx_timeout(struct net_device *dev, unsigned int txqueue) +{ + struct encx24j600_priv *priv = netdev_priv(dev); + + netif_err(priv, tx_err, dev, "TX timeout at %ld, latency %ld\n", + jiffies, jiffies - dev_trans_start(dev)); + + dev->stats.tx_errors++; + netif_wake_queue(dev); +} + +static int encx24j600_get_regs_len(struct net_device *dev) +{ + return SFR_REG_COUNT; +} + +static void encx24j600_get_regs(struct net_device *dev, + struct ethtool_regs *regs, void *p) +{ + struct encx24j600_priv *priv = netdev_priv(dev); + u16 *buff = p; + u8 reg; + + regs->version = 1; + mutex_lock(&priv->lock); + for (reg = 0; reg < SFR_REG_COUNT; reg += 2) { + unsigned int val = 0; + /* ignore errors for unreadable registers */ + regmap_read(priv->ctx.regmap, reg, &val); + buff[reg] = val & 0xffff; + } + mutex_unlock(&priv->lock); +} + +static void encx24j600_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + strscpy(info->driver, DRV_NAME, sizeof(info->driver)); + strscpy(info->version, DRV_VERSION, sizeof(info->version)); + strscpy(info->bus_info, dev_name(dev->dev.parent), + sizeof(info->bus_info)); +} + +static int encx24j600_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) +{ + struct encx24j600_priv *priv = netdev_priv(dev); + u32 supported; + + supported = SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | + SUPPORTED_Autoneg | SUPPORTED_TP; + + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + + cmd->base.speed = priv->speed; + cmd->base.duplex = priv->full_duplex ? DUPLEX_FULL : DUPLEX_HALF; + cmd->base.port = PORT_TP; + cmd->base.autoneg = priv->autoneg ? 
AUTONEG_ENABLE : AUTONEG_DISABLE; + + return 0; +} + +static int +encx24j600_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) +{ + return encx24j600_setlink(dev, cmd->base.autoneg, + cmd->base.speed, cmd->base.duplex); +} + +static u32 encx24j600_get_msglevel(struct net_device *dev) +{ + struct encx24j600_priv *priv = netdev_priv(dev); + + return priv->msg_enable; +} + +static void encx24j600_set_msglevel(struct net_device *dev, u32 val) +{ + struct encx24j600_priv *priv = netdev_priv(dev); + + priv->msg_enable = val; +} + +static const struct ethtool_ops encx24j600_ethtool_ops = { + .get_drvinfo = encx24j600_get_drvinfo, + .get_msglevel = encx24j600_get_msglevel, + .set_msglevel = encx24j600_set_msglevel, + .get_regs_len = encx24j600_get_regs_len, + .get_regs = encx24j600_get_regs, + .get_link_ksettings = encx24j600_get_link_ksettings, + .set_link_ksettings = encx24j600_set_link_ksettings, +}; + +static const struct net_device_ops encx24j600_netdev_ops = { + .ndo_open = encx24j600_open, + .ndo_stop = encx24j600_stop, + .ndo_start_xmit = encx24j600_tx, + .ndo_set_rx_mode = encx24j600_set_multicast_list, + .ndo_set_mac_address = encx24j600_set_mac_address, + .ndo_tx_timeout = encx24j600_tx_timeout, + .ndo_validate_addr = eth_validate_addr, +}; + +static int encx24j600_spi_probe(struct spi_device *spi) +{ + int ret; + + struct net_device *ndev; + struct encx24j600_priv *priv; + u16 eidled; + u8 addr[ETH_ALEN]; + + ndev = alloc_etherdev(sizeof(struct encx24j600_priv)); + + if (!ndev) { + ret = -ENOMEM; + goto error_out; + } + + priv = netdev_priv(ndev); + spi_set_drvdata(spi, priv); + dev_set_drvdata(&spi->dev, priv); + SET_NETDEV_DEV(ndev, &spi->dev); + + priv->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); + priv->ndev = ndev; + + /* Default configuration PHY configuration */ + priv->full_duplex = true; + priv->autoneg = AUTONEG_ENABLE; + priv->speed = SPEED_100; + + priv->ctx.spi = spi; + ndev->irq = spi->irq; + ndev->netdev_ops = &encx24j600_netdev_ops; + + ret = devm_regmap_init_encx24j600(&spi->dev, &priv->ctx); + if (ret) + goto out_free; + + mutex_init(&priv->lock); + + /* Reset device and check if it is connected */ + if (encx24j600_hw_reset(priv)) { + netif_err(priv, probe, ndev, + DRV_NAME ": Chip is not detected\n"); + ret = -EIO; + goto out_free; + } + + /* Initialize the device HW to the consistent state */ + encx24j600_hw_init(priv); + + kthread_init_worker(&priv->kworker); + kthread_init_work(&priv->tx_work, encx24j600_tx_proc); + kthread_init_work(&priv->setrx_work, encx24j600_setrx_proc); + + priv->kworker_task = kthread_run(kthread_worker_fn, &priv->kworker, + "encx24j600"); + + if (IS_ERR(priv->kworker_task)) { + ret = PTR_ERR(priv->kworker_task); + goto out_free; + } + + /* Get the MAC address from the chip */ + encx24j600_hw_get_macaddr(priv, addr); + eth_hw_addr_set(ndev, addr); + + ndev->ethtool_ops = &encx24j600_ethtool_ops; + + ret = register_netdev(ndev); + if (unlikely(ret)) { + netif_err(priv, probe, ndev, "Error %d initializing card encx24j600 card\n", + ret); + goto out_stop; + } + + eidled = encx24j600_read_reg(priv, EIDLED); + if (((eidled & DEVID_MASK) >> DEVID_SHIFT) != ENCX24J600_DEV_ID) { + ret = -EINVAL; + goto out_unregister; + } + + netif_info(priv, probe, ndev, "Silicon rev ID: 0x%02x\n", + (eidled & REVID_MASK) >> REVID_SHIFT); + + netif_info(priv, drv, priv->ndev, "MAC address %pM\n", ndev->dev_addr); + + return ret; + +out_unregister: + unregister_netdev(priv->ndev); +out_stop: + 
kthread_stop(priv->kworker_task); +out_free: + free_netdev(ndev); + +error_out: + return ret; +} + +static void encx24j600_spi_remove(struct spi_device *spi) +{ + struct encx24j600_priv *priv = dev_get_drvdata(&spi->dev); + + unregister_netdev(priv->ndev); + kthread_stop(priv->kworker_task); + + free_netdev(priv->ndev); +} + +static const struct spi_device_id encx24j600_spi_id_table[] = { + { .name = "encx24j600" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(spi, encx24j600_spi_id_table); + +static struct spi_driver encx24j600_spi_net_driver = { + .driver = { + .name = DRV_NAME, + .owner = THIS_MODULE, + .bus = &spi_bus_type, + }, + .probe = encx24j600_spi_probe, + .remove = encx24j600_spi_remove, + .id_table = encx24j600_spi_id_table, +}; + +module_spi_driver(encx24j600_spi_net_driver); + +MODULE_DESCRIPTION(DRV_NAME " ethernet driver"); +MODULE_AUTHOR("Jon Ringle <jringle@gridpoint.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/microchip/encx24j600_hw.h b/drivers/net/ethernet/microchip/encx24j600_hw.h new file mode 100644 index 000000000..34c5a2898 --- /dev/null +++ b/drivers/net/ethernet/microchip/encx24j600_hw.h @@ -0,0 +1,438 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * encx24j600_hw.h: Register definitions + * + */ + +#ifndef _ENCX24J600_HW_H +#define _ENCX24J600_HW_H + +struct encx24j600_context { + struct spi_device *spi; + struct regmap *regmap; + struct regmap *phymap; + struct mutex mutex; /* mutex to protect access to regmap */ + int bank; +}; + +int devm_regmap_init_encx24j600(struct device *dev, + struct encx24j600_context *ctx); + +/* Single-byte instructions */ +#define BANK_SELECT(bank) (0xC0 | ((bank & (BANK_MASK >> BANK_SHIFT)) << 1)) +#define B0SEL 0xC0 /* Bank 0 Select */ +#define B1SEL 0xC2 /* Bank 1 Select */ +#define B2SEL 0xC4 /* Bank 2 Select */ +#define B3SEL 0xC6 /* Bank 3 Select */ +#define SETETHRST 0xCA /* System Reset */ +#define FCDISABLE 0xE0 /* Flow Control Disable */ +#define FCSINGLE 0xE2 /* Flow Control Single */ +#define FCMULTIPLE 0xE4 /* Flow Control Multiple */ +#define FCCLEAR 0xE6 /* Flow Control Clear */ +#define SETPKTDEC 0xCC /* Decrement Packet Counter */ +#define DMASTOP 0xD2 /* DMA Stop */ +#define DMACKSUM 0xD8 /* DMA Start Checksum */ +#define DMACKSUMS 0xDA /* DMA Start Checksum with Seed */ +#define DMACOPY 0xDC /* DMA Start Copy */ +#define DMACOPYS 0xDE /* DMA Start Copy and Checksum with Seed */ +#define SETTXRTS 0xD4 /* Request Packet Transmission */ +#define ENABLERX 0xE8 /* Enable RX */ +#define DISABLERX 0xEA /* Disable RX */ +#define SETEIE 0xEC /* Enable Interrupts */ +#define CLREIE 0xEE /* Disable Interrupts */ + +/* Two byte instructions */ +#define RBSEL 0xC8 /* Read Bank Select */ + +/* Three byte instructions */ +#define WGPRDPT 0x60 /* Write EGPRDPT */ +#define RGPRDPT 0x62 /* Read EGPRDPT */ +#define WRXRDPT 0x64 /* Write ERXRDPT */ +#define RRXRDPT 0x66 /* Read ERXRDPT */ +#define WUDARDPT 0x68 /* Write EUDARDPT */ +#define RUDARDPT 0x6A /* Read EUDARDPT */ +#define WGPWRPT 0x6C /* Write EGPWRPT */ +#define RGPWRPT 0x6E /* Read EGPWRPT */ +#define WRXWRPT 0x70 /* Write ERXWRPT */ +#define RRXWRPT 0x72 /* Read ERXWRPT */ +#define WUDAWRPT 0x74 /* Write EUDAWRPT */ +#define RUDAWRPT 0x76 /* Read EUDAWRPT */ + +/* n byte instructions */ +#define RCRCODE 0x00 +#define WCRCODE 0x40 +#define BFSCODE 0x80 +#define BFCCODE 0xA0 +#define RCR(addr) (RCRCODE | (addr & ADDR_MASK)) /* Read Control Register */ +#define WCR(addr) (WCRCODE | (addr & ADDR_MASK)) /* Write Control Register */ +#define RCRU 
0x20 /* Read Control Register Unbanked */ +#define WCRU 0x22 /* Write Control Register Unbanked */ +#define BFS(addr) (BFSCODE | (addr & ADDR_MASK)) /* Bit Field Set */ +#define BFC(addr) (BFCCODE | (addr & ADDR_MASK)) /* Bit Field Clear */ +#define BFSU 0x24 /* Bit Field Set Unbanked */ +#define BFCU 0x26 /* Bit Field Clear Unbanked */ +#define RGPDATA 0x28 /* Read EGPDATA */ +#define WGPDATA 0x2A /* Write EGPDATA */ +#define RRXDATA 0x2C /* Read ERXDATA */ +#define WRXDATA 0x2E /* Write ERXDATA */ +#define RUDADATA 0x30 /* Read EUDADATA */ +#define WUDADATA 0x32 /* Write EUDADATA */ + +#define SFR_REG_COUNT 0xA0 + +/* ENC424J600 Control Registers + * Control register definitions are a combination of address + * and bank number + * - Register address (bits 0-4) + * - Bank number (bits 5-6) + */ +#define ADDR_MASK 0x1F +#define BANK_MASK 0x60 +#define BANK_SHIFT 5 + +/* All-bank registers */ +#define EUDAST 0x16 +#define EUDAND 0x18 +#define ESTAT 0x1A +#define EIR 0x1C +#define ECON1 0x1E + +/* Bank 0 registers */ +#define ETXST (0x00 | 0x00) +#define ETXLEN (0x02 | 0x00) +#define ERXST (0x04 | 0x00) +#define ERXTAIL (0x06 | 0x00) +#define ERXHEAD (0x08 | 0x00) +#define EDMAST (0x0A | 0x00) +#define EDMALEN (0x0C | 0x00) +#define EDMADST (0x0E | 0x00) +#define EDMACS (0x10 | 0x00) +#define ETXSTAT (0x12 | 0x00) +#define ETXWIRE (0x14 | 0x00) + +/* Bank 1 registers */ +#define EHT1 (0x00 | 0x20) +#define EHT2 (0x02 | 0x20) +#define EHT3 (0x04 | 0x20) +#define EHT4 (0x06 | 0x20) +#define EPMM1 (0x08 | 0x20) +#define EPMM2 (0x0A | 0x20) +#define EPMM3 (0x0C | 0x20) +#define EPMM4 (0x0E | 0x20) +#define EPMCS (0x10 | 0x20) +#define EPMO (0x12 | 0x20) +#define ERXFCON (0x14 | 0x20) + +/* Bank 2 registers */ +#define MACON1 (0x00 | 0x40) +#define MACON2 (0x02 | 0x40) +#define MABBIPG (0x04 | 0x40) +#define MAIPG (0x06 | 0x40) +#define MACLCON (0x08 | 0x40) +#define MAMXFL (0x0A | 0x40) +#define MICMD (0x12 | 0x40) +#define MIREGADR (0x14 | 0x40) + +/* Bank 3 registers */ +#define MAADR3 (0x00 | 0x60) +#define MAADR2 (0x02 | 0x60) +#define MAADR1 (0x04 | 0x60) +#define MIWR (0x06 | 0x60) +#define MIRD (0x08 | 0x60) +#define MISTAT (0x0A | 0x60) +#define EPAUS (0x0C | 0x60) +#define ECON2 (0x0E | 0x60) +#define ERXWM (0x10 | 0x60) +#define EIE (0x12 | 0x60) +#define EIDLED (0x14 | 0x60) + +/* Unbanked registers */ +#define EGPDATA (0x00 | 0x80) +#define ERXDATA (0x02 | 0x80) +#define EUDADATA (0x04 | 0x80) +#define EGPRDPT (0x06 | 0x80) +#define EGPWRPT (0x08 | 0x80) +#define ERXRDPT (0x0A | 0x80) +#define ERXWRPT (0x0C | 0x80) +#define EUDARDPT (0x0E | 0x80) +#define EUDAWRPT (0x10 | 0x80) + + +/* Register bit definitions */ +/* ESTAT */ +#define INT (1 << 15) +#define FCIDLE (1 << 14) +#define RXBUSY (1 << 13) +#define CLKRDY (1 << 12) +#define PHYDPX (1 << 10) +#define PHYLNK (1 << 8) + +/* EIR */ +#define CRYPTEN (1 << 15) +#define MODEXIF (1 << 14) +#define HASHIF (1 << 13) +#define AESIF (1 << 12) +#define LINKIF (1 << 11) +#define PKTIF (1 << 6) +#define DMAIF (1 << 5) +#define TXIF (1 << 3) +#define TXABTIF (1 << 2) +#define RXABTIF (1 << 1) +#define PCFULIF (1 << 0) + +/* ECON1 */ +#define MODEXST (1 << 15) +#define HASHEN (1 << 14) +#define HASHOP (1 << 13) +#define HASHLST (1 << 12) +#define AESST (1 << 11) +#define AESOP1 (1 << 10) +#define AESOP0 (1 << 9) +#define PKTDEC (1 << 8) +#define FCOP1 (1 << 7) +#define FCOP0 (1 << 6) +#define DMAST (1 << 5) +#define DMACPY (1 << 4) +#define DMACSSD (1 << 3) +#define DMANOCS (1 << 2) +#define TXRTS (1 << 1) +#define RXEN (1 << 0) + +/* 
ETXSTAT */ +#define LATECOL (1 << 10) +#define MAXCOL (1 << 9) +#define EXDEFER (1 << 8) +#define ETXSTATL_DEFER (1 << 7) +#define CRCBAD (1 << 4) +#define COLCNT_MASK 0xF + +/* ERXFCON */ +#define HTEN (1 << 15) +#define MPEN (1 << 14) +#define NOTPM (1 << 12) +#define PMEN3 (1 << 11) +#define PMEN2 (1 << 10) +#define PMEN1 (1 << 9) +#define PMEN0 (1 << 8) +#define CRCEEN (1 << 7) +#define CRCEN (1 << 6) +#define RUNTEEN (1 << 5) +#define RUNTEN (1 << 4) +#define UCEN (1 << 3) +#define NOTMEEN (1 << 2) +#define MCEN (1 << 1) +#define BCEN (1 << 0) + +/* MACON1 */ +#define LOOPBK (1 << 4) +#define RXPAUS (1 << 2) +#define PASSALL (1 << 1) + +/* MACON2 */ +#define MACON2_DEFER (1 << 14) +#define BPEN (1 << 13) +#define NOBKOFF (1 << 12) +#define PADCFG2 (1 << 7) +#define PADCFG1 (1 << 6) +#define PADCFG0 (1 << 5) +#define TXCRCEN (1 << 4) +#define PHDREN (1 << 3) +#define HFRMEN (1 << 2) +#define MACON2_RSV1 (1 << 1) +#define FULDPX (1 << 0) + +/* MAIPG */ +/* value of the high byte is given by the reserved bits, + * value of the low byte is recomended setting of the + * IPG parameter. + */ +#define MAIPGH_VAL 0x0C +#define MAIPGL_VAL 0x12 + +/* MIREGADRH */ +#define MIREGADR_VAL (1 << 8) + +/* MIREGADRL */ +#define PHREG_MASK 0x1F + +/* MICMD */ +#define MIISCAN (1 << 1) +#define MIIRD (1 << 0) + +/* MISTAT */ +#define NVALID (1 << 2) +#define SCAN (1 << 1) +#define BUSY (1 << 0) + +/* ECON2 */ +#define ETHEN (1 << 15) +#define STRCH (1 << 14) +#define TXMAC (1 << 13) +#define SHA1MD5 (1 << 12) +#define COCON3 (1 << 11) +#define COCON2 (1 << 10) +#define COCON1 (1 << 9) +#define COCON0 (1 << 8) +#define AUTOFC (1 << 7) +#define TXRST (1 << 6) +#define RXRST (1 << 5) +#define ETHRST (1 << 4) +#define MODLEN1 (1 << 3) +#define MODLEN0 (1 << 2) +#define AESLEN1 (1 << 1) +#define AESLEN0 (1 << 0) + +/* EIE */ +#define INTIE (1 << 15) +#define MODEXIE (1 << 14) +#define HASHIE (1 << 13) +#define AESIE (1 << 12) +#define LINKIE (1 << 11) +#define PKTIE (1 << 6) +#define DMAIE (1 << 5) +#define TXIE (1 << 3) +#define TXABTIE (1 << 2) +#define RXABTIE (1 << 1) +#define PCFULIE (1 << 0) + +/* EIDLED */ +#define LACFG3 (1 << 15) +#define LACFG2 (1 << 14) +#define LACFG1 (1 << 13) +#define LACFG0 (1 << 12) +#define LBCFG3 (1 << 11) +#define LBCFG2 (1 << 10) +#define LBCFG1 (1 << 9) +#define LBCFG0 (1 << 8) +#define DEVID_SHIFT 5 +#define DEVID_MASK (0x7 << DEVID_SHIFT) +#define REVID_SHIFT 0 +#define REVID_MASK (0x1F << REVID_SHIFT) + +/* PHY registers */ +#define PHCON1 0x00 +#define PHSTAT1 0x01 +#define PHANA 0x04 +#define PHANLPA 0x05 +#define PHANE 0x06 +#define PHCON2 0x11 +#define PHSTAT2 0x1B +#define PHSTAT3 0x1F + +/* PHCON1 */ +#define PRST (1 << 15) +#define PLOOPBK (1 << 14) +#define SPD100 (1 << 13) +#define ANEN (1 << 12) +#define PSLEEP (1 << 11) +#define RENEG (1 << 9) +#define PFULDPX (1 << 8) + +/* PHSTAT1 */ +#define FULL100 (1 << 14) +#define HALF100 (1 << 13) +#define FULL10 (1 << 12) +#define HALF10 (1 << 11) +#define ANDONE (1 << 5) +#define LRFAULT (1 << 4) +#define ANABLE (1 << 3) +#define LLSTAT (1 << 2) +#define EXTREGS (1 << 0) + +/* PHSTAT2 */ +#define PLRITY (1 << 4) + +/* PHSTAT3 */ +#define PHY3SPD100 (1 << 3) +#define PHY3DPX (1 << 4) +#define SPDDPX_SHIFT 2 +#define SPDDPX_MASK (0x7 << SPDDPX_SHIFT) + +/* PHANA */ +/* Default value for PHY initialization*/ +#define PHANA_DEFAULT 0x05E1 + +/* PHANE */ +#define PDFLT (1 << 4) +#define LPARCD (1 << 1) +#define LPANABL (1 << 0) + +#define EUDAST_TEST_VAL 0x1234 + +#define TSV_SIZE 7 + +#define ENCX24J600_DEV_ID 0x1 + 
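+/*
+ * Worked example (illustrative only) of the banked SFR encoding defined
+ * earlier in this header: a register constant packs a 5-bit address and
+ * a 2-bit bank number, e.g. ERXFCON = (0x14 | 0x20), so
+ *
+ *   bank = (ERXFCON & BANK_MASK) >> BANK_SHIFT  =  1
+ *   addr =  ERXFCON & ADDR_MASK                 =  0x14
+ *
+ * A banked read is then issued as BANK_SELECT(1) (opcode 0xC2) followed
+ * by RCR(ERXFCON) (opcode 0x14); this is the switch-bank path taken by
+ * regmap_encx24j600_sfr_read() in encx24j600-regmap.c.
+ */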
+/* Configuration */ + +/* LED is on when the link is present and driven low + * temporarily when a packet is TX'd or RX'd + */ +#define LED_A_SETTINGS 0xC + +/* LED is on if the link is in 100 Mbps mode */ +#define LED_B_SETTINGS 0x8 + +/* maximum ethernet frame length + * Currently not used as a limit anywhere + * (we're using the "huge frame enable" feature of + * enc424j600). + */ +#define MAX_FRAMELEN 1518 + +/* Size in bytes of the receive buffer in enc424j600. + * Must be word aligned (even). + */ +#define RX_BUFFER_SIZE (15 * MAX_FRAMELEN) + +/* Start of the general purpose area in SRAM */ +#define SRAM_GP_START 0x0 + +/* SRAM size */ +#define SRAM_SIZE 0x6000 + +/* Start of the receive buffer */ +#define ERXST_VAL (SRAM_SIZE - RX_BUFFER_SIZE) + +#define RSV_RXLONGEVDROPEV 16 +#define RSV_CARRIEREV 18 +#define RSV_CRCERROR 20 +#define RSV_LENCHECKERR 21 +#define RSV_LENOUTOFRANGE 22 +#define RSV_RXOK 23 +#define RSV_RXMULTICAST 24 +#define RSV_RXBROADCAST 25 +#define RSV_DRIBBLENIBBLE 26 +#define RSV_RXCONTROLFRAME 27 +#define RSV_RXPAUSEFRAME 28 +#define RSV_RXUNKNOWNOPCODE 29 +#define RSV_RXTYPEVLAN 30 + +#define RSV_RUNTFILTERMATCH 31 +#define RSV_NOTMEFILTERMATCH 32 +#define RSV_HASHFILTERMATCH 33 +#define RSV_MAGICPKTFILTERMATCH 34 +#define RSV_PTRNMTCHFILTERMATCH 35 +#define RSV_UNICASTFILTERMATCH 36 + +#define RSV_SIZE 8 +#define RSV_BITMASK(x) (1 << ((x) - 16)) +#define RSV_GETBIT(x, y) (((x) & RSV_BITMASK(y)) ? 1 : 0) + +struct rsv { + u16 next_packet; + u16 len; + u32 rxstat; +}; + +/* Put the RX buffer at the top of SRAM (see ERXST_VAL above) */ + +#define RXSTART_INIT ERXST_VAL +#define RXEND_INIT 0x5FFF + +int regmap_encx24j600_spi_write(void *context, u8 reg, const u8 *data, + size_t count); +int regmap_encx24j600_spi_read(void *context, u8 reg, u8 *data, size_t count); + + +#endif diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c new file mode 100644 index 000000000..c739d60ee --- /dev/null +++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c @@ -0,0 +1,1266 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (C) 2018 Microchip Technology Inc.
*/ + +#include <linux/netdevice.h> +#include <linux/net_tstamp.h> +#include <linux/pci.h> +#include <linux/phy.h> +#include "lan743x_main.h" +#include "lan743x_ethtool.h" +#include <linux/sched.h> +#include <linux/iopoll.h> + +/* eeprom */ +#define LAN743X_EEPROM_MAGIC (0x74A5) +#define LAN743X_OTP_MAGIC (0x74F3) +#define EEPROM_INDICATOR_1 (0xA5) +#define EEPROM_INDICATOR_2 (0xAA) +#define EEPROM_MAC_OFFSET (0x01) +#define MAX_EEPROM_SIZE (512) +#define MAX_OTP_SIZE (1024) +#define OTP_INDICATOR_1 (0xF3) +#define OTP_INDICATOR_2 (0xF7) + +#define LOCK_TIMEOUT_MAX_CNT (100) // 1 sec (10 msce * 100) + +#define LAN743X_CSR_READ_OP(offset) lan743x_csr_read(adapter, offset) + +static int lan743x_otp_power_up(struct lan743x_adapter *adapter) +{ + u32 reg_value; + + reg_value = lan743x_csr_read(adapter, OTP_PWR_DN); + + if (reg_value & OTP_PWR_DN_PWRDN_N_) { + /* clear it and wait to be cleared */ + reg_value &= ~OTP_PWR_DN_PWRDN_N_; + lan743x_csr_write(adapter, OTP_PWR_DN, reg_value); + + usleep_range(100, 20000); + } + + return 0; +} + +static void lan743x_otp_power_down(struct lan743x_adapter *adapter) +{ + u32 reg_value; + + reg_value = lan743x_csr_read(adapter, OTP_PWR_DN); + if (!(reg_value & OTP_PWR_DN_PWRDN_N_)) { + /* set power down bit */ + reg_value |= OTP_PWR_DN_PWRDN_N_; + lan743x_csr_write(adapter, OTP_PWR_DN, reg_value); + } +} + +static void lan743x_otp_set_address(struct lan743x_adapter *adapter, + u32 address) +{ + lan743x_csr_write(adapter, OTP_ADDR_HIGH, (address >> 8) & 0x03); + lan743x_csr_write(adapter, OTP_ADDR_LOW, address & 0xFF); +} + +static void lan743x_otp_read_go(struct lan743x_adapter *adapter) +{ + lan743x_csr_write(adapter, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_); + lan743x_csr_write(adapter, OTP_CMD_GO, OTP_CMD_GO_GO_); +} + +static int lan743x_otp_wait_till_not_busy(struct lan743x_adapter *adapter) +{ + unsigned long timeout; + u32 reg_val; + + timeout = jiffies + HZ; + do { + if (time_after(jiffies, timeout)) { + netif_warn(adapter, drv, adapter->netdev, + "Timeout on OTP_STATUS completion\n"); + return -EIO; + } + udelay(1); + reg_val = lan743x_csr_read(adapter, OTP_STATUS); + } while (reg_val & OTP_STATUS_BUSY_); + + return 0; +} + +static int lan743x_otp_read(struct lan743x_adapter *adapter, u32 offset, + u32 length, u8 *data) +{ + int ret; + int i; + + if (offset + length > MAX_OTP_SIZE) + return -EINVAL; + + ret = lan743x_otp_power_up(adapter); + if (ret < 0) + return ret; + + ret = lan743x_otp_wait_till_not_busy(adapter); + if (ret < 0) + return ret; + + for (i = 0; i < length; i++) { + lan743x_otp_set_address(adapter, offset + i); + + lan743x_otp_read_go(adapter); + ret = lan743x_otp_wait_till_not_busy(adapter); + if (ret < 0) + return ret; + data[i] = lan743x_csr_read(adapter, OTP_READ_DATA); + } + + lan743x_otp_power_down(adapter); + + return 0; +} + +static int lan743x_otp_write(struct lan743x_adapter *adapter, u32 offset, + u32 length, u8 *data) +{ + int ret; + int i; + + if (offset + length > MAX_OTP_SIZE) + return -EINVAL; + + ret = lan743x_otp_power_up(adapter); + if (ret < 0) + return ret; + + ret = lan743x_otp_wait_till_not_busy(adapter); + if (ret < 0) + return ret; + + /* set to BYTE program mode */ + lan743x_csr_write(adapter, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_); + + for (i = 0; i < length; i++) { + lan743x_otp_set_address(adapter, offset + i); + + lan743x_csr_write(adapter, OTP_PRGM_DATA, data[i]); + lan743x_csr_write(adapter, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_); + lan743x_csr_write(adapter, OTP_CMD_GO, OTP_CMD_GO_GO_); + + ret = 
lan743x_otp_wait_till_not_busy(adapter); + if (ret < 0) + return ret; + } + + lan743x_otp_power_down(adapter); + + return 0; +} + +int lan743x_hs_syslock_acquire(struct lan743x_adapter *adapter, + u16 timeout) +{ + u16 timeout_cnt = 0; + u32 val; + + do { + spin_lock(&adapter->eth_syslock_spinlock); + if (adapter->eth_syslock_acquire_cnt == 0) { + lan743x_csr_write(adapter, ETH_SYSTEM_SYS_LOCK_REG, + SYS_LOCK_REG_ENET_SS_LOCK_); + val = lan743x_csr_read(adapter, + ETH_SYSTEM_SYS_LOCK_REG); + if (val & SYS_LOCK_REG_ENET_SS_LOCK_) { + adapter->eth_syslock_acquire_cnt++; + WARN_ON(adapter->eth_syslock_acquire_cnt == 0); + spin_unlock(&adapter->eth_syslock_spinlock); + break; + } + } else { + adapter->eth_syslock_acquire_cnt++; + WARN_ON(adapter->eth_syslock_acquire_cnt == 0); + spin_unlock(&adapter->eth_syslock_spinlock); + break; + } + + spin_unlock(&adapter->eth_syslock_spinlock); + + if (timeout_cnt++ < timeout) + usleep_range(10000, 11000); + else + return -ETIMEDOUT; + } while (true); + + return 0; +} + +void lan743x_hs_syslock_release(struct lan743x_adapter *adapter) +{ + u32 val; + + spin_lock(&adapter->eth_syslock_spinlock); + WARN_ON(adapter->eth_syslock_acquire_cnt == 0); + + if (adapter->eth_syslock_acquire_cnt) { + adapter->eth_syslock_acquire_cnt--; + if (adapter->eth_syslock_acquire_cnt == 0) { + lan743x_csr_write(adapter, ETH_SYSTEM_SYS_LOCK_REG, 0); + val = lan743x_csr_read(adapter, + ETH_SYSTEM_SYS_LOCK_REG); + WARN_ON((val & SYS_LOCK_REG_ENET_SS_LOCK_) != 0); + } + } + + spin_unlock(&adapter->eth_syslock_spinlock); +} + +static void lan743x_hs_otp_power_up(struct lan743x_adapter *adapter) +{ + u32 reg_value; + + reg_value = lan743x_csr_read(adapter, HS_OTP_PWR_DN); + if (reg_value & OTP_PWR_DN_PWRDN_N_) { + reg_value &= ~OTP_PWR_DN_PWRDN_N_; + lan743x_csr_write(adapter, HS_OTP_PWR_DN, reg_value); + /* To flush the posted write so the subsequent delay is + * guaranteed to happen after the write at the hardware + */ + lan743x_csr_read(adapter, HS_OTP_PWR_DN); + udelay(1); + } +} + +static void lan743x_hs_otp_power_down(struct lan743x_adapter *adapter) +{ + u32 reg_value; + + reg_value = lan743x_csr_read(adapter, HS_OTP_PWR_DN); + if (!(reg_value & OTP_PWR_DN_PWRDN_N_)) { + reg_value |= OTP_PWR_DN_PWRDN_N_; + lan743x_csr_write(adapter, HS_OTP_PWR_DN, reg_value); + /* To flush the posted write so the subsequent delay is + * guaranteed to happen after the write at the hardware + */ + lan743x_csr_read(adapter, HS_OTP_PWR_DN); + udelay(1); + } +} + +static void lan743x_hs_otp_set_address(struct lan743x_adapter *adapter, + u32 address) +{ + lan743x_csr_write(adapter, HS_OTP_ADDR_HIGH, (address >> 8) & 0x03); + lan743x_csr_write(adapter, HS_OTP_ADDR_LOW, address & 0xFF); +} + +static void lan743x_hs_otp_read_go(struct lan743x_adapter *adapter) +{ + lan743x_csr_write(adapter, HS_OTP_FUNC_CMD, OTP_FUNC_CMD_READ_); + lan743x_csr_write(adapter, HS_OTP_CMD_GO, OTP_CMD_GO_GO_); +} + +static int lan743x_hs_otp_cmd_cmplt_chk(struct lan743x_adapter *adapter) +{ + u32 val; + + return readx_poll_timeout(LAN743X_CSR_READ_OP, HS_OTP_STATUS, val, + !(val & OTP_STATUS_BUSY_), + 80, 10000); +} + +static int lan743x_hs_otp_read(struct lan743x_adapter *adapter, u32 offset, + u32 length, u8 *data) +{ + int ret; + int i; + + ret = lan743x_hs_syslock_acquire(adapter, LOCK_TIMEOUT_MAX_CNT); + if (ret < 0) + return ret; + + lan743x_hs_otp_power_up(adapter); + + ret = lan743x_hs_otp_cmd_cmplt_chk(adapter); + if (ret < 0) + goto power_down; + + lan743x_hs_syslock_release(adapter); + + for (i = 0; i < length; 
i++) { + ret = lan743x_hs_syslock_acquire(adapter, + LOCK_TIMEOUT_MAX_CNT); + if (ret < 0) + return ret; + + lan743x_hs_otp_set_address(adapter, offset + i); + + lan743x_hs_otp_read_go(adapter); + ret = lan743x_hs_otp_cmd_cmplt_chk(adapter); + if (ret < 0) + goto power_down; + + data[i] = lan743x_csr_read(adapter, HS_OTP_READ_DATA); + + lan743x_hs_syslock_release(adapter); + } + + ret = lan743x_hs_syslock_acquire(adapter, + LOCK_TIMEOUT_MAX_CNT); + if (ret < 0) + return ret; + +power_down: + lan743x_hs_otp_power_down(adapter); + lan743x_hs_syslock_release(adapter); + + return ret; +} + +static int lan743x_hs_otp_write(struct lan743x_adapter *adapter, u32 offset, + u32 length, u8 *data) +{ + int ret; + int i; + + ret = lan743x_hs_syslock_acquire(adapter, LOCK_TIMEOUT_MAX_CNT); + if (ret < 0) + return ret; + + lan743x_hs_otp_power_up(adapter); + + ret = lan743x_hs_otp_cmd_cmplt_chk(adapter); + if (ret < 0) + goto power_down; + + /* set to BYTE program mode */ + lan743x_csr_write(adapter, HS_OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_); + + lan743x_hs_syslock_release(adapter); + + for (i = 0; i < length; i++) { + ret = lan743x_hs_syslock_acquire(adapter, + LOCK_TIMEOUT_MAX_CNT); + if (ret < 0) + return ret; + + lan743x_hs_otp_set_address(adapter, offset + i); + + lan743x_csr_write(adapter, HS_OTP_PRGM_DATA, data[i]); + lan743x_csr_write(adapter, HS_OTP_TST_CMD, + OTP_TST_CMD_PRGVRFY_); + lan743x_csr_write(adapter, HS_OTP_CMD_GO, OTP_CMD_GO_GO_); + + ret = lan743x_hs_otp_cmd_cmplt_chk(adapter); + if (ret < 0) + goto power_down; + + lan743x_hs_syslock_release(adapter); + } + + ret = lan743x_hs_syslock_acquire(adapter, LOCK_TIMEOUT_MAX_CNT); + if (ret < 0) + return ret; + +power_down: + lan743x_hs_otp_power_down(adapter); + lan743x_hs_syslock_release(adapter); + + return ret; +} + +static int lan743x_eeprom_wait(struct lan743x_adapter *adapter) +{ + unsigned long start_time = jiffies; + u32 val; + + do { + val = lan743x_csr_read(adapter, E2P_CMD); + + if (!(val & E2P_CMD_EPC_BUSY_) || + (val & E2P_CMD_EPC_TIMEOUT_)) + break; + usleep_range(40, 100); + } while (!time_after(jiffies, start_time + HZ)); + + if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) { + netif_warn(adapter, drv, adapter->netdev, + "EEPROM read operation timeout\n"); + return -EIO; + } + + return 0; +} + +static int lan743x_eeprom_confirm_not_busy(struct lan743x_adapter *adapter) +{ + unsigned long start_time = jiffies; + u32 val; + + do { + val = lan743x_csr_read(adapter, E2P_CMD); + + if (!(val & E2P_CMD_EPC_BUSY_)) + return 0; + + usleep_range(40, 100); + } while (!time_after(jiffies, start_time + HZ)); + + netif_warn(adapter, drv, adapter->netdev, "EEPROM is busy\n"); + return -EIO; +} + +static int lan743x_eeprom_read(struct lan743x_adapter *adapter, + u32 offset, u32 length, u8 *data) +{ + int retval; + u32 val; + int i; + + if (offset + length > MAX_EEPROM_SIZE) + return -EINVAL; + + retval = lan743x_eeprom_confirm_not_busy(adapter); + if (retval) + return retval; + + for (i = 0; i < length; i++) { + val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_; + val |= (offset & E2P_CMD_EPC_ADDR_MASK_); + lan743x_csr_write(adapter, E2P_CMD, val); + + retval = lan743x_eeprom_wait(adapter); + if (retval < 0) + return retval; + + val = lan743x_csr_read(adapter, E2P_DATA); + data[i] = val & 0xFF; + offset++; + } + + return 0; +} + +static int lan743x_eeprom_write(struct lan743x_adapter *adapter, + u32 offset, u32 length, u8 *data) +{ + int retval; + u32 val; + int i; + + if (offset + length > MAX_EEPROM_SIZE) + return -EINVAL; + + retval = 
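+ /* controller must be idle before the write/erase enable (EWEN) command is issued */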
lan743x_eeprom_confirm_not_busy(adapter); + if (retval) + return retval; + + /* Issue write/erase enable command */ + val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_; + lan743x_csr_write(adapter, E2P_CMD, val); + + retval = lan743x_eeprom_wait(adapter); + if (retval < 0) + return retval; + + for (i = 0; i < length; i++) { + /* Fill data register */ + val = data[i]; + lan743x_csr_write(adapter, E2P_DATA, val); + + /* Send "write" command */ + val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_; + val |= (offset & E2P_CMD_EPC_ADDR_MASK_); + lan743x_csr_write(adapter, E2P_CMD, val); + + retval = lan743x_eeprom_wait(adapter); + if (retval < 0) + return retval; + + offset++; + } + + return 0; +} + +static int lan743x_hs_eeprom_cmd_cmplt_chk(struct lan743x_adapter *adapter) +{ + u32 val; + + return readx_poll_timeout(LAN743X_CSR_READ_OP, HS_E2P_CMD, val, + (!(val & HS_E2P_CMD_EPC_BUSY_) || + (val & HS_E2P_CMD_EPC_TIMEOUT_)), + 50, 10000); +} + +static int lan743x_hs_eeprom_read(struct lan743x_adapter *adapter, + u32 offset, u32 length, u8 *data) +{ + int retval; + u32 val; + int i; + + retval = lan743x_hs_syslock_acquire(adapter, LOCK_TIMEOUT_MAX_CNT); + if (retval < 0) + return retval; + + retval = lan743x_hs_eeprom_cmd_cmplt_chk(adapter); + lan743x_hs_syslock_release(adapter); + if (retval < 0) + return retval; + + for (i = 0; i < length; i++) { + retval = lan743x_hs_syslock_acquire(adapter, + LOCK_TIMEOUT_MAX_CNT); + if (retval < 0) + return retval; + + val = HS_E2P_CMD_EPC_BUSY_ | HS_E2P_CMD_EPC_CMD_READ_; + val |= (offset & HS_E2P_CMD_EPC_ADDR_MASK_); + lan743x_csr_write(adapter, HS_E2P_CMD, val); + retval = lan743x_hs_eeprom_cmd_cmplt_chk(adapter); + if (retval < 0) { + lan743x_hs_syslock_release(adapter); + return retval; + } + + val = lan743x_csr_read(adapter, HS_E2P_DATA); + + lan743x_hs_syslock_release(adapter); + + data[i] = val & 0xFF; + offset++; + } + + return 0; +} + +static int lan743x_hs_eeprom_write(struct lan743x_adapter *adapter, + u32 offset, u32 length, u8 *data) +{ + int retval; + u32 val; + int i; + + retval = lan743x_hs_syslock_acquire(adapter, LOCK_TIMEOUT_MAX_CNT); + if (retval < 0) + return retval; + + retval = lan743x_hs_eeprom_cmd_cmplt_chk(adapter); + lan743x_hs_syslock_release(adapter); + if (retval < 0) + return retval; + + for (i = 0; i < length; i++) { + retval = lan743x_hs_syslock_acquire(adapter, + LOCK_TIMEOUT_MAX_CNT); + if (retval < 0) + return retval; + + /* Fill data register */ + val = data[i]; + lan743x_csr_write(adapter, HS_E2P_DATA, val); + + /* Send "write" command */ + val = HS_E2P_CMD_EPC_BUSY_ | HS_E2P_CMD_EPC_CMD_WRITE_; + val |= (offset & HS_E2P_CMD_EPC_ADDR_MASK_); + lan743x_csr_write(adapter, HS_E2P_CMD, val); + + retval = lan743x_hs_eeprom_cmd_cmplt_chk(adapter); + lan743x_hs_syslock_release(adapter); + if (retval < 0) + return retval; + + offset++; + } + + return 0; +} + +static void lan743x_ethtool_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *info) +{ + struct lan743x_adapter *adapter = netdev_priv(netdev); + + strscpy(info->driver, DRIVER_NAME, sizeof(info->driver)); + strscpy(info->bus_info, + pci_name(adapter->pdev), sizeof(info->bus_info)); +} + +static u32 lan743x_ethtool_get_msglevel(struct net_device *netdev) +{ + struct lan743x_adapter *adapter = netdev_priv(netdev); + + return adapter->msg_enable; +} + +static void lan743x_ethtool_set_msglevel(struct net_device *netdev, + u32 msglevel) +{ + struct lan743x_adapter *adapter = netdev_priv(netdev); + + adapter->msg_enable = msglevel; +} + +static int 
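+ /* report the OTP size (1024 bytes) when the OTP_ACCESS private flag is set, otherwise the 512-byte EEPROM size */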
lan743x_ethtool_get_eeprom_len(struct net_device *netdev) +{ + struct lan743x_adapter *adapter = netdev_priv(netdev); + + if (adapter->flags & LAN743X_ADAPTER_FLAG_OTP) + return MAX_OTP_SIZE; + + return MAX_EEPROM_SIZE; +} + +static int lan743x_ethtool_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *ee, u8 *data) +{ + struct lan743x_adapter *adapter = netdev_priv(netdev); + int ret = 0; + + if (adapter->flags & LAN743X_ADAPTER_FLAG_OTP) { + if (adapter->is_pci11x1x) + ret = lan743x_hs_otp_read(adapter, ee->offset, + ee->len, data); + else + ret = lan743x_otp_read(adapter, ee->offset, + ee->len, data); + } else { + if (adapter->is_pci11x1x) + ret = lan743x_hs_eeprom_read(adapter, ee->offset, + ee->len, data); + else + ret = lan743x_eeprom_read(adapter, ee->offset, + ee->len, data); + } + + return ret; +} + +static int lan743x_ethtool_set_eeprom(struct net_device *netdev, + struct ethtool_eeprom *ee, u8 *data) +{ + struct lan743x_adapter *adapter = netdev_priv(netdev); + int ret = -EINVAL; + + if (adapter->flags & LAN743X_ADAPTER_FLAG_OTP) { + /* Beware! OTP is One Time Programming ONLY! */ + if (ee->magic == LAN743X_OTP_MAGIC) { + if (adapter->is_pci11x1x) + ret = lan743x_hs_otp_write(adapter, ee->offset, + ee->len, data); + else + ret = lan743x_otp_write(adapter, ee->offset, + ee->len, data); + } + } else { + if (ee->magic == LAN743X_EEPROM_MAGIC) { + if (adapter->is_pci11x1x) + ret = lan743x_hs_eeprom_write(adapter, + ee->offset, + ee->len, data); + else + ret = lan743x_eeprom_write(adapter, ee->offset, + ee->len, data); + } + } + + return ret; +} + +static const char lan743x_set0_hw_cnt_strings[][ETH_GSTRING_LEN] = { + "RX FCS Errors", + "RX Alignment Errors", + "Rx Fragment Errors", + "RX Jabber Errors", + "RX Undersize Frame Errors", + "RX Oversize Frame Errors", + "RX Dropped Frames", + "RX Unicast Byte Count", + "RX Broadcast Byte Count", + "RX Multicast Byte Count", + "RX Unicast Frames", + "RX Broadcast Frames", + "RX Multicast Frames", + "RX Pause Frames", + "RX 64 Byte Frames", + "RX 65 - 127 Byte Frames", + "RX 128 - 255 Byte Frames", + "RX 256 - 511 Bytes Frames", + "RX 512 - 1023 Byte Frames", + "RX 1024 - 1518 Byte Frames", + "RX Greater 1518 Byte Frames", +}; + +static const char lan743x_set1_sw_cnt_strings[][ETH_GSTRING_LEN] = { + "RX Queue 0 Frames", + "RX Queue 1 Frames", + "RX Queue 2 Frames", + "RX Queue 3 Frames", +}; + +static const char lan743x_tx_queue_cnt_strings[][ETH_GSTRING_LEN] = { + "TX Queue 0 Frames", + "TX Queue 1 Frames", + "TX Queue 2 Frames", + "TX Queue 3 Frames", + "TX Total Queue Frames", +}; + +static const char lan743x_set2_hw_cnt_strings[][ETH_GSTRING_LEN] = { + "RX Total Frames", + "EEE RX LPI Transitions", + "EEE RX LPI Time", + "RX Counter Rollover Status", + "TX FCS Errors", + "TX Excess Deferral Errors", + "TX Carrier Errors", + "TX Bad Byte Count", + "TX Single Collisions", + "TX Multiple Collisions", + "TX Excessive Collision", + "TX Late Collisions", + "TX Unicast Byte Count", + "TX Broadcast Byte Count", + "TX Multicast Byte Count", + "TX Unicast Frames", + "TX Broadcast Frames", + "TX Multicast Frames", + "TX Pause Frames", + "TX 64 Byte Frames", + "TX 65 - 127 Byte Frames", + "TX 128 - 255 Byte Frames", + "TX 256 - 511 Bytes Frames", + "TX 512 - 1023 Byte Frames", + "TX 1024 - 1518 Byte Frames", + "TX Greater 1518 Byte Frames", + "TX Total Frames", + "EEE TX LPI Transitions", + "EEE TX LPI Time", + "TX Counter Rollover Status", +}; + +static const u32 lan743x_set0_hw_cnt_addr[] = { + STAT_RX_FCS_ERRORS, + 
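+ /* CSR offsets for the set-0 counters, in the same order as lan743x_set0_hw_cnt_strings */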
STAT_RX_ALIGNMENT_ERRORS, + STAT_RX_FRAGMENT_ERRORS, + STAT_RX_JABBER_ERRORS, + STAT_RX_UNDERSIZE_FRAME_ERRORS, + STAT_RX_OVERSIZE_FRAME_ERRORS, + STAT_RX_DROPPED_FRAMES, + STAT_RX_UNICAST_BYTE_COUNT, + STAT_RX_BROADCAST_BYTE_COUNT, + STAT_RX_MULTICAST_BYTE_COUNT, + STAT_RX_UNICAST_FRAMES, + STAT_RX_BROADCAST_FRAMES, + STAT_RX_MULTICAST_FRAMES, + STAT_RX_PAUSE_FRAMES, + STAT_RX_64_BYTE_FRAMES, + STAT_RX_65_127_BYTE_FRAMES, + STAT_RX_128_255_BYTE_FRAMES, + STAT_RX_256_511_BYTES_FRAMES, + STAT_RX_512_1023_BYTE_FRAMES, + STAT_RX_1024_1518_BYTE_FRAMES, + STAT_RX_GREATER_1518_BYTE_FRAMES, +}; + +static const u32 lan743x_set2_hw_cnt_addr[] = { + STAT_RX_TOTAL_FRAMES, + STAT_EEE_RX_LPI_TRANSITIONS, + STAT_EEE_RX_LPI_TIME, + STAT_RX_COUNTER_ROLLOVER_STATUS, + STAT_TX_FCS_ERRORS, + STAT_TX_EXCESS_DEFERRAL_ERRORS, + STAT_TX_CARRIER_ERRORS, + STAT_TX_BAD_BYTE_COUNT, + STAT_TX_SINGLE_COLLISIONS, + STAT_TX_MULTIPLE_COLLISIONS, + STAT_TX_EXCESSIVE_COLLISION, + STAT_TX_LATE_COLLISIONS, + STAT_TX_UNICAST_BYTE_COUNT, + STAT_TX_BROADCAST_BYTE_COUNT, + STAT_TX_MULTICAST_BYTE_COUNT, + STAT_TX_UNICAST_FRAMES, + STAT_TX_BROADCAST_FRAMES, + STAT_TX_MULTICAST_FRAMES, + STAT_TX_PAUSE_FRAMES, + STAT_TX_64_BYTE_FRAMES, + STAT_TX_65_127_BYTE_FRAMES, + STAT_TX_128_255_BYTE_FRAMES, + STAT_TX_256_511_BYTES_FRAMES, + STAT_TX_512_1023_BYTE_FRAMES, + STAT_TX_1024_1518_BYTE_FRAMES, + STAT_TX_GREATER_1518_BYTE_FRAMES, + STAT_TX_TOTAL_FRAMES, + STAT_EEE_TX_LPI_TRANSITIONS, + STAT_EEE_TX_LPI_TIME, + STAT_TX_COUNTER_ROLLOVER_STATUS +}; + +static const char lan743x_priv_flags_strings[][ETH_GSTRING_LEN] = { + "OTP_ACCESS", +}; + +static void lan743x_ethtool_get_strings(struct net_device *netdev, + u32 stringset, u8 *data) +{ + struct lan743x_adapter *adapter = netdev_priv(netdev); + + switch (stringset) { + case ETH_SS_STATS: + memcpy(data, lan743x_set0_hw_cnt_strings, + sizeof(lan743x_set0_hw_cnt_strings)); + memcpy(&data[sizeof(lan743x_set0_hw_cnt_strings)], + lan743x_set1_sw_cnt_strings, + sizeof(lan743x_set1_sw_cnt_strings)); + memcpy(&data[sizeof(lan743x_set0_hw_cnt_strings) + + sizeof(lan743x_set1_sw_cnt_strings)], + lan743x_set2_hw_cnt_strings, + sizeof(lan743x_set2_hw_cnt_strings)); + if (adapter->is_pci11x1x) { + memcpy(&data[sizeof(lan743x_set0_hw_cnt_strings) + + sizeof(lan743x_set1_sw_cnt_strings) + + sizeof(lan743x_set2_hw_cnt_strings)], + lan743x_tx_queue_cnt_strings, + sizeof(lan743x_tx_queue_cnt_strings)); + } + break; + case ETH_SS_PRIV_FLAGS: + memcpy(data, lan743x_priv_flags_strings, + sizeof(lan743x_priv_flags_strings)); + break; + } +} + +static void lan743x_ethtool_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, + u64 *data) +{ + struct lan743x_adapter *adapter = netdev_priv(netdev); + u64 total_queue_count = 0; + int data_index = 0; + u64 pkt_cnt; + u32 buf; + int i; + + for (i = 0; i < ARRAY_SIZE(lan743x_set0_hw_cnt_addr); i++) { + buf = lan743x_csr_read(adapter, lan743x_set0_hw_cnt_addr[i]); + data[data_index++] = (u64)buf; + } + for (i = 0; i < ARRAY_SIZE(adapter->rx); i++) + data[data_index++] = (u64)(adapter->rx[i].frame_count); + for (i = 0; i < ARRAY_SIZE(lan743x_set2_hw_cnt_addr); i++) { + buf = lan743x_csr_read(adapter, lan743x_set2_hw_cnt_addr[i]); + data[data_index++] = (u64)buf; + } + if (adapter->is_pci11x1x) { + for (i = 0; i < ARRAY_SIZE(adapter->tx); i++) { + pkt_cnt = (u64)(adapter->tx[i].frame_count); + data[data_index++] = pkt_cnt; + total_queue_count += pkt_cnt; + } + data[data_index++] = total_queue_count; + } +} + +static u32 
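+ /* OTP_ACCESS is the only private flag; it makes the ethtool eeprom ops target OTP instead of EEPROM */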
lan743x_ethtool_get_priv_flags(struct net_device *netdev) +{ + struct lan743x_adapter *adapter = netdev_priv(netdev); + + return adapter->flags; +} + +static int lan743x_ethtool_set_priv_flags(struct net_device *netdev, u32 flags) +{ + struct lan743x_adapter *adapter = netdev_priv(netdev); + + adapter->flags = flags; + + return 0; +} + +static int lan743x_ethtool_get_sset_count(struct net_device *netdev, int sset) +{ + struct lan743x_adapter *adapter = netdev_priv(netdev); + + switch (sset) { + case ETH_SS_STATS: + { + int ret; + + ret = ARRAY_SIZE(lan743x_set0_hw_cnt_strings); + ret += ARRAY_SIZE(lan743x_set1_sw_cnt_strings); + ret += ARRAY_SIZE(lan743x_set2_hw_cnt_strings); + if (adapter->is_pci11x1x) + ret += ARRAY_SIZE(lan743x_tx_queue_cnt_strings); + return ret; + } + case ETH_SS_PRIV_FLAGS: + return ARRAY_SIZE(lan743x_priv_flags_strings); + default: + return -EOPNOTSUPP; + } +} + +static int lan743x_ethtool_get_rxnfc(struct net_device *netdev, + struct ethtool_rxnfc *rxnfc, + u32 *rule_locs) +{ + switch (rxnfc->cmd) { + case ETHTOOL_GRXFH: + rxnfc->data = 0; + switch (rxnfc->flow_type) { + case TCP_V4_FLOW:case UDP_V4_FLOW: + case TCP_V6_FLOW:case UDP_V6_FLOW: + rxnfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + fallthrough; + case IPV4_FLOW: case IPV6_FLOW: + rxnfc->data |= RXH_IP_SRC | RXH_IP_DST; + return 0; + } + break; + case ETHTOOL_GRXRINGS: + rxnfc->data = LAN743X_USED_RX_CHANNELS; + return 0; + } + return -EOPNOTSUPP; +} + +static u32 lan743x_ethtool_get_rxfh_key_size(struct net_device *netdev) +{ + return 40; +} + +static u32 lan743x_ethtool_get_rxfh_indir_size(struct net_device *netdev) +{ + return 128; +} + +static int lan743x_ethtool_get_rxfh(struct net_device *netdev, + u32 *indir, u8 *key, u8 *hfunc) +{ + struct lan743x_adapter *adapter = netdev_priv(netdev); + + if (indir) { + int dw_index; + int byte_index = 0; + + for (dw_index = 0; dw_index < 32; dw_index++) { + u32 four_entries = + lan743x_csr_read(adapter, RFE_INDX(dw_index)); + + byte_index = dw_index << 2; + indir[byte_index + 0] = + ((four_entries >> 0) & 0x000000FF); + indir[byte_index + 1] = + ((four_entries >> 8) & 0x000000FF); + indir[byte_index + 2] = + ((four_entries >> 16) & 0x000000FF); + indir[byte_index + 3] = + ((four_entries >> 24) & 0x000000FF); + } + } + if (key) { + int dword_index; + int byte_index = 0; + + for (dword_index = 0; dword_index < 10; dword_index++) { + u32 four_entries = + lan743x_csr_read(adapter, + RFE_HASH_KEY(dword_index)); + + byte_index = dword_index << 2; + key[byte_index + 0] = + ((four_entries >> 0) & 0x000000FF); + key[byte_index + 1] = + ((four_entries >> 8) & 0x000000FF); + key[byte_index + 2] = + ((four_entries >> 16) & 0x000000FF); + key[byte_index + 3] = + ((four_entries >> 24) & 0x000000FF); + } + } + if (hfunc) + (*hfunc) = ETH_RSS_HASH_TOP; + return 0; +} + +static int lan743x_ethtool_set_rxfh(struct net_device *netdev, + const u32 *indir, const u8 *key, + const u8 hfunc) +{ + struct lan743x_adapter *adapter = netdev_priv(netdev); + + if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) + return -EOPNOTSUPP; + + if (indir) { + u32 indir_value = 0; + int dword_index = 0; + int byte_index = 0; + + for (dword_index = 0; dword_index < 32; dword_index++) { + byte_index = dword_index << 2; + indir_value = + (((indir[byte_index + 0] & 0x000000FF) << 0) | + ((indir[byte_index + 1] & 0x000000FF) << 8) | + ((indir[byte_index + 2] & 0x000000FF) << 16) | + ((indir[byte_index + 3] & 0x000000FF) << 24)); + lan743x_csr_write(adapter, RFE_INDX(dword_index), + 
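+ /* each RFE_INDX register packs four 8-bit RSS indirection table entries */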
indir_value); + } + } + if (key) { + int dword_index = 0; + int byte_index = 0; + u32 key_value = 0; + + for (dword_index = 0; dword_index < 10; dword_index++) { + byte_index = dword_index << 2; + key_value = + ((((u32)(key[byte_index + 0])) << 0) | + (((u32)(key[byte_index + 1])) << 8) | + (((u32)(key[byte_index + 2])) << 16) | + (((u32)(key[byte_index + 3])) << 24)); + lan743x_csr_write(adapter, RFE_HASH_KEY(dword_index), + key_value); + } + } + return 0; +} + +static int lan743x_ethtool_get_ts_info(struct net_device *netdev, + struct ethtool_ts_info *ts_info) +{ + struct lan743x_adapter *adapter = netdev_priv(netdev); + + ts_info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE | + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + + if (adapter->ptp.ptp_clock) + ts_info->phc_index = ptp_clock_index(adapter->ptp.ptp_clock); + else + ts_info->phc_index = -1; + + ts_info->tx_types = BIT(HWTSTAMP_TX_OFF) | + BIT(HWTSTAMP_TX_ON) | + BIT(HWTSTAMP_TX_ONESTEP_SYNC); + ts_info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | + BIT(HWTSTAMP_FILTER_ALL); + return 0; +} + +static int lan743x_ethtool_get_eee(struct net_device *netdev, + struct ethtool_eee *eee) +{ + struct lan743x_adapter *adapter = netdev_priv(netdev); + struct phy_device *phydev = netdev->phydev; + u32 buf; + int ret; + + if (!phydev) + return -EIO; + if (!phydev->drv) { + netif_err(adapter, drv, adapter->netdev, + "Missing PHY Driver\n"); + return -EIO; + } + + ret = phy_ethtool_get_eee(phydev, eee); + if (ret < 0) + return ret; + + buf = lan743x_csr_read(adapter, MAC_CR); + if (buf & MAC_CR_EEE_EN_) { + eee->eee_enabled = true; + eee->eee_active = !!(eee->advertised & eee->lp_advertised); + eee->tx_lpi_enabled = true; + /* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */ + buf = lan743x_csr_read(adapter, MAC_EEE_TX_LPI_REQ_DLY_CNT); + eee->tx_lpi_timer = buf; + } else { + eee->eee_enabled = false; + eee->eee_active = false; + eee->tx_lpi_enabled = false; + eee->tx_lpi_timer = 0; + } + + return 0; +} + +static int lan743x_ethtool_set_eee(struct net_device *netdev, + struct ethtool_eee *eee) +{ + struct lan743x_adapter *adapter; + struct phy_device *phydev; + u32 buf = 0; + int ret = 0; + + if (!netdev) + return -EINVAL; + adapter = netdev_priv(netdev); + if (!adapter) + return -EINVAL; + phydev = netdev->phydev; + if (!phydev) + return -EIO; + if (!phydev->drv) { + netif_err(adapter, drv, adapter->netdev, + "Missing PHY Driver\n"); + return -EIO; + } + + if (eee->eee_enabled) { + ret = phy_init_eee(phydev, false); + if (ret) { + netif_err(adapter, drv, adapter->netdev, + "EEE initialization failed\n"); + return ret; + } + + buf = (u32)eee->tx_lpi_timer; + lan743x_csr_write(adapter, MAC_EEE_TX_LPI_REQ_DLY_CNT, buf); + + buf = lan743x_csr_read(adapter, MAC_CR); + buf |= MAC_CR_EEE_EN_; + lan743x_csr_write(adapter, MAC_CR, buf); + } else { + buf = lan743x_csr_read(adapter, MAC_CR); + buf &= ~MAC_CR_EEE_EN_; + lan743x_csr_write(adapter, MAC_CR, buf); + } + + return phy_ethtool_set_eee(phydev, eee); +} + +#ifdef CONFIG_PM +static void lan743x_ethtool_get_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct lan743x_adapter *adapter = netdev_priv(netdev); + + wol->supported = 0; + wol->wolopts = 0; + + if (netdev->phydev) + phy_ethtool_get_wol(netdev->phydev, wol); + + wol->supported |= WAKE_BCAST | WAKE_UCAST | WAKE_MCAST | + WAKE_MAGIC | WAKE_PHY | WAKE_ARP; + + if (adapter->is_pci11x1x) + wol->supported 
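+ /* only the PCI11x1x parts additionally support SecureOn magic packets */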
|= WAKE_MAGICSECURE; + + wol->wolopts |= adapter->wolopts; + if (adapter->wolopts & WAKE_MAGICSECURE) + memcpy(wol->sopass, adapter->sopass, sizeof(wol->sopass)); +} + +static int lan743x_ethtool_set_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct lan743x_adapter *adapter = netdev_priv(netdev); + + adapter->wolopts = 0; + if (wol->wolopts & WAKE_UCAST) + adapter->wolopts |= WAKE_UCAST; + if (wol->wolopts & WAKE_MCAST) + adapter->wolopts |= WAKE_MCAST; + if (wol->wolopts & WAKE_BCAST) + adapter->wolopts |= WAKE_BCAST; + if (wol->wolopts & WAKE_MAGIC) + adapter->wolopts |= WAKE_MAGIC; + if (wol->wolopts & WAKE_PHY) + adapter->wolopts |= WAKE_PHY; + if (wol->wolopts & WAKE_ARP) + adapter->wolopts |= WAKE_ARP; + if (wol->wolopts & WAKE_MAGICSECURE && + wol->wolopts & WAKE_MAGIC) { + memcpy(adapter->sopass, wol->sopass, sizeof(wol->sopass)); + adapter->wolopts |= WAKE_MAGICSECURE; + } else { + memset(adapter->sopass, 0, sizeof(u8) * SOPASS_MAX); + } + + device_set_wakeup_enable(&adapter->pdev->dev, (bool)wol->wolopts); + + return netdev->phydev ? phy_ethtool_set_wol(netdev->phydev, wol) + : -ENETDOWN; +} +#endif /* CONFIG_PM */ + +static void lan743x_common_regs(struct net_device *dev, + struct ethtool_regs *regs, void *p) + +{ + struct lan743x_adapter *adapter = netdev_priv(dev); + u32 *rb = p; + + memset(p, 0, (MAX_LAN743X_ETH_REGS * sizeof(u32))); + + rb[ETH_PRIV_FLAGS] = adapter->flags; + rb[ETH_ID_REV] = lan743x_csr_read(adapter, ID_REV); + rb[ETH_FPGA_REV] = lan743x_csr_read(adapter, FPGA_REV); + rb[ETH_STRAP_READ] = lan743x_csr_read(adapter, STRAP_READ); + rb[ETH_INT_STS] = lan743x_csr_read(adapter, INT_STS); + rb[ETH_HW_CFG] = lan743x_csr_read(adapter, HW_CFG); + rb[ETH_PMT_CTL] = lan743x_csr_read(adapter, PMT_CTL); + rb[ETH_E2P_CMD] = lan743x_csr_read(adapter, E2P_CMD); + rb[ETH_E2P_DATA] = lan743x_csr_read(adapter, E2P_DATA); + rb[ETH_MAC_CR] = lan743x_csr_read(adapter, MAC_CR); + rb[ETH_MAC_RX] = lan743x_csr_read(adapter, MAC_RX); + rb[ETH_MAC_TX] = lan743x_csr_read(adapter, MAC_TX); + rb[ETH_FLOW] = lan743x_csr_read(adapter, MAC_FLOW); + rb[ETH_MII_ACC] = lan743x_csr_read(adapter, MAC_MII_ACC); + rb[ETH_MII_DATA] = lan743x_csr_read(adapter, MAC_MII_DATA); + rb[ETH_EEE_TX_LPI_REQ_DLY] = lan743x_csr_read(adapter, + MAC_EEE_TX_LPI_REQ_DLY_CNT); + rb[ETH_WUCSR] = lan743x_csr_read(adapter, MAC_WUCSR); + rb[ETH_WK_SRC] = lan743x_csr_read(adapter, MAC_WK_SRC); +} + +static int lan743x_get_regs_len(struct net_device *dev) +{ + return MAX_LAN743X_ETH_REGS * sizeof(u32); +} + +static void lan743x_get_regs(struct net_device *dev, + struct ethtool_regs *regs, void *p) +{ + regs->version = LAN743X_ETH_REG_VERSION; + + lan743x_common_regs(dev, regs, p); +} + +const struct ethtool_ops lan743x_ethtool_ops = { + .get_drvinfo = lan743x_ethtool_get_drvinfo, + .get_msglevel = lan743x_ethtool_get_msglevel, + .set_msglevel = lan743x_ethtool_set_msglevel, + .get_link = ethtool_op_get_link, + + .get_eeprom_len = lan743x_ethtool_get_eeprom_len, + .get_eeprom = lan743x_ethtool_get_eeprom, + .set_eeprom = lan743x_ethtool_set_eeprom, + .get_strings = lan743x_ethtool_get_strings, + .get_ethtool_stats = lan743x_ethtool_get_ethtool_stats, + .get_priv_flags = lan743x_ethtool_get_priv_flags, + .set_priv_flags = lan743x_ethtool_set_priv_flags, + .get_sset_count = lan743x_ethtool_get_sset_count, + .get_rxnfc = lan743x_ethtool_get_rxnfc, + .get_rxfh_key_size = lan743x_ethtool_get_rxfh_key_size, + .get_rxfh_indir_size = lan743x_ethtool_get_rxfh_indir_size, + .get_rxfh = 
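+ /* RSS accessors: 40-byte hash key, 128-entry indirection table */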
lan743x_ethtool_get_rxfh, + .set_rxfh = lan743x_ethtool_set_rxfh, + .get_ts_info = lan743x_ethtool_get_ts_info, + .get_eee = lan743x_ethtool_get_eee, + .set_eee = lan743x_ethtool_set_eee, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, + .get_regs_len = lan743x_get_regs_len, + .get_regs = lan743x_get_regs, +#ifdef CONFIG_PM + .get_wol = lan743x_ethtool_get_wol, + .set_wol = lan743x_ethtool_set_wol, +#endif +}; diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.h b/drivers/net/ethernet/microchip/lan743x_ethtool.h new file mode 100644 index 000000000..7f5996a52 --- /dev/null +++ b/drivers/net/ethernet/microchip/lan743x_ethtool.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (C) 2018 Microchip Technology Inc. */ + +#ifndef _LAN743X_ETHTOOL_H +#define _LAN743X_ETHTOOL_H + +#include "linux/ethtool.h" + +#define LAN743X_ETH_REG_VERSION 1 + +enum { + ETH_PRIV_FLAGS, + ETH_ID_REV, + ETH_FPGA_REV, + ETH_STRAP_READ, + ETH_INT_STS, + ETH_HW_CFG, + ETH_PMT_CTL, + ETH_E2P_CMD, + ETH_E2P_DATA, + ETH_MAC_CR, + ETH_MAC_RX, + ETH_MAC_TX, + ETH_FLOW, + ETH_MII_ACC, + ETH_MII_DATA, + ETH_EEE_TX_LPI_REQ_DLY, + ETH_WUCSR, + ETH_WK_SRC, + + /* Add new registers above */ + MAX_LAN743X_ETH_REGS +}; + +extern const struct ethtool_ops lan743x_ethtool_ops; + +#endif /* _LAN743X_ETHTOOL_H */ diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c new file mode 100644 index 000000000..e804613fa --- /dev/null +++ b/drivers/net/ethernet/microchip/lan743x_main.c @@ -0,0 +1,3696 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (C) 2018 Microchip Technology Inc. */ + +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/crc32.h> +#include <linux/microchipphy.h> +#include <linux/net_tstamp.h> +#include <linux/of_mdio.h> +#include <linux/of_net.h> +#include <linux/phy.h> +#include <linux/phy_fixed.h> +#include <linux/rtnetlink.h> +#include <linux/iopoll.h> +#include <linux/crc16.h> +#include "lan743x_main.h" +#include "lan743x_ethtool.h" + +#define MMD_ACCESS_ADDRESS 0 +#define MMD_ACCESS_WRITE 1 +#define MMD_ACCESS_READ 2 +#define MMD_ACCESS_READ_INC 3 +#define PCS_POWER_STATE_DOWN 0x6 +#define PCS_POWER_STATE_UP 0x4 + +static void pci11x1x_strap_get_status(struct lan743x_adapter *adapter) +{ + u32 chip_rev; + u32 cfg_load; + u32 hw_cfg; + u32 strap; + int ret; + + /* Timeout = 100 (i.e. 1 sec (10 msce * 100)) */ + ret = lan743x_hs_syslock_acquire(adapter, 100); + if (ret < 0) { + netif_err(adapter, drv, adapter->netdev, + "Sys Lock acquire failed ret:%d\n", ret); + return; + } + + cfg_load = lan743x_csr_read(adapter, ETH_SYS_CONFIG_LOAD_STARTED_REG); + lan743x_hs_syslock_release(adapter); + hw_cfg = lan743x_csr_read(adapter, HW_CFG); + + if (cfg_load & GEN_SYS_LOAD_STARTED_REG_ETH_ || + hw_cfg & HW_CFG_RST_PROTECT_) { + strap = lan743x_csr_read(adapter, STRAP_READ); + if (strap & STRAP_READ_SGMII_EN_) + adapter->is_sgmii_en = true; + else + adapter->is_sgmii_en = false; + } else { + chip_rev = lan743x_csr_read(adapter, FPGA_REV); + if (chip_rev) { + if (chip_rev & FPGA_SGMII_OP) + adapter->is_sgmii_en = true; + else + adapter->is_sgmii_en = false; + } else { + adapter->is_sgmii_en = false; + } + } + netif_dbg(adapter, drv, adapter->netdev, + "SGMII I/F %sable\n", adapter->is_sgmii_en ? 
"En" : "Dis"); +} + +static bool is_pci11x1x_chip(struct lan743x_adapter *adapter) +{ + struct lan743x_csr *csr = &adapter->csr; + u32 id_rev = csr->id_rev; + + if (((id_rev & 0xFFFF0000) == ID_REV_ID_A011_) || + ((id_rev & 0xFFFF0000) == ID_REV_ID_A041_)) { + return true; + } + return false; +} + +static void lan743x_pci_cleanup(struct lan743x_adapter *adapter) +{ + pci_release_selected_regions(adapter->pdev, + pci_select_bars(adapter->pdev, + IORESOURCE_MEM)); + pci_disable_device(adapter->pdev); +} + +static int lan743x_pci_init(struct lan743x_adapter *adapter, + struct pci_dev *pdev) +{ + unsigned long bars = 0; + int ret; + + adapter->pdev = pdev; + ret = pci_enable_device_mem(pdev); + if (ret) + goto return_error; + + netif_info(adapter, probe, adapter->netdev, + "PCI: Vendor ID = 0x%04X, Device ID = 0x%04X\n", + pdev->vendor, pdev->device); + bars = pci_select_bars(pdev, IORESOURCE_MEM); + if (!test_bit(0, &bars)) + goto disable_device; + + ret = pci_request_selected_regions(pdev, bars, DRIVER_NAME); + if (ret) + goto disable_device; + + pci_set_master(pdev); + return 0; + +disable_device: + pci_disable_device(adapter->pdev); + +return_error: + return ret; +} + +u32 lan743x_csr_read(struct lan743x_adapter *adapter, int offset) +{ + return ioread32(&adapter->csr.csr_address[offset]); +} + +void lan743x_csr_write(struct lan743x_adapter *adapter, int offset, + u32 data) +{ + iowrite32(data, &adapter->csr.csr_address[offset]); +} + +#define LAN743X_CSR_READ_OP(offset) lan743x_csr_read(adapter, offset) + +static int lan743x_csr_light_reset(struct lan743x_adapter *adapter) +{ + u32 data; + + data = lan743x_csr_read(adapter, HW_CFG); + data |= HW_CFG_LRST_; + lan743x_csr_write(adapter, HW_CFG, data); + + return readx_poll_timeout(LAN743X_CSR_READ_OP, HW_CFG, data, + !(data & HW_CFG_LRST_), 100000, 10000000); +} + +static int lan743x_csr_wait_for_bit_atomic(struct lan743x_adapter *adapter, + int offset, u32 bit_mask, + int target_value, int udelay_min, + int udelay_max, int count) +{ + u32 data; + + return readx_poll_timeout_atomic(LAN743X_CSR_READ_OP, offset, data, + target_value == !!(data & bit_mask), + udelay_max, udelay_min * count); +} + +static int lan743x_csr_wait_for_bit(struct lan743x_adapter *adapter, + int offset, u32 bit_mask, + int target_value, int usleep_min, + int usleep_max, int count) +{ + u32 data; + + return readx_poll_timeout(LAN743X_CSR_READ_OP, offset, data, + target_value == ((data & bit_mask) ? 
1 : 0), + usleep_max, usleep_min * count); +} + +static int lan743x_csr_init(struct lan743x_adapter *adapter) +{ + struct lan743x_csr *csr = &adapter->csr; + resource_size_t bar_start, bar_length; + int result; + + bar_start = pci_resource_start(adapter->pdev, 0); + bar_length = pci_resource_len(adapter->pdev, 0); + csr->csr_address = devm_ioremap(&adapter->pdev->dev, + bar_start, bar_length); + if (!csr->csr_address) { + result = -ENOMEM; + goto clean_up; + } + + csr->id_rev = lan743x_csr_read(adapter, ID_REV); + csr->fpga_rev = lan743x_csr_read(adapter, FPGA_REV); + netif_info(adapter, probe, adapter->netdev, + "ID_REV = 0x%08X, FPGA_REV = %d.%d\n", + csr->id_rev, FPGA_REV_GET_MAJOR_(csr->fpga_rev), + FPGA_REV_GET_MINOR_(csr->fpga_rev)); + if (!ID_REV_IS_VALID_CHIP_ID_(csr->id_rev)) { + result = -ENODEV; + goto clean_up; + } + + csr->flags = LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR; + switch (csr->id_rev & ID_REV_CHIP_REV_MASK_) { + case ID_REV_CHIP_REV_A0_: + csr->flags |= LAN743X_CSR_FLAG_IS_A0; + csr->flags &= ~LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR; + break; + case ID_REV_CHIP_REV_B0_: + csr->flags |= LAN743X_CSR_FLAG_IS_B0; + break; + } + + result = lan743x_csr_light_reset(adapter); + if (result) + goto clean_up; + return 0; +clean_up: + return result; +} + +static void lan743x_intr_software_isr(struct lan743x_adapter *adapter) +{ + struct lan743x_intr *intr = &adapter->intr; + + /* disable the interrupt to prevent repeated re-triggering */ + lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_SW_GP_); + intr->software_isr_flag = true; + wake_up(&intr->software_isr_wq); +} + +static void lan743x_tx_isr(void *context, u32 int_sts, u32 flags) +{ + struct lan743x_tx *tx = context; + struct lan743x_adapter *adapter = tx->adapter; + bool enable_flag = true; + + lan743x_csr_read(adapter, INT_EN_SET); + if (flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR) { + lan743x_csr_write(adapter, INT_EN_CLR, + INT_BIT_DMA_TX_(tx->channel_number)); + } + + if (int_sts & INT_BIT_DMA_TX_(tx->channel_number)) { + u32 ioc_bit = DMAC_INT_BIT_TX_IOC_(tx->channel_number); + u32 dmac_int_sts; + u32 dmac_int_en; + + if (flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ) + dmac_int_sts = lan743x_csr_read(adapter, DMAC_INT_STS); + else + dmac_int_sts = ioc_bit; + if (flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK) + dmac_int_en = lan743x_csr_read(adapter, + DMAC_INT_EN_SET); + else + dmac_int_en = ioc_bit; + + dmac_int_en &= ioc_bit; + dmac_int_sts &= dmac_int_en; + if (dmac_int_sts & ioc_bit) { + napi_schedule(&tx->napi); + enable_flag = false;/* poll func will enable later */ + } + } + + if (enable_flag) + /* enable isr */ + lan743x_csr_write(adapter, INT_EN_SET, + INT_BIT_DMA_TX_(tx->channel_number)); +} + +static void lan743x_rx_isr(void *context, u32 int_sts, u32 flags) +{ + struct lan743x_rx *rx = context; + struct lan743x_adapter *adapter = rx->adapter; + bool enable_flag = true; + + if (flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR) { + lan743x_csr_write(adapter, INT_EN_CLR, + INT_BIT_DMA_RX_(rx->channel_number)); + } + + if (int_sts & INT_BIT_DMA_RX_(rx->channel_number)) { + u32 rx_frame_bit = DMAC_INT_BIT_RXFRM_(rx->channel_number); + u32 dmac_int_sts; + u32 dmac_int_en; + + if (flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ) + dmac_int_sts = lan743x_csr_read(adapter, DMAC_INT_STS); + else + dmac_int_sts = rx_frame_bit; + if (flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK) + dmac_int_en = lan743x_csr_read(adapter, + DMAC_INT_EN_SET); + else + dmac_int_en = rx_frame_bit; + + dmac_int_en &= 
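+ /* schedule NAPI only when the RX frame interrupt is both enabled and pending */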
rx_frame_bit; + dmac_int_sts &= dmac_int_en; + if (dmac_int_sts & rx_frame_bit) { + napi_schedule(&rx->napi); + enable_flag = false;/* poll funct will enable later */ + } + } + + if (enable_flag) { + /* enable isr */ + lan743x_csr_write(adapter, INT_EN_SET, + INT_BIT_DMA_RX_(rx->channel_number)); + } +} + +static void lan743x_intr_shared_isr(void *context, u32 int_sts, u32 flags) +{ + struct lan743x_adapter *adapter = context; + unsigned int channel; + + if (int_sts & INT_BIT_ALL_RX_) { + for (channel = 0; channel < LAN743X_USED_RX_CHANNELS; + channel++) { + u32 int_bit = INT_BIT_DMA_RX_(channel); + + if (int_sts & int_bit) { + lan743x_rx_isr(&adapter->rx[channel], + int_bit, flags); + int_sts &= ~int_bit; + } + } + } + if (int_sts & INT_BIT_ALL_TX_) { + for (channel = 0; channel < adapter->used_tx_channels; + channel++) { + u32 int_bit = INT_BIT_DMA_TX_(channel); + + if (int_sts & int_bit) { + lan743x_tx_isr(&adapter->tx[channel], + int_bit, flags); + int_sts &= ~int_bit; + } + } + } + if (int_sts & INT_BIT_ALL_OTHER_) { + if (int_sts & INT_BIT_SW_GP_) { + lan743x_intr_software_isr(adapter); + int_sts &= ~INT_BIT_SW_GP_; + } + if (int_sts & INT_BIT_1588_) { + lan743x_ptp_isr(adapter); + int_sts &= ~INT_BIT_1588_; + } + } + if (int_sts) + lan743x_csr_write(adapter, INT_EN_CLR, int_sts); +} + +static irqreturn_t lan743x_intr_entry_isr(int irq, void *ptr) +{ + struct lan743x_vector *vector = ptr; + struct lan743x_adapter *adapter = vector->adapter; + irqreturn_t result = IRQ_NONE; + u32 int_enables; + u32 int_sts; + + if (vector->flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ) { + int_sts = lan743x_csr_read(adapter, INT_STS); + } else if (vector->flags & + (LAN743X_VECTOR_FLAG_SOURCE_STATUS_R2C | + LAN743X_VECTOR_FLAG_SOURCE_ENABLE_R2C)) { + int_sts = lan743x_csr_read(adapter, INT_STS_R2C); + } else { + /* use mask as implied status */ + int_sts = vector->int_mask | INT_BIT_MAS_; + } + + if (!(int_sts & INT_BIT_MAS_)) + goto irq_done; + + if (vector->flags & LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_CLEAR) + /* disable vector interrupt */ + lan743x_csr_write(adapter, + INT_VEC_EN_CLR, + INT_VEC_EN_(vector->vector_index)); + + if (vector->flags & LAN743X_VECTOR_FLAG_MASTER_ENABLE_CLEAR) + /* disable master interrupt */ + lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_MAS_); + + if (vector->flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK) { + int_enables = lan743x_csr_read(adapter, INT_EN_SET); + } else { + /* use vector mask as implied enable mask */ + int_enables = vector->int_mask; + } + + int_sts &= int_enables; + int_sts &= vector->int_mask; + if (int_sts) { + if (vector->handler) { + vector->handler(vector->context, + int_sts, vector->flags); + } else { + /* disable interrupts on this vector */ + lan743x_csr_write(adapter, INT_EN_CLR, + vector->int_mask); + } + result = IRQ_HANDLED; + } + + if (vector->flags & LAN743X_VECTOR_FLAG_MASTER_ENABLE_SET) + /* enable master interrupt */ + lan743x_csr_write(adapter, INT_EN_SET, INT_BIT_MAS_); + + if (vector->flags & LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_SET) + /* enable vector interrupt */ + lan743x_csr_write(adapter, + INT_VEC_EN_SET, + INT_VEC_EN_(vector->vector_index)); +irq_done: + return result; +} + +static int lan743x_intr_test_isr(struct lan743x_adapter *adapter) +{ + struct lan743x_intr *intr = &adapter->intr; + int ret; + + intr->software_isr_flag = false; + + /* enable and activate test interrupt */ + lan743x_csr_write(adapter, INT_EN_SET, INT_BIT_SW_GP_); + lan743x_csr_write(adapter, INT_SET, INT_BIT_SW_GP_); + + ret = 
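+ /* wait up to 200 ms for the software test interrupt to be serviced */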
wait_event_timeout(intr->software_isr_wq, + intr->software_isr_flag, + msecs_to_jiffies(200)); + + /* disable test interrupt */ + lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_SW_GP_); + + return ret > 0 ? 0 : -ENODEV; +} + +static int lan743x_intr_register_isr(struct lan743x_adapter *adapter, + int vector_index, u32 flags, + u32 int_mask, + lan743x_vector_handler handler, + void *context) +{ + struct lan743x_vector *vector = &adapter->intr.vector_list + [vector_index]; + int ret; + + vector->adapter = adapter; + vector->flags = flags; + vector->vector_index = vector_index; + vector->int_mask = int_mask; + vector->handler = handler; + vector->context = context; + + ret = request_irq(vector->irq, + lan743x_intr_entry_isr, + (flags & LAN743X_VECTOR_FLAG_IRQ_SHARED) ? + IRQF_SHARED : 0, DRIVER_NAME, vector); + if (ret) { + vector->handler = NULL; + vector->context = NULL; + vector->int_mask = 0; + vector->flags = 0; + } + return ret; +} + +static void lan743x_intr_unregister_isr(struct lan743x_adapter *adapter, + int vector_index) +{ + struct lan743x_vector *vector = &adapter->intr.vector_list + [vector_index]; + + free_irq(vector->irq, vector); + vector->handler = NULL; + vector->context = NULL; + vector->int_mask = 0; + vector->flags = 0; +} + +static u32 lan743x_intr_get_vector_flags(struct lan743x_adapter *adapter, + u32 int_mask) +{ + int index; + + for (index = 0; index < adapter->max_vector_count; index++) { + if (adapter->intr.vector_list[index].int_mask & int_mask) + return adapter->intr.vector_list[index].flags; + } + return 0; +} + +static void lan743x_intr_close(struct lan743x_adapter *adapter) +{ + struct lan743x_intr *intr = &adapter->intr; + int index = 0; + + lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_MAS_); + if (adapter->is_pci11x1x) + lan743x_csr_write(adapter, INT_VEC_EN_CLR, 0x0000FFFF); + else + lan743x_csr_write(adapter, INT_VEC_EN_CLR, 0x000000FF); + + for (index = 0; index < intr->number_of_vectors; index++) { + if (intr->flags & INTR_FLAG_IRQ_REQUESTED(index)) { + lan743x_intr_unregister_isr(adapter, index); + intr->flags &= ~INTR_FLAG_IRQ_REQUESTED(index); + } + } + + if (intr->flags & INTR_FLAG_MSI_ENABLED) { + pci_disable_msi(adapter->pdev); + intr->flags &= ~INTR_FLAG_MSI_ENABLED; + } + + if (intr->flags & INTR_FLAG_MSIX_ENABLED) { + pci_disable_msix(adapter->pdev); + intr->flags &= ~INTR_FLAG_MSIX_ENABLED; + } +} + +static int lan743x_intr_open(struct lan743x_adapter *adapter) +{ + struct msix_entry msix_entries[PCI11X1X_MAX_VECTOR_COUNT]; + struct lan743x_intr *intr = &adapter->intr; + unsigned int used_tx_channels; + u32 int_vec_en_auto_clr = 0; + u8 max_vector_count; + u32 int_vec_map0 = 0; + u32 int_vec_map1 = 0; + int ret = -ENODEV; + int index = 0; + u32 flags = 0; + + intr->number_of_vectors = 0; + + /* Try to set up MSIX interrupts */ + max_vector_count = adapter->max_vector_count; + memset(&msix_entries[0], 0, + sizeof(struct msix_entry) * max_vector_count); + for (index = 0; index < max_vector_count; index++) + msix_entries[index].entry = index; + used_tx_channels = adapter->used_tx_channels; + ret = pci_enable_msix_range(adapter->pdev, + msix_entries, 1, + 1 + used_tx_channels + + LAN743X_USED_RX_CHANNELS); + + if (ret > 0) { + intr->flags |= INTR_FLAG_MSIX_ENABLED; + intr->number_of_vectors = ret; + intr->using_vectors = true; + for (index = 0; index < intr->number_of_vectors; index++) + intr->vector_list[index].irq = msix_entries + [index].vector; + netif_info(adapter, ifup, adapter->netdev, + "using MSIX interrupts, number of vectors = %d\n", 
+ intr->number_of_vectors); + } + + /* If MSIX failed try to setup using MSI interrupts */ + if (!intr->number_of_vectors) { + if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) { + if (!pci_enable_msi(adapter->pdev)) { + intr->flags |= INTR_FLAG_MSI_ENABLED; + intr->number_of_vectors = 1; + intr->using_vectors = true; + intr->vector_list[0].irq = + adapter->pdev->irq; + netif_info(adapter, ifup, adapter->netdev, + "using MSI interrupts, number of vectors = %d\n", + intr->number_of_vectors); + } + } + } + + /* If MSIX, and MSI failed, setup using legacy interrupt */ + if (!intr->number_of_vectors) { + intr->number_of_vectors = 1; + intr->using_vectors = false; + intr->vector_list[0].irq = intr->irq; + netif_info(adapter, ifup, adapter->netdev, + "using legacy interrupts\n"); + } + + /* At this point we must have at least one irq */ + lan743x_csr_write(adapter, INT_VEC_EN_CLR, 0xFFFFFFFF); + + /* map all interrupts to vector 0 */ + lan743x_csr_write(adapter, INT_VEC_MAP0, 0x00000000); + lan743x_csr_write(adapter, INT_VEC_MAP1, 0x00000000); + lan743x_csr_write(adapter, INT_VEC_MAP2, 0x00000000); + flags = LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ | + LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C | + LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK | + LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR; + + if (intr->using_vectors) { + flags |= LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_CLEAR | + LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_SET; + } else { + flags |= LAN743X_VECTOR_FLAG_MASTER_ENABLE_CLEAR | + LAN743X_VECTOR_FLAG_MASTER_ENABLE_SET | + LAN743X_VECTOR_FLAG_IRQ_SHARED; + } + + if (adapter->csr.flags & LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR) { + flags &= ~LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ; + flags &= ~LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C; + flags &= ~LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR; + flags &= ~LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK; + flags |= LAN743X_VECTOR_FLAG_SOURCE_STATUS_R2C; + flags |= LAN743X_VECTOR_FLAG_SOURCE_ENABLE_R2C; + } + + init_waitqueue_head(&intr->software_isr_wq); + + ret = lan743x_intr_register_isr(adapter, 0, flags, + INT_BIT_ALL_RX_ | INT_BIT_ALL_TX_ | + INT_BIT_ALL_OTHER_, + lan743x_intr_shared_isr, adapter); + if (ret) + goto clean_up; + intr->flags |= INTR_FLAG_IRQ_REQUESTED(0); + + if (intr->using_vectors) + lan743x_csr_write(adapter, INT_VEC_EN_SET, + INT_VEC_EN_(0)); + + if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) { + lan743x_csr_write(adapter, INT_MOD_CFG0, LAN743X_INT_MOD); + lan743x_csr_write(adapter, INT_MOD_CFG1, LAN743X_INT_MOD); + lan743x_csr_write(adapter, INT_MOD_CFG2, LAN743X_INT_MOD); + lan743x_csr_write(adapter, INT_MOD_CFG3, LAN743X_INT_MOD); + lan743x_csr_write(adapter, INT_MOD_CFG4, LAN743X_INT_MOD); + lan743x_csr_write(adapter, INT_MOD_CFG5, LAN743X_INT_MOD); + lan743x_csr_write(adapter, INT_MOD_CFG6, LAN743X_INT_MOD); + lan743x_csr_write(adapter, INT_MOD_CFG7, LAN743X_INT_MOD); + if (adapter->is_pci11x1x) { + lan743x_csr_write(adapter, INT_MOD_CFG8, LAN743X_INT_MOD); + lan743x_csr_write(adapter, INT_MOD_CFG9, LAN743X_INT_MOD); + lan743x_csr_write(adapter, INT_MOD_MAP0, 0x00007654); + lan743x_csr_write(adapter, INT_MOD_MAP1, 0x00003210); + } else { + lan743x_csr_write(adapter, INT_MOD_MAP0, 0x00005432); + lan743x_csr_write(adapter, INT_MOD_MAP1, 0x00000001); + } + lan743x_csr_write(adapter, INT_MOD_MAP2, 0x00FFFFFF); + } + + /* enable interrupts */ + lan743x_csr_write(adapter, INT_EN_SET, INT_BIT_MAS_); + ret = lan743x_intr_test_isr(adapter); + if (ret) + goto clean_up; + + if (intr->number_of_vectors > 1) { + int number_of_tx_vectors = 
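+ /* vector 0 stays shared; the remaining vectors are handed to the TX channels first, then RX */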
intr->number_of_vectors - 1; + + if (number_of_tx_vectors > used_tx_channels) + number_of_tx_vectors = used_tx_channels; + flags = LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ | + LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C | + LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK | + LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR | + LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_CLEAR | + LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_SET; + + if (adapter->csr.flags & + LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR) { + flags = LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET | + LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET | + LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR | + LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR; + } + + for (index = 0; index < number_of_tx_vectors; index++) { + u32 int_bit = INT_BIT_DMA_TX_(index); + int vector = index + 1; + + /* map TX interrupt to vector */ + int_vec_map1 |= INT_VEC_MAP1_TX_VEC_(index, vector); + lan743x_csr_write(adapter, INT_VEC_MAP1, int_vec_map1); + + /* Remove TX interrupt from shared mask */ + intr->vector_list[0].int_mask &= ~int_bit; + ret = lan743x_intr_register_isr(adapter, vector, flags, + int_bit, lan743x_tx_isr, + &adapter->tx[index]); + if (ret) + goto clean_up; + intr->flags |= INTR_FLAG_IRQ_REQUESTED(vector); + if (!(flags & + LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET)) + lan743x_csr_write(adapter, INT_VEC_EN_SET, + INT_VEC_EN_(vector)); + } + } + if ((intr->number_of_vectors - used_tx_channels) > 1) { + int number_of_rx_vectors = intr->number_of_vectors - + used_tx_channels - 1; + + if (number_of_rx_vectors > LAN743X_USED_RX_CHANNELS) + number_of_rx_vectors = LAN743X_USED_RX_CHANNELS; + + flags = LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ | + LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C | + LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK | + LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR | + LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_CLEAR | + LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_SET; + + if (adapter->csr.flags & + LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR) { + flags = LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_CLEAR | + LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET | + LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET | + LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR | + LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR; + } + for (index = 0; index < number_of_rx_vectors; index++) { + int vector = index + 1 + used_tx_channels; + u32 int_bit = INT_BIT_DMA_RX_(index); + + /* map RX interrupt to vector */ + int_vec_map0 |= INT_VEC_MAP0_RX_VEC_(index, vector); + lan743x_csr_write(adapter, INT_VEC_MAP0, int_vec_map0); + if (flags & + LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_CLEAR) { + int_vec_en_auto_clr |= INT_VEC_EN_(vector); + lan743x_csr_write(adapter, INT_VEC_EN_AUTO_CLR, + int_vec_en_auto_clr); + } + + /* Remove RX interrupt from shared mask */ + intr->vector_list[0].int_mask &= ~int_bit; + ret = lan743x_intr_register_isr(adapter, vector, flags, + int_bit, lan743x_rx_isr, + &adapter->rx[index]); + if (ret) + goto clean_up; + intr->flags |= INTR_FLAG_IRQ_REQUESTED(vector); + + lan743x_csr_write(adapter, INT_VEC_EN_SET, + INT_VEC_EN_(vector)); + } + } + return 0; + +clean_up: + lan743x_intr_close(adapter); + return ret; +} + +static int lan743x_dp_write(struct lan743x_adapter *adapter, + u32 select, u32 addr, u32 length, u32 *buf) +{ + u32 dp_sel; + int i; + + if (lan743x_csr_wait_for_bit_atomic(adapter, DP_SEL, DP_SEL_DPRDY_, + 1, 40, 100, 100)) + return -EIO; + dp_sel = lan743x_csr_read(adapter, DP_SEL); + dp_sel &= ~DP_SEL_MASK_; + dp_sel |= select; + lan743x_csr_write(adapter, DP_SEL, dp_sel); + + for (i = 0; i < length; i++) { + 
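+ /* one 32-bit word per iteration: set DP_ADDR, write DP_DATA_0, then wait for DP_SEL_DPRDY_ */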
lan743x_csr_write(adapter, DP_ADDR, addr + i); + lan743x_csr_write(adapter, DP_DATA_0, buf[i]); + lan743x_csr_write(adapter, DP_CMD, DP_CMD_WRITE_); + if (lan743x_csr_wait_for_bit_atomic(adapter, DP_SEL, + DP_SEL_DPRDY_, + 1, 40, 100, 100)) + return -EIO; + } + + return 0; +} + +static u32 lan743x_mac_mii_access(u16 id, u16 index, int read) +{ + u32 ret; + + ret = (id << MAC_MII_ACC_PHY_ADDR_SHIFT_) & + MAC_MII_ACC_PHY_ADDR_MASK_; + ret |= (index << MAC_MII_ACC_MIIRINDA_SHIFT_) & + MAC_MII_ACC_MIIRINDA_MASK_; + + if (read) + ret |= MAC_MII_ACC_MII_READ_; + else + ret |= MAC_MII_ACC_MII_WRITE_; + ret |= MAC_MII_ACC_MII_BUSY_; + + return ret; +} + +static int lan743x_mac_mii_wait_till_not_busy(struct lan743x_adapter *adapter) +{ + u32 data; + + return readx_poll_timeout(LAN743X_CSR_READ_OP, MAC_MII_ACC, data, + !(data & MAC_MII_ACC_MII_BUSY_), 0, 1000000); +} + +static int lan743x_mdiobus_read(struct mii_bus *bus, int phy_id, int index) +{ + struct lan743x_adapter *adapter = bus->priv; + u32 val, mii_access; + int ret; + + /* comfirm MII not busy */ + ret = lan743x_mac_mii_wait_till_not_busy(adapter); + if (ret < 0) + return ret; + + /* set the address, index & direction (read from PHY) */ + mii_access = lan743x_mac_mii_access(phy_id, index, MAC_MII_READ); + lan743x_csr_write(adapter, MAC_MII_ACC, mii_access); + ret = lan743x_mac_mii_wait_till_not_busy(adapter); + if (ret < 0) + return ret; + + val = lan743x_csr_read(adapter, MAC_MII_DATA); + return (int)(val & 0xFFFF); +} + +static int lan743x_mdiobus_write(struct mii_bus *bus, + int phy_id, int index, u16 regval) +{ + struct lan743x_adapter *adapter = bus->priv; + u32 val, mii_access; + int ret; + + /* confirm MII not busy */ + ret = lan743x_mac_mii_wait_till_not_busy(adapter); + if (ret < 0) + return ret; + val = (u32)regval; + lan743x_csr_write(adapter, MAC_MII_DATA, val); + + /* set the address, index & direction (write to PHY) */ + mii_access = lan743x_mac_mii_access(phy_id, index, MAC_MII_WRITE); + lan743x_csr_write(adapter, MAC_MII_ACC, mii_access); + ret = lan743x_mac_mii_wait_till_not_busy(adapter); + return ret; +} + +static u32 lan743x_mac_mmd_access(int id, int index, int op) +{ + u16 dev_addr; + u32 ret; + + dev_addr = (index >> 16) & 0x1f; + ret = (id << MAC_MII_ACC_PHY_ADDR_SHIFT_) & + MAC_MII_ACC_PHY_ADDR_MASK_; + ret |= (dev_addr << MAC_MII_ACC_MIIMMD_SHIFT_) & + MAC_MII_ACC_MIIMMD_MASK_; + if (op == MMD_ACCESS_WRITE) + ret |= MAC_MII_ACC_MIICMD_WRITE_; + else if (op == MMD_ACCESS_READ) + ret |= MAC_MII_ACC_MIICMD_READ_; + else if (op == MMD_ACCESS_READ_INC) + ret |= MAC_MII_ACC_MIICMD_READ_INC_; + else + ret |= MAC_MII_ACC_MIICMD_ADDR_; + ret |= (MAC_MII_ACC_MII_BUSY_ | MAC_MII_ACC_MIICL45_); + + return ret; +} + +static int lan743x_mdiobus_c45_read(struct mii_bus *bus, int phy_id, int index) +{ + struct lan743x_adapter *adapter = bus->priv; + u32 mmd_access; + int ret; + + /* comfirm MII not busy */ + ret = lan743x_mac_mii_wait_till_not_busy(adapter); + if (ret < 0) + return ret; + if (index & MII_ADDR_C45) { + /* Load Register Address */ + lan743x_csr_write(adapter, MAC_MII_DATA, (u32)(index & 0xffff)); + mmd_access = lan743x_mac_mmd_access(phy_id, index, + MMD_ACCESS_ADDRESS); + lan743x_csr_write(adapter, MAC_MII_ACC, mmd_access); + ret = lan743x_mac_mii_wait_till_not_busy(adapter); + if (ret < 0) + return ret; + /* Read Data */ + mmd_access = lan743x_mac_mmd_access(phy_id, index, + MMD_ACCESS_READ); + lan743x_csr_write(adapter, MAC_MII_ACC, mmd_access); + ret = lan743x_mac_mii_wait_till_not_busy(adapter); + if (ret < 
0) + return ret; + ret = lan743x_csr_read(adapter, MAC_MII_DATA); + return (int)(ret & 0xFFFF); + } + + ret = lan743x_mdiobus_read(bus, phy_id, index); + return ret; +} + +static int lan743x_mdiobus_c45_write(struct mii_bus *bus, + int phy_id, int index, u16 regval) +{ + struct lan743x_adapter *adapter = bus->priv; + u32 mmd_access; + int ret; + + /* confirm MII not busy */ + ret = lan743x_mac_mii_wait_till_not_busy(adapter); + if (ret < 0) + return ret; + if (index & MII_ADDR_C45) { + /* Load Register Address */ + lan743x_csr_write(adapter, MAC_MII_DATA, (u32)(index & 0xffff)); + mmd_access = lan743x_mac_mmd_access(phy_id, index, + MMD_ACCESS_ADDRESS); + lan743x_csr_write(adapter, MAC_MII_ACC, mmd_access); + ret = lan743x_mac_mii_wait_till_not_busy(adapter); + if (ret < 0) + return ret; + /* Write Data */ + lan743x_csr_write(adapter, MAC_MII_DATA, (u32)regval); + mmd_access = lan743x_mac_mmd_access(phy_id, index, + MMD_ACCESS_WRITE); + lan743x_csr_write(adapter, MAC_MII_ACC, mmd_access); + ret = lan743x_mac_mii_wait_till_not_busy(adapter); + } else { + ret = lan743x_mdiobus_write(bus, phy_id, index, regval); + } + + return ret; +} + +static int lan743x_sgmii_wait_till_not_busy(struct lan743x_adapter *adapter) +{ + u32 data; + int ret; + + ret = readx_poll_timeout(LAN743X_CSR_READ_OP, SGMII_ACC, data, + !(data & SGMII_ACC_SGMII_BZY_), 100, 1000000); + if (ret < 0) + netif_err(adapter, drv, adapter->netdev, + "%s: error %d sgmii wait timeout\n", __func__, ret); + + return ret; +} + +static int lan743x_sgmii_read(struct lan743x_adapter *adapter, u8 mmd, u16 addr) +{ + u32 mmd_access; + int ret; + u32 val; + + if (mmd > 31) { + netif_err(adapter, probe, adapter->netdev, + "%s mmd should <= 31\n", __func__); + return -EINVAL; + } + + mutex_lock(&adapter->sgmii_rw_lock); + /* Load Register Address */ + mmd_access = mmd << SGMII_ACC_SGMII_MMD_SHIFT_; + mmd_access |= (addr | SGMII_ACC_SGMII_BZY_); + lan743x_csr_write(adapter, SGMII_ACC, mmd_access); + ret = lan743x_sgmii_wait_till_not_busy(adapter); + if (ret < 0) + goto sgmii_unlock; + + val = lan743x_csr_read(adapter, SGMII_DATA); + ret = (int)(val & SGMII_DATA_MASK_); + +sgmii_unlock: + mutex_unlock(&adapter->sgmii_rw_lock); + + return ret; +} + +static int lan743x_sgmii_write(struct lan743x_adapter *adapter, + u8 mmd, u16 addr, u16 val) +{ + u32 mmd_access; + int ret; + + if (mmd > 31) { + netif_err(adapter, probe, adapter->netdev, + "%s mmd should <= 31\n", __func__); + return -EINVAL; + } + mutex_lock(&adapter->sgmii_rw_lock); + /* Load Register Data */ + lan743x_csr_write(adapter, SGMII_DATA, (u32)(val & SGMII_DATA_MASK_)); + /* Load Register Address */ + mmd_access = mmd << SGMII_ACC_SGMII_MMD_SHIFT_; + mmd_access |= (addr | SGMII_ACC_SGMII_BZY_ | SGMII_ACC_SGMII_WR_); + lan743x_csr_write(adapter, SGMII_ACC, mmd_access); + ret = lan743x_sgmii_wait_till_not_busy(adapter); + mutex_unlock(&adapter->sgmii_rw_lock); + + return ret; +} + +static int lan743x_sgmii_mpll_set(struct lan743x_adapter *adapter, + u16 baud) +{ + int mpllctrl0; + int mpllctrl1; + int miscctrl1; + int ret; + + mpllctrl0 = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2, + VR_MII_GEN2_4_MPLL_CTRL0); + if (mpllctrl0 < 0) + return mpllctrl0; + + mpllctrl0 &= ~VR_MII_MPLL_CTRL0_USE_REFCLK_PAD_; + if (baud == VR_MII_BAUD_RATE_1P25GBPS) { + mpllctrl1 = VR_MII_MPLL_MULTIPLIER_100; + /* mpll_baud_clk/4 */ + miscctrl1 = 0xA; + } else { + mpllctrl1 = VR_MII_MPLL_MULTIPLIER_125; + /* mpll_baud_clk/2 */ + miscctrl1 = 0x5; + } + + ret = lan743x_sgmii_write(adapter, MDIO_MMD_VEND2, + 
VR_MII_GEN2_4_MPLL_CTRL0, mpllctrl0); + if (ret < 0) + return ret; + + ret = lan743x_sgmii_write(adapter, MDIO_MMD_VEND2, + VR_MII_GEN2_4_MPLL_CTRL1, mpllctrl1); + if (ret < 0) + return ret; + + return lan743x_sgmii_write(adapter, MDIO_MMD_VEND2, + VR_MII_GEN2_4_MISC_CTRL1, miscctrl1); +} + +static int lan743x_sgmii_2_5G_mode_set(struct lan743x_adapter *adapter, + bool enable) +{ + if (enable) + return lan743x_sgmii_mpll_set(adapter, + VR_MII_BAUD_RATE_3P125GBPS); + else + return lan743x_sgmii_mpll_set(adapter, + VR_MII_BAUD_RATE_1P25GBPS); +} + +static int lan743x_is_sgmii_2_5G_mode(struct lan743x_adapter *adapter, + bool *status) +{ + int ret; + + ret = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2, + VR_MII_GEN2_4_MPLL_CTRL1); + if (ret < 0) + return ret; + + if (ret == VR_MII_MPLL_MULTIPLIER_125 || + ret == VR_MII_MPLL_MULTIPLIER_50) + *status = true; + else + *status = false; + + return 0; +} + +static int lan743x_sgmii_aneg_update(struct lan743x_adapter *adapter) +{ + enum lan743x_sgmii_lsd lsd = adapter->sgmii_lsd; + int mii_ctrl; + int dgt_ctrl; + int an_ctrl; + int ret; + + if (lsd == LINK_2500_MASTER || lsd == LINK_2500_SLAVE) + /* Switch to 2.5 Gbps */ + ret = lan743x_sgmii_2_5G_mode_set(adapter, true); + else + /* Switch to 10/100/1000 Mbps clock */ + ret = lan743x_sgmii_2_5G_mode_set(adapter, false); + if (ret < 0) + return ret; + + /* Enable SGMII Auto NEG */ + mii_ctrl = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2, MII_BMCR); + if (mii_ctrl < 0) + return mii_ctrl; + + an_ctrl = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2, VR_MII_AN_CTRL); + if (an_ctrl < 0) + return an_ctrl; + + dgt_ctrl = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2, + VR_MII_DIG_CTRL1); + if (dgt_ctrl < 0) + return dgt_ctrl; + + if (lsd == LINK_2500_MASTER || lsd == LINK_2500_SLAVE) { + mii_ctrl &= ~(BMCR_ANENABLE | BMCR_ANRESTART | BMCR_SPEED100); + mii_ctrl |= BMCR_SPEED1000; + dgt_ctrl |= VR_MII_DIG_CTRL1_CL37_TMR_OVR_RIDE_; + dgt_ctrl &= ~VR_MII_DIG_CTRL1_MAC_AUTO_SW_; + /* In order for Auto-Negotiation to operate properly at + * 2.5 Gbps the 1.6ms link timer values must be adjusted + * The VR_MII_LINK_TIMER_CTRL Register must be set to + * 16'h7A1 and The CL37_TMR_OVR_RIDE bit of the + * VR_MII_DIG_CTRL1 Register set to 1 + */ + ret = lan743x_sgmii_write(adapter, MDIO_MMD_VEND2, + VR_MII_LINK_TIMER_CTRL, 0x7A1); + if (ret < 0) + return ret; + } else { + mii_ctrl |= (BMCR_ANENABLE | BMCR_ANRESTART); + an_ctrl &= ~VR_MII_AN_CTRL_SGMII_LINK_STS_; + dgt_ctrl &= ~VR_MII_DIG_CTRL1_CL37_TMR_OVR_RIDE_; + dgt_ctrl |= VR_MII_DIG_CTRL1_MAC_AUTO_SW_; + } + + ret = lan743x_sgmii_write(adapter, MDIO_MMD_VEND2, MII_BMCR, + mii_ctrl); + if (ret < 0) + return ret; + + ret = lan743x_sgmii_write(adapter, MDIO_MMD_VEND2, + VR_MII_DIG_CTRL1, dgt_ctrl); + if (ret < 0) + return ret; + + return lan743x_sgmii_write(adapter, MDIO_MMD_VEND2, + VR_MII_AN_CTRL, an_ctrl); +} + +static int lan743x_pcs_seq_state(struct lan743x_adapter *adapter, u8 state) +{ + u8 wait_cnt = 0; + u32 dig_sts; + + do { + dig_sts = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2, + VR_MII_DIG_STS); + if (((dig_sts & VR_MII_DIG_STS_PSEQ_STATE_MASK_) >> + VR_MII_DIG_STS_PSEQ_STATE_POS_) == state) + break; + usleep_range(1000, 2000); + } while (wait_cnt++ < 10); + + if (wait_cnt >= 10) + return -ETIMEDOUT; + + return 0; +} + +static int lan743x_sgmii_config(struct lan743x_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct phy_device *phydev = netdev->phydev; + enum lan743x_sgmii_lsd lsd = POWER_DOWN; + int mii_ctl; + bool status; + int ret; + 
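	/* Editor's note (annotation only, not part of this commit):
	 * lan743x_sgmii_config() below first maps the PHY's resolved speed,
	 * duplex and, for 1000/2500 Mbps, its master/slave role to one of the
	 * lan743x_sgmii_lsd values, then calls lan743x_sgmii_aneg_update() to
	 * select the 1.25 Gbaud or 3.125 Gbaud MPLL setting and the matching
	 * Clause 37 auto-negotiation/link-timer configuration, and finally
	 * power-cycles the SGMII/1000/2500BASE-X PCS (set BMCR_PDOWN, wait
	 * for the PSEQ state machine to report DOWN, clear BMCR_PDOWN, wait
	 * for UP) so the new settings take effect.
	 */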
+ switch (phydev->speed) { + case SPEED_2500: + if (phydev->master_slave_state == MASTER_SLAVE_STATE_MASTER) + lsd = LINK_2500_MASTER; + else + lsd = LINK_2500_SLAVE; + break; + case SPEED_1000: + if (phydev->master_slave_state == MASTER_SLAVE_STATE_MASTER) + lsd = LINK_1000_MASTER; + else + lsd = LINK_1000_SLAVE; + break; + case SPEED_100: + if (phydev->duplex) + lsd = LINK_100FD; + else + lsd = LINK_100HD; + break; + case SPEED_10: + if (phydev->duplex) + lsd = LINK_10FD; + else + lsd = LINK_10HD; + break; + default: + netif_err(adapter, drv, adapter->netdev, + "Invalid speed %d\n", phydev->speed); + return -EINVAL; + } + + adapter->sgmii_lsd = lsd; + ret = lan743x_sgmii_aneg_update(adapter); + if (ret < 0) { + netif_err(adapter, drv, adapter->netdev, + "error %d SGMII cfg failed\n", ret); + return ret; + } + + ret = lan743x_is_sgmii_2_5G_mode(adapter, &status); + if (ret < 0) { + netif_err(adapter, drv, adapter->netdev, + "erro %d SGMII get mode failed\n", ret); + return ret; + } + + if (status) + netif_dbg(adapter, drv, adapter->netdev, + "SGMII 2.5G mode enable\n"); + else + netif_dbg(adapter, drv, adapter->netdev, + "SGMII 1G mode enable\n"); + + /* SGMII/1000/2500BASE-X PCS power down */ + mii_ctl = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2, MII_BMCR); + if (mii_ctl < 0) + return mii_ctl; + + mii_ctl |= BMCR_PDOWN; + ret = lan743x_sgmii_write(adapter, MDIO_MMD_VEND2, MII_BMCR, mii_ctl); + if (ret < 0) + return ret; + + ret = lan743x_pcs_seq_state(adapter, PCS_POWER_STATE_DOWN); + if (ret < 0) + return ret; + + /* SGMII/1000/2500BASE-X PCS power up */ + mii_ctl &= ~BMCR_PDOWN; + ret = lan743x_sgmii_write(adapter, MDIO_MMD_VEND2, MII_BMCR, mii_ctl); + if (ret < 0) + return ret; + + ret = lan743x_pcs_seq_state(adapter, PCS_POWER_STATE_UP); + if (ret < 0) + return ret; + + return 0; +} + +static void lan743x_mac_set_address(struct lan743x_adapter *adapter, + u8 *addr) +{ + u32 addr_lo, addr_hi; + + addr_lo = addr[0] | + addr[1] << 8 | + addr[2] << 16 | + addr[3] << 24; + addr_hi = addr[4] | + addr[5] << 8; + lan743x_csr_write(adapter, MAC_RX_ADDRL, addr_lo); + lan743x_csr_write(adapter, MAC_RX_ADDRH, addr_hi); + + ether_addr_copy(adapter->mac_address, addr); + netif_info(adapter, drv, adapter->netdev, + "MAC address set to %pM\n", addr); +} + +static int lan743x_mac_init(struct lan743x_adapter *adapter) +{ + bool mac_address_valid = true; + struct net_device *netdev; + u32 mac_addr_hi = 0; + u32 mac_addr_lo = 0; + u32 data; + + netdev = adapter->netdev; + + /* disable auto duplex, and speed detection. 
Phylib does that */ + data = lan743x_csr_read(adapter, MAC_CR); + data &= ~(MAC_CR_ADD_ | MAC_CR_ASD_); + data |= MAC_CR_CNTR_RST_; + lan743x_csr_write(adapter, MAC_CR, data); + + if (!is_valid_ether_addr(adapter->mac_address)) { + mac_addr_hi = lan743x_csr_read(adapter, MAC_RX_ADDRH); + mac_addr_lo = lan743x_csr_read(adapter, MAC_RX_ADDRL); + adapter->mac_address[0] = mac_addr_lo & 0xFF; + adapter->mac_address[1] = (mac_addr_lo >> 8) & 0xFF; + adapter->mac_address[2] = (mac_addr_lo >> 16) & 0xFF; + adapter->mac_address[3] = (mac_addr_lo >> 24) & 0xFF; + adapter->mac_address[4] = mac_addr_hi & 0xFF; + adapter->mac_address[5] = (mac_addr_hi >> 8) & 0xFF; + + if (((mac_addr_hi & 0x0000FFFF) == 0x0000FFFF) && + mac_addr_lo == 0xFFFFFFFF) { + mac_address_valid = false; + } else if (!is_valid_ether_addr(adapter->mac_address)) { + mac_address_valid = false; + } + + if (!mac_address_valid) + eth_random_addr(adapter->mac_address); + } + lan743x_mac_set_address(adapter, adapter->mac_address); + eth_hw_addr_set(netdev, adapter->mac_address); + + return 0; +} + +static int lan743x_mac_open(struct lan743x_adapter *adapter) +{ + u32 temp; + + temp = lan743x_csr_read(adapter, MAC_RX); + lan743x_csr_write(adapter, MAC_RX, temp | MAC_RX_RXEN_); + temp = lan743x_csr_read(adapter, MAC_TX); + lan743x_csr_write(adapter, MAC_TX, temp | MAC_TX_TXEN_); + return 0; +} + +static void lan743x_mac_close(struct lan743x_adapter *adapter) +{ + u32 temp; + + temp = lan743x_csr_read(adapter, MAC_TX); + temp &= ~MAC_TX_TXEN_; + lan743x_csr_write(adapter, MAC_TX, temp); + lan743x_csr_wait_for_bit(adapter, MAC_TX, MAC_TX_TXD_, + 1, 1000, 20000, 100); + + temp = lan743x_csr_read(adapter, MAC_RX); + temp &= ~MAC_RX_RXEN_; + lan743x_csr_write(adapter, MAC_RX, temp); + lan743x_csr_wait_for_bit(adapter, MAC_RX, MAC_RX_RXD_, + 1, 1000, 20000, 100); +} + +static void lan743x_mac_flow_ctrl_set_enables(struct lan743x_adapter *adapter, + bool tx_enable, bool rx_enable) +{ + u32 flow_setting = 0; + + /* set maximum pause time because when fifo space frees + * up a zero value pause frame will be sent to release the pause + */ + flow_setting = MAC_FLOW_CR_FCPT_MASK_; + if (tx_enable) + flow_setting |= MAC_FLOW_CR_TX_FCEN_; + if (rx_enable) + flow_setting |= MAC_FLOW_CR_RX_FCEN_; + lan743x_csr_write(adapter, MAC_FLOW, flow_setting); +} + +static int lan743x_mac_set_mtu(struct lan743x_adapter *adapter, int new_mtu) +{ + int enabled = 0; + u32 mac_rx = 0; + + mac_rx = lan743x_csr_read(adapter, MAC_RX); + if (mac_rx & MAC_RX_RXEN_) { + enabled = 1; + if (mac_rx & MAC_RX_RXD_) { + lan743x_csr_write(adapter, MAC_RX, mac_rx); + mac_rx &= ~MAC_RX_RXD_; + } + mac_rx &= ~MAC_RX_RXEN_; + lan743x_csr_write(adapter, MAC_RX, mac_rx); + lan743x_csr_wait_for_bit(adapter, MAC_RX, MAC_RX_RXD_, + 1, 1000, 20000, 100); + lan743x_csr_write(adapter, MAC_RX, mac_rx | MAC_RX_RXD_); + } + + mac_rx &= ~(MAC_RX_MAX_SIZE_MASK_); + mac_rx |= (((new_mtu + ETH_HLEN + ETH_FCS_LEN) + << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_); + lan743x_csr_write(adapter, MAC_RX, mac_rx); + + if (enabled) { + mac_rx |= MAC_RX_RXEN_; + lan743x_csr_write(adapter, MAC_RX, mac_rx); + } + return 0; +} + +/* PHY */ +static int lan743x_phy_reset(struct lan743x_adapter *adapter) +{ + u32 data; + + /* Only called with in probe, and before mdiobus_register */ + + data = lan743x_csr_read(adapter, PMT_CTL); + data |= PMT_CTL_ETH_PHY_RST_; + lan743x_csr_write(adapter, PMT_CTL, data); + + return readx_poll_timeout(LAN743X_CSR_READ_OP, PMT_CTL, data, + (!(data & PMT_CTL_ETH_PHY_RST_) && + 
(data & PMT_CTL_READY_)), + 50000, 1000000); +} + +static void lan743x_phy_update_flowcontrol(struct lan743x_adapter *adapter, + u16 local_adv, u16 remote_adv) +{ + struct lan743x_phy *phy = &adapter->phy; + u8 cap; + + if (phy->fc_autoneg) + cap = mii_resolve_flowctrl_fdx(local_adv, remote_adv); + else + cap = phy->fc_request_control; + + lan743x_mac_flow_ctrl_set_enables(adapter, + cap & FLOW_CTRL_TX, + cap & FLOW_CTRL_RX); +} + +static int lan743x_phy_init(struct lan743x_adapter *adapter) +{ + return lan743x_phy_reset(adapter); +} + +static void lan743x_phy_link_status_change(struct net_device *netdev) +{ + struct lan743x_adapter *adapter = netdev_priv(netdev); + struct phy_device *phydev = netdev->phydev; + u32 data; + + phy_print_status(phydev); + if (phydev->state == PHY_RUNNING) { + int remote_advertisement = 0; + int local_advertisement = 0; + + data = lan743x_csr_read(adapter, MAC_CR); + + /* set interface mode */ + if (phy_interface_is_rgmii(phydev)) + /* RGMII */ + data &= ~MAC_CR_MII_EN_; + else + /* GMII */ + data |= MAC_CR_MII_EN_; + + /* set duplex mode */ + if (phydev->duplex) + data |= MAC_CR_DPX_; + else + data &= ~MAC_CR_DPX_; + + /* set bus speed */ + switch (phydev->speed) { + case SPEED_10: + data &= ~MAC_CR_CFG_H_; + data &= ~MAC_CR_CFG_L_; + break; + case SPEED_100: + data &= ~MAC_CR_CFG_H_; + data |= MAC_CR_CFG_L_; + break; + case SPEED_1000: + data |= MAC_CR_CFG_H_; + data &= ~MAC_CR_CFG_L_; + break; + case SPEED_2500: + data |= MAC_CR_CFG_H_; + data |= MAC_CR_CFG_L_; + break; + } + lan743x_csr_write(adapter, MAC_CR, data); + + local_advertisement = + linkmode_adv_to_mii_adv_t(phydev->advertising); + remote_advertisement = + linkmode_adv_to_mii_adv_t(phydev->lp_advertising); + + lan743x_phy_update_flowcontrol(adapter, local_advertisement, + remote_advertisement); + lan743x_ptp_update_latency(adapter, phydev->speed); + if (phydev->interface == PHY_INTERFACE_MODE_SGMII || + phydev->interface == PHY_INTERFACE_MODE_1000BASEX || + phydev->interface == PHY_INTERFACE_MODE_2500BASEX) + lan743x_sgmii_config(adapter); + } +} + +static void lan743x_phy_close(struct lan743x_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + phy_stop(netdev->phydev); + phy_disconnect(netdev->phydev); + netdev->phydev = NULL; +} + +static int lan743x_phy_open(struct lan743x_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct lan743x_phy *phy = &adapter->phy; + struct phy_device *phydev; + int ret = -EIO; + + /* try devicetree phy, or fixed link */ + phydev = of_phy_get_and_connect(netdev, adapter->pdev->dev.of_node, + lan743x_phy_link_status_change); + + if (!phydev) { + /* try internal phy */ + phydev = phy_find_first(adapter->mdiobus); + if (!phydev) + goto return_error; + + if (adapter->is_pci11x1x) + ret = phy_connect_direct(netdev, phydev, + lan743x_phy_link_status_change, + PHY_INTERFACE_MODE_RGMII); + else + ret = phy_connect_direct(netdev, phydev, + lan743x_phy_link_status_change, + PHY_INTERFACE_MODE_GMII); + if (ret) + goto return_error; + } + + /* MAC doesn't support 1000T Half */ + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT); + + /* support both flow controls */ + phy_support_asym_pause(phydev); + phy->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX); + phy->fc_autoneg = phydev->autoneg; + + phy_start(phydev); + phy_start_aneg(phydev); + phy_attached_info(phydev); + return 0; + +return_error: + return ret; +} + +static void lan743x_rfe_open(struct lan743x_adapter *adapter) +{ + lan743x_csr_write(adapter, 
RFE_RSS_CFG, + RFE_RSS_CFG_UDP_IPV6_EX_ | + RFE_RSS_CFG_TCP_IPV6_EX_ | + RFE_RSS_CFG_IPV6_EX_ | + RFE_RSS_CFG_UDP_IPV6_ | + RFE_RSS_CFG_TCP_IPV6_ | + RFE_RSS_CFG_IPV6_ | + RFE_RSS_CFG_UDP_IPV4_ | + RFE_RSS_CFG_TCP_IPV4_ | + RFE_RSS_CFG_IPV4_ | + RFE_RSS_CFG_VALID_HASH_BITS_ | + RFE_RSS_CFG_RSS_QUEUE_ENABLE_ | + RFE_RSS_CFG_RSS_HASH_STORE_ | + RFE_RSS_CFG_RSS_ENABLE_); +} + +static void lan743x_rfe_update_mac_address(struct lan743x_adapter *adapter) +{ + u8 *mac_addr; + u32 mac_addr_hi = 0; + u32 mac_addr_lo = 0; + + /* Add mac address to perfect Filter */ + mac_addr = adapter->mac_address; + mac_addr_lo = ((((u32)(mac_addr[0])) << 0) | + (((u32)(mac_addr[1])) << 8) | + (((u32)(mac_addr[2])) << 16) | + (((u32)(mac_addr[3])) << 24)); + mac_addr_hi = ((((u32)(mac_addr[4])) << 0) | + (((u32)(mac_addr[5])) << 8)); + + lan743x_csr_write(adapter, RFE_ADDR_FILT_LO(0), mac_addr_lo); + lan743x_csr_write(adapter, RFE_ADDR_FILT_HI(0), + mac_addr_hi | RFE_ADDR_FILT_HI_VALID_); +} + +static void lan743x_rfe_set_multicast(struct lan743x_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + u32 hash_table[DP_SEL_VHF_HASH_LEN]; + u32 rfctl; + u32 data; + + rfctl = lan743x_csr_read(adapter, RFE_CTL); + rfctl &= ~(RFE_CTL_AU_ | RFE_CTL_AM_ | + RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_); + rfctl |= RFE_CTL_AB_; + if (netdev->flags & IFF_PROMISC) { + rfctl |= RFE_CTL_AM_ | RFE_CTL_AU_; + } else { + if (netdev->flags & IFF_ALLMULTI) + rfctl |= RFE_CTL_AM_; + } + + if (netdev->features & NETIF_F_RXCSUM) + rfctl |= RFE_CTL_IP_COE_ | RFE_CTL_TCP_UDP_COE_; + + memset(hash_table, 0, DP_SEL_VHF_HASH_LEN * sizeof(u32)); + if (netdev_mc_count(netdev)) { + struct netdev_hw_addr *ha; + int i; + + rfctl |= RFE_CTL_DA_PERFECT_; + i = 1; + netdev_for_each_mc_addr(ha, netdev) { + /* set first 32 into Perfect Filter */ + if (i < 33) { + lan743x_csr_write(adapter, + RFE_ADDR_FILT_HI(i), 0); + data = ha->addr[3]; + data = ha->addr[2] | (data << 8); + data = ha->addr[1] | (data << 8); + data = ha->addr[0] | (data << 8); + lan743x_csr_write(adapter, + RFE_ADDR_FILT_LO(i), data); + data = ha->addr[5]; + data = ha->addr[4] | (data << 8); + data |= RFE_ADDR_FILT_HI_VALID_; + lan743x_csr_write(adapter, + RFE_ADDR_FILT_HI(i), data); + } else { + u32 bitnum = (ether_crc(ETH_ALEN, ha->addr) >> + 23) & 0x1FF; + hash_table[bitnum / 32] |= (1 << (bitnum % 32)); + rfctl |= RFE_CTL_MCAST_HASH_; + } + i++; + } + } + + lan743x_dp_write(adapter, DP_SEL_RFE_RAM, + DP_SEL_VHF_VLAN_LEN, + DP_SEL_VHF_HASH_LEN, hash_table); + lan743x_csr_write(adapter, RFE_CTL, rfctl); +} + +static int lan743x_dmac_init(struct lan743x_adapter *adapter) +{ + u32 data = 0; + + lan743x_csr_write(adapter, DMAC_CMD, DMAC_CMD_SWR_); + lan743x_csr_wait_for_bit(adapter, DMAC_CMD, DMAC_CMD_SWR_, + 0, 1000, 20000, 100); + switch (DEFAULT_DMA_DESCRIPTOR_SPACING) { + case DMA_DESCRIPTOR_SPACING_16: + data = DMAC_CFG_MAX_DSPACE_16_; + break; + case DMA_DESCRIPTOR_SPACING_32: + data = DMAC_CFG_MAX_DSPACE_32_; + break; + case DMA_DESCRIPTOR_SPACING_64: + data = DMAC_CFG_MAX_DSPACE_64_; + break; + case DMA_DESCRIPTOR_SPACING_128: + data = DMAC_CFG_MAX_DSPACE_128_; + break; + default: + return -EPERM; + } + if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) + data |= DMAC_CFG_COAL_EN_; + data |= DMAC_CFG_CH_ARB_SEL_RX_HIGH_; + data |= DMAC_CFG_MAX_READ_REQ_SET_(6); + lan743x_csr_write(adapter, DMAC_CFG, data); + data = DMAC_COAL_CFG_TIMER_LIMIT_SET_(1); + data |= DMAC_COAL_CFG_TIMER_TX_START_; + data |= DMAC_COAL_CFG_FLUSH_INTS_; + data |= 
DMAC_COAL_CFG_INT_EXIT_COAL_; + data |= DMAC_COAL_CFG_CSR_EXIT_COAL_; + data |= DMAC_COAL_CFG_TX_THRES_SET_(0x0A); + data |= DMAC_COAL_CFG_RX_THRES_SET_(0x0C); + lan743x_csr_write(adapter, DMAC_COAL_CFG, data); + data = DMAC_OBFF_TX_THRES_SET_(0x08); + data |= DMAC_OBFF_RX_THRES_SET_(0x0A); + lan743x_csr_write(adapter, DMAC_OBFF_CFG, data); + return 0; +} + +static int lan743x_dmac_tx_get_state(struct lan743x_adapter *adapter, + int tx_channel) +{ + u32 dmac_cmd = 0; + + dmac_cmd = lan743x_csr_read(adapter, DMAC_CMD); + return DMAC_CHANNEL_STATE_SET((dmac_cmd & + DMAC_CMD_START_T_(tx_channel)), + (dmac_cmd & + DMAC_CMD_STOP_T_(tx_channel))); +} + +static int lan743x_dmac_tx_wait_till_stopped(struct lan743x_adapter *adapter, + int tx_channel) +{ + int timeout = 100; + int result = 0; + + while (timeout && + ((result = lan743x_dmac_tx_get_state(adapter, tx_channel)) == + DMAC_CHANNEL_STATE_STOP_PENDING)) { + usleep_range(1000, 20000); + timeout--; + } + if (result == DMAC_CHANNEL_STATE_STOP_PENDING) + result = -ENODEV; + return result; +} + +static int lan743x_dmac_rx_get_state(struct lan743x_adapter *adapter, + int rx_channel) +{ + u32 dmac_cmd = 0; + + dmac_cmd = lan743x_csr_read(adapter, DMAC_CMD); + return DMAC_CHANNEL_STATE_SET((dmac_cmd & + DMAC_CMD_START_R_(rx_channel)), + (dmac_cmd & + DMAC_CMD_STOP_R_(rx_channel))); +} + +static int lan743x_dmac_rx_wait_till_stopped(struct lan743x_adapter *adapter, + int rx_channel) +{ + int timeout = 100; + int result = 0; + + while (timeout && + ((result = lan743x_dmac_rx_get_state(adapter, rx_channel)) == + DMAC_CHANNEL_STATE_STOP_PENDING)) { + usleep_range(1000, 20000); + timeout--; + } + if (result == DMAC_CHANNEL_STATE_STOP_PENDING) + result = -ENODEV; + return result; +} + +static void lan743x_tx_release_desc(struct lan743x_tx *tx, + int descriptor_index, bool cleanup) +{ + struct lan743x_tx_buffer_info *buffer_info = NULL; + struct lan743x_tx_descriptor *descriptor = NULL; + u32 descriptor_type = 0; + bool ignore_sync; + + descriptor = &tx->ring_cpu_ptr[descriptor_index]; + buffer_info = &tx->buffer_info[descriptor_index]; + if (!(buffer_info->flags & TX_BUFFER_INFO_FLAG_ACTIVE)) + goto done; + + descriptor_type = le32_to_cpu(descriptor->data0) & + TX_DESC_DATA0_DTYPE_MASK_; + if (descriptor_type == TX_DESC_DATA0_DTYPE_DATA_) + goto clean_up_data_descriptor; + else + goto clear_active; + +clean_up_data_descriptor: + if (buffer_info->dma_ptr) { + if (buffer_info->flags & + TX_BUFFER_INFO_FLAG_SKB_FRAGMENT) { + dma_unmap_page(&tx->adapter->pdev->dev, + buffer_info->dma_ptr, + buffer_info->buffer_length, + DMA_TO_DEVICE); + } else { + dma_unmap_single(&tx->adapter->pdev->dev, + buffer_info->dma_ptr, + buffer_info->buffer_length, + DMA_TO_DEVICE); + } + buffer_info->dma_ptr = 0; + buffer_info->buffer_length = 0; + } + if (!buffer_info->skb) + goto clear_active; + + if (!(buffer_info->flags & TX_BUFFER_INFO_FLAG_TIMESTAMP_REQUESTED)) { + dev_kfree_skb_any(buffer_info->skb); + goto clear_skb; + } + + if (cleanup) { + lan743x_ptp_unrequest_tx_timestamp(tx->adapter); + dev_kfree_skb_any(buffer_info->skb); + } else { + ignore_sync = (buffer_info->flags & + TX_BUFFER_INFO_FLAG_IGNORE_SYNC) != 0; + lan743x_ptp_tx_timestamp_skb(tx->adapter, + buffer_info->skb, ignore_sync); + } + +clear_skb: + buffer_info->skb = NULL; + +clear_active: + buffer_info->flags &= ~TX_BUFFER_INFO_FLAG_ACTIVE; + +done: + memset(buffer_info, 0, sizeof(*buffer_info)); + memset(descriptor, 0, sizeof(*descriptor)); +} + +static int lan743x_tx_next_index(struct lan743x_tx *tx, int 
index) +{ + return ((++index) % tx->ring_size); +} + +static void lan743x_tx_release_completed_descriptors(struct lan743x_tx *tx) +{ + while (le32_to_cpu(*tx->head_cpu_ptr) != (tx->last_head)) { + lan743x_tx_release_desc(tx, tx->last_head, false); + tx->last_head = lan743x_tx_next_index(tx, tx->last_head); + } +} + +static void lan743x_tx_release_all_descriptors(struct lan743x_tx *tx) +{ + u32 original_head = 0; + + original_head = tx->last_head; + do { + lan743x_tx_release_desc(tx, tx->last_head, true); + tx->last_head = lan743x_tx_next_index(tx, tx->last_head); + } while (tx->last_head != original_head); + memset(tx->ring_cpu_ptr, 0, + sizeof(*tx->ring_cpu_ptr) * (tx->ring_size)); + memset(tx->buffer_info, 0, + sizeof(*tx->buffer_info) * (tx->ring_size)); +} + +static int lan743x_tx_get_desc_cnt(struct lan743x_tx *tx, + struct sk_buff *skb) +{ + int result = 1; /* 1 for the main skb buffer */ + int nr_frags = 0; + + if (skb_is_gso(skb)) + result++; /* requires an extension descriptor */ + nr_frags = skb_shinfo(skb)->nr_frags; + result += nr_frags; /* 1 for each fragment buffer */ + return result; +} + +static int lan743x_tx_get_avail_desc(struct lan743x_tx *tx) +{ + int last_head = tx->last_head; + int last_tail = tx->last_tail; + + if (last_tail >= last_head) + return tx->ring_size - last_tail + last_head - 1; + else + return last_head - last_tail - 1; +} + +void lan743x_tx_set_timestamping_mode(struct lan743x_tx *tx, + bool enable_timestamping, + bool enable_onestep_sync) +{ + if (enable_timestamping) + tx->ts_flags |= TX_TS_FLAG_TIMESTAMPING_ENABLED; + else + tx->ts_flags &= ~TX_TS_FLAG_TIMESTAMPING_ENABLED; + if (enable_onestep_sync) + tx->ts_flags |= TX_TS_FLAG_ONE_STEP_SYNC; + else + tx->ts_flags &= ~TX_TS_FLAG_ONE_STEP_SYNC; +} + +static int lan743x_tx_frame_start(struct lan743x_tx *tx, + unsigned char *first_buffer, + unsigned int first_buffer_length, + unsigned int frame_length, + bool time_stamp, + bool check_sum) +{ + /* called only from within lan743x_tx_xmit_frame. + * assuming tx->ring_lock has already been acquired. 
+ */ + struct lan743x_tx_descriptor *tx_descriptor = NULL; + struct lan743x_tx_buffer_info *buffer_info = NULL; + struct lan743x_adapter *adapter = tx->adapter; + struct device *dev = &adapter->pdev->dev; + dma_addr_t dma_ptr; + + tx->frame_flags |= TX_FRAME_FLAG_IN_PROGRESS; + tx->frame_first = tx->last_tail; + tx->frame_tail = tx->frame_first; + + tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail]; + buffer_info = &tx->buffer_info[tx->frame_tail]; + dma_ptr = dma_map_single(dev, first_buffer, first_buffer_length, + DMA_TO_DEVICE); + if (dma_mapping_error(dev, dma_ptr)) + return -ENOMEM; + + tx_descriptor->data1 = cpu_to_le32(DMA_ADDR_LOW32(dma_ptr)); + tx_descriptor->data2 = cpu_to_le32(DMA_ADDR_HIGH32(dma_ptr)); + tx_descriptor->data3 = cpu_to_le32((frame_length << 16) & + TX_DESC_DATA3_FRAME_LENGTH_MSS_MASK_); + + buffer_info->skb = NULL; + buffer_info->dma_ptr = dma_ptr; + buffer_info->buffer_length = first_buffer_length; + buffer_info->flags |= TX_BUFFER_INFO_FLAG_ACTIVE; + + tx->frame_data0 = (first_buffer_length & + TX_DESC_DATA0_BUF_LENGTH_MASK_) | + TX_DESC_DATA0_DTYPE_DATA_ | + TX_DESC_DATA0_FS_ | + TX_DESC_DATA0_FCS_; + if (time_stamp) + tx->frame_data0 |= TX_DESC_DATA0_TSE_; + + if (check_sum) + tx->frame_data0 |= TX_DESC_DATA0_ICE_ | + TX_DESC_DATA0_IPE_ | + TX_DESC_DATA0_TPE_; + + /* data0 will be programmed in one of other frame assembler functions */ + return 0; +} + +static void lan743x_tx_frame_add_lso(struct lan743x_tx *tx, + unsigned int frame_length, + int nr_frags) +{ + /* called only from within lan743x_tx_xmit_frame. + * assuming tx->ring_lock has already been acquired. + */ + struct lan743x_tx_descriptor *tx_descriptor = NULL; + struct lan743x_tx_buffer_info *buffer_info = NULL; + + /* wrap up previous descriptor */ + tx->frame_data0 |= TX_DESC_DATA0_EXT_; + if (nr_frags <= 0) { + tx->frame_data0 |= TX_DESC_DATA0_LS_; + tx->frame_data0 |= TX_DESC_DATA0_IOC_; + } + tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail]; + tx_descriptor->data0 = cpu_to_le32(tx->frame_data0); + + /* move to next descriptor */ + tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail); + tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail]; + buffer_info = &tx->buffer_info[tx->frame_tail]; + + /* add extension descriptor */ + tx_descriptor->data1 = 0; + tx_descriptor->data2 = 0; + tx_descriptor->data3 = 0; + + buffer_info->skb = NULL; + buffer_info->dma_ptr = 0; + buffer_info->buffer_length = 0; + buffer_info->flags |= TX_BUFFER_INFO_FLAG_ACTIVE; + + tx->frame_data0 = (frame_length & TX_DESC_DATA0_EXT_PAY_LENGTH_MASK_) | + TX_DESC_DATA0_DTYPE_EXT_ | + TX_DESC_DATA0_EXT_LSO_; + + /* data0 will be programmed in one of other frame assembler functions */ +} + +static int lan743x_tx_frame_add_fragment(struct lan743x_tx *tx, + const skb_frag_t *fragment, + unsigned int frame_length) +{ + /* called only from within lan743x_tx_xmit_frame + * assuming tx->ring_lock has already been acquired + */ + struct lan743x_tx_descriptor *tx_descriptor = NULL; + struct lan743x_tx_buffer_info *buffer_info = NULL; + struct lan743x_adapter *adapter = tx->adapter; + struct device *dev = &adapter->pdev->dev; + unsigned int fragment_length = 0; + dma_addr_t dma_ptr; + + fragment_length = skb_frag_size(fragment); + if (!fragment_length) + return 0; + + /* wrap up previous descriptor */ + tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail]; + tx_descriptor->data0 = cpu_to_le32(tx->frame_data0); + + /* move to next descriptor */ + tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail); + tx_descriptor = 
&tx->ring_cpu_ptr[tx->frame_tail]; + buffer_info = &tx->buffer_info[tx->frame_tail]; + dma_ptr = skb_frag_dma_map(dev, fragment, + 0, fragment_length, + DMA_TO_DEVICE); + if (dma_mapping_error(dev, dma_ptr)) { + int desc_index; + + /* cleanup all previously setup descriptors */ + desc_index = tx->frame_first; + while (desc_index != tx->frame_tail) { + lan743x_tx_release_desc(tx, desc_index, true); + desc_index = lan743x_tx_next_index(tx, desc_index); + } + dma_wmb(); + tx->frame_flags &= ~TX_FRAME_FLAG_IN_PROGRESS; + tx->frame_first = 0; + tx->frame_data0 = 0; + tx->frame_tail = 0; + return -ENOMEM; + } + + tx_descriptor->data1 = cpu_to_le32(DMA_ADDR_LOW32(dma_ptr)); + tx_descriptor->data2 = cpu_to_le32(DMA_ADDR_HIGH32(dma_ptr)); + tx_descriptor->data3 = cpu_to_le32((frame_length << 16) & + TX_DESC_DATA3_FRAME_LENGTH_MSS_MASK_); + + buffer_info->skb = NULL; + buffer_info->dma_ptr = dma_ptr; + buffer_info->buffer_length = fragment_length; + buffer_info->flags |= TX_BUFFER_INFO_FLAG_ACTIVE; + buffer_info->flags |= TX_BUFFER_INFO_FLAG_SKB_FRAGMENT; + + tx->frame_data0 = (fragment_length & TX_DESC_DATA0_BUF_LENGTH_MASK_) | + TX_DESC_DATA0_DTYPE_DATA_ | + TX_DESC_DATA0_FCS_; + + /* data0 will be programmed in one of other frame assembler functions */ + return 0; +} + +static void lan743x_tx_frame_end(struct lan743x_tx *tx, + struct sk_buff *skb, + bool time_stamp, + bool ignore_sync) +{ + /* called only from within lan743x_tx_xmit_frame + * assuming tx->ring_lock has already been acquired + */ + struct lan743x_tx_descriptor *tx_descriptor = NULL; + struct lan743x_tx_buffer_info *buffer_info = NULL; + struct lan743x_adapter *adapter = tx->adapter; + u32 tx_tail_flags = 0; + + /* wrap up previous descriptor */ + if ((tx->frame_data0 & TX_DESC_DATA0_DTYPE_MASK_) == + TX_DESC_DATA0_DTYPE_DATA_) { + tx->frame_data0 |= TX_DESC_DATA0_LS_; + tx->frame_data0 |= TX_DESC_DATA0_IOC_; + } + + tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail]; + buffer_info = &tx->buffer_info[tx->frame_tail]; + buffer_info->skb = skb; + if (time_stamp) + buffer_info->flags |= TX_BUFFER_INFO_FLAG_TIMESTAMP_REQUESTED; + if (ignore_sync) + buffer_info->flags |= TX_BUFFER_INFO_FLAG_IGNORE_SYNC; + + tx_descriptor->data0 = cpu_to_le32(tx->frame_data0); + tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail); + tx->last_tail = tx->frame_tail; + + dma_wmb(); + + if (tx->vector_flags & LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET) + tx_tail_flags |= TX_TAIL_SET_TOP_INT_VEC_EN_; + if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET) + tx_tail_flags |= TX_TAIL_SET_DMAC_INT_EN_ | + TX_TAIL_SET_TOP_INT_EN_; + + lan743x_csr_write(adapter, TX_TAIL(tx->channel_number), + tx_tail_flags | tx->frame_tail); + tx->frame_flags &= ~TX_FRAME_FLAG_IN_PROGRESS; +} + +static netdev_tx_t lan743x_tx_xmit_frame(struct lan743x_tx *tx, + struct sk_buff *skb) +{ + int required_number_of_descriptors = 0; + unsigned int start_frame_length = 0; + netdev_tx_t retval = NETDEV_TX_OK; + unsigned int frame_length = 0; + unsigned int head_length = 0; + unsigned long irq_flags = 0; + bool do_timestamp = false; + bool ignore_sync = false; + struct netdev_queue *txq; + int nr_frags = 0; + bool gso = false; + int j; + + required_number_of_descriptors = lan743x_tx_get_desc_cnt(tx, skb); + + spin_lock_irqsave(&tx->ring_lock, irq_flags); + if (required_number_of_descriptors > + lan743x_tx_get_avail_desc(tx)) { + if (required_number_of_descriptors > (tx->ring_size - 1)) { + dev_kfree_skb_irq(skb); + } else { + /* save how many descriptors we needed to 
restart the queue */ + tx->rqd_descriptors = required_number_of_descriptors; + retval = NETDEV_TX_BUSY; + txq = netdev_get_tx_queue(tx->adapter->netdev, + tx->channel_number); + netif_tx_stop_queue(txq); + } + goto unlock; + } + + /* space available, transmit skb */ + if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && + (tx->ts_flags & TX_TS_FLAG_TIMESTAMPING_ENABLED) && + (lan743x_ptp_request_tx_timestamp(tx->adapter))) { + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + do_timestamp = true; + if (tx->ts_flags & TX_TS_FLAG_ONE_STEP_SYNC) + ignore_sync = true; + } + head_length = skb_headlen(skb); + frame_length = skb_pagelen(skb); + nr_frags = skb_shinfo(skb)->nr_frags; + start_frame_length = frame_length; + gso = skb_is_gso(skb); + if (gso) { + start_frame_length = max(skb_shinfo(skb)->gso_size, + (unsigned short)8); + } + + if (lan743x_tx_frame_start(tx, + skb->data, head_length, + start_frame_length, + do_timestamp, + skb->ip_summed == CHECKSUM_PARTIAL)) { + dev_kfree_skb_irq(skb); + goto unlock; + } + tx->frame_count++; + + if (gso) + lan743x_tx_frame_add_lso(tx, frame_length, nr_frags); + + if (nr_frags <= 0) + goto finish; + + for (j = 0; j < nr_frags; j++) { + const skb_frag_t *frag = &(skb_shinfo(skb)->frags[j]); + + if (lan743x_tx_frame_add_fragment(tx, frag, frame_length)) { + /* upon error no need to call + * lan743x_tx_frame_end + * frame assembler clean up was performed inside + * lan743x_tx_frame_add_fragment + */ + dev_kfree_skb_irq(skb); + goto unlock; + } + } + +finish: + lan743x_tx_frame_end(tx, skb, do_timestamp, ignore_sync); + +unlock: + spin_unlock_irqrestore(&tx->ring_lock, irq_flags); + return retval; +} + +static int lan743x_tx_napi_poll(struct napi_struct *napi, int weight) +{ + struct lan743x_tx *tx = container_of(napi, struct lan743x_tx, napi); + struct lan743x_adapter *adapter = tx->adapter; + unsigned long irq_flags = 0; + struct netdev_queue *txq; + u32 ioc_bit = 0; + + ioc_bit = DMAC_INT_BIT_TX_IOC_(tx->channel_number); + lan743x_csr_read(adapter, DMAC_INT_STS); + if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C) + lan743x_csr_write(adapter, DMAC_INT_STS, ioc_bit); + spin_lock_irqsave(&tx->ring_lock, irq_flags); + + /* clean up tx ring */ + lan743x_tx_release_completed_descriptors(tx); + txq = netdev_get_tx_queue(adapter->netdev, tx->channel_number); + if (netif_tx_queue_stopped(txq)) { + if (tx->rqd_descriptors) { + if (tx->rqd_descriptors <= + lan743x_tx_get_avail_desc(tx)) { + tx->rqd_descriptors = 0; + netif_tx_wake_queue(txq); + } + } else { + netif_tx_wake_queue(txq); + } + } + spin_unlock_irqrestore(&tx->ring_lock, irq_flags); + + if (!napi_complete(napi)) + goto done; + + /* enable isr */ + lan743x_csr_write(adapter, INT_EN_SET, + INT_BIT_DMA_TX_(tx->channel_number)); + lan743x_csr_read(adapter, INT_STS); + +done: + return 0; +} + +static void lan743x_tx_ring_cleanup(struct lan743x_tx *tx) +{ + if (tx->head_cpu_ptr) { + dma_free_coherent(&tx->adapter->pdev->dev, + sizeof(*tx->head_cpu_ptr), tx->head_cpu_ptr, + tx->head_dma_ptr); + tx->head_cpu_ptr = NULL; + tx->head_dma_ptr = 0; + } + kfree(tx->buffer_info); + tx->buffer_info = NULL; + + if (tx->ring_cpu_ptr) { + dma_free_coherent(&tx->adapter->pdev->dev, + tx->ring_allocation_size, tx->ring_cpu_ptr, + tx->ring_dma_ptr); + tx->ring_allocation_size = 0; + tx->ring_cpu_ptr = NULL; + tx->ring_dma_ptr = 0; + } + tx->ring_size = 0; +} + +static int lan743x_tx_ring_init(struct lan743x_tx *tx) +{ + size_t ring_allocation_size = 0; + void *cpu_ptr = NULL; + dma_addr_t dma_ptr; + int ret = 
-ENOMEM; + + tx->ring_size = LAN743X_TX_RING_SIZE; + if (tx->ring_size & ~TX_CFG_B_TX_RING_LEN_MASK_) { + ret = -EINVAL; + goto cleanup; + } + if (dma_set_mask_and_coherent(&tx->adapter->pdev->dev, + DMA_BIT_MASK(64))) { + dev_warn(&tx->adapter->pdev->dev, + "lan743x_: No suitable DMA available\n"); + ret = -ENOMEM; + goto cleanup; + } + ring_allocation_size = ALIGN(tx->ring_size * + sizeof(struct lan743x_tx_descriptor), + PAGE_SIZE); + dma_ptr = 0; + cpu_ptr = dma_alloc_coherent(&tx->adapter->pdev->dev, + ring_allocation_size, &dma_ptr, GFP_KERNEL); + if (!cpu_ptr) { + ret = -ENOMEM; + goto cleanup; + } + + tx->ring_allocation_size = ring_allocation_size; + tx->ring_cpu_ptr = (struct lan743x_tx_descriptor *)cpu_ptr; + tx->ring_dma_ptr = dma_ptr; + + cpu_ptr = kcalloc(tx->ring_size, sizeof(*tx->buffer_info), GFP_KERNEL); + if (!cpu_ptr) { + ret = -ENOMEM; + goto cleanup; + } + tx->buffer_info = (struct lan743x_tx_buffer_info *)cpu_ptr; + dma_ptr = 0; + cpu_ptr = dma_alloc_coherent(&tx->adapter->pdev->dev, + sizeof(*tx->head_cpu_ptr), &dma_ptr, + GFP_KERNEL); + if (!cpu_ptr) { + ret = -ENOMEM; + goto cleanup; + } + + tx->head_cpu_ptr = cpu_ptr; + tx->head_dma_ptr = dma_ptr; + if (tx->head_dma_ptr & 0x3) { + ret = -ENOMEM; + goto cleanup; + } + + return 0; + +cleanup: + lan743x_tx_ring_cleanup(tx); + return ret; +} + +static void lan743x_tx_close(struct lan743x_tx *tx) +{ + struct lan743x_adapter *adapter = tx->adapter; + + lan743x_csr_write(adapter, + DMAC_CMD, + DMAC_CMD_STOP_T_(tx->channel_number)); + lan743x_dmac_tx_wait_till_stopped(adapter, tx->channel_number); + + lan743x_csr_write(adapter, + DMAC_INT_EN_CLR, + DMAC_INT_BIT_TX_IOC_(tx->channel_number)); + lan743x_csr_write(adapter, INT_EN_CLR, + INT_BIT_DMA_TX_(tx->channel_number)); + napi_disable(&tx->napi); + netif_napi_del(&tx->napi); + + lan743x_csr_write(adapter, FCT_TX_CTL, + FCT_TX_CTL_DIS_(tx->channel_number)); + lan743x_csr_wait_for_bit(adapter, FCT_TX_CTL, + FCT_TX_CTL_EN_(tx->channel_number), + 0, 1000, 20000, 100); + + lan743x_tx_release_all_descriptors(tx); + + tx->rqd_descriptors = 0; + + lan743x_tx_ring_cleanup(tx); +} + +static int lan743x_tx_open(struct lan743x_tx *tx) +{ + struct lan743x_adapter *adapter = NULL; + u32 data = 0; + int ret; + + adapter = tx->adapter; + ret = lan743x_tx_ring_init(tx); + if (ret) + return ret; + + /* initialize fifo */ + lan743x_csr_write(adapter, FCT_TX_CTL, + FCT_TX_CTL_RESET_(tx->channel_number)); + lan743x_csr_wait_for_bit(adapter, FCT_TX_CTL, + FCT_TX_CTL_RESET_(tx->channel_number), + 0, 1000, 20000, 100); + + /* enable fifo */ + lan743x_csr_write(adapter, FCT_TX_CTL, + FCT_TX_CTL_EN_(tx->channel_number)); + + /* reset tx channel */ + lan743x_csr_write(adapter, DMAC_CMD, + DMAC_CMD_TX_SWR_(tx->channel_number)); + lan743x_csr_wait_for_bit(adapter, DMAC_CMD, + DMAC_CMD_TX_SWR_(tx->channel_number), + 0, 1000, 20000, 100); + + /* Write TX_BASE_ADDR */ + lan743x_csr_write(adapter, + TX_BASE_ADDRH(tx->channel_number), + DMA_ADDR_HIGH32(tx->ring_dma_ptr)); + lan743x_csr_write(adapter, + TX_BASE_ADDRL(tx->channel_number), + DMA_ADDR_LOW32(tx->ring_dma_ptr)); + + /* Write TX_CFG_B */ + data = lan743x_csr_read(adapter, TX_CFG_B(tx->channel_number)); + data &= ~TX_CFG_B_TX_RING_LEN_MASK_; + data |= ((tx->ring_size) & TX_CFG_B_TX_RING_LEN_MASK_); + if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) + data |= TX_CFG_B_TDMABL_512_; + lan743x_csr_write(adapter, TX_CFG_B(tx->channel_number), data); + + /* Write TX_CFG_A */ + data = TX_CFG_A_TX_TMR_HPWB_SEL_IOC_ | TX_CFG_A_TX_HP_WB_EN_; + if 
(!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) { + data |= TX_CFG_A_TX_HP_WB_ON_INT_TMR_; + data |= TX_CFG_A_TX_PF_THRES_SET_(0x10); + data |= TX_CFG_A_TX_PF_PRI_THRES_SET_(0x04); + data |= TX_CFG_A_TX_HP_WB_THRES_SET_(0x07); + } + lan743x_csr_write(adapter, TX_CFG_A(tx->channel_number), data); + + /* Write TX_HEAD_WRITEBACK_ADDR */ + lan743x_csr_write(adapter, + TX_HEAD_WRITEBACK_ADDRH(tx->channel_number), + DMA_ADDR_HIGH32(tx->head_dma_ptr)); + lan743x_csr_write(adapter, + TX_HEAD_WRITEBACK_ADDRL(tx->channel_number), + DMA_ADDR_LOW32(tx->head_dma_ptr)); + + /* set last head */ + tx->last_head = lan743x_csr_read(adapter, TX_HEAD(tx->channel_number)); + + /* write TX_TAIL */ + tx->last_tail = 0; + lan743x_csr_write(adapter, TX_TAIL(tx->channel_number), + (u32)(tx->last_tail)); + tx->vector_flags = lan743x_intr_get_vector_flags(adapter, + INT_BIT_DMA_TX_ + (tx->channel_number)); + netif_napi_add_tx_weight(adapter->netdev, + &tx->napi, lan743x_tx_napi_poll, + NAPI_POLL_WEIGHT); + napi_enable(&tx->napi); + + data = 0; + if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR) + data |= TX_CFG_C_TX_TOP_INT_EN_AUTO_CLR_; + if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR) + data |= TX_CFG_C_TX_DMA_INT_STS_AUTO_CLR_; + if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_R2C) + data |= TX_CFG_C_TX_INT_STS_R2C_MODE_MASK_; + if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_R2C) + data |= TX_CFG_C_TX_INT_EN_R2C_; + lan743x_csr_write(adapter, TX_CFG_C(tx->channel_number), data); + + if (!(tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET)) + lan743x_csr_write(adapter, INT_EN_SET, + INT_BIT_DMA_TX_(tx->channel_number)); + lan743x_csr_write(adapter, DMAC_INT_EN_SET, + DMAC_INT_BIT_TX_IOC_(tx->channel_number)); + + /* start dmac channel */ + lan743x_csr_write(adapter, DMAC_CMD, + DMAC_CMD_START_T_(tx->channel_number)); + return 0; +} + +static int lan743x_rx_next_index(struct lan743x_rx *rx, int index) +{ + return ((++index) % rx->ring_size); +} + +static void lan743x_rx_update_tail(struct lan743x_rx *rx, int index) +{ + /* update the tail once per 8 descriptors */ + if ((index & 7) == 7) + lan743x_csr_write(rx->adapter, RX_TAIL(rx->channel_number), + index); +} + +static int lan743x_rx_init_ring_element(struct lan743x_rx *rx, int index, + gfp_t gfp) +{ + struct net_device *netdev = rx->adapter->netdev; + struct device *dev = &rx->adapter->pdev->dev; + struct lan743x_rx_buffer_info *buffer_info; + unsigned int buffer_length, used_length; + struct lan743x_rx_descriptor *descriptor; + struct sk_buff *skb; + dma_addr_t dma_ptr; + + buffer_length = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + RX_HEAD_PADDING; + + descriptor = &rx->ring_cpu_ptr[index]; + buffer_info = &rx->buffer_info[index]; + skb = __netdev_alloc_skb(netdev, buffer_length, gfp); + if (!skb) + return -ENOMEM; + dma_ptr = dma_map_single(dev, skb->data, buffer_length, DMA_FROM_DEVICE); + if (dma_mapping_error(dev, dma_ptr)) { + dev_kfree_skb_any(skb); + return -ENOMEM; + } + if (buffer_info->dma_ptr) { + /* sync used area of buffer only */ + if (le32_to_cpu(descriptor->data0) & RX_DESC_DATA0_LS_) + /* frame length is valid only if LS bit is set. + * it's a safe upper bound for the used area in this + * buffer. 
+ */ + used_length = min(RX_DESC_DATA0_FRAME_LENGTH_GET_ + (le32_to_cpu(descriptor->data0)), + buffer_info->buffer_length); + else + used_length = buffer_info->buffer_length; + dma_sync_single_for_cpu(dev, buffer_info->dma_ptr, + used_length, + DMA_FROM_DEVICE); + dma_unmap_single_attrs(dev, buffer_info->dma_ptr, + buffer_info->buffer_length, + DMA_FROM_DEVICE, + DMA_ATTR_SKIP_CPU_SYNC); + } + + buffer_info->skb = skb; + buffer_info->dma_ptr = dma_ptr; + buffer_info->buffer_length = buffer_length; + descriptor->data1 = cpu_to_le32(DMA_ADDR_LOW32(buffer_info->dma_ptr)); + descriptor->data2 = cpu_to_le32(DMA_ADDR_HIGH32(buffer_info->dma_ptr)); + descriptor->data3 = 0; + descriptor->data0 = cpu_to_le32((RX_DESC_DATA0_OWN_ | + (buffer_length & RX_DESC_DATA0_BUF_LENGTH_MASK_))); + lan743x_rx_update_tail(rx, index); + + return 0; +} + +static void lan743x_rx_reuse_ring_element(struct lan743x_rx *rx, int index) +{ + struct lan743x_rx_buffer_info *buffer_info; + struct lan743x_rx_descriptor *descriptor; + + descriptor = &rx->ring_cpu_ptr[index]; + buffer_info = &rx->buffer_info[index]; + + descriptor->data1 = cpu_to_le32(DMA_ADDR_LOW32(buffer_info->dma_ptr)); + descriptor->data2 = cpu_to_le32(DMA_ADDR_HIGH32(buffer_info->dma_ptr)); + descriptor->data3 = 0; + descriptor->data0 = cpu_to_le32((RX_DESC_DATA0_OWN_ | + ((buffer_info->buffer_length) & + RX_DESC_DATA0_BUF_LENGTH_MASK_))); + lan743x_rx_update_tail(rx, index); +} + +static void lan743x_rx_release_ring_element(struct lan743x_rx *rx, int index) +{ + struct lan743x_rx_buffer_info *buffer_info; + struct lan743x_rx_descriptor *descriptor; + + descriptor = &rx->ring_cpu_ptr[index]; + buffer_info = &rx->buffer_info[index]; + + memset(descriptor, 0, sizeof(*descriptor)); + + if (buffer_info->dma_ptr) { + dma_unmap_single(&rx->adapter->pdev->dev, + buffer_info->dma_ptr, + buffer_info->buffer_length, + DMA_FROM_DEVICE); + buffer_info->dma_ptr = 0; + } + + if (buffer_info->skb) { + dev_kfree_skb(buffer_info->skb); + buffer_info->skb = NULL; + } + + memset(buffer_info, 0, sizeof(*buffer_info)); +} + +static struct sk_buff * +lan743x_rx_trim_skb(struct sk_buff *skb, int frame_length) +{ + if (skb_linearize(skb)) { + dev_kfree_skb_irq(skb); + return NULL; + } + frame_length = max_t(int, 0, frame_length - ETH_FCS_LEN); + if (skb->len > frame_length) { + skb->tail -= skb->len - frame_length; + skb->len = frame_length; + } + return skb; +} + +static int lan743x_rx_process_buffer(struct lan743x_rx *rx) +{ + int current_head_index = le32_to_cpu(*rx->head_cpu_ptr); + struct lan743x_rx_descriptor *descriptor, *desc_ext; + struct net_device *netdev = rx->adapter->netdev; + int result = RX_PROCESS_RESULT_NOTHING_TO_DO; + struct lan743x_rx_buffer_info *buffer_info; + int frame_length, buffer_length; + bool is_ice, is_tce, is_icsm; + int extension_index = -1; + bool is_last, is_first; + struct sk_buff *skb; + + if (current_head_index < 0 || current_head_index >= rx->ring_size) + goto done; + + if (rx->last_head < 0 || rx->last_head >= rx->ring_size) + goto done; + + if (rx->last_head == current_head_index) + goto done; + + descriptor = &rx->ring_cpu_ptr[rx->last_head]; + if (le32_to_cpu(descriptor->data0) & RX_DESC_DATA0_OWN_) + goto done; + buffer_info = &rx->buffer_info[rx->last_head]; + + is_last = le32_to_cpu(descriptor->data0) & RX_DESC_DATA0_LS_; + is_first = le32_to_cpu(descriptor->data0) & RX_DESC_DATA0_FS_; + + if (is_last && le32_to_cpu(descriptor->data0) & RX_DESC_DATA0_EXT_) { + /* extension is expected to follow */ + int index = 
lan743x_rx_next_index(rx, rx->last_head); + + if (index == current_head_index) + /* extension not yet available */ + goto done; + desc_ext = &rx->ring_cpu_ptr[index]; + if (le32_to_cpu(desc_ext->data0) & RX_DESC_DATA0_OWN_) + /* extension not yet available */ + goto done; + if (!(le32_to_cpu(desc_ext->data0) & RX_DESC_DATA0_EXT_)) + goto move_forward; + extension_index = index; + } + + /* Only the last buffer in a multi-buffer frame contains the total frame + * length. The chip occasionally sends more buffers than strictly + * required to reach the total frame length. + * Handle this by adding all buffers to the skb in their entirety. + * Once the real frame length is known, trim the skb. + */ + frame_length = + RX_DESC_DATA0_FRAME_LENGTH_GET_(le32_to_cpu(descriptor->data0)); + buffer_length = buffer_info->buffer_length; + is_ice = le32_to_cpu(descriptor->data1) & RX_DESC_DATA1_STATUS_ICE_; + is_tce = le32_to_cpu(descriptor->data1) & RX_DESC_DATA1_STATUS_TCE_; + is_icsm = le32_to_cpu(descriptor->data1) & RX_DESC_DATA1_STATUS_ICSM_; + + netdev_dbg(netdev, "%s%schunk: %d/%d", + is_first ? "first " : " ", + is_last ? "last " : " ", + frame_length, buffer_length); + + /* save existing skb, allocate new skb and map to dma */ + skb = buffer_info->skb; + if (lan743x_rx_init_ring_element(rx, rx->last_head, + GFP_ATOMIC | GFP_DMA)) { + /* failed to allocate next skb. + * Memory is very low. + * Drop this packet and reuse buffer. + */ + lan743x_rx_reuse_ring_element(rx, rx->last_head); + /* drop packet that was being assembled */ + dev_kfree_skb_irq(rx->skb_head); + rx->skb_head = NULL; + goto process_extension; + } + + /* add buffers to skb via skb->frag_list */ + if (is_first) { + skb_reserve(skb, RX_HEAD_PADDING); + skb_put(skb, buffer_length - RX_HEAD_PADDING); + if (rx->skb_head) + dev_kfree_skb_irq(rx->skb_head); + rx->skb_head = skb; + } else if (rx->skb_head) { + skb_put(skb, buffer_length); + if (skb_shinfo(rx->skb_head)->frag_list) + rx->skb_tail->next = skb; + else + skb_shinfo(rx->skb_head)->frag_list = skb; + rx->skb_tail = skb; + rx->skb_head->len += skb->len; + rx->skb_head->data_len += skb->len; + rx->skb_head->truesize += skb->truesize; + } else { + /* packet to assemble has already been dropped because one or + * more of its buffers could not be allocated + */ + netdev_dbg(netdev, "drop buffer intended for dropped packet"); + dev_kfree_skb_irq(skb); + } + +process_extension: + if (extension_index >= 0) { + u32 ts_sec; + u32 ts_nsec; + + ts_sec = le32_to_cpu(desc_ext->data1); + ts_nsec = (le32_to_cpu(desc_ext->data2) & + RX_DESC_DATA2_TS_NS_MASK_); + if (rx->skb_head) + skb_hwtstamps(rx->skb_head)->hwtstamp = + ktime_set(ts_sec, ts_nsec); + lan743x_rx_reuse_ring_element(rx, extension_index); + rx->last_head = extension_index; + netdev_dbg(netdev, "process extension"); + } + + if (is_last && rx->skb_head) + rx->skb_head = lan743x_rx_trim_skb(rx->skb_head, frame_length); + + if (is_last && rx->skb_head) { + rx->skb_head->protocol = eth_type_trans(rx->skb_head, + rx->adapter->netdev); + if (rx->adapter->netdev->features & NETIF_F_RXCSUM) { + if (!is_ice && !is_tce && !is_icsm) + skb->ip_summed = CHECKSUM_UNNECESSARY; + } + netdev_dbg(netdev, "sending %d byte frame to OS", + rx->skb_head->len); + napi_gro_receive(&rx->napi, rx->skb_head); + rx->skb_head = NULL; + } + +move_forward: + /* push tail and head forward */ + rx->last_tail = rx->last_head; + rx->last_head = lan743x_rx_next_index(rx, rx->last_head); + result = RX_PROCESS_RESULT_BUFFER_RECEIVED; +done: + return result; +} + 
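/* Editor's note (annotation only, not part of this commit): the receive path
 * above advances a software index, rx->last_head, toward the hardware
 * write-back head (*rx->head_cpu_ptr), one descriptor per call, wrapping with
 * lan743x_rx_next_index().  The NAPI poll below repeats that until either the
 * budget is spent or the two indices meet.  The helper below is a
 * hypothetical, self-contained sketch of the wrap-around arithmetic involved;
 * the driver itself never needs the count, it only compares the indices.
 */
static inline int example_rx_ring_pending(int hw_head, int sw_head,
					  int ring_size)
{
	/* descriptors completed by hardware but not yet processed by the
	 * driver, accounting for index wrap-around
	 */
	return (hw_head - sw_head + ring_size) % ring_size;
}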
+static int lan743x_rx_napi_poll(struct napi_struct *napi, int weight) +{ + struct lan743x_rx *rx = container_of(napi, struct lan743x_rx, napi); + struct lan743x_adapter *adapter = rx->adapter; + int result = RX_PROCESS_RESULT_NOTHING_TO_DO; + u32 rx_tail_flags = 0; + int count; + + if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C) { + /* clear int status bit before reading packet */ + lan743x_csr_write(adapter, DMAC_INT_STS, + DMAC_INT_BIT_RXFRM_(rx->channel_number)); + } + for (count = 0; count < weight; count++) { + result = lan743x_rx_process_buffer(rx); + if (result == RX_PROCESS_RESULT_NOTHING_TO_DO) + break; + } + rx->frame_count += count; + if (count == weight || result == RX_PROCESS_RESULT_BUFFER_RECEIVED) + return weight; + + if (!napi_complete_done(napi, count)) + return count; + + /* re-arm interrupts, must write to rx tail on some chip variants */ + if (rx->vector_flags & LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET) + rx_tail_flags |= RX_TAIL_SET_TOP_INT_VEC_EN_; + if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET) { + rx_tail_flags |= RX_TAIL_SET_TOP_INT_EN_; + } else { + lan743x_csr_write(adapter, INT_EN_SET, + INT_BIT_DMA_RX_(rx->channel_number)); + } + + if (rx_tail_flags) + lan743x_csr_write(adapter, RX_TAIL(rx->channel_number), + rx_tail_flags | rx->last_tail); + + return count; +} + +static void lan743x_rx_ring_cleanup(struct lan743x_rx *rx) +{ + if (rx->buffer_info && rx->ring_cpu_ptr) { + int index; + + for (index = 0; index < rx->ring_size; index++) + lan743x_rx_release_ring_element(rx, index); + } + + if (rx->head_cpu_ptr) { + dma_free_coherent(&rx->adapter->pdev->dev, + sizeof(*rx->head_cpu_ptr), rx->head_cpu_ptr, + rx->head_dma_ptr); + rx->head_cpu_ptr = NULL; + rx->head_dma_ptr = 0; + } + + kfree(rx->buffer_info); + rx->buffer_info = NULL; + + if (rx->ring_cpu_ptr) { + dma_free_coherent(&rx->adapter->pdev->dev, + rx->ring_allocation_size, rx->ring_cpu_ptr, + rx->ring_dma_ptr); + rx->ring_allocation_size = 0; + rx->ring_cpu_ptr = NULL; + rx->ring_dma_ptr = 0; + } + + rx->ring_size = 0; + rx->last_head = 0; +} + +static int lan743x_rx_ring_init(struct lan743x_rx *rx) +{ + size_t ring_allocation_size = 0; + dma_addr_t dma_ptr = 0; + void *cpu_ptr = NULL; + int ret = -ENOMEM; + int index = 0; + + rx->ring_size = LAN743X_RX_RING_SIZE; + if (rx->ring_size <= 1) { + ret = -EINVAL; + goto cleanup; + } + if (rx->ring_size & ~RX_CFG_B_RX_RING_LEN_MASK_) { + ret = -EINVAL; + goto cleanup; + } + if (dma_set_mask_and_coherent(&rx->adapter->pdev->dev, + DMA_BIT_MASK(64))) { + dev_warn(&rx->adapter->pdev->dev, + "lan743x_: No suitable DMA available\n"); + ret = -ENOMEM; + goto cleanup; + } + ring_allocation_size = ALIGN(rx->ring_size * + sizeof(struct lan743x_rx_descriptor), + PAGE_SIZE); + dma_ptr = 0; + cpu_ptr = dma_alloc_coherent(&rx->adapter->pdev->dev, + ring_allocation_size, &dma_ptr, GFP_KERNEL); + if (!cpu_ptr) { + ret = -ENOMEM; + goto cleanup; + } + rx->ring_allocation_size = ring_allocation_size; + rx->ring_cpu_ptr = (struct lan743x_rx_descriptor *)cpu_ptr; + rx->ring_dma_ptr = dma_ptr; + + cpu_ptr = kcalloc(rx->ring_size, sizeof(*rx->buffer_info), + GFP_KERNEL); + if (!cpu_ptr) { + ret = -ENOMEM; + goto cleanup; + } + rx->buffer_info = (struct lan743x_rx_buffer_info *)cpu_ptr; + dma_ptr = 0; + cpu_ptr = dma_alloc_coherent(&rx->adapter->pdev->dev, + sizeof(*rx->head_cpu_ptr), &dma_ptr, + GFP_KERNEL); + if (!cpu_ptr) { + ret = -ENOMEM; + goto cleanup; + } + + rx->head_cpu_ptr = cpu_ptr; + rx->head_dma_ptr = dma_ptr; + if (rx->head_dma_ptr & 
0x3) { + ret = -ENOMEM; + goto cleanup; + } + + rx->last_head = 0; + for (index = 0; index < rx->ring_size; index++) { + ret = lan743x_rx_init_ring_element(rx, index, GFP_KERNEL); + if (ret) + goto cleanup; + } + return 0; + +cleanup: + netif_warn(rx->adapter, ifup, rx->adapter->netdev, + "Error allocating memory for LAN743x\n"); + + lan743x_rx_ring_cleanup(rx); + return ret; +} + +static void lan743x_rx_close(struct lan743x_rx *rx) +{ + struct lan743x_adapter *adapter = rx->adapter; + + lan743x_csr_write(adapter, FCT_RX_CTL, + FCT_RX_CTL_DIS_(rx->channel_number)); + lan743x_csr_wait_for_bit(adapter, FCT_RX_CTL, + FCT_RX_CTL_EN_(rx->channel_number), + 0, 1000, 20000, 100); + + lan743x_csr_write(adapter, DMAC_CMD, + DMAC_CMD_STOP_R_(rx->channel_number)); + lan743x_dmac_rx_wait_till_stopped(adapter, rx->channel_number); + + lan743x_csr_write(adapter, DMAC_INT_EN_CLR, + DMAC_INT_BIT_RXFRM_(rx->channel_number)); + lan743x_csr_write(adapter, INT_EN_CLR, + INT_BIT_DMA_RX_(rx->channel_number)); + napi_disable(&rx->napi); + + netif_napi_del(&rx->napi); + + lan743x_rx_ring_cleanup(rx); +} + +static int lan743x_rx_open(struct lan743x_rx *rx) +{ + struct lan743x_adapter *adapter = rx->adapter; + u32 data = 0; + int ret; + + rx->frame_count = 0; + ret = lan743x_rx_ring_init(rx); + if (ret) + goto return_error; + + netif_napi_add(adapter->netdev, &rx->napi, lan743x_rx_napi_poll); + + lan743x_csr_write(adapter, DMAC_CMD, + DMAC_CMD_RX_SWR_(rx->channel_number)); + lan743x_csr_wait_for_bit(adapter, DMAC_CMD, + DMAC_CMD_RX_SWR_(rx->channel_number), + 0, 1000, 20000, 100); + + /* set ring base address */ + lan743x_csr_write(adapter, + RX_BASE_ADDRH(rx->channel_number), + DMA_ADDR_HIGH32(rx->ring_dma_ptr)); + lan743x_csr_write(adapter, + RX_BASE_ADDRL(rx->channel_number), + DMA_ADDR_LOW32(rx->ring_dma_ptr)); + + /* set rx write back address */ + lan743x_csr_write(adapter, + RX_HEAD_WRITEBACK_ADDRH(rx->channel_number), + DMA_ADDR_HIGH32(rx->head_dma_ptr)); + lan743x_csr_write(adapter, + RX_HEAD_WRITEBACK_ADDRL(rx->channel_number), + DMA_ADDR_LOW32(rx->head_dma_ptr)); + data = RX_CFG_A_RX_HP_WB_EN_; + if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) { + data |= (RX_CFG_A_RX_WB_ON_INT_TMR_ | + RX_CFG_A_RX_WB_THRES_SET_(0x7) | + RX_CFG_A_RX_PF_THRES_SET_(16) | + RX_CFG_A_RX_PF_PRI_THRES_SET_(4)); + } + + /* set RX_CFG_A */ + lan743x_csr_write(adapter, + RX_CFG_A(rx->channel_number), data); + + /* set RX_CFG_B */ + data = lan743x_csr_read(adapter, RX_CFG_B(rx->channel_number)); + data &= ~RX_CFG_B_RX_PAD_MASK_; + if (!RX_HEAD_PADDING) + data |= RX_CFG_B_RX_PAD_0_; + else + data |= RX_CFG_B_RX_PAD_2_; + data &= ~RX_CFG_B_RX_RING_LEN_MASK_; + data |= ((rx->ring_size) & RX_CFG_B_RX_RING_LEN_MASK_); + data |= RX_CFG_B_TS_ALL_RX_; + if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) + data |= RX_CFG_B_RDMABL_512_; + + lan743x_csr_write(adapter, RX_CFG_B(rx->channel_number), data); + rx->vector_flags = lan743x_intr_get_vector_flags(adapter, + INT_BIT_DMA_RX_ + (rx->channel_number)); + + /* set RX_CFG_C */ + data = 0; + if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR) + data |= RX_CFG_C_RX_TOP_INT_EN_AUTO_CLR_; + if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR) + data |= RX_CFG_C_RX_DMA_INT_STS_AUTO_CLR_; + if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_R2C) + data |= RX_CFG_C_RX_INT_STS_R2C_MODE_MASK_; + if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_R2C) + data |= RX_CFG_C_RX_INT_EN_R2C_; + lan743x_csr_write(adapter, RX_CFG_C(rx->channel_number), data); + 
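	/* Editor's note (annotation only, not part of this commit): the ring
	 * populated by lan743x_rx_ring_init() above holds one DMA-mapped skb
	 * per descriptor, each sized in lan743x_rx_init_ring_element() as
	 * netdev->mtu + ETH_HLEN + ETH_FCS_LEN + RX_HEAD_PADDING.  The tail
	 * written just below is set to the last ring index so that the whole
	 * freshly filled ring is handed to the DMA engine in one go, and the
	 * head of a just-reset channel is expected to read back as zero
	 * (anything else is treated as -EIO).
	 */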
+ rx->last_tail = ((u32)(rx->ring_size - 1)); + lan743x_csr_write(adapter, RX_TAIL(rx->channel_number), + rx->last_tail); + rx->last_head = lan743x_csr_read(adapter, RX_HEAD(rx->channel_number)); + if (rx->last_head) { + ret = -EIO; + goto napi_delete; + } + + napi_enable(&rx->napi); + + lan743x_csr_write(adapter, INT_EN_SET, + INT_BIT_DMA_RX_(rx->channel_number)); + lan743x_csr_write(adapter, DMAC_INT_STS, + DMAC_INT_BIT_RXFRM_(rx->channel_number)); + lan743x_csr_write(adapter, DMAC_INT_EN_SET, + DMAC_INT_BIT_RXFRM_(rx->channel_number)); + lan743x_csr_write(adapter, DMAC_CMD, + DMAC_CMD_START_R_(rx->channel_number)); + + /* initialize fifo */ + lan743x_csr_write(adapter, FCT_RX_CTL, + FCT_RX_CTL_RESET_(rx->channel_number)); + lan743x_csr_wait_for_bit(adapter, FCT_RX_CTL, + FCT_RX_CTL_RESET_(rx->channel_number), + 0, 1000, 20000, 100); + lan743x_csr_write(adapter, FCT_FLOW(rx->channel_number), + FCT_FLOW_CTL_REQ_EN_ | + FCT_FLOW_CTL_ON_THRESHOLD_SET_(0x2A) | + FCT_FLOW_CTL_OFF_THRESHOLD_SET_(0xA)); + + /* enable fifo */ + lan743x_csr_write(adapter, FCT_RX_CTL, + FCT_RX_CTL_EN_(rx->channel_number)); + return 0; + +napi_delete: + netif_napi_del(&rx->napi); + lan743x_rx_ring_cleanup(rx); + +return_error: + return ret; +} + +static int lan743x_netdev_close(struct net_device *netdev) +{ + struct lan743x_adapter *adapter = netdev_priv(netdev); + int index; + + for (index = 0; index < adapter->used_tx_channels; index++) + lan743x_tx_close(&adapter->tx[index]); + + for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++) + lan743x_rx_close(&adapter->rx[index]); + + lan743x_ptp_close(adapter); + + lan743x_phy_close(adapter); + + lan743x_mac_close(adapter); + + lan743x_intr_close(adapter); + + return 0; +} + +static int lan743x_netdev_open(struct net_device *netdev) +{ + struct lan743x_adapter *adapter = netdev_priv(netdev); + int index; + int ret; + + ret = lan743x_intr_open(adapter); + if (ret) + goto return_error; + + ret = lan743x_mac_open(adapter); + if (ret) + goto close_intr; + + ret = lan743x_phy_open(adapter); + if (ret) + goto close_mac; + + ret = lan743x_ptp_open(adapter); + if (ret) + goto close_phy; + + lan743x_rfe_open(adapter); + + for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++) { + ret = lan743x_rx_open(&adapter->rx[index]); + if (ret) + goto close_rx; + } + + for (index = 0; index < adapter->used_tx_channels; index++) { + ret = lan743x_tx_open(&adapter->tx[index]); + if (ret) + goto close_tx; + } + return 0; + +close_tx: + for (index = 0; index < adapter->used_tx_channels; index++) { + if (adapter->tx[index].ring_cpu_ptr) + lan743x_tx_close(&adapter->tx[index]); + } + +close_rx: + for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++) { + if (adapter->rx[index].ring_cpu_ptr) + lan743x_rx_close(&adapter->rx[index]); + } + lan743x_ptp_close(adapter); + +close_phy: + lan743x_phy_close(adapter); + +close_mac: + lan743x_mac_close(adapter); + +close_intr: + lan743x_intr_close(adapter); + +return_error: + netif_warn(adapter, ifup, adapter->netdev, + "Error opening LAN743x\n"); + return ret; +} + +static netdev_tx_t lan743x_netdev_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + struct lan743x_adapter *adapter = netdev_priv(netdev); + u8 ch = 0; + + if (adapter->is_pci11x1x) + ch = skb->queue_mapping % PCI11X1X_USED_TX_CHANNELS; + + return lan743x_tx_xmit_frame(&adapter->tx[ch], skb); +} + +static int lan743x_netdev_ioctl(struct net_device *netdev, + struct ifreq *ifr, int cmd) +{ + if (!netif_running(netdev)) + return -EINVAL; + if (cmd == SIOCSHWTSTAMP) + 
return lan743x_ptp_ioctl(netdev, ifr, cmd); + return phy_mii_ioctl(netdev->phydev, ifr, cmd); +} + +static void lan743x_netdev_set_multicast(struct net_device *netdev) +{ + struct lan743x_adapter *adapter = netdev_priv(netdev); + + lan743x_rfe_set_multicast(adapter); +} + +static int lan743x_netdev_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct lan743x_adapter *adapter = netdev_priv(netdev); + int ret = 0; + + ret = lan743x_mac_set_mtu(adapter, new_mtu); + if (!ret) + netdev->mtu = new_mtu; + return ret; +} + +static void lan743x_netdev_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +{ + struct lan743x_adapter *adapter = netdev_priv(netdev); + + stats->rx_packets = lan743x_csr_read(adapter, STAT_RX_TOTAL_FRAMES); + stats->tx_packets = lan743x_csr_read(adapter, STAT_TX_TOTAL_FRAMES); + stats->rx_bytes = lan743x_csr_read(adapter, + STAT_RX_UNICAST_BYTE_COUNT) + + lan743x_csr_read(adapter, + STAT_RX_BROADCAST_BYTE_COUNT) + + lan743x_csr_read(adapter, + STAT_RX_MULTICAST_BYTE_COUNT); + stats->tx_bytes = lan743x_csr_read(adapter, + STAT_TX_UNICAST_BYTE_COUNT) + + lan743x_csr_read(adapter, + STAT_TX_BROADCAST_BYTE_COUNT) + + lan743x_csr_read(adapter, + STAT_TX_MULTICAST_BYTE_COUNT); + stats->rx_errors = lan743x_csr_read(adapter, STAT_RX_FCS_ERRORS) + + lan743x_csr_read(adapter, + STAT_RX_ALIGNMENT_ERRORS) + + lan743x_csr_read(adapter, STAT_RX_JABBER_ERRORS) + + lan743x_csr_read(adapter, + STAT_RX_UNDERSIZE_FRAME_ERRORS) + + lan743x_csr_read(adapter, + STAT_RX_OVERSIZE_FRAME_ERRORS); + stats->tx_errors = lan743x_csr_read(adapter, STAT_TX_FCS_ERRORS) + + lan743x_csr_read(adapter, + STAT_TX_EXCESS_DEFERRAL_ERRORS) + + lan743x_csr_read(adapter, STAT_TX_CARRIER_ERRORS); + stats->rx_dropped = lan743x_csr_read(adapter, + STAT_RX_DROPPED_FRAMES); + stats->tx_dropped = lan743x_csr_read(adapter, + STAT_TX_EXCESSIVE_COLLISION); + stats->multicast = lan743x_csr_read(adapter, + STAT_RX_MULTICAST_FRAMES) + + lan743x_csr_read(adapter, + STAT_TX_MULTICAST_FRAMES); + stats->collisions = lan743x_csr_read(adapter, + STAT_TX_SINGLE_COLLISIONS) + + lan743x_csr_read(adapter, + STAT_TX_MULTIPLE_COLLISIONS) + + lan743x_csr_read(adapter, + STAT_TX_LATE_COLLISIONS); +} + +static int lan743x_netdev_set_mac_address(struct net_device *netdev, + void *addr) +{ + struct lan743x_adapter *adapter = netdev_priv(netdev); + struct sockaddr *sock_addr = addr; + int ret; + + ret = eth_prepare_mac_addr_change(netdev, sock_addr); + if (ret) + return ret; + eth_hw_addr_set(netdev, sock_addr->sa_data); + lan743x_mac_set_address(adapter, sock_addr->sa_data); + lan743x_rfe_update_mac_address(adapter); + return 0; +} + +static const struct net_device_ops lan743x_netdev_ops = { + .ndo_open = lan743x_netdev_open, + .ndo_stop = lan743x_netdev_close, + .ndo_start_xmit = lan743x_netdev_xmit_frame, + .ndo_eth_ioctl = lan743x_netdev_ioctl, + .ndo_set_rx_mode = lan743x_netdev_set_multicast, + .ndo_change_mtu = lan743x_netdev_change_mtu, + .ndo_get_stats64 = lan743x_netdev_get_stats64, + .ndo_set_mac_address = lan743x_netdev_set_mac_address, +}; + +static void lan743x_hardware_cleanup(struct lan743x_adapter *adapter) +{ + lan743x_csr_write(adapter, INT_EN_CLR, 0xFFFFFFFF); +} + +static void lan743x_mdiobus_cleanup(struct lan743x_adapter *adapter) +{ + mdiobus_unregister(adapter->mdiobus); +} + +static void lan743x_full_cleanup(struct lan743x_adapter *adapter) +{ + unregister_netdev(adapter->netdev); + + lan743x_mdiobus_cleanup(adapter); + lan743x_hardware_cleanup(adapter); + lan743x_pci_cleanup(adapter); 
+} + +static int lan743x_hardware_init(struct lan743x_adapter *adapter, + struct pci_dev *pdev) +{ + struct lan743x_tx *tx; + int index; + int ret; + + adapter->is_pci11x1x = is_pci11x1x_chip(adapter); + if (adapter->is_pci11x1x) { + adapter->max_tx_channels = PCI11X1X_MAX_TX_CHANNELS; + adapter->used_tx_channels = PCI11X1X_USED_TX_CHANNELS; + adapter->max_vector_count = PCI11X1X_MAX_VECTOR_COUNT; + pci11x1x_strap_get_status(adapter); + spin_lock_init(&adapter->eth_syslock_spinlock); + mutex_init(&adapter->sgmii_rw_lock); + } else { + adapter->max_tx_channels = LAN743X_MAX_TX_CHANNELS; + adapter->used_tx_channels = LAN743X_USED_TX_CHANNELS; + adapter->max_vector_count = LAN743X_MAX_VECTOR_COUNT; + } + + adapter->intr.irq = adapter->pdev->irq; + lan743x_csr_write(adapter, INT_EN_CLR, 0xFFFFFFFF); + + ret = lan743x_gpio_init(adapter); + if (ret) + return ret; + + ret = lan743x_mac_init(adapter); + if (ret) + return ret; + + ret = lan743x_phy_init(adapter); + if (ret) + return ret; + + ret = lan743x_ptp_init(adapter); + if (ret) + return ret; + + lan743x_rfe_update_mac_address(adapter); + + ret = lan743x_dmac_init(adapter); + if (ret) + return ret; + + for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++) { + adapter->rx[index].adapter = adapter; + adapter->rx[index].channel_number = index; + } + + for (index = 0; index < adapter->used_tx_channels; index++) { + tx = &adapter->tx[index]; + tx->adapter = adapter; + tx->channel_number = index; + spin_lock_init(&tx->ring_lock); + } + + return 0; +} + +static int lan743x_mdiobus_init(struct lan743x_adapter *adapter) +{ + u32 sgmii_ctl; + int ret; + + adapter->mdiobus = devm_mdiobus_alloc(&adapter->pdev->dev); + if (!(adapter->mdiobus)) { + ret = -ENOMEM; + goto return_error; + } + + adapter->mdiobus->priv = (void *)adapter; + if (adapter->is_pci11x1x) { + if (adapter->is_sgmii_en) { + sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL); + sgmii_ctl |= SGMII_CTL_SGMII_ENABLE_; + sgmii_ctl &= ~SGMII_CTL_SGMII_POWER_DN_; + lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl); + netif_dbg(adapter, drv, adapter->netdev, + "SGMII operation\n"); + adapter->mdiobus->probe_capabilities = MDIOBUS_C22_C45; + adapter->mdiobus->read = lan743x_mdiobus_c45_read; + adapter->mdiobus->write = lan743x_mdiobus_c45_write; + adapter->mdiobus->name = "lan743x-mdiobus-c45"; + netif_dbg(adapter, drv, adapter->netdev, + "lan743x-mdiobus-c45\n"); + } else { + sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL); + sgmii_ctl &= ~SGMII_CTL_SGMII_ENABLE_; + sgmii_ctl |= SGMII_CTL_SGMII_POWER_DN_; + lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl); + netif_dbg(adapter, drv, adapter->netdev, + "RGMII operation\n"); + // Only C22 support when RGMII I/F + adapter->mdiobus->probe_capabilities = MDIOBUS_C22; + adapter->mdiobus->read = lan743x_mdiobus_read; + adapter->mdiobus->write = lan743x_mdiobus_write; + adapter->mdiobus->name = "lan743x-mdiobus"; + netif_dbg(adapter, drv, adapter->netdev, + "lan743x-mdiobus\n"); + } + } else { + adapter->mdiobus->read = lan743x_mdiobus_read; + adapter->mdiobus->write = lan743x_mdiobus_write; + adapter->mdiobus->name = "lan743x-mdiobus"; + netif_dbg(adapter, drv, adapter->netdev, "lan743x-mdiobus\n"); + } + + snprintf(adapter->mdiobus->id, MII_BUS_ID_SIZE, + "pci-%s", pci_name(adapter->pdev)); + + if ((adapter->csr.id_rev & ID_REV_ID_MASK_) == ID_REV_ID_LAN7430_) + /* LAN7430 uses internal phy at address 1 */ + adapter->mdiobus->phy_mask = ~(u32)BIT(1); + + /* register mdiobus */ + ret = mdiobus_register(adapter->mdiobus); + if (ret < 0) + goto 
return_error; + return 0; + +return_error: + return ret; +} + +/* lan743x_pcidev_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @id: entry in lan743x_pci_tbl + * + * Returns 0 on success, negative on failure + * + * initializes an adapter identified by a pci_dev structure. + * The OS initialization, configuring of the adapter private structure, + * and a hardware reset occur. + **/ +static int lan743x_pcidev_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + struct lan743x_adapter *adapter = NULL; + struct net_device *netdev = NULL; + int ret = -ENODEV; + + if (id->device == PCI_DEVICE_ID_SMSC_A011 || + id->device == PCI_DEVICE_ID_SMSC_A041) { + netdev = devm_alloc_etherdev_mqs(&pdev->dev, + sizeof(struct lan743x_adapter), + PCI11X1X_USED_TX_CHANNELS, + LAN743X_USED_RX_CHANNELS); + } else { + netdev = devm_alloc_etherdev_mqs(&pdev->dev, + sizeof(struct lan743x_adapter), + LAN743X_USED_TX_CHANNELS, + LAN743X_USED_RX_CHANNELS); + } + + if (!netdev) + goto return_error; + + SET_NETDEV_DEV(netdev, &pdev->dev); + pci_set_drvdata(pdev, netdev); + adapter = netdev_priv(netdev); + adapter->netdev = netdev; + adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE | + NETIF_MSG_LINK | NETIF_MSG_IFUP | + NETIF_MSG_IFDOWN | NETIF_MSG_TX_QUEUED; + netdev->max_mtu = LAN743X_MAX_FRAME_SIZE; + + of_get_mac_address(pdev->dev.of_node, adapter->mac_address); + + ret = lan743x_pci_init(adapter, pdev); + if (ret) + goto return_error; + + ret = lan743x_csr_init(adapter); + if (ret) + goto cleanup_pci; + + ret = lan743x_hardware_init(adapter, pdev); + if (ret) + goto cleanup_pci; + + ret = lan743x_mdiobus_init(adapter); + if (ret) + goto cleanup_hardware; + + adapter->netdev->netdev_ops = &lan743x_netdev_ops; + adapter->netdev->ethtool_ops = &lan743x_ethtool_ops; + adapter->netdev->features = NETIF_F_SG | NETIF_F_TSO | + NETIF_F_HW_CSUM | NETIF_F_RXCSUM; + adapter->netdev->hw_features = adapter->netdev->features; + + /* carrier off reporting is important to ethtool even BEFORE open */ + netif_carrier_off(netdev); + + ret = register_netdev(adapter->netdev); + if (ret < 0) + goto cleanup_mdiobus; + return 0; + +cleanup_mdiobus: + lan743x_mdiobus_cleanup(adapter); + +cleanup_hardware: + lan743x_hardware_cleanup(adapter); + +cleanup_pci: + lan743x_pci_cleanup(adapter); + +return_error: + pr_warn("Initialization failed\n"); + return ret; +} + +/** + * lan743x_pcidev_remove - Device Removal Routine + * @pdev: PCI device information struct + * + * this is called by the PCI subsystem to alert the driver + * that it should release a PCI device. This could be caused by a + * Hot-Plug event, or because the driver is going to be removed from + * memory. + **/ +static void lan743x_pcidev_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct lan743x_adapter *adapter = netdev_priv(netdev); + + lan743x_full_cleanup(adapter); +} + +static void lan743x_pcidev_shutdown(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct lan743x_adapter *adapter = netdev_priv(netdev); + + rtnl_lock(); + netif_device_detach(netdev); + + /* close netdev when netdev is at running state. 
+ * For instance, it is true when system goes to sleep by pm-suspend + * However, it is false when system goes to sleep by suspend GUI menu + */ + if (netif_running(netdev)) + lan743x_netdev_close(netdev); + rtnl_unlock(); + +#ifdef CONFIG_PM + pci_save_state(pdev); +#endif + + /* clean up lan743x portion */ + lan743x_hardware_cleanup(adapter); +} + +#ifdef CONFIG_PM_SLEEP +static u16 lan743x_pm_wakeframe_crc16(const u8 *buf, int len) +{ + return bitrev16(crc16(0xFFFF, buf, len)); +} + +static void lan743x_pm_set_wol(struct lan743x_adapter *adapter) +{ + const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E }; + const u8 ipv6_multicast[3] = { 0x33, 0x33 }; + const u8 arp_type[2] = { 0x08, 0x06 }; + int mask_index; + u32 sopass; + u32 pmtctl; + u32 wucsr; + u32 macrx; + u16 crc; + + for (mask_index = 0; mask_index < MAC_NUM_OF_WUF_CFG; mask_index++) + lan743x_csr_write(adapter, MAC_WUF_CFG(mask_index), 0); + + /* clear wake settings */ + pmtctl = lan743x_csr_read(adapter, PMT_CTL); + pmtctl |= PMT_CTL_WUPS_MASK_; + pmtctl &= ~(PMT_CTL_GPIO_WAKEUP_EN_ | PMT_CTL_EEE_WAKEUP_EN_ | + PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_ | + PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_ | PMT_CTL_ETH_PHY_WAKE_EN_); + + macrx = lan743x_csr_read(adapter, MAC_RX); + + wucsr = 0; + mask_index = 0; + + pmtctl |= PMT_CTL_ETH_PHY_D3_COLD_OVR_ | PMT_CTL_ETH_PHY_D3_OVR_; + + if (adapter->wolopts & WAKE_PHY) { + pmtctl |= PMT_CTL_ETH_PHY_EDPD_PLL_CTL_; + pmtctl |= PMT_CTL_ETH_PHY_WAKE_EN_; + } + if (adapter->wolopts & WAKE_MAGIC) { + wucsr |= MAC_WUCSR_MPEN_; + macrx |= MAC_RX_RXEN_; + pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_; + } + if (adapter->wolopts & WAKE_UCAST) { + wucsr |= MAC_WUCSR_RFE_WAKE_EN_ | MAC_WUCSR_PFDA_EN_; + macrx |= MAC_RX_RXEN_; + pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_; + pmtctl |= PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_; + } + if (adapter->wolopts & WAKE_BCAST) { + wucsr |= MAC_WUCSR_RFE_WAKE_EN_ | MAC_WUCSR_BCST_EN_; + macrx |= MAC_RX_RXEN_; + pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_; + pmtctl |= PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_; + } + if (adapter->wolopts & WAKE_MCAST) { + /* IPv4 multicast */ + crc = lan743x_pm_wakeframe_crc16(ipv4_multicast, 3); + lan743x_csr_write(adapter, MAC_WUF_CFG(mask_index), + MAC_WUF_CFG_EN_ | MAC_WUF_CFG_TYPE_MCAST_ | + (0 << MAC_WUF_CFG_OFFSET_SHIFT_) | + (crc & MAC_WUF_CFG_CRC16_MASK_)); + lan743x_csr_write(adapter, MAC_WUF_MASK0(mask_index), 7); + lan743x_csr_write(adapter, MAC_WUF_MASK1(mask_index), 0); + lan743x_csr_write(adapter, MAC_WUF_MASK2(mask_index), 0); + lan743x_csr_write(adapter, MAC_WUF_MASK3(mask_index), 0); + mask_index++; + + /* IPv6 multicast */ + crc = lan743x_pm_wakeframe_crc16(ipv6_multicast, 2); + lan743x_csr_write(adapter, MAC_WUF_CFG(mask_index), + MAC_WUF_CFG_EN_ | MAC_WUF_CFG_TYPE_MCAST_ | + (0 << MAC_WUF_CFG_OFFSET_SHIFT_) | + (crc & MAC_WUF_CFG_CRC16_MASK_)); + lan743x_csr_write(adapter, MAC_WUF_MASK0(mask_index), 3); + lan743x_csr_write(adapter, MAC_WUF_MASK1(mask_index), 0); + lan743x_csr_write(adapter, MAC_WUF_MASK2(mask_index), 0); + lan743x_csr_write(adapter, MAC_WUF_MASK3(mask_index), 0); + mask_index++; + + wucsr |= MAC_WUCSR_RFE_WAKE_EN_ | MAC_WUCSR_WAKE_EN_; + macrx |= MAC_RX_RXEN_; + pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_; + pmtctl |= PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_; + } + if (adapter->wolopts & WAKE_ARP) { + /* set MAC_WUF_CFG & WUF_MASK + * for packettype (offset 12,13) = ARP (0x0806) + */ + crc = lan743x_pm_wakeframe_crc16(arp_type, 2); + lan743x_csr_write(adapter, MAC_WUF_CFG(mask_index), + 
MAC_WUF_CFG_EN_ | MAC_WUF_CFG_TYPE_ALL_ | + (0 << MAC_WUF_CFG_OFFSET_SHIFT_) | + (crc & MAC_WUF_CFG_CRC16_MASK_)); + lan743x_csr_write(adapter, MAC_WUF_MASK0(mask_index), 0x3000); + lan743x_csr_write(adapter, MAC_WUF_MASK1(mask_index), 0); + lan743x_csr_write(adapter, MAC_WUF_MASK2(mask_index), 0); + lan743x_csr_write(adapter, MAC_WUF_MASK3(mask_index), 0); + mask_index++; + + wucsr |= MAC_WUCSR_RFE_WAKE_EN_ | MAC_WUCSR_WAKE_EN_; + macrx |= MAC_RX_RXEN_; + pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_; + pmtctl |= PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_; + } + + if (adapter->wolopts & WAKE_MAGICSECURE) { + sopass = *(u32 *)adapter->sopass; + lan743x_csr_write(adapter, MAC_MP_SO_LO, sopass); + sopass = *(u16 *)&adapter->sopass[4]; + lan743x_csr_write(adapter, MAC_MP_SO_HI, sopass); + wucsr |= MAC_MP_SO_EN_; + } + + lan743x_csr_write(adapter, MAC_WUCSR, wucsr); + lan743x_csr_write(adapter, PMT_CTL, pmtctl); + lan743x_csr_write(adapter, MAC_RX, macrx); +} + +static int lan743x_pm_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct net_device *netdev = pci_get_drvdata(pdev); + struct lan743x_adapter *adapter = netdev_priv(netdev); + u32 data; + + lan743x_pcidev_shutdown(pdev); + + /* clear all wakes */ + lan743x_csr_write(adapter, MAC_WUCSR, 0); + lan743x_csr_write(adapter, MAC_WUCSR2, 0); + lan743x_csr_write(adapter, MAC_WK_SRC, 0xFFFFFFFF); + + if (adapter->wolopts) + lan743x_pm_set_wol(adapter); + + if (adapter->is_pci11x1x) { + /* Save HW_CFG to config again in PM resume */ + data = lan743x_csr_read(adapter, HW_CFG); + adapter->hw_cfg = data; + data |= (HW_CFG_RST_PROTECT_PCIE_ | + HW_CFG_D3_RESET_DIS_ | + HW_CFG_D3_VAUX_OVR_ | + HW_CFG_HOT_RESET_DIS_ | + HW_CFG_RST_PROTECT_); + lan743x_csr_write(adapter, HW_CFG, data); + } + + /* Host sets PME_En, put D3hot */ + return pci_prepare_to_sleep(pdev); +} + +static int lan743x_pm_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct net_device *netdev = pci_get_drvdata(pdev); + struct lan743x_adapter *adapter = netdev_priv(netdev); + int ret; + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + pci_save_state(pdev); + + /* Restore HW_CFG that was saved during pm suspend */ + if (adapter->is_pci11x1x) + lan743x_csr_write(adapter, HW_CFG, adapter->hw_cfg); + + ret = lan743x_hardware_init(adapter, pdev); + if (ret) { + netif_err(adapter, probe, adapter->netdev, + "lan743x_hardware_init returned %d\n", ret); + lan743x_pci_cleanup(adapter); + return ret; + } + + /* open netdev when netdev is at running state while resume. 
+ * For instance, it is true when system wakesup after pm-suspend + * However, it is false when system wakes up after suspend GUI menu + */ + if (netif_running(netdev)) + lan743x_netdev_open(netdev); + + netif_device_attach(netdev); + ret = lan743x_csr_read(adapter, MAC_WK_SRC); + netif_info(adapter, drv, adapter->netdev, + "Wakeup source : 0x%08X\n", ret); + + return 0; +} + +static const struct dev_pm_ops lan743x_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(lan743x_pm_suspend, lan743x_pm_resume) +}; +#endif /* CONFIG_PM_SLEEP */ + +static const struct pci_device_id lan743x_pcidev_tbl[] = { + { PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_LAN7430) }, + { PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_LAN7431) }, + { PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_A011) }, + { PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_A041) }, + { 0, } +}; + +MODULE_DEVICE_TABLE(pci, lan743x_pcidev_tbl); + +static struct pci_driver lan743x_pcidev_driver = { + .name = DRIVER_NAME, + .id_table = lan743x_pcidev_tbl, + .probe = lan743x_pcidev_probe, + .remove = lan743x_pcidev_remove, +#ifdef CONFIG_PM_SLEEP + .driver.pm = &lan743x_pm_ops, +#endif + .shutdown = lan743x_pcidev_shutdown, +}; + +module_pci_driver(lan743x_pcidev_driver); + +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h new file mode 100644 index 000000000..67877d3b6 --- /dev/null +++ b/drivers/net/ethernet/microchip/lan743x_main.h @@ -0,0 +1,1163 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (C) 2018 Microchip Technology Inc. */ + +#ifndef _LAN743X_H +#define _LAN743X_H + +#include <linux/phy.h> +#include "lan743x_ptp.h" + +#define DRIVER_AUTHOR "Bryan Whitehead <Bryan.Whitehead@microchip.com>" +#define DRIVER_DESC "LAN743x PCIe Gigabit Ethernet Driver" +#define DRIVER_NAME "lan743x" + +/* Register Definitions */ +#define ID_REV (0x00) +#define ID_REV_ID_MASK_ (0xFFFF0000) +#define ID_REV_ID_LAN7430_ (0x74300000) +#define ID_REV_ID_LAN7431_ (0x74310000) +#define ID_REV_ID_LAN743X_ (0x74300000) +#define ID_REV_ID_A011_ (0xA0110000) // PCI11010 +#define ID_REV_ID_A041_ (0xA0410000) // PCI11414 +#define ID_REV_ID_A0X1_ (0xA0010000) +#define ID_REV_IS_VALID_CHIP_ID_(id_rev) \ + ((((id_rev) & 0xFFF00000) == ID_REV_ID_LAN743X_) || \ + (((id_rev) & 0xFF0F0000) == ID_REV_ID_A0X1_)) +#define ID_REV_CHIP_REV_MASK_ (0x0000FFFF) +#define ID_REV_CHIP_REV_A0_ (0x00000000) +#define ID_REV_CHIP_REV_B0_ (0x00000010) + +#define FPGA_REV (0x04) +#define FPGA_REV_GET_MINOR_(fpga_rev) (((fpga_rev) >> 8) & 0x000000FF) +#define FPGA_REV_GET_MAJOR_(fpga_rev) ((fpga_rev) & 0x000000FF) +#define FPGA_SGMII_OP BIT(24) + +#define STRAP_READ (0x0C) +#define STRAP_READ_USE_SGMII_EN_ BIT(22) +#define STRAP_READ_SGMII_EN_ BIT(6) +#define STRAP_READ_SGMII_REFCLK_ BIT(5) +#define STRAP_READ_SGMII_2_5G_ BIT(4) +#define STRAP_READ_BASE_X_ BIT(3) +#define STRAP_READ_RGMII_TXC_DELAY_EN_ BIT(2) +#define STRAP_READ_RGMII_RXC_DELAY_EN_ BIT(1) +#define STRAP_READ_ADV_PM_DISABLE_ BIT(0) + +#define HW_CFG (0x010) +#define HW_CFG_RST_PROTECT_PCIE_ BIT(19) +#define HW_CFG_HOT_RESET_DIS_ BIT(15) +#define HW_CFG_D3_VAUX_OVR_ BIT(14) +#define HW_CFG_D3_RESET_DIS_ BIT(13) +#define HW_CFG_RST_PROTECT_ BIT(12) +#define HW_CFG_RELOAD_TYPE_ALL_ (0x00000FC0) +#define HW_CFG_EE_OTP_RELOAD_ BIT(4) +#define HW_CFG_LRST_ BIT(1) + +#define PMT_CTL (0x014) +#define PMT_CTL_ETH_PHY_D3_COLD_OVR_ BIT(27) +#define PMT_CTL_MAC_D3_RX_CLK_OVR_ 
BIT(25) +#define PMT_CTL_ETH_PHY_EDPD_PLL_CTL_ BIT(24) +#define PMT_CTL_ETH_PHY_D3_OVR_ BIT(23) +#define PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_ BIT(18) +#define PMT_CTL_GPIO_WAKEUP_EN_ BIT(15) +#define PMT_CTL_EEE_WAKEUP_EN_ BIT(13) +#define PMT_CTL_READY_ BIT(7) +#define PMT_CTL_ETH_PHY_RST_ BIT(4) +#define PMT_CTL_WOL_EN_ BIT(3) +#define PMT_CTL_ETH_PHY_WAKE_EN_ BIT(2) +#define PMT_CTL_WUPS_MASK_ (0x00000003) + +#define DP_SEL (0x024) +#define DP_SEL_DPRDY_ BIT(31) +#define DP_SEL_MASK_ (0x0000001F) +#define DP_SEL_RFE_RAM (0x00000001) + +#define DP_SEL_VHF_HASH_LEN (16) +#define DP_SEL_VHF_VLAN_LEN (128) + +#define DP_CMD (0x028) +#define DP_CMD_WRITE_ (0x00000001) + +#define DP_ADDR (0x02C) + +#define DP_DATA_0 (0x030) + +#define E2P_CMD (0x040) +#define E2P_CMD_EPC_BUSY_ BIT(31) +#define E2P_CMD_EPC_CMD_WRITE_ (0x30000000) +#define E2P_CMD_EPC_CMD_EWEN_ (0x20000000) +#define E2P_CMD_EPC_CMD_READ_ (0x00000000) +#define E2P_CMD_EPC_TIMEOUT_ BIT(10) +#define E2P_CMD_EPC_ADDR_MASK_ (0x000001FF) + +#define E2P_DATA (0x044) + +/* Hearthstone top level & System Reg Addresses */ +#define ETH_CTRL_REG_ADDR_BASE (0x0000) +#define ETH_SYS_REG_ADDR_BASE (0x4000) +#define CONFIG_REG_ADDR_BASE (0x0000) +#define ETH_EEPROM_REG_ADDR_BASE (0x0E00) +#define ETH_OTP_REG_ADDR_BASE (0x1000) +#define GEN_SYS_CONFIG_LOAD_STARTED_REG (0x0078) +#define ETH_SYS_CONFIG_LOAD_STARTED_REG (ETH_SYS_REG_ADDR_BASE + \ + CONFIG_REG_ADDR_BASE + \ + GEN_SYS_CONFIG_LOAD_STARTED_REG) +#define GEN_SYS_LOAD_STARTED_REG_ETH_ BIT(4) +#define SYS_LOCK_REG (0x00A0) +#define SYS_LOCK_REG_MAIN_LOCK_ BIT(7) +#define SYS_LOCK_REG_GEN_PERI_LOCK_ BIT(5) +#define SYS_LOCK_REG_SPI_PERI_LOCK_ BIT(4) +#define SYS_LOCK_REG_SMBUS_PERI_LOCK_ BIT(3) +#define SYS_LOCK_REG_UART_SS_LOCK_ BIT(2) +#define SYS_LOCK_REG_ENET_SS_LOCK_ BIT(1) +#define SYS_LOCK_REG_USB_SS_LOCK_ BIT(0) +#define ETH_SYSTEM_SYS_LOCK_REG (ETH_SYS_REG_ADDR_BASE + \ + CONFIG_REG_ADDR_BASE + \ + SYS_LOCK_REG) +#define HS_EEPROM_REG_ADDR_BASE (ETH_SYS_REG_ADDR_BASE + \ + ETH_EEPROM_REG_ADDR_BASE) +#define HS_E2P_CMD (HS_EEPROM_REG_ADDR_BASE + 0x0000) +#define HS_E2P_CMD_EPC_BUSY_ BIT(31) +#define HS_E2P_CMD_EPC_CMD_WRITE_ GENMASK(29, 28) +#define HS_E2P_CMD_EPC_CMD_READ_ (0x0) +#define HS_E2P_CMD_EPC_TIMEOUT_ BIT(17) +#define HS_E2P_CMD_EPC_ADDR_MASK_ GENMASK(15, 0) +#define HS_E2P_DATA (HS_EEPROM_REG_ADDR_BASE + 0x0004) +#define HS_E2P_DATA_MASK_ GENMASK(7, 0) +#define HS_E2P_CFG (HS_EEPROM_REG_ADDR_BASE + 0x0008) +#define HS_E2P_CFG_I2C_PULSE_MASK_ GENMASK(19, 16) +#define HS_E2P_CFG_EEPROM_SIZE_SEL_ BIT(12) +#define HS_E2P_CFG_I2C_BAUD_RATE_MASK_ GENMASK(9, 8) +#define HS_E2P_CFG_TEST_EEPR_TO_BYP_ BIT(0) +#define HS_E2P_PAD_CTL (HS_EEPROM_REG_ADDR_BASE + 0x000C) + +#define GPIO_CFG0 (0x050) +#define GPIO_CFG0_GPIO_DIR_BIT_(bit) BIT(16 + (bit)) +#define GPIO_CFG0_GPIO_DATA_BIT_(bit) BIT(0 + (bit)) + +#define GPIO_CFG1 (0x054) +#define GPIO_CFG1_GPIOEN_BIT_(bit) BIT(16 + (bit)) +#define GPIO_CFG1_GPIOBUF_BIT_(bit) BIT(0 + (bit)) + +#define GPIO_CFG2 (0x058) +#define GPIO_CFG2_1588_POL_BIT_(bit) BIT(0 + (bit)) + +#define GPIO_CFG3 (0x05C) +#define GPIO_CFG3_1588_CH_SEL_BIT_(bit) BIT(16 + (bit)) +#define GPIO_CFG3_1588_OE_BIT_(bit) BIT(0 + (bit)) + +#define FCT_RX_CTL (0xAC) +#define FCT_RX_CTL_EN_(channel) BIT(28 + (channel)) +#define FCT_RX_CTL_DIS_(channel) BIT(24 + (channel)) +#define FCT_RX_CTL_RESET_(channel) BIT(20 + (channel)) + +#define FCT_TX_CTL (0xC4) +#define FCT_TX_CTL_EN_(channel) BIT(28 + (channel)) +#define FCT_TX_CTL_DIS_(channel) BIT(24 + (channel)) +#define 
FCT_TX_CTL_RESET_(channel) BIT(20 + (channel)) + +#define FCT_FLOW(rx_channel) (0xE0 + ((rx_channel) << 2)) +#define FCT_FLOW_CTL_OFF_THRESHOLD_ (0x00007F00) +#define FCT_FLOW_CTL_OFF_THRESHOLD_SET_(value) \ + ((value << 8) & FCT_FLOW_CTL_OFF_THRESHOLD_) +#define FCT_FLOW_CTL_REQ_EN_ BIT(7) +#define FCT_FLOW_CTL_ON_THRESHOLD_ (0x0000007F) +#define FCT_FLOW_CTL_ON_THRESHOLD_SET_(value) \ + ((value << 0) & FCT_FLOW_CTL_ON_THRESHOLD_) + +#define MAC_CR (0x100) +#define MAC_CR_MII_EN_ BIT(19) +#define MAC_CR_EEE_EN_ BIT(17) +#define MAC_CR_ADD_ BIT(12) +#define MAC_CR_ASD_ BIT(11) +#define MAC_CR_CNTR_RST_ BIT(5) +#define MAC_CR_DPX_ BIT(3) +#define MAC_CR_CFG_H_ BIT(2) +#define MAC_CR_CFG_L_ BIT(1) +#define MAC_CR_RST_ BIT(0) + +#define MAC_RX (0x104) +#define MAC_RX_MAX_SIZE_SHIFT_ (16) +#define MAC_RX_MAX_SIZE_MASK_ (0x3FFF0000) +#define MAC_RX_RXD_ BIT(1) +#define MAC_RX_RXEN_ BIT(0) + +#define MAC_TX (0x108) +#define MAC_TX_TXD_ BIT(1) +#define MAC_TX_TXEN_ BIT(0) + +#define MAC_FLOW (0x10C) +#define MAC_FLOW_CR_TX_FCEN_ BIT(30) +#define MAC_FLOW_CR_RX_FCEN_ BIT(29) +#define MAC_FLOW_CR_FCPT_MASK_ (0x0000FFFF) + +#define MAC_RX_ADDRH (0x118) + +#define MAC_RX_ADDRL (0x11C) + +#define MAC_MII_ACC (0x120) +#define MAC_MII_ACC_MDC_CYCLE_SHIFT_ (16) +#define MAC_MII_ACC_MDC_CYCLE_MASK_ (0x00070000) +#define MAC_MII_ACC_MDC_CYCLE_2_5MHZ_ (0) +#define MAC_MII_ACC_MDC_CYCLE_5MHZ_ (1) +#define MAC_MII_ACC_MDC_CYCLE_12_5MHZ_ (2) +#define MAC_MII_ACC_MDC_CYCLE_25MHZ_ (3) +#define MAC_MII_ACC_MDC_CYCLE_1_25MHZ_ (4) +#define MAC_MII_ACC_PHY_ADDR_SHIFT_ (11) +#define MAC_MII_ACC_PHY_ADDR_MASK_ (0x0000F800) +#define MAC_MII_ACC_MIIRINDA_SHIFT_ (6) +#define MAC_MII_ACC_MIIRINDA_MASK_ (0x000007C0) +#define MAC_MII_ACC_MII_READ_ (0x00000000) +#define MAC_MII_ACC_MII_WRITE_ (0x00000002) +#define MAC_MII_ACC_MII_BUSY_ BIT(0) + +#define MAC_MII_ACC_MIIMMD_SHIFT_ (6) +#define MAC_MII_ACC_MIIMMD_MASK_ (0x000007C0) +#define MAC_MII_ACC_MIICL45_ BIT(3) +#define MAC_MII_ACC_MIICMD_MASK_ (0x00000006) +#define MAC_MII_ACC_MIICMD_ADDR_ (0x00000000) +#define MAC_MII_ACC_MIICMD_WRITE_ (0x00000002) +#define MAC_MII_ACC_MIICMD_READ_ (0x00000004) +#define MAC_MII_ACC_MIICMD_READ_INC_ (0x00000006) + +#define MAC_MII_DATA (0x124) + +#define MAC_EEE_TX_LPI_REQ_DLY_CNT (0x130) + +#define MAC_WUCSR (0x140) +#define MAC_MP_SO_EN_ BIT(21) +#define MAC_WUCSR_RFE_WAKE_EN_ BIT(14) +#define MAC_WUCSR_PFDA_EN_ BIT(3) +#define MAC_WUCSR_WAKE_EN_ BIT(2) +#define MAC_WUCSR_MPEN_ BIT(1) +#define MAC_WUCSR_BCST_EN_ BIT(0) + +#define MAC_WK_SRC (0x144) +#define MAC_MP_SO_HI (0x148) +#define MAC_MP_SO_LO (0x14C) + +#define MAC_WUF_CFG0 (0x150) +#define MAC_NUM_OF_WUF_CFG (32) +#define MAC_WUF_CFG_BEGIN (MAC_WUF_CFG0) +#define MAC_WUF_CFG(index) (MAC_WUF_CFG_BEGIN + (4 * (index))) +#define MAC_WUF_CFG_EN_ BIT(31) +#define MAC_WUF_CFG_TYPE_MCAST_ (0x02000000) +#define MAC_WUF_CFG_TYPE_ALL_ (0x01000000) +#define MAC_WUF_CFG_OFFSET_SHIFT_ (16) +#define MAC_WUF_CFG_CRC16_MASK_ (0x0000FFFF) + +#define MAC_WUF_MASK0_0 (0x200) +#define MAC_WUF_MASK0_1 (0x204) +#define MAC_WUF_MASK0_2 (0x208) +#define MAC_WUF_MASK0_3 (0x20C) +#define MAC_WUF_MASK0_BEGIN (MAC_WUF_MASK0_0) +#define MAC_WUF_MASK1_BEGIN (MAC_WUF_MASK0_1) +#define MAC_WUF_MASK2_BEGIN (MAC_WUF_MASK0_2) +#define MAC_WUF_MASK3_BEGIN (MAC_WUF_MASK0_3) +#define MAC_WUF_MASK0(index) (MAC_WUF_MASK0_BEGIN + (0x10 * (index))) +#define MAC_WUF_MASK1(index) (MAC_WUF_MASK1_BEGIN + (0x10 * (index))) +#define MAC_WUF_MASK2(index) (MAC_WUF_MASK2_BEGIN + (0x10 * (index))) +#define MAC_WUF_MASK3(index) 
(MAC_WUF_MASK3_BEGIN + (0x10 * (index))) + +/* offset 0x400 - 0x500, x may range from 0 to 32, for a total of 33 entries */ +#define RFE_ADDR_FILT_HI(x) (0x400 + (8 * (x))) +#define RFE_ADDR_FILT_HI_VALID_ BIT(31) + +/* offset 0x404 - 0x504, x may range from 0 to 32, for a total of 33 entries */ +#define RFE_ADDR_FILT_LO(x) (0x404 + (8 * (x))) + +#define RFE_CTL (0x508) +#define RFE_CTL_TCP_UDP_COE_ BIT(12) +#define RFE_CTL_IP_COE_ BIT(11) +#define RFE_CTL_AB_ BIT(10) +#define RFE_CTL_AM_ BIT(9) +#define RFE_CTL_AU_ BIT(8) +#define RFE_CTL_MCAST_HASH_ BIT(3) +#define RFE_CTL_DA_PERFECT_ BIT(1) + +#define RFE_RSS_CFG (0x554) +#define RFE_RSS_CFG_UDP_IPV6_EX_ BIT(16) +#define RFE_RSS_CFG_TCP_IPV6_EX_ BIT(15) +#define RFE_RSS_CFG_IPV6_EX_ BIT(14) +#define RFE_RSS_CFG_UDP_IPV6_ BIT(13) +#define RFE_RSS_CFG_TCP_IPV6_ BIT(12) +#define RFE_RSS_CFG_IPV6_ BIT(11) +#define RFE_RSS_CFG_UDP_IPV4_ BIT(10) +#define RFE_RSS_CFG_TCP_IPV4_ BIT(9) +#define RFE_RSS_CFG_IPV4_ BIT(8) +#define RFE_RSS_CFG_VALID_HASH_BITS_ (0x000000E0) +#define RFE_RSS_CFG_RSS_QUEUE_ENABLE_ BIT(2) +#define RFE_RSS_CFG_RSS_HASH_STORE_ BIT(1) +#define RFE_RSS_CFG_RSS_ENABLE_ BIT(0) + +#define RFE_HASH_KEY(index) (0x558 + (index << 2)) + +#define RFE_INDX(index) (0x580 + (index << 2)) + +#define MAC_WUCSR2 (0x600) + +#define SGMII_ACC (0x720) +#define SGMII_ACC_SGMII_BZY_ BIT(31) +#define SGMII_ACC_SGMII_WR_ BIT(30) +#define SGMII_ACC_SGMII_MMD_SHIFT_ (16) +#define SGMII_ACC_SGMII_MMD_MASK_ GENMASK(20, 16) +#define SGMII_ACC_SGMII_MMD_VSR_ BIT(15) +#define SGMII_ACC_SGMII_ADDR_SHIFT_ (0) +#define SGMII_ACC_SGMII_ADDR_MASK_ GENMASK(15, 0) +#define SGMII_DATA (0x724) +#define SGMII_DATA_SHIFT_ (0) +#define SGMII_DATA_MASK_ GENMASK(15, 0) +#define SGMII_CTL (0x728) +#define SGMII_CTL_SGMII_ENABLE_ BIT(31) +#define SGMII_CTL_LINK_STATUS_SOURCE_ BIT(8) +#define SGMII_CTL_SGMII_POWER_DN_ BIT(1) + +/* Vendor Specific SGMII MMD details */ +#define SR_VSMMD_PCS_ID1 0x0004 +#define SR_VSMMD_PCS_ID2 0x0005 +#define SR_VSMMD_STS 0x0008 +#define SR_VSMMD_CTRL 0x0009 + +#define VR_MII_DIG_CTRL1 0x8000 +#define VR_MII_DIG_CTRL1_VR_RST_ BIT(15) +#define VR_MII_DIG_CTRL1_R2TLBE_ BIT(14) +#define VR_MII_DIG_CTRL1_EN_VSMMD1_ BIT(13) +#define VR_MII_DIG_CTRL1_CS_EN_ BIT(10) +#define VR_MII_DIG_CTRL1_MAC_AUTO_SW_ BIT(9) +#define VR_MII_DIG_CTRL1_INIT_ BIT(8) +#define VR_MII_DIG_CTRL1_DTXLANED_0_ BIT(4) +#define VR_MII_DIG_CTRL1_CL37_TMR_OVR_RIDE_ BIT(3) +#define VR_MII_DIG_CTRL1_EN_2_5G_MODE_ BIT(2) +#define VR_MII_DIG_CTRL1_BYP_PWRUP_ BIT(1) +#define VR_MII_DIG_CTRL1_PHY_MODE_CTRL_ BIT(0) +#define VR_MII_AN_CTRL 0x8001 +#define VR_MII_AN_CTRL_MII_CTRL_ BIT(8) +#define VR_MII_AN_CTRL_SGMII_LINK_STS_ BIT(4) +#define VR_MII_AN_CTRL_TX_CONFIG_ BIT(3) +#define VR_MII_AN_CTRL_1000BASE_X_ (0) +#define VR_MII_AN_CTRL_SGMII_MODE_ (2) +#define VR_MII_AN_CTRL_QSGMII_MODE_ (3) +#define VR_MII_AN_CTRL_PCS_MODE_SHIFT_ (1) +#define VR_MII_AN_CTRL_PCS_MODE_MASK_ GENMASK(2, 1) +#define VR_MII_AN_CTRL_MII_AN_INTR_EN_ BIT(0) +#define VR_MII_AN_INTR_STS 0x8002 +#define VR_MII_AN_INTR_STS_LINK_UP_ BIT(4) +#define VR_MII_AN_INTR_STS_SPEED_MASK_ GENMASK(3, 2) +#define VR_MII_AN_INTR_STS_1000_MBPS_ BIT(3) +#define VR_MII_AN_INTR_STS_100_MBPS_ BIT(2) +#define VR_MII_AN_INTR_STS_10_MBPS_ (0) +#define VR_MII_AN_INTR_STS_FDX_ BIT(1) +#define VR_MII_AN_INTR_STS_CL37_ANCMPLT_INTR_ BIT(0) + +#define VR_MII_LINK_TIMER_CTRL 0x800A +#define VR_MII_DIG_STS 0x8010 +#define VR_MII_DIG_STS_PSEQ_STATE_MASK_ GENMASK(4, 2) +#define VR_MII_DIG_STS_PSEQ_STATE_POS_ (2) +#define 
VR_MII_GEN2_4_MPLL_CTRL0 0x8078 +#define VR_MII_MPLL_CTRL0_REF_CLK_DIV2_ BIT(12) +#define VR_MII_MPLL_CTRL0_USE_REFCLK_PAD_ BIT(4) +#define VR_MII_GEN2_4_MPLL_CTRL1 0x8079 +#define VR_MII_MPLL_CTRL1_MPLL_MULTIPLIER_ GENMASK(6, 0) +#define VR_MII_BAUD_RATE_3P125GBPS (3125) +#define VR_MII_BAUD_RATE_1P25GBPS (1250) +#define VR_MII_MPLL_MULTIPLIER_125 (125) +#define VR_MII_MPLL_MULTIPLIER_100 (100) +#define VR_MII_MPLL_MULTIPLIER_50 (50) +#define VR_MII_MPLL_MULTIPLIER_40 (40) +#define VR_MII_GEN2_4_MISC_CTRL1 0x809A +#define VR_MII_CTRL1_RX_RATE_0_MASK_ GENMASK(3, 2) +#define VR_MII_CTRL1_RX_RATE_0_SHIFT_ (2) +#define VR_MII_CTRL1_TX_RATE_0_MASK_ GENMASK(1, 0) +#define VR_MII_MPLL_BAUD_CLK (0) +#define VR_MII_MPLL_BAUD_CLK_DIV_2 (1) +#define VR_MII_MPLL_BAUD_CLK_DIV_4 (2) + +#define INT_STS (0x780) +#define INT_BIT_DMA_RX_(channel) BIT(24 + (channel)) +#define INT_BIT_ALL_RX_ (0x0F000000) +#define INT_BIT_DMA_TX_(channel) BIT(16 + (channel)) +#define INT_BIT_ALL_TX_ (0x000F0000) +#define INT_BIT_SW_GP_ BIT(9) +#define INT_BIT_1588_ BIT(7) +#define INT_BIT_ALL_OTHER_ (INT_BIT_SW_GP_ | INT_BIT_1588_) +#define INT_BIT_MAS_ BIT(0) + +#define INT_SET (0x784) + +#define INT_EN_SET (0x788) + +#define INT_EN_CLR (0x78C) + +#define INT_STS_R2C (0x790) + +#define INT_VEC_EN_SET (0x794) +#define INT_VEC_EN_CLR (0x798) +#define INT_VEC_EN_AUTO_CLR (0x79C) +#define INT_VEC_EN_(vector_index) BIT(0 + vector_index) + +#define INT_VEC_MAP0 (0x7A0) +#define INT_VEC_MAP0_RX_VEC_(channel, vector) \ + (((u32)(vector)) << ((channel) << 2)) + +#define INT_VEC_MAP1 (0x7A4) +#define INT_VEC_MAP1_TX_VEC_(channel, vector) \ + (((u32)(vector)) << ((channel) << 2)) + +#define INT_VEC_MAP2 (0x7A8) + +#define INT_MOD_MAP0 (0x7B0) + +#define INT_MOD_MAP1 (0x7B4) + +#define INT_MOD_MAP2 (0x7B8) + +#define INT_MOD_CFG0 (0x7C0) +#define INT_MOD_CFG1 (0x7C4) +#define INT_MOD_CFG2 (0x7C8) +#define INT_MOD_CFG3 (0x7CC) +#define INT_MOD_CFG4 (0x7D0) +#define INT_MOD_CFG5 (0x7D4) +#define INT_MOD_CFG6 (0x7D8) +#define INT_MOD_CFG7 (0x7DC) +#define INT_MOD_CFG8 (0x7E0) +#define INT_MOD_CFG9 (0x7E4) + +#define PTP_CMD_CTL (0x0A00) +#define PTP_CMD_CTL_PTP_LTC_TARGET_READ_ BIT(13) +#define PTP_CMD_CTL_PTP_CLK_STP_NSEC_ BIT(6) +#define PTP_CMD_CTL_PTP_CLOCK_STEP_SEC_ BIT(5) +#define PTP_CMD_CTL_PTP_CLOCK_LOAD_ BIT(4) +#define PTP_CMD_CTL_PTP_CLOCK_READ_ BIT(3) +#define PTP_CMD_CTL_PTP_ENABLE_ BIT(2) +#define PTP_CMD_CTL_PTP_DISABLE_ BIT(1) +#define PTP_CMD_CTL_PTP_RESET_ BIT(0) +#define PTP_GENERAL_CONFIG (0x0A04) +#define PTP_GENERAL_CONFIG_CLOCK_EVENT_X_MASK_(channel) \ + (0x7 << (1 + ((channel) << 2))) +#define PTP_GENERAL_CONFIG_CLOCK_EVENT_100NS_ (0) +#define PTP_GENERAL_CONFIG_CLOCK_EVENT_10US_ (1) +#define PTP_GENERAL_CONFIG_CLOCK_EVENT_100US_ (2) +#define PTP_GENERAL_CONFIG_CLOCK_EVENT_1MS_ (3) +#define PTP_GENERAL_CONFIG_CLOCK_EVENT_10MS_ (4) +#define PTP_GENERAL_CONFIG_CLOCK_EVENT_200MS_ (5) +#define PTP_GENERAL_CONFIG_CLOCK_EVENT_TOGGLE_ (6) +#define PTP_GENERAL_CONFIG_CLOCK_EVENT_X_SET_(channel, value) \ + (((value) & 0x7) << (1 + ((channel) << 2))) +#define PTP_GENERAL_CONFIG_RELOAD_ADD_X_(channel) (BIT((channel) << 2)) + +#define HS_PTP_GENERAL_CONFIG (0x0A04) +#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_X_MASK_(channel) \ + (0xf << (4 + ((channel) << 2))) +#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_100NS_ (0) +#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_500NS_ (1) +#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_1US_ (2) +#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_5US_ (3) +#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_10US_ (4) 
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_50US_ (5) +#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_100US_ (6) +#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_500US_ (7) +#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_1MS_ (8) +#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_5MS_ (9) +#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_10MS_ (10) +#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_50MS_ (11) +#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_100MS_ (12) +#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_200MS_ (13) +#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_TOGG_ (14) +#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_INT_ (15) +#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_X_SET_(channel, value) \ + (((value) & 0xf) << (4 + ((channel) << 2))) +#define HS_PTP_GENERAL_CONFIG_EVENT_POL_X_(channel) (BIT(1 + ((channel) * 2))) +#define HS_PTP_GENERAL_CONFIG_RELOAD_ADD_X_(channel) (BIT((channel) * 2)) + +#define PTP_INT_STS (0x0A08) +#define PTP_INT_IO_FE_MASK_ GENMASK(31, 24) +#define PTP_INT_IO_FE_SHIFT_ (24) +#define PTP_INT_IO_FE_SET_(channel) BIT(24 + (channel)) +#define PTP_INT_IO_RE_MASK_ GENMASK(23, 16) +#define PTP_INT_IO_RE_SHIFT_ (16) +#define PTP_INT_IO_RE_SET_(channel) BIT(16 + (channel)) +#define PTP_INT_TX_TS_OVRFL_INT_ BIT(14) +#define PTP_INT_TX_SWTS_ERR_INT_ BIT(13) +#define PTP_INT_TX_TS_INT_ BIT(12) +#define PTP_INT_RX_TS_OVRFL_INT_ BIT(9) +#define PTP_INT_RX_TS_INT_ BIT(8) +#define PTP_INT_TIMER_INT_B_ BIT(1) +#define PTP_INT_TIMER_INT_A_ BIT(0) +#define PTP_INT_EN_SET (0x0A0C) +#define PTP_INT_EN_FE_EN_SET_(channel) BIT(24 + (channel)) +#define PTP_INT_EN_RE_EN_SET_(channel) BIT(16 + (channel)) +#define PTP_INT_EN_TIMER_SET_(channel) BIT(channel) +#define PTP_INT_EN_CLR (0x0A10) +#define PTP_INT_EN_FE_EN_CLR_(channel) BIT(24 + (channel)) +#define PTP_INT_EN_RE_EN_CLR_(channel) BIT(16 + (channel)) +#define PTP_INT_BIT_TX_SWTS_ERR_ BIT(13) +#define PTP_INT_BIT_TX_TS_ BIT(12) +#define PTP_INT_BIT_TIMER_B_ BIT(1) +#define PTP_INT_BIT_TIMER_A_ BIT(0) + +#define PTP_CLOCK_SEC (0x0A14) +#define PTP_CLOCK_NS (0x0A18) +#define PTP_CLOCK_SUBNS (0x0A1C) +#define PTP_CLOCK_RATE_ADJ (0x0A20) +#define PTP_CLOCK_RATE_ADJ_DIR_ BIT(31) +#define PTP_CLOCK_STEP_ADJ (0x0A2C) +#define PTP_CLOCK_STEP_ADJ_DIR_ BIT(31) +#define PTP_CLOCK_STEP_ADJ_VALUE_MASK_ (0x3FFFFFFF) +#define PTP_CLOCK_TARGET_SEC_X(channel) (0x0A30 + ((channel) << 4)) +#define PTP_CLOCK_TARGET_NS_X(channel) (0x0A34 + ((channel) << 4)) +#define PTP_CLOCK_TARGET_RELOAD_SEC_X(channel) (0x0A38 + ((channel) << 4)) +#define PTP_CLOCK_TARGET_RELOAD_NS_X(channel) (0x0A3C + ((channel) << 4)) +#define PTP_LTC_SET_SEC_HI (0x0A50) +#define PTP_LTC_SET_SEC_HI_SEC_47_32_MASK_ GENMASK(15, 0) +#define PTP_VERSION (0x0A54) +#define PTP_VERSION_TX_UP_MASK_ GENMASK(31, 24) +#define PTP_VERSION_TX_LO_MASK_ GENMASK(23, 16) +#define PTP_VERSION_RX_UP_MASK_ GENMASK(15, 8) +#define PTP_VERSION_RX_LO_MASK_ GENMASK(7, 0) +#define PTP_IO_SEL (0x0A58) +#define PTP_IO_SEL_MASK_ GENMASK(10, 8) +#define PTP_IO_SEL_SHIFT_ (8) +#define PTP_LATENCY (0x0A5C) +#define PTP_LATENCY_TX_SET_(tx_latency) (((u32)(tx_latency)) << 16) +#define PTP_LATENCY_RX_SET_(rx_latency) \ + (((u32)(rx_latency)) & 0x0000FFFF) +#define PTP_CAP_INFO (0x0A60) +#define PTP_CAP_INFO_TX_TS_CNT_GET_(reg_val) (((reg_val) & 0x00000070) >> 4) + +#define PTP_TX_MOD (0x0AA4) +#define PTP_TX_MOD_TX_PTP_SYNC_TS_INSERT_ (0x10000000) + +#define PTP_TX_MOD2 (0x0AA8) +#define PTP_TX_MOD2_TX_PTP_CLR_UDPV4_CHKSUM_ (0x00000001) + +#define PTP_TX_EGRESS_SEC (0x0AAC) +#define PTP_TX_EGRESS_NS (0x0AB0) +#define PTP_TX_EGRESS_NS_CAPTURE_CAUSE_MASK_ 
(0xC0000000) +#define PTP_TX_EGRESS_NS_CAPTURE_CAUSE_AUTO_ (0x00000000) +#define PTP_TX_EGRESS_NS_CAPTURE_CAUSE_SW_ (0x40000000) +#define PTP_TX_EGRESS_NS_TS_NS_MASK_ (0x3FFFFFFF) + +#define PTP_TX_MSG_HEADER (0x0AB4) +#define PTP_TX_MSG_HEADER_MSG_TYPE_ (0x000F0000) +#define PTP_TX_MSG_HEADER_MSG_TYPE_SYNC_ (0x00000000) + +#define PTP_TX_CAP_INFO (0x0AB8) +#define PTP_TX_CAP_INFO_TX_CH_MASK_ GENMASK(1, 0) +#define PTP_TX_DOMAIN (0x0ABC) +#define PTP_TX_DOMAIN_MASK_ GENMASK(23, 16) +#define PTP_TX_DOMAIN_RANGE_EN_ BIT(15) +#define PTP_TX_DOMAIN_RANGE_MASK_ GENMASK(7, 0) +#define PTP_TX_SDOID (0x0AC0) +#define PTP_TX_SDOID_MASK_ GENMASK(23, 16) +#define PTP_TX_SDOID_RANGE_EN_ BIT(15) +#define PTP_TX_SDOID_11_0_MASK_ GENMASK(7, 0) +#define PTP_IO_CAP_CONFIG (0x0AC4) +#define PTP_IO_CAP_CONFIG_LOCK_FE_(channel) BIT(24 + (channel)) +#define PTP_IO_CAP_CONFIG_LOCK_RE_(channel) BIT(16 + (channel)) +#define PTP_IO_CAP_CONFIG_FE_CAP_EN_(channel) BIT(8 + (channel)) +#define PTP_IO_CAP_CONFIG_RE_CAP_EN_(channel) BIT(0 + (channel)) +#define PTP_IO_RE_LTC_SEC_CAP_X (0x0AC8) +#define PTP_IO_RE_LTC_NS_CAP_X (0x0ACC) +#define PTP_IO_FE_LTC_SEC_CAP_X (0x0AD0) +#define PTP_IO_FE_LTC_NS_CAP_X (0x0AD4) +#define PTP_IO_EVENT_OUTPUT_CFG (0x0AD8) +#define PTP_IO_EVENT_OUTPUT_CFG_SEL_(channel) BIT(16 + (channel)) +#define PTP_IO_EVENT_OUTPUT_CFG_EN_(channel) BIT(0 + (channel)) +#define PTP_IO_PIN_CFG (0x0ADC) +#define PTP_IO_PIN_CFG_OBUF_TYPE_(channel) BIT(0 + (channel)) +#define PTP_LTC_RD_SEC_HI (0x0AF0) +#define PTP_LTC_RD_SEC_HI_SEC_47_32_MASK_ GENMASK(15, 0) +#define PTP_LTC_RD_SEC_LO (0x0AF4) +#define PTP_LTC_RD_NS (0x0AF8) +#define PTP_LTC_RD_NS_29_0_MASK_ GENMASK(29, 0) +#define PTP_LTC_RD_SUBNS (0x0AFC) +#define PTP_RX_USER_MAC_HI (0x0B00) +#define PTP_RX_USER_MAC_HI_47_32_MASK_ GENMASK(15, 0) +#define PTP_RX_USER_MAC_LO (0x0B04) +#define PTP_RX_USER_IP_ADDR_0 (0x0B20) +#define PTP_RX_USER_IP_ADDR_1 (0x0B24) +#define PTP_RX_USER_IP_ADDR_2 (0x0B28) +#define PTP_RX_USER_IP_ADDR_3 (0x0B2C) +#define PTP_RX_USER_IP_MASK_0 (0x0B30) +#define PTP_RX_USER_IP_MASK_1 (0x0B34) +#define PTP_RX_USER_IP_MASK_2 (0x0B38) +#define PTP_RX_USER_IP_MASK_3 (0x0B3C) +#define PTP_TX_USER_MAC_HI (0x0B40) +#define PTP_TX_USER_MAC_HI_47_32_MASK_ GENMASK(15, 0) +#define PTP_TX_USER_MAC_LO (0x0B44) +#define PTP_TX_USER_IP_ADDR_0 (0x0B60) +#define PTP_TX_USER_IP_ADDR_1 (0x0B64) +#define PTP_TX_USER_IP_ADDR_2 (0x0B68) +#define PTP_TX_USER_IP_ADDR_3 (0x0B6C) +#define PTP_TX_USER_IP_MASK_0 (0x0B70) +#define PTP_TX_USER_IP_MASK_1 (0x0B74) +#define PTP_TX_USER_IP_MASK_2 (0x0B78) +#define PTP_TX_USER_IP_MASK_3 (0x0B7C) + +#define DMAC_CFG (0xC00) +#define DMAC_CFG_COAL_EN_ BIT(16) +#define DMAC_CFG_CH_ARB_SEL_RX_HIGH_ (0x00000000) +#define DMAC_CFG_MAX_READ_REQ_MASK_ (0x00000070) +#define DMAC_CFG_MAX_READ_REQ_SET_(val) \ + ((((u32)(val)) << 4) & DMAC_CFG_MAX_READ_REQ_MASK_) +#define DMAC_CFG_MAX_DSPACE_16_ (0x00000000) +#define DMAC_CFG_MAX_DSPACE_32_ (0x00000001) +#define DMAC_CFG_MAX_DSPACE_64_ BIT(1) +#define DMAC_CFG_MAX_DSPACE_128_ (0x00000003) + +#define DMAC_COAL_CFG (0xC04) +#define DMAC_COAL_CFG_TIMER_LIMIT_MASK_ (0xFFF00000) +#define DMAC_COAL_CFG_TIMER_LIMIT_SET_(val) \ + ((((u32)(val)) << 20) & DMAC_COAL_CFG_TIMER_LIMIT_MASK_) +#define DMAC_COAL_CFG_TIMER_TX_START_ BIT(19) +#define DMAC_COAL_CFG_FLUSH_INTS_ BIT(18) +#define DMAC_COAL_CFG_INT_EXIT_COAL_ BIT(17) +#define DMAC_COAL_CFG_CSR_EXIT_COAL_ BIT(16) +#define DMAC_COAL_CFG_TX_THRES_MASK_ (0x0000FF00) +#define DMAC_COAL_CFG_TX_THRES_SET_(val) \ + ((((u32)(val)) << 8) & 
DMAC_COAL_CFG_TX_THRES_MASK_) +#define DMAC_COAL_CFG_RX_THRES_MASK_ (0x000000FF) +#define DMAC_COAL_CFG_RX_THRES_SET_(val) \ + (((u32)(val)) & DMAC_COAL_CFG_RX_THRES_MASK_) + +#define DMAC_OBFF_CFG (0xC08) +#define DMAC_OBFF_TX_THRES_MASK_ (0x0000FF00) +#define DMAC_OBFF_TX_THRES_SET_(val) \ + ((((u32)(val)) << 8) & DMAC_OBFF_TX_THRES_MASK_) +#define DMAC_OBFF_RX_THRES_MASK_ (0x000000FF) +#define DMAC_OBFF_RX_THRES_SET_(val) \ + (((u32)(val)) & DMAC_OBFF_RX_THRES_MASK_) + +#define DMAC_CMD (0xC0C) +#define DMAC_CMD_SWR_ BIT(31) +#define DMAC_CMD_TX_SWR_(channel) BIT(24 + (channel)) +#define DMAC_CMD_START_T_(channel) BIT(20 + (channel)) +#define DMAC_CMD_STOP_T_(channel) BIT(16 + (channel)) +#define DMAC_CMD_RX_SWR_(channel) BIT(8 + (channel)) +#define DMAC_CMD_START_R_(channel) BIT(4 + (channel)) +#define DMAC_CMD_STOP_R_(channel) BIT(0 + (channel)) + +#define DMAC_INT_STS (0xC10) +#define DMAC_INT_EN_SET (0xC14) +#define DMAC_INT_EN_CLR (0xC18) +#define DMAC_INT_BIT_RXFRM_(channel) BIT(16 + (channel)) +#define DMAC_INT_BIT_TX_IOC_(channel) BIT(0 + (channel)) + +#define RX_CFG_A(channel) (0xC40 + ((channel) << 6)) +#define RX_CFG_A_RX_WB_ON_INT_TMR_ BIT(30) +#define RX_CFG_A_RX_WB_THRES_MASK_ (0x1F000000) +#define RX_CFG_A_RX_WB_THRES_SET_(val) \ + ((((u32)(val)) << 24) & RX_CFG_A_RX_WB_THRES_MASK_) +#define RX_CFG_A_RX_PF_THRES_MASK_ (0x001F0000) +#define RX_CFG_A_RX_PF_THRES_SET_(val) \ + ((((u32)(val)) << 16) & RX_CFG_A_RX_PF_THRES_MASK_) +#define RX_CFG_A_RX_PF_PRI_THRES_MASK_ (0x00001F00) +#define RX_CFG_A_RX_PF_PRI_THRES_SET_(val) \ + ((((u32)(val)) << 8) & RX_CFG_A_RX_PF_PRI_THRES_MASK_) +#define RX_CFG_A_RX_HP_WB_EN_ BIT(5) + +#define RX_CFG_B(channel) (0xC44 + ((channel) << 6)) +#define RX_CFG_B_TS_ALL_RX_ BIT(29) +#define RX_CFG_B_RX_PAD_MASK_ (0x03000000) +#define RX_CFG_B_RX_PAD_0_ (0x00000000) +#define RX_CFG_B_RX_PAD_2_ (0x02000000) +#define RX_CFG_B_RDMABL_512_ (0x00040000) +#define RX_CFG_B_RX_RING_LEN_MASK_ (0x0000FFFF) + +#define RX_BASE_ADDRH(channel) (0xC48 + ((channel) << 6)) + +#define RX_BASE_ADDRL(channel) (0xC4C + ((channel) << 6)) + +#define RX_HEAD_WRITEBACK_ADDRH(channel) (0xC50 + ((channel) << 6)) + +#define RX_HEAD_WRITEBACK_ADDRL(channel) (0xC54 + ((channel) << 6)) + +#define RX_HEAD(channel) (0xC58 + ((channel) << 6)) + +#define RX_TAIL(channel) (0xC5C + ((channel) << 6)) +#define RX_TAIL_SET_TOP_INT_EN_ BIT(30) +#define RX_TAIL_SET_TOP_INT_VEC_EN_ BIT(29) + +#define RX_CFG_C(channel) (0xC64 + ((channel) << 6)) +#define RX_CFG_C_RX_TOP_INT_EN_AUTO_CLR_ BIT(6) +#define RX_CFG_C_RX_INT_EN_R2C_ BIT(4) +#define RX_CFG_C_RX_DMA_INT_STS_AUTO_CLR_ BIT(3) +#define RX_CFG_C_RX_INT_STS_R2C_MODE_MASK_ (0x00000007) + +#define TX_CFG_A(channel) (0xD40 + ((channel) << 6)) +#define TX_CFG_A_TX_HP_WB_ON_INT_TMR_ BIT(30) +#define TX_CFG_A_TX_TMR_HPWB_SEL_IOC_ (0x10000000) +#define TX_CFG_A_TX_PF_THRES_MASK_ (0x001F0000) +#define TX_CFG_A_TX_PF_THRES_SET_(value) \ + ((((u32)(value)) << 16) & TX_CFG_A_TX_PF_THRES_MASK_) +#define TX_CFG_A_TX_PF_PRI_THRES_MASK_ (0x00001F00) +#define TX_CFG_A_TX_PF_PRI_THRES_SET_(value) \ + ((((u32)(value)) << 8) & TX_CFG_A_TX_PF_PRI_THRES_MASK_) +#define TX_CFG_A_TX_HP_WB_EN_ BIT(5) +#define TX_CFG_A_TX_HP_WB_THRES_MASK_ (0x0000000F) +#define TX_CFG_A_TX_HP_WB_THRES_SET_(value) \ + (((u32)(value)) & TX_CFG_A_TX_HP_WB_THRES_MASK_) + +#define TX_CFG_B(channel) (0xD44 + ((channel) << 6)) +#define TX_CFG_B_TDMABL_512_ (0x00040000) +#define TX_CFG_B_TX_RING_LEN_MASK_ (0x0000FFFF) + +#define TX_BASE_ADDRH(channel) (0xD48 + ((channel) << 6)) + 
+#define TX_BASE_ADDRL(channel) (0xD4C + ((channel) << 6)) + +#define TX_HEAD_WRITEBACK_ADDRH(channel) (0xD50 + ((channel) << 6)) + +#define TX_HEAD_WRITEBACK_ADDRL(channel) (0xD54 + ((channel) << 6)) + +#define TX_HEAD(channel) (0xD58 + ((channel) << 6)) + +#define TX_TAIL(channel) (0xD5C + ((channel) << 6)) +#define TX_TAIL_SET_DMAC_INT_EN_ BIT(31) +#define TX_TAIL_SET_TOP_INT_EN_ BIT(30) +#define TX_TAIL_SET_TOP_INT_VEC_EN_ BIT(29) + +#define TX_CFG_C(channel) (0xD64 + ((channel) << 6)) +#define TX_CFG_C_TX_TOP_INT_EN_AUTO_CLR_ BIT(6) +#define TX_CFG_C_TX_DMA_INT_EN_AUTO_CLR_ BIT(5) +#define TX_CFG_C_TX_INT_EN_R2C_ BIT(4) +#define TX_CFG_C_TX_DMA_INT_STS_AUTO_CLR_ BIT(3) +#define TX_CFG_C_TX_INT_STS_R2C_MODE_MASK_ (0x00000007) + +#define OTP_PWR_DN (0x1000) +#define OTP_PWR_DN_PWRDN_N_ BIT(0) + +#define OTP_ADDR_HIGH (0x1004) +#define OTP_ADDR_LOW (0x1008) + +#define OTP_PRGM_DATA (0x1010) + +#define OTP_PRGM_MODE (0x1014) +#define OTP_PRGM_MODE_BYTE_ BIT(0) + +#define OTP_READ_DATA (0x1018) + +#define OTP_FUNC_CMD (0x1020) +#define OTP_FUNC_CMD_READ_ BIT(0) + +#define OTP_TST_CMD (0x1024) +#define OTP_TST_CMD_PRGVRFY_ BIT(3) + +#define OTP_CMD_GO (0x1028) +#define OTP_CMD_GO_GO_ BIT(0) + +#define OTP_STATUS (0x1030) +#define OTP_STATUS_BUSY_ BIT(0) + +/* Hearthstone OTP block registers */ +#define HS_OTP_BLOCK_BASE (ETH_SYS_REG_ADDR_BASE + \ + ETH_OTP_REG_ADDR_BASE) +#define HS_OTP_PWR_DN (HS_OTP_BLOCK_BASE + 0x0) +#define HS_OTP_ADDR_HIGH (HS_OTP_BLOCK_BASE + 0x4) +#define HS_OTP_ADDR_LOW (HS_OTP_BLOCK_BASE + 0x8) +#define HS_OTP_PRGM_DATA (HS_OTP_BLOCK_BASE + 0x10) +#define HS_OTP_PRGM_MODE (HS_OTP_BLOCK_BASE + 0x14) +#define HS_OTP_READ_DATA (HS_OTP_BLOCK_BASE + 0x18) +#define HS_OTP_FUNC_CMD (HS_OTP_BLOCK_BASE + 0x20) +#define HS_OTP_TST_CMD (HS_OTP_BLOCK_BASE + 0x24) +#define HS_OTP_CMD_GO (HS_OTP_BLOCK_BASE + 0x28) +#define HS_OTP_STATUS (HS_OTP_BLOCK_BASE + 0x30) + +/* MAC statistics registers */ +#define STAT_RX_FCS_ERRORS (0x1200) +#define STAT_RX_ALIGNMENT_ERRORS (0x1204) +#define STAT_RX_FRAGMENT_ERRORS (0x1208) +#define STAT_RX_JABBER_ERRORS (0x120C) +#define STAT_RX_UNDERSIZE_FRAME_ERRORS (0x1210) +#define STAT_RX_OVERSIZE_FRAME_ERRORS (0x1214) +#define STAT_RX_DROPPED_FRAMES (0x1218) +#define STAT_RX_UNICAST_BYTE_COUNT (0x121C) +#define STAT_RX_BROADCAST_BYTE_COUNT (0x1220) +#define STAT_RX_MULTICAST_BYTE_COUNT (0x1224) +#define STAT_RX_UNICAST_FRAMES (0x1228) +#define STAT_RX_BROADCAST_FRAMES (0x122C) +#define STAT_RX_MULTICAST_FRAMES (0x1230) +#define STAT_RX_PAUSE_FRAMES (0x1234) +#define STAT_RX_64_BYTE_FRAMES (0x1238) +#define STAT_RX_65_127_BYTE_FRAMES (0x123C) +#define STAT_RX_128_255_BYTE_FRAMES (0x1240) +#define STAT_RX_256_511_BYTES_FRAMES (0x1244) +#define STAT_RX_512_1023_BYTE_FRAMES (0x1248) +#define STAT_RX_1024_1518_BYTE_FRAMES (0x124C) +#define STAT_RX_GREATER_1518_BYTE_FRAMES (0x1250) +#define STAT_RX_TOTAL_FRAMES (0x1254) +#define STAT_EEE_RX_LPI_TRANSITIONS (0x1258) +#define STAT_EEE_RX_LPI_TIME (0x125C) +#define STAT_RX_COUNTER_ROLLOVER_STATUS (0x127C) + +#define STAT_TX_FCS_ERRORS (0x1280) +#define STAT_TX_EXCESS_DEFERRAL_ERRORS (0x1284) +#define STAT_TX_CARRIER_ERRORS (0x1288) +#define STAT_TX_BAD_BYTE_COUNT (0x128C) +#define STAT_TX_SINGLE_COLLISIONS (0x1290) +#define STAT_TX_MULTIPLE_COLLISIONS (0x1294) +#define STAT_TX_EXCESSIVE_COLLISION (0x1298) +#define STAT_TX_LATE_COLLISIONS (0x129C) +#define STAT_TX_UNICAST_BYTE_COUNT (0x12A0) +#define STAT_TX_BROADCAST_BYTE_COUNT (0x12A4) +#define STAT_TX_MULTICAST_BYTE_COUNT (0x12A8) +#define 
STAT_TX_UNICAST_FRAMES (0x12AC) +#define STAT_TX_BROADCAST_FRAMES (0x12B0) +#define STAT_TX_MULTICAST_FRAMES (0x12B4) +#define STAT_TX_PAUSE_FRAMES (0x12B8) +#define STAT_TX_64_BYTE_FRAMES (0x12BC) +#define STAT_TX_65_127_BYTE_FRAMES (0x12C0) +#define STAT_TX_128_255_BYTE_FRAMES (0x12C4) +#define STAT_TX_256_511_BYTES_FRAMES (0x12C8) +#define STAT_TX_512_1023_BYTE_FRAMES (0x12CC) +#define STAT_TX_1024_1518_BYTE_FRAMES (0x12D0) +#define STAT_TX_GREATER_1518_BYTE_FRAMES (0x12D4) +#define STAT_TX_TOTAL_FRAMES (0x12D8) +#define STAT_EEE_TX_LPI_TRANSITIONS (0x12DC) +#define STAT_EEE_TX_LPI_TIME (0x12E0) +#define STAT_TX_COUNTER_ROLLOVER_STATUS (0x12FC) + +/* End of Register definitions */ + +#define LAN743X_MAX_RX_CHANNELS (4) +#define LAN743X_MAX_TX_CHANNELS (1) +#define PCI11X1X_MAX_TX_CHANNELS (4) +struct lan743x_adapter; + +#define LAN743X_USED_RX_CHANNELS (4) +#define LAN743X_USED_TX_CHANNELS (1) +#define PCI11X1X_USED_TX_CHANNELS (4) +#define LAN743X_INT_MOD (400) + +#if (LAN743X_USED_RX_CHANNELS > LAN743X_MAX_RX_CHANNELS) +#error Invalid LAN743X_USED_RX_CHANNELS +#endif +#if (LAN743X_USED_TX_CHANNELS > LAN743X_MAX_TX_CHANNELS) +#error Invalid LAN743X_USED_TX_CHANNELS +#endif +#if (PCI11X1X_USED_TX_CHANNELS > PCI11X1X_MAX_TX_CHANNELS) +#error Invalid PCI11X1X_USED_TX_CHANNELS +#endif + +/* PCI */ +/* SMSC acquired EFAR late 1990's, MCHP acquired SMSC 2012 */ +#define PCI_VENDOR_ID_SMSC PCI_VENDOR_ID_EFAR +#define PCI_DEVICE_ID_SMSC_LAN7430 (0x7430) +#define PCI_DEVICE_ID_SMSC_LAN7431 (0x7431) +#define PCI_DEVICE_ID_SMSC_A011 (0xA011) +#define PCI_DEVICE_ID_SMSC_A041 (0xA041) + +#define PCI_CONFIG_LENGTH (0x1000) + +/* CSR */ +#define CSR_LENGTH (0x2000) + +#define LAN743X_CSR_FLAG_IS_A0 BIT(0) +#define LAN743X_CSR_FLAG_IS_B0 BIT(1) +#define LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR BIT(8) + +struct lan743x_csr { + u32 flags; + u8 __iomem *csr_address; + u32 id_rev; + u32 fpga_rev; +}; + +/* INTERRUPTS */ +typedef void(*lan743x_vector_handler)(void *context, u32 int_sts, u32 flags); + +#define LAN743X_VECTOR_FLAG_IRQ_SHARED BIT(0) +#define LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ BIT(1) +#define LAN743X_VECTOR_FLAG_SOURCE_STATUS_R2C BIT(2) +#define LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C BIT(3) +#define LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK BIT(4) +#define LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR BIT(5) +#define LAN743X_VECTOR_FLAG_SOURCE_ENABLE_R2C BIT(6) +#define LAN743X_VECTOR_FLAG_MASTER_ENABLE_CLEAR BIT(7) +#define LAN743X_VECTOR_FLAG_MASTER_ENABLE_SET BIT(8) +#define LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_CLEAR BIT(9) +#define LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_SET BIT(10) +#define LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_CLEAR BIT(11) +#define LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET BIT(12) +#define LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR BIT(13) +#define LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET BIT(14) +#define LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR BIT(15) + +struct lan743x_vector { + int irq; + u32 flags; + struct lan743x_adapter *adapter; + int vector_index; + u32 int_mask; + lan743x_vector_handler handler; + void *context; +}; + +#define LAN743X_MAX_VECTOR_COUNT (8) +#define PCI11X1X_MAX_VECTOR_COUNT (16) + +struct lan743x_intr { + int flags; + + unsigned int irq; + + struct lan743x_vector vector_list[PCI11X1X_MAX_VECTOR_COUNT]; + int number_of_vectors; + bool using_vectors; + + bool software_isr_flag; + wait_queue_head_t software_isr_wq; +}; + +#define LAN743X_MAX_FRAME_SIZE (9 * 1024) + +/* PHY */ +struct lan743x_phy { + bool fc_autoneg; + u8 
fc_request_control; +}; + +/* TX */ +struct lan743x_tx_descriptor; +struct lan743x_tx_buffer_info; + +#define GPIO_QUEUE_STARTED (0) +#define GPIO_TX_FUNCTION (1) +#define GPIO_TX_COMPLETION (2) +#define GPIO_TX_FRAGMENT (3) + +#define TX_FRAME_FLAG_IN_PROGRESS BIT(0) + +#define TX_TS_FLAG_TIMESTAMPING_ENABLED BIT(0) +#define TX_TS_FLAG_ONE_STEP_SYNC BIT(1) + +struct lan743x_tx { + struct lan743x_adapter *adapter; + u32 ts_flags; + u32 vector_flags; + int channel_number; + + int ring_size; + size_t ring_allocation_size; + struct lan743x_tx_descriptor *ring_cpu_ptr; + dma_addr_t ring_dma_ptr; + /* ring_lock: used to prevent concurrent access to tx ring */ + spinlock_t ring_lock; + u32 frame_flags; + u32 frame_first; + u32 frame_data0; + u32 frame_tail; + + struct lan743x_tx_buffer_info *buffer_info; + + __le32 *head_cpu_ptr; + dma_addr_t head_dma_ptr; + int last_head; + int last_tail; + + struct napi_struct napi; + u32 frame_count; + u32 rqd_descriptors; +}; + +void lan743x_tx_set_timestamping_mode(struct lan743x_tx *tx, + bool enable_timestamping, + bool enable_onestep_sync); + +/* RX */ +struct lan743x_rx_descriptor; +struct lan743x_rx_buffer_info; + +struct lan743x_rx { + struct lan743x_adapter *adapter; + u32 vector_flags; + int channel_number; + + int ring_size; + size_t ring_allocation_size; + struct lan743x_rx_descriptor *ring_cpu_ptr; + dma_addr_t ring_dma_ptr; + + struct lan743x_rx_buffer_info *buffer_info; + + __le32 *head_cpu_ptr; + dma_addr_t head_dma_ptr; + u32 last_head; + u32 last_tail; + + struct napi_struct napi; + + u32 frame_count; + + struct sk_buff *skb_head, *skb_tail; +}; + +/* SGMII Link Speed Duplex status */ +enum lan743x_sgmii_lsd { + POWER_DOWN = 0, + LINK_DOWN, + ANEG_BUSY, + LINK_10HD, + LINK_10FD, + LINK_100HD, + LINK_100FD, + LINK_1000_MASTER, + LINK_1000_SLAVE, + LINK_2500_MASTER, + LINK_2500_SLAVE +}; + +struct lan743x_adapter { + struct net_device *netdev; + struct mii_bus *mdiobus; + int msg_enable; +#ifdef CONFIG_PM + u32 wolopts; + u8 sopass[SOPASS_MAX]; +#endif + struct pci_dev *pdev; + struct lan743x_csr csr; + struct lan743x_intr intr; + + struct lan743x_gpio gpio; + struct lan743x_ptp ptp; + + u8 mac_address[ETH_ALEN]; + + struct lan743x_phy phy; + struct lan743x_tx tx[PCI11X1X_USED_TX_CHANNELS]; + struct lan743x_rx rx[LAN743X_USED_RX_CHANNELS]; + bool is_pci11x1x; + bool is_sgmii_en; + /* protect ethernet syslock */ + spinlock_t eth_syslock_spinlock; + bool eth_syslock_en; + u32 eth_syslock_acquire_cnt; + struct mutex sgmii_rw_lock; + /* SGMII Link Speed & Duplex status */ + enum lan743x_sgmii_lsd sgmii_lsd; + u8 max_tx_channels; + u8 used_tx_channels; + u8 max_vector_count; + +#define LAN743X_ADAPTER_FLAG_OTP BIT(0) + u32 flags; + u32 hw_cfg; +}; + +#define LAN743X_COMPONENT_FLAG_RX(channel) BIT(20 + (channel)) + +#define INTR_FLAG_IRQ_REQUESTED(vector_index) BIT(0 + vector_index) +#define INTR_FLAG_MSI_ENABLED BIT(8) +#define INTR_FLAG_MSIX_ENABLED BIT(9) + +#define MAC_MII_READ 1 +#define MAC_MII_WRITE 0 + +#define PHY_FLAG_OPENED BIT(0) +#define PHY_FLAG_ATTACHED BIT(1) + +#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT +#define DMA_ADDR_HIGH32(dma_addr) ((u32)(((dma_addr) >> 32) & 0xFFFFFFFF)) +#else +#define DMA_ADDR_HIGH32(dma_addr) ((u32)(0)) +#endif +#define DMA_ADDR_LOW32(dma_addr) ((u32)((dma_addr) & 0xFFFFFFFF)) +#define DMA_DESCRIPTOR_SPACING_16 (16) +#define DMA_DESCRIPTOR_SPACING_32 (32) +#define DMA_DESCRIPTOR_SPACING_64 (64) +#define DMA_DESCRIPTOR_SPACING_128 (128) +#define DEFAULT_DMA_DESCRIPTOR_SPACING (L1_CACHE_BYTES) + +#define 
DMAC_CHANNEL_STATE_SET(start_bit, stop_bit) \ + (((start_bit) ? 2 : 0) | ((stop_bit) ? 1 : 0)) +#define DMAC_CHANNEL_STATE_INITIAL DMAC_CHANNEL_STATE_SET(0, 0) +#define DMAC_CHANNEL_STATE_STARTED DMAC_CHANNEL_STATE_SET(1, 0) +#define DMAC_CHANNEL_STATE_STOP_PENDING DMAC_CHANNEL_STATE_SET(1, 1) +#define DMAC_CHANNEL_STATE_STOPPED DMAC_CHANNEL_STATE_SET(0, 1) + +/* TX Descriptor bits */ +#define TX_DESC_DATA0_DTYPE_MASK_ (0xC0000000) +#define TX_DESC_DATA0_DTYPE_DATA_ (0x00000000) +#define TX_DESC_DATA0_DTYPE_EXT_ (0x40000000) +#define TX_DESC_DATA0_FS_ (0x20000000) +#define TX_DESC_DATA0_LS_ (0x10000000) +#define TX_DESC_DATA0_EXT_ (0x08000000) +#define TX_DESC_DATA0_IOC_ (0x04000000) +#define TX_DESC_DATA0_ICE_ (0x00400000) +#define TX_DESC_DATA0_IPE_ (0x00200000) +#define TX_DESC_DATA0_TPE_ (0x00100000) +#define TX_DESC_DATA0_FCS_ (0x00020000) +#define TX_DESC_DATA0_TSE_ (0x00010000) +#define TX_DESC_DATA0_BUF_LENGTH_MASK_ (0x0000FFFF) +#define TX_DESC_DATA0_EXT_LSO_ (0x00200000) +#define TX_DESC_DATA0_EXT_PAY_LENGTH_MASK_ (0x000FFFFF) +#define TX_DESC_DATA3_FRAME_LENGTH_MSS_MASK_ (0x3FFF0000) + +struct lan743x_tx_descriptor { + __le32 data0; + __le32 data1; + __le32 data2; + __le32 data3; +} __aligned(DEFAULT_DMA_DESCRIPTOR_SPACING); + +#define TX_BUFFER_INFO_FLAG_ACTIVE BIT(0) +#define TX_BUFFER_INFO_FLAG_TIMESTAMP_REQUESTED BIT(1) +#define TX_BUFFER_INFO_FLAG_IGNORE_SYNC BIT(2) +#define TX_BUFFER_INFO_FLAG_SKB_FRAGMENT BIT(3) +struct lan743x_tx_buffer_info { + int flags; + struct sk_buff *skb; + dma_addr_t dma_ptr; + unsigned int buffer_length; +}; + +#define LAN743X_TX_RING_SIZE (128) + +/* OWN bit is set. ie, Descs are owned by RX DMAC */ +#define RX_DESC_DATA0_OWN_ (0x00008000) +/* OWN bit is clear. ie, Descs are owned by host */ +#define RX_DESC_DATA0_FS_ (0x80000000) +#define RX_DESC_DATA0_LS_ (0x40000000) +#define RX_DESC_DATA0_FRAME_LENGTH_MASK_ (0x3FFF0000) +#define RX_DESC_DATA0_FRAME_LENGTH_GET_(data0) \ + (((data0) & RX_DESC_DATA0_FRAME_LENGTH_MASK_) >> 16) +#define RX_DESC_DATA0_EXT_ (0x00004000) +#define RX_DESC_DATA0_BUF_LENGTH_MASK_ (0x00003FFF) +#define RX_DESC_DATA1_STATUS_ICE_ (0x00020000) +#define RX_DESC_DATA1_STATUS_TCE_ (0x00010000) +#define RX_DESC_DATA1_STATUS_ICSM_ (0x00000001) +#define RX_DESC_DATA2_TS_NS_MASK_ (0x3FFFFFFF) + +#if ((NET_IP_ALIGN != 0) && (NET_IP_ALIGN != 2)) +#error NET_IP_ALIGN must be 0 or 2 +#endif + +#define RX_HEAD_PADDING NET_IP_ALIGN + +struct lan743x_rx_descriptor { + __le32 data0; + __le32 data1; + __le32 data2; + __le32 data3; +} __aligned(DEFAULT_DMA_DESCRIPTOR_SPACING); + +#define RX_BUFFER_INFO_FLAG_ACTIVE BIT(0) +struct lan743x_rx_buffer_info { + int flags; + struct sk_buff *skb; + + dma_addr_t dma_ptr; + unsigned int buffer_length; +}; + +#define LAN743X_RX_RING_SIZE (128) + +#define RX_PROCESS_RESULT_NOTHING_TO_DO (0) +#define RX_PROCESS_RESULT_BUFFER_RECEIVED (1) + +u32 lan743x_csr_read(struct lan743x_adapter *adapter, int offset); +void lan743x_csr_write(struct lan743x_adapter *adapter, int offset, u32 data); +int lan743x_hs_syslock_acquire(struct lan743x_adapter *adapter, u16 timeout); +void lan743x_hs_syslock_release(struct lan743x_adapter *adapter); + +#endif /* _LAN743X_H */ diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.c b/drivers/net/ethernet/microchip/lan743x_ptp.c new file mode 100644 index 000000000..da3ea905a --- /dev/null +++ b/drivers/net/ethernet/microchip/lan743x_ptp.c @@ -0,0 +1,1834 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (C) 2018 Microchip Technology Inc. 
*/ + +#include <linux/netdevice.h> + +#include <linux/ptp_clock_kernel.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/net_tstamp.h> +#include "lan743x_main.h" + +#include "lan743x_ptp.h" + +#define LAN743X_LED0_ENABLE 20 /* LED0 offset in HW_CFG */ +#define LAN743X_LED_ENABLE(pin) BIT(LAN743X_LED0_ENABLE + (pin)) + +#define LAN743X_PTP_MAX_FREQ_ADJ_IN_PPB (31249999) +#define LAN743X_PTP_MAX_FINE_ADJ_IN_SCALED_PPM (2047999934) + +static bool lan743x_ptp_is_enabled(struct lan743x_adapter *adapter); +static void lan743x_ptp_enable(struct lan743x_adapter *adapter); +static void lan743x_ptp_disable(struct lan743x_adapter *adapter); +static void lan743x_ptp_reset(struct lan743x_adapter *adapter); +static void lan743x_ptp_clock_set(struct lan743x_adapter *adapter, + u32 seconds, u32 nano_seconds, + u32 sub_nano_seconds); + +static int lan743x_get_channel(u32 ch_map) +{ + int idx; + + for (idx = 0; idx < 32; idx++) { + if (ch_map & (0x1 << idx)) + return idx; + } + + return -EINVAL; +} + +int lan743x_gpio_init(struct lan743x_adapter *adapter) +{ + struct lan743x_gpio *gpio = &adapter->gpio; + + spin_lock_init(&gpio->gpio_lock); + + gpio->gpio_cfg0 = 0; /* set all direction to input, data = 0 */ + gpio->gpio_cfg1 = 0x0FFF0000;/* disable all gpio, set to open drain */ + gpio->gpio_cfg2 = 0;/* set all to 1588 low polarity level */ + gpio->gpio_cfg3 = 0;/* disable all 1588 output */ + lan743x_csr_write(adapter, GPIO_CFG0, gpio->gpio_cfg0); + lan743x_csr_write(adapter, GPIO_CFG1, gpio->gpio_cfg1); + lan743x_csr_write(adapter, GPIO_CFG2, gpio->gpio_cfg2); + lan743x_csr_write(adapter, GPIO_CFG3, gpio->gpio_cfg3); + + return 0; +} + +static void lan743x_ptp_wait_till_cmd_done(struct lan743x_adapter *adapter, + u32 bit_mask) +{ + int timeout = 1000; + u32 data = 0; + + while (timeout && + (data = (lan743x_csr_read(adapter, PTP_CMD_CTL) & + bit_mask))) { + usleep_range(1000, 20000); + timeout--; + } + if (data) { + netif_err(adapter, drv, adapter->netdev, + "timeout waiting for cmd to be done, cmd = 0x%08X\n", + bit_mask); + } +} + +static void lan743x_ptp_tx_ts_enqueue_ts(struct lan743x_adapter *adapter, + u32 seconds, u32 nano_seconds, + u32 header) +{ + struct lan743x_ptp *ptp = &adapter->ptp; + + spin_lock_bh(&ptp->tx_ts_lock); + if (ptp->tx_ts_queue_size < LAN743X_PTP_NUMBER_OF_TX_TIMESTAMPS) { + ptp->tx_ts_seconds_queue[ptp->tx_ts_queue_size] = seconds; + ptp->tx_ts_nseconds_queue[ptp->tx_ts_queue_size] = nano_seconds; + ptp->tx_ts_header_queue[ptp->tx_ts_queue_size] = header; + ptp->tx_ts_queue_size++; + } else { + netif_err(adapter, drv, adapter->netdev, + "tx ts queue overflow\n"); + } + spin_unlock_bh(&ptp->tx_ts_lock); +} + +static void lan743x_ptp_tx_ts_complete(struct lan743x_adapter *adapter) +{ + struct lan743x_ptp *ptp = &adapter->ptp; + struct skb_shared_hwtstamps tstamps; + u32 header, nseconds, seconds; + bool ignore_sync = false; + struct sk_buff *skb; + int c, i; + + spin_lock_bh(&ptp->tx_ts_lock); + c = ptp->tx_ts_skb_queue_size; + + if (c > ptp->tx_ts_queue_size) + c = ptp->tx_ts_queue_size; + if (c <= 0) + goto done; + + for (i = 0; i < c; i++) { + ignore_sync = ((ptp->tx_ts_ignore_sync_queue & + BIT(i)) != 0); + skb = ptp->tx_ts_skb_queue[i]; + nseconds = ptp->tx_ts_nseconds_queue[i]; + seconds = ptp->tx_ts_seconds_queue[i]; + header = ptp->tx_ts_header_queue[i]; + + memset(&tstamps, 0, sizeof(tstamps)); + tstamps.hwtstamp = ktime_set(seconds, nseconds); + if (!ignore_sync || + ((header & PTP_TX_MSG_HEADER_MSG_TYPE_) != + PTP_TX_MSG_HEADER_MSG_TYPE_SYNC_)) + 
skb_tstamp_tx(skb, &tstamps); + + dev_kfree_skb(skb); + + ptp->tx_ts_skb_queue[i] = NULL; + ptp->tx_ts_seconds_queue[i] = 0; + ptp->tx_ts_nseconds_queue[i] = 0; + ptp->tx_ts_header_queue[i] = 0; + } + + /* shift queue */ + ptp->tx_ts_ignore_sync_queue >>= c; + for (i = c; i < LAN743X_PTP_NUMBER_OF_TX_TIMESTAMPS; i++) { + ptp->tx_ts_skb_queue[i - c] = ptp->tx_ts_skb_queue[i]; + ptp->tx_ts_seconds_queue[i - c] = ptp->tx_ts_seconds_queue[i]; + ptp->tx_ts_nseconds_queue[i - c] = ptp->tx_ts_nseconds_queue[i]; + ptp->tx_ts_header_queue[i - c] = ptp->tx_ts_header_queue[i]; + + ptp->tx_ts_skb_queue[i] = NULL; + ptp->tx_ts_seconds_queue[i] = 0; + ptp->tx_ts_nseconds_queue[i] = 0; + ptp->tx_ts_header_queue[i] = 0; + } + ptp->tx_ts_skb_queue_size -= c; + ptp->tx_ts_queue_size -= c; +done: + ptp->pending_tx_timestamps -= c; + spin_unlock_bh(&ptp->tx_ts_lock); +} + +static int lan743x_ptp_reserve_event_ch(struct lan743x_adapter *adapter, + int event_channel) +{ + struct lan743x_ptp *ptp = &adapter->ptp; + int result = -ENODEV; + + mutex_lock(&ptp->command_lock); + if (!(test_bit(event_channel, &ptp->used_event_ch))) { + ptp->used_event_ch |= BIT(event_channel); + result = event_channel; + } else { + netif_warn(adapter, drv, adapter->netdev, + "attempted to reserved a used event_channel = %d\n", + event_channel); + } + mutex_unlock(&ptp->command_lock); + return result; +} + +static void lan743x_ptp_release_event_ch(struct lan743x_adapter *adapter, + int event_channel) +{ + struct lan743x_ptp *ptp = &adapter->ptp; + + mutex_lock(&ptp->command_lock); + if (test_bit(event_channel, &ptp->used_event_ch)) { + ptp->used_event_ch &= ~BIT(event_channel); + } else { + netif_warn(adapter, drv, adapter->netdev, + "attempted release on a not used event_channel = %d\n", + event_channel); + } + mutex_unlock(&ptp->command_lock); +} + +static void lan743x_ptp_clock_get(struct lan743x_adapter *adapter, + u32 *seconds, u32 *nano_seconds, + u32 *sub_nano_seconds); +static void lan743x_ptp_io_clock_get(struct lan743x_adapter *adapter, + u32 *sec, u32 *nsec, u32 *sub_nsec); +static void lan743x_ptp_clock_step(struct lan743x_adapter *adapter, + s64 time_step_ns); + +static void lan743x_led_mux_enable(struct lan743x_adapter *adapter, + int pin, bool enable) +{ + struct lan743x_ptp *ptp = &adapter->ptp; + + if (ptp->leds_multiplexed && + ptp->led_enabled[pin]) { + u32 val = lan743x_csr_read(adapter, HW_CFG); + + if (enable) + val |= LAN743X_LED_ENABLE(pin); + else + val &= ~LAN743X_LED_ENABLE(pin); + + lan743x_csr_write(adapter, HW_CFG, val); + } +} + +static void lan743x_led_mux_save(struct lan743x_adapter *adapter) +{ + struct lan743x_ptp *ptp = &adapter->ptp; + u32 id_rev = adapter->csr.id_rev & ID_REV_ID_MASK_; + + if (id_rev == ID_REV_ID_LAN7430_) { + int i; + u32 val = lan743x_csr_read(adapter, HW_CFG); + + for (i = 0; i < LAN7430_N_LED; i++) { + bool led_enabled = (val & LAN743X_LED_ENABLE(i)) != 0; + + ptp->led_enabled[i] = led_enabled; + } + ptp->leds_multiplexed = true; + } else { + ptp->leds_multiplexed = false; + } +} + +static void lan743x_led_mux_restore(struct lan743x_adapter *adapter) +{ + u32 id_rev = adapter->csr.id_rev & ID_REV_ID_MASK_; + + if (id_rev == ID_REV_ID_LAN7430_) { + int i; + + for (i = 0; i < LAN7430_N_LED; i++) + lan743x_led_mux_enable(adapter, i, true); + } +} + +static int lan743x_gpio_rsrv_ptp_out(struct lan743x_adapter *adapter, + int pin, int event_channel) +{ + struct lan743x_gpio *gpio = &adapter->gpio; + unsigned long irq_flags = 0; + int bit_mask = BIT(pin); + int ret = -EBUSY; + + 
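+ /* Descriptive note: the pin is claimed and switched to 1588 event output
+  * under gpio_lock below; if the pin is already in use, ret is left at
+  * -EBUSY and no registers are touched.
+  */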
spin_lock_irqsave(&gpio->gpio_lock, irq_flags); + + if (!(gpio->used_bits & bit_mask)) { + gpio->used_bits |= bit_mask; + gpio->output_bits |= bit_mask; + gpio->ptp_bits |= bit_mask; + + /* assign pin to GPIO function */ + lan743x_led_mux_enable(adapter, pin, false); + + /* set as output, and zero initial value */ + gpio->gpio_cfg0 |= GPIO_CFG0_GPIO_DIR_BIT_(pin); + gpio->gpio_cfg0 &= ~GPIO_CFG0_GPIO_DATA_BIT_(pin); + lan743x_csr_write(adapter, GPIO_CFG0, gpio->gpio_cfg0); + + /* enable gpio, and set buffer type to push pull */ + gpio->gpio_cfg1 &= ~GPIO_CFG1_GPIOEN_BIT_(pin); + gpio->gpio_cfg1 |= GPIO_CFG1_GPIOBUF_BIT_(pin); + lan743x_csr_write(adapter, GPIO_CFG1, gpio->gpio_cfg1); + + /* set 1588 polarity to high */ + gpio->gpio_cfg2 |= GPIO_CFG2_1588_POL_BIT_(pin); + lan743x_csr_write(adapter, GPIO_CFG2, gpio->gpio_cfg2); + + if (event_channel == 0) { + /* use channel A */ + gpio->gpio_cfg3 &= ~GPIO_CFG3_1588_CH_SEL_BIT_(pin); + } else { + /* use channel B */ + gpio->gpio_cfg3 |= GPIO_CFG3_1588_CH_SEL_BIT_(pin); + } + gpio->gpio_cfg3 |= GPIO_CFG3_1588_OE_BIT_(pin); + lan743x_csr_write(adapter, GPIO_CFG3, gpio->gpio_cfg3); + + ret = pin; + } + spin_unlock_irqrestore(&gpio->gpio_lock, irq_flags); + return ret; +} + +static void lan743x_gpio_release(struct lan743x_adapter *adapter, int pin) +{ + struct lan743x_gpio *gpio = &adapter->gpio; + unsigned long irq_flags = 0; + int bit_mask = BIT(pin); + + spin_lock_irqsave(&gpio->gpio_lock, irq_flags); + if (gpio->used_bits & bit_mask) { + gpio->used_bits &= ~bit_mask; + if (gpio->output_bits & bit_mask) { + gpio->output_bits &= ~bit_mask; + + if (gpio->ptp_bits & bit_mask) { + gpio->ptp_bits &= ~bit_mask; + /* disable ptp output */ + gpio->gpio_cfg3 &= ~GPIO_CFG3_1588_OE_BIT_(pin); + lan743x_csr_write(adapter, GPIO_CFG3, + gpio->gpio_cfg3); + } + /* release gpio output */ + + /* disable gpio */ + gpio->gpio_cfg1 |= GPIO_CFG1_GPIOEN_BIT_(pin); + gpio->gpio_cfg1 &= ~GPIO_CFG1_GPIOBUF_BIT_(pin); + lan743x_csr_write(adapter, GPIO_CFG1, gpio->gpio_cfg1); + + /* reset back to input */ + gpio->gpio_cfg0 &= ~GPIO_CFG0_GPIO_DIR_BIT_(pin); + gpio->gpio_cfg0 &= ~GPIO_CFG0_GPIO_DATA_BIT_(pin); + lan743x_csr_write(adapter, GPIO_CFG0, gpio->gpio_cfg0); + + /* assign pin to original function */ + lan743x_led_mux_enable(adapter, pin, true); + } + } + spin_unlock_irqrestore(&gpio->gpio_lock, irq_flags); +} + +static int lan743x_ptpci_adjfine(struct ptp_clock_info *ptpci, long scaled_ppm) +{ + struct lan743x_ptp *ptp = + container_of(ptpci, struct lan743x_ptp, ptp_clock_info); + struct lan743x_adapter *adapter = + container_of(ptp, struct lan743x_adapter, ptp); + u32 lan743x_rate_adj = 0; + bool positive = true; + u64 u64_delta = 0; + + if ((scaled_ppm < (-LAN743X_PTP_MAX_FINE_ADJ_IN_SCALED_PPM)) || + scaled_ppm > LAN743X_PTP_MAX_FINE_ADJ_IN_SCALED_PPM) { + return -EINVAL; + } + if (scaled_ppm > 0) { + u64_delta = (u64)scaled_ppm; + positive = true; + } else { + u64_delta = (u64)(-scaled_ppm); + positive = false; + } + u64_delta = (u64_delta << 19); + lan743x_rate_adj = div_u64(u64_delta, 1000000); + + if (positive) + lan743x_rate_adj |= PTP_CLOCK_RATE_ADJ_DIR_; + + lan743x_csr_write(adapter, PTP_CLOCK_RATE_ADJ, + lan743x_rate_adj); + + return 0; +} + +static int lan743x_ptpci_adjfreq(struct ptp_clock_info *ptpci, s32 delta_ppb) +{ + struct lan743x_ptp *ptp = + container_of(ptpci, struct lan743x_ptp, ptp_clock_info); + struct lan743x_adapter *adapter = + container_of(ptp, struct lan743x_adapter, ptp); + u32 lan743x_rate_adj = 0; + bool positive = true; + u32 
u32_delta = 0; + u64 u64_delta = 0; + + if ((delta_ppb < (-LAN743X_PTP_MAX_FREQ_ADJ_IN_PPB)) || + delta_ppb > LAN743X_PTP_MAX_FREQ_ADJ_IN_PPB) { + return -EINVAL; + } + if (delta_ppb > 0) { + u32_delta = (u32)delta_ppb; + positive = true; + } else { + u32_delta = (u32)(-delta_ppb); + positive = false; + } + u64_delta = (((u64)u32_delta) << 35); + lan743x_rate_adj = div_u64(u64_delta, 1000000000); + + if (positive) + lan743x_rate_adj |= PTP_CLOCK_RATE_ADJ_DIR_; + + lan743x_csr_write(adapter, PTP_CLOCK_RATE_ADJ, + lan743x_rate_adj); + + return 0; +} + +static int lan743x_ptpci_adjtime(struct ptp_clock_info *ptpci, s64 delta) +{ + struct lan743x_ptp *ptp = + container_of(ptpci, struct lan743x_ptp, ptp_clock_info); + struct lan743x_adapter *adapter = + container_of(ptp, struct lan743x_adapter, ptp); + + lan743x_ptp_clock_step(adapter, delta); + + return 0; +} + +static int lan743x_ptpci_gettime64(struct ptp_clock_info *ptpci, + struct timespec64 *ts) +{ + struct lan743x_ptp *ptp = + container_of(ptpci, struct lan743x_ptp, ptp_clock_info); + struct lan743x_adapter *adapter = + container_of(ptp, struct lan743x_adapter, ptp); + u32 nano_seconds = 0; + u32 seconds = 0; + + if (adapter->is_pci11x1x) + lan743x_ptp_io_clock_get(adapter, &seconds, &nano_seconds, + NULL); + else + lan743x_ptp_clock_get(adapter, &seconds, &nano_seconds, NULL); + ts->tv_sec = seconds; + ts->tv_nsec = nano_seconds; + + return 0; +} + +static int lan743x_ptpci_settime64(struct ptp_clock_info *ptpci, + const struct timespec64 *ts) +{ + struct lan743x_ptp *ptp = + container_of(ptpci, struct lan743x_ptp, ptp_clock_info); + struct lan743x_adapter *adapter = + container_of(ptp, struct lan743x_adapter, ptp); + u32 nano_seconds = 0; + u32 seconds = 0; + + if (ts) { + if (ts->tv_sec > 0xFFFFFFFFLL || + ts->tv_sec < 0) { + netif_warn(adapter, drv, adapter->netdev, + "ts->tv_sec out of range, %lld\n", + ts->tv_sec); + return -ERANGE; + } + if (ts->tv_nsec >= 1000000000L || + ts->tv_nsec < 0) { + netif_warn(adapter, drv, adapter->netdev, + "ts->tv_nsec out of range, %ld\n", + ts->tv_nsec); + return -ERANGE; + } + seconds = ts->tv_sec; + nano_seconds = ts->tv_nsec; + lan743x_ptp_clock_set(adapter, seconds, nano_seconds, 0); + } else { + netif_warn(adapter, drv, adapter->netdev, "ts == NULL\n"); + return -EINVAL; + } + + return 0; +} + +static void lan743x_ptp_perout_off(struct lan743x_adapter *adapter, + unsigned int index) +{ + struct lan743x_ptp *ptp = &adapter->ptp; + u32 general_config = 0; + struct lan743x_ptp_perout *perout = &ptp->perout[index]; + + if (perout->gpio_pin >= 0) { + lan743x_gpio_release(adapter, perout->gpio_pin); + perout->gpio_pin = -1; + } + + if (perout->event_ch >= 0) { + /* set target to far in the future, effectively disabling it */ + lan743x_csr_write(adapter, + PTP_CLOCK_TARGET_SEC_X(perout->event_ch), + 0xFFFF0000); + lan743x_csr_write(adapter, + PTP_CLOCK_TARGET_NS_X(perout->event_ch), + 0); + + general_config = lan743x_csr_read(adapter, PTP_GENERAL_CONFIG); + general_config |= PTP_GENERAL_CONFIG_RELOAD_ADD_X_ + (perout->event_ch); + lan743x_csr_write(adapter, PTP_GENERAL_CONFIG, general_config); + lan743x_ptp_release_event_ch(adapter, perout->event_ch); + perout->event_ch = -1; + } +} + +static int lan743x_ptp_perout(struct lan743x_adapter *adapter, int on, + struct ptp_perout_request *perout_request) +{ + struct lan743x_ptp *ptp = &adapter->ptp; + u32 period_sec = 0, period_nsec = 0; + u32 start_sec = 0, start_nsec = 0; + u32 general_config = 0; + int pulse_width = 0; + int perout_pin = 0; + unsigned 
int index = perout_request->index; + struct lan743x_ptp_perout *perout = &ptp->perout[index]; + int ret = 0; + + /* Reject requests with unsupported flags */ + if (perout_request->flags & ~PTP_PEROUT_DUTY_CYCLE) + return -EOPNOTSUPP; + + if (on) { + perout_pin = ptp_find_pin(ptp->ptp_clock, PTP_PF_PEROUT, + perout_request->index); + if (perout_pin < 0) + return -EBUSY; + } else { + lan743x_ptp_perout_off(adapter, index); + return 0; + } + + if (perout->event_ch >= 0 || + perout->gpio_pin >= 0) { + /* already on, turn off first */ + lan743x_ptp_perout_off(adapter, index); + } + + perout->event_ch = lan743x_ptp_reserve_event_ch(adapter, index); + + if (perout->event_ch < 0) { + netif_warn(adapter, drv, adapter->netdev, + "Failed to reserve event channel %d for PEROUT\n", + index); + ret = -EBUSY; + goto failed; + } + + perout->gpio_pin = lan743x_gpio_rsrv_ptp_out(adapter, + perout_pin, + perout->event_ch); + + if (perout->gpio_pin < 0) { + netif_warn(adapter, drv, adapter->netdev, + "Failed to reserve gpio %d for PEROUT\n", + perout_pin); + ret = -EBUSY; + goto failed; + } + + start_sec = perout_request->start.sec; + start_sec += perout_request->start.nsec / 1000000000; + start_nsec = perout_request->start.nsec % 1000000000; + + period_sec = perout_request->period.sec; + period_sec += perout_request->period.nsec / 1000000000; + period_nsec = perout_request->period.nsec % 1000000000; + + if (perout_request->flags & PTP_PEROUT_DUTY_CYCLE) { + struct timespec64 ts_on, ts_period; + s64 wf_high, period64, half; + s32 reminder; + + ts_on.tv_sec = perout_request->on.sec; + ts_on.tv_nsec = perout_request->on.nsec; + wf_high = timespec64_to_ns(&ts_on); + ts_period.tv_sec = perout_request->period.sec; + ts_period.tv_nsec = perout_request->period.nsec; + period64 = timespec64_to_ns(&ts_period); + + if (period64 < 200) { + netif_warn(adapter, drv, adapter->netdev, + "perout period too small, minimum is 200nS\n"); + ret = -EOPNOTSUPP; + goto failed; + } + if (wf_high >= period64) { + netif_warn(adapter, drv, adapter->netdev, + "pulse width must be smaller than period\n"); + ret = -EINVAL; + goto failed; + } + + /* Check if we can do 50% toggle on an even value of period. + * If the period number is odd, then check if the requested + * pulse width is the same as one of pre-defined width values. + * Otherwise, return failure. + */ + half = div_s64_rem(period64, 2, &reminder); + if (!reminder) { + if (half == wf_high) { + /* It's 50% match. 
Use the toggle option */ + pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_TOGGLE_; + /* In this case, devide period value by 2 */ + ts_period = ns_to_timespec64(div_s64(period64, 2)); + period_sec = ts_period.tv_sec; + period_nsec = ts_period.tv_nsec; + + goto program; + } + } + /* if we can't do toggle, then the width option needs to be the exact match */ + if (wf_high == 200000000) { + pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_200MS_; + } else if (wf_high == 10000000) { + pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_10MS_; + } else if (wf_high == 1000000) { + pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_1MS_; + } else if (wf_high == 100000) { + pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_100US_; + } else if (wf_high == 10000) { + pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_10US_; + } else if (wf_high == 100) { + pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_100NS_; + } else { + netif_warn(adapter, drv, adapter->netdev, + "duty cycle specified is not supported\n"); + ret = -EOPNOTSUPP; + goto failed; + } + } else { + if (period_sec == 0) { + if (period_nsec >= 400000000) { + pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_200MS_; + } else if (period_nsec >= 20000000) { + pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_10MS_; + } else if (period_nsec >= 2000000) { + pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_1MS_; + } else if (period_nsec >= 200000) { + pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_100US_; + } else if (period_nsec >= 20000) { + pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_10US_; + } else if (period_nsec >= 200) { + pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_100NS_; + } else { + netif_warn(adapter, drv, adapter->netdev, + "perout period too small, minimum is 200nS\n"); + ret = -EOPNOTSUPP; + goto failed; + } + } else { + pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_200MS_; + } + } +program: + + /* turn off by setting target far in future */ + lan743x_csr_write(adapter, + PTP_CLOCK_TARGET_SEC_X(perout->event_ch), + 0xFFFF0000); + lan743x_csr_write(adapter, + PTP_CLOCK_TARGET_NS_X(perout->event_ch), 0); + + /* Configure to pulse every period */ + general_config = lan743x_csr_read(adapter, PTP_GENERAL_CONFIG); + general_config &= ~(PTP_GENERAL_CONFIG_CLOCK_EVENT_X_MASK_ + (perout->event_ch)); + general_config |= PTP_GENERAL_CONFIG_CLOCK_EVENT_X_SET_ + (perout->event_ch, pulse_width); + general_config &= ~PTP_GENERAL_CONFIG_RELOAD_ADD_X_ + (perout->event_ch); + lan743x_csr_write(adapter, PTP_GENERAL_CONFIG, general_config); + + /* set the reload to one toggle cycle */ + lan743x_csr_write(adapter, + PTP_CLOCK_TARGET_RELOAD_SEC_X(perout->event_ch), + period_sec); + lan743x_csr_write(adapter, + PTP_CLOCK_TARGET_RELOAD_NS_X(perout->event_ch), + period_nsec); + + /* set the start time */ + lan743x_csr_write(adapter, + PTP_CLOCK_TARGET_SEC_X(perout->event_ch), + start_sec); + lan743x_csr_write(adapter, + PTP_CLOCK_TARGET_NS_X(perout->event_ch), + start_nsec); + + return 0; + +failed: + lan743x_ptp_perout_off(adapter, index); + return ret; +} + +static void lan743x_ptp_io_perout_off(struct lan743x_adapter *adapter, + u32 index) +{ + struct lan743x_ptp *ptp = &adapter->ptp; + int perout_pin; + int event_ch; + u32 gen_cfg; + int val; + + event_ch = ptp->ptp_io_perout[index]; + if (event_ch >= 0) { + /* set target to far in the future, effectively disabling it */ + lan743x_csr_write(adapter, + PTP_CLOCK_TARGET_SEC_X(event_ch), + 0xFFFF0000); + lan743x_csr_write(adapter, + PTP_CLOCK_TARGET_NS_X(event_ch), + 0); + + gen_cfg = lan743x_csr_read(adapter, HS_PTP_GENERAL_CONFIG); + gen_cfg &= 
~(HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_X_MASK_ + (event_ch)); + gen_cfg &= ~(HS_PTP_GENERAL_CONFIG_EVENT_POL_X_(event_ch)); + gen_cfg |= HS_PTP_GENERAL_CONFIG_RELOAD_ADD_X_(event_ch); + lan743x_csr_write(adapter, HS_PTP_GENERAL_CONFIG, gen_cfg); + if (event_ch) + lan743x_csr_write(adapter, PTP_INT_STS, + PTP_INT_TIMER_INT_B_); + else + lan743x_csr_write(adapter, PTP_INT_STS, + PTP_INT_TIMER_INT_A_); + lan743x_ptp_release_event_ch(adapter, event_ch); + ptp->ptp_io_perout[index] = -1; + } + + perout_pin = ptp_find_pin(ptp->ptp_clock, PTP_PF_PEROUT, index); + + /* Deselect Event output */ + val = lan743x_csr_read(adapter, PTP_IO_EVENT_OUTPUT_CFG); + + /* Disables the output of Local Time Target compare events */ + val &= ~PTP_IO_EVENT_OUTPUT_CFG_EN_(perout_pin); + lan743x_csr_write(adapter, PTP_IO_EVENT_OUTPUT_CFG, val); + + /* Configured as an opendrain driver*/ + val = lan743x_csr_read(adapter, PTP_IO_PIN_CFG); + val &= ~PTP_IO_PIN_CFG_OBUF_TYPE_(perout_pin); + lan743x_csr_write(adapter, PTP_IO_PIN_CFG, val); + /* Dummy read to make sure write operation success */ + val = lan743x_csr_read(adapter, PTP_IO_PIN_CFG); +} + +static int lan743x_ptp_io_perout(struct lan743x_adapter *adapter, int on, + struct ptp_perout_request *perout_request) +{ + struct lan743x_ptp *ptp = &adapter->ptp; + u32 period_sec, period_nsec; + u32 start_sec, start_nsec; + u32 pulse_sec, pulse_nsec; + int pulse_width; + int perout_pin; + int event_ch; + u32 gen_cfg; + u32 index; + int val; + + index = perout_request->index; + event_ch = ptp->ptp_io_perout[index]; + + if (on) { + perout_pin = ptp_find_pin(ptp->ptp_clock, PTP_PF_PEROUT, index); + if (perout_pin < 0) + return -EBUSY; + } else { + lan743x_ptp_io_perout_off(adapter, index); + return 0; + } + + if (event_ch >= LAN743X_PTP_N_EVENT_CHAN) { + /* already on, turn off first */ + lan743x_ptp_io_perout_off(adapter, index); + } + + event_ch = lan743x_ptp_reserve_event_ch(adapter, index); + if (event_ch < 0) { + netif_warn(adapter, drv, adapter->netdev, + "Failed to reserve event channel %d for PEROUT\n", + index); + goto failed; + } + ptp->ptp_io_perout[index] = event_ch; + + if (perout_request->flags & PTP_PEROUT_DUTY_CYCLE) { + pulse_sec = perout_request->on.sec; + pulse_sec += perout_request->on.nsec / 1000000000; + pulse_nsec = perout_request->on.nsec % 1000000000; + } else { + pulse_sec = perout_request->period.sec; + pulse_sec += perout_request->period.nsec / 1000000000; + pulse_nsec = perout_request->period.nsec % 1000000000; + } + + if (pulse_sec == 0) { + if (pulse_nsec >= 400000000) { + pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_200MS_; + } else if (pulse_nsec >= 200000000) { + pulse_width = HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_100MS_; + } else if (pulse_nsec >= 100000000) { + pulse_width = HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_50MS_; + } else if (pulse_nsec >= 20000000) { + pulse_width = HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_10MS_; + } else if (pulse_nsec >= 10000000) { + pulse_width = HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_5MS_; + } else if (pulse_nsec >= 2000000) { + pulse_width = HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_1MS_; + } else if (pulse_nsec >= 1000000) { + pulse_width = HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_500US_; + } else if (pulse_nsec >= 200000) { + pulse_width = HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_100US_; + } else if (pulse_nsec >= 100000) { + pulse_width = HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_50US_; + } else if (pulse_nsec >= 20000) { + pulse_width = HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_10US_; + } else if (pulse_nsec >= 10000) { + pulse_width = 
HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_5US_; + } else if (pulse_nsec >= 2000) { + pulse_width = HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_1US_; + } else if (pulse_nsec >= 1000) { + pulse_width = HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_500NS_; + } else if (pulse_nsec >= 200) { + pulse_width = HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_100NS_; + } else { + netif_warn(adapter, drv, adapter->netdev, + "perout period too small, min is 200nS\n"); + goto failed; + } + } else { + pulse_width = HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_200MS_; + } + + /* turn off by setting target far in future */ + lan743x_csr_write(adapter, + PTP_CLOCK_TARGET_SEC_X(event_ch), + 0xFFFF0000); + lan743x_csr_write(adapter, + PTP_CLOCK_TARGET_NS_X(event_ch), 0); + + /* Configure to pulse every period */ + gen_cfg = lan743x_csr_read(adapter, HS_PTP_GENERAL_CONFIG); + gen_cfg &= ~(HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_X_MASK_(event_ch)); + gen_cfg |= HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_X_SET_ + (event_ch, pulse_width); + gen_cfg |= HS_PTP_GENERAL_CONFIG_EVENT_POL_X_(event_ch); + gen_cfg &= ~(HS_PTP_GENERAL_CONFIG_RELOAD_ADD_X_(event_ch)); + lan743x_csr_write(adapter, HS_PTP_GENERAL_CONFIG, gen_cfg); + + /* set the reload to one toggle cycle */ + period_sec = perout_request->period.sec; + period_sec += perout_request->period.nsec / 1000000000; + period_nsec = perout_request->period.nsec % 1000000000; + lan743x_csr_write(adapter, + PTP_CLOCK_TARGET_RELOAD_SEC_X(event_ch), + period_sec); + lan743x_csr_write(adapter, + PTP_CLOCK_TARGET_RELOAD_NS_X(event_ch), + period_nsec); + + start_sec = perout_request->start.sec; + start_sec += perout_request->start.nsec / 1000000000; + start_nsec = perout_request->start.nsec % 1000000000; + + /* set the start time */ + lan743x_csr_write(adapter, + PTP_CLOCK_TARGET_SEC_X(event_ch), + start_sec); + lan743x_csr_write(adapter, + PTP_CLOCK_TARGET_NS_X(event_ch), + start_nsec); + + /* Enable LTC Target Read */ + val = lan743x_csr_read(adapter, PTP_CMD_CTL); + val |= PTP_CMD_CTL_PTP_LTC_TARGET_READ_; + lan743x_csr_write(adapter, PTP_CMD_CTL, val); + + /* Configure as an push/pull driver */ + val = lan743x_csr_read(adapter, PTP_IO_PIN_CFG); + val |= PTP_IO_PIN_CFG_OBUF_TYPE_(perout_pin); + lan743x_csr_write(adapter, PTP_IO_PIN_CFG, val); + + /* Select Event output */ + val = lan743x_csr_read(adapter, PTP_IO_EVENT_OUTPUT_CFG); + if (event_ch) + /* Channel B as the output */ + val |= PTP_IO_EVENT_OUTPUT_CFG_SEL_(perout_pin); + else + /* Channel A as the output */ + val &= ~PTP_IO_EVENT_OUTPUT_CFG_SEL_(perout_pin); + + /* Enables the output of Local Time Target compare events */ + val |= PTP_IO_EVENT_OUTPUT_CFG_EN_(perout_pin); + lan743x_csr_write(adapter, PTP_IO_EVENT_OUTPUT_CFG, val); + + return 0; + +failed: + lan743x_ptp_io_perout_off(adapter, index); + return -ENODEV; +} + +static void lan743x_ptp_io_extts_off(struct lan743x_adapter *adapter, + u32 index) +{ + struct lan743x_ptp *ptp = &adapter->ptp; + struct lan743x_extts *extts; + int val; + + extts = &ptp->extts[index]; + /* PTP Interrupt Enable Clear Register */ + if (extts->flags & PTP_FALLING_EDGE) + val = PTP_INT_EN_FE_EN_CLR_(index); + else + val = PTP_INT_EN_RE_EN_CLR_(index); + lan743x_csr_write(adapter, PTP_INT_EN_CLR, val); + + /* Disables PTP-IO edge lock */ + val = lan743x_csr_read(adapter, PTP_IO_CAP_CONFIG); + if (extts->flags & PTP_FALLING_EDGE) { + val &= ~PTP_IO_CAP_CONFIG_LOCK_FE_(index); + val &= ~PTP_IO_CAP_CONFIG_FE_CAP_EN_(index); + } else { + val &= ~PTP_IO_CAP_CONFIG_LOCK_RE_(index); + val &= ~PTP_IO_CAP_CONFIG_RE_CAP_EN_(index); + } + 
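+ /* Write back PTP_IO_CAP_CONFIG with the edge capture/lock bits cleared */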
lan743x_csr_write(adapter, PTP_IO_CAP_CONFIG, val); + + /* PTP-IO De-select register */ + val = lan743x_csr_read(adapter, PTP_IO_SEL); + val &= ~PTP_IO_SEL_MASK_; + lan743x_csr_write(adapter, PTP_IO_SEL, val); + + /* Clear timestamp */ + memset(&extts->ts, 0, sizeof(struct timespec64)); + extts->flags = 0; +} + +static int lan743x_ptp_io_event_cap_en(struct lan743x_adapter *adapter, + u32 flags, u32 channel) +{ + struct lan743x_ptp *ptp = &adapter->ptp; + int val; + + if ((flags & PTP_EXTTS_EDGES) == PTP_EXTTS_EDGES) + return -EOPNOTSUPP; + + mutex_lock(&ptp->command_lock); + /* PTP-IO Event Capture Enable */ + val = lan743x_csr_read(adapter, PTP_IO_CAP_CONFIG); + if (flags & PTP_FALLING_EDGE) { + val &= ~PTP_IO_CAP_CONFIG_LOCK_RE_(channel); + val &= ~PTP_IO_CAP_CONFIG_RE_CAP_EN_(channel); + val |= PTP_IO_CAP_CONFIG_LOCK_FE_(channel); + val |= PTP_IO_CAP_CONFIG_FE_CAP_EN_(channel); + } else { + /* Rising eventing as Default */ + val &= ~PTP_IO_CAP_CONFIG_LOCK_FE_(channel); + val &= ~PTP_IO_CAP_CONFIG_FE_CAP_EN_(channel); + val |= PTP_IO_CAP_CONFIG_LOCK_RE_(channel); + val |= PTP_IO_CAP_CONFIG_RE_CAP_EN_(channel); + } + lan743x_csr_write(adapter, PTP_IO_CAP_CONFIG, val); + + /* PTP-IO Select */ + val = lan743x_csr_read(adapter, PTP_IO_SEL); + val &= ~PTP_IO_SEL_MASK_; + val |= channel << PTP_IO_SEL_SHIFT_; + lan743x_csr_write(adapter, PTP_IO_SEL, val); + + /* PTP Interrupt Enable Register */ + if (flags & PTP_FALLING_EDGE) + val = PTP_INT_EN_FE_EN_SET_(channel); + else + val = PTP_INT_EN_RE_EN_SET_(channel); + lan743x_csr_write(adapter, PTP_INT_EN_SET, val); + + mutex_unlock(&ptp->command_lock); + + return 0; +} + +static int lan743x_ptp_io_extts(struct lan743x_adapter *adapter, int on, + struct ptp_extts_request *extts_request) +{ + struct lan743x_ptp *ptp = &adapter->ptp; + u32 flags = extts_request->flags; + u32 index = extts_request->index; + struct lan743x_extts *extts; + int extts_pin; + int ret = 0; + + extts = &ptp->extts[index]; + + if (on) { + extts_pin = ptp_find_pin(ptp->ptp_clock, PTP_PF_EXTTS, index); + if (extts_pin < 0) + return -EBUSY; + + ret = lan743x_ptp_io_event_cap_en(adapter, flags, index); + if (!ret) + extts->flags = flags; + } else { + lan743x_ptp_io_extts_off(adapter, index); + } + + return ret; +} + +static int lan743x_ptpci_enable(struct ptp_clock_info *ptpci, + struct ptp_clock_request *request, int on) +{ + struct lan743x_ptp *ptp = + container_of(ptpci, struct lan743x_ptp, ptp_clock_info); + struct lan743x_adapter *adapter = + container_of(ptp, struct lan743x_adapter, ptp); + + if (request) { + switch (request->type) { + case PTP_CLK_REQ_EXTTS: + if (request->extts.index < ptpci->n_ext_ts) + return lan743x_ptp_io_extts(adapter, on, + &request->extts); + return -EINVAL; + case PTP_CLK_REQ_PEROUT: + if (request->perout.index < ptpci->n_per_out) { + if (adapter->is_pci11x1x) + return lan743x_ptp_io_perout(adapter, on, + &request->perout); + else + return lan743x_ptp_perout(adapter, on, + &request->perout); + } + return -EINVAL; + case PTP_CLK_REQ_PPS: + return -EINVAL; + default: + netif_err(adapter, drv, adapter->netdev, + "request->type == %d, Unknown\n", + request->type); + break; + } + } else { + netif_err(adapter, drv, adapter->netdev, "request == NULL\n"); + } + return 0; +} + +static int lan743x_ptpci_verify_pin_config(struct ptp_clock_info *ptp, + unsigned int pin, + enum ptp_pin_function func, + unsigned int chan) +{ + struct lan743x_ptp *lan_ptp = + container_of(ptp, struct lan743x_ptp, ptp_clock_info); + struct lan743x_adapter *adapter = + 
container_of(lan_ptp, struct lan743x_adapter, ptp); + int result = 0; + + /* Confirm the requested function is supported. Parameter + * validation is done by the caller. + */ + switch (func) { + case PTP_PF_NONE: + case PTP_PF_PEROUT: + break; + case PTP_PF_EXTTS: + if (!adapter->is_pci11x1x) + result = -1; + break; + case PTP_PF_PHYSYNC: + default: + result = -1; + break; + } + return result; +} + +static void lan743x_ptp_io_event_clock_get(struct lan743x_adapter *adapter, + bool fe, u8 channel, + struct timespec64 *ts) +{ + struct lan743x_ptp *ptp = &adapter->ptp; + struct lan743x_extts *extts; + u32 sec, nsec; + + mutex_lock(&ptp->command_lock); + if (fe) { + sec = lan743x_csr_read(adapter, PTP_IO_FE_LTC_SEC_CAP_X); + nsec = lan743x_csr_read(adapter, PTP_IO_FE_LTC_NS_CAP_X); + } else { + sec = lan743x_csr_read(adapter, PTP_IO_RE_LTC_SEC_CAP_X); + nsec = lan743x_csr_read(adapter, PTP_IO_RE_LTC_NS_CAP_X); + } + + mutex_unlock(&ptp->command_lock); + + /* Update Local timestamp */ + extts = &ptp->extts[channel]; + extts->ts.tv_sec = sec; + extts->ts.tv_nsec = nsec; + ts->tv_sec = sec; + ts->tv_nsec = nsec; +} + +static long lan743x_ptpci_do_aux_work(struct ptp_clock_info *ptpci) +{ + struct lan743x_ptp *ptp = + container_of(ptpci, struct lan743x_ptp, ptp_clock_info); + struct lan743x_adapter *adapter = + container_of(ptp, struct lan743x_adapter, ptp); + u32 cap_info, cause, header, nsec, seconds; + bool new_timestamp_available = false; + struct ptp_clock_event ptp_event; + struct timespec64 ts; + int ptp_int_sts; + int count = 0; + int channel; + s64 ns; + + ptp_int_sts = lan743x_csr_read(adapter, PTP_INT_STS); + while ((count < 100) && ptp_int_sts) { + count++; + + if (ptp_int_sts & PTP_INT_BIT_TX_TS_) { + cap_info = lan743x_csr_read(adapter, PTP_CAP_INFO); + + if (PTP_CAP_INFO_TX_TS_CNT_GET_(cap_info) > 0) { + seconds = lan743x_csr_read(adapter, + PTP_TX_EGRESS_SEC); + nsec = lan743x_csr_read(adapter, + PTP_TX_EGRESS_NS); + cause = (nsec & + PTP_TX_EGRESS_NS_CAPTURE_CAUSE_MASK_); + header = lan743x_csr_read(adapter, + PTP_TX_MSG_HEADER); + + if (cause == + PTP_TX_EGRESS_NS_CAPTURE_CAUSE_SW_) { + nsec &= PTP_TX_EGRESS_NS_TS_NS_MASK_; + lan743x_ptp_tx_ts_enqueue_ts(adapter, + seconds, + nsec, + header); + new_timestamp_available = true; + } else if (cause == + PTP_TX_EGRESS_NS_CAPTURE_CAUSE_AUTO_) { + netif_err(adapter, drv, adapter->netdev, + "Auto capture cause not supported\n"); + } else { + netif_warn(adapter, drv, adapter->netdev, + "unknown tx timestamp capture cause\n"); + } + } else { + netif_warn(adapter, drv, adapter->netdev, + "TX TS INT but no TX TS CNT\n"); + } + lan743x_csr_write(adapter, PTP_INT_STS, + PTP_INT_BIT_TX_TS_); + } + + if (ptp_int_sts & PTP_INT_IO_FE_MASK_) { + do { + channel = lan743x_get_channel((ptp_int_sts & + PTP_INT_IO_FE_MASK_) >> + PTP_INT_IO_FE_SHIFT_); + if (channel >= 0 && + channel < PCI11X1X_PTP_IO_MAX_CHANNELS) { + lan743x_ptp_io_event_clock_get(adapter, + true, + channel, + &ts); + /* PTP Falling Event post */ + ns = timespec64_to_ns(&ts); + ptp_event.timestamp = ns; + ptp_event.index = channel; + ptp_event.type = PTP_CLOCK_EXTTS; + ptp_clock_event(ptp->ptp_clock, + &ptp_event); + lan743x_csr_write(adapter, PTP_INT_STS, + PTP_INT_IO_FE_SET_ + (channel)); + ptp_int_sts &= ~(1 << + (PTP_INT_IO_FE_SHIFT_ + + channel)); + } else { + /* Clear falling event interrupts */ + lan743x_csr_write(adapter, PTP_INT_STS, + PTP_INT_IO_FE_MASK_); + ptp_int_sts &= ~PTP_INT_IO_FE_MASK_; + } + } while (ptp_int_sts & PTP_INT_IO_FE_MASK_); + } + + if (ptp_int_sts & 
PTP_INT_IO_RE_MASK_) { + do { + channel = lan743x_get_channel((ptp_int_sts & + PTP_INT_IO_RE_MASK_) >> + PTP_INT_IO_RE_SHIFT_); + if (channel >= 0 && + channel < PCI11X1X_PTP_IO_MAX_CHANNELS) { + lan743x_ptp_io_event_clock_get(adapter, + false, + channel, + &ts); + /* PTP Rising Event post */ + ns = timespec64_to_ns(&ts); + ptp_event.timestamp = ns; + ptp_event.index = channel; + ptp_event.type = PTP_CLOCK_EXTTS; + ptp_clock_event(ptp->ptp_clock, + &ptp_event); + lan743x_csr_write(adapter, PTP_INT_STS, + PTP_INT_IO_RE_SET_ + (channel)); + ptp_int_sts &= ~(1 << + (PTP_INT_IO_RE_SHIFT_ + + channel)); + } else { + /* Clear Rising event interrupt */ + lan743x_csr_write(adapter, PTP_INT_STS, + PTP_INT_IO_RE_MASK_); + ptp_int_sts &= ~PTP_INT_IO_RE_MASK_; + } + } while (ptp_int_sts & PTP_INT_IO_RE_MASK_); + } + + ptp_int_sts = lan743x_csr_read(adapter, PTP_INT_STS); + } + + if (new_timestamp_available) + lan743x_ptp_tx_ts_complete(adapter); + + lan743x_csr_write(adapter, INT_EN_SET, INT_BIT_1588_); + + return -1; +} + +static void lan743x_ptp_clock_get(struct lan743x_adapter *adapter, + u32 *seconds, u32 *nano_seconds, + u32 *sub_nano_seconds) +{ + struct lan743x_ptp *ptp = &adapter->ptp; + + mutex_lock(&ptp->command_lock); + + lan743x_csr_write(adapter, PTP_CMD_CTL, PTP_CMD_CTL_PTP_CLOCK_READ_); + lan743x_ptp_wait_till_cmd_done(adapter, PTP_CMD_CTL_PTP_CLOCK_READ_); + + if (seconds) + (*seconds) = lan743x_csr_read(adapter, PTP_CLOCK_SEC); + + if (nano_seconds) + (*nano_seconds) = lan743x_csr_read(adapter, PTP_CLOCK_NS); + + if (sub_nano_seconds) + (*sub_nano_seconds) = + lan743x_csr_read(adapter, PTP_CLOCK_SUBNS); + + mutex_unlock(&ptp->command_lock); +} + +static void lan743x_ptp_io_clock_get(struct lan743x_adapter *adapter, + u32 *sec, u32 *nsec, u32 *sub_nsec) +{ + struct lan743x_ptp *ptp = &adapter->ptp; + + mutex_lock(&ptp->command_lock); + lan743x_csr_write(adapter, PTP_CMD_CTL, PTP_CMD_CTL_PTP_CLOCK_READ_); + lan743x_ptp_wait_till_cmd_done(adapter, PTP_CMD_CTL_PTP_CLOCK_READ_); + + if (sec) + (*sec) = lan743x_csr_read(adapter, PTP_LTC_RD_SEC_LO); + + if (nsec) + (*nsec) = lan743x_csr_read(adapter, PTP_LTC_RD_NS); + + if (sub_nsec) + (*sub_nsec) = + lan743x_csr_read(adapter, PTP_LTC_RD_SUBNS); + + mutex_unlock(&ptp->command_lock); +} + +static void lan743x_ptp_clock_step(struct lan743x_adapter *adapter, + s64 time_step_ns) +{ + struct lan743x_ptp *ptp = &adapter->ptp; + u32 nano_seconds_step = 0; + u64 abs_time_step_ns = 0; + u32 unsigned_seconds = 0; + u32 nano_seconds = 0; + u32 remainder = 0; + s32 seconds = 0; + + if (time_step_ns > 15000000000LL) { + /* convert to clock set */ + if (adapter->is_pci11x1x) + lan743x_ptp_io_clock_get(adapter, &unsigned_seconds, + &nano_seconds, NULL); + else + lan743x_ptp_clock_get(adapter, &unsigned_seconds, + &nano_seconds, NULL); + unsigned_seconds += div_u64_rem(time_step_ns, 1000000000LL, + &remainder); + nano_seconds += remainder; + if (nano_seconds >= 1000000000) { + unsigned_seconds++; + nano_seconds -= 1000000000; + } + lan743x_ptp_clock_set(adapter, unsigned_seconds, + nano_seconds, 0); + return; + } else if (time_step_ns < -15000000000LL) { + /* convert to clock set */ + time_step_ns = -time_step_ns; + + if (adapter->is_pci11x1x) { + lan743x_ptp_io_clock_get(adapter, &unsigned_seconds, + &nano_seconds, NULL); + } else { + lan743x_ptp_clock_get(adapter, &unsigned_seconds, + &nano_seconds, NULL); + } + unsigned_seconds -= div_u64_rem(time_step_ns, 1000000000LL, + &remainder); + nano_seconds_step = remainder; + if (nano_seconds < 
nano_seconds_step) { + unsigned_seconds--; + nano_seconds += 1000000000; + } + nano_seconds -= nano_seconds_step; + lan743x_ptp_clock_set(adapter, unsigned_seconds, + nano_seconds, 0); + return; + } + + /* do clock step */ + if (time_step_ns >= 0) { + abs_time_step_ns = (u64)(time_step_ns); + seconds = (s32)div_u64_rem(abs_time_step_ns, 1000000000, + &remainder); + nano_seconds = (u32)remainder; + } else { + abs_time_step_ns = (u64)(-time_step_ns); + seconds = -((s32)div_u64_rem(abs_time_step_ns, 1000000000, + &remainder)); + nano_seconds = (u32)remainder; + if (nano_seconds > 0) { + /* subtracting nano seconds is not allowed + * convert to subtracting from seconds, + * and adding to nanoseconds + */ + seconds--; + nano_seconds = (1000000000 - nano_seconds); + } + } + + if (nano_seconds > 0) { + /* add 8 ns to cover the likely normal increment */ + nano_seconds += 8; + } + + if (nano_seconds >= 1000000000) { + /* carry into seconds */ + seconds++; + nano_seconds -= 1000000000; + } + + while (seconds) { + mutex_lock(&ptp->command_lock); + if (seconds > 0) { + u32 adjustment_value = (u32)seconds; + + if (adjustment_value > 0xF) + adjustment_value = 0xF; + lan743x_csr_write(adapter, PTP_CLOCK_STEP_ADJ, + PTP_CLOCK_STEP_ADJ_DIR_ | + adjustment_value); + seconds -= ((s32)adjustment_value); + } else { + u32 adjustment_value = (u32)(-seconds); + + if (adjustment_value > 0xF) + adjustment_value = 0xF; + lan743x_csr_write(adapter, PTP_CLOCK_STEP_ADJ, + adjustment_value); + seconds += ((s32)adjustment_value); + } + lan743x_csr_write(adapter, PTP_CMD_CTL, + PTP_CMD_CTL_PTP_CLOCK_STEP_SEC_); + lan743x_ptp_wait_till_cmd_done(adapter, + PTP_CMD_CTL_PTP_CLOCK_STEP_SEC_); + mutex_unlock(&ptp->command_lock); + } + if (nano_seconds) { + mutex_lock(&ptp->command_lock); + lan743x_csr_write(adapter, PTP_CLOCK_STEP_ADJ, + PTP_CLOCK_STEP_ADJ_DIR_ | + (nano_seconds & + PTP_CLOCK_STEP_ADJ_VALUE_MASK_)); + lan743x_csr_write(adapter, PTP_CMD_CTL, + PTP_CMD_CTL_PTP_CLK_STP_NSEC_); + lan743x_ptp_wait_till_cmd_done(adapter, + PTP_CMD_CTL_PTP_CLK_STP_NSEC_); + mutex_unlock(&ptp->command_lock); + } +} + +void lan743x_ptp_isr(void *context) +{ + struct lan743x_adapter *adapter = (struct lan743x_adapter *)context; + struct lan743x_ptp *ptp = NULL; + int enable_flag = 1; + u32 ptp_int_sts = 0; + + ptp = &adapter->ptp; + + lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_1588_); + + ptp_int_sts = lan743x_csr_read(adapter, PTP_INT_STS); + ptp_int_sts &= lan743x_csr_read(adapter, PTP_INT_EN_SET); + + if (ptp_int_sts & PTP_INT_BIT_TX_TS_) { + ptp_schedule_worker(ptp->ptp_clock, 0); + enable_flag = 0;/* tasklet will re-enable later */ + } + if (ptp_int_sts & PTP_INT_BIT_TX_SWTS_ERR_) { + netif_err(adapter, drv, adapter->netdev, + "PTP TX Software Timestamp Error\n"); + /* clear int status bit */ + lan743x_csr_write(adapter, PTP_INT_STS, + PTP_INT_BIT_TX_SWTS_ERR_); + } + if (ptp_int_sts & PTP_INT_BIT_TIMER_B_) { + /* clear int status bit */ + lan743x_csr_write(adapter, PTP_INT_STS, + PTP_INT_BIT_TIMER_B_); + } + if (ptp_int_sts & PTP_INT_BIT_TIMER_A_) { + /* clear int status bit */ + lan743x_csr_write(adapter, PTP_INT_STS, + PTP_INT_BIT_TIMER_A_); + } + + if (enable_flag) { + /* re-enable isr */ + lan743x_csr_write(adapter, INT_EN_SET, INT_BIT_1588_); + } +} + +static void lan743x_ptp_tx_ts_enqueue_skb(struct lan743x_adapter *adapter, + struct sk_buff *skb, bool ignore_sync) +{ + struct lan743x_ptp *ptp = &adapter->ptp; + + spin_lock_bh(&ptp->tx_ts_lock); + if (ptp->tx_ts_skb_queue_size < LAN743X_PTP_NUMBER_OF_TX_TIMESTAMPS) { + 
ptp->tx_ts_skb_queue[ptp->tx_ts_skb_queue_size] = skb; + if (ignore_sync) + ptp->tx_ts_ignore_sync_queue |= + BIT(ptp->tx_ts_skb_queue_size); + ptp->tx_ts_skb_queue_size++; + } else { + /* this should never happen, so long as the tx channel + * calls and honors the result from + * lan743x_ptp_request_tx_timestamp + */ + netif_err(adapter, drv, adapter->netdev, + "tx ts skb queue overflow\n"); + dev_kfree_skb(skb); + } + spin_unlock_bh(&ptp->tx_ts_lock); +} + +static void lan743x_ptp_sync_to_system_clock(struct lan743x_adapter *adapter) +{ + struct timespec64 ts; + + ktime_get_clocktai_ts64(&ts); + + lan743x_ptp_clock_set(adapter, ts.tv_sec, ts.tv_nsec, 0); +} + +void lan743x_ptp_update_latency(struct lan743x_adapter *adapter, + u32 link_speed) +{ + switch (link_speed) { + case 10: + lan743x_csr_write(adapter, PTP_LATENCY, + PTP_LATENCY_TX_SET_(0) | + PTP_LATENCY_RX_SET_(0)); + break; + case 100: + lan743x_csr_write(adapter, PTP_LATENCY, + PTP_LATENCY_TX_SET_(181) | + PTP_LATENCY_RX_SET_(594)); + break; + case 1000: + lan743x_csr_write(adapter, PTP_LATENCY, + PTP_LATENCY_TX_SET_(30) | + PTP_LATENCY_RX_SET_(525)); + break; + } +} + +int lan743x_ptp_init(struct lan743x_adapter *adapter) +{ + struct lan743x_ptp *ptp = &adapter->ptp; + int i; + + mutex_init(&ptp->command_lock); + spin_lock_init(&ptp->tx_ts_lock); + ptp->used_event_ch = 0; + + for (i = 0; i < LAN743X_PTP_N_EVENT_CHAN; i++) { + ptp->perout[i].event_ch = -1; + ptp->perout[i].gpio_pin = -1; + } + + lan743x_led_mux_save(adapter); + + return 0; +} + +int lan743x_ptp_open(struct lan743x_adapter *adapter) +{ + struct lan743x_ptp *ptp = &adapter->ptp; + int ret = -ENODEV; + u32 temp; + int i; + int n_pins; + + lan743x_ptp_reset(adapter); + lan743x_ptp_sync_to_system_clock(adapter); + temp = lan743x_csr_read(adapter, PTP_TX_MOD2); + temp |= PTP_TX_MOD2_TX_PTP_CLR_UDPV4_CHKSUM_; + lan743x_csr_write(adapter, PTP_TX_MOD2, temp); + lan743x_ptp_enable(adapter); + lan743x_csr_write(adapter, INT_EN_SET, INT_BIT_1588_); + lan743x_csr_write(adapter, PTP_INT_EN_SET, + PTP_INT_BIT_TX_SWTS_ERR_ | PTP_INT_BIT_TX_TS_); + ptp->flags |= PTP_FLAG_ISR_ENABLED; + + if (!IS_ENABLED(CONFIG_PTP_1588_CLOCK)) + return 0; + + switch (adapter->csr.id_rev & ID_REV_ID_MASK_) { + case ID_REV_ID_LAN7430_: + n_pins = LAN7430_N_GPIO; + break; + case ID_REV_ID_LAN7431_: + case ID_REV_ID_A011_: + case ID_REV_ID_A041_: + n_pins = LAN7431_N_GPIO; + break; + default: + netif_warn(adapter, drv, adapter->netdev, + "Unknown LAN743x (%08x). 
Assuming no GPIO\n", + adapter->csr.id_rev); + n_pins = 0; + break; + } + + if (n_pins > LAN743X_PTP_N_GPIO) + n_pins = LAN743X_PTP_N_GPIO; + + for (i = 0; i < n_pins; i++) { + struct ptp_pin_desc *ptp_pin = &ptp->pin_config[i]; + + snprintf(ptp_pin->name, + sizeof(ptp_pin->name), "lan743x_ptp_pin_%02d", i); + ptp_pin->index = i; + ptp_pin->func = PTP_PF_NONE; + } + + ptp->ptp_clock_info.owner = THIS_MODULE; + snprintf(ptp->ptp_clock_info.name, 16, "%pm", + adapter->netdev->dev_addr); + ptp->ptp_clock_info.max_adj = LAN743X_PTP_MAX_FREQ_ADJ_IN_PPB; + ptp->ptp_clock_info.n_alarm = 0; + ptp->ptp_clock_info.n_ext_ts = LAN743X_PTP_N_EXTTS; + ptp->ptp_clock_info.n_per_out = LAN743X_PTP_N_EVENT_CHAN; + ptp->ptp_clock_info.n_pins = n_pins; + ptp->ptp_clock_info.pps = LAN743X_PTP_N_PPS; + ptp->ptp_clock_info.pin_config = ptp->pin_config; + ptp->ptp_clock_info.adjfine = lan743x_ptpci_adjfine; + ptp->ptp_clock_info.adjfreq = lan743x_ptpci_adjfreq; + ptp->ptp_clock_info.adjtime = lan743x_ptpci_adjtime; + ptp->ptp_clock_info.gettime64 = lan743x_ptpci_gettime64; + ptp->ptp_clock_info.getcrosststamp = NULL; + ptp->ptp_clock_info.settime64 = lan743x_ptpci_settime64; + ptp->ptp_clock_info.enable = lan743x_ptpci_enable; + ptp->ptp_clock_info.do_aux_work = lan743x_ptpci_do_aux_work; + ptp->ptp_clock_info.verify = lan743x_ptpci_verify_pin_config; + + ptp->ptp_clock = ptp_clock_register(&ptp->ptp_clock_info, + &adapter->pdev->dev); + + if (IS_ERR(ptp->ptp_clock)) { + netif_err(adapter, ifup, adapter->netdev, + "ptp_clock_register failed\n"); + goto done; + } + ptp->flags |= PTP_FLAG_PTP_CLOCK_REGISTERED; + netif_info(adapter, ifup, adapter->netdev, + "successfully registered ptp clock\n"); + + return 0; +done: + lan743x_ptp_close(adapter); + return ret; +} + +void lan743x_ptp_close(struct lan743x_adapter *adapter) +{ + struct lan743x_ptp *ptp = &adapter->ptp; + int index; + + if (IS_ENABLED(CONFIG_PTP_1588_CLOCK) && + (ptp->flags & PTP_FLAG_PTP_CLOCK_REGISTERED)) { + ptp_clock_unregister(ptp->ptp_clock); + ptp->ptp_clock = NULL; + ptp->flags &= ~PTP_FLAG_PTP_CLOCK_REGISTERED; + netif_info(adapter, drv, adapter->netdev, + "ptp clock unregister\n"); + } + + if (ptp->flags & PTP_FLAG_ISR_ENABLED) { + lan743x_csr_write(adapter, PTP_INT_EN_CLR, + PTP_INT_BIT_TX_SWTS_ERR_ | + PTP_INT_BIT_TX_TS_); + lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_1588_); + ptp->flags &= ~PTP_FLAG_ISR_ENABLED; + } + + /* clean up pending timestamp requests */ + lan743x_ptp_tx_ts_complete(adapter); + spin_lock_bh(&ptp->tx_ts_lock); + for (index = 0; + index < LAN743X_PTP_NUMBER_OF_TX_TIMESTAMPS; + index++) { + struct sk_buff *skb = ptp->tx_ts_skb_queue[index]; + + dev_kfree_skb(skb); + ptp->tx_ts_skb_queue[index] = NULL; + ptp->tx_ts_seconds_queue[index] = 0; + ptp->tx_ts_nseconds_queue[index] = 0; + } + ptp->tx_ts_skb_queue_size = 0; + ptp->tx_ts_queue_size = 0; + ptp->pending_tx_timestamps = 0; + spin_unlock_bh(&ptp->tx_ts_lock); + + lan743x_led_mux_restore(adapter); + + lan743x_ptp_disable(adapter); +} + +static void lan743x_ptp_set_sync_ts_insert(struct lan743x_adapter *adapter, + bool ts_insert_enable) +{ + u32 ptp_tx_mod = lan743x_csr_read(adapter, PTP_TX_MOD); + + if (ts_insert_enable) + ptp_tx_mod |= PTP_TX_MOD_TX_PTP_SYNC_TS_INSERT_; + else + ptp_tx_mod &= ~PTP_TX_MOD_TX_PTP_SYNC_TS_INSERT_; + + lan743x_csr_write(adapter, PTP_TX_MOD, ptp_tx_mod); +} + +static bool lan743x_ptp_is_enabled(struct lan743x_adapter *adapter) +{ + if (lan743x_csr_read(adapter, PTP_CMD_CTL) & PTP_CMD_CTL_PTP_ENABLE_) + return true; + return false; +} 
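+
+/* The enable/disable/reset helpers below share the command pattern used
+ * throughout this file: take ptp->command_lock, write the command bit to
+ * PTP_CMD_CTL and, where completion must be confirmed, poll it back with
+ * lan743x_ptp_wait_till_cmd_done().
+ */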
+ +static void lan743x_ptp_enable(struct lan743x_adapter *adapter) +{ + struct lan743x_ptp *ptp = &adapter->ptp; + + mutex_lock(&ptp->command_lock); + + if (lan743x_ptp_is_enabled(adapter)) { + netif_warn(adapter, drv, adapter->netdev, + "PTP already enabled\n"); + goto done; + } + lan743x_csr_write(adapter, PTP_CMD_CTL, PTP_CMD_CTL_PTP_ENABLE_); +done: + mutex_unlock(&ptp->command_lock); +} + +static void lan743x_ptp_disable(struct lan743x_adapter *adapter) +{ + struct lan743x_ptp *ptp = &adapter->ptp; + + mutex_lock(&ptp->command_lock); + if (!lan743x_ptp_is_enabled(adapter)) { + netif_warn(adapter, drv, adapter->netdev, + "PTP already disabled\n"); + goto done; + } + lan743x_csr_write(adapter, PTP_CMD_CTL, PTP_CMD_CTL_PTP_DISABLE_); + lan743x_ptp_wait_till_cmd_done(adapter, PTP_CMD_CTL_PTP_ENABLE_); +done: + mutex_unlock(&ptp->command_lock); +} + +static void lan743x_ptp_reset(struct lan743x_adapter *adapter) +{ + struct lan743x_ptp *ptp = &adapter->ptp; + + mutex_lock(&ptp->command_lock); + + if (lan743x_ptp_is_enabled(adapter)) { + netif_err(adapter, drv, adapter->netdev, + "Attempting reset while enabled\n"); + goto done; + } + + lan743x_csr_write(adapter, PTP_CMD_CTL, PTP_CMD_CTL_PTP_RESET_); + lan743x_ptp_wait_till_cmd_done(adapter, PTP_CMD_CTL_PTP_RESET_); +done: + mutex_unlock(&ptp->command_lock); +} + +static void lan743x_ptp_clock_set(struct lan743x_adapter *adapter, + u32 seconds, u32 nano_seconds, + u32 sub_nano_seconds) +{ + struct lan743x_ptp *ptp = &adapter->ptp; + + mutex_lock(&ptp->command_lock); + + lan743x_csr_write(adapter, PTP_CLOCK_SEC, seconds); + lan743x_csr_write(adapter, PTP_CLOCK_NS, nano_seconds); + lan743x_csr_write(adapter, PTP_CLOCK_SUBNS, sub_nano_seconds); + + lan743x_csr_write(adapter, PTP_CMD_CTL, PTP_CMD_CTL_PTP_CLOCK_LOAD_); + lan743x_ptp_wait_till_cmd_done(adapter, PTP_CMD_CTL_PTP_CLOCK_LOAD_); + mutex_unlock(&ptp->command_lock); +} + +bool lan743x_ptp_request_tx_timestamp(struct lan743x_adapter *adapter) +{ + struct lan743x_ptp *ptp = &adapter->ptp; + bool result = false; + + spin_lock_bh(&ptp->tx_ts_lock); + if (ptp->pending_tx_timestamps < LAN743X_PTP_NUMBER_OF_TX_TIMESTAMPS) { + /* request granted */ + ptp->pending_tx_timestamps++; + result = true; + } + spin_unlock_bh(&ptp->tx_ts_lock); + return result; +} + +void lan743x_ptp_unrequest_tx_timestamp(struct lan743x_adapter *adapter) +{ + struct lan743x_ptp *ptp = &adapter->ptp; + + spin_lock_bh(&ptp->tx_ts_lock); + if (ptp->pending_tx_timestamps > 0) + ptp->pending_tx_timestamps--; + else + netif_err(adapter, drv, adapter->netdev, + "unrequest failed, pending_tx_timestamps==0\n"); + spin_unlock_bh(&ptp->tx_ts_lock); +} + +void lan743x_ptp_tx_timestamp_skb(struct lan743x_adapter *adapter, + struct sk_buff *skb, bool ignore_sync) +{ + lan743x_ptp_tx_ts_enqueue_skb(adapter, skb, ignore_sync); + + lan743x_ptp_tx_ts_complete(adapter); +} + +int lan743x_ptp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + struct lan743x_adapter *adapter = netdev_priv(netdev); + struct hwtstamp_config config; + int ret = 0; + int index; + + if (!ifr) { + netif_err(adapter, drv, adapter->netdev, + "SIOCSHWTSTAMP, ifr == NULL\n"); + return -EINVAL; + } + + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) + return -EFAULT; + + switch (config.tx_type) { + case HWTSTAMP_TX_OFF: + for (index = 0; index < adapter->used_tx_channels; + index++) + lan743x_tx_set_timestamping_mode(&adapter->tx[index], + false, false); + lan743x_ptp_set_sync_ts_insert(adapter, false); + break; + case HWTSTAMP_TX_ON: + for 
(index = 0; index < adapter->used_tx_channels; + index++) + lan743x_tx_set_timestamping_mode(&adapter->tx[index], + true, false); + lan743x_ptp_set_sync_ts_insert(adapter, false); + break; + case HWTSTAMP_TX_ONESTEP_SYNC: + for (index = 0; index < adapter->used_tx_channels; + index++) + lan743x_tx_set_timestamping_mode(&adapter->tx[index], + true, true); + + lan743x_ptp_set_sync_ts_insert(adapter, true); + break; + case HWTSTAMP_TX_ONESTEP_P2P: + ret = -ERANGE; + break; + default: + netif_warn(adapter, drv, adapter->netdev, + " tx_type = %d, UNKNOWN\n", config.tx_type); + ret = -EINVAL; + break; + } + + if (!ret) + return copy_to_user(ifr->ifr_data, &config, + sizeof(config)) ? -EFAULT : 0; + return ret; +} diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.h b/drivers/net/ethernet/microchip/lan743x_ptp.h new file mode 100644 index 000000000..e26d4eff7 --- /dev/null +++ b/drivers/net/ethernet/microchip/lan743x_ptp.h @@ -0,0 +1,101 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (C) 2018 Microchip Technology Inc. */ + +#ifndef _LAN743X_PTP_H +#define _LAN743X_PTP_H + +#include "linux/ptp_clock_kernel.h" +#include "linux/netdevice.h" + +#define LAN7430_N_LED 4 +#define LAN7430_N_GPIO 4 /* multiplexed with PHY LEDs */ +#define LAN7431_N_GPIO 12 + +#define LAN743X_PTP_N_GPIO LAN7431_N_GPIO + +/* the number of periodic outputs is limited by number of + * PTP clock event channels + */ +#define LAN743X_PTP_N_EVENT_CHAN 2 +#define LAN743X_PTP_N_PEROUT LAN743X_PTP_N_EVENT_CHAN +#define LAN743X_PTP_N_EXTTS 4 +#define LAN743X_PTP_N_PPS 0 +#define PCI11X1X_PTP_IO_MAX_CHANNELS 8 + +struct lan743x_adapter; + +/* GPIO */ +struct lan743x_gpio { + /* gpio_lock: used to prevent concurrent access to gpio settings */ + spinlock_t gpio_lock; + + int used_bits; + int output_bits; + int ptp_bits; + u32 gpio_cfg0; + u32 gpio_cfg1; + u32 gpio_cfg2; + u32 gpio_cfg3; +}; + +int lan743x_gpio_init(struct lan743x_adapter *adapter); + +void lan743x_ptp_isr(void *context); +bool lan743x_ptp_request_tx_timestamp(struct lan743x_adapter *adapter); +void lan743x_ptp_unrequest_tx_timestamp(struct lan743x_adapter *adapter); +void lan743x_ptp_tx_timestamp_skb(struct lan743x_adapter *adapter, + struct sk_buff *skb, bool ignore_sync); +int lan743x_ptp_init(struct lan743x_adapter *adapter); +int lan743x_ptp_open(struct lan743x_adapter *adapter); +void lan743x_ptp_close(struct lan743x_adapter *adapter); +void lan743x_ptp_update_latency(struct lan743x_adapter *adapter, + u32 link_speed); + +int lan743x_ptp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd); + +#define LAN743X_PTP_NUMBER_OF_TX_TIMESTAMPS (4) + +#define PTP_FLAG_PTP_CLOCK_REGISTERED BIT(1) +#define PTP_FLAG_ISR_ENABLED BIT(2) + +struct lan743x_ptp_perout { + int event_ch; /* PTP event channel (0=channel A, 1=channel B) */ + int gpio_pin; /* GPIO pin where output appears */ +}; + +struct lan743x_extts { + int flags; + struct timespec64 ts; +}; + +struct lan743x_ptp { + int flags; + + /* command_lock: used to prevent concurrent ptp commands */ + struct mutex command_lock; + + struct ptp_clock *ptp_clock; + struct ptp_clock_info ptp_clock_info; + struct ptp_pin_desc pin_config[LAN743X_PTP_N_GPIO]; + + unsigned long used_event_ch; + struct lan743x_ptp_perout perout[LAN743X_PTP_N_PEROUT]; + int ptp_io_perout[LAN743X_PTP_N_PEROUT]; /* PTP event channel (0=channel A, 1=channel B) */ + struct lan743x_extts extts[LAN743X_PTP_N_EXTTS]; + + bool leds_multiplexed; + bool led_enabled[LAN7430_N_LED]; + + /* tx_ts_lock: used to prevent concurrent access to 
timestamp arrays */ + spinlock_t tx_ts_lock; + int pending_tx_timestamps; + struct sk_buff *tx_ts_skb_queue[LAN743X_PTP_NUMBER_OF_TX_TIMESTAMPS]; + unsigned int tx_ts_ignore_sync_queue; + int tx_ts_skb_queue_size; + u32 tx_ts_seconds_queue[LAN743X_PTP_NUMBER_OF_TX_TIMESTAMPS]; + u32 tx_ts_nseconds_queue[LAN743X_PTP_NUMBER_OF_TX_TIMESTAMPS]; + u32 tx_ts_header_queue[LAN743X_PTP_NUMBER_OF_TX_TIMESTAMPS]; + int tx_ts_queue_size; +}; + +#endif /* _LAN743X_PTP_H */ diff --git a/drivers/net/ethernet/microchip/lan966x/Kconfig b/drivers/net/ethernet/microchip/lan966x/Kconfig new file mode 100644 index 000000000..49e1464a4 --- /dev/null +++ b/drivers/net/ethernet/microchip/lan966x/Kconfig @@ -0,0 +1,11 @@ +config LAN966X_SWITCH + tristate "Lan966x switch driver" + depends on PTP_1588_CLOCK_OPTIONAL + depends on HAS_IOMEM + depends on OF + depends on NET_SWITCHDEV + depends on BRIDGE || BRIDGE=n + select PHYLINK + select PACKING + help + This driver supports the Lan966x network switch device. diff --git a/drivers/net/ethernet/microchip/lan966x/Makefile b/drivers/net/ethernet/microchip/lan966x/Makefile new file mode 100644 index 000000000..962f7c5f9 --- /dev/null +++ b/drivers/net/ethernet/microchip/lan966x/Makefile @@ -0,0 +1,14 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Makefile for the Microchip Lan966x network device drivers. +# + +obj-$(CONFIG_LAN966X_SWITCH) += lan966x-switch.o + +lan966x-switch-objs := lan966x_main.o lan966x_phylink.o lan966x_port.o \ + lan966x_mac.o lan966x_ethtool.o lan966x_switchdev.o \ + lan966x_vlan.o lan966x_fdb.o lan966x_mdb.o \ + lan966x_ptp.o lan966x_fdma.o lan966x_lag.o \ + lan966x_tc.o lan966x_mqprio.o lan966x_taprio.o \ + lan966x_tbf.o lan966x_cbs.o lan966x_ets.o \ + lan966x_tc_matchall.o lan966x_police.o lan966x_mirror.o diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_cbs.c b/drivers/net/ethernet/microchip/lan966x/lan966x_cbs.c new file mode 100644 index 000000000..70cbbf8d2 --- /dev/null +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_cbs.c @@ -0,0 +1,70 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include "lan966x_main.h" + +int lan966x_cbs_add(struct lan966x_port *port, + struct tc_cbs_qopt_offload *qopt) +{ + struct lan966x *lan966x = port->lan966x; + u32 cir, cbs; + u8 se_idx; + + /* Check for invalid values */ + if (qopt->idleslope <= 0 || + qopt->sendslope >= 0 || + qopt->locredit >= qopt->hicredit) + return -EINVAL; + + se_idx = SE_IDX_QUEUE + port->chip_port * NUM_PRIO_QUEUES + qopt->queue; + cir = qopt->idleslope; + cbs = (qopt->idleslope - qopt->sendslope) * + (qopt->hicredit - qopt->locredit) / + -qopt->sendslope; + + /* Rate unit is 100 kbps */ + cir = DIV_ROUND_UP(cir, 100); + /* Avoid using zero rate */ + cir = cir ?: 1; + /* Burst unit is 4kB */ + cbs = DIV_ROUND_UP(cbs, 4096); + /* Avoid using zero burst */ + cbs = cbs ?: 1; + + /* Check that actually the result can be written */ + if (cir > GENMASK(15, 0) || + cbs > GENMASK(6, 0)) + return -EINVAL; + + lan_rmw(QSYS_SE_CFG_SE_AVB_ENA_SET(1) | + QSYS_SE_CFG_SE_FRM_MODE_SET(1), + QSYS_SE_CFG_SE_AVB_ENA | + QSYS_SE_CFG_SE_FRM_MODE, + lan966x, QSYS_SE_CFG(se_idx)); + + lan_wr(QSYS_CIR_CFG_CIR_RATE_SET(cir) | + QSYS_CIR_CFG_CIR_BURST_SET(cbs), + lan966x, QSYS_CIR_CFG(se_idx)); + + return 0; +} + +int lan966x_cbs_del(struct lan966x_port *port, + struct tc_cbs_qopt_offload *qopt) +{ + struct lan966x *lan966x = port->lan966x; + u8 se_idx; + + se_idx = SE_IDX_QUEUE + port->chip_port * NUM_PRIO_QUEUES + qopt->queue; + + lan_rmw(QSYS_SE_CFG_SE_AVB_ENA_SET(1) | + 
QSYS_SE_CFG_SE_FRM_MODE_SET(0), + QSYS_SE_CFG_SE_AVB_ENA | + QSYS_SE_CFG_SE_FRM_MODE, + lan966x, QSYS_SE_CFG(se_idx)); + + lan_wr(QSYS_CIR_CFG_CIR_RATE_SET(0) | + QSYS_CIR_CFG_CIR_BURST_SET(0), + lan966x, QSYS_CIR_CFG(se_idx)); + + return 0; +} diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c b/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c new file mode 100644 index 000000000..06811c60d --- /dev/null +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c @@ -0,0 +1,727 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include <linux/netdevice.h> + +#include "lan966x_main.h" + +/* Number of traffic classes */ +#define LAN966X_NUM_TC 8 +#define LAN966X_STATS_CHECK_DELAY (2 * HZ) + +static const struct lan966x_stat_layout lan966x_stats_layout[] = { + { .name = "rx_octets", .offset = 0x00, }, + { .name = "rx_unicast", .offset = 0x01, }, + { .name = "rx_multicast", .offset = 0x02 }, + { .name = "rx_broadcast", .offset = 0x03 }, + { .name = "rx_short", .offset = 0x04 }, + { .name = "rx_frag", .offset = 0x05 }, + { .name = "rx_jabber", .offset = 0x06 }, + { .name = "rx_crc", .offset = 0x07 }, + { .name = "rx_symbol_err", .offset = 0x08 }, + { .name = "rx_sz_64", .offset = 0x09 }, + { .name = "rx_sz_65_127", .offset = 0x0a}, + { .name = "rx_sz_128_255", .offset = 0x0b}, + { .name = "rx_sz_256_511", .offset = 0x0c }, + { .name = "rx_sz_512_1023", .offset = 0x0d }, + { .name = "rx_sz_1024_1526", .offset = 0x0e }, + { .name = "rx_sz_jumbo", .offset = 0x0f }, + { .name = "rx_pause", .offset = 0x10 }, + { .name = "rx_control", .offset = 0x11 }, + { .name = "rx_long", .offset = 0x12 }, + { .name = "rx_cat_drop", .offset = 0x13 }, + { .name = "rx_red_prio_0", .offset = 0x14 }, + { .name = "rx_red_prio_1", .offset = 0x15 }, + { .name = "rx_red_prio_2", .offset = 0x16 }, + { .name = "rx_red_prio_3", .offset = 0x17 }, + { .name = "rx_red_prio_4", .offset = 0x18 }, + { .name = "rx_red_prio_5", .offset = 0x19 }, + { .name = "rx_red_prio_6", .offset = 0x1a }, + { .name = "rx_red_prio_7", .offset = 0x1b }, + { .name = "rx_yellow_prio_0", .offset = 0x1c }, + { .name = "rx_yellow_prio_1", .offset = 0x1d }, + { .name = "rx_yellow_prio_2", .offset = 0x1e }, + { .name = "rx_yellow_prio_3", .offset = 0x1f }, + { .name = "rx_yellow_prio_4", .offset = 0x20 }, + { .name = "rx_yellow_prio_5", .offset = 0x21 }, + { .name = "rx_yellow_prio_6", .offset = 0x22 }, + { .name = "rx_yellow_prio_7", .offset = 0x23 }, + { .name = "rx_green_prio_0", .offset = 0x24 }, + { .name = "rx_green_prio_1", .offset = 0x25 }, + { .name = "rx_green_prio_2", .offset = 0x26 }, + { .name = "rx_green_prio_3", .offset = 0x27 }, + { .name = "rx_green_prio_4", .offset = 0x28 }, + { .name = "rx_green_prio_5", .offset = 0x29 }, + { .name = "rx_green_prio_6", .offset = 0x2a }, + { .name = "rx_green_prio_7", .offset = 0x2b }, + { .name = "rx_assembly_err", .offset = 0x2c }, + { .name = "rx_smd_err", .offset = 0x2d }, + { .name = "rx_assembly_ok", .offset = 0x2e }, + { .name = "rx_merge_frag", .offset = 0x2f }, + { .name = "rx_pmac_octets", .offset = 0x30, }, + { .name = "rx_pmac_unicast", .offset = 0x31, }, + { .name = "rx_pmac_multicast", .offset = 0x32 }, + { .name = "rx_pmac_broadcast", .offset = 0x33 }, + { .name = "rx_pmac_short", .offset = 0x34 }, + { .name = "rx_pmac_frag", .offset = 0x35 }, + { .name = "rx_pmac_jabber", .offset = 0x36 }, + { .name = "rx_pmac_crc", .offset = 0x37 }, + { .name = "rx_pmac_symbol_err", .offset = 0x38 }, + { .name = "rx_pmac_sz_64", .offset = 0x39 }, + { .name = "rx_pmac_sz_65_127", 
.offset = 0x3a }, + { .name = "rx_pmac_sz_128_255", .offset = 0x3b }, + { .name = "rx_pmac_sz_256_511", .offset = 0x3c }, + { .name = "rx_pmac_sz_512_1023", .offset = 0x3d }, + { .name = "rx_pmac_sz_1024_1526", .offset = 0x3e }, + { .name = "rx_pmac_sz_jumbo", .offset = 0x3f }, + { .name = "rx_pmac_pause", .offset = 0x40 }, + { .name = "rx_pmac_control", .offset = 0x41 }, + { .name = "rx_pmac_long", .offset = 0x42 }, + + { .name = "tx_octets", .offset = 0x80, }, + { .name = "tx_unicast", .offset = 0x81, }, + { .name = "tx_multicast", .offset = 0x82 }, + { .name = "tx_broadcast", .offset = 0x83 }, + { .name = "tx_col", .offset = 0x84 }, + { .name = "tx_drop", .offset = 0x85 }, + { .name = "tx_pause", .offset = 0x86 }, + { .name = "tx_sz_64", .offset = 0x87 }, + { .name = "tx_sz_65_127", .offset = 0x88 }, + { .name = "tx_sz_128_255", .offset = 0x89 }, + { .name = "tx_sz_256_511", .offset = 0x8a }, + { .name = "tx_sz_512_1023", .offset = 0x8b }, + { .name = "tx_sz_1024_1526", .offset = 0x8c }, + { .name = "tx_sz_jumbo", .offset = 0x8d }, + { .name = "tx_yellow_prio_0", .offset = 0x8e }, + { .name = "tx_yellow_prio_1", .offset = 0x8f }, + { .name = "tx_yellow_prio_2", .offset = 0x90 }, + { .name = "tx_yellow_prio_3", .offset = 0x91 }, + { .name = "tx_yellow_prio_4", .offset = 0x92 }, + { .name = "tx_yellow_prio_5", .offset = 0x93 }, + { .name = "tx_yellow_prio_6", .offset = 0x94 }, + { .name = "tx_yellow_prio_7", .offset = 0x95 }, + { .name = "tx_green_prio_0", .offset = 0x96 }, + { .name = "tx_green_prio_1", .offset = 0x97 }, + { .name = "tx_green_prio_2", .offset = 0x98 }, + { .name = "tx_green_prio_3", .offset = 0x99 }, + { .name = "tx_green_prio_4", .offset = 0x9a }, + { .name = "tx_green_prio_5", .offset = 0x9b }, + { .name = "tx_green_prio_6", .offset = 0x9c }, + { .name = "tx_green_prio_7", .offset = 0x9d }, + { .name = "tx_aged", .offset = 0x9e }, + { .name = "tx_llct", .offset = 0x9f }, + { .name = "tx_ct", .offset = 0xa0 }, + { .name = "tx_mm_hold", .offset = 0xa1 }, + { .name = "tx_merge_frag", .offset = 0xa2 }, + { .name = "tx_pmac_octets", .offset = 0xa3, }, + { .name = "tx_pmac_unicast", .offset = 0xa4, }, + { .name = "tx_pmac_multicast", .offset = 0xa5 }, + { .name = "tx_pmac_broadcast", .offset = 0xa6 }, + { .name = "tx_pmac_pause", .offset = 0xa7 }, + { .name = "tx_pmac_sz_64", .offset = 0xa8 }, + { .name = "tx_pmac_sz_65_127", .offset = 0xa9 }, + { .name = "tx_pmac_sz_128_255", .offset = 0xaa }, + { .name = "tx_pmac_sz_256_511", .offset = 0xab }, + { .name = "tx_pmac_sz_512_1023", .offset = 0xac }, + { .name = "tx_pmac_sz_1024_1526", .offset = 0xad }, + { .name = "tx_pmac_sz_jumbo", .offset = 0xae }, + + { .name = "dr_local", .offset = 0x100 }, + { .name = "dr_tail", .offset = 0x101 }, + { .name = "dr_yellow_prio_0", .offset = 0x102 }, + { .name = "dr_yellow_prio_1", .offset = 0x103 }, + { .name = "dr_yellow_prio_2", .offset = 0x104 }, + { .name = "dr_yellow_prio_3", .offset = 0x105 }, + { .name = "dr_yellow_prio_4", .offset = 0x106 }, + { .name = "dr_yellow_prio_5", .offset = 0x107 }, + { .name = "dr_yellow_prio_6", .offset = 0x108 }, + { .name = "dr_yellow_prio_7", .offset = 0x109 }, + { .name = "dr_green_prio_0", .offset = 0x10a }, + { .name = "dr_green_prio_1", .offset = 0x10b }, + { .name = "dr_green_prio_2", .offset = 0x10c }, + { .name = "dr_green_prio_3", .offset = 0x10d }, + { .name = "dr_green_prio_4", .offset = 0x10e }, + { .name = "dr_green_prio_5", .offset = 0x10f }, + { .name = "dr_green_prio_6", .offset = 0x110 }, + { .name = "dr_green_prio_7", .offset = 0x111 
}, +}; + +/* The following numbers are indexes into lan966x_stats_layout[] */ +#define SYS_COUNT_RX_OCT 0 +#define SYS_COUNT_RX_UC 1 +#define SYS_COUNT_RX_MC 2 +#define SYS_COUNT_RX_BC 3 +#define SYS_COUNT_RX_SHORT 4 +#define SYS_COUNT_RX_FRAG 5 +#define SYS_COUNT_RX_JABBER 6 +#define SYS_COUNT_RX_CRC 7 +#define SYS_COUNT_RX_SYMBOL_ERR 8 +#define SYS_COUNT_RX_SZ_64 9 +#define SYS_COUNT_RX_SZ_65_127 10 +#define SYS_COUNT_RX_SZ_128_255 11 +#define SYS_COUNT_RX_SZ_256_511 12 +#define SYS_COUNT_RX_SZ_512_1023 13 +#define SYS_COUNT_RX_SZ_1024_1526 14 +#define SYS_COUNT_RX_SZ_JUMBO 15 +#define SYS_COUNT_RX_PAUSE 16 +#define SYS_COUNT_RX_CONTROL 17 +#define SYS_COUNT_RX_LONG 18 +#define SYS_COUNT_RX_CAT_DROP 19 +#define SYS_COUNT_RX_RED_PRIO_0 20 +#define SYS_COUNT_RX_RED_PRIO_1 21 +#define SYS_COUNT_RX_RED_PRIO_2 22 +#define SYS_COUNT_RX_RED_PRIO_3 23 +#define SYS_COUNT_RX_RED_PRIO_4 24 +#define SYS_COUNT_RX_RED_PRIO_5 25 +#define SYS_COUNT_RX_RED_PRIO_6 26 +#define SYS_COUNT_RX_RED_PRIO_7 27 +#define SYS_COUNT_RX_YELLOW_PRIO_0 28 +#define SYS_COUNT_RX_YELLOW_PRIO_1 29 +#define SYS_COUNT_RX_YELLOW_PRIO_2 30 +#define SYS_COUNT_RX_YELLOW_PRIO_3 31 +#define SYS_COUNT_RX_YELLOW_PRIO_4 32 +#define SYS_COUNT_RX_YELLOW_PRIO_5 33 +#define SYS_COUNT_RX_YELLOW_PRIO_6 34 +#define SYS_COUNT_RX_YELLOW_PRIO_7 35 +#define SYS_COUNT_RX_GREEN_PRIO_0 36 +#define SYS_COUNT_RX_GREEN_PRIO_1 37 +#define SYS_COUNT_RX_GREEN_PRIO_2 38 +#define SYS_COUNT_RX_GREEN_PRIO_3 39 +#define SYS_COUNT_RX_GREEN_PRIO_4 40 +#define SYS_COUNT_RX_GREEN_PRIO_5 41 +#define SYS_COUNT_RX_GREEN_PRIO_6 42 +#define SYS_COUNT_RX_GREEN_PRIO_7 43 +#define SYS_COUNT_RX_ASSEMBLY_ERR 44 +#define SYS_COUNT_RX_SMD_ERR 45 +#define SYS_COUNT_RX_ASSEMBLY_OK 46 +#define SYS_COUNT_RX_MERGE_FRAG 47 +#define SYS_COUNT_RX_PMAC_OCT 48 +#define SYS_COUNT_RX_PMAC_UC 49 +#define SYS_COUNT_RX_PMAC_MC 50 +#define SYS_COUNT_RX_PMAC_BC 51 +#define SYS_COUNT_RX_PMAC_SHORT 52 +#define SYS_COUNT_RX_PMAC_FRAG 53 +#define SYS_COUNT_RX_PMAC_JABBER 54 +#define SYS_COUNT_RX_PMAC_CRC 55 +#define SYS_COUNT_RX_PMAC_SYMBOL_ERR 56 +#define SYS_COUNT_RX_PMAC_SZ_64 57 +#define SYS_COUNT_RX_PMAC_SZ_65_127 58 +#define SYS_COUNT_RX_PMAC_SZ_128_255 59 +#define SYS_COUNT_RX_PMAC_SZ_256_511 60 +#define SYS_COUNT_RX_PMAC_SZ_512_1023 61 +#define SYS_COUNT_RX_PMAC_SZ_1024_1526 62 +#define SYS_COUNT_RX_PMAC_SZ_JUMBO 63 +#define SYS_COUNT_RX_PMAC_PAUSE 64 +#define SYS_COUNT_RX_PMAC_CONTROL 65 +#define SYS_COUNT_RX_PMAC_LONG 66 + +#define SYS_COUNT_TX_OCT 67 +#define SYS_COUNT_TX_UC 68 +#define SYS_COUNT_TX_MC 69 +#define SYS_COUNT_TX_BC 70 +#define SYS_COUNT_TX_COL 71 +#define SYS_COUNT_TX_DROP 72 +#define SYS_COUNT_TX_PAUSE 73 +#define SYS_COUNT_TX_SZ_64 74 +#define SYS_COUNT_TX_SZ_65_127 75 +#define SYS_COUNT_TX_SZ_128_255 76 +#define SYS_COUNT_TX_SZ_256_511 77 +#define SYS_COUNT_TX_SZ_512_1023 78 +#define SYS_COUNT_TX_SZ_1024_1526 79 +#define SYS_COUNT_TX_SZ_JUMBO 80 +#define SYS_COUNT_TX_YELLOW_PRIO_0 81 +#define SYS_COUNT_TX_YELLOW_PRIO_1 82 +#define SYS_COUNT_TX_YELLOW_PRIO_2 83 +#define SYS_COUNT_TX_YELLOW_PRIO_3 84 +#define SYS_COUNT_TX_YELLOW_PRIO_4 85 +#define SYS_COUNT_TX_YELLOW_PRIO_5 86 +#define SYS_COUNT_TX_YELLOW_PRIO_6 87 +#define SYS_COUNT_TX_YELLOW_PRIO_7 88 +#define SYS_COUNT_TX_GREEN_PRIO_0 89 +#define SYS_COUNT_TX_GREEN_PRIO_1 90 +#define SYS_COUNT_TX_GREEN_PRIO_2 91 +#define SYS_COUNT_TX_GREEN_PRIO_3 92 +#define SYS_COUNT_TX_GREEN_PRIO_4 93 +#define SYS_COUNT_TX_GREEN_PRIO_5 94 +#define SYS_COUNT_TX_GREEN_PRIO_6 95 +#define SYS_COUNT_TX_GREEN_PRIO_7 96 +#define 
SYS_COUNT_TX_AGED 97 +#define SYS_COUNT_TX_LLCT 98 +#define SYS_COUNT_TX_CT 99 +#define SYS_COUNT_TX_MM_HOLD 100 +#define SYS_COUNT_TX_MERGE_FRAG 101 +#define SYS_COUNT_TX_PMAC_OCT 102 +#define SYS_COUNT_TX_PMAC_UC 103 +#define SYS_COUNT_TX_PMAC_MC 104 +#define SYS_COUNT_TX_PMAC_BC 105 +#define SYS_COUNT_TX_PMAC_PAUSE 106 +#define SYS_COUNT_TX_PMAC_SZ_64 107 +#define SYS_COUNT_TX_PMAC_SZ_65_127 108 +#define SYS_COUNT_TX_PMAC_SZ_128_255 109 +#define SYS_COUNT_TX_PMAC_SZ_256_511 110 +#define SYS_COUNT_TX_PMAC_SZ_512_1023 111 +#define SYS_COUNT_TX_PMAC_SZ_1024_1526 112 +#define SYS_COUNT_TX_PMAC_SZ_JUMBO 113 + +#define SYS_COUNT_DR_LOCAL 114 +#define SYS_COUNT_DR_TAIL 115 +#define SYS_COUNT_DR_YELLOW_PRIO_0 116 +#define SYS_COUNT_DR_YELLOW_PRIO_1 117 +#define SYS_COUNT_DR_YELLOW_PRIO_2 118 +#define SYS_COUNT_DR_YELLOW_PRIO_3 119 +#define SYS_COUNT_DR_YELLOW_PRIO_4 120 +#define SYS_COUNT_DR_YELLOW_PRIO_5 121 +#define SYS_COUNT_DR_YELLOW_PRIO_6 122 +#define SYS_COUNT_DR_YELLOW_PRIO_7 123 +#define SYS_COUNT_DR_GREEN_PRIO_0 124 +#define SYS_COUNT_DR_GREEN_PRIO_1 125 +#define SYS_COUNT_DR_GREEN_PRIO_2 126 +#define SYS_COUNT_DR_GREEN_PRIO_3 127 +#define SYS_COUNT_DR_GREEN_PRIO_4 128 +#define SYS_COUNT_DR_GREEN_PRIO_5 129 +#define SYS_COUNT_DR_GREEN_PRIO_6 130 +#define SYS_COUNT_DR_GREEN_PRIO_7 131 + +/* Add a possibly wrapping 32 bit value to a 64 bit counter */ +static void lan966x_add_cnt(u64 *cnt, u32 val) +{ + if (val < (*cnt & U32_MAX)) + *cnt += (u64)1 << 32; /* value has wrapped */ + + *cnt = (*cnt & ~(u64)U32_MAX) + val; +} + +static void lan966x_stats_update(struct lan966x *lan966x) +{ + int i, j; + + mutex_lock(&lan966x->stats_lock); + + for (i = 0; i < lan966x->num_phys_ports; i++) { + uint idx = i * lan966x->num_stats; + + lan_wr(SYS_STAT_CFG_STAT_VIEW_SET(i), + lan966x, SYS_STAT_CFG); + + for (j = 0; j < lan966x->num_stats; j++) { + u32 offset = lan966x->stats_layout[j].offset; + + lan966x_add_cnt(&lan966x->stats[idx++], + lan_rd(lan966x, SYS_CNT(offset))); + } + } + + mutex_unlock(&lan966x->stats_lock); +} + +static int lan966x_get_sset_count(struct net_device *dev, int sset) +{ + struct lan966x_port *port = netdev_priv(dev); + struct lan966x *lan966x = port->lan966x; + + if (sset != ETH_SS_STATS) + return -EOPNOTSUPP; + + return lan966x->num_stats; +} + +static void lan966x_get_strings(struct net_device *netdev, u32 sset, u8 *data) +{ + struct lan966x_port *port = netdev_priv(netdev); + struct lan966x *lan966x = port->lan966x; + int i; + + if (sset != ETH_SS_STATS) + return; + + for (i = 0; i < lan966x->num_stats; i++) + memcpy(data + i * ETH_GSTRING_LEN, + lan966x->stats_layout[i].name, ETH_GSTRING_LEN); +} + +static void lan966x_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct lan966x_port *port = netdev_priv(dev); + struct lan966x *lan966x = port->lan966x; + int i; + + /* check and update now */ + lan966x_stats_update(lan966x); + + /* Copy all counters */ + for (i = 0; i < lan966x->num_stats; i++) + *data++ = lan966x->stats[port->chip_port * + lan966x->num_stats + i]; +} + +static void lan966x_get_eth_mac_stats(struct net_device *dev, + struct ethtool_eth_mac_stats *mac_stats) +{ + struct lan966x_port *port = netdev_priv(dev); + struct lan966x *lan966x = port->lan966x; + u32 idx; + + lan966x_stats_update(lan966x); + + idx = port->chip_port * lan966x->num_stats; + + mutex_lock(&lan966x->stats_lock); + + mac_stats->FramesTransmittedOK = + lan966x->stats[idx + SYS_COUNT_TX_UC] + + lan966x->stats[idx + SYS_COUNT_TX_MC] + + 
lan966x->stats[idx + SYS_COUNT_TX_BC] + + lan966x->stats[idx + SYS_COUNT_TX_PMAC_UC] + + lan966x->stats[idx + SYS_COUNT_TX_PMAC_MC] + + lan966x->stats[idx + SYS_COUNT_TX_PMAC_BC]; + mac_stats->SingleCollisionFrames = + lan966x->stats[idx + SYS_COUNT_TX_COL]; + mac_stats->MultipleCollisionFrames = 0; + mac_stats->FramesReceivedOK = + lan966x->stats[idx + SYS_COUNT_RX_UC] + + lan966x->stats[idx + SYS_COUNT_RX_MC] + + lan966x->stats[idx + SYS_COUNT_RX_BC]; + mac_stats->FrameCheckSequenceErrors = + lan966x->stats[idx + SYS_COUNT_RX_CRC] + + lan966x->stats[idx + SYS_COUNT_RX_CRC]; + mac_stats->AlignmentErrors = 0; + mac_stats->OctetsTransmittedOK = + lan966x->stats[idx + SYS_COUNT_TX_OCT] + + lan966x->stats[idx + SYS_COUNT_TX_PMAC_OCT]; + mac_stats->FramesWithDeferredXmissions = + lan966x->stats[idx + SYS_COUNT_TX_MM_HOLD]; + mac_stats->LateCollisions = 0; + mac_stats->FramesAbortedDueToXSColls = 0; + mac_stats->FramesLostDueToIntMACXmitError = 0; + mac_stats->CarrierSenseErrors = 0; + mac_stats->OctetsReceivedOK = + lan966x->stats[idx + SYS_COUNT_RX_OCT]; + mac_stats->FramesLostDueToIntMACRcvError = 0; + mac_stats->MulticastFramesXmittedOK = + lan966x->stats[idx + SYS_COUNT_TX_MC] + + lan966x->stats[idx + SYS_COUNT_TX_PMAC_MC]; + mac_stats->BroadcastFramesXmittedOK = + lan966x->stats[idx + SYS_COUNT_TX_BC] + + lan966x->stats[idx + SYS_COUNT_TX_PMAC_BC]; + mac_stats->FramesWithExcessiveDeferral = 0; + mac_stats->MulticastFramesReceivedOK = + lan966x->stats[idx + SYS_COUNT_RX_MC]; + mac_stats->BroadcastFramesReceivedOK = + lan966x->stats[idx + SYS_COUNT_RX_BC]; + mac_stats->InRangeLengthErrors = + lan966x->stats[idx + SYS_COUNT_RX_FRAG] + + lan966x->stats[idx + SYS_COUNT_RX_JABBER] + + lan966x->stats[idx + SYS_COUNT_RX_CRC] + + lan966x->stats[idx + SYS_COUNT_RX_PMAC_FRAG] + + lan966x->stats[idx + SYS_COUNT_RX_PMAC_JABBER] + + lan966x->stats[idx + SYS_COUNT_RX_PMAC_CRC]; + mac_stats->OutOfRangeLengthField = + lan966x->stats[idx + SYS_COUNT_RX_SHORT] + + lan966x->stats[idx + SYS_COUNT_RX_PMAC_SHORT] + + lan966x->stats[idx + SYS_COUNT_RX_LONG] + + lan966x->stats[idx + SYS_COUNT_RX_PMAC_LONG]; + mac_stats->FrameTooLongErrors = + lan966x->stats[idx + SYS_COUNT_RX_LONG] + + lan966x->stats[idx + SYS_COUNT_RX_PMAC_LONG]; + + mutex_unlock(&lan966x->stats_lock); +} + +static const struct ethtool_rmon_hist_range lan966x_rmon_ranges[] = { + { 0, 64 }, + { 65, 127 }, + { 128, 255 }, + { 256, 511 }, + { 512, 1023 }, + { 1024, 1518 }, + { 1519, 10239 }, + {} +}; + +static void lan966x_get_eth_rmon_stats(struct net_device *dev, + struct ethtool_rmon_stats *rmon_stats, + const struct ethtool_rmon_hist_range **ranges) +{ + struct lan966x_port *port = netdev_priv(dev); + struct lan966x *lan966x = port->lan966x; + u32 idx; + + lan966x_stats_update(lan966x); + + idx = port->chip_port * lan966x->num_stats; + + mutex_lock(&lan966x->stats_lock); + + rmon_stats->undersize_pkts = + lan966x->stats[idx + SYS_COUNT_RX_SHORT] + + lan966x->stats[idx + SYS_COUNT_RX_PMAC_SHORT]; + rmon_stats->oversize_pkts = + lan966x->stats[idx + SYS_COUNT_RX_LONG] + + lan966x->stats[idx + SYS_COUNT_RX_PMAC_LONG]; + rmon_stats->fragments = + lan966x->stats[idx + SYS_COUNT_RX_FRAG] + + lan966x->stats[idx + SYS_COUNT_RX_PMAC_FRAG]; + rmon_stats->jabbers = + lan966x->stats[idx + SYS_COUNT_RX_JABBER] + + lan966x->stats[idx + SYS_COUNT_RX_PMAC_JABBER]; + rmon_stats->hist[0] = + lan966x->stats[idx + SYS_COUNT_RX_SZ_64] + + lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_64]; + rmon_stats->hist[1] = + lan966x->stats[idx + SYS_COUNT_RX_SZ_65_127] + + 
lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_65_127]; + rmon_stats->hist[2] = + lan966x->stats[idx + SYS_COUNT_RX_SZ_128_255] + + lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_128_255]; + rmon_stats->hist[3] = + lan966x->stats[idx + SYS_COUNT_RX_SZ_256_511] + + lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_256_511]; + rmon_stats->hist[4] = + lan966x->stats[idx + SYS_COUNT_RX_SZ_512_1023] + + lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_512_1023]; + rmon_stats->hist[5] = + lan966x->stats[idx + SYS_COUNT_RX_SZ_1024_1526] + + lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_1024_1526]; + rmon_stats->hist[6] = + lan966x->stats[idx + SYS_COUNT_RX_SZ_1024_1526] + + lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_1024_1526]; + + rmon_stats->hist_tx[0] = + lan966x->stats[idx + SYS_COUNT_TX_SZ_64] + + lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_64]; + rmon_stats->hist_tx[1] = + lan966x->stats[idx + SYS_COUNT_TX_SZ_65_127] + + lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_65_127]; + rmon_stats->hist_tx[2] = + lan966x->stats[idx + SYS_COUNT_TX_SZ_128_255] + + lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_128_255]; + rmon_stats->hist_tx[3] = + lan966x->stats[idx + SYS_COUNT_TX_SZ_256_511] + + lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_256_511]; + rmon_stats->hist_tx[4] = + lan966x->stats[idx + SYS_COUNT_TX_SZ_512_1023] + + lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_512_1023]; + rmon_stats->hist_tx[5] = + lan966x->stats[idx + SYS_COUNT_TX_SZ_1024_1526] + + lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_1024_1526]; + rmon_stats->hist_tx[6] = + lan966x->stats[idx + SYS_COUNT_TX_SZ_1024_1526] + + lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_1024_1526]; + + mutex_unlock(&lan966x->stats_lock); + + *ranges = lan966x_rmon_ranges; +} + +static int lan966x_get_link_ksettings(struct net_device *ndev, + struct ethtool_link_ksettings *cmd) +{ + struct lan966x_port *port = netdev_priv(ndev); + + return phylink_ethtool_ksettings_get(port->phylink, cmd); +} + +static int lan966x_set_link_ksettings(struct net_device *ndev, + const struct ethtool_link_ksettings *cmd) +{ + struct lan966x_port *port = netdev_priv(ndev); + + return phylink_ethtool_ksettings_set(port->phylink, cmd); +} + +static void lan966x_get_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *pause) +{ + struct lan966x_port *port = netdev_priv(dev); + + phylink_ethtool_get_pauseparam(port->phylink, pause); +} + +static int lan966x_set_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *pause) +{ + struct lan966x_port *port = netdev_priv(dev); + + return phylink_ethtool_set_pauseparam(port->phylink, pause); +} + +static int lan966x_get_ts_info(struct net_device *dev, + struct ethtool_ts_info *info) +{ + struct lan966x_port *port = netdev_priv(dev); + struct lan966x *lan966x = port->lan966x; + struct lan966x_phc *phc; + + if (!lan966x->ptp) + return ethtool_op_get_ts_info(dev, info); + + phc = &lan966x->phc[LAN966X_PHC_PORT]; + + info->phc_index = phc->clock ? 
ptp_clock_index(phc->clock) : -1; + if (info->phc_index == -1) { + info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE; + return 0; + } + info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE | + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON) | + BIT(HWTSTAMP_TX_ONESTEP_SYNC); + info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | + BIT(HWTSTAMP_FILTER_ALL); + + return 0; +} + +const struct ethtool_ops lan966x_ethtool_ops = { + .get_link_ksettings = lan966x_get_link_ksettings, + .set_link_ksettings = lan966x_set_link_ksettings, + .get_pauseparam = lan966x_get_pauseparam, + .set_pauseparam = lan966x_set_pauseparam, + .get_sset_count = lan966x_get_sset_count, + .get_strings = lan966x_get_strings, + .get_ethtool_stats = lan966x_get_ethtool_stats, + .get_eth_mac_stats = lan966x_get_eth_mac_stats, + .get_rmon_stats = lan966x_get_eth_rmon_stats, + .get_link = ethtool_op_get_link, + .get_ts_info = lan966x_get_ts_info, +}; + +static void lan966x_check_stats_work(struct work_struct *work) +{ + struct delayed_work *del_work = to_delayed_work(work); + struct lan966x *lan966x = container_of(del_work, struct lan966x, + stats_work); + + lan966x_stats_update(lan966x); + + queue_delayed_work(lan966x->stats_queue, &lan966x->stats_work, + LAN966X_STATS_CHECK_DELAY); +} + +void lan966x_stats_get(struct net_device *dev, + struct rtnl_link_stats64 *stats) +{ + struct lan966x_port *port = netdev_priv(dev); + struct lan966x *lan966x = port->lan966x; + u32 idx; + int i; + + idx = port->chip_port * lan966x->num_stats; + + mutex_lock(&lan966x->stats_lock); + + stats->rx_bytes = lan966x->stats[idx + SYS_COUNT_RX_OCT] + + lan966x->stats[idx + SYS_COUNT_RX_PMAC_OCT]; + + stats->rx_packets = lan966x->stats[idx + SYS_COUNT_RX_SHORT] + + lan966x->stats[idx + SYS_COUNT_RX_FRAG] + + lan966x->stats[idx + SYS_COUNT_RX_JABBER] + + lan966x->stats[idx + SYS_COUNT_RX_CRC] + + lan966x->stats[idx + SYS_COUNT_RX_SYMBOL_ERR] + + lan966x->stats[idx + SYS_COUNT_RX_SZ_64] + + lan966x->stats[idx + SYS_COUNT_RX_SZ_65_127] + + lan966x->stats[idx + SYS_COUNT_RX_SZ_128_255] + + lan966x->stats[idx + SYS_COUNT_RX_SZ_256_511] + + lan966x->stats[idx + SYS_COUNT_RX_SZ_512_1023] + + lan966x->stats[idx + SYS_COUNT_RX_SZ_1024_1526] + + lan966x->stats[idx + SYS_COUNT_RX_SZ_JUMBO] + + lan966x->stats[idx + SYS_COUNT_RX_LONG] + + lan966x->stats[idx + SYS_COUNT_RX_PMAC_SHORT] + + lan966x->stats[idx + SYS_COUNT_RX_PMAC_FRAG] + + lan966x->stats[idx + SYS_COUNT_RX_PMAC_JABBER] + + lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_64] + + lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_65_127] + + lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_128_255] + + lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_256_511] + + lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_512_1023] + + lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_1024_1526] + + lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_JUMBO]; + + stats->multicast = lan966x->stats[idx + SYS_COUNT_RX_MC] + + lan966x->stats[idx + SYS_COUNT_RX_PMAC_MC]; + + stats->rx_errors = lan966x->stats[idx + SYS_COUNT_RX_SHORT] + + lan966x->stats[idx + SYS_COUNT_RX_FRAG] + + lan966x->stats[idx + SYS_COUNT_RX_JABBER] + + lan966x->stats[idx + SYS_COUNT_RX_CRC] + + lan966x->stats[idx + SYS_COUNT_RX_SYMBOL_ERR] + + lan966x->stats[idx + SYS_COUNT_RX_LONG]; + + stats->rx_dropped = dev->stats.rx_dropped + + lan966x->stats[idx + 
SYS_COUNT_RX_LONG] + + lan966x->stats[idx + SYS_COUNT_DR_LOCAL] + + lan966x->stats[idx + SYS_COUNT_DR_TAIL] + + lan966x->stats[idx + SYS_COUNT_RX_RED_PRIO_0] + + lan966x->stats[idx + SYS_COUNT_RX_RED_PRIO_1] + + lan966x->stats[idx + SYS_COUNT_RX_RED_PRIO_2] + + lan966x->stats[idx + SYS_COUNT_RX_RED_PRIO_3] + + lan966x->stats[idx + SYS_COUNT_RX_RED_PRIO_4] + + lan966x->stats[idx + SYS_COUNT_RX_RED_PRIO_5] + + lan966x->stats[idx + SYS_COUNT_RX_RED_PRIO_6] + + lan966x->stats[idx + SYS_COUNT_RX_RED_PRIO_7]; + + for (i = 0; i < LAN966X_NUM_TC; i++) { + stats->rx_dropped += + (lan966x->stats[idx + SYS_COUNT_DR_YELLOW_PRIO_0 + i] + + lan966x->stats[idx + SYS_COUNT_DR_GREEN_PRIO_0 + i]); + } + + /* Get Tx stats */ + stats->tx_bytes = lan966x->stats[idx + SYS_COUNT_TX_OCT] + + lan966x->stats[idx + SYS_COUNT_TX_PMAC_OCT]; + + stats->tx_packets = lan966x->stats[idx + SYS_COUNT_TX_SZ_64] + + lan966x->stats[idx + SYS_COUNT_TX_SZ_65_127] + + lan966x->stats[idx + SYS_COUNT_TX_SZ_128_255] + + lan966x->stats[idx + SYS_COUNT_TX_SZ_256_511] + + lan966x->stats[idx + SYS_COUNT_TX_SZ_512_1023] + + lan966x->stats[idx + SYS_COUNT_TX_SZ_1024_1526] + + lan966x->stats[idx + SYS_COUNT_TX_SZ_JUMBO] + + lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_64] + + lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_65_127] + + lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_128_255] + + lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_256_511] + + lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_512_1023] + + lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_1024_1526] + + lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_JUMBO]; + + stats->tx_dropped = lan966x->stats[idx + SYS_COUNT_TX_DROP] + + lan966x->stats[idx + SYS_COUNT_TX_AGED]; + + stats->collisions = lan966x->stats[idx + SYS_COUNT_TX_COL]; + + mutex_unlock(&lan966x->stats_lock); +} + +int lan966x_stats_init(struct lan966x *lan966x) +{ + char queue_name[32]; + + lan966x->stats_layout = lan966x_stats_layout; + lan966x->num_stats = ARRAY_SIZE(lan966x_stats_layout); + lan966x->stats = devm_kcalloc(lan966x->dev, lan966x->num_phys_ports * + lan966x->num_stats, + sizeof(u64), GFP_KERNEL); + if (!lan966x->stats) + return -ENOMEM; + + /* Init stats worker */ + mutex_init(&lan966x->stats_lock); + snprintf(queue_name, sizeof(queue_name), "%s-stats", + dev_name(lan966x->dev)); + lan966x->stats_queue = create_singlethread_workqueue(queue_name); + if (!lan966x->stats_queue) + return -ENOMEM; + + INIT_DELAYED_WORK(&lan966x->stats_work, lan966x_check_stats_work); + queue_delayed_work(lan966x->stats_queue, &lan966x->stats_work, + LAN966X_STATS_CHECK_DELAY); + + return 0; +} diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ets.c b/drivers/net/ethernet/microchip/lan966x/lan966x_ets.c new file mode 100644 index 000000000..8310d3f35 --- /dev/null +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ets.c @@ -0,0 +1,96 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include "lan966x_main.h" + +#define DWRR_COST_BIT_WIDTH BIT(5) + +static u32 lan966x_ets_hw_cost(u32 w_min, u32 weight) +{ + u32 res; + + /* Round half up: Multiply with 16 before division, + * add 8 and divide result with 16 again + */ + res = (((DWRR_COST_BIT_WIDTH << 4) * w_min / weight) + 8) >> 4; + return max_t(u32, 1, res) - 1; +} + +int lan966x_ets_add(struct lan966x_port *port, + struct tc_ets_qopt_offload *qopt) +{ + struct tc_ets_qopt_offload_replace_params *params; + struct lan966x *lan966x = port->lan966x; + u32 w_min = 100; + u8 count = 0; + u32 se_idx; + u8 i; + + /* Check the input */ + if (qopt->parent != TC_H_ROOT) + return -EINVAL; + + params = 
&qopt->replace_params; + if (params->bands != NUM_PRIO_QUEUES) + return -EINVAL; + + for (i = 0; i < params->bands; ++i) { + /* In the switch the DWRR is always on the lowest consecutive + * priorities. Due to this, the first priority must map to the + * first DWRR band. + */ + if (params->priomap[i] != (7 - i)) + return -EINVAL; + + if (params->quanta[i] && params->weights[i] == 0) + return -EINVAL; + } + + se_idx = SE_IDX_PORT + port->chip_port; + + /* Find minimum weight */ + for (i = 0; i < params->bands; ++i) { + if (params->quanta[i] == 0) + continue; + + w_min = min(w_min, params->weights[i]); + } + + for (i = 0; i < params->bands; ++i) { + if (params->quanta[i] == 0) + continue; + + ++count; + + lan_wr(lan966x_ets_hw_cost(w_min, params->weights[i]), + lan966x, QSYS_SE_DWRR_CFG(se_idx, 7 - i)); + } + + lan_rmw(QSYS_SE_CFG_SE_DWRR_CNT_SET(count) | + QSYS_SE_CFG_SE_RR_ENA_SET(0), + QSYS_SE_CFG_SE_DWRR_CNT | + QSYS_SE_CFG_SE_RR_ENA, + lan966x, QSYS_SE_CFG(se_idx)); + + return 0; +} + +int lan966x_ets_del(struct lan966x_port *port, + struct tc_ets_qopt_offload *qopt) +{ + struct lan966x *lan966x = port->lan966x; + u32 se_idx; + int i; + + se_idx = SE_IDX_PORT + port->chip_port; + + for (i = 0; i < NUM_PRIO_QUEUES; ++i) + lan_wr(0, lan966x, QSYS_SE_DWRR_CFG(se_idx, i)); + + lan_rmw(QSYS_SE_CFG_SE_DWRR_CNT_SET(0) | + QSYS_SE_CFG_SE_RR_ENA_SET(0), + QSYS_SE_CFG_SE_DWRR_CNT | + QSYS_SE_CFG_SE_RR_ENA, + lan966x, QSYS_SE_CFG(se_idx)); + + return 0; +} diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_fdb.c b/drivers/net/ethernet/microchip/lan966x/lan966x_fdb.c new file mode 100644 index 000000000..2ea263e89 --- /dev/null +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_fdb.c @@ -0,0 +1,289 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include <net/switchdev.h> + +#include "lan966x_main.h" + +struct lan966x_fdb_event_work { + struct work_struct work; + struct switchdev_notifier_fdb_info fdb_info; + struct net_device *dev; + struct net_device *orig_dev; + struct lan966x *lan966x; + unsigned long event; +}; + +struct lan966x_fdb_entry { + struct list_head list; + unsigned char mac[ETH_ALEN] __aligned(2); + u16 vid; + u32 references; +}; + +static struct lan966x_fdb_entry * +lan966x_fdb_find_entry(struct lan966x *lan966x, + struct switchdev_notifier_fdb_info *fdb_info) +{ + struct lan966x_fdb_entry *fdb_entry; + + list_for_each_entry(fdb_entry, &lan966x->fdb_entries, list) { + if (fdb_entry->vid == fdb_info->vid && + ether_addr_equal(fdb_entry->mac, fdb_info->addr)) + return fdb_entry; + } + + return NULL; +} + +static void lan966x_fdb_add_entry(struct lan966x *lan966x, + struct switchdev_notifier_fdb_info *fdb_info) +{ + struct lan966x_fdb_entry *fdb_entry; + + fdb_entry = lan966x_fdb_find_entry(lan966x, fdb_info); + if (fdb_entry) { + fdb_entry->references++; + return; + } + + fdb_entry = kzalloc(sizeof(*fdb_entry), GFP_KERNEL); + if (!fdb_entry) + return; + + ether_addr_copy(fdb_entry->mac, fdb_info->addr); + fdb_entry->vid = fdb_info->vid; + fdb_entry->references = 1; + list_add_tail(&fdb_entry->list, &lan966x->fdb_entries); +} + +static bool lan966x_fdb_del_entry(struct lan966x *lan966x, + struct switchdev_notifier_fdb_info *fdb_info) +{ + struct lan966x_fdb_entry *fdb_entry, *tmp; + + list_for_each_entry_safe(fdb_entry, tmp, &lan966x->fdb_entries, + list) { + if (fdb_entry->vid == fdb_info->vid && + ether_addr_equal(fdb_entry->mac, fdb_info->addr)) { + fdb_entry->references--; + if (!fdb_entry->references) { + list_del(&fdb_entry->list); + kfree(fdb_entry); + return true; + } + 
break; + } + } + + return false; +} + +void lan966x_fdb_write_entries(struct lan966x *lan966x, u16 vid) +{ + struct lan966x_fdb_entry *fdb_entry; + + list_for_each_entry(fdb_entry, &lan966x->fdb_entries, list) { + if (fdb_entry->vid != vid) + continue; + + lan966x_mac_cpu_learn(lan966x, fdb_entry->mac, fdb_entry->vid); + } +} + +void lan966x_fdb_erase_entries(struct lan966x *lan966x, u16 vid) +{ + struct lan966x_fdb_entry *fdb_entry; + + list_for_each_entry(fdb_entry, &lan966x->fdb_entries, list) { + if (fdb_entry->vid != vid) + continue; + + lan966x_mac_cpu_forget(lan966x, fdb_entry->mac, fdb_entry->vid); + } +} + +static void lan966x_fdb_purge_entries(struct lan966x *lan966x) +{ + struct lan966x_fdb_entry *fdb_entry, *tmp; + + list_for_each_entry_safe(fdb_entry, tmp, &lan966x->fdb_entries, list) { + list_del(&fdb_entry->list); + kfree(fdb_entry); + } +} + +int lan966x_fdb_init(struct lan966x *lan966x) +{ + INIT_LIST_HEAD(&lan966x->fdb_entries); + lan966x->fdb_work = alloc_ordered_workqueue("lan966x_order", 0); + if (!lan966x->fdb_work) + return -ENOMEM; + + return 0; +} + +void lan966x_fdb_deinit(struct lan966x *lan966x) +{ + destroy_workqueue(lan966x->fdb_work); + lan966x_fdb_purge_entries(lan966x); +} + +void lan966x_fdb_flush_workqueue(struct lan966x *lan966x) +{ + flush_workqueue(lan966x->fdb_work); +} + +static void lan966x_fdb_port_event_work(struct lan966x_fdb_event_work *fdb_work) +{ + struct switchdev_notifier_fdb_info *fdb_info; + struct lan966x_port *port; + struct lan966x *lan966x; + + lan966x = fdb_work->lan966x; + port = netdev_priv(fdb_work->orig_dev); + fdb_info = &fdb_work->fdb_info; + + switch (fdb_work->event) { + case SWITCHDEV_FDB_ADD_TO_DEVICE: + if (!fdb_info->added_by_user) + break; + lan966x_mac_add_entry(lan966x, port, fdb_info->addr, + fdb_info->vid); + break; + case SWITCHDEV_FDB_DEL_TO_DEVICE: + if (!fdb_info->added_by_user) + break; + lan966x_mac_del_entry(lan966x, fdb_info->addr, + fdb_info->vid); + break; + } +} + +static void lan966x_fdb_bridge_event_work(struct lan966x_fdb_event_work *fdb_work) +{ + struct switchdev_notifier_fdb_info *fdb_info; + struct lan966x *lan966x; + int ret; + + lan966x = fdb_work->lan966x; + fdb_info = &fdb_work->fdb_info; + + /* In case the bridge is called */ + switch (fdb_work->event) { + case SWITCHDEV_FDB_ADD_TO_DEVICE: + /* If there is no front port in this vlan, there is no + * point to copy the frame to CPU because it would be + * just dropped at later point. So add it only if + * there is a port but it is required to store the fdb + * entry for later point when a port actually gets in + * the vlan. 
+ */ + lan966x_fdb_add_entry(lan966x, fdb_info); + if (!lan966x_vlan_cpu_member_cpu_vlan_mask(lan966x, + fdb_info->vid)) + break; + + lan966x_mac_cpu_learn(lan966x, fdb_info->addr, + fdb_info->vid); + break; + case SWITCHDEV_FDB_DEL_TO_DEVICE: + ret = lan966x_fdb_del_entry(lan966x, fdb_info); + if (!lan966x_vlan_cpu_member_cpu_vlan_mask(lan966x, + fdb_info->vid)) + break; + + if (ret) + lan966x_mac_cpu_forget(lan966x, fdb_info->addr, + fdb_info->vid); + break; + } +} + +static void lan966x_fdb_lag_event_work(struct lan966x_fdb_event_work *fdb_work) +{ + struct switchdev_notifier_fdb_info *fdb_info; + struct lan966x_port *port; + struct lan966x *lan966x; + + if (!lan966x_lag_first_port(fdb_work->orig_dev, fdb_work->dev)) + return; + + lan966x = fdb_work->lan966x; + port = netdev_priv(fdb_work->dev); + fdb_info = &fdb_work->fdb_info; + + switch (fdb_work->event) { + case SWITCHDEV_FDB_ADD_TO_DEVICE: + if (!fdb_info->added_by_user) + break; + lan966x_mac_add_entry(lan966x, port, fdb_info->addr, + fdb_info->vid); + break; + case SWITCHDEV_FDB_DEL_TO_DEVICE: + if (!fdb_info->added_by_user) + break; + lan966x_mac_del_entry(lan966x, fdb_info->addr, fdb_info->vid); + break; + } +} + +static void lan966x_fdb_event_work(struct work_struct *work) +{ + struct lan966x_fdb_event_work *fdb_work = + container_of(work, struct lan966x_fdb_event_work, work); + + if (lan966x_netdevice_check(fdb_work->orig_dev)) + lan966x_fdb_port_event_work(fdb_work); + else if (netif_is_bridge_master(fdb_work->orig_dev)) + lan966x_fdb_bridge_event_work(fdb_work); + else if (netif_is_lag_master(fdb_work->orig_dev)) + lan966x_fdb_lag_event_work(fdb_work); + + kfree(fdb_work->fdb_info.addr); + kfree(fdb_work); +} + +int lan966x_handle_fdb(struct net_device *dev, + struct net_device *orig_dev, + unsigned long event, const void *ctx, + const struct switchdev_notifier_fdb_info *fdb_info) +{ + struct lan966x_port *port = netdev_priv(dev); + struct lan966x *lan966x = port->lan966x; + struct lan966x_fdb_event_work *fdb_work; + + if (ctx && ctx != port) + return 0; + + switch (event) { + case SWITCHDEV_FDB_ADD_TO_DEVICE: + case SWITCHDEV_FDB_DEL_TO_DEVICE: + if (lan966x_netdevice_check(orig_dev) && + !fdb_info->added_by_user) + break; + + fdb_work = kzalloc(sizeof(*fdb_work), GFP_ATOMIC); + if (!fdb_work) + return -ENOMEM; + + fdb_work->dev = dev; + fdb_work->orig_dev = orig_dev; + fdb_work->lan966x = lan966x; + fdb_work->event = event; + INIT_WORK(&fdb_work->work, lan966x_fdb_event_work); + memcpy(&fdb_work->fdb_info, fdb_info, sizeof(fdb_work->fdb_info)); + fdb_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC); + if (!fdb_work->fdb_info.addr) + goto err_addr_alloc; + + ether_addr_copy((u8 *)fdb_work->fdb_info.addr, fdb_info->addr); + + queue_work(lan966x->fdb_work, &fdb_work->work); + break; + } + + return 0; +err_addr_alloc: + kfree(fdb_work); + return -ENOMEM; +} diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c b/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c new file mode 100644 index 000000000..e6948939c --- /dev/null +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c @@ -0,0 +1,836 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include "lan966x_main.h" + +static int lan966x_fdma_channel_active(struct lan966x *lan966x) +{ + return lan_rd(lan966x, FDMA_CH_ACTIVE); +} + +static struct page *lan966x_fdma_rx_alloc_page(struct lan966x_rx *rx, + struct lan966x_db *db) +{ + struct lan966x *lan966x = rx->lan966x; + dma_addr_t dma_addr; + struct page *page; + + page = dev_alloc_pages(rx->page_order); + 
if (unlikely(!page)) + return NULL; + + dma_addr = dma_map_page(lan966x->dev, page, 0, + PAGE_SIZE << rx->page_order, + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(lan966x->dev, dma_addr))) + goto free_page; + + db->dataptr = dma_addr; + + return page; + +free_page: + __free_pages(page, rx->page_order); + return NULL; +} + +static void lan966x_fdma_rx_free_pages(struct lan966x_rx *rx) +{ + struct lan966x *lan966x = rx->lan966x; + struct lan966x_rx_dcb *dcb; + struct lan966x_db *db; + int i, j; + + for (i = 0; i < FDMA_DCB_MAX; ++i) { + dcb = &rx->dcbs[i]; + + for (j = 0; j < FDMA_RX_DCB_MAX_DBS; ++j) { + db = &dcb->db[j]; + dma_unmap_single(lan966x->dev, + (dma_addr_t)db->dataptr, + PAGE_SIZE << rx->page_order, + DMA_FROM_DEVICE); + __free_pages(rx->page[i][j], rx->page_order); + } + } +} + +static void lan966x_fdma_rx_add_dcb(struct lan966x_rx *rx, + struct lan966x_rx_dcb *dcb, + u64 nextptr) +{ + struct lan966x_db *db; + int i; + + for (i = 0; i < FDMA_RX_DCB_MAX_DBS; ++i) { + db = &dcb->db[i]; + db->status = FDMA_DCB_STATUS_INTR; + } + + dcb->nextptr = FDMA_DCB_INVALID_DATA; + dcb->info = FDMA_DCB_INFO_DATAL(PAGE_SIZE << rx->page_order); + + rx->last_entry->nextptr = nextptr; + rx->last_entry = dcb; +} + +static int lan966x_fdma_rx_alloc(struct lan966x_rx *rx) +{ + struct lan966x *lan966x = rx->lan966x; + struct lan966x_rx_dcb *dcb; + struct lan966x_db *db; + struct page *page; + int i, j; + int size; + + /* calculate how many pages are needed to allocate the dcbs */ + size = sizeof(struct lan966x_rx_dcb) * FDMA_DCB_MAX; + size = ALIGN(size, PAGE_SIZE); + + rx->dcbs = dma_alloc_coherent(lan966x->dev, size, &rx->dma, GFP_KERNEL); + if (!rx->dcbs) + return -ENOMEM; + + rx->last_entry = rx->dcbs; + rx->db_index = 0; + rx->dcb_index = 0; + + /* Now for each dcb allocate the dbs */ + for (i = 0; i < FDMA_DCB_MAX; ++i) { + dcb = &rx->dcbs[i]; + dcb->info = 0; + + /* For each db allocate a page and map it to the DB dataptr. 
*/ + for (j = 0; j < FDMA_RX_DCB_MAX_DBS; ++j) { + db = &dcb->db[j]; + page = lan966x_fdma_rx_alloc_page(rx, db); + if (!page) + return -ENOMEM; + + db->status = 0; + rx->page[i][j] = page; + } + + lan966x_fdma_rx_add_dcb(rx, dcb, rx->dma + sizeof(*dcb) * i); + } + + return 0; +} + +static void lan966x_fdma_rx_free(struct lan966x_rx *rx) +{ + struct lan966x *lan966x = rx->lan966x; + u32 size; + + /* Now it is possible to do the cleanup of dcb */ + size = sizeof(struct lan966x_tx_dcb) * FDMA_DCB_MAX; + size = ALIGN(size, PAGE_SIZE); + dma_free_coherent(lan966x->dev, size, rx->dcbs, rx->dma); +} + +static void lan966x_fdma_rx_start(struct lan966x_rx *rx) +{ + struct lan966x *lan966x = rx->lan966x; + u32 mask; + + /* When activating a channel, first is required to write the first DCB + * address and then to activate it + */ + lan_wr(lower_32_bits((u64)rx->dma), lan966x, + FDMA_DCB_LLP(rx->channel_id)); + lan_wr(upper_32_bits((u64)rx->dma), lan966x, + FDMA_DCB_LLP1(rx->channel_id)); + + lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(FDMA_RX_DCB_MAX_DBS) | + FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) | + FDMA_CH_CFG_CH_INJ_PORT_SET(0) | + FDMA_CH_CFG_CH_MEM_SET(1), + lan966x, FDMA_CH_CFG(rx->channel_id)); + + /* Start fdma */ + lan_rmw(FDMA_PORT_CTRL_XTR_STOP_SET(0), + FDMA_PORT_CTRL_XTR_STOP, + lan966x, FDMA_PORT_CTRL(0)); + + /* Enable interrupts */ + mask = lan_rd(lan966x, FDMA_INTR_DB_ENA); + mask = FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(mask); + mask |= BIT(rx->channel_id); + lan_rmw(FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(mask), + FDMA_INTR_DB_ENA_INTR_DB_ENA, + lan966x, FDMA_INTR_DB_ENA); + + /* Activate the channel */ + lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(rx->channel_id)), + FDMA_CH_ACTIVATE_CH_ACTIVATE, + lan966x, FDMA_CH_ACTIVATE); +} + +static void lan966x_fdma_rx_disable(struct lan966x_rx *rx) +{ + struct lan966x *lan966x = rx->lan966x; + u32 val; + + /* Disable the channel */ + lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(rx->channel_id)), + FDMA_CH_DISABLE_CH_DISABLE, + lan966x, FDMA_CH_DISABLE); + + readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x, + val, !(val & BIT(rx->channel_id)), + READL_SLEEP_US, READL_TIMEOUT_US); + + lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(rx->channel_id)), + FDMA_CH_DB_DISCARD_DB_DISCARD, + lan966x, FDMA_CH_DB_DISCARD); +} + +static void lan966x_fdma_rx_reload(struct lan966x_rx *rx) +{ + struct lan966x *lan966x = rx->lan966x; + + lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(rx->channel_id)), + FDMA_CH_RELOAD_CH_RELOAD, + lan966x, FDMA_CH_RELOAD); +} + +static void lan966x_fdma_tx_add_dcb(struct lan966x_tx *tx, + struct lan966x_tx_dcb *dcb) +{ + dcb->nextptr = FDMA_DCB_INVALID_DATA; + dcb->info = 0; +} + +static int lan966x_fdma_tx_alloc(struct lan966x_tx *tx) +{ + struct lan966x *lan966x = tx->lan966x; + struct lan966x_tx_dcb *dcb; + struct lan966x_db *db; + int size; + int i, j; + + tx->dcbs_buf = kcalloc(FDMA_DCB_MAX, sizeof(struct lan966x_tx_dcb_buf), + GFP_KERNEL); + if (!tx->dcbs_buf) + return -ENOMEM; + + /* calculate how many pages are needed to allocate the dcbs */ + size = sizeof(struct lan966x_tx_dcb) * FDMA_DCB_MAX; + size = ALIGN(size, PAGE_SIZE); + tx->dcbs = dma_alloc_coherent(lan966x->dev, size, &tx->dma, GFP_KERNEL); + if (!tx->dcbs) + goto out; + + /* Now for each dcb allocate the db */ + for (i = 0; i < FDMA_DCB_MAX; ++i) { + dcb = &tx->dcbs[i]; + + for (j = 0; j < FDMA_TX_DCB_MAX_DBS; ++j) { + db = &dcb->db[j]; + db->dataptr = 0; + db->status = 0; + } + + lan966x_fdma_tx_add_dcb(tx, dcb); + } + + return 0; + +out: + kfree(tx->dcbs_buf); + 
return -ENOMEM; +} + +static void lan966x_fdma_tx_free(struct lan966x_tx *tx) +{ + struct lan966x *lan966x = tx->lan966x; + int size; + + kfree(tx->dcbs_buf); + + size = sizeof(struct lan966x_tx_dcb) * FDMA_DCB_MAX; + size = ALIGN(size, PAGE_SIZE); + dma_free_coherent(lan966x->dev, size, tx->dcbs, tx->dma); +} + +static void lan966x_fdma_tx_activate(struct lan966x_tx *tx) +{ + struct lan966x *lan966x = tx->lan966x; + u32 mask; + + /* When activating a channel, first is required to write the first DCB + * address and then to activate it + */ + lan_wr(lower_32_bits((u64)tx->dma), lan966x, + FDMA_DCB_LLP(tx->channel_id)); + lan_wr(upper_32_bits((u64)tx->dma), lan966x, + FDMA_DCB_LLP1(tx->channel_id)); + + lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(FDMA_TX_DCB_MAX_DBS) | + FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) | + FDMA_CH_CFG_CH_INJ_PORT_SET(0) | + FDMA_CH_CFG_CH_MEM_SET(1), + lan966x, FDMA_CH_CFG(tx->channel_id)); + + /* Start fdma */ + lan_rmw(FDMA_PORT_CTRL_INJ_STOP_SET(0), + FDMA_PORT_CTRL_INJ_STOP, + lan966x, FDMA_PORT_CTRL(0)); + + /* Enable interrupts */ + mask = lan_rd(lan966x, FDMA_INTR_DB_ENA); + mask = FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(mask); + mask |= BIT(tx->channel_id); + lan_rmw(FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(mask), + FDMA_INTR_DB_ENA_INTR_DB_ENA, + lan966x, FDMA_INTR_DB_ENA); + + /* Activate the channel */ + lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(tx->channel_id)), + FDMA_CH_ACTIVATE_CH_ACTIVATE, + lan966x, FDMA_CH_ACTIVATE); +} + +static void lan966x_fdma_tx_disable(struct lan966x_tx *tx) +{ + struct lan966x *lan966x = tx->lan966x; + u32 val; + + /* Disable the channel */ + lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(tx->channel_id)), + FDMA_CH_DISABLE_CH_DISABLE, + lan966x, FDMA_CH_DISABLE); + + readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x, + val, !(val & BIT(tx->channel_id)), + READL_SLEEP_US, READL_TIMEOUT_US); + + lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(tx->channel_id)), + FDMA_CH_DB_DISCARD_DB_DISCARD, + lan966x, FDMA_CH_DB_DISCARD); + + tx->activated = false; + tx->last_in_use = -1; +} + +static void lan966x_fdma_tx_reload(struct lan966x_tx *tx) +{ + struct lan966x *lan966x = tx->lan966x; + + /* Write the registers to reload the channel */ + lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(tx->channel_id)), + FDMA_CH_RELOAD_CH_RELOAD, + lan966x, FDMA_CH_RELOAD); +} + +static void lan966x_fdma_wakeup_netdev(struct lan966x *lan966x) +{ + struct lan966x_port *port; + int i; + + for (i = 0; i < lan966x->num_phys_ports; ++i) { + port = lan966x->ports[i]; + if (!port) + continue; + + if (netif_queue_stopped(port->dev)) + netif_wake_queue(port->dev); + } +} + +static void lan966x_fdma_stop_netdev(struct lan966x *lan966x) +{ + struct lan966x_port *port; + int i; + + for (i = 0; i < lan966x->num_phys_ports; ++i) { + port = lan966x->ports[i]; + if (!port) + continue; + + netif_stop_queue(port->dev); + } +} + +static void lan966x_fdma_tx_clear_buf(struct lan966x *lan966x, int weight) +{ + struct lan966x_tx *tx = &lan966x->tx; + struct lan966x_tx_dcb_buf *dcb_buf; + struct lan966x_db *db; + unsigned long flags; + bool clear = false; + int i; + + spin_lock_irqsave(&lan966x->tx_lock, flags); + for (i = 0; i < FDMA_DCB_MAX; ++i) { + dcb_buf = &tx->dcbs_buf[i]; + + if (!dcb_buf->used) + continue; + + db = &tx->dcbs[i].db[0]; + if (!(db->status & FDMA_DCB_STATUS_DONE)) + continue; + + dcb_buf->dev->stats.tx_packets++; + dcb_buf->dev->stats.tx_bytes += dcb_buf->skb->len; + + dcb_buf->used = false; + dma_unmap_single(lan966x->dev, + dcb_buf->dma_addr, + dcb_buf->skb->len, + 
DMA_TO_DEVICE); + if (!dcb_buf->ptp) + dev_kfree_skb_any(dcb_buf->skb); + + clear = true; + } + + if (clear) + lan966x_fdma_wakeup_netdev(lan966x); + + spin_unlock_irqrestore(&lan966x->tx_lock, flags); +} + +static bool lan966x_fdma_rx_more_frames(struct lan966x_rx *rx) +{ + struct lan966x_db *db; + + /* Check if there is any data */ + db = &rx->dcbs[rx->dcb_index].db[rx->db_index]; + if (unlikely(!(db->status & FDMA_DCB_STATUS_DONE))) + return false; + + return true; +} + +static struct sk_buff *lan966x_fdma_rx_get_frame(struct lan966x_rx *rx) +{ + struct lan966x *lan966x = rx->lan966x; + u64 src_port, timestamp; + struct lan966x_db *db; + struct sk_buff *skb; + struct page *page; + + /* Get the received frame and unmap it */ + db = &rx->dcbs[rx->dcb_index].db[rx->db_index]; + page = rx->page[rx->dcb_index][rx->db_index]; + + dma_sync_single_for_cpu(lan966x->dev, (dma_addr_t)db->dataptr, + FDMA_DCB_STATUS_BLOCKL(db->status), + DMA_FROM_DEVICE); + + skb = build_skb(page_address(page), PAGE_SIZE << rx->page_order); + if (unlikely(!skb)) + goto unmap_page; + + skb_put(skb, FDMA_DCB_STATUS_BLOCKL(db->status)); + + lan966x_ifh_get_src_port(skb->data, &src_port); + lan966x_ifh_get_timestamp(skb->data, &timestamp); + + if (WARN_ON(src_port >= lan966x->num_phys_ports)) + goto free_skb; + + dma_unmap_single_attrs(lan966x->dev, (dma_addr_t)db->dataptr, + PAGE_SIZE << rx->page_order, DMA_FROM_DEVICE, + DMA_ATTR_SKIP_CPU_SYNC); + + skb->dev = lan966x->ports[src_port]->dev; + skb_pull(skb, IFH_LEN * sizeof(u32)); + + if (likely(!(skb->dev->features & NETIF_F_RXFCS))) + skb_trim(skb, skb->len - ETH_FCS_LEN); + + lan966x_ptp_rxtstamp(lan966x, skb, timestamp); + skb->protocol = eth_type_trans(skb, skb->dev); + + if (lan966x->bridge_mask & BIT(src_port)) { + skb->offload_fwd_mark = 1; + + skb_reset_network_header(skb); + if (!lan966x_hw_offload(lan966x, src_port, skb)) + skb->offload_fwd_mark = 0; + } + + skb->dev->stats.rx_bytes += skb->len; + skb->dev->stats.rx_packets++; + + return skb; + +free_skb: + kfree_skb(skb); +unmap_page: + dma_unmap_single_attrs(lan966x->dev, (dma_addr_t)db->dataptr, + PAGE_SIZE << rx->page_order, DMA_FROM_DEVICE, + DMA_ATTR_SKIP_CPU_SYNC); + __free_pages(page, rx->page_order); + + return NULL; +} + +static int lan966x_fdma_napi_poll(struct napi_struct *napi, int weight) +{ + struct lan966x *lan966x = container_of(napi, struct lan966x, napi); + struct lan966x_rx *rx = &lan966x->rx; + int dcb_reload = rx->dcb_index; + struct lan966x_rx_dcb *old_dcb; + struct lan966x_db *db; + struct sk_buff *skb; + struct page *page; + int counter = 0; + u64 nextptr; + + lan966x_fdma_tx_clear_buf(lan966x, weight); + + /* Get all received skb */ + while (counter < weight) { + if (!lan966x_fdma_rx_more_frames(rx)) + break; + + skb = lan966x_fdma_rx_get_frame(rx); + + rx->page[rx->dcb_index][rx->db_index] = NULL; + rx->dcb_index++; + rx->dcb_index &= FDMA_DCB_MAX - 1; + + if (!skb) + break; + + napi_gro_receive(&lan966x->napi, skb); + counter++; + } + + /* Allocate new pages and map them */ + while (dcb_reload != rx->dcb_index) { + db = &rx->dcbs[dcb_reload].db[rx->db_index]; + page = lan966x_fdma_rx_alloc_page(rx, db); + if (unlikely(!page)) + break; + rx->page[dcb_reload][rx->db_index] = page; + + old_dcb = &rx->dcbs[dcb_reload]; + dcb_reload++; + dcb_reload &= FDMA_DCB_MAX - 1; + + nextptr = rx->dma + ((unsigned long)old_dcb - + (unsigned long)rx->dcbs); + lan966x_fdma_rx_add_dcb(rx, old_dcb, nextptr); + lan966x_fdma_rx_reload(rx); + } + + if (counter < weight && napi_complete_done(napi, counter)) +
lan_wr(0xff, lan966x, FDMA_INTR_DB_ENA); + + return counter; +} + +irqreturn_t lan966x_fdma_irq_handler(int irq, void *args) +{ + struct lan966x *lan966x = args; + u32 db, err, err_type; + + db = lan_rd(lan966x, FDMA_INTR_DB); + err = lan_rd(lan966x, FDMA_INTR_ERR); + + if (db) { + lan_wr(0, lan966x, FDMA_INTR_DB_ENA); + lan_wr(db, lan966x, FDMA_INTR_DB); + + napi_schedule(&lan966x->napi); + } + + if (err) { + err_type = lan_rd(lan966x, FDMA_ERRORS); + + WARN(1, "Unexpected error: %d, error_type: %d\n", err, err_type); + + lan_wr(err, lan966x, FDMA_INTR_ERR); + lan_wr(err_type, lan966x, FDMA_ERRORS); + } + + return IRQ_HANDLED; +} + +static int lan966x_fdma_get_next_dcb(struct lan966x_tx *tx) +{ + struct lan966x_tx_dcb_buf *dcb_buf; + int i; + + for (i = 0; i < FDMA_DCB_MAX; ++i) { + dcb_buf = &tx->dcbs_buf[i]; + if (!dcb_buf->used && i != tx->last_in_use) + return i; + } + + return -1; +} + +int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev) +{ + struct lan966x_port *port = netdev_priv(dev); + struct lan966x *lan966x = port->lan966x; + struct lan966x_tx_dcb_buf *next_dcb_buf; + struct lan966x_tx_dcb *next_dcb, *dcb; + struct lan966x_tx *tx = &lan966x->tx; + struct lan966x_db *next_db; + int needed_headroom; + int needed_tailroom; + dma_addr_t dma_addr; + int next_to_use; + int err; + + /* Get next index */ + next_to_use = lan966x_fdma_get_next_dcb(tx); + if (next_to_use < 0) { + netif_stop_queue(dev); + return NETDEV_TX_BUSY; + } + + if (skb_put_padto(skb, ETH_ZLEN)) { + dev->stats.tx_dropped++; + return NETDEV_TX_OK; + } + + /* skb processing */ + needed_headroom = max_t(int, IFH_LEN * sizeof(u32) - skb_headroom(skb), 0); + needed_tailroom = max_t(int, ETH_FCS_LEN - skb_tailroom(skb), 0); + if (needed_headroom || needed_tailroom || skb_header_cloned(skb)) { + err = pskb_expand_head(skb, needed_headroom, needed_tailroom, + GFP_ATOMIC); + if (unlikely(err)) { + dev->stats.tx_dropped++; + err = NETDEV_TX_OK; + goto release; + } + } + + skb_tx_timestamp(skb); + skb_push(skb, IFH_LEN * sizeof(u32)); + memcpy(skb->data, ifh, IFH_LEN * sizeof(u32)); + skb_put(skb, 4); + + dma_addr = dma_map_single(lan966x->dev, skb->data, skb->len, + DMA_TO_DEVICE); + if (dma_mapping_error(lan966x->dev, dma_addr)) { + dev->stats.tx_dropped++; + err = NETDEV_TX_OK; + goto release; + } + + /* Setup next dcb */ + next_dcb = &tx->dcbs[next_to_use]; + next_dcb->nextptr = FDMA_DCB_INVALID_DATA; + + next_db = &next_dcb->db[0]; + next_db->dataptr = dma_addr; + next_db->status = FDMA_DCB_STATUS_SOF | + FDMA_DCB_STATUS_EOF | + FDMA_DCB_STATUS_INTR | + FDMA_DCB_STATUS_BLOCKO(0) | + FDMA_DCB_STATUS_BLOCKL(skb->len); + + /* Fill up the buffer */ + next_dcb_buf = &tx->dcbs_buf[next_to_use]; + next_dcb_buf->skb = skb; + next_dcb_buf->dma_addr = dma_addr; + next_dcb_buf->used = true; + next_dcb_buf->ptp = false; + next_dcb_buf->dev = dev; + + if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP && + LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP) + next_dcb_buf->ptp = true; + + if (likely(lan966x->tx.activated)) { + /* Connect current dcb to the next db */ + dcb = &tx->dcbs[tx->last_in_use]; + dcb->nextptr = tx->dma + (next_to_use * + sizeof(struct lan966x_tx_dcb)); + + lan966x_fdma_tx_reload(tx); + } else { + /* Because it is first time, then just activate */ + lan966x->tx.activated = true; + lan966x_fdma_tx_activate(tx); + } + + /* Move to next dcb because this last in use */ + tx->last_in_use = next_to_use; + + return NETDEV_TX_OK; + +release: + if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP 
&& + LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP) + lan966x_ptp_txtstamp_release(port, skb); + + dev_kfree_skb_any(skb); + return err; +} + +static int lan966x_fdma_get_max_mtu(struct lan966x *lan966x) +{ + int max_mtu = 0; + int i; + + for (i = 0; i < lan966x->num_phys_ports; ++i) { + struct lan966x_port *port; + int mtu; + + port = lan966x->ports[i]; + if (!port) + continue; + + mtu = lan_rd(lan966x, DEV_MAC_MAXLEN_CFG(port->chip_port)); + if (mtu > max_mtu) + max_mtu = mtu; + } + + return max_mtu; +} + +static int lan966x_qsys_sw_status(struct lan966x *lan966x) +{ + return lan_rd(lan966x, QSYS_SW_STATUS(CPU_PORT)); +} + +static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu) +{ + dma_addr_t rx_dma; + void *rx_dcbs; + u32 size; + int err; + + /* Store these for later to free them */ + rx_dma = lan966x->rx.dma; + rx_dcbs = lan966x->rx.dcbs; + + napi_synchronize(&lan966x->napi); + napi_disable(&lan966x->napi); + lan966x_fdma_stop_netdev(lan966x); + + lan966x_fdma_rx_disable(&lan966x->rx); + lan966x_fdma_rx_free_pages(&lan966x->rx); + lan966x->rx.page_order = round_up(new_mtu, PAGE_SIZE) / PAGE_SIZE - 1; + err = lan966x_fdma_rx_alloc(&lan966x->rx); + if (err) + goto restore; + lan966x_fdma_rx_start(&lan966x->rx); + + size = sizeof(struct lan966x_rx_dcb) * FDMA_DCB_MAX; + size = ALIGN(size, PAGE_SIZE); + dma_free_coherent(lan966x->dev, size, rx_dcbs, rx_dma); + + lan966x_fdma_wakeup_netdev(lan966x); + napi_enable(&lan966x->napi); + + return err; +restore: + lan966x->rx.dma = rx_dma; + lan966x->rx.dcbs = rx_dcbs; + lan966x_fdma_rx_start(&lan966x->rx); + + return err; +} + +int lan966x_fdma_change_mtu(struct lan966x *lan966x) +{ + int max_mtu; + int err; + u32 val; + + max_mtu = lan966x_fdma_get_max_mtu(lan966x); + max_mtu += IFH_LEN * sizeof(u32); + max_mtu += SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + max_mtu += VLAN_HLEN * 2; + + if (round_up(max_mtu, PAGE_SIZE) / PAGE_SIZE - 1 == + lan966x->rx.page_order) + return 0; + + /* Disable the CPU port */ + lan_rmw(QSYS_SW_PORT_MODE_PORT_ENA_SET(0), + QSYS_SW_PORT_MODE_PORT_ENA, + lan966x, QSYS_SW_PORT_MODE(CPU_PORT)); + + /* Flush the CPU queues */ + readx_poll_timeout(lan966x_qsys_sw_status, lan966x, + val, !(QSYS_SW_STATUS_EQ_AVAIL_GET(val)), + READL_SLEEP_US, READL_TIMEOUT_US); + + /* Add a sleep in case there are frames between the queues and the CPU + * port + */ + usleep_range(1000, 2000); + + err = lan966x_fdma_reload(lan966x, max_mtu); + + /* Enable back the CPU port */ + lan_rmw(QSYS_SW_PORT_MODE_PORT_ENA_SET(1), + QSYS_SW_PORT_MODE_PORT_ENA, + lan966x, QSYS_SW_PORT_MODE(CPU_PORT)); + + return err; +} + +void lan966x_fdma_netdev_init(struct lan966x *lan966x, struct net_device *dev) +{ + if (lan966x->fdma_ndev) + return; + + lan966x->fdma_ndev = dev; + netif_napi_add(dev, &lan966x->napi, lan966x_fdma_napi_poll); + napi_enable(&lan966x->napi); +} + +void lan966x_fdma_netdev_deinit(struct lan966x *lan966x, struct net_device *dev) +{ + if (lan966x->fdma_ndev == dev) { + netif_napi_del(&lan966x->napi); + lan966x->fdma_ndev = NULL; + } +} + +int lan966x_fdma_init(struct lan966x *lan966x) +{ + int err; + + if (!lan966x->fdma) + return 0; + + lan966x->rx.lan966x = lan966x; + lan966x->rx.channel_id = FDMA_XTR_CHANNEL; + lan966x->tx.lan966x = lan966x; + lan966x->tx.channel_id = FDMA_INJ_CHANNEL; + lan966x->tx.last_in_use = -1; + + err = lan966x_fdma_rx_alloc(&lan966x->rx); + if (err) + return err; + + err = lan966x_fdma_tx_alloc(&lan966x->tx); + if (err) { + lan966x_fdma_rx_free(&lan966x->rx); + return err; + } + + 
lan966x_fdma_rx_start(&lan966x->rx); + + return 0; +} + +void lan966x_fdma_deinit(struct lan966x *lan966x) +{ + if (!lan966x->fdma) + return; + + lan966x_fdma_rx_disable(&lan966x->rx); + lan966x_fdma_tx_disable(&lan966x->tx); + + napi_synchronize(&lan966x->napi); + napi_disable(&lan966x->napi); + + lan966x_fdma_rx_free_pages(&lan966x->rx); + lan966x_fdma_rx_free(&lan966x->rx); + lan966x_fdma_tx_free(&lan966x->tx); +} diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ifh.h b/drivers/net/ethernet/microchip/lan966x/lan966x_ifh.h new file mode 100644 index 000000000..ca3314789 --- /dev/null +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ifh.h @@ -0,0 +1,173 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ + +#ifndef __LAN966X_IFH_H__ +#define __LAN966X_IFH_H__ + +/* Fields with description (*) should just be cleared upon injection + * IFH is transmitted MSByte first (Highest bit pos sent as MSB of first byte) + */ + +#define IFH_LEN 7 + +/* Timestamp for frame */ +#define IFH_POS_TIMESTAMP 192 + +/* Bypass analyzer with a prefilled IFH */ +#define IFH_POS_BYPASS 191 + +/* Masqueraded injection with masq_port defining logical source port */ +#define IFH_POS_MASQ 190 + +/* Masqueraded port number for injection */ +#define IFH_POS_MASQ_PORT 186 + +/* Frame length (*) */ +#define IFH_POS_LEN 178 + +/* Cell filling mode. Full(0),Etype(1), LlctOpt(2), Llct(3) */ +#define IFH_POS_WRDMODE 176 + +/* Frame has 16 bits rtag removed compared to line data */ +#define IFH_POS_RTAG48 175 + +/* Frame has a redundancy tag */ +#define IFH_POS_HAS_RED_TAG 174 + +/* Frame has been cut through forwarded (*) */ +#define IFH_POS_CUTTHRU 173 + +/* Rewriter command */ +#define IFH_POS_REW_CMD 163 + +/* Enable OAM-related rewriting. PDU_TYPE encodes OAM type. */ +#define IFH_POS_REW_OAM 162 + +/* PDU type. Encoding: (0-NONE, 1-Y1731_CCM, 2-MRP_TST, 3-MRP_ITST, 4-DLR_BCN, + * 5-DLR_ADV, 6-RTE_NULL_INJ, 7-IPV4, 8-IPV6, 9-Y1731_NON_CCM). + */ +#define IFH_POS_PDU_TYPE 158 + +/* Update FCS before transmission */ +#define IFH_POS_FCS_UPD 157 + +/* Classified DSCP value of frame */ +#define IFH_POS_DSCP 151 + +/* Yellow indication */ +#define IFH_POS_DP 150 + +/* Process in RTE/inbound */ +#define IFH_POS_RTE_INB_UPDATE 149 + +/* Number of tags to pop from frame */ +#define IFH_POS_POP_CNT 147 + +/* Number of tags in front of the ethertype */ +#define IFH_POS_ETYPE_OFS 145 + +/* Logical source port of frame (*) */ +#define IFH_POS_SRCPORT 141 + +/* Sequence number in redundancy tag */ +#define IFH_POS_SEQ_NUM 120 + +/* Stagd flag and classified TCI of frame (PCP/DEI/VID) */ +#define IFH_POS_TCI 103 + +/* Classified internal priority for queuing */ +#define IFH_POS_QOS_CLASS 100 + +/* Bit mask with eight cpu copy classses */ +#define IFH_POS_CPUQ 92 + +/* Relearn + learn flags (*) */ +#define IFH_POS_LEARN_FLAGS 90 + +/* SFLOW identifier for frame (0-8: Tx port, 9: Rx sampling, 15: No sampling) */ +#define IFH_POS_SFLOW_ID 86 + +/* Set if an ACL/S2 rule was hit (*). + * Super priority: acl_hit=0 and acl_hit(4)=1. 
+ */ +#define IFH_POS_ACL_HIT 85 + +/* S2 rule index hit (*) */ +#define IFH_POS_ACL_IDX 79 + +/* ISDX as classified by S1 */ +#define IFH_POS_ISDX 71 + +/* Destination ports for frame */ +#define IFH_POS_DSTS 62 + +/* Storm policer to be applied: None/Uni/Multi/Broad (*) */ +#define IFH_POS_FLOOD 60 + +/* Redundancy tag operation */ +#define IFH_POS_SEQ_OP 58 + +/* Classified internal priority for resourcemgt, tagging etc */ +#define IFH_POS_IPV 55 + +/* Frame is for AFI use */ +#define IFH_POS_AFI 54 + +/* Internal aging value (*) */ +#define IFH_POS_AGED 52 + +/* RTP Identifier */ +#define IFH_POS_RTP_ID 42 + +/* RTP MRPD flow */ +#define IFH_POS_RTP_SUBID 41 + +/* Profinet DataStatus or opcua GroupVersion MSB */ +#define IFH_POS_PN_DATA_STATUS 33 + +/* Profinet transfer status (1 iff the status is 0) */ +#define IFH_POS_PN_TRANSF_STATUS_ZERO 32 + +/* Profinet cycle counter or opcua NetworkMessageNumber */ +#define IFH_POS_PN_CC 16 + +#define IFH_WID_TIMESTAMP 32 +#define IFH_WID_BYPASS 1 +#define IFH_WID_MASQ 1 +#define IFH_WID_MASQ_PORT 4 +#define IFH_WID_LEN 14 +#define IFH_WID_WRDMODE 2 +#define IFH_WID_RTAG48 1 +#define IFH_WID_HAS_RED_TAG 1 +#define IFH_WID_CUTTHRU 1 +#define IFH_WID_REW_CMD 10 +#define IFH_WID_REW_OAM 1 +#define IFH_WID_PDU_TYPE 4 +#define IFH_WID_FCS_UPD 1 +#define IFH_WID_DSCP 6 +#define IFH_WID_DP 1 +#define IFH_WID_RTE_INB_UPDATE 1 +#define IFH_WID_POP_CNT 2 +#define IFH_WID_ETYPE_OFS 2 +#define IFH_WID_SRCPORT 4 +#define IFH_WID_SEQ_NUM 16 +#define IFH_WID_TCI 17 +#define IFH_WID_QOS_CLASS 3 +#define IFH_WID_CPUQ 8 +#define IFH_WID_LEARN_FLAGS 2 +#define IFH_WID_SFLOW_ID 4 +#define IFH_WID_ACL_HIT 1 +#define IFH_WID_ACL_IDX 6 +#define IFH_WID_ISDX 8 +#define IFH_WID_DSTS 9 +#define IFH_WID_FLOOD 2 +#define IFH_WID_SEQ_OP 2 +#define IFH_WID_IPV 3 +#define IFH_WID_AFI 1 +#define IFH_WID_AGED 2 +#define IFH_WID_RTP_ID 10 +#define IFH_WID_RTP_SUBID 1 +#define IFH_WID_PN_DATA_STATUS 8 +#define IFH_WID_PN_TRANSF_STATUS_ZERO 1 +#define IFH_WID_PN_CC 16 + +#endif /* __LAN966X_IFH_H__ */ diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_lag.c b/drivers/net/ethernet/microchip/lan966x/lan966x_lag.c new file mode 100644 index 000000000..41fa2523d --- /dev/null +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_lag.c @@ -0,0 +1,363 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include <linux/if_bridge.h> + +#include "lan966x_main.h" + +static void lan966x_lag_set_aggr_pgids(struct lan966x *lan966x) +{ + u32 visited = GENMASK(lan966x->num_phys_ports - 1, 0); + int p, lag, i; + + /* Reset destination and aggregation PGIDS */ + for (p = 0; p < lan966x->num_phys_ports; ++p) + lan_wr(ANA_PGID_PGID_SET(BIT(p)), + lan966x, ANA_PGID(p)); + + for (p = PGID_AGGR; p < PGID_SRC; ++p) + lan_wr(ANA_PGID_PGID_SET(visited), + lan966x, ANA_PGID(p)); + + /* The visited ports bitmask holds the list of ports offloading any + * bonding interface. Initially we mark all these ports as unvisited, + * then every time we visit a port in this bitmask, we know that it is + * the lowest numbered port, i.e. the one whose logical ID == physical + * port ID == LAG ID. So we mark as visited all further ports in the + * bitmask that are offloading the same bonding interface. This way, + * we set up the aggregation PGIDs only once per bonding interface. 
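+ * E.g. if ports 2 and 3 are bonded, port 2 is the lowest numbered port and
+ * thus the LAG ID; once its PGIDs are programmed, port 3 is marked as
+ * visited so the same bond is not configured a second time.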
+ */ + for (p = 0; p < lan966x->num_phys_ports; ++p) { + struct lan966x_port *port = lan966x->ports[p]; + + if (!port || !port->bond) + continue; + + visited &= ~BIT(p); + } + + /* Now, set PGIDs for each active LAG */ + for (lag = 0; lag < lan966x->num_phys_ports; ++lag) { + struct net_device *bond = lan966x->ports[lag]->bond; + int num_active_ports = 0; + unsigned long bond_mask; + u8 aggr_idx[16]; + + if (!bond || (visited & BIT(lag))) + continue; + + bond_mask = lan966x_lag_get_mask(lan966x, bond); + + for_each_set_bit(p, &bond_mask, lan966x->num_phys_ports) { + struct lan966x_port *port = lan966x->ports[p]; + + lan_wr(ANA_PGID_PGID_SET(bond_mask), + lan966x, ANA_PGID(p)); + if (port->lag_tx_active) + aggr_idx[num_active_ports++] = p; + } + + for (i = PGID_AGGR; i < PGID_SRC; ++i) { + u32 ac; + + ac = lan_rd(lan966x, ANA_PGID(i)); + ac &= ~bond_mask; + /* Don't do division by zero if there was no active + * port. Just make all aggregation codes zero. + */ + if (num_active_ports) + ac |= BIT(aggr_idx[i % num_active_ports]); + lan_wr(ANA_PGID_PGID_SET(ac), + lan966x, ANA_PGID(i)); + } + + /* Mark all ports in the same LAG as visited to avoid applying + * the same config again. + */ + for (p = lag; p < lan966x->num_phys_ports; p++) { + struct lan966x_port *port = lan966x->ports[p]; + + if (!port) + continue; + + if (port->bond == bond) + visited |= BIT(p); + } + } +} + +static void lan966x_lag_set_port_ids(struct lan966x *lan966x) +{ + struct lan966x_port *port; + u32 bond_mask; + u32 lag_id; + int p; + + for (p = 0; p < lan966x->num_phys_ports; ++p) { + port = lan966x->ports[p]; + if (!port) + continue; + + lag_id = port->chip_port; + + bond_mask = lan966x_lag_get_mask(lan966x, port->bond); + if (bond_mask) + lag_id = __ffs(bond_mask); + + lan_rmw(ANA_PORT_CFG_PORTID_VAL_SET(lag_id), + ANA_PORT_CFG_PORTID_VAL, + lan966x, ANA_PORT_CFG(port->chip_port)); + } +} + +static void lan966x_lag_update_ids(struct lan966x *lan966x) +{ + lan966x_lag_set_port_ids(lan966x); + lan966x_update_fwd_mask(lan966x); + lan966x_lag_set_aggr_pgids(lan966x); +} + +int lan966x_lag_port_join(struct lan966x_port *port, + struct net_device *brport_dev, + struct net_device *bond, + struct netlink_ext_ack *extack) +{ + struct lan966x *lan966x = port->lan966x; + struct net_device *dev = port->dev; + u32 lag_id = -1; + u32 bond_mask; + int err; + + bond_mask = lan966x_lag_get_mask(lan966x, bond); + if (bond_mask) + lag_id = __ffs(bond_mask); + + port->bond = bond; + lan966x_lag_update_ids(lan966x); + + err = switchdev_bridge_port_offload(brport_dev, dev, port, + &lan966x_switchdev_nb, + &lan966x_switchdev_blocking_nb, + false, extack); + if (err) + goto out; + + lan966x_port_stp_state_set(port, br_port_get_stp_state(brport_dev)); + + if (lan966x_lag_first_port(port->bond, port->dev) && + lag_id != -1) + lan966x_mac_lag_replace_port_entry(lan966x, + lan966x->ports[lag_id], + port); + + return 0; + +out: + port->bond = NULL; + lan966x_lag_update_ids(lan966x); + + return err; +} + +void lan966x_lag_port_leave(struct lan966x_port *port, struct net_device *bond) +{ + struct lan966x *lan966x = port->lan966x; + u32 bond_mask; + u32 lag_id; + + if (lan966x_lag_first_port(port->bond, port->dev)) { + bond_mask = lan966x_lag_get_mask(lan966x, port->bond); + bond_mask &= ~BIT(port->chip_port); + if (bond_mask) { + lag_id = __ffs(bond_mask); + lan966x_mac_lag_replace_port_entry(lan966x, port, + lan966x->ports[lag_id]); + } else { + lan966x_mac_lag_remove_port_entry(lan966x, port); + } + } + + port->bond = NULL; + 
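/* Recompute the logical port IDs, the forwarding mask and the
+ * aggregation PGIDs now that the port no longer belongs to the bond.
+ */
+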
lan966x_lag_update_ids(lan966x); + lan966x_port_stp_state_set(port, BR_STATE_FORWARDING); +} + +static bool lan966x_lag_port_check_hash_types(struct lan966x *lan966x, + enum netdev_lag_hash hash_type) +{ + int p; + + for (p = 0; p < lan966x->num_phys_ports; ++p) { + struct lan966x_port *port = lan966x->ports[p]; + + if (!port || !port->bond) + continue; + + if (port->hash_type != hash_type) + return false; + } + + return true; +} + +int lan966x_lag_port_prechangeupper(struct net_device *dev, + struct netdev_notifier_changeupper_info *info) +{ + struct lan966x_port *port = netdev_priv(dev); + struct lan966x *lan966x = port->lan966x; + struct netdev_lag_upper_info *lui; + struct netlink_ext_ack *extack; + + extack = netdev_notifier_info_to_extack(&info->info); + lui = info->upper_info; + if (!lui) { + port->hash_type = NETDEV_LAG_HASH_NONE; + return NOTIFY_DONE; + } + + if (lui->tx_type != NETDEV_LAG_TX_TYPE_HASH) { + NL_SET_ERR_MSG_MOD(extack, + "LAG device using unsupported Tx type"); + return -EINVAL; + } + + if (!lan966x_lag_port_check_hash_types(lan966x, lui->hash_type)) { + NL_SET_ERR_MSG_MOD(extack, + "LAG devices can have only the same hash_type"); + return -EINVAL; + } + + switch (lui->hash_type) { + case NETDEV_LAG_HASH_L2: + lan_wr(ANA_AGGR_CFG_AC_DMAC_ENA_SET(1) | + ANA_AGGR_CFG_AC_SMAC_ENA_SET(1), + lan966x, ANA_AGGR_CFG); + break; + case NETDEV_LAG_HASH_L34: + lan_wr(ANA_AGGR_CFG_AC_IP6_TCPUDP_ENA_SET(1) | + ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA_SET(1) | + ANA_AGGR_CFG_AC_IP4_SIPDIP_ENA_SET(1), + lan966x, ANA_AGGR_CFG); + break; + case NETDEV_LAG_HASH_L23: + lan_wr(ANA_AGGR_CFG_AC_DMAC_ENA_SET(1) | + ANA_AGGR_CFG_AC_SMAC_ENA_SET(1) | + ANA_AGGR_CFG_AC_IP6_TCPUDP_ENA_SET(1) | + ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA_SET(1), + lan966x, ANA_AGGR_CFG); + break; + default: + NL_SET_ERR_MSG_MOD(extack, + "LAG device using unsupported hash type"); + return -EINVAL; + } + + port->hash_type = lui->hash_type; + + return NOTIFY_OK; +} + +int lan966x_lag_port_changelowerstate(struct net_device *dev, + struct netdev_notifier_changelowerstate_info *info) +{ + struct netdev_lag_lower_state_info *lag = info->lower_state_info; + struct lan966x_port *port = netdev_priv(dev); + struct lan966x *lan966x = port->lan966x; + bool is_active; + + if (!port->bond) + return NOTIFY_DONE; + + is_active = lag->link_up && lag->tx_enabled; + if (port->lag_tx_active == is_active) + return NOTIFY_DONE; + + port->lag_tx_active = is_active; + lan966x_lag_set_aggr_pgids(lan966x); + + return NOTIFY_OK; +} + +int lan966x_lag_netdev_prechangeupper(struct net_device *dev, + struct netdev_notifier_changeupper_info *info) +{ + struct lan966x_port *port; + struct net_device *lower; + struct list_head *iter; + int err; + + netdev_for_each_lower_dev(dev, lower, iter) { + if (!lan966x_netdevice_check(lower)) + continue; + + port = netdev_priv(lower); + if (port->bond != dev) + continue; + + err = lan966x_port_prechangeupper(lower, dev, info); + if (err) + return err; + } + + return NOTIFY_DONE; +} + +int lan966x_lag_netdev_changeupper(struct net_device *dev, + struct netdev_notifier_changeupper_info *info) +{ + struct lan966x_port *port; + struct net_device *lower; + struct list_head *iter; + int err; + + netdev_for_each_lower_dev(dev, lower, iter) { + if (!lan966x_netdevice_check(lower)) + continue; + + port = netdev_priv(lower); + if (port->bond != dev) + continue; + + err = lan966x_port_changeupper(lower, dev, info); + if (err) + return err; + } + + return NOTIFY_DONE; +} + +bool lan966x_lag_first_port(struct net_device *lag, struct 
net_device *dev) +{ + struct lan966x_port *port = netdev_priv(dev); + struct lan966x *lan966x = port->lan966x; + unsigned long bond_mask; + + if (port->bond != lag) + return false; + + bond_mask = lan966x_lag_get_mask(lan966x, lag); + if (bond_mask && port->chip_port == __ffs(bond_mask)) + return true; + + return false; +} + +u32 lan966x_lag_get_mask(struct lan966x *lan966x, struct net_device *bond) +{ + struct lan966x_port *port; + u32 mask = 0; + int p; + + if (!bond) + return mask; + + for (p = 0; p < lan966x->num_phys_ports; p++) { + port = lan966x->ports[p]; + if (!port) + continue; + + if (port->bond == bond) + mask |= BIT(p); + } + + return mask; +} diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c b/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c new file mode 100644 index 000000000..baa3a30c0 --- /dev/null +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c @@ -0,0 +1,592 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include <net/switchdev.h> +#include "lan966x_main.h" + +#define LAN966X_MAC_COLUMNS 4 +#define MACACCESS_CMD_IDLE 0 +#define MACACCESS_CMD_LEARN 1 +#define MACACCESS_CMD_FORGET 2 +#define MACACCESS_CMD_AGE 3 +#define MACACCESS_CMD_GET_NEXT 4 +#define MACACCESS_CMD_INIT 5 +#define MACACCESS_CMD_READ 6 +#define MACACCESS_CMD_WRITE 7 +#define MACACCESS_CMD_SYNC_GET_NEXT 8 + +#define LAN966X_MAC_INVALID_ROW -1 + +struct lan966x_mac_entry { + struct list_head list; + unsigned char mac[ETH_ALEN] __aligned(2); + u16 vid; + u16 port_index; + int row; + bool lag; +}; + +struct lan966x_mac_raw_entry { + u32 mach; + u32 macl; + u32 maca; + bool processed; +}; + +static int lan966x_mac_get_status(struct lan966x *lan966x) +{ + return lan_rd(lan966x, ANA_MACACCESS); +} + +static int lan966x_mac_wait_for_completion(struct lan966x *lan966x) +{ + u32 val; + + return readx_poll_timeout_atomic(lan966x_mac_get_status, + lan966x, val, + (ANA_MACACCESS_MAC_TABLE_CMD_GET(val)) == + MACACCESS_CMD_IDLE, + TABLE_UPDATE_SLEEP_US, + TABLE_UPDATE_TIMEOUT_US); +} + +static void lan966x_mac_select(struct lan966x *lan966x, + const unsigned char mac[ETH_ALEN], + unsigned int vid) +{ + u32 macl = 0, mach = 0; + + /* Set the MAC address to handle and the vlan associated in a format + * understood by the hardware. 
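+ * The VID lands in the upper half of MACHDATA, the first two MAC bytes in
+ * its lower half, and the remaining four MAC bytes fill MACLDATA, most
+ * significant byte first.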
+ */ + mach |= vid << 16; + mach |= mac[0] << 8; + mach |= mac[1] << 0; + macl |= mac[2] << 24; + macl |= mac[3] << 16; + macl |= mac[4] << 8; + macl |= mac[5] << 0; + + lan_wr(macl, lan966x, ANA_MACLDATA); + lan_wr(mach, lan966x, ANA_MACHDATA); +} + +static int __lan966x_mac_learn_locked(struct lan966x *lan966x, int pgid, + bool cpu_copy, + const unsigned char mac[ETH_ALEN], + unsigned int vid, + enum macaccess_entry_type type) +{ + lockdep_assert_held(&lan966x->mac_lock); + + lan966x_mac_select(lan966x, mac, vid); + + /* Issue a write command */ + lan_wr(ANA_MACACCESS_VALID_SET(1) | + ANA_MACACCESS_CHANGE2SW_SET(0) | + ANA_MACACCESS_MAC_CPU_COPY_SET(cpu_copy) | + ANA_MACACCESS_DEST_IDX_SET(pgid) | + ANA_MACACCESS_ENTRYTYPE_SET(type) | + ANA_MACACCESS_MAC_TABLE_CMD_SET(MACACCESS_CMD_LEARN), + lan966x, ANA_MACACCESS); + + return lan966x_mac_wait_for_completion(lan966x); +} + +static int __lan966x_mac_learn(struct lan966x *lan966x, int pgid, + bool cpu_copy, + const unsigned char mac[ETH_ALEN], + unsigned int vid, + enum macaccess_entry_type type) +{ + int ret; + + spin_lock(&lan966x->mac_lock); + ret = __lan966x_mac_learn_locked(lan966x, pgid, cpu_copy, mac, vid, type); + spin_unlock(&lan966x->mac_lock); + + return ret; +} + +/* The mask of the front ports is encoded inside the mac parameter via a call + * to lan966x_mdb_encode_mac(). + */ +int lan966x_mac_ip_learn(struct lan966x *lan966x, + bool cpu_copy, + const unsigned char mac[ETH_ALEN], + unsigned int vid, + enum macaccess_entry_type type) +{ + WARN_ON(type != ENTRYTYPE_MACV4 && type != ENTRYTYPE_MACV6); + + return __lan966x_mac_learn(lan966x, 0, cpu_copy, mac, vid, type); +} + +int lan966x_mac_learn(struct lan966x *lan966x, int port, + const unsigned char mac[ETH_ALEN], + unsigned int vid, + enum macaccess_entry_type type) +{ + WARN_ON(type != ENTRYTYPE_NORMAL && type != ENTRYTYPE_LOCKED); + + return __lan966x_mac_learn(lan966x, port, false, mac, vid, type); +} + +static int lan966x_mac_learn_locked(struct lan966x *lan966x, int port, + const unsigned char mac[ETH_ALEN], + unsigned int vid, + enum macaccess_entry_type type) +{ + WARN_ON(type != ENTRYTYPE_NORMAL && type != ENTRYTYPE_LOCKED); + + return __lan966x_mac_learn_locked(lan966x, port, false, mac, vid, type); +} + +static int lan966x_mac_forget_locked(struct lan966x *lan966x, + const unsigned char mac[ETH_ALEN], + unsigned int vid, + enum macaccess_entry_type type) +{ + lockdep_assert_held(&lan966x->mac_lock); + + lan966x_mac_select(lan966x, mac, vid); + + /* Issue a forget command */ + lan_wr(ANA_MACACCESS_ENTRYTYPE_SET(type) | + ANA_MACACCESS_MAC_TABLE_CMD_SET(MACACCESS_CMD_FORGET), + lan966x, ANA_MACACCESS); + + return lan966x_mac_wait_for_completion(lan966x); +} + +int lan966x_mac_forget(struct lan966x *lan966x, + const unsigned char mac[ETH_ALEN], + unsigned int vid, + enum macaccess_entry_type type) +{ + int ret; + + spin_lock(&lan966x->mac_lock); + ret = lan966x_mac_forget_locked(lan966x, mac, vid, type); + spin_unlock(&lan966x->mac_lock); + + return ret; +} + +int lan966x_mac_cpu_learn(struct lan966x *lan966x, const char *addr, u16 vid) +{ + return lan966x_mac_learn(lan966x, PGID_CPU, addr, vid, ENTRYTYPE_LOCKED); +} + +int lan966x_mac_cpu_forget(struct lan966x *lan966x, const char *addr, u16 vid) +{ + return lan966x_mac_forget(lan966x, addr, vid, ENTRYTYPE_LOCKED); +} + +void lan966x_mac_set_ageing(struct lan966x *lan966x, + u32 ageing) +{ + lan_rmw(ANA_AUTOAGE_AGE_PERIOD_SET(ageing / 2), + ANA_AUTOAGE_AGE_PERIOD, + lan966x, ANA_AUTOAGE); +} + +void 
lan966x_mac_init(struct lan966x *lan966x) +{ + /* Clear the MAC table */ + lan_wr(MACACCESS_CMD_INIT, lan966x, ANA_MACACCESS); + lan966x_mac_wait_for_completion(lan966x); + + spin_lock_init(&lan966x->mac_lock); + INIT_LIST_HEAD(&lan966x->mac_entries); +} + +static struct lan966x_mac_entry *lan966x_mac_alloc_entry(struct lan966x_port *port, + const unsigned char *mac, + u16 vid) +{ + struct lan966x_mac_entry *mac_entry; + + mac_entry = kzalloc(sizeof(*mac_entry), GFP_ATOMIC); + if (!mac_entry) + return NULL; + + memcpy(mac_entry->mac, mac, ETH_ALEN); + mac_entry->vid = vid; + mac_entry->port_index = port->chip_port; + mac_entry->row = LAN966X_MAC_INVALID_ROW; + mac_entry->lag = port->bond ? true : false; + return mac_entry; +} + +static struct lan966x_mac_entry *lan966x_mac_find_entry(struct lan966x *lan966x, + const unsigned char *mac, + u16 vid, u16 port_index) +{ + struct lan966x_mac_entry *res = NULL; + struct lan966x_mac_entry *mac_entry; + + list_for_each_entry(mac_entry, &lan966x->mac_entries, list) { + if (mac_entry->vid == vid && + ether_addr_equal(mac, mac_entry->mac) && + mac_entry->port_index == port_index) { + res = mac_entry; + break; + } + } + + return res; +} + +static int lan966x_mac_lookup(struct lan966x *lan966x, + const unsigned char mac[ETH_ALEN], + unsigned int vid, enum macaccess_entry_type type) +{ + int ret; + + lan966x_mac_select(lan966x, mac, vid); + + /* Issue a read command */ + lan_wr(ANA_MACACCESS_ENTRYTYPE_SET(type) | + ANA_MACACCESS_VALID_SET(1) | + ANA_MACACCESS_MAC_TABLE_CMD_SET(MACACCESS_CMD_READ), + lan966x, ANA_MACACCESS); + + ret = lan966x_mac_wait_for_completion(lan966x); + if (ret) + return ret; + + return ANA_MACACCESS_VALID_GET(lan_rd(lan966x, ANA_MACACCESS)); +} + +static void lan966x_fdb_call_notifiers(enum switchdev_notifier_type type, + const char *mac, u16 vid, + struct net_device *dev) +{ + struct switchdev_notifier_fdb_info info = { 0 }; + + info.addr = mac; + info.vid = vid; + info.offloaded = true; + call_switchdev_notifiers(type, dev, &info.info, NULL); +} + +int lan966x_mac_add_entry(struct lan966x *lan966x, struct lan966x_port *port, + const unsigned char *addr, u16 vid) +{ + struct lan966x_mac_entry *mac_entry; + + spin_lock(&lan966x->mac_lock); + if (lan966x_mac_lookup(lan966x, addr, vid, ENTRYTYPE_NORMAL)) { + spin_unlock(&lan966x->mac_lock); + return 0; + } + + /* In case the entry already exists, don't add it again to SW, + * just update HW, but we need to look in the actual HW because + * it is possible for an entry to be learn by HW and before we + * get the interrupt the frame will reach CPU and the CPU will + * add the entry but without the extern_learn flag. 
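+ * When the entry is already tracked in the SW list below, the list
+ * insertion and the switchdev notification are skipped and only the HW
+ * entry is refreshed (see the mac_learn label).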
+ */ + mac_entry = lan966x_mac_find_entry(lan966x, addr, vid, port->chip_port); + if (mac_entry) { + spin_unlock(&lan966x->mac_lock); + goto mac_learn; + } + + mac_entry = lan966x_mac_alloc_entry(port, addr, vid); + if (!mac_entry) { + spin_unlock(&lan966x->mac_lock); + return -ENOMEM; + } + + list_add_tail(&mac_entry->list, &lan966x->mac_entries); + spin_unlock(&lan966x->mac_lock); + + lan966x_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED, addr, vid, + port->bond ?: port->dev); + +mac_learn: + lan966x_mac_learn(lan966x, port->chip_port, addr, vid, ENTRYTYPE_LOCKED); + + return 0; +} + +int lan966x_mac_del_entry(struct lan966x *lan966x, const unsigned char *addr, + u16 vid) +{ + struct lan966x_mac_entry *mac_entry, *tmp; + + spin_lock(&lan966x->mac_lock); + list_for_each_entry_safe(mac_entry, tmp, &lan966x->mac_entries, + list) { + if (mac_entry->vid == vid && + ether_addr_equal(addr, mac_entry->mac)) { + lan966x_mac_forget_locked(lan966x, mac_entry->mac, + mac_entry->vid, + ENTRYTYPE_LOCKED); + + list_del(&mac_entry->list); + kfree(mac_entry); + } + } + spin_unlock(&lan966x->mac_lock); + + return 0; +} + +void lan966x_mac_lag_replace_port_entry(struct lan966x *lan966x, + struct lan966x_port *src, + struct lan966x_port *dst) +{ + struct lan966x_mac_entry *mac_entry; + + spin_lock(&lan966x->mac_lock); + list_for_each_entry(mac_entry, &lan966x->mac_entries, list) { + if (mac_entry->port_index == src->chip_port && + mac_entry->lag) { + lan966x_mac_forget_locked(lan966x, mac_entry->mac, + mac_entry->vid, + ENTRYTYPE_LOCKED); + + lan966x_mac_learn_locked(lan966x, dst->chip_port, + mac_entry->mac, mac_entry->vid, + ENTRYTYPE_LOCKED); + mac_entry->port_index = dst->chip_port; + } + } + spin_unlock(&lan966x->mac_lock); +} + +void lan966x_mac_lag_remove_port_entry(struct lan966x *lan966x, + struct lan966x_port *src) +{ + struct lan966x_mac_entry *mac_entry, *tmp; + + spin_lock(&lan966x->mac_lock); + list_for_each_entry_safe(mac_entry, tmp, &lan966x->mac_entries, + list) { + if (mac_entry->port_index == src->chip_port && + mac_entry->lag) { + lan966x_mac_forget_locked(lan966x, mac_entry->mac, + mac_entry->vid, + ENTRYTYPE_LOCKED); + + list_del(&mac_entry->list); + kfree(mac_entry); + } + } + spin_unlock(&lan966x->mac_lock); +} + +void lan966x_mac_purge_entries(struct lan966x *lan966x) +{ + struct lan966x_mac_entry *mac_entry, *tmp; + + spin_lock(&lan966x->mac_lock); + list_for_each_entry_safe(mac_entry, tmp, &lan966x->mac_entries, + list) { + lan966x_mac_forget_locked(lan966x, mac_entry->mac, + mac_entry->vid, ENTRYTYPE_LOCKED); + + list_del(&mac_entry->list); + kfree(mac_entry); + } + spin_unlock(&lan966x->mac_lock); +} + +static void lan966x_mac_notifiers(enum switchdev_notifier_type type, + unsigned char *mac, u32 vid, + struct net_device *dev) +{ + rtnl_lock(); + lan966x_fdb_call_notifiers(type, mac, vid, dev); + rtnl_unlock(); +} + +static void lan966x_mac_process_raw_entry(struct lan966x_mac_raw_entry *raw_entry, + u8 *mac, u16 *vid, u32 *dest_idx) +{ + mac[0] = (raw_entry->mach >> 8) & 0xff; + mac[1] = (raw_entry->mach >> 0) & 0xff; + mac[2] = (raw_entry->macl >> 24) & 0xff; + mac[3] = (raw_entry->macl >> 16) & 0xff; + mac[4] = (raw_entry->macl >> 8) & 0xff; + mac[5] = (raw_entry->macl >> 0) & 0xff; + + *vid = (raw_entry->mach >> 16) & 0xfff; + *dest_idx = ANA_MACACCESS_DEST_IDX_GET(raw_entry->maca); +} + +static void lan966x_mac_irq_process(struct lan966x *lan966x, u32 row, + struct lan966x_mac_raw_entry *raw_entries) +{ + struct lan966x_mac_entry *mac_entry, *tmp; + unsigned char mac[ETH_ALEN] 
__aligned(2); + struct list_head mac_deleted_entries; + struct lan966x_port *port; + u32 dest_idx; + u32 column; + u16 vid; + + INIT_LIST_HEAD(&mac_deleted_entries); + + spin_lock(&lan966x->mac_lock); + list_for_each_entry_safe(mac_entry, tmp, &lan966x->mac_entries, list) { + bool found = false; + + if (mac_entry->row != row) + continue; + + for (column = 0; column < LAN966X_MAC_COLUMNS; ++column) { + /* All the valid entries are at the start of the row, + * so when get one invalid entry it can just skip the + * rest of the columns + */ + if (!ANA_MACACCESS_VALID_GET(raw_entries[column].maca)) + break; + + lan966x_mac_process_raw_entry(&raw_entries[column], + mac, &vid, &dest_idx); + if (WARN_ON(dest_idx >= lan966x->num_phys_ports)) + continue; + + /* If the entry in SW is found, then there is nothing + * to do + */ + if (mac_entry->vid == vid && + ether_addr_equal(mac_entry->mac, mac) && + mac_entry->port_index == dest_idx) { + raw_entries[column].processed = true; + found = true; + break; + } + } + + if (!found) { + list_del(&mac_entry->list); + /* Move the entry from SW list to a tmp list such that + * it would be deleted later + */ + list_add_tail(&mac_entry->list, &mac_deleted_entries); + } + } + spin_unlock(&lan966x->mac_lock); + + list_for_each_entry_safe(mac_entry, tmp, &mac_deleted_entries, list) { + /* Notify the bridge that the entry doesn't exist + * anymore in the HW + */ + port = lan966x->ports[mac_entry->port_index]; + lan966x_mac_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, + mac_entry->mac, mac_entry->vid, + port->bond ?: port->dev); + list_del(&mac_entry->list); + kfree(mac_entry); + } + + /* Now go to the list of columns and see if any entry was not in the SW + * list, then that means that the entry is new so it needs to notify the + * bridge. 
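+ * Any raw entry still not marked as processed has no SW counterpart: a SW
+ * entry is allocated for it and SWITCHDEV_FDB_ADD_TO_BRIDGE is sent so the
+ * bridge learns about it.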
+ */ + for (column = 0; column < LAN966X_MAC_COLUMNS; ++column) { + /* All the valid entries are at the start of the row, so when + * get one invalid entry it can just skip the rest of the columns + */ + if (!ANA_MACACCESS_VALID_GET(raw_entries[column].maca)) + break; + + /* If the entry already exists then don't do anything */ + if (raw_entries[column].processed) + continue; + + lan966x_mac_process_raw_entry(&raw_entries[column], + mac, &vid, &dest_idx); + if (WARN_ON(dest_idx >= lan966x->num_phys_ports)) + continue; + + spin_lock(&lan966x->mac_lock); + mac_entry = lan966x_mac_find_entry(lan966x, mac, vid, dest_idx); + if (mac_entry) { + spin_unlock(&lan966x->mac_lock); + continue; + } + + port = lan966x->ports[dest_idx]; + mac_entry = lan966x_mac_alloc_entry(port, mac, vid); + if (!mac_entry) { + spin_unlock(&lan966x->mac_lock); + return; + } + + mac_entry->row = row; + list_add_tail(&mac_entry->list, &lan966x->mac_entries); + spin_unlock(&lan966x->mac_lock); + + lan966x_mac_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE, + mac, vid, port->bond ?: port->dev); + } +} + +irqreturn_t lan966x_mac_irq_handler(struct lan966x *lan966x) +{ + struct lan966x_mac_raw_entry entry[LAN966X_MAC_COLUMNS] = { 0 }; + u32 index, column; + bool stop = true; + u32 val; + + /* Start the scan from 0, 0 */ + lan_wr(ANA_MACTINDX_M_INDEX_SET(0) | + ANA_MACTINDX_BUCKET_SET(0), + lan966x, ANA_MACTINDX); + + while (1) { + spin_lock(&lan966x->mac_lock); + lan_rmw(ANA_MACACCESS_MAC_TABLE_CMD_SET(MACACCESS_CMD_SYNC_GET_NEXT), + ANA_MACACCESS_MAC_TABLE_CMD, + lan966x, ANA_MACACCESS); + lan966x_mac_wait_for_completion(lan966x); + + val = lan_rd(lan966x, ANA_MACTINDX); + index = ANA_MACTINDX_M_INDEX_GET(val); + column = ANA_MACTINDX_BUCKET_GET(val); + + /* The SYNC-GET-NEXT returns all the entries(4) in a row in + * which is suffered a change. By change it means that new entry + * was added or an entry was removed because of ageing. + * It would return all the columns for that row. And after that + * it would return the next row The stop conditions of the + * SYNC-GET-NEXT is when it reaches 'directly' to row 0 + * column 3. So if SYNC-GET-NEXT returns row 0 and column 0 + * then it is required to continue to read more even if it + * reaches row 0 and column 3. 
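+ * In code terms: seeing row 0/column 0 clears the stop flag, the flag is
+ * only set again once a complete row has been processed, and only then can
+ * a returned row 0/column 3 end the scan.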
+ */ + if (index == 0 && column == 0) + stop = false; + + if (column == LAN966X_MAC_COLUMNS - 1 && + index == 0 && stop) { + spin_unlock(&lan966x->mac_lock); + break; + } + + entry[column].mach = lan_rd(lan966x, ANA_MACHDATA); + entry[column].macl = lan_rd(lan966x, ANA_MACLDATA); + entry[column].maca = lan_rd(lan966x, ANA_MACACCESS); + spin_unlock(&lan966x->mac_lock); + + /* Once all the columns are read process them */ + if (column == LAN966X_MAC_COLUMNS - 1) { + lan966x_mac_irq_process(lan966x, index, entry); + /* A row was processed so it is safe to assume that the + * next row/column can be the stop condition + */ + stop = true; + } + } + + lan_rmw(ANA_ANAINTR_INTR_SET(0), + ANA_ANAINTR_INTR, + lan966x, ANA_ANAINTR); + + return IRQ_HANDLED; +} diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c new file mode 100644 index 000000000..9ce46588a --- /dev/null +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c @@ -0,0 +1,1244 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include <linux/module.h> +#include <linux/if_bridge.h> +#include <linux/if_vlan.h> +#include <linux/iopoll.h> +#include <linux/ip.h> +#include <linux/of_platform.h> +#include <linux/of_net.h> +#include <linux/packing.h> +#include <linux/phy/phy.h> +#include <linux/reset.h> +#include <net/addrconf.h> + +#include "lan966x_main.h" + +#define XTR_EOF_0 0x00000080U +#define XTR_EOF_1 0x01000080U +#define XTR_EOF_2 0x02000080U +#define XTR_EOF_3 0x03000080U +#define XTR_PRUNED 0x04000080U +#define XTR_ABORT 0x05000080U +#define XTR_ESCAPE 0x06000080U +#define XTR_NOT_READY 0x07000080U +#define XTR_VALID_BYTES(x) (4 - (((x) >> 24) & 3)) + +#define IO_RANGES 2 + +static const struct of_device_id lan966x_match[] = { + { .compatible = "microchip,lan966x-switch" }, + { } +}; +MODULE_DEVICE_TABLE(of, lan966x_match); + +struct lan966x_main_io_resource { + enum lan966x_target id; + phys_addr_t offset; + int range; +}; + +static const struct lan966x_main_io_resource lan966x_main_iomap[] = { + { TARGET_CPU, 0xc0000, 0 }, /* 0xe00c0000 */ + { TARGET_FDMA, 0xc0400, 0 }, /* 0xe00c0400 */ + { TARGET_ORG, 0, 1 }, /* 0xe2000000 */ + { TARGET_GCB, 0x4000, 1 }, /* 0xe2004000 */ + { TARGET_QS, 0x8000, 1 }, /* 0xe2008000 */ + { TARGET_PTP, 0xc000, 1 }, /* 0xe200c000 */ + { TARGET_CHIP_TOP, 0x10000, 1 }, /* 0xe2010000 */ + { TARGET_REW, 0x14000, 1 }, /* 0xe2014000 */ + { TARGET_SYS, 0x28000, 1 }, /* 0xe2028000 */ + { TARGET_DEV, 0x34000, 1 }, /* 0xe2034000 */ + { TARGET_DEV + 1, 0x38000, 1 }, /* 0xe2038000 */ + { TARGET_DEV + 2, 0x3c000, 1 }, /* 0xe203c000 */ + { TARGET_DEV + 3, 0x40000, 1 }, /* 0xe2040000 */ + { TARGET_DEV + 4, 0x44000, 1 }, /* 0xe2044000 */ + { TARGET_DEV + 5, 0x48000, 1 }, /* 0xe2048000 */ + { TARGET_DEV + 6, 0x4c000, 1 }, /* 0xe204c000 */ + { TARGET_DEV + 7, 0x50000, 1 }, /* 0xe2050000 */ + { TARGET_QSYS, 0x100000, 1 }, /* 0xe2100000 */ + { TARGET_AFI, 0x120000, 1 }, /* 0xe2120000 */ + { TARGET_ANA, 0x140000, 1 }, /* 0xe2140000 */ +}; + +static int lan966x_create_targets(struct platform_device *pdev, + struct lan966x *lan966x) +{ + struct resource *iores[IO_RANGES]; + void __iomem *begin[IO_RANGES]; + int idx; + + /* Initially map the entire range and after that update each target to + * point inside the region at the correct offset. It is possible that + * other devices access the same region so don't add any checks about + * this. 
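+ * E.g. TARGET_GCB resolves to begin[1] + 0x4000, matching the 0xe2004000
+ * address noted in lan966x_main_iomap above.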
+ */ + for (idx = 0; idx < IO_RANGES; idx++) { + iores[idx] = platform_get_resource(pdev, IORESOURCE_MEM, + idx); + if (!iores[idx]) { + dev_err(&pdev->dev, "Invalid resource\n"); + return -EINVAL; + } + + begin[idx] = devm_ioremap(&pdev->dev, + iores[idx]->start, + resource_size(iores[idx])); + if (!begin[idx]) { + dev_err(&pdev->dev, "Unable to get registers: %s\n", + iores[idx]->name); + return -ENOMEM; + } + } + + for (idx = 0; idx < ARRAY_SIZE(lan966x_main_iomap); idx++) { + const struct lan966x_main_io_resource *iomap = + &lan966x_main_iomap[idx]; + + lan966x->regs[iomap->id] = begin[iomap->range] + iomap->offset; + } + + return 0; +} + +static bool lan966x_port_unique_address(struct net_device *dev) +{ + struct lan966x_port *port = netdev_priv(dev); + struct lan966x *lan966x = port->lan966x; + int p; + + for (p = 0; p < lan966x->num_phys_ports; ++p) { + port = lan966x->ports[p]; + if (!port || port->dev == dev) + continue; + + if (ether_addr_equal(dev->dev_addr, port->dev->dev_addr)) + return false; + } + + return true; +} + +static int lan966x_port_set_mac_address(struct net_device *dev, void *p) +{ + struct lan966x_port *port = netdev_priv(dev); + struct lan966x *lan966x = port->lan966x; + const struct sockaddr *addr = p; + int ret; + + if (ether_addr_equal(addr->sa_data, dev->dev_addr)) + return 0; + + /* Learn the new net device MAC address in the mac table. */ + ret = lan966x_mac_cpu_learn(lan966x, addr->sa_data, HOST_PVID); + if (ret) + return ret; + + /* If there is another port with the same address as the dev, then don't + * delete it from the MAC table + */ + if (!lan966x_port_unique_address(dev)) + goto out; + + /* Then forget the previous one. */ + ret = lan966x_mac_cpu_forget(lan966x, dev->dev_addr, HOST_PVID); + if (ret) + return ret; + +out: + eth_hw_addr_set(dev, addr->sa_data); + return ret; +} + +static int lan966x_port_get_phys_port_name(struct net_device *dev, + char *buf, size_t len) +{ + struct lan966x_port *port = netdev_priv(dev); + int ret; + + ret = snprintf(buf, len, "p%d", port->chip_port); + if (ret >= len) + return -EINVAL; + + return 0; +} + +static int lan966x_port_open(struct net_device *dev) +{ + struct lan966x_port *port = netdev_priv(dev); + struct lan966x *lan966x = port->lan966x; + int err; + + /* Enable receiving frames on the port, and activate auto-learning of + * MAC addresses. 
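+ * RECV_ENA lets the port accept frames while LEARNAUTO makes the analyzer
+ * populate the MAC table from their source addresses.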
+ */ + lan_rmw(ANA_PORT_CFG_LEARNAUTO_SET(1) | + ANA_PORT_CFG_RECV_ENA_SET(1) | + ANA_PORT_CFG_PORTID_VAL_SET(port->chip_port), + ANA_PORT_CFG_LEARNAUTO | + ANA_PORT_CFG_RECV_ENA | + ANA_PORT_CFG_PORTID_VAL, + lan966x, ANA_PORT_CFG(port->chip_port)); + + err = phylink_fwnode_phy_connect(port->phylink, port->fwnode, 0); + if (err) { + netdev_err(dev, "Could not attach to PHY\n"); + return err; + } + + phylink_start(port->phylink); + + return 0; +} + +static int lan966x_port_stop(struct net_device *dev) +{ + struct lan966x_port *port = netdev_priv(dev); + + lan966x_port_config_down(port); + phylink_stop(port->phylink); + phylink_disconnect_phy(port->phylink); + + return 0; +} + +static int lan966x_port_inj_status(struct lan966x *lan966x) +{ + return lan_rd(lan966x, QS_INJ_STATUS); +} + +static int lan966x_port_inj_ready(struct lan966x *lan966x, u8 grp) +{ + u32 val; + + if (lan_rd(lan966x, QS_INJ_STATUS) & QS_INJ_STATUS_FIFO_RDY_SET(BIT(grp))) + return 0; + + return readx_poll_timeout_atomic(lan966x_port_inj_status, lan966x, val, + QS_INJ_STATUS_FIFO_RDY_GET(val) & BIT(grp), + READL_SLEEP_US, READL_TIMEOUT_US); +} + +static int lan966x_port_ifh_xmit(struct sk_buff *skb, + __be32 *ifh, + struct net_device *dev) +{ + struct lan966x_port *port = netdev_priv(dev); + struct lan966x *lan966x = port->lan966x; + u32 i, count, last; + u8 grp = 0; + u32 val; + int err; + + val = lan_rd(lan966x, QS_INJ_STATUS); + if (!(QS_INJ_STATUS_FIFO_RDY_GET(val) & BIT(grp)) || + (QS_INJ_STATUS_WMARK_REACHED_GET(val) & BIT(grp))) + goto err; + + /* Write start of frame */ + lan_wr(QS_INJ_CTRL_GAP_SIZE_SET(1) | + QS_INJ_CTRL_SOF_SET(1), + lan966x, QS_INJ_CTRL(grp)); + + /* Write IFH header */ + for (i = 0; i < IFH_LEN; ++i) { + /* Wait until the fifo is ready */ + err = lan966x_port_inj_ready(lan966x, grp); + if (err) + goto err; + + lan_wr((__force u32)ifh[i], lan966x, QS_INJ_WR(grp)); + } + + /* Write frame */ + count = DIV_ROUND_UP(skb->len, 4); + last = skb->len % 4; + for (i = 0; i < count; ++i) { + /* Wait until the fifo is ready */ + err = lan966x_port_inj_ready(lan966x, grp); + if (err) + goto err; + + lan_wr(((u32 *)skb->data)[i], lan966x, QS_INJ_WR(grp)); + } + + /* Add padding */ + while (i < (LAN966X_BUFFER_MIN_SZ / 4)) { + /* Wait until the fifo is ready */ + err = lan966x_port_inj_ready(lan966x, grp); + if (err) + goto err; + + lan_wr(0, lan966x, QS_INJ_WR(grp)); + ++i; + } + + /* Inidcate EOF and valid bytes in the last word */ + lan_wr(QS_INJ_CTRL_GAP_SIZE_SET(1) | + QS_INJ_CTRL_VLD_BYTES_SET(skb->len < LAN966X_BUFFER_MIN_SZ ? 
+ 0 : last) | + QS_INJ_CTRL_EOF_SET(1), + lan966x, QS_INJ_CTRL(grp)); + + /* Add dummy CRC */ + lan_wr(0, lan966x, QS_INJ_WR(grp)); + skb_tx_timestamp(skb); + + dev->stats.tx_packets++; + dev->stats.tx_bytes += skb->len; + + if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP && + LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP) + return NETDEV_TX_OK; + + dev_consume_skb_any(skb); + return NETDEV_TX_OK; + +err: + if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP && + LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP) + lan966x_ptp_txtstamp_release(port, skb); + + return NETDEV_TX_BUSY; +} + +static void lan966x_ifh_set_bypass(void *ifh, u64 bypass) +{ + packing(ifh, &bypass, IFH_POS_BYPASS + IFH_WID_BYPASS - 1, + IFH_POS_BYPASS, IFH_LEN * 4, PACK, 0); +} + +static void lan966x_ifh_set_port(void *ifh, u64 bypass) +{ + packing(ifh, &bypass, IFH_POS_DSTS + IFH_WID_DSTS - 1, + IFH_POS_DSTS, IFH_LEN * 4, PACK, 0); +} + +static void lan966x_ifh_set_qos_class(void *ifh, u64 bypass) +{ + packing(ifh, &bypass, IFH_POS_QOS_CLASS + IFH_WID_QOS_CLASS - 1, + IFH_POS_QOS_CLASS, IFH_LEN * 4, PACK, 0); +} + +static void lan966x_ifh_set_ipv(void *ifh, u64 bypass) +{ + packing(ifh, &bypass, IFH_POS_IPV + IFH_WID_IPV - 1, + IFH_POS_IPV, IFH_LEN * 4, PACK, 0); +} + +static void lan966x_ifh_set_vid(void *ifh, u64 vid) +{ + packing(ifh, &vid, IFH_POS_TCI + IFH_WID_TCI - 1, + IFH_POS_TCI, IFH_LEN * 4, PACK, 0); +} + +static void lan966x_ifh_set_rew_op(void *ifh, u64 rew_op) +{ + packing(ifh, &rew_op, IFH_POS_REW_CMD + IFH_WID_REW_CMD - 1, + IFH_POS_REW_CMD, IFH_LEN * 4, PACK, 0); +} + +static void lan966x_ifh_set_timestamp(void *ifh, u64 timestamp) +{ + packing(ifh, ×tamp, IFH_POS_TIMESTAMP + IFH_WID_TIMESTAMP - 1, + IFH_POS_TIMESTAMP, IFH_LEN * 4, PACK, 0); +} + +static netdev_tx_t lan966x_port_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct lan966x_port *port = netdev_priv(dev); + struct lan966x *lan966x = port->lan966x; + __be32 ifh[IFH_LEN]; + int err; + + memset(ifh, 0x0, sizeof(__be32) * IFH_LEN); + + lan966x_ifh_set_bypass(ifh, 1); + lan966x_ifh_set_port(ifh, BIT_ULL(port->chip_port)); + lan966x_ifh_set_qos_class(ifh, skb->priority >= 7 ? 0x7 : skb->priority); + lan966x_ifh_set_ipv(ifh, skb->priority >= 7 ? 
0x7 : skb->priority); + lan966x_ifh_set_vid(ifh, skb_vlan_tag_get(skb)); + + if (port->lan966x->ptp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) { + err = lan966x_ptp_txtstamp_request(port, skb); + if (err) + return err; + + lan966x_ifh_set_rew_op(ifh, LAN966X_SKB_CB(skb)->rew_op); + lan966x_ifh_set_timestamp(ifh, LAN966X_SKB_CB(skb)->ts_id); + } + + spin_lock(&lan966x->tx_lock); + if (port->lan966x->fdma) + err = lan966x_fdma_xmit(skb, ifh, dev); + else + err = lan966x_port_ifh_xmit(skb, ifh, dev); + spin_unlock(&lan966x->tx_lock); + + return err; +} + +static int lan966x_port_change_mtu(struct net_device *dev, int new_mtu) +{ + struct lan966x_port *port = netdev_priv(dev); + struct lan966x *lan966x = port->lan966x; + int old_mtu = dev->mtu; + int err; + + lan_wr(DEV_MAC_MAXLEN_CFG_MAX_LEN_SET(LAN966X_HW_MTU(new_mtu)), + lan966x, DEV_MAC_MAXLEN_CFG(port->chip_port)); + dev->mtu = new_mtu; + + if (!lan966x->fdma) + return 0; + + err = lan966x_fdma_change_mtu(lan966x); + if (err) { + lan_wr(DEV_MAC_MAXLEN_CFG_MAX_LEN_SET(LAN966X_HW_MTU(old_mtu)), + lan966x, DEV_MAC_MAXLEN_CFG(port->chip_port)); + dev->mtu = old_mtu; + } + + return err; +} + +static int lan966x_mc_unsync(struct net_device *dev, const unsigned char *addr) +{ + struct lan966x_port *port = netdev_priv(dev); + struct lan966x *lan966x = port->lan966x; + + return lan966x_mac_forget(lan966x, addr, HOST_PVID, ENTRYTYPE_LOCKED); +} + +static int lan966x_mc_sync(struct net_device *dev, const unsigned char *addr) +{ + struct lan966x_port *port = netdev_priv(dev); + struct lan966x *lan966x = port->lan966x; + + return lan966x_mac_cpu_learn(lan966x, addr, HOST_PVID); +} + +static void lan966x_port_set_rx_mode(struct net_device *dev) +{ + __dev_mc_sync(dev, lan966x_mc_sync, lan966x_mc_unsync); +} + +static int lan966x_port_get_parent_id(struct net_device *dev, + struct netdev_phys_item_id *ppid) +{ + struct lan966x_port *port = netdev_priv(dev); + struct lan966x *lan966x = port->lan966x; + + ppid->id_len = sizeof(lan966x->base_mac); + memcpy(&ppid->id, &lan966x->base_mac, ppid->id_len); + + return 0; +} + +static int lan966x_port_ioctl(struct net_device *dev, struct ifreq *ifr, + int cmd) +{ + struct lan966x_port *port = netdev_priv(dev); + + if (!phy_has_hwtstamp(dev->phydev) && port->lan966x->ptp) { + switch (cmd) { + case SIOCSHWTSTAMP: + return lan966x_ptp_hwtstamp_set(port, ifr); + case SIOCGHWTSTAMP: + return lan966x_ptp_hwtstamp_get(port, ifr); + } + } + + if (!dev->phydev) + return -ENODEV; + + return phy_mii_ioctl(dev->phydev, ifr, cmd); +} + +static const struct net_device_ops lan966x_port_netdev_ops = { + .ndo_open = lan966x_port_open, + .ndo_stop = lan966x_port_stop, + .ndo_start_xmit = lan966x_port_xmit, + .ndo_change_mtu = lan966x_port_change_mtu, + .ndo_set_rx_mode = lan966x_port_set_rx_mode, + .ndo_get_phys_port_name = lan966x_port_get_phys_port_name, + .ndo_get_stats64 = lan966x_stats_get, + .ndo_set_mac_address = lan966x_port_set_mac_address, + .ndo_get_port_parent_id = lan966x_port_get_parent_id, + .ndo_eth_ioctl = lan966x_port_ioctl, + .ndo_setup_tc = lan966x_tc_setup, +}; + +bool lan966x_netdevice_check(const struct net_device *dev) +{ + return dev->netdev_ops == &lan966x_port_netdev_ops; +} + +bool lan966x_hw_offload(struct lan966x *lan966x, u32 port, struct sk_buff *skb) +{ + u32 val; + + /* The IGMP and MLD frames are not forward by the HW if + * multicast snooping is enabled, therefor don't mark as + * offload to allow the SW to forward the frames accordingly. 
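+ * When neither IGMP nor MLD redirection to the CPU is enabled for the
+ * port, snooping is not active and the frame keeps the offload mark.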
+ */ + val = lan_rd(lan966x, ANA_CPU_FWD_CFG(port)); + if (!(val & (ANA_CPU_FWD_CFG_IGMP_REDIR_ENA | + ANA_CPU_FWD_CFG_MLD_REDIR_ENA))) + return true; + + if (eth_type_vlan(skb->protocol)) { + skb = skb_vlan_untag(skb); + if (unlikely(!skb)) + return false; + } + + if (skb->protocol == htons(ETH_P_IP) && + ip_hdr(skb)->protocol == IPPROTO_IGMP) + return false; + + if (IS_ENABLED(CONFIG_IPV6) && + skb->protocol == htons(ETH_P_IPV6) && + ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) && + !ipv6_mc_check_mld(skb)) + return false; + + return true; +} + +static int lan966x_port_xtr_status(struct lan966x *lan966x, u8 grp) +{ + return lan_rd(lan966x, QS_XTR_RD(grp)); +} + +static int lan966x_port_xtr_ready(struct lan966x *lan966x, u8 grp) +{ + u32 val; + + return read_poll_timeout(lan966x_port_xtr_status, val, + val != XTR_NOT_READY, + READL_SLEEP_US, READL_TIMEOUT_US, false, + lan966x, grp); +} + +static int lan966x_rx_frame_word(struct lan966x *lan966x, u8 grp, u32 *rval) +{ + u32 bytes_valid; + u32 val; + int err; + + val = lan_rd(lan966x, QS_XTR_RD(grp)); + if (val == XTR_NOT_READY) { + err = lan966x_port_xtr_ready(lan966x, grp); + if (err) + return -EIO; + } + + switch (val) { + case XTR_ABORT: + return -EIO; + case XTR_EOF_0: + case XTR_EOF_1: + case XTR_EOF_2: + case XTR_EOF_3: + case XTR_PRUNED: + bytes_valid = XTR_VALID_BYTES(val); + val = lan_rd(lan966x, QS_XTR_RD(grp)); + if (val == XTR_ESCAPE) + *rval = lan_rd(lan966x, QS_XTR_RD(grp)); + else + *rval = val; + + return bytes_valid; + case XTR_ESCAPE: + *rval = lan_rd(lan966x, QS_XTR_RD(grp)); + + return 4; + default: + *rval = val; + + return 4; + } +} + +void lan966x_ifh_get_src_port(void *ifh, u64 *src_port) +{ + packing(ifh, src_port, IFH_POS_SRCPORT + IFH_WID_SRCPORT - 1, + IFH_POS_SRCPORT, IFH_LEN * 4, UNPACK, 0); +} + +static void lan966x_ifh_get_len(void *ifh, u64 *len) +{ + packing(ifh, len, IFH_POS_LEN + IFH_WID_LEN - 1, + IFH_POS_LEN, IFH_LEN * 4, UNPACK, 0); +} + +void lan966x_ifh_get_timestamp(void *ifh, u64 *timestamp) +{ + packing(ifh, timestamp, IFH_POS_TIMESTAMP + IFH_WID_TIMESTAMP - 1, + IFH_POS_TIMESTAMP, IFH_LEN * 4, UNPACK, 0); +} + +static irqreturn_t lan966x_xtr_irq_handler(int irq, void *args) +{ + struct lan966x *lan966x = args; + int i, grp = 0, err = 0; + + if (!(lan_rd(lan966x, QS_XTR_DATA_PRESENT) & BIT(grp))) + return IRQ_NONE; + + do { + u64 src_port, len, timestamp; + struct net_device *dev; + struct sk_buff *skb; + int sz = 0, buf_len; + u32 ifh[IFH_LEN]; + u32 *buf; + u32 val; + + for (i = 0; i < IFH_LEN; i++) { + err = lan966x_rx_frame_word(lan966x, grp, &ifh[i]); + if (err != 4) + goto recover; + } + + err = 0; + + lan966x_ifh_get_src_port(ifh, &src_port); + lan966x_ifh_get_len(ifh, &len); + lan966x_ifh_get_timestamp(ifh, ×tamp); + + WARN_ON(src_port >= lan966x->num_phys_ports); + + dev = lan966x->ports[src_port]->dev; + skb = netdev_alloc_skb(dev, len); + if (unlikely(!skb)) { + netdev_err(dev, "Unable to allocate sk_buff\n"); + err = -ENOMEM; + break; + } + buf_len = len - ETH_FCS_LEN; + buf = (u32 *)skb_put(skb, buf_len); + + len = 0; + do { + sz = lan966x_rx_frame_word(lan966x, grp, &val); + if (sz < 0) { + kfree_skb(skb); + goto recover; + } + + *buf++ = val; + len += sz; + } while (len < buf_len); + + /* Read the FCS */ + sz = lan966x_rx_frame_word(lan966x, grp, &val); + if (sz < 0) { + kfree_skb(skb); + goto recover; + } + + /* Update the statistics if part of the FCS was read before */ + len -= ETH_FCS_LEN - sz; + + if (unlikely(dev->features & NETIF_F_RXFCS)) { + buf = (u32 *)skb_put(skb, 
ETH_FCS_LEN); + *buf = val; + } + + lan966x_ptp_rxtstamp(lan966x, skb, timestamp); + skb->protocol = eth_type_trans(skb, dev); + + if (lan966x->bridge_mask & BIT(src_port)) { + skb->offload_fwd_mark = 1; + + skb_reset_network_header(skb); + if (!lan966x_hw_offload(lan966x, src_port, skb)) + skb->offload_fwd_mark = 0; + } + + if (!skb_defer_rx_timestamp(skb)) + netif_rx(skb); + + dev->stats.rx_bytes += len; + dev->stats.rx_packets++; + +recover: + if (sz < 0 || err) + lan_rd(lan966x, QS_XTR_RD(grp)); + + } while (lan_rd(lan966x, QS_XTR_DATA_PRESENT) & BIT(grp)); + + return IRQ_HANDLED; +} + +static irqreturn_t lan966x_ana_irq_handler(int irq, void *args) +{ + struct lan966x *lan966x = args; + + return lan966x_mac_irq_handler(lan966x); +} + +static void lan966x_cleanup_ports(struct lan966x *lan966x) +{ + struct lan966x_port *port; + int p; + + for (p = 0; p < lan966x->num_phys_ports; p++) { + port = lan966x->ports[p]; + if (!port) + continue; + + if (port->dev) + unregister_netdev(port->dev); + + if (lan966x->fdma && lan966x->fdma_ndev == port->dev) + lan966x_fdma_netdev_deinit(lan966x, port->dev); + + if (port->phylink) { + rtnl_lock(); + lan966x_port_stop(port->dev); + rtnl_unlock(); + phylink_destroy(port->phylink); + port->phylink = NULL; + } + + if (port->fwnode) + fwnode_handle_put(port->fwnode); + } + + disable_irq(lan966x->xtr_irq); + lan966x->xtr_irq = -ENXIO; + + if (lan966x->ana_irq > 0) { + disable_irq(lan966x->ana_irq); + lan966x->ana_irq = -ENXIO; + } + + if (lan966x->fdma) + devm_free_irq(lan966x->dev, lan966x->fdma_irq, lan966x); + + if (lan966x->ptp_irq > 0) + devm_free_irq(lan966x->dev, lan966x->ptp_irq, lan966x); + + if (lan966x->ptp_ext_irq > 0) + devm_free_irq(lan966x->dev, lan966x->ptp_ext_irq, lan966x); +} + +static int lan966x_probe_port(struct lan966x *lan966x, u32 p, + phy_interface_t phy_mode, + struct fwnode_handle *portnp) +{ + struct lan966x_port *port; + struct phylink *phylink; + struct net_device *dev; + int err; + + if (p >= lan966x->num_phys_ports) + return -EINVAL; + + dev = devm_alloc_etherdev_mqs(lan966x->dev, + sizeof(struct lan966x_port), + NUM_PRIO_QUEUES, 1); + if (!dev) + return -ENOMEM; + + SET_NETDEV_DEV(dev, lan966x->dev); + port = netdev_priv(dev); + port->dev = dev; + port->lan966x = lan966x; + port->chip_port = p; + lan966x->ports[p] = port; + + dev->max_mtu = ETH_MAX_MTU; + + dev->netdev_ops = &lan966x_port_netdev_ops; + dev->ethtool_ops = &lan966x_ethtool_ops; + dev->features |= NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_STAG_TX | + NETIF_F_HW_TC; + dev->hw_features |= NETIF_F_HW_TC; + dev->needed_headroom = IFH_LEN * sizeof(u32); + + eth_hw_addr_gen(dev, lan966x->base_mac, p + 1); + + lan966x_mac_learn(lan966x, PGID_CPU, dev->dev_addr, HOST_PVID, + ENTRYTYPE_LOCKED); + + port->phylink_config.dev = &port->dev->dev; + port->phylink_config.type = PHYLINK_NETDEV; + port->phylink_pcs.poll = true; + port->phylink_pcs.ops = &lan966x_phylink_pcs_ops; + + port->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE | + MAC_10 | MAC_100 | MAC_1000FD | MAC_2500FD; + + phy_interface_set_rgmii(port->phylink_config.supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_MII, + port->phylink_config.supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_GMII, + port->phylink_config.supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_SGMII, + port->phylink_config.supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_QSGMII, + port->phylink_config.supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_QUSGMII, + 
port->phylink_config.supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_1000BASEX, + port->phylink_config.supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_2500BASEX, + port->phylink_config.supported_interfaces); + + phylink = phylink_create(&port->phylink_config, + portnp, + phy_mode, + &lan966x_phylink_mac_ops); + if (IS_ERR(phylink)) { + port->dev = NULL; + return PTR_ERR(phylink); + } + + port->phylink = phylink; + + err = register_netdev(dev); + if (err) { + dev_err(lan966x->dev, "register_netdev failed\n"); + return err; + } + + lan966x_vlan_port_set_vlan_aware(port, 0); + lan966x_vlan_port_set_vid(port, HOST_PVID, false, false); + lan966x_vlan_port_apply(port); + + return 0; +} + +static void lan966x_init(struct lan966x *lan966x) +{ + u32 p, i; + + /* MAC table initialization */ + lan966x_mac_init(lan966x); + + lan966x_vlan_init(lan966x); + + /* Flush queues */ + lan_wr(lan_rd(lan966x, QS_XTR_FLUSH) | + GENMASK(1, 0), + lan966x, QS_XTR_FLUSH); + + /* Allow to drain */ + mdelay(1); + + /* All Queues normal */ + lan_wr(lan_rd(lan966x, QS_XTR_FLUSH) & + ~(GENMASK(1, 0)), + lan966x, QS_XTR_FLUSH); + + /* Set MAC age time to default value, the entry is aged after + * 2 * AGE_PERIOD + */ + lan_wr(ANA_AUTOAGE_AGE_PERIOD_SET(BR_DEFAULT_AGEING_TIME / 2 / HZ), + lan966x, ANA_AUTOAGE); + + /* Disable learning for frames discarded by VLAN ingress filtering */ + lan_rmw(ANA_ADVLEARN_VLAN_CHK_SET(1), + ANA_ADVLEARN_VLAN_CHK, + lan966x, ANA_ADVLEARN); + + /* Setup frame ageing - "2 sec" - The unit is 6.5 us on lan966x */ + lan_wr(SYS_FRM_AGING_AGE_TX_ENA_SET(1) | + (20000000 / 65), + lan966x, SYS_FRM_AGING); + + /* Map the 8 CPU extraction queues to CPU port */ + lan_wr(0, lan966x, QSYS_CPU_GROUP_MAP); + + /* Do byte-swap and expect status after last data word + * Extraction: Mode: manual extraction) | Byte_swap + */ + lan_wr(QS_XTR_GRP_CFG_MODE_SET(lan966x->fdma ? 2 : 1) | + QS_XTR_GRP_CFG_BYTE_SWAP_SET(1), + lan966x, QS_XTR_GRP_CFG(0)); + + /* Injection: Mode: manual injection | Byte_swap */ + lan_wr(QS_INJ_GRP_CFG_MODE_SET(lan966x->fdma ? 
2 : 1) | + QS_INJ_GRP_CFG_BYTE_SWAP_SET(1), + lan966x, QS_INJ_GRP_CFG(0)); + + lan_rmw(QS_INJ_CTRL_GAP_SIZE_SET(0), + QS_INJ_CTRL_GAP_SIZE, + lan966x, QS_INJ_CTRL(0)); + + /* Enable IFH insertion/parsing on CPU ports */ + lan_wr(SYS_PORT_MODE_INCL_INJ_HDR_SET(1) | + SYS_PORT_MODE_INCL_XTR_HDR_SET(1), + lan966x, SYS_PORT_MODE(CPU_PORT)); + + /* Setup flooding PGIDs */ + lan_wr(ANA_FLOODING_IPMC_FLD_MC4_DATA_SET(PGID_MCIPV4) | + ANA_FLOODING_IPMC_FLD_MC4_CTRL_SET(PGID_MC) | + ANA_FLOODING_IPMC_FLD_MC6_DATA_SET(PGID_MCIPV6) | + ANA_FLOODING_IPMC_FLD_MC6_CTRL_SET(PGID_MC), + lan966x, ANA_FLOODING_IPMC); + + /* There are 8 priorities */ + for (i = 0; i < 8; ++i) + lan_rmw(ANA_FLOODING_FLD_MULTICAST_SET(PGID_MC) | + ANA_FLOODING_FLD_UNICAST_SET(PGID_UC) | + ANA_FLOODING_FLD_BROADCAST_SET(PGID_BC), + ANA_FLOODING_FLD_MULTICAST | + ANA_FLOODING_FLD_UNICAST | + ANA_FLOODING_FLD_BROADCAST, + lan966x, ANA_FLOODING(i)); + + for (i = 0; i < PGID_ENTRIES; ++i) + /* Set all the entries to obey VLAN_VLAN */ + lan_rmw(ANA_PGID_CFG_OBEY_VLAN_SET(1), + ANA_PGID_CFG_OBEY_VLAN, + lan966x, ANA_PGID_CFG(i)); + + for (p = 0; p < lan966x->num_phys_ports; p++) { + /* Disable bridging by default */ + lan_rmw(ANA_PGID_PGID_SET(0x0), + ANA_PGID_PGID, + lan966x, ANA_PGID(p + PGID_SRC)); + + /* Do not forward BPDU frames to the front ports and copy them + * to CPU + */ + lan_wr(0xffff, lan966x, ANA_CPU_FWD_BPDU_CFG(p)); + } + + /* Set source buffer size for each priority and each port to 1500 bytes */ + for (i = 0; i <= QSYS_Q_RSRV; ++i) { + lan_wr(1500 / 64, lan966x, QSYS_RES_CFG(i)); + lan_wr(1500 / 64, lan966x, QSYS_RES_CFG(512 + i)); + } + + /* Enable switching to/from cpu port */ + lan_wr(QSYS_SW_PORT_MODE_PORT_ENA_SET(1) | + QSYS_SW_PORT_MODE_SCH_NEXT_CFG_SET(1) | + QSYS_SW_PORT_MODE_INGRESS_DROP_MODE_SET(1), + lan966x, QSYS_SW_PORT_MODE(CPU_PORT)); + + /* Configure and enable the CPU port */ + lan_rmw(ANA_PGID_PGID_SET(0), + ANA_PGID_PGID, + lan966x, ANA_PGID(CPU_PORT)); + lan_rmw(ANA_PGID_PGID_SET(BIT(CPU_PORT)), + ANA_PGID_PGID, + lan966x, ANA_PGID(PGID_CPU)); + + /* Multicast to all other ports */ + lan_rmw(GENMASK(lan966x->num_phys_ports - 1, 0), + ANA_PGID_PGID, + lan966x, ANA_PGID(PGID_MC)); + + /* This will be controlled by mrouter ports */ + lan_rmw(GENMASK(lan966x->num_phys_ports - 1, 0), + ANA_PGID_PGID, + lan966x, ANA_PGID(PGID_MCIPV4)); + + lan_rmw(GENMASK(lan966x->num_phys_ports - 1, 0), + ANA_PGID_PGID, + lan966x, ANA_PGID(PGID_MCIPV6)); + + /* Unicast to all other ports */ + lan_rmw(GENMASK(lan966x->num_phys_ports - 1, 0), + ANA_PGID_PGID, + lan966x, ANA_PGID(PGID_UC)); + + /* Broadcast to the CPU port and to other ports */ + lan_rmw(ANA_PGID_PGID_SET(BIT(CPU_PORT) | GENMASK(lan966x->num_phys_ports - 1, 0)), + ANA_PGID_PGID, + lan966x, ANA_PGID(PGID_BC)); + + lan_wr(REW_PORT_CFG_NO_REWRITE_SET(1), + lan966x, REW_PORT_CFG(CPU_PORT)); + + lan_rmw(ANA_ANAINTR_INTR_ENA_SET(1), + ANA_ANAINTR_INTR_ENA, + lan966x, ANA_ANAINTR); + + spin_lock_init(&lan966x->tx_lock); + + lan966x_taprio_init(lan966x); +} + +static int lan966x_ram_init(struct lan966x *lan966x) +{ + return lan_rd(lan966x, SYS_RAM_INIT); +} + +static int lan966x_reset_switch(struct lan966x *lan966x) +{ + struct reset_control *switch_reset; + int val = 0; + int ret; + + switch_reset = devm_reset_control_get_optional_shared(lan966x->dev, + "switch"); + if (IS_ERR(switch_reset)) + return dev_err_probe(lan966x->dev, PTR_ERR(switch_reset), + "Could not obtain switch reset"); + + reset_control_reset(switch_reset); + + /* Don't reinitialize the 
switch core, if it is already initialized. In + * case it is initialized twice, some pointers inside the queue system + * in HW will get corrupted and then after a while the queue system gets + * full and no traffic is passing through the switch. The issue is seen + * when loading and unloading the driver and sending traffic through the + * switch. + */ + if (lan_rd(lan966x, SYS_RESET_CFG) & SYS_RESET_CFG_CORE_ENA) + return 0; + + lan_wr(SYS_RESET_CFG_CORE_ENA_SET(0), lan966x, SYS_RESET_CFG); + lan_wr(SYS_RAM_INIT_RAM_INIT_SET(1), lan966x, SYS_RAM_INIT); + ret = readx_poll_timeout(lan966x_ram_init, lan966x, + val, (val & BIT(1)) == 0, READL_SLEEP_US, + READL_TIMEOUT_US); + if (ret) + return ret; + + lan_wr(SYS_RESET_CFG_CORE_ENA_SET(1), lan966x, SYS_RESET_CFG); + + return 0; +} + +static int lan966x_probe(struct platform_device *pdev) +{ + struct fwnode_handle *ports, *portnp; + struct lan966x *lan966x; + u8 mac_addr[ETH_ALEN]; + int err; + + lan966x = devm_kzalloc(&pdev->dev, sizeof(*lan966x), GFP_KERNEL); + if (!lan966x) + return -ENOMEM; + + platform_set_drvdata(pdev, lan966x); + lan966x->dev = &pdev->dev; + + if (!device_get_mac_address(&pdev->dev, mac_addr)) { + ether_addr_copy(lan966x->base_mac, mac_addr); + } else { + pr_info("MAC addr was not set, use random MAC\n"); + eth_random_addr(lan966x->base_mac); + lan966x->base_mac[5] &= 0xf0; + } + + err = lan966x_create_targets(pdev, lan966x); + if (err) + return dev_err_probe(&pdev->dev, err, + "Failed to create targets"); + + err = lan966x_reset_switch(lan966x); + if (err) + return dev_err_probe(&pdev->dev, err, "Reset failed"); + + lan966x->num_phys_ports = NUM_PHYS_PORTS; + lan966x->ports = devm_kcalloc(&pdev->dev, lan966x->num_phys_ports, + sizeof(struct lan966x_port *), + GFP_KERNEL); + if (!lan966x->ports) + return -ENOMEM; + + /* There QS system has 32KB of memory */ + lan966x->shared_queue_sz = LAN966X_BUFFER_MEMORY; + + /* set irq */ + lan966x->xtr_irq = platform_get_irq_byname(pdev, "xtr"); + if (lan966x->xtr_irq <= 0) + return -EINVAL; + + err = devm_request_threaded_irq(&pdev->dev, lan966x->xtr_irq, NULL, + lan966x_xtr_irq_handler, IRQF_ONESHOT, + "frame extraction", lan966x); + if (err) { + pr_err("Unable to use xtr irq"); + return -ENODEV; + } + + lan966x->ana_irq = platform_get_irq_byname(pdev, "ana"); + if (lan966x->ana_irq > 0) { + err = devm_request_threaded_irq(&pdev->dev, lan966x->ana_irq, NULL, + lan966x_ana_irq_handler, IRQF_ONESHOT, + "ana irq", lan966x); + if (err) + return dev_err_probe(&pdev->dev, err, "Unable to use ana irq"); + } + + lan966x->ptp_irq = platform_get_irq_byname(pdev, "ptp"); + if (lan966x->ptp_irq > 0) { + err = devm_request_threaded_irq(&pdev->dev, lan966x->ptp_irq, NULL, + lan966x_ptp_irq_handler, IRQF_ONESHOT, + "ptp irq", lan966x); + if (err) + return dev_err_probe(&pdev->dev, err, "Unable to use ptp irq"); + + lan966x->ptp = 1; + } + + lan966x->fdma_irq = platform_get_irq_byname(pdev, "fdma"); + if (lan966x->fdma_irq > 0) { + err = devm_request_irq(&pdev->dev, lan966x->fdma_irq, + lan966x_fdma_irq_handler, 0, + "fdma irq", lan966x); + if (err) + return dev_err_probe(&pdev->dev, err, "Unable to use fdma irq"); + + lan966x->fdma = true; + } + + if (lan966x->ptp) { + lan966x->ptp_ext_irq = platform_get_irq_byname(pdev, "ptp-ext"); + if (lan966x->ptp_ext_irq > 0) { + err = devm_request_threaded_irq(&pdev->dev, + lan966x->ptp_ext_irq, NULL, + lan966x_ptp_ext_irq_handler, + IRQF_ONESHOT, + "ptp-ext irq", lan966x); + if (err) + return dev_err_probe(&pdev->dev, err, + "Unable to use ptp-ext irq"); 
+ } + } + + ports = device_get_named_child_node(&pdev->dev, "ethernet-ports"); + if (!ports) + return dev_err_probe(&pdev->dev, -ENODEV, + "no ethernet-ports child found\n"); + + /* init switch */ + lan966x_init(lan966x); + lan966x_stats_init(lan966x); + + /* go over the child nodes */ + fwnode_for_each_available_child_node(ports, portnp) { + phy_interface_t phy_mode; + struct phy *serdes; + u32 p; + + if (fwnode_property_read_u32(portnp, "reg", &p)) + continue; + + phy_mode = fwnode_get_phy_mode(portnp); + err = lan966x_probe_port(lan966x, p, phy_mode, portnp); + if (err) + goto cleanup_ports; + + /* Read needed configuration */ + lan966x->ports[p]->config.portmode = phy_mode; + lan966x->ports[p]->fwnode = fwnode_handle_get(portnp); + + serdes = devm_of_phy_get(lan966x->dev, to_of_node(portnp), NULL); + if (PTR_ERR(serdes) == -ENODEV) + serdes = NULL; + if (IS_ERR(serdes)) { + err = PTR_ERR(serdes); + goto cleanup_ports; + } + lan966x->ports[p]->serdes = serdes; + + lan966x_port_init(lan966x->ports[p]); + } + + fwnode_handle_put(ports); + + lan966x_mdb_init(lan966x); + err = lan966x_fdb_init(lan966x); + if (err) + goto cleanup_ports; + + err = lan966x_ptp_init(lan966x); + if (err) + goto cleanup_fdb; + + err = lan966x_fdma_init(lan966x); + if (err) + goto cleanup_ptp; + + return 0; + +cleanup_ptp: + lan966x_ptp_deinit(lan966x); + +cleanup_fdb: + lan966x_fdb_deinit(lan966x); + +cleanup_ports: + fwnode_handle_put(ports); + fwnode_handle_put(portnp); + + lan966x_cleanup_ports(lan966x); + + cancel_delayed_work_sync(&lan966x->stats_work); + destroy_workqueue(lan966x->stats_queue); + mutex_destroy(&lan966x->stats_lock); + + return err; +} + +static int lan966x_remove(struct platform_device *pdev) +{ + struct lan966x *lan966x = platform_get_drvdata(pdev); + + lan966x_taprio_deinit(lan966x); + lan966x_fdma_deinit(lan966x); + lan966x_cleanup_ports(lan966x); + + cancel_delayed_work_sync(&lan966x->stats_work); + destroy_workqueue(lan966x->stats_queue); + mutex_destroy(&lan966x->stats_lock); + + lan966x_mac_purge_entries(lan966x); + lan966x_mdb_deinit(lan966x); + lan966x_fdb_deinit(lan966x); + lan966x_ptp_deinit(lan966x); + + return 0; +} + +static struct platform_driver lan966x_driver = { + .probe = lan966x_probe, + .remove = lan966x_remove, + .driver = { + .name = "lan966x-switch", + .of_match_table = lan966x_match, + }, +}; + +static int __init lan966x_switch_driver_init(void) +{ + int ret; + + lan966x_register_notifier_blocks(); + + ret = platform_driver_register(&lan966x_driver); + if (ret) + goto err; + + return 0; + +err: + lan966x_unregister_notifier_blocks(); + return ret; +} + +static void __exit lan966x_switch_driver_exit(void) +{ + platform_driver_unregister(&lan966x_driver); + lan966x_unregister_notifier_blocks(); +} + +module_init(lan966x_switch_driver_init); +module_exit(lan966x_switch_driver_exit); + +MODULE_DESCRIPTION("Microchip LAN966X switch driver"); +MODULE_AUTHOR("Horatiu Vultur <horatiu.vultur@microchip.com>"); +MODULE_LICENSE("Dual MIT/GPL"); diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h new file mode 100644 index 000000000..4ec33999e --- /dev/null +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h @@ -0,0 +1,577 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ + +#ifndef __LAN966X_MAIN_H__ +#define __LAN966X_MAIN_H__ + +#include <linux/etherdevice.h> +#include <linux/if_vlan.h> +#include <linux/jiffies.h> +#include <linux/phy.h> +#include <linux/phylink.h> +#include <linux/ptp_clock_kernel.h> 
+#include <net/pkt_cls.h> +#include <net/pkt_sched.h> +#include <net/switchdev.h> + +#include "lan966x_regs.h" +#include "lan966x_ifh.h" + +#define TABLE_UPDATE_SLEEP_US 10 +#define TABLE_UPDATE_TIMEOUT_US 100000 + +#define READL_SLEEP_US 10 +#define READL_TIMEOUT_US 100000000 + +#define LAN966X_BUFFER_CELL_SZ 64 +#define LAN966X_BUFFER_MEMORY (160 * 1024) +#define LAN966X_BUFFER_MIN_SZ 60 + +#define LAN966X_HW_MTU(mtu) ((mtu) + ETH_HLEN + ETH_FCS_LEN) + +#define PGID_AGGR 64 +#define PGID_SRC 80 +#define PGID_ENTRIES 89 + +#define UNAWARE_PVID 0 +#define HOST_PVID 4095 + +/* Reserved amount for (SRC, PRIO) at index 8*SRC + PRIO */ +#define QSYS_Q_RSRV 95 + +#define NUM_PHYS_PORTS 8 +#define CPU_PORT 8 +#define NUM_PRIO_QUEUES 8 + +/* Reserved PGIDs */ +#define PGID_CPU (PGID_AGGR - 6) +#define PGID_UC (PGID_AGGR - 5) +#define PGID_BC (PGID_AGGR - 4) +#define PGID_MC (PGID_AGGR - 3) +#define PGID_MCIPV4 (PGID_AGGR - 2) +#define PGID_MCIPV6 (PGID_AGGR - 1) + +/* Non-reserved PGIDs, used for general purpose */ +#define PGID_GP_START (CPU_PORT + 1) +#define PGID_GP_END PGID_CPU + +#define LAN966X_SPEED_NONE 0 +#define LAN966X_SPEED_2500 1 +#define LAN966X_SPEED_1000 1 +#define LAN966X_SPEED_100 2 +#define LAN966X_SPEED_10 3 + +#define LAN966X_PHC_COUNT 3 +#define LAN966X_PHC_PORT 0 +#define LAN966X_PHC_PINS_NUM 7 + +#define IFH_REW_OP_NOOP 0x0 +#define IFH_REW_OP_ONE_STEP_PTP 0x3 +#define IFH_REW_OP_TWO_STEP_PTP 0x4 + +#define FDMA_RX_DCB_MAX_DBS 1 +#define FDMA_TX_DCB_MAX_DBS 1 +#define FDMA_DCB_INFO_DATAL(x) ((x) & GENMASK(15, 0)) + +#define FDMA_DCB_STATUS_BLOCKL(x) ((x) & GENMASK(15, 0)) +#define FDMA_DCB_STATUS_SOF BIT(16) +#define FDMA_DCB_STATUS_EOF BIT(17) +#define FDMA_DCB_STATUS_INTR BIT(18) +#define FDMA_DCB_STATUS_DONE BIT(19) +#define FDMA_DCB_STATUS_BLOCKO(x) (((x) << 20) & GENMASK(31, 20)) +#define FDMA_DCB_INVALID_DATA 0x1 + +#define FDMA_XTR_CHANNEL 6 +#define FDMA_INJ_CHANNEL 0 +#define FDMA_DCB_MAX 512 + +#define SE_IDX_QUEUE 0 /* 0-79 : Queue scheduler elements */ +#define SE_IDX_PORT 80 /* 80-89 : Port schedular elements */ + +/* MAC table entry types. + * ENTRYTYPE_NORMAL is subject to aging. + * ENTRYTYPE_LOCKED is not subject to aging. + * ENTRYTYPE_MACv4 is not subject to aging. For IPv4 multicast. + * ENTRYTYPE_MACv6 is not subject to aging. For IPv6 multicast. + */ +enum macaccess_entry_type { + ENTRYTYPE_NORMAL = 0, + ENTRYTYPE_LOCKED, + ENTRYTYPE_MACV4, + ENTRYTYPE_MACV6, +}; + +struct lan966x_port; + +struct lan966x_db { + u64 dataptr; + u64 status; +}; + +struct lan966x_rx_dcb { + u64 nextptr; + u64 info; + struct lan966x_db db[FDMA_RX_DCB_MAX_DBS]; +}; + +struct lan966x_tx_dcb { + u64 nextptr; + u64 info; + struct lan966x_db db[FDMA_TX_DCB_MAX_DBS]; +}; + +struct lan966x_rx { + struct lan966x *lan966x; + + /* Pointer to the array of hardware dcbs. */ + struct lan966x_rx_dcb *dcbs; + + /* Pointer to the last address in the dcbs. */ + struct lan966x_rx_dcb *last_entry; + + /* For each DB, there is a page */ + struct page *page[FDMA_DCB_MAX][FDMA_RX_DCB_MAX_DBS]; + + /* Represents the db_index, it can have a value between 0 and + * FDMA_RX_DCB_MAX_DBS, once it reaches the value of FDMA_RX_DCB_MAX_DBS + * it means that the DCB can be reused. + */ + int db_index; + + /* Represents the index in the dcbs. It has a value between 0 and + * FDMA_DCB_MAX + */ + int dcb_index; + + /* Represents the dma address to the dcbs array */ + dma_addr_t dma; + + /* Represents the page order that is used to allocate the pages for the + * RX buffers. 
This value is calculated based on max MTU of the devices. + */ + u8 page_order; + + u8 channel_id; +}; + +struct lan966x_tx_dcb_buf { + struct net_device *dev; + struct sk_buff *skb; + dma_addr_t dma_addr; + bool used; + bool ptp; +}; + +struct lan966x_tx { + struct lan966x *lan966x; + + /* Pointer to the dcb list */ + struct lan966x_tx_dcb *dcbs; + u16 last_in_use; + + /* Represents the DMA address to the first entry of the dcb entries. */ + dma_addr_t dma; + + /* Array of dcbs that are given to the HW */ + struct lan966x_tx_dcb_buf *dcbs_buf; + + u8 channel_id; + + bool activated; +}; + +struct lan966x_stat_layout { + u32 offset; + char name[ETH_GSTRING_LEN]; +}; + +struct lan966x_phc { + struct ptp_clock *clock; + struct ptp_clock_info info; + struct ptp_pin_desc pins[LAN966X_PHC_PINS_NUM]; + struct hwtstamp_config hwtstamp_config; + struct lan966x *lan966x; + u8 index; +}; + +struct lan966x_skb_cb { + u8 rew_op; + u16 ts_id; + unsigned long jiffies; +}; + +#define LAN966X_PTP_TIMEOUT msecs_to_jiffies(10) +#define LAN966X_SKB_CB(skb) \ + ((struct lan966x_skb_cb *)((skb)->cb)) + +struct lan966x { + struct device *dev; + + u8 num_phys_ports; + struct lan966x_port **ports; + + void __iomem *regs[NUM_TARGETS]; + + int shared_queue_sz; + + u8 base_mac[ETH_ALEN]; + + spinlock_t tx_lock; /* lock for frame transmition */ + + struct net_device *bridge; + u16 bridge_mask; + u16 bridge_fwd_mask; + + struct list_head mac_entries; + spinlock_t mac_lock; /* lock for mac_entries list */ + + u16 vlan_mask[VLAN_N_VID]; + DECLARE_BITMAP(cpu_vlan_mask, VLAN_N_VID); + + /* stats */ + const struct lan966x_stat_layout *stats_layout; + u32 num_stats; + + /* workqueue for reading stats */ + struct mutex stats_lock; + u64 *stats; + struct delayed_work stats_work; + struct workqueue_struct *stats_queue; + + /* interrupts */ + int xtr_irq; + int ana_irq; + int ptp_irq; + int fdma_irq; + int ptp_ext_irq; + + /* worqueue for fdb */ + struct workqueue_struct *fdb_work; + struct list_head fdb_entries; + + /* mdb */ + struct list_head mdb_entries; + struct list_head pgid_entries; + + /* ptp */ + bool ptp; + struct lan966x_phc phc[LAN966X_PHC_COUNT]; + spinlock_t ptp_clock_lock; /* lock for phc */ + spinlock_t ptp_ts_id_lock; /* lock for ts_id */ + struct mutex ptp_lock; /* lock for ptp interface state */ + u16 ptp_skbs; + + /* fdma */ + bool fdma; + struct net_device *fdma_ndev; + struct lan966x_rx rx; + struct lan966x_tx tx; + struct napi_struct napi; + + /* Mirror */ + struct lan966x_port *mirror_monitor; + u32 mirror_mask[2]; + u32 mirror_count; +}; + +struct lan966x_port_config { + phy_interface_t portmode; + const unsigned long *advertising; + int speed; + int duplex; + u32 pause; + bool inband; + bool autoneg; +}; + +struct lan966x_port_tc { + bool ingress_shared_block; + unsigned long police_id; + unsigned long ingress_mirror_id; + unsigned long egress_mirror_id; + struct flow_stats police_stat; + struct flow_stats mirror_stat; +}; + +struct lan966x_port { + struct net_device *dev; + struct lan966x *lan966x; + + u8 chip_port; + u16 pvid; + u16 vid; + bool vlan_aware; + + bool learn_ena; + bool mcast_ena; + + struct phylink_config phylink_config; + struct phylink_pcs phylink_pcs; + struct lan966x_port_config config; + struct phylink *phylink; + struct phy *serdes; + struct fwnode_handle *fwnode; + + u8 ptp_cmd; + u16 ts_id; + struct sk_buff_head tx_skbs; + + struct net_device *bond; + bool lag_tx_active; + enum netdev_lag_hash hash_type; + + struct lan966x_port_tc tc; +}; + +extern const struct phylink_mac_ops 
lan966x_phylink_mac_ops; +extern const struct phylink_pcs_ops lan966x_phylink_pcs_ops; +extern const struct ethtool_ops lan966x_ethtool_ops; +extern struct notifier_block lan966x_switchdev_nb __read_mostly; +extern struct notifier_block lan966x_switchdev_blocking_nb __read_mostly; + +bool lan966x_netdevice_check(const struct net_device *dev); + +void lan966x_register_notifier_blocks(void); +void lan966x_unregister_notifier_blocks(void); + +bool lan966x_hw_offload(struct lan966x *lan966x, u32 port, struct sk_buff *skb); + +void lan966x_ifh_get_src_port(void *ifh, u64 *src_port); +void lan966x_ifh_get_timestamp(void *ifh, u64 *timestamp); + +void lan966x_stats_get(struct net_device *dev, + struct rtnl_link_stats64 *stats); +int lan966x_stats_init(struct lan966x *lan966x); + +void lan966x_port_config_down(struct lan966x_port *port); +void lan966x_port_config_up(struct lan966x_port *port); +void lan966x_port_status_get(struct lan966x_port *port, + struct phylink_link_state *state); +int lan966x_port_pcs_set(struct lan966x_port *port, + struct lan966x_port_config *config); +void lan966x_port_init(struct lan966x_port *port); + +int lan966x_mac_ip_learn(struct lan966x *lan966x, + bool cpu_copy, + const unsigned char mac[ETH_ALEN], + unsigned int vid, + enum macaccess_entry_type type); +int lan966x_mac_learn(struct lan966x *lan966x, int port, + const unsigned char mac[ETH_ALEN], + unsigned int vid, + enum macaccess_entry_type type); +int lan966x_mac_forget(struct lan966x *lan966x, + const unsigned char mac[ETH_ALEN], + unsigned int vid, + enum macaccess_entry_type type); +int lan966x_mac_cpu_learn(struct lan966x *lan966x, const char *addr, u16 vid); +int lan966x_mac_cpu_forget(struct lan966x *lan966x, const char *addr, u16 vid); +void lan966x_mac_init(struct lan966x *lan966x); +void lan966x_mac_set_ageing(struct lan966x *lan966x, + u32 ageing); +int lan966x_mac_del_entry(struct lan966x *lan966x, + const unsigned char *addr, + u16 vid); +int lan966x_mac_add_entry(struct lan966x *lan966x, + struct lan966x_port *port, + const unsigned char *addr, + u16 vid); +void lan966x_mac_lag_replace_port_entry(struct lan966x *lan966x, + struct lan966x_port *src, + struct lan966x_port *dst); +void lan966x_mac_lag_remove_port_entry(struct lan966x *lan966x, + struct lan966x_port *src); +void lan966x_mac_purge_entries(struct lan966x *lan966x); +irqreturn_t lan966x_mac_irq_handler(struct lan966x *lan966x); + +void lan966x_vlan_init(struct lan966x *lan966x); +void lan966x_vlan_port_apply(struct lan966x_port *port); +bool lan966x_vlan_cpu_member_cpu_vlan_mask(struct lan966x *lan966x, u16 vid); +void lan966x_vlan_port_set_vlan_aware(struct lan966x_port *port, + bool vlan_aware); +int lan966x_vlan_port_set_vid(struct lan966x_port *port, + u16 vid, + bool pvid, + bool untagged); +void lan966x_vlan_port_add_vlan(struct lan966x_port *port, + u16 vid, + bool pvid, + bool untagged); +void lan966x_vlan_port_del_vlan(struct lan966x_port *port, u16 vid); +void lan966x_vlan_cpu_add_vlan(struct lan966x *lan966x, u16 vid); +void lan966x_vlan_cpu_del_vlan(struct lan966x *lan966x, u16 vid); + +void lan966x_fdb_write_entries(struct lan966x *lan966x, u16 vid); +void lan966x_fdb_erase_entries(struct lan966x *lan966x, u16 vid); +int lan966x_fdb_init(struct lan966x *lan966x); +void lan966x_fdb_deinit(struct lan966x *lan966x); +void lan966x_fdb_flush_workqueue(struct lan966x *lan966x); +int lan966x_handle_fdb(struct net_device *dev, + struct net_device *orig_dev, + unsigned long event, const void *ctx, + const struct 
switchdev_notifier_fdb_info *fdb_info); + +void lan966x_mdb_init(struct lan966x *lan966x); +void lan966x_mdb_deinit(struct lan966x *lan966x); +int lan966x_handle_port_mdb_add(struct lan966x_port *port, + const struct switchdev_obj *obj); +int lan966x_handle_port_mdb_del(struct lan966x_port *port, + const struct switchdev_obj *obj); +void lan966x_mdb_erase_entries(struct lan966x *lan966x, u16 vid); +void lan966x_mdb_write_entries(struct lan966x *lan966x, u16 vid); +void lan966x_mdb_clear_entries(struct lan966x *lan966x); +void lan966x_mdb_restore_entries(struct lan966x *lan966x); + +int lan966x_ptp_init(struct lan966x *lan966x); +void lan966x_ptp_deinit(struct lan966x *lan966x); +int lan966x_ptp_hwtstamp_set(struct lan966x_port *port, struct ifreq *ifr); +int lan966x_ptp_hwtstamp_get(struct lan966x_port *port, struct ifreq *ifr); +void lan966x_ptp_rxtstamp(struct lan966x *lan966x, struct sk_buff *skb, + u64 timestamp); +int lan966x_ptp_txtstamp_request(struct lan966x_port *port, + struct sk_buff *skb); +void lan966x_ptp_txtstamp_release(struct lan966x_port *port, + struct sk_buff *skb); +irqreturn_t lan966x_ptp_irq_handler(int irq, void *args); +irqreturn_t lan966x_ptp_ext_irq_handler(int irq, void *args); +u32 lan966x_ptp_get_period_ps(void); +int lan966x_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts); + +int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev); +int lan966x_fdma_change_mtu(struct lan966x *lan966x); +void lan966x_fdma_netdev_init(struct lan966x *lan966x, struct net_device *dev); +void lan966x_fdma_netdev_deinit(struct lan966x *lan966x, struct net_device *dev); +int lan966x_fdma_init(struct lan966x *lan966x); +void lan966x_fdma_deinit(struct lan966x *lan966x); +irqreturn_t lan966x_fdma_irq_handler(int irq, void *args); + +int lan966x_lag_port_join(struct lan966x_port *port, + struct net_device *brport_dev, + struct net_device *bond, + struct netlink_ext_ack *extack); +void lan966x_lag_port_leave(struct lan966x_port *port, struct net_device *bond); +int lan966x_lag_port_prechangeupper(struct net_device *dev, + struct netdev_notifier_changeupper_info *info); +int lan966x_lag_port_changelowerstate(struct net_device *dev, + struct netdev_notifier_changelowerstate_info *info); +int lan966x_lag_netdev_prechangeupper(struct net_device *dev, + struct netdev_notifier_changeupper_info *info); +int lan966x_lag_netdev_changeupper(struct net_device *dev, + struct netdev_notifier_changeupper_info *info); +bool lan966x_lag_first_port(struct net_device *lag, struct net_device *dev); +u32 lan966x_lag_get_mask(struct lan966x *lan966x, struct net_device *bond); + +int lan966x_port_changeupper(struct net_device *dev, + struct net_device *brport_dev, + struct netdev_notifier_changeupper_info *info); +int lan966x_port_prechangeupper(struct net_device *dev, + struct net_device *brport_dev, + struct netdev_notifier_changeupper_info *info); +void lan966x_port_stp_state_set(struct lan966x_port *port, u8 state); +void lan966x_port_ageing_set(struct lan966x_port *port, + unsigned long ageing_clock_t); +void lan966x_update_fwd_mask(struct lan966x *lan966x); + +int lan966x_tc_setup(struct net_device *dev, enum tc_setup_type type, + void *type_data); + +int lan966x_mqprio_add(struct lan966x_port *port, u8 num_tc); +int lan966x_mqprio_del(struct lan966x_port *port); + +void lan966x_taprio_init(struct lan966x *lan966x); +void lan966x_taprio_deinit(struct lan966x *lan966x); +int lan966x_taprio_add(struct lan966x_port *port, + struct tc_taprio_qopt_offload *qopt); 
+int lan966x_taprio_del(struct lan966x_port *port); +int lan966x_taprio_speed_set(struct lan966x_port *port, int speed); + +int lan966x_tbf_add(struct lan966x_port *port, + struct tc_tbf_qopt_offload *qopt); +int lan966x_tbf_del(struct lan966x_port *port, + struct tc_tbf_qopt_offload *qopt); + +int lan966x_cbs_add(struct lan966x_port *port, + struct tc_cbs_qopt_offload *qopt); +int lan966x_cbs_del(struct lan966x_port *port, + struct tc_cbs_qopt_offload *qopt); + +int lan966x_ets_add(struct lan966x_port *port, + struct tc_ets_qopt_offload *qopt); +int lan966x_ets_del(struct lan966x_port *port, + struct tc_ets_qopt_offload *qopt); + +int lan966x_tc_matchall(struct lan966x_port *port, + struct tc_cls_matchall_offload *f, + bool ingress); + +int lan966x_police_port_add(struct lan966x_port *port, + struct flow_action *action, + struct flow_action_entry *act, + unsigned long police_id, + bool ingress, + struct netlink_ext_ack *extack); +int lan966x_police_port_del(struct lan966x_port *port, + unsigned long police_id, + struct netlink_ext_ack *extack); +void lan966x_police_port_stats(struct lan966x_port *port, + struct flow_stats *stats); + +int lan966x_mirror_port_add(struct lan966x_port *port, + struct flow_action_entry *action, + unsigned long mirror_id, + bool ingress, + struct netlink_ext_ack *extack); +int lan966x_mirror_port_del(struct lan966x_port *port, + bool ingress, + struct netlink_ext_ack *extack); +void lan966x_mirror_port_stats(struct lan966x_port *port, + struct flow_stats *stats, + bool ingress); + +static inline void __iomem *lan_addr(void __iomem *base[], + int id, int tinst, int tcnt, + int gbase, int ginst, + int gcnt, int gwidth, + int raddr, int rinst, + int rcnt, int rwidth) +{ + WARN_ON((tinst) >= tcnt); + WARN_ON((ginst) >= gcnt); + WARN_ON((rinst) >= rcnt); + return base[id + (tinst)] + + gbase + ((ginst) * gwidth) + + raddr + ((rinst) * rwidth); +} + +static inline u32 lan_rd(struct lan966x *lan966x, int id, int tinst, int tcnt, + int gbase, int ginst, int gcnt, int gwidth, + int raddr, int rinst, int rcnt, int rwidth) +{ + return readl(lan_addr(lan966x->regs, id, tinst, tcnt, gbase, ginst, + gcnt, gwidth, raddr, rinst, rcnt, rwidth)); +} + +static inline void lan_wr(u32 val, struct lan966x *lan966x, + int id, int tinst, int tcnt, + int gbase, int ginst, int gcnt, int gwidth, + int raddr, int rinst, int rcnt, int rwidth) +{ + writel(val, lan_addr(lan966x->regs, id, tinst, tcnt, + gbase, ginst, gcnt, gwidth, + raddr, rinst, rcnt, rwidth)); +} + +static inline void lan_rmw(u32 val, u32 mask, struct lan966x *lan966x, + int id, int tinst, int tcnt, + int gbase, int ginst, int gcnt, int gwidth, + int raddr, int rinst, int rcnt, int rwidth) +{ + u32 nval; + + nval = readl(lan_addr(lan966x->regs, id, tinst, tcnt, gbase, ginst, + gcnt, gwidth, raddr, rinst, rcnt, rwidth)); + nval = (nval & ~mask) | (val & mask); + writel(nval, lan_addr(lan966x->regs, id, tinst, tcnt, gbase, ginst, + gcnt, gwidth, raddr, rinst, rcnt, rwidth)); +} + +#endif /* __LAN966X_MAIN_H__ */ diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_mdb.c b/drivers/net/ethernet/microchip/lan966x/lan966x_mdb.c new file mode 100644 index 000000000..2af55268b --- /dev/null +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_mdb.c @@ -0,0 +1,551 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include <net/switchdev.h> + +#include "lan966x_main.h" + +struct lan966x_pgid_entry { + struct list_head list; + int index; + refcount_t refcount; + u16 ports; +}; + +struct lan966x_mdb_entry { + struct list_head list; + 
unsigned char mac[ETH_ALEN]; + u16 vid; + u16 ports; + struct lan966x_pgid_entry *pgid; + u8 cpu_copy; +}; + +void lan966x_mdb_init(struct lan966x *lan966x) +{ + INIT_LIST_HEAD(&lan966x->mdb_entries); + INIT_LIST_HEAD(&lan966x->pgid_entries); +} + +static void lan966x_mdb_purge_mdb_entries(struct lan966x *lan966x) +{ + struct lan966x_mdb_entry *mdb_entry, *tmp; + + list_for_each_entry_safe(mdb_entry, tmp, &lan966x->mdb_entries, list) { + list_del(&mdb_entry->list); + kfree(mdb_entry); + } +} + +static void lan966x_mdb_purge_pgid_entries(struct lan966x *lan966x) +{ + struct lan966x_pgid_entry *pgid_entry, *tmp; + + list_for_each_entry_safe(pgid_entry, tmp, &lan966x->pgid_entries, list) { + list_del(&pgid_entry->list); + kfree(pgid_entry); + } +} + +void lan966x_mdb_deinit(struct lan966x *lan966x) +{ + lan966x_mdb_purge_mdb_entries(lan966x); + lan966x_mdb_purge_pgid_entries(lan966x); +} + +static struct lan966x_mdb_entry * +lan966x_mdb_entry_get(struct lan966x *lan966x, + const unsigned char *mac, + u16 vid) +{ + struct lan966x_mdb_entry *mdb_entry; + + list_for_each_entry(mdb_entry, &lan966x->mdb_entries, list) { + if (ether_addr_equal(mdb_entry->mac, mac) && + mdb_entry->vid == vid) + return mdb_entry; + } + + return NULL; +} + +static struct lan966x_mdb_entry * +lan966x_mdb_entry_add(struct lan966x *lan966x, + const struct switchdev_obj_port_mdb *mdb) +{ + struct lan966x_mdb_entry *mdb_entry; + + mdb_entry = kzalloc(sizeof(*mdb_entry), GFP_KERNEL); + if (!mdb_entry) + return ERR_PTR(-ENOMEM); + + ether_addr_copy(mdb_entry->mac, mdb->addr); + mdb_entry->vid = mdb->vid; + + list_add_tail(&mdb_entry->list, &lan966x->mdb_entries); + + return mdb_entry; +} + +static void lan966x_mdb_encode_mac(unsigned char *mac, + struct lan966x_mdb_entry *mdb_entry, + enum macaccess_entry_type type) +{ + ether_addr_copy(mac, mdb_entry->mac); + + if (type == ENTRYTYPE_MACV4) { + mac[0] = 0; + mac[1] = mdb_entry->ports >> 8; + mac[2] = mdb_entry->ports & 0xff; + } else if (type == ENTRYTYPE_MACV6) { + mac[0] = mdb_entry->ports >> 8; + mac[1] = mdb_entry->ports & 0xff; + } +} + +static int lan966x_mdb_ip_add(struct lan966x_port *port, + const struct switchdev_obj_port_mdb *mdb, + enum macaccess_entry_type type) +{ + bool cpu_port = netif_is_bridge_master(mdb->obj.orig_dev); + struct lan966x *lan966x = port->lan966x; + struct lan966x_mdb_entry *mdb_entry; + unsigned char mac[ETH_ALEN]; + bool cpu_copy = false; + + mdb_entry = lan966x_mdb_entry_get(lan966x, mdb->addr, mdb->vid); + if (!mdb_entry) { + mdb_entry = lan966x_mdb_entry_add(lan966x, mdb); + if (IS_ERR(mdb_entry)) + return PTR_ERR(mdb_entry); + } else { + lan966x_mdb_encode_mac(mac, mdb_entry, type); + lan966x_mac_forget(lan966x, mac, mdb_entry->vid, type); + } + + if (cpu_port) + mdb_entry->cpu_copy++; + else + mdb_entry->ports |= BIT(port->chip_port); + + /* Copy the frame to CPU only if the CPU is in the VLAN */ + if (lan966x_vlan_cpu_member_cpu_vlan_mask(lan966x, mdb_entry->vid) && + mdb_entry->cpu_copy) + cpu_copy = true; + + lan966x_mdb_encode_mac(mac, mdb_entry, type); + return lan966x_mac_ip_learn(lan966x, cpu_copy, + mac, mdb_entry->vid, type); +} + +static int lan966x_mdb_ip_del(struct lan966x_port *port, + const struct switchdev_obj_port_mdb *mdb, + enum macaccess_entry_type type) +{ + bool cpu_port = netif_is_bridge_master(mdb->obj.orig_dev); + struct lan966x *lan966x = port->lan966x; + struct lan966x_mdb_entry *mdb_entry; + unsigned char mac[ETH_ALEN]; + u16 ports; + + mdb_entry = lan966x_mdb_entry_get(lan966x, mdb->addr, mdb->vid); + if 
(!mdb_entry) + return -ENOENT; + + ports = mdb_entry->ports; + if (cpu_port) { + /* If there are still other references to the CPU port then + * there is no point to delete and add again the same entry + */ + mdb_entry->cpu_copy--; + if (mdb_entry->cpu_copy) + return 0; + } else { + ports &= ~BIT(port->chip_port); + } + + lan966x_mdb_encode_mac(mac, mdb_entry, type); + lan966x_mac_forget(lan966x, mac, mdb_entry->vid, type); + + mdb_entry->ports = ports; + + if (!mdb_entry->ports && !mdb_entry->cpu_copy) { + list_del(&mdb_entry->list); + kfree(mdb_entry); + return 0; + } + + lan966x_mdb_encode_mac(mac, mdb_entry, type); + return lan966x_mac_ip_learn(lan966x, mdb_entry->cpu_copy, + mac, mdb_entry->vid, type); +} + +static struct lan966x_pgid_entry * +lan966x_pgid_entry_add(struct lan966x *lan966x, int index, u16 ports) +{ + struct lan966x_pgid_entry *pgid_entry; + + pgid_entry = kzalloc(sizeof(*pgid_entry), GFP_KERNEL); + if (!pgid_entry) + return ERR_PTR(-ENOMEM); + + pgid_entry->ports = ports; + pgid_entry->index = index; + refcount_set(&pgid_entry->refcount, 1); + + list_add_tail(&pgid_entry->list, &lan966x->pgid_entries); + + return pgid_entry; +} + +static struct lan966x_pgid_entry * +lan966x_pgid_entry_get(struct lan966x *lan966x, + struct lan966x_mdb_entry *mdb_entry) +{ + struct lan966x_pgid_entry *pgid_entry; + int index; + + /* Try to find an existing pgid that uses the same ports as the + * mdb_entry + */ + list_for_each_entry(pgid_entry, &lan966x->pgid_entries, list) { + if (pgid_entry->ports == mdb_entry->ports) { + refcount_inc(&pgid_entry->refcount); + return pgid_entry; + } + } + + /* Try to find an empty pgid entry and allocate one in case it finds it, + * otherwise it means that there are no more resources + */ + for (index = PGID_GP_START; index < PGID_GP_END; index++) { + bool used = false; + + list_for_each_entry(pgid_entry, &lan966x->pgid_entries, list) { + if (pgid_entry->index == index) { + used = true; + break; + } + } + + if (!used) + return lan966x_pgid_entry_add(lan966x, index, + mdb_entry->ports); + } + + return ERR_PTR(-ENOSPC); +} + +static void lan966x_pgid_entry_del(struct lan966x *lan966x, + struct lan966x_pgid_entry *pgid_entry) +{ + if (!refcount_dec_and_test(&pgid_entry->refcount)) + return; + + list_del(&pgid_entry->list); + kfree(pgid_entry); +} + +static int lan966x_mdb_l2_add(struct lan966x_port *port, + const struct switchdev_obj_port_mdb *mdb, + enum macaccess_entry_type type) +{ + bool cpu_port = netif_is_bridge_master(mdb->obj.orig_dev); + struct lan966x *lan966x = port->lan966x; + struct lan966x_pgid_entry *pgid_entry; + struct lan966x_mdb_entry *mdb_entry; + unsigned char mac[ETH_ALEN]; + + mdb_entry = lan966x_mdb_entry_get(lan966x, mdb->addr, mdb->vid); + if (!mdb_entry) { + mdb_entry = lan966x_mdb_entry_add(lan966x, mdb); + if (IS_ERR(mdb_entry)) + return PTR_ERR(mdb_entry); + } else { + lan966x_pgid_entry_del(lan966x, mdb_entry->pgid); + lan966x_mdb_encode_mac(mac, mdb_entry, type); + lan966x_mac_forget(lan966x, mac, mdb_entry->vid, type); + } + + if (cpu_port) { + mdb_entry->ports |= BIT(CPU_PORT); + mdb_entry->cpu_copy++; + } else { + mdb_entry->ports |= BIT(port->chip_port); + } + + pgid_entry = lan966x_pgid_entry_get(lan966x, mdb_entry); + if (IS_ERR(pgid_entry)) { + list_del(&mdb_entry->list); + kfree(mdb_entry); + return PTR_ERR(pgid_entry); + } + mdb_entry->pgid = pgid_entry; + + /* Copy the frame to CPU only if the CPU is in the VLAN */ + if (!lan966x_vlan_cpu_member_cpu_vlan_mask(lan966x, mdb_entry->vid) && + mdb_entry->cpu_copy) + 
mdb_entry->ports &= BIT(CPU_PORT); + + lan_rmw(ANA_PGID_PGID_SET(mdb_entry->ports), + ANA_PGID_PGID, + lan966x, ANA_PGID(pgid_entry->index)); + + return lan966x_mac_learn(lan966x, pgid_entry->index, mdb_entry->mac, + mdb_entry->vid, type); +} + +static int lan966x_mdb_l2_del(struct lan966x_port *port, + const struct switchdev_obj_port_mdb *mdb, + enum macaccess_entry_type type) +{ + bool cpu_port = netif_is_bridge_master(mdb->obj.orig_dev); + struct lan966x *lan966x = port->lan966x; + struct lan966x_pgid_entry *pgid_entry; + struct lan966x_mdb_entry *mdb_entry; + unsigned char mac[ETH_ALEN]; + u16 ports; + + mdb_entry = lan966x_mdb_entry_get(lan966x, mdb->addr, mdb->vid); + if (!mdb_entry) + return -ENOENT; + + ports = mdb_entry->ports; + if (cpu_port) { + /* If there are still other references to the CPU port then + * there is no point to delete and add again the same entry + */ + mdb_entry->cpu_copy--; + if (mdb_entry->cpu_copy) + return 0; + + ports &= ~BIT(CPU_PORT); + } else { + ports &= ~BIT(port->chip_port); + } + + lan966x_mdb_encode_mac(mac, mdb_entry, type); + lan966x_mac_forget(lan966x, mac, mdb_entry->vid, type); + lan966x_pgid_entry_del(lan966x, mdb_entry->pgid); + + mdb_entry->ports = ports; + + if (!mdb_entry->ports) { + list_del(&mdb_entry->list); + kfree(mdb_entry); + return 0; + } + + pgid_entry = lan966x_pgid_entry_get(lan966x, mdb_entry); + if (IS_ERR(pgid_entry)) { + list_del(&mdb_entry->list); + kfree(mdb_entry); + return PTR_ERR(pgid_entry); + } + mdb_entry->pgid = pgid_entry; + + lan_rmw(ANA_PGID_PGID_SET(mdb_entry->ports), + ANA_PGID_PGID, + lan966x, ANA_PGID(pgid_entry->index)); + + return lan966x_mac_learn(lan966x, pgid_entry->index, mdb_entry->mac, + mdb_entry->vid, type); +} + +static enum macaccess_entry_type +lan966x_mdb_classify(const unsigned char *mac) +{ + if (mac[0] == 0x01 && mac[1] == 0x00 && mac[2] == 0x5e) + return ENTRYTYPE_MACV4; + if (mac[0] == 0x33 && mac[1] == 0x33) + return ENTRYTYPE_MACV6; + return ENTRYTYPE_LOCKED; +} + +int lan966x_handle_port_mdb_add(struct lan966x_port *port, + const struct switchdev_obj *obj) +{ + const struct switchdev_obj_port_mdb *mdb = SWITCHDEV_OBJ_PORT_MDB(obj); + enum macaccess_entry_type type; + + /* Split the way the entries are added for ipv4/ipv6 and for l2. The + * reason is that for ipv4/ipv6 it doesn't require to use any pgid + * entry, while for l2 is required to use pgid entries + */ + type = lan966x_mdb_classify(mdb->addr); + if (type == ENTRYTYPE_MACV4 || type == ENTRYTYPE_MACV6) + return lan966x_mdb_ip_add(port, mdb, type); + + return lan966x_mdb_l2_add(port, mdb, type); +} + +int lan966x_handle_port_mdb_del(struct lan966x_port *port, + const struct switchdev_obj *obj) +{ + const struct switchdev_obj_port_mdb *mdb = SWITCHDEV_OBJ_PORT_MDB(obj); + enum macaccess_entry_type type; + + /* Split the way the entries are removed for ipv4/ipv6 and for l2. 
The + * reason is that for ipv4/ipv6 it doesn't require to use any pgid + * entry, while for l2 is required to use pgid entries + */ + type = lan966x_mdb_classify(mdb->addr); + if (type == ENTRYTYPE_MACV4 || type == ENTRYTYPE_MACV6) + return lan966x_mdb_ip_del(port, mdb, type); + + return lan966x_mdb_l2_del(port, mdb, type); +} + +static void lan966x_mdb_ip_cpu_copy(struct lan966x *lan966x, + struct lan966x_mdb_entry *mdb_entry, + enum macaccess_entry_type type) +{ + unsigned char mac[ETH_ALEN]; + + lan966x_mdb_encode_mac(mac, mdb_entry, type); + lan966x_mac_forget(lan966x, mac, mdb_entry->vid, type); + lan966x_mac_ip_learn(lan966x, true, mac, mdb_entry->vid, type); +} + +static void lan966x_mdb_l2_cpu_copy(struct lan966x *lan966x, + struct lan966x_mdb_entry *mdb_entry, + enum macaccess_entry_type type) +{ + struct lan966x_pgid_entry *pgid_entry; + unsigned char mac[ETH_ALEN]; + + lan966x_pgid_entry_del(lan966x, mdb_entry->pgid); + lan966x_mdb_encode_mac(mac, mdb_entry, type); + lan966x_mac_forget(lan966x, mac, mdb_entry->vid, type); + + mdb_entry->ports |= BIT(CPU_PORT); + + pgid_entry = lan966x_pgid_entry_get(lan966x, mdb_entry); + if (IS_ERR(pgid_entry)) + return; + + mdb_entry->pgid = pgid_entry; + + lan_rmw(ANA_PGID_PGID_SET(mdb_entry->ports), + ANA_PGID_PGID, + lan966x, ANA_PGID(pgid_entry->index)); + + lan966x_mac_learn(lan966x, pgid_entry->index, mdb_entry->mac, + mdb_entry->vid, type); +} + +void lan966x_mdb_write_entries(struct lan966x *lan966x, u16 vid) +{ + struct lan966x_mdb_entry *mdb_entry; + enum macaccess_entry_type type; + + list_for_each_entry(mdb_entry, &lan966x->mdb_entries, list) { + if (mdb_entry->vid != vid || !mdb_entry->cpu_copy) + continue; + + type = lan966x_mdb_classify(mdb_entry->mac); + if (type == ENTRYTYPE_MACV4 || type == ENTRYTYPE_MACV6) + lan966x_mdb_ip_cpu_copy(lan966x, mdb_entry, type); + else + lan966x_mdb_l2_cpu_copy(lan966x, mdb_entry, type); + } +} + +static void lan966x_mdb_ip_cpu_remove(struct lan966x *lan966x, + struct lan966x_mdb_entry *mdb_entry, + enum macaccess_entry_type type) +{ + unsigned char mac[ETH_ALEN]; + + lan966x_mdb_encode_mac(mac, mdb_entry, type); + lan966x_mac_forget(lan966x, mac, mdb_entry->vid, type); + lan966x_mac_ip_learn(lan966x, false, mac, mdb_entry->vid, type); +} + +static void lan966x_mdb_l2_cpu_remove(struct lan966x *lan966x, + struct lan966x_mdb_entry *mdb_entry, + enum macaccess_entry_type type) +{ + struct lan966x_pgid_entry *pgid_entry; + unsigned char mac[ETH_ALEN]; + + lan966x_pgid_entry_del(lan966x, mdb_entry->pgid); + lan966x_mdb_encode_mac(mac, mdb_entry, type); + lan966x_mac_forget(lan966x, mac, mdb_entry->vid, type); + + mdb_entry->ports &= ~BIT(CPU_PORT); + + pgid_entry = lan966x_pgid_entry_get(lan966x, mdb_entry); + if (IS_ERR(pgid_entry)) + return; + + mdb_entry->pgid = pgid_entry; + + lan_rmw(ANA_PGID_PGID_SET(mdb_entry->ports), + ANA_PGID_PGID, + lan966x, ANA_PGID(pgid_entry->index)); + + lan966x_mac_learn(lan966x, pgid_entry->index, mdb_entry->mac, + mdb_entry->vid, type); +} + +void lan966x_mdb_erase_entries(struct lan966x *lan966x, u16 vid) +{ + struct lan966x_mdb_entry *mdb_entry; + enum macaccess_entry_type type; + + list_for_each_entry(mdb_entry, &lan966x->mdb_entries, list) { + if (mdb_entry->vid != vid || !mdb_entry->cpu_copy) + continue; + + type = lan966x_mdb_classify(mdb_entry->mac); + if (type == ENTRYTYPE_MACV4 || type == ENTRYTYPE_MACV6) + lan966x_mdb_ip_cpu_remove(lan966x, mdb_entry, type); + else + lan966x_mdb_l2_cpu_remove(lan966x, mdb_entry, type); + } +} + +void 
lan966x_mdb_clear_entries(struct lan966x *lan966x) +{ + struct lan966x_mdb_entry *mdb_entry; + enum macaccess_entry_type type; + unsigned char mac[ETH_ALEN]; + + list_for_each_entry(mdb_entry, &lan966x->mdb_entries, list) { + type = lan966x_mdb_classify(mdb_entry->mac); + + lan966x_mdb_encode_mac(mac, mdb_entry, type); + /* Remove just the MAC entry, still keep the PGID in case of L2 + * entries because this can be restored at later point + */ + lan966x_mac_forget(lan966x, mac, mdb_entry->vid, type); + } +} + +void lan966x_mdb_restore_entries(struct lan966x *lan966x) +{ + struct lan966x_mdb_entry *mdb_entry; + enum macaccess_entry_type type; + unsigned char mac[ETH_ALEN]; + bool cpu_copy = false; + + list_for_each_entry(mdb_entry, &lan966x->mdb_entries, list) { + type = lan966x_mdb_classify(mdb_entry->mac); + + lan966x_mdb_encode_mac(mac, mdb_entry, type); + if (type == ENTRYTYPE_MACV4 || type == ENTRYTYPE_MACV6) { + /* Copy the frame to CPU only if the CPU is in the VLAN */ + if (lan966x_vlan_cpu_member_cpu_vlan_mask(lan966x, + mdb_entry->vid) && + mdb_entry->cpu_copy) + cpu_copy = true; + + lan966x_mac_ip_learn(lan966x, cpu_copy, mac, + mdb_entry->vid, type); + } else { + lan966x_mac_learn(lan966x, mdb_entry->pgid->index, + mdb_entry->mac, + mdb_entry->vid, type); + } + } +} diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_mirror.c b/drivers/net/ethernet/microchip/lan966x/lan966x_mirror.c new file mode 100644 index 000000000..7e1ba3f40 --- /dev/null +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_mirror.c @@ -0,0 +1,138 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include "lan966x_main.h" + +int lan966x_mirror_port_add(struct lan966x_port *port, + struct flow_action_entry *action, + unsigned long mirror_id, + bool ingress, + struct netlink_ext_ack *extack) +{ + struct lan966x *lan966x = port->lan966x; + struct lan966x_port *monitor_port; + + if (!lan966x_netdevice_check(action->dev)) { + NL_SET_ERR_MSG_MOD(extack, + "Destination not an lan966x port"); + return -EOPNOTSUPP; + } + + monitor_port = netdev_priv(action->dev); + + if (lan966x->mirror_mask[ingress] & BIT(port->chip_port)) { + NL_SET_ERR_MSG_MOD(extack, + "Mirror already exists"); + return -EEXIST; + } + + if (lan966x->mirror_monitor && + lan966x->mirror_monitor != monitor_port) { + NL_SET_ERR_MSG_MOD(extack, + "Cannot change mirror port while in use"); + return -EBUSY; + } + + if (port == monitor_port) { + NL_SET_ERR_MSG_MOD(extack, + "Cannot mirror the monitor port"); + return -EINVAL; + } + + lan966x->mirror_mask[ingress] |= BIT(port->chip_port); + + lan966x->mirror_monitor = monitor_port; + lan_wr(BIT(monitor_port->chip_port), lan966x, ANA_MIRRORPORTS); + + if (ingress) { + lan_rmw(ANA_PORT_CFG_SRC_MIRROR_ENA_SET(1), + ANA_PORT_CFG_SRC_MIRROR_ENA, + lan966x, ANA_PORT_CFG(port->chip_port)); + } else { + lan_wr(lan966x->mirror_mask[0], lan966x, + ANA_EMIRRORPORTS); + } + + lan966x->mirror_count++; + + if (ingress) + port->tc.ingress_mirror_id = mirror_id; + else + port->tc.egress_mirror_id = mirror_id; + + return 0; +} + +int lan966x_mirror_port_del(struct lan966x_port *port, + bool ingress, + struct netlink_ext_ack *extack) +{ + struct lan966x *lan966x = port->lan966x; + + if (!(lan966x->mirror_mask[ingress] & BIT(port->chip_port))) { + NL_SET_ERR_MSG_MOD(extack, + "There is no mirroring for this port"); + return -ENOENT; + } + + lan966x->mirror_mask[ingress] &= ~BIT(port->chip_port); + + if (ingress) { + lan_rmw(ANA_PORT_CFG_SRC_MIRROR_ENA_SET(0), + ANA_PORT_CFG_SRC_MIRROR_ENA, + lan966x, 
ANA_PORT_CFG(port->chip_port)); + } else { + lan_wr(lan966x->mirror_mask[0], lan966x, + ANA_EMIRRORPORTS); + } + + lan966x->mirror_count--; + + if (lan966x->mirror_count == 0) { + lan966x->mirror_monitor = NULL; + lan_wr(0, lan966x, ANA_MIRRORPORTS); + } + + if (ingress) + port->tc.ingress_mirror_id = 0; + else + port->tc.egress_mirror_id = 0; + + return 0; +} + +void lan966x_mirror_port_stats(struct lan966x_port *port, + struct flow_stats *stats, + bool ingress) +{ + struct rtnl_link_stats64 new_stats; + struct flow_stats *old_stats; + + old_stats = &port->tc.mirror_stat; + lan966x_stats_get(port->dev, &new_stats); + + if (ingress) { + flow_stats_update(stats, + new_stats.rx_bytes - old_stats->bytes, + new_stats.rx_packets - old_stats->pkts, + new_stats.rx_dropped - old_stats->drops, + old_stats->lastused, + FLOW_ACTION_HW_STATS_IMMEDIATE); + + old_stats->bytes = new_stats.rx_bytes; + old_stats->pkts = new_stats.rx_packets; + old_stats->drops = new_stats.rx_dropped; + old_stats->lastused = jiffies; + } else { + flow_stats_update(stats, + new_stats.tx_bytes - old_stats->bytes, + new_stats.tx_packets - old_stats->pkts, + new_stats.tx_dropped - old_stats->drops, + old_stats->lastused, + FLOW_ACTION_HW_STATS_IMMEDIATE); + + old_stats->bytes = new_stats.tx_bytes; + old_stats->pkts = new_stats.tx_packets; + old_stats->drops = new_stats.tx_dropped; + old_stats->lastused = jiffies; + } +} diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_mqprio.c b/drivers/net/ethernet/microchip/lan966x/lan966x_mqprio.c new file mode 100644 index 000000000..7fa76e74f --- /dev/null +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_mqprio.c @@ -0,0 +1,28 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include "lan966x_main.h" + +int lan966x_mqprio_add(struct lan966x_port *port, u8 num_tc) +{ + u8 i; + + if (num_tc != NUM_PRIO_QUEUES) { + netdev_err(port->dev, "Only %d traffic classes supported\n", + NUM_PRIO_QUEUES); + return -EINVAL; + } + + netdev_set_num_tc(port->dev, num_tc); + + for (i = 0; i < num_tc; ++i) + netdev_set_tc_queue(port->dev, i, 1, i); + + return 0; +} + +int lan966x_mqprio_del(struct lan966x_port *port) +{ + netdev_reset_tc(port->dev); + + return 0; +} diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c b/drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c new file mode 100644 index 000000000..e4ac59480 --- /dev/null +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c @@ -0,0 +1,140 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include <linux/module.h> +#include <linux/phylink.h> +#include <linux/device.h> +#include <linux/netdevice.h> +#include <linux/phy/phy.h> +#include <linux/sfp.h> + +#include "lan966x_main.h" + +static struct phylink_pcs *lan966x_phylink_mac_select(struct phylink_config *config, + phy_interface_t interface) +{ + struct lan966x_port *port = netdev_priv(to_net_dev(config->dev)); + + return &port->phylink_pcs; +} + +static void lan966x_phylink_mac_config(struct phylink_config *config, + unsigned int mode, + const struct phylink_link_state *state) +{ +} + +static int lan966x_phylink_mac_prepare(struct phylink_config *config, + unsigned int mode, + phy_interface_t iface) +{ + struct lan966x_port *port = netdev_priv(to_net_dev(config->dev)); + phy_interface_t serdes_mode = iface; + int err; + + if (port->serdes) { + err = phy_set_mode_ext(port->serdes, PHY_MODE_ETHERNET, + serdes_mode); + if (err) { + netdev_err(to_net_dev(config->dev), + "Could not set mode of SerDes\n"); + return err; + } + } + + return 0; +} + +static void 
lan966x_phylink_mac_link_up(struct phylink_config *config, + struct phy_device *phy, + unsigned int mode, + phy_interface_t interface, + int speed, int duplex, + bool tx_pause, bool rx_pause) +{ + struct lan966x_port *port = netdev_priv(to_net_dev(config->dev)); + struct lan966x_port_config *port_config = &port->config; + + port_config->duplex = duplex; + port_config->speed = speed; + port_config->pause = 0; + port_config->pause |= tx_pause ? MLO_PAUSE_TX : 0; + port_config->pause |= rx_pause ? MLO_PAUSE_RX : 0; + + if (phy_interface_mode_is_rgmii(interface)) + phy_set_speed(port->serdes, speed); + + lan966x_port_config_up(port); +} + +static void lan966x_phylink_mac_link_down(struct phylink_config *config, + unsigned int mode, + phy_interface_t interface) +{ + struct lan966x_port *port = netdev_priv(to_net_dev(config->dev)); + struct lan966x *lan966x = port->lan966x; + + lan966x_port_config_down(port); + + /* Take PCS out of reset */ + lan_rmw(DEV_CLOCK_CFG_PCS_RX_RST_SET(0) | + DEV_CLOCK_CFG_PCS_TX_RST_SET(0), + DEV_CLOCK_CFG_PCS_RX_RST | + DEV_CLOCK_CFG_PCS_TX_RST, + lan966x, DEV_CLOCK_CFG(port->chip_port)); +} + +static struct lan966x_port *lan966x_pcs_to_port(struct phylink_pcs *pcs) +{ + return container_of(pcs, struct lan966x_port, phylink_pcs); +} + +static void lan966x_pcs_get_state(struct phylink_pcs *pcs, + struct phylink_link_state *state) +{ + struct lan966x_port *port = lan966x_pcs_to_port(pcs); + + lan966x_port_status_get(port, state); +} + +static int lan966x_pcs_config(struct phylink_pcs *pcs, + unsigned int mode, + phy_interface_t interface, + const unsigned long *advertising, + bool permit_pause_to_mac) +{ + struct lan966x_port *port = lan966x_pcs_to_port(pcs); + struct lan966x_port_config config; + int ret; + + config = port->config; + config.portmode = interface; + config.inband = phylink_autoneg_inband(mode); + config.autoneg = phylink_test(advertising, Autoneg); + config.advertising = advertising; + + ret = lan966x_port_pcs_set(port, &config); + if (ret) + netdev_err(port->dev, "port PCS config failed: %d\n", ret); + + return ret; +} + +static void lan966x_pcs_aneg_restart(struct phylink_pcs *pcs) +{ + /* Currently not used */ +} + +const struct phylink_mac_ops lan966x_phylink_mac_ops = { + .validate = phylink_generic_validate, + .mac_select_pcs = lan966x_phylink_mac_select, + .mac_config = lan966x_phylink_mac_config, + .mac_prepare = lan966x_phylink_mac_prepare, + .mac_link_down = lan966x_phylink_mac_link_down, + .mac_link_up = lan966x_phylink_mac_link_up, +}; + +const struct phylink_pcs_ops lan966x_phylink_pcs_ops = { + .pcs_get_state = lan966x_pcs_get_state, + .pcs_config = lan966x_pcs_config, + .pcs_an_restart = lan966x_pcs_aneg_restart, +}; diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_police.c b/drivers/net/ethernet/microchip/lan966x/lan966x_police.c new file mode 100644 index 000000000..7d66fe75c --- /dev/null +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_police.c @@ -0,0 +1,235 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include "lan966x_main.h" + +/* 0-8 : 9 port policers */ +#define POL_IDX_PORT 0 + +/* Policer order: Serial (QoS -> Port -> VCAP) */ +#define POL_ORDER 0x1d3 + +struct lan966x_tc_policer { + /* kilobit per second */ + u32 rate; + /* bytes */ + u32 burst; +}; + +static int lan966x_police_add(struct lan966x_port *port, + struct lan966x_tc_policer *pol, + u16 pol_idx) +{ + struct lan966x *lan966x = port->lan966x; + + /* Rate unit is 33 1/3 kpps */ + pol->rate = DIV_ROUND_UP(pol->rate * 3, 100); + /* Avoid zero burst size */ 
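+	/* Worked example (editorial, for illustration only): a policer of
+	 * 10 Mbit/s with an 8192 byte burst reaches this function as
+	 * rate = 10000 (kbit/s) and burst = 8192 (bytes). The conversion
+	 * above gives rate = DIV_ROUND_UP(10000 * 3, 100) = 300 rate units,
+	 * and the two statements below give
+	 * burst = DIV_ROUND_UP(8192, 4096) = 2 burst units of 4 kB, both
+	 * within the 16-bit rate and 7-bit burst fields checked further down.
+	 */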
+ pol->burst = pol->burst ?: 1; + /* Unit is 4kB */ + pol->burst = DIV_ROUND_UP(pol->burst, 4096); + + if (pol->rate > GENMASK(15, 0) || + pol->burst > GENMASK(6, 0)) + return -EINVAL; + + lan_wr(ANA_POL_MODE_DROP_ON_YELLOW_ENA_SET(0) | + ANA_POL_MODE_MARK_ALL_FRMS_RED_ENA_SET(0) | + ANA_POL_MODE_IPG_SIZE_SET(20) | + ANA_POL_MODE_FRM_MODE_SET(1) | + ANA_POL_MODE_OVERSHOOT_ENA_SET(1), + lan966x, ANA_POL_MODE(pol_idx)); + + lan_wr(ANA_POL_PIR_STATE_PIR_LVL_SET(0), + lan966x, ANA_POL_PIR_STATE(pol_idx)); + + lan_wr(ANA_POL_PIR_CFG_PIR_RATE_SET(pol->rate) | + ANA_POL_PIR_CFG_PIR_BURST_SET(pol->burst), + lan966x, ANA_POL_PIR_CFG(pol_idx)); + + return 0; +} + +static int lan966x_police_del(struct lan966x_port *port, + u16 pol_idx) +{ + struct lan966x *lan966x = port->lan966x; + + lan_wr(ANA_POL_MODE_DROP_ON_YELLOW_ENA_SET(0) | + ANA_POL_MODE_MARK_ALL_FRMS_RED_ENA_SET(0) | + ANA_POL_MODE_IPG_SIZE_SET(20) | + ANA_POL_MODE_FRM_MODE_SET(2) | + ANA_POL_MODE_OVERSHOOT_ENA_SET(1), + lan966x, ANA_POL_MODE(pol_idx)); + + lan_wr(ANA_POL_PIR_STATE_PIR_LVL_SET(0), + lan966x, ANA_POL_PIR_STATE(pol_idx)); + + lan_wr(ANA_POL_PIR_CFG_PIR_RATE_SET(GENMASK(14, 0)) | + ANA_POL_PIR_CFG_PIR_BURST_SET(0), + lan966x, ANA_POL_PIR_CFG(pol_idx)); + + return 0; +} + +static int lan966x_police_validate(struct lan966x_port *port, + const struct flow_action *action, + const struct flow_action_entry *act, + unsigned long police_id, + bool ingress, + struct netlink_ext_ack *extack) +{ + if (act->police.exceed.act_id != FLOW_ACTION_DROP) { + NL_SET_ERR_MSG_MOD(extack, + "Offload not supported when exceed action is not drop"); + return -EOPNOTSUPP; + } + + if (act->police.notexceed.act_id != FLOW_ACTION_PIPE && + act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) { + NL_SET_ERR_MSG_MOD(extack, + "Offload not supported when conform action is not pipe or ok"); + return -EOPNOTSUPP; + } + + if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT && + !flow_action_is_last_entry(action, act)) { + NL_SET_ERR_MSG_MOD(extack, + "Offload not supported when conform action is ok, but action is not last"); + return -EOPNOTSUPP; + } + + if (act->police.peakrate_bytes_ps || + act->police.avrate || act->police.overhead) { + NL_SET_ERR_MSG_MOD(extack, + "Offload not supported when peakrate/avrate/overhead is configured"); + return -EOPNOTSUPP; + } + + if (act->police.rate_pkt_ps) { + NL_SET_ERR_MSG_MOD(extack, + "QoS offload not support packets per second"); + return -EOPNOTSUPP; + } + + if (!ingress) { + NL_SET_ERR_MSG_MOD(extack, + "Policer is not supported on egress"); + return -EOPNOTSUPP; + } + + if (port->tc.ingress_shared_block) { + NL_SET_ERR_MSG_MOD(extack, + "Policer is not supported on shared ingress blocks"); + return -EOPNOTSUPP; + } + + if (port->tc.police_id && port->tc.police_id != police_id) { + NL_SET_ERR_MSG_MOD(extack, + "Only one policer per port is supported"); + return -EEXIST; + } + + return 0; +} + +int lan966x_police_port_add(struct lan966x_port *port, + struct flow_action *action, + struct flow_action_entry *act, + unsigned long police_id, + bool ingress, + struct netlink_ext_ack *extack) +{ + struct lan966x *lan966x = port->lan966x; + struct rtnl_link_stats64 new_stats; + struct lan966x_tc_policer pol; + struct flow_stats *old_stats; + int err; + + err = lan966x_police_validate(port, action, act, police_id, ingress, + extack); + if (err) + return err; + + memset(&pol, 0, sizeof(pol)); + + pol.rate = div_u64(act->police.rate_bytes_ps, 1000) * 8; + pol.burst = act->police.burst; + + err = lan966x_police_add(port, &pol, 
POL_IDX_PORT + port->chip_port); + if (err) { + NL_SET_ERR_MSG_MOD(extack, + "Failed to add policer to port"); + return err; + } + + lan_rmw(ANA_POL_CFG_PORT_POL_ENA_SET(1) | + ANA_POL_CFG_POL_ORDER_SET(POL_ORDER), + ANA_POL_CFG_PORT_POL_ENA | + ANA_POL_CFG_POL_ORDER, + lan966x, ANA_POL_CFG(port->chip_port)); + + port->tc.police_id = police_id; + + /* Setup initial stats */ + old_stats = &port->tc.police_stat; + lan966x_stats_get(port->dev, &new_stats); + old_stats->bytes = new_stats.rx_bytes; + old_stats->pkts = new_stats.rx_packets; + old_stats->drops = new_stats.rx_dropped; + old_stats->lastused = jiffies; + + return 0; +} + +int lan966x_police_port_del(struct lan966x_port *port, + unsigned long police_id, + struct netlink_ext_ack *extack) +{ + struct lan966x *lan966x = port->lan966x; + int err; + + if (port->tc.police_id != police_id) { + NL_SET_ERR_MSG_MOD(extack, + "Invalid policer id"); + return -EINVAL; + } + + err = lan966x_police_del(port, POL_IDX_PORT + port->chip_port); + if (err) { + NL_SET_ERR_MSG_MOD(extack, + "Failed to add policer to port"); + return err; + } + + lan_rmw(ANA_POL_CFG_PORT_POL_ENA_SET(0) | + ANA_POL_CFG_POL_ORDER_SET(POL_ORDER), + ANA_POL_CFG_PORT_POL_ENA | + ANA_POL_CFG_POL_ORDER, + lan966x, ANA_POL_CFG(port->chip_port)); + + port->tc.police_id = 0; + + return 0; +} + +void lan966x_police_port_stats(struct lan966x_port *port, + struct flow_stats *stats) +{ + struct rtnl_link_stats64 new_stats; + struct flow_stats *old_stats; + + old_stats = &port->tc.police_stat; + lan966x_stats_get(port->dev, &new_stats); + + flow_stats_update(stats, + new_stats.rx_bytes - old_stats->bytes, + new_stats.rx_packets - old_stats->pkts, + new_stats.rx_dropped - old_stats->drops, + old_stats->lastused, + FLOW_ACTION_HW_STATS_IMMEDIATE); + + old_stats->bytes = new_stats.rx_bytes; + old_stats->pkts = new_stats.rx_packets; + old_stats->drops = new_stats.rx_dropped; + old_stats->lastused = jiffies; +} diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_port.c b/drivers/net/ethernet/microchip/lan966x/lan966x_port.c new file mode 100644 index 000000000..0050fcb98 --- /dev/null +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_port.c @@ -0,0 +1,421 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include <linux/netdevice.h> +#include <linux/phy/phy.h> + +#include "lan966x_main.h" + +/* Watermark encode */ +#define MULTIPLIER_BIT BIT(8) +static u32 lan966x_wm_enc(u32 value) +{ + value /= LAN966X_BUFFER_CELL_SZ; + + if (value >= MULTIPLIER_BIT) { + value /= 16; + if (value >= MULTIPLIER_BIT) + value = (MULTIPLIER_BIT - 1); + + value |= MULTIPLIER_BIT; + } + + return value; +} + +static void lan966x_port_link_down(struct lan966x_port *port) +{ + struct lan966x *lan966x = port->lan966x; + u32 val, delay = 0; + + /* 0.5: Disable any AFI */ + lan_rmw(AFI_PORT_CFG_FC_SKIP_TTI_INJ_SET(1) | + AFI_PORT_CFG_FRM_OUT_MAX_SET(0), + AFI_PORT_CFG_FC_SKIP_TTI_INJ | + AFI_PORT_CFG_FRM_OUT_MAX, + lan966x, AFI_PORT_CFG(port->chip_port)); + + /* wait for reg afi_port_frm_out to become 0 for the port */ + while (true) { + val = lan_rd(lan966x, AFI_PORT_FRM_OUT(port->chip_port)); + if (!AFI_PORT_FRM_OUT_FRM_OUT_CNT_GET(val)) + break; + + usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC); + delay++; + if (delay == 2000) { + pr_err("AFI timeout chip port %u", port->chip_port); + break; + } + } + + delay = 0; + + /* 1: Reset the PCS Rx clock domain */ + lan_rmw(DEV_CLOCK_CFG_PCS_RX_RST_SET(1), + DEV_CLOCK_CFG_PCS_RX_RST, + lan966x, DEV_CLOCK_CFG(port->chip_port)); + + /* 2: Disable MAC frame reception */ + 
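+	/* (Editorial clarification: this clears DEV_MAC_ENA_CFG.RX_ENA so the
+	 * MAC stops accepting new frames while the port queues are flushed in
+	 * the steps that follow.)
+	 */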
lan_rmw(DEV_MAC_ENA_CFG_RX_ENA_SET(0), + DEV_MAC_ENA_CFG_RX_ENA, + lan966x, DEV_MAC_ENA_CFG(port->chip_port)); + + /* 3: Disable traffic being sent to or from switch port */ + lan_rmw(QSYS_SW_PORT_MODE_PORT_ENA_SET(0), + QSYS_SW_PORT_MODE_PORT_ENA, + lan966x, QSYS_SW_PORT_MODE(port->chip_port)); + + /* 4: Disable dequeuing from the egress queues */ + lan_rmw(QSYS_PORT_MODE_DEQUEUE_DIS_SET(1), + QSYS_PORT_MODE_DEQUEUE_DIS, + lan966x, QSYS_PORT_MODE(port->chip_port)); + + /* 5: Disable Flowcontrol */ + lan_rmw(SYS_PAUSE_CFG_PAUSE_ENA_SET(0), + SYS_PAUSE_CFG_PAUSE_ENA, + lan966x, SYS_PAUSE_CFG(port->chip_port)); + + /* 5.1: Disable PFC */ + lan_rmw(QSYS_SW_PORT_MODE_TX_PFC_ENA_SET(0), + QSYS_SW_PORT_MODE_TX_PFC_ENA, + lan966x, QSYS_SW_PORT_MODE(port->chip_port)); + + /* 6: Wait a worst case time 8ms (jumbo/10Mbit) */ + usleep_range(8 * USEC_PER_MSEC, 9 * USEC_PER_MSEC); + + /* 7: Disable HDX backpressure */ + lan_rmw(SYS_FRONT_PORT_MODE_HDX_MODE_SET(0), + SYS_FRONT_PORT_MODE_HDX_MODE, + lan966x, SYS_FRONT_PORT_MODE(port->chip_port)); + + /* 8: Flush the queues accociated with the port */ + lan_rmw(QSYS_SW_PORT_MODE_AGING_MODE_SET(3), + QSYS_SW_PORT_MODE_AGING_MODE, + lan966x, QSYS_SW_PORT_MODE(port->chip_port)); + + /* 9: Enable dequeuing from the egress queues */ + lan_rmw(QSYS_PORT_MODE_DEQUEUE_DIS_SET(0), + QSYS_PORT_MODE_DEQUEUE_DIS, + lan966x, QSYS_PORT_MODE(port->chip_port)); + + /* 10: Wait until flushing is complete */ + while (true) { + val = lan_rd(lan966x, QSYS_SW_STATUS(port->chip_port)); + if (!QSYS_SW_STATUS_EQ_AVAIL_GET(val)) + break; + + usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC); + delay++; + if (delay == 2000) { + pr_err("Flush timeout chip port %u", port->chip_port); + break; + } + } + + /* 11: Reset the Port and MAC clock domains */ + lan_rmw(DEV_MAC_ENA_CFG_TX_ENA_SET(0), + DEV_MAC_ENA_CFG_TX_ENA, + lan966x, DEV_MAC_ENA_CFG(port->chip_port)); + + lan_rmw(DEV_CLOCK_CFG_PORT_RST_SET(1), + DEV_CLOCK_CFG_PORT_RST, + lan966x, DEV_CLOCK_CFG(port->chip_port)); + + usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC); + + lan_rmw(DEV_CLOCK_CFG_MAC_TX_RST_SET(1) | + DEV_CLOCK_CFG_MAC_RX_RST_SET(1) | + DEV_CLOCK_CFG_PORT_RST_SET(1), + DEV_CLOCK_CFG_MAC_TX_RST | + DEV_CLOCK_CFG_MAC_RX_RST | + DEV_CLOCK_CFG_PORT_RST, + lan966x, DEV_CLOCK_CFG(port->chip_port)); + + /* 12: Clear flushing */ + lan_rmw(QSYS_SW_PORT_MODE_AGING_MODE_SET(2), + QSYS_SW_PORT_MODE_AGING_MODE, + lan966x, QSYS_SW_PORT_MODE(port->chip_port)); + + /* The port is disabled and flushed, now set up the port in the + * new operating mode + */ +} + +static void lan966x_port_link_up(struct lan966x_port *port) +{ + struct lan966x_port_config *config = &port->config; + struct lan966x *lan966x = port->lan966x; + int speed = 0, mode = 0; + int atop_wm = 0; + + switch (config->speed) { + case SPEED_10: + speed = LAN966X_SPEED_10; + break; + case SPEED_100: + speed = LAN966X_SPEED_100; + break; + case SPEED_1000: + speed = LAN966X_SPEED_1000; + mode = DEV_MAC_MODE_CFG_GIGA_MODE_ENA_SET(1); + break; + case SPEED_2500: + speed = LAN966X_SPEED_2500; + mode = DEV_MAC_MODE_CFG_GIGA_MODE_ENA_SET(1); + break; + } + + lan966x_taprio_speed_set(port, config->speed); + + /* Also the GIGA_MODE_ENA(1) needs to be set regardless of the + * port speed for QSGMII ports. + */ + if (phy_interface_num_ports(config->portmode) == 4) + mode = DEV_MAC_MODE_CFG_GIGA_MODE_ENA_SET(1); + + lan_wr(config->duplex | mode, + lan966x, DEV_MAC_MODE_CFG(port->chip_port)); + + lan_rmw(DEV_MAC_IFG_CFG_TX_IFG_SET(config->duplex ? 
6 : 5) | + DEV_MAC_IFG_CFG_RX_IFG1_SET(config->speed == SPEED_10 ? 2 : 1) | + DEV_MAC_IFG_CFG_RX_IFG2_SET(2), + DEV_MAC_IFG_CFG_TX_IFG | + DEV_MAC_IFG_CFG_RX_IFG1 | + DEV_MAC_IFG_CFG_RX_IFG2, + lan966x, DEV_MAC_IFG_CFG(port->chip_port)); + + lan_rmw(DEV_MAC_HDX_CFG_SEED_SET(4) | + DEV_MAC_HDX_CFG_SEED_LOAD_SET(1), + DEV_MAC_HDX_CFG_SEED | + DEV_MAC_HDX_CFG_SEED_LOAD, + lan966x, DEV_MAC_HDX_CFG(port->chip_port)); + + if (config->portmode == PHY_INTERFACE_MODE_GMII) { + if (config->speed == SPEED_1000) + lan_rmw(CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA_SET(1), + CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA, + lan966x, + CHIP_TOP_CUPHY_PORT_CFG(port->chip_port)); + else + lan_rmw(CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA_SET(0), + CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA, + lan966x, + CHIP_TOP_CUPHY_PORT_CFG(port->chip_port)); + } + + /* No PFC */ + lan_wr(ANA_PFC_CFG_FC_LINK_SPEED_SET(speed), + lan966x, ANA_PFC_CFG(port->chip_port)); + + lan_rmw(DEV_PCS1G_CFG_PCS_ENA_SET(1), + DEV_PCS1G_CFG_PCS_ENA, + lan966x, DEV_PCS1G_CFG(port->chip_port)); + + lan_rmw(DEV_PCS1G_SD_CFG_SD_ENA_SET(0), + DEV_PCS1G_SD_CFG_SD_ENA, + lan966x, DEV_PCS1G_SD_CFG(port->chip_port)); + + /* Set Pause WM hysteresis, start/stop are in 1518 byte units */ + lan_wr(SYS_PAUSE_CFG_PAUSE_ENA_SET(1) | + SYS_PAUSE_CFG_PAUSE_STOP_SET(lan966x_wm_enc(4 * 1518)) | + SYS_PAUSE_CFG_PAUSE_START_SET(lan966x_wm_enc(6 * 1518)), + lan966x, SYS_PAUSE_CFG(port->chip_port)); + + /* Set SMAC of Pause frame (00:00:00:00:00:00) */ + lan_wr(0, lan966x, DEV_FC_MAC_LOW_CFG(port->chip_port)); + lan_wr(0, lan966x, DEV_FC_MAC_HIGH_CFG(port->chip_port)); + + /* Flow control */ + lan_rmw(SYS_MAC_FC_CFG_FC_LINK_SPEED_SET(speed) | + SYS_MAC_FC_CFG_FC_LATENCY_CFG_SET(7) | + SYS_MAC_FC_CFG_ZERO_PAUSE_ENA_SET(1) | + SYS_MAC_FC_CFG_PAUSE_VAL_CFG_SET(0xffff) | + SYS_MAC_FC_CFG_RX_FC_ENA_SET(config->pause & MLO_PAUSE_RX ? 1 : 0) | + SYS_MAC_FC_CFG_TX_FC_ENA_SET(config->pause & MLO_PAUSE_TX ? 
1 : 0), + SYS_MAC_FC_CFG_FC_LINK_SPEED | + SYS_MAC_FC_CFG_FC_LATENCY_CFG | + SYS_MAC_FC_CFG_ZERO_PAUSE_ENA | + SYS_MAC_FC_CFG_PAUSE_VAL_CFG | + SYS_MAC_FC_CFG_RX_FC_ENA | + SYS_MAC_FC_CFG_TX_FC_ENA, + lan966x, SYS_MAC_FC_CFG(port->chip_port)); + + /* Tail dropping watermark */ + atop_wm = lan966x->shared_queue_sz; + + /* The total memory size is diveded by number of front ports plus CPU + * port + */ + lan_wr(lan966x_wm_enc(atop_wm / lan966x->num_phys_ports + 1), lan966x, + SYS_ATOP(port->chip_port)); + lan_wr(lan966x_wm_enc(atop_wm), lan966x, SYS_ATOP_TOT_CFG); + + /* This needs to be at the end */ + /* Enable MAC module */ + lan_wr(DEV_MAC_ENA_CFG_RX_ENA_SET(1) | + DEV_MAC_ENA_CFG_TX_ENA_SET(1), + lan966x, DEV_MAC_ENA_CFG(port->chip_port)); + + /* Take out the clock from reset */ + lan_wr(DEV_CLOCK_CFG_LINK_SPEED_SET(speed), + lan966x, DEV_CLOCK_CFG(port->chip_port)); + + /* Core: Enable port for frame transfer */ + lan_wr(QSYS_SW_PORT_MODE_PORT_ENA_SET(1) | + QSYS_SW_PORT_MODE_SCH_NEXT_CFG_SET(1) | + QSYS_SW_PORT_MODE_INGRESS_DROP_MODE_SET(1), + lan966x, QSYS_SW_PORT_MODE(port->chip_port)); + + lan_rmw(AFI_PORT_CFG_FC_SKIP_TTI_INJ_SET(0) | + AFI_PORT_CFG_FRM_OUT_MAX_SET(16), + AFI_PORT_CFG_FC_SKIP_TTI_INJ | + AFI_PORT_CFG_FRM_OUT_MAX, + lan966x, AFI_PORT_CFG(port->chip_port)); +} + +void lan966x_port_config_down(struct lan966x_port *port) +{ + lan966x_port_link_down(port); +} + +void lan966x_port_config_up(struct lan966x_port *port) +{ + lan966x_port_link_up(port); +} + +void lan966x_port_status_get(struct lan966x_port *port, + struct phylink_link_state *state) +{ + struct lan966x *lan966x = port->lan966x; + bool link_down; + u16 bmsr = 0; + u16 lp_adv; + u32 val; + + val = lan_rd(lan966x, DEV_PCS1G_STICKY(port->chip_port)); + link_down = DEV_PCS1G_STICKY_LINK_DOWN_STICKY_GET(val); + if (link_down) + lan_wr(val, lan966x, DEV_PCS1G_STICKY(port->chip_port)); + + /* Get both current Link and Sync status */ + val = lan_rd(lan966x, DEV_PCS1G_LINK_STATUS(port->chip_port)); + state->link = DEV_PCS1G_LINK_STATUS_LINK_STATUS_GET(val) && + DEV_PCS1G_LINK_STATUS_SYNC_STATUS_GET(val); + state->link &= !link_down; + + /* Get PCS ANEG status register */ + val = lan_rd(lan966x, DEV_PCS1G_ANEG_STATUS(port->chip_port)); + /* Aneg complete provides more information */ + if (DEV_PCS1G_ANEG_STATUS_ANEG_COMPLETE_GET(val)) { + state->an_complete = true; + + bmsr |= state->link ? BMSR_LSTATUS : 0; + bmsr |= BMSR_ANEGCOMPLETE; + + lp_adv = DEV_PCS1G_ANEG_STATUS_LP_ADV_GET(val); + phylink_mii_c22_pcs_decode_state(state, bmsr, lp_adv); + } else { + if (!state->link) + return; + + if (state->interface == PHY_INTERFACE_MODE_1000BASEX) + state->speed = SPEED_1000; + else if (state->interface == PHY_INTERFACE_MODE_2500BASEX) + state->speed = SPEED_2500; + + state->duplex = DUPLEX_FULL; + } +} + +int lan966x_port_pcs_set(struct lan966x_port *port, + struct lan966x_port_config *config) +{ + struct lan966x *lan966x = port->lan966x; + bool inband_aneg = false; + bool outband; + bool full_preamble = false; + + if (config->portmode == PHY_INTERFACE_MODE_QUSGMII) + full_preamble = true; + + if (config->inband) { + if (config->portmode == PHY_INTERFACE_MODE_SGMII || + phy_interface_num_ports(config->portmode) == 4) + inband_aneg = true; /* Cisco-SGMII in-band-aneg */ + else if (config->portmode == PHY_INTERFACE_MODE_1000BASEX && + config->autoneg) + inband_aneg = true; /* Clause-37 in-band-aneg */ + + outband = false; + } else { + outband = true; + } + + /* Disable or enable inband. 
+ * For QUSGMII, we rely on the preamble to transmit data such as + * timestamps, therefore force full preamble transmission, and prevent + * premable shortening + */ + lan_rmw(DEV_PCS1G_MODE_CFG_SGMII_MODE_ENA_SET(outband) | + DEV_PCS1G_MODE_CFG_SAVE_PREAMBLE_ENA_SET(full_preamble), + DEV_PCS1G_MODE_CFG_SGMII_MODE_ENA | + DEV_PCS1G_MODE_CFG_SAVE_PREAMBLE_ENA, + lan966x, DEV_PCS1G_MODE_CFG(port->chip_port)); + + /* Enable PCS */ + lan_wr(DEV_PCS1G_CFG_PCS_ENA_SET(1), + lan966x, DEV_PCS1G_CFG(port->chip_port)); + + if (inband_aneg) { + int adv = phylink_mii_c22_pcs_encode_advertisement(config->portmode, + config->advertising); + if (adv >= 0) + /* Enable in-band aneg */ + lan_wr(DEV_PCS1G_ANEG_CFG_ADV_ABILITY_SET(adv) | + DEV_PCS1G_ANEG_CFG_SW_RESOLVE_ENA_SET(1) | + DEV_PCS1G_ANEG_CFG_ENA_SET(1) | + DEV_PCS1G_ANEG_CFG_RESTART_ONE_SHOT_SET(1), + lan966x, DEV_PCS1G_ANEG_CFG(port->chip_port)); + } else { + lan_wr(0, lan966x, DEV_PCS1G_ANEG_CFG(port->chip_port)); + } + + /* Take PCS out of reset */ + lan_rmw(DEV_CLOCK_CFG_LINK_SPEED_SET(LAN966X_SPEED_1000) | + DEV_CLOCK_CFG_PCS_RX_RST_SET(0) | + DEV_CLOCK_CFG_PCS_TX_RST_SET(0), + DEV_CLOCK_CFG_LINK_SPEED | + DEV_CLOCK_CFG_PCS_RX_RST | + DEV_CLOCK_CFG_PCS_TX_RST, + lan966x, DEV_CLOCK_CFG(port->chip_port)); + + port->config = *config; + + return 0; +} + +void lan966x_port_init(struct lan966x_port *port) +{ + struct lan966x_port_config *config = &port->config; + struct lan966x *lan966x = port->lan966x; + + lan_rmw(ANA_PORT_CFG_LEARN_ENA_SET(0), + ANA_PORT_CFG_LEARN_ENA, + lan966x, ANA_PORT_CFG(port->chip_port)); + + lan966x_port_config_down(port); + + if (lan966x->fdma) + lan966x_fdma_netdev_init(lan966x, port->dev); + + if (phy_interface_num_ports(config->portmode) != 4) + return; + + lan_rmw(DEV_CLOCK_CFG_PCS_RX_RST_SET(0) | + DEV_CLOCK_CFG_PCS_TX_RST_SET(0) | + DEV_CLOCK_CFG_LINK_SPEED_SET(LAN966X_SPEED_1000), + DEV_CLOCK_CFG_PCS_RX_RST | + DEV_CLOCK_CFG_PCS_TX_RST | + DEV_CLOCK_CFG_LINK_SPEED, + lan966x, DEV_CLOCK_CFG(port->chip_port)); +} diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c b/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c new file mode 100644 index 000000000..0a0e233f3 --- /dev/null +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c @@ -0,0 +1,900 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include <linux/ptp_classify.h> + +#include "lan966x_main.h" + +#define LAN966X_MAX_PTP_ID 512 + +/* Represents 1ppm adjustment in 2^59 format with 6.037735849ns as reference + * The value is calculated as following: (1/1000000)/((2^-59)/6.037735849) + */ +#define LAN966X_1PPM_FORMAT 3480517749723LL + +/* Represents 1ppb adjustment in 2^29 format with 6.037735849ns as reference + * The value is calculated as following: (1/1000000000)/((2^59)/6.037735849) + */ +#define LAN966X_1PPB_FORMAT 3480517749LL + +#define TOD_ACC_PIN 0x7 + +enum { + PTP_PIN_ACTION_IDLE = 0, + PTP_PIN_ACTION_LOAD, + PTP_PIN_ACTION_SAVE, + PTP_PIN_ACTION_CLOCK, + PTP_PIN_ACTION_DELTA, + PTP_PIN_ACTION_TOD +}; + +static u64 lan966x_ptp_get_nominal_value(void) +{ + /* This is the default value that for each system clock, the time of day + * is increased. It has the format 5.59 nanosecond. 
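+ * In this 5.59 fixed-point format, 0x304d4873ecade305 / 2^59 works out to
+ * roughly 6.037735849 ns per cycle (a 165.625 MHz system clock); one
+ * millionth of this increment is the LAN966X_1PPM_FORMAT constant above.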
+ */ + return 0x304d4873ecade305; +} + +int lan966x_ptp_hwtstamp_set(struct lan966x_port *port, struct ifreq *ifr) +{ + struct lan966x *lan966x = port->lan966x; + struct hwtstamp_config cfg; + struct lan966x_phc *phc; + + /* For now don't allow to run ptp on ports that are part of a bridge, + * because in case of transparent clock the HW will still forward the + * frames, so there would be duplicate frames + */ + if (lan966x->bridge_mask & BIT(port->chip_port)) + return -EINVAL; + + if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) + return -EFAULT; + + switch (cfg.tx_type) { + case HWTSTAMP_TX_ON: + port->ptp_cmd = IFH_REW_OP_TWO_STEP_PTP; + break; + case HWTSTAMP_TX_ONESTEP_SYNC: + port->ptp_cmd = IFH_REW_OP_ONE_STEP_PTP; + break; + case HWTSTAMP_TX_OFF: + port->ptp_cmd = IFH_REW_OP_NOOP; + break; + default: + return -ERANGE; + } + + switch (cfg.rx_filter) { + case HWTSTAMP_FILTER_NONE: + break; + case HWTSTAMP_FILTER_ALL: + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + case HWTSTAMP_FILTER_NTP_ALL: + cfg.rx_filter = HWTSTAMP_FILTER_ALL; + break; + default: + return -ERANGE; + } + + /* Commit back the result & save it */ + mutex_lock(&lan966x->ptp_lock); + phc = &lan966x->phc[LAN966X_PHC_PORT]; + memcpy(&phc->hwtstamp_config, &cfg, sizeof(cfg)); + mutex_unlock(&lan966x->ptp_lock); + + return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0; +} + +int lan966x_ptp_hwtstamp_get(struct lan966x_port *port, struct ifreq *ifr) +{ + struct lan966x *lan966x = port->lan966x; + struct lan966x_phc *phc; + + phc = &lan966x->phc[LAN966X_PHC_PORT]; + return copy_to_user(ifr->ifr_data, &phc->hwtstamp_config, + sizeof(phc->hwtstamp_config)) ? 
-EFAULT : 0; +} + +static int lan966x_ptp_classify(struct lan966x_port *port, struct sk_buff *skb) +{ + struct ptp_header *header; + u8 msgtype; + int type; + + if (port->ptp_cmd == IFH_REW_OP_NOOP) + return IFH_REW_OP_NOOP; + + type = ptp_classify_raw(skb); + if (type == PTP_CLASS_NONE) + return IFH_REW_OP_NOOP; + + header = ptp_parse_header(skb, type); + if (!header) + return IFH_REW_OP_NOOP; + + if (port->ptp_cmd == IFH_REW_OP_TWO_STEP_PTP) + return IFH_REW_OP_TWO_STEP_PTP; + + /* If it is sync and run 1 step then set the correct operation, + * otherwise run as 2 step + */ + msgtype = ptp_get_msgtype(header, type); + if ((msgtype & 0xf) == 0) + return IFH_REW_OP_ONE_STEP_PTP; + + return IFH_REW_OP_TWO_STEP_PTP; +} + +static void lan966x_ptp_txtstamp_old_release(struct lan966x_port *port) +{ + struct sk_buff *skb, *skb_tmp; + unsigned long flags; + + spin_lock_irqsave(&port->tx_skbs.lock, flags); + skb_queue_walk_safe(&port->tx_skbs, skb, skb_tmp) { + if time_after(LAN966X_SKB_CB(skb)->jiffies + LAN966X_PTP_TIMEOUT, + jiffies) + break; + + __skb_unlink(skb, &port->tx_skbs); + dev_kfree_skb_any(skb); + } + spin_unlock_irqrestore(&port->tx_skbs.lock, flags); +} + +int lan966x_ptp_txtstamp_request(struct lan966x_port *port, + struct sk_buff *skb) +{ + struct lan966x *lan966x = port->lan966x; + unsigned long flags; + u8 rew_op; + + rew_op = lan966x_ptp_classify(port, skb); + LAN966X_SKB_CB(skb)->rew_op = rew_op; + + if (rew_op != IFH_REW_OP_TWO_STEP_PTP) + return 0; + + lan966x_ptp_txtstamp_old_release(port); + + spin_lock_irqsave(&lan966x->ptp_ts_id_lock, flags); + if (lan966x->ptp_skbs == LAN966X_MAX_PTP_ID) { + spin_unlock_irqrestore(&lan966x->ptp_ts_id_lock, flags); + return -EBUSY; + } + + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + + skb_queue_tail(&port->tx_skbs, skb); + LAN966X_SKB_CB(skb)->ts_id = port->ts_id; + LAN966X_SKB_CB(skb)->jiffies = jiffies; + + lan966x->ptp_skbs++; + port->ts_id++; + if (port->ts_id == LAN966X_MAX_PTP_ID) + port->ts_id = 0; + + spin_unlock_irqrestore(&lan966x->ptp_ts_id_lock, flags); + + return 0; +} + +void lan966x_ptp_txtstamp_release(struct lan966x_port *port, + struct sk_buff *skb) +{ + struct lan966x *lan966x = port->lan966x; + unsigned long flags; + + spin_lock_irqsave(&lan966x->ptp_ts_id_lock, flags); + port->ts_id--; + lan966x->ptp_skbs--; + skb_unlink(skb, &port->tx_skbs); + spin_unlock_irqrestore(&lan966x->ptp_ts_id_lock, flags); +} + +static void lan966x_get_hwtimestamp(struct lan966x *lan966x, + struct timespec64 *ts, + u32 nsec) +{ + /* Read current PTP time to get seconds */ + unsigned long flags; + u32 curr_nsec; + + spin_lock_irqsave(&lan966x->ptp_clock_lock, flags); + + lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_SAVE) | + PTP_PIN_CFG_PIN_DOM_SET(LAN966X_PHC_PORT) | + PTP_PIN_CFG_PIN_SYNC_SET(0), + PTP_PIN_CFG_PIN_ACTION | + PTP_PIN_CFG_PIN_DOM | + PTP_PIN_CFG_PIN_SYNC, + lan966x, PTP_PIN_CFG(TOD_ACC_PIN)); + + ts->tv_sec = lan_rd(lan966x, PTP_TOD_SEC_LSB(TOD_ACC_PIN)); + curr_nsec = lan_rd(lan966x, PTP_TOD_NSEC(TOD_ACC_PIN)); + + ts->tv_nsec = nsec; + + /* Sec has incremented since the ts was registered */ + if (curr_nsec < nsec) + ts->tv_sec--; + + spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags); +} + +irqreturn_t lan966x_ptp_irq_handler(int irq, void *args) +{ + int budget = LAN966X_MAX_PTP_ID; + struct lan966x *lan966x = args; + + while (budget--) { + struct sk_buff *skb, *skb_tmp, *skb_match = NULL; + struct skb_shared_hwtstamps shhwtstamps; + struct lan966x_port *port; + struct timespec64 ts; + unsigned long 
flags; + u32 val, id, txport; + u32 delay; + + val = lan_rd(lan966x, PTP_TWOSTEP_CTRL); + + /* Check if a timestamp can be retrieved */ + if (!(val & PTP_TWOSTEP_CTRL_VLD)) + break; + + WARN_ON(val & PTP_TWOSTEP_CTRL_OVFL); + + if (!(val & PTP_TWOSTEP_CTRL_STAMP_TX)) + continue; + + /* Retrieve the ts Tx port */ + txport = PTP_TWOSTEP_CTRL_STAMP_PORT_GET(val); + + /* Retrieve its associated skb */ + port = lan966x->ports[txport]; + + /* Retrieve the delay */ + delay = lan_rd(lan966x, PTP_TWOSTEP_STAMP); + delay = PTP_TWOSTEP_STAMP_STAMP_NSEC_GET(delay); + + /* Get next timestamp from fifo, which needs to be the + * rx timestamp which represents the id of the frame + */ + lan_rmw(PTP_TWOSTEP_CTRL_NXT_SET(1), + PTP_TWOSTEP_CTRL_NXT, + lan966x, PTP_TWOSTEP_CTRL); + + val = lan_rd(lan966x, PTP_TWOSTEP_CTRL); + + /* Check if a timestamp can be retried */ + if (!(val & PTP_TWOSTEP_CTRL_VLD)) + break; + + /* Read RX timestamping to get the ID */ + id = lan_rd(lan966x, PTP_TWOSTEP_STAMP); + + spin_lock_irqsave(&port->tx_skbs.lock, flags); + skb_queue_walk_safe(&port->tx_skbs, skb, skb_tmp) { + if (LAN966X_SKB_CB(skb)->ts_id != id) + continue; + + __skb_unlink(skb, &port->tx_skbs); + skb_match = skb; + break; + } + spin_unlock_irqrestore(&port->tx_skbs.lock, flags); + + /* Next ts */ + lan_rmw(PTP_TWOSTEP_CTRL_NXT_SET(1), + PTP_TWOSTEP_CTRL_NXT, + lan966x, PTP_TWOSTEP_CTRL); + + if (WARN_ON(!skb_match)) + continue; + + spin_lock_irqsave(&lan966x->ptp_ts_id_lock, flags); + lan966x->ptp_skbs--; + spin_unlock_irqrestore(&lan966x->ptp_ts_id_lock, flags); + + /* Get the h/w timestamp */ + lan966x_get_hwtimestamp(lan966x, &ts, delay); + + /* Set the timestamp into the skb */ + shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec); + skb_tstamp_tx(skb_match, &shhwtstamps); + + dev_kfree_skb_any(skb_match); + } + + return IRQ_HANDLED; +} + +irqreturn_t lan966x_ptp_ext_irq_handler(int irq, void *args) +{ + struct lan966x *lan966x = args; + struct lan966x_phc *phc; + unsigned long flags; + u64 time = 0; + time64_t s; + int pin, i; + s64 ns; + + if (!(lan_rd(lan966x, PTP_PIN_INTR))) + return IRQ_NONE; + + /* Go through all domains and see which pin generated the interrupt */ + for (i = 0; i < LAN966X_PHC_COUNT; ++i) { + struct ptp_clock_event ptp_event = {0}; + + phc = &lan966x->phc[i]; + pin = ptp_find_pin_unlocked(phc->clock, PTP_PF_EXTTS, 0); + if (pin == -1) + continue; + + if (!(lan_rd(lan966x, PTP_PIN_INTR) & BIT(pin))) + continue; + + spin_lock_irqsave(&lan966x->ptp_clock_lock, flags); + + /* Enable to get the new interrupt. 
+ * By writing 1 it clears the bit + */ + lan_wr(BIT(pin), lan966x, PTP_PIN_INTR); + + /* Get current time */ + s = lan_rd(lan966x, PTP_TOD_SEC_MSB(pin)); + s <<= 32; + s |= lan_rd(lan966x, PTP_TOD_SEC_LSB(pin)); + ns = lan_rd(lan966x, PTP_TOD_NSEC(pin)); + ns &= PTP_TOD_NSEC_TOD_NSEC; + + spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags); + + if ((ns & 0xFFFFFFF0) == 0x3FFFFFF0) { + s--; + ns &= 0xf; + ns += 999999984; + } + time = ktime_set(s, ns); + + ptp_event.index = pin; + ptp_event.timestamp = time; + ptp_event.type = PTP_CLOCK_EXTTS; + ptp_clock_event(phc->clock, &ptp_event); + } + + return IRQ_HANDLED; +} + +static int lan966x_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) +{ + struct lan966x_phc *phc = container_of(ptp, struct lan966x_phc, info); + struct lan966x *lan966x = phc->lan966x; + unsigned long flags; + bool neg_adj = 0; + u64 tod_inc; + u64 ref; + + if (!scaled_ppm) + return 0; + + if (scaled_ppm < 0) { + neg_adj = 1; + scaled_ppm = -scaled_ppm; + } + + tod_inc = lan966x_ptp_get_nominal_value(); + + /* The multiplication is split in 2 separate additions because of + * overflow issues. If scaled_ppm with 16bit fractional part was bigger + * than 20ppm then we got overflow. + */ + ref = LAN966X_1PPM_FORMAT * (scaled_ppm >> 16); + ref += (LAN966X_1PPM_FORMAT * (0xffff & scaled_ppm)) >> 16; + tod_inc = neg_adj ? tod_inc - ref : tod_inc + ref; + + spin_lock_irqsave(&lan966x->ptp_clock_lock, flags); + + lan_rmw(PTP_DOM_CFG_CLKCFG_DIS_SET(1 << BIT(phc->index)), + PTP_DOM_CFG_CLKCFG_DIS, + lan966x, PTP_DOM_CFG); + + lan_wr((u32)tod_inc & 0xFFFFFFFF, lan966x, + PTP_CLK_PER_CFG(phc->index, 0)); + lan_wr((u32)(tod_inc >> 32), lan966x, + PTP_CLK_PER_CFG(phc->index, 1)); + + lan_rmw(PTP_DOM_CFG_CLKCFG_DIS_SET(0), + PTP_DOM_CFG_CLKCFG_DIS, + lan966x, PTP_DOM_CFG); + + spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags); + + return 0; +} + +static int lan966x_ptp_settime64(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + struct lan966x_phc *phc = container_of(ptp, struct lan966x_phc, info); + struct lan966x *lan966x = phc->lan966x; + unsigned long flags; + + spin_lock_irqsave(&lan966x->ptp_clock_lock, flags); + + /* Must be in IDLE mode before the time can be loaded */ + lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_IDLE) | + PTP_PIN_CFG_PIN_DOM_SET(phc->index) | + PTP_PIN_CFG_PIN_SYNC_SET(0), + PTP_PIN_CFG_PIN_ACTION | + PTP_PIN_CFG_PIN_DOM | + PTP_PIN_CFG_PIN_SYNC, + lan966x, PTP_PIN_CFG(TOD_ACC_PIN)); + + /* Set new value */ + lan_wr(PTP_TOD_SEC_MSB_TOD_SEC_MSB_SET(upper_32_bits(ts->tv_sec)), + lan966x, PTP_TOD_SEC_MSB(TOD_ACC_PIN)); + lan_wr(lower_32_bits(ts->tv_sec), + lan966x, PTP_TOD_SEC_LSB(TOD_ACC_PIN)); + lan_wr(ts->tv_nsec, lan966x, PTP_TOD_NSEC(TOD_ACC_PIN)); + + /* Apply new values */ + lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_LOAD) | + PTP_PIN_CFG_PIN_DOM_SET(phc->index) | + PTP_PIN_CFG_PIN_SYNC_SET(0), + PTP_PIN_CFG_PIN_ACTION | + PTP_PIN_CFG_PIN_DOM | + PTP_PIN_CFG_PIN_SYNC, + lan966x, PTP_PIN_CFG(TOD_ACC_PIN)); + + spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags); + + return 0; +} + +int lan966x_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts) +{ + struct lan966x_phc *phc = container_of(ptp, struct lan966x_phc, info); + struct lan966x *lan966x = phc->lan966x; + unsigned long flags; + time64_t s; + s64 ns; + + spin_lock_irqsave(&lan966x->ptp_clock_lock, flags); + + lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_SAVE) | + PTP_PIN_CFG_PIN_DOM_SET(phc->index) | + PTP_PIN_CFG_PIN_SYNC_SET(0), + 
PTP_PIN_CFG_PIN_ACTION | + PTP_PIN_CFG_PIN_DOM | + PTP_PIN_CFG_PIN_SYNC, + lan966x, PTP_PIN_CFG(TOD_ACC_PIN)); + + s = lan_rd(lan966x, PTP_TOD_SEC_MSB(TOD_ACC_PIN)); + s <<= 32; + s |= lan_rd(lan966x, PTP_TOD_SEC_LSB(TOD_ACC_PIN)); + ns = lan_rd(lan966x, PTP_TOD_NSEC(TOD_ACC_PIN)); + ns &= PTP_TOD_NSEC_TOD_NSEC; + + spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags); + + /* Deal with negative values */ + if ((ns & 0xFFFFFFF0) == 0x3FFFFFF0) { + s--; + ns &= 0xf; + ns += 999999984; + } + + set_normalized_timespec64(ts, s, ns); + return 0; +} + +static int lan966x_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) +{ + struct lan966x_phc *phc = container_of(ptp, struct lan966x_phc, info); + struct lan966x *lan966x = phc->lan966x; + + if (delta > -(NSEC_PER_SEC / 2) && delta < (NSEC_PER_SEC / 2)) { + unsigned long flags; + + spin_lock_irqsave(&lan966x->ptp_clock_lock, flags); + + /* Must be in IDLE mode before the time can be loaded */ + lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_IDLE) | + PTP_PIN_CFG_PIN_DOM_SET(phc->index) | + PTP_PIN_CFG_PIN_SYNC_SET(0), + PTP_PIN_CFG_PIN_ACTION | + PTP_PIN_CFG_PIN_DOM | + PTP_PIN_CFG_PIN_SYNC, + lan966x, PTP_PIN_CFG(TOD_ACC_PIN)); + + lan_wr(PTP_TOD_NSEC_TOD_NSEC_SET(delta), + lan966x, PTP_TOD_NSEC(TOD_ACC_PIN)); + + /* Adjust time with the value of PTP_TOD_NSEC */ + lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_DELTA) | + PTP_PIN_CFG_PIN_DOM_SET(phc->index) | + PTP_PIN_CFG_PIN_SYNC_SET(0), + PTP_PIN_CFG_PIN_ACTION | + PTP_PIN_CFG_PIN_DOM | + PTP_PIN_CFG_PIN_SYNC, + lan966x, PTP_PIN_CFG(TOD_ACC_PIN)); + + spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags); + } else { + /* Fall back using lan966x_ptp_settime64 which is not exact */ + struct timespec64 ts; + u64 now; + + lan966x_ptp_gettime64(ptp, &ts); + + now = ktime_to_ns(timespec64_to_ktime(ts)); + ts = ns_to_timespec64(now + delta); + + lan966x_ptp_settime64(ptp, &ts); + } + + return 0; +} + +static int lan966x_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin, + enum ptp_pin_function func, unsigned int chan) +{ + struct lan966x_phc *phc = container_of(ptp, struct lan966x_phc, info); + struct lan966x *lan966x = phc->lan966x; + struct ptp_clock_info *info; + int i; + + /* Currently support only 1 channel */ + if (chan != 0) + return -1; + + switch (func) { + case PTP_PF_NONE: + case PTP_PF_PEROUT: + case PTP_PF_EXTTS: + break; + default: + return -1; + } + + /* The PTP pins are shared by all the PHC. So it is required to see if + * the pin is connected to another PHC. The pin is connected to another + * PHC if that pin already has a function on that PHC. 
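+ * For example, a pin already claimed as PEROUT by one PHC cannot be
+ * requested as EXTTS (or PEROUT) through any of the other PHCs.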
+ */ + for (i = 0; i < LAN966X_PHC_COUNT; ++i) { + info = &lan966x->phc[i].info; + + /* Ignore the check with ourself */ + if (ptp == info) + continue; + + if (info->pin_config[pin].func == PTP_PF_PEROUT || + info->pin_config[pin].func == PTP_PF_EXTTS) + return -1; + } + + return 0; +} + +static int lan966x_ptp_perout(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + struct lan966x_phc *phc = container_of(ptp, struct lan966x_phc, info); + struct lan966x *lan966x = phc->lan966x; + struct timespec64 ts_phase, ts_period; + unsigned long flags; + s64 wf_high, wf_low; + bool pps = false; + int pin; + + if (rq->perout.flags & ~(PTP_PEROUT_DUTY_CYCLE | + PTP_PEROUT_PHASE)) + return -EOPNOTSUPP; + + pin = ptp_find_pin(phc->clock, PTP_PF_PEROUT, rq->perout.index); + if (pin == -1 || pin >= LAN966X_PHC_PINS_NUM) + return -EINVAL; + + if (!on) { + spin_lock_irqsave(&lan966x->ptp_clock_lock, flags); + lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_IDLE) | + PTP_PIN_CFG_PIN_DOM_SET(phc->index) | + PTP_PIN_CFG_PIN_SYNC_SET(0), + PTP_PIN_CFG_PIN_ACTION | + PTP_PIN_CFG_PIN_DOM | + PTP_PIN_CFG_PIN_SYNC, + lan966x, PTP_PIN_CFG(pin)); + spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags); + return 0; + } + + if (rq->perout.period.sec == 1 && + rq->perout.period.nsec == 0) + pps = true; + + if (rq->perout.flags & PTP_PEROUT_PHASE) { + ts_phase.tv_sec = rq->perout.phase.sec; + ts_phase.tv_nsec = rq->perout.phase.nsec; + } else { + ts_phase.tv_sec = rq->perout.start.sec; + ts_phase.tv_nsec = rq->perout.start.nsec; + } + + if (ts_phase.tv_sec || (ts_phase.tv_nsec && !pps)) { + dev_warn(lan966x->dev, + "Absolute time not supported!\n"); + return -EINVAL; + } + + if (rq->perout.flags & PTP_PEROUT_DUTY_CYCLE) { + struct timespec64 ts_on; + + ts_on.tv_sec = rq->perout.on.sec; + ts_on.tv_nsec = rq->perout.on.nsec; + + wf_high = timespec64_to_ns(&ts_on); + } else { + wf_high = 5000; + } + + if (pps) { + spin_lock_irqsave(&lan966x->ptp_clock_lock, flags); + lan_wr(PTP_WF_LOW_PERIOD_PIN_WFL(ts_phase.tv_nsec), + lan966x, PTP_WF_LOW_PERIOD(pin)); + lan_wr(PTP_WF_HIGH_PERIOD_PIN_WFH(wf_high), + lan966x, PTP_WF_HIGH_PERIOD(pin)); + lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_CLOCK) | + PTP_PIN_CFG_PIN_DOM_SET(phc->index) | + PTP_PIN_CFG_PIN_SYNC_SET(3), + PTP_PIN_CFG_PIN_ACTION | + PTP_PIN_CFG_PIN_DOM | + PTP_PIN_CFG_PIN_SYNC, + lan966x, PTP_PIN_CFG(pin)); + spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags); + return 0; + } + + ts_period.tv_sec = rq->perout.period.sec; + ts_period.tv_nsec = rq->perout.period.nsec; + + wf_low = timespec64_to_ns(&ts_period); + wf_low -= wf_high; + + spin_lock_irqsave(&lan966x->ptp_clock_lock, flags); + lan_wr(PTP_WF_LOW_PERIOD_PIN_WFL(wf_low), + lan966x, PTP_WF_LOW_PERIOD(pin)); + lan_wr(PTP_WF_HIGH_PERIOD_PIN_WFH(wf_high), + lan966x, PTP_WF_HIGH_PERIOD(pin)); + lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_CLOCK) | + PTP_PIN_CFG_PIN_DOM_SET(phc->index) | + PTP_PIN_CFG_PIN_SYNC_SET(0), + PTP_PIN_CFG_PIN_ACTION | + PTP_PIN_CFG_PIN_DOM | + PTP_PIN_CFG_PIN_SYNC, + lan966x, PTP_PIN_CFG(pin)); + spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags); + + return 0; +} + +static int lan966x_ptp_extts(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + struct lan966x_phc *phc = container_of(ptp, struct lan966x_phc, info); + struct lan966x *lan966x = phc->lan966x; + unsigned long flags; + int pin; + u32 val; + + if (lan966x->ptp_ext_irq <= 0) + return -EOPNOTSUPP; + + /* Reject requests with unsupported flags */ + if (rq->extts.flags 
& ~(PTP_ENABLE_FEATURE | + PTP_RISING_EDGE | + PTP_STRICT_FLAGS)) + return -EOPNOTSUPP; + + pin = ptp_find_pin(phc->clock, PTP_PF_EXTTS, rq->extts.index); + if (pin == -1 || pin >= LAN966X_PHC_PINS_NUM) + return -EINVAL; + + spin_lock_irqsave(&lan966x->ptp_clock_lock, flags); + lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_SAVE) | + PTP_PIN_CFG_PIN_SYNC_SET(on ? 3 : 0) | + PTP_PIN_CFG_PIN_DOM_SET(phc->index) | + PTP_PIN_CFG_PIN_SELECT_SET(pin), + PTP_PIN_CFG_PIN_ACTION | + PTP_PIN_CFG_PIN_SYNC | + PTP_PIN_CFG_PIN_DOM | + PTP_PIN_CFG_PIN_SELECT, + lan966x, PTP_PIN_CFG(pin)); + + val = lan_rd(lan966x, PTP_PIN_INTR_ENA); + if (on) + val |= BIT(pin); + else + val &= ~BIT(pin); + lan_wr(val, lan966x, PTP_PIN_INTR_ENA); + + spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags); + + return 0; +} + +static int lan966x_ptp_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + switch (rq->type) { + case PTP_CLK_REQ_PEROUT: + return lan966x_ptp_perout(ptp, rq, on); + case PTP_CLK_REQ_EXTTS: + return lan966x_ptp_extts(ptp, rq, on); + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static struct ptp_clock_info lan966x_ptp_clock_info = { + .owner = THIS_MODULE, + .name = "lan966x ptp", + .max_adj = 200000, + .gettime64 = lan966x_ptp_gettime64, + .settime64 = lan966x_ptp_settime64, + .adjtime = lan966x_ptp_adjtime, + .adjfine = lan966x_ptp_adjfine, + .verify = lan966x_ptp_verify, + .enable = lan966x_ptp_enable, + .n_per_out = LAN966X_PHC_PINS_NUM, + .n_ext_ts = LAN966X_PHC_PINS_NUM, + .n_pins = LAN966X_PHC_PINS_NUM, +}; + +static int lan966x_ptp_phc_init(struct lan966x *lan966x, + int index, + struct ptp_clock_info *clock_info) +{ + struct lan966x_phc *phc = &lan966x->phc[index]; + struct ptp_pin_desc *p; + int i; + + for (i = 0; i < LAN966X_PHC_PINS_NUM; i++) { + p = &phc->pins[i]; + + snprintf(p->name, sizeof(p->name), "pin%d", i); + p->index = i; + p->func = PTP_PF_NONE; + } + + phc->info = *clock_info; + phc->info.pin_config = &phc->pins[0]; + phc->clock = ptp_clock_register(&phc->info, lan966x->dev); + if (IS_ERR(phc->clock)) + return PTR_ERR(phc->clock); + + phc->index = index; + phc->lan966x = lan966x; + + /* PTP Rx stamping is always enabled. 
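+ * The default config returned by lan966x_ptp_hwtstamp_get() therefore
+ * advertises HWTSTAMP_FILTER_PTP_V2_EVENT until user space changes it.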
*/ + phc->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; + + return 0; +} + +int lan966x_ptp_init(struct lan966x *lan966x) +{ + u64 tod_adj = lan966x_ptp_get_nominal_value(); + struct lan966x_port *port; + int err, i; + + if (!lan966x->ptp) + return 0; + + for (i = 0; i < LAN966X_PHC_COUNT; ++i) { + err = lan966x_ptp_phc_init(lan966x, i, &lan966x_ptp_clock_info); + if (err) + return err; + } + + spin_lock_init(&lan966x->ptp_clock_lock); + spin_lock_init(&lan966x->ptp_ts_id_lock); + mutex_init(&lan966x->ptp_lock); + + /* Disable master counters */ + lan_wr(PTP_DOM_CFG_ENA_SET(0), lan966x, PTP_DOM_CFG); + + /* Configure the nominal TOD increment per clock cycle */ + lan_rmw(PTP_DOM_CFG_CLKCFG_DIS_SET(0x7), + PTP_DOM_CFG_CLKCFG_DIS, + lan966x, PTP_DOM_CFG); + + for (i = 0; i < LAN966X_PHC_COUNT; ++i) { + lan_wr((u32)tod_adj & 0xFFFFFFFF, lan966x, + PTP_CLK_PER_CFG(i, 0)); + lan_wr((u32)(tod_adj >> 32), lan966x, + PTP_CLK_PER_CFG(i, 1)); + } + + lan_rmw(PTP_DOM_CFG_CLKCFG_DIS_SET(0), + PTP_DOM_CFG_CLKCFG_DIS, + lan966x, PTP_DOM_CFG); + + /* Enable master counters */ + lan_wr(PTP_DOM_CFG_ENA_SET(0x7), lan966x, PTP_DOM_CFG); + + for (i = 0; i < lan966x->num_phys_ports; i++) { + port = lan966x->ports[i]; + if (!port) + continue; + + skb_queue_head_init(&port->tx_skbs); + } + + return 0; +} + +void lan966x_ptp_deinit(struct lan966x *lan966x) +{ + struct lan966x_port *port; + int i; + + if (!lan966x->ptp) + return; + + for (i = 0; i < lan966x->num_phys_ports; i++) { + port = lan966x->ports[i]; + if (!port) + continue; + + skb_queue_purge(&port->tx_skbs); + } + + for (i = 0; i < LAN966X_PHC_COUNT; ++i) + ptp_clock_unregister(lan966x->phc[i].clock); +} + +void lan966x_ptp_rxtstamp(struct lan966x *lan966x, struct sk_buff *skb, + u64 timestamp) +{ + struct skb_shared_hwtstamps *shhwtstamps; + struct lan966x_phc *phc; + struct timespec64 ts; + u64 full_ts_in_ns; + + if (!lan966x->ptp) + return; + + phc = &lan966x->phc[LAN966X_PHC_PORT]; + lan966x_ptp_gettime64(&phc->info, &ts); + + /* Drop the sub-ns precision */ + timestamp = timestamp >> 2; + if (ts.tv_nsec < timestamp) + ts.tv_sec--; + ts.tv_nsec = timestamp; + full_ts_in_ns = ktime_set(ts.tv_sec, ts.tv_nsec); + + shhwtstamps = skb_hwtstamps(skb); + shhwtstamps->hwtstamp = full_ts_in_ns; +} + +u32 lan966x_ptp_get_period_ps(void) +{ + /* This represents the system clock period in picoseconds */ + return 15125; +} diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h b/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h new file mode 100644 index 000000000..fb5087fef --- /dev/null +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h @@ -0,0 +1,1509 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ + +/* This file is autogenerated by cml-utils 2021-10-10 13:25:08 +0200. + * Commit ID: 26db2002924973d36a30b369c94f025a678fe9ea (dirty) + */ + +#ifndef _LAN966X_REGS_H_ +#define _LAN966X_REGS_H_ + +#include <linux/bitfield.h> +#include <linux/types.h> +#include <linux/bug.h> + +enum lan966x_target { + TARGET_AFI = 2, + TARGET_ANA = 3, + TARGET_CHIP_TOP = 5, + TARGET_CPU = 6, + TARGET_DEV = 13, + TARGET_FDMA = 21, + TARGET_GCB = 27, + TARGET_ORG = 36, + TARGET_PTP = 41, + TARGET_QS = 42, + TARGET_QSYS = 46, + TARGET_REW = 47, + TARGET_SYS = 52, + NUM_TARGETS = 66 +}; + +#define __REG(...) 
__VA_ARGS__ + +/* AFI:PORT_TBL:PORT_FRM_OUT */ +#define AFI_PORT_FRM_OUT(g) __REG(TARGET_AFI, 0, 1, 98816, g, 10, 8, 0, 0, 1, 4) + +#define AFI_PORT_FRM_OUT_FRM_OUT_CNT GENMASK(26, 16) +#define AFI_PORT_FRM_OUT_FRM_OUT_CNT_SET(x)\ + FIELD_PREP(AFI_PORT_FRM_OUT_FRM_OUT_CNT, x) +#define AFI_PORT_FRM_OUT_FRM_OUT_CNT_GET(x)\ + FIELD_GET(AFI_PORT_FRM_OUT_FRM_OUT_CNT, x) + +/* AFI:PORT_TBL:PORT_CFG */ +#define AFI_PORT_CFG(g) __REG(TARGET_AFI, 0, 1, 98816, g, 10, 8, 4, 0, 1, 4) + +#define AFI_PORT_CFG_FC_SKIP_TTI_INJ BIT(16) +#define AFI_PORT_CFG_FC_SKIP_TTI_INJ_SET(x)\ + FIELD_PREP(AFI_PORT_CFG_FC_SKIP_TTI_INJ, x) +#define AFI_PORT_CFG_FC_SKIP_TTI_INJ_GET(x)\ + FIELD_GET(AFI_PORT_CFG_FC_SKIP_TTI_INJ, x) + +#define AFI_PORT_CFG_FRM_OUT_MAX GENMASK(9, 0) +#define AFI_PORT_CFG_FRM_OUT_MAX_SET(x)\ + FIELD_PREP(AFI_PORT_CFG_FRM_OUT_MAX, x) +#define AFI_PORT_CFG_FRM_OUT_MAX_GET(x)\ + FIELD_GET(AFI_PORT_CFG_FRM_OUT_MAX, x) + +/* ANA:ANA:ADVLEARN */ +#define ANA_ADVLEARN __REG(TARGET_ANA, 0, 1, 29824, 0, 1, 244, 0, 0, 1, 4) + +#define ANA_ADVLEARN_VLAN_CHK BIT(0) +#define ANA_ADVLEARN_VLAN_CHK_SET(x)\ + FIELD_PREP(ANA_ADVLEARN_VLAN_CHK, x) +#define ANA_ADVLEARN_VLAN_CHK_GET(x)\ + FIELD_GET(ANA_ADVLEARN_VLAN_CHK, x) + +/* ANA:ANA:VLANMASK */ +#define ANA_VLANMASK __REG(TARGET_ANA, 0, 1, 29824, 0, 1, 244, 8, 0, 1, 4) + +/* ANA:ANA:ANAINTR */ +#define ANA_ANAINTR __REG(TARGET_ANA, 0, 1, 29824, 0, 1, 244, 16, 0, 1, 4) + +#define ANA_ANAINTR_INTR BIT(1) +#define ANA_ANAINTR_INTR_SET(x)\ + FIELD_PREP(ANA_ANAINTR_INTR, x) +#define ANA_ANAINTR_INTR_GET(x)\ + FIELD_GET(ANA_ANAINTR_INTR, x) + +#define ANA_ANAINTR_INTR_ENA BIT(0) +#define ANA_ANAINTR_INTR_ENA_SET(x)\ + FIELD_PREP(ANA_ANAINTR_INTR_ENA, x) +#define ANA_ANAINTR_INTR_ENA_GET(x)\ + FIELD_GET(ANA_ANAINTR_INTR_ENA, x) + +/* ANA:ANA:AUTOAGE */ +#define ANA_AUTOAGE __REG(TARGET_ANA, 0, 1, 29824, 0, 1, 244, 44, 0, 1, 4) + +#define ANA_AUTOAGE_AGE_PERIOD GENMASK(20, 1) +#define ANA_AUTOAGE_AGE_PERIOD_SET(x)\ + FIELD_PREP(ANA_AUTOAGE_AGE_PERIOD, x) +#define ANA_AUTOAGE_AGE_PERIOD_GET(x)\ + FIELD_GET(ANA_AUTOAGE_AGE_PERIOD, x) + +/* ANA:ANA:MIRRORPORTS */ +#define ANA_MIRRORPORTS __REG(TARGET_ANA, 0, 1, 29824, 0, 1, 244, 60, 0, 1, 4) + +#define ANA_MIRRORPORTS_MIRRORPORTS GENMASK(8, 0) +#define ANA_MIRRORPORTS_MIRRORPORTS_SET(x)\ + FIELD_PREP(ANA_MIRRORPORTS_MIRRORPORTS, x) +#define ANA_MIRRORPORTS_MIRRORPORTS_GET(x)\ + FIELD_GET(ANA_MIRRORPORTS_MIRRORPORTS, x) + +/* ANA:ANA:EMIRRORPORTS */ +#define ANA_EMIRRORPORTS __REG(TARGET_ANA, 0, 1, 29824, 0, 1, 244, 64, 0, 1, 4) + +#define ANA_EMIRRORPORTS_EMIRRORPORTS GENMASK(8, 0) +#define ANA_EMIRRORPORTS_EMIRRORPORTS_SET(x)\ + FIELD_PREP(ANA_EMIRRORPORTS_EMIRRORPORTS, x) +#define ANA_EMIRRORPORTS_EMIRRORPORTS_GET(x)\ + FIELD_GET(ANA_EMIRRORPORTS_EMIRRORPORTS, x) + +/* ANA:ANA:FLOODING */ +#define ANA_FLOODING(r) __REG(TARGET_ANA, 0, 1, 29824, 0, 1, 244, 68, r, 8, 4) + +#define ANA_FLOODING_FLD_UNICAST GENMASK(17, 12) +#define ANA_FLOODING_FLD_UNICAST_SET(x)\ + FIELD_PREP(ANA_FLOODING_FLD_UNICAST, x) +#define ANA_FLOODING_FLD_UNICAST_GET(x)\ + FIELD_GET(ANA_FLOODING_FLD_UNICAST, x) + +#define ANA_FLOODING_FLD_BROADCAST GENMASK(11, 6) +#define ANA_FLOODING_FLD_BROADCAST_SET(x)\ + FIELD_PREP(ANA_FLOODING_FLD_BROADCAST, x) +#define ANA_FLOODING_FLD_BROADCAST_GET(x)\ + FIELD_GET(ANA_FLOODING_FLD_BROADCAST, x) + +#define ANA_FLOODING_FLD_MULTICAST GENMASK(5, 0) +#define ANA_FLOODING_FLD_MULTICAST_SET(x)\ + FIELD_PREP(ANA_FLOODING_FLD_MULTICAST, x) +#define ANA_FLOODING_FLD_MULTICAST_GET(x)\ + 
FIELD_GET(ANA_FLOODING_FLD_MULTICAST, x) + +/* ANA:ANA:FLOODING_IPMC */ +#define ANA_FLOODING_IPMC __REG(TARGET_ANA, 0, 1, 29824, 0, 1, 244, 100, 0, 1, 4) + +#define ANA_FLOODING_IPMC_FLD_MC4_CTRL GENMASK(23, 18) +#define ANA_FLOODING_IPMC_FLD_MC4_CTRL_SET(x)\ + FIELD_PREP(ANA_FLOODING_IPMC_FLD_MC4_CTRL, x) +#define ANA_FLOODING_IPMC_FLD_MC4_CTRL_GET(x)\ + FIELD_GET(ANA_FLOODING_IPMC_FLD_MC4_CTRL, x) + +#define ANA_FLOODING_IPMC_FLD_MC4_DATA GENMASK(17, 12) +#define ANA_FLOODING_IPMC_FLD_MC4_DATA_SET(x)\ + FIELD_PREP(ANA_FLOODING_IPMC_FLD_MC4_DATA, x) +#define ANA_FLOODING_IPMC_FLD_MC4_DATA_GET(x)\ + FIELD_GET(ANA_FLOODING_IPMC_FLD_MC4_DATA, x) + +#define ANA_FLOODING_IPMC_FLD_MC6_CTRL GENMASK(11, 6) +#define ANA_FLOODING_IPMC_FLD_MC6_CTRL_SET(x)\ + FIELD_PREP(ANA_FLOODING_IPMC_FLD_MC6_CTRL, x) +#define ANA_FLOODING_IPMC_FLD_MC6_CTRL_GET(x)\ + FIELD_GET(ANA_FLOODING_IPMC_FLD_MC6_CTRL, x) + +#define ANA_FLOODING_IPMC_FLD_MC6_DATA GENMASK(5, 0) +#define ANA_FLOODING_IPMC_FLD_MC6_DATA_SET(x)\ + FIELD_PREP(ANA_FLOODING_IPMC_FLD_MC6_DATA, x) +#define ANA_FLOODING_IPMC_FLD_MC6_DATA_GET(x)\ + FIELD_GET(ANA_FLOODING_IPMC_FLD_MC6_DATA, x) + +/* ANA:PGID:PGID */ +#define ANA_PGID(g) __REG(TARGET_ANA, 0, 1, 27648, g, 89, 8, 0, 0, 1, 4) + +#define ANA_PGID_PGID GENMASK(8, 0) +#define ANA_PGID_PGID_SET(x)\ + FIELD_PREP(ANA_PGID_PGID, x) +#define ANA_PGID_PGID_GET(x)\ + FIELD_GET(ANA_PGID_PGID, x) + +/* ANA:PGID:PGID_CFG */ +#define ANA_PGID_CFG(g) __REG(TARGET_ANA, 0, 1, 27648, g, 89, 8, 4, 0, 1, 4) + +#define ANA_PGID_CFG_OBEY_VLAN BIT(0) +#define ANA_PGID_CFG_OBEY_VLAN_SET(x)\ + FIELD_PREP(ANA_PGID_CFG_OBEY_VLAN, x) +#define ANA_PGID_CFG_OBEY_VLAN_GET(x)\ + FIELD_GET(ANA_PGID_CFG_OBEY_VLAN, x) + +/* ANA:ANA_TABLES:MACHDATA */ +#define ANA_MACHDATA __REG(TARGET_ANA, 0, 1, 27520, 0, 1, 128, 40, 0, 1, 4) + +/* ANA:ANA_TABLES:MACLDATA */ +#define ANA_MACLDATA __REG(TARGET_ANA, 0, 1, 27520, 0, 1, 128, 44, 0, 1, 4) + +/* ANA:ANA_TABLES:MACACCESS */ +#define ANA_MACACCESS __REG(TARGET_ANA, 0, 1, 27520, 0, 1, 128, 48, 0, 1, 4) + +#define ANA_MACACCESS_CHANGE2SW BIT(17) +#define ANA_MACACCESS_CHANGE2SW_SET(x)\ + FIELD_PREP(ANA_MACACCESS_CHANGE2SW, x) +#define ANA_MACACCESS_CHANGE2SW_GET(x)\ + FIELD_GET(ANA_MACACCESS_CHANGE2SW, x) + +#define ANA_MACACCESS_MAC_CPU_COPY BIT(16) +#define ANA_MACACCESS_MAC_CPU_COPY_SET(x)\ + FIELD_PREP(ANA_MACACCESS_MAC_CPU_COPY, x) +#define ANA_MACACCESS_MAC_CPU_COPY_GET(x)\ + FIELD_GET(ANA_MACACCESS_MAC_CPU_COPY, x) + +#define ANA_MACACCESS_VALID BIT(12) +#define ANA_MACACCESS_VALID_SET(x)\ + FIELD_PREP(ANA_MACACCESS_VALID, x) +#define ANA_MACACCESS_VALID_GET(x)\ + FIELD_GET(ANA_MACACCESS_VALID, x) + +#define ANA_MACACCESS_ENTRYTYPE GENMASK(11, 10) +#define ANA_MACACCESS_ENTRYTYPE_SET(x)\ + FIELD_PREP(ANA_MACACCESS_ENTRYTYPE, x) +#define ANA_MACACCESS_ENTRYTYPE_GET(x)\ + FIELD_GET(ANA_MACACCESS_ENTRYTYPE, x) + +#define ANA_MACACCESS_DEST_IDX GENMASK(9, 4) +#define ANA_MACACCESS_DEST_IDX_SET(x)\ + FIELD_PREP(ANA_MACACCESS_DEST_IDX, x) +#define ANA_MACACCESS_DEST_IDX_GET(x)\ + FIELD_GET(ANA_MACACCESS_DEST_IDX, x) + +#define ANA_MACACCESS_MAC_TABLE_CMD GENMASK(3, 0) +#define ANA_MACACCESS_MAC_TABLE_CMD_SET(x)\ + FIELD_PREP(ANA_MACACCESS_MAC_TABLE_CMD, x) +#define ANA_MACACCESS_MAC_TABLE_CMD_GET(x)\ + FIELD_GET(ANA_MACACCESS_MAC_TABLE_CMD, x) + +/* ANA:ANA_TABLES:MACTINDX */ +#define ANA_MACTINDX __REG(TARGET_ANA, 0, 1, 27520, 0, 1, 128, 52, 0, 1, 4) + +#define ANA_MACTINDX_BUCKET GENMASK(12, 11) +#define ANA_MACTINDX_BUCKET_SET(x)\ + FIELD_PREP(ANA_MACTINDX_BUCKET, x) +#define 
ANA_MACTINDX_BUCKET_GET(x)\ + FIELD_GET(ANA_MACTINDX_BUCKET, x) + +#define ANA_MACTINDX_M_INDEX GENMASK(10, 0) +#define ANA_MACTINDX_M_INDEX_SET(x)\ + FIELD_PREP(ANA_MACTINDX_M_INDEX, x) +#define ANA_MACTINDX_M_INDEX_GET(x)\ + FIELD_GET(ANA_MACTINDX_M_INDEX, x) + +/* ANA:ANA_TABLES:VLAN_PORT_MASK */ +#define ANA_VLAN_PORT_MASK __REG(TARGET_ANA, 0, 1, 27520, 0, 1, 128, 56, 0, 1, 4) + +#define ANA_VLAN_PORT_MASK_VLAN_PORT_MASK GENMASK(8, 0) +#define ANA_VLAN_PORT_MASK_VLAN_PORT_MASK_SET(x)\ + FIELD_PREP(ANA_VLAN_PORT_MASK_VLAN_PORT_MASK, x) +#define ANA_VLAN_PORT_MASK_VLAN_PORT_MASK_GET(x)\ + FIELD_GET(ANA_VLAN_PORT_MASK_VLAN_PORT_MASK, x) + +/* ANA:ANA_TABLES:VLANACCESS */ +#define ANA_VLANACCESS __REG(TARGET_ANA, 0, 1, 27520, 0, 1, 128, 60, 0, 1, 4) + +#define ANA_VLANACCESS_VLAN_TBL_CMD GENMASK(1, 0) +#define ANA_VLANACCESS_VLAN_TBL_CMD_SET(x)\ + FIELD_PREP(ANA_VLANACCESS_VLAN_TBL_CMD, x) +#define ANA_VLANACCESS_VLAN_TBL_CMD_GET(x)\ + FIELD_GET(ANA_VLANACCESS_VLAN_TBL_CMD, x) + +/* ANA:ANA_TABLES:VLANTIDX */ +#define ANA_VLANTIDX __REG(TARGET_ANA, 0, 1, 27520, 0, 1, 128, 64, 0, 1, 4) + +#define ANA_VLANTIDX_VLAN_PGID_CPU_DIS BIT(18) +#define ANA_VLANTIDX_VLAN_PGID_CPU_DIS_SET(x)\ + FIELD_PREP(ANA_VLANTIDX_VLAN_PGID_CPU_DIS, x) +#define ANA_VLANTIDX_VLAN_PGID_CPU_DIS_GET(x)\ + FIELD_GET(ANA_VLANTIDX_VLAN_PGID_CPU_DIS, x) + +#define ANA_VLANTIDX_V_INDEX GENMASK(11, 0) +#define ANA_VLANTIDX_V_INDEX_SET(x)\ + FIELD_PREP(ANA_VLANTIDX_V_INDEX, x) +#define ANA_VLANTIDX_V_INDEX_GET(x)\ + FIELD_GET(ANA_VLANTIDX_V_INDEX, x) + +/* ANA:PORT:VLAN_CFG */ +#define ANA_VLAN_CFG(g) __REG(TARGET_ANA, 0, 1, 28672, g, 9, 128, 0, 0, 1, 4) + +#define ANA_VLAN_CFG_VLAN_AWARE_ENA BIT(20) +#define ANA_VLAN_CFG_VLAN_AWARE_ENA_SET(x)\ + FIELD_PREP(ANA_VLAN_CFG_VLAN_AWARE_ENA, x) +#define ANA_VLAN_CFG_VLAN_AWARE_ENA_GET(x)\ + FIELD_GET(ANA_VLAN_CFG_VLAN_AWARE_ENA, x) + +#define ANA_VLAN_CFG_VLAN_POP_CNT GENMASK(19, 18) +#define ANA_VLAN_CFG_VLAN_POP_CNT_SET(x)\ + FIELD_PREP(ANA_VLAN_CFG_VLAN_POP_CNT, x) +#define ANA_VLAN_CFG_VLAN_POP_CNT_GET(x)\ + FIELD_GET(ANA_VLAN_CFG_VLAN_POP_CNT, x) + +#define ANA_VLAN_CFG_VLAN_VID GENMASK(11, 0) +#define ANA_VLAN_CFG_VLAN_VID_SET(x)\ + FIELD_PREP(ANA_VLAN_CFG_VLAN_VID, x) +#define ANA_VLAN_CFG_VLAN_VID_GET(x)\ + FIELD_GET(ANA_VLAN_CFG_VLAN_VID, x) + +/* ANA:PORT:DROP_CFG */ +#define ANA_DROP_CFG(g) __REG(TARGET_ANA, 0, 1, 28672, g, 9, 128, 4, 0, 1, 4) + +#define ANA_DROP_CFG_DROP_UNTAGGED_ENA BIT(6) +#define ANA_DROP_CFG_DROP_UNTAGGED_ENA_SET(x)\ + FIELD_PREP(ANA_DROP_CFG_DROP_UNTAGGED_ENA, x) +#define ANA_DROP_CFG_DROP_UNTAGGED_ENA_GET(x)\ + FIELD_GET(ANA_DROP_CFG_DROP_UNTAGGED_ENA, x) + +#define ANA_DROP_CFG_DROP_PRIO_S_TAGGED_ENA BIT(3) +#define ANA_DROP_CFG_DROP_PRIO_S_TAGGED_ENA_SET(x)\ + FIELD_PREP(ANA_DROP_CFG_DROP_PRIO_S_TAGGED_ENA, x) +#define ANA_DROP_CFG_DROP_PRIO_S_TAGGED_ENA_GET(x)\ + FIELD_GET(ANA_DROP_CFG_DROP_PRIO_S_TAGGED_ENA, x) + +#define ANA_DROP_CFG_DROP_PRIO_C_TAGGED_ENA BIT(2) +#define ANA_DROP_CFG_DROP_PRIO_C_TAGGED_ENA_SET(x)\ + FIELD_PREP(ANA_DROP_CFG_DROP_PRIO_C_TAGGED_ENA, x) +#define ANA_DROP_CFG_DROP_PRIO_C_TAGGED_ENA_GET(x)\ + FIELD_GET(ANA_DROP_CFG_DROP_PRIO_C_TAGGED_ENA, x) + +#define ANA_DROP_CFG_DROP_MC_SMAC_ENA BIT(0) +#define ANA_DROP_CFG_DROP_MC_SMAC_ENA_SET(x)\ + FIELD_PREP(ANA_DROP_CFG_DROP_MC_SMAC_ENA, x) +#define ANA_DROP_CFG_DROP_MC_SMAC_ENA_GET(x)\ + FIELD_GET(ANA_DROP_CFG_DROP_MC_SMAC_ENA, x) + +/* ANA:PORT:CPU_FWD_CFG */ +#define ANA_CPU_FWD_CFG(g) __REG(TARGET_ANA, 0, 1, 28672, g, 9, 128, 96, 0, 1, 4) + +#define 
ANA_CPU_FWD_CFG_MLD_REDIR_ENA BIT(6) +#define ANA_CPU_FWD_CFG_MLD_REDIR_ENA_SET(x)\ + FIELD_PREP(ANA_CPU_FWD_CFG_MLD_REDIR_ENA, x) +#define ANA_CPU_FWD_CFG_MLD_REDIR_ENA_GET(x)\ + FIELD_GET(ANA_CPU_FWD_CFG_MLD_REDIR_ENA, x) + +#define ANA_CPU_FWD_CFG_IGMP_REDIR_ENA BIT(5) +#define ANA_CPU_FWD_CFG_IGMP_REDIR_ENA_SET(x)\ + FIELD_PREP(ANA_CPU_FWD_CFG_IGMP_REDIR_ENA, x) +#define ANA_CPU_FWD_CFG_IGMP_REDIR_ENA_GET(x)\ + FIELD_GET(ANA_CPU_FWD_CFG_IGMP_REDIR_ENA, x) + +#define ANA_CPU_FWD_CFG_IPMC_CTRL_COPY_ENA BIT(4) +#define ANA_CPU_FWD_CFG_IPMC_CTRL_COPY_ENA_SET(x)\ + FIELD_PREP(ANA_CPU_FWD_CFG_IPMC_CTRL_COPY_ENA, x) +#define ANA_CPU_FWD_CFG_IPMC_CTRL_COPY_ENA_GET(x)\ + FIELD_GET(ANA_CPU_FWD_CFG_IPMC_CTRL_COPY_ENA, x) + +#define ANA_CPU_FWD_CFG_SRC_COPY_ENA BIT(3) +#define ANA_CPU_FWD_CFG_SRC_COPY_ENA_SET(x)\ + FIELD_PREP(ANA_CPU_FWD_CFG_SRC_COPY_ENA, x) +#define ANA_CPU_FWD_CFG_SRC_COPY_ENA_GET(x)\ + FIELD_GET(ANA_CPU_FWD_CFG_SRC_COPY_ENA, x) + +/* ANA:PORT:CPU_FWD_BPDU_CFG */ +#define ANA_CPU_FWD_BPDU_CFG(g) __REG(TARGET_ANA, 0, 1, 28672, g, 9, 128, 100, 0, 1, 4) + +/* ANA:PORT:PORT_CFG */ +#define ANA_PORT_CFG(g) __REG(TARGET_ANA, 0, 1, 28672, g, 9, 128, 112, 0, 1, 4) + +#define ANA_PORT_CFG_SRC_MIRROR_ENA BIT(13) +#define ANA_PORT_CFG_SRC_MIRROR_ENA_SET(x)\ + FIELD_PREP(ANA_PORT_CFG_SRC_MIRROR_ENA, x) +#define ANA_PORT_CFG_SRC_MIRROR_ENA_GET(x)\ + FIELD_GET(ANA_PORT_CFG_SRC_MIRROR_ENA, x) + +#define ANA_PORT_CFG_LEARNAUTO BIT(6) +#define ANA_PORT_CFG_LEARNAUTO_SET(x)\ + FIELD_PREP(ANA_PORT_CFG_LEARNAUTO, x) +#define ANA_PORT_CFG_LEARNAUTO_GET(x)\ + FIELD_GET(ANA_PORT_CFG_LEARNAUTO, x) + +#define ANA_PORT_CFG_LEARN_ENA BIT(5) +#define ANA_PORT_CFG_LEARN_ENA_SET(x)\ + FIELD_PREP(ANA_PORT_CFG_LEARN_ENA, x) +#define ANA_PORT_CFG_LEARN_ENA_GET(x)\ + FIELD_GET(ANA_PORT_CFG_LEARN_ENA, x) + +#define ANA_PORT_CFG_RECV_ENA BIT(4) +#define ANA_PORT_CFG_RECV_ENA_SET(x)\ + FIELD_PREP(ANA_PORT_CFG_RECV_ENA, x) +#define ANA_PORT_CFG_RECV_ENA_GET(x)\ + FIELD_GET(ANA_PORT_CFG_RECV_ENA, x) + +#define ANA_PORT_CFG_PORTID_VAL GENMASK(3, 0) +#define ANA_PORT_CFG_PORTID_VAL_SET(x)\ + FIELD_PREP(ANA_PORT_CFG_PORTID_VAL, x) +#define ANA_PORT_CFG_PORTID_VAL_GET(x)\ + FIELD_GET(ANA_PORT_CFG_PORTID_VAL, x) + +/* ANA:PORT:POL_CFG */ +#define ANA_POL_CFG(g) __REG(TARGET_ANA, 0, 1, 28672, g, 9, 128, 116, 0, 1, 4) + +#define ANA_POL_CFG_PORT_POL_ENA BIT(17) +#define ANA_POL_CFG_PORT_POL_ENA_SET(x)\ + FIELD_PREP(ANA_POL_CFG_PORT_POL_ENA, x) +#define ANA_POL_CFG_PORT_POL_ENA_GET(x)\ + FIELD_GET(ANA_POL_CFG_PORT_POL_ENA, x) + +#define ANA_POL_CFG_POL_ORDER GENMASK(8, 0) +#define ANA_POL_CFG_POL_ORDER_SET(x)\ + FIELD_PREP(ANA_POL_CFG_POL_ORDER, x) +#define ANA_POL_CFG_POL_ORDER_GET(x)\ + FIELD_GET(ANA_POL_CFG_POL_ORDER, x) + +/* ANA:PFC:PFC_CFG */ +#define ANA_PFC_CFG(g) __REG(TARGET_ANA, 0, 1, 30720, g, 8, 64, 0, 0, 1, 4) + +#define ANA_PFC_CFG_FC_LINK_SPEED GENMASK(1, 0) +#define ANA_PFC_CFG_FC_LINK_SPEED_SET(x)\ + FIELD_PREP(ANA_PFC_CFG_FC_LINK_SPEED, x) +#define ANA_PFC_CFG_FC_LINK_SPEED_GET(x)\ + FIELD_GET(ANA_PFC_CFG_FC_LINK_SPEED, x) + +/* ANA:COMMON:AGGR_CFG */ +#define ANA_AGGR_CFG __REG(TARGET_ANA, 0, 1, 31232, 0, 1, 552, 0, 0, 1, 4) + +#define ANA_AGGR_CFG_AC_RND_ENA BIT(6) +#define ANA_AGGR_CFG_AC_RND_ENA_SET(x)\ + FIELD_PREP(ANA_AGGR_CFG_AC_RND_ENA, x) +#define ANA_AGGR_CFG_AC_RND_ENA_GET(x)\ + FIELD_GET(ANA_AGGR_CFG_AC_RND_ENA, x) + +#define ANA_AGGR_CFG_AC_DMAC_ENA BIT(5) +#define ANA_AGGR_CFG_AC_DMAC_ENA_SET(x)\ + FIELD_PREP(ANA_AGGR_CFG_AC_DMAC_ENA, x) +#define ANA_AGGR_CFG_AC_DMAC_ENA_GET(x)\ + 
FIELD_GET(ANA_AGGR_CFG_AC_DMAC_ENA, x) + +#define ANA_AGGR_CFG_AC_SMAC_ENA BIT(4) +#define ANA_AGGR_CFG_AC_SMAC_ENA_SET(x)\ + FIELD_PREP(ANA_AGGR_CFG_AC_SMAC_ENA, x) +#define ANA_AGGR_CFG_AC_SMAC_ENA_GET(x)\ + FIELD_GET(ANA_AGGR_CFG_AC_SMAC_ENA, x) + +#define ANA_AGGR_CFG_AC_IP6_FLOW_LBL_ENA BIT(3) +#define ANA_AGGR_CFG_AC_IP6_FLOW_LBL_ENA_SET(x)\ + FIELD_PREP(ANA_AGGR_CFG_AC_IP6_FLOW_LBL_ENA, x) +#define ANA_AGGR_CFG_AC_IP6_FLOW_LBL_ENA_GET(x)\ + FIELD_GET(ANA_AGGR_CFG_AC_IP6_FLOW_LBL_ENA, x) + +#define ANA_AGGR_CFG_AC_IP6_TCPUDP_ENA BIT(2) +#define ANA_AGGR_CFG_AC_IP6_TCPUDP_ENA_SET(x)\ + FIELD_PREP(ANA_AGGR_CFG_AC_IP6_TCPUDP_ENA, x) +#define ANA_AGGR_CFG_AC_IP6_TCPUDP_ENA_GET(x)\ + FIELD_GET(ANA_AGGR_CFG_AC_IP6_TCPUDP_ENA, x) + +#define ANA_AGGR_CFG_AC_IP4_SIPDIP_ENA BIT(1) +#define ANA_AGGR_CFG_AC_IP4_SIPDIP_ENA_SET(x)\ + FIELD_PREP(ANA_AGGR_CFG_AC_IP4_SIPDIP_ENA, x) +#define ANA_AGGR_CFG_AC_IP4_SIPDIP_ENA_GET(x)\ + FIELD_GET(ANA_AGGR_CFG_AC_IP4_SIPDIP_ENA, x) + +#define ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA BIT(0) +#define ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA_SET(x)\ + FIELD_PREP(ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA, x) +#define ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA_GET(x)\ + FIELD_GET(ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA, x) + +/* ANA:POL:POL_PIR_CFG */ +#define ANA_POL_PIR_CFG(g) __REG(TARGET_ANA, 0, 1, 16384, g, 345, 32, 0, 0, 1, 4) + +#define ANA_POL_PIR_CFG_PIR_RATE GENMASK(20, 6) +#define ANA_POL_PIR_CFG_PIR_RATE_SET(x)\ + FIELD_PREP(ANA_POL_PIR_CFG_PIR_RATE, x) +#define ANA_POL_PIR_CFG_PIR_RATE_GET(x)\ + FIELD_GET(ANA_POL_PIR_CFG_PIR_RATE, x) + +#define ANA_POL_PIR_CFG_PIR_BURST GENMASK(5, 0) +#define ANA_POL_PIR_CFG_PIR_BURST_SET(x)\ + FIELD_PREP(ANA_POL_PIR_CFG_PIR_BURST, x) +#define ANA_POL_PIR_CFG_PIR_BURST_GET(x)\ + FIELD_GET(ANA_POL_PIR_CFG_PIR_BURST, x) + +/* ANA:POL:POL_MODE_CFG */ +#define ANA_POL_MODE(g) __REG(TARGET_ANA, 0, 1, 16384, g, 345, 32, 8, 0, 1, 4) + +#define ANA_POL_MODE_DROP_ON_YELLOW_ENA BIT(11) +#define ANA_POL_MODE_DROP_ON_YELLOW_ENA_SET(x)\ + FIELD_PREP(ANA_POL_MODE_DROP_ON_YELLOW_ENA, x) +#define ANA_POL_MODE_DROP_ON_YELLOW_ENA_GET(x)\ + FIELD_GET(ANA_POL_MODE_DROP_ON_YELLOW_ENA, x) + +#define ANA_POL_MODE_MARK_ALL_FRMS_RED_ENA BIT(10) +#define ANA_POL_MODE_MARK_ALL_FRMS_RED_ENA_SET(x)\ + FIELD_PREP(ANA_POL_MODE_MARK_ALL_FRMS_RED_ENA, x) +#define ANA_POL_MODE_MARK_ALL_FRMS_RED_ENA_GET(x)\ + FIELD_GET(ANA_POL_MODE_MARK_ALL_FRMS_RED_ENA, x) + +#define ANA_POL_MODE_IPG_SIZE GENMASK(9, 5) +#define ANA_POL_MODE_IPG_SIZE_SET(x)\ + FIELD_PREP(ANA_POL_MODE_IPG_SIZE, x) +#define ANA_POL_MODE_IPG_SIZE_GET(x)\ + FIELD_GET(ANA_POL_MODE_IPG_SIZE, x) + +#define ANA_POL_MODE_FRM_MODE GENMASK(4, 3) +#define ANA_POL_MODE_FRM_MODE_SET(x)\ + FIELD_PREP(ANA_POL_MODE_FRM_MODE, x) +#define ANA_POL_MODE_FRM_MODE_GET(x)\ + FIELD_GET(ANA_POL_MODE_FRM_MODE, x) + +#define ANA_POL_MODE_OVERSHOOT_ENA BIT(0) +#define ANA_POL_MODE_OVERSHOOT_ENA_SET(x)\ + FIELD_PREP(ANA_POL_MODE_OVERSHOOT_ENA, x) +#define ANA_POL_MODE_OVERSHOOT_ENA_GET(x)\ + FIELD_GET(ANA_POL_MODE_OVERSHOOT_ENA, x) + +/* ANA:POL:POL_PIR_STATE */ +#define ANA_POL_PIR_STATE(g) __REG(TARGET_ANA, 0, 1, 16384, g, 345, 32, 12, 0, 1, 4) + +#define ANA_POL_PIR_STATE_PIR_LVL GENMASK(21, 0) +#define ANA_POL_PIR_STATE_PIR_LVL_SET(x)\ + FIELD_PREP(ANA_POL_PIR_STATE_PIR_LVL, x) +#define ANA_POL_PIR_STATE_PIR_LVL_GET(x)\ + FIELD_GET(ANA_POL_PIR_STATE_PIR_LVL, x) + +/* CHIP_TOP:CUPHY_CFG:CUPHY_PORT_CFG */ +#define CHIP_TOP_CUPHY_PORT_CFG(r) __REG(TARGET_CHIP_TOP, 0, 1, 16, 0, 1, 20, 8, r, 2, 4) + +#define CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA BIT(0) +#define 
CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA_SET(x)\ + FIELD_PREP(CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA, x) +#define CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA_GET(x)\ + FIELD_GET(CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA, x) + +/* DEV:PORT_MODE:CLOCK_CFG */ +#define DEV_CLOCK_CFG(t) __REG(TARGET_DEV, t, 8, 0, 0, 1, 28, 0, 0, 1, 4) + +#define DEV_CLOCK_CFG_MAC_TX_RST BIT(7) +#define DEV_CLOCK_CFG_MAC_TX_RST_SET(x)\ + FIELD_PREP(DEV_CLOCK_CFG_MAC_TX_RST, x) +#define DEV_CLOCK_CFG_MAC_TX_RST_GET(x)\ + FIELD_GET(DEV_CLOCK_CFG_MAC_TX_RST, x) + +#define DEV_CLOCK_CFG_MAC_RX_RST BIT(6) +#define DEV_CLOCK_CFG_MAC_RX_RST_SET(x)\ + FIELD_PREP(DEV_CLOCK_CFG_MAC_RX_RST, x) +#define DEV_CLOCK_CFG_MAC_RX_RST_GET(x)\ + FIELD_GET(DEV_CLOCK_CFG_MAC_RX_RST, x) + +#define DEV_CLOCK_CFG_PCS_TX_RST BIT(5) +#define DEV_CLOCK_CFG_PCS_TX_RST_SET(x)\ + FIELD_PREP(DEV_CLOCK_CFG_PCS_TX_RST, x) +#define DEV_CLOCK_CFG_PCS_TX_RST_GET(x)\ + FIELD_GET(DEV_CLOCK_CFG_PCS_TX_RST, x) + +#define DEV_CLOCK_CFG_PCS_RX_RST BIT(4) +#define DEV_CLOCK_CFG_PCS_RX_RST_SET(x)\ + FIELD_PREP(DEV_CLOCK_CFG_PCS_RX_RST, x) +#define DEV_CLOCK_CFG_PCS_RX_RST_GET(x)\ + FIELD_GET(DEV_CLOCK_CFG_PCS_RX_RST, x) + +#define DEV_CLOCK_CFG_PORT_RST BIT(3) +#define DEV_CLOCK_CFG_PORT_RST_SET(x)\ + FIELD_PREP(DEV_CLOCK_CFG_PORT_RST, x) +#define DEV_CLOCK_CFG_PORT_RST_GET(x)\ + FIELD_GET(DEV_CLOCK_CFG_PORT_RST, x) + +#define DEV_CLOCK_CFG_LINK_SPEED GENMASK(1, 0) +#define DEV_CLOCK_CFG_LINK_SPEED_SET(x)\ + FIELD_PREP(DEV_CLOCK_CFG_LINK_SPEED, x) +#define DEV_CLOCK_CFG_LINK_SPEED_GET(x)\ + FIELD_GET(DEV_CLOCK_CFG_LINK_SPEED, x) + +/* DEV:MAC_CFG_STATUS:MAC_ENA_CFG */ +#define DEV_MAC_ENA_CFG(t) __REG(TARGET_DEV, t, 8, 28, 0, 1, 44, 0, 0, 1, 4) + +#define DEV_MAC_ENA_CFG_RX_ENA BIT(4) +#define DEV_MAC_ENA_CFG_RX_ENA_SET(x)\ + FIELD_PREP(DEV_MAC_ENA_CFG_RX_ENA, x) +#define DEV_MAC_ENA_CFG_RX_ENA_GET(x)\ + FIELD_GET(DEV_MAC_ENA_CFG_RX_ENA, x) + +#define DEV_MAC_ENA_CFG_TX_ENA BIT(0) +#define DEV_MAC_ENA_CFG_TX_ENA_SET(x)\ + FIELD_PREP(DEV_MAC_ENA_CFG_TX_ENA, x) +#define DEV_MAC_ENA_CFG_TX_ENA_GET(x)\ + FIELD_GET(DEV_MAC_ENA_CFG_TX_ENA, x) + +/* DEV:MAC_CFG_STATUS:MAC_MODE_CFG */ +#define DEV_MAC_MODE_CFG(t) __REG(TARGET_DEV, t, 8, 28, 0, 1, 44, 4, 0, 1, 4) + +#define DEV_MAC_MODE_CFG_GIGA_MODE_ENA BIT(4) +#define DEV_MAC_MODE_CFG_GIGA_MODE_ENA_SET(x)\ + FIELD_PREP(DEV_MAC_MODE_CFG_GIGA_MODE_ENA, x) +#define DEV_MAC_MODE_CFG_GIGA_MODE_ENA_GET(x)\ + FIELD_GET(DEV_MAC_MODE_CFG_GIGA_MODE_ENA, x) + +/* DEV:MAC_CFG_STATUS:MAC_MAXLEN_CFG */ +#define DEV_MAC_MAXLEN_CFG(t) __REG(TARGET_DEV, t, 8, 28, 0, 1, 44, 8, 0, 1, 4) + +#define DEV_MAC_MAXLEN_CFG_MAX_LEN GENMASK(15, 0) +#define DEV_MAC_MAXLEN_CFG_MAX_LEN_SET(x)\ + FIELD_PREP(DEV_MAC_MAXLEN_CFG_MAX_LEN, x) +#define DEV_MAC_MAXLEN_CFG_MAX_LEN_GET(x)\ + FIELD_GET(DEV_MAC_MAXLEN_CFG_MAX_LEN, x) + +/* DEV:MAC_CFG_STATUS:MAC_TAGS_CFG */ +#define DEV_MAC_TAGS_CFG(t) __REG(TARGET_DEV, t, 8, 28, 0, 1, 44, 12, 0, 1, 4) + +#define DEV_MAC_TAGS_CFG_VLAN_DBL_AWR_ENA BIT(1) +#define DEV_MAC_TAGS_CFG_VLAN_DBL_AWR_ENA_SET(x)\ + FIELD_PREP(DEV_MAC_TAGS_CFG_VLAN_DBL_AWR_ENA, x) +#define DEV_MAC_TAGS_CFG_VLAN_DBL_AWR_ENA_GET(x)\ + FIELD_GET(DEV_MAC_TAGS_CFG_VLAN_DBL_AWR_ENA, x) + +#define DEV_MAC_TAGS_CFG_VLAN_AWR_ENA BIT(0) +#define DEV_MAC_TAGS_CFG_VLAN_AWR_ENA_SET(x)\ + FIELD_PREP(DEV_MAC_TAGS_CFG_VLAN_AWR_ENA, x) +#define DEV_MAC_TAGS_CFG_VLAN_AWR_ENA_GET(x)\ + FIELD_GET(DEV_MAC_TAGS_CFG_VLAN_AWR_ENA, x) + +/* DEV:MAC_CFG_STATUS:MAC_IFG_CFG */ +#define DEV_MAC_IFG_CFG(t) __REG(TARGET_DEV, t, 8, 28, 0, 1, 44, 20, 0, 1, 4) + +#define 
DEV_MAC_IFG_CFG_TX_IFG GENMASK(12, 8) +#define DEV_MAC_IFG_CFG_TX_IFG_SET(x)\ + FIELD_PREP(DEV_MAC_IFG_CFG_TX_IFG, x) +#define DEV_MAC_IFG_CFG_TX_IFG_GET(x)\ + FIELD_GET(DEV_MAC_IFG_CFG_TX_IFG, x) + +#define DEV_MAC_IFG_CFG_RX_IFG2 GENMASK(7, 4) +#define DEV_MAC_IFG_CFG_RX_IFG2_SET(x)\ + FIELD_PREP(DEV_MAC_IFG_CFG_RX_IFG2, x) +#define DEV_MAC_IFG_CFG_RX_IFG2_GET(x)\ + FIELD_GET(DEV_MAC_IFG_CFG_RX_IFG2, x) + +#define DEV_MAC_IFG_CFG_RX_IFG1 GENMASK(3, 0) +#define DEV_MAC_IFG_CFG_RX_IFG1_SET(x)\ + FIELD_PREP(DEV_MAC_IFG_CFG_RX_IFG1, x) +#define DEV_MAC_IFG_CFG_RX_IFG1_GET(x)\ + FIELD_GET(DEV_MAC_IFG_CFG_RX_IFG1, x) + +/* DEV:MAC_CFG_STATUS:MAC_HDX_CFG */ +#define DEV_MAC_HDX_CFG(t) __REG(TARGET_DEV, t, 8, 28, 0, 1, 44, 24, 0, 1, 4) + +#define DEV_MAC_HDX_CFG_SEED GENMASK(23, 16) +#define DEV_MAC_HDX_CFG_SEED_SET(x)\ + FIELD_PREP(DEV_MAC_HDX_CFG_SEED, x) +#define DEV_MAC_HDX_CFG_SEED_GET(x)\ + FIELD_GET(DEV_MAC_HDX_CFG_SEED, x) + +#define DEV_MAC_HDX_CFG_SEED_LOAD BIT(12) +#define DEV_MAC_HDX_CFG_SEED_LOAD_SET(x)\ + FIELD_PREP(DEV_MAC_HDX_CFG_SEED_LOAD, x) +#define DEV_MAC_HDX_CFG_SEED_LOAD_GET(x)\ + FIELD_GET(DEV_MAC_HDX_CFG_SEED_LOAD, x) + +/* DEV:MAC_CFG_STATUS:MAC_FC_MAC_LOW_CFG */ +#define DEV_FC_MAC_LOW_CFG(t) __REG(TARGET_DEV, t, 8, 28, 0, 1, 44, 32, 0, 1, 4) + +/* DEV:MAC_CFG_STATUS:MAC_FC_MAC_HIGH_CFG */ +#define DEV_FC_MAC_HIGH_CFG(t) __REG(TARGET_DEV, t, 8, 28, 0, 1, 44, 36, 0, 1, 4) + +/* DEV:PCS1G_CFG_STATUS:PCS1G_CFG */ +#define DEV_PCS1G_CFG(t) __REG(TARGET_DEV, t, 8, 72, 0, 1, 68, 0, 0, 1, 4) + +#define DEV_PCS1G_CFG_PCS_ENA BIT(0) +#define DEV_PCS1G_CFG_PCS_ENA_SET(x)\ + FIELD_PREP(DEV_PCS1G_CFG_PCS_ENA, x) +#define DEV_PCS1G_CFG_PCS_ENA_GET(x)\ + FIELD_GET(DEV_PCS1G_CFG_PCS_ENA, x) + +/* DEV:PCS1G_CFG_STATUS:PCS1G_MODE_CFG */ +#define DEV_PCS1G_MODE_CFG(t) __REG(TARGET_DEV, t, 8, 72, 0, 1, 68, 4, 0, 1, 4) + +#define DEV_PCS1G_MODE_CFG_SGMII_MODE_ENA BIT(0) +#define DEV_PCS1G_MODE_CFG_SGMII_MODE_ENA_SET(x)\ + FIELD_PREP(DEV_PCS1G_MODE_CFG_SGMII_MODE_ENA, x) +#define DEV_PCS1G_MODE_CFG_SGMII_MODE_ENA_GET(x)\ + FIELD_GET(DEV_PCS1G_MODE_CFG_SGMII_MODE_ENA, x) + +#define DEV_PCS1G_MODE_CFG_SAVE_PREAMBLE_ENA BIT(1) +#define DEV_PCS1G_MODE_CFG_SAVE_PREAMBLE_ENA_SET(x)\ + FIELD_PREP(DEV_PCS1G_MODE_CFG_SAVE_PREAMBLE_ENA, x) +#define DEV_PCS1G_MODE_CFG_SAVE_PREAMBLE_ENA_GET(x)\ + FIELD_GET(DEV_PCS1G_MODE_CFG_SAVE_PREAMBLE_ENA, x) + +/* DEV:PCS1G_CFG_STATUS:PCS1G_SD_CFG */ +#define DEV_PCS1G_SD_CFG(t) __REG(TARGET_DEV, t, 8, 72, 0, 1, 68, 8, 0, 1, 4) + +#define DEV_PCS1G_SD_CFG_SD_ENA BIT(0) +#define DEV_PCS1G_SD_CFG_SD_ENA_SET(x)\ + FIELD_PREP(DEV_PCS1G_SD_CFG_SD_ENA, x) +#define DEV_PCS1G_SD_CFG_SD_ENA_GET(x)\ + FIELD_GET(DEV_PCS1G_SD_CFG_SD_ENA, x) + +/* DEV:PCS1G_CFG_STATUS:PCS1G_ANEG_CFG */ +#define DEV_PCS1G_ANEG_CFG(t) __REG(TARGET_DEV, t, 8, 72, 0, 1, 68, 12, 0, 1, 4) + +#define DEV_PCS1G_ANEG_CFG_ADV_ABILITY GENMASK(31, 16) +#define DEV_PCS1G_ANEG_CFG_ADV_ABILITY_SET(x)\ + FIELD_PREP(DEV_PCS1G_ANEG_CFG_ADV_ABILITY, x) +#define DEV_PCS1G_ANEG_CFG_ADV_ABILITY_GET(x)\ + FIELD_GET(DEV_PCS1G_ANEG_CFG_ADV_ABILITY, x) + +#define DEV_PCS1G_ANEG_CFG_SW_RESOLVE_ENA BIT(8) +#define DEV_PCS1G_ANEG_CFG_SW_RESOLVE_ENA_SET(x)\ + FIELD_PREP(DEV_PCS1G_ANEG_CFG_SW_RESOLVE_ENA, x) +#define DEV_PCS1G_ANEG_CFG_SW_RESOLVE_ENA_GET(x)\ + FIELD_GET(DEV_PCS1G_ANEG_CFG_SW_RESOLVE_ENA, x) + +#define DEV_PCS1G_ANEG_CFG_RESTART_ONE_SHOT BIT(1) +#define DEV_PCS1G_ANEG_CFG_RESTART_ONE_SHOT_SET(x)\ + FIELD_PREP(DEV_PCS1G_ANEG_CFG_RESTART_ONE_SHOT, x) +#define DEV_PCS1G_ANEG_CFG_RESTART_ONE_SHOT_GET(x)\ + 
FIELD_GET(DEV_PCS1G_ANEG_CFG_RESTART_ONE_SHOT, x) + +#define DEV_PCS1G_ANEG_CFG_ENA BIT(0) +#define DEV_PCS1G_ANEG_CFG_ENA_SET(x)\ + FIELD_PREP(DEV_PCS1G_ANEG_CFG_ENA, x) +#define DEV_PCS1G_ANEG_CFG_ENA_GET(x)\ + FIELD_GET(DEV_PCS1G_ANEG_CFG_ENA, x) + +/* DEV:PCS1G_CFG_STATUS:PCS1G_ANEG_STATUS */ +#define DEV_PCS1G_ANEG_STATUS(t) __REG(TARGET_DEV, t, 8, 72, 0, 1, 68, 32, 0, 1, 4) + +#define DEV_PCS1G_ANEG_STATUS_LP_ADV GENMASK(31, 16) +#define DEV_PCS1G_ANEG_STATUS_LP_ADV_SET(x)\ + FIELD_PREP(DEV_PCS1G_ANEG_STATUS_LP_ADV, x) +#define DEV_PCS1G_ANEG_STATUS_LP_ADV_GET(x)\ + FIELD_GET(DEV_PCS1G_ANEG_STATUS_LP_ADV, x) + +#define DEV_PCS1G_ANEG_STATUS_ANEG_COMPLETE BIT(0) +#define DEV_PCS1G_ANEG_STATUS_ANEG_COMPLETE_SET(x)\ + FIELD_PREP(DEV_PCS1G_ANEG_STATUS_ANEG_COMPLETE, x) +#define DEV_PCS1G_ANEG_STATUS_ANEG_COMPLETE_GET(x)\ + FIELD_GET(DEV_PCS1G_ANEG_STATUS_ANEG_COMPLETE, x) + +/* DEV:PCS1G_CFG_STATUS:PCS1G_LINK_STATUS */ +#define DEV_PCS1G_LINK_STATUS(t) __REG(TARGET_DEV, t, 8, 72, 0, 1, 68, 40, 0, 1, 4) + +#define DEV_PCS1G_LINK_STATUS_LINK_STATUS BIT(4) +#define DEV_PCS1G_LINK_STATUS_LINK_STATUS_SET(x)\ + FIELD_PREP(DEV_PCS1G_LINK_STATUS_LINK_STATUS, x) +#define DEV_PCS1G_LINK_STATUS_LINK_STATUS_GET(x)\ + FIELD_GET(DEV_PCS1G_LINK_STATUS_LINK_STATUS, x) + +#define DEV_PCS1G_LINK_STATUS_SYNC_STATUS BIT(0) +#define DEV_PCS1G_LINK_STATUS_SYNC_STATUS_SET(x)\ + FIELD_PREP(DEV_PCS1G_LINK_STATUS_SYNC_STATUS, x) +#define DEV_PCS1G_LINK_STATUS_SYNC_STATUS_GET(x)\ + FIELD_GET(DEV_PCS1G_LINK_STATUS_SYNC_STATUS, x) + +/* DEV:PCS1G_CFG_STATUS:PCS1G_STICKY */ +#define DEV_PCS1G_STICKY(t) __REG(TARGET_DEV, t, 8, 72, 0, 1, 68, 48, 0, 1, 4) + +#define DEV_PCS1G_STICKY_LINK_DOWN_STICKY BIT(4) +#define DEV_PCS1G_STICKY_LINK_DOWN_STICKY_SET(x)\ + FIELD_PREP(DEV_PCS1G_STICKY_LINK_DOWN_STICKY, x) +#define DEV_PCS1G_STICKY_LINK_DOWN_STICKY_GET(x)\ + FIELD_GET(DEV_PCS1G_STICKY_LINK_DOWN_STICKY, x) + +/* FDMA:FDMA:FDMA_CH_ACTIVATE */ +#define FDMA_CH_ACTIVATE __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 0, 0, 1, 4) + +#define FDMA_CH_ACTIVATE_CH_ACTIVATE GENMASK(7, 0) +#define FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(x)\ + FIELD_PREP(FDMA_CH_ACTIVATE_CH_ACTIVATE, x) +#define FDMA_CH_ACTIVATE_CH_ACTIVATE_GET(x)\ + FIELD_GET(FDMA_CH_ACTIVATE_CH_ACTIVATE, x) + +/* FDMA:FDMA:FDMA_CH_RELOAD */ +#define FDMA_CH_RELOAD __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 4, 0, 1, 4) + +#define FDMA_CH_RELOAD_CH_RELOAD GENMASK(7, 0) +#define FDMA_CH_RELOAD_CH_RELOAD_SET(x)\ + FIELD_PREP(FDMA_CH_RELOAD_CH_RELOAD, x) +#define FDMA_CH_RELOAD_CH_RELOAD_GET(x)\ + FIELD_GET(FDMA_CH_RELOAD_CH_RELOAD, x) + +/* FDMA:FDMA:FDMA_CH_DISABLE */ +#define FDMA_CH_DISABLE __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 8, 0, 1, 4) + +#define FDMA_CH_DISABLE_CH_DISABLE GENMASK(7, 0) +#define FDMA_CH_DISABLE_CH_DISABLE_SET(x)\ + FIELD_PREP(FDMA_CH_DISABLE_CH_DISABLE, x) +#define FDMA_CH_DISABLE_CH_DISABLE_GET(x)\ + FIELD_GET(FDMA_CH_DISABLE_CH_DISABLE, x) + +/* FDMA:FDMA:FDMA_CH_DB_DISCARD */ +#define FDMA_CH_DB_DISCARD __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 16, 0, 1, 4) + +#define FDMA_CH_DB_DISCARD_DB_DISCARD GENMASK(7, 0) +#define FDMA_CH_DB_DISCARD_DB_DISCARD_SET(x)\ + FIELD_PREP(FDMA_CH_DB_DISCARD_DB_DISCARD, x) +#define FDMA_CH_DB_DISCARD_DB_DISCARD_GET(x)\ + FIELD_GET(FDMA_CH_DB_DISCARD_DB_DISCARD, x) + +/* FDMA:FDMA:FDMA_DCB_LLP */ +#define FDMA_DCB_LLP(r) __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 52, r, 8, 4) + +/* FDMA:FDMA:FDMA_DCB_LLP1 */ +#define FDMA_DCB_LLP1(r) __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 84, r, 8, 4) + +/* FDMA:FDMA:FDMA_CH_ACTIVE */ +#define 
FDMA_CH_ACTIVE __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 180, 0, 1, 4) + +/* FDMA:FDMA:FDMA_CH_CFG */ +#define FDMA_CH_CFG(r) __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 224, r, 8, 4) + +#define FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY BIT(4) +#define FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(x)\ + FIELD_PREP(FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY, x) +#define FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_GET(x)\ + FIELD_GET(FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY, x) + +#define FDMA_CH_CFG_CH_INJ_PORT BIT(3) +#define FDMA_CH_CFG_CH_INJ_PORT_SET(x)\ + FIELD_PREP(FDMA_CH_CFG_CH_INJ_PORT, x) +#define FDMA_CH_CFG_CH_INJ_PORT_GET(x)\ + FIELD_GET(FDMA_CH_CFG_CH_INJ_PORT, x) + +#define FDMA_CH_CFG_CH_DCB_DB_CNT GENMASK(2, 1) +#define FDMA_CH_CFG_CH_DCB_DB_CNT_SET(x)\ + FIELD_PREP(FDMA_CH_CFG_CH_DCB_DB_CNT, x) +#define FDMA_CH_CFG_CH_DCB_DB_CNT_GET(x)\ + FIELD_GET(FDMA_CH_CFG_CH_DCB_DB_CNT, x) + +#define FDMA_CH_CFG_CH_MEM BIT(0) +#define FDMA_CH_CFG_CH_MEM_SET(x)\ + FIELD_PREP(FDMA_CH_CFG_CH_MEM, x) +#define FDMA_CH_CFG_CH_MEM_GET(x)\ + FIELD_GET(FDMA_CH_CFG_CH_MEM, x) + +/* FDMA:FDMA:FDMA_PORT_CTRL */ +#define FDMA_PORT_CTRL(r) __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 376, r, 2, 4) + +#define FDMA_PORT_CTRL_INJ_STOP BIT(4) +#define FDMA_PORT_CTRL_INJ_STOP_SET(x)\ + FIELD_PREP(FDMA_PORT_CTRL_INJ_STOP, x) +#define FDMA_PORT_CTRL_INJ_STOP_GET(x)\ + FIELD_GET(FDMA_PORT_CTRL_INJ_STOP, x) + +#define FDMA_PORT_CTRL_XTR_STOP BIT(2) +#define FDMA_PORT_CTRL_XTR_STOP_SET(x)\ + FIELD_PREP(FDMA_PORT_CTRL_XTR_STOP, x) +#define FDMA_PORT_CTRL_XTR_STOP_GET(x)\ + FIELD_GET(FDMA_PORT_CTRL_XTR_STOP, x) + +/* FDMA:FDMA:FDMA_INTR_DB */ +#define FDMA_INTR_DB __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 392, 0, 1, 4) + +/* FDMA:FDMA:FDMA_INTR_DB_ENA */ +#define FDMA_INTR_DB_ENA __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 396, 0, 1, 4) + +#define FDMA_INTR_DB_ENA_INTR_DB_ENA GENMASK(7, 0) +#define FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(x)\ + FIELD_PREP(FDMA_INTR_DB_ENA_INTR_DB_ENA, x) +#define FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(x)\ + FIELD_GET(FDMA_INTR_DB_ENA_INTR_DB_ENA, x) + +/* FDMA:FDMA:FDMA_INTR_ERR */ +#define FDMA_INTR_ERR __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 400, 0, 1, 4) + +/* FDMA:FDMA:FDMA_ERRORS */ +#define FDMA_ERRORS __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 412, 0, 1, 4) + +/* PTP:PTP_CFG:PTP_PIN_INTR */ +#define PTP_PIN_INTR __REG(TARGET_PTP, 0, 1, 512, 0, 1, 16, 0, 0, 1, 4) + +#define PTP_PIN_INTR_INTR_PTP GENMASK(7, 0) +#define PTP_PIN_INTR_INTR_PTP_SET(x)\ + FIELD_PREP(PTP_PIN_INTR_INTR_PTP, x) +#define PTP_PIN_INTR_INTR_PTP_GET(x)\ + FIELD_GET(PTP_PIN_INTR_INTR_PTP, x) + +/* PTP:PTP_CFG:PTP_PIN_INTR_ENA */ +#define PTP_PIN_INTR_ENA __REG(TARGET_PTP, 0, 1, 512, 0, 1, 16, 4, 0, 1, 4) + +#define PTP_PIN_INTR_ENA_INTR_ENA GENMASK(7, 0) +#define PTP_PIN_INTR_ENA_INTR_ENA_SET(x)\ + FIELD_PREP(PTP_PIN_INTR_ENA_INTR_ENA, x) +#define PTP_PIN_INTR_ENA_INTR_ENA_GET(x)\ + FIELD_GET(PTP_PIN_INTR_ENA_INTR_ENA, x) + +/* PTP:PTP_CFG:PTP_DOM_CFG */ +#define PTP_DOM_CFG __REG(TARGET_PTP, 0, 1, 512, 0, 1, 16, 12, 0, 1, 4) + +#define PTP_DOM_CFG_ENA GENMASK(11, 9) +#define PTP_DOM_CFG_ENA_SET(x)\ + FIELD_PREP(PTP_DOM_CFG_ENA, x) +#define PTP_DOM_CFG_ENA_GET(x)\ + FIELD_GET(PTP_DOM_CFG_ENA, x) + +#define PTP_DOM_CFG_CLKCFG_DIS GENMASK(2, 0) +#define PTP_DOM_CFG_CLKCFG_DIS_SET(x)\ + FIELD_PREP(PTP_DOM_CFG_CLKCFG_DIS, x) +#define PTP_DOM_CFG_CLKCFG_DIS_GET(x)\ + FIELD_GET(PTP_DOM_CFG_CLKCFG_DIS, x) + +/* PTP:PTP_TOD_DOMAINS:CLK_PER_CFG */ +#define PTP_CLK_PER_CFG(g, r) __REG(TARGET_PTP, 0, 1, 528, g, 3, 28, 0, r, 2, 4) + +/* PTP:PTP_PINS:PTP_PIN_CFG */ +#define PTP_PIN_CFG(g) 
__REG(TARGET_PTP, 0, 1, 0, g, 8, 64, 0, 0, 1, 4) + +#define PTP_PIN_CFG_PIN_ACTION GENMASK(29, 27) +#define PTP_PIN_CFG_PIN_ACTION_SET(x)\ + FIELD_PREP(PTP_PIN_CFG_PIN_ACTION, x) +#define PTP_PIN_CFG_PIN_ACTION_GET(x)\ + FIELD_GET(PTP_PIN_CFG_PIN_ACTION, x) + +#define PTP_PIN_CFG_PIN_SYNC GENMASK(26, 25) +#define PTP_PIN_CFG_PIN_SYNC_SET(x)\ + FIELD_PREP(PTP_PIN_CFG_PIN_SYNC, x) +#define PTP_PIN_CFG_PIN_SYNC_GET(x)\ + FIELD_GET(PTP_PIN_CFG_PIN_SYNC, x) + +#define PTP_PIN_CFG_PIN_SELECT GENMASK(23, 21) +#define PTP_PIN_CFG_PIN_SELECT_SET(x)\ + FIELD_PREP(PTP_PIN_CFG_PIN_SELECT, x) +#define PTP_PIN_CFG_PIN_SELECT_GET(x)\ + FIELD_GET(PTP_PIN_CFG_PIN_SELECT, x) + +#define PTP_PIN_CFG_PIN_DOM GENMASK(17, 16) +#define PTP_PIN_CFG_PIN_DOM_SET(x)\ + FIELD_PREP(PTP_PIN_CFG_PIN_DOM, x) +#define PTP_PIN_CFG_PIN_DOM_GET(x)\ + FIELD_GET(PTP_PIN_CFG_PIN_DOM, x) + +/* PTP:PTP_PINS:PTP_TOD_SEC_MSB */ +#define PTP_TOD_SEC_MSB(g) __REG(TARGET_PTP, 0, 1, 0, g, 8, 64, 4, 0, 1, 4) + +#define PTP_TOD_SEC_MSB_TOD_SEC_MSB GENMASK(15, 0) +#define PTP_TOD_SEC_MSB_TOD_SEC_MSB_SET(x)\ + FIELD_PREP(PTP_TOD_SEC_MSB_TOD_SEC_MSB, x) +#define PTP_TOD_SEC_MSB_TOD_SEC_MSB_GET(x)\ + FIELD_GET(PTP_TOD_SEC_MSB_TOD_SEC_MSB, x) + +/* PTP:PTP_PINS:PTP_TOD_SEC_LSB */ +#define PTP_TOD_SEC_LSB(g) __REG(TARGET_PTP, 0, 1, 0, g, 8, 64, 8, 0, 1, 4) + +/* PTP:PTP_PINS:PTP_TOD_NSEC */ +#define PTP_TOD_NSEC(g) __REG(TARGET_PTP, 0, 1, 0, g, 8, 64, 12, 0, 1, 4) + +#define PTP_TOD_NSEC_TOD_NSEC GENMASK(29, 0) +#define PTP_TOD_NSEC_TOD_NSEC_SET(x)\ + FIELD_PREP(PTP_TOD_NSEC_TOD_NSEC, x) +#define PTP_TOD_NSEC_TOD_NSEC_GET(x)\ + FIELD_GET(PTP_TOD_NSEC_TOD_NSEC, x) + +/* PTP:PTP_PINS:WF_HIGH_PERIOD */ +#define PTP_WF_HIGH_PERIOD(g) __REG(TARGET_PTP,\ + 0, 1, 0, g, 8, 64, 24, 0, 1, 4) + +#define PTP_WF_HIGH_PERIOD_PIN_WFH(x) ((x) & GENMASK(29, 0)) +#define PTP_WF_HIGH_PERIOD_PIN_WFH_M GENMASK(29, 0) +#define PTP_WF_HIGH_PERIOD_PIN_WFH_X(x) ((x) & GENMASK(29, 0)) + +/* PTP:PTP_PINS:WF_LOW_PERIOD */ +#define PTP_WF_LOW_PERIOD(g) __REG(TARGET_PTP,\ + 0, 1, 0, g, 8, 64, 28, 0, 1, 4) + +#define PTP_WF_LOW_PERIOD_PIN_WFL(x) ((x) & GENMASK(29, 0)) +#define PTP_WF_LOW_PERIOD_PIN_WFL_M GENMASK(29, 0) +#define PTP_WF_LOW_PERIOD_PIN_WFL_X(x) ((x) & GENMASK(29, 0)) + +/* PTP:PTP_TS_FIFO:PTP_TWOSTEP_CTRL */ +#define PTP_TWOSTEP_CTRL __REG(TARGET_PTP, 0, 1, 612, 0, 1, 12, 0, 0, 1, 4) + +#define PTP_TWOSTEP_CTRL_NXT BIT(11) +#define PTP_TWOSTEP_CTRL_NXT_SET(x)\ + FIELD_PREP(PTP_TWOSTEP_CTRL_NXT, x) +#define PTP_TWOSTEP_CTRL_NXT_GET(x)\ + FIELD_GET(PTP_TWOSTEP_CTRL_NXT, x) + +#define PTP_TWOSTEP_CTRL_VLD BIT(10) +#define PTP_TWOSTEP_CTRL_VLD_SET(x)\ + FIELD_PREP(PTP_TWOSTEP_CTRL_VLD, x) +#define PTP_TWOSTEP_CTRL_VLD_GET(x)\ + FIELD_GET(PTP_TWOSTEP_CTRL_VLD, x) + +#define PTP_TWOSTEP_CTRL_STAMP_TX BIT(9) +#define PTP_TWOSTEP_CTRL_STAMP_TX_SET(x)\ + FIELD_PREP(PTP_TWOSTEP_CTRL_STAMP_TX, x) +#define PTP_TWOSTEP_CTRL_STAMP_TX_GET(x)\ + FIELD_GET(PTP_TWOSTEP_CTRL_STAMP_TX, x) + +#define PTP_TWOSTEP_CTRL_STAMP_PORT GENMASK(8, 1) +#define PTP_TWOSTEP_CTRL_STAMP_PORT_SET(x)\ + FIELD_PREP(PTP_TWOSTEP_CTRL_STAMP_PORT, x) +#define PTP_TWOSTEP_CTRL_STAMP_PORT_GET(x)\ + FIELD_GET(PTP_TWOSTEP_CTRL_STAMP_PORT, x) + +#define PTP_TWOSTEP_CTRL_OVFL BIT(0) +#define PTP_TWOSTEP_CTRL_OVFL_SET(x)\ + FIELD_PREP(PTP_TWOSTEP_CTRL_OVFL, x) +#define PTP_TWOSTEP_CTRL_OVFL_GET(x)\ + FIELD_GET(PTP_TWOSTEP_CTRL_OVFL, x) + +/* PTP:PTP_TS_FIFO:PTP_TWOSTEP_STAMP */ +#define PTP_TWOSTEP_STAMP __REG(TARGET_PTP, 0, 1, 612, 0, 1, 12, 4, 0, 1, 4) + +#define PTP_TWOSTEP_STAMP_STAMP_NSEC GENMASK(31, 2) 
+#define PTP_TWOSTEP_STAMP_STAMP_NSEC_SET(x)\ + FIELD_PREP(PTP_TWOSTEP_STAMP_STAMP_NSEC, x) +#define PTP_TWOSTEP_STAMP_STAMP_NSEC_GET(x)\ + FIELD_GET(PTP_TWOSTEP_STAMP_STAMP_NSEC, x) + +/* DEVCPU_QS:XTR:XTR_GRP_CFG */ +#define QS_XTR_GRP_CFG(r) __REG(TARGET_QS, 0, 1, 0, 0, 1, 36, 0, r, 2, 4) + +#define QS_XTR_GRP_CFG_MODE GENMASK(3, 2) +#define QS_XTR_GRP_CFG_MODE_SET(x)\ + FIELD_PREP(QS_XTR_GRP_CFG_MODE, x) +#define QS_XTR_GRP_CFG_MODE_GET(x)\ + FIELD_GET(QS_XTR_GRP_CFG_MODE, x) + +#define QS_XTR_GRP_CFG_BYTE_SWAP BIT(0) +#define QS_XTR_GRP_CFG_BYTE_SWAP_SET(x)\ + FIELD_PREP(QS_XTR_GRP_CFG_BYTE_SWAP, x) +#define QS_XTR_GRP_CFG_BYTE_SWAP_GET(x)\ + FIELD_GET(QS_XTR_GRP_CFG_BYTE_SWAP, x) + +/* DEVCPU_QS:XTR:XTR_RD */ +#define QS_XTR_RD(r) __REG(TARGET_QS, 0, 1, 0, 0, 1, 36, 8, r, 2, 4) + +/* DEVCPU_QS:XTR:XTR_FLUSH */ +#define QS_XTR_FLUSH __REG(TARGET_QS, 0, 1, 0, 0, 1, 36, 24, 0, 1, 4) + +/* DEVCPU_QS:XTR:XTR_DATA_PRESENT */ +#define QS_XTR_DATA_PRESENT __REG(TARGET_QS, 0, 1, 0, 0, 1, 36, 28, 0, 1, 4) + +/* DEVCPU_QS:INJ:INJ_GRP_CFG */ +#define QS_INJ_GRP_CFG(r) __REG(TARGET_QS, 0, 1, 36, 0, 1, 40, 0, r, 2, 4) + +#define QS_INJ_GRP_CFG_MODE GENMASK(3, 2) +#define QS_INJ_GRP_CFG_MODE_SET(x)\ + FIELD_PREP(QS_INJ_GRP_CFG_MODE, x) +#define QS_INJ_GRP_CFG_MODE_GET(x)\ + FIELD_GET(QS_INJ_GRP_CFG_MODE, x) + +#define QS_INJ_GRP_CFG_BYTE_SWAP BIT(0) +#define QS_INJ_GRP_CFG_BYTE_SWAP_SET(x)\ + FIELD_PREP(QS_INJ_GRP_CFG_BYTE_SWAP, x) +#define QS_INJ_GRP_CFG_BYTE_SWAP_GET(x)\ + FIELD_GET(QS_INJ_GRP_CFG_BYTE_SWAP, x) + +/* DEVCPU_QS:INJ:INJ_WR */ +#define QS_INJ_WR(r) __REG(TARGET_QS, 0, 1, 36, 0, 1, 40, 8, r, 2, 4) + +/* DEVCPU_QS:INJ:INJ_CTRL */ +#define QS_INJ_CTRL(r) __REG(TARGET_QS, 0, 1, 36, 0, 1, 40, 16, r, 2, 4) + +#define QS_INJ_CTRL_GAP_SIZE GENMASK(24, 21) +#define QS_INJ_CTRL_GAP_SIZE_SET(x)\ + FIELD_PREP(QS_INJ_CTRL_GAP_SIZE, x) +#define QS_INJ_CTRL_GAP_SIZE_GET(x)\ + FIELD_GET(QS_INJ_CTRL_GAP_SIZE, x) + +#define QS_INJ_CTRL_EOF BIT(19) +#define QS_INJ_CTRL_EOF_SET(x)\ + FIELD_PREP(QS_INJ_CTRL_EOF, x) +#define QS_INJ_CTRL_EOF_GET(x)\ + FIELD_GET(QS_INJ_CTRL_EOF, x) + +#define QS_INJ_CTRL_SOF BIT(18) +#define QS_INJ_CTRL_SOF_SET(x)\ + FIELD_PREP(QS_INJ_CTRL_SOF, x) +#define QS_INJ_CTRL_SOF_GET(x)\ + FIELD_GET(QS_INJ_CTRL_SOF, x) + +#define QS_INJ_CTRL_VLD_BYTES GENMASK(17, 16) +#define QS_INJ_CTRL_VLD_BYTES_SET(x)\ + FIELD_PREP(QS_INJ_CTRL_VLD_BYTES, x) +#define QS_INJ_CTRL_VLD_BYTES_GET(x)\ + FIELD_GET(QS_INJ_CTRL_VLD_BYTES, x) + +/* DEVCPU_QS:INJ:INJ_STATUS */ +#define QS_INJ_STATUS __REG(TARGET_QS, 0, 1, 36, 0, 1, 40, 24, 0, 1, 4) + +#define QS_INJ_STATUS_WMARK_REACHED GENMASK(5, 4) +#define QS_INJ_STATUS_WMARK_REACHED_SET(x)\ + FIELD_PREP(QS_INJ_STATUS_WMARK_REACHED, x) +#define QS_INJ_STATUS_WMARK_REACHED_GET(x)\ + FIELD_GET(QS_INJ_STATUS_WMARK_REACHED, x) + +#define QS_INJ_STATUS_FIFO_RDY GENMASK(3, 2) +#define QS_INJ_STATUS_FIFO_RDY_SET(x)\ + FIELD_PREP(QS_INJ_STATUS_FIFO_RDY, x) +#define QS_INJ_STATUS_FIFO_RDY_GET(x)\ + FIELD_GET(QS_INJ_STATUS_FIFO_RDY, x) + +/* QSYS:SYSTEM:PORT_MODE */ +#define QSYS_PORT_MODE(r) __REG(TARGET_QSYS, 0, 1, 28008, 0, 1, 216, 0, r, 10, 4) + +#define QSYS_PORT_MODE_DEQUEUE_DIS BIT(1) +#define QSYS_PORT_MODE_DEQUEUE_DIS_SET(x)\ + FIELD_PREP(QSYS_PORT_MODE_DEQUEUE_DIS, x) +#define QSYS_PORT_MODE_DEQUEUE_DIS_GET(x)\ + FIELD_GET(QSYS_PORT_MODE_DEQUEUE_DIS, x) + +/* QSYS:SYSTEM:SWITCH_PORT_MODE */ +#define QSYS_SW_PORT_MODE(r) __REG(TARGET_QSYS, 0, 1, 28008, 0, 1, 216, 80, r, 9, 4) + +#define QSYS_SW_PORT_MODE_PORT_ENA BIT(18) +#define 
QSYS_SW_PORT_MODE_PORT_ENA_SET(x)\ + FIELD_PREP(QSYS_SW_PORT_MODE_PORT_ENA, x) +#define QSYS_SW_PORT_MODE_PORT_ENA_GET(x)\ + FIELD_GET(QSYS_SW_PORT_MODE_PORT_ENA, x) + +#define QSYS_SW_PORT_MODE_SCH_NEXT_CFG GENMASK(16, 14) +#define QSYS_SW_PORT_MODE_SCH_NEXT_CFG_SET(x)\ + FIELD_PREP(QSYS_SW_PORT_MODE_SCH_NEXT_CFG, x) +#define QSYS_SW_PORT_MODE_SCH_NEXT_CFG_GET(x)\ + FIELD_GET(QSYS_SW_PORT_MODE_SCH_NEXT_CFG, x) + +#define QSYS_SW_PORT_MODE_INGRESS_DROP_MODE BIT(12) +#define QSYS_SW_PORT_MODE_INGRESS_DROP_MODE_SET(x)\ + FIELD_PREP(QSYS_SW_PORT_MODE_INGRESS_DROP_MODE, x) +#define QSYS_SW_PORT_MODE_INGRESS_DROP_MODE_GET(x)\ + FIELD_GET(QSYS_SW_PORT_MODE_INGRESS_DROP_MODE, x) + +#define QSYS_SW_PORT_MODE_TX_PFC_ENA GENMASK(11, 4) +#define QSYS_SW_PORT_MODE_TX_PFC_ENA_SET(x)\ + FIELD_PREP(QSYS_SW_PORT_MODE_TX_PFC_ENA, x) +#define QSYS_SW_PORT_MODE_TX_PFC_ENA_GET(x)\ + FIELD_GET(QSYS_SW_PORT_MODE_TX_PFC_ENA, x) + +#define QSYS_SW_PORT_MODE_AGING_MODE GENMASK(1, 0) +#define QSYS_SW_PORT_MODE_AGING_MODE_SET(x)\ + FIELD_PREP(QSYS_SW_PORT_MODE_AGING_MODE, x) +#define QSYS_SW_PORT_MODE_AGING_MODE_GET(x)\ + FIELD_GET(QSYS_SW_PORT_MODE_AGING_MODE, x) + +/* QSYS:SYSTEM:SW_STATUS */ +#define QSYS_SW_STATUS(r) __REG(TARGET_QSYS, 0, 1, 28008, 0, 1, 216, 164, r, 9, 4) + +#define QSYS_SW_STATUS_EQ_AVAIL GENMASK(7, 0) +#define QSYS_SW_STATUS_EQ_AVAIL_SET(x)\ + FIELD_PREP(QSYS_SW_STATUS_EQ_AVAIL, x) +#define QSYS_SW_STATUS_EQ_AVAIL_GET(x)\ + FIELD_GET(QSYS_SW_STATUS_EQ_AVAIL, x) + +/* QSYS:SYSTEM:CPU_GROUP_MAP */ +#define QSYS_CPU_GROUP_MAP __REG(TARGET_QSYS, 0, 1, 28008, 0, 1, 216, 204, 0, 1, 4) + +/* QSYS:RES_CTRL:RES_CFG */ +#define QSYS_RES_CFG(g) __REG(TARGET_QSYS, 0, 1, 32768, g, 1024, 8, 0, 0, 1, 4) + +/* QSYS:HSCH:CIR_CFG */ +#define QSYS_CIR_CFG(g) __REG(TARGET_QSYS, 0, 1, 16384, g, 90, 128, 0, 0, 1, 4) + +#define QSYS_CIR_CFG_CIR_RATE GENMASK(20, 6) +#define QSYS_CIR_CFG_CIR_RATE_SET(x)\ + FIELD_PREP(QSYS_CIR_CFG_CIR_RATE, x) +#define QSYS_CIR_CFG_CIR_RATE_GET(x)\ + FIELD_GET(QSYS_CIR_CFG_CIR_RATE, x) + +#define QSYS_CIR_CFG_CIR_BURST GENMASK(5, 0) +#define QSYS_CIR_CFG_CIR_BURST_SET(x)\ + FIELD_PREP(QSYS_CIR_CFG_CIR_BURST, x) +#define QSYS_CIR_CFG_CIR_BURST_GET(x)\ + FIELD_GET(QSYS_CIR_CFG_CIR_BURST, x) + +/* QSYS:HSCH:SE_CFG */ +#define QSYS_SE_CFG(g) __REG(TARGET_QSYS, 0, 1, 16384, g, 90, 128, 8, 0, 1, 4) + +#define QSYS_SE_CFG_SE_DWRR_CNT GENMASK(9, 6) +#define QSYS_SE_CFG_SE_DWRR_CNT_SET(x)\ + FIELD_PREP(QSYS_SE_CFG_SE_DWRR_CNT, x) +#define QSYS_SE_CFG_SE_DWRR_CNT_GET(x)\ + FIELD_GET(QSYS_SE_CFG_SE_DWRR_CNT, x) + +#define QSYS_SE_CFG_SE_RR_ENA BIT(5) +#define QSYS_SE_CFG_SE_RR_ENA_SET(x)\ + FIELD_PREP(QSYS_SE_CFG_SE_RR_ENA, x) +#define QSYS_SE_CFG_SE_RR_ENA_GET(x)\ + FIELD_GET(QSYS_SE_CFG_SE_RR_ENA, x) + +#define QSYS_SE_CFG_SE_AVB_ENA BIT(4) +#define QSYS_SE_CFG_SE_AVB_ENA_SET(x)\ + FIELD_PREP(QSYS_SE_CFG_SE_AVB_ENA, x) +#define QSYS_SE_CFG_SE_AVB_ENA_GET(x)\ + FIELD_GET(QSYS_SE_CFG_SE_AVB_ENA, x) + +#define QSYS_SE_CFG_SE_FRM_MODE GENMASK(3, 2) +#define QSYS_SE_CFG_SE_FRM_MODE_SET(x)\ + FIELD_PREP(QSYS_SE_CFG_SE_FRM_MODE, x) +#define QSYS_SE_CFG_SE_FRM_MODE_GET(x)\ + FIELD_GET(QSYS_SE_CFG_SE_FRM_MODE, x) + +#define QSYS_SE_DWRR_CFG(g, r) __REG(TARGET_QSYS, 0, 1, 16384, g, 90, 128, 12, r, 12, 4) + +#define QSYS_SE_DWRR_CFG_DWRR_COST GENMASK(4, 0) +#define QSYS_SE_DWRR_CFG_DWRR_COST_SET(x)\ + FIELD_PREP(QSYS_SE_DWRR_CFG_DWRR_COST, x) +#define QSYS_SE_DWRR_CFG_DWRR_COST_GET(x)\ + FIELD_GET(QSYS_SE_DWRR_CFG_DWRR_COST, x) + +/* QSYS:TAS_CONFIG:TAS_CFG_CTRL */ +#define QSYS_TAS_CFG_CTRL 
__REG(TARGET_QSYS, 0, 1, 57372, 0, 1, 12, 0, 0, 1, 4) + +#define QSYS_TAS_CFG_CTRL_LIST_NUM_MAX GENMASK(27, 23) +#define QSYS_TAS_CFG_CTRL_LIST_NUM_MAX_SET(x)\ + FIELD_PREP(QSYS_TAS_CFG_CTRL_LIST_NUM_MAX, x) +#define QSYS_TAS_CFG_CTRL_LIST_NUM_MAX_GET(x)\ + FIELD_GET(QSYS_TAS_CFG_CTRL_LIST_NUM_MAX, x) + +#define QSYS_TAS_CFG_CTRL_LIST_NUM GENMASK(22, 18) +#define QSYS_TAS_CFG_CTRL_LIST_NUM_SET(x)\ + FIELD_PREP(QSYS_TAS_CFG_CTRL_LIST_NUM, x) +#define QSYS_TAS_CFG_CTRL_LIST_NUM_GET(x)\ + FIELD_GET(QSYS_TAS_CFG_CTRL_LIST_NUM, x) + +#define QSYS_TAS_CFG_CTRL_ALWAYS_GB_SCH_Q BIT(17) +#define QSYS_TAS_CFG_CTRL_ALWAYS_GB_SCH_Q_SET(x)\ + FIELD_PREP(QSYS_TAS_CFG_CTRL_ALWAYS_GB_SCH_Q, x) +#define QSYS_TAS_CFG_CTRL_ALWAYS_GB_SCH_Q_GET(x)\ + FIELD_GET(QSYS_TAS_CFG_CTRL_ALWAYS_GB_SCH_Q, x) + +#define QSYS_TAS_CFG_CTRL_GCL_ENTRY_NUM GENMASK(16, 5) +#define QSYS_TAS_CFG_CTRL_GCL_ENTRY_NUM_SET(x)\ + FIELD_PREP(QSYS_TAS_CFG_CTRL_GCL_ENTRY_NUM, x) +#define QSYS_TAS_CFG_CTRL_GCL_ENTRY_NUM_GET(x)\ + FIELD_GET(QSYS_TAS_CFG_CTRL_GCL_ENTRY_NUM, x) + +/* QSYS:TAS_CONFIG:TAS_GATE_STATE_CTRL */ +#define QSYS_TAS_GS_CTRL __REG(TARGET_QSYS, 0, 1, 57372, 0, 1, 12, 4, 0, 1, 4) + +#define QSYS_TAS_GS_CTRL_HSCH_POS GENMASK(2, 0) +#define QSYS_TAS_GS_CTRL_HSCH_POS_SET(x)\ + FIELD_PREP(QSYS_TAS_GS_CTRL_HSCH_POS, x) +#define QSYS_TAS_GS_CTRL_HSCH_POS_GET(x)\ + FIELD_GET(QSYS_TAS_GS_CTRL_HSCH_POS, x) + +/* QSYS:TAS_CONFIG:TAS_STATEMACHINE_CFG */ +#define QSYS_TAS_STM_CFG __REG(TARGET_QSYS, 0, 1, 57372, 0, 1, 12, 8, 0, 1, 4) + +#define QSYS_TAS_STM_CFG_REVISIT_DLY GENMASK(7, 0) +#define QSYS_TAS_STM_CFG_REVISIT_DLY_SET(x)\ + FIELD_PREP(QSYS_TAS_STM_CFG_REVISIT_DLY, x) +#define QSYS_TAS_STM_CFG_REVISIT_DLY_GET(x)\ + FIELD_GET(QSYS_TAS_STM_CFG_REVISIT_DLY, x) + +/* QSYS:TAS_PROFILE_CFG:TAS_PROFILE_CONFIG */ +#define QSYS_TAS_PROFILE_CFG(g) __REG(TARGET_QSYS, 0, 1, 30720, g, 16, 64, 32, 0, 1, 4) + +#define QSYS_TAS_PROFILE_CFG_PORT_NUM GENMASK(21, 19) +#define QSYS_TAS_PROFILE_CFG_PORT_NUM_SET(x)\ + FIELD_PREP(QSYS_TAS_PROFILE_CFG_PORT_NUM, x) +#define QSYS_TAS_PROFILE_CFG_PORT_NUM_GET(x)\ + FIELD_GET(QSYS_TAS_PROFILE_CFG_PORT_NUM, x) + +#define QSYS_TAS_PROFILE_CFG_LINK_SPEED GENMASK(18, 16) +#define QSYS_TAS_PROFILE_CFG_LINK_SPEED_SET(x)\ + FIELD_PREP(QSYS_TAS_PROFILE_CFG_LINK_SPEED, x) +#define QSYS_TAS_PROFILE_CFG_LINK_SPEED_GET(x)\ + FIELD_GET(QSYS_TAS_PROFILE_CFG_LINK_SPEED, x) + +/* QSYS:TAS_LIST_CFG:TAS_BASE_TIME_NSEC */ +#define QSYS_TAS_BT_NSEC __REG(TARGET_QSYS, 0, 1, 27904, 0, 1, 64, 0, 0, 1, 4) + +#define QSYS_TAS_BT_NSEC_NSEC GENMASK(29, 0) +#define QSYS_TAS_BT_NSEC_NSEC_SET(x)\ + FIELD_PREP(QSYS_TAS_BT_NSEC_NSEC, x) +#define QSYS_TAS_BT_NSEC_NSEC_GET(x)\ + FIELD_GET(QSYS_TAS_BT_NSEC_NSEC, x) + +/* QSYS:TAS_LIST_CFG:TAS_BASE_TIME_SEC_LSB */ +#define QSYS_TAS_BT_SEC_LSB __REG(TARGET_QSYS, 0, 1, 27904, 0, 1, 64, 4, 0, 1, 4) + +/* QSYS:TAS_LIST_CFG:TAS_BASE_TIME_SEC_MSB */ +#define QSYS_TAS_BT_SEC_MSB __REG(TARGET_QSYS, 0, 1, 27904, 0, 1, 64, 8, 0, 1, 4) + +#define QSYS_TAS_BT_SEC_MSB_SEC_MSB GENMASK(15, 0) +#define QSYS_TAS_BT_SEC_MSB_SEC_MSB_SET(x)\ + FIELD_PREP(QSYS_TAS_BT_SEC_MSB_SEC_MSB, x) +#define QSYS_TAS_BT_SEC_MSB_SEC_MSB_GET(x)\ + FIELD_GET(QSYS_TAS_BT_SEC_MSB_SEC_MSB, x) + +/* QSYS:TAS_LIST_CFG:TAS_CYCLE_TIME_CFG */ +#define QSYS_TAS_CT_CFG __REG(TARGET_QSYS, 0, 1, 27904, 0, 1, 64, 24, 0, 1, 4) + +/* QSYS:TAS_LIST_CFG:TAS_STARTUP_CFG */ +#define QSYS_TAS_STARTUP_CFG __REG(TARGET_QSYS, 0, 1, 27904, 0, 1, 64, 28, 0, 1, 4) + +#define QSYS_TAS_STARTUP_CFG_OBSOLETE_IDX GENMASK(27, 23) +#define 
QSYS_TAS_STARTUP_CFG_OBSOLETE_IDX_SET(x)\ + FIELD_PREP(QSYS_TAS_STARTUP_CFG_OBSOLETE_IDX, x) +#define QSYS_TAS_STARTUP_CFG_OBSOLETE_IDX_GET(x)\ + FIELD_GET(QSYS_TAS_STARTUP_CFG_OBSOLETE_IDX, x) + +/* QSYS:TAS_LIST_CFG:TAS_LIST_CFG */ +#define QSYS_TAS_LIST_CFG __REG(TARGET_QSYS, 0, 1, 27904, 0, 1, 64, 32, 0, 1, 4) + +#define QSYS_TAS_LIST_CFG_LIST_BASE_ADDR GENMASK(11, 0) +#define QSYS_TAS_LIST_CFG_LIST_BASE_ADDR_SET(x)\ + FIELD_PREP(QSYS_TAS_LIST_CFG_LIST_BASE_ADDR, x) +#define QSYS_TAS_LIST_CFG_LIST_BASE_ADDR_GET(x)\ + FIELD_GET(QSYS_TAS_LIST_CFG_LIST_BASE_ADDR, x) + +/* QSYS:TAS_LIST_CFG:TAS_LIST_STATE */ +#define QSYS_TAS_LST __REG(TARGET_QSYS, 0, 1, 27904, 0, 1, 64, 36, 0, 1, 4) + +#define QSYS_TAS_LST_LIST_STATE GENMASK(2, 0) +#define QSYS_TAS_LST_LIST_STATE_SET(x)\ + FIELD_PREP(QSYS_TAS_LST_LIST_STATE, x) +#define QSYS_TAS_LST_LIST_STATE_GET(x)\ + FIELD_GET(QSYS_TAS_LST_LIST_STATE, x) + +/* QSYS:TAS_GCL_CFG:TAS_GCL_CTRL_CFG */ +#define QSYS_TAS_GCL_CT_CFG __REG(TARGET_QSYS, 0, 1, 27968, 0, 1, 16, 0, 0, 1, 4) + +#define QSYS_TAS_GCL_CT_CFG_HSCH_POS GENMASK(12, 10) +#define QSYS_TAS_GCL_CT_CFG_HSCH_POS_SET(x)\ + FIELD_PREP(QSYS_TAS_GCL_CT_CFG_HSCH_POS, x) +#define QSYS_TAS_GCL_CT_CFG_HSCH_POS_GET(x)\ + FIELD_GET(QSYS_TAS_GCL_CT_CFG_HSCH_POS, x) + +#define QSYS_TAS_GCL_CT_CFG_GATE_STATE GENMASK(9, 2) +#define QSYS_TAS_GCL_CT_CFG_GATE_STATE_SET(x)\ + FIELD_PREP(QSYS_TAS_GCL_CT_CFG_GATE_STATE, x) +#define QSYS_TAS_GCL_CT_CFG_GATE_STATE_GET(x)\ + FIELD_GET(QSYS_TAS_GCL_CT_CFG_GATE_STATE, x) + +#define QSYS_TAS_GCL_CT_CFG_OP_TYPE GENMASK(1, 0) +#define QSYS_TAS_GCL_CT_CFG_OP_TYPE_SET(x)\ + FIELD_PREP(QSYS_TAS_GCL_CT_CFG_OP_TYPE, x) +#define QSYS_TAS_GCL_CT_CFG_OP_TYPE_GET(x)\ + FIELD_GET(QSYS_TAS_GCL_CT_CFG_OP_TYPE, x) + +/* QSYS:TAS_GCL_CFG:TAS_GCL_CTRL_CFG2 */ +#define QSYS_TAS_GCL_CT_CFG2 __REG(TARGET_QSYS, 0, 1, 27968, 0, 1, 16, 4, 0, 1, 4) + +#define QSYS_TAS_GCL_CT_CFG2_PORT_PROFILE GENMASK(15, 12) +#define QSYS_TAS_GCL_CT_CFG2_PORT_PROFILE_SET(x)\ + FIELD_PREP(QSYS_TAS_GCL_CT_CFG2_PORT_PROFILE, x) +#define QSYS_TAS_GCL_CT_CFG2_PORT_PROFILE_GET(x)\ + FIELD_GET(QSYS_TAS_GCL_CT_CFG2_PORT_PROFILE, x) + +#define QSYS_TAS_GCL_CT_CFG2_NEXT_GCL GENMASK(11, 0) +#define QSYS_TAS_GCL_CT_CFG2_NEXT_GCL_SET(x)\ + FIELD_PREP(QSYS_TAS_GCL_CT_CFG2_NEXT_GCL, x) +#define QSYS_TAS_GCL_CT_CFG2_NEXT_GCL_GET(x)\ + FIELD_GET(QSYS_TAS_GCL_CT_CFG2_NEXT_GCL, x) + +/* QSYS:TAS_GCL_CFG:TAS_GCL_TIME_CFG */ +#define QSYS_TAS_GCL_TM_CFG __REG(TARGET_QSYS, 0, 1, 27968, 0, 1, 16, 8, 0, 1, 4) + +/* QSYS:HSCH_TAS_STATE:TAS_GATE_STATE */ +#define QSYS_TAS_GATE_STATE __REG(TARGET_QSYS, 0, 1, 28004, 0, 1, 4, 0, 0, 1, 4) + +#define QSYS_TAS_GATE_STATE_TAS_GATE_STATE GENMASK(7, 0) +#define QSYS_TAS_GATE_STATE_TAS_GATE_STATE_SET(x)\ + FIELD_PREP(QSYS_TAS_GATE_STATE_TAS_GATE_STATE, x) +#define QSYS_TAS_GATE_STATE_TAS_GATE_STATE_GET(x)\ + FIELD_GET(QSYS_TAS_GATE_STATE_TAS_GATE_STATE, x) + +/* REW:PORT:PORT_VLAN_CFG */ +#define REW_PORT_VLAN_CFG(g) __REG(TARGET_REW, 0, 1, 0, g, 10, 128, 0, 0, 1, 4) + +#define REW_PORT_VLAN_CFG_PORT_TPID GENMASK(31, 16) +#define REW_PORT_VLAN_CFG_PORT_TPID_SET(x)\ + FIELD_PREP(REW_PORT_VLAN_CFG_PORT_TPID, x) +#define REW_PORT_VLAN_CFG_PORT_TPID_GET(x)\ + FIELD_GET(REW_PORT_VLAN_CFG_PORT_TPID, x) + +#define REW_PORT_VLAN_CFG_PORT_VID GENMASK(11, 0) +#define REW_PORT_VLAN_CFG_PORT_VID_SET(x)\ + FIELD_PREP(REW_PORT_VLAN_CFG_PORT_VID, x) +#define REW_PORT_VLAN_CFG_PORT_VID_GET(x)\ + FIELD_GET(REW_PORT_VLAN_CFG_PORT_VID, x) + +/* REW:PORT:TAG_CFG */ +#define REW_TAG_CFG(g) __REG(TARGET_REW, 0, 1, 0, 
g, 10, 128, 4, 0, 1, 4) + +#define REW_TAG_CFG_TAG_CFG GENMASK(8, 7) +#define REW_TAG_CFG_TAG_CFG_SET(x)\ + FIELD_PREP(REW_TAG_CFG_TAG_CFG, x) +#define REW_TAG_CFG_TAG_CFG_GET(x)\ + FIELD_GET(REW_TAG_CFG_TAG_CFG, x) + +#define REW_TAG_CFG_TAG_TPID_CFG GENMASK(6, 5) +#define REW_TAG_CFG_TAG_TPID_CFG_SET(x)\ + FIELD_PREP(REW_TAG_CFG_TAG_TPID_CFG, x) +#define REW_TAG_CFG_TAG_TPID_CFG_GET(x)\ + FIELD_GET(REW_TAG_CFG_TAG_TPID_CFG, x) + +/* REW:PORT:PORT_CFG */ +#define REW_PORT_CFG(g) __REG(TARGET_REW, 0, 1, 0, g, 10, 128, 8, 0, 1, 4) + +#define REW_PORT_CFG_NO_REWRITE BIT(0) +#define REW_PORT_CFG_NO_REWRITE_SET(x)\ + FIELD_PREP(REW_PORT_CFG_NO_REWRITE, x) +#define REW_PORT_CFG_NO_REWRITE_GET(x)\ + FIELD_GET(REW_PORT_CFG_NO_REWRITE, x) + +/* SYS:SYSTEM:RESET_CFG */ +#define SYS_RESET_CFG __REG(TARGET_SYS, 0, 1, 4128, 0, 1, 168, 0, 0, 1, 4) + +#define SYS_RESET_CFG_CORE_ENA BIT(0) +#define SYS_RESET_CFG_CORE_ENA_SET(x)\ + FIELD_PREP(SYS_RESET_CFG_CORE_ENA, x) +#define SYS_RESET_CFG_CORE_ENA_GET(x)\ + FIELD_GET(SYS_RESET_CFG_CORE_ENA, x) + +/* SYS:SYSTEM:PORT_MODE */ +#define SYS_PORT_MODE(r) __REG(TARGET_SYS, 0, 1, 4128, 0, 1, 168, 44, r, 10, 4) + +#define SYS_PORT_MODE_INCL_INJ_HDR GENMASK(5, 4) +#define SYS_PORT_MODE_INCL_INJ_HDR_SET(x)\ + FIELD_PREP(SYS_PORT_MODE_INCL_INJ_HDR, x) +#define SYS_PORT_MODE_INCL_INJ_HDR_GET(x)\ + FIELD_GET(SYS_PORT_MODE_INCL_INJ_HDR, x) + +#define SYS_PORT_MODE_INCL_XTR_HDR GENMASK(3, 2) +#define SYS_PORT_MODE_INCL_XTR_HDR_SET(x)\ + FIELD_PREP(SYS_PORT_MODE_INCL_XTR_HDR, x) +#define SYS_PORT_MODE_INCL_XTR_HDR_GET(x)\ + FIELD_GET(SYS_PORT_MODE_INCL_XTR_HDR, x) + +/* SYS:SYSTEM:FRONT_PORT_MODE */ +#define SYS_FRONT_PORT_MODE(r) __REG(TARGET_SYS, 0, 1, 4128, 0, 1, 168, 84, r, 8, 4) + +#define SYS_FRONT_PORT_MODE_HDX_MODE BIT(1) +#define SYS_FRONT_PORT_MODE_HDX_MODE_SET(x)\ + FIELD_PREP(SYS_FRONT_PORT_MODE_HDX_MODE, x) +#define SYS_FRONT_PORT_MODE_HDX_MODE_GET(x)\ + FIELD_GET(SYS_FRONT_PORT_MODE_HDX_MODE, x) + +/* SYS:SYSTEM:FRM_AGING */ +#define SYS_FRM_AGING __REG(TARGET_SYS, 0, 1, 4128, 0, 1, 168, 116, 0, 1, 4) + +#define SYS_FRM_AGING_AGE_TX_ENA BIT(20) +#define SYS_FRM_AGING_AGE_TX_ENA_SET(x)\ + FIELD_PREP(SYS_FRM_AGING_AGE_TX_ENA, x) +#define SYS_FRM_AGING_AGE_TX_ENA_GET(x)\ + FIELD_GET(SYS_FRM_AGING_AGE_TX_ENA, x) + +/* SYS:SYSTEM:STAT_CFG */ +#define SYS_STAT_CFG __REG(TARGET_SYS, 0, 1, 4128, 0, 1, 168, 120, 0, 1, 4) + +#define SYS_STAT_CFG_STAT_VIEW GENMASK(9, 0) +#define SYS_STAT_CFG_STAT_VIEW_SET(x)\ + FIELD_PREP(SYS_STAT_CFG_STAT_VIEW, x) +#define SYS_STAT_CFG_STAT_VIEW_GET(x)\ + FIELD_GET(SYS_STAT_CFG_STAT_VIEW, x) + +/* SYS:PAUSE_CFG:PAUSE_CFG */ +#define SYS_PAUSE_CFG(r) __REG(TARGET_SYS, 0, 1, 4296, 0, 1, 112, 0, r, 9, 4) + +#define SYS_PAUSE_CFG_PAUSE_START GENMASK(18, 10) +#define SYS_PAUSE_CFG_PAUSE_START_SET(x)\ + FIELD_PREP(SYS_PAUSE_CFG_PAUSE_START, x) +#define SYS_PAUSE_CFG_PAUSE_START_GET(x)\ + FIELD_GET(SYS_PAUSE_CFG_PAUSE_START, x) + +#define SYS_PAUSE_CFG_PAUSE_STOP GENMASK(9, 1) +#define SYS_PAUSE_CFG_PAUSE_STOP_SET(x)\ + FIELD_PREP(SYS_PAUSE_CFG_PAUSE_STOP, x) +#define SYS_PAUSE_CFG_PAUSE_STOP_GET(x)\ + FIELD_GET(SYS_PAUSE_CFG_PAUSE_STOP, x) + +#define SYS_PAUSE_CFG_PAUSE_ENA BIT(0) +#define SYS_PAUSE_CFG_PAUSE_ENA_SET(x)\ + FIELD_PREP(SYS_PAUSE_CFG_PAUSE_ENA, x) +#define SYS_PAUSE_CFG_PAUSE_ENA_GET(x)\ + FIELD_GET(SYS_PAUSE_CFG_PAUSE_ENA, x) + +/* SYS:PAUSE_CFG:ATOP */ +#define SYS_ATOP(r) __REG(TARGET_SYS, 0, 1, 4296, 0, 1, 112, 40, r, 9, 4) + +/* SYS:PAUSE_CFG:ATOP_TOT_CFG */ +#define SYS_ATOP_TOT_CFG __REG(TARGET_SYS, 0, 1, 4296, 0, 1, 
112, 76, 0, 1, 4) + +/* SYS:PAUSE_CFG:MAC_FC_CFG */ +#define SYS_MAC_FC_CFG(r) __REG(TARGET_SYS, 0, 1, 4296, 0, 1, 112, 80, r, 8, 4) + +#define SYS_MAC_FC_CFG_FC_LINK_SPEED GENMASK(27, 26) +#define SYS_MAC_FC_CFG_FC_LINK_SPEED_SET(x)\ + FIELD_PREP(SYS_MAC_FC_CFG_FC_LINK_SPEED, x) +#define SYS_MAC_FC_CFG_FC_LINK_SPEED_GET(x)\ + FIELD_GET(SYS_MAC_FC_CFG_FC_LINK_SPEED, x) + +#define SYS_MAC_FC_CFG_FC_LATENCY_CFG GENMASK(25, 20) +#define SYS_MAC_FC_CFG_FC_LATENCY_CFG_SET(x)\ + FIELD_PREP(SYS_MAC_FC_CFG_FC_LATENCY_CFG, x) +#define SYS_MAC_FC_CFG_FC_LATENCY_CFG_GET(x)\ + FIELD_GET(SYS_MAC_FC_CFG_FC_LATENCY_CFG, x) + +#define SYS_MAC_FC_CFG_ZERO_PAUSE_ENA BIT(18) +#define SYS_MAC_FC_CFG_ZERO_PAUSE_ENA_SET(x)\ + FIELD_PREP(SYS_MAC_FC_CFG_ZERO_PAUSE_ENA, x) +#define SYS_MAC_FC_CFG_ZERO_PAUSE_ENA_GET(x)\ + FIELD_GET(SYS_MAC_FC_CFG_ZERO_PAUSE_ENA, x) + +#define SYS_MAC_FC_CFG_TX_FC_ENA BIT(17) +#define SYS_MAC_FC_CFG_TX_FC_ENA_SET(x)\ + FIELD_PREP(SYS_MAC_FC_CFG_TX_FC_ENA, x) +#define SYS_MAC_FC_CFG_TX_FC_ENA_GET(x)\ + FIELD_GET(SYS_MAC_FC_CFG_TX_FC_ENA, x) + +#define SYS_MAC_FC_CFG_RX_FC_ENA BIT(16) +#define SYS_MAC_FC_CFG_RX_FC_ENA_SET(x)\ + FIELD_PREP(SYS_MAC_FC_CFG_RX_FC_ENA, x) +#define SYS_MAC_FC_CFG_RX_FC_ENA_GET(x)\ + FIELD_GET(SYS_MAC_FC_CFG_RX_FC_ENA, x) + +#define SYS_MAC_FC_CFG_PAUSE_VAL_CFG GENMASK(15, 0) +#define SYS_MAC_FC_CFG_PAUSE_VAL_CFG_SET(x)\ + FIELD_PREP(SYS_MAC_FC_CFG_PAUSE_VAL_CFG, x) +#define SYS_MAC_FC_CFG_PAUSE_VAL_CFG_GET(x)\ + FIELD_GET(SYS_MAC_FC_CFG_PAUSE_VAL_CFG, x) + +/* SYS:STAT:CNT */ +#define SYS_CNT(g) __REG(TARGET_SYS, 0, 1, 0, g, 896, 4, 0, 0, 1, 4) + +/* SYS:RAM_CTRL:RAM_INIT */ +#define SYS_RAM_INIT __REG(TARGET_SYS, 0, 1, 4432, 0, 1, 4, 0, 0, 1, 4) + +#define SYS_RAM_INIT_RAM_INIT BIT(1) +#define SYS_RAM_INIT_RAM_INIT_SET(x)\ + FIELD_PREP(SYS_RAM_INIT_RAM_INIT, x) +#define SYS_RAM_INIT_RAM_INIT_GET(x)\ + FIELD_GET(SYS_RAM_INIT_RAM_INIT, x) + +#endif /* _LAN966X_REGS_H_ */ diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c b/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c new file mode 100644 index 000000000..1c88120eb --- /dev/null +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c @@ -0,0 +1,664 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include <linux/if_bridge.h> +#include <net/switchdev.h> + +#include "lan966x_main.h" + +static struct notifier_block lan966x_netdevice_nb __read_mostly; + +static void lan966x_port_set_mcast_ip_flood(struct lan966x_port *port, + u32 pgid_ip) +{ + struct lan966x *lan966x = port->lan966x; + u32 flood_mask_ip; + + flood_mask_ip = lan_rd(lan966x, ANA_PGID(pgid_ip)); + flood_mask_ip = ANA_PGID_PGID_GET(flood_mask_ip); + + /* If mcast snooping is not enabled then use mcast flood mask + * to decide to enable multicast flooding or not. 
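+	 * When snooping is enabled the port bit is always cleared, so IP
+	 * multicast is forwarded only to ports joined through MDB entries.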
+ */ + if (!port->mcast_ena) { + u32 flood_mask; + + flood_mask = lan_rd(lan966x, ANA_PGID(PGID_MC)); + flood_mask = ANA_PGID_PGID_GET(flood_mask); + + if (flood_mask & BIT(port->chip_port)) + flood_mask_ip |= BIT(port->chip_port); + else + flood_mask_ip &= ~BIT(port->chip_port); + } else { + flood_mask_ip &= ~BIT(port->chip_port); + } + + lan_rmw(ANA_PGID_PGID_SET(flood_mask_ip), + ANA_PGID_PGID, + lan966x, ANA_PGID(pgid_ip)); +} + +static void lan966x_port_set_mcast_flood(struct lan966x_port *port, + bool enabled) +{ + u32 val = lan_rd(port->lan966x, ANA_PGID(PGID_MC)); + + val = ANA_PGID_PGID_GET(val); + if (enabled) + val |= BIT(port->chip_port); + else + val &= ~BIT(port->chip_port); + + lan_rmw(ANA_PGID_PGID_SET(val), + ANA_PGID_PGID, + port->lan966x, ANA_PGID(PGID_MC)); + + if (!port->mcast_ena) { + lan966x_port_set_mcast_ip_flood(port, PGID_MCIPV4); + lan966x_port_set_mcast_ip_flood(port, PGID_MCIPV6); + } +} + +static void lan966x_port_set_ucast_flood(struct lan966x_port *port, + bool enabled) +{ + u32 val = lan_rd(port->lan966x, ANA_PGID(PGID_UC)); + + val = ANA_PGID_PGID_GET(val); + if (enabled) + val |= BIT(port->chip_port); + else + val &= ~BIT(port->chip_port); + + lan_rmw(ANA_PGID_PGID_SET(val), + ANA_PGID_PGID, + port->lan966x, ANA_PGID(PGID_UC)); +} + +static void lan966x_port_set_bcast_flood(struct lan966x_port *port, + bool enabled) +{ + u32 val = lan_rd(port->lan966x, ANA_PGID(PGID_BC)); + + val = ANA_PGID_PGID_GET(val); + if (enabled) + val |= BIT(port->chip_port); + else + val &= ~BIT(port->chip_port); + + lan_rmw(ANA_PGID_PGID_SET(val), + ANA_PGID_PGID, + port->lan966x, ANA_PGID(PGID_BC)); +} + +static void lan966x_port_set_learning(struct lan966x_port *port, bool enabled) +{ + lan_rmw(ANA_PORT_CFG_LEARN_ENA_SET(enabled), + ANA_PORT_CFG_LEARN_ENA, + port->lan966x, ANA_PORT_CFG(port->chip_port)); + + port->learn_ena = enabled; +} + +static void lan966x_port_bridge_flags(struct lan966x_port *port, + struct switchdev_brport_flags flags) +{ + if (flags.mask & BR_MCAST_FLOOD) + lan966x_port_set_mcast_flood(port, + !!(flags.val & BR_MCAST_FLOOD)); + + if (flags.mask & BR_FLOOD) + lan966x_port_set_ucast_flood(port, + !!(flags.val & BR_FLOOD)); + + if (flags.mask & BR_BCAST_FLOOD) + lan966x_port_set_bcast_flood(port, + !!(flags.val & BR_BCAST_FLOOD)); + + if (flags.mask & BR_LEARNING) + lan966x_port_set_learning(port, + !!(flags.val & BR_LEARNING)); +} + +static int lan966x_port_pre_bridge_flags(struct lan966x_port *port, + struct switchdev_brport_flags flags) +{ + if (flags.mask & ~(BR_MCAST_FLOOD | BR_FLOOD | BR_BCAST_FLOOD | + BR_LEARNING)) + return -EINVAL; + + return 0; +} + +void lan966x_update_fwd_mask(struct lan966x *lan966x) +{ + int i; + + for (i = 0; i < lan966x->num_phys_ports; i++) { + struct lan966x_port *port = lan966x->ports[i]; + unsigned long mask = 0; + + if (port && lan966x->bridge_fwd_mask & BIT(i)) { + mask = lan966x->bridge_fwd_mask & ~BIT(i); + + if (port->bond) + mask &= ~lan966x_lag_get_mask(lan966x, + port->bond); + } + + mask |= BIT(CPU_PORT); + + lan_wr(ANA_PGID_PGID_SET(mask), + lan966x, ANA_PGID(PGID_SRC + i)); + } +} + +void lan966x_port_stp_state_set(struct lan966x_port *port, u8 state) +{ + struct lan966x *lan966x = port->lan966x; + bool learn_ena = false; + + if ((state == BR_STATE_FORWARDING || state == BR_STATE_LEARNING) && + port->learn_ena) + learn_ena = true; + + if (state == BR_STATE_FORWARDING) + lan966x->bridge_fwd_mask |= BIT(port->chip_port); + else + lan966x->bridge_fwd_mask &= ~BIT(port->chip_port); + + 
lan_rmw(ANA_PORT_CFG_LEARN_ENA_SET(learn_ena), + ANA_PORT_CFG_LEARN_ENA, + lan966x, ANA_PORT_CFG(port->chip_port)); + + lan966x_update_fwd_mask(lan966x); +} + +void lan966x_port_ageing_set(struct lan966x_port *port, + unsigned long ageing_clock_t) +{ + unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t); + u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000; + + lan966x_mac_set_ageing(port->lan966x, ageing_time); +} + +static void lan966x_port_mc_set(struct lan966x_port *port, bool mcast_ena) +{ + struct lan966x *lan966x = port->lan966x; + + port->mcast_ena = mcast_ena; + if (mcast_ena) + lan966x_mdb_restore_entries(lan966x); + else + lan966x_mdb_clear_entries(lan966x); + + lan_rmw(ANA_CPU_FWD_CFG_IGMP_REDIR_ENA_SET(mcast_ena) | + ANA_CPU_FWD_CFG_MLD_REDIR_ENA_SET(mcast_ena) | + ANA_CPU_FWD_CFG_IPMC_CTRL_COPY_ENA_SET(mcast_ena), + ANA_CPU_FWD_CFG_IGMP_REDIR_ENA | + ANA_CPU_FWD_CFG_MLD_REDIR_ENA | + ANA_CPU_FWD_CFG_IPMC_CTRL_COPY_ENA, + lan966x, ANA_CPU_FWD_CFG(port->chip_port)); + + lan966x_port_set_mcast_ip_flood(port, PGID_MCIPV4); + lan966x_port_set_mcast_ip_flood(port, PGID_MCIPV6); +} + +static int lan966x_port_attr_set(struct net_device *dev, const void *ctx, + const struct switchdev_attr *attr, + struct netlink_ext_ack *extack) +{ + struct lan966x_port *port = netdev_priv(dev); + int err = 0; + + if (ctx && ctx != port) + return 0; + + switch (attr->id) { + case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS: + lan966x_port_bridge_flags(port, attr->u.brport_flags); + break; + case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS: + err = lan966x_port_pre_bridge_flags(port, attr->u.brport_flags); + break; + case SWITCHDEV_ATTR_ID_PORT_STP_STATE: + lan966x_port_stp_state_set(port, attr->u.stp_state); + break; + case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME: + lan966x_port_ageing_set(port, attr->u.ageing_time); + break; + case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING: + lan966x_vlan_port_set_vlan_aware(port, attr->u.vlan_filtering); + lan966x_vlan_port_apply(port); + break; + case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED: + lan966x_port_mc_set(port, !attr->u.mc_disabled); + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +static int lan966x_port_bridge_join(struct lan966x_port *port, + struct net_device *brport_dev, + struct net_device *bridge, + struct netlink_ext_ack *extack) +{ + struct switchdev_brport_flags flags = {0}; + struct lan966x *lan966x = port->lan966x; + struct net_device *dev = port->dev; + int err; + + if (!lan966x->bridge_mask) { + lan966x->bridge = bridge; + } else { + if (lan966x->bridge != bridge) { + NL_SET_ERR_MSG_MOD(extack, "Not allow to add port to different bridge"); + return -ENODEV; + } + } + + err = switchdev_bridge_port_offload(brport_dev, dev, port, + &lan966x_switchdev_nb, + &lan966x_switchdev_blocking_nb, + false, extack); + if (err) + return err; + + lan966x->bridge_mask |= BIT(port->chip_port); + + flags.mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD; + flags.val = flags.mask; + lan966x_port_bridge_flags(port, flags); + + return 0; +} + +static void lan966x_port_bridge_leave(struct lan966x_port *port, + struct net_device *bridge) +{ + struct switchdev_brport_flags flags = {0}; + struct lan966x *lan966x = port->lan966x; + + flags.mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD; + flags.val = flags.mask & ~BR_LEARNING; + lan966x_port_bridge_flags(port, flags); + + lan966x->bridge_mask &= ~BIT(port->chip_port); + + if (!lan966x->bridge_mask) + lan966x->bridge = NULL; + + /* Set the port back to host mode 
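+	 * (VLAN unaware, with the default host PVID restored)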
*/ + lan966x_vlan_port_set_vlan_aware(port, false); + lan966x_vlan_port_set_vid(port, HOST_PVID, false, false); + lan966x_vlan_port_apply(port); +} + +int lan966x_port_changeupper(struct net_device *dev, + struct net_device *brport_dev, + struct netdev_notifier_changeupper_info *info) +{ + struct lan966x_port *port = netdev_priv(dev); + struct netlink_ext_ack *extack; + int err = 0; + + extack = netdev_notifier_info_to_extack(&info->info); + + if (netif_is_bridge_master(info->upper_dev)) { + if (info->linking) + err = lan966x_port_bridge_join(port, brport_dev, + info->upper_dev, + extack); + else + lan966x_port_bridge_leave(port, info->upper_dev); + } + + if (netif_is_lag_master(info->upper_dev)) { + if (info->linking) + err = lan966x_lag_port_join(port, info->upper_dev, + info->upper_dev, + extack); + else + lan966x_lag_port_leave(port, info->upper_dev); + } + + return err; +} + +int lan966x_port_prechangeupper(struct net_device *dev, + struct net_device *brport_dev, + struct netdev_notifier_changeupper_info *info) +{ + struct lan966x_port *port = netdev_priv(dev); + int err = NOTIFY_DONE; + + if (netif_is_bridge_master(info->upper_dev) && !info->linking) { + switchdev_bridge_port_unoffload(port->dev, port, NULL, NULL); + lan966x_fdb_flush_workqueue(port->lan966x); + } + + if (netif_is_lag_master(info->upper_dev)) { + err = lan966x_lag_port_prechangeupper(dev, info); + if (err || info->linking) + return err; + + switchdev_bridge_port_unoffload(brport_dev, port, NULL, NULL); + lan966x_fdb_flush_workqueue(port->lan966x); + } + + return err; +} + +static int lan966x_foreign_bridging_check(struct net_device *upper, + bool *has_foreign, + bool *seen_lan966x, + struct netlink_ext_ack *extack) +{ + struct lan966x *lan966x = NULL; + struct net_device *dev; + struct list_head *iter; + + if (!netif_is_bridge_master(upper) && + !netif_is_lag_master(upper)) + return 0; + + netdev_for_each_lower_dev(upper, dev, iter) { + if (lan966x_netdevice_check(dev)) { + struct lan966x_port *port = netdev_priv(dev); + + if (lan966x) { + /* Upper already has at least one port of a + * lan966x switch inside it, check that it's + * the same instance of the driver. 
+ */ + if (port->lan966x != lan966x) { + NL_SET_ERR_MSG_MOD(extack, + "Bridging between multiple lan966x switches disallowed"); + return -EINVAL; + } + } else { + /* This is the first lan966x port inside this + * upper device + */ + lan966x = port->lan966x; + *seen_lan966x = true; + } + } else if (netif_is_lag_master(dev)) { + /* Allow to have bond interfaces that have only lan966x + * devices + */ + if (lan966x_foreign_bridging_check(dev, has_foreign, + seen_lan966x, + extack)) + return -EINVAL; + } else { + *has_foreign = true; + } + + if (*seen_lan966x && *has_foreign) { + NL_SET_ERR_MSG_MOD(extack, + "Bridging lan966x ports with foreign interfaces disallowed"); + return -EINVAL; + } + } + + return 0; +} + +static int lan966x_bridge_check(struct net_device *dev, + struct netdev_notifier_changeupper_info *info) +{ + bool has_foreign = false; + bool seen_lan966x = false; + + return lan966x_foreign_bridging_check(info->upper_dev, + &has_foreign, + &seen_lan966x, + info->info.extack); +} + +static int lan966x_netdevice_port_event(struct net_device *dev, + struct notifier_block *nb, + unsigned long event, void *ptr) +{ + int err = 0; + + if (!lan966x_netdevice_check(dev)) { + switch (event) { + case NETDEV_CHANGEUPPER: + case NETDEV_PRECHANGEUPPER: + err = lan966x_bridge_check(dev, ptr); + if (err) + return err; + + if (netif_is_lag_master(dev)) { + if (event == NETDEV_CHANGEUPPER) + err = lan966x_lag_netdev_changeupper(dev, + ptr); + else + err = lan966x_lag_netdev_prechangeupper(dev, + ptr); + + return err; + } + break; + default: + return 0; + } + + return 0; + } + + switch (event) { + case NETDEV_PRECHANGEUPPER: + err = lan966x_port_prechangeupper(dev, dev, ptr); + break; + case NETDEV_CHANGEUPPER: + err = lan966x_bridge_check(dev, ptr); + if (err) + return err; + + err = lan966x_port_changeupper(dev, dev, ptr); + break; + case NETDEV_CHANGELOWERSTATE: + err = lan966x_lag_port_changelowerstate(dev, ptr); + break; + } + + return err; +} + +static int lan966x_netdevice_event(struct notifier_block *nb, + unsigned long event, void *ptr) +{ + struct net_device *dev = netdev_notifier_info_to_dev(ptr); + int ret; + + ret = lan966x_netdevice_port_event(dev, nb, event, ptr); + + return notifier_from_errno(ret); +} + +static bool lan966x_foreign_dev_check(const struct net_device *dev, + const struct net_device *foreign_dev) +{ + struct lan966x_port *port = netdev_priv(dev); + struct lan966x *lan966x = port->lan966x; + int i; + + if (netif_is_bridge_master(foreign_dev)) + if (lan966x->bridge == foreign_dev) + return false; + + if (netif_is_lag_master(foreign_dev)) + for (i = 0; i < lan966x->num_phys_ports; ++i) + if (lan966x->ports[i] && + lan966x->ports[i]->bond == foreign_dev) + return false; + + return true; +} + +static int lan966x_switchdev_event(struct notifier_block *nb, + unsigned long event, void *ptr) +{ + struct net_device *dev = switchdev_notifier_info_to_dev(ptr); + int err; + + switch (event) { + case SWITCHDEV_PORT_ATTR_SET: + err = switchdev_handle_port_attr_set(dev, ptr, + lan966x_netdevice_check, + lan966x_port_attr_set); + return notifier_from_errno(err); + case SWITCHDEV_FDB_ADD_TO_DEVICE: + case SWITCHDEV_FDB_DEL_TO_DEVICE: + err = switchdev_handle_fdb_event_to_device(dev, event, ptr, + lan966x_netdevice_check, + lan966x_foreign_dev_check, + lan966x_handle_fdb); + return notifier_from_errno(err); + } + + return NOTIFY_DONE; +} + +static int lan966x_handle_port_vlan_add(struct lan966x_port *port, + const struct switchdev_obj *obj) +{ + const struct switchdev_obj_port_vlan *v = 
SWITCHDEV_OBJ_PORT_VLAN(obj); + struct lan966x *lan966x = port->lan966x; + + if (!netif_is_bridge_master(obj->orig_dev)) + lan966x_vlan_port_add_vlan(port, v->vid, + v->flags & BRIDGE_VLAN_INFO_PVID, + v->flags & BRIDGE_VLAN_INFO_UNTAGGED); + else + lan966x_vlan_cpu_add_vlan(lan966x, v->vid); + + return 0; +} + +static int lan966x_handle_port_obj_add(struct net_device *dev, const void *ctx, + const struct switchdev_obj *obj, + struct netlink_ext_ack *extack) +{ + struct lan966x_port *port = netdev_priv(dev); + int err; + + if (ctx && ctx != port) + return 0; + + switch (obj->id) { + case SWITCHDEV_OBJ_ID_PORT_VLAN: + err = lan966x_handle_port_vlan_add(port, obj); + break; + case SWITCHDEV_OBJ_ID_PORT_MDB: + case SWITCHDEV_OBJ_ID_HOST_MDB: + err = lan966x_handle_port_mdb_add(port, obj); + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +static int lan966x_handle_port_vlan_del(struct lan966x_port *port, + const struct switchdev_obj *obj) +{ + const struct switchdev_obj_port_vlan *v = SWITCHDEV_OBJ_PORT_VLAN(obj); + struct lan966x *lan966x = port->lan966x; + + if (!netif_is_bridge_master(obj->orig_dev)) + lan966x_vlan_port_del_vlan(port, v->vid); + else + lan966x_vlan_cpu_del_vlan(lan966x, v->vid); + + return 0; +} + +static int lan966x_handle_port_obj_del(struct net_device *dev, const void *ctx, + const struct switchdev_obj *obj) +{ + struct lan966x_port *port = netdev_priv(dev); + int err; + + if (ctx && ctx != port) + return 0; + + switch (obj->id) { + case SWITCHDEV_OBJ_ID_PORT_VLAN: + err = lan966x_handle_port_vlan_del(port, obj); + break; + case SWITCHDEV_OBJ_ID_PORT_MDB: + case SWITCHDEV_OBJ_ID_HOST_MDB: + err = lan966x_handle_port_mdb_del(port, obj); + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +static int lan966x_switchdev_blocking_event(struct notifier_block *nb, + unsigned long event, + void *ptr) +{ + struct net_device *dev = switchdev_notifier_info_to_dev(ptr); + int err; + + switch (event) { + case SWITCHDEV_PORT_OBJ_ADD: + err = switchdev_handle_port_obj_add(dev, ptr, + lan966x_netdevice_check, + lan966x_handle_port_obj_add); + return notifier_from_errno(err); + case SWITCHDEV_PORT_OBJ_DEL: + err = switchdev_handle_port_obj_del(dev, ptr, + lan966x_netdevice_check, + lan966x_handle_port_obj_del); + return notifier_from_errno(err); + case SWITCHDEV_PORT_ATTR_SET: + err = switchdev_handle_port_attr_set(dev, ptr, + lan966x_netdevice_check, + lan966x_port_attr_set); + return notifier_from_errno(err); + } + + return NOTIFY_DONE; +} + +static struct notifier_block lan966x_netdevice_nb __read_mostly = { + .notifier_call = lan966x_netdevice_event, +}; + +struct notifier_block lan966x_switchdev_nb __read_mostly = { + .notifier_call = lan966x_switchdev_event, +}; + +struct notifier_block lan966x_switchdev_blocking_nb __read_mostly = { + .notifier_call = lan966x_switchdev_blocking_event, +}; + +void lan966x_register_notifier_blocks(void) +{ + register_netdevice_notifier(&lan966x_netdevice_nb); + register_switchdev_notifier(&lan966x_switchdev_nb); + register_switchdev_blocking_notifier(&lan966x_switchdev_blocking_nb); +} + +void lan966x_unregister_notifier_blocks(void) +{ + unregister_switchdev_blocking_notifier(&lan966x_switchdev_blocking_nb); + unregister_switchdev_notifier(&lan966x_switchdev_nb); + unregister_netdevice_notifier(&lan966x_netdevice_nb); +} diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_taprio.c b/drivers/net/ethernet/microchip/lan966x/lan966x_taprio.c new file mode 100644 index 000000000..3f5b21206 --- 
/dev/null +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_taprio.c @@ -0,0 +1,528 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include "lan966x_main.h" + +#define LAN966X_TAPRIO_TIMEOUT_MS 1000 +#define LAN966X_TAPRIO_ENTRIES_PER_PORT 2 + +/* Minimum supported cycle time in nanoseconds */ +#define LAN966X_TAPRIO_MIN_CYCLE_TIME_NS NSEC_PER_USEC + +/* Maximum supported cycle time in nanoseconds */ +#define LAN966X_TAPRIO_MAX_CYCLE_TIME_NS (NSEC_PER_SEC - 1) + +/* Total number of TAS GCL entries */ +#define LAN966X_TAPRIO_NUM_GCL 256 + +/* TAPRIO link speeds for calculation of guard band */ +enum lan966x_taprio_link_speed { + LAN966X_TAPRIO_SPEED_NO_GB, + LAN966X_TAPRIO_SPEED_10, + LAN966X_TAPRIO_SPEED_100, + LAN966X_TAPRIO_SPEED_1000, + LAN966X_TAPRIO_SPEED_2500, +}; + +/* TAPRIO list states */ +enum lan966x_taprio_state { + LAN966X_TAPRIO_STATE_ADMIN, + LAN966X_TAPRIO_STATE_ADVANCING, + LAN966X_TAPRIO_STATE_PENDING, + LAN966X_TAPRIO_STATE_OPERATING, + LAN966X_TAPRIO_STATE_TERMINATING, + LAN966X_TAPRIO_STATE_MAX, +}; + +/* TAPRIO GCL command */ +enum lan966x_taprio_gcl_cmd { + LAN966X_TAPRIO_GCL_CMD_SET_GATE_STATES = 0, +}; + +static u32 lan966x_taprio_list_index(struct lan966x_port *port, u8 entry) +{ + return port->chip_port * LAN966X_TAPRIO_ENTRIES_PER_PORT + entry; +} + +static u32 lan966x_taprio_list_state_get(struct lan966x_port *port) +{ + struct lan966x *lan966x = port->lan966x; + u32 val; + + val = lan_rd(lan966x, QSYS_TAS_LST); + return QSYS_TAS_LST_LIST_STATE_GET(val); +} + +static u32 lan966x_taprio_list_index_state_get(struct lan966x_port *port, + u32 list) +{ + struct lan966x *lan966x = port->lan966x; + + lan_rmw(QSYS_TAS_CFG_CTRL_LIST_NUM_SET(list), + QSYS_TAS_CFG_CTRL_LIST_NUM, + lan966x, QSYS_TAS_CFG_CTRL); + + return lan966x_taprio_list_state_get(port); +} + +static void lan966x_taprio_list_state_set(struct lan966x_port *port, + u32 state) +{ + struct lan966x *lan966x = port->lan966x; + + lan_rmw(QSYS_TAS_LST_LIST_STATE_SET(state), + QSYS_TAS_LST_LIST_STATE, + lan966x, QSYS_TAS_LST); +} + +static int lan966x_taprio_list_shutdown(struct lan966x_port *port, + u32 list) +{ + struct lan966x *lan966x = port->lan966x; + bool pending, operating; + unsigned long end; + u32 state; + + end = jiffies + msecs_to_jiffies(LAN966X_TAPRIO_TIMEOUT_MS); + /* It is required to try multiple times to set the state of list, + * because the HW can overwrite this. + */ + do { + state = lan966x_taprio_list_state_get(port); + + pending = false; + operating = false; + + if (state == LAN966X_TAPRIO_STATE_ADVANCING || + state == LAN966X_TAPRIO_STATE_PENDING) { + lan966x_taprio_list_state_set(port, + LAN966X_TAPRIO_STATE_ADMIN); + pending = true; + } + + if (state == LAN966X_TAPRIO_STATE_OPERATING) { + lan966x_taprio_list_state_set(port, + LAN966X_TAPRIO_STATE_TERMINATING); + operating = true; + } + + /* If the entry was in pending and now gets in admin, then there + * is nothing else to do, so just bail out + */ + state = lan966x_taprio_list_state_get(port); + if (pending && + state == LAN966X_TAPRIO_STATE_ADMIN) + return 0; + + /* If the list was in operating and now is in terminating or + * admin, then is OK to exit but it needs to wait until the list + * will get in admin. It is not required to set the state + * again. 
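+		 * (the second poll loop below only waits for the ADMIN state)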
+ */ + if (operating && + (state == LAN966X_TAPRIO_STATE_TERMINATING || + state == LAN966X_TAPRIO_STATE_ADMIN)) + break; + + } while (!time_after(jiffies, end)); + + end = jiffies + msecs_to_jiffies(LAN966X_TAPRIO_TIMEOUT_MS); + do { + state = lan966x_taprio_list_state_get(port); + if (state == LAN966X_TAPRIO_STATE_ADMIN) + break; + + } while (!time_after(jiffies, end)); + + /* If the list was in operating mode, it could be stopped while some + * queues where closed, so make sure to restore "all-queues-open" + */ + if (operating) { + lan_wr(QSYS_TAS_GS_CTRL_HSCH_POS_SET(port->chip_port), + lan966x, QSYS_TAS_GS_CTRL); + + lan_wr(QSYS_TAS_GATE_STATE_TAS_GATE_STATE_SET(0xff), + lan966x, QSYS_TAS_GATE_STATE); + } + + return 0; +} + +static int lan966x_taprio_shutdown(struct lan966x_port *port) +{ + u32 i, list, state; + int err; + + for (i = 0; i < LAN966X_TAPRIO_ENTRIES_PER_PORT; ++i) { + list = lan966x_taprio_list_index(port, i); + state = lan966x_taprio_list_index_state_get(port, list); + if (state == LAN966X_TAPRIO_STATE_ADMIN) + continue; + + err = lan966x_taprio_list_shutdown(port, list); + if (err) + return err; + } + + return 0; +} + +/* Find a suitable list for a new schedule. First priority is a list in state + * pending. Second priority is a list in state admin. + */ +static int lan966x_taprio_find_list(struct lan966x_port *port, + struct tc_taprio_qopt_offload *qopt, + int *new_list, int *obs_list) +{ + int state[LAN966X_TAPRIO_ENTRIES_PER_PORT]; + int list[LAN966X_TAPRIO_ENTRIES_PER_PORT]; + int err, oper = -1; + u32 i; + + *new_list = -1; + *obs_list = -1; + + /* If there is already an entry in operating mode, return this list in + * obs_list, such that when the new list will get activated the + * operating list will be stopped. In this way is possible to have + * smooth transitions between the lists + */ + for (i = 0; i < LAN966X_TAPRIO_ENTRIES_PER_PORT; ++i) { + list[i] = lan966x_taprio_list_index(port, i); + state[i] = lan966x_taprio_list_index_state_get(port, list[i]); + if (state[i] == LAN966X_TAPRIO_STATE_OPERATING) + oper = list[i]; + } + + for (i = 0; i < LAN966X_TAPRIO_ENTRIES_PER_PORT; ++i) { + if (state[i] == LAN966X_TAPRIO_STATE_PENDING) { + err = lan966x_taprio_shutdown(port); + if (err) + return err; + + *new_list = list[i]; + *obs_list = (oper == -1) ? *new_list : oper; + return 0; + } + } + + for (i = 0; i < LAN966X_TAPRIO_ENTRIES_PER_PORT; ++i) { + if (state[i] == LAN966X_TAPRIO_STATE_ADMIN) { + *new_list = list[i]; + *obs_list = (oper == -1) ? 
*new_list : oper; + return 0; + } + } + + return -ENOSPC; +} + +static int lan966x_taprio_check(struct tc_taprio_qopt_offload *qopt) +{ + u64 total_time = 0; + u32 i; + + /* This is not supported by th HW */ + if (qopt->cycle_time_extension) + return -EOPNOTSUPP; + + /* There is a limited number of gcl entries that can be used, they are + * shared by all ports + */ + if (qopt->num_entries > LAN966X_TAPRIO_NUM_GCL) + return -EINVAL; + + /* Don't allow cycle times bigger than 1 sec or smaller than 1 usec */ + if (qopt->cycle_time < LAN966X_TAPRIO_MIN_CYCLE_TIME_NS || + qopt->cycle_time > LAN966X_TAPRIO_MAX_CYCLE_TIME_NS) + return -EINVAL; + + for (i = 0; i < qopt->num_entries; ++i) { + struct tc_taprio_sched_entry *entry = &qopt->entries[i]; + + /* Don't allow intervals bigger than 1 sec or smaller than 1 + * usec + */ + if (entry->interval < LAN966X_TAPRIO_MIN_CYCLE_TIME_NS || + entry->interval > LAN966X_TAPRIO_MAX_CYCLE_TIME_NS) + return -EINVAL; + + if (qopt->entries[i].command != TC_TAPRIO_CMD_SET_GATES) + return -EINVAL; + + total_time += qopt->entries[i].interval; + } + + /* Don't allow the total time of intervals be bigger than 1 sec */ + if (total_time > LAN966X_TAPRIO_MAX_CYCLE_TIME_NS) + return -EINVAL; + + /* The HW expects that the cycle time to be at least as big as sum of + * each interval of gcl + */ + if (qopt->cycle_time < total_time) + return -EINVAL; + + return 0; +} + +static int lan966x_taprio_gcl_free_get(struct lan966x_port *port, + unsigned long *free_list) +{ + struct lan966x *lan966x = port->lan966x; + u32 num_free, state, list; + u32 base, next, max_list; + + /* By default everything is free */ + bitmap_fill(free_list, LAN966X_TAPRIO_NUM_GCL); + num_free = LAN966X_TAPRIO_NUM_GCL; + + /* Iterate over all gcl entries and find out which are free. And mark + * those that are not free. 
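+	 * Each non-admin list is walked from its LIST_BASE_ADDR by following
+	 * the NEXT_GCL pointers until the chain wraps back to its base.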
+ */ + max_list = lan966x->num_phys_ports * LAN966X_TAPRIO_ENTRIES_PER_PORT; + for (list = 0; list < max_list; ++list) { + state = lan966x_taprio_list_index_state_get(port, list); + if (state == LAN966X_TAPRIO_STATE_ADMIN) + continue; + + base = lan_rd(lan966x, QSYS_TAS_LIST_CFG); + base = QSYS_TAS_LIST_CFG_LIST_BASE_ADDR_GET(base); + next = base; + + do { + clear_bit(next, free_list); + num_free--; + + lan_rmw(QSYS_TAS_CFG_CTRL_GCL_ENTRY_NUM_SET(next), + QSYS_TAS_CFG_CTRL_GCL_ENTRY_NUM, + lan966x, QSYS_TAS_CFG_CTRL); + + next = lan_rd(lan966x, QSYS_TAS_GCL_CT_CFG2); + next = QSYS_TAS_GCL_CT_CFG2_NEXT_GCL_GET(next); + } while (base != next); + } + + return num_free; +} + +static void lan966x_taprio_gcl_setup_entry(struct lan966x_port *port, + struct tc_taprio_sched_entry *entry, + u32 next_entry) +{ + struct lan966x *lan966x = port->lan966x; + + /* Setup a single gcl entry */ + lan_wr(QSYS_TAS_GCL_CT_CFG_GATE_STATE_SET(entry->gate_mask) | + QSYS_TAS_GCL_CT_CFG_HSCH_POS_SET(port->chip_port) | + QSYS_TAS_GCL_CT_CFG_OP_TYPE_SET(LAN966X_TAPRIO_GCL_CMD_SET_GATE_STATES), + lan966x, QSYS_TAS_GCL_CT_CFG); + + lan_wr(QSYS_TAS_GCL_CT_CFG2_PORT_PROFILE_SET(port->chip_port) | + QSYS_TAS_GCL_CT_CFG2_NEXT_GCL_SET(next_entry), + lan966x, QSYS_TAS_GCL_CT_CFG2); + + lan_wr(entry->interval, lan966x, QSYS_TAS_GCL_TM_CFG); +} + +static int lan966x_taprio_gcl_setup(struct lan966x_port *port, + struct tc_taprio_qopt_offload *qopt, + int list) +{ + DECLARE_BITMAP(free_list, LAN966X_TAPRIO_NUM_GCL); + struct lan966x *lan966x = port->lan966x; + u32 i, base, next; + + if (lan966x_taprio_gcl_free_get(port, free_list) < qopt->num_entries) + return -ENOSPC; + + /* Select list */ + lan_rmw(QSYS_TAS_CFG_CTRL_LIST_NUM_SET(list), + QSYS_TAS_CFG_CTRL_LIST_NUM, + lan966x, QSYS_TAS_CFG_CTRL); + + /* Setup the address of the first gcl entry */ + base = find_first_bit(free_list, LAN966X_TAPRIO_NUM_GCL); + lan_rmw(QSYS_TAS_LIST_CFG_LIST_BASE_ADDR_SET(base), + QSYS_TAS_LIST_CFG_LIST_BASE_ADDR, + lan966x, QSYS_TAS_LIST_CFG); + + /* Iterate over entries and add them to the gcl list */ + next = base; + for (i = 0; i < qopt->num_entries; ++i) { + lan_rmw(QSYS_TAS_CFG_CTRL_GCL_ENTRY_NUM_SET(next), + QSYS_TAS_CFG_CTRL_GCL_ENTRY_NUM, + lan966x, QSYS_TAS_CFG_CTRL); + + /* If the entry is last, point back to the start of the list */ + if (i == qopt->num_entries - 1) + next = base; + else + next = find_next_bit(free_list, LAN966X_TAPRIO_NUM_GCL, + next + 1); + + lan966x_taprio_gcl_setup_entry(port, &qopt->entries[i], next); + } + + return 0; +} + +/* Calculate new base_time based on cycle_time. The HW recommends to have the + * new base time at least 2 * cycle type + current time + */ +static void lan966x_taprio_new_base_time(struct lan966x *lan966x, + const u32 cycle_time, + const ktime_t org_base_time, + ktime_t *new_base_time) +{ + ktime_t current_time, threshold_time; + struct timespec64 ts; + + /* Get the current time and calculate the threshold_time */ + lan966x_ptp_gettime64(&lan966x->phc[LAN966X_PHC_PORT].info, &ts); + current_time = timespec64_to_ktime(ts); + threshold_time = current_time + (2 * cycle_time); + + /* If the org_base_time is in enough in future just use it */ + if (org_base_time >= threshold_time) { + *new_base_time = org_base_time; + return; + } + + /* If the org_base_time is smaller than current_time, calculate the new + * base time as following. 
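+	 * Worked example (hypothetical numbers, shown in ms for readability):
+	 * with cycle_time = 2ms, org_base_time = 1ms and current_time = 10ms,
+	 * threshold_time = 14ms, rem = 2ms - (9ms % 2ms) = 1ms, and the new
+	 * base time becomes 15ms: the first cycle boundary at or after
+	 * threshold_time that stays aligned to org_base_time.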
+ */ + if (org_base_time <= current_time) { + u64 tmp = current_time - org_base_time; + u32 rem = 0; + + if (tmp > cycle_time) + div_u64_rem(tmp, cycle_time, &rem); + rem = cycle_time - rem; + *new_base_time = threshold_time + rem; + return; + } + + /* The only left place for org_base_time is between current_time and + * threshold_time. In this case the new_base_time is calculated like + * org_base_time + 2 * cycletime + */ + *new_base_time = org_base_time + 2 * cycle_time; +} + +int lan966x_taprio_speed_set(struct lan966x_port *port, int speed) +{ + struct lan966x *lan966x = port->lan966x; + u8 taprio_speed; + + switch (speed) { + case SPEED_10: + taprio_speed = LAN966X_TAPRIO_SPEED_10; + break; + case SPEED_100: + taprio_speed = LAN966X_TAPRIO_SPEED_100; + break; + case SPEED_1000: + taprio_speed = LAN966X_TAPRIO_SPEED_1000; + break; + case SPEED_2500: + taprio_speed = LAN966X_TAPRIO_SPEED_2500; + break; + default: + return -EINVAL; + } + + lan_rmw(QSYS_TAS_PROFILE_CFG_LINK_SPEED_SET(taprio_speed), + QSYS_TAS_PROFILE_CFG_LINK_SPEED, + lan966x, QSYS_TAS_PROFILE_CFG(port->chip_port)); + + return 0; +} + +int lan966x_taprio_add(struct lan966x_port *port, + struct tc_taprio_qopt_offload *qopt) +{ + struct lan966x *lan966x = port->lan966x; + int err, new_list, obs_list; + struct timespec64 ts; + ktime_t base_time; + + err = lan966x_taprio_check(qopt); + if (err) + return err; + + err = lan966x_taprio_find_list(port, qopt, &new_list, &obs_list); + if (err) + return err; + + err = lan966x_taprio_gcl_setup(port, qopt, new_list); + if (err) + return err; + + lan966x_taprio_new_base_time(lan966x, qopt->cycle_time, + qopt->base_time, &base_time); + + ts = ktime_to_timespec64(base_time); + lan_wr(QSYS_TAS_BT_NSEC_NSEC_SET(ts.tv_nsec), + lan966x, QSYS_TAS_BT_NSEC); + + lan_wr(lower_32_bits(ts.tv_sec), + lan966x, QSYS_TAS_BT_SEC_LSB); + + lan_wr(QSYS_TAS_BT_SEC_MSB_SEC_MSB_SET(upper_32_bits(ts.tv_sec)), + lan966x, QSYS_TAS_BT_SEC_MSB); + + lan_wr(qopt->cycle_time, lan966x, QSYS_TAS_CT_CFG); + + lan_rmw(QSYS_TAS_STARTUP_CFG_OBSOLETE_IDX_SET(obs_list), + QSYS_TAS_STARTUP_CFG_OBSOLETE_IDX, + lan966x, QSYS_TAS_STARTUP_CFG); + + /* Start list processing */ + lan_rmw(QSYS_TAS_LST_LIST_STATE_SET(LAN966X_TAPRIO_STATE_ADVANCING), + QSYS_TAS_LST_LIST_STATE, + lan966x, QSYS_TAS_LST); + + return err; +} + +int lan966x_taprio_del(struct lan966x_port *port) +{ + return lan966x_taprio_shutdown(port); +} + +void lan966x_taprio_init(struct lan966x *lan966x) +{ + int num_taprio_lists; + int p; + + lan_wr(QSYS_TAS_STM_CFG_REVISIT_DLY_SET((256 * 1000) / + lan966x_ptp_get_period_ps()), + lan966x, QSYS_TAS_STM_CFG); + + num_taprio_lists = lan966x->num_phys_ports * + LAN966X_TAPRIO_ENTRIES_PER_PORT; + + /* For now we always use guard band on all queues */ + lan_rmw(QSYS_TAS_CFG_CTRL_LIST_NUM_MAX_SET(num_taprio_lists) | + QSYS_TAS_CFG_CTRL_ALWAYS_GB_SCH_Q_SET(1), + QSYS_TAS_CFG_CTRL_LIST_NUM_MAX | + QSYS_TAS_CFG_CTRL_ALWAYS_GB_SCH_Q, + lan966x, QSYS_TAS_CFG_CTRL); + + for (p = 0; p < lan966x->num_phys_ports; p++) + lan_rmw(QSYS_TAS_PROFILE_CFG_PORT_NUM_SET(p), + QSYS_TAS_PROFILE_CFG_PORT_NUM, + lan966x, QSYS_TAS_PROFILE_CFG(p)); +} + +void lan966x_taprio_deinit(struct lan966x *lan966x) +{ + int p; + + for (p = 0; p < lan966x->num_phys_ports; ++p) { + if (!lan966x->ports[p]) + continue; + + lan966x_taprio_del(lan966x->ports[p]); + } +} diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_tbf.c b/drivers/net/ethernet/microchip/lan966x/lan966x_tbf.c new file mode 100644 index 000000000..4555a35d0 --- /dev/null +++ 
b/drivers/net/ethernet/microchip/lan966x/lan966x_tbf.c @@ -0,0 +1,85 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include "lan966x_main.h" + +int lan966x_tbf_add(struct lan966x_port *port, + struct tc_tbf_qopt_offload *qopt) +{ + struct lan966x *lan966x = port->lan966x; + bool root = qopt->parent == TC_H_ROOT; + u32 queue = 0; + u32 cir, cbs; + u32 se_idx; + + if (!root) { + queue = TC_H_MIN(qopt->parent) - 1; + if (queue >= NUM_PRIO_QUEUES) + return -EOPNOTSUPP; + } + + if (root) + se_idx = SE_IDX_PORT + port->chip_port; + else + se_idx = SE_IDX_QUEUE + port->chip_port * NUM_PRIO_QUEUES + queue; + + cir = div_u64(qopt->replace_params.rate.rate_bytes_ps, 1000) * 8; + cbs = qopt->replace_params.max_size; + + /* Rate unit is 100 kbps */ + cir = DIV_ROUND_UP(cir, 100); + /* Avoid using zero rate */ + cir = cir ?: 1; + /* Burst unit is 4kB */ + cbs = DIV_ROUND_UP(cbs, 4096); + /* Avoid using zero burst */ + cbs = cbs ?: 1; + + /* Check that actually the result can be written */ + if (cir > GENMASK(15, 0) || + cbs > GENMASK(6, 0)) + return -EINVAL; + + lan_rmw(QSYS_SE_CFG_SE_AVB_ENA_SET(0) | + QSYS_SE_CFG_SE_FRM_MODE_SET(1), + QSYS_SE_CFG_SE_AVB_ENA | + QSYS_SE_CFG_SE_FRM_MODE, + lan966x, QSYS_SE_CFG(se_idx)); + + lan_wr(QSYS_CIR_CFG_CIR_RATE_SET(cir) | + QSYS_CIR_CFG_CIR_BURST_SET(cbs), + lan966x, QSYS_CIR_CFG(se_idx)); + + return 0; +} + +int lan966x_tbf_del(struct lan966x_port *port, + struct tc_tbf_qopt_offload *qopt) +{ + struct lan966x *lan966x = port->lan966x; + bool root = qopt->parent == TC_H_ROOT; + u32 queue = 0; + u32 se_idx; + + if (!root) { + queue = TC_H_MIN(qopt->parent) - 1; + if (queue >= NUM_PRIO_QUEUES) + return -EOPNOTSUPP; + } + + if (root) + se_idx = SE_IDX_PORT + port->chip_port; + else + se_idx = SE_IDX_QUEUE + port->chip_port * NUM_PRIO_QUEUES + queue; + + lan_rmw(QSYS_SE_CFG_SE_AVB_ENA_SET(0) | + QSYS_SE_CFG_SE_FRM_MODE_SET(0), + QSYS_SE_CFG_SE_AVB_ENA | + QSYS_SE_CFG_SE_FRM_MODE, + lan966x, QSYS_SE_CFG(se_idx)); + + lan_wr(QSYS_CIR_CFG_CIR_RATE_SET(0) | + QSYS_CIR_CFG_CIR_BURST_SET(0), + lan966x, QSYS_CIR_CFG(se_idx)); + + return 0; +} diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_tc.c b/drivers/net/ethernet/microchip/lan966x/lan966x_tc.c new file mode 100644 index 000000000..651d5493a --- /dev/null +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_tc.c @@ -0,0 +1,133 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include <net/pkt_cls.h> + +#include "lan966x_main.h" + +static LIST_HEAD(lan966x_tc_block_cb_list); + +static int lan966x_tc_setup_qdisc_mqprio(struct lan966x_port *port, + struct tc_mqprio_qopt_offload *mqprio) +{ + u8 num_tc = mqprio->qopt.num_tc; + + mqprio->qopt.hw = TC_MQPRIO_HW_OFFLOAD_TCS; + + return num_tc ? lan966x_mqprio_add(port, num_tc) : + lan966x_mqprio_del(port); +} + +static int lan966x_tc_setup_qdisc_taprio(struct lan966x_port *port, + struct tc_taprio_qopt_offload *taprio) +{ + return taprio->enable ? lan966x_taprio_add(port, taprio) : + lan966x_taprio_del(port); +} + +static int lan966x_tc_setup_qdisc_tbf(struct lan966x_port *port, + struct tc_tbf_qopt_offload *qopt) +{ + switch (qopt->command) { + case TC_TBF_REPLACE: + return lan966x_tbf_add(port, qopt); + case TC_TBF_DESTROY: + return lan966x_tbf_del(port, qopt); + default: + return -EOPNOTSUPP; + } + + return -EOPNOTSUPP; +} + +static int lan966x_tc_setup_qdisc_cbs(struct lan966x_port *port, + struct tc_cbs_qopt_offload *qopt) +{ + return qopt->enable ? 
lan966x_cbs_add(port, qopt) : + lan966x_cbs_del(port, qopt); +} + +static int lan966x_tc_setup_qdisc_ets(struct lan966x_port *port, + struct tc_ets_qopt_offload *qopt) +{ + switch (qopt->command) { + case TC_ETS_REPLACE: + return lan966x_ets_add(port, qopt); + case TC_ETS_DESTROY: + return lan966x_ets_del(port, qopt); + default: + return -EOPNOTSUPP; + }; + + return -EOPNOTSUPP; +} + +static int lan966x_tc_block_cb(enum tc_setup_type type, void *type_data, + void *cb_priv, bool ingress) +{ + struct lan966x_port *port = cb_priv; + + switch (type) { + case TC_SETUP_CLSMATCHALL: + return lan966x_tc_matchall(port, type_data, ingress); + default: + return -EOPNOTSUPP; + } +} + +static int lan966x_tc_block_cb_ingress(enum tc_setup_type type, + void *type_data, void *cb_priv) +{ + return lan966x_tc_block_cb(type, type_data, cb_priv, true); +} + +static int lan966x_tc_block_cb_egress(enum tc_setup_type type, + void *type_data, void *cb_priv) +{ + return lan966x_tc_block_cb(type, type_data, cb_priv, false); +} + +static int lan966x_tc_setup_block(struct lan966x_port *port, + struct flow_block_offload *f) +{ + flow_setup_cb_t *cb; + bool ingress; + + if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) { + cb = lan966x_tc_block_cb_ingress; + port->tc.ingress_shared_block = f->block_shared; + ingress = true; + } else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) { + cb = lan966x_tc_block_cb_egress; + ingress = false; + } else { + return -EOPNOTSUPP; + } + + return flow_block_cb_setup_simple(f, &lan966x_tc_block_cb_list, + cb, port, port, ingress); +} + +int lan966x_tc_setup(struct net_device *dev, enum tc_setup_type type, + void *type_data) +{ + struct lan966x_port *port = netdev_priv(dev); + + switch (type) { + case TC_SETUP_QDISC_MQPRIO: + return lan966x_tc_setup_qdisc_mqprio(port, type_data); + case TC_SETUP_QDISC_TAPRIO: + return lan966x_tc_setup_qdisc_taprio(port, type_data); + case TC_SETUP_QDISC_TBF: + return lan966x_tc_setup_qdisc_tbf(port, type_data); + case TC_SETUP_QDISC_CBS: + return lan966x_tc_setup_qdisc_cbs(port, type_data); + case TC_SETUP_QDISC_ETS: + return lan966x_tc_setup_qdisc_ets(port, type_data); + case TC_SETUP_BLOCK: + return lan966x_tc_setup_block(port, type_data); + default: + return -EOPNOTSUPP; + } + + return 0; +} diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_tc_matchall.c b/drivers/net/ethernet/microchip/lan966x/lan966x_tc_matchall.c new file mode 100644 index 000000000..7368433b9 --- /dev/null +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_tc_matchall.c @@ -0,0 +1,95 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include "lan966x_main.h" + +static int lan966x_tc_matchall_add(struct lan966x_port *port, + struct tc_cls_matchall_offload *f, + bool ingress) +{ + struct flow_action_entry *act; + + if (!flow_offload_has_one_action(&f->rule->action)) { + NL_SET_ERR_MSG_MOD(f->common.extack, + "Only once action per filter is supported"); + return -EOPNOTSUPP; + } + + act = &f->rule->action.entries[0]; + switch (act->id) { + case FLOW_ACTION_POLICE: + return lan966x_police_port_add(port, &f->rule->action, act, + f->cookie, ingress, + f->common.extack); + case FLOW_ACTION_MIRRED: + return lan966x_mirror_port_add(port, act, f->cookie, + ingress, f->common.extack); + default: + NL_SET_ERR_MSG_MOD(f->common.extack, + "Unsupported action"); + return -EOPNOTSUPP; + } + + return 0; +} + +static int lan966x_tc_matchall_del(struct lan966x_port *port, + struct tc_cls_matchall_offload *f, + bool ingress) +{ + if (f->cookie == port->tc.police_id) { + 
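+		/* The cookie recorded on the port tells the police and mirror offloads apart */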
return lan966x_police_port_del(port, f->cookie, + f->common.extack); + } else if (f->cookie == port->tc.ingress_mirror_id || + f->cookie == port->tc.egress_mirror_id) { + return lan966x_mirror_port_del(port, ingress, + f->common.extack); + } else { + NL_SET_ERR_MSG_MOD(f->common.extack, + "Unsupported action"); + return -EOPNOTSUPP; + } + + return 0; +} + +static int lan966x_tc_matchall_stats(struct lan966x_port *port, + struct tc_cls_matchall_offload *f, + bool ingress) +{ + if (f->cookie == port->tc.police_id) { + lan966x_police_port_stats(port, &f->stats); + } else if (f->cookie == port->tc.ingress_mirror_id || + f->cookie == port->tc.egress_mirror_id) { + lan966x_mirror_port_stats(port, &f->stats, ingress); + } else { + NL_SET_ERR_MSG_MOD(f->common.extack, + "Unsupported action"); + return -EOPNOTSUPP; + } + + return 0; +} + +int lan966x_tc_matchall(struct lan966x_port *port, + struct tc_cls_matchall_offload *f, + bool ingress) +{ + if (!tc_cls_can_offload_and_chain0(port->dev, &f->common)) { + NL_SET_ERR_MSG_MOD(f->common.extack, + "Only chain zero is supported"); + return -EOPNOTSUPP; + } + + switch (f->command) { + case TC_CLSMATCHALL_REPLACE: + return lan966x_tc_matchall_add(port, f, ingress); + case TC_CLSMATCHALL_DESTROY: + return lan966x_tc_matchall_del(port, f, ingress); + case TC_CLSMATCHALL_STATS: + return lan966x_tc_matchall_stats(port, f, ingress); + default: + return -EOPNOTSUPP; + } + + return 0; +} diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c b/drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c new file mode 100644 index 000000000..3c4466012 --- /dev/null +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c @@ -0,0 +1,323 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include "lan966x_main.h" + +#define VLANACCESS_CMD_IDLE 0 +#define VLANACCESS_CMD_READ 1 +#define VLANACCESS_CMD_WRITE 2 +#define VLANACCESS_CMD_INIT 3 + +static int lan966x_vlan_get_status(struct lan966x *lan966x) +{ + return lan_rd(lan966x, ANA_VLANACCESS); +} + +static int lan966x_vlan_wait_for_completion(struct lan966x *lan966x) +{ + u32 val; + + return readx_poll_timeout(lan966x_vlan_get_status, + lan966x, val, + (val & ANA_VLANACCESS_VLAN_TBL_CMD) == + VLANACCESS_CMD_IDLE, + TABLE_UPDATE_SLEEP_US, TABLE_UPDATE_TIMEOUT_US); +} + +static void lan966x_vlan_set_mask(struct lan966x *lan966x, u16 vid) +{ + u16 mask = lan966x->vlan_mask[vid]; + bool cpu_dis; + + cpu_dis = !(mask & BIT(CPU_PORT)); + + /* Set flags and the VID to configure */ + lan_rmw(ANA_VLANTIDX_VLAN_PGID_CPU_DIS_SET(cpu_dis) | + ANA_VLANTIDX_V_INDEX_SET(vid), + ANA_VLANTIDX_VLAN_PGID_CPU_DIS | + ANA_VLANTIDX_V_INDEX, + lan966x, ANA_VLANTIDX); + + /* Set the vlan port members mask */ + lan_rmw(ANA_VLAN_PORT_MASK_VLAN_PORT_MASK_SET(mask), + ANA_VLAN_PORT_MASK_VLAN_PORT_MASK, + lan966x, ANA_VLAN_PORT_MASK); + + /* Issue a write command */ + lan_rmw(ANA_VLANACCESS_VLAN_TBL_CMD_SET(VLANACCESS_CMD_WRITE), + ANA_VLANACCESS_VLAN_TBL_CMD, + lan966x, ANA_VLANACCESS); + + if (lan966x_vlan_wait_for_completion(lan966x)) + dev_err(lan966x->dev, "Vlan set mask failed\n"); +} + +static void lan966x_vlan_port_add_vlan_mask(struct lan966x_port *port, u16 vid) +{ + struct lan966x *lan966x = port->lan966x; + u8 p = port->chip_port; + + lan966x->vlan_mask[vid] |= BIT(p); + lan966x_vlan_set_mask(lan966x, vid); +} + +static void lan966x_vlan_port_del_vlan_mask(struct lan966x_port *port, u16 vid) +{ + struct lan966x *lan966x = port->lan966x; + u8 p = port->chip_port; + + lan966x->vlan_mask[vid] &= ~BIT(p); + 
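+	/* Write the updated membership mask for this VID to the VLAN table */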
lan966x_vlan_set_mask(lan966x, vid); +} + +static bool lan966x_vlan_port_any_vlan_mask(struct lan966x *lan966x, u16 vid) +{ + return !!(lan966x->vlan_mask[vid] & ~BIT(CPU_PORT)); +} + +static void lan966x_vlan_cpu_add_vlan_mask(struct lan966x *lan966x, u16 vid) +{ + lan966x->vlan_mask[vid] |= BIT(CPU_PORT); + lan966x_vlan_set_mask(lan966x, vid); +} + +static void lan966x_vlan_cpu_del_vlan_mask(struct lan966x *lan966x, u16 vid) +{ + lan966x->vlan_mask[vid] &= ~BIT(CPU_PORT); + lan966x_vlan_set_mask(lan966x, vid); +} + +static void lan966x_vlan_cpu_add_cpu_vlan_mask(struct lan966x *lan966x, u16 vid) +{ + __set_bit(vid, lan966x->cpu_vlan_mask); +} + +static void lan966x_vlan_cpu_del_cpu_vlan_mask(struct lan966x *lan966x, u16 vid) +{ + __clear_bit(vid, lan966x->cpu_vlan_mask); +} + +bool lan966x_vlan_cpu_member_cpu_vlan_mask(struct lan966x *lan966x, u16 vid) +{ + return test_bit(vid, lan966x->cpu_vlan_mask); +} + +static u16 lan966x_vlan_port_get_pvid(struct lan966x_port *port) +{ + struct lan966x *lan966x = port->lan966x; + + if (!(lan966x->bridge_mask & BIT(port->chip_port))) + return HOST_PVID; + + return port->vlan_aware ? port->pvid : UNAWARE_PVID; +} + +int lan966x_vlan_port_set_vid(struct lan966x_port *port, u16 vid, + bool pvid, bool untagged) +{ + struct lan966x *lan966x = port->lan966x; + + /* Egress vlan classification */ + if (untagged && port->vid != vid) { + if (port->vid) { + dev_err(lan966x->dev, + "Port already has a native VLAN: %d\n", + port->vid); + return -EBUSY; + } + port->vid = vid; + } + + /* Default ingress vlan classification */ + if (pvid) + port->pvid = vid; + + return 0; +} + +static void lan966x_vlan_port_remove_vid(struct lan966x_port *port, u16 vid) +{ + if (port->pvid == vid) + port->pvid = 0; + + if (port->vid == vid) + port->vid = 0; +} + +void lan966x_vlan_port_set_vlan_aware(struct lan966x_port *port, + bool vlan_aware) +{ + port->vlan_aware = vlan_aware; +} + +void lan966x_vlan_port_apply(struct lan966x_port *port) +{ + struct lan966x *lan966x = port->lan966x; + u16 pvid; + u32 val; + + pvid = lan966x_vlan_port_get_pvid(port); + + /* Ingress clasification (ANA_PORT_VLAN_CFG) */ + /* Default vlan to classify for untagged frames (may be zero) */ + val = ANA_VLAN_CFG_VLAN_VID_SET(pvid); + if (port->vlan_aware) + val |= ANA_VLAN_CFG_VLAN_AWARE_ENA_SET(1) | + ANA_VLAN_CFG_VLAN_POP_CNT_SET(1); + + lan_rmw(val, + ANA_VLAN_CFG_VLAN_VID | ANA_VLAN_CFG_VLAN_AWARE_ENA | + ANA_VLAN_CFG_VLAN_POP_CNT, + lan966x, ANA_VLAN_CFG(port->chip_port)); + + lan_rmw(DEV_MAC_TAGS_CFG_VLAN_AWR_ENA_SET(port->vlan_aware) | + DEV_MAC_TAGS_CFG_VLAN_DBL_AWR_ENA_SET(port->vlan_aware), + DEV_MAC_TAGS_CFG_VLAN_AWR_ENA | + DEV_MAC_TAGS_CFG_VLAN_DBL_AWR_ENA, + lan966x, DEV_MAC_TAGS_CFG(port->chip_port)); + + /* Drop frames with multicast source address */ + val = ANA_DROP_CFG_DROP_MC_SMAC_ENA_SET(1); + if (port->vlan_aware && !pvid) + /* If port is vlan-aware and tagged, drop untagged and priority + * tagged frames. 
+ */ + val |= ANA_DROP_CFG_DROP_UNTAGGED_ENA_SET(1) | + ANA_DROP_CFG_DROP_PRIO_S_TAGGED_ENA_SET(1) | + ANA_DROP_CFG_DROP_PRIO_C_TAGGED_ENA_SET(1); + + lan_wr(val, lan966x, ANA_DROP_CFG(port->chip_port)); + + /* Egress configuration (REW_TAG_CFG): VLAN tag type to 8021Q */ + val = REW_TAG_CFG_TAG_TPID_CFG_SET(0); + if (port->vlan_aware) { + if (port->vid) + /* Tag all frames except when VID == DEFAULT_VLAN */ + val |= REW_TAG_CFG_TAG_CFG_SET(1); + else + val |= REW_TAG_CFG_TAG_CFG_SET(3); + } + + /* Update only some bits in the register */ + lan_rmw(val, + REW_TAG_CFG_TAG_TPID_CFG | REW_TAG_CFG_TAG_CFG, + lan966x, REW_TAG_CFG(port->chip_port)); + + /* Set default VLAN and tag type to 8021Q */ + lan_rmw(REW_PORT_VLAN_CFG_PORT_TPID_SET(ETH_P_8021Q) | + REW_PORT_VLAN_CFG_PORT_VID_SET(port->vid), + REW_PORT_VLAN_CFG_PORT_TPID | + REW_PORT_VLAN_CFG_PORT_VID, + lan966x, REW_PORT_VLAN_CFG(port->chip_port)); +} + +void lan966x_vlan_port_add_vlan(struct lan966x_port *port, + u16 vid, + bool pvid, + bool untagged) +{ + struct lan966x *lan966x = port->lan966x; + + /* If the CPU(br) is already part of the vlan then add the fdb + * entries in MAC table to copy the frames to the CPU(br). + * If the CPU(br) is not part of the vlan then it would + * just drop the frames. + */ + if (lan966x_vlan_cpu_member_cpu_vlan_mask(lan966x, vid)) { + lan966x_vlan_cpu_add_vlan_mask(lan966x, vid); + lan966x_fdb_write_entries(lan966x, vid); + lan966x_mdb_write_entries(lan966x, vid); + } + + lan966x_vlan_port_set_vid(port, vid, pvid, untagged); + lan966x_vlan_port_add_vlan_mask(port, vid); + lan966x_vlan_port_apply(port); +} + +void lan966x_vlan_port_del_vlan(struct lan966x_port *port, u16 vid) +{ + struct lan966x *lan966x = port->lan966x; + + lan966x_vlan_port_remove_vid(port, vid); + lan966x_vlan_port_del_vlan_mask(port, vid); + lan966x_vlan_port_apply(port); + + /* In case there are no other ports in vlan then remove the CPU from + * that vlan but still keep it in the mask because it may be needed + * again then another port gets added in that vlan + */ + if (!lan966x_vlan_port_any_vlan_mask(lan966x, vid)) { + lan966x_vlan_cpu_del_vlan_mask(lan966x, vid); + lan966x_fdb_erase_entries(lan966x, vid); + lan966x_mdb_erase_entries(lan966x, vid); + } +} + +void lan966x_vlan_cpu_add_vlan(struct lan966x *lan966x, u16 vid) +{ + /* Add an entry in the MAC table for the CPU + * Add the CPU part of the vlan only if there is another port in that + * vlan otherwise all the broadcast frames in that vlan will go to CPU + * even if none of the ports are in the vlan and then the CPU will just + * need to discard these frames. It is required to store this + * information so when a front port is added then it would add also the + * CPU port. 
+ */ + if (lan966x_vlan_port_any_vlan_mask(lan966x, vid)) { + lan966x_vlan_cpu_add_vlan_mask(lan966x, vid); + lan966x_mdb_write_entries(lan966x, vid); + } + + lan966x_vlan_cpu_add_cpu_vlan_mask(lan966x, vid); + lan966x_fdb_write_entries(lan966x, vid); +} + +void lan966x_vlan_cpu_del_vlan(struct lan966x *lan966x, u16 vid) +{ + /* Remove the CPU part of the vlan */ + lan966x_vlan_cpu_del_cpu_vlan_mask(lan966x, vid); + lan966x_vlan_cpu_del_vlan_mask(lan966x, vid); + lan966x_fdb_erase_entries(lan966x, vid); + lan966x_mdb_erase_entries(lan966x, vid); +} + +void lan966x_vlan_init(struct lan966x *lan966x) +{ + u16 port, vid; + + /* Clear VLAN table, by default all ports are members of all VLANS */ + lan_rmw(ANA_VLANACCESS_VLAN_TBL_CMD_SET(VLANACCESS_CMD_INIT), + ANA_VLANACCESS_VLAN_TBL_CMD, + lan966x, ANA_VLANACCESS); + lan966x_vlan_wait_for_completion(lan966x); + + for (vid = 1; vid < VLAN_N_VID; vid++) { + lan966x->vlan_mask[vid] = 0; + lan966x_vlan_set_mask(lan966x, vid); + } + + /* Set all the ports + cpu to be part of HOST_PVID and UNAWARE_PVID */ + lan966x->vlan_mask[HOST_PVID] = + GENMASK(lan966x->num_phys_ports - 1, 0) | BIT(CPU_PORT); + lan966x_vlan_set_mask(lan966x, HOST_PVID); + + lan966x->vlan_mask[UNAWARE_PVID] = + GENMASK(lan966x->num_phys_ports - 1, 0) | BIT(CPU_PORT); + lan966x_vlan_set_mask(lan966x, UNAWARE_PVID); + + lan966x_vlan_cpu_add_cpu_vlan_mask(lan966x, UNAWARE_PVID); + + /* Configure the CPU port to be vlan aware */ + lan_wr(ANA_VLAN_CFG_VLAN_VID_SET(0) | + ANA_VLAN_CFG_VLAN_AWARE_ENA_SET(1) | + ANA_VLAN_CFG_VLAN_POP_CNT_SET(1), + lan966x, ANA_VLAN_CFG(CPU_PORT)); + + /* Set vlan ingress filter mask to all ports */ + lan_wr(GENMASK(lan966x->num_phys_ports, 0), + lan966x, ANA_VLANMASK); + + for (port = 0; port < lan966x->num_phys_ports; port++) { + lan_wr(0, lan966x, REW_PORT_VLAN_CFG(port)); + lan_wr(0, lan966x, REW_TAG_CFG(port)); + } +} diff --git a/drivers/net/ethernet/microchip/sparx5/Kconfig b/drivers/net/ethernet/microchip/sparx5/Kconfig new file mode 100644 index 000000000..cc5e48e1b --- /dev/null +++ b/drivers/net/ethernet/microchip/sparx5/Kconfig @@ -0,0 +1,13 @@ +config SPARX5_SWITCH + tristate "Sparx5 switch driver" + depends on NET_SWITCHDEV + depends on HAS_IOMEM + depends on OF + depends on ARCH_SPARX5 || COMPILE_TEST + depends on PTP_1588_CLOCK_OPTIONAL + depends on BRIDGE || BRIDGE=n + select PHYLINK + select PHY_SPARX5_SERDES + select RESET_CONTROLLER + help + This driver supports the Sparx5 network switch device. diff --git a/drivers/net/ethernet/microchip/sparx5/Makefile b/drivers/net/ethernet/microchip/sparx5/Makefile new file mode 100644 index 000000000..d1c6ad966 --- /dev/null +++ b/drivers/net/ethernet/microchip/sparx5/Makefile @@ -0,0 +1,11 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Makefile for the Microchip Sparx5 network device drivers. +# + +obj-$(CONFIG_SPARX5_SWITCH) += sparx5-switch.o + +sparx5-switch-objs := sparx5_main.o sparx5_packet.o \ + sparx5_netdev.o sparx5_phylink.o sparx5_port.o sparx5_mactable.o sparx5_vlan.o \ + sparx5_switchdev.o sparx5_calendar.o sparx5_ethtool.o sparx5_fdma.o \ + sparx5_ptp.o sparx5_pgid.o sparx5_tc.o sparx5_qos.o diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_calendar.c b/drivers/net/ethernet/microchip/sparx5/sparx5_calendar.c new file mode 100644 index 000000000..76a8bb596 --- /dev/null +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_calendar.c @@ -0,0 +1,596 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Microchip Sparx5 Switch driver + * + * Copyright (c) 2021 Microchip Technology Inc. 
and its subsidiaries. + */ + +#include <linux/module.h> +#include <linux/device.h> + +#include "sparx5_main_regs.h" +#include "sparx5_main.h" + +/* QSYS calendar information */ +#define SPX5_PORTS_PER_CALREG 10 /* Ports mapped in a calendar register */ +#define SPX5_CALBITS_PER_PORT 3 /* Bit per port in calendar register */ + +/* DSM calendar information */ +#define SPX5_DSM_CAL_LEN 64 +#define SPX5_DSM_CAL_EMPTY 0xFFFF +#define SPX5_DSM_CAL_MAX_DEVS_PER_TAXI 13 +#define SPX5_DSM_CAL_TAXIS 8 +#define SPX5_DSM_CAL_BW_LOSS 553 + +#define SPX5_TAXI_PORT_MAX 70 + +#define SPEED_12500 12500 + +/* Maps from taxis to port numbers */ +static u32 sparx5_taxi_ports[SPX5_DSM_CAL_TAXIS][SPX5_DSM_CAL_MAX_DEVS_PER_TAXI] = { + {57, 12, 0, 1, 2, 16, 17, 18, 19, 20, 21, 22, 23}, + {58, 13, 3, 4, 5, 24, 25, 26, 27, 28, 29, 30, 31}, + {59, 14, 6, 7, 8, 32, 33, 34, 35, 36, 37, 38, 39}, + {60, 15, 9, 10, 11, 40, 41, 42, 43, 44, 45, 46, 47}, + {61, 48, 49, 50, 99, 99, 99, 99, 99, 99, 99, 99, 99}, + {62, 51, 52, 53, 99, 99, 99, 99, 99, 99, 99, 99, 99}, + {56, 63, 54, 55, 99, 99, 99, 99, 99, 99, 99, 99, 99}, + {64, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99}, +}; + +struct sparx5_calendar_data { + u32 schedule[SPX5_DSM_CAL_LEN]; + u32 avg_dist[SPX5_DSM_CAL_MAX_DEVS_PER_TAXI]; + u32 taxi_ports[SPX5_DSM_CAL_MAX_DEVS_PER_TAXI]; + u32 taxi_speeds[SPX5_DSM_CAL_MAX_DEVS_PER_TAXI]; + u32 dev_slots[SPX5_DSM_CAL_MAX_DEVS_PER_TAXI]; + u32 new_slots[SPX5_DSM_CAL_LEN]; + u32 temp_sched[SPX5_DSM_CAL_LEN]; + u32 indices[SPX5_DSM_CAL_LEN]; + u32 short_list[SPX5_DSM_CAL_LEN]; + u32 long_list[SPX5_DSM_CAL_LEN]; +}; + +static u32 sparx5_target_bandwidth(struct sparx5 *sparx5) +{ + switch (sparx5->target_ct) { + case SPX5_TARGET_CT_7546: + case SPX5_TARGET_CT_7546TSN: + return 65000; + case SPX5_TARGET_CT_7549: + case SPX5_TARGET_CT_7549TSN: + return 91000; + case SPX5_TARGET_CT_7552: + case SPX5_TARGET_CT_7552TSN: + return 129000; + case SPX5_TARGET_CT_7556: + case SPX5_TARGET_CT_7556TSN: + return 161000; + case SPX5_TARGET_CT_7558: + case SPX5_TARGET_CT_7558TSN: + return 201000; + default: + return 0; + } +} + +/* This is used in calendar configuration */ +enum sparx5_cal_bw { + SPX5_CAL_SPEED_NONE = 0, + SPX5_CAL_SPEED_1G = 1, + SPX5_CAL_SPEED_2G5 = 2, + SPX5_CAL_SPEED_5G = 3, + SPX5_CAL_SPEED_10G = 4, + SPX5_CAL_SPEED_25G = 5, + SPX5_CAL_SPEED_0G5 = 6, + SPX5_CAL_SPEED_12G5 = 7 +}; + +static u32 sparx5_clk_to_bandwidth(enum sparx5_core_clockfreq cclock) +{ + switch (cclock) { + case SPX5_CORE_CLOCK_250MHZ: return 83000; /* 250000 / 3 */ + case SPX5_CORE_CLOCK_500MHZ: return 166000; /* 500000 / 3 */ + case SPX5_CORE_CLOCK_625MHZ: return 208000; /* 625000 / 3 */ + default: return 0; + } + return 0; +} + +static u32 sparx5_cal_speed_to_value(enum sparx5_cal_bw speed) +{ + switch (speed) { + case SPX5_CAL_SPEED_1G: return 1000; + case SPX5_CAL_SPEED_2G5: return 2500; + case SPX5_CAL_SPEED_5G: return 5000; + case SPX5_CAL_SPEED_10G: return 10000; + case SPX5_CAL_SPEED_25G: return 25000; + case SPX5_CAL_SPEED_0G5: return 500; + case SPX5_CAL_SPEED_12G5: return 12500; + default: return 0; + } +} + +static u32 sparx5_bandwidth_to_calendar(u32 bw) +{ + switch (bw) { + case SPEED_10: return SPX5_CAL_SPEED_0G5; + case SPEED_100: return SPX5_CAL_SPEED_0G5; + case SPEED_1000: return SPX5_CAL_SPEED_1G; + case SPEED_2500: return SPX5_CAL_SPEED_2G5; + case SPEED_5000: return SPX5_CAL_SPEED_5G; + case SPEED_10000: return SPX5_CAL_SPEED_10G; + case SPEED_12500: return SPX5_CAL_SPEED_12G5; + case SPEED_25000: return SPX5_CAL_SPEED_25G; + case 
SPEED_UNKNOWN: return SPX5_CAL_SPEED_1G; + default: return SPX5_CAL_SPEED_NONE; + } +} + +static enum sparx5_cal_bw sparx5_get_port_cal_speed(struct sparx5 *sparx5, + u32 portno) +{ + struct sparx5_port *port; + + if (portno >= SPX5_PORTS) { + /* Internal ports */ + if (portno == SPX5_PORT_CPU_0 || portno == SPX5_PORT_CPU_1) { + /* Equals 1.25G */ + return SPX5_CAL_SPEED_2G5; + } else if (portno == SPX5_PORT_VD0) { + /* IPMC only idle BW */ + return SPX5_CAL_SPEED_NONE; + } else if (portno == SPX5_PORT_VD1) { + /* OAM only idle BW */ + return SPX5_CAL_SPEED_NONE; + } else if (portno == SPX5_PORT_VD2) { + /* IPinIP gets only idle BW */ + return SPX5_CAL_SPEED_NONE; + } + /* not in port map */ + return SPX5_CAL_SPEED_NONE; + } + /* Front ports - may be used */ + port = sparx5->ports[portno]; + if (!port) + return SPX5_CAL_SPEED_NONE; + return sparx5_bandwidth_to_calendar(port->conf.bandwidth); +} + +/* Auto configure the QSYS calendar based on port configuration */ +int sparx5_config_auto_calendar(struct sparx5 *sparx5) +{ + u32 cal[7], value, idx, portno; + u32 max_core_bw; + u32 total_bw = 0, used_port_bw = 0; + int err = 0; + enum sparx5_cal_bw spd; + + memset(cal, 0, sizeof(cal)); + + max_core_bw = sparx5_clk_to_bandwidth(sparx5->coreclock); + if (max_core_bw == 0) { + dev_err(sparx5->dev, "Core clock not supported"); + return -EINVAL; + } + + /* Setup the calendar with the bandwidth to each port */ + for (portno = 0; portno < SPX5_PORTS_ALL; portno++) { + u64 reg, offset, this_bw; + + spd = sparx5_get_port_cal_speed(sparx5, portno); + if (spd == SPX5_CAL_SPEED_NONE) + continue; + + this_bw = sparx5_cal_speed_to_value(spd); + if (portno < SPX5_PORTS) + used_port_bw += this_bw; + else + /* Internal ports are granted half the value */ + this_bw = this_bw / 2; + total_bw += this_bw; + reg = portno; + offset = do_div(reg, SPX5_PORTS_PER_CALREG); + cal[reg] |= spd << (offset * SPX5_CALBITS_PER_PORT); + } + + if (used_port_bw > sparx5_target_bandwidth(sparx5)) { + dev_err(sparx5->dev, + "Port BW %u above target BW %u\n", + used_port_bw, sparx5_target_bandwidth(sparx5)); + return -EINVAL; + } + + if (total_bw > max_core_bw) { + dev_err(sparx5->dev, + "Total BW %u above switch core BW %u\n", + total_bw, max_core_bw); + return -EINVAL; + } + + /* Halt the calendar while changing it */ + spx5_rmw(QSYS_CAL_CTRL_CAL_MODE_SET(10), + QSYS_CAL_CTRL_CAL_MODE, + sparx5, QSYS_CAL_CTRL); + + /* Assign port bandwidth to auto calendar */ + for (idx = 0; idx < ARRAY_SIZE(cal); idx++) + spx5_wr(cal[idx], sparx5, QSYS_CAL_AUTO(idx)); + + /* Increase grant rate of all ports to account for + * core clock ppm deviations + */ + spx5_rmw(QSYS_CAL_CTRL_CAL_AUTO_GRANT_RATE_SET(671), /* 672->671 */ + QSYS_CAL_CTRL_CAL_AUTO_GRANT_RATE, + sparx5, + QSYS_CAL_CTRL); + + /* Grant idle usage to VD 0-2 */ + for (idx = 2; idx < 5; idx++) + spx5_wr(HSCH_OUTB_SHARE_ENA_OUTB_SHARE_ENA_SET(12), + sparx5, + HSCH_OUTB_SHARE_ENA(idx)); + + /* Enable Auto mode */ + spx5_rmw(QSYS_CAL_CTRL_CAL_MODE_SET(8), + QSYS_CAL_CTRL_CAL_MODE, + sparx5, QSYS_CAL_CTRL); + + /* Verify successful calendar config */ + value = spx5_rd(sparx5, QSYS_CAL_CTRL); + if (QSYS_CAL_CTRL_CAL_AUTO_ERROR_GET(value)) { + dev_err(sparx5->dev, "QSYS calendar error\n"); + err = -EINVAL; + } + return err; +} + +static u32 sparx5_dsm_exb_gcd(u32 a, u32 b) +{ + if (b == 0) + return a; + return sparx5_dsm_exb_gcd(b, a % b); +} + +static u32 sparx5_dsm_cal_len(u32 *cal) +{ + u32 idx = 0, len = 0; + + while (idx < SPX5_DSM_CAL_LEN) { + if (cal[idx] != SPX5_DSM_CAL_EMPTY) + 
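+			/* Only occupied slots count towards the calendar length */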
len++; + idx++; + } + return len; +} + +static u32 sparx5_dsm_cp_cal(u32 *sched) +{ + u32 idx = 0, tmp; + + while (idx < SPX5_DSM_CAL_LEN) { + if (sched[idx] != SPX5_DSM_CAL_EMPTY) { + tmp = sched[idx]; + sched[idx] = SPX5_DSM_CAL_EMPTY; + return tmp; + } + idx++; + } + return SPX5_DSM_CAL_EMPTY; +} + +static int sparx5_dsm_calendar_calc(struct sparx5 *sparx5, u32 taxi, + struct sparx5_calendar_data *data) +{ + bool slow_mode; + u32 gcd, idx, sum, min, factor; + u32 num_of_slots, slot_spd, empty_slots; + u32 taxi_bw, clk_period_ps; + + clk_period_ps = sparx5_clk_period(sparx5->coreclock); + taxi_bw = 128 * 1000000 / clk_period_ps; + slow_mode = !!(clk_period_ps > 2000); + memcpy(data->taxi_ports, &sparx5_taxi_ports[taxi], + sizeof(data->taxi_ports)); + + for (idx = 0; idx < SPX5_DSM_CAL_LEN; idx++) { + data->new_slots[idx] = SPX5_DSM_CAL_EMPTY; + data->schedule[idx] = SPX5_DSM_CAL_EMPTY; + data->temp_sched[idx] = SPX5_DSM_CAL_EMPTY; + } + /* Default empty calendar */ + data->schedule[0] = SPX5_DSM_CAL_MAX_DEVS_PER_TAXI; + + /* Map ports to taxi positions */ + for (idx = 0; idx < SPX5_DSM_CAL_MAX_DEVS_PER_TAXI; idx++) { + u32 portno = data->taxi_ports[idx]; + + if (portno < SPX5_TAXI_PORT_MAX) { + data->taxi_speeds[idx] = sparx5_cal_speed_to_value + (sparx5_get_port_cal_speed(sparx5, portno)); + } else { + data->taxi_speeds[idx] = 0; + } + } + + sum = 0; + min = 25000; + for (idx = 0; idx < ARRAY_SIZE(data->taxi_speeds); idx++) { + u32 jdx; + + sum += data->taxi_speeds[idx]; + if (data->taxi_speeds[idx] && data->taxi_speeds[idx] < min) + min = data->taxi_speeds[idx]; + gcd = min; + for (jdx = 0; jdx < ARRAY_SIZE(data->taxi_speeds); jdx++) + gcd = sparx5_dsm_exb_gcd(gcd, data->taxi_speeds[jdx]); + } + if (sum == 0) /* Empty calendar */ + return 0; + /* Make room for overhead traffic */ + factor = 100 * 100 * 1000 / (100 * 100 - SPX5_DSM_CAL_BW_LOSS); + + if (sum * factor > (taxi_bw * 1000)) { + dev_err(sparx5->dev, + "Taxi %u, Requested BW %u above available BW %u\n", + taxi, sum, taxi_bw); + return -EINVAL; + } + for (idx = 0; idx < 4; idx++) { + u32 raw_spd; + + if (idx == 0) + raw_spd = gcd / 5; + else if (idx == 1) + raw_spd = gcd / 2; + else if (idx == 2) + raw_spd = gcd; + else + raw_spd = min; + slot_spd = raw_spd * factor / 1000; + num_of_slots = taxi_bw / slot_spd; + if (num_of_slots <= 64) + break; + } + + num_of_slots = num_of_slots > 64 ? 
64 : num_of_slots; + slot_spd = taxi_bw / num_of_slots; + + sum = 0; + for (idx = 0; idx < ARRAY_SIZE(data->taxi_speeds); idx++) { + u32 spd = data->taxi_speeds[idx]; + u32 adjusted_speed = data->taxi_speeds[idx] * factor / 1000; + + if (adjusted_speed > 0) { + data->avg_dist[idx] = (128 * 1000000 * 10) / + (adjusted_speed * clk_period_ps); + } else { + data->avg_dist[idx] = -1; + } + data->dev_slots[idx] = ((spd * factor / slot_spd) + 999) / 1000; + if (spd != 25000 && (spd != 10000 || !slow_mode)) { + if (num_of_slots < (5 * data->dev_slots[idx])) { + dev_err(sparx5->dev, + "Taxi %u, speed %u, Low slot sep.\n", + taxi, spd); + return -EINVAL; + } + } + sum += data->dev_slots[idx]; + if (sum > num_of_slots) { + dev_err(sparx5->dev, + "Taxi %u with overhead factor %u\n", + taxi, factor); + return -EINVAL; + } + } + + empty_slots = num_of_slots - sum; + + for (idx = 0; idx < empty_slots; idx++) + data->schedule[idx] = SPX5_DSM_CAL_MAX_DEVS_PER_TAXI; + + for (idx = 1; idx < num_of_slots; idx++) { + u32 indices_len = 0; + u32 slot, jdx, kdx, ts; + s32 cnt; + u32 num_of_old_slots, num_of_new_slots, tgt_score; + + for (slot = 0; slot < ARRAY_SIZE(data->dev_slots); slot++) { + if (data->dev_slots[slot] == idx) { + data->indices[indices_len] = slot; + indices_len++; + } + } + if (indices_len == 0) + continue; + kdx = 0; + for (slot = 0; slot < idx; slot++) { + for (jdx = 0; jdx < indices_len; jdx++, kdx++) + data->new_slots[kdx] = data->indices[jdx]; + } + + for (slot = 0; slot < SPX5_DSM_CAL_LEN; slot++) { + if (data->schedule[slot] == SPX5_DSM_CAL_EMPTY) + break; + } + + num_of_old_slots = slot; + num_of_new_slots = kdx; + cnt = 0; + ts = 0; + + if (num_of_new_slots > num_of_old_slots) { + memcpy(data->short_list, data->schedule, + sizeof(data->short_list)); + memcpy(data->long_list, data->new_slots, + sizeof(data->long_list)); + tgt_score = 100000 * num_of_old_slots / + num_of_new_slots; + } else { + memcpy(data->short_list, data->new_slots, + sizeof(data->short_list)); + memcpy(data->long_list, data->schedule, + sizeof(data->long_list)); + tgt_score = 100000 * num_of_new_slots / + num_of_old_slots; + } + + while (sparx5_dsm_cal_len(data->short_list) > 0 || + sparx5_dsm_cal_len(data->long_list) > 0) { + u32 act = 0; + + if (sparx5_dsm_cal_len(data->short_list) > 0) { + data->temp_sched[ts] = + sparx5_dsm_cp_cal(data->short_list); + ts++; + cnt += 100000; + act = 1; + } + while (sparx5_dsm_cal_len(data->long_list) > 0 && + cnt > 0) { + data->temp_sched[ts] = + sparx5_dsm_cp_cal(data->long_list); + ts++; + cnt -= tgt_score; + act = 1; + } + if (act == 0) { + dev_err(sparx5->dev, + "Error in DSM calendar calculation\n"); + return -EINVAL; + } + } + + for (slot = 0; slot < SPX5_DSM_CAL_LEN; slot++) { + if (data->temp_sched[slot] == SPX5_DSM_CAL_EMPTY) + break; + } + for (slot = 0; slot < SPX5_DSM_CAL_LEN; slot++) { + data->schedule[slot] = data->temp_sched[slot]; + data->temp_sched[slot] = SPX5_DSM_CAL_EMPTY; + data->new_slots[slot] = SPX5_DSM_CAL_EMPTY; + } + } + return 0; +} + +static int sparx5_dsm_calendar_check(struct sparx5 *sparx5, + struct sparx5_calendar_data *data) +{ + u32 num_of_slots, idx, port; + int cnt, max_dist; + u32 slot_indices[SPX5_DSM_CAL_LEN], distances[SPX5_DSM_CAL_LEN]; + u32 cal_length = sparx5_dsm_cal_len(data->schedule); + + for (port = 0; port < SPX5_DSM_CAL_MAX_DEVS_PER_TAXI; port++) { + num_of_slots = 0; + max_dist = data->avg_dist[port]; + for (idx = 0; idx < SPX5_DSM_CAL_LEN; idx++) { + slot_indices[idx] = SPX5_DSM_CAL_EMPTY; + distances[idx] = SPX5_DSM_CAL_EMPTY; 
+ } + + for (idx = 0; idx < cal_length; idx++) { + if (data->schedule[idx] == port) { + slot_indices[num_of_slots] = idx; + num_of_slots++; + } + } + + slot_indices[num_of_slots] = slot_indices[0] + cal_length; + + for (idx = 0; idx < num_of_slots; idx++) { + distances[idx] = (slot_indices[idx + 1] - + slot_indices[idx]) * 10; + } + + for (idx = 0; idx < num_of_slots; idx++) { + u32 jdx, kdx; + + cnt = distances[idx] - max_dist; + if (cnt < 0) + cnt = -cnt; + kdx = 0; + for (jdx = (idx + 1) % num_of_slots; + jdx != idx; + jdx = (jdx + 1) % num_of_slots, kdx++) { + cnt = cnt + distances[jdx] - max_dist; + if (cnt < 0) + cnt = -cnt; + if (cnt > max_dist) + goto check_err; + } + } + } + return 0; +check_err: + dev_err(sparx5->dev, + "Port %u: distance %u above limit %d\n", + port, cnt, max_dist); + return -EINVAL; +} + +static int sparx5_dsm_calendar_update(struct sparx5 *sparx5, u32 taxi, + struct sparx5_calendar_data *data) +{ + u32 idx; + u32 cal_len = sparx5_dsm_cal_len(data->schedule), len; + + spx5_wr(DSM_TAXI_CAL_CFG_CAL_PGM_ENA_SET(1), + sparx5, + DSM_TAXI_CAL_CFG(taxi)); + for (idx = 0; idx < cal_len; idx++) { + spx5_rmw(DSM_TAXI_CAL_CFG_CAL_IDX_SET(idx), + DSM_TAXI_CAL_CFG_CAL_IDX, + sparx5, + DSM_TAXI_CAL_CFG(taxi)); + spx5_rmw(DSM_TAXI_CAL_CFG_CAL_PGM_VAL_SET(data->schedule[idx]), + DSM_TAXI_CAL_CFG_CAL_PGM_VAL, + sparx5, + DSM_TAXI_CAL_CFG(taxi)); + } + spx5_wr(DSM_TAXI_CAL_CFG_CAL_PGM_ENA_SET(0), + sparx5, + DSM_TAXI_CAL_CFG(taxi)); + len = DSM_TAXI_CAL_CFG_CAL_CUR_LEN_GET(spx5_rd(sparx5, + DSM_TAXI_CAL_CFG(taxi))); + if (len != cal_len - 1) + goto update_err; + return 0; +update_err: + dev_err(sparx5->dev, "Incorrect calendar length: %u\n", len); + return -EINVAL; +} + +/* Configure the DSM calendar based on port configuration */ +int sparx5_config_dsm_calendar(struct sparx5 *sparx5) +{ + int taxi; + struct sparx5_calendar_data *data; + int err = 0; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + for (taxi = 0; taxi < SPX5_DSM_CAL_TAXIS; ++taxi) { + err = sparx5_dsm_calendar_calc(sparx5, taxi, data); + if (err) { + dev_err(sparx5->dev, "DSM calendar calculation failed\n"); + goto cal_out; + } + err = sparx5_dsm_calendar_check(sparx5, data); + if (err) { + dev_err(sparx5->dev, "DSM calendar check failed\n"); + goto cal_out; + } + err = sparx5_dsm_calendar_update(sparx5, taxi, data); + if (err) { + dev_err(sparx5->dev, "DSM calendar update failed\n"); + goto cal_out; + } + } +cal_out: + kfree(data); + return err; +} diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c b/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c new file mode 100644 index 000000000..01f3a3a41 --- /dev/null +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c @@ -0,0 +1,1264 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Microchip Sparx5 Switch driver + * + * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries. 
+ */ + +#include <linux/ethtool.h> + +#include "sparx5_main_regs.h" +#include "sparx5_main.h" +#include "sparx5_port.h" + +/* Index of ANA_AC port counters */ +#define SPX5_PORT_POLICER_DROPS 0 + +/* Add a potentially wrapping 32 bit value to a 64 bit counter */ +static void sparx5_update_counter(u64 *cnt, u32 val) +{ + if (val < (*cnt & U32_MAX)) + *cnt += (u64)1 << 32; /* value has wrapped */ + *cnt = (*cnt & ~(u64)U32_MAX) + val; +} + +enum sparx5_stats_entry { + spx5_stats_rx_symbol_err_cnt = 0, + spx5_stats_pmac_rx_symbol_err_cnt = 1, + spx5_stats_tx_uc_cnt = 2, + spx5_stats_pmac_tx_uc_cnt = 3, + spx5_stats_tx_mc_cnt = 4, + spx5_stats_tx_bc_cnt = 5, + spx5_stats_tx_backoff1_cnt = 6, + spx5_stats_tx_multi_coll_cnt = 7, + spx5_stats_rx_uc_cnt = 8, + spx5_stats_pmac_rx_uc_cnt = 9, + spx5_stats_rx_mc_cnt = 10, + spx5_stats_rx_bc_cnt = 11, + spx5_stats_rx_crc_err_cnt = 12, + spx5_stats_pmac_rx_crc_err_cnt = 13, + spx5_stats_rx_alignment_lost_cnt = 14, + spx5_stats_pmac_rx_alignment_lost_cnt = 15, + spx5_stats_tx_ok_bytes_cnt = 16, + spx5_stats_pmac_tx_ok_bytes_cnt = 17, + spx5_stats_tx_defer_cnt = 18, + spx5_stats_tx_late_coll_cnt = 19, + spx5_stats_tx_xcoll_cnt = 20, + spx5_stats_tx_csense_cnt = 21, + spx5_stats_rx_ok_bytes_cnt = 22, + spx5_stats_pmac_rx_ok_bytes_cnt = 23, + spx5_stats_pmac_tx_mc_cnt = 24, + spx5_stats_pmac_tx_bc_cnt = 25, + spx5_stats_tx_xdefer_cnt = 26, + spx5_stats_pmac_rx_mc_cnt = 27, + spx5_stats_pmac_rx_bc_cnt = 28, + spx5_stats_rx_in_range_len_err_cnt = 29, + spx5_stats_pmac_rx_in_range_len_err_cnt = 30, + spx5_stats_rx_out_of_range_len_err_cnt = 31, + spx5_stats_pmac_rx_out_of_range_len_err_cnt = 32, + spx5_stats_rx_oversize_cnt = 33, + spx5_stats_pmac_rx_oversize_cnt = 34, + spx5_stats_tx_pause_cnt = 35, + spx5_stats_pmac_tx_pause_cnt = 36, + spx5_stats_rx_pause_cnt = 37, + spx5_stats_pmac_rx_pause_cnt = 38, + spx5_stats_rx_unsup_opcode_cnt = 39, + spx5_stats_pmac_rx_unsup_opcode_cnt = 40, + spx5_stats_rx_undersize_cnt = 41, + spx5_stats_pmac_rx_undersize_cnt = 42, + spx5_stats_rx_fragments_cnt = 43, + spx5_stats_pmac_rx_fragments_cnt = 44, + spx5_stats_rx_jabbers_cnt = 45, + spx5_stats_pmac_rx_jabbers_cnt = 46, + spx5_stats_rx_size64_cnt = 47, + spx5_stats_pmac_rx_size64_cnt = 48, + spx5_stats_rx_size65to127_cnt = 49, + spx5_stats_pmac_rx_size65to127_cnt = 50, + spx5_stats_rx_size128to255_cnt = 51, + spx5_stats_pmac_rx_size128to255_cnt = 52, + spx5_stats_rx_size256to511_cnt = 53, + spx5_stats_pmac_rx_size256to511_cnt = 54, + spx5_stats_rx_size512to1023_cnt = 55, + spx5_stats_pmac_rx_size512to1023_cnt = 56, + spx5_stats_rx_size1024to1518_cnt = 57, + spx5_stats_pmac_rx_size1024to1518_cnt = 58, + spx5_stats_rx_size1519tomax_cnt = 59, + spx5_stats_pmac_rx_size1519tomax_cnt = 60, + spx5_stats_tx_size64_cnt = 61, + spx5_stats_pmac_tx_size64_cnt = 62, + spx5_stats_tx_size65to127_cnt = 63, + spx5_stats_pmac_tx_size65to127_cnt = 64, + spx5_stats_tx_size128to255_cnt = 65, + spx5_stats_pmac_tx_size128to255_cnt = 66, + spx5_stats_tx_size256to511_cnt = 67, + spx5_stats_pmac_tx_size256to511_cnt = 68, + spx5_stats_tx_size512to1023_cnt = 69, + spx5_stats_pmac_tx_size512to1023_cnt = 70, + spx5_stats_tx_size1024to1518_cnt = 71, + spx5_stats_pmac_tx_size1024to1518_cnt = 72, + spx5_stats_tx_size1519tomax_cnt = 73, + spx5_stats_pmac_tx_size1519tomax_cnt = 74, + spx5_stats_mm_rx_assembly_err_cnt = 75, + spx5_stats_mm_rx_assembly_ok_cnt = 76, + spx5_stats_mm_rx_merge_frag_cnt = 77, + spx5_stats_mm_rx_smd_err_cnt = 78, + spx5_stats_mm_tx_pfragment_cnt = 79, + 
spx5_stats_rx_bad_bytes_cnt = 80, + spx5_stats_pmac_rx_bad_bytes_cnt = 81, + spx5_stats_rx_in_bytes_cnt = 82, + spx5_stats_rx_ipg_shrink_cnt = 83, + spx5_stats_rx_sync_lost_err_cnt = 84, + spx5_stats_rx_tagged_frms_cnt = 85, + spx5_stats_rx_untagged_frms_cnt = 86, + spx5_stats_tx_out_bytes_cnt = 87, + spx5_stats_tx_tagged_frms_cnt = 88, + spx5_stats_tx_untagged_frms_cnt = 89, + spx5_stats_rx_hih_cksm_err_cnt = 90, + spx5_stats_pmac_rx_hih_cksm_err_cnt = 91, + spx5_stats_rx_xgmii_prot_err_cnt = 92, + spx5_stats_pmac_rx_xgmii_prot_err_cnt = 93, + spx5_stats_ana_ac_port_stat_lsb_cnt = 94, + spx5_stats_green_p0_rx_fwd = 95, + spx5_stats_green_p0_rx_port_drop = 111, + spx5_stats_green_p0_tx_port = 127, + spx5_stats_rx_local_drop = 143, + spx5_stats_tx_local_drop = 144, + spx5_stats_count = 145, +}; + +static const char *const sparx5_stats_layout[] = { + "mm_rx_assembly_err_cnt", + "mm_rx_assembly_ok_cnt", + "mm_rx_merge_frag_cnt", + "mm_rx_smd_err_cnt", + "mm_tx_pfragment_cnt", + "rx_bad_bytes_cnt", + "pmac_rx_bad_bytes_cnt", + "rx_in_bytes_cnt", + "rx_ipg_shrink_cnt", + "rx_sync_lost_err_cnt", + "rx_tagged_frms_cnt", + "rx_untagged_frms_cnt", + "tx_out_bytes_cnt", + "tx_tagged_frms_cnt", + "tx_untagged_frms_cnt", + "rx_hih_cksm_err_cnt", + "pmac_rx_hih_cksm_err_cnt", + "rx_xgmii_prot_err_cnt", + "pmac_rx_xgmii_prot_err_cnt", + "rx_port_policer_drop", + "rx_fwd_green_p0", + "rx_fwd_green_p1", + "rx_fwd_green_p2", + "rx_fwd_green_p3", + "rx_fwd_green_p4", + "rx_fwd_green_p5", + "rx_fwd_green_p6", + "rx_fwd_green_p7", + "rx_fwd_yellow_p0", + "rx_fwd_yellow_p1", + "rx_fwd_yellow_p2", + "rx_fwd_yellow_p3", + "rx_fwd_yellow_p4", + "rx_fwd_yellow_p5", + "rx_fwd_yellow_p6", + "rx_fwd_yellow_p7", + "rx_port_drop_green_p0", + "rx_port_drop_green_p1", + "rx_port_drop_green_p2", + "rx_port_drop_green_p3", + "rx_port_drop_green_p4", + "rx_port_drop_green_p5", + "rx_port_drop_green_p6", + "rx_port_drop_green_p7", + "rx_port_drop_yellow_p0", + "rx_port_drop_yellow_p1", + "rx_port_drop_yellow_p2", + "rx_port_drop_yellow_p3", + "rx_port_drop_yellow_p4", + "rx_port_drop_yellow_p5", + "rx_port_drop_yellow_p6", + "rx_port_drop_yellow_p7", + "tx_port_green_p0", + "tx_port_green_p1", + "tx_port_green_p2", + "tx_port_green_p3", + "tx_port_green_p4", + "tx_port_green_p5", + "tx_port_green_p6", + "tx_port_green_p7", + "tx_port_yellow_p0", + "tx_port_yellow_p1", + "tx_port_yellow_p2", + "tx_port_yellow_p3", + "tx_port_yellow_p4", + "tx_port_yellow_p5", + "tx_port_yellow_p6", + "tx_port_yellow_p7", + "rx_local_drop", + "tx_local_drop", +}; + +static void sparx5_get_queue_sys_stats(struct sparx5 *sparx5, int portno) +{ + u64 *portstats; + u64 *stats; + u32 addr; + int idx; + + portstats = &sparx5->stats[portno * sparx5->num_stats]; + mutex_lock(&sparx5->queue_stats_lock); + spx5_wr(XQS_STAT_CFG_STAT_VIEW_SET(portno), sparx5, XQS_STAT_CFG); + addr = 0; + stats = &portstats[spx5_stats_green_p0_rx_fwd]; + for (idx = 0; idx < 2 * SPX5_PRIOS; ++idx, ++addr, ++stats) + sparx5_update_counter(stats, spx5_rd(sparx5, XQS_CNT(addr))); + addr = 16; + stats = &portstats[spx5_stats_green_p0_rx_port_drop]; + for (idx = 0; idx < 2 * SPX5_PRIOS; ++idx, ++addr, ++stats) + sparx5_update_counter(stats, spx5_rd(sparx5, XQS_CNT(addr))); + addr = 256; + stats = &portstats[spx5_stats_green_p0_tx_port]; + for (idx = 0; idx < 2 * SPX5_PRIOS; ++idx, ++addr, ++stats) + sparx5_update_counter(stats, spx5_rd(sparx5, XQS_CNT(addr))); + sparx5_update_counter(&portstats[spx5_stats_rx_local_drop], + spx5_rd(sparx5, XQS_CNT(32))); + 
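+	/* The egress (tx) local drop counterpart is read from counter address 272 */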
sparx5_update_counter(&portstats[spx5_stats_tx_local_drop], + spx5_rd(sparx5, XQS_CNT(272))); + mutex_unlock(&sparx5->queue_stats_lock); +} + +static void sparx5_get_ana_ac_stats_stats(struct sparx5 *sparx5, int portno) +{ + u64 *portstats = &sparx5->stats[portno * sparx5->num_stats]; + + sparx5_update_counter(&portstats[spx5_stats_ana_ac_port_stat_lsb_cnt], + spx5_rd(sparx5, ANA_AC_PORT_STAT_LSB_CNT(portno, + SPX5_PORT_POLICER_DROPS))); +} + +static void sparx5_get_dev_phy_stats(u64 *portstats, void __iomem *inst, u32 + tinst) +{ + sparx5_update_counter(&portstats[spx5_stats_rx_symbol_err_cnt], + spx5_inst_rd(inst, + DEV5G_RX_SYMBOL_ERR_CNT(tinst))); + sparx5_update_counter(&portstats[spx5_stats_pmac_rx_symbol_err_cnt], + spx5_inst_rd(inst, + DEV5G_PMAC_RX_SYMBOL_ERR_CNT(tinst))); +} + +static void sparx5_get_dev_mac_stats(u64 *portstats, void __iomem *inst, u32 + tinst) +{ + sparx5_update_counter(&portstats[spx5_stats_tx_uc_cnt], + spx5_inst_rd(inst, DEV5G_TX_UC_CNT(tinst))); + sparx5_update_counter(&portstats[spx5_stats_pmac_tx_uc_cnt], + spx5_inst_rd(inst, DEV5G_PMAC_TX_UC_CNT(tinst))); + sparx5_update_counter(&portstats[spx5_stats_tx_mc_cnt], + spx5_inst_rd(inst, DEV5G_TX_MC_CNT(tinst))); + sparx5_update_counter(&portstats[spx5_stats_tx_bc_cnt], + spx5_inst_rd(inst, DEV5G_TX_BC_CNT(tinst))); + sparx5_update_counter(&portstats[spx5_stats_rx_uc_cnt], + spx5_inst_rd(inst, DEV5G_RX_UC_CNT(tinst))); + sparx5_update_counter(&portstats[spx5_stats_pmac_rx_uc_cnt], + spx5_inst_rd(inst, DEV5G_PMAC_RX_UC_CNT(tinst))); + sparx5_update_counter(&portstats[spx5_stats_rx_mc_cnt], + spx5_inst_rd(inst, DEV5G_RX_MC_CNT(tinst))); + sparx5_update_counter(&portstats[spx5_stats_rx_bc_cnt], + spx5_inst_rd(inst, DEV5G_RX_BC_CNT(tinst))); + sparx5_update_counter(&portstats[spx5_stats_rx_crc_err_cnt], + spx5_inst_rd(inst, DEV5G_RX_CRC_ERR_CNT(tinst))); + sparx5_update_counter(&portstats[spx5_stats_pmac_rx_crc_err_cnt], + spx5_inst_rd(inst, + DEV5G_PMAC_RX_CRC_ERR_CNT(tinst))); + sparx5_update_counter(&portstats[spx5_stats_rx_alignment_lost_cnt], + spx5_inst_rd(inst, + DEV5G_RX_ALIGNMENT_LOST_CNT(tinst))); + sparx5_update_counter(&portstats[spx5_stats_pmac_rx_alignment_lost_cnt], + spx5_inst_rd(inst, + DEV5G_PMAC_RX_ALIGNMENT_LOST_CNT(tinst))); + sparx5_update_counter(&portstats[spx5_stats_tx_ok_bytes_cnt], + spx5_inst_rd(inst, DEV5G_TX_OK_BYTES_CNT(tinst))); + sparx5_update_counter(&portstats[spx5_stats_pmac_tx_ok_bytes_cnt], + spx5_inst_rd(inst, + DEV5G_PMAC_TX_OK_BYTES_CNT(tinst))); + sparx5_update_counter(&portstats[spx5_stats_rx_ok_bytes_cnt], + spx5_inst_rd(inst, DEV5G_RX_OK_BYTES_CNT(tinst))); + sparx5_update_counter(&portstats[spx5_stats_pmac_rx_ok_bytes_cnt], + spx5_inst_rd(inst, + DEV5G_PMAC_RX_OK_BYTES_CNT(tinst))); + sparx5_update_counter(&portstats[spx5_stats_pmac_tx_mc_cnt], + spx5_inst_rd(inst, DEV5G_PMAC_TX_MC_CNT(tinst))); + sparx5_update_counter(&portstats[spx5_stats_pmac_tx_bc_cnt], + spx5_inst_rd(inst, DEV5G_PMAC_TX_BC_CNT(tinst))); + sparx5_update_counter(&portstats[spx5_stats_pmac_rx_mc_cnt], + spx5_inst_rd(inst, DEV5G_PMAC_RX_MC_CNT(tinst))); + sparx5_update_counter(&portstats[spx5_stats_pmac_rx_bc_cnt], + spx5_inst_rd(inst, DEV5G_PMAC_RX_BC_CNT(tinst))); + sparx5_update_counter(&portstats[spx5_stats_rx_in_range_len_err_cnt], + spx5_inst_rd(inst, + DEV5G_RX_IN_RANGE_LEN_ERR_CNT(tinst))); + sparx5_update_counter(&portstats[spx5_stats_pmac_rx_in_range_len_err_cnt], + spx5_inst_rd(inst, + DEV5G_PMAC_RX_IN_RANGE_LEN_ERR_CNT(tinst))); + 
sparx5_update_counter(&portstats[spx5_stats_rx_out_of_range_len_err_cnt], + spx5_inst_rd(inst, + DEV5G_RX_OUT_OF_RANGE_LEN_ERR_CNT(tinst))); + sparx5_update_counter(&portstats[spx5_stats_pmac_rx_out_of_range_len_err_cnt], + spx5_inst_rd(inst, + DEV5G_PMAC_RX_OUT_OF_RANGE_LEN_ERR_CNT(tinst))); + sparx5_update_counter(&portstats[spx5_stats_rx_oversize_cnt], + spx5_inst_rd(inst, DEV5G_RX_OVERSIZE_CNT(tinst))); + sparx5_update_counter(&portstats[spx5_stats_pmac_rx_oversize_cnt], + spx5_inst_rd(inst, + DEV5G_PMAC_RX_OVERSIZE_CNT(tinst))); +} + +static void sparx5_get_dev_mac_ctrl_stats(u64 *portstats, void __iomem *inst, + u32 tinst) +{ + sparx5_update_counter(&portstats[spx5_stats_tx_pause_cnt], + spx5_inst_rd(inst, DEV5G_TX_PAUSE_CNT(tinst))); + sparx5_update_counter(&portstats[spx5_stats_pmac_tx_pause_cnt], + spx5_inst_rd(inst, + DEV5G_PMAC_TX_PAUSE_CNT(tinst))); + sparx5_update_counter(&portstats[spx5_stats_rx_pause_cnt], + spx5_inst_rd(inst, DEV5G_RX_PAUSE_CNT(tinst))); + sparx5_update_counter(&portstats[spx5_stats_pmac_rx_pause_cnt], + spx5_inst_rd(inst, + DEV5G_PMAC_RX_PAUSE_CNT(tinst))); + sparx5_update_counter(&portstats[spx5_stats_rx_unsup_opcode_cnt], + spx5_inst_rd(inst, + DEV5G_RX_UNSUP_OPCODE_CNT(tinst))); + sparx5_update_counter(&portstats[spx5_stats_pmac_rx_unsup_opcode_cnt], + spx5_inst_rd(inst, + DEV5G_PMAC_RX_UNSUP_OPCODE_CNT(tinst))); +} + +static void sparx5_get_dev_rmon_stats(u64 *portstats, void __iomem *inst, u32 + tinst) +{ + sparx5_update_counter(&portstats[spx5_stats_rx_undersize_cnt], + spx5_inst_rd(inst, + DEV5G_RX_UNDERSIZE_CNT(tinst))); + sparx5_update_counter(&portstats[spx5_stats_pmac_rx_undersize_cnt], + spx5_inst_rd(inst, + DEV5G_PMAC_RX_UNDERSIZE_CNT(tinst))); + sparx5_update_counter(&portstats[spx5_stats_rx_oversize_cnt], + spx5_inst_rd(inst, DEV5G_RX_OVERSIZE_CNT(tinst))); + sparx5_update_counter(&portstats[spx5_stats_pmac_rx_oversize_cnt], + spx5_inst_rd(inst, + DEV5G_PMAC_RX_OVERSIZE_CNT(tinst))); + sparx5_update_counter(&portstats[spx5_stats_rx_fragments_cnt], + spx5_inst_rd(inst, + DEV5G_RX_FRAGMENTS_CNT(tinst))); + sparx5_update_counter(&portstats[spx5_stats_pmac_rx_fragments_cnt], + spx5_inst_rd(inst, + DEV5G_PMAC_RX_FRAGMENTS_CNT(tinst))); + sparx5_update_counter(&portstats[spx5_stats_rx_jabbers_cnt], + spx5_inst_rd(inst, DEV5G_RX_JABBERS_CNT(tinst))); + sparx5_update_counter(&portstats[spx5_stats_pmac_rx_jabbers_cnt], + spx5_inst_rd(inst, + DEV5G_PMAC_RX_JABBERS_CNT(tinst))); + sparx5_update_counter(&portstats[spx5_stats_rx_size64_cnt], + spx5_inst_rd(inst, DEV5G_RX_SIZE64_CNT(tinst))); + sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size64_cnt], + spx5_inst_rd(inst, + DEV5G_PMAC_RX_SIZE64_CNT(tinst))); + sparx5_update_counter(&portstats[spx5_stats_rx_size65to127_cnt], + spx5_inst_rd(inst, + DEV5G_RX_SIZE65TO127_CNT(tinst))); + sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size65to127_cnt], + spx5_inst_rd(inst, + DEV5G_PMAC_RX_SIZE65TO127_CNT(tinst))); + sparx5_update_counter(&portstats[spx5_stats_rx_size128to255_cnt], + spx5_inst_rd(inst, + DEV5G_RX_SIZE128TO255_CNT(tinst))); + sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size128to255_cnt], + spx5_inst_rd(inst, + DEV5G_PMAC_RX_SIZE128TO255_CNT(tinst))); + sparx5_update_counter(&portstats[spx5_stats_rx_size256to511_cnt], + spx5_inst_rd(inst, + DEV5G_RX_SIZE256TO511_CNT(tinst))); + sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size256to511_cnt], + spx5_inst_rd(inst, + DEV5G_PMAC_RX_SIZE256TO511_CNT(tinst))); + sparx5_update_counter(&portstats[spx5_stats_rx_size512to1023_cnt], 
+ spx5_inst_rd(inst, + DEV5G_RX_SIZE512TO1023_CNT(tinst))); + sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size512to1023_cnt], + spx5_inst_rd(inst, + DEV5G_PMAC_RX_SIZE512TO1023_CNT(tinst))); + sparx5_update_counter(&portstats[spx5_stats_rx_size1024to1518_cnt], + spx5_inst_rd(inst, + DEV5G_RX_SIZE1024TO1518_CNT(tinst))); + sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size1024to1518_cnt], + spx5_inst_rd(inst, + DEV5G_PMAC_RX_SIZE1024TO1518_CNT(tinst))); + sparx5_update_counter(&portstats[spx5_stats_rx_size1519tomax_cnt], + spx5_inst_rd(inst, + DEV5G_RX_SIZE1519TOMAX_CNT(tinst))); + sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size1519tomax_cnt], + spx5_inst_rd(inst, + DEV5G_PMAC_RX_SIZE1519TOMAX_CNT(tinst))); + sparx5_update_counter(&portstats[spx5_stats_tx_size64_cnt], + spx5_inst_rd(inst, DEV5G_TX_SIZE64_CNT(tinst))); + sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size64_cnt], + spx5_inst_rd(inst, + DEV5G_PMAC_TX_SIZE64_CNT(tinst))); + sparx5_update_counter(&portstats[spx5_stats_tx_size65to127_cnt], + spx5_inst_rd(inst, + DEV5G_TX_SIZE65TO127_CNT(tinst))); + sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size65to127_cnt], + spx5_inst_rd(inst, + DEV5G_PMAC_TX_SIZE65TO127_CNT(tinst))); + sparx5_update_counter(&portstats[spx5_stats_tx_size128to255_cnt], + spx5_inst_rd(inst, + DEV5G_TX_SIZE128TO255_CNT(tinst))); + sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size128to255_cnt], + spx5_inst_rd(inst, + DEV5G_PMAC_TX_SIZE128TO255_CNT(tinst))); + sparx5_update_counter(&portstats[spx5_stats_tx_size256to511_cnt], + spx5_inst_rd(inst, + DEV5G_TX_SIZE256TO511_CNT(tinst))); + sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size256to511_cnt], + spx5_inst_rd(inst, + DEV5G_PMAC_TX_SIZE256TO511_CNT(tinst))); + sparx5_update_counter(&portstats[spx5_stats_tx_size512to1023_cnt], + spx5_inst_rd(inst, + DEV5G_TX_SIZE512TO1023_CNT(tinst))); + sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size512to1023_cnt], + spx5_inst_rd(inst, + DEV5G_PMAC_TX_SIZE512TO1023_CNT(tinst))); + sparx5_update_counter(&portstats[spx5_stats_tx_size1024to1518_cnt], + spx5_inst_rd(inst, + DEV5G_TX_SIZE1024TO1518_CNT(tinst))); + sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size1024to1518_cnt], + spx5_inst_rd(inst, + DEV5G_PMAC_TX_SIZE1024TO1518_CNT(tinst))); + sparx5_update_counter(&portstats[spx5_stats_tx_size1519tomax_cnt], + spx5_inst_rd(inst, + DEV5G_TX_SIZE1519TOMAX_CNT(tinst))); + sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size1519tomax_cnt], + spx5_inst_rd(inst, + DEV5G_PMAC_TX_SIZE1519TOMAX_CNT(tinst))); +} + +static void sparx5_get_dev_misc_stats(u64 *portstats, void __iomem *inst, u32 + tinst) +{ + sparx5_update_counter(&portstats[spx5_stats_mm_rx_assembly_err_cnt], + spx5_inst_rd(inst, + DEV5G_MM_RX_ASSEMBLY_ERR_CNT(tinst))); + sparx5_update_counter(&portstats[spx5_stats_mm_rx_assembly_ok_cnt], + spx5_inst_rd(inst, + DEV5G_MM_RX_ASSEMBLY_OK_CNT(tinst))); + sparx5_update_counter(&portstats[spx5_stats_mm_rx_merge_frag_cnt], + spx5_inst_rd(inst, + DEV5G_MM_RX_MERGE_FRAG_CNT(tinst))); + sparx5_update_counter(&portstats[spx5_stats_mm_rx_smd_err_cnt], + spx5_inst_rd(inst, + DEV5G_MM_RX_SMD_ERR_CNT(tinst))); + sparx5_update_counter(&portstats[spx5_stats_mm_tx_pfragment_cnt], + spx5_inst_rd(inst, + DEV5G_MM_TX_PFRAGMENT_CNT(tinst))); + sparx5_update_counter(&portstats[spx5_stats_rx_bad_bytes_cnt], + spx5_inst_rd(inst, + DEV5G_RX_BAD_BYTES_CNT(tinst))); + sparx5_update_counter(&portstats[spx5_stats_pmac_rx_bad_bytes_cnt], + spx5_inst_rd(inst, + 
DEV5G_PMAC_RX_BAD_BYTES_CNT(tinst))); + sparx5_update_counter(&portstats[spx5_stats_rx_in_bytes_cnt], + spx5_inst_rd(inst, DEV5G_RX_IN_BYTES_CNT(tinst))); + sparx5_update_counter(&portstats[spx5_stats_rx_ipg_shrink_cnt], + spx5_inst_rd(inst, + DEV5G_RX_IPG_SHRINK_CNT(tinst))); + sparx5_update_counter(&portstats[spx5_stats_rx_tagged_frms_cnt], + spx5_inst_rd(inst, + DEV5G_RX_TAGGED_FRMS_CNT(tinst))); + sparx5_update_counter(&portstats[spx5_stats_rx_untagged_frms_cnt], + spx5_inst_rd(inst, + DEV5G_RX_UNTAGGED_FRMS_CNT(tinst))); + sparx5_update_counter(&portstats[spx5_stats_tx_out_bytes_cnt], + spx5_inst_rd(inst, + DEV5G_TX_OUT_BYTES_CNT(tinst))); + sparx5_update_counter(&portstats[spx5_stats_tx_tagged_frms_cnt], + spx5_inst_rd(inst, + DEV5G_TX_TAGGED_FRMS_CNT(tinst))); + sparx5_update_counter(&portstats[spx5_stats_tx_untagged_frms_cnt], + spx5_inst_rd(inst, + DEV5G_TX_UNTAGGED_FRMS_CNT(tinst))); + sparx5_update_counter(&portstats[spx5_stats_rx_hih_cksm_err_cnt], + spx5_inst_rd(inst, + DEV5G_RX_HIH_CKSM_ERR_CNT(tinst))); + sparx5_update_counter(&portstats[spx5_stats_pmac_rx_hih_cksm_err_cnt], + spx5_inst_rd(inst, + DEV5G_PMAC_RX_HIH_CKSM_ERR_CNT(tinst))); + sparx5_update_counter(&portstats[spx5_stats_rx_xgmii_prot_err_cnt], + spx5_inst_rd(inst, + DEV5G_RX_XGMII_PROT_ERR_CNT(tinst))); + sparx5_update_counter(&portstats[spx5_stats_pmac_rx_xgmii_prot_err_cnt], + spx5_inst_rd(inst, + DEV5G_PMAC_RX_XGMII_PROT_ERR_CNT(tinst))); +} + +static void sparx5_get_device_stats(struct sparx5 *sparx5, int portno) +{ + u64 *portstats = &sparx5->stats[portno * sparx5->num_stats]; + u32 tinst = sparx5_port_dev_index(portno); + u32 dev = sparx5_to_high_dev(portno); + void __iomem *inst; + + inst = spx5_inst_get(sparx5, dev, tinst); + sparx5_get_dev_phy_stats(portstats, inst, tinst); + sparx5_get_dev_mac_stats(portstats, inst, tinst); + sparx5_get_dev_mac_ctrl_stats(portstats, inst, tinst); + sparx5_get_dev_rmon_stats(portstats, inst, tinst); + sparx5_get_dev_misc_stats(portstats, inst, tinst); +} + +static void sparx5_get_asm_phy_stats(u64 *portstats, void __iomem *inst, int + portno) +{ + sparx5_update_counter(&portstats[spx5_stats_rx_symbol_err_cnt], + spx5_inst_rd(inst, + ASM_RX_SYMBOL_ERR_CNT(portno))); + sparx5_update_counter(&portstats[spx5_stats_pmac_rx_symbol_err_cnt], + spx5_inst_rd(inst, + ASM_PMAC_RX_SYMBOL_ERR_CNT(portno))); +} + +static void sparx5_get_asm_mac_stats(u64 *portstats, void __iomem *inst, int + portno) +{ + sparx5_update_counter(&portstats[spx5_stats_tx_uc_cnt], + spx5_inst_rd(inst, ASM_TX_UC_CNT(portno))); + sparx5_update_counter(&portstats[spx5_stats_pmac_tx_uc_cnt], + spx5_inst_rd(inst, ASM_PMAC_TX_UC_CNT(portno))); + sparx5_update_counter(&portstats[spx5_stats_tx_mc_cnt], + spx5_inst_rd(inst, ASM_TX_MC_CNT(portno))); + sparx5_update_counter(&portstats[spx5_stats_tx_bc_cnt], + spx5_inst_rd(inst, ASM_TX_BC_CNT(portno))); + sparx5_update_counter(&portstats[spx5_stats_tx_backoff1_cnt], + spx5_inst_rd(inst, ASM_TX_BACKOFF1_CNT(portno))); + sparx5_update_counter(&portstats[spx5_stats_tx_multi_coll_cnt], + spx5_inst_rd(inst, + ASM_TX_MULTI_COLL_CNT(portno))); + sparx5_update_counter(&portstats[spx5_stats_rx_uc_cnt], + spx5_inst_rd(inst, ASM_RX_UC_CNT(portno))); + sparx5_update_counter(&portstats[spx5_stats_pmac_rx_uc_cnt], + spx5_inst_rd(inst, ASM_PMAC_RX_UC_CNT(portno))); + sparx5_update_counter(&portstats[spx5_stats_rx_mc_cnt], + spx5_inst_rd(inst, ASM_RX_MC_CNT(portno))); + sparx5_update_counter(&portstats[spx5_stats_rx_bc_cnt], + spx5_inst_rd(inst, ASM_RX_BC_CNT(portno))); + 
sparx5_update_counter(&portstats[spx5_stats_rx_crc_err_cnt], + spx5_inst_rd(inst, ASM_RX_CRC_ERR_CNT(portno))); + sparx5_update_counter(&portstats[spx5_stats_pmac_rx_crc_err_cnt], + spx5_inst_rd(inst, + ASM_PMAC_RX_CRC_ERR_CNT(portno))); + sparx5_update_counter(&portstats[spx5_stats_rx_alignment_lost_cnt], + spx5_inst_rd(inst, + ASM_RX_ALIGNMENT_LOST_CNT(portno))); + sparx5_update_counter(&portstats[spx5_stats_pmac_rx_alignment_lost_cnt], + spx5_inst_rd(inst, + ASM_PMAC_RX_ALIGNMENT_LOST_CNT(portno))); + sparx5_update_counter(&portstats[spx5_stats_tx_ok_bytes_cnt], + spx5_inst_rd(inst, ASM_TX_OK_BYTES_CNT(portno))); + sparx5_update_counter(&portstats[spx5_stats_pmac_tx_ok_bytes_cnt], + spx5_inst_rd(inst, + ASM_PMAC_TX_OK_BYTES_CNT(portno))); + sparx5_update_counter(&portstats[spx5_stats_tx_defer_cnt], + spx5_inst_rd(inst, ASM_TX_DEFER_CNT(portno))); + sparx5_update_counter(&portstats[spx5_stats_tx_late_coll_cnt], + spx5_inst_rd(inst, ASM_TX_LATE_COLL_CNT(portno))); + sparx5_update_counter(&portstats[spx5_stats_tx_xcoll_cnt], + spx5_inst_rd(inst, ASM_TX_XCOLL_CNT(portno))); + sparx5_update_counter(&portstats[spx5_stats_tx_csense_cnt], + spx5_inst_rd(inst, ASM_TX_CSENSE_CNT(portno))); + sparx5_update_counter(&portstats[spx5_stats_rx_ok_bytes_cnt], + spx5_inst_rd(inst, ASM_RX_OK_BYTES_CNT(portno))); + sparx5_update_counter(&portstats[spx5_stats_pmac_rx_ok_bytes_cnt], + spx5_inst_rd(inst, + ASM_PMAC_RX_OK_BYTES_CNT(portno))); + sparx5_update_counter(&portstats[spx5_stats_pmac_tx_mc_cnt], + spx5_inst_rd(inst, ASM_PMAC_TX_MC_CNT(portno))); + sparx5_update_counter(&portstats[spx5_stats_pmac_tx_bc_cnt], + spx5_inst_rd(inst, ASM_PMAC_TX_BC_CNT(portno))); + sparx5_update_counter(&portstats[spx5_stats_tx_xdefer_cnt], + spx5_inst_rd(inst, ASM_TX_XDEFER_CNT(portno))); + sparx5_update_counter(&portstats[spx5_stats_pmac_rx_mc_cnt], + spx5_inst_rd(inst, ASM_PMAC_RX_MC_CNT(portno))); + sparx5_update_counter(&portstats[spx5_stats_pmac_rx_bc_cnt], + spx5_inst_rd(inst, ASM_PMAC_RX_BC_CNT(portno))); + sparx5_update_counter(&portstats[spx5_stats_rx_in_range_len_err_cnt], + spx5_inst_rd(inst, + ASM_RX_IN_RANGE_LEN_ERR_CNT(portno))); + sparx5_update_counter(&portstats[spx5_stats_pmac_rx_in_range_len_err_cnt], + spx5_inst_rd(inst, + ASM_PMAC_RX_IN_RANGE_LEN_ERR_CNT(portno))); + sparx5_update_counter(&portstats[spx5_stats_rx_out_of_range_len_err_cnt], + spx5_inst_rd(inst, + ASM_RX_OUT_OF_RANGE_LEN_ERR_CNT(portno))); + sparx5_update_counter(&portstats[spx5_stats_pmac_rx_out_of_range_len_err_cnt], + spx5_inst_rd(inst, + ASM_PMAC_RX_OUT_OF_RANGE_LEN_ERR_CNT(portno))); + sparx5_update_counter(&portstats[spx5_stats_rx_oversize_cnt], + spx5_inst_rd(inst, ASM_RX_OVERSIZE_CNT(portno))); + sparx5_update_counter(&portstats[spx5_stats_pmac_rx_oversize_cnt], + spx5_inst_rd(inst, + ASM_PMAC_RX_OVERSIZE_CNT(portno))); +} + +static void sparx5_get_asm_mac_ctrl_stats(u64 *portstats, void __iomem *inst, + int portno) +{ + sparx5_update_counter(&portstats[spx5_stats_tx_pause_cnt], + spx5_inst_rd(inst, ASM_TX_PAUSE_CNT(portno))); + sparx5_update_counter(&portstats[spx5_stats_pmac_tx_pause_cnt], + spx5_inst_rd(inst, + ASM_PMAC_TX_PAUSE_CNT(portno))); + sparx5_update_counter(&portstats[spx5_stats_rx_pause_cnt], + spx5_inst_rd(inst, ASM_RX_PAUSE_CNT(portno))); + sparx5_update_counter(&portstats[spx5_stats_pmac_rx_pause_cnt], + spx5_inst_rd(inst, + ASM_PMAC_RX_PAUSE_CNT(portno))); + sparx5_update_counter(&portstats[spx5_stats_rx_unsup_opcode_cnt], + spx5_inst_rd(inst, + ASM_RX_UNSUP_OPCODE_CNT(portno))); + 
sparx5_update_counter(&portstats[spx5_stats_pmac_rx_unsup_opcode_cnt], + spx5_inst_rd(inst, + ASM_PMAC_RX_UNSUP_OPCODE_CNT(portno))); +} + +static void sparx5_get_asm_rmon_stats(u64 *portstats, void __iomem *inst, int + portno) +{ + sparx5_update_counter(&portstats[spx5_stats_rx_undersize_cnt], + spx5_inst_rd(inst, ASM_RX_UNDERSIZE_CNT(portno))); + sparx5_update_counter(&portstats[spx5_stats_pmac_rx_undersize_cnt], + spx5_inst_rd(inst, + ASM_PMAC_RX_UNDERSIZE_CNT(portno))); + sparx5_update_counter(&portstats[spx5_stats_rx_oversize_cnt], + spx5_inst_rd(inst, ASM_RX_OVERSIZE_CNT(portno))); + sparx5_update_counter(&portstats[spx5_stats_pmac_rx_oversize_cnt], + spx5_inst_rd(inst, + ASM_PMAC_RX_OVERSIZE_CNT(portno))); + sparx5_update_counter(&portstats[spx5_stats_rx_fragments_cnt], + spx5_inst_rd(inst, ASM_RX_FRAGMENTS_CNT(portno))); + sparx5_update_counter(&portstats[spx5_stats_pmac_rx_fragments_cnt], + spx5_inst_rd(inst, + ASM_PMAC_RX_FRAGMENTS_CNT(portno))); + sparx5_update_counter(&portstats[spx5_stats_rx_jabbers_cnt], + spx5_inst_rd(inst, ASM_RX_JABBERS_CNT(portno))); + sparx5_update_counter(&portstats[spx5_stats_pmac_rx_jabbers_cnt], + spx5_inst_rd(inst, + ASM_PMAC_RX_JABBERS_CNT(portno))); + sparx5_update_counter(&portstats[spx5_stats_rx_size64_cnt], + spx5_inst_rd(inst, ASM_RX_SIZE64_CNT(portno))); + sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size64_cnt], + spx5_inst_rd(inst, + ASM_PMAC_RX_SIZE64_CNT(portno))); + sparx5_update_counter(&portstats[spx5_stats_rx_size65to127_cnt], + spx5_inst_rd(inst, + ASM_RX_SIZE65TO127_CNT(portno))); + sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size65to127_cnt], + spx5_inst_rd(inst, + ASM_PMAC_RX_SIZE65TO127_CNT(portno))); + sparx5_update_counter(&portstats[spx5_stats_rx_size128to255_cnt], + spx5_inst_rd(inst, + ASM_RX_SIZE128TO255_CNT(portno))); + sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size128to255_cnt], + spx5_inst_rd(inst, + ASM_PMAC_RX_SIZE128TO255_CNT(portno))); + sparx5_update_counter(&portstats[spx5_stats_rx_size256to511_cnt], + spx5_inst_rd(inst, + ASM_RX_SIZE256TO511_CNT(portno))); + sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size256to511_cnt], + spx5_inst_rd(inst, + ASM_PMAC_RX_SIZE256TO511_CNT(portno))); + sparx5_update_counter(&portstats[spx5_stats_rx_size512to1023_cnt], + spx5_inst_rd(inst, + ASM_RX_SIZE512TO1023_CNT(portno))); + sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size512to1023_cnt], + spx5_inst_rd(inst, + ASM_PMAC_RX_SIZE512TO1023_CNT(portno))); + sparx5_update_counter(&portstats[spx5_stats_rx_size1024to1518_cnt], + spx5_inst_rd(inst, + ASM_RX_SIZE1024TO1518_CNT(portno))); + sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size1024to1518_cnt], + spx5_inst_rd(inst, + ASM_PMAC_RX_SIZE1024TO1518_CNT(portno))); + sparx5_update_counter(&portstats[spx5_stats_rx_size1519tomax_cnt], + spx5_inst_rd(inst, + ASM_RX_SIZE1519TOMAX_CNT(portno))); + sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size1519tomax_cnt], + spx5_inst_rd(inst, + ASM_PMAC_RX_SIZE1519TOMAX_CNT(portno))); + sparx5_update_counter(&portstats[spx5_stats_tx_size64_cnt], + spx5_inst_rd(inst, ASM_TX_SIZE64_CNT(portno))); + sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size64_cnt], + spx5_inst_rd(inst, + ASM_PMAC_TX_SIZE64_CNT(portno))); + sparx5_update_counter(&portstats[spx5_stats_tx_size65to127_cnt], + spx5_inst_rd(inst, + ASM_TX_SIZE65TO127_CNT(portno))); + sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size65to127_cnt], + spx5_inst_rd(inst, + ASM_PMAC_TX_SIZE65TO127_CNT(portno))); + 
sparx5_update_counter(&portstats[spx5_stats_tx_size128to255_cnt], + spx5_inst_rd(inst, + ASM_TX_SIZE128TO255_CNT(portno))); + sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size128to255_cnt], + spx5_inst_rd(inst, + ASM_PMAC_TX_SIZE128TO255_CNT(portno))); + sparx5_update_counter(&portstats[spx5_stats_tx_size256to511_cnt], + spx5_inst_rd(inst, + ASM_TX_SIZE256TO511_CNT(portno))); + sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size256to511_cnt], + spx5_inst_rd(inst, + ASM_PMAC_TX_SIZE256TO511_CNT(portno))); + sparx5_update_counter(&portstats[spx5_stats_tx_size512to1023_cnt], + spx5_inst_rd(inst, + ASM_TX_SIZE512TO1023_CNT(portno))); + sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size512to1023_cnt], + spx5_inst_rd(inst, + ASM_PMAC_TX_SIZE512TO1023_CNT(portno))); + sparx5_update_counter(&portstats[spx5_stats_tx_size1024to1518_cnt], + spx5_inst_rd(inst, + ASM_TX_SIZE1024TO1518_CNT(portno))); + sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size1024to1518_cnt], + spx5_inst_rd(inst, + ASM_PMAC_TX_SIZE1024TO1518_CNT(portno))); + sparx5_update_counter(&portstats[spx5_stats_tx_size1519tomax_cnt], + spx5_inst_rd(inst, + ASM_TX_SIZE1519TOMAX_CNT(portno))); + sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size1519tomax_cnt], + spx5_inst_rd(inst, + ASM_PMAC_TX_SIZE1519TOMAX_CNT(portno))); +} + +static void sparx5_get_asm_misc_stats(u64 *portstats, void __iomem *inst, int + portno) +{ + sparx5_update_counter(&portstats[spx5_stats_mm_rx_assembly_err_cnt], + spx5_inst_rd(inst, + ASM_MM_RX_ASSEMBLY_ERR_CNT(portno))); + sparx5_update_counter(&portstats[spx5_stats_mm_rx_assembly_ok_cnt], + spx5_inst_rd(inst, + ASM_MM_RX_ASSEMBLY_OK_CNT(portno))); + sparx5_update_counter(&portstats[spx5_stats_mm_rx_merge_frag_cnt], + spx5_inst_rd(inst, + ASM_MM_RX_MERGE_FRAG_CNT(portno))); + sparx5_update_counter(&portstats[spx5_stats_mm_rx_smd_err_cnt], + spx5_inst_rd(inst, + ASM_MM_RX_SMD_ERR_CNT(portno))); + sparx5_update_counter(&portstats[spx5_stats_mm_tx_pfragment_cnt], + spx5_inst_rd(inst, + ASM_MM_TX_PFRAGMENT_CNT(portno))); + sparx5_update_counter(&portstats[spx5_stats_rx_bad_bytes_cnt], + spx5_inst_rd(inst, ASM_RX_BAD_BYTES_CNT(portno))); + sparx5_update_counter(&portstats[spx5_stats_pmac_rx_bad_bytes_cnt], + spx5_inst_rd(inst, + ASM_PMAC_RX_BAD_BYTES_CNT(portno))); + sparx5_update_counter(&portstats[spx5_stats_rx_in_bytes_cnt], + spx5_inst_rd(inst, ASM_RX_IN_BYTES_CNT(portno))); + sparx5_update_counter(&portstats[spx5_stats_rx_ipg_shrink_cnt], + spx5_inst_rd(inst, + ASM_RX_IPG_SHRINK_CNT(portno))); + sparx5_update_counter(&portstats[spx5_stats_rx_sync_lost_err_cnt], + spx5_inst_rd(inst, + ASM_RX_SYNC_LOST_ERR_CNT(portno))); + sparx5_update_counter(&portstats[spx5_stats_rx_tagged_frms_cnt], + spx5_inst_rd(inst, + ASM_RX_TAGGED_FRMS_CNT(portno))); + sparx5_update_counter(&portstats[spx5_stats_rx_untagged_frms_cnt], + spx5_inst_rd(inst, + ASM_RX_UNTAGGED_FRMS_CNT(portno))); + sparx5_update_counter(&portstats[spx5_stats_tx_out_bytes_cnt], + spx5_inst_rd(inst, ASM_TX_OUT_BYTES_CNT(portno))); + sparx5_update_counter(&portstats[spx5_stats_tx_tagged_frms_cnt], + spx5_inst_rd(inst, + ASM_TX_TAGGED_FRMS_CNT(portno))); + sparx5_update_counter(&portstats[spx5_stats_tx_untagged_frms_cnt], + spx5_inst_rd(inst, + ASM_TX_UNTAGGED_FRMS_CNT(portno))); +} + +static void sparx5_get_asm_stats(struct sparx5 *sparx5, int portno) +{ + u64 *portstats = &sparx5->stats[portno * sparx5->num_stats]; + void __iomem *inst = spx5_inst_get(sparx5, TARGET_ASM, 0); + + sparx5_get_asm_phy_stats(portstats, inst, portno); + 
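+ /* Ports running a baser (high speed) mode read their counters from the device (DEV5G) block instead; see sparx5_get_device_stats() */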
sparx5_get_asm_mac_stats(portstats, inst, portno); + sparx5_get_asm_mac_ctrl_stats(portstats, inst, portno); + sparx5_get_asm_rmon_stats(portstats, inst, portno); + sparx5_get_asm_misc_stats(portstats, inst, portno); +} + +static const struct ethtool_rmon_hist_range sparx5_rmon_ranges[] = { + { 0, 64 }, + { 65, 127 }, + { 128, 255 }, + { 256, 511 }, + { 512, 1023 }, + { 1024, 1518 }, + { 1519, 10239 }, + {} +}; + +static void sparx5_get_eth_phy_stats(struct net_device *ndev, + struct ethtool_eth_phy_stats *phy_stats) +{ + struct sparx5_port *port = netdev_priv(ndev); + struct sparx5 *sparx5 = port->sparx5; + int portno = port->portno; + void __iomem *inst; + u64 *portstats; + + portstats = &sparx5->stats[portno * sparx5->num_stats]; + if (sparx5_is_baser(port->conf.portmode)) { + u32 tinst = sparx5_port_dev_index(portno); + u32 dev = sparx5_to_high_dev(portno); + + inst = spx5_inst_get(sparx5, dev, tinst); + sparx5_get_dev_phy_stats(portstats, inst, tinst); + } else { + inst = spx5_inst_get(sparx5, TARGET_ASM, 0); + sparx5_get_asm_phy_stats(portstats, inst, portno); + } + phy_stats->SymbolErrorDuringCarrier = + portstats[spx5_stats_rx_symbol_err_cnt] + + portstats[spx5_stats_pmac_rx_symbol_err_cnt]; +} + +static void sparx5_get_eth_mac_stats(struct net_device *ndev, + struct ethtool_eth_mac_stats *mac_stats) +{ + struct sparx5_port *port = netdev_priv(ndev); + struct sparx5 *sparx5 = port->sparx5; + int portno = port->portno; + void __iomem *inst; + u64 *portstats; + + portstats = &sparx5->stats[portno * sparx5->num_stats]; + if (sparx5_is_baser(port->conf.portmode)) { + u32 tinst = sparx5_port_dev_index(portno); + u32 dev = sparx5_to_high_dev(portno); + + inst = spx5_inst_get(sparx5, dev, tinst); + sparx5_get_dev_mac_stats(portstats, inst, tinst); + } else { + inst = spx5_inst_get(sparx5, TARGET_ASM, 0); + sparx5_get_asm_mac_stats(portstats, inst, portno); + } + mac_stats->FramesTransmittedOK = portstats[spx5_stats_tx_uc_cnt] + + portstats[spx5_stats_pmac_tx_uc_cnt] + + portstats[spx5_stats_tx_mc_cnt] + + portstats[spx5_stats_tx_bc_cnt]; + mac_stats->SingleCollisionFrames = + portstats[spx5_stats_tx_backoff1_cnt]; + mac_stats->MultipleCollisionFrames = + portstats[spx5_stats_tx_multi_coll_cnt]; + mac_stats->FramesReceivedOK = portstats[spx5_stats_rx_uc_cnt] + + portstats[spx5_stats_pmac_rx_uc_cnt] + + portstats[spx5_stats_rx_mc_cnt] + + portstats[spx5_stats_rx_bc_cnt]; + mac_stats->FrameCheckSequenceErrors = + portstats[spx5_stats_rx_crc_err_cnt] + + portstats[spx5_stats_pmac_rx_crc_err_cnt]; + mac_stats->AlignmentErrors = portstats[spx5_stats_rx_alignment_lost_cnt] + + portstats[spx5_stats_pmac_rx_alignment_lost_cnt]; + mac_stats->OctetsTransmittedOK = portstats[spx5_stats_tx_ok_bytes_cnt] + + portstats[spx5_stats_pmac_tx_ok_bytes_cnt]; + mac_stats->FramesWithDeferredXmissions = + portstats[spx5_stats_tx_defer_cnt]; + mac_stats->LateCollisions = + portstats[spx5_stats_tx_late_coll_cnt]; + mac_stats->FramesAbortedDueToXSColls = + portstats[spx5_stats_tx_xcoll_cnt]; + mac_stats->CarrierSenseErrors = portstats[spx5_stats_tx_csense_cnt]; + mac_stats->OctetsReceivedOK = portstats[spx5_stats_rx_ok_bytes_cnt] + + portstats[spx5_stats_pmac_rx_ok_bytes_cnt]; + mac_stats->MulticastFramesXmittedOK = portstats[spx5_stats_tx_mc_cnt] + + portstats[spx5_stats_pmac_tx_mc_cnt]; + mac_stats->BroadcastFramesXmittedOK = portstats[spx5_stats_tx_bc_cnt] + + portstats[spx5_stats_pmac_tx_bc_cnt]; + mac_stats->FramesWithExcessiveDeferral = + portstats[spx5_stats_tx_xdefer_cnt]; + 
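+ /* Each IEEE statistic sums the regular MAC counter with its PMAC counterpart */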
mac_stats->MulticastFramesReceivedOK = portstats[spx5_stats_rx_mc_cnt] + + portstats[spx5_stats_pmac_rx_mc_cnt]; + mac_stats->BroadcastFramesReceivedOK = portstats[spx5_stats_rx_bc_cnt] + + portstats[spx5_stats_pmac_rx_bc_cnt]; + mac_stats->InRangeLengthErrors = + portstats[spx5_stats_rx_in_range_len_err_cnt] + + portstats[spx5_stats_pmac_rx_in_range_len_err_cnt]; + mac_stats->OutOfRangeLengthField = + portstats[spx5_stats_rx_out_of_range_len_err_cnt] + + portstats[spx5_stats_pmac_rx_out_of_range_len_err_cnt]; + mac_stats->FrameTooLongErrors = portstats[spx5_stats_rx_oversize_cnt] + + portstats[spx5_stats_pmac_rx_oversize_cnt]; +} + +static void sparx5_get_eth_mac_ctrl_stats(struct net_device *ndev, + struct ethtool_eth_ctrl_stats *mac_ctrl_stats) +{ + struct sparx5_port *port = netdev_priv(ndev); + struct sparx5 *sparx5 = port->sparx5; + int portno = port->portno; + void __iomem *inst; + u64 *portstats; + + portstats = &sparx5->stats[portno * sparx5->num_stats]; + if (sparx5_is_baser(port->conf.portmode)) { + u32 tinst = sparx5_port_dev_index(portno); + u32 dev = sparx5_to_high_dev(portno); + + inst = spx5_inst_get(sparx5, dev, tinst); + sparx5_get_dev_mac_ctrl_stats(portstats, inst, tinst); + } else { + inst = spx5_inst_get(sparx5, TARGET_ASM, 0); + sparx5_get_asm_mac_ctrl_stats(portstats, inst, portno); + } + mac_ctrl_stats->MACControlFramesTransmitted = + portstats[spx5_stats_tx_pause_cnt] + + portstats[spx5_stats_pmac_tx_pause_cnt]; + mac_ctrl_stats->MACControlFramesReceived = + portstats[spx5_stats_rx_pause_cnt] + + portstats[spx5_stats_pmac_rx_pause_cnt]; + mac_ctrl_stats->UnsupportedOpcodesReceived = + portstats[spx5_stats_rx_unsup_opcode_cnt] + + portstats[spx5_stats_pmac_rx_unsup_opcode_cnt]; +} + +static void sparx5_get_eth_rmon_stats(struct net_device *ndev, + struct ethtool_rmon_stats *rmon_stats, + const struct ethtool_rmon_hist_range **ranges) +{ + struct sparx5_port *port = netdev_priv(ndev); + struct sparx5 *sparx5 = port->sparx5; + int portno = port->portno; + void __iomem *inst; + u64 *portstats; + + portstats = &sparx5->stats[portno * sparx5->num_stats]; + if (sparx5_is_baser(port->conf.portmode)) { + u32 tinst = sparx5_port_dev_index(portno); + u32 dev = sparx5_to_high_dev(portno); + + inst = spx5_inst_get(sparx5, dev, tinst); + sparx5_get_dev_rmon_stats(portstats, inst, tinst); + } else { + inst = spx5_inst_get(sparx5, TARGET_ASM, 0); + sparx5_get_asm_rmon_stats(portstats, inst, portno); + } + rmon_stats->undersize_pkts = portstats[spx5_stats_rx_undersize_cnt] + + portstats[spx5_stats_pmac_rx_undersize_cnt]; + rmon_stats->oversize_pkts = portstats[spx5_stats_rx_oversize_cnt] + + portstats[spx5_stats_pmac_rx_oversize_cnt]; + rmon_stats->fragments = portstats[spx5_stats_rx_fragments_cnt] + + portstats[spx5_stats_pmac_rx_fragments_cnt]; + rmon_stats->jabbers = portstats[spx5_stats_rx_jabbers_cnt] + + portstats[spx5_stats_pmac_rx_jabbers_cnt]; + rmon_stats->hist[0] = portstats[spx5_stats_rx_size64_cnt] + + portstats[spx5_stats_pmac_rx_size64_cnt]; + rmon_stats->hist[1] = portstats[spx5_stats_rx_size65to127_cnt] + + portstats[spx5_stats_pmac_rx_size65to127_cnt]; + rmon_stats->hist[2] = portstats[spx5_stats_rx_size128to255_cnt] + + portstats[spx5_stats_pmac_rx_size128to255_cnt]; + rmon_stats->hist[3] = portstats[spx5_stats_rx_size256to511_cnt] + + portstats[spx5_stats_pmac_rx_size256to511_cnt]; + rmon_stats->hist[4] = portstats[spx5_stats_rx_size512to1023_cnt] + + portstats[spx5_stats_pmac_rx_size512to1023_cnt]; + rmon_stats->hist[5] = 
portstats[spx5_stats_rx_size1024to1518_cnt] + + portstats[spx5_stats_pmac_rx_size1024to1518_cnt]; + rmon_stats->hist[6] = portstats[spx5_stats_rx_size1519tomax_cnt] + + portstats[spx5_stats_pmac_rx_size1519tomax_cnt]; + rmon_stats->hist_tx[0] = portstats[spx5_stats_tx_size64_cnt] + + portstats[spx5_stats_pmac_tx_size64_cnt]; + rmon_stats->hist_tx[1] = portstats[spx5_stats_tx_size65to127_cnt] + + portstats[spx5_stats_pmac_tx_size65to127_cnt]; + rmon_stats->hist_tx[2] = portstats[spx5_stats_tx_size128to255_cnt] + + portstats[spx5_stats_pmac_tx_size128to255_cnt]; + rmon_stats->hist_tx[3] = portstats[spx5_stats_tx_size256to511_cnt] + + portstats[spx5_stats_pmac_tx_size256to511_cnt]; + rmon_stats->hist_tx[4] = portstats[spx5_stats_tx_size512to1023_cnt] + + portstats[spx5_stats_pmac_tx_size512to1023_cnt]; + rmon_stats->hist_tx[5] = portstats[spx5_stats_tx_size1024to1518_cnt] + + portstats[spx5_stats_pmac_tx_size1024to1518_cnt]; + rmon_stats->hist_tx[6] = portstats[spx5_stats_tx_size1519tomax_cnt] + + portstats[spx5_stats_pmac_tx_size1519tomax_cnt]; + *ranges = sparx5_rmon_ranges; +} + +static int sparx5_get_sset_count(struct net_device *ndev, int sset) +{ + struct sparx5_port *port = netdev_priv(ndev); + struct sparx5 *sparx5 = port->sparx5; + + if (sset != ETH_SS_STATS) + return -EOPNOTSUPP; + return sparx5->num_ethtool_stats; +} + +static void sparx5_get_sset_strings(struct net_device *ndev, u32 sset, u8 *data) +{ + struct sparx5_port *port = netdev_priv(ndev); + struct sparx5 *sparx5 = port->sparx5; + int idx; + + if (sset != ETH_SS_STATS) + return; + + for (idx = 0; idx < sparx5->num_ethtool_stats; idx++) + strncpy(data + idx * ETH_GSTRING_LEN, + sparx5->stats_layout[idx], ETH_GSTRING_LEN); +} + +static void sparx5_get_sset_data(struct net_device *ndev, + struct ethtool_stats *stats, u64 *data) +{ + struct sparx5_port *port = netdev_priv(ndev); + struct sparx5 *sparx5 = port->sparx5; + int portno = port->portno; + void __iomem *inst; + u64 *portstats; + int idx; + + portstats = &sparx5->stats[portno * sparx5->num_stats]; + if (sparx5_is_baser(port->conf.portmode)) { + u32 tinst = sparx5_port_dev_index(portno); + u32 dev = sparx5_to_high_dev(portno); + + inst = spx5_inst_get(sparx5, dev, tinst); + sparx5_get_dev_misc_stats(portstats, inst, tinst); + } else { + inst = spx5_inst_get(sparx5, TARGET_ASM, 0); + sparx5_get_asm_misc_stats(portstats, inst, portno); + } + sparx5_get_ana_ac_stats_stats(sparx5, portno); + sparx5_get_queue_sys_stats(sparx5, portno); + /* Copy port counters to the ethtool buffer */ + for (idx = spx5_stats_mm_rx_assembly_err_cnt; + idx < spx5_stats_mm_rx_assembly_err_cnt + + sparx5->num_ethtool_stats; idx++) + *data++ = portstats[idx]; +} + +void sparx5_get_stats64(struct net_device *ndev, + struct rtnl_link_stats64 *stats) +{ + struct sparx5_port *port = netdev_priv(ndev); + struct sparx5 *sparx5 = port->sparx5; + u64 *portstats; + int idx; + + if (!sparx5->stats) + return; /* Not initialized yet */ + + portstats = &sparx5->stats[port->portno * sparx5->num_stats]; + + stats->rx_packets = portstats[spx5_stats_rx_uc_cnt] + + portstats[spx5_stats_pmac_rx_uc_cnt] + + portstats[spx5_stats_rx_mc_cnt] + + portstats[spx5_stats_rx_bc_cnt]; + stats->tx_packets = portstats[spx5_stats_tx_uc_cnt] + + portstats[spx5_stats_pmac_tx_uc_cnt] + + portstats[spx5_stats_tx_mc_cnt] + + portstats[spx5_stats_tx_bc_cnt]; + stats->rx_bytes = portstats[spx5_stats_rx_ok_bytes_cnt] + + portstats[spx5_stats_pmac_rx_ok_bytes_cnt]; + stats->tx_bytes = portstats[spx5_stats_tx_ok_bytes_cnt] + + 
portstats[spx5_stats_pmac_tx_ok_bytes_cnt]; + stats->rx_errors = portstats[spx5_stats_rx_in_range_len_err_cnt] + + portstats[spx5_stats_pmac_rx_in_range_len_err_cnt] + + portstats[spx5_stats_rx_out_of_range_len_err_cnt] + + portstats[spx5_stats_pmac_rx_out_of_range_len_err_cnt] + + portstats[spx5_stats_rx_oversize_cnt] + + portstats[spx5_stats_pmac_rx_oversize_cnt] + + portstats[spx5_stats_rx_crc_err_cnt] + + portstats[spx5_stats_pmac_rx_crc_err_cnt] + + portstats[spx5_stats_rx_alignment_lost_cnt] + + portstats[spx5_stats_pmac_rx_alignment_lost_cnt]; + stats->tx_errors = portstats[spx5_stats_tx_xcoll_cnt] + + portstats[spx5_stats_tx_csense_cnt] + + portstats[spx5_stats_tx_late_coll_cnt]; + stats->multicast = portstats[spx5_stats_rx_mc_cnt] + + portstats[spx5_stats_pmac_rx_mc_cnt]; + stats->collisions = portstats[spx5_stats_tx_late_coll_cnt] + + portstats[spx5_stats_tx_xcoll_cnt] + + portstats[spx5_stats_tx_backoff1_cnt]; + stats->rx_length_errors = portstats[spx5_stats_rx_in_range_len_err_cnt] + + portstats[spx5_stats_pmac_rx_in_range_len_err_cnt] + + portstats[spx5_stats_rx_out_of_range_len_err_cnt] + + portstats[spx5_stats_pmac_rx_out_of_range_len_err_cnt] + + portstats[spx5_stats_rx_oversize_cnt] + + portstats[spx5_stats_pmac_rx_oversize_cnt]; + stats->rx_crc_errors = portstats[spx5_stats_rx_crc_err_cnt] + + portstats[spx5_stats_pmac_rx_crc_err_cnt]; + stats->rx_frame_errors = portstats[spx5_stats_rx_alignment_lost_cnt] + + portstats[spx5_stats_pmac_rx_alignment_lost_cnt]; + stats->tx_aborted_errors = portstats[spx5_stats_tx_xcoll_cnt]; + stats->tx_carrier_errors = portstats[spx5_stats_tx_csense_cnt]; + stats->tx_window_errors = portstats[spx5_stats_tx_late_coll_cnt]; + stats->rx_dropped = portstats[spx5_stats_ana_ac_port_stat_lsb_cnt]; + for (idx = 0; idx < 2 * SPX5_PRIOS; ++idx) + stats->rx_dropped += portstats[spx5_stats_green_p0_rx_port_drop + + idx]; + stats->tx_dropped = portstats[spx5_stats_tx_local_drop]; +} + +static void sparx5_update_port_stats(struct sparx5 *sparx5, int portno) +{ + if (sparx5_is_baser(sparx5->ports[portno]->conf.portmode)) + sparx5_get_device_stats(sparx5, portno); + else + sparx5_get_asm_stats(sparx5, portno); + sparx5_get_ana_ac_stats_stats(sparx5, portno); + sparx5_get_queue_sys_stats(sparx5, portno); +} + +static void sparx5_update_stats(struct sparx5 *sparx5) +{ + int idx; + + for (idx = 0; idx < SPX5_PORTS; idx++) + if (sparx5->ports[idx]) + sparx5_update_port_stats(sparx5, idx); +} + +static void sparx5_check_stats_work(struct work_struct *work) +{ + struct delayed_work *dwork = to_delayed_work(work); + struct sparx5 *sparx5 = container_of(dwork, + struct sparx5, + stats_work); + + sparx5_update_stats(sparx5); + + queue_delayed_work(sparx5->stats_queue, &sparx5->stats_work, + SPX5_STATS_CHECK_DELAY); +} + +static int sparx5_get_link_settings(struct net_device *ndev, + struct ethtool_link_ksettings *cmd) +{ + struct sparx5_port *port = netdev_priv(ndev); + + return phylink_ethtool_ksettings_get(port->phylink, cmd); +} + +static int sparx5_set_link_settings(struct net_device *ndev, + const struct ethtool_link_ksettings *cmd) +{ + struct sparx5_port *port = netdev_priv(ndev); + + return phylink_ethtool_ksettings_set(port->phylink, cmd); +} + +static void sparx5_config_stats(struct sparx5 *sparx5) +{ + /* Enable global events for port policer drops */ + spx5_rmw(ANA_AC_PORT_SGE_CFG_MASK_SET(0xf0f0), + ANA_AC_PORT_SGE_CFG_MASK, + sparx5, + ANA_AC_PORT_SGE_CFG(SPX5_PORT_POLICER_DROPS)); +} + +static void sparx5_config_port_stats(struct sparx5 *sparx5, int 
portno) +{ + /* Clear Queue System counters */ + spx5_wr(XQS_STAT_CFG_STAT_VIEW_SET(portno) | + XQS_STAT_CFG_STAT_CLEAR_SHOT_SET(3), sparx5, + XQS_STAT_CFG); + + /* Use counter for port policer drop count */ + spx5_rmw(ANA_AC_PORT_STAT_CFG_CFG_CNT_FRM_TYPE_SET(1) | + ANA_AC_PORT_STAT_CFG_CFG_CNT_BYTE_SET(0) | + ANA_AC_PORT_STAT_CFG_CFG_PRIO_MASK_SET(0xff), + ANA_AC_PORT_STAT_CFG_CFG_CNT_FRM_TYPE | + ANA_AC_PORT_STAT_CFG_CFG_CNT_BYTE | + ANA_AC_PORT_STAT_CFG_CFG_PRIO_MASK, + sparx5, ANA_AC_PORT_STAT_CFG(portno, SPX5_PORT_POLICER_DROPS)); +} + +static int sparx5_get_ts_info(struct net_device *dev, + struct ethtool_ts_info *info) +{ + struct sparx5_port *port = netdev_priv(dev); + struct sparx5 *sparx5 = port->sparx5; + struct sparx5_phc *phc; + + if (!sparx5->ptp) + return ethtool_op_get_ts_info(dev, info); + + phc = &sparx5->phc[SPARX5_PHC_PORT]; + + info->phc_index = phc->clock ? ptp_clock_index(phc->clock) : -1; + if (info->phc_index == -1) { + info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE; + return 0; + } + info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE | + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON) | + BIT(HWTSTAMP_TX_ONESTEP_SYNC); + info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | + BIT(HWTSTAMP_FILTER_ALL); + + return 0; +} + +const struct ethtool_ops sparx5_ethtool_ops = { + .get_sset_count = sparx5_get_sset_count, + .get_strings = sparx5_get_sset_strings, + .get_ethtool_stats = sparx5_get_sset_data, + .get_link_ksettings = sparx5_get_link_settings, + .set_link_ksettings = sparx5_set_link_settings, + .get_link = ethtool_op_get_link, + .get_eth_phy_stats = sparx5_get_eth_phy_stats, + .get_eth_mac_stats = sparx5_get_eth_mac_stats, + .get_eth_ctrl_stats = sparx5_get_eth_mac_ctrl_stats, + .get_rmon_stats = sparx5_get_eth_rmon_stats, + .get_ts_info = sparx5_get_ts_info, +}; + +int sparx_stats_init(struct sparx5 *sparx5) +{ + char queue_name[32]; + int portno; + + sparx5->stats_layout = sparx5_stats_layout; + sparx5->num_stats = spx5_stats_count; + sparx5->num_ethtool_stats = ARRAY_SIZE(sparx5_stats_layout); + sparx5->stats = devm_kcalloc(sparx5->dev, + SPX5_PORTS_ALL * sparx5->num_stats, + sizeof(u64), GFP_KERNEL); + if (!sparx5->stats) + return -ENOMEM; + + mutex_init(&sparx5->queue_stats_lock); + sparx5_config_stats(sparx5); + for (portno = 0; portno < SPX5_PORTS; portno++) + if (sparx5->ports[portno]) + sparx5_config_port_stats(sparx5, portno); + + snprintf(queue_name, sizeof(queue_name), "%s-stats", + dev_name(sparx5->dev)); + sparx5->stats_queue = create_singlethread_workqueue(queue_name); + if (!sparx5->stats_queue) + return -ENOMEM; + + INIT_DELAYED_WORK(&sparx5->stats_work, sparx5_check_stats_work); + queue_delayed_work(sparx5->stats_queue, &sparx5->stats_work, + SPX5_STATS_CHECK_DELAY); + + return 0; +} diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c b/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c new file mode 100644 index 000000000..141897dfe --- /dev/null +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c @@ -0,0 +1,598 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Microchip Sparx5 Switch driver + * + * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries. 
+ * + * The Sparx5 Chip Register Model can be browsed at this location: + * https://github.com/microchip-ung/sparx-5_reginfo + */ + +#include <linux/types.h> +#include <linux/skbuff.h> +#include <linux/netdevice.h> +#include <linux/interrupt.h> +#include <linux/ip.h> +#include <linux/dma-mapping.h> + +#include "sparx5_main_regs.h" +#include "sparx5_main.h" +#include "sparx5_port.h" + +#define FDMA_XTR_CHANNEL 6 +#define FDMA_INJ_CHANNEL 0 + +#define FDMA_DCB_INFO_DATAL(x) ((x) & GENMASK(15, 0)) +#define FDMA_DCB_INFO_TOKEN BIT(17) +#define FDMA_DCB_INFO_INTR BIT(18) +#define FDMA_DCB_INFO_SW(x) (((x) << 24) & GENMASK(31, 24)) + +#define FDMA_DCB_STATUS_BLOCKL(x) ((x) & GENMASK(15, 0)) +#define FDMA_DCB_STATUS_SOF BIT(16) +#define FDMA_DCB_STATUS_EOF BIT(17) +#define FDMA_DCB_STATUS_INTR BIT(18) +#define FDMA_DCB_STATUS_DONE BIT(19) +#define FDMA_DCB_STATUS_BLOCKO(x) (((x) << 20) & GENMASK(31, 20)) +#define FDMA_DCB_INVALID_DATA 0x1 + +#define FDMA_XTR_BUFFER_SIZE 2048 +#define FDMA_WEIGHT 4 + +/* Frame DMA DCB format + * + * +---------------------------+ + * | Next Ptr | + * +---------------------------+ + * | Reserved | Info | + * +---------------------------+ + * | Data0 Ptr | + * +---------------------------+ + * | Reserved | Status0 | + * +---------------------------+ + * | Data1 Ptr | + * +---------------------------+ + * | Reserved | Status1 | + * +---------------------------+ + * | Data2 Ptr | + * +---------------------------+ + * | Reserved | Status2 | + * |-------------|-------------| + * | | + * | | + * | | + * | | + * | | + * |---------------------------| + * | Data14 Ptr | + * +-------------|-------------+ + * | Reserved | Status14 | + * +-------------|-------------+ + */ + +/* For each hardware DB there is an entry in this list and when the HW DB + * entry is used, this SW DB entry is moved to the back of the list + */ +struct sparx5_db { + struct list_head list; + void *cpu_addr; +}; + +static void sparx5_fdma_rx_add_dcb(struct sparx5_rx *rx, + struct sparx5_rx_dcb_hw *dcb, + u64 nextptr) +{ + int idx = 0; + + /* Reset the status of the DB */ + for (idx = 0; idx < FDMA_RX_DCB_MAX_DBS; ++idx) { + struct sparx5_db_hw *db = &dcb->db[idx]; + + db->status = FDMA_DCB_STATUS_INTR; + } + dcb->nextptr = FDMA_DCB_INVALID_DATA; + dcb->info = FDMA_DCB_INFO_DATAL(FDMA_XTR_BUFFER_SIZE); + rx->last_entry->nextptr = nextptr; + rx->last_entry = dcb; +} + +static void sparx5_fdma_tx_add_dcb(struct sparx5_tx *tx, + struct sparx5_tx_dcb_hw *dcb, + u64 nextptr) +{ + int idx = 0; + + /* Reset the status of the DB */ + for (idx = 0; idx < FDMA_TX_DCB_MAX_DBS; ++idx) { + struct sparx5_db_hw *db = &dcb->db[idx]; + + db->status = FDMA_DCB_STATUS_DONE; + } + dcb->nextptr = FDMA_DCB_INVALID_DATA; + dcb->info = FDMA_DCB_INFO_DATAL(FDMA_XTR_BUFFER_SIZE); +} + +static void sparx5_fdma_rx_activate(struct sparx5 *sparx5, struct sparx5_rx *rx) +{ + /* Write the buffer address in the LLP and LLP1 regs */ + spx5_wr(((u64)rx->dma) & GENMASK(31, 0), sparx5, + FDMA_DCB_LLP(rx->channel_id)); + spx5_wr(((u64)rx->dma) >> 32, sparx5, FDMA_DCB_LLP1(rx->channel_id)); + + /* Set the number of RX DBs to be used, and DB end-of-frame interrupt */ + spx5_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(FDMA_RX_DCB_MAX_DBS) | + FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) | + FDMA_CH_CFG_CH_INJ_PORT_SET(XTR_QUEUE), + sparx5, FDMA_CH_CFG(rx->channel_id)); + + /* Set the RX Watermark to max */ + spx5_rmw(FDMA_XTR_CFG_XTR_FIFO_WM_SET(31), FDMA_XTR_CFG_XTR_FIFO_WM, + sparx5, + FDMA_XTR_CFG); + + /* Start RX fdma */ + 
spx5_rmw(FDMA_PORT_CTRL_XTR_STOP_SET(0), FDMA_PORT_CTRL_XTR_STOP, + sparx5, FDMA_PORT_CTRL(0)); + + /* Enable RX channel DB interrupt */ + spx5_rmw(BIT(rx->channel_id), + BIT(rx->channel_id) & FDMA_INTR_DB_ENA_INTR_DB_ENA, + sparx5, FDMA_INTR_DB_ENA); + + /* Activate the RX channel */ + spx5_wr(BIT(rx->channel_id), sparx5, FDMA_CH_ACTIVATE); +} + +static void sparx5_fdma_rx_deactivate(struct sparx5 *sparx5, struct sparx5_rx *rx) +{ + /* Dectivate the RX channel */ + spx5_rmw(0, BIT(rx->channel_id) & FDMA_CH_ACTIVATE_CH_ACTIVATE, + sparx5, FDMA_CH_ACTIVATE); + + /* Disable RX channel DB interrupt */ + spx5_rmw(0, BIT(rx->channel_id) & FDMA_INTR_DB_ENA_INTR_DB_ENA, + sparx5, FDMA_INTR_DB_ENA); + + /* Stop RX fdma */ + spx5_rmw(FDMA_PORT_CTRL_XTR_STOP_SET(1), FDMA_PORT_CTRL_XTR_STOP, + sparx5, FDMA_PORT_CTRL(0)); +} + +static void sparx5_fdma_tx_activate(struct sparx5 *sparx5, struct sparx5_tx *tx) +{ + /* Write the buffer address in the LLP and LLP1 regs */ + spx5_wr(((u64)tx->dma) & GENMASK(31, 0), sparx5, + FDMA_DCB_LLP(tx->channel_id)); + spx5_wr(((u64)tx->dma) >> 32, sparx5, FDMA_DCB_LLP1(tx->channel_id)); + + /* Set the number of TX DBs to be used, and DB end-of-frame interrupt */ + spx5_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(FDMA_TX_DCB_MAX_DBS) | + FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) | + FDMA_CH_CFG_CH_INJ_PORT_SET(INJ_QUEUE), + sparx5, FDMA_CH_CFG(tx->channel_id)); + + /* Start TX fdma */ + spx5_rmw(FDMA_PORT_CTRL_INJ_STOP_SET(0), FDMA_PORT_CTRL_INJ_STOP, + sparx5, FDMA_PORT_CTRL(0)); + + /* Activate the channel */ + spx5_wr(BIT(tx->channel_id), sparx5, FDMA_CH_ACTIVATE); +} + +static void sparx5_fdma_tx_deactivate(struct sparx5 *sparx5, struct sparx5_tx *tx) +{ + /* Disable the channel */ + spx5_rmw(0, BIT(tx->channel_id) & FDMA_CH_ACTIVATE_CH_ACTIVATE, + sparx5, FDMA_CH_ACTIVATE); +} + +static void sparx5_fdma_rx_reload(struct sparx5 *sparx5, struct sparx5_rx *rx) +{ + /* Reload the RX channel */ + spx5_wr(BIT(rx->channel_id), sparx5, FDMA_CH_RELOAD); +} + +static void sparx5_fdma_tx_reload(struct sparx5 *sparx5, struct sparx5_tx *tx) +{ + /* Reload the TX channel */ + spx5_wr(BIT(tx->channel_id), sparx5, FDMA_CH_RELOAD); +} + +static struct sk_buff *sparx5_fdma_rx_alloc_skb(struct sparx5_rx *rx) +{ + return __netdev_alloc_skb(rx->ndev, FDMA_XTR_BUFFER_SIZE, + GFP_ATOMIC); +} + +static bool sparx5_fdma_rx_get_frame(struct sparx5 *sparx5, struct sparx5_rx *rx) +{ + struct sparx5_db_hw *db_hw; + unsigned int packet_size; + struct sparx5_port *port; + struct sk_buff *new_skb; + struct frame_info fi; + struct sk_buff *skb; + dma_addr_t dma_addr; + + /* Check if the DCB is done */ + db_hw = &rx->dcb_entries[rx->dcb_index].db[rx->db_index]; + if (unlikely(!(db_hw->status & FDMA_DCB_STATUS_DONE))) + return false; + skb = rx->skb[rx->dcb_index][rx->db_index]; + /* Replace the DB entry with a new SKB */ + new_skb = sparx5_fdma_rx_alloc_skb(rx); + if (unlikely(!new_skb)) + return false; + /* Map the new skb data and set the new skb */ + dma_addr = virt_to_phys(new_skb->data); + rx->skb[rx->dcb_index][rx->db_index] = new_skb; + db_hw->dataptr = dma_addr; + packet_size = FDMA_DCB_STATUS_BLOCKL(db_hw->status); + skb_put(skb, packet_size); + /* Now do the normal processing of the skb */ + sparx5_ifh_parse((u32 *)skb->data, &fi); + /* Map to port netdev */ + port = fi.src_port < SPX5_PORTS ? 
sparx5->ports[fi.src_port] : NULL; + if (!port || !port->ndev) { + dev_err(sparx5->dev, "Data on inactive port %d\n", fi.src_port); + sparx5_xtr_flush(sparx5, XTR_QUEUE); + return false; + } + skb->dev = port->ndev; + skb_pull(skb, IFH_LEN * sizeof(u32)); + if (likely(!(skb->dev->features & NETIF_F_RXFCS))) + skb_trim(skb, skb->len - ETH_FCS_LEN); + + sparx5_ptp_rxtstamp(sparx5, skb, fi.timestamp); + skb->protocol = eth_type_trans(skb, skb->dev); + /* Everything we see on an interface that is in the HW bridge + * has already been forwarded + */ + if (test_bit(port->portno, sparx5->bridge_mask)) + skb->offload_fwd_mark = 1; + skb->dev->stats.rx_bytes += skb->len; + skb->dev->stats.rx_packets++; + rx->packets++; + netif_receive_skb(skb); + return true; +} + +static int sparx5_fdma_napi_callback(struct napi_struct *napi, int weight) +{ + struct sparx5_rx *rx = container_of(napi, struct sparx5_rx, napi); + struct sparx5 *sparx5 = container_of(rx, struct sparx5, rx); + int counter = 0; + + while (counter < weight && sparx5_fdma_rx_get_frame(sparx5, rx)) { + struct sparx5_rx_dcb_hw *old_dcb; + + rx->db_index++; + counter++; + /* Check if the DCB can be reused */ + if (rx->db_index != FDMA_RX_DCB_MAX_DBS) + continue; + /* As the DCB can be reused, just advance the dcb_index + * pointer and set the nextptr in the DCB + */ + rx->db_index = 0; + old_dcb = &rx->dcb_entries[rx->dcb_index]; + rx->dcb_index++; + rx->dcb_index &= FDMA_DCB_MAX - 1; + sparx5_fdma_rx_add_dcb(rx, old_dcb, + rx->dma + + ((unsigned long)old_dcb - + (unsigned long)rx->dcb_entries)); + } + if (counter < weight) { + napi_complete_done(&rx->napi, counter); + spx5_rmw(BIT(rx->channel_id), + BIT(rx->channel_id) & FDMA_INTR_DB_ENA_INTR_DB_ENA, + sparx5, FDMA_INTR_DB_ENA); + } + if (counter) + sparx5_fdma_rx_reload(sparx5, rx); + return counter; +} + +static struct sparx5_tx_dcb_hw *sparx5_fdma_next_dcb(struct sparx5_tx *tx, + struct sparx5_tx_dcb_hw *dcb) +{ + struct sparx5_tx_dcb_hw *next_dcb; + + next_dcb = dcb; + next_dcb++; + /* Handle wrap-around */ + if ((unsigned long)next_dcb >= + ((unsigned long)tx->first_entry + FDMA_DCB_MAX * sizeof(*dcb))) + next_dcb = tx->first_entry; + return next_dcb; +} + +int sparx5_fdma_xmit(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb) +{ + struct sparx5_tx_dcb_hw *next_dcb_hw; + struct sparx5_tx *tx = &sparx5->tx; + static bool first_time = true; + struct sparx5_db_hw *db_hw; + struct sparx5_db *db; + + next_dcb_hw = sparx5_fdma_next_dcb(tx, tx->curr_entry); + db_hw = &next_dcb_hw->db[0]; + if (!(db_hw->status & FDMA_DCB_STATUS_DONE)) + return -EINVAL; + db = list_first_entry(&tx->db_list, struct sparx5_db, list); + list_move_tail(&db->list, &tx->db_list); + next_dcb_hw->nextptr = FDMA_DCB_INVALID_DATA; + tx->curr_entry->nextptr = tx->dma + + ((unsigned long)next_dcb_hw - + (unsigned long)tx->first_entry); + tx->curr_entry = next_dcb_hw; + memset(db->cpu_addr, 0, FDMA_XTR_BUFFER_SIZE); + memcpy(db->cpu_addr, ifh, IFH_LEN * 4); + memcpy(db->cpu_addr + IFH_LEN * 4, skb->data, skb->len); + db_hw->status = FDMA_DCB_STATUS_SOF | + FDMA_DCB_STATUS_EOF | + FDMA_DCB_STATUS_BLOCKO(0) | + FDMA_DCB_STATUS_BLOCKL(skb->len + IFH_LEN * 4 + 4); + if (first_time) { + sparx5_fdma_tx_activate(sparx5, tx); + first_time = false; + } else { + sparx5_fdma_tx_reload(sparx5, tx); + } + return NETDEV_TX_OK; +} + +static int sparx5_fdma_rx_alloc(struct sparx5 *sparx5) +{ + struct sparx5_rx *rx = &sparx5->rx; + struct sparx5_rx_dcb_hw *dcb; + int idx, jdx; + int size; + + size = sizeof(struct sparx5_rx_dcb_hw) * 
FDMA_DCB_MAX; + size = ALIGN(size, PAGE_SIZE); + rx->dcb_entries = devm_kzalloc(sparx5->dev, size, GFP_KERNEL); + if (!rx->dcb_entries) + return -ENOMEM; + rx->dma = virt_to_phys(rx->dcb_entries); + rx->last_entry = rx->dcb_entries; + rx->db_index = 0; + rx->dcb_index = 0; + /* Now for each dcb allocate the db */ + for (idx = 0; idx < FDMA_DCB_MAX; ++idx) { + dcb = &rx->dcb_entries[idx]; + dcb->info = 0; + /* For each db allocate an skb and map skb data pointer to the DB + * dataptr. In this way when the frame is received the skb->data + * will contain the frame, so no memcpy is needed + */ + for (jdx = 0; jdx < FDMA_RX_DCB_MAX_DBS; ++jdx) { + struct sparx5_db_hw *db_hw = &dcb->db[jdx]; + dma_addr_t dma_addr; + struct sk_buff *skb; + + skb = sparx5_fdma_rx_alloc_skb(rx); + if (!skb) + return -ENOMEM; + + dma_addr = virt_to_phys(skb->data); + db_hw->dataptr = dma_addr; + db_hw->status = 0; + rx->skb[idx][jdx] = skb; + } + sparx5_fdma_rx_add_dcb(rx, dcb, rx->dma + sizeof(*dcb) * idx); + } + netif_napi_add_weight(rx->ndev, &rx->napi, sparx5_fdma_napi_callback, + FDMA_WEIGHT); + napi_enable(&rx->napi); + sparx5_fdma_rx_activate(sparx5, rx); + return 0; +} + +static int sparx5_fdma_tx_alloc(struct sparx5 *sparx5) +{ + struct sparx5_tx *tx = &sparx5->tx; + struct sparx5_tx_dcb_hw *dcb; + int idx, jdx; + int size; + + size = sizeof(struct sparx5_tx_dcb_hw) * FDMA_DCB_MAX; + size = ALIGN(size, PAGE_SIZE); + tx->curr_entry = devm_kzalloc(sparx5->dev, size, GFP_KERNEL); + if (!tx->curr_entry) + return -ENOMEM; + tx->dma = virt_to_phys(tx->curr_entry); + tx->first_entry = tx->curr_entry; + INIT_LIST_HEAD(&tx->db_list); + /* Now for each dcb allocate the db */ + for (idx = 0; idx < FDMA_DCB_MAX; ++idx) { + dcb = &tx->curr_entry[idx]; + dcb->info = 0; + /* TX databuffers must be 16byte aligned */ + for (jdx = 0; jdx < FDMA_TX_DCB_MAX_DBS; ++jdx) { + struct sparx5_db_hw *db_hw = &dcb->db[jdx]; + struct sparx5_db *db; + dma_addr_t phys; + void *cpu_addr; + + cpu_addr = devm_kzalloc(sparx5->dev, + FDMA_XTR_BUFFER_SIZE, + GFP_KERNEL); + if (!cpu_addr) + return -ENOMEM; + phys = virt_to_phys(cpu_addr); + db_hw->dataptr = phys; + db_hw->status = 0; + db = devm_kzalloc(sparx5->dev, sizeof(*db), GFP_KERNEL); + if (!db) + return -ENOMEM; + db->cpu_addr = cpu_addr; + list_add_tail(&db->list, &tx->db_list); + } + sparx5_fdma_tx_add_dcb(tx, dcb, tx->dma + sizeof(*dcb) * idx); + /* Let the curr_entry to point to the last allocated entry */ + if (idx == FDMA_DCB_MAX - 1) + tx->curr_entry = dcb; + } + return 0; +} + +static void sparx5_fdma_rx_init(struct sparx5 *sparx5, + struct sparx5_rx *rx, int channel) +{ + int idx; + + rx->channel_id = channel; + /* Fetch a netdev for SKB and NAPI use, any will do */ + for (idx = 0; idx < SPX5_PORTS; ++idx) { + struct sparx5_port *port = sparx5->ports[idx]; + + if (port && port->ndev) { + rx->ndev = port->ndev; + break; + } + } +} + +static void sparx5_fdma_tx_init(struct sparx5 *sparx5, + struct sparx5_tx *tx, int channel) +{ + tx->channel_id = channel; +} + +irqreturn_t sparx5_fdma_handler(int irq, void *args) +{ + struct sparx5 *sparx5 = args; + u32 db = 0, err = 0; + + db = spx5_rd(sparx5, FDMA_INTR_DB); + err = spx5_rd(sparx5, FDMA_INTR_ERR); + /* Clear interrupt */ + if (db) { + spx5_wr(0, sparx5, FDMA_INTR_DB_ENA); + spx5_wr(db, sparx5, FDMA_INTR_DB); + napi_schedule(&sparx5->rx.napi); + } + if (err) { + u32 err_type = spx5_rd(sparx5, FDMA_ERRORS); + + dev_err_ratelimited(sparx5->dev, + "ERR: int: %#x, type: %#x\n", + err, err_type); + spx5_wr(err, sparx5, 
FDMA_INTR_ERR); + spx5_wr(err_type, sparx5, FDMA_ERRORS); + } + return IRQ_HANDLED; +} + +static void sparx5_fdma_injection_mode(struct sparx5 *sparx5) +{ + const int byte_swap = 1; + int portno; + int urgency; + + /* Change mode to fdma extraction and injection */ + spx5_wr(QS_XTR_GRP_CFG_MODE_SET(2) | + QS_XTR_GRP_CFG_STATUS_WORD_POS_SET(1) | + QS_XTR_GRP_CFG_BYTE_SWAP_SET(byte_swap), + sparx5, QS_XTR_GRP_CFG(XTR_QUEUE)); + spx5_wr(QS_INJ_GRP_CFG_MODE_SET(2) | + QS_INJ_GRP_CFG_BYTE_SWAP_SET(byte_swap), + sparx5, QS_INJ_GRP_CFG(INJ_QUEUE)); + + /* CPU ports capture setup */ + for (portno = SPX5_PORT_CPU_0; portno <= SPX5_PORT_CPU_1; portno++) { + /* ASM CPU port: No preamble, IFH, enable padding */ + spx5_wr(ASM_PORT_CFG_PAD_ENA_SET(1) | + ASM_PORT_CFG_NO_PREAMBLE_ENA_SET(1) | + ASM_PORT_CFG_INJ_FORMAT_CFG_SET(1), /* 1 = IFH */ + sparx5, ASM_PORT_CFG(portno)); + + /* Reset WM cnt to unclog queued frames */ + spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR_SET(1), + DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR, + sparx5, + DSM_DEV_TX_STOP_WM_CFG(portno)); + + /* Set Disassembler Stop Watermark level */ + spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM_SET(100), + DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM, + sparx5, + DSM_DEV_TX_STOP_WM_CFG(portno)); + + /* Enable port in queue system */ + urgency = sparx5_port_fwd_urg(sparx5, SPEED_2500); + spx5_rmw(QFWD_SWITCH_PORT_MODE_PORT_ENA_SET(1) | + QFWD_SWITCH_PORT_MODE_FWD_URGENCY_SET(urgency), + QFWD_SWITCH_PORT_MODE_PORT_ENA | + QFWD_SWITCH_PORT_MODE_FWD_URGENCY, + sparx5, + QFWD_SWITCH_PORT_MODE(portno)); + + /* Disable Disassembler buffer underrun watchdog + * to avoid truncated packets in XTR + */ + spx5_rmw(DSM_BUF_CFG_UNDERFLOW_WATCHDOG_DIS_SET(1), + DSM_BUF_CFG_UNDERFLOW_WATCHDOG_DIS, + sparx5, + DSM_BUF_CFG(portno)); + + /* Disabling frame aging */ + spx5_rmw(HSCH_PORT_MODE_AGE_DIS_SET(1), + HSCH_PORT_MODE_AGE_DIS, + sparx5, + HSCH_PORT_MODE(portno)); + } +} + +int sparx5_fdma_start(struct sparx5 *sparx5) +{ + int err; + + /* Reset FDMA state */ + spx5_wr(FDMA_CTRL_NRESET_SET(0), sparx5, FDMA_CTRL); + spx5_wr(FDMA_CTRL_NRESET_SET(1), sparx5, FDMA_CTRL); + + /* Force ACP caching but disable read/write allocation */ + spx5_rmw(CPU_PROC_CTRL_ACP_CACHE_FORCE_ENA_SET(1) | + CPU_PROC_CTRL_ACP_AWCACHE_SET(0) | + CPU_PROC_CTRL_ACP_ARCACHE_SET(0), + CPU_PROC_CTRL_ACP_CACHE_FORCE_ENA | + CPU_PROC_CTRL_ACP_AWCACHE | + CPU_PROC_CTRL_ACP_ARCACHE, + sparx5, CPU_PROC_CTRL); + + sparx5_fdma_injection_mode(sparx5); + sparx5_fdma_rx_init(sparx5, &sparx5->rx, FDMA_XTR_CHANNEL); + sparx5_fdma_tx_init(sparx5, &sparx5->tx, FDMA_INJ_CHANNEL); + err = sparx5_fdma_rx_alloc(sparx5); + if (err) { + dev_err(sparx5->dev, "Could not allocate RX buffers: %d\n", err); + return err; + } + err = sparx5_fdma_tx_alloc(sparx5); + if (err) { + dev_err(sparx5->dev, "Could not allocate TX buffers: %d\n", err); + return err; + } + return err; +} + +static u32 sparx5_fdma_port_ctrl(struct sparx5 *sparx5) +{ + return spx5_rd(sparx5, FDMA_PORT_CTRL(0)); +} + +int sparx5_fdma_stop(struct sparx5 *sparx5) +{ + u32 val; + + napi_disable(&sparx5->rx.napi); + /* Stop the fdma and channel interrupts */ + sparx5_fdma_rx_deactivate(sparx5, &sparx5->rx); + sparx5_fdma_tx_deactivate(sparx5, &sparx5->tx); + /* Wait for the RX channel to stop */ + read_poll_timeout(sparx5_fdma_port_ctrl, val, + FDMA_PORT_CTRL_XTR_BUF_IS_EMPTY_GET(val) == 0, + 500, 10000, 0, sparx5); + return 0; +} diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c b/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c 
new file mode 100644 index 000000000..4af285918 --- /dev/null +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c @@ -0,0 +1,503 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Microchip Sparx5 Switch driver + * + * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries. + */ + +#include <net/switchdev.h> +#include <linux/if_bridge.h> +#include <linux/iopoll.h> + +#include "sparx5_main_regs.h" +#include "sparx5_main.h" + +/* Commands for Mac Table Command register */ +#define MAC_CMD_LEARN 0 /* Insert (Learn) 1 entry */ +#define MAC_CMD_UNLEARN 1 /* Unlearn (Forget) 1 entry */ +#define MAC_CMD_LOOKUP 2 /* Look up 1 entry */ +#define MAC_CMD_READ 3 /* Read entry at Mac Table Index */ +#define MAC_CMD_WRITE 4 /* Write entry at Mac Table Index */ +#define MAC_CMD_SCAN 5 /* Scan (Age or find next) */ +#define MAC_CMD_FIND_SMALLEST 6 /* Get next entry */ +#define MAC_CMD_CLEAR_ALL 7 /* Delete all entries in table */ + +/* Commands for MAC_ENTRY_ADDR_TYPE */ +#define MAC_ENTRY_ADDR_TYPE_UPSID_PN 0 +#define MAC_ENTRY_ADDR_TYPE_UPSID_CPU_OR_INT 1 +#define MAC_ENTRY_ADDR_TYPE_GLAG 2 +#define MAC_ENTRY_ADDR_TYPE_MC_IDX 3 + +#define TABLE_UPDATE_SLEEP_US 10 +#define TABLE_UPDATE_TIMEOUT_US 100000 + +struct sparx5_mact_entry { + struct list_head list; + unsigned char mac[ETH_ALEN]; + u32 flags; +#define MAC_ENT_ALIVE BIT(0) +#define MAC_ENT_MOVED BIT(1) +#define MAC_ENT_LOCK BIT(2) + u16 vid; + u16 port; +}; + +static int sparx5_mact_get_status(struct sparx5 *sparx5) +{ + return spx5_rd(sparx5, LRN_COMMON_ACCESS_CTRL); +} + +static int sparx5_mact_wait_for_completion(struct sparx5 *sparx5) +{ + u32 val; + + return readx_poll_timeout(sparx5_mact_get_status, + sparx5, val, + LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_GET(val) == 0, + TABLE_UPDATE_SLEEP_US, TABLE_UPDATE_TIMEOUT_US); +} + +static void sparx5_mact_select(struct sparx5 *sparx5, + const unsigned char mac[ETH_ALEN], + u16 vid) +{ + u32 macl = 0, mach = 0; + + /* Set the MAC address to handle and the vlan associated in a format + * understood by the hardware. 
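+ * CFG_0 holds the VID together with the two most significant MAC bytes; CFG_1 holds the remaining four bytes.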
+ */ + mach |= vid << 16; + mach |= mac[0] << 8; + mach |= mac[1] << 0; + macl |= mac[2] << 24; + macl |= mac[3] << 16; + macl |= mac[4] << 8; + macl |= mac[5] << 0; + + spx5_wr(mach, sparx5, LRN_MAC_ACCESS_CFG_0); + spx5_wr(macl, sparx5, LRN_MAC_ACCESS_CFG_1); +} + +int sparx5_mact_learn(struct sparx5 *sparx5, int pgid, + const unsigned char mac[ETH_ALEN], u16 vid) +{ + int addr, type, ret; + + if (pgid < SPX5_PORTS) { + type = MAC_ENTRY_ADDR_TYPE_UPSID_PN; + addr = pgid % 32; + addr += (pgid / 32) << 5; /* Add upsid */ + } else { + type = MAC_ENTRY_ADDR_TYPE_MC_IDX; + addr = pgid - SPX5_PORTS; + } + + mutex_lock(&sparx5->lock); + + sparx5_mact_select(sparx5, mac, vid); + + /* MAC entry properties */ + spx5_wr(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_SET(addr) | + LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_TYPE_SET(type) | + LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLD_SET(1) | + LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_LOCKED_SET(1), + sparx5, LRN_MAC_ACCESS_CFG_2); + spx5_wr(0, sparx5, LRN_MAC_ACCESS_CFG_3); + + /* Insert/learn new entry */ + spx5_wr(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD_SET(MAC_CMD_LEARN) | + LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_SET(1), + sparx5, LRN_COMMON_ACCESS_CTRL); + + ret = sparx5_mact_wait_for_completion(sparx5); + + mutex_unlock(&sparx5->lock); + + return ret; +} + +int sparx5_mc_unsync(struct net_device *dev, const unsigned char *addr) +{ + struct sparx5_port *port = netdev_priv(dev); + struct sparx5 *sparx5 = port->sparx5; + + return sparx5_mact_forget(sparx5, addr, port->pvid); +} + +int sparx5_mc_sync(struct net_device *dev, const unsigned char *addr) +{ + struct sparx5_port *port = netdev_priv(dev); + struct sparx5 *sparx5 = port->sparx5; + + return sparx5_mact_learn(sparx5, PGID_CPU, addr, port->pvid); +} + +static int sparx5_mact_get(struct sparx5 *sparx5, + unsigned char mac[ETH_ALEN], + u16 *vid, u32 *pcfg2) +{ + u32 mach, macl, cfg2; + int ret = -ENOENT; + + cfg2 = spx5_rd(sparx5, LRN_MAC_ACCESS_CFG_2); + if (LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLD_GET(cfg2)) { + mach = spx5_rd(sparx5, LRN_MAC_ACCESS_CFG_0); + macl = spx5_rd(sparx5, LRN_MAC_ACCESS_CFG_1); + mac[0] = ((mach >> 8) & 0xff); + mac[1] = ((mach >> 0) & 0xff); + mac[2] = ((macl >> 24) & 0xff); + mac[3] = ((macl >> 16) & 0xff); + mac[4] = ((macl >> 8) & 0xff); + mac[5] = ((macl >> 0) & 0xff); + *vid = mach >> 16; + *pcfg2 = cfg2; + ret = 0; + } + + return ret; +} + +bool sparx5_mact_getnext(struct sparx5 *sparx5, + unsigned char mac[ETH_ALEN], u16 *vid, u32 *pcfg2) +{ + u32 cfg2; + int ret; + + mutex_lock(&sparx5->lock); + + sparx5_mact_select(sparx5, mac, *vid); + + spx5_wr(LRN_SCAN_NEXT_CFG_SCAN_NEXT_IGNORE_LOCKED_ENA_SET(1) | + LRN_SCAN_NEXT_CFG_SCAN_NEXT_UNTIL_FOUND_ENA_SET(1), + sparx5, LRN_SCAN_NEXT_CFG); + spx5_wr(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD_SET + (MAC_CMD_FIND_SMALLEST) | + LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_SET(1), + sparx5, LRN_COMMON_ACCESS_CTRL); + + ret = sparx5_mact_wait_for_completion(sparx5); + if (ret == 0) { + ret = sparx5_mact_get(sparx5, mac, vid, &cfg2); + if (ret == 0) + *pcfg2 = cfg2; + } + + mutex_unlock(&sparx5->lock); + + return ret == 0; +} + +int sparx5_mact_find(struct sparx5 *sparx5, + const unsigned char mac[ETH_ALEN], u16 vid, u32 *pcfg2) +{ + int ret; + u32 cfg2; + + mutex_lock(&sparx5->lock); + + sparx5_mact_select(sparx5, mac, vid); + + /* Issue a lookup command */ + spx5_wr(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD_SET(MAC_CMD_LOOKUP) | + LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_SET(1), + sparx5, LRN_COMMON_ACCESS_CTRL); + + ret = 
sparx5_mact_wait_for_completion(sparx5); + if (ret == 0) { + cfg2 = spx5_rd(sparx5, LRN_MAC_ACCESS_CFG_2); + if (LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLD_GET(cfg2)) + *pcfg2 = cfg2; + else + ret = -ENOENT; + } + + mutex_unlock(&sparx5->lock); + + return ret; +} + +int sparx5_mact_forget(struct sparx5 *sparx5, + const unsigned char mac[ETH_ALEN], u16 vid) +{ + int ret; + + mutex_lock(&sparx5->lock); + + sparx5_mact_select(sparx5, mac, vid); + + /* Issue an unlearn command */ + spx5_wr(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD_SET(MAC_CMD_UNLEARN) | + LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_SET(1), + sparx5, LRN_COMMON_ACCESS_CTRL); + + ret = sparx5_mact_wait_for_completion(sparx5); + + mutex_unlock(&sparx5->lock); + + return ret; +} + +static struct sparx5_mact_entry *alloc_mact_entry(struct sparx5 *sparx5, + const unsigned char *mac, + u16 vid, u16 port_index) +{ + struct sparx5_mact_entry *mact_entry; + + mact_entry = devm_kzalloc(sparx5->dev, + sizeof(*mact_entry), GFP_ATOMIC); + if (!mact_entry) + return NULL; + + memcpy(mact_entry->mac, mac, ETH_ALEN); + mact_entry->vid = vid; + mact_entry->port = port_index; + return mact_entry; +} + +static struct sparx5_mact_entry *find_mact_entry(struct sparx5 *sparx5, + const unsigned char *mac, + u16 vid, u16 port_index) +{ + struct sparx5_mact_entry *mact_entry; + struct sparx5_mact_entry *res = NULL; + + mutex_lock(&sparx5->mact_lock); + list_for_each_entry(mact_entry, &sparx5->mact_entries, list) { + if (mact_entry->vid == vid && + ether_addr_equal(mac, mact_entry->mac) && + mact_entry->port == port_index) { + res = mact_entry; + break; + } + } + mutex_unlock(&sparx5->mact_lock); + + return res; +} + +static void sparx5_fdb_call_notifiers(enum switchdev_notifier_type type, + const char *mac, u16 vid, + struct net_device *dev, bool offloaded) +{ + struct switchdev_notifier_fdb_info info = {}; + + info.addr = mac; + info.vid = vid; + info.offloaded = offloaded; + call_switchdev_notifiers(type, dev, &info.info, NULL); +} + +int sparx5_add_mact_entry(struct sparx5 *sparx5, + struct net_device *dev, + u16 portno, + const unsigned char *addr, u16 vid) +{ + struct sparx5_mact_entry *mact_entry; + int ret; + u32 cfg2; + + ret = sparx5_mact_find(sparx5, addr, vid, &cfg2); + if (!ret) + return 0; + + /* In case the entry already exists, don't add it again to SW, + * just update HW, but we need to look in the actual HW because + * it is possible for an entry to be learn by HW and before the + * mact thread to start the frame will reach CPU and the CPU will + * add the entry but without the extern_learn flag. + */ + mact_entry = find_mact_entry(sparx5, addr, vid, portno); + if (mact_entry) + goto update_hw; + + /* Add the entry in SW MAC table not to get the notification when + * SW is pulling again + */ + mact_entry = alloc_mact_entry(sparx5, addr, vid, portno); + if (!mact_entry) + return -ENOMEM; + + mutex_lock(&sparx5->mact_lock); + list_add_tail(&mact_entry->list, &sparx5->mact_entries); + mutex_unlock(&sparx5->mact_lock); + +update_hw: + ret = sparx5_mact_learn(sparx5, portno, addr, vid); + + /* New entry? 
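+ * If so, lock it against ageing and let the bridge know it is offloaded.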
*/ + if (mact_entry->flags == 0) { + mact_entry->flags |= MAC_ENT_LOCK; /* Don't age this */ + sparx5_fdb_call_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE, addr, vid, + dev, true); + } + + return ret; +} + +int sparx5_del_mact_entry(struct sparx5 *sparx5, + const unsigned char *addr, + u16 vid) +{ + struct sparx5_mact_entry *mact_entry, *tmp; + + /* Delete the entry in SW MAC table not to get the notification when + * SW is pulling again + */ + mutex_lock(&sparx5->mact_lock); + list_for_each_entry_safe(mact_entry, tmp, &sparx5->mact_entries, + list) { + if ((vid == 0 || mact_entry->vid == vid) && + ether_addr_equal(addr, mact_entry->mac)) { + list_del(&mact_entry->list); + devm_kfree(sparx5->dev, mact_entry); + + sparx5_mact_forget(sparx5, addr, mact_entry->vid); + } + } + mutex_unlock(&sparx5->mact_lock); + + return 0; +} + +static void sparx5_mact_handle_entry(struct sparx5 *sparx5, + unsigned char mac[ETH_ALEN], + u16 vid, u32 cfg2) +{ + struct sparx5_mact_entry *mact_entry; + bool found = false; + u16 port; + + if (LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_TYPE_GET(cfg2) != + MAC_ENTRY_ADDR_TYPE_UPSID_PN) + return; + + port = LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_GET(cfg2); + if (port >= SPX5_PORTS) + return; + + if (!test_bit(port, sparx5->bridge_mask)) + return; + + mutex_lock(&sparx5->mact_lock); + list_for_each_entry(mact_entry, &sparx5->mact_entries, list) { + if (mact_entry->vid == vid && + ether_addr_equal(mac, mact_entry->mac)) { + found = true; + mact_entry->flags |= MAC_ENT_ALIVE; + if (mact_entry->port != port) { + dev_warn(sparx5->dev, "Entry move: %d -> %d\n", + mact_entry->port, port); + mact_entry->port = port; + mact_entry->flags |= MAC_ENT_MOVED; + } + /* Entry handled */ + break; + } + } + mutex_unlock(&sparx5->mact_lock); + + if (found && !(mact_entry->flags & MAC_ENT_MOVED)) + /* Present, not moved */ + return; + + if (!found) { + /* Entry not found - now add */ + mact_entry = alloc_mact_entry(sparx5, mac, vid, port); + if (!mact_entry) + return; + + mact_entry->flags |= MAC_ENT_ALIVE; + mutex_lock(&sparx5->mact_lock); + list_add_tail(&mact_entry->list, &sparx5->mact_entries); + mutex_unlock(&sparx5->mact_lock); + } + + /* New or moved entry - notify bridge */ + sparx5_fdb_call_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE, + mac, vid, sparx5->ports[port]->ndev, + true); +} + +void sparx5_mact_pull_work(struct work_struct *work) +{ + struct delayed_work *del_work = to_delayed_work(work); + struct sparx5 *sparx5 = container_of(del_work, struct sparx5, + mact_work); + struct sparx5_mact_entry *mact_entry, *tmp; + unsigned char mac[ETH_ALEN]; + u32 cfg2; + u16 vid; + int ret; + + /* Reset MAC entry flags */ + mutex_lock(&sparx5->mact_lock); + list_for_each_entry(mact_entry, &sparx5->mact_entries, list) + mact_entry->flags &= MAC_ENT_LOCK; + mutex_unlock(&sparx5->mact_lock); + + /* MAIN mac address processing loop */ + vid = 0; + memset(mac, 0, sizeof(mac)); + do { + mutex_lock(&sparx5->lock); + sparx5_mact_select(sparx5, mac, vid); + spx5_wr(LRN_SCAN_NEXT_CFG_SCAN_NEXT_UNTIL_FOUND_ENA_SET(1), + sparx5, LRN_SCAN_NEXT_CFG); + spx5_wr(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD_SET + (MAC_CMD_FIND_SMALLEST) | + LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_SET(1), + sparx5, LRN_COMMON_ACCESS_CTRL); + ret = sparx5_mact_wait_for_completion(sparx5); + if (ret == 0) + ret = sparx5_mact_get(sparx5, mac, &vid, &cfg2); + mutex_unlock(&sparx5->lock); + if (ret == 0) + sparx5_mact_handle_entry(sparx5, mac, vid, cfg2); + } while (ret == 0); + + mutex_lock(&sparx5->mact_lock); + 
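+ /* Age out entries that the scan above did not mark alive (unless locked): notify the bridge and remove them from the SW list */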
list_for_each_entry_safe(mact_entry, tmp, &sparx5->mact_entries, + list) { + /* If the entry is in HW or permanent, then skip */ + if (mact_entry->flags & (MAC_ENT_ALIVE | MAC_ENT_LOCK)) + continue; + + sparx5_fdb_call_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, + mact_entry->mac, mact_entry->vid, + sparx5->ports[mact_entry->port]->ndev, + true); + + list_del(&mact_entry->list); + devm_kfree(sparx5->dev, mact_entry); + } + mutex_unlock(&sparx5->mact_lock); + + queue_delayed_work(sparx5->mact_queue, &sparx5->mact_work, + SPX5_MACT_PULL_DELAY); +} + +void sparx5_set_ageing(struct sparx5 *sparx5, int msecs) +{ + int value = max(1, msecs / 10); /* unit 10 ms */ + + spx5_rmw(LRN_AUTOAGE_CFG_UNIT_SIZE_SET(2) | /* 10 ms */ + LRN_AUTOAGE_CFG_PERIOD_VAL_SET(value / 2), /* one bit ageing */ + LRN_AUTOAGE_CFG_UNIT_SIZE | + LRN_AUTOAGE_CFG_PERIOD_VAL, + sparx5, + LRN_AUTOAGE_CFG(0)); +} + +void sparx5_mact_init(struct sparx5 *sparx5) +{ + mutex_init(&sparx5->lock); + + /* Flush MAC table */ + spx5_wr(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD_SET(MAC_CMD_CLEAR_ALL) | + LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_SET(1), + sparx5, LRN_COMMON_ACCESS_CTRL); + + if (sparx5_mact_wait_for_completion(sparx5) != 0) + dev_warn(sparx5->dev, "MAC flush error\n"); + + sparx5_set_ageing(sparx5, BR_DEFAULT_AGEING_TIME / HZ * 1000); +} diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c new file mode 100644 index 000000000..3423c95cc --- /dev/null +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c @@ -0,0 +1,940 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Microchip Sparx5 Switch driver + * + * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries. + * + * The Sparx5 Chip Register Model can be browsed at this location: + * https://github.com/microchip-ung/sparx-5_reginfo + */ +#include <linux/module.h> +#include <linux/device.h> +#include <linux/netdevice.h> +#include <linux/platform_device.h> +#include <linux/interrupt.h> +#include <linux/of.h> +#include <linux/of_net.h> +#include <linux/of_mdio.h> +#include <net/switchdev.h> +#include <linux/etherdevice.h> +#include <linux/io.h> +#include <linux/printk.h> +#include <linux/iopoll.h> +#include <linux/mfd/syscon.h> +#include <linux/regmap.h> +#include <linux/types.h> +#include <linux/reset.h> + +#include "sparx5_main_regs.h" +#include "sparx5_main.h" +#include "sparx5_port.h" +#include "sparx5_qos.h" + +#define QLIM_WM(fraction) \ + ((SPX5_BUFFER_MEMORY / SPX5_BUFFER_CELL_SZ - 100) * (fraction) / 100) +#define IO_RANGES 3 + +struct initial_port_config { + u32 portno; + struct device_node *node; + struct sparx5_port_config conf; + struct phy *serdes; +}; + +struct sparx5_ram_config { + void __iomem *init_reg; + u32 init_val; +}; + +struct sparx5_main_io_resource { + enum sparx5_target id; + phys_addr_t offset; + int range; +}; + +static const struct sparx5_main_io_resource sparx5_main_iomap[] = { + { TARGET_CPU, 0, 0 }, /* 0x600000000 */ + { TARGET_FDMA, 0x80000, 0 }, /* 0x600080000 */ + { TARGET_PCEP, 0x400000, 0 }, /* 0x600400000 */ + { TARGET_DEV2G5, 0x10004000, 1 }, /* 0x610004000 */ + { TARGET_DEV5G, 0x10008000, 1 }, /* 0x610008000 */ + { TARGET_PCS5G_BR, 0x1000c000, 1 }, /* 0x61000c000 */ + { TARGET_DEV2G5 + 1, 0x10010000, 1 }, /* 0x610010000 */ + { TARGET_DEV5G + 1, 0x10014000, 1 }, /* 0x610014000 */ + { TARGET_PCS5G_BR + 1, 0x10018000, 1 }, /* 0x610018000 */ + { TARGET_DEV2G5 + 2, 0x1001c000, 1 }, /* 0x61001c000 */ + { TARGET_DEV5G + 2, 0x10020000, 1 }, /* 0x610020000 */ + { 
TARGET_PCS5G_BR + 2, 0x10024000, 1 }, /* 0x610024000 */ + { TARGET_DEV2G5 + 6, 0x10028000, 1 }, /* 0x610028000 */ + { TARGET_DEV5G + 6, 0x1002c000, 1 }, /* 0x61002c000 */ + { TARGET_PCS5G_BR + 6, 0x10030000, 1 }, /* 0x610030000 */ + { TARGET_DEV2G5 + 7, 0x10034000, 1 }, /* 0x610034000 */ + { TARGET_DEV5G + 7, 0x10038000, 1 }, /* 0x610038000 */ + { TARGET_PCS5G_BR + 7, 0x1003c000, 1 }, /* 0x61003c000 */ + { TARGET_DEV2G5 + 8, 0x10040000, 1 }, /* 0x610040000 */ + { TARGET_DEV5G + 8, 0x10044000, 1 }, /* 0x610044000 */ + { TARGET_PCS5G_BR + 8, 0x10048000, 1 }, /* 0x610048000 */ + { TARGET_DEV2G5 + 9, 0x1004c000, 1 }, /* 0x61004c000 */ + { TARGET_DEV5G + 9, 0x10050000, 1 }, /* 0x610050000 */ + { TARGET_PCS5G_BR + 9, 0x10054000, 1 }, /* 0x610054000 */ + { TARGET_DEV2G5 + 10, 0x10058000, 1 }, /* 0x610058000 */ + { TARGET_DEV5G + 10, 0x1005c000, 1 }, /* 0x61005c000 */ + { TARGET_PCS5G_BR + 10, 0x10060000, 1 }, /* 0x610060000 */ + { TARGET_DEV2G5 + 11, 0x10064000, 1 }, /* 0x610064000 */ + { TARGET_DEV5G + 11, 0x10068000, 1 }, /* 0x610068000 */ + { TARGET_PCS5G_BR + 11, 0x1006c000, 1 }, /* 0x61006c000 */ + { TARGET_DEV2G5 + 12, 0x10070000, 1 }, /* 0x610070000 */ + { TARGET_DEV10G, 0x10074000, 1 }, /* 0x610074000 */ + { TARGET_PCS10G_BR, 0x10078000, 1 }, /* 0x610078000 */ + { TARGET_DEV2G5 + 14, 0x1007c000, 1 }, /* 0x61007c000 */ + { TARGET_DEV10G + 2, 0x10080000, 1 }, /* 0x610080000 */ + { TARGET_PCS10G_BR + 2, 0x10084000, 1 }, /* 0x610084000 */ + { TARGET_DEV2G5 + 15, 0x10088000, 1 }, /* 0x610088000 */ + { TARGET_DEV10G + 3, 0x1008c000, 1 }, /* 0x61008c000 */ + { TARGET_PCS10G_BR + 3, 0x10090000, 1 }, /* 0x610090000 */ + { TARGET_DEV2G5 + 16, 0x10094000, 1 }, /* 0x610094000 */ + { TARGET_DEV2G5 + 17, 0x10098000, 1 }, /* 0x610098000 */ + { TARGET_DEV2G5 + 18, 0x1009c000, 1 }, /* 0x61009c000 */ + { TARGET_DEV2G5 + 19, 0x100a0000, 1 }, /* 0x6100a0000 */ + { TARGET_DEV2G5 + 20, 0x100a4000, 1 }, /* 0x6100a4000 */ + { TARGET_DEV2G5 + 21, 0x100a8000, 1 }, /* 0x6100a8000 */ + { TARGET_DEV2G5 + 22, 0x100ac000, 1 }, /* 0x6100ac000 */ + { TARGET_DEV2G5 + 23, 0x100b0000, 1 }, /* 0x6100b0000 */ + { TARGET_DEV2G5 + 32, 0x100b4000, 1 }, /* 0x6100b4000 */ + { TARGET_DEV2G5 + 33, 0x100b8000, 1 }, /* 0x6100b8000 */ + { TARGET_DEV2G5 + 34, 0x100bc000, 1 }, /* 0x6100bc000 */ + { TARGET_DEV2G5 + 35, 0x100c0000, 1 }, /* 0x6100c0000 */ + { TARGET_DEV2G5 + 36, 0x100c4000, 1 }, /* 0x6100c4000 */ + { TARGET_DEV2G5 + 37, 0x100c8000, 1 }, /* 0x6100c8000 */ + { TARGET_DEV2G5 + 38, 0x100cc000, 1 }, /* 0x6100cc000 */ + { TARGET_DEV2G5 + 39, 0x100d0000, 1 }, /* 0x6100d0000 */ + { TARGET_DEV2G5 + 40, 0x100d4000, 1 }, /* 0x6100d4000 */ + { TARGET_DEV2G5 + 41, 0x100d8000, 1 }, /* 0x6100d8000 */ + { TARGET_DEV2G5 + 42, 0x100dc000, 1 }, /* 0x6100dc000 */ + { TARGET_DEV2G5 + 43, 0x100e0000, 1 }, /* 0x6100e0000 */ + { TARGET_DEV2G5 + 44, 0x100e4000, 1 }, /* 0x6100e4000 */ + { TARGET_DEV2G5 + 45, 0x100e8000, 1 }, /* 0x6100e8000 */ + { TARGET_DEV2G5 + 46, 0x100ec000, 1 }, /* 0x6100ec000 */ + { TARGET_DEV2G5 + 47, 0x100f0000, 1 }, /* 0x6100f0000 */ + { TARGET_DEV2G5 + 57, 0x100f4000, 1 }, /* 0x6100f4000 */ + { TARGET_DEV25G + 1, 0x100f8000, 1 }, /* 0x6100f8000 */ + { TARGET_PCS25G_BR + 1, 0x100fc000, 1 }, /* 0x6100fc000 */ + { TARGET_DEV2G5 + 59, 0x10104000, 1 }, /* 0x610104000 */ + { TARGET_DEV25G + 3, 0x10108000, 1 }, /* 0x610108000 */ + { TARGET_PCS25G_BR + 3, 0x1010c000, 1 }, /* 0x61010c000 */ + { TARGET_DEV2G5 + 60, 0x10114000, 1 }, /* 0x610114000 */ + { TARGET_DEV25G + 4, 0x10118000, 1 }, /* 0x610118000 */ + { TARGET_PCS25G_BR + 4, 
0x1011c000, 1 }, /* 0x61011c000 */ + { TARGET_DEV2G5 + 64, 0x10124000, 1 }, /* 0x610124000 */ + { TARGET_DEV5G + 12, 0x10128000, 1 }, /* 0x610128000 */ + { TARGET_PCS5G_BR + 12, 0x1012c000, 1 }, /* 0x61012c000 */ + { TARGET_PORT_CONF, 0x10130000, 1 }, /* 0x610130000 */ + { TARGET_DEV2G5 + 3, 0x10404000, 1 }, /* 0x610404000 */ + { TARGET_DEV5G + 3, 0x10408000, 1 }, /* 0x610408000 */ + { TARGET_PCS5G_BR + 3, 0x1040c000, 1 }, /* 0x61040c000 */ + { TARGET_DEV2G5 + 4, 0x10410000, 1 }, /* 0x610410000 */ + { TARGET_DEV5G + 4, 0x10414000, 1 }, /* 0x610414000 */ + { TARGET_PCS5G_BR + 4, 0x10418000, 1 }, /* 0x610418000 */ + { TARGET_DEV2G5 + 5, 0x1041c000, 1 }, /* 0x61041c000 */ + { TARGET_DEV5G + 5, 0x10420000, 1 }, /* 0x610420000 */ + { TARGET_PCS5G_BR + 5, 0x10424000, 1 }, /* 0x610424000 */ + { TARGET_DEV2G5 + 13, 0x10428000, 1 }, /* 0x610428000 */ + { TARGET_DEV10G + 1, 0x1042c000, 1 }, /* 0x61042c000 */ + { TARGET_PCS10G_BR + 1, 0x10430000, 1 }, /* 0x610430000 */ + { TARGET_DEV2G5 + 24, 0x10434000, 1 }, /* 0x610434000 */ + { TARGET_DEV2G5 + 25, 0x10438000, 1 }, /* 0x610438000 */ + { TARGET_DEV2G5 + 26, 0x1043c000, 1 }, /* 0x61043c000 */ + { TARGET_DEV2G5 + 27, 0x10440000, 1 }, /* 0x610440000 */ + { TARGET_DEV2G5 + 28, 0x10444000, 1 }, /* 0x610444000 */ + { TARGET_DEV2G5 + 29, 0x10448000, 1 }, /* 0x610448000 */ + { TARGET_DEV2G5 + 30, 0x1044c000, 1 }, /* 0x61044c000 */ + { TARGET_DEV2G5 + 31, 0x10450000, 1 }, /* 0x610450000 */ + { TARGET_DEV2G5 + 48, 0x10454000, 1 }, /* 0x610454000 */ + { TARGET_DEV10G + 4, 0x10458000, 1 }, /* 0x610458000 */ + { TARGET_PCS10G_BR + 4, 0x1045c000, 1 }, /* 0x61045c000 */ + { TARGET_DEV2G5 + 49, 0x10460000, 1 }, /* 0x610460000 */ + { TARGET_DEV10G + 5, 0x10464000, 1 }, /* 0x610464000 */ + { TARGET_PCS10G_BR + 5, 0x10468000, 1 }, /* 0x610468000 */ + { TARGET_DEV2G5 + 50, 0x1046c000, 1 }, /* 0x61046c000 */ + { TARGET_DEV10G + 6, 0x10470000, 1 }, /* 0x610470000 */ + { TARGET_PCS10G_BR + 6, 0x10474000, 1 }, /* 0x610474000 */ + { TARGET_DEV2G5 + 51, 0x10478000, 1 }, /* 0x610478000 */ + { TARGET_DEV10G + 7, 0x1047c000, 1 }, /* 0x61047c000 */ + { TARGET_PCS10G_BR + 7, 0x10480000, 1 }, /* 0x610480000 */ + { TARGET_DEV2G5 + 52, 0x10484000, 1 }, /* 0x610484000 */ + { TARGET_DEV10G + 8, 0x10488000, 1 }, /* 0x610488000 */ + { TARGET_PCS10G_BR + 8, 0x1048c000, 1 }, /* 0x61048c000 */ + { TARGET_DEV2G5 + 53, 0x10490000, 1 }, /* 0x610490000 */ + { TARGET_DEV10G + 9, 0x10494000, 1 }, /* 0x610494000 */ + { TARGET_PCS10G_BR + 9, 0x10498000, 1 }, /* 0x610498000 */ + { TARGET_DEV2G5 + 54, 0x1049c000, 1 }, /* 0x61049c000 */ + { TARGET_DEV10G + 10, 0x104a0000, 1 }, /* 0x6104a0000 */ + { TARGET_PCS10G_BR + 10, 0x104a4000, 1 }, /* 0x6104a4000 */ + { TARGET_DEV2G5 + 55, 0x104a8000, 1 }, /* 0x6104a8000 */ + { TARGET_DEV10G + 11, 0x104ac000, 1 }, /* 0x6104ac000 */ + { TARGET_PCS10G_BR + 11, 0x104b0000, 1 }, /* 0x6104b0000 */ + { TARGET_DEV2G5 + 56, 0x104b4000, 1 }, /* 0x6104b4000 */ + { TARGET_DEV25G, 0x104b8000, 1 }, /* 0x6104b8000 */ + { TARGET_PCS25G_BR, 0x104bc000, 1 }, /* 0x6104bc000 */ + { TARGET_DEV2G5 + 58, 0x104c4000, 1 }, /* 0x6104c4000 */ + { TARGET_DEV25G + 2, 0x104c8000, 1 }, /* 0x6104c8000 */ + { TARGET_PCS25G_BR + 2, 0x104cc000, 1 }, /* 0x6104cc000 */ + { TARGET_DEV2G5 + 61, 0x104d4000, 1 }, /* 0x6104d4000 */ + { TARGET_DEV25G + 5, 0x104d8000, 1 }, /* 0x6104d8000 */ + { TARGET_PCS25G_BR + 5, 0x104dc000, 1 }, /* 0x6104dc000 */ + { TARGET_DEV2G5 + 62, 0x104e4000, 1 }, /* 0x6104e4000 */ + { TARGET_DEV25G + 6, 0x104e8000, 1 }, /* 0x6104e8000 */ + { TARGET_PCS25G_BR + 6, 0x104ec000, 
1 }, /* 0x6104ec000 */ + { TARGET_DEV2G5 + 63, 0x104f4000, 1 }, /* 0x6104f4000 */ + { TARGET_DEV25G + 7, 0x104f8000, 1 }, /* 0x6104f8000 */ + { TARGET_PCS25G_BR + 7, 0x104fc000, 1 }, /* 0x6104fc000 */ + { TARGET_DSM, 0x10504000, 1 }, /* 0x610504000 */ + { TARGET_ASM, 0x10600000, 1 }, /* 0x610600000 */ + { TARGET_GCB, 0x11010000, 2 }, /* 0x611010000 */ + { TARGET_QS, 0x11030000, 2 }, /* 0x611030000 */ + { TARGET_PTP, 0x11040000, 2 }, /* 0x611040000 */ + { TARGET_ANA_ACL, 0x11050000, 2 }, /* 0x611050000 */ + { TARGET_LRN, 0x11060000, 2 }, /* 0x611060000 */ + { TARGET_VCAP_SUPER, 0x11080000, 2 }, /* 0x611080000 */ + { TARGET_QSYS, 0x110a0000, 2 }, /* 0x6110a0000 */ + { TARGET_QFWD, 0x110b0000, 2 }, /* 0x6110b0000 */ + { TARGET_XQS, 0x110c0000, 2 }, /* 0x6110c0000 */ + { TARGET_CLKGEN, 0x11100000, 2 }, /* 0x611100000 */ + { TARGET_ANA_AC_POL, 0x11200000, 2 }, /* 0x611200000 */ + { TARGET_QRES, 0x11280000, 2 }, /* 0x611280000 */ + { TARGET_EACL, 0x112c0000, 2 }, /* 0x6112c0000 */ + { TARGET_ANA_CL, 0x11400000, 2 }, /* 0x611400000 */ + { TARGET_ANA_L3, 0x11480000, 2 }, /* 0x611480000 */ + { TARGET_HSCH, 0x11580000, 2 }, /* 0x611580000 */ + { TARGET_REW, 0x11600000, 2 }, /* 0x611600000 */ + { TARGET_ANA_L2, 0x11800000, 2 }, /* 0x611800000 */ + { TARGET_ANA_AC, 0x11900000, 2 }, /* 0x611900000 */ + { TARGET_VOP, 0x11a00000, 2 }, /* 0x611a00000 */ +}; + +static int sparx5_create_targets(struct sparx5 *sparx5) +{ + struct resource *iores[IO_RANGES]; + void __iomem *iomem[IO_RANGES]; + void __iomem *begin[IO_RANGES]; + int range_id[IO_RANGES]; + int idx, jdx; + + for (idx = 0, jdx = 0; jdx < ARRAY_SIZE(sparx5_main_iomap); jdx++) { + const struct sparx5_main_io_resource *iomap = &sparx5_main_iomap[jdx]; + + if (idx == iomap->range) { + range_id[idx] = jdx; + idx++; + } + } + for (idx = 0; idx < IO_RANGES; idx++) { + iores[idx] = platform_get_resource(sparx5->pdev, IORESOURCE_MEM, + idx); + if (!iores[idx]) { + dev_err(sparx5->dev, "Invalid resource\n"); + return -EINVAL; + } + iomem[idx] = devm_ioremap(sparx5->dev, + iores[idx]->start, + resource_size(iores[idx])); + if (!iomem[idx]) { + dev_err(sparx5->dev, "Unable to get switch registers: %s\n", + iores[idx]->name); + return -ENOMEM; + } + begin[idx] = iomem[idx] - sparx5_main_iomap[range_id[idx]].offset; + } + for (jdx = 0; jdx < ARRAY_SIZE(sparx5_main_iomap); jdx++) { + const struct sparx5_main_io_resource *iomap = &sparx5_main_iomap[jdx]; + + sparx5->regs[iomap->id] = begin[iomap->range] + iomap->offset; + } + return 0; +} + +static int sparx5_create_port(struct sparx5 *sparx5, + struct initial_port_config *config) +{ + struct sparx5_port *spx5_port; + struct net_device *ndev; + struct phylink *phylink; + int err; + + ndev = sparx5_create_netdev(sparx5, config->portno); + if (IS_ERR(ndev)) { + dev_err(sparx5->dev, "Could not create net device: %02u\n", + config->portno); + return PTR_ERR(ndev); + } + spx5_port = netdev_priv(ndev); + spx5_port->of_node = config->node; + spx5_port->serdes = config->serdes; + spx5_port->pvid = NULL_VID; + spx5_port->signd_internal = true; + spx5_port->signd_active_high = true; + spx5_port->signd_enable = true; + spx5_port->max_vlan_tags = SPX5_PORT_MAX_TAGS_NONE; + spx5_port->vlan_type = SPX5_VLAN_PORT_TYPE_UNAWARE; + spx5_port->custom_etype = 0x8880; /* Vitesse */ + spx5_port->phylink_pcs.poll = true; + spx5_port->phylink_pcs.ops = &sparx5_phylink_pcs_ops; + spx5_port->is_mrouter = false; + sparx5->ports[config->portno] = spx5_port; + + err = sparx5_port_init(sparx5, spx5_port, &config->conf); + if (err) { + 
dev_err(sparx5->dev, "port init failed\n"); + return err; + } + spx5_port->conf = config->conf; + + /* Setup VLAN */ + sparx5_vlan_port_setup(sparx5, spx5_port->portno); + + /* Create a phylink for PHY management. Also handles SFPs */ + spx5_port->phylink_config.dev = &spx5_port->ndev->dev; + spx5_port->phylink_config.type = PHYLINK_NETDEV; + spx5_port->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | + MAC_SYM_PAUSE | MAC_10 | MAC_100 | MAC_1000FD | + MAC_2500FD | MAC_5000FD | MAC_10000FD | MAC_25000FD; + + __set_bit(PHY_INTERFACE_MODE_SGMII, + spx5_port->phylink_config.supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_QSGMII, + spx5_port->phylink_config.supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_1000BASEX, + spx5_port->phylink_config.supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_2500BASEX, + spx5_port->phylink_config.supported_interfaces); + + if (spx5_port->conf.bandwidth == SPEED_5000 || + spx5_port->conf.bandwidth == SPEED_10000 || + spx5_port->conf.bandwidth == SPEED_25000) + __set_bit(PHY_INTERFACE_MODE_5GBASER, + spx5_port->phylink_config.supported_interfaces); + + if (spx5_port->conf.bandwidth == SPEED_10000 || + spx5_port->conf.bandwidth == SPEED_25000) + __set_bit(PHY_INTERFACE_MODE_10GBASER, + spx5_port->phylink_config.supported_interfaces); + + if (spx5_port->conf.bandwidth == SPEED_25000) + __set_bit(PHY_INTERFACE_MODE_25GBASER, + spx5_port->phylink_config.supported_interfaces); + + phylink = phylink_create(&spx5_port->phylink_config, + of_fwnode_handle(config->node), + config->conf.phy_mode, + &sparx5_phylink_mac_ops); + if (IS_ERR(phylink)) + return PTR_ERR(phylink); + + spx5_port->phylink = phylink; + + return 0; +} + +static int sparx5_init_ram(struct sparx5 *s5) +{ + const struct sparx5_ram_config spx5_ram_cfg[] = { + {spx5_reg_get(s5, ANA_AC_STAT_RESET), ANA_AC_STAT_RESET_RESET}, + {spx5_reg_get(s5, ASM_STAT_CFG), ASM_STAT_CFG_STAT_CNT_CLR_SHOT}, + {spx5_reg_get(s5, QSYS_RAM_INIT), QSYS_RAM_INIT_RAM_INIT}, + {spx5_reg_get(s5, REW_RAM_INIT), QSYS_RAM_INIT_RAM_INIT}, + {spx5_reg_get(s5, VOP_RAM_INIT), QSYS_RAM_INIT_RAM_INIT}, + {spx5_reg_get(s5, ANA_AC_RAM_INIT), QSYS_RAM_INIT_RAM_INIT}, + {spx5_reg_get(s5, ASM_RAM_INIT), QSYS_RAM_INIT_RAM_INIT}, + {spx5_reg_get(s5, EACL_RAM_INIT), QSYS_RAM_INIT_RAM_INIT}, + {spx5_reg_get(s5, VCAP_SUPER_RAM_INIT), QSYS_RAM_INIT_RAM_INIT}, + {spx5_reg_get(s5, DSM_RAM_INIT), QSYS_RAM_INIT_RAM_INIT} + }; + const struct sparx5_ram_config *cfg; + u32 value, pending, jdx, idx; + + for (jdx = 0; jdx < 10; jdx++) { + pending = ARRAY_SIZE(spx5_ram_cfg); + for (idx = 0; idx < ARRAY_SIZE(spx5_ram_cfg); idx++) { + cfg = &spx5_ram_cfg[idx]; + if (jdx == 0) { + writel(cfg->init_val, cfg->init_reg); + } else { + value = readl(cfg->init_reg); + if ((value & cfg->init_val) != cfg->init_val) + pending--; + } + } + if (!pending) + break; + usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC); + } + + if (pending > 0) { + /* Still initializing, should be complete in + * less than 1ms + */ + dev_err(s5->dev, "Memory initialization error\n"); + return -EINVAL; + } + return 0; +} + +static int sparx5_init_switchcore(struct sparx5 *sparx5) +{ + u32 value; + int err = 0; + + spx5_rmw(EACL_POL_EACL_CFG_EACL_FORCE_INIT_SET(1), + EACL_POL_EACL_CFG_EACL_FORCE_INIT, + sparx5, + EACL_POL_EACL_CFG); + + spx5_rmw(EACL_POL_EACL_CFG_EACL_FORCE_INIT_SET(0), + EACL_POL_EACL_CFG_EACL_FORCE_INIT, + sparx5, + EACL_POL_EACL_CFG); + + /* Initialize memories, if not done already */ + value = spx5_rd(sparx5, HSCH_RESET_CFG); + if (!(value & 
HSCH_RESET_CFG_CORE_ENA)) { + err = sparx5_init_ram(sparx5); + if (err) + return err; + } + + /* Reset counters */ + spx5_wr(ANA_AC_STAT_RESET_RESET_SET(1), sparx5, ANA_AC_STAT_RESET); + spx5_wr(ASM_STAT_CFG_STAT_CNT_CLR_SHOT_SET(1), sparx5, ASM_STAT_CFG); + + /* Enable switch-core and queue system */ + spx5_wr(HSCH_RESET_CFG_CORE_ENA_SET(1), sparx5, HSCH_RESET_CFG); + + return 0; +} + +static int sparx5_init_coreclock(struct sparx5 *sparx5) +{ + enum sparx5_core_clockfreq freq = sparx5->coreclock; + u32 clk_div, clk_period, pol_upd_int, idx; + + /* Verify if core clock frequency is supported on target. + * If 'VTSS_CORE_CLOCK_DEFAULT' then the highest supported + * freq. is used + */ + switch (sparx5->target_ct) { + case SPX5_TARGET_CT_7546: + if (sparx5->coreclock == SPX5_CORE_CLOCK_DEFAULT) + freq = SPX5_CORE_CLOCK_250MHZ; + else if (sparx5->coreclock != SPX5_CORE_CLOCK_250MHZ) + freq = 0; /* Not supported */ + break; + case SPX5_TARGET_CT_7549: + case SPX5_TARGET_CT_7552: + case SPX5_TARGET_CT_7556: + if (sparx5->coreclock == SPX5_CORE_CLOCK_DEFAULT) + freq = SPX5_CORE_CLOCK_500MHZ; + else if (sparx5->coreclock != SPX5_CORE_CLOCK_500MHZ) + freq = 0; /* Not supported */ + break; + case SPX5_TARGET_CT_7558: + case SPX5_TARGET_CT_7558TSN: + if (sparx5->coreclock == SPX5_CORE_CLOCK_DEFAULT) + freq = SPX5_CORE_CLOCK_625MHZ; + else if (sparx5->coreclock != SPX5_CORE_CLOCK_625MHZ) + freq = 0; /* Not supported */ + break; + case SPX5_TARGET_CT_7546TSN: + if (sparx5->coreclock == SPX5_CORE_CLOCK_DEFAULT) + freq = SPX5_CORE_CLOCK_625MHZ; + break; + case SPX5_TARGET_CT_7549TSN: + case SPX5_TARGET_CT_7552TSN: + case SPX5_TARGET_CT_7556TSN: + if (sparx5->coreclock == SPX5_CORE_CLOCK_DEFAULT) + freq = SPX5_CORE_CLOCK_625MHZ; + else if (sparx5->coreclock == SPX5_CORE_CLOCK_250MHZ) + freq = 0; /* Not supported */ + break; + default: + dev_err(sparx5->dev, "Target (%#04x) not supported\n", + sparx5->target_ct); + return -ENODEV; + } + + switch (freq) { + case SPX5_CORE_CLOCK_250MHZ: + clk_div = 10; + pol_upd_int = 312; + break; + case SPX5_CORE_CLOCK_500MHZ: + clk_div = 5; + pol_upd_int = 624; + break; + case SPX5_CORE_CLOCK_625MHZ: + clk_div = 4; + pol_upd_int = 780; + break; + default: + dev_err(sparx5->dev, "%d coreclock not supported on (%#04x)\n", + sparx5->coreclock, sparx5->target_ct); + return -EINVAL; + } + + /* Update state with chosen frequency */ + sparx5->coreclock = freq; + + /* Configure the LCPLL */ + spx5_rmw(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_DIV_SET(clk_div) | + CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_PRE_DIV_SET(0) | + CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_DIR_SET(0) | + CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_SEL_SET(0) | + CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_ENA_SET(0) | + CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_ENA_SET(1), + CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_DIV | + CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_PRE_DIV | + CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_DIR | + CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_SEL | + CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_ENA | + CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_ENA, + sparx5, + CLKGEN_LCPLL1_CORE_CLK_CFG); + + clk_period = sparx5_clk_period(freq); + + spx5_rmw(HSCH_SYS_CLK_PER_SYS_CLK_PER_100PS_SET(clk_period / 100), + HSCH_SYS_CLK_PER_SYS_CLK_PER_100PS, + sparx5, + HSCH_SYS_CLK_PER); + + spx5_rmw(ANA_AC_POL_BDLB_DLB_CTRL_CLK_PERIOD_01NS_SET(clk_period / 100), + ANA_AC_POL_BDLB_DLB_CTRL_CLK_PERIOD_01NS, + sparx5, + ANA_AC_POL_BDLB_DLB_CTRL); + + spx5_rmw(ANA_AC_POL_SLB_DLB_CTRL_CLK_PERIOD_01NS_SET(clk_period / 100), + ANA_AC_POL_SLB_DLB_CTRL_CLK_PERIOD_01NS, + sparx5, + 
ANA_AC_POL_SLB_DLB_CTRL); + + spx5_rmw(LRN_AUTOAGE_CFG_1_CLK_PERIOD_01NS_SET(clk_period / 100), + LRN_AUTOAGE_CFG_1_CLK_PERIOD_01NS, + sparx5, + LRN_AUTOAGE_CFG_1); + + for (idx = 0; idx < 3; idx++) + spx5_rmw(GCB_SIO_CLOCK_SYS_CLK_PERIOD_SET(clk_period / 100), + GCB_SIO_CLOCK_SYS_CLK_PERIOD, + sparx5, + GCB_SIO_CLOCK(idx)); + + spx5_rmw(HSCH_TAS_STATEMACHINE_CFG_REVISIT_DLY_SET + ((256 * 1000) / clk_period), + HSCH_TAS_STATEMACHINE_CFG_REVISIT_DLY, + sparx5, + HSCH_TAS_STATEMACHINE_CFG); + + spx5_rmw(ANA_AC_POL_POL_UPD_INT_CFG_POL_UPD_INT_SET(pol_upd_int), + ANA_AC_POL_POL_UPD_INT_CFG_POL_UPD_INT, + sparx5, + ANA_AC_POL_POL_UPD_INT_CFG); + + return 0; +} + +static int sparx5_qlim_set(struct sparx5 *sparx5) +{ + u32 res, dp, prio; + + for (res = 0; res < 2; res++) { + for (prio = 0; prio < 8; prio++) + spx5_wr(0xFFF, sparx5, + QRES_RES_CFG(prio + 630 + res * 1024)); + + for (dp = 0; dp < 4; dp++) + spx5_wr(0xFFF, sparx5, + QRES_RES_CFG(dp + 638 + res * 1024)); + } + + /* Set 80,90,95,100% of memory size for top watermarks */ + spx5_wr(QLIM_WM(80), sparx5, XQS_QLIMIT_SHR_QLIM_CFG(0)); + spx5_wr(QLIM_WM(90), sparx5, XQS_QLIMIT_SHR_CTOP_CFG(0)); + spx5_wr(QLIM_WM(95), sparx5, XQS_QLIMIT_SHR_ATOP_CFG(0)); + spx5_wr(QLIM_WM(100), sparx5, XQS_QLIMIT_SHR_TOP_CFG(0)); + + return 0; +} + +/* Some boards needs to map the SGPIO for signal detect explicitly to the + * port module + */ +static void sparx5_board_init(struct sparx5 *sparx5) +{ + int idx; + + if (!sparx5->sd_sgpio_remapping) + return; + + /* Enable SGPIO Signal Detect remapping */ + spx5_rmw(GCB_HW_SGPIO_SD_CFG_SD_MAP_SEL, + GCB_HW_SGPIO_SD_CFG_SD_MAP_SEL, + sparx5, + GCB_HW_SGPIO_SD_CFG); + + /* Refer to LOS SGPIO */ + for (idx = 0; idx < SPX5_PORTS; idx++) + if (sparx5->ports[idx]) + if (sparx5->ports[idx]->conf.sd_sgpio != ~0) + spx5_wr(sparx5->ports[idx]->conf.sd_sgpio, + sparx5, + GCB_HW_SGPIO_TO_SD_MAP_CFG(idx)); +} + +static int sparx5_start(struct sparx5 *sparx5) +{ + u8 broadcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; + char queue_name[32]; + u32 idx; + int err; + + /* Setup own UPSIDs */ + for (idx = 0; idx < 3; idx++) { + spx5_wr(idx, sparx5, ANA_AC_OWN_UPSID(idx)); + spx5_wr(idx, sparx5, ANA_CL_OWN_UPSID(idx)); + spx5_wr(idx, sparx5, ANA_L2_OWN_UPSID(idx)); + spx5_wr(idx, sparx5, REW_OWN_UPSID(idx)); + } + + /* Enable CPU ports */ + for (idx = SPX5_PORTS; idx < SPX5_PORTS_ALL; idx++) + spx5_rmw(QFWD_SWITCH_PORT_MODE_PORT_ENA_SET(1), + QFWD_SWITCH_PORT_MODE_PORT_ENA, + sparx5, + QFWD_SWITCH_PORT_MODE(idx)); + + /* Init masks */ + sparx5_update_fwd(sparx5); + + /* CPU copy CPU pgids */ + spx5_wr(ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_SET(1), + sparx5, ANA_AC_PGID_MISC_CFG(PGID_CPU)); + spx5_wr(ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_SET(1), + sparx5, ANA_AC_PGID_MISC_CFG(PGID_BCAST)); + + /* Recalc injected frame FCS */ + for (idx = SPX5_PORT_CPU_0; idx <= SPX5_PORT_CPU_1; idx++) + spx5_rmw(ANA_CL_FILTER_CTRL_FORCE_FCS_UPDATE_ENA_SET(1), + ANA_CL_FILTER_CTRL_FORCE_FCS_UPDATE_ENA, + sparx5, ANA_CL_FILTER_CTRL(idx)); + + /* Init MAC table, ageing */ + sparx5_mact_init(sparx5); + + /* Init PGID table arbitrator */ + sparx5_pgid_init(sparx5); + + /* Setup VLANs */ + sparx5_vlan_init(sparx5); + + /* Add host mode BC address (points only to CPU) */ + sparx5_mact_learn(sparx5, PGID_CPU, broadcast, NULL_VID); + + /* Enable queue limitation watermarks */ + sparx5_qlim_set(sparx5); + + err = sparx5_config_auto_calendar(sparx5); + if (err) + return err; + + err = sparx5_config_dsm_calendar(sparx5); + if (err) + return err; + + /* 
Init stats */ + err = sparx_stats_init(sparx5); + if (err) + return err; + + /* Init mact_sw struct */ + mutex_init(&sparx5->mact_lock); + INIT_LIST_HEAD(&sparx5->mact_entries); + snprintf(queue_name, sizeof(queue_name), "%s-mact", + dev_name(sparx5->dev)); + sparx5->mact_queue = create_singlethread_workqueue(queue_name); + if (!sparx5->mact_queue) + return -ENOMEM; + + INIT_DELAYED_WORK(&sparx5->mact_work, sparx5_mact_pull_work); + queue_delayed_work(sparx5->mact_queue, &sparx5->mact_work, + SPX5_MACT_PULL_DELAY); + + mutex_init(&sparx5->mdb_lock); + INIT_LIST_HEAD(&sparx5->mdb_entries); + + err = sparx5_register_netdevs(sparx5); + if (err) + return err; + + sparx5_board_init(sparx5); + err = sparx5_register_notifier_blocks(sparx5); + + /* Start Frame DMA with fallback to register based INJ/XTR */ + err = -ENXIO; + if (sparx5->fdma_irq >= 0) { + if (GCB_CHIP_ID_REV_ID_GET(sparx5->chip_id) > 0) + err = devm_request_threaded_irq(sparx5->dev, + sparx5->fdma_irq, + NULL, + sparx5_fdma_handler, + IRQF_ONESHOT, + "sparx5-fdma", sparx5); + if (!err) + err = sparx5_fdma_start(sparx5); + if (err) + sparx5->fdma_irq = -ENXIO; + } else { + sparx5->fdma_irq = -ENXIO; + } + if (err && sparx5->xtr_irq >= 0) { + err = devm_request_irq(sparx5->dev, sparx5->xtr_irq, + sparx5_xtr_handler, IRQF_SHARED, + "sparx5-xtr", sparx5); + if (!err) + err = sparx5_manual_injection_mode(sparx5); + if (err) + sparx5->xtr_irq = -ENXIO; + } else { + sparx5->xtr_irq = -ENXIO; + } + + if (sparx5->ptp_irq >= 0) { + err = devm_request_threaded_irq(sparx5->dev, sparx5->ptp_irq, + NULL, sparx5_ptp_irq_handler, + IRQF_ONESHOT, "sparx5-ptp", + sparx5); + if (err) + sparx5->ptp_irq = -ENXIO; + + sparx5->ptp = 1; + } + + return err; +} + +static void sparx5_cleanup_ports(struct sparx5 *sparx5) +{ + sparx5_unregister_netdevs(sparx5); + sparx5_destroy_netdevs(sparx5); +} + +static int mchp_sparx5_probe(struct platform_device *pdev) +{ + struct initial_port_config *configs, *config; + struct device_node *np = pdev->dev.of_node; + struct device_node *ports, *portnp; + struct reset_control *reset; + struct sparx5 *sparx5; + int idx = 0, err = 0; + + if (!np && !pdev->dev.platform_data) + return -ENODEV; + + sparx5 = devm_kzalloc(&pdev->dev, sizeof(*sparx5), GFP_KERNEL); + if (!sparx5) + return -ENOMEM; + + platform_set_drvdata(pdev, sparx5); + sparx5->pdev = pdev; + sparx5->dev = &pdev->dev; + + /* Do switch core reset if available */ + reset = devm_reset_control_get_optional_shared(&pdev->dev, "switch"); + if (IS_ERR(reset)) + return dev_err_probe(&pdev->dev, PTR_ERR(reset), + "Failed to get switch reset controller.\n"); + reset_control_reset(reset); + + /* Default values, some from DT */ + sparx5->coreclock = SPX5_CORE_CLOCK_DEFAULT; + + ports = of_get_child_by_name(np, "ethernet-ports"); + if (!ports) { + dev_err(sparx5->dev, "no ethernet-ports child node found\n"); + return -ENODEV; + } + sparx5->port_count = of_get_child_count(ports); + + configs = kcalloc(sparx5->port_count, + sizeof(struct initial_port_config), GFP_KERNEL); + if (!configs) { + err = -ENOMEM; + goto cleanup_pnode; + } + + for_each_available_child_of_node(ports, portnp) { + struct sparx5_port_config *conf; + struct phy *serdes; + u32 portno; + + err = of_property_read_u32(portnp, "reg", &portno); + if (err) { + dev_err(sparx5->dev, "port reg property error\n"); + continue; + } + config = &configs[idx]; + conf = &config->conf; + conf->speed = SPEED_UNKNOWN; + conf->bandwidth = SPEED_UNKNOWN; + err = of_get_phy_mode(portnp, &conf->phy_mode); + if (err) { + 
dev_err(sparx5->dev, "port %u: missing phy-mode\n", + portno); + continue; + } + err = of_property_read_u32(portnp, "microchip,bandwidth", + &conf->bandwidth); + if (err) { + dev_err(sparx5->dev, "port %u: missing bandwidth\n", + portno); + continue; + } + err = of_property_read_u32(portnp, "microchip,sd-sgpio", &conf->sd_sgpio); + if (err) + conf->sd_sgpio = ~0; + else + sparx5->sd_sgpio_remapping = true; + serdes = devm_of_phy_get(sparx5->dev, portnp, NULL); + if (IS_ERR(serdes)) { + err = dev_err_probe(sparx5->dev, PTR_ERR(serdes), + "port %u: missing serdes\n", + portno); + of_node_put(portnp); + goto cleanup_config; + } + config->portno = portno; + config->node = portnp; + config->serdes = serdes; + + conf->media = PHY_MEDIA_DAC; + conf->serdes_reset = true; + conf->portmode = conf->phy_mode; + conf->power_down = true; + idx++; + } + + err = sparx5_create_targets(sparx5); + if (err) + goto cleanup_config; + + if (of_get_mac_address(np, sparx5->base_mac)) { + dev_info(sparx5->dev, "MAC addr was not set, use random MAC\n"); + eth_random_addr(sparx5->base_mac); + sparx5->base_mac[5] = 0; + } + + sparx5->fdma_irq = platform_get_irq_byname(sparx5->pdev, "fdma"); + sparx5->xtr_irq = platform_get_irq_byname(sparx5->pdev, "xtr"); + sparx5->ptp_irq = platform_get_irq_byname(sparx5->pdev, "ptp"); + + /* Read chip ID to check CPU interface */ + sparx5->chip_id = spx5_rd(sparx5, GCB_CHIP_ID); + + sparx5->target_ct = (enum spx5_target_chiptype) + GCB_CHIP_ID_PART_ID_GET(sparx5->chip_id); + + /* Initialize Switchcore and internal RAMs */ + err = sparx5_init_switchcore(sparx5); + if (err) { + dev_err(sparx5->dev, "Switchcore initialization error\n"); + goto cleanup_config; + } + + /* Initialize the LC-PLL (core clock) and set affected registers */ + err = sparx5_init_coreclock(sparx5); + if (err) { + dev_err(sparx5->dev, "LC-PLL initialization error\n"); + goto cleanup_config; + } + + for (idx = 0; idx < sparx5->port_count; ++idx) { + config = &configs[idx]; + if (!config->node) + continue; + + err = sparx5_create_port(sparx5, config); + if (err) { + dev_err(sparx5->dev, "port create error\n"); + goto cleanup_ports; + } + } + + err = sparx5_start(sparx5); + if (err) { + dev_err(sparx5->dev, "Start failed\n"); + goto cleanup_ports; + } + + err = sparx5_qos_init(sparx5); + if (err) { + dev_err(sparx5->dev, "Failed to initialize QoS\n"); + goto cleanup_ports; + } + + err = sparx5_ptp_init(sparx5); + if (err) { + dev_err(sparx5->dev, "PTP failed\n"); + goto cleanup_ports; + } + goto cleanup_config; + +cleanup_ports: + sparx5_cleanup_ports(sparx5); + if (sparx5->mact_queue) + destroy_workqueue(sparx5->mact_queue); +cleanup_config: + kfree(configs); +cleanup_pnode: + of_node_put(ports); + return err; +} + +static int mchp_sparx5_remove(struct platform_device *pdev) +{ + struct sparx5 *sparx5 = platform_get_drvdata(pdev); + + if (sparx5->xtr_irq) { + disable_irq(sparx5->xtr_irq); + sparx5->xtr_irq = -ENXIO; + } + if (sparx5->fdma_irq) { + disable_irq(sparx5->fdma_irq); + sparx5->fdma_irq = -ENXIO; + } + sparx5_ptp_deinit(sparx5); + sparx5_fdma_stop(sparx5); + sparx5_cleanup_ports(sparx5); + /* Unregister netdevs */ + sparx5_unregister_notifier_blocks(sparx5); + destroy_workqueue(sparx5->mact_queue); + + return 0; +} + +static const struct of_device_id mchp_sparx5_match[] = { + { .compatible = "microchip,sparx5-switch" }, + { } +}; +MODULE_DEVICE_TABLE(of, mchp_sparx5_match); + +static struct platform_driver mchp_sparx5_driver = { + .probe = mchp_sparx5_probe, + .remove = mchp_sparx5_remove, + .driver = { + 
.name = "sparx5-switch", + .of_match_table = mchp_sparx5_match, + }, +}; + +module_platform_driver(mchp_sparx5_driver); + +MODULE_DESCRIPTION("Microchip Sparx5 switch driver"); +MODULE_AUTHOR("Steen Hegelund <steen.hegelund@microchip.com>"); +MODULE_LICENSE("Dual MIT/GPL"); diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h new file mode 100644 index 000000000..7a83222ca --- /dev/null +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h @@ -0,0 +1,546 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Microchip Sparx5 Switch driver + * + * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries. + */ + +#ifndef __SPARX5_MAIN_H__ +#define __SPARX5_MAIN_H__ + +#include <linux/types.h> +#include <linux/phy/phy.h> +#include <linux/netdevice.h> +#include <linux/phy.h> +#include <linux/if_vlan.h> +#include <linux/bitmap.h> +#include <linux/phylink.h> +#include <linux/net_tstamp.h> +#include <linux/ptp_clock_kernel.h> +#include <linux/hrtimer.h> + +#include "sparx5_main_regs.h" + +/* Target chip type */ +enum spx5_target_chiptype { + SPX5_TARGET_CT_7546 = 0x7546, /* SparX-5-64 Enterprise */ + SPX5_TARGET_CT_7549 = 0x7549, /* SparX-5-90 Enterprise */ + SPX5_TARGET_CT_7552 = 0x7552, /* SparX-5-128 Enterprise */ + SPX5_TARGET_CT_7556 = 0x7556, /* SparX-5-160 Enterprise */ + SPX5_TARGET_CT_7558 = 0x7558, /* SparX-5-200 Enterprise */ + SPX5_TARGET_CT_7546TSN = 0x47546, /* SparX-5-64i Industrial */ + SPX5_TARGET_CT_7549TSN = 0x47549, /* SparX-5-90i Industrial */ + SPX5_TARGET_CT_7552TSN = 0x47552, /* SparX-5-128i Industrial */ + SPX5_TARGET_CT_7556TSN = 0x47556, /* SparX-5-160i Industrial */ + SPX5_TARGET_CT_7558TSN = 0x47558, /* SparX-5-200i Industrial */ +}; + +enum sparx5_port_max_tags { + SPX5_PORT_MAX_TAGS_NONE, /* No extra tags allowed */ + SPX5_PORT_MAX_TAGS_ONE, /* Single tag allowed */ + SPX5_PORT_MAX_TAGS_TWO /* Single and double tag allowed */ +}; + +enum sparx5_vlan_port_type { + SPX5_VLAN_PORT_TYPE_UNAWARE, /* VLAN unaware port */ + SPX5_VLAN_PORT_TYPE_C, /* C-port */ + SPX5_VLAN_PORT_TYPE_S, /* S-port */ + SPX5_VLAN_PORT_TYPE_S_CUSTOM /* S-port using custom type */ +}; + +#define SPX5_PORTS 65 +#define SPX5_PORT_CPU (SPX5_PORTS) /* Next port is CPU port */ +#define SPX5_PORT_CPU_0 (SPX5_PORT_CPU + 0) /* CPU Port 65 */ +#define SPX5_PORT_CPU_1 (SPX5_PORT_CPU + 1) /* CPU Port 66 */ +#define SPX5_PORT_VD0 (SPX5_PORT_CPU + 2) /* VD0/Port 67 used for IPMC */ +#define SPX5_PORT_VD1 (SPX5_PORT_CPU + 3) /* VD1/Port 68 used for AFI/OAM */ +#define SPX5_PORT_VD2 (SPX5_PORT_CPU + 4) /* VD2/Port 69 used for IPinIP*/ +#define SPX5_PORTS_ALL (SPX5_PORT_CPU + 5) /* Total number of ports */ + +#define PGID_BASE SPX5_PORTS /* Starts after port PGIDs */ +#define PGID_UC_FLOOD (PGID_BASE + 0) +#define PGID_MC_FLOOD (PGID_BASE + 1) +#define PGID_IPV4_MC_DATA (PGID_BASE + 2) +#define PGID_IPV4_MC_CTRL (PGID_BASE + 3) +#define PGID_IPV6_MC_DATA (PGID_BASE + 4) +#define PGID_IPV6_MC_CTRL (PGID_BASE + 5) +#define PGID_BCAST (PGID_BASE + 6) +#define PGID_CPU (PGID_BASE + 7) +#define PGID_MCAST_START (PGID_BASE + 8) + +#define PGID_TABLE_SIZE 3290 + +#define IFH_LEN 9 /* 36 bytes */ +#define NULL_VID 0 +#define SPX5_MACT_PULL_DELAY (2 * HZ) +#define SPX5_STATS_CHECK_DELAY (1 * HZ) +#define SPX5_PRIOS 8 /* Number of priority queues */ +#define SPX5_BUFFER_CELL_SZ 184 /* Cell size */ +#define SPX5_BUFFER_MEMORY 4194280 /* 22795 words * 184 bytes */ + +#define XTR_QUEUE 0 +#define INJ_QUEUE 0 + +#define FDMA_DCB_MAX 64 +#define 
FDMA_RX_DCB_MAX_DBS 15 +#define FDMA_TX_DCB_MAX_DBS 1 + +#define SPARX5_PHC_COUNT 3 +#define SPARX5_PHC_PORT 0 + +#define IFH_REW_OP_NOOP 0x0 +#define IFH_REW_OP_ONE_STEP_PTP 0x3 +#define IFH_REW_OP_TWO_STEP_PTP 0x4 + +#define IFH_PDU_TYPE_NONE 0x0 +#define IFH_PDU_TYPE_PTP 0x5 +#define IFH_PDU_TYPE_IPV4_UDP_PTP 0x6 +#define IFH_PDU_TYPE_IPV6_UDP_PTP 0x7 + +struct sparx5; + +struct sparx5_db_hw { + u64 dataptr; + u64 status; +}; + +struct sparx5_rx_dcb_hw { + u64 nextptr; + u64 info; + struct sparx5_db_hw db[FDMA_RX_DCB_MAX_DBS]; +}; + +struct sparx5_tx_dcb_hw { + u64 nextptr; + u64 info; + struct sparx5_db_hw db[FDMA_TX_DCB_MAX_DBS]; +}; + +/* Frame DMA receive state: + * For each DB, there is a SKB, and the skb data pointer is mapped in + * the DB. Once a frame is received the skb is given to the upper layers + * and a new skb is added to the dcb. + * When the db_index reached FDMA_RX_DCB_MAX_DBS the DB is reused. + */ +struct sparx5_rx { + struct sparx5_rx_dcb_hw *dcb_entries; + struct sparx5_rx_dcb_hw *last_entry; + struct sk_buff *skb[FDMA_DCB_MAX][FDMA_RX_DCB_MAX_DBS]; + int db_index; + int dcb_index; + dma_addr_t dma; + struct napi_struct napi; + u32 channel_id; + struct net_device *ndev; + u64 packets; +}; + +/* Frame DMA transmit state: + * DCBs are chained using the DCBs nextptr field. + */ +struct sparx5_tx { + struct sparx5_tx_dcb_hw *curr_entry; + struct sparx5_tx_dcb_hw *first_entry; + struct list_head db_list; + dma_addr_t dma; + u32 channel_id; + u64 packets; + u64 dropped; +}; + +struct sparx5_port_config { + phy_interface_t portmode; + u32 bandwidth; + int speed; + int duplex; + enum phy_media media; + bool inband; + bool power_down; + bool autoneg; + bool serdes_reset; + u32 pause; + u32 pause_adv; + phy_interface_t phy_mode; + u32 sd_sgpio; +}; + +struct sparx5_port { + struct net_device *ndev; + struct sparx5 *sparx5; + struct device_node *of_node; + struct phy *serdes; + struct sparx5_port_config conf; + struct phylink_config phylink_config; + struct phylink *phylink; + struct phylink_pcs phylink_pcs; + u16 portno; + /* Ingress default VLAN (pvid) */ + u16 pvid; + /* Egress default VLAN (vid) */ + u16 vid; + bool signd_internal; + bool signd_active_high; + bool signd_enable; + bool flow_control; + enum sparx5_port_max_tags max_vlan_tags; + enum sparx5_vlan_port_type vlan_type; + u32 custom_etype; + bool vlan_aware; + struct hrtimer inj_timer; + /* ptp */ + u8 ptp_cmd; + u16 ts_id; + struct sk_buff_head tx_skbs; + bool is_mrouter; +}; + +enum sparx5_core_clockfreq { + SPX5_CORE_CLOCK_DEFAULT, /* Defaults to the highest supported frequency */ + SPX5_CORE_CLOCK_250MHZ, /* 250MHZ core clock frequency */ + SPX5_CORE_CLOCK_500MHZ, /* 500MHZ core clock frequency */ + SPX5_CORE_CLOCK_625MHZ, /* 625MHZ core clock frequency */ +}; + +struct sparx5_phc { + struct ptp_clock *clock; + struct ptp_clock_info info; + struct hwtstamp_config hwtstamp_config; + struct sparx5 *sparx5; + u8 index; +}; + +struct sparx5_skb_cb { + u8 rew_op; + u8 pdu_type; + u8 pdu_w16_offset; + u16 ts_id; + unsigned long jiffies; +}; + +struct sparx5_mdb_entry { + struct list_head list; + DECLARE_BITMAP(port_mask, SPX5_PORTS); + unsigned char addr[ETH_ALEN]; + bool cpu_copy; + u16 vid; + u16 pgid_idx; +}; + +#define SPARX5_PTP_TIMEOUT msecs_to_jiffies(10) +#define SPARX5_SKB_CB(skb) \ + ((struct sparx5_skb_cb *)((skb)->cb)) + +struct sparx5 { + struct platform_device *pdev; + struct device *dev; + u32 chip_id; + enum spx5_target_chiptype target_ct; + void __iomem *regs[NUM_TARGETS]; + int port_count; + 
struct mutex lock; /* MAC reg lock */ + /* port structures are in net device */ + struct sparx5_port *ports[SPX5_PORTS]; + enum sparx5_core_clockfreq coreclock; + /* Statistics */ + u32 num_stats; + u32 num_ethtool_stats; + const char * const *stats_layout; + u64 *stats; + /* Workqueue for reading stats */ + struct mutex queue_stats_lock; + struct delayed_work stats_work; + struct workqueue_struct *stats_queue; + /* Notifiers */ + struct notifier_block netdevice_nb; + struct notifier_block switchdev_nb; + struct notifier_block switchdev_blocking_nb; + /* Switch state */ + u8 base_mac[ETH_ALEN]; + /* Associated bridge device (when bridged) */ + struct net_device *hw_bridge_dev; + /* Bridged interfaces */ + DECLARE_BITMAP(bridge_mask, SPX5_PORTS); + DECLARE_BITMAP(bridge_fwd_mask, SPX5_PORTS); + DECLARE_BITMAP(bridge_lrn_mask, SPX5_PORTS); + DECLARE_BITMAP(vlan_mask[VLAN_N_VID], SPX5_PORTS); + /* SW MAC table */ + struct list_head mact_entries; + /* mac table list (mact_entries) mutex */ + struct mutex mact_lock; + /* SW MDB table */ + struct list_head mdb_entries; + /* mdb list mutex */ + struct mutex mdb_lock; + struct delayed_work mact_work; + struct workqueue_struct *mact_queue; + /* Board specifics */ + bool sd_sgpio_remapping; + /* Register based inj/xtr */ + int xtr_irq; + /* Frame DMA */ + int fdma_irq; + struct sparx5_rx rx; + struct sparx5_tx tx; + /* PTP */ + bool ptp; + struct sparx5_phc phc[SPARX5_PHC_COUNT]; + spinlock_t ptp_clock_lock; /* lock for phc */ + spinlock_t ptp_ts_id_lock; /* lock for ts_id */ + struct mutex ptp_lock; /* lock for ptp interface state */ + u16 ptp_skbs; + int ptp_irq; + /* PGID allocation map */ + u8 pgid_map[PGID_TABLE_SIZE]; +}; + +/* sparx5_switchdev.c */ +int sparx5_register_notifier_blocks(struct sparx5 *sparx5); +void sparx5_unregister_notifier_blocks(struct sparx5 *sparx5); + +/* sparx5_packet.c */ +struct frame_info { + int src_port; + u32 timestamp; +}; + +void sparx5_xtr_flush(struct sparx5 *sparx5, u8 grp); +void sparx5_ifh_parse(u32 *ifh, struct frame_info *info); +irqreturn_t sparx5_xtr_handler(int irq, void *_priv); +netdev_tx_t sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev); +int sparx5_manual_injection_mode(struct sparx5 *sparx5); +void sparx5_port_inj_timer_setup(struct sparx5_port *port); + +/* sparx5_fdma.c */ +int sparx5_fdma_start(struct sparx5 *sparx5); +int sparx5_fdma_stop(struct sparx5 *sparx5); +int sparx5_fdma_xmit(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb); +irqreturn_t sparx5_fdma_handler(int irq, void *args); + +/* sparx5_mactable.c */ +void sparx5_mact_pull_work(struct work_struct *work); +int sparx5_mact_learn(struct sparx5 *sparx5, int port, + const unsigned char mac[ETH_ALEN], u16 vid); +bool sparx5_mact_getnext(struct sparx5 *sparx5, + unsigned char mac[ETH_ALEN], u16 *vid, u32 *pcfg2); +int sparx5_mact_find(struct sparx5 *sparx5, + const unsigned char mac[ETH_ALEN], u16 vid, u32 *pcfg2); +int sparx5_mact_forget(struct sparx5 *sparx5, + const unsigned char mac[ETH_ALEN], u16 vid); +int sparx5_add_mact_entry(struct sparx5 *sparx5, + struct net_device *dev, + u16 portno, + const unsigned char *addr, u16 vid); +int sparx5_del_mact_entry(struct sparx5 *sparx5, + const unsigned char *addr, + u16 vid); +int sparx5_mc_sync(struct net_device *dev, const unsigned char *addr); +int sparx5_mc_unsync(struct net_device *dev, const unsigned char *addr); +void sparx5_set_ageing(struct sparx5 *sparx5, int msecs); +void sparx5_mact_init(struct sparx5 *sparx5); + +/* sparx5_vlan.c */ +void 
sparx5_pgid_update_mask(struct sparx5_port *port, int pgid, bool enable); +void sparx5_pgid_clear(struct sparx5 *spx5, int pgid); +void sparx5_pgid_read_mask(struct sparx5 *sparx5, int pgid, u32 portmask[3]); +void sparx5_update_fwd(struct sparx5 *sparx5); +void sparx5_vlan_init(struct sparx5 *sparx5); +void sparx5_vlan_port_setup(struct sparx5 *sparx5, int portno); +int sparx5_vlan_vid_add(struct sparx5_port *port, u16 vid, bool pvid, + bool untagged); +int sparx5_vlan_vid_del(struct sparx5_port *port, u16 vid); +void sparx5_vlan_port_apply(struct sparx5 *sparx5, struct sparx5_port *port); + +/* sparx5_calendar.c */ +int sparx5_config_auto_calendar(struct sparx5 *sparx5); +int sparx5_config_dsm_calendar(struct sparx5 *sparx5); + +/* sparx5_ethtool.c */ +void sparx5_get_stats64(struct net_device *ndev, struct rtnl_link_stats64 *stats); +int sparx_stats_init(struct sparx5 *sparx5); + +/* sparx5_netdev.c */ +void sparx5_set_port_ifh_timestamp(void *ifh_hdr, u64 timestamp); +void sparx5_set_port_ifh_rew_op(void *ifh_hdr, u32 rew_op); +void sparx5_set_port_ifh_pdu_type(void *ifh_hdr, u32 pdu_type); +void sparx5_set_port_ifh_pdu_w16_offset(void *ifh_hdr, u32 pdu_w16_offset); +void sparx5_set_port_ifh(void *ifh_hdr, u16 portno); +bool sparx5_netdevice_check(const struct net_device *dev); +struct net_device *sparx5_create_netdev(struct sparx5 *sparx5, u32 portno); +int sparx5_register_netdevs(struct sparx5 *sparx5); +void sparx5_destroy_netdevs(struct sparx5 *sparx5); +void sparx5_unregister_netdevs(struct sparx5 *sparx5); + +/* sparx5_ptp.c */ +int sparx5_ptp_init(struct sparx5 *sparx5); +void sparx5_ptp_deinit(struct sparx5 *sparx5); +int sparx5_ptp_hwtstamp_set(struct sparx5_port *port, struct ifreq *ifr); +int sparx5_ptp_hwtstamp_get(struct sparx5_port *port, struct ifreq *ifr); +void sparx5_ptp_rxtstamp(struct sparx5 *sparx5, struct sk_buff *skb, + u64 timestamp); +int sparx5_ptp_txtstamp_request(struct sparx5_port *port, + struct sk_buff *skb); +void sparx5_ptp_txtstamp_release(struct sparx5_port *port, + struct sk_buff *skb); +irqreturn_t sparx5_ptp_irq_handler(int irq, void *args); + +/* sparx5_pgid.c */ +enum sparx5_pgid_type { + SPX5_PGID_FREE, + SPX5_PGID_RESERVED, + SPX5_PGID_MULTICAST, +}; + +void sparx5_pgid_init(struct sparx5 *spx5); +int sparx5_pgid_alloc_glag(struct sparx5 *spx5, u16 *idx); +int sparx5_pgid_alloc_mcast(struct sparx5 *spx5, u16 *idx); +int sparx5_pgid_free(struct sparx5 *spx5, u16 idx); + +/* Clock period in picoseconds */ +static inline u32 sparx5_clk_period(enum sparx5_core_clockfreq cclock) +{ + switch (cclock) { + case SPX5_CORE_CLOCK_250MHZ: + return 4000; + case SPX5_CORE_CLOCK_500MHZ: + return 2000; + case SPX5_CORE_CLOCK_625MHZ: + default: + return 1600; + } +} + +static inline bool sparx5_is_baser(phy_interface_t interface) +{ + return interface == PHY_INTERFACE_MODE_5GBASER || + interface == PHY_INTERFACE_MODE_10GBASER || + interface == PHY_INTERFACE_MODE_25GBASER; +} + +extern const struct phylink_mac_ops sparx5_phylink_mac_ops; +extern const struct phylink_pcs_ops sparx5_phylink_pcs_ops; +extern const struct ethtool_ops sparx5_ethtool_ops; + +/* Calculate raw offset */ +static inline __pure int spx5_offset(int id, int tinst, int tcnt, + int gbase, int ginst, + int gcnt, int gwidth, + int raddr, int rinst, + int rcnt, int rwidth) +{ + WARN_ON((tinst) >= tcnt); + WARN_ON((ginst) >= gcnt); + WARN_ON((rinst) >= rcnt); + return gbase + ((ginst) * gwidth) + + raddr + ((rinst) * rwidth); +} + +/* Read, Write and modify registers content. 
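 * Each accessor below takes the macro's expanded argument list (id, tinst,
 * tcnt, gbase, ginst, gcnt, gwidth, raddr, rinst, rcnt, rwidth) and computes
 * the MMIO address as base[id + tinst] + gbase + ginst * gwidth + raddr +
 * rinst * rwidth; the *cnt arguments are only used for bounds warnings.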
+ * The register definition macros start at the id + */ +static inline void __iomem *spx5_addr(void __iomem *base[], + int id, int tinst, int tcnt, + int gbase, int ginst, + int gcnt, int gwidth, + int raddr, int rinst, + int rcnt, int rwidth) +{ + WARN_ON((tinst) >= tcnt); + WARN_ON((ginst) >= gcnt); + WARN_ON((rinst) >= rcnt); + return base[id + (tinst)] + + gbase + ((ginst) * gwidth) + + raddr + ((rinst) * rwidth); +} + +static inline void __iomem *spx5_inst_addr(void __iomem *base, + int gbase, int ginst, + int gcnt, int gwidth, + int raddr, int rinst, + int rcnt, int rwidth) +{ + WARN_ON((ginst) >= gcnt); + WARN_ON((rinst) >= rcnt); + return base + + gbase + ((ginst) * gwidth) + + raddr + ((rinst) * rwidth); +} + +static inline u32 spx5_rd(struct sparx5 *sparx5, int id, int tinst, int tcnt, + int gbase, int ginst, int gcnt, int gwidth, + int raddr, int rinst, int rcnt, int rwidth) +{ + return readl(spx5_addr(sparx5->regs, id, tinst, tcnt, gbase, ginst, + gcnt, gwidth, raddr, rinst, rcnt, rwidth)); +} + +static inline u32 spx5_inst_rd(void __iomem *iomem, int id, int tinst, int tcnt, + int gbase, int ginst, int gcnt, int gwidth, + int raddr, int rinst, int rcnt, int rwidth) +{ + return readl(spx5_inst_addr(iomem, gbase, ginst, + gcnt, gwidth, raddr, rinst, rcnt, rwidth)); +} + +static inline void spx5_wr(u32 val, struct sparx5 *sparx5, + int id, int tinst, int tcnt, + int gbase, int ginst, int gcnt, int gwidth, + int raddr, int rinst, int rcnt, int rwidth) +{ + writel(val, spx5_addr(sparx5->regs, id, tinst, tcnt, + gbase, ginst, gcnt, gwidth, + raddr, rinst, rcnt, rwidth)); +} + +static inline void spx5_inst_wr(u32 val, void __iomem *iomem, + int id, int tinst, int tcnt, + int gbase, int ginst, int gcnt, int gwidth, + int raddr, int rinst, int rcnt, int rwidth) +{ + writel(val, spx5_inst_addr(iomem, + gbase, ginst, gcnt, gwidth, + raddr, rinst, rcnt, rwidth)); +} + +static inline void spx5_rmw(u32 val, u32 mask, struct sparx5 *sparx5, + int id, int tinst, int tcnt, + int gbase, int ginst, int gcnt, int gwidth, + int raddr, int rinst, int rcnt, int rwidth) +{ + u32 nval; + + nval = readl(spx5_addr(sparx5->regs, id, tinst, tcnt, gbase, ginst, + gcnt, gwidth, raddr, rinst, rcnt, rwidth)); + nval = (nval & ~mask) | (val & mask); + writel(nval, spx5_addr(sparx5->regs, id, tinst, tcnt, gbase, ginst, + gcnt, gwidth, raddr, rinst, rcnt, rwidth)); +} + +static inline void spx5_inst_rmw(u32 val, u32 mask, void __iomem *iomem, + int id, int tinst, int tcnt, + int gbase, int ginst, int gcnt, int gwidth, + int raddr, int rinst, int rcnt, int rwidth) +{ + u32 nval; + + nval = readl(spx5_inst_addr(iomem, gbase, ginst, gcnt, gwidth, raddr, + rinst, rcnt, rwidth)); + nval = (nval & ~mask) | (val & mask); + writel(nval, spx5_inst_addr(iomem, gbase, ginst, gcnt, gwidth, raddr, + rinst, rcnt, rwidth)); +} + +static inline void __iomem *spx5_inst_get(struct sparx5 *sparx5, int id, int tinst) +{ + return sparx5->regs[id + tinst]; +} + +static inline void __iomem *spx5_reg_get(struct sparx5 *sparx5, + int id, int tinst, int tcnt, + int gbase, int ginst, int gcnt, int gwidth, + int raddr, int rinst, int rcnt, int rwidth) +{ + return spx5_addr(sparx5->regs, id, tinst, tcnt, + gbase, ginst, gcnt, gwidth, + raddr, rinst, rcnt, rwidth); +} + +#endif /* __SPARX5_MAIN_H__ */ diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h b/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h new file mode 100644 index 000000000..fa2eb70f4 --- /dev/null +++ 
b/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h @@ -0,0 +1,5138 @@ +/* SPDX-License-Identifier: GPL-2.0+ + * Microchip Sparx5 Switch driver + * + * Copyright (c) 2021 Microchip Technology Inc. + */ + +/* This file is autogenerated by cml-utils 2022-02-26 14:15:01 +0100. + * Commit ID: 98bdd3d171cc2a1afd30d241d41a4281d471a48c (dirty) + */ + +#ifndef _SPARX5_MAIN_REGS_H_ +#define _SPARX5_MAIN_REGS_H_ + +#include <linux/bitfield.h> +#include <linux/types.h> +#include <linux/bug.h> + +enum sparx5_target { + TARGET_ANA_AC = 1, + TARGET_ANA_ACL = 2, + TARGET_ANA_AC_POL = 4, + TARGET_ANA_CL = 6, + TARGET_ANA_L2 = 7, + TARGET_ANA_L3 = 8, + TARGET_ASM = 9, + TARGET_CLKGEN = 11, + TARGET_CPU = 12, + TARGET_DEV10G = 17, + TARGET_DEV25G = 29, + TARGET_DEV2G5 = 37, + TARGET_DEV5G = 102, + TARGET_DSM = 115, + TARGET_EACL = 116, + TARGET_FDMA = 117, + TARGET_GCB = 118, + TARGET_HSCH = 119, + TARGET_LRN = 122, + TARGET_PCEP = 129, + TARGET_PCS10G_BR = 132, + TARGET_PCS25G_BR = 144, + TARGET_PCS5G_BR = 160, + TARGET_PORT_CONF = 173, + TARGET_PTP = 174, + TARGET_QFWD = 175, + TARGET_QRES = 176, + TARGET_QS = 177, + TARGET_QSYS = 178, + TARGET_REW = 179, + TARGET_VCAP_SUPER = 326, + TARGET_VOP = 327, + TARGET_XQS = 331, + NUM_TARGETS = 332 +}; + +#define __REG(...) __VA_ARGS__ + +/* ANA_AC:RAM_CTRL:RAM_INIT */ +#define ANA_AC_RAM_INIT __REG(TARGET_ANA_AC, 0, 1, 839108, 0, 1, 4, 0, 0, 1, 4) + +#define ANA_AC_RAM_INIT_RAM_INIT BIT(1) +#define ANA_AC_RAM_INIT_RAM_INIT_SET(x)\ + FIELD_PREP(ANA_AC_RAM_INIT_RAM_INIT, x) +#define ANA_AC_RAM_INIT_RAM_INIT_GET(x)\ + FIELD_GET(ANA_AC_RAM_INIT_RAM_INIT, x) + +#define ANA_AC_RAM_INIT_RAM_CFG_HOOK BIT(0) +#define ANA_AC_RAM_INIT_RAM_CFG_HOOK_SET(x)\ + FIELD_PREP(ANA_AC_RAM_INIT_RAM_CFG_HOOK, x) +#define ANA_AC_RAM_INIT_RAM_CFG_HOOK_GET(x)\ + FIELD_GET(ANA_AC_RAM_INIT_RAM_CFG_HOOK, x) + +/* ANA_AC:PS_COMMON:OWN_UPSID */ +#define ANA_AC_OWN_UPSID(r) __REG(TARGET_ANA_AC, 0, 1, 894472, 0, 1, 352, 52, r, 3, 4) + +#define ANA_AC_OWN_UPSID_OWN_UPSID GENMASK(4, 0) +#define ANA_AC_OWN_UPSID_OWN_UPSID_SET(x)\ + FIELD_PREP(ANA_AC_OWN_UPSID_OWN_UPSID, x) +#define ANA_AC_OWN_UPSID_OWN_UPSID_GET(x)\ + FIELD_GET(ANA_AC_OWN_UPSID_OWN_UPSID, x) + +/* ANA_AC:SRC:SRC_CFG */ +#define ANA_AC_SRC_CFG(g) __REG(TARGET_ANA_AC, 0, 1, 849920, g, 102, 16, 0, 0, 1, 4) + +/* ANA_AC:SRC:SRC_CFG1 */ +#define ANA_AC_SRC_CFG1(g) __REG(TARGET_ANA_AC, 0, 1, 849920, g, 102, 16, 4, 0, 1, 4) + +/* ANA_AC:SRC:SRC_CFG2 */ +#define ANA_AC_SRC_CFG2(g) __REG(TARGET_ANA_AC, 0, 1, 849920, g, 102, 16, 8, 0, 1, 4) + +#define ANA_AC_SRC_CFG2_PORT_MASK2 BIT(0) +#define ANA_AC_SRC_CFG2_PORT_MASK2_SET(x)\ + FIELD_PREP(ANA_AC_SRC_CFG2_PORT_MASK2, x) +#define ANA_AC_SRC_CFG2_PORT_MASK2_GET(x)\ + FIELD_GET(ANA_AC_SRC_CFG2_PORT_MASK2, x) + +/* ANA_AC:PGID:PGID_CFG */ +#define ANA_AC_PGID_CFG(g) __REG(TARGET_ANA_AC, 0, 1, 786432, g, 3290, 16, 0, 0, 1, 4) + +/* ANA_AC:PGID:PGID_CFG1 */ +#define ANA_AC_PGID_CFG1(g) __REG(TARGET_ANA_AC, 0, 1, 786432, g, 3290, 16, 4, 0, 1, 4) + +/* ANA_AC:PGID:PGID_CFG2 */ +#define ANA_AC_PGID_CFG2(g) __REG(TARGET_ANA_AC, 0, 1, 786432, g, 3290, 16, 8, 0, 1, 4) + +#define ANA_AC_PGID_CFG2_PORT_MASK2 BIT(0) +#define ANA_AC_PGID_CFG2_PORT_MASK2_SET(x)\ + FIELD_PREP(ANA_AC_PGID_CFG2_PORT_MASK2, x) +#define ANA_AC_PGID_CFG2_PORT_MASK2_GET(x)\ + FIELD_GET(ANA_AC_PGID_CFG2_PORT_MASK2, x) + +/* ANA_AC:PGID:PGID_MISC_CFG */ +#define ANA_AC_PGID_MISC_CFG(g) __REG(TARGET_ANA_AC, 0, 1, 786432, g, 3290, 16, 12, 0, 1, 4) + +#define ANA_AC_PGID_MISC_CFG_PGID_CPU_QU GENMASK(6, 4) +#define 
ANA_AC_PGID_MISC_CFG_PGID_CPU_QU_SET(x)\ + FIELD_PREP(ANA_AC_PGID_MISC_CFG_PGID_CPU_QU, x) +#define ANA_AC_PGID_MISC_CFG_PGID_CPU_QU_GET(x)\ + FIELD_GET(ANA_AC_PGID_MISC_CFG_PGID_CPU_QU, x) + +#define ANA_AC_PGID_MISC_CFG_STACK_TYPE_ENA BIT(1) +#define ANA_AC_PGID_MISC_CFG_STACK_TYPE_ENA_SET(x)\ + FIELD_PREP(ANA_AC_PGID_MISC_CFG_STACK_TYPE_ENA, x) +#define ANA_AC_PGID_MISC_CFG_STACK_TYPE_ENA_GET(x)\ + FIELD_GET(ANA_AC_PGID_MISC_CFG_STACK_TYPE_ENA, x) + +#define ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA BIT(0) +#define ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_SET(x)\ + FIELD_PREP(ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA, x) +#define ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_GET(x)\ + FIELD_GET(ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA, x) + +/* ANA_AC:STAT_GLOBAL_CFG_PORT:STAT_GLOBAL_EVENT_MASK */ +#define ANA_AC_PORT_SGE_CFG(r) __REG(TARGET_ANA_AC, 0, 1, 851552, 0, 1, 20, 0, r, 4, 4) + +#define ANA_AC_PORT_SGE_CFG_MASK GENMASK(15, 0) +#define ANA_AC_PORT_SGE_CFG_MASK_SET(x)\ + FIELD_PREP(ANA_AC_PORT_SGE_CFG_MASK, x) +#define ANA_AC_PORT_SGE_CFG_MASK_GET(x)\ + FIELD_GET(ANA_AC_PORT_SGE_CFG_MASK, x) + +/* ANA_AC:STAT_GLOBAL_CFG_PORT:STAT_RESET */ +#define ANA_AC_STAT_RESET __REG(TARGET_ANA_AC, 0, 1, 851552, 0, 1, 20, 16, 0, 1, 4) + +#define ANA_AC_STAT_RESET_RESET BIT(0) +#define ANA_AC_STAT_RESET_RESET_SET(x)\ + FIELD_PREP(ANA_AC_STAT_RESET_RESET, x) +#define ANA_AC_STAT_RESET_RESET_GET(x)\ + FIELD_GET(ANA_AC_STAT_RESET_RESET, x) + +/* ANA_AC:STAT_CNT_CFG_PORT:STAT_CFG */ +#define ANA_AC_PORT_STAT_CFG(g, r) __REG(TARGET_ANA_AC, 0, 1, 843776, g, 70, 64, 4, r, 4, 4) + +#define ANA_AC_PORT_STAT_CFG_CFG_PRIO_MASK GENMASK(11, 4) +#define ANA_AC_PORT_STAT_CFG_CFG_PRIO_MASK_SET(x)\ + FIELD_PREP(ANA_AC_PORT_STAT_CFG_CFG_PRIO_MASK, x) +#define ANA_AC_PORT_STAT_CFG_CFG_PRIO_MASK_GET(x)\ + FIELD_GET(ANA_AC_PORT_STAT_CFG_CFG_PRIO_MASK, x) + +#define ANA_AC_PORT_STAT_CFG_CFG_CNT_FRM_TYPE GENMASK(3, 1) +#define ANA_AC_PORT_STAT_CFG_CFG_CNT_FRM_TYPE_SET(x)\ + FIELD_PREP(ANA_AC_PORT_STAT_CFG_CFG_CNT_FRM_TYPE, x) +#define ANA_AC_PORT_STAT_CFG_CFG_CNT_FRM_TYPE_GET(x)\ + FIELD_GET(ANA_AC_PORT_STAT_CFG_CFG_CNT_FRM_TYPE, x) + +#define ANA_AC_PORT_STAT_CFG_CFG_CNT_BYTE BIT(0) +#define ANA_AC_PORT_STAT_CFG_CFG_CNT_BYTE_SET(x)\ + FIELD_PREP(ANA_AC_PORT_STAT_CFG_CFG_CNT_BYTE, x) +#define ANA_AC_PORT_STAT_CFG_CFG_CNT_BYTE_GET(x)\ + FIELD_GET(ANA_AC_PORT_STAT_CFG_CFG_CNT_BYTE, x) + +/* ANA_AC:STAT_CNT_CFG_PORT:STAT_LSB_CNT */ +#define ANA_AC_PORT_STAT_LSB_CNT(g, r) __REG(TARGET_ANA_AC, 0, 1, 843776, g, 70, 64, 20, r, 4, 4) + +/* ANA_ACL:COMMON:OWN_UPSID */ +#define ANA_ACL_OWN_UPSID(r) __REG(TARGET_ANA_ACL, 0, 1, 32768, 0, 1, 592, 580, r, 3, 4) + +#define ANA_ACL_OWN_UPSID_OWN_UPSID GENMASK(4, 0) +#define ANA_ACL_OWN_UPSID_OWN_UPSID_SET(x)\ + FIELD_PREP(ANA_ACL_OWN_UPSID_OWN_UPSID, x) +#define ANA_ACL_OWN_UPSID_OWN_UPSID_GET(x)\ + FIELD_GET(ANA_ACL_OWN_UPSID_OWN_UPSID, x) + +/* ANA_AC_POL:POL_ALL_CFG:POL_UPD_INT_CFG */ +#define ANA_AC_POL_POL_UPD_INT_CFG __REG(TARGET_ANA_AC_POL, 0, 1, 75968, 0, 1, 1160, 1148, 0, 1, 4) + +#define ANA_AC_POL_POL_UPD_INT_CFG_POL_UPD_INT GENMASK(9, 0) +#define ANA_AC_POL_POL_UPD_INT_CFG_POL_UPD_INT_SET(x)\ + FIELD_PREP(ANA_AC_POL_POL_UPD_INT_CFG_POL_UPD_INT, x) +#define ANA_AC_POL_POL_UPD_INT_CFG_POL_UPD_INT_GET(x)\ + FIELD_GET(ANA_AC_POL_POL_UPD_INT_CFG_POL_UPD_INT, x) + +/* ANA_AC_POL:COMMON_BDLB:DLB_CTRL */ +#define ANA_AC_POL_BDLB_DLB_CTRL __REG(TARGET_ANA_AC_POL, 0, 1, 79048, 0, 1, 8, 0, 0, 1, 4) + +#define ANA_AC_POL_BDLB_DLB_CTRL_CLK_PERIOD_01NS GENMASK(26, 19) +#define 
ANA_AC_POL_BDLB_DLB_CTRL_CLK_PERIOD_01NS_SET(x)\ + FIELD_PREP(ANA_AC_POL_BDLB_DLB_CTRL_CLK_PERIOD_01NS, x) +#define ANA_AC_POL_BDLB_DLB_CTRL_CLK_PERIOD_01NS_GET(x)\ + FIELD_GET(ANA_AC_POL_BDLB_DLB_CTRL_CLK_PERIOD_01NS, x) + +#define ANA_AC_POL_BDLB_DLB_CTRL_BASE_TICK_CNT GENMASK(18, 4) +#define ANA_AC_POL_BDLB_DLB_CTRL_BASE_TICK_CNT_SET(x)\ + FIELD_PREP(ANA_AC_POL_BDLB_DLB_CTRL_BASE_TICK_CNT, x) +#define ANA_AC_POL_BDLB_DLB_CTRL_BASE_TICK_CNT_GET(x)\ + FIELD_GET(ANA_AC_POL_BDLB_DLB_CTRL_BASE_TICK_CNT, x) + +#define ANA_AC_POL_BDLB_DLB_CTRL_LEAK_ENA BIT(1) +#define ANA_AC_POL_BDLB_DLB_CTRL_LEAK_ENA_SET(x)\ + FIELD_PREP(ANA_AC_POL_BDLB_DLB_CTRL_LEAK_ENA, x) +#define ANA_AC_POL_BDLB_DLB_CTRL_LEAK_ENA_GET(x)\ + FIELD_GET(ANA_AC_POL_BDLB_DLB_CTRL_LEAK_ENA, x) + +#define ANA_AC_POL_BDLB_DLB_CTRL_DLB_ADD_ENA BIT(0) +#define ANA_AC_POL_BDLB_DLB_CTRL_DLB_ADD_ENA_SET(x)\ + FIELD_PREP(ANA_AC_POL_BDLB_DLB_CTRL_DLB_ADD_ENA, x) +#define ANA_AC_POL_BDLB_DLB_CTRL_DLB_ADD_ENA_GET(x)\ + FIELD_GET(ANA_AC_POL_BDLB_DLB_CTRL_DLB_ADD_ENA, x) + +/* ANA_AC_POL:COMMON_BUM_SLB:DLB_CTRL */ +#define ANA_AC_POL_SLB_DLB_CTRL __REG(TARGET_ANA_AC_POL, 0, 1, 79056, 0, 1, 20, 0, 0, 1, 4) + +#define ANA_AC_POL_SLB_DLB_CTRL_CLK_PERIOD_01NS GENMASK(26, 19) +#define ANA_AC_POL_SLB_DLB_CTRL_CLK_PERIOD_01NS_SET(x)\ + FIELD_PREP(ANA_AC_POL_SLB_DLB_CTRL_CLK_PERIOD_01NS, x) +#define ANA_AC_POL_SLB_DLB_CTRL_CLK_PERIOD_01NS_GET(x)\ + FIELD_GET(ANA_AC_POL_SLB_DLB_CTRL_CLK_PERIOD_01NS, x) + +#define ANA_AC_POL_SLB_DLB_CTRL_BASE_TICK_CNT GENMASK(18, 4) +#define ANA_AC_POL_SLB_DLB_CTRL_BASE_TICK_CNT_SET(x)\ + FIELD_PREP(ANA_AC_POL_SLB_DLB_CTRL_BASE_TICK_CNT, x) +#define ANA_AC_POL_SLB_DLB_CTRL_BASE_TICK_CNT_GET(x)\ + FIELD_GET(ANA_AC_POL_SLB_DLB_CTRL_BASE_TICK_CNT, x) + +#define ANA_AC_POL_SLB_DLB_CTRL_LEAK_ENA BIT(1) +#define ANA_AC_POL_SLB_DLB_CTRL_LEAK_ENA_SET(x)\ + FIELD_PREP(ANA_AC_POL_SLB_DLB_CTRL_LEAK_ENA, x) +#define ANA_AC_POL_SLB_DLB_CTRL_LEAK_ENA_GET(x)\ + FIELD_GET(ANA_AC_POL_SLB_DLB_CTRL_LEAK_ENA, x) + +#define ANA_AC_POL_SLB_DLB_CTRL_DLB_ADD_ENA BIT(0) +#define ANA_AC_POL_SLB_DLB_CTRL_DLB_ADD_ENA_SET(x)\ + FIELD_PREP(ANA_AC_POL_SLB_DLB_CTRL_DLB_ADD_ENA, x) +#define ANA_AC_POL_SLB_DLB_CTRL_DLB_ADD_ENA_GET(x)\ + FIELD_GET(ANA_AC_POL_SLB_DLB_CTRL_DLB_ADD_ENA, x) + +/* ANA_CL:PORT:FILTER_CTRL */ +#define ANA_CL_FILTER_CTRL(g) __REG(TARGET_ANA_CL, 0, 1, 131072, g, 70, 512, 4, 0, 1, 4) + +#define ANA_CL_FILTER_CTRL_FILTER_SMAC_MC_DIS BIT(2) +#define ANA_CL_FILTER_CTRL_FILTER_SMAC_MC_DIS_SET(x)\ + FIELD_PREP(ANA_CL_FILTER_CTRL_FILTER_SMAC_MC_DIS, x) +#define ANA_CL_FILTER_CTRL_FILTER_SMAC_MC_DIS_GET(x)\ + FIELD_GET(ANA_CL_FILTER_CTRL_FILTER_SMAC_MC_DIS, x) + +#define ANA_CL_FILTER_CTRL_FILTER_NULL_MAC_DIS BIT(1) +#define ANA_CL_FILTER_CTRL_FILTER_NULL_MAC_DIS_SET(x)\ + FIELD_PREP(ANA_CL_FILTER_CTRL_FILTER_NULL_MAC_DIS, x) +#define ANA_CL_FILTER_CTRL_FILTER_NULL_MAC_DIS_GET(x)\ + FIELD_GET(ANA_CL_FILTER_CTRL_FILTER_NULL_MAC_DIS, x) + +#define ANA_CL_FILTER_CTRL_FORCE_FCS_UPDATE_ENA BIT(0) +#define ANA_CL_FILTER_CTRL_FORCE_FCS_UPDATE_ENA_SET(x)\ + FIELD_PREP(ANA_CL_FILTER_CTRL_FORCE_FCS_UPDATE_ENA, x) +#define ANA_CL_FILTER_CTRL_FORCE_FCS_UPDATE_ENA_GET(x)\ + FIELD_GET(ANA_CL_FILTER_CTRL_FORCE_FCS_UPDATE_ENA, x) + +/* ANA_CL:PORT:VLAN_FILTER_CTRL */ +#define ANA_CL_VLAN_FILTER_CTRL(g, r) __REG(TARGET_ANA_CL, 0, 1, 131072, g, 70, 512, 8, r, 3, 4) + +#define ANA_CL_VLAN_FILTER_CTRL_TAG_REQUIRED_ENA BIT(10) +#define ANA_CL_VLAN_FILTER_CTRL_TAG_REQUIRED_ENA_SET(x)\ + FIELD_PREP(ANA_CL_VLAN_FILTER_CTRL_TAG_REQUIRED_ENA, x) +#define 
ANA_CL_VLAN_FILTER_CTRL_TAG_REQUIRED_ENA_GET(x)\ + FIELD_GET(ANA_CL_VLAN_FILTER_CTRL_TAG_REQUIRED_ENA, x) + +#define ANA_CL_VLAN_FILTER_CTRL_PRIO_CTAG_DIS BIT(9) +#define ANA_CL_VLAN_FILTER_CTRL_PRIO_CTAG_DIS_SET(x)\ + FIELD_PREP(ANA_CL_VLAN_FILTER_CTRL_PRIO_CTAG_DIS, x) +#define ANA_CL_VLAN_FILTER_CTRL_PRIO_CTAG_DIS_GET(x)\ + FIELD_GET(ANA_CL_VLAN_FILTER_CTRL_PRIO_CTAG_DIS, x) + +#define ANA_CL_VLAN_FILTER_CTRL_CTAG_DIS BIT(8) +#define ANA_CL_VLAN_FILTER_CTRL_CTAG_DIS_SET(x)\ + FIELD_PREP(ANA_CL_VLAN_FILTER_CTRL_CTAG_DIS, x) +#define ANA_CL_VLAN_FILTER_CTRL_CTAG_DIS_GET(x)\ + FIELD_GET(ANA_CL_VLAN_FILTER_CTRL_CTAG_DIS, x) + +#define ANA_CL_VLAN_FILTER_CTRL_PRIO_STAG_DIS BIT(7) +#define ANA_CL_VLAN_FILTER_CTRL_PRIO_STAG_DIS_SET(x)\ + FIELD_PREP(ANA_CL_VLAN_FILTER_CTRL_PRIO_STAG_DIS, x) +#define ANA_CL_VLAN_FILTER_CTRL_PRIO_STAG_DIS_GET(x)\ + FIELD_GET(ANA_CL_VLAN_FILTER_CTRL_PRIO_STAG_DIS, x) + +#define ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST1_STAG_DIS BIT(6) +#define ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST1_STAG_DIS_SET(x)\ + FIELD_PREP(ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST1_STAG_DIS, x) +#define ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST1_STAG_DIS_GET(x)\ + FIELD_GET(ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST1_STAG_DIS, x) + +#define ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST2_STAG_DIS BIT(5) +#define ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST2_STAG_DIS_SET(x)\ + FIELD_PREP(ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST2_STAG_DIS, x) +#define ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST2_STAG_DIS_GET(x)\ + FIELD_GET(ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST2_STAG_DIS, x) + +#define ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST3_STAG_DIS BIT(4) +#define ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST3_STAG_DIS_SET(x)\ + FIELD_PREP(ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST3_STAG_DIS, x) +#define ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST3_STAG_DIS_GET(x)\ + FIELD_GET(ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST3_STAG_DIS, x) + +#define ANA_CL_VLAN_FILTER_CTRL_STAG_DIS BIT(3) +#define ANA_CL_VLAN_FILTER_CTRL_STAG_DIS_SET(x)\ + FIELD_PREP(ANA_CL_VLAN_FILTER_CTRL_STAG_DIS, x) +#define ANA_CL_VLAN_FILTER_CTRL_STAG_DIS_GET(x)\ + FIELD_GET(ANA_CL_VLAN_FILTER_CTRL_STAG_DIS, x) + +#define ANA_CL_VLAN_FILTER_CTRL_CUST1_STAG_DIS BIT(2) +#define ANA_CL_VLAN_FILTER_CTRL_CUST1_STAG_DIS_SET(x)\ + FIELD_PREP(ANA_CL_VLAN_FILTER_CTRL_CUST1_STAG_DIS, x) +#define ANA_CL_VLAN_FILTER_CTRL_CUST1_STAG_DIS_GET(x)\ + FIELD_GET(ANA_CL_VLAN_FILTER_CTRL_CUST1_STAG_DIS, x) + +#define ANA_CL_VLAN_FILTER_CTRL_CUST2_STAG_DIS BIT(1) +#define ANA_CL_VLAN_FILTER_CTRL_CUST2_STAG_DIS_SET(x)\ + FIELD_PREP(ANA_CL_VLAN_FILTER_CTRL_CUST2_STAG_DIS, x) +#define ANA_CL_VLAN_FILTER_CTRL_CUST2_STAG_DIS_GET(x)\ + FIELD_GET(ANA_CL_VLAN_FILTER_CTRL_CUST2_STAG_DIS, x) + +#define ANA_CL_VLAN_FILTER_CTRL_CUST3_STAG_DIS BIT(0) +#define ANA_CL_VLAN_FILTER_CTRL_CUST3_STAG_DIS_SET(x)\ + FIELD_PREP(ANA_CL_VLAN_FILTER_CTRL_CUST3_STAG_DIS, x) +#define ANA_CL_VLAN_FILTER_CTRL_CUST3_STAG_DIS_GET(x)\ + FIELD_GET(ANA_CL_VLAN_FILTER_CTRL_CUST3_STAG_DIS, x) + +/* ANA_CL:PORT:ETAG_FILTER_CTRL */ +#define ANA_CL_ETAG_FILTER_CTRL(g) __REG(TARGET_ANA_CL, 0, 1, 131072, g, 70, 512, 20, 0, 1, 4) + +#define ANA_CL_ETAG_FILTER_CTRL_ETAG_REQUIRED_ENA BIT(1) +#define ANA_CL_ETAG_FILTER_CTRL_ETAG_REQUIRED_ENA_SET(x)\ + FIELD_PREP(ANA_CL_ETAG_FILTER_CTRL_ETAG_REQUIRED_ENA, x) +#define ANA_CL_ETAG_FILTER_CTRL_ETAG_REQUIRED_ENA_GET(x)\ + FIELD_GET(ANA_CL_ETAG_FILTER_CTRL_ETAG_REQUIRED_ENA, x) + +#define ANA_CL_ETAG_FILTER_CTRL_ETAG_DIS BIT(0) +#define ANA_CL_ETAG_FILTER_CTRL_ETAG_DIS_SET(x)\ + FIELD_PREP(ANA_CL_ETAG_FILTER_CTRL_ETAG_DIS, x) +#define ANA_CL_ETAG_FILTER_CTRL_ETAG_DIS_GET(x)\ + 
FIELD_GET(ANA_CL_ETAG_FILTER_CTRL_ETAG_DIS, x) + +/* ANA_CL:PORT:VLAN_CTRL */ +#define ANA_CL_VLAN_CTRL(g) __REG(TARGET_ANA_CL, 0, 1, 131072, g, 70, 512, 32, 0, 1, 4) + +#define ANA_CL_VLAN_CTRL_PORT_VOE_TPID_AWARE_DIS GENMASK(30, 26) +#define ANA_CL_VLAN_CTRL_PORT_VOE_TPID_AWARE_DIS_SET(x)\ + FIELD_PREP(ANA_CL_VLAN_CTRL_PORT_VOE_TPID_AWARE_DIS, x) +#define ANA_CL_VLAN_CTRL_PORT_VOE_TPID_AWARE_DIS_GET(x)\ + FIELD_GET(ANA_CL_VLAN_CTRL_PORT_VOE_TPID_AWARE_DIS, x) + +#define ANA_CL_VLAN_CTRL_PORT_VOE_DEFAULT_PCP GENMASK(25, 23) +#define ANA_CL_VLAN_CTRL_PORT_VOE_DEFAULT_PCP_SET(x)\ + FIELD_PREP(ANA_CL_VLAN_CTRL_PORT_VOE_DEFAULT_PCP, x) +#define ANA_CL_VLAN_CTRL_PORT_VOE_DEFAULT_PCP_GET(x)\ + FIELD_GET(ANA_CL_VLAN_CTRL_PORT_VOE_DEFAULT_PCP, x) + +#define ANA_CL_VLAN_CTRL_PORT_VOE_DEFAULT_DEI BIT(22) +#define ANA_CL_VLAN_CTRL_PORT_VOE_DEFAULT_DEI_SET(x)\ + FIELD_PREP(ANA_CL_VLAN_CTRL_PORT_VOE_DEFAULT_DEI, x) +#define ANA_CL_VLAN_CTRL_PORT_VOE_DEFAULT_DEI_GET(x)\ + FIELD_GET(ANA_CL_VLAN_CTRL_PORT_VOE_DEFAULT_DEI, x) + +#define ANA_CL_VLAN_CTRL_VLAN_PCP_DEI_TRANS_ENA BIT(21) +#define ANA_CL_VLAN_CTRL_VLAN_PCP_DEI_TRANS_ENA_SET(x)\ + FIELD_PREP(ANA_CL_VLAN_CTRL_VLAN_PCP_DEI_TRANS_ENA, x) +#define ANA_CL_VLAN_CTRL_VLAN_PCP_DEI_TRANS_ENA_GET(x)\ + FIELD_GET(ANA_CL_VLAN_CTRL_VLAN_PCP_DEI_TRANS_ENA, x) + +#define ANA_CL_VLAN_CTRL_VLAN_TAG_SEL BIT(20) +#define ANA_CL_VLAN_CTRL_VLAN_TAG_SEL_SET(x)\ + FIELD_PREP(ANA_CL_VLAN_CTRL_VLAN_TAG_SEL, x) +#define ANA_CL_VLAN_CTRL_VLAN_TAG_SEL_GET(x)\ + FIELD_GET(ANA_CL_VLAN_CTRL_VLAN_TAG_SEL, x) + +#define ANA_CL_VLAN_CTRL_VLAN_AWARE_ENA BIT(19) +#define ANA_CL_VLAN_CTRL_VLAN_AWARE_ENA_SET(x)\ + FIELD_PREP(ANA_CL_VLAN_CTRL_VLAN_AWARE_ENA, x) +#define ANA_CL_VLAN_CTRL_VLAN_AWARE_ENA_GET(x)\ + FIELD_GET(ANA_CL_VLAN_CTRL_VLAN_AWARE_ENA, x) + +#define ANA_CL_VLAN_CTRL_VLAN_POP_CNT GENMASK(18, 17) +#define ANA_CL_VLAN_CTRL_VLAN_POP_CNT_SET(x)\ + FIELD_PREP(ANA_CL_VLAN_CTRL_VLAN_POP_CNT, x) +#define ANA_CL_VLAN_CTRL_VLAN_POP_CNT_GET(x)\ + FIELD_GET(ANA_CL_VLAN_CTRL_VLAN_POP_CNT, x) + +#define ANA_CL_VLAN_CTRL_PORT_TAG_TYPE BIT(16) +#define ANA_CL_VLAN_CTRL_PORT_TAG_TYPE_SET(x)\ + FIELD_PREP(ANA_CL_VLAN_CTRL_PORT_TAG_TYPE, x) +#define ANA_CL_VLAN_CTRL_PORT_TAG_TYPE_GET(x)\ + FIELD_GET(ANA_CL_VLAN_CTRL_PORT_TAG_TYPE, x) + +#define ANA_CL_VLAN_CTRL_PORT_PCP GENMASK(15, 13) +#define ANA_CL_VLAN_CTRL_PORT_PCP_SET(x)\ + FIELD_PREP(ANA_CL_VLAN_CTRL_PORT_PCP, x) +#define ANA_CL_VLAN_CTRL_PORT_PCP_GET(x)\ + FIELD_GET(ANA_CL_VLAN_CTRL_PORT_PCP, x) + +#define ANA_CL_VLAN_CTRL_PORT_DEI BIT(12) +#define ANA_CL_VLAN_CTRL_PORT_DEI_SET(x)\ + FIELD_PREP(ANA_CL_VLAN_CTRL_PORT_DEI, x) +#define ANA_CL_VLAN_CTRL_PORT_DEI_GET(x)\ + FIELD_GET(ANA_CL_VLAN_CTRL_PORT_DEI, x) + +#define ANA_CL_VLAN_CTRL_PORT_VID GENMASK(11, 0) +#define ANA_CL_VLAN_CTRL_PORT_VID_SET(x)\ + FIELD_PREP(ANA_CL_VLAN_CTRL_PORT_VID, x) +#define ANA_CL_VLAN_CTRL_PORT_VID_GET(x)\ + FIELD_GET(ANA_CL_VLAN_CTRL_PORT_VID, x) + +/* ANA_CL:PORT:VLAN_CTRL_2 */ +#define ANA_CL_VLAN_CTRL_2(g) __REG(TARGET_ANA_CL, 0, 1, 131072, g, 70, 512, 36, 0, 1, 4) + +#define ANA_CL_VLAN_CTRL_2_VLAN_PUSH_CNT GENMASK(1, 0) +#define ANA_CL_VLAN_CTRL_2_VLAN_PUSH_CNT_SET(x)\ + FIELD_PREP(ANA_CL_VLAN_CTRL_2_VLAN_PUSH_CNT, x) +#define ANA_CL_VLAN_CTRL_2_VLAN_PUSH_CNT_GET(x)\ + FIELD_GET(ANA_CL_VLAN_CTRL_2_VLAN_PUSH_CNT, x) + +/* ANA_CL:PORT:CAPTURE_BPDU_CFG */ +#define ANA_CL_CAPTURE_BPDU_CFG(g) __REG(TARGET_ANA_CL, 0, 1, 131072, g, 70, 512, 196, 0, 1, 4) + +/* ANA_CL:COMMON:OWN_UPSID */ +#define ANA_CL_OWN_UPSID(r) __REG(TARGET_ANA_CL, 0, 1, 166912, 
0, 1, 756, 0, r, 3, 4) + +#define ANA_CL_OWN_UPSID_OWN_UPSID GENMASK(4, 0) +#define ANA_CL_OWN_UPSID_OWN_UPSID_SET(x)\ + FIELD_PREP(ANA_CL_OWN_UPSID_OWN_UPSID, x) +#define ANA_CL_OWN_UPSID_OWN_UPSID_GET(x)\ + FIELD_GET(ANA_CL_OWN_UPSID_OWN_UPSID, x) + +/* ANA_L2:COMMON:AUTO_LRN_CFG */ +#define ANA_L2_AUTO_LRN_CFG __REG(TARGET_ANA_L2, 0, 1, 566024, 0, 1, 700, 24, 0, 1, 4) + +/* ANA_L2:COMMON:AUTO_LRN_CFG1 */ +#define ANA_L2_AUTO_LRN_CFG1 __REG(TARGET_ANA_L2, 0, 1, 566024, 0, 1, 700, 28, 0, 1, 4) + +/* ANA_L2:COMMON:AUTO_LRN_CFG2 */ +#define ANA_L2_AUTO_LRN_CFG2 __REG(TARGET_ANA_L2, 0, 1, 566024, 0, 1, 700, 32, 0, 1, 4) + +#define ANA_L2_AUTO_LRN_CFG2_AUTO_LRN_ENA2 BIT(0) +#define ANA_L2_AUTO_LRN_CFG2_AUTO_LRN_ENA2_SET(x)\ + FIELD_PREP(ANA_L2_AUTO_LRN_CFG2_AUTO_LRN_ENA2, x) +#define ANA_L2_AUTO_LRN_CFG2_AUTO_LRN_ENA2_GET(x)\ + FIELD_GET(ANA_L2_AUTO_LRN_CFG2_AUTO_LRN_ENA2, x) + +/* ANA_L2:COMMON:OWN_UPSID */ +#define ANA_L2_OWN_UPSID(r) __REG(TARGET_ANA_L2, 0, 1, 566024, 0, 1, 700, 672, r, 3, 4) + +#define ANA_L2_OWN_UPSID_OWN_UPSID GENMASK(4, 0) +#define ANA_L2_OWN_UPSID_OWN_UPSID_SET(x)\ + FIELD_PREP(ANA_L2_OWN_UPSID_OWN_UPSID, x) +#define ANA_L2_OWN_UPSID_OWN_UPSID_GET(x)\ + FIELD_GET(ANA_L2_OWN_UPSID_OWN_UPSID, x) + +/* ANA_L3:COMMON:VLAN_CTRL */ +#define ANA_L3_VLAN_CTRL __REG(TARGET_ANA_L3, 0, 1, 493632, 0, 1, 184, 4, 0, 1, 4) + +#define ANA_L3_VLAN_CTRL_VLAN_ENA BIT(0) +#define ANA_L3_VLAN_CTRL_VLAN_ENA_SET(x)\ + FIELD_PREP(ANA_L3_VLAN_CTRL_VLAN_ENA, x) +#define ANA_L3_VLAN_CTRL_VLAN_ENA_GET(x)\ + FIELD_GET(ANA_L3_VLAN_CTRL_VLAN_ENA, x) + +/* ANA_L3:VLAN:VLAN_CFG */ +#define ANA_L3_VLAN_CFG(g) __REG(TARGET_ANA_L3, 0, 1, 0, g, 5120, 64, 8, 0, 1, 4) + +#define ANA_L3_VLAN_CFG_VLAN_MSTP_PTR GENMASK(30, 24) +#define ANA_L3_VLAN_CFG_VLAN_MSTP_PTR_SET(x)\ + FIELD_PREP(ANA_L3_VLAN_CFG_VLAN_MSTP_PTR, x) +#define ANA_L3_VLAN_CFG_VLAN_MSTP_PTR_GET(x)\ + FIELD_GET(ANA_L3_VLAN_CFG_VLAN_MSTP_PTR, x) + +#define ANA_L3_VLAN_CFG_VLAN_FID GENMASK(20, 8) +#define ANA_L3_VLAN_CFG_VLAN_FID_SET(x)\ + FIELD_PREP(ANA_L3_VLAN_CFG_VLAN_FID, x) +#define ANA_L3_VLAN_CFG_VLAN_FID_GET(x)\ + FIELD_GET(ANA_L3_VLAN_CFG_VLAN_FID, x) + +#define ANA_L3_VLAN_CFG_VLAN_IGR_FILTER_ENA BIT(6) +#define ANA_L3_VLAN_CFG_VLAN_IGR_FILTER_ENA_SET(x)\ + FIELD_PREP(ANA_L3_VLAN_CFG_VLAN_IGR_FILTER_ENA, x) +#define ANA_L3_VLAN_CFG_VLAN_IGR_FILTER_ENA_GET(x)\ + FIELD_GET(ANA_L3_VLAN_CFG_VLAN_IGR_FILTER_ENA, x) + +#define ANA_L3_VLAN_CFG_VLAN_SEC_FWD_ENA BIT(5) +#define ANA_L3_VLAN_CFG_VLAN_SEC_FWD_ENA_SET(x)\ + FIELD_PREP(ANA_L3_VLAN_CFG_VLAN_SEC_FWD_ENA, x) +#define ANA_L3_VLAN_CFG_VLAN_SEC_FWD_ENA_GET(x)\ + FIELD_GET(ANA_L3_VLAN_CFG_VLAN_SEC_FWD_ENA, x) + +#define ANA_L3_VLAN_CFG_VLAN_FLOOD_DIS BIT(4) +#define ANA_L3_VLAN_CFG_VLAN_FLOOD_DIS_SET(x)\ + FIELD_PREP(ANA_L3_VLAN_CFG_VLAN_FLOOD_DIS, x) +#define ANA_L3_VLAN_CFG_VLAN_FLOOD_DIS_GET(x)\ + FIELD_GET(ANA_L3_VLAN_CFG_VLAN_FLOOD_DIS, x) + +#define ANA_L3_VLAN_CFG_VLAN_LRN_DIS BIT(3) +#define ANA_L3_VLAN_CFG_VLAN_LRN_DIS_SET(x)\ + FIELD_PREP(ANA_L3_VLAN_CFG_VLAN_LRN_DIS, x) +#define ANA_L3_VLAN_CFG_VLAN_LRN_DIS_GET(x)\ + FIELD_GET(ANA_L3_VLAN_CFG_VLAN_LRN_DIS, x) + +#define ANA_L3_VLAN_CFG_VLAN_RLEG_ENA BIT(2) +#define ANA_L3_VLAN_CFG_VLAN_RLEG_ENA_SET(x)\ + FIELD_PREP(ANA_L3_VLAN_CFG_VLAN_RLEG_ENA, x) +#define ANA_L3_VLAN_CFG_VLAN_RLEG_ENA_GET(x)\ + FIELD_GET(ANA_L3_VLAN_CFG_VLAN_RLEG_ENA, x) + +#define ANA_L3_VLAN_CFG_VLAN_PRIVATE_ENA BIT(1) +#define ANA_L3_VLAN_CFG_VLAN_PRIVATE_ENA_SET(x)\ + FIELD_PREP(ANA_L3_VLAN_CFG_VLAN_PRIVATE_ENA, x) +#define 
ANA_L3_VLAN_CFG_VLAN_PRIVATE_ENA_GET(x)\ + FIELD_GET(ANA_L3_VLAN_CFG_VLAN_PRIVATE_ENA, x) + +#define ANA_L3_VLAN_CFG_VLAN_MIRROR_ENA BIT(0) +#define ANA_L3_VLAN_CFG_VLAN_MIRROR_ENA_SET(x)\ + FIELD_PREP(ANA_L3_VLAN_CFG_VLAN_MIRROR_ENA, x) +#define ANA_L3_VLAN_CFG_VLAN_MIRROR_ENA_GET(x)\ + FIELD_GET(ANA_L3_VLAN_CFG_VLAN_MIRROR_ENA, x) + +/* ANA_L3:VLAN:VLAN_MASK_CFG */ +#define ANA_L3_VLAN_MASK_CFG(g) __REG(TARGET_ANA_L3, 0, 1, 0, g, 5120, 64, 16, 0, 1, 4) + +/* ANA_L3:VLAN:VLAN_MASK_CFG1 */ +#define ANA_L3_VLAN_MASK_CFG1(g) __REG(TARGET_ANA_L3, 0, 1, 0, g, 5120, 64, 20, 0, 1, 4) + +/* ANA_L3:VLAN:VLAN_MASK_CFG2 */ +#define ANA_L3_VLAN_MASK_CFG2(g) __REG(TARGET_ANA_L3, 0, 1, 0, g, 5120, 64, 24, 0, 1, 4) + +#define ANA_L3_VLAN_MASK_CFG2_VLAN_PORT_MASK2 BIT(0) +#define ANA_L3_VLAN_MASK_CFG2_VLAN_PORT_MASK2_SET(x)\ + FIELD_PREP(ANA_L3_VLAN_MASK_CFG2_VLAN_PORT_MASK2, x) +#define ANA_L3_VLAN_MASK_CFG2_VLAN_PORT_MASK2_GET(x)\ + FIELD_GET(ANA_L3_VLAN_MASK_CFG2_VLAN_PORT_MASK2, x) + +/* ASM:DEV_STATISTICS:RX_IN_BYTES_CNT */ +#define ASM_RX_IN_BYTES_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 0, 0, 1, 4) + +/* ASM:DEV_STATISTICS:RX_SYMBOL_ERR_CNT */ +#define ASM_RX_SYMBOL_ERR_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 4, 0, 1, 4) + +/* ASM:DEV_STATISTICS:RX_PAUSE_CNT */ +#define ASM_RX_PAUSE_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 8, 0, 1, 4) + +/* ASM:DEV_STATISTICS:RX_UNSUP_OPCODE_CNT */ +#define ASM_RX_UNSUP_OPCODE_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 12, 0, 1, 4) + +/* ASM:DEV_STATISTICS:RX_OK_BYTES_CNT */ +#define ASM_RX_OK_BYTES_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 16, 0, 1, 4) + +/* ASM:DEV_STATISTICS:RX_BAD_BYTES_CNT */ +#define ASM_RX_BAD_BYTES_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 20, 0, 1, 4) + +/* ASM:DEV_STATISTICS:RX_UC_CNT */ +#define ASM_RX_UC_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 24, 0, 1, 4) + +/* ASM:DEV_STATISTICS:RX_MC_CNT */ +#define ASM_RX_MC_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 28, 0, 1, 4) + +/* ASM:DEV_STATISTICS:RX_BC_CNT */ +#define ASM_RX_BC_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 32, 0, 1, 4) + +/* ASM:DEV_STATISTICS:RX_CRC_ERR_CNT */ +#define ASM_RX_CRC_ERR_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 36, 0, 1, 4) + +/* ASM:DEV_STATISTICS:RX_UNDERSIZE_CNT */ +#define ASM_RX_UNDERSIZE_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 40, 0, 1, 4) + +/* ASM:DEV_STATISTICS:RX_FRAGMENTS_CNT */ +#define ASM_RX_FRAGMENTS_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 44, 0, 1, 4) + +/* ASM:DEV_STATISTICS:RX_IN_RANGE_LEN_ERR_CNT */ +#define ASM_RX_IN_RANGE_LEN_ERR_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 48, 0, 1, 4) + +/* ASM:DEV_STATISTICS:RX_OUT_OF_RANGE_LEN_ERR_CNT */ +#define ASM_RX_OUT_OF_RANGE_LEN_ERR_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 52, 0, 1, 4) + +/* ASM:DEV_STATISTICS:RX_OVERSIZE_CNT */ +#define ASM_RX_OVERSIZE_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 56, 0, 1, 4) + +/* ASM:DEV_STATISTICS:RX_JABBERS_CNT */ +#define ASM_RX_JABBERS_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 60, 0, 1, 4) + +/* ASM:DEV_STATISTICS:RX_SIZE64_CNT */ +#define ASM_RX_SIZE64_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 64, 0, 1, 4) + +/* ASM:DEV_STATISTICS:RX_SIZE65TO127_CNT */ +#define ASM_RX_SIZE65TO127_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 68, 0, 1, 4) + +/* ASM:DEV_STATISTICS:RX_SIZE128TO255_CNT */ +#define ASM_RX_SIZE128TO255_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 72, 0, 1, 4) + +/* ASM:DEV_STATISTICS:RX_SIZE256TO511_CNT */ +#define ASM_RX_SIZE256TO511_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 
512, 76, 0, 1, 4) + +/* ASM:DEV_STATISTICS:RX_SIZE512TO1023_CNT */ +#define ASM_RX_SIZE512TO1023_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 80, 0, 1, 4) + +/* ASM:DEV_STATISTICS:RX_SIZE1024TO1518_CNT */ +#define ASM_RX_SIZE1024TO1518_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 84, 0, 1, 4) + +/* ASM:DEV_STATISTICS:RX_SIZE1519TOMAX_CNT */ +#define ASM_RX_SIZE1519TOMAX_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 88, 0, 1, 4) + +/* ASM:DEV_STATISTICS:RX_IPG_SHRINK_CNT */ +#define ASM_RX_IPG_SHRINK_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 92, 0, 1, 4) + +/* ASM:DEV_STATISTICS:TX_OUT_BYTES_CNT */ +#define ASM_TX_OUT_BYTES_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 96, 0, 1, 4) + +/* ASM:DEV_STATISTICS:TX_PAUSE_CNT */ +#define ASM_TX_PAUSE_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 100, 0, 1, 4) + +/* ASM:DEV_STATISTICS:TX_OK_BYTES_CNT */ +#define ASM_TX_OK_BYTES_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 104, 0, 1, 4) + +/* ASM:DEV_STATISTICS:TX_UC_CNT */ +#define ASM_TX_UC_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 108, 0, 1, 4) + +/* ASM:DEV_STATISTICS:TX_MC_CNT */ +#define ASM_TX_MC_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 112, 0, 1, 4) + +/* ASM:DEV_STATISTICS:TX_BC_CNT */ +#define ASM_TX_BC_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 116, 0, 1, 4) + +/* ASM:DEV_STATISTICS:TX_SIZE64_CNT */ +#define ASM_TX_SIZE64_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 120, 0, 1, 4) + +/* ASM:DEV_STATISTICS:TX_SIZE65TO127_CNT */ +#define ASM_TX_SIZE65TO127_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 124, 0, 1, 4) + +/* ASM:DEV_STATISTICS:TX_SIZE128TO255_CNT */ +#define ASM_TX_SIZE128TO255_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 128, 0, 1, 4) + +/* ASM:DEV_STATISTICS:TX_SIZE256TO511_CNT */ +#define ASM_TX_SIZE256TO511_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 132, 0, 1, 4) + +/* ASM:DEV_STATISTICS:TX_SIZE512TO1023_CNT */ +#define ASM_TX_SIZE512TO1023_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 136, 0, 1, 4) + +/* ASM:DEV_STATISTICS:TX_SIZE1024TO1518_CNT */ +#define ASM_TX_SIZE1024TO1518_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 140, 0, 1, 4) + +/* ASM:DEV_STATISTICS:TX_SIZE1519TOMAX_CNT */ +#define ASM_TX_SIZE1519TOMAX_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 144, 0, 1, 4) + +/* ASM:DEV_STATISTICS:RX_ALIGNMENT_LOST_CNT */ +#define ASM_RX_ALIGNMENT_LOST_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 148, 0, 1, 4) + +/* ASM:DEV_STATISTICS:RX_TAGGED_FRMS_CNT */ +#define ASM_RX_TAGGED_FRMS_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 152, 0, 1, 4) + +/* ASM:DEV_STATISTICS:RX_UNTAGGED_FRMS_CNT */ +#define ASM_RX_UNTAGGED_FRMS_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 156, 0, 1, 4) + +/* ASM:DEV_STATISTICS:TX_TAGGED_FRMS_CNT */ +#define ASM_TX_TAGGED_FRMS_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 160, 0, 1, 4) + +/* ASM:DEV_STATISTICS:TX_UNTAGGED_FRMS_CNT */ +#define ASM_TX_UNTAGGED_FRMS_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 164, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_RX_SYMBOL_ERR_CNT */ +#define ASM_PMAC_RX_SYMBOL_ERR_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 168, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_RX_PAUSE_CNT */ +#define ASM_PMAC_RX_PAUSE_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 172, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_RX_UNSUP_OPCODE_CNT */ +#define ASM_PMAC_RX_UNSUP_OPCODE_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 176, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_RX_OK_BYTES_CNT */ +#define ASM_PMAC_RX_OK_BYTES_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 180, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_RX_BAD_BYTES_CNT */ 
+#define ASM_PMAC_RX_BAD_BYTES_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 184, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_RX_UC_CNT */ +#define ASM_PMAC_RX_UC_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 188, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_RX_MC_CNT */ +#define ASM_PMAC_RX_MC_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 192, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_RX_BC_CNT */ +#define ASM_PMAC_RX_BC_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 196, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_RX_CRC_ERR_CNT */ +#define ASM_PMAC_RX_CRC_ERR_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 200, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_RX_UNDERSIZE_CNT */ +#define ASM_PMAC_RX_UNDERSIZE_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 204, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_RX_FRAGMENTS_CNT */ +#define ASM_PMAC_RX_FRAGMENTS_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 208, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_RX_IN_RANGE_LEN_ERR_CNT */ +#define ASM_PMAC_RX_IN_RANGE_LEN_ERR_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 212, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_RX_OUT_OF_RANGE_LEN_ERR_CNT */ +#define ASM_PMAC_RX_OUT_OF_RANGE_LEN_ERR_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 216, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_RX_OVERSIZE_CNT */ +#define ASM_PMAC_RX_OVERSIZE_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 220, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_RX_JABBERS_CNT */ +#define ASM_PMAC_RX_JABBERS_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 224, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_RX_SIZE64_CNT */ +#define ASM_PMAC_RX_SIZE64_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 228, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_RX_SIZE65TO127_CNT */ +#define ASM_PMAC_RX_SIZE65TO127_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 232, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_RX_SIZE128TO255_CNT */ +#define ASM_PMAC_RX_SIZE128TO255_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 236, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_RX_SIZE256TO511_CNT */ +#define ASM_PMAC_RX_SIZE256TO511_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 240, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_RX_SIZE512TO1023_CNT */ +#define ASM_PMAC_RX_SIZE512TO1023_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 244, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_RX_SIZE1024TO1518_CNT */ +#define ASM_PMAC_RX_SIZE1024TO1518_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 248, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_RX_SIZE1519TOMAX_CNT */ +#define ASM_PMAC_RX_SIZE1519TOMAX_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 252, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_TX_PAUSE_CNT */ +#define ASM_PMAC_TX_PAUSE_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 256, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_TX_OK_BYTES_CNT */ +#define ASM_PMAC_TX_OK_BYTES_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 260, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_TX_UC_CNT */ +#define ASM_PMAC_TX_UC_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 264, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_TX_MC_CNT */ +#define ASM_PMAC_TX_MC_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 268, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_TX_BC_CNT */ +#define ASM_PMAC_TX_BC_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 272, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_TX_SIZE64_CNT */ +#define ASM_PMAC_TX_SIZE64_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 276, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_TX_SIZE65TO127_CNT */ +#define ASM_PMAC_TX_SIZE65TO127_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 280, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_TX_SIZE128TO255_CNT */ +#define ASM_PMAC_TX_SIZE128TO255_CNT(g) __REG(TARGET_ASM, 
0, 1, 0, g, 65, 512, 284, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_TX_SIZE256TO511_CNT */ +#define ASM_PMAC_TX_SIZE256TO511_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 288, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_TX_SIZE512TO1023_CNT */ +#define ASM_PMAC_TX_SIZE512TO1023_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 292, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_TX_SIZE1024TO1518_CNT */ +#define ASM_PMAC_TX_SIZE1024TO1518_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 296, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_TX_SIZE1519TOMAX_CNT */ +#define ASM_PMAC_TX_SIZE1519TOMAX_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 300, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_RX_ALIGNMENT_LOST_CNT */ +#define ASM_PMAC_RX_ALIGNMENT_LOST_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 304, 0, 1, 4) + +/* ASM:DEV_STATISTICS:MM_RX_ASSEMBLY_ERR_CNT */ +#define ASM_MM_RX_ASSEMBLY_ERR_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 308, 0, 1, 4) + +/* ASM:DEV_STATISTICS:MM_RX_SMD_ERR_CNT */ +#define ASM_MM_RX_SMD_ERR_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 312, 0, 1, 4) + +/* ASM:DEV_STATISTICS:MM_RX_ASSEMBLY_OK_CNT */ +#define ASM_MM_RX_ASSEMBLY_OK_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 316, 0, 1, 4) + +/* ASM:DEV_STATISTICS:MM_RX_MERGE_FRAG_CNT */ +#define ASM_MM_RX_MERGE_FRAG_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 320, 0, 1, 4) + +/* ASM:DEV_STATISTICS:MM_TX_PFRAGMENT_CNT */ +#define ASM_MM_TX_PFRAGMENT_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 324, 0, 1, 4) + +/* ASM:DEV_STATISTICS:TX_MULTI_COLL_CNT */ +#define ASM_TX_MULTI_COLL_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 328, 0, 1, 4) + +/* ASM:DEV_STATISTICS:TX_LATE_COLL_CNT */ +#define ASM_TX_LATE_COLL_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 332, 0, 1, 4) + +/* ASM:DEV_STATISTICS:TX_XCOLL_CNT */ +#define ASM_TX_XCOLL_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 336, 0, 1, 4) + +/* ASM:DEV_STATISTICS:TX_DEFER_CNT */ +#define ASM_TX_DEFER_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 340, 0, 1, 4) + +/* ASM:DEV_STATISTICS:TX_XDEFER_CNT */ +#define ASM_TX_XDEFER_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 344, 0, 1, 4) + +/* ASM:DEV_STATISTICS:TX_BACKOFF1_CNT */ +#define ASM_TX_BACKOFF1_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 348, 0, 1, 4) + +/* ASM:DEV_STATISTICS:TX_CSENSE_CNT */ +#define ASM_TX_CSENSE_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 352, 0, 1, 4) + +/* ASM:DEV_STATISTICS:RX_IN_BYTES_MSB_CNT */ +#define ASM_RX_IN_BYTES_MSB_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 356, 0, 1, 4) + +#define ASM_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT GENMASK(3, 0) +#define ASM_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT_SET(x)\ + FIELD_PREP(ASM_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT, x) +#define ASM_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT_GET(x)\ + FIELD_GET(ASM_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT, x) + +/* ASM:DEV_STATISTICS:RX_OK_BYTES_MSB_CNT */ +#define ASM_RX_OK_BYTES_MSB_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 360, 0, 1, 4) + +#define ASM_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT GENMASK(3, 0) +#define ASM_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT_SET(x)\ + FIELD_PREP(ASM_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT, x) +#define ASM_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT_GET(x)\ + FIELD_GET(ASM_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT, x) + +/* ASM:DEV_STATISTICS:PMAC_RX_OK_BYTES_MSB_CNT */ +#define ASM_PMAC_RX_OK_BYTES_MSB_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 364, 0, 1, 4) + +#define ASM_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT GENMASK(3, 0) +#define 
ASM_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT_SET(x)\ + FIELD_PREP(ASM_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT, x) +#define ASM_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT_GET(x)\ + FIELD_GET(ASM_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT, x) + +/* ASM:DEV_STATISTICS:RX_BAD_BYTES_MSB_CNT */ +#define ASM_RX_BAD_BYTES_MSB_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 368, 0, 1, 4) + +#define ASM_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT GENMASK(3, 0) +#define ASM_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT_SET(x)\ + FIELD_PREP(ASM_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT, x) +#define ASM_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT_GET(x)\ + FIELD_GET(ASM_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT, x) + +/* ASM:DEV_STATISTICS:PMAC_RX_BAD_BYTES_MSB_CNT */ +#define ASM_PMAC_RX_BAD_BYTES_MSB_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 372, 0, 1, 4) + +#define ASM_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT GENMASK(3, 0) +#define ASM_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT_SET(x)\ + FIELD_PREP(ASM_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT, x) +#define ASM_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT_GET(x)\ + FIELD_GET(ASM_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT, x) + +/* ASM:DEV_STATISTICS:TX_OUT_BYTES_MSB_CNT */ +#define ASM_TX_OUT_BYTES_MSB_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 376, 0, 1, 4) + +#define ASM_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT GENMASK(3, 0) +#define ASM_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT_SET(x)\ + FIELD_PREP(ASM_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT, x) +#define ASM_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT_GET(x)\ + FIELD_GET(ASM_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT, x) + +/* ASM:DEV_STATISTICS:TX_OK_BYTES_MSB_CNT */ +#define ASM_TX_OK_BYTES_MSB_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 380, 0, 1, 4) + +#define ASM_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT GENMASK(3, 0) +#define ASM_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT_SET(x)\ + FIELD_PREP(ASM_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT, x) +#define ASM_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT_GET(x)\ + FIELD_GET(ASM_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT, x) + +/* ASM:DEV_STATISTICS:PMAC_TX_OK_BYTES_MSB_CNT */ +#define ASM_PMAC_TX_OK_BYTES_MSB_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 384, 0, 1, 4) + +#define ASM_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT GENMASK(3, 0) +#define ASM_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT_SET(x)\ + FIELD_PREP(ASM_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT, x) +#define ASM_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT_GET(x)\ + FIELD_GET(ASM_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT, x) + +/* ASM:DEV_STATISTICS:RX_SYNC_LOST_ERR_CNT */ +#define ASM_RX_SYNC_LOST_ERR_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 388, 0, 1, 4) + +/* ASM:CFG:STAT_CFG */ +#define ASM_STAT_CFG __REG(TARGET_ASM, 0, 1, 33280, 0, 1, 1088, 0, 0, 1, 4) + +#define ASM_STAT_CFG_STAT_CNT_CLR_SHOT BIT(0) +#define ASM_STAT_CFG_STAT_CNT_CLR_SHOT_SET(x)\ + FIELD_PREP(ASM_STAT_CFG_STAT_CNT_CLR_SHOT, x) +#define ASM_STAT_CFG_STAT_CNT_CLR_SHOT_GET(x)\ + FIELD_GET(ASM_STAT_CFG_STAT_CNT_CLR_SHOT, x) + +/* ASM:CFG:PORT_CFG */ +#define ASM_PORT_CFG(r) __REG(TARGET_ASM, 0, 1, 33280, 0, 1, 1088, 540, r, 67, 4) + +#define ASM_PORT_CFG_CSC_STAT_DIS BIT(12) +#define ASM_PORT_CFG_CSC_STAT_DIS_SET(x)\ + FIELD_PREP(ASM_PORT_CFG_CSC_STAT_DIS, x) +#define ASM_PORT_CFG_CSC_STAT_DIS_GET(x)\ + FIELD_GET(ASM_PORT_CFG_CSC_STAT_DIS, x) + +#define ASM_PORT_CFG_HIH_AFTER_PREAMBLE_ENA BIT(11) +#define 
ASM_PORT_CFG_HIH_AFTER_PREAMBLE_ENA_SET(x)\ + FIELD_PREP(ASM_PORT_CFG_HIH_AFTER_PREAMBLE_ENA, x) +#define ASM_PORT_CFG_HIH_AFTER_PREAMBLE_ENA_GET(x)\ + FIELD_GET(ASM_PORT_CFG_HIH_AFTER_PREAMBLE_ENA, x) + +#define ASM_PORT_CFG_IGN_TAXI_ABORT_ENA BIT(10) +#define ASM_PORT_CFG_IGN_TAXI_ABORT_ENA_SET(x)\ + FIELD_PREP(ASM_PORT_CFG_IGN_TAXI_ABORT_ENA, x) +#define ASM_PORT_CFG_IGN_TAXI_ABORT_ENA_GET(x)\ + FIELD_GET(ASM_PORT_CFG_IGN_TAXI_ABORT_ENA, x) + +#define ASM_PORT_CFG_NO_PREAMBLE_ENA BIT(9) +#define ASM_PORT_CFG_NO_PREAMBLE_ENA_SET(x)\ + FIELD_PREP(ASM_PORT_CFG_NO_PREAMBLE_ENA, x) +#define ASM_PORT_CFG_NO_PREAMBLE_ENA_GET(x)\ + FIELD_GET(ASM_PORT_CFG_NO_PREAMBLE_ENA, x) + +#define ASM_PORT_CFG_SKIP_PREAMBLE_ENA BIT(8) +#define ASM_PORT_CFG_SKIP_PREAMBLE_ENA_SET(x)\ + FIELD_PREP(ASM_PORT_CFG_SKIP_PREAMBLE_ENA, x) +#define ASM_PORT_CFG_SKIP_PREAMBLE_ENA_GET(x)\ + FIELD_GET(ASM_PORT_CFG_SKIP_PREAMBLE_ENA, x) + +#define ASM_PORT_CFG_FRM_AGING_DIS BIT(7) +#define ASM_PORT_CFG_FRM_AGING_DIS_SET(x)\ + FIELD_PREP(ASM_PORT_CFG_FRM_AGING_DIS, x) +#define ASM_PORT_CFG_FRM_AGING_DIS_GET(x)\ + FIELD_GET(ASM_PORT_CFG_FRM_AGING_DIS, x) + +#define ASM_PORT_CFG_PAD_ENA BIT(6) +#define ASM_PORT_CFG_PAD_ENA_SET(x)\ + FIELD_PREP(ASM_PORT_CFG_PAD_ENA, x) +#define ASM_PORT_CFG_PAD_ENA_GET(x)\ + FIELD_GET(ASM_PORT_CFG_PAD_ENA, x) + +#define ASM_PORT_CFG_INJ_DISCARD_CFG GENMASK(5, 4) +#define ASM_PORT_CFG_INJ_DISCARD_CFG_SET(x)\ + FIELD_PREP(ASM_PORT_CFG_INJ_DISCARD_CFG, x) +#define ASM_PORT_CFG_INJ_DISCARD_CFG_GET(x)\ + FIELD_GET(ASM_PORT_CFG_INJ_DISCARD_CFG, x) + +#define ASM_PORT_CFG_INJ_FORMAT_CFG GENMASK(3, 2) +#define ASM_PORT_CFG_INJ_FORMAT_CFG_SET(x)\ + FIELD_PREP(ASM_PORT_CFG_INJ_FORMAT_CFG, x) +#define ASM_PORT_CFG_INJ_FORMAT_CFG_GET(x)\ + FIELD_GET(ASM_PORT_CFG_INJ_FORMAT_CFG, x) + +#define ASM_PORT_CFG_VSTAX2_AWR_ENA BIT(1) +#define ASM_PORT_CFG_VSTAX2_AWR_ENA_SET(x)\ + FIELD_PREP(ASM_PORT_CFG_VSTAX2_AWR_ENA, x) +#define ASM_PORT_CFG_VSTAX2_AWR_ENA_GET(x)\ + FIELD_GET(ASM_PORT_CFG_VSTAX2_AWR_ENA, x) + +#define ASM_PORT_CFG_PFRM_FLUSH BIT(0) +#define ASM_PORT_CFG_PFRM_FLUSH_SET(x)\ + FIELD_PREP(ASM_PORT_CFG_PFRM_FLUSH, x) +#define ASM_PORT_CFG_PFRM_FLUSH_GET(x)\ + FIELD_GET(ASM_PORT_CFG_PFRM_FLUSH, x) + +/* ASM:RAM_CTRL:RAM_INIT */ +#define ASM_RAM_INIT __REG(TARGET_ASM, 0, 1, 34832, 0, 1, 4, 0, 0, 1, 4) + +#define ASM_RAM_INIT_RAM_INIT BIT(1) +#define ASM_RAM_INIT_RAM_INIT_SET(x)\ + FIELD_PREP(ASM_RAM_INIT_RAM_INIT, x) +#define ASM_RAM_INIT_RAM_INIT_GET(x)\ + FIELD_GET(ASM_RAM_INIT_RAM_INIT, x) + +#define ASM_RAM_INIT_RAM_CFG_HOOK BIT(0) +#define ASM_RAM_INIT_RAM_CFG_HOOK_SET(x)\ + FIELD_PREP(ASM_RAM_INIT_RAM_CFG_HOOK, x) +#define ASM_RAM_INIT_RAM_CFG_HOOK_GET(x)\ + FIELD_GET(ASM_RAM_INIT_RAM_CFG_HOOK, x) + +/* CLKGEN:LCPLL1:LCPLL1_CORE_CLK_CFG */ +#define CLKGEN_LCPLL1_CORE_CLK_CFG __REG(TARGET_CLKGEN, 0, 1, 12, 0, 1, 36, 0, 0, 1, 4) + +#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_DIV GENMASK(7, 0) +#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_DIV_SET(x)\ + FIELD_PREP(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_DIV, x) +#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_DIV_GET(x)\ + FIELD_GET(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_DIV, x) + +#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_PRE_DIV GENMASK(10, 8) +#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_PRE_DIV_SET(x)\ + FIELD_PREP(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_PRE_DIV, x) +#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_PRE_DIV_GET(x)\ + FIELD_GET(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_PRE_DIV, x) + +#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_DIR BIT(11) +#define 
CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_DIR_SET(x)\ + FIELD_PREP(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_DIR, x) +#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_DIR_GET(x)\ + FIELD_GET(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_DIR, x) + +#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_SEL GENMASK(13, 12) +#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_SEL_SET(x)\ + FIELD_PREP(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_SEL, x) +#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_SEL_GET(x)\ + FIELD_GET(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_SEL, x) + +#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_ENA BIT(14) +#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_ENA_SET(x)\ + FIELD_PREP(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_ENA, x) +#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_ENA_GET(x)\ + FIELD_GET(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_ENA, x) + +#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_ENA BIT(15) +#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_ENA_SET(x)\ + FIELD_PREP(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_ENA, x) +#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_ENA_GET(x)\ + FIELD_GET(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_ENA, x) + +/* CPU:CPU_REGS:PROC_CTRL */ +#define CPU_PROC_CTRL __REG(TARGET_CPU, 0, 1, 0, 0, 1, 204, 176, 0, 1, 4) + +#define CPU_PROC_CTRL_AARCH64_MODE_ENA BIT(12) +#define CPU_PROC_CTRL_AARCH64_MODE_ENA_SET(x)\ + FIELD_PREP(CPU_PROC_CTRL_AARCH64_MODE_ENA, x) +#define CPU_PROC_CTRL_AARCH64_MODE_ENA_GET(x)\ + FIELD_GET(CPU_PROC_CTRL_AARCH64_MODE_ENA, x) + +#define CPU_PROC_CTRL_L2_RST_INVALIDATE_DIS BIT(11) +#define CPU_PROC_CTRL_L2_RST_INVALIDATE_DIS_SET(x)\ + FIELD_PREP(CPU_PROC_CTRL_L2_RST_INVALIDATE_DIS, x) +#define CPU_PROC_CTRL_L2_RST_INVALIDATE_DIS_GET(x)\ + FIELD_GET(CPU_PROC_CTRL_L2_RST_INVALIDATE_DIS, x) + +#define CPU_PROC_CTRL_L1_RST_INVALIDATE_DIS BIT(10) +#define CPU_PROC_CTRL_L1_RST_INVALIDATE_DIS_SET(x)\ + FIELD_PREP(CPU_PROC_CTRL_L1_RST_INVALIDATE_DIS, x) +#define CPU_PROC_CTRL_L1_RST_INVALIDATE_DIS_GET(x)\ + FIELD_GET(CPU_PROC_CTRL_L1_RST_INVALIDATE_DIS, x) + +#define CPU_PROC_CTRL_BE_EXCEP_MODE BIT(9) +#define CPU_PROC_CTRL_BE_EXCEP_MODE_SET(x)\ + FIELD_PREP(CPU_PROC_CTRL_BE_EXCEP_MODE, x) +#define CPU_PROC_CTRL_BE_EXCEP_MODE_GET(x)\ + FIELD_GET(CPU_PROC_CTRL_BE_EXCEP_MODE, x) + +#define CPU_PROC_CTRL_VINITHI BIT(8) +#define CPU_PROC_CTRL_VINITHI_SET(x)\ + FIELD_PREP(CPU_PROC_CTRL_VINITHI, x) +#define CPU_PROC_CTRL_VINITHI_GET(x)\ + FIELD_GET(CPU_PROC_CTRL_VINITHI, x) + +#define CPU_PROC_CTRL_CFGTE BIT(7) +#define CPU_PROC_CTRL_CFGTE_SET(x)\ + FIELD_PREP(CPU_PROC_CTRL_CFGTE, x) +#define CPU_PROC_CTRL_CFGTE_GET(x)\ + FIELD_GET(CPU_PROC_CTRL_CFGTE, x) + +#define CPU_PROC_CTRL_CP15S_DISABLE BIT(6) +#define CPU_PROC_CTRL_CP15S_DISABLE_SET(x)\ + FIELD_PREP(CPU_PROC_CTRL_CP15S_DISABLE, x) +#define CPU_PROC_CTRL_CP15S_DISABLE_GET(x)\ + FIELD_GET(CPU_PROC_CTRL_CP15S_DISABLE, x) + +#define CPU_PROC_CTRL_PROC_CRYPTO_DISABLE BIT(5) +#define CPU_PROC_CTRL_PROC_CRYPTO_DISABLE_SET(x)\ + FIELD_PREP(CPU_PROC_CTRL_PROC_CRYPTO_DISABLE, x) +#define CPU_PROC_CTRL_PROC_CRYPTO_DISABLE_GET(x)\ + FIELD_GET(CPU_PROC_CTRL_PROC_CRYPTO_DISABLE, x) + +#define CPU_PROC_CTRL_ACP_CACHE_FORCE_ENA BIT(4) +#define CPU_PROC_CTRL_ACP_CACHE_FORCE_ENA_SET(x)\ + FIELD_PREP(CPU_PROC_CTRL_ACP_CACHE_FORCE_ENA, x) +#define CPU_PROC_CTRL_ACP_CACHE_FORCE_ENA_GET(x)\ + FIELD_GET(CPU_PROC_CTRL_ACP_CACHE_FORCE_ENA, x) + +#define CPU_PROC_CTRL_ACP_AWCACHE BIT(3) +#define CPU_PROC_CTRL_ACP_AWCACHE_SET(x)\ + FIELD_PREP(CPU_PROC_CTRL_ACP_AWCACHE, x) +#define CPU_PROC_CTRL_ACP_AWCACHE_GET(x)\ + FIELD_GET(CPU_PROC_CTRL_ACP_AWCACHE, x) + +#define 
CPU_PROC_CTRL_ACP_ARCACHE BIT(2) +#define CPU_PROC_CTRL_ACP_ARCACHE_SET(x)\ + FIELD_PREP(CPU_PROC_CTRL_ACP_ARCACHE, x) +#define CPU_PROC_CTRL_ACP_ARCACHE_GET(x)\ + FIELD_GET(CPU_PROC_CTRL_ACP_ARCACHE, x) + +#define CPU_PROC_CTRL_L2_FLUSH_REQ BIT(1) +#define CPU_PROC_CTRL_L2_FLUSH_REQ_SET(x)\ + FIELD_PREP(CPU_PROC_CTRL_L2_FLUSH_REQ, x) +#define CPU_PROC_CTRL_L2_FLUSH_REQ_GET(x)\ + FIELD_GET(CPU_PROC_CTRL_L2_FLUSH_REQ, x) + +#define CPU_PROC_CTRL_ACP_DISABLE BIT(0) +#define CPU_PROC_CTRL_ACP_DISABLE_SET(x)\ + FIELD_PREP(CPU_PROC_CTRL_ACP_DISABLE, x) +#define CPU_PROC_CTRL_ACP_DISABLE_GET(x)\ + FIELD_GET(CPU_PROC_CTRL_ACP_DISABLE, x) + +/* DEV10G:MAC_CFG_STATUS:MAC_ENA_CFG */ +#define DEV10G_MAC_ENA_CFG(t) __REG(TARGET_DEV10G, t, 12, 0, 0, 1, 60, 0, 0, 1, 4) + +#define DEV10G_MAC_ENA_CFG_RX_ENA BIT(4) +#define DEV10G_MAC_ENA_CFG_RX_ENA_SET(x)\ + FIELD_PREP(DEV10G_MAC_ENA_CFG_RX_ENA, x) +#define DEV10G_MAC_ENA_CFG_RX_ENA_GET(x)\ + FIELD_GET(DEV10G_MAC_ENA_CFG_RX_ENA, x) + +#define DEV10G_MAC_ENA_CFG_TX_ENA BIT(0) +#define DEV10G_MAC_ENA_CFG_TX_ENA_SET(x)\ + FIELD_PREP(DEV10G_MAC_ENA_CFG_TX_ENA, x) +#define DEV10G_MAC_ENA_CFG_TX_ENA_GET(x)\ + FIELD_GET(DEV10G_MAC_ENA_CFG_TX_ENA, x) + +/* DEV10G:MAC_CFG_STATUS:MAC_MAXLEN_CFG */ +#define DEV10G_MAC_MAXLEN_CFG(t) __REG(TARGET_DEV10G, t, 12, 0, 0, 1, 60, 8, 0, 1, 4) + +#define DEV10G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK BIT(16) +#define DEV10G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK_SET(x)\ + FIELD_PREP(DEV10G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK, x) +#define DEV10G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK_GET(x)\ + FIELD_GET(DEV10G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK, x) + +#define DEV10G_MAC_MAXLEN_CFG_MAX_LEN GENMASK(15, 0) +#define DEV10G_MAC_MAXLEN_CFG_MAX_LEN_SET(x)\ + FIELD_PREP(DEV10G_MAC_MAXLEN_CFG_MAX_LEN, x) +#define DEV10G_MAC_MAXLEN_CFG_MAX_LEN_GET(x)\ + FIELD_GET(DEV10G_MAC_MAXLEN_CFG_MAX_LEN, x) + +/* DEV10G:MAC_CFG_STATUS:MAC_NUM_TAGS_CFG */ +#define DEV10G_MAC_NUM_TAGS_CFG(t) __REG(TARGET_DEV10G, t, 12, 0, 0, 1, 60, 12, 0, 1, 4) + +#define DEV10G_MAC_NUM_TAGS_CFG_NUM_TAGS GENMASK(1, 0) +#define DEV10G_MAC_NUM_TAGS_CFG_NUM_TAGS_SET(x)\ + FIELD_PREP(DEV10G_MAC_NUM_TAGS_CFG_NUM_TAGS, x) +#define DEV10G_MAC_NUM_TAGS_CFG_NUM_TAGS_GET(x)\ + FIELD_GET(DEV10G_MAC_NUM_TAGS_CFG_NUM_TAGS, x) + +/* DEV10G:MAC_CFG_STATUS:MAC_TAGS_CFG */ +#define DEV10G_MAC_TAGS_CFG(t, r) __REG(TARGET_DEV10G, t, 12, 0, 0, 1, 60, 16, r, 3, 4) + +#define DEV10G_MAC_TAGS_CFG_TAG_ID GENMASK(31, 16) +#define DEV10G_MAC_TAGS_CFG_TAG_ID_SET(x)\ + FIELD_PREP(DEV10G_MAC_TAGS_CFG_TAG_ID, x) +#define DEV10G_MAC_TAGS_CFG_TAG_ID_GET(x)\ + FIELD_GET(DEV10G_MAC_TAGS_CFG_TAG_ID, x) + +#define DEV10G_MAC_TAGS_CFG_TAG_ENA BIT(4) +#define DEV10G_MAC_TAGS_CFG_TAG_ENA_SET(x)\ + FIELD_PREP(DEV10G_MAC_TAGS_CFG_TAG_ENA, x) +#define DEV10G_MAC_TAGS_CFG_TAG_ENA_GET(x)\ + FIELD_GET(DEV10G_MAC_TAGS_CFG_TAG_ENA, x) + +/* DEV10G:MAC_CFG_STATUS:MAC_ADV_CHK_CFG */ +#define DEV10G_MAC_ADV_CHK_CFG(t) __REG(TARGET_DEV10G, t, 12, 0, 0, 1, 60, 28, 0, 1, 4) + +#define DEV10G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA BIT(24) +#define DEV10G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA_SET(x)\ + FIELD_PREP(DEV10G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA, x) +#define DEV10G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA_GET(x)\ + FIELD_GET(DEV10G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA, x) + +#define DEV10G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA BIT(20) +#define DEV10G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA_SET(x)\ + FIELD_PREP(DEV10G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA, x) +#define DEV10G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA_GET(x)\ + FIELD_GET(DEV10G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA, x) + +#define 
DEV10G_MAC_ADV_CHK_CFG_SFD_CHK_ENA BIT(16) +#define DEV10G_MAC_ADV_CHK_CFG_SFD_CHK_ENA_SET(x)\ + FIELD_PREP(DEV10G_MAC_ADV_CHK_CFG_SFD_CHK_ENA, x) +#define DEV10G_MAC_ADV_CHK_CFG_SFD_CHK_ENA_GET(x)\ + FIELD_GET(DEV10G_MAC_ADV_CHK_CFG_SFD_CHK_ENA, x) + +#define DEV10G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS BIT(12) +#define DEV10G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS_SET(x)\ + FIELD_PREP(DEV10G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS, x) +#define DEV10G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS_GET(x)\ + FIELD_GET(DEV10G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS, x) + +#define DEV10G_MAC_ADV_CHK_CFG_PRM_CHK_ENA BIT(8) +#define DEV10G_MAC_ADV_CHK_CFG_PRM_CHK_ENA_SET(x)\ + FIELD_PREP(DEV10G_MAC_ADV_CHK_CFG_PRM_CHK_ENA, x) +#define DEV10G_MAC_ADV_CHK_CFG_PRM_CHK_ENA_GET(x)\ + FIELD_GET(DEV10G_MAC_ADV_CHK_CFG_PRM_CHK_ENA, x) + +#define DEV10G_MAC_ADV_CHK_CFG_OOR_ERR_ENA BIT(4) +#define DEV10G_MAC_ADV_CHK_CFG_OOR_ERR_ENA_SET(x)\ + FIELD_PREP(DEV10G_MAC_ADV_CHK_CFG_OOR_ERR_ENA, x) +#define DEV10G_MAC_ADV_CHK_CFG_OOR_ERR_ENA_GET(x)\ + FIELD_GET(DEV10G_MAC_ADV_CHK_CFG_OOR_ERR_ENA, x) + +#define DEV10G_MAC_ADV_CHK_CFG_INR_ERR_ENA BIT(0) +#define DEV10G_MAC_ADV_CHK_CFG_INR_ERR_ENA_SET(x)\ + FIELD_PREP(DEV10G_MAC_ADV_CHK_CFG_INR_ERR_ENA, x) +#define DEV10G_MAC_ADV_CHK_CFG_INR_ERR_ENA_GET(x)\ + FIELD_GET(DEV10G_MAC_ADV_CHK_CFG_INR_ERR_ENA, x) + +/* DEV10G:MAC_CFG_STATUS:MAC_TX_MONITOR_STICKY */ +#define DEV10G_MAC_TX_MONITOR_STICKY(t) __REG(TARGET_DEV10G, t, 12, 0, 0, 1, 60, 48, 0, 1, 4) + +#define DEV10G_MAC_TX_MONITOR_STICKY_LOCAL_ERR_STATE_STICKY BIT(4) +#define DEV10G_MAC_TX_MONITOR_STICKY_LOCAL_ERR_STATE_STICKY_SET(x)\ + FIELD_PREP(DEV10G_MAC_TX_MONITOR_STICKY_LOCAL_ERR_STATE_STICKY, x) +#define DEV10G_MAC_TX_MONITOR_STICKY_LOCAL_ERR_STATE_STICKY_GET(x)\ + FIELD_GET(DEV10G_MAC_TX_MONITOR_STICKY_LOCAL_ERR_STATE_STICKY, x) + +#define DEV10G_MAC_TX_MONITOR_STICKY_REMOTE_ERR_STATE_STICKY BIT(3) +#define DEV10G_MAC_TX_MONITOR_STICKY_REMOTE_ERR_STATE_STICKY_SET(x)\ + FIELD_PREP(DEV10G_MAC_TX_MONITOR_STICKY_REMOTE_ERR_STATE_STICKY, x) +#define DEV10G_MAC_TX_MONITOR_STICKY_REMOTE_ERR_STATE_STICKY_GET(x)\ + FIELD_GET(DEV10G_MAC_TX_MONITOR_STICKY_REMOTE_ERR_STATE_STICKY, x) + +#define DEV10G_MAC_TX_MONITOR_STICKY_LINK_INTERRUPTION_STATE_STICKY BIT(2) +#define DEV10G_MAC_TX_MONITOR_STICKY_LINK_INTERRUPTION_STATE_STICKY_SET(x)\ + FIELD_PREP(DEV10G_MAC_TX_MONITOR_STICKY_LINK_INTERRUPTION_STATE_STICKY, x) +#define DEV10G_MAC_TX_MONITOR_STICKY_LINK_INTERRUPTION_STATE_STICKY_GET(x)\ + FIELD_GET(DEV10G_MAC_TX_MONITOR_STICKY_LINK_INTERRUPTION_STATE_STICKY, x) + +#define DEV10G_MAC_TX_MONITOR_STICKY_IDLE_STATE_STICKY BIT(1) +#define DEV10G_MAC_TX_MONITOR_STICKY_IDLE_STATE_STICKY_SET(x)\ + FIELD_PREP(DEV10G_MAC_TX_MONITOR_STICKY_IDLE_STATE_STICKY, x) +#define DEV10G_MAC_TX_MONITOR_STICKY_IDLE_STATE_STICKY_GET(x)\ + FIELD_GET(DEV10G_MAC_TX_MONITOR_STICKY_IDLE_STATE_STICKY, x) + +#define DEV10G_MAC_TX_MONITOR_STICKY_DIS_STATE_STICKY BIT(0) +#define DEV10G_MAC_TX_MONITOR_STICKY_DIS_STATE_STICKY_SET(x)\ + FIELD_PREP(DEV10G_MAC_TX_MONITOR_STICKY_DIS_STATE_STICKY, x) +#define DEV10G_MAC_TX_MONITOR_STICKY_DIS_STATE_STICKY_GET(x)\ + FIELD_GET(DEV10G_MAC_TX_MONITOR_STICKY_DIS_STATE_STICKY, x) + +/* DEV10G:DEV_CFG_STATUS:DEV_RST_CTRL */ +#define DEV10G_DEV_RST_CTRL(t) __REG(TARGET_DEV10G, t, 12, 436, 0, 1, 52, 0, 0, 1, 4) + +#define DEV10G_DEV_RST_CTRL_PARDET_MODE_ENA BIT(28) +#define DEV10G_DEV_RST_CTRL_PARDET_MODE_ENA_SET(x)\ + FIELD_PREP(DEV10G_DEV_RST_CTRL_PARDET_MODE_ENA, x) +#define DEV10G_DEV_RST_CTRL_PARDET_MODE_ENA_GET(x)\ + 
FIELD_GET(DEV10G_DEV_RST_CTRL_PARDET_MODE_ENA, x) + +#define DEV10G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS BIT(27) +#define DEV10G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS_SET(x)\ + FIELD_PREP(DEV10G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS, x) +#define DEV10G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS_GET(x)\ + FIELD_GET(DEV10G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS, x) + +#define DEV10G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS GENMASK(26, 25) +#define DEV10G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS_SET(x)\ + FIELD_PREP(DEV10G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS, x) +#define DEV10G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS_GET(x)\ + FIELD_GET(DEV10G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS, x) + +#define DEV10G_DEV_RST_CTRL_SERDES_SPEED_SEL GENMASK(24, 23) +#define DEV10G_DEV_RST_CTRL_SERDES_SPEED_SEL_SET(x)\ + FIELD_PREP(DEV10G_DEV_RST_CTRL_SERDES_SPEED_SEL, x) +#define DEV10G_DEV_RST_CTRL_SERDES_SPEED_SEL_GET(x)\ + FIELD_GET(DEV10G_DEV_RST_CTRL_SERDES_SPEED_SEL, x) + +#define DEV10G_DEV_RST_CTRL_SPEED_SEL GENMASK(22, 20) +#define DEV10G_DEV_RST_CTRL_SPEED_SEL_SET(x)\ + FIELD_PREP(DEV10G_DEV_RST_CTRL_SPEED_SEL, x) +#define DEV10G_DEV_RST_CTRL_SPEED_SEL_GET(x)\ + FIELD_GET(DEV10G_DEV_RST_CTRL_SPEED_SEL, x) + +#define DEV10G_DEV_RST_CTRL_PCS_TX_RST BIT(12) +#define DEV10G_DEV_RST_CTRL_PCS_TX_RST_SET(x)\ + FIELD_PREP(DEV10G_DEV_RST_CTRL_PCS_TX_RST, x) +#define DEV10G_DEV_RST_CTRL_PCS_TX_RST_GET(x)\ + FIELD_GET(DEV10G_DEV_RST_CTRL_PCS_TX_RST, x) + +#define DEV10G_DEV_RST_CTRL_PCS_RX_RST BIT(8) +#define DEV10G_DEV_RST_CTRL_PCS_RX_RST_SET(x)\ + FIELD_PREP(DEV10G_DEV_RST_CTRL_PCS_RX_RST, x) +#define DEV10G_DEV_RST_CTRL_PCS_RX_RST_GET(x)\ + FIELD_GET(DEV10G_DEV_RST_CTRL_PCS_RX_RST, x) + +#define DEV10G_DEV_RST_CTRL_MAC_TX_RST BIT(4) +#define DEV10G_DEV_RST_CTRL_MAC_TX_RST_SET(x)\ + FIELD_PREP(DEV10G_DEV_RST_CTRL_MAC_TX_RST, x) +#define DEV10G_DEV_RST_CTRL_MAC_TX_RST_GET(x)\ + FIELD_GET(DEV10G_DEV_RST_CTRL_MAC_TX_RST, x) + +#define DEV10G_DEV_RST_CTRL_MAC_RX_RST BIT(0) +#define DEV10G_DEV_RST_CTRL_MAC_RX_RST_SET(x)\ + FIELD_PREP(DEV10G_DEV_RST_CTRL_MAC_RX_RST, x) +#define DEV10G_DEV_RST_CTRL_MAC_RX_RST_GET(x)\ + FIELD_GET(DEV10G_DEV_RST_CTRL_MAC_RX_RST, x) + +/* DEV10G:PCS25G_CFG_STATUS:PCS25G_CFG */ +#define DEV10G_PCS25G_CFG(t) __REG(TARGET_DEV10G, t, 12, 488, 0, 1, 32, 0, 0, 1, 4) + +#define DEV10G_PCS25G_CFG_PCS25G_ENA BIT(0) +#define DEV10G_PCS25G_CFG_PCS25G_ENA_SET(x)\ + FIELD_PREP(DEV10G_PCS25G_CFG_PCS25G_ENA, x) +#define DEV10G_PCS25G_CFG_PCS25G_ENA_GET(x)\ + FIELD_GET(DEV10G_PCS25G_CFG_PCS25G_ENA, x) + +/* DEV10G:MAC_CFG_STATUS:MAC_ENA_CFG */ +#define DEV25G_MAC_ENA_CFG(t) __REG(TARGET_DEV25G, t, 8, 0, 0, 1, 60, 0, 0, 1, 4) + +#define DEV25G_MAC_ENA_CFG_RX_ENA BIT(4) +#define DEV25G_MAC_ENA_CFG_RX_ENA_SET(x)\ + FIELD_PREP(DEV25G_MAC_ENA_CFG_RX_ENA, x) +#define DEV25G_MAC_ENA_CFG_RX_ENA_GET(x)\ + FIELD_GET(DEV25G_MAC_ENA_CFG_RX_ENA, x) + +#define DEV25G_MAC_ENA_CFG_TX_ENA BIT(0) +#define DEV25G_MAC_ENA_CFG_TX_ENA_SET(x)\ + FIELD_PREP(DEV25G_MAC_ENA_CFG_TX_ENA, x) +#define DEV25G_MAC_ENA_CFG_TX_ENA_GET(x)\ + FIELD_GET(DEV25G_MAC_ENA_CFG_TX_ENA, x) + +/* DEV10G:MAC_CFG_STATUS:MAC_MAXLEN_CFG */ +#define DEV25G_MAC_MAXLEN_CFG(t) __REG(TARGET_DEV25G, t, 8, 0, 0, 1, 60, 8, 0, 1, 4) + +#define DEV25G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK BIT(16) +#define DEV25G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK_SET(x)\ + FIELD_PREP(DEV25G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK, x) +#define DEV25G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK_GET(x)\ + FIELD_GET(DEV25G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK, x) + +#define DEV25G_MAC_MAXLEN_CFG_MAX_LEN GENMASK(15, 0) 
+#define DEV25G_MAC_MAXLEN_CFG_MAX_LEN_SET(x)\ + FIELD_PREP(DEV25G_MAC_MAXLEN_CFG_MAX_LEN, x) +#define DEV25G_MAC_MAXLEN_CFG_MAX_LEN_GET(x)\ + FIELD_GET(DEV25G_MAC_MAXLEN_CFG_MAX_LEN, x) + +/* DEV10G:MAC_CFG_STATUS:MAC_ADV_CHK_CFG */ +#define DEV25G_MAC_ADV_CHK_CFG(t) __REG(TARGET_DEV25G, t, 8, 0, 0, 1, 60, 28, 0, 1, 4) + +#define DEV25G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA BIT(24) +#define DEV25G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA_SET(x)\ + FIELD_PREP(DEV25G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA, x) +#define DEV25G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA_GET(x)\ + FIELD_GET(DEV25G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA, x) + +#define DEV25G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA BIT(20) +#define DEV25G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA_SET(x)\ + FIELD_PREP(DEV25G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA, x) +#define DEV25G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA_GET(x)\ + FIELD_GET(DEV25G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA, x) + +#define DEV25G_MAC_ADV_CHK_CFG_SFD_CHK_ENA BIT(16) +#define DEV25G_MAC_ADV_CHK_CFG_SFD_CHK_ENA_SET(x)\ + FIELD_PREP(DEV25G_MAC_ADV_CHK_CFG_SFD_CHK_ENA, x) +#define DEV25G_MAC_ADV_CHK_CFG_SFD_CHK_ENA_GET(x)\ + FIELD_GET(DEV25G_MAC_ADV_CHK_CFG_SFD_CHK_ENA, x) + +#define DEV25G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS BIT(12) +#define DEV25G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS_SET(x)\ + FIELD_PREP(DEV25G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS, x) +#define DEV25G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS_GET(x)\ + FIELD_GET(DEV25G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS, x) + +#define DEV25G_MAC_ADV_CHK_CFG_PRM_CHK_ENA BIT(8) +#define DEV25G_MAC_ADV_CHK_CFG_PRM_CHK_ENA_SET(x)\ + FIELD_PREP(DEV25G_MAC_ADV_CHK_CFG_PRM_CHK_ENA, x) +#define DEV25G_MAC_ADV_CHK_CFG_PRM_CHK_ENA_GET(x)\ + FIELD_GET(DEV25G_MAC_ADV_CHK_CFG_PRM_CHK_ENA, x) + +#define DEV25G_MAC_ADV_CHK_CFG_OOR_ERR_ENA BIT(4) +#define DEV25G_MAC_ADV_CHK_CFG_OOR_ERR_ENA_SET(x)\ + FIELD_PREP(DEV25G_MAC_ADV_CHK_CFG_OOR_ERR_ENA, x) +#define DEV25G_MAC_ADV_CHK_CFG_OOR_ERR_ENA_GET(x)\ + FIELD_GET(DEV25G_MAC_ADV_CHK_CFG_OOR_ERR_ENA, x) + +#define DEV25G_MAC_ADV_CHK_CFG_INR_ERR_ENA BIT(0) +#define DEV25G_MAC_ADV_CHK_CFG_INR_ERR_ENA_SET(x)\ + FIELD_PREP(DEV25G_MAC_ADV_CHK_CFG_INR_ERR_ENA, x) +#define DEV25G_MAC_ADV_CHK_CFG_INR_ERR_ENA_GET(x)\ + FIELD_GET(DEV25G_MAC_ADV_CHK_CFG_INR_ERR_ENA, x) + +/* DEV10G:DEV_CFG_STATUS:DEV_RST_CTRL */ +#define DEV25G_DEV_RST_CTRL(t) __REG(TARGET_DEV25G, t, 8, 436, 0, 1, 52, 0, 0, 1, 4) + +#define DEV25G_DEV_RST_CTRL_PARDET_MODE_ENA BIT(28) +#define DEV25G_DEV_RST_CTRL_PARDET_MODE_ENA_SET(x)\ + FIELD_PREP(DEV25G_DEV_RST_CTRL_PARDET_MODE_ENA, x) +#define DEV25G_DEV_RST_CTRL_PARDET_MODE_ENA_GET(x)\ + FIELD_GET(DEV25G_DEV_RST_CTRL_PARDET_MODE_ENA, x) + +#define DEV25G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS BIT(27) +#define DEV25G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS_SET(x)\ + FIELD_PREP(DEV25G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS, x) +#define DEV25G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS_GET(x)\ + FIELD_GET(DEV25G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS, x) + +#define DEV25G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS GENMASK(26, 25) +#define DEV25G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS_SET(x)\ + FIELD_PREP(DEV25G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS, x) +#define DEV25G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS_GET(x)\ + FIELD_GET(DEV25G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS, x) + +#define DEV25G_DEV_RST_CTRL_SERDES_SPEED_SEL GENMASK(24, 23) +#define DEV25G_DEV_RST_CTRL_SERDES_SPEED_SEL_SET(x)\ + FIELD_PREP(DEV25G_DEV_RST_CTRL_SERDES_SPEED_SEL, x) +#define DEV25G_DEV_RST_CTRL_SERDES_SPEED_SEL_GET(x)\ + FIELD_GET(DEV25G_DEV_RST_CTRL_SERDES_SPEED_SEL, x) + +#define 
DEV25G_DEV_RST_CTRL_SPEED_SEL GENMASK(22, 20) +#define DEV25G_DEV_RST_CTRL_SPEED_SEL_SET(x)\ + FIELD_PREP(DEV25G_DEV_RST_CTRL_SPEED_SEL, x) +#define DEV25G_DEV_RST_CTRL_SPEED_SEL_GET(x)\ + FIELD_GET(DEV25G_DEV_RST_CTRL_SPEED_SEL, x) + +#define DEV25G_DEV_RST_CTRL_PCS_TX_RST BIT(12) +#define DEV25G_DEV_RST_CTRL_PCS_TX_RST_SET(x)\ + FIELD_PREP(DEV25G_DEV_RST_CTRL_PCS_TX_RST, x) +#define DEV25G_DEV_RST_CTRL_PCS_TX_RST_GET(x)\ + FIELD_GET(DEV25G_DEV_RST_CTRL_PCS_TX_RST, x) + +#define DEV25G_DEV_RST_CTRL_PCS_RX_RST BIT(8) +#define DEV25G_DEV_RST_CTRL_PCS_RX_RST_SET(x)\ + FIELD_PREP(DEV25G_DEV_RST_CTRL_PCS_RX_RST, x) +#define DEV25G_DEV_RST_CTRL_PCS_RX_RST_GET(x)\ + FIELD_GET(DEV25G_DEV_RST_CTRL_PCS_RX_RST, x) + +#define DEV25G_DEV_RST_CTRL_MAC_TX_RST BIT(4) +#define DEV25G_DEV_RST_CTRL_MAC_TX_RST_SET(x)\ + FIELD_PREP(DEV25G_DEV_RST_CTRL_MAC_TX_RST, x) +#define DEV25G_DEV_RST_CTRL_MAC_TX_RST_GET(x)\ + FIELD_GET(DEV25G_DEV_RST_CTRL_MAC_TX_RST, x) + +#define DEV25G_DEV_RST_CTRL_MAC_RX_RST BIT(0) +#define DEV25G_DEV_RST_CTRL_MAC_RX_RST_SET(x)\ + FIELD_PREP(DEV25G_DEV_RST_CTRL_MAC_RX_RST, x) +#define DEV25G_DEV_RST_CTRL_MAC_RX_RST_GET(x)\ + FIELD_GET(DEV25G_DEV_RST_CTRL_MAC_RX_RST, x) + +/* DEV10G:PCS25G_CFG_STATUS:PCS25G_CFG */ +#define DEV25G_PCS25G_CFG(t) __REG(TARGET_DEV25G, t, 8, 488, 0, 1, 32, 0, 0, 1, 4) + +#define DEV25G_PCS25G_CFG_PCS25G_ENA BIT(0) +#define DEV25G_PCS25G_CFG_PCS25G_ENA_SET(x)\ + FIELD_PREP(DEV25G_PCS25G_CFG_PCS25G_ENA, x) +#define DEV25G_PCS25G_CFG_PCS25G_ENA_GET(x)\ + FIELD_GET(DEV25G_PCS25G_CFG_PCS25G_ENA, x) + +/* DEV10G:PCS25G_CFG_STATUS:PCS25G_SD_CFG */ +#define DEV25G_PCS25G_SD_CFG(t) __REG(TARGET_DEV25G, t, 8, 488, 0, 1, 32, 4, 0, 1, 4) + +#define DEV25G_PCS25G_SD_CFG_SD_SEL BIT(8) +#define DEV25G_PCS25G_SD_CFG_SD_SEL_SET(x)\ + FIELD_PREP(DEV25G_PCS25G_SD_CFG_SD_SEL, x) +#define DEV25G_PCS25G_SD_CFG_SD_SEL_GET(x)\ + FIELD_GET(DEV25G_PCS25G_SD_CFG_SD_SEL, x) + +#define DEV25G_PCS25G_SD_CFG_SD_POL BIT(4) +#define DEV25G_PCS25G_SD_CFG_SD_POL_SET(x)\ + FIELD_PREP(DEV25G_PCS25G_SD_CFG_SD_POL, x) +#define DEV25G_PCS25G_SD_CFG_SD_POL_GET(x)\ + FIELD_GET(DEV25G_PCS25G_SD_CFG_SD_POL, x) + +#define DEV25G_PCS25G_SD_CFG_SD_ENA BIT(0) +#define DEV25G_PCS25G_SD_CFG_SD_ENA_SET(x)\ + FIELD_PREP(DEV25G_PCS25G_SD_CFG_SD_ENA, x) +#define DEV25G_PCS25G_SD_CFG_SD_ENA_GET(x)\ + FIELD_GET(DEV25G_PCS25G_SD_CFG_SD_ENA, x) + +/* DEV1G:DEV_CFG_STATUS:DEV_RST_CTRL */ +#define DEV2G5_DEV_RST_CTRL(t) __REG(TARGET_DEV2G5, t, 65, 0, 0, 1, 36, 0, 0, 1, 4) + +#define DEV2G5_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS BIT(23) +#define DEV2G5_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS_SET(x)\ + FIELD_PREP(DEV2G5_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS, x) +#define DEV2G5_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS_GET(x)\ + FIELD_GET(DEV2G5_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS, x) + +#define DEV2G5_DEV_RST_CTRL_SPEED_SEL GENMASK(22, 20) +#define DEV2G5_DEV_RST_CTRL_SPEED_SEL_SET(x)\ + FIELD_PREP(DEV2G5_DEV_RST_CTRL_SPEED_SEL, x) +#define DEV2G5_DEV_RST_CTRL_SPEED_SEL_GET(x)\ + FIELD_GET(DEV2G5_DEV_RST_CTRL_SPEED_SEL, x) + +#define DEV2G5_DEV_RST_CTRL_USX_PCS_TX_RST BIT(17) +#define DEV2G5_DEV_RST_CTRL_USX_PCS_TX_RST_SET(x)\ + FIELD_PREP(DEV2G5_DEV_RST_CTRL_USX_PCS_TX_RST, x) +#define DEV2G5_DEV_RST_CTRL_USX_PCS_TX_RST_GET(x)\ + FIELD_GET(DEV2G5_DEV_RST_CTRL_USX_PCS_TX_RST, x) + +#define DEV2G5_DEV_RST_CTRL_USX_PCS_RX_RST BIT(16) +#define DEV2G5_DEV_RST_CTRL_USX_PCS_RX_RST_SET(x)\ + FIELD_PREP(DEV2G5_DEV_RST_CTRL_USX_PCS_RX_RST, x) +#define DEV2G5_DEV_RST_CTRL_USX_PCS_RX_RST_GET(x)\ + 
FIELD_GET(DEV2G5_DEV_RST_CTRL_USX_PCS_RX_RST, x) + +#define DEV2G5_DEV_RST_CTRL_PCS_TX_RST BIT(12) +#define DEV2G5_DEV_RST_CTRL_PCS_TX_RST_SET(x)\ + FIELD_PREP(DEV2G5_DEV_RST_CTRL_PCS_TX_RST, x) +#define DEV2G5_DEV_RST_CTRL_PCS_TX_RST_GET(x)\ + FIELD_GET(DEV2G5_DEV_RST_CTRL_PCS_TX_RST, x) + +#define DEV2G5_DEV_RST_CTRL_PCS_RX_RST BIT(8) +#define DEV2G5_DEV_RST_CTRL_PCS_RX_RST_SET(x)\ + FIELD_PREP(DEV2G5_DEV_RST_CTRL_PCS_RX_RST, x) +#define DEV2G5_DEV_RST_CTRL_PCS_RX_RST_GET(x)\ + FIELD_GET(DEV2G5_DEV_RST_CTRL_PCS_RX_RST, x) + +#define DEV2G5_DEV_RST_CTRL_MAC_TX_RST BIT(4) +#define DEV2G5_DEV_RST_CTRL_MAC_TX_RST_SET(x)\ + FIELD_PREP(DEV2G5_DEV_RST_CTRL_MAC_TX_RST, x) +#define DEV2G5_DEV_RST_CTRL_MAC_TX_RST_GET(x)\ + FIELD_GET(DEV2G5_DEV_RST_CTRL_MAC_TX_RST, x) + +#define DEV2G5_DEV_RST_CTRL_MAC_RX_RST BIT(0) +#define DEV2G5_DEV_RST_CTRL_MAC_RX_RST_SET(x)\ + FIELD_PREP(DEV2G5_DEV_RST_CTRL_MAC_RX_RST, x) +#define DEV2G5_DEV_RST_CTRL_MAC_RX_RST_GET(x)\ + FIELD_GET(DEV2G5_DEV_RST_CTRL_MAC_RX_RST, x) + +/* DEV1G:MAC_CFG_STATUS:MAC_ENA_CFG */ +#define DEV2G5_MAC_ENA_CFG(t) __REG(TARGET_DEV2G5, t, 65, 52, 0, 1, 36, 0, 0, 1, 4) + +#define DEV2G5_MAC_ENA_CFG_RX_ENA BIT(4) +#define DEV2G5_MAC_ENA_CFG_RX_ENA_SET(x)\ + FIELD_PREP(DEV2G5_MAC_ENA_CFG_RX_ENA, x) +#define DEV2G5_MAC_ENA_CFG_RX_ENA_GET(x)\ + FIELD_GET(DEV2G5_MAC_ENA_CFG_RX_ENA, x) + +#define DEV2G5_MAC_ENA_CFG_TX_ENA BIT(0) +#define DEV2G5_MAC_ENA_CFG_TX_ENA_SET(x)\ + FIELD_PREP(DEV2G5_MAC_ENA_CFG_TX_ENA, x) +#define DEV2G5_MAC_ENA_CFG_TX_ENA_GET(x)\ + FIELD_GET(DEV2G5_MAC_ENA_CFG_TX_ENA, x) + +/* DEV1G:MAC_CFG_STATUS:MAC_MODE_CFG */ +#define DEV2G5_MAC_MODE_CFG(t) __REG(TARGET_DEV2G5, t, 65, 52, 0, 1, 36, 4, 0, 1, 4) + +#define DEV2G5_MAC_MODE_CFG_FC_WORD_SYNC_ENA BIT(8) +#define DEV2G5_MAC_MODE_CFG_FC_WORD_SYNC_ENA_SET(x)\ + FIELD_PREP(DEV2G5_MAC_MODE_CFG_FC_WORD_SYNC_ENA, x) +#define DEV2G5_MAC_MODE_CFG_FC_WORD_SYNC_ENA_GET(x)\ + FIELD_GET(DEV2G5_MAC_MODE_CFG_FC_WORD_SYNC_ENA, x) + +#define DEV2G5_MAC_MODE_CFG_GIGA_MODE_ENA BIT(4) +#define DEV2G5_MAC_MODE_CFG_GIGA_MODE_ENA_SET(x)\ + FIELD_PREP(DEV2G5_MAC_MODE_CFG_GIGA_MODE_ENA, x) +#define DEV2G5_MAC_MODE_CFG_GIGA_MODE_ENA_GET(x)\ + FIELD_GET(DEV2G5_MAC_MODE_CFG_GIGA_MODE_ENA, x) + +#define DEV2G5_MAC_MODE_CFG_FDX_ENA BIT(0) +#define DEV2G5_MAC_MODE_CFG_FDX_ENA_SET(x)\ + FIELD_PREP(DEV2G5_MAC_MODE_CFG_FDX_ENA, x) +#define DEV2G5_MAC_MODE_CFG_FDX_ENA_GET(x)\ + FIELD_GET(DEV2G5_MAC_MODE_CFG_FDX_ENA, x) + +/* DEV1G:MAC_CFG_STATUS:MAC_MAXLEN_CFG */ +#define DEV2G5_MAC_MAXLEN_CFG(t) __REG(TARGET_DEV2G5, t, 65, 52, 0, 1, 36, 8, 0, 1, 4) + +#define DEV2G5_MAC_MAXLEN_CFG_MAX_LEN GENMASK(15, 0) +#define DEV2G5_MAC_MAXLEN_CFG_MAX_LEN_SET(x)\ + FIELD_PREP(DEV2G5_MAC_MAXLEN_CFG_MAX_LEN, x) +#define DEV2G5_MAC_MAXLEN_CFG_MAX_LEN_GET(x)\ + FIELD_GET(DEV2G5_MAC_MAXLEN_CFG_MAX_LEN, x) + +/* DEV1G:MAC_CFG_STATUS:MAC_TAGS_CFG */ +#define DEV2G5_MAC_TAGS_CFG(t) __REG(TARGET_DEV2G5, t, 65, 52, 0, 1, 36, 12, 0, 1, 4) + +#define DEV2G5_MAC_TAGS_CFG_TAG_ID GENMASK(31, 16) +#define DEV2G5_MAC_TAGS_CFG_TAG_ID_SET(x)\ + FIELD_PREP(DEV2G5_MAC_TAGS_CFG_TAG_ID, x) +#define DEV2G5_MAC_TAGS_CFG_TAG_ID_GET(x)\ + FIELD_GET(DEV2G5_MAC_TAGS_CFG_TAG_ID, x) + +#define DEV2G5_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA BIT(3) +#define DEV2G5_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA_SET(x)\ + FIELD_PREP(DEV2G5_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA, x) +#define DEV2G5_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA_GET(x)\ + FIELD_GET(DEV2G5_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA, x) + +#define DEV2G5_MAC_TAGS_CFG_PB_ENA GENMASK(2, 1) +#define DEV2G5_MAC_TAGS_CFG_PB_ENA_SET(x)\ + 
FIELD_PREP(DEV2G5_MAC_TAGS_CFG_PB_ENA, x) +#define DEV2G5_MAC_TAGS_CFG_PB_ENA_GET(x)\ + FIELD_GET(DEV2G5_MAC_TAGS_CFG_PB_ENA, x) + +#define DEV2G5_MAC_TAGS_CFG_VLAN_AWR_ENA BIT(0) +#define DEV2G5_MAC_TAGS_CFG_VLAN_AWR_ENA_SET(x)\ + FIELD_PREP(DEV2G5_MAC_TAGS_CFG_VLAN_AWR_ENA, x) +#define DEV2G5_MAC_TAGS_CFG_VLAN_AWR_ENA_GET(x)\ + FIELD_GET(DEV2G5_MAC_TAGS_CFG_VLAN_AWR_ENA, x) + +/* DEV1G:MAC_CFG_STATUS:MAC_TAGS_CFG2 */ +#define DEV2G5_MAC_TAGS_CFG2(t) __REG(TARGET_DEV2G5, t, 65, 52, 0, 1, 36, 16, 0, 1, 4) + +#define DEV2G5_MAC_TAGS_CFG2_TAG_ID3 GENMASK(31, 16) +#define DEV2G5_MAC_TAGS_CFG2_TAG_ID3_SET(x)\ + FIELD_PREP(DEV2G5_MAC_TAGS_CFG2_TAG_ID3, x) +#define DEV2G5_MAC_TAGS_CFG2_TAG_ID3_GET(x)\ + FIELD_GET(DEV2G5_MAC_TAGS_CFG2_TAG_ID3, x) + +#define DEV2G5_MAC_TAGS_CFG2_TAG_ID2 GENMASK(15, 0) +#define DEV2G5_MAC_TAGS_CFG2_TAG_ID2_SET(x)\ + FIELD_PREP(DEV2G5_MAC_TAGS_CFG2_TAG_ID2, x) +#define DEV2G5_MAC_TAGS_CFG2_TAG_ID2_GET(x)\ + FIELD_GET(DEV2G5_MAC_TAGS_CFG2_TAG_ID2, x) + +/* DEV1G:MAC_CFG_STATUS:MAC_ADV_CHK_CFG */ +#define DEV2G5_MAC_ADV_CHK_CFG(t) __REG(TARGET_DEV2G5, t, 65, 52, 0, 1, 36, 20, 0, 1, 4) + +#define DEV2G5_MAC_ADV_CHK_CFG_LEN_DROP_ENA BIT(0) +#define DEV2G5_MAC_ADV_CHK_CFG_LEN_DROP_ENA_SET(x)\ + FIELD_PREP(DEV2G5_MAC_ADV_CHK_CFG_LEN_DROP_ENA, x) +#define DEV2G5_MAC_ADV_CHK_CFG_LEN_DROP_ENA_GET(x)\ + FIELD_GET(DEV2G5_MAC_ADV_CHK_CFG_LEN_DROP_ENA, x) + +/* DEV1G:MAC_CFG_STATUS:MAC_IFG_CFG */ +#define DEV2G5_MAC_IFG_CFG(t) __REG(TARGET_DEV2G5, t, 65, 52, 0, 1, 36, 24, 0, 1, 4) + +#define DEV2G5_MAC_IFG_CFG_RESTORE_OLD_IPG_CHECK BIT(17) +#define DEV2G5_MAC_IFG_CFG_RESTORE_OLD_IPG_CHECK_SET(x)\ + FIELD_PREP(DEV2G5_MAC_IFG_CFG_RESTORE_OLD_IPG_CHECK, x) +#define DEV2G5_MAC_IFG_CFG_RESTORE_OLD_IPG_CHECK_GET(x)\ + FIELD_GET(DEV2G5_MAC_IFG_CFG_RESTORE_OLD_IPG_CHECK, x) + +#define DEV2G5_MAC_IFG_CFG_TX_IFG GENMASK(12, 8) +#define DEV2G5_MAC_IFG_CFG_TX_IFG_SET(x)\ + FIELD_PREP(DEV2G5_MAC_IFG_CFG_TX_IFG, x) +#define DEV2G5_MAC_IFG_CFG_TX_IFG_GET(x)\ + FIELD_GET(DEV2G5_MAC_IFG_CFG_TX_IFG, x) + +#define DEV2G5_MAC_IFG_CFG_RX_IFG2 GENMASK(7, 4) +#define DEV2G5_MAC_IFG_CFG_RX_IFG2_SET(x)\ + FIELD_PREP(DEV2G5_MAC_IFG_CFG_RX_IFG2, x) +#define DEV2G5_MAC_IFG_CFG_RX_IFG2_GET(x)\ + FIELD_GET(DEV2G5_MAC_IFG_CFG_RX_IFG2, x) + +#define DEV2G5_MAC_IFG_CFG_RX_IFG1 GENMASK(3, 0) +#define DEV2G5_MAC_IFG_CFG_RX_IFG1_SET(x)\ + FIELD_PREP(DEV2G5_MAC_IFG_CFG_RX_IFG1, x) +#define DEV2G5_MAC_IFG_CFG_RX_IFG1_GET(x)\ + FIELD_GET(DEV2G5_MAC_IFG_CFG_RX_IFG1, x) + +/* DEV1G:MAC_CFG_STATUS:MAC_HDX_CFG */ +#define DEV2G5_MAC_HDX_CFG(t) __REG(TARGET_DEV2G5, t, 65, 52, 0, 1, 36, 28, 0, 1, 4) + +#define DEV2G5_MAC_HDX_CFG_BYPASS_COL_SYNC BIT(26) +#define DEV2G5_MAC_HDX_CFG_BYPASS_COL_SYNC_SET(x)\ + FIELD_PREP(DEV2G5_MAC_HDX_CFG_BYPASS_COL_SYNC, x) +#define DEV2G5_MAC_HDX_CFG_BYPASS_COL_SYNC_GET(x)\ + FIELD_GET(DEV2G5_MAC_HDX_CFG_BYPASS_COL_SYNC, x) + +#define DEV2G5_MAC_HDX_CFG_SEED GENMASK(23, 16) +#define DEV2G5_MAC_HDX_CFG_SEED_SET(x)\ + FIELD_PREP(DEV2G5_MAC_HDX_CFG_SEED, x) +#define DEV2G5_MAC_HDX_CFG_SEED_GET(x)\ + FIELD_GET(DEV2G5_MAC_HDX_CFG_SEED, x) + +#define DEV2G5_MAC_HDX_CFG_SEED_LOAD BIT(12) +#define DEV2G5_MAC_HDX_CFG_SEED_LOAD_SET(x)\ + FIELD_PREP(DEV2G5_MAC_HDX_CFG_SEED_LOAD, x) +#define DEV2G5_MAC_HDX_CFG_SEED_LOAD_GET(x)\ + FIELD_GET(DEV2G5_MAC_HDX_CFG_SEED_LOAD, x) + +#define DEV2G5_MAC_HDX_CFG_RETRY_AFTER_EXC_COL_ENA BIT(8) +#define DEV2G5_MAC_HDX_CFG_RETRY_AFTER_EXC_COL_ENA_SET(x)\ + FIELD_PREP(DEV2G5_MAC_HDX_CFG_RETRY_AFTER_EXC_COL_ENA, x) +#define 
DEV2G5_MAC_HDX_CFG_RETRY_AFTER_EXC_COL_ENA_GET(x)\ + FIELD_GET(DEV2G5_MAC_HDX_CFG_RETRY_AFTER_EXC_COL_ENA, x) + +#define DEV2G5_MAC_HDX_CFG_LATE_COL_POS GENMASK(6, 0) +#define DEV2G5_MAC_HDX_CFG_LATE_COL_POS_SET(x)\ + FIELD_PREP(DEV2G5_MAC_HDX_CFG_LATE_COL_POS, x) +#define DEV2G5_MAC_HDX_CFG_LATE_COL_POS_GET(x)\ + FIELD_GET(DEV2G5_MAC_HDX_CFG_LATE_COL_POS, x) + +/* DEV1G:PCS1G_CFG_STATUS:PCS1G_CFG */ +#define DEV2G5_PCS1G_CFG(t) __REG(TARGET_DEV2G5, t, 65, 88, 0, 1, 68, 0, 0, 1, 4) + +#define DEV2G5_PCS1G_CFG_LINK_STATUS_TYPE BIT(4) +#define DEV2G5_PCS1G_CFG_LINK_STATUS_TYPE_SET(x)\ + FIELD_PREP(DEV2G5_PCS1G_CFG_LINK_STATUS_TYPE, x) +#define DEV2G5_PCS1G_CFG_LINK_STATUS_TYPE_GET(x)\ + FIELD_GET(DEV2G5_PCS1G_CFG_LINK_STATUS_TYPE, x) + +#define DEV2G5_PCS1G_CFG_AN_LINK_CTRL_ENA BIT(1) +#define DEV2G5_PCS1G_CFG_AN_LINK_CTRL_ENA_SET(x)\ + FIELD_PREP(DEV2G5_PCS1G_CFG_AN_LINK_CTRL_ENA, x) +#define DEV2G5_PCS1G_CFG_AN_LINK_CTRL_ENA_GET(x)\ + FIELD_GET(DEV2G5_PCS1G_CFG_AN_LINK_CTRL_ENA, x) + +#define DEV2G5_PCS1G_CFG_PCS_ENA BIT(0) +#define DEV2G5_PCS1G_CFG_PCS_ENA_SET(x)\ + FIELD_PREP(DEV2G5_PCS1G_CFG_PCS_ENA, x) +#define DEV2G5_PCS1G_CFG_PCS_ENA_GET(x)\ + FIELD_GET(DEV2G5_PCS1G_CFG_PCS_ENA, x) + +/* DEV1G:PCS1G_CFG_STATUS:PCS1G_MODE_CFG */ +#define DEV2G5_PCS1G_MODE_CFG(t) __REG(TARGET_DEV2G5, t, 65, 88, 0, 1, 68, 4, 0, 1, 4) + +#define DEV2G5_PCS1G_MODE_CFG_UNIDIR_MODE_ENA BIT(4) +#define DEV2G5_PCS1G_MODE_CFG_UNIDIR_MODE_ENA_SET(x)\ + FIELD_PREP(DEV2G5_PCS1G_MODE_CFG_UNIDIR_MODE_ENA, x) +#define DEV2G5_PCS1G_MODE_CFG_UNIDIR_MODE_ENA_GET(x)\ + FIELD_GET(DEV2G5_PCS1G_MODE_CFG_UNIDIR_MODE_ENA, x) + +#define DEV2G5_PCS1G_MODE_CFG_SAVE_PREAMBLE_ENA BIT(1) +#define DEV2G5_PCS1G_MODE_CFG_SAVE_PREAMBLE_ENA_SET(x)\ + FIELD_PREP(DEV2G5_PCS1G_MODE_CFG_SAVE_PREAMBLE_ENA, x) +#define DEV2G5_PCS1G_MODE_CFG_SAVE_PREAMBLE_ENA_GET(x)\ + FIELD_GET(DEV2G5_PCS1G_MODE_CFG_SAVE_PREAMBLE_ENA, x) + +#define DEV2G5_PCS1G_MODE_CFG_SGMII_MODE_ENA BIT(0) +#define DEV2G5_PCS1G_MODE_CFG_SGMII_MODE_ENA_SET(x)\ + FIELD_PREP(DEV2G5_PCS1G_MODE_CFG_SGMII_MODE_ENA, x) +#define DEV2G5_PCS1G_MODE_CFG_SGMII_MODE_ENA_GET(x)\ + FIELD_GET(DEV2G5_PCS1G_MODE_CFG_SGMII_MODE_ENA, x) + +/* DEV1G:PCS1G_CFG_STATUS:PCS1G_SD_CFG */ +#define DEV2G5_PCS1G_SD_CFG(t) __REG(TARGET_DEV2G5, t, 65, 88, 0, 1, 68, 8, 0, 1, 4) + +#define DEV2G5_PCS1G_SD_CFG_SD_SEL BIT(8) +#define DEV2G5_PCS1G_SD_CFG_SD_SEL_SET(x)\ + FIELD_PREP(DEV2G5_PCS1G_SD_CFG_SD_SEL, x) +#define DEV2G5_PCS1G_SD_CFG_SD_SEL_GET(x)\ + FIELD_GET(DEV2G5_PCS1G_SD_CFG_SD_SEL, x) + +#define DEV2G5_PCS1G_SD_CFG_SD_POL BIT(4) +#define DEV2G5_PCS1G_SD_CFG_SD_POL_SET(x)\ + FIELD_PREP(DEV2G5_PCS1G_SD_CFG_SD_POL, x) +#define DEV2G5_PCS1G_SD_CFG_SD_POL_GET(x)\ + FIELD_GET(DEV2G5_PCS1G_SD_CFG_SD_POL, x) + +#define DEV2G5_PCS1G_SD_CFG_SD_ENA BIT(0) +#define DEV2G5_PCS1G_SD_CFG_SD_ENA_SET(x)\ + FIELD_PREP(DEV2G5_PCS1G_SD_CFG_SD_ENA, x) +#define DEV2G5_PCS1G_SD_CFG_SD_ENA_GET(x)\ + FIELD_GET(DEV2G5_PCS1G_SD_CFG_SD_ENA, x) + +/* DEV1G:PCS1G_CFG_STATUS:PCS1G_ANEG_CFG */ +#define DEV2G5_PCS1G_ANEG_CFG(t) __REG(TARGET_DEV2G5, t, 65, 88, 0, 1, 68, 12, 0, 1, 4) + +#define DEV2G5_PCS1G_ANEG_CFG_ADV_ABILITY GENMASK(31, 16) +#define DEV2G5_PCS1G_ANEG_CFG_ADV_ABILITY_SET(x)\ + FIELD_PREP(DEV2G5_PCS1G_ANEG_CFG_ADV_ABILITY, x) +#define DEV2G5_PCS1G_ANEG_CFG_ADV_ABILITY_GET(x)\ + FIELD_GET(DEV2G5_PCS1G_ANEG_CFG_ADV_ABILITY, x) + +#define DEV2G5_PCS1G_ANEG_CFG_SW_RESOLVE_ENA BIT(8) +#define DEV2G5_PCS1G_ANEG_CFG_SW_RESOLVE_ENA_SET(x)\ + FIELD_PREP(DEV2G5_PCS1G_ANEG_CFG_SW_RESOLVE_ENA, x) +#define 
DEV2G5_PCS1G_ANEG_CFG_SW_RESOLVE_ENA_GET(x)\ + FIELD_GET(DEV2G5_PCS1G_ANEG_CFG_SW_RESOLVE_ENA, x) + +#define DEV2G5_PCS1G_ANEG_CFG_ANEG_RESTART_ONE_SHOT BIT(1) +#define DEV2G5_PCS1G_ANEG_CFG_ANEG_RESTART_ONE_SHOT_SET(x)\ + FIELD_PREP(DEV2G5_PCS1G_ANEG_CFG_ANEG_RESTART_ONE_SHOT, x) +#define DEV2G5_PCS1G_ANEG_CFG_ANEG_RESTART_ONE_SHOT_GET(x)\ + FIELD_GET(DEV2G5_PCS1G_ANEG_CFG_ANEG_RESTART_ONE_SHOT, x) + +#define DEV2G5_PCS1G_ANEG_CFG_ANEG_ENA BIT(0) +#define DEV2G5_PCS1G_ANEG_CFG_ANEG_ENA_SET(x)\ + FIELD_PREP(DEV2G5_PCS1G_ANEG_CFG_ANEG_ENA, x) +#define DEV2G5_PCS1G_ANEG_CFG_ANEG_ENA_GET(x)\ + FIELD_GET(DEV2G5_PCS1G_ANEG_CFG_ANEG_ENA, x) + +/* DEV1G:PCS1G_CFG_STATUS:PCS1G_LB_CFG */ +#define DEV2G5_PCS1G_LB_CFG(t) __REG(TARGET_DEV2G5, t, 65, 88, 0, 1, 68, 20, 0, 1, 4) + +#define DEV2G5_PCS1G_LB_CFG_RA_ENA BIT(4) +#define DEV2G5_PCS1G_LB_CFG_RA_ENA_SET(x)\ + FIELD_PREP(DEV2G5_PCS1G_LB_CFG_RA_ENA, x) +#define DEV2G5_PCS1G_LB_CFG_RA_ENA_GET(x)\ + FIELD_GET(DEV2G5_PCS1G_LB_CFG_RA_ENA, x) + +#define DEV2G5_PCS1G_LB_CFG_GMII_PHY_LB_ENA BIT(1) +#define DEV2G5_PCS1G_LB_CFG_GMII_PHY_LB_ENA_SET(x)\ + FIELD_PREP(DEV2G5_PCS1G_LB_CFG_GMII_PHY_LB_ENA, x) +#define DEV2G5_PCS1G_LB_CFG_GMII_PHY_LB_ENA_GET(x)\ + FIELD_GET(DEV2G5_PCS1G_LB_CFG_GMII_PHY_LB_ENA, x) + +#define DEV2G5_PCS1G_LB_CFG_TBI_HOST_LB_ENA BIT(0) +#define DEV2G5_PCS1G_LB_CFG_TBI_HOST_LB_ENA_SET(x)\ + FIELD_PREP(DEV2G5_PCS1G_LB_CFG_TBI_HOST_LB_ENA, x) +#define DEV2G5_PCS1G_LB_CFG_TBI_HOST_LB_ENA_GET(x)\ + FIELD_GET(DEV2G5_PCS1G_LB_CFG_TBI_HOST_LB_ENA, x) + +/* DEV1G:PCS1G_CFG_STATUS:PCS1G_ANEG_STATUS */ +#define DEV2G5_PCS1G_ANEG_STATUS(t) __REG(TARGET_DEV2G5, t, 65, 88, 0, 1, 68, 32, 0, 1, 4) + +#define DEV2G5_PCS1G_ANEG_STATUS_LP_ADV_ABILITY GENMASK(31, 16) +#define DEV2G5_PCS1G_ANEG_STATUS_LP_ADV_ABILITY_SET(x)\ + FIELD_PREP(DEV2G5_PCS1G_ANEG_STATUS_LP_ADV_ABILITY, x) +#define DEV2G5_PCS1G_ANEG_STATUS_LP_ADV_ABILITY_GET(x)\ + FIELD_GET(DEV2G5_PCS1G_ANEG_STATUS_LP_ADV_ABILITY, x) + +#define DEV2G5_PCS1G_ANEG_STATUS_PR BIT(4) +#define DEV2G5_PCS1G_ANEG_STATUS_PR_SET(x)\ + FIELD_PREP(DEV2G5_PCS1G_ANEG_STATUS_PR, x) +#define DEV2G5_PCS1G_ANEG_STATUS_PR_GET(x)\ + FIELD_GET(DEV2G5_PCS1G_ANEG_STATUS_PR, x) + +#define DEV2G5_PCS1G_ANEG_STATUS_PAGE_RX_STICKY BIT(3) +#define DEV2G5_PCS1G_ANEG_STATUS_PAGE_RX_STICKY_SET(x)\ + FIELD_PREP(DEV2G5_PCS1G_ANEG_STATUS_PAGE_RX_STICKY, x) +#define DEV2G5_PCS1G_ANEG_STATUS_PAGE_RX_STICKY_GET(x)\ + FIELD_GET(DEV2G5_PCS1G_ANEG_STATUS_PAGE_RX_STICKY, x) + +#define DEV2G5_PCS1G_ANEG_STATUS_ANEG_COMPLETE BIT(0) +#define DEV2G5_PCS1G_ANEG_STATUS_ANEG_COMPLETE_SET(x)\ + FIELD_PREP(DEV2G5_PCS1G_ANEG_STATUS_ANEG_COMPLETE, x) +#define DEV2G5_PCS1G_ANEG_STATUS_ANEG_COMPLETE_GET(x)\ + FIELD_GET(DEV2G5_PCS1G_ANEG_STATUS_ANEG_COMPLETE, x) + +/* DEV1G:PCS1G_CFG_STATUS:PCS1G_LINK_STATUS */ +#define DEV2G5_PCS1G_LINK_STATUS(t) __REG(TARGET_DEV2G5, t, 65, 88, 0, 1, 68, 40, 0, 1, 4) + +#define DEV2G5_PCS1G_LINK_STATUS_DELAY_VAR GENMASK(15, 12) +#define DEV2G5_PCS1G_LINK_STATUS_DELAY_VAR_SET(x)\ + FIELD_PREP(DEV2G5_PCS1G_LINK_STATUS_DELAY_VAR, x) +#define DEV2G5_PCS1G_LINK_STATUS_DELAY_VAR_GET(x)\ + FIELD_GET(DEV2G5_PCS1G_LINK_STATUS_DELAY_VAR, x) + +#define DEV2G5_PCS1G_LINK_STATUS_SIGNAL_DETECT BIT(8) +#define DEV2G5_PCS1G_LINK_STATUS_SIGNAL_DETECT_SET(x)\ + FIELD_PREP(DEV2G5_PCS1G_LINK_STATUS_SIGNAL_DETECT, x) +#define DEV2G5_PCS1G_LINK_STATUS_SIGNAL_DETECT_GET(x)\ + FIELD_GET(DEV2G5_PCS1G_LINK_STATUS_SIGNAL_DETECT, x) + +#define DEV2G5_PCS1G_LINK_STATUS_LINK_STATUS BIT(4) +#define DEV2G5_PCS1G_LINK_STATUS_LINK_STATUS_SET(x)\ + 
FIELD_PREP(DEV2G5_PCS1G_LINK_STATUS_LINK_STATUS, x) +#define DEV2G5_PCS1G_LINK_STATUS_LINK_STATUS_GET(x)\ + FIELD_GET(DEV2G5_PCS1G_LINK_STATUS_LINK_STATUS, x) + +#define DEV2G5_PCS1G_LINK_STATUS_SYNC_STATUS BIT(0) +#define DEV2G5_PCS1G_LINK_STATUS_SYNC_STATUS_SET(x)\ + FIELD_PREP(DEV2G5_PCS1G_LINK_STATUS_SYNC_STATUS, x) +#define DEV2G5_PCS1G_LINK_STATUS_SYNC_STATUS_GET(x)\ + FIELD_GET(DEV2G5_PCS1G_LINK_STATUS_SYNC_STATUS, x) + +/* DEV1G:PCS1G_CFG_STATUS:PCS1G_STICKY */ +#define DEV2G5_PCS1G_STICKY(t) __REG(TARGET_DEV2G5, t, 65, 88, 0, 1, 68, 48, 0, 1, 4) + +#define DEV2G5_PCS1G_STICKY_LINK_DOWN_STICKY BIT(4) +#define DEV2G5_PCS1G_STICKY_LINK_DOWN_STICKY_SET(x)\ + FIELD_PREP(DEV2G5_PCS1G_STICKY_LINK_DOWN_STICKY, x) +#define DEV2G5_PCS1G_STICKY_LINK_DOWN_STICKY_GET(x)\ + FIELD_GET(DEV2G5_PCS1G_STICKY_LINK_DOWN_STICKY, x) + +#define DEV2G5_PCS1G_STICKY_OUT_OF_SYNC_STICKY BIT(0) +#define DEV2G5_PCS1G_STICKY_OUT_OF_SYNC_STICKY_SET(x)\ + FIELD_PREP(DEV2G5_PCS1G_STICKY_OUT_OF_SYNC_STICKY, x) +#define DEV2G5_PCS1G_STICKY_OUT_OF_SYNC_STICKY_GET(x)\ + FIELD_GET(DEV2G5_PCS1G_STICKY_OUT_OF_SYNC_STICKY, x) + +/* DEV1G:PCS_FX100_CONFIGURATION:PCS_FX100_CFG */ +#define DEV2G5_PCS_FX100_CFG(t) __REG(TARGET_DEV2G5, t, 65, 164, 0, 1, 4, 0, 0, 1, 4) + +#define DEV2G5_PCS_FX100_CFG_SD_SEL BIT(26) +#define DEV2G5_PCS_FX100_CFG_SD_SEL_SET(x)\ + FIELD_PREP(DEV2G5_PCS_FX100_CFG_SD_SEL, x) +#define DEV2G5_PCS_FX100_CFG_SD_SEL_GET(x)\ + FIELD_GET(DEV2G5_PCS_FX100_CFG_SD_SEL, x) + +#define DEV2G5_PCS_FX100_CFG_SD_POL BIT(25) +#define DEV2G5_PCS_FX100_CFG_SD_POL_SET(x)\ + FIELD_PREP(DEV2G5_PCS_FX100_CFG_SD_POL, x) +#define DEV2G5_PCS_FX100_CFG_SD_POL_GET(x)\ + FIELD_GET(DEV2G5_PCS_FX100_CFG_SD_POL, x) + +#define DEV2G5_PCS_FX100_CFG_SD_ENA BIT(24) +#define DEV2G5_PCS_FX100_CFG_SD_ENA_SET(x)\ + FIELD_PREP(DEV2G5_PCS_FX100_CFG_SD_ENA, x) +#define DEV2G5_PCS_FX100_CFG_SD_ENA_GET(x)\ + FIELD_GET(DEV2G5_PCS_FX100_CFG_SD_ENA, x) + +#define DEV2G5_PCS_FX100_CFG_LOOPBACK_ENA BIT(20) +#define DEV2G5_PCS_FX100_CFG_LOOPBACK_ENA_SET(x)\ + FIELD_PREP(DEV2G5_PCS_FX100_CFG_LOOPBACK_ENA, x) +#define DEV2G5_PCS_FX100_CFG_LOOPBACK_ENA_GET(x)\ + FIELD_GET(DEV2G5_PCS_FX100_CFG_LOOPBACK_ENA, x) + +#define DEV2G5_PCS_FX100_CFG_SWAP_MII_ENA BIT(16) +#define DEV2G5_PCS_FX100_CFG_SWAP_MII_ENA_SET(x)\ + FIELD_PREP(DEV2G5_PCS_FX100_CFG_SWAP_MII_ENA, x) +#define DEV2G5_PCS_FX100_CFG_SWAP_MII_ENA_GET(x)\ + FIELD_GET(DEV2G5_PCS_FX100_CFG_SWAP_MII_ENA, x) + +#define DEV2G5_PCS_FX100_CFG_RXBITSEL GENMASK(15, 12) +#define DEV2G5_PCS_FX100_CFG_RXBITSEL_SET(x)\ + FIELD_PREP(DEV2G5_PCS_FX100_CFG_RXBITSEL, x) +#define DEV2G5_PCS_FX100_CFG_RXBITSEL_GET(x)\ + FIELD_GET(DEV2G5_PCS_FX100_CFG_RXBITSEL, x) + +#define DEV2G5_PCS_FX100_CFG_SIGDET_CFG GENMASK(10, 9) +#define DEV2G5_PCS_FX100_CFG_SIGDET_CFG_SET(x)\ + FIELD_PREP(DEV2G5_PCS_FX100_CFG_SIGDET_CFG, x) +#define DEV2G5_PCS_FX100_CFG_SIGDET_CFG_GET(x)\ + FIELD_GET(DEV2G5_PCS_FX100_CFG_SIGDET_CFG, x) + +#define DEV2G5_PCS_FX100_CFG_LINKHYST_TM_ENA BIT(8) +#define DEV2G5_PCS_FX100_CFG_LINKHYST_TM_ENA_SET(x)\ + FIELD_PREP(DEV2G5_PCS_FX100_CFG_LINKHYST_TM_ENA, x) +#define DEV2G5_PCS_FX100_CFG_LINKHYST_TM_ENA_GET(x)\ + FIELD_GET(DEV2G5_PCS_FX100_CFG_LINKHYST_TM_ENA, x) + +#define DEV2G5_PCS_FX100_CFG_LINKHYSTTIMER GENMASK(7, 4) +#define DEV2G5_PCS_FX100_CFG_LINKHYSTTIMER_SET(x)\ + FIELD_PREP(DEV2G5_PCS_FX100_CFG_LINKHYSTTIMER, x) +#define DEV2G5_PCS_FX100_CFG_LINKHYSTTIMER_GET(x)\ + FIELD_GET(DEV2G5_PCS_FX100_CFG_LINKHYSTTIMER, x) + +#define DEV2G5_PCS_FX100_CFG_UNIDIR_MODE_ENA BIT(3) +#define 
DEV2G5_PCS_FX100_CFG_UNIDIR_MODE_ENA_SET(x)\ + FIELD_PREP(DEV2G5_PCS_FX100_CFG_UNIDIR_MODE_ENA, x) +#define DEV2G5_PCS_FX100_CFG_UNIDIR_MODE_ENA_GET(x)\ + FIELD_GET(DEV2G5_PCS_FX100_CFG_UNIDIR_MODE_ENA, x) + +#define DEV2G5_PCS_FX100_CFG_FEFCHK_ENA BIT(2) +#define DEV2G5_PCS_FX100_CFG_FEFCHK_ENA_SET(x)\ + FIELD_PREP(DEV2G5_PCS_FX100_CFG_FEFCHK_ENA, x) +#define DEV2G5_PCS_FX100_CFG_FEFCHK_ENA_GET(x)\ + FIELD_GET(DEV2G5_PCS_FX100_CFG_FEFCHK_ENA, x) + +#define DEV2G5_PCS_FX100_CFG_FEFGEN_ENA BIT(1) +#define DEV2G5_PCS_FX100_CFG_FEFGEN_ENA_SET(x)\ + FIELD_PREP(DEV2G5_PCS_FX100_CFG_FEFGEN_ENA, x) +#define DEV2G5_PCS_FX100_CFG_FEFGEN_ENA_GET(x)\ + FIELD_GET(DEV2G5_PCS_FX100_CFG_FEFGEN_ENA, x) + +#define DEV2G5_PCS_FX100_CFG_PCS_ENA BIT(0) +#define DEV2G5_PCS_FX100_CFG_PCS_ENA_SET(x)\ + FIELD_PREP(DEV2G5_PCS_FX100_CFG_PCS_ENA, x) +#define DEV2G5_PCS_FX100_CFG_PCS_ENA_GET(x)\ + FIELD_GET(DEV2G5_PCS_FX100_CFG_PCS_ENA, x) + +/* DEV1G:PCS_FX100_STATUS:PCS_FX100_STATUS */ +#define DEV2G5_PCS_FX100_STATUS(t) __REG(TARGET_DEV2G5, t, 65, 168, 0, 1, 4, 0, 0, 1, 4) + +#define DEV2G5_PCS_FX100_STATUS_EDGE_POS_PTP GENMASK(11, 8) +#define DEV2G5_PCS_FX100_STATUS_EDGE_POS_PTP_SET(x)\ + FIELD_PREP(DEV2G5_PCS_FX100_STATUS_EDGE_POS_PTP, x) +#define DEV2G5_PCS_FX100_STATUS_EDGE_POS_PTP_GET(x)\ + FIELD_GET(DEV2G5_PCS_FX100_STATUS_EDGE_POS_PTP, x) + +#define DEV2G5_PCS_FX100_STATUS_PCS_ERROR_STICKY BIT(7) +#define DEV2G5_PCS_FX100_STATUS_PCS_ERROR_STICKY_SET(x)\ + FIELD_PREP(DEV2G5_PCS_FX100_STATUS_PCS_ERROR_STICKY, x) +#define DEV2G5_PCS_FX100_STATUS_PCS_ERROR_STICKY_GET(x)\ + FIELD_GET(DEV2G5_PCS_FX100_STATUS_PCS_ERROR_STICKY, x) + +#define DEV2G5_PCS_FX100_STATUS_FEF_FOUND_STICKY BIT(6) +#define DEV2G5_PCS_FX100_STATUS_FEF_FOUND_STICKY_SET(x)\ + FIELD_PREP(DEV2G5_PCS_FX100_STATUS_FEF_FOUND_STICKY, x) +#define DEV2G5_PCS_FX100_STATUS_FEF_FOUND_STICKY_GET(x)\ + FIELD_GET(DEV2G5_PCS_FX100_STATUS_FEF_FOUND_STICKY, x) + +#define DEV2G5_PCS_FX100_STATUS_SSD_ERROR_STICKY BIT(5) +#define DEV2G5_PCS_FX100_STATUS_SSD_ERROR_STICKY_SET(x)\ + FIELD_PREP(DEV2G5_PCS_FX100_STATUS_SSD_ERROR_STICKY, x) +#define DEV2G5_PCS_FX100_STATUS_SSD_ERROR_STICKY_GET(x)\ + FIELD_GET(DEV2G5_PCS_FX100_STATUS_SSD_ERROR_STICKY, x) + +#define DEV2G5_PCS_FX100_STATUS_SYNC_LOST_STICKY BIT(4) +#define DEV2G5_PCS_FX100_STATUS_SYNC_LOST_STICKY_SET(x)\ + FIELD_PREP(DEV2G5_PCS_FX100_STATUS_SYNC_LOST_STICKY, x) +#define DEV2G5_PCS_FX100_STATUS_SYNC_LOST_STICKY_GET(x)\ + FIELD_GET(DEV2G5_PCS_FX100_STATUS_SYNC_LOST_STICKY, x) + +#define DEV2G5_PCS_FX100_STATUS_FEF_STATUS BIT(2) +#define DEV2G5_PCS_FX100_STATUS_FEF_STATUS_SET(x)\ + FIELD_PREP(DEV2G5_PCS_FX100_STATUS_FEF_STATUS, x) +#define DEV2G5_PCS_FX100_STATUS_FEF_STATUS_GET(x)\ + FIELD_GET(DEV2G5_PCS_FX100_STATUS_FEF_STATUS, x) + +#define DEV2G5_PCS_FX100_STATUS_SIGNAL_DETECT BIT(1) +#define DEV2G5_PCS_FX100_STATUS_SIGNAL_DETECT_SET(x)\ + FIELD_PREP(DEV2G5_PCS_FX100_STATUS_SIGNAL_DETECT, x) +#define DEV2G5_PCS_FX100_STATUS_SIGNAL_DETECT_GET(x)\ + FIELD_GET(DEV2G5_PCS_FX100_STATUS_SIGNAL_DETECT, x) + +#define DEV2G5_PCS_FX100_STATUS_SYNC_STATUS BIT(0) +#define DEV2G5_PCS_FX100_STATUS_SYNC_STATUS_SET(x)\ + FIELD_PREP(DEV2G5_PCS_FX100_STATUS_SYNC_STATUS, x) +#define DEV2G5_PCS_FX100_STATUS_SYNC_STATUS_GET(x)\ + FIELD_GET(DEV2G5_PCS_FX100_STATUS_SYNC_STATUS, x) + +/* DEV10G:MAC_CFG_STATUS:MAC_ENA_CFG */ +#define DEV5G_MAC_ENA_CFG(t) __REG(TARGET_DEV5G, t, 13, 0, 0, 1, 60, 0, 0, 1, 4) + +#define DEV5G_MAC_ENA_CFG_RX_ENA BIT(4) +#define DEV5G_MAC_ENA_CFG_RX_ENA_SET(x)\ + FIELD_PREP(DEV5G_MAC_ENA_CFG_RX_ENA, x) 
+#define DEV5G_MAC_ENA_CFG_RX_ENA_GET(x)\ + FIELD_GET(DEV5G_MAC_ENA_CFG_RX_ENA, x) + +#define DEV5G_MAC_ENA_CFG_TX_ENA BIT(0) +#define DEV5G_MAC_ENA_CFG_TX_ENA_SET(x)\ + FIELD_PREP(DEV5G_MAC_ENA_CFG_TX_ENA, x) +#define DEV5G_MAC_ENA_CFG_TX_ENA_GET(x)\ + FIELD_GET(DEV5G_MAC_ENA_CFG_TX_ENA, x) + +/* DEV10G:MAC_CFG_STATUS:MAC_MAXLEN_CFG */ +#define DEV5G_MAC_MAXLEN_CFG(t) __REG(TARGET_DEV5G, t, 13, 0, 0, 1, 60, 8, 0, 1, 4) + +#define DEV5G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK BIT(16) +#define DEV5G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK_SET(x)\ + FIELD_PREP(DEV5G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK, x) +#define DEV5G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK_GET(x)\ + FIELD_GET(DEV5G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK, x) + +#define DEV5G_MAC_MAXLEN_CFG_MAX_LEN GENMASK(15, 0) +#define DEV5G_MAC_MAXLEN_CFG_MAX_LEN_SET(x)\ + FIELD_PREP(DEV5G_MAC_MAXLEN_CFG_MAX_LEN, x) +#define DEV5G_MAC_MAXLEN_CFG_MAX_LEN_GET(x)\ + FIELD_GET(DEV5G_MAC_MAXLEN_CFG_MAX_LEN, x) + +/* DEV10G:MAC_CFG_STATUS:MAC_ADV_CHK_CFG */ +#define DEV5G_MAC_ADV_CHK_CFG(t) __REG(TARGET_DEV5G, t, 13, 0, 0, 1, 60, 28, 0, 1, 4) + +#define DEV5G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA BIT(24) +#define DEV5G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA_SET(x)\ + FIELD_PREP(DEV5G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA, x) +#define DEV5G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA_GET(x)\ + FIELD_GET(DEV5G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA, x) + +#define DEV5G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA BIT(20) +#define DEV5G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA_SET(x)\ + FIELD_PREP(DEV5G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA, x) +#define DEV5G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA_GET(x)\ + FIELD_GET(DEV5G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA, x) + +#define DEV5G_MAC_ADV_CHK_CFG_SFD_CHK_ENA BIT(16) +#define DEV5G_MAC_ADV_CHK_CFG_SFD_CHK_ENA_SET(x)\ + FIELD_PREP(DEV5G_MAC_ADV_CHK_CFG_SFD_CHK_ENA, x) +#define DEV5G_MAC_ADV_CHK_CFG_SFD_CHK_ENA_GET(x)\ + FIELD_GET(DEV5G_MAC_ADV_CHK_CFG_SFD_CHK_ENA, x) + +#define DEV5G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS BIT(12) +#define DEV5G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS_SET(x)\ + FIELD_PREP(DEV5G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS, x) +#define DEV5G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS_GET(x)\ + FIELD_GET(DEV5G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS, x) + +#define DEV5G_MAC_ADV_CHK_CFG_PRM_CHK_ENA BIT(8) +#define DEV5G_MAC_ADV_CHK_CFG_PRM_CHK_ENA_SET(x)\ + FIELD_PREP(DEV5G_MAC_ADV_CHK_CFG_PRM_CHK_ENA, x) +#define DEV5G_MAC_ADV_CHK_CFG_PRM_CHK_ENA_GET(x)\ + FIELD_GET(DEV5G_MAC_ADV_CHK_CFG_PRM_CHK_ENA, x) + +#define DEV5G_MAC_ADV_CHK_CFG_OOR_ERR_ENA BIT(4) +#define DEV5G_MAC_ADV_CHK_CFG_OOR_ERR_ENA_SET(x)\ + FIELD_PREP(DEV5G_MAC_ADV_CHK_CFG_OOR_ERR_ENA, x) +#define DEV5G_MAC_ADV_CHK_CFG_OOR_ERR_ENA_GET(x)\ + FIELD_GET(DEV5G_MAC_ADV_CHK_CFG_OOR_ERR_ENA, x) + +#define DEV5G_MAC_ADV_CHK_CFG_INR_ERR_ENA BIT(0) +#define DEV5G_MAC_ADV_CHK_CFG_INR_ERR_ENA_SET(x)\ + FIELD_PREP(DEV5G_MAC_ADV_CHK_CFG_INR_ERR_ENA, x) +#define DEV5G_MAC_ADV_CHK_CFG_INR_ERR_ENA_GET(x)\ + FIELD_GET(DEV5G_MAC_ADV_CHK_CFG_INR_ERR_ENA, x) + +/* DEV10G:DEV_STATISTICS_32BIT:RX_SYMBOL_ERR_CNT */ +#define DEV5G_RX_SYMBOL_ERR_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 0, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:RX_PAUSE_CNT */ +#define DEV5G_RX_PAUSE_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 4, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:RX_UNSUP_OPCODE_CNT */ +#define DEV5G_RX_UNSUP_OPCODE_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 8, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:RX_UC_CNT */ +#define DEV5G_RX_UC_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 12, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:RX_MC_CNT */ +#define DEV5G_RX_MC_CNT(t) 
__REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 16, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:RX_BC_CNT */ +#define DEV5G_RX_BC_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 20, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:RX_CRC_ERR_CNT */ +#define DEV5G_RX_CRC_ERR_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 24, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:RX_UNDERSIZE_CNT */ +#define DEV5G_RX_UNDERSIZE_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 28, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:RX_FRAGMENTS_CNT */ +#define DEV5G_RX_FRAGMENTS_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 32, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:RX_IN_RANGE_LEN_ERR_CNT */ +#define DEV5G_RX_IN_RANGE_LEN_ERR_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 36, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:RX_OUT_OF_RANGE_LEN_ERR_CNT */ +#define DEV5G_RX_OUT_OF_RANGE_LEN_ERR_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 40, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:RX_OVERSIZE_CNT */ +#define DEV5G_RX_OVERSIZE_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 44, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:RX_JABBERS_CNT */ +#define DEV5G_RX_JABBERS_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 48, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:RX_SIZE64_CNT */ +#define DEV5G_RX_SIZE64_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 52, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:RX_SIZE65TO127_CNT */ +#define DEV5G_RX_SIZE65TO127_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 56, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:RX_SIZE128TO255_CNT */ +#define DEV5G_RX_SIZE128TO255_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 60, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:RX_SIZE256TO511_CNT */ +#define DEV5G_RX_SIZE256TO511_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 64, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:RX_SIZE512TO1023_CNT */ +#define DEV5G_RX_SIZE512TO1023_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 68, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:RX_SIZE1024TO1518_CNT */ +#define DEV5G_RX_SIZE1024TO1518_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 72, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:RX_SIZE1519TOMAX_CNT */ +#define DEV5G_RX_SIZE1519TOMAX_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 76, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:RX_IPG_SHRINK_CNT */ +#define DEV5G_RX_IPG_SHRINK_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 80, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:TX_PAUSE_CNT */ +#define DEV5G_TX_PAUSE_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 84, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:TX_UC_CNT */ +#define DEV5G_TX_UC_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 88, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:TX_MC_CNT */ +#define DEV5G_TX_MC_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 92, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:TX_BC_CNT */ +#define DEV5G_TX_BC_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 96, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:TX_SIZE64_CNT */ +#define DEV5G_TX_SIZE64_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 100, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:TX_SIZE65TO127_CNT */ +#define DEV5G_TX_SIZE65TO127_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 104, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:TX_SIZE128TO255_CNT */ +#define DEV5G_TX_SIZE128TO255_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 108, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:TX_SIZE256TO511_CNT */ +#define DEV5G_TX_SIZE256TO511_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 112, 0, 1, 4) + +/* 
DEV10G:DEV_STATISTICS_32BIT:TX_SIZE512TO1023_CNT */ +#define DEV5G_TX_SIZE512TO1023_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 116, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:TX_SIZE1024TO1518_CNT */ +#define DEV5G_TX_SIZE1024TO1518_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 120, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:TX_SIZE1519TOMAX_CNT */ +#define DEV5G_TX_SIZE1519TOMAX_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 124, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:RX_ALIGNMENT_LOST_CNT */ +#define DEV5G_RX_ALIGNMENT_LOST_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 128, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:RX_TAGGED_FRMS_CNT */ +#define DEV5G_RX_TAGGED_FRMS_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 132, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:RX_UNTAGGED_FRMS_CNT */ +#define DEV5G_RX_UNTAGGED_FRMS_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 136, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:TX_TAGGED_FRMS_CNT */ +#define DEV5G_TX_TAGGED_FRMS_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 140, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:TX_UNTAGGED_FRMS_CNT */ +#define DEV5G_TX_UNTAGGED_FRMS_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 144, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SYMBOL_ERR_CNT */ +#define DEV5G_PMAC_RX_SYMBOL_ERR_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 148, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_PAUSE_CNT */ +#define DEV5G_PMAC_RX_PAUSE_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 152, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_UNSUP_OPCODE_CNT */ +#define DEV5G_PMAC_RX_UNSUP_OPCODE_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 156, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_UC_CNT */ +#define DEV5G_PMAC_RX_UC_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 160, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_MC_CNT */ +#define DEV5G_PMAC_RX_MC_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 164, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_BC_CNT */ +#define DEV5G_PMAC_RX_BC_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 168, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_CRC_ERR_CNT */ +#define DEV5G_PMAC_RX_CRC_ERR_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 172, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_UNDERSIZE_CNT */ +#define DEV5G_PMAC_RX_UNDERSIZE_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 176, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_FRAGMENTS_CNT */ +#define DEV5G_PMAC_RX_FRAGMENTS_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 180, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_IN_RANGE_LEN_ERR_CNT */ +#define DEV5G_PMAC_RX_IN_RANGE_LEN_ERR_CNT(t) __REG(TARGET_DEV5G,\ + t, 13, 60, 0, 1, 312, 184, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_OUT_OF_RANGE_LEN_ERR_CNT */ +#define DEV5G_PMAC_RX_OUT_OF_RANGE_LEN_ERR_CNT(t) __REG(TARGET_DEV5G,\ + t, 13, 60, 0, 1, 312, 188, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_OVERSIZE_CNT */ +#define DEV5G_PMAC_RX_OVERSIZE_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 192, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_JABBERS_CNT */ +#define DEV5G_PMAC_RX_JABBERS_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 196, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE64_CNT */ +#define DEV5G_PMAC_RX_SIZE64_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 200, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE65TO127_CNT */ +#define DEV5G_PMAC_RX_SIZE65TO127_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 204, 0, 1, 4) + +/* 
DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE128TO255_CNT */ +#define DEV5G_PMAC_RX_SIZE128TO255_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 208, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE256TO511_CNT */ +#define DEV5G_PMAC_RX_SIZE256TO511_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 212, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE512TO1023_CNT */ +#define DEV5G_PMAC_RX_SIZE512TO1023_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 216, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE1024TO1518_CNT */ +#define DEV5G_PMAC_RX_SIZE1024TO1518_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 220, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE1519TOMAX_CNT */ +#define DEV5G_PMAC_RX_SIZE1519TOMAX_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 224, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_PAUSE_CNT */ +#define DEV5G_PMAC_TX_PAUSE_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 228, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_UC_CNT */ +#define DEV5G_PMAC_TX_UC_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 232, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_MC_CNT */ +#define DEV5G_PMAC_TX_MC_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 236, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_BC_CNT */ +#define DEV5G_PMAC_TX_BC_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 240, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE64_CNT */ +#define DEV5G_PMAC_TX_SIZE64_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 244, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE65TO127_CNT */ +#define DEV5G_PMAC_TX_SIZE65TO127_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 248, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE128TO255_CNT */ +#define DEV5G_PMAC_TX_SIZE128TO255_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 252, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE256TO511_CNT */ +#define DEV5G_PMAC_TX_SIZE256TO511_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 256, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE512TO1023_CNT */ +#define DEV5G_PMAC_TX_SIZE512TO1023_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 260, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE1024TO1518_CNT */ +#define DEV5G_PMAC_TX_SIZE1024TO1518_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 264, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE1519TOMAX_CNT */ +#define DEV5G_PMAC_TX_SIZE1519TOMAX_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 268, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_ALIGNMENT_LOST_CNT */ +#define DEV5G_PMAC_RX_ALIGNMENT_LOST_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 272, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:MM_RX_ASSEMBLY_ERR_CNT */ +#define DEV5G_MM_RX_ASSEMBLY_ERR_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 276, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:MM_RX_SMD_ERR_CNT */ +#define DEV5G_MM_RX_SMD_ERR_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 280, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:MM_RX_ASSEMBLY_OK_CNT */ +#define DEV5G_MM_RX_ASSEMBLY_OK_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 284, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:MM_RX_MERGE_FRAG_CNT */ +#define DEV5G_MM_RX_MERGE_FRAG_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 288, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:MM_TX_PFRAGMENT_CNT */ +#define DEV5G_MM_TX_PFRAGMENT_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 292, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:RX_HIH_CKSM_ERR_CNT */ +#define DEV5G_RX_HIH_CKSM_ERR_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 
1, 312, 296, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:RX_XGMII_PROT_ERR_CNT */ +#define DEV5G_RX_XGMII_PROT_ERR_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 300, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_HIH_CKSM_ERR_CNT */ +#define DEV5G_PMAC_RX_HIH_CKSM_ERR_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 304, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_XGMII_PROT_ERR_CNT */ +#define DEV5G_PMAC_RX_XGMII_PROT_ERR_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 308, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_40BIT:RX_IN_BYTES_CNT */ +#define DEV5G_RX_IN_BYTES_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 0, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_40BIT:RX_IN_BYTES_MSB_CNT */ +#define DEV5G_RX_IN_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 4, 0, 1, 4) + +#define DEV5G_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT GENMASK(7, 0) +#define DEV5G_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT_SET(x)\ + FIELD_PREP(DEV5G_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT, x) +#define DEV5G_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT_GET(x)\ + FIELD_GET(DEV5G_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT, x) + +/* DEV10G:DEV_STATISTICS_40BIT:RX_OK_BYTES_CNT */ +#define DEV5G_RX_OK_BYTES_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 8, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_40BIT:RX_OK_BYTES_MSB_CNT */ +#define DEV5G_RX_OK_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 12, 0, 1, 4) + +#define DEV5G_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT GENMASK(7, 0) +#define DEV5G_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT_SET(x)\ + FIELD_PREP(DEV5G_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT, x) +#define DEV5G_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT_GET(x)\ + FIELD_GET(DEV5G_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT, x) + +/* DEV10G:DEV_STATISTICS_40BIT:RX_BAD_BYTES_CNT */ +#define DEV5G_RX_BAD_BYTES_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 16, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_40BIT:RX_BAD_BYTES_MSB_CNT */ +#define DEV5G_RX_BAD_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 20, 0, 1, 4) + +#define DEV5G_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT GENMASK(7, 0) +#define DEV5G_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT_SET(x)\ + FIELD_PREP(DEV5G_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT, x) +#define DEV5G_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT_GET(x)\ + FIELD_GET(DEV5G_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT, x) + +/* DEV10G:DEV_STATISTICS_40BIT:TX_OUT_BYTES_CNT */ +#define DEV5G_TX_OUT_BYTES_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 24, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_40BIT:TX_OUT_BYTES_MSB_CNT */ +#define DEV5G_TX_OUT_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 28, 0, 1, 4) + +#define DEV5G_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT GENMASK(7, 0) +#define DEV5G_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT_SET(x)\ + FIELD_PREP(DEV5G_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT, x) +#define DEV5G_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT_GET(x)\ + FIELD_GET(DEV5G_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT, x) + +/* DEV10G:DEV_STATISTICS_40BIT:TX_OK_BYTES_CNT */ +#define DEV5G_TX_OK_BYTES_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 32, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_40BIT:TX_OK_BYTES_MSB_CNT */ +#define DEV5G_TX_OK_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 36, 0, 1, 4) + +#define DEV5G_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT GENMASK(7, 0) +#define DEV5G_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT_SET(x)\ + FIELD_PREP(DEV5G_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT, x) +#define DEV5G_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT_GET(x)\ + 
FIELD_GET(DEV5G_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT, x) + +/* DEV10G:DEV_STATISTICS_40BIT:PMAC_RX_OK_BYTES_CNT */ +#define DEV5G_PMAC_RX_OK_BYTES_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 40, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_40BIT:PMAC_RX_OK_BYTES_MSB_CNT */ +#define DEV5G_PMAC_RX_OK_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 44, 0, 1, 4) + +#define DEV5G_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT GENMASK(7, 0) +#define DEV5G_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT_SET(x)\ + FIELD_PREP(DEV5G_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT, x) +#define DEV5G_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT_GET(x)\ + FIELD_GET(DEV5G_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT, x) + +/* DEV10G:DEV_STATISTICS_40BIT:PMAC_RX_BAD_BYTES_CNT */ +#define DEV5G_PMAC_RX_BAD_BYTES_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 48, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_40BIT:PMAC_RX_BAD_BYTES_MSB_CNT */ +#define DEV5G_PMAC_RX_BAD_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 52, 0, 1, 4) + +#define DEV5G_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT GENMASK(7, 0) +#define DEV5G_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT_SET(x)\ + FIELD_PREP(DEV5G_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT, x) +#define DEV5G_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT_GET(x)\ + FIELD_GET(DEV5G_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT, x) + +/* DEV10G:DEV_STATISTICS_40BIT:PMAC_TX_OK_BYTES_CNT */ +#define DEV5G_PMAC_TX_OK_BYTES_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 56, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_40BIT:PMAC_TX_OK_BYTES_MSB_CNT */ +#define DEV5G_PMAC_TX_OK_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 60, 0, 1, 4) + +#define DEV5G_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT GENMASK(7, 0) +#define DEV5G_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT_SET(x)\ + FIELD_PREP(DEV5G_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT, x) +#define DEV5G_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT_GET(x)\ + FIELD_GET(DEV5G_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT, x) + +/* DEV10G:DEV_CFG_STATUS:DEV_RST_CTRL */ +#define DEV5G_DEV_RST_CTRL(t) __REG(TARGET_DEV5G, t, 13, 436, 0, 1, 52, 0, 0, 1, 4) + +#define DEV5G_DEV_RST_CTRL_PARDET_MODE_ENA BIT(28) +#define DEV5G_DEV_RST_CTRL_PARDET_MODE_ENA_SET(x)\ + FIELD_PREP(DEV5G_DEV_RST_CTRL_PARDET_MODE_ENA, x) +#define DEV5G_DEV_RST_CTRL_PARDET_MODE_ENA_GET(x)\ + FIELD_GET(DEV5G_DEV_RST_CTRL_PARDET_MODE_ENA, x) + +#define DEV5G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS BIT(27) +#define DEV5G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS_SET(x)\ + FIELD_PREP(DEV5G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS, x) +#define DEV5G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS_GET(x)\ + FIELD_GET(DEV5G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS, x) + +#define DEV5G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS GENMASK(26, 25) +#define DEV5G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS_SET(x)\ + FIELD_PREP(DEV5G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS, x) +#define DEV5G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS_GET(x)\ + FIELD_GET(DEV5G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS, x) + +#define DEV5G_DEV_RST_CTRL_SERDES_SPEED_SEL GENMASK(24, 23) +#define DEV5G_DEV_RST_CTRL_SERDES_SPEED_SEL_SET(x)\ + FIELD_PREP(DEV5G_DEV_RST_CTRL_SERDES_SPEED_SEL, x) +#define DEV5G_DEV_RST_CTRL_SERDES_SPEED_SEL_GET(x)\ + FIELD_GET(DEV5G_DEV_RST_CTRL_SERDES_SPEED_SEL, x) + +#define DEV5G_DEV_RST_CTRL_SPEED_SEL GENMASK(22, 20) +#define DEV5G_DEV_RST_CTRL_SPEED_SEL_SET(x)\ + 
FIELD_PREP(DEV5G_DEV_RST_CTRL_SPEED_SEL, x) +#define DEV5G_DEV_RST_CTRL_SPEED_SEL_GET(x)\ + FIELD_GET(DEV5G_DEV_RST_CTRL_SPEED_SEL, x) + +#define DEV5G_DEV_RST_CTRL_PCS_TX_RST BIT(12) +#define DEV5G_DEV_RST_CTRL_PCS_TX_RST_SET(x)\ + FIELD_PREP(DEV5G_DEV_RST_CTRL_PCS_TX_RST, x) +#define DEV5G_DEV_RST_CTRL_PCS_TX_RST_GET(x)\ + FIELD_GET(DEV5G_DEV_RST_CTRL_PCS_TX_RST, x) + +#define DEV5G_DEV_RST_CTRL_PCS_RX_RST BIT(8) +#define DEV5G_DEV_RST_CTRL_PCS_RX_RST_SET(x)\ + FIELD_PREP(DEV5G_DEV_RST_CTRL_PCS_RX_RST, x) +#define DEV5G_DEV_RST_CTRL_PCS_RX_RST_GET(x)\ + FIELD_GET(DEV5G_DEV_RST_CTRL_PCS_RX_RST, x) + +#define DEV5G_DEV_RST_CTRL_MAC_TX_RST BIT(4) +#define DEV5G_DEV_RST_CTRL_MAC_TX_RST_SET(x)\ + FIELD_PREP(DEV5G_DEV_RST_CTRL_MAC_TX_RST, x) +#define DEV5G_DEV_RST_CTRL_MAC_TX_RST_GET(x)\ + FIELD_GET(DEV5G_DEV_RST_CTRL_MAC_TX_RST, x) + +#define DEV5G_DEV_RST_CTRL_MAC_RX_RST BIT(0) +#define DEV5G_DEV_RST_CTRL_MAC_RX_RST_SET(x)\ + FIELD_PREP(DEV5G_DEV_RST_CTRL_MAC_RX_RST, x) +#define DEV5G_DEV_RST_CTRL_MAC_RX_RST_GET(x)\ + FIELD_GET(DEV5G_DEV_RST_CTRL_MAC_RX_RST, x) + +/* DSM:RAM_CTRL:RAM_INIT */ +#define DSM_RAM_INIT __REG(TARGET_DSM, 0, 1, 0, 0, 1, 4, 0, 0, 1, 4) + +#define DSM_RAM_INIT_RAM_INIT BIT(1) +#define DSM_RAM_INIT_RAM_INIT_SET(x)\ + FIELD_PREP(DSM_RAM_INIT_RAM_INIT, x) +#define DSM_RAM_INIT_RAM_INIT_GET(x)\ + FIELD_GET(DSM_RAM_INIT_RAM_INIT, x) + +#define DSM_RAM_INIT_RAM_CFG_HOOK BIT(0) +#define DSM_RAM_INIT_RAM_CFG_HOOK_SET(x)\ + FIELD_PREP(DSM_RAM_INIT_RAM_CFG_HOOK, x) +#define DSM_RAM_INIT_RAM_CFG_HOOK_GET(x)\ + FIELD_GET(DSM_RAM_INIT_RAM_CFG_HOOK, x) + +/* DSM:CFG:BUF_CFG */ +#define DSM_BUF_CFG(r) __REG(TARGET_DSM, 0, 1, 20, 0, 1, 3528, 0, r, 67, 4) + +#define DSM_BUF_CFG_CSC_STAT_DIS BIT(13) +#define DSM_BUF_CFG_CSC_STAT_DIS_SET(x)\ + FIELD_PREP(DSM_BUF_CFG_CSC_STAT_DIS, x) +#define DSM_BUF_CFG_CSC_STAT_DIS_GET(x)\ + FIELD_GET(DSM_BUF_CFG_CSC_STAT_DIS, x) + +#define DSM_BUF_CFG_AGING_ENA BIT(12) +#define DSM_BUF_CFG_AGING_ENA_SET(x)\ + FIELD_PREP(DSM_BUF_CFG_AGING_ENA, x) +#define DSM_BUF_CFG_AGING_ENA_GET(x)\ + FIELD_GET(DSM_BUF_CFG_AGING_ENA, x) + +#define DSM_BUF_CFG_UNDERFLOW_WATCHDOG_DIS BIT(11) +#define DSM_BUF_CFG_UNDERFLOW_WATCHDOG_DIS_SET(x)\ + FIELD_PREP(DSM_BUF_CFG_UNDERFLOW_WATCHDOG_DIS, x) +#define DSM_BUF_CFG_UNDERFLOW_WATCHDOG_DIS_GET(x)\ + FIELD_GET(DSM_BUF_CFG_UNDERFLOW_WATCHDOG_DIS, x) + +#define DSM_BUF_CFG_UNDERFLOW_WATCHDOG_TIMEOUT GENMASK(10, 0) +#define DSM_BUF_CFG_UNDERFLOW_WATCHDOG_TIMEOUT_SET(x)\ + FIELD_PREP(DSM_BUF_CFG_UNDERFLOW_WATCHDOG_TIMEOUT, x) +#define DSM_BUF_CFG_UNDERFLOW_WATCHDOG_TIMEOUT_GET(x)\ + FIELD_GET(DSM_BUF_CFG_UNDERFLOW_WATCHDOG_TIMEOUT, x) + +/* DSM:CFG:DEV_TX_STOP_WM_CFG */ +#define DSM_DEV_TX_STOP_WM_CFG(r) __REG(TARGET_DSM, 0, 1, 20, 0, 1, 3528, 1360, r, 67, 4) + +#define DSM_DEV_TX_STOP_WM_CFG_FAST_STARTUP_ENA BIT(9) +#define DSM_DEV_TX_STOP_WM_CFG_FAST_STARTUP_ENA_SET(x)\ + FIELD_PREP(DSM_DEV_TX_STOP_WM_CFG_FAST_STARTUP_ENA, x) +#define DSM_DEV_TX_STOP_WM_CFG_FAST_STARTUP_ENA_GET(x)\ + FIELD_GET(DSM_DEV_TX_STOP_WM_CFG_FAST_STARTUP_ENA, x) + +#define DSM_DEV_TX_STOP_WM_CFG_DEV10G_SHADOW_ENA BIT(8) +#define DSM_DEV_TX_STOP_WM_CFG_DEV10G_SHADOW_ENA_SET(x)\ + FIELD_PREP(DSM_DEV_TX_STOP_WM_CFG_DEV10G_SHADOW_ENA, x) +#define DSM_DEV_TX_STOP_WM_CFG_DEV10G_SHADOW_ENA_GET(x)\ + FIELD_GET(DSM_DEV_TX_STOP_WM_CFG_DEV10G_SHADOW_ENA, x) + +#define DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM GENMASK(7, 1) +#define DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM_SET(x)\ + FIELD_PREP(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM, x) +#define 
DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM_GET(x)\ + FIELD_GET(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM, x) + +#define DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR BIT(0) +#define DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR_SET(x)\ + FIELD_PREP(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR, x) +#define DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR_GET(x)\ + FIELD_GET(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR, x) + +/* DSM:CFG:RX_PAUSE_CFG */ +#define DSM_RX_PAUSE_CFG(r) __REG(TARGET_DSM, 0, 1, 20, 0, 1, 3528, 1628, r, 67, 4) + +#define DSM_RX_PAUSE_CFG_RX_PAUSE_EN BIT(1) +#define DSM_RX_PAUSE_CFG_RX_PAUSE_EN_SET(x)\ + FIELD_PREP(DSM_RX_PAUSE_CFG_RX_PAUSE_EN, x) +#define DSM_RX_PAUSE_CFG_RX_PAUSE_EN_GET(x)\ + FIELD_GET(DSM_RX_PAUSE_CFG_RX_PAUSE_EN, x) + +#define DSM_RX_PAUSE_CFG_FC_OBEY_LOCAL BIT(0) +#define DSM_RX_PAUSE_CFG_FC_OBEY_LOCAL_SET(x)\ + FIELD_PREP(DSM_RX_PAUSE_CFG_FC_OBEY_LOCAL, x) +#define DSM_RX_PAUSE_CFG_FC_OBEY_LOCAL_GET(x)\ + FIELD_GET(DSM_RX_PAUSE_CFG_FC_OBEY_LOCAL, x) + +/* DSM:CFG:MAC_CFG */ +#define DSM_MAC_CFG(r) __REG(TARGET_DSM, 0, 1, 20, 0, 1, 3528, 2432, r, 67, 4) + +#define DSM_MAC_CFG_TX_PAUSE_VAL GENMASK(31, 16) +#define DSM_MAC_CFG_TX_PAUSE_VAL_SET(x)\ + FIELD_PREP(DSM_MAC_CFG_TX_PAUSE_VAL, x) +#define DSM_MAC_CFG_TX_PAUSE_VAL_GET(x)\ + FIELD_GET(DSM_MAC_CFG_TX_PAUSE_VAL, x) + +#define DSM_MAC_CFG_HDX_BACKPREASSURE BIT(2) +#define DSM_MAC_CFG_HDX_BACKPREASSURE_SET(x)\ + FIELD_PREP(DSM_MAC_CFG_HDX_BACKPREASSURE, x) +#define DSM_MAC_CFG_HDX_BACKPREASSURE_GET(x)\ + FIELD_GET(DSM_MAC_CFG_HDX_BACKPREASSURE, x) + +#define DSM_MAC_CFG_SEND_PAUSE_FRM_TWICE BIT(1) +#define DSM_MAC_CFG_SEND_PAUSE_FRM_TWICE_SET(x)\ + FIELD_PREP(DSM_MAC_CFG_SEND_PAUSE_FRM_TWICE, x) +#define DSM_MAC_CFG_SEND_PAUSE_FRM_TWICE_GET(x)\ + FIELD_GET(DSM_MAC_CFG_SEND_PAUSE_FRM_TWICE, x) + +#define DSM_MAC_CFG_TX_PAUSE_XON_XOFF BIT(0) +#define DSM_MAC_CFG_TX_PAUSE_XON_XOFF_SET(x)\ + FIELD_PREP(DSM_MAC_CFG_TX_PAUSE_XON_XOFF, x) +#define DSM_MAC_CFG_TX_PAUSE_XON_XOFF_GET(x)\ + FIELD_GET(DSM_MAC_CFG_TX_PAUSE_XON_XOFF, x) + +/* DSM:CFG:MAC_ADDR_BASE_HIGH_CFG */ +#define DSM_MAC_ADDR_BASE_HIGH_CFG(r) __REG(TARGET_DSM, 0, 1, 20, 0, 1, 3528, 2700, r, 65, 4) + +#define DSM_MAC_ADDR_BASE_HIGH_CFG_MAC_ADDR_HIGH GENMASK(23, 0) +#define DSM_MAC_ADDR_BASE_HIGH_CFG_MAC_ADDR_HIGH_SET(x)\ + FIELD_PREP(DSM_MAC_ADDR_BASE_HIGH_CFG_MAC_ADDR_HIGH, x) +#define DSM_MAC_ADDR_BASE_HIGH_CFG_MAC_ADDR_HIGH_GET(x)\ + FIELD_GET(DSM_MAC_ADDR_BASE_HIGH_CFG_MAC_ADDR_HIGH, x) + +/* DSM:CFG:MAC_ADDR_BASE_LOW_CFG */ +#define DSM_MAC_ADDR_BASE_LOW_CFG(r) __REG(TARGET_DSM, 0, 1, 20, 0, 1, 3528, 2960, r, 65, 4) + +#define DSM_MAC_ADDR_BASE_LOW_CFG_MAC_ADDR_LOW GENMASK(23, 0) +#define DSM_MAC_ADDR_BASE_LOW_CFG_MAC_ADDR_LOW_SET(x)\ + FIELD_PREP(DSM_MAC_ADDR_BASE_LOW_CFG_MAC_ADDR_LOW, x) +#define DSM_MAC_ADDR_BASE_LOW_CFG_MAC_ADDR_LOW_GET(x)\ + FIELD_GET(DSM_MAC_ADDR_BASE_LOW_CFG_MAC_ADDR_LOW, x) + +/* DSM:CFG:TAXI_CAL_CFG */ +#define DSM_TAXI_CAL_CFG(r) __REG(TARGET_DSM, 0, 1, 20, 0, 1, 3528, 3224, r, 9, 4) + +#define DSM_TAXI_CAL_CFG_CAL_IDX GENMASK(20, 15) +#define DSM_TAXI_CAL_CFG_CAL_IDX_SET(x)\ + FIELD_PREP(DSM_TAXI_CAL_CFG_CAL_IDX, x) +#define DSM_TAXI_CAL_CFG_CAL_IDX_GET(x)\ + FIELD_GET(DSM_TAXI_CAL_CFG_CAL_IDX, x) + +#define DSM_TAXI_CAL_CFG_CAL_CUR_LEN GENMASK(14, 9) +#define DSM_TAXI_CAL_CFG_CAL_CUR_LEN_SET(x)\ + FIELD_PREP(DSM_TAXI_CAL_CFG_CAL_CUR_LEN, x) +#define DSM_TAXI_CAL_CFG_CAL_CUR_LEN_GET(x)\ + FIELD_GET(DSM_TAXI_CAL_CFG_CAL_CUR_LEN, x) + +#define DSM_TAXI_CAL_CFG_CAL_CUR_VAL GENMASK(8, 5) +#define DSM_TAXI_CAL_CFG_CAL_CUR_VAL_SET(x)\ + 
FIELD_PREP(DSM_TAXI_CAL_CFG_CAL_CUR_VAL, x) +#define DSM_TAXI_CAL_CFG_CAL_CUR_VAL_GET(x)\ + FIELD_GET(DSM_TAXI_CAL_CFG_CAL_CUR_VAL, x) + +#define DSM_TAXI_CAL_CFG_CAL_PGM_VAL GENMASK(4, 1) +#define DSM_TAXI_CAL_CFG_CAL_PGM_VAL_SET(x)\ + FIELD_PREP(DSM_TAXI_CAL_CFG_CAL_PGM_VAL, x) +#define DSM_TAXI_CAL_CFG_CAL_PGM_VAL_GET(x)\ + FIELD_GET(DSM_TAXI_CAL_CFG_CAL_PGM_VAL, x) + +#define DSM_TAXI_CAL_CFG_CAL_PGM_ENA BIT(0) +#define DSM_TAXI_CAL_CFG_CAL_PGM_ENA_SET(x)\ + FIELD_PREP(DSM_TAXI_CAL_CFG_CAL_PGM_ENA, x) +#define DSM_TAXI_CAL_CFG_CAL_PGM_ENA_GET(x)\ + FIELD_GET(DSM_TAXI_CAL_CFG_CAL_PGM_ENA, x) + +/* EACL:POL_CFG:POL_EACL_CFG */ +#define EACL_POL_EACL_CFG __REG(TARGET_EACL, 0, 1, 150608, 0, 1, 780, 768, 0, 1, 4) + +#define EACL_POL_EACL_CFG_EACL_CNT_MARKED_AS_DROPPED BIT(5) +#define EACL_POL_EACL_CFG_EACL_CNT_MARKED_AS_DROPPED_SET(x)\ + FIELD_PREP(EACL_POL_EACL_CFG_EACL_CNT_MARKED_AS_DROPPED, x) +#define EACL_POL_EACL_CFG_EACL_CNT_MARKED_AS_DROPPED_GET(x)\ + FIELD_GET(EACL_POL_EACL_CFG_EACL_CNT_MARKED_AS_DROPPED, x) + +#define EACL_POL_EACL_CFG_EACL_ALLOW_FP_COPY BIT(4) +#define EACL_POL_EACL_CFG_EACL_ALLOW_FP_COPY_SET(x)\ + FIELD_PREP(EACL_POL_EACL_CFG_EACL_ALLOW_FP_COPY, x) +#define EACL_POL_EACL_CFG_EACL_ALLOW_FP_COPY_GET(x)\ + FIELD_GET(EACL_POL_EACL_CFG_EACL_ALLOW_FP_COPY, x) + +#define EACL_POL_EACL_CFG_EACL_ALLOW_CPU_COPY BIT(3) +#define EACL_POL_EACL_CFG_EACL_ALLOW_CPU_COPY_SET(x)\ + FIELD_PREP(EACL_POL_EACL_CFG_EACL_ALLOW_CPU_COPY, x) +#define EACL_POL_EACL_CFG_EACL_ALLOW_CPU_COPY_GET(x)\ + FIELD_GET(EACL_POL_EACL_CFG_EACL_ALLOW_CPU_COPY, x) + +#define EACL_POL_EACL_CFG_EACL_FORCE_CLOSE BIT(2) +#define EACL_POL_EACL_CFG_EACL_FORCE_CLOSE_SET(x)\ + FIELD_PREP(EACL_POL_EACL_CFG_EACL_FORCE_CLOSE, x) +#define EACL_POL_EACL_CFG_EACL_FORCE_CLOSE_GET(x)\ + FIELD_GET(EACL_POL_EACL_CFG_EACL_FORCE_CLOSE, x) + +#define EACL_POL_EACL_CFG_EACL_FORCE_OPEN BIT(1) +#define EACL_POL_EACL_CFG_EACL_FORCE_OPEN_SET(x)\ + FIELD_PREP(EACL_POL_EACL_CFG_EACL_FORCE_OPEN, x) +#define EACL_POL_EACL_CFG_EACL_FORCE_OPEN_GET(x)\ + FIELD_GET(EACL_POL_EACL_CFG_EACL_FORCE_OPEN, x) + +#define EACL_POL_EACL_CFG_EACL_FORCE_INIT BIT(0) +#define EACL_POL_EACL_CFG_EACL_FORCE_INIT_SET(x)\ + FIELD_PREP(EACL_POL_EACL_CFG_EACL_FORCE_INIT, x) +#define EACL_POL_EACL_CFG_EACL_FORCE_INIT_GET(x)\ + FIELD_GET(EACL_POL_EACL_CFG_EACL_FORCE_INIT, x) + +/* EACL:RAM_CTRL:RAM_INIT */ +#define EACL_RAM_INIT __REG(TARGET_EACL, 0, 1, 118736, 0, 1, 4, 0, 0, 1, 4) + +#define EACL_RAM_INIT_RAM_INIT BIT(1) +#define EACL_RAM_INIT_RAM_INIT_SET(x)\ + FIELD_PREP(EACL_RAM_INIT_RAM_INIT, x) +#define EACL_RAM_INIT_RAM_INIT_GET(x)\ + FIELD_GET(EACL_RAM_INIT_RAM_INIT, x) + +#define EACL_RAM_INIT_RAM_CFG_HOOK BIT(0) +#define EACL_RAM_INIT_RAM_CFG_HOOK_SET(x)\ + FIELD_PREP(EACL_RAM_INIT_RAM_CFG_HOOK, x) +#define EACL_RAM_INIT_RAM_CFG_HOOK_GET(x)\ + FIELD_GET(EACL_RAM_INIT_RAM_CFG_HOOK, x) + +/* FDMA:FDMA:FDMA_CH_ACTIVATE */ +#define FDMA_CH_ACTIVATE __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 0, 0, 1, 4) + +#define FDMA_CH_ACTIVATE_CH_ACTIVATE GENMASK(7, 0) +#define FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(x)\ + FIELD_PREP(FDMA_CH_ACTIVATE_CH_ACTIVATE, x) +#define FDMA_CH_ACTIVATE_CH_ACTIVATE_GET(x)\ + FIELD_GET(FDMA_CH_ACTIVATE_CH_ACTIVATE, x) + +/* FDMA:FDMA:FDMA_CH_RELOAD */ +#define FDMA_CH_RELOAD __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 4, 0, 1, 4) + +#define FDMA_CH_RELOAD_CH_RELOAD GENMASK(7, 0) +#define FDMA_CH_RELOAD_CH_RELOAD_SET(x)\ + FIELD_PREP(FDMA_CH_RELOAD_CH_RELOAD, x) +#define FDMA_CH_RELOAD_CH_RELOAD_GET(x)\ + FIELD_GET(FDMA_CH_RELOAD_CH_RELOAD, x) + 
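The accessors above all follow the same generated pattern: each register gets a __REG() address macro, and each field gets a mask plus _SET()/_GET() wrappers around FIELD_PREP()/FIELD_GET() from <linux/bitfield.h>. As a minimal illustrative sketch (not part of this patch), a multi-field value such as DSM_MAC_CFG, defined earlier in this header, would typically be composed by OR-ing the _SET() macros and decoded with the matching _GET() macros; the helper name below is hypothetical and assumes only <linux/types.h> and <linux/bitfield.h>.

/* Illustrative sketch, not upstream code: build a DSM_MAC_CFG value with
 * the pause quanta in bits 31:16 and XON/XOFF mode in bit 0.
 */
static inline u32 example_dsm_mac_cfg(u16 pause_val, bool xon_xoff)
{
	return DSM_MAC_CFG_TX_PAUSE_VAL_SET(pause_val) |
	       DSM_MAC_CFG_TX_PAUSE_XON_XOFF_SET(xon_xoff);
}

/* Decoding works the same way in reverse, e.g.
 * u16 quanta = DSM_MAC_CFG_TX_PAUSE_VAL_GET(val);
 */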
+/* FDMA:FDMA:FDMA_CH_DISABLE */ +#define FDMA_CH_DISABLE __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 8, 0, 1, 4) + +#define FDMA_CH_DISABLE_CH_DISABLE GENMASK(7, 0) +#define FDMA_CH_DISABLE_CH_DISABLE_SET(x)\ + FIELD_PREP(FDMA_CH_DISABLE_CH_DISABLE, x) +#define FDMA_CH_DISABLE_CH_DISABLE_GET(x)\ + FIELD_GET(FDMA_CH_DISABLE_CH_DISABLE, x) + +/* FDMA:FDMA:FDMA_DCB_LLP */ +#define FDMA_DCB_LLP(r) __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 52, r, 8, 4) + +/* FDMA:FDMA:FDMA_DCB_LLP1 */ +#define FDMA_DCB_LLP1(r) __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 84, r, 8, 4) + +/* FDMA:FDMA:FDMA_DCB_LLP_PREV */ +#define FDMA_DCB_LLP_PREV(r) __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 116, r, 8, 4) + +/* FDMA:FDMA:FDMA_DCB_LLP_PREV1 */ +#define FDMA_DCB_LLP_PREV1(r) __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 148, r, 8, 4) + +/* FDMA:FDMA:FDMA_CH_CFG */ +#define FDMA_CH_CFG(r) __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 224, r, 8, 4) + +#define FDMA_CH_CFG_CH_XTR_STATUS_MODE BIT(7) +#define FDMA_CH_CFG_CH_XTR_STATUS_MODE_SET(x)\ + FIELD_PREP(FDMA_CH_CFG_CH_XTR_STATUS_MODE, x) +#define FDMA_CH_CFG_CH_XTR_STATUS_MODE_GET(x)\ + FIELD_GET(FDMA_CH_CFG_CH_XTR_STATUS_MODE, x) + +#define FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY BIT(6) +#define FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(x)\ + FIELD_PREP(FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY, x) +#define FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_GET(x)\ + FIELD_GET(FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY, x) + +#define FDMA_CH_CFG_CH_INJ_PORT BIT(5) +#define FDMA_CH_CFG_CH_INJ_PORT_SET(x)\ + FIELD_PREP(FDMA_CH_CFG_CH_INJ_PORT, x) +#define FDMA_CH_CFG_CH_INJ_PORT_GET(x)\ + FIELD_GET(FDMA_CH_CFG_CH_INJ_PORT, x) + +#define FDMA_CH_CFG_CH_DCB_DB_CNT GENMASK(4, 1) +#define FDMA_CH_CFG_CH_DCB_DB_CNT_SET(x)\ + FIELD_PREP(FDMA_CH_CFG_CH_DCB_DB_CNT, x) +#define FDMA_CH_CFG_CH_DCB_DB_CNT_GET(x)\ + FIELD_GET(FDMA_CH_CFG_CH_DCB_DB_CNT, x) + +#define FDMA_CH_CFG_CH_MEM BIT(0) +#define FDMA_CH_CFG_CH_MEM_SET(x)\ + FIELD_PREP(FDMA_CH_CFG_CH_MEM, x) +#define FDMA_CH_CFG_CH_MEM_GET(x)\ + FIELD_GET(FDMA_CH_CFG_CH_MEM, x) + +/* FDMA:FDMA:FDMA_CH_TRANSLATE */ +#define FDMA_CH_TRANSLATE(r) __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 256, r, 8, 4) + +#define FDMA_CH_TRANSLATE_OFFSET GENMASK(15, 0) +#define FDMA_CH_TRANSLATE_OFFSET_SET(x)\ + FIELD_PREP(FDMA_CH_TRANSLATE_OFFSET, x) +#define FDMA_CH_TRANSLATE_OFFSET_GET(x)\ + FIELD_GET(FDMA_CH_TRANSLATE_OFFSET, x) + +/* FDMA:FDMA:FDMA_XTR_CFG */ +#define FDMA_XTR_CFG __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 364, 0, 1, 4) + +#define FDMA_XTR_CFG_XTR_FIFO_WM GENMASK(15, 11) +#define FDMA_XTR_CFG_XTR_FIFO_WM_SET(x)\ + FIELD_PREP(FDMA_XTR_CFG_XTR_FIFO_WM, x) +#define FDMA_XTR_CFG_XTR_FIFO_WM_GET(x)\ + FIELD_GET(FDMA_XTR_CFG_XTR_FIFO_WM, x) + +#define FDMA_XTR_CFG_XTR_ARB_SAT GENMASK(10, 0) +#define FDMA_XTR_CFG_XTR_ARB_SAT_SET(x)\ + FIELD_PREP(FDMA_XTR_CFG_XTR_ARB_SAT, x) +#define FDMA_XTR_CFG_XTR_ARB_SAT_GET(x)\ + FIELD_GET(FDMA_XTR_CFG_XTR_ARB_SAT, x) + +/* FDMA:FDMA:FDMA_PORT_CTRL */ +#define FDMA_PORT_CTRL(r) __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 376, r, 2, 4) + +#define FDMA_PORT_CTRL_INJ_STOP BIT(4) +#define FDMA_PORT_CTRL_INJ_STOP_SET(x)\ + FIELD_PREP(FDMA_PORT_CTRL_INJ_STOP, x) +#define FDMA_PORT_CTRL_INJ_STOP_GET(x)\ + FIELD_GET(FDMA_PORT_CTRL_INJ_STOP, x) + +#define FDMA_PORT_CTRL_INJ_STOP_FORCE BIT(3) +#define FDMA_PORT_CTRL_INJ_STOP_FORCE_SET(x)\ + FIELD_PREP(FDMA_PORT_CTRL_INJ_STOP_FORCE, x) +#define FDMA_PORT_CTRL_INJ_STOP_FORCE_GET(x)\ + FIELD_GET(FDMA_PORT_CTRL_INJ_STOP_FORCE, x) + +#define FDMA_PORT_CTRL_XTR_STOP BIT(2) +#define FDMA_PORT_CTRL_XTR_STOP_SET(x)\ + FIELD_PREP(FDMA_PORT_CTRL_XTR_STOP, 
x) +#define FDMA_PORT_CTRL_XTR_STOP_GET(x)\ + FIELD_GET(FDMA_PORT_CTRL_XTR_STOP, x) + +#define FDMA_PORT_CTRL_XTR_BUF_IS_EMPTY BIT(1) +#define FDMA_PORT_CTRL_XTR_BUF_IS_EMPTY_SET(x)\ + FIELD_PREP(FDMA_PORT_CTRL_XTR_BUF_IS_EMPTY, x) +#define FDMA_PORT_CTRL_XTR_BUF_IS_EMPTY_GET(x)\ + FIELD_GET(FDMA_PORT_CTRL_XTR_BUF_IS_EMPTY, x) + +#define FDMA_PORT_CTRL_XTR_BUF_RST BIT(0) +#define FDMA_PORT_CTRL_XTR_BUF_RST_SET(x)\ + FIELD_PREP(FDMA_PORT_CTRL_XTR_BUF_RST, x) +#define FDMA_PORT_CTRL_XTR_BUF_RST_GET(x)\ + FIELD_GET(FDMA_PORT_CTRL_XTR_BUF_RST, x) + +/* FDMA:FDMA:FDMA_INTR_DCB */ +#define FDMA_INTR_DCB __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 384, 0, 1, 4) + +#define FDMA_INTR_DCB_INTR_DCB GENMASK(7, 0) +#define FDMA_INTR_DCB_INTR_DCB_SET(x)\ + FIELD_PREP(FDMA_INTR_DCB_INTR_DCB, x) +#define FDMA_INTR_DCB_INTR_DCB_GET(x)\ + FIELD_GET(FDMA_INTR_DCB_INTR_DCB, x) + +/* FDMA:FDMA:FDMA_INTR_DCB_ENA */ +#define FDMA_INTR_DCB_ENA __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 388, 0, 1, 4) + +#define FDMA_INTR_DCB_ENA_INTR_DCB_ENA GENMASK(7, 0) +#define FDMA_INTR_DCB_ENA_INTR_DCB_ENA_SET(x)\ + FIELD_PREP(FDMA_INTR_DCB_ENA_INTR_DCB_ENA, x) +#define FDMA_INTR_DCB_ENA_INTR_DCB_ENA_GET(x)\ + FIELD_GET(FDMA_INTR_DCB_ENA_INTR_DCB_ENA, x) + +/* FDMA:FDMA:FDMA_INTR_DB */ +#define FDMA_INTR_DB __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 392, 0, 1, 4) + +#define FDMA_INTR_DB_INTR_DB GENMASK(7, 0) +#define FDMA_INTR_DB_INTR_DB_SET(x)\ + FIELD_PREP(FDMA_INTR_DB_INTR_DB, x) +#define FDMA_INTR_DB_INTR_DB_GET(x)\ + FIELD_GET(FDMA_INTR_DB_INTR_DB, x) + +/* FDMA:FDMA:FDMA_INTR_DB_ENA */ +#define FDMA_INTR_DB_ENA __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 396, 0, 1, 4) + +#define FDMA_INTR_DB_ENA_INTR_DB_ENA GENMASK(7, 0) +#define FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(x)\ + FIELD_PREP(FDMA_INTR_DB_ENA_INTR_DB_ENA, x) +#define FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(x)\ + FIELD_GET(FDMA_INTR_DB_ENA_INTR_DB_ENA, x) + +/* FDMA:FDMA:FDMA_INTR_ERR */ +#define FDMA_INTR_ERR __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 400, 0, 1, 4) + +#define FDMA_INTR_ERR_INTR_PORT_ERR GENMASK(9, 8) +#define FDMA_INTR_ERR_INTR_PORT_ERR_SET(x)\ + FIELD_PREP(FDMA_INTR_ERR_INTR_PORT_ERR, x) +#define FDMA_INTR_ERR_INTR_PORT_ERR_GET(x)\ + FIELD_GET(FDMA_INTR_ERR_INTR_PORT_ERR, x) + +#define FDMA_INTR_ERR_INTR_CH_ERR GENMASK(7, 0) +#define FDMA_INTR_ERR_INTR_CH_ERR_SET(x)\ + FIELD_PREP(FDMA_INTR_ERR_INTR_CH_ERR, x) +#define FDMA_INTR_ERR_INTR_CH_ERR_GET(x)\ + FIELD_GET(FDMA_INTR_ERR_INTR_CH_ERR, x) + +/* FDMA:FDMA:FDMA_ERRORS */ +#define FDMA_ERRORS __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 412, 0, 1, 4) + +#define FDMA_ERRORS_ERR_XTR_WR GENMASK(31, 30) +#define FDMA_ERRORS_ERR_XTR_WR_SET(x)\ + FIELD_PREP(FDMA_ERRORS_ERR_XTR_WR, x) +#define FDMA_ERRORS_ERR_XTR_WR_GET(x)\ + FIELD_GET(FDMA_ERRORS_ERR_XTR_WR, x) + +#define FDMA_ERRORS_ERR_XTR_OVF GENMASK(29, 28) +#define FDMA_ERRORS_ERR_XTR_OVF_SET(x)\ + FIELD_PREP(FDMA_ERRORS_ERR_XTR_OVF, x) +#define FDMA_ERRORS_ERR_XTR_OVF_GET(x)\ + FIELD_GET(FDMA_ERRORS_ERR_XTR_OVF, x) + +#define FDMA_ERRORS_ERR_XTR_TAXI32_OVF GENMASK(27, 26) +#define FDMA_ERRORS_ERR_XTR_TAXI32_OVF_SET(x)\ + FIELD_PREP(FDMA_ERRORS_ERR_XTR_TAXI32_OVF, x) +#define FDMA_ERRORS_ERR_XTR_TAXI32_OVF_GET(x)\ + FIELD_GET(FDMA_ERRORS_ERR_XTR_TAXI32_OVF, x) + +#define FDMA_ERRORS_ERR_DCB_XTR_DATAL GENMASK(25, 24) +#define FDMA_ERRORS_ERR_DCB_XTR_DATAL_SET(x)\ + FIELD_PREP(FDMA_ERRORS_ERR_DCB_XTR_DATAL, x) +#define FDMA_ERRORS_ERR_DCB_XTR_DATAL_GET(x)\ + FIELD_GET(FDMA_ERRORS_ERR_DCB_XTR_DATAL, x) + +#define FDMA_ERRORS_ERR_DCB_RD GENMASK(23, 16) +#define 
FDMA_ERRORS_ERR_DCB_RD_SET(x)\ + FIELD_PREP(FDMA_ERRORS_ERR_DCB_RD, x) +#define FDMA_ERRORS_ERR_DCB_RD_GET(x)\ + FIELD_GET(FDMA_ERRORS_ERR_DCB_RD, x) + +#define FDMA_ERRORS_ERR_INJ_RD GENMASK(15, 10) +#define FDMA_ERRORS_ERR_INJ_RD_SET(x)\ + FIELD_PREP(FDMA_ERRORS_ERR_INJ_RD, x) +#define FDMA_ERRORS_ERR_INJ_RD_GET(x)\ + FIELD_GET(FDMA_ERRORS_ERR_INJ_RD, x) + +#define FDMA_ERRORS_ERR_INJ_OUT_OF_SYNC GENMASK(9, 8) +#define FDMA_ERRORS_ERR_INJ_OUT_OF_SYNC_SET(x)\ + FIELD_PREP(FDMA_ERRORS_ERR_INJ_OUT_OF_SYNC, x) +#define FDMA_ERRORS_ERR_INJ_OUT_OF_SYNC_GET(x)\ + FIELD_GET(FDMA_ERRORS_ERR_INJ_OUT_OF_SYNC, x) + +#define FDMA_ERRORS_ERR_CH_WR GENMASK(7, 0) +#define FDMA_ERRORS_ERR_CH_WR_SET(x)\ + FIELD_PREP(FDMA_ERRORS_ERR_CH_WR, x) +#define FDMA_ERRORS_ERR_CH_WR_GET(x)\ + FIELD_GET(FDMA_ERRORS_ERR_CH_WR, x) + +/* FDMA:FDMA:FDMA_ERRORS_2 */ +#define FDMA_ERRORS_2 __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 416, 0, 1, 4) + +#define FDMA_ERRORS_2_ERR_XTR_FRAG GENMASK(1, 0) +#define FDMA_ERRORS_2_ERR_XTR_FRAG_SET(x)\ + FIELD_PREP(FDMA_ERRORS_2_ERR_XTR_FRAG, x) +#define FDMA_ERRORS_2_ERR_XTR_FRAG_GET(x)\ + FIELD_GET(FDMA_ERRORS_2_ERR_XTR_FRAG, x) + +/* FDMA:FDMA:FDMA_CTRL */ +#define FDMA_CTRL __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 424, 0, 1, 4) + +#define FDMA_CTRL_NRESET BIT(0) +#define FDMA_CTRL_NRESET_SET(x)\ + FIELD_PREP(FDMA_CTRL_NRESET, x) +#define FDMA_CTRL_NRESET_GET(x)\ + FIELD_GET(FDMA_CTRL_NRESET, x) + +/* DEVCPU_GCB:CHIP_REGS:CHIP_ID */ +#define GCB_CHIP_ID __REG(TARGET_GCB, 0, 1, 0, 0, 1, 424, 0, 0, 1, 4) + +#define GCB_CHIP_ID_REV_ID GENMASK(31, 28) +#define GCB_CHIP_ID_REV_ID_SET(x)\ + FIELD_PREP(GCB_CHIP_ID_REV_ID, x) +#define GCB_CHIP_ID_REV_ID_GET(x)\ + FIELD_GET(GCB_CHIP_ID_REV_ID, x) + +#define GCB_CHIP_ID_PART_ID GENMASK(27, 12) +#define GCB_CHIP_ID_PART_ID_SET(x)\ + FIELD_PREP(GCB_CHIP_ID_PART_ID, x) +#define GCB_CHIP_ID_PART_ID_GET(x)\ + FIELD_GET(GCB_CHIP_ID_PART_ID, x) + +#define GCB_CHIP_ID_MFG_ID GENMASK(11, 1) +#define GCB_CHIP_ID_MFG_ID_SET(x)\ + FIELD_PREP(GCB_CHIP_ID_MFG_ID, x) +#define GCB_CHIP_ID_MFG_ID_GET(x)\ + FIELD_GET(GCB_CHIP_ID_MFG_ID, x) + +#define GCB_CHIP_ID_ONE BIT(0) +#define GCB_CHIP_ID_ONE_SET(x)\ + FIELD_PREP(GCB_CHIP_ID_ONE, x) +#define GCB_CHIP_ID_ONE_GET(x)\ + FIELD_GET(GCB_CHIP_ID_ONE, x) + +/* DEVCPU_GCB:CHIP_REGS:SOFT_RST */ +#define GCB_SOFT_RST __REG(TARGET_GCB, 0, 1, 0, 0, 1, 424, 8, 0, 1, 4) + +#define GCB_SOFT_RST_SOFT_NON_CFG_RST BIT(2) +#define GCB_SOFT_RST_SOFT_NON_CFG_RST_SET(x)\ + FIELD_PREP(GCB_SOFT_RST_SOFT_NON_CFG_RST, x) +#define GCB_SOFT_RST_SOFT_NON_CFG_RST_GET(x)\ + FIELD_GET(GCB_SOFT_RST_SOFT_NON_CFG_RST, x) + +#define GCB_SOFT_RST_SOFT_SWC_RST BIT(1) +#define GCB_SOFT_RST_SOFT_SWC_RST_SET(x)\ + FIELD_PREP(GCB_SOFT_RST_SOFT_SWC_RST, x) +#define GCB_SOFT_RST_SOFT_SWC_RST_GET(x)\ + FIELD_GET(GCB_SOFT_RST_SOFT_SWC_RST, x) + +#define GCB_SOFT_RST_SOFT_CHIP_RST BIT(0) +#define GCB_SOFT_RST_SOFT_CHIP_RST_SET(x)\ + FIELD_PREP(GCB_SOFT_RST_SOFT_CHIP_RST, x) +#define GCB_SOFT_RST_SOFT_CHIP_RST_GET(x)\ + FIELD_GET(GCB_SOFT_RST_SOFT_CHIP_RST, x) + +/* DEVCPU_GCB:CHIP_REGS:HW_SGPIO_SD_CFG */ +#define GCB_HW_SGPIO_SD_CFG __REG(TARGET_GCB, 0, 1, 0, 0, 1, 424, 20, 0, 1, 4) + +#define GCB_HW_SGPIO_SD_CFG_SD_HIGH_ENA BIT(1) +#define GCB_HW_SGPIO_SD_CFG_SD_HIGH_ENA_SET(x)\ + FIELD_PREP(GCB_HW_SGPIO_SD_CFG_SD_HIGH_ENA, x) +#define GCB_HW_SGPIO_SD_CFG_SD_HIGH_ENA_GET(x)\ + FIELD_GET(GCB_HW_SGPIO_SD_CFG_SD_HIGH_ENA, x) + +#define GCB_HW_SGPIO_SD_CFG_SD_MAP_SEL BIT(0) +#define GCB_HW_SGPIO_SD_CFG_SD_MAP_SEL_SET(x)\ + 
FIELD_PREP(GCB_HW_SGPIO_SD_CFG_SD_MAP_SEL, x) +#define GCB_HW_SGPIO_SD_CFG_SD_MAP_SEL_GET(x)\ + FIELD_GET(GCB_HW_SGPIO_SD_CFG_SD_MAP_SEL, x) + +/* DEVCPU_GCB:CHIP_REGS:HW_SGPIO_TO_SD_MAP_CFG */ +#define GCB_HW_SGPIO_TO_SD_MAP_CFG(r) __REG(TARGET_GCB, 0, 1, 0, 0, 1, 424, 24, r, 65, 4) + +#define GCB_HW_SGPIO_TO_SD_MAP_CFG_SGPIO_TO_SD_SEL GENMASK(8, 0) +#define GCB_HW_SGPIO_TO_SD_MAP_CFG_SGPIO_TO_SD_SEL_SET(x)\ + FIELD_PREP(GCB_HW_SGPIO_TO_SD_MAP_CFG_SGPIO_TO_SD_SEL, x) +#define GCB_HW_SGPIO_TO_SD_MAP_CFG_SGPIO_TO_SD_SEL_GET(x)\ + FIELD_GET(GCB_HW_SGPIO_TO_SD_MAP_CFG_SGPIO_TO_SD_SEL, x) + +/* DEVCPU_GCB:SIO_CTRL:SIO_CLOCK */ +#define GCB_SIO_CLOCK(g) __REG(TARGET_GCB, 0, 1, 876, g, 3, 280, 20, 0, 1, 4) + +#define GCB_SIO_CLOCK_SIO_CLK_FREQ GENMASK(19, 8) +#define GCB_SIO_CLOCK_SIO_CLK_FREQ_SET(x)\ + FIELD_PREP(GCB_SIO_CLOCK_SIO_CLK_FREQ, x) +#define GCB_SIO_CLOCK_SIO_CLK_FREQ_GET(x)\ + FIELD_GET(GCB_SIO_CLOCK_SIO_CLK_FREQ, x) + +#define GCB_SIO_CLOCK_SYS_CLK_PERIOD GENMASK(7, 0) +#define GCB_SIO_CLOCK_SYS_CLK_PERIOD_SET(x)\ + FIELD_PREP(GCB_SIO_CLOCK_SYS_CLK_PERIOD, x) +#define GCB_SIO_CLOCK_SYS_CLK_PERIOD_GET(x)\ + FIELD_GET(GCB_SIO_CLOCK_SYS_CLK_PERIOD, x) + +/* HSCH:HSCH_CFG:CIR_CFG */ +#define HSCH_CIR_CFG(g) __REG(TARGET_HSCH, 0, 1, 0, g, 5040, 32, 0, 0, 1, 4) + +#define HSCH_CIR_CFG_CIR_RATE GENMASK(22, 6) +#define HSCH_CIR_CFG_CIR_RATE_SET(x)\ + FIELD_PREP(HSCH_CIR_CFG_CIR_RATE, x) +#define HSCH_CIR_CFG_CIR_RATE_GET(x)\ + FIELD_GET(HSCH_CIR_CFG_CIR_RATE, x) + +#define HSCH_CIR_CFG_CIR_BURST GENMASK(5, 0) +#define HSCH_CIR_CFG_CIR_BURST_SET(x)\ + FIELD_PREP(HSCH_CIR_CFG_CIR_BURST, x) +#define HSCH_CIR_CFG_CIR_BURST_GET(x)\ + FIELD_GET(HSCH_CIR_CFG_CIR_BURST, x) + +/* HSCH:HSCH_CFG:EIR_CFG */ +#define HSCH_EIR_CFG(g) __REG(TARGET_HSCH, 0, 1, 0, g, 5040, 32, 4, 0, 1, 4) + +#define HSCH_EIR_CFG_EIR_RATE GENMASK(22, 6) +#define HSCH_EIR_CFG_EIR_RATE_SET(x)\ + FIELD_PREP(HSCH_EIR_CFG_EIR_RATE, x) +#define HSCH_EIR_CFG_EIR_RATE_GET(x)\ + FIELD_GET(HSCH_EIR_CFG_EIR_RATE, x) + +#define HSCH_EIR_CFG_EIR_BURST GENMASK(5, 0) +#define HSCH_EIR_CFG_EIR_BURST_SET(x)\ + FIELD_PREP(HSCH_EIR_CFG_EIR_BURST, x) +#define HSCH_EIR_CFG_EIR_BURST_GET(x)\ + FIELD_GET(HSCH_EIR_CFG_EIR_BURST, x) + +/* HSCH:HSCH_CFG:SE_CFG */ +#define HSCH_SE_CFG(g) __REG(TARGET_HSCH, 0, 1, 0, g, 5040, 32, 8, 0, 1, 4) + +#define HSCH_SE_CFG_SE_DWRR_CNT GENMASK(12, 6) +#define HSCH_SE_CFG_SE_DWRR_CNT_SET(x)\ + FIELD_PREP(HSCH_SE_CFG_SE_DWRR_CNT, x) +#define HSCH_SE_CFG_SE_DWRR_CNT_GET(x)\ + FIELD_GET(HSCH_SE_CFG_SE_DWRR_CNT, x) + +#define HSCH_SE_CFG_SE_AVB_ENA BIT(5) +#define HSCH_SE_CFG_SE_AVB_ENA_SET(x)\ + FIELD_PREP(HSCH_SE_CFG_SE_AVB_ENA, x) +#define HSCH_SE_CFG_SE_AVB_ENA_GET(x)\ + FIELD_GET(HSCH_SE_CFG_SE_AVB_ENA, x) + +#define HSCH_SE_CFG_SE_FRM_MODE GENMASK(4, 3) +#define HSCH_SE_CFG_SE_FRM_MODE_SET(x)\ + FIELD_PREP(HSCH_SE_CFG_SE_FRM_MODE, x) +#define HSCH_SE_CFG_SE_FRM_MODE_GET(x)\ + FIELD_GET(HSCH_SE_CFG_SE_FRM_MODE, x) + +#define HSCH_SE_CFG_SE_DWRR_FRM_MODE GENMASK(2, 1) +#define HSCH_SE_CFG_SE_DWRR_FRM_MODE_SET(x)\ + FIELD_PREP(HSCH_SE_CFG_SE_DWRR_FRM_MODE, x) +#define HSCH_SE_CFG_SE_DWRR_FRM_MODE_GET(x)\ + FIELD_GET(HSCH_SE_CFG_SE_DWRR_FRM_MODE, x) + +#define HSCH_SE_CFG_SE_STOP BIT(0) +#define HSCH_SE_CFG_SE_STOP_SET(x)\ + FIELD_PREP(HSCH_SE_CFG_SE_STOP, x) +#define HSCH_SE_CFG_SE_STOP_GET(x)\ + FIELD_GET(HSCH_SE_CFG_SE_STOP, x) + +/* HSCH:HSCH_CFG:SE_CONNECT */ +#define HSCH_SE_CONNECT(g) __REG(TARGET_HSCH, 0, 1, 0, g, 5040, 32, 12, 0, 1, 4) + +#define HSCH_SE_CONNECT_SE_LEAK_LINK GENMASK(15, 0) +#define 
HSCH_SE_CONNECT_SE_LEAK_LINK_SET(x)\ + FIELD_PREP(HSCH_SE_CONNECT_SE_LEAK_LINK, x) +#define HSCH_SE_CONNECT_SE_LEAK_LINK_GET(x)\ + FIELD_GET(HSCH_SE_CONNECT_SE_LEAK_LINK, x) + +/* HSCH:HSCH_CFG:SE_DLB_SENSE */ +#define HSCH_SE_DLB_SENSE(g) __REG(TARGET_HSCH, 0, 1, 0, g, 5040, 32, 16, 0, 1, 4) + +#define HSCH_SE_DLB_SENSE_SE_DLB_PRIO GENMASK(12, 10) +#define HSCH_SE_DLB_SENSE_SE_DLB_PRIO_SET(x)\ + FIELD_PREP(HSCH_SE_DLB_SENSE_SE_DLB_PRIO, x) +#define HSCH_SE_DLB_SENSE_SE_DLB_PRIO_GET(x)\ + FIELD_GET(HSCH_SE_DLB_SENSE_SE_DLB_PRIO, x) + +#define HSCH_SE_DLB_SENSE_SE_DLB_DPORT GENMASK(9, 3) +#define HSCH_SE_DLB_SENSE_SE_DLB_DPORT_SET(x)\ + FIELD_PREP(HSCH_SE_DLB_SENSE_SE_DLB_DPORT, x) +#define HSCH_SE_DLB_SENSE_SE_DLB_DPORT_GET(x)\ + FIELD_GET(HSCH_SE_DLB_SENSE_SE_DLB_DPORT, x) + +#define HSCH_SE_DLB_SENSE_SE_DLB_SE_ENA BIT(2) +#define HSCH_SE_DLB_SENSE_SE_DLB_SE_ENA_SET(x)\ + FIELD_PREP(HSCH_SE_DLB_SENSE_SE_DLB_SE_ENA, x) +#define HSCH_SE_DLB_SENSE_SE_DLB_SE_ENA_GET(x)\ + FIELD_GET(HSCH_SE_DLB_SENSE_SE_DLB_SE_ENA, x) + +#define HSCH_SE_DLB_SENSE_SE_DLB_PRIO_ENA BIT(1) +#define HSCH_SE_DLB_SENSE_SE_DLB_PRIO_ENA_SET(x)\ + FIELD_PREP(HSCH_SE_DLB_SENSE_SE_DLB_PRIO_ENA, x) +#define HSCH_SE_DLB_SENSE_SE_DLB_PRIO_ENA_GET(x)\ + FIELD_GET(HSCH_SE_DLB_SENSE_SE_DLB_PRIO_ENA, x) + +#define HSCH_SE_DLB_SENSE_SE_DLB_DPORT_ENA BIT(0) +#define HSCH_SE_DLB_SENSE_SE_DLB_DPORT_ENA_SET(x)\ + FIELD_PREP(HSCH_SE_DLB_SENSE_SE_DLB_DPORT_ENA, x) +#define HSCH_SE_DLB_SENSE_SE_DLB_DPORT_ENA_GET(x)\ + FIELD_GET(HSCH_SE_DLB_SENSE_SE_DLB_DPORT_ENA, x) + +/* HSCH:HSCH_DWRR:DWRR_ENTRY */ +#define HSCH_DWRR_ENTRY(g) __REG(TARGET_HSCH, 0, 1, 162816, g, 72, 4, 0, 0, 1, 4) + +#define HSCH_DWRR_ENTRY_DWRR_COST GENMASK(24, 20) +#define HSCH_DWRR_ENTRY_DWRR_COST_SET(x)\ + FIELD_PREP(HSCH_DWRR_ENTRY_DWRR_COST, x) +#define HSCH_DWRR_ENTRY_DWRR_COST_GET(x)\ + FIELD_GET(HSCH_DWRR_ENTRY_DWRR_COST, x) + +#define HSCH_DWRR_ENTRY_DWRR_BALANCE GENMASK(19, 0) +#define HSCH_DWRR_ENTRY_DWRR_BALANCE_SET(x)\ + FIELD_PREP(HSCH_DWRR_ENTRY_DWRR_BALANCE, x) +#define HSCH_DWRR_ENTRY_DWRR_BALANCE_GET(x)\ + FIELD_GET(HSCH_DWRR_ENTRY_DWRR_BALANCE, x) + +/* HSCH:HSCH_MISC:HSCH_CFG_CFG */ +#define HSCH_HSCH_CFG_CFG __REG(TARGET_HSCH, 0, 1, 163104, 0, 1, 648, 284, 0, 1, 4) + +#define HSCH_HSCH_CFG_CFG_CFG_SE_IDX GENMASK(26, 14) +#define HSCH_HSCH_CFG_CFG_CFG_SE_IDX_SET(x)\ + FIELD_PREP(HSCH_HSCH_CFG_CFG_CFG_SE_IDX, x) +#define HSCH_HSCH_CFG_CFG_CFG_SE_IDX_GET(x)\ + FIELD_GET(HSCH_HSCH_CFG_CFG_CFG_SE_IDX, x) + +#define HSCH_HSCH_CFG_CFG_HSCH_LAYER GENMASK(13, 12) +#define HSCH_HSCH_CFG_CFG_HSCH_LAYER_SET(x)\ + FIELD_PREP(HSCH_HSCH_CFG_CFG_HSCH_LAYER, x) +#define HSCH_HSCH_CFG_CFG_HSCH_LAYER_GET(x)\ + FIELD_GET(HSCH_HSCH_CFG_CFG_HSCH_LAYER, x) + +#define HSCH_HSCH_CFG_CFG_CSR_GRANT GENMASK(11, 0) +#define HSCH_HSCH_CFG_CFG_CSR_GRANT_SET(x)\ + FIELD_PREP(HSCH_HSCH_CFG_CFG_CSR_GRANT, x) +#define HSCH_HSCH_CFG_CFG_CSR_GRANT_GET(x)\ + FIELD_GET(HSCH_HSCH_CFG_CFG_CSR_GRANT, x) + +/* HSCH:HSCH_MISC:SYS_CLK_PER */ +#define HSCH_SYS_CLK_PER __REG(TARGET_HSCH, 0, 1, 163104, 0, 1, 648, 640, 0, 1, 4) + +#define HSCH_SYS_CLK_PER_SYS_CLK_PER_100PS GENMASK(7, 0) +#define HSCH_SYS_CLK_PER_SYS_CLK_PER_100PS_SET(x)\ + FIELD_PREP(HSCH_SYS_CLK_PER_SYS_CLK_PER_100PS, x) +#define HSCH_SYS_CLK_PER_SYS_CLK_PER_100PS_GET(x)\ + FIELD_GET(HSCH_SYS_CLK_PER_SYS_CLK_PER_100PS, x) + +/* HSCH:HSCH_LEAK_LISTS:HSCH_TIMER_CFG */ +#define HSCH_HSCH_TIMER_CFG(g, r) __REG(TARGET_HSCH, 0, 1, 161664, g, 4, 32, 0, r, 4, 4) + +#define HSCH_HSCH_TIMER_CFG_LEAK_TIME GENMASK(17, 0) +#define 
HSCH_HSCH_TIMER_CFG_LEAK_TIME_SET(x)\ + FIELD_PREP(HSCH_HSCH_TIMER_CFG_LEAK_TIME, x) +#define HSCH_HSCH_TIMER_CFG_LEAK_TIME_GET(x)\ + FIELD_GET(HSCH_HSCH_TIMER_CFG_LEAK_TIME, x) + +/* HSCH:HSCH_LEAK_LISTS:HSCH_LEAK_CFG */ +#define HSCH_HSCH_LEAK_CFG(g, r) __REG(TARGET_HSCH, 0, 1, 161664, g, 4, 32, 16, r, 4, 4) + +#define HSCH_HSCH_LEAK_CFG_LEAK_FIRST GENMASK(16, 1) +#define HSCH_HSCH_LEAK_CFG_LEAK_FIRST_SET(x)\ + FIELD_PREP(HSCH_HSCH_LEAK_CFG_LEAK_FIRST, x) +#define HSCH_HSCH_LEAK_CFG_LEAK_FIRST_GET(x)\ + FIELD_GET(HSCH_HSCH_LEAK_CFG_LEAK_FIRST, x) + +#define HSCH_HSCH_LEAK_CFG_LEAK_ERR BIT(0) +#define HSCH_HSCH_LEAK_CFG_LEAK_ERR_SET(x)\ + FIELD_PREP(HSCH_HSCH_LEAK_CFG_LEAK_ERR, x) +#define HSCH_HSCH_LEAK_CFG_LEAK_ERR_GET(x)\ + FIELD_GET(HSCH_HSCH_LEAK_CFG_LEAK_ERR, x) + +/* HSCH:SYSTEM:FLUSH_CTRL */ +#define HSCH_FLUSH_CTRL __REG(TARGET_HSCH, 0, 1, 184000, 0, 1, 312, 4, 0, 1, 4) + +#define HSCH_FLUSH_CTRL_FLUSH_ENA BIT(27) +#define HSCH_FLUSH_CTRL_FLUSH_ENA_SET(x)\ + FIELD_PREP(HSCH_FLUSH_CTRL_FLUSH_ENA, x) +#define HSCH_FLUSH_CTRL_FLUSH_ENA_GET(x)\ + FIELD_GET(HSCH_FLUSH_CTRL_FLUSH_ENA, x) + +#define HSCH_FLUSH_CTRL_FLUSH_SRC BIT(26) +#define HSCH_FLUSH_CTRL_FLUSH_SRC_SET(x)\ + FIELD_PREP(HSCH_FLUSH_CTRL_FLUSH_SRC, x) +#define HSCH_FLUSH_CTRL_FLUSH_SRC_GET(x)\ + FIELD_GET(HSCH_FLUSH_CTRL_FLUSH_SRC, x) + +#define HSCH_FLUSH_CTRL_FLUSH_DST BIT(25) +#define HSCH_FLUSH_CTRL_FLUSH_DST_SET(x)\ + FIELD_PREP(HSCH_FLUSH_CTRL_FLUSH_DST, x) +#define HSCH_FLUSH_CTRL_FLUSH_DST_GET(x)\ + FIELD_GET(HSCH_FLUSH_CTRL_FLUSH_DST, x) + +#define HSCH_FLUSH_CTRL_FLUSH_PORT GENMASK(24, 18) +#define HSCH_FLUSH_CTRL_FLUSH_PORT_SET(x)\ + FIELD_PREP(HSCH_FLUSH_CTRL_FLUSH_PORT, x) +#define HSCH_FLUSH_CTRL_FLUSH_PORT_GET(x)\ + FIELD_GET(HSCH_FLUSH_CTRL_FLUSH_PORT, x) + +#define HSCH_FLUSH_CTRL_FLUSH_QUEUE BIT(17) +#define HSCH_FLUSH_CTRL_FLUSH_QUEUE_SET(x)\ + FIELD_PREP(HSCH_FLUSH_CTRL_FLUSH_QUEUE, x) +#define HSCH_FLUSH_CTRL_FLUSH_QUEUE_GET(x)\ + FIELD_GET(HSCH_FLUSH_CTRL_FLUSH_QUEUE, x) + +#define HSCH_FLUSH_CTRL_FLUSH_SE BIT(16) +#define HSCH_FLUSH_CTRL_FLUSH_SE_SET(x)\ + FIELD_PREP(HSCH_FLUSH_CTRL_FLUSH_SE, x) +#define HSCH_FLUSH_CTRL_FLUSH_SE_GET(x)\ + FIELD_GET(HSCH_FLUSH_CTRL_FLUSH_SE, x) + +#define HSCH_FLUSH_CTRL_FLUSH_HIER GENMASK(15, 0) +#define HSCH_FLUSH_CTRL_FLUSH_HIER_SET(x)\ + FIELD_PREP(HSCH_FLUSH_CTRL_FLUSH_HIER, x) +#define HSCH_FLUSH_CTRL_FLUSH_HIER_GET(x)\ + FIELD_GET(HSCH_FLUSH_CTRL_FLUSH_HIER, x) + +/* HSCH:SYSTEM:PORT_MODE */ +#define HSCH_PORT_MODE(r) __REG(TARGET_HSCH, 0, 1, 184000, 0, 1, 312, 8, r, 70, 4) + +#define HSCH_PORT_MODE_DEQUEUE_DIS BIT(4) +#define HSCH_PORT_MODE_DEQUEUE_DIS_SET(x)\ + FIELD_PREP(HSCH_PORT_MODE_DEQUEUE_DIS, x) +#define HSCH_PORT_MODE_DEQUEUE_DIS_GET(x)\ + FIELD_GET(HSCH_PORT_MODE_DEQUEUE_DIS, x) + +#define HSCH_PORT_MODE_AGE_DIS BIT(3) +#define HSCH_PORT_MODE_AGE_DIS_SET(x)\ + FIELD_PREP(HSCH_PORT_MODE_AGE_DIS, x) +#define HSCH_PORT_MODE_AGE_DIS_GET(x)\ + FIELD_GET(HSCH_PORT_MODE_AGE_DIS, x) + +#define HSCH_PORT_MODE_TRUNC_ENA BIT(2) +#define HSCH_PORT_MODE_TRUNC_ENA_SET(x)\ + FIELD_PREP(HSCH_PORT_MODE_TRUNC_ENA, x) +#define HSCH_PORT_MODE_TRUNC_ENA_GET(x)\ + FIELD_GET(HSCH_PORT_MODE_TRUNC_ENA, x) + +#define HSCH_PORT_MODE_EIR_REMARK_ENA BIT(1) +#define HSCH_PORT_MODE_EIR_REMARK_ENA_SET(x)\ + FIELD_PREP(HSCH_PORT_MODE_EIR_REMARK_ENA, x) +#define HSCH_PORT_MODE_EIR_REMARK_ENA_GET(x)\ + FIELD_GET(HSCH_PORT_MODE_EIR_REMARK_ENA, x) + +#define HSCH_PORT_MODE_CPU_PRIO_MODE BIT(0) +#define HSCH_PORT_MODE_CPU_PRIO_MODE_SET(x)\ + FIELD_PREP(HSCH_PORT_MODE_CPU_PRIO_MODE, x) 
+#define HSCH_PORT_MODE_CPU_PRIO_MODE_GET(x)\ + FIELD_GET(HSCH_PORT_MODE_CPU_PRIO_MODE, x) + +/* HSCH:SYSTEM:OUTB_SHARE_ENA */ +#define HSCH_OUTB_SHARE_ENA(r) __REG(TARGET_HSCH, 0, 1, 184000, 0, 1, 312, 288, r, 5, 4) + +#define HSCH_OUTB_SHARE_ENA_OUTB_SHARE_ENA GENMASK(7, 0) +#define HSCH_OUTB_SHARE_ENA_OUTB_SHARE_ENA_SET(x)\ + FIELD_PREP(HSCH_OUTB_SHARE_ENA_OUTB_SHARE_ENA, x) +#define HSCH_OUTB_SHARE_ENA_OUTB_SHARE_ENA_GET(x)\ + FIELD_GET(HSCH_OUTB_SHARE_ENA_OUTB_SHARE_ENA, x) + +/* HSCH:MMGT:RESET_CFG */ +#define HSCH_RESET_CFG __REG(TARGET_HSCH, 0, 1, 162368, 0, 1, 16, 8, 0, 1, 4) + +#define HSCH_RESET_CFG_CORE_ENA BIT(0) +#define HSCH_RESET_CFG_CORE_ENA_SET(x)\ + FIELD_PREP(HSCH_RESET_CFG_CORE_ENA, x) +#define HSCH_RESET_CFG_CORE_ENA_GET(x)\ + FIELD_GET(HSCH_RESET_CFG_CORE_ENA, x) + +/* HSCH:TAS_CONFIG:TAS_STATEMACHINE_CFG */ +#define HSCH_TAS_STATEMACHINE_CFG __REG(TARGET_HSCH, 0, 1, 162384, 0, 1, 12, 8, 0, 1, 4) + +#define HSCH_TAS_STATEMACHINE_CFG_REVISIT_DLY GENMASK(7, 0) +#define HSCH_TAS_STATEMACHINE_CFG_REVISIT_DLY_SET(x)\ + FIELD_PREP(HSCH_TAS_STATEMACHINE_CFG_REVISIT_DLY, x) +#define HSCH_TAS_STATEMACHINE_CFG_REVISIT_DLY_GET(x)\ + FIELD_GET(HSCH_TAS_STATEMACHINE_CFG_REVISIT_DLY, x) + +/* LRN:COMMON:COMMON_ACCESS_CTRL */ +#define LRN_COMMON_ACCESS_CTRL __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 0, 0, 1, 4) + +#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_COL GENMASK(21, 20) +#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_COL_SET(x)\ + FIELD_PREP(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_COL, x) +#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_COL_GET(x)\ + FIELD_GET(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_COL, x) + +#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_TYPE BIT(19) +#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_TYPE_SET(x)\ + FIELD_PREP(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_TYPE, x) +#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_TYPE_GET(x)\ + FIELD_GET(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_TYPE, x) + +#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_ROW GENMASK(18, 5) +#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_ROW_SET(x)\ + FIELD_PREP(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_ROW, x) +#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_ROW_GET(x)\ + FIELD_GET(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_ROW, x) + +#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD GENMASK(4, 1) +#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD_SET(x)\ + FIELD_PREP(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD, x) +#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD_GET(x)\ + FIELD_GET(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD, x) + +#define LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT BIT(0) +#define LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_SET(x)\ + FIELD_PREP(LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT, x) +#define LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_GET(x)\ + FIELD_GET(LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT, x) + +/* LRN:COMMON:MAC_ACCESS_CFG_0 */ +#define LRN_MAC_ACCESS_CFG_0 __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 4, 0, 1, 4) + +#define LRN_MAC_ACCESS_CFG_0_MAC_ENTRY_FID GENMASK(28, 16) +#define LRN_MAC_ACCESS_CFG_0_MAC_ENTRY_FID_SET(x)\ + FIELD_PREP(LRN_MAC_ACCESS_CFG_0_MAC_ENTRY_FID, x) +#define LRN_MAC_ACCESS_CFG_0_MAC_ENTRY_FID_GET(x)\ + FIELD_GET(LRN_MAC_ACCESS_CFG_0_MAC_ENTRY_FID, x) + +#define LRN_MAC_ACCESS_CFG_0_MAC_ENTRY_MAC_MSB GENMASK(15, 0) +#define LRN_MAC_ACCESS_CFG_0_MAC_ENTRY_MAC_MSB_SET(x)\ + FIELD_PREP(LRN_MAC_ACCESS_CFG_0_MAC_ENTRY_MAC_MSB, x) +#define LRN_MAC_ACCESS_CFG_0_MAC_ENTRY_MAC_MSB_GET(x)\ + FIELD_GET(LRN_MAC_ACCESS_CFG_0_MAC_ENTRY_MAC_MSB, x) + 
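The same accessor pattern repeats for every register field in this header: each *_SET(x) macro packs a value into its bit range with FIELD_PREP() and the matching *_GET(x) macro extracts it with FIELD_GET(). A minimal illustrative sketch (not part of this driver; the function and variable names below are hypothetical) of composing and decoding the LRN_MAC_ACCESS_CFG_0 fields defined just above:

	/* Illustrative only: pack the FID and MAC MSB fields into one
	 * LRN_MAC_ACCESS_CFG_0 register value, then read one field back.
	 */
	#include <linux/bitfield.h>
	#include <linux/types.h>

	static u32 example_pack_mac_cfg0(u16 fid, u16 mac_msb)
	{
		/* Each _SET() places its argument at the field's bit offset */
		return LRN_MAC_ACCESS_CFG_0_MAC_ENTRY_FID_SET(fid) |
		       LRN_MAC_ACCESS_CFG_0_MAC_ENTRY_MAC_MSB_SET(mac_msb);
	}

	static u16 example_unpack_mac_msb(u32 cfg0)
	{
		/* _GET() masks and shifts the field back out of the value */
		return LRN_MAC_ACCESS_CFG_0_MAC_ENTRY_MAC_MSB_GET(cfg0);
	}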
+/* LRN:COMMON:MAC_ACCESS_CFG_1 */ +#define LRN_MAC_ACCESS_CFG_1 __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 8, 0, 1, 4) + +/* LRN:COMMON:MAC_ACCESS_CFG_2 */ +#define LRN_MAC_ACCESS_CFG_2 __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 12, 0, 1, 4) + +#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_SRC_KILL_FWD BIT(28) +#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_SRC_KILL_FWD_SET(x)\ + FIELD_PREP(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_SRC_KILL_FWD, x) +#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_SRC_KILL_FWD_GET(x)\ + FIELD_GET(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_SRC_KILL_FWD, x) + +#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_NXT_LRN_ALL BIT(27) +#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_NXT_LRN_ALL_SET(x)\ + FIELD_PREP(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_NXT_LRN_ALL, x) +#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_NXT_LRN_ALL_GET(x)\ + FIELD_GET(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_NXT_LRN_ALL, x) + +#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_CPU_QU GENMASK(26, 24) +#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_CPU_QU_SET(x)\ + FIELD_PREP(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_CPU_QU, x) +#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_CPU_QU_GET(x)\ + FIELD_GET(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_CPU_QU, x) + +#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_CPU_COPY BIT(23) +#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_CPU_COPY_SET(x)\ + FIELD_PREP(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_CPU_COPY, x) +#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_CPU_COPY_GET(x)\ + FIELD_GET(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_CPU_COPY, x) + +#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLAN_IGNORE BIT(22) +#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLAN_IGNORE_SET(x)\ + FIELD_PREP(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLAN_IGNORE, x) +#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLAN_IGNORE_GET(x)\ + FIELD_GET(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLAN_IGNORE, x) + +#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_MIRROR BIT(21) +#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_MIRROR_SET(x)\ + FIELD_PREP(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_MIRROR, x) +#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_MIRROR_GET(x)\ + FIELD_GET(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_MIRROR, x) + +#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_AGE_FLAG GENMASK(20, 19) +#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_AGE_FLAG_SET(x)\ + FIELD_PREP(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_AGE_FLAG, x) +#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_AGE_FLAG_GET(x)\ + FIELD_GET(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_AGE_FLAG, x) + +#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_AGE_INTERVAL GENMASK(18, 17) +#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_AGE_INTERVAL_SET(x)\ + FIELD_PREP(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_AGE_INTERVAL, x) +#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_AGE_INTERVAL_GET(x)\ + FIELD_GET(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_AGE_INTERVAL, x) + +#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_LOCKED BIT(16) +#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_LOCKED_SET(x)\ + FIELD_PREP(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_LOCKED, x) +#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_LOCKED_GET(x)\ + FIELD_GET(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_LOCKED, x) + +#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLD BIT(15) +#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLD_SET(x)\ + FIELD_PREP(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLD, x) +#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLD_GET(x)\ + FIELD_GET(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLD, x) + +#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_TYPE GENMASK(14, 12) +#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_TYPE_SET(x)\ + FIELD_PREP(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_TYPE, x) +#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_TYPE_GET(x)\ + FIELD_GET(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_TYPE, x) + +#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR GENMASK(11, 0) +#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_SET(x)\ + 
FIELD_PREP(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR, x) +#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_GET(x)\ + FIELD_GET(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR, x) + +/* LRN:COMMON:MAC_ACCESS_CFG_3 */ +#define LRN_MAC_ACCESS_CFG_3 __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 16, 0, 1, 4) + +#define LRN_MAC_ACCESS_CFG_3_MAC_ENTRY_ISDX_LIMIT_IDX GENMASK(10, 0) +#define LRN_MAC_ACCESS_CFG_3_MAC_ENTRY_ISDX_LIMIT_IDX_SET(x)\ + FIELD_PREP(LRN_MAC_ACCESS_CFG_3_MAC_ENTRY_ISDX_LIMIT_IDX, x) +#define LRN_MAC_ACCESS_CFG_3_MAC_ENTRY_ISDX_LIMIT_IDX_GET(x)\ + FIELD_GET(LRN_MAC_ACCESS_CFG_3_MAC_ENTRY_ISDX_LIMIT_IDX, x) + +/* LRN:COMMON:SCAN_NEXT_CFG */ +#define LRN_SCAN_NEXT_CFG __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 20, 0, 1, 4) + +#define LRN_SCAN_NEXT_CFG_SCAN_AGE_FLAG_UPDATE_SEL GENMASK(21, 19) +#define LRN_SCAN_NEXT_CFG_SCAN_AGE_FLAG_UPDATE_SEL_SET(x)\ + FIELD_PREP(LRN_SCAN_NEXT_CFG_SCAN_AGE_FLAG_UPDATE_SEL, x) +#define LRN_SCAN_NEXT_CFG_SCAN_AGE_FLAG_UPDATE_SEL_GET(x)\ + FIELD_GET(LRN_SCAN_NEXT_CFG_SCAN_AGE_FLAG_UPDATE_SEL, x) + +#define LRN_SCAN_NEXT_CFG_SCAN_NXT_LRN_ALL_UPDATE_SEL GENMASK(18, 17) +#define LRN_SCAN_NEXT_CFG_SCAN_NXT_LRN_ALL_UPDATE_SEL_SET(x)\ + FIELD_PREP(LRN_SCAN_NEXT_CFG_SCAN_NXT_LRN_ALL_UPDATE_SEL, x) +#define LRN_SCAN_NEXT_CFG_SCAN_NXT_LRN_ALL_UPDATE_SEL_GET(x)\ + FIELD_GET(LRN_SCAN_NEXT_CFG_SCAN_NXT_LRN_ALL_UPDATE_SEL, x) + +#define LRN_SCAN_NEXT_CFG_SCAN_AGE_FILTER_SEL GENMASK(16, 15) +#define LRN_SCAN_NEXT_CFG_SCAN_AGE_FILTER_SEL_SET(x)\ + FIELD_PREP(LRN_SCAN_NEXT_CFG_SCAN_AGE_FILTER_SEL, x) +#define LRN_SCAN_NEXT_CFG_SCAN_AGE_FILTER_SEL_GET(x)\ + FIELD_GET(LRN_SCAN_NEXT_CFG_SCAN_AGE_FILTER_SEL, x) + +#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_MOVE_FOUND_ENA BIT(14) +#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_MOVE_FOUND_ENA_SET(x)\ + FIELD_PREP(LRN_SCAN_NEXT_CFG_SCAN_NEXT_MOVE_FOUND_ENA, x) +#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_MOVE_FOUND_ENA_GET(x)\ + FIELD_GET(LRN_SCAN_NEXT_CFG_SCAN_NEXT_MOVE_FOUND_ENA, x) + +#define LRN_SCAN_NEXT_CFG_NXT_LRN_ALL_FILTER_ENA BIT(13) +#define LRN_SCAN_NEXT_CFG_NXT_LRN_ALL_FILTER_ENA_SET(x)\ + FIELD_PREP(LRN_SCAN_NEXT_CFG_NXT_LRN_ALL_FILTER_ENA, x) +#define LRN_SCAN_NEXT_CFG_NXT_LRN_ALL_FILTER_ENA_GET(x)\ + FIELD_GET(LRN_SCAN_NEXT_CFG_NXT_LRN_ALL_FILTER_ENA, x) + +#define LRN_SCAN_NEXT_CFG_SCAN_USE_PORT_FILTER_ENA BIT(12) +#define LRN_SCAN_NEXT_CFG_SCAN_USE_PORT_FILTER_ENA_SET(x)\ + FIELD_PREP(LRN_SCAN_NEXT_CFG_SCAN_USE_PORT_FILTER_ENA, x) +#define LRN_SCAN_NEXT_CFG_SCAN_USE_PORT_FILTER_ENA_GET(x)\ + FIELD_GET(LRN_SCAN_NEXT_CFG_SCAN_USE_PORT_FILTER_ENA, x) + +#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_REMOVE_FOUND_ENA BIT(11) +#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_REMOVE_FOUND_ENA_SET(x)\ + FIELD_PREP(LRN_SCAN_NEXT_CFG_SCAN_NEXT_REMOVE_FOUND_ENA, x) +#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_REMOVE_FOUND_ENA_GET(x)\ + FIELD_GET(LRN_SCAN_NEXT_CFG_SCAN_NEXT_REMOVE_FOUND_ENA, x) + +#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_UNTIL_FOUND_ENA BIT(10) +#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_UNTIL_FOUND_ENA_SET(x)\ + FIELD_PREP(LRN_SCAN_NEXT_CFG_SCAN_NEXT_UNTIL_FOUND_ENA, x) +#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_UNTIL_FOUND_ENA_GET(x)\ + FIELD_GET(LRN_SCAN_NEXT_CFG_SCAN_NEXT_UNTIL_FOUND_ENA, x) + +#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_INC_AGE_BITS_ENA BIT(9) +#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_INC_AGE_BITS_ENA_SET(x)\ + FIELD_PREP(LRN_SCAN_NEXT_CFG_SCAN_NEXT_INC_AGE_BITS_ENA, x) +#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_INC_AGE_BITS_ENA_GET(x)\ + FIELD_GET(LRN_SCAN_NEXT_CFG_SCAN_NEXT_INC_AGE_BITS_ENA, x) + +#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_AGED_ONLY_ENA BIT(8) +#define 
LRN_SCAN_NEXT_CFG_SCAN_NEXT_AGED_ONLY_ENA_SET(x)\ + FIELD_PREP(LRN_SCAN_NEXT_CFG_SCAN_NEXT_AGED_ONLY_ENA, x) +#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_AGED_ONLY_ENA_GET(x)\ + FIELD_GET(LRN_SCAN_NEXT_CFG_SCAN_NEXT_AGED_ONLY_ENA, x) + +#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_IGNORE_LOCKED_ENA BIT(7) +#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_IGNORE_LOCKED_ENA_SET(x)\ + FIELD_PREP(LRN_SCAN_NEXT_CFG_SCAN_NEXT_IGNORE_LOCKED_ENA, x) +#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_IGNORE_LOCKED_ENA_GET(x)\ + FIELD_GET(LRN_SCAN_NEXT_CFG_SCAN_NEXT_IGNORE_LOCKED_ENA, x) + +#define LRN_SCAN_NEXT_CFG_SCAN_AGE_INTERVAL_MASK GENMASK(6, 3) +#define LRN_SCAN_NEXT_CFG_SCAN_AGE_INTERVAL_MASK_SET(x)\ + FIELD_PREP(LRN_SCAN_NEXT_CFG_SCAN_AGE_INTERVAL_MASK, x) +#define LRN_SCAN_NEXT_CFG_SCAN_AGE_INTERVAL_MASK_GET(x)\ + FIELD_GET(LRN_SCAN_NEXT_CFG_SCAN_AGE_INTERVAL_MASK, x) + +#define LRN_SCAN_NEXT_CFG_ISDX_LIMIT_IDX_FILTER_ENA BIT(2) +#define LRN_SCAN_NEXT_CFG_ISDX_LIMIT_IDX_FILTER_ENA_SET(x)\ + FIELD_PREP(LRN_SCAN_NEXT_CFG_ISDX_LIMIT_IDX_FILTER_ENA, x) +#define LRN_SCAN_NEXT_CFG_ISDX_LIMIT_IDX_FILTER_ENA_GET(x)\ + FIELD_GET(LRN_SCAN_NEXT_CFG_ISDX_LIMIT_IDX_FILTER_ENA, x) + +#define LRN_SCAN_NEXT_CFG_FID_FILTER_ENA BIT(1) +#define LRN_SCAN_NEXT_CFG_FID_FILTER_ENA_SET(x)\ + FIELD_PREP(LRN_SCAN_NEXT_CFG_FID_FILTER_ENA, x) +#define LRN_SCAN_NEXT_CFG_FID_FILTER_ENA_GET(x)\ + FIELD_GET(LRN_SCAN_NEXT_CFG_FID_FILTER_ENA, x) + +#define LRN_SCAN_NEXT_CFG_ADDR_FILTER_ENA BIT(0) +#define LRN_SCAN_NEXT_CFG_ADDR_FILTER_ENA_SET(x)\ + FIELD_PREP(LRN_SCAN_NEXT_CFG_ADDR_FILTER_ENA, x) +#define LRN_SCAN_NEXT_CFG_ADDR_FILTER_ENA_GET(x)\ + FIELD_GET(LRN_SCAN_NEXT_CFG_ADDR_FILTER_ENA, x) + +/* LRN:COMMON:SCAN_NEXT_CFG_1 */ +#define LRN_SCAN_NEXT_CFG_1 __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 24, 0, 1, 4) + +#define LRN_SCAN_NEXT_CFG_1_PORT_MOVE_NEW_ADDR GENMASK(30, 16) +#define LRN_SCAN_NEXT_CFG_1_PORT_MOVE_NEW_ADDR_SET(x)\ + FIELD_PREP(LRN_SCAN_NEXT_CFG_1_PORT_MOVE_NEW_ADDR, x) +#define LRN_SCAN_NEXT_CFG_1_PORT_MOVE_NEW_ADDR_GET(x)\ + FIELD_GET(LRN_SCAN_NEXT_CFG_1_PORT_MOVE_NEW_ADDR, x) + +#define LRN_SCAN_NEXT_CFG_1_SCAN_ENTRY_ADDR_MASK GENMASK(14, 0) +#define LRN_SCAN_NEXT_CFG_1_SCAN_ENTRY_ADDR_MASK_SET(x)\ + FIELD_PREP(LRN_SCAN_NEXT_CFG_1_SCAN_ENTRY_ADDR_MASK, x) +#define LRN_SCAN_NEXT_CFG_1_SCAN_ENTRY_ADDR_MASK_GET(x)\ + FIELD_GET(LRN_SCAN_NEXT_CFG_1_SCAN_ENTRY_ADDR_MASK, x) + +/* LRN:COMMON:AUTOAGE_CFG */ +#define LRN_AUTOAGE_CFG(r) __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 36, r, 4, 4) + +#define LRN_AUTOAGE_CFG_UNIT_SIZE GENMASK(29, 28) +#define LRN_AUTOAGE_CFG_UNIT_SIZE_SET(x)\ + FIELD_PREP(LRN_AUTOAGE_CFG_UNIT_SIZE, x) +#define LRN_AUTOAGE_CFG_UNIT_SIZE_GET(x)\ + FIELD_GET(LRN_AUTOAGE_CFG_UNIT_SIZE, x) + +#define LRN_AUTOAGE_CFG_PERIOD_VAL GENMASK(27, 0) +#define LRN_AUTOAGE_CFG_PERIOD_VAL_SET(x)\ + FIELD_PREP(LRN_AUTOAGE_CFG_PERIOD_VAL, x) +#define LRN_AUTOAGE_CFG_PERIOD_VAL_GET(x)\ + FIELD_GET(LRN_AUTOAGE_CFG_PERIOD_VAL, x) + +/* LRN:COMMON:AUTOAGE_CFG_1 */ +#define LRN_AUTOAGE_CFG_1 __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 52, 0, 1, 4) + +#define LRN_AUTOAGE_CFG_1_PAUSE_AUTO_AGE_ENA BIT(25) +#define LRN_AUTOAGE_CFG_1_PAUSE_AUTO_AGE_ENA_SET(x)\ + FIELD_PREP(LRN_AUTOAGE_CFG_1_PAUSE_AUTO_AGE_ENA, x) +#define LRN_AUTOAGE_CFG_1_PAUSE_AUTO_AGE_ENA_GET(x)\ + FIELD_GET(LRN_AUTOAGE_CFG_1_PAUSE_AUTO_AGE_ENA, x) + +#define LRN_AUTOAGE_CFG_1_CELLS_BETWEEN_ENTRY_SCAN GENMASK(24, 15) +#define LRN_AUTOAGE_CFG_1_CELLS_BETWEEN_ENTRY_SCAN_SET(x)\ + FIELD_PREP(LRN_AUTOAGE_CFG_1_CELLS_BETWEEN_ENTRY_SCAN, x) +#define LRN_AUTOAGE_CFG_1_CELLS_BETWEEN_ENTRY_SCAN_GET(x)\ + 
FIELD_GET(LRN_AUTOAGE_CFG_1_CELLS_BETWEEN_ENTRY_SCAN, x) + +#define LRN_AUTOAGE_CFG_1_CLK_PERIOD_01NS GENMASK(14, 7) +#define LRN_AUTOAGE_CFG_1_CLK_PERIOD_01NS_SET(x)\ + FIELD_PREP(LRN_AUTOAGE_CFG_1_CLK_PERIOD_01NS, x) +#define LRN_AUTOAGE_CFG_1_CLK_PERIOD_01NS_GET(x)\ + FIELD_GET(LRN_AUTOAGE_CFG_1_CLK_PERIOD_01NS, x) + +#define LRN_AUTOAGE_CFG_1_USE_PORT_FILTER_ENA BIT(6) +#define LRN_AUTOAGE_CFG_1_USE_PORT_FILTER_ENA_SET(x)\ + FIELD_PREP(LRN_AUTOAGE_CFG_1_USE_PORT_FILTER_ENA, x) +#define LRN_AUTOAGE_CFG_1_USE_PORT_FILTER_ENA_GET(x)\ + FIELD_GET(LRN_AUTOAGE_CFG_1_USE_PORT_FILTER_ENA, x) + +#define LRN_AUTOAGE_CFG_1_FORCE_HW_SCAN_SHOT GENMASK(5, 2) +#define LRN_AUTOAGE_CFG_1_FORCE_HW_SCAN_SHOT_SET(x)\ + FIELD_PREP(LRN_AUTOAGE_CFG_1_FORCE_HW_SCAN_SHOT, x) +#define LRN_AUTOAGE_CFG_1_FORCE_HW_SCAN_SHOT_GET(x)\ + FIELD_GET(LRN_AUTOAGE_CFG_1_FORCE_HW_SCAN_SHOT, x) + +#define LRN_AUTOAGE_CFG_1_FORCE_HW_SCAN_STOP_SHOT BIT(1) +#define LRN_AUTOAGE_CFG_1_FORCE_HW_SCAN_STOP_SHOT_SET(x)\ + FIELD_PREP(LRN_AUTOAGE_CFG_1_FORCE_HW_SCAN_STOP_SHOT, x) +#define LRN_AUTOAGE_CFG_1_FORCE_HW_SCAN_STOP_SHOT_GET(x)\ + FIELD_GET(LRN_AUTOAGE_CFG_1_FORCE_HW_SCAN_STOP_SHOT, x) + +#define LRN_AUTOAGE_CFG_1_FORCE_IDLE_ENA BIT(0) +#define LRN_AUTOAGE_CFG_1_FORCE_IDLE_ENA_SET(x)\ + FIELD_PREP(LRN_AUTOAGE_CFG_1_FORCE_IDLE_ENA, x) +#define LRN_AUTOAGE_CFG_1_FORCE_IDLE_ENA_GET(x)\ + FIELD_GET(LRN_AUTOAGE_CFG_1_FORCE_IDLE_ENA, x) + +/* LRN:COMMON:AUTOAGE_CFG_2 */ +#define LRN_AUTOAGE_CFG_2 __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 56, 0, 1, 4) + +#define LRN_AUTOAGE_CFG_2_NEXT_ROW GENMASK(17, 4) +#define LRN_AUTOAGE_CFG_2_NEXT_ROW_SET(x)\ + FIELD_PREP(LRN_AUTOAGE_CFG_2_NEXT_ROW, x) +#define LRN_AUTOAGE_CFG_2_NEXT_ROW_GET(x)\ + FIELD_GET(LRN_AUTOAGE_CFG_2_NEXT_ROW, x) + +#define LRN_AUTOAGE_CFG_2_SCAN_ONGOING_STATUS GENMASK(3, 0) +#define LRN_AUTOAGE_CFG_2_SCAN_ONGOING_STATUS_SET(x)\ + FIELD_PREP(LRN_AUTOAGE_CFG_2_SCAN_ONGOING_STATUS, x) +#define LRN_AUTOAGE_CFG_2_SCAN_ONGOING_STATUS_GET(x)\ + FIELD_GET(LRN_AUTOAGE_CFG_2_SCAN_ONGOING_STATUS, x) + +/* PCIE_DM_EP:PF0_ATU_CAP:IATU_REGION_CTRL_2_OFF_OUTBOUND_0 */ +#define PCEP_RCTRL_2_OUT_0 __REG(TARGET_PCEP, 0, 1, 3145728, 0, 1, 130852, 4, 0, 1, 4) + +#define PCEP_RCTRL_2_OUT_0_MSG_CODE GENMASK(7, 0) +#define PCEP_RCTRL_2_OUT_0_MSG_CODE_SET(x)\ + FIELD_PREP(PCEP_RCTRL_2_OUT_0_MSG_CODE, x) +#define PCEP_RCTRL_2_OUT_0_MSG_CODE_GET(x)\ + FIELD_GET(PCEP_RCTRL_2_OUT_0_MSG_CODE, x) + +#define PCEP_RCTRL_2_OUT_0_TAG GENMASK(15, 8) +#define PCEP_RCTRL_2_OUT_0_TAG_SET(x)\ + FIELD_PREP(PCEP_RCTRL_2_OUT_0_TAG, x) +#define PCEP_RCTRL_2_OUT_0_TAG_GET(x)\ + FIELD_GET(PCEP_RCTRL_2_OUT_0_TAG, x) + +#define PCEP_RCTRL_2_OUT_0_TAG_SUBSTITUTE_EN BIT(16) +#define PCEP_RCTRL_2_OUT_0_TAG_SUBSTITUTE_EN_SET(x)\ + FIELD_PREP(PCEP_RCTRL_2_OUT_0_TAG_SUBSTITUTE_EN, x) +#define PCEP_RCTRL_2_OUT_0_TAG_SUBSTITUTE_EN_GET(x)\ + FIELD_GET(PCEP_RCTRL_2_OUT_0_TAG_SUBSTITUTE_EN, x) + +#define PCEP_RCTRL_2_OUT_0_FUNC_BYPASS BIT(19) +#define PCEP_RCTRL_2_OUT_0_FUNC_BYPASS_SET(x)\ + FIELD_PREP(PCEP_RCTRL_2_OUT_0_FUNC_BYPASS, x) +#define PCEP_RCTRL_2_OUT_0_FUNC_BYPASS_GET(x)\ + FIELD_GET(PCEP_RCTRL_2_OUT_0_FUNC_BYPASS, x) + +#define PCEP_RCTRL_2_OUT_0_SNP BIT(20) +#define PCEP_RCTRL_2_OUT_0_SNP_SET(x)\ + FIELD_PREP(PCEP_RCTRL_2_OUT_0_SNP, x) +#define PCEP_RCTRL_2_OUT_0_SNP_GET(x)\ + FIELD_GET(PCEP_RCTRL_2_OUT_0_SNP, x) + +#define PCEP_RCTRL_2_OUT_0_INHIBIT_PAYLOAD BIT(22) +#define PCEP_RCTRL_2_OUT_0_INHIBIT_PAYLOAD_SET(x)\ + FIELD_PREP(PCEP_RCTRL_2_OUT_0_INHIBIT_PAYLOAD, x) +#define PCEP_RCTRL_2_OUT_0_INHIBIT_PAYLOAD_GET(x)\ 
+ FIELD_GET(PCEP_RCTRL_2_OUT_0_INHIBIT_PAYLOAD, x) + +#define PCEP_RCTRL_2_OUT_0_HEADER_SUBSTITUTE_EN BIT(23) +#define PCEP_RCTRL_2_OUT_0_HEADER_SUBSTITUTE_EN_SET(x)\ + FIELD_PREP(PCEP_RCTRL_2_OUT_0_HEADER_SUBSTITUTE_EN, x) +#define PCEP_RCTRL_2_OUT_0_HEADER_SUBSTITUTE_EN_GET(x)\ + FIELD_GET(PCEP_RCTRL_2_OUT_0_HEADER_SUBSTITUTE_EN, x) + +#define PCEP_RCTRL_2_OUT_0_CFG_SHIFT_MODE BIT(28) +#define PCEP_RCTRL_2_OUT_0_CFG_SHIFT_MODE_SET(x)\ + FIELD_PREP(PCEP_RCTRL_2_OUT_0_CFG_SHIFT_MODE, x) +#define PCEP_RCTRL_2_OUT_0_CFG_SHIFT_MODE_GET(x)\ + FIELD_GET(PCEP_RCTRL_2_OUT_0_CFG_SHIFT_MODE, x) + +#define PCEP_RCTRL_2_OUT_0_INVERT_MODE BIT(29) +#define PCEP_RCTRL_2_OUT_0_INVERT_MODE_SET(x)\ + FIELD_PREP(PCEP_RCTRL_2_OUT_0_INVERT_MODE, x) +#define PCEP_RCTRL_2_OUT_0_INVERT_MODE_GET(x)\ + FIELD_GET(PCEP_RCTRL_2_OUT_0_INVERT_MODE, x) + +#define PCEP_RCTRL_2_OUT_0_REGION_EN BIT(31) +#define PCEP_RCTRL_2_OUT_0_REGION_EN_SET(x)\ + FIELD_PREP(PCEP_RCTRL_2_OUT_0_REGION_EN, x) +#define PCEP_RCTRL_2_OUT_0_REGION_EN_GET(x)\ + FIELD_GET(PCEP_RCTRL_2_OUT_0_REGION_EN, x) + +/* PCIE_DM_EP:PF0_ATU_CAP:IATU_LWR_BASE_ADDR_OFF_OUTBOUND_0 */ +#define PCEP_ADDR_LWR_OUT_0 __REG(TARGET_PCEP, 0, 1, 3145728, 0, 1, 130852, 8, 0, 1, 4) + +#define PCEP_ADDR_LWR_OUT_0_LWR_BASE_HW GENMASK(15, 0) +#define PCEP_ADDR_LWR_OUT_0_LWR_BASE_HW_SET(x)\ + FIELD_PREP(PCEP_ADDR_LWR_OUT_0_LWR_BASE_HW, x) +#define PCEP_ADDR_LWR_OUT_0_LWR_BASE_HW_GET(x)\ + FIELD_GET(PCEP_ADDR_LWR_OUT_0_LWR_BASE_HW, x) + +#define PCEP_ADDR_LWR_OUT_0_LWR_BASE_RW GENMASK(31, 16) +#define PCEP_ADDR_LWR_OUT_0_LWR_BASE_RW_SET(x)\ + FIELD_PREP(PCEP_ADDR_LWR_OUT_0_LWR_BASE_RW, x) +#define PCEP_ADDR_LWR_OUT_0_LWR_BASE_RW_GET(x)\ + FIELD_GET(PCEP_ADDR_LWR_OUT_0_LWR_BASE_RW, x) + +/* PCIE_DM_EP:PF0_ATU_CAP:IATU_UPPER_BASE_ADDR_OFF_OUTBOUND_0 */ +#define PCEP_ADDR_UPR_OUT_0 __REG(TARGET_PCEP, 0, 1, 3145728, 0, 1, 130852, 12, 0, 1, 4) + +/* PCIE_DM_EP:PF0_ATU_CAP:IATU_LIMIT_ADDR_OFF_OUTBOUND_0 */ +#define PCEP_ADDR_LIM_OUT_0 __REG(TARGET_PCEP, 0, 1, 3145728, 0, 1, 130852, 16, 0, 1, 4) + +#define PCEP_ADDR_LIM_OUT_0_LIMIT_ADDR_HW GENMASK(15, 0) +#define PCEP_ADDR_LIM_OUT_0_LIMIT_ADDR_HW_SET(x)\ + FIELD_PREP(PCEP_ADDR_LIM_OUT_0_LIMIT_ADDR_HW, x) +#define PCEP_ADDR_LIM_OUT_0_LIMIT_ADDR_HW_GET(x)\ + FIELD_GET(PCEP_ADDR_LIM_OUT_0_LIMIT_ADDR_HW, x) + +#define PCEP_ADDR_LIM_OUT_0_LIMIT_ADDR_RW GENMASK(31, 16) +#define PCEP_ADDR_LIM_OUT_0_LIMIT_ADDR_RW_SET(x)\ + FIELD_PREP(PCEP_ADDR_LIM_OUT_0_LIMIT_ADDR_RW, x) +#define PCEP_ADDR_LIM_OUT_0_LIMIT_ADDR_RW_GET(x)\ + FIELD_GET(PCEP_ADDR_LIM_OUT_0_LIMIT_ADDR_RW, x) + +/* PCIE_DM_EP:PF0_ATU_CAP:IATU_LWR_TARGET_ADDR_OFF_OUTBOUND_0 */ +#define PCEP_ADDR_LWR_TGT_OUT_0 __REG(TARGET_PCEP, 0, 1, 3145728, 0, 1, 130852, 20, 0, 1, 4) + +/* PCIE_DM_EP:PF0_ATU_CAP:IATU_UPPER_TARGET_ADDR_OFF_OUTBOUND_0 */ +#define PCEP_ADDR_UPR_TGT_OUT_0 __REG(TARGET_PCEP, 0, 1, 3145728, 0, 1, 130852, 24, 0, 1, 4) + +/* PCIE_DM_EP:PF0_ATU_CAP:IATU_UPPR_LIMIT_ADDR_OFF_OUTBOUND_0 */ +#define PCEP_ADDR_UPR_LIM_OUT_0 __REG(TARGET_PCEP, 0, 1, 3145728, 0, 1, 130852, 32, 0, 1, 4) + +#define PCEP_ADDR_UPR_LIM_OUT_0_UPPR_LIMIT_ADDR_RW GENMASK(1, 0) +#define PCEP_ADDR_UPR_LIM_OUT_0_UPPR_LIMIT_ADDR_RW_SET(x)\ + FIELD_PREP(PCEP_ADDR_UPR_LIM_OUT_0_UPPR_LIMIT_ADDR_RW, x) +#define PCEP_ADDR_UPR_LIM_OUT_0_UPPR_LIMIT_ADDR_RW_GET(x)\ + FIELD_GET(PCEP_ADDR_UPR_LIM_OUT_0_UPPR_LIMIT_ADDR_RW, x) + +#define PCEP_ADDR_UPR_LIM_OUT_0_UPPR_LIMIT_ADDR_HW GENMASK(31, 2) +#define PCEP_ADDR_UPR_LIM_OUT_0_UPPR_LIMIT_ADDR_HW_SET(x)\ + FIELD_PREP(PCEP_ADDR_UPR_LIM_OUT_0_UPPR_LIMIT_ADDR_HW, x) 
+#define PCEP_ADDR_UPR_LIM_OUT_0_UPPR_LIMIT_ADDR_HW_GET(x)\ + FIELD_GET(PCEP_ADDR_UPR_LIM_OUT_0_UPPR_LIMIT_ADDR_HW, x) + +/* PCS_10GBASE_R:PCS_10GBR_CFG:PCS_CFG */ +#define PCS10G_BR_PCS_CFG(t) __REG(TARGET_PCS10G_BR, t, 12, 0, 0, 1, 56, 0, 0, 1, 4) + +#define PCS10G_BR_PCS_CFG_PCS_ENA BIT(31) +#define PCS10G_BR_PCS_CFG_PCS_ENA_SET(x)\ + FIELD_PREP(PCS10G_BR_PCS_CFG_PCS_ENA, x) +#define PCS10G_BR_PCS_CFG_PCS_ENA_GET(x)\ + FIELD_GET(PCS10G_BR_PCS_CFG_PCS_ENA, x) + +#define PCS10G_BR_PCS_CFG_PMA_LOOPBACK_ENA BIT(30) +#define PCS10G_BR_PCS_CFG_PMA_LOOPBACK_ENA_SET(x)\ + FIELD_PREP(PCS10G_BR_PCS_CFG_PMA_LOOPBACK_ENA, x) +#define PCS10G_BR_PCS_CFG_PMA_LOOPBACK_ENA_GET(x)\ + FIELD_GET(PCS10G_BR_PCS_CFG_PMA_LOOPBACK_ENA, x) + +#define PCS10G_BR_PCS_CFG_SH_CNT_MAX GENMASK(29, 24) +#define PCS10G_BR_PCS_CFG_SH_CNT_MAX_SET(x)\ + FIELD_PREP(PCS10G_BR_PCS_CFG_SH_CNT_MAX, x) +#define PCS10G_BR_PCS_CFG_SH_CNT_MAX_GET(x)\ + FIELD_GET(PCS10G_BR_PCS_CFG_SH_CNT_MAX, x) + +#define PCS10G_BR_PCS_CFG_RX_DATA_FLIP BIT(18) +#define PCS10G_BR_PCS_CFG_RX_DATA_FLIP_SET(x)\ + FIELD_PREP(PCS10G_BR_PCS_CFG_RX_DATA_FLIP, x) +#define PCS10G_BR_PCS_CFG_RX_DATA_FLIP_GET(x)\ + FIELD_GET(PCS10G_BR_PCS_CFG_RX_DATA_FLIP, x) + +#define PCS10G_BR_PCS_CFG_RESYNC_ENA BIT(15) +#define PCS10G_BR_PCS_CFG_RESYNC_ENA_SET(x)\ + FIELD_PREP(PCS10G_BR_PCS_CFG_RESYNC_ENA, x) +#define PCS10G_BR_PCS_CFG_RESYNC_ENA_GET(x)\ + FIELD_GET(PCS10G_BR_PCS_CFG_RESYNC_ENA, x) + +#define PCS10G_BR_PCS_CFG_LF_GEN_DIS BIT(14) +#define PCS10G_BR_PCS_CFG_LF_GEN_DIS_SET(x)\ + FIELD_PREP(PCS10G_BR_PCS_CFG_LF_GEN_DIS, x) +#define PCS10G_BR_PCS_CFG_LF_GEN_DIS_GET(x)\ + FIELD_GET(PCS10G_BR_PCS_CFG_LF_GEN_DIS, x) + +#define PCS10G_BR_PCS_CFG_RX_TEST_MODE BIT(13) +#define PCS10G_BR_PCS_CFG_RX_TEST_MODE_SET(x)\ + FIELD_PREP(PCS10G_BR_PCS_CFG_RX_TEST_MODE, x) +#define PCS10G_BR_PCS_CFG_RX_TEST_MODE_GET(x)\ + FIELD_GET(PCS10G_BR_PCS_CFG_RX_TEST_MODE, x) + +#define PCS10G_BR_PCS_CFG_RX_SCR_DISABLE BIT(12) +#define PCS10G_BR_PCS_CFG_RX_SCR_DISABLE_SET(x)\ + FIELD_PREP(PCS10G_BR_PCS_CFG_RX_SCR_DISABLE, x) +#define PCS10G_BR_PCS_CFG_RX_SCR_DISABLE_GET(x)\ + FIELD_GET(PCS10G_BR_PCS_CFG_RX_SCR_DISABLE, x) + +#define PCS10G_BR_PCS_CFG_TX_DATA_FLIP BIT(7) +#define PCS10G_BR_PCS_CFG_TX_DATA_FLIP_SET(x)\ + FIELD_PREP(PCS10G_BR_PCS_CFG_TX_DATA_FLIP, x) +#define PCS10G_BR_PCS_CFG_TX_DATA_FLIP_GET(x)\ + FIELD_GET(PCS10G_BR_PCS_CFG_TX_DATA_FLIP, x) + +#define PCS10G_BR_PCS_CFG_AN_LINK_CTRL_ENA BIT(6) +#define PCS10G_BR_PCS_CFG_AN_LINK_CTRL_ENA_SET(x)\ + FIELD_PREP(PCS10G_BR_PCS_CFG_AN_LINK_CTRL_ENA, x) +#define PCS10G_BR_PCS_CFG_AN_LINK_CTRL_ENA_GET(x)\ + FIELD_GET(PCS10G_BR_PCS_CFG_AN_LINK_CTRL_ENA, x) + +#define PCS10G_BR_PCS_CFG_TX_TEST_MODE BIT(4) +#define PCS10G_BR_PCS_CFG_TX_TEST_MODE_SET(x)\ + FIELD_PREP(PCS10G_BR_PCS_CFG_TX_TEST_MODE, x) +#define PCS10G_BR_PCS_CFG_TX_TEST_MODE_GET(x)\ + FIELD_GET(PCS10G_BR_PCS_CFG_TX_TEST_MODE, x) + +#define PCS10G_BR_PCS_CFG_TX_SCR_DISABLE BIT(3) +#define PCS10G_BR_PCS_CFG_TX_SCR_DISABLE_SET(x)\ + FIELD_PREP(PCS10G_BR_PCS_CFG_TX_SCR_DISABLE, x) +#define PCS10G_BR_PCS_CFG_TX_SCR_DISABLE_GET(x)\ + FIELD_GET(PCS10G_BR_PCS_CFG_TX_SCR_DISABLE, x) + +/* PCS_10GBASE_R:PCS_10GBR_CFG:PCS_SD_CFG */ +#define PCS10G_BR_PCS_SD_CFG(t) __REG(TARGET_PCS10G_BR, t, 12, 0, 0, 1, 56, 4, 0, 1, 4) + +#define PCS10G_BR_PCS_SD_CFG_SD_SEL BIT(8) +#define PCS10G_BR_PCS_SD_CFG_SD_SEL_SET(x)\ + FIELD_PREP(PCS10G_BR_PCS_SD_CFG_SD_SEL, x) +#define PCS10G_BR_PCS_SD_CFG_SD_SEL_GET(x)\ + FIELD_GET(PCS10G_BR_PCS_SD_CFG_SD_SEL, x) + +#define PCS10G_BR_PCS_SD_CFG_SD_POL 
BIT(4) +#define PCS10G_BR_PCS_SD_CFG_SD_POL_SET(x)\ + FIELD_PREP(PCS10G_BR_PCS_SD_CFG_SD_POL, x) +#define PCS10G_BR_PCS_SD_CFG_SD_POL_GET(x)\ + FIELD_GET(PCS10G_BR_PCS_SD_CFG_SD_POL, x) + +#define PCS10G_BR_PCS_SD_CFG_SD_ENA BIT(0) +#define PCS10G_BR_PCS_SD_CFG_SD_ENA_SET(x)\ + FIELD_PREP(PCS10G_BR_PCS_SD_CFG_SD_ENA, x) +#define PCS10G_BR_PCS_SD_CFG_SD_ENA_GET(x)\ + FIELD_GET(PCS10G_BR_PCS_SD_CFG_SD_ENA, x) + +/* PCS_10GBASE_R:PCS_10GBR_CFG:PCS_CFG */ +#define PCS25G_BR_PCS_CFG(t) __REG(TARGET_PCS25G_BR, t, 8, 0, 0, 1, 56, 0, 0, 1, 4) + +#define PCS25G_BR_PCS_CFG_PCS_ENA BIT(31) +#define PCS25G_BR_PCS_CFG_PCS_ENA_SET(x)\ + FIELD_PREP(PCS25G_BR_PCS_CFG_PCS_ENA, x) +#define PCS25G_BR_PCS_CFG_PCS_ENA_GET(x)\ + FIELD_GET(PCS25G_BR_PCS_CFG_PCS_ENA, x) + +#define PCS25G_BR_PCS_CFG_PMA_LOOPBACK_ENA BIT(30) +#define PCS25G_BR_PCS_CFG_PMA_LOOPBACK_ENA_SET(x)\ + FIELD_PREP(PCS25G_BR_PCS_CFG_PMA_LOOPBACK_ENA, x) +#define PCS25G_BR_PCS_CFG_PMA_LOOPBACK_ENA_GET(x)\ + FIELD_GET(PCS25G_BR_PCS_CFG_PMA_LOOPBACK_ENA, x) + +#define PCS25G_BR_PCS_CFG_SH_CNT_MAX GENMASK(29, 24) +#define PCS25G_BR_PCS_CFG_SH_CNT_MAX_SET(x)\ + FIELD_PREP(PCS25G_BR_PCS_CFG_SH_CNT_MAX, x) +#define PCS25G_BR_PCS_CFG_SH_CNT_MAX_GET(x)\ + FIELD_GET(PCS25G_BR_PCS_CFG_SH_CNT_MAX, x) + +#define PCS25G_BR_PCS_CFG_RX_DATA_FLIP BIT(18) +#define PCS25G_BR_PCS_CFG_RX_DATA_FLIP_SET(x)\ + FIELD_PREP(PCS25G_BR_PCS_CFG_RX_DATA_FLIP, x) +#define PCS25G_BR_PCS_CFG_RX_DATA_FLIP_GET(x)\ + FIELD_GET(PCS25G_BR_PCS_CFG_RX_DATA_FLIP, x) + +#define PCS25G_BR_PCS_CFG_RESYNC_ENA BIT(15) +#define PCS25G_BR_PCS_CFG_RESYNC_ENA_SET(x)\ + FIELD_PREP(PCS25G_BR_PCS_CFG_RESYNC_ENA, x) +#define PCS25G_BR_PCS_CFG_RESYNC_ENA_GET(x)\ + FIELD_GET(PCS25G_BR_PCS_CFG_RESYNC_ENA, x) + +#define PCS25G_BR_PCS_CFG_LF_GEN_DIS BIT(14) +#define PCS25G_BR_PCS_CFG_LF_GEN_DIS_SET(x)\ + FIELD_PREP(PCS25G_BR_PCS_CFG_LF_GEN_DIS, x) +#define PCS25G_BR_PCS_CFG_LF_GEN_DIS_GET(x)\ + FIELD_GET(PCS25G_BR_PCS_CFG_LF_GEN_DIS, x) + +#define PCS25G_BR_PCS_CFG_RX_TEST_MODE BIT(13) +#define PCS25G_BR_PCS_CFG_RX_TEST_MODE_SET(x)\ + FIELD_PREP(PCS25G_BR_PCS_CFG_RX_TEST_MODE, x) +#define PCS25G_BR_PCS_CFG_RX_TEST_MODE_GET(x)\ + FIELD_GET(PCS25G_BR_PCS_CFG_RX_TEST_MODE, x) + +#define PCS25G_BR_PCS_CFG_RX_SCR_DISABLE BIT(12) +#define PCS25G_BR_PCS_CFG_RX_SCR_DISABLE_SET(x)\ + FIELD_PREP(PCS25G_BR_PCS_CFG_RX_SCR_DISABLE, x) +#define PCS25G_BR_PCS_CFG_RX_SCR_DISABLE_GET(x)\ + FIELD_GET(PCS25G_BR_PCS_CFG_RX_SCR_DISABLE, x) + +#define PCS25G_BR_PCS_CFG_TX_DATA_FLIP BIT(7) +#define PCS25G_BR_PCS_CFG_TX_DATA_FLIP_SET(x)\ + FIELD_PREP(PCS25G_BR_PCS_CFG_TX_DATA_FLIP, x) +#define PCS25G_BR_PCS_CFG_TX_DATA_FLIP_GET(x)\ + FIELD_GET(PCS25G_BR_PCS_CFG_TX_DATA_FLIP, x) + +#define PCS25G_BR_PCS_CFG_AN_LINK_CTRL_ENA BIT(6) +#define PCS25G_BR_PCS_CFG_AN_LINK_CTRL_ENA_SET(x)\ + FIELD_PREP(PCS25G_BR_PCS_CFG_AN_LINK_CTRL_ENA, x) +#define PCS25G_BR_PCS_CFG_AN_LINK_CTRL_ENA_GET(x)\ + FIELD_GET(PCS25G_BR_PCS_CFG_AN_LINK_CTRL_ENA, x) + +#define PCS25G_BR_PCS_CFG_TX_TEST_MODE BIT(4) +#define PCS25G_BR_PCS_CFG_TX_TEST_MODE_SET(x)\ + FIELD_PREP(PCS25G_BR_PCS_CFG_TX_TEST_MODE, x) +#define PCS25G_BR_PCS_CFG_TX_TEST_MODE_GET(x)\ + FIELD_GET(PCS25G_BR_PCS_CFG_TX_TEST_MODE, x) + +#define PCS25G_BR_PCS_CFG_TX_SCR_DISABLE BIT(3) +#define PCS25G_BR_PCS_CFG_TX_SCR_DISABLE_SET(x)\ + FIELD_PREP(PCS25G_BR_PCS_CFG_TX_SCR_DISABLE, x) +#define PCS25G_BR_PCS_CFG_TX_SCR_DISABLE_GET(x)\ + FIELD_GET(PCS25G_BR_PCS_CFG_TX_SCR_DISABLE, x) + +/* PCS_10GBASE_R:PCS_10GBR_CFG:PCS_SD_CFG */ +#define PCS25G_BR_PCS_SD_CFG(t) __REG(TARGET_PCS25G_BR, t, 8, 
0, 0, 1, 56, 4, 0, 1, 4) + +#define PCS25G_BR_PCS_SD_CFG_SD_SEL BIT(8) +#define PCS25G_BR_PCS_SD_CFG_SD_SEL_SET(x)\ + FIELD_PREP(PCS25G_BR_PCS_SD_CFG_SD_SEL, x) +#define PCS25G_BR_PCS_SD_CFG_SD_SEL_GET(x)\ + FIELD_GET(PCS25G_BR_PCS_SD_CFG_SD_SEL, x) + +#define PCS25G_BR_PCS_SD_CFG_SD_POL BIT(4) +#define PCS25G_BR_PCS_SD_CFG_SD_POL_SET(x)\ + FIELD_PREP(PCS25G_BR_PCS_SD_CFG_SD_POL, x) +#define PCS25G_BR_PCS_SD_CFG_SD_POL_GET(x)\ + FIELD_GET(PCS25G_BR_PCS_SD_CFG_SD_POL, x) + +#define PCS25G_BR_PCS_SD_CFG_SD_ENA BIT(0) +#define PCS25G_BR_PCS_SD_CFG_SD_ENA_SET(x)\ + FIELD_PREP(PCS25G_BR_PCS_SD_CFG_SD_ENA, x) +#define PCS25G_BR_PCS_SD_CFG_SD_ENA_GET(x)\ + FIELD_GET(PCS25G_BR_PCS_SD_CFG_SD_ENA, x) + +/* PCS_10GBASE_R:PCS_10GBR_CFG:PCS_CFG */ +#define PCS5G_BR_PCS_CFG(t) __REG(TARGET_PCS5G_BR, t, 13, 0, 0, 1, 56, 0, 0, 1, 4) + +#define PCS5G_BR_PCS_CFG_PCS_ENA BIT(31) +#define PCS5G_BR_PCS_CFG_PCS_ENA_SET(x)\ + FIELD_PREP(PCS5G_BR_PCS_CFG_PCS_ENA, x) +#define PCS5G_BR_PCS_CFG_PCS_ENA_GET(x)\ + FIELD_GET(PCS5G_BR_PCS_CFG_PCS_ENA, x) + +#define PCS5G_BR_PCS_CFG_PMA_LOOPBACK_ENA BIT(30) +#define PCS5G_BR_PCS_CFG_PMA_LOOPBACK_ENA_SET(x)\ + FIELD_PREP(PCS5G_BR_PCS_CFG_PMA_LOOPBACK_ENA, x) +#define PCS5G_BR_PCS_CFG_PMA_LOOPBACK_ENA_GET(x)\ + FIELD_GET(PCS5G_BR_PCS_CFG_PMA_LOOPBACK_ENA, x) + +#define PCS5G_BR_PCS_CFG_SH_CNT_MAX GENMASK(29, 24) +#define PCS5G_BR_PCS_CFG_SH_CNT_MAX_SET(x)\ + FIELD_PREP(PCS5G_BR_PCS_CFG_SH_CNT_MAX, x) +#define PCS5G_BR_PCS_CFG_SH_CNT_MAX_GET(x)\ + FIELD_GET(PCS5G_BR_PCS_CFG_SH_CNT_MAX, x) + +#define PCS5G_BR_PCS_CFG_RX_DATA_FLIP BIT(18) +#define PCS5G_BR_PCS_CFG_RX_DATA_FLIP_SET(x)\ + FIELD_PREP(PCS5G_BR_PCS_CFG_RX_DATA_FLIP, x) +#define PCS5G_BR_PCS_CFG_RX_DATA_FLIP_GET(x)\ + FIELD_GET(PCS5G_BR_PCS_CFG_RX_DATA_FLIP, x) + +#define PCS5G_BR_PCS_CFG_RESYNC_ENA BIT(15) +#define PCS5G_BR_PCS_CFG_RESYNC_ENA_SET(x)\ + FIELD_PREP(PCS5G_BR_PCS_CFG_RESYNC_ENA, x) +#define PCS5G_BR_PCS_CFG_RESYNC_ENA_GET(x)\ + FIELD_GET(PCS5G_BR_PCS_CFG_RESYNC_ENA, x) + +#define PCS5G_BR_PCS_CFG_LF_GEN_DIS BIT(14) +#define PCS5G_BR_PCS_CFG_LF_GEN_DIS_SET(x)\ + FIELD_PREP(PCS5G_BR_PCS_CFG_LF_GEN_DIS, x) +#define PCS5G_BR_PCS_CFG_LF_GEN_DIS_GET(x)\ + FIELD_GET(PCS5G_BR_PCS_CFG_LF_GEN_DIS, x) + +#define PCS5G_BR_PCS_CFG_RX_TEST_MODE BIT(13) +#define PCS5G_BR_PCS_CFG_RX_TEST_MODE_SET(x)\ + FIELD_PREP(PCS5G_BR_PCS_CFG_RX_TEST_MODE, x) +#define PCS5G_BR_PCS_CFG_RX_TEST_MODE_GET(x)\ + FIELD_GET(PCS5G_BR_PCS_CFG_RX_TEST_MODE, x) + +#define PCS5G_BR_PCS_CFG_RX_SCR_DISABLE BIT(12) +#define PCS5G_BR_PCS_CFG_RX_SCR_DISABLE_SET(x)\ + FIELD_PREP(PCS5G_BR_PCS_CFG_RX_SCR_DISABLE, x) +#define PCS5G_BR_PCS_CFG_RX_SCR_DISABLE_GET(x)\ + FIELD_GET(PCS5G_BR_PCS_CFG_RX_SCR_DISABLE, x) + +#define PCS5G_BR_PCS_CFG_TX_DATA_FLIP BIT(7) +#define PCS5G_BR_PCS_CFG_TX_DATA_FLIP_SET(x)\ + FIELD_PREP(PCS5G_BR_PCS_CFG_TX_DATA_FLIP, x) +#define PCS5G_BR_PCS_CFG_TX_DATA_FLIP_GET(x)\ + FIELD_GET(PCS5G_BR_PCS_CFG_TX_DATA_FLIP, x) + +#define PCS5G_BR_PCS_CFG_AN_LINK_CTRL_ENA BIT(6) +#define PCS5G_BR_PCS_CFG_AN_LINK_CTRL_ENA_SET(x)\ + FIELD_PREP(PCS5G_BR_PCS_CFG_AN_LINK_CTRL_ENA, x) +#define PCS5G_BR_PCS_CFG_AN_LINK_CTRL_ENA_GET(x)\ + FIELD_GET(PCS5G_BR_PCS_CFG_AN_LINK_CTRL_ENA, x) + +#define PCS5G_BR_PCS_CFG_TX_TEST_MODE BIT(4) +#define PCS5G_BR_PCS_CFG_TX_TEST_MODE_SET(x)\ + FIELD_PREP(PCS5G_BR_PCS_CFG_TX_TEST_MODE, x) +#define PCS5G_BR_PCS_CFG_TX_TEST_MODE_GET(x)\ + FIELD_GET(PCS5G_BR_PCS_CFG_TX_TEST_MODE, x) + +#define PCS5G_BR_PCS_CFG_TX_SCR_DISABLE BIT(3) +#define PCS5G_BR_PCS_CFG_TX_SCR_DISABLE_SET(x)\ + 
FIELD_PREP(PCS5G_BR_PCS_CFG_TX_SCR_DISABLE, x) +#define PCS5G_BR_PCS_CFG_TX_SCR_DISABLE_GET(x)\ + FIELD_GET(PCS5G_BR_PCS_CFG_TX_SCR_DISABLE, x) + +/* PCS_10GBASE_R:PCS_10GBR_CFG:PCS_SD_CFG */ +#define PCS5G_BR_PCS_SD_CFG(t) __REG(TARGET_PCS5G_BR, t, 13, 0, 0, 1, 56, 4, 0, 1, 4) + +#define PCS5G_BR_PCS_SD_CFG_SD_SEL BIT(8) +#define PCS5G_BR_PCS_SD_CFG_SD_SEL_SET(x)\ + FIELD_PREP(PCS5G_BR_PCS_SD_CFG_SD_SEL, x) +#define PCS5G_BR_PCS_SD_CFG_SD_SEL_GET(x)\ + FIELD_GET(PCS5G_BR_PCS_SD_CFG_SD_SEL, x) + +#define PCS5G_BR_PCS_SD_CFG_SD_POL BIT(4) +#define PCS5G_BR_PCS_SD_CFG_SD_POL_SET(x)\ + FIELD_PREP(PCS5G_BR_PCS_SD_CFG_SD_POL, x) +#define PCS5G_BR_PCS_SD_CFG_SD_POL_GET(x)\ + FIELD_GET(PCS5G_BR_PCS_SD_CFG_SD_POL, x) + +#define PCS5G_BR_PCS_SD_CFG_SD_ENA BIT(0) +#define PCS5G_BR_PCS_SD_CFG_SD_ENA_SET(x)\ + FIELD_PREP(PCS5G_BR_PCS_SD_CFG_SD_ENA, x) +#define PCS5G_BR_PCS_SD_CFG_SD_ENA_GET(x)\ + FIELD_GET(PCS5G_BR_PCS_SD_CFG_SD_ENA, x) + +/* PORT_CONF:HW_CFG:DEV5G_MODES */ +#define PORT_CONF_DEV5G_MODES __REG(TARGET_PORT_CONF, 0, 1, 0, 0, 1, 24, 0, 0, 1, 4) + +#define PORT_CONF_DEV5G_MODES_DEV5G_D0_MODE BIT(0) +#define PORT_CONF_DEV5G_MODES_DEV5G_D0_MODE_SET(x)\ + FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D0_MODE, x) +#define PORT_CONF_DEV5G_MODES_DEV5G_D0_MODE_GET(x)\ + FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D0_MODE, x) + +#define PORT_CONF_DEV5G_MODES_DEV5G_D1_MODE BIT(1) +#define PORT_CONF_DEV5G_MODES_DEV5G_D1_MODE_SET(x)\ + FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D1_MODE, x) +#define PORT_CONF_DEV5G_MODES_DEV5G_D1_MODE_GET(x)\ + FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D1_MODE, x) + +#define PORT_CONF_DEV5G_MODES_DEV5G_D2_MODE BIT(2) +#define PORT_CONF_DEV5G_MODES_DEV5G_D2_MODE_SET(x)\ + FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D2_MODE, x) +#define PORT_CONF_DEV5G_MODES_DEV5G_D2_MODE_GET(x)\ + FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D2_MODE, x) + +#define PORT_CONF_DEV5G_MODES_DEV5G_D3_MODE BIT(3) +#define PORT_CONF_DEV5G_MODES_DEV5G_D3_MODE_SET(x)\ + FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D3_MODE, x) +#define PORT_CONF_DEV5G_MODES_DEV5G_D3_MODE_GET(x)\ + FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D3_MODE, x) + +#define PORT_CONF_DEV5G_MODES_DEV5G_D4_MODE BIT(4) +#define PORT_CONF_DEV5G_MODES_DEV5G_D4_MODE_SET(x)\ + FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D4_MODE, x) +#define PORT_CONF_DEV5G_MODES_DEV5G_D4_MODE_GET(x)\ + FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D4_MODE, x) + +#define PORT_CONF_DEV5G_MODES_DEV5G_D5_MODE BIT(5) +#define PORT_CONF_DEV5G_MODES_DEV5G_D5_MODE_SET(x)\ + FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D5_MODE, x) +#define PORT_CONF_DEV5G_MODES_DEV5G_D5_MODE_GET(x)\ + FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D5_MODE, x) + +#define PORT_CONF_DEV5G_MODES_DEV5G_D6_MODE BIT(6) +#define PORT_CONF_DEV5G_MODES_DEV5G_D6_MODE_SET(x)\ + FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D6_MODE, x) +#define PORT_CONF_DEV5G_MODES_DEV5G_D6_MODE_GET(x)\ + FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D6_MODE, x) + +#define PORT_CONF_DEV5G_MODES_DEV5G_D7_MODE BIT(7) +#define PORT_CONF_DEV5G_MODES_DEV5G_D7_MODE_SET(x)\ + FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D7_MODE, x) +#define PORT_CONF_DEV5G_MODES_DEV5G_D7_MODE_GET(x)\ + FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D7_MODE, x) + +#define PORT_CONF_DEV5G_MODES_DEV5G_D8_MODE BIT(8) +#define PORT_CONF_DEV5G_MODES_DEV5G_D8_MODE_SET(x)\ + FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D8_MODE, x) +#define PORT_CONF_DEV5G_MODES_DEV5G_D8_MODE_GET(x)\ + FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D8_MODE, x) + +#define PORT_CONF_DEV5G_MODES_DEV5G_D9_MODE BIT(9) +#define PORT_CONF_DEV5G_MODES_DEV5G_D9_MODE_SET(x)\ 
+ FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D9_MODE, x) +#define PORT_CONF_DEV5G_MODES_DEV5G_D9_MODE_GET(x)\ + FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D9_MODE, x) + +#define PORT_CONF_DEV5G_MODES_DEV5G_D10_MODE BIT(10) +#define PORT_CONF_DEV5G_MODES_DEV5G_D10_MODE_SET(x)\ + FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D10_MODE, x) +#define PORT_CONF_DEV5G_MODES_DEV5G_D10_MODE_GET(x)\ + FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D10_MODE, x) + +#define PORT_CONF_DEV5G_MODES_DEV5G_D11_MODE BIT(11) +#define PORT_CONF_DEV5G_MODES_DEV5G_D11_MODE_SET(x)\ + FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D11_MODE, x) +#define PORT_CONF_DEV5G_MODES_DEV5G_D11_MODE_GET(x)\ + FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D11_MODE, x) + +#define PORT_CONF_DEV5G_MODES_DEV5G_D64_MODE BIT(12) +#define PORT_CONF_DEV5G_MODES_DEV5G_D64_MODE_SET(x)\ + FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D64_MODE, x) +#define PORT_CONF_DEV5G_MODES_DEV5G_D64_MODE_GET(x)\ + FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D64_MODE, x) + +/* PORT_CONF:HW_CFG:DEV10G_MODES */ +#define PORT_CONF_DEV10G_MODES __REG(TARGET_PORT_CONF, 0, 1, 0, 0, 1, 24, 4, 0, 1, 4) + +#define PORT_CONF_DEV10G_MODES_DEV10G_D12_MODE BIT(0) +#define PORT_CONF_DEV10G_MODES_DEV10G_D12_MODE_SET(x)\ + FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D12_MODE, x) +#define PORT_CONF_DEV10G_MODES_DEV10G_D12_MODE_GET(x)\ + FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D12_MODE, x) + +#define PORT_CONF_DEV10G_MODES_DEV10G_D13_MODE BIT(1) +#define PORT_CONF_DEV10G_MODES_DEV10G_D13_MODE_SET(x)\ + FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D13_MODE, x) +#define PORT_CONF_DEV10G_MODES_DEV10G_D13_MODE_GET(x)\ + FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D13_MODE, x) + +#define PORT_CONF_DEV10G_MODES_DEV10G_D14_MODE BIT(2) +#define PORT_CONF_DEV10G_MODES_DEV10G_D14_MODE_SET(x)\ + FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D14_MODE, x) +#define PORT_CONF_DEV10G_MODES_DEV10G_D14_MODE_GET(x)\ + FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D14_MODE, x) + +#define PORT_CONF_DEV10G_MODES_DEV10G_D15_MODE BIT(3) +#define PORT_CONF_DEV10G_MODES_DEV10G_D15_MODE_SET(x)\ + FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D15_MODE, x) +#define PORT_CONF_DEV10G_MODES_DEV10G_D15_MODE_GET(x)\ + FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D15_MODE, x) + +#define PORT_CONF_DEV10G_MODES_DEV10G_D48_MODE BIT(4) +#define PORT_CONF_DEV10G_MODES_DEV10G_D48_MODE_SET(x)\ + FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D48_MODE, x) +#define PORT_CONF_DEV10G_MODES_DEV10G_D48_MODE_GET(x)\ + FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D48_MODE, x) + +#define PORT_CONF_DEV10G_MODES_DEV10G_D49_MODE BIT(5) +#define PORT_CONF_DEV10G_MODES_DEV10G_D49_MODE_SET(x)\ + FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D49_MODE, x) +#define PORT_CONF_DEV10G_MODES_DEV10G_D49_MODE_GET(x)\ + FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D49_MODE, x) + +#define PORT_CONF_DEV10G_MODES_DEV10G_D50_MODE BIT(6) +#define PORT_CONF_DEV10G_MODES_DEV10G_D50_MODE_SET(x)\ + FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D50_MODE, x) +#define PORT_CONF_DEV10G_MODES_DEV10G_D50_MODE_GET(x)\ + FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D50_MODE, x) + +#define PORT_CONF_DEV10G_MODES_DEV10G_D51_MODE BIT(7) +#define PORT_CONF_DEV10G_MODES_DEV10G_D51_MODE_SET(x)\ + FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D51_MODE, x) +#define PORT_CONF_DEV10G_MODES_DEV10G_D51_MODE_GET(x)\ + FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D51_MODE, x) + +#define PORT_CONF_DEV10G_MODES_DEV10G_D52_MODE BIT(8) +#define PORT_CONF_DEV10G_MODES_DEV10G_D52_MODE_SET(x)\ + FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D52_MODE, x) +#define 
PORT_CONF_DEV10G_MODES_DEV10G_D52_MODE_GET(x)\ + FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D52_MODE, x) + +#define PORT_CONF_DEV10G_MODES_DEV10G_D53_MODE BIT(9) +#define PORT_CONF_DEV10G_MODES_DEV10G_D53_MODE_SET(x)\ + FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D53_MODE, x) +#define PORT_CONF_DEV10G_MODES_DEV10G_D53_MODE_GET(x)\ + FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D53_MODE, x) + +#define PORT_CONF_DEV10G_MODES_DEV10G_D54_MODE BIT(10) +#define PORT_CONF_DEV10G_MODES_DEV10G_D54_MODE_SET(x)\ + FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D54_MODE, x) +#define PORT_CONF_DEV10G_MODES_DEV10G_D54_MODE_GET(x)\ + FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D54_MODE, x) + +#define PORT_CONF_DEV10G_MODES_DEV10G_D55_MODE BIT(11) +#define PORT_CONF_DEV10G_MODES_DEV10G_D55_MODE_SET(x)\ + FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D55_MODE, x) +#define PORT_CONF_DEV10G_MODES_DEV10G_D55_MODE_GET(x)\ + FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D55_MODE, x) + +/* PORT_CONF:HW_CFG:DEV25G_MODES */ +#define PORT_CONF_DEV25G_MODES __REG(TARGET_PORT_CONF, 0, 1, 0, 0, 1, 24, 8, 0, 1, 4) + +#define PORT_CONF_DEV25G_MODES_DEV25G_D56_MODE BIT(0) +#define PORT_CONF_DEV25G_MODES_DEV25G_D56_MODE_SET(x)\ + FIELD_PREP(PORT_CONF_DEV25G_MODES_DEV25G_D56_MODE, x) +#define PORT_CONF_DEV25G_MODES_DEV25G_D56_MODE_GET(x)\ + FIELD_GET(PORT_CONF_DEV25G_MODES_DEV25G_D56_MODE, x) + +#define PORT_CONF_DEV25G_MODES_DEV25G_D57_MODE BIT(1) +#define PORT_CONF_DEV25G_MODES_DEV25G_D57_MODE_SET(x)\ + FIELD_PREP(PORT_CONF_DEV25G_MODES_DEV25G_D57_MODE, x) +#define PORT_CONF_DEV25G_MODES_DEV25G_D57_MODE_GET(x)\ + FIELD_GET(PORT_CONF_DEV25G_MODES_DEV25G_D57_MODE, x) + +#define PORT_CONF_DEV25G_MODES_DEV25G_D58_MODE BIT(2) +#define PORT_CONF_DEV25G_MODES_DEV25G_D58_MODE_SET(x)\ + FIELD_PREP(PORT_CONF_DEV25G_MODES_DEV25G_D58_MODE, x) +#define PORT_CONF_DEV25G_MODES_DEV25G_D58_MODE_GET(x)\ + FIELD_GET(PORT_CONF_DEV25G_MODES_DEV25G_D58_MODE, x) + +#define PORT_CONF_DEV25G_MODES_DEV25G_D59_MODE BIT(3) +#define PORT_CONF_DEV25G_MODES_DEV25G_D59_MODE_SET(x)\ + FIELD_PREP(PORT_CONF_DEV25G_MODES_DEV25G_D59_MODE, x) +#define PORT_CONF_DEV25G_MODES_DEV25G_D59_MODE_GET(x)\ + FIELD_GET(PORT_CONF_DEV25G_MODES_DEV25G_D59_MODE, x) + +#define PORT_CONF_DEV25G_MODES_DEV25G_D60_MODE BIT(4) +#define PORT_CONF_DEV25G_MODES_DEV25G_D60_MODE_SET(x)\ + FIELD_PREP(PORT_CONF_DEV25G_MODES_DEV25G_D60_MODE, x) +#define PORT_CONF_DEV25G_MODES_DEV25G_D60_MODE_GET(x)\ + FIELD_GET(PORT_CONF_DEV25G_MODES_DEV25G_D60_MODE, x) + +#define PORT_CONF_DEV25G_MODES_DEV25G_D61_MODE BIT(5) +#define PORT_CONF_DEV25G_MODES_DEV25G_D61_MODE_SET(x)\ + FIELD_PREP(PORT_CONF_DEV25G_MODES_DEV25G_D61_MODE, x) +#define PORT_CONF_DEV25G_MODES_DEV25G_D61_MODE_GET(x)\ + FIELD_GET(PORT_CONF_DEV25G_MODES_DEV25G_D61_MODE, x) + +#define PORT_CONF_DEV25G_MODES_DEV25G_D62_MODE BIT(6) +#define PORT_CONF_DEV25G_MODES_DEV25G_D62_MODE_SET(x)\ + FIELD_PREP(PORT_CONF_DEV25G_MODES_DEV25G_D62_MODE, x) +#define PORT_CONF_DEV25G_MODES_DEV25G_D62_MODE_GET(x)\ + FIELD_GET(PORT_CONF_DEV25G_MODES_DEV25G_D62_MODE, x) + +#define PORT_CONF_DEV25G_MODES_DEV25G_D63_MODE BIT(7) +#define PORT_CONF_DEV25G_MODES_DEV25G_D63_MODE_SET(x)\ + FIELD_PREP(PORT_CONF_DEV25G_MODES_DEV25G_D63_MODE, x) +#define PORT_CONF_DEV25G_MODES_DEV25G_D63_MODE_GET(x)\ + FIELD_GET(PORT_CONF_DEV25G_MODES_DEV25G_D63_MODE, x) + +/* PORT_CONF:HW_CFG:QSGMII_ENA */ +#define PORT_CONF_QSGMII_ENA __REG(TARGET_PORT_CONF, 0, 1, 0, 0, 1, 24, 12, 0, 1, 4) + +#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_0 BIT(0) +#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_0_SET(x)\ + 
FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_0, x) +#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_0_GET(x)\ + FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_0, x) + +#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_1 BIT(1) +#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_1_SET(x)\ + FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_1, x) +#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_1_GET(x)\ + FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_1, x) + +#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_2 BIT(2) +#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_2_SET(x)\ + FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_2, x) +#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_2_GET(x)\ + FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_2, x) + +#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_3 BIT(3) +#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_3_SET(x)\ + FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_3, x) +#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_3_GET(x)\ + FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_3, x) + +#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_4 BIT(4) +#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_4_SET(x)\ + FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_4, x) +#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_4_GET(x)\ + FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_4, x) + +#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_5 BIT(5) +#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_5_SET(x)\ + FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_5, x) +#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_5_GET(x)\ + FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_5, x) + +#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_6 BIT(6) +#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_6_SET(x)\ + FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_6, x) +#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_6_GET(x)\ + FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_6, x) + +#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_7 BIT(7) +#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_7_SET(x)\ + FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_7, x) +#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_7_GET(x)\ + FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_7, x) + +#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_8 BIT(8) +#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_8_SET(x)\ + FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_8, x) +#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_8_GET(x)\ + FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_8, x) + +#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_9 BIT(9) +#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_9_SET(x)\ + FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_9, x) +#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_9_GET(x)\ + FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_9, x) + +#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_10 BIT(10) +#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_10_SET(x)\ + FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_10, x) +#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_10_GET(x)\ + FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_10, x) + +#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_11 BIT(11) +#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_11_SET(x)\ + FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_11, x) +#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_11_GET(x)\ + FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_11, x) + +/* PORT_CONF:USGMII_CFG_STAT:USGMII_CFG */ +#define PORT_CONF_USGMII_CFG(g) __REG(TARGET_PORT_CONF, 0, 1, 72, g, 6, 8, 0, 0, 1, 4) + +#define PORT_CONF_USGMII_CFG_BYPASS_SCRAM BIT(9) +#define PORT_CONF_USGMII_CFG_BYPASS_SCRAM_SET(x)\ + FIELD_PREP(PORT_CONF_USGMII_CFG_BYPASS_SCRAM, x) +#define PORT_CONF_USGMII_CFG_BYPASS_SCRAM_GET(x)\ + FIELD_GET(PORT_CONF_USGMII_CFG_BYPASS_SCRAM, x) + +#define PORT_CONF_USGMII_CFG_BYPASS_DESCRAM BIT(8) +#define PORT_CONF_USGMII_CFG_BYPASS_DESCRAM_SET(x)\ + FIELD_PREP(PORT_CONF_USGMII_CFG_BYPASS_DESCRAM, x) +#define 
PORT_CONF_USGMII_CFG_BYPASS_DESCRAM_GET(x)\ + FIELD_GET(PORT_CONF_USGMII_CFG_BYPASS_DESCRAM, x) + +#define PORT_CONF_USGMII_CFG_FLIP_LANES BIT(7) +#define PORT_CONF_USGMII_CFG_FLIP_LANES_SET(x)\ + FIELD_PREP(PORT_CONF_USGMII_CFG_FLIP_LANES, x) +#define PORT_CONF_USGMII_CFG_FLIP_LANES_GET(x)\ + FIELD_GET(PORT_CONF_USGMII_CFG_FLIP_LANES, x) + +#define PORT_CONF_USGMII_CFG_SHYST_DIS BIT(6) +#define PORT_CONF_USGMII_CFG_SHYST_DIS_SET(x)\ + FIELD_PREP(PORT_CONF_USGMII_CFG_SHYST_DIS, x) +#define PORT_CONF_USGMII_CFG_SHYST_DIS_GET(x)\ + FIELD_GET(PORT_CONF_USGMII_CFG_SHYST_DIS, x) + +#define PORT_CONF_USGMII_CFG_E_DET_ENA BIT(5) +#define PORT_CONF_USGMII_CFG_E_DET_ENA_SET(x)\ + FIELD_PREP(PORT_CONF_USGMII_CFG_E_DET_ENA, x) +#define PORT_CONF_USGMII_CFG_E_DET_ENA_GET(x)\ + FIELD_GET(PORT_CONF_USGMII_CFG_E_DET_ENA, x) + +#define PORT_CONF_USGMII_CFG_USE_I1_ENA BIT(4) +#define PORT_CONF_USGMII_CFG_USE_I1_ENA_SET(x)\ + FIELD_PREP(PORT_CONF_USGMII_CFG_USE_I1_ENA, x) +#define PORT_CONF_USGMII_CFG_USE_I1_ENA_GET(x)\ + FIELD_GET(PORT_CONF_USGMII_CFG_USE_I1_ENA, x) + +#define PORT_CONF_USGMII_CFG_QUAD_MODE BIT(1) +#define PORT_CONF_USGMII_CFG_QUAD_MODE_SET(x)\ + FIELD_PREP(PORT_CONF_USGMII_CFG_QUAD_MODE, x) +#define PORT_CONF_USGMII_CFG_QUAD_MODE_GET(x)\ + FIELD_GET(PORT_CONF_USGMII_CFG_QUAD_MODE, x) + +/* DEVCPU_PTP:PTP_CFG:PTP_PIN_INTR */ +#define PTP_PTP_PIN_INTR __REG(TARGET_PTP, 0, 1, 320, 0, 1, 16, 0, 0, 1, 4) + +#define PTP_PTP_PIN_INTR_INTR_PTP GENMASK(4, 0) +#define PTP_PTP_PIN_INTR_INTR_PTP_SET(x)\ + FIELD_PREP(PTP_PTP_PIN_INTR_INTR_PTP, x) +#define PTP_PTP_PIN_INTR_INTR_PTP_GET(x)\ + FIELD_GET(PTP_PTP_PIN_INTR_INTR_PTP, x) + +/* DEVCPU_PTP:PTP_CFG:PTP_PIN_INTR_ENA */ +#define PTP_PTP_PIN_INTR_ENA __REG(TARGET_PTP, 0, 1, 320, 0, 1, 16, 4, 0, 1, 4) + +#define PTP_PTP_PIN_INTR_ENA_INTR_PTP_ENA GENMASK(4, 0) +#define PTP_PTP_PIN_INTR_ENA_INTR_PTP_ENA_SET(x)\ + FIELD_PREP(PTP_PTP_PIN_INTR_ENA_INTR_PTP_ENA, x) +#define PTP_PTP_PIN_INTR_ENA_INTR_PTP_ENA_GET(x)\ + FIELD_GET(PTP_PTP_PIN_INTR_ENA_INTR_PTP_ENA, x) + +/* DEVCPU_PTP:PTP_CFG:PTP_INTR_IDENT */ +#define PTP_PTP_INTR_IDENT __REG(TARGET_PTP, 0, 1, 320, 0, 1, 16, 8, 0, 1, 4) + +#define PTP_PTP_INTR_IDENT_INTR_PTP_IDENT GENMASK(4, 0) +#define PTP_PTP_INTR_IDENT_INTR_PTP_IDENT_SET(x)\ + FIELD_PREP(PTP_PTP_INTR_IDENT_INTR_PTP_IDENT, x) +#define PTP_PTP_INTR_IDENT_INTR_PTP_IDENT_GET(x)\ + FIELD_GET(PTP_PTP_INTR_IDENT_INTR_PTP_IDENT, x) + +/* DEVCPU_PTP:PTP_CFG:PTP_DOM_CFG */ +#define PTP_PTP_DOM_CFG __REG(TARGET_PTP, 0, 1, 320, 0, 1, 16, 12, 0, 1, 4) + +#define PTP_PTP_DOM_CFG_PTP_ENA GENMASK(11, 9) +#define PTP_PTP_DOM_CFG_PTP_ENA_SET(x)\ + FIELD_PREP(PTP_PTP_DOM_CFG_PTP_ENA, x) +#define PTP_PTP_DOM_CFG_PTP_ENA_GET(x)\ + FIELD_GET(PTP_PTP_DOM_CFG_PTP_ENA, x) + +#define PTP_PTP_DOM_CFG_PTP_HOLD GENMASK(8, 6) +#define PTP_PTP_DOM_CFG_PTP_HOLD_SET(x)\ + FIELD_PREP(PTP_PTP_DOM_CFG_PTP_HOLD, x) +#define PTP_PTP_DOM_CFG_PTP_HOLD_GET(x)\ + FIELD_GET(PTP_PTP_DOM_CFG_PTP_HOLD, x) + +#define PTP_PTP_DOM_CFG_PTP_TOD_FREEZE GENMASK(5, 3) +#define PTP_PTP_DOM_CFG_PTP_TOD_FREEZE_SET(x)\ + FIELD_PREP(PTP_PTP_DOM_CFG_PTP_TOD_FREEZE, x) +#define PTP_PTP_DOM_CFG_PTP_TOD_FREEZE_GET(x)\ + FIELD_GET(PTP_PTP_DOM_CFG_PTP_TOD_FREEZE, x) + +#define PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS GENMASK(2, 0) +#define PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS_SET(x)\ + FIELD_PREP(PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS, x) +#define PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS_GET(x)\ + FIELD_GET(PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS, x) + +/* DEVCPU_PTP:PTP_TOD_DOMAINS:CLK_PER_CFG */ +#define PTP_CLK_PER_CFG(g, r) 
__REG(TARGET_PTP, 0, 1, 336, g, 3, 28, 0, r, 2, 4) + +/* DEVCPU_PTP:PTP_TOD_DOMAINS:PTP_CUR_NSEC */ +#define PTP_PTP_CUR_NSEC(g) __REG(TARGET_PTP, 0, 1, 336, g, 3, 28, 8, 0, 1, 4) + +#define PTP_PTP_CUR_NSEC_PTP_CUR_NSEC GENMASK(29, 0) +#define PTP_PTP_CUR_NSEC_PTP_CUR_NSEC_SET(x)\ + FIELD_PREP(PTP_PTP_CUR_NSEC_PTP_CUR_NSEC, x) +#define PTP_PTP_CUR_NSEC_PTP_CUR_NSEC_GET(x)\ + FIELD_GET(PTP_PTP_CUR_NSEC_PTP_CUR_NSEC, x) + +/* DEVCPU_PTP:PTP_TOD_DOMAINS:PTP_CUR_NSEC_FRAC */ +#define PTP_PTP_CUR_NSEC_FRAC(g) __REG(TARGET_PTP, 0, 1, 336, g, 3, 28, 12, 0, 1, 4) + +#define PTP_PTP_CUR_NSEC_FRAC_PTP_CUR_NSEC_FRAC GENMASK(7, 0) +#define PTP_PTP_CUR_NSEC_FRAC_PTP_CUR_NSEC_FRAC_SET(x)\ + FIELD_PREP(PTP_PTP_CUR_NSEC_FRAC_PTP_CUR_NSEC_FRAC, x) +#define PTP_PTP_CUR_NSEC_FRAC_PTP_CUR_NSEC_FRAC_GET(x)\ + FIELD_GET(PTP_PTP_CUR_NSEC_FRAC_PTP_CUR_NSEC_FRAC, x) + +/* DEVCPU_PTP:PTP_TOD_DOMAINS:PTP_CUR_SEC_LSB */ +#define PTP_PTP_CUR_SEC_LSB(g) __REG(TARGET_PTP, 0, 1, 336, g, 3, 28, 16, 0, 1, 4) + +/* DEVCPU_PTP:PTP_TOD_DOMAINS:PTP_CUR_SEC_MSB */ +#define PTP_PTP_CUR_SEC_MSB(g) __REG(TARGET_PTP, 0, 1, 336, g, 3, 28, 20, 0, 1, 4) + +#define PTP_PTP_CUR_SEC_MSB_PTP_CUR_SEC_MSB GENMASK(15, 0) +#define PTP_PTP_CUR_SEC_MSB_PTP_CUR_SEC_MSB_SET(x)\ + FIELD_PREP(PTP_PTP_CUR_SEC_MSB_PTP_CUR_SEC_MSB, x) +#define PTP_PTP_CUR_SEC_MSB_PTP_CUR_SEC_MSB_GET(x)\ + FIELD_GET(PTP_PTP_CUR_SEC_MSB_PTP_CUR_SEC_MSB, x) + +/* DEVCPU_PTP:PTP_TOD_DOMAINS:NTP_CUR_NSEC */ +#define PTP_NTP_CUR_NSEC(g) __REG(TARGET_PTP, 0, 1, 336, g, 3, 28, 24, 0, 1, 4) + +/* DEVCPU_PTP:PTP_PINS:PTP_PIN_CFG */ +#define PTP_PTP_PIN_CFG(g) __REG(TARGET_PTP, 0, 1, 0, g, 5, 64, 0, 0, 1, 4) + +#define PTP_PTP_PIN_CFG_PTP_PIN_ACTION GENMASK(28, 26) +#define PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(x)\ + FIELD_PREP(PTP_PTP_PIN_CFG_PTP_PIN_ACTION, x) +#define PTP_PTP_PIN_CFG_PTP_PIN_ACTION_GET(x)\ + FIELD_GET(PTP_PTP_PIN_CFG_PTP_PIN_ACTION, x) + +#define PTP_PTP_PIN_CFG_PTP_PIN_SYNC GENMASK(25, 24) +#define PTP_PTP_PIN_CFG_PTP_PIN_SYNC_SET(x)\ + FIELD_PREP(PTP_PTP_PIN_CFG_PTP_PIN_SYNC, x) +#define PTP_PTP_PIN_CFG_PTP_PIN_SYNC_GET(x)\ + FIELD_GET(PTP_PTP_PIN_CFG_PTP_PIN_SYNC, x) + +#define PTP_PTP_PIN_CFG_PTP_PIN_INV_POL BIT(23) +#define PTP_PTP_PIN_CFG_PTP_PIN_INV_POL_SET(x)\ + FIELD_PREP(PTP_PTP_PIN_CFG_PTP_PIN_INV_POL, x) +#define PTP_PTP_PIN_CFG_PTP_PIN_INV_POL_GET(x)\ + FIELD_GET(PTP_PTP_PIN_CFG_PTP_PIN_INV_POL, x) + +#define PTP_PTP_PIN_CFG_PTP_PIN_SELECT GENMASK(22, 21) +#define PTP_PTP_PIN_CFG_PTP_PIN_SELECT_SET(x)\ + FIELD_PREP(PTP_PTP_PIN_CFG_PTP_PIN_SELECT, x) +#define PTP_PTP_PIN_CFG_PTP_PIN_SELECT_GET(x)\ + FIELD_GET(PTP_PTP_PIN_CFG_PTP_PIN_SELECT, x) + +#define PTP_PTP_PIN_CFG_PTP_CLK_SELECT GENMASK(20, 18) +#define PTP_PTP_PIN_CFG_PTP_CLK_SELECT_SET(x)\ + FIELD_PREP(PTP_PTP_PIN_CFG_PTP_CLK_SELECT, x) +#define PTP_PTP_PIN_CFG_PTP_CLK_SELECT_GET(x)\ + FIELD_GET(PTP_PTP_PIN_CFG_PTP_CLK_SELECT, x) + +#define PTP_PTP_PIN_CFG_PTP_PIN_DOM GENMASK(17, 16) +#define PTP_PTP_PIN_CFG_PTP_PIN_DOM_SET(x)\ + FIELD_PREP(PTP_PTP_PIN_CFG_PTP_PIN_DOM, x) +#define PTP_PTP_PIN_CFG_PTP_PIN_DOM_GET(x)\ + FIELD_GET(PTP_PTP_PIN_CFG_PTP_PIN_DOM, x) + +#define PTP_PTP_PIN_CFG_PTP_PIN_OPT GENMASK(15, 14) +#define PTP_PTP_PIN_CFG_PTP_PIN_OPT_SET(x)\ + FIELD_PREP(PTP_PTP_PIN_CFG_PTP_PIN_OPT, x) +#define PTP_PTP_PIN_CFG_PTP_PIN_OPT_GET(x)\ + FIELD_GET(PTP_PTP_PIN_CFG_PTP_PIN_OPT, x) + +#define PTP_PTP_PIN_CFG_PTP_PIN_EMBEDDED_CLK BIT(13) +#define PTP_PTP_PIN_CFG_PTP_PIN_EMBEDDED_CLK_SET(x)\ + FIELD_PREP(PTP_PTP_PIN_CFG_PTP_PIN_EMBEDDED_CLK, x) +#define 
PTP_PTP_PIN_CFG_PTP_PIN_EMBEDDED_CLK_GET(x)\ + FIELD_GET(PTP_PTP_PIN_CFG_PTP_PIN_EMBEDDED_CLK, x) + +#define PTP_PTP_PIN_CFG_PTP_PIN_OUTP_OFS GENMASK(12, 0) +#define PTP_PTP_PIN_CFG_PTP_PIN_OUTP_OFS_SET(x)\ + FIELD_PREP(PTP_PTP_PIN_CFG_PTP_PIN_OUTP_OFS, x) +#define PTP_PTP_PIN_CFG_PTP_PIN_OUTP_OFS_GET(x)\ + FIELD_GET(PTP_PTP_PIN_CFG_PTP_PIN_OUTP_OFS, x) + +/* DEVCPU_PTP:PTP_PINS:PTP_TOD_SEC_MSB */ +#define PTP_PTP_TOD_SEC_MSB(g) __REG(TARGET_PTP, 0, 1, 0, g, 5, 64, 4, 0, 1, 4) + +#define PTP_PTP_TOD_SEC_MSB_PTP_TOD_SEC_MSB GENMASK(15, 0) +#define PTP_PTP_TOD_SEC_MSB_PTP_TOD_SEC_MSB_SET(x)\ + FIELD_PREP(PTP_PTP_TOD_SEC_MSB_PTP_TOD_SEC_MSB, x) +#define PTP_PTP_TOD_SEC_MSB_PTP_TOD_SEC_MSB_GET(x)\ + FIELD_GET(PTP_PTP_TOD_SEC_MSB_PTP_TOD_SEC_MSB, x) + +/* DEVCPU_PTP:PTP_PINS:PTP_TOD_SEC_LSB */ +#define PTP_PTP_TOD_SEC_LSB(g) __REG(TARGET_PTP, 0, 1, 0, g, 5, 64, 8, 0, 1, 4) + +/* DEVCPU_PTP:PTP_PINS:PTP_TOD_NSEC */ +#define PTP_PTP_TOD_NSEC(g) __REG(TARGET_PTP, 0, 1, 0, g, 5, 64, 12, 0, 1, 4) + +#define PTP_PTP_TOD_NSEC_PTP_TOD_NSEC GENMASK(29, 0) +#define PTP_PTP_TOD_NSEC_PTP_TOD_NSEC_SET(x)\ + FIELD_PREP(PTP_PTP_TOD_NSEC_PTP_TOD_NSEC, x) +#define PTP_PTP_TOD_NSEC_PTP_TOD_NSEC_GET(x)\ + FIELD_GET(PTP_PTP_TOD_NSEC_PTP_TOD_NSEC, x) + +/* DEVCPU_PTP:PTP_PINS:PTP_TOD_NSEC_FRAC */ +#define PTP_PTP_TOD_NSEC_FRAC(g) __REG(TARGET_PTP, 0, 1, 0, g, 5, 64, 16, 0, 1, 4) + +#define PTP_PTP_TOD_NSEC_FRAC_PTP_TOD_NSEC_FRAC GENMASK(7, 0) +#define PTP_PTP_TOD_NSEC_FRAC_PTP_TOD_NSEC_FRAC_SET(x)\ + FIELD_PREP(PTP_PTP_TOD_NSEC_FRAC_PTP_TOD_NSEC_FRAC, x) +#define PTP_PTP_TOD_NSEC_FRAC_PTP_TOD_NSEC_FRAC_GET(x)\ + FIELD_GET(PTP_PTP_TOD_NSEC_FRAC_PTP_TOD_NSEC_FRAC, x) + +/* DEVCPU_PTP:PTP_PINS:NTP_NSEC */ +#define PTP_NTP_NSEC(g) __REG(TARGET_PTP, 0, 1, 0, g, 5, 64, 20, 0, 1, 4) + +/* DEVCPU_PTP:PTP_PINS:PIN_WF_HIGH_PERIOD */ +#define PTP_PIN_WF_HIGH_PERIOD(g) __REG(TARGET_PTP, 0, 1, 0, g, 5, 64, 24, 0, 1, 4) + +#define PTP_PIN_WF_HIGH_PERIOD_PIN_WFH GENMASK(29, 0) +#define PTP_PIN_WF_HIGH_PERIOD_PIN_WFH_SET(x)\ + FIELD_PREP(PTP_PIN_WF_HIGH_PERIOD_PIN_WFH, x) +#define PTP_PIN_WF_HIGH_PERIOD_PIN_WFH_GET(x)\ + FIELD_GET(PTP_PIN_WF_HIGH_PERIOD_PIN_WFH, x) + +/* DEVCPU_PTP:PTP_PINS:PIN_WF_LOW_PERIOD */ +#define PTP_PIN_WF_LOW_PERIOD(g) __REG(TARGET_PTP, 0, 1, 0, g, 5, 64, 28, 0, 1, 4) + +#define PTP_PIN_WF_LOW_PERIOD_PIN_WFL GENMASK(29, 0) +#define PTP_PIN_WF_LOW_PERIOD_PIN_WFL_SET(x)\ + FIELD_PREP(PTP_PIN_WF_LOW_PERIOD_PIN_WFL, x) +#define PTP_PIN_WF_LOW_PERIOD_PIN_WFL_GET(x)\ + FIELD_GET(PTP_PIN_WF_LOW_PERIOD_PIN_WFL, x) + +/* DEVCPU_PTP:PTP_PINS:PIN_IOBOUNCH_DELAY */ +#define PTP_PIN_IOBOUNCH_DELAY(g) __REG(TARGET_PTP, 0, 1, 0, g, 5, 64, 32, 0, 1, 4) + +#define PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_VAL GENMASK(18, 3) +#define PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_VAL_SET(x)\ + FIELD_PREP(PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_VAL, x) +#define PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_VAL_GET(x)\ + FIELD_GET(PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_VAL, x) + +#define PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_CFG GENMASK(2, 0) +#define PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_CFG_SET(x)\ + FIELD_PREP(PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_CFG, x) +#define PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_CFG_GET(x)\ + FIELD_GET(PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_CFG, x) + +/* DEVCPU_PTP:PHASE_DETECTOR_CTRL:PHAD_CTRL */ +#define PTP_PHAD_CTRL(g) __REG(TARGET_PTP, 0, 1, 420, g, 5, 8, 0, 0, 1, 4) + +#define PTP_PHAD_CTRL_PHAD_ENA BIT(7) +#define PTP_PHAD_CTRL_PHAD_ENA_SET(x)\ + FIELD_PREP(PTP_PHAD_CTRL_PHAD_ENA, x) +#define PTP_PHAD_CTRL_PHAD_ENA_GET(x)\ + 
FIELD_GET(PTP_PHAD_CTRL_PHAD_ENA, x) + +#define PTP_PHAD_CTRL_PHAD_FAILED BIT(6) +#define PTP_PHAD_CTRL_PHAD_FAILED_SET(x)\ + FIELD_PREP(PTP_PHAD_CTRL_PHAD_FAILED, x) +#define PTP_PHAD_CTRL_PHAD_FAILED_GET(x)\ + FIELD_GET(PTP_PHAD_CTRL_PHAD_FAILED, x) + +#define PTP_PHAD_CTRL_REDUCED_RES GENMASK(5, 3) +#define PTP_PHAD_CTRL_REDUCED_RES_SET(x)\ + FIELD_PREP(PTP_PHAD_CTRL_REDUCED_RES, x) +#define PTP_PHAD_CTRL_REDUCED_RES_GET(x)\ + FIELD_GET(PTP_PHAD_CTRL_REDUCED_RES, x) + +#define PTP_PHAD_CTRL_LOCK_ACC GENMASK(2, 0) +#define PTP_PHAD_CTRL_LOCK_ACC_SET(x)\ + FIELD_PREP(PTP_PHAD_CTRL_LOCK_ACC, x) +#define PTP_PHAD_CTRL_LOCK_ACC_GET(x)\ + FIELD_GET(PTP_PHAD_CTRL_LOCK_ACC, x) + +/* DEVCPU_PTP:PHASE_DETECTOR_CTRL:PHAD_CYC_STAT */ +#define PTP_PHAD_CYC_STAT(g) __REG(TARGET_PTP, 0, 1, 420, g, 5, 8, 4, 0, 1, 4) + +/* QFWD:SYSTEM:SWITCH_PORT_MODE */ +#define QFWD_SWITCH_PORT_MODE(r) __REG(TARGET_QFWD, 0, 1, 0, 0, 1, 340, 0, r, 70, 4) + +#define QFWD_SWITCH_PORT_MODE_PORT_ENA BIT(19) +#define QFWD_SWITCH_PORT_MODE_PORT_ENA_SET(x)\ + FIELD_PREP(QFWD_SWITCH_PORT_MODE_PORT_ENA, x) +#define QFWD_SWITCH_PORT_MODE_PORT_ENA_GET(x)\ + FIELD_GET(QFWD_SWITCH_PORT_MODE_PORT_ENA, x) + +#define QFWD_SWITCH_PORT_MODE_FWD_URGENCY GENMASK(18, 10) +#define QFWD_SWITCH_PORT_MODE_FWD_URGENCY_SET(x)\ + FIELD_PREP(QFWD_SWITCH_PORT_MODE_FWD_URGENCY, x) +#define QFWD_SWITCH_PORT_MODE_FWD_URGENCY_GET(x)\ + FIELD_GET(QFWD_SWITCH_PORT_MODE_FWD_URGENCY, x) + +#define QFWD_SWITCH_PORT_MODE_YEL_RSRVD GENMASK(9, 6) +#define QFWD_SWITCH_PORT_MODE_YEL_RSRVD_SET(x)\ + FIELD_PREP(QFWD_SWITCH_PORT_MODE_YEL_RSRVD, x) +#define QFWD_SWITCH_PORT_MODE_YEL_RSRVD_GET(x)\ + FIELD_GET(QFWD_SWITCH_PORT_MODE_YEL_RSRVD, x) + +#define QFWD_SWITCH_PORT_MODE_INGRESS_DROP_MODE BIT(5) +#define QFWD_SWITCH_PORT_MODE_INGRESS_DROP_MODE_SET(x)\ + FIELD_PREP(QFWD_SWITCH_PORT_MODE_INGRESS_DROP_MODE, x) +#define QFWD_SWITCH_PORT_MODE_INGRESS_DROP_MODE_GET(x)\ + FIELD_GET(QFWD_SWITCH_PORT_MODE_INGRESS_DROP_MODE, x) + +#define QFWD_SWITCH_PORT_MODE_IGR_NO_SHARING BIT(4) +#define QFWD_SWITCH_PORT_MODE_IGR_NO_SHARING_SET(x)\ + FIELD_PREP(QFWD_SWITCH_PORT_MODE_IGR_NO_SHARING, x) +#define QFWD_SWITCH_PORT_MODE_IGR_NO_SHARING_GET(x)\ + FIELD_GET(QFWD_SWITCH_PORT_MODE_IGR_NO_SHARING, x) + +#define QFWD_SWITCH_PORT_MODE_EGR_NO_SHARING BIT(3) +#define QFWD_SWITCH_PORT_MODE_EGR_NO_SHARING_SET(x)\ + FIELD_PREP(QFWD_SWITCH_PORT_MODE_EGR_NO_SHARING, x) +#define QFWD_SWITCH_PORT_MODE_EGR_NO_SHARING_GET(x)\ + FIELD_GET(QFWD_SWITCH_PORT_MODE_EGR_NO_SHARING, x) + +#define QFWD_SWITCH_PORT_MODE_EGRESS_DROP_MODE BIT(2) +#define QFWD_SWITCH_PORT_MODE_EGRESS_DROP_MODE_SET(x)\ + FIELD_PREP(QFWD_SWITCH_PORT_MODE_EGRESS_DROP_MODE, x) +#define QFWD_SWITCH_PORT_MODE_EGRESS_DROP_MODE_GET(x)\ + FIELD_GET(QFWD_SWITCH_PORT_MODE_EGRESS_DROP_MODE, x) + +#define QFWD_SWITCH_PORT_MODE_EGRESS_RSRV_DIS BIT(1) +#define QFWD_SWITCH_PORT_MODE_EGRESS_RSRV_DIS_SET(x)\ + FIELD_PREP(QFWD_SWITCH_PORT_MODE_EGRESS_RSRV_DIS, x) +#define QFWD_SWITCH_PORT_MODE_EGRESS_RSRV_DIS_GET(x)\ + FIELD_GET(QFWD_SWITCH_PORT_MODE_EGRESS_RSRV_DIS, x) + +#define QFWD_SWITCH_PORT_MODE_LEARNALL_MORE BIT(0) +#define QFWD_SWITCH_PORT_MODE_LEARNALL_MORE_SET(x)\ + FIELD_PREP(QFWD_SWITCH_PORT_MODE_LEARNALL_MORE, x) +#define QFWD_SWITCH_PORT_MODE_LEARNALL_MORE_GET(x)\ + FIELD_GET(QFWD_SWITCH_PORT_MODE_LEARNALL_MORE, x) + +/* QRES:RES_CTRL:RES_CFG */ +#define QRES_RES_CFG(g) __REG(TARGET_QRES, 0, 1, 0, g, 5120, 16, 0, 0, 1, 4) + +#define QRES_RES_CFG_WM_HIGH GENMASK(11, 0) +#define QRES_RES_CFG_WM_HIGH_SET(x)\ + 
FIELD_PREP(QRES_RES_CFG_WM_HIGH, x) +#define QRES_RES_CFG_WM_HIGH_GET(x)\ + FIELD_GET(QRES_RES_CFG_WM_HIGH, x) + +/* QRES:RES_CTRL:RES_STAT */ +#define QRES_RES_STAT(g) __REG(TARGET_QRES, 0, 1, 0, g, 5120, 16, 4, 0, 1, 4) + +#define QRES_RES_STAT_MAXUSE GENMASK(20, 0) +#define QRES_RES_STAT_MAXUSE_SET(x)\ + FIELD_PREP(QRES_RES_STAT_MAXUSE, x) +#define QRES_RES_STAT_MAXUSE_GET(x)\ + FIELD_GET(QRES_RES_STAT_MAXUSE, x) + +/* QRES:RES_CTRL:RES_STAT_CUR */ +#define QRES_RES_STAT_CUR(g) __REG(TARGET_QRES, 0, 1, 0, g, 5120, 16, 8, 0, 1, 4) + +#define QRES_RES_STAT_CUR_INUSE GENMASK(20, 0) +#define QRES_RES_STAT_CUR_INUSE_SET(x)\ + FIELD_PREP(QRES_RES_STAT_CUR_INUSE, x) +#define QRES_RES_STAT_CUR_INUSE_GET(x)\ + FIELD_GET(QRES_RES_STAT_CUR_INUSE, x) + +/* DEVCPU_QS:XTR:XTR_GRP_CFG */ +#define QS_XTR_GRP_CFG(r) __REG(TARGET_QS, 0, 1, 0, 0, 1, 36, 0, r, 2, 4) + +#define QS_XTR_GRP_CFG_MODE GENMASK(3, 2) +#define QS_XTR_GRP_CFG_MODE_SET(x)\ + FIELD_PREP(QS_XTR_GRP_CFG_MODE, x) +#define QS_XTR_GRP_CFG_MODE_GET(x)\ + FIELD_GET(QS_XTR_GRP_CFG_MODE, x) + +#define QS_XTR_GRP_CFG_STATUS_WORD_POS BIT(1) +#define QS_XTR_GRP_CFG_STATUS_WORD_POS_SET(x)\ + FIELD_PREP(QS_XTR_GRP_CFG_STATUS_WORD_POS, x) +#define QS_XTR_GRP_CFG_STATUS_WORD_POS_GET(x)\ + FIELD_GET(QS_XTR_GRP_CFG_STATUS_WORD_POS, x) + +#define QS_XTR_GRP_CFG_BYTE_SWAP BIT(0) +#define QS_XTR_GRP_CFG_BYTE_SWAP_SET(x)\ + FIELD_PREP(QS_XTR_GRP_CFG_BYTE_SWAP, x) +#define QS_XTR_GRP_CFG_BYTE_SWAP_GET(x)\ + FIELD_GET(QS_XTR_GRP_CFG_BYTE_SWAP, x) + +/* DEVCPU_QS:XTR:XTR_RD */ +#define QS_XTR_RD(r) __REG(TARGET_QS, 0, 1, 0, 0, 1, 36, 8, r, 2, 4) + +/* DEVCPU_QS:XTR:XTR_FLUSH */ +#define QS_XTR_FLUSH __REG(TARGET_QS, 0, 1, 0, 0, 1, 36, 24, 0, 1, 4) + +#define QS_XTR_FLUSH_FLUSH GENMASK(1, 0) +#define QS_XTR_FLUSH_FLUSH_SET(x)\ + FIELD_PREP(QS_XTR_FLUSH_FLUSH, x) +#define QS_XTR_FLUSH_FLUSH_GET(x)\ + FIELD_GET(QS_XTR_FLUSH_FLUSH, x) + +/* DEVCPU_QS:XTR:XTR_DATA_PRESENT */ +#define QS_XTR_DATA_PRESENT __REG(TARGET_QS, 0, 1, 0, 0, 1, 36, 28, 0, 1, 4) + +#define QS_XTR_DATA_PRESENT_DATA_PRESENT GENMASK(1, 0) +#define QS_XTR_DATA_PRESENT_DATA_PRESENT_SET(x)\ + FIELD_PREP(QS_XTR_DATA_PRESENT_DATA_PRESENT, x) +#define QS_XTR_DATA_PRESENT_DATA_PRESENT_GET(x)\ + FIELD_GET(QS_XTR_DATA_PRESENT_DATA_PRESENT, x) + +/* DEVCPU_QS:INJ:INJ_GRP_CFG */ +#define QS_INJ_GRP_CFG(r) __REG(TARGET_QS, 0, 1, 36, 0, 1, 40, 0, r, 2, 4) + +#define QS_INJ_GRP_CFG_MODE GENMASK(3, 2) +#define QS_INJ_GRP_CFG_MODE_SET(x)\ + FIELD_PREP(QS_INJ_GRP_CFG_MODE, x) +#define QS_INJ_GRP_CFG_MODE_GET(x)\ + FIELD_GET(QS_INJ_GRP_CFG_MODE, x) + +#define QS_INJ_GRP_CFG_BYTE_SWAP BIT(0) +#define QS_INJ_GRP_CFG_BYTE_SWAP_SET(x)\ + FIELD_PREP(QS_INJ_GRP_CFG_BYTE_SWAP, x) +#define QS_INJ_GRP_CFG_BYTE_SWAP_GET(x)\ + FIELD_GET(QS_INJ_GRP_CFG_BYTE_SWAP, x) + +/* DEVCPU_QS:INJ:INJ_WR */ +#define QS_INJ_WR(r) __REG(TARGET_QS, 0, 1, 36, 0, 1, 40, 8, r, 2, 4) + +/* DEVCPU_QS:INJ:INJ_CTRL */ +#define QS_INJ_CTRL(r) __REG(TARGET_QS, 0, 1, 36, 0, 1, 40, 16, r, 2, 4) + +#define QS_INJ_CTRL_GAP_SIZE GENMASK(24, 21) +#define QS_INJ_CTRL_GAP_SIZE_SET(x)\ + FIELD_PREP(QS_INJ_CTRL_GAP_SIZE, x) +#define QS_INJ_CTRL_GAP_SIZE_GET(x)\ + FIELD_GET(QS_INJ_CTRL_GAP_SIZE, x) + +#define QS_INJ_CTRL_ABORT BIT(20) +#define QS_INJ_CTRL_ABORT_SET(x)\ + FIELD_PREP(QS_INJ_CTRL_ABORT, x) +#define QS_INJ_CTRL_ABORT_GET(x)\ + FIELD_GET(QS_INJ_CTRL_ABORT, x) + +#define QS_INJ_CTRL_EOF BIT(19) +#define QS_INJ_CTRL_EOF_SET(x)\ + FIELD_PREP(QS_INJ_CTRL_EOF, x) +#define QS_INJ_CTRL_EOF_GET(x)\ + FIELD_GET(QS_INJ_CTRL_EOF, x) + +#define 
QS_INJ_CTRL_SOF BIT(18) +#define QS_INJ_CTRL_SOF_SET(x)\ + FIELD_PREP(QS_INJ_CTRL_SOF, x) +#define QS_INJ_CTRL_SOF_GET(x)\ + FIELD_GET(QS_INJ_CTRL_SOF, x) + +#define QS_INJ_CTRL_VLD_BYTES GENMASK(17, 16) +#define QS_INJ_CTRL_VLD_BYTES_SET(x)\ + FIELD_PREP(QS_INJ_CTRL_VLD_BYTES, x) +#define QS_INJ_CTRL_VLD_BYTES_GET(x)\ + FIELD_GET(QS_INJ_CTRL_VLD_BYTES, x) + +/* DEVCPU_QS:INJ:INJ_STATUS */ +#define QS_INJ_STATUS __REG(TARGET_QS, 0, 1, 36, 0, 1, 40, 24, 0, 1, 4) + +#define QS_INJ_STATUS_WMARK_REACHED GENMASK(5, 4) +#define QS_INJ_STATUS_WMARK_REACHED_SET(x)\ + FIELD_PREP(QS_INJ_STATUS_WMARK_REACHED, x) +#define QS_INJ_STATUS_WMARK_REACHED_GET(x)\ + FIELD_GET(QS_INJ_STATUS_WMARK_REACHED, x) + +#define QS_INJ_STATUS_FIFO_RDY GENMASK(3, 2) +#define QS_INJ_STATUS_FIFO_RDY_SET(x)\ + FIELD_PREP(QS_INJ_STATUS_FIFO_RDY, x) +#define QS_INJ_STATUS_FIFO_RDY_GET(x)\ + FIELD_GET(QS_INJ_STATUS_FIFO_RDY, x) + +#define QS_INJ_STATUS_INJ_IN_PROGRESS GENMASK(1, 0) +#define QS_INJ_STATUS_INJ_IN_PROGRESS_SET(x)\ + FIELD_PREP(QS_INJ_STATUS_INJ_IN_PROGRESS, x) +#define QS_INJ_STATUS_INJ_IN_PROGRESS_GET(x)\ + FIELD_GET(QS_INJ_STATUS_INJ_IN_PROGRESS, x) + +/* QSYS:PAUSE_CFG:PAUSE_CFG */ +#define QSYS_PAUSE_CFG(r) __REG(TARGET_QSYS, 0, 1, 544, 0, 1, 1128, 0, r, 70, 4) + +#define QSYS_PAUSE_CFG_PAUSE_START GENMASK(25, 14) +#define QSYS_PAUSE_CFG_PAUSE_START_SET(x)\ + FIELD_PREP(QSYS_PAUSE_CFG_PAUSE_START, x) +#define QSYS_PAUSE_CFG_PAUSE_START_GET(x)\ + FIELD_GET(QSYS_PAUSE_CFG_PAUSE_START, x) + +#define QSYS_PAUSE_CFG_PAUSE_STOP GENMASK(13, 2) +#define QSYS_PAUSE_CFG_PAUSE_STOP_SET(x)\ + FIELD_PREP(QSYS_PAUSE_CFG_PAUSE_STOP, x) +#define QSYS_PAUSE_CFG_PAUSE_STOP_GET(x)\ + FIELD_GET(QSYS_PAUSE_CFG_PAUSE_STOP, x) + +#define QSYS_PAUSE_CFG_PAUSE_ENA BIT(1) +#define QSYS_PAUSE_CFG_PAUSE_ENA_SET(x)\ + FIELD_PREP(QSYS_PAUSE_CFG_PAUSE_ENA, x) +#define QSYS_PAUSE_CFG_PAUSE_ENA_GET(x)\ + FIELD_GET(QSYS_PAUSE_CFG_PAUSE_ENA, x) + +#define QSYS_PAUSE_CFG_AGGRESSIVE_TAILDROP_ENA BIT(0) +#define QSYS_PAUSE_CFG_AGGRESSIVE_TAILDROP_ENA_SET(x)\ + FIELD_PREP(QSYS_PAUSE_CFG_AGGRESSIVE_TAILDROP_ENA, x) +#define QSYS_PAUSE_CFG_AGGRESSIVE_TAILDROP_ENA_GET(x)\ + FIELD_GET(QSYS_PAUSE_CFG_AGGRESSIVE_TAILDROP_ENA, x) + +/* QSYS:PAUSE_CFG:ATOP */ +#define QSYS_ATOP(r) __REG(TARGET_QSYS, 0, 1, 544, 0, 1, 1128, 284, r, 70, 4) + +#define QSYS_ATOP_ATOP GENMASK(11, 0) +#define QSYS_ATOP_ATOP_SET(x)\ + FIELD_PREP(QSYS_ATOP_ATOP, x) +#define QSYS_ATOP_ATOP_GET(x)\ + FIELD_GET(QSYS_ATOP_ATOP, x) + +/* QSYS:PAUSE_CFG:FWD_PRESSURE */ +#define QSYS_FWD_PRESSURE(r) __REG(TARGET_QSYS, 0, 1, 544, 0, 1, 1128, 564, r, 70, 4) + +#define QSYS_FWD_PRESSURE_FWD_PRESSURE GENMASK(11, 1) +#define QSYS_FWD_PRESSURE_FWD_PRESSURE_SET(x)\ + FIELD_PREP(QSYS_FWD_PRESSURE_FWD_PRESSURE, x) +#define QSYS_FWD_PRESSURE_FWD_PRESSURE_GET(x)\ + FIELD_GET(QSYS_FWD_PRESSURE_FWD_PRESSURE, x) + +#define QSYS_FWD_PRESSURE_FWD_PRESSURE_DIS BIT(0) +#define QSYS_FWD_PRESSURE_FWD_PRESSURE_DIS_SET(x)\ + FIELD_PREP(QSYS_FWD_PRESSURE_FWD_PRESSURE_DIS, x) +#define QSYS_FWD_PRESSURE_FWD_PRESSURE_DIS_GET(x)\ + FIELD_GET(QSYS_FWD_PRESSURE_FWD_PRESSURE_DIS, x) + +/* QSYS:PAUSE_CFG:ATOP_TOT_CFG */ +#define QSYS_ATOP_TOT_CFG __REG(TARGET_QSYS, 0, 1, 544, 0, 1, 1128, 844, 0, 1, 4) + +#define QSYS_ATOP_TOT_CFG_ATOP_TOT GENMASK(11, 0) +#define QSYS_ATOP_TOT_CFG_ATOP_TOT_SET(x)\ + FIELD_PREP(QSYS_ATOP_TOT_CFG_ATOP_TOT, x) +#define QSYS_ATOP_TOT_CFG_ATOP_TOT_GET(x)\ + FIELD_GET(QSYS_ATOP_TOT_CFG_ATOP_TOT, x) + +/* QSYS:CALCFG:CAL_AUTO */ +#define QSYS_CAL_AUTO(r) __REG(TARGET_QSYS, 0, 1, 2304, 0, 1, 
40, 0, r, 7, 4) + +#define QSYS_CAL_AUTO_CAL_AUTO GENMASK(29, 0) +#define QSYS_CAL_AUTO_CAL_AUTO_SET(x)\ + FIELD_PREP(QSYS_CAL_AUTO_CAL_AUTO, x) +#define QSYS_CAL_AUTO_CAL_AUTO_GET(x)\ + FIELD_GET(QSYS_CAL_AUTO_CAL_AUTO, x) + +/* QSYS:CALCFG:CAL_CTRL */ +#define QSYS_CAL_CTRL __REG(TARGET_QSYS, 0, 1, 2304, 0, 1, 40, 36, 0, 1, 4) + +#define QSYS_CAL_CTRL_CAL_MODE GENMASK(14, 11) +#define QSYS_CAL_CTRL_CAL_MODE_SET(x)\ + FIELD_PREP(QSYS_CAL_CTRL_CAL_MODE, x) +#define QSYS_CAL_CTRL_CAL_MODE_GET(x)\ + FIELD_GET(QSYS_CAL_CTRL_CAL_MODE, x) + +#define QSYS_CAL_CTRL_CAL_AUTO_GRANT_RATE GENMASK(10, 1) +#define QSYS_CAL_CTRL_CAL_AUTO_GRANT_RATE_SET(x)\ + FIELD_PREP(QSYS_CAL_CTRL_CAL_AUTO_GRANT_RATE, x) +#define QSYS_CAL_CTRL_CAL_AUTO_GRANT_RATE_GET(x)\ + FIELD_GET(QSYS_CAL_CTRL_CAL_AUTO_GRANT_RATE, x) + +#define QSYS_CAL_CTRL_CAL_AUTO_ERROR BIT(0) +#define QSYS_CAL_CTRL_CAL_AUTO_ERROR_SET(x)\ + FIELD_PREP(QSYS_CAL_CTRL_CAL_AUTO_ERROR, x) +#define QSYS_CAL_CTRL_CAL_AUTO_ERROR_GET(x)\ + FIELD_GET(QSYS_CAL_CTRL_CAL_AUTO_ERROR, x) + +/* QSYS:RAM_CTRL:RAM_INIT */ +#define QSYS_RAM_INIT __REG(TARGET_QSYS, 0, 1, 2344, 0, 1, 4, 0, 0, 1, 4) + +#define QSYS_RAM_INIT_RAM_INIT BIT(1) +#define QSYS_RAM_INIT_RAM_INIT_SET(x)\ + FIELD_PREP(QSYS_RAM_INIT_RAM_INIT, x) +#define QSYS_RAM_INIT_RAM_INIT_GET(x)\ + FIELD_GET(QSYS_RAM_INIT_RAM_INIT, x) + +#define QSYS_RAM_INIT_RAM_CFG_HOOK BIT(0) +#define QSYS_RAM_INIT_RAM_CFG_HOOK_SET(x)\ + FIELD_PREP(QSYS_RAM_INIT_RAM_CFG_HOOK, x) +#define QSYS_RAM_INIT_RAM_CFG_HOOK_GET(x)\ + FIELD_GET(QSYS_RAM_INIT_RAM_CFG_HOOK, x) + +/* REW:COMMON:OWN_UPSID */ +#define REW_OWN_UPSID(r) __REG(TARGET_REW, 0, 1, 387264, 0, 1, 1232, 0, r, 3, 4) + +#define REW_OWN_UPSID_OWN_UPSID GENMASK(4, 0) +#define REW_OWN_UPSID_OWN_UPSID_SET(x)\ + FIELD_PREP(REW_OWN_UPSID_OWN_UPSID, x) +#define REW_OWN_UPSID_OWN_UPSID_GET(x)\ + FIELD_GET(REW_OWN_UPSID_OWN_UPSID, x) + +/* REW:PORT:PORT_VLAN_CFG */ +#define REW_PORT_VLAN_CFG(g) __REG(TARGET_REW, 0, 1, 360448, g, 70, 256, 0, 0, 1, 4) + +#define REW_PORT_VLAN_CFG_PORT_PCP GENMASK(15, 13) +#define REW_PORT_VLAN_CFG_PORT_PCP_SET(x)\ + FIELD_PREP(REW_PORT_VLAN_CFG_PORT_PCP, x) +#define REW_PORT_VLAN_CFG_PORT_PCP_GET(x)\ + FIELD_GET(REW_PORT_VLAN_CFG_PORT_PCP, x) + +#define REW_PORT_VLAN_CFG_PORT_DEI BIT(12) +#define REW_PORT_VLAN_CFG_PORT_DEI_SET(x)\ + FIELD_PREP(REW_PORT_VLAN_CFG_PORT_DEI, x) +#define REW_PORT_VLAN_CFG_PORT_DEI_GET(x)\ + FIELD_GET(REW_PORT_VLAN_CFG_PORT_DEI, x) + +#define REW_PORT_VLAN_CFG_PORT_VID GENMASK(11, 0) +#define REW_PORT_VLAN_CFG_PORT_VID_SET(x)\ + FIELD_PREP(REW_PORT_VLAN_CFG_PORT_VID, x) +#define REW_PORT_VLAN_CFG_PORT_VID_GET(x)\ + FIELD_GET(REW_PORT_VLAN_CFG_PORT_VID, x) + +/* REW:PORT:TAG_CTRL */ +#define REW_TAG_CTRL(g) __REG(TARGET_REW, 0, 1, 360448, g, 70, 256, 132, 0, 1, 4) + +#define REW_TAG_CTRL_TAG_CFG_OBEY_WAS_TAGGED BIT(13) +#define REW_TAG_CTRL_TAG_CFG_OBEY_WAS_TAGGED_SET(x)\ + FIELD_PREP(REW_TAG_CTRL_TAG_CFG_OBEY_WAS_TAGGED, x) +#define REW_TAG_CTRL_TAG_CFG_OBEY_WAS_TAGGED_GET(x)\ + FIELD_GET(REW_TAG_CTRL_TAG_CFG_OBEY_WAS_TAGGED, x) + +#define REW_TAG_CTRL_TAG_CFG GENMASK(12, 11) +#define REW_TAG_CTRL_TAG_CFG_SET(x)\ + FIELD_PREP(REW_TAG_CTRL_TAG_CFG, x) +#define REW_TAG_CTRL_TAG_CFG_GET(x)\ + FIELD_GET(REW_TAG_CTRL_TAG_CFG, x) + +#define REW_TAG_CTRL_TAG_TPID_CFG GENMASK(10, 8) +#define REW_TAG_CTRL_TAG_TPID_CFG_SET(x)\ + FIELD_PREP(REW_TAG_CTRL_TAG_TPID_CFG, x) +#define REW_TAG_CTRL_TAG_TPID_CFG_GET(x)\ + FIELD_GET(REW_TAG_CTRL_TAG_TPID_CFG, x) + +#define REW_TAG_CTRL_TAG_VID_CFG GENMASK(7, 6) +#define 
REW_TAG_CTRL_TAG_VID_CFG_SET(x)\ + FIELD_PREP(REW_TAG_CTRL_TAG_VID_CFG, x) +#define REW_TAG_CTRL_TAG_VID_CFG_GET(x)\ + FIELD_GET(REW_TAG_CTRL_TAG_VID_CFG, x) + +#define REW_TAG_CTRL_TAG_PCP_CFG GENMASK(5, 3) +#define REW_TAG_CTRL_TAG_PCP_CFG_SET(x)\ + FIELD_PREP(REW_TAG_CTRL_TAG_PCP_CFG, x) +#define REW_TAG_CTRL_TAG_PCP_CFG_GET(x)\ + FIELD_GET(REW_TAG_CTRL_TAG_PCP_CFG, x) + +#define REW_TAG_CTRL_TAG_DEI_CFG GENMASK(2, 0) +#define REW_TAG_CTRL_TAG_DEI_CFG_SET(x)\ + FIELD_PREP(REW_TAG_CTRL_TAG_DEI_CFG, x) +#define REW_TAG_CTRL_TAG_DEI_CFG_GET(x)\ + FIELD_GET(REW_TAG_CTRL_TAG_DEI_CFG, x) + +/* REW:PTP_CTRL:PTP_TWOSTEP_CTRL */ +#define REW_PTP_TWOSTEP_CTRL __REG(TARGET_REW, 0, 1, 378368, 0, 1, 40, 0, 0, 1, 4) + +#define REW_PTP_TWOSTEP_CTRL_PTP_OVWR_ENA BIT(12) +#define REW_PTP_TWOSTEP_CTRL_PTP_OVWR_ENA_SET(x)\ + FIELD_PREP(REW_PTP_TWOSTEP_CTRL_PTP_OVWR_ENA, x) +#define REW_PTP_TWOSTEP_CTRL_PTP_OVWR_ENA_GET(x)\ + FIELD_GET(REW_PTP_TWOSTEP_CTRL_PTP_OVWR_ENA, x) + +#define REW_PTP_TWOSTEP_CTRL_PTP_NXT BIT(11) +#define REW_PTP_TWOSTEP_CTRL_PTP_NXT_SET(x)\ + FIELD_PREP(REW_PTP_TWOSTEP_CTRL_PTP_NXT, x) +#define REW_PTP_TWOSTEP_CTRL_PTP_NXT_GET(x)\ + FIELD_GET(REW_PTP_TWOSTEP_CTRL_PTP_NXT, x) + +#define REW_PTP_TWOSTEP_CTRL_PTP_VLD BIT(10) +#define REW_PTP_TWOSTEP_CTRL_PTP_VLD_SET(x)\ + FIELD_PREP(REW_PTP_TWOSTEP_CTRL_PTP_VLD, x) +#define REW_PTP_TWOSTEP_CTRL_PTP_VLD_GET(x)\ + FIELD_GET(REW_PTP_TWOSTEP_CTRL_PTP_VLD, x) + +#define REW_PTP_TWOSTEP_CTRL_STAMP_TX BIT(9) +#define REW_PTP_TWOSTEP_CTRL_STAMP_TX_SET(x)\ + FIELD_PREP(REW_PTP_TWOSTEP_CTRL_STAMP_TX, x) +#define REW_PTP_TWOSTEP_CTRL_STAMP_TX_GET(x)\ + FIELD_GET(REW_PTP_TWOSTEP_CTRL_STAMP_TX, x) + +#define REW_PTP_TWOSTEP_CTRL_STAMP_PORT GENMASK(8, 1) +#define REW_PTP_TWOSTEP_CTRL_STAMP_PORT_SET(x)\ + FIELD_PREP(REW_PTP_TWOSTEP_CTRL_STAMP_PORT, x) +#define REW_PTP_TWOSTEP_CTRL_STAMP_PORT_GET(x)\ + FIELD_GET(REW_PTP_TWOSTEP_CTRL_STAMP_PORT, x) + +#define REW_PTP_TWOSTEP_CTRL_PTP_OVFL BIT(0) +#define REW_PTP_TWOSTEP_CTRL_PTP_OVFL_SET(x)\ + FIELD_PREP(REW_PTP_TWOSTEP_CTRL_PTP_OVFL, x) +#define REW_PTP_TWOSTEP_CTRL_PTP_OVFL_GET(x)\ + FIELD_GET(REW_PTP_TWOSTEP_CTRL_PTP_OVFL, x) + +/* REW:PTP_CTRL:PTP_TWOSTEP_STAMP */ +#define REW_PTP_TWOSTEP_STAMP __REG(TARGET_REW, 0, 1, 378368, 0, 1, 40, 4, 0, 1, 4) + +#define REW_PTP_TWOSTEP_STAMP_STAMP_NSEC GENMASK(29, 0) +#define REW_PTP_TWOSTEP_STAMP_STAMP_NSEC_SET(x)\ + FIELD_PREP(REW_PTP_TWOSTEP_STAMP_STAMP_NSEC, x) +#define REW_PTP_TWOSTEP_STAMP_STAMP_NSEC_GET(x)\ + FIELD_GET(REW_PTP_TWOSTEP_STAMP_STAMP_NSEC, x) + +/* REW:PTP_CTRL:PTP_TWOSTEP_STAMP_SUBNS */ +#define REW_PTP_TWOSTEP_STAMP_SUBNS __REG(TARGET_REW, 0, 1, 378368, 0, 1, 40, 8, 0, 1, 4) + +#define REW_PTP_TWOSTEP_STAMP_SUBNS_STAMP_SUB_NSEC GENMASK(7, 0) +#define REW_PTP_TWOSTEP_STAMP_SUBNS_STAMP_SUB_NSEC_SET(x)\ + FIELD_PREP(REW_PTP_TWOSTEP_STAMP_SUBNS_STAMP_SUB_NSEC, x) +#define REW_PTP_TWOSTEP_STAMP_SUBNS_STAMP_SUB_NSEC_GET(x)\ + FIELD_GET(REW_PTP_TWOSTEP_STAMP_SUBNS_STAMP_SUB_NSEC, x) + +/* REW:PTP_CTRL:PTP_RSRV_NOT_ZERO */ +#define REW_PTP_RSRV_NOT_ZERO __REG(TARGET_REW, 0, 1, 378368, 0, 1, 40, 12, 0, 1, 4) + +/* REW:PTP_CTRL:PTP_RSRV_NOT_ZERO1 */ +#define REW_PTP_RSRV_NOT_ZERO1 __REG(TARGET_REW, 0, 1, 378368, 0, 1, 40, 16, 0, 1, 4) + +/* REW:PTP_CTRL:PTP_RSRV_NOT_ZERO2 */ +#define REW_PTP_RSRV_NOT_ZERO2 __REG(TARGET_REW, 0, 1, 378368, 0, 1, 40, 20, 0, 1, 4) + +#define REW_PTP_RSRV_NOT_ZERO2_PTP_RSRV_NOT_ZERO2 GENMASK(5, 0) +#define REW_PTP_RSRV_NOT_ZERO2_PTP_RSRV_NOT_ZERO2_SET(x)\ + FIELD_PREP(REW_PTP_RSRV_NOT_ZERO2_PTP_RSRV_NOT_ZERO2, x) 
+#define REW_PTP_RSRV_NOT_ZERO2_PTP_RSRV_NOT_ZERO2_GET(x)\ + FIELD_GET(REW_PTP_RSRV_NOT_ZERO2_PTP_RSRV_NOT_ZERO2, x) + +/* REW:PTP_CTRL:PTP_GEN_STAMP_FMT */ +#define REW_PTP_GEN_STAMP_FMT(r) __REG(TARGET_REW, 0, 1, 378368, 0, 1, 40, 24, r, 4, 4) + +#define REW_PTP_GEN_STAMP_FMT_RT_OFS GENMASK(6, 2) +#define REW_PTP_GEN_STAMP_FMT_RT_OFS_SET(x)\ + FIELD_PREP(REW_PTP_GEN_STAMP_FMT_RT_OFS, x) +#define REW_PTP_GEN_STAMP_FMT_RT_OFS_GET(x)\ + FIELD_GET(REW_PTP_GEN_STAMP_FMT_RT_OFS, x) + +#define REW_PTP_GEN_STAMP_FMT_RT_FMT GENMASK(1, 0) +#define REW_PTP_GEN_STAMP_FMT_RT_FMT_SET(x)\ + FIELD_PREP(REW_PTP_GEN_STAMP_FMT_RT_FMT, x) +#define REW_PTP_GEN_STAMP_FMT_RT_FMT_GET(x)\ + FIELD_GET(REW_PTP_GEN_STAMP_FMT_RT_FMT, x) + +/* REW:RAM_CTRL:RAM_INIT */ +#define REW_RAM_INIT __REG(TARGET_REW, 0, 1, 378696, 0, 1, 4, 0, 0, 1, 4) + +#define REW_RAM_INIT_RAM_INIT BIT(1) +#define REW_RAM_INIT_RAM_INIT_SET(x)\ + FIELD_PREP(REW_RAM_INIT_RAM_INIT, x) +#define REW_RAM_INIT_RAM_INIT_GET(x)\ + FIELD_GET(REW_RAM_INIT_RAM_INIT, x) + +#define REW_RAM_INIT_RAM_CFG_HOOK BIT(0) +#define REW_RAM_INIT_RAM_CFG_HOOK_SET(x)\ + FIELD_PREP(REW_RAM_INIT_RAM_CFG_HOOK, x) +#define REW_RAM_INIT_RAM_CFG_HOOK_GET(x)\ + FIELD_GET(REW_RAM_INIT_RAM_CFG_HOOK, x) + +/* VCAP_SUPER:RAM_CTRL:RAM_INIT */ +#define VCAP_SUPER_RAM_INIT __REG(TARGET_VCAP_SUPER, 0, 1, 1120, 0, 1, 4, 0, 0, 1, 4) + +#define VCAP_SUPER_RAM_INIT_RAM_INIT BIT(1) +#define VCAP_SUPER_RAM_INIT_RAM_INIT_SET(x)\ + FIELD_PREP(VCAP_SUPER_RAM_INIT_RAM_INIT, x) +#define VCAP_SUPER_RAM_INIT_RAM_INIT_GET(x)\ + FIELD_GET(VCAP_SUPER_RAM_INIT_RAM_INIT, x) + +#define VCAP_SUPER_RAM_INIT_RAM_CFG_HOOK BIT(0) +#define VCAP_SUPER_RAM_INIT_RAM_CFG_HOOK_SET(x)\ + FIELD_PREP(VCAP_SUPER_RAM_INIT_RAM_CFG_HOOK, x) +#define VCAP_SUPER_RAM_INIT_RAM_CFG_HOOK_GET(x)\ + FIELD_GET(VCAP_SUPER_RAM_INIT_RAM_CFG_HOOK, x) + +/* VOP:RAM_CTRL:RAM_INIT */ +#define VOP_RAM_INIT __REG(TARGET_VOP, 0, 1, 279176, 0, 1, 4, 0, 0, 1, 4) + +#define VOP_RAM_INIT_RAM_INIT BIT(1) +#define VOP_RAM_INIT_RAM_INIT_SET(x)\ + FIELD_PREP(VOP_RAM_INIT_RAM_INIT, x) +#define VOP_RAM_INIT_RAM_INIT_GET(x)\ + FIELD_GET(VOP_RAM_INIT_RAM_INIT, x) + +#define VOP_RAM_INIT_RAM_CFG_HOOK BIT(0) +#define VOP_RAM_INIT_RAM_CFG_HOOK_SET(x)\ + FIELD_PREP(VOP_RAM_INIT_RAM_CFG_HOOK, x) +#define VOP_RAM_INIT_RAM_CFG_HOOK_GET(x)\ + FIELD_GET(VOP_RAM_INIT_RAM_CFG_HOOK, x) + +/* XQS:SYSTEM:STAT_CFG */ +#define XQS_STAT_CFG __REG(TARGET_XQS, 0, 1, 6768, 0, 1, 872, 860, 0, 1, 4) + +#define XQS_STAT_CFG_STAT_CLEAR_SHOT GENMASK(21, 18) +#define XQS_STAT_CFG_STAT_CLEAR_SHOT_SET(x)\ + FIELD_PREP(XQS_STAT_CFG_STAT_CLEAR_SHOT, x) +#define XQS_STAT_CFG_STAT_CLEAR_SHOT_GET(x)\ + FIELD_GET(XQS_STAT_CFG_STAT_CLEAR_SHOT, x) + +#define XQS_STAT_CFG_STAT_VIEW GENMASK(17, 5) +#define XQS_STAT_CFG_STAT_VIEW_SET(x)\ + FIELD_PREP(XQS_STAT_CFG_STAT_VIEW, x) +#define XQS_STAT_CFG_STAT_VIEW_GET(x)\ + FIELD_GET(XQS_STAT_CFG_STAT_VIEW, x) + +#define XQS_STAT_CFG_STAT_SRV_PKT_ONLY BIT(4) +#define XQS_STAT_CFG_STAT_SRV_PKT_ONLY_SET(x)\ + FIELD_PREP(XQS_STAT_CFG_STAT_SRV_PKT_ONLY, x) +#define XQS_STAT_CFG_STAT_SRV_PKT_ONLY_GET(x)\ + FIELD_GET(XQS_STAT_CFG_STAT_SRV_PKT_ONLY, x) + +#define XQS_STAT_CFG_STAT_WRAP_DIS GENMASK(3, 0) +#define XQS_STAT_CFG_STAT_WRAP_DIS_SET(x)\ + FIELD_PREP(XQS_STAT_CFG_STAT_WRAP_DIS, x) +#define XQS_STAT_CFG_STAT_WRAP_DIS_GET(x)\ + FIELD_GET(XQS_STAT_CFG_STAT_WRAP_DIS, x) + +/* XQS:QLIMIT_SHR:QLIMIT_SHR_TOP_CFG */ +#define XQS_QLIMIT_SHR_TOP_CFG(g) __REG(TARGET_XQS, 0, 1, 7936, g, 4, 48, 0, 0, 1, 4) + +#define 
XQS_QLIMIT_SHR_TOP_CFG_QLIMIT_SHR_TOP GENMASK(14, 0) +#define XQS_QLIMIT_SHR_TOP_CFG_QLIMIT_SHR_TOP_SET(x)\ + FIELD_PREP(XQS_QLIMIT_SHR_TOP_CFG_QLIMIT_SHR_TOP, x) +#define XQS_QLIMIT_SHR_TOP_CFG_QLIMIT_SHR_TOP_GET(x)\ + FIELD_GET(XQS_QLIMIT_SHR_TOP_CFG_QLIMIT_SHR_TOP, x) + +/* XQS:QLIMIT_SHR:QLIMIT_SHR_ATOP_CFG */ +#define XQS_QLIMIT_SHR_ATOP_CFG(g) __REG(TARGET_XQS, 0, 1, 7936, g, 4, 48, 4, 0, 1, 4) + +#define XQS_QLIMIT_SHR_ATOP_CFG_QLIMIT_SHR_ATOP GENMASK(14, 0) +#define XQS_QLIMIT_SHR_ATOP_CFG_QLIMIT_SHR_ATOP_SET(x)\ + FIELD_PREP(XQS_QLIMIT_SHR_ATOP_CFG_QLIMIT_SHR_ATOP, x) +#define XQS_QLIMIT_SHR_ATOP_CFG_QLIMIT_SHR_ATOP_GET(x)\ + FIELD_GET(XQS_QLIMIT_SHR_ATOP_CFG_QLIMIT_SHR_ATOP, x) + +/* XQS:QLIMIT_SHR:QLIMIT_SHR_CTOP_CFG */ +#define XQS_QLIMIT_SHR_CTOP_CFG(g) __REG(TARGET_XQS, 0, 1, 7936, g, 4, 48, 8, 0, 1, 4) + +#define XQS_QLIMIT_SHR_CTOP_CFG_QLIMIT_SHR_CTOP GENMASK(14, 0) +#define XQS_QLIMIT_SHR_CTOP_CFG_QLIMIT_SHR_CTOP_SET(x)\ + FIELD_PREP(XQS_QLIMIT_SHR_CTOP_CFG_QLIMIT_SHR_CTOP, x) +#define XQS_QLIMIT_SHR_CTOP_CFG_QLIMIT_SHR_CTOP_GET(x)\ + FIELD_GET(XQS_QLIMIT_SHR_CTOP_CFG_QLIMIT_SHR_CTOP, x) + +/* XQS:QLIMIT_SHR:QLIMIT_SHR_QLIM_CFG */ +#define XQS_QLIMIT_SHR_QLIM_CFG(g) __REG(TARGET_XQS, 0, 1, 7936, g, 4, 48, 12, 0, 1, 4) + +#define XQS_QLIMIT_SHR_QLIM_CFG_QLIMIT_SHR_QLIM GENMASK(14, 0) +#define XQS_QLIMIT_SHR_QLIM_CFG_QLIMIT_SHR_QLIM_SET(x)\ + FIELD_PREP(XQS_QLIMIT_SHR_QLIM_CFG_QLIMIT_SHR_QLIM, x) +#define XQS_QLIMIT_SHR_QLIM_CFG_QLIMIT_SHR_QLIM_GET(x)\ + FIELD_GET(XQS_QLIMIT_SHR_QLIM_CFG_QLIMIT_SHR_QLIM, x) + +/* XQS:STAT:CNT */ +#define XQS_CNT(g) __REG(TARGET_XQS, 0, 1, 0, g, 1024, 4, 0, 0, 1, 4) + +#endif /* _SPARX5_MAIN_REGS_H_ */ diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c b/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c new file mode 100644 index 000000000..d07815658 --- /dev/null +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c @@ -0,0 +1,323 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Microchip Sparx5 Switch driver + * + * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries. + */ + +#include "sparx5_main_regs.h" +#include "sparx5_main.h" +#include "sparx5_port.h" +#include "sparx5_tc.h" + +/* The IFH bit position of the first VSTAX bit. This is because the + * VSTAX bit positions in Data sheet is starting from zero. + */ +#define VSTAX 73 + +#define ifh_encode_bitfield(ifh, value, pos, _width) \ + ({ \ + u32 width = (_width); \ + \ + /* Max width is 5 bytes - 40 bits. 
In worst case this will + * spread over 6 bytes - 48 bits + */ \ + compiletime_assert(width <= 40, \ + "Unsupported width, must be <= 40"); \ + __ifh_encode_bitfield((ifh), (value), (pos), width); \ + }) + +static void __ifh_encode_bitfield(void *ifh, u64 value, u32 pos, u32 width) +{ + u8 *ifh_hdr = ifh; + /* Calculate the Start IFH byte position of this IFH bit position */ + u32 byte = (35 - (pos / 8)); + /* Calculate the Start bit position in the Start IFH byte */ + u32 bit = (pos % 8); + u64 encode = GENMASK_ULL(bit + width - 1, bit) & (value << bit); + + /* The b0-b7 goes into the start IFH byte */ + if (encode & 0xFF) + ifh_hdr[byte] |= (u8)((encode & 0xFF)); + /* The b8-b15 goes into the next IFH byte */ + if (encode & 0xFF00) + ifh_hdr[byte - 1] |= (u8)((encode & 0xFF00) >> 8); + /* The b16-b23 goes into the next IFH byte */ + if (encode & 0xFF0000) + ifh_hdr[byte - 2] |= (u8)((encode & 0xFF0000) >> 16); + /* The b24-b31 goes into the next IFH byte */ + if (encode & 0xFF000000) + ifh_hdr[byte - 3] |= (u8)((encode & 0xFF000000) >> 24); + /* The b32-b39 goes into the next IFH byte */ + if (encode & 0xFF00000000) + ifh_hdr[byte - 4] |= (u8)((encode & 0xFF00000000) >> 32); + /* The b40-b47 goes into the next IFH byte */ + if (encode & 0xFF0000000000) + ifh_hdr[byte - 5] |= (u8)((encode & 0xFF0000000000) >> 40); +} + +void sparx5_set_port_ifh(void *ifh_hdr, u16 portno) +{ + /* VSTAX.RSV = 1. MSBit must be 1 */ + ifh_encode_bitfield(ifh_hdr, 1, VSTAX + 79, 1); + /* VSTAX.INGR_DROP_MODE = Enable. Don't make head-of-line blocking */ + ifh_encode_bitfield(ifh_hdr, 1, VSTAX + 55, 1); + /* MISC.CPU_MASK/DPORT = Destination port */ + ifh_encode_bitfield(ifh_hdr, portno, 29, 8); + /* MISC.PIPELINE_PT */ + ifh_encode_bitfield(ifh_hdr, 16, 37, 5); + /* MISC.PIPELINE_ACT */ + ifh_encode_bitfield(ifh_hdr, 1, 42, 3); + /* FWD.SRC_PORT = CPU */ + ifh_encode_bitfield(ifh_hdr, SPX5_PORT_CPU, 46, 7); + /* FWD.SFLOW_ID (disable SFlow sampling) */ + ifh_encode_bitfield(ifh_hdr, 124, 57, 7); + /* FWD.UPDATE_FCS = Enable. Enforce update of FCS. 
*/ + ifh_encode_bitfield(ifh_hdr, 1, 67, 1); +} + +void sparx5_set_port_ifh_rew_op(void *ifh_hdr, u32 rew_op) +{ + ifh_encode_bitfield(ifh_hdr, rew_op, VSTAX + 32, 10); +} + +void sparx5_set_port_ifh_pdu_type(void *ifh_hdr, u32 pdu_type) +{ + ifh_encode_bitfield(ifh_hdr, pdu_type, 191, 4); +} + +void sparx5_set_port_ifh_pdu_w16_offset(void *ifh_hdr, u32 pdu_w16_offset) +{ + ifh_encode_bitfield(ifh_hdr, pdu_w16_offset, 195, 6); +} + +void sparx5_set_port_ifh_timestamp(void *ifh_hdr, u64 timestamp) +{ + ifh_encode_bitfield(ifh_hdr, timestamp, 232, 40); +} + +static int sparx5_port_open(struct net_device *ndev) +{ + struct sparx5_port *port = netdev_priv(ndev); + int err = 0; + + sparx5_port_enable(port, true); + err = phylink_of_phy_connect(port->phylink, port->of_node, 0); + if (err) { + netdev_err(ndev, "Could not attach to PHY\n"); + goto err_connect; + } + + phylink_start(port->phylink); + + if (!ndev->phydev) { + /* power up serdes */ + port->conf.power_down = false; + if (port->conf.serdes_reset) + err = sparx5_serdes_set(port->sparx5, port, &port->conf); + else + err = phy_power_on(port->serdes); + if (err) { + netdev_err(ndev, "%s failed\n", __func__); + goto out_power; + } + } + + return 0; + +out_power: + phylink_stop(port->phylink); + phylink_disconnect_phy(port->phylink); +err_connect: + sparx5_port_enable(port, false); + + return err; +} + +static int sparx5_port_stop(struct net_device *ndev) +{ + struct sparx5_port *port = netdev_priv(ndev); + int err = 0; + + sparx5_port_enable(port, false); + phylink_stop(port->phylink); + phylink_disconnect_phy(port->phylink); + + if (!ndev->phydev) { + /* power down serdes */ + port->conf.power_down = true; + if (port->conf.serdes_reset) + err = sparx5_serdes_set(port->sparx5, port, &port->conf); + else + err = phy_power_off(port->serdes); + if (err) + netdev_err(ndev, "%s failed\n", __func__); + } + return 0; +} + +static void sparx5_set_rx_mode(struct net_device *dev) +{ + struct sparx5_port *port = netdev_priv(dev); + struct sparx5 *sparx5 = port->sparx5; + + if (!test_bit(port->portno, sparx5->bridge_mask)) + __dev_mc_sync(dev, sparx5_mc_sync, sparx5_mc_unsync); +} + +static int sparx5_port_get_phys_port_name(struct net_device *dev, + char *buf, size_t len) +{ + struct sparx5_port *port = netdev_priv(dev); + int ret; + + ret = snprintf(buf, len, "p%d", port->portno); + if (ret >= len) + return -EINVAL; + + return 0; +} + +static int sparx5_set_mac_address(struct net_device *dev, void *p) +{ + struct sparx5_port *port = netdev_priv(dev); + struct sparx5 *sparx5 = port->sparx5; + const struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + /* Remove current */ + sparx5_mact_forget(sparx5, dev->dev_addr, port->pvid); + + /* Add new */ + sparx5_mact_learn(sparx5, PGID_CPU, addr->sa_data, port->pvid); + + /* Record the address */ + eth_hw_addr_set(dev, addr->sa_data); + + return 0; +} + +static int sparx5_get_port_parent_id(struct net_device *dev, + struct netdev_phys_item_id *ppid) +{ + struct sparx5_port *sparx5_port = netdev_priv(dev); + struct sparx5 *sparx5 = sparx5_port->sparx5; + + ppid->id_len = sizeof(sparx5->base_mac); + memcpy(&ppid->id, &sparx5->base_mac, ppid->id_len); + + return 0; +} + +static int sparx5_port_ioctl(struct net_device *dev, struct ifreq *ifr, + int cmd) +{ + struct sparx5_port *sparx5_port = netdev_priv(dev); + struct sparx5 *sparx5 = sparx5_port->sparx5; + + if (!phy_has_hwtstamp(dev->phydev) && sparx5->ptp) { + switch (cmd) { + case SIOCSHWTSTAMP: + return 
sparx5_ptp_hwtstamp_set(sparx5_port, ifr); + case SIOCGHWTSTAMP: + return sparx5_ptp_hwtstamp_get(sparx5_port, ifr); + } + } + + return phy_mii_ioctl(dev->phydev, ifr, cmd); +} + +static const struct net_device_ops sparx5_port_netdev_ops = { + .ndo_open = sparx5_port_open, + .ndo_stop = sparx5_port_stop, + .ndo_start_xmit = sparx5_port_xmit_impl, + .ndo_set_rx_mode = sparx5_set_rx_mode, + .ndo_get_phys_port_name = sparx5_port_get_phys_port_name, + .ndo_set_mac_address = sparx5_set_mac_address, + .ndo_validate_addr = eth_validate_addr, + .ndo_get_stats64 = sparx5_get_stats64, + .ndo_get_port_parent_id = sparx5_get_port_parent_id, + .ndo_eth_ioctl = sparx5_port_ioctl, + .ndo_setup_tc = sparx5_port_setup_tc, +}; + +bool sparx5_netdevice_check(const struct net_device *dev) +{ + return dev && (dev->netdev_ops == &sparx5_port_netdev_ops); +} + +struct net_device *sparx5_create_netdev(struct sparx5 *sparx5, u32 portno) +{ + struct sparx5_port *spx5_port; + struct net_device *ndev; + + ndev = devm_alloc_etherdev_mqs(sparx5->dev, sizeof(struct sparx5_port), + SPX5_PRIOS, 1); + if (!ndev) + return ERR_PTR(-ENOMEM); + + ndev->hw_features |= NETIF_F_HW_TC; + ndev->features |= NETIF_F_HW_TC; + + SET_NETDEV_DEV(ndev, sparx5->dev); + spx5_port = netdev_priv(ndev); + spx5_port->ndev = ndev; + spx5_port->sparx5 = sparx5; + spx5_port->portno = portno; + + ndev->netdev_ops = &sparx5_port_netdev_ops; + ndev->ethtool_ops = &sparx5_ethtool_ops; + + eth_hw_addr_gen(ndev, sparx5->base_mac, portno + 1); + + return ndev; +} + +int sparx5_register_netdevs(struct sparx5 *sparx5) +{ + int portno; + int err; + + for (portno = 0; portno < SPX5_PORTS; portno++) + if (sparx5->ports[portno]) { + err = register_netdev(sparx5->ports[portno]->ndev); + if (err) { + dev_err(sparx5->dev, + "port: %02u: netdev registration failed\n", + portno); + return err; + } + sparx5_port_inj_timer_setup(sparx5->ports[portno]); + } + return 0; +} + +void sparx5_destroy_netdevs(struct sparx5 *sparx5) +{ + struct sparx5_port *port; + int portno; + + for (portno = 0; portno < SPX5_PORTS; portno++) { + port = sparx5->ports[portno]; + if (port && port->phylink) { + /* Disconnect the phy */ + rtnl_lock(); + sparx5_port_stop(port->ndev); + phylink_disconnect_phy(port->phylink); + rtnl_unlock(); + phylink_destroy(port->phylink); + port->phylink = NULL; + } + } +} + +void sparx5_unregister_netdevs(struct sparx5 *sparx5) +{ + int portno; + + for (portno = 0; portno < SPX5_PORTS; portno++) + if (sparx5->ports[portno]) + unregister_netdev(sparx5->ports[portno]->ndev); +} + diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c new file mode 100644 index 000000000..6db6ac6a3 --- /dev/null +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c @@ -0,0 +1,359 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Microchip Sparx5 Switch driver + * + * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries. 
+ */ + +#include "sparx5_main_regs.h" +#include "sparx5_main.h" + +#define XTR_EOF_0 ntohl((__force __be32)0x80000000u) +#define XTR_EOF_1 ntohl((__force __be32)0x80000001u) +#define XTR_EOF_2 ntohl((__force __be32)0x80000002u) +#define XTR_EOF_3 ntohl((__force __be32)0x80000003u) +#define XTR_PRUNED ntohl((__force __be32)0x80000004u) +#define XTR_ABORT ntohl((__force __be32)0x80000005u) +#define XTR_ESCAPE ntohl((__force __be32)0x80000006u) +#define XTR_NOT_READY ntohl((__force __be32)0x80000007u) + +#define XTR_VALID_BYTES(x) (4 - ((x) & 3)) + +#define INJ_TIMEOUT_NS 50000 + +void sparx5_xtr_flush(struct sparx5 *sparx5, u8 grp) +{ + /* Start flush */ + spx5_wr(QS_XTR_FLUSH_FLUSH_SET(BIT(grp)), sparx5, QS_XTR_FLUSH); + + /* Allow to drain */ + mdelay(1); + + /* All Queues normal */ + spx5_wr(0, sparx5, QS_XTR_FLUSH); +} + +void sparx5_ifh_parse(u32 *ifh, struct frame_info *info) +{ + u8 *xtr_hdr = (u8 *)ifh; + + /* FWD is bit 45-72 (28 bits), but we only read the 27 LSB for now */ + u32 fwd = + ((u32)xtr_hdr[27] << 24) | + ((u32)xtr_hdr[28] << 16) | + ((u32)xtr_hdr[29] << 8) | + ((u32)xtr_hdr[30] << 0); + fwd = (fwd >> 5); + info->src_port = FIELD_GET(GENMASK(7, 1), fwd); + + info->timestamp = + ((u64)xtr_hdr[2] << 24) | + ((u64)xtr_hdr[3] << 16) | + ((u64)xtr_hdr[4] << 8) | + ((u64)xtr_hdr[5] << 0); +} + +static void sparx5_xtr_grp(struct sparx5 *sparx5, u8 grp, bool byte_swap) +{ + bool eof_flag = false, pruned_flag = false, abort_flag = false; + struct net_device *netdev; + struct sparx5_port *port; + struct frame_info fi; + int i, byte_cnt = 0; + struct sk_buff *skb; + u32 ifh[IFH_LEN]; + u32 *rxbuf; + + /* Get IFH */ + for (i = 0; i < IFH_LEN; i++) + ifh[i] = spx5_rd(sparx5, QS_XTR_RD(grp)); + + /* Decode IFH (whats needed) */ + sparx5_ifh_parse(ifh, &fi); + + /* Map to port netdev */ + port = fi.src_port < SPX5_PORTS ? 
+ sparx5->ports[fi.src_port] : NULL; + if (!port || !port->ndev) { + dev_err(sparx5->dev, "Data on inactive port %d\n", fi.src_port); + sparx5_xtr_flush(sparx5, grp); + return; + } + + /* Have netdev, get skb */ + netdev = port->ndev; + skb = netdev_alloc_skb(netdev, netdev->mtu + ETH_HLEN); + if (!skb) { + sparx5_xtr_flush(sparx5, grp); + dev_err(sparx5->dev, "No skb allocated\n"); + netdev->stats.rx_dropped++; + return; + } + rxbuf = (u32 *)skb->data; + + /* Now, pull frame data */ + while (!eof_flag) { + u32 val = spx5_rd(sparx5, QS_XTR_RD(grp)); + u32 cmp = val; + + if (byte_swap) + cmp = ntohl((__force __be32)val); + + switch (cmp) { + case XTR_NOT_READY: + break; + case XTR_ABORT: + /* No accompanying data */ + abort_flag = true; + eof_flag = true; + break; + case XTR_EOF_0: + case XTR_EOF_1: + case XTR_EOF_2: + case XTR_EOF_3: + /* This assumes STATUS_WORD_POS == 1, Status + * just after last data + */ + if (!byte_swap) + val = ntohl((__force __be32)val); + byte_cnt -= (4 - XTR_VALID_BYTES(val)); + eof_flag = true; + break; + case XTR_PRUNED: + /* But get the last 4 bytes as well */ + eof_flag = true; + pruned_flag = true; + fallthrough; + case XTR_ESCAPE: + *rxbuf = spx5_rd(sparx5, QS_XTR_RD(grp)); + byte_cnt += 4; + rxbuf++; + break; + default: + *rxbuf = val; + byte_cnt += 4; + rxbuf++; + } + } + + if (abort_flag || pruned_flag || !eof_flag) { + netdev_err(netdev, "Discarded frame: abort:%d pruned:%d eof:%d\n", + abort_flag, pruned_flag, eof_flag); + kfree_skb(skb); + netdev->stats.rx_dropped++; + return; + } + + /* Everything we see on an interface that is in the HW bridge + * has already been forwarded + */ + if (test_bit(port->portno, sparx5->bridge_mask)) + skb->offload_fwd_mark = 1; + + /* Finish up skb */ + skb_put(skb, byte_cnt - ETH_FCS_LEN); + eth_skb_pad(skb); + sparx5_ptp_rxtstamp(sparx5, skb, fi.timestamp); + skb->protocol = eth_type_trans(skb, netdev); + netdev->stats.rx_bytes += skb->len; + netdev->stats.rx_packets++; + netif_rx(skb); +} + +static int sparx5_inject(struct sparx5 *sparx5, + u32 *ifh, + struct sk_buff *skb, + struct net_device *ndev) +{ + int grp = INJ_QUEUE; + u32 val, w, count; + u8 *buf; + + val = spx5_rd(sparx5, QS_INJ_STATUS); + if (!(QS_INJ_STATUS_FIFO_RDY_GET(val) & BIT(grp))) { + pr_err_ratelimited("Injection: Queue not ready: 0x%lx\n", + QS_INJ_STATUS_FIFO_RDY_GET(val)); + return -EBUSY; + } + + /* Indicate SOF */ + spx5_wr(QS_INJ_CTRL_SOF_SET(1) | + QS_INJ_CTRL_GAP_SIZE_SET(1), + sparx5, QS_INJ_CTRL(grp)); + + /* Write the IFH to the chip. */ + for (w = 0; w < IFH_LEN; w++) + spx5_wr(ifh[w], sparx5, QS_INJ_WR(grp)); + + /* Write words, round up */ + count = DIV_ROUND_UP(skb->len, 4); + buf = skb->data; + for (w = 0; w < count; w++, buf += 4) { + val = get_unaligned((const u32 *)buf); + spx5_wr(val, sparx5, QS_INJ_WR(grp)); + } + + /* Add padding */ + while (w < (60 / 4)) { + spx5_wr(0, sparx5, QS_INJ_WR(grp)); + w++; + } + + /* Indicate EOF and valid bytes in last word */ + spx5_wr(QS_INJ_CTRL_GAP_SIZE_SET(1) | + QS_INJ_CTRL_VLD_BYTES_SET(skb->len < 60 ? 
0 : skb->len % 4) | + QS_INJ_CTRL_EOF_SET(1), + sparx5, QS_INJ_CTRL(grp)); + + /* Add dummy CRC */ + spx5_wr(0, sparx5, QS_INJ_WR(grp)); + w++; + + val = spx5_rd(sparx5, QS_INJ_STATUS); + if (QS_INJ_STATUS_WMARK_REACHED_GET(val) & BIT(grp)) { + struct sparx5_port *port = netdev_priv(ndev); + + pr_err_ratelimited("Injection: Watermark reached: 0x%lx\n", + QS_INJ_STATUS_WMARK_REACHED_GET(val)); + netif_stop_queue(ndev); + hrtimer_start(&port->inj_timer, INJ_TIMEOUT_NS, + HRTIMER_MODE_REL); + } + + return NETDEV_TX_OK; +} + +netdev_tx_t sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev) +{ + struct net_device_stats *stats = &dev->stats; + struct sparx5_port *port = netdev_priv(dev); + struct sparx5 *sparx5 = port->sparx5; + u32 ifh[IFH_LEN]; + netdev_tx_t ret; + + memset(ifh, 0, IFH_LEN * 4); + sparx5_set_port_ifh(ifh, port->portno); + + if (sparx5->ptp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) { + if (sparx5_ptp_txtstamp_request(port, skb) < 0) + return NETDEV_TX_BUSY; + + sparx5_set_port_ifh_rew_op(ifh, SPARX5_SKB_CB(skb)->rew_op); + sparx5_set_port_ifh_pdu_type(ifh, SPARX5_SKB_CB(skb)->pdu_type); + sparx5_set_port_ifh_pdu_w16_offset(ifh, SPARX5_SKB_CB(skb)->pdu_w16_offset); + sparx5_set_port_ifh_timestamp(ifh, SPARX5_SKB_CB(skb)->ts_id); + } + + skb_tx_timestamp(skb); + if (sparx5->fdma_irq > 0) + ret = sparx5_fdma_xmit(sparx5, ifh, skb); + else + ret = sparx5_inject(sparx5, ifh, skb, dev); + + if (ret == -EBUSY) + goto busy; + if (ret < 0) + goto drop; + + stats->tx_bytes += skb->len; + stats->tx_packets++; + sparx5->tx.packets++; + + if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP && + SPARX5_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP) + return NETDEV_TX_OK; + + dev_consume_skb_any(skb); + return NETDEV_TX_OK; +drop: + stats->tx_dropped++; + sparx5->tx.dropped++; + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; +busy: + if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP && + SPARX5_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP) + sparx5_ptp_txtstamp_release(port, skb); + return NETDEV_TX_BUSY; +} + +static enum hrtimer_restart sparx5_injection_timeout(struct hrtimer *tmr) +{ + struct sparx5_port *port = container_of(tmr, struct sparx5_port, + inj_timer); + int grp = INJ_QUEUE; + u32 val; + + val = spx5_rd(port->sparx5, QS_INJ_STATUS); + if (QS_INJ_STATUS_WMARK_REACHED_GET(val) & BIT(grp)) { + pr_err_ratelimited("Injection: Reset watermark count\n"); + /* Reset Watermark count to restart */ + spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR_SET(1), + DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR, + port->sparx5, + DSM_DEV_TX_STOP_WM_CFG(port->portno)); + } + netif_wake_queue(port->ndev); + return HRTIMER_NORESTART; +} + +int sparx5_manual_injection_mode(struct sparx5 *sparx5) +{ + const int byte_swap = 1; + int portno; + + /* Change mode to manual extraction and injection */ + spx5_wr(QS_XTR_GRP_CFG_MODE_SET(1) | + QS_XTR_GRP_CFG_STATUS_WORD_POS_SET(1) | + QS_XTR_GRP_CFG_BYTE_SWAP_SET(byte_swap), + sparx5, QS_XTR_GRP_CFG(XTR_QUEUE)); + spx5_wr(QS_INJ_GRP_CFG_MODE_SET(1) | + QS_INJ_GRP_CFG_BYTE_SWAP_SET(byte_swap), + sparx5, QS_INJ_GRP_CFG(INJ_QUEUE)); + + /* CPU ports capture setup */ + for (portno = SPX5_PORT_CPU_0; portno <= SPX5_PORT_CPU_1; portno++) { + /* ASM CPU port: No preamble, IFH, enable padding */ + spx5_wr(ASM_PORT_CFG_PAD_ENA_SET(1) | + ASM_PORT_CFG_NO_PREAMBLE_ENA_SET(1) | + ASM_PORT_CFG_INJ_FORMAT_CFG_SET(1), /* 1 = IFH */ + sparx5, ASM_PORT_CFG(portno)); + + /* Reset WM cnt to unclog queued frames */ + spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR_SET(1), + 
DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR, + sparx5, + DSM_DEV_TX_STOP_WM_CFG(portno)); + + /* Set Disassembler Stop Watermark level */ + spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM_SET(0), + DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM, + sparx5, + DSM_DEV_TX_STOP_WM_CFG(portno)); + + /* Enable Disassembler buffer underrun watchdog + */ + spx5_rmw(DSM_BUF_CFG_UNDERFLOW_WATCHDOG_DIS_SET(0), + DSM_BUF_CFG_UNDERFLOW_WATCHDOG_DIS, + sparx5, + DSM_BUF_CFG(portno)); + } + return 0; +} + +irqreturn_t sparx5_xtr_handler(int irq, void *_sparx5) +{ + struct sparx5 *s5 = _sparx5; + int poll = 64; + + /* Check data in queue */ + while (spx5_rd(s5, QS_XTR_DATA_PRESENT) & BIT(XTR_QUEUE) && poll-- > 0) + sparx5_xtr_grp(s5, XTR_QUEUE, false); + + return IRQ_HANDLED; +} + +void sparx5_port_inj_timer_setup(struct sparx5_port *port) +{ + hrtimer_init(&port->inj_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + port->inj_timer.function = sparx5_injection_timeout; +} diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_pgid.c b/drivers/net/ethernet/microchip/sparx5/sparx5_pgid.c new file mode 100644 index 000000000..af8b43500 --- /dev/null +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_pgid.c @@ -0,0 +1,46 @@ +// SPDX-License-Identifier: GPL-2.0+ +#include "sparx5_main.h" + +void sparx5_pgid_init(struct sparx5 *spx5) +{ + int i; + + for (i = 0; i < PGID_TABLE_SIZE; i++) + spx5->pgid_map[i] = SPX5_PGID_FREE; + + /* Reserved for unicast, flood control, broadcast, and CPU. + * These cannot be freed. + */ + for (i = 0; i <= PGID_CPU; i++) + spx5->pgid_map[i] = SPX5_PGID_RESERVED; +} + +int sparx5_pgid_alloc_mcast(struct sparx5 *spx5, u16 *idx) +{ + int i; + + /* The multicast area starts at index 65, but the first 7 + * are reserved for flood masks and CPU. Start alloc after that. + */ + for (i = PGID_MCAST_START; i < PGID_TABLE_SIZE; i++) { + if (spx5->pgid_map[i] == SPX5_PGID_FREE) { + spx5->pgid_map[i] = SPX5_PGID_MULTICAST; + *idx = i; + return 0; + } + } + + return -EBUSY; +} + +int sparx5_pgid_free(struct sparx5 *spx5, u16 idx) +{ + if (idx <= PGID_CPU || idx >= PGID_TABLE_SIZE) + return -EINVAL; + + if (spx5->pgid_map[idx] == SPX5_PGID_FREE) + return -EINVAL; + + spx5->pgid_map[idx] = SPX5_PGID_FREE; + return 0; +} diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c b/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c new file mode 100644 index 000000000..830da0e5f --- /dev/null +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c @@ -0,0 +1,146 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Microchip Sparx5 Switch driver + * + * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries. 
+ */ + +#include <linux/module.h> +#include <linux/phylink.h> +#include <linux/device.h> +#include <linux/netdevice.h> +#include <linux/sfp.h> + +#include "sparx5_main_regs.h" +#include "sparx5_main.h" +#include "sparx5_port.h" + +static bool port_conf_has_changed(struct sparx5_port_config *a, struct sparx5_port_config *b) +{ + if (a->speed != b->speed || + a->portmode != b->portmode || + a->autoneg != b->autoneg || + a->pause_adv != b->pause_adv || + a->power_down != b->power_down || + a->media != b->media) + return true; + return false; +} + +static struct phylink_pcs * +sparx5_phylink_mac_select_pcs(struct phylink_config *config, + phy_interface_t interface) +{ + struct sparx5_port *port = netdev_priv(to_net_dev(config->dev)); + + return &port->phylink_pcs; +} + +static void sparx5_phylink_mac_config(struct phylink_config *config, + unsigned int mode, + const struct phylink_link_state *state) +{ + /* Currently not used */ +} + +static void sparx5_phylink_mac_link_up(struct phylink_config *config, + struct phy_device *phy, + unsigned int mode, + phy_interface_t interface, + int speed, int duplex, + bool tx_pause, bool rx_pause) +{ + struct sparx5_port *port = netdev_priv(to_net_dev(config->dev)); + struct sparx5_port_config conf; + int err; + + conf = port->conf; + conf.duplex = duplex; + conf.pause = 0; + conf.pause |= tx_pause ? MLO_PAUSE_TX : 0; + conf.pause |= rx_pause ? MLO_PAUSE_RX : 0; + conf.speed = speed; + /* Configure the port to speed/duplex/pause */ + err = sparx5_port_config(port->sparx5, port, &conf); + if (err) + netdev_err(port->ndev, "port config failed: %d\n", err); +} + +static void sparx5_phylink_mac_link_down(struct phylink_config *config, + unsigned int mode, + phy_interface_t interface) +{ + /* Currently not used */ +} + +static struct sparx5_port *sparx5_pcs_to_port(struct phylink_pcs *pcs) +{ + return container_of(pcs, struct sparx5_port, phylink_pcs); +} + +static void sparx5_pcs_get_state(struct phylink_pcs *pcs, + struct phylink_link_state *state) +{ + struct sparx5_port *port = sparx5_pcs_to_port(pcs); + struct sparx5_port_status status; + + sparx5_get_port_status(port->sparx5, port, &status); + state->link = status.link && !status.link_down; + state->an_complete = status.an_complete; + state->speed = status.speed; + state->duplex = status.duplex; + state->pause = status.pause; +} + +static int sparx5_pcs_config(struct phylink_pcs *pcs, + unsigned int mode, + phy_interface_t interface, + const unsigned long *advertising, + bool permit_pause_to_mac) +{ + struct sparx5_port *port = sparx5_pcs_to_port(pcs); + struct sparx5_port_config conf; + int ret = 0; + + conf = port->conf; + conf.power_down = false; + conf.portmode = interface; + conf.inband = phylink_autoneg_inband(mode); + conf.autoneg = phylink_test(advertising, Autoneg); + conf.pause_adv = 0; + if (phylink_test(advertising, Pause)) + conf.pause_adv |= ADVERTISE_1000XPAUSE; + if (phylink_test(advertising, Asym_Pause)) + conf.pause_adv |= ADVERTISE_1000XPSE_ASYM; + if (sparx5_is_baser(interface)) { + if (phylink_test(advertising, FIBRE)) + conf.media = PHY_MEDIA_SR; + else + conf.media = PHY_MEDIA_DAC; + } + if (!port_conf_has_changed(&port->conf, &conf)) + return ret; + /* Enable the PCS matching this interface type */ + ret = sparx5_port_pcs_set(port->sparx5, port, &conf); + if (ret) + netdev_err(port->ndev, "port PCS config failed: %d\n", ret); + return ret; +} + +static void sparx5_pcs_aneg_restart(struct phylink_pcs *pcs) +{ + /* Currently not used */ +} + +const struct phylink_pcs_ops 
sparx5_phylink_pcs_ops = { + .pcs_get_state = sparx5_pcs_get_state, + .pcs_config = sparx5_pcs_config, + .pcs_an_restart = sparx5_pcs_aneg_restart, +}; + +const struct phylink_mac_ops sparx5_phylink_mac_ops = { + .validate = phylink_generic_validate, + .mac_select_pcs = sparx5_phylink_mac_select_pcs, + .mac_config = sparx5_phylink_mac_config, + .mac_link_down = sparx5_phylink_mac_link_down, + .mac_link_up = sparx5_phylink_mac_link_up, +}; diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_port.c b/drivers/net/ethernet/microchip/sparx5/sparx5_port.c new file mode 100644 index 000000000..32709d21a --- /dev/null +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_port.c @@ -0,0 +1,1146 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Microchip Sparx5 Switch driver + * + * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries. + */ + +#include <linux/module.h> +#include <linux/phy/phy.h> + +#include "sparx5_main_regs.h" +#include "sparx5_main.h" +#include "sparx5_port.h" + +#define SPX5_ETYPE_TAG_C 0x8100 +#define SPX5_ETYPE_TAG_S 0x88a8 + +#define SPX5_WAIT_US 1000 +#define SPX5_WAIT_MAX_US 2000 + +enum port_error { + SPX5_PERR_SPEED, + SPX5_PERR_IFTYPE, +}; + +#define PAUSE_DISCARD 0xC +#define ETH_MAXLEN (ETH_DATA_LEN + ETH_HLEN + ETH_FCS_LEN) + +static void decode_sgmii_word(u16 lp_abil, struct sparx5_port_status *status) +{ + status->an_complete = true; + if (!(lp_abil & LPA_SGMII_LINK)) { + status->link = false; + return; + } + + switch (lp_abil & LPA_SGMII_SPD_MASK) { + case LPA_SGMII_10: + status->speed = SPEED_10; + break; + case LPA_SGMII_100: + status->speed = SPEED_100; + break; + case LPA_SGMII_1000: + status->speed = SPEED_1000; + break; + default: + status->link = false; + return; + } + if (lp_abil & LPA_SGMII_FULL_DUPLEX) + status->duplex = DUPLEX_FULL; + else + status->duplex = DUPLEX_HALF; +} + +static void decode_cl37_word(u16 lp_abil, uint16_t ld_abil, struct sparx5_port_status *status) +{ + status->link = !(lp_abil & ADVERTISE_RFAULT) && status->link; + status->an_complete = true; + status->duplex = (ADVERTISE_1000XFULL & lp_abil) ? + DUPLEX_FULL : DUPLEX_UNKNOWN; // 1G HDX not supported + + if ((ld_abil & ADVERTISE_1000XPAUSE) && + (lp_abil & ADVERTISE_1000XPAUSE)) { + status->pause = MLO_PAUSE_RX | MLO_PAUSE_TX; + } else if ((ld_abil & ADVERTISE_1000XPSE_ASYM) && + (lp_abil & ADVERTISE_1000XPSE_ASYM)) { + status->pause |= (lp_abil & ADVERTISE_1000XPAUSE) ? + MLO_PAUSE_TX : 0; + status->pause |= (ld_abil & ADVERTISE_1000XPAUSE) ? 
+ MLO_PAUSE_RX : 0; + } else { + status->pause = MLO_PAUSE_NONE; + } +} + +static int sparx5_get_dev2g5_status(struct sparx5 *sparx5, + struct sparx5_port *port, + struct sparx5_port_status *status) +{ + u32 portno = port->portno; + u16 lp_adv, ld_adv; + u32 value; + + /* Get PCS Link down sticky */ + value = spx5_rd(sparx5, DEV2G5_PCS1G_STICKY(portno)); + status->link_down = DEV2G5_PCS1G_STICKY_LINK_DOWN_STICKY_GET(value); + if (status->link_down) /* Clear the sticky */ + spx5_wr(value, sparx5, DEV2G5_PCS1G_STICKY(portno)); + + /* Get both current Link and Sync status */ + value = spx5_rd(sparx5, DEV2G5_PCS1G_LINK_STATUS(portno)); + status->link = DEV2G5_PCS1G_LINK_STATUS_LINK_STATUS_GET(value) && + DEV2G5_PCS1G_LINK_STATUS_SYNC_STATUS_GET(value); + + if (port->conf.portmode == PHY_INTERFACE_MODE_1000BASEX) + status->speed = SPEED_1000; + else if (port->conf.portmode == PHY_INTERFACE_MODE_2500BASEX) + status->speed = SPEED_2500; + + status->duplex = DUPLEX_FULL; + + /* Get PCS ANEG status register */ + value = spx5_rd(sparx5, DEV2G5_PCS1G_ANEG_STATUS(portno)); + + /* Aneg complete provides more information */ + if (DEV2G5_PCS1G_ANEG_STATUS_ANEG_COMPLETE_GET(value)) { + lp_adv = DEV2G5_PCS1G_ANEG_STATUS_LP_ADV_ABILITY_GET(value); + if (port->conf.portmode == PHY_INTERFACE_MODE_SGMII) { + decode_sgmii_word(lp_adv, status); + } else { + value = spx5_rd(sparx5, DEV2G5_PCS1G_ANEG_CFG(portno)); + ld_adv = DEV2G5_PCS1G_ANEG_CFG_ADV_ABILITY_GET(value); + decode_cl37_word(lp_adv, ld_adv, status); + } + } + return 0; +} + +static int sparx5_get_sfi_status(struct sparx5 *sparx5, + struct sparx5_port *port, + struct sparx5_port_status *status) +{ + bool high_speed_dev = sparx5_is_baser(port->conf.portmode); + u32 portno = port->portno; + u32 value, dev, tinst; + void __iomem *inst; + + if (!high_speed_dev) { + netdev_err(port->ndev, "error: low speed and SFI mode\n"); + return -EINVAL; + } + + dev = sparx5_to_high_dev(portno); + tinst = sparx5_port_dev_index(portno); + inst = spx5_inst_get(sparx5, dev, tinst); + + value = spx5_inst_rd(inst, DEV10G_MAC_TX_MONITOR_STICKY(0)); + if (value != DEV10G_MAC_TX_MONITOR_STICKY_IDLE_STATE_STICKY) { + /* The link is or has been down. Clear the sticky bit */ + status->link_down = 1; + spx5_inst_wr(0xffffffff, inst, DEV10G_MAC_TX_MONITOR_STICKY(0)); + value = spx5_inst_rd(inst, DEV10G_MAC_TX_MONITOR_STICKY(0)); + } + status->link = (value == DEV10G_MAC_TX_MONITOR_STICKY_IDLE_STATE_STICKY); + status->duplex = DUPLEX_FULL; + if (port->conf.portmode == PHY_INTERFACE_MODE_5GBASER) + status->speed = SPEED_5000; + else if (port->conf.portmode == PHY_INTERFACE_MODE_10GBASER) + status->speed = SPEED_10000; + else + status->speed = SPEED_25000; + + return 0; +} + +/* Get link status of 1000Base-X/in-band and SFI ports. 
+ */ +int sparx5_get_port_status(struct sparx5 *sparx5, + struct sparx5_port *port, + struct sparx5_port_status *status) +{ + memset(status, 0, sizeof(*status)); + status->speed = port->conf.speed; + if (port->conf.power_down) { + status->link = false; + return 0; + } + switch (port->conf.portmode) { + case PHY_INTERFACE_MODE_SGMII: + case PHY_INTERFACE_MODE_QSGMII: + case PHY_INTERFACE_MODE_1000BASEX: + case PHY_INTERFACE_MODE_2500BASEX: + return sparx5_get_dev2g5_status(sparx5, port, status); + case PHY_INTERFACE_MODE_5GBASER: + case PHY_INTERFACE_MODE_10GBASER: + case PHY_INTERFACE_MODE_25GBASER: + return sparx5_get_sfi_status(sparx5, port, status); + case PHY_INTERFACE_MODE_NA: + return 0; + default: + netdev_err(port->ndev, "Status not supported"); + return -ENODEV; + } + return 0; +} + +static int sparx5_port_error(struct sparx5_port *port, + struct sparx5_port_config *conf, + enum port_error errtype) +{ + switch (errtype) { + case SPX5_PERR_SPEED: + netdev_err(port->ndev, + "Interface does not support speed: %u: for %s\n", + conf->speed, phy_modes(conf->portmode)); + break; + case SPX5_PERR_IFTYPE: + netdev_err(port->ndev, + "Switch port does not support interface type: %s\n", + phy_modes(conf->portmode)); + break; + default: + netdev_err(port->ndev, + "Interface configuration error\n"); + } + + return -EINVAL; +} + +static int sparx5_port_verify_speed(struct sparx5 *sparx5, + struct sparx5_port *port, + struct sparx5_port_config *conf) +{ + if ((sparx5_port_is_2g5(port->portno) && + conf->speed > SPEED_2500) || + (sparx5_port_is_5g(port->portno) && + conf->speed > SPEED_5000) || + (sparx5_port_is_10g(port->portno) && + conf->speed > SPEED_10000)) + return sparx5_port_error(port, conf, SPX5_PERR_SPEED); + + switch (conf->portmode) { + case PHY_INTERFACE_MODE_NA: + return -EINVAL; + case PHY_INTERFACE_MODE_1000BASEX: + if (conf->speed != SPEED_1000 || + sparx5_port_is_2g5(port->portno)) + return sparx5_port_error(port, conf, SPX5_PERR_SPEED); + if (sparx5_port_is_2g5(port->portno)) + return sparx5_port_error(port, conf, SPX5_PERR_IFTYPE); + break; + case PHY_INTERFACE_MODE_2500BASEX: + if (conf->speed != SPEED_2500 || + sparx5_port_is_2g5(port->portno)) + return sparx5_port_error(port, conf, SPX5_PERR_SPEED); + break; + case PHY_INTERFACE_MODE_QSGMII: + if (port->portno > 47) + return sparx5_port_error(port, conf, SPX5_PERR_IFTYPE); + fallthrough; + case PHY_INTERFACE_MODE_SGMII: + if (conf->speed != SPEED_1000 && + conf->speed != SPEED_100 && + conf->speed != SPEED_10 && + conf->speed != SPEED_2500) + return sparx5_port_error(port, conf, SPX5_PERR_SPEED); + break; + case PHY_INTERFACE_MODE_5GBASER: + case PHY_INTERFACE_MODE_10GBASER: + case PHY_INTERFACE_MODE_25GBASER: + if ((conf->speed != SPEED_5000 && + conf->speed != SPEED_10000 && + conf->speed != SPEED_25000)) + return sparx5_port_error(port, conf, SPX5_PERR_SPEED); + break; + default: + return sparx5_port_error(port, conf, SPX5_PERR_IFTYPE); + } + return 0; +} + +static bool sparx5_dev_change(struct sparx5 *sparx5, + struct sparx5_port *port, + struct sparx5_port_config *conf) +{ + return sparx5_is_baser(port->conf.portmode) ^ + sparx5_is_baser(conf->portmode); +} + +static int sparx5_port_flush_poll(struct sparx5 *sparx5, u32 portno) +{ + u32 value, resource, prio, delay_cnt = 0; + bool poll_src = true; + char *mem = ""; + + /* Resource == 0: Memory tracked per source (SRC-MEM) + * Resource == 1: Frame references tracked per source (SRC-REF) + * Resource == 2: Memory tracked per destination (DST-MEM) + * Resource == 3: Frame 
references tracked per destination. (DST-REF) + */ + while (1) { + bool empty = true; + + for (resource = 0; resource < (poll_src ? 2 : 1); resource++) { + u32 base; + + base = (resource == 0 ? 2048 : 0) + SPX5_PRIOS * portno; + for (prio = 0; prio < SPX5_PRIOS; prio++) { + value = spx5_rd(sparx5, + QRES_RES_STAT(base + prio)); + if (value) { + mem = resource == 0 ? + "DST-MEM" : "SRC-MEM"; + empty = false; + } + } + } + + if (empty) + break; + + if (delay_cnt++ == 2000) { + dev_err(sparx5->dev, + "Flush timeout port %u. %s queue not empty\n", + portno, mem); + return -EINVAL; + } + + usleep_range(SPX5_WAIT_US, SPX5_WAIT_MAX_US); + } + return 0; +} + +static int sparx5_port_disable(struct sparx5 *sparx5, struct sparx5_port *port, bool high_spd_dev) +{ + u32 tinst = high_spd_dev ? + sparx5_port_dev_index(port->portno) : port->portno; + u32 dev = high_spd_dev ? + sparx5_to_high_dev(port->portno) : TARGET_DEV2G5; + void __iomem *devinst = spx5_inst_get(sparx5, dev, tinst); + u32 spd = port->conf.speed; + u32 spd_prm; + int err; + + if (high_spd_dev) { + /* 1: Reset the PCS Rx clock domain */ + spx5_inst_rmw(DEV10G_DEV_RST_CTRL_PCS_RX_RST, + DEV10G_DEV_RST_CTRL_PCS_RX_RST, + devinst, + DEV10G_DEV_RST_CTRL(0)); + + /* 2: Disable MAC frame reception */ + spx5_inst_rmw(0, + DEV10G_MAC_ENA_CFG_RX_ENA, + devinst, + DEV10G_MAC_ENA_CFG(0)); + } else { + /* 1: Reset the PCS Rx clock domain */ + spx5_inst_rmw(DEV2G5_DEV_RST_CTRL_PCS_RX_RST, + DEV2G5_DEV_RST_CTRL_PCS_RX_RST, + devinst, + DEV2G5_DEV_RST_CTRL(0)); + /* 2: Disable MAC frame reception */ + spx5_inst_rmw(0, + DEV2G5_MAC_ENA_CFG_RX_ENA, + devinst, + DEV2G5_MAC_ENA_CFG(0)); + } + /* 3: Disable traffic being sent to or from switch port->portno */ + spx5_rmw(0, + QFWD_SWITCH_PORT_MODE_PORT_ENA, + sparx5, + QFWD_SWITCH_PORT_MODE(port->portno)); + + /* 4: Disable dequeuing from the egress queues */ + spx5_rmw(HSCH_PORT_MODE_DEQUEUE_DIS, + HSCH_PORT_MODE_DEQUEUE_DIS, + sparx5, + HSCH_PORT_MODE(port->portno)); + + /* 5: Disable Flowcontrol */ + spx5_rmw(QSYS_PAUSE_CFG_PAUSE_STOP_SET(0xFFF - 1), + QSYS_PAUSE_CFG_PAUSE_STOP, + sparx5, + QSYS_PAUSE_CFG(port->portno)); + + spd_prm = spd == SPEED_10 ? 1000 : spd == SPEED_100 ? 
100 : 10; + /* 6: Wait while the last frame is exiting the queues */ + usleep_range(8 * spd_prm, 10 * spd_prm); + + /* 7: Flush the queues accociated with the port->portno */ + spx5_rmw(HSCH_FLUSH_CTRL_FLUSH_PORT_SET(port->portno) | + HSCH_FLUSH_CTRL_FLUSH_DST_SET(1) | + HSCH_FLUSH_CTRL_FLUSH_SRC_SET(1) | + HSCH_FLUSH_CTRL_FLUSH_ENA_SET(1), + HSCH_FLUSH_CTRL_FLUSH_PORT | + HSCH_FLUSH_CTRL_FLUSH_DST | + HSCH_FLUSH_CTRL_FLUSH_SRC | + HSCH_FLUSH_CTRL_FLUSH_ENA, + sparx5, + HSCH_FLUSH_CTRL); + + /* 8: Enable dequeuing from the egress queues */ + spx5_rmw(0, + HSCH_PORT_MODE_DEQUEUE_DIS, + sparx5, + HSCH_PORT_MODE(port->portno)); + + /* 9: Wait until flushing is complete */ + err = sparx5_port_flush_poll(sparx5, port->portno); + if (err) + return err; + + /* 10: Reset the MAC clock domain */ + if (high_spd_dev) { + spx5_inst_rmw(DEV10G_DEV_RST_CTRL_PCS_TX_RST_SET(1) | + DEV10G_DEV_RST_CTRL_MAC_RX_RST_SET(1) | + DEV10G_DEV_RST_CTRL_MAC_TX_RST_SET(1), + DEV10G_DEV_RST_CTRL_PCS_TX_RST | + DEV10G_DEV_RST_CTRL_MAC_RX_RST | + DEV10G_DEV_RST_CTRL_MAC_TX_RST, + devinst, + DEV10G_DEV_RST_CTRL(0)); + + } else { + spx5_inst_rmw(DEV2G5_DEV_RST_CTRL_SPEED_SEL_SET(3) | + DEV2G5_DEV_RST_CTRL_PCS_TX_RST_SET(1) | + DEV2G5_DEV_RST_CTRL_PCS_RX_RST_SET(1) | + DEV2G5_DEV_RST_CTRL_MAC_TX_RST_SET(1) | + DEV2G5_DEV_RST_CTRL_MAC_RX_RST_SET(1), + DEV2G5_DEV_RST_CTRL_SPEED_SEL | + DEV2G5_DEV_RST_CTRL_PCS_TX_RST | + DEV2G5_DEV_RST_CTRL_PCS_RX_RST | + DEV2G5_DEV_RST_CTRL_MAC_TX_RST | + DEV2G5_DEV_RST_CTRL_MAC_RX_RST, + devinst, + DEV2G5_DEV_RST_CTRL(0)); + } + /* 11: Clear flushing */ + spx5_rmw(HSCH_FLUSH_CTRL_FLUSH_PORT_SET(port->portno) | + HSCH_FLUSH_CTRL_FLUSH_ENA_SET(0), + HSCH_FLUSH_CTRL_FLUSH_PORT | + HSCH_FLUSH_CTRL_FLUSH_ENA, + sparx5, + HSCH_FLUSH_CTRL); + + if (high_spd_dev) { + u32 pcs = sparx5_to_pcs_dev(port->portno); + void __iomem *pcsinst = spx5_inst_get(sparx5, pcs, tinst); + + /* 12: Disable 5G/10G/25 BaseR PCS */ + spx5_inst_rmw(PCS10G_BR_PCS_CFG_PCS_ENA_SET(0), + PCS10G_BR_PCS_CFG_PCS_ENA, + pcsinst, + PCS10G_BR_PCS_CFG(0)); + + if (sparx5_port_is_25g(port->portno)) + /* Disable 25G PCS */ + spx5_rmw(DEV25G_PCS25G_CFG_PCS25G_ENA_SET(0), + DEV25G_PCS25G_CFG_PCS25G_ENA, + sparx5, + DEV25G_PCS25G_CFG(tinst)); + } else { + /* 12: Disable 1G PCS */ + spx5_rmw(DEV2G5_PCS1G_CFG_PCS_ENA_SET(0), + DEV2G5_PCS1G_CFG_PCS_ENA, + sparx5, + DEV2G5_PCS1G_CFG(port->portno)); + } + + /* The port is now flushed and disabled */ + return 0; +} + +static int sparx5_port_fifo_sz(struct sparx5 *sparx5, + u32 portno, u32 speed) +{ + u32 sys_clk = sparx5_clk_period(sparx5->coreclock); + const u32 taxi_dist[SPX5_PORTS_ALL] = { + 6, 8, 10, 6, 8, 10, 6, 8, 10, 6, 8, 10, + 4, 4, 4, 4, + 11, 12, 13, 14, 15, 16, 17, 18, + 11, 12, 13, 14, 15, 16, 17, 18, + 11, 12, 13, 14, 15, 16, 17, 18, + 11, 12, 13, 14, 15, 16, 17, 18, + 4, 6, 8, 4, 6, 8, 6, 8, + 2, 2, 2, 2, 2, 2, 2, 4, 2 + }; + u32 mac_per = 6400, tmp1, tmp2, tmp3; + u32 fifo_width = 16; + u32 mac_width = 8; + u32 addition = 0; + + switch (speed) { + case SPEED_25000: + return 0; + case SPEED_10000: + mac_per = 6400; + mac_width = 8; + addition = 1; + break; + case SPEED_5000: + mac_per = 12800; + mac_width = 8; + addition = 0; + break; + case SPEED_2500: + mac_per = 3200; + mac_width = 1; + addition = 0; + break; + case SPEED_1000: + mac_per = 8000; + mac_width = 1; + addition = 0; + break; + case SPEED_100: + case SPEED_10: + return 1; + default: + break; + } + + tmp1 = 1000 * mac_width / fifo_width; + tmp2 = 3000 + ((12000 + 2 * taxi_dist[portno] * 1000) + * sys_clk / mac_per); 
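+ /* Illustrative example (assuming sparx5_clk_period() returns the core
+ * clock period in picoseconds, i.e. 1600 ps at 625 MHz): for a 10G
+ * port (mac_per = 6400, mac_width = 8) with taxi_dist[portno] == 4,
+ * tmp1 = 1000 * 8 / 16 = 500,
+ * tmp2 = 3000 + (12000 + 2 * 4 * 1000) * 1600 / 6400 = 8000,
+ * tmp3 = 500 * 8000 / 1000 = 4000,
+ * and the returned watermark is (4000 + 2000 + 999) / 1000 + 1 = 7 cells.
+ */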
+ tmp3 = tmp1 * tmp2 / 1000; + return (tmp3 + 2000 + 999) / 1000 + addition; +} + +/* Configure port muxing: + * QSGMII: 4x2G5 devices + */ +static int sparx5_port_mux_set(struct sparx5 *sparx5, + struct sparx5_port *port, + struct sparx5_port_config *conf) +{ + u32 portno = port->portno; + u32 inst; + + if (port->conf.portmode == conf->portmode) + return 0; /* Nothing to do */ + + switch (conf->portmode) { + case PHY_INTERFACE_MODE_QSGMII: /* QSGMII: 4x2G5 devices. Mode Q' */ + inst = (portno - portno % 4) / 4; + spx5_rmw(BIT(inst), + BIT(inst), + sparx5, + PORT_CONF_QSGMII_ENA); + + if ((portno / 4 % 2) == 0) { + /* Affects d0-d3,d8-d11..d40-d43 */ + spx5_rmw(PORT_CONF_USGMII_CFG_BYPASS_SCRAM_SET(1) | + PORT_CONF_USGMII_CFG_BYPASS_DESCRAM_SET(1) | + PORT_CONF_USGMII_CFG_QUAD_MODE_SET(1), + PORT_CONF_USGMII_CFG_BYPASS_SCRAM | + PORT_CONF_USGMII_CFG_BYPASS_DESCRAM | + PORT_CONF_USGMII_CFG_QUAD_MODE, + sparx5, + PORT_CONF_USGMII_CFG((portno / 8))); + } + break; + default: + break; + } + return 0; +} + +static int sparx5_port_max_tags_set(struct sparx5 *sparx5, + struct sparx5_port *port) +{ + enum sparx5_port_max_tags max_tags = port->max_vlan_tags; + int tag_ct = max_tags == SPX5_PORT_MAX_TAGS_ONE ? 1 : + max_tags == SPX5_PORT_MAX_TAGS_TWO ? 2 : 0; + bool dtag = max_tags == SPX5_PORT_MAX_TAGS_TWO; + enum sparx5_vlan_port_type vlan_type = port->vlan_type; + bool dotag = max_tags != SPX5_PORT_MAX_TAGS_NONE; + u32 dev = sparx5_to_high_dev(port->portno); + u32 tinst = sparx5_port_dev_index(port->portno); + void __iomem *inst = spx5_inst_get(sparx5, dev, tinst); + u32 etype; + + etype = (vlan_type == SPX5_VLAN_PORT_TYPE_S_CUSTOM ? + port->custom_etype : + vlan_type == SPX5_VLAN_PORT_TYPE_C ? + SPX5_ETYPE_TAG_C : SPX5_ETYPE_TAG_S); + + spx5_wr(DEV2G5_MAC_TAGS_CFG_TAG_ID_SET(etype) | + DEV2G5_MAC_TAGS_CFG_PB_ENA_SET(dtag) | + DEV2G5_MAC_TAGS_CFG_VLAN_AWR_ENA_SET(dotag) | + DEV2G5_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA_SET(dotag), + sparx5, + DEV2G5_MAC_TAGS_CFG(port->portno)); + + if (sparx5_port_is_2g5(port->portno)) + return 0; + + spx5_inst_rmw(DEV10G_MAC_TAGS_CFG_TAG_ID_SET(etype) | + DEV10G_MAC_TAGS_CFG_TAG_ENA_SET(dotag), + DEV10G_MAC_TAGS_CFG_TAG_ID | + DEV10G_MAC_TAGS_CFG_TAG_ENA, + inst, + DEV10G_MAC_TAGS_CFG(0, 0)); + + spx5_inst_rmw(DEV10G_MAC_NUM_TAGS_CFG_NUM_TAGS_SET(tag_ct), + DEV10G_MAC_NUM_TAGS_CFG_NUM_TAGS, + inst, + DEV10G_MAC_NUM_TAGS_CFG(0)); + + spx5_inst_rmw(DEV10G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK_SET(dotag), + DEV10G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK, + inst, + DEV10G_MAC_MAXLEN_CFG(0)); + return 0; +} + +int sparx5_port_fwd_urg(struct sparx5 *sparx5, u32 speed) +{ + u32 clk_period_ps = 1600; /* 625Mhz for now */ + u32 urg = 672000; + + switch (speed) { + case SPEED_10: + case SPEED_100: + case SPEED_1000: + urg = 672000; + break; + case SPEED_2500: + urg = 270000; + break; + case SPEED_5000: + urg = 135000; + break; + case SPEED_10000: + urg = 67200; + break; + case SPEED_25000: + urg = 27000; + break; + } + return urg / clk_period_ps - 1; +} + +static u16 sparx5_wm_enc(u16 value) +{ + if (value >= 2048) + return 2048 + value / 16; + + return value; +} + +static int sparx5_port_fc_setup(struct sparx5 *sparx5, + struct sparx5_port *port, + struct sparx5_port_config *conf) +{ + bool fc_obey = conf->pause & MLO_PAUSE_RX ? 
1 : 0; + u32 pause_stop = 0xFFF - 1; /* FC gen disabled */ + + if (conf->pause & MLO_PAUSE_TX) + pause_stop = sparx5_wm_enc(4 * (ETH_MAXLEN / + SPX5_BUFFER_CELL_SZ)); + + /* Set HDX flowcontrol */ + spx5_rmw(DSM_MAC_CFG_HDX_BACKPREASSURE_SET(conf->duplex == DUPLEX_HALF), + DSM_MAC_CFG_HDX_BACKPREASSURE, + sparx5, + DSM_MAC_CFG(port->portno)); + + /* Obey flowcontrol */ + spx5_rmw(DSM_RX_PAUSE_CFG_RX_PAUSE_EN_SET(fc_obey), + DSM_RX_PAUSE_CFG_RX_PAUSE_EN, + sparx5, + DSM_RX_PAUSE_CFG(port->portno)); + + /* Disable forward pressure */ + spx5_rmw(QSYS_FWD_PRESSURE_FWD_PRESSURE_DIS_SET(fc_obey), + QSYS_FWD_PRESSURE_FWD_PRESSURE_DIS, + sparx5, + QSYS_FWD_PRESSURE(port->portno)); + + /* Generate pause frames */ + spx5_rmw(QSYS_PAUSE_CFG_PAUSE_STOP_SET(pause_stop), + QSYS_PAUSE_CFG_PAUSE_STOP, + sparx5, + QSYS_PAUSE_CFG(port->portno)); + + return 0; +} + +static u16 sparx5_get_aneg_word(struct sparx5_port_config *conf) +{ + if (conf->portmode == PHY_INTERFACE_MODE_1000BASEX) /* cl-37 aneg */ + return (conf->pause_adv | ADVERTISE_LPACK | ADVERTISE_1000XFULL); + else + return 1; /* Enable SGMII Aneg */ +} + +int sparx5_serdes_set(struct sparx5 *sparx5, + struct sparx5_port *port, + struct sparx5_port_config *conf) +{ + int portmode, err, speed = conf->speed; + + if (conf->portmode == PHY_INTERFACE_MODE_QSGMII && + ((port->portno % 4) != 0)) { + return 0; + } + if (sparx5_is_baser(conf->portmode)) { + if (conf->portmode == PHY_INTERFACE_MODE_25GBASER) + speed = SPEED_25000; + else if (conf->portmode == PHY_INTERFACE_MODE_10GBASER) + speed = SPEED_10000; + else + speed = SPEED_5000; + } + + err = phy_set_media(port->serdes, conf->media); + if (err) + return err; + if (speed > 0) { + err = phy_set_speed(port->serdes, speed); + if (err) + return err; + } + if (conf->serdes_reset) { + err = phy_reset(port->serdes); + if (err) + return err; + } + + /* Configure SerDes with port parameters + * For BaseR, the serdes driver supports 10GGBASE-R and speed 5G/10G/25G + */ + portmode = conf->portmode; + if (sparx5_is_baser(conf->portmode)) + portmode = PHY_INTERFACE_MODE_10GBASER; + err = phy_set_mode_ext(port->serdes, PHY_MODE_ETHERNET, portmode); + if (err) + return err; + conf->serdes_reset = false; + return err; +} + +static int sparx5_port_pcs_low_set(struct sparx5 *sparx5, + struct sparx5_port *port, + struct sparx5_port_config *conf) +{ + bool sgmii = false, inband_aneg = false; + int err; + + if (port->conf.inband) { + if (conf->portmode == PHY_INTERFACE_MODE_SGMII || + conf->portmode == PHY_INTERFACE_MODE_QSGMII) + inband_aneg = true; /* Cisco-SGMII in-band-aneg */ + else if (conf->portmode == PHY_INTERFACE_MODE_1000BASEX && + conf->autoneg) + inband_aneg = true; /* Clause-37 in-band-aneg */ + + err = sparx5_serdes_set(sparx5, port, conf); + if (err) + return -EINVAL; + } else { + sgmii = true; /* Phy is connected to the MAC */ + } + + /* Choose SGMII or 1000BaseX/2500BaseX PCS mode */ + spx5_rmw(DEV2G5_PCS1G_MODE_CFG_SGMII_MODE_ENA_SET(sgmii), + DEV2G5_PCS1G_MODE_CFG_SGMII_MODE_ENA, + sparx5, + DEV2G5_PCS1G_MODE_CFG(port->portno)); + + /* Enable PCS */ + spx5_wr(DEV2G5_PCS1G_CFG_PCS_ENA_SET(1), + sparx5, + DEV2G5_PCS1G_CFG(port->portno)); + + if (inband_aneg) { + u16 abil = sparx5_get_aneg_word(conf); + + /* Enable in-band aneg */ + spx5_wr(DEV2G5_PCS1G_ANEG_CFG_ADV_ABILITY_SET(abil) | + DEV2G5_PCS1G_ANEG_CFG_SW_RESOLVE_ENA_SET(1) | + DEV2G5_PCS1G_ANEG_CFG_ANEG_ENA_SET(1) | + DEV2G5_PCS1G_ANEG_CFG_ANEG_RESTART_ONE_SHOT_SET(1), + sparx5, + DEV2G5_PCS1G_ANEG_CFG(port->portno)); + } else { + spx5_wr(0, 
sparx5, DEV2G5_PCS1G_ANEG_CFG(port->portno)); + } + + /* Take PCS out of reset */ + spx5_rmw(DEV2G5_DEV_RST_CTRL_SPEED_SEL_SET(2) | + DEV2G5_DEV_RST_CTRL_PCS_TX_RST_SET(0) | + DEV2G5_DEV_RST_CTRL_PCS_RX_RST_SET(0), + DEV2G5_DEV_RST_CTRL_SPEED_SEL | + DEV2G5_DEV_RST_CTRL_PCS_TX_RST | + DEV2G5_DEV_RST_CTRL_PCS_RX_RST, + sparx5, + DEV2G5_DEV_RST_CTRL(port->portno)); + + return 0; +} + +static int sparx5_port_pcs_high_set(struct sparx5 *sparx5, + struct sparx5_port *port, + struct sparx5_port_config *conf) +{ + u32 clk_spd = conf->portmode == PHY_INTERFACE_MODE_5GBASER ? 1 : 0; + u32 pix = sparx5_port_dev_index(port->portno); + u32 dev = sparx5_to_high_dev(port->portno); + u32 pcs = sparx5_to_pcs_dev(port->portno); + void __iomem *devinst; + void __iomem *pcsinst; + int err; + + devinst = spx5_inst_get(sparx5, dev, pix); + pcsinst = spx5_inst_get(sparx5, pcs, pix); + + /* SFI : No in-band-aneg. Speeds 5G/10G/25G */ + err = sparx5_serdes_set(sparx5, port, conf); + if (err) + return -EINVAL; + if (conf->portmode == PHY_INTERFACE_MODE_25GBASER) { + /* Enable PCS for 25G device, speed 25G */ + spx5_rmw(DEV25G_PCS25G_CFG_PCS25G_ENA_SET(1), + DEV25G_PCS25G_CFG_PCS25G_ENA, + sparx5, + DEV25G_PCS25G_CFG(pix)); + } else { + /* Enable PCS for 5G/10G/25G devices, speed 5G/10G */ + spx5_inst_rmw(PCS10G_BR_PCS_CFG_PCS_ENA_SET(1), + PCS10G_BR_PCS_CFG_PCS_ENA, + pcsinst, + PCS10G_BR_PCS_CFG(0)); + } + + /* Enable 5G/10G/25G MAC module */ + spx5_inst_wr(DEV10G_MAC_ENA_CFG_RX_ENA_SET(1) | + DEV10G_MAC_ENA_CFG_TX_ENA_SET(1), + devinst, + DEV10G_MAC_ENA_CFG(0)); + + /* Take the device out of reset */ + spx5_inst_rmw(DEV10G_DEV_RST_CTRL_PCS_RX_RST_SET(0) | + DEV10G_DEV_RST_CTRL_PCS_TX_RST_SET(0) | + DEV10G_DEV_RST_CTRL_MAC_RX_RST_SET(0) | + DEV10G_DEV_RST_CTRL_MAC_TX_RST_SET(0) | + DEV10G_DEV_RST_CTRL_SPEED_SEL_SET(clk_spd), + DEV10G_DEV_RST_CTRL_PCS_RX_RST | + DEV10G_DEV_RST_CTRL_PCS_TX_RST | + DEV10G_DEV_RST_CTRL_MAC_RX_RST | + DEV10G_DEV_RST_CTRL_MAC_TX_RST | + DEV10G_DEV_RST_CTRL_SPEED_SEL, + devinst, + DEV10G_DEV_RST_CTRL(0)); + + return 0; +} + +/* Switch between 1G/2500 and 5G/10G/25G devices */ +static void sparx5_dev_switch(struct sparx5 *sparx5, int port, bool hsd) +{ + int bt_indx = BIT(sparx5_port_dev_index(port)); + + if (sparx5_port_is_5g(port)) { + spx5_rmw(hsd ? 0 : bt_indx, + bt_indx, + sparx5, + PORT_CONF_DEV5G_MODES); + } else if (sparx5_port_is_10g(port)) { + spx5_rmw(hsd ? 0 : bt_indx, + bt_indx, + sparx5, + PORT_CONF_DEV10G_MODES); + } else if (sparx5_port_is_25g(port)) { + spx5_rmw(hsd ? 0 : bt_indx, + bt_indx, + sparx5, + PORT_CONF_DEV25G_MODES); + } +} + +/* Configure speed/duplex dependent registers */ +static int sparx5_port_config_low_set(struct sparx5 *sparx5, + struct sparx5_port *port, + struct sparx5_port_config *conf) +{ + u32 clk_spd, gig_mode, tx_gap, hdx_gap_1, hdx_gap_2; + bool fdx = conf->duplex == DUPLEX_FULL; + int spd = conf->speed; + + clk_spd = spd == SPEED_10 ? 0 : spd == SPEED_100 ? 1 : 2; + gig_mode = spd == SPEED_1000 || spd == SPEED_2500; + tx_gap = spd == SPEED_1000 ? 4 : fdx ? 6 : 5; + hdx_gap_1 = spd == SPEED_1000 ? 0 : spd == SPEED_100 ? 1 : 2; + hdx_gap_2 = spd == SPEED_1000 ? 0 : spd == SPEED_100 ? 
4 : 1; + + /* GIG/FDX mode */ + spx5_rmw(DEV2G5_MAC_MODE_CFG_GIGA_MODE_ENA_SET(gig_mode) | + DEV2G5_MAC_MODE_CFG_FDX_ENA_SET(fdx), + DEV2G5_MAC_MODE_CFG_GIGA_MODE_ENA | + DEV2G5_MAC_MODE_CFG_FDX_ENA, + sparx5, + DEV2G5_MAC_MODE_CFG(port->portno)); + + /* Set MAC IFG Gaps */ + spx5_wr(DEV2G5_MAC_IFG_CFG_TX_IFG_SET(tx_gap) | + DEV2G5_MAC_IFG_CFG_RX_IFG1_SET(hdx_gap_1) | + DEV2G5_MAC_IFG_CFG_RX_IFG2_SET(hdx_gap_2), + sparx5, + DEV2G5_MAC_IFG_CFG(port->portno)); + + /* Disabling frame aging when in HDX (due to HDX issue) */ + spx5_rmw(HSCH_PORT_MODE_AGE_DIS_SET(fdx == 0), + HSCH_PORT_MODE_AGE_DIS, + sparx5, + HSCH_PORT_MODE(port->portno)); + + /* Enable MAC module */ + spx5_wr(DEV2G5_MAC_ENA_CFG_RX_ENA | + DEV2G5_MAC_ENA_CFG_TX_ENA, + sparx5, + DEV2G5_MAC_ENA_CFG(port->portno)); + + /* Select speed and take MAC out of reset */ + spx5_rmw(DEV2G5_DEV_RST_CTRL_SPEED_SEL_SET(clk_spd) | + DEV2G5_DEV_RST_CTRL_MAC_TX_RST_SET(0) | + DEV2G5_DEV_RST_CTRL_MAC_RX_RST_SET(0), + DEV2G5_DEV_RST_CTRL_SPEED_SEL | + DEV2G5_DEV_RST_CTRL_MAC_TX_RST | + DEV2G5_DEV_RST_CTRL_MAC_RX_RST, + sparx5, + DEV2G5_DEV_RST_CTRL(port->portno)); + + return 0; +} + +int sparx5_port_pcs_set(struct sparx5 *sparx5, + struct sparx5_port *port, + struct sparx5_port_config *conf) + +{ + bool high_speed_dev = sparx5_is_baser(conf->portmode); + int err; + + if (sparx5_dev_change(sparx5, port, conf)) { + /* switch device */ + sparx5_dev_switch(sparx5, port->portno, high_speed_dev); + + /* Disable the not-in-use device */ + err = sparx5_port_disable(sparx5, port, !high_speed_dev); + if (err) + return err; + } + /* Disable the port before re-configuring */ + err = sparx5_port_disable(sparx5, port, high_speed_dev); + if (err) + return -EINVAL; + + if (high_speed_dev) + err = sparx5_port_pcs_high_set(sparx5, port, conf); + else + err = sparx5_port_pcs_low_set(sparx5, port, conf); + + if (err) + return -EINVAL; + + if (port->conf.inband) { + /* Enable/disable 1G counters in ASM */ + spx5_rmw(ASM_PORT_CFG_CSC_STAT_DIS_SET(high_speed_dev), + ASM_PORT_CFG_CSC_STAT_DIS, + sparx5, + ASM_PORT_CFG(port->portno)); + + /* Enable/disable 1G counters in DSM */ + spx5_rmw(DSM_BUF_CFG_CSC_STAT_DIS_SET(high_speed_dev), + DSM_BUF_CFG_CSC_STAT_DIS, + sparx5, + DSM_BUF_CFG(port->portno)); + } + + port->conf = *conf; + + return 0; +} + +int sparx5_port_config(struct sparx5 *sparx5, + struct sparx5_port *port, + struct sparx5_port_config *conf) +{ + bool high_speed_dev = sparx5_is_baser(conf->portmode); + int err, urgency, stop_wm; + + err = sparx5_port_verify_speed(sparx5, port, conf); + if (err) + return err; + + /* high speed device is already configured */ + if (!high_speed_dev) + sparx5_port_config_low_set(sparx5, port, conf); + + /* Configure flow control */ + err = sparx5_port_fc_setup(sparx5, port, conf); + if (err) + return err; + + /* Set the DSM stop watermark */ + stop_wm = sparx5_port_fifo_sz(sparx5, port->portno, conf->speed); + spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM_SET(stop_wm), + DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM, + sparx5, + DSM_DEV_TX_STOP_WM_CFG(port->portno)); + + /* Enable port in queue system */ + urgency = sparx5_port_fwd_urg(sparx5, conf->speed); + spx5_rmw(QFWD_SWITCH_PORT_MODE_PORT_ENA_SET(1) | + QFWD_SWITCH_PORT_MODE_FWD_URGENCY_SET(urgency), + QFWD_SWITCH_PORT_MODE_PORT_ENA | + QFWD_SWITCH_PORT_MODE_FWD_URGENCY, + sparx5, + QFWD_SWITCH_PORT_MODE(port->portno)); + + /* Save the new values */ + port->conf = *conf; + + return 0; +} + +/* Initialize port config to default */ +int sparx5_port_init(struct sparx5 *sparx5, + 
struct sparx5_port *port, + struct sparx5_port_config *conf) +{ + u32 pause_start = sparx5_wm_enc(6 * (ETH_MAXLEN / SPX5_BUFFER_CELL_SZ)); + u32 atop = sparx5_wm_enc(20 * (ETH_MAXLEN / SPX5_BUFFER_CELL_SZ)); + u32 devhigh = sparx5_to_high_dev(port->portno); + u32 pix = sparx5_port_dev_index(port->portno); + u32 pcs = sparx5_to_pcs_dev(port->portno); + bool sd_pol = port->signd_active_high; + bool sd_sel = !port->signd_internal; + bool sd_ena = port->signd_enable; + u32 pause_stop = 0xFFF - 1; /* FC generate disabled */ + void __iomem *devinst; + void __iomem *pcsinst; + int err; + + devinst = spx5_inst_get(sparx5, devhigh, pix); + pcsinst = spx5_inst_get(sparx5, pcs, pix); + + /* Set the mux port mode */ + err = sparx5_port_mux_set(sparx5, port, conf); + if (err) + return err; + + /* Configure MAC vlan awareness */ + err = sparx5_port_max_tags_set(sparx5, port); + if (err) + return err; + + /* Set Max Length */ + spx5_rmw(DEV2G5_MAC_MAXLEN_CFG_MAX_LEN_SET(ETH_MAXLEN), + DEV2G5_MAC_MAXLEN_CFG_MAX_LEN, + sparx5, + DEV2G5_MAC_MAXLEN_CFG(port->portno)); + + /* 1G/2G5: Signal Detect configuration */ + spx5_wr(DEV2G5_PCS1G_SD_CFG_SD_POL_SET(sd_pol) | + DEV2G5_PCS1G_SD_CFG_SD_SEL_SET(sd_sel) | + DEV2G5_PCS1G_SD_CFG_SD_ENA_SET(sd_ena), + sparx5, + DEV2G5_PCS1G_SD_CFG(port->portno)); + + /* Set Pause WM hysteresis */ + spx5_rmw(QSYS_PAUSE_CFG_PAUSE_START_SET(pause_start) | + QSYS_PAUSE_CFG_PAUSE_STOP_SET(pause_stop) | + QSYS_PAUSE_CFG_PAUSE_ENA_SET(1), + QSYS_PAUSE_CFG_PAUSE_START | + QSYS_PAUSE_CFG_PAUSE_STOP | + QSYS_PAUSE_CFG_PAUSE_ENA, + sparx5, + QSYS_PAUSE_CFG(port->portno)); + + /* Port ATOP. Frames are tail dropped when this WM is hit */ + spx5_wr(QSYS_ATOP_ATOP_SET(atop), + sparx5, + QSYS_ATOP(port->portno)); + + /* Discard pause frame 01-80-C2-00-00-01 */ + spx5_wr(PAUSE_DISCARD, sparx5, ANA_CL_CAPTURE_BPDU_CFG(port->portno)); + + if (conf->portmode == PHY_INTERFACE_MODE_QSGMII || + conf->portmode == PHY_INTERFACE_MODE_SGMII) { + err = sparx5_serdes_set(sparx5, port, conf); + if (err) + return err; + + if (!sparx5_port_is_2g5(port->portno)) + /* Enable shadow device */ + spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV10G_SHADOW_ENA_SET(1), + DSM_DEV_TX_STOP_WM_CFG_DEV10G_SHADOW_ENA, + sparx5, + DSM_DEV_TX_STOP_WM_CFG(port->portno)); + + sparx5_dev_switch(sparx5, port->portno, false); + } + if (conf->portmode == PHY_INTERFACE_MODE_QSGMII) { + // All ports must be PCS enabled in QSGMII mode + spx5_rmw(DEV2G5_DEV_RST_CTRL_PCS_TX_RST_SET(0), + DEV2G5_DEV_RST_CTRL_PCS_TX_RST, + sparx5, + DEV2G5_DEV_RST_CTRL(port->portno)); + } + /* Default IFGs for 1G */ + spx5_wr(DEV2G5_MAC_IFG_CFG_TX_IFG_SET(6) | + DEV2G5_MAC_IFG_CFG_RX_IFG1_SET(0) | + DEV2G5_MAC_IFG_CFG_RX_IFG2_SET(0), + sparx5, + DEV2G5_MAC_IFG_CFG(port->portno)); + + if (sparx5_port_is_2g5(port->portno)) + return 0; /* Low speed device only - return */ + + /* Now setup the high speed device */ + if (conf->portmode == PHY_INTERFACE_MODE_NA) + conf->portmode = PHY_INTERFACE_MODE_10GBASER; + + if (sparx5_is_baser(conf->portmode)) + sparx5_dev_switch(sparx5, port->portno, true); + + /* Set Max Length */ + spx5_inst_rmw(DEV10G_MAC_MAXLEN_CFG_MAX_LEN_SET(ETH_MAXLEN), + DEV10G_MAC_MAXLEN_CFG_MAX_LEN, + devinst, + DEV10G_MAC_ENA_CFG(0)); + + /* Handle Signal Detect in 10G PCS */ + spx5_inst_wr(PCS10G_BR_PCS_SD_CFG_SD_POL_SET(sd_pol) | + PCS10G_BR_PCS_SD_CFG_SD_SEL_SET(sd_sel) | + PCS10G_BR_PCS_SD_CFG_SD_ENA_SET(sd_ena), + pcsinst, + PCS10G_BR_PCS_SD_CFG(0)); + + if (sparx5_port_is_25g(port->portno)) { + /* Handle Signal Detect in 25G PCS */ + 
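+ /* sd_pol/sd_sel/sd_ena are the same signal detect settings
+ * (signd_active_high, !signd_internal, signd_enable) that were
+ * programmed into the 1G and 10G PCS above.
+ */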
spx5_wr(DEV25G_PCS25G_SD_CFG_SD_POL_SET(sd_pol) | + DEV25G_PCS25G_SD_CFG_SD_SEL_SET(sd_sel) | + DEV25G_PCS25G_SD_CFG_SD_ENA_SET(sd_ena), + sparx5, + DEV25G_PCS25G_SD_CFG(pix)); + } + + return 0; +} + +void sparx5_port_enable(struct sparx5_port *port, bool enable) +{ + struct sparx5 *sparx5 = port->sparx5; + + /* Enable port for frame transfer? */ + spx5_rmw(QFWD_SWITCH_PORT_MODE_PORT_ENA_SET(enable), + QFWD_SWITCH_PORT_MODE_PORT_ENA, + sparx5, + QFWD_SWITCH_PORT_MODE(port->portno)); +} diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_port.h b/drivers/net/ethernet/microchip/sparx5/sparx5_port.h new file mode 100644 index 000000000..2f8043eac --- /dev/null +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_port.h @@ -0,0 +1,94 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Microchip Sparx5 Switch driver + * + * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries. + */ + +#ifndef __SPARX5_PORT_H__ +#define __SPARX5_PORT_H__ + +#include "sparx5_main.h" + +static inline bool sparx5_port_is_2g5(int portno) +{ + return portno >= 16 && portno <= 47; +} + +static inline bool sparx5_port_is_5g(int portno) +{ + return portno <= 11 || portno == 64; +} + +static inline bool sparx5_port_is_10g(int portno) +{ + return (portno >= 12 && portno <= 15) || (portno >= 48 && portno <= 55); +} + +static inline bool sparx5_port_is_25g(int portno) +{ + return portno >= 56 && portno <= 63; +} + +static inline u32 sparx5_to_high_dev(int port) +{ + if (sparx5_port_is_5g(port)) + return TARGET_DEV5G; + if (sparx5_port_is_10g(port)) + return TARGET_DEV10G; + return TARGET_DEV25G; +} + +static inline u32 sparx5_to_pcs_dev(int port) +{ + if (sparx5_port_is_5g(port)) + return TARGET_PCS5G_BR; + if (sparx5_port_is_10g(port)) + return TARGET_PCS10G_BR; + return TARGET_PCS25G_BR; +} + +static inline int sparx5_port_dev_index(int port) +{ + if (sparx5_port_is_2g5(port)) + return port; + if (sparx5_port_is_5g(port)) + return (port <= 11 ? port : 12); + if (sparx5_port_is_10g(port)) + return (port >= 12 && port <= 15) ? + port - 12 : port - 44; + return (port - 56); +} + +int sparx5_port_init(struct sparx5 *sparx5, + struct sparx5_port *spx5_port, + struct sparx5_port_config *conf); + +int sparx5_port_config(struct sparx5 *sparx5, + struct sparx5_port *spx5_port, + struct sparx5_port_config *conf); + +int sparx5_port_pcs_set(struct sparx5 *sparx5, + struct sparx5_port *port, + struct sparx5_port_config *conf); + +int sparx5_serdes_set(struct sparx5 *sparx5, + struct sparx5_port *spx5_port, + struct sparx5_port_config *conf); + +struct sparx5_port_status { + bool link; + bool link_down; + int speed; + bool an_complete; + int duplex; + int pause; +}; + +int sparx5_get_port_status(struct sparx5 *sparx5, + struct sparx5_port *port, + struct sparx5_port_status *status); + +void sparx5_port_enable(struct sparx5_port *port, bool enable); +int sparx5_port_fwd_urg(struct sparx5 *sparx5, u32 speed); + +#endif /* __SPARX5_PORT_H__ */ diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c b/drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c new file mode 100644 index 000000000..69e76634f --- /dev/null +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c @@ -0,0 +1,685 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Microchip Sparx5 Switch driver + * + * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries. 
+ * + * The Sparx5 Chip Register Model can be browsed at this location: + * https://github.com/microchip-ung/sparx-5_reginfo + */ +#include <linux/ptp_classify.h> + +#include "sparx5_main_regs.h" +#include "sparx5_main.h" + +#define SPARX5_MAX_PTP_ID 512 + +#define TOD_ACC_PIN 0x4 + +enum { + PTP_PIN_ACTION_IDLE = 0, + PTP_PIN_ACTION_LOAD, + PTP_PIN_ACTION_SAVE, + PTP_PIN_ACTION_CLOCK, + PTP_PIN_ACTION_DELTA, + PTP_PIN_ACTION_TOD +}; + +static u64 sparx5_ptp_get_1ppm(struct sparx5 *sparx5) +{ + /* Represents 1ppm adjustment in 2^59 format with 1.59687500000(625) + * 1.99609375000(500), 3.99218750000(250) as reference + * The value is calculated as following: + * (1/1000000)/((2^-59)/X) + */ + + u64 res = 0; + + switch (sparx5->coreclock) { + case SPX5_CORE_CLOCK_250MHZ: + res = 2301339409586; + break; + case SPX5_CORE_CLOCK_500MHZ: + res = 1150669704793; + break; + case SPX5_CORE_CLOCK_625MHZ: + res = 920535763834; + break; + default: + WARN(1, "Invalid core clock"); + break; + } + + return res; +} + +static u64 sparx5_ptp_get_nominal_value(struct sparx5 *sparx5) +{ + u64 res = 0; + + switch (sparx5->coreclock) { + case SPX5_CORE_CLOCK_250MHZ: + res = 0x1FF0000000000000; + break; + case SPX5_CORE_CLOCK_500MHZ: + res = 0x0FF8000000000000; + break; + case SPX5_CORE_CLOCK_625MHZ: + res = 0x0CC6666666666666; + break; + default: + WARN(1, "Invalid core clock"); + break; + } + + return res; +} + +int sparx5_ptp_hwtstamp_set(struct sparx5_port *port, struct ifreq *ifr) +{ + struct sparx5 *sparx5 = port->sparx5; + struct hwtstamp_config cfg; + struct sparx5_phc *phc; + + /* For now don't allow to run ptp on ports that are part of a bridge, + * because in case of transparent clock the HW will still forward the + * frames, so there would be duplicate frames + */ + + if (test_bit(port->portno, sparx5->bridge_mask)) + return -EINVAL; + + if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) + return -EFAULT; + + switch (cfg.tx_type) { + case HWTSTAMP_TX_ON: + port->ptp_cmd = IFH_REW_OP_TWO_STEP_PTP; + break; + case HWTSTAMP_TX_ONESTEP_SYNC: + port->ptp_cmd = IFH_REW_OP_ONE_STEP_PTP; + break; + case HWTSTAMP_TX_OFF: + port->ptp_cmd = IFH_REW_OP_NOOP; + break; + default: + return -ERANGE; + } + + switch (cfg.rx_filter) { + case HWTSTAMP_FILTER_NONE: + break; + case HWTSTAMP_FILTER_ALL: + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + case HWTSTAMP_FILTER_NTP_ALL: + cfg.rx_filter = HWTSTAMP_FILTER_ALL; + break; + default: + return -ERANGE; + } + + /* Commit back the result & save it */ + mutex_lock(&sparx5->ptp_lock); + phc = &sparx5->phc[SPARX5_PHC_PORT]; + memcpy(&phc->hwtstamp_config, &cfg, sizeof(cfg)); + mutex_unlock(&sparx5->ptp_lock); + + return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0; +} + +int sparx5_ptp_hwtstamp_get(struct sparx5_port *port, struct ifreq *ifr) +{ + struct sparx5 *sparx5 = port->sparx5; + struct sparx5_phc *phc; + + phc = &sparx5->phc[SPARX5_PHC_PORT]; + return copy_to_user(ifr->ifr_data, &phc->hwtstamp_config, + sizeof(phc->hwtstamp_config)) ? 
-EFAULT : 0; +} + +static void sparx5_ptp_classify(struct sparx5_port *port, struct sk_buff *skb, + u8 *rew_op, u8 *pdu_type, u8 *pdu_w16_offset) +{ + struct ptp_header *header; + u8 msgtype; + int type; + + if (port->ptp_cmd == IFH_REW_OP_NOOP) { + *rew_op = IFH_REW_OP_NOOP; + *pdu_type = IFH_PDU_TYPE_NONE; + *pdu_w16_offset = 0; + return; + } + + type = ptp_classify_raw(skb); + if (type == PTP_CLASS_NONE) { + *rew_op = IFH_REW_OP_NOOP; + *pdu_type = IFH_PDU_TYPE_NONE; + *pdu_w16_offset = 0; + return; + } + + header = ptp_parse_header(skb, type); + if (!header) { + *rew_op = IFH_REW_OP_NOOP; + *pdu_type = IFH_PDU_TYPE_NONE; + *pdu_w16_offset = 0; + return; + } + + *pdu_w16_offset = 7; + if (type & PTP_CLASS_L2) + *pdu_type = IFH_PDU_TYPE_PTP; + if (type & PTP_CLASS_IPV4) + *pdu_type = IFH_PDU_TYPE_IPV4_UDP_PTP; + if (type & PTP_CLASS_IPV6) + *pdu_type = IFH_PDU_TYPE_IPV6_UDP_PTP; + + if (port->ptp_cmd == IFH_REW_OP_TWO_STEP_PTP) { + *rew_op = IFH_REW_OP_TWO_STEP_PTP; + return; + } + + /* If it is sync and run 1 step then set the correct operation, + * otherwise run as 2 step + */ + msgtype = ptp_get_msgtype(header, type); + if ((msgtype & 0xf) == 0) { + *rew_op = IFH_REW_OP_ONE_STEP_PTP; + return; + } + + *rew_op = IFH_REW_OP_TWO_STEP_PTP; +} + +static void sparx5_ptp_txtstamp_old_release(struct sparx5_port *port) +{ + struct sk_buff *skb, *skb_tmp; + unsigned long flags; + + spin_lock_irqsave(&port->tx_skbs.lock, flags); + skb_queue_walk_safe(&port->tx_skbs, skb, skb_tmp) { + if time_after(SPARX5_SKB_CB(skb)->jiffies + SPARX5_PTP_TIMEOUT, + jiffies) + break; + + __skb_unlink(skb, &port->tx_skbs); + dev_kfree_skb_any(skb); + } + spin_unlock_irqrestore(&port->tx_skbs.lock, flags); +} + +int sparx5_ptp_txtstamp_request(struct sparx5_port *port, + struct sk_buff *skb) +{ + struct sparx5 *sparx5 = port->sparx5; + u8 rew_op, pdu_type, pdu_w16_offset; + unsigned long flags; + + sparx5_ptp_classify(port, skb, &rew_op, &pdu_type, &pdu_w16_offset); + SPARX5_SKB_CB(skb)->rew_op = rew_op; + SPARX5_SKB_CB(skb)->pdu_type = pdu_type; + SPARX5_SKB_CB(skb)->pdu_w16_offset = pdu_w16_offset; + + if (rew_op != IFH_REW_OP_TWO_STEP_PTP) + return 0; + + sparx5_ptp_txtstamp_old_release(port); + + spin_lock_irqsave(&sparx5->ptp_ts_id_lock, flags); + if (sparx5->ptp_skbs == SPARX5_MAX_PTP_ID) { + spin_unlock_irqrestore(&sparx5->ptp_ts_id_lock, flags); + return -EBUSY; + } + + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + + skb_queue_tail(&port->tx_skbs, skb); + SPARX5_SKB_CB(skb)->ts_id = port->ts_id; + SPARX5_SKB_CB(skb)->jiffies = jiffies; + + sparx5->ptp_skbs++; + port->ts_id++; + if (port->ts_id == SPARX5_MAX_PTP_ID) + port->ts_id = 0; + + spin_unlock_irqrestore(&sparx5->ptp_ts_id_lock, flags); + + return 0; +} + +void sparx5_ptp_txtstamp_release(struct sparx5_port *port, + struct sk_buff *skb) +{ + struct sparx5 *sparx5 = port->sparx5; + unsigned long flags; + + spin_lock_irqsave(&sparx5->ptp_ts_id_lock, flags); + port->ts_id--; + sparx5->ptp_skbs--; + skb_unlink(skb, &port->tx_skbs); + spin_unlock_irqrestore(&sparx5->ptp_ts_id_lock, flags); +} + +static void sparx5_get_hwtimestamp(struct sparx5 *sparx5, + struct timespec64 *ts, + u32 nsec) +{ + /* Read current PTP time to get seconds */ + unsigned long flags; + u32 curr_nsec; + + spin_lock_irqsave(&sparx5->ptp_clock_lock, flags); + + spx5_rmw(PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(PTP_PIN_ACTION_SAVE) | + PTP_PTP_PIN_CFG_PTP_PIN_DOM_SET(SPARX5_PHC_PORT) | + PTP_PTP_PIN_CFG_PTP_PIN_SYNC_SET(0), + PTP_PTP_PIN_CFG_PTP_PIN_ACTION | + PTP_PTP_PIN_CFG_PTP_PIN_DOM 
| + PTP_PTP_PIN_CFG_PTP_PIN_SYNC, + sparx5, PTP_PTP_PIN_CFG(TOD_ACC_PIN)); + + ts->tv_sec = spx5_rd(sparx5, PTP_PTP_TOD_SEC_LSB(TOD_ACC_PIN)); + curr_nsec = spx5_rd(sparx5, PTP_PTP_TOD_NSEC(TOD_ACC_PIN)); + + ts->tv_nsec = nsec; + + /* Sec has incremented since the ts was registered */ + if (curr_nsec < nsec) + ts->tv_sec--; + + spin_unlock_irqrestore(&sparx5->ptp_clock_lock, flags); +} + +irqreturn_t sparx5_ptp_irq_handler(int irq, void *args) +{ + int budget = SPARX5_MAX_PTP_ID; + struct sparx5 *sparx5 = args; + + while (budget--) { + struct sk_buff *skb, *skb_tmp, *skb_match = NULL; + struct skb_shared_hwtstamps shhwtstamps; + struct sparx5_port *port; + struct timespec64 ts; + unsigned long flags; + u32 val, id, txport; + u32 delay; + + val = spx5_rd(sparx5, REW_PTP_TWOSTEP_CTRL); + + /* Check if a timestamp can be retrieved */ + if (!(val & REW_PTP_TWOSTEP_CTRL_PTP_VLD)) + break; + + WARN_ON(val & REW_PTP_TWOSTEP_CTRL_PTP_OVFL); + + if (!(val & REW_PTP_TWOSTEP_CTRL_STAMP_TX)) + continue; + + /* Retrieve the ts Tx port */ + txport = REW_PTP_TWOSTEP_CTRL_STAMP_PORT_GET(val); + + /* Retrieve its associated skb */ + port = sparx5->ports[txport]; + + /* Retrieve the delay */ + delay = spx5_rd(sparx5, REW_PTP_TWOSTEP_STAMP); + delay = REW_PTP_TWOSTEP_STAMP_STAMP_NSEC_GET(delay); + + /* Get next timestamp from fifo, which needs to be the + * rx timestamp which represents the id of the frame + */ + spx5_rmw(REW_PTP_TWOSTEP_CTRL_PTP_NXT_SET(1), + REW_PTP_TWOSTEP_CTRL_PTP_NXT, + sparx5, REW_PTP_TWOSTEP_CTRL); + + val = spx5_rd(sparx5, REW_PTP_TWOSTEP_CTRL); + + /* Check if a timestamp can be retried */ + if (!(val & REW_PTP_TWOSTEP_CTRL_PTP_VLD)) + break; + + /* Read RX timestamping to get the ID */ + id = spx5_rd(sparx5, REW_PTP_TWOSTEP_STAMP); + id <<= 8; + id |= spx5_rd(sparx5, REW_PTP_TWOSTEP_STAMP_SUBNS); + + spin_lock_irqsave(&port->tx_skbs.lock, flags); + skb_queue_walk_safe(&port->tx_skbs, skb, skb_tmp) { + if (SPARX5_SKB_CB(skb)->ts_id != id) + continue; + + __skb_unlink(skb, &port->tx_skbs); + skb_match = skb; + break; + } + spin_unlock_irqrestore(&port->tx_skbs.lock, flags); + + /* Next ts */ + spx5_rmw(REW_PTP_TWOSTEP_CTRL_PTP_NXT_SET(1), + REW_PTP_TWOSTEP_CTRL_PTP_NXT, + sparx5, REW_PTP_TWOSTEP_CTRL); + + if (WARN_ON(!skb_match)) + continue; + + spin_lock(&sparx5->ptp_ts_id_lock); + sparx5->ptp_skbs--; + spin_unlock(&sparx5->ptp_ts_id_lock); + + /* Get the h/w timestamp */ + sparx5_get_hwtimestamp(sparx5, &ts, delay); + + /* Set the timestamp into the skb */ + shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec); + skb_tstamp_tx(skb_match, &shhwtstamps); + + dev_kfree_skb_any(skb_match); + } + + return IRQ_HANDLED; +} + +static int sparx5_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) +{ + struct sparx5_phc *phc = container_of(ptp, struct sparx5_phc, info); + struct sparx5 *sparx5 = phc->sparx5; + unsigned long flags; + bool neg_adj = 0; + u64 tod_inc; + u64 ref; + + if (!scaled_ppm) + return 0; + + if (scaled_ppm < 0) { + neg_adj = 1; + scaled_ppm = -scaled_ppm; + } + + tod_inc = sparx5_ptp_get_nominal_value(sparx5); + + /* The multiplication is split in 2 separate additions because of + * overflow issues. If scaled_ppm with 16bit fractional part was bigger + * than 20ppm then we got overflow. + */ + ref = sparx5_ptp_get_1ppm(sparx5) * (scaled_ppm >> 16); + ref += (sparx5_ptp_get_1ppm(sparx5) * (0xffff & scaled_ppm)) >> 16; + tod_inc = neg_adj ? 
tod_inc - ref : tod_inc + ref; + + spin_lock_irqsave(&sparx5->ptp_clock_lock, flags); + + spx5_rmw(PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS_SET(1 << BIT(phc->index)), + PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS, + sparx5, PTP_PTP_DOM_CFG); + + spx5_wr((u32)tod_inc & 0xFFFFFFFF, sparx5, + PTP_CLK_PER_CFG(phc->index, 0)); + spx5_wr((u32)(tod_inc >> 32), sparx5, + PTP_CLK_PER_CFG(phc->index, 1)); + + spx5_rmw(PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS_SET(0), + PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS, sparx5, + PTP_PTP_DOM_CFG); + + spin_unlock_irqrestore(&sparx5->ptp_clock_lock, flags); + + return 0; +} + +static int sparx5_ptp_settime64(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + struct sparx5_phc *phc = container_of(ptp, struct sparx5_phc, info); + struct sparx5 *sparx5 = phc->sparx5; + unsigned long flags; + + spin_lock_irqsave(&sparx5->ptp_clock_lock, flags); + + /* Must be in IDLE mode before the time can be loaded */ + spx5_rmw(PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(PTP_PIN_ACTION_IDLE) | + PTP_PTP_PIN_CFG_PTP_PIN_DOM_SET(phc->index) | + PTP_PTP_PIN_CFG_PTP_PIN_SYNC_SET(0), + PTP_PTP_PIN_CFG_PTP_PIN_ACTION | + PTP_PTP_PIN_CFG_PTP_PIN_DOM | + PTP_PTP_PIN_CFG_PTP_PIN_SYNC, + sparx5, PTP_PTP_PIN_CFG(TOD_ACC_PIN)); + + /* Set new value */ + spx5_wr(PTP_PTP_TOD_SEC_MSB_PTP_TOD_SEC_MSB_SET(upper_32_bits(ts->tv_sec)), + sparx5, PTP_PTP_TOD_SEC_MSB(TOD_ACC_PIN)); + spx5_wr(lower_32_bits(ts->tv_sec), + sparx5, PTP_PTP_TOD_SEC_LSB(TOD_ACC_PIN)); + spx5_wr(ts->tv_nsec, sparx5, PTP_PTP_TOD_NSEC(TOD_ACC_PIN)); + + /* Apply new values */ + spx5_rmw(PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(PTP_PIN_ACTION_LOAD) | + PTP_PTP_PIN_CFG_PTP_PIN_DOM_SET(phc->index) | + PTP_PTP_PIN_CFG_PTP_PIN_SYNC_SET(0), + PTP_PTP_PIN_CFG_PTP_PIN_ACTION | + PTP_PTP_PIN_CFG_PTP_PIN_DOM | + PTP_PTP_PIN_CFG_PTP_PIN_SYNC, + sparx5, PTP_PTP_PIN_CFG(TOD_ACC_PIN)); + + spin_unlock_irqrestore(&sparx5->ptp_clock_lock, flags); + + return 0; +} + +static int sparx5_ptp_gettime64(struct ptp_clock_info *ptp, + struct timespec64 *ts) +{ + struct sparx5_phc *phc = container_of(ptp, struct sparx5_phc, info); + struct sparx5 *sparx5 = phc->sparx5; + unsigned long flags; + time64_t s; + s64 ns; + + spin_lock_irqsave(&sparx5->ptp_clock_lock, flags); + + spx5_rmw(PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(PTP_PIN_ACTION_SAVE) | + PTP_PTP_PIN_CFG_PTP_PIN_DOM_SET(phc->index) | + PTP_PTP_PIN_CFG_PTP_PIN_SYNC_SET(0), + PTP_PTP_PIN_CFG_PTP_PIN_ACTION | + PTP_PTP_PIN_CFG_PTP_PIN_DOM | + PTP_PTP_PIN_CFG_PTP_PIN_SYNC, + sparx5, PTP_PTP_PIN_CFG(TOD_ACC_PIN)); + + s = spx5_rd(sparx5, PTP_PTP_TOD_SEC_MSB(TOD_ACC_PIN)); + s <<= 32; + s |= spx5_rd(sparx5, PTP_PTP_TOD_SEC_LSB(TOD_ACC_PIN)); + ns = spx5_rd(sparx5, PTP_PTP_TOD_NSEC(TOD_ACC_PIN)); + ns &= PTP_PTP_TOD_NSEC_PTP_TOD_NSEC; + + spin_unlock_irqrestore(&sparx5->ptp_clock_lock, flags); + + /* Deal with negative values */ + if ((ns & 0xFFFFFFF0) == 0x3FFFFFF0) { + s--; + ns &= 0xf; + ns += 999999984; + } + + set_normalized_timespec64(ts, s, ns); + return 0; +} + +static int sparx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) +{ + struct sparx5_phc *phc = container_of(ptp, struct sparx5_phc, info); + struct sparx5 *sparx5 = phc->sparx5; + + if (delta > -(NSEC_PER_SEC / 2) && delta < (NSEC_PER_SEC / 2)) { + unsigned long flags; + + spin_lock_irqsave(&sparx5->ptp_clock_lock, flags); + + /* Must be in IDLE mode before the time can be loaded */ + spx5_rmw(PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(PTP_PIN_ACTION_IDLE) | + PTP_PTP_PIN_CFG_PTP_PIN_DOM_SET(phc->index) | + PTP_PTP_PIN_CFG_PTP_PIN_SYNC_SET(0), + PTP_PTP_PIN_CFG_PTP_PIN_ACTION | + 
PTP_PTP_PIN_CFG_PTP_PIN_DOM | + PTP_PTP_PIN_CFG_PTP_PIN_SYNC, + sparx5, PTP_PTP_PIN_CFG(TOD_ACC_PIN)); + + spx5_wr(PTP_PTP_TOD_NSEC_PTP_TOD_NSEC_SET(delta), + sparx5, PTP_PTP_TOD_NSEC(TOD_ACC_PIN)); + + /* Adjust time with the value of PTP_TOD_NSEC */ + spx5_rmw(PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(PTP_PIN_ACTION_DELTA) | + PTP_PTP_PIN_CFG_PTP_PIN_DOM_SET(phc->index) | + PTP_PTP_PIN_CFG_PTP_PIN_SYNC_SET(0), + PTP_PTP_PIN_CFG_PTP_PIN_ACTION | + PTP_PTP_PIN_CFG_PTP_PIN_DOM | + PTP_PTP_PIN_CFG_PTP_PIN_SYNC, + sparx5, PTP_PTP_PIN_CFG(TOD_ACC_PIN)); + + spin_unlock_irqrestore(&sparx5->ptp_clock_lock, flags); + } else { + /* Fall back using sparx5_ptp_settime64 which is not exact */ + struct timespec64 ts; + u64 now; + + sparx5_ptp_gettime64(ptp, &ts); + + now = ktime_to_ns(timespec64_to_ktime(ts)); + ts = ns_to_timespec64(now + delta); + + sparx5_ptp_settime64(ptp, &ts); + } + + return 0; +} + +static struct ptp_clock_info sparx5_ptp_clock_info = { + .owner = THIS_MODULE, + .name = "sparx5 ptp", + .max_adj = 200000, + .gettime64 = sparx5_ptp_gettime64, + .settime64 = sparx5_ptp_settime64, + .adjtime = sparx5_ptp_adjtime, + .adjfine = sparx5_ptp_adjfine, +}; + +static int sparx5_ptp_phc_init(struct sparx5 *sparx5, + int index, + struct ptp_clock_info *clock_info) +{ + struct sparx5_phc *phc = &sparx5->phc[index]; + + phc->info = *clock_info; + phc->clock = ptp_clock_register(&phc->info, sparx5->dev); + if (IS_ERR(phc->clock)) + return PTR_ERR(phc->clock); + + phc->index = index; + phc->sparx5 = sparx5; + + /* PTP Rx stamping is always enabled. */ + phc->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; + + return 0; +} + +int sparx5_ptp_init(struct sparx5 *sparx5) +{ + u64 tod_adj = sparx5_ptp_get_nominal_value(sparx5); + struct sparx5_port *port; + int err, i; + + if (!sparx5->ptp) + return 0; + + for (i = 0; i < SPARX5_PHC_COUNT; ++i) { + err = sparx5_ptp_phc_init(sparx5, i, &sparx5_ptp_clock_info); + if (err) + return err; + } + + spin_lock_init(&sparx5->ptp_clock_lock); + spin_lock_init(&sparx5->ptp_ts_id_lock); + mutex_init(&sparx5->ptp_lock); + + /* Disable master counters */ + spx5_wr(PTP_PTP_DOM_CFG_PTP_ENA_SET(0), sparx5, PTP_PTP_DOM_CFG); + + /* Configure the nominal TOD increment per clock cycle */ + spx5_rmw(PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS_SET(0x7), + PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS, + sparx5, PTP_PTP_DOM_CFG); + + for (i = 0; i < SPARX5_PHC_COUNT; ++i) { + spx5_wr((u32)tod_adj & 0xFFFFFFFF, sparx5, + PTP_CLK_PER_CFG(i, 0)); + spx5_wr((u32)(tod_adj >> 32), sparx5, + PTP_CLK_PER_CFG(i, 1)); + } + + spx5_rmw(PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS_SET(0), + PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS, + sparx5, PTP_PTP_DOM_CFG); + + /* Enable master counters */ + spx5_wr(PTP_PTP_DOM_CFG_PTP_ENA_SET(0x7), sparx5, PTP_PTP_DOM_CFG); + + for (i = 0; i < SPX5_PORTS; i++) { + port = sparx5->ports[i]; + if (!port) + continue; + + skb_queue_head_init(&port->tx_skbs); + } + + return 0; +} + +void sparx5_ptp_deinit(struct sparx5 *sparx5) +{ + struct sparx5_port *port; + int i; + + for (i = 0; i < SPX5_PORTS; i++) { + port = sparx5->ports[i]; + if (!port) + continue; + + skb_queue_purge(&port->tx_skbs); + } + + for (i = 0; i < SPARX5_PHC_COUNT; ++i) + ptp_clock_unregister(sparx5->phc[i].clock); +} + +void sparx5_ptp_rxtstamp(struct sparx5 *sparx5, struct sk_buff *skb, + u64 timestamp) +{ + struct skb_shared_hwtstamps *shhwtstamps; + struct sparx5_phc *phc; + struct timespec64 ts; + u64 full_ts_in_ns; + + if (!sparx5->ptp) + return; + + phc = &sparx5->phc[SPARX5_PHC_PORT]; + sparx5_ptp_gettime64(&phc->info, &ts); + 
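+ /* Only the nanosecond part of the RX timestamp is available here, so
+ * the seconds are taken from the current PHC time. If the captured
+ * nanoseconds are ahead of the current nanoseconds, the second counter
+ * has rolled over since the frame was timestamped, so step it back by
+ * one below.
+ */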
+ if (ts.tv_nsec < timestamp) + ts.tv_sec--; + ts.tv_nsec = timestamp; + full_ts_in_ns = ktime_set(ts.tv_sec, ts.tv_nsec); + + shhwtstamps = skb_hwtstamps(skb); + shhwtstamps->hwtstamp = full_ts_in_ns; +} diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_qos.c b/drivers/net/ethernet/microchip/sparx5/sparx5_qos.c new file mode 100644 index 000000000..1e79d0ef0 --- /dev/null +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_qos.c @@ -0,0 +1,513 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Microchip Sparx5 Switch driver + * + * Copyright (c) 2022 Microchip Technology Inc. and its subsidiaries. + */ + +#include <net/pkt_cls.h> + +#include "sparx5_main.h" +#include "sparx5_qos.h" + +/* Max rates for leak groups */ +static const u32 spx5_hsch_max_group_rate[SPX5_HSCH_LEAK_GRP_CNT] = { + 1048568, /* 1.049 Gbps */ + 2621420, /* 2.621 Gbps */ + 10485680, /* 10.486 Gbps */ + 26214200 /* 26.214 Gbps */ +}; + +static struct sparx5_layer layers[SPX5_HSCH_LAYER_CNT]; + +static u32 sparx5_lg_get_leak_time(struct sparx5 *sparx5, u32 layer, u32 group) +{ + u32 value; + + value = spx5_rd(sparx5, HSCH_HSCH_TIMER_CFG(layer, group)); + return HSCH_HSCH_TIMER_CFG_LEAK_TIME_GET(value); +} + +static void sparx5_lg_set_leak_time(struct sparx5 *sparx5, u32 layer, u32 group, + u32 leak_time) +{ + spx5_wr(HSCH_HSCH_TIMER_CFG_LEAK_TIME_SET(leak_time), sparx5, + HSCH_HSCH_TIMER_CFG(layer, group)); +} + +static u32 sparx5_lg_get_first(struct sparx5 *sparx5, u32 layer, u32 group) +{ + u32 value; + + value = spx5_rd(sparx5, HSCH_HSCH_LEAK_CFG(layer, group)); + return HSCH_HSCH_LEAK_CFG_LEAK_FIRST_GET(value); +} + +static u32 sparx5_lg_get_next(struct sparx5 *sparx5, u32 layer, u32 group, + u32 idx) + +{ + u32 value; + + value = spx5_rd(sparx5, HSCH_SE_CONNECT(idx)); + return HSCH_SE_CONNECT_SE_LEAK_LINK_GET(value); +} + +static u32 sparx5_lg_get_last(struct sparx5 *sparx5, u32 layer, u32 group) +{ + u32 itr, next; + + itr = sparx5_lg_get_first(sparx5, layer, group); + + for (;;) { + next = sparx5_lg_get_next(sparx5, layer, group, itr); + if (itr == next) + return itr; + + itr = next; + } +} + +static bool sparx5_lg_is_last(struct sparx5 *sparx5, u32 layer, u32 group, + u32 idx) +{ + return idx == sparx5_lg_get_next(sparx5, layer, group, idx); +} + +static bool sparx5_lg_is_first(struct sparx5 *sparx5, u32 layer, u32 group, + u32 idx) +{ + return idx == sparx5_lg_get_first(sparx5, layer, group); +} + +static bool sparx5_lg_is_empty(struct sparx5 *sparx5, u32 layer, u32 group) +{ + return sparx5_lg_get_leak_time(sparx5, layer, group) == 0; +} + +static bool sparx5_lg_is_singular(struct sparx5 *sparx5, u32 layer, u32 group) +{ + if (sparx5_lg_is_empty(sparx5, layer, group)) + return false; + + return sparx5_lg_get_first(sparx5, layer, group) == + sparx5_lg_get_last(sparx5, layer, group); +} + +static void sparx5_lg_enable(struct sparx5 *sparx5, u32 layer, u32 group, + u32 leak_time) +{ + sparx5_lg_set_leak_time(sparx5, layer, group, leak_time); +} + +static void sparx5_lg_disable(struct sparx5 *sparx5, u32 layer, u32 group) +{ + sparx5_lg_set_leak_time(sparx5, layer, group, 0); +} + +static int sparx5_lg_get_group_by_index(struct sparx5 *sparx5, u32 layer, + u32 idx, u32 *group) +{ + u32 itr, next; + int i; + + for (i = 0; i < SPX5_HSCH_LEAK_GRP_CNT; i++) { + if (sparx5_lg_is_empty(sparx5, layer, i)) + continue; + + itr = sparx5_lg_get_first(sparx5, layer, i); + + for (;;) { + next = sparx5_lg_get_next(sparx5, layer, i, itr); + + if (itr == idx) { + *group = i; + return 0; /* Found it */ + } + if (itr == next) + 
break; /* Was not found */ + + itr = next; + } + } + + return -1; +} + +static int sparx5_lg_get_group_by_rate(u32 layer, u32 rate, u32 *group) +{ + struct sparx5_layer *l = &layers[layer]; + struct sparx5_lg *lg; + u32 i; + + for (i = 0; i < SPX5_HSCH_LEAK_GRP_CNT; i++) { + lg = &l->leak_groups[i]; + if (rate <= lg->max_rate) { + *group = i; + return 0; + } + } + + return -1; +} + +static int sparx5_lg_get_adjacent(struct sparx5 *sparx5, u32 layer, u32 group, + u32 idx, u32 *prev, u32 *next, u32 *first) +{ + u32 itr; + + *first = sparx5_lg_get_first(sparx5, layer, group); + *prev = *first; + *next = *first; + itr = *first; + + for (;;) { + *next = sparx5_lg_get_next(sparx5, layer, group, itr); + + if (itr == idx) + return 0; /* Found it */ + + if (itr == *next) + return -1; /* Was not found */ + + *prev = itr; + itr = *next; + } + + return -1; +} + +static int sparx5_lg_conf_set(struct sparx5 *sparx5, u32 layer, u32 group, + u32 se_first, u32 idx, u32 idx_next, bool empty) +{ + u32 leak_time = layers[layer].leak_groups[group].leak_time; + + /* Stop leaking */ + sparx5_lg_disable(sparx5, layer, group); + + if (empty) + return 0; + + /* Select layer */ + spx5_rmw(HSCH_HSCH_CFG_CFG_HSCH_LAYER_SET(layer), + HSCH_HSCH_CFG_CFG_HSCH_LAYER, sparx5, HSCH_HSCH_CFG_CFG); + + /* Link elements */ + spx5_wr(HSCH_SE_CONNECT_SE_LEAK_LINK_SET(idx_next), sparx5, + HSCH_SE_CONNECT(idx)); + + /* Set the first element. */ + spx5_rmw(HSCH_HSCH_LEAK_CFG_LEAK_FIRST_SET(se_first), + HSCH_HSCH_LEAK_CFG_LEAK_FIRST, sparx5, + HSCH_HSCH_LEAK_CFG(layer, group)); + + /* Start leaking */ + sparx5_lg_enable(sparx5, layer, group, leak_time); + + return 0; +} + +static int sparx5_lg_del(struct sparx5 *sparx5, u32 layer, u32 group, u32 idx) +{ + u32 first, next, prev; + bool empty = false; + + /* idx *must* be present in the leak group */ + WARN_ON(sparx5_lg_get_adjacent(sparx5, layer, group, idx, &prev, &next, + &first) < 0); + + if (sparx5_lg_is_singular(sparx5, layer, group)) { + empty = true; + } else if (sparx5_lg_is_last(sparx5, layer, group, idx)) { + /* idx is removed, prev is now last */ + idx = prev; + next = prev; + } else if (sparx5_lg_is_first(sparx5, layer, group, idx)) { + /* idx is removed and points to itself, first is next */ + first = next; + next = idx; + } else { + /* Next is not touched */ + idx = prev; + } + + return sparx5_lg_conf_set(sparx5, layer, group, first, idx, next, + empty); +} + +static int sparx5_lg_add(struct sparx5 *sparx5, u32 layer, u32 new_group, + u32 idx) +{ + u32 first, next, old_group; + + pr_debug("ADD: layer: %d, new_group: %d, idx: %d", layer, new_group, + idx); + + /* Is this SE already shaping ? 
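+ * (i.e. already linked into one of this layer's leak groups)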
*/ + if (sparx5_lg_get_group_by_index(sparx5, layer, idx, &old_group) >= 0) { + if (old_group != new_group) { + /* Delete from old group */ + sparx5_lg_del(sparx5, layer, old_group, idx); + } else { + /* Nothing to do here */ + return 0; + } + } + + /* We always add to head of the list */ + first = idx; + + if (sparx5_lg_is_empty(sparx5, layer, new_group)) + next = idx; + else + next = sparx5_lg_get_first(sparx5, layer, new_group); + + return sparx5_lg_conf_set(sparx5, layer, new_group, first, idx, next, + false); +} + +static int sparx5_shaper_conf_set(struct sparx5_port *port, + const struct sparx5_shaper *sh, u32 layer, + u32 idx, u32 group) +{ + int (*sparx5_lg_action)(struct sparx5 *, u32, u32, u32); + struct sparx5 *sparx5 = port->sparx5; + + if (!sh->rate && !sh->burst) + sparx5_lg_action = &sparx5_lg_del; + else + sparx5_lg_action = &sparx5_lg_add; + + /* Select layer */ + spx5_rmw(HSCH_HSCH_CFG_CFG_HSCH_LAYER_SET(layer), + HSCH_HSCH_CFG_CFG_HSCH_LAYER, sparx5, HSCH_HSCH_CFG_CFG); + + /* Set frame mode */ + spx5_rmw(HSCH_SE_CFG_SE_FRM_MODE_SET(sh->mode), HSCH_SE_CFG_SE_FRM_MODE, + sparx5, HSCH_SE_CFG(idx)); + + /* Set committed rate and burst */ + spx5_wr(HSCH_CIR_CFG_CIR_RATE_SET(sh->rate) | + HSCH_CIR_CFG_CIR_BURST_SET(sh->burst), + sparx5, HSCH_CIR_CFG(idx)); + + /* This has to be done after the shaper configuration has been set */ + sparx5_lg_action(sparx5, layer, group, idx); + + return 0; +} + +static u32 sparx5_weight_to_hw_cost(u32 weight_min, u32 weight) +{ + return ((((SPX5_DWRR_COST_MAX << 4) * weight_min / weight) + 8) >> 4) - + 1; +} + +static int sparx5_dwrr_conf_set(struct sparx5_port *port, + struct sparx5_dwrr *dwrr) +{ + int i; + + spx5_rmw(HSCH_HSCH_CFG_CFG_HSCH_LAYER_SET(2) | + HSCH_HSCH_CFG_CFG_CFG_SE_IDX_SET(port->portno), + HSCH_HSCH_CFG_CFG_HSCH_LAYER | HSCH_HSCH_CFG_CFG_CFG_SE_IDX, + port->sparx5, HSCH_HSCH_CFG_CFG); + + /* Number of *lower* indexes that are arbitrated dwrr */ + spx5_rmw(HSCH_SE_CFG_SE_DWRR_CNT_SET(dwrr->count), + HSCH_SE_CFG_SE_DWRR_CNT, port->sparx5, + HSCH_SE_CFG(port->portno)); + + for (i = 0; i < dwrr->count; i++) { + spx5_rmw(HSCH_DWRR_ENTRY_DWRR_COST_SET(dwrr->cost[i]), + HSCH_DWRR_ENTRY_DWRR_COST, port->sparx5, + HSCH_DWRR_ENTRY(i)); + } + + return 0; +} + +static int sparx5_leak_groups_init(struct sparx5 *sparx5) +{ + struct sparx5_layer *layer; + u32 sys_clk_per_100ps; + struct sparx5_lg *lg; + u32 leak_time_us; + int i, ii; + + sys_clk_per_100ps = spx5_rd(sparx5, HSCH_SYS_CLK_PER); + + for (i = 0; i < SPX5_HSCH_LAYER_CNT; i++) { + layer = &layers[i]; + for (ii = 0; ii < SPX5_HSCH_LEAK_GRP_CNT; ii++) { + lg = &layer->leak_groups[ii]; + lg->max_rate = spx5_hsch_max_group_rate[ii]; + + /* Calculate the leak time in us, to serve a maximum + * rate of 'max_rate' for this group + */ + leak_time_us = (SPX5_SE_RATE_MAX * 1000) / lg->max_rate; + + /* Hardware wants leak time in ns */ + lg->leak_time = 1000 * leak_time_us; + + /* Calculate resolution */ + lg->resolution = 1000 / leak_time_us; + + /* Maximum number of shapers that can be served by + * this leak group + */ + lg->max_ses = (1000 * leak_time_us) / sys_clk_per_100ps; + + /* Example: + * Wanted bandwidth is 100Mbit: + * + * 100 mbps can be served by leak group zero. + * + * leak_time is 125000 ns. 
+ * resolution is: 8 + * + * cir = 100000 / 8 = 12500 + * leaks_pr_sec = 125000 / 10^9 = 8000 + * bw = 12500 * 8000 = 10^8 (100 Mbit) + */ + + /* Disable by default - this also indicates an empty + * leak group + */ + sparx5_lg_disable(sparx5, i, ii); + } + } + + return 0; +} + +int sparx5_qos_init(struct sparx5 *sparx5) +{ + int ret; + + ret = sparx5_leak_groups_init(sparx5); + if (ret < 0) + return ret; + + return 0; +} + +int sparx5_tc_mqprio_add(struct net_device *ndev, u8 num_tc) +{ + int i; + + if (num_tc != SPX5_PRIOS) { + netdev_err(ndev, "Only %d traffic classes supported\n", + SPX5_PRIOS); + return -EINVAL; + } + + netdev_set_num_tc(ndev, num_tc); + + for (i = 0; i < num_tc; i++) + netdev_set_tc_queue(ndev, i, 1, i); + + netdev_dbg(ndev, "dev->num_tc %u dev->real_num_tx_queues %u\n", + ndev->num_tc, ndev->real_num_tx_queues); + + return 0; +} + +int sparx5_tc_mqprio_del(struct net_device *ndev) +{ + netdev_reset_tc(ndev); + + netdev_dbg(ndev, "dev->num_tc %u dev->real_num_tx_queues %u\n", + ndev->num_tc, ndev->real_num_tx_queues); + + return 0; +} + +int sparx5_tc_tbf_add(struct sparx5_port *port, + struct tc_tbf_qopt_offload_replace_params *params, + u32 layer, u32 idx) +{ + struct sparx5_shaper sh = { + .mode = SPX5_SE_MODE_DATARATE, + .rate = div_u64(params->rate.rate_bytes_ps, 1000) * 8, + .burst = params->max_size, + }; + struct sparx5_lg *lg; + u32 group; + + /* Find suitable group for this se */ + if (sparx5_lg_get_group_by_rate(layer, sh.rate, &group) < 0) { + pr_debug("Could not find leak group for se with rate: %d", + sh.rate); + return -EINVAL; + } + + lg = &layers[layer].leak_groups[group]; + + pr_debug("Found matching group (speed: %d)\n", lg->max_rate); + + if (sh.rate < SPX5_SE_RATE_MIN || sh.burst < SPX5_SE_BURST_MIN) + return -EINVAL; + + /* Calculate committed rate and burst */ + sh.rate = DIV_ROUND_UP(sh.rate, lg->resolution); + sh.burst = DIV_ROUND_UP(sh.burst, SPX5_SE_BURST_UNIT); + + if (sh.rate > SPX5_SE_RATE_MAX || sh.burst > SPX5_SE_BURST_MAX) + return -EINVAL; + + return sparx5_shaper_conf_set(port, &sh, layer, idx, group); +} + +int sparx5_tc_tbf_del(struct sparx5_port *port, u32 layer, u32 idx) +{ + struct sparx5_shaper sh = {0}; + u32 group; + + sparx5_lg_get_group_by_index(port->sparx5, layer, idx, &group); + + return sparx5_shaper_conf_set(port, &sh, layer, idx, group); +} + +int sparx5_tc_ets_add(struct sparx5_port *port, + struct tc_ets_qopt_offload_replace_params *params) +{ + struct sparx5_dwrr dwrr = {0}; + /* Minimum weight for each iteration */ + unsigned int w_min = 100; + int i; + + /* Find minimum weight for all dwrr bands */ + for (i = 0; i < SPX5_PRIOS; i++) { + if (params->quanta[i] == 0) + continue; + w_min = min(w_min, params->weights[i]); + } + + for (i = 0; i < SPX5_PRIOS; i++) { + /* Strict band; skip */ + if (params->quanta[i] == 0) + continue; + + dwrr.count++; + + /* On the sparx5, bands with higher indexes are preferred and + * arbitrated strict. Strict bands are put in the lower indexes, + * by tc, so we reverse the bands here. + * + * Also convert the weight to something the hardware + * understands. 
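+ * (sparx5_weight_to_hw_cost() maps the smallest weight to the
+ * maximum DWRR cost and larger weights to proportionally smaller
+ * costs)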
+ */ + dwrr.cost[SPX5_PRIOS - i - 1] = + sparx5_weight_to_hw_cost(w_min, params->weights[i]); + } + + return sparx5_dwrr_conf_set(port, &dwrr); +} + +int sparx5_tc_ets_del(struct sparx5_port *port) +{ + struct sparx5_dwrr dwrr = {0}; + + return sparx5_dwrr_conf_set(port, &dwrr); +} diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_qos.h b/drivers/net/ethernet/microchip/sparx5/sparx5_qos.h new file mode 100644 index 000000000..ced35033a --- /dev/null +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_qos.h @@ -0,0 +1,82 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Microchip Sparx5 Switch driver + * + * Copyright (c) 2022 Microchip Technology Inc. and its subsidiaries. + */ + +#ifndef __SPARX5_QOS_H__ +#define __SPARX5_QOS_H__ + +#include <linux/netdevice.h> + +/* Number of Layers */ +#define SPX5_HSCH_LAYER_CNT 3 + +/* Scheduling elements per layer */ +#define SPX5_HSCH_L0_SE_CNT 5040 +#define SPX5_HSCH_L1_SE_CNT 64 +#define SPX5_HSCH_L2_SE_CNT 64 + +/* Calculate Layer 0 Scheduler Element when using normal hierarchy */ +#define SPX5_HSCH_L0_GET_IDX(port, queue) ((64 * (port)) + (8 * (queue))) + +/* Number of leak groups */ +#define SPX5_HSCH_LEAK_GRP_CNT 4 + +/* Scheduler modes */ +#define SPX5_SE_MODE_LINERATE 0 +#define SPX5_SE_MODE_DATARATE 1 + +/* Rate and burst */ +#define SPX5_SE_RATE_MAX 262143 +#define SPX5_SE_BURST_MAX 127 +#define SPX5_SE_RATE_MIN 1 +#define SPX5_SE_BURST_MIN 1 +#define SPX5_SE_BURST_UNIT 4096 + +/* Dwrr */ +#define SPX5_DWRR_COST_MAX 63 + +struct sparx5_shaper { + u32 mode; + u32 rate; + u32 burst; +}; + +struct sparx5_lg { + u32 max_rate; + u32 resolution; + u32 leak_time; + u32 max_ses; +}; + +struct sparx5_layer { + struct sparx5_lg leak_groups[SPX5_HSCH_LEAK_GRP_CNT]; +}; + +struct sparx5_dwrr { + u32 count; /* Number of inputs running dwrr */ + u8 cost[SPX5_PRIOS]; +}; + +int sparx5_qos_init(struct sparx5 *sparx5); + +/* Multi-Queue Priority */ +int sparx5_tc_mqprio_add(struct net_device *ndev, u8 num_tc); +int sparx5_tc_mqprio_del(struct net_device *ndev); + +/* Token Bucket Filter */ +struct tc_tbf_qopt_offload_replace_params; +int sparx5_tc_tbf_add(struct sparx5_port *port, + struct tc_tbf_qopt_offload_replace_params *params, + u32 layer, u32 idx); +int sparx5_tc_tbf_del(struct sparx5_port *port, u32 layer, u32 idx); + +/* Enhanced Transmission Selection */ +struct tc_ets_qopt_offload_replace_params; +int sparx5_tc_ets_add(struct sparx5_port *port, + struct tc_ets_qopt_offload_replace_params *params); + +int sparx5_tc_ets_del(struct sparx5_port *port); + +#endif /* __SPARX5_QOS_H__ */ diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c b/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c new file mode 100644 index 000000000..4af85d108 --- /dev/null +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c @@ -0,0 +1,763 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Microchip Sparx5 Switch driver + * + * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries. 
+ */ + +#include <linux/if_bridge.h> +#include <net/switchdev.h> + +#include "sparx5_main_regs.h" +#include "sparx5_main.h" + +static struct workqueue_struct *sparx5_owq; + +struct sparx5_switchdev_event_work { + struct work_struct work; + struct switchdev_notifier_fdb_info fdb_info; + struct net_device *dev; + struct sparx5 *sparx5; + unsigned long event; +}; + +static int sparx5_port_attr_pre_bridge_flags(struct sparx5_port *port, + struct switchdev_brport_flags flags) +{ + if (flags.mask & ~(BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD)) + return -EINVAL; + + return 0; +} + +static void sparx5_port_update_mcast_ip_flood(struct sparx5_port *port, bool flood_flag) +{ + bool should_flood = flood_flag || port->is_mrouter; + int pgid; + + for (pgid = PGID_IPV4_MC_DATA; pgid <= PGID_IPV6_MC_CTRL; pgid++) + sparx5_pgid_update_mask(port, pgid, should_flood); +} + +static void sparx5_port_attr_bridge_flags(struct sparx5_port *port, + struct switchdev_brport_flags flags) +{ + if (flags.mask & BR_MCAST_FLOOD) { + sparx5_pgid_update_mask(port, PGID_MC_FLOOD, !!(flags.val & BR_MCAST_FLOOD)); + sparx5_port_update_mcast_ip_flood(port, !!(flags.val & BR_MCAST_FLOOD)); + } + + if (flags.mask & BR_FLOOD) + sparx5_pgid_update_mask(port, PGID_UC_FLOOD, !!(flags.val & BR_FLOOD)); + if (flags.mask & BR_BCAST_FLOOD) + sparx5_pgid_update_mask(port, PGID_BCAST, !!(flags.val & BR_BCAST_FLOOD)); +} + +static void sparx5_attr_stp_state_set(struct sparx5_port *port, + u8 state) +{ + struct sparx5 *sparx5 = port->sparx5; + + if (!test_bit(port->portno, sparx5->bridge_mask)) { + netdev_err(port->ndev, + "Controlling non-bridged port %d?\n", port->portno); + return; + } + + switch (state) { + case BR_STATE_FORWARDING: + set_bit(port->portno, sparx5->bridge_fwd_mask); + fallthrough; + case BR_STATE_LEARNING: + set_bit(port->portno, sparx5->bridge_lrn_mask); + break; + + default: + /* All other states treated as blocking */ + clear_bit(port->portno, sparx5->bridge_fwd_mask); + clear_bit(port->portno, sparx5->bridge_lrn_mask); + break; + } + + /* apply the bridge_fwd_mask to all the ports */ + sparx5_update_fwd(sparx5); +} + +static void sparx5_port_attr_ageing_set(struct sparx5_port *port, + unsigned long ageing_clock_t) +{ + unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t); + u32 ageing_time = jiffies_to_msecs(ageing_jiffies); + + sparx5_set_ageing(port->sparx5, ageing_time); +} + +static void sparx5_port_attr_mrouter_set(struct sparx5_port *port, + struct net_device *orig_dev, + bool enable) +{ + struct sparx5 *sparx5 = port->sparx5; + struct sparx5_mdb_entry *e; + bool flood_flag; + + if ((enable && port->is_mrouter) || (!enable && !port->is_mrouter)) + return; + + /* Add/del mrouter port on all active mdb entries in HW. + * Don't change entry port mask, since that represents + * ports that actually joined that group. + */ + mutex_lock(&sparx5->mdb_lock); + list_for_each_entry(e, &sparx5->mdb_entries, list) { + if (!test_bit(port->portno, e->port_mask) && + ether_addr_is_ip_mcast(e->addr)) + sparx5_pgid_update_mask(port, e->pgid_idx, enable); + } + mutex_unlock(&sparx5->mdb_lock); + + /* Enable/disable flooding depending on if port is mrouter port + * or if mcast flood is enabled. 
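+ * An mrouter port therefore keeps IPv4/IPv6 multicast flooding
+ * enabled even when the bridge BR_MCAST_FLOOD flag is cleared.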
+ */
+ port->is_mrouter = enable;
+ flood_flag = br_port_flag_is_set(port->ndev, BR_MCAST_FLOOD);
+ sparx5_port_update_mcast_ip_flood(port, flood_flag);
+}
+
+static int sparx5_port_attr_set(struct net_device *dev, const void *ctx,
+ const struct switchdev_attr *attr,
+ struct netlink_ext_ack *extack)
+{
+ struct sparx5_port *port = netdev_priv(dev);
+
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
+ return sparx5_port_attr_pre_bridge_flags(port,
+ attr->u.brport_flags);
+ case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
+ sparx5_port_attr_bridge_flags(port, attr->u.brport_flags);
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
+ sparx5_attr_stp_state_set(port, attr->u.stp_state);
+ break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
+ sparx5_port_attr_ageing_set(port, attr->u.ageing_time);
+ break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
+ /* Use PVID 1 when default_pvid is 0, to avoid
+ * collision with non-bridged ports.
+ */
+ if (port->pvid == 0)
+ port->pvid = 1;
+ port->vlan_aware = attr->u.vlan_filtering;
+ sparx5_vlan_port_apply(port->sparx5, port);
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_MROUTER:
+ sparx5_port_attr_mrouter_set(port,
+ attr->orig_dev,
+ attr->u.mrouter);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int sparx5_port_bridge_join(struct sparx5_port *port,
+ struct net_device *bridge,
+ struct netlink_ext_ack *extack)
+{
+ struct sparx5 *sparx5 = port->sparx5;
+ struct net_device *ndev = port->ndev;
+ int err;
+
+ if (bitmap_empty(sparx5->bridge_mask, SPX5_PORTS))
+ /* First bridged port */
+ sparx5->hw_bridge_dev = bridge;
+ else
+ if (sparx5->hw_bridge_dev != bridge)
+ /* This is adding the port to a second bridge, which is
+ * unsupported
+ */
+ return -ENODEV;
+
+ set_bit(port->portno, sparx5->bridge_mask);
+
+ err = switchdev_bridge_port_offload(ndev, ndev, NULL, NULL, NULL,
+ false, extack);
+ if (err)
+ goto err_switchdev_offload;
+
+ /* Remove standalone port entry */
+ sparx5_mact_forget(sparx5, ndev->dev_addr, 0);
+
+ /* Port enters bridge mode, therefore we don't need to copy multicast
+ * frames to the CPU in case the bridge is not requesting them
+ */
+ __dev_mc_unsync(ndev, sparx5_mc_unsync);
+
+ return 0;
+
+err_switchdev_offload:
+ clear_bit(port->portno, sparx5->bridge_mask);
+ return err;
+}
+
+static void sparx5_port_bridge_leave(struct sparx5_port *port,
+ struct net_device *bridge)
+{
+ struct sparx5 *sparx5 = port->sparx5;
+
+ switchdev_bridge_port_unoffload(port->ndev, NULL, NULL, NULL);
+
+ clear_bit(port->portno, sparx5->bridge_mask);
+ if (bitmap_empty(sparx5->bridge_mask, SPX5_PORTS))
+ sparx5->hw_bridge_dev = NULL;
+
+ /* Clear bridge vlan settings before updating the port settings */
+ port->vlan_aware = 0;
+ port->pvid = NULL_VID;
+ port->vid = NULL_VID;
+
+ /* Forward frames to CPU */
+ sparx5_mact_learn(sparx5, PGID_CPU, port->ndev->dev_addr, 0);
+
+ /* Port enters host mode, therefore restore the mc list */
+ __dev_mc_sync(port->ndev, sparx5_mc_sync, sparx5_mc_unsync);
+}
+
+static int sparx5_port_changeupper(struct net_device *dev,
+ struct netdev_notifier_changeupper_info *info)
+{
+ struct sparx5_port *port = netdev_priv(dev);
+ struct netlink_ext_ack *extack;
+ int err = 0;
+
+ extack = netdev_notifier_info_to_extack(&info->info);
+
+ if (netif_is_bridge_master(info->upper_dev)) {
+ if (info->linking)
+ err = sparx5_port_bridge_join(port, info->upper_dev,
+ extack);
+ else
+ sparx5_port_bridge_leave(port, info->upper_dev);
+
+ sparx5_vlan_port_apply(port->sparx5, port);
+ }
+
+ return 
err; +} + +static int sparx5_port_add_addr(struct net_device *dev, bool up) +{ + struct sparx5_port *port = netdev_priv(dev); + struct sparx5 *sparx5 = port->sparx5; + u16 vid = port->pvid; + + if (up) + sparx5_mact_learn(sparx5, PGID_CPU, port->ndev->dev_addr, vid); + else + sparx5_mact_forget(sparx5, port->ndev->dev_addr, vid); + + return 0; +} + +static int sparx5_netdevice_port_event(struct net_device *dev, + struct notifier_block *nb, + unsigned long event, void *ptr) +{ + int err = 0; + + if (!sparx5_netdevice_check(dev)) + return 0; + + switch (event) { + case NETDEV_CHANGEUPPER: + err = sparx5_port_changeupper(dev, ptr); + break; + case NETDEV_PRE_UP: + err = sparx5_port_add_addr(dev, true); + break; + case NETDEV_DOWN: + err = sparx5_port_add_addr(dev, false); + break; + } + + return err; +} + +static int sparx5_netdevice_event(struct notifier_block *nb, + unsigned long event, void *ptr) +{ + struct net_device *dev = netdev_notifier_info_to_dev(ptr); + int ret = 0; + + ret = sparx5_netdevice_port_event(dev, nb, event, ptr); + + return notifier_from_errno(ret); +} + +static void sparx5_switchdev_bridge_fdb_event_work(struct work_struct *work) +{ + struct sparx5_switchdev_event_work *switchdev_work = + container_of(work, struct sparx5_switchdev_event_work, work); + struct net_device *dev = switchdev_work->dev; + struct switchdev_notifier_fdb_info *fdb_info; + struct sparx5_port *port; + struct sparx5 *sparx5; + bool host_addr; + u16 vid; + + rtnl_lock(); + if (!sparx5_netdevice_check(dev)) { + host_addr = true; + sparx5 = switchdev_work->sparx5; + } else { + host_addr = false; + sparx5 = switchdev_work->sparx5; + port = netdev_priv(dev); + } + + fdb_info = &switchdev_work->fdb_info; + + /* Used PVID 1 when default_pvid is 0, to avoid + * collision with non-bridged ports. 
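+ * This mirrors the PVID handling for
+ * SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING in sparx5_port_attr_set().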
+ */ + if (fdb_info->vid == 0) + vid = 1; + else + vid = fdb_info->vid; + + switch (switchdev_work->event) { + case SWITCHDEV_FDB_ADD_TO_DEVICE: + if (host_addr) + sparx5_add_mact_entry(sparx5, dev, PGID_CPU, + fdb_info->addr, vid); + else + sparx5_add_mact_entry(sparx5, port->ndev, port->portno, + fdb_info->addr, vid); + break; + case SWITCHDEV_FDB_DEL_TO_DEVICE: + sparx5_del_mact_entry(sparx5, fdb_info->addr, vid); + break; + } + + rtnl_unlock(); + kfree(switchdev_work->fdb_info.addr); + kfree(switchdev_work); + dev_put(dev); +} + +static void sparx5_schedule_work(struct work_struct *work) +{ + queue_work(sparx5_owq, work); +} + +static int sparx5_switchdev_event(struct notifier_block *nb, + unsigned long event, void *ptr) +{ + struct net_device *dev = switchdev_notifier_info_to_dev(ptr); + struct sparx5_switchdev_event_work *switchdev_work; + struct switchdev_notifier_fdb_info *fdb_info; + struct switchdev_notifier_info *info = ptr; + struct sparx5 *spx5; + int err; + + spx5 = container_of(nb, struct sparx5, switchdev_nb); + + switch (event) { + case SWITCHDEV_PORT_ATTR_SET: + err = switchdev_handle_port_attr_set(dev, ptr, + sparx5_netdevice_check, + sparx5_port_attr_set); + return notifier_from_errno(err); + case SWITCHDEV_FDB_ADD_TO_DEVICE: + fallthrough; + case SWITCHDEV_FDB_DEL_TO_DEVICE: + switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC); + if (!switchdev_work) + return NOTIFY_BAD; + + switchdev_work->dev = dev; + switchdev_work->event = event; + switchdev_work->sparx5 = spx5; + + fdb_info = container_of(info, + struct switchdev_notifier_fdb_info, + info); + INIT_WORK(&switchdev_work->work, + sparx5_switchdev_bridge_fdb_event_work); + memcpy(&switchdev_work->fdb_info, ptr, + sizeof(switchdev_work->fdb_info)); + switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC); + if (!switchdev_work->fdb_info.addr) + goto err_addr_alloc; + + ether_addr_copy((u8 *)switchdev_work->fdb_info.addr, + fdb_info->addr); + dev_hold(dev); + + sparx5_schedule_work(&switchdev_work->work); + break; + } + + return NOTIFY_DONE; +err_addr_alloc: + kfree(switchdev_work); + return NOTIFY_BAD; +} + +static int sparx5_handle_port_vlan_add(struct net_device *dev, + struct notifier_block *nb, + const struct switchdev_obj_port_vlan *v) +{ + struct sparx5_port *port = netdev_priv(dev); + + if (netif_is_bridge_master(dev)) { + struct sparx5 *sparx5 = + container_of(nb, struct sparx5, + switchdev_blocking_nb); + + /* Flood broadcast to CPU */ + sparx5_mact_learn(sparx5, PGID_BCAST, dev->broadcast, + v->vid); + return 0; + } + + if (!sparx5_netdevice_check(dev)) + return -EOPNOTSUPP; + + return sparx5_vlan_vid_add(port, v->vid, + v->flags & BRIDGE_VLAN_INFO_PVID, + v->flags & BRIDGE_VLAN_INFO_UNTAGGED); +} + +static int sparx5_alloc_mdb_entry(struct sparx5 *sparx5, + const unsigned char *addr, + u16 vid, + struct sparx5_mdb_entry **entry_out) +{ + struct sparx5_mdb_entry *entry; + u16 pgid_idx; + int err; + + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return -ENOMEM; + + err = sparx5_pgid_alloc_mcast(sparx5, &pgid_idx); + if (err) { + kfree(entry); + return err; + } + + memcpy(entry->addr, addr, ETH_ALEN); + entry->vid = vid; + entry->pgid_idx = pgid_idx; + + mutex_lock(&sparx5->mdb_lock); + list_add_tail(&entry->list, &sparx5->mdb_entries); + mutex_unlock(&sparx5->mdb_lock); + + *entry_out = entry; + return 0; +} + +static void sparx5_free_mdb_entry(struct sparx5 *sparx5, + const unsigned char *addr, + u16 vid) +{ + struct sparx5_mdb_entry *entry, *tmp; + + 
mutex_lock(&sparx5->mdb_lock); + list_for_each_entry_safe(entry, tmp, &sparx5->mdb_entries, list) { + if ((vid == 0 || entry->vid == vid) && + ether_addr_equal(addr, entry->addr)) { + list_del(&entry->list); + + sparx5_pgid_free(sparx5, entry->pgid_idx); + kfree(entry); + goto out; + } + } + +out: + mutex_unlock(&sparx5->mdb_lock); +} + +static struct sparx5_mdb_entry *sparx5_mdb_get_entry(struct sparx5 *sparx5, + const unsigned char *addr, + u16 vid) +{ + struct sparx5_mdb_entry *e, *found = NULL; + + mutex_lock(&sparx5->mdb_lock); + list_for_each_entry(e, &sparx5->mdb_entries, list) { + if (ether_addr_equal(e->addr, addr) && e->vid == vid) { + found = e; + goto out; + } + } + +out: + mutex_unlock(&sparx5->mdb_lock); + return found; +} + +static void sparx5_cpu_copy_ena(struct sparx5 *spx5, u16 pgid, bool enable) +{ + spx5_rmw(ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_SET(enable), + ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA, spx5, + ANA_AC_PGID_MISC_CFG(pgid)); +} + +static int sparx5_handle_port_mdb_add(struct net_device *dev, + struct notifier_block *nb, + const struct switchdev_obj_port_mdb *v) +{ + struct sparx5_port *port = netdev_priv(dev); + struct sparx5 *spx5 = port->sparx5; + struct sparx5_mdb_entry *entry; + bool is_host, is_new; + int err, i; + u16 vid; + + if (!sparx5_netdevice_check(dev)) + return -EOPNOTSUPP; + + is_host = netif_is_bridge_master(v->obj.orig_dev); + + /* When VLAN unaware the vlan value is not parsed and we receive vid 0. + * Fall back to bridge vid 1. + */ + if (!br_vlan_enabled(spx5->hw_bridge_dev)) + vid = 1; + else + vid = v->vid; + + is_new = false; + entry = sparx5_mdb_get_entry(spx5, v->addr, vid); + if (!entry) { + err = sparx5_alloc_mdb_entry(spx5, v->addr, vid, &entry); + is_new = true; + if (err) + return err; + } + + mutex_lock(&spx5->mdb_lock); + + /* Add any mrouter ports to the new entry */ + if (is_new && ether_addr_is_ip_mcast(v->addr)) + for (i = 0; i < SPX5_PORTS; i++) + if (spx5->ports[i] && spx5->ports[i]->is_mrouter) + sparx5_pgid_update_mask(spx5->ports[i], + entry->pgid_idx, + true); + + if (is_host && !entry->cpu_copy) { + sparx5_cpu_copy_ena(spx5, entry->pgid_idx, true); + entry->cpu_copy = true; + } else if (!is_host) { + sparx5_pgid_update_mask(port, entry->pgid_idx, true); + set_bit(port->portno, entry->port_mask); + } + mutex_unlock(&spx5->mdb_lock); + + sparx5_mact_learn(spx5, entry->pgid_idx, entry->addr, entry->vid); + + return 0; +} + +static int sparx5_handle_port_mdb_del(struct net_device *dev, + struct notifier_block *nb, + const struct switchdev_obj_port_mdb *v) +{ + struct sparx5_port *port = netdev_priv(dev); + struct sparx5 *spx5 = port->sparx5; + struct sparx5_mdb_entry *entry; + bool is_host; + u16 vid; + + if (!sparx5_netdevice_check(dev)) + return -EOPNOTSUPP; + + is_host = netif_is_bridge_master(v->obj.orig_dev); + + if (!br_vlan_enabled(spx5->hw_bridge_dev)) + vid = 1; + else + vid = v->vid; + + entry = sparx5_mdb_get_entry(spx5, v->addr, vid); + if (!entry) + return 0; + + mutex_lock(&spx5->mdb_lock); + if (is_host && entry->cpu_copy) { + sparx5_cpu_copy_ena(spx5, entry->pgid_idx, false); + entry->cpu_copy = false; + } else if (!is_host) { + clear_bit(port->portno, entry->port_mask); + + /* Port not mrouter port or addr is L2 mcast, remove port from mask. 
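+ * An mrouter port is kept in the mask for IP multicast groups so it
+ * continues to receive that traffic.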
*/ + if (!port->is_mrouter || !ether_addr_is_ip_mcast(v->addr)) + sparx5_pgid_update_mask(port, entry->pgid_idx, false); + } + mutex_unlock(&spx5->mdb_lock); + + if (bitmap_empty(entry->port_mask, SPX5_PORTS) && !entry->cpu_copy) { + /* Clear pgid in case mrouter ports exists + * that are not part of the group. + */ + sparx5_pgid_clear(spx5, entry->pgid_idx); + sparx5_mact_forget(spx5, entry->addr, entry->vid); + sparx5_free_mdb_entry(spx5, entry->addr, entry->vid); + } + return 0; +} + +static int sparx5_handle_port_obj_add(struct net_device *dev, + struct notifier_block *nb, + struct switchdev_notifier_port_obj_info *info) +{ + const struct switchdev_obj *obj = info->obj; + int err; + + switch (obj->id) { + case SWITCHDEV_OBJ_ID_PORT_VLAN: + err = sparx5_handle_port_vlan_add(dev, nb, + SWITCHDEV_OBJ_PORT_VLAN(obj)); + break; + case SWITCHDEV_OBJ_ID_PORT_MDB: + case SWITCHDEV_OBJ_ID_HOST_MDB: + err = sparx5_handle_port_mdb_add(dev, nb, + SWITCHDEV_OBJ_PORT_MDB(obj)); + break; + default: + err = -EOPNOTSUPP; + break; + } + + info->handled = true; + return err; +} + +static int sparx5_handle_port_vlan_del(struct net_device *dev, + struct notifier_block *nb, + u16 vid) +{ + struct sparx5_port *port = netdev_priv(dev); + int ret; + + /* Master bridge? */ + if (netif_is_bridge_master(dev)) { + struct sparx5 *sparx5 = + container_of(nb, struct sparx5, + switchdev_blocking_nb); + + sparx5_mact_forget(sparx5, dev->broadcast, vid); + return 0; + } + + if (!sparx5_netdevice_check(dev)) + return -EOPNOTSUPP; + + ret = sparx5_vlan_vid_del(port, vid); + if (ret) + return ret; + + return 0; +} + +static int sparx5_handle_port_obj_del(struct net_device *dev, + struct notifier_block *nb, + struct switchdev_notifier_port_obj_info *info) +{ + const struct switchdev_obj *obj = info->obj; + int err; + + switch (obj->id) { + case SWITCHDEV_OBJ_ID_PORT_VLAN: + err = sparx5_handle_port_vlan_del(dev, nb, + SWITCHDEV_OBJ_PORT_VLAN(obj)->vid); + break; + case SWITCHDEV_OBJ_ID_PORT_MDB: + case SWITCHDEV_OBJ_ID_HOST_MDB: + err = sparx5_handle_port_mdb_del(dev, nb, + SWITCHDEV_OBJ_PORT_MDB(obj)); + break; + default: + err = -EOPNOTSUPP; + break; + } + + info->handled = true; + return err; +} + +static int sparx5_switchdev_blocking_event(struct notifier_block *nb, + unsigned long event, + void *ptr) +{ + struct net_device *dev = switchdev_notifier_info_to_dev(ptr); + int err; + + switch (event) { + case SWITCHDEV_PORT_OBJ_ADD: + err = sparx5_handle_port_obj_add(dev, nb, ptr); + return notifier_from_errno(err); + case SWITCHDEV_PORT_OBJ_DEL: + err = sparx5_handle_port_obj_del(dev, nb, ptr); + return notifier_from_errno(err); + case SWITCHDEV_PORT_ATTR_SET: + err = switchdev_handle_port_attr_set(dev, ptr, + sparx5_netdevice_check, + sparx5_port_attr_set); + return notifier_from_errno(err); + } + + return NOTIFY_DONE; +} + +int sparx5_register_notifier_blocks(struct sparx5 *s5) +{ + int err; + + s5->netdevice_nb.notifier_call = sparx5_netdevice_event; + err = register_netdevice_notifier(&s5->netdevice_nb); + if (err) + return err; + + s5->switchdev_nb.notifier_call = sparx5_switchdev_event; + err = register_switchdev_notifier(&s5->switchdev_nb); + if (err) + goto err_switchdev_nb; + + s5->switchdev_blocking_nb.notifier_call = sparx5_switchdev_blocking_event; + err = register_switchdev_blocking_notifier(&s5->switchdev_blocking_nb); + if (err) + goto err_switchdev_blocking_nb; + + sparx5_owq = alloc_ordered_workqueue("sparx5_order", 0); + if (!sparx5_owq) { + err = -ENOMEM; + goto err_switchdev_blocking_nb; + } + + return 
0; + +err_switchdev_blocking_nb: + unregister_switchdev_notifier(&s5->switchdev_nb); +err_switchdev_nb: + unregister_netdevice_notifier(&s5->netdevice_nb); + + return err; +} + +void sparx5_unregister_notifier_blocks(struct sparx5 *s5) +{ + destroy_workqueue(sparx5_owq); + + unregister_switchdev_blocking_notifier(&s5->switchdev_blocking_nb); + unregister_switchdev_notifier(&s5->switchdev_nb); + unregister_netdevice_notifier(&s5->netdevice_nb); +} diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_tc.c b/drivers/net/ethernet/microchip/sparx5/sparx5_tc.c new file mode 100644 index 000000000..dc2c3756e --- /dev/null +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_tc.c @@ -0,0 +1,122 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Microchip Sparx5 Switch driver + * + * Copyright (c) 2022 Microchip Technology Inc. and its subsidiaries. + */ + +#include <net/pkt_cls.h> + +#include "sparx5_tc.h" +#include "sparx5_main.h" +#include "sparx5_qos.h" + +static void sparx5_tc_get_layer_and_idx(u32 parent, u32 portno, u32 *layer, + u32 *idx) +{ + if (parent == TC_H_ROOT) { + *layer = 2; + *idx = portno; + } else { + u32 queue = TC_H_MIN(parent) - 1; + *layer = 0; + *idx = SPX5_HSCH_L0_GET_IDX(portno, queue); + } +} + +static int sparx5_tc_setup_qdisc_mqprio(struct net_device *ndev, + struct tc_mqprio_qopt_offload *m) +{ + m->qopt.hw = TC_MQPRIO_HW_OFFLOAD_TCS; + + if (m->qopt.num_tc == 0) + return sparx5_tc_mqprio_del(ndev); + else + return sparx5_tc_mqprio_add(ndev, m->qopt.num_tc); +} + +static int sparx5_tc_setup_qdisc_tbf(struct net_device *ndev, + struct tc_tbf_qopt_offload *qopt) +{ + struct sparx5_port *port = netdev_priv(ndev); + u32 layer, se_idx; + + sparx5_tc_get_layer_and_idx(qopt->parent, port->portno, &layer, + &se_idx); + + switch (qopt->command) { + case TC_TBF_REPLACE: + return sparx5_tc_tbf_add(port, &qopt->replace_params, layer, + se_idx); + case TC_TBF_DESTROY: + return sparx5_tc_tbf_del(port, layer, se_idx); + case TC_TBF_STATS: + return -EOPNOTSUPP; + default: + return -EOPNOTSUPP; + } + + return -EOPNOTSUPP; +} + +static int sparx5_tc_setup_qdisc_ets(struct net_device *ndev, + struct tc_ets_qopt_offload *qopt) +{ + struct tc_ets_qopt_offload_replace_params *params = + &qopt->replace_params; + struct sparx5_port *port = netdev_priv(ndev); + int i; + + /* Only allow ets on ports */ + if (qopt->parent != TC_H_ROOT) + return -EOPNOTSUPP; + + switch (qopt->command) { + case TC_ETS_REPLACE: + + /* We support eight priorities */ + if (params->bands != SPX5_PRIOS) + return -EOPNOTSUPP; + + /* Sanity checks */ + for (i = 0; i < SPX5_PRIOS; ++i) { + /* Priority map is *always* reverse e.g: 7 6 5 .. 
0 */ + if (params->priomap[i] != (7 - i)) + return -EOPNOTSUPP; + /* Throw an error if we receive zero weights by tc */ + if (params->quanta[i] && params->weights[i] == 0) { + pr_err("Invalid ets configuration; band %d has weight zero", + i); + return -EINVAL; + } + } + + return sparx5_tc_ets_add(port, params); + case TC_ETS_DESTROY: + + return sparx5_tc_ets_del(port); + case TC_ETS_GRAFT: + return -EOPNOTSUPP; + + default: + return -EOPNOTSUPP; + } + + return -EOPNOTSUPP; +} + +int sparx5_port_setup_tc(struct net_device *ndev, enum tc_setup_type type, + void *type_data) +{ + switch (type) { + case TC_SETUP_QDISC_MQPRIO: + return sparx5_tc_setup_qdisc_mqprio(ndev, type_data); + case TC_SETUP_QDISC_TBF: + return sparx5_tc_setup_qdisc_tbf(ndev, type_data); + case TC_SETUP_QDISC_ETS: + return sparx5_tc_setup_qdisc_ets(ndev, type_data); + default: + return -EOPNOTSUPP; + } + + return 0; +} diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_tc.h b/drivers/net/ethernet/microchip/sparx5/sparx5_tc.h new file mode 100644 index 000000000..5b55e11b7 --- /dev/null +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_tc.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Microchip Sparx5 Switch driver + * + * Copyright (c) 2022 Microchip Technology Inc. and its subsidiaries. + */ + +#ifndef __SPARX5_TC_H__ +#define __SPARX5_TC_H__ + +#include <linux/netdevice.h> + +int sparx5_port_setup_tc(struct net_device *ndev, enum tc_setup_type type, + void *type_data); + +#endif /* __SPARX5_TC_H__ */ diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c b/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c new file mode 100644 index 000000000..34f954bbf --- /dev/null +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c @@ -0,0 +1,238 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Microchip Sparx5 Switch driver + * + * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries. 
+ */ + +#include "sparx5_main_regs.h" +#include "sparx5_main.h" + +static int sparx5_vlant_set_mask(struct sparx5 *sparx5, u16 vid) +{ + u32 mask[3]; + + /* Divide up mask in 32 bit words */ + bitmap_to_arr32(mask, sparx5->vlan_mask[vid], SPX5_PORTS); + + /* Output mask to respective registers */ + spx5_wr(mask[0], sparx5, ANA_L3_VLAN_MASK_CFG(vid)); + spx5_wr(mask[1], sparx5, ANA_L3_VLAN_MASK_CFG1(vid)); + spx5_wr(mask[2], sparx5, ANA_L3_VLAN_MASK_CFG2(vid)); + + return 0; +} + +void sparx5_vlan_init(struct sparx5 *sparx5) +{ + u16 vid; + + spx5_rmw(ANA_L3_VLAN_CTRL_VLAN_ENA_SET(1), + ANA_L3_VLAN_CTRL_VLAN_ENA, + sparx5, + ANA_L3_VLAN_CTRL); + + /* Map VLAN = FID */ + for (vid = NULL_VID; vid < VLAN_N_VID; vid++) + spx5_rmw(ANA_L3_VLAN_CFG_VLAN_FID_SET(vid), + ANA_L3_VLAN_CFG_VLAN_FID, + sparx5, + ANA_L3_VLAN_CFG(vid)); +} + +void sparx5_vlan_port_setup(struct sparx5 *sparx5, int portno) +{ + struct sparx5_port *port = sparx5->ports[portno]; + + /* Configure PVID */ + spx5_rmw(ANA_CL_VLAN_CTRL_VLAN_AWARE_ENA_SET(0) | + ANA_CL_VLAN_CTRL_PORT_VID_SET(port->pvid), + ANA_CL_VLAN_CTRL_VLAN_AWARE_ENA | + ANA_CL_VLAN_CTRL_PORT_VID, + sparx5, + ANA_CL_VLAN_CTRL(port->portno)); +} + +int sparx5_vlan_vid_add(struct sparx5_port *port, u16 vid, bool pvid, + bool untagged) +{ + struct sparx5 *sparx5 = port->sparx5; + int ret; + + /* Untagged egress vlan classification */ + if (untagged && port->vid != vid) { + if (port->vid) { + netdev_err(port->ndev, + "Port already has a native VLAN: %d\n", + port->vid); + return -EBUSY; + } + port->vid = vid; + } + + /* Make the port a member of the VLAN */ + set_bit(port->portno, sparx5->vlan_mask[vid]); + ret = sparx5_vlant_set_mask(sparx5, vid); + if (ret) + return ret; + + /* Default ingress vlan classification */ + if (pvid) + port->pvid = vid; + + sparx5_vlan_port_apply(sparx5, port); + + return 0; +} + +int sparx5_vlan_vid_del(struct sparx5_port *port, u16 vid) +{ + struct sparx5 *sparx5 = port->sparx5; + int ret; + + /* 8021q removes VID 0 on module unload for all interfaces + * with VLAN filtering feature. We need to keep it to receive + * untagged traffic. + */ + if (vid == 0) + return 0; + + /* Stop the port from being a member of the vlan */ + clear_bit(port->portno, sparx5->vlan_mask[vid]); + ret = sparx5_vlant_set_mask(sparx5, vid); + if (ret) + return ret; + + /* Ingress */ + if (port->pvid == vid) + port->pvid = 0; + + /* Egress */ + if (port->vid == vid) + port->vid = 0; + + sparx5_vlan_port_apply(sparx5, port); + + return 0; +} + +void sparx5_pgid_update_mask(struct sparx5_port *port, int pgid, bool enable) +{ + struct sparx5 *sparx5 = port->sparx5; + u32 val, mask; + + /* mask is spread across 3 registers x 32 bit */ + if (port->portno < 32) { + mask = BIT(port->portno); + val = enable ? mask : 0; + spx5_rmw(val, mask, sparx5, ANA_AC_PGID_CFG(pgid)); + } else if (port->portno < 64) { + mask = BIT(port->portno - 32); + val = enable ? mask : 0; + spx5_rmw(val, mask, sparx5, ANA_AC_PGID_CFG1(pgid)); + } else if (port->portno < SPX5_PORTS) { + mask = BIT(port->portno - 64); + val = enable ? 
mask : 0; + spx5_rmw(val, mask, sparx5, ANA_AC_PGID_CFG2(pgid)); + } else { + netdev_err(port->ndev, "Invalid port no: %d\n", port->portno); + } +} + +void sparx5_pgid_clear(struct sparx5 *spx5, int pgid) +{ + spx5_wr(0, spx5, ANA_AC_PGID_CFG(pgid)); + spx5_wr(0, spx5, ANA_AC_PGID_CFG1(pgid)); + spx5_wr(0, spx5, ANA_AC_PGID_CFG2(pgid)); +} + +void sparx5_pgid_read_mask(struct sparx5 *spx5, int pgid, u32 portmask[3]) +{ + portmask[0] = spx5_rd(spx5, ANA_AC_PGID_CFG(pgid)); + portmask[1] = spx5_rd(spx5, ANA_AC_PGID_CFG1(pgid)); + portmask[2] = spx5_rd(spx5, ANA_AC_PGID_CFG2(pgid)); +} + +void sparx5_update_fwd(struct sparx5 *sparx5) +{ + DECLARE_BITMAP(workmask, SPX5_PORTS); + u32 mask[3]; + int port; + + /* Divide up fwd mask in 32 bit words */ + bitmap_to_arr32(mask, sparx5->bridge_fwd_mask, SPX5_PORTS); + + /* Update flood masks */ + for (port = PGID_UC_FLOOD; port <= PGID_BCAST; port++) { + spx5_wr(mask[0], sparx5, ANA_AC_PGID_CFG(port)); + spx5_wr(mask[1], sparx5, ANA_AC_PGID_CFG1(port)); + spx5_wr(mask[2], sparx5, ANA_AC_PGID_CFG2(port)); + } + + /* Update SRC masks */ + for (port = 0; port < SPX5_PORTS; port++) { + if (test_bit(port, sparx5->bridge_fwd_mask)) { + /* Allow to send to all bridged but self */ + bitmap_copy(workmask, sparx5->bridge_fwd_mask, SPX5_PORTS); + clear_bit(port, workmask); + bitmap_to_arr32(mask, workmask, SPX5_PORTS); + spx5_wr(mask[0], sparx5, ANA_AC_SRC_CFG(port)); + spx5_wr(mask[1], sparx5, ANA_AC_SRC_CFG1(port)); + spx5_wr(mask[2], sparx5, ANA_AC_SRC_CFG2(port)); + } else { + spx5_wr(0, sparx5, ANA_AC_SRC_CFG(port)); + spx5_wr(0, sparx5, ANA_AC_SRC_CFG1(port)); + spx5_wr(0, sparx5, ANA_AC_SRC_CFG2(port)); + } + } + + /* Learning enabled only for bridged ports */ + bitmap_and(workmask, sparx5->bridge_fwd_mask, + sparx5->bridge_lrn_mask, SPX5_PORTS); + bitmap_to_arr32(mask, workmask, SPX5_PORTS); + + /* Apply learning mask */ + spx5_wr(mask[0], sparx5, ANA_L2_AUTO_LRN_CFG); + spx5_wr(mask[1], sparx5, ANA_L2_AUTO_LRN_CFG1); + spx5_wr(mask[2], sparx5, ANA_L2_AUTO_LRN_CFG2); +} + +void sparx5_vlan_port_apply(struct sparx5 *sparx5, + struct sparx5_port *port) + +{ + u32 val; + + /* Configure PVID, vlan aware */ + val = ANA_CL_VLAN_CTRL_VLAN_AWARE_ENA_SET(port->vlan_aware) | + ANA_CL_VLAN_CTRL_VLAN_POP_CNT_SET(port->vlan_aware) | + ANA_CL_VLAN_CTRL_PORT_VID_SET(port->pvid); + spx5_wr(val, sparx5, ANA_CL_VLAN_CTRL(port->portno)); + + val = 0; + if (port->vlan_aware && !port->pvid) + /* If port is vlan-aware and tagged, drop untagged and + * priority tagged frames. + */ + val = ANA_CL_VLAN_FILTER_CTRL_TAG_REQUIRED_ENA_SET(1) | + ANA_CL_VLAN_FILTER_CTRL_PRIO_CTAG_DIS_SET(1) | + ANA_CL_VLAN_FILTER_CTRL_PRIO_STAG_DIS_SET(1); + spx5_wr(val, sparx5, + ANA_CL_VLAN_FILTER_CTRL(port->portno, 0)); + + /* Egress configuration (REW_TAG_CFG): VLAN tag type to 8021Q */ + val = REW_TAG_CTRL_TAG_TPID_CFG_SET(0); + if (port->vlan_aware) { + if (port->vid) + /* Tag all frames except when VID == DEFAULT_VLAN */ + val |= REW_TAG_CTRL_TAG_CFG_SET(1); + else + val |= REW_TAG_CTRL_TAG_CFG_SET(3); + } + spx5_wr(val, sparx5, REW_TAG_CTRL(port->portno)); + + /* Egress VID */ + spx5_rmw(REW_PORT_VLAN_CFG_PORT_VID_SET(port->vid), + REW_PORT_VLAN_CFG_PORT_VID, + sparx5, + REW_PORT_VLAN_CFG(port->portno)); +} |