author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
commit     ace9429bb58fd418f0c81d4c2835699bddf6bde6 (patch)
tree       b2d64bc10158fdd5497876388cd68142ca374ed3 /drivers/net/ethernet/microchip
parent     Initial commit. (diff)
download   linux-ace9429bb58fd418f0c81d4c2835699bddf6bde6.tar.xz
           linux-ace9429bb58fd418f0c81d4c2835699bddf6bde6.zip

Adding upstream version 6.6.15. (tag: upstream/6.6.15)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/net/ethernet/microchip')
-rw-r--r-- drivers/net/ethernet/microchip/Kconfig 63
-rw-r--r-- drivers/net/ethernet/microchip/Makefile 14
-rw-r--r-- drivers/net/ethernet/microchip/enc28j60.c 1629
-rw-r--r-- drivers/net/ethernet/microchip/enc28j60_hw.h 310
-rw-r--r-- drivers/net/ethernet/microchip/encx24j600-regmap.c 516
-rw-r--r-- drivers/net/ethernet/microchip/encx24j600.c 1127
-rw-r--r-- drivers/net/ethernet/microchip/encx24j600_hw.h 438
-rw-r--r-- drivers/net/ethernet/microchip/lan743x_ethtool.c 1413
-rw-r--r-- drivers/net/ethernet/microchip/lan743x_ethtool.h 106
-rw-r--r-- drivers/net/ethernet/microchip/lan743x_main.c 3702
-rw-r--r-- drivers/net/ethernet/microchip/lan743x_main.h 1167
-rw-r--r-- drivers/net/ethernet/microchip/lan743x_ptp.c 1792
-rw-r--r-- drivers/net/ethernet/microchip/lan743x_ptp.h 101
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/Kconfig 23
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/Makefile 22
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_cbs.c 70
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_dcb.c 365
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c 727
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_ets.c 96
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_fdb.c 289
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c 1073
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_goto.c 50
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_ifh.h 174
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_lag.c 363
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_mac.c 592
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_main.c 1324
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_main.h 792
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_mdb.c 551
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_mirror.c 138
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_mqprio.c 28
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c 137
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_police.c 226
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_port.c 570
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c 1113
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_regs.h 1888
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c 664
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_taprio.c 528
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_tbf.c 85
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_tc.c 142
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_tc_flower.c 620
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_tc_matchall.c 91
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_vcap_ag_api.c 3270
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_vcap_ag_api.h 11
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_vcap_debugfs.c 242
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c 784
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c 323
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_xdp.c 136
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/Kconfig 25
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/Makefile 19
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_calendar.c 596
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_dcb.c 407
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c 1264
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c 598
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c 503
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_main.c 957
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_main.h 693
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h 7350
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c 334
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_packet.c 359
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_pgid.c 46
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c 145
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_police.c 53
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_pool.c 81
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_port.c 1347
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_port.h 177
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_psfp.c 332
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c 682
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_qos.c 576
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_qos.h 82
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_sdlb.c 335
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c 763
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_tc.c 174
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_tc.h 108
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c 1483
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_tc_matchall.c 97
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_vcap_ag_api.c 3874
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_vcap_ag_api.h 18
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_vcap_debugfs.c 471
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_vcap_debugfs.h 33
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c 2111
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.h 207
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c 238
-rw-r--r-- drivers/net/ethernet/microchip/vcap/Kconfig 53
-rw-r--r-- drivers/net/ethernet/microchip/vcap/Makefile 10
-rw-r--r-- drivers/net/ethernet/microchip/vcap/vcap_ag_api.h 906
-rw-r--r-- drivers/net/ethernet/microchip/vcap/vcap_api.c 3599
-rw-r--r-- drivers/net/ethernet/microchip/vcap/vcap_api.h 280
-rw-r--r-- drivers/net/ethernet/microchip/vcap/vcap_api_client.h 282
-rw-r--r-- drivers/net/ethernet/microchip/vcap/vcap_api_debugfs.c 468
-rw-r--r-- drivers/net/ethernet/microchip/vcap/vcap_api_debugfs.h 41
-rw-r--r-- drivers/net/ethernet/microchip/vcap/vcap_api_debugfs_kunit.c 554
-rw-r--r-- drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c 2357
-rw-r--r-- drivers/net/ethernet/microchip/vcap/vcap_api_private.h 124
-rw-r--r-- drivers/net/ethernet/microchip/vcap/vcap_model_kunit.c 4062
-rw-r--r-- drivers/net/ethernet/microchip/vcap/vcap_model_kunit.h 18
-rw-r--r-- drivers/net/ethernet/microchip/vcap/vcap_tc.c 412
-rw-r--r-- drivers/net/ethernet/microchip/vcap/vcap_tc.h 32
97 files changed, 69621 insertions, 0 deletions
diff --git a/drivers/net/ethernet/microchip/Kconfig b/drivers/net/ethernet/microchip/Kconfig
new file mode 100644
index 0000000000..43ba71e822
--- /dev/null
+++ b/drivers/net/ethernet/microchip/Kconfig
@@ -0,0 +1,63 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Microchip network device configuration
+#
+
+config NET_VENDOR_MICROCHIP
+ bool "Microchip devices"
+ default y
+ help
+ If you have a network (Ethernet) card belonging to this class, say Y.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Microchip cards. If you say Y, you will be asked
+ for your specific card in the following questions.
+
+if NET_VENDOR_MICROCHIP
+
+config ENC28J60
+ tristate "ENC28J60 support"
+ depends on SPI
+ select CRC32
+ help
+	  Support for the Microchip ENC28J60 ethernet chip.
+
+ To compile this driver as a module, choose M here. The module will be
+ called enc28j60.
+
+config ENC28J60_WRITEVERIFY
+ bool "Enable write verify"
+ depends on ENC28J60
+ help
+	  Enable verification after each buffer write, which is useful for debugging.
+ If unsure, say N.
+
+config ENCX24J600
+ tristate "ENCX24J600 support"
+ depends on SPI
+ help
+ Support for the Microchip ENC424J600/624J600 ethernet chip.
+
+ To compile this driver as a module, choose M here. The module will be
+ called encx24j600.
+
+config LAN743X
+ tristate "LAN743x support"
+ depends on PCI
+ depends on PTP_1588_CLOCK_OPTIONAL
+ select PHYLIB
+ select FIXED_PHY
+ select CRC16
+ select CRC32
+ help
+	  Support for the Microchip LAN743x PCI Express Gigabit Ethernet chip.
+
+ To compile this driver as a module, choose M here. The module will be
+ called lan743x.
+
+source "drivers/net/ethernet/microchip/lan966x/Kconfig"
+source "drivers/net/ethernet/microchip/sparx5/Kconfig"
+source "drivers/net/ethernet/microchip/vcap/Kconfig"
+
+endif # NET_VENDOR_MICROCHIP
diff --git a/drivers/net/ethernet/microchip/Makefile b/drivers/net/ethernet/microchip/Makefile
new file mode 100644
index 0000000000..bbd349264e
--- /dev/null
+++ b/drivers/net/ethernet/microchip/Makefile
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for the Microchip network device drivers.
+#
+
+obj-$(CONFIG_ENC28J60) += enc28j60.o
+obj-$(CONFIG_ENCX24J600) += encx24j600.o encx24j600-regmap.o
+obj-$(CONFIG_LAN743X) += lan743x.o
+
+lan743x-objs := lan743x_main.o lan743x_ethtool.o lan743x_ptp.o
+
+obj-$(CONFIG_LAN966X_SWITCH) += lan966x/
+obj-$(CONFIG_SPARX5_SWITCH) += sparx5/
+obj-$(CONFIG_VCAP) += vcap/
diff --git a/drivers/net/ethernet/microchip/enc28j60.c b/drivers/net/ethernet/microchip/enc28j60.c
new file mode 100644
index 0000000000..d6c9491537
--- /dev/null
+++ b/drivers/net/ethernet/microchip/enc28j60.c
@@ -0,0 +1,1629 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Microchip ENC28J60 ethernet driver (MAC + PHY)
+ *
+ * Copyright (C) 2007 Eurek srl
+ * Author: Claudio Lanconelli <lanconelli.claudio@eptar.com>
+ * based on enc28j60.c written by David Anders for 2.4 kernel version
+ *
+ * $Id: enc28j60.c,v 1.22 2007/12/20 10:47:01 claudio Exp $
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/property.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/tcp.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+#include <linux/spi/spi.h>
+
+#include "enc28j60_hw.h"
+
+#define DRV_NAME "enc28j60"
+#define DRV_VERSION "1.02"
+
+#define SPI_OPLEN 1
+
+#define ENC28J60_MSG_DEFAULT \
+ (NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN | NETIF_MSG_LINK)
+
+/* Buffer size required for the largest SPI transfer (i.e., reading a
+ * frame).
+ */
+#define SPI_TRANSFER_BUF_LEN (4 + MAX_FRAMELEN)
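+/* spi_read_buf() puts the one-byte read opcode at the start of this buffer
+ * and copies the received payload in at offset 4, hence the headroom over
+ * MAX_FRAMELEN.
+ */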
+
+#define TX_TIMEOUT (4 * HZ)
+
+/* Max TX retries in case of collision as suggested by errata datasheet */
+#define MAX_TX_RETRYCOUNT 16
+
+enum {
+ RXFILTER_NORMAL,
+ RXFILTER_MULTI,
+ RXFILTER_PROMISC
+};
+
+/* Driver local data */
+struct enc28j60_net {
+ struct net_device *netdev;
+ struct spi_device *spi;
+ struct mutex lock;
+ struct sk_buff *tx_skb;
+ struct work_struct tx_work;
+ struct work_struct setrx_work;
+ struct work_struct restart_work;
+ u8 bank; /* current register bank selected */
+ u16 next_pk_ptr; /* next packet pointer within FIFO */
+ u16 max_pk_counter; /* statistics: max packet counter */
+ u16 tx_retry_count;
+ bool hw_enable;
+ bool full_duplex;
+ int rxfilter;
+ u32 msg_enable;
+ u8 spi_transfer_buf[SPI_TRANSFER_BUF_LEN];
+};
+
+/* use ethtool to change the level for any given device */
+static struct {
+ u32 msg_enable;
+} debug = { -1 };
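+/* -1 makes netif_msg_init() fall back to ENC28J60_MSG_DEFAULT; the level
+ * can be changed at runtime with e.g. 'ethtool -s <iface> msglvl <mask>'.
+ */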
+
+/*
+ * SPI read buffer
+ * Wait for the SPI transfer and copy received data to destination.
+ */
+static int
+spi_read_buf(struct enc28j60_net *priv, int len, u8 *data)
+{
+ struct device *dev = &priv->spi->dev;
+ u8 *rx_buf = priv->spi_transfer_buf + 4;
+ u8 *tx_buf = priv->spi_transfer_buf;
+ struct spi_transfer tx = {
+ .tx_buf = tx_buf,
+ .len = SPI_OPLEN,
+ };
+ struct spi_transfer rx = {
+ .rx_buf = rx_buf,
+ .len = len,
+ };
+ struct spi_message msg;
+ int ret;
+
+ tx_buf[0] = ENC28J60_READ_BUF_MEM;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&tx, &msg);
+ spi_message_add_tail(&rx, &msg);
+
+ ret = spi_sync(priv->spi, &msg);
+ if (ret == 0) {
+ memcpy(data, rx_buf, len);
+ ret = msg.status;
+ }
+ if (ret && netif_msg_drv(priv))
+ dev_printk(KERN_DEBUG, dev, "%s() failed: ret = %d\n",
+ __func__, ret);
+
+ return ret;
+}
+
+/*
+ * SPI write buffer
+ */
+static int spi_write_buf(struct enc28j60_net *priv, int len, const u8 *data)
+{
+ struct device *dev = &priv->spi->dev;
+ int ret;
+
+ if (len > SPI_TRANSFER_BUF_LEN - 1 || len <= 0)
+ ret = -EINVAL;
+ else {
+ priv->spi_transfer_buf[0] = ENC28J60_WRITE_BUF_MEM;
+ memcpy(&priv->spi_transfer_buf[1], data, len);
+ ret = spi_write(priv->spi, priv->spi_transfer_buf, len + 1);
+ if (ret && netif_msg_drv(priv))
+ dev_printk(KERN_DEBUG, dev, "%s() failed: ret = %d\n",
+ __func__, ret);
+ }
+ return ret;
+}
+
+/*
+ * basic SPI read operation
+ */
+static u8 spi_read_op(struct enc28j60_net *priv, u8 op, u8 addr)
+{
+ struct device *dev = &priv->spi->dev;
+ u8 tx_buf[2];
+ u8 rx_buf[4];
+ u8 val = 0;
+ int ret;
+ int slen = SPI_OPLEN;
+
+	/* MAC and MII registers (SPRD_MASK set) clock out a dummy byte
+	 * first, so read one extra byte for those. */
+ if (addr & SPRD_MASK)
+ slen++;
+
+ tx_buf[0] = op | (addr & ADDR_MASK);
+ ret = spi_write_then_read(priv->spi, tx_buf, 1, rx_buf, slen);
+ if (ret)
+ dev_printk(KERN_DEBUG, dev, "%s() failed: ret = %d\n",
+ __func__, ret);
+ else
+ val = rx_buf[slen - 1];
+
+ return val;
+}
+
+/*
+ * basic SPI write operation
+ */
+static int spi_write_op(struct enc28j60_net *priv, u8 op, u8 addr, u8 val)
+{
+ struct device *dev = &priv->spi->dev;
+ int ret;
+
+ priv->spi_transfer_buf[0] = op | (addr & ADDR_MASK);
+ priv->spi_transfer_buf[1] = val;
+ ret = spi_write(priv->spi, priv->spi_transfer_buf, 2);
+ if (ret && netif_msg_drv(priv))
+ dev_printk(KERN_DEBUG, dev, "%s() failed: ret = %d\n",
+ __func__, ret);
+ return ret;
+}
+
+static void enc28j60_soft_reset(struct enc28j60_net *priv)
+{
+ spi_write_op(priv, ENC28J60_SOFT_RESET, 0, ENC28J60_SOFT_RESET);
+	/* Errata workaround #1: the CLKRDY check is unreliable,
+	 * so delay at least 1 ms instead. */
+ udelay(2000);
+}
+
+/*
+ * select the current register bank if necessary
+ */
+static void enc28j60_set_bank(struct enc28j60_net *priv, u8 addr)
+{
+ u8 b = (addr & BANK_MASK) >> 5;
+
+ /* These registers (EIE, EIR, ESTAT, ECON2, ECON1)
+ * are present in all banks, no need to switch bank.
+ */
+ if (addr >= EIE && addr <= ECON1)
+ return;
+
+ /* Clear or set each bank selection bit as needed */
+ if ((b & ECON1_BSEL0) != (priv->bank & ECON1_BSEL0)) {
+ if (b & ECON1_BSEL0)
+ spi_write_op(priv, ENC28J60_BIT_FIELD_SET, ECON1,
+ ECON1_BSEL0);
+ else
+ spi_write_op(priv, ENC28J60_BIT_FIELD_CLR, ECON1,
+ ECON1_BSEL0);
+ }
+ if ((b & ECON1_BSEL1) != (priv->bank & ECON1_BSEL1)) {
+ if (b & ECON1_BSEL1)
+ spi_write_op(priv, ENC28J60_BIT_FIELD_SET, ECON1,
+ ECON1_BSEL1);
+ else
+ spi_write_op(priv, ENC28J60_BIT_FIELD_CLR, ECON1,
+ ECON1_BSEL1);
+ }
+ priv->bank = b;
+}
+
+/*
+ * Register access routines through the SPI bus.
+ * Every register access comes in two flavours:
+ * - nolock_xxx: caller needs to invoke mutex_lock, usually to access
+ *   more than one register atomically
+ * - locked_xxx: caller doesn't need to invoke mutex_lock, single access
+ *
+ * Some registers can be accessed through the bit field clear and
+ * bit field set to avoid a read modify write cycle.
+ */
+
+/*
+ * Register bit field Set
+ */
+static void nolock_reg_bfset(struct enc28j60_net *priv, u8 addr, u8 mask)
+{
+ enc28j60_set_bank(priv, addr);
+ spi_write_op(priv, ENC28J60_BIT_FIELD_SET, addr, mask);
+}
+
+static void locked_reg_bfset(struct enc28j60_net *priv, u8 addr, u8 mask)
+{
+ mutex_lock(&priv->lock);
+ nolock_reg_bfset(priv, addr, mask);
+ mutex_unlock(&priv->lock);
+}
+
+/*
+ * Register bit field Clear
+ */
+static void nolock_reg_bfclr(struct enc28j60_net *priv, u8 addr, u8 mask)
+{
+ enc28j60_set_bank(priv, addr);
+ spi_write_op(priv, ENC28J60_BIT_FIELD_CLR, addr, mask);
+}
+
+static void locked_reg_bfclr(struct enc28j60_net *priv, u8 addr, u8 mask)
+{
+ mutex_lock(&priv->lock);
+ nolock_reg_bfclr(priv, addr, mask);
+ mutex_unlock(&priv->lock);
+}
+
+/*
+ * Register byte read
+ */
+static int nolock_regb_read(struct enc28j60_net *priv, u8 address)
+{
+ enc28j60_set_bank(priv, address);
+ return spi_read_op(priv, ENC28J60_READ_CTRL_REG, address);
+}
+
+static int locked_regb_read(struct enc28j60_net *priv, u8 address)
+{
+ int ret;
+
+ mutex_lock(&priv->lock);
+ ret = nolock_regb_read(priv, address);
+ mutex_unlock(&priv->lock);
+
+ return ret;
+}
+
+/*
+ * Register word read
+ */
+static int nolock_regw_read(struct enc28j60_net *priv, u8 address)
+{
+ int rl, rh;
+
+ enc28j60_set_bank(priv, address);
+ rl = spi_read_op(priv, ENC28J60_READ_CTRL_REG, address);
+ rh = spi_read_op(priv, ENC28J60_READ_CTRL_REG, address + 1);
+
+ return (rh << 8) | rl;
+}
+
+static int locked_regw_read(struct enc28j60_net *priv, u8 address)
+{
+ int ret;
+
+ mutex_lock(&priv->lock);
+ ret = nolock_regw_read(priv, address);
+ mutex_unlock(&priv->lock);
+
+ return ret;
+}
+
+/*
+ * Register byte write
+ */
+static void nolock_regb_write(struct enc28j60_net *priv, u8 address, u8 data)
+{
+ enc28j60_set_bank(priv, address);
+ spi_write_op(priv, ENC28J60_WRITE_CTRL_REG, address, data);
+}
+
+static void locked_regb_write(struct enc28j60_net *priv, u8 address, u8 data)
+{
+ mutex_lock(&priv->lock);
+ nolock_regb_write(priv, address, data);
+ mutex_unlock(&priv->lock);
+}
+
+/*
+ * Register word write
+ */
+static void nolock_regw_write(struct enc28j60_net *priv, u8 address, u16 data)
+{
+ enc28j60_set_bank(priv, address);
+ spi_write_op(priv, ENC28J60_WRITE_CTRL_REG, address, (u8) data);
+ spi_write_op(priv, ENC28J60_WRITE_CTRL_REG, address + 1,
+ (u8) (data >> 8));
+}
+
+static void locked_regw_write(struct enc28j60_net *priv, u8 address, u16 data)
+{
+ mutex_lock(&priv->lock);
+ nolock_regw_write(priv, address, data);
+ mutex_unlock(&priv->lock);
+}
+
+/*
+ * Buffer memory read
+ * Select the starting address and execute a SPI buffer read.
+ */
+static void enc28j60_mem_read(struct enc28j60_net *priv, u16 addr, int len,
+ u8 *data)
+{
+ mutex_lock(&priv->lock);
+ nolock_regw_write(priv, ERDPTL, addr);
+#ifdef CONFIG_ENC28J60_WRITEVERIFY
+ if (netif_msg_drv(priv)) {
+ struct device *dev = &priv->spi->dev;
+ u16 reg;
+
+ reg = nolock_regw_read(priv, ERDPTL);
+ if (reg != addr)
+ dev_printk(KERN_DEBUG, dev,
+ "%s() error writing ERDPT (0x%04x - 0x%04x)\n",
+ __func__, reg, addr);
+ }
+#endif
+ spi_read_buf(priv, len, data);
+ mutex_unlock(&priv->lock);
+}
+
+/*
+ * Write packet to enc28j60 TX buffer memory
+ */
+static void
+enc28j60_packet_write(struct enc28j60_net *priv, int len, const u8 *data)
+{
+ struct device *dev = &priv->spi->dev;
+
+ mutex_lock(&priv->lock);
+ /* Set the write pointer to start of transmit buffer area */
+ nolock_regw_write(priv, EWRPTL, TXSTART_INIT);
+#ifdef CONFIG_ENC28J60_WRITEVERIFY
+ if (netif_msg_drv(priv)) {
+ u16 reg;
+ reg = nolock_regw_read(priv, EWRPTL);
+ if (reg != TXSTART_INIT)
+ dev_printk(KERN_DEBUG, dev,
+ "%s() ERWPT:0x%04x != 0x%04x\n",
+ __func__, reg, TXSTART_INIT);
+ }
+#endif
+ /* Set the TXND pointer to correspond to the packet size given */
+ nolock_regw_write(priv, ETXNDL, TXSTART_INIT + len);
+	/* write the per-packet control byte (0x00: use the MACON3 settings) */
+ spi_write_op(priv, ENC28J60_WRITE_BUF_MEM, 0, 0x00);
+ if (netif_msg_hw(priv))
+ dev_printk(KERN_DEBUG, dev,
+ "%s() after control byte ERWPT:0x%04x\n",
+ __func__, nolock_regw_read(priv, EWRPTL));
+ /* copy the packet into the transmit buffer */
+ spi_write_buf(priv, len, data);
+ if (netif_msg_hw(priv))
+ dev_printk(KERN_DEBUG, dev,
+ "%s() after write packet ERWPT:0x%04x, len=%d\n",
+ __func__, nolock_regw_read(priv, EWRPTL), len);
+ mutex_unlock(&priv->lock);
+}
+
+static int poll_ready(struct enc28j60_net *priv, u8 reg, u8 mask, u8 val)
+{
+ struct device *dev = &priv->spi->dev;
+ unsigned long timeout = jiffies + msecs_to_jiffies(20);
+
+ /* 20 msec timeout read */
+ while ((nolock_regb_read(priv, reg) & mask) != val) {
+ if (time_after(jiffies, timeout)) {
+ if (netif_msg_drv(priv))
+ dev_dbg(dev, "reg %02x ready timeout!\n", reg);
+ return -ETIMEDOUT;
+ }
+ cpu_relax();
+ }
+ return 0;
+}
+
+/*
+ * Wait until the PHY operation is complete.
+ * Returns 1 if the PHY is ready, 0 if the poll timed out.
+ */
+static int wait_phy_ready(struct enc28j60_net *priv)
+{
+ return poll_ready(priv, MISTAT, MISTAT_BUSY, 0) ? 0 : 1;
+}
+
+/*
+ * PHY register read
+ * PHY registers are not accessed directly, but through the MII.
+ */
+static u16 enc28j60_phy_read(struct enc28j60_net *priv, u8 address)
+{
+ u16 ret;
+
+ mutex_lock(&priv->lock);
+ /* set the PHY register address */
+ nolock_regb_write(priv, MIREGADR, address);
+ /* start the register read operation */
+ nolock_regb_write(priv, MICMD, MICMD_MIIRD);
+ /* wait until the PHY read completes */
+ wait_phy_ready(priv);
+ /* quit reading */
+ nolock_regb_write(priv, MICMD, 0x00);
+ /* return the data */
+ ret = nolock_regw_read(priv, MIRDL);
+ mutex_unlock(&priv->lock);
+
+ return ret;
+}
+
+static int enc28j60_phy_write(struct enc28j60_net *priv, u8 address, u16 data)
+{
+ int ret;
+
+ mutex_lock(&priv->lock);
+ /* set the PHY register address */
+ nolock_regb_write(priv, MIREGADR, address);
+ /* write the PHY data */
+ nolock_regw_write(priv, MIWRL, data);
+ /* wait until the PHY write completes and return */
+ ret = wait_phy_ready(priv);
+ mutex_unlock(&priv->lock);
+
+ return ret;
+}
+
+/*
+ * Program the hardware MAC address from dev->dev_addr.
+ */
+static int enc28j60_set_hw_macaddr(struct net_device *ndev)
+{
+ int ret;
+ struct enc28j60_net *priv = netdev_priv(ndev);
+ struct device *dev = &priv->spi->dev;
+
+ mutex_lock(&priv->lock);
+ if (!priv->hw_enable) {
+ if (netif_msg_drv(priv))
+ dev_info(dev, "%s: Setting MAC address to %pM\n",
+ ndev->name, ndev->dev_addr);
+ /* NOTE: MAC address in ENC28J60 is byte-backward */
+ nolock_regb_write(priv, MAADR5, ndev->dev_addr[0]);
+ nolock_regb_write(priv, MAADR4, ndev->dev_addr[1]);
+ nolock_regb_write(priv, MAADR3, ndev->dev_addr[2]);
+ nolock_regb_write(priv, MAADR2, ndev->dev_addr[3]);
+ nolock_regb_write(priv, MAADR1, ndev->dev_addr[4]);
+ nolock_regb_write(priv, MAADR0, ndev->dev_addr[5]);
+ ret = 0;
+ } else {
+ if (netif_msg_drv(priv))
+ dev_printk(KERN_DEBUG, dev,
+ "%s() Hardware must be disabled to set Mac address\n",
+ __func__);
+ ret = -EBUSY;
+ }
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+/*
+ * Store the new hardware address in dev->dev_addr, and update the MAC.
+ */
+static int enc28j60_set_mac_address(struct net_device *dev, void *addr)
+{
+ struct sockaddr *address = addr;
+
+ if (netif_running(dev))
+ return -EBUSY;
+ if (!is_valid_ether_addr(address->sa_data))
+ return -EADDRNOTAVAIL;
+
+ eth_hw_addr_set(dev, address->sa_data);
+ return enc28j60_set_hw_macaddr(dev);
+}
+
+/*
+ * Debug routine to dump useful register contents
+ */
+static void enc28j60_dump_regs(struct enc28j60_net *priv, const char *msg)
+{
+ struct device *dev = &priv->spi->dev;
+
+ mutex_lock(&priv->lock);
+ dev_printk(KERN_DEBUG, dev,
+ " %s\n"
+ "HwRevID: 0x%02x\n"
+ "Cntrl: ECON1 ECON2 ESTAT EIR EIE\n"
+ " 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x\n"
+ "MAC : MACON1 MACON3 MACON4\n"
+ " 0x%02x 0x%02x 0x%02x\n"
+ "Rx : ERXST ERXND ERXWRPT ERXRDPT ERXFCON EPKTCNT MAMXFL\n"
+ " 0x%04x 0x%04x 0x%04x 0x%04x "
+ "0x%02x 0x%02x 0x%04x\n"
+ "Tx : ETXST ETXND MACLCON1 MACLCON2 MAPHSUP\n"
+ " 0x%04x 0x%04x 0x%02x 0x%02x 0x%02x\n",
+ msg, nolock_regb_read(priv, EREVID),
+ nolock_regb_read(priv, ECON1), nolock_regb_read(priv, ECON2),
+ nolock_regb_read(priv, ESTAT), nolock_regb_read(priv, EIR),
+ nolock_regb_read(priv, EIE), nolock_regb_read(priv, MACON1),
+ nolock_regb_read(priv, MACON3), nolock_regb_read(priv, MACON4),
+ nolock_regw_read(priv, ERXSTL), nolock_regw_read(priv, ERXNDL),
+ nolock_regw_read(priv, ERXWRPTL),
+ nolock_regw_read(priv, ERXRDPTL),
+ nolock_regb_read(priv, ERXFCON),
+ nolock_regb_read(priv, EPKTCNT),
+ nolock_regw_read(priv, MAMXFLL), nolock_regw_read(priv, ETXSTL),
+ nolock_regw_read(priv, ETXNDL),
+ nolock_regb_read(priv, MACLCON1),
+ nolock_regb_read(priv, MACLCON2),
+ nolock_regb_read(priv, MAPHSUP));
+ mutex_unlock(&priv->lock);
+}
+
+/*
+ * ERXRDPT must always be written with an odd address; refer to the errata datasheet
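+ * (e.g. when next_packet_ptr - 1 wraps below the window start, ERXRDPT
+ * is parked at the odd 'end' address instead)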
+ */
+static u16 erxrdpt_workaround(u16 next_packet_ptr, u16 start, u16 end)
+{
+ u16 erxrdpt;
+
+ if ((next_packet_ptr - 1 < start) || (next_packet_ptr - 1 > end))
+ erxrdpt = end;
+ else
+ erxrdpt = next_packet_ptr - 1;
+
+ return erxrdpt;
+}
+
+/*
+ * Calculate wrap around when reading beyond the end of the RX buffer
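+ * (the frame data begins RSV_SIZE bytes past the packet pointer, after
+ * the next-packet pointer and receive status vector)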
+ */
+static u16 rx_packet_start(u16 ptr)
+{
+ if (ptr + RSV_SIZE > RXEND_INIT)
+ return (ptr + RSV_SIZE) - (RXEND_INIT - RXSTART_INIT + 1);
+ else
+ return ptr + RSV_SIZE;
+}
+
+static void nolock_rxfifo_init(struct enc28j60_net *priv, u16 start, u16 end)
+{
+ struct device *dev = &priv->spi->dev;
+ u16 erxrdpt;
+
+ if (start > 0x1FFF || end > 0x1FFF || start > end) {
+ if (netif_msg_drv(priv))
+ dev_err(dev, "%s(%d, %d) RXFIFO bad parameters!\n",
+ __func__, start, end);
+ return;
+ }
+ /* set receive buffer start + end */
+ priv->next_pk_ptr = start;
+ nolock_regw_write(priv, ERXSTL, start);
+ erxrdpt = erxrdpt_workaround(priv->next_pk_ptr, start, end);
+ nolock_regw_write(priv, ERXRDPTL, erxrdpt);
+ nolock_regw_write(priv, ERXNDL, end);
+}
+
+static void nolock_txfifo_init(struct enc28j60_net *priv, u16 start, u16 end)
+{
+ struct device *dev = &priv->spi->dev;
+
+ if (start > 0x1FFF || end > 0x1FFF || start > end) {
+ if (netif_msg_drv(priv))
+ dev_err(dev, "%s(%d, %d) TXFIFO bad parameters!\n",
+ __func__, start, end);
+ return;
+ }
+ /* set transmit buffer start + end */
+ nolock_regw_write(priv, ETXSTL, start);
+ nolock_regw_write(priv, ETXNDL, end);
+}
+
+/*
+ * Low power mode shrinks power consumption about 100x, so we'd like
+ * the chip to be in that mode whenever it's inactive. (However, we
+ * can't stay in low power mode during suspend with WOL active.)
+ */
+static void enc28j60_lowpower(struct enc28j60_net *priv, bool is_low)
+{
+ struct device *dev = &priv->spi->dev;
+
+ if (netif_msg_drv(priv))
+ dev_dbg(dev, "%s power...\n", is_low ? "low" : "high");
+
+ mutex_lock(&priv->lock);
+ if (is_low) {
+ nolock_reg_bfclr(priv, ECON1, ECON1_RXEN);
+ poll_ready(priv, ESTAT, ESTAT_RXBUSY, 0);
+ poll_ready(priv, ECON1, ECON1_TXRTS, 0);
+ /* ECON2_VRPS was set during initialization */
+ nolock_reg_bfset(priv, ECON2, ECON2_PWRSV);
+ } else {
+ nolock_reg_bfclr(priv, ECON2, ECON2_PWRSV);
+ poll_ready(priv, ESTAT, ESTAT_CLKRDY, ESTAT_CLKRDY);
+ /* caller sets ECON1_RXEN */
+ }
+ mutex_unlock(&priv->lock);
+}
+
+static int enc28j60_hw_init(struct enc28j60_net *priv)
+{
+ struct device *dev = &priv->spi->dev;
+ u8 reg;
+
+ if (netif_msg_drv(priv))
+ dev_printk(KERN_DEBUG, dev, "%s() - %s\n", __func__,
+ priv->full_duplex ? "FullDuplex" : "HalfDuplex");
+
+ mutex_lock(&priv->lock);
+ /* first reset the chip */
+ enc28j60_soft_reset(priv);
+ /* Clear ECON1 */
+ spi_write_op(priv, ENC28J60_WRITE_CTRL_REG, ECON1, 0x00);
+ priv->bank = 0;
+ priv->hw_enable = false;
+ priv->tx_retry_count = 0;
+ priv->max_pk_counter = 0;
+ priv->rxfilter = RXFILTER_NORMAL;
+ /* enable address auto increment and voltage regulator powersave */
+ nolock_regb_write(priv, ECON2, ECON2_AUTOINC | ECON2_VRPS);
+
+ nolock_rxfifo_init(priv, RXSTART_INIT, RXEND_INIT);
+ nolock_txfifo_init(priv, TXSTART_INIT, TXEND_INIT);
+ mutex_unlock(&priv->lock);
+
+ /*
+ * Check the RevID.
+	 * If it's 0x00 or 0xFF, the enc28j60 is probably not mounted or is
+	 * damaged.
+ */
+ reg = locked_regb_read(priv, EREVID);
+ if (netif_msg_drv(priv))
+ dev_info(dev, "chip RevID: 0x%02x\n", reg);
+ if (reg == 0x00 || reg == 0xff) {
+ if (netif_msg_drv(priv))
+ dev_printk(KERN_DEBUG, dev, "%s() Invalid RevId %d\n",
+ __func__, reg);
+ return 0;
+ }
+
+ /* default filter mode: (unicast OR broadcast) AND crc valid */
+ locked_regb_write(priv, ERXFCON,
+ ERXFCON_UCEN | ERXFCON_CRCEN | ERXFCON_BCEN);
+
+ /* enable MAC receive */
+ locked_regb_write(priv, MACON1,
+ MACON1_MARXEN | MACON1_TXPAUS | MACON1_RXPAUS);
+ /* enable automatic padding and CRC operations */
+ if (priv->full_duplex) {
+ locked_regb_write(priv, MACON3,
+ MACON3_PADCFG0 | MACON3_TXCRCEN |
+ MACON3_FRMLNEN | MACON3_FULDPX);
+ /* set inter-frame gap (non-back-to-back) */
+ locked_regb_write(priv, MAIPGL, 0x12);
+ /* set inter-frame gap (back-to-back) */
+ locked_regb_write(priv, MABBIPG, 0x15);
+ } else {
+ locked_regb_write(priv, MACON3,
+ MACON3_PADCFG0 | MACON3_TXCRCEN |
+ MACON3_FRMLNEN);
+ locked_regb_write(priv, MACON4, 1 << 6); /* DEFER bit */
+ /* set inter-frame gap (non-back-to-back) */
+ locked_regw_write(priv, MAIPGL, 0x0C12);
+ /* set inter-frame gap (back-to-back) */
+ locked_regb_write(priv, MABBIPG, 0x12);
+ }
+ /*
+ * MACLCON1 (default)
+ * MACLCON2 (default)
+ * Set the maximum packet size which the controller will accept.
+ */
+ locked_regw_write(priv, MAMXFLL, MAX_FRAMELEN);
+
+ /* Configure LEDs */
+ if (!enc28j60_phy_write(priv, PHLCON, ENC28J60_LAMPS_MODE))
+ return 0;
+
+ if (priv->full_duplex) {
+ if (!enc28j60_phy_write(priv, PHCON1, PHCON1_PDPXMD))
+ return 0;
+ if (!enc28j60_phy_write(priv, PHCON2, 0x00))
+ return 0;
+ } else {
+ if (!enc28j60_phy_write(priv, PHCON1, 0x00))
+ return 0;
+ if (!enc28j60_phy_write(priv, PHCON2, PHCON2_HDLDIS))
+ return 0;
+ }
+ if (netif_msg_hw(priv))
+ enc28j60_dump_regs(priv, "Hw initialized.");
+
+ return 1;
+}
+
+static void enc28j60_hw_enable(struct enc28j60_net *priv)
+{
+ struct device *dev = &priv->spi->dev;
+
+ /* enable interrupts */
+ if (netif_msg_hw(priv))
+ dev_printk(KERN_DEBUG, dev, "%s() enabling interrupts.\n",
+ __func__);
+
+ enc28j60_phy_write(priv, PHIE, PHIE_PGEIE | PHIE_PLNKIE);
+
+ mutex_lock(&priv->lock);
+ nolock_reg_bfclr(priv, EIR, EIR_DMAIF | EIR_LINKIF |
+ EIR_TXIF | EIR_TXERIF | EIR_RXERIF | EIR_PKTIF);
+ nolock_regb_write(priv, EIE, EIE_INTIE | EIE_PKTIE | EIE_LINKIE |
+ EIE_TXIE | EIE_TXERIE | EIE_RXERIE);
+
+ /* enable receive logic */
+ nolock_reg_bfset(priv, ECON1, ECON1_RXEN);
+ priv->hw_enable = true;
+ mutex_unlock(&priv->lock);
+}
+
+static void enc28j60_hw_disable(struct enc28j60_net *priv)
+{
+ mutex_lock(&priv->lock);
+ /* disable interrupts and packet reception */
+ nolock_regb_write(priv, EIE, 0x00);
+ nolock_reg_bfclr(priv, ECON1, ECON1_RXEN);
+ priv->hw_enable = false;
+ mutex_unlock(&priv->lock);
+}
+
+static int
+enc28j60_setlink(struct net_device *ndev, u8 autoneg, u16 speed, u8 duplex)
+{
+ struct enc28j60_net *priv = netdev_priv(ndev);
+ int ret = 0;
+
+ if (!priv->hw_enable) {
+		/* the chip is in low-power mode now; the duplex setting
+		 * will take effect on the next enc28j60_hw_init().
+ */
+ if (autoneg == AUTONEG_DISABLE && speed == SPEED_10)
+ priv->full_duplex = (duplex == DUPLEX_FULL);
+ else {
+ if (netif_msg_link(priv))
+ netdev_warn(ndev, "unsupported link setting\n");
+ ret = -EOPNOTSUPP;
+ }
+ } else {
+ if (netif_msg_link(priv))
+ netdev_warn(ndev, "Warning: hw must be disabled to set link mode\n");
+ ret = -EBUSY;
+ }
+ return ret;
+}
+
+/*
+ * Read the Transmit Status Vector
+ */
+static void enc28j60_read_tsv(struct enc28j60_net *priv, u8 tsv[TSV_SIZE])
+{
+ struct device *dev = &priv->spi->dev;
+ int endptr;
+
+ endptr = locked_regw_read(priv, ETXNDL);
+ if (netif_msg_hw(priv))
+ dev_printk(KERN_DEBUG, dev, "reading TSV at addr:0x%04x\n",
+ endptr + 1);
+ enc28j60_mem_read(priv, endptr + 1, TSV_SIZE, tsv);
+}
+
+static void enc28j60_dump_tsv(struct enc28j60_net *priv, const char *msg,
+ u8 tsv[TSV_SIZE])
+{
+ struct device *dev = &priv->spi->dev;
+ u16 tmp1, tmp2;
+
+ dev_printk(KERN_DEBUG, dev, "%s - TSV:\n", msg);
+ tmp1 = tsv[1];
+ tmp1 <<= 8;
+ tmp1 |= tsv[0];
+
+ tmp2 = tsv[5];
+ tmp2 <<= 8;
+ tmp2 |= tsv[4];
+
+ dev_printk(KERN_DEBUG, dev,
+ "ByteCount: %d, CollisionCount: %d, TotByteOnWire: %d\n",
+ tmp1, tsv[2] & 0x0f, tmp2);
+ dev_printk(KERN_DEBUG, dev,
+ "TxDone: %d, CRCErr:%d, LenChkErr: %d, LenOutOfRange: %d\n",
+ TSV_GETBIT(tsv, TSV_TXDONE),
+ TSV_GETBIT(tsv, TSV_TXCRCERROR),
+ TSV_GETBIT(tsv, TSV_TXLENCHKERROR),
+ TSV_GETBIT(tsv, TSV_TXLENOUTOFRANGE));
+ dev_printk(KERN_DEBUG, dev,
+ "Multicast: %d, Broadcast: %d, PacketDefer: %d, ExDefer: %d\n",
+ TSV_GETBIT(tsv, TSV_TXMULTICAST),
+ TSV_GETBIT(tsv, TSV_TXBROADCAST),
+ TSV_GETBIT(tsv, TSV_TXPACKETDEFER),
+ TSV_GETBIT(tsv, TSV_TXEXDEFER));
+ dev_printk(KERN_DEBUG, dev,
+ "ExCollision: %d, LateCollision: %d, Giant: %d, Underrun: %d\n",
+ TSV_GETBIT(tsv, TSV_TXEXCOLLISION),
+ TSV_GETBIT(tsv, TSV_TXLATECOLLISION),
+ TSV_GETBIT(tsv, TSV_TXGIANT), TSV_GETBIT(tsv, TSV_TXUNDERRUN));
+ dev_printk(KERN_DEBUG, dev,
+ "ControlFrame: %d, PauseFrame: %d, BackPressApp: %d, VLanTagFrame: %d\n",
+ TSV_GETBIT(tsv, TSV_TXCONTROLFRAME),
+ TSV_GETBIT(tsv, TSV_TXPAUSEFRAME),
+ TSV_GETBIT(tsv, TSV_BACKPRESSUREAPP),
+ TSV_GETBIT(tsv, TSV_TXVLANTAGFRAME));
+}
+
+/*
+ * Receive Status vector
+ */
+static void enc28j60_dump_rsv(struct enc28j60_net *priv, const char *msg,
+ u16 pk_ptr, int len, u16 sts)
+{
+ struct device *dev = &priv->spi->dev;
+
+ dev_printk(KERN_DEBUG, dev, "%s - NextPk: 0x%04x - RSV:\n", msg, pk_ptr);
+ dev_printk(KERN_DEBUG, dev, "ByteCount: %d, DribbleNibble: %d\n",
+ len, RSV_GETBIT(sts, RSV_DRIBBLENIBBLE));
+ dev_printk(KERN_DEBUG, dev,
+ "RxOK: %d, CRCErr:%d, LenChkErr: %d, LenOutOfRange: %d\n",
+ RSV_GETBIT(sts, RSV_RXOK),
+ RSV_GETBIT(sts, RSV_CRCERROR),
+ RSV_GETBIT(sts, RSV_LENCHECKERR),
+ RSV_GETBIT(sts, RSV_LENOUTOFRANGE));
+ dev_printk(KERN_DEBUG, dev,
+ "Multicast: %d, Broadcast: %d, LongDropEvent: %d, CarrierEvent: %d\n",
+ RSV_GETBIT(sts, RSV_RXMULTICAST),
+ RSV_GETBIT(sts, RSV_RXBROADCAST),
+ RSV_GETBIT(sts, RSV_RXLONGEVDROPEV),
+ RSV_GETBIT(sts, RSV_CARRIEREV));
+ dev_printk(KERN_DEBUG, dev,
+ "ControlFrame: %d, PauseFrame: %d, UnknownOp: %d, VLanTagFrame: %d\n",
+ RSV_GETBIT(sts, RSV_RXCONTROLFRAME),
+ RSV_GETBIT(sts, RSV_RXPAUSEFRAME),
+ RSV_GETBIT(sts, RSV_RXUNKNOWNOPCODE),
+ RSV_GETBIT(sts, RSV_RXTYPEVLAN));
+}
+
+static void dump_packet(const char *msg, int len, const char *data)
+{
+ printk(KERN_DEBUG DRV_NAME ": %s - packet len:%d\n", msg, len);
+ print_hex_dump(KERN_DEBUG, "pk data: ", DUMP_PREFIX_OFFSET, 16, 1,
+ data, len, true);
+}
+
+/*
+ * Hardware receive function.
+ * Read the buffer memory, update the FIFO pointer to free the buffer,
+ * check the status vector and decrement the packet counter.
+ */
+static void enc28j60_hw_rx(struct net_device *ndev)
+{
+ struct enc28j60_net *priv = netdev_priv(ndev);
+ struct device *dev = &priv->spi->dev;
+ struct sk_buff *skb = NULL;
+ u16 erxrdpt, next_packet, rxstat;
+ u8 rsv[RSV_SIZE];
+ int len;
+
+ if (netif_msg_rx_status(priv))
+ netdev_printk(KERN_DEBUG, ndev, "RX pk_addr:0x%04x\n",
+ priv->next_pk_ptr);
+
+ if (unlikely(priv->next_pk_ptr > RXEND_INIT)) {
+ if (netif_msg_rx_err(priv))
+ netdev_err(ndev, "%s() Invalid packet address!! 0x%04x\n",
+ __func__, priv->next_pk_ptr);
+ /* packet address corrupted: reset RX logic */
+ mutex_lock(&priv->lock);
+ nolock_reg_bfclr(priv, ECON1, ECON1_RXEN);
+ nolock_reg_bfset(priv, ECON1, ECON1_RXRST);
+ nolock_reg_bfclr(priv, ECON1, ECON1_RXRST);
+ nolock_rxfifo_init(priv, RXSTART_INIT, RXEND_INIT);
+ nolock_reg_bfclr(priv, EIR, EIR_RXERIF);
+ nolock_reg_bfset(priv, ECON1, ECON1_RXEN);
+ mutex_unlock(&priv->lock);
+ ndev->stats.rx_errors++;
+ return;
+ }
+ /* Read next packet pointer and rx status vector */
+ enc28j60_mem_read(priv, priv->next_pk_ptr, sizeof(rsv), rsv);
+
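+	/* RSV layout, little-endian: bytes 0-1 hold the next packet pointer,
+	 * bytes 2-3 the frame byte count, bytes 4-5 the status bits.
+	 */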
+ next_packet = rsv[1];
+ next_packet <<= 8;
+ next_packet |= rsv[0];
+
+ len = rsv[3];
+ len <<= 8;
+ len |= rsv[2];
+
+ rxstat = rsv[5];
+ rxstat <<= 8;
+ rxstat |= rsv[4];
+
+ if (netif_msg_rx_status(priv))
+ enc28j60_dump_rsv(priv, __func__, next_packet, len, rxstat);
+
+ if (!RSV_GETBIT(rxstat, RSV_RXOK) || len > MAX_FRAMELEN) {
+ if (netif_msg_rx_err(priv))
+ netdev_err(ndev, "Rx Error (%04x)\n", rxstat);
+ ndev->stats.rx_errors++;
+ if (RSV_GETBIT(rxstat, RSV_CRCERROR))
+ ndev->stats.rx_crc_errors++;
+ if (RSV_GETBIT(rxstat, RSV_LENCHECKERR))
+ ndev->stats.rx_frame_errors++;
+ if (len > MAX_FRAMELEN)
+ ndev->stats.rx_over_errors++;
+ } else {
+ skb = netdev_alloc_skb(ndev, len + NET_IP_ALIGN);
+ if (!skb) {
+ if (netif_msg_rx_err(priv))
+ netdev_err(ndev, "out of memory for Rx'd frame\n");
+ ndev->stats.rx_dropped++;
+ } else {
+ skb_reserve(skb, NET_IP_ALIGN);
+ /* copy the packet from the receive buffer */
+ enc28j60_mem_read(priv,
+ rx_packet_start(priv->next_pk_ptr),
+ len, skb_put(skb, len));
+ if (netif_msg_pktdata(priv))
+ dump_packet(__func__, skb->len, skb->data);
+ skb->protocol = eth_type_trans(skb, ndev);
+ /* update statistics */
+ ndev->stats.rx_packets++;
+ ndev->stats.rx_bytes += len;
+ netif_rx(skb);
+ }
+ }
+ /*
+ * Move the RX read pointer to the start of the next
+ * received packet.
+ * This frees the memory we just read out.
+ */
+ erxrdpt = erxrdpt_workaround(next_packet, RXSTART_INIT, RXEND_INIT);
+ if (netif_msg_hw(priv))
+ dev_printk(KERN_DEBUG, dev, "%s() ERXRDPT:0x%04x\n",
+ __func__, erxrdpt);
+
+ mutex_lock(&priv->lock);
+ nolock_regw_write(priv, ERXRDPTL, erxrdpt);
+#ifdef CONFIG_ENC28J60_WRITEVERIFY
+ if (netif_msg_drv(priv)) {
+ u16 reg;
+ reg = nolock_regw_read(priv, ERXRDPTL);
+ if (reg != erxrdpt)
+ dev_printk(KERN_DEBUG, dev,
+ "%s() ERXRDPT verify error (0x%04x - 0x%04x)\n",
+ __func__, reg, erxrdpt);
+ }
+#endif
+ priv->next_pk_ptr = next_packet;
+ /* we are done with this packet, decrement the packet counter */
+ nolock_reg_bfset(priv, ECON2, ECON2_PKTDEC);
+ mutex_unlock(&priv->lock);
+}
+
+/*
+ * Calculate free space in RxFIFO
+ */
+static int enc28j60_get_free_rxfifo(struct enc28j60_net *priv)
+{
+ struct net_device *ndev = priv->netdev;
+ int epkcnt, erxst, erxnd, erxwr, erxrd;
+ int free_space;
+
+ mutex_lock(&priv->lock);
+ epkcnt = nolock_regb_read(priv, EPKTCNT);
+ if (epkcnt >= 255)
+ free_space = -1;
+ else {
+ erxst = nolock_regw_read(priv, ERXSTL);
+ erxnd = nolock_regw_read(priv, ERXNDL);
+ erxwr = nolock_regw_read(priv, ERXWRPTL);
+ erxrd = nolock_regw_read(priv, ERXRDPTL);
+
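+		/* Circular-FIFO arithmetic: when the write pointer is ahead
+		 * of the read pointer, subtract the used span from the ring
+		 * size; when it has wrapped behind it, the gap between them
+		 * minus one remains.
+		 */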
+ if (erxwr > erxrd)
+ free_space = (erxnd - erxst) - (erxwr - erxrd);
+ else if (erxwr == erxrd)
+ free_space = (erxnd - erxst);
+ else
+ free_space = erxrd - erxwr - 1;
+ }
+ mutex_unlock(&priv->lock);
+ if (netif_msg_rx_status(priv))
+ netdev_printk(KERN_DEBUG, ndev, "%s() free_space = %d\n",
+ __func__, free_space);
+ return free_space;
+}
+
+/*
+ * Access the PHY to determine link status
+ */
+static void enc28j60_check_link_status(struct net_device *ndev)
+{
+ struct enc28j60_net *priv = netdev_priv(ndev);
+ struct device *dev = &priv->spi->dev;
+ u16 reg;
+ int duplex;
+
+ reg = enc28j60_phy_read(priv, PHSTAT2);
+ if (netif_msg_hw(priv))
+ dev_printk(KERN_DEBUG, dev,
+ "%s() PHSTAT1: %04x, PHSTAT2: %04x\n", __func__,
+ enc28j60_phy_read(priv, PHSTAT1), reg);
+ duplex = reg & PHSTAT2_DPXSTAT;
+
+ if (reg & PHSTAT2_LSTAT) {
+ netif_carrier_on(ndev);
+ if (netif_msg_ifup(priv))
+ netdev_info(ndev, "link up - %s\n",
+ duplex ? "Full duplex" : "Half duplex");
+ } else {
+ if (netif_msg_ifdown(priv))
+ netdev_info(ndev, "link down\n");
+ netif_carrier_off(ndev);
+ }
+}
+
+static void enc28j60_tx_clear(struct net_device *ndev, bool err)
+{
+ struct enc28j60_net *priv = netdev_priv(ndev);
+
+ if (err)
+ ndev->stats.tx_errors++;
+ else
+ ndev->stats.tx_packets++;
+
+ if (priv->tx_skb) {
+ if (!err)
+ ndev->stats.tx_bytes += priv->tx_skb->len;
+ dev_kfree_skb(priv->tx_skb);
+ priv->tx_skb = NULL;
+ }
+ locked_reg_bfclr(priv, ECON1, ECON1_TXRTS);
+ netif_wake_queue(ndev);
+}
+
+/*
+ * RX handler
+ * Ignore PKTIF because it is unreliable (see the errata datasheet);
+ * checking EPKTCNT is the suggested workaround.
+ * We don't need to clear the interrupt flag: that is done automatically
+ * when enc28j60_hw_rx() decrements the packet counter.
+ * Returns how many packets were processed.
+ */
+static int enc28j60_rx_interrupt(struct net_device *ndev)
+{
+ struct enc28j60_net *priv = netdev_priv(ndev);
+ int pk_counter, ret;
+
+ pk_counter = locked_regb_read(priv, EPKTCNT);
+ if (pk_counter && netif_msg_intr(priv))
+ netdev_printk(KERN_DEBUG, ndev, "intRX, pk_cnt: %d\n",
+ pk_counter);
+ if (pk_counter > priv->max_pk_counter) {
+ /* update statistics */
+ priv->max_pk_counter = pk_counter;
+ if (netif_msg_rx_status(priv) && priv->max_pk_counter > 1)
+ netdev_printk(KERN_DEBUG, ndev, "RX max_pk_cnt: %d\n",
+ priv->max_pk_counter);
+ }
+ ret = pk_counter;
+ while (pk_counter-- > 0)
+ enc28j60_hw_rx(ndev);
+
+ return ret;
+}
+
+static irqreturn_t enc28j60_irq(int irq, void *dev_id)
+{
+ struct enc28j60_net *priv = dev_id;
+ struct net_device *ndev = priv->netdev;
+ int intflags, loop;
+
+ /* disable further interrupts */
+ locked_reg_bfclr(priv, EIE, EIE_INTIE);
+
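+	/* Service every pending source, then rescan EIR; exit only after a
+	 * pass that handled nothing, so events arriving while INTIE is
+	 * masked are not lost.
+	 */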
+ do {
+ loop = 0;
+ intflags = locked_regb_read(priv, EIR);
+ /* DMA interrupt handler (not currently used) */
+ if ((intflags & EIR_DMAIF) != 0) {
+ loop++;
+ if (netif_msg_intr(priv))
+ netdev_printk(KERN_DEBUG, ndev, "intDMA(%d)\n",
+ loop);
+ locked_reg_bfclr(priv, EIR, EIR_DMAIF);
+ }
+ /* LINK changed handler */
+ if ((intflags & EIR_LINKIF) != 0) {
+ loop++;
+ if (netif_msg_intr(priv))
+ netdev_printk(KERN_DEBUG, ndev, "intLINK(%d)\n",
+ loop);
+ enc28j60_check_link_status(ndev);
+ /* read PHIR to clear the flag */
+ enc28j60_phy_read(priv, PHIR);
+ }
+ /* TX complete handler */
+ if (((intflags & EIR_TXIF) != 0) &&
+ ((intflags & EIR_TXERIF) == 0)) {
+ bool err = false;
+ loop++;
+ if (netif_msg_intr(priv))
+ netdev_printk(KERN_DEBUG, ndev, "intTX(%d)\n",
+ loop);
+ priv->tx_retry_count = 0;
+ if (locked_regb_read(priv, ESTAT) & ESTAT_TXABRT) {
+ if (netif_msg_tx_err(priv))
+ netdev_err(ndev, "Tx Error (aborted)\n");
+ err = true;
+ }
+ if (netif_msg_tx_done(priv)) {
+ u8 tsv[TSV_SIZE];
+ enc28j60_read_tsv(priv, tsv);
+ enc28j60_dump_tsv(priv, "Tx Done", tsv);
+ }
+ enc28j60_tx_clear(ndev, err);
+ locked_reg_bfclr(priv, EIR, EIR_TXIF);
+ }
+ /* TX Error handler */
+ if ((intflags & EIR_TXERIF) != 0) {
+ u8 tsv[TSV_SIZE];
+
+ loop++;
+ if (netif_msg_intr(priv))
+ netdev_printk(KERN_DEBUG, ndev, "intTXErr(%d)\n",
+ loop);
+ locked_reg_bfclr(priv, ECON1, ECON1_TXRTS);
+ enc28j60_read_tsv(priv, tsv);
+ if (netif_msg_tx_err(priv))
+ enc28j60_dump_tsv(priv, "Tx Error", tsv);
+ /* Reset TX logic */
+ mutex_lock(&priv->lock);
+ nolock_reg_bfset(priv, ECON1, ECON1_TXRST);
+ nolock_reg_bfclr(priv, ECON1, ECON1_TXRST);
+ nolock_txfifo_init(priv, TXSTART_INIT, TXEND_INIT);
+ mutex_unlock(&priv->lock);
+ /* Transmit Late collision check for retransmit */
+ if (TSV_GETBIT(tsv, TSV_TXLATECOLLISION)) {
+ if (netif_msg_tx_err(priv))
+ netdev_printk(KERN_DEBUG, ndev,
+ "LateCollision TXErr (%d)\n",
+ priv->tx_retry_count);
+ if (priv->tx_retry_count++ < MAX_TX_RETRYCOUNT)
+ locked_reg_bfset(priv, ECON1,
+ ECON1_TXRTS);
+ else
+ enc28j60_tx_clear(ndev, true);
+ } else
+ enc28j60_tx_clear(ndev, true);
+ locked_reg_bfclr(priv, EIR, EIR_TXERIF | EIR_TXIF);
+ }
+ /* RX Error handler */
+ if ((intflags & EIR_RXERIF) != 0) {
+ loop++;
+ if (netif_msg_intr(priv))
+ netdev_printk(KERN_DEBUG, ndev, "intRXErr(%d)\n",
+ loop);
+ /* Check free FIFO space to flag RX overrun */
+ if (enc28j60_get_free_rxfifo(priv) <= 0) {
+ if (netif_msg_rx_err(priv))
+ netdev_printk(KERN_DEBUG, ndev, "RX Overrun\n");
+ ndev->stats.rx_dropped++;
+ }
+ locked_reg_bfclr(priv, EIR, EIR_RXERIF);
+ }
+ /* RX handler */
+ if (enc28j60_rx_interrupt(ndev))
+ loop++;
+ } while (loop);
+
+ /* re-enable interrupts */
+ locked_reg_bfset(priv, EIE, EIE_INTIE);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Hardware transmit function.
+ * Fill the buffer memory and send the contents of the transmit buffer
+ * onto the network
+ */
+static void enc28j60_hw_tx(struct enc28j60_net *priv)
+{
+ struct net_device *ndev = priv->netdev;
+
+ BUG_ON(!priv->tx_skb);
+
+ if (netif_msg_tx_queued(priv))
+ netdev_printk(KERN_DEBUG, ndev, "Tx Packet Len:%d\n",
+ priv->tx_skb->len);
+
+ if (netif_msg_pktdata(priv))
+ dump_packet(__func__,
+ priv->tx_skb->len, priv->tx_skb->data);
+ enc28j60_packet_write(priv, priv->tx_skb->len, priv->tx_skb->data);
+
+#ifdef CONFIG_ENC28J60_WRITEVERIFY
+ /* readback and verify written data */
+ if (netif_msg_drv(priv)) {
+ struct device *dev = &priv->spi->dev;
+ int test_len, k;
+ u8 test_buf[64]; /* limit the test to the first 64 bytes */
+ int okflag;
+
+ test_len = priv->tx_skb->len;
+ if (test_len > sizeof(test_buf))
+ test_len = sizeof(test_buf);
+
+ /* + 1 to skip control byte */
+ enc28j60_mem_read(priv, TXSTART_INIT + 1, test_len, test_buf);
+ okflag = 1;
+ for (k = 0; k < test_len; k++) {
+ if (priv->tx_skb->data[k] != test_buf[k]) {
+ dev_printk(KERN_DEBUG, dev,
+ "Error, %d location differ: 0x%02x-0x%02x\n",
+ k, priv->tx_skb->data[k], test_buf[k]);
+ okflag = 0;
+ }
+ }
+ if (!okflag)
+ dev_printk(KERN_DEBUG, dev, "Tx write buffer, verify ERROR!\n");
+ }
+#endif
+ /* set TX request flag */
+ locked_reg_bfset(priv, ECON1, ECON1_TXRTS);
+}
+
+static netdev_tx_t enc28j60_send_packet(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct enc28j60_net *priv = netdev_priv(dev);
+
+	/* If an error occurs while trying to transmit this packet, you
+	 * should return NETDEV_TX_BUSY from this function. In such a case
+	 * the SKB is still owned by the network queueing layer: you may
+	 * not modify any SKB fields, and you may not free the SKB.
+	 */
+ netif_stop_queue(dev);
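+	/* Only one TX frame is in flight at a time: the queue stays stopped
+	 * until the TX complete/error interrupt calls enc28j60_tx_clear(),
+	 * which wakes it. The SPI write may sleep, so it is deferred to the
+	 * tx_work handler.
+	 */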
+
+ /* Remember the skb for deferred processing */
+ priv->tx_skb = skb;
+ schedule_work(&priv->tx_work);
+
+ return NETDEV_TX_OK;
+}
+
+static void enc28j60_tx_work_handler(struct work_struct *work)
+{
+ struct enc28j60_net *priv =
+ container_of(work, struct enc28j60_net, tx_work);
+
+ /* actual delivery of data */
+ enc28j60_hw_tx(priv);
+}
+
+static void enc28j60_tx_timeout(struct net_device *ndev, unsigned int txqueue)
+{
+ struct enc28j60_net *priv = netdev_priv(ndev);
+
+ if (netif_msg_timer(priv))
+ netdev_err(ndev, "tx timeout\n");
+
+ ndev->stats.tx_errors++;
+ /* can't restart safely under softirq */
+ schedule_work(&priv->restart_work);
+}
+
+/*
+ * Open/initialize the board. This is called (in the current kernel)
+ * sometime after booting when the 'ifconfig' program is run.
+ *
+ * This routine should set everything up anew at each open, even
+ * registers that "should" only need to be set once at boot, so that
+ * there is a non-reboot way to recover if something goes wrong.
+ */
+static int enc28j60_net_open(struct net_device *dev)
+{
+ struct enc28j60_net *priv = netdev_priv(dev);
+
+ if (!is_valid_ether_addr(dev->dev_addr)) {
+ if (netif_msg_ifup(priv))
+ netdev_err(dev, "invalid MAC address %pM\n", dev->dev_addr);
+ return -EADDRNOTAVAIL;
+ }
+ /* Reset the hardware here (and take it out of low power mode) */
+ enc28j60_lowpower(priv, false);
+ enc28j60_hw_disable(priv);
+ if (!enc28j60_hw_init(priv)) {
+ if (netif_msg_ifup(priv))
+ netdev_err(dev, "hw_reset() failed\n");
+ return -EINVAL;
+ }
+ /* Update the MAC address (in case user has changed it) */
+ enc28j60_set_hw_macaddr(dev);
+ /* Enable interrupts */
+ enc28j60_hw_enable(priv);
+ /* check link status */
+ enc28j60_check_link_status(dev);
+ /* We are now ready to accept transmit requests from
+ * the queueing layer of the networking.
+ */
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+/* The inverse routine to net_open(). */
+static int enc28j60_net_close(struct net_device *dev)
+{
+ struct enc28j60_net *priv = netdev_priv(dev);
+
+ enc28j60_hw_disable(priv);
+ enc28j60_lowpower(priv, true);
+ netif_stop_queue(dev);
+
+ return 0;
+}
+
+/*
+ * Set or clear the multicast filter for this adapter:
+ * IFF_PROMISC set - promiscuous mode, receive all packets
+ * IFF_ALLMULTI set or multicast list non-empty - receive normal and MC packets
+ * otherwise - normal mode, filter out multicast packets
+ */
+static void enc28j60_set_multicast_list(struct net_device *dev)
+{
+ struct enc28j60_net *priv = netdev_priv(dev);
+ int oldfilter = priv->rxfilter;
+
+ if (dev->flags & IFF_PROMISC) {
+ if (netif_msg_link(priv))
+ netdev_info(dev, "promiscuous mode\n");
+ priv->rxfilter = RXFILTER_PROMISC;
+ } else if ((dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev)) {
+ if (netif_msg_link(priv))
+ netdev_info(dev, "%smulticast mode\n",
+ (dev->flags & IFF_ALLMULTI) ? "all-" : "");
+ priv->rxfilter = RXFILTER_MULTI;
+ } else {
+ if (netif_msg_link(priv))
+ netdev_info(dev, "normal mode\n");
+ priv->rxfilter = RXFILTER_NORMAL;
+ }
+
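+	/* ndo_set_rx_mode is called in atomic context, while the ERXFCON
+	 * update needs a (possibly sleeping) SPI transfer: defer it to the
+	 * setrx work handler.
+	 */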
+ if (oldfilter != priv->rxfilter)
+ schedule_work(&priv->setrx_work);
+}
+
+static void enc28j60_setrx_work_handler(struct work_struct *work)
+{
+ struct enc28j60_net *priv =
+ container_of(work, struct enc28j60_net, setrx_work);
+ struct device *dev = &priv->spi->dev;
+
+ if (priv->rxfilter == RXFILTER_PROMISC) {
+ if (netif_msg_drv(priv))
+ dev_printk(KERN_DEBUG, dev, "promiscuous mode\n");
+ locked_regb_write(priv, ERXFCON, 0x00);
+ } else if (priv->rxfilter == RXFILTER_MULTI) {
+ if (netif_msg_drv(priv))
+ dev_printk(KERN_DEBUG, dev, "multicast mode\n");
+ locked_regb_write(priv, ERXFCON,
+ ERXFCON_UCEN | ERXFCON_CRCEN |
+ ERXFCON_BCEN | ERXFCON_MCEN);
+ } else {
+ if (netif_msg_drv(priv))
+ dev_printk(KERN_DEBUG, dev, "normal mode\n");
+ locked_regb_write(priv, ERXFCON,
+ ERXFCON_UCEN | ERXFCON_CRCEN |
+ ERXFCON_BCEN);
+ }
+}
+
+static void enc28j60_restart_work_handler(struct work_struct *work)
+{
+ struct enc28j60_net *priv =
+ container_of(work, struct enc28j60_net, restart_work);
+ struct net_device *ndev = priv->netdev;
+ int ret;
+
+ rtnl_lock();
+ if (netif_running(ndev)) {
+ enc28j60_net_close(ndev);
+ ret = enc28j60_net_open(ndev);
+ if (unlikely(ret)) {
+ netdev_info(ndev, "could not restart %d\n", ret);
+ dev_close(ndev);
+ }
+ }
+ rtnl_unlock();
+}
+
+/* ......................... ETHTOOL SUPPORT ........................... */
+
+static void
+enc28j60_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info,
+ dev_name(dev->dev.parent), sizeof(info->bus_info));
+}
+
+static int
+enc28j60_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct enc28j60_net *priv = netdev_priv(dev);
+
+ ethtool_link_ksettings_zero_link_mode(cmd, supported);
+ ethtool_link_ksettings_add_link_mode(cmd, supported, 10baseT_Half);
+ ethtool_link_ksettings_add_link_mode(cmd, supported, 10baseT_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, supported, TP);
+
+ cmd->base.speed = SPEED_10;
+ cmd->base.duplex = priv->full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
+ cmd->base.port = PORT_TP;
+ cmd->base.autoneg = AUTONEG_DISABLE;
+
+ return 0;
+}
+
+static int
+enc28j60_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ return enc28j60_setlink(dev, cmd->base.autoneg,
+ cmd->base.speed, cmd->base.duplex);
+}
+
+static u32 enc28j60_get_msglevel(struct net_device *dev)
+{
+ struct enc28j60_net *priv = netdev_priv(dev);
+ return priv->msg_enable;
+}
+
+static void enc28j60_set_msglevel(struct net_device *dev, u32 val)
+{
+ struct enc28j60_net *priv = netdev_priv(dev);
+ priv->msg_enable = val;
+}
+
+static const struct ethtool_ops enc28j60_ethtool_ops = {
+ .get_drvinfo = enc28j60_get_drvinfo,
+ .get_msglevel = enc28j60_get_msglevel,
+ .set_msglevel = enc28j60_set_msglevel,
+ .get_link_ksettings = enc28j60_get_link_ksettings,
+ .set_link_ksettings = enc28j60_set_link_ksettings,
+};
+
+static int enc28j60_chipset_init(struct net_device *dev)
+{
+ struct enc28j60_net *priv = netdev_priv(dev);
+
+ return enc28j60_hw_init(priv);
+}
+
+static const struct net_device_ops enc28j60_netdev_ops = {
+ .ndo_open = enc28j60_net_open,
+ .ndo_stop = enc28j60_net_close,
+ .ndo_start_xmit = enc28j60_send_packet,
+ .ndo_set_rx_mode = enc28j60_set_multicast_list,
+ .ndo_set_mac_address = enc28j60_set_mac_address,
+ .ndo_tx_timeout = enc28j60_tx_timeout,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+static int enc28j60_probe(struct spi_device *spi)
+{
+ struct net_device *dev;
+ struct enc28j60_net *priv;
+ int ret = 0;
+
+ if (netif_msg_drv(&debug))
+ dev_info(&spi->dev, "Ethernet driver %s loaded\n", DRV_VERSION);
+
+ dev = alloc_etherdev(sizeof(struct enc28j60_net));
+ if (!dev) {
+ ret = -ENOMEM;
+ goto error_alloc;
+ }
+ priv = netdev_priv(dev);
+
+ priv->netdev = dev; /* priv to netdev reference */
+ priv->spi = spi; /* priv to spi reference */
+ priv->msg_enable = netif_msg_init(debug.msg_enable, ENC28J60_MSG_DEFAULT);
+ mutex_init(&priv->lock);
+ INIT_WORK(&priv->tx_work, enc28j60_tx_work_handler);
+ INIT_WORK(&priv->setrx_work, enc28j60_setrx_work_handler);
+ INIT_WORK(&priv->restart_work, enc28j60_restart_work_handler);
+ spi_set_drvdata(spi, priv); /* spi to priv reference */
+ SET_NETDEV_DEV(dev, &spi->dev);
+
+ if (!enc28j60_chipset_init(dev)) {
+ if (netif_msg_probe(priv))
+ dev_info(&spi->dev, "chip not found\n");
+ ret = -EIO;
+ goto error_irq;
+ }
+
+ if (device_get_ethdev_address(&spi->dev, dev))
+ eth_hw_addr_random(dev);
+ enc28j60_set_hw_macaddr(dev);
+
+ /* Board setup must set the relevant edge trigger type;
+ * level triggers won't currently work.
+ */
+ ret = request_threaded_irq(spi->irq, NULL, enc28j60_irq, IRQF_ONESHOT,
+ DRV_NAME, priv);
+ if (ret < 0) {
+ if (netif_msg_probe(priv))
+ dev_err(&spi->dev, "request irq %d failed (ret = %d)\n",
+ spi->irq, ret);
+ goto error_irq;
+ }
+
+ dev->if_port = IF_PORT_10BASET;
+ dev->irq = spi->irq;
+ dev->netdev_ops = &enc28j60_netdev_ops;
+ dev->watchdog_timeo = TX_TIMEOUT;
+ dev->ethtool_ops = &enc28j60_ethtool_ops;
+
+ enc28j60_lowpower(priv, true);
+
+ ret = register_netdev(dev);
+ if (ret) {
+ if (netif_msg_probe(priv))
+ dev_err(&spi->dev, "register netdev failed (ret = %d)\n",
+ ret);
+ goto error_register;
+ }
+
+ return 0;
+
+error_register:
+ free_irq(spi->irq, priv);
+error_irq:
+ free_netdev(dev);
+error_alloc:
+ return ret;
+}
+
+static void enc28j60_remove(struct spi_device *spi)
+{
+ struct enc28j60_net *priv = spi_get_drvdata(spi);
+
+ unregister_netdev(priv->netdev);
+ free_irq(spi->irq, priv);
+ free_netdev(priv->netdev);
+}
+
+static const struct of_device_id enc28j60_dt_ids[] = {
+ { .compatible = "microchip,enc28j60" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, enc28j60_dt_ids);
+
+static struct spi_driver enc28j60_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = enc28j60_dt_ids,
+ },
+ .probe = enc28j60_probe,
+ .remove = enc28j60_remove,
+};
+module_spi_driver(enc28j60_driver);
+
+MODULE_DESCRIPTION(DRV_NAME " ethernet driver");
+MODULE_AUTHOR("Claudio Lanconelli <lanconelli.claudio@eptar.com>");
+MODULE_LICENSE("GPL");
+module_param_named(debug, debug.msg_enable, int, 0);
+MODULE_PARM_DESC(debug, "Debug verbosity level in amount of bits set (0=none, ..., 31=all)");
+MODULE_ALIAS("spi:" DRV_NAME);
diff --git a/drivers/net/ethernet/microchip/enc28j60_hw.h b/drivers/net/ethernet/microchip/enc28j60_hw.h
new file mode 100644
index 0000000000..da4ab17252
--- /dev/null
+++ b/drivers/net/ethernet/microchip/enc28j60_hw.h
@@ -0,0 +1,310 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * enc28j60_hw.h: EDTP FrameThrower style enc28j60 registers
+ *
+ * $Id: enc28j60_hw.h,v 1.9 2007/12/14 11:59:16 claudio Exp $
+ */
+
+#ifndef _ENC28J60_HW_H
+#define _ENC28J60_HW_H
+
+/*
+ * ENC28J60 Control Registers
+ * Control register definitions are a combination of address,
+ * bank number, and Ethernet/MAC/PHY indicator bits.
+ * - Register address (bits 0-4)
+ * - Bank number (bits 5-6)
+ * - MAC/MII indicator (bit 7)
+ */
+#define ADDR_MASK 0x1F
+#define BANK_MASK 0x60
+#define SPRD_MASK 0x80
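+/* e.g. MACON1 below is (0x00|0x40|SPRD_MASK): register 0x00 in bank 2, a MAC
+ * register that returns a dummy byte before its value when read.
+ */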
+/* All-bank registers */
+#define EIE 0x1B
+#define EIR 0x1C
+#define ESTAT 0x1D
+#define ECON2 0x1E
+#define ECON1 0x1F
+/* Bank 0 registers */
+#define ERDPTL (0x00|0x00)
+#define ERDPTH (0x01|0x00)
+#define EWRPTL (0x02|0x00)
+#define EWRPTH (0x03|0x00)
+#define ETXSTL (0x04|0x00)
+#define ETXSTH (0x05|0x00)
+#define ETXNDL (0x06|0x00)
+#define ETXNDH (0x07|0x00)
+#define ERXSTL (0x08|0x00)
+#define ERXSTH (0x09|0x00)
+#define ERXNDL (0x0A|0x00)
+#define ERXNDH (0x0B|0x00)
+#define ERXRDPTL (0x0C|0x00)
+#define ERXRDPTH (0x0D|0x00)
+#define ERXWRPTL (0x0E|0x00)
+#define ERXWRPTH (0x0F|0x00)
+#define EDMASTL (0x10|0x00)
+#define EDMASTH (0x11|0x00)
+#define EDMANDL (0x12|0x00)
+#define EDMANDH (0x13|0x00)
+#define EDMADSTL (0x14|0x00)
+#define EDMADSTH (0x15|0x00)
+#define EDMACSL (0x16|0x00)
+#define EDMACSH (0x17|0x00)
+/* Bank 1 registers */
+#define EHT0 (0x00|0x20)
+#define EHT1 (0x01|0x20)
+#define EHT2 (0x02|0x20)
+#define EHT3 (0x03|0x20)
+#define EHT4 (0x04|0x20)
+#define EHT5 (0x05|0x20)
+#define EHT6 (0x06|0x20)
+#define EHT7 (0x07|0x20)
+#define EPMM0 (0x08|0x20)
+#define EPMM1 (0x09|0x20)
+#define EPMM2 (0x0A|0x20)
+#define EPMM3 (0x0B|0x20)
+#define EPMM4 (0x0C|0x20)
+#define EPMM5 (0x0D|0x20)
+#define EPMM6 (0x0E|0x20)
+#define EPMM7 (0x0F|0x20)
+#define EPMCSL (0x10|0x20)
+#define EPMCSH (0x11|0x20)
+#define EPMOL (0x14|0x20)
+#define EPMOH (0x15|0x20)
+#define EWOLIE (0x16|0x20)
+#define EWOLIR (0x17|0x20)
+#define ERXFCON (0x18|0x20)
+#define EPKTCNT (0x19|0x20)
+/* Bank 2 registers */
+#define MACON1 (0x00|0x40|SPRD_MASK)
+/* #define MACON2 (0x01|0x40|SPRD_MASK) */
+#define MACON3 (0x02|0x40|SPRD_MASK)
+#define MACON4 (0x03|0x40|SPRD_MASK)
+#define MABBIPG (0x04|0x40|SPRD_MASK)
+#define MAIPGL (0x06|0x40|SPRD_MASK)
+#define MAIPGH (0x07|0x40|SPRD_MASK)
+#define MACLCON1 (0x08|0x40|SPRD_MASK)
+#define MACLCON2 (0x09|0x40|SPRD_MASK)
+#define MAMXFLL (0x0A|0x40|SPRD_MASK)
+#define MAMXFLH (0x0B|0x40|SPRD_MASK)
+#define MAPHSUP (0x0D|0x40|SPRD_MASK)
+#define MICON (0x11|0x40|SPRD_MASK)
+#define MICMD (0x12|0x40|SPRD_MASK)
+#define MIREGADR (0x14|0x40|SPRD_MASK)
+#define MIWRL (0x16|0x40|SPRD_MASK)
+#define MIWRH (0x17|0x40|SPRD_MASK)
+#define MIRDL (0x18|0x40|SPRD_MASK)
+#define MIRDH (0x19|0x40|SPRD_MASK)
+/* Bank 3 registers */
+#define MAADR1 (0x00|0x60|SPRD_MASK)
+#define MAADR0 (0x01|0x60|SPRD_MASK)
+#define MAADR3 (0x02|0x60|SPRD_MASK)
+#define MAADR2 (0x03|0x60|SPRD_MASK)
+#define MAADR5 (0x04|0x60|SPRD_MASK)
+#define MAADR4 (0x05|0x60|SPRD_MASK)
+#define EBSTSD (0x06|0x60)
+#define EBSTCON (0x07|0x60)
+#define EBSTCSL (0x08|0x60)
+#define EBSTCSH (0x09|0x60)
+#define MISTAT (0x0A|0x60|SPRD_MASK)
+#define EREVID (0x12|0x60)
+#define ECOCON (0x15|0x60)
+#define EFLOCON (0x17|0x60)
+#define EPAUSL (0x18|0x60)
+#define EPAUSH (0x19|0x60)
+/* PHY registers */
+#define PHCON1 0x00
+#define PHSTAT1 0x01
+#define PHHID1 0x02
+#define PHHID2 0x03
+#define PHCON2 0x10
+#define PHSTAT2 0x11
+#define PHIE 0x12
+#define PHIR 0x13
+#define PHLCON 0x14
+
+/* ENC28J60 EIE Register Bit Definitions */
+#define EIE_INTIE 0x80
+#define EIE_PKTIE 0x40
+#define EIE_DMAIE 0x20
+#define EIE_LINKIE 0x10
+#define EIE_TXIE 0x08
+/* #define EIE_WOLIE 0x04 (reserved) */
+#define EIE_TXERIE 0x02
+#define EIE_RXERIE 0x01
+/* ENC28J60 EIR Register Bit Definitions */
+#define EIR_PKTIF 0x40
+#define EIR_DMAIF 0x20
+#define EIR_LINKIF 0x10
+#define EIR_TXIF 0x08
+/* #define EIR_WOLIF 0x04 (reserved) */
+#define EIR_TXERIF 0x02
+#define EIR_RXERIF 0x01
+/* ENC28J60 ESTAT Register Bit Definitions */
+#define ESTAT_INT 0x80
+#define ESTAT_LATECOL 0x10
+#define ESTAT_RXBUSY 0x04
+#define ESTAT_TXABRT 0x02
+#define ESTAT_CLKRDY 0x01
+/* ENC28J60 ECON2 Register Bit Definitions */
+#define ECON2_AUTOINC 0x80
+#define ECON2_PKTDEC 0x40
+#define ECON2_PWRSV 0x20
+#define ECON2_VRPS 0x08
+/* ENC28J60 ECON1 Register Bit Definitions */
+#define ECON1_TXRST 0x80
+#define ECON1_RXRST 0x40
+#define ECON1_DMAST 0x20
+#define ECON1_CSUMEN 0x10
+#define ECON1_TXRTS 0x08
+#define ECON1_RXEN 0x04
+#define ECON1_BSEL1 0x02
+#define ECON1_BSEL0 0x01
+/* ENC28J60 MACON1 Register Bit Definitions */
+#define MACON1_LOOPBK 0x10
+#define MACON1_TXPAUS 0x08
+#define MACON1_RXPAUS 0x04
+#define MACON1_PASSALL 0x02
+#define MACON1_MARXEN 0x01
+/* ENC28J60 MACON2 Register Bit Definitions */
+#define MACON2_MARST 0x80
+#define MACON2_RNDRST 0x40
+#define MACON2_MARXRST 0x08
+#define MACON2_RFUNRST 0x04
+#define MACON2_MATXRST 0x02
+#define MACON2_TFUNRST 0x01
+/* ENC28J60 MACON3 Register Bit Definitions */
+#define MACON3_PADCFG2 0x80
+#define MACON3_PADCFG1 0x40
+#define MACON3_PADCFG0 0x20
+#define MACON3_TXCRCEN 0x10
+#define MACON3_PHDRLEN 0x08
+#define MACON3_HFRMLEN 0x04
+#define MACON3_FRMLNEN 0x02
+#define MACON3_FULDPX 0x01
+/* ENC28J60 MICMD Register Bit Definitions */
+#define MICMD_MIISCAN 0x02
+#define MICMD_MIIRD 0x01
+/* ENC28J60 MISTAT Register Bit Definitions */
+#define MISTAT_NVALID 0x04
+#define MISTAT_SCAN 0x02
+#define MISTAT_BUSY 0x01
+/* ENC28J60 ERXFCON Register Bit Definitions */
+#define ERXFCON_UCEN 0x80
+#define ERXFCON_ANDOR 0x40
+#define ERXFCON_CRCEN 0x20
+#define ERXFCON_PMEN 0x10
+#define ERXFCON_MPEN 0x08
+#define ERXFCON_HTEN 0x04
+#define ERXFCON_MCEN 0x02
+#define ERXFCON_BCEN 0x01
+
+/* ENC28J60 PHY PHCON1 Register Bit Definitions */
+#define PHCON1_PRST 0x8000
+#define PHCON1_PLOOPBK 0x4000
+#define PHCON1_PPWRSV 0x0800
+#define PHCON1_PDPXMD 0x0100
+/* ENC28J60 PHY PHSTAT1 Register Bit Definitions */
+#define PHSTAT1_PFDPX 0x1000
+#define PHSTAT1_PHDPX 0x0800
+#define PHSTAT1_LLSTAT 0x0004
+#define PHSTAT1_JBSTAT 0x0002
+/* ENC28J60 PHY PHSTAT2 Register Bit Definitions */
+#define PHSTAT2_TXSTAT (1 << 13)
+#define PHSTAT2_RXSTAT (1 << 12)
+#define PHSTAT2_COLSTAT (1 << 11)
+#define PHSTAT2_LSTAT (1 << 10)
+#define PHSTAT2_DPXSTAT (1 << 9)
+#define PHSTAT2_PLRITY (1 << 5)
+/* ENC28J60 PHY PHCON2 Register Bit Definitions */
+#define PHCON2_FRCLINK 0x4000
+#define PHCON2_TXDIS 0x2000
+#define PHCON2_JABBER 0x0400
+#define PHCON2_HDLDIS 0x0100
+/* ENC28J60 PHY PHIE Register Bit Definitions */
+#define PHIE_PLNKIE (1 << 4)
+#define PHIE_PGEIE (1 << 1)
+/* ENC28J60 PHY PHIR Register Bit Definitions */
+#define PHIR_PLNKIF (1 << 4)
+#define PHIR_PGEIF (1 << 1)
+
+/* ENC28J60 Packet Control Byte Bit Definitions */
+#define PKTCTRL_PHUGEEN 0x08
+#define PKTCTRL_PPADEN 0x04
+#define PKTCTRL_PCRCEN 0x02
+#define PKTCTRL_POVERRIDE 0x01
+
+/* ENC28J60 Transmit Status Vector */
+#define TSV_TXBYTECNT 0
+#define TSV_TXCOLLISIONCNT 16
+#define TSV_TXCRCERROR 20
+#define TSV_TXLENCHKERROR 21
+#define TSV_TXLENOUTOFRANGE 22
+#define TSV_TXDONE 23
+#define TSV_TXMULTICAST 24
+#define TSV_TXBROADCAST 25
+#define TSV_TXPACKETDEFER 26
+#define TSV_TXEXDEFER 27
+#define TSV_TXEXCOLLISION 28
+#define TSV_TXLATECOLLISION 29
+#define TSV_TXGIANT 30
+#define TSV_TXUNDERRUN 31
+#define TSV_TOTBYTETXONWIRE 32
+#define TSV_TXCONTROLFRAME 48
+#define TSV_TXPAUSEFRAME 49
+#define TSV_BACKPRESSUREAPP 50
+#define TSV_TXVLANTAGFRAME 51
+
+#define TSV_SIZE 7
+#define TSV_BYTEOF(x) ((x) / 8)
+#define TSV_BITMASK(x) (1 << ((x) % 8))
+#define TSV_GETBIT(x, y) (((x)[TSV_BYTEOF(y)] & TSV_BITMASK(y)) ? 1 : 0)
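+/* Illustrative use: with a TSV_SIZE byte vector read back from the
+ * chip, TSV_GETBIT(tsv, TSV_TXDONE) tests bit 23, i.e. bit 7 of
+ * tsv[2], since TSV_BYTEOF(23) == 2 and TSV_BITMASK(23) == 0x80.
+ */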
+
+/* ENC28J60 Receive Status Vector */
+#define RSV_RXLONGEVDROPEV 16
+#define RSV_CARRIEREV 18
+#define RSV_CRCERROR 20
+#define RSV_LENCHECKERR 21
+#define RSV_LENOUTOFRANGE 22
+#define RSV_RXOK 23
+#define RSV_RXMULTICAST 24
+#define RSV_RXBROADCAST 25
+#define RSV_DRIBBLENIBBLE 26
+#define RSV_RXCONTROLFRAME 27
+#define RSV_RXPAUSEFRAME 28
+#define RSV_RXUNKNOWNOPCODE 29
+#define RSV_RXTYPEVLAN 30
+
+#define RSV_SIZE 6
+#define RSV_BITMASK(x) (1 << ((x) - 16))
+#define RSV_GETBIT(x, y) (((x) & RSV_BITMASK(y)) ? 1 : 0)
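+/* Illustrative use: these macros operate on the 16-bit word holding
+ * bits 16-31 of the RSV, so RSV_GETBIT(rxstat, RSV_RXOK) tests mask
+ * 1 << (23 - 16) == 0x80 within that word.
+ */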
+
+
+/* SPI operation codes */
+#define ENC28J60_READ_CTRL_REG 0x00
+#define ENC28J60_READ_BUF_MEM 0x3A
+#define ENC28J60_WRITE_CTRL_REG 0x40
+#define ENC28J60_WRITE_BUF_MEM 0x7A
+#define ENC28J60_BIT_FIELD_SET 0x80
+#define ENC28J60_BIT_FIELD_CLR 0xA0
+#define ENC28J60_SOFT_RESET 0xFF
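+/* A control-register command byte is the opcode ORed with the 5-bit
+ * register address, e.g. (ENC28J60_WRITE_CTRL_REG | (ECON1 & ADDR_MASK))
+ * yields 0x5F; buffer access and soft reset use the fixed bytes above.
+ */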
+
+
+/* Buffer boundaries applied to the internal 8K byte RAM:
+ * the entire available packet buffer space is allocated.
+ * Give the TX buffer space for one full ethernet frame (~1500 bytes);
+ * the receive buffer gets the rest.
+ */
+#define TXSTART_INIT 0x1A00
+#define TXEND_INIT 0x1FFF
+
+/* Put RX buffer at 0 as suggested by the Errata datasheet */
+#define RXSTART_INIT 0x0000
+#define RXEND_INIT 0x19FF
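+/* With these boundaries the TX area spans 0x600 (1536) bytes, enough
+ * for one maximum-length frame plus its control and status bytes,
+ * while the RX area spans 0x1A00 (6656) bytes.
+ */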
+
+/* maximum ethernet frame length */
+#define MAX_FRAMELEN 1518
+
+/* Preferred half duplex: LEDA: Link status LEDB: Rx/Tx activity */
+#define ENC28J60_LAMPS_MODE 0x3476
+
+#endif
diff --git a/drivers/net/ethernet/microchip/encx24j600-regmap.c b/drivers/net/ethernet/microchip/encx24j600-regmap.c
new file mode 100644
index 0000000000..5693784eec
--- /dev/null
+++ b/drivers/net/ethernet/microchip/encx24j600-regmap.c
@@ -0,0 +1,516 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Register map access API - ENCX24J600 support
+ *
+ * Copyright 2015 Gridpoint
+ *
+ * Author: Jon Ringle <jringle@gridpoint.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/regmap.h>
+#include <linux/spi/spi.h>
+
+#include "encx24j600_hw.h"
+
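+/* The chip latches a 2-bit register bank in hardware; the driver
+ * mirrors it in ctx->bank so that banked accesses only emit a BxSEL
+ * opcode when the bank actually changes.
+ */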
+static int encx24j600_switch_bank(struct encx24j600_context *ctx,
+ int bank)
+{
+ int ret = 0;
+ int bank_opcode = BANK_SELECT(bank);
+
+ ret = spi_write(ctx->spi, &bank_opcode, 1);
+ if (ret == 0)
+ ctx->bank = bank;
+
+ return ret;
+}
+
+static int encx24j600_cmdn(struct encx24j600_context *ctx, u8 opcode,
+ const void *buf, size_t len)
+{
+ struct spi_message m;
+ struct spi_transfer t[2] = { { .tx_buf = &opcode, .len = 1, },
+ { .tx_buf = buf, .len = len }, };
+ spi_message_init(&m);
+ spi_message_add_tail(&t[0], &m);
+ spi_message_add_tail(&t[1], &m);
+
+ return spi_sync(ctx->spi, &m);
+}
+
+static void regmap_lock_mutex(void *context)
+{
+ struct encx24j600_context *ctx = context;
+
+ mutex_lock(&ctx->mutex);
+}
+
+static void regmap_unlock_mutex(void *context)
+{
+ struct encx24j600_context *ctx = context;
+
+ mutex_unlock(&ctx->mutex);
+}
+
+static int regmap_encx24j600_sfr_read(void *context, u8 reg, u8 *val,
+ size_t len)
+{
+ struct encx24j600_context *ctx = context;
+ u8 banked_reg = reg & ADDR_MASK;
+ u8 bank = ((reg & BANK_MASK) >> BANK_SHIFT);
+ u8 cmd = RCRU;
+ int ret = 0;
+ int i = 0;
+ u8 tx_buf[2];
+
+ if (reg < 0x80) {
+ cmd = RCRCODE | banked_reg;
+ if ((banked_reg < 0x16) && (ctx->bank != bank))
+ ret = encx24j600_switch_bank(ctx, bank);
+ if (unlikely(ret))
+ return ret;
+ } else {
+		/* Translate registers that are more efficient to access
+		 * via dedicated 3-byte SPI commands
+		 */
+ switch (reg) {
+ case EGPRDPT:
+ cmd = RGPRDPT; break;
+ case EGPWRPT:
+ cmd = RGPWRPT; break;
+ case ERXRDPT:
+ cmd = RRXRDPT; break;
+ case ERXWRPT:
+ cmd = RRXWRPT; break;
+ case EUDARDPT:
+ cmd = RUDARDPT; break;
+ case EUDAWRPT:
+ cmd = RUDAWRPT; break;
+ case EGPDATA:
+ case ERXDATA:
+ case EUDADATA:
+ default:
+ return -EINVAL;
+ }
+ }
+
+ tx_buf[i++] = cmd;
+ if (cmd == RCRU)
+ tx_buf[i++] = reg;
+
+ ret = spi_write_then_read(ctx->spi, tx_buf, i, val, len);
+
+ return ret;
+}
+
+static int regmap_encx24j600_sfr_update(struct encx24j600_context *ctx,
+ u8 reg, u8 *val, size_t len,
+ u8 unbanked_cmd, u8 banked_code)
+{
+ u8 banked_reg = reg & ADDR_MASK;
+ u8 bank = ((reg & BANK_MASK) >> BANK_SHIFT);
+ u8 cmd = unbanked_cmd;
+ struct spi_message m;
+ struct spi_transfer t[3] = { { .tx_buf = &cmd, .len = sizeof(cmd), },
+ { .tx_buf = &reg, .len = sizeof(reg), },
+ { .tx_buf = val, .len = len }, };
+
+ if (reg < 0x80) {
+ int ret = 0;
+
+ cmd = banked_code | banked_reg;
+ if ((banked_reg < 0x16) && (ctx->bank != bank))
+ ret = encx24j600_switch_bank(ctx, bank);
+ if (unlikely(ret))
+ return ret;
+ } else {
+		/* Translate registers that are more efficient to access
+		 * via dedicated 3-byte SPI commands
+		 */
+ switch (reg) {
+ case EGPRDPT:
+ cmd = WGPRDPT; break;
+ case EGPWRPT:
+ cmd = WGPWRPT; break;
+ case ERXRDPT:
+ cmd = WRXRDPT; break;
+ case ERXWRPT:
+ cmd = WRXWRPT; break;
+ case EUDARDPT:
+ cmd = WUDARDPT; break;
+ case EUDAWRPT:
+ cmd = WUDAWRPT; break;
+ case EGPDATA:
+ case ERXDATA:
+ case EUDADATA:
+ default:
+ return -EINVAL;
+ }
+ }
+
+ spi_message_init(&m);
+ spi_message_add_tail(&t[0], &m);
+
+ if (cmd == unbanked_cmd) {
+ t[1].tx_buf = &reg;
+ spi_message_add_tail(&t[1], &m);
+ }
+
+ spi_message_add_tail(&t[2], &m);
+ return spi_sync(ctx->spi, &m);
+}
+
+static int regmap_encx24j600_sfr_write(void *context, u8 reg, u8 *val,
+ size_t len)
+{
+ struct encx24j600_context *ctx = context;
+
+ return regmap_encx24j600_sfr_update(ctx, reg, val, len, WCRU, WCRCODE);
+}
+
+static int regmap_encx24j600_sfr_set_bits(struct encx24j600_context *ctx,
+ u8 reg, u8 val)
+{
+ return regmap_encx24j600_sfr_update(ctx, reg, &val, 1, BFSU, BFSCODE);
+}
+
+static int regmap_encx24j600_sfr_clr_bits(struct encx24j600_context *ctx,
+ u8 reg, u8 val)
+{
+ return regmap_encx24j600_sfr_update(ctx, reg, &val, 1, BFCU, BFCCODE);
+}
+
+static int regmap_encx24j600_reg_update_bits(void *context, unsigned int reg,
+ unsigned int mask,
+ unsigned int val)
+{
+ struct encx24j600_context *ctx = context;
+
+ int ret = 0;
+ unsigned int set_mask = mask & val;
+ unsigned int clr_mask = mask & ~val;
+
+ if ((reg >= 0x40 && reg < 0x6c) || reg >= 0x80)
+ return -EINVAL;
+
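+	/* The chip has no 16-bit read-modify-write primitive, so the
+	 * update is decomposed into up to four 8-bit bit-field commands:
+	 * set low byte, set high byte, clear low byte, clear high byte.
+	 */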
+ if (set_mask & 0xff)
+ ret = regmap_encx24j600_sfr_set_bits(ctx, reg, set_mask);
+
+ set_mask = (set_mask & 0xff00) >> 8;
+
+ if ((set_mask & 0xff) && (ret == 0))
+ ret = regmap_encx24j600_sfr_set_bits(ctx, reg + 1, set_mask);
+
+ if ((clr_mask & 0xff) && (ret == 0))
+ ret = regmap_encx24j600_sfr_clr_bits(ctx, reg, clr_mask);
+
+ clr_mask = (clr_mask & 0xff00) >> 8;
+
+ if ((clr_mask & 0xff) && (ret == 0))
+ ret = regmap_encx24j600_sfr_clr_bits(ctx, reg + 1, clr_mask);
+
+ return ret;
+}
+
+int regmap_encx24j600_spi_write(void *context, u8 reg, const u8 *data,
+ size_t count)
+{
+ struct encx24j600_context *ctx = context;
+
+ if (reg < 0xc0)
+ return encx24j600_cmdn(ctx, reg, data, count);
+
+ /* SPI 1-byte command. Ignore data */
+ return spi_write(ctx->spi, &reg, 1);
+}
+EXPORT_SYMBOL_GPL(regmap_encx24j600_spi_write);
+
+int regmap_encx24j600_spi_read(void *context, u8 reg, u8 *data, size_t count)
+{
+ struct encx24j600_context *ctx = context;
+
+ if (reg == RBSEL && count > 1)
+ count = 1;
+
+ return spi_write_then_read(ctx->spi, &reg, sizeof(reg), data, count);
+}
+EXPORT_SYMBOL_GPL(regmap_encx24j600_spi_read);
+
+static int regmap_encx24j600_write(void *context, const void *data,
+ size_t len)
+{
+ u8 *dout = (u8 *)data;
+ u8 reg = dout[0];
+ ++dout;
+ --len;
+
+ if (reg > 0xa0)
+ return regmap_encx24j600_spi_write(context, reg, dout, len);
+
+ if (len > 2)
+ return -EINVAL;
+
+ return regmap_encx24j600_sfr_write(context, reg, dout, len);
+}
+
+static int regmap_encx24j600_read(void *context,
+ const void *reg_buf, size_t reg_size,
+ void *val, size_t val_size)
+{
+ u8 reg = *(const u8 *)reg_buf;
+
+ if (reg_size != 1) {
+ pr_err("%s: reg=%02x reg_size=%zu\n", __func__, reg, reg_size);
+ return -EINVAL;
+ }
+
+ if (reg > 0xa0)
+ return regmap_encx24j600_spi_read(context, reg, val, val_size);
+
+ if (val_size > 2) {
+ pr_err("%s: reg=%02x val_size=%zu\n", __func__, reg, val_size);
+ return -EINVAL;
+ }
+
+ return regmap_encx24j600_sfr_read(context, reg, val, val_size);
+}
+
+static bool encx24j600_regmap_readable(struct device *dev, unsigned int reg)
+{
+ if ((reg < 0x36) ||
+ ((reg >= 0x40) && (reg < 0x4c)) ||
+ ((reg >= 0x52) && (reg < 0x56)) ||
+ ((reg >= 0x60) && (reg < 0x66)) ||
+ ((reg >= 0x68) && (reg < 0x80)) ||
+ ((reg >= 0x86) && (reg < 0x92)) ||
+ (reg == 0xc8))
+ return true;
+ else
+ return false;
+}
+
+static bool encx24j600_regmap_writeable(struct device *dev, unsigned int reg)
+{
+ if ((reg < 0x12) ||
+ ((reg >= 0x14) && (reg < 0x1a)) ||
+ ((reg >= 0x1c) && (reg < 0x36)) ||
+ ((reg >= 0x40) && (reg < 0x4c)) ||
+ ((reg >= 0x52) && (reg < 0x56)) ||
+ ((reg >= 0x60) && (reg < 0x68)) ||
+ ((reg >= 0x6c) && (reg < 0x80)) ||
+ ((reg >= 0x86) && (reg < 0x92)) ||
+ ((reg >= 0xc0) && (reg < 0xc8)) ||
+ ((reg >= 0xca) && (reg < 0xf0)))
+ return true;
+ else
+ return false;
+}
+
+static bool encx24j600_regmap_volatile(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case ERXHEAD:
+ case EDMACS:
+ case ETXSTAT:
+ case ETXWIRE:
+ case ECON1: /* Can be modified via single byte cmds */
+ case ECON2: /* Can be modified via single byte cmds */
+ case ESTAT:
+ case EIR: /* Can be modified via single byte cmds */
+ case MIRD:
+ case MISTAT:
+ return true;
+ default:
+ break;
+ }
+
+ return false;
+}
+
+static bool encx24j600_regmap_precious(struct device *dev, unsigned int reg)
+{
+ /* single byte cmds are precious */
+ if (((reg >= 0xc0) && (reg < 0xc8)) ||
+ ((reg >= 0xca) && (reg < 0xf0)))
+ return true;
+ else
+ return false;
+}
+
+static int regmap_encx24j600_phy_reg_read(void *context, unsigned int reg,
+ unsigned int *val)
+{
+ struct encx24j600_context *ctx = context;
+ int ret;
+ unsigned int mistat;
+
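+	/* The reserved high bits of MIREGADR must be written as 0x0100
+	 * (MIREGADR_VAL) per the data sheet; the PHY register number
+	 * occupies the low five bits.
+	 */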
+ reg = MIREGADR_VAL | (reg & PHREG_MASK);
+ ret = regmap_write(ctx->regmap, MIREGADR, reg);
+ if (unlikely(ret))
+ goto err_out;
+
+ ret = regmap_write(ctx->regmap, MICMD, MIIRD);
+ if (unlikely(ret))
+ goto err_out;
+
+ usleep_range(26, 100);
+ while (((ret = regmap_read(ctx->regmap, MISTAT, &mistat)) == 0) &&
+ (mistat & BUSY))
+ cpu_relax();
+
+ if (unlikely(ret))
+ goto err_out;
+
+ ret = regmap_write(ctx->regmap, MICMD, 0);
+ if (unlikely(ret))
+ goto err_out;
+
+ ret = regmap_read(ctx->regmap, MIRD, val);
+
+err_out:
+ if (ret)
+ pr_err("%s: error %d reading reg %02x\n", __func__, ret,
+ reg & PHREG_MASK);
+
+ return ret;
+}
+
+static int regmap_encx24j600_phy_reg_write(void *context, unsigned int reg,
+ unsigned int val)
+{
+ struct encx24j600_context *ctx = context;
+ int ret;
+ unsigned int mistat;
+
+ reg = MIREGADR_VAL | (reg & PHREG_MASK);
+ ret = regmap_write(ctx->regmap, MIREGADR, reg);
+ if (unlikely(ret))
+ goto err_out;
+
+ ret = regmap_write(ctx->regmap, MIWR, val);
+ if (unlikely(ret))
+ goto err_out;
+
+ usleep_range(26, 100);
+ while (((ret = regmap_read(ctx->regmap, MISTAT, &mistat)) == 0) &&
+ (mistat & BUSY))
+ cpu_relax();
+
+err_out:
+ if (ret)
+ pr_err("%s: error %d writing reg %02x=%04x\n", __func__, ret,
+ reg & PHREG_MASK, val);
+
+ return ret;
+}
+
+static bool encx24j600_phymap_readable(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case PHCON1:
+ case PHSTAT1:
+ case PHANA:
+ case PHANLPA:
+ case PHANE:
+ case PHCON2:
+ case PHSTAT2:
+ case PHSTAT3:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool encx24j600_phymap_writeable(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case PHCON1:
+ case PHCON2:
+ case PHANA:
+ return true;
+ case PHSTAT1:
+ case PHSTAT2:
+ case PHSTAT3:
+ case PHANLPA:
+ case PHANE:
+ default:
+ return false;
+ }
+}
+
+static bool encx24j600_phymap_volatile(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case PHSTAT1:
+ case PHSTAT2:
+ case PHSTAT3:
+ case PHANLPA:
+ case PHANE:
+ case PHCON2:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static struct regmap_config regcfg = {
+ .name = "reg",
+ .reg_bits = 8,
+ .val_bits = 16,
+ .max_register = 0xee,
+ .reg_stride = 2,
+ .cache_type = REGCACHE_RBTREE,
+ .val_format_endian = REGMAP_ENDIAN_LITTLE,
+ .readable_reg = encx24j600_regmap_readable,
+ .writeable_reg = encx24j600_regmap_writeable,
+ .volatile_reg = encx24j600_regmap_volatile,
+ .precious_reg = encx24j600_regmap_precious,
+ .lock = regmap_lock_mutex,
+ .unlock = regmap_unlock_mutex,
+};
+
+static struct regmap_bus regmap_encx24j600 = {
+ .write = regmap_encx24j600_write,
+ .read = regmap_encx24j600_read,
+ .reg_update_bits = regmap_encx24j600_reg_update_bits,
+};
+
+static struct regmap_config phycfg = {
+ .name = "phy",
+ .reg_bits = 8,
+ .val_bits = 16,
+ .max_register = 0x1f,
+ .cache_type = REGCACHE_RBTREE,
+ .val_format_endian = REGMAP_ENDIAN_LITTLE,
+ .readable_reg = encx24j600_phymap_readable,
+ .writeable_reg = encx24j600_phymap_writeable,
+ .volatile_reg = encx24j600_phymap_volatile,
+};
+
+static struct regmap_bus phymap_encx24j600 = {
+ .reg_write = regmap_encx24j600_phy_reg_write,
+ .reg_read = regmap_encx24j600_phy_reg_read,
+};
+
+int devm_regmap_init_encx24j600(struct device *dev,
+ struct encx24j600_context *ctx)
+{
+ mutex_init(&ctx->mutex);
+ regcfg.lock_arg = ctx;
+ ctx->regmap = devm_regmap_init(dev, &regmap_encx24j600, ctx, &regcfg);
+ if (IS_ERR(ctx->regmap))
+ return PTR_ERR(ctx->regmap);
+ ctx->phymap = devm_regmap_init(dev, &phymap_encx24j600, ctx, &phycfg);
+ if (IS_ERR(ctx->phymap))
+ return PTR_ERR(ctx->phymap);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devm_regmap_init_encx24j600);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/microchip/encx24j600.c b/drivers/net/ethernet/microchip/encx24j600.c
new file mode 100644
index 0000000000..d7c8aa77ec
--- /dev/null
+++ b/drivers/net/ethernet/microchip/encx24j600.c
@@ -0,0 +1,1127 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Microchip ENCX24J600 ethernet driver
+ *
+ * Copyright (C) 2015 Gridpoint
+ * Author: Jon Ringle <jringle@gridpoint.com>
+ */
+
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/regmap.h>
+#include <linux/skbuff.h>
+#include <linux/spi/spi.h>
+
+#include "encx24j600_hw.h"
+
+#define DRV_NAME "encx24j600"
+#define DRV_VERSION "1.0"
+
+#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
+static int debug = -1;
+module_param(debug, int, 0000);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+
+/* SRAM memory layout:
+ *
+ * 0x0000-0x05ff TX buffers 1.5KB (1*1536) reside in the GP area in SRAM
+ * 0x0600-0x5fff RX buffers 22.5KB (15*1536) reside in the RX area in SRAM
+ */
+#define ENC_TX_BUF_START 0x0000U
+#define ENC_RX_BUF_START 0x0600U
+#define ENC_RX_BUF_END 0x5fffU
+#define ENC_SRAM_SIZE 0x6000U
+
+enum {
+ RXFILTER_NORMAL,
+ RXFILTER_MULTI,
+ RXFILTER_PROMISC
+};
+
+struct encx24j600_priv {
+ struct net_device *ndev;
+ struct mutex lock; /* device access lock */
+ struct encx24j600_context ctx;
+ struct sk_buff *tx_skb;
+ struct task_struct *kworker_task;
+ struct kthread_worker kworker;
+ struct kthread_work tx_work;
+ struct kthread_work setrx_work;
+ u16 next_packet;
+ bool hw_enabled;
+ bool full_duplex;
+ bool autoneg;
+ u16 speed;
+ int rxfilter;
+ u32 msg_enable;
+};
+
+static void dump_packet(const char *msg, int len, const char *data)
+{
+ pr_debug(DRV_NAME ": %s - packet len:%d\n", msg, len);
+ print_hex_dump_bytes("pk data: ", DUMP_PREFIX_OFFSET, data, len);
+}
+
+static void encx24j600_dump_rsv(struct encx24j600_priv *priv, const char *msg,
+ struct rsv *rsv)
+{
+ struct net_device *dev = priv->ndev;
+
+ netdev_info(dev, "RX packet Len:%d\n", rsv->len);
+ netdev_dbg(dev, "%s - NextPk: 0x%04x\n", msg,
+ rsv->next_packet);
+ netdev_dbg(dev, "RxOK: %d, DribbleNibble: %d\n",
+ RSV_GETBIT(rsv->rxstat, RSV_RXOK),
+ RSV_GETBIT(rsv->rxstat, RSV_DRIBBLENIBBLE));
+ netdev_dbg(dev, "CRCErr:%d, LenChkErr: %d, LenOutOfRange: %d\n",
+ RSV_GETBIT(rsv->rxstat, RSV_CRCERROR),
+ RSV_GETBIT(rsv->rxstat, RSV_LENCHECKERR),
+ RSV_GETBIT(rsv->rxstat, RSV_LENOUTOFRANGE));
+ netdev_dbg(dev, "Multicast: %d, Broadcast: %d, LongDropEvent: %d, CarrierEvent: %d\n",
+ RSV_GETBIT(rsv->rxstat, RSV_RXMULTICAST),
+ RSV_GETBIT(rsv->rxstat, RSV_RXBROADCAST),
+ RSV_GETBIT(rsv->rxstat, RSV_RXLONGEVDROPEV),
+ RSV_GETBIT(rsv->rxstat, RSV_CARRIEREV));
+ netdev_dbg(dev, "ControlFrame: %d, PauseFrame: %d, UnknownOp: %d, VLanTagFrame: %d\n",
+ RSV_GETBIT(rsv->rxstat, RSV_RXCONTROLFRAME),
+ RSV_GETBIT(rsv->rxstat, RSV_RXPAUSEFRAME),
+ RSV_GETBIT(rsv->rxstat, RSV_RXUNKNOWNOPCODE),
+ RSV_GETBIT(rsv->rxstat, RSV_RXTYPEVLAN));
+}
+
+static u16 encx24j600_read_reg(struct encx24j600_priv *priv, u8 reg)
+{
+ struct net_device *dev = priv->ndev;
+ unsigned int val = 0;
+ int ret = regmap_read(priv->ctx.regmap, reg, &val);
+
+ if (unlikely(ret))
+ netif_err(priv, drv, dev, "%s: error %d reading reg %02x\n",
+ __func__, ret, reg);
+ return val;
+}
+
+static void encx24j600_write_reg(struct encx24j600_priv *priv, u8 reg, u16 val)
+{
+ struct net_device *dev = priv->ndev;
+ int ret = regmap_write(priv->ctx.regmap, reg, val);
+
+ if (unlikely(ret))
+ netif_err(priv, drv, dev, "%s: error %d writing reg %02x=%04x\n",
+ __func__, ret, reg, val);
+}
+
+static void encx24j600_update_reg(struct encx24j600_priv *priv, u8 reg,
+ u16 mask, u16 val)
+{
+ struct net_device *dev = priv->ndev;
+ int ret = regmap_update_bits(priv->ctx.regmap, reg, mask, val);
+
+ if (unlikely(ret))
+ netif_err(priv, drv, dev, "%s: error %d updating reg %02x=%04x~%04x\n",
+ __func__, ret, reg, val, mask);
+}
+
+static u16 encx24j600_read_phy(struct encx24j600_priv *priv, u8 reg)
+{
+ struct net_device *dev = priv->ndev;
+ unsigned int val = 0;
+ int ret = regmap_read(priv->ctx.phymap, reg, &val);
+
+ if (unlikely(ret))
+ netif_err(priv, drv, dev, "%s: error %d reading %02x\n",
+ __func__, ret, reg);
+ return val;
+}
+
+static void encx24j600_write_phy(struct encx24j600_priv *priv, u8 reg, u16 val)
+{
+ struct net_device *dev = priv->ndev;
+ int ret = regmap_write(priv->ctx.phymap, reg, val);
+
+ if (unlikely(ret))
+ netif_err(priv, drv, dev, "%s: error %d writing reg %02x=%04x\n",
+ __func__, ret, reg, val);
+}
+
+static void encx24j600_clr_bits(struct encx24j600_priv *priv, u8 reg, u16 mask)
+{
+ encx24j600_update_reg(priv, reg, mask, 0);
+}
+
+static void encx24j600_set_bits(struct encx24j600_priv *priv, u8 reg, u16 mask)
+{
+ encx24j600_update_reg(priv, reg, mask, mask);
+}
+
+static void encx24j600_cmd(struct encx24j600_priv *priv, u8 cmd)
+{
+ struct net_device *dev = priv->ndev;
+ int ret = regmap_write(priv->ctx.regmap, cmd, 0);
+
+ if (unlikely(ret))
+ netif_err(priv, drv, dev, "%s: error %d with cmd %02x\n",
+ __func__, ret, cmd);
+}
+
+static int encx24j600_raw_read(struct encx24j600_priv *priv, u8 reg, u8 *data,
+ size_t count)
+{
+ int ret;
+
+ mutex_lock(&priv->ctx.mutex);
+ ret = regmap_encx24j600_spi_read(&priv->ctx, reg, data, count);
+ mutex_unlock(&priv->ctx.mutex);
+
+ return ret;
+}
+
+static int encx24j600_raw_write(struct encx24j600_priv *priv, u8 reg,
+ const u8 *data, size_t count)
+{
+ int ret;
+
+ mutex_lock(&priv->ctx.mutex);
+ ret = regmap_encx24j600_spi_write(&priv->ctx, reg, data, count);
+ mutex_unlock(&priv->ctx.mutex);
+
+ return ret;
+}
+
+static void encx24j600_update_phcon1(struct encx24j600_priv *priv)
+{
+ u16 phcon1 = encx24j600_read_phy(priv, PHCON1);
+
+ if (priv->autoneg == AUTONEG_ENABLE) {
+ phcon1 |= ANEN | RENEG;
+ } else {
+ phcon1 &= ~ANEN;
+ if (priv->speed == SPEED_100)
+ phcon1 |= SPD100;
+ else
+ phcon1 &= ~SPD100;
+
+ if (priv->full_duplex)
+ phcon1 |= PFULDPX;
+ else
+ phcon1 &= ~PFULDPX;
+ }
+ encx24j600_write_phy(priv, PHCON1, phcon1);
+}
+
+/* Waits for autonegotiation to complete. */
+static int encx24j600_wait_for_autoneg(struct encx24j600_priv *priv)
+{
+ struct net_device *dev = priv->ndev;
+ unsigned long timeout = jiffies + msecs_to_jiffies(2000);
+ u16 phstat1;
+ u16 estat;
+
+ phstat1 = encx24j600_read_phy(priv, PHSTAT1);
+ while ((phstat1 & ANDONE) == 0) {
+ if (time_after(jiffies, timeout)) {
+ u16 phstat3;
+
+ netif_notice(priv, drv, dev, "timeout waiting for autoneg done\n");
+
+ priv->autoneg = AUTONEG_DISABLE;
+ phstat3 = encx24j600_read_phy(priv, PHSTAT3);
+ priv->speed = (phstat3 & PHY3SPD100)
+ ? SPEED_100 : SPEED_10;
+ priv->full_duplex = (phstat3 & PHY3DPX) ? 1 : 0;
+ encx24j600_update_phcon1(priv);
+			netif_notice(priv, drv, dev, "Using parallel detection: %s/%s\n",
+ priv->speed == SPEED_100 ? "100" : "10",
+ priv->full_duplex ? "Full" : "Half");
+
+ return -ETIMEDOUT;
+ }
+ cpu_relax();
+ phstat1 = encx24j600_read_phy(priv, PHSTAT1);
+ }
+
+ estat = encx24j600_read_reg(priv, ESTAT);
+ if (estat & PHYDPX) {
+ encx24j600_set_bits(priv, MACON2, FULDPX);
+ encx24j600_write_reg(priv, MABBIPG, 0x15);
+ } else {
+ encx24j600_clr_bits(priv, MACON2, FULDPX);
+ encx24j600_write_reg(priv, MABBIPG, 0x12);
+		/* Maximum retransmission attempts */
+ encx24j600_write_reg(priv, MACLCON, 0x370f);
+ }
+
+ return 0;
+}
+
+/* Access the PHY to determine link status */
+static void encx24j600_check_link_status(struct encx24j600_priv *priv)
+{
+ struct net_device *dev = priv->ndev;
+ u16 estat;
+
+ estat = encx24j600_read_reg(priv, ESTAT);
+
+ if (estat & PHYLNK) {
+ if (priv->autoneg == AUTONEG_ENABLE)
+ encx24j600_wait_for_autoneg(priv);
+
+ netif_carrier_on(dev);
+ netif_info(priv, ifup, dev, "link up\n");
+ } else {
+ netif_info(priv, ifdown, dev, "link down\n");
+
+ /* Re-enable autoneg since we won't know what we might be
+ * connected to when the link is brought back up again.
+ */
+ priv->autoneg = AUTONEG_ENABLE;
+ priv->full_duplex = true;
+ priv->speed = SPEED_100;
+ netif_carrier_off(dev);
+ }
+}
+
+static void encx24j600_int_link_handler(struct encx24j600_priv *priv)
+{
+ struct net_device *dev = priv->ndev;
+
+	netif_dbg(priv, intr, dev, "%s\n", __func__);
+ encx24j600_check_link_status(priv);
+ encx24j600_clr_bits(priv, EIR, LINKIF);
+}
+
+static void encx24j600_tx_complete(struct encx24j600_priv *priv, bool err)
+{
+ struct net_device *dev = priv->ndev;
+
+ if (!priv->tx_skb) {
+ BUG();
+ return;
+ }
+
+ mutex_lock(&priv->lock);
+
+ if (err)
+ dev->stats.tx_errors++;
+ else
+ dev->stats.tx_packets++;
+
+ dev->stats.tx_bytes += priv->tx_skb->len;
+
+ encx24j600_clr_bits(priv, EIR, TXIF | TXABTIF);
+
+ netif_dbg(priv, tx_done, dev, "TX Done%s\n", err ? ": Err" : "");
+
+ dev_kfree_skb(priv->tx_skb);
+ priv->tx_skb = NULL;
+
+ netif_wake_queue(dev);
+
+ mutex_unlock(&priv->lock);
+}
+
+static int encx24j600_receive_packet(struct encx24j600_priv *priv,
+ struct rsv *rsv)
+{
+ struct net_device *dev = priv->ndev;
+ struct sk_buff *skb = netdev_alloc_skb(dev, rsv->len + NET_IP_ALIGN);
+
+ if (!skb) {
+ pr_err_ratelimited("RX: OOM: packet dropped\n");
+ dev->stats.rx_dropped++;
+ return -ENOMEM;
+ }
+ skb_reserve(skb, NET_IP_ALIGN);
+ encx24j600_raw_read(priv, RRXDATA, skb_put(skb, rsv->len), rsv->len);
+
+ if (netif_msg_pktdata(priv))
+ dump_packet("RX", skb->len, skb->data);
+
+ skb->dev = dev;
+ skb->protocol = eth_type_trans(skb, dev);
+ skb->ip_summed = CHECKSUM_COMPLETE;
+
+ /* Maintain stats */
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += rsv->len;
+
+ netif_rx(skb);
+
+ return 0;
+}
+
+static void encx24j600_rx_packets(struct encx24j600_priv *priv, u8 packet_count)
+{
+ struct net_device *dev = priv->ndev;
+
+ while (packet_count--) {
+ struct rsv rsv;
+ u16 newrxtail;
+
+ encx24j600_write_reg(priv, ERXRDPT, priv->next_packet);
+ encx24j600_raw_read(priv, RRXDATA, (u8 *)&rsv, sizeof(rsv));
+
+ if (netif_msg_rx_status(priv))
+ encx24j600_dump_rsv(priv, __func__, &rsv);
+
+ if (!RSV_GETBIT(rsv.rxstat, RSV_RXOK) ||
+ (rsv.len > MAX_FRAMELEN)) {
+ netif_err(priv, rx_err, dev, "RX Error %04x\n",
+ rsv.rxstat);
+ dev->stats.rx_errors++;
+
+ if (RSV_GETBIT(rsv.rxstat, RSV_CRCERROR))
+ dev->stats.rx_crc_errors++;
+ if (RSV_GETBIT(rsv.rxstat, RSV_LENCHECKERR))
+ dev->stats.rx_frame_errors++;
+ if (rsv.len > MAX_FRAMELEN)
+ dev->stats.rx_over_errors++;
+ } else {
+ encx24j600_receive_packet(priv, &rsv);
+ }
+
+ priv->next_packet = rsv.next_packet;
+
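+		/* ERXTAIL must trail the next packet pointer by two
+		 * bytes; when that would land on the start of the RX
+		 * area, wrap it to the last even address in SRAM.
+		 */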
+ newrxtail = priv->next_packet - 2;
+ if (newrxtail == ENC_RX_BUF_START)
+ newrxtail = SRAM_SIZE - 2;
+
+ encx24j600_cmd(priv, SETPKTDEC);
+ encx24j600_write_reg(priv, ERXTAIL, newrxtail);
+ }
+}
+
+static irqreturn_t encx24j600_isr(int irq, void *dev_id)
+{
+ struct encx24j600_priv *priv = dev_id;
+ struct net_device *dev = priv->ndev;
+ int eir;
+
+	/* Mask the chip interrupt while servicing (CLREIE clears EIE.INTIE) */
+ encx24j600_cmd(priv, CLREIE);
+
+ eir = encx24j600_read_reg(priv, EIR);
+
+ if (eir & LINKIF)
+ encx24j600_int_link_handler(priv);
+
+ if (eir & TXIF)
+ encx24j600_tx_complete(priv, false);
+
+ if (eir & TXABTIF)
+ encx24j600_tx_complete(priv, true);
+
+ if (eir & RXABTIF) {
+ if (eir & PCFULIF) {
+ /* Packet counter is full */
+ netif_err(priv, rx_err, dev, "Packet counter full\n");
+ }
+ dev->stats.rx_dropped++;
+ encx24j600_clr_bits(priv, EIR, RXABTIF);
+ }
+
+ if (eir & PKTIF) {
+ u8 packet_count;
+
+ mutex_lock(&priv->lock);
+
+ packet_count = encx24j600_read_reg(priv, ESTAT) & 0xff;
+ while (packet_count) {
+ encx24j600_rx_packets(priv, packet_count);
+ packet_count = encx24j600_read_reg(priv, ESTAT) & 0xff;
+ }
+
+ mutex_unlock(&priv->lock);
+ }
+
+	/* Unmask the chip interrupt (SETEIE sets EIE.INTIE) */
+ encx24j600_cmd(priv, SETEIE);
+
+ return IRQ_HANDLED;
+}
+
+static int encx24j600_soft_reset(struct encx24j600_priv *priv)
+{
+ int ret = 0;
+ int timeout;
+ u16 eudast;
+
+ /* Write and verify a test value to EUDAST */
+ regcache_cache_bypass(priv->ctx.regmap, true);
+ timeout = 10;
+ do {
+ encx24j600_write_reg(priv, EUDAST, EUDAST_TEST_VAL);
+ eudast = encx24j600_read_reg(priv, EUDAST);
+ usleep_range(25, 100);
+ } while ((eudast != EUDAST_TEST_VAL) && --timeout);
+ regcache_cache_bypass(priv->ctx.regmap, false);
+
+ if (timeout == 0) {
+ ret = -ETIMEDOUT;
+ goto err_out;
+ }
+
+ /* Wait for CLKRDY to become set */
+ timeout = 10;
+ while (!(encx24j600_read_reg(priv, ESTAT) & CLKRDY) && --timeout)
+ usleep_range(25, 100);
+
+ if (timeout == 0) {
+ ret = -ETIMEDOUT;
+ goto err_out;
+ }
+
+ /* Issue a System Reset command */
+ encx24j600_cmd(priv, SETETHRST);
+ usleep_range(25, 100);
+
+ /* Confirm that EUDAST has 0000h after system reset */
+ if (encx24j600_read_reg(priv, EUDAST) != 0) {
+ ret = -EINVAL;
+ goto err_out;
+ }
+
+ /* Wait for PHY register and status bits to become available */
+ usleep_range(256, 1000);
+
+err_out:
+ return ret;
+}
+
+static int encx24j600_hw_reset(struct encx24j600_priv *priv)
+{
+ int ret;
+
+ mutex_lock(&priv->lock);
+ ret = encx24j600_soft_reset(priv);
+ mutex_unlock(&priv->lock);
+
+ return ret;
+}
+
+static void encx24j600_reset_hw_tx(struct encx24j600_priv *priv)
+{
+ encx24j600_set_bits(priv, ECON2, TXRST);
+ encx24j600_clr_bits(priv, ECON2, TXRST);
+}
+
+static void encx24j600_hw_init_tx(struct encx24j600_priv *priv)
+{
+ /* Reset TX */
+ encx24j600_reset_hw_tx(priv);
+
+	/* Clear the TXIF flag if it was previously set */
+ encx24j600_clr_bits(priv, EIR, TXIF | TXABTIF);
+
+ /* Write the Tx Buffer pointer */
+ encx24j600_write_reg(priv, EGPWRPT, ENC_TX_BUF_START);
+}
+
+static void encx24j600_hw_init_rx(struct encx24j600_priv *priv)
+{
+ encx24j600_cmd(priv, DISABLERX);
+
+ /* Set up RX packet start address in the SRAM */
+ encx24j600_write_reg(priv, ERXST, ENC_RX_BUF_START);
+
+ /* Preload the RX Data pointer to the beginning of the RX area */
+ encx24j600_write_reg(priv, ERXRDPT, ENC_RX_BUF_START);
+
+ priv->next_packet = ENC_RX_BUF_START;
+
+ /* Set up RX end address in the SRAM */
+ encx24j600_write_reg(priv, ERXTAIL, ENC_SRAM_SIZE - 2);
+
+ /* Reset the user data pointers */
+ encx24j600_write_reg(priv, EUDAST, ENC_SRAM_SIZE);
+ encx24j600_write_reg(priv, EUDAND, ENC_SRAM_SIZE + 1);
+
+ /* Set Max Frame length */
+ encx24j600_write_reg(priv, MAMXFL, MAX_FRAMELEN);
+}
+
+static void encx24j600_dump_config(struct encx24j600_priv *priv,
+ const char *msg)
+{
+ pr_info(DRV_NAME ": %s\n", msg);
+
+ /* CHIP configuration */
+ pr_info(DRV_NAME " ECON1: %04X\n", encx24j600_read_reg(priv, ECON1));
+ pr_info(DRV_NAME " ECON2: %04X\n", encx24j600_read_reg(priv, ECON2));
+ pr_info(DRV_NAME " ERXFCON: %04X\n", encx24j600_read_reg(priv,
+ ERXFCON));
+ pr_info(DRV_NAME " ESTAT: %04X\n", encx24j600_read_reg(priv, ESTAT));
+ pr_info(DRV_NAME " EIR: %04X\n", encx24j600_read_reg(priv, EIR));
+ pr_info(DRV_NAME " EIDLED: %04X\n", encx24j600_read_reg(priv, EIDLED));
+
+ /* MAC layer configuration */
+ pr_info(DRV_NAME " MACON1: %04X\n", encx24j600_read_reg(priv, MACON1));
+ pr_info(DRV_NAME " MACON2: %04X\n", encx24j600_read_reg(priv, MACON2));
+ pr_info(DRV_NAME " MAIPG: %04X\n", encx24j600_read_reg(priv, MAIPG));
+ pr_info(DRV_NAME " MACLCON: %04X\n", encx24j600_read_reg(priv,
+ MACLCON));
+ pr_info(DRV_NAME " MABBIPG: %04X\n", encx24j600_read_reg(priv,
+ MABBIPG));
+
+	/* PHY configuration */
+ pr_info(DRV_NAME " PHCON1: %04X\n", encx24j600_read_phy(priv, PHCON1));
+ pr_info(DRV_NAME " PHCON2: %04X\n", encx24j600_read_phy(priv, PHCON2));
+ pr_info(DRV_NAME " PHANA: %04X\n", encx24j600_read_phy(priv, PHANA));
+ pr_info(DRV_NAME " PHANLPA: %04X\n", encx24j600_read_phy(priv,
+ PHANLPA));
+ pr_info(DRV_NAME " PHANE: %04X\n", encx24j600_read_phy(priv, PHANE));
+ pr_info(DRV_NAME " PHSTAT1: %04X\n", encx24j600_read_phy(priv,
+ PHSTAT1));
+ pr_info(DRV_NAME " PHSTAT2: %04X\n", encx24j600_read_phy(priv,
+ PHSTAT2));
+ pr_info(DRV_NAME " PHSTAT3: %04X\n", encx24j600_read_phy(priv,
+ PHSTAT3));
+}
+
+static void encx24j600_set_rxfilter_mode(struct encx24j600_priv *priv)
+{
+ switch (priv->rxfilter) {
+ case RXFILTER_PROMISC:
+ encx24j600_set_bits(priv, MACON1, PASSALL);
+ encx24j600_write_reg(priv, ERXFCON, UCEN | MCEN | NOTMEEN);
+ break;
+ case RXFILTER_MULTI:
+ encx24j600_clr_bits(priv, MACON1, PASSALL);
+ encx24j600_write_reg(priv, ERXFCON, UCEN | CRCEN | BCEN | MCEN);
+ break;
+ case RXFILTER_NORMAL:
+ default:
+ encx24j600_clr_bits(priv, MACON1, PASSALL);
+ encx24j600_write_reg(priv, ERXFCON, UCEN | CRCEN | BCEN);
+ break;
+ }
+}
+
+static void encx24j600_hw_init(struct encx24j600_priv *priv)
+{
+ u16 macon2;
+
+ priv->hw_enabled = false;
+
+	/* PHY LEDs:
+	 * LEDA: link state + collision events
+	 * LEDB: link state + transmit/receive events
+	 */
+ encx24j600_update_reg(priv, EIDLED, 0xff00, 0xcb00);
+
+ /* Loopback disabled */
+ encx24j600_write_reg(priv, MACON1, 0x9);
+
+ /* interpacket gap value */
+ encx24j600_write_reg(priv, MAIPG, 0x0c12);
+
+ /* Write the auto negotiation pattern */
+ encx24j600_write_phy(priv, PHANA, PHANA_DEFAULT);
+
+ encx24j600_update_phcon1(priv);
+ encx24j600_check_link_status(priv);
+
+ macon2 = MACON2_RSV1 | TXCRCEN | PADCFG0 | PADCFG2 | MACON2_DEFER;
+ if ((priv->autoneg == AUTONEG_DISABLE) && priv->full_duplex)
+ macon2 |= FULDPX;
+
+ encx24j600_set_bits(priv, MACON2, macon2);
+
+ priv->rxfilter = RXFILTER_NORMAL;
+ encx24j600_set_rxfilter_mode(priv);
+
+ /* Program the Maximum frame length */
+ encx24j600_write_reg(priv, MAMXFL, MAX_FRAMELEN);
+
+ /* Init Tx pointers */
+ encx24j600_hw_init_tx(priv);
+
+ /* Init Rx pointers */
+ encx24j600_hw_init_rx(priv);
+
+ if (netif_msg_hw(priv))
+ encx24j600_dump_config(priv, "Hw is initialized");
+}
+
+static void encx24j600_hw_enable(struct encx24j600_priv *priv)
+{
+	/* Clear the interrupt flags in case any were set */
+ encx24j600_clr_bits(priv, EIR, (PCFULIF | RXABTIF | TXABTIF | TXIF |
+ PKTIF | LINKIF));
+
+ /* Enable the interrupts */
+ encx24j600_write_reg(priv, EIE, (PCFULIE | RXABTIE | TXABTIE | TXIE |
+ PKTIE | LINKIE | INTIE));
+
+ /* Enable RX */
+ encx24j600_cmd(priv, ENABLERX);
+
+ priv->hw_enabled = true;
+}
+
+static void encx24j600_hw_disable(struct encx24j600_priv *priv)
+{
+ /* Disable all interrupts */
+ encx24j600_write_reg(priv, EIE, 0);
+
+ /* Disable RX */
+ encx24j600_cmd(priv, DISABLERX);
+
+ priv->hw_enabled = false;
+}
+
+static int encx24j600_setlink(struct net_device *dev, u8 autoneg, u16 speed,
+ u8 duplex)
+{
+ struct encx24j600_priv *priv = netdev_priv(dev);
+ int ret = 0;
+
+ if (!priv->hw_enabled) {
+ /* link is in low power mode now; duplex setting
+ * will take effect on next encx24j600_hw_init()
+ */
+ if (speed == SPEED_10 || speed == SPEED_100) {
+ priv->autoneg = (autoneg == AUTONEG_ENABLE);
+ priv->full_duplex = (duplex == DUPLEX_FULL);
+			priv->speed = speed;
+ } else {
+ netif_warn(priv, link, dev, "unsupported link speed setting\n");
+			/* Speeds other than SPEED_10 and SPEED_100
+			 * are not supported by the chip
+			 */
+ ret = -EOPNOTSUPP;
+ }
+ } else {
+ netif_warn(priv, link, dev, "Warning: hw must be disabled to set link mode\n");
+ ret = -EBUSY;
+ }
+ return ret;
+}
+
+static void encx24j600_hw_get_macaddr(struct encx24j600_priv *priv,
+ unsigned char *ethaddr)
+{
+ unsigned short val;
+
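+	/* MAADR1..MAADR3 hold the station address as three little-endian
+	 * 16-bit words, with MAADR1 carrying the first two octets.
+	 */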
+ val = encx24j600_read_reg(priv, MAADR1);
+
+ ethaddr[0] = val & 0x00ff;
+ ethaddr[1] = (val & 0xff00) >> 8;
+
+ val = encx24j600_read_reg(priv, MAADR2);
+
+ ethaddr[2] = val & 0x00ffU;
+ ethaddr[3] = (val & 0xff00U) >> 8;
+
+ val = encx24j600_read_reg(priv, MAADR3);
+
+ ethaddr[4] = val & 0x00ffU;
+ ethaddr[5] = (val & 0xff00U) >> 8;
+}
+
+/* Program the hardware MAC address from dev->dev_addr.*/
+static int encx24j600_set_hw_macaddr(struct net_device *dev)
+{
+ struct encx24j600_priv *priv = netdev_priv(dev);
+
+ if (priv->hw_enabled) {
+		netif_info(priv, drv, dev, "Hardware must be disabled to set MAC address\n");
+ return -EBUSY;
+ }
+
+ mutex_lock(&priv->lock);
+
+ netif_info(priv, drv, dev, "%s: Setting MAC address to %pM\n",
+ dev->name, dev->dev_addr);
+
+ encx24j600_write_reg(priv, MAADR3, (dev->dev_addr[4] |
+ dev->dev_addr[5] << 8));
+ encx24j600_write_reg(priv, MAADR2, (dev->dev_addr[2] |
+ dev->dev_addr[3] << 8));
+ encx24j600_write_reg(priv, MAADR1, (dev->dev_addr[0] |
+ dev->dev_addr[1] << 8));
+
+ mutex_unlock(&priv->lock);
+
+ return 0;
+}
+
+/* Store the new hardware address in dev->dev_addr, and update the MAC.*/
+static int encx24j600_set_mac_address(struct net_device *dev, void *addr)
+{
+ struct sockaddr *address = addr;
+
+ if (netif_running(dev))
+ return -EBUSY;
+ if (!is_valid_ether_addr(address->sa_data))
+ return -EADDRNOTAVAIL;
+
+ eth_hw_addr_set(dev, address->sa_data);
+ return encx24j600_set_hw_macaddr(dev);
+}
+
+static int encx24j600_open(struct net_device *dev)
+{
+ struct encx24j600_priv *priv = netdev_priv(dev);
+
+ int ret = request_threaded_irq(priv->ctx.spi->irq, NULL, encx24j600_isr,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ DRV_NAME, priv);
+ if (unlikely(ret < 0)) {
+ netdev_err(dev, "request irq %d failed (ret = %d)\n",
+ priv->ctx.spi->irq, ret);
+ return ret;
+ }
+
+ encx24j600_hw_disable(priv);
+ encx24j600_hw_init(priv);
+ encx24j600_hw_enable(priv);
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+static int encx24j600_stop(struct net_device *dev)
+{
+ struct encx24j600_priv *priv = netdev_priv(dev);
+
+ netif_stop_queue(dev);
+ free_irq(priv->ctx.spi->irq, priv);
+ return 0;
+}
+
+static void encx24j600_setrx_proc(struct kthread_work *ws)
+{
+ struct encx24j600_priv *priv =
+ container_of(ws, struct encx24j600_priv, setrx_work);
+
+ mutex_lock(&priv->lock);
+ encx24j600_set_rxfilter_mode(priv);
+ mutex_unlock(&priv->lock);
+}
+
+static void encx24j600_set_multicast_list(struct net_device *dev)
+{
+ struct encx24j600_priv *priv = netdev_priv(dev);
+ int oldfilter = priv->rxfilter;
+
+ if (dev->flags & IFF_PROMISC) {
+ netif_dbg(priv, link, dev, "promiscuous mode\n");
+ priv->rxfilter = RXFILTER_PROMISC;
+ } else if ((dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev)) {
+ netif_dbg(priv, link, dev, "%smulticast mode\n",
+ (dev->flags & IFF_ALLMULTI) ? "all-" : "");
+ priv->rxfilter = RXFILTER_MULTI;
+ } else {
+ netif_dbg(priv, link, dev, "normal mode\n");
+ priv->rxfilter = RXFILTER_NORMAL;
+ }
+
+ if (oldfilter != priv->rxfilter)
+ kthread_queue_work(&priv->kworker, &priv->setrx_work);
+}
+
+static void encx24j600_hw_tx(struct encx24j600_priv *priv)
+{
+ struct net_device *dev = priv->ndev;
+
+ netif_info(priv, tx_queued, dev, "TX Packet Len:%d\n",
+ priv->tx_skb->len);
+
+ if (netif_msg_pktdata(priv))
+ dump_packet("TX", priv->tx_skb->len, priv->tx_skb->data);
+
+ if (encx24j600_read_reg(priv, EIR) & TXABTIF)
+		/* Last transmission aborted due to an error; reset the TX interface */
+ encx24j600_reset_hw_tx(priv);
+
+	/* Clear the TXIF flag if it was previously set */
+ encx24j600_clr_bits(priv, EIR, TXIF);
+
+ /* Set the data pointer to the TX buffer address in the SRAM */
+ encx24j600_write_reg(priv, EGPWRPT, ENC_TX_BUF_START);
+
+ /* Copy the packet into the SRAM */
+ encx24j600_raw_write(priv, WGPDATA, (u8 *)priv->tx_skb->data,
+ priv->tx_skb->len);
+
+ /* Program the Tx buffer start pointer */
+ encx24j600_write_reg(priv, ETXST, ENC_TX_BUF_START);
+
+ /* Program the packet length */
+ encx24j600_write_reg(priv, ETXLEN, priv->tx_skb->len);
+
+ /* Start the transmission */
+ encx24j600_cmd(priv, SETTXRTS);
+}
+
+static void encx24j600_tx_proc(struct kthread_work *ws)
+{
+ struct encx24j600_priv *priv =
+ container_of(ws, struct encx24j600_priv, tx_work);
+
+ mutex_lock(&priv->lock);
+ encx24j600_hw_tx(priv);
+ mutex_unlock(&priv->lock);
+}
+
+static netdev_tx_t encx24j600_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ struct encx24j600_priv *priv = netdev_priv(dev);
+
+ netif_stop_queue(dev);
+
+ /* save the timestamp */
+ netif_trans_update(dev);
+
+ /* Remember the skb for deferred processing */
+ priv->tx_skb = skb;
+
+ kthread_queue_work(&priv->kworker, &priv->tx_work);
+
+ return NETDEV_TX_OK;
+}
+
+/* Deal with a transmit timeout */
+static void encx24j600_tx_timeout(struct net_device *dev, unsigned int txqueue)
+{
+ struct encx24j600_priv *priv = netdev_priv(dev);
+
+ netif_err(priv, tx_err, dev, "TX timeout at %ld, latency %ld\n",
+ jiffies, jiffies - dev_trans_start(dev));
+
+ dev->stats.tx_errors++;
+ netif_wake_queue(dev);
+}
+
+static int encx24j600_get_regs_len(struct net_device *dev)
+{
+ return SFR_REG_COUNT;
+}
+
+static void encx24j600_get_regs(struct net_device *dev,
+ struct ethtool_regs *regs, void *p)
+{
+ struct encx24j600_priv *priv = netdev_priv(dev);
+ u16 *buff = p;
+ u8 reg;
+
+ regs->version = 1;
+ mutex_lock(&priv->lock);
+ for (reg = 0; reg < SFR_REG_COUNT; reg += 2) {
+ unsigned int val = 0;
+ /* ignore errors for unreadable registers */
+ regmap_read(priv->ctx.regmap, reg, &val);
+		/* registers are 16 bits wide at even addresses, so pack
+		 * them contiguously into the regs_len-sized buffer
+		 */
+		buff[reg / 2] = val & 0xffff;
+ }
+ mutex_unlock(&priv->lock);
+}
+
+static void encx24j600_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, dev_name(dev->dev.parent),
+ sizeof(info->bus_info));
+}
+
+static int encx24j600_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct encx24j600_priv *priv = netdev_priv(dev);
+ u32 supported;
+
+ supported = SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
+ SUPPORTED_Autoneg | SUPPORTED_TP;
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+
+ cmd->base.speed = priv->speed;
+ cmd->base.duplex = priv->full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
+ cmd->base.port = PORT_TP;
+ cmd->base.autoneg = priv->autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
+
+ return 0;
+}
+
+static int
+encx24j600_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ return encx24j600_setlink(dev, cmd->base.autoneg,
+ cmd->base.speed, cmd->base.duplex);
+}
+
+static u32 encx24j600_get_msglevel(struct net_device *dev)
+{
+ struct encx24j600_priv *priv = netdev_priv(dev);
+
+ return priv->msg_enable;
+}
+
+static void encx24j600_set_msglevel(struct net_device *dev, u32 val)
+{
+ struct encx24j600_priv *priv = netdev_priv(dev);
+
+ priv->msg_enable = val;
+}
+
+static const struct ethtool_ops encx24j600_ethtool_ops = {
+ .get_drvinfo = encx24j600_get_drvinfo,
+ .get_msglevel = encx24j600_get_msglevel,
+ .set_msglevel = encx24j600_set_msglevel,
+ .get_regs_len = encx24j600_get_regs_len,
+ .get_regs = encx24j600_get_regs,
+ .get_link_ksettings = encx24j600_get_link_ksettings,
+ .set_link_ksettings = encx24j600_set_link_ksettings,
+};
+
+static const struct net_device_ops encx24j600_netdev_ops = {
+ .ndo_open = encx24j600_open,
+ .ndo_stop = encx24j600_stop,
+ .ndo_start_xmit = encx24j600_tx,
+ .ndo_set_rx_mode = encx24j600_set_multicast_list,
+ .ndo_set_mac_address = encx24j600_set_mac_address,
+ .ndo_tx_timeout = encx24j600_tx_timeout,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+static int encx24j600_spi_probe(struct spi_device *spi)
+{
+ int ret;
+
+ struct net_device *ndev;
+ struct encx24j600_priv *priv;
+ u16 eidled;
+ u8 addr[ETH_ALEN];
+
+ ndev = alloc_etherdev(sizeof(struct encx24j600_priv));
+
+ if (!ndev) {
+ ret = -ENOMEM;
+ goto error_out;
+ }
+
+ priv = netdev_priv(ndev);
+ spi_set_drvdata(spi, priv);
+ SET_NETDEV_DEV(ndev, &spi->dev);
+
+ priv->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
+ priv->ndev = ndev;
+
+	/* Default PHY configuration */
+ priv->full_duplex = true;
+ priv->autoneg = AUTONEG_ENABLE;
+ priv->speed = SPEED_100;
+
+ priv->ctx.spi = spi;
+ ndev->irq = spi->irq;
+ ndev->netdev_ops = &encx24j600_netdev_ops;
+
+ ret = devm_regmap_init_encx24j600(&spi->dev, &priv->ctx);
+ if (ret)
+ goto out_free;
+
+ mutex_init(&priv->lock);
+
+ /* Reset device and check if it is connected */
+ if (encx24j600_hw_reset(priv)) {
+ netif_err(priv, probe, ndev,
+ DRV_NAME ": Chip is not detected\n");
+ ret = -EIO;
+ goto out_free;
+ }
+
+ /* Initialize the device HW to the consistent state */
+ encx24j600_hw_init(priv);
+
+ kthread_init_worker(&priv->kworker);
+ kthread_init_work(&priv->tx_work, encx24j600_tx_proc);
+ kthread_init_work(&priv->setrx_work, encx24j600_setrx_proc);
+
+ priv->kworker_task = kthread_run(kthread_worker_fn, &priv->kworker,
+ "encx24j600");
+
+ if (IS_ERR(priv->kworker_task)) {
+ ret = PTR_ERR(priv->kworker_task);
+ goto out_free;
+ }
+
+ /* Get the MAC address from the chip */
+ encx24j600_hw_get_macaddr(priv, addr);
+ eth_hw_addr_set(ndev, addr);
+
+ ndev->ethtool_ops = &encx24j600_ethtool_ops;
+
+ ret = register_netdev(ndev);
+ if (unlikely(ret)) {
+		netif_err(priv, probe, ndev, "Error %d initializing encx24j600 card\n",
+			  ret);
+ goto out_stop;
+ }
+
+ eidled = encx24j600_read_reg(priv, EIDLED);
+ if (((eidled & DEVID_MASK) >> DEVID_SHIFT) != ENCX24J600_DEV_ID) {
+ ret = -EINVAL;
+ goto out_unregister;
+ }
+
+ netif_info(priv, probe, ndev, "Silicon rev ID: 0x%02x\n",
+ (eidled & REVID_MASK) >> REVID_SHIFT);
+
+ netif_info(priv, drv, priv->ndev, "MAC address %pM\n", ndev->dev_addr);
+
+ return ret;
+
+out_unregister:
+ unregister_netdev(priv->ndev);
+out_stop:
+ kthread_stop(priv->kworker_task);
+out_free:
+ free_netdev(ndev);
+
+error_out:
+ return ret;
+}
+
+static void encx24j600_spi_remove(struct spi_device *spi)
+{
+ struct encx24j600_priv *priv = dev_get_drvdata(&spi->dev);
+
+ unregister_netdev(priv->ndev);
+ kthread_stop(priv->kworker_task);
+
+ free_netdev(priv->ndev);
+}
+
+static const struct spi_device_id encx24j600_spi_id_table[] = {
+ { .name = "encx24j600" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(spi, encx24j600_spi_id_table);
+
+static struct spi_driver encx24j600_spi_net_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ .bus = &spi_bus_type,
+ },
+ .probe = encx24j600_spi_probe,
+ .remove = encx24j600_spi_remove,
+ .id_table = encx24j600_spi_id_table,
+};
+
+module_spi_driver(encx24j600_spi_net_driver);
+
+MODULE_DESCRIPTION(DRV_NAME " ethernet driver");
+MODULE_AUTHOR("Jon Ringle <jringle@gridpoint.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/microchip/encx24j600_hw.h b/drivers/net/ethernet/microchip/encx24j600_hw.h
new file mode 100644
index 0000000000..34c5a28989
--- /dev/null
+++ b/drivers/net/ethernet/microchip/encx24j600_hw.h
@@ -0,0 +1,438 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * encx24j600_hw.h: Register definitions
+ */
+
+#ifndef _ENCX24J600_HW_H
+#define _ENCX24J600_HW_H
+
+struct encx24j600_context {
+ struct spi_device *spi;
+ struct regmap *regmap;
+ struct regmap *phymap;
+ struct mutex mutex; /* mutex to protect access to regmap */
+ int bank;
+};
+
+int devm_regmap_init_encx24j600(struct device *dev,
+ struct encx24j600_context *ctx);
+
+/* Single-byte instructions */
+#define BANK_SELECT(bank) (0xC0 | ((bank & (BANK_MASK >> BANK_SHIFT)) << 1))
+#define B0SEL 0xC0 /* Bank 0 Select */
+#define B1SEL 0xC2 /* Bank 1 Select */
+#define B2SEL 0xC4 /* Bank 2 Select */
+#define B3SEL 0xC6 /* Bank 3 Select */
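+/* BANK_SELECT(n) yields the matching BnSEL opcode,
+ * e.g. BANK_SELECT(2) == 0xC4 == B2SEL.
+ */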
+#define SETETHRST 0xCA /* System Reset */
+#define FCDISABLE 0xE0 /* Flow Control Disable */
+#define FCSINGLE 0xE2 /* Flow Control Single */
+#define FCMULTIPLE 0xE4 /* Flow Control Multiple */
+#define FCCLEAR 0xE6 /* Flow Control Clear */
+#define SETPKTDEC 0xCC /* Decrement Packet Counter */
+#define DMASTOP 0xD2 /* DMA Stop */
+#define DMACKSUM 0xD8 /* DMA Start Checksum */
+#define DMACKSUMS 0xDA /* DMA Start Checksum with Seed */
+#define DMACOPY 0xDC /* DMA Start Copy */
+#define DMACOPYS 0xDE /* DMA Start Copy and Checksum with Seed */
+#define SETTXRTS 0xD4 /* Request Packet Transmission */
+#define ENABLERX 0xE8 /* Enable RX */
+#define DISABLERX 0xEA /* Disable RX */
+#define SETEIE 0xEC /* Enable Interrupts */
+#define CLREIE 0xEE /* Disable Interrupts */
+
+/* Two byte instructions */
+#define RBSEL 0xC8 /* Read Bank Select */
+
+/* Three byte instructions */
+#define WGPRDPT 0x60 /* Write EGPRDPT */
+#define RGPRDPT 0x62 /* Read EGPRDPT */
+#define WRXRDPT 0x64 /* Write ERXRDPT */
+#define RRXRDPT 0x66 /* Read ERXRDPT */
+#define WUDARDPT 0x68 /* Write EUDARDPT */
+#define RUDARDPT 0x6A /* Read EUDARDPT */
+#define WGPWRPT 0x6C /* Write EGPWRPT */
+#define RGPWRPT 0x6E /* Read EGPWRPT */
+#define WRXWRPT 0x70 /* Write ERXWRPT */
+#define RRXWRPT 0x72 /* Read ERXWRPT */
+#define WUDAWRPT 0x74 /* Write EUDAWRPT */
+#define RUDAWRPT 0x76 /* Read EUDAWRPT */
+
+/* n byte instructions */
+#define RCRCODE 0x00
+#define WCRCODE 0x40
+#define BFSCODE 0x80
+#define BFCCODE 0xA0
+#define RCR(addr) (RCRCODE | (addr & ADDR_MASK)) /* Read Control Register */
+#define WCR(addr) (WCRCODE | (addr & ADDR_MASK)) /* Write Control Register */
+#define RCRU 0x20 /* Read Control Register Unbanked */
+#define WCRU 0x22 /* Write Control Register Unbanked */
+#define BFS(addr) (BFSCODE | (addr & ADDR_MASK)) /* Bit Field Set */
+#define BFC(addr) (BFCCODE | (addr & ADDR_MASK)) /* Bit Field Clear */
+#define BFSU 0x24 /* Bit Field Set Unbanked */
+#define BFCU 0x26 /* Bit Field Clear Unbanked */
+#define RGPDATA 0x28 /* Read EGPDATA */
+#define WGPDATA 0x2A /* Write EGPDATA */
+#define RRXDATA 0x2C /* Read ERXDATA */
+#define WRXDATA 0x2E /* Write ERXDATA */
+#define RUDADATA 0x30 /* Read EUDADATA */
+#define WUDADATA 0x32 /* Write EUDADATA */
+
+#define SFR_REG_COUNT 0xA0
+
+/* ENC424J600 Control Registers
+ * Control register definitions are a combination of address
+ * and bank number
+ * - Register address (bits 0-4)
+ * - Bank number (bits 5-6)
+ */
+#define ADDR_MASK 0x1F
+#define BANK_MASK 0x60
+#define BANK_SHIFT 5
+
+/* All-bank registers */
+#define EUDAST 0x16
+#define EUDAND 0x18
+#define ESTAT 0x1A
+#define EIR 0x1C
+#define ECON1 0x1E
+
+/* Bank 0 registers */
+#define ETXST (0x00 | 0x00)
+#define ETXLEN (0x02 | 0x00)
+#define ERXST (0x04 | 0x00)
+#define ERXTAIL (0x06 | 0x00)
+#define ERXHEAD (0x08 | 0x00)
+#define EDMAST (0x0A | 0x00)
+#define EDMALEN (0x0C | 0x00)
+#define EDMADST (0x0E | 0x00)
+#define EDMACS (0x10 | 0x00)
+#define ETXSTAT (0x12 | 0x00)
+#define ETXWIRE (0x14 | 0x00)
+
+/* Bank 1 registers */
+#define EHT1 (0x00 | 0x20)
+#define EHT2 (0x02 | 0x20)
+#define EHT3 (0x04 | 0x20)
+#define EHT4 (0x06 | 0x20)
+#define EPMM1 (0x08 | 0x20)
+#define EPMM2 (0x0A | 0x20)
+#define EPMM3 (0x0C | 0x20)
+#define EPMM4 (0x0E | 0x20)
+#define EPMCS (0x10 | 0x20)
+#define EPMO (0x12 | 0x20)
+#define ERXFCON (0x14 | 0x20)
+
+/* Bank 2 registers */
+#define MACON1 (0x00 | 0x40)
+#define MACON2 (0x02 | 0x40)
+#define MABBIPG (0x04 | 0x40)
+#define MAIPG (0x06 | 0x40)
+#define MACLCON (0x08 | 0x40)
+#define MAMXFL (0x0A | 0x40)
+#define MICMD (0x12 | 0x40)
+#define MIREGADR (0x14 | 0x40)
+
+/* Bank 3 registers */
+#define MAADR3 (0x00 | 0x60)
+#define MAADR2 (0x02 | 0x60)
+#define MAADR1 (0x04 | 0x60)
+#define MIWR (0x06 | 0x60)
+#define MIRD (0x08 | 0x60)
+#define MISTAT (0x0A | 0x60)
+#define EPAUS (0x0C | 0x60)
+#define ECON2 (0x0E | 0x60)
+#define ERXWM (0x10 | 0x60)
+#define EIE (0x12 | 0x60)
+#define EIDLED (0x14 | 0x60)
+
+/* Unbanked registers */
+#define EGPDATA (0x00 | 0x80)
+#define ERXDATA (0x02 | 0x80)
+#define EUDADATA (0x04 | 0x80)
+#define EGPRDPT (0x06 | 0x80)
+#define EGPWRPT (0x08 | 0x80)
+#define ERXRDPT (0x0A | 0x80)
+#define ERXWRPT (0x0C | 0x80)
+#define EUDARDPT (0x0E | 0x80)
+#define EUDAWRPT (0x10 | 0x80)
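+/* The 0x80 flag marks these registers as unbanked; the regmap layer
+ * maps them onto the dedicated 3-byte pointer commands (RGPRDPT,
+ * WGPRDPT, etc.) rather than banked RCR/WCR accesses.
+ */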
+
+
+/* Register bit definitions */
+/* ESTAT */
+#define INT (1 << 15)
+#define FCIDLE (1 << 14)
+#define RXBUSY (1 << 13)
+#define CLKRDY (1 << 12)
+#define PHYDPX (1 << 10)
+#define PHYLNK (1 << 8)
+
+/* EIR */
+#define CRYPTEN (1 << 15)
+#define MODEXIF (1 << 14)
+#define HASHIF (1 << 13)
+#define AESIF (1 << 12)
+#define LINKIF (1 << 11)
+#define PKTIF (1 << 6)
+#define DMAIF (1 << 5)
+#define TXIF (1 << 3)
+#define TXABTIF (1 << 2)
+#define RXABTIF (1 << 1)
+#define PCFULIF (1 << 0)
+
+/* ECON1 */
+#define MODEXST (1 << 15)
+#define HASHEN (1 << 14)
+#define HASHOP (1 << 13)
+#define HASHLST (1 << 12)
+#define AESST (1 << 11)
+#define AESOP1 (1 << 10)
+#define AESOP0 (1 << 9)
+#define PKTDEC (1 << 8)
+#define FCOP1 (1 << 7)
+#define FCOP0 (1 << 6)
+#define DMAST (1 << 5)
+#define DMACPY (1 << 4)
+#define DMACSSD (1 << 3)
+#define DMANOCS (1 << 2)
+#define TXRTS (1 << 1)
+#define RXEN (1 << 0)
+
+/* ETXSTAT */
+#define LATECOL (1 << 10)
+#define MAXCOL (1 << 9)
+#define EXDEFER (1 << 8)
+#define ETXSTATL_DEFER (1 << 7)
+#define CRCBAD (1 << 4)
+#define COLCNT_MASK 0xF
+
+/* ERXFCON */
+#define HTEN (1 << 15)
+#define MPEN (1 << 14)
+#define NOTPM (1 << 12)
+#define PMEN3 (1 << 11)
+#define PMEN2 (1 << 10)
+#define PMEN1 (1 << 9)
+#define PMEN0 (1 << 8)
+#define CRCEEN (1 << 7)
+#define CRCEN (1 << 6)
+#define RUNTEEN (1 << 5)
+#define RUNTEN (1 << 4)
+#define UCEN (1 << 3)
+#define NOTMEEN (1 << 2)
+#define MCEN (1 << 1)
+#define BCEN (1 << 0)
+
+/* MACON1 */
+#define LOOPBK (1 << 4)
+#define RXPAUS (1 << 2)
+#define PASSALL (1 << 1)
+
+/* MACON2 */
+#define MACON2_DEFER (1 << 14)
+#define BPEN (1 << 13)
+#define NOBKOFF (1 << 12)
+#define PADCFG2 (1 << 7)
+#define PADCFG1 (1 << 6)
+#define PADCFG0 (1 << 5)
+#define TXCRCEN (1 << 4)
+#define PHDREN (1 << 3)
+#define HFRMEN (1 << 2)
+#define MACON2_RSV1 (1 << 1)
+#define FULDPX (1 << 0)
+
+/* MAIPG */
+/* The value of the high byte is given by the reserved bits,
+ * the value of the low byte is the recommended setting of the
+ * IPG parameter.
+ */
+#define MAIPGH_VAL 0x0C
+#define MAIPGL_VAL 0x12
+
+/* MIREGADRH */
+#define MIREGADR_VAL (1 << 8)
+
+/* MIREGADRL */
+#define PHREG_MASK 0x1F
+
+/* MICMD */
+#define MIISCAN (1 << 1)
+#define MIIRD (1 << 0)
+
+/* MISTAT */
+#define NVALID (1 << 2)
+#define SCAN (1 << 1)
+#define BUSY (1 << 0)
+
+/* ECON2 */
+#define ETHEN (1 << 15)
+#define STRCH (1 << 14)
+#define TXMAC (1 << 13)
+#define SHA1MD5 (1 << 12)
+#define COCON3 (1 << 11)
+#define COCON2 (1 << 10)
+#define COCON1 (1 << 9)
+#define COCON0 (1 << 8)
+#define AUTOFC (1 << 7)
+#define TXRST (1 << 6)
+#define RXRST (1 << 5)
+#define ETHRST (1 << 4)
+#define MODLEN1 (1 << 3)
+#define MODLEN0 (1 << 2)
+#define AESLEN1 (1 << 1)
+#define AESLEN0 (1 << 0)
+
+/* EIE */
+#define INTIE (1 << 15)
+#define MODEXIE (1 << 14)
+#define HASHIE (1 << 13)
+#define AESIE (1 << 12)
+#define LINKIE (1 << 11)
+#define PKTIE (1 << 6)
+#define DMAIE (1 << 5)
+#define TXIE (1 << 3)
+#define TXABTIE (1 << 2)
+#define RXABTIE (1 << 1)
+#define PCFULIE (1 << 0)
+
+/* EIDLED */
+#define LACFG3 (1 << 15)
+#define LACFG2 (1 << 14)
+#define LACFG1 (1 << 13)
+#define LACFG0 (1 << 12)
+#define LBCFG3 (1 << 11)
+#define LBCFG2 (1 << 10)
+#define LBCFG1 (1 << 9)
+#define LBCFG0 (1 << 8)
+#define DEVID_SHIFT 5
+#define DEVID_MASK (0x7 << DEVID_SHIFT)
+#define REVID_SHIFT 0
+#define REVID_MASK (0x1F << REVID_SHIFT)
+
+/* PHY registers */
+#define PHCON1 0x00
+#define PHSTAT1 0x01
+#define PHANA 0x04
+#define PHANLPA 0x05
+#define PHANE 0x06
+#define PHCON2 0x11
+#define PHSTAT2 0x1B
+#define PHSTAT3 0x1F
+
+/* PHCON1 */
+#define PRST (1 << 15)
+#define PLOOPBK (1 << 14)
+#define SPD100 (1 << 13)
+#define ANEN (1 << 12)
+#define PSLEEP (1 << 11)
+#define RENEG (1 << 9)
+#define PFULDPX (1 << 8)
+
+/* PHSTAT1 */
+#define FULL100 (1 << 14)
+#define HALF100 (1 << 13)
+#define FULL10 (1 << 12)
+#define HALF10 (1 << 11)
+#define ANDONE (1 << 5)
+#define LRFAULT (1 << 4)
+#define ANABLE (1 << 3)
+#define LLSTAT (1 << 2)
+#define EXTREGS (1 << 0)
+
+/* PHSTAT2 */
+#define PLRITY (1 << 4)
+
+/* PHSTAT3 */
+#define PHY3SPD100 (1 << 3)
+#define PHY3DPX (1 << 4)
+#define SPDDPX_SHIFT 2
+#define SPDDPX_MASK (0x7 << SPDDPX_SHIFT)
+
+/* PHANA */
+/* Default value for PHY initialization */
+#define PHANA_DEFAULT 0x05E1
+
+/* PHANE */
+#define PDFLT (1 << 4)
+#define LPARCD (1 << 1)
+#define LPANABL (1 << 0)
+
+#define EUDAST_TEST_VAL 0x1234
+
+#define TSV_SIZE 7
+
+#define ENCX24J600_DEV_ID 0x1
+
+/* Configuration */
+
+/* LED is on when the link is present and driven low
+ * temporarily when a packet is TX'd or RX'd
+ */
+#define LED_A_SETTINGS 0xC
+
+/* LED is on if the link is in 100 Mbps mode */
+#define LED_B_SETTINGS 0x8
+
+/* Maximum Ethernet frame length.
+ * Currently not used as a limit anywhere
+ * (we're using the "huge frame enable" feature of
+ * the enc424j600).
+ */
+#define MAX_FRAMELEN 1518
+
+/* Size in bytes of the receive buffer in enc424j600.
+ * Must be word aligned (even).
+ */
+#define RX_BUFFER_SIZE (15 * MAX_FRAMELEN)
+
+/* Start of the general purpose area in sram */
+#define SRAM_GP_START 0x0
+
+/* SRAM size */
+#define SRAM_SIZE 0x6000
+
+/* Start of the receive buffer */
+#define ERXST_VAL (SRAM_SIZE - RX_BUFFER_SIZE)
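+
+/* With the defaults above the receive buffer starts at
+ * 0x6000 - (15 * 1518) = 0x070E, which is even as required,
+ * leaving the SRAM below it for the general purpose area.
+ */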
+
+#define RSV_RXLONGEVDROPEV 16
+#define RSV_CARRIEREV 18
+#define RSV_CRCERROR 20
+#define RSV_LENCHECKERR 21
+#define RSV_LENOUTOFRANGE 22
+#define RSV_RXOK 23
+#define RSV_RXMULTICAST 24
+#define RSV_RXBROADCAST 25
+#define RSV_DRIBBLENIBBLE 26
+#define RSV_RXCONTROLFRAME 27
+#define RSV_RXPAUSEFRAME 28
+#define RSV_RXUNKNOWNOPCODE 29
+#define RSV_RXTYPEVLAN 30
+
+#define RSV_RUNTFILTERMATCH 31
+#define RSV_NOTMEFILTERMATCH 32
+#define RSV_HASHFILTERMATCH 33
+#define RSV_MAGICPKTFILTERMATCH 34
+#define RSV_PTRNMTCHFILTERMATCH 35
+#define RSV_UNICASTFILTERMATCH 36
+
+#define RSV_SIZE 8
+#define RSV_BITMASK(x) (1 << ((x) - 16))
+#define RSV_GETBIT(x, y) (((x) & RSV_BITMASK(y)) ? 1 : 0)
+
+struct rsv {
+ u16 next_packet;
+ u16 len;
+ u32 rxstat;
+};
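+
+/* For example, RSV_GETBIT(rsv->rxstat, RSV_RXOK) tests the
+ * "receive OK" bit of a previously parsed receive status vector.
+ */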
+
+/* Put RX buffer at 0 as suggested by the Errata datasheet */
+
+#define RXSTART_INIT ERXST_VAL
+#define RXEND_INIT 0x5FFF
+
+int regmap_encx24j600_spi_write(void *context, u8 reg, const u8 *data,
+ size_t count);
+int regmap_encx24j600_spi_read(void *context, u8 reg, u8 *data, size_t count);
+
+#endif
diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c
new file mode 100644
index 0000000000..2db5949b4c
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c
@@ -0,0 +1,1413 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (C) 2018 Microchip Technology Inc. */
+
+#include <linux/netdevice.h>
+#include <linux/net_tstamp.h>
+#include <linux/pci.h>
+#include <linux/phy.h>
+#include "lan743x_main.h"
+#include "lan743x_ethtool.h"
+#include <linux/sched.h>
+#include <linux/iopoll.h>
+
+/* eeprom */
+#define LAN743X_EEPROM_MAGIC (0x74A5)
+#define LAN743X_OTP_MAGIC (0x74F3)
+#define EEPROM_INDICATOR_1 (0xA5)
+#define EEPROM_INDICATOR_2 (0xAA)
+#define EEPROM_MAC_OFFSET (0x01)
+#define MAX_EEPROM_SIZE (512)
+#define MAX_OTP_SIZE (1024)
+#define OTP_INDICATOR_1 (0xF3)
+#define OTP_INDICATOR_2 (0xF7)
+
+#define LOCK_TIMEOUT_MAX_CNT	(100) // 1 sec (10 msec * 100)
+
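+/* readx_poll_timeout() expects a read op that takes a single
+ * argument, so this wrapper binds the local "adapter" variable
+ * from the calling function's scope.
+ */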
+#define LAN743X_CSR_READ_OP(offset) lan743x_csr_read(adapter, offset)
+
+static int lan743x_otp_power_up(struct lan743x_adapter *adapter)
+{
+ u32 reg_value;
+
+ reg_value = lan743x_csr_read(adapter, OTP_PWR_DN);
+
+ if (reg_value & OTP_PWR_DN_PWRDN_N_) {
+ /* clear it and wait to be cleared */
+ reg_value &= ~OTP_PWR_DN_PWRDN_N_;
+ lan743x_csr_write(adapter, OTP_PWR_DN, reg_value);
+
+ usleep_range(100, 20000);
+ }
+
+ return 0;
+}
+
+static void lan743x_otp_power_down(struct lan743x_adapter *adapter)
+{
+ u32 reg_value;
+
+ reg_value = lan743x_csr_read(adapter, OTP_PWR_DN);
+ if (!(reg_value & OTP_PWR_DN_PWRDN_N_)) {
+ /* set power down bit */
+ reg_value |= OTP_PWR_DN_PWRDN_N_;
+ lan743x_csr_write(adapter, OTP_PWR_DN, reg_value);
+ }
+}
+
+static void lan743x_otp_set_address(struct lan743x_adapter *adapter,
+ u32 address)
+{
+ lan743x_csr_write(adapter, OTP_ADDR_HIGH, (address >> 8) & 0x03);
+ lan743x_csr_write(adapter, OTP_ADDR_LOW, address & 0xFF);
+}
+
+static void lan743x_otp_read_go(struct lan743x_adapter *adapter)
+{
+ lan743x_csr_write(adapter, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
+ lan743x_csr_write(adapter, OTP_CMD_GO, OTP_CMD_GO_GO_);
+}
+
+static int lan743x_otp_wait_till_not_busy(struct lan743x_adapter *adapter)
+{
+ unsigned long timeout;
+ u32 reg_val;
+
+ timeout = jiffies + HZ;
+ do {
+ if (time_after(jiffies, timeout)) {
+ netif_warn(adapter, drv, adapter->netdev,
+ "Timeout on OTP_STATUS completion\n");
+ return -EIO;
+ }
+ udelay(1);
+ reg_val = lan743x_csr_read(adapter, OTP_STATUS);
+ } while (reg_val & OTP_STATUS_BUSY_);
+
+ return 0;
+}
+
+static int lan743x_otp_read(struct lan743x_adapter *adapter, u32 offset,
+ u32 length, u8 *data)
+{
+ int ret;
+ int i;
+
+ if (offset + length > MAX_OTP_SIZE)
+ return -EINVAL;
+
+ ret = lan743x_otp_power_up(adapter);
+ if (ret < 0)
+ return ret;
+
+ ret = lan743x_otp_wait_till_not_busy(adapter);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < length; i++) {
+ lan743x_otp_set_address(adapter, offset + i);
+
+ lan743x_otp_read_go(adapter);
+ ret = lan743x_otp_wait_till_not_busy(adapter);
+ if (ret < 0)
+ return ret;
+ data[i] = lan743x_csr_read(adapter, OTP_READ_DATA);
+ }
+
+ lan743x_otp_power_down(adapter);
+
+ return 0;
+}
+
+static int lan743x_otp_write(struct lan743x_adapter *adapter, u32 offset,
+ u32 length, u8 *data)
+{
+ int ret;
+ int i;
+
+ if (offset + length > MAX_OTP_SIZE)
+ return -EINVAL;
+
+ ret = lan743x_otp_power_up(adapter);
+ if (ret < 0)
+ return ret;
+
+ ret = lan743x_otp_wait_till_not_busy(adapter);
+ if (ret < 0)
+ return ret;
+
+ /* set to BYTE program mode */
+ lan743x_csr_write(adapter, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);
+
+ for (i = 0; i < length; i++) {
+ lan743x_otp_set_address(adapter, offset + i);
+
+ lan743x_csr_write(adapter, OTP_PRGM_DATA, data[i]);
+ lan743x_csr_write(adapter, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
+ lan743x_csr_write(adapter, OTP_CMD_GO, OTP_CMD_GO_GO_);
+
+ ret = lan743x_otp_wait_till_not_busy(adapter);
+ if (ret < 0)
+ return ret;
+ }
+
+ lan743x_otp_power_down(adapter);
+
+ return 0;
+}
+
+int lan743x_hs_syslock_acquire(struct lan743x_adapter *adapter,
+ u16 timeout)
+{
+ u16 timeout_cnt = 0;
+ u32 val;
+
+ do {
+ spin_lock(&adapter->eth_syslock_spinlock);
+ if (adapter->eth_syslock_acquire_cnt == 0) {
+ lan743x_csr_write(adapter, ETH_SYSTEM_SYS_LOCK_REG,
+ SYS_LOCK_REG_ENET_SS_LOCK_);
+ val = lan743x_csr_read(adapter,
+ ETH_SYSTEM_SYS_LOCK_REG);
+ if (val & SYS_LOCK_REG_ENET_SS_LOCK_) {
+ adapter->eth_syslock_acquire_cnt++;
+ WARN_ON(adapter->eth_syslock_acquire_cnt == 0);
+ spin_unlock(&adapter->eth_syslock_spinlock);
+ break;
+ }
+ } else {
+ adapter->eth_syslock_acquire_cnt++;
+ WARN_ON(adapter->eth_syslock_acquire_cnt == 0);
+ spin_unlock(&adapter->eth_syslock_spinlock);
+ break;
+ }
+
+ spin_unlock(&adapter->eth_syslock_spinlock);
+
+ if (timeout_cnt++ < timeout)
+ usleep_range(10000, 11000);
+ else
+ return -ETIMEDOUT;
+ } while (true);
+
+ return 0;
+}
+
+void lan743x_hs_syslock_release(struct lan743x_adapter *adapter)
+{
+ u32 val;
+
+ spin_lock(&adapter->eth_syslock_spinlock);
+ WARN_ON(adapter->eth_syslock_acquire_cnt == 0);
+
+ if (adapter->eth_syslock_acquire_cnt) {
+ adapter->eth_syslock_acquire_cnt--;
+ if (adapter->eth_syslock_acquire_cnt == 0) {
+ lan743x_csr_write(adapter, ETH_SYSTEM_SYS_LOCK_REG, 0);
+ val = lan743x_csr_read(adapter,
+ ETH_SYSTEM_SYS_LOCK_REG);
+ WARN_ON((val & SYS_LOCK_REG_ENET_SS_LOCK_) != 0);
+ }
+ }
+
+ spin_unlock(&adapter->eth_syslock_spinlock);
+}
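+
+/* Typical PCI11x1x access pattern, as used by the HS OTP/EEPROM
+ * helpers below:
+ *
+ *	ret = lan743x_hs_syslock_acquire(adapter, LOCK_TIMEOUT_MAX_CNT);
+ *	if (ret < 0)
+ *		return ret;
+ *	... CSR reads/writes ...
+ *	lan743x_hs_syslock_release(adapter);
+ */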
+
+static void lan743x_hs_otp_power_up(struct lan743x_adapter *adapter)
+{
+ u32 reg_value;
+
+ reg_value = lan743x_csr_read(adapter, HS_OTP_PWR_DN);
+ if (reg_value & OTP_PWR_DN_PWRDN_N_) {
+ reg_value &= ~OTP_PWR_DN_PWRDN_N_;
+ lan743x_csr_write(adapter, HS_OTP_PWR_DN, reg_value);
+		/* Flush the posted write so the subsequent delay is
+		 * guaranteed to happen after the write has reached the
+		 * hardware
+		 */
+ lan743x_csr_read(adapter, HS_OTP_PWR_DN);
+ udelay(1);
+ }
+}
+
+static void lan743x_hs_otp_power_down(struct lan743x_adapter *adapter)
+{
+ u32 reg_value;
+
+ reg_value = lan743x_csr_read(adapter, HS_OTP_PWR_DN);
+ if (!(reg_value & OTP_PWR_DN_PWRDN_N_)) {
+ reg_value |= OTP_PWR_DN_PWRDN_N_;
+ lan743x_csr_write(adapter, HS_OTP_PWR_DN, reg_value);
+		/* Flush the posted write so the subsequent delay is
+		 * guaranteed to happen after the write has reached the
+		 * hardware
+		 */
+ lan743x_csr_read(adapter, HS_OTP_PWR_DN);
+ udelay(1);
+ }
+}
+
+static void lan743x_hs_otp_set_address(struct lan743x_adapter *adapter,
+ u32 address)
+{
+ lan743x_csr_write(adapter, HS_OTP_ADDR_HIGH, (address >> 8) & 0x03);
+ lan743x_csr_write(adapter, HS_OTP_ADDR_LOW, address & 0xFF);
+}
+
+static void lan743x_hs_otp_read_go(struct lan743x_adapter *adapter)
+{
+ lan743x_csr_write(adapter, HS_OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
+ lan743x_csr_write(adapter, HS_OTP_CMD_GO, OTP_CMD_GO_GO_);
+}
+
+static int lan743x_hs_otp_cmd_cmplt_chk(struct lan743x_adapter *adapter)
+{
+ u32 val;
+
+ return readx_poll_timeout(LAN743X_CSR_READ_OP, HS_OTP_STATUS, val,
+ !(val & OTP_STATUS_BUSY_),
+ 80, 10000);
+}
+
+static int lan743x_hs_otp_read(struct lan743x_adapter *adapter, u32 offset,
+ u32 length, u8 *data)
+{
+ int ret;
+ int i;
+
+ ret = lan743x_hs_syslock_acquire(adapter, LOCK_TIMEOUT_MAX_CNT);
+ if (ret < 0)
+ return ret;
+
+ lan743x_hs_otp_power_up(adapter);
+
+ ret = lan743x_hs_otp_cmd_cmplt_chk(adapter);
+ if (ret < 0)
+ goto power_down;
+
+ lan743x_hs_syslock_release(adapter);
+
+ for (i = 0; i < length; i++) {
+ ret = lan743x_hs_syslock_acquire(adapter,
+ LOCK_TIMEOUT_MAX_CNT);
+ if (ret < 0)
+ return ret;
+
+ lan743x_hs_otp_set_address(adapter, offset + i);
+
+ lan743x_hs_otp_read_go(adapter);
+ ret = lan743x_hs_otp_cmd_cmplt_chk(adapter);
+ if (ret < 0)
+ goto power_down;
+
+ data[i] = lan743x_csr_read(adapter, HS_OTP_READ_DATA);
+
+ lan743x_hs_syslock_release(adapter);
+ }
+
+ ret = lan743x_hs_syslock_acquire(adapter,
+ LOCK_TIMEOUT_MAX_CNT);
+ if (ret < 0)
+ return ret;
+
+power_down:
+ lan743x_hs_otp_power_down(adapter);
+ lan743x_hs_syslock_release(adapter);
+
+ return ret;
+}
+
+static int lan743x_hs_otp_write(struct lan743x_adapter *adapter, u32 offset,
+ u32 length, u8 *data)
+{
+ int ret;
+ int i;
+
+ ret = lan743x_hs_syslock_acquire(adapter, LOCK_TIMEOUT_MAX_CNT);
+ if (ret < 0)
+ return ret;
+
+ lan743x_hs_otp_power_up(adapter);
+
+ ret = lan743x_hs_otp_cmd_cmplt_chk(adapter);
+ if (ret < 0)
+ goto power_down;
+
+ /* set to BYTE program mode */
+ lan743x_csr_write(adapter, HS_OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);
+
+ lan743x_hs_syslock_release(adapter);
+
+ for (i = 0; i < length; i++) {
+ ret = lan743x_hs_syslock_acquire(adapter,
+ LOCK_TIMEOUT_MAX_CNT);
+ if (ret < 0)
+ return ret;
+
+ lan743x_hs_otp_set_address(adapter, offset + i);
+
+ lan743x_csr_write(adapter, HS_OTP_PRGM_DATA, data[i]);
+ lan743x_csr_write(adapter, HS_OTP_TST_CMD,
+ OTP_TST_CMD_PRGVRFY_);
+ lan743x_csr_write(adapter, HS_OTP_CMD_GO, OTP_CMD_GO_GO_);
+
+ ret = lan743x_hs_otp_cmd_cmplt_chk(adapter);
+ if (ret < 0)
+ goto power_down;
+
+ lan743x_hs_syslock_release(adapter);
+ }
+
+ ret = lan743x_hs_syslock_acquire(adapter, LOCK_TIMEOUT_MAX_CNT);
+ if (ret < 0)
+ return ret;
+
+power_down:
+ lan743x_hs_otp_power_down(adapter);
+ lan743x_hs_syslock_release(adapter);
+
+ return ret;
+}
+
+static int lan743x_eeprom_wait(struct lan743x_adapter *adapter)
+{
+ unsigned long start_time = jiffies;
+ u32 val;
+
+ do {
+ val = lan743x_csr_read(adapter, E2P_CMD);
+
+ if (!(val & E2P_CMD_EPC_BUSY_) ||
+ (val & E2P_CMD_EPC_TIMEOUT_))
+ break;
+ usleep_range(40, 100);
+ } while (!time_after(jiffies, start_time + HZ));
+
+ if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
+ netif_warn(adapter, drv, adapter->netdev,
+			   "EEPROM operation timeout\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int lan743x_eeprom_confirm_not_busy(struct lan743x_adapter *adapter)
+{
+ unsigned long start_time = jiffies;
+ u32 val;
+
+ do {
+ val = lan743x_csr_read(adapter, E2P_CMD);
+
+ if (!(val & E2P_CMD_EPC_BUSY_))
+ return 0;
+
+ usleep_range(40, 100);
+ } while (!time_after(jiffies, start_time + HZ));
+
+ netif_warn(adapter, drv, adapter->netdev, "EEPROM is busy\n");
+ return -EIO;
+}
+
+static int lan743x_eeprom_read(struct lan743x_adapter *adapter,
+ u32 offset, u32 length, u8 *data)
+{
+ int retval;
+ u32 val;
+ int i;
+
+ if (offset + length > MAX_EEPROM_SIZE)
+ return -EINVAL;
+
+ retval = lan743x_eeprom_confirm_not_busy(adapter);
+ if (retval)
+ return retval;
+
+ for (i = 0; i < length; i++) {
+ val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
+ val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
+ lan743x_csr_write(adapter, E2P_CMD, val);
+
+ retval = lan743x_eeprom_wait(adapter);
+ if (retval < 0)
+ return retval;
+
+ val = lan743x_csr_read(adapter, E2P_DATA);
+ data[i] = val & 0xFF;
+ offset++;
+ }
+
+ return 0;
+}
+
+static int lan743x_eeprom_write(struct lan743x_adapter *adapter,
+ u32 offset, u32 length, u8 *data)
+{
+ int retval;
+ u32 val;
+ int i;
+
+ if (offset + length > MAX_EEPROM_SIZE)
+ return -EINVAL;
+
+ retval = lan743x_eeprom_confirm_not_busy(adapter);
+ if (retval)
+ return retval;
+
+ /* Issue write/erase enable command */
+ val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
+ lan743x_csr_write(adapter, E2P_CMD, val);
+
+ retval = lan743x_eeprom_wait(adapter);
+ if (retval < 0)
+ return retval;
+
+ for (i = 0; i < length; i++) {
+ /* Fill data register */
+ val = data[i];
+ lan743x_csr_write(adapter, E2P_DATA, val);
+
+ /* Send "write" command */
+ val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
+ val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
+ lan743x_csr_write(adapter, E2P_CMD, val);
+
+ retval = lan743x_eeprom_wait(adapter);
+ if (retval < 0)
+ return retval;
+
+ offset++;
+ }
+
+ return 0;
+}
+
+static int lan743x_hs_eeprom_cmd_cmplt_chk(struct lan743x_adapter *adapter)
+{
+ u32 val;
+
+ return readx_poll_timeout(LAN743X_CSR_READ_OP, HS_E2P_CMD, val,
+ (!(val & HS_E2P_CMD_EPC_BUSY_) ||
+ (val & HS_E2P_CMD_EPC_TIMEOUT_)),
+ 50, 10000);
+}
+
+static int lan743x_hs_eeprom_read(struct lan743x_adapter *adapter,
+ u32 offset, u32 length, u8 *data)
+{
+ int retval;
+ u32 val;
+ int i;
+
+ retval = lan743x_hs_syslock_acquire(adapter, LOCK_TIMEOUT_MAX_CNT);
+ if (retval < 0)
+ return retval;
+
+ retval = lan743x_hs_eeprom_cmd_cmplt_chk(adapter);
+ lan743x_hs_syslock_release(adapter);
+ if (retval < 0)
+ return retval;
+
+ for (i = 0; i < length; i++) {
+ retval = lan743x_hs_syslock_acquire(adapter,
+ LOCK_TIMEOUT_MAX_CNT);
+ if (retval < 0)
+ return retval;
+
+ val = HS_E2P_CMD_EPC_BUSY_ | HS_E2P_CMD_EPC_CMD_READ_;
+ val |= (offset & HS_E2P_CMD_EPC_ADDR_MASK_);
+ lan743x_csr_write(adapter, HS_E2P_CMD, val);
+ retval = lan743x_hs_eeprom_cmd_cmplt_chk(adapter);
+ if (retval < 0) {
+ lan743x_hs_syslock_release(adapter);
+ return retval;
+ }
+
+ val = lan743x_csr_read(adapter, HS_E2P_DATA);
+
+ lan743x_hs_syslock_release(adapter);
+
+ data[i] = val & 0xFF;
+ offset++;
+ }
+
+ return 0;
+}
+
+static int lan743x_hs_eeprom_write(struct lan743x_adapter *adapter,
+ u32 offset, u32 length, u8 *data)
+{
+ int retval;
+ u32 val;
+ int i;
+
+ retval = lan743x_hs_syslock_acquire(adapter, LOCK_TIMEOUT_MAX_CNT);
+ if (retval < 0)
+ return retval;
+
+ retval = lan743x_hs_eeprom_cmd_cmplt_chk(adapter);
+ lan743x_hs_syslock_release(adapter);
+ if (retval < 0)
+ return retval;
+
+ for (i = 0; i < length; i++) {
+ retval = lan743x_hs_syslock_acquire(adapter,
+ LOCK_TIMEOUT_MAX_CNT);
+ if (retval < 0)
+ return retval;
+
+ /* Fill data register */
+ val = data[i];
+ lan743x_csr_write(adapter, HS_E2P_DATA, val);
+
+ /* Send "write" command */
+ val = HS_E2P_CMD_EPC_BUSY_ | HS_E2P_CMD_EPC_CMD_WRITE_;
+ val |= (offset & HS_E2P_CMD_EPC_ADDR_MASK_);
+ lan743x_csr_write(adapter, HS_E2P_CMD, val);
+
+ retval = lan743x_hs_eeprom_cmd_cmplt_chk(adapter);
+ lan743x_hs_syslock_release(adapter);
+ if (retval < 0)
+ return retval;
+
+ offset++;
+ }
+
+ return 0;
+}
+
+static void lan743x_ethtool_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *info)
+{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+
+ strscpy(info->driver, DRIVER_NAME, sizeof(info->driver));
+ strscpy(info->bus_info,
+ pci_name(adapter->pdev), sizeof(info->bus_info));
+}
+
+static u32 lan743x_ethtool_get_msglevel(struct net_device *netdev)
+{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+
+ return adapter->msg_enable;
+}
+
+static void lan743x_ethtool_set_msglevel(struct net_device *netdev,
+ u32 msglevel)
+{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+
+ adapter->msg_enable = msglevel;
+}
+
+static int lan743x_ethtool_get_eeprom_len(struct net_device *netdev)
+{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+
+ if (adapter->flags & LAN743X_ADAPTER_FLAG_OTP)
+ return MAX_OTP_SIZE;
+
+ return MAX_EEPROM_SIZE;
+}
+
+static int lan743x_ethtool_get_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *ee, u8 *data)
+{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+ int ret = 0;
+
+ if (adapter->flags & LAN743X_ADAPTER_FLAG_OTP) {
+ if (adapter->is_pci11x1x)
+ ret = lan743x_hs_otp_read(adapter, ee->offset,
+ ee->len, data);
+ else
+ ret = lan743x_otp_read(adapter, ee->offset,
+ ee->len, data);
+ } else {
+ if (adapter->is_pci11x1x)
+ ret = lan743x_hs_eeprom_read(adapter, ee->offset,
+ ee->len, data);
+ else
+ ret = lan743x_eeprom_read(adapter, ee->offset,
+ ee->len, data);
+ }
+
+ return ret;
+}
+
+static int lan743x_ethtool_set_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *ee, u8 *data)
+{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+ int ret = -EINVAL;
+
+ if (adapter->flags & LAN743X_ADAPTER_FLAG_OTP) {
+ /* Beware! OTP is One Time Programming ONLY! */
+ if (ee->magic == LAN743X_OTP_MAGIC) {
+ if (adapter->is_pci11x1x)
+ ret = lan743x_hs_otp_write(adapter, ee->offset,
+ ee->len, data);
+ else
+ ret = lan743x_otp_write(adapter, ee->offset,
+ ee->len, data);
+ }
+ } else {
+ if (ee->magic == LAN743X_EEPROM_MAGIC) {
+ if (adapter->is_pci11x1x)
+ ret = lan743x_hs_eeprom_write(adapter,
+ ee->offset,
+ ee->len, data);
+ else
+ ret = lan743x_eeprom_write(adapter, ee->offset,
+ ee->len, data);
+ }
+ }
+
+ return ret;
+}
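+
+/* Note: writes require the matching magic from userspace
+ * (LAN743X_EEPROM_MAGIC or LAN743X_OTP_MAGIC, supplied via
+ * ethtool -E), otherwise the request fails with -EINVAL; this
+ * guards against accidental (and, for OTP, irreversible) writes.
+ */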
+
+static const char lan743x_set0_hw_cnt_strings[][ETH_GSTRING_LEN] = {
+ "RX FCS Errors",
+ "RX Alignment Errors",
+ "Rx Fragment Errors",
+ "RX Jabber Errors",
+ "RX Undersize Frame Errors",
+ "RX Oversize Frame Errors",
+ "RX Dropped Frames",
+ "RX Unicast Byte Count",
+ "RX Broadcast Byte Count",
+ "RX Multicast Byte Count",
+ "RX Unicast Frames",
+ "RX Broadcast Frames",
+ "RX Multicast Frames",
+ "RX Pause Frames",
+ "RX 64 Byte Frames",
+ "RX 65 - 127 Byte Frames",
+ "RX 128 - 255 Byte Frames",
+ "RX 256 - 511 Bytes Frames",
+ "RX 512 - 1023 Byte Frames",
+ "RX 1024 - 1518 Byte Frames",
+ "RX Greater 1518 Byte Frames",
+};
+
+static const char lan743x_set1_sw_cnt_strings[][ETH_GSTRING_LEN] = {
+ "RX Queue 0 Frames",
+ "RX Queue 1 Frames",
+ "RX Queue 2 Frames",
+ "RX Queue 3 Frames",
+};
+
+static const char lan743x_tx_queue_cnt_strings[][ETH_GSTRING_LEN] = {
+ "TX Queue 0 Frames",
+ "TX Queue 1 Frames",
+ "TX Queue 2 Frames",
+ "TX Queue 3 Frames",
+ "TX Total Queue Frames",
+};
+
+static const char lan743x_set2_hw_cnt_strings[][ETH_GSTRING_LEN] = {
+ "RX Total Frames",
+ "EEE RX LPI Transitions",
+ "EEE RX LPI Time",
+ "RX Counter Rollover Status",
+ "TX FCS Errors",
+ "TX Excess Deferral Errors",
+ "TX Carrier Errors",
+ "TX Bad Byte Count",
+ "TX Single Collisions",
+ "TX Multiple Collisions",
+ "TX Excessive Collision",
+ "TX Late Collisions",
+ "TX Unicast Byte Count",
+ "TX Broadcast Byte Count",
+ "TX Multicast Byte Count",
+ "TX Unicast Frames",
+ "TX Broadcast Frames",
+ "TX Multicast Frames",
+ "TX Pause Frames",
+ "TX 64 Byte Frames",
+ "TX 65 - 127 Byte Frames",
+ "TX 128 - 255 Byte Frames",
+ "TX 256 - 511 Bytes Frames",
+ "TX 512 - 1023 Byte Frames",
+ "TX 1024 - 1518 Byte Frames",
+ "TX Greater 1518 Byte Frames",
+ "TX Total Frames",
+ "EEE TX LPI Transitions",
+ "EEE TX LPI Time",
+ "TX Counter Rollover Status",
+};
+
+static const u32 lan743x_set0_hw_cnt_addr[] = {
+ STAT_RX_FCS_ERRORS,
+ STAT_RX_ALIGNMENT_ERRORS,
+ STAT_RX_FRAGMENT_ERRORS,
+ STAT_RX_JABBER_ERRORS,
+ STAT_RX_UNDERSIZE_FRAME_ERRORS,
+ STAT_RX_OVERSIZE_FRAME_ERRORS,
+ STAT_RX_DROPPED_FRAMES,
+ STAT_RX_UNICAST_BYTE_COUNT,
+ STAT_RX_BROADCAST_BYTE_COUNT,
+ STAT_RX_MULTICAST_BYTE_COUNT,
+ STAT_RX_UNICAST_FRAMES,
+ STAT_RX_BROADCAST_FRAMES,
+ STAT_RX_MULTICAST_FRAMES,
+ STAT_RX_PAUSE_FRAMES,
+ STAT_RX_64_BYTE_FRAMES,
+ STAT_RX_65_127_BYTE_FRAMES,
+ STAT_RX_128_255_BYTE_FRAMES,
+ STAT_RX_256_511_BYTES_FRAMES,
+ STAT_RX_512_1023_BYTE_FRAMES,
+ STAT_RX_1024_1518_BYTE_FRAMES,
+ STAT_RX_GREATER_1518_BYTE_FRAMES,
+};
+
+static const u32 lan743x_set2_hw_cnt_addr[] = {
+ STAT_RX_TOTAL_FRAMES,
+ STAT_EEE_RX_LPI_TRANSITIONS,
+ STAT_EEE_RX_LPI_TIME,
+ STAT_RX_COUNTER_ROLLOVER_STATUS,
+ STAT_TX_FCS_ERRORS,
+ STAT_TX_EXCESS_DEFERRAL_ERRORS,
+ STAT_TX_CARRIER_ERRORS,
+ STAT_TX_BAD_BYTE_COUNT,
+ STAT_TX_SINGLE_COLLISIONS,
+ STAT_TX_MULTIPLE_COLLISIONS,
+ STAT_TX_EXCESSIVE_COLLISION,
+ STAT_TX_LATE_COLLISIONS,
+ STAT_TX_UNICAST_BYTE_COUNT,
+ STAT_TX_BROADCAST_BYTE_COUNT,
+ STAT_TX_MULTICAST_BYTE_COUNT,
+ STAT_TX_UNICAST_FRAMES,
+ STAT_TX_BROADCAST_FRAMES,
+ STAT_TX_MULTICAST_FRAMES,
+ STAT_TX_PAUSE_FRAMES,
+ STAT_TX_64_BYTE_FRAMES,
+ STAT_TX_65_127_BYTE_FRAMES,
+ STAT_TX_128_255_BYTE_FRAMES,
+ STAT_TX_256_511_BYTES_FRAMES,
+ STAT_TX_512_1023_BYTE_FRAMES,
+ STAT_TX_1024_1518_BYTE_FRAMES,
+ STAT_TX_GREATER_1518_BYTE_FRAMES,
+ STAT_TX_TOTAL_FRAMES,
+ STAT_EEE_TX_LPI_TRANSITIONS,
+ STAT_EEE_TX_LPI_TIME,
+ STAT_TX_COUNTER_ROLLOVER_STATUS
+};
+
+static const char lan743x_priv_flags_strings[][ETH_GSTRING_LEN] = {
+ "OTP_ACCESS",
+};
+
+static void lan743x_ethtool_get_strings(struct net_device *netdev,
+ u32 stringset, u8 *data)
+{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ memcpy(data, lan743x_set0_hw_cnt_strings,
+ sizeof(lan743x_set0_hw_cnt_strings));
+ memcpy(&data[sizeof(lan743x_set0_hw_cnt_strings)],
+ lan743x_set1_sw_cnt_strings,
+ sizeof(lan743x_set1_sw_cnt_strings));
+ memcpy(&data[sizeof(lan743x_set0_hw_cnt_strings) +
+ sizeof(lan743x_set1_sw_cnt_strings)],
+ lan743x_set2_hw_cnt_strings,
+ sizeof(lan743x_set2_hw_cnt_strings));
+ if (adapter->is_pci11x1x) {
+ memcpy(&data[sizeof(lan743x_set0_hw_cnt_strings) +
+ sizeof(lan743x_set1_sw_cnt_strings) +
+ sizeof(lan743x_set2_hw_cnt_strings)],
+ lan743x_tx_queue_cnt_strings,
+ sizeof(lan743x_tx_queue_cnt_strings));
+ }
+ break;
+ case ETH_SS_PRIV_FLAGS:
+ memcpy(data, lan743x_priv_flags_strings,
+ sizeof(lan743x_priv_flags_strings));
+ break;
+ }
+}
+
+static void lan743x_ethtool_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats,
+ u64 *data)
+{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+ u64 total_queue_count = 0;
+ int data_index = 0;
+ u64 pkt_cnt;
+ u32 buf;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(lan743x_set0_hw_cnt_addr); i++) {
+ buf = lan743x_csr_read(adapter, lan743x_set0_hw_cnt_addr[i]);
+ data[data_index++] = (u64)buf;
+ }
+ for (i = 0; i < ARRAY_SIZE(adapter->rx); i++)
+ data[data_index++] = (u64)(adapter->rx[i].frame_count);
+ for (i = 0; i < ARRAY_SIZE(lan743x_set2_hw_cnt_addr); i++) {
+ buf = lan743x_csr_read(adapter, lan743x_set2_hw_cnt_addr[i]);
+ data[data_index++] = (u64)buf;
+ }
+ if (adapter->is_pci11x1x) {
+ for (i = 0; i < ARRAY_SIZE(adapter->tx); i++) {
+ pkt_cnt = (u64)(adapter->tx[i].frame_count);
+ data[data_index++] = pkt_cnt;
+ total_queue_count += pkt_cnt;
+ }
+ data[data_index++] = total_queue_count;
+ }
+}
+
+static u32 lan743x_ethtool_get_priv_flags(struct net_device *netdev)
+{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+
+ return adapter->flags;
+}
+
+static int lan743x_ethtool_set_priv_flags(struct net_device *netdev, u32 flags)
+{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+
+ adapter->flags = flags;
+
+ return 0;
+}
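+
+/* adapter->flags is exposed verbatim as the private flags word, so
+ * setting the "OTP_ACCESS" flag (LAN743X_ADAPTER_FLAG_OTP) makes the
+ * ethtool EEPROM ops above target the OTP array instead of the
+ * external EEPROM.
+ */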
+
+static int lan743x_ethtool_get_sset_count(struct net_device *netdev, int sset)
+{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+
+ switch (sset) {
+ case ETH_SS_STATS:
+ {
+ int ret;
+
+ ret = ARRAY_SIZE(lan743x_set0_hw_cnt_strings);
+ ret += ARRAY_SIZE(lan743x_set1_sw_cnt_strings);
+ ret += ARRAY_SIZE(lan743x_set2_hw_cnt_strings);
+ if (adapter->is_pci11x1x)
+ ret += ARRAY_SIZE(lan743x_tx_queue_cnt_strings);
+ return ret;
+ }
+ case ETH_SS_PRIV_FLAGS:
+ return ARRAY_SIZE(lan743x_priv_flags_strings);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int lan743x_ethtool_get_rxnfc(struct net_device *netdev,
+ struct ethtool_rxnfc *rxnfc,
+ u32 *rule_locs)
+{
+ switch (rxnfc->cmd) {
+ case ETHTOOL_GRXFH:
+ rxnfc->data = 0;
+ switch (rxnfc->flow_type) {
+		case TCP_V4_FLOW: case UDP_V4_FLOW:
+		case TCP_V6_FLOW: case UDP_V6_FLOW:
+ rxnfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ fallthrough;
+ case IPV4_FLOW: case IPV6_FLOW:
+ rxnfc->data |= RXH_IP_SRC | RXH_IP_DST;
+ return 0;
+ }
+ break;
+ case ETHTOOL_GRXRINGS:
+ rxnfc->data = LAN743X_USED_RX_CHANNELS;
+ return 0;
+ }
+ return -EOPNOTSUPP;
+}
+
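+/* The RSS hash key lives in ten 32-bit RFE_HASH_KEY registers
+ * (40 bytes) and the indirection table in 32 RFE_INDX registers of
+ * four 8-bit entries each (128 entries); the sizes reported below
+ * must match the register loops in get_rxfh()/set_rxfh().
+ */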
+static u32 lan743x_ethtool_get_rxfh_key_size(struct net_device *netdev)
+{
+ return 40;
+}
+
+static u32 lan743x_ethtool_get_rxfh_indir_size(struct net_device *netdev)
+{
+ return 128;
+}
+
+static int lan743x_ethtool_get_rxfh(struct net_device *netdev,
+ u32 *indir, u8 *key, u8 *hfunc)
+{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+
+ if (indir) {
+ int dw_index;
+ int byte_index = 0;
+
+ for (dw_index = 0; dw_index < 32; dw_index++) {
+ u32 four_entries =
+ lan743x_csr_read(adapter, RFE_INDX(dw_index));
+
+ byte_index = dw_index << 2;
+ indir[byte_index + 0] =
+ ((four_entries >> 0) & 0x000000FF);
+ indir[byte_index + 1] =
+ ((four_entries >> 8) & 0x000000FF);
+ indir[byte_index + 2] =
+ ((four_entries >> 16) & 0x000000FF);
+ indir[byte_index + 3] =
+ ((four_entries >> 24) & 0x000000FF);
+ }
+ }
+ if (key) {
+ int dword_index;
+ int byte_index = 0;
+
+ for (dword_index = 0; dword_index < 10; dword_index++) {
+ u32 four_entries =
+ lan743x_csr_read(adapter,
+ RFE_HASH_KEY(dword_index));
+
+ byte_index = dword_index << 2;
+ key[byte_index + 0] =
+ ((four_entries >> 0) & 0x000000FF);
+ key[byte_index + 1] =
+ ((four_entries >> 8) & 0x000000FF);
+ key[byte_index + 2] =
+ ((four_entries >> 16) & 0x000000FF);
+ key[byte_index + 3] =
+ ((four_entries >> 24) & 0x000000FF);
+ }
+ }
+ if (hfunc)
+ (*hfunc) = ETH_RSS_HASH_TOP;
+ return 0;
+}
+
+static int lan743x_ethtool_set_rxfh(struct net_device *netdev,
+ const u32 *indir, const u8 *key,
+ const u8 hfunc)
+{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ return -EOPNOTSUPP;
+
+ if (indir) {
+ u32 indir_value = 0;
+ int dword_index = 0;
+ int byte_index = 0;
+
+ for (dword_index = 0; dword_index < 32; dword_index++) {
+ byte_index = dword_index << 2;
+ indir_value =
+ (((indir[byte_index + 0] & 0x000000FF) << 0) |
+ ((indir[byte_index + 1] & 0x000000FF) << 8) |
+ ((indir[byte_index + 2] & 0x000000FF) << 16) |
+ ((indir[byte_index + 3] & 0x000000FF) << 24));
+ lan743x_csr_write(adapter, RFE_INDX(dword_index),
+ indir_value);
+ }
+ }
+ if (key) {
+ int dword_index = 0;
+ int byte_index = 0;
+ u32 key_value = 0;
+
+ for (dword_index = 0; dword_index < 10; dword_index++) {
+ byte_index = dword_index << 2;
+ key_value =
+ ((((u32)(key[byte_index + 0])) << 0) |
+ (((u32)(key[byte_index + 1])) << 8) |
+ (((u32)(key[byte_index + 2])) << 16) |
+ (((u32)(key[byte_index + 3])) << 24));
+ lan743x_csr_write(adapter, RFE_HASH_KEY(dword_index),
+ key_value);
+ }
+ }
+ return 0;
+}
+
+static int lan743x_ethtool_get_ts_info(struct net_device *netdev,
+ struct ethtool_ts_info *ts_info)
+{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+
+ ts_info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ if (adapter->ptp.ptp_clock)
+ ts_info->phc_index = ptp_clock_index(adapter->ptp.ptp_clock);
+ else
+ ts_info->phc_index = -1;
+
+ ts_info->tx_types = BIT(HWTSTAMP_TX_OFF) |
+ BIT(HWTSTAMP_TX_ON) |
+ BIT(HWTSTAMP_TX_ONESTEP_SYNC);
+ ts_info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_ALL);
+ return 0;
+}
+
+static int lan743x_ethtool_get_eee(struct net_device *netdev,
+ struct ethtool_eee *eee)
+{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+ struct phy_device *phydev = netdev->phydev;
+ u32 buf;
+ int ret;
+
+ if (!phydev)
+ return -EIO;
+ if (!phydev->drv) {
+ netif_err(adapter, drv, adapter->netdev,
+ "Missing PHY Driver\n");
+ return -EIO;
+ }
+
+ ret = phy_ethtool_get_eee(phydev, eee);
+ if (ret < 0)
+ return ret;
+
+ buf = lan743x_csr_read(adapter, MAC_CR);
+ if (buf & MAC_CR_EEE_EN_) {
+ eee->eee_enabled = true;
+ eee->eee_active = !!(eee->advertised & eee->lp_advertised);
+ eee->tx_lpi_enabled = true;
+		/* EEE_TX_LPI_REQ_DLY and tx_lpi_timer use the same usec unit */
+ buf = lan743x_csr_read(adapter, MAC_EEE_TX_LPI_REQ_DLY_CNT);
+ eee->tx_lpi_timer = buf;
+ } else {
+ eee->eee_enabled = false;
+ eee->eee_active = false;
+ eee->tx_lpi_enabled = false;
+ eee->tx_lpi_timer = 0;
+ }
+
+ return 0;
+}
+
+static int lan743x_ethtool_set_eee(struct net_device *netdev,
+ struct ethtool_eee *eee)
+{
+ struct lan743x_adapter *adapter;
+ struct phy_device *phydev;
+ u32 buf = 0;
+ int ret = 0;
+
+ if (!netdev)
+ return -EINVAL;
+ adapter = netdev_priv(netdev);
+ if (!adapter)
+ return -EINVAL;
+ phydev = netdev->phydev;
+ if (!phydev)
+ return -EIO;
+ if (!phydev->drv) {
+ netif_err(adapter, drv, adapter->netdev,
+ "Missing PHY Driver\n");
+ return -EIO;
+ }
+
+ if (eee->eee_enabled) {
+ ret = phy_init_eee(phydev, false);
+ if (ret) {
+ netif_err(adapter, drv, adapter->netdev,
+ "EEE initialization failed\n");
+ return ret;
+ }
+
+ buf = (u32)eee->tx_lpi_timer;
+ lan743x_csr_write(adapter, MAC_EEE_TX_LPI_REQ_DLY_CNT, buf);
+
+ buf = lan743x_csr_read(adapter, MAC_CR);
+ buf |= MAC_CR_EEE_EN_;
+ lan743x_csr_write(adapter, MAC_CR, buf);
+ } else {
+ buf = lan743x_csr_read(adapter, MAC_CR);
+ buf &= ~MAC_CR_EEE_EN_;
+ lan743x_csr_write(adapter, MAC_CR, buf);
+ }
+
+ return phy_ethtool_set_eee(phydev, eee);
+}
+
+#ifdef CONFIG_PM
+static void lan743x_ethtool_get_wol(struct net_device *netdev,
+ struct ethtool_wolinfo *wol)
+{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+
+ wol->supported = 0;
+ wol->wolopts = 0;
+
+ if (netdev->phydev)
+ phy_ethtool_get_wol(netdev->phydev, wol);
+
+ wol->supported |= WAKE_BCAST | WAKE_UCAST | WAKE_MCAST |
+ WAKE_MAGIC | WAKE_PHY | WAKE_ARP;
+
+ if (adapter->is_pci11x1x)
+ wol->supported |= WAKE_MAGICSECURE;
+
+ wol->wolopts |= adapter->wolopts;
+ if (adapter->wolopts & WAKE_MAGICSECURE)
+ memcpy(wol->sopass, adapter->sopass, sizeof(wol->sopass));
+}
+
+static int lan743x_ethtool_set_wol(struct net_device *netdev,
+ struct ethtool_wolinfo *wol)
+{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+
+ adapter->wolopts = 0;
+ if (wol->wolopts & WAKE_UCAST)
+ adapter->wolopts |= WAKE_UCAST;
+ if (wol->wolopts & WAKE_MCAST)
+ adapter->wolopts |= WAKE_MCAST;
+ if (wol->wolopts & WAKE_BCAST)
+ adapter->wolopts |= WAKE_BCAST;
+ if (wol->wolopts & WAKE_MAGIC)
+ adapter->wolopts |= WAKE_MAGIC;
+ if (wol->wolopts & WAKE_PHY)
+ adapter->wolopts |= WAKE_PHY;
+ if (wol->wolopts & WAKE_ARP)
+ adapter->wolopts |= WAKE_ARP;
+ if (wol->wolopts & WAKE_MAGICSECURE &&
+ wol->wolopts & WAKE_MAGIC) {
+ memcpy(adapter->sopass, wol->sopass, sizeof(wol->sopass));
+ adapter->wolopts |= WAKE_MAGICSECURE;
+ } else {
+ memset(adapter->sopass, 0, sizeof(u8) * SOPASS_MAX);
+ }
+
+ device_set_wakeup_enable(&adapter->pdev->dev, (bool)wol->wolopts);
+
+ return netdev->phydev ? phy_ethtool_set_wol(netdev->phydev, wol)
+ : -ENETDOWN;
+}
+#endif /* CONFIG_PM */
+
+static void lan743x_common_regs(struct net_device *dev, void *p)
+{
+ struct lan743x_adapter *adapter = netdev_priv(dev);
+ u32 *rb = p;
+
+ memset(p, 0, (MAX_LAN743X_ETH_COMMON_REGS * sizeof(u32)));
+
+ rb[ETH_PRIV_FLAGS] = adapter->flags;
+ rb[ETH_ID_REV] = lan743x_csr_read(adapter, ID_REV);
+ rb[ETH_FPGA_REV] = lan743x_csr_read(adapter, FPGA_REV);
+ rb[ETH_STRAP_READ] = lan743x_csr_read(adapter, STRAP_READ);
+ rb[ETH_INT_STS] = lan743x_csr_read(adapter, INT_STS);
+ rb[ETH_HW_CFG] = lan743x_csr_read(adapter, HW_CFG);
+ rb[ETH_PMT_CTL] = lan743x_csr_read(adapter, PMT_CTL);
+ rb[ETH_E2P_CMD] = lan743x_csr_read(adapter, E2P_CMD);
+ rb[ETH_E2P_DATA] = lan743x_csr_read(adapter, E2P_DATA);
+ rb[ETH_MAC_CR] = lan743x_csr_read(adapter, MAC_CR);
+ rb[ETH_MAC_RX] = lan743x_csr_read(adapter, MAC_RX);
+ rb[ETH_MAC_TX] = lan743x_csr_read(adapter, MAC_TX);
+ rb[ETH_FLOW] = lan743x_csr_read(adapter, MAC_FLOW);
+ rb[ETH_MII_ACC] = lan743x_csr_read(adapter, MAC_MII_ACC);
+ rb[ETH_MII_DATA] = lan743x_csr_read(adapter, MAC_MII_DATA);
+ rb[ETH_EEE_TX_LPI_REQ_DLY] = lan743x_csr_read(adapter,
+ MAC_EEE_TX_LPI_REQ_DLY_CNT);
+ rb[ETH_WUCSR] = lan743x_csr_read(adapter, MAC_WUCSR);
+ rb[ETH_WK_SRC] = lan743x_csr_read(adapter, MAC_WK_SRC);
+}
+
+static void lan743x_sgmii_regs(struct net_device *dev, void *p)
+{
+ struct lan743x_adapter *adp = netdev_priv(dev);
+ u32 *rb = p;
+ u16 idx;
+ int val;
+ struct {
+ u8 id;
+ u8 dev;
+ u16 addr;
+ } regs[] = {
+ { ETH_SR_VSMMD_DEV_ID1, MDIO_MMD_VEND1, 0x0002},
+ { ETH_SR_VSMMD_DEV_ID2, MDIO_MMD_VEND1, 0x0003},
+ { ETH_SR_VSMMD_PCS_ID1, MDIO_MMD_VEND1, 0x0004},
+ { ETH_SR_VSMMD_PCS_ID2, MDIO_MMD_VEND1, 0x0005},
+ { ETH_SR_VSMMD_STS, MDIO_MMD_VEND1, 0x0008},
+ { ETH_SR_VSMMD_CTRL, MDIO_MMD_VEND1, 0x0009},
+ { ETH_SR_MII_CTRL, MDIO_MMD_VEND2, 0x0000},
+ { ETH_SR_MII_STS, MDIO_MMD_VEND2, 0x0001},
+ { ETH_SR_MII_DEV_ID1, MDIO_MMD_VEND2, 0x0002},
+ { ETH_SR_MII_DEV_ID2, MDIO_MMD_VEND2, 0x0003},
+ { ETH_SR_MII_AN_ADV, MDIO_MMD_VEND2, 0x0004},
+ { ETH_SR_MII_LP_BABL, MDIO_MMD_VEND2, 0x0005},
+ { ETH_SR_MII_EXPN, MDIO_MMD_VEND2, 0x0006},
+ { ETH_SR_MII_EXT_STS, MDIO_MMD_VEND2, 0x000F},
+ { ETH_SR_MII_TIME_SYNC_ABL, MDIO_MMD_VEND2, 0x0708},
+ { ETH_SR_MII_TIME_SYNC_TX_MAX_DLY_LWR, MDIO_MMD_VEND2, 0x0709},
+ { ETH_SR_MII_TIME_SYNC_TX_MAX_DLY_UPR, MDIO_MMD_VEND2, 0x070A},
+ { ETH_SR_MII_TIME_SYNC_TX_MIN_DLY_LWR, MDIO_MMD_VEND2, 0x070B},
+ { ETH_SR_MII_TIME_SYNC_TX_MIN_DLY_UPR, MDIO_MMD_VEND2, 0x070C},
+ { ETH_SR_MII_TIME_SYNC_RX_MAX_DLY_LWR, MDIO_MMD_VEND2, 0x070D},
+ { ETH_SR_MII_TIME_SYNC_RX_MAX_DLY_UPR, MDIO_MMD_VEND2, 0x070E},
+ { ETH_SR_MII_TIME_SYNC_RX_MIN_DLY_LWR, MDIO_MMD_VEND2, 0x070F},
+ { ETH_SR_MII_TIME_SYNC_RX_MIN_DLY_UPR, MDIO_MMD_VEND2, 0x0710},
+ { ETH_VR_MII_DIG_CTRL1, MDIO_MMD_VEND2, 0x8000},
+ { ETH_VR_MII_AN_CTRL, MDIO_MMD_VEND2, 0x8001},
+ { ETH_VR_MII_AN_INTR_STS, MDIO_MMD_VEND2, 0x8002},
+ { ETH_VR_MII_TC, MDIO_MMD_VEND2, 0x8003},
+ { ETH_VR_MII_DBG_CTRL, MDIO_MMD_VEND2, 0x8005},
+ { ETH_VR_MII_EEE_MCTRL0, MDIO_MMD_VEND2, 0x8006},
+ { ETH_VR_MII_EEE_TXTIMER, MDIO_MMD_VEND2, 0x8008},
+ { ETH_VR_MII_EEE_RXTIMER, MDIO_MMD_VEND2, 0x8009},
+ { ETH_VR_MII_LINK_TIMER_CTRL, MDIO_MMD_VEND2, 0x800A},
+ { ETH_VR_MII_EEE_MCTRL1, MDIO_MMD_VEND2, 0x800B},
+ { ETH_VR_MII_DIG_STS, MDIO_MMD_VEND2, 0x8010},
+ { ETH_VR_MII_ICG_ERRCNT1, MDIO_MMD_VEND2, 0x8011},
+ { ETH_VR_MII_GPIO, MDIO_MMD_VEND2, 0x8015},
+ { ETH_VR_MII_EEE_LPI_STATUS, MDIO_MMD_VEND2, 0x8016},
+ { ETH_VR_MII_EEE_WKERR, MDIO_MMD_VEND2, 0x8017},
+ { ETH_VR_MII_MISC_STS, MDIO_MMD_VEND2, 0x8018},
+ { ETH_VR_MII_RX_LSTS, MDIO_MMD_VEND2, 0x8020},
+ { ETH_VR_MII_GEN2_GEN4_TX_BSTCTRL0, MDIO_MMD_VEND2, 0x8038},
+ { ETH_VR_MII_GEN2_GEN4_TX_LVLCTRL0, MDIO_MMD_VEND2, 0x803A},
+ { ETH_VR_MII_GEN2_GEN4_TXGENCTRL0, MDIO_MMD_VEND2, 0x803C},
+ { ETH_VR_MII_GEN2_GEN4_TXGENCTRL1, MDIO_MMD_VEND2, 0x803D},
+ { ETH_VR_MII_GEN4_TXGENCTRL2, MDIO_MMD_VEND2, 0x803E},
+ { ETH_VR_MII_GEN2_GEN4_TX_STS, MDIO_MMD_VEND2, 0x8048},
+ { ETH_VR_MII_GEN2_GEN4_RXGENCTRL0, MDIO_MMD_VEND2, 0x8058},
+ { ETH_VR_MII_GEN2_GEN4_RXGENCTRL1, MDIO_MMD_VEND2, 0x8059},
+ { ETH_VR_MII_GEN4_RXEQ_CTRL, MDIO_MMD_VEND2, 0x805B},
+ { ETH_VR_MII_GEN4_RXLOS_CTRL0, MDIO_MMD_VEND2, 0x805D},
+ { ETH_VR_MII_GEN2_GEN4_MPLL_CTRL0, MDIO_MMD_VEND2, 0x8078},
+ { ETH_VR_MII_GEN2_GEN4_MPLL_CTRL1, MDIO_MMD_VEND2, 0x8079},
+ { ETH_VR_MII_GEN2_GEN4_MPLL_STS, MDIO_MMD_VEND2, 0x8088},
+ { ETH_VR_MII_GEN2_GEN4_LVL_CTRL, MDIO_MMD_VEND2, 0x8090},
+ { ETH_VR_MII_GEN4_MISC_CTRL2, MDIO_MMD_VEND2, 0x8093},
+ { ETH_VR_MII_GEN2_GEN4_MISC_CTRL0, MDIO_MMD_VEND2, 0x8099},
+ { ETH_VR_MII_GEN2_GEN4_MISC_CTRL1, MDIO_MMD_VEND2, 0x809A},
+ { ETH_VR_MII_SNPS_CR_CTRL, MDIO_MMD_VEND2, 0x80A0},
+ { ETH_VR_MII_SNPS_CR_ADDR, MDIO_MMD_VEND2, 0x80A1},
+ { ETH_VR_MII_SNPS_CR_DATA, MDIO_MMD_VEND2, 0x80A2},
+ { ETH_VR_MII_DIG_CTRL2, MDIO_MMD_VEND2, 0x80E1},
+ { ETH_VR_MII_DIG_ERRCNT, MDIO_MMD_VEND2, 0x80E2},
+ };
+
+ for (idx = 0; idx < ARRAY_SIZE(regs); idx++) {
+ val = lan743x_sgmii_read(adp, regs[idx].dev, regs[idx].addr);
+ if (val < 0)
+ rb[regs[idx].id] = 0xFFFF;
+ else
+ rb[regs[idx].id] = val;
+ }
+}
+
+static int lan743x_get_regs_len(struct net_device *dev)
+{
+ struct lan743x_adapter *adapter = netdev_priv(dev);
+ u32 num_regs = MAX_LAN743X_ETH_COMMON_REGS;
+
+ if (adapter->is_sgmii_en)
+ num_regs += MAX_LAN743X_ETH_SGMII_REGS;
+
+ return num_regs * sizeof(u32);
+}
+
+static void lan743x_get_regs(struct net_device *dev,
+ struct ethtool_regs *regs, void *p)
+{
+ struct lan743x_adapter *adapter = netdev_priv(dev);
+ int regs_len;
+
+ regs_len = lan743x_get_regs_len(dev);
+ memset(p, 0, regs_len);
+
+ regs->version = LAN743X_ETH_REG_VERSION;
+ regs->len = regs_len;
+
+ lan743x_common_regs(dev, p);
+ p = (u32 *)p + MAX_LAN743X_ETH_COMMON_REGS;
+
+ if (adapter->is_sgmii_en) {
+ lan743x_sgmii_regs(dev, p);
+ p = (u32 *)p + MAX_LAN743X_ETH_SGMII_REGS;
+ }
+}
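+
+/* The dump produced above is MAX_LAN743X_ETH_COMMON_REGS u32 slots
+ * followed, when SGMII is enabled, by MAX_LAN743X_ETH_SGMII_REGS
+ * slots, indexed by the enums in lan743x_ethtool.h.
+ */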
+
+static void lan743x_get_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct lan743x_adapter *adapter = netdev_priv(dev);
+ struct lan743x_phy *phy = &adapter->phy;
+
+ if (phy->fc_request_control & FLOW_CTRL_TX)
+ pause->tx_pause = 1;
+ if (phy->fc_request_control & FLOW_CTRL_RX)
+ pause->rx_pause = 1;
+ pause->autoneg = phy->fc_autoneg;
+}
+
+static int lan743x_set_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct lan743x_adapter *adapter = netdev_priv(dev);
+ struct phy_device *phydev = dev->phydev;
+ struct lan743x_phy *phy = &adapter->phy;
+
+ if (!phydev)
+ return -ENODEV;
+
+ if (!phy_validate_pause(phydev, pause))
+ return -EINVAL;
+
+ phy->fc_request_control = 0;
+ if (pause->rx_pause)
+ phy->fc_request_control |= FLOW_CTRL_RX;
+
+ if (pause->tx_pause)
+ phy->fc_request_control |= FLOW_CTRL_TX;
+
+ phy->fc_autoneg = pause->autoneg;
+
+ if (pause->autoneg == AUTONEG_DISABLE)
+ lan743x_mac_flow_ctrl_set_enables(adapter, pause->tx_pause,
+ pause->rx_pause);
+ else
+ phy_set_asym_pause(phydev, pause->rx_pause, pause->tx_pause);
+
+ return 0;
+}
+
+const struct ethtool_ops lan743x_ethtool_ops = {
+ .get_drvinfo = lan743x_ethtool_get_drvinfo,
+ .get_msglevel = lan743x_ethtool_get_msglevel,
+ .set_msglevel = lan743x_ethtool_set_msglevel,
+ .get_link = ethtool_op_get_link,
+
+ .get_eeprom_len = lan743x_ethtool_get_eeprom_len,
+ .get_eeprom = lan743x_ethtool_get_eeprom,
+ .set_eeprom = lan743x_ethtool_set_eeprom,
+ .get_strings = lan743x_ethtool_get_strings,
+ .get_ethtool_stats = lan743x_ethtool_get_ethtool_stats,
+ .get_priv_flags = lan743x_ethtool_get_priv_flags,
+ .set_priv_flags = lan743x_ethtool_set_priv_flags,
+ .get_sset_count = lan743x_ethtool_get_sset_count,
+ .get_rxnfc = lan743x_ethtool_get_rxnfc,
+ .get_rxfh_key_size = lan743x_ethtool_get_rxfh_key_size,
+ .get_rxfh_indir_size = lan743x_ethtool_get_rxfh_indir_size,
+ .get_rxfh = lan743x_ethtool_get_rxfh,
+ .set_rxfh = lan743x_ethtool_set_rxfh,
+ .get_ts_info = lan743x_ethtool_get_ts_info,
+ .get_eee = lan743x_ethtool_get_eee,
+ .set_eee = lan743x_ethtool_set_eee,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .get_regs_len = lan743x_get_regs_len,
+ .get_regs = lan743x_get_regs,
+ .get_pauseparam = lan743x_get_pauseparam,
+ .set_pauseparam = lan743x_set_pauseparam,
+#ifdef CONFIG_PM
+ .get_wol = lan743x_ethtool_get_wol,
+ .set_wol = lan743x_ethtool_set_wol,
+#endif
+};
diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.h b/drivers/net/ethernet/microchip/lan743x_ethtool.h
new file mode 100644
index 0000000000..267d5035b8
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan743x_ethtool.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (C) 2018 Microchip Technology Inc. */
+
+#ifndef _LAN743X_ETHTOOL_H
+#define _LAN743X_ETHTOOL_H
+
+#include "linux/ethtool.h"
+
+#define LAN743X_ETH_REG_VERSION 1
+
+enum {
+ ETH_PRIV_FLAGS,
+ ETH_ID_REV,
+ ETH_FPGA_REV,
+ ETH_STRAP_READ,
+ ETH_INT_STS,
+ ETH_HW_CFG,
+ ETH_PMT_CTL,
+ ETH_E2P_CMD,
+ ETH_E2P_DATA,
+ ETH_MAC_CR,
+ ETH_MAC_RX,
+ ETH_MAC_TX,
+ ETH_FLOW,
+ ETH_MII_ACC,
+ ETH_MII_DATA,
+ ETH_EEE_TX_LPI_REQ_DLY,
+ ETH_WUCSR,
+ ETH_WK_SRC,
+
+ /* Add new registers above */
+ MAX_LAN743X_ETH_COMMON_REGS
+};
+
+enum {
+ /* SGMII Register */
+ ETH_SR_VSMMD_DEV_ID1,
+ ETH_SR_VSMMD_DEV_ID2,
+ ETH_SR_VSMMD_PCS_ID1,
+ ETH_SR_VSMMD_PCS_ID2,
+ ETH_SR_VSMMD_STS,
+ ETH_SR_VSMMD_CTRL,
+ ETH_SR_MII_CTRL,
+ ETH_SR_MII_STS,
+ ETH_SR_MII_DEV_ID1,
+ ETH_SR_MII_DEV_ID2,
+ ETH_SR_MII_AN_ADV,
+ ETH_SR_MII_LP_BABL,
+ ETH_SR_MII_EXPN,
+ ETH_SR_MII_EXT_STS,
+ ETH_SR_MII_TIME_SYNC_ABL,
+ ETH_SR_MII_TIME_SYNC_TX_MAX_DLY_LWR,
+ ETH_SR_MII_TIME_SYNC_TX_MAX_DLY_UPR,
+ ETH_SR_MII_TIME_SYNC_TX_MIN_DLY_LWR,
+ ETH_SR_MII_TIME_SYNC_TX_MIN_DLY_UPR,
+ ETH_SR_MII_TIME_SYNC_RX_MAX_DLY_LWR,
+ ETH_SR_MII_TIME_SYNC_RX_MAX_DLY_UPR,
+ ETH_SR_MII_TIME_SYNC_RX_MIN_DLY_LWR,
+ ETH_SR_MII_TIME_SYNC_RX_MIN_DLY_UPR,
+ ETH_VR_MII_DIG_CTRL1,
+ ETH_VR_MII_AN_CTRL,
+ ETH_VR_MII_AN_INTR_STS,
+ ETH_VR_MII_TC,
+ ETH_VR_MII_DBG_CTRL,
+ ETH_VR_MII_EEE_MCTRL0,
+ ETH_VR_MII_EEE_TXTIMER,
+ ETH_VR_MII_EEE_RXTIMER,
+ ETH_VR_MII_LINK_TIMER_CTRL,
+ ETH_VR_MII_EEE_MCTRL1,
+ ETH_VR_MII_DIG_STS,
+ ETH_VR_MII_ICG_ERRCNT1,
+ ETH_VR_MII_GPIO,
+ ETH_VR_MII_EEE_LPI_STATUS,
+ ETH_VR_MII_EEE_WKERR,
+ ETH_VR_MII_MISC_STS,
+ ETH_VR_MII_RX_LSTS,
+ ETH_VR_MII_GEN2_GEN4_TX_BSTCTRL0,
+ ETH_VR_MII_GEN2_GEN4_TX_LVLCTRL0,
+ ETH_VR_MII_GEN2_GEN4_TXGENCTRL0,
+ ETH_VR_MII_GEN2_GEN4_TXGENCTRL1,
+ ETH_VR_MII_GEN4_TXGENCTRL2,
+ ETH_VR_MII_GEN2_GEN4_TX_STS,
+ ETH_VR_MII_GEN2_GEN4_RXGENCTRL0,
+ ETH_VR_MII_GEN2_GEN4_RXGENCTRL1,
+ ETH_VR_MII_GEN4_RXEQ_CTRL,
+ ETH_VR_MII_GEN4_RXLOS_CTRL0,
+ ETH_VR_MII_GEN2_GEN4_MPLL_CTRL0,
+ ETH_VR_MII_GEN2_GEN4_MPLL_CTRL1,
+ ETH_VR_MII_GEN2_GEN4_MPLL_STS,
+ ETH_VR_MII_GEN2_GEN4_LVL_CTRL,
+ ETH_VR_MII_GEN4_MISC_CTRL2,
+ ETH_VR_MII_GEN2_GEN4_MISC_CTRL0,
+ ETH_VR_MII_GEN2_GEN4_MISC_CTRL1,
+ ETH_VR_MII_SNPS_CR_CTRL,
+ ETH_VR_MII_SNPS_CR_ADDR,
+ ETH_VR_MII_SNPS_CR_DATA,
+ ETH_VR_MII_DIG_CTRL2,
+ ETH_VR_MII_DIG_ERRCNT,
+
+ /* Add new registers above */
+ MAX_LAN743X_ETH_SGMII_REGS
+};
+
+extern const struct ethtool_ops lan743x_ethtool_ops;
+
+#endif /* _LAN743X_ETHTOOL_H */
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
new file mode 100644
index 0000000000..c81cdeb4d4
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -0,0 +1,3702 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (C) 2018 Microchip Technology Inc. */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/crc32.h>
+#include <linux/microchipphy.h>
+#include <linux/net_tstamp.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/phy.h>
+#include <linux/phy_fixed.h>
+#include <linux/rtnetlink.h>
+#include <linux/iopoll.h>
+#include <linux/crc16.h>
+#include "lan743x_main.h"
+#include "lan743x_ethtool.h"
+
+#define MMD_ACCESS_ADDRESS 0
+#define MMD_ACCESS_WRITE 1
+#define MMD_ACCESS_READ 2
+#define MMD_ACCESS_READ_INC 3
+#define PCS_POWER_STATE_DOWN 0x6
+#define PCS_POWER_STATE_UP 0x4
+
+static void pci11x1x_strap_get_status(struct lan743x_adapter *adapter)
+{
+ u32 chip_rev;
+ u32 cfg_load;
+ u32 hw_cfg;
+ u32 strap;
+ int ret;
+
+	/* Timeout = 100 (i.e. 1 sec (10 msec * 100)) */
+ ret = lan743x_hs_syslock_acquire(adapter, 100);
+ if (ret < 0) {
+ netif_err(adapter, drv, adapter->netdev,
+ "Sys Lock acquire failed ret:%d\n", ret);
+ return;
+ }
+
+ cfg_load = lan743x_csr_read(adapter, ETH_SYS_CONFIG_LOAD_STARTED_REG);
+ lan743x_hs_syslock_release(adapter);
+ hw_cfg = lan743x_csr_read(adapter, HW_CFG);
+
+ if (cfg_load & GEN_SYS_LOAD_STARTED_REG_ETH_ ||
+ hw_cfg & HW_CFG_RST_PROTECT_) {
+ strap = lan743x_csr_read(adapter, STRAP_READ);
+ if (strap & STRAP_READ_SGMII_EN_)
+ adapter->is_sgmii_en = true;
+ else
+ adapter->is_sgmii_en = false;
+ } else {
+ chip_rev = lan743x_csr_read(adapter, FPGA_REV);
+ if (chip_rev) {
+ if (chip_rev & FPGA_SGMII_OP)
+ adapter->is_sgmii_en = true;
+ else
+ adapter->is_sgmii_en = false;
+ } else {
+ adapter->is_sgmii_en = false;
+ }
+ }
+ netif_dbg(adapter, drv, adapter->netdev,
+ "SGMII I/F %sable\n", adapter->is_sgmii_en ? "En" : "Dis");
+}
+
+static bool is_pci11x1x_chip(struct lan743x_adapter *adapter)
+{
+ struct lan743x_csr *csr = &adapter->csr;
+ u32 id_rev = csr->id_rev;
+
+ if (((id_rev & 0xFFFF0000) == ID_REV_ID_A011_) ||
+ ((id_rev & 0xFFFF0000) == ID_REV_ID_A041_)) {
+ return true;
+ }
+ return false;
+}
+
+static void lan743x_pci_cleanup(struct lan743x_adapter *adapter)
+{
+ pci_release_selected_regions(adapter->pdev,
+ pci_select_bars(adapter->pdev,
+ IORESOURCE_MEM));
+ pci_disable_device(adapter->pdev);
+}
+
+static int lan743x_pci_init(struct lan743x_adapter *adapter,
+ struct pci_dev *pdev)
+{
+ unsigned long bars = 0;
+ int ret;
+
+ adapter->pdev = pdev;
+ ret = pci_enable_device_mem(pdev);
+ if (ret)
+ goto return_error;
+
+ netif_info(adapter, probe, adapter->netdev,
+ "PCI: Vendor ID = 0x%04X, Device ID = 0x%04X\n",
+ pdev->vendor, pdev->device);
+ bars = pci_select_bars(pdev, IORESOURCE_MEM);
+ if (!test_bit(0, &bars))
+ goto disable_device;
+
+ ret = pci_request_selected_regions(pdev, bars, DRIVER_NAME);
+ if (ret)
+ goto disable_device;
+
+ pci_set_master(pdev);
+ return 0;
+
+disable_device:
+ pci_disable_device(adapter->pdev);
+
+return_error:
+ return ret;
+}
+
+u32 lan743x_csr_read(struct lan743x_adapter *adapter, int offset)
+{
+ return ioread32(&adapter->csr.csr_address[offset]);
+}
+
+void lan743x_csr_write(struct lan743x_adapter *adapter, int offset,
+ u32 data)
+{
+ iowrite32(data, &adapter->csr.csr_address[offset]);
+}
+
+#define LAN743X_CSR_READ_OP(offset) lan743x_csr_read(adapter, offset)
+
+static int lan743x_csr_light_reset(struct lan743x_adapter *adapter)
+{
+ u32 data;
+
+ data = lan743x_csr_read(adapter, HW_CFG);
+ data |= HW_CFG_LRST_;
+ lan743x_csr_write(adapter, HW_CFG, data);
+
+ return readx_poll_timeout(LAN743X_CSR_READ_OP, HW_CFG, data,
+ !(data & HW_CFG_LRST_), 100000, 10000000);
+}
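+
+/* HW_CFG_LRST_ is self-clearing; the poll above waits (up to 10 s)
+ * for the hardware to deassert it, signalling reset completion.
+ */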
+
+static int lan743x_csr_wait_for_bit_atomic(struct lan743x_adapter *adapter,
+ int offset, u32 bit_mask,
+ int target_value, int udelay_min,
+ int udelay_max, int count)
+{
+ u32 data;
+
+ return readx_poll_timeout_atomic(LAN743X_CSR_READ_OP, offset, data,
+ target_value == !!(data & bit_mask),
+ udelay_max, udelay_min * count);
+}
+
+static int lan743x_csr_wait_for_bit(struct lan743x_adapter *adapter,
+ int offset, u32 bit_mask,
+ int target_value, int usleep_min,
+ int usleep_max, int count)
+{
+ u32 data;
+
+ return readx_poll_timeout(LAN743X_CSR_READ_OP, offset, data,
+ target_value == !!(data & bit_mask),
+ usleep_max, usleep_min * count);
+}
+
+static int lan743x_csr_init(struct lan743x_adapter *adapter)
+{
+ struct lan743x_csr *csr = &adapter->csr;
+ resource_size_t bar_start, bar_length;
+
+ bar_start = pci_resource_start(adapter->pdev, 0);
+ bar_length = pci_resource_len(adapter->pdev, 0);
+ csr->csr_address = devm_ioremap(&adapter->pdev->dev,
+ bar_start, bar_length);
+ if (!csr->csr_address)
+ return -ENOMEM;
+
+ csr->id_rev = lan743x_csr_read(adapter, ID_REV);
+ csr->fpga_rev = lan743x_csr_read(adapter, FPGA_REV);
+ netif_info(adapter, probe, adapter->netdev,
+ "ID_REV = 0x%08X, FPGA_REV = %d.%d\n",
+ csr->id_rev, FPGA_REV_GET_MAJOR_(csr->fpga_rev),
+ FPGA_REV_GET_MINOR_(csr->fpga_rev));
+ if (!ID_REV_IS_VALID_CHIP_ID_(csr->id_rev))
+ return -ENODEV;
+
+ csr->flags = LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR;
+ switch (csr->id_rev & ID_REV_CHIP_REV_MASK_) {
+ case ID_REV_CHIP_REV_A0_:
+ csr->flags |= LAN743X_CSR_FLAG_IS_A0;
+ csr->flags &= ~LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR;
+ break;
+ case ID_REV_CHIP_REV_B0_:
+ csr->flags |= LAN743X_CSR_FLAG_IS_B0;
+ break;
+ }
+
+ return lan743x_csr_light_reset(adapter);
+}
+
+static void lan743x_intr_software_isr(struct lan743x_adapter *adapter)
+{
+ struct lan743x_intr *intr = &adapter->intr;
+
+ /* disable the interrupt to prevent repeated re-triggering */
+ lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_SW_GP_);
+ intr->software_isr_flag = true;
+ wake_up(&intr->software_isr_wq);
+}
+
+static void lan743x_tx_isr(void *context, u32 int_sts, u32 flags)
+{
+ struct lan743x_tx *tx = context;
+ struct lan743x_adapter *adapter = tx->adapter;
+ bool enable_flag = true;
+
+ lan743x_csr_read(adapter, INT_EN_SET);
+ if (flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR) {
+ lan743x_csr_write(adapter, INT_EN_CLR,
+ INT_BIT_DMA_TX_(tx->channel_number));
+ }
+
+ if (int_sts & INT_BIT_DMA_TX_(tx->channel_number)) {
+ u32 ioc_bit = DMAC_INT_BIT_TX_IOC_(tx->channel_number);
+ u32 dmac_int_sts;
+ u32 dmac_int_en;
+
+ if (flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ)
+ dmac_int_sts = lan743x_csr_read(adapter, DMAC_INT_STS);
+ else
+ dmac_int_sts = ioc_bit;
+ if (flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK)
+ dmac_int_en = lan743x_csr_read(adapter,
+ DMAC_INT_EN_SET);
+ else
+ dmac_int_en = ioc_bit;
+
+ dmac_int_en &= ioc_bit;
+ dmac_int_sts &= dmac_int_en;
+ if (dmac_int_sts & ioc_bit) {
+ napi_schedule(&tx->napi);
+			enable_flag = false; /* poll func will enable later */
+ }
+ }
+
+ if (enable_flag)
+ /* enable isr */
+ lan743x_csr_write(adapter, INT_EN_SET,
+ INT_BIT_DMA_TX_(tx->channel_number));
+}
+
+static void lan743x_rx_isr(void *context, u32 int_sts, u32 flags)
+{
+ struct lan743x_rx *rx = context;
+ struct lan743x_adapter *adapter = rx->adapter;
+ bool enable_flag = true;
+
+ if (flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR) {
+ lan743x_csr_write(adapter, INT_EN_CLR,
+ INT_BIT_DMA_RX_(rx->channel_number));
+ }
+
+ if (int_sts & INT_BIT_DMA_RX_(rx->channel_number)) {
+ u32 rx_frame_bit = DMAC_INT_BIT_RXFRM_(rx->channel_number);
+ u32 dmac_int_sts;
+ u32 dmac_int_en;
+
+ if (flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ)
+ dmac_int_sts = lan743x_csr_read(adapter, DMAC_INT_STS);
+ else
+ dmac_int_sts = rx_frame_bit;
+ if (flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK)
+ dmac_int_en = lan743x_csr_read(adapter,
+ DMAC_INT_EN_SET);
+ else
+ dmac_int_en = rx_frame_bit;
+
+ dmac_int_en &= rx_frame_bit;
+ dmac_int_sts &= dmac_int_en;
+ if (dmac_int_sts & rx_frame_bit) {
+ napi_schedule(&rx->napi);
+			enable_flag = false; /* poll func will enable later */
+ }
+ }
+
+ if (enable_flag) {
+ /* enable isr */
+ lan743x_csr_write(adapter, INT_EN_SET,
+ INT_BIT_DMA_RX_(rx->channel_number));
+ }
+}
+
+static void lan743x_intr_shared_isr(void *context, u32 int_sts, u32 flags)
+{
+ struct lan743x_adapter *adapter = context;
+ unsigned int channel;
+
+ if (int_sts & INT_BIT_ALL_RX_) {
+ for (channel = 0; channel < LAN743X_USED_RX_CHANNELS;
+ channel++) {
+ u32 int_bit = INT_BIT_DMA_RX_(channel);
+
+ if (int_sts & int_bit) {
+ lan743x_rx_isr(&adapter->rx[channel],
+ int_bit, flags);
+ int_sts &= ~int_bit;
+ }
+ }
+ }
+ if (int_sts & INT_BIT_ALL_TX_) {
+ for (channel = 0; channel < adapter->used_tx_channels;
+ channel++) {
+ u32 int_bit = INT_BIT_DMA_TX_(channel);
+
+ if (int_sts & int_bit) {
+ lan743x_tx_isr(&adapter->tx[channel],
+ int_bit, flags);
+ int_sts &= ~int_bit;
+ }
+ }
+ }
+ if (int_sts & INT_BIT_ALL_OTHER_) {
+ if (int_sts & INT_BIT_SW_GP_) {
+ lan743x_intr_software_isr(adapter);
+ int_sts &= ~INT_BIT_SW_GP_;
+ }
+ if (int_sts & INT_BIT_1588_) {
+ lan743x_ptp_isr(adapter);
+ int_sts &= ~INT_BIT_1588_;
+ }
+ }
+ if (int_sts)
+ lan743x_csr_write(adapter, INT_EN_CLR, int_sts);
+}
+
+static irqreturn_t lan743x_intr_entry_isr(int irq, void *ptr)
+{
+ struct lan743x_vector *vector = ptr;
+ struct lan743x_adapter *adapter = vector->adapter;
+ irqreturn_t result = IRQ_NONE;
+ u32 int_enables;
+ u32 int_sts;
+
+ if (vector->flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ) {
+ int_sts = lan743x_csr_read(adapter, INT_STS);
+ } else if (vector->flags &
+ (LAN743X_VECTOR_FLAG_SOURCE_STATUS_R2C |
+ LAN743X_VECTOR_FLAG_SOURCE_ENABLE_R2C)) {
+ int_sts = lan743x_csr_read(adapter, INT_STS_R2C);
+ } else {
+ /* use mask as implied status */
+ int_sts = vector->int_mask | INT_BIT_MAS_;
+ }
+
+ if (!(int_sts & INT_BIT_MAS_))
+ goto irq_done;
+
+ if (vector->flags & LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_CLEAR)
+ /* disable vector interrupt */
+ lan743x_csr_write(adapter,
+ INT_VEC_EN_CLR,
+ INT_VEC_EN_(vector->vector_index));
+
+ if (vector->flags & LAN743X_VECTOR_FLAG_MASTER_ENABLE_CLEAR)
+ /* disable master interrupt */
+ lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_MAS_);
+
+ if (vector->flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK) {
+ int_enables = lan743x_csr_read(adapter, INT_EN_SET);
+ } else {
+ /* use vector mask as implied enable mask */
+ int_enables = vector->int_mask;
+ }
+
+ int_sts &= int_enables;
+ int_sts &= vector->int_mask;
+ if (int_sts) {
+ if (vector->handler) {
+ vector->handler(vector->context,
+ int_sts, vector->flags);
+ } else {
+ /* disable interrupts on this vector */
+ lan743x_csr_write(adapter, INT_EN_CLR,
+ vector->int_mask);
+ }
+ result = IRQ_HANDLED;
+ }
+
+ if (vector->flags & LAN743X_VECTOR_FLAG_MASTER_ENABLE_SET)
+ /* enable master interrupt */
+ lan743x_csr_write(adapter, INT_EN_SET, INT_BIT_MAS_);
+
+ if (vector->flags & LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_SET)
+ /* enable vector interrupt */
+ lan743x_csr_write(adapter,
+ INT_VEC_EN_SET,
+ INT_VEC_EN_(vector->vector_index));
+irq_done:
+ return result;
+}
+
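+/* Interrupt self-test: raise the software general purpose interrupt
+ * and wait up to 200 ms for the ISR to set software_isr_flag. A
+ * timeout means interrupt delivery is not working.
+ */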
+static int lan743x_intr_test_isr(struct lan743x_adapter *adapter)
+{
+ struct lan743x_intr *intr = &adapter->intr;
+ int ret;
+
+ intr->software_isr_flag = false;
+
+ /* enable and activate test interrupt */
+ lan743x_csr_write(adapter, INT_EN_SET, INT_BIT_SW_GP_);
+ lan743x_csr_write(adapter, INT_SET, INT_BIT_SW_GP_);
+
+ ret = wait_event_timeout(intr->software_isr_wq,
+ intr->software_isr_flag,
+ msecs_to_jiffies(200));
+
+ /* disable test interrupt */
+ lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_SW_GP_);
+
+ return ret > 0 ? 0 : -ENODEV;
+}
+
+static int lan743x_intr_register_isr(struct lan743x_adapter *adapter,
+ int vector_index, u32 flags,
+ u32 int_mask,
+ lan743x_vector_handler handler,
+ void *context)
+{
+ struct lan743x_vector *vector = &adapter->intr.vector_list
+ [vector_index];
+ int ret;
+
+ vector->adapter = adapter;
+ vector->flags = flags;
+ vector->vector_index = vector_index;
+ vector->int_mask = int_mask;
+ vector->handler = handler;
+ vector->context = context;
+
+ ret = request_irq(vector->irq,
+ lan743x_intr_entry_isr,
+ (flags & LAN743X_VECTOR_FLAG_IRQ_SHARED) ?
+ IRQF_SHARED : 0, DRIVER_NAME, vector);
+ if (ret) {
+ vector->handler = NULL;
+ vector->context = NULL;
+ vector->int_mask = 0;
+ vector->flags = 0;
+ }
+ return ret;
+}
+
+static void lan743x_intr_unregister_isr(struct lan743x_adapter *adapter,
+ int vector_index)
+{
+ struct lan743x_vector *vector = &adapter->intr.vector_list
+ [vector_index];
+
+ free_irq(vector->irq, vector);
+ vector->handler = NULL;
+ vector->context = NULL;
+ vector->int_mask = 0;
+ vector->flags = 0;
+}
+
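+/* Return the flags of the first vector whose mask overlaps int_mask,
+ * or 0 if no vector services any of the requested interrupt sources.
+ */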
+static u32 lan743x_intr_get_vector_flags(struct lan743x_adapter *adapter,
+ u32 int_mask)
+{
+ int index;
+
+ for (index = 0; index < adapter->max_vector_count; index++) {
+ if (adapter->intr.vector_list[index].int_mask & int_mask)
+ return adapter->intr.vector_list[index].flags;
+ }
+ return 0;
+}
+
+static void lan743x_intr_close(struct lan743x_adapter *adapter)
+{
+ struct lan743x_intr *intr = &adapter->intr;
+ int index = 0;
+
+ lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_MAS_);
+ if (adapter->is_pci11x1x)
+ lan743x_csr_write(adapter, INT_VEC_EN_CLR, 0x0000FFFF);
+ else
+ lan743x_csr_write(adapter, INT_VEC_EN_CLR, 0x000000FF);
+
+ for (index = 0; index < intr->number_of_vectors; index++) {
+ if (intr->flags & INTR_FLAG_IRQ_REQUESTED(index)) {
+ lan743x_intr_unregister_isr(adapter, index);
+ intr->flags &= ~INTR_FLAG_IRQ_REQUESTED(index);
+ }
+ }
+
+ if (intr->flags & INTR_FLAG_MSI_ENABLED) {
+ pci_disable_msi(adapter->pdev);
+ intr->flags &= ~INTR_FLAG_MSI_ENABLED;
+ }
+
+ if (intr->flags & INTR_FLAG_MSIX_ENABLED) {
+ pci_disable_msix(adapter->pdev);
+ intr->flags &= ~INTR_FLAG_MSIX_ENABLED;
+ }
+}
+
+static int lan743x_intr_open(struct lan743x_adapter *adapter)
+{
+ struct msix_entry msix_entries[PCI11X1X_MAX_VECTOR_COUNT];
+ struct lan743x_intr *intr = &adapter->intr;
+ unsigned int used_tx_channels;
+ u32 int_vec_en_auto_clr = 0;
+ u8 max_vector_count;
+ u32 int_vec_map0 = 0;
+ u32 int_vec_map1 = 0;
+ int ret = -ENODEV;
+ int index = 0;
+ u32 flags = 0;
+
+ intr->number_of_vectors = 0;
+
+ /* Try to set up MSI-X interrupts */
+ max_vector_count = adapter->max_vector_count;
+ memset(&msix_entries[0], 0,
+ sizeof(struct msix_entry) * max_vector_count);
+ for (index = 0; index < max_vector_count; index++)
+ msix_entries[index].entry = index;
+ used_tx_channels = adapter->used_tx_channels;
+ ret = pci_enable_msix_range(adapter->pdev,
+ msix_entries, 1,
+ 1 + used_tx_channels +
+ LAN743X_USED_RX_CHANNELS);
+
+ if (ret > 0) {
+ intr->flags |= INTR_FLAG_MSIX_ENABLED;
+ intr->number_of_vectors = ret;
+ intr->using_vectors = true;
+ for (index = 0; index < intr->number_of_vectors; index++)
+ intr->vector_list[index].irq = msix_entries
+ [index].vector;
+ netif_info(adapter, ifup, adapter->netdev,
+ "using MSIX interrupts, number of vectors = %d\n",
+ intr->number_of_vectors);
+ }
+
+ /* If MSI-X failed, try to set up MSI interrupts */
+ if (!intr->number_of_vectors) {
+ if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) {
+ if (!pci_enable_msi(adapter->pdev)) {
+ intr->flags |= INTR_FLAG_MSI_ENABLED;
+ intr->number_of_vectors = 1;
+ intr->using_vectors = true;
+ intr->vector_list[0].irq =
+ adapter->pdev->irq;
+ netif_info(adapter, ifup, adapter->netdev,
+ "using MSI interrupts, number of vectors = %d\n",
+ intr->number_of_vectors);
+ }
+ }
+ }
+
+ /* If both MSI-X and MSI failed, set up a legacy interrupt */
+ if (!intr->number_of_vectors) {
+ intr->number_of_vectors = 1;
+ intr->using_vectors = false;
+ intr->vector_list[0].irq = intr->irq;
+ netif_info(adapter, ifup, adapter->netdev,
+ "using legacy interrupts\n");
+ }
+
+ /* At this point we must have at least one irq */
+ lan743x_csr_write(adapter, INT_VEC_EN_CLR, 0xFFFFFFFF);
+
+ /* map all interrupts to vector 0 */
+ lan743x_csr_write(adapter, INT_VEC_MAP0, 0x00000000);
+ lan743x_csr_write(adapter, INT_VEC_MAP1, 0x00000000);
+ lan743x_csr_write(adapter, INT_VEC_MAP2, 0x00000000);
+ flags = LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ |
+ LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C |
+ LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK |
+ LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR;
+
+ if (intr->using_vectors) {
+ flags |= LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_CLEAR |
+ LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_SET;
+ } else {
+ flags |= LAN743X_VECTOR_FLAG_MASTER_ENABLE_CLEAR |
+ LAN743X_VECTOR_FLAG_MASTER_ENABLE_SET |
+ LAN743X_VECTOR_FLAG_IRQ_SHARED;
+ }
+
+ if (adapter->csr.flags & LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR) {
+ flags &= ~LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ;
+ flags &= ~LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C;
+ flags &= ~LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR;
+ flags &= ~LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK;
+ flags |= LAN743X_VECTOR_FLAG_SOURCE_STATUS_R2C;
+ flags |= LAN743X_VECTOR_FLAG_SOURCE_ENABLE_R2C;
+ }
+
+ init_waitqueue_head(&intr->software_isr_wq);
+
+ ret = lan743x_intr_register_isr(adapter, 0, flags,
+ INT_BIT_ALL_RX_ | INT_BIT_ALL_TX_ |
+ INT_BIT_ALL_OTHER_,
+ lan743x_intr_shared_isr, adapter);
+ if (ret)
+ goto clean_up;
+ intr->flags |= INTR_FLAG_IRQ_REQUESTED(0);
+
+ if (intr->using_vectors)
+ lan743x_csr_write(adapter, INT_VEC_EN_SET,
+ INT_VEC_EN_(0));
+
+ if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) {
+ lan743x_csr_write(adapter, INT_MOD_CFG0, LAN743X_INT_MOD);
+ lan743x_csr_write(adapter, INT_MOD_CFG1, LAN743X_INT_MOD);
+ lan743x_csr_write(adapter, INT_MOD_CFG2, LAN743X_INT_MOD);
+ lan743x_csr_write(adapter, INT_MOD_CFG3, LAN743X_INT_MOD);
+ lan743x_csr_write(adapter, INT_MOD_CFG4, LAN743X_INT_MOD);
+ lan743x_csr_write(adapter, INT_MOD_CFG5, LAN743X_INT_MOD);
+ lan743x_csr_write(adapter, INT_MOD_CFG6, LAN743X_INT_MOD);
+ lan743x_csr_write(adapter, INT_MOD_CFG7, LAN743X_INT_MOD);
+ if (adapter->is_pci11x1x) {
+ lan743x_csr_write(adapter, INT_MOD_CFG8, LAN743X_INT_MOD);
+ lan743x_csr_write(adapter, INT_MOD_CFG9, LAN743X_INT_MOD);
+ lan743x_csr_write(adapter, INT_MOD_MAP0, 0x00007654);
+ lan743x_csr_write(adapter, INT_MOD_MAP1, 0x00003210);
+ } else {
+ lan743x_csr_write(adapter, INT_MOD_MAP0, 0x00005432);
+ lan743x_csr_write(adapter, INT_MOD_MAP1, 0x00000001);
+ }
+ lan743x_csr_write(adapter, INT_MOD_MAP2, 0x00FFFFFF);
+ }
+
+ /* enable interrupts */
+ lan743x_csr_write(adapter, INT_EN_SET, INT_BIT_MAS_);
+ ret = lan743x_intr_test_isr(adapter);
+ if (ret)
+ goto clean_up;
+
+ if (intr->number_of_vectors > 1) {
+ int number_of_tx_vectors = intr->number_of_vectors - 1;
+
+ if (number_of_tx_vectors > used_tx_channels)
+ number_of_tx_vectors = used_tx_channels;
+ flags = LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ |
+ LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C |
+ LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK |
+ LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR |
+ LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_CLEAR |
+ LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_SET;
+
+ if (adapter->csr.flags &
+ LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR) {
+ flags = LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET |
+ LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET |
+ LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR |
+ LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR;
+ }
+
+ for (index = 0; index < number_of_tx_vectors; index++) {
+ u32 int_bit = INT_BIT_DMA_TX_(index);
+ int vector = index + 1;
+
+ /* map TX interrupt to vector */
+ int_vec_map1 |= INT_VEC_MAP1_TX_VEC_(index, vector);
+ lan743x_csr_write(adapter, INT_VEC_MAP1, int_vec_map1);
+
+ /* Remove TX interrupt from shared mask */
+ intr->vector_list[0].int_mask &= ~int_bit;
+ ret = lan743x_intr_register_isr(adapter, vector, flags,
+ int_bit, lan743x_tx_isr,
+ &adapter->tx[index]);
+ if (ret)
+ goto clean_up;
+ intr->flags |= INTR_FLAG_IRQ_REQUESTED(vector);
+ if (!(flags &
+ LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET))
+ lan743x_csr_write(adapter, INT_VEC_EN_SET,
+ INT_VEC_EN_(vector));
+ }
+ }
+ if ((intr->number_of_vectors - used_tx_channels) > 1) {
+ int number_of_rx_vectors = intr->number_of_vectors -
+ used_tx_channels - 1;
+
+ if (number_of_rx_vectors > LAN743X_USED_RX_CHANNELS)
+ number_of_rx_vectors = LAN743X_USED_RX_CHANNELS;
+
+ flags = LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ |
+ LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C |
+ LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK |
+ LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR |
+ LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_CLEAR |
+ LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_SET;
+
+ if (adapter->csr.flags &
+ LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR) {
+ flags = LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_CLEAR |
+ LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET |
+ LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET |
+ LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR |
+ LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR;
+ }
+ for (index = 0; index < number_of_rx_vectors; index++) {
+ int vector = index + 1 + used_tx_channels;
+ u32 int_bit = INT_BIT_DMA_RX_(index);
+
+ /* map RX interrupt to vector */
+ int_vec_map0 |= INT_VEC_MAP0_RX_VEC_(index, vector);
+ lan743x_csr_write(adapter, INT_VEC_MAP0, int_vec_map0);
+ if (flags &
+ LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_CLEAR) {
+ int_vec_en_auto_clr |= INT_VEC_EN_(vector);
+ lan743x_csr_write(adapter, INT_VEC_EN_AUTO_CLR,
+ int_vec_en_auto_clr);
+ }
+
+ /* Remove RX interrupt from shared mask */
+ intr->vector_list[0].int_mask &= ~int_bit;
+ ret = lan743x_intr_register_isr(adapter, vector, flags,
+ int_bit, lan743x_rx_isr,
+ &adapter->rx[index]);
+ if (ret)
+ goto clean_up;
+ intr->flags |= INTR_FLAG_IRQ_REQUESTED(vector);
+
+ lan743x_csr_write(adapter, INT_VEC_EN_SET,
+ INT_VEC_EN_(vector));
+ }
+ }
+ return 0;
+
+clean_up:
+ lan743x_intr_close(adapter);
+ return ret;
+}
+
+static int lan743x_dp_write(struct lan743x_adapter *adapter,
+ u32 select, u32 addr, u32 length, u32 *buf)
+{
+ u32 dp_sel;
+ int i;
+
+ if (lan743x_csr_wait_for_bit_atomic(adapter, DP_SEL, DP_SEL_DPRDY_,
+ 1, 40, 100, 100))
+ return -EIO;
+ dp_sel = lan743x_csr_read(adapter, DP_SEL);
+ dp_sel &= ~DP_SEL_MASK_;
+ dp_sel |= select;
+ lan743x_csr_write(adapter, DP_SEL, dp_sel);
+
+ for (i = 0; i < length; i++) {
+ lan743x_csr_write(adapter, DP_ADDR, addr + i);
+ lan743x_csr_write(adapter, DP_DATA_0, buf[i]);
+ lan743x_csr_write(adapter, DP_CMD, DP_CMD_WRITE_);
+ if (lan743x_csr_wait_for_bit_atomic(adapter, DP_SEL,
+ DP_SEL_DPRDY_,
+ 1, 40, 100, 100))
+ return -EIO;
+ }
+
+ return 0;
+}
+
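+/* Build a MAC_MII_ACC command word: PHY address, register index,
+ * read/write direction, plus the BUSY bit that starts the transfer.
+ */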
+static u32 lan743x_mac_mii_access(u16 id, u16 index, int read)
+{
+ u32 ret;
+
+ ret = (id << MAC_MII_ACC_PHY_ADDR_SHIFT_) &
+ MAC_MII_ACC_PHY_ADDR_MASK_;
+ ret |= (index << MAC_MII_ACC_MIIRINDA_SHIFT_) &
+ MAC_MII_ACC_MIIRINDA_MASK_;
+
+ if (read)
+ ret |= MAC_MII_ACC_MII_READ_;
+ else
+ ret |= MAC_MII_ACC_MII_WRITE_;
+ ret |= MAC_MII_ACC_MII_BUSY_;
+
+ return ret;
+}
+
+static int lan743x_mac_mii_wait_till_not_busy(struct lan743x_adapter *adapter)
+{
+ u32 data;
+
+ return readx_poll_timeout(LAN743X_CSR_READ_OP, MAC_MII_ACC, data,
+ !(data & MAC_MII_ACC_MII_BUSY_), 0, 1000000);
+}
+
+static int lan743x_mdiobus_read_c22(struct mii_bus *bus, int phy_id, int index)
+{
+ struct lan743x_adapter *adapter = bus->priv;
+ u32 val, mii_access;
+ int ret;
+
+ /* confirm MII not busy */
+ ret = lan743x_mac_mii_wait_till_not_busy(adapter);
+ if (ret < 0)
+ return ret;
+
+ /* set the address, index & direction (read from PHY) */
+ mii_access = lan743x_mac_mii_access(phy_id, index, MAC_MII_READ);
+ lan743x_csr_write(adapter, MAC_MII_ACC, mii_access);
+ ret = lan743x_mac_mii_wait_till_not_busy(adapter);
+ if (ret < 0)
+ return ret;
+
+ val = lan743x_csr_read(adapter, MAC_MII_DATA);
+ return (int)(val & 0xFFFF);
+}
+
+static int lan743x_mdiobus_write_c22(struct mii_bus *bus,
+ int phy_id, int index, u16 regval)
+{
+ struct lan743x_adapter *adapter = bus->priv;
+ u32 val, mii_access;
+ int ret;
+
+ /* confirm MII not busy */
+ ret = lan743x_mac_mii_wait_till_not_busy(adapter);
+ if (ret < 0)
+ return ret;
+ val = (u32)regval;
+ lan743x_csr_write(adapter, MAC_MII_DATA, val);
+
+ /* set the address, index & direction (write to PHY) */
+ mii_access = lan743x_mac_mii_access(phy_id, index, MAC_MII_WRITE);
+ lan743x_csr_write(adapter, MAC_MII_ACC, mii_access);
+ ret = lan743x_mac_mii_wait_till_not_busy(adapter);
+ return ret;
+}
+
+static u32 lan743x_mac_mmd_access(int id, int dev_addr, int op)
+{
+ u32 ret;
+
+ ret = (id << MAC_MII_ACC_PHY_ADDR_SHIFT_) &
+ MAC_MII_ACC_PHY_ADDR_MASK_;
+ ret |= (dev_addr << MAC_MII_ACC_MIIMMD_SHIFT_) &
+ MAC_MII_ACC_MIIMMD_MASK_;
+ if (op == MMD_ACCESS_WRITE)
+ ret |= MAC_MII_ACC_MIICMD_WRITE_;
+ else if (op == MMD_ACCESS_READ)
+ ret |= MAC_MII_ACC_MIICMD_READ_;
+ else if (op == MMD_ACCESS_READ_INC)
+ ret |= MAC_MII_ACC_MIICMD_READ_INC_;
+ else
+ ret |= MAC_MII_ACC_MIICMD_ADDR_;
+ ret |= (MAC_MII_ACC_MII_BUSY_ | MAC_MII_ACC_MIICL45_);
+
+ return ret;
+}
+
+static int lan743x_mdiobus_read_c45(struct mii_bus *bus, int phy_id,
+ int dev_addr, int index)
+{
+ struct lan743x_adapter *adapter = bus->priv;
+ u32 mmd_access;
+ int ret;
+
+ /* confirm MII not busy */
+ ret = lan743x_mac_mii_wait_till_not_busy(adapter);
+ if (ret < 0)
+ return ret;
+
+ /* Load Register Address */
+ lan743x_csr_write(adapter, MAC_MII_DATA, index);
+ mmd_access = lan743x_mac_mmd_access(phy_id, dev_addr,
+ MMD_ACCESS_ADDRESS);
+ lan743x_csr_write(adapter, MAC_MII_ACC, mmd_access);
+ ret = lan743x_mac_mii_wait_till_not_busy(adapter);
+ if (ret < 0)
+ return ret;
+
+ /* Read Data */
+ mmd_access = lan743x_mac_mmd_access(phy_id, dev_addr,
+ MMD_ACCESS_READ);
+ lan743x_csr_write(adapter, MAC_MII_ACC, mmd_access);
+ ret = lan743x_mac_mii_wait_till_not_busy(adapter);
+ if (ret < 0)
+ return ret;
+
+ ret = lan743x_csr_read(adapter, MAC_MII_DATA);
+ return (int)(ret & 0xFFFF);
+}
+
+static int lan743x_mdiobus_write_c45(struct mii_bus *bus, int phy_id,
+ int dev_addr, int index, u16 regval)
+{
+ struct lan743x_adapter *adapter = bus->priv;
+ u32 mmd_access;
+ int ret;
+
+ /* confirm MII not busy */
+ ret = lan743x_mac_mii_wait_till_not_busy(adapter);
+ if (ret < 0)
+ return ret;
+
+ /* Load Register Address */
+ lan743x_csr_write(adapter, MAC_MII_DATA, (u32)index);
+ mmd_access = lan743x_mac_mmd_access(phy_id, dev_addr,
+ MMD_ACCESS_ADDRESS);
+ lan743x_csr_write(adapter, MAC_MII_ACC, mmd_access);
+ ret = lan743x_mac_mii_wait_till_not_busy(adapter);
+ if (ret < 0)
+ return ret;
+
+ /* Write Data */
+ lan743x_csr_write(adapter, MAC_MII_DATA, (u32)regval);
+ mmd_access = lan743x_mac_mmd_access(phy_id, dev_addr,
+ MMD_ACCESS_WRITE);
+ lan743x_csr_write(adapter, MAC_MII_ACC, mmd_access);
+
+ return lan743x_mac_mii_wait_till_not_busy(adapter);
+}
+
+static int lan743x_sgmii_wait_till_not_busy(struct lan743x_adapter *adapter)
+{
+ u32 data;
+ int ret;
+
+ ret = readx_poll_timeout(LAN743X_CSR_READ_OP, SGMII_ACC, data,
+ !(data & SGMII_ACC_SGMII_BZY_), 100, 1000000);
+ if (ret < 0)
+ netif_err(adapter, drv, adapter->netdev,
+ "%s: error %d sgmii wait timeout\n", __func__, ret);
+
+ return ret;
+}
+
+int lan743x_sgmii_read(struct lan743x_adapter *adapter, u8 mmd, u16 addr)
+{
+ u32 mmd_access;
+ int ret;
+ u32 val;
+
+ if (mmd > 31) {
+ netif_err(adapter, probe, adapter->netdev,
+ "%s mmd should <= 31\n", __func__);
+ return -EINVAL;
+ }
+
+ mutex_lock(&adapter->sgmii_rw_lock);
+ /* Load Register Address */
+ mmd_access = mmd << SGMII_ACC_SGMII_MMD_SHIFT_;
+ mmd_access |= (addr | SGMII_ACC_SGMII_BZY_);
+ lan743x_csr_write(adapter, SGMII_ACC, mmd_access);
+ ret = lan743x_sgmii_wait_till_not_busy(adapter);
+ if (ret < 0)
+ goto sgmii_unlock;
+
+ val = lan743x_csr_read(adapter, SGMII_DATA);
+ ret = (int)(val & SGMII_DATA_MASK_);
+
+sgmii_unlock:
+ mutex_unlock(&adapter->sgmii_rw_lock);
+
+ return ret;
+}
+
+static int lan743x_sgmii_write(struct lan743x_adapter *adapter,
+ u8 mmd, u16 addr, u16 val)
+{
+ u32 mmd_access;
+ int ret;
+
+ if (mmd > 31) {
+ netif_err(adapter, probe, adapter->netdev,
+ "%s mmd should <= 31\n", __func__);
+ return -EINVAL;
+ }
+ mutex_lock(&adapter->sgmii_rw_lock);
+ /* Load Register Data */
+ lan743x_csr_write(adapter, SGMII_DATA, (u32)(val & SGMII_DATA_MASK_));
+ /* Load Register Address */
+ mmd_access = mmd << SGMII_ACC_SGMII_MMD_SHIFT_;
+ mmd_access |= (addr | SGMII_ACC_SGMII_BZY_ | SGMII_ACC_SGMII_WR_);
+ lan743x_csr_write(adapter, SGMII_ACC, mmd_access);
+ ret = lan743x_sgmii_wait_till_not_busy(adapter);
+ mutex_unlock(&adapter->sgmii_rw_lock);
+
+ return ret;
+}
+
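+/* Select SerDes MPLL settings for the requested baud rate: 1.25 Gbps
+ * uses multiplier 100 with mpll_baud_clk/4; anything else (3.125 Gbps
+ * for 2.5G operation) uses multiplier 125 with mpll_baud_clk/2.
+ */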
+static int lan743x_sgmii_mpll_set(struct lan743x_adapter *adapter,
+ u16 baud)
+{
+ int mpllctrl0;
+ int mpllctrl1;
+ int miscctrl1;
+ int ret;
+
+ mpllctrl0 = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2,
+ VR_MII_GEN2_4_MPLL_CTRL0);
+ if (mpllctrl0 < 0)
+ return mpllctrl0;
+
+ mpllctrl0 &= ~VR_MII_MPLL_CTRL0_USE_REFCLK_PAD_;
+ if (baud == VR_MII_BAUD_RATE_1P25GBPS) {
+ mpllctrl1 = VR_MII_MPLL_MULTIPLIER_100;
+ /* mpll_baud_clk/4 */
+ miscctrl1 = 0xA;
+ } else {
+ mpllctrl1 = VR_MII_MPLL_MULTIPLIER_125;
+ /* mpll_baud_clk/2 */
+ miscctrl1 = 0x5;
+ }
+
+ ret = lan743x_sgmii_write(adapter, MDIO_MMD_VEND2,
+ VR_MII_GEN2_4_MPLL_CTRL0, mpllctrl0);
+ if (ret < 0)
+ return ret;
+
+ ret = lan743x_sgmii_write(adapter, MDIO_MMD_VEND2,
+ VR_MII_GEN2_4_MPLL_CTRL1, mpllctrl1);
+ if (ret < 0)
+ return ret;
+
+ return lan743x_sgmii_write(adapter, MDIO_MMD_VEND2,
+ VR_MII_GEN2_4_MISC_CTRL1, miscctrl1);
+}
+
+static int lan743x_sgmii_2_5G_mode_set(struct lan743x_adapter *adapter,
+ bool enable)
+{
+ if (enable)
+ return lan743x_sgmii_mpll_set(adapter,
+ VR_MII_BAUD_RATE_3P125GBPS);
+ else
+ return lan743x_sgmii_mpll_set(adapter,
+ VR_MII_BAUD_RATE_1P25GBPS);
+}
+
+static int lan743x_is_sgmii_2_5G_mode(struct lan743x_adapter *adapter,
+ bool *status)
+{
+ int ret;
+
+ ret = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2,
+ VR_MII_GEN2_4_MPLL_CTRL1);
+ if (ret < 0)
+ return ret;
+
+ if (ret == VR_MII_MPLL_MULTIPLIER_125 ||
+ ret == VR_MII_MPLL_MULTIPLIER_50)
+ *status = true;
+ else
+ *status = false;
+
+ return 0;
+}
+
+static int lan743x_sgmii_aneg_update(struct lan743x_adapter *adapter)
+{
+ enum lan743x_sgmii_lsd lsd = adapter->sgmii_lsd;
+ int mii_ctrl;
+ int dgt_ctrl;
+ int an_ctrl;
+ int ret;
+
+ if (lsd == LINK_2500_MASTER || lsd == LINK_2500_SLAVE)
+ /* Switch to 2.5 Gbps */
+ ret = lan743x_sgmii_2_5G_mode_set(adapter, true);
+ else
+ /* Switch to 10/100/1000 Mbps clock */
+ ret = lan743x_sgmii_2_5G_mode_set(adapter, false);
+ if (ret < 0)
+ return ret;
+
+ /* Enable SGMII Auto NEG */
+ mii_ctrl = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2, MII_BMCR);
+ if (mii_ctrl < 0)
+ return mii_ctrl;
+
+ an_ctrl = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2, VR_MII_AN_CTRL);
+ if (an_ctrl < 0)
+ return an_ctrl;
+
+ dgt_ctrl = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2,
+ VR_MII_DIG_CTRL1);
+ if (dgt_ctrl < 0)
+ return dgt_ctrl;
+
+ if (lsd == LINK_2500_MASTER || lsd == LINK_2500_SLAVE) {
+ mii_ctrl &= ~(BMCR_ANENABLE | BMCR_ANRESTART | BMCR_SPEED100);
+ mii_ctrl |= BMCR_SPEED1000;
+ dgt_ctrl |= VR_MII_DIG_CTRL1_CL37_TMR_OVR_RIDE_;
+ dgt_ctrl &= ~VR_MII_DIG_CTRL1_MAC_AUTO_SW_;
+ /* In order for Auto-Negotiation to operate properly at
+ * 2.5 Gbps, the 1.6 ms link timer values must be adjusted:
+ * the VR_MII_LINK_TIMER_CTRL register must be set to
+ * 16'h7A1, and the CL37_TMR_OVR_RIDE bit of the
+ * VR_MII_DIG_CTRL1 register must be set to 1.
+ */
+ ret = lan743x_sgmii_write(adapter, MDIO_MMD_VEND2,
+ VR_MII_LINK_TIMER_CTRL, 0x7A1);
+ if (ret < 0)
+ return ret;
+ } else {
+ mii_ctrl |= (BMCR_ANENABLE | BMCR_ANRESTART);
+ an_ctrl &= ~VR_MII_AN_CTRL_SGMII_LINK_STS_;
+ dgt_ctrl &= ~VR_MII_DIG_CTRL1_CL37_TMR_OVR_RIDE_;
+ dgt_ctrl |= VR_MII_DIG_CTRL1_MAC_AUTO_SW_;
+ }
+
+ ret = lan743x_sgmii_write(adapter, MDIO_MMD_VEND2, MII_BMCR,
+ mii_ctrl);
+ if (ret < 0)
+ return ret;
+
+ ret = lan743x_sgmii_write(adapter, MDIO_MMD_VEND2,
+ VR_MII_DIG_CTRL1, dgt_ctrl);
+ if (ret < 0)
+ return ret;
+
+ return lan743x_sgmii_write(adapter, MDIO_MMD_VEND2,
+ VR_MII_AN_CTRL, an_ctrl);
+}
+
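+/* Poll the PCS power sequencer state in VR_MII_DIG_STS until it
+ * reaches the requested state, giving up after roughly 10-20 ms.
+ */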
+static int lan743x_pcs_seq_state(struct lan743x_adapter *adapter, u8 state)
+{
+ u8 wait_cnt = 0;
+ u32 dig_sts;
+
+ do {
+ dig_sts = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2,
+ VR_MII_DIG_STS);
+ /* return directly on a match; checking wait_cnt after the
+ * loop would misreport a match on the final attempt as a
+ * timeout
+ */
+ if (((dig_sts & VR_MII_DIG_STS_PSEQ_STATE_MASK_) >>
+ VR_MII_DIG_STS_PSEQ_STATE_POS_) == state)
+ return 0;
+ usleep_range(1000, 2000);
+ } while (wait_cnt++ < 10);
+
+ return -ETIMEDOUT;
+}
+
+static int lan743x_sgmii_config(struct lan743x_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct phy_device *phydev = netdev->phydev;
+ enum lan743x_sgmii_lsd lsd = POWER_DOWN;
+ int mii_ctl;
+ bool status;
+ int ret;
+
+ switch (phydev->speed) {
+ case SPEED_2500:
+ if (phydev->master_slave_state == MASTER_SLAVE_STATE_MASTER)
+ lsd = LINK_2500_MASTER;
+ else
+ lsd = LINK_2500_SLAVE;
+ break;
+ case SPEED_1000:
+ if (phydev->master_slave_state == MASTER_SLAVE_STATE_MASTER)
+ lsd = LINK_1000_MASTER;
+ else
+ lsd = LINK_1000_SLAVE;
+ break;
+ case SPEED_100:
+ if (phydev->duplex)
+ lsd = LINK_100FD;
+ else
+ lsd = LINK_100HD;
+ break;
+ case SPEED_10:
+ if (phydev->duplex)
+ lsd = LINK_10FD;
+ else
+ lsd = LINK_10HD;
+ break;
+ default:
+ netif_err(adapter, drv, adapter->netdev,
+ "Invalid speed %d\n", phydev->speed);
+ return -EINVAL;
+ }
+
+ adapter->sgmii_lsd = lsd;
+ ret = lan743x_sgmii_aneg_update(adapter);
+ if (ret < 0) {
+ netif_err(adapter, drv, adapter->netdev,
+ "error %d SGMII cfg failed\n", ret);
+ return ret;
+ }
+
+ ret = lan743x_is_sgmii_2_5G_mode(adapter, &status);
+ if (ret < 0) {
+ netif_err(adapter, drv, adapter->netdev,
+ "erro %d SGMII get mode failed\n", ret);
+ return ret;
+ }
+
+ if (status)
+ netif_dbg(adapter, drv, adapter->netdev,
+ "SGMII 2.5G mode enable\n");
+ else
+ netif_dbg(adapter, drv, adapter->netdev,
+ "SGMII 1G mode enable\n");
+
+ /* SGMII/1000/2500BASE-X PCS power down */
+ mii_ctl = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2, MII_BMCR);
+ if (mii_ctl < 0)
+ return mii_ctl;
+
+ mii_ctl |= BMCR_PDOWN;
+ ret = lan743x_sgmii_write(adapter, MDIO_MMD_VEND2, MII_BMCR, mii_ctl);
+ if (ret < 0)
+ return ret;
+
+ ret = lan743x_pcs_seq_state(adapter, PCS_POWER_STATE_DOWN);
+ if (ret < 0)
+ return ret;
+
+ /* SGMII/1000/2500BASE-X PCS power up */
+ mii_ctl &= ~BMCR_PDOWN;
+ ret = lan743x_sgmii_write(adapter, MDIO_MMD_VEND2, MII_BMCR, mii_ctl);
+ if (ret < 0)
+ return ret;
+
+ ret = lan743x_pcs_seq_state(adapter, PCS_POWER_STATE_UP);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
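+/* Program the unicast address registers; bytes are packed
+ * little-endian, with addr[0] in the low byte of MAC_RX_ADDRL.
+ */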
+static void lan743x_mac_set_address(struct lan743x_adapter *adapter,
+ u8 *addr)
+{
+ u32 addr_lo, addr_hi;
+
+ addr_lo = addr[0] |
+ addr[1] << 8 |
+ addr[2] << 16 |
+ addr[3] << 24;
+ addr_hi = addr[4] |
+ addr[5] << 8;
+ lan743x_csr_write(adapter, MAC_RX_ADDRL, addr_lo);
+ lan743x_csr_write(adapter, MAC_RX_ADDRH, addr_hi);
+
+ ether_addr_copy(adapter->mac_address, addr);
+ netif_info(adapter, drv, adapter->netdev,
+ "MAC address set to %pM\n", addr);
+}
+
+static int lan743x_mac_init(struct lan743x_adapter *adapter)
+{
+ bool mac_address_valid = true;
+ struct net_device *netdev;
+ u32 mac_addr_hi = 0;
+ u32 mac_addr_lo = 0;
+ u32 data;
+
+ netdev = adapter->netdev;
+
+ /* disable automatic duplex and speed detection; phylib handles both */
+ data = lan743x_csr_read(adapter, MAC_CR);
+ data &= ~(MAC_CR_ADD_ | MAC_CR_ASD_);
+ data |= MAC_CR_CNTR_RST_;
+ lan743x_csr_write(adapter, MAC_CR, data);
+
+ if (!is_valid_ether_addr(adapter->mac_address)) {
+ mac_addr_hi = lan743x_csr_read(adapter, MAC_RX_ADDRH);
+ mac_addr_lo = lan743x_csr_read(adapter, MAC_RX_ADDRL);
+ adapter->mac_address[0] = mac_addr_lo & 0xFF;
+ adapter->mac_address[1] = (mac_addr_lo >> 8) & 0xFF;
+ adapter->mac_address[2] = (mac_addr_lo >> 16) & 0xFF;
+ adapter->mac_address[3] = (mac_addr_lo >> 24) & 0xFF;
+ adapter->mac_address[4] = mac_addr_hi & 0xFF;
+ adapter->mac_address[5] = (mac_addr_hi >> 8) & 0xFF;
+
+ if (((mac_addr_hi & 0x0000FFFF) == 0x0000FFFF) &&
+ mac_addr_lo == 0xFFFFFFFF) {
+ mac_address_valid = false;
+ } else if (!is_valid_ether_addr(adapter->mac_address)) {
+ mac_address_valid = false;
+ }
+
+ if (!mac_address_valid)
+ eth_random_addr(adapter->mac_address);
+ }
+ lan743x_mac_set_address(adapter, adapter->mac_address);
+ eth_hw_addr_set(netdev, adapter->mac_address);
+
+ return 0;
+}
+
+static int lan743x_mac_open(struct lan743x_adapter *adapter)
+{
+ u32 temp;
+
+ temp = lan743x_csr_read(adapter, MAC_RX);
+ lan743x_csr_write(adapter, MAC_RX, temp | MAC_RX_RXEN_);
+ temp = lan743x_csr_read(adapter, MAC_TX);
+ lan743x_csr_write(adapter, MAC_TX, temp | MAC_TX_TXEN_);
+ return 0;
+}
+
+static void lan743x_mac_close(struct lan743x_adapter *adapter)
+{
+ u32 temp;
+
+ temp = lan743x_csr_read(adapter, MAC_TX);
+ temp &= ~MAC_TX_TXEN_;
+ lan743x_csr_write(adapter, MAC_TX, temp);
+ lan743x_csr_wait_for_bit(adapter, MAC_TX, MAC_TX_TXD_,
+ 1, 1000, 20000, 100);
+
+ temp = lan743x_csr_read(adapter, MAC_RX);
+ temp &= ~MAC_RX_RXEN_;
+ lan743x_csr_write(adapter, MAC_RX, temp);
+ lan743x_csr_wait_for_bit(adapter, MAC_RX, MAC_RX_RXD_,
+ 1, 1000, 20000, 100);
+}
+
+void lan743x_mac_flow_ctrl_set_enables(struct lan743x_adapter *adapter,
+ bool tx_enable, bool rx_enable)
+{
+ u32 flow_setting = 0;
+
+ /* Set the maximum pause time; when FIFO space frees up,
+ * a zero-value pause frame is sent to release the pause.
+ */
+ flow_setting = MAC_FLOW_CR_FCPT_MASK_;
+ if (tx_enable)
+ flow_setting |= MAC_FLOW_CR_TX_FCEN_;
+ if (rx_enable)
+ flow_setting |= MAC_FLOW_CR_RX_FCEN_;
+ lan743x_csr_write(adapter, MAC_FLOW, flow_setting);
+}
+
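+/* Update the RX max frame size (MTU plus Ethernet header and FCS).
+ * The receiver is quiesced around the update and re-enabled only if
+ * it was running before.
+ */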
+static int lan743x_mac_set_mtu(struct lan743x_adapter *adapter, int new_mtu)
+{
+ int enabled = 0;
+ u32 mac_rx = 0;
+
+ mac_rx = lan743x_csr_read(adapter, MAC_RX);
+ if (mac_rx & MAC_RX_RXEN_) {
+ enabled = 1;
+ if (mac_rx & MAC_RX_RXD_) {
+ lan743x_csr_write(adapter, MAC_RX, mac_rx);
+ mac_rx &= ~MAC_RX_RXD_;
+ }
+ mac_rx &= ~MAC_RX_RXEN_;
+ lan743x_csr_write(adapter, MAC_RX, mac_rx);
+ lan743x_csr_wait_for_bit(adapter, MAC_RX, MAC_RX_RXD_,
+ 1, 1000, 20000, 100);
+ lan743x_csr_write(adapter, MAC_RX, mac_rx | MAC_RX_RXD_);
+ }
+
+ mac_rx &= ~(MAC_RX_MAX_SIZE_MASK_);
+ mac_rx |= (((new_mtu + ETH_HLEN + ETH_FCS_LEN)
+ << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
+ lan743x_csr_write(adapter, MAC_RX, mac_rx);
+
+ if (enabled) {
+ mac_rx |= MAC_RX_RXEN_;
+ lan743x_csr_write(adapter, MAC_RX, mac_rx);
+ }
+ return 0;
+}
+
+/* PHY */
+static int lan743x_phy_reset(struct lan743x_adapter *adapter)
+{
+ u32 data;
+
+ /* Only called from within probe, before mdiobus_register */
+
+ data = lan743x_csr_read(adapter, PMT_CTL);
+ data |= PMT_CTL_ETH_PHY_RST_;
+ lan743x_csr_write(adapter, PMT_CTL, data);
+
+ return readx_poll_timeout(LAN743X_CSR_READ_OP, PMT_CTL, data,
+ (!(data & PMT_CTL_ETH_PHY_RST_) &&
+ (data & PMT_CTL_READY_)),
+ 50000, 1000000);
+}
+
+static void lan743x_phy_update_flowcontrol(struct lan743x_adapter *adapter,
+ u16 local_adv, u16 remote_adv)
+{
+ struct lan743x_phy *phy = &adapter->phy;
+ u8 cap;
+
+ if (phy->fc_autoneg)
+ cap = mii_resolve_flowctrl_fdx(local_adv, remote_adv);
+ else
+ cap = phy->fc_request_control;
+
+ lan743x_mac_flow_ctrl_set_enables(adapter,
+ cap & FLOW_CTRL_TX,
+ cap & FLOW_CTRL_RX);
+}
+
+static int lan743x_phy_init(struct lan743x_adapter *adapter)
+{
+ return lan743x_phy_reset(adapter);
+}
+
+static void lan743x_phy_link_status_change(struct net_device *netdev)
+{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+ struct phy_device *phydev = netdev->phydev;
+ u32 data;
+
+ phy_print_status(phydev);
+ if (phydev->state == PHY_RUNNING) {
+ int remote_advertisement = 0;
+ int local_advertisement = 0;
+
+ data = lan743x_csr_read(adapter, MAC_CR);
+
+ /* set duplex mode */
+ if (phydev->duplex)
+ data |= MAC_CR_DPX_;
+ else
+ data &= ~MAC_CR_DPX_;
+
+ /* set bus speed */
+ switch (phydev->speed) {
+ case SPEED_10:
+ data &= ~MAC_CR_CFG_H_;
+ data &= ~MAC_CR_CFG_L_;
+ break;
+ case SPEED_100:
+ data &= ~MAC_CR_CFG_H_;
+ data |= MAC_CR_CFG_L_;
+ break;
+ case SPEED_1000:
+ data |= MAC_CR_CFG_H_;
+ data &= ~MAC_CR_CFG_L_;
+ break;
+ case SPEED_2500:
+ data |= MAC_CR_CFG_H_;
+ data |= MAC_CR_CFG_L_;
+ break;
+ }
+ lan743x_csr_write(adapter, MAC_CR, data);
+
+ local_advertisement =
+ linkmode_adv_to_mii_adv_t(phydev->advertising);
+ remote_advertisement =
+ linkmode_adv_to_mii_adv_t(phydev->lp_advertising);
+
+ lan743x_phy_update_flowcontrol(adapter, local_advertisement,
+ remote_advertisement);
+ lan743x_ptp_update_latency(adapter, phydev->speed);
+ if (phydev->interface == PHY_INTERFACE_MODE_SGMII ||
+ phydev->interface == PHY_INTERFACE_MODE_1000BASEX ||
+ phydev->interface == PHY_INTERFACE_MODE_2500BASEX)
+ lan743x_sgmii_config(adapter);
+ }
+}
+
+static void lan743x_phy_close(struct lan743x_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ phy_stop(netdev->phydev);
+ phy_disconnect(netdev->phydev);
+}
+
+static void lan743x_phy_interface_select(struct lan743x_adapter *adapter)
+{
+ u32 id_rev;
+ u32 data;
+
+ data = lan743x_csr_read(adapter, MAC_CR);
+ id_rev = adapter->csr.id_rev & ID_REV_ID_MASK_;
+
+ if (adapter->is_pci11x1x && adapter->is_sgmii_en)
+ adapter->phy_interface = PHY_INTERFACE_MODE_SGMII;
+ else if (id_rev == ID_REV_ID_LAN7430_)
+ adapter->phy_interface = PHY_INTERFACE_MODE_GMII;
+ else if ((id_rev == ID_REV_ID_LAN7431_) && (data & MAC_CR_MII_EN_))
+ adapter->phy_interface = PHY_INTERFACE_MODE_MII;
+ else
+ adapter->phy_interface = PHY_INTERFACE_MODE_RGMII;
+}
+
+static int lan743x_phy_open(struct lan743x_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct lan743x_phy *phy = &adapter->phy;
+ struct fixed_phy_status fphy_status = {
+ .link = 1,
+ .speed = SPEED_1000,
+ .duplex = DUPLEX_FULL,
+ };
+ struct phy_device *phydev;
+ int ret = -EIO;
+
+ /* try devicetree phy, or fixed link */
+ phydev = of_phy_get_and_connect(netdev, adapter->pdev->dev.of_node,
+ lan743x_phy_link_status_change);
+
+ if (!phydev) {
+ /* try internal phy */
+ phydev = phy_find_first(adapter->mdiobus);
+ if (!phydev) {
+ if ((adapter->csr.id_rev & ID_REV_ID_MASK_) ==
+ ID_REV_ID_LAN7431_) {
+ phydev = fixed_phy_register(PHY_POLL,
+ &fphy_status, NULL);
+ if (IS_ERR(phydev)) {
+ netdev_err(netdev, "No PHY/fixed_PHY found\n");
+ return PTR_ERR(phydev);
+ }
+ } else {
+ goto return_error;
+ }
+ }
+
+ lan743x_phy_interface_select(adapter);
+
+ ret = phy_connect_direct(netdev, phydev,
+ lan743x_phy_link_status_change,
+ adapter->phy_interface);
+ if (ret)
+ goto return_error;
+ }
+
+ /* MAC doesn't support 1000T Half */
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
+
+ /* support both flow controls */
+ phy_support_asym_pause(phydev);
+ phy->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
+ phy->fc_autoneg = phydev->autoneg;
+
+ phy_start(phydev);
+ phy_start_aneg(phydev);
+ phy_attached_info(phydev);
+ return 0;
+
+return_error:
+ return ret;
+}
+
+static void lan743x_rfe_open(struct lan743x_adapter *adapter)
+{
+ lan743x_csr_write(adapter, RFE_RSS_CFG,
+ RFE_RSS_CFG_UDP_IPV6_EX_ |
+ RFE_RSS_CFG_TCP_IPV6_EX_ |
+ RFE_RSS_CFG_IPV6_EX_ |
+ RFE_RSS_CFG_UDP_IPV6_ |
+ RFE_RSS_CFG_TCP_IPV6_ |
+ RFE_RSS_CFG_IPV6_ |
+ RFE_RSS_CFG_UDP_IPV4_ |
+ RFE_RSS_CFG_TCP_IPV4_ |
+ RFE_RSS_CFG_IPV4_ |
+ RFE_RSS_CFG_VALID_HASH_BITS_ |
+ RFE_RSS_CFG_RSS_QUEUE_ENABLE_ |
+ RFE_RSS_CFG_RSS_HASH_STORE_ |
+ RFE_RSS_CFG_RSS_ENABLE_);
+}
+
+static void lan743x_rfe_update_mac_address(struct lan743x_adapter *adapter)
+{
+ u8 *mac_addr;
+ u32 mac_addr_hi = 0;
+ u32 mac_addr_lo = 0;
+
+ /* Add the MAC address to the perfect filter */
+ mac_addr = adapter->mac_address;
+ mac_addr_lo = ((((u32)(mac_addr[0])) << 0) |
+ (((u32)(mac_addr[1])) << 8) |
+ (((u32)(mac_addr[2])) << 16) |
+ (((u32)(mac_addr[3])) << 24));
+ mac_addr_hi = ((((u32)(mac_addr[4])) << 0) |
+ (((u32)(mac_addr[5])) << 8));
+
+ lan743x_csr_write(adapter, RFE_ADDR_FILT_LO(0), mac_addr_lo);
+ lan743x_csr_write(adapter, RFE_ADDR_FILT_HI(0),
+ mac_addr_hi | RFE_ADDR_FILT_HI_VALID_);
+}
+
+static void lan743x_rfe_set_multicast(struct lan743x_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ u32 hash_table[DP_SEL_VHF_HASH_LEN];
+ u32 rfctl;
+ u32 data;
+
+ rfctl = lan743x_csr_read(adapter, RFE_CTL);
+ rfctl &= ~(RFE_CTL_AU_ | RFE_CTL_AM_ |
+ RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);
+ rfctl |= RFE_CTL_AB_;
+ if (netdev->flags & IFF_PROMISC) {
+ rfctl |= RFE_CTL_AM_ | RFE_CTL_AU_;
+ } else {
+ if (netdev->flags & IFF_ALLMULTI)
+ rfctl |= RFE_CTL_AM_;
+ }
+
+ if (netdev->features & NETIF_F_RXCSUM)
+ rfctl |= RFE_CTL_IP_COE_ | RFE_CTL_TCP_UDP_COE_;
+
+ memset(hash_table, 0, DP_SEL_VHF_HASH_LEN * sizeof(u32));
+ if (netdev_mc_count(netdev)) {
+ struct netdev_hw_addr *ha;
+ int i;
+
+ rfctl |= RFE_CTL_DA_PERFECT_;
+ i = 1;
+ netdev_for_each_mc_addr(ha, netdev) {
+ /* set the first 32 addresses into the perfect filter */
+ if (i < 33) {
+ lan743x_csr_write(adapter,
+ RFE_ADDR_FILT_HI(i), 0);
+ data = ha->addr[3];
+ data = ha->addr[2] | (data << 8);
+ data = ha->addr[1] | (data << 8);
+ data = ha->addr[0] | (data << 8);
+ lan743x_csr_write(adapter,
+ RFE_ADDR_FILT_LO(i), data);
+ data = ha->addr[5];
+ data = ha->addr[4] | (data << 8);
+ data |= RFE_ADDR_FILT_HI_VALID_;
+ lan743x_csr_write(adapter,
+ RFE_ADDR_FILT_HI(i), data);
+ } else {
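+ /* past the 32 perfect filter slots, fall back
+ * to the 512-bit multicast hash indexed by
+ * bits 23-31 of the Ethernet CRC
+ */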
+ u32 bitnum = (ether_crc(ETH_ALEN, ha->addr) >>
+ 23) & 0x1FF;
+ hash_table[bitnum / 32] |= (1 << (bitnum % 32));
+ rfctl |= RFE_CTL_MCAST_HASH_;
+ }
+ i++;
+ }
+ }
+
+ lan743x_dp_write(adapter, DP_SEL_RFE_RAM,
+ DP_SEL_VHF_VLAN_LEN,
+ DP_SEL_VHF_HASH_LEN, hash_table);
+ lan743x_csr_write(adapter, RFE_CTL, rfctl);
+}
+
+static int lan743x_dmac_init(struct lan743x_adapter *adapter)
+{
+ u32 data = 0;
+
+ lan743x_csr_write(adapter, DMAC_CMD, DMAC_CMD_SWR_);
+ lan743x_csr_wait_for_bit(adapter, DMAC_CMD, DMAC_CMD_SWR_,
+ 0, 1000, 20000, 100);
+ switch (DEFAULT_DMA_DESCRIPTOR_SPACING) {
+ case DMA_DESCRIPTOR_SPACING_16:
+ data = DMAC_CFG_MAX_DSPACE_16_;
+ break;
+ case DMA_DESCRIPTOR_SPACING_32:
+ data = DMAC_CFG_MAX_DSPACE_32_;
+ break;
+ case DMA_DESCRIPTOR_SPACING_64:
+ data = DMAC_CFG_MAX_DSPACE_64_;
+ break;
+ case DMA_DESCRIPTOR_SPACING_128:
+ data = DMAC_CFG_MAX_DSPACE_128_;
+ break;
+ default:
+ return -EPERM;
+ }
+ if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0))
+ data |= DMAC_CFG_COAL_EN_;
+ data |= DMAC_CFG_CH_ARB_SEL_RX_HIGH_;
+ data |= DMAC_CFG_MAX_READ_REQ_SET_(6);
+ lan743x_csr_write(adapter, DMAC_CFG, data);
+ data = DMAC_COAL_CFG_TIMER_LIMIT_SET_(1);
+ data |= DMAC_COAL_CFG_TIMER_TX_START_;
+ data |= DMAC_COAL_CFG_FLUSH_INTS_;
+ data |= DMAC_COAL_CFG_INT_EXIT_COAL_;
+ data |= DMAC_COAL_CFG_CSR_EXIT_COAL_;
+ data |= DMAC_COAL_CFG_TX_THRES_SET_(0x0A);
+ data |= DMAC_COAL_CFG_RX_THRES_SET_(0x0C);
+ lan743x_csr_write(adapter, DMAC_COAL_CFG, data);
+ data = DMAC_OBFF_TX_THRES_SET_(0x08);
+ data |= DMAC_OBFF_RX_THRES_SET_(0x0A);
+ lan743x_csr_write(adapter, DMAC_OBFF_CFG, data);
+ return 0;
+}
+
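+/* Derive a DMAC channel state (started, stop pending, stopped) from
+ * the channel's START and STOP bits in DMAC_CMD.
+ */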
+static int lan743x_dmac_tx_get_state(struct lan743x_adapter *adapter,
+ int tx_channel)
+{
+ u32 dmac_cmd = 0;
+
+ dmac_cmd = lan743x_csr_read(adapter, DMAC_CMD);
+ return DMAC_CHANNEL_STATE_SET((dmac_cmd &
+ DMAC_CMD_START_T_(tx_channel)),
+ (dmac_cmd &
+ DMAC_CMD_STOP_T_(tx_channel)));
+}
+
+static int lan743x_dmac_tx_wait_till_stopped(struct lan743x_adapter *adapter,
+ int tx_channel)
+{
+ int timeout = 100;
+ int result = 0;
+
+ while (timeout &&
+ ((result = lan743x_dmac_tx_get_state(adapter, tx_channel)) ==
+ DMAC_CHANNEL_STATE_STOP_PENDING)) {
+ usleep_range(1000, 20000);
+ timeout--;
+ }
+ if (result == DMAC_CHANNEL_STATE_STOP_PENDING)
+ result = -ENODEV;
+ return result;
+}
+
+static int lan743x_dmac_rx_get_state(struct lan743x_adapter *adapter,
+ int rx_channel)
+{
+ u32 dmac_cmd = 0;
+
+ dmac_cmd = lan743x_csr_read(adapter, DMAC_CMD);
+ return DMAC_CHANNEL_STATE_SET((dmac_cmd &
+ DMAC_CMD_START_R_(rx_channel)),
+ (dmac_cmd &
+ DMAC_CMD_STOP_R_(rx_channel)));
+}
+
+static int lan743x_dmac_rx_wait_till_stopped(struct lan743x_adapter *adapter,
+ int rx_channel)
+{
+ int timeout = 100;
+ int result = 0;
+
+ while (timeout &&
+ ((result = lan743x_dmac_rx_get_state(adapter, rx_channel)) ==
+ DMAC_CHANNEL_STATE_STOP_PENDING)) {
+ usleep_range(1000, 20000);
+ timeout--;
+ }
+ if (result == DMAC_CHANNEL_STATE_STOP_PENDING)
+ result = -ENODEV;
+ return result;
+}
+
+static void lan743x_tx_release_desc(struct lan743x_tx *tx,
+ int descriptor_index, bool cleanup)
+{
+ struct lan743x_tx_buffer_info *buffer_info = NULL;
+ struct lan743x_tx_descriptor *descriptor = NULL;
+ u32 descriptor_type = 0;
+ bool ignore_sync;
+
+ descriptor = &tx->ring_cpu_ptr[descriptor_index];
+ buffer_info = &tx->buffer_info[descriptor_index];
+ if (!(buffer_info->flags & TX_BUFFER_INFO_FLAG_ACTIVE))
+ goto done;
+
+ descriptor_type = le32_to_cpu(descriptor->data0) &
+ TX_DESC_DATA0_DTYPE_MASK_;
+ if (descriptor_type == TX_DESC_DATA0_DTYPE_DATA_)
+ goto clean_up_data_descriptor;
+ else
+ goto clear_active;
+
+clean_up_data_descriptor:
+ if (buffer_info->dma_ptr) {
+ if (buffer_info->flags &
+ TX_BUFFER_INFO_FLAG_SKB_FRAGMENT) {
+ dma_unmap_page(&tx->adapter->pdev->dev,
+ buffer_info->dma_ptr,
+ buffer_info->buffer_length,
+ DMA_TO_DEVICE);
+ } else {
+ dma_unmap_single(&tx->adapter->pdev->dev,
+ buffer_info->dma_ptr,
+ buffer_info->buffer_length,
+ DMA_TO_DEVICE);
+ }
+ buffer_info->dma_ptr = 0;
+ buffer_info->buffer_length = 0;
+ }
+ if (!buffer_info->skb)
+ goto clear_active;
+
+ if (!(buffer_info->flags & TX_BUFFER_INFO_FLAG_TIMESTAMP_REQUESTED)) {
+ dev_kfree_skb_any(buffer_info->skb);
+ goto clear_skb;
+ }
+
+ if (cleanup) {
+ lan743x_ptp_unrequest_tx_timestamp(tx->adapter);
+ dev_kfree_skb_any(buffer_info->skb);
+ } else {
+ ignore_sync = (buffer_info->flags &
+ TX_BUFFER_INFO_FLAG_IGNORE_SYNC) != 0;
+ lan743x_ptp_tx_timestamp_skb(tx->adapter,
+ buffer_info->skb, ignore_sync);
+ }
+
+clear_skb:
+ buffer_info->skb = NULL;
+
+clear_active:
+ buffer_info->flags &= ~TX_BUFFER_INFO_FLAG_ACTIVE;
+
+done:
+ memset(buffer_info, 0, sizeof(*buffer_info));
+ memset(descriptor, 0, sizeof(*descriptor));
+}
+
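+/* Advance a ring index with wraparound; e.g. with a ring size of 64,
+ * index 63 wraps back to 0.
+ */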
+static int lan743x_tx_next_index(struct lan743x_tx *tx, int index)
+{
+ return ((++index) % tx->ring_size);
+}
+
+static void lan743x_tx_release_completed_descriptors(struct lan743x_tx *tx)
+{
+ while (le32_to_cpu(*tx->head_cpu_ptr) != (tx->last_head)) {
+ lan743x_tx_release_desc(tx, tx->last_head, false);
+ tx->last_head = lan743x_tx_next_index(tx, tx->last_head);
+ }
+}
+
+static void lan743x_tx_release_all_descriptors(struct lan743x_tx *tx)
+{
+ u32 original_head = 0;
+
+ original_head = tx->last_head;
+ do {
+ lan743x_tx_release_desc(tx, tx->last_head, true);
+ tx->last_head = lan743x_tx_next_index(tx, tx->last_head);
+ } while (tx->last_head != original_head);
+ memset(tx->ring_cpu_ptr, 0,
+ sizeof(*tx->ring_cpu_ptr) * (tx->ring_size));
+ memset(tx->buffer_info, 0,
+ sizeof(*tx->buffer_info) * (tx->ring_size));
+}
+
+static int lan743x_tx_get_desc_cnt(struct lan743x_tx *tx,
+ struct sk_buff *skb)
+{
+ int result = 1; /* 1 for the main skb buffer */
+ int nr_frags = 0;
+
+ if (skb_is_gso(skb))
+ result++; /* requires an extension descriptor */
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ result += nr_frags; /* 1 for each fragment buffer */
+ return result;
+}
+
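+/* Count free descriptors between tail and head. One slot is always
+ * left unused (hence the "- 1") so that a full ring can be told apart
+ * from an empty one.
+ */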
+static int lan743x_tx_get_avail_desc(struct lan743x_tx *tx)
+{
+ int last_head = tx->last_head;
+ int last_tail = tx->last_tail;
+
+ if (last_tail >= last_head)
+ return tx->ring_size - last_tail + last_head - 1;
+ else
+ return last_head - last_tail - 1;
+}
+
+void lan743x_tx_set_timestamping_mode(struct lan743x_tx *tx,
+ bool enable_timestamping,
+ bool enable_onestep_sync)
+{
+ if (enable_timestamping)
+ tx->ts_flags |= TX_TS_FLAG_TIMESTAMPING_ENABLED;
+ else
+ tx->ts_flags &= ~TX_TS_FLAG_TIMESTAMPING_ENABLED;
+ if (enable_onestep_sync)
+ tx->ts_flags |= TX_TS_FLAG_ONE_STEP_SYNC;
+ else
+ tx->ts_flags &= ~TX_TS_FLAG_ONE_STEP_SYNC;
+}
+
+static int lan743x_tx_frame_start(struct lan743x_tx *tx,
+ unsigned char *first_buffer,
+ unsigned int first_buffer_length,
+ unsigned int frame_length,
+ bool time_stamp,
+ bool check_sum)
+{
+ /* called only from within lan743x_tx_xmit_frame.
+ * assuming tx->ring_lock has already been acquired.
+ */
+ struct lan743x_tx_descriptor *tx_descriptor = NULL;
+ struct lan743x_tx_buffer_info *buffer_info = NULL;
+ struct lan743x_adapter *adapter = tx->adapter;
+ struct device *dev = &adapter->pdev->dev;
+ dma_addr_t dma_ptr;
+
+ tx->frame_flags |= TX_FRAME_FLAG_IN_PROGRESS;
+ tx->frame_first = tx->last_tail;
+ tx->frame_tail = tx->frame_first;
+
+ tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
+ buffer_info = &tx->buffer_info[tx->frame_tail];
+ dma_ptr = dma_map_single(dev, first_buffer, first_buffer_length,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, dma_ptr))
+ return -ENOMEM;
+
+ tx_descriptor->data1 = cpu_to_le32(DMA_ADDR_LOW32(dma_ptr));
+ tx_descriptor->data2 = cpu_to_le32(DMA_ADDR_HIGH32(dma_ptr));
+ tx_descriptor->data3 = cpu_to_le32((frame_length << 16) &
+ TX_DESC_DATA3_FRAME_LENGTH_MSS_MASK_);
+
+ buffer_info->skb = NULL;
+ buffer_info->dma_ptr = dma_ptr;
+ buffer_info->buffer_length = first_buffer_length;
+ buffer_info->flags |= TX_BUFFER_INFO_FLAG_ACTIVE;
+
+ tx->frame_data0 = (first_buffer_length &
+ TX_DESC_DATA0_BUF_LENGTH_MASK_) |
+ TX_DESC_DATA0_DTYPE_DATA_ |
+ TX_DESC_DATA0_FS_ |
+ TX_DESC_DATA0_FCS_;
+ if (time_stamp)
+ tx->frame_data0 |= TX_DESC_DATA0_TSE_;
+
+ if (check_sum)
+ tx->frame_data0 |= TX_DESC_DATA0_ICE_ |
+ TX_DESC_DATA0_IPE_ |
+ TX_DESC_DATA0_TPE_;
+
+ /* data0 will be programmed in one of the other frame assembler functions */
+ return 0;
+}
+
+static void lan743x_tx_frame_add_lso(struct lan743x_tx *tx,
+ unsigned int frame_length,
+ int nr_frags)
+{
+ /* called only from within lan743x_tx_xmit_frame.
+ * assuming tx->ring_lock has already been acquired.
+ */
+ struct lan743x_tx_descriptor *tx_descriptor = NULL;
+ struct lan743x_tx_buffer_info *buffer_info = NULL;
+
+ /* wrap up previous descriptor */
+ tx->frame_data0 |= TX_DESC_DATA0_EXT_;
+ if (nr_frags <= 0) {
+ tx->frame_data0 |= TX_DESC_DATA0_LS_;
+ tx->frame_data0 |= TX_DESC_DATA0_IOC_;
+ }
+ tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
+ tx_descriptor->data0 = cpu_to_le32(tx->frame_data0);
+
+ /* move to next descriptor */
+ tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail);
+ tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
+ buffer_info = &tx->buffer_info[tx->frame_tail];
+
+ /* add extension descriptor */
+ tx_descriptor->data1 = 0;
+ tx_descriptor->data2 = 0;
+ tx_descriptor->data3 = 0;
+
+ buffer_info->skb = NULL;
+ buffer_info->dma_ptr = 0;
+ buffer_info->buffer_length = 0;
+ buffer_info->flags |= TX_BUFFER_INFO_FLAG_ACTIVE;
+
+ tx->frame_data0 = (frame_length & TX_DESC_DATA0_EXT_PAY_LENGTH_MASK_) |
+ TX_DESC_DATA0_DTYPE_EXT_ |
+ TX_DESC_DATA0_EXT_LSO_;
+
+ /* data0 will be programmed in one of the other frame assembler functions */
+}
+
+static int lan743x_tx_frame_add_fragment(struct lan743x_tx *tx,
+ const skb_frag_t *fragment,
+ unsigned int frame_length)
+{
+ /* called only from within lan743x_tx_xmit_frame
+ * assuming tx->ring_lock has already been acquired
+ */
+ struct lan743x_tx_descriptor *tx_descriptor = NULL;
+ struct lan743x_tx_buffer_info *buffer_info = NULL;
+ struct lan743x_adapter *adapter = tx->adapter;
+ struct device *dev = &adapter->pdev->dev;
+ unsigned int fragment_length = 0;
+ dma_addr_t dma_ptr;
+
+ fragment_length = skb_frag_size(fragment);
+ if (!fragment_length)
+ return 0;
+
+ /* wrap up previous descriptor */
+ tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
+ tx_descriptor->data0 = cpu_to_le32(tx->frame_data0);
+
+ /* move to next descriptor */
+ tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail);
+ tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
+ buffer_info = &tx->buffer_info[tx->frame_tail];
+ dma_ptr = skb_frag_dma_map(dev, fragment,
+ 0, fragment_length,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, dma_ptr)) {
+ int desc_index;
+
+ /* cleanup all previously setup descriptors */
+ desc_index = tx->frame_first;
+ while (desc_index != tx->frame_tail) {
+ lan743x_tx_release_desc(tx, desc_index, true);
+ desc_index = lan743x_tx_next_index(tx, desc_index);
+ }
+ dma_wmb();
+ tx->frame_flags &= ~TX_FRAME_FLAG_IN_PROGRESS;
+ tx->frame_first = 0;
+ tx->frame_data0 = 0;
+ tx->frame_tail = 0;
+ return -ENOMEM;
+ }
+
+ tx_descriptor->data1 = cpu_to_le32(DMA_ADDR_LOW32(dma_ptr));
+ tx_descriptor->data2 = cpu_to_le32(DMA_ADDR_HIGH32(dma_ptr));
+ tx_descriptor->data3 = cpu_to_le32((frame_length << 16) &
+ TX_DESC_DATA3_FRAME_LENGTH_MSS_MASK_);
+
+ buffer_info->skb = NULL;
+ buffer_info->dma_ptr = dma_ptr;
+ buffer_info->buffer_length = fragment_length;
+ buffer_info->flags |= TX_BUFFER_INFO_FLAG_ACTIVE;
+ buffer_info->flags |= TX_BUFFER_INFO_FLAG_SKB_FRAGMENT;
+
+ tx->frame_data0 = (fragment_length & TX_DESC_DATA0_BUF_LENGTH_MASK_) |
+ TX_DESC_DATA0_DTYPE_DATA_ |
+ TX_DESC_DATA0_FCS_;
+
+ /* data0 will be programmed in one of the other frame assembler functions */
+ return 0;
+}
+
+static void lan743x_tx_frame_end(struct lan743x_tx *tx,
+ struct sk_buff *skb,
+ bool time_stamp,
+ bool ignore_sync)
+{
+ /* called only from within lan743x_tx_xmit_frame
+ * assuming tx->ring_lock has already been acquired
+ */
+ struct lan743x_tx_descriptor *tx_descriptor = NULL;
+ struct lan743x_tx_buffer_info *buffer_info = NULL;
+ struct lan743x_adapter *adapter = tx->adapter;
+ u32 tx_tail_flags = 0;
+
+ /* wrap up previous descriptor */
+ if ((tx->frame_data0 & TX_DESC_DATA0_DTYPE_MASK_) ==
+ TX_DESC_DATA0_DTYPE_DATA_) {
+ tx->frame_data0 |= TX_DESC_DATA0_LS_;
+ tx->frame_data0 |= TX_DESC_DATA0_IOC_;
+ }
+
+ tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
+ buffer_info = &tx->buffer_info[tx->frame_tail];
+ buffer_info->skb = skb;
+ if (time_stamp)
+ buffer_info->flags |= TX_BUFFER_INFO_FLAG_TIMESTAMP_REQUESTED;
+ if (ignore_sync)
+ buffer_info->flags |= TX_BUFFER_INFO_FLAG_IGNORE_SYNC;
+
+ tx_descriptor->data0 = cpu_to_le32(tx->frame_data0);
+ tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail);
+ tx->last_tail = tx->frame_tail;
+
+ dma_wmb();
+
+ if (tx->vector_flags & LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET)
+ tx_tail_flags |= TX_TAIL_SET_TOP_INT_VEC_EN_;
+ if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET)
+ tx_tail_flags |= TX_TAIL_SET_DMAC_INT_EN_ |
+ TX_TAIL_SET_TOP_INT_EN_;
+
+ lan743x_csr_write(adapter, TX_TAIL(tx->channel_number),
+ tx_tail_flags | tx->frame_tail);
+ tx->frame_flags &= ~TX_FRAME_FLAG_IN_PROGRESS;
+}
+
+static netdev_tx_t lan743x_tx_xmit_frame(struct lan743x_tx *tx,
+ struct sk_buff *skb)
+{
+ int required_number_of_descriptors = 0;
+ unsigned int start_frame_length = 0;
+ netdev_tx_t retval = NETDEV_TX_OK;
+ unsigned int frame_length = 0;
+ unsigned int head_length = 0;
+ unsigned long irq_flags = 0;
+ bool do_timestamp = false;
+ bool ignore_sync = false;
+ struct netdev_queue *txq;
+ int nr_frags = 0;
+ bool gso = false;
+ int j;
+
+ required_number_of_descriptors = lan743x_tx_get_desc_cnt(tx, skb);
+
+ spin_lock_irqsave(&tx->ring_lock, irq_flags);
+ if (required_number_of_descriptors >
+ lan743x_tx_get_avail_desc(tx)) {
+ if (required_number_of_descriptors > (tx->ring_size - 1)) {
+ dev_kfree_skb_irq(skb);
+ } else {
+ /* remember how many free descriptors are needed to restart the queue */
+ tx->rqd_descriptors = required_number_of_descriptors;
+ retval = NETDEV_TX_BUSY;
+ txq = netdev_get_tx_queue(tx->adapter->netdev,
+ tx->channel_number);
+ netif_tx_stop_queue(txq);
+ }
+ goto unlock;
+ }
+
+ /* space available, transmit skb */
+ if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ (tx->ts_flags & TX_TS_FLAG_TIMESTAMPING_ENABLED) &&
+ (lan743x_ptp_request_tx_timestamp(tx->adapter))) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ do_timestamp = true;
+ if (tx->ts_flags & TX_TS_FLAG_ONE_STEP_SYNC)
+ ignore_sync = true;
+ }
+ head_length = skb_headlen(skb);
+ frame_length = skb_pagelen(skb);
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ start_frame_length = frame_length;
+ gso = skb_is_gso(skb);
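+ /* for LSO frames the descriptor FRAME_LENGTH/MSS field appears to
+ * carry the MSS (clamped to a minimum of 8) instead of the frame
+ * length
+ */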
+ if (gso) {
+ start_frame_length = max(skb_shinfo(skb)->gso_size,
+ (unsigned short)8);
+ }
+
+ if (lan743x_tx_frame_start(tx,
+ skb->data, head_length,
+ start_frame_length,
+ do_timestamp,
+ skb->ip_summed == CHECKSUM_PARTIAL)) {
+ dev_kfree_skb_irq(skb);
+ goto unlock;
+ }
+ tx->frame_count++;
+
+ if (gso)
+ lan743x_tx_frame_add_lso(tx, frame_length, nr_frags);
+
+ if (nr_frags <= 0)
+ goto finish;
+
+ for (j = 0; j < nr_frags; j++) {
+ const skb_frag_t *frag = &(skb_shinfo(skb)->frags[j]);
+
+ if (lan743x_tx_frame_add_fragment(tx, frag, frame_length)) {
+ /* upon error there is no need to call
+ * lan743x_tx_frame_end; frame assembler
+ * cleanup was already performed inside
+ * lan743x_tx_frame_add_fragment
+ */
+ dev_kfree_skb_irq(skb);
+ goto unlock;
+ }
+ }
+
+finish:
+ lan743x_tx_frame_end(tx, skb, do_timestamp, ignore_sync);
+
+unlock:
+ spin_unlock_irqrestore(&tx->ring_lock, irq_flags);
+ return retval;
+}
+
+static int lan743x_tx_napi_poll(struct napi_struct *napi, int weight)
+{
+ struct lan743x_tx *tx = container_of(napi, struct lan743x_tx, napi);
+ struct lan743x_adapter *adapter = tx->adapter;
+ unsigned long irq_flags = 0;
+ struct netdev_queue *txq;
+ u32 ioc_bit = 0;
+
+ ioc_bit = DMAC_INT_BIT_TX_IOC_(tx->channel_number);
+ lan743x_csr_read(adapter, DMAC_INT_STS);
+ if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C)
+ lan743x_csr_write(adapter, DMAC_INT_STS, ioc_bit);
+ spin_lock_irqsave(&tx->ring_lock, irq_flags);
+
+ /* clean up tx ring */
+ lan743x_tx_release_completed_descriptors(tx);
+ txq = netdev_get_tx_queue(adapter->netdev, tx->channel_number);
+ if (netif_tx_queue_stopped(txq)) {
+ if (tx->rqd_descriptors) {
+ if (tx->rqd_descriptors <=
+ lan743x_tx_get_avail_desc(tx)) {
+ tx->rqd_descriptors = 0;
+ netif_tx_wake_queue(txq);
+ }
+ } else {
+ netif_tx_wake_queue(txq);
+ }
+ }
+ spin_unlock_irqrestore(&tx->ring_lock, irq_flags);
+
+ if (!napi_complete(napi))
+ goto done;
+
+ /* enable isr */
+ lan743x_csr_write(adapter, INT_EN_SET,
+ INT_BIT_DMA_TX_(tx->channel_number));
+ lan743x_csr_read(adapter, INT_STS);
+
+done:
+ return 0;
+}
+
+static void lan743x_tx_ring_cleanup(struct lan743x_tx *tx)
+{
+ if (tx->head_cpu_ptr) {
+ dma_free_coherent(&tx->adapter->pdev->dev,
+ sizeof(*tx->head_cpu_ptr), tx->head_cpu_ptr,
+ tx->head_dma_ptr);
+ tx->head_cpu_ptr = NULL;
+ tx->head_dma_ptr = 0;
+ }
+ kfree(tx->buffer_info);
+ tx->buffer_info = NULL;
+
+ if (tx->ring_cpu_ptr) {
+ dma_free_coherent(&tx->adapter->pdev->dev,
+ tx->ring_allocation_size, tx->ring_cpu_ptr,
+ tx->ring_dma_ptr);
+ tx->ring_allocation_size = 0;
+ tx->ring_cpu_ptr = NULL;
+ tx->ring_dma_ptr = 0;
+ }
+ tx->ring_size = 0;
+}
+
+static int lan743x_tx_ring_init(struct lan743x_tx *tx)
+{
+ size_t ring_allocation_size = 0;
+ void *cpu_ptr = NULL;
+ dma_addr_t dma_ptr;
+ int ret = -ENOMEM;
+
+ tx->ring_size = LAN743X_TX_RING_SIZE;
+ if (tx->ring_size & ~TX_CFG_B_TX_RING_LEN_MASK_) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ if (dma_set_mask_and_coherent(&tx->adapter->pdev->dev,
+ DMA_BIT_MASK(64))) {
+ dev_warn(&tx->adapter->pdev->dev,
+ "lan743x_: No suitable DMA available\n");
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+ ring_allocation_size = ALIGN(tx->ring_size *
+ sizeof(struct lan743x_tx_descriptor),
+ PAGE_SIZE);
+ dma_ptr = 0;
+ cpu_ptr = dma_alloc_coherent(&tx->adapter->pdev->dev,
+ ring_allocation_size, &dma_ptr, GFP_KERNEL);
+ if (!cpu_ptr) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ tx->ring_allocation_size = ring_allocation_size;
+ tx->ring_cpu_ptr = (struct lan743x_tx_descriptor *)cpu_ptr;
+ tx->ring_dma_ptr = dma_ptr;
+
+ cpu_ptr = kcalloc(tx->ring_size, sizeof(*tx->buffer_info), GFP_KERNEL);
+ if (!cpu_ptr) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+ tx->buffer_info = (struct lan743x_tx_buffer_info *)cpu_ptr;
+ dma_ptr = 0;
+ cpu_ptr = dma_alloc_coherent(&tx->adapter->pdev->dev,
+ sizeof(*tx->head_cpu_ptr), &dma_ptr,
+ GFP_KERNEL);
+ if (!cpu_ptr) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ tx->head_cpu_ptr = cpu_ptr;
+ tx->head_dma_ptr = dma_ptr;
+ if (tx->head_dma_ptr & 0x3) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ return 0;
+
+cleanup:
+ lan743x_tx_ring_cleanup(tx);
+ return ret;
+}
+
+static void lan743x_tx_close(struct lan743x_tx *tx)
+{
+ struct lan743x_adapter *adapter = tx->adapter;
+
+ lan743x_csr_write(adapter,
+ DMAC_CMD,
+ DMAC_CMD_STOP_T_(tx->channel_number));
+ lan743x_dmac_tx_wait_till_stopped(adapter, tx->channel_number);
+
+ lan743x_csr_write(adapter,
+ DMAC_INT_EN_CLR,
+ DMAC_INT_BIT_TX_IOC_(tx->channel_number));
+ lan743x_csr_write(adapter, INT_EN_CLR,
+ INT_BIT_DMA_TX_(tx->channel_number));
+ napi_disable(&tx->napi);
+ netif_napi_del(&tx->napi);
+
+ lan743x_csr_write(adapter, FCT_TX_CTL,
+ FCT_TX_CTL_DIS_(tx->channel_number));
+ lan743x_csr_wait_for_bit(adapter, FCT_TX_CTL,
+ FCT_TX_CTL_EN_(tx->channel_number),
+ 0, 1000, 20000, 100);
+
+ lan743x_tx_release_all_descriptors(tx);
+
+ tx->rqd_descriptors = 0;
+
+ lan743x_tx_ring_cleanup(tx);
+}
+
+static int lan743x_tx_open(struct lan743x_tx *tx)
+{
+ struct lan743x_adapter *adapter = NULL;
+ u32 data = 0;
+ int ret;
+
+ adapter = tx->adapter;
+ ret = lan743x_tx_ring_init(tx);
+ if (ret)
+ return ret;
+
+ /* initialize fifo */
+ lan743x_csr_write(adapter, FCT_TX_CTL,
+ FCT_TX_CTL_RESET_(tx->channel_number));
+ lan743x_csr_wait_for_bit(adapter, FCT_TX_CTL,
+ FCT_TX_CTL_RESET_(tx->channel_number),
+ 0, 1000, 20000, 100);
+
+ /* enable fifo */
+ lan743x_csr_write(adapter, FCT_TX_CTL,
+ FCT_TX_CTL_EN_(tx->channel_number));
+
+ /* reset tx channel */
+ lan743x_csr_write(adapter, DMAC_CMD,
+ DMAC_CMD_TX_SWR_(tx->channel_number));
+ lan743x_csr_wait_for_bit(adapter, DMAC_CMD,
+ DMAC_CMD_TX_SWR_(tx->channel_number),
+ 0, 1000, 20000, 100);
+
+ /* Write TX_BASE_ADDR */
+ lan743x_csr_write(adapter,
+ TX_BASE_ADDRH(tx->channel_number),
+ DMA_ADDR_HIGH32(tx->ring_dma_ptr));
+ lan743x_csr_write(adapter,
+ TX_BASE_ADDRL(tx->channel_number),
+ DMA_ADDR_LOW32(tx->ring_dma_ptr));
+
+ /* Write TX_CFG_B */
+ data = lan743x_csr_read(adapter, TX_CFG_B(tx->channel_number));
+ data &= ~TX_CFG_B_TX_RING_LEN_MASK_;
+ data |= ((tx->ring_size) & TX_CFG_B_TX_RING_LEN_MASK_);
+ if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0))
+ data |= TX_CFG_B_TDMABL_512_;
+ lan743x_csr_write(adapter, TX_CFG_B(tx->channel_number), data);
+
+ /* Write TX_CFG_A */
+ data = TX_CFG_A_TX_TMR_HPWB_SEL_IOC_ | TX_CFG_A_TX_HP_WB_EN_;
+ if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) {
+ data |= TX_CFG_A_TX_HP_WB_ON_INT_TMR_;
+ data |= TX_CFG_A_TX_PF_THRES_SET_(0x10);
+ data |= TX_CFG_A_TX_PF_PRI_THRES_SET_(0x04);
+ data |= TX_CFG_A_TX_HP_WB_THRES_SET_(0x07);
+ }
+ lan743x_csr_write(adapter, TX_CFG_A(tx->channel_number), data);
+
+ /* Write TX_HEAD_WRITEBACK_ADDR */
+ lan743x_csr_write(adapter,
+ TX_HEAD_WRITEBACK_ADDRH(tx->channel_number),
+ DMA_ADDR_HIGH32(tx->head_dma_ptr));
+ lan743x_csr_write(adapter,
+ TX_HEAD_WRITEBACK_ADDRL(tx->channel_number),
+ DMA_ADDR_LOW32(tx->head_dma_ptr));
+
+ /* set last head */
+ tx->last_head = lan743x_csr_read(adapter, TX_HEAD(tx->channel_number));
+
+ /* write TX_TAIL */
+ tx->last_tail = 0;
+ lan743x_csr_write(adapter, TX_TAIL(tx->channel_number),
+ (u32)(tx->last_tail));
+ tx->vector_flags = lan743x_intr_get_vector_flags(adapter,
+ INT_BIT_DMA_TX_
+ (tx->channel_number));
+ netif_napi_add_tx_weight(adapter->netdev,
+ &tx->napi, lan743x_tx_napi_poll,
+ NAPI_POLL_WEIGHT);
+ napi_enable(&tx->napi);
+
+ data = 0;
+ if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR)
+ data |= TX_CFG_C_TX_TOP_INT_EN_AUTO_CLR_;
+ if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR)
+ data |= TX_CFG_C_TX_DMA_INT_STS_AUTO_CLR_;
+ if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_R2C)
+ data |= TX_CFG_C_TX_INT_STS_R2C_MODE_MASK_;
+ if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_R2C)
+ data |= TX_CFG_C_TX_INT_EN_R2C_;
+ lan743x_csr_write(adapter, TX_CFG_C(tx->channel_number), data);
+
+ if (!(tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET))
+ lan743x_csr_write(adapter, INT_EN_SET,
+ INT_BIT_DMA_TX_(tx->channel_number));
+ lan743x_csr_write(adapter, DMAC_INT_EN_SET,
+ DMAC_INT_BIT_TX_IOC_(tx->channel_number));
+
+ /* start dmac channel */
+ lan743x_csr_write(adapter, DMAC_CMD,
+ DMAC_CMD_START_T_(tx->channel_number));
+ return 0;
+}
+
+static int lan743x_rx_next_index(struct lan743x_rx *rx, int index)
+{
+ return ((++index) % rx->ring_size);
+}
+
+static void lan743x_rx_update_tail(struct lan743x_rx *rx, int index)
+{
+ /* update the tail once per 8 descriptors */
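+	/* For example, indices 7, 15, 23, ... trigger the write, i.e.
+	 * roughly one RX_TAIL doorbell per eight refilled buffers instead
+	 * of one MMIO write per packet.
+	 */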
+ if ((index & 7) == 7)
+ lan743x_csr_write(rx->adapter, RX_TAIL(rx->channel_number),
+ index);
+}
+
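+/* Attach a freshly allocated receive buffer to ring slot @index and hand
+ * the descriptor back to the hardware.  The buffer is sized for a full
+ * frame (MTU + Ethernet header + FCS) plus RX_HEAD_PADDING for later
+ * header alignment.  If the slot already held a buffer, only the region
+ * the device actually wrote is synced before the old mapping is dropped.
+ */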
+static int lan743x_rx_init_ring_element(struct lan743x_rx *rx, int index,
+ gfp_t gfp)
+{
+ struct net_device *netdev = rx->adapter->netdev;
+ struct device *dev = &rx->adapter->pdev->dev;
+ struct lan743x_rx_buffer_info *buffer_info;
+ unsigned int buffer_length, used_length;
+ struct lan743x_rx_descriptor *descriptor;
+ struct sk_buff *skb;
+ dma_addr_t dma_ptr;
+
+ buffer_length = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + RX_HEAD_PADDING;
+
+ descriptor = &rx->ring_cpu_ptr[index];
+ buffer_info = &rx->buffer_info[index];
+ skb = __netdev_alloc_skb(netdev, buffer_length, gfp);
+ if (!skb)
+ return -ENOMEM;
+ dma_ptr = dma_map_single(dev, skb->data, buffer_length, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, dma_ptr)) {
+ dev_kfree_skb_any(skb);
+ return -ENOMEM;
+ }
+ if (buffer_info->dma_ptr) {
+ /* sync used area of buffer only */
+ if (le32_to_cpu(descriptor->data0) & RX_DESC_DATA0_LS_)
+			/* The frame length is valid only if the LS bit is
+			 * set; it's a safe upper bound for the used area in
+			 * this buffer.
+			 */
+ used_length = min(RX_DESC_DATA0_FRAME_LENGTH_GET_
+ (le32_to_cpu(descriptor->data0)),
+ buffer_info->buffer_length);
+ else
+ used_length = buffer_info->buffer_length;
+ dma_sync_single_for_cpu(dev, buffer_info->dma_ptr,
+ used_length,
+ DMA_FROM_DEVICE);
+ dma_unmap_single_attrs(dev, buffer_info->dma_ptr,
+ buffer_info->buffer_length,
+ DMA_FROM_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ }
+
+ buffer_info->skb = skb;
+ buffer_info->dma_ptr = dma_ptr;
+ buffer_info->buffer_length = buffer_length;
+ descriptor->data1 = cpu_to_le32(DMA_ADDR_LOW32(buffer_info->dma_ptr));
+ descriptor->data2 = cpu_to_le32(DMA_ADDR_HIGH32(buffer_info->dma_ptr));
+ descriptor->data3 = 0;
+ descriptor->data0 = cpu_to_le32((RX_DESC_DATA0_OWN_ |
+ (buffer_length & RX_DESC_DATA0_BUF_LENGTH_MASK_)));
+ lan743x_rx_update_tail(rx, index);
+
+ return 0;
+}
+
+static void lan743x_rx_reuse_ring_element(struct lan743x_rx *rx, int index)
+{
+ struct lan743x_rx_buffer_info *buffer_info;
+ struct lan743x_rx_descriptor *descriptor;
+
+ descriptor = &rx->ring_cpu_ptr[index];
+ buffer_info = &rx->buffer_info[index];
+
+ descriptor->data1 = cpu_to_le32(DMA_ADDR_LOW32(buffer_info->dma_ptr));
+ descriptor->data2 = cpu_to_le32(DMA_ADDR_HIGH32(buffer_info->dma_ptr));
+ descriptor->data3 = 0;
+ descriptor->data0 = cpu_to_le32((RX_DESC_DATA0_OWN_ |
+ ((buffer_info->buffer_length) &
+ RX_DESC_DATA0_BUF_LENGTH_MASK_)));
+ lan743x_rx_update_tail(rx, index);
+}
+
+static void lan743x_rx_release_ring_element(struct lan743x_rx *rx, int index)
+{
+ struct lan743x_rx_buffer_info *buffer_info;
+ struct lan743x_rx_descriptor *descriptor;
+
+ descriptor = &rx->ring_cpu_ptr[index];
+ buffer_info = &rx->buffer_info[index];
+
+ memset(descriptor, 0, sizeof(*descriptor));
+
+ if (buffer_info->dma_ptr) {
+ dma_unmap_single(&rx->adapter->pdev->dev,
+ buffer_info->dma_ptr,
+ buffer_info->buffer_length,
+ DMA_FROM_DEVICE);
+ buffer_info->dma_ptr = 0;
+ }
+
+ if (buffer_info->skb) {
+ dev_kfree_skb(buffer_info->skb);
+ buffer_info->skb = NULL;
+ }
+
+ memset(buffer_info, 0, sizeof(*buffer_info));
+}
+
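+/* The length reported by the hardware includes the 4-byte FCS, which the
+ * stack must not see: clamp negative lengths to zero and trim any excess
+ * bytes the chip delivered beyond the real frame.
+ */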
+static struct sk_buff *
+lan743x_rx_trim_skb(struct sk_buff *skb, int frame_length)
+{
+ if (skb_linearize(skb)) {
+ dev_kfree_skb_irq(skb);
+ return NULL;
+ }
+ frame_length = max_t(int, 0, frame_length - ETH_FCS_LEN);
+ if (skb->len > frame_length) {
+ skb->tail -= skb->len - frame_length;
+ skb->len = frame_length;
+ }
+ return skb;
+}
+
+static int lan743x_rx_process_buffer(struct lan743x_rx *rx)
+{
+ int current_head_index = le32_to_cpu(*rx->head_cpu_ptr);
+ struct lan743x_rx_descriptor *descriptor, *desc_ext;
+ struct net_device *netdev = rx->adapter->netdev;
+ int result = RX_PROCESS_RESULT_NOTHING_TO_DO;
+ struct lan743x_rx_buffer_info *buffer_info;
+ int frame_length, buffer_length;
+ bool is_ice, is_tce, is_icsm;
+ int extension_index = -1;
+ bool is_last, is_first;
+ struct sk_buff *skb;
+
+ if (current_head_index < 0 || current_head_index >= rx->ring_size)
+ goto done;
+
+ if (rx->last_head < 0 || rx->last_head >= rx->ring_size)
+ goto done;
+
+ if (rx->last_head == current_head_index)
+ goto done;
+
+ descriptor = &rx->ring_cpu_ptr[rx->last_head];
+ if (le32_to_cpu(descriptor->data0) & RX_DESC_DATA0_OWN_)
+ goto done;
+ buffer_info = &rx->buffer_info[rx->last_head];
+
+ is_last = le32_to_cpu(descriptor->data0) & RX_DESC_DATA0_LS_;
+ is_first = le32_to_cpu(descriptor->data0) & RX_DESC_DATA0_FS_;
+
+ if (is_last && le32_to_cpu(descriptor->data0) & RX_DESC_DATA0_EXT_) {
+ /* extension is expected to follow */
+ int index = lan743x_rx_next_index(rx, rx->last_head);
+
+ if (index == current_head_index)
+ /* extension not yet available */
+ goto done;
+ desc_ext = &rx->ring_cpu_ptr[index];
+ if (le32_to_cpu(desc_ext->data0) & RX_DESC_DATA0_OWN_)
+ /* extension not yet available */
+ goto done;
+ if (!(le32_to_cpu(desc_ext->data0) & RX_DESC_DATA0_EXT_))
+ goto move_forward;
+ extension_index = index;
+ }
+
+ /* Only the last buffer in a multi-buffer frame contains the total frame
+ * length. The chip occasionally sends more buffers than strictly
+ * required to reach the total frame length.
+ * Handle this by adding all buffers to the skb in their entirety.
+ * Once the real frame length is known, trim the skb.
+ */
+ frame_length =
+ RX_DESC_DATA0_FRAME_LENGTH_GET_(le32_to_cpu(descriptor->data0));
+ buffer_length = buffer_info->buffer_length;
+ is_ice = le32_to_cpu(descriptor->data1) & RX_DESC_DATA1_STATUS_ICE_;
+ is_tce = le32_to_cpu(descriptor->data1) & RX_DESC_DATA1_STATUS_TCE_;
+ is_icsm = le32_to_cpu(descriptor->data1) & RX_DESC_DATA1_STATUS_ICSM_;
+
+ netdev_dbg(netdev, "%s%schunk: %d/%d",
+ is_first ? "first " : " ",
+ is_last ? "last " : " ",
+ frame_length, buffer_length);
+
+ /* save existing skb, allocate new skb and map to dma */
+ skb = buffer_info->skb;
+ if (lan743x_rx_init_ring_element(rx, rx->last_head,
+ GFP_ATOMIC | GFP_DMA)) {
+		/* Failed to allocate a replacement skb; memory is very low.
+		 * Drop this packet and reuse the buffer.
+		 */
+ lan743x_rx_reuse_ring_element(rx, rx->last_head);
+ /* drop packet that was being assembled */
+ dev_kfree_skb_irq(rx->skb_head);
+ rx->skb_head = NULL;
+ goto process_extension;
+ }
+
+ /* add buffers to skb via skb->frag_list */
+ if (is_first) {
+ skb_reserve(skb, RX_HEAD_PADDING);
+ skb_put(skb, buffer_length - RX_HEAD_PADDING);
+ if (rx->skb_head)
+ dev_kfree_skb_irq(rx->skb_head);
+ rx->skb_head = skb;
+ } else if (rx->skb_head) {
+ skb_put(skb, buffer_length);
+ if (skb_shinfo(rx->skb_head)->frag_list)
+ rx->skb_tail->next = skb;
+ else
+ skb_shinfo(rx->skb_head)->frag_list = skb;
+ rx->skb_tail = skb;
+ rx->skb_head->len += skb->len;
+ rx->skb_head->data_len += skb->len;
+ rx->skb_head->truesize += skb->truesize;
+ } else {
+ /* packet to assemble has already been dropped because one or
+ * more of its buffers could not be allocated
+ */
+ netdev_dbg(netdev, "drop buffer intended for dropped packet");
+ dev_kfree_skb_irq(skb);
+ }
+
+process_extension:
+ if (extension_index >= 0) {
+ u32 ts_sec;
+ u32 ts_nsec;
+
+ ts_sec = le32_to_cpu(desc_ext->data1);
+ ts_nsec = (le32_to_cpu(desc_ext->data2) &
+ RX_DESC_DATA2_TS_NS_MASK_);
+ if (rx->skb_head)
+ skb_hwtstamps(rx->skb_head)->hwtstamp =
+ ktime_set(ts_sec, ts_nsec);
+ lan743x_rx_reuse_ring_element(rx, extension_index);
+ rx->last_head = extension_index;
+ netdev_dbg(netdev, "process extension");
+ }
+
+ if (is_last && rx->skb_head)
+ rx->skb_head = lan743x_rx_trim_skb(rx->skb_head, frame_length);
+
+ if (is_last && rx->skb_head) {
+ rx->skb_head->protocol = eth_type_trans(rx->skb_head,
+ rx->adapter->netdev);
+		if (rx->adapter->netdev->features & NETIF_F_RXCSUM) {
+			/* Mark the head skb (the one handed to the stack),
+			 * not the last fragment; for multi-buffer frames
+			 * skb points at the final frag_list member.
+			 */
+			if (!is_ice && !is_tce && !is_icsm)
+				rx->skb_head->ip_summed = CHECKSUM_UNNECESSARY;
+		}
+ netdev_dbg(netdev, "sending %d byte frame to OS",
+ rx->skb_head->len);
+ napi_gro_receive(&rx->napi, rx->skb_head);
+ rx->skb_head = NULL;
+ }
+
+move_forward:
+ /* push tail and head forward */
+ rx->last_tail = rx->last_head;
+ rx->last_head = lan743x_rx_next_index(rx, rx->last_head);
+ result = RX_PROCESS_RESULT_BUFFER_RECEIVED;
+done:
+ return result;
+}
+
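+/* NAPI poll handler.  Per the usual NAPI contract, returning the full
+ * @weight keeps polling scheduled; once the ring drains,
+ * napi_complete_done() is called and interrupts are re-armed -- either
+ * directly via INT_EN_SET or, on variants with the AUTO_SET vector
+ * flags, piggybacked on the RX_TAIL write below.
+ */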
+static int lan743x_rx_napi_poll(struct napi_struct *napi, int weight)
+{
+ struct lan743x_rx *rx = container_of(napi, struct lan743x_rx, napi);
+ struct lan743x_adapter *adapter = rx->adapter;
+ int result = RX_PROCESS_RESULT_NOTHING_TO_DO;
+ u32 rx_tail_flags = 0;
+ int count;
+
+ if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C) {
+ /* clear int status bit before reading packet */
+ lan743x_csr_write(adapter, DMAC_INT_STS,
+ DMAC_INT_BIT_RXFRM_(rx->channel_number));
+ }
+ for (count = 0; count < weight; count++) {
+ result = lan743x_rx_process_buffer(rx);
+ if (result == RX_PROCESS_RESULT_NOTHING_TO_DO)
+ break;
+ }
+ rx->frame_count += count;
+ if (count == weight || result == RX_PROCESS_RESULT_BUFFER_RECEIVED)
+ return weight;
+
+ if (!napi_complete_done(napi, count))
+ return count;
+
+ /* re-arm interrupts, must write to rx tail on some chip variants */
+ if (rx->vector_flags & LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET)
+ rx_tail_flags |= RX_TAIL_SET_TOP_INT_VEC_EN_;
+ if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET) {
+ rx_tail_flags |= RX_TAIL_SET_TOP_INT_EN_;
+ } else {
+ lan743x_csr_write(adapter, INT_EN_SET,
+ INT_BIT_DMA_RX_(rx->channel_number));
+ }
+
+ if (rx_tail_flags)
+ lan743x_csr_write(adapter, RX_TAIL(rx->channel_number),
+ rx_tail_flags | rx->last_tail);
+
+ return count;
+}
+
+static void lan743x_rx_ring_cleanup(struct lan743x_rx *rx)
+{
+ if (rx->buffer_info && rx->ring_cpu_ptr) {
+ int index;
+
+ for (index = 0; index < rx->ring_size; index++)
+ lan743x_rx_release_ring_element(rx, index);
+ }
+
+ if (rx->head_cpu_ptr) {
+ dma_free_coherent(&rx->adapter->pdev->dev,
+ sizeof(*rx->head_cpu_ptr), rx->head_cpu_ptr,
+ rx->head_dma_ptr);
+ rx->head_cpu_ptr = NULL;
+ rx->head_dma_ptr = 0;
+ }
+
+ kfree(rx->buffer_info);
+ rx->buffer_info = NULL;
+
+ if (rx->ring_cpu_ptr) {
+ dma_free_coherent(&rx->adapter->pdev->dev,
+ rx->ring_allocation_size, rx->ring_cpu_ptr,
+ rx->ring_dma_ptr);
+ rx->ring_allocation_size = 0;
+ rx->ring_cpu_ptr = NULL;
+ rx->ring_dma_ptr = 0;
+ }
+
+ rx->ring_size = 0;
+ rx->last_head = 0;
+}
+
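+/* Allocate everything the RX channel needs: a coherent descriptor ring
+ * sized to a page multiple, a kcalloc'd buffer_info shadow array, and a
+ * single-word head-writeback slot that must be 4-byte aligned (checked
+ * below), then prime every slot with a mapped skb.
+ */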
+static int lan743x_rx_ring_init(struct lan743x_rx *rx)
+{
+ size_t ring_allocation_size = 0;
+ dma_addr_t dma_ptr = 0;
+ void *cpu_ptr = NULL;
+ int ret = -ENOMEM;
+ int index = 0;
+
+ rx->ring_size = LAN743X_RX_RING_SIZE;
+ if (rx->ring_size <= 1) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ if (rx->ring_size & ~RX_CFG_B_RX_RING_LEN_MASK_) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ if (dma_set_mask_and_coherent(&rx->adapter->pdev->dev,
+ DMA_BIT_MASK(64))) {
+ dev_warn(&rx->adapter->pdev->dev,
+ "lan743x_: No suitable DMA available\n");
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+ ring_allocation_size = ALIGN(rx->ring_size *
+ sizeof(struct lan743x_rx_descriptor),
+ PAGE_SIZE);
+ dma_ptr = 0;
+ cpu_ptr = dma_alloc_coherent(&rx->adapter->pdev->dev,
+ ring_allocation_size, &dma_ptr, GFP_KERNEL);
+ if (!cpu_ptr) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+ rx->ring_allocation_size = ring_allocation_size;
+ rx->ring_cpu_ptr = (struct lan743x_rx_descriptor *)cpu_ptr;
+ rx->ring_dma_ptr = dma_ptr;
+
+ cpu_ptr = kcalloc(rx->ring_size, sizeof(*rx->buffer_info),
+ GFP_KERNEL);
+ if (!cpu_ptr) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+ rx->buffer_info = (struct lan743x_rx_buffer_info *)cpu_ptr;
+ dma_ptr = 0;
+ cpu_ptr = dma_alloc_coherent(&rx->adapter->pdev->dev,
+ sizeof(*rx->head_cpu_ptr), &dma_ptr,
+ GFP_KERNEL);
+ if (!cpu_ptr) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ rx->head_cpu_ptr = cpu_ptr;
+ rx->head_dma_ptr = dma_ptr;
+ if (rx->head_dma_ptr & 0x3) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ rx->last_head = 0;
+ for (index = 0; index < rx->ring_size; index++) {
+ ret = lan743x_rx_init_ring_element(rx, index, GFP_KERNEL);
+ if (ret)
+ goto cleanup;
+ }
+ return 0;
+
+cleanup:
+ netif_warn(rx->adapter, ifup, rx->adapter->netdev,
+ "Error allocating memory for LAN743x\n");
+
+ lan743x_rx_ring_cleanup(rx);
+ return ret;
+}
+
+static void lan743x_rx_close(struct lan743x_rx *rx)
+{
+ struct lan743x_adapter *adapter = rx->adapter;
+
+ lan743x_csr_write(adapter, FCT_RX_CTL,
+ FCT_RX_CTL_DIS_(rx->channel_number));
+ lan743x_csr_wait_for_bit(adapter, FCT_RX_CTL,
+ FCT_RX_CTL_EN_(rx->channel_number),
+ 0, 1000, 20000, 100);
+
+ lan743x_csr_write(adapter, DMAC_CMD,
+ DMAC_CMD_STOP_R_(rx->channel_number));
+ lan743x_dmac_rx_wait_till_stopped(adapter, rx->channel_number);
+
+ lan743x_csr_write(adapter, DMAC_INT_EN_CLR,
+ DMAC_INT_BIT_RXFRM_(rx->channel_number));
+ lan743x_csr_write(adapter, INT_EN_CLR,
+ INT_BIT_DMA_RX_(rx->channel_number));
+ napi_disable(&rx->napi);
+
+ netif_napi_del(&rx->napi);
+
+ lan743x_rx_ring_cleanup(rx);
+}
+
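+/* Bring the RX channel up: soft-reset the DMA channel, program the ring
+ * and head-writeback addresses, configure RX_CFG_A/B/C, prime the tail
+ * to ring_size - 1, enable NAPI and interrupts, start the DMAC, and
+ * finally reset and enable the RX FIFO.
+ */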
+static int lan743x_rx_open(struct lan743x_rx *rx)
+{
+ struct lan743x_adapter *adapter = rx->adapter;
+ u32 data = 0;
+ int ret;
+
+ rx->frame_count = 0;
+ ret = lan743x_rx_ring_init(rx);
+ if (ret)
+ goto return_error;
+
+ netif_napi_add(adapter->netdev, &rx->napi, lan743x_rx_napi_poll);
+
+ lan743x_csr_write(adapter, DMAC_CMD,
+ DMAC_CMD_RX_SWR_(rx->channel_number));
+ lan743x_csr_wait_for_bit(adapter, DMAC_CMD,
+ DMAC_CMD_RX_SWR_(rx->channel_number),
+ 0, 1000, 20000, 100);
+
+ /* set ring base address */
+ lan743x_csr_write(adapter,
+ RX_BASE_ADDRH(rx->channel_number),
+ DMA_ADDR_HIGH32(rx->ring_dma_ptr));
+ lan743x_csr_write(adapter,
+ RX_BASE_ADDRL(rx->channel_number),
+ DMA_ADDR_LOW32(rx->ring_dma_ptr));
+
+ /* set rx write back address */
+ lan743x_csr_write(adapter,
+ RX_HEAD_WRITEBACK_ADDRH(rx->channel_number),
+ DMA_ADDR_HIGH32(rx->head_dma_ptr));
+ lan743x_csr_write(adapter,
+ RX_HEAD_WRITEBACK_ADDRL(rx->channel_number),
+ DMA_ADDR_LOW32(rx->head_dma_ptr));
+ data = RX_CFG_A_RX_HP_WB_EN_;
+ if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) {
+ data |= (RX_CFG_A_RX_WB_ON_INT_TMR_ |
+ RX_CFG_A_RX_WB_THRES_SET_(0x7) |
+ RX_CFG_A_RX_PF_THRES_SET_(16) |
+ RX_CFG_A_RX_PF_PRI_THRES_SET_(4));
+ }
+
+ /* set RX_CFG_A */
+ lan743x_csr_write(adapter,
+ RX_CFG_A(rx->channel_number), data);
+
+ /* set RX_CFG_B */
+ data = lan743x_csr_read(adapter, RX_CFG_B(rx->channel_number));
+ data &= ~RX_CFG_B_RX_PAD_MASK_;
+ if (!RX_HEAD_PADDING)
+ data |= RX_CFG_B_RX_PAD_0_;
+ else
+ data |= RX_CFG_B_RX_PAD_2_;
+ data &= ~RX_CFG_B_RX_RING_LEN_MASK_;
+ data |= ((rx->ring_size) & RX_CFG_B_RX_RING_LEN_MASK_);
+ data |= RX_CFG_B_TS_ALL_RX_;
+ if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0))
+ data |= RX_CFG_B_RDMABL_512_;
+
+ lan743x_csr_write(adapter, RX_CFG_B(rx->channel_number), data);
+ rx->vector_flags = lan743x_intr_get_vector_flags(adapter,
+ INT_BIT_DMA_RX_
+ (rx->channel_number));
+
+ /* set RX_CFG_C */
+ data = 0;
+ if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR)
+ data |= RX_CFG_C_RX_TOP_INT_EN_AUTO_CLR_;
+ if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR)
+ data |= RX_CFG_C_RX_DMA_INT_STS_AUTO_CLR_;
+ if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_R2C)
+ data |= RX_CFG_C_RX_INT_STS_R2C_MODE_MASK_;
+ if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_R2C)
+ data |= RX_CFG_C_RX_INT_EN_R2C_;
+ lan743x_csr_write(adapter, RX_CFG_C(rx->channel_number), data);
+
+ rx->last_tail = ((u32)(rx->ring_size - 1));
+ lan743x_csr_write(adapter, RX_TAIL(rx->channel_number),
+ rx->last_tail);
+ rx->last_head = lan743x_csr_read(adapter, RX_HEAD(rx->channel_number));
+ if (rx->last_head) {
+ ret = -EIO;
+ goto napi_delete;
+ }
+
+ napi_enable(&rx->napi);
+
+ lan743x_csr_write(adapter, INT_EN_SET,
+ INT_BIT_DMA_RX_(rx->channel_number));
+ lan743x_csr_write(adapter, DMAC_INT_STS,
+ DMAC_INT_BIT_RXFRM_(rx->channel_number));
+ lan743x_csr_write(adapter, DMAC_INT_EN_SET,
+ DMAC_INT_BIT_RXFRM_(rx->channel_number));
+ lan743x_csr_write(adapter, DMAC_CMD,
+ DMAC_CMD_START_R_(rx->channel_number));
+
+ /* initialize fifo */
+ lan743x_csr_write(adapter, FCT_RX_CTL,
+ FCT_RX_CTL_RESET_(rx->channel_number));
+ lan743x_csr_wait_for_bit(adapter, FCT_RX_CTL,
+ FCT_RX_CTL_RESET_(rx->channel_number),
+ 0, 1000, 20000, 100);
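+	/* flow control thresholds: presumably pause is requested once RX
+	 * FIFO occupancy reaches the ON threshold (0x2A) and released when
+	 * it drains below the OFF threshold (0xA)
+	 */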
+ lan743x_csr_write(adapter, FCT_FLOW(rx->channel_number),
+ FCT_FLOW_CTL_REQ_EN_ |
+ FCT_FLOW_CTL_ON_THRESHOLD_SET_(0x2A) |
+ FCT_FLOW_CTL_OFF_THRESHOLD_SET_(0xA));
+
+ /* enable fifo */
+ lan743x_csr_write(adapter, FCT_RX_CTL,
+ FCT_RX_CTL_EN_(rx->channel_number));
+ return 0;
+
+napi_delete:
+ netif_napi_del(&rx->napi);
+ lan743x_rx_ring_cleanup(rx);
+
+return_error:
+ return ret;
+}
+
+static int lan743x_netdev_close(struct net_device *netdev)
+{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+ int index;
+
+ for (index = 0; index < adapter->used_tx_channels; index++)
+ lan743x_tx_close(&adapter->tx[index]);
+
+ for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++)
+ lan743x_rx_close(&adapter->rx[index]);
+
+ lan743x_ptp_close(adapter);
+
+ lan743x_phy_close(adapter);
+
+ lan743x_mac_close(adapter);
+
+ lan743x_intr_close(adapter);
+
+ return 0;
+}
+
+static int lan743x_netdev_open(struct net_device *netdev)
+{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+ int index;
+ int ret;
+
+ ret = lan743x_intr_open(adapter);
+ if (ret)
+ goto return_error;
+
+ ret = lan743x_mac_open(adapter);
+ if (ret)
+ goto close_intr;
+
+ ret = lan743x_phy_open(adapter);
+ if (ret)
+ goto close_mac;
+
+ ret = lan743x_ptp_open(adapter);
+ if (ret)
+ goto close_phy;
+
+ lan743x_rfe_open(adapter);
+
+ for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++) {
+ ret = lan743x_rx_open(&adapter->rx[index]);
+ if (ret)
+ goto close_rx;
+ }
+
+ for (index = 0; index < adapter->used_tx_channels; index++) {
+ ret = lan743x_tx_open(&adapter->tx[index]);
+ if (ret)
+ goto close_tx;
+ }
+ return 0;
+
+close_tx:
+ for (index = 0; index < adapter->used_tx_channels; index++) {
+ if (adapter->tx[index].ring_cpu_ptr)
+ lan743x_tx_close(&adapter->tx[index]);
+ }
+
+close_rx:
+ for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++) {
+ if (adapter->rx[index].ring_cpu_ptr)
+ lan743x_rx_close(&adapter->rx[index]);
+ }
+ lan743x_ptp_close(adapter);
+
+close_phy:
+ lan743x_phy_close(adapter);
+
+close_mac:
+ lan743x_mac_close(adapter);
+
+close_intr:
+ lan743x_intr_close(adapter);
+
+return_error:
+ netif_warn(adapter, ifup, adapter->netdev,
+ "Error opening LAN743x\n");
+ return ret;
+}
+
+static netdev_tx_t lan743x_netdev_xmit_frame(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+ u8 ch = 0;
+
+ if (adapter->is_pci11x1x)
+ ch = skb->queue_mapping % PCI11X1X_USED_TX_CHANNELS;
+
+ return lan743x_tx_xmit_frame(&adapter->tx[ch], skb);
+}
+
+static int lan743x_netdev_ioctl(struct net_device *netdev,
+ struct ifreq *ifr, int cmd)
+{
+ if (!netif_running(netdev))
+ return -EINVAL;
+ if (cmd == SIOCSHWTSTAMP)
+ return lan743x_ptp_ioctl(netdev, ifr, cmd);
+ return phy_mii_ioctl(netdev->phydev, ifr, cmd);
+}
+
+static void lan743x_netdev_set_multicast(struct net_device *netdev)
+{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+
+ lan743x_rfe_set_multicast(adapter);
+}
+
+static int lan743x_netdev_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+ int ret = 0;
+
+ ret = lan743x_mac_set_mtu(adapter, new_mtu);
+ if (!ret)
+ netdev->mtu = new_mtu;
+ return ret;
+}
+
+static void lan743x_netdev_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+
+ stats->rx_packets = lan743x_csr_read(adapter, STAT_RX_TOTAL_FRAMES);
+ stats->tx_packets = lan743x_csr_read(adapter, STAT_TX_TOTAL_FRAMES);
+ stats->rx_bytes = lan743x_csr_read(adapter,
+ STAT_RX_UNICAST_BYTE_COUNT) +
+ lan743x_csr_read(adapter,
+ STAT_RX_BROADCAST_BYTE_COUNT) +
+ lan743x_csr_read(adapter,
+ STAT_RX_MULTICAST_BYTE_COUNT);
+ stats->tx_bytes = lan743x_csr_read(adapter,
+ STAT_TX_UNICAST_BYTE_COUNT) +
+ lan743x_csr_read(adapter,
+ STAT_TX_BROADCAST_BYTE_COUNT) +
+ lan743x_csr_read(adapter,
+ STAT_TX_MULTICAST_BYTE_COUNT);
+ stats->rx_errors = lan743x_csr_read(adapter, STAT_RX_FCS_ERRORS) +
+ lan743x_csr_read(adapter,
+ STAT_RX_ALIGNMENT_ERRORS) +
+ lan743x_csr_read(adapter, STAT_RX_JABBER_ERRORS) +
+ lan743x_csr_read(adapter,
+ STAT_RX_UNDERSIZE_FRAME_ERRORS) +
+ lan743x_csr_read(adapter,
+ STAT_RX_OVERSIZE_FRAME_ERRORS);
+ stats->tx_errors = lan743x_csr_read(adapter, STAT_TX_FCS_ERRORS) +
+ lan743x_csr_read(adapter,
+ STAT_TX_EXCESS_DEFERRAL_ERRORS) +
+ lan743x_csr_read(adapter, STAT_TX_CARRIER_ERRORS);
+ stats->rx_dropped = lan743x_csr_read(adapter,
+ STAT_RX_DROPPED_FRAMES);
+ stats->tx_dropped = lan743x_csr_read(adapter,
+ STAT_TX_EXCESSIVE_COLLISION);
+ stats->multicast = lan743x_csr_read(adapter,
+ STAT_RX_MULTICAST_FRAMES) +
+ lan743x_csr_read(adapter,
+ STAT_TX_MULTICAST_FRAMES);
+ stats->collisions = lan743x_csr_read(adapter,
+ STAT_TX_SINGLE_COLLISIONS) +
+ lan743x_csr_read(adapter,
+ STAT_TX_MULTIPLE_COLLISIONS) +
+ lan743x_csr_read(adapter,
+ STAT_TX_LATE_COLLISIONS);
+}
+
+static int lan743x_netdev_set_mac_address(struct net_device *netdev,
+ void *addr)
+{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+ struct sockaddr *sock_addr = addr;
+ int ret;
+
+ ret = eth_prepare_mac_addr_change(netdev, sock_addr);
+ if (ret)
+ return ret;
+ eth_hw_addr_set(netdev, sock_addr->sa_data);
+ lan743x_mac_set_address(adapter, sock_addr->sa_data);
+ lan743x_rfe_update_mac_address(adapter);
+ return 0;
+}
+
+static const struct net_device_ops lan743x_netdev_ops = {
+ .ndo_open = lan743x_netdev_open,
+ .ndo_stop = lan743x_netdev_close,
+ .ndo_start_xmit = lan743x_netdev_xmit_frame,
+ .ndo_eth_ioctl = lan743x_netdev_ioctl,
+ .ndo_set_rx_mode = lan743x_netdev_set_multicast,
+ .ndo_change_mtu = lan743x_netdev_change_mtu,
+ .ndo_get_stats64 = lan743x_netdev_get_stats64,
+ .ndo_set_mac_address = lan743x_netdev_set_mac_address,
+};
+
+static void lan743x_hardware_cleanup(struct lan743x_adapter *adapter)
+{
+ lan743x_csr_write(adapter, INT_EN_CLR, 0xFFFFFFFF);
+}
+
+static void lan743x_mdiobus_cleanup(struct lan743x_adapter *adapter)
+{
+ mdiobus_unregister(adapter->mdiobus);
+}
+
+static void lan743x_full_cleanup(struct lan743x_adapter *adapter)
+{
+ unregister_netdev(adapter->netdev);
+
+ lan743x_mdiobus_cleanup(adapter);
+ lan743x_hardware_cleanup(adapter);
+ lan743x_pci_cleanup(adapter);
+}
+
+static int lan743x_hardware_init(struct lan743x_adapter *adapter,
+ struct pci_dev *pdev)
+{
+ struct lan743x_tx *tx;
+ int index;
+ int ret;
+
+ adapter->is_pci11x1x = is_pci11x1x_chip(adapter);
+ if (adapter->is_pci11x1x) {
+ adapter->max_tx_channels = PCI11X1X_MAX_TX_CHANNELS;
+ adapter->used_tx_channels = PCI11X1X_USED_TX_CHANNELS;
+ adapter->max_vector_count = PCI11X1X_MAX_VECTOR_COUNT;
+ pci11x1x_strap_get_status(adapter);
+ spin_lock_init(&adapter->eth_syslock_spinlock);
+ mutex_init(&adapter->sgmii_rw_lock);
+ } else {
+ adapter->max_tx_channels = LAN743X_MAX_TX_CHANNELS;
+ adapter->used_tx_channels = LAN743X_USED_TX_CHANNELS;
+ adapter->max_vector_count = LAN743X_MAX_VECTOR_COUNT;
+ }
+
+ adapter->intr.irq = adapter->pdev->irq;
+ lan743x_csr_write(adapter, INT_EN_CLR, 0xFFFFFFFF);
+
+ ret = lan743x_gpio_init(adapter);
+ if (ret)
+ return ret;
+
+ ret = lan743x_mac_init(adapter);
+ if (ret)
+ return ret;
+
+ ret = lan743x_phy_init(adapter);
+ if (ret)
+ return ret;
+
+ ret = lan743x_ptp_init(adapter);
+ if (ret)
+ return ret;
+
+ lan743x_rfe_update_mac_address(adapter);
+
+ ret = lan743x_dmac_init(adapter);
+ if (ret)
+ return ret;
+
+ for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++) {
+ adapter->rx[index].adapter = adapter;
+ adapter->rx[index].channel_number = index;
+ }
+
+ for (index = 0; index < adapter->used_tx_channels; index++) {
+ tx = &adapter->tx[index];
+ tx->adapter = adapter;
+ tx->channel_number = index;
+ spin_lock_init(&tx->ring_lock);
+ }
+
+ return 0;
+}
+
+static int lan743x_mdiobus_init(struct lan743x_adapter *adapter)
+{
+ u32 sgmii_ctl;
+ int ret;
+
+ adapter->mdiobus = devm_mdiobus_alloc(&adapter->pdev->dev);
+ if (!(adapter->mdiobus)) {
+ ret = -ENOMEM;
+ goto return_error;
+ }
+
+ adapter->mdiobus->priv = (void *)adapter;
+ if (adapter->is_pci11x1x) {
+ if (adapter->is_sgmii_en) {
+ sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL);
+ sgmii_ctl |= SGMII_CTL_SGMII_ENABLE_;
+ sgmii_ctl &= ~SGMII_CTL_SGMII_POWER_DN_;
+ lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl);
+ netif_dbg(adapter, drv, adapter->netdev,
+ "SGMII operation\n");
+ adapter->mdiobus->read = lan743x_mdiobus_read_c22;
+ adapter->mdiobus->write = lan743x_mdiobus_write_c22;
+ adapter->mdiobus->read_c45 = lan743x_mdiobus_read_c45;
+ adapter->mdiobus->write_c45 = lan743x_mdiobus_write_c45;
+ adapter->mdiobus->name = "lan743x-mdiobus-c45";
+ netif_dbg(adapter, drv, adapter->netdev,
+ "lan743x-mdiobus-c45\n");
+ } else {
+ sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL);
+ sgmii_ctl &= ~SGMII_CTL_SGMII_ENABLE_;
+ sgmii_ctl |= SGMII_CTL_SGMII_POWER_DN_;
+ lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl);
+ netif_dbg(adapter, drv, adapter->netdev,
+ "RGMII operation\n");
+			/* Only Clause 22 is supported on the RGMII interface */
+ adapter->mdiobus->read = lan743x_mdiobus_read_c22;
+ adapter->mdiobus->write = lan743x_mdiobus_write_c22;
+ adapter->mdiobus->name = "lan743x-mdiobus";
+ netif_dbg(adapter, drv, adapter->netdev,
+ "lan743x-mdiobus\n");
+ }
+ } else {
+ adapter->mdiobus->read = lan743x_mdiobus_read_c22;
+ adapter->mdiobus->write = lan743x_mdiobus_write_c22;
+ adapter->mdiobus->name = "lan743x-mdiobus";
+ netif_dbg(adapter, drv, adapter->netdev, "lan743x-mdiobus\n");
+ }
+
+ snprintf(adapter->mdiobus->id, MII_BUS_ID_SIZE,
+ "pci-%s", pci_name(adapter->pdev));
+
+ if ((adapter->csr.id_rev & ID_REV_ID_MASK_) == ID_REV_ID_LAN7430_)
+ /* LAN7430 uses internal phy at address 1 */
+ adapter->mdiobus->phy_mask = ~(u32)BIT(1);
+
+ /* register mdiobus */
+ ret = mdiobus_register(adapter->mdiobus);
+ if (ret < 0)
+ goto return_error;
+ return 0;
+
+return_error:
+ return ret;
+}
+
+/**
+ * lan743x_pcidev_probe - Device Initialization Routine
+ * @pdev: PCI device information struct
+ * @id: entry in lan743x_pci_tbl
+ *
+ * Initializes an adapter identified by a pci_dev structure: performs the
+ * OS initialization, configures the adapter private structure, and resets
+ * the hardware.
+ *
+ * Returns 0 on success, negative on failure.
+ **/
+static int lan743x_pcidev_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct lan743x_adapter *adapter = NULL;
+ struct net_device *netdev = NULL;
+ int ret = -ENODEV;
+
+ if (id->device == PCI_DEVICE_ID_SMSC_A011 ||
+ id->device == PCI_DEVICE_ID_SMSC_A041) {
+ netdev = devm_alloc_etherdev_mqs(&pdev->dev,
+ sizeof(struct lan743x_adapter),
+ PCI11X1X_USED_TX_CHANNELS,
+ LAN743X_USED_RX_CHANNELS);
+ } else {
+ netdev = devm_alloc_etherdev_mqs(&pdev->dev,
+ sizeof(struct lan743x_adapter),
+ LAN743X_USED_TX_CHANNELS,
+ LAN743X_USED_RX_CHANNELS);
+ }
+
+ if (!netdev)
+ goto return_error;
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+ pci_set_drvdata(pdev, netdev);
+ adapter = netdev_priv(netdev);
+ adapter->netdev = netdev;
+ adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE |
+ NETIF_MSG_LINK | NETIF_MSG_IFUP |
+ NETIF_MSG_IFDOWN | NETIF_MSG_TX_QUEUED;
+ netdev->max_mtu = LAN743X_MAX_FRAME_SIZE;
+
+ of_get_mac_address(pdev->dev.of_node, adapter->mac_address);
+
+ ret = lan743x_pci_init(adapter, pdev);
+ if (ret)
+ goto return_error;
+
+ ret = lan743x_csr_init(adapter);
+ if (ret)
+ goto cleanup_pci;
+
+ ret = lan743x_hardware_init(adapter, pdev);
+ if (ret)
+ goto cleanup_pci;
+
+ ret = lan743x_mdiobus_init(adapter);
+ if (ret)
+ goto cleanup_hardware;
+
+ adapter->netdev->netdev_ops = &lan743x_netdev_ops;
+ adapter->netdev->ethtool_ops = &lan743x_ethtool_ops;
+ adapter->netdev->features = NETIF_F_SG | NETIF_F_TSO |
+ NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
+ adapter->netdev->hw_features = adapter->netdev->features;
+
+ /* carrier off reporting is important to ethtool even BEFORE open */
+ netif_carrier_off(netdev);
+
+ ret = register_netdev(adapter->netdev);
+ if (ret < 0)
+ goto cleanup_mdiobus;
+ return 0;
+
+cleanup_mdiobus:
+ lan743x_mdiobus_cleanup(adapter);
+
+cleanup_hardware:
+ lan743x_hardware_cleanup(adapter);
+
+cleanup_pci:
+ lan743x_pci_cleanup(adapter);
+
+return_error:
+ pr_warn("Initialization failed\n");
+ return ret;
+}
+
+/**
+ * lan743x_pcidev_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * This is called by the PCI subsystem to alert the driver that it should
+ * release a PCI device.  This could be caused by a Hot-Plug event, or
+ * because the driver is going to be removed from memory.
+ **/
+static void lan743x_pcidev_remove(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+
+ lan743x_full_cleanup(adapter);
+}
+
+static void lan743x_pcidev_shutdown(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+
+ rtnl_lock();
+ netif_device_detach(netdev);
+
+	/* Close the netdev only when it is in the running state.  For
+	 * instance, this is true when the system goes to sleep via
+	 * pm-suspend, but false when it sleeps via the suspend GUI menu.
+	 */
+ if (netif_running(netdev))
+ lan743x_netdev_close(netdev);
+ rtnl_unlock();
+
+#ifdef CONFIG_PM
+ pci_save_state(pdev);
+#endif
+
+ /* clean up lan743x portion */
+ lan743x_hardware_cleanup(adapter);
+}
+
+#ifdef CONFIG_PM_SLEEP
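+/* The wakeup-frame filter matches on a CRC-16 of selected frame bytes;
+ * the kernel's crc16() (seeded with 0xFFFF) is bit-reversed here,
+ * presumably to match the bit ordering the MAC_WUF_CFG comparator
+ * expects.
+ */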
+static u16 lan743x_pm_wakeframe_crc16(const u8 *buf, int len)
+{
+ return bitrev16(crc16(0xFFFF, buf, len));
+}
+
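+/* Program the wake-on-LAN filters.  Each MAC_WUF_MASKn word acts as a
+ * per-byte bitmap selecting which frame bytes feed the CRC-16 match:
+ * 0x7 covers the three-byte IPv4 multicast DA prefix, 0x3 the two-byte
+ * IPv6 multicast prefix, and 0x3000 bytes 12-13 (the ARP EtherType).
+ */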
+static void lan743x_pm_set_wol(struct lan743x_adapter *adapter)
+{
+ const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
+ const u8 ipv6_multicast[3] = { 0x33, 0x33 };
+ const u8 arp_type[2] = { 0x08, 0x06 };
+ int mask_index;
+ u32 sopass;
+ u32 pmtctl;
+ u32 wucsr;
+ u32 macrx;
+ u16 crc;
+
+ for (mask_index = 0; mask_index < MAC_NUM_OF_WUF_CFG; mask_index++)
+ lan743x_csr_write(adapter, MAC_WUF_CFG(mask_index), 0);
+
+ /* clear wake settings */
+ pmtctl = lan743x_csr_read(adapter, PMT_CTL);
+ pmtctl |= PMT_CTL_WUPS_MASK_;
+ pmtctl &= ~(PMT_CTL_GPIO_WAKEUP_EN_ | PMT_CTL_EEE_WAKEUP_EN_ |
+ PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_ |
+ PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_ | PMT_CTL_ETH_PHY_WAKE_EN_);
+
+ macrx = lan743x_csr_read(adapter, MAC_RX);
+
+ wucsr = 0;
+ mask_index = 0;
+
+ pmtctl |= PMT_CTL_ETH_PHY_D3_COLD_OVR_ | PMT_CTL_ETH_PHY_D3_OVR_;
+
+ if (adapter->wolopts & WAKE_PHY) {
+ pmtctl |= PMT_CTL_ETH_PHY_EDPD_PLL_CTL_;
+ pmtctl |= PMT_CTL_ETH_PHY_WAKE_EN_;
+ }
+ if (adapter->wolopts & WAKE_MAGIC) {
+ wucsr |= MAC_WUCSR_MPEN_;
+ macrx |= MAC_RX_RXEN_;
+ pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_;
+ }
+ if (adapter->wolopts & WAKE_UCAST) {
+ wucsr |= MAC_WUCSR_RFE_WAKE_EN_ | MAC_WUCSR_PFDA_EN_;
+ macrx |= MAC_RX_RXEN_;
+ pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_;
+ pmtctl |= PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_;
+ }
+ if (adapter->wolopts & WAKE_BCAST) {
+ wucsr |= MAC_WUCSR_RFE_WAKE_EN_ | MAC_WUCSR_BCST_EN_;
+ macrx |= MAC_RX_RXEN_;
+ pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_;
+ pmtctl |= PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_;
+ }
+ if (adapter->wolopts & WAKE_MCAST) {
+ /* IPv4 multicast */
+ crc = lan743x_pm_wakeframe_crc16(ipv4_multicast, 3);
+ lan743x_csr_write(adapter, MAC_WUF_CFG(mask_index),
+ MAC_WUF_CFG_EN_ | MAC_WUF_CFG_TYPE_MCAST_ |
+ (0 << MAC_WUF_CFG_OFFSET_SHIFT_) |
+ (crc & MAC_WUF_CFG_CRC16_MASK_));
+ lan743x_csr_write(adapter, MAC_WUF_MASK0(mask_index), 7);
+ lan743x_csr_write(adapter, MAC_WUF_MASK1(mask_index), 0);
+ lan743x_csr_write(adapter, MAC_WUF_MASK2(mask_index), 0);
+ lan743x_csr_write(adapter, MAC_WUF_MASK3(mask_index), 0);
+ mask_index++;
+
+ /* IPv6 multicast */
+ crc = lan743x_pm_wakeframe_crc16(ipv6_multicast, 2);
+ lan743x_csr_write(adapter, MAC_WUF_CFG(mask_index),
+ MAC_WUF_CFG_EN_ | MAC_WUF_CFG_TYPE_MCAST_ |
+ (0 << MAC_WUF_CFG_OFFSET_SHIFT_) |
+ (crc & MAC_WUF_CFG_CRC16_MASK_));
+ lan743x_csr_write(adapter, MAC_WUF_MASK0(mask_index), 3);
+ lan743x_csr_write(adapter, MAC_WUF_MASK1(mask_index), 0);
+ lan743x_csr_write(adapter, MAC_WUF_MASK2(mask_index), 0);
+ lan743x_csr_write(adapter, MAC_WUF_MASK3(mask_index), 0);
+ mask_index++;
+
+ wucsr |= MAC_WUCSR_RFE_WAKE_EN_ | MAC_WUCSR_WAKE_EN_;
+ macrx |= MAC_RX_RXEN_;
+ pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_;
+ pmtctl |= PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_;
+ }
+ if (adapter->wolopts & WAKE_ARP) {
+		/* set MAC_WUF_CFG & WUF_MASK
+		 * for packet type (offset 12,13) = ARP (0x0806)
+		 */
+ crc = lan743x_pm_wakeframe_crc16(arp_type, 2);
+ lan743x_csr_write(adapter, MAC_WUF_CFG(mask_index),
+ MAC_WUF_CFG_EN_ | MAC_WUF_CFG_TYPE_ALL_ |
+ (0 << MAC_WUF_CFG_OFFSET_SHIFT_) |
+ (crc & MAC_WUF_CFG_CRC16_MASK_));
+ lan743x_csr_write(adapter, MAC_WUF_MASK0(mask_index), 0x3000);
+ lan743x_csr_write(adapter, MAC_WUF_MASK1(mask_index), 0);
+ lan743x_csr_write(adapter, MAC_WUF_MASK2(mask_index), 0);
+ lan743x_csr_write(adapter, MAC_WUF_MASK3(mask_index), 0);
+ mask_index++;
+
+ wucsr |= MAC_WUCSR_RFE_WAKE_EN_ | MAC_WUCSR_WAKE_EN_;
+ macrx |= MAC_RX_RXEN_;
+ pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_;
+ pmtctl |= PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_;
+ }
+
+ if (adapter->wolopts & WAKE_MAGICSECURE) {
+ sopass = *(u32 *)adapter->sopass;
+ lan743x_csr_write(adapter, MAC_MP_SO_LO, sopass);
+ sopass = *(u16 *)&adapter->sopass[4];
+ lan743x_csr_write(adapter, MAC_MP_SO_HI, sopass);
+ wucsr |= MAC_MP_SO_EN_;
+ }
+
+ lan743x_csr_write(adapter, MAC_WUCSR, wucsr);
+ lan743x_csr_write(adapter, PMT_CTL, pmtctl);
+ lan743x_csr_write(adapter, MAC_RX, macrx);
+}
+
+static int lan743x_pm_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+ u32 data;
+
+ lan743x_pcidev_shutdown(pdev);
+
+ /* clear all wakes */
+ lan743x_csr_write(adapter, MAC_WUCSR, 0);
+ lan743x_csr_write(adapter, MAC_WUCSR2, 0);
+ lan743x_csr_write(adapter, MAC_WK_SRC, 0xFFFFFFFF);
+
+ if (adapter->wolopts)
+ lan743x_pm_set_wol(adapter);
+
+ if (adapter->is_pci11x1x) {
+		/* Save HW_CFG so it can be reprogrammed on PM resume */
+ data = lan743x_csr_read(adapter, HW_CFG);
+ adapter->hw_cfg = data;
+ data |= (HW_CFG_RST_PROTECT_PCIE_ |
+ HW_CFG_D3_RESET_DIS_ |
+ HW_CFG_D3_VAUX_OVR_ |
+ HW_CFG_HOT_RESET_DIS_ |
+ HW_CFG_RST_PROTECT_);
+ lan743x_csr_write(adapter, HW_CFG, data);
+ }
+
+	/* Host sets PME_En and puts the device into D3hot */
+ return pci_prepare_to_sleep(pdev);
+}
+
+static int lan743x_pm_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+ int ret;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ pci_save_state(pdev);
+
+ /* Restore HW_CFG that was saved during pm suspend */
+ if (adapter->is_pci11x1x)
+ lan743x_csr_write(adapter, HW_CFG, adapter->hw_cfg);
+
+ ret = lan743x_hardware_init(adapter, pdev);
+ if (ret) {
+ netif_err(adapter, probe, adapter->netdev,
+ "lan743x_hardware_init returned %d\n", ret);
+ lan743x_pci_cleanup(adapter);
+ return ret;
+ }
+
+	/* Open the netdev on resume only when it was in the running state.
+	 * For instance, this is true when the system wakes up after
+	 * pm-suspend, but false when it wakes up after the suspend GUI menu.
+	 */
+ if (netif_running(netdev))
+ lan743x_netdev_open(netdev);
+
+ netif_device_attach(netdev);
+ ret = lan743x_csr_read(adapter, MAC_WK_SRC);
+ netif_info(adapter, drv, adapter->netdev,
+ "Wakeup source : 0x%08X\n", ret);
+
+ return 0;
+}
+
+static const struct dev_pm_ops lan743x_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(lan743x_pm_suspend, lan743x_pm_resume)
+};
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct pci_device_id lan743x_pcidev_tbl[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_LAN7430) },
+ { PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_LAN7431) },
+ { PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_A011) },
+ { PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_A041) },
+ { 0, }
+};
+
+MODULE_DEVICE_TABLE(pci, lan743x_pcidev_tbl);
+
+static struct pci_driver lan743x_pcidev_driver = {
+ .name = DRIVER_NAME,
+ .id_table = lan743x_pcidev_tbl,
+ .probe = lan743x_pcidev_probe,
+ .remove = lan743x_pcidev_remove,
+#ifdef CONFIG_PM_SLEEP
+ .driver.pm = &lan743x_pm_ops,
+#endif
+ .shutdown = lan743x_pcidev_shutdown,
+};
+
+module_pci_driver(lan743x_pcidev_driver);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h
new file mode 100644
index 0000000000..52609fc13a
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan743x_main.h
@@ -0,0 +1,1167 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (C) 2018 Microchip Technology Inc. */
+
+#ifndef _LAN743X_H
+#define _LAN743X_H
+
+#include <linux/phy.h>
+#include "lan743x_ptp.h"
+
+#define DRIVER_AUTHOR "Bryan Whitehead <Bryan.Whitehead@microchip.com>"
+#define DRIVER_DESC "LAN743x PCIe Gigabit Ethernet Driver"
+#define DRIVER_NAME "lan743x"
+
+/* Register Definitions */
+#define ID_REV (0x00)
+#define ID_REV_ID_MASK_ (0xFFFF0000)
+#define ID_REV_ID_LAN7430_ (0x74300000)
+#define ID_REV_ID_LAN7431_ (0x74310000)
+#define ID_REV_ID_LAN743X_ (0x74300000)
+#define ID_REV_ID_A011_ (0xA0110000) // PCI11010
+#define ID_REV_ID_A041_ (0xA0410000) // PCI11414
+#define ID_REV_ID_A0X1_ (0xA0010000)
+#define ID_REV_IS_VALID_CHIP_ID_(id_rev) \
+ ((((id_rev) & 0xFFF00000) == ID_REV_ID_LAN743X_) || \
+ (((id_rev) & 0xFF0F0000) == ID_REV_ID_A0X1_))
+#define ID_REV_CHIP_REV_MASK_ (0x0000FFFF)
+#define ID_REV_CHIP_REV_A0_ (0x00000000)
+#define ID_REV_CHIP_REV_B0_ (0x00000010)
+
+#define FPGA_REV (0x04)
+#define FPGA_REV_GET_MINOR_(fpga_rev) (((fpga_rev) >> 8) & 0x000000FF)
+#define FPGA_REV_GET_MAJOR_(fpga_rev) ((fpga_rev) & 0x000000FF)
+#define FPGA_SGMII_OP BIT(24)
+
+#define STRAP_READ (0x0C)
+#define STRAP_READ_USE_SGMII_EN_ BIT(22)
+#define STRAP_READ_SGMII_EN_ BIT(6)
+#define STRAP_READ_SGMII_REFCLK_ BIT(5)
+#define STRAP_READ_SGMII_2_5G_ BIT(4)
+#define STRAP_READ_BASE_X_ BIT(3)
+#define STRAP_READ_RGMII_TXC_DELAY_EN_ BIT(2)
+#define STRAP_READ_RGMII_RXC_DELAY_EN_ BIT(1)
+#define STRAP_READ_ADV_PM_DISABLE_ BIT(0)
+
+#define HW_CFG (0x010)
+#define HW_CFG_RST_PROTECT_PCIE_ BIT(19)
+#define HW_CFG_HOT_RESET_DIS_ BIT(15)
+#define HW_CFG_D3_VAUX_OVR_ BIT(14)
+#define HW_CFG_D3_RESET_DIS_ BIT(13)
+#define HW_CFG_RST_PROTECT_ BIT(12)
+#define HW_CFG_RELOAD_TYPE_ALL_ (0x00000FC0)
+#define HW_CFG_EE_OTP_RELOAD_ BIT(4)
+#define HW_CFG_LRST_ BIT(1)
+
+#define PMT_CTL (0x014)
+#define PMT_CTL_ETH_PHY_D3_COLD_OVR_ BIT(27)
+#define PMT_CTL_MAC_D3_RX_CLK_OVR_ BIT(25)
+#define PMT_CTL_ETH_PHY_EDPD_PLL_CTL_ BIT(24)
+#define PMT_CTL_ETH_PHY_D3_OVR_ BIT(23)
+#define PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_ BIT(18)
+#define PMT_CTL_GPIO_WAKEUP_EN_ BIT(15)
+#define PMT_CTL_EEE_WAKEUP_EN_ BIT(13)
+#define PMT_CTL_READY_ BIT(7)
+#define PMT_CTL_ETH_PHY_RST_ BIT(4)
+#define PMT_CTL_WOL_EN_ BIT(3)
+#define PMT_CTL_ETH_PHY_WAKE_EN_ BIT(2)
+#define PMT_CTL_WUPS_MASK_ (0x00000003)
+
+#define DP_SEL (0x024)
+#define DP_SEL_DPRDY_ BIT(31)
+#define DP_SEL_MASK_ (0x0000001F)
+#define DP_SEL_RFE_RAM (0x00000001)
+
+#define DP_SEL_VHF_HASH_LEN (16)
+#define DP_SEL_VHF_VLAN_LEN (128)
+
+#define DP_CMD (0x028)
+#define DP_CMD_WRITE_ (0x00000001)
+
+#define DP_ADDR (0x02C)
+
+#define DP_DATA_0 (0x030)
+
+#define E2P_CMD (0x040)
+#define E2P_CMD_EPC_BUSY_ BIT(31)
+#define E2P_CMD_EPC_CMD_WRITE_ (0x30000000)
+#define E2P_CMD_EPC_CMD_EWEN_ (0x20000000)
+#define E2P_CMD_EPC_CMD_READ_ (0x00000000)
+#define E2P_CMD_EPC_TIMEOUT_ BIT(10)
+#define E2P_CMD_EPC_ADDR_MASK_ (0x000001FF)
+
+#define E2P_DATA (0x044)
+
+/* Hearthstone top level & System Reg Addresses */
+#define ETH_CTRL_REG_ADDR_BASE (0x0000)
+#define ETH_SYS_REG_ADDR_BASE (0x4000)
+#define CONFIG_REG_ADDR_BASE (0x0000)
+#define ETH_EEPROM_REG_ADDR_BASE (0x0E00)
+#define ETH_OTP_REG_ADDR_BASE (0x1000)
+#define GEN_SYS_CONFIG_LOAD_STARTED_REG (0x0078)
+#define ETH_SYS_CONFIG_LOAD_STARTED_REG (ETH_SYS_REG_ADDR_BASE + \
+ CONFIG_REG_ADDR_BASE + \
+ GEN_SYS_CONFIG_LOAD_STARTED_REG)
+#define GEN_SYS_LOAD_STARTED_REG_ETH_ BIT(4)
+#define SYS_LOCK_REG (0x00A0)
+#define SYS_LOCK_REG_MAIN_LOCK_ BIT(7)
+#define SYS_LOCK_REG_GEN_PERI_LOCK_ BIT(5)
+#define SYS_LOCK_REG_SPI_PERI_LOCK_ BIT(4)
+#define SYS_LOCK_REG_SMBUS_PERI_LOCK_ BIT(3)
+#define SYS_LOCK_REG_UART_SS_LOCK_ BIT(2)
+#define SYS_LOCK_REG_ENET_SS_LOCK_ BIT(1)
+#define SYS_LOCK_REG_USB_SS_LOCK_ BIT(0)
+#define ETH_SYSTEM_SYS_LOCK_REG (ETH_SYS_REG_ADDR_BASE + \
+ CONFIG_REG_ADDR_BASE + \
+ SYS_LOCK_REG)
+#define HS_EEPROM_REG_ADDR_BASE (ETH_SYS_REG_ADDR_BASE + \
+ ETH_EEPROM_REG_ADDR_BASE)
+#define HS_E2P_CMD (HS_EEPROM_REG_ADDR_BASE + 0x0000)
+#define HS_E2P_CMD_EPC_BUSY_ BIT(31)
+#define HS_E2P_CMD_EPC_CMD_WRITE_ GENMASK(29, 28)
+#define HS_E2P_CMD_EPC_CMD_READ_ (0x0)
+#define HS_E2P_CMD_EPC_TIMEOUT_ BIT(17)
+#define HS_E2P_CMD_EPC_ADDR_MASK_ GENMASK(15, 0)
+#define HS_E2P_DATA (HS_EEPROM_REG_ADDR_BASE + 0x0004)
+#define HS_E2P_DATA_MASK_ GENMASK(7, 0)
+#define HS_E2P_CFG (HS_EEPROM_REG_ADDR_BASE + 0x0008)
+#define HS_E2P_CFG_I2C_PULSE_MASK_ GENMASK(19, 16)
+#define HS_E2P_CFG_EEPROM_SIZE_SEL_ BIT(12)
+#define HS_E2P_CFG_I2C_BAUD_RATE_MASK_ GENMASK(9, 8)
+#define HS_E2P_CFG_TEST_EEPR_TO_BYP_ BIT(0)
+#define HS_E2P_PAD_CTL (HS_EEPROM_REG_ADDR_BASE + 0x000C)
+
+#define GPIO_CFG0 (0x050)
+#define GPIO_CFG0_GPIO_DIR_BIT_(bit) BIT(16 + (bit))
+#define GPIO_CFG0_GPIO_DATA_BIT_(bit) BIT(0 + (bit))
+
+#define GPIO_CFG1 (0x054)
+#define GPIO_CFG1_GPIOEN_BIT_(bit) BIT(16 + (bit))
+#define GPIO_CFG1_GPIOBUF_BIT_(bit) BIT(0 + (bit))
+
+#define GPIO_CFG2 (0x058)
+#define GPIO_CFG2_1588_POL_BIT_(bit) BIT(0 + (bit))
+
+#define GPIO_CFG3 (0x05C)
+#define GPIO_CFG3_1588_CH_SEL_BIT_(bit) BIT(16 + (bit))
+#define GPIO_CFG3_1588_OE_BIT_(bit) BIT(0 + (bit))
+
+#define FCT_RX_CTL (0xAC)
+#define FCT_RX_CTL_EN_(channel) BIT(28 + (channel))
+#define FCT_RX_CTL_DIS_(channel) BIT(24 + (channel))
+#define FCT_RX_CTL_RESET_(channel) BIT(20 + (channel))
+
+#define FCT_TX_CTL (0xC4)
+#define FCT_TX_CTL_EN_(channel) BIT(28 + (channel))
+#define FCT_TX_CTL_DIS_(channel) BIT(24 + (channel))
+#define FCT_TX_CTL_RESET_(channel) BIT(20 + (channel))
+
+#define FCT_FLOW(rx_channel) (0xE0 + ((rx_channel) << 2))
+#define FCT_FLOW_CTL_OFF_THRESHOLD_ (0x00007F00)
+#define FCT_FLOW_CTL_OFF_THRESHOLD_SET_(value) \
+	(((value) << 8) & FCT_FLOW_CTL_OFF_THRESHOLD_)
+#define FCT_FLOW_CTL_REQ_EN_ BIT(7)
+#define FCT_FLOW_CTL_ON_THRESHOLD_ (0x0000007F)
+#define FCT_FLOW_CTL_ON_THRESHOLD_SET_(value) \
+	(((value) << 0) & FCT_FLOW_CTL_ON_THRESHOLD_)
+
+#define MAC_CR (0x100)
+#define MAC_CR_MII_EN_ BIT(19)
+#define MAC_CR_EEE_EN_ BIT(17)
+#define MAC_CR_ADD_ BIT(12)
+#define MAC_CR_ASD_ BIT(11)
+#define MAC_CR_CNTR_RST_ BIT(5)
+#define MAC_CR_DPX_ BIT(3)
+#define MAC_CR_CFG_H_ BIT(2)
+#define MAC_CR_CFG_L_ BIT(1)
+#define MAC_CR_RST_ BIT(0)
+
+#define MAC_RX (0x104)
+#define MAC_RX_MAX_SIZE_SHIFT_ (16)
+#define MAC_RX_MAX_SIZE_MASK_ (0x3FFF0000)
+#define MAC_RX_RXD_ BIT(1)
+#define MAC_RX_RXEN_ BIT(0)
+
+#define MAC_TX (0x108)
+#define MAC_TX_TXD_ BIT(1)
+#define MAC_TX_TXEN_ BIT(0)
+
+#define MAC_FLOW (0x10C)
+#define MAC_FLOW_CR_TX_FCEN_ BIT(30)
+#define MAC_FLOW_CR_RX_FCEN_ BIT(29)
+#define MAC_FLOW_CR_FCPT_MASK_ (0x0000FFFF)
+
+#define MAC_RX_ADDRH (0x118)
+
+#define MAC_RX_ADDRL (0x11C)
+
+#define MAC_MII_ACC (0x120)
+#define MAC_MII_ACC_MDC_CYCLE_SHIFT_ (16)
+#define MAC_MII_ACC_MDC_CYCLE_MASK_ (0x00070000)
+#define MAC_MII_ACC_MDC_CYCLE_2_5MHZ_ (0)
+#define MAC_MII_ACC_MDC_CYCLE_5MHZ_ (1)
+#define MAC_MII_ACC_MDC_CYCLE_12_5MHZ_ (2)
+#define MAC_MII_ACC_MDC_CYCLE_25MHZ_ (3)
+#define MAC_MII_ACC_MDC_CYCLE_1_25MHZ_ (4)
+#define MAC_MII_ACC_PHY_ADDR_SHIFT_ (11)
+#define MAC_MII_ACC_PHY_ADDR_MASK_ (0x0000F800)
+#define MAC_MII_ACC_MIIRINDA_SHIFT_ (6)
+#define MAC_MII_ACC_MIIRINDA_MASK_ (0x000007C0)
+#define MAC_MII_ACC_MII_READ_ (0x00000000)
+#define MAC_MII_ACC_MII_WRITE_ (0x00000002)
+#define MAC_MII_ACC_MII_BUSY_ BIT(0)
+
+#define MAC_MII_ACC_MIIMMD_SHIFT_ (6)
+#define MAC_MII_ACC_MIIMMD_MASK_ (0x000007C0)
+#define MAC_MII_ACC_MIICL45_ BIT(3)
+#define MAC_MII_ACC_MIICMD_MASK_ (0x00000006)
+#define MAC_MII_ACC_MIICMD_ADDR_ (0x00000000)
+#define MAC_MII_ACC_MIICMD_WRITE_ (0x00000002)
+#define MAC_MII_ACC_MIICMD_READ_ (0x00000004)
+#define MAC_MII_ACC_MIICMD_READ_INC_ (0x00000006)
+
+#define MAC_MII_DATA (0x124)
+
+#define MAC_EEE_TX_LPI_REQ_DLY_CNT (0x130)
+
+#define MAC_WUCSR (0x140)
+#define MAC_MP_SO_EN_ BIT(21)
+#define MAC_WUCSR_RFE_WAKE_EN_ BIT(14)
+#define MAC_WUCSR_PFDA_EN_ BIT(3)
+#define MAC_WUCSR_WAKE_EN_ BIT(2)
+#define MAC_WUCSR_MPEN_ BIT(1)
+#define MAC_WUCSR_BCST_EN_ BIT(0)
+
+#define MAC_WK_SRC (0x144)
+#define MAC_MP_SO_HI (0x148)
+#define MAC_MP_SO_LO (0x14C)
+
+#define MAC_WUF_CFG0 (0x150)
+#define MAC_NUM_OF_WUF_CFG (32)
+#define MAC_WUF_CFG_BEGIN (MAC_WUF_CFG0)
+#define MAC_WUF_CFG(index) (MAC_WUF_CFG_BEGIN + (4 * (index)))
+#define MAC_WUF_CFG_EN_ BIT(31)
+#define MAC_WUF_CFG_TYPE_MCAST_ (0x02000000)
+#define MAC_WUF_CFG_TYPE_ALL_ (0x01000000)
+#define MAC_WUF_CFG_OFFSET_SHIFT_ (16)
+#define MAC_WUF_CFG_CRC16_MASK_ (0x0000FFFF)
+
+#define MAC_WUF_MASK0_0 (0x200)
+#define MAC_WUF_MASK0_1 (0x204)
+#define MAC_WUF_MASK0_2 (0x208)
+#define MAC_WUF_MASK0_3 (0x20C)
+#define MAC_WUF_MASK0_BEGIN (MAC_WUF_MASK0_0)
+#define MAC_WUF_MASK1_BEGIN (MAC_WUF_MASK0_1)
+#define MAC_WUF_MASK2_BEGIN (MAC_WUF_MASK0_2)
+#define MAC_WUF_MASK3_BEGIN (MAC_WUF_MASK0_3)
+#define MAC_WUF_MASK0(index) (MAC_WUF_MASK0_BEGIN + (0x10 * (index)))
+#define MAC_WUF_MASK1(index) (MAC_WUF_MASK1_BEGIN + (0x10 * (index)))
+#define MAC_WUF_MASK2(index) (MAC_WUF_MASK2_BEGIN + (0x10 * (index)))
+#define MAC_WUF_MASK3(index) (MAC_WUF_MASK3_BEGIN + (0x10 * (index)))
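+/* the four 32-bit mask words per filter form a byte-select bitmap
+ * covering the first 128 bytes of the frame
+ */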
+
+/* offset 0x400 - 0x500, x may range from 0 to 32, for a total of 33 entries */
+#define RFE_ADDR_FILT_HI(x) (0x400 + (8 * (x)))
+#define RFE_ADDR_FILT_HI_VALID_ BIT(31)
+
+/* offset 0x404 - 0x504, x may range from 0 to 32, for a total of 33 entries */
+#define RFE_ADDR_FILT_LO(x) (0x404 + (8 * (x)))
+
+#define RFE_CTL (0x508)
+#define RFE_CTL_TCP_UDP_COE_ BIT(12)
+#define RFE_CTL_IP_COE_ BIT(11)
+#define RFE_CTL_AB_ BIT(10)
+#define RFE_CTL_AM_ BIT(9)
+#define RFE_CTL_AU_ BIT(8)
+#define RFE_CTL_MCAST_HASH_ BIT(3)
+#define RFE_CTL_DA_PERFECT_ BIT(1)
+
+#define RFE_RSS_CFG (0x554)
+#define RFE_RSS_CFG_UDP_IPV6_EX_ BIT(16)
+#define RFE_RSS_CFG_TCP_IPV6_EX_ BIT(15)
+#define RFE_RSS_CFG_IPV6_EX_ BIT(14)
+#define RFE_RSS_CFG_UDP_IPV6_ BIT(13)
+#define RFE_RSS_CFG_TCP_IPV6_ BIT(12)
+#define RFE_RSS_CFG_IPV6_ BIT(11)
+#define RFE_RSS_CFG_UDP_IPV4_ BIT(10)
+#define RFE_RSS_CFG_TCP_IPV4_ BIT(9)
+#define RFE_RSS_CFG_IPV4_ BIT(8)
+#define RFE_RSS_CFG_VALID_HASH_BITS_ (0x000000E0)
+#define RFE_RSS_CFG_RSS_QUEUE_ENABLE_ BIT(2)
+#define RFE_RSS_CFG_RSS_HASH_STORE_ BIT(1)
+#define RFE_RSS_CFG_RSS_ENABLE_ BIT(0)
+
+#define RFE_HASH_KEY(index)		(0x558 + ((index) << 2))
+
+#define RFE_INDX(index)			(0x580 + ((index) << 2))
+
+#define MAC_WUCSR2 (0x600)
+
+#define SGMII_ACC (0x720)
+#define SGMII_ACC_SGMII_BZY_ BIT(31)
+#define SGMII_ACC_SGMII_WR_ BIT(30)
+#define SGMII_ACC_SGMII_MMD_SHIFT_ (16)
+#define SGMII_ACC_SGMII_MMD_MASK_ GENMASK(20, 16)
+#define SGMII_ACC_SGMII_MMD_VSR_ BIT(15)
+#define SGMII_ACC_SGMII_ADDR_SHIFT_ (0)
+#define SGMII_ACC_SGMII_ADDR_MASK_ GENMASK(15, 0)
+#define SGMII_DATA (0x724)
+#define SGMII_DATA_SHIFT_ (0)
+#define SGMII_DATA_MASK_ GENMASK(15, 0)
+#define SGMII_CTL (0x728)
+#define SGMII_CTL_SGMII_ENABLE_ BIT(31)
+#define SGMII_CTL_LINK_STATUS_SOURCE_ BIT(8)
+#define SGMII_CTL_SGMII_POWER_DN_ BIT(1)
+
+/* Vendor Specific SGMII MMD details */
+#define SR_VSMMD_PCS_ID1 0x0004
+#define SR_VSMMD_PCS_ID2 0x0005
+#define SR_VSMMD_STS 0x0008
+#define SR_VSMMD_CTRL 0x0009
+
+#define VR_MII_DIG_CTRL1 0x8000
+#define VR_MII_DIG_CTRL1_VR_RST_ BIT(15)
+#define VR_MII_DIG_CTRL1_R2TLBE_ BIT(14)
+#define VR_MII_DIG_CTRL1_EN_VSMMD1_ BIT(13)
+#define VR_MII_DIG_CTRL1_CS_EN_ BIT(10)
+#define VR_MII_DIG_CTRL1_MAC_AUTO_SW_ BIT(9)
+#define VR_MII_DIG_CTRL1_INIT_ BIT(8)
+#define VR_MII_DIG_CTRL1_DTXLANED_0_ BIT(4)
+#define VR_MII_DIG_CTRL1_CL37_TMR_OVR_RIDE_ BIT(3)
+#define VR_MII_DIG_CTRL1_EN_2_5G_MODE_ BIT(2)
+#define VR_MII_DIG_CTRL1_BYP_PWRUP_ BIT(1)
+#define VR_MII_DIG_CTRL1_PHY_MODE_CTRL_ BIT(0)
+#define VR_MII_AN_CTRL 0x8001
+#define VR_MII_AN_CTRL_MII_CTRL_ BIT(8)
+#define VR_MII_AN_CTRL_SGMII_LINK_STS_ BIT(4)
+#define VR_MII_AN_CTRL_TX_CONFIG_ BIT(3)
+#define VR_MII_AN_CTRL_1000BASE_X_ (0)
+#define VR_MII_AN_CTRL_SGMII_MODE_ (2)
+#define VR_MII_AN_CTRL_QSGMII_MODE_ (3)
+#define VR_MII_AN_CTRL_PCS_MODE_SHIFT_ (1)
+#define VR_MII_AN_CTRL_PCS_MODE_MASK_ GENMASK(2, 1)
+#define VR_MII_AN_CTRL_MII_AN_INTR_EN_ BIT(0)
+#define VR_MII_AN_INTR_STS 0x8002
+#define VR_MII_AN_INTR_STS_LINK_UP_ BIT(4)
+#define VR_MII_AN_INTR_STS_SPEED_MASK_ GENMASK(3, 2)
+#define VR_MII_AN_INTR_STS_1000_MBPS_ BIT(3)
+#define VR_MII_AN_INTR_STS_100_MBPS_ BIT(2)
+#define VR_MII_AN_INTR_STS_10_MBPS_ (0)
+#define VR_MII_AN_INTR_STS_FDX_ BIT(1)
+#define VR_MII_AN_INTR_STS_CL37_ANCMPLT_INTR_ BIT(0)
+
+#define VR_MII_LINK_TIMER_CTRL 0x800A
+#define VR_MII_DIG_STS 0x8010
+#define VR_MII_DIG_STS_PSEQ_STATE_MASK_ GENMASK(4, 2)
+#define VR_MII_DIG_STS_PSEQ_STATE_POS_ (2)
+#define VR_MII_GEN2_4_MPLL_CTRL0 0x8078
+#define VR_MII_MPLL_CTRL0_REF_CLK_DIV2_ BIT(12)
+#define VR_MII_MPLL_CTRL0_USE_REFCLK_PAD_ BIT(4)
+#define VR_MII_GEN2_4_MPLL_CTRL1 0x8079
+#define VR_MII_MPLL_CTRL1_MPLL_MULTIPLIER_ GENMASK(6, 0)
+#define VR_MII_BAUD_RATE_3P125GBPS (3125)
+#define VR_MII_BAUD_RATE_1P25GBPS (1250)
+#define VR_MII_MPLL_MULTIPLIER_125 (125)
+#define VR_MII_MPLL_MULTIPLIER_100 (100)
+#define VR_MII_MPLL_MULTIPLIER_50 (50)
+#define VR_MII_MPLL_MULTIPLIER_40 (40)
+#define VR_MII_GEN2_4_MISC_CTRL1 0x809A
+#define VR_MII_CTRL1_RX_RATE_0_MASK_ GENMASK(3, 2)
+#define VR_MII_CTRL1_RX_RATE_0_SHIFT_ (2)
+#define VR_MII_CTRL1_TX_RATE_0_MASK_ GENMASK(1, 0)
+#define VR_MII_MPLL_BAUD_CLK (0)
+#define VR_MII_MPLL_BAUD_CLK_DIV_2 (1)
+#define VR_MII_MPLL_BAUD_CLK_DIV_4 (2)
+
+#define INT_STS (0x780)
+#define INT_BIT_DMA_RX_(channel) BIT(24 + (channel))
+#define INT_BIT_ALL_RX_ (0x0F000000)
+#define INT_BIT_DMA_TX_(channel) BIT(16 + (channel))
+#define INT_BIT_ALL_TX_ (0x000F0000)
+#define INT_BIT_SW_GP_ BIT(9)
+#define INT_BIT_1588_ BIT(7)
+#define INT_BIT_ALL_OTHER_ (INT_BIT_SW_GP_ | INT_BIT_1588_)
+#define INT_BIT_MAS_ BIT(0)
+
+#define INT_SET (0x784)
+
+#define INT_EN_SET (0x788)
+
+#define INT_EN_CLR (0x78C)
+
+#define INT_STS_R2C (0x790)
+
+#define INT_VEC_EN_SET (0x794)
+#define INT_VEC_EN_CLR (0x798)
+#define INT_VEC_EN_AUTO_CLR (0x79C)
+#define INT_VEC_EN_(vector_index)	BIT(0 + (vector_index))
+
+#define INT_VEC_MAP0 (0x7A0)
+#define INT_VEC_MAP0_RX_VEC_(channel, vector) \
+ (((u32)(vector)) << ((channel) << 2))
+
+#define INT_VEC_MAP1 (0x7A4)
+#define INT_VEC_MAP1_TX_VEC_(channel, vector) \
+ (((u32)(vector)) << ((channel) << 2))
+
+#define INT_VEC_MAP2 (0x7A8)
+
+#define INT_MOD_MAP0 (0x7B0)
+
+#define INT_MOD_MAP1 (0x7B4)
+
+#define INT_MOD_MAP2 (0x7B8)
+
+#define INT_MOD_CFG0 (0x7C0)
+#define INT_MOD_CFG1 (0x7C4)
+#define INT_MOD_CFG2 (0x7C8)
+#define INT_MOD_CFG3 (0x7CC)
+#define INT_MOD_CFG4 (0x7D0)
+#define INT_MOD_CFG5 (0x7D4)
+#define INT_MOD_CFG6 (0x7D8)
+#define INT_MOD_CFG7 (0x7DC)
+#define INT_MOD_CFG8 (0x7E0)
+#define INT_MOD_CFG9 (0x7E4)
+
+#define PTP_CMD_CTL (0x0A00)
+#define PTP_CMD_CTL_PTP_LTC_TARGET_READ_ BIT(13)
+#define PTP_CMD_CTL_PTP_CLK_STP_NSEC_ BIT(6)
+#define PTP_CMD_CTL_PTP_CLOCK_STEP_SEC_ BIT(5)
+#define PTP_CMD_CTL_PTP_CLOCK_LOAD_ BIT(4)
+#define PTP_CMD_CTL_PTP_CLOCK_READ_ BIT(3)
+#define PTP_CMD_CTL_PTP_ENABLE_ BIT(2)
+#define PTP_CMD_CTL_PTP_DISABLE_ BIT(1)
+#define PTP_CMD_CTL_PTP_RESET_ BIT(0)
+#define PTP_GENERAL_CONFIG (0x0A04)
+#define PTP_GENERAL_CONFIG_CLOCK_EVENT_X_MASK_(channel) \
+ (0x7 << (1 + ((channel) << 2)))
+#define PTP_GENERAL_CONFIG_CLOCK_EVENT_100NS_ (0)
+#define PTP_GENERAL_CONFIG_CLOCK_EVENT_10US_ (1)
+#define PTP_GENERAL_CONFIG_CLOCK_EVENT_100US_ (2)
+#define PTP_GENERAL_CONFIG_CLOCK_EVENT_1MS_ (3)
+#define PTP_GENERAL_CONFIG_CLOCK_EVENT_10MS_ (4)
+#define PTP_GENERAL_CONFIG_CLOCK_EVENT_200MS_ (5)
+#define PTP_GENERAL_CONFIG_CLOCK_EVENT_TOGGLE_ (6)
+#define PTP_GENERAL_CONFIG_CLOCK_EVENT_X_SET_(channel, value) \
+ (((value) & 0x7) << (1 + ((channel) << 2)))
+#define PTP_GENERAL_CONFIG_RELOAD_ADD_X_(channel) (BIT((channel) << 2))
+
+#define HS_PTP_GENERAL_CONFIG (0x0A04)
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_X_MASK_(channel) \
+ (0xf << (4 + ((channel) << 2)))
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_100NS_ (0)
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_500NS_ (1)
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_1US_ (2)
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_5US_ (3)
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_10US_ (4)
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_50US_ (5)
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_100US_ (6)
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_500US_ (7)
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_1MS_ (8)
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_5MS_ (9)
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_10MS_ (10)
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_50MS_ (11)
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_100MS_ (12)
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_200MS_ (13)
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_TOGG_ (14)
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_INT_ (15)
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_X_SET_(channel, value) \
+ (((value) & 0xf) << (4 + ((channel) << 2)))
+#define HS_PTP_GENERAL_CONFIG_EVENT_POL_X_(channel) (BIT(1 + ((channel) * 2)))
+#define HS_PTP_GENERAL_CONFIG_RELOAD_ADD_X_(channel) (BIT((channel) * 2))
+
+#define PTP_INT_STS (0x0A08)
+#define PTP_INT_IO_FE_MASK_ GENMASK(31, 24)
+#define PTP_INT_IO_FE_SHIFT_ (24)
+#define PTP_INT_IO_FE_SET_(channel) BIT(24 + (channel))
+#define PTP_INT_IO_RE_MASK_ GENMASK(23, 16)
+#define PTP_INT_IO_RE_SHIFT_ (16)
+#define PTP_INT_IO_RE_SET_(channel) BIT(16 + (channel))
+#define PTP_INT_TX_TS_OVRFL_INT_ BIT(14)
+#define PTP_INT_TX_SWTS_ERR_INT_ BIT(13)
+#define PTP_INT_TX_TS_INT_ BIT(12)
+#define PTP_INT_RX_TS_OVRFL_INT_ BIT(9)
+#define PTP_INT_RX_TS_INT_ BIT(8)
+#define PTP_INT_TIMER_INT_B_ BIT(1)
+#define PTP_INT_TIMER_INT_A_ BIT(0)
+#define PTP_INT_EN_SET (0x0A0C)
+#define PTP_INT_EN_FE_EN_SET_(channel) BIT(24 + (channel))
+#define PTP_INT_EN_RE_EN_SET_(channel) BIT(16 + (channel))
+#define PTP_INT_EN_TIMER_SET_(channel) BIT(channel)
+#define PTP_INT_EN_CLR (0x0A10)
+#define PTP_INT_EN_FE_EN_CLR_(channel) BIT(24 + (channel))
+#define PTP_INT_EN_RE_EN_CLR_(channel) BIT(16 + (channel))
+#define PTP_INT_BIT_TX_SWTS_ERR_ BIT(13)
+#define PTP_INT_BIT_TX_TS_ BIT(12)
+#define PTP_INT_BIT_TIMER_B_ BIT(1)
+#define PTP_INT_BIT_TIMER_A_ BIT(0)
+
+#define PTP_CLOCK_SEC (0x0A14)
+#define PTP_CLOCK_NS (0x0A18)
+#define PTP_CLOCK_SUBNS (0x0A1C)
+#define PTP_CLOCK_RATE_ADJ (0x0A20)
+#define PTP_CLOCK_RATE_ADJ_DIR_ BIT(31)
+#define PTP_CLOCK_STEP_ADJ (0x0A2C)
+#define PTP_CLOCK_STEP_ADJ_DIR_ BIT(31)
+#define PTP_CLOCK_STEP_ADJ_VALUE_MASK_ (0x3FFFFFFF)
+#define PTP_CLOCK_TARGET_SEC_X(channel) (0x0A30 + ((channel) << 4))
+#define PTP_CLOCK_TARGET_NS_X(channel) (0x0A34 + ((channel) << 4))
+#define PTP_CLOCK_TARGET_RELOAD_SEC_X(channel) (0x0A38 + ((channel) << 4))
+#define PTP_CLOCK_TARGET_RELOAD_NS_X(channel) (0x0A3C + ((channel) << 4))
+#define PTP_LTC_SET_SEC_HI (0x0A50)
+#define PTP_LTC_SET_SEC_HI_SEC_47_32_MASK_ GENMASK(15, 0)
+#define PTP_VERSION (0x0A54)
+#define PTP_VERSION_TX_UP_MASK_ GENMASK(31, 24)
+#define PTP_VERSION_TX_LO_MASK_ GENMASK(23, 16)
+#define PTP_VERSION_RX_UP_MASK_ GENMASK(15, 8)
+#define PTP_VERSION_RX_LO_MASK_ GENMASK(7, 0)
+#define PTP_IO_SEL (0x0A58)
+#define PTP_IO_SEL_MASK_ GENMASK(10, 8)
+#define PTP_IO_SEL_SHIFT_ (8)
+#define PTP_LATENCY (0x0A5C)
+#define PTP_LATENCY_TX_SET_(tx_latency) (((u32)(tx_latency)) << 16)
+#define PTP_LATENCY_RX_SET_(rx_latency) \
+ (((u32)(rx_latency)) & 0x0000FFFF)
+#define PTP_CAP_INFO (0x0A60)
+#define PTP_CAP_INFO_TX_TS_CNT_GET_(reg_val) (((reg_val) & 0x00000070) >> 4)
+
+#define PTP_TX_MOD (0x0AA4)
+#define PTP_TX_MOD_TX_PTP_SYNC_TS_INSERT_ (0x10000000)
+
+#define PTP_TX_MOD2 (0x0AA8)
+#define PTP_TX_MOD2_TX_PTP_CLR_UDPV4_CHKSUM_ (0x00000001)
+
+#define PTP_TX_EGRESS_SEC (0x0AAC)
+#define PTP_TX_EGRESS_NS (0x0AB0)
+#define PTP_TX_EGRESS_NS_CAPTURE_CAUSE_MASK_ (0xC0000000)
+#define PTP_TX_EGRESS_NS_CAPTURE_CAUSE_AUTO_ (0x00000000)
+#define PTP_TX_EGRESS_NS_CAPTURE_CAUSE_SW_ (0x40000000)
+#define PTP_TX_EGRESS_NS_TS_NS_MASK_ (0x3FFFFFFF)
+
+#define PTP_TX_MSG_HEADER (0x0AB4)
+#define PTP_TX_MSG_HEADER_MSG_TYPE_ (0x000F0000)
+#define PTP_TX_MSG_HEADER_MSG_TYPE_SYNC_ (0x00000000)
+
+#define PTP_TX_CAP_INFO (0x0AB8)
+#define PTP_TX_CAP_INFO_TX_CH_MASK_ GENMASK(1, 0)
+#define PTP_TX_DOMAIN (0x0ABC)
+#define PTP_TX_DOMAIN_MASK_ GENMASK(23, 16)
+#define PTP_TX_DOMAIN_RANGE_EN_ BIT(15)
+#define PTP_TX_DOMAIN_RANGE_MASK_ GENMASK(7, 0)
+#define PTP_TX_SDOID (0x0AC0)
+#define PTP_TX_SDOID_MASK_ GENMASK(23, 16)
+#define PTP_TX_SDOID_RANGE_EN_ BIT(15)
+#define PTP_TX_SDOID_11_0_MASK_ GENMASK(7, 0)
+#define PTP_IO_CAP_CONFIG (0x0AC4)
+#define PTP_IO_CAP_CONFIG_LOCK_FE_(channel) BIT(24 + (channel))
+#define PTP_IO_CAP_CONFIG_LOCK_RE_(channel) BIT(16 + (channel))
+#define PTP_IO_CAP_CONFIG_FE_CAP_EN_(channel) BIT(8 + (channel))
+#define PTP_IO_CAP_CONFIG_RE_CAP_EN_(channel) BIT(0 + (channel))
+#define PTP_IO_RE_LTC_SEC_CAP_X (0x0AC8)
+#define PTP_IO_RE_LTC_NS_CAP_X (0x0ACC)
+#define PTP_IO_FE_LTC_SEC_CAP_X (0x0AD0)
+#define PTP_IO_FE_LTC_NS_CAP_X (0x0AD4)
+#define PTP_IO_EVENT_OUTPUT_CFG (0x0AD8)
+#define PTP_IO_EVENT_OUTPUT_CFG_SEL_(channel) BIT(16 + (channel))
+#define PTP_IO_EVENT_OUTPUT_CFG_EN_(channel) BIT(0 + (channel))
+#define PTP_IO_PIN_CFG (0x0ADC)
+#define PTP_IO_PIN_CFG_OBUF_TYPE_(channel) BIT(0 + (channel))
+#define PTP_LTC_RD_SEC_HI (0x0AF0)
+#define PTP_LTC_RD_SEC_HI_SEC_47_32_MASK_ GENMASK(15, 0)
+#define PTP_LTC_RD_SEC_LO (0x0AF4)
+#define PTP_LTC_RD_NS (0x0AF8)
+#define PTP_LTC_RD_NS_29_0_MASK_ GENMASK(29, 0)
+#define PTP_LTC_RD_SUBNS (0x0AFC)
+#define PTP_RX_USER_MAC_HI (0x0B00)
+#define PTP_RX_USER_MAC_HI_47_32_MASK_ GENMASK(15, 0)
+#define PTP_RX_USER_MAC_LO (0x0B04)
+#define PTP_RX_USER_IP_ADDR_0 (0x0B20)
+#define PTP_RX_USER_IP_ADDR_1 (0x0B24)
+#define PTP_RX_USER_IP_ADDR_2 (0x0B28)
+#define PTP_RX_USER_IP_ADDR_3 (0x0B2C)
+#define PTP_RX_USER_IP_MASK_0 (0x0B30)
+#define PTP_RX_USER_IP_MASK_1 (0x0B34)
+#define PTP_RX_USER_IP_MASK_2 (0x0B38)
+#define PTP_RX_USER_IP_MASK_3 (0x0B3C)
+#define PTP_TX_USER_MAC_HI (0x0B40)
+#define PTP_TX_USER_MAC_HI_47_32_MASK_ GENMASK(15, 0)
+#define PTP_TX_USER_MAC_LO (0x0B44)
+#define PTP_TX_USER_IP_ADDR_0 (0x0B60)
+#define PTP_TX_USER_IP_ADDR_1 (0x0B64)
+#define PTP_TX_USER_IP_ADDR_2 (0x0B68)
+#define PTP_TX_USER_IP_ADDR_3 (0x0B6C)
+#define PTP_TX_USER_IP_MASK_0 (0x0B70)
+#define PTP_TX_USER_IP_MASK_1 (0x0B74)
+#define PTP_TX_USER_IP_MASK_2 (0x0B78)
+#define PTP_TX_USER_IP_MASK_3 (0x0B7C)
+
+#define DMAC_CFG (0xC00)
+#define DMAC_CFG_COAL_EN_ BIT(16)
+#define DMAC_CFG_CH_ARB_SEL_RX_HIGH_ (0x00000000)
+#define DMAC_CFG_MAX_READ_REQ_MASK_ (0x00000070)
+#define DMAC_CFG_MAX_READ_REQ_SET_(val) \
+ ((((u32)(val)) << 4) & DMAC_CFG_MAX_READ_REQ_MASK_)
+#define DMAC_CFG_MAX_DSPACE_16_ (0x00000000)
+#define DMAC_CFG_MAX_DSPACE_32_ (0x00000001)
+#define DMAC_CFG_MAX_DSPACE_64_ (0x00000002)
+#define DMAC_CFG_MAX_DSPACE_128_ (0x00000003)
+
+#define DMAC_COAL_CFG (0xC04)
+#define DMAC_COAL_CFG_TIMER_LIMIT_MASK_ (0xFFF00000)
+#define DMAC_COAL_CFG_TIMER_LIMIT_SET_(val) \
+ ((((u32)(val)) << 20) & DMAC_COAL_CFG_TIMER_LIMIT_MASK_)
+#define DMAC_COAL_CFG_TIMER_TX_START_ BIT(19)
+#define DMAC_COAL_CFG_FLUSH_INTS_ BIT(18)
+#define DMAC_COAL_CFG_INT_EXIT_COAL_ BIT(17)
+#define DMAC_COAL_CFG_CSR_EXIT_COAL_ BIT(16)
+#define DMAC_COAL_CFG_TX_THRES_MASK_ (0x0000FF00)
+#define DMAC_COAL_CFG_TX_THRES_SET_(val) \
+ ((((u32)(val)) << 8) & DMAC_COAL_CFG_TX_THRES_MASK_)
+#define DMAC_COAL_CFG_RX_THRES_MASK_ (0x000000FF)
+#define DMAC_COAL_CFG_RX_THRES_SET_(val) \
+ (((u32)(val)) & DMAC_COAL_CFG_RX_THRES_MASK_)
+
+#define DMAC_OBFF_CFG (0xC08)
+#define DMAC_OBFF_TX_THRES_MASK_ (0x0000FF00)
+#define DMAC_OBFF_TX_THRES_SET_(val) \
+ ((((u32)(val)) << 8) & DMAC_OBFF_TX_THRES_MASK_)
+#define DMAC_OBFF_RX_THRES_MASK_ (0x000000FF)
+#define DMAC_OBFF_RX_THRES_SET_(val) \
+ (((u32)(val)) & DMAC_OBFF_RX_THRES_MASK_)
+
+#define DMAC_CMD (0xC0C)
+#define DMAC_CMD_SWR_ BIT(31)
+#define DMAC_CMD_TX_SWR_(channel) BIT(24 + (channel))
+#define DMAC_CMD_START_T_(channel) BIT(20 + (channel))
+#define DMAC_CMD_STOP_T_(channel) BIT(16 + (channel))
+#define DMAC_CMD_RX_SWR_(channel) BIT(8 + (channel))
+#define DMAC_CMD_START_R_(channel) BIT(4 + (channel))
+#define DMAC_CMD_STOP_R_(channel) BIT(0 + (channel))
+
+#define DMAC_INT_STS (0xC10)
+#define DMAC_INT_EN_SET (0xC14)
+#define DMAC_INT_EN_CLR (0xC18)
+#define DMAC_INT_BIT_RXFRM_(channel) BIT(16 + (channel))
+#define DMAC_INT_BIT_TX_IOC_(channel) BIT(0 + (channel))
+
+#define RX_CFG_A(channel) (0xC40 + ((channel) << 6))
+#define RX_CFG_A_RX_WB_ON_INT_TMR_ BIT(30)
+#define RX_CFG_A_RX_WB_THRES_MASK_ (0x1F000000)
+#define RX_CFG_A_RX_WB_THRES_SET_(val) \
+ ((((u32)(val)) << 24) & RX_CFG_A_RX_WB_THRES_MASK_)
+#define RX_CFG_A_RX_PF_THRES_MASK_ (0x001F0000)
+#define RX_CFG_A_RX_PF_THRES_SET_(val) \
+ ((((u32)(val)) << 16) & RX_CFG_A_RX_PF_THRES_MASK_)
+#define RX_CFG_A_RX_PF_PRI_THRES_MASK_ (0x00001F00)
+#define RX_CFG_A_RX_PF_PRI_THRES_SET_(val) \
+ ((((u32)(val)) << 8) & RX_CFG_A_RX_PF_PRI_THRES_MASK_)
+#define RX_CFG_A_RX_HP_WB_EN_ BIT(5)
+
+#define RX_CFG_B(channel) (0xC44 + ((channel) << 6))
+#define RX_CFG_B_TS_ALL_RX_ BIT(29)
+#define RX_CFG_B_RX_PAD_MASK_ (0x03000000)
+#define RX_CFG_B_RX_PAD_0_ (0x00000000)
+#define RX_CFG_B_RX_PAD_2_ (0x02000000)
+#define RX_CFG_B_RDMABL_512_ (0x00040000)
+#define RX_CFG_B_RX_RING_LEN_MASK_ (0x0000FFFF)
+
+#define RX_BASE_ADDRH(channel) (0xC48 + ((channel) << 6))
+
+#define RX_BASE_ADDRL(channel) (0xC4C + ((channel) << 6))
+
+#define RX_HEAD_WRITEBACK_ADDRH(channel) (0xC50 + ((channel) << 6))
+
+#define RX_HEAD_WRITEBACK_ADDRL(channel) (0xC54 + ((channel) << 6))
+
+#define RX_HEAD(channel) (0xC58 + ((channel) << 6))
+
+#define RX_TAIL(channel) (0xC5C + ((channel) << 6))
+#define RX_TAIL_SET_TOP_INT_EN_ BIT(30)
+#define RX_TAIL_SET_TOP_INT_VEC_EN_ BIT(29)
+
+#define RX_CFG_C(channel) (0xC64 + ((channel) << 6))
+#define RX_CFG_C_RX_TOP_INT_EN_AUTO_CLR_ BIT(6)
+#define RX_CFG_C_RX_INT_EN_R2C_ BIT(4)
+#define RX_CFG_C_RX_DMA_INT_STS_AUTO_CLR_ BIT(3)
+#define RX_CFG_C_RX_INT_STS_R2C_MODE_MASK_ (0x00000007)
+
+#define TX_CFG_A(channel) (0xD40 + ((channel) << 6))
+#define TX_CFG_A_TX_HP_WB_ON_INT_TMR_ BIT(30)
+#define TX_CFG_A_TX_TMR_HPWB_SEL_IOC_ (0x10000000)
+#define TX_CFG_A_TX_PF_THRES_MASK_ (0x001F0000)
+#define TX_CFG_A_TX_PF_THRES_SET_(value) \
+ ((((u32)(value)) << 16) & TX_CFG_A_TX_PF_THRES_MASK_)
+#define TX_CFG_A_TX_PF_PRI_THRES_MASK_ (0x00001F00)
+#define TX_CFG_A_TX_PF_PRI_THRES_SET_(value) \
+ ((((u32)(value)) << 8) & TX_CFG_A_TX_PF_PRI_THRES_MASK_)
+#define TX_CFG_A_TX_HP_WB_EN_ BIT(5)
+#define TX_CFG_A_TX_HP_WB_THRES_MASK_ (0x0000000F)
+#define TX_CFG_A_TX_HP_WB_THRES_SET_(value) \
+ (((u32)(value)) & TX_CFG_A_TX_HP_WB_THRES_MASK_)
+
+#define TX_CFG_B(channel) (0xD44 + ((channel) << 6))
+#define TX_CFG_B_TDMABL_512_ (0x00040000)
+#define TX_CFG_B_TX_RING_LEN_MASK_ (0x0000FFFF)
+
+#define TX_BASE_ADDRH(channel) (0xD48 + ((channel) << 6))
+
+#define TX_BASE_ADDRL(channel) (0xD4C + ((channel) << 6))
+
+#define TX_HEAD_WRITEBACK_ADDRH(channel) (0xD50 + ((channel) << 6))
+
+#define TX_HEAD_WRITEBACK_ADDRL(channel) (0xD54 + ((channel) << 6))
+
+#define TX_HEAD(channel) (0xD58 + ((channel) << 6))
+
+#define TX_TAIL(channel) (0xD5C + ((channel) << 6))
+#define TX_TAIL_SET_DMAC_INT_EN_ BIT(31)
+#define TX_TAIL_SET_TOP_INT_EN_ BIT(30)
+#define TX_TAIL_SET_TOP_INT_VEC_EN_ BIT(29)
+
+#define TX_CFG_C(channel) (0xD64 + ((channel) << 6))
+#define TX_CFG_C_TX_TOP_INT_EN_AUTO_CLR_ BIT(6)
+#define TX_CFG_C_TX_DMA_INT_EN_AUTO_CLR_ BIT(5)
+#define TX_CFG_C_TX_INT_EN_R2C_ BIT(4)
+#define TX_CFG_C_TX_DMA_INT_STS_AUTO_CLR_ BIT(3)
+#define TX_CFG_C_TX_INT_STS_R2C_MODE_MASK_ (0x00000007)
+
+#define OTP_PWR_DN (0x1000)
+#define OTP_PWR_DN_PWRDN_N_ BIT(0)
+
+#define OTP_ADDR_HIGH (0x1004)
+#define OTP_ADDR_LOW (0x1008)
+
+#define OTP_PRGM_DATA (0x1010)
+
+#define OTP_PRGM_MODE (0x1014)
+#define OTP_PRGM_MODE_BYTE_ BIT(0)
+
+#define OTP_READ_DATA (0x1018)
+
+#define OTP_FUNC_CMD (0x1020)
+#define OTP_FUNC_CMD_READ_ BIT(0)
+
+#define OTP_TST_CMD (0x1024)
+#define OTP_TST_CMD_PRGVRFY_ BIT(3)
+
+#define OTP_CMD_GO (0x1028)
+#define OTP_CMD_GO_GO_ BIT(0)
+
+#define OTP_STATUS (0x1030)
+#define OTP_STATUS_BUSY_ BIT(0)
+
+/* Hearthstone OTP block registers */
+#define HS_OTP_BLOCK_BASE (ETH_SYS_REG_ADDR_BASE + \
+ ETH_OTP_REG_ADDR_BASE)
+#define HS_OTP_PWR_DN (HS_OTP_BLOCK_BASE + 0x0)
+#define HS_OTP_ADDR_HIGH (HS_OTP_BLOCK_BASE + 0x4)
+#define HS_OTP_ADDR_LOW (HS_OTP_BLOCK_BASE + 0x8)
+#define HS_OTP_PRGM_DATA (HS_OTP_BLOCK_BASE + 0x10)
+#define HS_OTP_PRGM_MODE (HS_OTP_BLOCK_BASE + 0x14)
+#define HS_OTP_READ_DATA (HS_OTP_BLOCK_BASE + 0x18)
+#define HS_OTP_FUNC_CMD (HS_OTP_BLOCK_BASE + 0x20)
+#define HS_OTP_TST_CMD (HS_OTP_BLOCK_BASE + 0x24)
+#define HS_OTP_CMD_GO (HS_OTP_BLOCK_BASE + 0x28)
+#define HS_OTP_STATUS (HS_OTP_BLOCK_BASE + 0x30)
+
+/* MAC statistics registers */
+#define STAT_RX_FCS_ERRORS (0x1200)
+#define STAT_RX_ALIGNMENT_ERRORS (0x1204)
+#define STAT_RX_FRAGMENT_ERRORS (0x1208)
+#define STAT_RX_JABBER_ERRORS (0x120C)
+#define STAT_RX_UNDERSIZE_FRAME_ERRORS (0x1210)
+#define STAT_RX_OVERSIZE_FRAME_ERRORS (0x1214)
+#define STAT_RX_DROPPED_FRAMES (0x1218)
+#define STAT_RX_UNICAST_BYTE_COUNT (0x121C)
+#define STAT_RX_BROADCAST_BYTE_COUNT (0x1220)
+#define STAT_RX_MULTICAST_BYTE_COUNT (0x1224)
+#define STAT_RX_UNICAST_FRAMES (0x1228)
+#define STAT_RX_BROADCAST_FRAMES (0x122C)
+#define STAT_RX_MULTICAST_FRAMES (0x1230)
+#define STAT_RX_PAUSE_FRAMES (0x1234)
+#define STAT_RX_64_BYTE_FRAMES (0x1238)
+#define STAT_RX_65_127_BYTE_FRAMES (0x123C)
+#define STAT_RX_128_255_BYTE_FRAMES (0x1240)
+#define STAT_RX_256_511_BYTES_FRAMES (0x1244)
+#define STAT_RX_512_1023_BYTE_FRAMES (0x1248)
+#define STAT_RX_1024_1518_BYTE_FRAMES (0x124C)
+#define STAT_RX_GREATER_1518_BYTE_FRAMES (0x1250)
+#define STAT_RX_TOTAL_FRAMES (0x1254)
+#define STAT_EEE_RX_LPI_TRANSITIONS (0x1258)
+#define STAT_EEE_RX_LPI_TIME (0x125C)
+#define STAT_RX_COUNTER_ROLLOVER_STATUS (0x127C)
+
+#define STAT_TX_FCS_ERRORS (0x1280)
+#define STAT_TX_EXCESS_DEFERRAL_ERRORS (0x1284)
+#define STAT_TX_CARRIER_ERRORS (0x1288)
+#define STAT_TX_BAD_BYTE_COUNT (0x128C)
+#define STAT_TX_SINGLE_COLLISIONS (0x1290)
+#define STAT_TX_MULTIPLE_COLLISIONS (0x1294)
+#define STAT_TX_EXCESSIVE_COLLISION (0x1298)
+#define STAT_TX_LATE_COLLISIONS (0x129C)
+#define STAT_TX_UNICAST_BYTE_COUNT (0x12A0)
+#define STAT_TX_BROADCAST_BYTE_COUNT (0x12A4)
+#define STAT_TX_MULTICAST_BYTE_COUNT (0x12A8)
+#define STAT_TX_UNICAST_FRAMES (0x12AC)
+#define STAT_TX_BROADCAST_FRAMES (0x12B0)
+#define STAT_TX_MULTICAST_FRAMES (0x12B4)
+#define STAT_TX_PAUSE_FRAMES (0x12B8)
+#define STAT_TX_64_BYTE_FRAMES (0x12BC)
+#define STAT_TX_65_127_BYTE_FRAMES (0x12C0)
+#define STAT_TX_128_255_BYTE_FRAMES (0x12C4)
+#define STAT_TX_256_511_BYTES_FRAMES (0x12C8)
+#define STAT_TX_512_1023_BYTE_FRAMES (0x12CC)
+#define STAT_TX_1024_1518_BYTE_FRAMES (0x12D0)
+#define STAT_TX_GREATER_1518_BYTE_FRAMES (0x12D4)
+#define STAT_TX_TOTAL_FRAMES (0x12D8)
+#define STAT_EEE_TX_LPI_TRANSITIONS (0x12DC)
+#define STAT_EEE_TX_LPI_TIME (0x12E0)
+#define STAT_TX_COUNTER_ROLLOVER_STATUS (0x12FC)
+
+/* End of Register definitions */
+
+#define LAN743X_MAX_RX_CHANNELS (4)
+#define LAN743X_MAX_TX_CHANNELS (1)
+#define PCI11X1X_MAX_TX_CHANNELS (4)
+struct lan743x_adapter;
+
+#define LAN743X_USED_RX_CHANNELS (4)
+#define LAN743X_USED_TX_CHANNELS (1)
+#define PCI11X1X_USED_TX_CHANNELS (4)
+#define LAN743X_INT_MOD (400)
+
+#if (LAN743X_USED_RX_CHANNELS > LAN743X_MAX_RX_CHANNELS)
+#error Invalid LAN743X_USED_RX_CHANNELS
+#endif
+#if (LAN743X_USED_TX_CHANNELS > LAN743X_MAX_TX_CHANNELS)
+#error Invalid LAN743X_USED_TX_CHANNELS
+#endif
+#if (PCI11X1X_USED_TX_CHANNELS > PCI11X1X_MAX_TX_CHANNELS)
+#error Invalid PCI11X1X_USED_TX_CHANNELS
+#endif
+
+/* PCI */
+/* SMSC acquired EFAR in the late 1990s; MCHP acquired SMSC in 2012 */
+#define PCI_VENDOR_ID_SMSC PCI_VENDOR_ID_EFAR
+#define PCI_DEVICE_ID_SMSC_LAN7430 (0x7430)
+#define PCI_DEVICE_ID_SMSC_LAN7431 (0x7431)
+#define PCI_DEVICE_ID_SMSC_A011 (0xA011)
+#define PCI_DEVICE_ID_SMSC_A041 (0xA041)
+
+#define PCI_CONFIG_LENGTH (0x1000)
+
+/* CSR */
+#define CSR_LENGTH (0x2000)
+
+#define LAN743X_CSR_FLAG_IS_A0 BIT(0)
+#define LAN743X_CSR_FLAG_IS_B0 BIT(1)
+#define LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR BIT(8)
+
+struct lan743x_csr {
+ u32 flags;
+ u8 __iomem *csr_address;
+ u32 id_rev;
+ u32 fpga_rev;
+};
+
+/* INTERRUPTS */
+typedef void (*lan743x_vector_handler)(void *context, u32 int_sts, u32 flags);
+
+#define LAN743X_VECTOR_FLAG_IRQ_SHARED BIT(0)
+#define LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ BIT(1)
+#define LAN743X_VECTOR_FLAG_SOURCE_STATUS_R2C BIT(2)
+#define LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C BIT(3)
+#define LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK BIT(4)
+#define LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR BIT(5)
+#define LAN743X_VECTOR_FLAG_SOURCE_ENABLE_R2C BIT(6)
+#define LAN743X_VECTOR_FLAG_MASTER_ENABLE_CLEAR BIT(7)
+#define LAN743X_VECTOR_FLAG_MASTER_ENABLE_SET BIT(8)
+#define LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_CLEAR BIT(9)
+#define LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_SET BIT(10)
+#define LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_CLEAR BIT(11)
+#define LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET BIT(12)
+#define LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR BIT(13)
+#define LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET BIT(14)
+#define LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR BIT(15)
+
+struct lan743x_vector {
+ int irq;
+ u32 flags;
+ struct lan743x_adapter *adapter;
+ int vector_index;
+ u32 int_mask;
+ lan743x_vector_handler handler;
+ void *context;
+};
+
+#define LAN743X_MAX_VECTOR_COUNT (8)
+#define PCI11X1X_MAX_VECTOR_COUNT (16)
+
+struct lan743x_intr {
+ int flags;
+
+ unsigned int irq;
+
+ struct lan743x_vector vector_list[PCI11X1X_MAX_VECTOR_COUNT];
+ int number_of_vectors;
+ bool using_vectors;
+
+ bool software_isr_flag;
+ wait_queue_head_t software_isr_wq;
+};
+
+#define LAN743X_MAX_FRAME_SIZE (9 * 1024)
+
+/* PHY */
+struct lan743x_phy {
+ bool fc_autoneg;
+ u8 fc_request_control;
+};
+
+/* TX */
+struct lan743x_tx_descriptor;
+struct lan743x_tx_buffer_info;
+
+#define GPIO_QUEUE_STARTED (0)
+#define GPIO_TX_FUNCTION (1)
+#define GPIO_TX_COMPLETION (2)
+#define GPIO_TX_FRAGMENT (3)
+
+#define TX_FRAME_FLAG_IN_PROGRESS BIT(0)
+
+#define TX_TS_FLAG_TIMESTAMPING_ENABLED BIT(0)
+#define TX_TS_FLAG_ONE_STEP_SYNC BIT(1)
+
+struct lan743x_tx {
+ struct lan743x_adapter *adapter;
+ u32 ts_flags;
+ u32 vector_flags;
+ int channel_number;
+
+ int ring_size;
+ size_t ring_allocation_size;
+ struct lan743x_tx_descriptor *ring_cpu_ptr;
+ dma_addr_t ring_dma_ptr;
+ /* ring_lock: used to prevent concurrent access to tx ring */
+ spinlock_t ring_lock;
+ u32 frame_flags;
+ u32 frame_first;
+ u32 frame_data0;
+ u32 frame_tail;
+
+ struct lan743x_tx_buffer_info *buffer_info;
+
+ __le32 *head_cpu_ptr;
+ dma_addr_t head_dma_ptr;
+ int last_head;
+ int last_tail;
+
+ struct napi_struct napi;
+ u32 frame_count;
+ u32 rqd_descriptors;
+};
+
+void lan743x_tx_set_timestamping_mode(struct lan743x_tx *tx,
+ bool enable_timestamping,
+ bool enable_onestep_sync);
+
+/* RX */
+struct lan743x_rx_descriptor;
+struct lan743x_rx_buffer_info;
+
+struct lan743x_rx {
+ struct lan743x_adapter *adapter;
+ u32 vector_flags;
+ int channel_number;
+
+ int ring_size;
+ size_t ring_allocation_size;
+ struct lan743x_rx_descriptor *ring_cpu_ptr;
+ dma_addr_t ring_dma_ptr;
+
+ struct lan743x_rx_buffer_info *buffer_info;
+
+ __le32 *head_cpu_ptr;
+ dma_addr_t head_dma_ptr;
+ u32 last_head;
+ u32 last_tail;
+
+ struct napi_struct napi;
+
+ u32 frame_count;
+
+ struct sk_buff *skb_head, *skb_tail;
+};
+
+/* SGMII Link Speed Duplex status */
+enum lan743x_sgmii_lsd {
+ POWER_DOWN = 0,
+ LINK_DOWN,
+ ANEG_BUSY,
+ LINK_10HD,
+ LINK_10FD,
+ LINK_100HD,
+ LINK_100FD,
+ LINK_1000_MASTER,
+ LINK_1000_SLAVE,
+ LINK_2500_MASTER,
+ LINK_2500_SLAVE
+};
+
+struct lan743x_adapter {
+ struct net_device *netdev;
+ struct mii_bus *mdiobus;
+ int msg_enable;
+#ifdef CONFIG_PM
+ u32 wolopts;
+ u8 sopass[SOPASS_MAX];
+#endif
+ struct pci_dev *pdev;
+ struct lan743x_csr csr;
+ struct lan743x_intr intr;
+
+ struct lan743x_gpio gpio;
+ struct lan743x_ptp ptp;
+
+ u8 mac_address[ETH_ALEN];
+
+ struct lan743x_phy phy;
+ struct lan743x_tx tx[PCI11X1X_USED_TX_CHANNELS];
+ struct lan743x_rx rx[LAN743X_USED_RX_CHANNELS];
+ bool is_pci11x1x;
+ bool is_sgmii_en;
+ /* protect ethernet syslock */
+ spinlock_t eth_syslock_spinlock;
+ bool eth_syslock_en;
+ u32 eth_syslock_acquire_cnt;
+ struct mutex sgmii_rw_lock;
+ /* SGMII Link Speed & Duplex status */
+ enum lan743x_sgmii_lsd sgmii_lsd;
+ u8 max_tx_channels;
+ u8 used_tx_channels;
+ u8 max_vector_count;
+
+#define LAN743X_ADAPTER_FLAG_OTP BIT(0)
+ u32 flags;
+ u32 hw_cfg;
+ phy_interface_t phy_interface;
+};
+
+#define LAN743X_COMPONENT_FLAG_RX(channel) BIT(20 + (channel))
+
+#define INTR_FLAG_IRQ_REQUESTED(vector_index) BIT(0 + vector_index)
+#define INTR_FLAG_MSI_ENABLED BIT(8)
+#define INTR_FLAG_MSIX_ENABLED BIT(9)
+
+#define MAC_MII_READ 1
+#define MAC_MII_WRITE 0
+
+#define PHY_FLAG_OPENED BIT(0)
+#define PHY_FLAG_ATTACHED BIT(1)
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+#define DMA_ADDR_HIGH32(dma_addr) ((u32)(((dma_addr) >> 32) & 0xFFFFFFFF))
+#else
+#define DMA_ADDR_HIGH32(dma_addr) ((u32)(0))
+#endif
+#define DMA_ADDR_LOW32(dma_addr) ((u32)((dma_addr) & 0xFFFFFFFF))
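+/* Worked example (illustrative): a 64-bit bus address 0x0000001234567890
+ * splits as DMA_ADDR_HIGH32() == 0x00000012 and
+ * DMA_ADDR_LOW32() == 0x34567890 for the descriptor base register pairs;
+ * without 64-bit DMA addressing the high word is always 0.
+ */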
+#define DMA_DESCRIPTOR_SPACING_16 (16)
+#define DMA_DESCRIPTOR_SPACING_32 (32)
+#define DMA_DESCRIPTOR_SPACING_64 (64)
+#define DMA_DESCRIPTOR_SPACING_128 (128)
+#define DEFAULT_DMA_DESCRIPTOR_SPACING (L1_CACHE_BYTES)
+
+#define DMAC_CHANNEL_STATE_SET(start_bit, stop_bit) \
+ (((start_bit) ? 2 : 0) | ((stop_bit) ? 1 : 0))
+#define DMAC_CHANNEL_STATE_INITIAL DMAC_CHANNEL_STATE_SET(0, 0)
+#define DMAC_CHANNEL_STATE_STARTED DMAC_CHANNEL_STATE_SET(1, 0)
+#define DMAC_CHANNEL_STATE_STOP_PENDING DMAC_CHANNEL_STATE_SET(1, 1)
+#define DMAC_CHANNEL_STATE_STOPPED DMAC_CHANNEL_STATE_SET(0, 1)
+
+/* TX Descriptor bits */
+#define TX_DESC_DATA0_DTYPE_MASK_ (0xC0000000)
+#define TX_DESC_DATA0_DTYPE_DATA_ (0x00000000)
+#define TX_DESC_DATA0_DTYPE_EXT_ (0x40000000)
+#define TX_DESC_DATA0_FS_ (0x20000000)
+#define TX_DESC_DATA0_LS_ (0x10000000)
+#define TX_DESC_DATA0_EXT_ (0x08000000)
+#define TX_DESC_DATA0_IOC_ (0x04000000)
+#define TX_DESC_DATA0_ICE_ (0x00400000)
+#define TX_DESC_DATA0_IPE_ (0x00200000)
+#define TX_DESC_DATA0_TPE_ (0x00100000)
+#define TX_DESC_DATA0_FCS_ (0x00020000)
+#define TX_DESC_DATA0_TSE_ (0x00010000)
+#define TX_DESC_DATA0_BUF_LENGTH_MASK_ (0x0000FFFF)
+#define TX_DESC_DATA0_EXT_LSO_ (0x00200000)
+#define TX_DESC_DATA0_EXT_PAY_LENGTH_MASK_ (0x000FFFFF)
+#define TX_DESC_DATA3_FRAME_LENGTH_MSS_MASK_ (0x3FFF0000)
+
+struct lan743x_tx_descriptor {
+ __le32 data0;
+ __le32 data1;
+ __le32 data2;
+ __le32 data3;
+} __aligned(DEFAULT_DMA_DESCRIPTOR_SPACING);
+
+#define TX_BUFFER_INFO_FLAG_ACTIVE BIT(0)
+#define TX_BUFFER_INFO_FLAG_TIMESTAMP_REQUESTED BIT(1)
+#define TX_BUFFER_INFO_FLAG_IGNORE_SYNC BIT(2)
+#define TX_BUFFER_INFO_FLAG_SKB_FRAGMENT BIT(3)
+struct lan743x_tx_buffer_info {
+ int flags;
+ struct sk_buff *skb;
+ dma_addr_t dma_ptr;
+ unsigned int buffer_length;
+};
+
+#define LAN743X_TX_RING_SIZE (128)
+
+/* OWN bit set: descriptors are owned by the RX DMAC;
+ * OWN bit clear: descriptors are owned by the host.
+ */
+#define RX_DESC_DATA0_OWN_ (0x00008000)
+#define RX_DESC_DATA0_FS_ (0x80000000)
+#define RX_DESC_DATA0_LS_ (0x40000000)
+#define RX_DESC_DATA0_FRAME_LENGTH_MASK_ (0x3FFF0000)
+#define RX_DESC_DATA0_FRAME_LENGTH_GET_(data0) \
+ (((data0) & RX_DESC_DATA0_FRAME_LENGTH_MASK_) >> 16)
+#define RX_DESC_DATA0_EXT_ (0x00004000)
+#define RX_DESC_DATA0_BUF_LENGTH_MASK_ (0x00003FFF)
+#define RX_DESC_DATA1_STATUS_ICE_ (0x00020000)
+#define RX_DESC_DATA1_STATUS_TCE_ (0x00010000)
+#define RX_DESC_DATA1_STATUS_ICSM_ (0x00000001)
+#define RX_DESC_DATA2_TS_NS_MASK_ (0x3FFFFFFF)
+
+#if ((NET_IP_ALIGN != 0) && (NET_IP_ALIGN != 2))
+#error NET_IP_ALIGN must be 0 or 2
+#endif
+
+#define RX_HEAD_PADDING NET_IP_ALIGN
+
+struct lan743x_rx_descriptor {
+ __le32 data0;
+ __le32 data1;
+ __le32 data2;
+ __le32 data3;
+} __aligned(DEFAULT_DMA_DESCRIPTOR_SPACING);
+
+#define RX_BUFFER_INFO_FLAG_ACTIVE BIT(0)
+struct lan743x_rx_buffer_info {
+ int flags;
+ struct sk_buff *skb;
+
+ dma_addr_t dma_ptr;
+ unsigned int buffer_length;
+};
+
+#define LAN743X_RX_RING_SIZE (128)
+
+#define RX_PROCESS_RESULT_NOTHING_TO_DO (0)
+#define RX_PROCESS_RESULT_BUFFER_RECEIVED (1)
+
+u32 lan743x_csr_read(struct lan743x_adapter *adapter, int offset);
+void lan743x_csr_write(struct lan743x_adapter *adapter, int offset, u32 data);
+int lan743x_hs_syslock_acquire(struct lan743x_adapter *adapter, u16 timeout);
+void lan743x_hs_syslock_release(struct lan743x_adapter *adapter);
+void lan743x_mac_flow_ctrl_set_enables(struct lan743x_adapter *adapter,
+ bool tx_enable, bool rx_enable);
+int lan743x_sgmii_read(struct lan743x_adapter *adapter, u8 mmd, u16 addr);
+
+#endif /* _LAN743X_H */
diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.c b/drivers/net/ethernet/microchip/lan743x_ptp.c
new file mode 100644
index 0000000000..39e1066ecd
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan743x_ptp.c
@@ -0,0 +1,1792 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (C) 2018 Microchip Technology Inc. */
+
+#include <linux/netdevice.h>
+
+#include <linux/ptp_clock_kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/net_tstamp.h>
+#include "lan743x_main.h"
+
+#include "lan743x_ptp.h"
+
+#define LAN743X_LED0_ENABLE 20 /* LED0 offset in HW_CFG */
+#define LAN743X_LED_ENABLE(pin) BIT(LAN743X_LED0_ENABLE + (pin))
+
+#define LAN743X_PTP_MAX_FREQ_ADJ_IN_PPB (31249999)
+#define LAN743X_PTP_MAX_FINE_ADJ_IN_SCALED_PPM (2047999934)
+
+static bool lan743x_ptp_is_enabled(struct lan743x_adapter *adapter);
+static void lan743x_ptp_enable(struct lan743x_adapter *adapter);
+static void lan743x_ptp_disable(struct lan743x_adapter *adapter);
+static void lan743x_ptp_reset(struct lan743x_adapter *adapter);
+static void lan743x_ptp_clock_set(struct lan743x_adapter *adapter,
+ u32 seconds, u32 nano_seconds,
+ u32 sub_nano_seconds);
+
+static int lan743x_get_channel(u32 ch_map)
+{
+ int idx;
+
+ for (idx = 0; idx < 32; idx++) {
+ if (ch_map & (0x1 << idx))
+ return idx;
+ }
+
+ return -EINVAL;
+}
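+/* Note: for a non-zero ch_map this is equivalent to ffs(ch_map) - 1;
+ * the open-coded loop is kept so an empty map returns -EINVAL.
+ */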
+
+int lan743x_gpio_init(struct lan743x_adapter *adapter)
+{
+ struct lan743x_gpio *gpio = &adapter->gpio;
+
+ spin_lock_init(&gpio->gpio_lock);
+
+ gpio->gpio_cfg0 = 0; /* set all direction to input, data = 0 */
+ gpio->gpio_cfg1 = 0x0FFF0000; /* disable all gpio, set to open drain */
+ gpio->gpio_cfg2 = 0; /* set all to 1588 low polarity level */
+ gpio->gpio_cfg3 = 0; /* disable all 1588 output */
+ lan743x_csr_write(adapter, GPIO_CFG0, gpio->gpio_cfg0);
+ lan743x_csr_write(adapter, GPIO_CFG1, gpio->gpio_cfg1);
+ lan743x_csr_write(adapter, GPIO_CFG2, gpio->gpio_cfg2);
+ lan743x_csr_write(adapter, GPIO_CFG3, gpio->gpio_cfg3);
+
+ return 0;
+}
+
+static void lan743x_ptp_wait_till_cmd_done(struct lan743x_adapter *adapter,
+ u32 bit_mask)
+{
+ int timeout = 1000;
+ u32 data = 0;
+
+ while (timeout &&
+ (data = (lan743x_csr_read(adapter, PTP_CMD_CTL) &
+ bit_mask))) {
+ usleep_range(1000, 20000);
+ timeout--;
+ }
+ if (data) {
+ netif_err(adapter, drv, adapter->netdev,
+ "timeout waiting for cmd to be done, cmd = 0x%08X\n",
+ bit_mask);
+ }
+}
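+/* Polling budget note: up to 1000 iterations of usleep_range(1000, 20000)
+ * means the wait above may last roughly 1 to 20 seconds before the
+ * timeout message is printed.
+ */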
+
+static void lan743x_ptp_tx_ts_enqueue_ts(struct lan743x_adapter *adapter,
+ u32 seconds, u32 nano_seconds,
+ u32 header)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+
+ spin_lock_bh(&ptp->tx_ts_lock);
+ if (ptp->tx_ts_queue_size < LAN743X_PTP_NUMBER_OF_TX_TIMESTAMPS) {
+ ptp->tx_ts_seconds_queue[ptp->tx_ts_queue_size] = seconds;
+ ptp->tx_ts_nseconds_queue[ptp->tx_ts_queue_size] = nano_seconds;
+ ptp->tx_ts_header_queue[ptp->tx_ts_queue_size] = header;
+ ptp->tx_ts_queue_size++;
+ } else {
+ netif_err(adapter, drv, adapter->netdev,
+ "tx ts queue overflow\n");
+ }
+ spin_unlock_bh(&ptp->tx_ts_lock);
+}
+
+static void lan743x_ptp_tx_ts_complete(struct lan743x_adapter *adapter)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+ struct skb_shared_hwtstamps tstamps;
+ u32 header, nseconds, seconds;
+ bool ignore_sync = false;
+ struct sk_buff *skb;
+ int c, i;
+
+ spin_lock_bh(&ptp->tx_ts_lock);
+ c = ptp->tx_ts_skb_queue_size;
+
+ if (c > ptp->tx_ts_queue_size)
+ c = ptp->tx_ts_queue_size;
+ if (c <= 0)
+ goto done;
+
+ for (i = 0; i < c; i++) {
+ ignore_sync = ((ptp->tx_ts_ignore_sync_queue &
+ BIT(i)) != 0);
+ skb = ptp->tx_ts_skb_queue[i];
+ nseconds = ptp->tx_ts_nseconds_queue[i];
+ seconds = ptp->tx_ts_seconds_queue[i];
+ header = ptp->tx_ts_header_queue[i];
+
+ memset(&tstamps, 0, sizeof(tstamps));
+ tstamps.hwtstamp = ktime_set(seconds, nseconds);
+ if (!ignore_sync ||
+ ((header & PTP_TX_MSG_HEADER_MSG_TYPE_) !=
+ PTP_TX_MSG_HEADER_MSG_TYPE_SYNC_))
+ skb_tstamp_tx(skb, &tstamps);
+
+ dev_kfree_skb(skb);
+
+ ptp->tx_ts_skb_queue[i] = NULL;
+ ptp->tx_ts_seconds_queue[i] = 0;
+ ptp->tx_ts_nseconds_queue[i] = 0;
+ ptp->tx_ts_header_queue[i] = 0;
+ }
+
+ /* shift queue */
+ ptp->tx_ts_ignore_sync_queue >>= c;
+ for (i = c; i < LAN743X_PTP_NUMBER_OF_TX_TIMESTAMPS; i++) {
+ ptp->tx_ts_skb_queue[i - c] = ptp->tx_ts_skb_queue[i];
+ ptp->tx_ts_seconds_queue[i - c] = ptp->tx_ts_seconds_queue[i];
+ ptp->tx_ts_nseconds_queue[i - c] = ptp->tx_ts_nseconds_queue[i];
+ ptp->tx_ts_header_queue[i - c] = ptp->tx_ts_header_queue[i];
+
+ ptp->tx_ts_skb_queue[i] = NULL;
+ ptp->tx_ts_seconds_queue[i] = 0;
+ ptp->tx_ts_nseconds_queue[i] = 0;
+ ptp->tx_ts_header_queue[i] = 0;
+ }
+ ptp->tx_ts_skb_queue_size -= c;
+ ptp->tx_ts_queue_size -= c;
+done:
+ ptp->pending_tx_timestamps -= c;
+ spin_unlock_bh(&ptp->tx_ts_lock);
+}
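+/* The queue shift above is O(n), but n is bounded by
+ * LAN743X_PTP_NUMBER_OF_TX_TIMESTAMPS (a small constant), so a ring
+ * buffer would buy little here.
+ */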
+
+static int lan743x_ptp_reserve_event_ch(struct lan743x_adapter *adapter,
+ int event_channel)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+ int result = -ENODEV;
+
+ mutex_lock(&ptp->command_lock);
+ if (!(test_bit(event_channel, &ptp->used_event_ch))) {
+ ptp->used_event_ch |= BIT(event_channel);
+ result = event_channel;
+ } else {
+ netif_warn(adapter, drv, adapter->netdev,
+ "attempted to reserved a used event_channel = %d\n",
+ event_channel);
+ }
+ mutex_unlock(&ptp->command_lock);
+ return result;
+}
+
+static void lan743x_ptp_release_event_ch(struct lan743x_adapter *adapter,
+ int event_channel)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+
+ mutex_lock(&ptp->command_lock);
+ if (test_bit(event_channel, &ptp->used_event_ch)) {
+ ptp->used_event_ch &= ~BIT(event_channel);
+ } else {
+ netif_warn(adapter, drv, adapter->netdev,
+ "attempted release on a not used event_channel = %d\n",
+ event_channel);
+ }
+ mutex_unlock(&ptp->command_lock);
+}
+
+static void lan743x_ptp_clock_get(struct lan743x_adapter *adapter,
+ u32 *seconds, u32 *nano_seconds,
+ u32 *sub_nano_seconds);
+static void lan743x_ptp_io_clock_get(struct lan743x_adapter *adapter,
+ u32 *sec, u32 *nsec, u32 *sub_nsec);
+static void lan743x_ptp_clock_step(struct lan743x_adapter *adapter,
+ s64 time_step_ns);
+
+static void lan743x_led_mux_enable(struct lan743x_adapter *adapter,
+ int pin, bool enable)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+
+ if (ptp->leds_multiplexed &&
+ ptp->led_enabled[pin]) {
+ u32 val = lan743x_csr_read(adapter, HW_CFG);
+
+ if (enable)
+ val |= LAN743X_LED_ENABLE(pin);
+ else
+ val &= ~LAN743X_LED_ENABLE(pin);
+
+ lan743x_csr_write(adapter, HW_CFG, val);
+ }
+}
+
+static void lan743x_led_mux_save(struct lan743x_adapter *adapter)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+ u32 id_rev = adapter->csr.id_rev & ID_REV_ID_MASK_;
+
+ if (id_rev == ID_REV_ID_LAN7430_) {
+ int i;
+ u32 val = lan743x_csr_read(adapter, HW_CFG);
+
+ for (i = 0; i < LAN7430_N_LED; i++) {
+ bool led_enabled = (val & LAN743X_LED_ENABLE(i)) != 0;
+
+ ptp->led_enabled[i] = led_enabled;
+ }
+ ptp->leds_multiplexed = true;
+ } else {
+ ptp->leds_multiplexed = false;
+ }
+}
+
+static void lan743x_led_mux_restore(struct lan743x_adapter *adapter)
+{
+ u32 id_rev = adapter->csr.id_rev & ID_REV_ID_MASK_;
+
+ if (id_rev == ID_REV_ID_LAN7430_) {
+ int i;
+
+ for (i = 0; i < LAN7430_N_LED; i++)
+ lan743x_led_mux_enable(adapter, i, true);
+ }
+}
+
+static int lan743x_gpio_rsrv_ptp_out(struct lan743x_adapter *adapter,
+ int pin, int event_channel)
+{
+ struct lan743x_gpio *gpio = &adapter->gpio;
+ unsigned long irq_flags = 0;
+ int bit_mask = BIT(pin);
+ int ret = -EBUSY;
+
+ spin_lock_irqsave(&gpio->gpio_lock, irq_flags);
+
+ if (!(gpio->used_bits & bit_mask)) {
+ gpio->used_bits |= bit_mask;
+ gpio->output_bits |= bit_mask;
+ gpio->ptp_bits |= bit_mask;
+
+ /* assign pin to GPIO function */
+ lan743x_led_mux_enable(adapter, pin, false);
+
+ /* set as output, and zero initial value */
+ gpio->gpio_cfg0 |= GPIO_CFG0_GPIO_DIR_BIT_(pin);
+ gpio->gpio_cfg0 &= ~GPIO_CFG0_GPIO_DATA_BIT_(pin);
+ lan743x_csr_write(adapter, GPIO_CFG0, gpio->gpio_cfg0);
+
+ /* enable gpio, and set buffer type to push pull */
+ gpio->gpio_cfg1 &= ~GPIO_CFG1_GPIOEN_BIT_(pin);
+ gpio->gpio_cfg1 |= GPIO_CFG1_GPIOBUF_BIT_(pin);
+ lan743x_csr_write(adapter, GPIO_CFG1, gpio->gpio_cfg1);
+
+ /* set 1588 polarity to high */
+ gpio->gpio_cfg2 |= GPIO_CFG2_1588_POL_BIT_(pin);
+ lan743x_csr_write(adapter, GPIO_CFG2, gpio->gpio_cfg2);
+
+ if (event_channel == 0) {
+ /* use channel A */
+ gpio->gpio_cfg3 &= ~GPIO_CFG3_1588_CH_SEL_BIT_(pin);
+ } else {
+ /* use channel B */
+ gpio->gpio_cfg3 |= GPIO_CFG3_1588_CH_SEL_BIT_(pin);
+ }
+ gpio->gpio_cfg3 |= GPIO_CFG3_1588_OE_BIT_(pin);
+ lan743x_csr_write(adapter, GPIO_CFG3, gpio->gpio_cfg3);
+
+ ret = pin;
+ }
+ spin_unlock_irqrestore(&gpio->gpio_lock, irq_flags);
+ return ret;
+}
+
+static void lan743x_gpio_release(struct lan743x_adapter *adapter, int pin)
+{
+ struct lan743x_gpio *gpio = &adapter->gpio;
+ unsigned long irq_flags = 0;
+ int bit_mask = BIT(pin);
+
+ spin_lock_irqsave(&gpio->gpio_lock, irq_flags);
+ if (gpio->used_bits & bit_mask) {
+ gpio->used_bits &= ~bit_mask;
+ if (gpio->output_bits & bit_mask) {
+ gpio->output_bits &= ~bit_mask;
+
+ if (gpio->ptp_bits & bit_mask) {
+ gpio->ptp_bits &= ~bit_mask;
+ /* disable ptp output */
+ gpio->gpio_cfg3 &= ~GPIO_CFG3_1588_OE_BIT_(pin);
+ lan743x_csr_write(adapter, GPIO_CFG3,
+ gpio->gpio_cfg3);
+ }
+ /* release gpio output */
+
+ /* disable gpio */
+ gpio->gpio_cfg1 |= GPIO_CFG1_GPIOEN_BIT_(pin);
+ gpio->gpio_cfg1 &= ~GPIO_CFG1_GPIOBUF_BIT_(pin);
+ lan743x_csr_write(adapter, GPIO_CFG1, gpio->gpio_cfg1);
+
+ /* reset back to input */
+ gpio->gpio_cfg0 &= ~GPIO_CFG0_GPIO_DIR_BIT_(pin);
+ gpio->gpio_cfg0 &= ~GPIO_CFG0_GPIO_DATA_BIT_(pin);
+ lan743x_csr_write(adapter, GPIO_CFG0, gpio->gpio_cfg0);
+
+ /* assign pin to original function */
+ lan743x_led_mux_enable(adapter, pin, true);
+ }
+ }
+ spin_unlock_irqrestore(&gpio->gpio_lock, irq_flags);
+}
+
+static int lan743x_ptpci_adjfine(struct ptp_clock_info *ptpci, long scaled_ppm)
+{
+ struct lan743x_ptp *ptp =
+ container_of(ptpci, struct lan743x_ptp, ptp_clock_info);
+ struct lan743x_adapter *adapter =
+ container_of(ptp, struct lan743x_adapter, ptp);
+ u32 lan743x_rate_adj = 0;
+ u64 u64_delta;
+
+ if ((scaled_ppm < (-LAN743X_PTP_MAX_FINE_ADJ_IN_SCALED_PPM)) ||
+ scaled_ppm > LAN743X_PTP_MAX_FINE_ADJ_IN_SCALED_PPM) {
+ return -EINVAL;
+ }
+
+ /* diff_by_scaled_ppm returns true if the difference is negative */
+ if (diff_by_scaled_ppm(1ULL << 35, scaled_ppm, &u64_delta))
+ lan743x_rate_adj = (u32)u64_delta;
+ else
+ lan743x_rate_adj = (u32)u64_delta | PTP_CLOCK_RATE_ADJ_DIR_;
+
+ lan743x_csr_write(adapter, PTP_CLOCK_RATE_ADJ,
+ lan743x_rate_adj);
+
+ return 0;
+}
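+/* Rate-adjust math (sketch): scaled_ppm carries a 16-bit binary fraction,
+ * so 1 ppm == 65536. With a base of 1 << 35, diff_by_scaled_ppm() yields
+ * roughly (2^35 * |ppm|) / 10^6; e.g. scaled_ppm = 65536 (1 ppm) programs
+ * a rate adjustment of about 34360, with PTP_CLOCK_RATE_ADJ_DIR_
+ * selecting the adjustment direction.
+ */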
+
+static int lan743x_ptpci_adjtime(struct ptp_clock_info *ptpci, s64 delta)
+{
+ struct lan743x_ptp *ptp =
+ container_of(ptpci, struct lan743x_ptp, ptp_clock_info);
+ struct lan743x_adapter *adapter =
+ container_of(ptp, struct lan743x_adapter, ptp);
+
+ lan743x_ptp_clock_step(adapter, delta);
+
+ return 0;
+}
+
+static int lan743x_ptpci_gettime64(struct ptp_clock_info *ptpci,
+ struct timespec64 *ts)
+{
+ struct lan743x_ptp *ptp =
+ container_of(ptpci, struct lan743x_ptp, ptp_clock_info);
+ struct lan743x_adapter *adapter =
+ container_of(ptp, struct lan743x_adapter, ptp);
+ u32 nano_seconds = 0;
+ u32 seconds = 0;
+
+ if (adapter->is_pci11x1x)
+ lan743x_ptp_io_clock_get(adapter, &seconds, &nano_seconds,
+ NULL);
+ else
+ lan743x_ptp_clock_get(adapter, &seconds, &nano_seconds, NULL);
+ ts->tv_sec = seconds;
+ ts->tv_nsec = nano_seconds;
+
+ return 0;
+}
+
+static int lan743x_ptpci_settime64(struct ptp_clock_info *ptpci,
+ const struct timespec64 *ts)
+{
+ struct lan743x_ptp *ptp =
+ container_of(ptpci, struct lan743x_ptp, ptp_clock_info);
+ struct lan743x_adapter *adapter =
+ container_of(ptp, struct lan743x_adapter, ptp);
+ u32 nano_seconds = 0;
+ u32 seconds = 0;
+
+ if (ts) {
+ if (ts->tv_sec > 0xFFFFFFFFLL ||
+ ts->tv_sec < 0) {
+ netif_warn(adapter, drv, adapter->netdev,
+ "ts->tv_sec out of range, %lld\n",
+ ts->tv_sec);
+ return -ERANGE;
+ }
+ if (ts->tv_nsec >= 1000000000L ||
+ ts->tv_nsec < 0) {
+ netif_warn(adapter, drv, adapter->netdev,
+ "ts->tv_nsec out of range, %ld\n",
+ ts->tv_nsec);
+ return -ERANGE;
+ }
+ seconds = ts->tv_sec;
+ nano_seconds = ts->tv_nsec;
+ lan743x_ptp_clock_set(adapter, seconds, nano_seconds, 0);
+ } else {
+ netif_warn(adapter, drv, adapter->netdev, "ts == NULL\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void lan743x_ptp_perout_off(struct lan743x_adapter *adapter,
+ unsigned int index)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+ u32 general_config = 0;
+ struct lan743x_ptp_perout *perout = &ptp->perout[index];
+
+ if (perout->gpio_pin >= 0) {
+ lan743x_gpio_release(adapter, perout->gpio_pin);
+ perout->gpio_pin = -1;
+ }
+
+ if (perout->event_ch >= 0) {
+ /* set the target far in the future, effectively disabling it */
+ lan743x_csr_write(adapter,
+ PTP_CLOCK_TARGET_SEC_X(perout->event_ch),
+ 0xFFFF0000);
+ lan743x_csr_write(adapter,
+ PTP_CLOCK_TARGET_NS_X(perout->event_ch),
+ 0);
+
+ general_config = lan743x_csr_read(adapter, PTP_GENERAL_CONFIG);
+ general_config |= PTP_GENERAL_CONFIG_RELOAD_ADD_X_
+ (perout->event_ch);
+ lan743x_csr_write(adapter, PTP_GENERAL_CONFIG, general_config);
+ lan743x_ptp_release_event_ch(adapter, perout->event_ch);
+ perout->event_ch = -1;
+ }
+}
+
+static int lan743x_ptp_perout(struct lan743x_adapter *adapter, int on,
+ struct ptp_perout_request *perout_request)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+ u32 period_sec = 0, period_nsec = 0;
+ u32 start_sec = 0, start_nsec = 0;
+ u32 general_config = 0;
+ int pulse_width = 0;
+ int perout_pin = 0;
+ unsigned int index = perout_request->index;
+ struct lan743x_ptp_perout *perout = &ptp->perout[index];
+ int ret = 0;
+
+ /* Reject requests with unsupported flags */
+ if (perout_request->flags & ~PTP_PEROUT_DUTY_CYCLE)
+ return -EOPNOTSUPP;
+
+ if (on) {
+ perout_pin = ptp_find_pin(ptp->ptp_clock, PTP_PF_PEROUT,
+ perout_request->index);
+ if (perout_pin < 0)
+ return -EBUSY;
+ } else {
+ lan743x_ptp_perout_off(adapter, index);
+ return 0;
+ }
+
+ if (perout->event_ch >= 0 ||
+ perout->gpio_pin >= 0) {
+ /* already on, turn off first */
+ lan743x_ptp_perout_off(adapter, index);
+ }
+
+ perout->event_ch = lan743x_ptp_reserve_event_ch(adapter, index);
+
+ if (perout->event_ch < 0) {
+ netif_warn(adapter, drv, adapter->netdev,
+ "Failed to reserve event channel %d for PEROUT\n",
+ index);
+ ret = -EBUSY;
+ goto failed;
+ }
+
+ perout->gpio_pin = lan743x_gpio_rsrv_ptp_out(adapter,
+ perout_pin,
+ perout->event_ch);
+
+ if (perout->gpio_pin < 0) {
+ netif_warn(adapter, drv, adapter->netdev,
+ "Failed to reserve gpio %d for PEROUT\n",
+ perout_pin);
+ ret = -EBUSY;
+ goto failed;
+ }
+
+ start_sec = perout_request->start.sec;
+ start_sec += perout_request->start.nsec / 1000000000;
+ start_nsec = perout_request->start.nsec % 1000000000;
+
+ period_sec = perout_request->period.sec;
+ period_sec += perout_request->period.nsec / 1000000000;
+ period_nsec = perout_request->period.nsec % 1000000000;
+
+ if (perout_request->flags & PTP_PEROUT_DUTY_CYCLE) {
+ struct timespec64 ts_on, ts_period;
+ s64 wf_high, period64, half;
+ s32 remainder;
+
+ ts_on.tv_sec = perout_request->on.sec;
+ ts_on.tv_nsec = perout_request->on.nsec;
+ wf_high = timespec64_to_ns(&ts_on);
+ ts_period.tv_sec = perout_request->period.sec;
+ ts_period.tv_nsec = perout_request->period.nsec;
+ period64 = timespec64_to_ns(&ts_period);
+
+ if (period64 < 200) {
+ netif_warn(adapter, drv, adapter->netdev,
+ "perout period too small, minimum is 200nS\n");
+ ret = -EOPNOTSUPP;
+ goto failed;
+ }
+ if (wf_high >= period64) {
+ netif_warn(adapter, drv, adapter->netdev,
+ "pulse width must be smaller than period\n");
+ ret = -EINVAL;
+ goto failed;
+ }
+
+ /* Check if we can do a 50% toggle on an even period value.
+ * If the period is odd, check instead whether the requested
+ * pulse width matches one of the pre-defined width values.
+ * Otherwise, return failure.
+ */
+ half = div_s64_rem(period64, 2, &remainder);
+ if (!remainder) {
+ if (half == wf_high) {
+ /* It's 50% match. Use the toggle option */
+ pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_TOGGLE_;
+ /* In this case, devide period value by 2 */
+ ts_period = ns_to_timespec64(div_s64(period64, 2));
+ period_sec = ts_period.tv_sec;
+ period_nsec = ts_period.tv_nsec;
+
+ goto program;
+ }
+ }
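+ /* Worked example (illustrative): period = 1 ms, on = 500 us takes
+ * the 50% toggle path above, programming toggle mode with a
+ * 500 us reload (half the requested period).
+ */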
+ /* If toggle is not possible, the width option must be an exact match */
+ if (wf_high == 200000000) {
+ pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_200MS_;
+ } else if (wf_high == 10000000) {
+ pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_10MS_;
+ } else if (wf_high == 1000000) {
+ pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_1MS_;
+ } else if (wf_high == 100000) {
+ pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_100US_;
+ } else if (wf_high == 10000) {
+ pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_10US_;
+ } else if (wf_high == 100) {
+ pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_100NS_;
+ } else {
+ netif_warn(adapter, drv, adapter->netdev,
+ "duty cycle specified is not supported\n");
+ ret = -EOPNOTSUPP;
+ goto failed;
+ }
+ } else {
+ if (period_sec == 0) {
+ if (period_nsec >= 400000000) {
+ pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_200MS_;
+ } else if (period_nsec >= 20000000) {
+ pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_10MS_;
+ } else if (period_nsec >= 2000000) {
+ pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_1MS_;
+ } else if (period_nsec >= 200000) {
+ pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_100US_;
+ } else if (period_nsec >= 20000) {
+ pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_10US_;
+ } else if (period_nsec >= 200) {
+ pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_100NS_;
+ } else {
+ netif_warn(adapter, drv, adapter->netdev,
+ "perout period too small, minimum is 200nS\n");
+ ret = -EOPNOTSUPP;
+ goto failed;
+ }
+ } else {
+ pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_200MS_;
+ }
+ }
+program:
+
+ /* turn off by setting target far in future */
+ lan743x_csr_write(adapter,
+ PTP_CLOCK_TARGET_SEC_X(perout->event_ch),
+ 0xFFFF0000);
+ lan743x_csr_write(adapter,
+ PTP_CLOCK_TARGET_NS_X(perout->event_ch), 0);
+
+ /* Configure to pulse every period */
+ general_config = lan743x_csr_read(adapter, PTP_GENERAL_CONFIG);
+ general_config &= ~(PTP_GENERAL_CONFIG_CLOCK_EVENT_X_MASK_
+ (perout->event_ch));
+ general_config |= PTP_GENERAL_CONFIG_CLOCK_EVENT_X_SET_
+ (perout->event_ch, pulse_width);
+ general_config &= ~PTP_GENERAL_CONFIG_RELOAD_ADD_X_
+ (perout->event_ch);
+ lan743x_csr_write(adapter, PTP_GENERAL_CONFIG, general_config);
+
+ /* set the reload to one toggle cycle */
+ lan743x_csr_write(adapter,
+ PTP_CLOCK_TARGET_RELOAD_SEC_X(perout->event_ch),
+ period_sec);
+ lan743x_csr_write(adapter,
+ PTP_CLOCK_TARGET_RELOAD_NS_X(perout->event_ch),
+ period_nsec);
+
+ /* set the start time */
+ lan743x_csr_write(adapter,
+ PTP_CLOCK_TARGET_SEC_X(perout->event_ch),
+ start_sec);
+ lan743x_csr_write(adapter,
+ PTP_CLOCK_TARGET_NS_X(perout->event_ch),
+ start_nsec);
+
+ return 0;
+
+failed:
+ lan743x_ptp_perout_off(adapter, index);
+ return ret;
+}
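+/* Usage sketch (assuming the in-tree testptp utility from
+ * tools/testing/selftests/ptp): a 1 Hz output on pin 0 could be
+ * requested with
+ *
+ *   testptp -d /dev/ptp0 -L 0,2   (pin 0 -> PTP_PF_PEROUT)
+ *   testptp -d /dev/ptp0 -p 1000000000
+ *
+ * which reaches this function via PTP_CLK_REQ_PEROUT.
+ */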
+
+static void lan743x_ptp_io_perout_off(struct lan743x_adapter *adapter,
+ u32 index)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+ int perout_pin;
+ int event_ch;
+ u32 gen_cfg;
+ int val;
+
+ event_ch = ptp->ptp_io_perout[index];
+ if (event_ch >= 0) {
+ /* set the target far in the future, effectively disabling it */
+ lan743x_csr_write(adapter,
+ PTP_CLOCK_TARGET_SEC_X(event_ch),
+ 0xFFFF0000);
+ lan743x_csr_write(adapter,
+ PTP_CLOCK_TARGET_NS_X(event_ch),
+ 0);
+
+ gen_cfg = lan743x_csr_read(adapter, HS_PTP_GENERAL_CONFIG);
+ gen_cfg &= ~(HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_X_MASK_
+ (event_ch));
+ gen_cfg &= ~(HS_PTP_GENERAL_CONFIG_EVENT_POL_X_(event_ch));
+ gen_cfg |= HS_PTP_GENERAL_CONFIG_RELOAD_ADD_X_(event_ch);
+ lan743x_csr_write(adapter, HS_PTP_GENERAL_CONFIG, gen_cfg);
+ if (event_ch)
+ lan743x_csr_write(adapter, PTP_INT_STS,
+ PTP_INT_TIMER_INT_B_);
+ else
+ lan743x_csr_write(adapter, PTP_INT_STS,
+ PTP_INT_TIMER_INT_A_);
+ lan743x_ptp_release_event_ch(adapter, event_ch);
+ ptp->ptp_io_perout[index] = -1;
+ }
+
+ perout_pin = ptp_find_pin(ptp->ptp_clock, PTP_PF_PEROUT, index);
+
+ /* Deselect Event output */
+ val = lan743x_csr_read(adapter, PTP_IO_EVENT_OUTPUT_CFG);
+
+ /* Disables the output of Local Time Target compare events */
+ val &= ~PTP_IO_EVENT_OUTPUT_CFG_EN_(perout_pin);
+ lan743x_csr_write(adapter, PTP_IO_EVENT_OUTPUT_CFG, val);
+
+ /* Configure as an open-drain driver */
+ val = lan743x_csr_read(adapter, PTP_IO_PIN_CFG);
+ val &= ~PTP_IO_PIN_CFG_OBUF_TYPE_(perout_pin);
+ lan743x_csr_write(adapter, PTP_IO_PIN_CFG, val);
+ /* Dummy read to ensure the write has completed */
+ val = lan743x_csr_read(adapter, PTP_IO_PIN_CFG);
+}
+
+static int lan743x_ptp_io_perout(struct lan743x_adapter *adapter, int on,
+ struct ptp_perout_request *perout_request)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+ u32 period_sec, period_nsec;
+ u32 start_sec, start_nsec;
+ u32 pulse_sec, pulse_nsec;
+ int pulse_width;
+ int perout_pin;
+ int event_ch;
+ u32 gen_cfg;
+ u32 index;
+ int val;
+
+ index = perout_request->index;
+ event_ch = ptp->ptp_io_perout[index];
+
+ if (on) {
+ perout_pin = ptp_find_pin(ptp->ptp_clock, PTP_PF_PEROUT, index);
+ if (perout_pin < 0)
+ return -EBUSY;
+ } else {
+ lan743x_ptp_io_perout_off(adapter, index);
+ return 0;
+ }
+
+ if (event_ch >= LAN743X_PTP_N_EVENT_CHAN) {
+ /* already on, turn off first */
+ lan743x_ptp_io_perout_off(adapter, index);
+ }
+
+ event_ch = lan743x_ptp_reserve_event_ch(adapter, index);
+ if (event_ch < 0) {
+ netif_warn(adapter, drv, adapter->netdev,
+ "Failed to reserve event channel %d for PEROUT\n",
+ index);
+ goto failed;
+ }
+ ptp->ptp_io_perout[index] = event_ch;
+
+ if (perout_request->flags & PTP_PEROUT_DUTY_CYCLE) {
+ pulse_sec = perout_request->on.sec;
+ pulse_sec += perout_request->on.nsec / 1000000000;
+ pulse_nsec = perout_request->on.nsec % 1000000000;
+ } else {
+ pulse_sec = perout_request->period.sec;
+ pulse_sec += perout_request->period.nsec / 1000000000;
+ pulse_nsec = perout_request->period.nsec % 1000000000;
+ }
+
+ if (pulse_sec == 0) {
+ if (pulse_nsec >= 400000000) {
+ pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_200MS_;
+ } else if (pulse_nsec >= 200000000) {
+ pulse_width = HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_100MS_;
+ } else if (pulse_nsec >= 100000000) {
+ pulse_width = HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_50MS_;
+ } else if (pulse_nsec >= 20000000) {
+ pulse_width = HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_10MS_;
+ } else if (pulse_nsec >= 10000000) {
+ pulse_width = HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_5MS_;
+ } else if (pulse_nsec >= 2000000) {
+ pulse_width = HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_1MS_;
+ } else if (pulse_nsec >= 1000000) {
+ pulse_width = HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_500US_;
+ } else if (pulse_nsec >= 200000) {
+ pulse_width = HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_100US_;
+ } else if (pulse_nsec >= 100000) {
+ pulse_width = HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_50US_;
+ } else if (pulse_nsec >= 20000) {
+ pulse_width = HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_10US_;
+ } else if (pulse_nsec >= 10000) {
+ pulse_width = HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_5US_;
+ } else if (pulse_nsec >= 2000) {
+ pulse_width = HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_1US_;
+ } else if (pulse_nsec >= 1000) {
+ pulse_width = HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_500NS_;
+ } else if (pulse_nsec >= 200) {
+ pulse_width = HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_100NS_;
+ } else {
+ netif_warn(adapter, drv, adapter->netdev,
+ "perout period too small, min is 200nS\n");
+ goto failed;
+ }
+ } else {
+ pulse_width = HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_200MS_;
+ }
+
+ /* turn off by setting target far in future */
+ lan743x_csr_write(adapter,
+ PTP_CLOCK_TARGET_SEC_X(event_ch),
+ 0xFFFF0000);
+ lan743x_csr_write(adapter,
+ PTP_CLOCK_TARGET_NS_X(event_ch), 0);
+
+ /* Configure to pulse every period */
+ gen_cfg = lan743x_csr_read(adapter, HS_PTP_GENERAL_CONFIG);
+ gen_cfg &= ~(HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_X_MASK_(event_ch));
+ gen_cfg |= HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_X_SET_
+ (event_ch, pulse_width);
+ gen_cfg |= HS_PTP_GENERAL_CONFIG_EVENT_POL_X_(event_ch);
+ gen_cfg &= ~(HS_PTP_GENERAL_CONFIG_RELOAD_ADD_X_(event_ch));
+ lan743x_csr_write(adapter, HS_PTP_GENERAL_CONFIG, gen_cfg);
+
+ /* set the reload to one toggle cycle */
+ period_sec = perout_request->period.sec;
+ period_sec += perout_request->period.nsec / 1000000000;
+ period_nsec = perout_request->period.nsec % 1000000000;
+ lan743x_csr_write(adapter,
+ PTP_CLOCK_TARGET_RELOAD_SEC_X(event_ch),
+ period_sec);
+ lan743x_csr_write(adapter,
+ PTP_CLOCK_TARGET_RELOAD_NS_X(event_ch),
+ period_nsec);
+
+ start_sec = perout_request->start.sec;
+ start_sec += perout_request->start.nsec / 1000000000;
+ start_nsec = perout_request->start.nsec % 1000000000;
+
+ /* set the start time */
+ lan743x_csr_write(adapter,
+ PTP_CLOCK_TARGET_SEC_X(event_ch),
+ start_sec);
+ lan743x_csr_write(adapter,
+ PTP_CLOCK_TARGET_NS_X(event_ch),
+ start_nsec);
+
+ /* Enable LTC Target Read */
+ val = lan743x_csr_read(adapter, PTP_CMD_CTL);
+ val |= PTP_CMD_CTL_PTP_LTC_TARGET_READ_;
+ lan743x_csr_write(adapter, PTP_CMD_CTL, val);
+
+ /* Configure as a push-pull driver */
+ val = lan743x_csr_read(adapter, PTP_IO_PIN_CFG);
+ val |= PTP_IO_PIN_CFG_OBUF_TYPE_(perout_pin);
+ lan743x_csr_write(adapter, PTP_IO_PIN_CFG, val);
+
+ /* Select Event output */
+ val = lan743x_csr_read(adapter, PTP_IO_EVENT_OUTPUT_CFG);
+ if (event_ch)
+ /* Channel B as the output */
+ val |= PTP_IO_EVENT_OUTPUT_CFG_SEL_(perout_pin);
+ else
+ /* Channel A as the output */
+ val &= ~PTP_IO_EVENT_OUTPUT_CFG_SEL_(perout_pin);
+
+ /* Enables the output of Local Time Target compare events */
+ val |= PTP_IO_EVENT_OUTPUT_CFG_EN_(perout_pin);
+ lan743x_csr_write(adapter, PTP_IO_EVENT_OUTPUT_CFG, val);
+
+ return 0;
+
+failed:
+ lan743x_ptp_io_perout_off(adapter, index);
+ return -ENODEV;
+}
+
+static void lan743x_ptp_io_extts_off(struct lan743x_adapter *adapter,
+ u32 index)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+ struct lan743x_extts *extts;
+ int val;
+
+ extts = &ptp->extts[index];
+ /* PTP Interrupt Enable Clear Register */
+ if (extts->flags & PTP_FALLING_EDGE)
+ val = PTP_INT_EN_FE_EN_CLR_(index);
+ else
+ val = PTP_INT_EN_RE_EN_CLR_(index);
+ lan743x_csr_write(adapter, PTP_INT_EN_CLR, val);
+
+ /* Disables PTP-IO edge lock */
+ val = lan743x_csr_read(adapter, PTP_IO_CAP_CONFIG);
+ if (extts->flags & PTP_FALLING_EDGE) {
+ val &= ~PTP_IO_CAP_CONFIG_LOCK_FE_(index);
+ val &= ~PTP_IO_CAP_CONFIG_FE_CAP_EN_(index);
+ } else {
+ val &= ~PTP_IO_CAP_CONFIG_LOCK_RE_(index);
+ val &= ~PTP_IO_CAP_CONFIG_RE_CAP_EN_(index);
+ }
+ lan743x_csr_write(adapter, PTP_IO_CAP_CONFIG, val);
+
+ /* PTP-IO De-select register */
+ val = lan743x_csr_read(adapter, PTP_IO_SEL);
+ val &= ~PTP_IO_SEL_MASK_;
+ lan743x_csr_write(adapter, PTP_IO_SEL, val);
+
+ /* Clear timestamp */
+ memset(&extts->ts, 0, sizeof(struct timespec64));
+ extts->flags = 0;
+}
+
+static int lan743x_ptp_io_event_cap_en(struct lan743x_adapter *adapter,
+ u32 flags, u32 channel)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+ int val;
+
+ if ((flags & PTP_EXTTS_EDGES) == PTP_EXTTS_EDGES)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&ptp->command_lock);
+ /* PTP-IO Event Capture Enable */
+ val = lan743x_csr_read(adapter, PTP_IO_CAP_CONFIG);
+ if (flags & PTP_FALLING_EDGE) {
+ val &= ~PTP_IO_CAP_CONFIG_LOCK_RE_(channel);
+ val &= ~PTP_IO_CAP_CONFIG_RE_CAP_EN_(channel);
+ val |= PTP_IO_CAP_CONFIG_LOCK_FE_(channel);
+ val |= PTP_IO_CAP_CONFIG_FE_CAP_EN_(channel);
+ } else {
+ /* Rising edge is the default */
+ val &= ~PTP_IO_CAP_CONFIG_LOCK_FE_(channel);
+ val &= ~PTP_IO_CAP_CONFIG_FE_CAP_EN_(channel);
+ val |= PTP_IO_CAP_CONFIG_LOCK_RE_(channel);
+ val |= PTP_IO_CAP_CONFIG_RE_CAP_EN_(channel);
+ }
+ lan743x_csr_write(adapter, PTP_IO_CAP_CONFIG, val);
+
+ /* PTP-IO Select */
+ val = lan743x_csr_read(adapter, PTP_IO_SEL);
+ val &= ~PTP_IO_SEL_MASK_;
+ val |= channel << PTP_IO_SEL_SHIFT_;
+ lan743x_csr_write(adapter, PTP_IO_SEL, val);
+
+ /* PTP Interrupt Enable Register */
+ if (flags & PTP_FALLING_EDGE)
+ val = PTP_INT_EN_FE_EN_SET_(channel);
+ else
+ val = PTP_INT_EN_RE_EN_SET_(channel);
+ lan743x_csr_write(adapter, PTP_INT_EN_SET, val);
+
+ mutex_unlock(&ptp->command_lock);
+
+ return 0;
+}
+
+static int lan743x_ptp_io_extts(struct lan743x_adapter *adapter, int on,
+ struct ptp_extts_request *extts_request)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+ u32 flags = extts_request->flags;
+ u32 index = extts_request->index;
+ struct lan743x_extts *extts;
+ int extts_pin;
+ int ret = 0;
+
+ extts = &ptp->extts[index];
+
+ if (on) {
+ extts_pin = ptp_find_pin(ptp->ptp_clock, PTP_PF_EXTTS, index);
+ if (extts_pin < 0)
+ return -EBUSY;
+
+ ret = lan743x_ptp_io_event_cap_en(adapter, flags, index);
+ if (!ret)
+ extts->flags = flags;
+ } else {
+ lan743x_ptp_io_extts_off(adapter, index);
+ }
+
+ return ret;
+}
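+/* Usage sketch (assuming the in-tree testptp utility): external
+ * timestamping on PCI11x1x parts can be exercised with
+ *
+ *   testptp -d /dev/ptp0 -L 0,1   (pin 0 -> PTP_PF_EXTTS)
+ *   testptp -d /dev/ptp0 -e 5     (read 5 edge timestamps)
+ *
+ * which lands here via PTP_CLK_REQ_EXTTS.
+ */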
+
+static int lan743x_ptpci_enable(struct ptp_clock_info *ptpci,
+ struct ptp_clock_request *request, int on)
+{
+ struct lan743x_ptp *ptp =
+ container_of(ptpci, struct lan743x_ptp, ptp_clock_info);
+ struct lan743x_adapter *adapter =
+ container_of(ptp, struct lan743x_adapter, ptp);
+
+ if (request) {
+ switch (request->type) {
+ case PTP_CLK_REQ_EXTTS:
+ if (request->extts.index < ptpci->n_ext_ts)
+ return lan743x_ptp_io_extts(adapter, on,
+ &request->extts);
+ return -EINVAL;
+ case PTP_CLK_REQ_PEROUT:
+ if (request->perout.index < ptpci->n_per_out) {
+ if (adapter->is_pci11x1x)
+ return lan743x_ptp_io_perout(adapter, on,
+ &request->perout);
+ else
+ return lan743x_ptp_perout(adapter, on,
+ &request->perout);
+ }
+ return -EINVAL;
+ case PTP_CLK_REQ_PPS:
+ return -EINVAL;
+ default:
+ netif_err(adapter, drv, adapter->netdev,
+ "request->type == %d, Unknown\n",
+ request->type);
+ break;
+ }
+ } else {
+ netif_err(adapter, drv, adapter->netdev, "request == NULL\n");
+ }
+ return 0;
+}
+
+static int lan743x_ptpci_verify_pin_config(struct ptp_clock_info *ptp,
+ unsigned int pin,
+ enum ptp_pin_function func,
+ unsigned int chan)
+{
+ struct lan743x_ptp *lan_ptp =
+ container_of(ptp, struct lan743x_ptp, ptp_clock_info);
+ struct lan743x_adapter *adapter =
+ container_of(lan_ptp, struct lan743x_adapter, ptp);
+ int result = 0;
+
+ /* Confirm the requested function is supported. Parameter
+ * validation is done by the caller.
+ */
+ switch (func) {
+ case PTP_PF_NONE:
+ case PTP_PF_PEROUT:
+ break;
+ case PTP_PF_EXTTS:
+ if (!adapter->is_pci11x1x)
+ result = -1;
+ break;
+ case PTP_PF_PHYSYNC:
+ default:
+ result = -1;
+ break;
+ }
+ return result;
+}
+
+static void lan743x_ptp_io_event_clock_get(struct lan743x_adapter *adapter,
+ bool fe, u8 channel,
+ struct timespec64 *ts)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+ struct lan743x_extts *extts;
+ u32 sec, nsec;
+
+ mutex_lock(&ptp->command_lock);
+ if (fe) {
+ sec = lan743x_csr_read(adapter, PTP_IO_FE_LTC_SEC_CAP_X);
+ nsec = lan743x_csr_read(adapter, PTP_IO_FE_LTC_NS_CAP_X);
+ } else {
+ sec = lan743x_csr_read(adapter, PTP_IO_RE_LTC_SEC_CAP_X);
+ nsec = lan743x_csr_read(adapter, PTP_IO_RE_LTC_NS_CAP_X);
+ }
+
+ mutex_unlock(&ptp->command_lock);
+
+ /* Update Local timestamp */
+ extts = &ptp->extts[channel];
+ extts->ts.tv_sec = sec;
+ extts->ts.tv_nsec = nsec;
+ ts->tv_sec = sec;
+ ts->tv_nsec = nsec;
+}
+
+static long lan743x_ptpci_do_aux_work(struct ptp_clock_info *ptpci)
+{
+ struct lan743x_ptp *ptp =
+ container_of(ptpci, struct lan743x_ptp, ptp_clock_info);
+ struct lan743x_adapter *adapter =
+ container_of(ptp, struct lan743x_adapter, ptp);
+ u32 cap_info, cause, header, nsec, seconds;
+ bool new_timestamp_available = false;
+ struct ptp_clock_event ptp_event;
+ struct timespec64 ts;
+ int ptp_int_sts;
+ int count = 0;
+ int channel;
+ s64 ns;
+
+ ptp_int_sts = lan743x_csr_read(adapter, PTP_INT_STS);
+ while ((count < 100) && ptp_int_sts) {
+ count++;
+
+ if (ptp_int_sts & PTP_INT_BIT_TX_TS_) {
+ cap_info = lan743x_csr_read(adapter, PTP_CAP_INFO);
+
+ if (PTP_CAP_INFO_TX_TS_CNT_GET_(cap_info) > 0) {
+ seconds = lan743x_csr_read(adapter,
+ PTP_TX_EGRESS_SEC);
+ nsec = lan743x_csr_read(adapter,
+ PTP_TX_EGRESS_NS);
+ cause = (nsec &
+ PTP_TX_EGRESS_NS_CAPTURE_CAUSE_MASK_);
+ header = lan743x_csr_read(adapter,
+ PTP_TX_MSG_HEADER);
+
+ if (cause ==
+ PTP_TX_EGRESS_NS_CAPTURE_CAUSE_SW_) {
+ nsec &= PTP_TX_EGRESS_NS_TS_NS_MASK_;
+ lan743x_ptp_tx_ts_enqueue_ts(adapter,
+ seconds,
+ nsec,
+ header);
+ new_timestamp_available = true;
+ } else if (cause ==
+ PTP_TX_EGRESS_NS_CAPTURE_CAUSE_AUTO_) {
+ netif_err(adapter, drv, adapter->netdev,
+ "Auto capture cause not supported\n");
+ } else {
+ netif_warn(adapter, drv, adapter->netdev,
+ "unknown tx timestamp capture cause\n");
+ }
+ } else {
+ netif_warn(adapter, drv, adapter->netdev,
+ "TX TS INT but no TX TS CNT\n");
+ }
+ lan743x_csr_write(adapter, PTP_INT_STS,
+ PTP_INT_BIT_TX_TS_);
+ }
+
+ if (ptp_int_sts & PTP_INT_IO_FE_MASK_) {
+ do {
+ channel = lan743x_get_channel((ptp_int_sts &
+ PTP_INT_IO_FE_MASK_) >>
+ PTP_INT_IO_FE_SHIFT_);
+ if (channel >= 0 &&
+ channel < PCI11X1X_PTP_IO_MAX_CHANNELS) {
+ lan743x_ptp_io_event_clock_get(adapter,
+ true,
+ channel,
+ &ts);
+ /* Post the PTP falling-edge event */
+ ns = timespec64_to_ns(&ts);
+ ptp_event.timestamp = ns;
+ ptp_event.index = channel;
+ ptp_event.type = PTP_CLOCK_EXTTS;
+ ptp_clock_event(ptp->ptp_clock,
+ &ptp_event);
+ lan743x_csr_write(adapter, PTP_INT_STS,
+ PTP_INT_IO_FE_SET_
+ (channel));
+ ptp_int_sts &= ~(1 <<
+ (PTP_INT_IO_FE_SHIFT_ +
+ channel));
+ } else {
+ /* Clear falling event interrupts */
+ lan743x_csr_write(adapter, PTP_INT_STS,
+ PTP_INT_IO_FE_MASK_);
+ ptp_int_sts &= ~PTP_INT_IO_FE_MASK_;
+ }
+ } while (ptp_int_sts & PTP_INT_IO_FE_MASK_);
+ }
+
+ if (ptp_int_sts & PTP_INT_IO_RE_MASK_) {
+ do {
+ channel = lan743x_get_channel((ptp_int_sts &
+ PTP_INT_IO_RE_MASK_) >>
+ PTP_INT_IO_RE_SHIFT_);
+ if (channel >= 0 &&
+ channel < PCI11X1X_PTP_IO_MAX_CHANNELS) {
+ lan743x_ptp_io_event_clock_get(adapter,
+ false,
+ channel,
+ &ts);
+ /* Post the PTP rising-edge event */
+ ns = timespec64_to_ns(&ts);
+ ptp_event.timestamp = ns;
+ ptp_event.index = channel;
+ ptp_event.type = PTP_CLOCK_EXTTS;
+ ptp_clock_event(ptp->ptp_clock,
+ &ptp_event);
+ lan743x_csr_write(adapter, PTP_INT_STS,
+ PTP_INT_IO_RE_SET_
+ (channel));
+ ptp_int_sts &= ~(1 <<
+ (PTP_INT_IO_RE_SHIFT_ +
+ channel));
+ } else {
+ /* Clear Rising event interrupt */
+ lan743x_csr_write(adapter, PTP_INT_STS,
+ PTP_INT_IO_RE_MASK_);
+ ptp_int_sts &= ~PTP_INT_IO_RE_MASK_;
+ }
+ } while (ptp_int_sts & PTP_INT_IO_RE_MASK_);
+ }
+
+ ptp_int_sts = lan743x_csr_read(adapter, PTP_INT_STS);
+ }
+
+ if (new_timestamp_available)
+ lan743x_ptp_tx_ts_complete(adapter);
+
+ lan743x_csr_write(adapter, INT_EN_SET, INT_BIT_1588_);
+
+ return -1;
+}
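+/* Returning a negative value tells the PTP core not to reschedule this
+ * worker on a timer; INT_BIT_1588_ is re-enabled just above, so the
+ * interrupt path is expected to queue the worker again (typically via
+ * ptp_schedule_worker()) when new events arrive.
+ */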
+
+static void lan743x_ptp_clock_get(struct lan743x_adapter *adapter,
+ u32 *seconds, u32 *nano_seconds,
+ u32 *sub_nano_seconds)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+
+ mutex_lock(&ptp->command_lock);
+
+ lan743x_csr_write(adapter, PTP_CMD_CTL, PTP_CMD_CTL_PTP_CLOCK_READ_);
+ lan743x_ptp_wait_till_cmd_done(adapter, PTP_CMD_CTL_PTP_CLOCK_READ_);
+
+ if (seconds)
+ (*seconds) = lan743x_csr_read(adapter, PTP_CLOCK_SEC);
+
+ if (nano_seconds)
+ (*nano_seconds) = lan743x_csr_read(adapter, PTP_CLOCK_NS);
+
+ if (sub_nano_seconds)
+ (*sub_nano_seconds) =
+ lan743x_csr_read(adapter, PTP_CLOCK_SUBNS);
+
+ mutex_unlock(&ptp->command_lock);
+}
+
+static void lan743x_ptp_io_clock_get(struct lan743x_adapter *adapter,
+ u32 *sec, u32 *nsec, u32 *sub_nsec)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+
+ mutex_lock(&ptp->command_lock);
+ lan743x_csr_write(adapter, PTP_CMD_CTL, PTP_CMD_CTL_PTP_CLOCK_READ_);
+ lan743x_ptp_wait_till_cmd_done(adapter, PTP_CMD_CTL_PTP_CLOCK_READ_);
+
+ if (sec)
+ (*sec) = lan743x_csr_read(adapter, PTP_LTC_RD_SEC_LO);
+
+ if (nsec)
+ (*nsec) = lan743x_csr_read(adapter, PTP_LTC_RD_NS);
+
+ if (sub_nsec)
+ (*sub_nsec) =
+ lan743x_csr_read(adapter, PTP_LTC_RD_SUBNS);
+
+ mutex_unlock(&ptp->command_lock);
+}
+
+static void lan743x_ptp_clock_step(struct lan743x_adapter *adapter,
+ s64 time_step_ns)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+ u32 nano_seconds_step = 0;
+ u64 abs_time_step_ns = 0;
+ u32 unsigned_seconds = 0;
+ u32 nano_seconds = 0;
+ u32 remainder = 0;
+ s32 seconds = 0;
+
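+	/* Offsets beyond +/-15 s are applied as an absolute clock set;
+	 * the step command only takes a 4-bit seconds value (0xF), so a
+	 * larger offset would need repeated step commands.
+	 */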
+ if (time_step_ns > 15000000000LL) {
+ /* convert to clock set */
+ if (adapter->is_pci11x1x)
+ lan743x_ptp_io_clock_get(adapter, &unsigned_seconds,
+ &nano_seconds, NULL);
+ else
+ lan743x_ptp_clock_get(adapter, &unsigned_seconds,
+ &nano_seconds, NULL);
+ unsigned_seconds += div_u64_rem(time_step_ns, 1000000000LL,
+ &remainder);
+ nano_seconds += remainder;
+ if (nano_seconds >= 1000000000) {
+ unsigned_seconds++;
+ nano_seconds -= 1000000000;
+ }
+ lan743x_ptp_clock_set(adapter, unsigned_seconds,
+ nano_seconds, 0);
+ return;
+ } else if (time_step_ns < -15000000000LL) {
+ /* convert to clock set */
+ time_step_ns = -time_step_ns;
+
+ if (adapter->is_pci11x1x) {
+ lan743x_ptp_io_clock_get(adapter, &unsigned_seconds,
+ &nano_seconds, NULL);
+ } else {
+ lan743x_ptp_clock_get(adapter, &unsigned_seconds,
+ &nano_seconds, NULL);
+ }
+ unsigned_seconds -= div_u64_rem(time_step_ns, 1000000000LL,
+ &remainder);
+ nano_seconds_step = remainder;
+ if (nano_seconds < nano_seconds_step) {
+ unsigned_seconds--;
+ nano_seconds += 1000000000;
+ }
+ nano_seconds -= nano_seconds_step;
+ lan743x_ptp_clock_set(adapter, unsigned_seconds,
+ nano_seconds, 0);
+ return;
+ }
+
+ /* do clock step */
+ if (time_step_ns >= 0) {
+ abs_time_step_ns = (u64)(time_step_ns);
+ seconds = (s32)div_u64_rem(abs_time_step_ns, 1000000000,
+ &remainder);
+ nano_seconds = (u32)remainder;
+ } else {
+ abs_time_step_ns = (u64)(-time_step_ns);
+ seconds = -((s32)div_u64_rem(abs_time_step_ns, 1000000000,
+ &remainder));
+ nano_seconds = (u32)remainder;
+ if (nano_seconds > 0) {
+ /* subtracting nano seconds is not allowed
+ * convert to subtracting from seconds,
+ * and adding to nanoseconds
+ */
+ seconds--;
+ nano_seconds = (1000000000 - nano_seconds);
+ }
+ }
+
+ if (nano_seconds > 0) {
+ /* add 8 ns to cover the likely normal increment */
+ nano_seconds += 8;
+ }
+
+ if (nano_seconds >= 1000000000) {
+ /* carry into seconds */
+ seconds++;
+ nano_seconds -= 1000000000;
+ }
+
+ while (seconds) {
+ mutex_lock(&ptp->command_lock);
+ if (seconds > 0) {
+ u32 adjustment_value = (u32)seconds;
+
+ if (adjustment_value > 0xF)
+ adjustment_value = 0xF;
+ lan743x_csr_write(adapter, PTP_CLOCK_STEP_ADJ,
+ PTP_CLOCK_STEP_ADJ_DIR_ |
+ adjustment_value);
+ seconds -= ((s32)adjustment_value);
+ } else {
+ u32 adjustment_value = (u32)(-seconds);
+
+ if (adjustment_value > 0xF)
+ adjustment_value = 0xF;
+ lan743x_csr_write(adapter, PTP_CLOCK_STEP_ADJ,
+ adjustment_value);
+ seconds += ((s32)adjustment_value);
+ }
+ lan743x_csr_write(adapter, PTP_CMD_CTL,
+ PTP_CMD_CTL_PTP_CLOCK_STEP_SEC_);
+ lan743x_ptp_wait_till_cmd_done(adapter,
+ PTP_CMD_CTL_PTP_CLOCK_STEP_SEC_);
+ mutex_unlock(&ptp->command_lock);
+ }
+ if (nano_seconds) {
+ mutex_lock(&ptp->command_lock);
+ lan743x_csr_write(adapter, PTP_CLOCK_STEP_ADJ,
+ PTP_CLOCK_STEP_ADJ_DIR_ |
+ (nano_seconds &
+ PTP_CLOCK_STEP_ADJ_VALUE_MASK_));
+ lan743x_csr_write(adapter, PTP_CMD_CTL,
+ PTP_CMD_CTL_PTP_CLK_STP_NSEC_);
+ lan743x_ptp_wait_till_cmd_done(adapter,
+ PTP_CMD_CTL_PTP_CLK_STP_NSEC_);
+ mutex_unlock(&ptp->command_lock);
+ }
+}
+
+void lan743x_ptp_isr(void *context)
+{
+ struct lan743x_adapter *adapter = (struct lan743x_adapter *)context;
+ struct lan743x_ptp *ptp = NULL;
+ int enable_flag = 1;
+ u32 ptp_int_sts = 0;
+
+ ptp = &adapter->ptp;
+
+ lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_1588_);
+
+ ptp_int_sts = lan743x_csr_read(adapter, PTP_INT_STS);
+ ptp_int_sts &= lan743x_csr_read(adapter, PTP_INT_EN_SET);
+
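+	/* TX timestamps are read out by the PTP aux worker rather than in
+	 * the ISR; the worker re-enables the 1588 interrupt when done.
+	 */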
+ if (ptp_int_sts & PTP_INT_BIT_TX_TS_) {
+ ptp_schedule_worker(ptp->ptp_clock, 0);
+		enable_flag = 0;	/* the PTP aux worker re-enables it */
+ }
+ if (ptp_int_sts & PTP_INT_BIT_TX_SWTS_ERR_) {
+ netif_err(adapter, drv, adapter->netdev,
+ "PTP TX Software Timestamp Error\n");
+ /* clear int status bit */
+ lan743x_csr_write(adapter, PTP_INT_STS,
+ PTP_INT_BIT_TX_SWTS_ERR_);
+ }
+ if (ptp_int_sts & PTP_INT_BIT_TIMER_B_) {
+ /* clear int status bit */
+ lan743x_csr_write(adapter, PTP_INT_STS,
+ PTP_INT_BIT_TIMER_B_);
+ }
+ if (ptp_int_sts & PTP_INT_BIT_TIMER_A_) {
+ /* clear int status bit */
+ lan743x_csr_write(adapter, PTP_INT_STS,
+ PTP_INT_BIT_TIMER_A_);
+ }
+
+ if (enable_flag) {
+ /* re-enable isr */
+ lan743x_csr_write(adapter, INT_EN_SET, INT_BIT_1588_);
+ }
+}
+
+static void lan743x_ptp_tx_ts_enqueue_skb(struct lan743x_adapter *adapter,
+ struct sk_buff *skb, bool ignore_sync)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+
+ spin_lock_bh(&ptp->tx_ts_lock);
+ if (ptp->tx_ts_skb_queue_size < LAN743X_PTP_NUMBER_OF_TX_TIMESTAMPS) {
+ ptp->tx_ts_skb_queue[ptp->tx_ts_skb_queue_size] = skb;
+ if (ignore_sync)
+ ptp->tx_ts_ignore_sync_queue |=
+ BIT(ptp->tx_ts_skb_queue_size);
+ ptp->tx_ts_skb_queue_size++;
+ } else {
+		/* this should never happen, as long as the tx channel
+		 * calls lan743x_ptp_request_tx_timestamp and honors
+		 * its result
+		 */
+ netif_err(adapter, drv, adapter->netdev,
+ "tx ts skb queue overflow\n");
+ dev_kfree_skb(skb);
+ }
+ spin_unlock_bh(&ptp->tx_ts_lock);
+}
+
+static void lan743x_ptp_sync_to_system_clock(struct lan743x_adapter *adapter)
+{
+ struct timespec64 ts;
+
+ ktime_get_clocktai_ts64(&ts);
+
+ lan743x_ptp_clock_set(adapter, ts.tv_sec, ts.tv_nsec, 0);
+}
+
+void lan743x_ptp_update_latency(struct lan743x_adapter *adapter,
+ u32 link_speed)
+{
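+	/* Device-specific constants that compensate for the fixed TX/RX
+	 * timestamping latency at each link speed.
+	 */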
+ switch (link_speed) {
+ case 10:
+ lan743x_csr_write(adapter, PTP_LATENCY,
+ PTP_LATENCY_TX_SET_(0) |
+ PTP_LATENCY_RX_SET_(0));
+ break;
+ case 100:
+ lan743x_csr_write(adapter, PTP_LATENCY,
+ PTP_LATENCY_TX_SET_(181) |
+ PTP_LATENCY_RX_SET_(594));
+ break;
+ case 1000:
+ lan743x_csr_write(adapter, PTP_LATENCY,
+ PTP_LATENCY_TX_SET_(30) |
+ PTP_LATENCY_RX_SET_(525));
+ break;
+ }
+}
+
+int lan743x_ptp_init(struct lan743x_adapter *adapter)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+ int i;
+
+ mutex_init(&ptp->command_lock);
+ spin_lock_init(&ptp->tx_ts_lock);
+ ptp->used_event_ch = 0;
+
+ for (i = 0; i < LAN743X_PTP_N_EVENT_CHAN; i++) {
+ ptp->perout[i].event_ch = -1;
+ ptp->perout[i].gpio_pin = -1;
+ }
+
+ lan743x_led_mux_save(adapter);
+
+ return 0;
+}
+
+int lan743x_ptp_open(struct lan743x_adapter *adapter)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+ int ret = -ENODEV;
+ u32 temp;
+ int i;
+ int n_pins;
+
+ lan743x_ptp_reset(adapter);
+ lan743x_ptp_sync_to_system_clock(adapter);
+ temp = lan743x_csr_read(adapter, PTP_TX_MOD2);
+ temp |= PTP_TX_MOD2_TX_PTP_CLR_UDPV4_CHKSUM_;
+ lan743x_csr_write(adapter, PTP_TX_MOD2, temp);
+ lan743x_ptp_enable(adapter);
+ lan743x_csr_write(adapter, INT_EN_SET, INT_BIT_1588_);
+ lan743x_csr_write(adapter, PTP_INT_EN_SET,
+ PTP_INT_BIT_TX_SWTS_ERR_ | PTP_INT_BIT_TX_TS_);
+ ptp->flags |= PTP_FLAG_ISR_ENABLED;
+
+ if (!IS_ENABLED(CONFIG_PTP_1588_CLOCK))
+ return 0;
+
+ switch (adapter->csr.id_rev & ID_REV_ID_MASK_) {
+ case ID_REV_ID_LAN7430_:
+ n_pins = LAN7430_N_GPIO;
+ break;
+ case ID_REV_ID_LAN7431_:
+ case ID_REV_ID_A011_:
+ case ID_REV_ID_A041_:
+ n_pins = LAN7431_N_GPIO;
+ break;
+ default:
+ netif_warn(adapter, drv, adapter->netdev,
+ "Unknown LAN743x (%08x). Assuming no GPIO\n",
+ adapter->csr.id_rev);
+ n_pins = 0;
+ break;
+ }
+
+ if (n_pins > LAN743X_PTP_N_GPIO)
+ n_pins = LAN743X_PTP_N_GPIO;
+
+ for (i = 0; i < n_pins; i++) {
+ struct ptp_pin_desc *ptp_pin = &ptp->pin_config[i];
+
+ snprintf(ptp_pin->name,
+ sizeof(ptp_pin->name), "lan743x_ptp_pin_%02d", i);
+ ptp_pin->index = i;
+ ptp_pin->func = PTP_PF_NONE;
+ }
+
+ ptp->ptp_clock_info.owner = THIS_MODULE;
+ snprintf(ptp->ptp_clock_info.name, 16, "%pm",
+ adapter->netdev->dev_addr);
+ ptp->ptp_clock_info.max_adj = LAN743X_PTP_MAX_FREQ_ADJ_IN_PPB;
+ ptp->ptp_clock_info.n_alarm = 0;
+ ptp->ptp_clock_info.n_ext_ts = LAN743X_PTP_N_EXTTS;
+ ptp->ptp_clock_info.n_per_out = LAN743X_PTP_N_EVENT_CHAN;
+ ptp->ptp_clock_info.n_pins = n_pins;
+ ptp->ptp_clock_info.pps = LAN743X_PTP_N_PPS;
+ ptp->ptp_clock_info.pin_config = ptp->pin_config;
+ ptp->ptp_clock_info.adjfine = lan743x_ptpci_adjfine;
+ ptp->ptp_clock_info.adjtime = lan743x_ptpci_adjtime;
+ ptp->ptp_clock_info.gettime64 = lan743x_ptpci_gettime64;
+ ptp->ptp_clock_info.getcrosststamp = NULL;
+ ptp->ptp_clock_info.settime64 = lan743x_ptpci_settime64;
+ ptp->ptp_clock_info.enable = lan743x_ptpci_enable;
+ ptp->ptp_clock_info.do_aux_work = lan743x_ptpci_do_aux_work;
+ ptp->ptp_clock_info.verify = lan743x_ptpci_verify_pin_config;
+
+ ptp->ptp_clock = ptp_clock_register(&ptp->ptp_clock_info,
+ &adapter->pdev->dev);
+
+ if (IS_ERR(ptp->ptp_clock)) {
+ netif_err(adapter, ifup, adapter->netdev,
+ "ptp_clock_register failed\n");
+ goto done;
+ }
+ ptp->flags |= PTP_FLAG_PTP_CLOCK_REGISTERED;
+ netif_info(adapter, ifup, adapter->netdev,
+ "successfully registered ptp clock\n");
+
+ return 0;
+done:
+ lan743x_ptp_close(adapter);
+ return ret;
+}
+
+void lan743x_ptp_close(struct lan743x_adapter *adapter)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+ int index;
+
+ if (IS_ENABLED(CONFIG_PTP_1588_CLOCK) &&
+ (ptp->flags & PTP_FLAG_PTP_CLOCK_REGISTERED)) {
+ ptp_clock_unregister(ptp->ptp_clock);
+ ptp->ptp_clock = NULL;
+ ptp->flags &= ~PTP_FLAG_PTP_CLOCK_REGISTERED;
+ netif_info(adapter, drv, adapter->netdev,
+ "ptp clock unregister\n");
+ }
+
+ if (ptp->flags & PTP_FLAG_ISR_ENABLED) {
+ lan743x_csr_write(adapter, PTP_INT_EN_CLR,
+ PTP_INT_BIT_TX_SWTS_ERR_ |
+ PTP_INT_BIT_TX_TS_);
+ lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_1588_);
+ ptp->flags &= ~PTP_FLAG_ISR_ENABLED;
+ }
+
+ /* clean up pending timestamp requests */
+ lan743x_ptp_tx_ts_complete(adapter);
+ spin_lock_bh(&ptp->tx_ts_lock);
+ for (index = 0;
+ index < LAN743X_PTP_NUMBER_OF_TX_TIMESTAMPS;
+ index++) {
+ struct sk_buff *skb = ptp->tx_ts_skb_queue[index];
+
+ dev_kfree_skb(skb);
+ ptp->tx_ts_skb_queue[index] = NULL;
+ ptp->tx_ts_seconds_queue[index] = 0;
+ ptp->tx_ts_nseconds_queue[index] = 0;
+ }
+ ptp->tx_ts_skb_queue_size = 0;
+ ptp->tx_ts_queue_size = 0;
+ ptp->pending_tx_timestamps = 0;
+ spin_unlock_bh(&ptp->tx_ts_lock);
+
+ lan743x_led_mux_restore(adapter);
+
+ lan743x_ptp_disable(adapter);
+}
+
+static void lan743x_ptp_set_sync_ts_insert(struct lan743x_adapter *adapter,
+ bool ts_insert_enable)
+{
+ u32 ptp_tx_mod = lan743x_csr_read(adapter, PTP_TX_MOD);
+
+ if (ts_insert_enable)
+ ptp_tx_mod |= PTP_TX_MOD_TX_PTP_SYNC_TS_INSERT_;
+ else
+ ptp_tx_mod &= ~PTP_TX_MOD_TX_PTP_SYNC_TS_INSERT_;
+
+ lan743x_csr_write(adapter, PTP_TX_MOD, ptp_tx_mod);
+}
+
+static bool lan743x_ptp_is_enabled(struct lan743x_adapter *adapter)
+{
+ if (lan743x_csr_read(adapter, PTP_CMD_CTL) & PTP_CMD_CTL_PTP_ENABLE_)
+ return true;
+ return false;
+}
+
+static void lan743x_ptp_enable(struct lan743x_adapter *adapter)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+
+ mutex_lock(&ptp->command_lock);
+
+ if (lan743x_ptp_is_enabled(adapter)) {
+ netif_warn(adapter, drv, adapter->netdev,
+ "PTP already enabled\n");
+ goto done;
+ }
+ lan743x_csr_write(adapter, PTP_CMD_CTL, PTP_CMD_CTL_PTP_ENABLE_);
+done:
+ mutex_unlock(&ptp->command_lock);
+}
+
+static void lan743x_ptp_disable(struct lan743x_adapter *adapter)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+
+ mutex_lock(&ptp->command_lock);
+ if (!lan743x_ptp_is_enabled(adapter)) {
+ netif_warn(adapter, drv, adapter->netdev,
+ "PTP already disabled\n");
+ goto done;
+ }
+ lan743x_csr_write(adapter, PTP_CMD_CTL, PTP_CMD_CTL_PTP_DISABLE_);
+ lan743x_ptp_wait_till_cmd_done(adapter, PTP_CMD_CTL_PTP_ENABLE_);
+done:
+ mutex_unlock(&ptp->command_lock);
+}
+
+static void lan743x_ptp_reset(struct lan743x_adapter *adapter)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+
+ mutex_lock(&ptp->command_lock);
+
+ if (lan743x_ptp_is_enabled(adapter)) {
+ netif_err(adapter, drv, adapter->netdev,
+ "Attempting reset while enabled\n");
+ goto done;
+ }
+
+ lan743x_csr_write(adapter, PTP_CMD_CTL, PTP_CMD_CTL_PTP_RESET_);
+ lan743x_ptp_wait_till_cmd_done(adapter, PTP_CMD_CTL_PTP_RESET_);
+done:
+ mutex_unlock(&ptp->command_lock);
+}
+
+static void lan743x_ptp_clock_set(struct lan743x_adapter *adapter,
+ u32 seconds, u32 nano_seconds,
+ u32 sub_nano_seconds)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+
+ mutex_lock(&ptp->command_lock);
+
+ lan743x_csr_write(adapter, PTP_CLOCK_SEC, seconds);
+ lan743x_csr_write(adapter, PTP_CLOCK_NS, nano_seconds);
+ lan743x_csr_write(adapter, PTP_CLOCK_SUBNS, sub_nano_seconds);
+
+ lan743x_csr_write(adapter, PTP_CMD_CTL, PTP_CMD_CTL_PTP_CLOCK_LOAD_);
+ lan743x_ptp_wait_till_cmd_done(adapter, PTP_CMD_CTL_PTP_CLOCK_LOAD_);
+ mutex_unlock(&ptp->command_lock);
+}
+
+bool lan743x_ptp_request_tx_timestamp(struct lan743x_adapter *adapter)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+ bool result = false;
+
+ spin_lock_bh(&ptp->tx_ts_lock);
+ if (ptp->pending_tx_timestamps < LAN743X_PTP_NUMBER_OF_TX_TIMESTAMPS) {
+ /* request granted */
+ ptp->pending_tx_timestamps++;
+ result = true;
+ }
+ spin_unlock_bh(&ptp->tx_ts_lock);
+ return result;
+}
+
+void lan743x_ptp_unrequest_tx_timestamp(struct lan743x_adapter *adapter)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+
+ spin_lock_bh(&ptp->tx_ts_lock);
+ if (ptp->pending_tx_timestamps > 0)
+ ptp->pending_tx_timestamps--;
+ else
+ netif_err(adapter, drv, adapter->netdev,
+ "unrequest failed, pending_tx_timestamps==0\n");
+ spin_unlock_bh(&ptp->tx_ts_lock);
+}
+
+void lan743x_ptp_tx_timestamp_skb(struct lan743x_adapter *adapter,
+ struct sk_buff *skb, bool ignore_sync)
+{
+ lan743x_ptp_tx_ts_enqueue_skb(adapter, skb, ignore_sync);
+
+ lan743x_ptp_tx_ts_complete(adapter);
+}
+
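+/* SIOCSHWTSTAMP handler; reached e.g. via linuxptp's
+ * "hwstamp_ctl -i <iface> -t 1" (HWTSTAMP_TX_ON).
+ */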
+int lan743x_ptp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+ struct hwtstamp_config config;
+ int ret = 0;
+ int index;
+
+ if (!ifr) {
+ netif_err(adapter, drv, adapter->netdev,
+ "SIOCSHWTSTAMP, ifr == NULL\n");
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ for (index = 0; index < adapter->used_tx_channels;
+ index++)
+ lan743x_tx_set_timestamping_mode(&adapter->tx[index],
+ false, false);
+ lan743x_ptp_set_sync_ts_insert(adapter, false);
+ break;
+ case HWTSTAMP_TX_ON:
+ for (index = 0; index < adapter->used_tx_channels;
+ index++)
+ lan743x_tx_set_timestamping_mode(&adapter->tx[index],
+ true, false);
+ lan743x_ptp_set_sync_ts_insert(adapter, false);
+ break;
+ case HWTSTAMP_TX_ONESTEP_SYNC:
+ for (index = 0; index < adapter->used_tx_channels;
+ index++)
+ lan743x_tx_set_timestamping_mode(&adapter->tx[index],
+ true, true);
+
+ lan743x_ptp_set_sync_ts_insert(adapter, true);
+ break;
+ case HWTSTAMP_TX_ONESTEP_P2P:
+ ret = -ERANGE;
+ break;
+ default:
+ netif_warn(adapter, drv, adapter->netdev,
+ " tx_type = %d, UNKNOWN\n", config.tx_type);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (!ret)
+ return copy_to_user(ifr->ifr_data, &config,
+ sizeof(config)) ? -EFAULT : 0;
+ return ret;
+}
diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.h b/drivers/net/ethernet/microchip/lan743x_ptp.h
new file mode 100644
index 0000000000..e26d4eff71
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan743x_ptp.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (C) 2018 Microchip Technology Inc. */
+
+#ifndef _LAN743X_PTP_H
+#define _LAN743X_PTP_H
+
+#include "linux/ptp_clock_kernel.h"
+#include "linux/netdevice.h"
+
+#define LAN7430_N_LED 4
+#define LAN7430_N_GPIO 4 /* multiplexed with PHY LEDs */
+#define LAN7431_N_GPIO 12
+
+#define LAN743X_PTP_N_GPIO LAN7431_N_GPIO
+
+/* The number of periodic outputs is limited by the number of
+ * PTP clock event channels.
+ */
+#define LAN743X_PTP_N_EVENT_CHAN 2
+#define LAN743X_PTP_N_PEROUT LAN743X_PTP_N_EVENT_CHAN
+#define LAN743X_PTP_N_EXTTS 4
+#define LAN743X_PTP_N_PPS 0
+#define PCI11X1X_PTP_IO_MAX_CHANNELS 8
+
+struct lan743x_adapter;
+
+/* GPIO */
+struct lan743x_gpio {
+ /* gpio_lock: used to prevent concurrent access to gpio settings */
+ spinlock_t gpio_lock;
+
+ int used_bits;
+ int output_bits;
+ int ptp_bits;
+ u32 gpio_cfg0;
+ u32 gpio_cfg1;
+ u32 gpio_cfg2;
+ u32 gpio_cfg3;
+};
+
+int lan743x_gpio_init(struct lan743x_adapter *adapter);
+
+void lan743x_ptp_isr(void *context);
+bool lan743x_ptp_request_tx_timestamp(struct lan743x_adapter *adapter);
+void lan743x_ptp_unrequest_tx_timestamp(struct lan743x_adapter *adapter);
+void lan743x_ptp_tx_timestamp_skb(struct lan743x_adapter *adapter,
+ struct sk_buff *skb, bool ignore_sync);
+int lan743x_ptp_init(struct lan743x_adapter *adapter);
+int lan743x_ptp_open(struct lan743x_adapter *adapter);
+void lan743x_ptp_close(struct lan743x_adapter *adapter);
+void lan743x_ptp_update_latency(struct lan743x_adapter *adapter,
+ u32 link_speed);
+
+int lan743x_ptp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
+
+#define LAN743X_PTP_NUMBER_OF_TX_TIMESTAMPS (4)
+
+#define PTP_FLAG_PTP_CLOCK_REGISTERED BIT(1)
+#define PTP_FLAG_ISR_ENABLED BIT(2)
+
+struct lan743x_ptp_perout {
+ int event_ch; /* PTP event channel (0=channel A, 1=channel B) */
+ int gpio_pin; /* GPIO pin where output appears */
+};
+
+struct lan743x_extts {
+ int flags;
+ struct timespec64 ts;
+};
+
+struct lan743x_ptp {
+ int flags;
+
+ /* command_lock: used to prevent concurrent ptp commands */
+ struct mutex command_lock;
+
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_clock_info;
+ struct ptp_pin_desc pin_config[LAN743X_PTP_N_GPIO];
+
+ unsigned long used_event_ch;
+ struct lan743x_ptp_perout perout[LAN743X_PTP_N_PEROUT];
+ int ptp_io_perout[LAN743X_PTP_N_PEROUT]; /* PTP event channel (0=channel A, 1=channel B) */
+ struct lan743x_extts extts[LAN743X_PTP_N_EXTTS];
+
+ bool leds_multiplexed;
+ bool led_enabled[LAN7430_N_LED];
+
+ /* tx_ts_lock: used to prevent concurrent access to timestamp arrays */
+ spinlock_t tx_ts_lock;
+ int pending_tx_timestamps;
+ struct sk_buff *tx_ts_skb_queue[LAN743X_PTP_NUMBER_OF_TX_TIMESTAMPS];
+ unsigned int tx_ts_ignore_sync_queue;
+ int tx_ts_skb_queue_size;
+ u32 tx_ts_seconds_queue[LAN743X_PTP_NUMBER_OF_TX_TIMESTAMPS];
+ u32 tx_ts_nseconds_queue[LAN743X_PTP_NUMBER_OF_TX_TIMESTAMPS];
+ u32 tx_ts_header_queue[LAN743X_PTP_NUMBER_OF_TX_TIMESTAMPS];
+ int tx_ts_queue_size;
+};
+
+#endif /* _LAN743X_PTP_H */
diff --git a/drivers/net/ethernet/microchip/lan966x/Kconfig b/drivers/net/ethernet/microchip/lan966x/Kconfig
new file mode 100644
index 0000000000..f9ebffc04e
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/Kconfig
@@ -0,0 +1,23 @@
+config LAN966X_SWITCH
+ tristate "Lan966x switch driver"
+ depends on PTP_1588_CLOCK_OPTIONAL
+ depends on HAS_IOMEM
+ depends on OF
+ depends on NET_SWITCHDEV
+ depends on BRIDGE || BRIDGE=n
+ select PHYLINK
+ select PAGE_POOL
+ select VCAP
+ help
+ This driver supports the Lan966x network switch device.
+
+config LAN966X_DCB
+ bool "Data Center Bridging (DCB) support"
+ depends on LAN966X_SWITCH && DCB
+ default y
+ help
+ Say Y here if you want to use Data Center Bridging (DCB) in the
+ driver. This can be used to assign priority to traffic, based on
+ DSCP and PCP.
+
+	  If unsure, say Y.
diff --git a/drivers/net/ethernet/microchip/lan966x/Makefile b/drivers/net/ethernet/microchip/lan966x/Makefile
new file mode 100644
index 0000000000..3b6ac33169
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/Makefile
@@ -0,0 +1,22 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for the Microchip Lan966x network device drivers.
+#
+
+obj-$(CONFIG_LAN966X_SWITCH) += lan966x-switch.o
+
+lan966x-switch-objs := lan966x_main.o lan966x_phylink.o lan966x_port.o \
+ lan966x_mac.o lan966x_ethtool.o lan966x_switchdev.o \
+ lan966x_vlan.o lan966x_fdb.o lan966x_mdb.o \
+ lan966x_ptp.o lan966x_fdma.o lan966x_lag.o \
+ lan966x_tc.o lan966x_mqprio.o lan966x_taprio.o \
+ lan966x_tbf.o lan966x_cbs.o lan966x_ets.o \
+ lan966x_tc_matchall.o lan966x_police.o lan966x_mirror.o \
+ lan966x_xdp.o lan966x_vcap_impl.o lan966x_vcap_ag_api.o \
+ lan966x_tc_flower.o lan966x_goto.o
+
+lan966x-switch-$(CONFIG_LAN966X_DCB) += lan966x_dcb.o
+lan966x-switch-$(CONFIG_DEBUG_FS) += lan966x_vcap_debugfs.o
+
+# Provide include files
+ccflags-y += -I$(srctree)/drivers/net/ethernet/microchip/vcap
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_cbs.c b/drivers/net/ethernet/microchip/lan966x/lan966x_cbs.c
new file mode 100644
index 0000000000..70cbbf8d2b
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_cbs.c
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include "lan966x_main.h"
+
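+/* Offload a credit-based shaper (IEEE 802.1Qav) configured through tc,
+ * e.g. (illustrative values):
+ *   tc qdisc replace dev swp0 parent 100:2 cbs offload 1 \
+ *       idleslope 20000 sendslope -980000 hicredit 30 locredit -1470
+ */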
+int lan966x_cbs_add(struct lan966x_port *port,
+ struct tc_cbs_qopt_offload *qopt)
+{
+ struct lan966x *lan966x = port->lan966x;
+ u32 cir, cbs;
+ u8 se_idx;
+
+ /* Check for invalid values */
+ if (qopt->idleslope <= 0 ||
+ qopt->sendslope >= 0 ||
+ qopt->locredit >= qopt->hicredit)
+ return -EINVAL;
+
+ se_idx = SE_IDX_QUEUE + port->chip_port * NUM_PRIO_QUEUES + qopt->queue;
+ cir = qopt->idleslope;
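+	/* With sendslope = idleslope - port_rate (802.1Qav), the factor
+	 * (idleslope - sendslope) is the port rate, so the burst equals
+	 * port_rate * (hicredit - locredit) / (port_rate - idleslope).
+	 */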
+ cbs = (qopt->idleslope - qopt->sendslope) *
+ (qopt->hicredit - qopt->locredit) /
+ -qopt->sendslope;
+
+ /* Rate unit is 100 kbps */
+ cir = DIV_ROUND_UP(cir, 100);
+ /* Avoid using zero rate */
+ cir = cir ?: 1;
+ /* Burst unit is 4kB */
+ cbs = DIV_ROUND_UP(cbs, 4096);
+ /* Avoid using zero burst */
+ cbs = cbs ?: 1;
+
+	/* Check that the result actually fits in the hardware fields */
+ if (cir > GENMASK(15, 0) ||
+ cbs > GENMASK(6, 0))
+ return -EINVAL;
+
+ lan_rmw(QSYS_SE_CFG_SE_AVB_ENA_SET(1) |
+ QSYS_SE_CFG_SE_FRM_MODE_SET(1),
+ QSYS_SE_CFG_SE_AVB_ENA |
+ QSYS_SE_CFG_SE_FRM_MODE,
+ lan966x, QSYS_SE_CFG(se_idx));
+
+ lan_wr(QSYS_CIR_CFG_CIR_RATE_SET(cir) |
+ QSYS_CIR_CFG_CIR_BURST_SET(cbs),
+ lan966x, QSYS_CIR_CFG(se_idx));
+
+ return 0;
+}
+
+int lan966x_cbs_del(struct lan966x_port *port,
+ struct tc_cbs_qopt_offload *qopt)
+{
+ struct lan966x *lan966x = port->lan966x;
+ u8 se_idx;
+
+ se_idx = SE_IDX_QUEUE + port->chip_port * NUM_PRIO_QUEUES + qopt->queue;
+
+ lan_rmw(QSYS_SE_CFG_SE_AVB_ENA_SET(1) |
+ QSYS_SE_CFG_SE_FRM_MODE_SET(0),
+ QSYS_SE_CFG_SE_AVB_ENA |
+ QSYS_SE_CFG_SE_FRM_MODE,
+ lan966x, QSYS_SE_CFG(se_idx));
+
+ lan_wr(QSYS_CIR_CFG_CIR_RATE_SET(0) |
+ QSYS_CIR_CFG_CIR_BURST_SET(0),
+ lan966x, QSYS_CIR_CFG(se_idx));
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_dcb.c b/drivers/net/ethernet/microchip/lan966x/lan966x_dcb.c
new file mode 100644
index 0000000000..ed2d96d790
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_dcb.c
@@ -0,0 +1,365 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include "lan966x_main.h"
+
+enum lan966x_dcb_apptrust_values {
+ LAN966X_DCB_APPTRUST_EMPTY,
+ LAN966X_DCB_APPTRUST_DSCP,
+ LAN966X_DCB_APPTRUST_PCP,
+ LAN966X_DCB_APPTRUST_DSCP_PCP,
+ __LAN966X_DCB_APPTRUST_MAX
+};
+
+static const struct lan966x_dcb_apptrust {
+ u8 selectors[IEEE_8021QAZ_APP_SEL_MAX + 1];
+ int nselectors;
+} *lan966x_port_apptrust[NUM_PHYS_PORTS];
+
+static const char *lan966x_dcb_apptrust_names[__LAN966X_DCB_APPTRUST_MAX] = {
+ [LAN966X_DCB_APPTRUST_EMPTY] = "empty",
+ [LAN966X_DCB_APPTRUST_DSCP] = "dscp",
+ [LAN966X_DCB_APPTRUST_PCP] = "pcp",
+ [LAN966X_DCB_APPTRUST_DSCP_PCP] = "dscp pcp"
+};
+
+/* Lan966x supported apptrust policies */
+static const struct lan966x_dcb_apptrust
+ lan966x_dcb_apptrust_policies[__LAN966X_DCB_APPTRUST_MAX] = {
+ /* Empty *must* be first */
+ [LAN966X_DCB_APPTRUST_EMPTY] = { { 0 }, 0 },
+ [LAN966X_DCB_APPTRUST_DSCP] = { { IEEE_8021QAZ_APP_SEL_DSCP }, 1 },
+ [LAN966X_DCB_APPTRUST_PCP] = { { DCB_APP_SEL_PCP }, 1 },
+ [LAN966X_DCB_APPTRUST_DSCP_PCP] = { { IEEE_8021QAZ_APP_SEL_DSCP,
+ DCB_APP_SEL_PCP }, 2 },
+};
+
+static bool lan966x_dcb_apptrust_contains(int portno, u8 selector)
+{
+ const struct lan966x_dcb_apptrust *conf = lan966x_port_apptrust[portno];
+
+ for (int i = 0; i < conf->nselectors; i++)
+ if (conf->selectors[i] == selector)
+ return true;
+
+ return false;
+}
+
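+/* Collect the PCP/DSCP mappings, default priority and rewrite tables from
+ * the dcbnl app database and apply them to the port in one shot.
+ */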
+static void lan966x_dcb_app_update(struct net_device *dev)
+{
+ struct dcb_ieee_app_prio_map dscp_rewr_map = {0};
+ struct dcb_rewr_prio_pcp_map pcp_rewr_map = {0};
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x_port_qos qos = {0};
+ struct dcb_app app_itr;
+ bool dscp_rewr = false;
+ bool pcp_rewr = false;
+
+ /* Get pcp ingress mapping */
+ for (int i = 0; i < ARRAY_SIZE(qos.pcp.map); i++) {
+ app_itr.selector = DCB_APP_SEL_PCP;
+ app_itr.protocol = i;
+ qos.pcp.map[i] = dcb_getapp(dev, &app_itr);
+ }
+
+ /* Get dscp ingress mapping */
+ for (int i = 0; i < ARRAY_SIZE(qos.dscp.map); i++) {
+ app_itr.selector = IEEE_8021QAZ_APP_SEL_DSCP;
+ app_itr.protocol = i;
+ qos.dscp.map[i] = dcb_getapp(dev, &app_itr);
+ }
+
+ /* Get default prio */
+ qos.default_prio = dcb_ieee_getapp_default_prio_mask(dev);
+ if (qos.default_prio)
+ qos.default_prio = fls(qos.default_prio) - 1;
+
+ /* Get pcp rewrite mapping */
+ dcb_getrewr_prio_pcp_mask_map(dev, &pcp_rewr_map);
+ for (int i = 0; i < ARRAY_SIZE(pcp_rewr_map.map); i++) {
+ if (!pcp_rewr_map.map[i])
+ continue;
+
+ pcp_rewr = true;
+ qos.pcp_rewr.map[i] = fls(pcp_rewr_map.map[i]) - 1;
+ }
+
+ /* Get dscp rewrite mapping */
+ dcb_getrewr_prio_dscp_mask_map(dev, &dscp_rewr_map);
+ for (int i = 0; i < ARRAY_SIZE(dscp_rewr_map.map); i++) {
+ if (!dscp_rewr_map.map[i])
+ continue;
+
+ dscp_rewr = true;
+ qos.dscp_rewr.map[i] = fls64(dscp_rewr_map.map[i]) - 1;
+ }
+
+ /* Enable use of pcp for queue classification */
+ if (lan966x_dcb_apptrust_contains(port->chip_port, DCB_APP_SEL_PCP)) {
+ qos.pcp.enable = true;
+
+ if (pcp_rewr)
+ qos.pcp_rewr.enable = true;
+ }
+
+ /* Enable use of dscp for queue classification */
+ if (lan966x_dcb_apptrust_contains(port->chip_port, IEEE_8021QAZ_APP_SEL_DSCP)) {
+ qos.dscp.enable = true;
+
+ if (dscp_rewr)
+ qos.dscp_rewr.enable = true;
+ }
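+/* Map a DWRR weight to a hardware cost relative to the smallest weight in
+ * use. For example, with w_min = 20 a weight of 20 maps to cost 31 and a
+ * weight of 40 to cost 15; a lower cost buys a larger bandwidth share.
+ */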
+
+ lan966x_port_qos_set(port, &qos);
+}
+
+/* DSCP mapping is global for all ports, so set and delete app entries are
+ * replicated for each port.
+ */
+static int lan966x_dcb_ieee_dscp_setdel(struct net_device *dev,
+ struct dcb_app *app,
+ int (*setdel)(struct net_device *,
+ struct dcb_app *))
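+/* Offload a tc ETS root qdisc; an accepted configuration could look like
+ * (illustrative):
+ *   tc qdisc add dev swp0 root handle 100: ets bands 8 \
+ *       quanta 1000 2000 3000 4000 5000 6000 7000 8000 \
+ *       priomap 7 6 5 4 3 2 1 0
+ */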
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ int err;
+
+ for (int i = 0; i < NUM_PHYS_PORTS; i++) {
+ port = lan966x->ports[i];
+ if (!port)
+ continue;
+
+ err = setdel(port->dev, app);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int lan966x_dcb_app_validate(struct net_device *dev,
+ const struct dcb_app *app)
+{
+ int err = 0;
+
+ switch (app->selector) {
+ /* Default priority checks */
+ case IEEE_8021QAZ_APP_SEL_ETHERTYPE:
+ if (app->protocol)
+ err = -EINVAL;
+ else if (app->priority >= NUM_PRIO_QUEUES)
+ err = -ERANGE;
+ break;
+ /* Dscp checks */
+ case IEEE_8021QAZ_APP_SEL_DSCP:
+ if (app->protocol >= LAN966X_PORT_QOS_DSCP_COUNT)
+ err = -EINVAL;
+ else if (app->priority >= NUM_PRIO_QUEUES)
+ err = -ERANGE;
+ break;
+ /* Pcp checks */
+ case DCB_APP_SEL_PCP:
+ if (app->protocol >= LAN966X_PORT_QOS_PCP_DEI_COUNT)
+ err = -EINVAL;
+ else if (app->priority >= NUM_PRIO_QUEUES)
+ err = -ERANGE;
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ if (err)
+ netdev_err(dev, "Invalid entry: %d:%d\n", app->protocol,
+ app->priority);
+
+ return err;
+}
+
+static int lan966x_dcb_ieee_delapp(struct net_device *dev, struct dcb_app *app)
+{
+ int err;
+
+ if (app->selector == IEEE_8021QAZ_APP_SEL_DSCP)
+ err = lan966x_dcb_ieee_dscp_setdel(dev, app, dcb_ieee_delapp);
+ else
+ err = dcb_ieee_delapp(dev, app);
+
+ if (err)
+ return err;
+
+ lan966x_dcb_app_update(dev);
+
+ return 0;
+}
+
+static int lan966x_dcb_ieee_setapp(struct net_device *dev, struct dcb_app *app)
+{
+ struct dcb_app app_itr;
+ int err;
+ u8 prio;
+
+ err = lan966x_dcb_app_validate(dev, app);
+ if (err)
+ return err;
+
+ /* Delete current mapping, if it exists */
+ prio = dcb_getapp(dev, app);
+ if (prio) {
+ app_itr = *app;
+ app_itr.priority = prio;
+ lan966x_dcb_ieee_delapp(dev, &app_itr);
+ }
+
+ if (app->selector == IEEE_8021QAZ_APP_SEL_DSCP)
+ err = lan966x_dcb_ieee_dscp_setdel(dev, app, dcb_ieee_setapp);
+ else
+ err = dcb_ieee_setapp(dev, app);
+
+ if (err)
+ return err;
+
+ lan966x_dcb_app_update(dev);
+
+ return 0;
+}
+
+static int lan966x_dcb_apptrust_validate(struct net_device *dev,
+ u8 *selectors,
+ int nselectors)
+{
+ for (int i = 0; i < ARRAY_SIZE(lan966x_dcb_apptrust_policies); i++) {
+ bool match;
+
+ if (lan966x_dcb_apptrust_policies[i].nselectors != nselectors)
+ continue;
+
+ match = true;
+ for (int j = 0; j < nselectors; j++) {
+ if (lan966x_dcb_apptrust_policies[i].selectors[j] !=
+ *(selectors + j)) {
+ match = false;
+ break;
+ }
+ }
+ if (match)
+ return i;
+ }
+
+ netdev_err(dev, "Valid apptrust configurations are:\n");
+ for (int i = 0; i < ARRAY_SIZE(lan966x_dcb_apptrust_names); i++)
+ pr_info("order: %s\n", lan966x_dcb_apptrust_names[i]);
+
+ return -EOPNOTSUPP;
+}
+
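+/* Set the selectors the port trusts for ingress classification, e.g. with
+ * iproute2 (illustrative): dcb apptrust set dev swp0 order dscp pcp
+ */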
+static int lan966x_dcb_setapptrust(struct net_device *dev,
+ u8 *selectors,
+ int nselectors)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ int idx;
+
+ idx = lan966x_dcb_apptrust_validate(dev, selectors, nselectors);
+ if (idx < 0)
+ return idx;
+
+ lan966x_port_apptrust[port->chip_port] = &lan966x_dcb_apptrust_policies[idx];
+ lan966x_dcb_app_update(dev);
+
+ return 0;
+}
+
+static int lan966x_dcb_getapptrust(struct net_device *dev, u8 *selectors,
+ int *nselectors)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ const struct lan966x_dcb_apptrust *trust;
+
+ trust = lan966x_port_apptrust[port->chip_port];
+
+ memcpy(selectors, trust->selectors, trust->nselectors);
+ *nselectors = trust->nselectors;
+
+ return 0;
+}
+
+static int lan966x_dcb_delrewr(struct net_device *dev, struct dcb_app *app)
+{
+ int err;
+
+ if (app->selector == IEEE_8021QAZ_APP_SEL_DSCP)
+ err = lan966x_dcb_ieee_dscp_setdel(dev, app, dcb_delrewr);
+ else
+ err = dcb_delrewr(dev, app);
+
+ if (err < 0)
+ return err;
+
+ lan966x_dcb_app_update(dev);
+
+ return 0;
+}
+
+static int lan966x_dcb_setrewr(struct net_device *dev, struct dcb_app *app)
+{
+ struct dcb_app app_itr;
+ u16 proto;
+ int err;
+
+ err = lan966x_dcb_app_validate(dev, app);
+ if (err)
+ goto out;
+
+ /* Delete current mapping, if it exists. */
+ proto = dcb_getrewr(dev, app);
+ if (proto) {
+ app_itr = *app;
+ app_itr.protocol = proto;
+ lan966x_dcb_delrewr(dev, &app_itr);
+ }
+
+ if (app->selector == IEEE_8021QAZ_APP_SEL_DSCP)
+ err = lan966x_dcb_ieee_dscp_setdel(dev, app, dcb_setrewr);
+ else
+ err = dcb_setrewr(dev, app);
+
+ if (err)
+ goto out;
+
+ lan966x_dcb_app_update(dev);
+
+out:
+ return err;
+}
+
+static const struct dcbnl_rtnl_ops lan966x_dcbnl_ops = {
+ .ieee_setapp = lan966x_dcb_ieee_setapp,
+ .ieee_delapp = lan966x_dcb_ieee_delapp,
+ .dcbnl_setapptrust = lan966x_dcb_setapptrust,
+ .dcbnl_getapptrust = lan966x_dcb_getapptrust,
+ .dcbnl_setrewr = lan966x_dcb_setrewr,
+ .dcbnl_delrewr = lan966x_dcb_delrewr,
+};
+
+void lan966x_dcb_init(struct lan966x *lan966x)
+{
+ for (int p = 0; p < lan966x->num_phys_ports; ++p) {
+ struct lan966x_port *port;
+
+ port = lan966x->ports[p];
+ if (!port)
+ continue;
+
+ port->dev->dcbnl_ops = &lan966x_dcbnl_ops;
+
+ lan966x_port_apptrust[port->chip_port] =
+ &lan966x_dcb_apptrust_policies[LAN966X_DCB_APPTRUST_DSCP_PCP];
+
+ /* Enable DSCP classification based on classified QoS class and
+ * DP, for all DSCP values, for all ports.
+ */
+ lan966x_port_qos_dscp_rewr_mode_set(port,
+ LAN966X_PORT_QOS_REWR_DSCP_ALL);
+ }
+}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c b/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c
new file mode 100644
index 0000000000..06811c60d5
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c
@@ -0,0 +1,727 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/netdevice.h>
+
+#include "lan966x_main.h"
+
+/* Number of traffic classes */
+#define LAN966X_NUM_TC 8
+#define LAN966X_STATS_CHECK_DELAY (2 * HZ)
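+/* Hardware counters are 32 bit; poll them often enough that
+ * lan966x_add_cnt() sees at most one wrap between samples.
+ */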
+
+static const struct lan966x_stat_layout lan966x_stats_layout[] = {
+ { .name = "rx_octets", .offset = 0x00, },
+ { .name = "rx_unicast", .offset = 0x01, },
+ { .name = "rx_multicast", .offset = 0x02 },
+ { .name = "rx_broadcast", .offset = 0x03 },
+ { .name = "rx_short", .offset = 0x04 },
+ { .name = "rx_frag", .offset = 0x05 },
+ { .name = "rx_jabber", .offset = 0x06 },
+ { .name = "rx_crc", .offset = 0x07 },
+ { .name = "rx_symbol_err", .offset = 0x08 },
+ { .name = "rx_sz_64", .offset = 0x09 },
+ { .name = "rx_sz_65_127", .offset = 0x0a},
+ { .name = "rx_sz_128_255", .offset = 0x0b},
+ { .name = "rx_sz_256_511", .offset = 0x0c },
+ { .name = "rx_sz_512_1023", .offset = 0x0d },
+ { .name = "rx_sz_1024_1526", .offset = 0x0e },
+ { .name = "rx_sz_jumbo", .offset = 0x0f },
+ { .name = "rx_pause", .offset = 0x10 },
+ { .name = "rx_control", .offset = 0x11 },
+ { .name = "rx_long", .offset = 0x12 },
+ { .name = "rx_cat_drop", .offset = 0x13 },
+ { .name = "rx_red_prio_0", .offset = 0x14 },
+ { .name = "rx_red_prio_1", .offset = 0x15 },
+ { .name = "rx_red_prio_2", .offset = 0x16 },
+ { .name = "rx_red_prio_3", .offset = 0x17 },
+ { .name = "rx_red_prio_4", .offset = 0x18 },
+ { .name = "rx_red_prio_5", .offset = 0x19 },
+ { .name = "rx_red_prio_6", .offset = 0x1a },
+ { .name = "rx_red_prio_7", .offset = 0x1b },
+ { .name = "rx_yellow_prio_0", .offset = 0x1c },
+ { .name = "rx_yellow_prio_1", .offset = 0x1d },
+ { .name = "rx_yellow_prio_2", .offset = 0x1e },
+ { .name = "rx_yellow_prio_3", .offset = 0x1f },
+ { .name = "rx_yellow_prio_4", .offset = 0x20 },
+ { .name = "rx_yellow_prio_5", .offset = 0x21 },
+ { .name = "rx_yellow_prio_6", .offset = 0x22 },
+ { .name = "rx_yellow_prio_7", .offset = 0x23 },
+ { .name = "rx_green_prio_0", .offset = 0x24 },
+ { .name = "rx_green_prio_1", .offset = 0x25 },
+ { .name = "rx_green_prio_2", .offset = 0x26 },
+ { .name = "rx_green_prio_3", .offset = 0x27 },
+ { .name = "rx_green_prio_4", .offset = 0x28 },
+ { .name = "rx_green_prio_5", .offset = 0x29 },
+ { .name = "rx_green_prio_6", .offset = 0x2a },
+ { .name = "rx_green_prio_7", .offset = 0x2b },
+ { .name = "rx_assembly_err", .offset = 0x2c },
+ { .name = "rx_smd_err", .offset = 0x2d },
+ { .name = "rx_assembly_ok", .offset = 0x2e },
+ { .name = "rx_merge_frag", .offset = 0x2f },
+ { .name = "rx_pmac_octets", .offset = 0x30, },
+ { .name = "rx_pmac_unicast", .offset = 0x31, },
+ { .name = "rx_pmac_multicast", .offset = 0x32 },
+ { .name = "rx_pmac_broadcast", .offset = 0x33 },
+ { .name = "rx_pmac_short", .offset = 0x34 },
+ { .name = "rx_pmac_frag", .offset = 0x35 },
+ { .name = "rx_pmac_jabber", .offset = 0x36 },
+ { .name = "rx_pmac_crc", .offset = 0x37 },
+ { .name = "rx_pmac_symbol_err", .offset = 0x38 },
+ { .name = "rx_pmac_sz_64", .offset = 0x39 },
+ { .name = "rx_pmac_sz_65_127", .offset = 0x3a },
+ { .name = "rx_pmac_sz_128_255", .offset = 0x3b },
+ { .name = "rx_pmac_sz_256_511", .offset = 0x3c },
+ { .name = "rx_pmac_sz_512_1023", .offset = 0x3d },
+ { .name = "rx_pmac_sz_1024_1526", .offset = 0x3e },
+ { .name = "rx_pmac_sz_jumbo", .offset = 0x3f },
+ { .name = "rx_pmac_pause", .offset = 0x40 },
+ { .name = "rx_pmac_control", .offset = 0x41 },
+ { .name = "rx_pmac_long", .offset = 0x42 },
+
+ { .name = "tx_octets", .offset = 0x80, },
+ { .name = "tx_unicast", .offset = 0x81, },
+ { .name = "tx_multicast", .offset = 0x82 },
+ { .name = "tx_broadcast", .offset = 0x83 },
+ { .name = "tx_col", .offset = 0x84 },
+ { .name = "tx_drop", .offset = 0x85 },
+ { .name = "tx_pause", .offset = 0x86 },
+ { .name = "tx_sz_64", .offset = 0x87 },
+ { .name = "tx_sz_65_127", .offset = 0x88 },
+ { .name = "tx_sz_128_255", .offset = 0x89 },
+ { .name = "tx_sz_256_511", .offset = 0x8a },
+ { .name = "tx_sz_512_1023", .offset = 0x8b },
+ { .name = "tx_sz_1024_1526", .offset = 0x8c },
+ { .name = "tx_sz_jumbo", .offset = 0x8d },
+ { .name = "tx_yellow_prio_0", .offset = 0x8e },
+ { .name = "tx_yellow_prio_1", .offset = 0x8f },
+ { .name = "tx_yellow_prio_2", .offset = 0x90 },
+ { .name = "tx_yellow_prio_3", .offset = 0x91 },
+ { .name = "tx_yellow_prio_4", .offset = 0x92 },
+ { .name = "tx_yellow_prio_5", .offset = 0x93 },
+ { .name = "tx_yellow_prio_6", .offset = 0x94 },
+ { .name = "tx_yellow_prio_7", .offset = 0x95 },
+ { .name = "tx_green_prio_0", .offset = 0x96 },
+ { .name = "tx_green_prio_1", .offset = 0x97 },
+ { .name = "tx_green_prio_2", .offset = 0x98 },
+ { .name = "tx_green_prio_3", .offset = 0x99 },
+ { .name = "tx_green_prio_4", .offset = 0x9a },
+ { .name = "tx_green_prio_5", .offset = 0x9b },
+ { .name = "tx_green_prio_6", .offset = 0x9c },
+ { .name = "tx_green_prio_7", .offset = 0x9d },
+ { .name = "tx_aged", .offset = 0x9e },
+ { .name = "tx_llct", .offset = 0x9f },
+ { .name = "tx_ct", .offset = 0xa0 },
+ { .name = "tx_mm_hold", .offset = 0xa1 },
+ { .name = "tx_merge_frag", .offset = 0xa2 },
+ { .name = "tx_pmac_octets", .offset = 0xa3, },
+ { .name = "tx_pmac_unicast", .offset = 0xa4, },
+ { .name = "tx_pmac_multicast", .offset = 0xa5 },
+ { .name = "tx_pmac_broadcast", .offset = 0xa6 },
+ { .name = "tx_pmac_pause", .offset = 0xa7 },
+ { .name = "tx_pmac_sz_64", .offset = 0xa8 },
+ { .name = "tx_pmac_sz_65_127", .offset = 0xa9 },
+ { .name = "tx_pmac_sz_128_255", .offset = 0xaa },
+ { .name = "tx_pmac_sz_256_511", .offset = 0xab },
+ { .name = "tx_pmac_sz_512_1023", .offset = 0xac },
+ { .name = "tx_pmac_sz_1024_1526", .offset = 0xad },
+ { .name = "tx_pmac_sz_jumbo", .offset = 0xae },
+
+ { .name = "dr_local", .offset = 0x100 },
+ { .name = "dr_tail", .offset = 0x101 },
+ { .name = "dr_yellow_prio_0", .offset = 0x102 },
+ { .name = "dr_yellow_prio_1", .offset = 0x103 },
+ { .name = "dr_yellow_prio_2", .offset = 0x104 },
+ { .name = "dr_yellow_prio_3", .offset = 0x105 },
+ { .name = "dr_yellow_prio_4", .offset = 0x106 },
+ { .name = "dr_yellow_prio_5", .offset = 0x107 },
+ { .name = "dr_yellow_prio_6", .offset = 0x108 },
+ { .name = "dr_yellow_prio_7", .offset = 0x109 },
+ { .name = "dr_green_prio_0", .offset = 0x10a },
+ { .name = "dr_green_prio_1", .offset = 0x10b },
+ { .name = "dr_green_prio_2", .offset = 0x10c },
+ { .name = "dr_green_prio_3", .offset = 0x10d },
+ { .name = "dr_green_prio_4", .offset = 0x10e },
+ { .name = "dr_green_prio_5", .offset = 0x10f },
+ { .name = "dr_green_prio_6", .offset = 0x110 },
+ { .name = "dr_green_prio_7", .offset = 0x111 },
+};
+
+/* The following numbers are indexes into lan966x_stats_layout[] */
+#define SYS_COUNT_RX_OCT 0
+#define SYS_COUNT_RX_UC 1
+#define SYS_COUNT_RX_MC 2
+#define SYS_COUNT_RX_BC 3
+#define SYS_COUNT_RX_SHORT 4
+#define SYS_COUNT_RX_FRAG 5
+#define SYS_COUNT_RX_JABBER 6
+#define SYS_COUNT_RX_CRC 7
+#define SYS_COUNT_RX_SYMBOL_ERR 8
+#define SYS_COUNT_RX_SZ_64 9
+#define SYS_COUNT_RX_SZ_65_127 10
+#define SYS_COUNT_RX_SZ_128_255 11
+#define SYS_COUNT_RX_SZ_256_511 12
+#define SYS_COUNT_RX_SZ_512_1023 13
+#define SYS_COUNT_RX_SZ_1024_1526 14
+#define SYS_COUNT_RX_SZ_JUMBO 15
+#define SYS_COUNT_RX_PAUSE 16
+#define SYS_COUNT_RX_CONTROL 17
+#define SYS_COUNT_RX_LONG 18
+#define SYS_COUNT_RX_CAT_DROP 19
+#define SYS_COUNT_RX_RED_PRIO_0 20
+#define SYS_COUNT_RX_RED_PRIO_1 21
+#define SYS_COUNT_RX_RED_PRIO_2 22
+#define SYS_COUNT_RX_RED_PRIO_3 23
+#define SYS_COUNT_RX_RED_PRIO_4 24
+#define SYS_COUNT_RX_RED_PRIO_5 25
+#define SYS_COUNT_RX_RED_PRIO_6 26
+#define SYS_COUNT_RX_RED_PRIO_7 27
+#define SYS_COUNT_RX_YELLOW_PRIO_0 28
+#define SYS_COUNT_RX_YELLOW_PRIO_1 29
+#define SYS_COUNT_RX_YELLOW_PRIO_2 30
+#define SYS_COUNT_RX_YELLOW_PRIO_3 31
+#define SYS_COUNT_RX_YELLOW_PRIO_4 32
+#define SYS_COUNT_RX_YELLOW_PRIO_5 33
+#define SYS_COUNT_RX_YELLOW_PRIO_6 34
+#define SYS_COUNT_RX_YELLOW_PRIO_7 35
+#define SYS_COUNT_RX_GREEN_PRIO_0 36
+#define SYS_COUNT_RX_GREEN_PRIO_1 37
+#define SYS_COUNT_RX_GREEN_PRIO_2 38
+#define SYS_COUNT_RX_GREEN_PRIO_3 39
+#define SYS_COUNT_RX_GREEN_PRIO_4 40
+#define SYS_COUNT_RX_GREEN_PRIO_5 41
+#define SYS_COUNT_RX_GREEN_PRIO_6 42
+#define SYS_COUNT_RX_GREEN_PRIO_7 43
+#define SYS_COUNT_RX_ASSEMBLY_ERR 44
+#define SYS_COUNT_RX_SMD_ERR 45
+#define SYS_COUNT_RX_ASSEMBLY_OK 46
+#define SYS_COUNT_RX_MERGE_FRAG 47
+#define SYS_COUNT_RX_PMAC_OCT 48
+#define SYS_COUNT_RX_PMAC_UC 49
+#define SYS_COUNT_RX_PMAC_MC 50
+#define SYS_COUNT_RX_PMAC_BC 51
+#define SYS_COUNT_RX_PMAC_SHORT 52
+#define SYS_COUNT_RX_PMAC_FRAG 53
+#define SYS_COUNT_RX_PMAC_JABBER 54
+#define SYS_COUNT_RX_PMAC_CRC 55
+#define SYS_COUNT_RX_PMAC_SYMBOL_ERR 56
+#define SYS_COUNT_RX_PMAC_SZ_64 57
+#define SYS_COUNT_RX_PMAC_SZ_65_127 58
+#define SYS_COUNT_RX_PMAC_SZ_128_255 59
+#define SYS_COUNT_RX_PMAC_SZ_256_511 60
+#define SYS_COUNT_RX_PMAC_SZ_512_1023 61
+#define SYS_COUNT_RX_PMAC_SZ_1024_1526 62
+#define SYS_COUNT_RX_PMAC_SZ_JUMBO 63
+#define SYS_COUNT_RX_PMAC_PAUSE 64
+#define SYS_COUNT_RX_PMAC_CONTROL 65
+#define SYS_COUNT_RX_PMAC_LONG 66
+
+#define SYS_COUNT_TX_OCT 67
+#define SYS_COUNT_TX_UC 68
+#define SYS_COUNT_TX_MC 69
+#define SYS_COUNT_TX_BC 70
+#define SYS_COUNT_TX_COL 71
+#define SYS_COUNT_TX_DROP 72
+#define SYS_COUNT_TX_PAUSE 73
+#define SYS_COUNT_TX_SZ_64 74
+#define SYS_COUNT_TX_SZ_65_127 75
+#define SYS_COUNT_TX_SZ_128_255 76
+#define SYS_COUNT_TX_SZ_256_511 77
+#define SYS_COUNT_TX_SZ_512_1023 78
+#define SYS_COUNT_TX_SZ_1024_1526 79
+#define SYS_COUNT_TX_SZ_JUMBO 80
+#define SYS_COUNT_TX_YELLOW_PRIO_0 81
+#define SYS_COUNT_TX_YELLOW_PRIO_1 82
+#define SYS_COUNT_TX_YELLOW_PRIO_2 83
+#define SYS_COUNT_TX_YELLOW_PRIO_3 84
+#define SYS_COUNT_TX_YELLOW_PRIO_4 85
+#define SYS_COUNT_TX_YELLOW_PRIO_5 86
+#define SYS_COUNT_TX_YELLOW_PRIO_6 87
+#define SYS_COUNT_TX_YELLOW_PRIO_7 88
+#define SYS_COUNT_TX_GREEN_PRIO_0 89
+#define SYS_COUNT_TX_GREEN_PRIO_1 90
+#define SYS_COUNT_TX_GREEN_PRIO_2 91
+#define SYS_COUNT_TX_GREEN_PRIO_3 92
+#define SYS_COUNT_TX_GREEN_PRIO_4 93
+#define SYS_COUNT_TX_GREEN_PRIO_5 94
+#define SYS_COUNT_TX_GREEN_PRIO_6 95
+#define SYS_COUNT_TX_GREEN_PRIO_7 96
+#define SYS_COUNT_TX_AGED 97
+#define SYS_COUNT_TX_LLCT 98
+#define SYS_COUNT_TX_CT 99
+#define SYS_COUNT_TX_MM_HOLD 100
+#define SYS_COUNT_TX_MERGE_FRAG 101
+#define SYS_COUNT_TX_PMAC_OCT 102
+#define SYS_COUNT_TX_PMAC_UC 103
+#define SYS_COUNT_TX_PMAC_MC 104
+#define SYS_COUNT_TX_PMAC_BC 105
+#define SYS_COUNT_TX_PMAC_PAUSE 106
+#define SYS_COUNT_TX_PMAC_SZ_64 107
+#define SYS_COUNT_TX_PMAC_SZ_65_127 108
+#define SYS_COUNT_TX_PMAC_SZ_128_255 109
+#define SYS_COUNT_TX_PMAC_SZ_256_511 110
+#define SYS_COUNT_TX_PMAC_SZ_512_1023 111
+#define SYS_COUNT_TX_PMAC_SZ_1024_1526 112
+#define SYS_COUNT_TX_PMAC_SZ_JUMBO 113
+
+#define SYS_COUNT_DR_LOCAL 114
+#define SYS_COUNT_DR_TAIL 115
+#define SYS_COUNT_DR_YELLOW_PRIO_0 116
+#define SYS_COUNT_DR_YELLOW_PRIO_1 117
+#define SYS_COUNT_DR_YELLOW_PRIO_2 118
+#define SYS_COUNT_DR_YELLOW_PRIO_3 119
+#define SYS_COUNT_DR_YELLOW_PRIO_4 120
+#define SYS_COUNT_DR_YELLOW_PRIO_5 121
+#define SYS_COUNT_DR_YELLOW_PRIO_6 122
+#define SYS_COUNT_DR_YELLOW_PRIO_7 123
+#define SYS_COUNT_DR_GREEN_PRIO_0 124
+#define SYS_COUNT_DR_GREEN_PRIO_1 125
+#define SYS_COUNT_DR_GREEN_PRIO_2 126
+#define SYS_COUNT_DR_GREEN_PRIO_3 127
+#define SYS_COUNT_DR_GREEN_PRIO_4 128
+#define SYS_COUNT_DR_GREEN_PRIO_5 129
+#define SYS_COUNT_DR_GREEN_PRIO_6 130
+#define SYS_COUNT_DR_GREEN_PRIO_7 131
+
+/* Add a possibly wrapping 32 bit value to a 64 bit counter */
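+/* e.g. *cnt = 0x1fffffff0 and val = 0x10: val is below the counter's low
+ * 32 bits, so a wrap is assumed and the result is 0x200000010 instead of
+ * an apparent step backwards.
+ */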
+static void lan966x_add_cnt(u64 *cnt, u32 val)
+{
+ if (val < (*cnt & U32_MAX))
+ *cnt += (u64)1 << 32; /* value has wrapped */
+
+ *cnt = (*cnt & ~(u64)U32_MAX) + val;
+}
+
+static void lan966x_stats_update(struct lan966x *lan966x)
+{
+ int i, j;
+
+ mutex_lock(&lan966x->stats_lock);
+
+ for (i = 0; i < lan966x->num_phys_ports; i++) {
+ uint idx = i * lan966x->num_stats;
+
+ lan_wr(SYS_STAT_CFG_STAT_VIEW_SET(i),
+ lan966x, SYS_STAT_CFG);
+
+ for (j = 0; j < lan966x->num_stats; j++) {
+ u32 offset = lan966x->stats_layout[j].offset;
+
+ lan966x_add_cnt(&lan966x->stats[idx++],
+ lan_rd(lan966x, SYS_CNT(offset)));
+ }
+ }
+
+ mutex_unlock(&lan966x->stats_lock);
+}
+
+static int lan966x_get_sset_count(struct net_device *dev, int sset)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+
+ if (sset != ETH_SS_STATS)
+ return -EOPNOTSUPP;
+
+ return lan966x->num_stats;
+}
+
+static void lan966x_get_strings(struct net_device *netdev, u32 sset, u8 *data)
+{
+ struct lan966x_port *port = netdev_priv(netdev);
+ struct lan966x *lan966x = port->lan966x;
+ int i;
+
+ if (sset != ETH_SS_STATS)
+ return;
+
+ for (i = 0; i < lan966x->num_stats; i++)
+ memcpy(data + i * ETH_GSTRING_LEN,
+ lan966x->stats_layout[i].name, ETH_GSTRING_LEN);
+}
+
+static void lan966x_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ int i;
+
+ /* check and update now */
+ lan966x_stats_update(lan966x);
+
+ /* Copy all counters */
+ for (i = 0; i < lan966x->num_stats; i++)
+ *data++ = lan966x->stats[port->chip_port *
+ lan966x->num_stats + i];
+}
+
+static void lan966x_get_eth_mac_stats(struct net_device *dev,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ u32 idx;
+
+ lan966x_stats_update(lan966x);
+
+ idx = port->chip_port * lan966x->num_stats;
+
+ mutex_lock(&lan966x->stats_lock);
+
+ mac_stats->FramesTransmittedOK =
+ lan966x->stats[idx + SYS_COUNT_TX_UC] +
+ lan966x->stats[idx + SYS_COUNT_TX_MC] +
+ lan966x->stats[idx + SYS_COUNT_TX_BC] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_UC] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_MC] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_BC];
+ mac_stats->SingleCollisionFrames =
+ lan966x->stats[idx + SYS_COUNT_TX_COL];
+ mac_stats->MultipleCollisionFrames = 0;
+ mac_stats->FramesReceivedOK =
+ lan966x->stats[idx + SYS_COUNT_RX_UC] +
+ lan966x->stats[idx + SYS_COUNT_RX_MC] +
+ lan966x->stats[idx + SYS_COUNT_RX_BC];
+ mac_stats->FrameCheckSequenceErrors =
+ lan966x->stats[idx + SYS_COUNT_RX_CRC] +
+		lan966x->stats[idx + SYS_COUNT_RX_PMAC_CRC];
+ mac_stats->AlignmentErrors = 0;
+ mac_stats->OctetsTransmittedOK =
+ lan966x->stats[idx + SYS_COUNT_TX_OCT] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_OCT];
+ mac_stats->FramesWithDeferredXmissions =
+ lan966x->stats[idx + SYS_COUNT_TX_MM_HOLD];
+ mac_stats->LateCollisions = 0;
+ mac_stats->FramesAbortedDueToXSColls = 0;
+ mac_stats->FramesLostDueToIntMACXmitError = 0;
+ mac_stats->CarrierSenseErrors = 0;
+ mac_stats->OctetsReceivedOK =
+ lan966x->stats[idx + SYS_COUNT_RX_OCT];
+ mac_stats->FramesLostDueToIntMACRcvError = 0;
+ mac_stats->MulticastFramesXmittedOK =
+ lan966x->stats[idx + SYS_COUNT_TX_MC] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_MC];
+ mac_stats->BroadcastFramesXmittedOK =
+ lan966x->stats[idx + SYS_COUNT_TX_BC] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_BC];
+ mac_stats->FramesWithExcessiveDeferral = 0;
+ mac_stats->MulticastFramesReceivedOK =
+ lan966x->stats[idx + SYS_COUNT_RX_MC];
+ mac_stats->BroadcastFramesReceivedOK =
+ lan966x->stats[idx + SYS_COUNT_RX_BC];
+ mac_stats->InRangeLengthErrors =
+ lan966x->stats[idx + SYS_COUNT_RX_FRAG] +
+ lan966x->stats[idx + SYS_COUNT_RX_JABBER] +
+ lan966x->stats[idx + SYS_COUNT_RX_CRC] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_FRAG] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_JABBER] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_CRC];
+ mac_stats->OutOfRangeLengthField =
+ lan966x->stats[idx + SYS_COUNT_RX_SHORT] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_SHORT] +
+ lan966x->stats[idx + SYS_COUNT_RX_LONG] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_LONG];
+ mac_stats->FrameTooLongErrors =
+ lan966x->stats[idx + SYS_COUNT_RX_LONG] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_LONG];
+
+ mutex_unlock(&lan966x->stats_lock);
+}
+
+static const struct ethtool_rmon_hist_range lan966x_rmon_ranges[] = {
+ { 0, 64 },
+ { 65, 127 },
+ { 128, 255 },
+ { 256, 511 },
+ { 512, 1023 },
+ { 1024, 1518 },
+ { 1519, 10239 },
+ {}
+};
+
+static void lan966x_get_eth_rmon_stats(struct net_device *dev,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ u32 idx;
+
+ lan966x_stats_update(lan966x);
+
+ idx = port->chip_port * lan966x->num_stats;
+
+ mutex_lock(&lan966x->stats_lock);
+
+ rmon_stats->undersize_pkts =
+ lan966x->stats[idx + SYS_COUNT_RX_SHORT] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_SHORT];
+ rmon_stats->oversize_pkts =
+ lan966x->stats[idx + SYS_COUNT_RX_LONG] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_LONG];
+ rmon_stats->fragments =
+ lan966x->stats[idx + SYS_COUNT_RX_FRAG] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_FRAG];
+ rmon_stats->jabbers =
+ lan966x->stats[idx + SYS_COUNT_RX_JABBER] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_JABBER];
+ rmon_stats->hist[0] =
+ lan966x->stats[idx + SYS_COUNT_RX_SZ_64] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_64];
+ rmon_stats->hist[1] =
+ lan966x->stats[idx + SYS_COUNT_RX_SZ_65_127] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_65_127];
+ rmon_stats->hist[2] =
+ lan966x->stats[idx + SYS_COUNT_RX_SZ_128_255] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_128_255];
+ rmon_stats->hist[3] =
+ lan966x->stats[idx + SYS_COUNT_RX_SZ_256_511] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_256_511];
+ rmon_stats->hist[4] =
+ lan966x->stats[idx + SYS_COUNT_RX_SZ_512_1023] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_512_1023];
+ rmon_stats->hist[5] =
+ lan966x->stats[idx + SYS_COUNT_RX_SZ_1024_1526] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_1024_1526];
+	rmon_stats->hist[6] =
+		lan966x->stats[idx + SYS_COUNT_RX_SZ_JUMBO] +
+		lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_JUMBO];
+
+ rmon_stats->hist_tx[0] =
+ lan966x->stats[idx + SYS_COUNT_TX_SZ_64] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_64];
+ rmon_stats->hist_tx[1] =
+ lan966x->stats[idx + SYS_COUNT_TX_SZ_65_127] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_65_127];
+ rmon_stats->hist_tx[2] =
+ lan966x->stats[idx + SYS_COUNT_TX_SZ_128_255] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_128_255];
+ rmon_stats->hist_tx[3] =
+ lan966x->stats[idx + SYS_COUNT_TX_SZ_256_511] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_256_511];
+ rmon_stats->hist_tx[4] =
+ lan966x->stats[idx + SYS_COUNT_TX_SZ_512_1023] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_512_1023];
+ rmon_stats->hist_tx[5] =
+ lan966x->stats[idx + SYS_COUNT_TX_SZ_1024_1526] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_1024_1526];
+	rmon_stats->hist_tx[6] =
+		lan966x->stats[idx + SYS_COUNT_TX_SZ_JUMBO] +
+		lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_JUMBO];
+
+ mutex_unlock(&lan966x->stats_lock);
+
+ *ranges = lan966x_rmon_ranges;
+}
+
+static int lan966x_get_link_ksettings(struct net_device *ndev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct lan966x_port *port = netdev_priv(ndev);
+
+ return phylink_ethtool_ksettings_get(port->phylink, cmd);
+}
+
+static int lan966x_set_link_ksettings(struct net_device *ndev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct lan966x_port *port = netdev_priv(ndev);
+
+ return phylink_ethtool_ksettings_set(port->phylink, cmd);
+}
+
+static void lan966x_get_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+
+ phylink_ethtool_get_pauseparam(port->phylink, pause);
+}
+
+static int lan966x_set_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+
+ return phylink_ethtool_set_pauseparam(port->phylink, pause);
+}
+
+static int lan966x_get_ts_info(struct net_device *dev,
+ struct ethtool_ts_info *info)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ struct lan966x_phc *phc;
+
+ if (!lan966x->ptp)
+ return ethtool_op_get_ts_info(dev, info);
+
+ phc = &lan966x->phc[LAN966X_PHC_PORT];
+
+ info->phc_index = phc->clock ? ptp_clock_index(phc->clock) : -1;
+ if (info->phc_index == -1) {
+ info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE;
+ return 0;
+ }
+ info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON) |
+ BIT(HWTSTAMP_TX_ONESTEP_SYNC);
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_ALL);
+
+ return 0;
+}
+
+const struct ethtool_ops lan966x_ethtool_ops = {
+ .get_link_ksettings = lan966x_get_link_ksettings,
+ .set_link_ksettings = lan966x_set_link_ksettings,
+ .get_pauseparam = lan966x_get_pauseparam,
+ .set_pauseparam = lan966x_set_pauseparam,
+ .get_sset_count = lan966x_get_sset_count,
+ .get_strings = lan966x_get_strings,
+ .get_ethtool_stats = lan966x_get_ethtool_stats,
+ .get_eth_mac_stats = lan966x_get_eth_mac_stats,
+ .get_rmon_stats = lan966x_get_eth_rmon_stats,
+ .get_link = ethtool_op_get_link,
+ .get_ts_info = lan966x_get_ts_info,
+};
+
+static void lan966x_check_stats_work(struct work_struct *work)
+{
+ struct delayed_work *del_work = to_delayed_work(work);
+ struct lan966x *lan966x = container_of(del_work, struct lan966x,
+ stats_work);
+
+ lan966x_stats_update(lan966x);
+
+ queue_delayed_work(lan966x->stats_queue, &lan966x->stats_work,
+ LAN966X_STATS_CHECK_DELAY);
+}
+
+void lan966x_stats_get(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ u32 idx;
+ int i;
+
+ idx = port->chip_port * lan966x->num_stats;
+
+ mutex_lock(&lan966x->stats_lock);
+
+ stats->rx_bytes = lan966x->stats[idx + SYS_COUNT_RX_OCT] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_OCT];
+
+ stats->rx_packets = lan966x->stats[idx + SYS_COUNT_RX_SHORT] +
+ lan966x->stats[idx + SYS_COUNT_RX_FRAG] +
+ lan966x->stats[idx + SYS_COUNT_RX_JABBER] +
+ lan966x->stats[idx + SYS_COUNT_RX_CRC] +
+ lan966x->stats[idx + SYS_COUNT_RX_SYMBOL_ERR] +
+ lan966x->stats[idx + SYS_COUNT_RX_SZ_64] +
+ lan966x->stats[idx + SYS_COUNT_RX_SZ_65_127] +
+ lan966x->stats[idx + SYS_COUNT_RX_SZ_128_255] +
+ lan966x->stats[idx + SYS_COUNT_RX_SZ_256_511] +
+ lan966x->stats[idx + SYS_COUNT_RX_SZ_512_1023] +
+ lan966x->stats[idx + SYS_COUNT_RX_SZ_1024_1526] +
+ lan966x->stats[idx + SYS_COUNT_RX_SZ_JUMBO] +
+ lan966x->stats[idx + SYS_COUNT_RX_LONG] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_SHORT] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_FRAG] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_JABBER] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_64] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_65_127] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_128_255] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_256_511] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_512_1023] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_1024_1526] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_JUMBO];
+
+ stats->multicast = lan966x->stats[idx + SYS_COUNT_RX_MC] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_MC];
+
+ stats->rx_errors = lan966x->stats[idx + SYS_COUNT_RX_SHORT] +
+ lan966x->stats[idx + SYS_COUNT_RX_FRAG] +
+ lan966x->stats[idx + SYS_COUNT_RX_JABBER] +
+ lan966x->stats[idx + SYS_COUNT_RX_CRC] +
+ lan966x->stats[idx + SYS_COUNT_RX_SYMBOL_ERR] +
+ lan966x->stats[idx + SYS_COUNT_RX_LONG];
+
+ stats->rx_dropped = dev->stats.rx_dropped +
+ lan966x->stats[idx + SYS_COUNT_RX_LONG] +
+ lan966x->stats[idx + SYS_COUNT_DR_LOCAL] +
+ lan966x->stats[idx + SYS_COUNT_DR_TAIL] +
+ lan966x->stats[idx + SYS_COUNT_RX_RED_PRIO_0] +
+ lan966x->stats[idx + SYS_COUNT_RX_RED_PRIO_1] +
+ lan966x->stats[idx + SYS_COUNT_RX_RED_PRIO_2] +
+ lan966x->stats[idx + SYS_COUNT_RX_RED_PRIO_3] +
+ lan966x->stats[idx + SYS_COUNT_RX_RED_PRIO_4] +
+ lan966x->stats[idx + SYS_COUNT_RX_RED_PRIO_5] +
+ lan966x->stats[idx + SYS_COUNT_RX_RED_PRIO_6] +
+ lan966x->stats[idx + SYS_COUNT_RX_RED_PRIO_7];
+
+ for (i = 0; i < LAN966X_NUM_TC; i++) {
+ stats->rx_dropped +=
+ (lan966x->stats[idx + SYS_COUNT_DR_YELLOW_PRIO_0 + i] +
+ lan966x->stats[idx + SYS_COUNT_DR_GREEN_PRIO_0 + i]);
+ }
+
+ /* Get Tx stats */
+ stats->tx_bytes = lan966x->stats[idx + SYS_COUNT_TX_OCT] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_OCT];
+
+ stats->tx_packets = lan966x->stats[idx + SYS_COUNT_TX_SZ_64] +
+ lan966x->stats[idx + SYS_COUNT_TX_SZ_65_127] +
+ lan966x->stats[idx + SYS_COUNT_TX_SZ_128_255] +
+ lan966x->stats[idx + SYS_COUNT_TX_SZ_256_511] +
+ lan966x->stats[idx + SYS_COUNT_TX_SZ_512_1023] +
+ lan966x->stats[idx + SYS_COUNT_TX_SZ_1024_1526] +
+ lan966x->stats[idx + SYS_COUNT_TX_SZ_JUMBO] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_64] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_65_127] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_128_255] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_256_511] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_512_1023] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_1024_1526] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_JUMBO];
+
+ stats->tx_dropped = lan966x->stats[idx + SYS_COUNT_TX_DROP] +
+ lan966x->stats[idx + SYS_COUNT_TX_AGED];
+
+ stats->collisions = lan966x->stats[idx + SYS_COUNT_TX_COL];
+
+ mutex_unlock(&lan966x->stats_lock);
+}
+
+int lan966x_stats_init(struct lan966x *lan966x)
+{
+ char queue_name[32];
+
+ lan966x->stats_layout = lan966x_stats_layout;
+ lan966x->num_stats = ARRAY_SIZE(lan966x_stats_layout);
+ lan966x->stats = devm_kcalloc(lan966x->dev, lan966x->num_phys_ports *
+ lan966x->num_stats,
+ sizeof(u64), GFP_KERNEL);
+ if (!lan966x->stats)
+ return -ENOMEM;
+
+ /* Init stats worker */
+ mutex_init(&lan966x->stats_lock);
+ snprintf(queue_name, sizeof(queue_name), "%s-stats",
+ dev_name(lan966x->dev));
+ lan966x->stats_queue = create_singlethread_workqueue(queue_name);
+ if (!lan966x->stats_queue)
+ return -ENOMEM;
+
+ INIT_DELAYED_WORK(&lan966x->stats_work, lan966x_check_stats_work);
+ queue_delayed_work(lan966x->stats_queue, &lan966x->stats_work,
+ LAN966X_STATS_CHECK_DELAY);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ets.c b/drivers/net/ethernet/microchip/lan966x/lan966x_ets.c
new file mode 100644
index 0000000000..8310d3f354
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ets.c
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include "lan966x_main.h"
+
+#define DWRR_COST_BIT_WIDTH BIT(5)
+
+static u32 lan966x_ets_hw_cost(u32 w_min, u32 weight)
+{
+ u32 res;
+
+	/* Round half up: multiply by 16 before the division, add 8 and
+	 * divide the result by 16 again.
+	 */
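+	/* Worked example with illustrative values, w_min == 10 and
+	 * weight == 40 (DWRR_COST_BIT_WIDTH is BIT(5) == 32):
+	 * ((32 << 4) * 10 / 40 + 8) >> 4 = (128 + 8) >> 4 = 8, so the
+	 * returned hardware cost is max(1, 8) - 1 = 7.
+	 */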
+ res = (((DWRR_COST_BIT_WIDTH << 4) * w_min / weight) + 8) >> 4;
+ return max_t(u32, 1, res) - 1;
+}
+
+int lan966x_ets_add(struct lan966x_port *port,
+ struct tc_ets_qopt_offload *qopt)
+{
+ struct tc_ets_qopt_offload_replace_params *params;
+ struct lan966x *lan966x = port->lan966x;
+ u32 w_min = 100;
+ u8 count = 0;
+ u32 se_idx;
+ u8 i;
+
+ /* Check the input */
+ if (qopt->parent != TC_H_ROOT)
+ return -EINVAL;
+
+ params = &qopt->replace_params;
+ if (params->bands != NUM_PRIO_QUEUES)
+ return -EINVAL;
+
+ for (i = 0; i < params->bands; ++i) {
+		/* In the switch, the DWRR is always applied to the lowest
+		 * consecutive priorities. Due to this, the first priority
+		 * must map to the first DWRR band.
+		 */
+ if (params->priomap[i] != (7 - i))
+ return -EINVAL;
+
+ if (params->quanta[i] && params->weights[i] == 0)
+ return -EINVAL;
+ }
+
+ se_idx = SE_IDX_PORT + port->chip_port;
+
+ /* Find minimum weight */
+ for (i = 0; i < params->bands; ++i) {
+ if (params->quanta[i] == 0)
+ continue;
+
+ w_min = min(w_min, params->weights[i]);
+ }
+
+ for (i = 0; i < params->bands; ++i) {
+ if (params->quanta[i] == 0)
+ continue;
+
+ ++count;
+
+ lan_wr(lan966x_ets_hw_cost(w_min, params->weights[i]),
+ lan966x, QSYS_SE_DWRR_CFG(se_idx, 7 - i));
+ }
+
+ lan_rmw(QSYS_SE_CFG_SE_DWRR_CNT_SET(count) |
+ QSYS_SE_CFG_SE_RR_ENA_SET(0),
+ QSYS_SE_CFG_SE_DWRR_CNT |
+ QSYS_SE_CFG_SE_RR_ENA,
+ lan966x, QSYS_SE_CFG(se_idx));
+
+ return 0;
+}
+
+int lan966x_ets_del(struct lan966x_port *port,
+ struct tc_ets_qopt_offload *qopt)
+{
+ struct lan966x *lan966x = port->lan966x;
+ u32 se_idx;
+ int i;
+
+ se_idx = SE_IDX_PORT + port->chip_port;
+
+ for (i = 0; i < NUM_PRIO_QUEUES; ++i)
+ lan_wr(0, lan966x, QSYS_SE_DWRR_CFG(se_idx, i));
+
+ lan_rmw(QSYS_SE_CFG_SE_DWRR_CNT_SET(0) |
+ QSYS_SE_CFG_SE_RR_ENA_SET(0),
+ QSYS_SE_CFG_SE_DWRR_CNT |
+ QSYS_SE_CFG_SE_RR_ENA,
+ lan966x, QSYS_SE_CFG(se_idx));
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_fdb.c b/drivers/net/ethernet/microchip/lan966x/lan966x_fdb.c
new file mode 100644
index 0000000000..2ea263e893
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_fdb.c
@@ -0,0 +1,289 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <net/switchdev.h>
+
+#include "lan966x_main.h"
+
+struct lan966x_fdb_event_work {
+ struct work_struct work;
+ struct switchdev_notifier_fdb_info fdb_info;
+ struct net_device *dev;
+ struct net_device *orig_dev;
+ struct lan966x *lan966x;
+ unsigned long event;
+};
+
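+/* Entries learned on the bridge device itself are kept in this list and
+ * refcounted, since the same MAC/VID pair can be notified more than once;
+ * the HW entry is removed only when the last reference is dropped.
+ */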
+struct lan966x_fdb_entry {
+ struct list_head list;
+ unsigned char mac[ETH_ALEN] __aligned(2);
+ u16 vid;
+ u32 references;
+};
+
+static struct lan966x_fdb_entry *
+lan966x_fdb_find_entry(struct lan966x *lan966x,
+ struct switchdev_notifier_fdb_info *fdb_info)
+{
+ struct lan966x_fdb_entry *fdb_entry;
+
+ list_for_each_entry(fdb_entry, &lan966x->fdb_entries, list) {
+ if (fdb_entry->vid == fdb_info->vid &&
+ ether_addr_equal(fdb_entry->mac, fdb_info->addr))
+ return fdb_entry;
+ }
+
+ return NULL;
+}
+
+static void lan966x_fdb_add_entry(struct lan966x *lan966x,
+ struct switchdev_notifier_fdb_info *fdb_info)
+{
+ struct lan966x_fdb_entry *fdb_entry;
+
+ fdb_entry = lan966x_fdb_find_entry(lan966x, fdb_info);
+ if (fdb_entry) {
+ fdb_entry->references++;
+ return;
+ }
+
+ fdb_entry = kzalloc(sizeof(*fdb_entry), GFP_KERNEL);
+ if (!fdb_entry)
+ return;
+
+ ether_addr_copy(fdb_entry->mac, fdb_info->addr);
+ fdb_entry->vid = fdb_info->vid;
+ fdb_entry->references = 1;
+ list_add_tail(&fdb_entry->list, &lan966x->fdb_entries);
+}
+
+static bool lan966x_fdb_del_entry(struct lan966x *lan966x,
+ struct switchdev_notifier_fdb_info *fdb_info)
+{
+ struct lan966x_fdb_entry *fdb_entry, *tmp;
+
+ list_for_each_entry_safe(fdb_entry, tmp, &lan966x->fdb_entries,
+ list) {
+ if (fdb_entry->vid == fdb_info->vid &&
+ ether_addr_equal(fdb_entry->mac, fdb_info->addr)) {
+ fdb_entry->references--;
+ if (!fdb_entry->references) {
+ list_del(&fdb_entry->list);
+ kfree(fdb_entry);
+ return true;
+ }
+ break;
+ }
+ }
+
+ return false;
+}
+
+void lan966x_fdb_write_entries(struct lan966x *lan966x, u16 vid)
+{
+ struct lan966x_fdb_entry *fdb_entry;
+
+ list_for_each_entry(fdb_entry, &lan966x->fdb_entries, list) {
+ if (fdb_entry->vid != vid)
+ continue;
+
+ lan966x_mac_cpu_learn(lan966x, fdb_entry->mac, fdb_entry->vid);
+ }
+}
+
+void lan966x_fdb_erase_entries(struct lan966x *lan966x, u16 vid)
+{
+ struct lan966x_fdb_entry *fdb_entry;
+
+ list_for_each_entry(fdb_entry, &lan966x->fdb_entries, list) {
+ if (fdb_entry->vid != vid)
+ continue;
+
+ lan966x_mac_cpu_forget(lan966x, fdb_entry->mac, fdb_entry->vid);
+ }
+}
+
+static void lan966x_fdb_purge_entries(struct lan966x *lan966x)
+{
+ struct lan966x_fdb_entry *fdb_entry, *tmp;
+
+ list_for_each_entry_safe(fdb_entry, tmp, &lan966x->fdb_entries, list) {
+ list_del(&fdb_entry->list);
+ kfree(fdb_entry);
+ }
+}
+
+int lan966x_fdb_init(struct lan966x *lan966x)
+{
+ INIT_LIST_HEAD(&lan966x->fdb_entries);
+ lan966x->fdb_work = alloc_ordered_workqueue("lan966x_order", 0);
+ if (!lan966x->fdb_work)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void lan966x_fdb_deinit(struct lan966x *lan966x)
+{
+ destroy_workqueue(lan966x->fdb_work);
+ lan966x_fdb_purge_entries(lan966x);
+}
+
+void lan966x_fdb_flush_workqueue(struct lan966x *lan966x)
+{
+ flush_workqueue(lan966x->fdb_work);
+}
+
+static void lan966x_fdb_port_event_work(struct lan966x_fdb_event_work *fdb_work)
+{
+ struct switchdev_notifier_fdb_info *fdb_info;
+ struct lan966x_port *port;
+ struct lan966x *lan966x;
+
+ lan966x = fdb_work->lan966x;
+ port = netdev_priv(fdb_work->orig_dev);
+ fdb_info = &fdb_work->fdb_info;
+
+ switch (fdb_work->event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ if (!fdb_info->added_by_user)
+ break;
+ lan966x_mac_add_entry(lan966x, port, fdb_info->addr,
+ fdb_info->vid);
+ break;
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ if (!fdb_info->added_by_user)
+ break;
+ lan966x_mac_del_entry(lan966x, fdb_info->addr,
+ fdb_info->vid);
+ break;
+ }
+}
+
+static void lan966x_fdb_bridge_event_work(struct lan966x_fdb_event_work *fdb_work)
+{
+ struct switchdev_notifier_fdb_info *fdb_info;
+ struct lan966x *lan966x;
+ int ret;
+
+ lan966x = fdb_work->lan966x;
+ fdb_info = &fdb_work->fdb_info;
+
+	/* The FDB event was generated on the bridge device itself */
+ switch (fdb_work->event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+		/* If no front port is a member of this vlan, there is no
+		 * point in copying the frame to the CPU, because it would
+		 * just be dropped later anyway. So program the entry only
+		 * if a port is present, but always store the fdb entry so
+		 * it can be programmed later, when a port actually joins
+		 * the vlan.
+		 */
+ lan966x_fdb_add_entry(lan966x, fdb_info);
+ if (!lan966x_vlan_cpu_member_cpu_vlan_mask(lan966x,
+ fdb_info->vid))
+ break;
+
+ lan966x_mac_cpu_learn(lan966x, fdb_info->addr,
+ fdb_info->vid);
+ break;
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ ret = lan966x_fdb_del_entry(lan966x, fdb_info);
+ if (!lan966x_vlan_cpu_member_cpu_vlan_mask(lan966x,
+ fdb_info->vid))
+ break;
+
+ if (ret)
+ lan966x_mac_cpu_forget(lan966x, fdb_info->addr,
+ fdb_info->vid);
+ break;
+ }
+}
+
+static void lan966x_fdb_lag_event_work(struct lan966x_fdb_event_work *fdb_work)
+{
+ struct switchdev_notifier_fdb_info *fdb_info;
+ struct lan966x_port *port;
+ struct lan966x *lan966x;
+
+ if (!lan966x_lag_first_port(fdb_work->orig_dev, fdb_work->dev))
+ return;
+
+ lan966x = fdb_work->lan966x;
+ port = netdev_priv(fdb_work->dev);
+ fdb_info = &fdb_work->fdb_info;
+
+ switch (fdb_work->event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ if (!fdb_info->added_by_user)
+ break;
+ lan966x_mac_add_entry(lan966x, port, fdb_info->addr,
+ fdb_info->vid);
+ break;
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ if (!fdb_info->added_by_user)
+ break;
+ lan966x_mac_del_entry(lan966x, fdb_info->addr, fdb_info->vid);
+ break;
+ }
+}
+
+static void lan966x_fdb_event_work(struct work_struct *work)
+{
+ struct lan966x_fdb_event_work *fdb_work =
+ container_of(work, struct lan966x_fdb_event_work, work);
+
+ if (lan966x_netdevice_check(fdb_work->orig_dev))
+ lan966x_fdb_port_event_work(fdb_work);
+ else if (netif_is_bridge_master(fdb_work->orig_dev))
+ lan966x_fdb_bridge_event_work(fdb_work);
+ else if (netif_is_lag_master(fdb_work->orig_dev))
+ lan966x_fdb_lag_event_work(fdb_work);
+
+ kfree(fdb_work->fdb_info.addr);
+ kfree(fdb_work);
+}
+
+int lan966x_handle_fdb(struct net_device *dev,
+ struct net_device *orig_dev,
+ unsigned long event, const void *ctx,
+ const struct switchdev_notifier_fdb_info *fdb_info)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ struct lan966x_fdb_event_work *fdb_work;
+
+ if (ctx && ctx != port)
+ return 0;
+
+ switch (event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ if (lan966x_netdevice_check(orig_dev) &&
+ !fdb_info->added_by_user)
+ break;
+
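+		/* The switchdev FDB notifiers run in atomic context, so
+		 * allocate with GFP_ATOMIC and defer the MAC table update
+		 * to the ordered workqueue.
+		 */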
+ fdb_work = kzalloc(sizeof(*fdb_work), GFP_ATOMIC);
+ if (!fdb_work)
+ return -ENOMEM;
+
+ fdb_work->dev = dev;
+ fdb_work->orig_dev = orig_dev;
+ fdb_work->lan966x = lan966x;
+ fdb_work->event = event;
+ INIT_WORK(&fdb_work->work, lan966x_fdb_event_work);
+ memcpy(&fdb_work->fdb_info, fdb_info, sizeof(fdb_work->fdb_info));
+ fdb_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
+ if (!fdb_work->fdb_info.addr)
+ goto err_addr_alloc;
+
+ ether_addr_copy((u8 *)fdb_work->fdb_info.addr, fdb_info->addr);
+
+ queue_work(lan966x->fdb_work, &fdb_work->work);
+ break;
+ }
+
+ return 0;
+err_addr_alloc:
+ kfree(fdb_work);
+ return -ENOMEM;
+}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c b/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c
new file mode 100644
index 0000000000..3960534ac2
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c
@@ -0,0 +1,1073 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/bpf.h>
+#include <linux/filter.h>
+#include <net/page_pool/helpers.h>
+
+#include "lan966x_main.h"
+
+static int lan966x_fdma_channel_active(struct lan966x *lan966x)
+{
+ return lan_rd(lan966x, FDMA_CH_ACTIVE);
+}
+
+static struct page *lan966x_fdma_rx_alloc_page(struct lan966x_rx *rx,
+ struct lan966x_db *db)
+{
+ struct page *page;
+
+ page = page_pool_dev_alloc_pages(rx->page_pool);
+ if (unlikely(!page))
+ return NULL;
+
+ db->dataptr = page_pool_get_dma_addr(page) + XDP_PACKET_HEADROOM;
+
+ return page;
+}
+
+static void lan966x_fdma_rx_free_pages(struct lan966x_rx *rx)
+{
+ int i, j;
+
+ for (i = 0; i < FDMA_DCB_MAX; ++i) {
+ for (j = 0; j < FDMA_RX_DCB_MAX_DBS; ++j)
+ page_pool_put_full_page(rx->page_pool,
+ rx->page[i][j], false);
+ }
+}
+
+static void lan966x_fdma_rx_free_page(struct lan966x_rx *rx)
+{
+ struct page *page;
+
+ page = rx->page[rx->dcb_index][rx->db_index];
+ if (unlikely(!page))
+ return;
+
+ page_pool_recycle_direct(rx->page_pool, page);
+}
+
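+/* Reset the DBs of the DCB and append it at the tail of the hardware DCB
+ * chain by pointing the previous tail's nextptr at it.
+ */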
+static void lan966x_fdma_rx_add_dcb(struct lan966x_rx *rx,
+ struct lan966x_rx_dcb *dcb,
+ u64 nextptr)
+{
+ struct lan966x_db *db;
+ int i;
+
+ for (i = 0; i < FDMA_RX_DCB_MAX_DBS; ++i) {
+ db = &dcb->db[i];
+ db->status = FDMA_DCB_STATUS_INTR;
+ }
+
+ dcb->nextptr = FDMA_DCB_INVALID_DATA;
+ dcb->info = FDMA_DCB_INFO_DATAL(PAGE_SIZE << rx->page_order);
+
+ rx->last_entry->nextptr = nextptr;
+ rx->last_entry = dcb;
+}
+
+static int lan966x_fdma_rx_alloc_page_pool(struct lan966x_rx *rx)
+{
+ struct lan966x *lan966x = rx->lan966x;
+ struct page_pool_params pp_params = {
+ .order = rx->page_order,
+ .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+ .pool_size = FDMA_DCB_MAX,
+ .nid = NUMA_NO_NODE,
+ .dev = lan966x->dev,
+ .dma_dir = DMA_FROM_DEVICE,
+ .offset = XDP_PACKET_HEADROOM,
+ .max_len = rx->max_mtu -
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
+ };
+
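+	/* With an XDP program attached, the same pages may also be
+	 * transmitted back out (XDP_TX), so map them bidirectionally.
+	 */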
+ if (lan966x_xdp_present(lan966x))
+ pp_params.dma_dir = DMA_BIDIRECTIONAL;
+
+ rx->page_pool = page_pool_create(&pp_params);
+
+ for (int i = 0; i < lan966x->num_phys_ports; ++i) {
+ struct lan966x_port *port;
+
+ if (!lan966x->ports[i])
+ continue;
+
+ port = lan966x->ports[i];
+ xdp_rxq_info_unreg_mem_model(&port->xdp_rxq);
+ xdp_rxq_info_reg_mem_model(&port->xdp_rxq, MEM_TYPE_PAGE_POOL,
+ rx->page_pool);
+ }
+
+ return PTR_ERR_OR_ZERO(rx->page_pool);
+}
+
+static int lan966x_fdma_rx_alloc(struct lan966x_rx *rx)
+{
+ struct lan966x *lan966x = rx->lan966x;
+ struct lan966x_rx_dcb *dcb;
+ struct lan966x_db *db;
+ struct page *page;
+ int i, j;
+ int size;
+
+ if (lan966x_fdma_rx_alloc_page_pool(rx))
+ return PTR_ERR(rx->page_pool);
+
+ /* calculate how many pages are needed to allocate the dcbs */
+ size = sizeof(struct lan966x_rx_dcb) * FDMA_DCB_MAX;
+ size = ALIGN(size, PAGE_SIZE);
+
+ rx->dcbs = dma_alloc_coherent(lan966x->dev, size, &rx->dma, GFP_KERNEL);
+ if (!rx->dcbs)
+ return -ENOMEM;
+
+ rx->last_entry = rx->dcbs;
+ rx->db_index = 0;
+ rx->dcb_index = 0;
+
+ /* Now for each dcb allocate the dbs */
+ for (i = 0; i < FDMA_DCB_MAX; ++i) {
+ dcb = &rx->dcbs[i];
+ dcb->info = 0;
+
+ /* For each db allocate a page and map it to the DB dataptr. */
+ for (j = 0; j < FDMA_RX_DCB_MAX_DBS; ++j) {
+ db = &dcb->db[j];
+ page = lan966x_fdma_rx_alloc_page(rx, db);
+ if (!page)
+ return -ENOMEM;
+
+ db->status = 0;
+ rx->page[i][j] = page;
+ }
+
+ lan966x_fdma_rx_add_dcb(rx, dcb, rx->dma + sizeof(*dcb) * i);
+ }
+
+ return 0;
+}
+
+static void lan966x_fdma_rx_advance_dcb(struct lan966x_rx *rx)
+{
+ rx->dcb_index++;
+ rx->dcb_index &= FDMA_DCB_MAX - 1;
+}
+
+static void lan966x_fdma_rx_free(struct lan966x_rx *rx)
+{
+ struct lan966x *lan966x = rx->lan966x;
+ u32 size;
+
+	/* Now it is possible to do the cleanup of the dcbs */
+	size = sizeof(struct lan966x_rx_dcb) * FDMA_DCB_MAX;
+ size = ALIGN(size, PAGE_SIZE);
+ dma_free_coherent(lan966x->dev, size, rx->dcbs, rx->dma);
+}
+
+static void lan966x_fdma_rx_start(struct lan966x_rx *rx)
+{
+ struct lan966x *lan966x = rx->lan966x;
+ u32 mask;
+
+	/* When activating a channel, the address of the first DCB must be
+	 * written before the channel is activated.
+	 */
+ lan_wr(lower_32_bits((u64)rx->dma), lan966x,
+ FDMA_DCB_LLP(rx->channel_id));
+ lan_wr(upper_32_bits((u64)rx->dma), lan966x,
+ FDMA_DCB_LLP1(rx->channel_id));
+
+ lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(FDMA_RX_DCB_MAX_DBS) |
+ FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
+ FDMA_CH_CFG_CH_INJ_PORT_SET(0) |
+ FDMA_CH_CFG_CH_MEM_SET(1),
+ lan966x, FDMA_CH_CFG(rx->channel_id));
+
+ /* Start fdma */
+ lan_rmw(FDMA_PORT_CTRL_XTR_STOP_SET(0),
+ FDMA_PORT_CTRL_XTR_STOP,
+ lan966x, FDMA_PORT_CTRL(0));
+
+ /* Enable interrupts */
+ mask = lan_rd(lan966x, FDMA_INTR_DB_ENA);
+ mask = FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(mask);
+ mask |= BIT(rx->channel_id);
+ lan_rmw(FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(mask),
+ FDMA_INTR_DB_ENA_INTR_DB_ENA,
+ lan966x, FDMA_INTR_DB_ENA);
+
+ /* Activate the channel */
+ lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(rx->channel_id)),
+ FDMA_CH_ACTIVATE_CH_ACTIVATE,
+ lan966x, FDMA_CH_ACTIVATE);
+}
+
+static void lan966x_fdma_rx_disable(struct lan966x_rx *rx)
+{
+ struct lan966x *lan966x = rx->lan966x;
+ u32 val;
+
+ /* Disable the channel */
+ lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(rx->channel_id)),
+ FDMA_CH_DISABLE_CH_DISABLE,
+ lan966x, FDMA_CH_DISABLE);
+
+ readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x,
+ val, !(val & BIT(rx->channel_id)),
+ READL_SLEEP_US, READL_TIMEOUT_US);
+
+ lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(rx->channel_id)),
+ FDMA_CH_DB_DISCARD_DB_DISCARD,
+ lan966x, FDMA_CH_DB_DISCARD);
+}
+
+static void lan966x_fdma_rx_reload(struct lan966x_rx *rx)
+{
+ struct lan966x *lan966x = rx->lan966x;
+
+ lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(rx->channel_id)),
+ FDMA_CH_RELOAD_CH_RELOAD,
+ lan966x, FDMA_CH_RELOAD);
+}
+
+static void lan966x_fdma_tx_add_dcb(struct lan966x_tx *tx,
+ struct lan966x_tx_dcb *dcb)
+{
+ dcb->nextptr = FDMA_DCB_INVALID_DATA;
+ dcb->info = 0;
+}
+
+static int lan966x_fdma_tx_alloc(struct lan966x_tx *tx)
+{
+ struct lan966x *lan966x = tx->lan966x;
+ struct lan966x_tx_dcb *dcb;
+ struct lan966x_db *db;
+ int size;
+ int i, j;
+
+ tx->dcbs_buf = kcalloc(FDMA_DCB_MAX, sizeof(struct lan966x_tx_dcb_buf),
+ GFP_KERNEL);
+ if (!tx->dcbs_buf)
+ return -ENOMEM;
+
+ /* calculate how many pages are needed to allocate the dcbs */
+ size = sizeof(struct lan966x_tx_dcb) * FDMA_DCB_MAX;
+ size = ALIGN(size, PAGE_SIZE);
+ tx->dcbs = dma_alloc_coherent(lan966x->dev, size, &tx->dma, GFP_KERNEL);
+ if (!tx->dcbs)
+ goto out;
+
+ /* Now for each dcb allocate the db */
+ for (i = 0; i < FDMA_DCB_MAX; ++i) {
+ dcb = &tx->dcbs[i];
+
+ for (j = 0; j < FDMA_TX_DCB_MAX_DBS; ++j) {
+ db = &dcb->db[j];
+ db->dataptr = 0;
+ db->status = 0;
+ }
+
+ lan966x_fdma_tx_add_dcb(tx, dcb);
+ }
+
+ return 0;
+
+out:
+ kfree(tx->dcbs_buf);
+ return -ENOMEM;
+}
+
+static void lan966x_fdma_tx_free(struct lan966x_tx *tx)
+{
+ struct lan966x *lan966x = tx->lan966x;
+ int size;
+
+ kfree(tx->dcbs_buf);
+
+ size = sizeof(struct lan966x_tx_dcb) * FDMA_DCB_MAX;
+ size = ALIGN(size, PAGE_SIZE);
+ dma_free_coherent(lan966x->dev, size, tx->dcbs, tx->dma);
+}
+
+static void lan966x_fdma_tx_activate(struct lan966x_tx *tx)
+{
+ struct lan966x *lan966x = tx->lan966x;
+ u32 mask;
+
+	/* When activating a channel, the address of the first DCB must be
+	 * written before the channel is activated.
+	 */
+ lan_wr(lower_32_bits((u64)tx->dma), lan966x,
+ FDMA_DCB_LLP(tx->channel_id));
+ lan_wr(upper_32_bits((u64)tx->dma), lan966x,
+ FDMA_DCB_LLP1(tx->channel_id));
+
+ lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(FDMA_TX_DCB_MAX_DBS) |
+ FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
+ FDMA_CH_CFG_CH_INJ_PORT_SET(0) |
+ FDMA_CH_CFG_CH_MEM_SET(1),
+ lan966x, FDMA_CH_CFG(tx->channel_id));
+
+ /* Start fdma */
+ lan_rmw(FDMA_PORT_CTRL_INJ_STOP_SET(0),
+ FDMA_PORT_CTRL_INJ_STOP,
+ lan966x, FDMA_PORT_CTRL(0));
+
+ /* Enable interrupts */
+ mask = lan_rd(lan966x, FDMA_INTR_DB_ENA);
+ mask = FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(mask);
+ mask |= BIT(tx->channel_id);
+ lan_rmw(FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(mask),
+ FDMA_INTR_DB_ENA_INTR_DB_ENA,
+ lan966x, FDMA_INTR_DB_ENA);
+
+ /* Activate the channel */
+ lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(tx->channel_id)),
+ FDMA_CH_ACTIVATE_CH_ACTIVATE,
+ lan966x, FDMA_CH_ACTIVATE);
+}
+
+static void lan966x_fdma_tx_disable(struct lan966x_tx *tx)
+{
+ struct lan966x *lan966x = tx->lan966x;
+ u32 val;
+
+ /* Disable the channel */
+ lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(tx->channel_id)),
+ FDMA_CH_DISABLE_CH_DISABLE,
+ lan966x, FDMA_CH_DISABLE);
+
+ readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x,
+ val, !(val & BIT(tx->channel_id)),
+ READL_SLEEP_US, READL_TIMEOUT_US);
+
+ lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(tx->channel_id)),
+ FDMA_CH_DB_DISCARD_DB_DISCARD,
+ lan966x, FDMA_CH_DB_DISCARD);
+
+ tx->activated = false;
+ tx->last_in_use = -1;
+}
+
+static void lan966x_fdma_tx_reload(struct lan966x_tx *tx)
+{
+ struct lan966x *lan966x = tx->lan966x;
+
+ /* Write the registers to reload the channel */
+ lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(tx->channel_id)),
+ FDMA_CH_RELOAD_CH_RELOAD,
+ lan966x, FDMA_CH_RELOAD);
+}
+
+static void lan966x_fdma_wakeup_netdev(struct lan966x *lan966x)
+{
+ struct lan966x_port *port;
+ int i;
+
+ for (i = 0; i < lan966x->num_phys_ports; ++i) {
+ port = lan966x->ports[i];
+ if (!port)
+ continue;
+
+ if (netif_queue_stopped(port->dev))
+ netif_wake_queue(port->dev);
+ }
+}
+
+static void lan966x_fdma_stop_netdev(struct lan966x *lan966x)
+{
+ struct lan966x_port *port;
+ int i;
+
+ for (i = 0; i < lan966x->num_phys_ports; ++i) {
+ port = lan966x->ports[i];
+ if (!port)
+ continue;
+
+ netif_stop_queue(port->dev);
+ }
+}
+
+static void lan966x_fdma_tx_clear_buf(struct lan966x *lan966x, int weight)
+{
+ struct lan966x_tx *tx = &lan966x->tx;
+ struct lan966x_rx *rx = &lan966x->rx;
+ struct lan966x_tx_dcb_buf *dcb_buf;
+ struct xdp_frame_bulk bq;
+ struct lan966x_db *db;
+ unsigned long flags;
+ bool clear = false;
+ int i;
+
+ xdp_frame_bulk_init(&bq);
+
+ spin_lock_irqsave(&lan966x->tx_lock, flags);
+ for (i = 0; i < FDMA_DCB_MAX; ++i) {
+ dcb_buf = &tx->dcbs_buf[i];
+
+ if (!dcb_buf->used)
+ continue;
+
+ db = &tx->dcbs[i].db[0];
+ if (!(db->status & FDMA_DCB_STATUS_DONE))
+ continue;
+
+ dcb_buf->dev->stats.tx_packets++;
+ dcb_buf->dev->stats.tx_bytes += dcb_buf->len;
+
+ dcb_buf->used = false;
+ if (dcb_buf->use_skb) {
+ dma_unmap_single(lan966x->dev,
+ dcb_buf->dma_addr,
+ dcb_buf->len,
+ DMA_TO_DEVICE);
+
+ if (!dcb_buf->ptp)
+ napi_consume_skb(dcb_buf->data.skb, weight);
+ } else {
+ if (dcb_buf->xdp_ndo)
+ dma_unmap_single(lan966x->dev,
+ dcb_buf->dma_addr,
+ dcb_buf->len,
+ DMA_TO_DEVICE);
+
+ if (dcb_buf->xdp_ndo)
+ xdp_return_frame_bulk(dcb_buf->data.xdpf, &bq);
+ else
+ page_pool_recycle_direct(rx->page_pool,
+ dcb_buf->data.page);
+ }
+
+ clear = true;
+ }
+
+ xdp_flush_frame_bulk(&bq);
+
+ if (clear)
+ lan966x_fdma_wakeup_netdev(lan966x);
+
+ spin_unlock_irqrestore(&lan966x->tx_lock, flags);
+}
+
+static bool lan966x_fdma_rx_more_frames(struct lan966x_rx *rx)
+{
+ struct lan966x_db *db;
+
+ /* Check if there is any data */
+ db = &rx->dcbs[rx->dcb_index].db[rx->db_index];
+ if (unlikely(!(db->status & FDMA_DCB_STATUS_DONE)))
+ return false;
+
+ return true;
+}
+
+static int lan966x_fdma_rx_check_frame(struct lan966x_rx *rx, u64 *src_port)
+{
+ struct lan966x *lan966x = rx->lan966x;
+ struct lan966x_port *port;
+ struct lan966x_db *db;
+ struct page *page;
+
+ db = &rx->dcbs[rx->dcb_index].db[rx->db_index];
+ page = rx->page[rx->dcb_index][rx->db_index];
+ if (unlikely(!page))
+ return FDMA_ERROR;
+
+ dma_sync_single_for_cpu(lan966x->dev,
+ (dma_addr_t)db->dataptr + XDP_PACKET_HEADROOM,
+ FDMA_DCB_STATUS_BLOCKL(db->status),
+ DMA_FROM_DEVICE);
+
+ lan966x_ifh_get_src_port(page_address(page) + XDP_PACKET_HEADROOM,
+ src_port);
+ if (WARN_ON(*src_port >= lan966x->num_phys_ports))
+ return FDMA_ERROR;
+
+ port = lan966x->ports[*src_port];
+ if (!lan966x_xdp_port_present(port))
+ return FDMA_PASS;
+
+ return lan966x_xdp_run(port, page, FDMA_DCB_STATUS_BLOCKL(db->status));
+}
+
+static struct sk_buff *lan966x_fdma_rx_get_frame(struct lan966x_rx *rx,
+ u64 src_port)
+{
+ struct lan966x *lan966x = rx->lan966x;
+ struct lan966x_db *db;
+ struct sk_buff *skb;
+ struct page *page;
+ u64 timestamp;
+
+	/* Get the received frame and build an skb around its page */
+ db = &rx->dcbs[rx->dcb_index].db[rx->db_index];
+ page = rx->page[rx->dcb_index][rx->db_index];
+
+ skb = build_skb(page_address(page), PAGE_SIZE << rx->page_order);
+ if (unlikely(!skb))
+ goto free_page;
+
+ skb_mark_for_recycle(skb);
+
+ skb_reserve(skb, XDP_PACKET_HEADROOM);
+ skb_put(skb, FDMA_DCB_STATUS_BLOCKL(db->status));
+
+ lan966x_ifh_get_timestamp(skb->data, &timestamp);
+
+ skb->dev = lan966x->ports[src_port]->dev;
+ skb_pull(skb, IFH_LEN_BYTES);
+
+ if (likely(!(skb->dev->features & NETIF_F_RXFCS)))
+ skb_trim(skb, skb->len - ETH_FCS_LEN);
+
+ lan966x_ptp_rxtstamp(lan966x, skb, src_port, timestamp);
+ skb->protocol = eth_type_trans(skb, skb->dev);
+
+ if (lan966x->bridge_mask & BIT(src_port)) {
+ skb->offload_fwd_mark = 1;
+
+ skb_reset_network_header(skb);
+ if (!lan966x_hw_offload(lan966x, src_port, skb))
+ skb->offload_fwd_mark = 0;
+ }
+
+ skb->dev->stats.rx_bytes += skb->len;
+ skb->dev->stats.rx_packets++;
+
+ return skb;
+
+free_page:
+ page_pool_recycle_direct(rx->page_pool, page);
+
+ return NULL;
+}
+
+static int lan966x_fdma_napi_poll(struct napi_struct *napi, int weight)
+{
+ struct lan966x *lan966x = container_of(napi, struct lan966x, napi);
+ struct lan966x_rx *rx = &lan966x->rx;
+ int dcb_reload = rx->dcb_index;
+ struct lan966x_rx_dcb *old_dcb;
+ struct lan966x_db *db;
+ bool redirect = false;
+ struct sk_buff *skb;
+ struct page *page;
+ int counter = 0;
+ u64 src_port;
+ u64 nextptr;
+
+ lan966x_fdma_tx_clear_buf(lan966x, weight);
+
+	/* Process all received frames */
+ while (counter < weight) {
+ if (!lan966x_fdma_rx_more_frames(rx))
+ break;
+
+ counter++;
+
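+		/* FDMA_PASS hands the frame to the stack, FDMA_ERROR drops
+		 * it and recycles the page, FDMA_REDIRECT/FDMA_TX mean XDP
+		 * consumed the page, and FDMA_DROP just recycles it.
+		 */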
+ switch (lan966x_fdma_rx_check_frame(rx, &src_port)) {
+ case FDMA_PASS:
+ break;
+ case FDMA_ERROR:
+ lan966x_fdma_rx_free_page(rx);
+ lan966x_fdma_rx_advance_dcb(rx);
+ goto allocate_new;
+ case FDMA_REDIRECT:
+ redirect = true;
+ fallthrough;
+ case FDMA_TX:
+ lan966x_fdma_rx_advance_dcb(rx);
+ continue;
+ case FDMA_DROP:
+ lan966x_fdma_rx_free_page(rx);
+ lan966x_fdma_rx_advance_dcb(rx);
+ continue;
+ }
+
+ skb = lan966x_fdma_rx_get_frame(rx, src_port);
+ lan966x_fdma_rx_advance_dcb(rx);
+ if (!skb)
+ goto allocate_new;
+
+ napi_gro_receive(&lan966x->napi, skb);
+ }
+
+allocate_new:
+ /* Allocate new pages and map them */
+ while (dcb_reload != rx->dcb_index) {
+ db = &rx->dcbs[dcb_reload].db[rx->db_index];
+ page = lan966x_fdma_rx_alloc_page(rx, db);
+ if (unlikely(!page))
+ break;
+ rx->page[dcb_reload][rx->db_index] = page;
+
+ old_dcb = &rx->dcbs[dcb_reload];
+ dcb_reload++;
+ dcb_reload &= FDMA_DCB_MAX - 1;
+
+ nextptr = rx->dma + ((unsigned long)old_dcb -
+ (unsigned long)rx->dcbs);
+ lan966x_fdma_rx_add_dcb(rx, old_dcb, nextptr);
+ lan966x_fdma_rx_reload(rx);
+ }
+
+ if (redirect)
+ xdp_do_flush();
+
+ if (counter < weight && napi_complete_done(napi, counter))
+ lan_wr(0xff, lan966x, FDMA_INTR_DB_ENA);
+
+ return counter;
+}
+
+irqreturn_t lan966x_fdma_irq_handler(int irq, void *args)
+{
+ struct lan966x *lan966x = args;
+ u32 db, err, err_type;
+
+ db = lan_rd(lan966x, FDMA_INTR_DB);
+ err = lan_rd(lan966x, FDMA_INTR_ERR);
+
+ if (db) {
+ lan_wr(0, lan966x, FDMA_INTR_DB_ENA);
+ lan_wr(db, lan966x, FDMA_INTR_DB);
+
+ napi_schedule(&lan966x->napi);
+ }
+
+ if (err) {
+ err_type = lan_rd(lan966x, FDMA_ERRORS);
+
+ WARN(1, "Unexpected error: %d, error_type: %d\n", err, err_type);
+
+ lan_wr(err, lan966x, FDMA_INTR_ERR);
+ lan_wr(err_type, lan966x, FDMA_ERRORS);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int lan966x_fdma_get_next_dcb(struct lan966x_tx *tx)
+{
+ struct lan966x_tx_dcb_buf *dcb_buf;
+ int i;
+
+ for (i = 0; i < FDMA_DCB_MAX; ++i) {
+ dcb_buf = &tx->dcbs_buf[i];
+ if (!dcb_buf->used && i != tx->last_in_use)
+ return i;
+ }
+
+ return -1;
+}
+
+static void lan966x_fdma_tx_setup_dcb(struct lan966x_tx *tx,
+ int next_to_use, int len,
+ dma_addr_t dma_addr)
+{
+ struct lan966x_tx_dcb *next_dcb;
+ struct lan966x_db *next_db;
+
+ next_dcb = &tx->dcbs[next_to_use];
+ next_dcb->nextptr = FDMA_DCB_INVALID_DATA;
+
+ next_db = &next_dcb->db[0];
+ next_db->dataptr = dma_addr;
+ next_db->status = FDMA_DCB_STATUS_SOF |
+ FDMA_DCB_STATUS_EOF |
+ FDMA_DCB_STATUS_INTR |
+ FDMA_DCB_STATUS_BLOCKO(0) |
+ FDMA_DCB_STATUS_BLOCKL(len);
+}
+
+static void lan966x_fdma_tx_start(struct lan966x_tx *tx, int next_to_use)
+{
+ struct lan966x *lan966x = tx->lan966x;
+ struct lan966x_tx_dcb *dcb;
+
+ if (likely(lan966x->tx.activated)) {
+		/* Connect the current dcb to the next dcb */
+ dcb = &tx->dcbs[tx->last_in_use];
+ dcb->nextptr = tx->dma + (next_to_use *
+ sizeof(struct lan966x_tx_dcb));
+
+ lan966x_fdma_tx_reload(tx);
+ } else {
+		/* First transmission on this channel, so just activate it */
+ lan966x->tx.activated = true;
+ lan966x_fdma_tx_activate(tx);
+ }
+
+	/* Move to the next dcb, as this one is now the last in use */
+ tx->last_in_use = next_to_use;
+}
+
+int lan966x_fdma_xmit_xdpf(struct lan966x_port *port, void *ptr, u32 len)
+{
+ struct lan966x *lan966x = port->lan966x;
+ struct lan966x_tx_dcb_buf *next_dcb_buf;
+ struct lan966x_tx *tx = &lan966x->tx;
+ struct xdp_frame *xdpf;
+ dma_addr_t dma_addr;
+ struct page *page;
+ int next_to_use;
+ __be32 *ifh;
+ int ret = 0;
+
+ spin_lock(&lan966x->tx_lock);
+
+ /* Get next index */
+ next_to_use = lan966x_fdma_get_next_dcb(tx);
+ if (next_to_use < 0) {
+ netif_stop_queue(port->dev);
+ ret = NETDEV_TX_BUSY;
+ goto out;
+ }
+
+ /* Get the next buffer */
+ next_dcb_buf = &tx->dcbs_buf[next_to_use];
+
+ /* Generate new IFH */
+ if (!len) {
+ xdpf = ptr;
+
+ if (xdpf->headroom < IFH_LEN_BYTES) {
+ ret = NETDEV_TX_OK;
+ goto out;
+ }
+
+ ifh = xdpf->data - IFH_LEN_BYTES;
+ memset(ifh, 0x0, sizeof(__be32) * IFH_LEN);
+ lan966x_ifh_set_bypass(ifh, 1);
+ lan966x_ifh_set_port(ifh, BIT_ULL(port->chip_port));
+
+ dma_addr = dma_map_single(lan966x->dev,
+ xdpf->data - IFH_LEN_BYTES,
+ xdpf->len + IFH_LEN_BYTES,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(lan966x->dev, dma_addr)) {
+ ret = NETDEV_TX_OK;
+ goto out;
+ }
+
+ next_dcb_buf->data.xdpf = xdpf;
+ next_dcb_buf->len = xdpf->len + IFH_LEN_BYTES;
+
+ /* Setup next dcb */
+ lan966x_fdma_tx_setup_dcb(tx, next_to_use,
+ xdpf->len + IFH_LEN_BYTES,
+ dma_addr);
+ } else {
+ page = ptr;
+
+ ifh = page_address(page) + XDP_PACKET_HEADROOM;
+ memset(ifh, 0x0, sizeof(__be32) * IFH_LEN);
+ lan966x_ifh_set_bypass(ifh, 1);
+ lan966x_ifh_set_port(ifh, BIT_ULL(port->chip_port));
+
+ dma_addr = page_pool_get_dma_addr(page);
+ dma_sync_single_for_device(lan966x->dev,
+ dma_addr + XDP_PACKET_HEADROOM,
+ len + IFH_LEN_BYTES,
+ DMA_TO_DEVICE);
+
+ next_dcb_buf->data.page = page;
+ next_dcb_buf->len = len + IFH_LEN_BYTES;
+
+ /* Setup next dcb */
+ lan966x_fdma_tx_setup_dcb(tx, next_to_use,
+ len + IFH_LEN_BYTES,
+ dma_addr + XDP_PACKET_HEADROOM);
+ }
+
+ /* Fill up the buffer */
+ next_dcb_buf->use_skb = false;
+ next_dcb_buf->xdp_ndo = !len;
+ next_dcb_buf->dma_addr = dma_addr;
+ next_dcb_buf->used = true;
+ next_dcb_buf->ptp = false;
+ next_dcb_buf->dev = port->dev;
+
+ /* Start the transmission */
+ lan966x_fdma_tx_start(tx, next_to_use);
+
+out:
+ spin_unlock(&lan966x->tx_lock);
+
+ return ret;
+}
+
+int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ struct lan966x_tx_dcb_buf *next_dcb_buf;
+ struct lan966x_tx *tx = &lan966x->tx;
+ int needed_headroom;
+ int needed_tailroom;
+ dma_addr_t dma_addr;
+ int next_to_use;
+ int err;
+
+ /* Get next index */
+ next_to_use = lan966x_fdma_get_next_dcb(tx);
+ if (next_to_use < 0) {
+ netif_stop_queue(dev);
+ return NETDEV_TX_BUSY;
+ }
+
+ if (skb_put_padto(skb, ETH_ZLEN)) {
+ dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+
+ /* skb processing */
+ needed_headroom = max_t(int, IFH_LEN_BYTES - skb_headroom(skb), 0);
+ needed_tailroom = max_t(int, ETH_FCS_LEN - skb_tailroom(skb), 0);
+ if (needed_headroom || needed_tailroom || skb_header_cloned(skb)) {
+ err = pskb_expand_head(skb, needed_headroom, needed_tailroom,
+ GFP_ATOMIC);
+ if (unlikely(err)) {
+ dev->stats.tx_dropped++;
+ err = NETDEV_TX_OK;
+ goto release;
+ }
+ }
+
+ skb_tx_timestamp(skb);
+ skb_push(skb, IFH_LEN_BYTES);
+ memcpy(skb->data, ifh, IFH_LEN_BYTES);
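+	/* Make room for the 4-byte FCS at the end of the frame */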
+ skb_put(skb, 4);
+
+ dma_addr = dma_map_single(lan966x->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(lan966x->dev, dma_addr)) {
+ dev->stats.tx_dropped++;
+ err = NETDEV_TX_OK;
+ goto release;
+ }
+
+ /* Setup next dcb */
+ lan966x_fdma_tx_setup_dcb(tx, next_to_use, skb->len, dma_addr);
+
+ /* Fill up the buffer */
+ next_dcb_buf = &tx->dcbs_buf[next_to_use];
+ next_dcb_buf->use_skb = true;
+ next_dcb_buf->data.skb = skb;
+ next_dcb_buf->xdp_ndo = false;
+ next_dcb_buf->len = skb->len;
+ next_dcb_buf->dma_addr = dma_addr;
+ next_dcb_buf->used = true;
+ next_dcb_buf->ptp = false;
+ next_dcb_buf->dev = dev;
+
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+ LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
+ next_dcb_buf->ptp = true;
+
+ /* Start the transmission */
+ lan966x_fdma_tx_start(tx, next_to_use);
+
+ return NETDEV_TX_OK;
+
+release:
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+ LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
+ lan966x_ptp_txtstamp_release(port, skb);
+
+ dev_kfree_skb_any(skb);
+ return err;
+}
+
+static int lan966x_fdma_get_max_mtu(struct lan966x *lan966x)
+{
+ int max_mtu = 0;
+ int i;
+
+ for (i = 0; i < lan966x->num_phys_ports; ++i) {
+ struct lan966x_port *port;
+ int mtu;
+
+ port = lan966x->ports[i];
+ if (!port)
+ continue;
+
+ mtu = lan_rd(lan966x, DEV_MAC_MAXLEN_CFG(port->chip_port));
+ if (mtu > max_mtu)
+ max_mtu = mtu;
+ }
+
+ return max_mtu;
+}
+
+static int lan966x_qsys_sw_status(struct lan966x *lan966x)
+{
+ return lan_rd(lan966x, QSYS_SW_STATUS(CPU_PORT));
+}
+
+static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu)
+{
+ struct page_pool *page_pool;
+ dma_addr_t rx_dma;
+ void *rx_dcbs;
+ u32 size;
+ int err;
+
+	/* Save these so they can be freed once the new ones are in place */
+ rx_dma = lan966x->rx.dma;
+ rx_dcbs = lan966x->rx.dcbs;
+ page_pool = lan966x->rx.page_pool;
+
+ napi_synchronize(&lan966x->napi);
+ napi_disable(&lan966x->napi);
+ lan966x_fdma_stop_netdev(lan966x);
+
+ lan966x_fdma_rx_disable(&lan966x->rx);
+ lan966x_fdma_rx_free_pages(&lan966x->rx);
+ lan966x->rx.page_order = round_up(new_mtu, PAGE_SIZE) / PAGE_SIZE - 1;
+ lan966x->rx.max_mtu = new_mtu;
+ err = lan966x_fdma_rx_alloc(&lan966x->rx);
+ if (err)
+ goto restore;
+ lan966x_fdma_rx_start(&lan966x->rx);
+
+ size = sizeof(struct lan966x_rx_dcb) * FDMA_DCB_MAX;
+ size = ALIGN(size, PAGE_SIZE);
+ dma_free_coherent(lan966x->dev, size, rx_dcbs, rx_dma);
+
+ page_pool_destroy(page_pool);
+
+ lan966x_fdma_wakeup_netdev(lan966x);
+ napi_enable(&lan966x->napi);
+
+ return err;
+restore:
+ lan966x->rx.page_pool = page_pool;
+ lan966x->rx.dma = rx_dma;
+ lan966x->rx.dcbs = rx_dcbs;
+ lan966x_fdma_rx_start(&lan966x->rx);
+
+ return err;
+}
+
+static int lan966x_fdma_get_max_frame(struct lan966x *lan966x)
+{
+ return lan966x_fdma_get_max_mtu(lan966x) +
+ IFH_LEN_BYTES +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
+ VLAN_HLEN * 2 +
+ XDP_PACKET_HEADROOM;
+}
+
+static int __lan966x_fdma_reload(struct lan966x *lan966x, int max_mtu)
+{
+ int err;
+ u32 val;
+
+ /* Disable the CPU port */
+ lan_rmw(QSYS_SW_PORT_MODE_PORT_ENA_SET(0),
+ QSYS_SW_PORT_MODE_PORT_ENA,
+ lan966x, QSYS_SW_PORT_MODE(CPU_PORT));
+
+ /* Flush the CPU queues */
+ readx_poll_timeout(lan966x_qsys_sw_status, lan966x,
+ val, !(QSYS_SW_STATUS_EQ_AVAIL_GET(val)),
+ READL_SLEEP_US, READL_TIMEOUT_US);
+
+ /* Add a sleep in case there are frames between the queues and the CPU
+ * port
+ */
+ usleep_range(1000, 2000);
+
+ err = lan966x_fdma_reload(lan966x, max_mtu);
+
+	/* Re-enable the CPU port */
+ lan_rmw(QSYS_SW_PORT_MODE_PORT_ENA_SET(1),
+ QSYS_SW_PORT_MODE_PORT_ENA,
+ lan966x, QSYS_SW_PORT_MODE(CPU_PORT));
+
+ return err;
+}
+
+int lan966x_fdma_change_mtu(struct lan966x *lan966x)
+{
+ int max_mtu;
+
+ max_mtu = lan966x_fdma_get_max_frame(lan966x);
+ if (max_mtu == lan966x->rx.max_mtu)
+ return 0;
+
+ return __lan966x_fdma_reload(lan966x, max_mtu);
+}
+
+int lan966x_fdma_reload_page_pool(struct lan966x *lan966x)
+{
+ int max_mtu;
+
+ max_mtu = lan966x_fdma_get_max_frame(lan966x);
+ return __lan966x_fdma_reload(lan966x, max_mtu);
+}
+
+void lan966x_fdma_netdev_init(struct lan966x *lan966x, struct net_device *dev)
+{
+ if (lan966x->fdma_ndev)
+ return;
+
+ lan966x->fdma_ndev = dev;
+ netif_napi_add(dev, &lan966x->napi, lan966x_fdma_napi_poll);
+ napi_enable(&lan966x->napi);
+}
+
+void lan966x_fdma_netdev_deinit(struct lan966x *lan966x, struct net_device *dev)
+{
+ if (lan966x->fdma_ndev == dev) {
+ netif_napi_del(&lan966x->napi);
+ lan966x->fdma_ndev = NULL;
+ }
+}
+
+int lan966x_fdma_init(struct lan966x *lan966x)
+{
+ int err;
+
+ if (!lan966x->fdma)
+ return 0;
+
+ lan966x->rx.lan966x = lan966x;
+ lan966x->rx.channel_id = FDMA_XTR_CHANNEL;
+ lan966x->rx.max_mtu = lan966x_fdma_get_max_frame(lan966x);
+ lan966x->tx.lan966x = lan966x;
+ lan966x->tx.channel_id = FDMA_INJ_CHANNEL;
+ lan966x->tx.last_in_use = -1;
+
+ err = lan966x_fdma_rx_alloc(&lan966x->rx);
+ if (err)
+ return err;
+
+ err = lan966x_fdma_tx_alloc(&lan966x->tx);
+ if (err) {
+ lan966x_fdma_rx_free(&lan966x->rx);
+ return err;
+ }
+
+ lan966x_fdma_rx_start(&lan966x->rx);
+
+ return 0;
+}
+
+void lan966x_fdma_deinit(struct lan966x *lan966x)
+{
+ if (!lan966x->fdma)
+ return;
+
+ lan966x_fdma_rx_disable(&lan966x->rx);
+ lan966x_fdma_tx_disable(&lan966x->tx);
+
+ napi_synchronize(&lan966x->napi);
+ napi_disable(&lan966x->napi);
+
+ lan966x_fdma_rx_free_pages(&lan966x->rx);
+ lan966x_fdma_rx_free(&lan966x->rx);
+ page_pool_destroy(lan966x->rx.page_pool);
+ lan966x_fdma_tx_free(&lan966x->tx);
+}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_goto.c b/drivers/net/ethernet/microchip/lan966x/lan966x_goto.c
new file mode 100644
index 0000000000..9b18156eea
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_goto.c
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include "lan966x_main.h"
+#include "vcap_api_client.h"
+
+int lan966x_goto_port_add(struct lan966x_port *port,
+ int from_cid, int to_cid,
+ unsigned long goto_id,
+ struct netlink_ext_ack *extack)
+{
+ struct lan966x *lan966x = port->lan966x;
+ int err;
+
+ err = vcap_enable_lookups(lan966x->vcap_ctrl, port->dev,
+ from_cid, to_cid, goto_id,
+ true);
+ if (err == -EFAULT) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported goto chain");
+ return -EOPNOTSUPP;
+ }
+
+ if (err == -EADDRINUSE) {
+ NL_SET_ERR_MSG_MOD(extack, "VCAP already enabled");
+ return -EOPNOTSUPP;
+ }
+
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Could not enable VCAP lookups");
+ return err;
+ }
+
+ return 0;
+}
+
+int lan966x_goto_port_del(struct lan966x_port *port,
+ unsigned long goto_id,
+ struct netlink_ext_ack *extack)
+{
+ struct lan966x *lan966x = port->lan966x;
+ int err;
+
+ err = vcap_enable_lookups(lan966x->vcap_ctrl, port->dev, 0, 0,
+ goto_id, false);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Could not disable VCAP lookups");
+ return err;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ifh.h b/drivers/net/ethernet/microchip/lan966x/lan966x_ifh.h
new file mode 100644
index 0000000000..f3b1e0d318
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ifh.h
@@ -0,0 +1,174 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef __LAN966X_IFH_H__
+#define __LAN966X_IFH_H__
+
+/* Fields whose description is marked with (*) should just be cleared upon
+ * injection. The IFH is transmitted MSByte first (the highest bit position
+ * is sent as the MSB of the first byte).
+ */
+
+#define IFH_LEN 7
+#define IFH_LEN_BYTES (IFH_LEN * sizeof(u32))
+
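+/* Each field is defined by the bit position of its LSB within the 224-bit
+ * (7 x 32-bit word) IFH (IFH_POS_*) and by its width in bits (IFH_WID_*).
+ */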
+/* Timestamp for frame */
+#define IFH_POS_TIMESTAMP 192
+
+/* Bypass analyzer with a prefilled IFH */
+#define IFH_POS_BYPASS 191
+
+/* Masqueraded injection with masq_port defining logical source port */
+#define IFH_POS_MASQ 190
+
+/* Masqueraded port number for injection */
+#define IFH_POS_MASQ_PORT 186
+
+/* Frame length (*) */
+#define IFH_POS_LEN 178
+
+/* Cell filling mode. Full(0), Etype(1), LlctOpt(2), Llct(3) */
+#define IFH_POS_WRDMODE 176
+
+/* Frame has 16 bits rtag removed compared to line data */
+#define IFH_POS_RTAG48 175
+
+/* Frame has a redundancy tag */
+#define IFH_POS_HAS_RED_TAG 174
+
+/* Frame has been cut through forwarded (*) */
+#define IFH_POS_CUTTHRU 173
+
+/* Rewriter command */
+#define IFH_POS_REW_CMD 163
+
+/* Enable OAM-related rewriting. PDU_TYPE encodes OAM type. */
+#define IFH_POS_REW_OAM 162
+
+/* PDU type. Encoding: (0-NONE, 1-Y1731_CCM, 2-MRP_TST, 3-MRP_ITST, 4-DLR_BCN,
+ * 5-DLR_ADV, 6-RTE_NULL_INJ, 7-IPV4, 8-IPV6, 9-Y1731_NON_CCM).
+ */
+#define IFH_POS_PDU_TYPE 158
+
+/* Update FCS before transmission */
+#define IFH_POS_FCS_UPD 157
+
+/* Classified DSCP value of frame */
+#define IFH_POS_DSCP 151
+
+/* Yellow indication */
+#define IFH_POS_DP 150
+
+/* Process in RTE/inbound */
+#define IFH_POS_RTE_INB_UPDATE 149
+
+/* Number of tags to pop from frame */
+#define IFH_POS_POP_CNT 147
+
+/* Number of tags in front of the ethertype */
+#define IFH_POS_ETYPE_OFS 145
+
+/* Logical source port of frame (*) */
+#define IFH_POS_SRCPORT 141
+
+/* Sequence number in redundancy tag */
+#define IFH_POS_SEQ_NUM 120
+
+/* S-tagged flag and classified TCI of frame (PCP/DEI/VID) */
+#define IFH_POS_TCI 103
+
+/* Classified internal priority for queuing */
+#define IFH_POS_QOS_CLASS 100
+
+/* Bit mask with eight cpu copy classes */
+#define IFH_POS_CPUQ 92
+
+/* Relearn + learn flags (*) */
+#define IFH_POS_LEARN_FLAGS 90
+
+/* SFLOW identifier for frame (0-8: Tx port, 9: Rx sampling, 15: No sampling) */
+#define IFH_POS_SFLOW_ID 86
+
+/* Set if an ACL/S2 rule was hit (*).
+ * Super priority: acl_hit=0 and acl_hit(4)=1.
+ */
+#define IFH_POS_ACL_HIT 85
+
+/* S2 rule index hit (*) */
+#define IFH_POS_ACL_IDX 79
+
+/* ISDX as classified by S1 */
+#define IFH_POS_ISDX 71
+
+/* Destination ports for frame */
+#define IFH_POS_DSTS 62
+
+/* Storm policer to be applied: None/Uni/Multi/Broad (*) */
+#define IFH_POS_FLOOD 60
+
+/* Redundancy tag operation */
+#define IFH_POS_SEQ_OP 58
+
+/* Classified internal priority for resource management, tagging etc. */
+#define IFH_POS_IPV 55
+
+/* Frame is for AFI use */
+#define IFH_POS_AFI 54
+
+/* Internal aging value (*) */
+#define IFH_POS_AGED 52
+
+/* RTP Identifier */
+#define IFH_POS_RTP_ID 42
+
+/* RTP MRPD flow */
+#define IFH_POS_RTP_SUBID 41
+
+/* Profinet DataStatus or opcua GroupVersion MSB */
+#define IFH_POS_PN_DATA_STATUS 33
+
+/* Profinet transfer status (1 iff the status is 0) */
+#define IFH_POS_PN_TRANSF_STATUS_ZERO 32
+
+/* Profinet cycle counter or opcua NetworkMessageNumber */
+#define IFH_POS_PN_CC 16
+
+#define IFH_WID_TIMESTAMP 32
+#define IFH_WID_BYPASS 1
+#define IFH_WID_MASQ 1
+#define IFH_WID_MASQ_PORT 4
+#define IFH_WID_LEN 14
+#define IFH_WID_WRDMODE 2
+#define IFH_WID_RTAG48 1
+#define IFH_WID_HAS_RED_TAG 1
+#define IFH_WID_CUTTHRU 1
+#define IFH_WID_REW_CMD 10
+#define IFH_WID_REW_OAM 1
+#define IFH_WID_PDU_TYPE 4
+#define IFH_WID_FCS_UPD 1
+#define IFH_WID_DSCP 6
+#define IFH_WID_DP 1
+#define IFH_WID_RTE_INB_UPDATE 1
+#define IFH_WID_POP_CNT 2
+#define IFH_WID_ETYPE_OFS 2
+#define IFH_WID_SRCPORT 4
+#define IFH_WID_SEQ_NUM 16
+#define IFH_WID_TCI 17
+#define IFH_WID_QOS_CLASS 3
+#define IFH_WID_CPUQ 8
+#define IFH_WID_LEARN_FLAGS 2
+#define IFH_WID_SFLOW_ID 4
+#define IFH_WID_ACL_HIT 1
+#define IFH_WID_ACL_IDX 6
+#define IFH_WID_ISDX 8
+#define IFH_WID_DSTS 9
+#define IFH_WID_FLOOD 2
+#define IFH_WID_SEQ_OP 2
+#define IFH_WID_IPV 3
+#define IFH_WID_AFI 1
+#define IFH_WID_AGED 2
+#define IFH_WID_RTP_ID 10
+#define IFH_WID_RTP_SUBID 1
+#define IFH_WID_PN_DATA_STATUS 8
+#define IFH_WID_PN_TRANSF_STATUS_ZERO 1
+#define IFH_WID_PN_CC 16
+
+#endif /* __LAN966X_IFH_H__ */
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_lag.c b/drivers/net/ethernet/microchip/lan966x/lan966x_lag.c
new file mode 100644
index 0000000000..41fa2523d9
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_lag.c
@@ -0,0 +1,363 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/if_bridge.h>
+
+#include "lan966x_main.h"
+
+static void lan966x_lag_set_aggr_pgids(struct lan966x *lan966x)
+{
+ u32 visited = GENMASK(lan966x->num_phys_ports - 1, 0);
+ int p, lag, i;
+
+ /* Reset destination and aggregation PGIDS */
+ for (p = 0; p < lan966x->num_phys_ports; ++p)
+ lan_wr(ANA_PGID_PGID_SET(BIT(p)),
+ lan966x, ANA_PGID(p));
+
+ for (p = PGID_AGGR; p < PGID_SRC; ++p)
+ lan_wr(ANA_PGID_PGID_SET(visited),
+ lan966x, ANA_PGID(p));
+
+ /* The visited ports bitmask holds the list of ports offloading any
+ * bonding interface. Initially we mark all these ports as unvisited,
+ * then every time we visit a port in this bitmask, we know that it is
+ * the lowest numbered port, i.e. the one whose logical ID == physical
+ * port ID == LAG ID. So we mark as visited all further ports in the
+ * bitmask that are offloading the same bonding interface. This way,
+ * we set up the aggregation PGIDs only once per bonding interface.
+ */
+ for (p = 0; p < lan966x->num_phys_ports; ++p) {
+ struct lan966x_port *port = lan966x->ports[p];
+
+ if (!port || !port->bond)
+ continue;
+
+ visited &= ~BIT(p);
+ }
+
+ /* Now, set PGIDs for each active LAG */
+ for (lag = 0; lag < lan966x->num_phys_ports; ++lag) {
+ struct net_device *bond = lan966x->ports[lag]->bond;
+ int num_active_ports = 0;
+ unsigned long bond_mask;
+ u8 aggr_idx[16];
+
+ if (!bond || (visited & BIT(lag)))
+ continue;
+
+ bond_mask = lan966x_lag_get_mask(lan966x, bond);
+
+ for_each_set_bit(p, &bond_mask, lan966x->num_phys_ports) {
+ struct lan966x_port *port = lan966x->ports[p];
+
+ lan_wr(ANA_PGID_PGID_SET(bond_mask),
+ lan966x, ANA_PGID(p));
+ if (port->lag_tx_active)
+ aggr_idx[num_active_ports++] = p;
+ }
+
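+		/* Spread the aggregation codes round-robin over the ports of
+		 * this LAG that are currently active in Tx.
+		 */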
+ for (i = PGID_AGGR; i < PGID_SRC; ++i) {
+ u32 ac;
+
+ ac = lan_rd(lan966x, ANA_PGID(i));
+ ac &= ~bond_mask;
+ /* Don't do division by zero if there was no active
+ * port. Just make all aggregation codes zero.
+ */
+ if (num_active_ports)
+ ac |= BIT(aggr_idx[i % num_active_ports]);
+ lan_wr(ANA_PGID_PGID_SET(ac),
+ lan966x, ANA_PGID(i));
+ }
+
+ /* Mark all ports in the same LAG as visited to avoid applying
+ * the same config again.
+ */
+ for (p = lag; p < lan966x->num_phys_ports; p++) {
+ struct lan966x_port *port = lan966x->ports[p];
+
+ if (!port)
+ continue;
+
+ if (port->bond == bond)
+ visited |= BIT(p);
+ }
+ }
+}
+
+static void lan966x_lag_set_port_ids(struct lan966x *lan966x)
+{
+ struct lan966x_port *port;
+ u32 bond_mask;
+ u32 lag_id;
+ int p;
+
+ for (p = 0; p < lan966x->num_phys_ports; ++p) {
+ port = lan966x->ports[p];
+ if (!port)
+ continue;
+
+ lag_id = port->chip_port;
+
+ bond_mask = lan966x_lag_get_mask(lan966x, port->bond);
+ if (bond_mask)
+ lag_id = __ffs(bond_mask);
+
+ lan_rmw(ANA_PORT_CFG_PORTID_VAL_SET(lag_id),
+ ANA_PORT_CFG_PORTID_VAL,
+ lan966x, ANA_PORT_CFG(port->chip_port));
+ }
+}
+
+static void lan966x_lag_update_ids(struct lan966x *lan966x)
+{
+ lan966x_lag_set_port_ids(lan966x);
+ lan966x_update_fwd_mask(lan966x);
+ lan966x_lag_set_aggr_pgids(lan966x);
+}
+
+int lan966x_lag_port_join(struct lan966x_port *port,
+ struct net_device *brport_dev,
+ struct net_device *bond,
+ struct netlink_ext_ack *extack)
+{
+ struct lan966x *lan966x = port->lan966x;
+ struct net_device *dev = port->dev;
+ u32 lag_id = -1;
+ u32 bond_mask;
+ int err;
+
+ bond_mask = lan966x_lag_get_mask(lan966x, bond);
+ if (bond_mask)
+ lag_id = __ffs(bond_mask);
+
+ port->bond = bond;
+ lan966x_lag_update_ids(lan966x);
+
+ err = switchdev_bridge_port_offload(brport_dev, dev, port,
+ &lan966x_switchdev_nb,
+ &lan966x_switchdev_blocking_nb,
+ false, extack);
+ if (err)
+ goto out;
+
+ lan966x_port_stp_state_set(port, br_port_get_stp_state(brport_dev));
+
+ if (lan966x_lag_first_port(port->bond, port->dev) &&
+ lag_id != -1)
+ lan966x_mac_lag_replace_port_entry(lan966x,
+ lan966x->ports[lag_id],
+ port);
+
+ return 0;
+
+out:
+ port->bond = NULL;
+ lan966x_lag_update_ids(lan966x);
+
+ return err;
+}
+
+void lan966x_lag_port_leave(struct lan966x_port *port, struct net_device *bond)
+{
+ struct lan966x *lan966x = port->lan966x;
+ u32 bond_mask;
+ u32 lag_id;
+
+ if (lan966x_lag_first_port(port->bond, port->dev)) {
+ bond_mask = lan966x_lag_get_mask(lan966x, port->bond);
+ bond_mask &= ~BIT(port->chip_port);
+ if (bond_mask) {
+ lag_id = __ffs(bond_mask);
+ lan966x_mac_lag_replace_port_entry(lan966x, port,
+ lan966x->ports[lag_id]);
+ } else {
+ lan966x_mac_lag_remove_port_entry(lan966x, port);
+ }
+ }
+
+ port->bond = NULL;
+ lan966x_lag_update_ids(lan966x);
+ lan966x_port_stp_state_set(port, BR_STATE_FORWARDING);
+}
+
+static bool lan966x_lag_port_check_hash_types(struct lan966x *lan966x,
+ enum netdev_lag_hash hash_type)
+{
+ int p;
+
+ for (p = 0; p < lan966x->num_phys_ports; ++p) {
+ struct lan966x_port *port = lan966x->ports[p];
+
+ if (!port || !port->bond)
+ continue;
+
+ if (port->hash_type != hash_type)
+ return false;
+ }
+
+ return true;
+}
+
+int lan966x_lag_port_prechangeupper(struct net_device *dev,
+ struct netdev_notifier_changeupper_info *info)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ struct netdev_lag_upper_info *lui;
+ struct netlink_ext_ack *extack;
+
+ extack = netdev_notifier_info_to_extack(&info->info);
+ lui = info->upper_info;
+ if (!lui) {
+ port->hash_type = NETDEV_LAG_HASH_NONE;
+ return NOTIFY_DONE;
+ }
+
+ if (lui->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "LAG device using unsupported Tx type");
+ return -EINVAL;
+ }
+
+ if (!lan966x_lag_port_check_hash_types(lan966x, lui->hash_type)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "LAG devices can have only the same hash_type");
+ return -EINVAL;
+ }
+
+ switch (lui->hash_type) {
+ case NETDEV_LAG_HASH_L2:
+ lan_wr(ANA_AGGR_CFG_AC_DMAC_ENA_SET(1) |
+ ANA_AGGR_CFG_AC_SMAC_ENA_SET(1),
+ lan966x, ANA_AGGR_CFG);
+ break;
+ case NETDEV_LAG_HASH_L34:
+ lan_wr(ANA_AGGR_CFG_AC_IP6_TCPUDP_ENA_SET(1) |
+ ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA_SET(1) |
+ ANA_AGGR_CFG_AC_IP4_SIPDIP_ENA_SET(1),
+ lan966x, ANA_AGGR_CFG);
+ break;
+ case NETDEV_LAG_HASH_L23:
+ lan_wr(ANA_AGGR_CFG_AC_DMAC_ENA_SET(1) |
+ ANA_AGGR_CFG_AC_SMAC_ENA_SET(1) |
+ ANA_AGGR_CFG_AC_IP6_TCPUDP_ENA_SET(1) |
+ ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA_SET(1),
+ lan966x, ANA_AGGR_CFG);
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack,
+ "LAG device using unsupported hash type");
+ return -EINVAL;
+ }
+
+ port->hash_type = lui->hash_type;
+
+ return NOTIFY_OK;
+}
+
+int lan966x_lag_port_changelowerstate(struct net_device *dev,
+ struct netdev_notifier_changelowerstate_info *info)
+{
+ struct netdev_lag_lower_state_info *lag = info->lower_state_info;
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ bool is_active;
+
+ if (!port->bond)
+ return NOTIFY_DONE;
+
+ is_active = lag->link_up && lag->tx_enabled;
+ if (port->lag_tx_active == is_active)
+ return NOTIFY_DONE;
+
+ port->lag_tx_active = is_active;
+ lan966x_lag_set_aggr_pgids(lan966x);
+
+ return NOTIFY_OK;
+}
+
+int lan966x_lag_netdev_prechangeupper(struct net_device *dev,
+ struct netdev_notifier_changeupper_info *info)
+{
+ struct lan966x_port *port;
+ struct net_device *lower;
+ struct list_head *iter;
+ int err;
+
+ netdev_for_each_lower_dev(dev, lower, iter) {
+ if (!lan966x_netdevice_check(lower))
+ continue;
+
+ port = netdev_priv(lower);
+ if (port->bond != dev)
+ continue;
+
+ err = lan966x_port_prechangeupper(lower, dev, info);
+ if (err)
+ return err;
+ }
+
+ return NOTIFY_DONE;
+}
+
+int lan966x_lag_netdev_changeupper(struct net_device *dev,
+ struct netdev_notifier_changeupper_info *info)
+{
+ struct lan966x_port *port;
+ struct net_device *lower;
+ struct list_head *iter;
+ int err;
+
+ netdev_for_each_lower_dev(dev, lower, iter) {
+ if (!lan966x_netdevice_check(lower))
+ continue;
+
+ port = netdev_priv(lower);
+ if (port->bond != dev)
+ continue;
+
+ err = lan966x_port_changeupper(lower, dev, info);
+ if (err)
+ return err;
+ }
+
+ return NOTIFY_DONE;
+}
+
+bool lan966x_lag_first_port(struct net_device *lag, struct net_device *dev)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ unsigned long bond_mask;
+
+ if (port->bond != lag)
+ return false;
+
+ bond_mask = lan966x_lag_get_mask(lan966x, lag);
+ if (bond_mask && port->chip_port == __ffs(bond_mask))
+ return true;
+
+ return false;
+}
+
+u32 lan966x_lag_get_mask(struct lan966x *lan966x, struct net_device *bond)
+{
+ struct lan966x_port *port;
+ u32 mask = 0;
+ int p;
+
+ if (!bond)
+ return mask;
+
+ for (p = 0; p < lan966x->num_phys_ports; p++) {
+ port = lan966x->ports[p];
+ if (!port)
+ continue;
+
+ if (port->bond == bond)
+ mask |= BIT(p);
+ }
+
+ return mask;
+}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c b/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c
new file mode 100644
index 0000000000..baa3a30c03
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c
@@ -0,0 +1,592 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <net/switchdev.h>
+#include "lan966x_main.h"
+
+#define LAN966X_MAC_COLUMNS 4
+#define MACACCESS_CMD_IDLE 0
+#define MACACCESS_CMD_LEARN 1
+#define MACACCESS_CMD_FORGET 2
+#define MACACCESS_CMD_AGE 3
+#define MACACCESS_CMD_GET_NEXT 4
+#define MACACCESS_CMD_INIT 5
+#define MACACCESS_CMD_READ 6
+#define MACACCESS_CMD_WRITE 7
+#define MACACCESS_CMD_SYNC_GET_NEXT 8
+
+#define LAN966X_MAC_INVALID_ROW -1
+
+struct lan966x_mac_entry {
+ struct list_head list;
+ unsigned char mac[ETH_ALEN] __aligned(2);
+ u16 vid;
+ u16 port_index;
+ int row;
+ bool lag;
+};
+
+struct lan966x_mac_raw_entry {
+ u32 mach;
+ u32 macl;
+ u32 maca;
+ bool processed;
+};
+
+static int lan966x_mac_get_status(struct lan966x *lan966x)
+{
+ return lan_rd(lan966x, ANA_MACACCESS);
+}
+
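+/* Poll the MAC table control register until the issued command completes
+ * and the command field returns to IDLE.
+ */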
+static int lan966x_mac_wait_for_completion(struct lan966x *lan966x)
+{
+ u32 val;
+
+ return readx_poll_timeout_atomic(lan966x_mac_get_status,
+ lan966x, val,
+ (ANA_MACACCESS_MAC_TABLE_CMD_GET(val)) ==
+ MACACCESS_CMD_IDLE,
+ TABLE_UPDATE_SLEEP_US,
+ TABLE_UPDATE_TIMEOUT_US);
+}
+
+static void lan966x_mac_select(struct lan966x *lan966x,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid)
+{
+ u32 macl = 0, mach = 0;
+
+	/* Set the MAC address to handle and the associated vlan, in a format
+	 * understood by the hardware.
+	 */
+ mach |= vid << 16;
+ mach |= mac[0] << 8;
+ mach |= mac[1] << 0;
+ macl |= mac[2] << 24;
+ macl |= mac[3] << 16;
+ macl |= mac[4] << 8;
+ macl |= mac[5] << 0;
+
+ lan_wr(macl, lan966x, ANA_MACLDATA);
+ lan_wr(mach, lan966x, ANA_MACHDATA);
+}
+
+static int __lan966x_mac_learn_locked(struct lan966x *lan966x, int pgid,
+ bool cpu_copy,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid,
+ enum macaccess_entry_type type)
+{
+ lockdep_assert_held(&lan966x->mac_lock);
+
+ lan966x_mac_select(lan966x, mac, vid);
+
+ /* Issue a write command */
+ lan_wr(ANA_MACACCESS_VALID_SET(1) |
+ ANA_MACACCESS_CHANGE2SW_SET(0) |
+ ANA_MACACCESS_MAC_CPU_COPY_SET(cpu_copy) |
+ ANA_MACACCESS_DEST_IDX_SET(pgid) |
+ ANA_MACACCESS_ENTRYTYPE_SET(type) |
+ ANA_MACACCESS_MAC_TABLE_CMD_SET(MACACCESS_CMD_LEARN),
+ lan966x, ANA_MACACCESS);
+
+ return lan966x_mac_wait_for_completion(lan966x);
+}
+
+static int __lan966x_mac_learn(struct lan966x *lan966x, int pgid,
+ bool cpu_copy,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid,
+ enum macaccess_entry_type type)
+{
+ int ret;
+
+ spin_lock(&lan966x->mac_lock);
+ ret = __lan966x_mac_learn_locked(lan966x, pgid, cpu_copy, mac, vid, type);
+ spin_unlock(&lan966x->mac_lock);
+
+ return ret;
+}
+
+/* The mask of the front ports is encoded inside the mac parameter via a call
+ * to lan966x_mdb_encode_mac().
+ */
+int lan966x_mac_ip_learn(struct lan966x *lan966x,
+ bool cpu_copy,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid,
+ enum macaccess_entry_type type)
+{
+ WARN_ON(type != ENTRYTYPE_MACV4 && type != ENTRYTYPE_MACV6);
+
+ return __lan966x_mac_learn(lan966x, 0, cpu_copy, mac, vid, type);
+}
+
+int lan966x_mac_learn(struct lan966x *lan966x, int port,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid,
+ enum macaccess_entry_type type)
+{
+ WARN_ON(type != ENTRYTYPE_NORMAL && type != ENTRYTYPE_LOCKED);
+
+ return __lan966x_mac_learn(lan966x, port, false, mac, vid, type);
+}
+
+static int lan966x_mac_learn_locked(struct lan966x *lan966x, int port,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid,
+ enum macaccess_entry_type type)
+{
+ WARN_ON(type != ENTRYTYPE_NORMAL && type != ENTRYTYPE_LOCKED);
+
+ return __lan966x_mac_learn_locked(lan966x, port, false, mac, vid, type);
+}
+
+static int lan966x_mac_forget_locked(struct lan966x *lan966x,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid,
+ enum macaccess_entry_type type)
+{
+ lockdep_assert_held(&lan966x->mac_lock);
+
+ lan966x_mac_select(lan966x, mac, vid);
+
+ /* Issue a forget command */
+ lan_wr(ANA_MACACCESS_ENTRYTYPE_SET(type) |
+ ANA_MACACCESS_MAC_TABLE_CMD_SET(MACACCESS_CMD_FORGET),
+ lan966x, ANA_MACACCESS);
+
+ return lan966x_mac_wait_for_completion(lan966x);
+}
+
+int lan966x_mac_forget(struct lan966x *lan966x,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid,
+ enum macaccess_entry_type type)
+{
+ int ret;
+
+ spin_lock(&lan966x->mac_lock);
+ ret = lan966x_mac_forget_locked(lan966x, mac, vid, type);
+ spin_unlock(&lan966x->mac_lock);
+
+ return ret;
+}
+
+int lan966x_mac_cpu_learn(struct lan966x *lan966x, const char *addr, u16 vid)
+{
+ return lan966x_mac_learn(lan966x, PGID_CPU, addr, vid, ENTRYTYPE_LOCKED);
+}
+
+int lan966x_mac_cpu_forget(struct lan966x *lan966x, const char *addr, u16 vid)
+{
+ return lan966x_mac_forget(lan966x, addr, vid, ENTRYTYPE_LOCKED);
+}
+
+void lan966x_mac_set_ageing(struct lan966x *lan966x,
+ u32 ageing)
+{
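+	/* The HW ages out an entry after two AGE_PERIOD intervals, so program
+	 * half of the requested ageing time.
+	 */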
+ lan_rmw(ANA_AUTOAGE_AGE_PERIOD_SET(ageing / 2),
+ ANA_AUTOAGE_AGE_PERIOD,
+ lan966x, ANA_AUTOAGE);
+}
+
+void lan966x_mac_init(struct lan966x *lan966x)
+{
+ /* Clear the MAC table */
+ lan_wr(MACACCESS_CMD_INIT, lan966x, ANA_MACACCESS);
+ lan966x_mac_wait_for_completion(lan966x);
+
+ spin_lock_init(&lan966x->mac_lock);
+ INIT_LIST_HEAD(&lan966x->mac_entries);
+}
+
+static struct lan966x_mac_entry *lan966x_mac_alloc_entry(struct lan966x_port *port,
+ const unsigned char *mac,
+ u16 vid)
+{
+ struct lan966x_mac_entry *mac_entry;
+
+ mac_entry = kzalloc(sizeof(*mac_entry), GFP_ATOMIC);
+ if (!mac_entry)
+ return NULL;
+
+ memcpy(mac_entry->mac, mac, ETH_ALEN);
+ mac_entry->vid = vid;
+ mac_entry->port_index = port->chip_port;
+ mac_entry->row = LAN966X_MAC_INVALID_ROW;
+ mac_entry->lag = port->bond ? true : false;
+ return mac_entry;
+}
+
+static struct lan966x_mac_entry *lan966x_mac_find_entry(struct lan966x *lan966x,
+ const unsigned char *mac,
+ u16 vid, u16 port_index)
+{
+ struct lan966x_mac_entry *res = NULL;
+ struct lan966x_mac_entry *mac_entry;
+
+ list_for_each_entry(mac_entry, &lan966x->mac_entries, list) {
+ if (mac_entry->vid == vid &&
+ ether_addr_equal(mac, mac_entry->mac) &&
+ mac_entry->port_index == port_index) {
+ res = mac_entry;
+ break;
+ }
+ }
+
+ return res;
+}
+
+static int lan966x_mac_lookup(struct lan966x *lan966x,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid, enum macaccess_entry_type type)
+{
+ int ret;
+
+ lan966x_mac_select(lan966x, mac, vid);
+
+ /* Issue a read command */
+ lan_wr(ANA_MACACCESS_ENTRYTYPE_SET(type) |
+ ANA_MACACCESS_VALID_SET(1) |
+ ANA_MACACCESS_MAC_TABLE_CMD_SET(MACACCESS_CMD_READ),
+ lan966x, ANA_MACACCESS);
+
+ ret = lan966x_mac_wait_for_completion(lan966x);
+ if (ret)
+ return ret;
+
+ return ANA_MACACCESS_VALID_GET(lan_rd(lan966x, ANA_MACACCESS));
+}
+
+static void lan966x_fdb_call_notifiers(enum switchdev_notifier_type type,
+ const char *mac, u16 vid,
+ struct net_device *dev)
+{
+ struct switchdev_notifier_fdb_info info = { 0 };
+
+ info.addr = mac;
+ info.vid = vid;
+ info.offloaded = true;
+ call_switchdev_notifiers(type, dev, &info.info, NULL);
+}
+
+int lan966x_mac_add_entry(struct lan966x *lan966x, struct lan966x_port *port,
+ const unsigned char *addr, u16 vid)
+{
+ struct lan966x_mac_entry *mac_entry;
+
+ spin_lock(&lan966x->mac_lock);
+ if (lan966x_mac_lookup(lan966x, addr, vid, ENTRYTYPE_NORMAL)) {
+ spin_unlock(&lan966x->mac_lock);
+ return 0;
+ }
+
+	/* In case the entry already exists, don't add it again to SW,
+	 * just update HW. We still need to look in the actual HW first,
+	 * because an entry can be learned by HW and, before we get the
+	 * interrupt, the frame can reach the CPU, which then adds the
+	 * entry but without the extern_learn flag.
+	 */
+ mac_entry = lan966x_mac_find_entry(lan966x, addr, vid, port->chip_port);
+ if (mac_entry) {
+ spin_unlock(&lan966x->mac_lock);
+ goto mac_learn;
+ }
+
+ mac_entry = lan966x_mac_alloc_entry(port, addr, vid);
+ if (!mac_entry) {
+ spin_unlock(&lan966x->mac_lock);
+ return -ENOMEM;
+ }
+
+ list_add_tail(&mac_entry->list, &lan966x->mac_entries);
+ spin_unlock(&lan966x->mac_lock);
+
+ lan966x_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED, addr, vid,
+ port->bond ?: port->dev);
+
+mac_learn:
+ lan966x_mac_learn(lan966x, port->chip_port, addr, vid, ENTRYTYPE_LOCKED);
+
+ return 0;
+}
+
+int lan966x_mac_del_entry(struct lan966x *lan966x, const unsigned char *addr,
+ u16 vid)
+{
+ struct lan966x_mac_entry *mac_entry, *tmp;
+
+ spin_lock(&lan966x->mac_lock);
+ list_for_each_entry_safe(mac_entry, tmp, &lan966x->mac_entries,
+ list) {
+ if (mac_entry->vid == vid &&
+ ether_addr_equal(addr, mac_entry->mac)) {
+ lan966x_mac_forget_locked(lan966x, mac_entry->mac,
+ mac_entry->vid,
+ ENTRYTYPE_LOCKED);
+
+ list_del(&mac_entry->list);
+ kfree(mac_entry);
+ }
+ }
+ spin_unlock(&lan966x->mac_lock);
+
+ return 0;
+}
+
+void lan966x_mac_lag_replace_port_entry(struct lan966x *lan966x,
+ struct lan966x_port *src,
+ struct lan966x_port *dst)
+{
+ struct lan966x_mac_entry *mac_entry;
+
+ spin_lock(&lan966x->mac_lock);
+ list_for_each_entry(mac_entry, &lan966x->mac_entries, list) {
+ if (mac_entry->port_index == src->chip_port &&
+ mac_entry->lag) {
+ lan966x_mac_forget_locked(lan966x, mac_entry->mac,
+ mac_entry->vid,
+ ENTRYTYPE_LOCKED);
+
+ lan966x_mac_learn_locked(lan966x, dst->chip_port,
+ mac_entry->mac, mac_entry->vid,
+ ENTRYTYPE_LOCKED);
+ mac_entry->port_index = dst->chip_port;
+ }
+ }
+ spin_unlock(&lan966x->mac_lock);
+}
+
+void lan966x_mac_lag_remove_port_entry(struct lan966x *lan966x,
+ struct lan966x_port *src)
+{
+ struct lan966x_mac_entry *mac_entry, *tmp;
+
+ spin_lock(&lan966x->mac_lock);
+ list_for_each_entry_safe(mac_entry, tmp, &lan966x->mac_entries,
+ list) {
+ if (mac_entry->port_index == src->chip_port &&
+ mac_entry->lag) {
+ lan966x_mac_forget_locked(lan966x, mac_entry->mac,
+ mac_entry->vid,
+ ENTRYTYPE_LOCKED);
+
+ list_del(&mac_entry->list);
+ kfree(mac_entry);
+ }
+ }
+ spin_unlock(&lan966x->mac_lock);
+}
+
+void lan966x_mac_purge_entries(struct lan966x *lan966x)
+{
+ struct lan966x_mac_entry *mac_entry, *tmp;
+
+ spin_lock(&lan966x->mac_lock);
+ list_for_each_entry_safe(mac_entry, tmp, &lan966x->mac_entries,
+ list) {
+ lan966x_mac_forget_locked(lan966x, mac_entry->mac,
+ mac_entry->vid, ENTRYTYPE_LOCKED);
+
+ list_del(&mac_entry->list);
+ kfree(mac_entry);
+ }
+ spin_unlock(&lan966x->mac_lock);
+}
+
+static void lan966x_mac_notifiers(enum switchdev_notifier_type type,
+ unsigned char *mac, u32 vid,
+ struct net_device *dev)
+{
+ rtnl_lock();
+ lan966x_fdb_call_notifiers(type, mac, vid, dev);
+ rtnl_unlock();
+}
+
+static void lan966x_mac_process_raw_entry(struct lan966x_mac_raw_entry *raw_entry,
+ u8 *mac, u16 *vid, u32 *dest_idx)
+{
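+	/* Inverse of lan966x_mac_select(): unpack the MAC address, the VID and
+	 * the destination index from the raw MACHDATA/MACLDATA/MACACCESS words.
+	 */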
+ mac[0] = (raw_entry->mach >> 8) & 0xff;
+ mac[1] = (raw_entry->mach >> 0) & 0xff;
+ mac[2] = (raw_entry->macl >> 24) & 0xff;
+ mac[3] = (raw_entry->macl >> 16) & 0xff;
+ mac[4] = (raw_entry->macl >> 8) & 0xff;
+ mac[5] = (raw_entry->macl >> 0) & 0xff;
+
+ *vid = (raw_entry->mach >> 16) & 0xfff;
+ *dest_idx = ANA_MACACCESS_DEST_IDX_GET(raw_entry->maca);
+}
+
+static void lan966x_mac_irq_process(struct lan966x *lan966x, u32 row,
+ struct lan966x_mac_raw_entry *raw_entries)
+{
+ struct lan966x_mac_entry *mac_entry, *tmp;
+ unsigned char mac[ETH_ALEN] __aligned(2);
+ struct list_head mac_deleted_entries;
+ struct lan966x_port *port;
+ u32 dest_idx;
+ u32 column;
+ u16 vid;
+
+ INIT_LIST_HEAD(&mac_deleted_entries);
+
+ spin_lock(&lan966x->mac_lock);
+ list_for_each_entry_safe(mac_entry, tmp, &lan966x->mac_entries, list) {
+ bool found = false;
+
+ if (mac_entry->row != row)
+ continue;
+
+ for (column = 0; column < LAN966X_MAC_COLUMNS; ++column) {
+			/* All the valid entries are at the start of the row,
+			 * so once an invalid entry is found the rest of the
+			 * columns can be skipped.
+			 */
+ if (!ANA_MACACCESS_VALID_GET(raw_entries[column].maca))
+ break;
+
+ lan966x_mac_process_raw_entry(&raw_entries[column],
+ mac, &vid, &dest_idx);
+ if (WARN_ON(dest_idx >= lan966x->num_phys_ports))
+ continue;
+
+ /* If the entry in SW is found, then there is nothing
+ * to do
+ */
+ if (mac_entry->vid == vid &&
+ ether_addr_equal(mac_entry->mac, mac) &&
+ mac_entry->port_index == dest_idx) {
+ raw_entries[column].processed = true;
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ list_del(&mac_entry->list);
+			/* Move the entry from the SW list to a tmp list so
+			 * that it can be deleted later.
+			 */
+ list_add_tail(&mac_entry->list, &mac_deleted_entries);
+ }
+ }
+ spin_unlock(&lan966x->mac_lock);
+
+ list_for_each_entry_safe(mac_entry, tmp, &mac_deleted_entries, list) {
+ /* Notify the bridge that the entry doesn't exist
+ * anymore in the HW
+ */
+ port = lan966x->ports[mac_entry->port_index];
+ lan966x_mac_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE,
+ mac_entry->mac, mac_entry->vid,
+ port->bond ?: port->dev);
+ list_del(&mac_entry->list);
+ kfree(mac_entry);
+ }
+
+	/* Now go over the columns and check whether any entry was not in the
+	 * SW list; such an entry is new, so the bridge needs to be notified.
+	 */
+ for (column = 0; column < LAN966X_MAC_COLUMNS; ++column) {
+		/* All the valid entries are at the start of the row, so once
+		 * an invalid entry is found the rest of the columns can be
+		 * skipped.
+		 */
+ if (!ANA_MACACCESS_VALID_GET(raw_entries[column].maca))
+ break;
+
+ /* If the entry already exists then don't do anything */
+ if (raw_entries[column].processed)
+ continue;
+
+ lan966x_mac_process_raw_entry(&raw_entries[column],
+ mac, &vid, &dest_idx);
+ if (WARN_ON(dest_idx >= lan966x->num_phys_ports))
+ continue;
+
+ spin_lock(&lan966x->mac_lock);
+ mac_entry = lan966x_mac_find_entry(lan966x, mac, vid, dest_idx);
+ if (mac_entry) {
+ spin_unlock(&lan966x->mac_lock);
+ continue;
+ }
+
+ port = lan966x->ports[dest_idx];
+ mac_entry = lan966x_mac_alloc_entry(port, mac, vid);
+ if (!mac_entry) {
+ spin_unlock(&lan966x->mac_lock);
+ return;
+ }
+
+ mac_entry->row = row;
+ list_add_tail(&mac_entry->list, &lan966x->mac_entries);
+ spin_unlock(&lan966x->mac_lock);
+
+ lan966x_mac_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE,
+ mac, vid, port->bond ?: port->dev);
+ }
+}
+
+irqreturn_t lan966x_mac_irq_handler(struct lan966x *lan966x)
+{
+ struct lan966x_mac_raw_entry entry[LAN966X_MAC_COLUMNS] = { 0 };
+ u32 index, column;
+ bool stop = true;
+ u32 val;
+
+ /* Start the scan from 0, 0 */
+ lan_wr(ANA_MACTINDX_M_INDEX_SET(0) |
+ ANA_MACTINDX_BUCKET_SET(0),
+ lan966x, ANA_MACTINDX);
+
+ while (1) {
+ spin_lock(&lan966x->mac_lock);
+ lan_rmw(ANA_MACACCESS_MAC_TABLE_CMD_SET(MACACCESS_CMD_SYNC_GET_NEXT),
+ ANA_MACACCESS_MAC_TABLE_CMD,
+ lan966x, ANA_MACACCESS);
+ lan966x_mac_wait_for_completion(lan966x);
+
+ val = lan_rd(lan966x, ANA_MACTINDX);
+ index = ANA_MACTINDX_M_INDEX_GET(val);
+ column = ANA_MACTINDX_BUCKET_GET(val);
+
+		/* SYNC-GET-NEXT returns all the entries (4) of a row that has
+		 * suffered a change, meaning that a new entry was added or an
+		 * entry was removed because of ageing. It returns all the
+		 * columns of that row and after that the next changed row.
+		 * The stop condition of SYNC-GET-NEXT is when it reaches
+		 * 'directly' row 0 column 3. So if SYNC-GET-NEXT first returns
+		 * row 0 and column 0, it is required to continue reading even
+		 * when it reaches row 0 and column 3.
+		 */
+ if (index == 0 && column == 0)
+ stop = false;
+
+ if (column == LAN966X_MAC_COLUMNS - 1 &&
+ index == 0 && stop) {
+ spin_unlock(&lan966x->mac_lock);
+ break;
+ }
+
+ entry[column].mach = lan_rd(lan966x, ANA_MACHDATA);
+ entry[column].macl = lan_rd(lan966x, ANA_MACLDATA);
+ entry[column].maca = lan_rd(lan966x, ANA_MACACCESS);
+ spin_unlock(&lan966x->mac_lock);
+
+ /* Once all the columns are read process them */
+ if (column == LAN966X_MAC_COLUMNS - 1) {
+ lan966x_mac_irq_process(lan966x, index, entry);
+ /* A row was processed so it is safe to assume that the
+ * next row/column can be the stop condition
+ */
+ stop = true;
+ }
+ }
+
+ lan_rmw(ANA_ANAINTR_INTR_SET(0),
+ ANA_ANAINTR_INTR,
+ lan966x, ANA_ANAINTR);
+
+ return IRQ_HANDLED;
+}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
new file mode 100644
index 0000000000..0d6e79af24
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
@@ -0,0 +1,1324 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/module.h>
+#include <linux/if_bridge.h>
+#include <linux/if_vlan.h>
+#include <linux/iopoll.h>
+#include <linux/ip.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <net/addrconf.h>
+
+#include "lan966x_main.h"
+
+#define XTR_EOF_0 0x00000080U
+#define XTR_EOF_1 0x01000080U
+#define XTR_EOF_2 0x02000080U
+#define XTR_EOF_3 0x03000080U
+#define XTR_PRUNED 0x04000080U
+#define XTR_ABORT 0x05000080U
+#define XTR_ESCAPE 0x06000080U
+#define XTR_NOT_READY 0x07000080U
+#define XTR_VALID_BYTES(x) (4 - (((x) >> 24) & 3))
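+
+/* The extraction FIFO returns the control words above in-band with the
+ * frame data: XTR_EOF_x encodes how many bytes of the last data word are
+ * valid (see XTR_VALID_BYTES) and XTR_ESCAPE marks that the following word
+ * is literal frame data.
+ */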
+
+#define IO_RANGES 2
+
+static const struct of_device_id lan966x_match[] = {
+ { .compatible = "microchip,lan966x-switch" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, lan966x_match);
+
+struct lan966x_main_io_resource {
+ enum lan966x_target id;
+ phys_addr_t offset;
+ int range;
+};
+
+static const struct lan966x_main_io_resource lan966x_main_iomap[] = {
+ { TARGET_CPU, 0xc0000, 0 }, /* 0xe00c0000 */
+ { TARGET_FDMA, 0xc0400, 0 }, /* 0xe00c0400 */
+ { TARGET_ORG, 0, 1 }, /* 0xe2000000 */
+ { TARGET_GCB, 0x4000, 1 }, /* 0xe2004000 */
+ { TARGET_QS, 0x8000, 1 }, /* 0xe2008000 */
+ { TARGET_PTP, 0xc000, 1 }, /* 0xe200c000 */
+ { TARGET_CHIP_TOP, 0x10000, 1 }, /* 0xe2010000 */
+ { TARGET_REW, 0x14000, 1 }, /* 0xe2014000 */
+ { TARGET_VCAP, 0x18000, 1 }, /* 0xe2018000 */
+ { TARGET_VCAP + 1, 0x20000, 1 }, /* 0xe2020000 */
+ { TARGET_VCAP + 2, 0x24000, 1 }, /* 0xe2024000 */
+ { TARGET_SYS, 0x28000, 1 }, /* 0xe2028000 */
+ { TARGET_DEV, 0x34000, 1 }, /* 0xe2034000 */
+ { TARGET_DEV + 1, 0x38000, 1 }, /* 0xe2038000 */
+ { TARGET_DEV + 2, 0x3c000, 1 }, /* 0xe203c000 */
+ { TARGET_DEV + 3, 0x40000, 1 }, /* 0xe2040000 */
+ { TARGET_DEV + 4, 0x44000, 1 }, /* 0xe2044000 */
+ { TARGET_DEV + 5, 0x48000, 1 }, /* 0xe2048000 */
+ { TARGET_DEV + 6, 0x4c000, 1 }, /* 0xe204c000 */
+ { TARGET_DEV + 7, 0x50000, 1 }, /* 0xe2050000 */
+ { TARGET_QSYS, 0x100000, 1 }, /* 0xe2100000 */
+ { TARGET_AFI, 0x120000, 1 }, /* 0xe2120000 */
+ { TARGET_ANA, 0x140000, 1 }, /* 0xe2140000 */
+};
+
+static int lan966x_create_targets(struct platform_device *pdev,
+ struct lan966x *lan966x)
+{
+ struct resource *iores[IO_RANGES];
+ void __iomem *begin[IO_RANGES];
+ int idx;
+
+ /* Initially map the entire range and after that update each target to
+ * point inside the region at the correct offset. It is possible that
+ * other devices access the same region so don't add any checks about
+ * this.
+ */
+ for (idx = 0; idx < IO_RANGES; idx++) {
+ iores[idx] = platform_get_resource(pdev, IORESOURCE_MEM,
+ idx);
+ if (!iores[idx]) {
+ dev_err(&pdev->dev, "Invalid resource\n");
+ return -EINVAL;
+ }
+
+ begin[idx] = devm_ioremap(&pdev->dev,
+ iores[idx]->start,
+ resource_size(iores[idx]));
+ if (!begin[idx]) {
+ dev_err(&pdev->dev, "Unable to get registers: %s\n",
+ iores[idx]->name);
+ return -ENOMEM;
+ }
+ }
+
+ for (idx = 0; idx < ARRAY_SIZE(lan966x_main_iomap); idx++) {
+ const struct lan966x_main_io_resource *iomap =
+ &lan966x_main_iomap[idx];
+
+ lan966x->regs[iomap->id] = begin[iomap->range] + iomap->offset;
+ }
+
+ return 0;
+}
+
+static bool lan966x_port_unique_address(struct net_device *dev)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ int p;
+
+ for (p = 0; p < lan966x->num_phys_ports; ++p) {
+ port = lan966x->ports[p];
+ if (!port || port->dev == dev)
+ continue;
+
+ if (ether_addr_equal(dev->dev_addr, port->dev->dev_addr))
+ return false;
+ }
+
+ return true;
+}
+
+static int lan966x_port_set_mac_address(struct net_device *dev, void *p)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ const struct sockaddr *addr = p;
+ int ret;
+
+ if (ether_addr_equal(addr->sa_data, dev->dev_addr))
+ return 0;
+
+ /* Learn the new net device MAC address in the mac table. */
+ ret = lan966x_mac_cpu_learn(lan966x, addr->sa_data, HOST_PVID);
+ if (ret)
+ return ret;
+
+ /* If there is another port with the same address as the dev, then don't
+ * delete it from the MAC table
+ */
+ if (!lan966x_port_unique_address(dev))
+ goto out;
+
+ /* Then forget the previous one. */
+ ret = lan966x_mac_cpu_forget(lan966x, dev->dev_addr, HOST_PVID);
+ if (ret)
+ return ret;
+
+out:
+ eth_hw_addr_set(dev, addr->sa_data);
+ return ret;
+}
+
+static int lan966x_port_get_phys_port_name(struct net_device *dev,
+ char *buf, size_t len)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ int ret;
+
+ ret = snprintf(buf, len, "p%d", port->chip_port);
+ if (ret >= len)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int lan966x_port_open(struct net_device *dev)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ int err;
+
+ /* Enable receiving frames on the port, and activate auto-learning of
+ * MAC addresses.
+ */
+ lan_rmw(ANA_PORT_CFG_LEARNAUTO_SET(1) |
+ ANA_PORT_CFG_RECV_ENA_SET(1) |
+ ANA_PORT_CFG_PORTID_VAL_SET(port->chip_port),
+ ANA_PORT_CFG_LEARNAUTO |
+ ANA_PORT_CFG_RECV_ENA |
+ ANA_PORT_CFG_PORTID_VAL,
+ lan966x, ANA_PORT_CFG(port->chip_port));
+
+ err = phylink_fwnode_phy_connect(port->phylink, port->fwnode, 0);
+ if (err) {
+ netdev_err(dev, "Could not attach to PHY\n");
+ return err;
+ }
+
+ phylink_start(port->phylink);
+
+ return 0;
+}
+
+static int lan966x_port_stop(struct net_device *dev)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+
+ lan966x_port_config_down(port);
+ phylink_stop(port->phylink);
+ phylink_disconnect_phy(port->phylink);
+
+ return 0;
+}
+
+static int lan966x_port_inj_status(struct lan966x *lan966x)
+{
+ return lan_rd(lan966x, QS_INJ_STATUS);
+}
+
+static int lan966x_port_inj_ready(struct lan966x *lan966x, u8 grp)
+{
+ u32 val;
+
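+	/* Fast path: avoid the poll loop when the FIFO is already ready */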
+ if (lan_rd(lan966x, QS_INJ_STATUS) & QS_INJ_STATUS_FIFO_RDY_SET(BIT(grp)))
+ return 0;
+
+ return readx_poll_timeout_atomic(lan966x_port_inj_status, lan966x, val,
+ QS_INJ_STATUS_FIFO_RDY_GET(val) & BIT(grp),
+ READL_SLEEP_US, READL_TIMEOUT_US);
+}
+
+static int lan966x_port_ifh_xmit(struct sk_buff *skb,
+ __be32 *ifh,
+ struct net_device *dev)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ u32 i, count, last;
+ u8 grp = 0;
+ u32 val;
+ int err;
+
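+	/* Manual injection sequence: signal SOF, push the IFH words, push the
+	 * frame data word by word, pad up to the minimum frame size, then
+	 * signal EOF together with the number of valid bytes in the last word
+	 * and finally write a dummy placeholder for the FCS.
+	 */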
+ val = lan_rd(lan966x, QS_INJ_STATUS);
+ if (!(QS_INJ_STATUS_FIFO_RDY_GET(val) & BIT(grp)) ||
+ (QS_INJ_STATUS_WMARK_REACHED_GET(val) & BIT(grp)))
+ goto err;
+
+ /* Write start of frame */
+ lan_wr(QS_INJ_CTRL_GAP_SIZE_SET(1) |
+ QS_INJ_CTRL_SOF_SET(1),
+ lan966x, QS_INJ_CTRL(grp));
+
+ /* Write IFH header */
+ for (i = 0; i < IFH_LEN; ++i) {
+ /* Wait until the fifo is ready */
+ err = lan966x_port_inj_ready(lan966x, grp);
+ if (err)
+ goto err;
+
+ lan_wr((__force u32)ifh[i], lan966x, QS_INJ_WR(grp));
+ }
+
+ /* Write frame */
+ count = DIV_ROUND_UP(skb->len, 4);
+ last = skb->len % 4;
+ for (i = 0; i < count; ++i) {
+ /* Wait until the fifo is ready */
+ err = lan966x_port_inj_ready(lan966x, grp);
+ if (err)
+ goto err;
+
+ lan_wr(((u32 *)skb->data)[i], lan966x, QS_INJ_WR(grp));
+ }
+
+ /* Add padding */
+ while (i < (LAN966X_BUFFER_MIN_SZ / 4)) {
+ /* Wait until the fifo is ready */
+ err = lan966x_port_inj_ready(lan966x, grp);
+ if (err)
+ goto err;
+
+ lan_wr(0, lan966x, QS_INJ_WR(grp));
+ ++i;
+ }
+
+	/* Indicate EOF and valid bytes in the last word */
+ lan_wr(QS_INJ_CTRL_GAP_SIZE_SET(1) |
+ QS_INJ_CTRL_VLD_BYTES_SET(skb->len < LAN966X_BUFFER_MIN_SZ ?
+ 0 : last) |
+ QS_INJ_CTRL_EOF_SET(1),
+ lan966x, QS_INJ_CTRL(grp));
+
+ /* Add dummy CRC */
+ lan_wr(0, lan966x, QS_INJ_WR(grp));
+ skb_tx_timestamp(skb);
+
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
+
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+ LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
+ return NETDEV_TX_OK;
+
+ dev_consume_skb_any(skb);
+ return NETDEV_TX_OK;
+
+err:
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+ LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
+ lan966x_ptp_txtstamp_release(port, skb);
+
+ return NETDEV_TX_BUSY;
+}
+
+static void lan966x_ifh_set(u8 *ifh, size_t val, size_t pos, size_t length)
+{
+ int i = 0;
+
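+	/* The IFH is stored with its last byte at the highest array index, so
+	 * bit 'pos' of the header lives in byte IFH_LEN_BYTES - pos / 8 - 1.
+	 * Each iteration copies one byte of 'val', split across two adjacent
+	 * IFH bytes when 'pos' is not byte aligned.
+	 */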
+ do {
+ u8 p = IFH_LEN_BYTES - (pos + i) / 8 - 1;
+ u8 v = val >> i & 0xff;
+
+		/* There is no need to check the limits of the array, as these
+		 * will never be written out of range.
+		 */
+ ifh[p] |= v << ((pos + i) % 8);
+ ifh[p - 1] |= v >> (8 - (pos + i) % 8);
+
+ i += 8;
+ } while (i < length);
+}
+
+void lan966x_ifh_set_bypass(void *ifh, u64 bypass)
+{
+ lan966x_ifh_set(ifh, bypass, IFH_POS_BYPASS, IFH_WID_BYPASS);
+}
+
+void lan966x_ifh_set_port(void *ifh, u64 port)
+{
+ lan966x_ifh_set(ifh, port, IFH_POS_DSTS, IFH_WID_DSTS);
+}
+
+static void lan966x_ifh_set_qos_class(void *ifh, u64 qos)
+{
+ lan966x_ifh_set(ifh, qos, IFH_POS_QOS_CLASS, IFH_WID_QOS_CLASS);
+}
+
+static void lan966x_ifh_set_ipv(void *ifh, u64 ipv)
+{
+ lan966x_ifh_set(ifh, ipv, IFH_POS_IPV, IFH_WID_IPV);
+}
+
+static void lan966x_ifh_set_vid(void *ifh, u64 vid)
+{
+ lan966x_ifh_set(ifh, vid, IFH_POS_TCI, IFH_WID_TCI);
+}
+
+static void lan966x_ifh_set_rew_op(void *ifh, u64 rew_op)
+{
+ lan966x_ifh_set(ifh, rew_op, IFH_POS_REW_CMD, IFH_WID_REW_CMD);
+}
+
+static void lan966x_ifh_set_timestamp(void *ifh, u64 timestamp)
+{
+ lan966x_ifh_set(ifh, timestamp, IFH_POS_TIMESTAMP, IFH_WID_TIMESTAMP);
+}
+
+static netdev_tx_t lan966x_port_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ __be32 ifh[IFH_LEN];
+ int err;
+
+ memset(ifh, 0x0, sizeof(__be32) * IFH_LEN);
+
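+	/* Build the injection frame header: bypass the analyzer, select the
+	 * destination port and propagate the skb priority and VLAN tag.
+	 */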
+ lan966x_ifh_set_bypass(ifh, 1);
+ lan966x_ifh_set_port(ifh, BIT_ULL(port->chip_port));
+ lan966x_ifh_set_qos_class(ifh, skb->priority >= 7 ? 0x7 : skb->priority);
+ lan966x_ifh_set_ipv(ifh, skb->priority >= 7 ? 0x7 : skb->priority);
+ lan966x_ifh_set_vid(ifh, skb_vlan_tag_get(skb));
+
+ if (port->lan966x->ptp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
+ err = lan966x_ptp_txtstamp_request(port, skb);
+ if (err)
+ return err;
+
+ lan966x_ifh_set_rew_op(ifh, LAN966X_SKB_CB(skb)->rew_op);
+ lan966x_ifh_set_timestamp(ifh, LAN966X_SKB_CB(skb)->ts_id);
+ }
+
+ spin_lock(&lan966x->tx_lock);
+ if (port->lan966x->fdma)
+ err = lan966x_fdma_xmit(skb, ifh, dev);
+ else
+ err = lan966x_port_ifh_xmit(skb, ifh, dev);
+ spin_unlock(&lan966x->tx_lock);
+
+ return err;
+}
+
+static int lan966x_port_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ int old_mtu = dev->mtu;
+ int err;
+
+ lan_wr(DEV_MAC_MAXLEN_CFG_MAX_LEN_SET(LAN966X_HW_MTU(new_mtu)),
+ lan966x, DEV_MAC_MAXLEN_CFG(port->chip_port));
+ dev->mtu = new_mtu;
+
+ if (!lan966x->fdma)
+ return 0;
+
+ err = lan966x_fdma_change_mtu(lan966x);
+ if (err) {
+ lan_wr(DEV_MAC_MAXLEN_CFG_MAX_LEN_SET(LAN966X_HW_MTU(old_mtu)),
+ lan966x, DEV_MAC_MAXLEN_CFG(port->chip_port));
+ dev->mtu = old_mtu;
+ }
+
+ return err;
+}
+
+static int lan966x_mc_unsync(struct net_device *dev, const unsigned char *addr)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+
+ return lan966x_mac_forget(lan966x, addr, HOST_PVID, ENTRYTYPE_LOCKED);
+}
+
+static int lan966x_mc_sync(struct net_device *dev, const unsigned char *addr)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+
+ return lan966x_mac_cpu_learn(lan966x, addr, HOST_PVID);
+}
+
+static void lan966x_port_set_rx_mode(struct net_device *dev)
+{
+ __dev_mc_sync(dev, lan966x_mc_sync, lan966x_mc_unsync);
+}
+
+static int lan966x_port_get_parent_id(struct net_device *dev,
+ struct netdev_phys_item_id *ppid)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+
+ ppid->id_len = sizeof(lan966x->base_mac);
+ memcpy(&ppid->id, &lan966x->base_mac, ppid->id_len);
+
+ return 0;
+}
+
+static int lan966x_port_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+
+ if (!port->lan966x->ptp)
+ return -EOPNOTSUPP;
+
+ lan966x_ptp_hwtstamp_get(port, cfg);
+
+ return 0;
+}
+
+static int lan966x_port_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ int err;
+
+ if (cfg->source != HWTSTAMP_SOURCE_NETDEV &&
+ cfg->source != HWTSTAMP_SOURCE_PHYLIB)
+ return -EOPNOTSUPP;
+
+ err = lan966x_ptp_setup_traps(port, cfg);
+ if (err)
+ return err;
+
+ if (cfg->source == HWTSTAMP_SOURCE_NETDEV) {
+ if (!port->lan966x->ptp)
+ return -EOPNOTSUPP;
+
+ err = lan966x_ptp_hwtstamp_set(port, cfg, extack);
+ if (err) {
+ lan966x_ptp_del_traps(port);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static const struct net_device_ops lan966x_port_netdev_ops = {
+ .ndo_open = lan966x_port_open,
+ .ndo_stop = lan966x_port_stop,
+ .ndo_start_xmit = lan966x_port_xmit,
+ .ndo_change_mtu = lan966x_port_change_mtu,
+ .ndo_set_rx_mode = lan966x_port_set_rx_mode,
+ .ndo_get_phys_port_name = lan966x_port_get_phys_port_name,
+ .ndo_get_stats64 = lan966x_stats_get,
+ .ndo_set_mac_address = lan966x_port_set_mac_address,
+ .ndo_get_port_parent_id = lan966x_port_get_parent_id,
+ .ndo_eth_ioctl = phy_do_ioctl,
+ .ndo_setup_tc = lan966x_tc_setup,
+ .ndo_bpf = lan966x_xdp,
+ .ndo_xdp_xmit = lan966x_xdp_xmit,
+ .ndo_hwtstamp_get = lan966x_port_hwtstamp_get,
+ .ndo_hwtstamp_set = lan966x_port_hwtstamp_set,
+};
+
+bool lan966x_netdevice_check(const struct net_device *dev)
+{
+ return dev->netdev_ops == &lan966x_port_netdev_ops;
+}
+
+bool lan966x_hw_offload(struct lan966x *lan966x, u32 port, struct sk_buff *skb)
+{
+ u32 val;
+
+	/* The IGMP and MLD frames are not forwarded by the HW if
+	 * multicast snooping is enabled, therefore don't mark them as
+	 * offloaded to allow the SW to forward the frames accordingly.
+	 */
+ val = lan_rd(lan966x, ANA_CPU_FWD_CFG(port));
+ if (!(val & (ANA_CPU_FWD_CFG_IGMP_REDIR_ENA |
+ ANA_CPU_FWD_CFG_MLD_REDIR_ENA)))
+ return true;
+
+ if (eth_type_vlan(skb->protocol)) {
+ skb = skb_vlan_untag(skb);
+ if (unlikely(!skb))
+ return false;
+ }
+
+ if (skb->protocol == htons(ETH_P_IP) &&
+ ip_hdr(skb)->protocol == IPPROTO_IGMP)
+ return false;
+
+ if (IS_ENABLED(CONFIG_IPV6) &&
+ skb->protocol == htons(ETH_P_IPV6) &&
+ ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) &&
+ !ipv6_mc_check_mld(skb))
+ return false;
+
+ return true;
+}
+
+static int lan966x_port_xtr_status(struct lan966x *lan966x, u8 grp)
+{
+ return lan_rd(lan966x, QS_XTR_RD(grp));
+}
+
+static int lan966x_port_xtr_ready(struct lan966x *lan966x, u8 grp)
+{
+ u32 val;
+
+ return read_poll_timeout(lan966x_port_xtr_status, val,
+ val != XTR_NOT_READY,
+ READL_SLEEP_US, READL_TIMEOUT_US, false,
+ lan966x, grp);
+}
+
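+/* Read one word from the extraction FIFO. Returns the number of valid bytes
+ * stored in *rval (4 for a full data word, fewer at the end of a frame) or a
+ * negative error code on abort or timeout.
+ */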
+static int lan966x_rx_frame_word(struct lan966x *lan966x, u8 grp, u32 *rval)
+{
+ u32 bytes_valid;
+ u32 val;
+ int err;
+
+ val = lan_rd(lan966x, QS_XTR_RD(grp));
+ if (val == XTR_NOT_READY) {
+ err = lan966x_port_xtr_ready(lan966x, grp);
+ if (err)
+ return -EIO;
+ }
+
+ switch (val) {
+ case XTR_ABORT:
+ return -EIO;
+ case XTR_EOF_0:
+ case XTR_EOF_1:
+ case XTR_EOF_2:
+ case XTR_EOF_3:
+ case XTR_PRUNED:
+ bytes_valid = XTR_VALID_BYTES(val);
+ val = lan_rd(lan966x, QS_XTR_RD(grp));
+ if (val == XTR_ESCAPE)
+ *rval = lan_rd(lan966x, QS_XTR_RD(grp));
+ else
+ *rval = val;
+
+ return bytes_valid;
+ case XTR_ESCAPE:
+ *rval = lan_rd(lan966x, QS_XTR_RD(grp));
+
+ return 4;
+ default:
+ *rval = val;
+
+ return 4;
+ }
+}
+
+static u64 lan966x_ifh_get(u8 *ifh, size_t pos, size_t length)
+{
+ u64 val = 0;
+ u8 v;
+
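+	/* Mirror of lan966x_ifh_set(): walk 'length' bits starting at bit
+	 * position 'pos' of the IFH and collect them into a u64, fetching a
+	 * new IFH byte whenever a byte boundary is crossed.
+	 */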
+	for (int i = 0; i < length; i++) {
+ int j = pos + i;
+ int k = j % 8;
+
+ if (i == 0 || k == 0)
+ v = ifh[IFH_LEN_BYTES - (j / 8) - 1];
+
+ if (v & (1 << k))
+ val |= (1ULL << i);
+ }
+
+ return val;
+}
+
+void lan966x_ifh_get_src_port(void *ifh, u64 *src_port)
+{
+ *src_port = lan966x_ifh_get(ifh, IFH_POS_SRCPORT, IFH_WID_SRCPORT);
+}
+
+static void lan966x_ifh_get_len(void *ifh, u64 *len)
+{
+ *len = lan966x_ifh_get(ifh, IFH_POS_LEN, IFH_WID_LEN);
+}
+
+void lan966x_ifh_get_timestamp(void *ifh, u64 *timestamp)
+{
+ *timestamp = lan966x_ifh_get(ifh, IFH_POS_TIMESTAMP, IFH_WID_TIMESTAMP);
+}
+
+static irqreturn_t lan966x_xtr_irq_handler(int irq, void *args)
+{
+ struct lan966x *lan966x = args;
+ int i, grp = 0, err = 0;
+
+ if (!(lan_rd(lan966x, QS_XTR_DATA_PRESENT) & BIT(grp)))
+ return IRQ_NONE;
+
+ do {
+ u64 src_port, len, timestamp;
+ struct net_device *dev;
+ struct sk_buff *skb;
+ int sz = 0, buf_len;
+ u32 ifh[IFH_LEN];
+ u32 *buf;
+ u32 val;
+
+ for (i = 0; i < IFH_LEN; i++) {
+ err = lan966x_rx_frame_word(lan966x, grp, &ifh[i]);
+ if (err != 4)
+ goto recover;
+ }
+
+ err = 0;
+
+ lan966x_ifh_get_src_port(ifh, &src_port);
+ lan966x_ifh_get_len(ifh, &len);
+ lan966x_ifh_get_timestamp(ifh, &timestamp);
+
+ WARN_ON(src_port >= lan966x->num_phys_ports);
+
+ dev = lan966x->ports[src_port]->dev;
+ skb = netdev_alloc_skb(dev, len);
+ if (unlikely(!skb)) {
+ netdev_err(dev, "Unable to allocate sk_buff\n");
+ err = -ENOMEM;
+ break;
+ }
+ buf_len = len - ETH_FCS_LEN;
+ buf = (u32 *)skb_put(skb, buf_len);
+
+ len = 0;
+ do {
+ sz = lan966x_rx_frame_word(lan966x, grp, &val);
+ if (sz < 0) {
+ kfree_skb(skb);
+ goto recover;
+ }
+
+ *buf++ = val;
+ len += sz;
+ } while (len < buf_len);
+
+ /* Read the FCS */
+ sz = lan966x_rx_frame_word(lan966x, grp, &val);
+ if (sz < 0) {
+ kfree_skb(skb);
+ goto recover;
+ }
+
+ /* Update the statistics if part of the FCS was read before */
+ len -= ETH_FCS_LEN - sz;
+
+ if (unlikely(dev->features & NETIF_F_RXFCS)) {
+ buf = (u32 *)skb_put(skb, ETH_FCS_LEN);
+ *buf = val;
+ }
+
+ lan966x_ptp_rxtstamp(lan966x, skb, src_port, timestamp);
+ skb->protocol = eth_type_trans(skb, dev);
+
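+		/* Frames from bridged ports were already forwarded by the HW,
+		 * so mark them as offloaded unless SW forwarding is required,
+		 * e.g. for IGMP/MLD when snooping is enabled, see
+		 * lan966x_hw_offload().
+		 */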
+ if (lan966x->bridge_mask & BIT(src_port)) {
+ skb->offload_fwd_mark = 1;
+
+ skb_reset_network_header(skb);
+ if (!lan966x_hw_offload(lan966x, src_port, skb))
+ skb->offload_fwd_mark = 0;
+ }
+
+ if (!skb_defer_rx_timestamp(skb))
+ netif_rx(skb);
+
+ dev->stats.rx_bytes += len;
+ dev->stats.rx_packets++;
+
+recover:
+ if (sz < 0 || err)
+ lan_rd(lan966x, QS_XTR_RD(grp));
+
+ } while (lan_rd(lan966x, QS_XTR_DATA_PRESENT) & BIT(grp));
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t lan966x_ana_irq_handler(int irq, void *args)
+{
+ struct lan966x *lan966x = args;
+
+ return lan966x_mac_irq_handler(lan966x);
+}
+
+static void lan966x_cleanup_ports(struct lan966x *lan966x)
+{
+ struct lan966x_port *port;
+ int p;
+
+ for (p = 0; p < lan966x->num_phys_ports; p++) {
+ port = lan966x->ports[p];
+ if (!port)
+ continue;
+
+ if (port->dev)
+ unregister_netdev(port->dev);
+
+ lan966x_xdp_port_deinit(port);
+ if (lan966x->fdma && lan966x->fdma_ndev == port->dev)
+ lan966x_fdma_netdev_deinit(lan966x, port->dev);
+
+ if (port->phylink) {
+ rtnl_lock();
+ lan966x_port_stop(port->dev);
+ rtnl_unlock();
+ phylink_destroy(port->phylink);
+ port->phylink = NULL;
+ }
+
+ if (port->fwnode)
+ fwnode_handle_put(port->fwnode);
+ }
+
+ disable_irq(lan966x->xtr_irq);
+ lan966x->xtr_irq = -ENXIO;
+
+ if (lan966x->ana_irq > 0) {
+ disable_irq(lan966x->ana_irq);
+ lan966x->ana_irq = -ENXIO;
+ }
+
+ if (lan966x->fdma)
+ devm_free_irq(lan966x->dev, lan966x->fdma_irq, lan966x);
+
+ if (lan966x->ptp_irq > 0)
+ devm_free_irq(lan966x->dev, lan966x->ptp_irq, lan966x);
+
+ if (lan966x->ptp_ext_irq > 0)
+ devm_free_irq(lan966x->dev, lan966x->ptp_ext_irq, lan966x);
+}
+
+static int lan966x_probe_port(struct lan966x *lan966x, u32 p,
+ phy_interface_t phy_mode,
+ struct fwnode_handle *portnp)
+{
+ struct lan966x_port *port;
+ struct phylink *phylink;
+ struct net_device *dev;
+ int err;
+
+ if (p >= lan966x->num_phys_ports)
+ return -EINVAL;
+
+ dev = devm_alloc_etherdev_mqs(lan966x->dev,
+ sizeof(struct lan966x_port),
+ NUM_PRIO_QUEUES, 1);
+ if (!dev)
+ return -ENOMEM;
+
+ SET_NETDEV_DEV(dev, lan966x->dev);
+ port = netdev_priv(dev);
+ port->dev = dev;
+ port->lan966x = lan966x;
+ port->chip_port = p;
+ lan966x->ports[p] = port;
+
+ dev->max_mtu = ETH_MAX_MTU;
+
+ dev->netdev_ops = &lan966x_port_netdev_ops;
+ dev->ethtool_ops = &lan966x_ethtool_ops;
+ dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_STAG_TX |
+ NETIF_F_HW_TC;
+ dev->hw_features |= NETIF_F_HW_TC;
+ dev->priv_flags |= IFF_SEE_ALL_HWTSTAMP_REQUESTS;
+ dev->needed_headroom = IFH_LEN_BYTES;
+
+ eth_hw_addr_gen(dev, lan966x->base_mac, p + 1);
+
+ lan966x_mac_learn(lan966x, PGID_CPU, dev->dev_addr, HOST_PVID,
+ ENTRYTYPE_LOCKED);
+
+ port->phylink_config.dev = &port->dev->dev;
+ port->phylink_config.type = PHYLINK_NETDEV;
+ port->phylink_pcs.poll = true;
+ port->phylink_pcs.ops = &lan966x_phylink_pcs_ops;
+ port->phylink_pcs.neg_mode = true;
+
+ port->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000FD | MAC_2500FD;
+
+ phy_interface_set_rgmii(port->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_MII,
+ port->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ port->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ port->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_QSGMII,
+ port->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_QUSGMII,
+ port->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ port->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX,
+ port->phylink_config.supported_interfaces);
+
+ phylink = phylink_create(&port->phylink_config,
+ portnp,
+ phy_mode,
+ &lan966x_phylink_mac_ops);
+ if (IS_ERR(phylink)) {
+ port->dev = NULL;
+ return PTR_ERR(phylink);
+ }
+
+ port->phylink = phylink;
+
+ if (lan966x->fdma)
+ dev->xdp_features = NETDEV_XDP_ACT_BASIC |
+ NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT;
+
+ err = register_netdev(dev);
+ if (err) {
+ dev_err(lan966x->dev, "register_netdev failed\n");
+ return err;
+ }
+
+ lan966x_vlan_port_set_vlan_aware(port, 0);
+ lan966x_vlan_port_set_vid(port, HOST_PVID, false, false);
+ lan966x_vlan_port_apply(port);
+
+ return 0;
+}
+
+static void lan966x_init(struct lan966x *lan966x)
+{
+ u32 p, i;
+
+ /* MAC table initialization */
+ lan966x_mac_init(lan966x);
+
+ lan966x_vlan_init(lan966x);
+
+ /* Flush queues */
+ lan_wr(lan_rd(lan966x, QS_XTR_FLUSH) |
+ GENMASK(1, 0),
+ lan966x, QS_XTR_FLUSH);
+
+ /* Allow to drain */
+ mdelay(1);
+
+ /* All Queues normal */
+ lan_wr(lan_rd(lan966x, QS_XTR_FLUSH) &
+ ~(GENMASK(1, 0)),
+ lan966x, QS_XTR_FLUSH);
+
+	/* Set MAC age time to the default value; an entry is aged after
+	 * 2 * AGE_PERIOD
+ */
+ lan_wr(ANA_AUTOAGE_AGE_PERIOD_SET(BR_DEFAULT_AGEING_TIME / 2 / HZ),
+ lan966x, ANA_AUTOAGE);
+
+ /* Disable learning for frames discarded by VLAN ingress filtering */
+ lan_rmw(ANA_ADVLEARN_VLAN_CHK_SET(1),
+ ANA_ADVLEARN_VLAN_CHK,
+ lan966x, ANA_ADVLEARN);
+
+ /* Setup frame ageing - "2 sec" - The unit is 6.5 us on lan966x */
+ lan_wr(SYS_FRM_AGING_AGE_TX_ENA_SET(1) |
+ (20000000 / 65),
+ lan966x, SYS_FRM_AGING);
+
+ /* Map the 8 CPU extraction queues to CPU port */
+ lan_wr(0, lan966x, QSYS_CPU_GROUP_MAP);
+
+	/* Do byte-swap and expect status after last data word.
+	 * Extraction: Mode (manual extraction) | Byte_swap
+ */
+ lan_wr(QS_XTR_GRP_CFG_MODE_SET(lan966x->fdma ? 2 : 1) |
+ QS_XTR_GRP_CFG_BYTE_SWAP_SET(1),
+ lan966x, QS_XTR_GRP_CFG(0));
+
+ /* Injection: Mode: manual injection | Byte_swap */
+ lan_wr(QS_INJ_GRP_CFG_MODE_SET(lan966x->fdma ? 2 : 1) |
+ QS_INJ_GRP_CFG_BYTE_SWAP_SET(1),
+ lan966x, QS_INJ_GRP_CFG(0));
+
+ lan_rmw(QS_INJ_CTRL_GAP_SIZE_SET(0),
+ QS_INJ_CTRL_GAP_SIZE,
+ lan966x, QS_INJ_CTRL(0));
+
+ /* Enable IFH insertion/parsing on CPU ports */
+ lan_wr(SYS_PORT_MODE_INCL_INJ_HDR_SET(1) |
+ SYS_PORT_MODE_INCL_XTR_HDR_SET(1),
+ lan966x, SYS_PORT_MODE(CPU_PORT));
+
+ /* Setup flooding PGIDs */
+ lan_wr(ANA_FLOODING_IPMC_FLD_MC4_DATA_SET(PGID_MCIPV4) |
+ ANA_FLOODING_IPMC_FLD_MC4_CTRL_SET(PGID_MC) |
+ ANA_FLOODING_IPMC_FLD_MC6_DATA_SET(PGID_MCIPV6) |
+ ANA_FLOODING_IPMC_FLD_MC6_CTRL_SET(PGID_MC),
+ lan966x, ANA_FLOODING_IPMC);
+
+ /* There are 8 priorities */
+ for (i = 0; i < 8; ++i)
+ lan_rmw(ANA_FLOODING_FLD_MULTICAST_SET(PGID_MC) |
+ ANA_FLOODING_FLD_UNICAST_SET(PGID_UC) |
+ ANA_FLOODING_FLD_BROADCAST_SET(PGID_BC),
+ ANA_FLOODING_FLD_MULTICAST |
+ ANA_FLOODING_FLD_UNICAST |
+ ANA_FLOODING_FLD_BROADCAST,
+ lan966x, ANA_FLOODING(i));
+
+ for (i = 0; i < PGID_ENTRIES; ++i)
+ /* Set all the entries to obey VLAN_VLAN */
+ lan_rmw(ANA_PGID_CFG_OBEY_VLAN_SET(1),
+ ANA_PGID_CFG_OBEY_VLAN,
+ lan966x, ANA_PGID_CFG(i));
+
+ for (p = 0; p < lan966x->num_phys_ports; p++) {
+ /* Disable bridging by default */
+ lan_rmw(ANA_PGID_PGID_SET(0x0),
+ ANA_PGID_PGID,
+ lan966x, ANA_PGID(p + PGID_SRC));
+
+ /* Do not forward BPDU frames to the front ports and copy them
+ * to CPU
+ */
+ lan_wr(0xffff, lan966x, ANA_CPU_FWD_BPDU_CFG(p));
+ }
+
+ /* Set source buffer size for each priority and each port to 1500 bytes */
+ for (i = 0; i <= QSYS_Q_RSRV; ++i) {
+ lan_wr(1500 / 64, lan966x, QSYS_RES_CFG(i));
+ lan_wr(1500 / 64, lan966x, QSYS_RES_CFG(512 + i));
+ }
+
+ /* Enable switching to/from cpu port */
+ lan_wr(QSYS_SW_PORT_MODE_PORT_ENA_SET(1) |
+ QSYS_SW_PORT_MODE_SCH_NEXT_CFG_SET(1) |
+ QSYS_SW_PORT_MODE_INGRESS_DROP_MODE_SET(1),
+ lan966x, QSYS_SW_PORT_MODE(CPU_PORT));
+
+ /* Configure and enable the CPU port */
+ lan_rmw(ANA_PGID_PGID_SET(0),
+ ANA_PGID_PGID,
+ lan966x, ANA_PGID(CPU_PORT));
+ lan_rmw(ANA_PGID_PGID_SET(BIT(CPU_PORT)),
+ ANA_PGID_PGID,
+ lan966x, ANA_PGID(PGID_CPU));
+
+ /* Multicast to all other ports */
+ lan_rmw(GENMASK(lan966x->num_phys_ports - 1, 0),
+ ANA_PGID_PGID,
+ lan966x, ANA_PGID(PGID_MC));
+
+ /* This will be controlled by mrouter ports */
+ lan_rmw(GENMASK(lan966x->num_phys_ports - 1, 0),
+ ANA_PGID_PGID,
+ lan966x, ANA_PGID(PGID_MCIPV4));
+
+ lan_rmw(GENMASK(lan966x->num_phys_ports - 1, 0),
+ ANA_PGID_PGID,
+ lan966x, ANA_PGID(PGID_MCIPV6));
+
+ /* Unicast to all other ports */
+ lan_rmw(GENMASK(lan966x->num_phys_ports - 1, 0),
+ ANA_PGID_PGID,
+ lan966x, ANA_PGID(PGID_UC));
+
+ /* Broadcast to the CPU port and to other ports */
+ lan_rmw(ANA_PGID_PGID_SET(BIT(CPU_PORT) | GENMASK(lan966x->num_phys_ports - 1, 0)),
+ ANA_PGID_PGID,
+ lan966x, ANA_PGID(PGID_BC));
+
+ lan_wr(REW_PORT_CFG_NO_REWRITE_SET(1),
+ lan966x, REW_PORT_CFG(CPU_PORT));
+
+ lan_rmw(ANA_ANAINTR_INTR_ENA_SET(1),
+ ANA_ANAINTR_INTR_ENA,
+ lan966x, ANA_ANAINTR);
+
+ spin_lock_init(&lan966x->tx_lock);
+
+ lan966x_taprio_init(lan966x);
+}
+
+static int lan966x_ram_init(struct lan966x *lan966x)
+{
+ return lan_rd(lan966x, SYS_RAM_INIT);
+}
+
+static int lan966x_reset_switch(struct lan966x *lan966x)
+{
+ struct reset_control *switch_reset;
+ int val = 0;
+ int ret;
+
+ switch_reset = devm_reset_control_get_optional_shared(lan966x->dev,
+ "switch");
+ if (IS_ERR(switch_reset))
+ return dev_err_probe(lan966x->dev, PTR_ERR(switch_reset),
+ "Could not obtain switch reset");
+
+ reset_control_reset(switch_reset);
+
+	/* Don't reinitialize the switch core if it is already initialized. In
+ * case it is initialized twice, some pointers inside the queue system
+ * in HW will get corrupted and then after a while the queue system gets
+ * full and no traffic is passing through the switch. The issue is seen
+ * when loading and unloading the driver and sending traffic through the
+ * switch.
+ */
+ if (lan_rd(lan966x, SYS_RESET_CFG) & SYS_RESET_CFG_CORE_ENA)
+ return 0;
+
+ lan_wr(SYS_RESET_CFG_CORE_ENA_SET(0), lan966x, SYS_RESET_CFG);
+ lan_wr(SYS_RAM_INIT_RAM_INIT_SET(1), lan966x, SYS_RAM_INIT);
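+	/* Wait for the RAM initialization bit to clear */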
+ ret = readx_poll_timeout(lan966x_ram_init, lan966x,
+ val, (val & BIT(1)) == 0, READL_SLEEP_US,
+ READL_TIMEOUT_US);
+ if (ret)
+ return ret;
+
+ lan_wr(SYS_RESET_CFG_CORE_ENA_SET(1), lan966x, SYS_RESET_CFG);
+
+ return 0;
+}
+
+static int lan966x_probe(struct platform_device *pdev)
+{
+ struct fwnode_handle *ports, *portnp;
+ struct lan966x *lan966x;
+ u8 mac_addr[ETH_ALEN];
+ int err;
+
+ lan966x = devm_kzalloc(&pdev->dev, sizeof(*lan966x), GFP_KERNEL);
+ if (!lan966x)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, lan966x);
+ lan966x->dev = &pdev->dev;
+
+ lan966x->debugfs_root = debugfs_create_dir("lan966x", NULL);
+
+ if (!device_get_mac_address(&pdev->dev, mac_addr)) {
+ ether_addr_copy(lan966x->base_mac, mac_addr);
+ } else {
+		pr_info("MAC addr was not set, using a random MAC\n");
+ eth_random_addr(lan966x->base_mac);
+ lan966x->base_mac[5] &= 0xf0;
+ }
+
+ err = lan966x_create_targets(pdev, lan966x);
+ if (err)
+ return dev_err_probe(&pdev->dev, err,
+ "Failed to create targets");
+
+ err = lan966x_reset_switch(lan966x);
+ if (err)
+ return dev_err_probe(&pdev->dev, err, "Reset failed");
+
+ lan966x->num_phys_ports = NUM_PHYS_PORTS;
+ lan966x->ports = devm_kcalloc(&pdev->dev, lan966x->num_phys_ports,
+ sizeof(struct lan966x_port *),
+ GFP_KERNEL);
+ if (!lan966x->ports)
+ return -ENOMEM;
+
+	/* The QS system has 32KB of memory */
+ lan966x->shared_queue_sz = LAN966X_BUFFER_MEMORY;
+
+ /* set irq */
+ lan966x->xtr_irq = platform_get_irq_byname(pdev, "xtr");
+ if (lan966x->xtr_irq < 0)
+ return lan966x->xtr_irq;
+
+ err = devm_request_threaded_irq(&pdev->dev, lan966x->xtr_irq, NULL,
+ lan966x_xtr_irq_handler, IRQF_ONESHOT,
+ "frame extraction", lan966x);
+ if (err) {
+		pr_err("Unable to use xtr irq\n");
+ return -ENODEV;
+ }
+
+ lan966x->ana_irq = platform_get_irq_byname(pdev, "ana");
+ if (lan966x->ana_irq > 0) {
+ err = devm_request_threaded_irq(&pdev->dev, lan966x->ana_irq, NULL,
+ lan966x_ana_irq_handler, IRQF_ONESHOT,
+ "ana irq", lan966x);
+ if (err)
+ return dev_err_probe(&pdev->dev, err, "Unable to use ana irq");
+ }
+
+ lan966x->ptp_irq = platform_get_irq_byname(pdev, "ptp");
+ if (lan966x->ptp_irq > 0) {
+ err = devm_request_threaded_irq(&pdev->dev, lan966x->ptp_irq, NULL,
+ lan966x_ptp_irq_handler, IRQF_ONESHOT,
+ "ptp irq", lan966x);
+ if (err)
+ return dev_err_probe(&pdev->dev, err, "Unable to use ptp irq");
+
+ lan966x->ptp = 1;
+ }
+
+ lan966x->fdma_irq = platform_get_irq_byname(pdev, "fdma");
+ if (lan966x->fdma_irq > 0) {
+ err = devm_request_irq(&pdev->dev, lan966x->fdma_irq,
+ lan966x_fdma_irq_handler, 0,
+ "fdma irq", lan966x);
+ if (err)
+ return dev_err_probe(&pdev->dev, err, "Unable to use fdma irq");
+
+ lan966x->fdma = true;
+ }
+
+ if (lan966x->ptp) {
+ lan966x->ptp_ext_irq = platform_get_irq_byname(pdev, "ptp-ext");
+ if (lan966x->ptp_ext_irq > 0) {
+ err = devm_request_threaded_irq(&pdev->dev,
+ lan966x->ptp_ext_irq, NULL,
+ lan966x_ptp_ext_irq_handler,
+ IRQF_ONESHOT,
+ "ptp-ext irq", lan966x);
+ if (err)
+ return dev_err_probe(&pdev->dev, err,
+ "Unable to use ptp-ext irq");
+ }
+ }
+
+ ports = device_get_named_child_node(&pdev->dev, "ethernet-ports");
+ if (!ports)
+ return dev_err_probe(&pdev->dev, -ENODEV,
+ "no ethernet-ports child found\n");
+
+ /* init switch */
+ lan966x_init(lan966x);
+ lan966x_stats_init(lan966x);
+
+ /* go over the child nodes */
+ fwnode_for_each_available_child_node(ports, portnp) {
+ phy_interface_t phy_mode;
+ struct phy *serdes;
+ u32 p;
+
+ if (fwnode_property_read_u32(portnp, "reg", &p))
+ continue;
+
+ phy_mode = fwnode_get_phy_mode(portnp);
+ err = lan966x_probe_port(lan966x, p, phy_mode, portnp);
+ if (err)
+ goto cleanup_ports;
+
+ /* Read needed configuration */
+ lan966x->ports[p]->config.portmode = phy_mode;
+ lan966x->ports[p]->fwnode = fwnode_handle_get(portnp);
+
+ serdes = devm_of_phy_optional_get(lan966x->dev,
+ to_of_node(portnp), NULL);
+ if (IS_ERR(serdes)) {
+ err = PTR_ERR(serdes);
+ goto cleanup_ports;
+ }
+ lan966x->ports[p]->serdes = serdes;
+
+ lan966x_port_init(lan966x->ports[p]);
+ err = lan966x_xdp_port_init(lan966x->ports[p]);
+ if (err)
+ goto cleanup_ports;
+ }
+
+ fwnode_handle_put(ports);
+
+ lan966x_mdb_init(lan966x);
+ err = lan966x_fdb_init(lan966x);
+ if (err)
+ goto cleanup_ports;
+
+ err = lan966x_ptp_init(lan966x);
+ if (err)
+ goto cleanup_fdb;
+
+ err = lan966x_fdma_init(lan966x);
+ if (err)
+ goto cleanup_ptp;
+
+ err = lan966x_vcap_init(lan966x);
+ if (err)
+ goto cleanup_fdma;
+
+ lan966x_dcb_init(lan966x);
+
+ return 0;
+
+cleanup_fdma:
+ lan966x_fdma_deinit(lan966x);
+
+cleanup_ptp:
+ lan966x_ptp_deinit(lan966x);
+
+cleanup_fdb:
+ lan966x_fdb_deinit(lan966x);
+
+cleanup_ports:
+ fwnode_handle_put(ports);
+ fwnode_handle_put(portnp);
+
+ lan966x_cleanup_ports(lan966x);
+
+ cancel_delayed_work_sync(&lan966x->stats_work);
+ destroy_workqueue(lan966x->stats_queue);
+ mutex_destroy(&lan966x->stats_lock);
+
+ return err;
+}
+
+static int lan966x_remove(struct platform_device *pdev)
+{
+ struct lan966x *lan966x = platform_get_drvdata(pdev);
+
+ lan966x_taprio_deinit(lan966x);
+ lan966x_vcap_deinit(lan966x);
+ lan966x_fdma_deinit(lan966x);
+ lan966x_cleanup_ports(lan966x);
+
+ cancel_delayed_work_sync(&lan966x->stats_work);
+ destroy_workqueue(lan966x->stats_queue);
+ mutex_destroy(&lan966x->stats_lock);
+
+ lan966x_mac_purge_entries(lan966x);
+ lan966x_mdb_deinit(lan966x);
+ lan966x_fdb_deinit(lan966x);
+ lan966x_ptp_deinit(lan966x);
+
+ debugfs_remove_recursive(lan966x->debugfs_root);
+
+ return 0;
+}
+
+static struct platform_driver lan966x_driver = {
+ .probe = lan966x_probe,
+ .remove = lan966x_remove,
+ .driver = {
+ .name = "lan966x-switch",
+ .of_match_table = lan966x_match,
+ },
+};
+
+static int __init lan966x_switch_driver_init(void)
+{
+ int ret;
+
+ lan966x_register_notifier_blocks();
+
+ ret = platform_driver_register(&lan966x_driver);
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ lan966x_unregister_notifier_blocks();
+ return ret;
+}
+
+static void __exit lan966x_switch_driver_exit(void)
+{
+ platform_driver_unregister(&lan966x_driver);
+ lan966x_unregister_notifier_blocks();
+}
+
+module_init(lan966x_switch_driver_init);
+module_exit(lan966x_switch_driver_exit);
+
+MODULE_DESCRIPTION("Microchip LAN966X switch driver");
+MODULE_AUTHOR("Horatiu Vultur <horatiu.vultur@microchip.com>");
+MODULE_LICENSE("Dual MIT/GPL");
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
new file mode 100644
index 0000000000..caa9e0533c
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
@@ -0,0 +1,792 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef __LAN966X_MAIN_H__
+#define __LAN966X_MAIN_H__
+
+#include <linux/debugfs.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/jiffies.h>
+#include <linux/phy.h>
+#include <linux/phylink.h>
+#include <linux/ptp_clock_kernel.h>
+#include <net/page_pool/types.h>
+#include <net/pkt_cls.h>
+#include <net/pkt_sched.h>
+#include <net/switchdev.h>
+#include <net/xdp.h>
+
+#include <vcap_api.h>
+#include <vcap_api_client.h>
+
+#include "lan966x_regs.h"
+#include "lan966x_ifh.h"
+
+#define TABLE_UPDATE_SLEEP_US 10
+#define TABLE_UPDATE_TIMEOUT_US 100000
+
+#define READL_SLEEP_US 10
+#define READL_TIMEOUT_US 100000000
+
+#define LAN966X_BUFFER_CELL_SZ 64
+#define LAN966X_BUFFER_MEMORY (160 * 1024)
+#define LAN966X_BUFFER_MIN_SZ 60
+
+#define LAN966X_HW_MTU(mtu) ((mtu) + ETH_HLEN + ETH_FCS_LEN)
+
+#define PGID_AGGR 64
+#define PGID_SRC 80
+#define PGID_ENTRIES 89
+
+#define UNAWARE_PVID 0
+#define HOST_PVID 4095
+
+/* Reserved amount for (SRC, PRIO) at index 8*SRC + PRIO */
+#define QSYS_Q_RSRV 95
+
+#define NUM_PHYS_PORTS 8
+#define CPU_PORT 8
+#define NUM_PRIO_QUEUES 8
+
+/* Reserved PGIDs */
+#define PGID_CPU (PGID_AGGR - 6)
+#define PGID_UC (PGID_AGGR - 5)
+#define PGID_BC (PGID_AGGR - 4)
+#define PGID_MC (PGID_AGGR - 3)
+#define PGID_MCIPV4 (PGID_AGGR - 2)
+#define PGID_MCIPV6 (PGID_AGGR - 1)
+
+/* Non-reserved PGIDs, used for general purpose */
+#define PGID_GP_START (CPU_PORT + 1)
+#define PGID_GP_END PGID_CPU
+
+#define LAN966X_SPEED_NONE 0
+#define LAN966X_SPEED_2500 1
+#define LAN966X_SPEED_1000 1
+#define LAN966X_SPEED_100 2
+#define LAN966X_SPEED_10 3
+
+#define LAN966X_PHC_COUNT 3
+#define LAN966X_PHC_PORT 0
+#define LAN966X_PHC_PINS_NUM 7
+
+#define IFH_REW_OP_NOOP 0x0
+#define IFH_REW_OP_ONE_STEP_PTP 0x3
+#define IFH_REW_OP_TWO_STEP_PTP 0x4
+
+#define FDMA_RX_DCB_MAX_DBS 1
+#define FDMA_TX_DCB_MAX_DBS 1
+#define FDMA_DCB_INFO_DATAL(x) ((x) & GENMASK(15, 0))
+
+#define FDMA_DCB_STATUS_BLOCKL(x) ((x) & GENMASK(15, 0))
+#define FDMA_DCB_STATUS_SOF BIT(16)
+#define FDMA_DCB_STATUS_EOF BIT(17)
+#define FDMA_DCB_STATUS_INTR BIT(18)
+#define FDMA_DCB_STATUS_DONE BIT(19)
+#define FDMA_DCB_STATUS_BLOCKO(x) (((x) << 20) & GENMASK(31, 20))
+#define FDMA_DCB_INVALID_DATA 0x1
+
+#define FDMA_XTR_CHANNEL 6
+#define FDMA_INJ_CHANNEL 0
+#define FDMA_DCB_MAX 512
+
+#define SE_IDX_QUEUE 0 /* 0-79 : Queue scheduler elements */
+#define SE_IDX_PORT 80 /* 80-89 : Port scheduler elements */
+
+#define LAN966X_VCAP_CID_IS1_L0 VCAP_CID_INGRESS_L0 /* IS1 lookup 0 */
+#define LAN966X_VCAP_CID_IS1_L1 VCAP_CID_INGRESS_L1 /* IS1 lookup 1 */
+#define LAN966X_VCAP_CID_IS1_L2 VCAP_CID_INGRESS_L2 /* IS1 lookup 2 */
+#define LAN966X_VCAP_CID_IS1_MAX (VCAP_CID_INGRESS_L3 - 1) /* IS1 Max */
+
+#define LAN966X_VCAP_CID_IS2_L0 VCAP_CID_INGRESS_STAGE2_L0 /* IS2 lookup 0 */
+#define LAN966X_VCAP_CID_IS2_L1 VCAP_CID_INGRESS_STAGE2_L1 /* IS2 lookup 1 */
+#define LAN966X_VCAP_CID_IS2_MAX (VCAP_CID_INGRESS_STAGE2_L2 - 1) /* IS2 Max */
+
+#define LAN966X_VCAP_CID_ES0_L0 VCAP_CID_EGRESS_L0 /* ES0 lookup 0 */
+#define LAN966X_VCAP_CID_ES0_MAX (VCAP_CID_EGRESS_L1 - 1) /* ES0 Max */
+
+#define LAN966X_PORT_QOS_PCP_COUNT 8
+#define LAN966X_PORT_QOS_DEI_COUNT 8
+#define LAN966X_PORT_QOS_PCP_DEI_COUNT \
+ (LAN966X_PORT_QOS_PCP_COUNT + LAN966X_PORT_QOS_DEI_COUNT)
+
+#define LAN966X_PORT_QOS_DSCP_COUNT 64
+
+/* Port PCP rewrite mode */
+#define LAN966X_PORT_REW_TAG_CTRL_CLASSIFIED 0
+#define LAN966X_PORT_REW_TAG_CTRL_MAPPED 2
+
+/* Port DSCP rewrite mode */
+#define LAN966X_PORT_REW_DSCP_FRAME 0
+#define LAN966X_PORT_REW_DSCP_ANALIZER 1
+#define LAN966X_PORT_QOS_REWR_DSCP_ALL 3
+
+/* MAC table entry types.
+ * ENTRYTYPE_NORMAL is subject to aging.
+ * ENTRYTYPE_LOCKED is not subject to aging.
+ * ENTRYTYPE_MACv4 is not subject to aging. For IPv4 multicast.
+ * ENTRYTYPE_MACv6 is not subject to aging. For IPv6 multicast.
+ */
+enum macaccess_entry_type {
+ ENTRYTYPE_NORMAL = 0,
+ ENTRYTYPE_LOCKED,
+ ENTRYTYPE_MACV4,
+ ENTRYTYPE_MACV6,
+};
+
+/* FDMA return action codes for checking if the frame is valid
+ * FDMA_PASS, frame is valid and can be used
+ * FDMA_ERROR, something went wrong, stop getting more frames
+ * FDMA_DROP, frame is dropped, but continue to get more frames
+ * FDMA_TX, frame is given to TX, but continue to get more frames
+ * FDMA_REDIRECT, frame is redirected to another interface, but continue to
+ * get more frames
+ */
+enum lan966x_fdma_action {
+ FDMA_PASS = 0,
+ FDMA_ERROR,
+ FDMA_DROP,
+ FDMA_TX,
+ FDMA_REDIRECT,
+};
+
+/* Controls how PORT_MASK is applied */
+enum LAN966X_PORT_MASK_MODE {
+ LAN966X_PMM_NO_ACTION,
+ LAN966X_PMM_REPLACE,
+ LAN966X_PMM_FORWARDING,
+ LAN966X_PMM_REDIRECT,
+};
+
+enum vcap_is2_port_sel_ipv6 {
+ VCAP_IS2_PS_IPV6_TCPUDP_OTHER,
+ VCAP_IS2_PS_IPV6_STD,
+ VCAP_IS2_PS_IPV6_IP4_TCPUDP_IP4_OTHER,
+ VCAP_IS2_PS_IPV6_MAC_ETYPE,
+};
+
+enum vcap_is1_port_sel_other {
+ VCAP_IS1_PS_OTHER_NORMAL,
+ VCAP_IS1_PS_OTHER_7TUPLE,
+ VCAP_IS1_PS_OTHER_DBL_VID,
+ VCAP_IS1_PS_OTHER_DMAC_VID,
+};
+
+enum vcap_is1_port_sel_ipv4 {
+ VCAP_IS1_PS_IPV4_NORMAL,
+ VCAP_IS1_PS_IPV4_7TUPLE,
+ VCAP_IS1_PS_IPV4_5TUPLE_IP4,
+ VCAP_IS1_PS_IPV4_DBL_VID,
+ VCAP_IS1_PS_IPV4_DMAC_VID,
+};
+
+enum vcap_is1_port_sel_ipv6 {
+ VCAP_IS1_PS_IPV6_NORMAL,
+ VCAP_IS1_PS_IPV6_7TUPLE,
+ VCAP_IS1_PS_IPV6_5TUPLE_IP4,
+ VCAP_IS1_PS_IPV6_NORMAL_IP6,
+ VCAP_IS1_PS_IPV6_5TUPLE_IP6,
+ VCAP_IS1_PS_IPV6_DBL_VID,
+ VCAP_IS1_PS_IPV6_DMAC_VID,
+};
+
+enum vcap_is1_port_sel_rt {
+ VCAP_IS1_PS_RT_NORMAL,
+ VCAP_IS1_PS_RT_7TUPLE,
+ VCAP_IS1_PS_RT_DBL_VID,
+ VCAP_IS1_PS_RT_DMAC_VID,
+ VCAP_IS1_PS_RT_FOLLOW_OTHER = 7,
+};
+
+struct lan966x_port;
+
+struct lan966x_db {
+ u64 dataptr;
+ u64 status;
+};
+
+struct lan966x_rx_dcb {
+ u64 nextptr;
+ u64 info;
+ struct lan966x_db db[FDMA_RX_DCB_MAX_DBS];
+};
+
+struct lan966x_tx_dcb {
+ u64 nextptr;
+ u64 info;
+ struct lan966x_db db[FDMA_TX_DCB_MAX_DBS];
+};
+
+struct lan966x_rx {
+ struct lan966x *lan966x;
+
+ /* Pointer to the array of hardware dcbs. */
+ struct lan966x_rx_dcb *dcbs;
+
+ /* Pointer to the last address in the dcbs. */
+ struct lan966x_rx_dcb *last_entry;
+
+ /* For each DB, there is a page */
+ struct page *page[FDMA_DCB_MAX][FDMA_RX_DCB_MAX_DBS];
+
+	/* Represents the db_index; it can have a value between 0 and
+	 * FDMA_RX_DCB_MAX_DBS. Once it reaches FDMA_RX_DCB_MAX_DBS, it
+	 * means that the DCB can be reused.
+	 */
+ int db_index;
+
+ /* Represents the index in the dcbs. It has a value between 0 and
+ * FDMA_DCB_MAX
+ */
+ int dcb_index;
+
+ /* Represents the dma address to the dcbs array */
+ dma_addr_t dma;
+
+ /* Represents the page order that is used to allocate the pages for the
+ * RX buffers. This value is calculated based on max MTU of the devices.
+ */
+ u8 page_order;
+
+	/* Represents the max frame size that can be received by the CPU. This
+	 * includes the IFH + VLAN tags + frame + skb_shared_info.
+	 */
+ u32 max_mtu;
+
+ u8 channel_id;
+
+ struct page_pool *page_pool;
+};
+
+struct lan966x_tx_dcb_buf {
+ dma_addr_t dma_addr;
+ struct net_device *dev;
+ union {
+ struct sk_buff *skb;
+ struct xdp_frame *xdpf;
+ struct page *page;
+ } data;
+ u32 len;
+ u32 used : 1;
+ u32 ptp : 1;
+ u32 use_skb : 1;
+ u32 xdp_ndo : 1;
+};
+
+struct lan966x_tx {
+ struct lan966x *lan966x;
+
+ /* Pointer to the dcb list */
+ struct lan966x_tx_dcb *dcbs;
+ u16 last_in_use;
+
+ /* Represents the DMA address to the first entry of the dcb entries. */
+ dma_addr_t dma;
+
+ /* Array of dcbs that are given to the HW */
+ struct lan966x_tx_dcb_buf *dcbs_buf;
+
+ u8 channel_id;
+
+ bool activated;
+};
+
+struct lan966x_stat_layout {
+ u32 offset;
+ char name[ETH_GSTRING_LEN];
+};
+
+struct lan966x_phc {
+ struct ptp_clock *clock;
+ struct ptp_clock_info info;
+ struct ptp_pin_desc pins[LAN966X_PHC_PINS_NUM];
+ struct kernel_hwtstamp_config hwtstamp_config;
+ struct lan966x *lan966x;
+ u8 index;
+};
+
+struct lan966x_skb_cb {
+ u8 rew_op;
+ u16 ts_id;
+ unsigned long jiffies;
+};
+
+#define LAN966X_PTP_TIMEOUT msecs_to_jiffies(10)
+#define LAN966X_SKB_CB(skb) \
+ ((struct lan966x_skb_cb *)((skb)->cb))
+
+struct lan966x {
+ struct device *dev;
+
+ u8 num_phys_ports;
+ struct lan966x_port **ports;
+
+ void __iomem *regs[NUM_TARGETS];
+
+ int shared_queue_sz;
+
+ u8 base_mac[ETH_ALEN];
+
+	spinlock_t tx_lock; /* lock for frame transmission */
+
+ struct net_device *bridge;
+ u16 bridge_mask;
+ u16 bridge_fwd_mask;
+
+ struct list_head mac_entries;
+ spinlock_t mac_lock; /* lock for mac_entries list */
+
+ u16 vlan_mask[VLAN_N_VID];
+ DECLARE_BITMAP(cpu_vlan_mask, VLAN_N_VID);
+
+ /* stats */
+ const struct lan966x_stat_layout *stats_layout;
+ u32 num_stats;
+
+ /* workqueue for reading stats */
+ struct mutex stats_lock;
+ u64 *stats;
+ struct delayed_work stats_work;
+ struct workqueue_struct *stats_queue;
+
+ /* interrupts */
+ int xtr_irq;
+ int ana_irq;
+ int ptp_irq;
+ int fdma_irq;
+ int ptp_ext_irq;
+
+	/* workqueue for fdb */
+ struct workqueue_struct *fdb_work;
+ struct list_head fdb_entries;
+
+ /* mdb */
+ struct list_head mdb_entries;
+ struct list_head pgid_entries;
+
+ /* ptp */
+ bool ptp;
+ struct lan966x_phc phc[LAN966X_PHC_COUNT];
+ spinlock_t ptp_clock_lock; /* lock for phc */
+ spinlock_t ptp_ts_id_lock; /* lock for ts_id */
+ struct mutex ptp_lock; /* lock for ptp interface state */
+ u16 ptp_skbs;
+
+ /* fdma */
+ bool fdma;
+ struct net_device *fdma_ndev;
+ struct lan966x_rx rx;
+ struct lan966x_tx tx;
+ struct napi_struct napi;
+
+ /* Mirror */
+ struct lan966x_port *mirror_monitor;
+ u32 mirror_mask[2];
+ u32 mirror_count;
+
+ /* vcap */
+ struct vcap_control *vcap_ctrl;
+
+ /* debugfs */
+ struct dentry *debugfs_root;
+};
+
+struct lan966x_port_config {
+ phy_interface_t portmode;
+ const unsigned long *advertising;
+ int speed;
+ int duplex;
+ u32 pause;
+ bool inband;
+ bool autoneg;
+};
+
+struct lan966x_port_tc {
+ bool ingress_shared_block;
+ unsigned long police_id;
+ unsigned long ingress_mirror_id;
+ unsigned long egress_mirror_id;
+ struct flow_stats police_stat;
+ struct flow_stats mirror_stat;
+};
+
+struct lan966x_port_qos_pcp {
+ u8 map[LAN966X_PORT_QOS_PCP_DEI_COUNT];
+ bool enable;
+};
+
+struct lan966x_port_qos_dscp {
+ u8 map[LAN966X_PORT_QOS_DSCP_COUNT];
+ bool enable;
+};
+
+struct lan966x_port_qos_pcp_rewr {
+ u16 map[NUM_PRIO_QUEUES];
+ bool enable;
+};
+
+struct lan966x_port_qos_dscp_rewr {
+ u16 map[LAN966X_PORT_QOS_DSCP_COUNT];
+ bool enable;
+};
+
+struct lan966x_port_qos {
+ struct lan966x_port_qos_pcp pcp;
+ struct lan966x_port_qos_dscp dscp;
+ struct lan966x_port_qos_pcp_rewr pcp_rewr;
+ struct lan966x_port_qos_dscp_rewr dscp_rewr;
+ u8 default_prio;
+};
+
+struct lan966x_port {
+ struct net_device *dev;
+ struct lan966x *lan966x;
+
+ u8 chip_port;
+ u16 pvid;
+ u16 vid;
+ bool vlan_aware;
+
+ bool learn_ena;
+ bool mcast_ena;
+
+ struct phylink_config phylink_config;
+ struct phylink_pcs phylink_pcs;
+ struct lan966x_port_config config;
+ struct phylink *phylink;
+ struct phy *serdes;
+ struct fwnode_handle *fwnode;
+
+ u8 ptp_tx_cmd;
+ bool ptp_rx_cmd;
+ u16 ts_id;
+ struct sk_buff_head tx_skbs;
+
+ struct net_device *bond;
+ bool lag_tx_active;
+ enum netdev_lag_hash hash_type;
+
+ struct lan966x_port_tc tc;
+
+ struct bpf_prog *xdp_prog;
+ struct xdp_rxq_info xdp_rxq;
+};
+
+extern const struct phylink_mac_ops lan966x_phylink_mac_ops;
+extern const struct phylink_pcs_ops lan966x_phylink_pcs_ops;
+extern const struct ethtool_ops lan966x_ethtool_ops;
+extern struct notifier_block lan966x_switchdev_nb __read_mostly;
+extern struct notifier_block lan966x_switchdev_blocking_nb __read_mostly;
+
+bool lan966x_netdevice_check(const struct net_device *dev);
+
+void lan966x_register_notifier_blocks(void);
+void lan966x_unregister_notifier_blocks(void);
+
+bool lan966x_hw_offload(struct lan966x *lan966x, u32 port, struct sk_buff *skb);
+
+void lan966x_ifh_get_src_port(void *ifh, u64 *src_port);
+void lan966x_ifh_get_timestamp(void *ifh, u64 *timestamp);
+void lan966x_ifh_set_bypass(void *ifh, u64 bypass);
+void lan966x_ifh_set_port(void *ifh, u64 port);
+
+void lan966x_stats_get(struct net_device *dev,
+ struct rtnl_link_stats64 *stats);
+int lan966x_stats_init(struct lan966x *lan966x);
+
+void lan966x_port_config_down(struct lan966x_port *port);
+void lan966x_port_config_up(struct lan966x_port *port);
+void lan966x_port_status_get(struct lan966x_port *port,
+ struct phylink_link_state *state);
+int lan966x_port_pcs_set(struct lan966x_port *port,
+ struct lan966x_port_config *config);
+void lan966x_port_init(struct lan966x_port *port);
+
+void lan966x_port_qos_set(struct lan966x_port *port,
+ struct lan966x_port_qos *qos);
+void lan966x_port_qos_dscp_rewr_mode_set(struct lan966x_port *port,
+ int mode);
+
+int lan966x_mac_ip_learn(struct lan966x *lan966x,
+ bool cpu_copy,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid,
+ enum macaccess_entry_type type);
+int lan966x_mac_learn(struct lan966x *lan966x, int port,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid,
+ enum macaccess_entry_type type);
+int lan966x_mac_forget(struct lan966x *lan966x,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid,
+ enum macaccess_entry_type type);
+int lan966x_mac_cpu_learn(struct lan966x *lan966x, const char *addr, u16 vid);
+int lan966x_mac_cpu_forget(struct lan966x *lan966x, const char *addr, u16 vid);
+void lan966x_mac_init(struct lan966x *lan966x);
+void lan966x_mac_set_ageing(struct lan966x *lan966x,
+ u32 ageing);
+int lan966x_mac_del_entry(struct lan966x *lan966x,
+ const unsigned char *addr,
+ u16 vid);
+int lan966x_mac_add_entry(struct lan966x *lan966x,
+ struct lan966x_port *port,
+ const unsigned char *addr,
+ u16 vid);
+void lan966x_mac_lag_replace_port_entry(struct lan966x *lan966x,
+ struct lan966x_port *src,
+ struct lan966x_port *dst);
+void lan966x_mac_lag_remove_port_entry(struct lan966x *lan966x,
+ struct lan966x_port *src);
+void lan966x_mac_purge_entries(struct lan966x *lan966x);
+irqreturn_t lan966x_mac_irq_handler(struct lan966x *lan966x);
+
+void lan966x_vlan_init(struct lan966x *lan966x);
+void lan966x_vlan_port_apply(struct lan966x_port *port);
+bool lan966x_vlan_cpu_member_cpu_vlan_mask(struct lan966x *lan966x, u16 vid);
+void lan966x_vlan_port_set_vlan_aware(struct lan966x_port *port,
+ bool vlan_aware);
+int lan966x_vlan_port_set_vid(struct lan966x_port *port,
+ u16 vid,
+ bool pvid,
+ bool untagged);
+void lan966x_vlan_port_add_vlan(struct lan966x_port *port,
+ u16 vid,
+ bool pvid,
+ bool untagged);
+void lan966x_vlan_port_del_vlan(struct lan966x_port *port, u16 vid);
+void lan966x_vlan_cpu_add_vlan(struct lan966x *lan966x, u16 vid);
+void lan966x_vlan_cpu_del_vlan(struct lan966x *lan966x, u16 vid);
+
+void lan966x_fdb_write_entries(struct lan966x *lan966x, u16 vid);
+void lan966x_fdb_erase_entries(struct lan966x *lan966x, u16 vid);
+int lan966x_fdb_init(struct lan966x *lan966x);
+void lan966x_fdb_deinit(struct lan966x *lan966x);
+void lan966x_fdb_flush_workqueue(struct lan966x *lan966x);
+int lan966x_handle_fdb(struct net_device *dev,
+ struct net_device *orig_dev,
+ unsigned long event, const void *ctx,
+ const struct switchdev_notifier_fdb_info *fdb_info);
+
+void lan966x_mdb_init(struct lan966x *lan966x);
+void lan966x_mdb_deinit(struct lan966x *lan966x);
+int lan966x_handle_port_mdb_add(struct lan966x_port *port,
+ const struct switchdev_obj *obj);
+int lan966x_handle_port_mdb_del(struct lan966x_port *port,
+ const struct switchdev_obj *obj);
+void lan966x_mdb_erase_entries(struct lan966x *lan966x, u16 vid);
+void lan966x_mdb_write_entries(struct lan966x *lan966x, u16 vid);
+void lan966x_mdb_clear_entries(struct lan966x *lan966x);
+void lan966x_mdb_restore_entries(struct lan966x *lan966x);
+
+int lan966x_ptp_init(struct lan966x *lan966x);
+void lan966x_ptp_deinit(struct lan966x *lan966x);
+int lan966x_ptp_hwtstamp_set(struct lan966x_port *port,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack);
+void lan966x_ptp_hwtstamp_get(struct lan966x_port *port,
+ struct kernel_hwtstamp_config *cfg);
+void lan966x_ptp_rxtstamp(struct lan966x *lan966x, struct sk_buff *skb,
+ u64 src_port, u64 timestamp);
+int lan966x_ptp_txtstamp_request(struct lan966x_port *port,
+ struct sk_buff *skb);
+void lan966x_ptp_txtstamp_release(struct lan966x_port *port,
+ struct sk_buff *skb);
+irqreturn_t lan966x_ptp_irq_handler(int irq, void *args);
+irqreturn_t lan966x_ptp_ext_irq_handler(int irq, void *args);
+u32 lan966x_ptp_get_period_ps(void);
+int lan966x_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts);
+int lan966x_ptp_setup_traps(struct lan966x_port *port,
+ struct kernel_hwtstamp_config *cfg);
+int lan966x_ptp_del_traps(struct lan966x_port *port);
+
+int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev);
+int lan966x_fdma_xmit_xdpf(struct lan966x_port *port, void *ptr, u32 len);
+int lan966x_fdma_change_mtu(struct lan966x *lan966x);
+void lan966x_fdma_netdev_init(struct lan966x *lan966x, struct net_device *dev);
+void lan966x_fdma_netdev_deinit(struct lan966x *lan966x, struct net_device *dev);
+int lan966x_fdma_init(struct lan966x *lan966x);
+void lan966x_fdma_deinit(struct lan966x *lan966x);
+irqreturn_t lan966x_fdma_irq_handler(int irq, void *args);
+int lan966x_fdma_reload_page_pool(struct lan966x *lan966x);
+
+int lan966x_lag_port_join(struct lan966x_port *port,
+ struct net_device *brport_dev,
+ struct net_device *bond,
+ struct netlink_ext_ack *extack);
+void lan966x_lag_port_leave(struct lan966x_port *port, struct net_device *bond);
+int lan966x_lag_port_prechangeupper(struct net_device *dev,
+ struct netdev_notifier_changeupper_info *info);
+int lan966x_lag_port_changelowerstate(struct net_device *dev,
+ struct netdev_notifier_changelowerstate_info *info);
+int lan966x_lag_netdev_prechangeupper(struct net_device *dev,
+ struct netdev_notifier_changeupper_info *info);
+int lan966x_lag_netdev_changeupper(struct net_device *dev,
+ struct netdev_notifier_changeupper_info *info);
+bool lan966x_lag_first_port(struct net_device *lag, struct net_device *dev);
+u32 lan966x_lag_get_mask(struct lan966x *lan966x, struct net_device *bond);
+
+int lan966x_port_changeupper(struct net_device *dev,
+ struct net_device *brport_dev,
+ struct netdev_notifier_changeupper_info *info);
+int lan966x_port_prechangeupper(struct net_device *dev,
+ struct net_device *brport_dev,
+ struct netdev_notifier_changeupper_info *info);
+void lan966x_port_stp_state_set(struct lan966x_port *port, u8 state);
+void lan966x_port_ageing_set(struct lan966x_port *port,
+ unsigned long ageing_clock_t);
+void lan966x_update_fwd_mask(struct lan966x *lan966x);
+
+int lan966x_tc_setup(struct net_device *dev, enum tc_setup_type type,
+ void *type_data);
+
+int lan966x_mqprio_add(struct lan966x_port *port, u8 num_tc);
+int lan966x_mqprio_del(struct lan966x_port *port);
+
+void lan966x_taprio_init(struct lan966x *lan966x);
+void lan966x_taprio_deinit(struct lan966x *lan966x);
+int lan966x_taprio_add(struct lan966x_port *port,
+ struct tc_taprio_qopt_offload *qopt);
+int lan966x_taprio_del(struct lan966x_port *port);
+int lan966x_taprio_speed_set(struct lan966x_port *port, int speed);
+
+int lan966x_tbf_add(struct lan966x_port *port,
+ struct tc_tbf_qopt_offload *qopt);
+int lan966x_tbf_del(struct lan966x_port *port,
+ struct tc_tbf_qopt_offload *qopt);
+
+int lan966x_cbs_add(struct lan966x_port *port,
+ struct tc_cbs_qopt_offload *qopt);
+int lan966x_cbs_del(struct lan966x_port *port,
+ struct tc_cbs_qopt_offload *qopt);
+
+int lan966x_ets_add(struct lan966x_port *port,
+ struct tc_ets_qopt_offload *qopt);
+int lan966x_ets_del(struct lan966x_port *port,
+ struct tc_ets_qopt_offload *qopt);
+
+int lan966x_tc_matchall(struct lan966x_port *port,
+ struct tc_cls_matchall_offload *f,
+ bool ingress);
+
+int lan966x_police_port_add(struct lan966x_port *port,
+ struct flow_action *action,
+ struct flow_action_entry *act,
+ unsigned long police_id,
+ bool ingress,
+ struct netlink_ext_ack *extack);
+int lan966x_police_port_del(struct lan966x_port *port,
+ unsigned long police_id,
+ struct netlink_ext_ack *extack);
+void lan966x_police_port_stats(struct lan966x_port *port,
+ struct flow_stats *stats);
+
+int lan966x_mirror_port_add(struct lan966x_port *port,
+ struct flow_action_entry *action,
+ unsigned long mirror_id,
+ bool ingress,
+ struct netlink_ext_ack *extack);
+int lan966x_mirror_port_del(struct lan966x_port *port,
+ bool ingress,
+ struct netlink_ext_ack *extack);
+void lan966x_mirror_port_stats(struct lan966x_port *port,
+ struct flow_stats *stats,
+ bool ingress);
+
+int lan966x_xdp_port_init(struct lan966x_port *port);
+void lan966x_xdp_port_deinit(struct lan966x_port *port);
+int lan966x_xdp(struct net_device *dev, struct netdev_bpf *xdp);
+int lan966x_xdp_run(struct lan966x_port *port,
+ struct page *page,
+ u32 data_len);
+int lan966x_xdp_xmit(struct net_device *dev,
+ int n,
+ struct xdp_frame **frames,
+ u32 flags);
+bool lan966x_xdp_present(struct lan966x *lan966x);
+static inline bool lan966x_xdp_port_present(struct lan966x_port *port)
+{
+ return !!port->xdp_prog;
+}
+
+int lan966x_vcap_init(struct lan966x *lan966x);
+void lan966x_vcap_deinit(struct lan966x *lan966x);
+#if defined(CONFIG_DEBUG_FS)
+int lan966x_vcap_port_info(struct net_device *dev,
+ struct vcap_admin *admin,
+ struct vcap_output_print *out);
+#else
+static inline int lan966x_vcap_port_info(struct net_device *dev,
+ struct vcap_admin *admin,
+ struct vcap_output_print *out)
+{
+ return 0;
+}
+#endif
+
+int lan966x_tc_flower(struct lan966x_port *port,
+ struct flow_cls_offload *f,
+ bool ingress);
+
+int lan966x_goto_port_add(struct lan966x_port *port,
+ int from_cid, int to_cid,
+ unsigned long goto_id,
+ struct netlink_ext_ack *extack);
+int lan966x_goto_port_del(struct lan966x_port *port,
+ unsigned long goto_id,
+ struct netlink_ext_ack *extack);
+
+#ifdef CONFIG_LAN966X_DCB
+void lan966x_dcb_init(struct lan966x *lan966x);
+#else
+static inline void lan966x_dcb_init(struct lan966x *lan966x)
+{
+}
+#endif
+
+static inline void __iomem *lan_addr(void __iomem *base[],
+ int id, int tinst, int tcnt,
+ int gbase, int ginst,
+ int gcnt, int gwidth,
+ int raddr, int rinst,
+ int rcnt, int rwidth)
+{
+ WARN_ON((tinst) >= tcnt);
+ WARN_ON((ginst) >= gcnt);
+ WARN_ON((rinst) >= rcnt);
+ return base[id + (tinst)] +
+ gbase + ((ginst) * gwidth) +
+ raddr + ((rinst) * rwidth);
+}
+
+static inline u32 lan_rd(struct lan966x *lan966x, int id, int tinst, int tcnt,
+ int gbase, int ginst, int gcnt, int gwidth,
+ int raddr, int rinst, int rcnt, int rwidth)
+{
+ return readl(lan_addr(lan966x->regs, id, tinst, tcnt, gbase, ginst,
+ gcnt, gwidth, raddr, rinst, rcnt, rwidth));
+}
+
+static inline void lan_wr(u32 val, struct lan966x *lan966x,
+ int id, int tinst, int tcnt,
+ int gbase, int ginst, int gcnt, int gwidth,
+ int raddr, int rinst, int rcnt, int rwidth)
+{
+ writel(val, lan_addr(lan966x->regs, id, tinst, tcnt,
+ gbase, ginst, gcnt, gwidth,
+ raddr, rinst, rcnt, rwidth));
+}
+
+static inline void lan_rmw(u32 val, u32 mask, struct lan966x *lan966x,
+ int id, int tinst, int tcnt,
+ int gbase, int ginst, int gcnt, int gwidth,
+ int raddr, int rinst, int rcnt, int rwidth)
+{
+ u32 nval;
+
+ nval = readl(lan_addr(lan966x->regs, id, tinst, tcnt, gbase, ginst,
+ gcnt, gwidth, raddr, rinst, rcnt, rwidth));
+ nval = (nval & ~mask) | (val & mask);
+ writel(nval, lan_addr(lan966x->regs, id, tinst, tcnt, gbase, ginst,
+ gcnt, gwidth, raddr, rinst, rcnt, rwidth));
+}
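
To make the twelve-parameter addressing concrete, here is a worked expansion
with hypothetical numbers (the real id/gbase/raddr values come from the
generated macros in lan966x_regs.h):

/* Hypothetical register: 9 groups of 0x80 bytes starting at 0x400 inside
 * target id 2 (single target instance), register at offset 0x14, not
 * replicated:
 *
 *   lan_addr(regs, 2, 0, 1,          id, tinst, tcnt
 *            0x400, 3, 9, 0x80,      gbase, ginst, gcnt, gwidth
 *            0x14, 0, 1, 4)          raddr, rinst, rcnt, rwidth
 *     = regs[2 + 0] + 0x400 + 3 * 0x80 + 0x14 + 0 * 4
 *     = regs[2] + 0x594
 *
 * Each REG(...) macro in lan966x_regs.h expands to exactly such a
 * parameter list, which is why call sites pass a single macro to
 * lan_rd()/lan_wr()/lan_rmw().
 */
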
+
+#endif /* __LAN966X_MAIN_H__ */
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_mdb.c b/drivers/net/ethernet/microchip/lan966x/lan966x_mdb.c
new file mode 100644
index 0000000000..2af55268bf
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_mdb.c
@@ -0,0 +1,551 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <net/switchdev.h>
+
+#include "lan966x_main.h"
+
+struct lan966x_pgid_entry {
+ struct list_head list;
+ int index;
+ refcount_t refcount;
+ u16 ports;
+};
+
+struct lan966x_mdb_entry {
+ struct list_head list;
+ unsigned char mac[ETH_ALEN];
+ u16 vid;
+ u16 ports;
+ struct lan966x_pgid_entry *pgid;
+ u8 cpu_copy;
+};
+
+void lan966x_mdb_init(struct lan966x *lan966x)
+{
+ INIT_LIST_HEAD(&lan966x->mdb_entries);
+ INIT_LIST_HEAD(&lan966x->pgid_entries);
+}
+
+static void lan966x_mdb_purge_mdb_entries(struct lan966x *lan966x)
+{
+ struct lan966x_mdb_entry *mdb_entry, *tmp;
+
+ list_for_each_entry_safe(mdb_entry, tmp, &lan966x->mdb_entries, list) {
+ list_del(&mdb_entry->list);
+ kfree(mdb_entry);
+ }
+}
+
+static void lan966x_mdb_purge_pgid_entries(struct lan966x *lan966x)
+{
+ struct lan966x_pgid_entry *pgid_entry, *tmp;
+
+ list_for_each_entry_safe(pgid_entry, tmp, &lan966x->pgid_entries, list) {
+ list_del(&pgid_entry->list);
+ kfree(pgid_entry);
+ }
+}
+
+void lan966x_mdb_deinit(struct lan966x *lan966x)
+{
+ lan966x_mdb_purge_mdb_entries(lan966x);
+ lan966x_mdb_purge_pgid_entries(lan966x);
+}
+
+static struct lan966x_mdb_entry *
+lan966x_mdb_entry_get(struct lan966x *lan966x,
+ const unsigned char *mac,
+ u16 vid)
+{
+ struct lan966x_mdb_entry *mdb_entry;
+
+ list_for_each_entry(mdb_entry, &lan966x->mdb_entries, list) {
+ if (ether_addr_equal(mdb_entry->mac, mac) &&
+ mdb_entry->vid == vid)
+ return mdb_entry;
+ }
+
+ return NULL;
+}
+
+static struct lan966x_mdb_entry *
+lan966x_mdb_entry_add(struct lan966x *lan966x,
+ const struct switchdev_obj_port_mdb *mdb)
+{
+ struct lan966x_mdb_entry *mdb_entry;
+
+ mdb_entry = kzalloc(sizeof(*mdb_entry), GFP_KERNEL);
+ if (!mdb_entry)
+ return ERR_PTR(-ENOMEM);
+
+ ether_addr_copy(mdb_entry->mac, mdb->addr);
+ mdb_entry->vid = mdb->vid;
+
+ list_add_tail(&mdb_entry->list, &lan966x->mdb_entries);
+
+ return mdb_entry;
+}
+
+static void lan966x_mdb_encode_mac(unsigned char *mac,
+ struct lan966x_mdb_entry *mdb_entry,
+ enum macaccess_entry_type type)
+{
+ ether_addr_copy(mac, mdb_entry->mac);
+
+ if (type == ENTRYTYPE_MACV4) {
+ mac[0] = 0;
+ mac[1] = mdb_entry->ports >> 8;
+ mac[2] = mdb_entry->ports & 0xff;
+ } else if (type == ENTRYTYPE_MACV6) {
+ mac[0] = mdb_entry->ports >> 8;
+ mac[1] = mdb_entry->ports & 0xff;
+ }
+}
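
The inverse mapping makes the encoding easier to follow (a hypothetical
helper, not part of the driver):

/* Recover the port mask that lan966x_mdb_encode_mac() folded into the
 * leading bytes of an IPv4/IPv6 multicast MAC entry.
 */
static u16 example_mdb_decode_ports(const unsigned char *mac,
				    enum macaccess_entry_type type)
{
	if (type == ENTRYTYPE_MACV4)
		return (mac[1] << 8) | mac[2];
	if (type == ENTRYTYPE_MACV6)
		return (mac[0] << 8) | mac[1];
	return 0;
}
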
+
+static int lan966x_mdb_ip_add(struct lan966x_port *port,
+ const struct switchdev_obj_port_mdb *mdb,
+ enum macaccess_entry_type type)
+{
+ bool cpu_port = netif_is_bridge_master(mdb->obj.orig_dev);
+ struct lan966x *lan966x = port->lan966x;
+ struct lan966x_mdb_entry *mdb_entry;
+ unsigned char mac[ETH_ALEN];
+ bool cpu_copy = false;
+
+ mdb_entry = lan966x_mdb_entry_get(lan966x, mdb->addr, mdb->vid);
+ if (!mdb_entry) {
+ mdb_entry = lan966x_mdb_entry_add(lan966x, mdb);
+ if (IS_ERR(mdb_entry))
+ return PTR_ERR(mdb_entry);
+ } else {
+ lan966x_mdb_encode_mac(mac, mdb_entry, type);
+ lan966x_mac_forget(lan966x, mac, mdb_entry->vid, type);
+ }
+
+ if (cpu_port)
+ mdb_entry->cpu_copy++;
+ else
+ mdb_entry->ports |= BIT(port->chip_port);
+
+ /* Copy the frame to CPU only if the CPU is in the VLAN */
+ if (lan966x_vlan_cpu_member_cpu_vlan_mask(lan966x, mdb_entry->vid) &&
+ mdb_entry->cpu_copy)
+ cpu_copy = true;
+
+ lan966x_mdb_encode_mac(mac, mdb_entry, type);
+ return lan966x_mac_ip_learn(lan966x, cpu_copy,
+ mac, mdb_entry->vid, type);
+}
+
+static int lan966x_mdb_ip_del(struct lan966x_port *port,
+ const struct switchdev_obj_port_mdb *mdb,
+ enum macaccess_entry_type type)
+{
+ bool cpu_port = netif_is_bridge_master(mdb->obj.orig_dev);
+ struct lan966x *lan966x = port->lan966x;
+ struct lan966x_mdb_entry *mdb_entry;
+ unsigned char mac[ETH_ALEN];
+ u16 ports;
+
+ mdb_entry = lan966x_mdb_entry_get(lan966x, mdb->addr, mdb->vid);
+ if (!mdb_entry)
+ return -ENOENT;
+
+ ports = mdb_entry->ports;
+ if (cpu_port) {
+		/* If there are still other references to the CPU port then
+		 * there is no point in deleting and re-adding the same entry
+		 */
+ mdb_entry->cpu_copy--;
+ if (mdb_entry->cpu_copy)
+ return 0;
+ } else {
+ ports &= ~BIT(port->chip_port);
+ }
+
+ lan966x_mdb_encode_mac(mac, mdb_entry, type);
+ lan966x_mac_forget(lan966x, mac, mdb_entry->vid, type);
+
+ mdb_entry->ports = ports;
+
+ if (!mdb_entry->ports && !mdb_entry->cpu_copy) {
+ list_del(&mdb_entry->list);
+ kfree(mdb_entry);
+ return 0;
+ }
+
+ lan966x_mdb_encode_mac(mac, mdb_entry, type);
+ return lan966x_mac_ip_learn(lan966x, mdb_entry->cpu_copy,
+ mac, mdb_entry->vid, type);
+}
+
+static struct lan966x_pgid_entry *
+lan966x_pgid_entry_add(struct lan966x *lan966x, int index, u16 ports)
+{
+ struct lan966x_pgid_entry *pgid_entry;
+
+ pgid_entry = kzalloc(sizeof(*pgid_entry), GFP_KERNEL);
+ if (!pgid_entry)
+ return ERR_PTR(-ENOMEM);
+
+ pgid_entry->ports = ports;
+ pgid_entry->index = index;
+ refcount_set(&pgid_entry->refcount, 1);
+
+ list_add_tail(&pgid_entry->list, &lan966x->pgid_entries);
+
+ return pgid_entry;
+}
+
+static struct lan966x_pgid_entry *
+lan966x_pgid_entry_get(struct lan966x *lan966x,
+ struct lan966x_mdb_entry *mdb_entry)
+{
+ struct lan966x_pgid_entry *pgid_entry;
+ int index;
+
+ /* Try to find an existing pgid that uses the same ports as the
+ * mdb_entry
+ */
+ list_for_each_entry(pgid_entry, &lan966x->pgid_entries, list) {
+ if (pgid_entry->ports == mdb_entry->ports) {
+ refcount_inc(&pgid_entry->refcount);
+ return pgid_entry;
+ }
+ }
+
+	/* Otherwise look for a free pgid index and allocate a new entry for
+	 * it; if none is found, the hardware resources are exhausted
+	 */
+ for (index = PGID_GP_START; index < PGID_GP_END; index++) {
+ bool used = false;
+
+ list_for_each_entry(pgid_entry, &lan966x->pgid_entries, list) {
+ if (pgid_entry->index == index) {
+ used = true;
+ break;
+ }
+ }
+
+ if (!used)
+ return lan966x_pgid_entry_add(lan966x, index,
+ mdb_entry->ports);
+ }
+
+ return ERR_PTR(-ENOSPC);
+}
+
+static void lan966x_pgid_entry_del(struct lan966x *lan966x,
+ struct lan966x_pgid_entry *pgid_entry)
+{
+ if (!refcount_dec_and_test(&pgid_entry->refcount))
+ return;
+
+ list_del(&pgid_entry->list);
+ kfree(pgid_entry);
+}
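
The refcount lets MDB entries with identical port masks share a single
hardware PGID; an illustrative lifecycle:

/* Two L2 MDB entries with the same port mask:
 *
 *   add entry A (ports 0x003) -> no mask match, allocate a free index
 *                                from PGID_GP_START..PGID_GP_END, ref = 1
 *   add entry B (ports 0x003) -> mask matches, reuse the index,    ref = 2
 *   del entry A               -> refcount_dec_and_test() is false, ref = 1
 *   del entry B               -> refcount drops to 0, entry freed
 */
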
+
+static int lan966x_mdb_l2_add(struct lan966x_port *port,
+ const struct switchdev_obj_port_mdb *mdb,
+ enum macaccess_entry_type type)
+{
+ bool cpu_port = netif_is_bridge_master(mdb->obj.orig_dev);
+ struct lan966x *lan966x = port->lan966x;
+ struct lan966x_pgid_entry *pgid_entry;
+ struct lan966x_mdb_entry *mdb_entry;
+ unsigned char mac[ETH_ALEN];
+
+ mdb_entry = lan966x_mdb_entry_get(lan966x, mdb->addr, mdb->vid);
+ if (!mdb_entry) {
+ mdb_entry = lan966x_mdb_entry_add(lan966x, mdb);
+ if (IS_ERR(mdb_entry))
+ return PTR_ERR(mdb_entry);
+ } else {
+ lan966x_pgid_entry_del(lan966x, mdb_entry->pgid);
+ lan966x_mdb_encode_mac(mac, mdb_entry, type);
+ lan966x_mac_forget(lan966x, mac, mdb_entry->vid, type);
+ }
+
+ if (cpu_port) {
+ mdb_entry->ports |= BIT(CPU_PORT);
+ mdb_entry->cpu_copy++;
+ } else {
+ mdb_entry->ports |= BIT(port->chip_port);
+ }
+
+ pgid_entry = lan966x_pgid_entry_get(lan966x, mdb_entry);
+ if (IS_ERR(pgid_entry)) {
+ list_del(&mdb_entry->list);
+ kfree(mdb_entry);
+ return PTR_ERR(pgid_entry);
+ }
+ mdb_entry->pgid = pgid_entry;
+
+ /* Copy the frame to CPU only if the CPU is in the VLAN */
+ if (!lan966x_vlan_cpu_member_cpu_vlan_mask(lan966x, mdb_entry->vid) &&
+ mdb_entry->cpu_copy)
+ mdb_entry->ports &= BIT(CPU_PORT);
+
+ lan_rmw(ANA_PGID_PGID_SET(mdb_entry->ports),
+ ANA_PGID_PGID,
+ lan966x, ANA_PGID(pgid_entry->index));
+
+ return lan966x_mac_learn(lan966x, pgid_entry->index, mdb_entry->mac,
+ mdb_entry->vid, type);
+}
+
+static int lan966x_mdb_l2_del(struct lan966x_port *port,
+ const struct switchdev_obj_port_mdb *mdb,
+ enum macaccess_entry_type type)
+{
+ bool cpu_port = netif_is_bridge_master(mdb->obj.orig_dev);
+ struct lan966x *lan966x = port->lan966x;
+ struct lan966x_pgid_entry *pgid_entry;
+ struct lan966x_mdb_entry *mdb_entry;
+ unsigned char mac[ETH_ALEN];
+ u16 ports;
+
+ mdb_entry = lan966x_mdb_entry_get(lan966x, mdb->addr, mdb->vid);
+ if (!mdb_entry)
+ return -ENOENT;
+
+ ports = mdb_entry->ports;
+ if (cpu_port) {
+		/* If there are still other references to the CPU port then
+		 * there is no point in deleting and re-adding the same entry
+		 */
+ mdb_entry->cpu_copy--;
+ if (mdb_entry->cpu_copy)
+ return 0;
+
+ ports &= ~BIT(CPU_PORT);
+ } else {
+ ports &= ~BIT(port->chip_port);
+ }
+
+ lan966x_mdb_encode_mac(mac, mdb_entry, type);
+ lan966x_mac_forget(lan966x, mac, mdb_entry->vid, type);
+ lan966x_pgid_entry_del(lan966x, mdb_entry->pgid);
+
+ mdb_entry->ports = ports;
+
+ if (!mdb_entry->ports) {
+ list_del(&mdb_entry->list);
+ kfree(mdb_entry);
+ return 0;
+ }
+
+ pgid_entry = lan966x_pgid_entry_get(lan966x, mdb_entry);
+ if (IS_ERR(pgid_entry)) {
+ list_del(&mdb_entry->list);
+ kfree(mdb_entry);
+ return PTR_ERR(pgid_entry);
+ }
+ mdb_entry->pgid = pgid_entry;
+
+ lan_rmw(ANA_PGID_PGID_SET(mdb_entry->ports),
+ ANA_PGID_PGID,
+ lan966x, ANA_PGID(pgid_entry->index));
+
+ return lan966x_mac_learn(lan966x, pgid_entry->index, mdb_entry->mac,
+ mdb_entry->vid, type);
+}
+
+static enum macaccess_entry_type
+lan966x_mdb_classify(const unsigned char *mac)
+{
+ if (mac[0] == 0x01 && mac[1] == 0x00 && mac[2] == 0x5e)
+ return ENTRYTYPE_MACV4;
+ if (mac[0] == 0x33 && mac[1] == 0x33)
+ return ENTRYTYPE_MACV6;
+ return ENTRYTYPE_LOCKED;
+}
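
Two concrete inputs, using the standard IANA multicast MAC prefixes:

/* Examples:
 *   01:00:5e:01:02:03 (IPv4 group 224.1.2.3)   -> ENTRYTYPE_MACV4
 *   33:33:00:00:00:01 (IPv6 all-nodes ff02::1) -> ENTRYTYPE_MACV6
 *   01:80:c2:00:00:0e (no IP mapping)          -> ENTRYTYPE_LOCKED
 */
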
+
+int lan966x_handle_port_mdb_add(struct lan966x_port *port,
+ const struct switchdev_obj *obj)
+{
+ const struct switchdev_obj_port_mdb *mdb = SWITCHDEV_OBJ_PORT_MDB(obj);
+ enum macaccess_entry_type type;
+
+	/* The entries are added differently for ipv4/ipv6 and for l2: the
+	 * ipv4/ipv6 entries don't require a pgid entry, while the l2 entries
+	 * do
+	 */
+ type = lan966x_mdb_classify(mdb->addr);
+ if (type == ENTRYTYPE_MACV4 || type == ENTRYTYPE_MACV6)
+ return lan966x_mdb_ip_add(port, mdb, type);
+
+ return lan966x_mdb_l2_add(port, mdb, type);
+}
+
+int lan966x_handle_port_mdb_del(struct lan966x_port *port,
+ const struct switchdev_obj *obj)
+{
+ const struct switchdev_obj_port_mdb *mdb = SWITCHDEV_OBJ_PORT_MDB(obj);
+ enum macaccess_entry_type type;
+
+	/* The entries are removed differently for ipv4/ipv6 and for l2: the
+	 * ipv4/ipv6 entries don't require a pgid entry, while the l2 entries
+	 * do
+	 */
+ type = lan966x_mdb_classify(mdb->addr);
+ if (type == ENTRYTYPE_MACV4 || type == ENTRYTYPE_MACV6)
+ return lan966x_mdb_ip_del(port, mdb, type);
+
+ return lan966x_mdb_l2_del(port, mdb, type);
+}
+
+static void lan966x_mdb_ip_cpu_copy(struct lan966x *lan966x,
+ struct lan966x_mdb_entry *mdb_entry,
+ enum macaccess_entry_type type)
+{
+ unsigned char mac[ETH_ALEN];
+
+ lan966x_mdb_encode_mac(mac, mdb_entry, type);
+ lan966x_mac_forget(lan966x, mac, mdb_entry->vid, type);
+ lan966x_mac_ip_learn(lan966x, true, mac, mdb_entry->vid, type);
+}
+
+static void lan966x_mdb_l2_cpu_copy(struct lan966x *lan966x,
+ struct lan966x_mdb_entry *mdb_entry,
+ enum macaccess_entry_type type)
+{
+ struct lan966x_pgid_entry *pgid_entry;
+ unsigned char mac[ETH_ALEN];
+
+ lan966x_pgid_entry_del(lan966x, mdb_entry->pgid);
+ lan966x_mdb_encode_mac(mac, mdb_entry, type);
+ lan966x_mac_forget(lan966x, mac, mdb_entry->vid, type);
+
+ mdb_entry->ports |= BIT(CPU_PORT);
+
+ pgid_entry = lan966x_pgid_entry_get(lan966x, mdb_entry);
+ if (IS_ERR(pgid_entry))
+ return;
+
+ mdb_entry->pgid = pgid_entry;
+
+ lan_rmw(ANA_PGID_PGID_SET(mdb_entry->ports),
+ ANA_PGID_PGID,
+ lan966x, ANA_PGID(pgid_entry->index));
+
+ lan966x_mac_learn(lan966x, pgid_entry->index, mdb_entry->mac,
+ mdb_entry->vid, type);
+}
+
+void lan966x_mdb_write_entries(struct lan966x *lan966x, u16 vid)
+{
+ struct lan966x_mdb_entry *mdb_entry;
+ enum macaccess_entry_type type;
+
+ list_for_each_entry(mdb_entry, &lan966x->mdb_entries, list) {
+ if (mdb_entry->vid != vid || !mdb_entry->cpu_copy)
+ continue;
+
+ type = lan966x_mdb_classify(mdb_entry->mac);
+ if (type == ENTRYTYPE_MACV4 || type == ENTRYTYPE_MACV6)
+ lan966x_mdb_ip_cpu_copy(lan966x, mdb_entry, type);
+ else
+ lan966x_mdb_l2_cpu_copy(lan966x, mdb_entry, type);
+ }
+}
+
+static void lan966x_mdb_ip_cpu_remove(struct lan966x *lan966x,
+ struct lan966x_mdb_entry *mdb_entry,
+ enum macaccess_entry_type type)
+{
+ unsigned char mac[ETH_ALEN];
+
+ lan966x_mdb_encode_mac(mac, mdb_entry, type);
+ lan966x_mac_forget(lan966x, mac, mdb_entry->vid, type);
+ lan966x_mac_ip_learn(lan966x, false, mac, mdb_entry->vid, type);
+}
+
+static void lan966x_mdb_l2_cpu_remove(struct lan966x *lan966x,
+ struct lan966x_mdb_entry *mdb_entry,
+ enum macaccess_entry_type type)
+{
+ struct lan966x_pgid_entry *pgid_entry;
+ unsigned char mac[ETH_ALEN];
+
+ lan966x_pgid_entry_del(lan966x, mdb_entry->pgid);
+ lan966x_mdb_encode_mac(mac, mdb_entry, type);
+ lan966x_mac_forget(lan966x, mac, mdb_entry->vid, type);
+
+ mdb_entry->ports &= ~BIT(CPU_PORT);
+
+ pgid_entry = lan966x_pgid_entry_get(lan966x, mdb_entry);
+ if (IS_ERR(pgid_entry))
+ return;
+
+ mdb_entry->pgid = pgid_entry;
+
+ lan_rmw(ANA_PGID_PGID_SET(mdb_entry->ports),
+ ANA_PGID_PGID,
+ lan966x, ANA_PGID(pgid_entry->index));
+
+ lan966x_mac_learn(lan966x, pgid_entry->index, mdb_entry->mac,
+ mdb_entry->vid, type);
+}
+
+void lan966x_mdb_erase_entries(struct lan966x *lan966x, u16 vid)
+{
+ struct lan966x_mdb_entry *mdb_entry;
+ enum macaccess_entry_type type;
+
+ list_for_each_entry(mdb_entry, &lan966x->mdb_entries, list) {
+ if (mdb_entry->vid != vid || !mdb_entry->cpu_copy)
+ continue;
+
+ type = lan966x_mdb_classify(mdb_entry->mac);
+ if (type == ENTRYTYPE_MACV4 || type == ENTRYTYPE_MACV6)
+ lan966x_mdb_ip_cpu_remove(lan966x, mdb_entry, type);
+ else
+ lan966x_mdb_l2_cpu_remove(lan966x, mdb_entry, type);
+ }
+}
+
+void lan966x_mdb_clear_entries(struct lan966x *lan966x)
+{
+ struct lan966x_mdb_entry *mdb_entry;
+ enum macaccess_entry_type type;
+ unsigned char mac[ETH_ALEN];
+
+ list_for_each_entry(mdb_entry, &lan966x->mdb_entries, list) {
+ type = lan966x_mdb_classify(mdb_entry->mac);
+
+ lan966x_mdb_encode_mac(mac, mdb_entry, type);
+		/* Remove just the MAC entry, still keep the PGID in case of L2
+		 * entries because it can be restored at a later point
+		 */
+ lan966x_mac_forget(lan966x, mac, mdb_entry->vid, type);
+ }
+}
+
+void lan966x_mdb_restore_entries(struct lan966x *lan966x)
+{
+ struct lan966x_mdb_entry *mdb_entry;
+ enum macaccess_entry_type type;
+ unsigned char mac[ETH_ALEN];
+ bool cpu_copy = false;
+
+ list_for_each_entry(mdb_entry, &lan966x->mdb_entries, list) {
+ type = lan966x_mdb_classify(mdb_entry->mac);
+
+ lan966x_mdb_encode_mac(mac, mdb_entry, type);
+ if (type == ENTRYTYPE_MACV4 || type == ENTRYTYPE_MACV6) {
+ /* Copy the frame to CPU only if the CPU is in the VLAN */
+ if (lan966x_vlan_cpu_member_cpu_vlan_mask(lan966x,
+ mdb_entry->vid) &&
+ mdb_entry->cpu_copy)
+ cpu_copy = true;
+
+ lan966x_mac_ip_learn(lan966x, cpu_copy, mac,
+ mdb_entry->vid, type);
+ } else {
+ lan966x_mac_learn(lan966x, mdb_entry->pgid->index,
+ mdb_entry->mac,
+ mdb_entry->vid, type);
+ }
+ }
+}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_mirror.c b/drivers/net/ethernet/microchip/lan966x/lan966x_mirror.c
new file mode 100644
index 0000000000..7e1ba3f40c
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_mirror.c
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include "lan966x_main.h"
+
+int lan966x_mirror_port_add(struct lan966x_port *port,
+ struct flow_action_entry *action,
+ unsigned long mirror_id,
+ bool ingress,
+ struct netlink_ext_ack *extack)
+{
+ struct lan966x *lan966x = port->lan966x;
+ struct lan966x_port *monitor_port;
+
+ if (!lan966x_netdevice_check(action->dev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+				   "Destination not a lan966x port");
+ return -EOPNOTSUPP;
+ }
+
+ monitor_port = netdev_priv(action->dev);
+
+ if (lan966x->mirror_mask[ingress] & BIT(port->chip_port)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Mirror already exists");
+ return -EEXIST;
+ }
+
+ if (lan966x->mirror_monitor &&
+ lan966x->mirror_monitor != monitor_port) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot change mirror port while in use");
+ return -EBUSY;
+ }
+
+ if (port == monitor_port) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot mirror the monitor port");
+ return -EINVAL;
+ }
+
+ lan966x->mirror_mask[ingress] |= BIT(port->chip_port);
+
+ lan966x->mirror_monitor = monitor_port;
+ lan_wr(BIT(monitor_port->chip_port), lan966x, ANA_MIRRORPORTS);
+
+ if (ingress) {
+ lan_rmw(ANA_PORT_CFG_SRC_MIRROR_ENA_SET(1),
+ ANA_PORT_CFG_SRC_MIRROR_ENA,
+ lan966x, ANA_PORT_CFG(port->chip_port));
+ } else {
+ lan_wr(lan966x->mirror_mask[0], lan966x,
+ ANA_EMIRRORPORTS);
+ }
+
+ lan966x->mirror_count++;
+
+ if (ingress)
+ port->tc.ingress_mirror_id = mirror_id;
+ else
+ port->tc.egress_mirror_id = mirror_id;
+
+ return 0;
+}
+
+int lan966x_mirror_port_del(struct lan966x_port *port,
+ bool ingress,
+ struct netlink_ext_ack *extack)
+{
+ struct lan966x *lan966x = port->lan966x;
+
+ if (!(lan966x->mirror_mask[ingress] & BIT(port->chip_port))) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "There is no mirroring for this port");
+ return -ENOENT;
+ }
+
+ lan966x->mirror_mask[ingress] &= ~BIT(port->chip_port);
+
+ if (ingress) {
+ lan_rmw(ANA_PORT_CFG_SRC_MIRROR_ENA_SET(0),
+ ANA_PORT_CFG_SRC_MIRROR_ENA,
+ lan966x, ANA_PORT_CFG(port->chip_port));
+ } else {
+ lan_wr(lan966x->mirror_mask[0], lan966x,
+ ANA_EMIRRORPORTS);
+ }
+
+ lan966x->mirror_count--;
+
+ if (lan966x->mirror_count == 0) {
+ lan966x->mirror_monitor = NULL;
+ lan_wr(0, lan966x, ANA_MIRRORPORTS);
+ }
+
+ if (ingress)
+ port->tc.ingress_mirror_id = 0;
+ else
+ port->tc.egress_mirror_id = 0;
+
+ return 0;
+}
+
+void lan966x_mirror_port_stats(struct lan966x_port *port,
+ struct flow_stats *stats,
+ bool ingress)
+{
+ struct rtnl_link_stats64 new_stats;
+ struct flow_stats *old_stats;
+
+ old_stats = &port->tc.mirror_stat;
+ lan966x_stats_get(port->dev, &new_stats);
+
+ if (ingress) {
+ flow_stats_update(stats,
+ new_stats.rx_bytes - old_stats->bytes,
+ new_stats.rx_packets - old_stats->pkts,
+ new_stats.rx_dropped - old_stats->drops,
+ old_stats->lastused,
+ FLOW_ACTION_HW_STATS_IMMEDIATE);
+
+ old_stats->bytes = new_stats.rx_bytes;
+ old_stats->pkts = new_stats.rx_packets;
+ old_stats->drops = new_stats.rx_dropped;
+ old_stats->lastused = jiffies;
+ } else {
+ flow_stats_update(stats,
+ new_stats.tx_bytes - old_stats->bytes,
+ new_stats.tx_packets - old_stats->pkts,
+ new_stats.tx_dropped - old_stats->drops,
+ old_stats->lastused,
+ FLOW_ACTION_HW_STATS_IMMEDIATE);
+
+ old_stats->bytes = new_stats.tx_bytes;
+ old_stats->pkts = new_stats.tx_packets;
+ old_stats->drops = new_stats.tx_dropped;
+ old_stats->lastused = jiffies;
+ }
+}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_mqprio.c b/drivers/net/ethernet/microchip/lan966x/lan966x_mqprio.c
new file mode 100644
index 0000000000..7fa76e74f9
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_mqprio.c
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include "lan966x_main.h"
+
+int lan966x_mqprio_add(struct lan966x_port *port, u8 num_tc)
+{
+ u8 i;
+
+ if (num_tc != NUM_PRIO_QUEUES) {
+ netdev_err(port->dev, "Only %d traffic classes supported\n",
+ NUM_PRIO_QUEUES);
+ return -EINVAL;
+ }
+
+ netdev_set_num_tc(port->dev, num_tc);
+
+ for (i = 0; i < num_tc; ++i)
+ netdev_set_tc_queue(port->dev, i, 1, i);
+
+ return 0;
+}
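
For reference, this offload is typically exercised with an mqprio qdisc
requesting exactly NUM_PRIO_QUEUES traffic classes (an illustrative command,
assuming NUM_PRIO_QUEUES is 8):

/* tc qdisc add dev eth0 root handle 100: mqprio num_tc 8 \
 *    map 0 1 2 3 4 5 6 7 queues 1@0 1@1 1@2 1@3 1@4 1@5 1@6 1@7 hw 1
 *
 * Any other num_tc value is rejected with -EINVAL by the check above.
 */
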
+
+int lan966x_mqprio_del(struct lan966x_port *port)
+{
+ netdev_reset_tc(port->dev);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c b/drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c
new file mode 100644
index 0000000000..1d63903f90
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c
@@ -0,0 +1,137 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/module.h>
+#include <linux/phylink.h>
+#include <linux/device.h>
+#include <linux/netdevice.h>
+#include <linux/phy/phy.h>
+
+#include "lan966x_main.h"
+
+static struct phylink_pcs *lan966x_phylink_mac_select(struct phylink_config *config,
+ phy_interface_t interface)
+{
+ struct lan966x_port *port = netdev_priv(to_net_dev(config->dev));
+
+ return &port->phylink_pcs;
+}
+
+static void lan966x_phylink_mac_config(struct phylink_config *config,
+ unsigned int mode,
+ const struct phylink_link_state *state)
+{
+}
+
+static int lan966x_phylink_mac_prepare(struct phylink_config *config,
+ unsigned int mode,
+ phy_interface_t iface)
+{
+ struct lan966x_port *port = netdev_priv(to_net_dev(config->dev));
+ phy_interface_t serdes_mode = iface;
+ int err;
+
+ if (port->serdes) {
+ err = phy_set_mode_ext(port->serdes, PHY_MODE_ETHERNET,
+ serdes_mode);
+ if (err) {
+ netdev_err(to_net_dev(config->dev),
+ "Could not set mode of SerDes\n");
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static void lan966x_phylink_mac_link_up(struct phylink_config *config,
+ struct phy_device *phy,
+ unsigned int mode,
+ phy_interface_t interface,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+ struct lan966x_port *port = netdev_priv(to_net_dev(config->dev));
+ struct lan966x_port_config *port_config = &port->config;
+
+ port_config->duplex = duplex;
+ port_config->speed = speed;
+ port_config->pause = 0;
+ port_config->pause |= tx_pause ? MLO_PAUSE_TX : 0;
+ port_config->pause |= rx_pause ? MLO_PAUSE_RX : 0;
+
+ if (phy_interface_mode_is_rgmii(interface))
+ phy_set_speed(port->serdes, speed);
+
+ lan966x_port_config_up(port);
+}
+
+static void lan966x_phylink_mac_link_down(struct phylink_config *config,
+ unsigned int mode,
+ phy_interface_t interface)
+{
+ struct lan966x_port *port = netdev_priv(to_net_dev(config->dev));
+ struct lan966x *lan966x = port->lan966x;
+
+ lan966x_port_config_down(port);
+
+ /* Take PCS out of reset */
+ lan_rmw(DEV_CLOCK_CFG_PCS_RX_RST_SET(0) |
+ DEV_CLOCK_CFG_PCS_TX_RST_SET(0),
+ DEV_CLOCK_CFG_PCS_RX_RST |
+ DEV_CLOCK_CFG_PCS_TX_RST,
+ lan966x, DEV_CLOCK_CFG(port->chip_port));
+}
+
+static struct lan966x_port *lan966x_pcs_to_port(struct phylink_pcs *pcs)
+{
+ return container_of(pcs, struct lan966x_port, phylink_pcs);
+}
+
+static void lan966x_pcs_get_state(struct phylink_pcs *pcs,
+ struct phylink_link_state *state)
+{
+ struct lan966x_port *port = lan966x_pcs_to_port(pcs);
+
+ lan966x_port_status_get(port, state);
+}
+
+static int lan966x_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
+ phy_interface_t interface,
+ const unsigned long *advertising,
+ bool permit_pause_to_mac)
+{
+ struct lan966x_port *port = lan966x_pcs_to_port(pcs);
+ struct lan966x_port_config config;
+ int ret;
+
+ config = port->config;
+ config.portmode = interface;
+ config.inband = neg_mode & PHYLINK_PCS_NEG_INBAND;
+ config.autoneg = neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED;
+ config.advertising = advertising;
+
+ ret = lan966x_port_pcs_set(port, &config);
+ if (ret)
+ netdev_err(port->dev, "port PCS config failed: %d\n", ret);
+
+ return ret;
+}
+
+static void lan966x_pcs_aneg_restart(struct phylink_pcs *pcs)
+{
+ /* Currently not used */
+}
+
+const struct phylink_mac_ops lan966x_phylink_mac_ops = {
+ .mac_select_pcs = lan966x_phylink_mac_select,
+ .mac_config = lan966x_phylink_mac_config,
+ .mac_prepare = lan966x_phylink_mac_prepare,
+ .mac_link_down = lan966x_phylink_mac_link_down,
+ .mac_link_up = lan966x_phylink_mac_link_up,
+};
+
+const struct phylink_pcs_ops lan966x_phylink_pcs_ops = {
+ .pcs_get_state = lan966x_pcs_get_state,
+ .pcs_config = lan966x_pcs_config,
+ .pcs_an_restart = lan966x_pcs_aneg_restart,
+};
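
These two ops tables are handed to phylink when a port is created; a minimal
sketch of that registration, with assumed arguments (the real call lives in
lan966x_main.c):

static int example_register_phylink(struct lan966x_port *port)
{
	struct phylink *phylink;

	port->phylink_pcs.ops = &lan966x_phylink_pcs_ops;

	phylink = phylink_create(&port->phylink_config,
				 port->fwnode,
				 port->config.portmode,
				 &lan966x_phylink_mac_ops);
	if (IS_ERR(phylink))
		return PTR_ERR(phylink);

	port->phylink = phylink;

	return 0;
}
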
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_police.c b/drivers/net/ethernet/microchip/lan966x/lan966x_police.c
new file mode 100644
index 0000000000..7302df2300
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_police.c
@@ -0,0 +1,226 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include "lan966x_main.h"
+
+/* 0-8 : 9 port policers */
+#define POL_IDX_PORT 0
+
+/* Policer order: Serial (QoS -> Port -> VCAP) */
+#define POL_ORDER 0x1d3
+
+struct lan966x_tc_policer {
+ /* kilobit per second */
+ u32 rate;
+ /* bytes */
+ u32 burst;
+};
+
+static int lan966x_police_add(struct lan966x_port *port,
+ struct lan966x_tc_policer *pol,
+ u16 pol_idx)
+{
+ struct lan966x *lan966x = port->lan966x;
+
+	/* Rate unit is 33 1/3 kbps */
+ pol->rate = DIV_ROUND_UP(pol->rate * 3, 100);
+ /* Avoid zero burst size */
+ pol->burst = pol->burst ?: 1;
+ /* Unit is 4kB */
+ pol->burst = DIV_ROUND_UP(pol->burst, 4096);
+
+ if (pol->rate > GENMASK(15, 0) ||
+ pol->burst > GENMASK(6, 0))
+ return -EINVAL;
+
+ lan_wr(ANA_POL_MODE_DROP_ON_YELLOW_ENA_SET(0) |
+ ANA_POL_MODE_MARK_ALL_FRMS_RED_ENA_SET(0) |
+ ANA_POL_MODE_IPG_SIZE_SET(20) |
+ ANA_POL_MODE_FRM_MODE_SET(1) |
+ ANA_POL_MODE_OVERSHOOT_ENA_SET(1),
+ lan966x, ANA_POL_MODE(pol_idx));
+
+ lan_wr(ANA_POL_PIR_STATE_PIR_LVL_SET(0),
+ lan966x, ANA_POL_PIR_STATE(pol_idx));
+
+ lan_wr(ANA_POL_PIR_CFG_PIR_RATE_SET(pol->rate) |
+ ANA_POL_PIR_CFG_PIR_BURST_SET(pol->burst),
+ lan966x, ANA_POL_PIR_CFG(pol_idx));
+
+ return 0;
+}
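
A worked conversion for a typical filter, using the 33 1/3 kbps and 4 kB
hardware units encoded above:

/* tc police rate 100Mbit burst 10000 (bytes):
 *   pol->rate  = 100000 kbps -> DIV_ROUND_UP(100000 * 3, 100) = 3000
 *                units of 33 1/3 kbps, i.e. ~100 Mbps programmed
 *   pol->burst = 10000 bytes -> DIV_ROUND_UP(10000, 4096) = 3
 *                units of 4 kB, i.e. 12 kB actually granted
 * Both values fit the 16-bit rate and 7-bit burst fields checked above.
 */
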
+
+static void lan966x_police_del(struct lan966x_port *port, u16 pol_idx)
+{
+ struct lan966x *lan966x = port->lan966x;
+
+ lan_wr(ANA_POL_MODE_DROP_ON_YELLOW_ENA_SET(0) |
+ ANA_POL_MODE_MARK_ALL_FRMS_RED_ENA_SET(0) |
+ ANA_POL_MODE_IPG_SIZE_SET(20) |
+ ANA_POL_MODE_FRM_MODE_SET(2) |
+ ANA_POL_MODE_OVERSHOOT_ENA_SET(1),
+ lan966x, ANA_POL_MODE(pol_idx));
+
+ lan_wr(ANA_POL_PIR_STATE_PIR_LVL_SET(0),
+ lan966x, ANA_POL_PIR_STATE(pol_idx));
+
+ lan_wr(ANA_POL_PIR_CFG_PIR_RATE_SET(GENMASK(14, 0)) |
+ ANA_POL_PIR_CFG_PIR_BURST_SET(0),
+ lan966x, ANA_POL_PIR_CFG(pol_idx));
+}
+
+static int lan966x_police_validate(struct lan966x_port *port,
+ const struct flow_action *action,
+ const struct flow_action_entry *act,
+ unsigned long police_id,
+ bool ingress,
+ struct netlink_ext_ack *extack)
+{
+ if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when exceed action is not drop");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
+ act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is not pipe or ok");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
+ !flow_action_is_last_entry(action, act)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is ok, but action is not last");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.peakrate_bytes_ps ||
+ act->police.avrate || act->police.overhead) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when peakrate/avrate/overhead is configured");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.rate_pkt_ps) {
+ NL_SET_ERR_MSG_MOD(extack,
+				   "QoS offload does not support packets per second");
+ return -EOPNOTSUPP;
+ }
+
+ if (!ingress) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Policer is not supported on egress");
+ return -EOPNOTSUPP;
+ }
+
+ if (port->tc.ingress_shared_block) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Policer is not supported on shared ingress blocks");
+ return -EOPNOTSUPP;
+ }
+
+ if (port->tc.police_id && port->tc.police_id != police_id) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only one policer per port is supported");
+ return -EEXIST;
+ }
+
+ return 0;
+}
+
+int lan966x_police_port_add(struct lan966x_port *port,
+ struct flow_action *action,
+ struct flow_action_entry *act,
+ unsigned long police_id,
+ bool ingress,
+ struct netlink_ext_ack *extack)
+{
+ struct lan966x *lan966x = port->lan966x;
+ struct rtnl_link_stats64 new_stats;
+ struct lan966x_tc_policer pol;
+ struct flow_stats *old_stats;
+ int err;
+
+ err = lan966x_police_validate(port, action, act, police_id, ingress,
+ extack);
+ if (err)
+ return err;
+
+ memset(&pol, 0, sizeof(pol));
+
+ pol.rate = div_u64(act->police.rate_bytes_ps, 1000) * 8;
+ pol.burst = act->police.burst;
+
+ err = lan966x_police_add(port, &pol, POL_IDX_PORT + port->chip_port);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to add policer to port");
+ return err;
+ }
+
+ lan_rmw(ANA_POL_CFG_PORT_POL_ENA_SET(1) |
+ ANA_POL_CFG_POL_ORDER_SET(POL_ORDER),
+ ANA_POL_CFG_PORT_POL_ENA |
+ ANA_POL_CFG_POL_ORDER,
+ lan966x, ANA_POL_CFG(port->chip_port));
+
+ port->tc.police_id = police_id;
+
+ /* Setup initial stats */
+ old_stats = &port->tc.police_stat;
+ lan966x_stats_get(port->dev, &new_stats);
+ old_stats->bytes = new_stats.rx_bytes;
+ old_stats->pkts = new_stats.rx_packets;
+ old_stats->drops = new_stats.rx_dropped;
+ old_stats->lastused = jiffies;
+
+ return 0;
+}
+
+int lan966x_police_port_del(struct lan966x_port *port,
+ unsigned long police_id,
+ struct netlink_ext_ack *extack)
+{
+ struct lan966x *lan966x = port->lan966x;
+
+ if (port->tc.police_id != police_id) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Invalid policer id");
+ return -EINVAL;
+ }
+
+ lan966x_police_del(port, POL_IDX_PORT + port->chip_port);
+
+ lan_rmw(ANA_POL_CFG_PORT_POL_ENA_SET(0) |
+ ANA_POL_CFG_POL_ORDER_SET(POL_ORDER),
+ ANA_POL_CFG_PORT_POL_ENA |
+ ANA_POL_CFG_POL_ORDER,
+ lan966x, ANA_POL_CFG(port->chip_port));
+
+ port->tc.police_id = 0;
+
+ return 0;
+}
+
+void lan966x_police_port_stats(struct lan966x_port *port,
+ struct flow_stats *stats)
+{
+ struct rtnl_link_stats64 new_stats;
+ struct flow_stats *old_stats;
+
+ old_stats = &port->tc.police_stat;
+ lan966x_stats_get(port->dev, &new_stats);
+
+ flow_stats_update(stats,
+ new_stats.rx_bytes - old_stats->bytes,
+ new_stats.rx_packets - old_stats->pkts,
+ new_stats.rx_dropped - old_stats->drops,
+ old_stats->lastused,
+ FLOW_ACTION_HW_STATS_IMMEDIATE);
+
+ old_stats->bytes = new_stats.rx_bytes;
+ old_stats->pkts = new_stats.rx_packets;
+ old_stats->drops = new_stats.rx_dropped;
+ old_stats->lastused = jiffies;
+}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_port.c b/drivers/net/ethernet/microchip/lan966x/lan966x_port.c
new file mode 100644
index 0000000000..92108d3540
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_port.c
@@ -0,0 +1,570 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/netdevice.h>
+#include <linux/phy/phy.h>
+
+#include "lan966x_main.h"
+
+/* Watermark encode */
+#define MULTIPLIER_BIT BIT(8)
+static u32 lan966x_wm_enc(u32 value)
+{
+ value /= LAN966X_BUFFER_CELL_SZ;
+
+ if (value >= MULTIPLIER_BIT) {
+ value /= 16;
+ if (value >= MULTIPLIER_BIT)
+ value = (MULTIPLIER_BIT - 1);
+
+ value |= MULTIPLIER_BIT;
+ }
+
+ return value;
+}
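
Two worked encodings, assuming the 64-byte LAN966X_BUFFER_CELL_SZ defined
earlier in this driver (an assumption; the define lives in lan966x_main.h):

/*   6 * 1518 bytes ->   9108 / 64 =  142 cells, < 256 -> encoded as 142
 *   163840 bytes   -> 163840 / 64 = 2560 cells, >= 256
 *                  -> 2560 / 16 = 160, ORed with MULTIPLIER_BIT -> 416
 * The multiplier bit tells the hardware to scale the value by 16.
 */
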
+
+static void lan966x_port_link_down(struct lan966x_port *port)
+{
+ struct lan966x *lan966x = port->lan966x;
+ u32 val, delay = 0;
+
+ /* 0.5: Disable any AFI */
+ lan_rmw(AFI_PORT_CFG_FC_SKIP_TTI_INJ_SET(1) |
+ AFI_PORT_CFG_FRM_OUT_MAX_SET(0),
+ AFI_PORT_CFG_FC_SKIP_TTI_INJ |
+ AFI_PORT_CFG_FRM_OUT_MAX,
+ lan966x, AFI_PORT_CFG(port->chip_port));
+
+ /* wait for reg afi_port_frm_out to become 0 for the port */
+ while (true) {
+ val = lan_rd(lan966x, AFI_PORT_FRM_OUT(port->chip_port));
+ if (!AFI_PORT_FRM_OUT_FRM_OUT_CNT_GET(val))
+ break;
+
+ usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
+ delay++;
+ if (delay == 2000) {
+ pr_err("AFI timeout chip port %u", port->chip_port);
+ break;
+ }
+ }
+
+ delay = 0;
+
+ /* 1: Reset the PCS Rx clock domain */
+ lan_rmw(DEV_CLOCK_CFG_PCS_RX_RST_SET(1),
+ DEV_CLOCK_CFG_PCS_RX_RST,
+ lan966x, DEV_CLOCK_CFG(port->chip_port));
+
+ /* 2: Disable MAC frame reception */
+ lan_rmw(DEV_MAC_ENA_CFG_RX_ENA_SET(0),
+ DEV_MAC_ENA_CFG_RX_ENA,
+ lan966x, DEV_MAC_ENA_CFG(port->chip_port));
+
+ /* 3: Disable traffic being sent to or from switch port */
+ lan_rmw(QSYS_SW_PORT_MODE_PORT_ENA_SET(0),
+ QSYS_SW_PORT_MODE_PORT_ENA,
+ lan966x, QSYS_SW_PORT_MODE(port->chip_port));
+
+ /* 4: Disable dequeuing from the egress queues */
+ lan_rmw(QSYS_PORT_MODE_DEQUEUE_DIS_SET(1),
+ QSYS_PORT_MODE_DEQUEUE_DIS,
+ lan966x, QSYS_PORT_MODE(port->chip_port));
+
+ /* 5: Disable Flowcontrol */
+ lan_rmw(SYS_PAUSE_CFG_PAUSE_ENA_SET(0),
+ SYS_PAUSE_CFG_PAUSE_ENA,
+ lan966x, SYS_PAUSE_CFG(port->chip_port));
+
+ /* 5.1: Disable PFC */
+ lan_rmw(QSYS_SW_PORT_MODE_TX_PFC_ENA_SET(0),
+ QSYS_SW_PORT_MODE_TX_PFC_ENA,
+ lan966x, QSYS_SW_PORT_MODE(port->chip_port));
+
+	/* 6: Wait a worst-case time of 8 ms (jumbo/10Mbit) */
+ usleep_range(8 * USEC_PER_MSEC, 9 * USEC_PER_MSEC);
+
+ /* 7: Disable HDX backpressure */
+ lan_rmw(SYS_FRONT_PORT_MODE_HDX_MODE_SET(0),
+ SYS_FRONT_PORT_MODE_HDX_MODE,
+ lan966x, SYS_FRONT_PORT_MODE(port->chip_port));
+
+	/* 8: Flush the queues associated with the port */
+ lan_rmw(QSYS_SW_PORT_MODE_AGING_MODE_SET(3),
+ QSYS_SW_PORT_MODE_AGING_MODE,
+ lan966x, QSYS_SW_PORT_MODE(port->chip_port));
+
+ /* 9: Enable dequeuing from the egress queues */
+ lan_rmw(QSYS_PORT_MODE_DEQUEUE_DIS_SET(0),
+ QSYS_PORT_MODE_DEQUEUE_DIS,
+ lan966x, QSYS_PORT_MODE(port->chip_port));
+
+ /* 10: Wait until flushing is complete */
+ while (true) {
+ val = lan_rd(lan966x, QSYS_SW_STATUS(port->chip_port));
+ if (!QSYS_SW_STATUS_EQ_AVAIL_GET(val))
+ break;
+
+ usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
+ delay++;
+ if (delay == 2000) {
+ pr_err("Flush timeout chip port %u", port->chip_port);
+ break;
+ }
+ }
+
+ /* 11: Reset the Port and MAC clock domains */
+ lan_rmw(DEV_MAC_ENA_CFG_TX_ENA_SET(0),
+ DEV_MAC_ENA_CFG_TX_ENA,
+ lan966x, DEV_MAC_ENA_CFG(port->chip_port));
+
+ lan_rmw(DEV_CLOCK_CFG_PORT_RST_SET(1),
+ DEV_CLOCK_CFG_PORT_RST,
+ lan966x, DEV_CLOCK_CFG(port->chip_port));
+
+ usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
+
+ lan_rmw(DEV_CLOCK_CFG_MAC_TX_RST_SET(1) |
+ DEV_CLOCK_CFG_MAC_RX_RST_SET(1) |
+ DEV_CLOCK_CFG_PORT_RST_SET(1),
+ DEV_CLOCK_CFG_MAC_TX_RST |
+ DEV_CLOCK_CFG_MAC_RX_RST |
+ DEV_CLOCK_CFG_PORT_RST,
+ lan966x, DEV_CLOCK_CFG(port->chip_port));
+
+ /* 12: Clear flushing */
+ lan_rmw(QSYS_SW_PORT_MODE_AGING_MODE_SET(2),
+ QSYS_SW_PORT_MODE_AGING_MODE,
+ lan966x, QSYS_SW_PORT_MODE(port->chip_port));
+
+ /* The port is disabled and flushed, now set up the port in the
+ * new operating mode
+ */
+}
+
+static void lan966x_port_link_up(struct lan966x_port *port)
+{
+ struct lan966x_port_config *config = &port->config;
+ struct lan966x *lan966x = port->lan966x;
+ int speed = 0, mode = 0;
+ int atop_wm = 0;
+
+ switch (config->speed) {
+ case SPEED_10:
+ speed = LAN966X_SPEED_10;
+ break;
+ case SPEED_100:
+ speed = LAN966X_SPEED_100;
+ break;
+ case SPEED_1000:
+ speed = LAN966X_SPEED_1000;
+ mode = DEV_MAC_MODE_CFG_GIGA_MODE_ENA_SET(1);
+ break;
+ case SPEED_2500:
+ speed = LAN966X_SPEED_2500;
+ mode = DEV_MAC_MODE_CFG_GIGA_MODE_ENA_SET(1);
+ break;
+ }
+
+ lan966x_taprio_speed_set(port, config->speed);
+
+	/* GIGA_MODE_ENA(1) also needs to be set, regardless of the port
+	 * speed, for QSGMII ports.
+	 */
+ if (phy_interface_num_ports(config->portmode) == 4)
+ mode = DEV_MAC_MODE_CFG_GIGA_MODE_ENA_SET(1);
+
+ lan_wr(config->duplex | mode,
+ lan966x, DEV_MAC_MODE_CFG(port->chip_port));
+
+ lan_rmw(DEV_MAC_IFG_CFG_TX_IFG_SET(config->duplex ? 6 : 5) |
+ DEV_MAC_IFG_CFG_RX_IFG1_SET(config->speed == SPEED_10 ? 2 : 1) |
+ DEV_MAC_IFG_CFG_RX_IFG2_SET(2),
+ DEV_MAC_IFG_CFG_TX_IFG |
+ DEV_MAC_IFG_CFG_RX_IFG1 |
+ DEV_MAC_IFG_CFG_RX_IFG2,
+ lan966x, DEV_MAC_IFG_CFG(port->chip_port));
+
+ lan_rmw(DEV_MAC_HDX_CFG_SEED_SET(4) |
+ DEV_MAC_HDX_CFG_SEED_LOAD_SET(1),
+ DEV_MAC_HDX_CFG_SEED |
+ DEV_MAC_HDX_CFG_SEED_LOAD,
+ lan966x, DEV_MAC_HDX_CFG(port->chip_port));
+
+ if (config->portmode == PHY_INTERFACE_MODE_GMII) {
+ if (config->speed == SPEED_1000)
+ lan_rmw(CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA_SET(1),
+ CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA,
+ lan966x,
+ CHIP_TOP_CUPHY_PORT_CFG(port->chip_port));
+ else
+ lan_rmw(CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA_SET(0),
+ CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA,
+ lan966x,
+ CHIP_TOP_CUPHY_PORT_CFG(port->chip_port));
+ }
+
+ /* No PFC */
+ lan_wr(ANA_PFC_CFG_FC_LINK_SPEED_SET(speed),
+ lan966x, ANA_PFC_CFG(port->chip_port));
+
+ lan_rmw(DEV_PCS1G_CFG_PCS_ENA_SET(1),
+ DEV_PCS1G_CFG_PCS_ENA,
+ lan966x, DEV_PCS1G_CFG(port->chip_port));
+
+ lan_rmw(DEV_PCS1G_SD_CFG_SD_ENA_SET(0),
+ DEV_PCS1G_SD_CFG_SD_ENA,
+ lan966x, DEV_PCS1G_SD_CFG(port->chip_port));
+
+ /* Set Pause WM hysteresis, start/stop are in 1518 byte units */
+ lan_wr(SYS_PAUSE_CFG_PAUSE_ENA_SET(1) |
+ SYS_PAUSE_CFG_PAUSE_STOP_SET(lan966x_wm_enc(4 * 1518)) |
+ SYS_PAUSE_CFG_PAUSE_START_SET(lan966x_wm_enc(6 * 1518)),
+ lan966x, SYS_PAUSE_CFG(port->chip_port));
+
+ /* Set SMAC of Pause frame (00:00:00:00:00:00) */
+ lan_wr(0, lan966x, DEV_FC_MAC_LOW_CFG(port->chip_port));
+ lan_wr(0, lan966x, DEV_FC_MAC_HIGH_CFG(port->chip_port));
+
+ /* Flow control */
+ lan_rmw(SYS_MAC_FC_CFG_FC_LINK_SPEED_SET(speed) |
+ SYS_MAC_FC_CFG_FC_LATENCY_CFG_SET(7) |
+ SYS_MAC_FC_CFG_ZERO_PAUSE_ENA_SET(1) |
+ SYS_MAC_FC_CFG_PAUSE_VAL_CFG_SET(0xffff) |
+ SYS_MAC_FC_CFG_RX_FC_ENA_SET(config->pause & MLO_PAUSE_RX ? 1 : 0) |
+ SYS_MAC_FC_CFG_TX_FC_ENA_SET(config->pause & MLO_PAUSE_TX ? 1 : 0),
+ SYS_MAC_FC_CFG_FC_LINK_SPEED |
+ SYS_MAC_FC_CFG_FC_LATENCY_CFG |
+ SYS_MAC_FC_CFG_ZERO_PAUSE_ENA |
+ SYS_MAC_FC_CFG_PAUSE_VAL_CFG |
+ SYS_MAC_FC_CFG_RX_FC_ENA |
+ SYS_MAC_FC_CFG_TX_FC_ENA,
+ lan966x, SYS_MAC_FC_CFG(port->chip_port));
+
+ /* Tail dropping watermark */
+ atop_wm = lan966x->shared_queue_sz;
+
+	/* The total memory size is divided by the number of front ports plus
+	 * the CPU port
+	 */
+ lan_wr(lan966x_wm_enc(atop_wm / lan966x->num_phys_ports + 1), lan966x,
+ SYS_ATOP(port->chip_port));
+ lan_wr(lan966x_wm_enc(atop_wm), lan966x, SYS_ATOP_TOT_CFG);
+
+ /* This needs to be at the end */
+ /* Enable MAC module */
+ lan_wr(DEV_MAC_ENA_CFG_RX_ENA_SET(1) |
+ DEV_MAC_ENA_CFG_TX_ENA_SET(1),
+ lan966x, DEV_MAC_ENA_CFG(port->chip_port));
+
+ /* Take out the clock from reset */
+ lan_wr(DEV_CLOCK_CFG_LINK_SPEED_SET(speed),
+ lan966x, DEV_CLOCK_CFG(port->chip_port));
+
+ /* Core: Enable port for frame transfer */
+ lan_wr(QSYS_SW_PORT_MODE_PORT_ENA_SET(1) |
+ QSYS_SW_PORT_MODE_SCH_NEXT_CFG_SET(1) |
+ QSYS_SW_PORT_MODE_INGRESS_DROP_MODE_SET(1),
+ lan966x, QSYS_SW_PORT_MODE(port->chip_port));
+
+ lan_rmw(AFI_PORT_CFG_FC_SKIP_TTI_INJ_SET(0) |
+ AFI_PORT_CFG_FRM_OUT_MAX_SET(16),
+ AFI_PORT_CFG_FC_SKIP_TTI_INJ |
+ AFI_PORT_CFG_FRM_OUT_MAX,
+ lan966x, AFI_PORT_CFG(port->chip_port));
+}
+
+void lan966x_port_config_down(struct lan966x_port *port)
+{
+ lan966x_port_link_down(port);
+}
+
+void lan966x_port_config_up(struct lan966x_port *port)
+{
+ lan966x_port_link_up(port);
+}
+
+void lan966x_port_status_get(struct lan966x_port *port,
+ struct phylink_link_state *state)
+{
+ struct lan966x *lan966x = port->lan966x;
+ bool link_down;
+ u16 bmsr = 0;
+ u16 lp_adv;
+ u32 val;
+
+ val = lan_rd(lan966x, DEV_PCS1G_STICKY(port->chip_port));
+ link_down = DEV_PCS1G_STICKY_LINK_DOWN_STICKY_GET(val);
+ if (link_down)
+ lan_wr(val, lan966x, DEV_PCS1G_STICKY(port->chip_port));
+
+ /* Get both current Link and Sync status */
+ val = lan_rd(lan966x, DEV_PCS1G_LINK_STATUS(port->chip_port));
+ state->link = DEV_PCS1G_LINK_STATUS_LINK_STATUS_GET(val) &&
+ DEV_PCS1G_LINK_STATUS_SYNC_STATUS_GET(val);
+ state->link &= !link_down;
+
+ /* Get PCS ANEG status register */
+ val = lan_rd(lan966x, DEV_PCS1G_ANEG_STATUS(port->chip_port));
+ /* Aneg complete provides more information */
+ if (DEV_PCS1G_ANEG_STATUS_ANEG_COMPLETE_GET(val)) {
+ state->an_complete = true;
+
+ bmsr |= state->link ? BMSR_LSTATUS : 0;
+ bmsr |= BMSR_ANEGCOMPLETE;
+
+ lp_adv = DEV_PCS1G_ANEG_STATUS_LP_ADV_GET(val);
+ phylink_mii_c22_pcs_decode_state(state, bmsr, lp_adv);
+ } else {
+ if (!state->link)
+ return;
+
+ if (state->interface == PHY_INTERFACE_MODE_1000BASEX)
+ state->speed = SPEED_1000;
+ else if (state->interface == PHY_INTERFACE_MODE_2500BASEX)
+ state->speed = SPEED_2500;
+
+ state->duplex = DUPLEX_FULL;
+ }
+}
+
+int lan966x_port_pcs_set(struct lan966x_port *port,
+ struct lan966x_port_config *config)
+{
+ struct lan966x *lan966x = port->lan966x;
+ bool inband_aneg = false;
+ bool outband;
+ bool full_preamble = false;
+
+ if (config->portmode == PHY_INTERFACE_MODE_QUSGMII)
+ full_preamble = true;
+
+ if (config->inband) {
+ if (config->portmode == PHY_INTERFACE_MODE_SGMII ||
+ phy_interface_num_ports(config->portmode) == 4)
+ inband_aneg = true; /* Cisco-SGMII in-band-aneg */
+ else if (config->portmode == PHY_INTERFACE_MODE_1000BASEX &&
+ config->autoneg)
+ inband_aneg = true; /* Clause-37 in-band-aneg */
+
+ outband = false;
+ } else {
+ outband = true;
+ }
+
+	/* Disable or enable inband.
+	 * For QUSGMII, we rely on the preamble to transmit data such as
+	 * timestamps, therefore force full preamble transmission and prevent
+	 * preamble shortening
+	 */
+ lan_rmw(DEV_PCS1G_MODE_CFG_SGMII_MODE_ENA_SET(outband) |
+ DEV_PCS1G_MODE_CFG_SAVE_PREAMBLE_ENA_SET(full_preamble),
+ DEV_PCS1G_MODE_CFG_SGMII_MODE_ENA |
+ DEV_PCS1G_MODE_CFG_SAVE_PREAMBLE_ENA,
+ lan966x, DEV_PCS1G_MODE_CFG(port->chip_port));
+
+ /* Enable PCS */
+ lan_wr(DEV_PCS1G_CFG_PCS_ENA_SET(1),
+ lan966x, DEV_PCS1G_CFG(port->chip_port));
+
+ if (inband_aneg) {
+ int adv = phylink_mii_c22_pcs_encode_advertisement(config->portmode,
+ config->advertising);
+ if (adv >= 0)
+ /* Enable in-band aneg */
+ lan_wr(DEV_PCS1G_ANEG_CFG_ADV_ABILITY_SET(adv) |
+ DEV_PCS1G_ANEG_CFG_SW_RESOLVE_ENA_SET(1) |
+ DEV_PCS1G_ANEG_CFG_ENA_SET(1) |
+ DEV_PCS1G_ANEG_CFG_RESTART_ONE_SHOT_SET(1),
+ lan966x, DEV_PCS1G_ANEG_CFG(port->chip_port));
+ } else {
+ lan_wr(0, lan966x, DEV_PCS1G_ANEG_CFG(port->chip_port));
+ }
+
+ /* Take PCS out of reset */
+ lan_rmw(DEV_CLOCK_CFG_LINK_SPEED_SET(LAN966X_SPEED_1000) |
+ DEV_CLOCK_CFG_PCS_RX_RST_SET(0) |
+ DEV_CLOCK_CFG_PCS_TX_RST_SET(0),
+ DEV_CLOCK_CFG_LINK_SPEED |
+ DEV_CLOCK_CFG_PCS_RX_RST |
+ DEV_CLOCK_CFG_PCS_TX_RST,
+ lan966x, DEV_CLOCK_CFG(port->chip_port));
+
+ port->config = *config;
+
+ return 0;
+}
+
+static void lan966x_port_qos_pcp_set(struct lan966x_port *port,
+ struct lan966x_port_qos_pcp *qos)
+{
+ u8 *pcp_itr = qos->map;
+ u8 pcp, dp;
+
+ lan_rmw(ANA_QOS_CFG_QOS_PCP_ENA_SET(qos->enable),
+ ANA_QOS_CFG_QOS_PCP_ENA,
+ port->lan966x, ANA_QOS_CFG(port->chip_port));
+
+ /* Map PCP and DEI to priority */
+ for (int i = 0; i < ARRAY_SIZE(qos->map); i++) {
+ pcp = *(pcp_itr + i);
+ dp = (i < LAN966X_PORT_QOS_PCP_COUNT) ? 0 : 1;
+
+ lan_rmw(ANA_PCP_DEI_CFG_QOS_PCP_DEI_VAL_SET(pcp) |
+ ANA_PCP_DEI_CFG_DP_PCP_DEI_VAL_SET(dp),
+ ANA_PCP_DEI_CFG_QOS_PCP_DEI_VAL |
+ ANA_PCP_DEI_CFG_DP_PCP_DEI_VAL,
+ port->lan966x,
+ ANA_PCP_DEI_CFG(port->chip_port, i));
+ }
+}
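
The sixteen table rows cover each (PCP, DEI) pair; assuming
LAN966X_PORT_QOS_PCP_COUNT is 8, the index layout is:

/* i = 0..7  -> PCP 0..7 with DEI 0 (written with dp = 0)
 * i = 8..15 -> PCP 0..7 with DEI 1 (written with dp = 1)
 * so qos->map[9] holds the priority used for PCP 1, DEI 1 frames.
 */
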
+
+static void lan966x_port_qos_dscp_set(struct lan966x_port *port,
+ struct lan966x_port_qos_dscp *qos)
+{
+ struct lan966x *lan966x = port->lan966x;
+
+ /* Enable/disable dscp for qos classification. */
+ lan_rmw(ANA_QOS_CFG_QOS_DSCP_ENA_SET(qos->enable),
+ ANA_QOS_CFG_QOS_DSCP_ENA,
+ lan966x, ANA_QOS_CFG(port->chip_port));
+
+ /* Map each dscp value to priority and dp */
+ for (int i = 0; i < ARRAY_SIZE(qos->map); i++)
+ lan_rmw(ANA_DSCP_CFG_DP_DSCP_VAL_SET(0) |
+ ANA_DSCP_CFG_QOS_DSCP_VAL_SET(*(qos->map + i)),
+ ANA_DSCP_CFG_DP_DSCP_VAL |
+ ANA_DSCP_CFG_QOS_DSCP_VAL,
+ lan966x, ANA_DSCP_CFG(i));
+
+ /* Set per-dscp trust */
+ for (int i = 0; i < ARRAY_SIZE(qos->map); i++)
+ lan_rmw(ANA_DSCP_CFG_DSCP_TRUST_ENA_SET(qos->enable),
+ ANA_DSCP_CFG_DSCP_TRUST_ENA,
+ lan966x, ANA_DSCP_CFG(i));
+}
+
+static int lan966x_port_qos_default_set(struct lan966x_port *port,
+ struct lan966x_port_qos *qos)
+{
+ /* Set default prio and dp level */
+ lan_rmw(ANA_QOS_CFG_DP_DEFAULT_VAL_SET(0) |
+ ANA_QOS_CFG_QOS_DEFAULT_VAL_SET(qos->default_prio),
+ ANA_QOS_CFG_DP_DEFAULT_VAL |
+ ANA_QOS_CFG_QOS_DEFAULT_VAL,
+ port->lan966x, ANA_QOS_CFG(port->chip_port));
+
+ /* Set default pcp and dei for untagged frames */
+ lan_rmw(ANA_VLAN_CFG_VLAN_DEI_SET(0) |
+ ANA_VLAN_CFG_VLAN_PCP_SET(0),
+ ANA_VLAN_CFG_VLAN_DEI |
+ ANA_VLAN_CFG_VLAN_PCP,
+ port->lan966x, ANA_VLAN_CFG(port->chip_port));
+
+ return 0;
+}
+
+static void lan966x_port_qos_pcp_rewr_set(struct lan966x_port *port,
+ struct lan966x_port_qos_pcp_rewr *qos)
+{
+ u8 mode = LAN966X_PORT_REW_TAG_CTRL_CLASSIFIED;
+ u8 pcp, dei;
+
+ if (qos->enable)
+ mode = LAN966X_PORT_REW_TAG_CTRL_MAPPED;
+
+ /* Use the mapped values only when rewriting is enabled; otherwise
+ * the classified values are used
+ */
+ lan_rmw(REW_TAG_CFG_TAG_PCP_CFG_SET(mode) |
+ REW_TAG_CFG_TAG_DEI_CFG_SET(mode),
+ REW_TAG_CFG_TAG_PCP_CFG |
+ REW_TAG_CFG_TAG_DEI_CFG,
+ port->lan966x, REW_TAG_CFG(port->chip_port));
+
+ /* Map each value to pcp and dei */
+ for (int i = 0; i < ARRAY_SIZE(qos->map); i++) {
+ pcp = qos->map[i];
+ if (pcp > LAN966X_PORT_QOS_PCP_COUNT)
+ dei = 1;
+ else
+ dei = 0;
+
+ lan_rmw(REW_PCP_DEI_CFG_DEI_QOS_VAL_SET(dei) |
+ REW_PCP_DEI_CFG_PCP_QOS_VAL_SET(pcp),
+ REW_PCP_DEI_CFG_DEI_QOS_VAL |
+ REW_PCP_DEI_CFG_PCP_QOS_VAL,
+ port->lan966x,
+ REW_PCP_DEI_CFG(port->chip_port,
+ i + dei * LAN966X_PORT_QOS_PCP_COUNT));
+ }
+}
+
+static void lan966x_port_qos_dscp_rewr_set(struct lan966x_port *port,
+ struct lan966x_port_qos_dscp_rewr *qos)
+{
+ u16 dscp;
+ u8 mode;
+
+ if (qos->enable)
+ mode = LAN966X_PORT_REW_DSCP_ANALIZER;
+ else
+ mode = LAN966X_PORT_REW_DSCP_FRAME;
+
+ /* Enable the rewrite; otherwise the values from the frame are used */
+ lan_rmw(REW_DSCP_CFG_DSCP_REWR_CFG_SET(mode),
+ REW_DSCP_CFG_DSCP_REWR_CFG,
+ port->lan966x, REW_DSCP_CFG(port->chip_port));
+
+ /* Map each classified QoS class and DP level to a DSCP value */
+ for (int i = 0; i < ARRAY_SIZE(qos->map); i++) {
+ dscp = qos->map[i];
+
+ lan_rmw(ANA_DSCP_REWR_CFG_DSCP_QOS_REWR_VAL_SET(dscp),
+ ANA_DSCP_REWR_CFG_DSCP_QOS_REWR_VAL,
+ port->lan966x, ANA_DSCP_REWR_CFG(i));
+ }
+}
+
+void lan966x_port_qos_dscp_rewr_mode_set(struct lan966x_port *port,
+ int mode)
+{
+ lan_rmw(ANA_QOS_CFG_DSCP_REWR_CFG_SET(mode),
+ ANA_QOS_CFG_DSCP_REWR_CFG,
+ port->lan966x, ANA_QOS_CFG(port->chip_port));
+}
+
+void lan966x_port_qos_set(struct lan966x_port *port,
+ struct lan966x_port_qos *qos)
+{
+ lan966x_port_qos_pcp_set(port, &qos->pcp);
+ lan966x_port_qos_dscp_set(port, &qos->dscp);
+ lan966x_port_qos_default_set(port, qos);
+ lan966x_port_qos_pcp_rewr_set(port, &qos->pcp_rewr);
+ lan966x_port_qos_dscp_rewr_set(port, &qos->dscp_rewr);
+}
+
+void lan966x_port_init(struct lan966x_port *port)
+{
+ struct lan966x_port_config *config = &port->config;
+ struct lan966x *lan966x = port->lan966x;
+
+ lan_rmw(ANA_PORT_CFG_LEARN_ENA_SET(0),
+ ANA_PORT_CFG_LEARN_ENA,
+ lan966x, ANA_PORT_CFG(port->chip_port));
+
+ lan966x_port_config_down(port);
+
+ if (lan966x->fdma)
+ lan966x_fdma_netdev_init(lan966x, port->dev);
+
+ if (phy_interface_num_ports(config->portmode) != 4)
+ return;
+
+ lan_rmw(DEV_CLOCK_CFG_PCS_RX_RST_SET(0) |
+ DEV_CLOCK_CFG_PCS_TX_RST_SET(0) |
+ DEV_CLOCK_CFG_LINK_SPEED_SET(LAN966X_SPEED_1000),
+ DEV_CLOCK_CFG_PCS_RX_RST |
+ DEV_CLOCK_CFG_PCS_TX_RST |
+ DEV_CLOCK_CFG_LINK_SPEED,
+ lan966x, DEV_CLOCK_CFG(port->chip_port));
+}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c b/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
new file mode 100644
index 0000000000..63905bb5a6
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
@@ -0,0 +1,1113 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/ptp_classify.h>
+
+#include "lan966x_main.h"
+#include "vcap_api.h"
+#include "vcap_api_client.h"
+
+#define LAN966X_MAX_PTP_ID 512
+
+/* Represents 1ppm adjustment in 2^59 format with 6.037735849ns as reference
+ * The value is calculated as follows: (1/1000000)/((2^-59)/6.037735849)
+ */
+#define LAN966X_1PPM_FORMAT 3480517749723LL
+
+/* Represents 1ppb adjustment in 2^59 format with 6.037735849ns as reference
+ * The value is calculated as follows: (1/1000000000)/((2^-59)/6.037735849)
+ */
+#define LAN966X_1PPB_FORMAT 3480517749LL
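+
+/* Worked example: 1 ppm of the 6.037735849 ns nominal increment is
+ * 6.037735849e-6 ns; expressed in units of 2^-59 ns this is
+ * 6.037735849e-6 * 2^59 ~= 3480517749723, i.e. LAN966X_1PPM_FORMAT above.
+ */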
+
+#define TOD_ACC_PIN 0x7
+
+/* This represents the base rule ID for the PTP rules that are added in the
+ * VCAP to trap frames to CPU. This number needs to be bigger than the maximum
+ * number of entries that can exist in the VCAP.
+ */
+#define LAN966X_VCAP_PTP_RULE_ID 1000000
+#define LAN966X_VCAP_L2_PTP_TRAP (LAN966X_VCAP_PTP_RULE_ID + 0)
+#define LAN966X_VCAP_IPV4_EV_PTP_TRAP (LAN966X_VCAP_PTP_RULE_ID + 1)
+#define LAN966X_VCAP_IPV4_GEN_PTP_TRAP (LAN966X_VCAP_PTP_RULE_ID + 2)
+#define LAN966X_VCAP_IPV6_EV_PTP_TRAP (LAN966X_VCAP_PTP_RULE_ID + 3)
+#define LAN966X_VCAP_IPV6_GEN_PTP_TRAP (LAN966X_VCAP_PTP_RULE_ID + 4)
+
+enum {
+ PTP_PIN_ACTION_IDLE = 0,
+ PTP_PIN_ACTION_LOAD,
+ PTP_PIN_ACTION_SAVE,
+ PTP_PIN_ACTION_CLOCK,
+ PTP_PIN_ACTION_DELTA,
+ PTP_PIN_ACTION_TOD
+};
+
+static u64 lan966x_ptp_get_nominal_value(void)
+{
+ /* This is the default value by which the time of day is increased
+ * on each system clock cycle. It is a 5.59 fixed-point nanosecond
+ * value: 0x304d4873ecade305 * 2^-59 ~= 6.037735849 ns, one period
+ * of the 165.625 MHz PTP clock.
+ */
+ return 0x304d4873ecade305;
+}
+
+static int lan966x_ptp_add_trap(struct lan966x_port *port,
+ int (*add_ptp_key)(struct vcap_rule *vrule,
+ struct lan966x_port*),
+ u32 rule_id,
+ u16 proto)
+{
+ struct lan966x *lan966x = port->lan966x;
+ struct vcap_rule *vrule;
+ int err;
+
+ vrule = vcap_get_rule(lan966x->vcap_ctrl, rule_id);
+ if (!IS_ERR(vrule)) {
+ u32 value, mask;
+
+ /* Just modify the ingress port mask and exit */
+ vcap_rule_get_key_u32(vrule, VCAP_KF_IF_IGR_PORT_MASK,
+ &value, &mask);
+ mask &= ~BIT(port->chip_port);
+ vcap_rule_mod_key_u32(vrule, VCAP_KF_IF_IGR_PORT_MASK,
+ value, mask);
+
+ err = vcap_mod_rule(vrule);
+ goto free_rule;
+ }
+
+ vrule = vcap_alloc_rule(lan966x->vcap_ctrl, port->dev,
+ LAN966X_VCAP_CID_IS2_L0,
+ VCAP_USER_PTP, 0, rule_id);
+ if (IS_ERR(vrule))
+ return PTR_ERR(vrule);
+
+ err = add_ptp_key(vrule, port);
+ if (err)
+ goto free_rule;
+
+ err = vcap_rule_add_action_bit(vrule, VCAP_AF_CPU_COPY_ENA, VCAP_BIT_1);
+ err |= vcap_rule_add_action_u32(vrule, VCAP_AF_MASK_MODE, LAN966X_PMM_REPLACE);
+ err |= vcap_val_rule(vrule, proto);
+ if (err)
+ goto free_rule;
+
+ err = vcap_add_rule(vrule);
+
+free_rule:
+ /* Free the local copy of the rule */
+ vcap_free_rule(vrule);
+ return err;
+}
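+
+/* The trap rules are shared between ports and keyed on
+ * VCAP_KF_IF_IGR_PORT_MASK. Assuming the usual VCAP value/mask semantics
+ * (a cleared mask bit is a don't-care), clearing a port's bit above widens
+ * the existing rule to also match frames received on this port;
+ * lan966x_ptp_del_trap() sets the bit again to narrow the rule back.
+ */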
+
+static int lan966x_ptp_del_trap(struct lan966x_port *port,
+ u32 rule_id)
+{
+ struct lan966x *lan966x = port->lan966x;
+ struct vcap_rule *vrule;
+ u32 value, mask;
+ int err;
+
+ vrule = vcap_get_rule(lan966x->vcap_ctrl, rule_id);
+ if (IS_ERR(vrule))
+ return -EEXIST;
+
+ vcap_rule_get_key_u32(vrule, VCAP_KF_IF_IGR_PORT_MASK, &value, &mask);
+ mask |= BIT(port->chip_port);
+
+ /* No other port requires this trap, so it is safe to remove it */
+ if (mask == GENMASK(lan966x->num_phys_ports, 0)) {
+ err = vcap_del_rule(lan966x->vcap_ctrl, port->dev, rule_id);
+ goto free_rule;
+ }
+
+ vcap_rule_mod_key_u32(vrule, VCAP_KF_IF_IGR_PORT_MASK, value, mask);
+ err = vcap_mod_rule(vrule);
+
+free_rule:
+ vcap_free_rule(vrule);
+ return err;
+}
+
+static int lan966x_ptp_add_l2_key(struct vcap_rule *vrule,
+ struct lan966x_port *port)
+{
+ return vcap_rule_add_key_u32(vrule, VCAP_KF_ETYPE, ETH_P_1588, ~0);
+}
+
+static int lan966x_ptp_add_ip_event_key(struct vcap_rule *vrule,
+ struct lan966x_port *port)
+{
+ return vcap_rule_add_key_u32(vrule, VCAP_KF_L4_DPORT, PTP_EV_PORT, ~0) ||
+ vcap_rule_add_key_bit(vrule, VCAP_KF_TCP_IS, VCAP_BIT_0);
+}
+
+static int lan966x_ptp_add_ip_general_key(struct vcap_rule *vrule,
+ struct lan966x_port *port)
+{
+ return vcap_rule_add_key_u32(vrule, VCAP_KF_L4_DPORT, PTP_GEN_PORT, ~0) ||
+ vcap_rule_add_key_bit(vrule, VCAP_KF_TCP_IS, VCAP_BIT_0);
+}
+
+static int lan966x_ptp_add_l2_rule(struct lan966x_port *port)
+{
+ return lan966x_ptp_add_trap(port, lan966x_ptp_add_l2_key,
+ LAN966X_VCAP_L2_PTP_TRAP, ETH_P_ALL);
+}
+
+static int lan966x_ptp_add_ipv4_rules(struct lan966x_port *port)
+{
+ int err;
+
+ err = lan966x_ptp_add_trap(port, lan966x_ptp_add_ip_event_key,
+ LAN966X_VCAP_IPV4_EV_PTP_TRAP, ETH_P_IP);
+ if (err)
+ return err;
+
+ err = lan966x_ptp_add_trap(port, lan966x_ptp_add_ip_general_key,
+ LAN966X_VCAP_IPV4_GEN_PTP_TRAP, ETH_P_IP);
+ if (err)
+ lan966x_ptp_del_trap(port, LAN966X_VCAP_IPV4_EV_PTP_TRAP);
+
+ return err;
+}
+
+static int lan966x_ptp_add_ipv6_rules(struct lan966x_port *port)
+{
+ int err;
+
+ err = lan966x_ptp_add_trap(port, lan966x_ptp_add_ip_event_key,
+ LAN966X_VCAP_IPV6_EV_PTP_TRAP, ETH_P_IPV6);
+ if (err)
+ return err;
+
+ err = lan966x_ptp_add_trap(port, lan966x_ptp_add_ip_general_key,
+ LAN966X_VCAP_IPV6_GEN_PTP_TRAP, ETH_P_IPV6);
+ if (err)
+ lan966x_ptp_del_trap(port, LAN966X_VCAP_IPV6_EV_PTP_TRAP);
+
+ return err;
+}
+
+static int lan966x_ptp_del_l2_rule(struct lan966x_port *port)
+{
+ return lan966x_ptp_del_trap(port, LAN966X_VCAP_L2_PTP_TRAP);
+}
+
+static int lan966x_ptp_del_ipv4_rules(struct lan966x_port *port)
+{
+ int err;
+
+ err = lan966x_ptp_del_trap(port, LAN966X_VCAP_IPV4_EV_PTP_TRAP);
+ err |= lan966x_ptp_del_trap(port, LAN966X_VCAP_IPV4_GEN_PTP_TRAP);
+
+ return err;
+}
+
+static int lan966x_ptp_del_ipv6_rules(struct lan966x_port *port)
+{
+ int err;
+
+ err = lan966x_ptp_del_trap(port, LAN966X_VCAP_IPV6_EV_PTP_TRAP);
+ err |= lan966x_ptp_del_trap(port, LAN966X_VCAP_IPV6_GEN_PTP_TRAP);
+
+ return err;
+}
+
+static int lan966x_ptp_add_traps(struct lan966x_port *port)
+{
+ int err;
+
+ err = lan966x_ptp_add_l2_rule(port);
+ if (err)
+ goto err_l2;
+
+ err = lan966x_ptp_add_ipv4_rules(port);
+ if (err)
+ goto err_ipv4;
+
+ err = lan966x_ptp_add_ipv6_rules(port);
+ if (err)
+ goto err_ipv6;
+
+ return err;
+
+err_ipv6:
+ lan966x_ptp_del_ipv4_rules(port);
+err_ipv4:
+ lan966x_ptp_del_l2_rule(port);
+err_l2:
+ return err;
+}
+
+int lan966x_ptp_del_traps(struct lan966x_port *port)
+{
+ int err;
+
+ err = lan966x_ptp_del_l2_rule(port);
+ err |= lan966x_ptp_del_ipv4_rules(port);
+ err |= lan966x_ptp_del_ipv6_rules(port);
+
+ return err;
+}
+
+int lan966x_ptp_setup_traps(struct lan966x_port *port,
+ struct kernel_hwtstamp_config *cfg)
+{
+ if (cfg->rx_filter == HWTSTAMP_FILTER_NONE)
+ return lan966x_ptp_del_traps(port);
+ else
+ return lan966x_ptp_add_traps(port);
+}
+
+int lan966x_ptp_hwtstamp_set(struct lan966x_port *port,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack)
+{
+ struct lan966x *lan966x = port->lan966x;
+ struct lan966x_phc *phc;
+
+ switch (cfg->tx_type) {
+ case HWTSTAMP_TX_ON:
+ port->ptp_tx_cmd = IFH_REW_OP_TWO_STEP_PTP;
+ break;
+ case HWTSTAMP_TX_ONESTEP_SYNC:
+ port->ptp_tx_cmd = IFH_REW_OP_ONE_STEP_PTP;
+ break;
+ case HWTSTAMP_TX_OFF:
+ port->ptp_tx_cmd = IFH_REW_OP_NOOP;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (cfg->rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ port->ptp_rx_cmd = false;
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_NTP_ALL:
+ port->ptp_rx_cmd = true;
+ cfg->rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ /* Commit back the result & save it */
+ mutex_lock(&lan966x->ptp_lock);
+ phc = &lan966x->phc[LAN966X_PHC_PORT];
+ phc->hwtstamp_config = *cfg;
+ mutex_unlock(&lan966x->ptp_lock);
+
+ return 0;
+}
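+
+/* Userspace reaches this through the standard SIOCSHWTSTAMP ioctl; a
+ * minimal sketch (hypothetical interface name, standard UAPI from
+ * linux/net_tstamp.h):
+ *
+ * struct hwtstamp_config cfg = {
+ * .tx_type = HWTSTAMP_TX_ON,
+ * .rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
+ * };
+ * struct ifreq ifr = { .ifr_data = (char *)&cfg };
+ *
+ * strncpy(ifr.ifr_name, "eth0", sizeof(ifr.ifr_name));
+ * ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
+ *
+ * On return, any of the PTP filters accepted above has been coerced to
+ * HWTSTAMP_FILTER_ALL.
+ */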
+
+void lan966x_ptp_hwtstamp_get(struct lan966x_port *port,
+ struct kernel_hwtstamp_config *cfg)
+{
+ struct lan966x *lan966x = port->lan966x;
+ struct lan966x_phc *phc;
+
+ phc = &lan966x->phc[LAN966X_PHC_PORT];
+ *cfg = phc->hwtstamp_config;
+}
+
+static int lan966x_ptp_classify(struct lan966x_port *port, struct sk_buff *skb)
+{
+ struct ptp_header *header;
+ u8 msgtype;
+ int type;
+
+ if (port->ptp_tx_cmd == IFH_REW_OP_NOOP)
+ return IFH_REW_OP_NOOP;
+
+ type = ptp_classify_raw(skb);
+ if (type == PTP_CLASS_NONE)
+ return IFH_REW_OP_NOOP;
+
+ header = ptp_parse_header(skb, type);
+ if (!header)
+ return IFH_REW_OP_NOOP;
+
+ if (port->ptp_tx_cmd == IFH_REW_OP_TWO_STEP_PTP)
+ return IFH_REW_OP_TWO_STEP_PTP;
+
+ /* If one-step is requested and this is a Sync message (msgtype 0),
+ * use the one-step operation; otherwise fall back to two-step
+ */
+ msgtype = ptp_get_msgtype(header, type);
+ if ((msgtype & 0xf) == 0)
+ return IFH_REW_OP_ONE_STEP_PTP;
+
+ return IFH_REW_OP_TWO_STEP_PTP;
+}
+
+static void lan966x_ptp_txtstamp_old_release(struct lan966x_port *port)
+{
+ struct sk_buff *skb, *skb_tmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->tx_skbs.lock, flags);
+ skb_queue_walk_safe(&port->tx_skbs, skb, skb_tmp) {
+ if (time_after(LAN966X_SKB_CB(skb)->jiffies + LAN966X_PTP_TIMEOUT,
+ jiffies))
+ break;
+
+ __skb_unlink(skb, &port->tx_skbs);
+ dev_kfree_skb_any(skb);
+ }
+ spin_unlock_irqrestore(&port->tx_skbs.lock, flags);
+}
+
+int lan966x_ptp_txtstamp_request(struct lan966x_port *port,
+ struct sk_buff *skb)
+{
+ struct lan966x *lan966x = port->lan966x;
+ unsigned long flags;
+ u8 rew_op;
+
+ rew_op = lan966x_ptp_classify(port, skb);
+ LAN966X_SKB_CB(skb)->rew_op = rew_op;
+
+ if (rew_op != IFH_REW_OP_TWO_STEP_PTP)
+ return 0;
+
+ lan966x_ptp_txtstamp_old_release(port);
+
+ spin_lock_irqsave(&lan966x->ptp_ts_id_lock, flags);
+ if (lan966x->ptp_skbs == LAN966X_MAX_PTP_ID) {
+ spin_unlock_irqrestore(&lan966x->ptp_ts_id_lock, flags);
+ return -EBUSY;
+ }
+
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+
+ skb_queue_tail(&port->tx_skbs, skb);
+ LAN966X_SKB_CB(skb)->ts_id = port->ts_id;
+ LAN966X_SKB_CB(skb)->jiffies = jiffies;
+
+ lan966x->ptp_skbs++;
+ port->ts_id++;
+ if (port->ts_id == LAN966X_MAX_PTP_ID)
+ port->ts_id = 0;
+
+ spin_unlock_irqrestore(&lan966x->ptp_ts_id_lock, flags);
+
+ return 0;
+}
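+
+/* Two-step Tx timestamping flow: the skb is queued here with a fresh ts_id,
+ * the hardware later pushes timestamp/id pairs into the two-step FIFO, and
+ * lan966x_ptp_irq_handler() below matches the id back to the queued skb and
+ * delivers the timestamp through skb_tstamp_tx().
+ */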
+
+void lan966x_ptp_txtstamp_release(struct lan966x_port *port,
+ struct sk_buff *skb)
+{
+ struct lan966x *lan966x = port->lan966x;
+ unsigned long flags;
+
+ spin_lock_irqsave(&lan966x->ptp_ts_id_lock, flags);
+ port->ts_id--;
+ lan966x->ptp_skbs--;
+ skb_unlink(skb, &port->tx_skbs);
+ spin_unlock_irqrestore(&lan966x->ptp_ts_id_lock, flags);
+}
+
+static void lan966x_get_hwtimestamp(struct lan966x *lan966x,
+ struct timespec64 *ts,
+ u32 nsec)
+{
+ /* Read current PTP time to get seconds */
+ unsigned long flags;
+ u32 curr_nsec;
+
+ spin_lock_irqsave(&lan966x->ptp_clock_lock, flags);
+
+ lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_SAVE) |
+ PTP_PIN_CFG_PIN_DOM_SET(LAN966X_PHC_PORT) |
+ PTP_PIN_CFG_PIN_SYNC_SET(0),
+ PTP_PIN_CFG_PIN_ACTION |
+ PTP_PIN_CFG_PIN_DOM |
+ PTP_PIN_CFG_PIN_SYNC,
+ lan966x, PTP_PIN_CFG(TOD_ACC_PIN));
+
+ ts->tv_sec = lan_rd(lan966x, PTP_TOD_SEC_LSB(TOD_ACC_PIN));
+ curr_nsec = lan_rd(lan966x, PTP_TOD_NSEC(TOD_ACC_PIN));
+
+ ts->tv_nsec = nsec;
+
+ /* The seconds counter has incremented since the timestamp was
+ * captured, so the nanoseconds belong to the previous second
+ */
+ if (curr_nsec < nsec)
+ ts->tv_sec--;
+
+ spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags);
+}
+
+irqreturn_t lan966x_ptp_irq_handler(int irq, void *args)
+{
+ int budget = LAN966X_MAX_PTP_ID;
+ struct lan966x *lan966x = args;
+
+ while (budget--) {
+ struct sk_buff *skb, *skb_tmp, *skb_match = NULL;
+ struct skb_shared_hwtstamps shhwtstamps;
+ struct lan966x_port *port;
+ struct timespec64 ts;
+ unsigned long flags;
+ u32 val, id, txport;
+ u32 delay;
+
+ val = lan_rd(lan966x, PTP_TWOSTEP_CTRL);
+
+ /* Check if a timestamp can be retrieved */
+ if (!(val & PTP_TWOSTEP_CTRL_VLD))
+ break;
+
+ WARN_ON(val & PTP_TWOSTEP_CTRL_OVFL);
+
+ if (!(val & PTP_TWOSTEP_CTRL_STAMP_TX))
+ continue;
+
+ /* Retrieve the Tx port of the timestamp */
+ txport = PTP_TWOSTEP_CTRL_STAMP_PORT_GET(val);
+
+ /* Retrieve its associated skb */
+ port = lan966x->ports[txport];
+
+ /* Retrieve the delay */
+ delay = lan_rd(lan966x, PTP_TWOSTEP_STAMP);
+ delay = PTP_TWOSTEP_STAMP_STAMP_NSEC_GET(delay);
+
+ /* Get the next entry from the FIFO; it must be the Rx timestamp,
+ * whose value carries the ID of the frame
+ */
+ lan_rmw(PTP_TWOSTEP_CTRL_NXT_SET(1),
+ PTP_TWOSTEP_CTRL_NXT,
+ lan966x, PTP_TWOSTEP_CTRL);
+
+ val = lan_rd(lan966x, PTP_TWOSTEP_CTRL);
+
+ /* Check if a timestamp can be retrieved */
+ if (!(val & PTP_TWOSTEP_CTRL_VLD))
+ break;
+
+ /* Read the Rx timestamp entry to get the ID */
+ id = lan_rd(lan966x, PTP_TWOSTEP_STAMP);
+
+ spin_lock_irqsave(&port->tx_skbs.lock, flags);
+ skb_queue_walk_safe(&port->tx_skbs, skb, skb_tmp) {
+ if (LAN966X_SKB_CB(skb)->ts_id != id)
+ continue;
+
+ __skb_unlink(skb, &port->tx_skbs);
+ skb_match = skb;
+ break;
+ }
+ spin_unlock_irqrestore(&port->tx_skbs.lock, flags);
+
+ /* Next ts */
+ lan_rmw(PTP_TWOSTEP_CTRL_NXT_SET(1),
+ PTP_TWOSTEP_CTRL_NXT,
+ lan966x, PTP_TWOSTEP_CTRL);
+
+ if (WARN_ON(!skb_match))
+ continue;
+
+ spin_lock_irqsave(&lan966x->ptp_ts_id_lock, flags);
+ lan966x->ptp_skbs--;
+ spin_unlock_irqrestore(&lan966x->ptp_ts_id_lock, flags);
+
+ /* Get the h/w timestamp */
+ lan966x_get_hwtimestamp(lan966x, &ts, delay);
+
+ /* Set the timestamp into the skb */
+ shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
+ skb_tstamp_tx(skb_match, &shhwtstamps);
+
+ dev_kfree_skb_any(skb_match);
+ }
+
+ return IRQ_HANDLED;
+}
+
+irqreturn_t lan966x_ptp_ext_irq_handler(int irq, void *args)
+{
+ struct lan966x *lan966x = args;
+ struct lan966x_phc *phc;
+ unsigned long flags;
+ u64 time = 0;
+ time64_t s;
+ int pin, i;
+ s64 ns;
+
+ if (!(lan_rd(lan966x, PTP_PIN_INTR)))
+ return IRQ_NONE;
+
+ /* Go through all domains and see which pin generated the interrupt */
+ for (i = 0; i < LAN966X_PHC_COUNT; ++i) {
+ struct ptp_clock_event ptp_event = {0};
+
+ phc = &lan966x->phc[i];
+ pin = ptp_find_pin_unlocked(phc->clock, PTP_PF_EXTTS, 0);
+ if (pin == -1)
+ continue;
+
+ if (!(lan_rd(lan966x, PTP_PIN_INTR) & BIT(pin)))
+ continue;
+
+ spin_lock_irqsave(&lan966x->ptp_clock_lock, flags);
+
+ /* Clear the interrupt by writing 1 to the bit, so that a new
+ * interrupt can be raised
+ */
+ lan_wr(BIT(pin), lan966x, PTP_PIN_INTR);
+
+ /* Get current time */
+ s = lan_rd(lan966x, PTP_TOD_SEC_MSB(pin));
+ s <<= 32;
+ s |= lan_rd(lan966x, PTP_TOD_SEC_LSB(pin));
+ ns = lan_rd(lan966x, PTP_TOD_NSEC(pin));
+ ns &= PTP_TOD_NSEC_TOD_NSEC;
+
+ spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags);
+
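+ /* As in lan966x_ptp_gettime64() below: nanosecond values
+ * 0x3FFFFFF0..0x3FFFFFFF encode -16..-1 ns, so fold them into
+ * the previous second.
+ */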
+ if ((ns & 0xFFFFFFF0) == 0x3FFFFFF0) {
+ s--;
+ ns &= 0xf;
+ ns += 999999984;
+ }
+ time = ktime_set(s, ns);
+
+ ptp_event.index = pin;
+ ptp_event.timestamp = time;
+ ptp_event.type = PTP_CLOCK_EXTTS;
+ ptp_clock_event(phc->clock, &ptp_event);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int lan966x_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct lan966x_phc *phc = container_of(ptp, struct lan966x_phc, info);
+ struct lan966x *lan966x = phc->lan966x;
+ unsigned long flags;
+ bool neg_adj = 0;
+ u64 tod_inc;
+ u64 ref;
+
+ if (!scaled_ppm)
+ return 0;
+
+ if (scaled_ppm < 0) {
+ neg_adj = 1;
+ scaled_ppm = -scaled_ppm;
+ }
+
+ tod_inc = lan966x_ptp_get_nominal_value();
+
+ /* The multiplication is split into two separate additions to avoid
+ * overflow: with its 16-bit fractional part, a scaled_ppm bigger
+ * than about 20 ppm would overflow a single multiplication.
+ */
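+ /* For example, scaled_ppm == 65536 is exactly 1 ppm (the value has a
+ * 16-bit fractional part), which yields ref == LAN966X_1PPM_FORMAT.
+ */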
+ ref = LAN966X_1PPM_FORMAT * (scaled_ppm >> 16);
+ ref += (LAN966X_1PPM_FORMAT * (0xffff & scaled_ppm)) >> 16;
+ tod_inc = neg_adj ? tod_inc - ref : tod_inc + ref;
+
+ spin_lock_irqsave(&lan966x->ptp_clock_lock, flags);
+
+ lan_rmw(PTP_DOM_CFG_CLKCFG_DIS_SET(1 << BIT(phc->index)),
+ PTP_DOM_CFG_CLKCFG_DIS,
+ lan966x, PTP_DOM_CFG);
+
+ lan_wr((u32)tod_inc & 0xFFFFFFFF, lan966x,
+ PTP_CLK_PER_CFG(phc->index, 0));
+ lan_wr((u32)(tod_inc >> 32), lan966x,
+ PTP_CLK_PER_CFG(phc->index, 1));
+
+ lan_rmw(PTP_DOM_CFG_CLKCFG_DIS_SET(0),
+ PTP_DOM_CFG_CLKCFG_DIS,
+ lan966x, PTP_DOM_CFG);
+
+ spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags);
+
+ return 0;
+}
+
+static int lan966x_ptp_settime64(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct lan966x_phc *phc = container_of(ptp, struct lan966x_phc, info);
+ struct lan966x *lan966x = phc->lan966x;
+ unsigned long flags;
+
+ spin_lock_irqsave(&lan966x->ptp_clock_lock, flags);
+
+ /* Must be in IDLE mode before the time can be loaded */
+ lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_IDLE) |
+ PTP_PIN_CFG_PIN_DOM_SET(phc->index) |
+ PTP_PIN_CFG_PIN_SYNC_SET(0),
+ PTP_PIN_CFG_PIN_ACTION |
+ PTP_PIN_CFG_PIN_DOM |
+ PTP_PIN_CFG_PIN_SYNC,
+ lan966x, PTP_PIN_CFG(TOD_ACC_PIN));
+
+ /* Set new value */
+ lan_wr(PTP_TOD_SEC_MSB_TOD_SEC_MSB_SET(upper_32_bits(ts->tv_sec)),
+ lan966x, PTP_TOD_SEC_MSB(TOD_ACC_PIN));
+ lan_wr(lower_32_bits(ts->tv_sec),
+ lan966x, PTP_TOD_SEC_LSB(TOD_ACC_PIN));
+ lan_wr(ts->tv_nsec, lan966x, PTP_TOD_NSEC(TOD_ACC_PIN));
+
+ /* Apply new values */
+ lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_LOAD) |
+ PTP_PIN_CFG_PIN_DOM_SET(phc->index) |
+ PTP_PIN_CFG_PIN_SYNC_SET(0),
+ PTP_PIN_CFG_PIN_ACTION |
+ PTP_PIN_CFG_PIN_DOM |
+ PTP_PIN_CFG_PIN_SYNC,
+ lan966x, PTP_PIN_CFG(TOD_ACC_PIN));
+
+ spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags);
+
+ return 0;
+}
+
+int lan966x_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts)
+{
+ struct lan966x_phc *phc = container_of(ptp, struct lan966x_phc, info);
+ struct lan966x *lan966x = phc->lan966x;
+ unsigned long flags;
+ time64_t s;
+ s64 ns;
+
+ spin_lock_irqsave(&lan966x->ptp_clock_lock, flags);
+
+ lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_SAVE) |
+ PTP_PIN_CFG_PIN_DOM_SET(phc->index) |
+ PTP_PIN_CFG_PIN_SYNC_SET(0),
+ PTP_PIN_CFG_PIN_ACTION |
+ PTP_PIN_CFG_PIN_DOM |
+ PTP_PIN_CFG_PIN_SYNC,
+ lan966x, PTP_PIN_CFG(TOD_ACC_PIN));
+
+ s = lan_rd(lan966x, PTP_TOD_SEC_MSB(TOD_ACC_PIN));
+ s <<= 32;
+ s |= lan_rd(lan966x, PTP_TOD_SEC_LSB(TOD_ACC_PIN));
+ ns = lan_rd(lan966x, PTP_TOD_NSEC(TOD_ACC_PIN));
+ ns &= PTP_TOD_NSEC_TOD_NSEC;
+
+ spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags);
+
+ /* Deal with negative values: nanosecond counts 0x3FFFFFF0..0x3FFFFFFF
+ * encode -16..-1 ns, so borrow one second and add 10^9 - 16
+ */
+ if ((ns & 0xFFFFFFF0) == 0x3FFFFFF0) {
+ s--;
+ ns &= 0xf;
+ ns += 999999984;
+ }
+
+ set_normalized_timespec64(ts, s, ns);
+ return 0;
+}
+
+static int lan966x_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct lan966x_phc *phc = container_of(ptp, struct lan966x_phc, info);
+ struct lan966x *lan966x = phc->lan966x;
+
+ if (delta > -(NSEC_PER_SEC / 2) && delta < (NSEC_PER_SEC / 2)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&lan966x->ptp_clock_lock, flags);
+
+ /* Must be in IDLE mode before the time can be loaded */
+ lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_IDLE) |
+ PTP_PIN_CFG_PIN_DOM_SET(phc->index) |
+ PTP_PIN_CFG_PIN_SYNC_SET(0),
+ PTP_PIN_CFG_PIN_ACTION |
+ PTP_PIN_CFG_PIN_DOM |
+ PTP_PIN_CFG_PIN_SYNC,
+ lan966x, PTP_PIN_CFG(TOD_ACC_PIN));
+
+ lan_wr(PTP_TOD_NSEC_TOD_NSEC_SET(delta),
+ lan966x, PTP_TOD_NSEC(TOD_ACC_PIN));
+
+ /* Adjust time with the value of PTP_TOD_NSEC */
+ lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_DELTA) |
+ PTP_PIN_CFG_PIN_DOM_SET(phc->index) |
+ PTP_PIN_CFG_PIN_SYNC_SET(0),
+ PTP_PIN_CFG_PIN_ACTION |
+ PTP_PIN_CFG_PIN_DOM |
+ PTP_PIN_CFG_PIN_SYNC,
+ lan966x, PTP_PIN_CFG(TOD_ACC_PIN));
+
+ spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags);
+ } else {
+ /* Fall back to lan966x_ptp_settime64(), which is not exact */
+ struct timespec64 ts;
+ u64 now;
+
+ lan966x_ptp_gettime64(ptp, &ts);
+
+ now = ktime_to_ns(timespec64_to_ktime(ts));
+ ts = ns_to_timespec64(now + delta);
+
+ lan966x_ptp_settime64(ptp, &ts);
+ }
+
+ return 0;
+}
+
+static int lan966x_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
+ enum ptp_pin_function func, unsigned int chan)
+{
+ struct lan966x_phc *phc = container_of(ptp, struct lan966x_phc, info);
+ struct lan966x *lan966x = phc->lan966x;
+ struct ptp_clock_info *info;
+ int i;
+
+ /* Currently only one channel is supported */
+ if (chan != 0)
+ return -1;
+
+ switch (func) {
+ case PTP_PF_NONE:
+ case PTP_PF_PEROUT:
+ case PTP_PF_EXTTS:
+ break;
+ default:
+ return -1;
+ }
+
+ /* The PTP pins are shared by all the PHCs, so it is required to
+ * check whether the pin is connected to another PHC. The pin is
+ * considered connected if it already has a function on that PHC.
+ */
+ for (i = 0; i < LAN966X_PHC_COUNT; ++i) {
+ info = &lan966x->phc[i].info;
+
+ /* Ignore the check with ourself */
+ if (ptp == info)
+ continue;
+
+ if (info->pin_config[pin].func == PTP_PF_PEROUT ||
+ info->pin_config[pin].func == PTP_PF_EXTTS)
+ return -1;
+ }
+
+ return 0;
+}
+
+static int lan966x_ptp_perout(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ struct lan966x_phc *phc = container_of(ptp, struct lan966x_phc, info);
+ struct lan966x *lan966x = phc->lan966x;
+ struct timespec64 ts_phase, ts_period;
+ unsigned long flags;
+ s64 wf_high, wf_low;
+ bool pps = false;
+ int pin;
+
+ if (rq->perout.flags & ~(PTP_PEROUT_DUTY_CYCLE |
+ PTP_PEROUT_PHASE))
+ return -EOPNOTSUPP;
+
+ pin = ptp_find_pin(phc->clock, PTP_PF_PEROUT, rq->perout.index);
+ if (pin == -1 || pin >= LAN966X_PHC_PINS_NUM)
+ return -EINVAL;
+
+ if (!on) {
+ spin_lock_irqsave(&lan966x->ptp_clock_lock, flags);
+ lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_IDLE) |
+ PTP_PIN_CFG_PIN_DOM_SET(phc->index) |
+ PTP_PIN_CFG_PIN_SYNC_SET(0),
+ PTP_PIN_CFG_PIN_ACTION |
+ PTP_PIN_CFG_PIN_DOM |
+ PTP_PIN_CFG_PIN_SYNC,
+ lan966x, PTP_PIN_CFG(pin));
+ spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags);
+ return 0;
+ }
+
+ if (rq->perout.period.sec == 1 &&
+ rq->perout.period.nsec == 0)
+ pps = true;
+
+ if (rq->perout.flags & PTP_PEROUT_PHASE) {
+ ts_phase.tv_sec = rq->perout.phase.sec;
+ ts_phase.tv_nsec = rq->perout.phase.nsec;
+ } else {
+ ts_phase.tv_sec = rq->perout.start.sec;
+ ts_phase.tv_nsec = rq->perout.start.nsec;
+ }
+
+ if (ts_phase.tv_sec || (ts_phase.tv_nsec && !pps)) {
+ dev_warn(lan966x->dev,
+ "Absolute time not supported!\n");
+ return -EINVAL;
+ }
+
+ if (rq->perout.flags & PTP_PEROUT_DUTY_CYCLE) {
+ struct timespec64 ts_on;
+
+ ts_on.tv_sec = rq->perout.on.sec;
+ ts_on.tv_nsec = rq->perout.on.nsec;
+
+ wf_high = timespec64_to_ns(&ts_on);
+ } else {
+ wf_high = 5000;
+ }
+
+ if (pps) {
+ spin_lock_irqsave(&lan966x->ptp_clock_lock, flags);
+ lan_wr(PTP_WF_LOW_PERIOD_PIN_WFL(ts_phase.tv_nsec),
+ lan966x, PTP_WF_LOW_PERIOD(pin));
+ lan_wr(PTP_WF_HIGH_PERIOD_PIN_WFH(wf_high),
+ lan966x, PTP_WF_HIGH_PERIOD(pin));
+ lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_CLOCK) |
+ PTP_PIN_CFG_PIN_DOM_SET(phc->index) |
+ PTP_PIN_CFG_PIN_SYNC_SET(3),
+ PTP_PIN_CFG_PIN_ACTION |
+ PTP_PIN_CFG_PIN_DOM |
+ PTP_PIN_CFG_PIN_SYNC,
+ lan966x, PTP_PIN_CFG(pin));
+ spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags);
+ return 0;
+ }
+
+ ts_period.tv_sec = rq->perout.period.sec;
+ ts_period.tv_nsec = rq->perout.period.nsec;
+
+ wf_low = timespec64_to_ns(&ts_period);
+ wf_low -= wf_high;
+
+ spin_lock_irqsave(&lan966x->ptp_clock_lock, flags);
+ lan_wr(PTP_WF_LOW_PERIOD_PIN_WFL(wf_low),
+ lan966x, PTP_WF_LOW_PERIOD(pin));
+ lan_wr(PTP_WF_HIGH_PERIOD_PIN_WFH(wf_high),
+ lan966x, PTP_WF_HIGH_PERIOD(pin));
+ lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_CLOCK) |
+ PTP_PIN_CFG_PIN_DOM_SET(phc->index) |
+ PTP_PIN_CFG_PIN_SYNC_SET(0),
+ PTP_PIN_CFG_PIN_ACTION |
+ PTP_PIN_CFG_PIN_DOM |
+ PTP_PIN_CFG_PIN_SYNC,
+ lan966x, PTP_PIN_CFG(pin));
+ spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags);
+
+ return 0;
+}
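+
+/* A quick way to exercise this path (assuming the in-tree testptp utility
+ * from tools/testing/selftests/ptp/): after assigning PTP_PF_PEROUT to a
+ * pin with `testptp -L pin,2`, running `testptp -d /dev/ptpN -p 1000000000`
+ * requests a 1 s period and takes the PPS branch above, while any other
+ * period programs the waveform registers directly.
+ */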
+
+static int lan966x_ptp_extts(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ struct lan966x_phc *phc = container_of(ptp, struct lan966x_phc, info);
+ struct lan966x *lan966x = phc->lan966x;
+ unsigned long flags;
+ int pin;
+ u32 val;
+
+ if (lan966x->ptp_ext_irq <= 0)
+ return -EOPNOTSUPP;
+
+ /* Reject requests with unsupported flags */
+ if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
+ PTP_RISING_EDGE |
+ PTP_STRICT_FLAGS))
+ return -EOPNOTSUPP;
+
+ pin = ptp_find_pin(phc->clock, PTP_PF_EXTTS, rq->extts.index);
+ if (pin == -1 || pin >= LAN966X_PHC_PINS_NUM)
+ return -EINVAL;
+
+ spin_lock_irqsave(&lan966x->ptp_clock_lock, flags);
+ lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_SAVE) |
+ PTP_PIN_CFG_PIN_SYNC_SET(on ? 3 : 0) |
+ PTP_PIN_CFG_PIN_DOM_SET(phc->index) |
+ PTP_PIN_CFG_PIN_SELECT_SET(pin),
+ PTP_PIN_CFG_PIN_ACTION |
+ PTP_PIN_CFG_PIN_SYNC |
+ PTP_PIN_CFG_PIN_DOM |
+ PTP_PIN_CFG_PIN_SELECT,
+ lan966x, PTP_PIN_CFG(pin));
+
+ val = lan_rd(lan966x, PTP_PIN_INTR_ENA);
+ if (on)
+ val |= BIT(pin);
+ else
+ val &= ~BIT(pin);
+ lan_wr(val, lan966x, PTP_PIN_INTR_ENA);
+
+ spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags);
+
+ return 0;
+}
+
+static int lan966x_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ switch (rq->type) {
+ case PTP_CLK_REQ_PEROUT:
+ return lan966x_ptp_perout(ptp, rq, on);
+ case PTP_CLK_REQ_EXTTS:
+ return lan966x_ptp_extts(ptp, rq, on);
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static struct ptp_clock_info lan966x_ptp_clock_info = {
+ .owner = THIS_MODULE,
+ .name = "lan966x ptp",
+ .max_adj = 200000,
+ .gettime64 = lan966x_ptp_gettime64,
+ .settime64 = lan966x_ptp_settime64,
+ .adjtime = lan966x_ptp_adjtime,
+ .adjfine = lan966x_ptp_adjfine,
+ .verify = lan966x_ptp_verify,
+ .enable = lan966x_ptp_enable,
+ .n_per_out = LAN966X_PHC_PINS_NUM,
+ .n_ext_ts = LAN966X_PHC_PINS_NUM,
+ .n_pins = LAN966X_PHC_PINS_NUM,
+};
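+
+/* Note: ptp_clock_info.max_adj is expressed in parts per billion, so the
+ * 200000 above allows frequency adjustments of up to +/-200 ppm.
+ */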
+
+static int lan966x_ptp_phc_init(struct lan966x *lan966x,
+ int index,
+ struct ptp_clock_info *clock_info)
+{
+ struct lan966x_phc *phc = &lan966x->phc[index];
+ struct ptp_pin_desc *p;
+ int i;
+
+ for (i = 0; i < LAN966X_PHC_PINS_NUM; i++) {
+ p = &phc->pins[i];
+
+ snprintf(p->name, sizeof(p->name), "pin%d", i);
+ p->index = i;
+ p->func = PTP_PF_NONE;
+ }
+
+ phc->info = *clock_info;
+ phc->info.pin_config = &phc->pins[0];
+ phc->clock = ptp_clock_register(&phc->info, lan966x->dev);
+ if (IS_ERR(phc->clock))
+ return PTR_ERR(phc->clock);
+
+ phc->index = index;
+ phc->lan966x = lan966x;
+
+ return 0;
+}
+
+int lan966x_ptp_init(struct lan966x *lan966x)
+{
+ u64 tod_adj = lan966x_ptp_get_nominal_value();
+ struct lan966x_port *port;
+ int err, i;
+
+ if (!lan966x->ptp)
+ return 0;
+
+ for (i = 0; i < LAN966X_PHC_COUNT; ++i) {
+ err = lan966x_ptp_phc_init(lan966x, i, &lan966x_ptp_clock_info);
+ if (err)
+ return err;
+ }
+
+ spin_lock_init(&lan966x->ptp_clock_lock);
+ spin_lock_init(&lan966x->ptp_ts_id_lock);
+ mutex_init(&lan966x->ptp_lock);
+
+ /* Disable master counters */
+ lan_wr(PTP_DOM_CFG_ENA_SET(0), lan966x, PTP_DOM_CFG);
+
+ /* Configure the nominal TOD increment per clock cycle */
+ lan_rmw(PTP_DOM_CFG_CLKCFG_DIS_SET(0x7),
+ PTP_DOM_CFG_CLKCFG_DIS,
+ lan966x, PTP_DOM_CFG);
+
+ for (i = 0; i < LAN966X_PHC_COUNT; ++i) {
+ lan_wr((u32)tod_adj & 0xFFFFFFFF, lan966x,
+ PTP_CLK_PER_CFG(i, 0));
+ lan_wr((u32)(tod_adj >> 32), lan966x,
+ PTP_CLK_PER_CFG(i, 1));
+ }
+
+ lan_rmw(PTP_DOM_CFG_CLKCFG_DIS_SET(0),
+ PTP_DOM_CFG_CLKCFG_DIS,
+ lan966x, PTP_DOM_CFG);
+
+ /* Enable master counters */
+ lan_wr(PTP_DOM_CFG_ENA_SET(0x7), lan966x, PTP_DOM_CFG);
+
+ for (i = 0; i < lan966x->num_phys_ports; i++) {
+ port = lan966x->ports[i];
+ if (!port)
+ continue;
+
+ skb_queue_head_init(&port->tx_skbs);
+ }
+
+ return 0;
+}
+
+void lan966x_ptp_deinit(struct lan966x *lan966x)
+{
+ struct lan966x_port *port;
+ int i;
+
+ if (!lan966x->ptp)
+ return;
+
+ for (i = 0; i < lan966x->num_phys_ports; i++) {
+ port = lan966x->ports[i];
+ if (!port)
+ continue;
+
+ skb_queue_purge(&port->tx_skbs);
+ }
+
+ for (i = 0; i < LAN966X_PHC_COUNT; ++i)
+ ptp_clock_unregister(lan966x->phc[i].clock);
+}
+
+void lan966x_ptp_rxtstamp(struct lan966x *lan966x, struct sk_buff *skb,
+ u64 src_port, u64 timestamp)
+{
+ struct skb_shared_hwtstamps *shhwtstamps;
+ struct lan966x_phc *phc;
+ struct timespec64 ts;
+ u64 full_ts_in_ns;
+
+ if (!lan966x->ptp ||
+ !lan966x->ports[src_port]->ptp_rx_cmd)
+ return;
+
+ phc = &lan966x->phc[LAN966X_PHC_PORT];
+ lan966x_ptp_gettime64(&phc->info, &ts);
+
+ /* Drop the sub-ns precision */
+ timestamp = timestamp >> 2;
+ if (ts.tv_nsec < timestamp)
+ ts.tv_sec--;
+ ts.tv_nsec = timestamp;
+ full_ts_in_ns = ktime_set(ts.tv_sec, ts.tv_nsec);
+
+ shhwtstamps = skb_hwtstamps(skb);
+ shhwtstamps->hwtstamp = full_ts_in_ns;
+}
+
+u32 lan966x_ptp_get_period_ps(void)
+{
+ /* This represents the system clock period in picoseconds */
+ return 15125;
+}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h b/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h
new file mode 100644
index 0000000000..4b553927d2
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h
@@ -0,0 +1,1888 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+
+/* This file is autogenerated by cml-utils 2021-10-10 13:25:08 +0200.
+ * Commit ID: 26db2002924973d36a30b369c94f025a678fe9ea (dirty)
+ */
+
+#ifndef _LAN966X_REGS_H_
+#define _LAN966X_REGS_H_
+
+#include <linux/bitfield.h>
+#include <linux/types.h>
+#include <linux/bug.h>
+
+enum lan966x_target {
+ TARGET_AFI = 2,
+ TARGET_ANA = 3,
+ TARGET_CHIP_TOP = 5,
+ TARGET_CPU = 6,
+ TARGET_DEV = 13,
+ TARGET_FDMA = 21,
+ TARGET_GCB = 27,
+ TARGET_ORG = 36,
+ TARGET_PTP = 41,
+ TARGET_QS = 42,
+ TARGET_QSYS = 46,
+ TARGET_REW = 47,
+ TARGET_SYS = 52,
+ TARGET_VCAP = 61,
+ NUM_TARGETS = 66
+};
+
+#define __REG(...) __VA_ARGS__
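+
+/* The __REG() argument list mirrors what the lan_rd()/lan_wr()/lan_rmw()
+ * accessors in lan966x_main.h expect:
+ * (target, target instance, target count, group base address,
+ * group instance, group count, group width, register offset,
+ * register instance, register count, register width)
+ * so the byte offset of a register works out to
+ * gbase + ginst * gwidth + raddr + rinst * rwidth;
+ * e.g. ANA_VLAN_CFG(g) below resolves to 28672 + g * 128.
+ */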
+
+/* AFI:PORT_TBL:PORT_FRM_OUT */
+#define AFI_PORT_FRM_OUT(g) __REG(TARGET_AFI, 0, 1, 98816, g, 10, 8, 0, 0, 1, 4)
+
+#define AFI_PORT_FRM_OUT_FRM_OUT_CNT GENMASK(26, 16)
+#define AFI_PORT_FRM_OUT_FRM_OUT_CNT_SET(x)\
+ FIELD_PREP(AFI_PORT_FRM_OUT_FRM_OUT_CNT, x)
+#define AFI_PORT_FRM_OUT_FRM_OUT_CNT_GET(x)\
+ FIELD_GET(AFI_PORT_FRM_OUT_FRM_OUT_CNT, x)
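+
+/* Example: AFI_PORT_FRM_OUT_FRM_OUT_CNT_SET(5) expands to
+ * FIELD_PREP(GENMASK(26, 16), 5), i.e. 5 << 16, and the matching _GET()
+ * uses FIELD_GET() to mask and shift the field back out. The same
+ * pattern repeats for every field in this file.
+ */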
+
+/* AFI:PORT_TBL:PORT_CFG */
+#define AFI_PORT_CFG(g) __REG(TARGET_AFI, 0, 1, 98816, g, 10, 8, 4, 0, 1, 4)
+
+#define AFI_PORT_CFG_FC_SKIP_TTI_INJ BIT(16)
+#define AFI_PORT_CFG_FC_SKIP_TTI_INJ_SET(x)\
+ FIELD_PREP(AFI_PORT_CFG_FC_SKIP_TTI_INJ, x)
+#define AFI_PORT_CFG_FC_SKIP_TTI_INJ_GET(x)\
+ FIELD_GET(AFI_PORT_CFG_FC_SKIP_TTI_INJ, x)
+
+#define AFI_PORT_CFG_FRM_OUT_MAX GENMASK(9, 0)
+#define AFI_PORT_CFG_FRM_OUT_MAX_SET(x)\
+ FIELD_PREP(AFI_PORT_CFG_FRM_OUT_MAX, x)
+#define AFI_PORT_CFG_FRM_OUT_MAX_GET(x)\
+ FIELD_GET(AFI_PORT_CFG_FRM_OUT_MAX, x)
+
+/* ANA:ANA:ADVLEARN */
+#define ANA_ADVLEARN __REG(TARGET_ANA, 0, 1, 29824, 0, 1, 244, 0, 0, 1, 4)
+
+#define ANA_ADVLEARN_VLAN_CHK BIT(0)
+#define ANA_ADVLEARN_VLAN_CHK_SET(x)\
+ FIELD_PREP(ANA_ADVLEARN_VLAN_CHK, x)
+#define ANA_ADVLEARN_VLAN_CHK_GET(x)\
+ FIELD_GET(ANA_ADVLEARN_VLAN_CHK, x)
+
+/* ANA:ANA:VLANMASK */
+#define ANA_VLANMASK __REG(TARGET_ANA, 0, 1, 29824, 0, 1, 244, 8, 0, 1, 4)
+
+/* ANA:ANA:ANAINTR */
+#define ANA_ANAINTR __REG(TARGET_ANA, 0, 1, 29824, 0, 1, 244, 16, 0, 1, 4)
+
+#define ANA_ANAINTR_INTR BIT(1)
+#define ANA_ANAINTR_INTR_SET(x)\
+ FIELD_PREP(ANA_ANAINTR_INTR, x)
+#define ANA_ANAINTR_INTR_GET(x)\
+ FIELD_GET(ANA_ANAINTR_INTR, x)
+
+#define ANA_ANAINTR_INTR_ENA BIT(0)
+#define ANA_ANAINTR_INTR_ENA_SET(x)\
+ FIELD_PREP(ANA_ANAINTR_INTR_ENA, x)
+#define ANA_ANAINTR_INTR_ENA_GET(x)\
+ FIELD_GET(ANA_ANAINTR_INTR_ENA, x)
+
+/* ANA:ANA:AUTOAGE */
+#define ANA_AUTOAGE __REG(TARGET_ANA, 0, 1, 29824, 0, 1, 244, 44, 0, 1, 4)
+
+#define ANA_AUTOAGE_AGE_PERIOD GENMASK(20, 1)
+#define ANA_AUTOAGE_AGE_PERIOD_SET(x)\
+ FIELD_PREP(ANA_AUTOAGE_AGE_PERIOD, x)
+#define ANA_AUTOAGE_AGE_PERIOD_GET(x)\
+ FIELD_GET(ANA_AUTOAGE_AGE_PERIOD, x)
+
+/* ANA:ANA:MIRRORPORTS */
+#define ANA_MIRRORPORTS __REG(TARGET_ANA, 0, 1, 29824, 0, 1, 244, 60, 0, 1, 4)
+
+#define ANA_MIRRORPORTS_MIRRORPORTS GENMASK(8, 0)
+#define ANA_MIRRORPORTS_MIRRORPORTS_SET(x)\
+ FIELD_PREP(ANA_MIRRORPORTS_MIRRORPORTS, x)
+#define ANA_MIRRORPORTS_MIRRORPORTS_GET(x)\
+ FIELD_GET(ANA_MIRRORPORTS_MIRRORPORTS, x)
+
+/* ANA:ANA:EMIRRORPORTS */
+#define ANA_EMIRRORPORTS __REG(TARGET_ANA, 0, 1, 29824, 0, 1, 244, 64, 0, 1, 4)
+
+#define ANA_EMIRRORPORTS_EMIRRORPORTS GENMASK(8, 0)
+#define ANA_EMIRRORPORTS_EMIRRORPORTS_SET(x)\
+ FIELD_PREP(ANA_EMIRRORPORTS_EMIRRORPORTS, x)
+#define ANA_EMIRRORPORTS_EMIRRORPORTS_GET(x)\
+ FIELD_GET(ANA_EMIRRORPORTS_EMIRRORPORTS, x)
+
+/* ANA:ANA:FLOODING */
+#define ANA_FLOODING(r) __REG(TARGET_ANA, 0, 1, 29824, 0, 1, 244, 68, r, 8, 4)
+
+#define ANA_FLOODING_FLD_UNICAST GENMASK(17, 12)
+#define ANA_FLOODING_FLD_UNICAST_SET(x)\
+ FIELD_PREP(ANA_FLOODING_FLD_UNICAST, x)
+#define ANA_FLOODING_FLD_UNICAST_GET(x)\
+ FIELD_GET(ANA_FLOODING_FLD_UNICAST, x)
+
+#define ANA_FLOODING_FLD_BROADCAST GENMASK(11, 6)
+#define ANA_FLOODING_FLD_BROADCAST_SET(x)\
+ FIELD_PREP(ANA_FLOODING_FLD_BROADCAST, x)
+#define ANA_FLOODING_FLD_BROADCAST_GET(x)\
+ FIELD_GET(ANA_FLOODING_FLD_BROADCAST, x)
+
+#define ANA_FLOODING_FLD_MULTICAST GENMASK(5, 0)
+#define ANA_FLOODING_FLD_MULTICAST_SET(x)\
+ FIELD_PREP(ANA_FLOODING_FLD_MULTICAST, x)
+#define ANA_FLOODING_FLD_MULTICAST_GET(x)\
+ FIELD_GET(ANA_FLOODING_FLD_MULTICAST, x)
+
+/* ANA:ANA:FLOODING_IPMC */
+#define ANA_FLOODING_IPMC __REG(TARGET_ANA, 0, 1, 29824, 0, 1, 244, 100, 0, 1, 4)
+
+#define ANA_FLOODING_IPMC_FLD_MC4_CTRL GENMASK(23, 18)
+#define ANA_FLOODING_IPMC_FLD_MC4_CTRL_SET(x)\
+ FIELD_PREP(ANA_FLOODING_IPMC_FLD_MC4_CTRL, x)
+#define ANA_FLOODING_IPMC_FLD_MC4_CTRL_GET(x)\
+ FIELD_GET(ANA_FLOODING_IPMC_FLD_MC4_CTRL, x)
+
+#define ANA_FLOODING_IPMC_FLD_MC4_DATA GENMASK(17, 12)
+#define ANA_FLOODING_IPMC_FLD_MC4_DATA_SET(x)\
+ FIELD_PREP(ANA_FLOODING_IPMC_FLD_MC4_DATA, x)
+#define ANA_FLOODING_IPMC_FLD_MC4_DATA_GET(x)\
+ FIELD_GET(ANA_FLOODING_IPMC_FLD_MC4_DATA, x)
+
+#define ANA_FLOODING_IPMC_FLD_MC6_CTRL GENMASK(11, 6)
+#define ANA_FLOODING_IPMC_FLD_MC6_CTRL_SET(x)\
+ FIELD_PREP(ANA_FLOODING_IPMC_FLD_MC6_CTRL, x)
+#define ANA_FLOODING_IPMC_FLD_MC6_CTRL_GET(x)\
+ FIELD_GET(ANA_FLOODING_IPMC_FLD_MC6_CTRL, x)
+
+#define ANA_FLOODING_IPMC_FLD_MC6_DATA GENMASK(5, 0)
+#define ANA_FLOODING_IPMC_FLD_MC6_DATA_SET(x)\
+ FIELD_PREP(ANA_FLOODING_IPMC_FLD_MC6_DATA, x)
+#define ANA_FLOODING_IPMC_FLD_MC6_DATA_GET(x)\
+ FIELD_GET(ANA_FLOODING_IPMC_FLD_MC6_DATA, x)
+
+/* ANA:PGID:PGID */
+#define ANA_PGID(g) __REG(TARGET_ANA, 0, 1, 27648, g, 89, 8, 0, 0, 1, 4)
+
+#define ANA_PGID_PGID GENMASK(8, 0)
+#define ANA_PGID_PGID_SET(x)\
+ FIELD_PREP(ANA_PGID_PGID, x)
+#define ANA_PGID_PGID_GET(x)\
+ FIELD_GET(ANA_PGID_PGID, x)
+
+/* ANA:PGID:PGID_CFG */
+#define ANA_PGID_CFG(g) __REG(TARGET_ANA, 0, 1, 27648, g, 89, 8, 4, 0, 1, 4)
+
+#define ANA_PGID_CFG_OBEY_VLAN BIT(0)
+#define ANA_PGID_CFG_OBEY_VLAN_SET(x)\
+ FIELD_PREP(ANA_PGID_CFG_OBEY_VLAN, x)
+#define ANA_PGID_CFG_OBEY_VLAN_GET(x)\
+ FIELD_GET(ANA_PGID_CFG_OBEY_VLAN, x)
+
+/* ANA:ANA_TABLES:MACHDATA */
+#define ANA_MACHDATA __REG(TARGET_ANA, 0, 1, 27520, 0, 1, 128, 40, 0, 1, 4)
+
+/* ANA:ANA_TABLES:MACLDATA */
+#define ANA_MACLDATA __REG(TARGET_ANA, 0, 1, 27520, 0, 1, 128, 44, 0, 1, 4)
+
+/* ANA:ANA_TABLES:MACACCESS */
+#define ANA_MACACCESS __REG(TARGET_ANA, 0, 1, 27520, 0, 1, 128, 48, 0, 1, 4)
+
+#define ANA_MACACCESS_CHANGE2SW BIT(17)
+#define ANA_MACACCESS_CHANGE2SW_SET(x)\
+ FIELD_PREP(ANA_MACACCESS_CHANGE2SW, x)
+#define ANA_MACACCESS_CHANGE2SW_GET(x)\
+ FIELD_GET(ANA_MACACCESS_CHANGE2SW, x)
+
+#define ANA_MACACCESS_MAC_CPU_COPY BIT(16)
+#define ANA_MACACCESS_MAC_CPU_COPY_SET(x)\
+ FIELD_PREP(ANA_MACACCESS_MAC_CPU_COPY, x)
+#define ANA_MACACCESS_MAC_CPU_COPY_GET(x)\
+ FIELD_GET(ANA_MACACCESS_MAC_CPU_COPY, x)
+
+#define ANA_MACACCESS_VALID BIT(12)
+#define ANA_MACACCESS_VALID_SET(x)\
+ FIELD_PREP(ANA_MACACCESS_VALID, x)
+#define ANA_MACACCESS_VALID_GET(x)\
+ FIELD_GET(ANA_MACACCESS_VALID, x)
+
+#define ANA_MACACCESS_ENTRYTYPE GENMASK(11, 10)
+#define ANA_MACACCESS_ENTRYTYPE_SET(x)\
+ FIELD_PREP(ANA_MACACCESS_ENTRYTYPE, x)
+#define ANA_MACACCESS_ENTRYTYPE_GET(x)\
+ FIELD_GET(ANA_MACACCESS_ENTRYTYPE, x)
+
+#define ANA_MACACCESS_DEST_IDX GENMASK(9, 4)
+#define ANA_MACACCESS_DEST_IDX_SET(x)\
+ FIELD_PREP(ANA_MACACCESS_DEST_IDX, x)
+#define ANA_MACACCESS_DEST_IDX_GET(x)\
+ FIELD_GET(ANA_MACACCESS_DEST_IDX, x)
+
+#define ANA_MACACCESS_MAC_TABLE_CMD GENMASK(3, 0)
+#define ANA_MACACCESS_MAC_TABLE_CMD_SET(x)\
+ FIELD_PREP(ANA_MACACCESS_MAC_TABLE_CMD, x)
+#define ANA_MACACCESS_MAC_TABLE_CMD_GET(x)\
+ FIELD_GET(ANA_MACACCESS_MAC_TABLE_CMD, x)
+
+/* ANA:ANA_TABLES:MACTINDX */
+#define ANA_MACTINDX __REG(TARGET_ANA, 0, 1, 27520, 0, 1, 128, 52, 0, 1, 4)
+
+#define ANA_MACTINDX_BUCKET GENMASK(12, 11)
+#define ANA_MACTINDX_BUCKET_SET(x)\
+ FIELD_PREP(ANA_MACTINDX_BUCKET, x)
+#define ANA_MACTINDX_BUCKET_GET(x)\
+ FIELD_GET(ANA_MACTINDX_BUCKET, x)
+
+#define ANA_MACTINDX_M_INDEX GENMASK(10, 0)
+#define ANA_MACTINDX_M_INDEX_SET(x)\
+ FIELD_PREP(ANA_MACTINDX_M_INDEX, x)
+#define ANA_MACTINDX_M_INDEX_GET(x)\
+ FIELD_GET(ANA_MACTINDX_M_INDEX, x)
+
+/* ANA:ANA_TABLES:VLAN_PORT_MASK */
+#define ANA_VLAN_PORT_MASK __REG(TARGET_ANA, 0, 1, 27520, 0, 1, 128, 56, 0, 1, 4)
+
+#define ANA_VLAN_PORT_MASK_VLAN_PORT_MASK GENMASK(8, 0)
+#define ANA_VLAN_PORT_MASK_VLAN_PORT_MASK_SET(x)\
+ FIELD_PREP(ANA_VLAN_PORT_MASK_VLAN_PORT_MASK, x)
+#define ANA_VLAN_PORT_MASK_VLAN_PORT_MASK_GET(x)\
+ FIELD_GET(ANA_VLAN_PORT_MASK_VLAN_PORT_MASK, x)
+
+/* ANA:ANA_TABLES:VLANACCESS */
+#define ANA_VLANACCESS __REG(TARGET_ANA, 0, 1, 27520, 0, 1, 128, 60, 0, 1, 4)
+
+#define ANA_VLANACCESS_VLAN_TBL_CMD GENMASK(1, 0)
+#define ANA_VLANACCESS_VLAN_TBL_CMD_SET(x)\
+ FIELD_PREP(ANA_VLANACCESS_VLAN_TBL_CMD, x)
+#define ANA_VLANACCESS_VLAN_TBL_CMD_GET(x)\
+ FIELD_GET(ANA_VLANACCESS_VLAN_TBL_CMD, x)
+
+/* ANA:ANA_TABLES:VLANTIDX */
+#define ANA_VLANTIDX __REG(TARGET_ANA, 0, 1, 27520, 0, 1, 128, 64, 0, 1, 4)
+
+#define ANA_VLANTIDX_VLAN_PGID_CPU_DIS BIT(18)
+#define ANA_VLANTIDX_VLAN_PGID_CPU_DIS_SET(x)\
+ FIELD_PREP(ANA_VLANTIDX_VLAN_PGID_CPU_DIS, x)
+#define ANA_VLANTIDX_VLAN_PGID_CPU_DIS_GET(x)\
+ FIELD_GET(ANA_VLANTIDX_VLAN_PGID_CPU_DIS, x)
+
+#define ANA_VLANTIDX_V_INDEX GENMASK(11, 0)
+#define ANA_VLANTIDX_V_INDEX_SET(x)\
+ FIELD_PREP(ANA_VLANTIDX_V_INDEX, x)
+#define ANA_VLANTIDX_V_INDEX_GET(x)\
+ FIELD_GET(ANA_VLANTIDX_V_INDEX, x)
+
+/* ANA:PORT:VLAN_CFG */
+#define ANA_VLAN_CFG(g) __REG(TARGET_ANA, 0, 1, 28672, g, 9, 128, 0, 0, 1, 4)
+
+#define ANA_VLAN_CFG_VLAN_AWARE_ENA BIT(20)
+#define ANA_VLAN_CFG_VLAN_AWARE_ENA_SET(x)\
+ FIELD_PREP(ANA_VLAN_CFG_VLAN_AWARE_ENA, x)
+#define ANA_VLAN_CFG_VLAN_AWARE_ENA_GET(x)\
+ FIELD_GET(ANA_VLAN_CFG_VLAN_AWARE_ENA, x)
+
+#define ANA_VLAN_CFG_VLAN_POP_CNT GENMASK(19, 18)
+#define ANA_VLAN_CFG_VLAN_POP_CNT_SET(x)\
+ FIELD_PREP(ANA_VLAN_CFG_VLAN_POP_CNT, x)
+#define ANA_VLAN_CFG_VLAN_POP_CNT_GET(x)\
+ FIELD_GET(ANA_VLAN_CFG_VLAN_POP_CNT, x)
+
+#define ANA_VLAN_CFG_VLAN_PCP GENMASK(15, 13)
+#define ANA_VLAN_CFG_VLAN_PCP_SET(x)\
+ FIELD_PREP(ANA_VLAN_CFG_VLAN_PCP, x)
+#define ANA_VLAN_CFG_VLAN_PCP_GET(x)\
+ FIELD_GET(ANA_VLAN_CFG_VLAN_PCP, x)
+
+#define ANA_VLAN_CFG_VLAN_DEI BIT(12)
+#define ANA_VLAN_CFG_VLAN_DEI_SET(x)\
+ FIELD_PREP(ANA_VLAN_CFG_VLAN_DEI, x)
+#define ANA_VLAN_CFG_VLAN_DEI_GET(x)\
+ FIELD_GET(ANA_VLAN_CFG_VLAN_DEI, x)
+
+#define ANA_VLAN_CFG_VLAN_VID GENMASK(11, 0)
+#define ANA_VLAN_CFG_VLAN_VID_SET(x)\
+ FIELD_PREP(ANA_VLAN_CFG_VLAN_VID, x)
+#define ANA_VLAN_CFG_VLAN_VID_GET(x)\
+ FIELD_GET(ANA_VLAN_CFG_VLAN_VID, x)
+
+/* ANA:PORT:DROP_CFG */
+#define ANA_DROP_CFG(g) __REG(TARGET_ANA, 0, 1, 28672, g, 9, 128, 4, 0, 1, 4)
+
+#define ANA_DROP_CFG_DROP_UNTAGGED_ENA BIT(6)
+#define ANA_DROP_CFG_DROP_UNTAGGED_ENA_SET(x)\
+ FIELD_PREP(ANA_DROP_CFG_DROP_UNTAGGED_ENA, x)
+#define ANA_DROP_CFG_DROP_UNTAGGED_ENA_GET(x)\
+ FIELD_GET(ANA_DROP_CFG_DROP_UNTAGGED_ENA, x)
+
+#define ANA_DROP_CFG_DROP_PRIO_S_TAGGED_ENA BIT(3)
+#define ANA_DROP_CFG_DROP_PRIO_S_TAGGED_ENA_SET(x)\
+ FIELD_PREP(ANA_DROP_CFG_DROP_PRIO_S_TAGGED_ENA, x)
+#define ANA_DROP_CFG_DROP_PRIO_S_TAGGED_ENA_GET(x)\
+ FIELD_GET(ANA_DROP_CFG_DROP_PRIO_S_TAGGED_ENA, x)
+
+#define ANA_DROP_CFG_DROP_PRIO_C_TAGGED_ENA BIT(2)
+#define ANA_DROP_CFG_DROP_PRIO_C_TAGGED_ENA_SET(x)\
+ FIELD_PREP(ANA_DROP_CFG_DROP_PRIO_C_TAGGED_ENA, x)
+#define ANA_DROP_CFG_DROP_PRIO_C_TAGGED_ENA_GET(x)\
+ FIELD_GET(ANA_DROP_CFG_DROP_PRIO_C_TAGGED_ENA, x)
+
+#define ANA_DROP_CFG_DROP_MC_SMAC_ENA BIT(0)
+#define ANA_DROP_CFG_DROP_MC_SMAC_ENA_SET(x)\
+ FIELD_PREP(ANA_DROP_CFG_DROP_MC_SMAC_ENA, x)
+#define ANA_DROP_CFG_DROP_MC_SMAC_ENA_GET(x)\
+ FIELD_GET(ANA_DROP_CFG_DROP_MC_SMAC_ENA, x)
+
+/* ANA:PORT:QOS_CFG */
+#define ANA_QOS_CFG(g) __REG(TARGET_ANA, 0, 1, 28672, g, 9, 128, 8, 0, 1, 4)
+
+#define ANA_QOS_CFG_DP_DEFAULT_VAL BIT(8)
+#define ANA_QOS_CFG_DP_DEFAULT_VAL_SET(x)\
+ FIELD_PREP(ANA_QOS_CFG_DP_DEFAULT_VAL, x)
+#define ANA_QOS_CFG_DP_DEFAULT_VAL_GET(x)\
+ FIELD_GET(ANA_QOS_CFG_DP_DEFAULT_VAL, x)
+
+#define ANA_QOS_CFG_QOS_DEFAULT_VAL GENMASK(7, 5)
+#define ANA_QOS_CFG_QOS_DEFAULT_VAL_SET(x)\
+ FIELD_PREP(ANA_QOS_CFG_QOS_DEFAULT_VAL, x)
+#define ANA_QOS_CFG_QOS_DEFAULT_VAL_GET(x)\
+ FIELD_GET(ANA_QOS_CFG_QOS_DEFAULT_VAL, x)
+
+#define ANA_QOS_CFG_QOS_DSCP_ENA BIT(4)
+#define ANA_QOS_CFG_QOS_DSCP_ENA_SET(x)\
+ FIELD_PREP(ANA_QOS_CFG_QOS_DSCP_ENA, x)
+#define ANA_QOS_CFG_QOS_DSCP_ENA_GET(x)\
+ FIELD_GET(ANA_QOS_CFG_QOS_DSCP_ENA, x)
+
+#define ANA_QOS_CFG_QOS_PCP_ENA BIT(3)
+#define ANA_QOS_CFG_QOS_PCP_ENA_SET(x)\
+ FIELD_PREP(ANA_QOS_CFG_QOS_PCP_ENA, x)
+#define ANA_QOS_CFG_QOS_PCP_ENA_GET(x)\
+ FIELD_GET(ANA_QOS_CFG_QOS_PCP_ENA, x)
+
+#define ANA_QOS_CFG_DSCP_REWR_CFG GENMASK(1, 0)
+#define ANA_QOS_CFG_DSCP_REWR_CFG_SET(x)\
+ FIELD_PREP(ANA_QOS_CFG_DSCP_REWR_CFG, x)
+#define ANA_QOS_CFG_DSCP_REWR_CFG_GET(x)\
+ FIELD_GET(ANA_QOS_CFG_DSCP_REWR_CFG, x)
+
+/* ANA:PORT:VCAP_CFG */
+#define ANA_VCAP_CFG(g) __REG(TARGET_ANA, 0, 1, 28672, g, 9, 128, 12, 0, 1, 4)
+
+#define ANA_VCAP_CFG_S1_ENA BIT(14)
+#define ANA_VCAP_CFG_S1_ENA_SET(x)\
+ FIELD_PREP(ANA_VCAP_CFG_S1_ENA, x)
+#define ANA_VCAP_CFG_S1_ENA_GET(x)\
+ FIELD_GET(ANA_VCAP_CFG_S1_ENA, x)
+
+/* ANA:PORT:VCAP_S1_KEY_CFG */
+#define ANA_VCAP_S1_CFG(g, r) __REG(TARGET_ANA, 0, 1, 28672, g, 9, 128, 16, r, 3, 4)
+
+#define ANA_VCAP_S1_CFG_KEY_RT_CFG GENMASK(11, 9)
+#define ANA_VCAP_S1_CFG_KEY_RT_CFG_SET(x)\
+ FIELD_PREP(ANA_VCAP_S1_CFG_KEY_RT_CFG, x)
+#define ANA_VCAP_S1_CFG_KEY_RT_CFG_GET(x)\
+ FIELD_GET(ANA_VCAP_S1_CFG_KEY_RT_CFG, x)
+
+#define ANA_VCAP_S1_CFG_KEY_IP6_CFG GENMASK(8, 6)
+#define ANA_VCAP_S1_CFG_KEY_IP6_CFG_SET(x)\
+ FIELD_PREP(ANA_VCAP_S1_CFG_KEY_IP6_CFG, x)
+#define ANA_VCAP_S1_CFG_KEY_IP6_CFG_GET(x)\
+ FIELD_GET(ANA_VCAP_S1_CFG_KEY_IP6_CFG, x)
+
+#define ANA_VCAP_S1_CFG_KEY_IP4_CFG GENMASK(5, 3)
+#define ANA_VCAP_S1_CFG_KEY_IP4_CFG_SET(x)\
+ FIELD_PREP(ANA_VCAP_S1_CFG_KEY_IP4_CFG, x)
+#define ANA_VCAP_S1_CFG_KEY_IP4_CFG_GET(x)\
+ FIELD_GET(ANA_VCAP_S1_CFG_KEY_IP4_CFG, x)
+
+#define ANA_VCAP_S1_CFG_KEY_OTHER_CFG GENMASK(2, 0)
+#define ANA_VCAP_S1_CFG_KEY_OTHER_CFG_SET(x)\
+ FIELD_PREP(ANA_VCAP_S1_CFG_KEY_OTHER_CFG, x)
+#define ANA_VCAP_S1_CFG_KEY_OTHER_CFG_GET(x)\
+ FIELD_GET(ANA_VCAP_S1_CFG_KEY_OTHER_CFG, x)
+
+/* ANA:PORT:VCAP_S2_CFG */
+#define ANA_VCAP_S2_CFG(g) __REG(TARGET_ANA, 0, 1, 28672, g, 9, 128, 28, 0, 1, 4)
+
+#define ANA_VCAP_S2_CFG_ISDX_ENA GENMASK(20, 19)
+#define ANA_VCAP_S2_CFG_ISDX_ENA_SET(x)\
+ FIELD_PREP(ANA_VCAP_S2_CFG_ISDX_ENA, x)
+#define ANA_VCAP_S2_CFG_ISDX_ENA_GET(x)\
+ FIELD_GET(ANA_VCAP_S2_CFG_ISDX_ENA, x)
+
+#define ANA_VCAP_S2_CFG_UDP_PAYLOAD_ENA GENMASK(18, 17)
+#define ANA_VCAP_S2_CFG_UDP_PAYLOAD_ENA_SET(x)\
+ FIELD_PREP(ANA_VCAP_S2_CFG_UDP_PAYLOAD_ENA, x)
+#define ANA_VCAP_S2_CFG_UDP_PAYLOAD_ENA_GET(x)\
+ FIELD_GET(ANA_VCAP_S2_CFG_UDP_PAYLOAD_ENA, x)
+
+#define ANA_VCAP_S2_CFG_ETYPE_PAYLOAD_ENA GENMASK(16, 15)
+#define ANA_VCAP_S2_CFG_ETYPE_PAYLOAD_ENA_SET(x)\
+ FIELD_PREP(ANA_VCAP_S2_CFG_ETYPE_PAYLOAD_ENA, x)
+#define ANA_VCAP_S2_CFG_ETYPE_PAYLOAD_ENA_GET(x)\
+ FIELD_GET(ANA_VCAP_S2_CFG_ETYPE_PAYLOAD_ENA, x)
+
+#define ANA_VCAP_S2_CFG_ENA BIT(14)
+#define ANA_VCAP_S2_CFG_ENA_SET(x)\
+ FIELD_PREP(ANA_VCAP_S2_CFG_ENA, x)
+#define ANA_VCAP_S2_CFG_ENA_GET(x)\
+ FIELD_GET(ANA_VCAP_S2_CFG_ENA, x)
+
+#define ANA_VCAP_S2_CFG_SNAP_DIS GENMASK(13, 12)
+#define ANA_VCAP_S2_CFG_SNAP_DIS_SET(x)\
+ FIELD_PREP(ANA_VCAP_S2_CFG_SNAP_DIS, x)
+#define ANA_VCAP_S2_CFG_SNAP_DIS_GET(x)\
+ FIELD_GET(ANA_VCAP_S2_CFG_SNAP_DIS, x)
+
+#define ANA_VCAP_S2_CFG_ARP_DIS GENMASK(11, 10)
+#define ANA_VCAP_S2_CFG_ARP_DIS_SET(x)\
+ FIELD_PREP(ANA_VCAP_S2_CFG_ARP_DIS, x)
+#define ANA_VCAP_S2_CFG_ARP_DIS_GET(x)\
+ FIELD_GET(ANA_VCAP_S2_CFG_ARP_DIS, x)
+
+#define ANA_VCAP_S2_CFG_IP_TCPUDP_DIS GENMASK(9, 8)
+#define ANA_VCAP_S2_CFG_IP_TCPUDP_DIS_SET(x)\
+ FIELD_PREP(ANA_VCAP_S2_CFG_IP_TCPUDP_DIS, x)
+#define ANA_VCAP_S2_CFG_IP_TCPUDP_DIS_GET(x)\
+ FIELD_GET(ANA_VCAP_S2_CFG_IP_TCPUDP_DIS, x)
+
+#define ANA_VCAP_S2_CFG_IP_OTHER_DIS GENMASK(7, 6)
+#define ANA_VCAP_S2_CFG_IP_OTHER_DIS_SET(x)\
+ FIELD_PREP(ANA_VCAP_S2_CFG_IP_OTHER_DIS, x)
+#define ANA_VCAP_S2_CFG_IP_OTHER_DIS_GET(x)\
+ FIELD_GET(ANA_VCAP_S2_CFG_IP_OTHER_DIS, x)
+
+#define ANA_VCAP_S2_CFG_IP6_CFG GENMASK(5, 2)
+#define ANA_VCAP_S2_CFG_IP6_CFG_SET(x)\
+ FIELD_PREP(ANA_VCAP_S2_CFG_IP6_CFG, x)
+#define ANA_VCAP_S2_CFG_IP6_CFG_GET(x)\
+ FIELD_GET(ANA_VCAP_S2_CFG_IP6_CFG, x)
+
+#define ANA_VCAP_S2_CFG_OAM_DIS GENMASK(1, 0)
+#define ANA_VCAP_S2_CFG_OAM_DIS_SET(x)\
+ FIELD_PREP(ANA_VCAP_S2_CFG_OAM_DIS, x)
+#define ANA_VCAP_S2_CFG_OAM_DIS_GET(x)\
+ FIELD_GET(ANA_VCAP_S2_CFG_OAM_DIS, x)
+
+/* ANA:PORT:QOS_PCP_DEI_MAP_CFG */
+#define ANA_PCP_DEI_CFG(g, r) __REG(TARGET_ANA, 0, 1, 28672, g, 9, 128, 32, r, 16, 4)
+
+#define ANA_PCP_DEI_CFG_DP_PCP_DEI_VAL BIT(3)
+#define ANA_PCP_DEI_CFG_DP_PCP_DEI_VAL_SET(x)\
+ FIELD_PREP(ANA_PCP_DEI_CFG_DP_PCP_DEI_VAL, x)
+#define ANA_PCP_DEI_CFG_DP_PCP_DEI_VAL_GET(x)\
+ FIELD_GET(ANA_PCP_DEI_CFG_DP_PCP_DEI_VAL, x)
+
+#define ANA_PCP_DEI_CFG_QOS_PCP_DEI_VAL GENMASK(2, 0)
+#define ANA_PCP_DEI_CFG_QOS_PCP_DEI_VAL_SET(x)\
+ FIELD_PREP(ANA_PCP_DEI_CFG_QOS_PCP_DEI_VAL, x)
+#define ANA_PCP_DEI_CFG_QOS_PCP_DEI_VAL_GET(x)\
+ FIELD_GET(ANA_PCP_DEI_CFG_QOS_PCP_DEI_VAL, x)
+
+/* ANA:PORT:CPU_FWD_CFG */
+#define ANA_CPU_FWD_CFG(g) __REG(TARGET_ANA, 0, 1, 28672, g, 9, 128, 96, 0, 1, 4)
+
+#define ANA_CPU_FWD_CFG_MLD_REDIR_ENA BIT(6)
+#define ANA_CPU_FWD_CFG_MLD_REDIR_ENA_SET(x)\
+ FIELD_PREP(ANA_CPU_FWD_CFG_MLD_REDIR_ENA, x)
+#define ANA_CPU_FWD_CFG_MLD_REDIR_ENA_GET(x)\
+ FIELD_GET(ANA_CPU_FWD_CFG_MLD_REDIR_ENA, x)
+
+#define ANA_CPU_FWD_CFG_IGMP_REDIR_ENA BIT(5)
+#define ANA_CPU_FWD_CFG_IGMP_REDIR_ENA_SET(x)\
+ FIELD_PREP(ANA_CPU_FWD_CFG_IGMP_REDIR_ENA, x)
+#define ANA_CPU_FWD_CFG_IGMP_REDIR_ENA_GET(x)\
+ FIELD_GET(ANA_CPU_FWD_CFG_IGMP_REDIR_ENA, x)
+
+#define ANA_CPU_FWD_CFG_IPMC_CTRL_COPY_ENA BIT(4)
+#define ANA_CPU_FWD_CFG_IPMC_CTRL_COPY_ENA_SET(x)\
+ FIELD_PREP(ANA_CPU_FWD_CFG_IPMC_CTRL_COPY_ENA, x)
+#define ANA_CPU_FWD_CFG_IPMC_CTRL_COPY_ENA_GET(x)\
+ FIELD_GET(ANA_CPU_FWD_CFG_IPMC_CTRL_COPY_ENA, x)
+
+#define ANA_CPU_FWD_CFG_SRC_COPY_ENA BIT(3)
+#define ANA_CPU_FWD_CFG_SRC_COPY_ENA_SET(x)\
+ FIELD_PREP(ANA_CPU_FWD_CFG_SRC_COPY_ENA, x)
+#define ANA_CPU_FWD_CFG_SRC_COPY_ENA_GET(x)\
+ FIELD_GET(ANA_CPU_FWD_CFG_SRC_COPY_ENA, x)
+
+/* ANA:PORT:CPU_FWD_BPDU_CFG */
+#define ANA_CPU_FWD_BPDU_CFG(g) __REG(TARGET_ANA, 0, 1, 28672, g, 9, 128, 100, 0, 1, 4)
+
+/* ANA:PORT:PORT_CFG */
+#define ANA_PORT_CFG(g) __REG(TARGET_ANA, 0, 1, 28672, g, 9, 128, 112, 0, 1, 4)
+
+#define ANA_PORT_CFG_SRC_MIRROR_ENA BIT(13)
+#define ANA_PORT_CFG_SRC_MIRROR_ENA_SET(x)\
+ FIELD_PREP(ANA_PORT_CFG_SRC_MIRROR_ENA, x)
+#define ANA_PORT_CFG_SRC_MIRROR_ENA_GET(x)\
+ FIELD_GET(ANA_PORT_CFG_SRC_MIRROR_ENA, x)
+
+#define ANA_PORT_CFG_LEARNAUTO BIT(6)
+#define ANA_PORT_CFG_LEARNAUTO_SET(x)\
+ FIELD_PREP(ANA_PORT_CFG_LEARNAUTO, x)
+#define ANA_PORT_CFG_LEARNAUTO_GET(x)\
+ FIELD_GET(ANA_PORT_CFG_LEARNAUTO, x)
+
+#define ANA_PORT_CFG_LEARN_ENA BIT(5)
+#define ANA_PORT_CFG_LEARN_ENA_SET(x)\
+ FIELD_PREP(ANA_PORT_CFG_LEARN_ENA, x)
+#define ANA_PORT_CFG_LEARN_ENA_GET(x)\
+ FIELD_GET(ANA_PORT_CFG_LEARN_ENA, x)
+
+#define ANA_PORT_CFG_RECV_ENA BIT(4)
+#define ANA_PORT_CFG_RECV_ENA_SET(x)\
+ FIELD_PREP(ANA_PORT_CFG_RECV_ENA, x)
+#define ANA_PORT_CFG_RECV_ENA_GET(x)\
+ FIELD_GET(ANA_PORT_CFG_RECV_ENA, x)
+
+#define ANA_PORT_CFG_PORTID_VAL GENMASK(3, 0)
+#define ANA_PORT_CFG_PORTID_VAL_SET(x)\
+ FIELD_PREP(ANA_PORT_CFG_PORTID_VAL, x)
+#define ANA_PORT_CFG_PORTID_VAL_GET(x)\
+ FIELD_GET(ANA_PORT_CFG_PORTID_VAL, x)
+
+/* ANA:COMMON:DSCP_REWR_CFG */
+#define ANA_DSCP_REWR_CFG(r) __REG(TARGET_ANA, 0, 1, 31232, 0, 1, 552, 332, r, 16, 4)
+
+#define ANA_DSCP_REWR_CFG_DSCP_QOS_REWR_VAL GENMASK(5, 0)
+#define ANA_DSCP_REWR_CFG_DSCP_QOS_REWR_VAL_SET(x)\
+ FIELD_PREP(ANA_DSCP_REWR_CFG_DSCP_QOS_REWR_VAL, x)
+#define ANA_DSCP_REWR_CFG_DSCP_QOS_REWR_VAL_GET(x)\
+ FIELD_GET(ANA_DSCP_REWR_CFG_DSCP_QOS_REWR_VAL, x)
+
+/* ANA:PORT:POL_CFG */
+#define ANA_POL_CFG(g) __REG(TARGET_ANA, 0, 1, 28672, g, 9, 128, 116, 0, 1, 4)
+
+#define ANA_POL_CFG_PORT_POL_ENA BIT(17)
+#define ANA_POL_CFG_PORT_POL_ENA_SET(x)\
+ FIELD_PREP(ANA_POL_CFG_PORT_POL_ENA, x)
+#define ANA_POL_CFG_PORT_POL_ENA_GET(x)\
+ FIELD_GET(ANA_POL_CFG_PORT_POL_ENA, x)
+
+#define ANA_POL_CFG_POL_ORDER GENMASK(8, 0)
+#define ANA_POL_CFG_POL_ORDER_SET(x)\
+ FIELD_PREP(ANA_POL_CFG_POL_ORDER, x)
+#define ANA_POL_CFG_POL_ORDER_GET(x)\
+ FIELD_GET(ANA_POL_CFG_POL_ORDER, x)
+
+/* ANA:PFC:PFC_CFG */
+#define ANA_PFC_CFG(g) __REG(TARGET_ANA, 0, 1, 30720, g, 8, 64, 0, 0, 1, 4)
+
+#define ANA_PFC_CFG_FC_LINK_SPEED GENMASK(1, 0)
+#define ANA_PFC_CFG_FC_LINK_SPEED_SET(x)\
+ FIELD_PREP(ANA_PFC_CFG_FC_LINK_SPEED, x)
+#define ANA_PFC_CFG_FC_LINK_SPEED_GET(x)\
+ FIELD_GET(ANA_PFC_CFG_FC_LINK_SPEED, x)
+
+/* ANA:COMMON:AGGR_CFG */
+#define ANA_AGGR_CFG __REG(TARGET_ANA, 0, 1, 31232, 0, 1, 552, 0, 0, 1, 4)
+
+#define ANA_AGGR_CFG_AC_RND_ENA BIT(6)
+#define ANA_AGGR_CFG_AC_RND_ENA_SET(x)\
+ FIELD_PREP(ANA_AGGR_CFG_AC_RND_ENA, x)
+#define ANA_AGGR_CFG_AC_RND_ENA_GET(x)\
+ FIELD_GET(ANA_AGGR_CFG_AC_RND_ENA, x)
+
+#define ANA_AGGR_CFG_AC_DMAC_ENA BIT(5)
+#define ANA_AGGR_CFG_AC_DMAC_ENA_SET(x)\
+ FIELD_PREP(ANA_AGGR_CFG_AC_DMAC_ENA, x)
+#define ANA_AGGR_CFG_AC_DMAC_ENA_GET(x)\
+ FIELD_GET(ANA_AGGR_CFG_AC_DMAC_ENA, x)
+
+#define ANA_AGGR_CFG_AC_SMAC_ENA BIT(4)
+#define ANA_AGGR_CFG_AC_SMAC_ENA_SET(x)\
+ FIELD_PREP(ANA_AGGR_CFG_AC_SMAC_ENA, x)
+#define ANA_AGGR_CFG_AC_SMAC_ENA_GET(x)\
+ FIELD_GET(ANA_AGGR_CFG_AC_SMAC_ENA, x)
+
+#define ANA_AGGR_CFG_AC_IP6_FLOW_LBL_ENA BIT(3)
+#define ANA_AGGR_CFG_AC_IP6_FLOW_LBL_ENA_SET(x)\
+ FIELD_PREP(ANA_AGGR_CFG_AC_IP6_FLOW_LBL_ENA, x)
+#define ANA_AGGR_CFG_AC_IP6_FLOW_LBL_ENA_GET(x)\
+ FIELD_GET(ANA_AGGR_CFG_AC_IP6_FLOW_LBL_ENA, x)
+
+#define ANA_AGGR_CFG_AC_IP6_TCPUDP_ENA BIT(2)
+#define ANA_AGGR_CFG_AC_IP6_TCPUDP_ENA_SET(x)\
+ FIELD_PREP(ANA_AGGR_CFG_AC_IP6_TCPUDP_ENA, x)
+#define ANA_AGGR_CFG_AC_IP6_TCPUDP_ENA_GET(x)\
+ FIELD_GET(ANA_AGGR_CFG_AC_IP6_TCPUDP_ENA, x)
+
+#define ANA_AGGR_CFG_AC_IP4_SIPDIP_ENA BIT(1)
+#define ANA_AGGR_CFG_AC_IP4_SIPDIP_ENA_SET(x)\
+ FIELD_PREP(ANA_AGGR_CFG_AC_IP4_SIPDIP_ENA, x)
+#define ANA_AGGR_CFG_AC_IP4_SIPDIP_ENA_GET(x)\
+ FIELD_GET(ANA_AGGR_CFG_AC_IP4_SIPDIP_ENA, x)
+
+#define ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA BIT(0)
+#define ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA_SET(x)\
+ FIELD_PREP(ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA, x)
+#define ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA_GET(x)\
+ FIELD_GET(ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA, x)
+
+/* ANA:COMMON:DSCP_CFG */
+#define ANA_DSCP_CFG(r) __REG(TARGET_ANA, 0, 1, 31232, 0, 1, 552, 76, r, 64, 4)
+
+#define ANA_DSCP_CFG_DP_DSCP_VAL BIT(11)
+#define ANA_DSCP_CFG_DP_DSCP_VAL_SET(x)\
+ FIELD_PREP(ANA_DSCP_CFG_DP_DSCP_VAL, x)
+#define ANA_DSCP_CFG_DP_DSCP_VAL_GET(x)\
+ FIELD_GET(ANA_DSCP_CFG_DP_DSCP_VAL, x)
+
+#define ANA_DSCP_CFG_QOS_DSCP_VAL GENMASK(10, 8)
+#define ANA_DSCP_CFG_QOS_DSCP_VAL_SET(x)\
+ FIELD_PREP(ANA_DSCP_CFG_QOS_DSCP_VAL, x)
+#define ANA_DSCP_CFG_QOS_DSCP_VAL_GET(x)\
+ FIELD_GET(ANA_DSCP_CFG_QOS_DSCP_VAL, x)
+
+#define ANA_DSCP_CFG_DSCP_TRUST_ENA BIT(1)
+#define ANA_DSCP_CFG_DSCP_TRUST_ENA_SET(x)\
+ FIELD_PREP(ANA_DSCP_CFG_DSCP_TRUST_ENA, x)
+#define ANA_DSCP_CFG_DSCP_TRUST_ENA_GET(x)\
+ FIELD_GET(ANA_DSCP_CFG_DSCP_TRUST_ENA, x)
+
+#define ANA_DSCP_CFG_DSCP_REWR_ENA BIT(0)
+#define ANA_DSCP_CFG_DSCP_REWR_ENA_SET(x)\
+ FIELD_PREP(ANA_DSCP_CFG_DSCP_REWR_ENA, x)
+#define ANA_DSCP_CFG_DSCP_REWR_ENA_GET(x)\
+ FIELD_GET(ANA_DSCP_CFG_DSCP_REWR_ENA, x)
+
+/* ANA:POL:POL_PIR_CFG */
+#define ANA_POL_PIR_CFG(g) __REG(TARGET_ANA, 0, 1, 16384, g, 345, 32, 0, 0, 1, 4)
+
+#define ANA_POL_PIR_CFG_PIR_RATE GENMASK(20, 6)
+#define ANA_POL_PIR_CFG_PIR_RATE_SET(x)\
+ FIELD_PREP(ANA_POL_PIR_CFG_PIR_RATE, x)
+#define ANA_POL_PIR_CFG_PIR_RATE_GET(x)\
+ FIELD_GET(ANA_POL_PIR_CFG_PIR_RATE, x)
+
+#define ANA_POL_PIR_CFG_PIR_BURST GENMASK(5, 0)
+#define ANA_POL_PIR_CFG_PIR_BURST_SET(x)\
+ FIELD_PREP(ANA_POL_PIR_CFG_PIR_BURST, x)
+#define ANA_POL_PIR_CFG_PIR_BURST_GET(x)\
+ FIELD_GET(ANA_POL_PIR_CFG_PIR_BURST, x)
+
+/* ANA:POL:POL_MODE_CFG */
+#define ANA_POL_MODE(g) __REG(TARGET_ANA, 0, 1, 16384, g, 345, 32, 8, 0, 1, 4)
+
+#define ANA_POL_MODE_DROP_ON_YELLOW_ENA BIT(11)
+#define ANA_POL_MODE_DROP_ON_YELLOW_ENA_SET(x)\
+ FIELD_PREP(ANA_POL_MODE_DROP_ON_YELLOW_ENA, x)
+#define ANA_POL_MODE_DROP_ON_YELLOW_ENA_GET(x)\
+ FIELD_GET(ANA_POL_MODE_DROP_ON_YELLOW_ENA, x)
+
+#define ANA_POL_MODE_MARK_ALL_FRMS_RED_ENA BIT(10)
+#define ANA_POL_MODE_MARK_ALL_FRMS_RED_ENA_SET(x)\
+ FIELD_PREP(ANA_POL_MODE_MARK_ALL_FRMS_RED_ENA, x)
+#define ANA_POL_MODE_MARK_ALL_FRMS_RED_ENA_GET(x)\
+ FIELD_GET(ANA_POL_MODE_MARK_ALL_FRMS_RED_ENA, x)
+
+#define ANA_POL_MODE_IPG_SIZE GENMASK(9, 5)
+#define ANA_POL_MODE_IPG_SIZE_SET(x)\
+ FIELD_PREP(ANA_POL_MODE_IPG_SIZE, x)
+#define ANA_POL_MODE_IPG_SIZE_GET(x)\
+ FIELD_GET(ANA_POL_MODE_IPG_SIZE, x)
+
+#define ANA_POL_MODE_FRM_MODE GENMASK(4, 3)
+#define ANA_POL_MODE_FRM_MODE_SET(x)\
+ FIELD_PREP(ANA_POL_MODE_FRM_MODE, x)
+#define ANA_POL_MODE_FRM_MODE_GET(x)\
+ FIELD_GET(ANA_POL_MODE_FRM_MODE, x)
+
+#define ANA_POL_MODE_OVERSHOOT_ENA BIT(0)
+#define ANA_POL_MODE_OVERSHOOT_ENA_SET(x)\
+ FIELD_PREP(ANA_POL_MODE_OVERSHOOT_ENA, x)
+#define ANA_POL_MODE_OVERSHOOT_ENA_GET(x)\
+ FIELD_GET(ANA_POL_MODE_OVERSHOOT_ENA, x)
+
+/* ANA:POL:POL_PIR_STATE */
+#define ANA_POL_PIR_STATE(g) __REG(TARGET_ANA, 0, 1, 16384, g, 345, 32, 12, 0, 1, 4)
+
+#define ANA_POL_PIR_STATE_PIR_LVL GENMASK(21, 0)
+#define ANA_POL_PIR_STATE_PIR_LVL_SET(x)\
+ FIELD_PREP(ANA_POL_PIR_STATE_PIR_LVL, x)
+#define ANA_POL_PIR_STATE_PIR_LVL_GET(x)\
+ FIELD_GET(ANA_POL_PIR_STATE_PIR_LVL, x)
+
+/* CHIP_TOP:CUPHY_CFG:CUPHY_PORT_CFG */
+#define CHIP_TOP_CUPHY_PORT_CFG(r) __REG(TARGET_CHIP_TOP, 0, 1, 16, 0, 1, 20, 8, r, 2, 4)
+
+#define CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA BIT(0)
+#define CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA_SET(x)\
+ FIELD_PREP(CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA, x)
+#define CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA_GET(x)\
+ FIELD_GET(CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA, x)
+
+/* DEV:PORT_MODE:CLOCK_CFG */
+#define DEV_CLOCK_CFG(t) __REG(TARGET_DEV, t, 8, 0, 0, 1, 28, 0, 0, 1, 4)
+
+#define DEV_CLOCK_CFG_MAC_TX_RST BIT(7)
+#define DEV_CLOCK_CFG_MAC_TX_RST_SET(x)\
+ FIELD_PREP(DEV_CLOCK_CFG_MAC_TX_RST, x)
+#define DEV_CLOCK_CFG_MAC_TX_RST_GET(x)\
+ FIELD_GET(DEV_CLOCK_CFG_MAC_TX_RST, x)
+
+#define DEV_CLOCK_CFG_MAC_RX_RST BIT(6)
+#define DEV_CLOCK_CFG_MAC_RX_RST_SET(x)\
+ FIELD_PREP(DEV_CLOCK_CFG_MAC_RX_RST, x)
+#define DEV_CLOCK_CFG_MAC_RX_RST_GET(x)\
+ FIELD_GET(DEV_CLOCK_CFG_MAC_RX_RST, x)
+
+#define DEV_CLOCK_CFG_PCS_TX_RST BIT(5)
+#define DEV_CLOCK_CFG_PCS_TX_RST_SET(x)\
+ FIELD_PREP(DEV_CLOCK_CFG_PCS_TX_RST, x)
+#define DEV_CLOCK_CFG_PCS_TX_RST_GET(x)\
+ FIELD_GET(DEV_CLOCK_CFG_PCS_TX_RST, x)
+
+#define DEV_CLOCK_CFG_PCS_RX_RST BIT(4)
+#define DEV_CLOCK_CFG_PCS_RX_RST_SET(x)\
+ FIELD_PREP(DEV_CLOCK_CFG_PCS_RX_RST, x)
+#define DEV_CLOCK_CFG_PCS_RX_RST_GET(x)\
+ FIELD_GET(DEV_CLOCK_CFG_PCS_RX_RST, x)
+
+#define DEV_CLOCK_CFG_PORT_RST BIT(3)
+#define DEV_CLOCK_CFG_PORT_RST_SET(x)\
+ FIELD_PREP(DEV_CLOCK_CFG_PORT_RST, x)
+#define DEV_CLOCK_CFG_PORT_RST_GET(x)\
+ FIELD_GET(DEV_CLOCK_CFG_PORT_RST, x)
+
+#define DEV_CLOCK_CFG_LINK_SPEED GENMASK(1, 0)
+#define DEV_CLOCK_CFG_LINK_SPEED_SET(x)\
+ FIELD_PREP(DEV_CLOCK_CFG_LINK_SPEED, x)
+#define DEV_CLOCK_CFG_LINK_SPEED_GET(x)\
+ FIELD_GET(DEV_CLOCK_CFG_LINK_SPEED, x)
+
+/* DEV:MAC_CFG_STATUS:MAC_ENA_CFG */
+#define DEV_MAC_ENA_CFG(t) __REG(TARGET_DEV, t, 8, 28, 0, 1, 44, 0, 0, 1, 4)
+
+#define DEV_MAC_ENA_CFG_RX_ENA BIT(4)
+#define DEV_MAC_ENA_CFG_RX_ENA_SET(x)\
+ FIELD_PREP(DEV_MAC_ENA_CFG_RX_ENA, x)
+#define DEV_MAC_ENA_CFG_RX_ENA_GET(x)\
+ FIELD_GET(DEV_MAC_ENA_CFG_RX_ENA, x)
+
+#define DEV_MAC_ENA_CFG_TX_ENA BIT(0)
+#define DEV_MAC_ENA_CFG_TX_ENA_SET(x)\
+ FIELD_PREP(DEV_MAC_ENA_CFG_TX_ENA, x)
+#define DEV_MAC_ENA_CFG_TX_ENA_GET(x)\
+ FIELD_GET(DEV_MAC_ENA_CFG_TX_ENA, x)
+
+/* DEV:MAC_CFG_STATUS:MAC_MODE_CFG */
+#define DEV_MAC_MODE_CFG(t) __REG(TARGET_DEV, t, 8, 28, 0, 1, 44, 4, 0, 1, 4)
+
+#define DEV_MAC_MODE_CFG_GIGA_MODE_ENA BIT(4)
+#define DEV_MAC_MODE_CFG_GIGA_MODE_ENA_SET(x)\
+ FIELD_PREP(DEV_MAC_MODE_CFG_GIGA_MODE_ENA, x)
+#define DEV_MAC_MODE_CFG_GIGA_MODE_ENA_GET(x)\
+ FIELD_GET(DEV_MAC_MODE_CFG_GIGA_MODE_ENA, x)
+
+/* DEV:MAC_CFG_STATUS:MAC_MAXLEN_CFG */
+#define DEV_MAC_MAXLEN_CFG(t) __REG(TARGET_DEV, t, 8, 28, 0, 1, 44, 8, 0, 1, 4)
+
+#define DEV_MAC_MAXLEN_CFG_MAX_LEN GENMASK(15, 0)
+#define DEV_MAC_MAXLEN_CFG_MAX_LEN_SET(x)\
+ FIELD_PREP(DEV_MAC_MAXLEN_CFG_MAX_LEN, x)
+#define DEV_MAC_MAXLEN_CFG_MAX_LEN_GET(x)\
+ FIELD_GET(DEV_MAC_MAXLEN_CFG_MAX_LEN, x)
+
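+/* Illustrative sketch of how the *_SET() helpers above combine with the
+ * accessors, here for updating the max frame length (cf. lan966x_port.c):
+ *   lan_wr(DEV_MAC_MAXLEN_CFG_MAX_LEN_SET(maxlen),
+ *          lan966x, DEV_MAC_MAXLEN_CFG(port->chip_port));
+ */
+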
+/* DEV:MAC_CFG_STATUS:MAC_TAGS_CFG */
+#define DEV_MAC_TAGS_CFG(t) __REG(TARGET_DEV, t, 8, 28, 0, 1, 44, 12, 0, 1, 4)
+
+#define DEV_MAC_TAGS_CFG_VLAN_DBL_AWR_ENA BIT(1)
+#define DEV_MAC_TAGS_CFG_VLAN_DBL_AWR_ENA_SET(x)\
+ FIELD_PREP(DEV_MAC_TAGS_CFG_VLAN_DBL_AWR_ENA, x)
+#define DEV_MAC_TAGS_CFG_VLAN_DBL_AWR_ENA_GET(x)\
+ FIELD_GET(DEV_MAC_TAGS_CFG_VLAN_DBL_AWR_ENA, x)
+
+#define DEV_MAC_TAGS_CFG_VLAN_AWR_ENA BIT(0)
+#define DEV_MAC_TAGS_CFG_VLAN_AWR_ENA_SET(x)\
+ FIELD_PREP(DEV_MAC_TAGS_CFG_VLAN_AWR_ENA, x)
+#define DEV_MAC_TAGS_CFG_VLAN_AWR_ENA_GET(x)\
+ FIELD_GET(DEV_MAC_TAGS_CFG_VLAN_AWR_ENA, x)
+
+/* DEV:MAC_CFG_STATUS:MAC_IFG_CFG */
+#define DEV_MAC_IFG_CFG(t) __REG(TARGET_DEV, t, 8, 28, 0, 1, 44, 20, 0, 1, 4)
+
+#define DEV_MAC_IFG_CFG_TX_IFG GENMASK(12, 8)
+#define DEV_MAC_IFG_CFG_TX_IFG_SET(x)\
+ FIELD_PREP(DEV_MAC_IFG_CFG_TX_IFG, x)
+#define DEV_MAC_IFG_CFG_TX_IFG_GET(x)\
+ FIELD_GET(DEV_MAC_IFG_CFG_TX_IFG, x)
+
+#define DEV_MAC_IFG_CFG_RX_IFG2 GENMASK(7, 4)
+#define DEV_MAC_IFG_CFG_RX_IFG2_SET(x)\
+ FIELD_PREP(DEV_MAC_IFG_CFG_RX_IFG2, x)
+#define DEV_MAC_IFG_CFG_RX_IFG2_GET(x)\
+ FIELD_GET(DEV_MAC_IFG_CFG_RX_IFG2, x)
+
+#define DEV_MAC_IFG_CFG_RX_IFG1 GENMASK(3, 0)
+#define DEV_MAC_IFG_CFG_RX_IFG1_SET(x)\
+ FIELD_PREP(DEV_MAC_IFG_CFG_RX_IFG1, x)
+#define DEV_MAC_IFG_CFG_RX_IFG1_GET(x)\
+ FIELD_GET(DEV_MAC_IFG_CFG_RX_IFG1, x)
+
+/* DEV:MAC_CFG_STATUS:MAC_HDX_CFG */
+#define DEV_MAC_HDX_CFG(t) __REG(TARGET_DEV, t, 8, 28, 0, 1, 44, 24, 0, 1, 4)
+
+#define DEV_MAC_HDX_CFG_SEED GENMASK(23, 16)
+#define DEV_MAC_HDX_CFG_SEED_SET(x)\
+ FIELD_PREP(DEV_MAC_HDX_CFG_SEED, x)
+#define DEV_MAC_HDX_CFG_SEED_GET(x)\
+ FIELD_GET(DEV_MAC_HDX_CFG_SEED, x)
+
+#define DEV_MAC_HDX_CFG_SEED_LOAD BIT(12)
+#define DEV_MAC_HDX_CFG_SEED_LOAD_SET(x)\
+ FIELD_PREP(DEV_MAC_HDX_CFG_SEED_LOAD, x)
+#define DEV_MAC_HDX_CFG_SEED_LOAD_GET(x)\
+ FIELD_GET(DEV_MAC_HDX_CFG_SEED_LOAD, x)
+
+/* DEV:MAC_CFG_STATUS:MAC_FC_MAC_LOW_CFG */
+#define DEV_FC_MAC_LOW_CFG(t) __REG(TARGET_DEV, t, 8, 28, 0, 1, 44, 32, 0, 1, 4)
+
+/* DEV:MAC_CFG_STATUS:MAC_FC_MAC_HIGH_CFG */
+#define DEV_FC_MAC_HIGH_CFG(t) __REG(TARGET_DEV, t, 8, 28, 0, 1, 44, 36, 0, 1, 4)
+
+/* DEV:PCS1G_CFG_STATUS:PCS1G_CFG */
+#define DEV_PCS1G_CFG(t) __REG(TARGET_DEV, t, 8, 72, 0, 1, 68, 0, 0, 1, 4)
+
+#define DEV_PCS1G_CFG_PCS_ENA BIT(0)
+#define DEV_PCS1G_CFG_PCS_ENA_SET(x)\
+ FIELD_PREP(DEV_PCS1G_CFG_PCS_ENA, x)
+#define DEV_PCS1G_CFG_PCS_ENA_GET(x)\
+ FIELD_GET(DEV_PCS1G_CFG_PCS_ENA, x)
+
+/* DEV:PCS1G_CFG_STATUS:PCS1G_MODE_CFG */
+#define DEV_PCS1G_MODE_CFG(t) __REG(TARGET_DEV, t, 8, 72, 0, 1, 68, 4, 0, 1, 4)
+
+#define DEV_PCS1G_MODE_CFG_SAVE_PREAMBLE_ENA BIT(1)
+#define DEV_PCS1G_MODE_CFG_SAVE_PREAMBLE_ENA_SET(x)\
+ FIELD_PREP(DEV_PCS1G_MODE_CFG_SAVE_PREAMBLE_ENA, x)
+#define DEV_PCS1G_MODE_CFG_SAVE_PREAMBLE_ENA_GET(x)\
+ FIELD_GET(DEV_PCS1G_MODE_CFG_SAVE_PREAMBLE_ENA, x)
+
+#define DEV_PCS1G_MODE_CFG_SGMII_MODE_ENA BIT(0)
+#define DEV_PCS1G_MODE_CFG_SGMII_MODE_ENA_SET(x)\
+ FIELD_PREP(DEV_PCS1G_MODE_CFG_SGMII_MODE_ENA, x)
+#define DEV_PCS1G_MODE_CFG_SGMII_MODE_ENA_GET(x)\
+ FIELD_GET(DEV_PCS1G_MODE_CFG_SGMII_MODE_ENA, x)
+
+/* DEV:PCS1G_CFG_STATUS:PCS1G_SD_CFG */
+#define DEV_PCS1G_SD_CFG(t) __REG(TARGET_DEV, t, 8, 72, 0, 1, 68, 8, 0, 1, 4)
+
+#define DEV_PCS1G_SD_CFG_SD_ENA BIT(0)
+#define DEV_PCS1G_SD_CFG_SD_ENA_SET(x)\
+ FIELD_PREP(DEV_PCS1G_SD_CFG_SD_ENA, x)
+#define DEV_PCS1G_SD_CFG_SD_ENA_GET(x)\
+ FIELD_GET(DEV_PCS1G_SD_CFG_SD_ENA, x)
+
+/* DEV:PCS1G_CFG_STATUS:PCS1G_ANEG_CFG */
+#define DEV_PCS1G_ANEG_CFG(t) __REG(TARGET_DEV, t, 8, 72, 0, 1, 68, 12, 0, 1, 4)
+
+#define DEV_PCS1G_ANEG_CFG_ADV_ABILITY GENMASK(31, 16)
+#define DEV_PCS1G_ANEG_CFG_ADV_ABILITY_SET(x)\
+ FIELD_PREP(DEV_PCS1G_ANEG_CFG_ADV_ABILITY, x)
+#define DEV_PCS1G_ANEG_CFG_ADV_ABILITY_GET(x)\
+ FIELD_GET(DEV_PCS1G_ANEG_CFG_ADV_ABILITY, x)
+
+#define DEV_PCS1G_ANEG_CFG_SW_RESOLVE_ENA BIT(8)
+#define DEV_PCS1G_ANEG_CFG_SW_RESOLVE_ENA_SET(x)\
+ FIELD_PREP(DEV_PCS1G_ANEG_CFG_SW_RESOLVE_ENA, x)
+#define DEV_PCS1G_ANEG_CFG_SW_RESOLVE_ENA_GET(x)\
+ FIELD_GET(DEV_PCS1G_ANEG_CFG_SW_RESOLVE_ENA, x)
+
+#define DEV_PCS1G_ANEG_CFG_RESTART_ONE_SHOT BIT(1)
+#define DEV_PCS1G_ANEG_CFG_RESTART_ONE_SHOT_SET(x)\
+ FIELD_PREP(DEV_PCS1G_ANEG_CFG_RESTART_ONE_SHOT, x)
+#define DEV_PCS1G_ANEG_CFG_RESTART_ONE_SHOT_GET(x)\
+ FIELD_GET(DEV_PCS1G_ANEG_CFG_RESTART_ONE_SHOT, x)
+
+#define DEV_PCS1G_ANEG_CFG_ENA BIT(0)
+#define DEV_PCS1G_ANEG_CFG_ENA_SET(x)\
+ FIELD_PREP(DEV_PCS1G_ANEG_CFG_ENA, x)
+#define DEV_PCS1G_ANEG_CFG_ENA_GET(x)\
+ FIELD_GET(DEV_PCS1G_ANEG_CFG_ENA, x)
+
+/* DEV:PCS1G_CFG_STATUS:PCS1G_ANEG_STATUS */
+#define DEV_PCS1G_ANEG_STATUS(t) __REG(TARGET_DEV, t, 8, 72, 0, 1, 68, 32, 0, 1, 4)
+
+#define DEV_PCS1G_ANEG_STATUS_LP_ADV GENMASK(31, 16)
+#define DEV_PCS1G_ANEG_STATUS_LP_ADV_SET(x)\
+ FIELD_PREP(DEV_PCS1G_ANEG_STATUS_LP_ADV, x)
+#define DEV_PCS1G_ANEG_STATUS_LP_ADV_GET(x)\
+ FIELD_GET(DEV_PCS1G_ANEG_STATUS_LP_ADV, x)
+
+#define DEV_PCS1G_ANEG_STATUS_ANEG_COMPLETE BIT(0)
+#define DEV_PCS1G_ANEG_STATUS_ANEG_COMPLETE_SET(x)\
+ FIELD_PREP(DEV_PCS1G_ANEG_STATUS_ANEG_COMPLETE, x)
+#define DEV_PCS1G_ANEG_STATUS_ANEG_COMPLETE_GET(x)\
+ FIELD_GET(DEV_PCS1G_ANEG_STATUS_ANEG_COMPLETE, x)
+
+/* DEV:PCS1G_CFG_STATUS:PCS1G_LINK_STATUS */
+#define DEV_PCS1G_LINK_STATUS(t) __REG(TARGET_DEV, t, 8, 72, 0, 1, 68, 40, 0, 1, 4)
+
+#define DEV_PCS1G_LINK_STATUS_LINK_STATUS BIT(4)
+#define DEV_PCS1G_LINK_STATUS_LINK_STATUS_SET(x)\
+ FIELD_PREP(DEV_PCS1G_LINK_STATUS_LINK_STATUS, x)
+#define DEV_PCS1G_LINK_STATUS_LINK_STATUS_GET(x)\
+ FIELD_GET(DEV_PCS1G_LINK_STATUS_LINK_STATUS, x)
+
+#define DEV_PCS1G_LINK_STATUS_SYNC_STATUS BIT(0)
+#define DEV_PCS1G_LINK_STATUS_SYNC_STATUS_SET(x)\
+ FIELD_PREP(DEV_PCS1G_LINK_STATUS_SYNC_STATUS, x)
+#define DEV_PCS1G_LINK_STATUS_SYNC_STATUS_GET(x)\
+ FIELD_GET(DEV_PCS1G_LINK_STATUS_SYNC_STATUS, x)
+
+/* DEV:PCS1G_CFG_STATUS:PCS1G_STICKY */
+#define DEV_PCS1G_STICKY(t) __REG(TARGET_DEV, t, 8, 72, 0, 1, 68, 48, 0, 1, 4)
+
+#define DEV_PCS1G_STICKY_LINK_DOWN_STICKY BIT(4)
+#define DEV_PCS1G_STICKY_LINK_DOWN_STICKY_SET(x)\
+ FIELD_PREP(DEV_PCS1G_STICKY_LINK_DOWN_STICKY, x)
+#define DEV_PCS1G_STICKY_LINK_DOWN_STICKY_GET(x)\
+ FIELD_GET(DEV_PCS1G_STICKY_LINK_DOWN_STICKY, x)
+
+/* FDMA:FDMA:FDMA_CH_ACTIVATE */
+#define FDMA_CH_ACTIVATE __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 0, 0, 1, 4)
+
+#define FDMA_CH_ACTIVATE_CH_ACTIVATE GENMASK(7, 0)
+#define FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(x)\
+ FIELD_PREP(FDMA_CH_ACTIVATE_CH_ACTIVATE, x)
+#define FDMA_CH_ACTIVATE_CH_ACTIVATE_GET(x)\
+ FIELD_GET(FDMA_CH_ACTIVATE_CH_ACTIVATE, x)
+
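+/* FDMA_CH_ACTIVATE, FDMA_CH_RELOAD and FDMA_CH_DISABLE below each carry one
+ * bit per FDMA channel; writing BIT(channel) starts, reloads or stops that
+ * single channel (see lan966x_fdma.c).
+ */
+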
+/* FDMA:FDMA:FDMA_CH_RELOAD */
+#define FDMA_CH_RELOAD __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 4, 0, 1, 4)
+
+#define FDMA_CH_RELOAD_CH_RELOAD GENMASK(7, 0)
+#define FDMA_CH_RELOAD_CH_RELOAD_SET(x)\
+ FIELD_PREP(FDMA_CH_RELOAD_CH_RELOAD, x)
+#define FDMA_CH_RELOAD_CH_RELOAD_GET(x)\
+ FIELD_GET(FDMA_CH_RELOAD_CH_RELOAD, x)
+
+/* FDMA:FDMA:FDMA_CH_DISABLE */
+#define FDMA_CH_DISABLE __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 8, 0, 1, 4)
+
+#define FDMA_CH_DISABLE_CH_DISABLE GENMASK(7, 0)
+#define FDMA_CH_DISABLE_CH_DISABLE_SET(x)\
+ FIELD_PREP(FDMA_CH_DISABLE_CH_DISABLE, x)
+#define FDMA_CH_DISABLE_CH_DISABLE_GET(x)\
+ FIELD_GET(FDMA_CH_DISABLE_CH_DISABLE, x)
+
+/* FDMA:FDMA:FDMA_CH_DB_DISCARD */
+#define FDMA_CH_DB_DISCARD __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 16, 0, 1, 4)
+
+#define FDMA_CH_DB_DISCARD_DB_DISCARD GENMASK(7, 0)
+#define FDMA_CH_DB_DISCARD_DB_DISCARD_SET(x)\
+ FIELD_PREP(FDMA_CH_DB_DISCARD_DB_DISCARD, x)
+#define FDMA_CH_DB_DISCARD_DB_DISCARD_GET(x)\
+ FIELD_GET(FDMA_CH_DB_DISCARD_DB_DISCARD, x)
+
+/* FDMA:FDMA:FDMA_DCB_LLP */
+#define FDMA_DCB_LLP(r) __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 52, r, 8, 4)
+
+/* FDMA:FDMA:FDMA_DCB_LLP1 */
+#define FDMA_DCB_LLP1(r) __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 84, r, 8, 4)
+
+/* FDMA:FDMA:FDMA_CH_ACTIVE */
+#define FDMA_CH_ACTIVE __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 180, 0, 1, 4)
+
+/* FDMA:FDMA:FDMA_CH_CFG */
+#define FDMA_CH_CFG(r) __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 224, r, 8, 4)
+
+#define FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY BIT(4)
+#define FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(x)\
+ FIELD_PREP(FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY, x)
+#define FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_GET(x)\
+ FIELD_GET(FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY, x)
+
+#define FDMA_CH_CFG_CH_INJ_PORT BIT(3)
+#define FDMA_CH_CFG_CH_INJ_PORT_SET(x)\
+ FIELD_PREP(FDMA_CH_CFG_CH_INJ_PORT, x)
+#define FDMA_CH_CFG_CH_INJ_PORT_GET(x)\
+ FIELD_GET(FDMA_CH_CFG_CH_INJ_PORT, x)
+
+#define FDMA_CH_CFG_CH_DCB_DB_CNT GENMASK(2, 1)
+#define FDMA_CH_CFG_CH_DCB_DB_CNT_SET(x)\
+ FIELD_PREP(FDMA_CH_CFG_CH_DCB_DB_CNT, x)
+#define FDMA_CH_CFG_CH_DCB_DB_CNT_GET(x)\
+ FIELD_GET(FDMA_CH_CFG_CH_DCB_DB_CNT, x)
+
+#define FDMA_CH_CFG_CH_MEM BIT(0)
+#define FDMA_CH_CFG_CH_MEM_SET(x)\
+ FIELD_PREP(FDMA_CH_CFG_CH_MEM, x)
+#define FDMA_CH_CFG_CH_MEM_GET(x)\
+ FIELD_GET(FDMA_CH_CFG_CH_MEM, x)
+
+/* FDMA:FDMA:FDMA_PORT_CTRL */
+#define FDMA_PORT_CTRL(r) __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 376, r, 2, 4)
+
+#define FDMA_PORT_CTRL_INJ_STOP BIT(4)
+#define FDMA_PORT_CTRL_INJ_STOP_SET(x)\
+ FIELD_PREP(FDMA_PORT_CTRL_INJ_STOP, x)
+#define FDMA_PORT_CTRL_INJ_STOP_GET(x)\
+ FIELD_GET(FDMA_PORT_CTRL_INJ_STOP, x)
+
+#define FDMA_PORT_CTRL_XTR_STOP BIT(2)
+#define FDMA_PORT_CTRL_XTR_STOP_SET(x)\
+ FIELD_PREP(FDMA_PORT_CTRL_XTR_STOP, x)
+#define FDMA_PORT_CTRL_XTR_STOP_GET(x)\
+ FIELD_GET(FDMA_PORT_CTRL_XTR_STOP, x)
+
+/* FDMA:FDMA:FDMA_INTR_DB */
+#define FDMA_INTR_DB __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 392, 0, 1, 4)
+
+/* FDMA:FDMA:FDMA_INTR_DB_ENA */
+#define FDMA_INTR_DB_ENA __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 396, 0, 1, 4)
+
+#define FDMA_INTR_DB_ENA_INTR_DB_ENA GENMASK(7, 0)
+#define FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(x)\
+ FIELD_PREP(FDMA_INTR_DB_ENA_INTR_DB_ENA, x)
+#define FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(x)\
+ FIELD_GET(FDMA_INTR_DB_ENA_INTR_DB_ENA, x)
+
+/* FDMA:FDMA:FDMA_INTR_ERR */
+#define FDMA_INTR_ERR __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 400, 0, 1, 4)
+
+/* FDMA:FDMA:FDMA_ERRORS */
+#define FDMA_ERRORS __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 412, 0, 1, 4)
+
+/* PTP:PTP_CFG:PTP_PIN_INTR */
+#define PTP_PIN_INTR __REG(TARGET_PTP, 0, 1, 512, 0, 1, 16, 0, 0, 1, 4)
+
+#define PTP_PIN_INTR_INTR_PTP GENMASK(7, 0)
+#define PTP_PIN_INTR_INTR_PTP_SET(x)\
+ FIELD_PREP(PTP_PIN_INTR_INTR_PTP, x)
+#define PTP_PIN_INTR_INTR_PTP_GET(x)\
+ FIELD_GET(PTP_PIN_INTR_INTR_PTP, x)
+
+/* PTP:PTP_CFG:PTP_PIN_INTR_ENA */
+#define PTP_PIN_INTR_ENA __REG(TARGET_PTP, 0, 1, 512, 0, 1, 16, 4, 0, 1, 4)
+
+#define PTP_PIN_INTR_ENA_INTR_ENA GENMASK(7, 0)
+#define PTP_PIN_INTR_ENA_INTR_ENA_SET(x)\
+ FIELD_PREP(PTP_PIN_INTR_ENA_INTR_ENA, x)
+#define PTP_PIN_INTR_ENA_INTR_ENA_GET(x)\
+ FIELD_GET(PTP_PIN_INTR_ENA_INTR_ENA, x)
+
+/* PTP:PTP_CFG:PTP_DOM_CFG */
+#define PTP_DOM_CFG __REG(TARGET_PTP, 0, 1, 512, 0, 1, 16, 12, 0, 1, 4)
+
+#define PTP_DOM_CFG_ENA GENMASK(11, 9)
+#define PTP_DOM_CFG_ENA_SET(x)\
+ FIELD_PREP(PTP_DOM_CFG_ENA, x)
+#define PTP_DOM_CFG_ENA_GET(x)\
+ FIELD_GET(PTP_DOM_CFG_ENA, x)
+
+#define PTP_DOM_CFG_CLKCFG_DIS GENMASK(2, 0)
+#define PTP_DOM_CFG_CLKCFG_DIS_SET(x)\
+ FIELD_PREP(PTP_DOM_CFG_CLKCFG_DIS, x)
+#define PTP_DOM_CFG_CLKCFG_DIS_GET(x)\
+ FIELD_GET(PTP_DOM_CFG_CLKCFG_DIS, x)
+
+/* PTP:PTP_TOD_DOMAINS:CLK_PER_CFG */
+#define PTP_CLK_PER_CFG(g, r) __REG(TARGET_PTP, 0, 1, 528, g, 3, 28, 0, r, 2, 4)
+
+/* PTP:PTP_PINS:PTP_PIN_CFG */
+#define PTP_PIN_CFG(g) __REG(TARGET_PTP, 0, 1, 0, g, 8, 64, 0, 0, 1, 4)
+
+#define PTP_PIN_CFG_PIN_ACTION GENMASK(29, 27)
+#define PTP_PIN_CFG_PIN_ACTION_SET(x)\
+ FIELD_PREP(PTP_PIN_CFG_PIN_ACTION, x)
+#define PTP_PIN_CFG_PIN_ACTION_GET(x)\
+ FIELD_GET(PTP_PIN_CFG_PIN_ACTION, x)
+
+#define PTP_PIN_CFG_PIN_SYNC GENMASK(26, 25)
+#define PTP_PIN_CFG_PIN_SYNC_SET(x)\
+ FIELD_PREP(PTP_PIN_CFG_PIN_SYNC, x)
+#define PTP_PIN_CFG_PIN_SYNC_GET(x)\
+ FIELD_GET(PTP_PIN_CFG_PIN_SYNC, x)
+
+#define PTP_PIN_CFG_PIN_SELECT GENMASK(23, 21)
+#define PTP_PIN_CFG_PIN_SELECT_SET(x)\
+ FIELD_PREP(PTP_PIN_CFG_PIN_SELECT, x)
+#define PTP_PIN_CFG_PIN_SELECT_GET(x)\
+ FIELD_GET(PTP_PIN_CFG_PIN_SELECT, x)
+
+#define PTP_PIN_CFG_PIN_DOM GENMASK(17, 16)
+#define PTP_PIN_CFG_PIN_DOM_SET(x)\
+ FIELD_PREP(PTP_PIN_CFG_PIN_DOM, x)
+#define PTP_PIN_CFG_PIN_DOM_GET(x)\
+ FIELD_GET(PTP_PIN_CFG_PIN_DOM, x)
+
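+/* PIN_ACTION selects the pin operation (idle/load/save/clock, following the
+ * ocelot-family PTP convention), PIN_SYNC its synchronization mode and
+ * PIN_DOM the PTP time domain the pin is bound to.
+ */
+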
+/* PTP:PTP_PINS:PTP_TOD_SEC_MSB */
+#define PTP_TOD_SEC_MSB(g) __REG(TARGET_PTP, 0, 1, 0, g, 8, 64, 4, 0, 1, 4)
+
+#define PTP_TOD_SEC_MSB_TOD_SEC_MSB GENMASK(15, 0)
+#define PTP_TOD_SEC_MSB_TOD_SEC_MSB_SET(x)\
+ FIELD_PREP(PTP_TOD_SEC_MSB_TOD_SEC_MSB, x)
+#define PTP_TOD_SEC_MSB_TOD_SEC_MSB_GET(x)\
+ FIELD_GET(PTP_TOD_SEC_MSB_TOD_SEC_MSB, x)
+
+/* PTP:PTP_PINS:PTP_TOD_SEC_LSB */
+#define PTP_TOD_SEC_LSB(g) __REG(TARGET_PTP, 0, 1, 0, g, 8, 64, 8, 0, 1, 4)
+
+/* PTP:PTP_PINS:PTP_TOD_NSEC */
+#define PTP_TOD_NSEC(g) __REG(TARGET_PTP, 0, 1, 0, g, 8, 64, 12, 0, 1, 4)
+
+#define PTP_TOD_NSEC_TOD_NSEC GENMASK(29, 0)
+#define PTP_TOD_NSEC_TOD_NSEC_SET(x)\
+ FIELD_PREP(PTP_TOD_NSEC_TOD_NSEC, x)
+#define PTP_TOD_NSEC_TOD_NSEC_GET(x)\
+ FIELD_GET(PTP_TOD_NSEC_TOD_NSEC, x)
+
+/* PTP:PTP_PINS:WF_HIGH_PERIOD */
+#define PTP_WF_HIGH_PERIOD(g) __REG(TARGET_PTP,\
+ 0, 1, 0, g, 8, 64, 24, 0, 1, 4)
+
+#define PTP_WF_HIGH_PERIOD_PIN_WFH GENMASK(29, 0)
+#define PTP_WF_HIGH_PERIOD_PIN_WFH_SET(x)\
+ FIELD_PREP(PTP_WF_HIGH_PERIOD_PIN_WFH, x)
+#define PTP_WF_HIGH_PERIOD_PIN_WFH_GET(x)\
+ FIELD_GET(PTP_WF_HIGH_PERIOD_PIN_WFH, x)
+
+/* PTP:PTP_PINS:WF_LOW_PERIOD */
+#define PTP_WF_LOW_PERIOD(g) __REG(TARGET_PTP,\
+ 0, 1, 0, g, 8, 64, 28, 0, 1, 4)
+
+#define PTP_WF_LOW_PERIOD_PIN_WFL GENMASK(29, 0)
+#define PTP_WF_LOW_PERIOD_PIN_WFL_SET(x)\
+ FIELD_PREP(PTP_WF_LOW_PERIOD_PIN_WFL, x)
+#define PTP_WF_LOW_PERIOD_PIN_WFL_GET(x)\
+ FIELD_GET(PTP_WF_LOW_PERIOD_PIN_WFL, x)
+
+/* PTP:PTP_TS_FIFO:PTP_TWOSTEP_CTRL */
+#define PTP_TWOSTEP_CTRL __REG(TARGET_PTP, 0, 1, 612, 0, 1, 12, 0, 0, 1, 4)
+
+#define PTP_TWOSTEP_CTRL_NXT BIT(11)
+#define PTP_TWOSTEP_CTRL_NXT_SET(x)\
+ FIELD_PREP(PTP_TWOSTEP_CTRL_NXT, x)
+#define PTP_TWOSTEP_CTRL_NXT_GET(x)\
+ FIELD_GET(PTP_TWOSTEP_CTRL_NXT, x)
+
+#define PTP_TWOSTEP_CTRL_VLD BIT(10)
+#define PTP_TWOSTEP_CTRL_VLD_SET(x)\
+ FIELD_PREP(PTP_TWOSTEP_CTRL_VLD, x)
+#define PTP_TWOSTEP_CTRL_VLD_GET(x)\
+ FIELD_GET(PTP_TWOSTEP_CTRL_VLD, x)
+
+#define PTP_TWOSTEP_CTRL_STAMP_TX BIT(9)
+#define PTP_TWOSTEP_CTRL_STAMP_TX_SET(x)\
+ FIELD_PREP(PTP_TWOSTEP_CTRL_STAMP_TX, x)
+#define PTP_TWOSTEP_CTRL_STAMP_TX_GET(x)\
+ FIELD_GET(PTP_TWOSTEP_CTRL_STAMP_TX, x)
+
+#define PTP_TWOSTEP_CTRL_STAMP_PORT GENMASK(8, 1)
+#define PTP_TWOSTEP_CTRL_STAMP_PORT_SET(x)\
+ FIELD_PREP(PTP_TWOSTEP_CTRL_STAMP_PORT, x)
+#define PTP_TWOSTEP_CTRL_STAMP_PORT_GET(x)\
+ FIELD_GET(PTP_TWOSTEP_CTRL_STAMP_PORT, x)
+
+#define PTP_TWOSTEP_CTRL_OVFL BIT(0)
+#define PTP_TWOSTEP_CTRL_OVFL_SET(x)\
+ FIELD_PREP(PTP_TWOSTEP_CTRL_OVFL, x)
+#define PTP_TWOSTEP_CTRL_OVFL_GET(x)\
+ FIELD_GET(PTP_TWOSTEP_CTRL_OVFL, x)
+
+/* PTP:PTP_TS_FIFO:PTP_TWOSTEP_STAMP */
+#define PTP_TWOSTEP_STAMP __REG(TARGET_PTP, 0, 1, 612, 0, 1, 12, 4, 0, 1, 4)
+
+#define PTP_TWOSTEP_STAMP_STAMP_NSEC GENMASK(31, 2)
+#define PTP_TWOSTEP_STAMP_STAMP_NSEC_SET(x)\
+ FIELD_PREP(PTP_TWOSTEP_STAMP_STAMP_NSEC, x)
+#define PTP_TWOSTEP_STAMP_STAMP_NSEC_GET(x)\
+ FIELD_GET(PTP_TWOSTEP_STAMP_STAMP_NSEC, x)
+
+/* DEVCPU_QS:XTR:XTR_GRP_CFG */
+#define QS_XTR_GRP_CFG(r) __REG(TARGET_QS, 0, 1, 0, 0, 1, 36, 0, r, 2, 4)
+
+#define QS_XTR_GRP_CFG_MODE GENMASK(3, 2)
+#define QS_XTR_GRP_CFG_MODE_SET(x)\
+ FIELD_PREP(QS_XTR_GRP_CFG_MODE, x)
+#define QS_XTR_GRP_CFG_MODE_GET(x)\
+ FIELD_GET(QS_XTR_GRP_CFG_MODE, x)
+
+#define QS_XTR_GRP_CFG_BYTE_SWAP BIT(0)
+#define QS_XTR_GRP_CFG_BYTE_SWAP_SET(x)\
+ FIELD_PREP(QS_XTR_GRP_CFG_BYTE_SWAP, x)
+#define QS_XTR_GRP_CFG_BYTE_SWAP_GET(x)\
+ FIELD_GET(QS_XTR_GRP_CFG_BYTE_SWAP, x)
+
+/* DEVCPU_QS:XTR:XTR_RD */
+#define QS_XTR_RD(r) __REG(TARGET_QS, 0, 1, 0, 0, 1, 36, 8, r, 2, 4)
+
+/* DEVCPU_QS:XTR:XTR_FLUSH */
+#define QS_XTR_FLUSH __REG(TARGET_QS, 0, 1, 0, 0, 1, 36, 24, 0, 1, 4)
+
+/* DEVCPU_QS:XTR:XTR_DATA_PRESENT */
+#define QS_XTR_DATA_PRESENT __REG(TARGET_QS, 0, 1, 0, 0, 1, 36, 28, 0, 1, 4)
+
+/* DEVCPU_QS:INJ:INJ_GRP_CFG */
+#define QS_INJ_GRP_CFG(r) __REG(TARGET_QS, 0, 1, 36, 0, 1, 40, 0, r, 2, 4)
+
+#define QS_INJ_GRP_CFG_MODE GENMASK(3, 2)
+#define QS_INJ_GRP_CFG_MODE_SET(x)\
+ FIELD_PREP(QS_INJ_GRP_CFG_MODE, x)
+#define QS_INJ_GRP_CFG_MODE_GET(x)\
+ FIELD_GET(QS_INJ_GRP_CFG_MODE, x)
+
+#define QS_INJ_GRP_CFG_BYTE_SWAP BIT(0)
+#define QS_INJ_GRP_CFG_BYTE_SWAP_SET(x)\
+ FIELD_PREP(QS_INJ_GRP_CFG_BYTE_SWAP, x)
+#define QS_INJ_GRP_CFG_BYTE_SWAP_GET(x)\
+ FIELD_GET(QS_INJ_GRP_CFG_BYTE_SWAP, x)
+
+/* DEVCPU_QS:INJ:INJ_WR */
+#define QS_INJ_WR(r) __REG(TARGET_QS, 0, 1, 36, 0, 1, 40, 8, r, 2, 4)
+
+/* DEVCPU_QS:INJ:INJ_CTRL */
+#define QS_INJ_CTRL(r) __REG(TARGET_QS, 0, 1, 36, 0, 1, 40, 16, r, 2, 4)
+
+#define QS_INJ_CTRL_GAP_SIZE GENMASK(24, 21)
+#define QS_INJ_CTRL_GAP_SIZE_SET(x)\
+ FIELD_PREP(QS_INJ_CTRL_GAP_SIZE, x)
+#define QS_INJ_CTRL_GAP_SIZE_GET(x)\
+ FIELD_GET(QS_INJ_CTRL_GAP_SIZE, x)
+
+#define QS_INJ_CTRL_EOF BIT(19)
+#define QS_INJ_CTRL_EOF_SET(x)\
+ FIELD_PREP(QS_INJ_CTRL_EOF, x)
+#define QS_INJ_CTRL_EOF_GET(x)\
+ FIELD_GET(QS_INJ_CTRL_EOF, x)
+
+#define QS_INJ_CTRL_SOF BIT(18)
+#define QS_INJ_CTRL_SOF_SET(x)\
+ FIELD_PREP(QS_INJ_CTRL_SOF, x)
+#define QS_INJ_CTRL_SOF_GET(x)\
+ FIELD_GET(QS_INJ_CTRL_SOF, x)
+
+#define QS_INJ_CTRL_VLD_BYTES GENMASK(17, 16)
+#define QS_INJ_CTRL_VLD_BYTES_SET(x)\
+ FIELD_PREP(QS_INJ_CTRL_VLD_BYTES, x)
+#define QS_INJ_CTRL_VLD_BYTES_GET(x)\
+ FIELD_GET(QS_INJ_CTRL_VLD_BYTES, x)
+
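+/* Manual injection framing convention (cf. lan966x_main.c): the first word
+ * of a frame is written with SOF set, the last with EOF set, and VLD_BYTES
+ * indicates how many bytes of the final word are valid.
+ */
+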
+/* DEVCPU_QS:INJ:INJ_STATUS */
+#define QS_INJ_STATUS __REG(TARGET_QS, 0, 1, 36, 0, 1, 40, 24, 0, 1, 4)
+
+#define QS_INJ_STATUS_WMARK_REACHED GENMASK(5, 4)
+#define QS_INJ_STATUS_WMARK_REACHED_SET(x)\
+ FIELD_PREP(QS_INJ_STATUS_WMARK_REACHED, x)
+#define QS_INJ_STATUS_WMARK_REACHED_GET(x)\
+ FIELD_GET(QS_INJ_STATUS_WMARK_REACHED, x)
+
+#define QS_INJ_STATUS_FIFO_RDY GENMASK(3, 2)
+#define QS_INJ_STATUS_FIFO_RDY_SET(x)\
+ FIELD_PREP(QS_INJ_STATUS_FIFO_RDY, x)
+#define QS_INJ_STATUS_FIFO_RDY_GET(x)\
+ FIELD_GET(QS_INJ_STATUS_FIFO_RDY, x)
+
+/* QSYS:SYSTEM:PORT_MODE */
+#define QSYS_PORT_MODE(r) __REG(TARGET_QSYS, 0, 1, 28008, 0, 1, 216, 0, r, 10, 4)
+
+#define QSYS_PORT_MODE_DEQUEUE_DIS BIT(1)
+#define QSYS_PORT_MODE_DEQUEUE_DIS_SET(x)\
+ FIELD_PREP(QSYS_PORT_MODE_DEQUEUE_DIS, x)
+#define QSYS_PORT_MODE_DEQUEUE_DIS_GET(x)\
+ FIELD_GET(QSYS_PORT_MODE_DEQUEUE_DIS, x)
+
+/* QSYS:SYSTEM:SWITCH_PORT_MODE */
+#define QSYS_SW_PORT_MODE(r) __REG(TARGET_QSYS, 0, 1, 28008, 0, 1, 216, 80, r, 9, 4)
+
+#define QSYS_SW_PORT_MODE_PORT_ENA BIT(18)
+#define QSYS_SW_PORT_MODE_PORT_ENA_SET(x)\
+ FIELD_PREP(QSYS_SW_PORT_MODE_PORT_ENA, x)
+#define QSYS_SW_PORT_MODE_PORT_ENA_GET(x)\
+ FIELD_GET(QSYS_SW_PORT_MODE_PORT_ENA, x)
+
+#define QSYS_SW_PORT_MODE_SCH_NEXT_CFG GENMASK(16, 14)
+#define QSYS_SW_PORT_MODE_SCH_NEXT_CFG_SET(x)\
+ FIELD_PREP(QSYS_SW_PORT_MODE_SCH_NEXT_CFG, x)
+#define QSYS_SW_PORT_MODE_SCH_NEXT_CFG_GET(x)\
+ FIELD_GET(QSYS_SW_PORT_MODE_SCH_NEXT_CFG, x)
+
+#define QSYS_SW_PORT_MODE_INGRESS_DROP_MODE BIT(12)
+#define QSYS_SW_PORT_MODE_INGRESS_DROP_MODE_SET(x)\
+ FIELD_PREP(QSYS_SW_PORT_MODE_INGRESS_DROP_MODE, x)
+#define QSYS_SW_PORT_MODE_INGRESS_DROP_MODE_GET(x)\
+ FIELD_GET(QSYS_SW_PORT_MODE_INGRESS_DROP_MODE, x)
+
+#define QSYS_SW_PORT_MODE_TX_PFC_ENA GENMASK(11, 4)
+#define QSYS_SW_PORT_MODE_TX_PFC_ENA_SET(x)\
+ FIELD_PREP(QSYS_SW_PORT_MODE_TX_PFC_ENA, x)
+#define QSYS_SW_PORT_MODE_TX_PFC_ENA_GET(x)\
+ FIELD_GET(QSYS_SW_PORT_MODE_TX_PFC_ENA, x)
+
+#define QSYS_SW_PORT_MODE_AGING_MODE GENMASK(1, 0)
+#define QSYS_SW_PORT_MODE_AGING_MODE_SET(x)\
+ FIELD_PREP(QSYS_SW_PORT_MODE_AGING_MODE, x)
+#define QSYS_SW_PORT_MODE_AGING_MODE_GET(x)\
+ FIELD_GET(QSYS_SW_PORT_MODE_AGING_MODE, x)
+
+/* QSYS:SYSTEM:SW_STATUS */
+#define QSYS_SW_STATUS(r) __REG(TARGET_QSYS, 0, 1, 28008, 0, 1, 216, 164, r, 9, 4)
+
+#define QSYS_SW_STATUS_EQ_AVAIL GENMASK(7, 0)
+#define QSYS_SW_STATUS_EQ_AVAIL_SET(x)\
+ FIELD_PREP(QSYS_SW_STATUS_EQ_AVAIL, x)
+#define QSYS_SW_STATUS_EQ_AVAIL_GET(x)\
+ FIELD_GET(QSYS_SW_STATUS_EQ_AVAIL, x)
+
+/* QSYS:SYSTEM:CPU_GROUP_MAP */
+#define QSYS_CPU_GROUP_MAP __REG(TARGET_QSYS, 0, 1, 28008, 0, 1, 216, 204, 0, 1, 4)
+
+/* QSYS:RES_CTRL:RES_CFG */
+#define QSYS_RES_CFG(g) __REG(TARGET_QSYS, 0, 1, 32768, g, 1024, 8, 0, 0, 1, 4)
+
+/* QSYS:HSCH:CIR_CFG */
+#define QSYS_CIR_CFG(g) __REG(TARGET_QSYS, 0, 1, 16384, g, 90, 128, 0, 0, 1, 4)
+
+#define QSYS_CIR_CFG_CIR_RATE GENMASK(20, 6)
+#define QSYS_CIR_CFG_CIR_RATE_SET(x)\
+ FIELD_PREP(QSYS_CIR_CFG_CIR_RATE, x)
+#define QSYS_CIR_CFG_CIR_RATE_GET(x)\
+ FIELD_GET(QSYS_CIR_CFG_CIR_RATE, x)
+
+#define QSYS_CIR_CFG_CIR_BURST GENMASK(5, 0)
+#define QSYS_CIR_CFG_CIR_BURST_SET(x)\
+ FIELD_PREP(QSYS_CIR_CFG_CIR_BURST, x)
+#define QSYS_CIR_CFG_CIR_BURST_GET(x)\
+ FIELD_GET(QSYS_CIR_CFG_CIR_BURST, x)
+
+/* QSYS:HSCH:SE_CFG */
+#define QSYS_SE_CFG(g) __REG(TARGET_QSYS, 0, 1, 16384, g, 90, 128, 8, 0, 1, 4)
+
+#define QSYS_SE_CFG_SE_DWRR_CNT GENMASK(9, 6)
+#define QSYS_SE_CFG_SE_DWRR_CNT_SET(x)\
+ FIELD_PREP(QSYS_SE_CFG_SE_DWRR_CNT, x)
+#define QSYS_SE_CFG_SE_DWRR_CNT_GET(x)\
+ FIELD_GET(QSYS_SE_CFG_SE_DWRR_CNT, x)
+
+#define QSYS_SE_CFG_SE_RR_ENA BIT(5)
+#define QSYS_SE_CFG_SE_RR_ENA_SET(x)\
+ FIELD_PREP(QSYS_SE_CFG_SE_RR_ENA, x)
+#define QSYS_SE_CFG_SE_RR_ENA_GET(x)\
+ FIELD_GET(QSYS_SE_CFG_SE_RR_ENA, x)
+
+#define QSYS_SE_CFG_SE_AVB_ENA BIT(4)
+#define QSYS_SE_CFG_SE_AVB_ENA_SET(x)\
+ FIELD_PREP(QSYS_SE_CFG_SE_AVB_ENA, x)
+#define QSYS_SE_CFG_SE_AVB_ENA_GET(x)\
+ FIELD_GET(QSYS_SE_CFG_SE_AVB_ENA, x)
+
+#define QSYS_SE_CFG_SE_FRM_MODE GENMASK(3, 2)
+#define QSYS_SE_CFG_SE_FRM_MODE_SET(x)\
+ FIELD_PREP(QSYS_SE_CFG_SE_FRM_MODE, x)
+#define QSYS_SE_CFG_SE_FRM_MODE_GET(x)\
+ FIELD_GET(QSYS_SE_CFG_SE_FRM_MODE, x)
+
+/* QSYS:HSCH:SE_DWRR_CFG */
+#define QSYS_SE_DWRR_CFG(g, r) __REG(TARGET_QSYS, 0, 1, 16384, g, 90, 128, 12, r, 12, 4)
+
+#define QSYS_SE_DWRR_CFG_DWRR_COST GENMASK(4, 0)
+#define QSYS_SE_DWRR_CFG_DWRR_COST_SET(x)\
+ FIELD_PREP(QSYS_SE_DWRR_CFG_DWRR_COST, x)
+#define QSYS_SE_DWRR_CFG_DWRR_COST_GET(x)\
+ FIELD_GET(QSYS_SE_DWRR_CFG_DWRR_COST, x)
+
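+/* The QSYS_TAS_* registers below back the tc-taprio (IEEE 802.1Qbv) offload
+ * in lan966x_taprio.c: per-list base time, cycle time and gate control list
+ * (GCL) entries.
+ */
+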
+/* QSYS:TAS_CONFIG:TAS_CFG_CTRL */
+#define QSYS_TAS_CFG_CTRL __REG(TARGET_QSYS, 0, 1, 57372, 0, 1, 12, 0, 0, 1, 4)
+
+#define QSYS_TAS_CFG_CTRL_LIST_NUM_MAX GENMASK(27, 23)
+#define QSYS_TAS_CFG_CTRL_LIST_NUM_MAX_SET(x)\
+ FIELD_PREP(QSYS_TAS_CFG_CTRL_LIST_NUM_MAX, x)
+#define QSYS_TAS_CFG_CTRL_LIST_NUM_MAX_GET(x)\
+ FIELD_GET(QSYS_TAS_CFG_CTRL_LIST_NUM_MAX, x)
+
+#define QSYS_TAS_CFG_CTRL_LIST_NUM GENMASK(22, 18)
+#define QSYS_TAS_CFG_CTRL_LIST_NUM_SET(x)\
+ FIELD_PREP(QSYS_TAS_CFG_CTRL_LIST_NUM, x)
+#define QSYS_TAS_CFG_CTRL_LIST_NUM_GET(x)\
+ FIELD_GET(QSYS_TAS_CFG_CTRL_LIST_NUM, x)
+
+#define QSYS_TAS_CFG_CTRL_ALWAYS_GB_SCH_Q BIT(17)
+#define QSYS_TAS_CFG_CTRL_ALWAYS_GB_SCH_Q_SET(x)\
+ FIELD_PREP(QSYS_TAS_CFG_CTRL_ALWAYS_GB_SCH_Q, x)
+#define QSYS_TAS_CFG_CTRL_ALWAYS_GB_SCH_Q_GET(x)\
+ FIELD_GET(QSYS_TAS_CFG_CTRL_ALWAYS_GB_SCH_Q, x)
+
+#define QSYS_TAS_CFG_CTRL_GCL_ENTRY_NUM GENMASK(16, 5)
+#define QSYS_TAS_CFG_CTRL_GCL_ENTRY_NUM_SET(x)\
+ FIELD_PREP(QSYS_TAS_CFG_CTRL_GCL_ENTRY_NUM, x)
+#define QSYS_TAS_CFG_CTRL_GCL_ENTRY_NUM_GET(x)\
+ FIELD_GET(QSYS_TAS_CFG_CTRL_GCL_ENTRY_NUM, x)
+
+/* QSYS:TAS_CONFIG:TAS_GATE_STATE_CTRL */
+#define QSYS_TAS_GS_CTRL __REG(TARGET_QSYS, 0, 1, 57372, 0, 1, 12, 4, 0, 1, 4)
+
+#define QSYS_TAS_GS_CTRL_HSCH_POS GENMASK(2, 0)
+#define QSYS_TAS_GS_CTRL_HSCH_POS_SET(x)\
+ FIELD_PREP(QSYS_TAS_GS_CTRL_HSCH_POS, x)
+#define QSYS_TAS_GS_CTRL_HSCH_POS_GET(x)\
+ FIELD_GET(QSYS_TAS_GS_CTRL_HSCH_POS, x)
+
+/* QSYS:TAS_CONFIG:TAS_STATEMACHINE_CFG */
+#define QSYS_TAS_STM_CFG __REG(TARGET_QSYS, 0, 1, 57372, 0, 1, 12, 8, 0, 1, 4)
+
+#define QSYS_TAS_STM_CFG_REVISIT_DLY GENMASK(7, 0)
+#define QSYS_TAS_STM_CFG_REVISIT_DLY_SET(x)\
+ FIELD_PREP(QSYS_TAS_STM_CFG_REVISIT_DLY, x)
+#define QSYS_TAS_STM_CFG_REVISIT_DLY_GET(x)\
+ FIELD_GET(QSYS_TAS_STM_CFG_REVISIT_DLY, x)
+
+/* QSYS:TAS_PROFILE_CFG:TAS_PROFILE_CONFIG */
+#define QSYS_TAS_PROFILE_CFG(g) __REG(TARGET_QSYS, 0, 1, 30720, g, 16, 64, 32, 0, 1, 4)
+
+#define QSYS_TAS_PROFILE_CFG_PORT_NUM GENMASK(21, 19)
+#define QSYS_TAS_PROFILE_CFG_PORT_NUM_SET(x)\
+ FIELD_PREP(QSYS_TAS_PROFILE_CFG_PORT_NUM, x)
+#define QSYS_TAS_PROFILE_CFG_PORT_NUM_GET(x)\
+ FIELD_GET(QSYS_TAS_PROFILE_CFG_PORT_NUM, x)
+
+#define QSYS_TAS_PROFILE_CFG_LINK_SPEED GENMASK(18, 16)
+#define QSYS_TAS_PROFILE_CFG_LINK_SPEED_SET(x)\
+ FIELD_PREP(QSYS_TAS_PROFILE_CFG_LINK_SPEED, x)
+#define QSYS_TAS_PROFILE_CFG_LINK_SPEED_GET(x)\
+ FIELD_GET(QSYS_TAS_PROFILE_CFG_LINK_SPEED, x)
+
+/* QSYS:TAS_LIST_CFG:TAS_BASE_TIME_NSEC */
+#define QSYS_TAS_BT_NSEC __REG(TARGET_QSYS, 0, 1, 27904, 0, 1, 64, 0, 0, 1, 4)
+
+#define QSYS_TAS_BT_NSEC_NSEC GENMASK(29, 0)
+#define QSYS_TAS_BT_NSEC_NSEC_SET(x)\
+ FIELD_PREP(QSYS_TAS_BT_NSEC_NSEC, x)
+#define QSYS_TAS_BT_NSEC_NSEC_GET(x)\
+ FIELD_GET(QSYS_TAS_BT_NSEC_NSEC, x)
+
+/* QSYS:TAS_LIST_CFG:TAS_BASE_TIME_SEC_LSB */
+#define QSYS_TAS_BT_SEC_LSB __REG(TARGET_QSYS, 0, 1, 27904, 0, 1, 64, 4, 0, 1, 4)
+
+/* QSYS:TAS_LIST_CFG:TAS_BASE_TIME_SEC_MSB */
+#define QSYS_TAS_BT_SEC_MSB __REG(TARGET_QSYS, 0, 1, 27904, 0, 1, 64, 8, 0, 1, 4)
+
+#define QSYS_TAS_BT_SEC_MSB_SEC_MSB GENMASK(15, 0)
+#define QSYS_TAS_BT_SEC_MSB_SEC_MSB_SET(x)\
+ FIELD_PREP(QSYS_TAS_BT_SEC_MSB_SEC_MSB, x)
+#define QSYS_TAS_BT_SEC_MSB_SEC_MSB_GET(x)\
+ FIELD_GET(QSYS_TAS_BT_SEC_MSB_SEC_MSB, x)
+
+/* QSYS:TAS_LIST_CFG:TAS_CYCLE_TIME_CFG */
+#define QSYS_TAS_CT_CFG __REG(TARGET_QSYS, 0, 1, 27904, 0, 1, 64, 24, 0, 1, 4)
+
+/* QSYS:TAS_LIST_CFG:TAS_STARTUP_CFG */
+#define QSYS_TAS_STARTUP_CFG __REG(TARGET_QSYS, 0, 1, 27904, 0, 1, 64, 28, 0, 1, 4)
+
+#define QSYS_TAS_STARTUP_CFG_OBSOLETE_IDX GENMASK(27, 23)
+#define QSYS_TAS_STARTUP_CFG_OBSOLETE_IDX_SET(x)\
+ FIELD_PREP(QSYS_TAS_STARTUP_CFG_OBSOLETE_IDX, x)
+#define QSYS_TAS_STARTUP_CFG_OBSOLETE_IDX_GET(x)\
+ FIELD_GET(QSYS_TAS_STARTUP_CFG_OBSOLETE_IDX, x)
+
+/* QSYS:TAS_LIST_CFG:TAS_LIST_CFG */
+#define QSYS_TAS_LIST_CFG __REG(TARGET_QSYS, 0, 1, 27904, 0, 1, 64, 32, 0, 1, 4)
+
+#define QSYS_TAS_LIST_CFG_LIST_BASE_ADDR GENMASK(11, 0)
+#define QSYS_TAS_LIST_CFG_LIST_BASE_ADDR_SET(x)\
+ FIELD_PREP(QSYS_TAS_LIST_CFG_LIST_BASE_ADDR, x)
+#define QSYS_TAS_LIST_CFG_LIST_BASE_ADDR_GET(x)\
+ FIELD_GET(QSYS_TAS_LIST_CFG_LIST_BASE_ADDR, x)
+
+/* QSYS:TAS_LIST_CFG:TAS_LIST_STATE */
+#define QSYS_TAS_LST __REG(TARGET_QSYS, 0, 1, 27904, 0, 1, 64, 36, 0, 1, 4)
+
+#define QSYS_TAS_LST_LIST_STATE GENMASK(2, 0)
+#define QSYS_TAS_LST_LIST_STATE_SET(x)\
+ FIELD_PREP(QSYS_TAS_LST_LIST_STATE, x)
+#define QSYS_TAS_LST_LIST_STATE_GET(x)\
+ FIELD_GET(QSYS_TAS_LST_LIST_STATE, x)
+
+/* QSYS:TAS_GCL_CFG:TAS_GCL_CTRL_CFG */
+#define QSYS_TAS_GCL_CT_CFG __REG(TARGET_QSYS, 0, 1, 27968, 0, 1, 16, 0, 0, 1, 4)
+
+#define QSYS_TAS_GCL_CT_CFG_HSCH_POS GENMASK(12, 10)
+#define QSYS_TAS_GCL_CT_CFG_HSCH_POS_SET(x)\
+ FIELD_PREP(QSYS_TAS_GCL_CT_CFG_HSCH_POS, x)
+#define QSYS_TAS_GCL_CT_CFG_HSCH_POS_GET(x)\
+ FIELD_GET(QSYS_TAS_GCL_CT_CFG_HSCH_POS, x)
+
+#define QSYS_TAS_GCL_CT_CFG_GATE_STATE GENMASK(9, 2)
+#define QSYS_TAS_GCL_CT_CFG_GATE_STATE_SET(x)\
+ FIELD_PREP(QSYS_TAS_GCL_CT_CFG_GATE_STATE, x)
+#define QSYS_TAS_GCL_CT_CFG_GATE_STATE_GET(x)\
+ FIELD_GET(QSYS_TAS_GCL_CT_CFG_GATE_STATE, x)
+
+#define QSYS_TAS_GCL_CT_CFG_OP_TYPE GENMASK(1, 0)
+#define QSYS_TAS_GCL_CT_CFG_OP_TYPE_SET(x)\
+ FIELD_PREP(QSYS_TAS_GCL_CT_CFG_OP_TYPE, x)
+#define QSYS_TAS_GCL_CT_CFG_OP_TYPE_GET(x)\
+ FIELD_GET(QSYS_TAS_GCL_CT_CFG_OP_TYPE, x)
+
+/* QSYS:TAS_GCL_CFG:TAS_GCL_CTRL_CFG2 */
+#define QSYS_TAS_GCL_CT_CFG2 __REG(TARGET_QSYS, 0, 1, 27968, 0, 1, 16, 4, 0, 1, 4)
+
+#define QSYS_TAS_GCL_CT_CFG2_PORT_PROFILE GENMASK(15, 12)
+#define QSYS_TAS_GCL_CT_CFG2_PORT_PROFILE_SET(x)\
+ FIELD_PREP(QSYS_TAS_GCL_CT_CFG2_PORT_PROFILE, x)
+#define QSYS_TAS_GCL_CT_CFG2_PORT_PROFILE_GET(x)\
+ FIELD_GET(QSYS_TAS_GCL_CT_CFG2_PORT_PROFILE, x)
+
+#define QSYS_TAS_GCL_CT_CFG2_NEXT_GCL GENMASK(11, 0)
+#define QSYS_TAS_GCL_CT_CFG2_NEXT_GCL_SET(x)\
+ FIELD_PREP(QSYS_TAS_GCL_CT_CFG2_NEXT_GCL, x)
+#define QSYS_TAS_GCL_CT_CFG2_NEXT_GCL_GET(x)\
+ FIELD_GET(QSYS_TAS_GCL_CT_CFG2_NEXT_GCL, x)
+
+/* QSYS:TAS_GCL_CFG:TAS_GCL_TIME_CFG */
+#define QSYS_TAS_GCL_TM_CFG __REG(TARGET_QSYS, 0, 1, 27968, 0, 1, 16, 8, 0, 1, 4)
+
+/* QSYS:HSCH_TAS_STATE:TAS_GATE_STATE */
+#define QSYS_TAS_GATE_STATE __REG(TARGET_QSYS, 0, 1, 28004, 0, 1, 4, 0, 0, 1, 4)
+
+#define QSYS_TAS_GATE_STATE_TAS_GATE_STATE GENMASK(7, 0)
+#define QSYS_TAS_GATE_STATE_TAS_GATE_STATE_SET(x)\
+ FIELD_PREP(QSYS_TAS_GATE_STATE_TAS_GATE_STATE, x)
+#define QSYS_TAS_GATE_STATE_TAS_GATE_STATE_GET(x)\
+ FIELD_GET(QSYS_TAS_GATE_STATE_TAS_GATE_STATE, x)
+
+/* REW:PORT:PORT_VLAN_CFG */
+#define REW_PORT_VLAN_CFG(g) __REG(TARGET_REW, 0, 1, 0, g, 10, 128, 0, 0, 1, 4)
+
+#define REW_PORT_VLAN_CFG_PORT_TPID GENMASK(31, 16)
+#define REW_PORT_VLAN_CFG_PORT_TPID_SET(x)\
+ FIELD_PREP(REW_PORT_VLAN_CFG_PORT_TPID, x)
+#define REW_PORT_VLAN_CFG_PORT_TPID_GET(x)\
+ FIELD_GET(REW_PORT_VLAN_CFG_PORT_TPID, x)
+
+#define REW_PORT_VLAN_CFG_PORT_VID GENMASK(11, 0)
+#define REW_PORT_VLAN_CFG_PORT_VID_SET(x)\
+ FIELD_PREP(REW_PORT_VLAN_CFG_PORT_VID, x)
+#define REW_PORT_VLAN_CFG_PORT_VID_GET(x)\
+ FIELD_GET(REW_PORT_VLAN_CFG_PORT_VID, x)
+
+/* REW:PORT:TAG_CFG */
+#define REW_TAG_CFG(g) __REG(TARGET_REW, 0, 1, 0, g, 10, 128, 4, 0, 1, 4)
+
+#define REW_TAG_CFG_TAG_CFG GENMASK(8, 7)
+#define REW_TAG_CFG_TAG_CFG_SET(x)\
+ FIELD_PREP(REW_TAG_CFG_TAG_CFG, x)
+#define REW_TAG_CFG_TAG_CFG_GET(x)\
+ FIELD_GET(REW_TAG_CFG_TAG_CFG, x)
+
+#define REW_TAG_CFG_TAG_TPID_CFG GENMASK(6, 5)
+#define REW_TAG_CFG_TAG_TPID_CFG_SET(x)\
+ FIELD_PREP(REW_TAG_CFG_TAG_TPID_CFG, x)
+#define REW_TAG_CFG_TAG_TPID_CFG_GET(x)\
+ FIELD_GET(REW_TAG_CFG_TAG_TPID_CFG, x)
+
+#define REW_TAG_CFG_TAG_PCP_CFG GENMASK(3, 2)
+#define REW_TAG_CFG_TAG_PCP_CFG_SET(x)\
+ FIELD_PREP(REW_TAG_CFG_TAG_PCP_CFG, x)
+#define REW_TAG_CFG_TAG_PCP_CFG_GET(x)\
+ FIELD_GET(REW_TAG_CFG_TAG_PCP_CFG, x)
+
+#define REW_TAG_CFG_TAG_DEI_CFG GENMASK(1, 0)
+#define REW_TAG_CFG_TAG_DEI_CFG_SET(x)\
+ FIELD_PREP(REW_TAG_CFG_TAG_DEI_CFG, x)
+#define REW_TAG_CFG_TAG_DEI_CFG_GET(x)\
+ FIELD_GET(REW_TAG_CFG_TAG_DEI_CFG, x)
+
+/* REW:PORT:PORT_CFG */
+#define REW_PORT_CFG(g) __REG(TARGET_REW, 0, 1, 0, g, 10, 128, 8, 0, 1, 4)
+
+#define REW_PORT_CFG_ES0_EN BIT(4)
+#define REW_PORT_CFG_ES0_EN_SET(x)\
+ FIELD_PREP(REW_PORT_CFG_ES0_EN, x)
+#define REW_PORT_CFG_ES0_EN_GET(x)\
+ FIELD_GET(REW_PORT_CFG_ES0_EN, x)
+
+#define REW_PORT_CFG_NO_REWRITE BIT(0)
+#define REW_PORT_CFG_NO_REWRITE_SET(x)\
+ FIELD_PREP(REW_PORT_CFG_NO_REWRITE, x)
+#define REW_PORT_CFG_NO_REWRITE_GET(x)\
+ FIELD_GET(REW_PORT_CFG_NO_REWRITE, x)
+
+/* REW:PORT:DSCP_CFG */
+#define REW_DSCP_CFG(g) __REG(TARGET_REW, 0, 1, 0, g, 10, 128, 12, 0, 1, 4)
+
+#define REW_DSCP_CFG_DSCP_REWR_CFG GENMASK(1, 0)
+#define REW_DSCP_CFG_DSCP_REWR_CFG_SET(x)\
+ FIELD_PREP(REW_DSCP_CFG_DSCP_REWR_CFG, x)
+#define REW_DSCP_CFG_DSCP_REWR_CFG_GET(x)\
+ FIELD_GET(REW_DSCP_CFG_DSCP_REWR_CFG, x)
+
+/* REW:PORT:PCP_DEI_QOS_MAP_CFG */
+#define REW_PCP_DEI_CFG(g, r) __REG(TARGET_REW, 0, 1, 0, g, 10, 128, 16, r, 16, 4)
+
+#define REW_PCP_DEI_CFG_DEI_QOS_VAL BIT(3)
+#define REW_PCP_DEI_CFG_DEI_QOS_VAL_SET(x)\
+ FIELD_PREP(REW_PCP_DEI_CFG_DEI_QOS_VAL, x)
+#define REW_PCP_DEI_CFG_DEI_QOS_VAL_GET(x)\
+ FIELD_GET(REW_PCP_DEI_CFG_DEI_QOS_VAL, x)
+
+#define REW_PCP_DEI_CFG_PCP_QOS_VAL GENMASK(2, 0)
+#define REW_PCP_DEI_CFG_PCP_QOS_VAL_SET(x)\
+ FIELD_PREP(REW_PCP_DEI_CFG_PCP_QOS_VAL, x)
+#define REW_PCP_DEI_CFG_PCP_QOS_VAL_GET(x)\
+ FIELD_GET(REW_PCP_DEI_CFG_PCP_QOS_VAL, x)
+
+/* REW:COMMON:STAT_CFG */
+#define REW_STAT_CFG __REG(TARGET_REW, 0, 1, 3072, 0, 1, 528, 520, 0, 1, 4)
+
+#define REW_STAT_CFG_STAT_MODE GENMASK(1, 0)
+#define REW_STAT_CFG_STAT_MODE_SET(x)\
+ FIELD_PREP(REW_STAT_CFG_STAT_MODE, x)
+#define REW_STAT_CFG_STAT_MODE_GET(x)\
+ FIELD_GET(REW_STAT_CFG_STAT_MODE, x)
+
+/* SYS:SYSTEM:RESET_CFG */
+#define SYS_RESET_CFG __REG(TARGET_SYS, 0, 1, 4128, 0, 1, 168, 0, 0, 1, 4)
+
+#define SYS_RESET_CFG_CORE_ENA BIT(0)
+#define SYS_RESET_CFG_CORE_ENA_SET(x)\
+ FIELD_PREP(SYS_RESET_CFG_CORE_ENA, x)
+#define SYS_RESET_CFG_CORE_ENA_GET(x)\
+ FIELD_GET(SYS_RESET_CFG_CORE_ENA, x)
+
+/* SYS:SYSTEM:PORT_MODE */
+#define SYS_PORT_MODE(r) __REG(TARGET_SYS, 0, 1, 4128, 0, 1, 168, 44, r, 10, 4)
+
+#define SYS_PORT_MODE_INCL_INJ_HDR GENMASK(5, 4)
+#define SYS_PORT_MODE_INCL_INJ_HDR_SET(x)\
+ FIELD_PREP(SYS_PORT_MODE_INCL_INJ_HDR, x)
+#define SYS_PORT_MODE_INCL_INJ_HDR_GET(x)\
+ FIELD_GET(SYS_PORT_MODE_INCL_INJ_HDR, x)
+
+#define SYS_PORT_MODE_INCL_XTR_HDR GENMASK(3, 2)
+#define SYS_PORT_MODE_INCL_XTR_HDR_SET(x)\
+ FIELD_PREP(SYS_PORT_MODE_INCL_XTR_HDR, x)
+#define SYS_PORT_MODE_INCL_XTR_HDR_GET(x)\
+ FIELD_GET(SYS_PORT_MODE_INCL_XTR_HDR, x)
+
+/* SYS:SYSTEM:FRONT_PORT_MODE */
+#define SYS_FRONT_PORT_MODE(r) __REG(TARGET_SYS, 0, 1, 4128, 0, 1, 168, 84, r, 8, 4)
+
+#define SYS_FRONT_PORT_MODE_HDX_MODE BIT(1)
+#define SYS_FRONT_PORT_MODE_HDX_MODE_SET(x)\
+ FIELD_PREP(SYS_FRONT_PORT_MODE_HDX_MODE, x)
+#define SYS_FRONT_PORT_MODE_HDX_MODE_GET(x)\
+ FIELD_GET(SYS_FRONT_PORT_MODE_HDX_MODE, x)
+
+/* SYS:SYSTEM:FRM_AGING */
+#define SYS_FRM_AGING __REG(TARGET_SYS, 0, 1, 4128, 0, 1, 168, 116, 0, 1, 4)
+
+#define SYS_FRM_AGING_AGE_TX_ENA BIT(20)
+#define SYS_FRM_AGING_AGE_TX_ENA_SET(x)\
+ FIELD_PREP(SYS_FRM_AGING_AGE_TX_ENA, x)
+#define SYS_FRM_AGING_AGE_TX_ENA_GET(x)\
+ FIELD_GET(SYS_FRM_AGING_AGE_TX_ENA, x)
+
+/* SYS:SYSTEM:STAT_CFG */
+#define SYS_STAT_CFG __REG(TARGET_SYS, 0, 1, 4128, 0, 1, 168, 120, 0, 1, 4)
+
+#define SYS_STAT_CFG_STAT_VIEW GENMASK(9, 0)
+#define SYS_STAT_CFG_STAT_VIEW_SET(x)\
+ FIELD_PREP(SYS_STAT_CFG_STAT_VIEW, x)
+#define SYS_STAT_CFG_STAT_VIEW_GET(x)\
+ FIELD_GET(SYS_STAT_CFG_STAT_VIEW, x)
+
+/* SYS:PAUSE_CFG:PAUSE_CFG */
+#define SYS_PAUSE_CFG(r) __REG(TARGET_SYS, 0, 1, 4296, 0, 1, 112, 0, r, 9, 4)
+
+#define SYS_PAUSE_CFG_PAUSE_START GENMASK(18, 10)
+#define SYS_PAUSE_CFG_PAUSE_START_SET(x)\
+ FIELD_PREP(SYS_PAUSE_CFG_PAUSE_START, x)
+#define SYS_PAUSE_CFG_PAUSE_START_GET(x)\
+ FIELD_GET(SYS_PAUSE_CFG_PAUSE_START, x)
+
+#define SYS_PAUSE_CFG_PAUSE_STOP GENMASK(9, 1)
+#define SYS_PAUSE_CFG_PAUSE_STOP_SET(x)\
+ FIELD_PREP(SYS_PAUSE_CFG_PAUSE_STOP, x)
+#define SYS_PAUSE_CFG_PAUSE_STOP_GET(x)\
+ FIELD_GET(SYS_PAUSE_CFG_PAUSE_STOP, x)
+
+#define SYS_PAUSE_CFG_PAUSE_ENA BIT(0)
+#define SYS_PAUSE_CFG_PAUSE_ENA_SET(x)\
+ FIELD_PREP(SYS_PAUSE_CFG_PAUSE_ENA, x)
+#define SYS_PAUSE_CFG_PAUSE_ENA_GET(x)\
+ FIELD_GET(SYS_PAUSE_CFG_PAUSE_ENA, x)
+
+/* SYS:PAUSE_CFG:ATOP */
+#define SYS_ATOP(r) __REG(TARGET_SYS, 0, 1, 4296, 0, 1, 112, 40, r, 9, 4)
+
+/* SYS:PAUSE_CFG:ATOP_TOT_CFG */
+#define SYS_ATOP_TOT_CFG __REG(TARGET_SYS, 0, 1, 4296, 0, 1, 112, 76, 0, 1, 4)
+
+/* SYS:PAUSE_CFG:MAC_FC_CFG */
+#define SYS_MAC_FC_CFG(r) __REG(TARGET_SYS, 0, 1, 4296, 0, 1, 112, 80, r, 8, 4)
+
+#define SYS_MAC_FC_CFG_FC_LINK_SPEED GENMASK(27, 26)
+#define SYS_MAC_FC_CFG_FC_LINK_SPEED_SET(x)\
+ FIELD_PREP(SYS_MAC_FC_CFG_FC_LINK_SPEED, x)
+#define SYS_MAC_FC_CFG_FC_LINK_SPEED_GET(x)\
+ FIELD_GET(SYS_MAC_FC_CFG_FC_LINK_SPEED, x)
+
+#define SYS_MAC_FC_CFG_FC_LATENCY_CFG GENMASK(25, 20)
+#define SYS_MAC_FC_CFG_FC_LATENCY_CFG_SET(x)\
+ FIELD_PREP(SYS_MAC_FC_CFG_FC_LATENCY_CFG, x)
+#define SYS_MAC_FC_CFG_FC_LATENCY_CFG_GET(x)\
+ FIELD_GET(SYS_MAC_FC_CFG_FC_LATENCY_CFG, x)
+
+#define SYS_MAC_FC_CFG_ZERO_PAUSE_ENA BIT(18)
+#define SYS_MAC_FC_CFG_ZERO_PAUSE_ENA_SET(x)\
+ FIELD_PREP(SYS_MAC_FC_CFG_ZERO_PAUSE_ENA, x)
+#define SYS_MAC_FC_CFG_ZERO_PAUSE_ENA_GET(x)\
+ FIELD_GET(SYS_MAC_FC_CFG_ZERO_PAUSE_ENA, x)
+
+#define SYS_MAC_FC_CFG_TX_FC_ENA BIT(17)
+#define SYS_MAC_FC_CFG_TX_FC_ENA_SET(x)\
+ FIELD_PREP(SYS_MAC_FC_CFG_TX_FC_ENA, x)
+#define SYS_MAC_FC_CFG_TX_FC_ENA_GET(x)\
+ FIELD_GET(SYS_MAC_FC_CFG_TX_FC_ENA, x)
+
+#define SYS_MAC_FC_CFG_RX_FC_ENA BIT(16)
+#define SYS_MAC_FC_CFG_RX_FC_ENA_SET(x)\
+ FIELD_PREP(SYS_MAC_FC_CFG_RX_FC_ENA, x)
+#define SYS_MAC_FC_CFG_RX_FC_ENA_GET(x)\
+ FIELD_GET(SYS_MAC_FC_CFG_RX_FC_ENA, x)
+
+#define SYS_MAC_FC_CFG_PAUSE_VAL_CFG GENMASK(15, 0)
+#define SYS_MAC_FC_CFG_PAUSE_VAL_CFG_SET(x)\
+ FIELD_PREP(SYS_MAC_FC_CFG_PAUSE_VAL_CFG, x)
+#define SYS_MAC_FC_CFG_PAUSE_VAL_CFG_GET(x)\
+ FIELD_GET(SYS_MAC_FC_CFG_PAUSE_VAL_CFG, x)
+
+/* SYS:STAT:CNT */
+#define SYS_CNT(g) __REG(TARGET_SYS, 0, 1, 0, g, 896, 4, 0, 0, 1, 4)
+
+/* SYS:RAM_CTRL:RAM_INIT */
+#define SYS_RAM_INIT __REG(TARGET_SYS, 0, 1, 4432, 0, 1, 4, 0, 0, 1, 4)
+
+#define SYS_RAM_INIT_RAM_INIT BIT(1)
+#define SYS_RAM_INIT_RAM_INIT_SET(x)\
+ FIELD_PREP(SYS_RAM_INIT_RAM_INIT, x)
+#define SYS_RAM_INIT_RAM_INIT_GET(x)\
+ FIELD_GET(SYS_RAM_INIT_RAM_INIT, x)
+
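+/* VCAP core registers: VCAP_UPDATE_CTRL issues read/write/move commands
+ * between the cache registers (VCAP_ENTRY/MASK/ACTION/CNT_DAT) and the TCAM
+ * rows, with VCAP_MV_CFG describing the range of addresses to move.
+ */
+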
+/* VCAP:VCAP_CORE_CFG:VCAP_UPDATE_CTRL */
+#define VCAP_UPDATE_CTRL(t) __REG(TARGET_VCAP, t, 3, 0, 0, 1, 8, 0, 0, 1, 4)
+
+#define VCAP_UPDATE_CTRL_UPDATE_CMD GENMASK(24, 22)
+#define VCAP_UPDATE_CTRL_UPDATE_CMD_SET(x)\
+ FIELD_PREP(VCAP_UPDATE_CTRL_UPDATE_CMD, x)
+#define VCAP_UPDATE_CTRL_UPDATE_CMD_GET(x)\
+ FIELD_GET(VCAP_UPDATE_CTRL_UPDATE_CMD, x)
+
+#define VCAP_UPDATE_CTRL_UPDATE_ENTRY_DIS BIT(21)
+#define VCAP_UPDATE_CTRL_UPDATE_ENTRY_DIS_SET(x)\
+ FIELD_PREP(VCAP_UPDATE_CTRL_UPDATE_ENTRY_DIS, x)
+#define VCAP_UPDATE_CTRL_UPDATE_ENTRY_DIS_GET(x)\
+ FIELD_GET(VCAP_UPDATE_CTRL_UPDATE_ENTRY_DIS, x)
+
+#define VCAP_UPDATE_CTRL_UPDATE_ACTION_DIS BIT(20)
+#define VCAP_UPDATE_CTRL_UPDATE_ACTION_DIS_SET(x)\
+ FIELD_PREP(VCAP_UPDATE_CTRL_UPDATE_ACTION_DIS, x)
+#define VCAP_UPDATE_CTRL_UPDATE_ACTION_DIS_GET(x)\
+ FIELD_GET(VCAP_UPDATE_CTRL_UPDATE_ACTION_DIS, x)
+
+#define VCAP_UPDATE_CTRL_UPDATE_CNT_DIS BIT(19)
+#define VCAP_UPDATE_CTRL_UPDATE_CNT_DIS_SET(x)\
+ FIELD_PREP(VCAP_UPDATE_CTRL_UPDATE_CNT_DIS, x)
+#define VCAP_UPDATE_CTRL_UPDATE_CNT_DIS_GET(x)\
+ FIELD_GET(VCAP_UPDATE_CTRL_UPDATE_CNT_DIS, x)
+
+#define VCAP_UPDATE_CTRL_UPDATE_ADDR GENMASK(18, 3)
+#define VCAP_UPDATE_CTRL_UPDATE_ADDR_SET(x)\
+ FIELD_PREP(VCAP_UPDATE_CTRL_UPDATE_ADDR, x)
+#define VCAP_UPDATE_CTRL_UPDATE_ADDR_GET(x)\
+ FIELD_GET(VCAP_UPDATE_CTRL_UPDATE_ADDR, x)
+
+#define VCAP_UPDATE_CTRL_UPDATE_SHOT BIT(2)
+#define VCAP_UPDATE_CTRL_UPDATE_SHOT_SET(x)\
+ FIELD_PREP(VCAP_UPDATE_CTRL_UPDATE_SHOT, x)
+#define VCAP_UPDATE_CTRL_UPDATE_SHOT_GET(x)\
+ FIELD_GET(VCAP_UPDATE_CTRL_UPDATE_SHOT, x)
+
+#define VCAP_UPDATE_CTRL_CLEAR_CACHE BIT(1)
+#define VCAP_UPDATE_CTRL_CLEAR_CACHE_SET(x)\
+ FIELD_PREP(VCAP_UPDATE_CTRL_CLEAR_CACHE, x)
+#define VCAP_UPDATE_CTRL_CLEAR_CACHE_GET(x)\
+ FIELD_GET(VCAP_UPDATE_CTRL_CLEAR_CACHE, x)
+
+#define VCAP_UPDATE_CTRL_MV_TRAFFIC_IGN BIT(0)
+#define VCAP_UPDATE_CTRL_MV_TRAFFIC_IGN_SET(x)\
+ FIELD_PREP(VCAP_UPDATE_CTRL_MV_TRAFFIC_IGN, x)
+#define VCAP_UPDATE_CTRL_MV_TRAFFIC_IGN_GET(x)\
+ FIELD_GET(VCAP_UPDATE_CTRL_MV_TRAFFIC_IGN, x)
+
+/* VCAP:VCAP_CORE_CFG:VCAP_MV_CFG */
+#define VCAP_MV_CFG(t) __REG(TARGET_VCAP, t, 3, 0, 0, 1, 8, 4, 0, 1, 4)
+
+#define VCAP_MV_CFG_MV_NUM_POS GENMASK(31, 16)
+#define VCAP_MV_CFG_MV_NUM_POS_SET(x)\
+ FIELD_PREP(VCAP_MV_CFG_MV_NUM_POS, x)
+#define VCAP_MV_CFG_MV_NUM_POS_GET(x)\
+ FIELD_GET(VCAP_MV_CFG_MV_NUM_POS, x)
+
+#define VCAP_MV_CFG_MV_SIZE GENMASK(15, 0)
+#define VCAP_MV_CFG_MV_SIZE_SET(x)\
+ FIELD_PREP(VCAP_MV_CFG_MV_SIZE, x)
+#define VCAP_MV_CFG_MV_SIZE_GET(x)\
+ FIELD_GET(VCAP_MV_CFG_MV_SIZE, x)
+
+/* VCAP:VCAP_CORE_CACHE:VCAP_ENTRY_DAT */
+#define VCAP_ENTRY_DAT(t, r) __REG(TARGET_VCAP, t, 3, 8, 0, 1, 904, 0, r, 64, 4)
+
+/* VCAP:VCAP_CORE_CACHE:VCAP_MASK_DAT */
+#define VCAP_MASK_DAT(t, r) __REG(TARGET_VCAP, t, 3, 8, 0, 1, 904, 256, r, 64, 4)
+
+/* VCAP:VCAP_CORE_CACHE:VCAP_ACTION_DAT */
+#define VCAP_ACTION_DAT(t, r) __REG(TARGET_VCAP, t, 3, 8, 0, 1, 904, 512, r, 64, 4)
+
+/* VCAP:VCAP_CORE_CACHE:VCAP_CNT_DAT */
+#define VCAP_CNT_DAT(t, r) __REG(TARGET_VCAP, t, 3, 8, 0, 1, 904, 768, r, 32, 4)
+
+/* VCAP:VCAP_CORE_CACHE:VCAP_CNT_FW_DAT */
+#define VCAP_CNT_FW_DAT(t) __REG(TARGET_VCAP, t, 3, 8, 0, 1, 904, 896, 0, 1, 4)
+
+/* VCAP:VCAP_CORE_CACHE:VCAP_TG_DAT */
+#define VCAP_TG_DAT(t) __REG(TARGET_VCAP, t, 3, 8, 0, 1, 904, 900, 0, 1, 4)
+
+/* VCAP:VCAP_CORE_MAP:VCAP_CORE_IDX */
+#define VCAP_CORE_IDX(t) __REG(TARGET_VCAP, t, 3, 912, 0, 1, 8, 0, 0, 1, 4)
+
+#define VCAP_CORE_IDX_CORE_IDX GENMASK(3, 0)
+#define VCAP_CORE_IDX_CORE_IDX_SET(x)\
+ FIELD_PREP(VCAP_CORE_IDX_CORE_IDX, x)
+#define VCAP_CORE_IDX_CORE_IDX_GET(x)\
+ FIELD_GET(VCAP_CORE_IDX_CORE_IDX, x)
+
+/* VCAP:VCAP_CORE_MAP:VCAP_CORE_MAP */
+#define VCAP_CORE_MAP(t) __REG(TARGET_VCAP, t, 3, 912, 0, 1, 8, 4, 0, 1, 4)
+
+#define VCAP_CORE_MAP_CORE_MAP GENMASK(2, 0)
+#define VCAP_CORE_MAP_CORE_MAP_SET(x)\
+ FIELD_PREP(VCAP_CORE_MAP_CORE_MAP, x)
+#define VCAP_CORE_MAP_CORE_MAP_GET(x)\
+ FIELD_GET(VCAP_CORE_MAP_CORE_MAP, x)
+
+/* VCAP:VCAP_CONST:VCAP_VER */
+#define VCAP_VER(t) __REG(TARGET_VCAP, t, 3, 924, 0, 1, 40, 0, 0, 1, 4)
+
+/* VCAP:VCAP_CONST:ENTRY_WIDTH */
+#define VCAP_ENTRY_WIDTH(t) __REG(TARGET_VCAP, t, 3, 924, 0, 1, 40, 4, 0, 1, 4)
+
+/* VCAP:VCAP_CONST:ENTRY_CNT */
+#define VCAP_ENTRY_CNT(t) __REG(TARGET_VCAP, t, 3, 924, 0, 1, 40, 8, 0, 1, 4)
+
+/* VCAP:VCAP_CONST:ENTRY_SWCNT */
+#define VCAP_ENTRY_SWCNT(t) __REG(TARGET_VCAP, t, 3, 924, 0, 1, 40, 12, 0, 1, 4)
+
+/* VCAP:VCAP_CONST:ENTRY_TG_WIDTH */
+#define VCAP_ENTRY_TG_WIDTH(t) __REG(TARGET_VCAP, t, 3, 924, 0, 1, 40, 16, 0, 1, 4)
+
+/* VCAP:VCAP_CONST:ACTION_DEF_CNT */
+#define VCAP_ACTION_DEF_CNT(t) __REG(TARGET_VCAP, t, 3, 924, 0, 1, 40, 20, 0, 1, 4)
+
+/* VCAP:VCAP_CONST:ACTION_WIDTH */
+#define VCAP_ACTION_WIDTH(t) __REG(TARGET_VCAP, t, 3, 924, 0, 1, 40, 24, 0, 1, 4)
+
+/* VCAP:VCAP_CONST:CNT_WIDTH */
+#define VCAP_CNT_WIDTH(t) __REG(TARGET_VCAP, t, 3, 924, 0, 1, 40, 28, 0, 1, 4)
+
+/* VCAP:VCAP_CONST:CORE_CNT */
+#define VCAP_CORE_CNT(t) __REG(TARGET_VCAP, t, 3, 924, 0, 1, 40, 32, 0, 1, 4)
+
+/* VCAP:VCAP_CONST:IF_CNT */
+#define VCAP_IF_CNT(t) __REG(TARGET_VCAP, t, 3, 924, 0, 1, 40, 36, 0, 1, 4)
+
+#endif /* _LAN966X_REGS_H_ */
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c b/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c
new file mode 100644
index 0000000000..1c88120eb2
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c
@@ -0,0 +1,664 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/if_bridge.h>
+#include <net/switchdev.h>
+
+#include "lan966x_main.h"
+
+static struct notifier_block lan966x_netdevice_nb __read_mostly;
+
+static void lan966x_port_set_mcast_ip_flood(struct lan966x_port *port,
+ u32 pgid_ip)
+{
+ struct lan966x *lan966x = port->lan966x;
+ u32 flood_mask_ip;
+
+ flood_mask_ip = lan_rd(lan966x, ANA_PGID(pgid_ip));
+ flood_mask_ip = ANA_PGID_PGID_GET(flood_mask_ip);
+
+ /* If mcast snooping is not enabled then use the mcast flood mask
+ * to decide whether to enable multicast flooding.
+ */
+ if (!port->mcast_ena) {
+ u32 flood_mask;
+
+ flood_mask = lan_rd(lan966x, ANA_PGID(PGID_MC));
+ flood_mask = ANA_PGID_PGID_GET(flood_mask);
+
+ if (flood_mask & BIT(port->chip_port))
+ flood_mask_ip |= BIT(port->chip_port);
+ else
+ flood_mask_ip &= ~BIT(port->chip_port);
+ } else {
+ flood_mask_ip &= ~BIT(port->chip_port);
+ }
+
+ lan_rmw(ANA_PGID_PGID_SET(flood_mask_ip),
+ ANA_PGID_PGID,
+ lan966x, ANA_PGID(pgid_ip));
+}
+
+static void lan966x_port_set_mcast_flood(struct lan966x_port *port,
+ bool enabled)
+{
+ u32 val = lan_rd(port->lan966x, ANA_PGID(PGID_MC));
+
+ val = ANA_PGID_PGID_GET(val);
+ if (enabled)
+ val |= BIT(port->chip_port);
+ else
+ val &= ~BIT(port->chip_port);
+
+ lan_rmw(ANA_PGID_PGID_SET(val),
+ ANA_PGID_PGID,
+ port->lan966x, ANA_PGID(PGID_MC));
+
+ if (!port->mcast_ena) {
+ lan966x_port_set_mcast_ip_flood(port, PGID_MCIPV4);
+ lan966x_port_set_mcast_ip_flood(port, PGID_MCIPV6);
+ }
+}
+
+static void lan966x_port_set_ucast_flood(struct lan966x_port *port,
+ bool enabled)
+{
+ u32 val = lan_rd(port->lan966x, ANA_PGID(PGID_UC));
+
+ val = ANA_PGID_PGID_GET(val);
+ if (enabled)
+ val |= BIT(port->chip_port);
+ else
+ val &= ~BIT(port->chip_port);
+
+ lan_rmw(ANA_PGID_PGID_SET(val),
+ ANA_PGID_PGID,
+ port->lan966x, ANA_PGID(PGID_UC));
+}
+
+static void lan966x_port_set_bcast_flood(struct lan966x_port *port,
+ bool enabled)
+{
+ u32 val = lan_rd(port->lan966x, ANA_PGID(PGID_BC));
+
+ val = ANA_PGID_PGID_GET(val);
+ if (enabled)
+ val |= BIT(port->chip_port);
+ else
+ val &= ~BIT(port->chip_port);
+
+ lan_rmw(ANA_PGID_PGID_SET(val),
+ ANA_PGID_PGID,
+ port->lan966x, ANA_PGID(PGID_BC));
+}
+
+static void lan966x_port_set_learning(struct lan966x_port *port, bool enabled)
+{
+ lan_rmw(ANA_PORT_CFG_LEARN_ENA_SET(enabled),
+ ANA_PORT_CFG_LEARN_ENA,
+ port->lan966x, ANA_PORT_CFG(port->chip_port));
+
+ port->learn_ena = enabled;
+}
+
+static void lan966x_port_bridge_flags(struct lan966x_port *port,
+ struct switchdev_brport_flags flags)
+{
+ if (flags.mask & BR_MCAST_FLOOD)
+ lan966x_port_set_mcast_flood(port,
+ !!(flags.val & BR_MCAST_FLOOD));
+
+ if (flags.mask & BR_FLOOD)
+ lan966x_port_set_ucast_flood(port,
+ !!(flags.val & BR_FLOOD));
+
+ if (flags.mask & BR_BCAST_FLOOD)
+ lan966x_port_set_bcast_flood(port,
+ !!(flags.val & BR_BCAST_FLOOD));
+
+ if (flags.mask & BR_LEARNING)
+ lan966x_port_set_learning(port,
+ !!(flags.val & BR_LEARNING));
+}
+
+static int lan966x_port_pre_bridge_flags(struct lan966x_port *port,
+ struct switchdev_brport_flags flags)
+{
+ if (flags.mask & ~(BR_MCAST_FLOOD | BR_FLOOD | BR_BCAST_FLOOD |
+ BR_LEARNING))
+ return -EINVAL;
+
+ return 0;
+}
+
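+/* Recompute the per-port source masks (PGID_SRC + i): a bridged port may
+ * forward to every other port in bridge_fwd_mask except itself and, when
+ * bonded, except the other members of its own LAG; the CPU port is always
+ * reachable.
+ */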
+void lan966x_update_fwd_mask(struct lan966x *lan966x)
+{
+ int i;
+
+ for (i = 0; i < lan966x->num_phys_ports; i++) {
+ struct lan966x_port *port = lan966x->ports[i];
+ unsigned long mask = 0;
+
+ if (port && lan966x->bridge_fwd_mask & BIT(i)) {
+ mask = lan966x->bridge_fwd_mask & ~BIT(i);
+
+ if (port->bond)
+ mask &= ~lan966x_lag_get_mask(lan966x,
+ port->bond);
+ }
+
+ mask |= BIT(CPU_PORT);
+
+ lan_wr(ANA_PGID_PGID_SET(mask),
+ lan966x, ANA_PGID(PGID_SRC + i));
+ }
+}
+
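+/* MAC learning stays enabled only in the LEARNING/FORWARDING STP states and
+ * only if the bridge requested it via BR_LEARNING; only FORWARDING ports
+ * enter bridge_fwd_mask before the forwarding masks are recomputed.
+ */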
+void lan966x_port_stp_state_set(struct lan966x_port *port, u8 state)
+{
+ struct lan966x *lan966x = port->lan966x;
+ bool learn_ena = false;
+
+ if ((state == BR_STATE_FORWARDING || state == BR_STATE_LEARNING) &&
+ port->learn_ena)
+ learn_ena = true;
+
+ if (state == BR_STATE_FORWARDING)
+ lan966x->bridge_fwd_mask |= BIT(port->chip_port);
+ else
+ lan966x->bridge_fwd_mask &= ~BIT(port->chip_port);
+
+ lan_rmw(ANA_PORT_CFG_LEARN_ENA_SET(learn_ena),
+ ANA_PORT_CFG_LEARN_ENA,
+ lan966x, ANA_PORT_CFG(port->chip_port));
+
+ lan966x_update_fwd_mask(lan966x);
+}
+
+void lan966x_port_ageing_set(struct lan966x_port *port,
+ unsigned long ageing_clock_t)
+{
+ unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
+ u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;
+
+ lan966x_mac_set_ageing(port->lan966x, ageing_time);
+}
+
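+/* Toggle multicast snooping: when enabled, IGMP/MLD frames are redirected to
+ * the CPU and the stored MDB entries are restored; when disabled, the MDB
+ * entries are cleared and IP multicast falls back to the flood masks.
+ */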
+static void lan966x_port_mc_set(struct lan966x_port *port, bool mcast_ena)
+{
+ struct lan966x *lan966x = port->lan966x;
+
+ port->mcast_ena = mcast_ena;
+ if (mcast_ena)
+ lan966x_mdb_restore_entries(lan966x);
+ else
+ lan966x_mdb_clear_entries(lan966x);
+
+ lan_rmw(ANA_CPU_FWD_CFG_IGMP_REDIR_ENA_SET(mcast_ena) |
+ ANA_CPU_FWD_CFG_MLD_REDIR_ENA_SET(mcast_ena) |
+ ANA_CPU_FWD_CFG_IPMC_CTRL_COPY_ENA_SET(mcast_ena),
+ ANA_CPU_FWD_CFG_IGMP_REDIR_ENA |
+ ANA_CPU_FWD_CFG_MLD_REDIR_ENA |
+ ANA_CPU_FWD_CFG_IPMC_CTRL_COPY_ENA,
+ lan966x, ANA_CPU_FWD_CFG(port->chip_port));
+
+ lan966x_port_set_mcast_ip_flood(port, PGID_MCIPV4);
+ lan966x_port_set_mcast_ip_flood(port, PGID_MCIPV6);
+}
+
+static int lan966x_port_attr_set(struct net_device *dev, const void *ctx,
+ const struct switchdev_attr *attr,
+ struct netlink_ext_ack *extack)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ int err = 0;
+
+ if (ctx && ctx != port)
+ return 0;
+
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
+ lan966x_port_bridge_flags(port, attr->u.brport_flags);
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
+ err = lan966x_port_pre_bridge_flags(port, attr->u.brport_flags);
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
+ lan966x_port_stp_state_set(port, attr->u.stp_state);
+ break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
+ lan966x_port_ageing_set(port, attr->u.ageing_time);
+ break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
+ lan966x_vlan_port_set_vlan_aware(port, attr->u.vlan_filtering);
+ lan966x_vlan_port_apply(port);
+ break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
+ lan966x_port_mc_set(port, !attr->u.mc_disabled);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static int lan966x_port_bridge_join(struct lan966x_port *port,
+ struct net_device *brport_dev,
+ struct net_device *bridge,
+ struct netlink_ext_ack *extack)
+{
+ struct switchdev_brport_flags flags = {0};
+ struct lan966x *lan966x = port->lan966x;
+ struct net_device *dev = port->dev;
+ int err;
+
+ if (!lan966x->bridge_mask) {
+ lan966x->bridge = bridge;
+ } else {
+ if (lan966x->bridge != bridge) {
+ NL_SET_ERR_MSG_MOD(extack, "Not allowed to add port to a different bridge");
+ return -ENODEV;
+ }
+ }
+
+ err = switchdev_bridge_port_offload(brport_dev, dev, port,
+ &lan966x_switchdev_nb,
+ &lan966x_switchdev_blocking_nb,
+ false, extack);
+ if (err)
+ return err;
+
+ lan966x->bridge_mask |= BIT(port->chip_port);
+
+ flags.mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
+ flags.val = flags.mask;
+ lan966x_port_bridge_flags(port, flags);
+
+ return 0;
+}
+
+static void lan966x_port_bridge_leave(struct lan966x_port *port,
+ struct net_device *bridge)
+{
+ struct switchdev_brport_flags flags = {0};
+ struct lan966x *lan966x = port->lan966x;
+
+ flags.mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
+ flags.val = flags.mask & ~BR_LEARNING;
+ lan966x_port_bridge_flags(port, flags);
+
+ lan966x->bridge_mask &= ~BIT(port->chip_port);
+
+ if (!lan966x->bridge_mask)
+ lan966x->bridge = NULL;
+
+ /* Set the port back to host mode */
+ lan966x_vlan_port_set_vlan_aware(port, false);
+ lan966x_vlan_port_set_vid(port, HOST_PVID, false, false);
+ lan966x_vlan_port_apply(port);
+}
+
+int lan966x_port_changeupper(struct net_device *dev,
+ struct net_device *brport_dev,
+ struct netdev_notifier_changeupper_info *info)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct netlink_ext_ack *extack;
+ int err = 0;
+
+ extack = netdev_notifier_info_to_extack(&info->info);
+
+ if (netif_is_bridge_master(info->upper_dev)) {
+ if (info->linking)
+ err = lan966x_port_bridge_join(port, brport_dev,
+ info->upper_dev,
+ extack);
+ else
+ lan966x_port_bridge_leave(port, info->upper_dev);
+ }
+
+ if (netif_is_lag_master(info->upper_dev)) {
+ if (info->linking)
+ err = lan966x_lag_port_join(port, info->upper_dev,
+ info->upper_dev,
+ extack);
+ else
+ lan966x_lag_port_leave(port, info->upper_dev);
+ }
+
+ return err;
+}
+
+int lan966x_port_prechangeupper(struct net_device *dev,
+ struct net_device *brport_dev,
+ struct netdev_notifier_changeupper_info *info)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ int err = NOTIFY_DONE;
+
+ if (netif_is_bridge_master(info->upper_dev) && !info->linking) {
+ switchdev_bridge_port_unoffload(port->dev, port, NULL, NULL);
+ lan966x_fdb_flush_workqueue(port->lan966x);
+ }
+
+ if (netif_is_lag_master(info->upper_dev)) {
+ err = lan966x_lag_port_prechangeupper(dev, info);
+ if (err || info->linking)
+ return err;
+
+ switchdev_bridge_port_unoffload(brport_dev, port, NULL, NULL);
+ lan966x_fdb_flush_workqueue(port->lan966x);
+ }
+
+ return err;
+}
+
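+/* Walk the lower devices of a candidate bridge or LAG upper and reject
+ * configurations that would mix ports of different lan966x instances, or
+ * lan966x ports with foreign interfaces, under the same upper.
+ */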
+static int lan966x_foreign_bridging_check(struct net_device *upper,
+ bool *has_foreign,
+ bool *seen_lan966x,
+ struct netlink_ext_ack *extack)
+{
+ struct lan966x *lan966x = NULL;
+ struct net_device *dev;
+ struct list_head *iter;
+
+ if (!netif_is_bridge_master(upper) &&
+ !netif_is_lag_master(upper))
+ return 0;
+
+ netdev_for_each_lower_dev(upper, dev, iter) {
+ if (lan966x_netdevice_check(dev)) {
+ struct lan966x_port *port = netdev_priv(dev);
+
+ if (lan966x) {
+ /* Upper already has at least one port of a
+ * lan966x switch inside it; check that it's
+ * the same instance of the driver.
+ */
+ if (port->lan966x != lan966x) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Bridging between multiple lan966x switches disallowed");
+ return -EINVAL;
+ }
+ } else {
+ /* This is the first lan966x port inside this
+ * upper device
+ */
+ lan966x = port->lan966x;
+ *seen_lan966x = true;
+ }
+ } else if (netif_is_lag_master(dev)) {
+ /* Allow bond interfaces that contain only lan966x
+ * devices
+ */
+ if (lan966x_foreign_bridging_check(dev, has_foreign,
+ seen_lan966x,
+ extack))
+ return -EINVAL;
+ } else {
+ *has_foreign = true;
+ }
+
+ if (*seen_lan966x && *has_foreign) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Bridging lan966x ports with foreign interfaces disallowed");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int lan966x_bridge_check(struct net_device *dev,
+ struct netdev_notifier_changeupper_info *info)
+{
+ bool has_foreign = false;
+ bool seen_lan966x = false;
+
+ return lan966x_foreign_bridging_check(info->upper_dev,
+ &has_foreign,
+ &seen_lan966x,
+ info->info.extack);
+}
+
+static int lan966x_netdevice_port_event(struct net_device *dev,
+ struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ int err = 0;
+
+ if (!lan966x_netdevice_check(dev)) {
+ switch (event) {
+ case NETDEV_CHANGEUPPER:
+ case NETDEV_PRECHANGEUPPER:
+ err = lan966x_bridge_check(dev, ptr);
+ if (err)
+ return err;
+
+ if (netif_is_lag_master(dev)) {
+ if (event == NETDEV_CHANGEUPPER)
+ err = lan966x_lag_netdev_changeupper(dev,
+ ptr);
+ else
+ err = lan966x_lag_netdev_prechangeupper(dev,
+ ptr);
+
+ return err;
+ }
+ break;
+ default:
+ return 0;
+ }
+
+ return 0;
+ }
+
+ switch (event) {
+ case NETDEV_PRECHANGEUPPER:
+ err = lan966x_port_prechangeupper(dev, dev, ptr);
+ break;
+ case NETDEV_CHANGEUPPER:
+ err = lan966x_bridge_check(dev, ptr);
+ if (err)
+ return err;
+
+ err = lan966x_port_changeupper(dev, dev, ptr);
+ break;
+ case NETDEV_CHANGELOWERSTATE:
+ err = lan966x_lag_port_changelowerstate(dev, ptr);
+ break;
+ }
+
+ return err;
+}
+
+static int lan966x_netdevice_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ int ret;
+
+ ret = lan966x_netdevice_port_event(dev, nb, event, ptr);
+
+ return notifier_from_errno(ret);
+}
+
+static bool lan966x_foreign_dev_check(const struct net_device *dev,
+ const struct net_device *foreign_dev)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ int i;
+
+ if (netif_is_bridge_master(foreign_dev))
+ if (lan966x->bridge == foreign_dev)
+ return false;
+
+ if (netif_is_lag_master(foreign_dev))
+ for (i = 0; i < lan966x->num_phys_ports; ++i)
+ if (lan966x->ports[i] &&
+ lan966x->ports[i]->bond == foreign_dev)
+ return false;
+
+ return true;
+}
+
+static int lan966x_switchdev_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+ int err;
+
+ switch (event) {
+ case SWITCHDEV_PORT_ATTR_SET:
+ err = switchdev_handle_port_attr_set(dev, ptr,
+ lan966x_netdevice_check,
+ lan966x_port_attr_set);
+ return notifier_from_errno(err);
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ err = switchdev_handle_fdb_event_to_device(dev, event, ptr,
+ lan966x_netdevice_check,
+ lan966x_foreign_dev_check,
+ lan966x_handle_fdb);
+ return notifier_from_errno(err);
+ }
+
+ return NOTIFY_DONE;
+}
+
+static int lan966x_handle_port_vlan_add(struct lan966x_port *port,
+ const struct switchdev_obj *obj)
+{
+ const struct switchdev_obj_port_vlan *v = SWITCHDEV_OBJ_PORT_VLAN(obj);
+ struct lan966x *lan966x = port->lan966x;
+
+ if (!netif_is_bridge_master(obj->orig_dev))
+ lan966x_vlan_port_add_vlan(port, v->vid,
+ v->flags & BRIDGE_VLAN_INFO_PVID,
+ v->flags & BRIDGE_VLAN_INFO_UNTAGGED);
+ else
+ lan966x_vlan_cpu_add_vlan(lan966x, v->vid);
+
+ return 0;
+}
+
+static int lan966x_handle_port_obj_add(struct net_device *dev, const void *ctx,
+ const struct switchdev_obj *obj,
+ struct netlink_ext_ack *extack)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ int err;
+
+ if (ctx && ctx != port)
+ return 0;
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ err = lan966x_handle_port_vlan_add(port, obj);
+ break;
+ case SWITCHDEV_OBJ_ID_PORT_MDB:
+ case SWITCHDEV_OBJ_ID_HOST_MDB:
+ err = lan966x_handle_port_mdb_add(port, obj);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static int lan966x_handle_port_vlan_del(struct lan966x_port *port,
+ const struct switchdev_obj *obj)
+{
+ const struct switchdev_obj_port_vlan *v = SWITCHDEV_OBJ_PORT_VLAN(obj);
+ struct lan966x *lan966x = port->lan966x;
+
+ if (!netif_is_bridge_master(obj->orig_dev))
+ lan966x_vlan_port_del_vlan(port, v->vid);
+ else
+ lan966x_vlan_cpu_del_vlan(lan966x, v->vid);
+
+ return 0;
+}
+
+static int lan966x_handle_port_obj_del(struct net_device *dev, const void *ctx,
+ const struct switchdev_obj *obj)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ int err;
+
+ if (ctx && ctx != port)
+ return 0;
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ err = lan966x_handle_port_vlan_del(port, obj);
+ break;
+ case SWITCHDEV_OBJ_ID_PORT_MDB:
+ case SWITCHDEV_OBJ_ID_HOST_MDB:
+ err = lan966x_handle_port_mdb_del(port, obj);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static int lan966x_switchdev_blocking_event(struct notifier_block *nb,
+ unsigned long event,
+ void *ptr)
+{
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+ int err;
+
+ switch (event) {
+ case SWITCHDEV_PORT_OBJ_ADD:
+ err = switchdev_handle_port_obj_add(dev, ptr,
+ lan966x_netdevice_check,
+ lan966x_handle_port_obj_add);
+ return notifier_from_errno(err);
+ case SWITCHDEV_PORT_OBJ_DEL:
+ err = switchdev_handle_port_obj_del(dev, ptr,
+ lan966x_netdevice_check,
+ lan966x_handle_port_obj_del);
+ return notifier_from_errno(err);
+ case SWITCHDEV_PORT_ATTR_SET:
+ err = switchdev_handle_port_attr_set(dev, ptr,
+ lan966x_netdevice_check,
+ lan966x_port_attr_set);
+ return notifier_from_errno(err);
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block lan966x_netdevice_nb __read_mostly = {
+ .notifier_call = lan966x_netdevice_event,
+};
+
+struct notifier_block lan966x_switchdev_nb __read_mostly = {
+ .notifier_call = lan966x_switchdev_event,
+};
+
+struct notifier_block lan966x_switchdev_blocking_nb __read_mostly = {
+ .notifier_call = lan966x_switchdev_blocking_event,
+};
+
+void lan966x_register_notifier_blocks(void)
+{
+ register_netdevice_notifier(&lan966x_netdevice_nb);
+ register_switchdev_notifier(&lan966x_switchdev_nb);
+ register_switchdev_blocking_notifier(&lan966x_switchdev_blocking_nb);
+}
+
+void lan966x_unregister_notifier_blocks(void)
+{
+ unregister_switchdev_blocking_notifier(&lan966x_switchdev_blocking_nb);
+ unregister_switchdev_notifier(&lan966x_switchdev_nb);
+ unregister_netdevice_notifier(&lan966x_netdevice_nb);
+}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_taprio.c b/drivers/net/ethernet/microchip/lan966x/lan966x_taprio.c
new file mode 100644
index 0000000000..3f5b212066
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_taprio.c
@@ -0,0 +1,528 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include "lan966x_main.h"
+
+#define LAN966X_TAPRIO_TIMEOUT_MS 1000
+#define LAN966X_TAPRIO_ENTRIES_PER_PORT 2
+
+/* Minimum supported cycle time in nanoseconds */
+#define LAN966X_TAPRIO_MIN_CYCLE_TIME_NS NSEC_PER_USEC
+
+/* Maximum supported cycle time in nanoseconds */
+#define LAN966X_TAPRIO_MAX_CYCLE_TIME_NS (NSEC_PER_SEC - 1)
+
+/* Total number of TAS GCL entries */
+#define LAN966X_TAPRIO_NUM_GCL 256
+
+/* TAPRIO link speeds for calculation of guard band */
+enum lan966x_taprio_link_speed {
+ LAN966X_TAPRIO_SPEED_NO_GB,
+ LAN966X_TAPRIO_SPEED_10,
+ LAN966X_TAPRIO_SPEED_100,
+ LAN966X_TAPRIO_SPEED_1000,
+ LAN966X_TAPRIO_SPEED_2500,
+};
+
+/* TAPRIO list states */
+enum lan966x_taprio_state {
+ LAN966X_TAPRIO_STATE_ADMIN,
+ LAN966X_TAPRIO_STATE_ADVANCING,
+ LAN966X_TAPRIO_STATE_PENDING,
+ LAN966X_TAPRIO_STATE_OPERATING,
+ LAN966X_TAPRIO_STATE_TERMINATING,
+ LAN966X_TAPRIO_STATE_MAX,
+};
+
+/* TAPRIO GCL command */
+enum lan966x_taprio_gcl_cmd {
+ LAN966X_TAPRIO_GCL_CMD_SET_GATE_STATES = 0,
+};
+
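+/* Each port owns LAN966X_TAPRIO_ENTRIES_PER_PORT consecutive TAS lists; with
+ * two entries per port, port 3 for instance maps to lists 6 and 7.
+ */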
+static u32 lan966x_taprio_list_index(struct lan966x_port *port, u8 entry)
+{
+ return port->chip_port * LAN966X_TAPRIO_ENTRIES_PER_PORT + entry;
+}
+
+static u32 lan966x_taprio_list_state_get(struct lan966x_port *port)
+{
+ struct lan966x *lan966x = port->lan966x;
+ u32 val;
+
+ val = lan_rd(lan966x, QSYS_TAS_LST);
+ return QSYS_TAS_LST_LIST_STATE_GET(val);
+}
+
+static u32 lan966x_taprio_list_index_state_get(struct lan966x_port *port,
+ u32 list)
+{
+ struct lan966x *lan966x = port->lan966x;
+
+ lan_rmw(QSYS_TAS_CFG_CTRL_LIST_NUM_SET(list),
+ QSYS_TAS_CFG_CTRL_LIST_NUM,
+ lan966x, QSYS_TAS_CFG_CTRL);
+
+ return lan966x_taprio_list_state_get(port);
+}
+
+static void lan966x_taprio_list_state_set(struct lan966x_port *port,
+ u32 state)
+{
+ struct lan966x *lan966x = port->lan966x;
+
+ lan_rmw(QSYS_TAS_LST_LIST_STATE_SET(state),
+ QSYS_TAS_LST_LIST_STATE,
+ lan966x, QSYS_TAS_LST);
+}
+
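+/* Bring a TAS list back to the admin state. Advancing/pending lists are set
+ * to admin directly, while an operating list is first moved to terminating;
+ * both steps are retried until the HW reports admin or the timeout expires.
+ */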
+static int lan966x_taprio_list_shutdown(struct lan966x_port *port,
+ u32 list)
+{
+ struct lan966x *lan966x = port->lan966x;
+ bool pending, operating;
+ unsigned long end;
+ u32 state;
+
+ end = jiffies + msecs_to_jiffies(LAN966X_TAPRIO_TIMEOUT_MS);
+ /* Setting the list state may need several attempts, because the HW
+ * can overwrite it in the meantime.
+ */
+ do {
+ state = lan966x_taprio_list_state_get(port);
+
+ pending = false;
+ operating = false;
+
+ if (state == LAN966X_TAPRIO_STATE_ADVANCING ||
+ state == LAN966X_TAPRIO_STATE_PENDING) {
+ lan966x_taprio_list_state_set(port,
+ LAN966X_TAPRIO_STATE_ADMIN);
+ pending = true;
+ }
+
+ if (state == LAN966X_TAPRIO_STATE_OPERATING) {
+ lan966x_taprio_list_state_set(port,
+ LAN966X_TAPRIO_STATE_TERMINATING);
+ operating = true;
+ }
+
+ /* If the list was pending and has now reached the admin state,
+ * there is nothing left to do, so just bail out
+ */
+ state = lan966x_taprio_list_state_get(port);
+ if (pending &&
+ state == LAN966X_TAPRIO_STATE_ADMIN)
+ return 0;
+
+ /* If the list was operating and is now terminating or admin,
+ * it is OK to exit this loop, but we must still wait until the
+ * list reaches the admin state. There is no need to set the
+ * state again.
+ */
+ if (operating &&
+ (state == LAN966X_TAPRIO_STATE_TERMINATING ||
+ state == LAN966X_TAPRIO_STATE_ADMIN))
+ break;
+
+ } while (!time_after(jiffies, end));
+
+ end = jiffies + msecs_to_jiffies(LAN966X_TAPRIO_TIMEOUT_MS);
+ do {
+ state = lan966x_taprio_list_state_get(port);
+ if (state == LAN966X_TAPRIO_STATE_ADMIN)
+ break;
+
+ } while (!time_after(jiffies, end));
+
+ /* If the list was in operating mode, it could have been stopped while
+ * some queues were closed, so make sure to restore "all-queues-open"
+ */
+ if (operating) {
+ lan_wr(QSYS_TAS_GS_CTRL_HSCH_POS_SET(port->chip_port),
+ lan966x, QSYS_TAS_GS_CTRL);
+
+ lan_wr(QSYS_TAS_GATE_STATE_TAS_GATE_STATE_SET(0xff),
+ lan966x, QSYS_TAS_GATE_STATE);
+ }
+
+ return 0;
+}
+
+static int lan966x_taprio_shutdown(struct lan966x_port *port)
+{
+ u32 i, list, state;
+ int err;
+
+ for (i = 0; i < LAN966X_TAPRIO_ENTRIES_PER_PORT; ++i) {
+ list = lan966x_taprio_list_index(port, i);
+ state = lan966x_taprio_list_index_state_get(port, list);
+ if (state == LAN966X_TAPRIO_STATE_ADMIN)
+ continue;
+
+ err = lan966x_taprio_list_shutdown(port, list);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/* Find a suitable list for a new schedule. First priority is a list in state
+ * pending. Second priority is a list in state admin.
+ */
+static int lan966x_taprio_find_list(struct lan966x_port *port,
+ struct tc_taprio_qopt_offload *qopt,
+ int *new_list, int *obs_list)
+{
+ int state[LAN966X_TAPRIO_ENTRIES_PER_PORT];
+ int list[LAN966X_TAPRIO_ENTRIES_PER_PORT];
+ int err, oper = -1;
+ u32 i;
+
+ *new_list = -1;
+ *obs_list = -1;
+
+ /* If there is already a list in operating mode, return it in
+ * obs_list, so that the operating list gets stopped when the new list
+ * is activated. In this way it is possible to have smooth
+ * transitions between the lists
+ */
+ for (i = 0; i < LAN966X_TAPRIO_ENTRIES_PER_PORT; ++i) {
+ list[i] = lan966x_taprio_list_index(port, i);
+ state[i] = lan966x_taprio_list_index_state_get(port, list[i]);
+ if (state[i] == LAN966X_TAPRIO_STATE_OPERATING)
+ oper = list[i];
+ }
+
+ for (i = 0; i < LAN966X_TAPRIO_ENTRIES_PER_PORT; ++i) {
+ if (state[i] == LAN966X_TAPRIO_STATE_PENDING) {
+ err = lan966x_taprio_shutdown(port);
+ if (err)
+ return err;
+
+ *new_list = list[i];
+ *obs_list = (oper == -1) ? *new_list : oper;
+ return 0;
+ }
+ }
+
+ for (i = 0; i < LAN966X_TAPRIO_ENTRIES_PER_PORT; ++i) {
+ if (state[i] == LAN966X_TAPRIO_STATE_ADMIN) {
+ *new_list = list[i];
+ *obs_list = (oper == -1) ? *new_list : oper;
+ return 0;
+ }
+ }
+
+ return -ENOSPC;
+}
+
+static int lan966x_taprio_check(struct tc_taprio_qopt_offload *qopt)
+{
+ u64 total_time = 0;
+ u32 i;
+
+ /* This is not supported by the HW */
+ if (qopt->cycle_time_extension)
+ return -EOPNOTSUPP;
+
+ /* There is a limited number of gcl entries that can be used; they are
+ * shared by all ports
+ */
+ if (qopt->num_entries > LAN966X_TAPRIO_NUM_GCL)
+ return -EINVAL;
+
+ /* Don't allow cycle times bigger than 1 sec or smaller than 1 usec */
+ if (qopt->cycle_time < LAN966X_TAPRIO_MIN_CYCLE_TIME_NS ||
+ qopt->cycle_time > LAN966X_TAPRIO_MAX_CYCLE_TIME_NS)
+ return -EINVAL;
+
+ for (i = 0; i < qopt->num_entries; ++i) {
+ struct tc_taprio_sched_entry *entry = &qopt->entries[i];
+
+ /* Don't allow intervals bigger than 1 sec or smaller than 1
+ * usec
+ */
+ if (entry->interval < LAN966X_TAPRIO_MIN_CYCLE_TIME_NS ||
+ entry->interval > LAN966X_TAPRIO_MAX_CYCLE_TIME_NS)
+ return -EINVAL;
+
+ if (entry->command != TC_TAPRIO_CMD_SET_GATES)
+ return -EINVAL;
+
+ total_time += entry->interval;
+ }
+
+ /* Don't allow the total time of the intervals to be bigger than 1 sec */
+ if (total_time > LAN966X_TAPRIO_MAX_CYCLE_TIME_NS)
+ return -EINVAL;
+
+ /* The HW expects the cycle time to be at least as big as the sum of
+ * the gcl intervals
+ */
+ if (qopt->cycle_time < total_time)
+ return -EINVAL;
+
+ return 0;
+}
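+
+/* For reference, a schedule that passes the checks above could be installed
+ * from user space with something like (illustrative values only):
+ *
+ *   tc qdisc replace dev eth0 parent root handle 100 taprio \
+ *      num_tc 8 map 0 1 2 3 4 5 6 7 \
+ *      queues 1@0 1@1 1@2 1@3 1@4 1@5 1@6 1@7 \
+ *      base-time 1000000000 \
+ *      sched-entry S 0x01 300000 sched-entry S 0xfe 700000 \
+ *      flags 0x2
+ *
+ * Both intervals are at least 1 usec and their sum (1 msec) stays below
+ * 1 sec.
+ */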
+
+static int lan966x_taprio_gcl_free_get(struct lan966x_port *port,
+ unsigned long *free_list)
+{
+ struct lan966x *lan966x = port->lan966x;
+ u32 num_free, state, list;
+ u32 base, next, max_list;
+
+ /* By default everything is free */
+ bitmap_fill(free_list, LAN966X_TAPRIO_NUM_GCL);
+ num_free = LAN966X_TAPRIO_NUM_GCL;
+
+ /* Iterate over all gcl entries to find out which ones are free and
+ * mark those that are in use.
+ */
+ max_list = lan966x->num_phys_ports * LAN966X_TAPRIO_ENTRIES_PER_PORT;
+ for (list = 0; list < max_list; ++list) {
+ state = lan966x_taprio_list_index_state_get(port, list);
+ if (state == LAN966X_TAPRIO_STATE_ADMIN)
+ continue;
+
+ base = lan_rd(lan966x, QSYS_TAS_LIST_CFG);
+ base = QSYS_TAS_LIST_CFG_LIST_BASE_ADDR_GET(base);
+ next = base;
+
+ do {
+ clear_bit(next, free_list);
+ num_free--;
+
+ lan_rmw(QSYS_TAS_CFG_CTRL_GCL_ENTRY_NUM_SET(next),
+ QSYS_TAS_CFG_CTRL_GCL_ENTRY_NUM,
+ lan966x, QSYS_TAS_CFG_CTRL);
+
+ next = lan_rd(lan966x, QSYS_TAS_GCL_CT_CFG2);
+ next = QSYS_TAS_GCL_CT_CFG2_NEXT_GCL_GET(next);
+ } while (base != next);
+ }
+
+ return num_free;
+}
+
+static void lan966x_taprio_gcl_setup_entry(struct lan966x_port *port,
+ struct tc_taprio_sched_entry *entry,
+ u32 next_entry)
+{
+ struct lan966x *lan966x = port->lan966x;
+
+ /* Setup a single gcl entry */
+ lan_wr(QSYS_TAS_GCL_CT_CFG_GATE_STATE_SET(entry->gate_mask) |
+ QSYS_TAS_GCL_CT_CFG_HSCH_POS_SET(port->chip_port) |
+ QSYS_TAS_GCL_CT_CFG_OP_TYPE_SET(LAN966X_TAPRIO_GCL_CMD_SET_GATE_STATES),
+ lan966x, QSYS_TAS_GCL_CT_CFG);
+
+ lan_wr(QSYS_TAS_GCL_CT_CFG2_PORT_PROFILE_SET(port->chip_port) |
+ QSYS_TAS_GCL_CT_CFG2_NEXT_GCL_SET(next_entry),
+ lan966x, QSYS_TAS_GCL_CT_CFG2);
+
+ lan_wr(entry->interval, lan966x, QSYS_TAS_GCL_TM_CFG);
+}
+
+static int lan966x_taprio_gcl_setup(struct lan966x_port *port,
+ struct tc_taprio_qopt_offload *qopt,
+ int list)
+{
+ DECLARE_BITMAP(free_list, LAN966X_TAPRIO_NUM_GCL);
+ struct lan966x *lan966x = port->lan966x;
+ u32 i, base, next;
+
+ if (lan966x_taprio_gcl_free_get(port, free_list) < qopt->num_entries)
+ return -ENOSPC;
+
+ /* Select list */
+ lan_rmw(QSYS_TAS_CFG_CTRL_LIST_NUM_SET(list),
+ QSYS_TAS_CFG_CTRL_LIST_NUM,
+ lan966x, QSYS_TAS_CFG_CTRL);
+
+ /* Setup the address of the first gcl entry */
+ base = find_first_bit(free_list, LAN966X_TAPRIO_NUM_GCL);
+ lan_rmw(QSYS_TAS_LIST_CFG_LIST_BASE_ADDR_SET(base),
+ QSYS_TAS_LIST_CFG_LIST_BASE_ADDR,
+ lan966x, QSYS_TAS_LIST_CFG);
+
+ /* Iterate over entries and add them to the gcl list */
+ next = base;
+ for (i = 0; i < qopt->num_entries; ++i) {
+ lan_rmw(QSYS_TAS_CFG_CTRL_GCL_ENTRY_NUM_SET(next),
+ QSYS_TAS_CFG_CTRL_GCL_ENTRY_NUM,
+ lan966x, QSYS_TAS_CFG_CTRL);
+
+ /* If the entry is last, point back to the start of the list */
+ if (i == qopt->num_entries - 1)
+ next = base;
+ else
+ next = find_next_bit(free_list, LAN966X_TAPRIO_NUM_GCL,
+ next + 1);
+
+ lan966x_taprio_gcl_setup_entry(port, &qopt->entries[i], next);
+ }
+
+ return 0;
+}
+
+/* Calculate a new base_time based on cycle_time. The HW recommends the new
+ * base time to be at least 2 * cycle_time ahead of the current time
+ */
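+/* Worked example (hypothetical values, cycle_time = 1 ms):
+ * - current_time = 10.000 s, so threshold_time = 10.002 s;
+ * - org_base_time = 20.000 s (>= threshold_time): used unchanged;
+ * - org_base_time = 10.001 s (between current and threshold):
+ *   new_base_time = 10.001 s + 2 ms = 10.003 s;
+ * - org_base_time = 4.0004 s (in the past): tmp = 5.9996 s,
+ *   rem = 1 ms - (tmp % 1 ms) = 0.4 ms,
+ *   new_base_time = 10.002 s + 0.4 ms = 10.0024 s.
+ */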
+static void lan966x_taprio_new_base_time(struct lan966x *lan966x,
+ const u32 cycle_time,
+ const ktime_t org_base_time,
+ ktime_t *new_base_time)
+{
+ ktime_t current_time, threshold_time;
+ struct timespec64 ts;
+
+ /* Get the current time and calculate the threshold_time */
+ lan966x_ptp_gettime64(&lan966x->phc[LAN966X_PHC_PORT].info, &ts);
+ current_time = timespec64_to_ktime(ts);
+ threshold_time = current_time + (2 * cycle_time);
+
+ /* If the org_base_time is far enough in the future, just use it */
+ if (org_base_time >= threshold_time) {
+ *new_base_time = org_base_time;
+ return;
+ }
+
+ /* If the org_base_time is smaller than current_time, calculate the new
+ * base time as follows.
+ */
+ if (org_base_time <= current_time) {
+ u64 tmp = current_time - org_base_time;
+ u32 rem = 0;
+
+ if (tmp > cycle_time)
+ div_u64_rem(tmp, cycle_time, &rem);
+ rem = cycle_time - rem;
+ *new_base_time = threshold_time + rem;
+ return;
+ }
+
+ /* The only remaining case is org_base_time between current_time and
+ * threshold_time. In this case the new_base_time is calculated as
+ * org_base_time + 2 * cycle_time
+ */
+ *new_base_time = org_base_time + 2 * cycle_time;
+}
+
+int lan966x_taprio_speed_set(struct lan966x_port *port, int speed)
+{
+ struct lan966x *lan966x = port->lan966x;
+ u8 taprio_speed;
+
+ switch (speed) {
+ case SPEED_10:
+ taprio_speed = LAN966X_TAPRIO_SPEED_10;
+ break;
+ case SPEED_100:
+ taprio_speed = LAN966X_TAPRIO_SPEED_100;
+ break;
+ case SPEED_1000:
+ taprio_speed = LAN966X_TAPRIO_SPEED_1000;
+ break;
+ case SPEED_2500:
+ taprio_speed = LAN966X_TAPRIO_SPEED_2500;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ lan_rmw(QSYS_TAS_PROFILE_CFG_LINK_SPEED_SET(taprio_speed),
+ QSYS_TAS_PROFILE_CFG_LINK_SPEED,
+ lan966x, QSYS_TAS_PROFILE_CFG(port->chip_port));
+
+ return 0;
+}
+
+int lan966x_taprio_add(struct lan966x_port *port,
+ struct tc_taprio_qopt_offload *qopt)
+{
+ struct lan966x *lan966x = port->lan966x;
+ int err, new_list, obs_list;
+ struct timespec64 ts;
+ ktime_t base_time;
+
+ err = lan966x_taprio_check(qopt);
+ if (err)
+ return err;
+
+ err = lan966x_taprio_find_list(port, qopt, &new_list, &obs_list);
+ if (err)
+ return err;
+
+ err = lan966x_taprio_gcl_setup(port, qopt, new_list);
+ if (err)
+ return err;
+
+ lan966x_taprio_new_base_time(lan966x, qopt->cycle_time,
+ qopt->base_time, &base_time);
+
+ ts = ktime_to_timespec64(base_time);
+ lan_wr(QSYS_TAS_BT_NSEC_NSEC_SET(ts.tv_nsec),
+ lan966x, QSYS_TAS_BT_NSEC);
+
+ lan_wr(lower_32_bits(ts.tv_sec),
+ lan966x, QSYS_TAS_BT_SEC_LSB);
+
+ lan_wr(QSYS_TAS_BT_SEC_MSB_SEC_MSB_SET(upper_32_bits(ts.tv_sec)),
+ lan966x, QSYS_TAS_BT_SEC_MSB);
+
+ lan_wr(qopt->cycle_time, lan966x, QSYS_TAS_CT_CFG);
+
+ lan_rmw(QSYS_TAS_STARTUP_CFG_OBSOLETE_IDX_SET(obs_list),
+ QSYS_TAS_STARTUP_CFG_OBSOLETE_IDX,
+ lan966x, QSYS_TAS_STARTUP_CFG);
+
+ /* Start list processing */
+ lan_rmw(QSYS_TAS_LST_LIST_STATE_SET(LAN966X_TAPRIO_STATE_ADVANCING),
+ QSYS_TAS_LST_LIST_STATE,
+ lan966x, QSYS_TAS_LST);
+
+ return err;
+}
+
+int lan966x_taprio_del(struct lan966x_port *port)
+{
+ return lan966x_taprio_shutdown(port);
+}
+
+void lan966x_taprio_init(struct lan966x *lan966x)
+{
+ int num_taprio_lists;
+ int p;
+
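+ /* The revisit delay is programmed in clock cycles: 256 * 1000 ps
+ * (256 ns) divided by the PTP clock period in ps gives the number of
+ * cycles.
+ */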
+ lan_wr(QSYS_TAS_STM_CFG_REVISIT_DLY_SET((256 * 1000) /
+ lan966x_ptp_get_period_ps()),
+ lan966x, QSYS_TAS_STM_CFG);
+
+ num_taprio_lists = lan966x->num_phys_ports *
+ LAN966X_TAPRIO_ENTRIES_PER_PORT;
+
+ /* For now, always use a guard band on all queues */
+ lan_rmw(QSYS_TAS_CFG_CTRL_LIST_NUM_MAX_SET(num_taprio_lists) |
+ QSYS_TAS_CFG_CTRL_ALWAYS_GB_SCH_Q_SET(1),
+ QSYS_TAS_CFG_CTRL_LIST_NUM_MAX |
+ QSYS_TAS_CFG_CTRL_ALWAYS_GB_SCH_Q,
+ lan966x, QSYS_TAS_CFG_CTRL);
+
+ for (p = 0; p < lan966x->num_phys_ports; p++)
+ lan_rmw(QSYS_TAS_PROFILE_CFG_PORT_NUM_SET(p),
+ QSYS_TAS_PROFILE_CFG_PORT_NUM,
+ lan966x, QSYS_TAS_PROFILE_CFG(p));
+}
+
+void lan966x_taprio_deinit(struct lan966x *lan966x)
+{
+ int p;
+
+ for (p = 0; p < lan966x->num_phys_ports; ++p) {
+ if (!lan966x->ports[p])
+ continue;
+
+ lan966x_taprio_del(lan966x->ports[p]);
+ }
+}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_tbf.c b/drivers/net/ethernet/microchip/lan966x/lan966x_tbf.c
new file mode 100644
index 0000000000..4555a35d0d
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_tbf.c
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include "lan966x_main.h"
+
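+/* These hooks implement the tbf qdisc offload. From user space a port shaper
+ * could be attached with, for instance (illustrative values only):
+ *
+ *   tc qdisc add dev eth0 root handle 1: tbf rate 10mbit burst 8kb limit 100kb
+ *
+ * A non-root parent selects one of the priority queues instead of the port.
+ */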
+int lan966x_tbf_add(struct lan966x_port *port,
+ struct tc_tbf_qopt_offload *qopt)
+{
+ struct lan966x *lan966x = port->lan966x;
+ bool root = qopt->parent == TC_H_ROOT;
+ u32 queue = 0;
+ u32 cir, cbs;
+ u32 se_idx;
+
+ if (!root) {
+ queue = TC_H_MIN(qopt->parent) - 1;
+ if (queue >= NUM_PRIO_QUEUES)
+ return -EOPNOTSUPP;
+ }
+
+ if (root)
+ se_idx = SE_IDX_PORT + port->chip_port;
+ else
+ se_idx = SE_IDX_QUEUE + port->chip_port * NUM_PRIO_QUEUES + queue;
+
+ cir = div_u64(qopt->replace_params.rate.rate_bytes_ps, 1000) * 8;
+ cbs = qopt->replace_params.max_size;
+
+ /* Rate unit is 100 kbps */
+ cir = DIV_ROUND_UP(cir, 100);
+ /* Avoid using zero rate */
+ cir = cir ?: 1;
+ /* Burst unit is 4kB */
+ cbs = DIV_ROUND_UP(cbs, 4096);
+ /* Avoid using zero burst */
+ cbs = cbs ?: 1;
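+
+ /* Worked example (hypothetical values): rate_bytes_ps = 1250000
+ * (10 Mbit/s) gives cir = 1250000 / 1000 * 8 = 10000 kbps, i.e.
+ * 100 units of 100 kbps; max_size = 10000 bytes gives
+ * cbs = DIV_ROUND_UP(10000, 4096) = 3 units of 4 kB.
+ */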
+
+ /* Check that the resulting values fit in the register fields */
+ if (cir > GENMASK(15, 0) ||
+ cbs > GENMASK(6, 0))
+ return -EINVAL;
+
+ lan_rmw(QSYS_SE_CFG_SE_AVB_ENA_SET(0) |
+ QSYS_SE_CFG_SE_FRM_MODE_SET(1),
+ QSYS_SE_CFG_SE_AVB_ENA |
+ QSYS_SE_CFG_SE_FRM_MODE,
+ lan966x, QSYS_SE_CFG(se_idx));
+
+ lan_wr(QSYS_CIR_CFG_CIR_RATE_SET(cir) |
+ QSYS_CIR_CFG_CIR_BURST_SET(cbs),
+ lan966x, QSYS_CIR_CFG(se_idx));
+
+ return 0;
+}
+
+int lan966x_tbf_del(struct lan966x_port *port,
+ struct tc_tbf_qopt_offload *qopt)
+{
+ struct lan966x *lan966x = port->lan966x;
+ bool root = qopt->parent == TC_H_ROOT;
+ u32 queue = 0;
+ u32 se_idx;
+
+ if (!root) {
+ queue = TC_H_MIN(qopt->parent) - 1;
+ if (queue >= NUM_PRIO_QUEUES)
+ return -EOPNOTSUPP;
+ }
+
+ if (root)
+ se_idx = SE_IDX_PORT + port->chip_port;
+ else
+ se_idx = SE_IDX_QUEUE + port->chip_port * NUM_PRIO_QUEUES + queue;
+
+ lan_rmw(QSYS_SE_CFG_SE_AVB_ENA_SET(0) |
+ QSYS_SE_CFG_SE_FRM_MODE_SET(0),
+ QSYS_SE_CFG_SE_AVB_ENA |
+ QSYS_SE_CFG_SE_FRM_MODE,
+ lan966x, QSYS_SE_CFG(se_idx));
+
+ lan_wr(QSYS_CIR_CFG_CIR_RATE_SET(0) |
+ QSYS_CIR_CFG_CIR_BURST_SET(0),
+ lan966x, QSYS_CIR_CFG(se_idx));
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_tc.c b/drivers/net/ethernet/microchip/lan966x/lan966x_tc.c
new file mode 100644
index 0000000000..ee652f2d23
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_tc.c
@@ -0,0 +1,142 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <net/pkt_cls.h>
+#include <net/pkt_sched.h>
+
+#include "lan966x_main.h"
+
+static LIST_HEAD(lan966x_tc_block_cb_list);
+
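+/* Illustrative user-space trigger for the mqprio offload below (values are
+ * examples only):
+ *
+ *   tc qdisc add dev eth0 root mqprio num_tc 8 \
+ *      map 0 1 2 3 4 5 6 7 queues 1@0 1@1 1@2 1@3 1@4 1@5 1@6 1@7 hw 1
+ */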
+static int lan966x_tc_setup_qdisc_mqprio(struct lan966x_port *port,
+ struct tc_mqprio_qopt_offload *mqprio)
+{
+ u8 num_tc = mqprio->qopt.num_tc;
+
+ mqprio->qopt.hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+
+ return num_tc ? lan966x_mqprio_add(port, num_tc) :
+ lan966x_mqprio_del(port);
+}
+
+static int lan966x_tc_setup_qdisc_taprio(struct lan966x_port *port,
+ struct tc_taprio_qopt_offload *taprio)
+{
+ switch (taprio->cmd) {
+ case TAPRIO_CMD_REPLACE:
+ return lan966x_taprio_add(port, taprio);
+ case TAPRIO_CMD_DESTROY:
+ return lan966x_taprio_del(port);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int lan966x_tc_setup_qdisc_tbf(struct lan966x_port *port,
+ struct tc_tbf_qopt_offload *qopt)
+{
+ switch (qopt->command) {
+ case TC_TBF_REPLACE:
+ return lan966x_tbf_add(port, qopt);
+ case TC_TBF_DESTROY:
+ return lan966x_tbf_del(port, qopt);
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int lan966x_tc_setup_qdisc_cbs(struct lan966x_port *port,
+ struct tc_cbs_qopt_offload *qopt)
+{
+ return qopt->enable ? lan966x_cbs_add(port, qopt) :
+ lan966x_cbs_del(port, qopt);
+}
+
+static int lan966x_tc_setup_qdisc_ets(struct lan966x_port *port,
+ struct tc_ets_qopt_offload *qopt)
+{
+ switch (qopt->command) {
+ case TC_ETS_REPLACE:
+ return lan966x_ets_add(port, qopt);
+ case TC_ETS_DESTROY:
+ return lan966x_ets_del(port, qopt);
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int lan966x_tc_block_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv, bool ingress)
+{
+ struct lan966x_port *port = cb_priv;
+
+ switch (type) {
+ case TC_SETUP_CLSMATCHALL:
+ return lan966x_tc_matchall(port, type_data, ingress);
+ case TC_SETUP_CLSFLOWER:
+ return lan966x_tc_flower(port, type_data, ingress);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int lan966x_tc_block_cb_ingress(enum tc_setup_type type,
+ void *type_data, void *cb_priv)
+{
+ return lan966x_tc_block_cb(type, type_data, cb_priv, true);
+}
+
+static int lan966x_tc_block_cb_egress(enum tc_setup_type type,
+ void *type_data, void *cb_priv)
+{
+ return lan966x_tc_block_cb(type, type_data, cb_priv, false);
+}
+
+static int lan966x_tc_setup_block(struct lan966x_port *port,
+ struct flow_block_offload *f)
+{
+ flow_setup_cb_t *cb;
+ bool ingress;
+
+ if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) {
+ cb = lan966x_tc_block_cb_ingress;
+ port->tc.ingress_shared_block = f->block_shared;
+ ingress = true;
+ } else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
+ cb = lan966x_tc_block_cb_egress;
+ ingress = false;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ return flow_block_cb_setup_simple(f, &lan966x_tc_block_cb_list,
+ cb, port, port, ingress);
+}
+
+int lan966x_tc_setup(struct net_device *dev, enum tc_setup_type type,
+ void *type_data)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+
+ switch (type) {
+ case TC_SETUP_QDISC_MQPRIO:
+ return lan966x_tc_setup_qdisc_mqprio(port, type_data);
+ case TC_SETUP_QDISC_TAPRIO:
+ return lan966x_tc_setup_qdisc_taprio(port, type_data);
+ case TC_SETUP_QDISC_TBF:
+ return lan966x_tc_setup_qdisc_tbf(port, type_data);
+ case TC_SETUP_QDISC_CBS:
+ return lan966x_tc_setup_qdisc_cbs(port, type_data);
+ case TC_SETUP_QDISC_ETS:
+ return lan966x_tc_setup_qdisc_ets(port, type_data);
+ case TC_SETUP_BLOCK:
+ return lan966x_tc_setup_block(port, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_tc_flower.c b/drivers/net/ethernet/microchip/lan966x/lan966x_tc_flower.c
new file mode 100644
index 0000000000..d696cf9dbd
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_tc_flower.c
@@ -0,0 +1,620 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include "lan966x_main.h"
+#include "vcap_api.h"
+#include "vcap_api_client.h"
+#include "vcap_tc.h"
+
+#define LAN966X_FORCE_UNTAGED 3
+
+static bool lan966x_tc_is_known_etype(struct vcap_tc_flower_parse_usage *st,
+ u16 etype)
+{
+ switch (st->admin->vtype) {
+ case VCAP_TYPE_IS1:
+ switch (etype) {
+ case ETH_P_ALL:
+ case ETH_P_ARP:
+ case ETH_P_IP:
+ case ETH_P_IPV6:
+ return true;
+ }
+ break;
+ case VCAP_TYPE_IS2:
+ switch (etype) {
+ case ETH_P_ALL:
+ case ETH_P_ARP:
+ case ETH_P_IP:
+ case ETH_P_IPV6:
+ case ETH_P_SNAP:
+ case ETH_P_802_2:
+ return true;
+ }
+ break;
+ case VCAP_TYPE_ES0:
+ return true;
+ default:
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack,
+ "VCAP type not supported");
+ return false;
+ }
+
+ return false;
+}
+
+static int
+lan966x_tc_flower_handler_control_usage(struct vcap_tc_flower_parse_usage *st)
+{
+ struct flow_match_control match;
+ int err = 0;
+
+ flow_rule_match_control(st->frule, &match);
+ if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) {
+ if (match.key->flags & FLOW_DIS_IS_FRAGMENT)
+ err = vcap_rule_add_key_bit(st->vrule,
+ VCAP_KF_L3_FRAGMENT,
+ VCAP_BIT_1);
+ else
+ err = vcap_rule_add_key_bit(st->vrule,
+ VCAP_KF_L3_FRAGMENT,
+ VCAP_BIT_0);
+ if (err)
+ goto out;
+ }
+
+ if (match.mask->flags & FLOW_DIS_FIRST_FRAG) {
+ if (match.key->flags & FLOW_DIS_FIRST_FRAG)
+ err = vcap_rule_add_key_bit(st->vrule,
+ VCAP_KF_L3_FRAG_OFS_GT0,
+ VCAP_BIT_0);
+ else
+ err = vcap_rule_add_key_bit(st->vrule,
+ VCAP_KF_L3_FRAG_OFS_GT0,
+ VCAP_BIT_1);
+ if (err)
+ goto out;
+ }
+
+ st->used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL);
+
+ return err;
+
+out:
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ip_frag parse error");
+ return err;
+}
+
+static int
+lan966x_tc_flower_handler_basic_usage(struct vcap_tc_flower_parse_usage *st)
+{
+ struct flow_match_basic match;
+ int err = 0;
+
+ flow_rule_match_basic(st->frule, &match);
+ if (match.mask->n_proto) {
+ st->l3_proto = be16_to_cpu(match.key->n_proto);
+ if (!lan966x_tc_is_known_etype(st, st->l3_proto)) {
+ err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_ETYPE,
+ st->l3_proto, ~0);
+ if (err)
+ goto out;
+ } else if (st->l3_proto == ETH_P_IP) {
+ err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_IP4_IS,
+ VCAP_BIT_1);
+ if (err)
+ goto out;
+ } else if (st->l3_proto == ETH_P_IPV6 &&
+ st->admin->vtype == VCAP_TYPE_IS1) {
+ /* Don't set any keys in this case */
+ } else if (st->l3_proto == ETH_P_SNAP &&
+ st->admin->vtype == VCAP_TYPE_IS1) {
+ err = vcap_rule_add_key_bit(st->vrule,
+ VCAP_KF_ETYPE_LEN_IS,
+ VCAP_BIT_0);
+ if (err)
+ goto out;
+
+ err = vcap_rule_add_key_bit(st->vrule,
+ VCAP_KF_IP_SNAP_IS,
+ VCAP_BIT_1);
+ if (err)
+ goto out;
+ } else if (st->admin->vtype == VCAP_TYPE_IS1) {
+ err = vcap_rule_add_key_bit(st->vrule,
+ VCAP_KF_ETYPE_LEN_IS,
+ VCAP_BIT_1);
+ if (err)
+ goto out;
+
+ err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_ETYPE,
+ st->l3_proto, ~0);
+ if (err)
+ goto out;
+ }
+ }
+ if (match.mask->ip_proto) {
+ st->l4_proto = match.key->ip_proto;
+
+ if (st->l4_proto == IPPROTO_TCP) {
+ if (st->admin->vtype == VCAP_TYPE_IS1) {
+ err = vcap_rule_add_key_bit(st->vrule,
+ VCAP_KF_TCP_UDP_IS,
+ VCAP_BIT_1);
+ if (err)
+ goto out;
+ }
+
+ err = vcap_rule_add_key_bit(st->vrule,
+ VCAP_KF_TCP_IS,
+ VCAP_BIT_1);
+ if (err)
+ goto out;
+ } else if (st->l4_proto == IPPROTO_UDP) {
+ if (st->admin->vtype == VCAP_TYPE_IS1) {
+ err = vcap_rule_add_key_bit(st->vrule,
+ VCAP_KF_TCP_UDP_IS,
+ VCAP_BIT_1);
+ if (err)
+ goto out;
+ }
+
+ err = vcap_rule_add_key_bit(st->vrule,
+ VCAP_KF_TCP_IS,
+ VCAP_BIT_0);
+ if (err)
+ goto out;
+ } else {
+ err = vcap_rule_add_key_u32(st->vrule,
+ VCAP_KF_L3_IP_PROTO,
+ st->l4_proto, ~0);
+ if (err)
+ goto out;
+ }
+ }
+
+ st->used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_BASIC);
+ return err;
+out:
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ip_proto parse error");
+ return err;
+}
+
+static int
+lan966x_tc_flower_handler_cvlan_usage(struct vcap_tc_flower_parse_usage *st)
+{
+ if (st->admin->vtype != VCAP_TYPE_IS1) {
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack,
+ "cvlan not supported in this VCAP");
+ return -EINVAL;
+ }
+
+ return vcap_tc_flower_handler_cvlan_usage(st);
+}
+
+static int
+lan966x_tc_flower_handler_vlan_usage(struct vcap_tc_flower_parse_usage *st)
+{
+ enum vcap_key_field vid_key = VCAP_KF_8021Q_VID_CLS;
+ enum vcap_key_field pcp_key = VCAP_KF_8021Q_PCP_CLS;
+
+ if (st->admin->vtype == VCAP_TYPE_IS1) {
+ vid_key = VCAP_KF_8021Q_VID0;
+ pcp_key = VCAP_KF_8021Q_PCP0;
+ }
+
+ return vcap_tc_flower_handler_vlan_usage(st, vid_key, pcp_key);
+}
+
+static int
+(*lan966x_tc_flower_handlers_usage[])(struct vcap_tc_flower_parse_usage *st) = {
+ [FLOW_DISSECTOR_KEY_ETH_ADDRS] = vcap_tc_flower_handler_ethaddr_usage,
+ [FLOW_DISSECTOR_KEY_IPV4_ADDRS] = vcap_tc_flower_handler_ipv4_usage,
+ [FLOW_DISSECTOR_KEY_IPV6_ADDRS] = vcap_tc_flower_handler_ipv6_usage,
+ [FLOW_DISSECTOR_KEY_CONTROL] = lan966x_tc_flower_handler_control_usage,
+ [FLOW_DISSECTOR_KEY_PORTS] = vcap_tc_flower_handler_portnum_usage,
+ [FLOW_DISSECTOR_KEY_BASIC] = lan966x_tc_flower_handler_basic_usage,
+ [FLOW_DISSECTOR_KEY_CVLAN] = lan966x_tc_flower_handler_cvlan_usage,
+ [FLOW_DISSECTOR_KEY_VLAN] = lan966x_tc_flower_handler_vlan_usage,
+ [FLOW_DISSECTOR_KEY_TCP] = vcap_tc_flower_handler_tcp_usage,
+ [FLOW_DISSECTOR_KEY_ARP] = vcap_tc_flower_handler_arp_usage,
+ [FLOW_DISSECTOR_KEY_IP] = vcap_tc_flower_handler_ip_usage,
+};
+
+static int lan966x_tc_flower_use_dissectors(struct flow_cls_offload *f,
+ struct vcap_admin *admin,
+ struct vcap_rule *vrule,
+ u16 *l3_proto)
+{
+ struct vcap_tc_flower_parse_usage state = {
+ .fco = f,
+ .vrule = vrule,
+ .l3_proto = ETH_P_ALL,
+ .admin = admin,
+ };
+ int err = 0;
+
+ state.frule = flow_cls_offload_flow_rule(f);
+ for (int i = 0; i < ARRAY_SIZE(lan966x_tc_flower_handlers_usage); ++i) {
+ if (!flow_rule_match_key(state.frule, i) ||
+ !lan966x_tc_flower_handlers_usage[i])
+ continue;
+
+ err = lan966x_tc_flower_handlers_usage[i](&state);
+ if (err)
+ return err;
+ }
+
+ if (l3_proto)
+ *l3_proto = state.l3_proto;
+
+ return err;
+}
+
+static int lan966x_tc_flower_action_check(struct vcap_control *vctrl,
+ struct net_device *dev,
+ struct flow_cls_offload *fco,
+ bool ingress)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(fco);
+ struct flow_action_entry *actent, *last_actent = NULL;
+ struct flow_action *act = &rule->action;
+ u64 action_mask = 0;
+ int idx;
+
+ if (!flow_action_has_entries(act)) {
+ NL_SET_ERR_MSG_MOD(fco->common.extack, "No actions");
+ return -EINVAL;
+ }
+
+ if (!flow_action_basic_hw_stats_check(act, fco->common.extack))
+ return -EOPNOTSUPP;
+
+ flow_action_for_each(idx, actent, act) {
+ if (action_mask & BIT(actent->id)) {
+ NL_SET_ERR_MSG_MOD(fco->common.extack,
+ "More actions of the same type");
+ return -EINVAL;
+ }
+ action_mask |= BIT(actent->id);
+ last_actent = actent; /* Save last action for later check */
+ }
+
+ /* Check that the last action is a goto.
+ * The last chain/lookup does not need to have a goto action
+ */
+ if (last_actent->id == FLOW_ACTION_GOTO) {
+ /* Check if the destination chain is in one of the VCAPs */
+ if (!vcap_is_next_lookup(vctrl, fco->common.chain_index,
+ last_actent->chain_index)) {
+ NL_SET_ERR_MSG_MOD(fco->common.extack,
+ "Invalid goto chain");
+ return -EINVAL;
+ }
+ } else if (!vcap_is_last_chain(vctrl, fco->common.chain_index,
+ ingress)) {
+ NL_SET_ERR_MSG_MOD(fco->common.extack,
+ "Last action must be 'goto'");
+ return -EINVAL;
+ }
+
+ /* Catch unsupported combinations of actions */
+ if (action_mask & BIT(FLOW_ACTION_TRAP) &&
+ action_mask & BIT(FLOW_ACTION_ACCEPT)) {
+ NL_SET_ERR_MSG_MOD(fco->common.extack,
+ "Cannot combine pass and trap action");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
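+
+/* A filter accepted by the check above could look like (illustrative only):
+ *
+ *   tc filter add dev eth0 ingress chain 0 prio 1 protocol ip \
+ *      flower dst_ip 10.0.0.1 action goto chain <next-lookup>
+ *
+ * where <next-lookup> is assumed to be the chain id of one of the VCAP
+ * lookups.
+ */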
+
+/* Add the actionset that is the default for the VCAP type */
+static int lan966x_tc_set_actionset(struct vcap_admin *admin,
+ struct vcap_rule *vrule)
+{
+ enum vcap_actionfield_set aset;
+ int err = 0;
+
+ switch (admin->vtype) {
+ case VCAP_TYPE_IS1:
+ aset = VCAP_AFS_S1;
+ break;
+ case VCAP_TYPE_IS2:
+ aset = VCAP_AFS_BASE_TYPE;
+ break;
+ case VCAP_TYPE_ES0:
+ aset = VCAP_AFS_VID;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Do not overwrite any current actionset */
+ if (vrule->actionset == VCAP_AFS_NO_VALUE)
+ err = vcap_set_rule_set_actionset(vrule, aset);
+
+ return err;
+}
+
+static int lan966x_tc_add_rule_link_target(struct vcap_admin *admin,
+ struct vcap_rule *vrule,
+ int target_cid)
+{
+ int link_val = target_cid % VCAP_CID_LOOKUP_SIZE;
+ int err;
+
+ if (!link_val)
+ return 0;
+
+ switch (admin->vtype) {
+ case VCAP_TYPE_IS1:
+ /* Choose IS1 specific NXT_IDX key (for chaining rules from IS1) */
+ err = vcap_rule_add_key_u32(vrule, VCAP_KF_LOOKUP_GEN_IDX_SEL,
+ 1, ~0);
+ if (err)
+ return err;
+
+ return vcap_rule_add_key_u32(vrule, VCAP_KF_LOOKUP_GEN_IDX,
+ link_val, ~0);
+ case VCAP_TYPE_IS2:
+ /* Add IS2 specific PAG key (for chaining rules from IS1) */
+ return vcap_rule_add_key_u32(vrule, VCAP_KF_LOOKUP_PAG,
+ link_val, ~0);
+ case VCAP_TYPE_ES0:
+ /* Add ES0 specific ISDX key (for chaining rules from IS1) */
+ return vcap_rule_add_key_u32(vrule, VCAP_KF_ISDX_CLS,
+ link_val, ~0);
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int lan966x_tc_add_rule_link(struct vcap_control *vctrl,
+ struct vcap_admin *admin,
+ struct vcap_rule *vrule,
+ struct flow_cls_offload *f,
+ int to_cid)
+{
+ struct vcap_admin *to_admin = vcap_find_admin(vctrl, to_cid);
+ int diff, err = 0;
+
+ if (!to_admin) {
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "Unknown destination chain");
+ return -EINVAL;
+ }
+
+ diff = vcap_chain_offset(vctrl, f->common.chain_index, to_cid);
+ if (!diff)
+ return 0;
+
+ /* Between IS1 and IS2 the PAG value is used */
+ if (admin->vtype == VCAP_TYPE_IS1 && to_admin->vtype == VCAP_TYPE_IS2) {
+ /* This works for IS1->IS2 */
+ err = vcap_rule_add_action_u32(vrule, VCAP_AF_PAG_VAL, diff);
+ if (err)
+ return err;
+
+ err = vcap_rule_add_action_u32(vrule, VCAP_AF_PAG_OVERRIDE_MASK,
+ 0xff);
+ if (err)
+ return err;
+ } else if (admin->vtype == VCAP_TYPE_IS1 &&
+ to_admin->vtype == VCAP_TYPE_ES0) {
+ /* This works for IS1->ES0 */
+ err = vcap_rule_add_action_u32(vrule, VCAP_AF_ISDX_ADD_VAL,
+ diff);
+ if (err)
+ return err;
+
+ err = vcap_rule_add_action_bit(vrule, VCAP_AF_ISDX_REPLACE_ENA,
+ VCAP_BIT_1);
+ if (err)
+ return err;
+ } else {
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "Unsupported chain destination");
+ return -EOPNOTSUPP;
+ }
+
+ return err;
+}
+
+static int lan966x_tc_add_rule_counter(struct vcap_admin *admin,
+ struct vcap_rule *vrule)
+{
+ int err = 0;
+
+ switch (admin->vtype) {
+ case VCAP_TYPE_ES0:
+ err = vcap_rule_mod_action_u32(vrule, VCAP_AF_ESDX,
+ vrule->id);
+ break;
+ default:
+ break;
+ }
+
+ return err;
+}
+
+static int lan966x_tc_flower_add(struct lan966x_port *port,
+ struct flow_cls_offload *f,
+ struct vcap_admin *admin,
+ bool ingress)
+{
+ struct flow_action_entry *act;
+ u16 l3_proto = ETH_P_ALL;
+ struct flow_rule *frule;
+ struct vcap_rule *vrule;
+ int err, idx;
+
+ err = lan966x_tc_flower_action_check(port->lan966x->vcap_ctrl,
+ port->dev, f, ingress);
+ if (err)
+ return err;
+
+ vrule = vcap_alloc_rule(port->lan966x->vcap_ctrl, port->dev,
+ f->common.chain_index, VCAP_USER_TC,
+ f->common.prio, 0);
+ if (IS_ERR(vrule))
+ return PTR_ERR(vrule);
+
+ vrule->cookie = f->cookie;
+ err = lan966x_tc_flower_use_dissectors(f, admin, vrule, &l3_proto);
+ if (err)
+ goto out;
+
+ err = lan966x_tc_add_rule_link_target(admin, vrule,
+ f->common.chain_index);
+ if (err)
+ goto out;
+
+ frule = flow_cls_offload_flow_rule(f);
+
+ flow_action_for_each(idx, act, &frule->action) {
+ switch (act->id) {
+ case FLOW_ACTION_TRAP:
+ if (admin->vtype != VCAP_TYPE_IS2) {
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "Trap action not supported in this VCAP");
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ err = vcap_rule_add_action_bit(vrule,
+ VCAP_AF_CPU_COPY_ENA,
+ VCAP_BIT_1);
+ err |= vcap_rule_add_action_u32(vrule,
+ VCAP_AF_CPU_QUEUE_NUM,
+ 0);
+ err |= vcap_rule_add_action_u32(vrule, VCAP_AF_MASK_MODE,
+ LAN966X_PMM_REPLACE);
+ if (err)
+ goto out;
+
+ break;
+ case FLOW_ACTION_GOTO:
+ err = lan966x_tc_set_actionset(admin, vrule);
+ if (err)
+ goto out;
+
+ err = lan966x_tc_add_rule_link(port->lan966x->vcap_ctrl,
+ admin, vrule,
+ f, act->chain_index);
+ if (err)
+ goto out;
+
+ break;
+ case FLOW_ACTION_VLAN_POP:
+ if (admin->vtype != VCAP_TYPE_ES0) {
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "Cannot use vlan pop on non es0");
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ /* Force untag */
+ err = vcap_rule_add_action_u32(vrule, VCAP_AF_PUSH_OUTER_TAG,
+ LAN966X_FORCE_UNTAGED);
+ if (err)
+ goto out;
+
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "Unsupported TC action");
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+ }
+
+ err = lan966x_tc_add_rule_counter(admin, vrule);
+ if (err) {
+ vcap_set_tc_exterr(f, vrule);
+ goto out;
+ }
+
+ err = vcap_val_rule(vrule, l3_proto);
+ if (err) {
+ vcap_set_tc_exterr(f, vrule);
+ goto out;
+ }
+
+ err = vcap_add_rule(vrule);
+ if (err)
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "Could not add the filter");
+out:
+ vcap_free_rule(vrule);
+ return err;
+}
+
+static int lan966x_tc_flower_del(struct lan966x_port *port,
+ struct flow_cls_offload *f,
+ struct vcap_admin *admin)
+{
+ struct vcap_control *vctrl;
+ int err = -ENOENT, rule_id;
+
+ vctrl = port->lan966x->vcap_ctrl;
+ while (true) {
+ rule_id = vcap_lookup_rule_by_cookie(vctrl, f->cookie);
+ if (rule_id <= 0)
+ break;
+
+ err = vcap_del_rule(vctrl, port->dev, rule_id);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "Cannot delete rule");
+ break;
+ }
+ }
+
+ return err;
+}
+
+static int lan966x_tc_flower_stats(struct lan966x_port *port,
+ struct flow_cls_offload *f,
+ struct vcap_admin *admin)
+{
+ struct vcap_counter count = {};
+ int err;
+
+ err = vcap_get_rule_count_by_cookie(port->lan966x->vcap_ctrl,
+ &count, f->cookie);
+ if (err)
+ return err;
+
+ flow_stats_update(&f->stats, 0x0, count.value, 0, 0,
+ FLOW_ACTION_HW_STATS_IMMEDIATE);
+
+ return err;
+}
+
+int lan966x_tc_flower(struct lan966x_port *port,
+ struct flow_cls_offload *f,
+ bool ingress)
+{
+ struct vcap_admin *admin;
+
+ admin = vcap_find_admin(port->lan966x->vcap_ctrl,
+ f->common.chain_index);
+ if (!admin) {
+ NL_SET_ERR_MSG_MOD(f->common.extack, "Invalid chain");
+ return -EINVAL;
+ }
+
+ switch (f->command) {
+ case FLOW_CLS_REPLACE:
+ return lan966x_tc_flower_add(port, f, admin, ingress);
+ case FLOW_CLS_DESTROY:
+ return lan966x_tc_flower_del(port, f, admin);
+ case FLOW_CLS_STATS:
+ return lan966x_tc_flower_stats(port, f, admin);
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_tc_matchall.c b/drivers/net/ethernet/microchip/lan966x/lan966x_tc_matchall.c
new file mode 100644
index 0000000000..20627323d6
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_tc_matchall.c
@@ -0,0 +1,91 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include "lan966x_main.h"
+
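+/* Matchall offloads cover port policing, mirroring and chain goto, e.g.
+ * (illustrative only):
+ *
+ *   tc filter add dev eth0 ingress matchall skip_sw \
+ *      action mirred egress mirror dev eth1
+ */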
+static int lan966x_tc_matchall_add(struct lan966x_port *port,
+ struct tc_cls_matchall_offload *f,
+ bool ingress)
+{
+ struct flow_action_entry *act;
+
+ if (!flow_offload_has_one_action(&f->rule->action)) {
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "Only once action per filter is supported");
+ return -EOPNOTSUPP;
+ }
+
+ act = &f->rule->action.entries[0];
+ switch (act->id) {
+ case FLOW_ACTION_POLICE:
+ return lan966x_police_port_add(port, &f->rule->action, act,
+ f->cookie, ingress,
+ f->common.extack);
+ case FLOW_ACTION_MIRRED:
+ return lan966x_mirror_port_add(port, act, f->cookie,
+ ingress, f->common.extack);
+ case FLOW_ACTION_GOTO:
+ return lan966x_goto_port_add(port, f->common.chain_index,
+ act->chain_index, f->cookie,
+ f->common.extack);
+ default:
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "Unsupported action");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int lan966x_tc_matchall_del(struct lan966x_port *port,
+ struct tc_cls_matchall_offload *f,
+ bool ingress)
+{
+ if (f->cookie == port->tc.police_id) {
+ return lan966x_police_port_del(port, f->cookie,
+ f->common.extack);
+ } else if (f->cookie == port->tc.ingress_mirror_id ||
+ f->cookie == port->tc.egress_mirror_id) {
+ return lan966x_mirror_port_del(port, ingress,
+ f->common.extack);
+ } else {
+ return lan966x_goto_port_del(port, f->cookie, f->common.extack);
+ }
+
+ return 0;
+}
+
+static int lan966x_tc_matchall_stats(struct lan966x_port *port,
+ struct tc_cls_matchall_offload *f,
+ bool ingress)
+{
+ if (f->cookie == port->tc.police_id) {
+ lan966x_police_port_stats(port, &f->stats);
+ } else if (f->cookie == port->tc.ingress_mirror_id ||
+ f->cookie == port->tc.egress_mirror_id) {
+ lan966x_mirror_port_stats(port, &f->stats, ingress);
+ } else {
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "Unsupported action");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+int lan966x_tc_matchall(struct lan966x_port *port,
+ struct tc_cls_matchall_offload *f,
+ bool ingress)
+{
+ switch (f->command) {
+ case TC_CLSMATCHALL_REPLACE:
+ return lan966x_tc_matchall_add(port, f, ingress);
+ case TC_CLSMATCHALL_DESTROY:
+ return lan966x_tc_matchall_del(port, f, ingress);
+ case TC_CLSMATCHALL_STATS:
+ return lan966x_tc_matchall_stats(port, f, ingress);
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_ag_api.c b/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_ag_api.c
new file mode 100644
index 0000000000..fb6851b945
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_ag_api.c
@@ -0,0 +1,3270 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+
+#include "lan966x_vcap_ag_api.h"
+
+/* keyfields */
+static const struct vcap_field is1_normal_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 0,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_INDEX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 1,
+ .width = 2,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 3,
+ .width = 9,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 12,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_IP_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 14,
+ .width = 1,
+ },
+ [VCAP_KF_8021CB_R_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 15,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 16,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_DBL_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 17,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_TPID0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 18,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 19,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_DEI0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 31,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 32,
+ .width = 3,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 35,
+ .width = 48,
+ },
+ [VCAP_KF_ETYPE_LEN_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 83,
+ .width = 1,
+ },
+ [VCAP_KF_ETYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 84,
+ .width = 16,
+ },
+ [VCAP_KF_IP_SNAP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 100,
+ .width = 1,
+ },
+ [VCAP_KF_IP4_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 101,
+ .width = 1,
+ },
+ [VCAP_KF_L3_FRAGMENT] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 102,
+ .width = 1,
+ },
+ [VCAP_KF_L3_FRAG_OFS_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 103,
+ .width = 1,
+ },
+ [VCAP_KF_L3_OPTIONS_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 104,
+ .width = 1,
+ },
+ [VCAP_KF_L3_DSCP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 105,
+ .width = 6,
+ },
+ [VCAP_KF_L3_IP4_SIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 111,
+ .width = 32,
+ },
+ [VCAP_KF_TCP_UDP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 143,
+ .width = 1,
+ },
+ [VCAP_KF_TCP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 144,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 145,
+ .width = 16,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 161,
+ .width = 8,
+ },
+};
+
+static const struct vcap_field is1_5tuple_ip4_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 0,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_INDEX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 1,
+ .width = 2,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 3,
+ .width = 9,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 12,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_IP_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 14,
+ .width = 1,
+ },
+ [VCAP_KF_8021CB_R_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 15,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 16,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_DBL_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 17,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_TPID0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 18,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 19,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_DEI0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 31,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 32,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_TPID1] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 35,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 36,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_DEI1] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 48,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 49,
+ .width = 3,
+ },
+ [VCAP_KF_IP4_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 52,
+ .width = 1,
+ },
+ [VCAP_KF_L3_FRAGMENT] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 53,
+ .width = 1,
+ },
+ [VCAP_KF_L3_FRAG_OFS_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 54,
+ .width = 1,
+ },
+ [VCAP_KF_L3_OPTIONS_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 55,
+ .width = 1,
+ },
+ [VCAP_KF_L3_DSCP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 56,
+ .width = 6,
+ },
+ [VCAP_KF_L3_IP4_DIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 62,
+ .width = 32,
+ },
+ [VCAP_KF_L3_IP4_SIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 94,
+ .width = 32,
+ },
+ [VCAP_KF_L3_IP_PROTO] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 126,
+ .width = 8,
+ },
+ [VCAP_KF_TCP_UDP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 134,
+ .width = 1,
+ },
+ [VCAP_KF_TCP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 135,
+ .width = 1,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 136,
+ .width = 8,
+ },
+ [VCAP_KF_IP_PAYLOAD_5TUPLE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 144,
+ .width = 32,
+ },
+};
+
+static const struct vcap_field is1_normal_ip6_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 2,
+ },
+ [VCAP_KF_LOOKUP_INDEX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 2,
+ .width = 2,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 4,
+ .width = 9,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 14,
+ .width = 1,
+ },
+ [VCAP_KF_IP_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 15,
+ .width = 1,
+ },
+ [VCAP_KF_8021CB_R_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 16,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 17,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_DBL_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 18,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_TPID0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 19,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 20,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_DEI0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 32,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 33,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_TPID1] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 36,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 37,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_DEI1] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 49,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 50,
+ .width = 3,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 53,
+ .width = 48,
+ },
+ [VCAP_KF_L3_DSCP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 101,
+ .width = 6,
+ },
+ [VCAP_KF_L3_IP6_SIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 107,
+ .width = 128,
+ },
+ [VCAP_KF_L3_IP_PROTO] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 235,
+ .width = 8,
+ },
+ [VCAP_KF_TCP_UDP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 243,
+ .width = 1,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 244,
+ .width = 8,
+ },
+ [VCAP_KF_IP_PAYLOAD_S1_IP6] = {
+ .type = VCAP_FIELD_U112,
+ .offset = 252,
+ .width = 112,
+ },
+};
+
+static const struct vcap_field is1_7tuple_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 2,
+ },
+ [VCAP_KF_LOOKUP_INDEX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 2,
+ .width = 2,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 4,
+ .width = 9,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 14,
+ .width = 1,
+ },
+ [VCAP_KF_IP_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 15,
+ .width = 1,
+ },
+ [VCAP_KF_8021CB_R_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 16,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 17,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_DBL_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 18,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_TPID0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 19,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 20,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_DEI0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 32,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 33,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_TPID1] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 36,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 37,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_DEI1] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 49,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 50,
+ .width = 3,
+ },
+ [VCAP_KF_L2_DMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 53,
+ .width = 48,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 101,
+ .width = 48,
+ },
+ [VCAP_KF_ETYPE_LEN_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 149,
+ .width = 1,
+ },
+ [VCAP_KF_ETYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 150,
+ .width = 16,
+ },
+ [VCAP_KF_IP_SNAP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 166,
+ .width = 1,
+ },
+ [VCAP_KF_IP4_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 167,
+ .width = 1,
+ },
+ [VCAP_KF_L3_FRAGMENT] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 168,
+ .width = 1,
+ },
+ [VCAP_KF_L3_FRAG_OFS_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 169,
+ .width = 1,
+ },
+ [VCAP_KF_L3_OPTIONS_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 170,
+ .width = 1,
+ },
+ [VCAP_KF_L3_DSCP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 171,
+ .width = 6,
+ },
+ [VCAP_KF_L3_IP6_DIP_MSB] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 177,
+ .width = 16,
+ },
+ [VCAP_KF_L3_IP6_DIP] = {
+ .type = VCAP_FIELD_U64,
+ .offset = 193,
+ .width = 64,
+ },
+ [VCAP_KF_L3_IP6_SIP_MSB] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 257,
+ .width = 16,
+ },
+ [VCAP_KF_L3_IP6_SIP] = {
+ .type = VCAP_FIELD_U64,
+ .offset = 273,
+ .width = 64,
+ },
+ [VCAP_KF_TCP_UDP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 337,
+ .width = 1,
+ },
+ [VCAP_KF_TCP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 338,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 339,
+ .width = 16,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 355,
+ .width = 8,
+ },
+};
+
+static const struct vcap_field is1_5tuple_ip6_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 2,
+ },
+ [VCAP_KF_LOOKUP_INDEX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 2,
+ .width = 2,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 4,
+ .width = 9,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 14,
+ .width = 1,
+ },
+ [VCAP_KF_IP_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 15,
+ .width = 1,
+ },
+ [VCAP_KF_8021CB_R_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 16,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 17,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_DBL_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 18,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_TPID0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 19,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 20,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_DEI0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 32,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 33,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_TPID1] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 36,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 37,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_DEI1] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 49,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 50,
+ .width = 3,
+ },
+ [VCAP_KF_L3_DSCP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 53,
+ .width = 6,
+ },
+ [VCAP_KF_L3_IP6_DIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 59,
+ .width = 128,
+ },
+ [VCAP_KF_L3_IP6_SIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 187,
+ .width = 128,
+ },
+ [VCAP_KF_L3_IP_PROTO] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 315,
+ .width = 8,
+ },
+ [VCAP_KF_TCP_UDP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 323,
+ .width = 1,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 324,
+ .width = 8,
+ },
+ [VCAP_KF_IP_PAYLOAD_5TUPLE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 332,
+ .width = 32,
+ },
+};
+
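+/* The three sets below (DBL_VID, RT and DMAC_VID) are quarter-row X1
+ * sets: every field fits within a single 96-bit subword, which is why
+ * is1_keyfield_set further down gives them sw_per_item = 1 with four
+ * entries per row.
+ */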
+static const struct vcap_field is1_dbl_vid_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 2,
+ },
+ [VCAP_KF_LOOKUP_INDEX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 2,
+ .width = 2,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 4,
+ .width = 9,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 14,
+ .width = 1,
+ },
+ [VCAP_KF_IP_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 15,
+ .width = 1,
+ },
+ [VCAP_KF_8021CB_R_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 16,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 17,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_DBL_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 18,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_TPID0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 19,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 20,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_DEI0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 32,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 33,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_TPID1] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 36,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 37,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_DEI1] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 49,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 50,
+ .width = 3,
+ },
+ [VCAP_KF_ETYPE_LEN_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 53,
+ .width = 1,
+ },
+ [VCAP_KF_ETYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 54,
+ .width = 16,
+ },
+ [VCAP_KF_IP_SNAP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 70,
+ .width = 1,
+ },
+ [VCAP_KF_IP4_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 71,
+ .width = 1,
+ },
+ [VCAP_KF_L3_FRAGMENT] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 72,
+ .width = 1,
+ },
+ [VCAP_KF_L3_FRAG_OFS_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 73,
+ .width = 1,
+ },
+ [VCAP_KF_L3_OPTIONS_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 74,
+ .width = 1,
+ },
+ [VCAP_KF_L3_DSCP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 75,
+ .width = 6,
+ },
+ [VCAP_KF_TCP_UDP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 81,
+ .width = 1,
+ },
+ [VCAP_KF_TCP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 82,
+ .width = 1,
+ },
+};
+
+static const struct vcap_field is1_rt_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 2,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 2,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 3,
+ .width = 3,
+ },
+ [VCAP_KF_8021CB_R_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 6,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 7,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_DBL_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 8,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 9,
+ .width = 48,
+ },
+ [VCAP_KF_RT_VLAN_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 57,
+ .width = 3,
+ },
+ [VCAP_KF_RT_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 60,
+ .width = 2,
+ },
+ [VCAP_KF_RT_FRMID] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 62,
+ .width = 32,
+ },
+};
+
+static const struct vcap_field is1_dmac_vid_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 2,
+ },
+ [VCAP_KF_LOOKUP_INDEX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 2,
+ .width = 2,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 4,
+ .width = 9,
+ },
+ [VCAP_KF_8021CB_R_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 14,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_DBL_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 15,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_TPID0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 16,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 17,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_DEI0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 29,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 30,
+ .width = 3,
+ },
+ [VCAP_KF_L2_DMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 33,
+ .width = 48,
+ },
+};
+
+static const struct vcap_field is2_mac_etype_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 4,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 4,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_PAG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 5,
+ .width = 8,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 13,
+ .width = 9,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 22,
+ .width = 1,
+ },
+ [VCAP_KF_HOST_MATCH] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 23,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 24,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 25,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 26,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 27,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 39,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 40,
+ .width = 3,
+ },
+ [VCAP_KF_L2_DMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 43,
+ .width = 48,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 91,
+ .width = 48,
+ },
+ [VCAP_KF_ETYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 139,
+ .width = 16,
+ },
+ [VCAP_KF_L2_FRM_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 155,
+ .width = 4,
+ },
+ [VCAP_KF_L2_PAYLOAD0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 159,
+ .width = 16,
+ },
+ [VCAP_KF_L2_PAYLOAD1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 175,
+ .width = 8,
+ },
+ [VCAP_KF_L2_PAYLOAD2] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 183,
+ .width = 3,
+ },
+};
+
+static const struct vcap_field is2_mac_llc_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 4,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 4,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_PAG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 5,
+ .width = 8,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 13,
+ .width = 9,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 22,
+ .width = 1,
+ },
+ [VCAP_KF_HOST_MATCH] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 23,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 24,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 25,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 26,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 27,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 39,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 40,
+ .width = 3,
+ },
+ [VCAP_KF_L2_DMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 43,
+ .width = 48,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 91,
+ .width = 48,
+ },
+ [VCAP_KF_L2_LLC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 139,
+ .width = 40,
+ },
+};
+
+static const struct vcap_field is2_mac_snap_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 4,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 4,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_PAG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 5,
+ .width = 8,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 13,
+ .width = 9,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 22,
+ .width = 1,
+ },
+ [VCAP_KF_HOST_MATCH] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 23,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 24,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 25,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 26,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 27,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 39,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 40,
+ .width = 3,
+ },
+ [VCAP_KF_L2_DMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 43,
+ .width = 48,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 91,
+ .width = 48,
+ },
+ [VCAP_KF_L2_SNAP] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 139,
+ .width = 40,
+ },
+};
+
+static const struct vcap_field is2_arp_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 4,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 4,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_PAG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 5,
+ .width = 8,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 13,
+ .width = 9,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 22,
+ .width = 1,
+ },
+ [VCAP_KF_HOST_MATCH] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 23,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 24,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 25,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 26,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 27,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 39,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 40,
+ .width = 3,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 43,
+ .width = 48,
+ },
+ [VCAP_KF_ARP_ADDR_SPACE_OK_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 91,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_PROTO_SPACE_OK_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 92,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_LEN_OK_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 93,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_TGT_MATCH_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 94,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_SENDER_MATCH_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 95,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_OPCODE_UNKNOWN_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 96,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_OPCODE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 97,
+ .width = 2,
+ },
+ [VCAP_KF_L3_IP4_DIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 99,
+ .width = 32,
+ },
+ [VCAP_KF_L3_IP4_SIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 131,
+ .width = 32,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 163,
+ .width = 1,
+ },
+};
+
+static const struct vcap_field is2_ip4_tcp_udp_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 4,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 4,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_PAG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 5,
+ .width = 8,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 13,
+ .width = 9,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 22,
+ .width = 1,
+ },
+ [VCAP_KF_HOST_MATCH] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 23,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 24,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 25,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 26,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 27,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 39,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 40,
+ .width = 3,
+ },
+ [VCAP_KF_IP4_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 43,
+ .width = 1,
+ },
+ [VCAP_KF_L3_FRAGMENT] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 44,
+ .width = 1,
+ },
+ [VCAP_KF_L3_FRAG_OFS_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 45,
+ .width = 1,
+ },
+ [VCAP_KF_L3_OPTIONS_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 46,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TTL_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 47,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TOS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 48,
+ .width = 8,
+ },
+ [VCAP_KF_L3_IP4_DIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 56,
+ .width = 32,
+ },
+ [VCAP_KF_L3_IP4_SIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 88,
+ .width = 32,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 120,
+ .width = 1,
+ },
+ [VCAP_KF_TCP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 121,
+ .width = 1,
+ },
+ [VCAP_KF_L4_DPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 122,
+ .width = 16,
+ },
+ [VCAP_KF_L4_SPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 138,
+ .width = 16,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 154,
+ .width = 8,
+ },
+ [VCAP_KF_L4_SPORT_EQ_DPORT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 162,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SEQUENCE_EQ0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 163,
+ .width = 1,
+ },
+ [VCAP_KF_L4_FIN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 164,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SYN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 165,
+ .width = 1,
+ },
+ [VCAP_KF_L4_RST] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 166,
+ .width = 1,
+ },
+ [VCAP_KF_L4_PSH] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 167,
+ .width = 1,
+ },
+ [VCAP_KF_L4_ACK] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 168,
+ .width = 1,
+ },
+ [VCAP_KF_L4_URG] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 169,
+ .width = 1,
+ },
+ [VCAP_KF_L4_1588_DOM] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 170,
+ .width = 8,
+ },
+ [VCAP_KF_L4_1588_VER] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 178,
+ .width = 4,
+ },
+};
+
+static const struct vcap_field is2_ip4_other_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 4,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 4,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_PAG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 5,
+ .width = 8,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 13,
+ .width = 9,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 22,
+ .width = 1,
+ },
+ [VCAP_KF_HOST_MATCH] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 23,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 24,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 25,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 26,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 27,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 39,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 40,
+ .width = 3,
+ },
+ [VCAP_KF_IP4_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 43,
+ .width = 1,
+ },
+ [VCAP_KF_L3_FRAGMENT] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 44,
+ .width = 1,
+ },
+ [VCAP_KF_L3_FRAG_OFS_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 45,
+ .width = 1,
+ },
+ [VCAP_KF_L3_OPTIONS_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 46,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TTL_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 47,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TOS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 48,
+ .width = 8,
+ },
+ [VCAP_KF_L3_IP4_DIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 56,
+ .width = 32,
+ },
+ [VCAP_KF_L3_IP4_SIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 88,
+ .width = 32,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 120,
+ .width = 1,
+ },
+ [VCAP_KF_L3_IP_PROTO] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 121,
+ .width = 8,
+ },
+ [VCAP_KF_L3_PAYLOAD] = {
+ .type = VCAP_FIELD_U56,
+ .offset = 129,
+ .width = 56,
+ },
+};
+
+static const struct vcap_field is2_ip6_std_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 4,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 4,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_PAG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 5,
+ .width = 8,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 13,
+ .width = 9,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 22,
+ .width = 1,
+ },
+ [VCAP_KF_HOST_MATCH] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 23,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 24,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 25,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 26,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 27,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 39,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 40,
+ .width = 3,
+ },
+ [VCAP_KF_L3_TTL_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 43,
+ .width = 1,
+ },
+ [VCAP_KF_L3_IP6_SIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 44,
+ .width = 128,
+ },
+ [VCAP_KF_L3_IP_PROTO] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 172,
+ .width = 8,
+ },
+};
+
+static const struct vcap_field is2_oam_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 4,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 4,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_PAG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 5,
+ .width = 8,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 13,
+ .width = 9,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 22,
+ .width = 1,
+ },
+ [VCAP_KF_HOST_MATCH] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 23,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 24,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 25,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 26,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 27,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 39,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 40,
+ .width = 3,
+ },
+ [VCAP_KF_L2_DMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 43,
+ .width = 48,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 91,
+ .width = 48,
+ },
+ [VCAP_KF_OAM_MEL_FLAGS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 139,
+ .width = 7,
+ },
+ [VCAP_KF_OAM_VER] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 146,
+ .width = 5,
+ },
+ [VCAP_KF_OAM_OPCODE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 151,
+ .width = 8,
+ },
+ [VCAP_KF_OAM_FLAGS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 159,
+ .width = 8,
+ },
+ [VCAP_KF_OAM_MEPID] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 167,
+ .width = 16,
+ },
+ [VCAP_KF_OAM_CCM_CNTS_EQ0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 183,
+ .width = 1,
+ },
+ [VCAP_KF_OAM_Y1731_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 184,
+ .width = 1,
+ },
+ [VCAP_KF_OAM_DETECTED] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 185,
+ .width = 1,
+ },
+};
+
+static const struct vcap_field is2_ip6_tcp_udp_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 2,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 2,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_PAG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 3,
+ .width = 8,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 11,
+ .width = 9,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 20,
+ .width = 1,
+ },
+ [VCAP_KF_HOST_MATCH] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 21,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 22,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 23,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 24,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 25,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 37,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 38,
+ .width = 3,
+ },
+ [VCAP_KF_L3_TTL_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 41,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TOS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 42,
+ .width = 8,
+ },
+ [VCAP_KF_L3_IP6_DIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 50,
+ .width = 128,
+ },
+ [VCAP_KF_L3_IP6_SIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 178,
+ .width = 128,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 306,
+ .width = 1,
+ },
+ [VCAP_KF_TCP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 307,
+ .width = 1,
+ },
+ [VCAP_KF_L4_DPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 308,
+ .width = 16,
+ },
+ [VCAP_KF_L4_SPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 324,
+ .width = 16,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 340,
+ .width = 8,
+ },
+ [VCAP_KF_L4_SPORT_EQ_DPORT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 348,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SEQUENCE_EQ0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 349,
+ .width = 1,
+ },
+ [VCAP_KF_L4_FIN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 350,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SYN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 351,
+ .width = 1,
+ },
+ [VCAP_KF_L4_RST] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 352,
+ .width = 1,
+ },
+ [VCAP_KF_L4_PSH] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 353,
+ .width = 1,
+ },
+ [VCAP_KF_L4_ACK] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 354,
+ .width = 1,
+ },
+ [VCAP_KF_L4_URG] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 355,
+ .width = 1,
+ },
+ [VCAP_KF_L4_1588_DOM] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 356,
+ .width = 8,
+ },
+ [VCAP_KF_L4_1588_VER] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 364,
+ .width = 4,
+ },
+};
+
+static const struct vcap_field is2_ip6_other_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 2,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 2,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_PAG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 3,
+ .width = 8,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 11,
+ .width = 9,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 20,
+ .width = 1,
+ },
+ [VCAP_KF_HOST_MATCH] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 21,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 22,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 23,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 24,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 25,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 37,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 38,
+ .width = 3,
+ },
+ [VCAP_KF_L3_TTL_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 41,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TOS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 42,
+ .width = 8,
+ },
+ [VCAP_KF_L3_IP6_DIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 50,
+ .width = 128,
+ },
+ [VCAP_KF_L3_IP6_SIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 178,
+ .width = 128,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 306,
+ .width = 1,
+ },
+ [VCAP_KF_L3_IP_PROTO] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 307,
+ .width = 8,
+ },
+ [VCAP_KF_L3_PAYLOAD] = {
+ .type = VCAP_FIELD_U56,
+ .offset = 315,
+ .width = 56,
+ },
+};
+
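+/* SMAC_SIP4 is the only IS2 set without a TYPE key (type_id is -1 in
+ * is2_keyfield_set below); being the sole X1 set, it is identified by
+ * its single-subword size alone.
+ */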
+static const struct vcap_field is2_smac_sip4_keyfield[] = {
+ [VCAP_KF_IF_IGR_PORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 4,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 4,
+ .width = 48,
+ },
+ [VCAP_KF_L3_IP4_SIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 52,
+ .width = 32,
+ },
+};
+
+static const struct vcap_field is2_smac_sip6_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 4,
+ },
+ [VCAP_KF_IF_IGR_PORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 4,
+ .width = 4,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 8,
+ .width = 48,
+ },
+ [VCAP_KF_L3_IP6_SIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 56,
+ .width = 128,
+ },
+};
+
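+/* ES0, the egress VCAP, has a single key set, keyed on the egress and
+ * ingress port numbers, the classified ISDX/VLAN/DP fields and the
+ * RTP ID/PDU type.
+ */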
+static const struct vcap_field es0_vid_keyfield[] = {
+ [VCAP_KF_IF_EGR_PORT_NO] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 4,
+ },
+ [VCAP_KF_IF_IGR_PORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 4,
+ .width = 4,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 8,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 9,
+ .width = 8,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 17,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 18,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 19,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 31,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 32,
+ .width = 3,
+ },
+ [VCAP_KF_L3_DPL_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 35,
+ .width = 1,
+ },
+ [VCAP_KF_RTP_ID] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 36,
+ .width = 10,
+ },
+ [VCAP_KF_PDU_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 46,
+ .width = 4,
+ },
+};
+
+/* keyfield_set */
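+/* Each struct vcap_set gives the geometry of one key set: type_id is
+ * the value of the set's TYPE key (-1 when the set has no TYPE key),
+ * sw_per_item is how many 96-bit subwords one entry occupies, and
+ * sw_cnt is how many such entries fit in a row.  For IS1 and IS2 every
+ * set satisfies sw_per_item * sw_cnt == 4, matching the sw_count of
+ * those instances.
+ */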
+static const struct vcap_set is1_keyfield_set[] = {
+ [VCAP_KFS_NORMAL] = {
+ .type_id = 0,
+ .sw_per_item = 2,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_5TUPLE_IP4] = {
+ .type_id = 1,
+ .sw_per_item = 2,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_NORMAL_IP6] = {
+ .type_id = 0,
+ .sw_per_item = 4,
+ .sw_cnt = 1,
+ },
+ [VCAP_KFS_7TUPLE] = {
+ .type_id = 1,
+ .sw_per_item = 4,
+ .sw_cnt = 1,
+ },
+ [VCAP_KFS_5TUPLE_IP6] = {
+ .type_id = 2,
+ .sw_per_item = 4,
+ .sw_cnt = 1,
+ },
+ [VCAP_KFS_DBL_VID] = {
+ .type_id = 0,
+ .sw_per_item = 1,
+ .sw_cnt = 4,
+ },
+ [VCAP_KFS_RT] = {
+ .type_id = 1,
+ .sw_per_item = 1,
+ .sw_cnt = 4,
+ },
+ [VCAP_KFS_DMAC_VID] = {
+ .type_id = 2,
+ .sw_per_item = 1,
+ .sw_cnt = 4,
+ },
+};
+
+static const struct vcap_set is2_keyfield_set[] = {
+ [VCAP_KFS_MAC_ETYPE] = {
+ .type_id = 0,
+ .sw_per_item = 2,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_MAC_LLC] = {
+ .type_id = 1,
+ .sw_per_item = 2,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_MAC_SNAP] = {
+ .type_id = 2,
+ .sw_per_item = 2,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_ARP] = {
+ .type_id = 3,
+ .sw_per_item = 2,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_IP4_TCP_UDP] = {
+ .type_id = 4,
+ .sw_per_item = 2,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_IP4_OTHER] = {
+ .type_id = 5,
+ .sw_per_item = 2,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_IP6_STD] = {
+ .type_id = 6,
+ .sw_per_item = 2,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_OAM] = {
+ .type_id = 7,
+ .sw_per_item = 2,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_IP6_TCP_UDP] = {
+ .type_id = 0,
+ .sw_per_item = 4,
+ .sw_cnt = 1,
+ },
+ [VCAP_KFS_IP6_OTHER] = {
+ .type_id = 1,
+ .sw_per_item = 4,
+ .sw_cnt = 1,
+ },
+ [VCAP_KFS_SMAC_SIP4] = {
+ .type_id = -1,
+ .sw_per_item = 1,
+ .sw_cnt = 4,
+ },
+ [VCAP_KFS_SMAC_SIP6] = {
+ .type_id = 8,
+ .sw_per_item = 2,
+ .sw_cnt = 2,
+ },
+};
+
+static const struct vcap_set es0_keyfield_set[] = {
+ [VCAP_KFS_VID] = {
+ .type_id = -1,
+ .sw_per_item = 1,
+ .sw_cnt = 1,
+ },
+};
+
+/* keyfield_set map */
+static const struct vcap_field *is1_keyfield_set_map[] = {
+ [VCAP_KFS_NORMAL] = is1_normal_keyfield,
+ [VCAP_KFS_5TUPLE_IP4] = is1_5tuple_ip4_keyfield,
+ [VCAP_KFS_NORMAL_IP6] = is1_normal_ip6_keyfield,
+ [VCAP_KFS_7TUPLE] = is1_7tuple_keyfield,
+ [VCAP_KFS_5TUPLE_IP6] = is1_5tuple_ip6_keyfield,
+ [VCAP_KFS_DBL_VID] = is1_dbl_vid_keyfield,
+ [VCAP_KFS_RT] = is1_rt_keyfield,
+ [VCAP_KFS_DMAC_VID] = is1_dmac_vid_keyfield,
+};
+
+static const struct vcap_field *is2_keyfield_set_map[] = {
+ [VCAP_KFS_MAC_ETYPE] = is2_mac_etype_keyfield,
+ [VCAP_KFS_MAC_LLC] = is2_mac_llc_keyfield,
+ [VCAP_KFS_MAC_SNAP] = is2_mac_snap_keyfield,
+ [VCAP_KFS_ARP] = is2_arp_keyfield,
+ [VCAP_KFS_IP4_TCP_UDP] = is2_ip4_tcp_udp_keyfield,
+ [VCAP_KFS_IP4_OTHER] = is2_ip4_other_keyfield,
+ [VCAP_KFS_IP6_STD] = is2_ip6_std_keyfield,
+ [VCAP_KFS_OAM] = is2_oam_keyfield,
+ [VCAP_KFS_IP6_TCP_UDP] = is2_ip6_tcp_udp_keyfield,
+ [VCAP_KFS_IP6_OTHER] = is2_ip6_other_keyfield,
+ [VCAP_KFS_SMAC_SIP4] = is2_smac_sip4_keyfield,
+ [VCAP_KFS_SMAC_SIP6] = is2_smac_sip6_keyfield,
+};
+
+static const struct vcap_field *es0_keyfield_set_map[] = {
+ [VCAP_KFS_VID] = es0_vid_keyfield,
+};
+
+/* keyfield_set map sizes */
+static int is1_keyfield_set_map_size[] = {
+ [VCAP_KFS_NORMAL] = ARRAY_SIZE(is1_normal_keyfield),
+ [VCAP_KFS_5TUPLE_IP4] = ARRAY_SIZE(is1_5tuple_ip4_keyfield),
+ [VCAP_KFS_NORMAL_IP6] = ARRAY_SIZE(is1_normal_ip6_keyfield),
+ [VCAP_KFS_7TUPLE] = ARRAY_SIZE(is1_7tuple_keyfield),
+ [VCAP_KFS_5TUPLE_IP6] = ARRAY_SIZE(is1_5tuple_ip6_keyfield),
+ [VCAP_KFS_DBL_VID] = ARRAY_SIZE(is1_dbl_vid_keyfield),
+ [VCAP_KFS_RT] = ARRAY_SIZE(is1_rt_keyfield),
+ [VCAP_KFS_DMAC_VID] = ARRAY_SIZE(is1_dmac_vid_keyfield),
+};
+
+static int is2_keyfield_set_map_size[] = {
+ [VCAP_KFS_MAC_ETYPE] = ARRAY_SIZE(is2_mac_etype_keyfield),
+ [VCAP_KFS_MAC_LLC] = ARRAY_SIZE(is2_mac_llc_keyfield),
+ [VCAP_KFS_MAC_SNAP] = ARRAY_SIZE(is2_mac_snap_keyfield),
+ [VCAP_KFS_ARP] = ARRAY_SIZE(is2_arp_keyfield),
+ [VCAP_KFS_IP4_TCP_UDP] = ARRAY_SIZE(is2_ip4_tcp_udp_keyfield),
+ [VCAP_KFS_IP4_OTHER] = ARRAY_SIZE(is2_ip4_other_keyfield),
+ [VCAP_KFS_IP6_STD] = ARRAY_SIZE(is2_ip6_std_keyfield),
+ [VCAP_KFS_OAM] = ARRAY_SIZE(is2_oam_keyfield),
+ [VCAP_KFS_IP6_TCP_UDP] = ARRAY_SIZE(is2_ip6_tcp_udp_keyfield),
+ [VCAP_KFS_IP6_OTHER] = ARRAY_SIZE(is2_ip6_other_keyfield),
+ [VCAP_KFS_SMAC_SIP4] = ARRAY_SIZE(is2_smac_sip4_keyfield),
+ [VCAP_KFS_SMAC_SIP6] = ARRAY_SIZE(is2_smac_sip6_keyfield),
+};
+
+static int es0_keyfield_set_map_size[] = {
+ [VCAP_KFS_VID] = ARRAY_SIZE(es0_vid_keyfield),
+};
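+
+/* A minimal sketch (not actual driver code) of how a map and its size
+ * array are meant to be consumed together; "kfs" stands for one of the
+ * VCAP_KFS_* values populated above.  Enum values a set does not use
+ * are left zeroed by the designated initializers, so a zero width
+ * marks a hole:
+ *
+ *	const struct vcap_field *fields = is2_keyfield_set_map[kfs];
+ *	int count = is2_keyfield_set_map_size[kfs];
+ *
+ *	for (int i = 0; i < count; ++i) {
+ *		if (!fields[i].width)
+ *			continue;
+ *		pr_info("%s: offset %d, width %d\n",
+ *			vcap_keyfield_names[i],
+ *			fields[i].offset, fields[i].width);
+ *	}
+ */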
+
+/* actionfields */
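+/* Action layouts use the same enum-to-bitfield scheme as the keys.
+ * Note the recurring pairing in the IS1 action below: each override
+ * (DSCP, QOS, DP, PCP, DEI, SFID, SGID, police, ...) has an enable bit
+ * placed directly ahead of its value field.
+ */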
+static const struct vcap_field is1_s1_actionfield[] = {
+ [VCAP_AF_TYPE] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 0,
+ .width = 1,
+ },
+ [VCAP_AF_DSCP_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 1,
+ .width = 1,
+ },
+ [VCAP_AF_DSCP_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 2,
+ .width = 6,
+ },
+ [VCAP_AF_QOS_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 8,
+ .width = 1,
+ },
+ [VCAP_AF_QOS_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 9,
+ .width = 3,
+ },
+ [VCAP_AF_DP_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 12,
+ .width = 1,
+ },
+ [VCAP_AF_DP_VAL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_AF_PAG_OVERRIDE_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 14,
+ .width = 8,
+ },
+ [VCAP_AF_PAG_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 22,
+ .width = 8,
+ },
+ [VCAP_AF_ISDX_REPLACE_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 30,
+ .width = 1,
+ },
+ [VCAP_AF_ISDX_ADD_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 31,
+ .width = 8,
+ },
+ [VCAP_AF_VID_REPLACE_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 39,
+ .width = 1,
+ },
+ [VCAP_AF_VID_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 40,
+ .width = 12,
+ },
+ [VCAP_AF_PCP_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 67,
+ .width = 1,
+ },
+ [VCAP_AF_PCP_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 68,
+ .width = 3,
+ },
+ [VCAP_AF_DEI_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 71,
+ .width = 1,
+ },
+ [VCAP_AF_DEI_VAL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 72,
+ .width = 1,
+ },
+ [VCAP_AF_VLAN_POP_CNT_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 73,
+ .width = 1,
+ },
+ [VCAP_AF_VLAN_POP_CNT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 74,
+ .width = 2,
+ },
+ [VCAP_AF_CUSTOM_ACE_TYPE_ENA] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 76,
+ .width = 4,
+ },
+ [VCAP_AF_SFID_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 80,
+ .width = 1,
+ },
+ [VCAP_AF_SFID_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 81,
+ .width = 8,
+ },
+ [VCAP_AF_SGID_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 89,
+ .width = 1,
+ },
+ [VCAP_AF_SGID_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 90,
+ .width = 8,
+ },
+ [VCAP_AF_POLICE_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 98,
+ .width = 1,
+ },
+ [VCAP_AF_POLICE_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 99,
+ .width = 9,
+ },
+ [VCAP_AF_OAM_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 108,
+ .width = 3,
+ },
+ [VCAP_AF_MRP_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 111,
+ .width = 2,
+ },
+ [VCAP_AF_DLR_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 113,
+ .width = 2,
+ },
+};
+
+static const struct vcap_field is2_base_type_actionfield[] = {
+ [VCAP_AF_HIT_ME_ONCE] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 0,
+ .width = 1,
+ },
+ [VCAP_AF_CPU_COPY_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 1,
+ .width = 1,
+ },
+ [VCAP_AF_CPU_QUEUE_NUM] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 2,
+ .width = 3,
+ },
+ [VCAP_AF_MASK_MODE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 5,
+ .width = 2,
+ },
+ [VCAP_AF_MIRROR_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 7,
+ .width = 1,
+ },
+ [VCAP_AF_LRN_DIS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 8,
+ .width = 1,
+ },
+ [VCAP_AF_POLICE_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 9,
+ .width = 1,
+ },
+ [VCAP_AF_POLICE_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 10,
+ .width = 9,
+ },
+ [VCAP_AF_POLICE_VCAP_ONLY] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 19,
+ .width = 1,
+ },
+ [VCAP_AF_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 20,
+ .width = 8,
+ },
+ [VCAP_AF_REW_OP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 28,
+ .width = 16,
+ },
+ [VCAP_AF_ISDX_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 44,
+ .width = 1,
+ },
+ [VCAP_AF_ACL_ID] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 45,
+ .width = 6,
+ },
+};
+
+static const struct vcap_field is2_smac_sip_actionfield[] = {
+ [VCAP_AF_CPU_COPY_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 0,
+ .width = 1,
+ },
+ [VCAP_AF_CPU_QUEUE_NUM] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 1,
+ .width = 3,
+ },
+ [VCAP_AF_FWD_KILL_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 4,
+ .width = 1,
+ },
+ [VCAP_AF_HOST_MATCH] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 5,
+ .width = 1,
+ },
+};
+
+static const struct vcap_field es0_vid_actionfield[] = {
+ [VCAP_AF_PUSH_OUTER_TAG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 2,
+ },
+ [VCAP_AF_PUSH_INNER_TAG] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 2,
+ .width = 1,
+ },
+ [VCAP_AF_TAG_A_TPID_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 3,
+ .width = 2,
+ },
+ [VCAP_AF_TAG_A_VID_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 5,
+ .width = 1,
+ },
+ [VCAP_AF_TAG_A_PCP_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 6,
+ .width = 2,
+ },
+ [VCAP_AF_TAG_A_DEI_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 8,
+ .width = 2,
+ },
+ [VCAP_AF_TAG_B_TPID_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 10,
+ .width = 2,
+ },
+ [VCAP_AF_TAG_B_VID_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 12,
+ .width = 1,
+ },
+ [VCAP_AF_TAG_B_PCP_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 13,
+ .width = 2,
+ },
+ [VCAP_AF_TAG_B_DEI_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 15,
+ .width = 2,
+ },
+ [VCAP_AF_VID_A_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 17,
+ .width = 12,
+ },
+ [VCAP_AF_PCP_A_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 29,
+ .width = 3,
+ },
+ [VCAP_AF_DEI_A_VAL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 32,
+ .width = 1,
+ },
+ [VCAP_AF_VID_B_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 33,
+ .width = 12,
+ },
+ [VCAP_AF_PCP_B_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 45,
+ .width = 3,
+ },
+ [VCAP_AF_DEI_B_VAL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 48,
+ .width = 1,
+ },
+ [VCAP_AF_ESDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 49,
+ .width = 8,
+ },
+};
+
+/* actionfield_set */
+static const struct vcap_set is1_actionfield_set[] = {
+ [VCAP_AFS_S1] = {
+ .type_id = 0,
+ .sw_per_item = 1,
+ .sw_cnt = 4,
+ },
+};
+
+static const struct vcap_set is2_actionfield_set[] = {
+ [VCAP_AFS_BASE_TYPE] = {
+ .type_id = -1,
+ .sw_per_item = 2,
+ .sw_cnt = 2,
+ },
+ [VCAP_AFS_SMAC_SIP] = {
+ .type_id = -1,
+ .sw_per_item = 1,
+ .sw_cnt = 4,
+ },
+};
+
+static const struct vcap_set es0_actionfield_set[] = {
+ [VCAP_AFS_VID] = {
+ .type_id = -1,
+ .sw_per_item = 1,
+ .sw_cnt = 1,
+ },
+};
+
+/* actionfield_set map */
+static const struct vcap_field *is1_actionfield_set_map[] = {
+ [VCAP_AFS_S1] = is1_s1_actionfield,
+};
+
+static const struct vcap_field *is2_actionfield_set_map[] = {
+ [VCAP_AFS_BASE_TYPE] = is2_base_type_actionfield,
+ [VCAP_AFS_SMAC_SIP] = is2_smac_sip_actionfield,
+};
+
+static const struct vcap_field *es0_actionfield_set_map[] = {
+ [VCAP_AFS_VID] = es0_vid_actionfield,
+};
+
+/* actionfield_set map size */
+static int is1_actionfield_set_map_size[] = {
+ [VCAP_AFS_S1] = ARRAY_SIZE(is1_s1_actionfield),
+};
+
+static int is2_actionfield_set_map_size[] = {
+ [VCAP_AFS_BASE_TYPE] = ARRAY_SIZE(is2_base_type_actionfield),
+ [VCAP_AFS_SMAC_SIP] = ARRAY_SIZE(is2_smac_sip_actionfield),
+};
+
+static int es0_actionfield_set_map_size[] = {
+ [VCAP_AFS_VID] = ARRAY_SIZE(es0_vid_actionfield),
+};
+
+/* Type Groups */
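+/* Typegroups are the per-subword tag bits the hardware uses to tell
+ * entry sizes apart.  The arrays are indexed by sw_per_item, and each
+ * element writes "value" into "width" bits at "offset"; the key
+ * offsets 0/96/192/288 land on the 96-bit subword boundaries of a row
+ * (sw_width = 96 below).  The [5] = NULL entries merely fix the array
+ * length.
+ */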
+static const struct vcap_typegroup is1_x4_keyfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 3,
+ .value = 4,
+ },
+ {
+ .offset = 96,
+ .width = 1,
+ .value = 0,
+ },
+ {
+ .offset = 192,
+ .width = 2,
+ .value = 0,
+ },
+ {
+ .offset = 288,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup is1_x2_keyfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 2,
+ .value = 2,
+ },
+ {
+ .offset = 96,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup is1_x1_keyfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 1,
+ .value = 1,
+ },
+ {}
+};
+
+static const struct vcap_typegroup is2_x4_keyfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 3,
+ .value = 4,
+ },
+ {
+ .offset = 96,
+ .width = 1,
+ .value = 0,
+ },
+ {
+ .offset = 192,
+ .width = 2,
+ .value = 0,
+ },
+ {
+ .offset = 288,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup is2_x2_keyfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 2,
+ .value = 2,
+ },
+ {
+ .offset = 96,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup is2_x1_keyfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 1,
+ .value = 1,
+ },
+ {}
+};
+
+static const struct vcap_typegroup es0_x1_keyfield_set_typegroups[] = {
+ {}
+};
+
+static const struct vcap_typegroup *is1_keyfield_set_typegroups[] = {
+ [4] = is1_x4_keyfield_set_typegroups,
+ [2] = is1_x2_keyfield_set_typegroups,
+ [1] = is1_x1_keyfield_set_typegroups,
+ [5] = NULL,
+};
+
+static const struct vcap_typegroup *is2_keyfield_set_typegroups[] = {
+ [4] = is2_x4_keyfield_set_typegroups,
+ [2] = is2_x2_keyfield_set_typegroups,
+ [1] = is2_x1_keyfield_set_typegroups,
+ [5] = NULL,
+};
+
+static const struct vcap_typegroup *es0_keyfield_set_typegroups[] = {
+ [1] = es0_x1_keyfield_set_typegroups,
+ [2] = NULL,
+};
+
+static const struct vcap_typegroup is1_x1_actionfield_set_typegroups[] = {
+ {}
+};
+
+static const struct vcap_typegroup is2_x2_actionfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 2,
+ .value = 2,
+ },
+ {
+ .offset = 31,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup is2_x1_actionfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 1,
+ .value = 1,
+ },
+ {}
+};
+
+static const struct vcap_typegroup es0_x1_actionfield_set_typegroups[] = {
+ {}
+};
+
+static const struct vcap_typegroup *is1_actionfield_set_typegroups[] = {
+ [1] = is1_x1_actionfield_set_typegroups,
+ [5] = NULL,
+};
+
+static const struct vcap_typegroup *is2_actionfield_set_typegroups[] = {
+ [2] = is2_x2_actionfield_set_typegroups,
+ [1] = is2_x1_actionfield_set_typegroups,
+ [5] = NULL,
+};
+
+static const struct vcap_typegroup *es0_actionfield_set_typegroups[] = {
+ [1] = es0_x1_actionfield_set_typegroups,
+ [2] = NULL,
+};
+
+/* Keyfieldset names */
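+/* These name tables (and the actionfield ones below) span the enum
+ * space shared by all Microchip VCAP drivers, so they also cover sets
+ * and fields with no lan966x table above (VCAP_KFS_ETAG, for
+ * instance).
+ */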
+static const char * const vcap_keyfield_set_names[] = {
+ [VCAP_KFS_NO_VALUE] = "(None)",
+ [VCAP_KFS_5TUPLE_IP4] = "VCAP_KFS_5TUPLE_IP4",
+ [VCAP_KFS_5TUPLE_IP6] = "VCAP_KFS_5TUPLE_IP6",
+ [VCAP_KFS_7TUPLE] = "VCAP_KFS_7TUPLE",
+ [VCAP_KFS_ARP] = "VCAP_KFS_ARP",
+ [VCAP_KFS_DBL_VID] = "VCAP_KFS_DBL_VID",
+ [VCAP_KFS_DMAC_VID] = "VCAP_KFS_DMAC_VID",
+ [VCAP_KFS_ETAG] = "VCAP_KFS_ETAG",
+ [VCAP_KFS_IP4_OTHER] = "VCAP_KFS_IP4_OTHER",
+ [VCAP_KFS_IP4_TCP_UDP] = "VCAP_KFS_IP4_TCP_UDP",
+ [VCAP_KFS_IP4_VID] = "VCAP_KFS_IP4_VID",
+ [VCAP_KFS_IP6_OTHER] = "VCAP_KFS_IP6_OTHER",
+ [VCAP_KFS_IP6_STD] = "VCAP_KFS_IP6_STD",
+ [VCAP_KFS_IP6_TCP_UDP] = "VCAP_KFS_IP6_TCP_UDP",
+ [VCAP_KFS_IP6_VID] = "VCAP_KFS_IP6_VID",
+ [VCAP_KFS_IP_7TUPLE] = "VCAP_KFS_IP_7TUPLE",
+ [VCAP_KFS_ISDX] = "VCAP_KFS_ISDX",
+ [VCAP_KFS_LL_FULL] = "VCAP_KFS_LL_FULL",
+ [VCAP_KFS_MAC_ETYPE] = "VCAP_KFS_MAC_ETYPE",
+ [VCAP_KFS_MAC_LLC] = "VCAP_KFS_MAC_LLC",
+ [VCAP_KFS_MAC_SNAP] = "VCAP_KFS_MAC_SNAP",
+ [VCAP_KFS_NORMAL] = "VCAP_KFS_NORMAL",
+ [VCAP_KFS_NORMAL_5TUPLE_IP4] = "VCAP_KFS_NORMAL_5TUPLE_IP4",
+ [VCAP_KFS_NORMAL_7TUPLE] = "VCAP_KFS_NORMAL_7TUPLE",
+ [VCAP_KFS_NORMAL_IP6] = "VCAP_KFS_NORMAL_IP6",
+ [VCAP_KFS_OAM] = "VCAP_KFS_OAM",
+ [VCAP_KFS_PURE_5TUPLE_IP4] = "VCAP_KFS_PURE_5TUPLE_IP4",
+ [VCAP_KFS_RT] = "VCAP_KFS_RT",
+ [VCAP_KFS_SMAC_SIP4] = "VCAP_KFS_SMAC_SIP4",
+ [VCAP_KFS_SMAC_SIP6] = "VCAP_KFS_SMAC_SIP6",
+ [VCAP_KFS_VID] = "VCAP_KFS_VID",
+};
+
+/* Actionfieldset names */
+static const char * const vcap_actionfield_set_names[] = {
+ [VCAP_AFS_NO_VALUE] = "(None)",
+ [VCAP_AFS_BASE_TYPE] = "VCAP_AFS_BASE_TYPE",
+ [VCAP_AFS_CLASSIFICATION] = "VCAP_AFS_CLASSIFICATION",
+ [VCAP_AFS_CLASS_REDUCED] = "VCAP_AFS_CLASS_REDUCED",
+ [VCAP_AFS_ES0] = "VCAP_AFS_ES0",
+ [VCAP_AFS_FULL] = "VCAP_AFS_FULL",
+ [VCAP_AFS_S1] = "VCAP_AFS_S1",
+ [VCAP_AFS_SMAC_SIP] = "VCAP_AFS_SMAC_SIP",
+ [VCAP_AFS_VID] = "VCAP_AFS_VID",
+};
+
+/* Keyfield names */
+static const char * const vcap_keyfield_names[] = {
+ [VCAP_KF_NO_VALUE] = "(None)",
+ [VCAP_KF_8021BR_ECID_BASE] = "8021BR_ECID_BASE",
+ [VCAP_KF_8021BR_ECID_EXT] = "8021BR_ECID_EXT",
+ [VCAP_KF_8021BR_E_TAGGED] = "8021BR_E_TAGGED",
+ [VCAP_KF_8021BR_GRP] = "8021BR_GRP",
+ [VCAP_KF_8021BR_IGR_ECID_BASE] = "8021BR_IGR_ECID_BASE",
+ [VCAP_KF_8021BR_IGR_ECID_EXT] = "8021BR_IGR_ECID_EXT",
+ [VCAP_KF_8021CB_R_TAGGED_IS] = "8021CB_R_TAGGED_IS",
+ [VCAP_KF_8021Q_DEI0] = "8021Q_DEI0",
+ [VCAP_KF_8021Q_DEI1] = "8021Q_DEI1",
+ [VCAP_KF_8021Q_DEI2] = "8021Q_DEI2",
+ [VCAP_KF_8021Q_DEI_CLS] = "8021Q_DEI_CLS",
+ [VCAP_KF_8021Q_PCP0] = "8021Q_PCP0",
+ [VCAP_KF_8021Q_PCP1] = "8021Q_PCP1",
+ [VCAP_KF_8021Q_PCP2] = "8021Q_PCP2",
+ [VCAP_KF_8021Q_PCP_CLS] = "8021Q_PCP_CLS",
+ [VCAP_KF_8021Q_TPID] = "8021Q_TPID",
+ [VCAP_KF_8021Q_TPID0] = "8021Q_TPID0",
+ [VCAP_KF_8021Q_TPID1] = "8021Q_TPID1",
+ [VCAP_KF_8021Q_TPID2] = "8021Q_TPID2",
+ [VCAP_KF_8021Q_VID0] = "8021Q_VID0",
+ [VCAP_KF_8021Q_VID1] = "8021Q_VID1",
+ [VCAP_KF_8021Q_VID2] = "8021Q_VID2",
+ [VCAP_KF_8021Q_VID_CLS] = "8021Q_VID_CLS",
+ [VCAP_KF_8021Q_VLAN_DBL_TAGGED_IS] = "8021Q_VLAN_DBL_TAGGED_IS",
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = "8021Q_VLAN_TAGGED_IS",
+ [VCAP_KF_8021Q_VLAN_TAGS] = "8021Q_VLAN_TAGS",
+ [VCAP_KF_ACL_GRP_ID] = "ACL_GRP_ID",
+ [VCAP_KF_ARP_ADDR_SPACE_OK_IS] = "ARP_ADDR_SPACE_OK_IS",
+ [VCAP_KF_ARP_LEN_OK_IS] = "ARP_LEN_OK_IS",
+ [VCAP_KF_ARP_OPCODE] = "ARP_OPCODE",
+ [VCAP_KF_ARP_OPCODE_UNKNOWN_IS] = "ARP_OPCODE_UNKNOWN_IS",
+ [VCAP_KF_ARP_PROTO_SPACE_OK_IS] = "ARP_PROTO_SPACE_OK_IS",
+ [VCAP_KF_ARP_SENDER_MATCH_IS] = "ARP_SENDER_MATCH_IS",
+ [VCAP_KF_ARP_TGT_MATCH_IS] = "ARP_TGT_MATCH_IS",
+ [VCAP_KF_COSID_CLS] = "COSID_CLS",
+ [VCAP_KF_ES0_ISDX_KEY_ENA] = "ES0_ISDX_KEY_ENA",
+ [VCAP_KF_ETYPE] = "ETYPE",
+ [VCAP_KF_ETYPE_LEN_IS] = "ETYPE_LEN_IS",
+ [VCAP_KF_HOST_MATCH] = "HOST_MATCH",
+ [VCAP_KF_IF_EGR_PORT_MASK] = "IF_EGR_PORT_MASK",
+ [VCAP_KF_IF_EGR_PORT_MASK_RNG] = "IF_EGR_PORT_MASK_RNG",
+ [VCAP_KF_IF_EGR_PORT_NO] = "IF_EGR_PORT_NO",
+ [VCAP_KF_IF_IGR_PORT] = "IF_IGR_PORT",
+ [VCAP_KF_IF_IGR_PORT_MASK] = "IF_IGR_PORT_MASK",
+ [VCAP_KF_IF_IGR_PORT_MASK_L3] = "IF_IGR_PORT_MASK_L3",
+ [VCAP_KF_IF_IGR_PORT_MASK_RNG] = "IF_IGR_PORT_MASK_RNG",
+ [VCAP_KF_IF_IGR_PORT_MASK_SEL] = "IF_IGR_PORT_MASK_SEL",
+ [VCAP_KF_IF_IGR_PORT_SEL] = "IF_IGR_PORT_SEL",
+ [VCAP_KF_IP4_IS] = "IP4_IS",
+ [VCAP_KF_IP_MC_IS] = "IP_MC_IS",
+ [VCAP_KF_IP_PAYLOAD_5TUPLE] = "IP_PAYLOAD_5TUPLE",
+ [VCAP_KF_IP_PAYLOAD_S1_IP6] = "IP_PAYLOAD_S1_IP6",
+ [VCAP_KF_IP_SNAP_IS] = "IP_SNAP_IS",
+ [VCAP_KF_ISDX_CLS] = "ISDX_CLS",
+ [VCAP_KF_ISDX_GT0_IS] = "ISDX_GT0_IS",
+ [VCAP_KF_L2_BC_IS] = "L2_BC_IS",
+ [VCAP_KF_L2_DMAC] = "L2_DMAC",
+ [VCAP_KF_L2_FRM_TYPE] = "L2_FRM_TYPE",
+ [VCAP_KF_L2_FWD_IS] = "L2_FWD_IS",
+ [VCAP_KF_L2_LLC] = "L2_LLC",
+ [VCAP_KF_L2_MAC] = "L2_MAC",
+ [VCAP_KF_L2_MC_IS] = "L2_MC_IS",
+ [VCAP_KF_L2_PAYLOAD0] = "L2_PAYLOAD0",
+ [VCAP_KF_L2_PAYLOAD1] = "L2_PAYLOAD1",
+ [VCAP_KF_L2_PAYLOAD2] = "L2_PAYLOAD2",
+ [VCAP_KF_L2_PAYLOAD_ETYPE] = "L2_PAYLOAD_ETYPE",
+ [VCAP_KF_L2_SMAC] = "L2_SMAC",
+ [VCAP_KF_L2_SNAP] = "L2_SNAP",
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = "L3_DIP_EQ_SIP_IS",
+ [VCAP_KF_L3_DPL_CLS] = "L3_DPL_CLS",
+ [VCAP_KF_L3_DSCP] = "L3_DSCP",
+ [VCAP_KF_L3_DST_IS] = "L3_DST_IS",
+ [VCAP_KF_L3_FRAGMENT] = "L3_FRAGMENT",
+ [VCAP_KF_L3_FRAGMENT_TYPE] = "L3_FRAGMENT_TYPE",
+ [VCAP_KF_L3_FRAG_INVLD_L4_LEN] = "L3_FRAG_INVLD_L4_LEN",
+ [VCAP_KF_L3_FRAG_OFS_GT0] = "L3_FRAG_OFS_GT0",
+ [VCAP_KF_L3_IP4_DIP] = "L3_IP4_DIP",
+ [VCAP_KF_L3_IP4_SIP] = "L3_IP4_SIP",
+ [VCAP_KF_L3_IP6_DIP] = "L3_IP6_DIP",
+ [VCAP_KF_L3_IP6_DIP_MSB] = "L3_IP6_DIP_MSB",
+ [VCAP_KF_L3_IP6_SIP] = "L3_IP6_SIP",
+ [VCAP_KF_L3_IP6_SIP_MSB] = "L3_IP6_SIP_MSB",
+ [VCAP_KF_L3_IP_PROTO] = "L3_IP_PROTO",
+ [VCAP_KF_L3_OPTIONS_IS] = "L3_OPTIONS_IS",
+ [VCAP_KF_L3_PAYLOAD] = "L3_PAYLOAD",
+ [VCAP_KF_L3_RT_IS] = "L3_RT_IS",
+ [VCAP_KF_L3_TOS] = "L3_TOS",
+ [VCAP_KF_L3_TTL_GT0] = "L3_TTL_GT0",
+ [VCAP_KF_L4_1588_DOM] = "L4_1588_DOM",
+ [VCAP_KF_L4_1588_VER] = "L4_1588_VER",
+ [VCAP_KF_L4_ACK] = "L4_ACK",
+ [VCAP_KF_L4_DPORT] = "L4_DPORT",
+ [VCAP_KF_L4_FIN] = "L4_FIN",
+ [VCAP_KF_L4_PAYLOAD] = "L4_PAYLOAD",
+ [VCAP_KF_L4_PSH] = "L4_PSH",
+ [VCAP_KF_L4_RNG] = "L4_RNG",
+ [VCAP_KF_L4_RST] = "L4_RST",
+ [VCAP_KF_L4_SEQUENCE_EQ0_IS] = "L4_SEQUENCE_EQ0_IS",
+ [VCAP_KF_L4_SPORT] = "L4_SPORT",
+ [VCAP_KF_L4_SPORT_EQ_DPORT_IS] = "L4_SPORT_EQ_DPORT_IS",
+ [VCAP_KF_L4_SYN] = "L4_SYN",
+ [VCAP_KF_L4_URG] = "L4_URG",
+ [VCAP_KF_LOOKUP_FIRST_IS] = "LOOKUP_FIRST_IS",
+ [VCAP_KF_LOOKUP_GEN_IDX] = "LOOKUP_GEN_IDX",
+ [VCAP_KF_LOOKUP_GEN_IDX_SEL] = "LOOKUP_GEN_IDX_SEL",
+ [VCAP_KF_LOOKUP_INDEX] = "LOOKUP_INDEX",
+ [VCAP_KF_LOOKUP_PAG] = "LOOKUP_PAG",
+ [VCAP_KF_MIRROR_PROBE] = "MIRROR_PROBE",
+ [VCAP_KF_OAM_CCM_CNTS_EQ0] = "OAM_CCM_CNTS_EQ0",
+ [VCAP_KF_OAM_DETECTED] = "OAM_DETECTED",
+ [VCAP_KF_OAM_FLAGS] = "OAM_FLAGS",
+ [VCAP_KF_OAM_MEL_FLAGS] = "OAM_MEL_FLAGS",
+ [VCAP_KF_OAM_MEPID] = "OAM_MEPID",
+ [VCAP_KF_OAM_OPCODE] = "OAM_OPCODE",
+ [VCAP_KF_OAM_VER] = "OAM_VER",
+ [VCAP_KF_OAM_Y1731_IS] = "OAM_Y1731_IS",
+ [VCAP_KF_PDU_TYPE] = "PDU_TYPE",
+ [VCAP_KF_PROT_ACTIVE] = "PROT_ACTIVE",
+ [VCAP_KF_RTP_ID] = "RTP_ID",
+ [VCAP_KF_RT_FRMID] = "RT_FRMID",
+ [VCAP_KF_RT_TYPE] = "RT_TYPE",
+ [VCAP_KF_RT_VLAN_IDX] = "RT_VLAN_IDX",
+ [VCAP_KF_TCP_IS] = "TCP_IS",
+ [VCAP_KF_TCP_UDP_IS] = "TCP_UDP_IS",
+ [VCAP_KF_TYPE] = "TYPE",
+};
+
+/* Actionfield names */
+static const char * const vcap_actionfield_names[] = {
+ [VCAP_AF_NO_VALUE] = "(None)",
+ [VCAP_AF_ACL_ID] = "ACL_ID",
+ [VCAP_AF_CLS_VID_SEL] = "CLS_VID_SEL",
+ [VCAP_AF_CNT_ID] = "CNT_ID",
+ [VCAP_AF_COPY_PORT_NUM] = "COPY_PORT_NUM",
+ [VCAP_AF_COPY_QUEUE_NUM] = "COPY_QUEUE_NUM",
+ [VCAP_AF_CPU_COPY_ENA] = "CPU_COPY_ENA",
+ [VCAP_AF_CPU_QU] = "CPU_QU",
+ [VCAP_AF_CPU_QUEUE_NUM] = "CPU_QUEUE_NUM",
+ [VCAP_AF_CUSTOM_ACE_TYPE_ENA] = "CUSTOM_ACE_TYPE_ENA",
+ [VCAP_AF_DEI_A_VAL] = "DEI_A_VAL",
+ [VCAP_AF_DEI_B_VAL] = "DEI_B_VAL",
+ [VCAP_AF_DEI_C_VAL] = "DEI_C_VAL",
+ [VCAP_AF_DEI_ENA] = "DEI_ENA",
+ [VCAP_AF_DEI_VAL] = "DEI_VAL",
+ [VCAP_AF_DLR_SEL] = "DLR_SEL",
+ [VCAP_AF_DP_ENA] = "DP_ENA",
+ [VCAP_AF_DP_VAL] = "DP_VAL",
+ [VCAP_AF_DSCP_ENA] = "DSCP_ENA",
+ [VCAP_AF_DSCP_SEL] = "DSCP_SEL",
+ [VCAP_AF_DSCP_VAL] = "DSCP_VAL",
+ [VCAP_AF_ES2_REW_CMD] = "ES2_REW_CMD",
+ [VCAP_AF_ESDX] = "ESDX",
+ [VCAP_AF_FWD_KILL_ENA] = "FWD_KILL_ENA",
+ [VCAP_AF_FWD_MODE] = "FWD_MODE",
+ [VCAP_AF_FWD_SEL] = "FWD_SEL",
+ [VCAP_AF_HIT_ME_ONCE] = "HIT_ME_ONCE",
+ [VCAP_AF_HOST_MATCH] = "HOST_MATCH",
+ [VCAP_AF_IGNORE_PIPELINE_CTRL] = "IGNORE_PIPELINE_CTRL",
+ [VCAP_AF_INTR_ENA] = "INTR_ENA",
+ [VCAP_AF_ISDX_ADD_REPLACE_SEL] = "ISDX_ADD_REPLACE_SEL",
+ [VCAP_AF_ISDX_ADD_VAL] = "ISDX_ADD_VAL",
+ [VCAP_AF_ISDX_ENA] = "ISDX_ENA",
+ [VCAP_AF_ISDX_REPLACE_ENA] = "ISDX_REPLACE_ENA",
+ [VCAP_AF_ISDX_VAL] = "ISDX_VAL",
+ [VCAP_AF_LOOP_ENA] = "LOOP_ENA",
+ [VCAP_AF_LRN_DIS] = "LRN_DIS",
+ [VCAP_AF_MAP_IDX] = "MAP_IDX",
+ [VCAP_AF_MAP_KEY] = "MAP_KEY",
+ [VCAP_AF_MAP_LOOKUP_SEL] = "MAP_LOOKUP_SEL",
+ [VCAP_AF_MASK_MODE] = "MASK_MODE",
+ [VCAP_AF_MATCH_ID] = "MATCH_ID",
+ [VCAP_AF_MATCH_ID_MASK] = "MATCH_ID_MASK",
+ [VCAP_AF_MIRROR_ENA] = "MIRROR_ENA",
+ [VCAP_AF_MIRROR_PROBE] = "MIRROR_PROBE",
+ [VCAP_AF_MIRROR_PROBE_ID] = "MIRROR_PROBE_ID",
+ [VCAP_AF_MRP_SEL] = "MRP_SEL",
+ [VCAP_AF_NXT_IDX] = "NXT_IDX",
+ [VCAP_AF_NXT_IDX_CTRL] = "NXT_IDX_CTRL",
+ [VCAP_AF_OAM_SEL] = "OAM_SEL",
+ [VCAP_AF_PAG_OVERRIDE_MASK] = "PAG_OVERRIDE_MASK",
+ [VCAP_AF_PAG_VAL] = "PAG_VAL",
+ [VCAP_AF_PCP_A_VAL] = "PCP_A_VAL",
+ [VCAP_AF_PCP_B_VAL] = "PCP_B_VAL",
+ [VCAP_AF_PCP_C_VAL] = "PCP_C_VAL",
+ [VCAP_AF_PCP_ENA] = "PCP_ENA",
+ [VCAP_AF_PCP_VAL] = "PCP_VAL",
+ [VCAP_AF_PIPELINE_ACT] = "PIPELINE_ACT",
+ [VCAP_AF_PIPELINE_FORCE_ENA] = "PIPELINE_FORCE_ENA",
+ [VCAP_AF_PIPELINE_PT] = "PIPELINE_PT",
+ [VCAP_AF_POLICE_ENA] = "POLICE_ENA",
+ [VCAP_AF_POLICE_IDX] = "POLICE_IDX",
+ [VCAP_AF_POLICE_REMARK] = "POLICE_REMARK",
+ [VCAP_AF_POLICE_VCAP_ONLY] = "POLICE_VCAP_ONLY",
+ [VCAP_AF_POP_VAL] = "POP_VAL",
+ [VCAP_AF_PORT_MASK] = "PORT_MASK",
+ [VCAP_AF_PUSH_CUSTOMER_TAG] = "PUSH_CUSTOMER_TAG",
+ [VCAP_AF_PUSH_INNER_TAG] = "PUSH_INNER_TAG",
+ [VCAP_AF_PUSH_OUTER_TAG] = "PUSH_OUTER_TAG",
+ [VCAP_AF_QOS_ENA] = "QOS_ENA",
+ [VCAP_AF_QOS_VAL] = "QOS_VAL",
+ [VCAP_AF_REW_OP] = "REW_OP",
+ [VCAP_AF_RT_DIS] = "RT_DIS",
+ [VCAP_AF_SFID_ENA] = "SFID_ENA",
+ [VCAP_AF_SFID_VAL] = "SFID_VAL",
+ [VCAP_AF_SGID_ENA] = "SGID_ENA",
+ [VCAP_AF_SGID_VAL] = "SGID_VAL",
+ [VCAP_AF_SWAP_MACS_ENA] = "SWAP_MACS_ENA",
+ [VCAP_AF_TAG_A_DEI_SEL] = "TAG_A_DEI_SEL",
+ [VCAP_AF_TAG_A_PCP_SEL] = "TAG_A_PCP_SEL",
+ [VCAP_AF_TAG_A_TPID_SEL] = "TAG_A_TPID_SEL",
+ [VCAP_AF_TAG_A_VID_SEL] = "TAG_A_VID_SEL",
+ [VCAP_AF_TAG_B_DEI_SEL] = "TAG_B_DEI_SEL",
+ [VCAP_AF_TAG_B_PCP_SEL] = "TAG_B_PCP_SEL",
+ [VCAP_AF_TAG_B_TPID_SEL] = "TAG_B_TPID_SEL",
+ [VCAP_AF_TAG_B_VID_SEL] = "TAG_B_VID_SEL",
+ [VCAP_AF_TAG_C_DEI_SEL] = "TAG_C_DEI_SEL",
+ [VCAP_AF_TAG_C_PCP_SEL] = "TAG_C_PCP_SEL",
+ [VCAP_AF_TAG_C_TPID_SEL] = "TAG_C_TPID_SEL",
+ [VCAP_AF_TAG_C_VID_SEL] = "TAG_C_VID_SEL",
+ [VCAP_AF_TYPE] = "TYPE",
+ [VCAP_AF_UNTAG_VID_ENA] = "UNTAG_VID_ENA",
+ [VCAP_AF_VID_A_VAL] = "VID_A_VAL",
+ [VCAP_AF_VID_B_VAL] = "VID_B_VAL",
+ [VCAP_AF_VID_C_VAL] = "VID_C_VAL",
+ [VCAP_AF_VID_REPLACE_ENA] = "VID_REPLACE_ENA",
+ [VCAP_AF_VID_VAL] = "VID_VAL",
+ [VCAP_AF_VLAN_POP_CNT] = "VLAN_POP_CNT",
+ [VCAP_AF_VLAN_POP_CNT_ENA] = "VLAN_POP_CNT_ENA",
+};
+
+/* VCAPs */
+const struct vcap_info lan966x_vcaps[] = {
+ [VCAP_TYPE_IS1] = {
+ .name = "is1",
+ .rows = 192,
+ .sw_count = 4,
+ .sw_width = 96,
+ .sticky_width = 32,
+ .act_width = 123,
+ .default_cnt = 0,
+ .require_cnt_dis = 1,
+ .version = 1,
+ .keyfield_set = is1_keyfield_set,
+ .keyfield_set_size = ARRAY_SIZE(is1_keyfield_set),
+ .actionfield_set = is1_actionfield_set,
+ .actionfield_set_size = ARRAY_SIZE(is1_actionfield_set),
+ .keyfield_set_map = is1_keyfield_set_map,
+ .keyfield_set_map_size = is1_keyfield_set_map_size,
+ .actionfield_set_map = is1_actionfield_set_map,
+ .actionfield_set_map_size = is1_actionfield_set_map_size,
+ .keyfield_set_typegroups = is1_keyfield_set_typegroups,
+ .actionfield_set_typegroups = is1_actionfield_set_typegroups,
+ },
+ [VCAP_TYPE_IS2] = {
+ .name = "is2",
+ .rows = 64,
+ .sw_count = 4,
+ .sw_width = 96,
+ .sticky_width = 32,
+ .act_width = 31,
+ .default_cnt = 11,
+ .require_cnt_dis = 1,
+ .version = 1,
+ .keyfield_set = is2_keyfield_set,
+ .keyfield_set_size = ARRAY_SIZE(is2_keyfield_set),
+ .actionfield_set = is2_actionfield_set,
+ .actionfield_set_size = ARRAY_SIZE(is2_actionfield_set),
+ .keyfield_set_map = is2_keyfield_set_map,
+ .keyfield_set_map_size = is2_keyfield_set_map_size,
+ .actionfield_set_map = is2_actionfield_set_map,
+ .actionfield_set_map_size = is2_actionfield_set_map_size,
+ .keyfield_set_typegroups = is2_keyfield_set_typegroups,
+ .actionfield_set_typegroups = is2_actionfield_set_typegroups,
+ },
+ [VCAP_TYPE_ES0] = {
+ .name = "es0",
+ .rows = 256,
+ .sw_count = 1,
+ .sw_width = 96,
+ .sticky_width = 1,
+ .act_width = 65,
+ .default_cnt = 8,
+ .require_cnt_dis = 0,
+ .version = 1,
+ .keyfield_set = es0_keyfield_set,
+ .keyfield_set_size = ARRAY_SIZE(es0_keyfield_set),
+ .actionfield_set = es0_actionfield_set,
+ .actionfield_set_size = ARRAY_SIZE(es0_actionfield_set),
+ .keyfield_set_map = es0_keyfield_set_map,
+ .keyfield_set_map_size = es0_keyfield_set_map_size,
+ .actionfield_set_map = es0_actionfield_set_map,
+ .actionfield_set_map_size = es0_actionfield_set_map_size,
+ .keyfield_set_typegroups = es0_keyfield_set_typegroups,
+ .actionfield_set_typegroups = es0_actionfield_set_typegroups,
+ },
+};
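+
+/* Worked example of the geometry encoded above: IS2 has 64 rows of
+ * four 96-bit subwords, so it can hold at most 64 X4 rules, 128 X2
+ * rules (64 rows * sw_cnt 2) or 256 X1 rules, with mixed sizes sharing
+ * the same pool.
+ */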
+
+const struct vcap_statistics lan966x_vcap_stats = {
+ .name = "lan966x",
+ .count = 3,
+ .keyfield_set_names = vcap_keyfield_set_names,
+ .actionfield_set_names = vcap_actionfield_set_names,
+ .keyfield_names = vcap_keyfield_names,
+ .actionfield_names = vcap_actionfield_names,
+};
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_ag_api.h b/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_ag_api.h
new file mode 100644
index 0000000000..0d8bbee73b
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_ag_api.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+
+#ifndef __LAN966X_VCAP_AG_API_H__
+#define __LAN966X_VCAP_AG_API_H__
+
+#include "vcap_api.h"
+
+extern const struct vcap_info lan966x_vcaps[];
+extern const struct vcap_statistics lan966x_vcap_stats;
+
+#endif /* __LAN966X_VCAP_AG_API_H__ */
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_debugfs.c b/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_debugfs.c
new file mode 100644
index 0000000000..ac525ff150
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_debugfs.c
@@ -0,0 +1,242 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include "lan966x_main.h"
+#include "lan966x_vcap_ag_api.h"
+#include "vcap_api.h"
+#include "vcap_api_client.h"
+
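+/* Helpers dumping each port's per-lookup key-set selection to debugfs,
+ * decoding the ANA VCAP port configuration registers into the key-set
+ * names defined in lan966x_vcap_ag_api.c.
+ */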
+static void lan966x_vcap_is1_port_keys(struct lan966x_port *port,
+ struct vcap_admin *admin,
+ struct vcap_output_print *out)
+{
+ struct lan966x *lan966x = port->lan966x;
+ u32 val;
+
+ out->prf(out->dst, " port[%d] (%s): ", port->chip_port,
+ netdev_name(port->dev));
+
+ val = lan_rd(lan966x, ANA_VCAP_CFG(port->chip_port));
+ out->prf(out->dst, "\n state: ");
+ if (ANA_VCAP_CFG_S1_ENA_GET(val))
+ out->prf(out->dst, "on");
+ else
+ out->prf(out->dst, "off");
+
+ for (int l = 0; l < admin->lookups; ++l) {
+ out->prf(out->dst, "\n Lookup %d: ", l);
+
+ out->prf(out->dst, "\n other: ");
+ switch (ANA_VCAP_S1_CFG_KEY_OTHER_CFG_GET(val)) {
+ case VCAP_IS1_PS_OTHER_NORMAL:
+ out->prf(out->dst, "normal");
+ break;
+ case VCAP_IS1_PS_OTHER_7TUPLE:
+ out->prf(out->dst, "7tuple");
+ break;
+ case VCAP_IS1_PS_OTHER_DBL_VID:
+ out->prf(out->dst, "dbl_vid");
+ break;
+ case VCAP_IS1_PS_OTHER_DMAC_VID:
+ out->prf(out->dst, "dmac_vid");
+ break;
+ default:
+ out->prf(out->dst, "-");
+ break;
+ }
+
+ out->prf(out->dst, "\n ipv4: ");
+ switch (ANA_VCAP_S1_CFG_KEY_IP4_CFG_GET(val)) {
+ case VCAP_IS1_PS_IPV4_NORMAL:
+ out->prf(out->dst, "normal");
+ break;
+ case VCAP_IS1_PS_IPV4_7TUPLE:
+ out->prf(out->dst, "7tuple");
+ break;
+ case VCAP_IS1_PS_IPV4_5TUPLE_IP4:
+ out->prf(out->dst, "5tuple_ipv4");
+ break;
+ case VCAP_IS1_PS_IPV4_DBL_VID:
+ out->prf(out->dst, "dbl_vid");
+ break;
+ case VCAP_IS1_PS_IPV4_DMAC_VID:
+ out->prf(out->dst, "dmac_vid");
+ break;
+ default:
+ out->prf(out->dst, "-");
+ break;
+ }
+
+ out->prf(out->dst, "\n ipv6: ");
+ switch (ANA_VCAP_S1_CFG_KEY_IP6_CFG_GET(val)) {
+ case VCAP_IS1_PS_IPV6_NORMAL:
+ out->prf(out->dst, "normal");
+ break;
+ case VCAP_IS1_PS_IPV6_7TUPLE:
+ out->prf(out->dst, "7tuple");
+ break;
+ case VCAP_IS1_PS_IPV6_5TUPLE_IP4:
+ out->prf(out->dst, "5tuple_ip4");
+ break;
+ case VCAP_IS1_PS_IPV6_NORMAL_IP6:
+ out->prf(out->dst, "normal_ip6");
+ break;
+ case VCAP_IS1_PS_IPV6_5TUPLE_IP6:
+ out->prf(out->dst, "5tuple_ip6");
+ break;
+ case VCAP_IS1_PS_IPV6_DBL_VID:
+ out->prf(out->dst, "dbl_vid");
+ break;
+ case VCAP_IS1_PS_IPV6_DMAC_VID:
+ out->prf(out->dst, "dmac_vid");
+ break;
+ default:
+ out->prf(out->dst, "-");
+ break;
+ }
+
+ out->prf(out->dst, "\n rt: ");
+ switch (ANA_VCAP_S1_CFG_KEY_RT_CFG_GET(val)) {
+ case VCAP_IS1_PS_RT_NORMAL:
+ out->prf(out->dst, "normal");
+ break;
+ case VCAP_IS1_PS_RT_7TUPLE:
+ out->prf(out->dst, "7tuple");
+ break;
+ case VCAP_IS1_PS_RT_DBL_VID:
+ out->prf(out->dst, "dbl_vid");
+ break;
+ case VCAP_IS1_PS_RT_DMAC_VID:
+ out->prf(out->dst, "dmac_vid");
+ break;
+ case VCAP_IS1_PS_RT_FOLLOW_OTHER:
+ out->prf(out->dst, "follow_other");
+ break;
+ default:
+ out->prf(out->dst, "-");
+ break;
+ }
+ }
+
+ out->prf(out->dst, "\n");
+}
+
+static void lan966x_vcap_is2_port_keys(struct lan966x_port *port,
+ struct vcap_admin *admin,
+ struct vcap_output_print *out)
+{
+ struct lan966x *lan966x = port->lan966x;
+ u32 val;
+
+ out->prf(out->dst, " port[%d] (%s): ", port->chip_port,
+ netdev_name(port->dev));
+
+ val = lan_rd(lan966x, ANA_VCAP_S2_CFG(port->chip_port));
+ out->prf(out->dst, "\n state: ");
+ if (ANA_VCAP_S2_CFG_ENA_GET(val))
+ out->prf(out->dst, "on");
+ else
+ out->prf(out->dst, "off");
+
+ for (int l = 0; l < admin->lookups; ++l) {
+ out->prf(out->dst, "\n Lookup %d: ", l);
+
+ out->prf(out->dst, "\n snap: ");
+ if (ANA_VCAP_S2_CFG_SNAP_DIS_GET(val) & (BIT(0) << l))
+ out->prf(out->dst, "mac_llc");
+ else
+ out->prf(out->dst, "mac_snap");
+
+ out->prf(out->dst, "\n oam: ");
+ if (ANA_VCAP_S2_CFG_OAM_DIS_GET(val) & (BIT(0) << l))
+ out->prf(out->dst, "mac_etype");
+ else
+ out->prf(out->dst, "mac_oam");
+
+ out->prf(out->dst, "\n arp: ");
+ if (ANA_VCAP_S2_CFG_ARP_DIS_GET(val) & (BIT(0) << l))
+ out->prf(out->dst, "mac_etype");
+ else
+ out->prf(out->dst, "mac_arp");
+
+ out->prf(out->dst, "\n ipv4_other: ");
+ if (ANA_VCAP_S2_CFG_IP_OTHER_DIS_GET(val) & (BIT(0) << l))
+ out->prf(out->dst, "mac_etype");
+ else
+ out->prf(out->dst, "ip4_other");
+
+ out->prf(out->dst, "\n ipv4_tcp_udp: ");
+ if (ANA_VCAP_S2_CFG_IP_TCPUDP_DIS_GET(val) & (BIT(0) << l))
+ out->prf(out->dst, "mac_etype");
+ else
+ out->prf(out->dst, "ipv4_tcp_udp");
+
+ out->prf(out->dst, "\n ipv6: ");
+ switch (ANA_VCAP_S2_CFG_IP6_CFG_GET(val) & (0x3 << l)) {
+ case VCAP_IS2_PS_IPV6_TCPUDP_OTHER:
+ out->prf(out->dst, "ipv6_tcp_udp ipv6_tcp_udp");
+ break;
+ case VCAP_IS2_PS_IPV6_STD:
+ out->prf(out->dst, "ipv6_std");
+ break;
+ case VCAP_IS2_PS_IPV6_IP4_TCPUDP_IP4_OTHER:
+ out->prf(out->dst, "ipv4_tcp_udp ipv4_tcp_udp");
+ break;
+ case VCAP_IS2_PS_IPV6_MAC_ETYPE:
+ out->prf(out->dst, "mac_etype");
+ break;
+ }
+ }
+
+ out->prf(out->dst, "\n");
+}
+
+static void lan966x_vcap_es0_port_keys(struct lan966x_port *port,
+ struct vcap_admin *admin,
+ struct vcap_output_print *out)
+{
+ struct lan966x *lan966x = port->lan966x;
+ u32 val;
+
+ out->prf(out->dst, " port[%d] (%s): ", port->chip_port,
+ netdev_name(port->dev));
+
+ val = lan_rd(lan966x, REW_PORT_CFG(port->chip_port));
+ out->prf(out->dst, "\n state: ");
+ if (REW_PORT_CFG_ES0_EN_GET(val))
+ out->prf(out->dst, "on");
+ else
+ out->prf(out->dst, "off");
+
+ out->prf(out->dst, "\n");
+}
+
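+/* Debugfs callback (the port_info VCAP operation): print the key
+ * selection state of a port for the given VCAP instance.
+ */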
+int lan966x_vcap_port_info(struct net_device *dev,
+ struct vcap_admin *admin,
+ struct vcap_output_print *out)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ const struct vcap_info *vcap;
+ struct vcap_control *vctrl;
+
+ vctrl = lan966x->vcap_ctrl;
+ vcap = &vctrl->vcaps[admin->vtype];
+
+ out->prf(out->dst, "%s:\n", vcap->name);
+ switch (admin->vtype) {
+ case VCAP_TYPE_IS2:
+ lan966x_vcap_is2_port_keys(port, admin, out);
+ break;
+ case VCAP_TYPE_IS1:
+ lan966x_vcap_is1_port_keys(port, admin, out);
+ break;
+ case VCAP_TYPE_ES0:
+ lan966x_vcap_es0_port_keys(port, admin, out);
+ break;
+ default:
+ out->prf(out->dst, " no info\n");
+ break;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c b/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c
new file mode 100644
index 0000000000..a4414f63c9
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c
@@ -0,0 +1,784 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include "lan966x_main.h"
+#include "lan966x_vcap_ag_api.h"
+#include "vcap_api.h"
+#include "vcap_api_client.h"
+#include "vcap_api_debugfs.h"
+
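+/* Size in bytes of the key/mask/action caches: 64 32-bit words each */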
+#define STREAMSIZE (64 * 4)
+
+#define LAN966X_IS1_LOOKUPS 3
+#define LAN966X_IS2_LOOKUPS 2
+#define LAN966X_ES0_LOOKUPS 1
+
+#define LAN966X_STAT_ESDX_GRN_BYTES 0x300
+#define LAN966X_STAT_ESDX_GRN_PKTS 0x301
+#define LAN966X_STAT_ESDX_YEL_BYTES 0x302
+#define LAN966X_STAT_ESDX_YEL_PKTS 0x303
+
+static struct lan966x_vcap_inst {
+ enum vcap_type vtype; /* type of vcap */
+ int tgt_inst; /* hardware instance number */
+ int lookups; /* number of lookups in this vcap type */
+ int first_cid; /* first chain id in this vcap */
+ int last_cid; /* last chain id in this vcap */
+ int count; /* number of available addresses */
+ bool ingress; /* is vcap in the ingress path */
+} lan966x_vcap_inst_cfg[] = {
+ {
+ .vtype = VCAP_TYPE_ES0,
+ .tgt_inst = 0,
+ .lookups = LAN966X_ES0_LOOKUPS,
+ .first_cid = LAN966X_VCAP_CID_ES0_L0,
+ .last_cid = LAN966X_VCAP_CID_ES0_MAX,
+ .count = 64,
+ },
+ {
+ .vtype = VCAP_TYPE_IS1, /* IS1-0 */
+ .tgt_inst = 1,
+ .lookups = LAN966X_IS1_LOOKUPS,
+ .first_cid = LAN966X_VCAP_CID_IS1_L0,
+ .last_cid = LAN966X_VCAP_CID_IS1_MAX,
+ .count = 768,
+ .ingress = true,
+ },
+ {
+ .vtype = VCAP_TYPE_IS2, /* IS2-0 */
+ .tgt_inst = 2,
+ .lookups = LAN966X_IS2_LOOKUPS,
+ .first_cid = LAN966X_VCAP_CID_IS2_L0,
+ .last_cid = LAN966X_VCAP_CID_IS2_MAX,
+ .count = 256,
+ .ingress = true,
+ },
+};
+
+struct lan966x_vcap_cmd_cb {
+ struct lan966x *lan966x;
+ u32 instance;
+};
+
+static u32 lan966x_vcap_read_update_ctrl(const struct lan966x_vcap_cmd_cb *cb)
+{
+ return lan_rd(cb->lan966x, VCAP_UPDATE_CTRL(cb->instance));
+}
+
+static void lan966x_vcap_wait_update(struct lan966x *lan966x, int instance)
+{
+ const struct lan966x_vcap_cmd_cb cb = { .lan966x = lan966x,
+ .instance = instance };
+ u32 val;
+
+ readx_poll_timeout(lan966x_vcap_read_update_ctrl, &cb, val,
+ (val & VCAP_UPDATE_CTRL_UPDATE_SHOT) == 0, 10,
+ 100000);
+}
+
+static void __lan966x_vcap_range_init(struct lan966x *lan966x,
+ struct vcap_admin *admin,
+ u32 addr,
+ u32 count)
+{
+ lan_wr(VCAP_MV_CFG_MV_NUM_POS_SET(0) |
+ VCAP_MV_CFG_MV_SIZE_SET(count - 1),
+ lan966x, VCAP_MV_CFG(admin->tgt_inst));
+
+ lan_wr(VCAP_UPDATE_CTRL_UPDATE_CMD_SET(VCAP_CMD_INITIALIZE) |
+ VCAP_UPDATE_CTRL_UPDATE_ENTRY_DIS_SET(0) |
+ VCAP_UPDATE_CTRL_UPDATE_ACTION_DIS_SET(0) |
+ VCAP_UPDATE_CTRL_UPDATE_CNT_DIS_SET(0) |
+ VCAP_UPDATE_CTRL_UPDATE_ADDR_SET(addr) |
+ VCAP_UPDATE_CTRL_CLEAR_CACHE_SET(true) |
+ VCAP_UPDATE_CTRL_UPDATE_SHOT_SET(1),
+ lan966x, VCAP_UPDATE_CTRL(admin->tgt_inst));
+
+ lan966x_vcap_wait_update(lan966x, admin->tgt_inst);
+}
+
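+/* Map a chain id to the IS1 lookup (0-2) it belongs to */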
+static int lan966x_vcap_is1_cid_to_lookup(int cid)
+{
+ int lookup = 0;
+
+ if (cid >= LAN966X_VCAP_CID_IS1_L1 &&
+ cid < LAN966X_VCAP_CID_IS1_L2)
+ lookup = 1;
+ else if (cid >= LAN966X_VCAP_CID_IS1_L2 &&
+ cid < LAN966X_VCAP_CID_IS1_MAX)
+ lookup = 2;
+
+ return lookup;
+}
+
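+/* Map a chain id to the IS2 lookup (0-1) it belongs to */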
+static int lan966x_vcap_is2_cid_to_lookup(int cid)
+{
+ if (cid >= LAN966X_VCAP_CID_IS2_L1 &&
+ cid < LAN966X_VCAP_CID_IS2_MAX)
+ return 1;
+
+ return 0;
+}
+
+/* Return the list of keysets for the vcap port configuration */
+static int
+lan966x_vcap_is1_get_port_keysets(struct net_device *ndev, int lookup,
+ struct vcap_keyset_list *keysetlist,
+ u16 l3_proto)
+{
+ struct lan966x_port *port = netdev_priv(ndev);
+ struct lan966x *lan966x = port->lan966x;
+ u32 val;
+
+ val = lan_rd(lan966x, ANA_VCAP_S1_CFG(port->chip_port, lookup));
+
+ /* Collect all keysets for the port in a list */
+ if (l3_proto == ETH_P_ALL || l3_proto == ETH_P_IP) {
+ switch (ANA_VCAP_S1_CFG_KEY_IP4_CFG_GET(val)) {
+ case VCAP_IS1_PS_IPV4_7TUPLE:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_7TUPLE);
+ break;
+ case VCAP_IS1_PS_IPV4_5TUPLE_IP4:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_5TUPLE_IP4);
+ break;
+ case VCAP_IS1_PS_IPV4_NORMAL:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_NORMAL);
+ break;
+ }
+ }
+
+ if (l3_proto == ETH_P_ALL || l3_proto == ETH_P_IPV6) {
+ switch (ANA_VCAP_S1_CFG_KEY_IP6_CFG_GET(val)) {
+ case VCAP_IS1_PS_IPV6_NORMAL:
+ case VCAP_IS1_PS_IPV6_NORMAL_IP6:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_NORMAL);
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_NORMAL_IP6);
+ break;
+ case VCAP_IS1_PS_IPV6_5TUPLE_IP6:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_5TUPLE_IP6);
+ break;
+ case VCAP_IS1_PS_IPV6_7TUPLE:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_7TUPLE);
+ break;
+ case VCAP_IS1_PS_IPV6_5TUPLE_IP4:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_5TUPLE_IP4);
+ break;
+ case VCAP_IS1_PS_IPV6_DMAC_VID:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_DMAC_VID);
+ break;
+ }
+ }
+
+ switch (ANA_VCAP_S1_CFG_KEY_OTHER_CFG_GET(val)) {
+ case VCAP_IS1_PS_OTHER_7TUPLE:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_7TUPLE);
+ break;
+ case VCAP_IS1_PS_OTHER_NORMAL:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_NORMAL);
+ break;
+ }
+
+ return 0;
+}
+
+static int
+lan966x_vcap_is2_get_port_keysets(struct net_device *dev, int lookup,
+ struct vcap_keyset_list *keysetlist,
+ u16 l3_proto)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ bool found = false;
+ u32 val;
+
+ val = lan_rd(lan966x, ANA_VCAP_S2_CFG(port->chip_port));
+
+ /* Collect all keysets for the port in a list */
+ if (l3_proto == ETH_P_ALL)
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_MAC_ETYPE);
+
+ if (l3_proto == ETH_P_ALL || l3_proto == ETH_P_SNAP) {
+ if (ANA_VCAP_S2_CFG_SNAP_DIS_GET(val) & (BIT(0) << lookup))
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_MAC_LLC);
+ else
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_MAC_SNAP);
+
+ found = true;
+ }
+
+ if (l3_proto == ETH_P_ALL || l3_proto == ETH_P_CFM) {
+ if (ANA_VCAP_S2_CFG_OAM_DIS_GET(val) & (BIT(0) << lookup))
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_MAC_ETYPE);
+ else
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_OAM);
+
+ found = true;
+ }
+
+ if (l3_proto == ETH_P_ALL || l3_proto == ETH_P_ARP) {
+ if (ANA_VCAP_S2_CFG_ARP_DIS_GET(val) & (BIT(0) << lookup))
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_MAC_ETYPE);
+ else
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_ARP);
+
+ found = true;
+ }
+
+ if (l3_proto == ETH_P_ALL || l3_proto == ETH_P_IP) {
+ if (ANA_VCAP_S2_CFG_IP_OTHER_DIS_GET(val) & (BIT(0) << lookup))
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_MAC_ETYPE);
+ else
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP4_OTHER);
+
+ if (ANA_VCAP_S2_CFG_IP_TCPUDP_DIS_GET(val) & (BIT(0) << lookup))
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_MAC_ETYPE);
+ else
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP4_TCP_UDP);
+
+ found = true;
+ }
+
+ if (l3_proto == ETH_P_ALL || l3_proto == ETH_P_IPV6) {
+ switch (ANA_VCAP_S2_CFG_IP6_CFG_GET(val) & (0x3 << lookup)) {
+ case VCAP_IS2_PS_IPV6_TCPUDP_OTHER:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP6_OTHER);
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP6_TCP_UDP);
+ break;
+ case VCAP_IS2_PS_IPV6_STD:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP6_STD);
+ break;
+ case VCAP_IS2_PS_IPV6_IP4_TCPUDP_IP4_OTHER:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP4_OTHER);
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP4_TCP_UDP);
+ break;
+ case VCAP_IS2_PS_IPV6_MAC_ETYPE:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_MAC_ETYPE);
+ break;
+ }
+
+ found = true;
+ }
+
+ if (!found)
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_MAC_ETYPE);
+
+ return 0;
+}
+
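+/* Select the keyset to use for a rule: collect the keysets the port
+ * provides for the rule's lookup and return the first one that is also
+ * in the caller's candidate list.
+ */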
+static enum vcap_keyfield_set
+lan966x_vcap_validate_keyset(struct net_device *dev,
+ struct vcap_admin *admin,
+ struct vcap_rule *rule,
+ struct vcap_keyset_list *kslist,
+ u16 l3_proto)
+{
+ struct vcap_keyset_list keysetlist = {};
+ enum vcap_keyfield_set keysets[10] = {};
+ int lookup;
+ int err;
+
+ if (!kslist || kslist->cnt == 0)
+ return VCAP_KFS_NO_VALUE;
+
+ keysetlist.max = ARRAY_SIZE(keysets);
+ keysetlist.keysets = keysets;
+
+ switch (admin->vtype) {
+ case VCAP_TYPE_IS1:
+ lookup = lan966x_vcap_is1_cid_to_lookup(rule->vcap_chain_id);
+ err = lan966x_vcap_is1_get_port_keysets(dev, lookup, &keysetlist,
+ l3_proto);
+ break;
+ case VCAP_TYPE_IS2:
+ lookup = lan966x_vcap_is2_cid_to_lookup(rule->vcap_chain_id);
+ err = lan966x_vcap_is2_get_port_keysets(dev, lookup, &keysetlist,
+ l3_proto);
+ break;
+ case VCAP_TYPE_ES0:
+ return kslist->keysets[0];
+ default:
+ pr_err("vcap type: %s not supported\n",
+ lan966x_vcaps[admin->vtype].name);
+ return VCAP_KFS_NO_VALUE;
+ }
+
+ if (err)
+ return VCAP_KFS_NO_VALUE;
+
+ /* Check if there is a match and return the match */
+ for (int i = 0; i < kslist->cnt; ++i)
+ for (int j = 0; j < keysetlist.cnt; ++j)
+ if (kslist->keysets[i] == keysets[j])
+ return kslist->keysets[i];
+
+ return VCAP_KFS_NO_VALUE;
+}
+
+static bool lan966x_vcap_is2_is_first_chain(struct vcap_rule *rule)
+{
+ return (rule->vcap_chain_id >= LAN966X_VCAP_CID_IS2_L0 &&
+ rule->vcap_chain_id < LAN966X_VCAP_CID_IS2_L1);
+}
+
+static void lan966x_vcap_is1_add_default_fields(struct lan966x_port *port,
+ struct vcap_admin *admin,
+ struct vcap_rule *rule)
+{
+ u32 value, mask;
+ u32 lookup;
+
+ if (vcap_rule_get_key_u32(rule, VCAP_KF_IF_IGR_PORT_MASK,
+ &value, &mask))
+ vcap_rule_add_key_u32(rule, VCAP_KF_IF_IGR_PORT_MASK, 0,
+ ~BIT(port->chip_port));
+
+ lookup = lan966x_vcap_is1_cid_to_lookup(rule->vcap_chain_id);
+ vcap_rule_add_key_u32(rule, VCAP_KF_LOOKUP_INDEX, lookup, 0x3);
+}
+
+static void lan966x_vcap_is2_add_default_fields(struct lan966x_port *port,
+ struct vcap_admin *admin,
+ struct vcap_rule *rule)
+{
+ u32 value, mask;
+
+ if (vcap_rule_get_key_u32(rule, VCAP_KF_IF_IGR_PORT_MASK,
+ &value, &mask))
+ vcap_rule_add_key_u32(rule, VCAP_KF_IF_IGR_PORT_MASK, 0,
+ ~BIT(port->chip_port));
+
+ if (lan966x_vcap_is2_is_first_chain(rule))
+ vcap_rule_add_key_bit(rule, VCAP_KF_LOOKUP_FIRST_IS,
+ VCAP_BIT_1);
+ else
+ vcap_rule_add_key_bit(rule, VCAP_KF_LOOKUP_FIRST_IS,
+ VCAP_BIT_0);
+}
+
+static void lan966x_vcap_es0_add_default_fields(struct lan966x_port *port,
+ struct vcap_admin *admin,
+ struct vcap_rule *rule)
+{
+ vcap_rule_add_key_u32(rule, VCAP_KF_IF_EGR_PORT_NO,
+ port->chip_port, GENMASK(4, 0));
+}
+
+static void lan966x_vcap_add_default_fields(struct net_device *dev,
+ struct vcap_admin *admin,
+ struct vcap_rule *rule)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+
+ switch (admin->vtype) {
+ case VCAP_TYPE_IS1:
+ lan966x_vcap_is1_add_default_fields(port, admin, rule);
+ break;
+ case VCAP_TYPE_IS2:
+ lan966x_vcap_is2_add_default_fields(port, admin, rule);
+ break;
+ case VCAP_TYPE_ES0:
+ lan966x_vcap_es0_add_default_fields(port, admin, rule);
+ break;
+ default:
+ pr_err("vcap type: %s not supported\n",
+ lan966x_vcaps[admin->vtype].name);
+ break;
+ }
+}
+
+static void lan966x_vcap_cache_erase(struct vcap_admin *admin)
+{
+ memset(admin->cache.keystream, 0, STREAMSIZE);
+ memset(admin->cache.maskstream, 0, STREAMSIZE);
+ memset(admin->cache.actionstream, 0, STREAMSIZE);
+ memset(&admin->cache.counter, 0, sizeof(admin->cache.counter));
+}
+
+/* The ESDX counter is only used/incremented if the frame has been classified
+ * with an ISDX > 0 (e.g. by a rule in IS1). This is not mentioned in the
+ * datasheet.
+ */
+static void lan966x_es0_read_esdx_counter(struct lan966x *lan966x,
+ struct vcap_admin *admin, u32 id)
+{
+ u32 counter;
+
+ id = id & 0xff; /* counter limit */
+ mutex_lock(&lan966x->stats_lock);
+ lan_wr(SYS_STAT_CFG_STAT_VIEW_SET(id), lan966x, SYS_STAT_CFG);
+ counter = lan_rd(lan966x, SYS_CNT(LAN966X_STAT_ESDX_GRN_PKTS)) +
+ lan_rd(lan966x, SYS_CNT(LAN966X_STAT_ESDX_YEL_PKTS));
+ mutex_unlock(&lan966x->stats_lock);
+ if (counter)
+ admin->cache.counter = counter;
+}
+
+static void lan966x_es0_write_esdx_counter(struct lan966x *lan966x,
+ struct vcap_admin *admin, u32 id)
+{
+ id = id & 0xff; /* counter limit */
+
+ mutex_lock(&lan966x->stats_lock);
+ lan_wr(SYS_STAT_CFG_STAT_VIEW_SET(id), lan966x, SYS_STAT_CFG);
+ lan_wr(0, lan966x, SYS_CNT(LAN966X_STAT_ESDX_GRN_BYTES));
+ lan_wr(admin->cache.counter, lan966x,
+ SYS_CNT(LAN966X_STAT_ESDX_GRN_PKTS));
+ lan_wr(0, lan966x, SYS_CNT(LAN966X_STAT_ESDX_YEL_BYTES));
+ lan_wr(0, lan966x, SYS_CNT(LAN966X_STAT_ESDX_YEL_PKTS));
+ mutex_unlock(&lan966x->stats_lock);
+}
+
+static void lan966x_vcap_cache_write(struct net_device *dev,
+ struct vcap_admin *admin,
+ enum vcap_selection sel,
+ u32 start,
+ u32 count)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ u32 *keystr, *mskstr, *actstr;
+
+ keystr = &admin->cache.keystream[start];
+ mskstr = &admin->cache.maskstream[start];
+ actstr = &admin->cache.actionstream[start];
+
+ switch (sel) {
+ case VCAP_SEL_ENTRY:
+ for (int i = 0; i < count; ++i) {
+ lan_wr(keystr[i] & mskstr[i], lan966x,
+ VCAP_ENTRY_DAT(admin->tgt_inst, i));
+ lan_wr(~mskstr[i], lan966x,
+ VCAP_MASK_DAT(admin->tgt_inst, i));
+ }
+ break;
+ case VCAP_SEL_ACTION:
+ for (int i = 0; i < count; ++i)
+ lan_wr(actstr[i], lan966x,
+ VCAP_ACTION_DAT(admin->tgt_inst, i));
+ break;
+ case VCAP_SEL_COUNTER:
+ admin->cache.sticky = admin->cache.counter > 0;
+ lan_wr(admin->cache.counter, lan966x,
+ VCAP_CNT_DAT(admin->tgt_inst, 0));
+
+ if (admin->vtype == VCAP_TYPE_ES0)
+ lan966x_es0_write_esdx_counter(lan966x, admin, start);
+ break;
+ default:
+ break;
+ }
+}
+
+static void lan966x_vcap_cache_read(struct net_device *dev,
+ struct vcap_admin *admin,
+ enum vcap_selection sel,
+ u32 start,
+ u32 count)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ int instance = admin->tgt_inst;
+ u32 *keystr, *mskstr, *actstr;
+
+ keystr = &admin->cache.keystream[start];
+ mskstr = &admin->cache.maskstream[start];
+ actstr = &admin->cache.actionstream[start];
+
+ if (sel & VCAP_SEL_ENTRY) {
+ for (int i = 0; i < count; ++i) {
+ keystr[i] =
+ lan_rd(lan966x, VCAP_ENTRY_DAT(instance, i));
+ mskstr[i] =
+ ~lan_rd(lan966x, VCAP_MASK_DAT(instance, i));
+ }
+ }
+
+ if (sel & VCAP_SEL_ACTION)
+ for (int i = 0; i < count; ++i)
+ actstr[i] =
+ lan_rd(lan966x, VCAP_ACTION_DAT(instance, i));
+
+ if (sel & VCAP_SEL_COUNTER) {
+ admin->cache.counter =
+ lan_rd(lan966x, VCAP_CNT_DAT(instance, 0));
+ admin->cache.sticky = admin->cache.counter > 0;
+
+ if (admin->vtype == VCAP_TYPE_ES0)
+ lan966x_es0_read_esdx_counter(lan966x, admin, start);
+ }
+}
+
+static void lan966x_vcap_range_init(struct net_device *dev,
+ struct vcap_admin *admin,
+ u32 addr,
+ u32 count)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+
+ __lan966x_vcap_range_init(lan966x, admin, addr, count);
+}
+
+static void lan966x_vcap_update(struct net_device *dev,
+ struct vcap_admin *admin,
+ enum vcap_command cmd,
+ enum vcap_selection sel,
+ u32 addr)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ bool clear;
+
+ clear = (cmd == VCAP_CMD_INITIALIZE);
+
+ lan_wr(VCAP_MV_CFG_MV_NUM_POS_SET(0) |
+ VCAP_MV_CFG_MV_SIZE_SET(0),
+ lan966x, VCAP_MV_CFG(admin->tgt_inst));
+
+ lan_wr(VCAP_UPDATE_CTRL_UPDATE_CMD_SET(cmd) |
+ VCAP_UPDATE_CTRL_UPDATE_ENTRY_DIS_SET((VCAP_SEL_ENTRY & sel) == 0) |
+ VCAP_UPDATE_CTRL_UPDATE_ACTION_DIS_SET((VCAP_SEL_ACTION & sel) == 0) |
+ VCAP_UPDATE_CTRL_UPDATE_CNT_DIS_SET((VCAP_SEL_COUNTER & sel) == 0) |
+ VCAP_UPDATE_CTRL_UPDATE_ADDR_SET(addr) |
+ VCAP_UPDATE_CTRL_CLEAR_CACHE_SET(clear) |
+ VCAP_UPDATE_CTRL_UPDATE_SHOT,
+ lan966x, VCAP_UPDATE_CTRL(admin->tgt_inst));
+
+ lan966x_vcap_wait_update(lan966x, admin->tgt_inst);
+}
+
+static void lan966x_vcap_move(struct net_device *dev,
+ struct vcap_admin *admin,
+ u32 addr, int offset, int count)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ enum vcap_command cmd;
+ u16 mv_num_pos;
+ u16 mv_size;
+
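+ /* The HW encodes the move distance as (positions - 1); a positive
+ * offset selects a move down, a negative one a move up.
+ */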
+ mv_size = count - 1;
+ if (offset > 0) {
+ mv_num_pos = offset - 1;
+ cmd = VCAP_CMD_MOVE_DOWN;
+ } else {
+ mv_num_pos = -offset - 1;
+ cmd = VCAP_CMD_MOVE_UP;
+ }
+
+ lan_wr(VCAP_MV_CFG_MV_NUM_POS_SET(mv_num_pos) |
+ VCAP_MV_CFG_MV_SIZE_SET(mv_size),
+ lan966x, VCAP_MV_CFG(admin->tgt_inst));
+
+ lan_wr(VCAP_UPDATE_CTRL_UPDATE_CMD_SET(cmd) |
+ VCAP_UPDATE_CTRL_UPDATE_ENTRY_DIS_SET(0) |
+ VCAP_UPDATE_CTRL_UPDATE_ACTION_DIS_SET(0) |
+ VCAP_UPDATE_CTRL_UPDATE_CNT_DIS_SET(0) |
+ VCAP_UPDATE_CTRL_UPDATE_ADDR_SET(addr) |
+ VCAP_UPDATE_CTRL_CLEAR_CACHE_SET(false) |
+ VCAP_UPDATE_CTRL_UPDATE_SHOT,
+ lan966x, VCAP_UPDATE_CTRL(admin->tgt_inst));
+
+ lan966x_vcap_wait_update(lan966x, admin->tgt_inst);
+}
+
+static struct vcap_operations lan966x_vcap_ops = {
+ .validate_keyset = lan966x_vcap_validate_keyset,
+ .add_default_fields = lan966x_vcap_add_default_fields,
+ .cache_erase = lan966x_vcap_cache_erase,
+ .cache_write = lan966x_vcap_cache_write,
+ .cache_read = lan966x_vcap_cache_read,
+ .init = lan966x_vcap_range_init,
+ .update = lan966x_vcap_update,
+ .move = lan966x_vcap_move,
+ .port_info = lan966x_vcap_port_info,
+};
+
+static void lan966x_vcap_admin_free(struct vcap_admin *admin)
+{
+ if (!admin)
+ return;
+
+ kfree(admin->cache.keystream);
+ kfree(admin->cache.maskstream);
+ kfree(admin->cache.actionstream);
+ mutex_destroy(&admin->lock);
+ kfree(admin);
+}
+
+static struct vcap_admin *
+lan966x_vcap_admin_alloc(struct lan966x *lan966x, struct vcap_control *ctrl,
+ const struct lan966x_vcap_inst *cfg)
+{
+ struct vcap_admin *admin;
+
+ admin = kzalloc(sizeof(*admin), GFP_KERNEL);
+ if (!admin)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_init(&admin->lock);
+ INIT_LIST_HEAD(&admin->list);
+ INIT_LIST_HEAD(&admin->rules);
+ INIT_LIST_HEAD(&admin->enabled);
+
+ admin->vtype = cfg->vtype;
+ admin->vinst = 0;
+ admin->ingress = cfg->ingress;
+ admin->w32be = true;
+ admin->tgt_inst = cfg->tgt_inst;
+
+ admin->lookups = cfg->lookups;
+ admin->lookups_per_instance = cfg->lookups;
+
+ admin->first_cid = cfg->first_cid;
+ admin->last_cid = cfg->last_cid;
+
+ admin->cache.keystream = kzalloc(STREAMSIZE, GFP_KERNEL);
+ admin->cache.maskstream = kzalloc(STREAMSIZE, GFP_KERNEL);
+ admin->cache.actionstream = kzalloc(STREAMSIZE, GFP_KERNEL);
+ if (!admin->cache.keystream ||
+ !admin->cache.maskstream ||
+ !admin->cache.actionstream) {
+ lan966x_vcap_admin_free(admin);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ return admin;
+}
+
+static void lan966x_vcap_block_init(struct lan966x *lan966x,
+ struct vcap_admin *admin,
+ struct lan966x_vcap_inst *cfg)
+{
+ admin->first_valid_addr = 0;
+ admin->last_used_addr = cfg->count;
+ admin->last_valid_addr = cfg->count - 1;
+
+ lan_wr(VCAP_CORE_IDX_CORE_IDX_SET(0),
+ lan966x, VCAP_CORE_IDX(admin->tgt_inst));
+ lan_wr(VCAP_CORE_MAP_CORE_MAP_SET(1),
+ lan966x, VCAP_CORE_MAP(admin->tgt_inst));
+
+ __lan966x_vcap_range_init(lan966x, admin, admin->first_valid_addr,
+ admin->last_valid_addr -
+ admin->first_valid_addr);
+}
+
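+/* Reset the port key selection of all ports for a VCAP instance to its
+ * defaults.
+ */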
+static void lan966x_vcap_port_key_deselection(struct lan966x *lan966x,
+ struct vcap_admin *admin)
+{
+ u32 val;
+
+ switch (admin->vtype) {
+ case VCAP_TYPE_IS1:
+ val = ANA_VCAP_S1_CFG_KEY_IP6_CFG_SET(VCAP_IS1_PS_IPV6_5TUPLE_IP6) |
+ ANA_VCAP_S1_CFG_KEY_IP4_CFG_SET(VCAP_IS1_PS_IPV4_5TUPLE_IP4) |
+ ANA_VCAP_S1_CFG_KEY_OTHER_CFG_SET(VCAP_IS1_PS_OTHER_NORMAL);
+
+ for (int p = 0; p < lan966x->num_phys_ports; ++p) {
+ if (!lan966x->ports[p])
+ continue;
+
+ for (int l = 0; l < LAN966X_IS1_LOOKUPS; ++l)
+ lan_wr(val, lan966x, ANA_VCAP_S1_CFG(p, l));
+
+ lan_rmw(ANA_VCAP_CFG_S1_ENA_SET(true),
+ ANA_VCAP_CFG_S1_ENA, lan966x,
+ ANA_VCAP_CFG(p));
+ }
+
+ break;
+ case VCAP_TYPE_IS2:
+ for (int p = 0; p < lan966x->num_phys_ports; ++p)
+ lan_wr(0, lan966x, ANA_VCAP_S2_CFG(p));
+
+ break;
+ case VCAP_TYPE_ES0:
+ for (int p = 0; p < lan966x->num_phys_ports; ++p)
+ lan_rmw(REW_PORT_CFG_ES0_EN_SET(false),
+ REW_PORT_CFG_ES0_EN, lan966x,
+ REW_PORT_CFG(p));
+ break;
+ default:
+ pr_err("vcap type: %s not supported\n",
+ lan966x_vcaps[admin->vtype].name);
+ break;
+ }
+}
+
+int lan966x_vcap_init(struct lan966x *lan966x)
+{
+ struct lan966x_vcap_inst *cfg;
+ struct vcap_control *ctrl;
+ struct vcap_admin *admin;
+ struct dentry *dir;
+
+ ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
+ if (!ctrl)
+ return -ENOMEM;
+
+ ctrl->vcaps = lan966x_vcaps;
+ ctrl->stats = &lan966x_vcap_stats;
+ ctrl->ops = &lan966x_vcap_ops;
+
+ INIT_LIST_HEAD(&ctrl->list);
+ for (int i = 0; i < ARRAY_SIZE(lan966x_vcap_inst_cfg); ++i) {
+ cfg = &lan966x_vcap_inst_cfg[i];
+
+ admin = lan966x_vcap_admin_alloc(lan966x, ctrl, cfg);
+ if (IS_ERR(admin))
+ return PTR_ERR(admin);
+
+ lan966x_vcap_block_init(lan966x, admin, cfg);
+ lan966x_vcap_port_key_deselection(lan966x, admin);
+
+ list_add_tail(&admin->list, &ctrl->list);
+ }
+
+ dir = vcap_debugfs(lan966x->dev, lan966x->debugfs_root, ctrl);
+ for (int p = 0; p < lan966x->num_phys_ports; ++p) {
+ if (lan966x->ports[p]) {
+ vcap_port_debugfs(lan966x->dev, dir, ctrl,
+ lan966x->ports[p]->dev);
+
+ lan_rmw(ANA_VCAP_S2_CFG_ENA_SET(true),
+ ANA_VCAP_S2_CFG_ENA, lan966x,
+ ANA_VCAP_S2_CFG(lan966x->ports[p]->chip_port));
+
+ lan_rmw(ANA_VCAP_CFG_S1_ENA_SET(true),
+ ANA_VCAP_CFG_S1_ENA, lan966x,
+ ANA_VCAP_CFG(lan966x->ports[p]->chip_port));
+
+ lan_rmw(REW_PORT_CFG_ES0_EN_SET(true),
+ REW_PORT_CFG_ES0_EN, lan966x,
+ REW_PORT_CFG(lan966x->ports[p]->chip_port));
+ }
+ }
+
+ /* Statistics: Use ESDX from ES0 if hit, otherwise no counting */
+ lan_rmw(REW_STAT_CFG_STAT_MODE_SET(1),
+ REW_STAT_CFG_STAT_MODE, lan966x,
+ REW_STAT_CFG);
+
+ lan966x->vcap_ctrl = ctrl;
+
+ return 0;
+}
+
+void lan966x_vcap_deinit(struct lan966x *lan966x)
+{
+ struct vcap_admin *admin, *admin_next;
+ struct vcap_control *ctrl;
+
+ ctrl = lan966x->vcap_ctrl;
+ if (!ctrl)
+ return;
+
+ list_for_each_entry_safe(admin, admin_next, &ctrl->list, list) {
+ lan966x_vcap_port_key_deselection(lan966x, admin);
+ vcap_del_rules(ctrl, admin);
+ list_del(&admin->list);
+ lan966x_vcap_admin_free(admin);
+ }
+
+ kfree(ctrl);
+}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c b/drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c
new file mode 100644
index 0000000000..3c44660128
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c
@@ -0,0 +1,323 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include "lan966x_main.h"
+
+#define VLANACCESS_CMD_IDLE 0
+#define VLANACCESS_CMD_READ 1
+#define VLANACCESS_CMD_WRITE 2
+#define VLANACCESS_CMD_INIT 3
+
+static int lan966x_vlan_get_status(struct lan966x *lan966x)
+{
+ return lan_rd(lan966x, ANA_VLANACCESS);
+}
+
+static int lan966x_vlan_wait_for_completion(struct lan966x *lan966x)
+{
+ u32 val;
+
+ return readx_poll_timeout(lan966x_vlan_get_status,
+ lan966x, val,
+ (val & ANA_VLANACCESS_VLAN_TBL_CMD) ==
+ VLANACCESS_CMD_IDLE,
+ TABLE_UPDATE_SLEEP_US, TABLE_UPDATE_TIMEOUT_US);
+}
+
+static void lan966x_vlan_set_mask(struct lan966x *lan966x, u16 vid)
+{
+ u16 mask = lan966x->vlan_mask[vid];
+ bool cpu_dis;
+
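+ /* Disable the CPU port group for this vlan when the CPU is not a
+ * member of the vlan
+ */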
+ cpu_dis = !(mask & BIT(CPU_PORT));
+
+ /* Set flags and the VID to configure */
+ lan_rmw(ANA_VLANTIDX_VLAN_PGID_CPU_DIS_SET(cpu_dis) |
+ ANA_VLANTIDX_V_INDEX_SET(vid),
+ ANA_VLANTIDX_VLAN_PGID_CPU_DIS |
+ ANA_VLANTIDX_V_INDEX,
+ lan966x, ANA_VLANTIDX);
+
+ /* Set the vlan port members mask */
+ lan_rmw(ANA_VLAN_PORT_MASK_VLAN_PORT_MASK_SET(mask),
+ ANA_VLAN_PORT_MASK_VLAN_PORT_MASK,
+ lan966x, ANA_VLAN_PORT_MASK);
+
+ /* Issue a write command */
+ lan_rmw(ANA_VLANACCESS_VLAN_TBL_CMD_SET(VLANACCESS_CMD_WRITE),
+ ANA_VLANACCESS_VLAN_TBL_CMD,
+ lan966x, ANA_VLANACCESS);
+
+ if (lan966x_vlan_wait_for_completion(lan966x))
+ dev_err(lan966x->dev, "Vlan set mask failed\n");
+}
+
+static void lan966x_vlan_port_add_vlan_mask(struct lan966x_port *port, u16 vid)
+{
+ struct lan966x *lan966x = port->lan966x;
+ u8 p = port->chip_port;
+
+ lan966x->vlan_mask[vid] |= BIT(p);
+ lan966x_vlan_set_mask(lan966x, vid);
+}
+
+static void lan966x_vlan_port_del_vlan_mask(struct lan966x_port *port, u16 vid)
+{
+ struct lan966x *lan966x = port->lan966x;
+ u8 p = port->chip_port;
+
+ lan966x->vlan_mask[vid] &= ~BIT(p);
+ lan966x_vlan_set_mask(lan966x, vid);
+}
+
+static bool lan966x_vlan_port_any_vlan_mask(struct lan966x *lan966x, u16 vid)
+{
+ return !!(lan966x->vlan_mask[vid] & ~BIT(CPU_PORT));
+}
+
+static void lan966x_vlan_cpu_add_vlan_mask(struct lan966x *lan966x, u16 vid)
+{
+ lan966x->vlan_mask[vid] |= BIT(CPU_PORT);
+ lan966x_vlan_set_mask(lan966x, vid);
+}
+
+static void lan966x_vlan_cpu_del_vlan_mask(struct lan966x *lan966x, u16 vid)
+{
+ lan966x->vlan_mask[vid] &= ~BIT(CPU_PORT);
+ lan966x_vlan_set_mask(lan966x, vid);
+}
+
+static void lan966x_vlan_cpu_add_cpu_vlan_mask(struct lan966x *lan966x, u16 vid)
+{
+ __set_bit(vid, lan966x->cpu_vlan_mask);
+}
+
+static void lan966x_vlan_cpu_del_cpu_vlan_mask(struct lan966x *lan966x, u16 vid)
+{
+ __clear_bit(vid, lan966x->cpu_vlan_mask);
+}
+
+bool lan966x_vlan_cpu_member_cpu_vlan_mask(struct lan966x *lan966x, u16 vid)
+{
+ return test_bit(vid, lan966x->cpu_vlan_mask);
+}
+
+static u16 lan966x_vlan_port_get_pvid(struct lan966x_port *port)
+{
+ struct lan966x *lan966x = port->lan966x;
+
+ if (!(lan966x->bridge_mask & BIT(port->chip_port)))
+ return HOST_PVID;
+
+ return port->vlan_aware ? port->pvid : UNAWARE_PVID;
+}
+
+int lan966x_vlan_port_set_vid(struct lan966x_port *port, u16 vid,
+ bool pvid, bool untagged)
+{
+ struct lan966x *lan966x = port->lan966x;
+
+ /* Egress vlan classification */
+ if (untagged && port->vid != vid) {
+ if (port->vid) {
+ dev_err(lan966x->dev,
+ "Port already has a native VLAN: %d\n",
+ port->vid);
+ return -EBUSY;
+ }
+ port->vid = vid;
+ }
+
+ /* Default ingress vlan classification */
+ if (pvid)
+ port->pvid = vid;
+
+ return 0;
+}
+
+static void lan966x_vlan_port_remove_vid(struct lan966x_port *port, u16 vid)
+{
+ if (port->pvid == vid)
+ port->pvid = 0;
+
+ if (port->vid == vid)
+ port->vid = 0;
+}
+
+void lan966x_vlan_port_set_vlan_aware(struct lan966x_port *port,
+ bool vlan_aware)
+{
+ port->vlan_aware = vlan_aware;
+}
+
+void lan966x_vlan_port_apply(struct lan966x_port *port)
+{
+ struct lan966x *lan966x = port->lan966x;
+ u16 pvid;
+ u32 val;
+
+ pvid = lan966x_vlan_port_get_pvid(port);
+
+ /* Ingress classification (ANA_PORT_VLAN_CFG) */
+ /* Default vlan to classify for untagged frames (may be zero) */
+ val = ANA_VLAN_CFG_VLAN_VID_SET(pvid);
+ if (port->vlan_aware)
+ val |= ANA_VLAN_CFG_VLAN_AWARE_ENA_SET(1) |
+ ANA_VLAN_CFG_VLAN_POP_CNT_SET(1);
+
+ lan_rmw(val,
+ ANA_VLAN_CFG_VLAN_VID | ANA_VLAN_CFG_VLAN_AWARE_ENA |
+ ANA_VLAN_CFG_VLAN_POP_CNT,
+ lan966x, ANA_VLAN_CFG(port->chip_port));
+
+ lan_rmw(DEV_MAC_TAGS_CFG_VLAN_AWR_ENA_SET(port->vlan_aware) |
+ DEV_MAC_TAGS_CFG_VLAN_DBL_AWR_ENA_SET(port->vlan_aware),
+ DEV_MAC_TAGS_CFG_VLAN_AWR_ENA |
+ DEV_MAC_TAGS_CFG_VLAN_DBL_AWR_ENA,
+ lan966x, DEV_MAC_TAGS_CFG(port->chip_port));
+
+ /* Drop frames with multicast source address */
+ val = ANA_DROP_CFG_DROP_MC_SMAC_ENA_SET(1);
+ if (port->vlan_aware && !pvid)
+ /* If port is vlan-aware and tagged, drop untagged and priority
+ * tagged frames.
+ */
+ val |= ANA_DROP_CFG_DROP_UNTAGGED_ENA_SET(1) |
+ ANA_DROP_CFG_DROP_PRIO_S_TAGGED_ENA_SET(1) |
+ ANA_DROP_CFG_DROP_PRIO_C_TAGGED_ENA_SET(1);
+
+ lan_wr(val, lan966x, ANA_DROP_CFG(port->chip_port));
+
+ /* Egress configuration (REW_TAG_CFG): VLAN tag type to 8021Q */
+ val = REW_TAG_CFG_TAG_TPID_CFG_SET(0);
+ if (port->vlan_aware) {
+ if (port->vid)
+ /* Tag all frames except when VID == DEFAULT_VLAN */
+ val |= REW_TAG_CFG_TAG_CFG_SET(1);
+ else
+ val |= REW_TAG_CFG_TAG_CFG_SET(3);
+ }
+
+ /* Update only some bits in the register */
+ lan_rmw(val,
+ REW_TAG_CFG_TAG_TPID_CFG | REW_TAG_CFG_TAG_CFG,
+ lan966x, REW_TAG_CFG(port->chip_port));
+
+ /* Set default VLAN and tag type to 8021Q */
+ lan_rmw(REW_PORT_VLAN_CFG_PORT_TPID_SET(ETH_P_8021Q) |
+ REW_PORT_VLAN_CFG_PORT_VID_SET(port->vid),
+ REW_PORT_VLAN_CFG_PORT_TPID |
+ REW_PORT_VLAN_CFG_PORT_VID,
+ lan966x, REW_PORT_VLAN_CFG(port->chip_port));
+}
+
+void lan966x_vlan_port_add_vlan(struct lan966x_port *port,
+ u16 vid,
+ bool pvid,
+ bool untagged)
+{
+ struct lan966x *lan966x = port->lan966x;
+
+ /* If the CPU(br) is already part of the vlan, add the fdb and mdb
+ * entries to the MAC table so the frames are copied to the CPU(br).
+ * If the CPU(br) is not part of the vlan, the frames are simply
+ * dropped.
+ */
+ if (lan966x_vlan_cpu_member_cpu_vlan_mask(lan966x, vid)) {
+ lan966x_vlan_cpu_add_vlan_mask(lan966x, vid);
+ lan966x_fdb_write_entries(lan966x, vid);
+ lan966x_mdb_write_entries(lan966x, vid);
+ }
+
+ lan966x_vlan_port_set_vid(port, vid, pvid, untagged);
+ lan966x_vlan_port_add_vlan_mask(port, vid);
+ lan966x_vlan_port_apply(port);
+}
+
+void lan966x_vlan_port_del_vlan(struct lan966x_port *port, u16 vid)
+{
+ struct lan966x *lan966x = port->lan966x;
+
+ lan966x_vlan_port_remove_vid(port, vid);
+ lan966x_vlan_port_del_vlan_mask(port, vid);
+ lan966x_vlan_port_apply(port);
+
+ /* If there are no other ports left in the vlan, remove the CPU from
+ * that vlan, but keep it in the CPU vlan mask because it may be
+ * needed again when another port is added to that vlan.
+ */
+ if (!lan966x_vlan_port_any_vlan_mask(lan966x, vid)) {
+ lan966x_vlan_cpu_del_vlan_mask(lan966x, vid);
+ lan966x_fdb_erase_entries(lan966x, vid);
+ lan966x_mdb_erase_entries(lan966x, vid);
+ }
+}
+
+void lan966x_vlan_cpu_add_vlan(struct lan966x *lan966x, u16 vid)
+{
+ /* Add an entry in the MAC table for the CPU.
+ * Make the CPU part of the vlan only if there is another port in that
+ * vlan; otherwise all the broadcast frames in that vlan would go to
+ * the CPU only to be discarded there. The membership is still
+ * recorded, so the CPU port can be added as soon as a front port
+ * joins the vlan.
+ */
+ if (lan966x_vlan_port_any_vlan_mask(lan966x, vid)) {
+ lan966x_vlan_cpu_add_vlan_mask(lan966x, vid);
+ lan966x_mdb_write_entries(lan966x, vid);
+ }
+
+ lan966x_vlan_cpu_add_cpu_vlan_mask(lan966x, vid);
+ lan966x_fdb_write_entries(lan966x, vid);
+}
+
+void lan966x_vlan_cpu_del_vlan(struct lan966x *lan966x, u16 vid)
+{
+ /* Remove the CPU part of the vlan */
+ lan966x_vlan_cpu_del_cpu_vlan_mask(lan966x, vid);
+ lan966x_vlan_cpu_del_vlan_mask(lan966x, vid);
+ lan966x_fdb_erase_entries(lan966x, vid);
+ lan966x_mdb_erase_entries(lan966x, vid);
+}
+
+void lan966x_vlan_init(struct lan966x *lan966x)
+{
+ u16 port, vid;
+
+ /* Clear the VLAN table; by default all ports are members of all VLANs */
+ lan_rmw(ANA_VLANACCESS_VLAN_TBL_CMD_SET(VLANACCESS_CMD_INIT),
+ ANA_VLANACCESS_VLAN_TBL_CMD,
+ lan966x, ANA_VLANACCESS);
+ lan966x_vlan_wait_for_completion(lan966x);
+
+ for (vid = 1; vid < VLAN_N_VID; vid++) {
+ lan966x->vlan_mask[vid] = 0;
+ lan966x_vlan_set_mask(lan966x, vid);
+ }
+
+ /* Set all the ports + cpu to be part of HOST_PVID and UNAWARE_PVID */
+ lan966x->vlan_mask[HOST_PVID] =
+ GENMASK(lan966x->num_phys_ports - 1, 0) | BIT(CPU_PORT);
+ lan966x_vlan_set_mask(lan966x, HOST_PVID);
+
+ lan966x->vlan_mask[UNAWARE_PVID] =
+ GENMASK(lan966x->num_phys_ports - 1, 0) | BIT(CPU_PORT);
+ lan966x_vlan_set_mask(lan966x, UNAWARE_PVID);
+
+ lan966x_vlan_cpu_add_cpu_vlan_mask(lan966x, UNAWARE_PVID);
+
+ /* Configure the CPU port to be vlan aware */
+ lan_wr(ANA_VLAN_CFG_VLAN_VID_SET(0) |
+ ANA_VLAN_CFG_VLAN_AWARE_ENA_SET(1) |
+ ANA_VLAN_CFG_VLAN_POP_CNT_SET(1),
+ lan966x, ANA_VLAN_CFG(CPU_PORT));
+
+ /* Set vlan ingress filter mask to all ports */
+ lan_wr(GENMASK(lan966x->num_phys_ports, 0),
+ lan966x, ANA_VLANMASK);
+
+ for (port = 0; port < lan966x->num_phys_ports; port++) {
+ lan_wr(0, lan966x, REW_PORT_VLAN_CFG(port));
+ lan_wr(0, lan966x, REW_TAG_CFG(port));
+ }
+}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_xdp.c b/drivers/net/ethernet/microchip/lan966x/lan966x_xdp.c
new file mode 100644
index 0000000000..9ee61db869
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_xdp.c
@@ -0,0 +1,136 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
+#include <linux/filter.h>
+
+#include "lan966x_main.h"
+
+static int lan966x_xdp_setup(struct net_device *dev, struct netdev_bpf *xdp)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ struct bpf_prog *old_prog;
+ bool old_xdp, new_xdp;
+ int err;
+
+ if (!lan966x->fdma) {
+ NL_SET_ERR_MSG_MOD(xdp->extack,
+ "Allow to set xdp only when using fdma");
+ return -EOPNOTSUPP;
+ }
+
+ old_xdp = lan966x_xdp_present(lan966x);
+ old_prog = xchg(&port->xdp_prog, xdp->prog);
+ new_xdp = lan966x_xdp_present(lan966x);
+
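+ /* Reloading the page pool is only needed when XDP presence toggles
+ * for the whole switch, as the RX buffer layout depends on whether
+ * any port runs an XDP program.
+ */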
+ if (old_xdp == new_xdp)
+ goto out;
+
+ err = lan966x_fdma_reload_page_pool(lan966x);
+ if (err) {
+ xchg(&port->xdp_prog, old_prog);
+ return err;
+ }
+
+out:
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ return 0;
+}
+
+int lan966x_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+{
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return lan966x_xdp_setup(dev, xdp);
+ default:
+ return -EINVAL;
+ }
+}
+
+int lan966x_xdp_xmit(struct net_device *dev,
+ int n,
+ struct xdp_frame **frames,
+ u32 flags)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ int nxmit = 0;
+
+ for (int i = 0; i < n; ++i) {
+ struct xdp_frame *xdpf = frames[i];
+ int err;
+
+ err = lan966x_fdma_xmit_xdpf(port, xdpf, 0);
+ if (err)
+ break;
+
+ nxmit++;
+ }
+
+ return nxmit;
+}
+
+int lan966x_xdp_run(struct lan966x_port *port, struct page *page, u32 data_len)
+{
+ struct bpf_prog *xdp_prog = port->xdp_prog;
+ struct lan966x *lan966x = port->lan966x;
+ struct xdp_buff xdp;
+ u32 act;
+
+ xdp_init_buff(&xdp, PAGE_SIZE << lan966x->rx.page_order,
+ &port->xdp_rxq);
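+ /* Received buffers start with the frame header (IFH), so account for
+ * it in the headroom and subtract it from the data length.
+ */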
+ xdp_prepare_buff(&xdp, page_address(page),
+ IFH_LEN_BYTES + XDP_PACKET_HEADROOM,
+ data_len - IFH_LEN_BYTES, false);
+ act = bpf_prog_run_xdp(xdp_prog, &xdp);
+ switch (act) {
+ case XDP_PASS:
+ return FDMA_PASS;
+ case XDP_TX:
+ return lan966x_fdma_xmit_xdpf(port, page,
+ data_len - IFH_LEN_BYTES) ?
+ FDMA_DROP : FDMA_TX;
+ case XDP_REDIRECT:
+ if (xdp_do_redirect(port->dev, &xdp, xdp_prog))
+ return FDMA_DROP;
+
+ return FDMA_REDIRECT;
+ default:
+ bpf_warn_invalid_xdp_action(port->dev, xdp_prog, act);
+ fallthrough;
+ case XDP_ABORTED:
+ trace_xdp_exception(port->dev, xdp_prog, act);
+ fallthrough;
+ case XDP_DROP:
+ return FDMA_DROP;
+ }
+}
+
+bool lan966x_xdp_present(struct lan966x *lan966x)
+{
+ for (int p = 0; p < lan966x->num_phys_ports; ++p) {
+ if (!lan966x->ports[p])
+ continue;
+
+ if (lan966x_xdp_port_present(lan966x->ports[p]))
+ return true;
+ }
+
+ return false;
+}
+
+int lan966x_xdp_port_init(struct lan966x_port *port)
+{
+ struct lan966x *lan966x = port->lan966x;
+
+ return xdp_rxq_info_reg(&port->xdp_rxq, port->dev, 0,
+ lan966x->napi.napi_id);
+}
+
+void lan966x_xdp_port_deinit(struct lan966x_port *port)
+{
+ if (xdp_rxq_info_is_reg(&port->xdp_rxq))
+ xdp_rxq_info_unreg(&port->xdp_rxq);
+}
diff --git a/drivers/net/ethernet/microchip/sparx5/Kconfig b/drivers/net/ethernet/microchip/sparx5/Kconfig
new file mode 100644
index 0000000000..f58c506bda
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/Kconfig
@@ -0,0 +1,25 @@
+config SPARX5_SWITCH
+ tristate "Sparx5 switch driver"
+ depends on NET_SWITCHDEV
+ depends on HAS_IOMEM
+ depends on OF
+ depends on ARCH_SPARX5 || COMPILE_TEST
+ depends on PTP_1588_CLOCK_OPTIONAL
+ depends on BRIDGE || BRIDGE=n
+ select PHYLINK
+ select PHY_SPARX5_SERDES
+ select RESET_CONTROLLER
+ select VCAP
+ help
+ This driver supports the Sparx5 network switch device.
+
+config SPARX5_DCB
+ bool "Data Center Bridging (DCB) support"
+ depends on SPARX5_SWITCH && DCB
+ default y
+ help
+ Say Y here if you want to use Data Center Bridging (DCB) in the
+ driver. This can be used to assign priority to traffic based on
+ DSCP and PCP.
+
+ If unsure, set to Y.
diff --git a/drivers/net/ethernet/microchip/sparx5/Makefile b/drivers/net/ethernet/microchip/sparx5/Makefile
new file mode 100644
index 0000000000..1cb1cc3f1a
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/Makefile
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for the Microchip Sparx5 network device drivers.
+#
+
+obj-$(CONFIG_SPARX5_SWITCH) += sparx5-switch.o
+
+sparx5-switch-y := sparx5_main.o sparx5_packet.o \
+ sparx5_netdev.o sparx5_phylink.o sparx5_port.o sparx5_mactable.o sparx5_vlan.o \
+ sparx5_switchdev.o sparx5_calendar.o sparx5_ethtool.o sparx5_fdma.o \
+ sparx5_ptp.o sparx5_pgid.o sparx5_tc.o sparx5_qos.o \
+ sparx5_vcap_impl.o sparx5_vcap_ag_api.o sparx5_tc_flower.o \
+ sparx5_tc_matchall.o sparx5_pool.o sparx5_sdlb.o sparx5_police.o sparx5_psfp.o
+
+sparx5-switch-$(CONFIG_SPARX5_DCB) += sparx5_dcb.o
+sparx5-switch-$(CONFIG_DEBUG_FS) += sparx5_vcap_debugfs.o
+
+# Provide include files
+ccflags-y += -I$(srctree)/drivers/net/ethernet/microchip/vcap
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_calendar.c b/drivers/net/ethernet/microchip/sparx5/sparx5_calendar.c
new file mode 100644
index 0000000000..76a8bb596a
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_calendar.c
@@ -0,0 +1,596 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+
+#include "sparx5_main_regs.h"
+#include "sparx5_main.h"
+
+/* QSYS calendar information */
+#define SPX5_PORTS_PER_CALREG 10 /* Ports mapped in a calendar register */
+#define SPX5_CALBITS_PER_PORT 3 /* Bit per port in calendar register */
+
+/* DSM calendar information */
+#define SPX5_DSM_CAL_LEN 64
+#define SPX5_DSM_CAL_EMPTY 0xFFFF
+#define SPX5_DSM_CAL_MAX_DEVS_PER_TAXI 13
+#define SPX5_DSM_CAL_TAXIS 8
+#define SPX5_DSM_CAL_BW_LOSS 553
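+/* SPX5_DSM_CAL_BW_LOSS is in 1/100 of a percent: 553 means 5.53% overhead */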
+
+#define SPX5_TAXI_PORT_MAX 70
+
+#define SPEED_12500 12500
+
+/* Maps from taxis to port numbers */
+static u32 sparx5_taxi_ports[SPX5_DSM_CAL_TAXIS][SPX5_DSM_CAL_MAX_DEVS_PER_TAXI] = {
+ {57, 12, 0, 1, 2, 16, 17, 18, 19, 20, 21, 22, 23},
+ {58, 13, 3, 4, 5, 24, 25, 26, 27, 28, 29, 30, 31},
+ {59, 14, 6, 7, 8, 32, 33, 34, 35, 36, 37, 38, 39},
+ {60, 15, 9, 10, 11, 40, 41, 42, 43, 44, 45, 46, 47},
+ {61, 48, 49, 50, 99, 99, 99, 99, 99, 99, 99, 99, 99},
+ {62, 51, 52, 53, 99, 99, 99, 99, 99, 99, 99, 99, 99},
+ {56, 63, 54, 55, 99, 99, 99, 99, 99, 99, 99, 99, 99},
+ {64, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99},
+};
+
+struct sparx5_calendar_data {
+ u32 schedule[SPX5_DSM_CAL_LEN];
+ u32 avg_dist[SPX5_DSM_CAL_MAX_DEVS_PER_TAXI];
+ u32 taxi_ports[SPX5_DSM_CAL_MAX_DEVS_PER_TAXI];
+ u32 taxi_speeds[SPX5_DSM_CAL_MAX_DEVS_PER_TAXI];
+ u32 dev_slots[SPX5_DSM_CAL_MAX_DEVS_PER_TAXI];
+ u32 new_slots[SPX5_DSM_CAL_LEN];
+ u32 temp_sched[SPX5_DSM_CAL_LEN];
+ u32 indices[SPX5_DSM_CAL_LEN];
+ u32 short_list[SPX5_DSM_CAL_LEN];
+ u32 long_list[SPX5_DSM_CAL_LEN];
+};
+
+static u32 sparx5_target_bandwidth(struct sparx5 *sparx5)
+{
+ switch (sparx5->target_ct) {
+ case SPX5_TARGET_CT_7546:
+ case SPX5_TARGET_CT_7546TSN:
+ return 65000;
+ case SPX5_TARGET_CT_7549:
+ case SPX5_TARGET_CT_7549TSN:
+ return 91000;
+ case SPX5_TARGET_CT_7552:
+ case SPX5_TARGET_CT_7552TSN:
+ return 129000;
+ case SPX5_TARGET_CT_7556:
+ case SPX5_TARGET_CT_7556TSN:
+ return 161000;
+ case SPX5_TARGET_CT_7558:
+ case SPX5_TARGET_CT_7558TSN:
+ return 201000;
+ default:
+ return 0;
+ }
+}
+
+/* This is used in calendar configuration */
+enum sparx5_cal_bw {
+ SPX5_CAL_SPEED_NONE = 0,
+ SPX5_CAL_SPEED_1G = 1,
+ SPX5_CAL_SPEED_2G5 = 2,
+ SPX5_CAL_SPEED_5G = 3,
+ SPX5_CAL_SPEED_10G = 4,
+ SPX5_CAL_SPEED_25G = 5,
+ SPX5_CAL_SPEED_0G5 = 6,
+ SPX5_CAL_SPEED_12G5 = 7
+};
+
+static u32 sparx5_clk_to_bandwidth(enum sparx5_core_clockfreq cclock)
+{
+ switch (cclock) {
+ case SPX5_CORE_CLOCK_250MHZ: return 83000; /* 250000 / 3 */
+ case SPX5_CORE_CLOCK_500MHZ: return 166000; /* 500000 / 3 */
+ case SPX5_CORE_CLOCK_625MHZ: return 208000; /* 625000 / 3 */
+ default: return 0;
+ }
+}
+
+static u32 sparx5_cal_speed_to_value(enum sparx5_cal_bw speed)
+{
+ switch (speed) {
+ case SPX5_CAL_SPEED_1G: return 1000;
+ case SPX5_CAL_SPEED_2G5: return 2500;
+ case SPX5_CAL_SPEED_5G: return 5000;
+ case SPX5_CAL_SPEED_10G: return 10000;
+ case SPX5_CAL_SPEED_25G: return 25000;
+ case SPX5_CAL_SPEED_0G5: return 500;
+ case SPX5_CAL_SPEED_12G5: return 12500;
+ default: return 0;
+ }
+}
+
+static u32 sparx5_bandwidth_to_calendar(u32 bw)
+{
+ switch (bw) {
+ case SPEED_10: return SPX5_CAL_SPEED_0G5;
+ case SPEED_100: return SPX5_CAL_SPEED_0G5;
+ case SPEED_1000: return SPX5_CAL_SPEED_1G;
+ case SPEED_2500: return SPX5_CAL_SPEED_2G5;
+ case SPEED_5000: return SPX5_CAL_SPEED_5G;
+ case SPEED_10000: return SPX5_CAL_SPEED_10G;
+ case SPEED_12500: return SPX5_CAL_SPEED_12G5;
+ case SPEED_25000: return SPX5_CAL_SPEED_25G;
+ case SPEED_UNKNOWN: return SPX5_CAL_SPEED_1G;
+ default: return SPX5_CAL_SPEED_NONE;
+ }
+}
+
+static enum sparx5_cal_bw sparx5_get_port_cal_speed(struct sparx5 *sparx5,
+ u32 portno)
+{
+ struct sparx5_port *port;
+
+ if (portno >= SPX5_PORTS) {
+ /* Internal ports */
+ if (portno == SPX5_PORT_CPU_0 || portno == SPX5_PORT_CPU_1) {
+ /* Equals 1.25G */
+ return SPX5_CAL_SPEED_2G5;
+ } else if (portno == SPX5_PORT_VD0) {
+ /* IPMC only idle BW */
+ return SPX5_CAL_SPEED_NONE;
+ } else if (portno == SPX5_PORT_VD1) {
+ /* OAM only idle BW */
+ return SPX5_CAL_SPEED_NONE;
+ } else if (portno == SPX5_PORT_VD2) {
+ /* IPinIP gets only idle BW */
+ return SPX5_CAL_SPEED_NONE;
+ }
+ /* not in port map */
+ return SPX5_CAL_SPEED_NONE;
+ }
+ /* Front ports - may be used */
+ port = sparx5->ports[portno];
+ if (!port)
+ return SPX5_CAL_SPEED_NONE;
+ return sparx5_bandwidth_to_calendar(port->conf.bandwidth);
+}
+
+/* Auto configure the QSYS calendar based on port configuration */
+int sparx5_config_auto_calendar(struct sparx5 *sparx5)
+{
+ u32 cal[7], value, idx, portno;
+ u32 max_core_bw;
+ u32 total_bw = 0, used_port_bw = 0;
+ int err = 0;
+ enum sparx5_cal_bw spd;
+
+ memset(cal, 0, sizeof(cal));
+
+ max_core_bw = sparx5_clk_to_bandwidth(sparx5->coreclock);
+ if (max_core_bw == 0) {
+ dev_err(sparx5->dev, "Core clock not supported");
+ return -EINVAL;
+ }
+
+ /* Setup the calendar with the bandwidth to each port */
+ for (portno = 0; portno < SPX5_PORTS_ALL; portno++) {
+ u64 reg, offset, this_bw;
+
+ spd = sparx5_get_port_cal_speed(sparx5, portno);
+ if (spd == SPX5_CAL_SPEED_NONE)
+ continue;
+
+ this_bw = sparx5_cal_speed_to_value(spd);
+ if (portno < SPX5_PORTS)
+ used_port_bw += this_bw;
+ else
+ /* Internal ports are granted half the value */
+ this_bw = this_bw / 2;
+ total_bw += this_bw;
+ reg = portno;
+ offset = do_div(reg, SPX5_PORTS_PER_CALREG);
+ cal[reg] |= spd << (offset * SPX5_CALBITS_PER_PORT);
+ }
+
+ if (used_port_bw > sparx5_target_bandwidth(sparx5)) {
+ dev_err(sparx5->dev,
+ "Port BW %u above target BW %u\n",
+ used_port_bw, sparx5_target_bandwidth(sparx5));
+ return -EINVAL;
+ }
+
+ if (total_bw > max_core_bw) {
+ dev_err(sparx5->dev,
+ "Total BW %u above switch core BW %u\n",
+ total_bw, max_core_bw);
+ return -EINVAL;
+ }
+
+ /* Halt the calendar while changing it */
+ spx5_rmw(QSYS_CAL_CTRL_CAL_MODE_SET(10),
+ QSYS_CAL_CTRL_CAL_MODE,
+ sparx5, QSYS_CAL_CTRL);
+
+ /* Assign port bandwidth to auto calendar */
+ for (idx = 0; idx < ARRAY_SIZE(cal); idx++)
+ spx5_wr(cal[idx], sparx5, QSYS_CAL_AUTO(idx));
+
+ /* Increase grant rate of all ports to account for
+ * core clock ppm deviations
+ */
+ spx5_rmw(QSYS_CAL_CTRL_CAL_AUTO_GRANT_RATE_SET(671), /* 672->671 */
+ QSYS_CAL_CTRL_CAL_AUTO_GRANT_RATE,
+ sparx5,
+ QSYS_CAL_CTRL);
+
+ /* Grant idle usage to VD 0-2 */
+ for (idx = 2; idx < 5; idx++)
+ spx5_wr(HSCH_OUTB_SHARE_ENA_OUTB_SHARE_ENA_SET(12),
+ sparx5,
+ HSCH_OUTB_SHARE_ENA(idx));
+
+ /* Enable Auto mode */
+ spx5_rmw(QSYS_CAL_CTRL_CAL_MODE_SET(8),
+ QSYS_CAL_CTRL_CAL_MODE,
+ sparx5, QSYS_CAL_CTRL);
+
+ /* Verify successful calendar config */
+ value = spx5_rd(sparx5, QSYS_CAL_CTRL);
+ if (QSYS_CAL_CTRL_CAL_AUTO_ERROR_GET(value)) {
+ dev_err(sparx5->dev, "QSYS calendar error\n");
+ err = -EINVAL;
+ }
+ return err;
+}
+
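+/* Greatest common divisor of two speeds (Euclid's algorithm) */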
+static u32 sparx5_dsm_exb_gcd(u32 a, u32 b)
+{
+ if (b == 0)
+ return a;
+ return sparx5_dsm_exb_gcd(b, a % b);
+}
+
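+/* Number of occupied slots in a calendar */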
+static u32 sparx5_dsm_cal_len(u32 *cal)
+{
+ u32 idx = 0, len = 0;
+
+ while (idx < SPX5_DSM_CAL_LEN) {
+ if (cal[idx] != SPX5_DSM_CAL_EMPTY)
+ len++;
+ idx++;
+ }
+ return len;
+}
+
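+/* Pop the first occupied slot from a calendar, marking it empty */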
+static u32 sparx5_dsm_cp_cal(u32 *sched)
+{
+ u32 idx = 0, tmp;
+
+ while (idx < SPX5_DSM_CAL_LEN) {
+ if (sched[idx] != SPX5_DSM_CAL_EMPTY) {
+ tmp = sched[idx];
+ sched[idx] = SPX5_DSM_CAL_EMPTY;
+ return tmp;
+ }
+ idx++;
+ }
+ return SPX5_DSM_CAL_EMPTY;
+}
+
+static int sparx5_dsm_calendar_calc(struct sparx5 *sparx5, u32 taxi,
+ struct sparx5_calendar_data *data)
+{
+ bool slow_mode;
+ u32 gcd, idx, sum, min, factor;
+ u32 num_of_slots, slot_spd, empty_slots;
+ u32 taxi_bw, clk_period_ps;
+
+ clk_period_ps = sparx5_clk_period(sparx5->coreclock);
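+ /* The taxi bus carries 128 bits per core clock cycle, so the
+ * bandwidth in Mbit/s is 128 * 10^6 / clk_period_ps, e.g. 80000 at
+ * 625 MHz (1600 ps period).
+ */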
+ taxi_bw = 128 * 1000000 / clk_period_ps;
+ slow_mode = !!(clk_period_ps > 2000);
+ memcpy(data->taxi_ports, &sparx5_taxi_ports[taxi],
+ sizeof(data->taxi_ports));
+
+ for (idx = 0; idx < SPX5_DSM_CAL_LEN; idx++) {
+ data->new_slots[idx] = SPX5_DSM_CAL_EMPTY;
+ data->schedule[idx] = SPX5_DSM_CAL_EMPTY;
+ data->temp_sched[idx] = SPX5_DSM_CAL_EMPTY;
+ }
+ /* Default empty calendar */
+ data->schedule[0] = SPX5_DSM_CAL_MAX_DEVS_PER_TAXI;
+
+ /* Map ports to taxi positions */
+ for (idx = 0; idx < SPX5_DSM_CAL_MAX_DEVS_PER_TAXI; idx++) {
+ u32 portno = data->taxi_ports[idx];
+
+ if (portno < SPX5_TAXI_PORT_MAX) {
+ data->taxi_speeds[idx] = sparx5_cal_speed_to_value
+ (sparx5_get_port_cal_speed(sparx5, portno));
+ } else {
+ data->taxi_speeds[idx] = 0;
+ }
+ }
+
+ sum = 0;
+ min = 25000;
+ for (idx = 0; idx < ARRAY_SIZE(data->taxi_speeds); idx++) {
+ u32 jdx;
+
+ sum += data->taxi_speeds[idx];
+ if (data->taxi_speeds[idx] && data->taxi_speeds[idx] < min)
+ min = data->taxi_speeds[idx];
+ gcd = min;
+ for (jdx = 0; jdx < ARRAY_SIZE(data->taxi_speeds); jdx++)
+ gcd = sparx5_dsm_exb_gcd(gcd, data->taxi_speeds[jdx]);
+ }
+ if (sum == 0) /* Empty calendar */
+ return 0;
+ /* Make room for overhead traffic */
+ factor = 100 * 100 * 1000 / (100 * 100 - SPX5_DSM_CAL_BW_LOSS);
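+ /* With SPX5_DSM_CAL_BW_LOSS == 553 this gives 10000000 / 9447 = 1058,
+ * i.e. speeds are inflated by about 5.8% (factor carries a x1000
+ * scaling).
+ */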
+
+ if (sum * factor > (taxi_bw * 1000)) {
+ dev_err(sparx5->dev,
+ "Taxi %u, Requested BW %u above available BW %u\n",
+ taxi, sum, taxi_bw);
+ return -EINVAL;
+ }
+ for (idx = 0; idx < 4; idx++) {
+ u32 raw_spd;
+
+ if (idx == 0)
+ raw_spd = gcd / 5;
+ else if (idx == 1)
+ raw_spd = gcd / 2;
+ else if (idx == 2)
+ raw_spd = gcd;
+ else
+ raw_spd = min;
+ slot_spd = raw_spd * factor / 1000;
+ num_of_slots = taxi_bw / slot_spd;
+ if (num_of_slots <= 64)
+ break;
+ }
+
+ num_of_slots = num_of_slots > 64 ? 64 : num_of_slots;
+ slot_spd = taxi_bw / num_of_slots;
+
+ sum = 0;
+ for (idx = 0; idx < ARRAY_SIZE(data->taxi_speeds); idx++) {
+ u32 spd = data->taxi_speeds[idx];
+ u32 adjusted_speed = data->taxi_speeds[idx] * factor / 1000;
+
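+ /* Desired average distance between two slots of this device, in
+ * tenths of a slot: 10 * taxi_bw / adjusted_speed
+ */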
+ if (adjusted_speed > 0) {
+ data->avg_dist[idx] = (128 * 1000000 * 10) /
+ (adjusted_speed * clk_period_ps);
+ } else {
+ data->avg_dist[idx] = -1;
+ }
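+ /* Number of slots this device needs, rounded up */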
+ data->dev_slots[idx] = ((spd * factor / slot_spd) + 999) / 1000;
+ if (spd != 25000 && (spd != 10000 || !slow_mode)) {
+ if (num_of_slots < (5 * data->dev_slots[idx])) {
+ dev_err(sparx5->dev,
+ "Taxi %u, speed %u, Low slot sep.\n",
+ taxi, spd);
+ return -EINVAL;
+ }
+ }
+ sum += data->dev_slots[idx];
+ if (sum > num_of_slots) {
+ dev_err(sparx5->dev,
+ "Taxi %u with overhead factor %u\n",
+ taxi, factor);
+ return -EINVAL;
+ }
+ }
+
+ empty_slots = num_of_slots - sum;
+
+ for (idx = 0; idx < empty_slots; idx++)
+ data->schedule[idx] = SPX5_DSM_CAL_MAX_DEVS_PER_TAXI;
+
+ for (idx = 1; idx < num_of_slots; idx++) {
+ u32 indices_len = 0;
+ u32 slot, jdx, kdx, ts;
+ s32 cnt;
+ u32 num_of_old_slots, num_of_new_slots, tgt_score;
+
+ for (slot = 0; slot < ARRAY_SIZE(data->dev_slots); slot++) {
+ if (data->dev_slots[slot] == idx) {
+ data->indices[indices_len] = slot;
+ indices_len++;
+ }
+ }
+ if (indices_len == 0)
+ continue;
+ kdx = 0;
+ for (slot = 0; slot < idx; slot++) {
+ for (jdx = 0; jdx < indices_len; jdx++, kdx++)
+ data->new_slots[kdx] = data->indices[jdx];
+ }
+
+ for (slot = 0; slot < SPX5_DSM_CAL_LEN; slot++) {
+ if (data->schedule[slot] == SPX5_DSM_CAL_EMPTY)
+ break;
+ }
+
+ num_of_old_slots = slot;
+ num_of_new_slots = kdx;
+ cnt = 0;
+ ts = 0;
+
+ if (num_of_new_slots > num_of_old_slots) {
+ memcpy(data->short_list, data->schedule,
+ sizeof(data->short_list));
+ memcpy(data->long_list, data->new_slots,
+ sizeof(data->long_list));
+ tgt_score = 100000 * num_of_old_slots /
+ num_of_new_slots;
+ } else {
+ memcpy(data->short_list, data->new_slots,
+ sizeof(data->short_list));
+ memcpy(data->long_list, data->schedule,
+ sizeof(data->long_list));
+ tgt_score = 100000 * num_of_new_slots /
+ num_of_old_slots;
+ }
+
+ while (sparx5_dsm_cal_len(data->short_list) > 0 ||
+ sparx5_dsm_cal_len(data->long_list) > 0) {
+ u32 act = 0;
+
+ if (sparx5_dsm_cal_len(data->short_list) > 0) {
+ data->temp_sched[ts] =
+ sparx5_dsm_cp_cal(data->short_list);
+ ts++;
+ cnt += 100000;
+ act = 1;
+ }
+ while (sparx5_dsm_cal_len(data->long_list) > 0 &&
+ cnt > 0) {
+ data->temp_sched[ts] =
+ sparx5_dsm_cp_cal(data->long_list);
+ ts++;
+ cnt -= tgt_score;
+ act = 1;
+ }
+ if (act == 0) {
+ dev_err(sparx5->dev,
+ "Error in DSM calendar calculation\n");
+ return -EINVAL;
+ }
+ }
+
+ for (slot = 0; slot < SPX5_DSM_CAL_LEN; slot++) {
+ if (data->temp_sched[slot] == SPX5_DSM_CAL_EMPTY)
+ break;
+ }
+ for (slot = 0; slot < SPX5_DSM_CAL_LEN; slot++) {
+ data->schedule[slot] = data->temp_sched[slot];
+ data->temp_sched[slot] = SPX5_DSM_CAL_EMPTY;
+ data->new_slots[slot] = SPX5_DSM_CAL_EMPTY;
+ }
+ }
+ return 0;
+}
+
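+/* Verify that the slots of each device are spaced evenly enough: the
+ * accumulated deviation from the average distance (in tenths of a slot)
+ * must never exceed that average distance.
+ */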
+static int sparx5_dsm_calendar_check(struct sparx5 *sparx5,
+ struct sparx5_calendar_data *data)
+{
+ u32 num_of_slots, idx, port;
+ int cnt, max_dist;
+ u32 slot_indices[SPX5_DSM_CAL_LEN], distances[SPX5_DSM_CAL_LEN];
+ u32 cal_length = sparx5_dsm_cal_len(data->schedule);
+
+ for (port = 0; port < SPX5_DSM_CAL_MAX_DEVS_PER_TAXI; port++) {
+ num_of_slots = 0;
+ max_dist = data->avg_dist[port];
+ for (idx = 0; idx < SPX5_DSM_CAL_LEN; idx++) {
+ slot_indices[idx] = SPX5_DSM_CAL_EMPTY;
+ distances[idx] = SPX5_DSM_CAL_EMPTY;
+ }
+
+ for (idx = 0; idx < cal_length; idx++) {
+ if (data->schedule[idx] == port) {
+ slot_indices[num_of_slots] = idx;
+ num_of_slots++;
+ }
+ }
+
+ slot_indices[num_of_slots] = slot_indices[0] + cal_length;
+
+ for (idx = 0; idx < num_of_slots; idx++) {
+ distances[idx] = (slot_indices[idx + 1] -
+ slot_indices[idx]) * 10;
+ }
+
+ for (idx = 0; idx < num_of_slots; idx++) {
+ u32 jdx, kdx;
+
+ cnt = distances[idx] - max_dist;
+ if (cnt < 0)
+ cnt = -cnt;
+ kdx = 0;
+ for (jdx = (idx + 1) % num_of_slots;
+ jdx != idx;
+ jdx = (jdx + 1) % num_of_slots, kdx++) {
+ cnt = cnt + distances[jdx] - max_dist;
+ if (cnt < 0)
+ cnt = -cnt;
+ if (cnt > max_dist)
+ goto check_err;
+ }
+ }
+ }
+ return 0;
+check_err:
+ dev_err(sparx5->dev,
+ "Port %u: distance %u above limit %d\n",
+ port, cnt, max_dist);
+ return -EINVAL;
+}
+
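+/* Program the calculated schedule into a taxi calendar: enter programming
+ * mode, write one slot value per index, leave programming mode and verify
+ * the resulting calendar length.
+ */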
+static int sparx5_dsm_calendar_update(struct sparx5 *sparx5, u32 taxi,
+ struct sparx5_calendar_data *data)
+{
+ u32 idx;
+ u32 cal_len = sparx5_dsm_cal_len(data->schedule), len;
+
+ spx5_wr(DSM_TAXI_CAL_CFG_CAL_PGM_ENA_SET(1),
+ sparx5,
+ DSM_TAXI_CAL_CFG(taxi));
+ for (idx = 0; idx < cal_len; idx++) {
+ spx5_rmw(DSM_TAXI_CAL_CFG_CAL_IDX_SET(idx),
+ DSM_TAXI_CAL_CFG_CAL_IDX,
+ sparx5,
+ DSM_TAXI_CAL_CFG(taxi));
+ spx5_rmw(DSM_TAXI_CAL_CFG_CAL_PGM_VAL_SET(data->schedule[idx]),
+ DSM_TAXI_CAL_CFG_CAL_PGM_VAL,
+ sparx5,
+ DSM_TAXI_CAL_CFG(taxi));
+ }
+ spx5_wr(DSM_TAXI_CAL_CFG_CAL_PGM_ENA_SET(0),
+ sparx5,
+ DSM_TAXI_CAL_CFG(taxi));
+ len = DSM_TAXI_CAL_CFG_CAL_CUR_LEN_GET(spx5_rd(sparx5,
+ DSM_TAXI_CAL_CFG(taxi)));
+ if (len != cal_len - 1)
+ goto update_err;
+ return 0;
+update_err:
+ dev_err(sparx5->dev, "Incorrect calendar length: %u\n", len);
+ return -EINVAL;
+}
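+
+/* CAL_CUR_LEN is read back purely as a sanity check; judging from the
+ * comparison above, the hardware field holds the programmed calendar
+ * length minus one, i.e. the last valid calendar index.
+ */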
+
+/* Configure the DSM calendar based on port configuration */
+int sparx5_config_dsm_calendar(struct sparx5 *sparx5)
+{
+ int taxi;
+ struct sparx5_calendar_data *data;
+ int err = 0;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ for (taxi = 0; taxi < SPX5_DSM_CAL_TAXIS; ++taxi) {
+ err = sparx5_dsm_calendar_calc(sparx5, taxi, data);
+ if (err) {
+ dev_err(sparx5->dev, "DSM calendar calculation failed\n");
+ goto cal_out;
+ }
+ err = sparx5_dsm_calendar_check(sparx5, data);
+ if (err) {
+ dev_err(sparx5->dev, "DSM calendar check failed\n");
+ goto cal_out;
+ }
+ err = sparx5_dsm_calendar_update(sparx5, taxi, data);
+ if (err) {
+ dev_err(sparx5->dev, "DSM calendar update failed\n");
+ goto cal_out;
+ }
+ }
+cal_out:
+ kfree(data);
+ return err;
+}
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_dcb.c b/drivers/net/ethernet/microchip/sparx5/sparx5_dcb.c
new file mode 100644
index 0000000000..2d763664dc
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_dcb.c
@@ -0,0 +1,407 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2022 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include <net/dcbnl.h>
+
+#include "sparx5_port.h"
+
+enum sparx5_dcb_apptrust_values {
+ SPARX5_DCB_APPTRUST_EMPTY,
+ SPARX5_DCB_APPTRUST_DSCP,
+ SPARX5_DCB_APPTRUST_PCP,
+ SPARX5_DCB_APPTRUST_DSCP_PCP,
+ __SPARX5_DCB_APPTRUST_MAX
+};
+
+static const struct sparx5_dcb_apptrust {
+ u8 selectors[IEEE_8021QAZ_APP_SEL_MAX + 1];
+ int nselectors;
+} *sparx5_port_apptrust[SPX5_PORTS];
+
+static const char *sparx5_dcb_apptrust_names[__SPARX5_DCB_APPTRUST_MAX] = {
+ [SPARX5_DCB_APPTRUST_EMPTY] = "empty",
+ [SPARX5_DCB_APPTRUST_DSCP] = "dscp",
+ [SPARX5_DCB_APPTRUST_PCP] = "pcp",
+ [SPARX5_DCB_APPTRUST_DSCP_PCP] = "dscp pcp"
+};
+
+/* Sparx5 supported apptrust policies */
+static const struct sparx5_dcb_apptrust
+ sparx5_dcb_apptrust_policies[__SPARX5_DCB_APPTRUST_MAX] = {
+ /* Empty *must* be first */
+ [SPARX5_DCB_APPTRUST_EMPTY] = { { 0 }, 0 },
+ [SPARX5_DCB_APPTRUST_DSCP] = { { IEEE_8021QAZ_APP_SEL_DSCP }, 1 },
+ [SPARX5_DCB_APPTRUST_PCP] = { { DCB_APP_SEL_PCP }, 1 },
+ [SPARX5_DCB_APPTRUST_DSCP_PCP] = { { IEEE_8021QAZ_APP_SEL_DSCP,
+ DCB_APP_SEL_PCP }, 2 },
+};
+
+/* Validate app entry.
+ *
+ * Check for valid selectors and valid protocol and priority ranges.
+ */
+static int sparx5_dcb_app_validate(struct net_device *dev,
+ const struct dcb_app *app)
+{
+ int err = 0;
+
+ switch (app->selector) {
+ /* Default priority checks */
+ case IEEE_8021QAZ_APP_SEL_ETHERTYPE:
+ if (app->protocol != 0)
+ err = -EINVAL;
+ else if (app->priority >= SPX5_PRIOS)
+ err = -ERANGE;
+ break;
+ /* Dscp checks */
+ case IEEE_8021QAZ_APP_SEL_DSCP:
+ if (app->protocol >= SPARX5_PORT_QOS_DSCP_COUNT)
+ err = -EINVAL;
+ else if (app->priority >= SPX5_PRIOS)
+ err = -ERANGE;
+ break;
+ /* Pcp checks */
+ case DCB_APP_SEL_PCP:
+ if (app->protocol >= SPARX5_PORT_QOS_PCP_DEI_COUNT)
+ err = -EINVAL;
+ else if (app->priority >= SPX5_PRIOS)
+ err = -ERANGE;
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ if (err)
+ netdev_err(dev, "Invalid entry: %d:%d\n", app->protocol,
+ app->priority);
+
+ return err;
+}
+
+/* Validate apptrust configuration.
+ *
+ * Return index of supported apptrust configuration if valid, otherwise return
+ * error.
+ */
+static int sparx5_dcb_apptrust_validate(struct net_device *dev, u8 *selectors,
+ int nselectors, int *err)
+{
+ bool match = false;
+ int i, ii;
+
+ for (i = 0; i < ARRAY_SIZE(sparx5_dcb_apptrust_policies); i++) {
+ if (sparx5_dcb_apptrust_policies[i].nselectors != nselectors)
+ continue;
+ match = true;
+ for (ii = 0; ii < nselectors; ii++) {
+			if (sparx5_dcb_apptrust_policies[i].selectors[ii] !=
+			    selectors[ii]) {
+ match = false;
+ break;
+ }
+ }
+ if (match)
+ break;
+ }
+
+ /* Requested trust configuration is not supported */
+ if (!match) {
+ netdev_err(dev, "Valid apptrust configurations are:\n");
+ for (i = 0; i < ARRAY_SIZE(sparx5_dcb_apptrust_names); i++)
+ pr_info("order: %s\n", sparx5_dcb_apptrust_names[i]);
+ *err = -EOPNOTSUPP;
+ }
+
+ return i;
+}
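+
+/* Note that when no policy matches, the index returned above is one past
+ * the end of sparx5_dcb_apptrust_policies (with *err set), so callers
+ * must check *err before using the index.
+ */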
+
+static bool sparx5_dcb_apptrust_contains(int portno, u8 selector)
+{
+ const struct sparx5_dcb_apptrust *conf = sparx5_port_apptrust[portno];
+ int i;
+
+ for (i = 0; i < conf->nselectors; i++)
+ if (conf->selectors[i] == selector)
+ return true;
+
+ return false;
+}
+
+static int sparx5_dcb_app_update(struct net_device *dev)
+{
+ struct dcb_ieee_app_prio_map dscp_rewr_map = {0};
+ struct dcb_rewr_prio_pcp_map pcp_rewr_map = {0};
+ struct sparx5_port *port = netdev_priv(dev);
+ struct sparx5_port_qos_dscp_map *dscp_map;
+ struct sparx5_port_qos_pcp_map *pcp_map;
+ struct sparx5_port_qos qos = {0};
+ struct dcb_app app_itr = {0};
+ int portno = port->portno;
+ bool dscp_rewr = false;
+ bool pcp_rewr = false;
+ u16 dscp;
+ int i;
+
+ dscp_map = &qos.dscp.map;
+ pcp_map = &qos.pcp.map;
+
+ /* Get default prio. */
+ qos.default_prio = dcb_ieee_getapp_default_prio_mask(dev);
+ if (qos.default_prio)
+ qos.default_prio = fls(qos.default_prio) - 1;
+
+ /* Get dscp ingress mapping */
+ for (i = 0; i < ARRAY_SIZE(dscp_map->map); i++) {
+ app_itr.selector = IEEE_8021QAZ_APP_SEL_DSCP;
+ app_itr.protocol = i;
+ dscp_map->map[i] = dcb_getapp(dev, &app_itr);
+ }
+
+ /* Get pcp ingress mapping */
+ for (i = 0; i < ARRAY_SIZE(pcp_map->map); i++) {
+ app_itr.selector = DCB_APP_SEL_PCP;
+ app_itr.protocol = i;
+ pcp_map->map[i] = dcb_getapp(dev, &app_itr);
+ }
+
+ /* Get pcp rewrite mapping */
+ dcb_getrewr_prio_pcp_mask_map(dev, &pcp_rewr_map);
+ for (i = 0; i < ARRAY_SIZE(pcp_rewr_map.map); i++) {
+ if (!pcp_rewr_map.map[i])
+ continue;
+ pcp_rewr = true;
+ qos.pcp_rewr.map.map[i] = fls(pcp_rewr_map.map[i]) - 1;
+ }
+
+ /* Get dscp rewrite mapping */
+ dcb_getrewr_prio_dscp_mask_map(dev, &dscp_rewr_map);
+ for (i = 0; i < ARRAY_SIZE(dscp_rewr_map.map); i++) {
+ if (!dscp_rewr_map.map[i])
+ continue;
+
+ /* The rewrite table of the switch has 32 entries; one for each
+ * priority for each DP level. Currently, the rewrite map does
+ * not indicate DP level, so we map classified QoS class to
+ * classified DSCP, for each classified DP level. Rewrite of
+		 * DSCP is only enabled if we have active mappings.
+ */
+ dscp_rewr = true;
+ dscp = fls64(dscp_rewr_map.map[i]) - 1;
+ qos.dscp_rewr.map.map[i] = dscp; /* DP 0 */
+ qos.dscp_rewr.map.map[i + 8] = dscp; /* DP 1 */
+ qos.dscp_rewr.map.map[i + 16] = dscp; /* DP 2 */
+ qos.dscp_rewr.map.map[i + 24] = dscp; /* DP 3 */
+ }
+
+	/* Enable use of PCP for queue classification? */
+ if (sparx5_dcb_apptrust_contains(portno, DCB_APP_SEL_PCP)) {
+ qos.pcp.qos_enable = true;
+ qos.pcp.dp_enable = qos.pcp.qos_enable;
+ /* Enable rewrite of PCP and DEI if PCP is trusted *and* rewrite
+ * table is not empty.
+ */
+ if (pcp_rewr)
+ qos.pcp_rewr.enable = true;
+ }
+
+	/* Enable use of DSCP for queue classification? */
+ if (sparx5_dcb_apptrust_contains(portno, IEEE_8021QAZ_APP_SEL_DSCP)) {
+ qos.dscp.qos_enable = true;
+ qos.dscp.dp_enable = qos.dscp.qos_enable;
+ if (dscp_rewr)
+ /* Do not enable rewrite if no mappings are active, as
+ * classified DSCP will then be zero for all classified
+ * QoS class and DP combinations.
+ */
+ qos.dscp_rewr.enable = true;
+ }
+
+ return sparx5_port_qos_set(port, &qos);
+}
+
+/* Set or delete DSCP app entry.
+ *
+ * DSCP mapping is global for all ports, so set and delete app entries are
+ * replicated for each port.
+ */
+static int sparx5_dcb_ieee_dscp_setdel(struct net_device *dev,
+ struct dcb_app *app,
+ int (*setdel)(struct net_device *,
+ struct dcb_app *))
+{
+ struct sparx5_port *port = netdev_priv(dev);
+ struct sparx5_port *port_itr;
+ int err, i;
+
+ for (i = 0; i < SPX5_PORTS; i++) {
+ port_itr = port->sparx5->ports[i];
+ if (!port_itr)
+ continue;
+ err = setdel(port_itr->ndev, app);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int sparx5_dcb_ieee_delapp(struct net_device *dev, struct dcb_app *app)
+{
+ int err;
+
+ if (app->selector == IEEE_8021QAZ_APP_SEL_DSCP)
+ err = sparx5_dcb_ieee_dscp_setdel(dev, app, dcb_ieee_delapp);
+ else
+ err = dcb_ieee_delapp(dev, app);
+
+ if (err < 0)
+ return err;
+
+ return sparx5_dcb_app_update(dev);
+}
+
+static int sparx5_dcb_ieee_setapp(struct net_device *dev, struct dcb_app *app)
+{
+ struct dcb_app app_itr;
+ int err = 0;
+ u8 prio;
+
+ err = sparx5_dcb_app_validate(dev, app);
+ if (err)
+ goto out;
+
+ /* Delete current mapping, if it exists */
+ prio = dcb_getapp(dev, app);
+ if (prio) {
+ app_itr = *app;
+ app_itr.priority = prio;
+ sparx5_dcb_ieee_delapp(dev, &app_itr);
+ }
+
+ if (app->selector == IEEE_8021QAZ_APP_SEL_DSCP)
+ err = sparx5_dcb_ieee_dscp_setdel(dev, app, dcb_ieee_setapp);
+ else
+ err = dcb_ieee_setapp(dev, app);
+
+ if (err)
+ goto out;
+
+ sparx5_dcb_app_update(dev);
+
+out:
+ return err;
+}
+
+static int sparx5_dcb_setapptrust(struct net_device *dev, u8 *selectors,
+ int nselectors)
+{
+ struct sparx5_port *port = netdev_priv(dev);
+ int err = 0, idx;
+
+ idx = sparx5_dcb_apptrust_validate(dev, selectors, nselectors, &err);
+ if (err < 0)
+ return err;
+
+ sparx5_port_apptrust[port->portno] = &sparx5_dcb_apptrust_policies[idx];
+
+ return sparx5_dcb_app_update(dev);
+}
+
+static int sparx5_dcb_getapptrust(struct net_device *dev, u8 *selectors,
+ int *nselectors)
+{
+ struct sparx5_port *port = netdev_priv(dev);
+ const struct sparx5_dcb_apptrust *trust;
+
+ trust = sparx5_port_apptrust[port->portno];
+
+ memcpy(selectors, trust->selectors, trust->nselectors);
+ *nselectors = trust->nselectors;
+
+ return 0;
+}
+
+static int sparx5_dcb_delrewr(struct net_device *dev, struct dcb_app *app)
+{
+ int err;
+
+ if (app->selector == IEEE_8021QAZ_APP_SEL_DSCP)
+ err = sparx5_dcb_ieee_dscp_setdel(dev, app, dcb_delrewr);
+ else
+ err = dcb_delrewr(dev, app);
+
+ if (err < 0)
+ return err;
+
+ return sparx5_dcb_app_update(dev);
+}
+
+static int sparx5_dcb_setrewr(struct net_device *dev, struct dcb_app *app)
+{
+ struct dcb_app app_itr;
+ int err = 0;
+ u16 proto;
+
+ err = sparx5_dcb_app_validate(dev, app);
+ if (err)
+ goto out;
+
+ /* Delete current mapping, if it exists. */
+ proto = dcb_getrewr(dev, app);
+ if (proto) {
+ app_itr = *app;
+ app_itr.protocol = proto;
+ sparx5_dcb_delrewr(dev, &app_itr);
+ }
+
+ if (app->selector == IEEE_8021QAZ_APP_SEL_DSCP)
+ err = sparx5_dcb_ieee_dscp_setdel(dev, app, dcb_setrewr);
+ else
+ err = dcb_setrewr(dev, app);
+
+ if (err)
+ goto out;
+
+ sparx5_dcb_app_update(dev);
+
+out:
+ return err;
+}
+
+const struct dcbnl_rtnl_ops sparx5_dcbnl_ops = {
+ .ieee_setapp = sparx5_dcb_ieee_setapp,
+ .ieee_delapp = sparx5_dcb_ieee_delapp,
+ .dcbnl_setapptrust = sparx5_dcb_setapptrust,
+ .dcbnl_getapptrust = sparx5_dcb_getapptrust,
+ .dcbnl_setrewr = sparx5_dcb_setrewr,
+ .dcbnl_delrewr = sparx5_dcb_delrewr,
+};
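+
+/* These hooks are normally exercised from userspace; assuming the
+ * iproute2 "dcb" tool, something like
+ *
+ *   dcb app replace dev eth0 dscp-prio 40:5
+ *   dcb apptrust set dev eth0 order dscp pcp
+ *
+ * would install a DSCP 40 -> priority 5 mapping and select the
+ * "dscp pcp" trust order (device name and values are illustrative).
+ */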
+
+int sparx5_dcb_init(struct sparx5 *sparx5)
+{
+ struct sparx5_port *port;
+ int i;
+
+ for (i = 0; i < SPX5_PORTS; i++) {
+ port = sparx5->ports[i];
+ if (!port)
+ continue;
+ port->ndev->dcbnl_ops = &sparx5_dcbnl_ops;
+ /* Initialize [dscp, pcp] default trust */
+ sparx5_port_apptrust[port->portno] =
+ &sparx5_dcb_apptrust_policies
+ [SPARX5_DCB_APPTRUST_DSCP_PCP];
+
+ /* Enable DSCP classification based on classified QoS class and
+ * DP, for all DSCP values, for all ports.
+ */
+ sparx5_port_qos_dscp_rewr_mode_set(port,
+ SPARX5_PORT_REW_DSCP_ALL);
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c b/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c
new file mode 100644
index 0000000000..01f3a3a41c
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c
@@ -0,0 +1,1264 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include <linux/ethtool.h>
+
+#include "sparx5_main_regs.h"
+#include "sparx5_main.h"
+#include "sparx5_port.h"
+
+/* Index of ANA_AC port counters */
+#define SPX5_PORT_POLICER_DROPS 0
+
+/* Add a potentially wrapping 32-bit value to a 64-bit counter */
+static void sparx5_update_counter(u64 *cnt, u32 val)
+{
+ if (val < (*cnt & U32_MAX))
+ *cnt += (u64)1 << 32; /* value has wrapped */
+ *cnt = (*cnt & ~(u64)U32_MAX) + val;
+}
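+
+/* Example with hypothetical values: if *cnt == 0x1fffffff0 and the fresh
+ * hardware reading is val == 0x10, val is below the stored low 32 bits
+ * (0xfffffff0), so the 32-bit hardware counter must have wrapped; the
+ * high part is bumped by 1 << 32 and the low 32 bits are replaced by
+ * val, giving 0x200000010.
+ */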
+
+enum sparx5_stats_entry {
+ spx5_stats_rx_symbol_err_cnt = 0,
+ spx5_stats_pmac_rx_symbol_err_cnt = 1,
+ spx5_stats_tx_uc_cnt = 2,
+ spx5_stats_pmac_tx_uc_cnt = 3,
+ spx5_stats_tx_mc_cnt = 4,
+ spx5_stats_tx_bc_cnt = 5,
+ spx5_stats_tx_backoff1_cnt = 6,
+ spx5_stats_tx_multi_coll_cnt = 7,
+ spx5_stats_rx_uc_cnt = 8,
+ spx5_stats_pmac_rx_uc_cnt = 9,
+ spx5_stats_rx_mc_cnt = 10,
+ spx5_stats_rx_bc_cnt = 11,
+ spx5_stats_rx_crc_err_cnt = 12,
+ spx5_stats_pmac_rx_crc_err_cnt = 13,
+ spx5_stats_rx_alignment_lost_cnt = 14,
+ spx5_stats_pmac_rx_alignment_lost_cnt = 15,
+ spx5_stats_tx_ok_bytes_cnt = 16,
+ spx5_stats_pmac_tx_ok_bytes_cnt = 17,
+ spx5_stats_tx_defer_cnt = 18,
+ spx5_stats_tx_late_coll_cnt = 19,
+ spx5_stats_tx_xcoll_cnt = 20,
+ spx5_stats_tx_csense_cnt = 21,
+ spx5_stats_rx_ok_bytes_cnt = 22,
+ spx5_stats_pmac_rx_ok_bytes_cnt = 23,
+ spx5_stats_pmac_tx_mc_cnt = 24,
+ spx5_stats_pmac_tx_bc_cnt = 25,
+ spx5_stats_tx_xdefer_cnt = 26,
+ spx5_stats_pmac_rx_mc_cnt = 27,
+ spx5_stats_pmac_rx_bc_cnt = 28,
+ spx5_stats_rx_in_range_len_err_cnt = 29,
+ spx5_stats_pmac_rx_in_range_len_err_cnt = 30,
+ spx5_stats_rx_out_of_range_len_err_cnt = 31,
+ spx5_stats_pmac_rx_out_of_range_len_err_cnt = 32,
+ spx5_stats_rx_oversize_cnt = 33,
+ spx5_stats_pmac_rx_oversize_cnt = 34,
+ spx5_stats_tx_pause_cnt = 35,
+ spx5_stats_pmac_tx_pause_cnt = 36,
+ spx5_stats_rx_pause_cnt = 37,
+ spx5_stats_pmac_rx_pause_cnt = 38,
+ spx5_stats_rx_unsup_opcode_cnt = 39,
+ spx5_stats_pmac_rx_unsup_opcode_cnt = 40,
+ spx5_stats_rx_undersize_cnt = 41,
+ spx5_stats_pmac_rx_undersize_cnt = 42,
+ spx5_stats_rx_fragments_cnt = 43,
+ spx5_stats_pmac_rx_fragments_cnt = 44,
+ spx5_stats_rx_jabbers_cnt = 45,
+ spx5_stats_pmac_rx_jabbers_cnt = 46,
+ spx5_stats_rx_size64_cnt = 47,
+ spx5_stats_pmac_rx_size64_cnt = 48,
+ spx5_stats_rx_size65to127_cnt = 49,
+ spx5_stats_pmac_rx_size65to127_cnt = 50,
+ spx5_stats_rx_size128to255_cnt = 51,
+ spx5_stats_pmac_rx_size128to255_cnt = 52,
+ spx5_stats_rx_size256to511_cnt = 53,
+ spx5_stats_pmac_rx_size256to511_cnt = 54,
+ spx5_stats_rx_size512to1023_cnt = 55,
+ spx5_stats_pmac_rx_size512to1023_cnt = 56,
+ spx5_stats_rx_size1024to1518_cnt = 57,
+ spx5_stats_pmac_rx_size1024to1518_cnt = 58,
+ spx5_stats_rx_size1519tomax_cnt = 59,
+ spx5_stats_pmac_rx_size1519tomax_cnt = 60,
+ spx5_stats_tx_size64_cnt = 61,
+ spx5_stats_pmac_tx_size64_cnt = 62,
+ spx5_stats_tx_size65to127_cnt = 63,
+ spx5_stats_pmac_tx_size65to127_cnt = 64,
+ spx5_stats_tx_size128to255_cnt = 65,
+ spx5_stats_pmac_tx_size128to255_cnt = 66,
+ spx5_stats_tx_size256to511_cnt = 67,
+ spx5_stats_pmac_tx_size256to511_cnt = 68,
+ spx5_stats_tx_size512to1023_cnt = 69,
+ spx5_stats_pmac_tx_size512to1023_cnt = 70,
+ spx5_stats_tx_size1024to1518_cnt = 71,
+ spx5_stats_pmac_tx_size1024to1518_cnt = 72,
+ spx5_stats_tx_size1519tomax_cnt = 73,
+ spx5_stats_pmac_tx_size1519tomax_cnt = 74,
+ spx5_stats_mm_rx_assembly_err_cnt = 75,
+ spx5_stats_mm_rx_assembly_ok_cnt = 76,
+ spx5_stats_mm_rx_merge_frag_cnt = 77,
+ spx5_stats_mm_rx_smd_err_cnt = 78,
+ spx5_stats_mm_tx_pfragment_cnt = 79,
+ spx5_stats_rx_bad_bytes_cnt = 80,
+ spx5_stats_pmac_rx_bad_bytes_cnt = 81,
+ spx5_stats_rx_in_bytes_cnt = 82,
+ spx5_stats_rx_ipg_shrink_cnt = 83,
+ spx5_stats_rx_sync_lost_err_cnt = 84,
+ spx5_stats_rx_tagged_frms_cnt = 85,
+ spx5_stats_rx_untagged_frms_cnt = 86,
+ spx5_stats_tx_out_bytes_cnt = 87,
+ spx5_stats_tx_tagged_frms_cnt = 88,
+ spx5_stats_tx_untagged_frms_cnt = 89,
+ spx5_stats_rx_hih_cksm_err_cnt = 90,
+ spx5_stats_pmac_rx_hih_cksm_err_cnt = 91,
+ spx5_stats_rx_xgmii_prot_err_cnt = 92,
+ spx5_stats_pmac_rx_xgmii_prot_err_cnt = 93,
+ spx5_stats_ana_ac_port_stat_lsb_cnt = 94,
+ spx5_stats_green_p0_rx_fwd = 95,
+ spx5_stats_green_p0_rx_port_drop = 111,
+ spx5_stats_green_p0_tx_port = 127,
+ spx5_stats_rx_local_drop = 143,
+ spx5_stats_tx_local_drop = 144,
+ spx5_stats_count = 145,
+};
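+
+/* The gaps in the numbering above (95, 111, 127) are intentional: each
+ * green_p0 entry is the base of a block of 2 * SPX5_PRIOS = 16
+ * per-priority counters (green and yellow, priorities 0-7), matching the
+ * expanded names in sparx5_stats_layout below.
+ */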
+
+static const char *const sparx5_stats_layout[] = {
+ "mm_rx_assembly_err_cnt",
+ "mm_rx_assembly_ok_cnt",
+ "mm_rx_merge_frag_cnt",
+ "mm_rx_smd_err_cnt",
+ "mm_tx_pfragment_cnt",
+ "rx_bad_bytes_cnt",
+ "pmac_rx_bad_bytes_cnt",
+ "rx_in_bytes_cnt",
+ "rx_ipg_shrink_cnt",
+ "rx_sync_lost_err_cnt",
+ "rx_tagged_frms_cnt",
+ "rx_untagged_frms_cnt",
+ "tx_out_bytes_cnt",
+ "tx_tagged_frms_cnt",
+ "tx_untagged_frms_cnt",
+ "rx_hih_cksm_err_cnt",
+ "pmac_rx_hih_cksm_err_cnt",
+ "rx_xgmii_prot_err_cnt",
+ "pmac_rx_xgmii_prot_err_cnt",
+ "rx_port_policer_drop",
+ "rx_fwd_green_p0",
+ "rx_fwd_green_p1",
+ "rx_fwd_green_p2",
+ "rx_fwd_green_p3",
+ "rx_fwd_green_p4",
+ "rx_fwd_green_p5",
+ "rx_fwd_green_p6",
+ "rx_fwd_green_p7",
+ "rx_fwd_yellow_p0",
+ "rx_fwd_yellow_p1",
+ "rx_fwd_yellow_p2",
+ "rx_fwd_yellow_p3",
+ "rx_fwd_yellow_p4",
+ "rx_fwd_yellow_p5",
+ "rx_fwd_yellow_p6",
+ "rx_fwd_yellow_p7",
+ "rx_port_drop_green_p0",
+ "rx_port_drop_green_p1",
+ "rx_port_drop_green_p2",
+ "rx_port_drop_green_p3",
+ "rx_port_drop_green_p4",
+ "rx_port_drop_green_p5",
+ "rx_port_drop_green_p6",
+ "rx_port_drop_green_p7",
+ "rx_port_drop_yellow_p0",
+ "rx_port_drop_yellow_p1",
+ "rx_port_drop_yellow_p2",
+ "rx_port_drop_yellow_p3",
+ "rx_port_drop_yellow_p4",
+ "rx_port_drop_yellow_p5",
+ "rx_port_drop_yellow_p6",
+ "rx_port_drop_yellow_p7",
+ "tx_port_green_p0",
+ "tx_port_green_p1",
+ "tx_port_green_p2",
+ "tx_port_green_p3",
+ "tx_port_green_p4",
+ "tx_port_green_p5",
+ "tx_port_green_p6",
+ "tx_port_green_p7",
+ "tx_port_yellow_p0",
+ "tx_port_yellow_p1",
+ "tx_port_yellow_p2",
+ "tx_port_yellow_p3",
+ "tx_port_yellow_p4",
+ "tx_port_yellow_p5",
+ "tx_port_yellow_p6",
+ "tx_port_yellow_p7",
+ "rx_local_drop",
+ "tx_local_drop",
+};
+
+static void sparx5_get_queue_sys_stats(struct sparx5 *sparx5, int portno)
+{
+ u64 *portstats;
+ u64 *stats;
+ u32 addr;
+ int idx;
+
+ portstats = &sparx5->stats[portno * sparx5->num_stats];
+ mutex_lock(&sparx5->queue_stats_lock);
+ spx5_wr(XQS_STAT_CFG_STAT_VIEW_SET(portno), sparx5, XQS_STAT_CFG);
+ addr = 0;
+ stats = &portstats[spx5_stats_green_p0_rx_fwd];
+ for (idx = 0; idx < 2 * SPX5_PRIOS; ++idx, ++addr, ++stats)
+ sparx5_update_counter(stats, spx5_rd(sparx5, XQS_CNT(addr)));
+ addr = 16;
+ stats = &portstats[spx5_stats_green_p0_rx_port_drop];
+ for (idx = 0; idx < 2 * SPX5_PRIOS; ++idx, ++addr, ++stats)
+ sparx5_update_counter(stats, spx5_rd(sparx5, XQS_CNT(addr)));
+ addr = 256;
+ stats = &portstats[spx5_stats_green_p0_tx_port];
+ for (idx = 0; idx < 2 * SPX5_PRIOS; ++idx, ++addr, ++stats)
+ sparx5_update_counter(stats, spx5_rd(sparx5, XQS_CNT(addr)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_local_drop],
+ spx5_rd(sparx5, XQS_CNT(32)));
+ sparx5_update_counter(&portstats[spx5_stats_tx_local_drop],
+ spx5_rd(sparx5, XQS_CNT(272)));
+ mutex_unlock(&sparx5->queue_stats_lock);
+}
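+
+/* The XQS counter window is shared between ports: STAT_VIEW selects
+ * which port the XQS_CNT addresses refer to, hence the serialization
+ * with queue_stats_lock above.  Judging from the addresses used, 0-31
+ * hold the per-priority rx forward/drop counters, 256 and up the tx
+ * counters, and 32/272 the local drop counts.
+ */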
+
+static void sparx5_get_ana_ac_stats_stats(struct sparx5 *sparx5, int portno)
+{
+ u64 *portstats = &sparx5->stats[portno * sparx5->num_stats];
+
+ sparx5_update_counter(&portstats[spx5_stats_ana_ac_port_stat_lsb_cnt],
+ spx5_rd(sparx5, ANA_AC_PORT_STAT_LSB_CNT(portno,
+ SPX5_PORT_POLICER_DROPS)));
+}
+
+static void sparx5_get_dev_phy_stats(u64 *portstats, void __iomem *inst,
+				     u32 tinst)
+{
+ sparx5_update_counter(&portstats[spx5_stats_rx_symbol_err_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_RX_SYMBOL_ERR_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_symbol_err_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_PMAC_RX_SYMBOL_ERR_CNT(tinst)));
+}
+
+static void sparx5_get_dev_mac_stats(u64 *portstats, void __iomem *inst,
+				     u32 tinst)
+{
+ sparx5_update_counter(&portstats[spx5_stats_tx_uc_cnt],
+ spx5_inst_rd(inst, DEV5G_TX_UC_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_tx_uc_cnt],
+ spx5_inst_rd(inst, DEV5G_PMAC_TX_UC_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_tx_mc_cnt],
+ spx5_inst_rd(inst, DEV5G_TX_MC_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_tx_bc_cnt],
+ spx5_inst_rd(inst, DEV5G_TX_BC_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_uc_cnt],
+ spx5_inst_rd(inst, DEV5G_RX_UC_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_uc_cnt],
+ spx5_inst_rd(inst, DEV5G_PMAC_RX_UC_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_mc_cnt],
+ spx5_inst_rd(inst, DEV5G_RX_MC_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_bc_cnt],
+ spx5_inst_rd(inst, DEV5G_RX_BC_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_crc_err_cnt],
+ spx5_inst_rd(inst, DEV5G_RX_CRC_ERR_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_crc_err_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_PMAC_RX_CRC_ERR_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_alignment_lost_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_RX_ALIGNMENT_LOST_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_alignment_lost_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_PMAC_RX_ALIGNMENT_LOST_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_tx_ok_bytes_cnt],
+ spx5_inst_rd(inst, DEV5G_TX_OK_BYTES_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_tx_ok_bytes_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_PMAC_TX_OK_BYTES_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_ok_bytes_cnt],
+ spx5_inst_rd(inst, DEV5G_RX_OK_BYTES_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_ok_bytes_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_PMAC_RX_OK_BYTES_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_tx_mc_cnt],
+ spx5_inst_rd(inst, DEV5G_PMAC_TX_MC_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_tx_bc_cnt],
+ spx5_inst_rd(inst, DEV5G_PMAC_TX_BC_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_mc_cnt],
+ spx5_inst_rd(inst, DEV5G_PMAC_RX_MC_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_bc_cnt],
+ spx5_inst_rd(inst, DEV5G_PMAC_RX_BC_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_in_range_len_err_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_RX_IN_RANGE_LEN_ERR_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_in_range_len_err_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_PMAC_RX_IN_RANGE_LEN_ERR_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_out_of_range_len_err_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_RX_OUT_OF_RANGE_LEN_ERR_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_out_of_range_len_err_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_PMAC_RX_OUT_OF_RANGE_LEN_ERR_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_oversize_cnt],
+ spx5_inst_rd(inst, DEV5G_RX_OVERSIZE_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_oversize_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_PMAC_RX_OVERSIZE_CNT(tinst)));
+}
+
+static void sparx5_get_dev_mac_ctrl_stats(u64 *portstats, void __iomem *inst,
+ u32 tinst)
+{
+ sparx5_update_counter(&portstats[spx5_stats_tx_pause_cnt],
+ spx5_inst_rd(inst, DEV5G_TX_PAUSE_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_tx_pause_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_PMAC_TX_PAUSE_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_pause_cnt],
+ spx5_inst_rd(inst, DEV5G_RX_PAUSE_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_pause_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_PMAC_RX_PAUSE_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_unsup_opcode_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_RX_UNSUP_OPCODE_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_unsup_opcode_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_PMAC_RX_UNSUP_OPCODE_CNT(tinst)));
+}
+
+static void sparx5_get_dev_rmon_stats(u64 *portstats, void __iomem *inst,
+				      u32 tinst)
+{
+ sparx5_update_counter(&portstats[spx5_stats_rx_undersize_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_RX_UNDERSIZE_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_undersize_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_PMAC_RX_UNDERSIZE_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_oversize_cnt],
+ spx5_inst_rd(inst, DEV5G_RX_OVERSIZE_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_oversize_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_PMAC_RX_OVERSIZE_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_fragments_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_RX_FRAGMENTS_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_fragments_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_PMAC_RX_FRAGMENTS_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_jabbers_cnt],
+ spx5_inst_rd(inst, DEV5G_RX_JABBERS_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_jabbers_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_PMAC_RX_JABBERS_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_size64_cnt],
+ spx5_inst_rd(inst, DEV5G_RX_SIZE64_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size64_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_PMAC_RX_SIZE64_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_size65to127_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_RX_SIZE65TO127_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size65to127_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_PMAC_RX_SIZE65TO127_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_size128to255_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_RX_SIZE128TO255_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size128to255_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_PMAC_RX_SIZE128TO255_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_size256to511_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_RX_SIZE256TO511_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size256to511_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_PMAC_RX_SIZE256TO511_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_size512to1023_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_RX_SIZE512TO1023_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size512to1023_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_PMAC_RX_SIZE512TO1023_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_size1024to1518_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_RX_SIZE1024TO1518_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size1024to1518_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_PMAC_RX_SIZE1024TO1518_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_size1519tomax_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_RX_SIZE1519TOMAX_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size1519tomax_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_PMAC_RX_SIZE1519TOMAX_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_tx_size64_cnt],
+ spx5_inst_rd(inst, DEV5G_TX_SIZE64_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size64_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_PMAC_TX_SIZE64_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_tx_size65to127_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_TX_SIZE65TO127_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size65to127_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_PMAC_TX_SIZE65TO127_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_tx_size128to255_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_TX_SIZE128TO255_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size128to255_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_PMAC_TX_SIZE128TO255_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_tx_size256to511_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_TX_SIZE256TO511_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size256to511_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_PMAC_TX_SIZE256TO511_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_tx_size512to1023_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_TX_SIZE512TO1023_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size512to1023_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_PMAC_TX_SIZE512TO1023_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_tx_size1024to1518_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_TX_SIZE1024TO1518_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size1024to1518_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_PMAC_TX_SIZE1024TO1518_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_tx_size1519tomax_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_TX_SIZE1519TOMAX_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size1519tomax_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_PMAC_TX_SIZE1519TOMAX_CNT(tinst)));
+}
+
+static void sparx5_get_dev_misc_stats(u64 *portstats, void __iomem *inst,
+				      u32 tinst)
+{
+ sparx5_update_counter(&portstats[spx5_stats_mm_rx_assembly_err_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_MM_RX_ASSEMBLY_ERR_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_mm_rx_assembly_ok_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_MM_RX_ASSEMBLY_OK_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_mm_rx_merge_frag_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_MM_RX_MERGE_FRAG_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_mm_rx_smd_err_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_MM_RX_SMD_ERR_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_mm_tx_pfragment_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_MM_TX_PFRAGMENT_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_bad_bytes_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_RX_BAD_BYTES_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_bad_bytes_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_PMAC_RX_BAD_BYTES_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_in_bytes_cnt],
+ spx5_inst_rd(inst, DEV5G_RX_IN_BYTES_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_ipg_shrink_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_RX_IPG_SHRINK_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_tagged_frms_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_RX_TAGGED_FRMS_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_untagged_frms_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_RX_UNTAGGED_FRMS_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_tx_out_bytes_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_TX_OUT_BYTES_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_tx_tagged_frms_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_TX_TAGGED_FRMS_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_tx_untagged_frms_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_TX_UNTAGGED_FRMS_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_hih_cksm_err_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_RX_HIH_CKSM_ERR_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_hih_cksm_err_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_PMAC_RX_HIH_CKSM_ERR_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_xgmii_prot_err_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_RX_XGMII_PROT_ERR_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_xgmii_prot_err_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_PMAC_RX_XGMII_PROT_ERR_CNT(tinst)));
+}
+
+static void sparx5_get_device_stats(struct sparx5 *sparx5, int portno)
+{
+ u64 *portstats = &sparx5->stats[portno * sparx5->num_stats];
+ u32 tinst = sparx5_port_dev_index(portno);
+ u32 dev = sparx5_to_high_dev(portno);
+ void __iomem *inst;
+
+ inst = spx5_inst_get(sparx5, dev, tinst);
+ sparx5_get_dev_phy_stats(portstats, inst, tinst);
+ sparx5_get_dev_mac_stats(portstats, inst, tinst);
+ sparx5_get_dev_mac_ctrl_stats(portstats, inst, tinst);
+ sparx5_get_dev_rmon_stats(portstats, inst, tinst);
+ sparx5_get_dev_misc_stats(portstats, inst, tinst);
+}
+
+static void sparx5_get_asm_phy_stats(u64 *portstats, void __iomem *inst,
+				     int portno)
+{
+ sparx5_update_counter(&portstats[spx5_stats_rx_symbol_err_cnt],
+ spx5_inst_rd(inst,
+ ASM_RX_SYMBOL_ERR_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_symbol_err_cnt],
+ spx5_inst_rd(inst,
+ ASM_PMAC_RX_SYMBOL_ERR_CNT(portno)));
+}
+
+static void sparx5_get_asm_mac_stats(u64 *portstats, void __iomem *inst,
+				     int portno)
+{
+ sparx5_update_counter(&portstats[spx5_stats_tx_uc_cnt],
+ spx5_inst_rd(inst, ASM_TX_UC_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_tx_uc_cnt],
+ spx5_inst_rd(inst, ASM_PMAC_TX_UC_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_tx_mc_cnt],
+ spx5_inst_rd(inst, ASM_TX_MC_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_tx_bc_cnt],
+ spx5_inst_rd(inst, ASM_TX_BC_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_tx_backoff1_cnt],
+ spx5_inst_rd(inst, ASM_TX_BACKOFF1_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_tx_multi_coll_cnt],
+ spx5_inst_rd(inst,
+ ASM_TX_MULTI_COLL_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_uc_cnt],
+ spx5_inst_rd(inst, ASM_RX_UC_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_uc_cnt],
+ spx5_inst_rd(inst, ASM_PMAC_RX_UC_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_mc_cnt],
+ spx5_inst_rd(inst, ASM_RX_MC_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_bc_cnt],
+ spx5_inst_rd(inst, ASM_RX_BC_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_crc_err_cnt],
+ spx5_inst_rd(inst, ASM_RX_CRC_ERR_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_crc_err_cnt],
+ spx5_inst_rd(inst,
+ ASM_PMAC_RX_CRC_ERR_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_alignment_lost_cnt],
+ spx5_inst_rd(inst,
+ ASM_RX_ALIGNMENT_LOST_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_alignment_lost_cnt],
+ spx5_inst_rd(inst,
+ ASM_PMAC_RX_ALIGNMENT_LOST_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_tx_ok_bytes_cnt],
+ spx5_inst_rd(inst, ASM_TX_OK_BYTES_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_tx_ok_bytes_cnt],
+ spx5_inst_rd(inst,
+ ASM_PMAC_TX_OK_BYTES_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_tx_defer_cnt],
+ spx5_inst_rd(inst, ASM_TX_DEFER_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_tx_late_coll_cnt],
+ spx5_inst_rd(inst, ASM_TX_LATE_COLL_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_tx_xcoll_cnt],
+ spx5_inst_rd(inst, ASM_TX_XCOLL_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_tx_csense_cnt],
+ spx5_inst_rd(inst, ASM_TX_CSENSE_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_ok_bytes_cnt],
+ spx5_inst_rd(inst, ASM_RX_OK_BYTES_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_ok_bytes_cnt],
+ spx5_inst_rd(inst,
+ ASM_PMAC_RX_OK_BYTES_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_tx_mc_cnt],
+ spx5_inst_rd(inst, ASM_PMAC_TX_MC_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_tx_bc_cnt],
+ spx5_inst_rd(inst, ASM_PMAC_TX_BC_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_tx_xdefer_cnt],
+ spx5_inst_rd(inst, ASM_TX_XDEFER_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_mc_cnt],
+ spx5_inst_rd(inst, ASM_PMAC_RX_MC_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_bc_cnt],
+ spx5_inst_rd(inst, ASM_PMAC_RX_BC_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_in_range_len_err_cnt],
+ spx5_inst_rd(inst,
+ ASM_RX_IN_RANGE_LEN_ERR_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_in_range_len_err_cnt],
+ spx5_inst_rd(inst,
+ ASM_PMAC_RX_IN_RANGE_LEN_ERR_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_out_of_range_len_err_cnt],
+ spx5_inst_rd(inst,
+ ASM_RX_OUT_OF_RANGE_LEN_ERR_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_out_of_range_len_err_cnt],
+ spx5_inst_rd(inst,
+ ASM_PMAC_RX_OUT_OF_RANGE_LEN_ERR_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_oversize_cnt],
+ spx5_inst_rd(inst, ASM_RX_OVERSIZE_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_oversize_cnt],
+ spx5_inst_rd(inst,
+ ASM_PMAC_RX_OVERSIZE_CNT(portno)));
+}
+
+static void sparx5_get_asm_mac_ctrl_stats(u64 *portstats, void __iomem *inst,
+ int portno)
+{
+ sparx5_update_counter(&portstats[spx5_stats_tx_pause_cnt],
+ spx5_inst_rd(inst, ASM_TX_PAUSE_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_tx_pause_cnt],
+ spx5_inst_rd(inst,
+ ASM_PMAC_TX_PAUSE_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_pause_cnt],
+ spx5_inst_rd(inst, ASM_RX_PAUSE_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_pause_cnt],
+ spx5_inst_rd(inst,
+ ASM_PMAC_RX_PAUSE_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_unsup_opcode_cnt],
+ spx5_inst_rd(inst,
+ ASM_RX_UNSUP_OPCODE_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_unsup_opcode_cnt],
+ spx5_inst_rd(inst,
+ ASM_PMAC_RX_UNSUP_OPCODE_CNT(portno)));
+}
+
+static void sparx5_get_asm_rmon_stats(u64 *portstats, void __iomem *inst,
+				      int portno)
+{
+ sparx5_update_counter(&portstats[spx5_stats_rx_undersize_cnt],
+ spx5_inst_rd(inst, ASM_RX_UNDERSIZE_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_undersize_cnt],
+ spx5_inst_rd(inst,
+ ASM_PMAC_RX_UNDERSIZE_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_oversize_cnt],
+ spx5_inst_rd(inst, ASM_RX_OVERSIZE_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_oversize_cnt],
+ spx5_inst_rd(inst,
+ ASM_PMAC_RX_OVERSIZE_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_fragments_cnt],
+ spx5_inst_rd(inst, ASM_RX_FRAGMENTS_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_fragments_cnt],
+ spx5_inst_rd(inst,
+ ASM_PMAC_RX_FRAGMENTS_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_jabbers_cnt],
+ spx5_inst_rd(inst, ASM_RX_JABBERS_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_jabbers_cnt],
+ spx5_inst_rd(inst,
+ ASM_PMAC_RX_JABBERS_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_size64_cnt],
+ spx5_inst_rd(inst, ASM_RX_SIZE64_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size64_cnt],
+ spx5_inst_rd(inst,
+ ASM_PMAC_RX_SIZE64_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_size65to127_cnt],
+ spx5_inst_rd(inst,
+ ASM_RX_SIZE65TO127_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size65to127_cnt],
+ spx5_inst_rd(inst,
+ ASM_PMAC_RX_SIZE65TO127_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_size128to255_cnt],
+ spx5_inst_rd(inst,
+ ASM_RX_SIZE128TO255_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size128to255_cnt],
+ spx5_inst_rd(inst,
+ ASM_PMAC_RX_SIZE128TO255_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_size256to511_cnt],
+ spx5_inst_rd(inst,
+ ASM_RX_SIZE256TO511_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size256to511_cnt],
+ spx5_inst_rd(inst,
+ ASM_PMAC_RX_SIZE256TO511_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_size512to1023_cnt],
+ spx5_inst_rd(inst,
+ ASM_RX_SIZE512TO1023_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size512to1023_cnt],
+ spx5_inst_rd(inst,
+ ASM_PMAC_RX_SIZE512TO1023_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_size1024to1518_cnt],
+ spx5_inst_rd(inst,
+ ASM_RX_SIZE1024TO1518_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size1024to1518_cnt],
+ spx5_inst_rd(inst,
+ ASM_PMAC_RX_SIZE1024TO1518_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_size1519tomax_cnt],
+ spx5_inst_rd(inst,
+ ASM_RX_SIZE1519TOMAX_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size1519tomax_cnt],
+ spx5_inst_rd(inst,
+ ASM_PMAC_RX_SIZE1519TOMAX_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_tx_size64_cnt],
+ spx5_inst_rd(inst, ASM_TX_SIZE64_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size64_cnt],
+ spx5_inst_rd(inst,
+ ASM_PMAC_TX_SIZE64_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_tx_size65to127_cnt],
+ spx5_inst_rd(inst,
+ ASM_TX_SIZE65TO127_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size65to127_cnt],
+ spx5_inst_rd(inst,
+ ASM_PMAC_TX_SIZE65TO127_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_tx_size128to255_cnt],
+ spx5_inst_rd(inst,
+ ASM_TX_SIZE128TO255_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size128to255_cnt],
+ spx5_inst_rd(inst,
+ ASM_PMAC_TX_SIZE128TO255_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_tx_size256to511_cnt],
+ spx5_inst_rd(inst,
+ ASM_TX_SIZE256TO511_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size256to511_cnt],
+ spx5_inst_rd(inst,
+ ASM_PMAC_TX_SIZE256TO511_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_tx_size512to1023_cnt],
+ spx5_inst_rd(inst,
+ ASM_TX_SIZE512TO1023_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size512to1023_cnt],
+ spx5_inst_rd(inst,
+ ASM_PMAC_TX_SIZE512TO1023_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_tx_size1024to1518_cnt],
+ spx5_inst_rd(inst,
+ ASM_TX_SIZE1024TO1518_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size1024to1518_cnt],
+ spx5_inst_rd(inst,
+ ASM_PMAC_TX_SIZE1024TO1518_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_tx_size1519tomax_cnt],
+ spx5_inst_rd(inst,
+ ASM_TX_SIZE1519TOMAX_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size1519tomax_cnt],
+ spx5_inst_rd(inst,
+ ASM_PMAC_TX_SIZE1519TOMAX_CNT(portno)));
+}
+
+static void sparx5_get_asm_misc_stats(u64 *portstats, void __iomem *inst,
+				      int portno)
+{
+ sparx5_update_counter(&portstats[spx5_stats_mm_rx_assembly_err_cnt],
+ spx5_inst_rd(inst,
+ ASM_MM_RX_ASSEMBLY_ERR_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_mm_rx_assembly_ok_cnt],
+ spx5_inst_rd(inst,
+ ASM_MM_RX_ASSEMBLY_OK_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_mm_rx_merge_frag_cnt],
+ spx5_inst_rd(inst,
+ ASM_MM_RX_MERGE_FRAG_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_mm_rx_smd_err_cnt],
+ spx5_inst_rd(inst,
+ ASM_MM_RX_SMD_ERR_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_mm_tx_pfragment_cnt],
+ spx5_inst_rd(inst,
+ ASM_MM_TX_PFRAGMENT_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_bad_bytes_cnt],
+ spx5_inst_rd(inst, ASM_RX_BAD_BYTES_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_bad_bytes_cnt],
+ spx5_inst_rd(inst,
+ ASM_PMAC_RX_BAD_BYTES_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_in_bytes_cnt],
+ spx5_inst_rd(inst, ASM_RX_IN_BYTES_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_ipg_shrink_cnt],
+ spx5_inst_rd(inst,
+ ASM_RX_IPG_SHRINK_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_sync_lost_err_cnt],
+ spx5_inst_rd(inst,
+ ASM_RX_SYNC_LOST_ERR_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_tagged_frms_cnt],
+ spx5_inst_rd(inst,
+ ASM_RX_TAGGED_FRMS_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_untagged_frms_cnt],
+ spx5_inst_rd(inst,
+ ASM_RX_UNTAGGED_FRMS_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_tx_out_bytes_cnt],
+ spx5_inst_rd(inst, ASM_TX_OUT_BYTES_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_tx_tagged_frms_cnt],
+ spx5_inst_rd(inst,
+ ASM_TX_TAGGED_FRMS_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_tx_untagged_frms_cnt],
+ spx5_inst_rd(inst,
+ ASM_TX_UNTAGGED_FRMS_CNT(portno)));
+}
+
+static void sparx5_get_asm_stats(struct sparx5 *sparx5, int portno)
+{
+ u64 *portstats = &sparx5->stats[portno * sparx5->num_stats];
+ void __iomem *inst = spx5_inst_get(sparx5, TARGET_ASM, 0);
+
+ sparx5_get_asm_phy_stats(portstats, inst, portno);
+ sparx5_get_asm_mac_stats(portstats, inst, portno);
+ sparx5_get_asm_mac_ctrl_stats(portstats, inst, portno);
+ sparx5_get_asm_rmon_stats(portstats, inst, portno);
+ sparx5_get_asm_misc_stats(portstats, inst, portno);
+}
+
+static const struct ethtool_rmon_hist_range sparx5_rmon_ranges[] = {
+ { 0, 64 },
+ { 65, 127 },
+ { 128, 255 },
+ { 256, 511 },
+ { 512, 1023 },
+ { 1024, 1518 },
+ { 1519, 10239 },
+ {}
+};
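+
+/* Each range above corresponds, in order, to one bucket of the hist[]
+ * and hist_tx[] arrays filled in by sparx5_get_eth_rmon_stats(); the
+ * terminating empty entry marks the end of the list for the ethtool
+ * core.
+ */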
+
+static void sparx5_get_eth_phy_stats(struct net_device *ndev,
+ struct ethtool_eth_phy_stats *phy_stats)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ struct sparx5 *sparx5 = port->sparx5;
+ int portno = port->portno;
+ void __iomem *inst;
+ u64 *portstats;
+
+ portstats = &sparx5->stats[portno * sparx5->num_stats];
+ if (sparx5_is_baser(port->conf.portmode)) {
+ u32 tinst = sparx5_port_dev_index(portno);
+ u32 dev = sparx5_to_high_dev(portno);
+
+ inst = spx5_inst_get(sparx5, dev, tinst);
+ sparx5_get_dev_phy_stats(portstats, inst, tinst);
+ } else {
+ inst = spx5_inst_get(sparx5, TARGET_ASM, 0);
+ sparx5_get_asm_phy_stats(portstats, inst, portno);
+ }
+ phy_stats->SymbolErrorDuringCarrier =
+ portstats[spx5_stats_rx_symbol_err_cnt] +
+ portstats[spx5_stats_pmac_rx_symbol_err_cnt];
+}
+
+static void sparx5_get_eth_mac_stats(struct net_device *ndev,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ struct sparx5 *sparx5 = port->sparx5;
+ int portno = port->portno;
+ void __iomem *inst;
+ u64 *portstats;
+
+ portstats = &sparx5->stats[portno * sparx5->num_stats];
+ if (sparx5_is_baser(port->conf.portmode)) {
+ u32 tinst = sparx5_port_dev_index(portno);
+ u32 dev = sparx5_to_high_dev(portno);
+
+ inst = spx5_inst_get(sparx5, dev, tinst);
+ sparx5_get_dev_mac_stats(portstats, inst, tinst);
+ } else {
+ inst = spx5_inst_get(sparx5, TARGET_ASM, 0);
+ sparx5_get_asm_mac_stats(portstats, inst, portno);
+ }
+ mac_stats->FramesTransmittedOK = portstats[spx5_stats_tx_uc_cnt] +
+ portstats[spx5_stats_pmac_tx_uc_cnt] +
+ portstats[spx5_stats_tx_mc_cnt] +
+ portstats[spx5_stats_tx_bc_cnt];
+ mac_stats->SingleCollisionFrames =
+ portstats[spx5_stats_tx_backoff1_cnt];
+ mac_stats->MultipleCollisionFrames =
+ portstats[spx5_stats_tx_multi_coll_cnt];
+ mac_stats->FramesReceivedOK = portstats[spx5_stats_rx_uc_cnt] +
+ portstats[spx5_stats_pmac_rx_uc_cnt] +
+ portstats[spx5_stats_rx_mc_cnt] +
+ portstats[spx5_stats_rx_bc_cnt];
+ mac_stats->FrameCheckSequenceErrors =
+ portstats[spx5_stats_rx_crc_err_cnt] +
+ portstats[spx5_stats_pmac_rx_crc_err_cnt];
+ mac_stats->AlignmentErrors = portstats[spx5_stats_rx_alignment_lost_cnt]
+ + portstats[spx5_stats_pmac_rx_alignment_lost_cnt];
+ mac_stats->OctetsTransmittedOK = portstats[spx5_stats_tx_ok_bytes_cnt] +
+ portstats[spx5_stats_pmac_tx_ok_bytes_cnt];
+ mac_stats->FramesWithDeferredXmissions =
+ portstats[spx5_stats_tx_defer_cnt];
+ mac_stats->LateCollisions =
+ portstats[spx5_stats_tx_late_coll_cnt];
+ mac_stats->FramesAbortedDueToXSColls =
+ portstats[spx5_stats_tx_xcoll_cnt];
+ mac_stats->CarrierSenseErrors = portstats[spx5_stats_tx_csense_cnt];
+ mac_stats->OctetsReceivedOK = portstats[spx5_stats_rx_ok_bytes_cnt] +
+ portstats[spx5_stats_pmac_rx_ok_bytes_cnt];
+ mac_stats->MulticastFramesXmittedOK = portstats[spx5_stats_tx_mc_cnt] +
+ portstats[spx5_stats_pmac_tx_mc_cnt];
+ mac_stats->BroadcastFramesXmittedOK = portstats[spx5_stats_tx_bc_cnt] +
+ portstats[spx5_stats_pmac_tx_bc_cnt];
+ mac_stats->FramesWithExcessiveDeferral =
+ portstats[spx5_stats_tx_xdefer_cnt];
+ mac_stats->MulticastFramesReceivedOK = portstats[spx5_stats_rx_mc_cnt] +
+ portstats[spx5_stats_pmac_rx_mc_cnt];
+ mac_stats->BroadcastFramesReceivedOK = portstats[spx5_stats_rx_bc_cnt] +
+ portstats[spx5_stats_pmac_rx_bc_cnt];
+ mac_stats->InRangeLengthErrors =
+ portstats[spx5_stats_rx_in_range_len_err_cnt] +
+ portstats[spx5_stats_pmac_rx_in_range_len_err_cnt];
+ mac_stats->OutOfRangeLengthField =
+ portstats[spx5_stats_rx_out_of_range_len_err_cnt] +
+ portstats[spx5_stats_pmac_rx_out_of_range_len_err_cnt];
+ mac_stats->FrameTooLongErrors = portstats[spx5_stats_rx_oversize_cnt] +
+ portstats[spx5_stats_pmac_rx_oversize_cnt];
+}
+
+static void sparx5_get_eth_mac_ctrl_stats(struct net_device *ndev,
+ struct ethtool_eth_ctrl_stats *mac_ctrl_stats)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ struct sparx5 *sparx5 = port->sparx5;
+ int portno = port->portno;
+ void __iomem *inst;
+ u64 *portstats;
+
+ portstats = &sparx5->stats[portno * sparx5->num_stats];
+ if (sparx5_is_baser(port->conf.portmode)) {
+ u32 tinst = sparx5_port_dev_index(portno);
+ u32 dev = sparx5_to_high_dev(portno);
+
+ inst = spx5_inst_get(sparx5, dev, tinst);
+ sparx5_get_dev_mac_ctrl_stats(portstats, inst, tinst);
+ } else {
+ inst = spx5_inst_get(sparx5, TARGET_ASM, 0);
+ sparx5_get_asm_mac_ctrl_stats(portstats, inst, portno);
+ }
+ mac_ctrl_stats->MACControlFramesTransmitted =
+ portstats[spx5_stats_tx_pause_cnt] +
+ portstats[spx5_stats_pmac_tx_pause_cnt];
+ mac_ctrl_stats->MACControlFramesReceived =
+ portstats[spx5_stats_rx_pause_cnt] +
+ portstats[spx5_stats_pmac_rx_pause_cnt];
+ mac_ctrl_stats->UnsupportedOpcodesReceived =
+ portstats[spx5_stats_rx_unsup_opcode_cnt] +
+ portstats[spx5_stats_pmac_rx_unsup_opcode_cnt];
+}
+
+static void sparx5_get_eth_rmon_stats(struct net_device *ndev,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ struct sparx5 *sparx5 = port->sparx5;
+ int portno = port->portno;
+ void __iomem *inst;
+ u64 *portstats;
+
+ portstats = &sparx5->stats[portno * sparx5->num_stats];
+ if (sparx5_is_baser(port->conf.portmode)) {
+ u32 tinst = sparx5_port_dev_index(portno);
+ u32 dev = sparx5_to_high_dev(portno);
+
+ inst = spx5_inst_get(sparx5, dev, tinst);
+ sparx5_get_dev_rmon_stats(portstats, inst, tinst);
+ } else {
+ inst = spx5_inst_get(sparx5, TARGET_ASM, 0);
+ sparx5_get_asm_rmon_stats(portstats, inst, portno);
+ }
+ rmon_stats->undersize_pkts = portstats[spx5_stats_rx_undersize_cnt] +
+ portstats[spx5_stats_pmac_rx_undersize_cnt];
+ rmon_stats->oversize_pkts = portstats[spx5_stats_rx_oversize_cnt] +
+ portstats[spx5_stats_pmac_rx_oversize_cnt];
+ rmon_stats->fragments = portstats[spx5_stats_rx_fragments_cnt] +
+ portstats[spx5_stats_pmac_rx_fragments_cnt];
+ rmon_stats->jabbers = portstats[spx5_stats_rx_jabbers_cnt] +
+ portstats[spx5_stats_pmac_rx_jabbers_cnt];
+ rmon_stats->hist[0] = portstats[spx5_stats_rx_size64_cnt] +
+ portstats[spx5_stats_pmac_rx_size64_cnt];
+ rmon_stats->hist[1] = portstats[spx5_stats_rx_size65to127_cnt] +
+ portstats[spx5_stats_pmac_rx_size65to127_cnt];
+ rmon_stats->hist[2] = portstats[spx5_stats_rx_size128to255_cnt] +
+ portstats[spx5_stats_pmac_rx_size128to255_cnt];
+ rmon_stats->hist[3] = portstats[spx5_stats_rx_size256to511_cnt] +
+ portstats[spx5_stats_pmac_rx_size256to511_cnt];
+ rmon_stats->hist[4] = portstats[spx5_stats_rx_size512to1023_cnt] +
+ portstats[spx5_stats_pmac_rx_size512to1023_cnt];
+ rmon_stats->hist[5] = portstats[spx5_stats_rx_size1024to1518_cnt] +
+ portstats[spx5_stats_pmac_rx_size1024to1518_cnt];
+ rmon_stats->hist[6] = portstats[spx5_stats_rx_size1519tomax_cnt] +
+ portstats[spx5_stats_pmac_rx_size1519tomax_cnt];
+ rmon_stats->hist_tx[0] = portstats[spx5_stats_tx_size64_cnt] +
+ portstats[spx5_stats_pmac_tx_size64_cnt];
+ rmon_stats->hist_tx[1] = portstats[spx5_stats_tx_size65to127_cnt] +
+ portstats[spx5_stats_pmac_tx_size65to127_cnt];
+ rmon_stats->hist_tx[2] = portstats[spx5_stats_tx_size128to255_cnt] +
+ portstats[spx5_stats_pmac_tx_size128to255_cnt];
+ rmon_stats->hist_tx[3] = portstats[spx5_stats_tx_size256to511_cnt] +
+ portstats[spx5_stats_pmac_tx_size256to511_cnt];
+ rmon_stats->hist_tx[4] = portstats[spx5_stats_tx_size512to1023_cnt] +
+ portstats[spx5_stats_pmac_tx_size512to1023_cnt];
+ rmon_stats->hist_tx[5] = portstats[spx5_stats_tx_size1024to1518_cnt] +
+ portstats[spx5_stats_pmac_tx_size1024to1518_cnt];
+ rmon_stats->hist_tx[6] = portstats[spx5_stats_tx_size1519tomax_cnt] +
+ portstats[spx5_stats_pmac_tx_size1519tomax_cnt];
+ *ranges = sparx5_rmon_ranges;
+}
+
+static int sparx5_get_sset_count(struct net_device *ndev, int sset)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ struct sparx5 *sparx5 = port->sparx5;
+
+ if (sset != ETH_SS_STATS)
+ return -EOPNOTSUPP;
+ return sparx5->num_ethtool_stats;
+}
+
+static void sparx5_get_sset_strings(struct net_device *ndev, u32 sset, u8 *data)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ struct sparx5 *sparx5 = port->sparx5;
+ int idx;
+
+ if (sset != ETH_SS_STATS)
+ return;
+
+ for (idx = 0; idx < sparx5->num_ethtool_stats; idx++)
+ strncpy(data + idx * ETH_GSTRING_LEN,
+ sparx5->stats_layout[idx], ETH_GSTRING_LEN);
+}
+
+static void sparx5_get_sset_data(struct net_device *ndev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ struct sparx5 *sparx5 = port->sparx5;
+ int portno = port->portno;
+ void __iomem *inst;
+ u64 *portstats;
+ int idx;
+
+ portstats = &sparx5->stats[portno * sparx5->num_stats];
+ if (sparx5_is_baser(port->conf.portmode)) {
+ u32 tinst = sparx5_port_dev_index(portno);
+ u32 dev = sparx5_to_high_dev(portno);
+
+ inst = spx5_inst_get(sparx5, dev, tinst);
+ sparx5_get_dev_misc_stats(portstats, inst, tinst);
+ } else {
+ inst = spx5_inst_get(sparx5, TARGET_ASM, 0);
+ sparx5_get_asm_misc_stats(portstats, inst, portno);
+ }
+ sparx5_get_ana_ac_stats_stats(sparx5, portno);
+ sparx5_get_queue_sys_stats(sparx5, portno);
+ /* Copy port counters to the ethtool buffer */
+ for (idx = spx5_stats_mm_rx_assembly_err_cnt;
+ idx < spx5_stats_mm_rx_assembly_err_cnt +
+ sparx5->num_ethtool_stats; idx++)
+ *data++ = portstats[idx];
+}
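+
+/* Judging from the copy loop above, only the tail of the counter array,
+ * starting at spx5_stats_mm_rx_assembly_err_cnt, is exported through the
+ * ethtool string set; the earlier per-MAC counters are surfaced via the
+ * standard ethtool_eth_*_stats and rtnl_link_stats64 callbacks instead.
+ */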
+
+void sparx5_get_stats64(struct net_device *ndev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ struct sparx5 *sparx5 = port->sparx5;
+ u64 *portstats;
+ int idx;
+
+ if (!sparx5->stats)
+ return; /* Not initialized yet */
+
+ portstats = &sparx5->stats[port->portno * sparx5->num_stats];
+
+ stats->rx_packets = portstats[spx5_stats_rx_uc_cnt] +
+ portstats[spx5_stats_pmac_rx_uc_cnt] +
+ portstats[spx5_stats_rx_mc_cnt] +
+ portstats[spx5_stats_rx_bc_cnt];
+ stats->tx_packets = portstats[spx5_stats_tx_uc_cnt] +
+ portstats[spx5_stats_pmac_tx_uc_cnt] +
+ portstats[spx5_stats_tx_mc_cnt] +
+ portstats[spx5_stats_tx_bc_cnt];
+ stats->rx_bytes = portstats[spx5_stats_rx_ok_bytes_cnt] +
+ portstats[spx5_stats_pmac_rx_ok_bytes_cnt];
+ stats->tx_bytes = portstats[spx5_stats_tx_ok_bytes_cnt] +
+ portstats[spx5_stats_pmac_tx_ok_bytes_cnt];
+ stats->rx_errors = portstats[spx5_stats_rx_in_range_len_err_cnt] +
+ portstats[spx5_stats_pmac_rx_in_range_len_err_cnt] +
+ portstats[spx5_stats_rx_out_of_range_len_err_cnt] +
+ portstats[spx5_stats_pmac_rx_out_of_range_len_err_cnt] +
+ portstats[spx5_stats_rx_oversize_cnt] +
+ portstats[spx5_stats_pmac_rx_oversize_cnt] +
+ portstats[spx5_stats_rx_crc_err_cnt] +
+ portstats[spx5_stats_pmac_rx_crc_err_cnt] +
+ portstats[spx5_stats_rx_alignment_lost_cnt] +
+ portstats[spx5_stats_pmac_rx_alignment_lost_cnt];
+ stats->tx_errors = portstats[spx5_stats_tx_xcoll_cnt] +
+ portstats[spx5_stats_tx_csense_cnt] +
+ portstats[spx5_stats_tx_late_coll_cnt];
+ stats->multicast = portstats[spx5_stats_rx_mc_cnt] +
+ portstats[spx5_stats_pmac_rx_mc_cnt];
+ stats->collisions = portstats[spx5_stats_tx_late_coll_cnt] +
+ portstats[spx5_stats_tx_xcoll_cnt] +
+ portstats[spx5_stats_tx_backoff1_cnt];
+ stats->rx_length_errors = portstats[spx5_stats_rx_in_range_len_err_cnt] +
+ portstats[spx5_stats_pmac_rx_in_range_len_err_cnt] +
+ portstats[spx5_stats_rx_out_of_range_len_err_cnt] +
+ portstats[spx5_stats_pmac_rx_out_of_range_len_err_cnt] +
+ portstats[spx5_stats_rx_oversize_cnt] +
+ portstats[spx5_stats_pmac_rx_oversize_cnt];
+ stats->rx_crc_errors = portstats[spx5_stats_rx_crc_err_cnt] +
+ portstats[spx5_stats_pmac_rx_crc_err_cnt];
+ stats->rx_frame_errors = portstats[spx5_stats_rx_alignment_lost_cnt] +
+ portstats[spx5_stats_pmac_rx_alignment_lost_cnt];
+ stats->tx_aborted_errors = portstats[spx5_stats_tx_xcoll_cnt];
+ stats->tx_carrier_errors = portstats[spx5_stats_tx_csense_cnt];
+ stats->tx_window_errors = portstats[spx5_stats_tx_late_coll_cnt];
+ stats->rx_dropped = portstats[spx5_stats_ana_ac_port_stat_lsb_cnt];
+ for (idx = 0; idx < 2 * SPX5_PRIOS; ++idx)
+ stats->rx_dropped += portstats[spx5_stats_green_p0_rx_port_drop
+ + idx];
+ stats->tx_dropped = portstats[spx5_stats_tx_local_drop];
+}
+
+static void sparx5_update_port_stats(struct sparx5 *sparx5, int portno)
+{
+ if (sparx5_is_baser(sparx5->ports[portno]->conf.portmode))
+ sparx5_get_device_stats(sparx5, portno);
+ else
+ sparx5_get_asm_stats(sparx5, portno);
+ sparx5_get_ana_ac_stats_stats(sparx5, portno);
+ sparx5_get_queue_sys_stats(sparx5, portno);
+}
+
+static void sparx5_update_stats(struct sparx5 *sparx5)
+{
+ int idx;
+
+ for (idx = 0; idx < SPX5_PORTS; idx++)
+ if (sparx5->ports[idx])
+ sparx5_update_port_stats(sparx5, idx);
+}
+
+static void sparx5_check_stats_work(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct sparx5 *sparx5 = container_of(dwork,
+ struct sparx5,
+ stats_work);
+
+ sparx5_update_stats(sparx5);
+
+ queue_delayed_work(sparx5->stats_queue, &sparx5->stats_work,
+ SPX5_STATS_CHECK_DELAY);
+}
+
+static int sparx5_get_link_settings(struct net_device *ndev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+
+ return phylink_ethtool_ksettings_get(port->phylink, cmd);
+}
+
+static int sparx5_set_link_settings(struct net_device *ndev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+
+ return phylink_ethtool_ksettings_set(port->phylink, cmd);
+}
+
+static void sparx5_config_stats(struct sparx5 *sparx5)
+{
+ /* Enable global events for port policer drops */
+ spx5_rmw(ANA_AC_PORT_SGE_CFG_MASK_SET(0xf0f0),
+ ANA_AC_PORT_SGE_CFG_MASK,
+ sparx5,
+ ANA_AC_PORT_SGE_CFG(SPX5_PORT_POLICER_DROPS));
+}
+
+static void sparx5_config_port_stats(struct sparx5 *sparx5, int portno)
+{
+ /* Clear Queue System counters */
+ spx5_wr(XQS_STAT_CFG_STAT_VIEW_SET(portno) |
+ XQS_STAT_CFG_STAT_CLEAR_SHOT_SET(3), sparx5,
+ XQS_STAT_CFG);
+
+ /* Use counter for port policer drop count */
+ spx5_rmw(ANA_AC_PORT_STAT_CFG_CFG_CNT_FRM_TYPE_SET(1) |
+ ANA_AC_PORT_STAT_CFG_CFG_CNT_BYTE_SET(0) |
+ ANA_AC_PORT_STAT_CFG_CFG_PRIO_MASK_SET(0xff),
+ ANA_AC_PORT_STAT_CFG_CFG_CNT_FRM_TYPE |
+ ANA_AC_PORT_STAT_CFG_CFG_CNT_BYTE |
+ ANA_AC_PORT_STAT_CFG_CFG_PRIO_MASK,
+ sparx5, ANA_AC_PORT_STAT_CFG(portno, SPX5_PORT_POLICER_DROPS));
+}
+
+static int sparx5_get_ts_info(struct net_device *dev,
+ struct ethtool_ts_info *info)
+{
+ struct sparx5_port *port = netdev_priv(dev);
+ struct sparx5 *sparx5 = port->sparx5;
+ struct sparx5_phc *phc;
+
+ if (!sparx5->ptp)
+ return ethtool_op_get_ts_info(dev, info);
+
+ phc = &sparx5->phc[SPARX5_PHC_PORT];
+
+ info->phc_index = phc->clock ? ptp_clock_index(phc->clock) : -1;
+ if (info->phc_index == -1) {
+ info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE;
+ return 0;
+ }
+ info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON) |
+ BIT(HWTSTAMP_TX_ONESTEP_SYNC);
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_ALL);
+
+ return 0;
+}
+
+const struct ethtool_ops sparx5_ethtool_ops = {
+ .get_sset_count = sparx5_get_sset_count,
+ .get_strings = sparx5_get_sset_strings,
+ .get_ethtool_stats = sparx5_get_sset_data,
+ .get_link_ksettings = sparx5_get_link_settings,
+ .set_link_ksettings = sparx5_set_link_settings,
+ .get_link = ethtool_op_get_link,
+ .get_eth_phy_stats = sparx5_get_eth_phy_stats,
+ .get_eth_mac_stats = sparx5_get_eth_mac_stats,
+ .get_eth_ctrl_stats = sparx5_get_eth_mac_ctrl_stats,
+ .get_rmon_stats = sparx5_get_eth_rmon_stats,
+ .get_ts_info = sparx5_get_ts_info,
+};
+
+int sparx_stats_init(struct sparx5 *sparx5)
+{
+ char queue_name[32];
+ int portno;
+
+ sparx5->stats_layout = sparx5_stats_layout;
+ sparx5->num_stats = spx5_stats_count;
+ sparx5->num_ethtool_stats = ARRAY_SIZE(sparx5_stats_layout);
+ sparx5->stats = devm_kcalloc(sparx5->dev,
+ SPX5_PORTS_ALL * sparx5->num_stats,
+ sizeof(u64), GFP_KERNEL);
+ if (!sparx5->stats)
+ return -ENOMEM;
+
+ mutex_init(&sparx5->queue_stats_lock);
+ sparx5_config_stats(sparx5);
+ for (portno = 0; portno < SPX5_PORTS; portno++)
+ if (sparx5->ports[portno])
+ sparx5_config_port_stats(sparx5, portno);
+
+ snprintf(queue_name, sizeof(queue_name), "%s-stats",
+ dev_name(sparx5->dev));
+ sparx5->stats_queue = create_singlethread_workqueue(queue_name);
+ if (!sparx5->stats_queue)
+ return -ENOMEM;
+
+ INIT_DELAYED_WORK(&sparx5->stats_work, sparx5_check_stats_work);
+ queue_delayed_work(sparx5->stats_queue, &sparx5->stats_work,
+ SPX5_STATS_CHECK_DELAY);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c b/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c
new file mode 100644
index 0000000000..141897dfe3
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c
@@ -0,0 +1,598 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
+ *
+ * The Sparx5 Chip Register Model can be browsed at this location:
+ * https://github.com/microchip-ung/sparx-5_reginfo
+ */
+
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/interrupt.h>
+#include <linux/ip.h>
+#include <linux/dma-mapping.h>
+
+#include "sparx5_main_regs.h"
+#include "sparx5_main.h"
+#include "sparx5_port.h"
+
+#define FDMA_XTR_CHANNEL 6
+#define FDMA_INJ_CHANNEL 0
+
+#define FDMA_DCB_INFO_DATAL(x) ((x) & GENMASK(15, 0))
+#define FDMA_DCB_INFO_TOKEN BIT(17)
+#define FDMA_DCB_INFO_INTR BIT(18)
+#define FDMA_DCB_INFO_SW(x) (((x) << 24) & GENMASK(31, 24))
+
+#define FDMA_DCB_STATUS_BLOCKL(x) ((x) & GENMASK(15, 0))
+#define FDMA_DCB_STATUS_SOF BIT(16)
+#define FDMA_DCB_STATUS_EOF BIT(17)
+#define FDMA_DCB_STATUS_INTR BIT(18)
+#define FDMA_DCB_STATUS_DONE BIT(19)
+#define FDMA_DCB_STATUS_BLOCKO(x) (((x) << 20) & GENMASK(31, 20))
+#define FDMA_DCB_INVALID_DATA 0x1
+
+#define FDMA_XTR_BUFFER_SIZE 2048
+#define FDMA_WEIGHT 4
+
+/* Frame DMA DCB format
+ *
+ * +---------------------------+
+ * | Next Ptr |
+ * +---------------------------+
+ * | Reserved | Info |
+ * +---------------------------+
+ * | Data0 Ptr |
+ * +---------------------------+
+ * | Reserved | Status0 |
+ * +---------------------------+
+ * | Data1 Ptr |
+ * +---------------------------+
+ * | Reserved | Status1 |
+ * +---------------------------+
+ * | Data2 Ptr |
+ * +---------------------------+
+ * | Reserved | Status2 |
+ * |-------------|-------------|
+ * | |
+ * | |
+ * | |
+ * | |
+ * | |
+ * |---------------------------|
+ * | Data14 Ptr |
+ * +-------------|-------------+
+ * | Reserved | Status14 |
+ * +-------------|-------------+
+ */
+
+/* For each hardware DB there is an entry in this list, and when the HW DB
+ * entry is used, the corresponding SW DB entry is moved to the back of the
+ * list.
+ */
+struct sparx5_db {
+ struct list_head list;
+ void *cpu_addr;
+};
+
+static void sparx5_fdma_rx_add_dcb(struct sparx5_rx *rx,
+ struct sparx5_rx_dcb_hw *dcb,
+ u64 nextptr)
+{
+ int idx = 0;
+
+ /* Reset the status of the DB */
+ for (idx = 0; idx < FDMA_RX_DCB_MAX_DBS; ++idx) {
+ struct sparx5_db_hw *db = &dcb->db[idx];
+
+ db->status = FDMA_DCB_STATUS_INTR;
+ }
+ dcb->nextptr = FDMA_DCB_INVALID_DATA;
+ dcb->info = FDMA_DCB_INFO_DATAL(FDMA_XTR_BUFFER_SIZE);
+ rx->last_entry->nextptr = nextptr;
+ rx->last_entry = dcb;
+}
+
+static void sparx5_fdma_tx_add_dcb(struct sparx5_tx *tx,
+ struct sparx5_tx_dcb_hw *dcb,
+ u64 nextptr)
+{
+ int idx = 0;
+
+ /* Reset the status of the DB */
+ for (idx = 0; idx < FDMA_TX_DCB_MAX_DBS; ++idx) {
+ struct sparx5_db_hw *db = &dcb->db[idx];
+
+ db->status = FDMA_DCB_STATUS_DONE;
+ }
+ dcb->nextptr = FDMA_DCB_INVALID_DATA;
+ dcb->info = FDMA_DCB_INFO_DATAL(FDMA_XTR_BUFFER_SIZE);
+}
+
+static void sparx5_fdma_rx_activate(struct sparx5 *sparx5, struct sparx5_rx *rx)
+{
+ /* Write the buffer address in the LLP and LLP1 regs */
+ spx5_wr(((u64)rx->dma) & GENMASK(31, 0), sparx5,
+ FDMA_DCB_LLP(rx->channel_id));
+ spx5_wr(((u64)rx->dma) >> 32, sparx5, FDMA_DCB_LLP1(rx->channel_id));
+
+ /* Set the number of RX DBs to be used, and DB end-of-frame interrupt */
+ spx5_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(FDMA_RX_DCB_MAX_DBS) |
+ FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
+ FDMA_CH_CFG_CH_INJ_PORT_SET(XTR_QUEUE),
+ sparx5, FDMA_CH_CFG(rx->channel_id));
+
+ /* Set the RX Watermark to max */
+ spx5_rmw(FDMA_XTR_CFG_XTR_FIFO_WM_SET(31), FDMA_XTR_CFG_XTR_FIFO_WM,
+ sparx5,
+ FDMA_XTR_CFG);
+
+ /* Start RX fdma */
+ spx5_rmw(FDMA_PORT_CTRL_XTR_STOP_SET(0), FDMA_PORT_CTRL_XTR_STOP,
+ sparx5, FDMA_PORT_CTRL(0));
+
+ /* Enable RX channel DB interrupt */
+ spx5_rmw(BIT(rx->channel_id),
+ BIT(rx->channel_id) & FDMA_INTR_DB_ENA_INTR_DB_ENA,
+ sparx5, FDMA_INTR_DB_ENA);
+
+ /* Activate the RX channel */
+ spx5_wr(BIT(rx->channel_id), sparx5, FDMA_CH_ACTIVATE);
+}
+
+static void sparx5_fdma_rx_deactivate(struct sparx5 *sparx5, struct sparx5_rx *rx)
+{
+	/* Deactivate the RX channel */
+ spx5_rmw(0, BIT(rx->channel_id) & FDMA_CH_ACTIVATE_CH_ACTIVATE,
+ sparx5, FDMA_CH_ACTIVATE);
+
+ /* Disable RX channel DB interrupt */
+ spx5_rmw(0, BIT(rx->channel_id) & FDMA_INTR_DB_ENA_INTR_DB_ENA,
+ sparx5, FDMA_INTR_DB_ENA);
+
+ /* Stop RX fdma */
+ spx5_rmw(FDMA_PORT_CTRL_XTR_STOP_SET(1), FDMA_PORT_CTRL_XTR_STOP,
+ sparx5, FDMA_PORT_CTRL(0));
+}
+
+static void sparx5_fdma_tx_activate(struct sparx5 *sparx5, struct sparx5_tx *tx)
+{
+ /* Write the buffer address in the LLP and LLP1 regs */
+ spx5_wr(((u64)tx->dma) & GENMASK(31, 0), sparx5,
+ FDMA_DCB_LLP(tx->channel_id));
+ spx5_wr(((u64)tx->dma) >> 32, sparx5, FDMA_DCB_LLP1(tx->channel_id));
+
+ /* Set the number of TX DBs to be used, and DB end-of-frame interrupt */
+ spx5_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(FDMA_TX_DCB_MAX_DBS) |
+ FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
+ FDMA_CH_CFG_CH_INJ_PORT_SET(INJ_QUEUE),
+ sparx5, FDMA_CH_CFG(tx->channel_id));
+
+ /* Start TX fdma */
+ spx5_rmw(FDMA_PORT_CTRL_INJ_STOP_SET(0), FDMA_PORT_CTRL_INJ_STOP,
+ sparx5, FDMA_PORT_CTRL(0));
+
+ /* Activate the channel */
+ spx5_wr(BIT(tx->channel_id), sparx5, FDMA_CH_ACTIVATE);
+}
+
+static void sparx5_fdma_tx_deactivate(struct sparx5 *sparx5, struct sparx5_tx *tx)
+{
+ /* Disable the channel */
+ spx5_rmw(0, BIT(tx->channel_id) & FDMA_CH_ACTIVATE_CH_ACTIVATE,
+ sparx5, FDMA_CH_ACTIVATE);
+}
+
+static void sparx5_fdma_rx_reload(struct sparx5 *sparx5, struct sparx5_rx *rx)
+{
+ /* Reload the RX channel */
+ spx5_wr(BIT(rx->channel_id), sparx5, FDMA_CH_RELOAD);
+}
+
+static void sparx5_fdma_tx_reload(struct sparx5 *sparx5, struct sparx5_tx *tx)
+{
+ /* Reload the TX channel */
+ spx5_wr(BIT(tx->channel_id), sparx5, FDMA_CH_RELOAD);
+}
+
+static struct sk_buff *sparx5_fdma_rx_alloc_skb(struct sparx5_rx *rx)
+{
+ return __netdev_alloc_skb(rx->ndev, FDMA_XTR_BUFFER_SIZE,
+ GFP_ATOMIC);
+}
+
+static bool sparx5_fdma_rx_get_frame(struct sparx5 *sparx5, struct sparx5_rx *rx)
+{
+ struct sparx5_db_hw *db_hw;
+ unsigned int packet_size;
+ struct sparx5_port *port;
+ struct sk_buff *new_skb;
+ struct frame_info fi;
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+
+ /* Check if the DCB is done */
+ db_hw = &rx->dcb_entries[rx->dcb_index].db[rx->db_index];
+ if (unlikely(!(db_hw->status & FDMA_DCB_STATUS_DONE)))
+ return false;
+ skb = rx->skb[rx->dcb_index][rx->db_index];
+ /* Replace the DB entry with a new SKB */
+ new_skb = sparx5_fdma_rx_alloc_skb(rx);
+ if (unlikely(!new_skb))
+ return false;
+ /* Map the new skb data and set the new skb */
+ dma_addr = virt_to_phys(new_skb->data);
+ rx->skb[rx->dcb_index][rx->db_index] = new_skb;
+ db_hw->dataptr = dma_addr;
+ packet_size = FDMA_DCB_STATUS_BLOCKL(db_hw->status);
+ skb_put(skb, packet_size);
+ /* Now do the normal processing of the skb */
+ sparx5_ifh_parse((u32 *)skb->data, &fi);
+ /* Map to port netdev */
+ port = fi.src_port < SPX5_PORTS ? sparx5->ports[fi.src_port] : NULL;
+ if (!port || !port->ndev) {
+ dev_err(sparx5->dev, "Data on inactive port %d\n", fi.src_port);
+ sparx5_xtr_flush(sparx5, XTR_QUEUE);
+ return false;
+ }
+ skb->dev = port->ndev;
+ skb_pull(skb, IFH_LEN * sizeof(u32));
+ if (likely(!(skb->dev->features & NETIF_F_RXFCS)))
+ skb_trim(skb, skb->len - ETH_FCS_LEN);
+
+ sparx5_ptp_rxtstamp(sparx5, skb, fi.timestamp);
+ skb->protocol = eth_type_trans(skb, skb->dev);
+ /* Everything we see on an interface that is in the HW bridge
+ * has already been forwarded
+ */
+ if (test_bit(port->portno, sparx5->bridge_mask))
+ skb->offload_fwd_mark = 1;
+ skb->dev->stats.rx_bytes += skb->len;
+ skb->dev->stats.rx_packets++;
+ rx->packets++;
+ netif_receive_skb(skb);
+ return true;
+}
+
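+/* NAPI poll callback: consume up to "weight" received frames and re-arm
+ * used DCBs. The ring index wraps with a bitmask, which assumes
+ * FDMA_DCB_MAX is a power of two. When the budget is not exhausted, the
+ * channel DB interrupt is re-enabled and NAPI completes.
+ */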
+static int sparx5_fdma_napi_callback(struct napi_struct *napi, int weight)
+{
+ struct sparx5_rx *rx = container_of(napi, struct sparx5_rx, napi);
+ struct sparx5 *sparx5 = container_of(rx, struct sparx5, rx);
+ int counter = 0;
+
+ while (counter < weight && sparx5_fdma_rx_get_frame(sparx5, rx)) {
+ struct sparx5_rx_dcb_hw *old_dcb;
+
+ rx->db_index++;
+ counter++;
+ /* Check if the DCB can be reused */
+ if (rx->db_index != FDMA_RX_DCB_MAX_DBS)
+ continue;
+ /* As the DCB can be reused, just advance the dcb_index
+ * pointer and set the nextptr in the DCB
+ */
+ rx->db_index = 0;
+ old_dcb = &rx->dcb_entries[rx->dcb_index];
+ rx->dcb_index++;
+ rx->dcb_index &= FDMA_DCB_MAX - 1;
+ sparx5_fdma_rx_add_dcb(rx, old_dcb,
+ rx->dma +
+ ((unsigned long)old_dcb -
+ (unsigned long)rx->dcb_entries));
+ }
+ if (counter < weight) {
+ napi_complete_done(&rx->napi, counter);
+ spx5_rmw(BIT(rx->channel_id),
+ BIT(rx->channel_id) & FDMA_INTR_DB_ENA_INTR_DB_ENA,
+ sparx5, FDMA_INTR_DB_ENA);
+ }
+ if (counter)
+ sparx5_fdma_rx_reload(sparx5, rx);
+ return counter;
+}
+
+static struct sparx5_tx_dcb_hw *sparx5_fdma_next_dcb(struct sparx5_tx *tx,
+ struct sparx5_tx_dcb_hw *dcb)
+{
+ struct sparx5_tx_dcb_hw *next_dcb;
+
+ next_dcb = dcb;
+ next_dcb++;
+ /* Handle wrap-around */
+ if ((unsigned long)next_dcb >=
+ ((unsigned long)tx->first_entry + FDMA_DCB_MAX * sizeof(*dcb)))
+ next_dcb = tx->first_entry;
+ return next_dcb;
+}
+
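+/* Inject a frame over FDMA. Only the first DB of the next DCB is used:
+ * the IFH is copied in front of the frame data, and the block length
+ * covers IFH + frame + 4 extra bytes (presumably room for the FCS).
+ */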
+int sparx5_fdma_xmit(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb)
+{
+ struct sparx5_tx_dcb_hw *next_dcb_hw;
+ struct sparx5_tx *tx = &sparx5->tx;
+ static bool first_time = true;
+ struct sparx5_db_hw *db_hw;
+ struct sparx5_db *db;
+
+ next_dcb_hw = sparx5_fdma_next_dcb(tx, tx->curr_entry);
+ db_hw = &next_dcb_hw->db[0];
+ if (!(db_hw->status & FDMA_DCB_STATUS_DONE))
+ return -EINVAL;
+ db = list_first_entry(&tx->db_list, struct sparx5_db, list);
+ list_move_tail(&db->list, &tx->db_list);
+ next_dcb_hw->nextptr = FDMA_DCB_INVALID_DATA;
+ tx->curr_entry->nextptr = tx->dma +
+ ((unsigned long)next_dcb_hw -
+ (unsigned long)tx->first_entry);
+ tx->curr_entry = next_dcb_hw;
+ memset(db->cpu_addr, 0, FDMA_XTR_BUFFER_SIZE);
+ memcpy(db->cpu_addr, ifh, IFH_LEN * 4);
+ memcpy(db->cpu_addr + IFH_LEN * 4, skb->data, skb->len);
+ db_hw->status = FDMA_DCB_STATUS_SOF |
+ FDMA_DCB_STATUS_EOF |
+ FDMA_DCB_STATUS_BLOCKO(0) |
+ FDMA_DCB_STATUS_BLOCKL(skb->len + IFH_LEN * 4 + 4);
+ if (first_time) {
+ sparx5_fdma_tx_activate(sparx5, tx);
+ first_time = false;
+ } else {
+ sparx5_fdma_tx_reload(sparx5, tx);
+ }
+ return NETDEV_TX_OK;
+}
+
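+/* Note that RX buffers are handed to the FDMA via virt_to_phys() rather
+ * than the DMA API; this appears to rely on the coherent ACP access set
+ * up in sparx5_fdma_start().
+ */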
+static int sparx5_fdma_rx_alloc(struct sparx5 *sparx5)
+{
+ struct sparx5_rx *rx = &sparx5->rx;
+ struct sparx5_rx_dcb_hw *dcb;
+ int idx, jdx;
+ int size;
+
+ size = sizeof(struct sparx5_rx_dcb_hw) * FDMA_DCB_MAX;
+ size = ALIGN(size, PAGE_SIZE);
+ rx->dcb_entries = devm_kzalloc(sparx5->dev, size, GFP_KERNEL);
+ if (!rx->dcb_entries)
+ return -ENOMEM;
+ rx->dma = virt_to_phys(rx->dcb_entries);
+ rx->last_entry = rx->dcb_entries;
+ rx->db_index = 0;
+ rx->dcb_index = 0;
+ /* Now for each dcb allocate the db */
+ for (idx = 0; idx < FDMA_DCB_MAX; ++idx) {
+ dcb = &rx->dcb_entries[idx];
+ dcb->info = 0;
+		/* For each DB, allocate an skb and map the skb data pointer to
+		 * the DB dataptr. This way, when a frame is received, skb->data
+		 * already contains the frame, so no memcpy is needed.
+		 */
+ for (jdx = 0; jdx < FDMA_RX_DCB_MAX_DBS; ++jdx) {
+ struct sparx5_db_hw *db_hw = &dcb->db[jdx];
+ dma_addr_t dma_addr;
+ struct sk_buff *skb;
+
+ skb = sparx5_fdma_rx_alloc_skb(rx);
+ if (!skb)
+ return -ENOMEM;
+
+ dma_addr = virt_to_phys(skb->data);
+ db_hw->dataptr = dma_addr;
+ db_hw->status = 0;
+ rx->skb[idx][jdx] = skb;
+ }
+ sparx5_fdma_rx_add_dcb(rx, dcb, rx->dma + sizeof(*dcb) * idx);
+ }
+ netif_napi_add_weight(rx->ndev, &rx->napi, sparx5_fdma_napi_callback,
+ FDMA_WEIGHT);
+ napi_enable(&rx->napi);
+ sparx5_fdma_rx_activate(sparx5, rx);
+ return 0;
+}
+
+static int sparx5_fdma_tx_alloc(struct sparx5 *sparx5)
+{
+ struct sparx5_tx *tx = &sparx5->tx;
+ struct sparx5_tx_dcb_hw *dcb;
+ int idx, jdx;
+ int size;
+
+ size = sizeof(struct sparx5_tx_dcb_hw) * FDMA_DCB_MAX;
+ size = ALIGN(size, PAGE_SIZE);
+ tx->curr_entry = devm_kzalloc(sparx5->dev, size, GFP_KERNEL);
+ if (!tx->curr_entry)
+ return -ENOMEM;
+ tx->dma = virt_to_phys(tx->curr_entry);
+ tx->first_entry = tx->curr_entry;
+ INIT_LIST_HEAD(&tx->db_list);
+ /* Now for each dcb allocate the db */
+ for (idx = 0; idx < FDMA_DCB_MAX; ++idx) {
+ dcb = &tx->curr_entry[idx];
+ dcb->info = 0;
+		/* TX data buffers must be 16-byte aligned */
+ for (jdx = 0; jdx < FDMA_TX_DCB_MAX_DBS; ++jdx) {
+ struct sparx5_db_hw *db_hw = &dcb->db[jdx];
+ struct sparx5_db *db;
+ dma_addr_t phys;
+ void *cpu_addr;
+
+ cpu_addr = devm_kzalloc(sparx5->dev,
+ FDMA_XTR_BUFFER_SIZE,
+ GFP_KERNEL);
+ if (!cpu_addr)
+ return -ENOMEM;
+ phys = virt_to_phys(cpu_addr);
+ db_hw->dataptr = phys;
+ db_hw->status = 0;
+ db = devm_kzalloc(sparx5->dev, sizeof(*db), GFP_KERNEL);
+ if (!db)
+ return -ENOMEM;
+ db->cpu_addr = cpu_addr;
+ list_add_tail(&db->list, &tx->db_list);
+ }
+ sparx5_fdma_tx_add_dcb(tx, dcb, tx->dma + sizeof(*dcb) * idx);
+		/* Let curr_entry point to the last allocated entry */
+ if (idx == FDMA_DCB_MAX - 1)
+ tx->curr_entry = dcb;
+ }
+ return 0;
+}
+
+static void sparx5_fdma_rx_init(struct sparx5 *sparx5,
+ struct sparx5_rx *rx, int channel)
+{
+ int idx;
+
+ rx->channel_id = channel;
+ /* Fetch a netdev for SKB and NAPI use, any will do */
+ for (idx = 0; idx < SPX5_PORTS; ++idx) {
+ struct sparx5_port *port = sparx5->ports[idx];
+
+ if (port && port->ndev) {
+ rx->ndev = port->ndev;
+ break;
+ }
+ }
+}
+
+static void sparx5_fdma_tx_init(struct sparx5 *sparx5,
+ struct sparx5_tx *tx, int channel)
+{
+ tx->channel_id = channel;
+}
+
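+/* FDMA interrupt handler: on a DB event all DB interrupts are masked and
+ * NAPI is scheduled; the poll callback re-enables the RX channel interrupt
+ * when done. Errors are acknowledged and logged (rate limited).
+ */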
+irqreturn_t sparx5_fdma_handler(int irq, void *args)
+{
+ struct sparx5 *sparx5 = args;
+ u32 db = 0, err = 0;
+
+ db = spx5_rd(sparx5, FDMA_INTR_DB);
+ err = spx5_rd(sparx5, FDMA_INTR_ERR);
+ /* Clear interrupt */
+ if (db) {
+ spx5_wr(0, sparx5, FDMA_INTR_DB_ENA);
+ spx5_wr(db, sparx5, FDMA_INTR_DB);
+ napi_schedule(&sparx5->rx.napi);
+ }
+ if (err) {
+ u32 err_type = spx5_rd(sparx5, FDMA_ERRORS);
+
+ dev_err_ratelimited(sparx5->dev,
+ "ERR: int: %#x, type: %#x\n",
+ err, err_type);
+ spx5_wr(err, sparx5, FDMA_INTR_ERR);
+ spx5_wr(err_type, sparx5, FDMA_ERRORS);
+ }
+ return IRQ_HANDLED;
+}
+
+static void sparx5_fdma_injection_mode(struct sparx5 *sparx5)
+{
+ const int byte_swap = 1;
+ int portno;
+ int urgency;
+
+ /* Change mode to fdma extraction and injection */
+ spx5_wr(QS_XTR_GRP_CFG_MODE_SET(2) |
+ QS_XTR_GRP_CFG_STATUS_WORD_POS_SET(1) |
+ QS_XTR_GRP_CFG_BYTE_SWAP_SET(byte_swap),
+ sparx5, QS_XTR_GRP_CFG(XTR_QUEUE));
+ spx5_wr(QS_INJ_GRP_CFG_MODE_SET(2) |
+ QS_INJ_GRP_CFG_BYTE_SWAP_SET(byte_swap),
+ sparx5, QS_INJ_GRP_CFG(INJ_QUEUE));
+
+ /* CPU ports capture setup */
+ for (portno = SPX5_PORT_CPU_0; portno <= SPX5_PORT_CPU_1; portno++) {
+ /* ASM CPU port: No preamble, IFH, enable padding */
+ spx5_wr(ASM_PORT_CFG_PAD_ENA_SET(1) |
+ ASM_PORT_CFG_NO_PREAMBLE_ENA_SET(1) |
+ ASM_PORT_CFG_INJ_FORMAT_CFG_SET(1), /* 1 = IFH */
+ sparx5, ASM_PORT_CFG(portno));
+
+ /* Reset WM cnt to unclog queued frames */
+ spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR_SET(1),
+ DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR,
+ sparx5,
+ DSM_DEV_TX_STOP_WM_CFG(portno));
+
+ /* Set Disassembler Stop Watermark level */
+ spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM_SET(100),
+ DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM,
+ sparx5,
+ DSM_DEV_TX_STOP_WM_CFG(portno));
+
+ /* Enable port in queue system */
+ urgency = sparx5_port_fwd_urg(sparx5, SPEED_2500);
+ spx5_rmw(QFWD_SWITCH_PORT_MODE_PORT_ENA_SET(1) |
+ QFWD_SWITCH_PORT_MODE_FWD_URGENCY_SET(urgency),
+ QFWD_SWITCH_PORT_MODE_PORT_ENA |
+ QFWD_SWITCH_PORT_MODE_FWD_URGENCY,
+ sparx5,
+ QFWD_SWITCH_PORT_MODE(portno));
+
+ /* Disable Disassembler buffer underrun watchdog
+ * to avoid truncated packets in XTR
+ */
+ spx5_rmw(DSM_BUF_CFG_UNDERFLOW_WATCHDOG_DIS_SET(1),
+ DSM_BUF_CFG_UNDERFLOW_WATCHDOG_DIS,
+ sparx5,
+ DSM_BUF_CFG(portno));
+
+		/* Disable frame aging */
+ spx5_rmw(HSCH_PORT_MODE_AGE_DIS_SET(1),
+ HSCH_PORT_MODE_AGE_DIS,
+ sparx5,
+ HSCH_PORT_MODE(portno));
+ }
+}
+
+int sparx5_fdma_start(struct sparx5 *sparx5)
+{
+ int err;
+
+ /* Reset FDMA state */
+ spx5_wr(FDMA_CTRL_NRESET_SET(0), sparx5, FDMA_CTRL);
+ spx5_wr(FDMA_CTRL_NRESET_SET(1), sparx5, FDMA_CTRL);
+
+ /* Force ACP caching but disable read/write allocation */
+ spx5_rmw(CPU_PROC_CTRL_ACP_CACHE_FORCE_ENA_SET(1) |
+ CPU_PROC_CTRL_ACP_AWCACHE_SET(0) |
+ CPU_PROC_CTRL_ACP_ARCACHE_SET(0),
+ CPU_PROC_CTRL_ACP_CACHE_FORCE_ENA |
+ CPU_PROC_CTRL_ACP_AWCACHE |
+ CPU_PROC_CTRL_ACP_ARCACHE,
+ sparx5, CPU_PROC_CTRL);
+
+ sparx5_fdma_injection_mode(sparx5);
+ sparx5_fdma_rx_init(sparx5, &sparx5->rx, FDMA_XTR_CHANNEL);
+ sparx5_fdma_tx_init(sparx5, &sparx5->tx, FDMA_INJ_CHANNEL);
+ err = sparx5_fdma_rx_alloc(sparx5);
+ if (err) {
+ dev_err(sparx5->dev, "Could not allocate RX buffers: %d\n", err);
+ return err;
+ }
+ err = sparx5_fdma_tx_alloc(sparx5);
+ if (err) {
+ dev_err(sparx5->dev, "Could not allocate TX buffers: %d\n", err);
+ return err;
+ }
+ return err;
+}
+
+static u32 sparx5_fdma_port_ctrl(struct sparx5 *sparx5)
+{
+ return spx5_rd(sparx5, FDMA_PORT_CTRL(0));
+}
+
+int sparx5_fdma_stop(struct sparx5 *sparx5)
+{
+ u32 val;
+
+ napi_disable(&sparx5->rx.napi);
+ /* Stop the fdma and channel interrupts */
+ sparx5_fdma_rx_deactivate(sparx5, &sparx5->rx);
+ sparx5_fdma_tx_deactivate(sparx5, &sparx5->tx);
+ /* Wait for the RX channel to stop */
+ read_poll_timeout(sparx5_fdma_port_ctrl, val,
+ FDMA_PORT_CTRL_XTR_BUF_IS_EMPTY_GET(val) == 0,
+ 500, 10000, 0, sparx5);
+ return 0;
+}
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c b/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c
new file mode 100644
index 0000000000..4af285918e
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c
@@ -0,0 +1,503 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include <net/switchdev.h>
+#include <linux/if_bridge.h>
+#include <linux/iopoll.h>
+
+#include "sparx5_main_regs.h"
+#include "sparx5_main.h"
+
+/* Commands for Mac Table Command register */
+#define MAC_CMD_LEARN 0 /* Insert (Learn) 1 entry */
+#define MAC_CMD_UNLEARN 1 /* Unlearn (Forget) 1 entry */
+#define MAC_CMD_LOOKUP 2 /* Look up 1 entry */
+#define MAC_CMD_READ 3 /* Read entry at Mac Table Index */
+#define MAC_CMD_WRITE 4 /* Write entry at Mac Table Index */
+#define MAC_CMD_SCAN 5 /* Scan (Age or find next) */
+#define MAC_CMD_FIND_SMALLEST 6 /* Get next entry */
+#define MAC_CMD_CLEAR_ALL 7 /* Delete all entries in table */
+
+/* Commands for MAC_ENTRY_ADDR_TYPE */
+#define MAC_ENTRY_ADDR_TYPE_UPSID_PN 0
+#define MAC_ENTRY_ADDR_TYPE_UPSID_CPU_OR_INT 1
+#define MAC_ENTRY_ADDR_TYPE_GLAG 2
+#define MAC_ENTRY_ADDR_TYPE_MC_IDX 3
+
+#define TABLE_UPDATE_SLEEP_US 10
+#define TABLE_UPDATE_TIMEOUT_US 100000
+
+struct sparx5_mact_entry {
+ struct list_head list;
+ unsigned char mac[ETH_ALEN];
+ u32 flags;
+#define MAC_ENT_ALIVE BIT(0)
+#define MAC_ENT_MOVED BIT(1)
+#define MAC_ENT_LOCK BIT(2)
+ u16 vid;
+ u16 port;
+};
+
+static int sparx5_mact_get_status(struct sparx5 *sparx5)
+{
+ return spx5_rd(sparx5, LRN_COMMON_ACCESS_CTRL);
+}
+
+static int sparx5_mact_wait_for_completion(struct sparx5 *sparx5)
+{
+ u32 val;
+
+ return readx_poll_timeout(sparx5_mact_get_status,
+ sparx5, val,
+ LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_GET(val) == 0,
+ TABLE_UPDATE_SLEEP_US, TABLE_UPDATE_TIMEOUT_US);
+}
+
+static void sparx5_mact_select(struct sparx5 *sparx5,
+ const unsigned char mac[ETH_ALEN],
+ u16 vid)
+{
+ u32 macl = 0, mach = 0;
+
+	/* Set the MAC address to handle and its associated VLAN in a
+	 * format understood by the hardware.
+	 */
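+	/* As a worked example, MAC 00:11:22:33:44:55 with VID 1 packs as
+	 * mach = 0x00010011 and macl = 0x22334455.
+	 */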
+ mach |= vid << 16;
+ mach |= mac[0] << 8;
+ mach |= mac[1] << 0;
+ macl |= mac[2] << 24;
+ macl |= mac[3] << 16;
+ macl |= mac[4] << 8;
+ macl |= mac[5] << 0;
+
+ spx5_wr(mach, sparx5, LRN_MAC_ACCESS_CFG_0);
+ spx5_wr(macl, sparx5, LRN_MAC_ACCESS_CFG_1);
+}
+
+int sparx5_mact_learn(struct sparx5 *sparx5, int pgid,
+ const unsigned char mac[ETH_ALEN], u16 vid)
+{
+ int addr, type, ret;
+
+ if (pgid < SPX5_PORTS) {
+ type = MAC_ENTRY_ADDR_TYPE_UPSID_PN;
+ addr = pgid % 32;
+ addr += (pgid / 32) << 5; /* Add upsid */
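+		/* The low 5 bits select the port within the unit port set
+		 * and the bits above select the UPSID, e.g. pgid 37 ->
+		 * UPSID 1, port 5.
+		 */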
+ } else {
+ type = MAC_ENTRY_ADDR_TYPE_MC_IDX;
+ addr = pgid - SPX5_PORTS;
+ }
+
+ mutex_lock(&sparx5->lock);
+
+ sparx5_mact_select(sparx5, mac, vid);
+
+ /* MAC entry properties */
+ spx5_wr(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_SET(addr) |
+ LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_TYPE_SET(type) |
+ LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLD_SET(1) |
+ LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_LOCKED_SET(1),
+ sparx5, LRN_MAC_ACCESS_CFG_2);
+ spx5_wr(0, sparx5, LRN_MAC_ACCESS_CFG_3);
+
+ /* Insert/learn new entry */
+ spx5_wr(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD_SET(MAC_CMD_LEARN) |
+ LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_SET(1),
+ sparx5, LRN_COMMON_ACCESS_CTRL);
+
+ ret = sparx5_mact_wait_for_completion(sparx5);
+
+ mutex_unlock(&sparx5->lock);
+
+ return ret;
+}
+
+int sparx5_mc_unsync(struct net_device *dev, const unsigned char *addr)
+{
+ struct sparx5_port *port = netdev_priv(dev);
+ struct sparx5 *sparx5 = port->sparx5;
+
+ return sparx5_mact_forget(sparx5, addr, port->pvid);
+}
+
+int sparx5_mc_sync(struct net_device *dev, const unsigned char *addr)
+{
+ struct sparx5_port *port = netdev_priv(dev);
+ struct sparx5 *sparx5 = port->sparx5;
+
+ return sparx5_mact_learn(sparx5, PGID_CPU, addr, port->pvid);
+}
+
+static int sparx5_mact_get(struct sparx5 *sparx5,
+ unsigned char mac[ETH_ALEN],
+ u16 *vid, u32 *pcfg2)
+{
+ u32 mach, macl, cfg2;
+ int ret = -ENOENT;
+
+ cfg2 = spx5_rd(sparx5, LRN_MAC_ACCESS_CFG_2);
+ if (LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLD_GET(cfg2)) {
+ mach = spx5_rd(sparx5, LRN_MAC_ACCESS_CFG_0);
+ macl = spx5_rd(sparx5, LRN_MAC_ACCESS_CFG_1);
+ mac[0] = ((mach >> 8) & 0xff);
+ mac[1] = ((mach >> 0) & 0xff);
+ mac[2] = ((macl >> 24) & 0xff);
+ mac[3] = ((macl >> 16) & 0xff);
+ mac[4] = ((macl >> 8) & 0xff);
+ mac[5] = ((macl >> 0) & 0xff);
+ *vid = mach >> 16;
+ *pcfg2 = cfg2;
+ ret = 0;
+ }
+
+ return ret;
+}
+
+bool sparx5_mact_getnext(struct sparx5 *sparx5,
+ unsigned char mac[ETH_ALEN], u16 *vid, u32 *pcfg2)
+{
+ u32 cfg2;
+ int ret;
+
+ mutex_lock(&sparx5->lock);
+
+ sparx5_mact_select(sparx5, mac, *vid);
+
+ spx5_wr(LRN_SCAN_NEXT_CFG_SCAN_NEXT_IGNORE_LOCKED_ENA_SET(1) |
+ LRN_SCAN_NEXT_CFG_SCAN_NEXT_UNTIL_FOUND_ENA_SET(1),
+ sparx5, LRN_SCAN_NEXT_CFG);
+ spx5_wr(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD_SET
+ (MAC_CMD_FIND_SMALLEST) |
+ LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_SET(1),
+ sparx5, LRN_COMMON_ACCESS_CTRL);
+
+ ret = sparx5_mact_wait_for_completion(sparx5);
+ if (ret == 0) {
+ ret = sparx5_mact_get(sparx5, mac, vid, &cfg2);
+ if (ret == 0)
+ *pcfg2 = cfg2;
+ }
+
+ mutex_unlock(&sparx5->lock);
+
+ return ret == 0;
+}
+
+int sparx5_mact_find(struct sparx5 *sparx5,
+ const unsigned char mac[ETH_ALEN], u16 vid, u32 *pcfg2)
+{
+ int ret;
+ u32 cfg2;
+
+ mutex_lock(&sparx5->lock);
+
+ sparx5_mact_select(sparx5, mac, vid);
+
+ /* Issue a lookup command */
+ spx5_wr(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD_SET(MAC_CMD_LOOKUP) |
+ LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_SET(1),
+ sparx5, LRN_COMMON_ACCESS_CTRL);
+
+ ret = sparx5_mact_wait_for_completion(sparx5);
+ if (ret == 0) {
+ cfg2 = spx5_rd(sparx5, LRN_MAC_ACCESS_CFG_2);
+ if (LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLD_GET(cfg2))
+ *pcfg2 = cfg2;
+ else
+ ret = -ENOENT;
+ }
+
+ mutex_unlock(&sparx5->lock);
+
+ return ret;
+}
+
+int sparx5_mact_forget(struct sparx5 *sparx5,
+ const unsigned char mac[ETH_ALEN], u16 vid)
+{
+ int ret;
+
+ mutex_lock(&sparx5->lock);
+
+ sparx5_mact_select(sparx5, mac, vid);
+
+ /* Issue an unlearn command */
+ spx5_wr(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD_SET(MAC_CMD_UNLEARN) |
+ LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_SET(1),
+ sparx5, LRN_COMMON_ACCESS_CTRL);
+
+ ret = sparx5_mact_wait_for_completion(sparx5);
+
+ mutex_unlock(&sparx5->lock);
+
+ return ret;
+}
+
+static struct sparx5_mact_entry *alloc_mact_entry(struct sparx5 *sparx5,
+ const unsigned char *mac,
+ u16 vid, u16 port_index)
+{
+ struct sparx5_mact_entry *mact_entry;
+
+ mact_entry = devm_kzalloc(sparx5->dev,
+ sizeof(*mact_entry), GFP_ATOMIC);
+ if (!mact_entry)
+ return NULL;
+
+ memcpy(mact_entry->mac, mac, ETH_ALEN);
+ mact_entry->vid = vid;
+ mact_entry->port = port_index;
+ return mact_entry;
+}
+
+static struct sparx5_mact_entry *find_mact_entry(struct sparx5 *sparx5,
+ const unsigned char *mac,
+ u16 vid, u16 port_index)
+{
+ struct sparx5_mact_entry *mact_entry;
+ struct sparx5_mact_entry *res = NULL;
+
+ mutex_lock(&sparx5->mact_lock);
+ list_for_each_entry(mact_entry, &sparx5->mact_entries, list) {
+ if (mact_entry->vid == vid &&
+ ether_addr_equal(mac, mact_entry->mac) &&
+ mact_entry->port == port_index) {
+ res = mact_entry;
+ break;
+ }
+ }
+ mutex_unlock(&sparx5->mact_lock);
+
+ return res;
+}
+
+static void sparx5_fdb_call_notifiers(enum switchdev_notifier_type type,
+ const char *mac, u16 vid,
+ struct net_device *dev, bool offloaded)
+{
+ struct switchdev_notifier_fdb_info info = {};
+
+ info.addr = mac;
+ info.vid = vid;
+ info.offloaded = offloaded;
+ call_switchdev_notifiers(type, dev, &info.info, NULL);
+}
+
+int sparx5_add_mact_entry(struct sparx5 *sparx5,
+ struct net_device *dev,
+ u16 portno,
+ const unsigned char *addr, u16 vid)
+{
+ struct sparx5_mact_entry *mact_entry;
+ int ret;
+ u32 cfg2;
+
+ ret = sparx5_mact_find(sparx5, addr, vid, &cfg2);
+ if (!ret)
+ return 0;
+
+	/* In case the entry already exists, don't add it again to SW,
+	 * just update HW. We need to check the actual HW table because
+	 * an entry may have been learned by HW, and before the mact
+	 * thread starts, a frame may reach the CPU, which then adds the
+	 * entry but without the extern_learn flag.
+	 */
+ mact_entry = find_mact_entry(sparx5, addr, vid, portno);
+ if (mact_entry)
+ goto update_hw;
+
+	/* Add the entry to the SW MAC table so that no notification is
+	 * generated when SW pulls the table again.
+	 */
+ mact_entry = alloc_mact_entry(sparx5, addr, vid, portno);
+ if (!mact_entry)
+ return -ENOMEM;
+
+ mutex_lock(&sparx5->mact_lock);
+ list_add_tail(&mact_entry->list, &sparx5->mact_entries);
+ mutex_unlock(&sparx5->mact_lock);
+
+update_hw:
+ ret = sparx5_mact_learn(sparx5, portno, addr, vid);
+
+ /* New entry? */
+ if (mact_entry->flags == 0) {
+ mact_entry->flags |= MAC_ENT_LOCK; /* Don't age this */
+ sparx5_fdb_call_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE, addr, vid,
+ dev, true);
+ }
+
+ return ret;
+}
+
+int sparx5_del_mact_entry(struct sparx5 *sparx5,
+ const unsigned char *addr,
+ u16 vid)
+{
+ struct sparx5_mact_entry *mact_entry, *tmp;
+
+	/* Delete the entry from the SW MAC table so that no notification
+	 * is generated when SW pulls the table again.
+	 */
+ mutex_lock(&sparx5->mact_lock);
+ list_for_each_entry_safe(mact_entry, tmp, &sparx5->mact_entries,
+ list) {
+ if ((vid == 0 || mact_entry->vid == vid) &&
+ ether_addr_equal(addr, mact_entry->mac)) {
+ list_del(&mact_entry->list);
+ devm_kfree(sparx5->dev, mact_entry);
+
+ sparx5_mact_forget(sparx5, addr, mact_entry->vid);
+ }
+ }
+ mutex_unlock(&sparx5->mact_lock);
+
+ return 0;
+}
+
+static void sparx5_mact_handle_entry(struct sparx5 *sparx5,
+ unsigned char mac[ETH_ALEN],
+ u16 vid, u32 cfg2)
+{
+ struct sparx5_mact_entry *mact_entry;
+ bool found = false;
+ u16 port;
+
+ if (LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_TYPE_GET(cfg2) !=
+ MAC_ENTRY_ADDR_TYPE_UPSID_PN)
+ return;
+
+ port = LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_GET(cfg2);
+ if (port >= SPX5_PORTS)
+ return;
+
+ if (!test_bit(port, sparx5->bridge_mask))
+ return;
+
+ mutex_lock(&sparx5->mact_lock);
+ list_for_each_entry(mact_entry, &sparx5->mact_entries, list) {
+ if (mact_entry->vid == vid &&
+ ether_addr_equal(mac, mact_entry->mac)) {
+ found = true;
+ mact_entry->flags |= MAC_ENT_ALIVE;
+ if (mact_entry->port != port) {
+ dev_warn(sparx5->dev, "Entry move: %d -> %d\n",
+ mact_entry->port, port);
+ mact_entry->port = port;
+ mact_entry->flags |= MAC_ENT_MOVED;
+ }
+ /* Entry handled */
+ break;
+ }
+ }
+ mutex_unlock(&sparx5->mact_lock);
+
+ if (found && !(mact_entry->flags & MAC_ENT_MOVED))
+ /* Present, not moved */
+ return;
+
+ if (!found) {
+ /* Entry not found - now add */
+ mact_entry = alloc_mact_entry(sparx5, mac, vid, port);
+ if (!mact_entry)
+ return;
+
+ mact_entry->flags |= MAC_ENT_ALIVE;
+ mutex_lock(&sparx5->mact_lock);
+ list_add_tail(&mact_entry->list, &sparx5->mact_entries);
+ mutex_unlock(&sparx5->mact_lock);
+ }
+
+ /* New or moved entry - notify bridge */
+ sparx5_fdb_call_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE,
+ mac, vid, sparx5->ports[port]->ndev,
+ true);
+}
+
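+/* Periodic MAC table synchronization: walk the HW table with
+ * MAC_CMD_FIND_SMALLEST, mark matching SW entries as alive, notify the
+ * bridge about new or moved entries, and finally drop SW entries that
+ * were neither seen in HW nor locked.
+ */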
+void sparx5_mact_pull_work(struct work_struct *work)
+{
+ struct delayed_work *del_work = to_delayed_work(work);
+ struct sparx5 *sparx5 = container_of(del_work, struct sparx5,
+ mact_work);
+ struct sparx5_mact_entry *mact_entry, *tmp;
+ unsigned char mac[ETH_ALEN];
+ u32 cfg2;
+ u16 vid;
+ int ret;
+
+ /* Reset MAC entry flags */
+ mutex_lock(&sparx5->mact_lock);
+ list_for_each_entry(mact_entry, &sparx5->mact_entries, list)
+ mact_entry->flags &= MAC_ENT_LOCK;
+ mutex_unlock(&sparx5->mact_lock);
+
+	/* Main MAC address processing loop */
+ vid = 0;
+ memset(mac, 0, sizeof(mac));
+ do {
+ mutex_lock(&sparx5->lock);
+ sparx5_mact_select(sparx5, mac, vid);
+ spx5_wr(LRN_SCAN_NEXT_CFG_SCAN_NEXT_UNTIL_FOUND_ENA_SET(1),
+ sparx5, LRN_SCAN_NEXT_CFG);
+ spx5_wr(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD_SET
+ (MAC_CMD_FIND_SMALLEST) |
+ LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_SET(1),
+ sparx5, LRN_COMMON_ACCESS_CTRL);
+ ret = sparx5_mact_wait_for_completion(sparx5);
+ if (ret == 0)
+ ret = sparx5_mact_get(sparx5, mac, &vid, &cfg2);
+ mutex_unlock(&sparx5->lock);
+ if (ret == 0)
+ sparx5_mact_handle_entry(sparx5, mac, vid, cfg2);
+ } while (ret == 0);
+
+ mutex_lock(&sparx5->mact_lock);
+ list_for_each_entry_safe(mact_entry, tmp, &sparx5->mact_entries,
+ list) {
+ /* If the entry is in HW or permanent, then skip */
+ if (mact_entry->flags & (MAC_ENT_ALIVE | MAC_ENT_LOCK))
+ continue;
+
+ sparx5_fdb_call_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE,
+ mact_entry->mac, mact_entry->vid,
+ sparx5->ports[mact_entry->port]->ndev,
+ true);
+
+ list_del(&mact_entry->list);
+ devm_kfree(sparx5->dev, mact_entry);
+ }
+ mutex_unlock(&sparx5->mact_lock);
+
+ queue_delayed_work(sparx5->mact_queue, &sparx5->mact_work,
+ SPX5_MACT_PULL_DELAY);
+}
+
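+/* The hardware ages in units of 10 ms; with one-bit ageing an entry
+ * survives two scan periods, hence PERIOD_VAL is set to value / 2.
+ * E.g. msecs = 300000 gives value = 30000 and PERIOD_VAL = 15000,
+ * i.e. a 150 s period and roughly 300 s before an unused entry is
+ * removed.
+ */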
+void sparx5_set_ageing(struct sparx5 *sparx5, int msecs)
+{
+ int value = max(1, msecs / 10); /* unit 10 ms */
+
+ spx5_rmw(LRN_AUTOAGE_CFG_UNIT_SIZE_SET(2) | /* 10 ms */
+ LRN_AUTOAGE_CFG_PERIOD_VAL_SET(value / 2), /* one bit ageing */
+ LRN_AUTOAGE_CFG_UNIT_SIZE |
+ LRN_AUTOAGE_CFG_PERIOD_VAL,
+ sparx5,
+ LRN_AUTOAGE_CFG(0));
+}
+
+void sparx5_mact_init(struct sparx5 *sparx5)
+{
+ mutex_init(&sparx5->lock);
+
+ /* Flush MAC table */
+ spx5_wr(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD_SET(MAC_CMD_CLEAR_ALL) |
+ LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_SET(1),
+ sparx5, LRN_COMMON_ACCESS_CTRL);
+
+ if (sparx5_mact_wait_for_completion(sparx5) != 0)
+ dev_warn(sparx5->dev, "MAC flush error\n");
+
+ sparx5_set_ageing(sparx5, BR_DEFAULT_AGEING_TIME / HZ * 1000);
+}
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
new file mode 100644
index 0000000000..dc9af480bf
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
@@ -0,0 +1,957 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
+ *
+ * The Sparx5 Chip Register Model can be browsed at this location:
+ * https://github.com/microchip-ung/sparx-5_reginfo
+ */
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/netdevice.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/of_mdio.h>
+#include <net/switchdev.h>
+#include <linux/etherdevice.h>
+#include <linux/io.h>
+#include <linux/printk.h>
+#include <linux/iopoll.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/types.h>
+#include <linux/reset.h>
+
+#include "sparx5_main_regs.h"
+#include "sparx5_main.h"
+#include "sparx5_port.h"
+#include "sparx5_qos.h"
+
+#define QLIM_WM(fraction) \
+ ((SPX5_BUFFER_MEMORY / SPX5_BUFFER_CELL_SZ - 100) * (fraction) / 100)
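+/* QLIM_WM(100) corresponds to the full shared buffer in cells, less a
+ * 100-cell headroom; lower percentages scale this watermark linearly.
+ */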
+#define IO_RANGES 3
+
+struct initial_port_config {
+ u32 portno;
+ struct device_node *node;
+ struct sparx5_port_config conf;
+ struct phy *serdes;
+};
+
+struct sparx5_ram_config {
+ void __iomem *init_reg;
+ u32 init_val;
+};
+
+struct sparx5_main_io_resource {
+ enum sparx5_target id;
+ phys_addr_t offset;
+ int range;
+};
+
+static const struct sparx5_main_io_resource sparx5_main_iomap[] = {
+ { TARGET_CPU, 0, 0 }, /* 0x600000000 */
+ { TARGET_FDMA, 0x80000, 0 }, /* 0x600080000 */
+ { TARGET_PCEP, 0x400000, 0 }, /* 0x600400000 */
+ { TARGET_DEV2G5, 0x10004000, 1 }, /* 0x610004000 */
+ { TARGET_DEV5G, 0x10008000, 1 }, /* 0x610008000 */
+ { TARGET_PCS5G_BR, 0x1000c000, 1 }, /* 0x61000c000 */
+ { TARGET_DEV2G5 + 1, 0x10010000, 1 }, /* 0x610010000 */
+ { TARGET_DEV5G + 1, 0x10014000, 1 }, /* 0x610014000 */
+ { TARGET_PCS5G_BR + 1, 0x10018000, 1 }, /* 0x610018000 */
+ { TARGET_DEV2G5 + 2, 0x1001c000, 1 }, /* 0x61001c000 */
+ { TARGET_DEV5G + 2, 0x10020000, 1 }, /* 0x610020000 */
+ { TARGET_PCS5G_BR + 2, 0x10024000, 1 }, /* 0x610024000 */
+ { TARGET_DEV2G5 + 6, 0x10028000, 1 }, /* 0x610028000 */
+ { TARGET_DEV5G + 6, 0x1002c000, 1 }, /* 0x61002c000 */
+ { TARGET_PCS5G_BR + 6, 0x10030000, 1 }, /* 0x610030000 */
+ { TARGET_DEV2G5 + 7, 0x10034000, 1 }, /* 0x610034000 */
+ { TARGET_DEV5G + 7, 0x10038000, 1 }, /* 0x610038000 */
+ { TARGET_PCS5G_BR + 7, 0x1003c000, 1 }, /* 0x61003c000 */
+ { TARGET_DEV2G5 + 8, 0x10040000, 1 }, /* 0x610040000 */
+ { TARGET_DEV5G + 8, 0x10044000, 1 }, /* 0x610044000 */
+ { TARGET_PCS5G_BR + 8, 0x10048000, 1 }, /* 0x610048000 */
+ { TARGET_DEV2G5 + 9, 0x1004c000, 1 }, /* 0x61004c000 */
+ { TARGET_DEV5G + 9, 0x10050000, 1 }, /* 0x610050000 */
+ { TARGET_PCS5G_BR + 9, 0x10054000, 1 }, /* 0x610054000 */
+ { TARGET_DEV2G5 + 10, 0x10058000, 1 }, /* 0x610058000 */
+ { TARGET_DEV5G + 10, 0x1005c000, 1 }, /* 0x61005c000 */
+ { TARGET_PCS5G_BR + 10, 0x10060000, 1 }, /* 0x610060000 */
+ { TARGET_DEV2G5 + 11, 0x10064000, 1 }, /* 0x610064000 */
+ { TARGET_DEV5G + 11, 0x10068000, 1 }, /* 0x610068000 */
+ { TARGET_PCS5G_BR + 11, 0x1006c000, 1 }, /* 0x61006c000 */
+ { TARGET_DEV2G5 + 12, 0x10070000, 1 }, /* 0x610070000 */
+ { TARGET_DEV10G, 0x10074000, 1 }, /* 0x610074000 */
+ { TARGET_PCS10G_BR, 0x10078000, 1 }, /* 0x610078000 */
+ { TARGET_DEV2G5 + 14, 0x1007c000, 1 }, /* 0x61007c000 */
+ { TARGET_DEV10G + 2, 0x10080000, 1 }, /* 0x610080000 */
+ { TARGET_PCS10G_BR + 2, 0x10084000, 1 }, /* 0x610084000 */
+ { TARGET_DEV2G5 + 15, 0x10088000, 1 }, /* 0x610088000 */
+ { TARGET_DEV10G + 3, 0x1008c000, 1 }, /* 0x61008c000 */
+ { TARGET_PCS10G_BR + 3, 0x10090000, 1 }, /* 0x610090000 */
+ { TARGET_DEV2G5 + 16, 0x10094000, 1 }, /* 0x610094000 */
+ { TARGET_DEV2G5 + 17, 0x10098000, 1 }, /* 0x610098000 */
+ { TARGET_DEV2G5 + 18, 0x1009c000, 1 }, /* 0x61009c000 */
+ { TARGET_DEV2G5 + 19, 0x100a0000, 1 }, /* 0x6100a0000 */
+ { TARGET_DEV2G5 + 20, 0x100a4000, 1 }, /* 0x6100a4000 */
+ { TARGET_DEV2G5 + 21, 0x100a8000, 1 }, /* 0x6100a8000 */
+ { TARGET_DEV2G5 + 22, 0x100ac000, 1 }, /* 0x6100ac000 */
+ { TARGET_DEV2G5 + 23, 0x100b0000, 1 }, /* 0x6100b0000 */
+ { TARGET_DEV2G5 + 32, 0x100b4000, 1 }, /* 0x6100b4000 */
+ { TARGET_DEV2G5 + 33, 0x100b8000, 1 }, /* 0x6100b8000 */
+ { TARGET_DEV2G5 + 34, 0x100bc000, 1 }, /* 0x6100bc000 */
+ { TARGET_DEV2G5 + 35, 0x100c0000, 1 }, /* 0x6100c0000 */
+ { TARGET_DEV2G5 + 36, 0x100c4000, 1 }, /* 0x6100c4000 */
+ { TARGET_DEV2G5 + 37, 0x100c8000, 1 }, /* 0x6100c8000 */
+ { TARGET_DEV2G5 + 38, 0x100cc000, 1 }, /* 0x6100cc000 */
+ { TARGET_DEV2G5 + 39, 0x100d0000, 1 }, /* 0x6100d0000 */
+ { TARGET_DEV2G5 + 40, 0x100d4000, 1 }, /* 0x6100d4000 */
+ { TARGET_DEV2G5 + 41, 0x100d8000, 1 }, /* 0x6100d8000 */
+ { TARGET_DEV2G5 + 42, 0x100dc000, 1 }, /* 0x6100dc000 */
+ { TARGET_DEV2G5 + 43, 0x100e0000, 1 }, /* 0x6100e0000 */
+ { TARGET_DEV2G5 + 44, 0x100e4000, 1 }, /* 0x6100e4000 */
+ { TARGET_DEV2G5 + 45, 0x100e8000, 1 }, /* 0x6100e8000 */
+ { TARGET_DEV2G5 + 46, 0x100ec000, 1 }, /* 0x6100ec000 */
+ { TARGET_DEV2G5 + 47, 0x100f0000, 1 }, /* 0x6100f0000 */
+ { TARGET_DEV2G5 + 57, 0x100f4000, 1 }, /* 0x6100f4000 */
+ { TARGET_DEV25G + 1, 0x100f8000, 1 }, /* 0x6100f8000 */
+ { TARGET_PCS25G_BR + 1, 0x100fc000, 1 }, /* 0x6100fc000 */
+ { TARGET_DEV2G5 + 59, 0x10104000, 1 }, /* 0x610104000 */
+ { TARGET_DEV25G + 3, 0x10108000, 1 }, /* 0x610108000 */
+ { TARGET_PCS25G_BR + 3, 0x1010c000, 1 }, /* 0x61010c000 */
+ { TARGET_DEV2G5 + 60, 0x10114000, 1 }, /* 0x610114000 */
+ { TARGET_DEV25G + 4, 0x10118000, 1 }, /* 0x610118000 */
+ { TARGET_PCS25G_BR + 4, 0x1011c000, 1 }, /* 0x61011c000 */
+ { TARGET_DEV2G5 + 64, 0x10124000, 1 }, /* 0x610124000 */
+ { TARGET_DEV5G + 12, 0x10128000, 1 }, /* 0x610128000 */
+ { TARGET_PCS5G_BR + 12, 0x1012c000, 1 }, /* 0x61012c000 */
+ { TARGET_PORT_CONF, 0x10130000, 1 }, /* 0x610130000 */
+ { TARGET_DEV2G5 + 3, 0x10404000, 1 }, /* 0x610404000 */
+ { TARGET_DEV5G + 3, 0x10408000, 1 }, /* 0x610408000 */
+ { TARGET_PCS5G_BR + 3, 0x1040c000, 1 }, /* 0x61040c000 */
+ { TARGET_DEV2G5 + 4, 0x10410000, 1 }, /* 0x610410000 */
+ { TARGET_DEV5G + 4, 0x10414000, 1 }, /* 0x610414000 */
+ { TARGET_PCS5G_BR + 4, 0x10418000, 1 }, /* 0x610418000 */
+ { TARGET_DEV2G5 + 5, 0x1041c000, 1 }, /* 0x61041c000 */
+ { TARGET_DEV5G + 5, 0x10420000, 1 }, /* 0x610420000 */
+ { TARGET_PCS5G_BR + 5, 0x10424000, 1 }, /* 0x610424000 */
+ { TARGET_DEV2G5 + 13, 0x10428000, 1 }, /* 0x610428000 */
+ { TARGET_DEV10G + 1, 0x1042c000, 1 }, /* 0x61042c000 */
+ { TARGET_PCS10G_BR + 1, 0x10430000, 1 }, /* 0x610430000 */
+ { TARGET_DEV2G5 + 24, 0x10434000, 1 }, /* 0x610434000 */
+ { TARGET_DEV2G5 + 25, 0x10438000, 1 }, /* 0x610438000 */
+ { TARGET_DEV2G5 + 26, 0x1043c000, 1 }, /* 0x61043c000 */
+ { TARGET_DEV2G5 + 27, 0x10440000, 1 }, /* 0x610440000 */
+ { TARGET_DEV2G5 + 28, 0x10444000, 1 }, /* 0x610444000 */
+ { TARGET_DEV2G5 + 29, 0x10448000, 1 }, /* 0x610448000 */
+ { TARGET_DEV2G5 + 30, 0x1044c000, 1 }, /* 0x61044c000 */
+ { TARGET_DEV2G5 + 31, 0x10450000, 1 }, /* 0x610450000 */
+ { TARGET_DEV2G5 + 48, 0x10454000, 1 }, /* 0x610454000 */
+ { TARGET_DEV10G + 4, 0x10458000, 1 }, /* 0x610458000 */
+ { TARGET_PCS10G_BR + 4, 0x1045c000, 1 }, /* 0x61045c000 */
+ { TARGET_DEV2G5 + 49, 0x10460000, 1 }, /* 0x610460000 */
+ { TARGET_DEV10G + 5, 0x10464000, 1 }, /* 0x610464000 */
+ { TARGET_PCS10G_BR + 5, 0x10468000, 1 }, /* 0x610468000 */
+ { TARGET_DEV2G5 + 50, 0x1046c000, 1 }, /* 0x61046c000 */
+ { TARGET_DEV10G + 6, 0x10470000, 1 }, /* 0x610470000 */
+ { TARGET_PCS10G_BR + 6, 0x10474000, 1 }, /* 0x610474000 */
+ { TARGET_DEV2G5 + 51, 0x10478000, 1 }, /* 0x610478000 */
+ { TARGET_DEV10G + 7, 0x1047c000, 1 }, /* 0x61047c000 */
+ { TARGET_PCS10G_BR + 7, 0x10480000, 1 }, /* 0x610480000 */
+ { TARGET_DEV2G5 + 52, 0x10484000, 1 }, /* 0x610484000 */
+ { TARGET_DEV10G + 8, 0x10488000, 1 }, /* 0x610488000 */
+ { TARGET_PCS10G_BR + 8, 0x1048c000, 1 }, /* 0x61048c000 */
+ { TARGET_DEV2G5 + 53, 0x10490000, 1 }, /* 0x610490000 */
+ { TARGET_DEV10G + 9, 0x10494000, 1 }, /* 0x610494000 */
+ { TARGET_PCS10G_BR + 9, 0x10498000, 1 }, /* 0x610498000 */
+ { TARGET_DEV2G5 + 54, 0x1049c000, 1 }, /* 0x61049c000 */
+ { TARGET_DEV10G + 10, 0x104a0000, 1 }, /* 0x6104a0000 */
+ { TARGET_PCS10G_BR + 10, 0x104a4000, 1 }, /* 0x6104a4000 */
+ { TARGET_DEV2G5 + 55, 0x104a8000, 1 }, /* 0x6104a8000 */
+ { TARGET_DEV10G + 11, 0x104ac000, 1 }, /* 0x6104ac000 */
+ { TARGET_PCS10G_BR + 11, 0x104b0000, 1 }, /* 0x6104b0000 */
+ { TARGET_DEV2G5 + 56, 0x104b4000, 1 }, /* 0x6104b4000 */
+ { TARGET_DEV25G, 0x104b8000, 1 }, /* 0x6104b8000 */
+ { TARGET_PCS25G_BR, 0x104bc000, 1 }, /* 0x6104bc000 */
+ { TARGET_DEV2G5 + 58, 0x104c4000, 1 }, /* 0x6104c4000 */
+ { TARGET_DEV25G + 2, 0x104c8000, 1 }, /* 0x6104c8000 */
+ { TARGET_PCS25G_BR + 2, 0x104cc000, 1 }, /* 0x6104cc000 */
+ { TARGET_DEV2G5 + 61, 0x104d4000, 1 }, /* 0x6104d4000 */
+ { TARGET_DEV25G + 5, 0x104d8000, 1 }, /* 0x6104d8000 */
+ { TARGET_PCS25G_BR + 5, 0x104dc000, 1 }, /* 0x6104dc000 */
+ { TARGET_DEV2G5 + 62, 0x104e4000, 1 }, /* 0x6104e4000 */
+ { TARGET_DEV25G + 6, 0x104e8000, 1 }, /* 0x6104e8000 */
+ { TARGET_PCS25G_BR + 6, 0x104ec000, 1 }, /* 0x6104ec000 */
+ { TARGET_DEV2G5 + 63, 0x104f4000, 1 }, /* 0x6104f4000 */
+ { TARGET_DEV25G + 7, 0x104f8000, 1 }, /* 0x6104f8000 */
+ { TARGET_PCS25G_BR + 7, 0x104fc000, 1 }, /* 0x6104fc000 */
+ { TARGET_DSM, 0x10504000, 1 }, /* 0x610504000 */
+ { TARGET_ASM, 0x10600000, 1 }, /* 0x610600000 */
+ { TARGET_GCB, 0x11010000, 2 }, /* 0x611010000 */
+ { TARGET_QS, 0x11030000, 2 }, /* 0x611030000 */
+ { TARGET_PTP, 0x11040000, 2 }, /* 0x611040000 */
+ { TARGET_ANA_ACL, 0x11050000, 2 }, /* 0x611050000 */
+ { TARGET_LRN, 0x11060000, 2 }, /* 0x611060000 */
+ { TARGET_VCAP_SUPER, 0x11080000, 2 }, /* 0x611080000 */
+ { TARGET_QSYS, 0x110a0000, 2 }, /* 0x6110a0000 */
+ { TARGET_QFWD, 0x110b0000, 2 }, /* 0x6110b0000 */
+ { TARGET_XQS, 0x110c0000, 2 }, /* 0x6110c0000 */
+ { TARGET_VCAP_ES2, 0x110d0000, 2 }, /* 0x6110d0000 */
+ { TARGET_VCAP_ES0, 0x110e0000, 2 }, /* 0x6110e0000 */
+ { TARGET_CLKGEN, 0x11100000, 2 }, /* 0x611100000 */
+ { TARGET_ANA_AC_POL, 0x11200000, 2 }, /* 0x611200000 */
+ { TARGET_QRES, 0x11280000, 2 }, /* 0x611280000 */
+ { TARGET_EACL, 0x112c0000, 2 }, /* 0x6112c0000 */
+ { TARGET_ANA_CL, 0x11400000, 2 }, /* 0x611400000 */
+ { TARGET_ANA_L3, 0x11480000, 2 }, /* 0x611480000 */
+ { TARGET_ANA_AC_SDLB, 0x11500000, 2 }, /* 0x611500000 */
+ { TARGET_HSCH, 0x11580000, 2 }, /* 0x611580000 */
+ { TARGET_REW, 0x11600000, 2 }, /* 0x611600000 */
+ { TARGET_ANA_L2, 0x11800000, 2 }, /* 0x611800000 */
+ { TARGET_ANA_AC, 0x11900000, 2 }, /* 0x611900000 */
+ { TARGET_VOP, 0x11a00000, 2 }, /* 0x611a00000 */
+};
+
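+/* Map the three physical I/O ranges and derive a virtual base pointer per
+ * register target: each range is ioremapped once, and every target's
+ * pointer is computed from the range base plus the offset in the table
+ * above.
+ */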
+static int sparx5_create_targets(struct sparx5 *sparx5)
+{
+ struct resource *iores[IO_RANGES];
+ void __iomem *iomem[IO_RANGES];
+ void __iomem *begin[IO_RANGES];
+ int range_id[IO_RANGES];
+ int idx, jdx;
+
+ for (idx = 0, jdx = 0; jdx < ARRAY_SIZE(sparx5_main_iomap); jdx++) {
+ const struct sparx5_main_io_resource *iomap = &sparx5_main_iomap[jdx];
+
+ if (idx == iomap->range) {
+ range_id[idx] = jdx;
+ idx++;
+ }
+ }
+ for (idx = 0; idx < IO_RANGES; idx++) {
+ iores[idx] = platform_get_resource(sparx5->pdev, IORESOURCE_MEM,
+ idx);
+ if (!iores[idx]) {
+ dev_err(sparx5->dev, "Invalid resource\n");
+ return -EINVAL;
+ }
+ iomem[idx] = devm_ioremap(sparx5->dev,
+ iores[idx]->start,
+ resource_size(iores[idx]));
+ if (!iomem[idx]) {
+ dev_err(sparx5->dev, "Unable to get switch registers: %s\n",
+ iores[idx]->name);
+ return -ENOMEM;
+ }
+ begin[idx] = iomem[idx] - sparx5_main_iomap[range_id[idx]].offset;
+ }
+ for (jdx = 0; jdx < ARRAY_SIZE(sparx5_main_iomap); jdx++) {
+ const struct sparx5_main_io_resource *iomap = &sparx5_main_iomap[jdx];
+
+ sparx5->regs[iomap->id] = begin[iomap->range] + iomap->offset;
+ }
+ return 0;
+}
+
+static int sparx5_create_port(struct sparx5 *sparx5,
+ struct initial_port_config *config)
+{
+ struct sparx5_port *spx5_port;
+ struct net_device *ndev;
+ struct phylink *phylink;
+ int err;
+
+ ndev = sparx5_create_netdev(sparx5, config->portno);
+ if (IS_ERR(ndev)) {
+ dev_err(sparx5->dev, "Could not create net device: %02u\n",
+ config->portno);
+ return PTR_ERR(ndev);
+ }
+ spx5_port = netdev_priv(ndev);
+ spx5_port->of_node = config->node;
+ spx5_port->serdes = config->serdes;
+ spx5_port->pvid = NULL_VID;
+ spx5_port->signd_internal = true;
+ spx5_port->signd_active_high = true;
+ spx5_port->signd_enable = true;
+ spx5_port->max_vlan_tags = SPX5_PORT_MAX_TAGS_NONE;
+ spx5_port->vlan_type = SPX5_VLAN_PORT_TYPE_UNAWARE;
+ spx5_port->custom_etype = 0x8880; /* Vitesse */
+ spx5_port->phylink_pcs.poll = true;
+ spx5_port->phylink_pcs.ops = &sparx5_phylink_pcs_ops;
+ spx5_port->phylink_pcs.neg_mode = true;
+ spx5_port->is_mrouter = false;
+ INIT_LIST_HEAD(&spx5_port->tc_templates);
+ sparx5->ports[config->portno] = spx5_port;
+
+ err = sparx5_port_init(sparx5, spx5_port, &config->conf);
+ if (err) {
+ dev_err(sparx5->dev, "port init failed\n");
+ return err;
+ }
+ spx5_port->conf = config->conf;
+
+ /* Setup VLAN */
+ sparx5_vlan_port_setup(sparx5, spx5_port->portno);
+
+ /* Create a phylink for PHY management. Also handles SFPs */
+ spx5_port->phylink_config.dev = &spx5_port->ndev->dev;
+ spx5_port->phylink_config.type = PHYLINK_NETDEV;
+ spx5_port->phylink_config.mac_capabilities = MAC_ASYM_PAUSE |
+ MAC_SYM_PAUSE | MAC_10 | MAC_100 | MAC_1000FD |
+ MAC_2500FD | MAC_5000FD | MAC_10000FD | MAC_25000FD;
+
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ spx5_port->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_QSGMII,
+ spx5_port->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ spx5_port->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX,
+ spx5_port->phylink_config.supported_interfaces);
+
+ if (spx5_port->conf.bandwidth == SPEED_5000 ||
+ spx5_port->conf.bandwidth == SPEED_10000 ||
+ spx5_port->conf.bandwidth == SPEED_25000)
+ __set_bit(PHY_INTERFACE_MODE_5GBASER,
+ spx5_port->phylink_config.supported_interfaces);
+
+ if (spx5_port->conf.bandwidth == SPEED_10000 ||
+ spx5_port->conf.bandwidth == SPEED_25000)
+ __set_bit(PHY_INTERFACE_MODE_10GBASER,
+ spx5_port->phylink_config.supported_interfaces);
+
+ if (spx5_port->conf.bandwidth == SPEED_25000)
+ __set_bit(PHY_INTERFACE_MODE_25GBASER,
+ spx5_port->phylink_config.supported_interfaces);
+
+ phylink = phylink_create(&spx5_port->phylink_config,
+ of_fwnode_handle(config->node),
+ config->conf.phy_mode,
+ &sparx5_phylink_mac_ops);
+ if (IS_ERR(phylink))
+ return PTR_ERR(phylink);
+
+ spx5_port->phylink = phylink;
+
+ return 0;
+}
+
+static int sparx5_init_ram(struct sparx5 *s5)
+{
+ const struct sparx5_ram_config spx5_ram_cfg[] = {
+ {spx5_reg_get(s5, ANA_AC_STAT_RESET), ANA_AC_STAT_RESET_RESET},
+ {spx5_reg_get(s5, ASM_STAT_CFG), ASM_STAT_CFG_STAT_CNT_CLR_SHOT},
+ {spx5_reg_get(s5, QSYS_RAM_INIT), QSYS_RAM_INIT_RAM_INIT},
+ {spx5_reg_get(s5, REW_RAM_INIT), QSYS_RAM_INIT_RAM_INIT},
+ {spx5_reg_get(s5, VOP_RAM_INIT), QSYS_RAM_INIT_RAM_INIT},
+ {spx5_reg_get(s5, ANA_AC_RAM_INIT), QSYS_RAM_INIT_RAM_INIT},
+ {spx5_reg_get(s5, ASM_RAM_INIT), QSYS_RAM_INIT_RAM_INIT},
+ {spx5_reg_get(s5, EACL_RAM_INIT), QSYS_RAM_INIT_RAM_INIT},
+ {spx5_reg_get(s5, VCAP_SUPER_RAM_INIT), QSYS_RAM_INIT_RAM_INIT},
+ {spx5_reg_get(s5, DSM_RAM_INIT), QSYS_RAM_INIT_RAM_INIT}
+ };
+ const struct sparx5_ram_config *cfg;
+ u32 value, pending, jdx, idx;
+
+ for (jdx = 0; jdx < 10; jdx++) {
+ pending = ARRAY_SIZE(spx5_ram_cfg);
+ for (idx = 0; idx < ARRAY_SIZE(spx5_ram_cfg); idx++) {
+ cfg = &spx5_ram_cfg[idx];
+ if (jdx == 0) {
+ writel(cfg->init_val, cfg->init_reg);
+ } else {
+ value = readl(cfg->init_reg);
+ if ((value & cfg->init_val) != cfg->init_val)
+ pending--;
+ }
+ }
+ if (!pending)
+ break;
+ usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
+ }
+
+ if (pending > 0) {
+ /* Still initializing, should be complete in
+ * less than 1ms
+ */
+ dev_err(s5->dev, "Memory initialization error\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int sparx5_init_switchcore(struct sparx5 *sparx5)
+{
+ u32 value;
+ int err = 0;
+
+ spx5_rmw(EACL_POL_EACL_CFG_EACL_FORCE_INIT_SET(1),
+ EACL_POL_EACL_CFG_EACL_FORCE_INIT,
+ sparx5,
+ EACL_POL_EACL_CFG);
+
+ spx5_rmw(EACL_POL_EACL_CFG_EACL_FORCE_INIT_SET(0),
+ EACL_POL_EACL_CFG_EACL_FORCE_INIT,
+ sparx5,
+ EACL_POL_EACL_CFG);
+
+ /* Initialize memories, if not done already */
+ value = spx5_rd(sparx5, HSCH_RESET_CFG);
+ if (!(value & HSCH_RESET_CFG_CORE_ENA)) {
+ err = sparx5_init_ram(sparx5);
+ if (err)
+ return err;
+ }
+
+ /* Reset counters */
+ spx5_wr(ANA_AC_STAT_RESET_RESET_SET(1), sparx5, ANA_AC_STAT_RESET);
+ spx5_wr(ASM_STAT_CFG_STAT_CNT_CLR_SHOT_SET(1), sparx5, ASM_STAT_CFG);
+
+ /* Enable switch-core and queue system */
+ spx5_wr(HSCH_RESET_CFG_CORE_ENA_SET(1), sparx5, HSCH_RESET_CFG);
+
+ return 0;
+}
+
+static int sparx5_init_coreclock(struct sparx5 *sparx5)
+{
+ enum sparx5_core_clockfreq freq = sparx5->coreclock;
+ u32 clk_div, clk_period, pol_upd_int, idx;
+
+	/* Verify that the core clock frequency is supported on the target.
+	 * If 'VTSS_CORE_CLOCK_DEFAULT' is selected, the highest supported
+	 * frequency is used.
+	 */
+ switch (sparx5->target_ct) {
+ case SPX5_TARGET_CT_7546:
+ if (sparx5->coreclock == SPX5_CORE_CLOCK_DEFAULT)
+ freq = SPX5_CORE_CLOCK_250MHZ;
+ else if (sparx5->coreclock != SPX5_CORE_CLOCK_250MHZ)
+ freq = 0; /* Not supported */
+ break;
+ case SPX5_TARGET_CT_7549:
+ case SPX5_TARGET_CT_7552:
+ case SPX5_TARGET_CT_7556:
+ if (sparx5->coreclock == SPX5_CORE_CLOCK_DEFAULT)
+ freq = SPX5_CORE_CLOCK_500MHZ;
+ else if (sparx5->coreclock != SPX5_CORE_CLOCK_500MHZ)
+ freq = 0; /* Not supported */
+ break;
+ case SPX5_TARGET_CT_7558:
+ case SPX5_TARGET_CT_7558TSN:
+ if (sparx5->coreclock == SPX5_CORE_CLOCK_DEFAULT)
+ freq = SPX5_CORE_CLOCK_625MHZ;
+ else if (sparx5->coreclock != SPX5_CORE_CLOCK_625MHZ)
+ freq = 0; /* Not supported */
+ break;
+ case SPX5_TARGET_CT_7546TSN:
+ if (sparx5->coreclock == SPX5_CORE_CLOCK_DEFAULT)
+ freq = SPX5_CORE_CLOCK_625MHZ;
+ break;
+ case SPX5_TARGET_CT_7549TSN:
+ case SPX5_TARGET_CT_7552TSN:
+ case SPX5_TARGET_CT_7556TSN:
+ if (sparx5->coreclock == SPX5_CORE_CLOCK_DEFAULT)
+ freq = SPX5_CORE_CLOCK_625MHZ;
+ else if (sparx5->coreclock == SPX5_CORE_CLOCK_250MHZ)
+ freq = 0; /* Not supported */
+ break;
+ default:
+ dev_err(sparx5->dev, "Target (%#04x) not supported\n",
+ sparx5->target_ct);
+ return -ENODEV;
+ }
+
+ switch (freq) {
+ case SPX5_CORE_CLOCK_250MHZ:
+ clk_div = 10;
+ pol_upd_int = 312;
+ break;
+ case SPX5_CORE_CLOCK_500MHZ:
+ clk_div = 5;
+ pol_upd_int = 624;
+ break;
+ case SPX5_CORE_CLOCK_625MHZ:
+ clk_div = 4;
+ pol_upd_int = 780;
+ break;
+ default:
+ dev_err(sparx5->dev, "%d coreclock not supported on (%#04x)\n",
+ sparx5->coreclock, sparx5->target_ct);
+ return -EINVAL;
+ }
+
+ /* Update state with chosen frequency */
+ sparx5->coreclock = freq;
+
+ /* Configure the LCPLL */
+ spx5_rmw(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_DIV_SET(clk_div) |
+ CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_PRE_DIV_SET(0) |
+ CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_DIR_SET(0) |
+ CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_SEL_SET(0) |
+ CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_ENA_SET(0) |
+ CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_ENA_SET(1),
+ CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_DIV |
+ CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_PRE_DIV |
+ CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_DIR |
+ CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_SEL |
+ CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_ENA |
+ CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_ENA,
+ sparx5,
+ CLKGEN_LCPLL1_CORE_CLK_CFG);
+
+ clk_period = sparx5_clk_period(freq);
+
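+	/* clk_period is in picoseconds (e.g. 1600 ps at 625 MHz), so
+	 * dividing by 100 yields the 100 ps units expected by the
+	 * register fields below.
+	 */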
+ spx5_rmw(HSCH_SYS_CLK_PER_100PS_SET(clk_period / 100),
+ HSCH_SYS_CLK_PER_100PS,
+ sparx5,
+ HSCH_SYS_CLK_PER);
+
+ spx5_rmw(ANA_AC_POL_BDLB_DLB_CTRL_CLK_PERIOD_01NS_SET(clk_period / 100),
+ ANA_AC_POL_BDLB_DLB_CTRL_CLK_PERIOD_01NS,
+ sparx5,
+ ANA_AC_POL_BDLB_DLB_CTRL);
+
+ spx5_rmw(ANA_AC_POL_SLB_DLB_CTRL_CLK_PERIOD_01NS_SET(clk_period / 100),
+ ANA_AC_POL_SLB_DLB_CTRL_CLK_PERIOD_01NS,
+ sparx5,
+ ANA_AC_POL_SLB_DLB_CTRL);
+
+ spx5_rmw(LRN_AUTOAGE_CFG_1_CLK_PERIOD_01NS_SET(clk_period / 100),
+ LRN_AUTOAGE_CFG_1_CLK_PERIOD_01NS,
+ sparx5,
+ LRN_AUTOAGE_CFG_1);
+
+ for (idx = 0; idx < 3; idx++)
+ spx5_rmw(GCB_SIO_CLOCK_SYS_CLK_PERIOD_SET(clk_period / 100),
+ GCB_SIO_CLOCK_SYS_CLK_PERIOD,
+ sparx5,
+ GCB_SIO_CLOCK(idx));
+
+ spx5_rmw(HSCH_TAS_STATEMACHINE_CFG_REVISIT_DLY_SET
+ ((256 * 1000) / clk_period),
+ HSCH_TAS_STATEMACHINE_CFG_REVISIT_DLY,
+ sparx5,
+ HSCH_TAS_STATEMACHINE_CFG);
+
+ spx5_rmw(ANA_AC_POL_POL_UPD_INT_CFG_POL_UPD_INT_SET(pol_upd_int),
+ ANA_AC_POL_POL_UPD_INT_CFG_POL_UPD_INT,
+ sparx5,
+ ANA_AC_POL_POL_UPD_INT_CFG);
+
+ return 0;
+}
+
+static int sparx5_qlim_set(struct sparx5 *sparx5)
+{
+ u32 res, dp, prio;
+
+ for (res = 0; res < 2; res++) {
+ for (prio = 0; prio < 8; prio++)
+ spx5_wr(0xFFF, sparx5,
+ QRES_RES_CFG(prio + 630 + res * 1024));
+
+ for (dp = 0; dp < 4; dp++)
+ spx5_wr(0xFFF, sparx5,
+ QRES_RES_CFG(dp + 638 + res * 1024));
+ }
+
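+	/* QLIM_WM() is assumed to turn a percentage into a watermark in
+	 * buffer cells, i.e. a fraction of the ~22795 cells given by
+	 * SPX5_BUFFER_MEMORY / SPX5_BUFFER_CELL_SZ.
+	 */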
+	/* Set 80, 90, 95 and 100% of the memory size as top watermarks */
+ spx5_wr(QLIM_WM(80), sparx5, XQS_QLIMIT_SHR_QLIM_CFG(0));
+ spx5_wr(QLIM_WM(90), sparx5, XQS_QLIMIT_SHR_CTOP_CFG(0));
+ spx5_wr(QLIM_WM(95), sparx5, XQS_QLIMIT_SHR_ATOP_CFG(0));
+ spx5_wr(QLIM_WM(100), sparx5, XQS_QLIMIT_SHR_TOP_CFG(0));
+
+ return 0;
+}
+
+/* Some boards need to map the SGPIO used for signal detect explicitly to
+ * the port module.
+ */
+static void sparx5_board_init(struct sparx5 *sparx5)
+{
+ int idx;
+
+ if (!sparx5->sd_sgpio_remapping)
+ return;
+
+ /* Enable SGPIO Signal Detect remapping */
+ spx5_rmw(GCB_HW_SGPIO_SD_CFG_SD_MAP_SEL,
+ GCB_HW_SGPIO_SD_CFG_SD_MAP_SEL,
+ sparx5,
+ GCB_HW_SGPIO_SD_CFG);
+
+	/* Map each port's signal detect to its LOS SGPIO */
+	for (idx = 0; idx < SPX5_PORTS; idx++)
+		if (sparx5->ports[idx] &&
+		    sparx5->ports[idx]->conf.sd_sgpio != ~0)
+			spx5_wr(sparx5->ports[idx]->conf.sd_sgpio,
+				sparx5,
+				GCB_HW_SGPIO_TO_SD_MAP_CFG(idx));
+}
+
+static int sparx5_start(struct sparx5 *sparx5)
+{
+ u8 broadcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ char queue_name[32];
+ u32 idx;
+ int err;
+
+ /* Setup own UPSIDs */
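+	/* An UPSID is presumably a block of 32 ports (the OWN_UPSID field is
+	 * 5 bits wide), so the 65 front ports span three UPSIDs, matching the
+	 * three register replications written here.
+	 */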
+ for (idx = 0; idx < 3; idx++) {
+ spx5_wr(idx, sparx5, ANA_AC_OWN_UPSID(idx));
+ spx5_wr(idx, sparx5, ANA_CL_OWN_UPSID(idx));
+ spx5_wr(idx, sparx5, ANA_L2_OWN_UPSID(idx));
+ spx5_wr(idx, sparx5, REW_OWN_UPSID(idx));
+ }
+
+ /* Enable CPU ports */
+ for (idx = SPX5_PORTS; idx < SPX5_PORTS_ALL; idx++)
+ spx5_rmw(QFWD_SWITCH_PORT_MODE_PORT_ENA_SET(1),
+ QFWD_SWITCH_PORT_MODE_PORT_ENA,
+ sparx5,
+ QFWD_SWITCH_PORT_MODE(idx));
+
+ /* Init masks */
+ sparx5_update_fwd(sparx5);
+
+	/* Enable CPU copy for the CPU and broadcast PGIDs */
+ spx5_wr(ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_SET(1),
+ sparx5, ANA_AC_PGID_MISC_CFG(PGID_CPU));
+ spx5_wr(ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_SET(1),
+ sparx5, ANA_AC_PGID_MISC_CFG(PGID_BCAST));
+
+ /* Recalc injected frame FCS */
+ for (idx = SPX5_PORT_CPU_0; idx <= SPX5_PORT_CPU_1; idx++)
+ spx5_rmw(ANA_CL_FILTER_CTRL_FORCE_FCS_UPDATE_ENA_SET(1),
+ ANA_CL_FILTER_CTRL_FORCE_FCS_UPDATE_ENA,
+ sparx5, ANA_CL_FILTER_CTRL(idx));
+
+ /* Init MAC table, ageing */
+ sparx5_mact_init(sparx5);
+
+ /* Init PGID table arbitrator */
+ sparx5_pgid_init(sparx5);
+
+ /* Setup VLANs */
+ sparx5_vlan_init(sparx5);
+
+ /* Add host mode BC address (points only to CPU) */
+ sparx5_mact_learn(sparx5, PGID_CPU, broadcast, NULL_VID);
+
+ /* Enable queue limitation watermarks */
+ sparx5_qlim_set(sparx5);
+
+ err = sparx5_config_auto_calendar(sparx5);
+ if (err)
+ return err;
+
+ err = sparx5_config_dsm_calendar(sparx5);
+ if (err)
+ return err;
+
+ /* Init stats */
+ err = sparx_stats_init(sparx5);
+ if (err)
+ return err;
+
+ /* Init mact_sw struct */
+ mutex_init(&sparx5->mact_lock);
+ INIT_LIST_HEAD(&sparx5->mact_entries);
+ snprintf(queue_name, sizeof(queue_name), "%s-mact",
+ dev_name(sparx5->dev));
+ sparx5->mact_queue = create_singlethread_workqueue(queue_name);
+ if (!sparx5->mact_queue)
+ return -ENOMEM;
+
+ INIT_DELAYED_WORK(&sparx5->mact_work, sparx5_mact_pull_work);
+ queue_delayed_work(sparx5->mact_queue, &sparx5->mact_work,
+ SPX5_MACT_PULL_DELAY);
+
+ mutex_init(&sparx5->mdb_lock);
+ INIT_LIST_HEAD(&sparx5->mdb_entries);
+
+ err = sparx5_register_netdevs(sparx5);
+ if (err)
+ return err;
+
+ sparx5_board_init(sparx5);
+ err = sparx5_register_notifier_blocks(sparx5);
+ if (err)
+ return err;
+
+ err = sparx5_vcap_init(sparx5);
+ if (err) {
+ sparx5_unregister_notifier_blocks(sparx5);
+ return err;
+ }
+
+	/* Start Frame DMA with fallback to register-based INJ/XTR */
+ err = -ENXIO;
+ if (sparx5->fdma_irq >= 0) {
+ if (GCB_CHIP_ID_REV_ID_GET(sparx5->chip_id) > 0)
+ err = devm_request_threaded_irq(sparx5->dev,
+ sparx5->fdma_irq,
+ NULL,
+ sparx5_fdma_handler,
+ IRQF_ONESHOT,
+ "sparx5-fdma", sparx5);
+ if (!err)
+ err = sparx5_fdma_start(sparx5);
+ if (err)
+ sparx5->fdma_irq = -ENXIO;
+ } else {
+ sparx5->fdma_irq = -ENXIO;
+ }
+ if (err && sparx5->xtr_irq >= 0) {
+ err = devm_request_irq(sparx5->dev, sparx5->xtr_irq,
+ sparx5_xtr_handler, IRQF_SHARED,
+ "sparx5-xtr", sparx5);
+ if (!err)
+ err = sparx5_manual_injection_mode(sparx5);
+ if (err)
+ sparx5->xtr_irq = -ENXIO;
+ } else {
+ sparx5->xtr_irq = -ENXIO;
+ }
+
+ if (sparx5->ptp_irq >= 0) {
+ err = devm_request_threaded_irq(sparx5->dev, sparx5->ptp_irq,
+ NULL, sparx5_ptp_irq_handler,
+ IRQF_ONESHOT, "sparx5-ptp",
+ sparx5);
+ if (err)
+ sparx5->ptp_irq = -ENXIO;
+
+		sparx5->ptp = true;
+ }
+
+ return err;
+}
+
+static void sparx5_cleanup_ports(struct sparx5 *sparx5)
+{
+ sparx5_unregister_netdevs(sparx5);
+ sparx5_destroy_netdevs(sparx5);
+}
+
+static int mchp_sparx5_probe(struct platform_device *pdev)
+{
+ struct initial_port_config *configs, *config;
+ struct device_node *np = pdev->dev.of_node;
+ struct device_node *ports, *portnp;
+ struct reset_control *reset;
+ struct sparx5 *sparx5;
+ int idx = 0, err = 0;
+
+ if (!np && !pdev->dev.platform_data)
+ return -ENODEV;
+
+ sparx5 = devm_kzalloc(&pdev->dev, sizeof(*sparx5), GFP_KERNEL);
+ if (!sparx5)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, sparx5);
+ sparx5->pdev = pdev;
+ sparx5->dev = &pdev->dev;
+
+ /* Do switch core reset if available */
+ reset = devm_reset_control_get_optional_shared(&pdev->dev, "switch");
+ if (IS_ERR(reset))
+ return dev_err_probe(&pdev->dev, PTR_ERR(reset),
+ "Failed to get switch reset controller.\n");
+ reset_control_reset(reset);
+
+ /* Default values, some from DT */
+ sparx5->coreclock = SPX5_CORE_CLOCK_DEFAULT;
+
+ sparx5->debugfs_root = debugfs_create_dir("sparx5", NULL);
+
+ ports = of_get_child_by_name(np, "ethernet-ports");
+ if (!ports) {
+ dev_err(sparx5->dev, "no ethernet-ports child node found\n");
+ return -ENODEV;
+ }
+ sparx5->port_count = of_get_child_count(ports);
+
+ configs = kcalloc(sparx5->port_count,
+ sizeof(struct initial_port_config), GFP_KERNEL);
+ if (!configs) {
+ err = -ENOMEM;
+ goto cleanup_pnode;
+ }
+
+ for_each_available_child_of_node(ports, portnp) {
+ struct sparx5_port_config *conf;
+ struct phy *serdes;
+ u32 portno;
+
+ err = of_property_read_u32(portnp, "reg", &portno);
+ if (err) {
+ dev_err(sparx5->dev, "port reg property error\n");
+ continue;
+ }
+ config = &configs[idx];
+ conf = &config->conf;
+ conf->speed = SPEED_UNKNOWN;
+ conf->bandwidth = SPEED_UNKNOWN;
+ err = of_get_phy_mode(portnp, &conf->phy_mode);
+ if (err) {
+ dev_err(sparx5->dev, "port %u: missing phy-mode\n",
+ portno);
+ continue;
+ }
+ err = of_property_read_u32(portnp, "microchip,bandwidth",
+ &conf->bandwidth);
+ if (err) {
+ dev_err(sparx5->dev, "port %u: missing bandwidth\n",
+ portno);
+ continue;
+ }
+ err = of_property_read_u32(portnp, "microchip,sd-sgpio", &conf->sd_sgpio);
+ if (err)
+ conf->sd_sgpio = ~0;
+ else
+ sparx5->sd_sgpio_remapping = true;
+ serdes = devm_of_phy_get(sparx5->dev, portnp, NULL);
+ if (IS_ERR(serdes)) {
+ err = dev_err_probe(sparx5->dev, PTR_ERR(serdes),
+ "port %u: missing serdes\n",
+ portno);
+ of_node_put(portnp);
+ goto cleanup_config;
+ }
+ config->portno = portno;
+ config->node = portnp;
+ config->serdes = serdes;
+
+ conf->media = PHY_MEDIA_DAC;
+ conf->serdes_reset = true;
+ conf->portmode = conf->phy_mode;
+ conf->power_down = true;
+ idx++;
+ }
+
+ err = sparx5_create_targets(sparx5);
+ if (err)
+ goto cleanup_config;
+
+ if (of_get_mac_address(np, sparx5->base_mac)) {
+		dev_info(sparx5->dev, "MAC address not set, using a random MAC\n");
+ eth_random_addr(sparx5->base_mac);
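+		/* Clear the last byte, presumably so per-port addresses
+		 * derived from the base MAC stay within this byte.
+		 */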
+ sparx5->base_mac[5] = 0;
+ }
+
+ sparx5->fdma_irq = platform_get_irq_byname(sparx5->pdev, "fdma");
+ sparx5->xtr_irq = platform_get_irq_byname(sparx5->pdev, "xtr");
+ sparx5->ptp_irq = platform_get_irq_byname(sparx5->pdev, "ptp");
+
+ /* Read chip ID to check CPU interface */
+ sparx5->chip_id = spx5_rd(sparx5, GCB_CHIP_ID);
+
+ sparx5->target_ct = (enum spx5_target_chiptype)
+ GCB_CHIP_ID_PART_ID_GET(sparx5->chip_id);
+
+ /* Initialize Switchcore and internal RAMs */
+ err = sparx5_init_switchcore(sparx5);
+ if (err) {
+ dev_err(sparx5->dev, "Switchcore initialization error\n");
+ goto cleanup_config;
+ }
+
+ /* Initialize the LC-PLL (core clock) and set affected registers */
+ err = sparx5_init_coreclock(sparx5);
+ if (err) {
+ dev_err(sparx5->dev, "LC-PLL initialization error\n");
+ goto cleanup_config;
+ }
+
+ for (idx = 0; idx < sparx5->port_count; ++idx) {
+ config = &configs[idx];
+ if (!config->node)
+ continue;
+
+ err = sparx5_create_port(sparx5, config);
+ if (err) {
+ dev_err(sparx5->dev, "port create error\n");
+ goto cleanup_ports;
+ }
+ }
+
+ err = sparx5_start(sparx5);
+ if (err) {
+ dev_err(sparx5->dev, "Start failed\n");
+ goto cleanup_ports;
+ }
+
+ err = sparx5_qos_init(sparx5);
+ if (err) {
+ dev_err(sparx5->dev, "Failed to initialize QoS\n");
+ goto cleanup_ports;
+ }
+
+ err = sparx5_ptp_init(sparx5);
+ if (err) {
+ dev_err(sparx5->dev, "PTP failed\n");
+ goto cleanup_ports;
+ }
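+	/* Success: fall through to free the initial port configs, which are
+	 * no longer needed once the ports have been created.
+	 */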
+ goto cleanup_config;
+
+cleanup_ports:
+ sparx5_cleanup_ports(sparx5);
+ if (sparx5->mact_queue)
+ destroy_workqueue(sparx5->mact_queue);
+cleanup_config:
+ kfree(configs);
+cleanup_pnode:
+ of_node_put(ports);
+ return err;
+}
+
+static int mchp_sparx5_remove(struct platform_device *pdev)
+{
+ struct sparx5 *sparx5 = platform_get_drvdata(pdev);
+
+ debugfs_remove_recursive(sparx5->debugfs_root);
+	if (sparx5->xtr_irq >= 0) {
+ disable_irq(sparx5->xtr_irq);
+ sparx5->xtr_irq = -ENXIO;
+ }
+	if (sparx5->fdma_irq >= 0) {
+ disable_irq(sparx5->fdma_irq);
+ sparx5->fdma_irq = -ENXIO;
+ }
+ sparx5_ptp_deinit(sparx5);
+ sparx5_fdma_stop(sparx5);
+ sparx5_cleanup_ports(sparx5);
+ sparx5_vcap_destroy(sparx5);
+ /* Unregister netdevs */
+ sparx5_unregister_notifier_blocks(sparx5);
+ destroy_workqueue(sparx5->mact_queue);
+
+ return 0;
+}
+
+static const struct of_device_id mchp_sparx5_match[] = {
+ { .compatible = "microchip,sparx5-switch" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, mchp_sparx5_match);
+
+static struct platform_driver mchp_sparx5_driver = {
+ .probe = mchp_sparx5_probe,
+ .remove = mchp_sparx5_remove,
+ .driver = {
+ .name = "sparx5-switch",
+ .of_match_table = mchp_sparx5_match,
+ },
+};
+
+module_platform_driver(mchp_sparx5_driver);
+
+MODULE_DESCRIPTION("Microchip Sparx5 switch driver");
+MODULE_AUTHOR("Steen Hegelund <steen.hegelund@microchip.com>");
+MODULE_LICENSE("Dual MIT/GPL");
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
new file mode 100644
index 0000000000..6f565c0c0c
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
@@ -0,0 +1,693 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#ifndef __SPARX5_MAIN_H__
+#define __SPARX5_MAIN_H__
+
+#include <linux/types.h>
+#include <linux/phy/phy.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <linux/if_vlan.h>
+#include <linux/bitmap.h>
+#include <linux/phylink.h>
+#include <linux/net_tstamp.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/hrtimer.h>
+#include <linux/debugfs.h>
+
+#include "sparx5_main_regs.h"
+
+/* Target chip type */
+enum spx5_target_chiptype {
+ SPX5_TARGET_CT_7546 = 0x7546, /* SparX-5-64 Enterprise */
+ SPX5_TARGET_CT_7549 = 0x7549, /* SparX-5-90 Enterprise */
+ SPX5_TARGET_CT_7552 = 0x7552, /* SparX-5-128 Enterprise */
+ SPX5_TARGET_CT_7556 = 0x7556, /* SparX-5-160 Enterprise */
+ SPX5_TARGET_CT_7558 = 0x7558, /* SparX-5-200 Enterprise */
+ SPX5_TARGET_CT_7546TSN = 0x47546, /* SparX-5-64i Industrial */
+ SPX5_TARGET_CT_7549TSN = 0x47549, /* SparX-5-90i Industrial */
+ SPX5_TARGET_CT_7552TSN = 0x47552, /* SparX-5-128i Industrial */
+ SPX5_TARGET_CT_7556TSN = 0x47556, /* SparX-5-160i Industrial */
+ SPX5_TARGET_CT_7558TSN = 0x47558, /* SparX-5-200i Industrial */
+};
+
+enum sparx5_port_max_tags {
+ SPX5_PORT_MAX_TAGS_NONE, /* No extra tags allowed */
+ SPX5_PORT_MAX_TAGS_ONE, /* Single tag allowed */
+ SPX5_PORT_MAX_TAGS_TWO /* Single and double tag allowed */
+};
+
+enum sparx5_vlan_port_type {
+ SPX5_VLAN_PORT_TYPE_UNAWARE, /* VLAN unaware port */
+ SPX5_VLAN_PORT_TYPE_C, /* C-port */
+ SPX5_VLAN_PORT_TYPE_S, /* S-port */
+ SPX5_VLAN_PORT_TYPE_S_CUSTOM /* S-port using custom type */
+};
+
+#define SPX5_PORTS 65
+#define SPX5_PORT_CPU (SPX5_PORTS) /* Next port is CPU port */
+#define SPX5_PORT_CPU_0 (SPX5_PORT_CPU + 0) /* CPU Port 65 */
+#define SPX5_PORT_CPU_1 (SPX5_PORT_CPU + 1) /* CPU Port 66 */
+#define SPX5_PORT_VD0 (SPX5_PORT_CPU + 2) /* VD0/Port 67 used for IPMC */
+#define SPX5_PORT_VD1 (SPX5_PORT_CPU + 3) /* VD1/Port 68 used for AFI/OAM */
+#define SPX5_PORT_VD2 (SPX5_PORT_CPU + 4) /* VD2/Port 69 used for IPinIP */
+#define SPX5_PORTS_ALL (SPX5_PORT_CPU + 5) /* Total number of ports */
+
+#define PGID_BASE SPX5_PORTS /* Starts after port PGIDs */
+#define PGID_UC_FLOOD (PGID_BASE + 0)
+#define PGID_MC_FLOOD (PGID_BASE + 1)
+#define PGID_IPV4_MC_DATA (PGID_BASE + 2)
+#define PGID_IPV4_MC_CTRL (PGID_BASE + 3)
+#define PGID_IPV6_MC_DATA (PGID_BASE + 4)
+#define PGID_IPV6_MC_CTRL (PGID_BASE + 5)
+#define PGID_BCAST (PGID_BASE + 6)
+#define PGID_CPU (PGID_BASE + 7)
+#define PGID_MCAST_START (PGID_BASE + 8)
+
+#define PGID_TABLE_SIZE 3290
+
+#define IFH_LEN 9 /* 9 32-bit words == 36 bytes */
+#define NULL_VID 0
+#define SPX5_MACT_PULL_DELAY (2 * HZ)
+#define SPX5_STATS_CHECK_DELAY (1 * HZ)
+#define SPX5_PRIOS 8 /* Number of priority queues */
+#define SPX5_BUFFER_CELL_SZ 184 /* Cell size */
+#define SPX5_BUFFER_MEMORY 4194280 /* 22795 cells * 184 bytes */
+
+#define XTR_QUEUE 0
+#define INJ_QUEUE 0
+
+#define FDMA_DCB_MAX 64
+#define FDMA_RX_DCB_MAX_DBS 15
+#define FDMA_TX_DCB_MAX_DBS 1
+
+#define SPARX5_PHC_COUNT 3
+#define SPARX5_PHC_PORT 0
+
+#define IFH_REW_OP_NOOP 0x0
+#define IFH_REW_OP_ONE_STEP_PTP 0x3
+#define IFH_REW_OP_TWO_STEP_PTP 0x4
+
+#define IFH_PDU_TYPE_NONE 0x0
+#define IFH_PDU_TYPE_PTP 0x5
+#define IFH_PDU_TYPE_IPV4_UDP_PTP 0x6
+#define IFH_PDU_TYPE_IPV6_UDP_PTP 0x7
+
+struct sparx5;
+
+struct sparx5_db_hw {
+ u64 dataptr;
+ u64 status;
+};
+
+struct sparx5_rx_dcb_hw {
+ u64 nextptr;
+ u64 info;
+ struct sparx5_db_hw db[FDMA_RX_DCB_MAX_DBS];
+};
+
+struct sparx5_tx_dcb_hw {
+ u64 nextptr;
+ u64 info;
+ struct sparx5_db_hw db[FDMA_TX_DCB_MAX_DBS];
+};
+
+/* Frame DMA receive state:
+ * For each DB there is an SKB, and the skb data pointer is mapped in
+ * the DB. Once a frame is received, the skb is given to the upper layers
+ * and a new skb is added to the DCB.
+ * When db_index reaches FDMA_RX_DCB_MAX_DBS, the DCB is reused.
+ */
+struct sparx5_rx {
+ struct sparx5_rx_dcb_hw *dcb_entries;
+ struct sparx5_rx_dcb_hw *last_entry;
+ struct sk_buff *skb[FDMA_DCB_MAX][FDMA_RX_DCB_MAX_DBS];
+ int db_index;
+ int dcb_index;
+ dma_addr_t dma;
+ struct napi_struct napi;
+ u32 channel_id;
+ struct net_device *ndev;
+ u64 packets;
+};
+
+/* Frame DMA transmit state:
+ * DCBs are chained using the DCB's nextptr field.
+ */
+struct sparx5_tx {
+ struct sparx5_tx_dcb_hw *curr_entry;
+ struct sparx5_tx_dcb_hw *first_entry;
+ struct list_head db_list;
+ dma_addr_t dma;
+ u32 channel_id;
+ u64 packets;
+ u64 dropped;
+};
+
+struct sparx5_port_config {
+ phy_interface_t portmode;
+ u32 bandwidth;
+ int speed;
+ int duplex;
+ enum phy_media media;
+ bool inband;
+ bool power_down;
+ bool autoneg;
+ bool serdes_reset;
+ u32 pause;
+ u32 pause_adv;
+ phy_interface_t phy_mode;
+ u32 sd_sgpio;
+};
+
+struct sparx5_port {
+ struct net_device *ndev;
+ struct sparx5 *sparx5;
+ struct device_node *of_node;
+ struct phy *serdes;
+ struct sparx5_port_config conf;
+ struct phylink_config phylink_config;
+ struct phylink *phylink;
+ struct phylink_pcs phylink_pcs;
+ u16 portno;
+ /* Ingress default VLAN (pvid) */
+ u16 pvid;
+ /* Egress default VLAN (vid) */
+ u16 vid;
+ bool signd_internal;
+ bool signd_active_high;
+ bool signd_enable;
+ bool flow_control;
+ enum sparx5_port_max_tags max_vlan_tags;
+ enum sparx5_vlan_port_type vlan_type;
+ u32 custom_etype;
+ bool vlan_aware;
+ struct hrtimer inj_timer;
+ /* ptp */
+ u8 ptp_cmd;
+ u16 ts_id;
+ struct sk_buff_head tx_skbs;
+ bool is_mrouter;
+ struct list_head tc_templates; /* list of TC templates on this port */
+};
+
+enum sparx5_core_clockfreq {
+ SPX5_CORE_CLOCK_DEFAULT, /* Defaults to the highest supported frequency */
+	SPX5_CORE_CLOCK_250MHZ,  /* 250 MHz core clock frequency */
+	SPX5_CORE_CLOCK_500MHZ,  /* 500 MHz core clock frequency */
+	SPX5_CORE_CLOCK_625MHZ,  /* 625 MHz core clock frequency */
+};
+
+struct sparx5_phc {
+ struct ptp_clock *clock;
+ struct ptp_clock_info info;
+ struct kernel_hwtstamp_config hwtstamp_config;
+ struct sparx5 *sparx5;
+ u8 index;
+};
+
+struct sparx5_skb_cb {
+ u8 rew_op;
+ u8 pdu_type;
+ u8 pdu_w16_offset;
+ u16 ts_id;
+ unsigned long jiffies;
+};
+
+struct sparx5_mdb_entry {
+ struct list_head list;
+ DECLARE_BITMAP(port_mask, SPX5_PORTS);
+ unsigned char addr[ETH_ALEN];
+ bool cpu_copy;
+ u16 vid;
+ u16 pgid_idx;
+};
+
+#define SPARX5_PTP_TIMEOUT msecs_to_jiffies(10)
+#define SPARX5_SKB_CB(skb) \
+ ((struct sparx5_skb_cb *)((skb)->cb))
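+
+/* sparx5_skb_cb must fit within the 48-byte skb->cb control buffer; the
+ * current layout uses well under half of it.
+ */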
+
+struct sparx5 {
+ struct platform_device *pdev;
+ struct device *dev;
+ u32 chip_id;
+ enum spx5_target_chiptype target_ct;
+ void __iomem *regs[NUM_TARGETS];
+ int port_count;
+ struct mutex lock; /* MAC reg lock */
+ /* port structures are in net device */
+ struct sparx5_port *ports[SPX5_PORTS];
+ enum sparx5_core_clockfreq coreclock;
+ /* Statistics */
+ u32 num_stats;
+ u32 num_ethtool_stats;
+ const char * const *stats_layout;
+ u64 *stats;
+ /* Workqueue for reading stats */
+ struct mutex queue_stats_lock;
+ struct delayed_work stats_work;
+ struct workqueue_struct *stats_queue;
+ /* Notifiers */
+ struct notifier_block netdevice_nb;
+ struct notifier_block switchdev_nb;
+ struct notifier_block switchdev_blocking_nb;
+ /* Switch state */
+ u8 base_mac[ETH_ALEN];
+ /* Associated bridge device (when bridged) */
+ struct net_device *hw_bridge_dev;
+ /* Bridged interfaces */
+ DECLARE_BITMAP(bridge_mask, SPX5_PORTS);
+ DECLARE_BITMAP(bridge_fwd_mask, SPX5_PORTS);
+ DECLARE_BITMAP(bridge_lrn_mask, SPX5_PORTS);
+ DECLARE_BITMAP(vlan_mask[VLAN_N_VID], SPX5_PORTS);
+ /* SW MAC table */
+ struct list_head mact_entries;
+ /* mac table list (mact_entries) mutex */
+ struct mutex mact_lock;
+ /* SW MDB table */
+ struct list_head mdb_entries;
+ /* mdb list mutex */
+ struct mutex mdb_lock;
+ struct delayed_work mact_work;
+ struct workqueue_struct *mact_queue;
+ /* Board specifics */
+ bool sd_sgpio_remapping;
+ /* Register based inj/xtr */
+ int xtr_irq;
+ /* Frame DMA */
+ int fdma_irq;
+ struct sparx5_rx rx;
+ struct sparx5_tx tx;
+ /* PTP */
+ bool ptp;
+ struct sparx5_phc phc[SPARX5_PHC_COUNT];
+ spinlock_t ptp_clock_lock; /* lock for phc */
+ spinlock_t ptp_ts_id_lock; /* lock for ts_id */
+ struct mutex ptp_lock; /* lock for ptp interface state */
+ u16 ptp_skbs;
+ int ptp_irq;
+ /* VCAP */
+ struct vcap_control *vcap_ctrl;
+ /* PGID allocation map */
+ u8 pgid_map[PGID_TABLE_SIZE];
+ /* Common root for debugfs */
+ struct dentry *debugfs_root;
+};
+
+/* sparx5_switchdev.c */
+int sparx5_register_notifier_blocks(struct sparx5 *sparx5);
+void sparx5_unregister_notifier_blocks(struct sparx5 *sparx5);
+
+/* sparx5_packet.c */
+struct frame_info {
+ int src_port;
+ u32 timestamp;
+};
+
+void sparx5_xtr_flush(struct sparx5 *sparx5, u8 grp);
+void sparx5_ifh_parse(u32 *ifh, struct frame_info *info);
+irqreturn_t sparx5_xtr_handler(int irq, void *_priv);
+netdev_tx_t sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev);
+int sparx5_manual_injection_mode(struct sparx5 *sparx5);
+void sparx5_port_inj_timer_setup(struct sparx5_port *port);
+
+/* sparx5_fdma.c */
+int sparx5_fdma_start(struct sparx5 *sparx5);
+int sparx5_fdma_stop(struct sparx5 *sparx5);
+int sparx5_fdma_xmit(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb);
+irqreturn_t sparx5_fdma_handler(int irq, void *args);
+
+/* sparx5_mactable.c */
+void sparx5_mact_pull_work(struct work_struct *work);
+int sparx5_mact_learn(struct sparx5 *sparx5, int port,
+ const unsigned char mac[ETH_ALEN], u16 vid);
+bool sparx5_mact_getnext(struct sparx5 *sparx5,
+ unsigned char mac[ETH_ALEN], u16 *vid, u32 *pcfg2);
+int sparx5_mact_find(struct sparx5 *sparx5,
+ const unsigned char mac[ETH_ALEN], u16 vid, u32 *pcfg2);
+int sparx5_mact_forget(struct sparx5 *sparx5,
+ const unsigned char mac[ETH_ALEN], u16 vid);
+int sparx5_add_mact_entry(struct sparx5 *sparx5,
+ struct net_device *dev,
+ u16 portno,
+ const unsigned char *addr, u16 vid);
+int sparx5_del_mact_entry(struct sparx5 *sparx5,
+ const unsigned char *addr,
+ u16 vid);
+int sparx5_mc_sync(struct net_device *dev, const unsigned char *addr);
+int sparx5_mc_unsync(struct net_device *dev, const unsigned char *addr);
+void sparx5_set_ageing(struct sparx5 *sparx5, int msecs);
+void sparx5_mact_init(struct sparx5 *sparx5);
+
+/* sparx5_vlan.c */
+void sparx5_pgid_update_mask(struct sparx5_port *port, int pgid, bool enable);
+void sparx5_pgid_clear(struct sparx5 *spx5, int pgid);
+void sparx5_pgid_read_mask(struct sparx5 *sparx5, int pgid, u32 portmask[3]);
+void sparx5_update_fwd(struct sparx5 *sparx5);
+void sparx5_vlan_init(struct sparx5 *sparx5);
+void sparx5_vlan_port_setup(struct sparx5 *sparx5, int portno);
+int sparx5_vlan_vid_add(struct sparx5_port *port, u16 vid, bool pvid,
+ bool untagged);
+int sparx5_vlan_vid_del(struct sparx5_port *port, u16 vid);
+void sparx5_vlan_port_apply(struct sparx5 *sparx5, struct sparx5_port *port);
+
+/* sparx5_calendar.c */
+int sparx5_config_auto_calendar(struct sparx5 *sparx5);
+int sparx5_config_dsm_calendar(struct sparx5 *sparx5);
+
+/* sparx5_ethtool.c */
+void sparx5_get_stats64(struct net_device *ndev, struct rtnl_link_stats64 *stats);
+int sparx_stats_init(struct sparx5 *sparx5);
+
+/* sparx5_dcb.c */
+#ifdef CONFIG_SPARX5_DCB
+int sparx5_dcb_init(struct sparx5 *sparx5);
+#else
+static inline int sparx5_dcb_init(struct sparx5 *sparx5)
+{
+ return 0;
+}
+#endif
+
+/* sparx5_netdev.c */
+void sparx5_set_port_ifh_timestamp(void *ifh_hdr, u64 timestamp);
+void sparx5_set_port_ifh_rew_op(void *ifh_hdr, u32 rew_op);
+void sparx5_set_port_ifh_pdu_type(void *ifh_hdr, u32 pdu_type);
+void sparx5_set_port_ifh_pdu_w16_offset(void *ifh_hdr, u32 pdu_w16_offset);
+void sparx5_set_port_ifh(void *ifh_hdr, u16 portno);
+bool sparx5_netdevice_check(const struct net_device *dev);
+struct net_device *sparx5_create_netdev(struct sparx5 *sparx5, u32 portno);
+int sparx5_register_netdevs(struct sparx5 *sparx5);
+void sparx5_destroy_netdevs(struct sparx5 *sparx5);
+void sparx5_unregister_netdevs(struct sparx5 *sparx5);
+
+/* sparx5_ptp.c */
+int sparx5_ptp_init(struct sparx5 *sparx5);
+void sparx5_ptp_deinit(struct sparx5 *sparx5);
+int sparx5_ptp_hwtstamp_set(struct sparx5_port *port,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack);
+void sparx5_ptp_hwtstamp_get(struct sparx5_port *port,
+ struct kernel_hwtstamp_config *cfg);
+void sparx5_ptp_rxtstamp(struct sparx5 *sparx5, struct sk_buff *skb,
+ u64 timestamp);
+int sparx5_ptp_txtstamp_request(struct sparx5_port *port,
+ struct sk_buff *skb);
+void sparx5_ptp_txtstamp_release(struct sparx5_port *port,
+ struct sk_buff *skb);
+irqreturn_t sparx5_ptp_irq_handler(int irq, void *args);
+int sparx5_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts);
+
+/* sparx5_vcap_impl.c */
+int sparx5_vcap_init(struct sparx5 *sparx5);
+void sparx5_vcap_destroy(struct sparx5 *sparx5);
+
+/* sparx5_pgid.c */
+enum sparx5_pgid_type {
+ SPX5_PGID_FREE,
+ SPX5_PGID_RESERVED,
+ SPX5_PGID_MULTICAST,
+};
+
+void sparx5_pgid_init(struct sparx5 *spx5);
+int sparx5_pgid_alloc_mcast(struct sparx5 *spx5, u16 *idx);
+int sparx5_pgid_free(struct sparx5 *spx5, u16 idx);
+
+/* sparx5_pool.c */
+struct sparx5_pool_entry {
+ u16 ref_cnt;
+ u32 idx; /* tc index */
+};
+
+u32 sparx5_pool_idx_to_id(u32 idx);
+int sparx5_pool_put(struct sparx5_pool_entry *pool, int size, u32 id);
+int sparx5_pool_get(struct sparx5_pool_entry *pool, int size, u32 *id);
+int sparx5_pool_get_with_idx(struct sparx5_pool_entry *pool, int size, u32 idx,
+ u32 *id);
+
+/* sparx5_sdlb.c */
+#define SPX5_SDLB_PUP_TOKEN_DISABLE 0x1FFF
+#define SPX5_SDLB_PUP_TOKEN_MAX (SPX5_SDLB_PUP_TOKEN_DISABLE - 1)
+#define SPX5_SDLB_GROUP_RATE_MAX 25000000000ULL
+#define SPX5_SDLB_2CYCLES_TYPE2_THRES_OFFSET 13
+#define SPX5_SDLB_CNT 4096
+#define SPX5_SDLB_GROUP_CNT 10
+#define SPX5_CLK_PER_100PS_DEFAULT 16
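+/* 16 units of 100 ps == 1600 ps: the period of the default 625 MHz core
+ * clock (see sparx5_clk_period()).
+ */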
+
+struct sparx5_sdlb_group {
+ u64 max_rate;
+ u32 min_burst;
+ u32 frame_size;
+ u32 pup_interval;
+ u32 nsets;
+};
+
+extern struct sparx5_sdlb_group sdlb_groups[SPX5_SDLB_GROUP_CNT];
+int sparx5_sdlb_pup_token_get(struct sparx5 *sparx5, u32 pup_interval,
+ u64 rate);
+
+int sparx5_sdlb_clk_hz_get(struct sparx5 *sparx5);
+int sparx5_sdlb_group_get_by_rate(struct sparx5 *sparx5, u32 rate, u32 burst);
+int sparx5_sdlb_group_get_by_index(struct sparx5 *sparx5, u32 idx, u32 *group);
+
+int sparx5_sdlb_group_add(struct sparx5 *sparx5, u32 group, u32 idx);
+int sparx5_sdlb_group_del(struct sparx5 *sparx5, u32 group, u32 idx);
+
+void sparx5_sdlb_group_init(struct sparx5 *sparx5, u64 max_rate, u32 min_burst,
+ u32 frame_size, u32 idx);
+
+/* sparx5_police.c */
+enum {
+ /* More policer types will be added later */
+ SPX5_POL_SERVICE
+};
+
+struct sparx5_policer {
+ u32 type;
+ u32 idx;
+ u64 rate;
+ u32 burst;
+ u32 group;
+ u8 event_mask;
+};
+
+int sparx5_policer_conf_set(struct sparx5 *sparx5, struct sparx5_policer *pol);
+
+/* sparx5_psfp.c */
+#define SPX5_PSFP_GCE_CNT 4
+#define SPX5_PSFP_SG_CNT 1024
+#define SPX5_PSFP_SG_MIN_CYCLE_TIME_NS (1 * NSEC_PER_USEC)
+#define SPX5_PSFP_SG_MAX_CYCLE_TIME_NS ((1 * NSEC_PER_SEC) - 1)
+#define SPX5_PSFP_SG_MAX_IPV (SPX5_PRIOS - 1)
+#define SPX5_PSFP_SG_OPEN (SPX5_PSFP_SG_CNT - 1)
+#define SPX5_PSFP_SG_CYCLE_TIME_DEFAULT 1000000
+#define SPX5_PSFP_SF_MAX_SDU 16383
+
+struct sparx5_psfp_fm {
+ struct sparx5_policer pol;
+};
+
+struct sparx5_psfp_gce {
+ bool gate_state; /* StreamGateState */
+ u32 interval; /* TimeInterval */
+ u32 ipv; /* InternalPriorityValue */
+ u32 maxoctets; /* IntervalOctetMax */
+};
+
+struct sparx5_psfp_sg {
+ bool gate_state; /* PSFPAdminGateStates */
+ bool gate_enabled; /* PSFPGateEnabled */
+ u32 ipv; /* PSFPAdminIPV */
+ struct timespec64 basetime; /* PSFPAdminBaseTime */
+ u32 cycletime; /* PSFPAdminCycleTime */
+ u32 cycletimeext; /* PSFPAdminCycleTimeExtension */
+ u32 num_entries; /* PSFPAdminControlListLength */
+ struct sparx5_psfp_gce gce[SPX5_PSFP_GCE_CNT];
+};
+
+struct sparx5_psfp_sf {
+ bool sblock_osize_ena;
+ bool sblock_osize;
+ u32 max_sdu;
+ u32 sgid; /* Gate id */
+ u32 fmid; /* Flow meter id */
+};
+
+int sparx5_psfp_fm_add(struct sparx5 *sparx5, u32 uidx,
+ struct sparx5_psfp_fm *fm, u32 *id);
+int sparx5_psfp_fm_del(struct sparx5 *sparx5, u32 id);
+
+int sparx5_psfp_sg_add(struct sparx5 *sparx5, u32 uidx,
+ struct sparx5_psfp_sg *sg, u32 *id);
+int sparx5_psfp_sg_del(struct sparx5 *sparx5, u32 id);
+
+int sparx5_psfp_sf_add(struct sparx5 *sparx5, const struct sparx5_psfp_sf *sf,
+ u32 *id);
+int sparx5_psfp_sf_del(struct sparx5 *sparx5, u32 id);
+
+u32 sparx5_psfp_isdx_get_sf(struct sparx5 *sparx5, u32 isdx);
+u32 sparx5_psfp_isdx_get_fm(struct sparx5 *sparx5, u32 isdx);
+u32 sparx5_psfp_sf_get_sg(struct sparx5 *sparx5, u32 sfid);
+void sparx5_isdx_conf_set(struct sparx5 *sparx5, u32 isdx, u32 sfid, u32 fmid);
+
+void sparx5_psfp_init(struct sparx5 *sparx5);
+
+/* sparx5_qos.c */
+void sparx5_new_base_time(struct sparx5 *sparx5, const u32 cycle_time,
+ const ktime_t org_base_time, ktime_t *new_base_time);
+
+/* Clock period in picoseconds */
+static inline u32 sparx5_clk_period(enum sparx5_core_clockfreq cclock)
+{
+ switch (cclock) {
+ case SPX5_CORE_CLOCK_250MHZ:
+ return 4000;
+ case SPX5_CORE_CLOCK_500MHZ:
+ return 2000;
+ case SPX5_CORE_CLOCK_625MHZ:
+ default:
+ return 1600;
+ }
+}
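+
+/* e.g. 1 / 250 MHz == 4 ns == 4000 ps; callers typically divide the result
+ * by 100 to get the 100 ps units many registers expect.
+ */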
+
+static inline bool sparx5_is_baser(phy_interface_t interface)
+{
+ return interface == PHY_INTERFACE_MODE_5GBASER ||
+ interface == PHY_INTERFACE_MODE_10GBASER ||
+ interface == PHY_INTERFACE_MODE_25GBASER;
+}
+
+extern const struct phylink_mac_ops sparx5_phylink_mac_ops;
+extern const struct phylink_pcs_ops sparx5_phylink_pcs_ops;
+extern const struct ethtool_ops sparx5_ethtool_ops;
+extern const struct dcbnl_rtnl_ops sparx5_dcbnl_ops;
+
+/* Calculate raw offset */
+static inline __pure int spx5_offset(int id, int tinst, int tcnt,
+ int gbase, int ginst,
+ int gcnt, int gwidth,
+ int raddr, int rinst,
+ int rcnt, int rwidth)
+{
+ WARN_ON((tinst) >= tcnt);
+ WARN_ON((ginst) >= gcnt);
+ WARN_ON((rinst) >= rcnt);
+ return gbase + ((ginst) * gwidth) +
+ raddr + ((rinst) * rwidth);
+}
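+
+/* A sketch of how the autogenerated register macros feed these helpers:
+ * ANA_AC_OWN_UPSID(r) in sparx5_main_regs.h expands to the argument list
+ * (TARGET_ANA_AC, 0, 1, 894472, 0, 1, 352, 52, r, 3, 4), so spx5_offset()
+ * returns 894472 + 52 + r * 4.
+ */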
+
+/* Read, write and modify register contents.
+ * The register definition macros start with the target id.
+ */
+static inline void __iomem *spx5_addr(void __iomem *base[],
+ int id, int tinst, int tcnt,
+ int gbase, int ginst,
+ int gcnt, int gwidth,
+ int raddr, int rinst,
+ int rcnt, int rwidth)
+{
+ WARN_ON((tinst) >= tcnt);
+ WARN_ON((ginst) >= gcnt);
+ WARN_ON((rinst) >= rcnt);
+ return base[id + (tinst)] +
+ gbase + ((ginst) * gwidth) +
+ raddr + ((rinst) * rwidth);
+}
+
+static inline void __iomem *spx5_inst_addr(void __iomem *base,
+ int gbase, int ginst,
+ int gcnt, int gwidth,
+ int raddr, int rinst,
+ int rcnt, int rwidth)
+{
+ WARN_ON((ginst) >= gcnt);
+ WARN_ON((rinst) >= rcnt);
+ return base +
+ gbase + ((ginst) * gwidth) +
+ raddr + ((rinst) * rwidth);
+}
+
+static inline u32 spx5_rd(struct sparx5 *sparx5, int id, int tinst, int tcnt,
+ int gbase, int ginst, int gcnt, int gwidth,
+ int raddr, int rinst, int rcnt, int rwidth)
+{
+ return readl(spx5_addr(sparx5->regs, id, tinst, tcnt, gbase, ginst,
+ gcnt, gwidth, raddr, rinst, rcnt, rwidth));
+}
+
+static inline u32 spx5_inst_rd(void __iomem *iomem, int id, int tinst, int tcnt,
+ int gbase, int ginst, int gcnt, int gwidth,
+ int raddr, int rinst, int rcnt, int rwidth)
+{
+ return readl(spx5_inst_addr(iomem, gbase, ginst,
+ gcnt, gwidth, raddr, rinst, rcnt, rwidth));
+}
+
+static inline void spx5_wr(u32 val, struct sparx5 *sparx5,
+ int id, int tinst, int tcnt,
+ int gbase, int ginst, int gcnt, int gwidth,
+ int raddr, int rinst, int rcnt, int rwidth)
+{
+ writel(val, spx5_addr(sparx5->regs, id, tinst, tcnt,
+ gbase, ginst, gcnt, gwidth,
+ raddr, rinst, rcnt, rwidth));
+}
+
+static inline void spx5_inst_wr(u32 val, void __iomem *iomem,
+ int id, int tinst, int tcnt,
+ int gbase, int ginst, int gcnt, int gwidth,
+ int raddr, int rinst, int rcnt, int rwidth)
+{
+ writel(val, spx5_inst_addr(iomem,
+ gbase, ginst, gcnt, gwidth,
+ raddr, rinst, rcnt, rwidth));
+}
+
+static inline void spx5_rmw(u32 val, u32 mask, struct sparx5 *sparx5,
+ int id, int tinst, int tcnt,
+ int gbase, int ginst, int gcnt, int gwidth,
+ int raddr, int rinst, int rcnt, int rwidth)
+{
+ u32 nval;
+
+ nval = readl(spx5_addr(sparx5->regs, id, tinst, tcnt, gbase, ginst,
+ gcnt, gwidth, raddr, rinst, rcnt, rwidth));
+ nval = (nval & ~mask) | (val & mask);
+ writel(nval, spx5_addr(sparx5->regs, id, tinst, tcnt, gbase, ginst,
+ gcnt, gwidth, raddr, rinst, rcnt, rwidth));
+}
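+
+/* Typical usage pairs a field's _SET() value with the field mask, e.g.:
+ *
+ *	spx5_rmw(ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_SET(1),
+ *		 ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA,
+ *		 sparx5, ANA_AC_PGID_MISC_CFG(PGID_CPU));
+ */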
+
+static inline void spx5_inst_rmw(u32 val, u32 mask, void __iomem *iomem,
+ int id, int tinst, int tcnt,
+ int gbase, int ginst, int gcnt, int gwidth,
+ int raddr, int rinst, int rcnt, int rwidth)
+{
+ u32 nval;
+
+ nval = readl(spx5_inst_addr(iomem, gbase, ginst, gcnt, gwidth, raddr,
+ rinst, rcnt, rwidth));
+ nval = (nval & ~mask) | (val & mask);
+ writel(nval, spx5_inst_addr(iomem, gbase, ginst, gcnt, gwidth, raddr,
+ rinst, rcnt, rwidth));
+}
+
+static inline void __iomem *spx5_inst_get(struct sparx5 *sparx5, int id, int tinst)
+{
+ return sparx5->regs[id + tinst];
+}
+
+static inline void __iomem *spx5_reg_get(struct sparx5 *sparx5,
+ int id, int tinst, int tcnt,
+ int gbase, int ginst, int gcnt, int gwidth,
+ int raddr, int rinst, int rcnt, int rwidth)
+{
+ return spx5_addr(sparx5->regs, id, tinst, tcnt,
+ gbase, ginst, gcnt, gwidth,
+ raddr, rinst, rcnt, rwidth);
+}
+
+#endif /* __SPARX5_MAIN_H__ */
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h b/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h
new file mode 100644
index 0000000000..bd03a0a3c1
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h
@@ -0,0 +1,7350 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2021 Microchip Technology Inc.
+ */
+
+/* This file is autogenerated by cml-utils 2023-02-10 11:18:53 +0100.
+ * Commit ID: c30fb4bf0281cd4a7133bdab6682f9e43c872ada
+ */
+
+#ifndef _SPARX5_MAIN_REGS_H_
+#define _SPARX5_MAIN_REGS_H_
+
+#include <linux/bitfield.h>
+#include <linux/types.h>
+#include <linux/bug.h>
+
+enum sparx5_target {
+ TARGET_ANA_AC = 1,
+ TARGET_ANA_ACL = 2,
+ TARGET_ANA_AC_POL = 4,
+ TARGET_ANA_AC_SDLB = 5,
+ TARGET_ANA_CL = 6,
+ TARGET_ANA_L2 = 7,
+ TARGET_ANA_L3 = 8,
+ TARGET_ASM = 9,
+ TARGET_CLKGEN = 11,
+ TARGET_CPU = 12,
+ TARGET_DEV10G = 17,
+ TARGET_DEV25G = 29,
+ TARGET_DEV2G5 = 37,
+ TARGET_DEV5G = 102,
+ TARGET_DSM = 115,
+ TARGET_EACL = 116,
+ TARGET_FDMA = 117,
+ TARGET_GCB = 118,
+ TARGET_HSCH = 119,
+ TARGET_LRN = 122,
+ TARGET_PCEP = 129,
+ TARGET_PCS10G_BR = 132,
+ TARGET_PCS25G_BR = 144,
+ TARGET_PCS5G_BR = 160,
+ TARGET_PORT_CONF = 173,
+ TARGET_PTP = 174,
+ TARGET_QFWD = 175,
+ TARGET_QRES = 176,
+ TARGET_QS = 177,
+ TARGET_QSYS = 178,
+ TARGET_REW = 179,
+ TARGET_VCAP_ES0 = 323,
+ TARGET_VCAP_ES2 = 324,
+ TARGET_VCAP_SUPER = 326,
+ TARGET_VOP = 327,
+ TARGET_XQS = 331,
+ NUM_TARGETS = 332
+};
+
+#define __REG(...) __VA_ARGS__
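+
+/* The arguments are, in order: target id, target instance and count, group
+ * base address, instance, count and width, then register address, instance,
+ * count and width; they map one-to-one onto the parameters of spx5_offset()
+ * and spx5_addr() in sparx5_main.h.
+ */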
+
+/* ANA_AC:RAM_CTRL:RAM_INIT */
+#define ANA_AC_RAM_INIT __REG(TARGET_ANA_AC,\
+ 0, 1, 839108, 0, 1, 4, 0, 0, 1, 4)
+
+#define ANA_AC_RAM_INIT_RAM_INIT BIT(1)
+#define ANA_AC_RAM_INIT_RAM_INIT_SET(x)\
+ FIELD_PREP(ANA_AC_RAM_INIT_RAM_INIT, x)
+#define ANA_AC_RAM_INIT_RAM_INIT_GET(x)\
+ FIELD_GET(ANA_AC_RAM_INIT_RAM_INIT, x)
+
+#define ANA_AC_RAM_INIT_RAM_CFG_HOOK BIT(0)
+#define ANA_AC_RAM_INIT_RAM_CFG_HOOK_SET(x)\
+ FIELD_PREP(ANA_AC_RAM_INIT_RAM_CFG_HOOK, x)
+#define ANA_AC_RAM_INIT_RAM_CFG_HOOK_GET(x)\
+ FIELD_GET(ANA_AC_RAM_INIT_RAM_CFG_HOOK, x)
+
+/* ANA_AC:PS_COMMON:OWN_UPSID */
+#define ANA_AC_OWN_UPSID(r) __REG(TARGET_ANA_AC,\
+ 0, 1, 894472, 0, 1, 352, 52, r, 3, 4)
+
+#define ANA_AC_OWN_UPSID_OWN_UPSID GENMASK(4, 0)
+#define ANA_AC_OWN_UPSID_OWN_UPSID_SET(x)\
+ FIELD_PREP(ANA_AC_OWN_UPSID_OWN_UPSID, x)
+#define ANA_AC_OWN_UPSID_OWN_UPSID_GET(x)\
+ FIELD_GET(ANA_AC_OWN_UPSID_OWN_UPSID, x)
+
+/* ANA_AC:SRC:SRC_CFG */
+#define ANA_AC_SRC_CFG(g) __REG(TARGET_ANA_AC,\
+ 0, 1, 849920, g, 102, 16, 0, 0, 1, 4)
+
+/* ANA_AC:SRC:SRC_CFG1 */
+#define ANA_AC_SRC_CFG1(g) __REG(TARGET_ANA_AC,\
+ 0, 1, 849920, g, 102, 16, 4, 0, 1, 4)
+
+/* ANA_AC:SRC:SRC_CFG2 */
+#define ANA_AC_SRC_CFG2(g) __REG(TARGET_ANA_AC,\
+ 0, 1, 849920, g, 102, 16, 8, 0, 1, 4)
+
+#define ANA_AC_SRC_CFG2_PORT_MASK2 BIT(0)
+#define ANA_AC_SRC_CFG2_PORT_MASK2_SET(x)\
+ FIELD_PREP(ANA_AC_SRC_CFG2_PORT_MASK2, x)
+#define ANA_AC_SRC_CFG2_PORT_MASK2_GET(x)\
+ FIELD_GET(ANA_AC_SRC_CFG2_PORT_MASK2, x)
+
+/* ANA_AC:PGID:PGID_CFG */
+#define ANA_AC_PGID_CFG(g) __REG(TARGET_ANA_AC,\
+ 0, 1, 786432, g, 3290, 16, 0, 0, 1, 4)
+
+/* ANA_AC:PGID:PGID_CFG1 */
+#define ANA_AC_PGID_CFG1(g) __REG(TARGET_ANA_AC,\
+ 0, 1, 786432, g, 3290, 16, 4, 0, 1, 4)
+
+/* ANA_AC:PGID:PGID_CFG2 */
+#define ANA_AC_PGID_CFG2(g) __REG(TARGET_ANA_AC,\
+ 0, 1, 786432, g, 3290, 16, 8, 0, 1, 4)
+
+#define ANA_AC_PGID_CFG2_PORT_MASK2 BIT(0)
+#define ANA_AC_PGID_CFG2_PORT_MASK2_SET(x)\
+ FIELD_PREP(ANA_AC_PGID_CFG2_PORT_MASK2, x)
+#define ANA_AC_PGID_CFG2_PORT_MASK2_GET(x)\
+ FIELD_GET(ANA_AC_PGID_CFG2_PORT_MASK2, x)
+
+/* ANA_AC:PGID:PGID_MISC_CFG */
+#define ANA_AC_PGID_MISC_CFG(g) __REG(TARGET_ANA_AC,\
+ 0, 1, 786432, g, 3290, 16, 12, 0, 1, 4)
+
+#define ANA_AC_PGID_MISC_CFG_PGID_CPU_QU GENMASK(6, 4)
+#define ANA_AC_PGID_MISC_CFG_PGID_CPU_QU_SET(x)\
+ FIELD_PREP(ANA_AC_PGID_MISC_CFG_PGID_CPU_QU, x)
+#define ANA_AC_PGID_MISC_CFG_PGID_CPU_QU_GET(x)\
+ FIELD_GET(ANA_AC_PGID_MISC_CFG_PGID_CPU_QU, x)
+
+#define ANA_AC_PGID_MISC_CFG_STACK_TYPE_ENA BIT(1)
+#define ANA_AC_PGID_MISC_CFG_STACK_TYPE_ENA_SET(x)\
+ FIELD_PREP(ANA_AC_PGID_MISC_CFG_STACK_TYPE_ENA, x)
+#define ANA_AC_PGID_MISC_CFG_STACK_TYPE_ENA_GET(x)\
+ FIELD_GET(ANA_AC_PGID_MISC_CFG_STACK_TYPE_ENA, x)
+
+#define ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA BIT(0)
+#define ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_SET(x)\
+ FIELD_PREP(ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA, x)
+#define ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_GET(x)\
+ FIELD_GET(ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA, x)
+
+/* ANA_AC:TSN_SF:TSN_SF */
+#define ANA_AC_TSN_SF __REG(TARGET_ANA_AC,\
+ 0, 1, 839136, 0, 1, 4, 0, 0, 1, 4)
+
+#define ANA_AC_TSN_SF_TSN_STREAM_BLOCK_OVERSIZE_STICKY BIT(9)
+#define ANA_AC_TSN_SF_TSN_STREAM_BLOCK_OVERSIZE_STICKY_SET(x)\
+ FIELD_PREP(ANA_AC_TSN_SF_TSN_STREAM_BLOCK_OVERSIZE_STICKY, x)
+#define ANA_AC_TSN_SF_TSN_STREAM_BLOCK_OVERSIZE_STICKY_GET(x)\
+ FIELD_GET(ANA_AC_TSN_SF_TSN_STREAM_BLOCK_OVERSIZE_STICKY, x)
+
+#define ANA_AC_TSN_SF_PORT_NUM GENMASK(8, 0)
+#define ANA_AC_TSN_SF_PORT_NUM_SET(x)\
+ FIELD_PREP(ANA_AC_TSN_SF_PORT_NUM, x)
+#define ANA_AC_TSN_SF_PORT_NUM_GET(x)\
+ FIELD_GET(ANA_AC_TSN_SF_PORT_NUM, x)
+
+/* ANA_AC:TSN_SF_CFG:TSN_SF_CFG */
+#define ANA_AC_TSN_SF_CFG(g) __REG(TARGET_ANA_AC,\
+ 0, 1, 839680, g, 1024, 4, 0, 0, 1, 4)
+
+#define ANA_AC_TSN_SF_CFG_TSN_SGID GENMASK(25, 16)
+#define ANA_AC_TSN_SF_CFG_TSN_SGID_SET(x)\
+ FIELD_PREP(ANA_AC_TSN_SF_CFG_TSN_SGID, x)
+#define ANA_AC_TSN_SF_CFG_TSN_SGID_GET(x)\
+ FIELD_GET(ANA_AC_TSN_SF_CFG_TSN_SGID, x)
+
+#define ANA_AC_TSN_SF_CFG_TSN_MAX_SDU GENMASK(15, 2)
+#define ANA_AC_TSN_SF_CFG_TSN_MAX_SDU_SET(x)\
+ FIELD_PREP(ANA_AC_TSN_SF_CFG_TSN_MAX_SDU, x)
+#define ANA_AC_TSN_SF_CFG_TSN_MAX_SDU_GET(x)\
+ FIELD_GET(ANA_AC_TSN_SF_CFG_TSN_MAX_SDU, x)
+
+#define ANA_AC_TSN_SF_CFG_BLOCK_OVERSIZE_ENA BIT(1)
+#define ANA_AC_TSN_SF_CFG_BLOCK_OVERSIZE_ENA_SET(x)\
+ FIELD_PREP(ANA_AC_TSN_SF_CFG_BLOCK_OVERSIZE_ENA, x)
+#define ANA_AC_TSN_SF_CFG_BLOCK_OVERSIZE_ENA_GET(x)\
+ FIELD_GET(ANA_AC_TSN_SF_CFG_BLOCK_OVERSIZE_ENA, x)
+
+#define ANA_AC_TSN_SF_CFG_BLOCK_OVERSIZE_STATE BIT(0)
+#define ANA_AC_TSN_SF_CFG_BLOCK_OVERSIZE_STATE_SET(x)\
+ FIELD_PREP(ANA_AC_TSN_SF_CFG_BLOCK_OVERSIZE_STATE, x)
+#define ANA_AC_TSN_SF_CFG_BLOCK_OVERSIZE_STATE_GET(x)\
+ FIELD_GET(ANA_AC_TSN_SF_CFG_BLOCK_OVERSIZE_STATE, x)
+
+/* ANA_AC:TSN_SF_STATUS:TSN_SF_STATUS */
+#define ANA_AC_TSN_SF_STATUS __REG(TARGET_ANA_AC,\
+ 0, 1, 839072, 0, 1, 16, 0, 0, 1, 4)
+
+#define ANA_AC_TSN_SF_STATUS_FRM_LEN GENMASK(25, 12)
+#define ANA_AC_TSN_SF_STATUS_FRM_LEN_SET(x)\
+ FIELD_PREP(ANA_AC_TSN_SF_STATUS_FRM_LEN, x)
+#define ANA_AC_TSN_SF_STATUS_FRM_LEN_GET(x)\
+ FIELD_GET(ANA_AC_TSN_SF_STATUS_FRM_LEN, x)
+
+#define ANA_AC_TSN_SF_STATUS_DLB_DROP BIT(11)
+#define ANA_AC_TSN_SF_STATUS_DLB_DROP_SET(x)\
+ FIELD_PREP(ANA_AC_TSN_SF_STATUS_DLB_DROP, x)
+#define ANA_AC_TSN_SF_STATUS_DLB_DROP_GET(x)\
+ FIELD_GET(ANA_AC_TSN_SF_STATUS_DLB_DROP, x)
+
+#define ANA_AC_TSN_SF_STATUS_TSN_SFID GENMASK(10, 1)
+#define ANA_AC_TSN_SF_STATUS_TSN_SFID_SET(x)\
+ FIELD_PREP(ANA_AC_TSN_SF_STATUS_TSN_SFID, x)
+#define ANA_AC_TSN_SF_STATUS_TSN_SFID_GET(x)\
+ FIELD_GET(ANA_AC_TSN_SF_STATUS_TSN_SFID, x)
+
+#define ANA_AC_TSN_SF_STATUS_TSTAMP_VLD BIT(0)
+#define ANA_AC_TSN_SF_STATUS_TSTAMP_VLD_SET(x)\
+ FIELD_PREP(ANA_AC_TSN_SF_STATUS_TSTAMP_VLD, x)
+#define ANA_AC_TSN_SF_STATUS_TSTAMP_VLD_GET(x)\
+ FIELD_GET(ANA_AC_TSN_SF_STATUS_TSTAMP_VLD, x)
+
+/* ANA_AC:SG_ACCESS:SG_ACCESS_CTRL */
+#define ANA_AC_SG_ACCESS_CTRL __REG(TARGET_ANA_AC,\
+ 0, 1, 839140, 0, 1, 12, 0, 0, 1, 4)
+
+#define ANA_AC_SG_ACCESS_CTRL_SGID GENMASK(9, 0)
+#define ANA_AC_SG_ACCESS_CTRL_SGID_SET(x)\
+ FIELD_PREP(ANA_AC_SG_ACCESS_CTRL_SGID, x)
+#define ANA_AC_SG_ACCESS_CTRL_SGID_GET(x)\
+ FIELD_GET(ANA_AC_SG_ACCESS_CTRL_SGID, x)
+
+#define ANA_AC_SG_ACCESS_CTRL_CONFIG_CHANGE BIT(28)
+#define ANA_AC_SG_ACCESS_CTRL_CONFIG_CHANGE_SET(x)\
+ FIELD_PREP(ANA_AC_SG_ACCESS_CTRL_CONFIG_CHANGE, x)
+#define ANA_AC_SG_ACCESS_CTRL_CONFIG_CHANGE_GET(x)\
+ FIELD_GET(ANA_AC_SG_ACCESS_CTRL_CONFIG_CHANGE, x)
+
+/* ANA_AC:SG_ACCESS:SG_CYCLETIME_UPDATE_PERIOD */
+#define ANA_AC_SG_CYCLETIME_UPDATE_PERIOD __REG(TARGET_ANA_AC,\
+ 0, 1, 839140, 0, 1, 12, 8, 0, 1, 4)
+
+#define ANA_AC_SG_CYCLETIME_UPDATE_PERIOD_SG_CT_CLKS GENMASK(15, 0)
+#define ANA_AC_SG_CYCLETIME_UPDATE_PERIOD_SG_CT_CLKS_SET(x)\
+ FIELD_PREP(ANA_AC_SG_CYCLETIME_UPDATE_PERIOD_SG_CT_CLKS, x)
+#define ANA_AC_SG_CYCLETIME_UPDATE_PERIOD_SG_CT_CLKS_GET(x)\
+ FIELD_GET(ANA_AC_SG_CYCLETIME_UPDATE_PERIOD_SG_CT_CLKS, x)
+
+#define ANA_AC_SG_CYCLETIME_UPDATE_PERIOD_SG_CT_UPDATE_ENA BIT(31)
+#define ANA_AC_SG_CYCLETIME_UPDATE_PERIOD_SG_CT_UPDATE_ENA_SET(x)\
+ FIELD_PREP(ANA_AC_SG_CYCLETIME_UPDATE_PERIOD_SG_CT_UPDATE_ENA, x)
+#define ANA_AC_SG_CYCLETIME_UPDATE_PERIOD_SG_CT_UPDATE_ENA_GET(x)\
+ FIELD_GET(ANA_AC_SG_CYCLETIME_UPDATE_PERIOD_SG_CT_UPDATE_ENA, x)
+
+/* ANA_AC:SG_CONFIG:SG_CONFIG_REG_1 */
+#define ANA_AC_SG_CONFIG_REG_1 __REG(TARGET_ANA_AC,\
+ 0, 1, 851584, 0, 1, 128, 48, 0, 1, 4)
+
+/* ANA_AC:SG_CONFIG:SG_CONFIG_REG_2 */
+#define ANA_AC_SG_CONFIG_REG_2 __REG(TARGET_ANA_AC,\
+ 0, 1, 851584, 0, 1, 128, 52, 0, 1, 4)
+
+/* ANA_AC:SG_CONFIG:SG_CONFIG_REG_3 */
+#define ANA_AC_SG_CONFIG_REG_3 __REG(TARGET_ANA_AC,\
+ 0, 1, 851584, 0, 1, 128, 56, 0, 1, 4)
+
+#define ANA_AC_SG_CONFIG_REG_3_BASE_TIME_SEC_MSB GENMASK(15, 0)
+#define ANA_AC_SG_CONFIG_REG_3_BASE_TIME_SEC_MSB_SET(x)\
+ FIELD_PREP(ANA_AC_SG_CONFIG_REG_3_BASE_TIME_SEC_MSB, x)
+#define ANA_AC_SG_CONFIG_REG_3_BASE_TIME_SEC_MSB_GET(x)\
+ FIELD_GET(ANA_AC_SG_CONFIG_REG_3_BASE_TIME_SEC_MSB, x)
+
+#define ANA_AC_SG_CONFIG_REG_3_LIST_LENGTH GENMASK(18, 16)
+#define ANA_AC_SG_CONFIG_REG_3_LIST_LENGTH_SET(x)\
+ FIELD_PREP(ANA_AC_SG_CONFIG_REG_3_LIST_LENGTH, x)
+#define ANA_AC_SG_CONFIG_REG_3_LIST_LENGTH_GET(x)\
+ FIELD_GET(ANA_AC_SG_CONFIG_REG_3_LIST_LENGTH, x)
+
+#define ANA_AC_SG_CONFIG_REG_3_GATE_ENABLE BIT(20)
+#define ANA_AC_SG_CONFIG_REG_3_GATE_ENABLE_SET(x)\
+ FIELD_PREP(ANA_AC_SG_CONFIG_REG_3_GATE_ENABLE, x)
+#define ANA_AC_SG_CONFIG_REG_3_GATE_ENABLE_GET(x)\
+ FIELD_GET(ANA_AC_SG_CONFIG_REG_3_GATE_ENABLE, x)
+
+#define ANA_AC_SG_CONFIG_REG_3_INIT_IPS GENMASK(24, 21)
+#define ANA_AC_SG_CONFIG_REG_3_INIT_IPS_SET(x)\
+ FIELD_PREP(ANA_AC_SG_CONFIG_REG_3_INIT_IPS, x)
+#define ANA_AC_SG_CONFIG_REG_3_INIT_IPS_GET(x)\
+ FIELD_GET(ANA_AC_SG_CONFIG_REG_3_INIT_IPS, x)
+
+#define ANA_AC_SG_CONFIG_REG_3_INIT_GATE_STATE BIT(25)
+#define ANA_AC_SG_CONFIG_REG_3_INIT_GATE_STATE_SET(x)\
+ FIELD_PREP(ANA_AC_SG_CONFIG_REG_3_INIT_GATE_STATE, x)
+#define ANA_AC_SG_CONFIG_REG_3_INIT_GATE_STATE_GET(x)\
+ FIELD_GET(ANA_AC_SG_CONFIG_REG_3_INIT_GATE_STATE, x)
+
+#define ANA_AC_SG_CONFIG_REG_3_INVALID_RX_ENA BIT(26)
+#define ANA_AC_SG_CONFIG_REG_3_INVALID_RX_ENA_SET(x)\
+ FIELD_PREP(ANA_AC_SG_CONFIG_REG_3_INVALID_RX_ENA, x)
+#define ANA_AC_SG_CONFIG_REG_3_INVALID_RX_ENA_GET(x)\
+ FIELD_GET(ANA_AC_SG_CONFIG_REG_3_INVALID_RX_ENA, x)
+
+#define ANA_AC_SG_CONFIG_REG_3_INVALID_RX BIT(27)
+#define ANA_AC_SG_CONFIG_REG_3_INVALID_RX_SET(x)\
+ FIELD_PREP(ANA_AC_SG_CONFIG_REG_3_INVALID_RX, x)
+#define ANA_AC_SG_CONFIG_REG_3_INVALID_RX_GET(x)\
+ FIELD_GET(ANA_AC_SG_CONFIG_REG_3_INVALID_RX, x)
+
+#define ANA_AC_SG_CONFIG_REG_3_OCTETS_EXCEEDED_ENA BIT(28)
+#define ANA_AC_SG_CONFIG_REG_3_OCTETS_EXCEEDED_ENA_SET(x)\
+ FIELD_PREP(ANA_AC_SG_CONFIG_REG_3_OCTETS_EXCEEDED_ENA, x)
+#define ANA_AC_SG_CONFIG_REG_3_OCTETS_EXCEEDED_ENA_GET(x)\
+ FIELD_GET(ANA_AC_SG_CONFIG_REG_3_OCTETS_EXCEEDED_ENA, x)
+
+#define ANA_AC_SG_CONFIG_REG_3_OCTETS_EXCEEDED BIT(29)
+#define ANA_AC_SG_CONFIG_REG_3_OCTETS_EXCEEDED_SET(x)\
+ FIELD_PREP(ANA_AC_SG_CONFIG_REG_3_OCTETS_EXCEEDED, x)
+#define ANA_AC_SG_CONFIG_REG_3_OCTETS_EXCEEDED_GET(x)\
+ FIELD_GET(ANA_AC_SG_CONFIG_REG_3_OCTETS_EXCEEDED, x)
+
+/* ANA_AC:SG_CONFIG:SG_CONFIG_REG_4 */
+#define ANA_AC_SG_CONFIG_REG_4 __REG(TARGET_ANA_AC,\
+ 0, 1, 851584, 0, 1, 128, 60, 0, 1, 4)
+
+/* ANA_AC:SG_CONFIG:SG_CONFIG_REG_5 */
+#define ANA_AC_SG_CONFIG_REG_5 __REG(TARGET_ANA_AC,\
+ 0, 1, 851584, 0, 1, 128, 64, 0, 1, 4)
+
+/* ANA_AC:SG_CONFIG:SG_GCL_GS_CONFIG */
+#define ANA_AC_SG_GCL_GS_CONFIG(r) __REG(TARGET_ANA_AC,\
+ 0, 1, 851584, 0, 1, 128, 0, r, 4, 4)
+
+#define ANA_AC_SG_GCL_GS_CONFIG_IPS GENMASK(3, 0)
+#define ANA_AC_SG_GCL_GS_CONFIG_IPS_SET(x)\
+ FIELD_PREP(ANA_AC_SG_GCL_GS_CONFIG_IPS, x)
+#define ANA_AC_SG_GCL_GS_CONFIG_IPS_GET(x)\
+ FIELD_GET(ANA_AC_SG_GCL_GS_CONFIG_IPS, x)
+
+#define ANA_AC_SG_GCL_GS_CONFIG_GATE_STATE BIT(4)
+#define ANA_AC_SG_GCL_GS_CONFIG_GATE_STATE_SET(x)\
+ FIELD_PREP(ANA_AC_SG_GCL_GS_CONFIG_GATE_STATE, x)
+#define ANA_AC_SG_GCL_GS_CONFIG_GATE_STATE_GET(x)\
+ FIELD_GET(ANA_AC_SG_GCL_GS_CONFIG_GATE_STATE, x)
+
+/* ANA_AC:SG_CONFIG:SG_GCL_TI_CONFIG */
+#define ANA_AC_SG_GCL_TI_CONFIG(r) __REG(TARGET_ANA_AC,\
+ 0, 1, 851584, 0, 1, 128, 16, r, 4, 4)
+
+/* ANA_AC:SG_CONFIG:SG_GCL_OCT_CONFIG */
+#define ANA_AC_SG_GCL_OCT_CONFIG(r) __REG(TARGET_ANA_AC,\
+ 0, 1, 851584, 0, 1, 128, 32, r, 4, 4)
+
+/* ANA_AC:SG_STATUS:SG_STATUS_REG_1 */
+#define ANA_AC_SG_STATUS_REG_1 __REG(TARGET_ANA_AC,\
+ 0, 1, 839088, 0, 1, 16, 0, 0, 1, 4)
+
+/* ANA_AC:SG_STATUS:SG_STATUS_REG_2 */
+#define ANA_AC_SG_STATUS_REG_2 __REG(TARGET_ANA_AC,\
+ 0, 1, 839088, 0, 1, 16, 4, 0, 1, 4)
+
+/* ANA_AC:SG_STATUS:SG_STATUS_REG_3 */
+#define ANA_AC_SG_STATUS_REG_3 __REG(TARGET_ANA_AC,\
+ 0, 1, 839088, 0, 1, 16, 8, 0, 1, 4)
+
+#define ANA_AC_SG_STATUS_REG_3_CFG_CHG_TIME_SEC_MSB GENMASK(15, 0)
+#define ANA_AC_SG_STATUS_REG_3_CFG_CHG_TIME_SEC_MSB_SET(x)\
+ FIELD_PREP(ANA_AC_SG_STATUS_REG_3_CFG_CHG_TIME_SEC_MSB, x)
+#define ANA_AC_SG_STATUS_REG_3_CFG_CHG_TIME_SEC_MSB_GET(x)\
+ FIELD_GET(ANA_AC_SG_STATUS_REG_3_CFG_CHG_TIME_SEC_MSB, x)
+
+#define ANA_AC_SG_STATUS_REG_3_GATE_STATE BIT(16)
+#define ANA_AC_SG_STATUS_REG_3_GATE_STATE_SET(x)\
+ FIELD_PREP(ANA_AC_SG_STATUS_REG_3_GATE_STATE, x)
+#define ANA_AC_SG_STATUS_REG_3_GATE_STATE_GET(x)\
+ FIELD_GET(ANA_AC_SG_STATUS_REG_3_GATE_STATE, x)
+
+#define ANA_AC_SG_STATUS_REG_3_IPS GENMASK(23, 20)
+#define ANA_AC_SG_STATUS_REG_3_IPS_SET(x)\
+ FIELD_PREP(ANA_AC_SG_STATUS_REG_3_IPS, x)
+#define ANA_AC_SG_STATUS_REG_3_IPS_GET(x)\
+ FIELD_GET(ANA_AC_SG_STATUS_REG_3_IPS, x)
+
+#define ANA_AC_SG_STATUS_REG_3_CONFIG_PENDING BIT(24)
+#define ANA_AC_SG_STATUS_REG_3_CONFIG_PENDING_SET(x)\
+ FIELD_PREP(ANA_AC_SG_STATUS_REG_3_CONFIG_PENDING, x)
+#define ANA_AC_SG_STATUS_REG_3_CONFIG_PENDING_GET(x)\
+ FIELD_GET(ANA_AC_SG_STATUS_REG_3_CONFIG_PENDING, x)
+
+#define ANA_AC_SG_STATUS_REG_3_GCL_OCTET_INDEX GENMASK(27, 25)
+#define ANA_AC_SG_STATUS_REG_3_GCL_OCTET_INDEX_SET(x)\
+ FIELD_PREP(ANA_AC_SG_STATUS_REG_3_GCL_OCTET_INDEX, x)
+#define ANA_AC_SG_STATUS_REG_3_GCL_OCTET_INDEX_GET(x)\
+ FIELD_GET(ANA_AC_SG_STATUS_REG_3_GCL_OCTET_INDEX, x)
+
+/* ANA_AC:SG_STATUS:SG_STATUS_REG_4 */
+#define ANA_AC_SG_STATUS_REG_4 __REG(TARGET_ANA_AC,\
+ 0, 1, 839088, 0, 1, 16, 12, 0, 1, 4)
+
+/* ANA_AC:STAT_GLOBAL_CFG_PORT:STAT_GLOBAL_EVENT_MASK */
+#define ANA_AC_PORT_SGE_CFG(r) __REG(TARGET_ANA_AC,\
+ 0, 1, 851552, 0, 1, 20, 0, r, 4, 4)
+
+#define ANA_AC_PORT_SGE_CFG_MASK GENMASK(15, 0)
+#define ANA_AC_PORT_SGE_CFG_MASK_SET(x)\
+ FIELD_PREP(ANA_AC_PORT_SGE_CFG_MASK, x)
+#define ANA_AC_PORT_SGE_CFG_MASK_GET(x)\
+ FIELD_GET(ANA_AC_PORT_SGE_CFG_MASK, x)
+
+/* ANA_AC:STAT_GLOBAL_CFG_PORT:STAT_RESET */
+#define ANA_AC_STAT_RESET __REG(TARGET_ANA_AC,\
+ 0, 1, 851552, 0, 1, 20, 16, 0, 1, 4)
+
+#define ANA_AC_STAT_RESET_RESET BIT(0)
+#define ANA_AC_STAT_RESET_RESET_SET(x)\
+ FIELD_PREP(ANA_AC_STAT_RESET_RESET, x)
+#define ANA_AC_STAT_RESET_RESET_GET(x)\
+ FIELD_GET(ANA_AC_STAT_RESET_RESET, x)
+
+/* ANA_AC:STAT_CNT_CFG_PORT:STAT_CFG */
+#define ANA_AC_PORT_STAT_CFG(g, r) __REG(TARGET_ANA_AC,\
+ 0, 1, 843776, g, 70, 64, 4, r, 4, 4)
+
+#define ANA_AC_PORT_STAT_CFG_CFG_PRIO_MASK GENMASK(11, 4)
+#define ANA_AC_PORT_STAT_CFG_CFG_PRIO_MASK_SET(x)\
+ FIELD_PREP(ANA_AC_PORT_STAT_CFG_CFG_PRIO_MASK, x)
+#define ANA_AC_PORT_STAT_CFG_CFG_PRIO_MASK_GET(x)\
+ FIELD_GET(ANA_AC_PORT_STAT_CFG_CFG_PRIO_MASK, x)
+
+#define ANA_AC_PORT_STAT_CFG_CFG_CNT_FRM_TYPE GENMASK(3, 1)
+#define ANA_AC_PORT_STAT_CFG_CFG_CNT_FRM_TYPE_SET(x)\
+ FIELD_PREP(ANA_AC_PORT_STAT_CFG_CFG_CNT_FRM_TYPE, x)
+#define ANA_AC_PORT_STAT_CFG_CFG_CNT_FRM_TYPE_GET(x)\
+ FIELD_GET(ANA_AC_PORT_STAT_CFG_CFG_CNT_FRM_TYPE, x)
+
+#define ANA_AC_PORT_STAT_CFG_CFG_CNT_BYTE BIT(0)
+#define ANA_AC_PORT_STAT_CFG_CFG_CNT_BYTE_SET(x)\
+ FIELD_PREP(ANA_AC_PORT_STAT_CFG_CFG_CNT_BYTE, x)
+#define ANA_AC_PORT_STAT_CFG_CFG_CNT_BYTE_GET(x)\
+ FIELD_GET(ANA_AC_PORT_STAT_CFG_CFG_CNT_BYTE, x)
+
+/* ANA_AC:STAT_CNT_CFG_PORT:STAT_LSB_CNT */
+#define ANA_AC_PORT_STAT_LSB_CNT(g, r) __REG(TARGET_ANA_AC,\
+ 0, 1, 843776, g, 70, 64, 20, r, 4, 4)
+
+/* ANA_AC:STAT_GLOBAL_CFG_ACL:GLOBAL_CNT_FRM_TYPE_CFG */
+#define ANA_AC_ACL_GLOBAL_CNT_FRM_TYPE_CFG(r) __REG(TARGET_ANA_AC,\
+ 0, 1, 893792, 0, 1, 24, 0, r, 2, 4)
+
+#define ANA_AC_ACL_GLOBAL_CNT_FRM_TYPE_CFG_GLOBAL_CFG_CNT_FRM_TYPE GENMASK(2, 0)
+#define ANA_AC_ACL_GLOBAL_CNT_FRM_TYPE_CFG_GLOBAL_CFG_CNT_FRM_TYPE_SET(x)\
+ FIELD_PREP(ANA_AC_ACL_GLOBAL_CNT_FRM_TYPE_CFG_GLOBAL_CFG_CNT_FRM_TYPE, x)
+#define ANA_AC_ACL_GLOBAL_CNT_FRM_TYPE_CFG_GLOBAL_CFG_CNT_FRM_TYPE_GET(x)\
+ FIELD_GET(ANA_AC_ACL_GLOBAL_CNT_FRM_TYPE_CFG_GLOBAL_CFG_CNT_FRM_TYPE, x)
+
+/* ANA_AC:STAT_GLOBAL_CFG_ACL:STAT_GLOBAL_CFG */
+#define ANA_AC_ACL_STAT_GLOBAL_CFG(r) __REG(TARGET_ANA_AC,\
+ 0, 1, 893792, 0, 1, 24, 8, r, 2, 4)
+
+#define ANA_AC_ACL_STAT_GLOBAL_CFG_GLOBAL_CFG_CNT_BYTE BIT(0)
+#define ANA_AC_ACL_STAT_GLOBAL_CFG_GLOBAL_CFG_CNT_BYTE_SET(x)\
+ FIELD_PREP(ANA_AC_ACL_STAT_GLOBAL_CFG_GLOBAL_CFG_CNT_BYTE, x)
+#define ANA_AC_ACL_STAT_GLOBAL_CFG_GLOBAL_CFG_CNT_BYTE_GET(x)\
+ FIELD_GET(ANA_AC_ACL_STAT_GLOBAL_CFG_GLOBAL_CFG_CNT_BYTE, x)
+
+/* ANA_AC:STAT_GLOBAL_CFG_ACL:STAT_GLOBAL_EVENT_MASK */
+#define ANA_AC_ACL_STAT_GLOBAL_EVENT_MASK(r) __REG(TARGET_ANA_AC,\
+ 0, 1, 893792, 0, 1, 24, 16, r, 2, 4)
+
+#define ANA_AC_ACL_STAT_GLOBAL_EVENT_MASK_GLOBAL_EVENT_MASK GENMASK(3, 0)
+#define ANA_AC_ACL_STAT_GLOBAL_EVENT_MASK_GLOBAL_EVENT_MASK_SET(x)\
+ FIELD_PREP(ANA_AC_ACL_STAT_GLOBAL_EVENT_MASK_GLOBAL_EVENT_MASK, x)
+#define ANA_AC_ACL_STAT_GLOBAL_EVENT_MASK_GLOBAL_EVENT_MASK_GET(x)\
+ FIELD_GET(ANA_AC_ACL_STAT_GLOBAL_EVENT_MASK_GLOBAL_EVENT_MASK, x)
+
+/* ANA_ACL:COMMON:VCAP_S2_CFG */
+#define ANA_ACL_VCAP_S2_CFG(r) __REG(TARGET_ANA_ACL,\
+ 0, 1, 32768, 0, 1, 592, 0, r, 70, 4)
+
+#define ANA_ACL_VCAP_S2_CFG_SEC_ROUTE_HANDLING_ENA BIT(28)
+#define ANA_ACL_VCAP_S2_CFG_SEC_ROUTE_HANDLING_ENA_SET(x)\
+ FIELD_PREP(ANA_ACL_VCAP_S2_CFG_SEC_ROUTE_HANDLING_ENA, x)
+#define ANA_ACL_VCAP_S2_CFG_SEC_ROUTE_HANDLING_ENA_GET(x)\
+ FIELD_GET(ANA_ACL_VCAP_S2_CFG_SEC_ROUTE_HANDLING_ENA, x)
+
+#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_OAM_ENA GENMASK(27, 26)
+#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_OAM_ENA_SET(x)\
+ FIELD_PREP(ANA_ACL_VCAP_S2_CFG_SEC_TYPE_OAM_ENA, x)
+#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_OAM_ENA_GET(x)\
+ FIELD_GET(ANA_ACL_VCAP_S2_CFG_SEC_TYPE_OAM_ENA, x)
+
+#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP6_TCPUDP_OTHER_ENA GENMASK(25, 24)
+#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP6_TCPUDP_OTHER_ENA_SET(x)\
+ FIELD_PREP(ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP6_TCPUDP_OTHER_ENA, x)
+#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP6_TCPUDP_OTHER_ENA_GET(x)\
+ FIELD_GET(ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP6_TCPUDP_OTHER_ENA, x)
+
+#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP6_VID_ENA GENMASK(23, 22)
+#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP6_VID_ENA_SET(x)\
+ FIELD_PREP(ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP6_VID_ENA, x)
+#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP6_VID_ENA_GET(x)\
+ FIELD_GET(ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP6_VID_ENA, x)
+
+#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP6_STD_ENA GENMASK(21, 20)
+#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP6_STD_ENA_SET(x)\
+ FIELD_PREP(ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP6_STD_ENA, x)
+#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP6_STD_ENA_GET(x)\
+ FIELD_GET(ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP6_STD_ENA, x)
+
+#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP6_TCPUDP_ENA GENMASK(19, 18)
+#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP6_TCPUDP_ENA_SET(x)\
+ FIELD_PREP(ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP6_TCPUDP_ENA, x)
+#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP6_TCPUDP_ENA_GET(x)\
+ FIELD_GET(ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP6_TCPUDP_ENA, x)
+
+#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP_7TUPLE_ENA GENMASK(17, 16)
+#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP_7TUPLE_ENA_SET(x)\
+ FIELD_PREP(ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP_7TUPLE_ENA, x)
+#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP_7TUPLE_ENA_GET(x)\
+ FIELD_GET(ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP_7TUPLE_ENA, x)
+
+#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP4_VID_ENA GENMASK(15, 14)
+#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP4_VID_ENA_SET(x)\
+ FIELD_PREP(ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP4_VID_ENA, x)
+#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP4_VID_ENA_GET(x)\
+ FIELD_GET(ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP4_VID_ENA, x)
+
+#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP4_TCPUDP_ENA GENMASK(13, 12)
+#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP4_TCPUDP_ENA_SET(x)\
+ FIELD_PREP(ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP4_TCPUDP_ENA, x)
+#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP4_TCPUDP_ENA_GET(x)\
+ FIELD_GET(ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP4_TCPUDP_ENA, x)
+
+#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP4_OTHER_ENA GENMASK(11, 10)
+#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP4_OTHER_ENA_SET(x)\
+ FIELD_PREP(ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP4_OTHER_ENA, x)
+#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP4_OTHER_ENA_GET(x)\
+ FIELD_GET(ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP4_OTHER_ENA, x)
+
+#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_ARP_ENA GENMASK(9, 8)
+#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_ARP_ENA_SET(x)\
+ FIELD_PREP(ANA_ACL_VCAP_S2_CFG_SEC_TYPE_ARP_ENA, x)
+#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_ARP_ENA_GET(x)\
+ FIELD_GET(ANA_ACL_VCAP_S2_CFG_SEC_TYPE_ARP_ENA, x)
+
+#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_MAC_SNAP_ENA GENMASK(7, 6)
+#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_MAC_SNAP_ENA_SET(x)\
+ FIELD_PREP(ANA_ACL_VCAP_S2_CFG_SEC_TYPE_MAC_SNAP_ENA, x)
+#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_MAC_SNAP_ENA_GET(x)\
+ FIELD_GET(ANA_ACL_VCAP_S2_CFG_SEC_TYPE_MAC_SNAP_ENA, x)
+
+#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_MAC_LLC_ENA GENMASK(5, 4)
+#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_MAC_LLC_ENA_SET(x)\
+ FIELD_PREP(ANA_ACL_VCAP_S2_CFG_SEC_TYPE_MAC_LLC_ENA, x)
+#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_MAC_LLC_ENA_GET(x)\
+ FIELD_GET(ANA_ACL_VCAP_S2_CFG_SEC_TYPE_MAC_LLC_ENA, x)
+
+#define ANA_ACL_VCAP_S2_CFG_SEC_ENA GENMASK(3, 0)
+#define ANA_ACL_VCAP_S2_CFG_SEC_ENA_SET(x)\
+ FIELD_PREP(ANA_ACL_VCAP_S2_CFG_SEC_ENA, x)
+#define ANA_ACL_VCAP_S2_CFG_SEC_ENA_GET(x)\
+ FIELD_GET(ANA_ACL_VCAP_S2_CFG_SEC_ENA, x)
+
+/* ANA_ACL:COMMON:SWAP_IP_CTRL */
+#define ANA_ACL_SWAP_IP_CTRL __REG(TARGET_ANA_ACL,\
+ 0, 1, 32768, 0, 1, 592, 412, 0, 1, 4)
+
+#define ANA_ACL_SWAP_IP_CTRL_DMAC_REPL_OFFSET_VAL GENMASK(23, 18)
+#define ANA_ACL_SWAP_IP_CTRL_DMAC_REPL_OFFSET_VAL_SET(x)\
+ FIELD_PREP(ANA_ACL_SWAP_IP_CTRL_DMAC_REPL_OFFSET_VAL, x)
+#define ANA_ACL_SWAP_IP_CTRL_DMAC_REPL_OFFSET_VAL_GET(x)\
+ FIELD_GET(ANA_ACL_SWAP_IP_CTRL_DMAC_REPL_OFFSET_VAL, x)
+
+#define ANA_ACL_SWAP_IP_CTRL_IP_SWAP_IP6_HOPC_VAL GENMASK(17, 10)
+#define ANA_ACL_SWAP_IP_CTRL_IP_SWAP_IP6_HOPC_VAL_SET(x)\
+ FIELD_PREP(ANA_ACL_SWAP_IP_CTRL_IP_SWAP_IP6_HOPC_VAL, x)
+#define ANA_ACL_SWAP_IP_CTRL_IP_SWAP_IP6_HOPC_VAL_GET(x)\
+ FIELD_GET(ANA_ACL_SWAP_IP_CTRL_IP_SWAP_IP6_HOPC_VAL, x)
+
+#define ANA_ACL_SWAP_IP_CTRL_IP_SWAP_IP4_TTL_VAL GENMASK(9, 2)
+#define ANA_ACL_SWAP_IP_CTRL_IP_SWAP_IP4_TTL_VAL_SET(x)\
+ FIELD_PREP(ANA_ACL_SWAP_IP_CTRL_IP_SWAP_IP4_TTL_VAL, x)
+#define ANA_ACL_SWAP_IP_CTRL_IP_SWAP_IP4_TTL_VAL_GET(x)\
+ FIELD_GET(ANA_ACL_SWAP_IP_CTRL_IP_SWAP_IP4_TTL_VAL, x)
+
+#define ANA_ACL_SWAP_IP_CTRL_IP_SWAP_IP6_HOPC_ENA BIT(1)
+#define ANA_ACL_SWAP_IP_CTRL_IP_SWAP_IP6_HOPC_ENA_SET(x)\
+ FIELD_PREP(ANA_ACL_SWAP_IP_CTRL_IP_SWAP_IP6_HOPC_ENA, x)
+#define ANA_ACL_SWAP_IP_CTRL_IP_SWAP_IP6_HOPC_ENA_GET(x)\
+ FIELD_GET(ANA_ACL_SWAP_IP_CTRL_IP_SWAP_IP6_HOPC_ENA, x)
+
+#define ANA_ACL_SWAP_IP_CTRL_IP_SWAP_IP4_TTL_ENA BIT(0)
+#define ANA_ACL_SWAP_IP_CTRL_IP_SWAP_IP4_TTL_ENA_SET(x)\
+ FIELD_PREP(ANA_ACL_SWAP_IP_CTRL_IP_SWAP_IP4_TTL_ENA, x)
+#define ANA_ACL_SWAP_IP_CTRL_IP_SWAP_IP4_TTL_ENA_GET(x)\
+ FIELD_GET(ANA_ACL_SWAP_IP_CTRL_IP_SWAP_IP4_TTL_ENA, x)
+
+/* ANA_ACL:COMMON:VCAP_S2_RLEG_STAT */
+#define ANA_ACL_VCAP_S2_RLEG_STAT(r) __REG(TARGET_ANA_ACL,\
+ 0, 1, 32768, 0, 1, 592, 424, r, 4, 4)
+
+#define ANA_ACL_VCAP_S2_RLEG_STAT_IRLEG_STAT_MASK GENMASK(12, 6)
+#define ANA_ACL_VCAP_S2_RLEG_STAT_IRLEG_STAT_MASK_SET(x)\
+ FIELD_PREP(ANA_ACL_VCAP_S2_RLEG_STAT_IRLEG_STAT_MASK, x)
+#define ANA_ACL_VCAP_S2_RLEG_STAT_IRLEG_STAT_MASK_GET(x)\
+ FIELD_GET(ANA_ACL_VCAP_S2_RLEG_STAT_IRLEG_STAT_MASK, x)
+
+#define ANA_ACL_VCAP_S2_RLEG_STAT_ERLEG_STAT_MASK GENMASK(5, 0)
+#define ANA_ACL_VCAP_S2_RLEG_STAT_ERLEG_STAT_MASK_SET(x)\
+ FIELD_PREP(ANA_ACL_VCAP_S2_RLEG_STAT_ERLEG_STAT_MASK, x)
+#define ANA_ACL_VCAP_S2_RLEG_STAT_ERLEG_STAT_MASK_GET(x)\
+ FIELD_GET(ANA_ACL_VCAP_S2_RLEG_STAT_ERLEG_STAT_MASK, x)
+
+/* ANA_ACL:COMMON:VCAP_S2_FRAGMENT_CFG */
+#define ANA_ACL_VCAP_S2_FRAGMENT_CFG __REG(TARGET_ANA_ACL,\
+ 0, 1, 32768, 0, 1, 592, 440, 0, 1, 4)
+
+#define ANA_ACL_VCAP_S2_FRAGMENT_CFG_L4_MIN_LEN GENMASK(9, 5)
+#define ANA_ACL_VCAP_S2_FRAGMENT_CFG_L4_MIN_LEN_SET(x)\
+ FIELD_PREP(ANA_ACL_VCAP_S2_FRAGMENT_CFG_L4_MIN_LEN, x)
+#define ANA_ACL_VCAP_S2_FRAGMENT_CFG_L4_MIN_LEN_GET(x)\
+ FIELD_GET(ANA_ACL_VCAP_S2_FRAGMENT_CFG_L4_MIN_LEN, x)
+
+#define ANA_ACL_VCAP_S2_FRAGMENT_CFG_FRAGMENT_OFFSET_THRES_DIS BIT(4)
+#define ANA_ACL_VCAP_S2_FRAGMENT_CFG_FRAGMENT_OFFSET_THRES_DIS_SET(x)\
+ FIELD_PREP(ANA_ACL_VCAP_S2_FRAGMENT_CFG_FRAGMENT_OFFSET_THRES_DIS, x)
+#define ANA_ACL_VCAP_S2_FRAGMENT_CFG_FRAGMENT_OFFSET_THRES_DIS_GET(x)\
+ FIELD_GET(ANA_ACL_VCAP_S2_FRAGMENT_CFG_FRAGMENT_OFFSET_THRES_DIS, x)
+
+#define ANA_ACL_VCAP_S2_FRAGMENT_CFG_FRAGMENT_OFFSET_THRES GENMASK(3, 0)
+#define ANA_ACL_VCAP_S2_FRAGMENT_CFG_FRAGMENT_OFFSET_THRES_SET(x)\
+ FIELD_PREP(ANA_ACL_VCAP_S2_FRAGMENT_CFG_FRAGMENT_OFFSET_THRES, x)
+#define ANA_ACL_VCAP_S2_FRAGMENT_CFG_FRAGMENT_OFFSET_THRES_GET(x)\
+ FIELD_GET(ANA_ACL_VCAP_S2_FRAGMENT_CFG_FRAGMENT_OFFSET_THRES, x)
+
+/* ANA_ACL:COMMON:OWN_UPSID */
+#define ANA_ACL_OWN_UPSID(r) __REG(TARGET_ANA_ACL,\
+ 0, 1, 32768, 0, 1, 592, 580, r, 3, 4)
+
+#define ANA_ACL_OWN_UPSID_OWN_UPSID GENMASK(4, 0)
+#define ANA_ACL_OWN_UPSID_OWN_UPSID_SET(x)\
+ FIELD_PREP(ANA_ACL_OWN_UPSID_OWN_UPSID, x)
+#define ANA_ACL_OWN_UPSID_OWN_UPSID_GET(x)\
+ FIELD_GET(ANA_ACL_OWN_UPSID_OWN_UPSID, x)
+
+/* ANA_ACL:KEY_SEL:VCAP_S2_KEY_SEL */
+#define ANA_ACL_VCAP_S2_KEY_SEL(g, r) __REG(TARGET_ANA_ACL,\
+ 0, 1, 34200, g, 134, 16, 0, r, 4, 4)
+
+#define ANA_ACL_VCAP_S2_KEY_SEL_KEY_SEL_ENA BIT(13)
+#define ANA_ACL_VCAP_S2_KEY_SEL_KEY_SEL_ENA_SET(x)\
+ FIELD_PREP(ANA_ACL_VCAP_S2_KEY_SEL_KEY_SEL_ENA, x)
+#define ANA_ACL_VCAP_S2_KEY_SEL_KEY_SEL_ENA_GET(x)\
+ FIELD_GET(ANA_ACL_VCAP_S2_KEY_SEL_KEY_SEL_ENA, x)
+
+#define ANA_ACL_VCAP_S2_KEY_SEL_IGR_PORT_MASK_SEL BIT(12)
+#define ANA_ACL_VCAP_S2_KEY_SEL_IGR_PORT_MASK_SEL_SET(x)\
+ FIELD_PREP(ANA_ACL_VCAP_S2_KEY_SEL_IGR_PORT_MASK_SEL, x)
+#define ANA_ACL_VCAP_S2_KEY_SEL_IGR_PORT_MASK_SEL_GET(x)\
+ FIELD_GET(ANA_ACL_VCAP_S2_KEY_SEL_IGR_PORT_MASK_SEL, x)
+
+#define ANA_ACL_VCAP_S2_KEY_SEL_NON_ETH_KEY_SEL GENMASK(11, 10)
+#define ANA_ACL_VCAP_S2_KEY_SEL_NON_ETH_KEY_SEL_SET(x)\
+ FIELD_PREP(ANA_ACL_VCAP_S2_KEY_SEL_NON_ETH_KEY_SEL, x)
+#define ANA_ACL_VCAP_S2_KEY_SEL_NON_ETH_KEY_SEL_GET(x)\
+ FIELD_GET(ANA_ACL_VCAP_S2_KEY_SEL_NON_ETH_KEY_SEL, x)
+
+#define ANA_ACL_VCAP_S2_KEY_SEL_IP4_MC_KEY_SEL GENMASK(9, 8)
+#define ANA_ACL_VCAP_S2_KEY_SEL_IP4_MC_KEY_SEL_SET(x)\
+ FIELD_PREP(ANA_ACL_VCAP_S2_KEY_SEL_IP4_MC_KEY_SEL, x)
+#define ANA_ACL_VCAP_S2_KEY_SEL_IP4_MC_KEY_SEL_GET(x)\
+ FIELD_GET(ANA_ACL_VCAP_S2_KEY_SEL_IP4_MC_KEY_SEL, x)
+
+#define ANA_ACL_VCAP_S2_KEY_SEL_IP4_UC_KEY_SEL GENMASK(7, 6)
+#define ANA_ACL_VCAP_S2_KEY_SEL_IP4_UC_KEY_SEL_SET(x)\
+ FIELD_PREP(ANA_ACL_VCAP_S2_KEY_SEL_IP4_UC_KEY_SEL, x)
+#define ANA_ACL_VCAP_S2_KEY_SEL_IP4_UC_KEY_SEL_GET(x)\
+ FIELD_GET(ANA_ACL_VCAP_S2_KEY_SEL_IP4_UC_KEY_SEL, x)
+
+#define ANA_ACL_VCAP_S2_KEY_SEL_IP6_MC_KEY_SEL GENMASK(5, 3)
+#define ANA_ACL_VCAP_S2_KEY_SEL_IP6_MC_KEY_SEL_SET(x)\
+ FIELD_PREP(ANA_ACL_VCAP_S2_KEY_SEL_IP6_MC_KEY_SEL, x)
+#define ANA_ACL_VCAP_S2_KEY_SEL_IP6_MC_KEY_SEL_GET(x)\
+ FIELD_GET(ANA_ACL_VCAP_S2_KEY_SEL_IP6_MC_KEY_SEL, x)
+
+#define ANA_ACL_VCAP_S2_KEY_SEL_IP6_UC_KEY_SEL GENMASK(2, 1)
+#define ANA_ACL_VCAP_S2_KEY_SEL_IP6_UC_KEY_SEL_SET(x)\
+ FIELD_PREP(ANA_ACL_VCAP_S2_KEY_SEL_IP6_UC_KEY_SEL, x)
+#define ANA_ACL_VCAP_S2_KEY_SEL_IP6_UC_KEY_SEL_GET(x)\
+ FIELD_GET(ANA_ACL_VCAP_S2_KEY_SEL_IP6_UC_KEY_SEL, x)
+
+#define ANA_ACL_VCAP_S2_KEY_SEL_ARP_KEY_SEL BIT(0)
+#define ANA_ACL_VCAP_S2_KEY_SEL_ARP_KEY_SEL_SET(x)\
+ FIELD_PREP(ANA_ACL_VCAP_S2_KEY_SEL_ARP_KEY_SEL, x)
+#define ANA_ACL_VCAP_S2_KEY_SEL_ARP_KEY_SEL_GET(x)\
+ FIELD_GET(ANA_ACL_VCAP_S2_KEY_SEL_ARP_KEY_SEL, x)
+
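(Editorial aside, not part of the patch.) The key-select register above is replicated two ways: per port group (g) and per lookup index (r). A minimal userspace sketch of the offset arithmetic that the __REG() argument tuple (target, tinst, tcnt, gbase, ginst, gcnt, gwidth, raddr, rinst, rcnt, rwidth) is assumed to encode, i.e. gbase + g * gwidth + raddr + r * rwidth; reg_offset() is a hypothetical stand-in for the driver's real address helper:

#include <assert.h>
#include <stdio.h>

/* Hypothetical mirror of the assumed __REG() address arithmetic. */
static unsigned int reg_offset(unsigned int gbase, unsigned int ginst,
			       unsigned int gcnt, unsigned int gwidth,
			       unsigned int raddr, unsigned int rinst,
			       unsigned int rcnt, unsigned int rwidth)
{
	assert(ginst < gcnt && rinst < rcnt);
	return gbase + ginst * gwidth + raddr + rinst * rwidth;
}

int main(void)
{
	/* ANA_ACL_VCAP_S2_KEY_SEL(2, 1): gbase 34200, 134 groups of
	 * 16 bytes, 4 replicated 4-byte registers per group.
	 */
	printf("offset 0x%x\n", reg_offset(34200, 2, 134, 16, 0, 1, 4, 4));
	return 0;
}

For ANA_ACL_VCAP_S2_KEY_SEL(2, 1) this yields 34200 + 2*16 + 1*4 = 34236 (0x85bc), a plain byte offset within the ANA_ACL target.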
+/* ANA_ACL:CNT_A:CNT_A */
+#define ANA_ACL_CNT_A(g) __REG(TARGET_ANA_ACL,\
+ 0, 1, 0, g, 4096, 4, 0, 0, 1, 4)
+
+/* ANA_ACL:CNT_B:CNT_B */
+#define ANA_ACL_CNT_B(g) __REG(TARGET_ANA_ACL,\
+ 0, 1, 16384, g, 4096, 4, 0, 0, 1, 4)
+
+/* ANA_ACL:STICKY:SEC_LOOKUP_STICKY */
+#define ANA_ACL_SEC_LOOKUP_STICKY(r) __REG(TARGET_ANA_ACL,\
+ 0, 1, 36408, 0, 1, 16, 0, r, 4, 4)
+
+#define ANA_ACL_SEC_LOOKUP_STICKY_KEY_SEL_CLM_STICKY BIT(17)
+#define ANA_ACL_SEC_LOOKUP_STICKY_KEY_SEL_CLM_STICKY_SET(x)\
+ FIELD_PREP(ANA_ACL_SEC_LOOKUP_STICKY_KEY_SEL_CLM_STICKY, x)
+#define ANA_ACL_SEC_LOOKUP_STICKY_KEY_SEL_CLM_STICKY_GET(x)\
+ FIELD_GET(ANA_ACL_SEC_LOOKUP_STICKY_KEY_SEL_CLM_STICKY, x)
+
+#define ANA_ACL_SEC_LOOKUP_STICKY_KEY_SEL_IRLEG_STICKY BIT(16)
+#define ANA_ACL_SEC_LOOKUP_STICKY_KEY_SEL_IRLEG_STICKY_SET(x)\
+ FIELD_PREP(ANA_ACL_SEC_LOOKUP_STICKY_KEY_SEL_IRLEG_STICKY, x)
+#define ANA_ACL_SEC_LOOKUP_STICKY_KEY_SEL_IRLEG_STICKY_GET(x)\
+ FIELD_GET(ANA_ACL_SEC_LOOKUP_STICKY_KEY_SEL_IRLEG_STICKY, x)
+
+#define ANA_ACL_SEC_LOOKUP_STICKY_KEY_SEL_ERLEG_STICKY BIT(15)
+#define ANA_ACL_SEC_LOOKUP_STICKY_KEY_SEL_ERLEG_STICKY_SET(x)\
+ FIELD_PREP(ANA_ACL_SEC_LOOKUP_STICKY_KEY_SEL_ERLEG_STICKY, x)
+#define ANA_ACL_SEC_LOOKUP_STICKY_KEY_SEL_ERLEG_STICKY_GET(x)\
+ FIELD_GET(ANA_ACL_SEC_LOOKUP_STICKY_KEY_SEL_ERLEG_STICKY, x)
+
+#define ANA_ACL_SEC_LOOKUP_STICKY_KEY_SEL_PORT_STICKY BIT(14)
+#define ANA_ACL_SEC_LOOKUP_STICKY_KEY_SEL_PORT_STICKY_SET(x)\
+ FIELD_PREP(ANA_ACL_SEC_LOOKUP_STICKY_KEY_SEL_PORT_STICKY, x)
+#define ANA_ACL_SEC_LOOKUP_STICKY_KEY_SEL_PORT_STICKY_GET(x)\
+ FIELD_GET(ANA_ACL_SEC_LOOKUP_STICKY_KEY_SEL_PORT_STICKY, x)
+
+#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_CUSTOM2_STICKY BIT(13)
+#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_CUSTOM2_STICKY_SET(x)\
+ FIELD_PREP(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_CUSTOM2_STICKY, x)
+#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_CUSTOM2_STICKY_GET(x)\
+ FIELD_GET(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_CUSTOM2_STICKY, x)
+
+#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_CUSTOM1_STICKY BIT(12)
+#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_CUSTOM1_STICKY_SET(x)\
+ FIELD_PREP(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_CUSTOM1_STICKY, x)
+#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_CUSTOM1_STICKY_GET(x)\
+ FIELD_GET(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_CUSTOM1_STICKY, x)
+
+#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_OAM_STICKY BIT(11)
+#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_OAM_STICKY_SET(x)\
+ FIELD_PREP(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_OAM_STICKY, x)
+#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_OAM_STICKY_GET(x)\
+ FIELD_GET(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_OAM_STICKY, x)
+
+#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_VID_STICKY BIT(10)
+#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_VID_STICKY_SET(x)\
+ FIELD_PREP(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_VID_STICKY, x)
+#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_VID_STICKY_GET(x)\
+ FIELD_GET(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_VID_STICKY, x)
+
+#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_STD_STICKY BIT(9)
+#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_STD_STICKY_SET(x)\
+ FIELD_PREP(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_STD_STICKY, x)
+#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_STD_STICKY_GET(x)\
+ FIELD_GET(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_STD_STICKY, x)
+
+#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_TCPUDP_STICKY BIT(8)
+#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_TCPUDP_STICKY_SET(x)\
+ FIELD_PREP(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_TCPUDP_STICKY, x)
+#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_TCPUDP_STICKY_GET(x)\
+ FIELD_GET(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_TCPUDP_STICKY, x)
+
+#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP_7TUPLE_STICKY BIT(7)
+#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP_7TUPLE_STICKY_SET(x)\
+ FIELD_PREP(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP_7TUPLE_STICKY, x)
+#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP_7TUPLE_STICKY_GET(x)\
+ FIELD_GET(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP_7TUPLE_STICKY, x)
+
+#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_VID_STICKY BIT(6)
+#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_VID_STICKY_SET(x)\
+ FIELD_PREP(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_VID_STICKY, x)
+#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_VID_STICKY_GET(x)\
+ FIELD_GET(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_VID_STICKY, x)
+
+#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_TCPUDP_STICKY BIT(5)
+#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_TCPUDP_STICKY_SET(x)\
+ FIELD_PREP(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_TCPUDP_STICKY, x)
+#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_TCPUDP_STICKY_GET(x)\
+ FIELD_GET(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_TCPUDP_STICKY, x)
+
+#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_OTHER_STICKY BIT(4)
+#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_OTHER_STICKY_SET(x)\
+ FIELD_PREP(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_OTHER_STICKY, x)
+#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_OTHER_STICKY_GET(x)\
+ FIELD_GET(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_OTHER_STICKY, x)
+
+#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_ARP_STICKY BIT(3)
+#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_ARP_STICKY_SET(x)\
+ FIELD_PREP(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_ARP_STICKY, x)
+#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_ARP_STICKY_GET(x)\
+ FIELD_GET(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_ARP_STICKY, x)
+
+#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_SNAP_STICKY BIT(2)
+#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_SNAP_STICKY_SET(x)\
+ FIELD_PREP(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_SNAP_STICKY, x)
+#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_SNAP_STICKY_GET(x)\
+ FIELD_GET(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_SNAP_STICKY, x)
+
+#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_LLC_STICKY BIT(1)
+#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_LLC_STICKY_SET(x)\
+ FIELD_PREP(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_LLC_STICKY, x)
+#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_LLC_STICKY_GET(x)\
+ FIELD_GET(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_LLC_STICKY, x)
+
+#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_ETYPE_STICKY BIT(0)
+#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_ETYPE_STICKY_SET(x)\
+ FIELD_PREP(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_ETYPE_STICKY, x)
+#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_ETYPE_STICKY_GET(x)\
+ FIELD_GET(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_ETYPE_STICKY, x)
+
+/* ANA_AC_POL:POL_ALL_CFG:POL_UPD_INT_CFG */
+#define ANA_AC_POL_POL_UPD_INT_CFG __REG(TARGET_ANA_AC_POL,\
+ 0, 1, 75968, 0, 1, 1160, 1148, 0, 1, 4)
+
+#define ANA_AC_POL_POL_UPD_INT_CFG_POL_UPD_INT GENMASK(9, 0)
+#define ANA_AC_POL_POL_UPD_INT_CFG_POL_UPD_INT_SET(x)\
+ FIELD_PREP(ANA_AC_POL_POL_UPD_INT_CFG_POL_UPD_INT, x)
+#define ANA_AC_POL_POL_UPD_INT_CFG_POL_UPD_INT_GET(x)\
+ FIELD_GET(ANA_AC_POL_POL_UPD_INT_CFG_POL_UPD_INT, x)
+
+/* ANA_AC_POL:COMMON_BDLB:DLB_CTRL */
+#define ANA_AC_POL_BDLB_DLB_CTRL __REG(TARGET_ANA_AC_POL,\
+ 0, 1, 79048, 0, 1, 8, 0, 0, 1, 4)
+
+#define ANA_AC_POL_BDLB_DLB_CTRL_CLK_PERIOD_01NS GENMASK(26, 19)
+#define ANA_AC_POL_BDLB_DLB_CTRL_CLK_PERIOD_01NS_SET(x)\
+ FIELD_PREP(ANA_AC_POL_BDLB_DLB_CTRL_CLK_PERIOD_01NS, x)
+#define ANA_AC_POL_BDLB_DLB_CTRL_CLK_PERIOD_01NS_GET(x)\
+ FIELD_GET(ANA_AC_POL_BDLB_DLB_CTRL_CLK_PERIOD_01NS, x)
+
+#define ANA_AC_POL_BDLB_DLB_CTRL_BASE_TICK_CNT GENMASK(18, 4)
+#define ANA_AC_POL_BDLB_DLB_CTRL_BASE_TICK_CNT_SET(x)\
+ FIELD_PREP(ANA_AC_POL_BDLB_DLB_CTRL_BASE_TICK_CNT, x)
+#define ANA_AC_POL_BDLB_DLB_CTRL_BASE_TICK_CNT_GET(x)\
+ FIELD_GET(ANA_AC_POL_BDLB_DLB_CTRL_BASE_TICK_CNT, x)
+
+#define ANA_AC_POL_BDLB_DLB_CTRL_LEAK_ENA BIT(1)
+#define ANA_AC_POL_BDLB_DLB_CTRL_LEAK_ENA_SET(x)\
+ FIELD_PREP(ANA_AC_POL_BDLB_DLB_CTRL_LEAK_ENA, x)
+#define ANA_AC_POL_BDLB_DLB_CTRL_LEAK_ENA_GET(x)\
+ FIELD_GET(ANA_AC_POL_BDLB_DLB_CTRL_LEAK_ENA, x)
+
+#define ANA_AC_POL_BDLB_DLB_CTRL_DLB_ADD_ENA BIT(0)
+#define ANA_AC_POL_BDLB_DLB_CTRL_DLB_ADD_ENA_SET(x)\
+ FIELD_PREP(ANA_AC_POL_BDLB_DLB_CTRL_DLB_ADD_ENA, x)
+#define ANA_AC_POL_BDLB_DLB_CTRL_DLB_ADD_ENA_GET(x)\
+ FIELD_GET(ANA_AC_POL_BDLB_DLB_CTRL_DLB_ADD_ENA, x)
+
+/* ANA_AC_POL:COMMON_BUM_SLB:DLB_CTRL */
+#define ANA_AC_POL_SLB_DLB_CTRL __REG(TARGET_ANA_AC_POL,\
+ 0, 1, 79056, 0, 1, 20, 0, 0, 1, 4)
+
+#define ANA_AC_POL_SLB_DLB_CTRL_CLK_PERIOD_01NS GENMASK(26, 19)
+#define ANA_AC_POL_SLB_DLB_CTRL_CLK_PERIOD_01NS_SET(x)\
+ FIELD_PREP(ANA_AC_POL_SLB_DLB_CTRL_CLK_PERIOD_01NS, x)
+#define ANA_AC_POL_SLB_DLB_CTRL_CLK_PERIOD_01NS_GET(x)\
+ FIELD_GET(ANA_AC_POL_SLB_DLB_CTRL_CLK_PERIOD_01NS, x)
+
+#define ANA_AC_POL_SLB_DLB_CTRL_BASE_TICK_CNT GENMASK(18, 4)
+#define ANA_AC_POL_SLB_DLB_CTRL_BASE_TICK_CNT_SET(x)\
+ FIELD_PREP(ANA_AC_POL_SLB_DLB_CTRL_BASE_TICK_CNT, x)
+#define ANA_AC_POL_SLB_DLB_CTRL_BASE_TICK_CNT_GET(x)\
+ FIELD_GET(ANA_AC_POL_SLB_DLB_CTRL_BASE_TICK_CNT, x)
+
+#define ANA_AC_POL_SLB_DLB_CTRL_LEAK_ENA BIT(1)
+#define ANA_AC_POL_SLB_DLB_CTRL_LEAK_ENA_SET(x)\
+ FIELD_PREP(ANA_AC_POL_SLB_DLB_CTRL_LEAK_ENA, x)
+#define ANA_AC_POL_SLB_DLB_CTRL_LEAK_ENA_GET(x)\
+ FIELD_GET(ANA_AC_POL_SLB_DLB_CTRL_LEAK_ENA, x)
+
+#define ANA_AC_POL_SLB_DLB_CTRL_DLB_ADD_ENA BIT(0)
+#define ANA_AC_POL_SLB_DLB_CTRL_DLB_ADD_ENA_SET(x)\
+ FIELD_PREP(ANA_AC_POL_SLB_DLB_CTRL_DLB_ADD_ENA, x)
+#define ANA_AC_POL_SLB_DLB_CTRL_DLB_ADD_ENA_GET(x)\
+ FIELD_GET(ANA_AC_POL_SLB_DLB_CTRL_DLB_ADD_ENA, x)
+
+/* ANA_AC_SDLB:LBGRP_TBL:XLB_START */
+#define ANA_AC_SDLB_XLB_START(g) __REG(TARGET_ANA_AC_SDLB,\
+ 0, 1, 295468, g, 10, 24, 0, 0, 1, 4)
+
+#define ANA_AC_SDLB_XLB_START_LBSET_START GENMASK(12, 0)
+#define ANA_AC_SDLB_XLB_START_LBSET_START_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_XLB_START_LBSET_START, x)
+#define ANA_AC_SDLB_XLB_START_LBSET_START_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_XLB_START_LBSET_START, x)
+
+/* ANA_AC_SDLB:LBGRP_TBL:PUP_INTERVAL */
+#define ANA_AC_SDLB_PUP_INTERVAL(g) __REG(TARGET_ANA_AC_SDLB,\
+ 0, 1, 295468, g, 10, 24, 4, 0, 1, 4)
+
+#define ANA_AC_SDLB_PUP_INTERVAL_PUP_INTERVAL GENMASK(19, 0)
+#define ANA_AC_SDLB_PUP_INTERVAL_PUP_INTERVAL_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_PUP_INTERVAL_PUP_INTERVAL, x)
+#define ANA_AC_SDLB_PUP_INTERVAL_PUP_INTERVAL_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_PUP_INTERVAL_PUP_INTERVAL, x)
+
+/* ANA_AC_SDLB:LBGRP_TBL:PUP_CTRL */
+#define ANA_AC_SDLB_PUP_CTRL(g) __REG(TARGET_ANA_AC_SDLB,\
+ 0, 1, 295468, g, 10, 24, 8, 0, 1, 4)
+
+#define ANA_AC_SDLB_PUP_CTRL_PUP_LB_DT GENMASK(18, 0)
+#define ANA_AC_SDLB_PUP_CTRL_PUP_LB_DT_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_PUP_CTRL_PUP_LB_DT, x)
+#define ANA_AC_SDLB_PUP_CTRL_PUP_LB_DT_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_PUP_CTRL_PUP_LB_DT, x)
+
+#define ANA_AC_SDLB_PUP_CTRL_PUP_ENA BIT(24)
+#define ANA_AC_SDLB_PUP_CTRL_PUP_ENA_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_PUP_CTRL_PUP_ENA, x)
+#define ANA_AC_SDLB_PUP_CTRL_PUP_ENA_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_PUP_CTRL_PUP_ENA, x)
+
+/* ANA_AC_SDLB:LBGRP_TBL:LBGRP_MISC */
+#define ANA_AC_SDLB_LBGRP_MISC(g) __REG(TARGET_ANA_AC_SDLB,\
+ 0, 1, 295468, g, 10, 24, 12, 0, 1, 4)
+
+#define ANA_AC_SDLB_LBGRP_MISC_THRES_SHIFT GENMASK(12, 8)
+#define ANA_AC_SDLB_LBGRP_MISC_THRES_SHIFT_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_LBGRP_MISC_THRES_SHIFT, x)
+#define ANA_AC_SDLB_LBGRP_MISC_THRES_SHIFT_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_LBGRP_MISC_THRES_SHIFT, x)
+
+/* ANA_AC_SDLB:LBGRP_TBL:FRM_RATE_TOKENS */
+#define ANA_AC_SDLB_FRM_RATE_TOKENS(g) __REG(TARGET_ANA_AC_SDLB,\
+ 0, 1, 295468, g, 10, 24, 16, 0, 1, 4)
+
+#define ANA_AC_SDLB_FRM_RATE_TOKENS_FRM_RATE_TOKENS GENMASK(12, 0)
+#define ANA_AC_SDLB_FRM_RATE_TOKENS_FRM_RATE_TOKENS_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_FRM_RATE_TOKENS_FRM_RATE_TOKENS, x)
+#define ANA_AC_SDLB_FRM_RATE_TOKENS_FRM_RATE_TOKENS_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_FRM_RATE_TOKENS_FRM_RATE_TOKENS, x)
+
+/* ANA_AC_SDLB:LBGRP_TBL:LBGRP_STATE_TBL */
+#define ANA_AC_SDLB_LBGRP_STATE_TBL(g) __REG(TARGET_ANA_AC_SDLB,\
+ 0, 1, 295468, g, 10, 24, 20, 0, 1, 4)
+
+#define ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_ONGOING BIT(0)
+#define ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_ONGOING_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_ONGOING, x)
+#define ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_ONGOING_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_ONGOING, x)
+
+#define ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_WAIT_ACK BIT(1)
+#define ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_WAIT_ACK_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_WAIT_ACK, x)
+#define ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_WAIT_ACK_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_WAIT_ACK, x)
+
+#define ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_LBSET_NEXT GENMASK(28, 16)
+#define ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_LBSET_NEXT_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_LBSET_NEXT, x)
+#define ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_LBSET_NEXT_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_LBSET_NEXT, x)
+
+/* ANA_AC_SDLB:LBSET_TBL:PUP_TOKENS */
+#define ANA_AC_SDLB_PUP_TOKENS(g, r) __REG(TARGET_ANA_AC_SDLB,\
+ 0, 1, 0, g, 4616, 64, 0, r, 2, 4)
+
+#define ANA_AC_SDLB_PUP_TOKENS_PUP_TOKENS GENMASK(12, 0)
+#define ANA_AC_SDLB_PUP_TOKENS_PUP_TOKENS_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_PUP_TOKENS_PUP_TOKENS, x)
+#define ANA_AC_SDLB_PUP_TOKENS_PUP_TOKENS_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_PUP_TOKENS_PUP_TOKENS, x)
+
+/* ANA_AC_SDLB:LBSET_TBL:THRES */
+#define ANA_AC_SDLB_THRES(g, r) __REG(TARGET_ANA_AC_SDLB,\
+ 0, 1, 0, g, 4616, 64, 8, r, 2, 4)
+
+#define ANA_AC_SDLB_THRES_THRES GENMASK(9, 0)
+#define ANA_AC_SDLB_THRES_THRES_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_THRES_THRES, x)
+#define ANA_AC_SDLB_THRES_THRES_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_THRES_THRES, x)
+
+#define ANA_AC_SDLB_THRES_THRES_HYS GENMASK(25, 16)
+#define ANA_AC_SDLB_THRES_THRES_HYS_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_THRES_THRES_HYS, x)
+#define ANA_AC_SDLB_THRES_THRES_HYS_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_THRES_THRES_HYS, x)
+
+/* ANA_AC_SDLB:LBSET_TBL:XLB_NEXT */
+#define ANA_AC_SDLB_XLB_NEXT(g) __REG(TARGET_ANA_AC_SDLB,\
+ 0, 1, 0, g, 4616, 64, 16, 0, 1, 4)
+
+#define ANA_AC_SDLB_XLB_NEXT_LBSET_NEXT GENMASK(12, 0)
+#define ANA_AC_SDLB_XLB_NEXT_LBSET_NEXT_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_XLB_NEXT_LBSET_NEXT, x)
+#define ANA_AC_SDLB_XLB_NEXT_LBSET_NEXT_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_XLB_NEXT_LBSET_NEXT, x)
+
+#define ANA_AC_SDLB_XLB_NEXT_LBGRP GENMASK(27, 24)
+#define ANA_AC_SDLB_XLB_NEXT_LBGRP_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_XLB_NEXT_LBGRP, x)
+#define ANA_AC_SDLB_XLB_NEXT_LBGRP_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_XLB_NEXT_LBGRP, x)
+
+/* ANA_AC_SDLB:LBSET_TBL:INH_CTRL */
+#define ANA_AC_SDLB_INH_CTRL(g, r) __REG(TARGET_ANA_AC_SDLB,\
+ 0, 1, 0, g, 4616, 64, 20, r, 2, 4)
+
+#define ANA_AC_SDLB_INH_CTRL_PUP_TOKENS_MAX GENMASK(12, 0)
+#define ANA_AC_SDLB_INH_CTRL_PUP_TOKENS_MAX_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_INH_CTRL_PUP_TOKENS_MAX, x)
+#define ANA_AC_SDLB_INH_CTRL_PUP_TOKENS_MAX_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_INH_CTRL_PUP_TOKENS_MAX, x)
+
+#define ANA_AC_SDLB_INH_CTRL_INH_MODE GENMASK(21, 20)
+#define ANA_AC_SDLB_INH_CTRL_INH_MODE_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_INH_CTRL_INH_MODE, x)
+#define ANA_AC_SDLB_INH_CTRL_INH_MODE_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_INH_CTRL_INH_MODE, x)
+
+#define ANA_AC_SDLB_INH_CTRL_INH_LB BIT(24)
+#define ANA_AC_SDLB_INH_CTRL_INH_LB_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_INH_CTRL_INH_LB, x)
+#define ANA_AC_SDLB_INH_CTRL_INH_LB_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_INH_CTRL_INH_LB, x)
+
+/* ANA_AC_SDLB:LBSET_TBL:INH_LBSET_ADDR */
+#define ANA_AC_SDLB_INH_LBSET_ADDR(g) __REG(TARGET_ANA_AC_SDLB,\
+ 0, 1, 0, g, 4616, 64, 28, 0, 1, 4)
+
+#define ANA_AC_SDLB_INH_LBSET_ADDR_INH_LBSET_ADDR GENMASK(12, 0)
+#define ANA_AC_SDLB_INH_LBSET_ADDR_INH_LBSET_ADDR_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_INH_LBSET_ADDR_INH_LBSET_ADDR, x)
+#define ANA_AC_SDLB_INH_LBSET_ADDR_INH_LBSET_ADDR_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_INH_LBSET_ADDR_INH_LBSET_ADDR, x)
+
+/* ANA_AC_SDLB:LBSET_TBL:DLB_MISC */
+#define ANA_AC_SDLB_DLB_MISC(g) __REG(TARGET_ANA_AC_SDLB,\
+ 0, 1, 0, g, 4616, 64, 32, 0, 1, 4)
+
+#define ANA_AC_SDLB_DLB_MISC_DLB_FRM_RATE_ENA BIT(0)
+#define ANA_AC_SDLB_DLB_MISC_DLB_FRM_RATE_ENA_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_DLB_MISC_DLB_FRM_RATE_ENA, x)
+#define ANA_AC_SDLB_DLB_MISC_DLB_FRM_RATE_ENA_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_DLB_MISC_DLB_FRM_RATE_ENA, x)
+
+#define ANA_AC_SDLB_DLB_MISC_MARK_ALL_FRMS_RED_ENA BIT(6)
+#define ANA_AC_SDLB_DLB_MISC_MARK_ALL_FRMS_RED_ENA_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_DLB_MISC_MARK_ALL_FRMS_RED_ENA, x)
+#define ANA_AC_SDLB_DLB_MISC_MARK_ALL_FRMS_RED_ENA_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_DLB_MISC_MARK_ALL_FRMS_RED_ENA, x)
+
+#define ANA_AC_SDLB_DLB_MISC_DLB_FRM_ADJ GENMASK(14, 8)
+#define ANA_AC_SDLB_DLB_MISC_DLB_FRM_ADJ_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_DLB_MISC_DLB_FRM_ADJ, x)
+#define ANA_AC_SDLB_DLB_MISC_DLB_FRM_ADJ_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_DLB_MISC_DLB_FRM_ADJ, x)
+
+/* ANA_AC_SDLB:LBSET_TBL:DLB_CFG */
+#define ANA_AC_SDLB_DLB_CFG(g) __REG(TARGET_ANA_AC_SDLB,\
+ 0, 1, 0, g, 4616, 64, 36, 0, 1, 4)
+
+#define ANA_AC_SDLB_DLB_CFG_DROP_ON_YELLOW_ENA BIT(11)
+#define ANA_AC_SDLB_DLB_CFG_DROP_ON_YELLOW_ENA_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_DLB_CFG_DROP_ON_YELLOW_ENA, x)
+#define ANA_AC_SDLB_DLB_CFG_DROP_ON_YELLOW_ENA_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_DLB_CFG_DROP_ON_YELLOW_ENA, x)
+
+#define ANA_AC_SDLB_DLB_CFG_DP_BYPASS_LVL GENMASK(10, 9)
+#define ANA_AC_SDLB_DLB_CFG_DP_BYPASS_LVL_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_DLB_CFG_DP_BYPASS_LVL, x)
+#define ANA_AC_SDLB_DLB_CFG_DP_BYPASS_LVL_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_DLB_CFG_DP_BYPASS_LVL, x)
+
+#define ANA_AC_SDLB_DLB_CFG_HIER_DLB_DIS BIT(8)
+#define ANA_AC_SDLB_DLB_CFG_HIER_DLB_DIS_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_DLB_CFG_HIER_DLB_DIS, x)
+#define ANA_AC_SDLB_DLB_CFG_HIER_DLB_DIS_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_DLB_CFG_HIER_DLB_DIS, x)
+
+#define ANA_AC_SDLB_DLB_CFG_ENCAP_DATA_DIS BIT(7)
+#define ANA_AC_SDLB_DLB_CFG_ENCAP_DATA_DIS_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_DLB_CFG_ENCAP_DATA_DIS, x)
+#define ANA_AC_SDLB_DLB_CFG_ENCAP_DATA_DIS_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_DLB_CFG_ENCAP_DATA_DIS, x)
+
+#define ANA_AC_SDLB_DLB_CFG_COLOR_AWARE_LVL GENMASK(6, 5)
+#define ANA_AC_SDLB_DLB_CFG_COLOR_AWARE_LVL_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_DLB_CFG_COLOR_AWARE_LVL, x)
+#define ANA_AC_SDLB_DLB_CFG_COLOR_AWARE_LVL_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_DLB_CFG_COLOR_AWARE_LVL, x)
+
+#define ANA_AC_SDLB_DLB_CFG_CIR_INC_DP_VAL GENMASK(4, 3)
+#define ANA_AC_SDLB_DLB_CFG_CIR_INC_DP_VAL_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_DLB_CFG_CIR_INC_DP_VAL, x)
+#define ANA_AC_SDLB_DLB_CFG_CIR_INC_DP_VAL_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_DLB_CFG_CIR_INC_DP_VAL, x)
+
+#define ANA_AC_SDLB_DLB_CFG_DLB_MODE BIT(2)
+#define ANA_AC_SDLB_DLB_CFG_DLB_MODE_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_DLB_CFG_DLB_MODE, x)
+#define ANA_AC_SDLB_DLB_CFG_DLB_MODE_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_DLB_CFG_DLB_MODE, x)
+
+#define ANA_AC_SDLB_DLB_CFG_TRAFFIC_TYPE_MASK GENMASK(1, 0)
+#define ANA_AC_SDLB_DLB_CFG_TRAFFIC_TYPE_MASK_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_DLB_CFG_TRAFFIC_TYPE_MASK, x)
+#define ANA_AC_SDLB_DLB_CFG_TRAFFIC_TYPE_MASK_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_DLB_CFG_TRAFFIC_TYPE_MASK, x)
+
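(Editorial aside, not part of the patch.) Each field above comes with a _SET/_GET pair built directly on FIELD_PREP()/FIELD_GET(), so a multi-field register value is composed by OR-ing the _SET results together. A self-contained sketch of that composition for DLB_CFG; the GENMASK/FIELD_PREP/FIELD_GET definitions below are simplified userspace approximations of the kernel macros, and the three field masks are copied from the definitions above:

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's bitfield helpers. */
#define GENMASK(h, l)    (((~0u) << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP(m, v) (((v) << __builtin_ctz(m)) & (m))
#define FIELD_GET(m, v)  (((v) & (m)) >> __builtin_ctz(m))

#define DLB_CFG_COLOR_AWARE_LVL GENMASK(6, 5)
#define DLB_CFG_DLB_MODE        (1u << 2)
#define DLB_CFG_TRAFFIC_TYPE    GENMASK(1, 0)

int main(void)
{
	/* Compose one DLB_CFG word from three fields. */
	uint32_t v = FIELD_PREP(DLB_CFG_COLOR_AWARE_LVL, 2) |
		     FIELD_PREP(DLB_CFG_DLB_MODE, 1) |
		     FIELD_PREP(DLB_CFG_TRAFFIC_TYPE, 3);

	printf("DLB_CFG = 0x%x, mode = %u\n",
	       v, FIELD_GET(DLB_CFG_DLB_MODE, v));
	return 0;
}

FIELD_PREP() shifts the value into place and clips it to the mask; FIELD_GET() inverts that, which is exactly the contract the generated _SET/_GET wrappers expose.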
+/* ANA_CL:PORT:FILTER_CTRL */
+#define ANA_CL_FILTER_CTRL(g) __REG(TARGET_ANA_CL,\
+ 0, 1, 131072, g, 70, 512, 4, 0, 1, 4)
+
+#define ANA_CL_FILTER_CTRL_FILTER_SMAC_MC_DIS BIT(2)
+#define ANA_CL_FILTER_CTRL_FILTER_SMAC_MC_DIS_SET(x)\
+ FIELD_PREP(ANA_CL_FILTER_CTRL_FILTER_SMAC_MC_DIS, x)
+#define ANA_CL_FILTER_CTRL_FILTER_SMAC_MC_DIS_GET(x)\
+ FIELD_GET(ANA_CL_FILTER_CTRL_FILTER_SMAC_MC_DIS, x)
+
+#define ANA_CL_FILTER_CTRL_FILTER_NULL_MAC_DIS BIT(1)
+#define ANA_CL_FILTER_CTRL_FILTER_NULL_MAC_DIS_SET(x)\
+ FIELD_PREP(ANA_CL_FILTER_CTRL_FILTER_NULL_MAC_DIS, x)
+#define ANA_CL_FILTER_CTRL_FILTER_NULL_MAC_DIS_GET(x)\
+ FIELD_GET(ANA_CL_FILTER_CTRL_FILTER_NULL_MAC_DIS, x)
+
+#define ANA_CL_FILTER_CTRL_FORCE_FCS_UPDATE_ENA BIT(0)
+#define ANA_CL_FILTER_CTRL_FORCE_FCS_UPDATE_ENA_SET(x)\
+ FIELD_PREP(ANA_CL_FILTER_CTRL_FORCE_FCS_UPDATE_ENA, x)
+#define ANA_CL_FILTER_CTRL_FORCE_FCS_UPDATE_ENA_GET(x)\
+ FIELD_GET(ANA_CL_FILTER_CTRL_FORCE_FCS_UPDATE_ENA, x)
+
+/* ANA_CL:PORT:VLAN_FILTER_CTRL */
+#define ANA_CL_VLAN_FILTER_CTRL(g, r) __REG(TARGET_ANA_CL,\
+ 0, 1, 131072, g, 70, 512, 8, r, 3, 4)
+
+#define ANA_CL_VLAN_FILTER_CTRL_TAG_REQUIRED_ENA BIT(10)
+#define ANA_CL_VLAN_FILTER_CTRL_TAG_REQUIRED_ENA_SET(x)\
+ FIELD_PREP(ANA_CL_VLAN_FILTER_CTRL_TAG_REQUIRED_ENA, x)
+#define ANA_CL_VLAN_FILTER_CTRL_TAG_REQUIRED_ENA_GET(x)\
+ FIELD_GET(ANA_CL_VLAN_FILTER_CTRL_TAG_REQUIRED_ENA, x)
+
+#define ANA_CL_VLAN_FILTER_CTRL_PRIO_CTAG_DIS BIT(9)
+#define ANA_CL_VLAN_FILTER_CTRL_PRIO_CTAG_DIS_SET(x)\
+ FIELD_PREP(ANA_CL_VLAN_FILTER_CTRL_PRIO_CTAG_DIS, x)
+#define ANA_CL_VLAN_FILTER_CTRL_PRIO_CTAG_DIS_GET(x)\
+ FIELD_GET(ANA_CL_VLAN_FILTER_CTRL_PRIO_CTAG_DIS, x)
+
+#define ANA_CL_VLAN_FILTER_CTRL_CTAG_DIS BIT(8)
+#define ANA_CL_VLAN_FILTER_CTRL_CTAG_DIS_SET(x)\
+ FIELD_PREP(ANA_CL_VLAN_FILTER_CTRL_CTAG_DIS, x)
+#define ANA_CL_VLAN_FILTER_CTRL_CTAG_DIS_GET(x)\
+ FIELD_GET(ANA_CL_VLAN_FILTER_CTRL_CTAG_DIS, x)
+
+#define ANA_CL_VLAN_FILTER_CTRL_PRIO_STAG_DIS BIT(7)
+#define ANA_CL_VLAN_FILTER_CTRL_PRIO_STAG_DIS_SET(x)\
+ FIELD_PREP(ANA_CL_VLAN_FILTER_CTRL_PRIO_STAG_DIS, x)
+#define ANA_CL_VLAN_FILTER_CTRL_PRIO_STAG_DIS_GET(x)\
+ FIELD_GET(ANA_CL_VLAN_FILTER_CTRL_PRIO_STAG_DIS, x)
+
+#define ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST1_STAG_DIS BIT(6)
+#define ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST1_STAG_DIS_SET(x)\
+ FIELD_PREP(ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST1_STAG_DIS, x)
+#define ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST1_STAG_DIS_GET(x)\
+ FIELD_GET(ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST1_STAG_DIS, x)
+
+#define ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST2_STAG_DIS BIT(5)
+#define ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST2_STAG_DIS_SET(x)\
+ FIELD_PREP(ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST2_STAG_DIS, x)
+#define ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST2_STAG_DIS_GET(x)\
+ FIELD_GET(ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST2_STAG_DIS, x)
+
+#define ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST3_STAG_DIS BIT(4)
+#define ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST3_STAG_DIS_SET(x)\
+ FIELD_PREP(ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST3_STAG_DIS, x)
+#define ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST3_STAG_DIS_GET(x)\
+ FIELD_GET(ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST3_STAG_DIS, x)
+
+#define ANA_CL_VLAN_FILTER_CTRL_STAG_DIS BIT(3)
+#define ANA_CL_VLAN_FILTER_CTRL_STAG_DIS_SET(x)\
+ FIELD_PREP(ANA_CL_VLAN_FILTER_CTRL_STAG_DIS, x)
+#define ANA_CL_VLAN_FILTER_CTRL_STAG_DIS_GET(x)\
+ FIELD_GET(ANA_CL_VLAN_FILTER_CTRL_STAG_DIS, x)
+
+#define ANA_CL_VLAN_FILTER_CTRL_CUST1_STAG_DIS BIT(2)
+#define ANA_CL_VLAN_FILTER_CTRL_CUST1_STAG_DIS_SET(x)\
+ FIELD_PREP(ANA_CL_VLAN_FILTER_CTRL_CUST1_STAG_DIS, x)
+#define ANA_CL_VLAN_FILTER_CTRL_CUST1_STAG_DIS_GET(x)\
+ FIELD_GET(ANA_CL_VLAN_FILTER_CTRL_CUST1_STAG_DIS, x)
+
+#define ANA_CL_VLAN_FILTER_CTRL_CUST2_STAG_DIS BIT(1)
+#define ANA_CL_VLAN_FILTER_CTRL_CUST2_STAG_DIS_SET(x)\
+ FIELD_PREP(ANA_CL_VLAN_FILTER_CTRL_CUST2_STAG_DIS, x)
+#define ANA_CL_VLAN_FILTER_CTRL_CUST2_STAG_DIS_GET(x)\
+ FIELD_GET(ANA_CL_VLAN_FILTER_CTRL_CUST2_STAG_DIS, x)
+
+#define ANA_CL_VLAN_FILTER_CTRL_CUST3_STAG_DIS BIT(0)
+#define ANA_CL_VLAN_FILTER_CTRL_CUST3_STAG_DIS_SET(x)\
+ FIELD_PREP(ANA_CL_VLAN_FILTER_CTRL_CUST3_STAG_DIS, x)
+#define ANA_CL_VLAN_FILTER_CTRL_CUST3_STAG_DIS_GET(x)\
+ FIELD_GET(ANA_CL_VLAN_FILTER_CTRL_CUST3_STAG_DIS, x)
+
+/* ANA_CL:PORT:ETAG_FILTER_CTRL */
+#define ANA_CL_ETAG_FILTER_CTRL(g) __REG(TARGET_ANA_CL,\
+ 0, 1, 131072, g, 70, 512, 20, 0, 1, 4)
+
+#define ANA_CL_ETAG_FILTER_CTRL_ETAG_REQUIRED_ENA BIT(1)
+#define ANA_CL_ETAG_FILTER_CTRL_ETAG_REQUIRED_ENA_SET(x)\
+ FIELD_PREP(ANA_CL_ETAG_FILTER_CTRL_ETAG_REQUIRED_ENA, x)
+#define ANA_CL_ETAG_FILTER_CTRL_ETAG_REQUIRED_ENA_GET(x)\
+ FIELD_GET(ANA_CL_ETAG_FILTER_CTRL_ETAG_REQUIRED_ENA, x)
+
+#define ANA_CL_ETAG_FILTER_CTRL_ETAG_DIS BIT(0)
+#define ANA_CL_ETAG_FILTER_CTRL_ETAG_DIS_SET(x)\
+ FIELD_PREP(ANA_CL_ETAG_FILTER_CTRL_ETAG_DIS, x)
+#define ANA_CL_ETAG_FILTER_CTRL_ETAG_DIS_GET(x)\
+ FIELD_GET(ANA_CL_ETAG_FILTER_CTRL_ETAG_DIS, x)
+
+/* ANA_CL:PORT:VLAN_CTRL */
+#define ANA_CL_VLAN_CTRL(g) __REG(TARGET_ANA_CL,\
+ 0, 1, 131072, g, 70, 512, 32, 0, 1, 4)
+
+#define ANA_CL_VLAN_CTRL_PORT_VOE_TPID_AWARE_DIS GENMASK(30, 26)
+#define ANA_CL_VLAN_CTRL_PORT_VOE_TPID_AWARE_DIS_SET(x)\
+ FIELD_PREP(ANA_CL_VLAN_CTRL_PORT_VOE_TPID_AWARE_DIS, x)
+#define ANA_CL_VLAN_CTRL_PORT_VOE_TPID_AWARE_DIS_GET(x)\
+ FIELD_GET(ANA_CL_VLAN_CTRL_PORT_VOE_TPID_AWARE_DIS, x)
+
+#define ANA_CL_VLAN_CTRL_PORT_VOE_DEFAULT_PCP GENMASK(25, 23)
+#define ANA_CL_VLAN_CTRL_PORT_VOE_DEFAULT_PCP_SET(x)\
+ FIELD_PREP(ANA_CL_VLAN_CTRL_PORT_VOE_DEFAULT_PCP, x)
+#define ANA_CL_VLAN_CTRL_PORT_VOE_DEFAULT_PCP_GET(x)\
+ FIELD_GET(ANA_CL_VLAN_CTRL_PORT_VOE_DEFAULT_PCP, x)
+
+#define ANA_CL_VLAN_CTRL_PORT_VOE_DEFAULT_DEI BIT(22)
+#define ANA_CL_VLAN_CTRL_PORT_VOE_DEFAULT_DEI_SET(x)\
+ FIELD_PREP(ANA_CL_VLAN_CTRL_PORT_VOE_DEFAULT_DEI, x)
+#define ANA_CL_VLAN_CTRL_PORT_VOE_DEFAULT_DEI_GET(x)\
+ FIELD_GET(ANA_CL_VLAN_CTRL_PORT_VOE_DEFAULT_DEI, x)
+
+#define ANA_CL_VLAN_CTRL_VLAN_PCP_DEI_TRANS_ENA BIT(21)
+#define ANA_CL_VLAN_CTRL_VLAN_PCP_DEI_TRANS_ENA_SET(x)\
+ FIELD_PREP(ANA_CL_VLAN_CTRL_VLAN_PCP_DEI_TRANS_ENA, x)
+#define ANA_CL_VLAN_CTRL_VLAN_PCP_DEI_TRANS_ENA_GET(x)\
+ FIELD_GET(ANA_CL_VLAN_CTRL_VLAN_PCP_DEI_TRANS_ENA, x)
+
+#define ANA_CL_VLAN_CTRL_VLAN_TAG_SEL BIT(20)
+#define ANA_CL_VLAN_CTRL_VLAN_TAG_SEL_SET(x)\
+ FIELD_PREP(ANA_CL_VLAN_CTRL_VLAN_TAG_SEL, x)
+#define ANA_CL_VLAN_CTRL_VLAN_TAG_SEL_GET(x)\
+ FIELD_GET(ANA_CL_VLAN_CTRL_VLAN_TAG_SEL, x)
+
+#define ANA_CL_VLAN_CTRL_VLAN_AWARE_ENA BIT(19)
+#define ANA_CL_VLAN_CTRL_VLAN_AWARE_ENA_SET(x)\
+ FIELD_PREP(ANA_CL_VLAN_CTRL_VLAN_AWARE_ENA, x)
+#define ANA_CL_VLAN_CTRL_VLAN_AWARE_ENA_GET(x)\
+ FIELD_GET(ANA_CL_VLAN_CTRL_VLAN_AWARE_ENA, x)
+
+#define ANA_CL_VLAN_CTRL_VLAN_POP_CNT GENMASK(18, 17)
+#define ANA_CL_VLAN_CTRL_VLAN_POP_CNT_SET(x)\
+ FIELD_PREP(ANA_CL_VLAN_CTRL_VLAN_POP_CNT, x)
+#define ANA_CL_VLAN_CTRL_VLAN_POP_CNT_GET(x)\
+ FIELD_GET(ANA_CL_VLAN_CTRL_VLAN_POP_CNT, x)
+
+#define ANA_CL_VLAN_CTRL_PORT_TAG_TYPE BIT(16)
+#define ANA_CL_VLAN_CTRL_PORT_TAG_TYPE_SET(x)\
+ FIELD_PREP(ANA_CL_VLAN_CTRL_PORT_TAG_TYPE, x)
+#define ANA_CL_VLAN_CTRL_PORT_TAG_TYPE_GET(x)\
+ FIELD_GET(ANA_CL_VLAN_CTRL_PORT_TAG_TYPE, x)
+
+#define ANA_CL_VLAN_CTRL_PORT_PCP GENMASK(15, 13)
+#define ANA_CL_VLAN_CTRL_PORT_PCP_SET(x)\
+ FIELD_PREP(ANA_CL_VLAN_CTRL_PORT_PCP, x)
+#define ANA_CL_VLAN_CTRL_PORT_PCP_GET(x)\
+ FIELD_GET(ANA_CL_VLAN_CTRL_PORT_PCP, x)
+
+#define ANA_CL_VLAN_CTRL_PORT_DEI BIT(12)
+#define ANA_CL_VLAN_CTRL_PORT_DEI_SET(x)\
+ FIELD_PREP(ANA_CL_VLAN_CTRL_PORT_DEI, x)
+#define ANA_CL_VLAN_CTRL_PORT_DEI_GET(x)\
+ FIELD_GET(ANA_CL_VLAN_CTRL_PORT_DEI, x)
+
+#define ANA_CL_VLAN_CTRL_PORT_VID GENMASK(11, 0)
+#define ANA_CL_VLAN_CTRL_PORT_VID_SET(x)\
+ FIELD_PREP(ANA_CL_VLAN_CTRL_PORT_VID, x)
+#define ANA_CL_VLAN_CTRL_PORT_VID_GET(x)\
+ FIELD_GET(ANA_CL_VLAN_CTRL_PORT_VID, x)
+
+/* ANA_CL:PORT:VLAN_CTRL_2 */
+#define ANA_CL_VLAN_CTRL_2(g) __REG(TARGET_ANA_CL,\
+ 0, 1, 131072, g, 70, 512, 36, 0, 1, 4)
+
+#define ANA_CL_VLAN_CTRL_2_VLAN_PUSH_CNT GENMASK(1, 0)
+#define ANA_CL_VLAN_CTRL_2_VLAN_PUSH_CNT_SET(x)\
+ FIELD_PREP(ANA_CL_VLAN_CTRL_2_VLAN_PUSH_CNT, x)
+#define ANA_CL_VLAN_CTRL_2_VLAN_PUSH_CNT_GET(x)\
+ FIELD_GET(ANA_CL_VLAN_CTRL_2_VLAN_PUSH_CNT, x)
+
+/* ANA_CL:PORT:PCP_DEI_MAP_CFG */
+#define ANA_CL_PCP_DEI_MAP_CFG(g, r) __REG(TARGET_ANA_CL,\
+ 0, 1, 131072, g, 70, 512, 108, r, 16, 4)
+
+#define ANA_CL_PCP_DEI_MAP_CFG_PCP_DEI_DP_VAL GENMASK(4, 3)
+#define ANA_CL_PCP_DEI_MAP_CFG_PCP_DEI_DP_VAL_SET(x)\
+ FIELD_PREP(ANA_CL_PCP_DEI_MAP_CFG_PCP_DEI_DP_VAL, x)
+#define ANA_CL_PCP_DEI_MAP_CFG_PCP_DEI_DP_VAL_GET(x)\
+ FIELD_GET(ANA_CL_PCP_DEI_MAP_CFG_PCP_DEI_DP_VAL, x)
+
+#define ANA_CL_PCP_DEI_MAP_CFG_PCP_DEI_QOS_VAL GENMASK(2, 0)
+#define ANA_CL_PCP_DEI_MAP_CFG_PCP_DEI_QOS_VAL_SET(x)\
+ FIELD_PREP(ANA_CL_PCP_DEI_MAP_CFG_PCP_DEI_QOS_VAL, x)
+#define ANA_CL_PCP_DEI_MAP_CFG_PCP_DEI_QOS_VAL_GET(x)\
+ FIELD_GET(ANA_CL_PCP_DEI_MAP_CFG_PCP_DEI_QOS_VAL, x)
+
+/* ANA_CL:PORT:QOS_CFG */
+#define ANA_CL_QOS_CFG(g) __REG(TARGET_ANA_CL,\
+ 0, 1, 131072, g, 70, 512, 172, 0, 1, 4)
+
+#define ANA_CL_QOS_CFG_DEFAULT_COSID_ENA BIT(17)
+#define ANA_CL_QOS_CFG_DEFAULT_COSID_ENA_SET(x)\
+ FIELD_PREP(ANA_CL_QOS_CFG_DEFAULT_COSID_ENA, x)
+#define ANA_CL_QOS_CFG_DEFAULT_COSID_ENA_GET(x)\
+ FIELD_GET(ANA_CL_QOS_CFG_DEFAULT_COSID_ENA, x)
+
+#define ANA_CL_QOS_CFG_DEFAULT_COSID_VAL GENMASK(16, 14)
+#define ANA_CL_QOS_CFG_DEFAULT_COSID_VAL_SET(x)\
+ FIELD_PREP(ANA_CL_QOS_CFG_DEFAULT_COSID_VAL, x)
+#define ANA_CL_QOS_CFG_DEFAULT_COSID_VAL_GET(x)\
+ FIELD_GET(ANA_CL_QOS_CFG_DEFAULT_COSID_VAL, x)
+
+#define ANA_CL_QOS_CFG_DSCP_REWR_MODE_SEL GENMASK(13, 12)
+#define ANA_CL_QOS_CFG_DSCP_REWR_MODE_SEL_SET(x)\
+ FIELD_PREP(ANA_CL_QOS_CFG_DSCP_REWR_MODE_SEL, x)
+#define ANA_CL_QOS_CFG_DSCP_REWR_MODE_SEL_GET(x)\
+ FIELD_GET(ANA_CL_QOS_CFG_DSCP_REWR_MODE_SEL, x)
+
+#define ANA_CL_QOS_CFG_DSCP_TRANSLATE_ENA BIT(11)
+#define ANA_CL_QOS_CFG_DSCP_TRANSLATE_ENA_SET(x)\
+ FIELD_PREP(ANA_CL_QOS_CFG_DSCP_TRANSLATE_ENA, x)
+#define ANA_CL_QOS_CFG_DSCP_TRANSLATE_ENA_GET(x)\
+ FIELD_GET(ANA_CL_QOS_CFG_DSCP_TRANSLATE_ENA, x)
+
+#define ANA_CL_QOS_CFG_DSCP_KEEP_ENA BIT(10)
+#define ANA_CL_QOS_CFG_DSCP_KEEP_ENA_SET(x)\
+ FIELD_PREP(ANA_CL_QOS_CFG_DSCP_KEEP_ENA, x)
+#define ANA_CL_QOS_CFG_DSCP_KEEP_ENA_GET(x)\
+ FIELD_GET(ANA_CL_QOS_CFG_DSCP_KEEP_ENA, x)
+
+#define ANA_CL_QOS_CFG_KEEP_ENA BIT(9)
+#define ANA_CL_QOS_CFG_KEEP_ENA_SET(x)\
+ FIELD_PREP(ANA_CL_QOS_CFG_KEEP_ENA, x)
+#define ANA_CL_QOS_CFG_KEEP_ENA_GET(x)\
+ FIELD_GET(ANA_CL_QOS_CFG_KEEP_ENA, x)
+
+#define ANA_CL_QOS_CFG_PCP_DEI_DP_ENA BIT(8)
+#define ANA_CL_QOS_CFG_PCP_DEI_DP_ENA_SET(x)\
+ FIELD_PREP(ANA_CL_QOS_CFG_PCP_DEI_DP_ENA, x)
+#define ANA_CL_QOS_CFG_PCP_DEI_DP_ENA_GET(x)\
+ FIELD_GET(ANA_CL_QOS_CFG_PCP_DEI_DP_ENA, x)
+
+#define ANA_CL_QOS_CFG_PCP_DEI_QOS_ENA BIT(7)
+#define ANA_CL_QOS_CFG_PCP_DEI_QOS_ENA_SET(x)\
+ FIELD_PREP(ANA_CL_QOS_CFG_PCP_DEI_QOS_ENA, x)
+#define ANA_CL_QOS_CFG_PCP_DEI_QOS_ENA_GET(x)\
+ FIELD_GET(ANA_CL_QOS_CFG_PCP_DEI_QOS_ENA, x)
+
+#define ANA_CL_QOS_CFG_DSCP_DP_ENA BIT(6)
+#define ANA_CL_QOS_CFG_DSCP_DP_ENA_SET(x)\
+ FIELD_PREP(ANA_CL_QOS_CFG_DSCP_DP_ENA, x)
+#define ANA_CL_QOS_CFG_DSCP_DP_ENA_GET(x)\
+ FIELD_GET(ANA_CL_QOS_CFG_DSCP_DP_ENA, x)
+
+#define ANA_CL_QOS_CFG_DSCP_QOS_ENA BIT(5)
+#define ANA_CL_QOS_CFG_DSCP_QOS_ENA_SET(x)\
+ FIELD_PREP(ANA_CL_QOS_CFG_DSCP_QOS_ENA, x)
+#define ANA_CL_QOS_CFG_DSCP_QOS_ENA_GET(x)\
+ FIELD_GET(ANA_CL_QOS_CFG_DSCP_QOS_ENA, x)
+
+#define ANA_CL_QOS_CFG_DEFAULT_DP_VAL GENMASK(4, 3)
+#define ANA_CL_QOS_CFG_DEFAULT_DP_VAL_SET(x)\
+ FIELD_PREP(ANA_CL_QOS_CFG_DEFAULT_DP_VAL, x)
+#define ANA_CL_QOS_CFG_DEFAULT_DP_VAL_GET(x)\
+ FIELD_GET(ANA_CL_QOS_CFG_DEFAULT_DP_VAL, x)
+
+#define ANA_CL_QOS_CFG_DEFAULT_QOS_VAL GENMASK(2, 0)
+#define ANA_CL_QOS_CFG_DEFAULT_QOS_VAL_SET(x)\
+ FIELD_PREP(ANA_CL_QOS_CFG_DEFAULT_QOS_VAL, x)
+#define ANA_CL_QOS_CFG_DEFAULT_QOS_VAL_GET(x)\
+ FIELD_GET(ANA_CL_QOS_CFG_DEFAULT_QOS_VAL, x)
+
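(Editorial aside, not part of the patch.) Masks like the QOS_CFG fields above are typically applied with a read-modify-write sequence, so that changing one QoS setting leaves the rest of the port configuration intact. A sketch under that assumption, with reg_read()/reg_write() as hypothetical stand-ins for the driver's register accessors:

#include <stdint.h>
#include <stdio.h>

#define QOS_CFG_DEFAULT_QOS_VAL 0x7u		/* GENMASK(2, 0) above */

static uint32_t fake_reg = 0x1f8;		/* pretend hardware state */

static uint32_t reg_read(void) { return fake_reg; }
static void reg_write(uint32_t v) { fake_reg = v; }

/* Update the default QoS class, leaving every other bit untouched. */
static void set_default_qos(uint32_t qos)
{
	uint32_t v = reg_read();

	v &= ~QOS_CFG_DEFAULT_QOS_VAL;
	v |= qos & QOS_CFG_DEFAULT_QOS_VAL;
	reg_write(v);
}

int main(void)
{
	set_default_qos(5);
	printf("QOS_CFG = 0x%x\n", reg_read());	/* prints 0x1fd */
	return 0;
}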
+/* ANA_CL:PORT:CAPTURE_BPDU_CFG */
+#define ANA_CL_CAPTURE_BPDU_CFG(g) __REG(TARGET_ANA_CL,\
+ 0, 1, 131072, g, 70, 512, 196, 0, 1, 4)
+
+/* ANA_CL:PORT:ADV_CL_CFG_2 */
+#define ANA_CL_ADV_CL_CFG_2(g, r) __REG(TARGET_ANA_CL,\
+ 0, 1, 131072, g, 70, 512, 200, r, 6, 4)
+
+#define ANA_CL_ADV_CL_CFG_2_USE_CL_TCI0_ENA BIT(1)
+#define ANA_CL_ADV_CL_CFG_2_USE_CL_TCI0_ENA_SET(x)\
+ FIELD_PREP(ANA_CL_ADV_CL_CFG_2_USE_CL_TCI0_ENA, x)
+#define ANA_CL_ADV_CL_CFG_2_USE_CL_TCI0_ENA_GET(x)\
+ FIELD_GET(ANA_CL_ADV_CL_CFG_2_USE_CL_TCI0_ENA, x)
+
+#define ANA_CL_ADV_CL_CFG_2_USE_CL_DSCP_ENA BIT(0)
+#define ANA_CL_ADV_CL_CFG_2_USE_CL_DSCP_ENA_SET(x)\
+ FIELD_PREP(ANA_CL_ADV_CL_CFG_2_USE_CL_DSCP_ENA, x)
+#define ANA_CL_ADV_CL_CFG_2_USE_CL_DSCP_ENA_GET(x)\
+ FIELD_GET(ANA_CL_ADV_CL_CFG_2_USE_CL_DSCP_ENA, x)
+
+/* ANA_CL:PORT:ADV_CL_CFG */
+#define ANA_CL_ADV_CL_CFG(g, r) __REG(TARGET_ANA_CL,\
+ 0, 1, 131072, g, 70, 512, 224, r, 6, 4)
+
+#define ANA_CL_ADV_CL_CFG_IP4_CLM_KEY_SEL GENMASK(30, 26)
+#define ANA_CL_ADV_CL_CFG_IP4_CLM_KEY_SEL_SET(x)\
+ FIELD_PREP(ANA_CL_ADV_CL_CFG_IP4_CLM_KEY_SEL, x)
+#define ANA_CL_ADV_CL_CFG_IP4_CLM_KEY_SEL_GET(x)\
+ FIELD_GET(ANA_CL_ADV_CL_CFG_IP4_CLM_KEY_SEL, x)
+
+#define ANA_CL_ADV_CL_CFG_IP6_CLM_KEY_SEL GENMASK(25, 21)
+#define ANA_CL_ADV_CL_CFG_IP6_CLM_KEY_SEL_SET(x)\
+ FIELD_PREP(ANA_CL_ADV_CL_CFG_IP6_CLM_KEY_SEL, x)
+#define ANA_CL_ADV_CL_CFG_IP6_CLM_KEY_SEL_GET(x)\
+ FIELD_GET(ANA_CL_ADV_CL_CFG_IP6_CLM_KEY_SEL, x)
+
+#define ANA_CL_ADV_CL_CFG_MPLS_UC_CLM_KEY_SEL GENMASK(20, 16)
+#define ANA_CL_ADV_CL_CFG_MPLS_UC_CLM_KEY_SEL_SET(x)\
+ FIELD_PREP(ANA_CL_ADV_CL_CFG_MPLS_UC_CLM_KEY_SEL, x)
+#define ANA_CL_ADV_CL_CFG_MPLS_UC_CLM_KEY_SEL_GET(x)\
+ FIELD_GET(ANA_CL_ADV_CL_CFG_MPLS_UC_CLM_KEY_SEL, x)
+
+#define ANA_CL_ADV_CL_CFG_MPLS_MC_CLM_KEY_SEL GENMASK(15, 11)
+#define ANA_CL_ADV_CL_CFG_MPLS_MC_CLM_KEY_SEL_SET(x)\
+ FIELD_PREP(ANA_CL_ADV_CL_CFG_MPLS_MC_CLM_KEY_SEL, x)
+#define ANA_CL_ADV_CL_CFG_MPLS_MC_CLM_KEY_SEL_GET(x)\
+ FIELD_GET(ANA_CL_ADV_CL_CFG_MPLS_MC_CLM_KEY_SEL, x)
+
+#define ANA_CL_ADV_CL_CFG_MLBS_CLM_KEY_SEL GENMASK(10, 6)
+#define ANA_CL_ADV_CL_CFG_MLBS_CLM_KEY_SEL_SET(x)\
+ FIELD_PREP(ANA_CL_ADV_CL_CFG_MLBS_CLM_KEY_SEL, x)
+#define ANA_CL_ADV_CL_CFG_MLBS_CLM_KEY_SEL_GET(x)\
+ FIELD_GET(ANA_CL_ADV_CL_CFG_MLBS_CLM_KEY_SEL, x)
+
+#define ANA_CL_ADV_CL_CFG_ETYPE_CLM_KEY_SEL GENMASK(5, 1)
+#define ANA_CL_ADV_CL_CFG_ETYPE_CLM_KEY_SEL_SET(x)\
+ FIELD_PREP(ANA_CL_ADV_CL_CFG_ETYPE_CLM_KEY_SEL, x)
+#define ANA_CL_ADV_CL_CFG_ETYPE_CLM_KEY_SEL_GET(x)\
+ FIELD_GET(ANA_CL_ADV_CL_CFG_ETYPE_CLM_KEY_SEL, x)
+
+#define ANA_CL_ADV_CL_CFG_LOOKUP_ENA BIT(0)
+#define ANA_CL_ADV_CL_CFG_LOOKUP_ENA_SET(x)\
+ FIELD_PREP(ANA_CL_ADV_CL_CFG_LOOKUP_ENA, x)
+#define ANA_CL_ADV_CL_CFG_LOOKUP_ENA_GET(x)\
+ FIELD_GET(ANA_CL_ADV_CL_CFG_LOOKUP_ENA, x)
+
+/* ANA_CL:COMMON:OWN_UPSID */
+#define ANA_CL_OWN_UPSID(r) __REG(TARGET_ANA_CL,\
+ 0, 1, 166912, 0, 1, 756, 0, r, 3, 4)
+
+#define ANA_CL_OWN_UPSID_OWN_UPSID GENMASK(4, 0)
+#define ANA_CL_OWN_UPSID_OWN_UPSID_SET(x)\
+ FIELD_PREP(ANA_CL_OWN_UPSID_OWN_UPSID, x)
+#define ANA_CL_OWN_UPSID_OWN_UPSID_GET(x)\
+ FIELD_GET(ANA_CL_OWN_UPSID_OWN_UPSID, x)
+
+/* ANA_CL:COMMON:DSCP_CFG */
+#define ANA_CL_DSCP_CFG(r) __REG(TARGET_ANA_CL,\
+ 0, 1, 166912, 0, 1, 756, 256, r, 64, 4)
+
+#define ANA_CL_DSCP_CFG_DSCP_TRANSLATE_VAL GENMASK(12, 7)
+#define ANA_CL_DSCP_CFG_DSCP_TRANSLATE_VAL_SET(x)\
+ FIELD_PREP(ANA_CL_DSCP_CFG_DSCP_TRANSLATE_VAL, x)
+#define ANA_CL_DSCP_CFG_DSCP_TRANSLATE_VAL_GET(x)\
+ FIELD_GET(ANA_CL_DSCP_CFG_DSCP_TRANSLATE_VAL, x)
+
+#define ANA_CL_DSCP_CFG_DSCP_QOS_VAL GENMASK(6, 4)
+#define ANA_CL_DSCP_CFG_DSCP_QOS_VAL_SET(x)\
+ FIELD_PREP(ANA_CL_DSCP_CFG_DSCP_QOS_VAL, x)
+#define ANA_CL_DSCP_CFG_DSCP_QOS_VAL_GET(x)\
+ FIELD_GET(ANA_CL_DSCP_CFG_DSCP_QOS_VAL, x)
+
+#define ANA_CL_DSCP_CFG_DSCP_DP_VAL GENMASK(3, 2)
+#define ANA_CL_DSCP_CFG_DSCP_DP_VAL_SET(x)\
+ FIELD_PREP(ANA_CL_DSCP_CFG_DSCP_DP_VAL, x)
+#define ANA_CL_DSCP_CFG_DSCP_DP_VAL_GET(x)\
+ FIELD_GET(ANA_CL_DSCP_CFG_DSCP_DP_VAL, x)
+
+#define ANA_CL_DSCP_CFG_DSCP_REWR_ENA BIT(1)
+#define ANA_CL_DSCP_CFG_DSCP_REWR_ENA_SET(x)\
+ FIELD_PREP(ANA_CL_DSCP_CFG_DSCP_REWR_ENA, x)
+#define ANA_CL_DSCP_CFG_DSCP_REWR_ENA_GET(x)\
+ FIELD_GET(ANA_CL_DSCP_CFG_DSCP_REWR_ENA, x)
+
+#define ANA_CL_DSCP_CFG_DSCP_TRUST_ENA BIT(0)
+#define ANA_CL_DSCP_CFG_DSCP_TRUST_ENA_SET(x)\
+ FIELD_PREP(ANA_CL_DSCP_CFG_DSCP_TRUST_ENA, x)
+#define ANA_CL_DSCP_CFG_DSCP_TRUST_ENA_GET(x)\
+ FIELD_GET(ANA_CL_DSCP_CFG_DSCP_TRUST_ENA, x)
+
+/* ANA_CL:COMMON:QOS_MAP_CFG */
+#define ANA_CL_QOS_MAP_CFG(r) __REG(TARGET_ANA_CL,\
+ 0, 1, 166912, 0, 1, 756, 512, r, 32, 4)
+
+#define ANA_CL_QOS_MAP_CFG_DSCP_REWR_VAL GENMASK(9, 4)
+#define ANA_CL_QOS_MAP_CFG_DSCP_REWR_VAL_SET(x)\
+ FIELD_PREP(ANA_CL_QOS_MAP_CFG_DSCP_REWR_VAL, x)
+#define ANA_CL_QOS_MAP_CFG_DSCP_REWR_VAL_GET(x)\
+ FIELD_GET(ANA_CL_QOS_MAP_CFG_DSCP_REWR_VAL, x)
+
+/* ANA_L2:COMMON:FWD_CFG */
+#define ANA_L2_FWD_CFG __REG(TARGET_ANA_L2,\
+ 0, 1, 566024, 0, 1, 700, 0, 0, 1, 4)
+
+#define ANA_L2_FWD_CFG_MAC_TBL_SPLIT_SEL GENMASK(21, 20)
+#define ANA_L2_FWD_CFG_MAC_TBL_SPLIT_SEL_SET(x)\
+ FIELD_PREP(ANA_L2_FWD_CFG_MAC_TBL_SPLIT_SEL, x)
+#define ANA_L2_FWD_CFG_MAC_TBL_SPLIT_SEL_GET(x)\
+ FIELD_GET(ANA_L2_FWD_CFG_MAC_TBL_SPLIT_SEL, x)
+
+#define ANA_L2_FWD_CFG_PORT_DEFAULT_BDLB_ENA BIT(18)
+#define ANA_L2_FWD_CFG_PORT_DEFAULT_BDLB_ENA_SET(x)\
+ FIELD_PREP(ANA_L2_FWD_CFG_PORT_DEFAULT_BDLB_ENA, x)
+#define ANA_L2_FWD_CFG_PORT_DEFAULT_BDLB_ENA_GET(x)\
+ FIELD_GET(ANA_L2_FWD_CFG_PORT_DEFAULT_BDLB_ENA, x)
+
+#define ANA_L2_FWD_CFG_QUEUE_DEFAULT_SDLB_ENA BIT(17)
+#define ANA_L2_FWD_CFG_QUEUE_DEFAULT_SDLB_ENA_SET(x)\
+ FIELD_PREP(ANA_L2_FWD_CFG_QUEUE_DEFAULT_SDLB_ENA, x)
+#define ANA_L2_FWD_CFG_QUEUE_DEFAULT_SDLB_ENA_GET(x)\
+ FIELD_GET(ANA_L2_FWD_CFG_QUEUE_DEFAULT_SDLB_ENA, x)
+
+#define ANA_L2_FWD_CFG_ISDX_LOOKUP_ENA BIT(16)
+#define ANA_L2_FWD_CFG_ISDX_LOOKUP_ENA_SET(x)\
+ FIELD_PREP(ANA_L2_FWD_CFG_ISDX_LOOKUP_ENA, x)
+#define ANA_L2_FWD_CFG_ISDX_LOOKUP_ENA_GET(x)\
+ FIELD_GET(ANA_L2_FWD_CFG_ISDX_LOOKUP_ENA, x)
+
+#define ANA_L2_FWD_CFG_CPU_DMAC_QU GENMASK(10, 8)
+#define ANA_L2_FWD_CFG_CPU_DMAC_QU_SET(x)\
+ FIELD_PREP(ANA_L2_FWD_CFG_CPU_DMAC_QU, x)
+#define ANA_L2_FWD_CFG_CPU_DMAC_QU_GET(x)\
+ FIELD_GET(ANA_L2_FWD_CFG_CPU_DMAC_QU, x)
+
+#define ANA_L2_FWD_CFG_LOOPBACK_ENA BIT(7)
+#define ANA_L2_FWD_CFG_LOOPBACK_ENA_SET(x)\
+ FIELD_PREP(ANA_L2_FWD_CFG_LOOPBACK_ENA, x)
+#define ANA_L2_FWD_CFG_LOOPBACK_ENA_GET(x)\
+ FIELD_GET(ANA_L2_FWD_CFG_LOOPBACK_ENA, x)
+
+#define ANA_L2_FWD_CFG_CPU_DMAC_COPY_ENA BIT(6)
+#define ANA_L2_FWD_CFG_CPU_DMAC_COPY_ENA_SET(x)\
+ FIELD_PREP(ANA_L2_FWD_CFG_CPU_DMAC_COPY_ENA, x)
+#define ANA_L2_FWD_CFG_CPU_DMAC_COPY_ENA_GET(x)\
+ FIELD_GET(ANA_L2_FWD_CFG_CPU_DMAC_COPY_ENA, x)
+
+#define ANA_L2_FWD_CFG_FILTER_MODE_SEL BIT(4)
+#define ANA_L2_FWD_CFG_FILTER_MODE_SEL_SET(x)\
+ FIELD_PREP(ANA_L2_FWD_CFG_FILTER_MODE_SEL, x)
+#define ANA_L2_FWD_CFG_FILTER_MODE_SEL_GET(x)\
+ FIELD_GET(ANA_L2_FWD_CFG_FILTER_MODE_SEL, x)
+
+#define ANA_L2_FWD_CFG_FLOOD_MIRROR_ENA BIT(3)
+#define ANA_L2_FWD_CFG_FLOOD_MIRROR_ENA_SET(x)\
+ FIELD_PREP(ANA_L2_FWD_CFG_FLOOD_MIRROR_ENA, x)
+#define ANA_L2_FWD_CFG_FLOOD_MIRROR_ENA_GET(x)\
+ FIELD_GET(ANA_L2_FWD_CFG_FLOOD_MIRROR_ENA, x)
+
+#define ANA_L2_FWD_CFG_FLOOD_IGNORE_VLAN_ENA BIT(2)
+#define ANA_L2_FWD_CFG_FLOOD_IGNORE_VLAN_ENA_SET(x)\
+ FIELD_PREP(ANA_L2_FWD_CFG_FLOOD_IGNORE_VLAN_ENA, x)
+#define ANA_L2_FWD_CFG_FLOOD_IGNORE_VLAN_ENA_GET(x)\
+ FIELD_GET(ANA_L2_FWD_CFG_FLOOD_IGNORE_VLAN_ENA, x)
+
+#define ANA_L2_FWD_CFG_FLOOD_CPU_COPY_ENA BIT(1)
+#define ANA_L2_FWD_CFG_FLOOD_CPU_COPY_ENA_SET(x)\
+ FIELD_PREP(ANA_L2_FWD_CFG_FLOOD_CPU_COPY_ENA, x)
+#define ANA_L2_FWD_CFG_FLOOD_CPU_COPY_ENA_GET(x)\
+ FIELD_GET(ANA_L2_FWD_CFG_FLOOD_CPU_COPY_ENA, x)
+
+#define ANA_L2_FWD_CFG_FWD_ENA BIT(0)
+#define ANA_L2_FWD_CFG_FWD_ENA_SET(x)\
+ FIELD_PREP(ANA_L2_FWD_CFG_FWD_ENA, x)
+#define ANA_L2_FWD_CFG_FWD_ENA_GET(x)\
+ FIELD_GET(ANA_L2_FWD_CFG_FWD_ENA, x)
+
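(Editorial aside, not part of the patch.) Going the other way, the same masks decode a FWD_CFG snapshot into individual settings, which is what the _GET helpers do. A small sketch of that decode, with plain shifts and masks standing in for FIELD_GET():

#include <stdint.h>
#include <stdio.h>

#define FWD_CFG_CPU_DMAC_QU	0x700u	/* GENMASK(10, 8) above */
#define FWD_CFG_FWD_ENA		0x1u	/* BIT(0) above */

int main(void)
{
	uint32_t v = 0x301;	/* example value read from FWD_CFG */

	printf("CPU queue %u, forwarding %s\n",
	       (v & FWD_CFG_CPU_DMAC_QU) >> 8,
	       (v & FWD_CFG_FWD_ENA) ? "on" : "off");
	return 0;
}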
+/* ANA_L2:COMMON:AUTO_LRN_CFG */
+#define ANA_L2_AUTO_LRN_CFG __REG(TARGET_ANA_L2,\
+ 0, 1, 566024, 0, 1, 700, 24, 0, 1, 4)
+
+/* ANA_L2:COMMON:AUTO_LRN_CFG1 */
+#define ANA_L2_AUTO_LRN_CFG1 __REG(TARGET_ANA_L2,\
+ 0, 1, 566024, 0, 1, 700, 28, 0, 1, 4)
+
+/* ANA_L2:COMMON:AUTO_LRN_CFG2 */
+#define ANA_L2_AUTO_LRN_CFG2 __REG(TARGET_ANA_L2,\
+ 0, 1, 566024, 0, 1, 700, 32, 0, 1, 4)
+
+#define ANA_L2_AUTO_LRN_CFG2_AUTO_LRN_ENA2 BIT(0)
+#define ANA_L2_AUTO_LRN_CFG2_AUTO_LRN_ENA2_SET(x)\
+ FIELD_PREP(ANA_L2_AUTO_LRN_CFG2_AUTO_LRN_ENA2, x)
+#define ANA_L2_AUTO_LRN_CFG2_AUTO_LRN_ENA2_GET(x)\
+ FIELD_GET(ANA_L2_AUTO_LRN_CFG2_AUTO_LRN_ENA2, x)
+
+/* ANA_L2:COMMON:OWN_UPSID */
+#define ANA_L2_OWN_UPSID(r) __REG(TARGET_ANA_L2,\
+ 0, 1, 566024, 0, 1, 700, 672, r, 3, 4)
+
+#define ANA_L2_OWN_UPSID_OWN_UPSID GENMASK(4, 0)
+#define ANA_L2_OWN_UPSID_OWN_UPSID_SET(x)\
+ FIELD_PREP(ANA_L2_OWN_UPSID_OWN_UPSID, x)
+#define ANA_L2_OWN_UPSID_OWN_UPSID_GET(x)\
+ FIELD_GET(ANA_L2_OWN_UPSID_OWN_UPSID, x)
+
+/* ANA_L2:ISDX:DLB_CFG */
+#define ANA_L2_DLB_CFG(g) __REG(TARGET_ANA_L2,\
+ 0, 1, 0, g, 4096, 128, 56, 0, 1, 4)
+
+#define ANA_L2_DLB_CFG_DLB_IDX GENMASK(12, 0)
+#define ANA_L2_DLB_CFG_DLB_IDX_SET(x)\
+ FIELD_PREP(ANA_L2_DLB_CFG_DLB_IDX, x)
+#define ANA_L2_DLB_CFG_DLB_IDX_GET(x)\
+ FIELD_GET(ANA_L2_DLB_CFG_DLB_IDX, x)
+
+/* ANA_L2:ISDX:TSN_CFG */
+#define ANA_L2_TSN_CFG(g) __REG(TARGET_ANA_L2,\
+ 0, 1, 0, g, 4096, 128, 100, 0, 1, 4)
+
+#define ANA_L2_TSN_CFG_TSN_SFID GENMASK(9, 0)
+#define ANA_L2_TSN_CFG_TSN_SFID_SET(x)\
+ FIELD_PREP(ANA_L2_TSN_CFG_TSN_SFID, x)
+#define ANA_L2_TSN_CFG_TSN_SFID_GET(x)\
+ FIELD_GET(ANA_L2_TSN_CFG_TSN_SFID, x)
+
+/* ANA_L3:COMMON:VLAN_CTRL */
+#define ANA_L3_VLAN_CTRL __REG(TARGET_ANA_L3,\
+ 0, 1, 493632, 0, 1, 184, 4, 0, 1, 4)
+
+#define ANA_L3_VLAN_CTRL_VLAN_ENA BIT(0)
+#define ANA_L3_VLAN_CTRL_VLAN_ENA_SET(x)\
+ FIELD_PREP(ANA_L3_VLAN_CTRL_VLAN_ENA, x)
+#define ANA_L3_VLAN_CTRL_VLAN_ENA_GET(x)\
+ FIELD_GET(ANA_L3_VLAN_CTRL_VLAN_ENA, x)
+
+/* ANA_L3:VLAN:VLAN_CFG */
+#define ANA_L3_VLAN_CFG(g) __REG(TARGET_ANA_L3,\
+ 0, 1, 0, g, 5120, 64, 8, 0, 1, 4)
+
+#define ANA_L3_VLAN_CFG_VLAN_MSTP_PTR GENMASK(30, 24)
+#define ANA_L3_VLAN_CFG_VLAN_MSTP_PTR_SET(x)\
+ FIELD_PREP(ANA_L3_VLAN_CFG_VLAN_MSTP_PTR, x)
+#define ANA_L3_VLAN_CFG_VLAN_MSTP_PTR_GET(x)\
+ FIELD_GET(ANA_L3_VLAN_CFG_VLAN_MSTP_PTR, x)
+
+#define ANA_L3_VLAN_CFG_VLAN_FID GENMASK(20, 8)
+#define ANA_L3_VLAN_CFG_VLAN_FID_SET(x)\
+ FIELD_PREP(ANA_L3_VLAN_CFG_VLAN_FID, x)
+#define ANA_L3_VLAN_CFG_VLAN_FID_GET(x)\
+ FIELD_GET(ANA_L3_VLAN_CFG_VLAN_FID, x)
+
+#define ANA_L3_VLAN_CFG_VLAN_IGR_FILTER_ENA BIT(6)
+#define ANA_L3_VLAN_CFG_VLAN_IGR_FILTER_ENA_SET(x)\
+ FIELD_PREP(ANA_L3_VLAN_CFG_VLAN_IGR_FILTER_ENA, x)
+#define ANA_L3_VLAN_CFG_VLAN_IGR_FILTER_ENA_GET(x)\
+ FIELD_GET(ANA_L3_VLAN_CFG_VLAN_IGR_FILTER_ENA, x)
+
+#define ANA_L3_VLAN_CFG_VLAN_SEC_FWD_ENA BIT(5)
+#define ANA_L3_VLAN_CFG_VLAN_SEC_FWD_ENA_SET(x)\
+ FIELD_PREP(ANA_L3_VLAN_CFG_VLAN_SEC_FWD_ENA, x)
+#define ANA_L3_VLAN_CFG_VLAN_SEC_FWD_ENA_GET(x)\
+ FIELD_GET(ANA_L3_VLAN_CFG_VLAN_SEC_FWD_ENA, x)
+
+#define ANA_L3_VLAN_CFG_VLAN_FLOOD_DIS BIT(4)
+#define ANA_L3_VLAN_CFG_VLAN_FLOOD_DIS_SET(x)\
+ FIELD_PREP(ANA_L3_VLAN_CFG_VLAN_FLOOD_DIS, x)
+#define ANA_L3_VLAN_CFG_VLAN_FLOOD_DIS_GET(x)\
+ FIELD_GET(ANA_L3_VLAN_CFG_VLAN_FLOOD_DIS, x)
+
+#define ANA_L3_VLAN_CFG_VLAN_LRN_DIS BIT(3)
+#define ANA_L3_VLAN_CFG_VLAN_LRN_DIS_SET(x)\
+ FIELD_PREP(ANA_L3_VLAN_CFG_VLAN_LRN_DIS, x)
+#define ANA_L3_VLAN_CFG_VLAN_LRN_DIS_GET(x)\
+ FIELD_GET(ANA_L3_VLAN_CFG_VLAN_LRN_DIS, x)
+
+#define ANA_L3_VLAN_CFG_VLAN_RLEG_ENA BIT(2)
+#define ANA_L3_VLAN_CFG_VLAN_RLEG_ENA_SET(x)\
+ FIELD_PREP(ANA_L3_VLAN_CFG_VLAN_RLEG_ENA, x)
+#define ANA_L3_VLAN_CFG_VLAN_RLEG_ENA_GET(x)\
+ FIELD_GET(ANA_L3_VLAN_CFG_VLAN_RLEG_ENA, x)
+
+#define ANA_L3_VLAN_CFG_VLAN_PRIVATE_ENA BIT(1)
+#define ANA_L3_VLAN_CFG_VLAN_PRIVATE_ENA_SET(x)\
+ FIELD_PREP(ANA_L3_VLAN_CFG_VLAN_PRIVATE_ENA, x)
+#define ANA_L3_VLAN_CFG_VLAN_PRIVATE_ENA_GET(x)\
+ FIELD_GET(ANA_L3_VLAN_CFG_VLAN_PRIVATE_ENA, x)
+
+#define ANA_L3_VLAN_CFG_VLAN_MIRROR_ENA BIT(0)
+#define ANA_L3_VLAN_CFG_VLAN_MIRROR_ENA_SET(x)\
+ FIELD_PREP(ANA_L3_VLAN_CFG_VLAN_MIRROR_ENA, x)
+#define ANA_L3_VLAN_CFG_VLAN_MIRROR_ENA_GET(x)\
+ FIELD_GET(ANA_L3_VLAN_CFG_VLAN_MIRROR_ENA, x)
+
+/* ANA_L3:VLAN:VLAN_MASK_CFG */
+#define ANA_L3_VLAN_MASK_CFG(g) __REG(TARGET_ANA_L3,\
+ 0, 1, 0, g, 5120, 64, 16, 0, 1, 4)
+
+/* ANA_L3:VLAN:VLAN_MASK_CFG1 */
+#define ANA_L3_VLAN_MASK_CFG1(g) __REG(TARGET_ANA_L3,\
+ 0, 1, 0, g, 5120, 64, 20, 0, 1, 4)
+
+/* ANA_L3:VLAN:VLAN_MASK_CFG2 */
+#define ANA_L3_VLAN_MASK_CFG2(g) __REG(TARGET_ANA_L3,\
+ 0, 1, 0, g, 5120, 64, 24, 0, 1, 4)
+
+#define ANA_L3_VLAN_MASK_CFG2_VLAN_PORT_MASK2 BIT(0)
+#define ANA_L3_VLAN_MASK_CFG2_VLAN_PORT_MASK2_SET(x)\
+ FIELD_PREP(ANA_L3_VLAN_MASK_CFG2_VLAN_PORT_MASK2, x)
+#define ANA_L3_VLAN_MASK_CFG2_VLAN_PORT_MASK2_GET(x)\
+ FIELD_GET(ANA_L3_VLAN_MASK_CFG2_VLAN_PORT_MASK2, x)
+
+/* ASM:DEV_STATISTICS:RX_IN_BYTES_CNT */
+#define ASM_RX_IN_BYTES_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 0, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_SYMBOL_ERR_CNT */
+#define ASM_RX_SYMBOL_ERR_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 4, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_PAUSE_CNT */
+#define ASM_RX_PAUSE_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 8, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_UNSUP_OPCODE_CNT */
+#define ASM_RX_UNSUP_OPCODE_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 12, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_OK_BYTES_CNT */
+#define ASM_RX_OK_BYTES_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 16, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_BAD_BYTES_CNT */
+#define ASM_RX_BAD_BYTES_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 20, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_UC_CNT */
+#define ASM_RX_UC_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 24, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_MC_CNT */
+#define ASM_RX_MC_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 28, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_BC_CNT */
+#define ASM_RX_BC_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 32, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_CRC_ERR_CNT */
+#define ASM_RX_CRC_ERR_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 36, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_UNDERSIZE_CNT */
+#define ASM_RX_UNDERSIZE_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 40, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_FRAGMENTS_CNT */
+#define ASM_RX_FRAGMENTS_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 44, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_IN_RANGE_LEN_ERR_CNT */
+#define ASM_RX_IN_RANGE_LEN_ERR_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 48, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_OUT_OF_RANGE_LEN_ERR_CNT */
+#define ASM_RX_OUT_OF_RANGE_LEN_ERR_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 52, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_OVERSIZE_CNT */
+#define ASM_RX_OVERSIZE_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 56, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_JABBERS_CNT */
+#define ASM_RX_JABBERS_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 60, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_SIZE64_CNT */
+#define ASM_RX_SIZE64_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 64, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_SIZE65TO127_CNT */
+#define ASM_RX_SIZE65TO127_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 68, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_SIZE128TO255_CNT */
+#define ASM_RX_SIZE128TO255_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 72, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_SIZE256TO511_CNT */
+#define ASM_RX_SIZE256TO511_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 76, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_SIZE512TO1023_CNT */
+#define ASM_RX_SIZE512TO1023_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 80, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_SIZE1024TO1518_CNT */
+#define ASM_RX_SIZE1024TO1518_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 84, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_SIZE1519TOMAX_CNT */
+#define ASM_RX_SIZE1519TOMAX_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 88, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_IPG_SHRINK_CNT */
+#define ASM_RX_IPG_SHRINK_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 92, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_OUT_BYTES_CNT */
+#define ASM_TX_OUT_BYTES_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 96, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_PAUSE_CNT */
+#define ASM_TX_PAUSE_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 100, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_OK_BYTES_CNT */
+#define ASM_TX_OK_BYTES_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 104, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_UC_CNT */
+#define ASM_TX_UC_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 108, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_MC_CNT */
+#define ASM_TX_MC_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 112, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_BC_CNT */
+#define ASM_TX_BC_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 116, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_SIZE64_CNT */
+#define ASM_TX_SIZE64_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 120, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_SIZE65TO127_CNT */
+#define ASM_TX_SIZE65TO127_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 124, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_SIZE128TO255_CNT */
+#define ASM_TX_SIZE128TO255_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 128, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_SIZE256TO511_CNT */
+#define ASM_TX_SIZE256TO511_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 132, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_SIZE512TO1023_CNT */
+#define ASM_TX_SIZE512TO1023_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 136, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_SIZE1024TO1518_CNT */
+#define ASM_TX_SIZE1024TO1518_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 140, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_SIZE1519TOMAX_CNT */
+#define ASM_TX_SIZE1519TOMAX_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 144, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_ALIGNMENT_LOST_CNT */
+#define ASM_RX_ALIGNMENT_LOST_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 148, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_TAGGED_FRMS_CNT */
+#define ASM_RX_TAGGED_FRMS_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 152, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_UNTAGGED_FRMS_CNT */
+#define ASM_RX_UNTAGGED_FRMS_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 156, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_TAGGED_FRMS_CNT */
+#define ASM_TX_TAGGED_FRMS_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 160, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_UNTAGGED_FRMS_CNT */
+#define ASM_TX_UNTAGGED_FRMS_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 164, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_SYMBOL_ERR_CNT */
+#define ASM_PMAC_RX_SYMBOL_ERR_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 168, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_PAUSE_CNT */
+#define ASM_PMAC_RX_PAUSE_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 172, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_UNSUP_OPCODE_CNT */
+#define ASM_PMAC_RX_UNSUP_OPCODE_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 176, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_OK_BYTES_CNT */
+#define ASM_PMAC_RX_OK_BYTES_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 180, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_BAD_BYTES_CNT */
+#define ASM_PMAC_RX_BAD_BYTES_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 184, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_UC_CNT */
+#define ASM_PMAC_RX_UC_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 188, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_MC_CNT */
+#define ASM_PMAC_RX_MC_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 192, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_BC_CNT */
+#define ASM_PMAC_RX_BC_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 196, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_CRC_ERR_CNT */
+#define ASM_PMAC_RX_CRC_ERR_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 200, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_UNDERSIZE_CNT */
+#define ASM_PMAC_RX_UNDERSIZE_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 204, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_FRAGMENTS_CNT */
+#define ASM_PMAC_RX_FRAGMENTS_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 208, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_IN_RANGE_LEN_ERR_CNT */
+#define ASM_PMAC_RX_IN_RANGE_LEN_ERR_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 212, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_OUT_OF_RANGE_LEN_ERR_CNT */
+#define ASM_PMAC_RX_OUT_OF_RANGE_LEN_ERR_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 216, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_OVERSIZE_CNT */
+#define ASM_PMAC_RX_OVERSIZE_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 220, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_JABBERS_CNT */
+#define ASM_PMAC_RX_JABBERS_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 224, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_SIZE64_CNT */
+#define ASM_PMAC_RX_SIZE64_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 228, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_SIZE65TO127_CNT */
+#define ASM_PMAC_RX_SIZE65TO127_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 232, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_SIZE128TO255_CNT */
+#define ASM_PMAC_RX_SIZE128TO255_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 236, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_SIZE256TO511_CNT */
+#define ASM_PMAC_RX_SIZE256TO511_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 240, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_SIZE512TO1023_CNT */
+#define ASM_PMAC_RX_SIZE512TO1023_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 244, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_SIZE1024TO1518_CNT */
+#define ASM_PMAC_RX_SIZE1024TO1518_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 248, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_SIZE1519TOMAX_CNT */
+#define ASM_PMAC_RX_SIZE1519TOMAX_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 252, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_TX_PAUSE_CNT */
+#define ASM_PMAC_TX_PAUSE_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 256, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_TX_OK_BYTES_CNT */
+#define ASM_PMAC_TX_OK_BYTES_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 260, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_TX_UC_CNT */
+#define ASM_PMAC_TX_UC_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 264, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_TX_MC_CNT */
+#define ASM_PMAC_TX_MC_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 268, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_TX_BC_CNT */
+#define ASM_PMAC_TX_BC_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 272, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_TX_SIZE64_CNT */
+#define ASM_PMAC_TX_SIZE64_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 276, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_TX_SIZE65TO127_CNT */
+#define ASM_PMAC_TX_SIZE65TO127_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 280, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_TX_SIZE128TO255_CNT */
+#define ASM_PMAC_TX_SIZE128TO255_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 284, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_TX_SIZE256TO511_CNT */
+#define ASM_PMAC_TX_SIZE256TO511_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 288, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_TX_SIZE512TO1023_CNT */
+#define ASM_PMAC_TX_SIZE512TO1023_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 292, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_TX_SIZE1024TO1518_CNT */
+#define ASM_PMAC_TX_SIZE1024TO1518_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 296, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_TX_SIZE1519TOMAX_CNT */
+#define ASM_PMAC_TX_SIZE1519TOMAX_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 300, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_ALIGNMENT_LOST_CNT */
+#define ASM_PMAC_RX_ALIGNMENT_LOST_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 304, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:MM_RX_ASSEMBLY_ERR_CNT */
+#define ASM_MM_RX_ASSEMBLY_ERR_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 308, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:MM_RX_SMD_ERR_CNT */
+#define ASM_MM_RX_SMD_ERR_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 312, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:MM_RX_ASSEMBLY_OK_CNT */
+#define ASM_MM_RX_ASSEMBLY_OK_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 316, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:MM_RX_MERGE_FRAG_CNT */
+#define ASM_MM_RX_MERGE_FRAG_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 320, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:MM_TX_PFRAGMENT_CNT */
+#define ASM_MM_TX_PFRAGMENT_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 324, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_MULTI_COLL_CNT */
+#define ASM_TX_MULTI_COLL_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 328, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_LATE_COLL_CNT */
+#define ASM_TX_LATE_COLL_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 332, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_XCOLL_CNT */
+#define ASM_TX_XCOLL_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 336, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_DEFER_CNT */
+#define ASM_TX_DEFER_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 340, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_XDEFER_CNT */
+#define ASM_TX_XDEFER_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 344, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_BACKOFF1_CNT */
+#define ASM_TX_BACKOFF1_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 348, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_CSENSE_CNT */
+#define ASM_TX_CSENSE_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 352, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_IN_BYTES_MSB_CNT */
+#define ASM_RX_IN_BYTES_MSB_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 356, 0, 1, 4)
+
+#define ASM_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT GENMASK(3, 0)
+#define ASM_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT_SET(x)\
+ FIELD_PREP(ASM_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT, x)
+#define ASM_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT_GET(x)\
+ FIELD_GET(ASM_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT, x)
+
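+/* The *_MSB_CNT registers hold the upper bits of the byte counters; the
+ * GENMASK(3, 0) field width suggests 4 extra bits on top of the 32-bit
+ * LSB counters, i.e. 36-bit totals. A sketch of assembling a full value,
+ * assuming the spx5_rd() accessor from sparx5_main.h and the
+ * ASM_RX_IN_BYTES_CNT register defined earlier in this file:
+ *
+ *	u64 bytes;
+ *
+ *	bytes = ASM_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT_GET(
+ *		spx5_rd(sparx5, ASM_RX_IN_BYTES_MSB_CNT(portno)));
+ *	bytes = (bytes << 32) | spx5_rd(sparx5, ASM_RX_IN_BYTES_CNT(portno));
+ */
+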
+/* ASM:DEV_STATISTICS:RX_OK_BYTES_MSB_CNT */
+#define ASM_RX_OK_BYTES_MSB_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 360, 0, 1, 4)
+
+#define ASM_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT GENMASK(3, 0)
+#define ASM_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT_SET(x)\
+ FIELD_PREP(ASM_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT, x)
+#define ASM_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT_GET(x)\
+ FIELD_GET(ASM_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT, x)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_OK_BYTES_MSB_CNT */
+#define ASM_PMAC_RX_OK_BYTES_MSB_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 364, 0, 1, 4)
+
+#define ASM_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT GENMASK(3, 0)
+#define ASM_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT_SET(x)\
+ FIELD_PREP(ASM_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT, x)
+#define ASM_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT_GET(x)\
+ FIELD_GET(ASM_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT, x)
+
+/* ASM:DEV_STATISTICS:RX_BAD_BYTES_MSB_CNT */
+#define ASM_RX_BAD_BYTES_MSB_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 368, 0, 1, 4)
+
+#define ASM_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT GENMASK(3, 0)
+#define ASM_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT_SET(x)\
+ FIELD_PREP(ASM_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT, x)
+#define ASM_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT_GET(x)\
+ FIELD_GET(ASM_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT, x)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_BAD_BYTES_MSB_CNT */
+#define ASM_PMAC_RX_BAD_BYTES_MSB_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 372, 0, 1, 4)
+
+#define ASM_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT GENMASK(3, 0)
+#define ASM_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT_SET(x)\
+ FIELD_PREP(ASM_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT, x)
+#define ASM_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT_GET(x)\
+ FIELD_GET(ASM_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT, x)
+
+/* ASM:DEV_STATISTICS:TX_OUT_BYTES_MSB_CNT */
+#define ASM_TX_OUT_BYTES_MSB_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 376, 0, 1, 4)
+
+#define ASM_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT GENMASK(3, 0)
+#define ASM_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT_SET(x)\
+ FIELD_PREP(ASM_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT, x)
+#define ASM_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT_GET(x)\
+ FIELD_GET(ASM_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT, x)
+
+/* ASM:DEV_STATISTICS:TX_OK_BYTES_MSB_CNT */
+#define ASM_TX_OK_BYTES_MSB_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 380, 0, 1, 4)
+
+#define ASM_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT GENMASK(3, 0)
+#define ASM_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT_SET(x)\
+ FIELD_PREP(ASM_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT, x)
+#define ASM_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT_GET(x)\
+ FIELD_GET(ASM_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT, x)
+
+/* ASM:DEV_STATISTICS:PMAC_TX_OK_BYTES_MSB_CNT */
+#define ASM_PMAC_TX_OK_BYTES_MSB_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 384, 0, 1, 4)
+
+#define ASM_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT GENMASK(3, 0)
+#define ASM_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT_SET(x)\
+ FIELD_PREP(ASM_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT, x)
+#define ASM_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT_GET(x)\
+ FIELD_GET(ASM_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT, x)
+
+/* ASM:DEV_STATISTICS:RX_SYNC_LOST_ERR_CNT */
+#define ASM_RX_SYNC_LOST_ERR_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 388, 0, 1, 4)
+
+/* ASM:CFG:STAT_CFG */
+#define ASM_STAT_CFG __REG(TARGET_ASM,\
+ 0, 1, 33280, 0, 1, 1088, 0, 0, 1, 4)
+
+#define ASM_STAT_CFG_STAT_CNT_CLR_SHOT BIT(0)
+#define ASM_STAT_CFG_STAT_CNT_CLR_SHOT_SET(x)\
+ FIELD_PREP(ASM_STAT_CFG_STAT_CNT_CLR_SHOT, x)
+#define ASM_STAT_CFG_STAT_CNT_CLR_SHOT_GET(x)\
+ FIELD_GET(ASM_STAT_CFG_STAT_CNT_CLR_SHOT, x)
+
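+/* Judging by its name, STAT_CNT_CLR_SHOT is a self-clearing one-shot:
+ * writing 1 requests a clear of the statistics counters. A sketch,
+ * assuming the spx5_wr() accessor from sparx5_main.h:
+ *
+ *	spx5_wr(ASM_STAT_CFG_STAT_CNT_CLR_SHOT_SET(1), sparx5,
+ *		ASM_STAT_CFG);
+ */
+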
+/* ASM:CFG:PORT_CFG */
+#define ASM_PORT_CFG(r) __REG(TARGET_ASM,\
+ 0, 1, 33280, 0, 1, 1088, 540, r, 67, 4)
+
+#define ASM_PORT_CFG_CSC_STAT_DIS BIT(12)
+#define ASM_PORT_CFG_CSC_STAT_DIS_SET(x)\
+ FIELD_PREP(ASM_PORT_CFG_CSC_STAT_DIS, x)
+#define ASM_PORT_CFG_CSC_STAT_DIS_GET(x)\
+ FIELD_GET(ASM_PORT_CFG_CSC_STAT_DIS, x)
+
+#define ASM_PORT_CFG_HIH_AFTER_PREAMBLE_ENA BIT(11)
+#define ASM_PORT_CFG_HIH_AFTER_PREAMBLE_ENA_SET(x)\
+ FIELD_PREP(ASM_PORT_CFG_HIH_AFTER_PREAMBLE_ENA, x)
+#define ASM_PORT_CFG_HIH_AFTER_PREAMBLE_ENA_GET(x)\
+ FIELD_GET(ASM_PORT_CFG_HIH_AFTER_PREAMBLE_ENA, x)
+
+#define ASM_PORT_CFG_IGN_TAXI_ABORT_ENA BIT(10)
+#define ASM_PORT_CFG_IGN_TAXI_ABORT_ENA_SET(x)\
+ FIELD_PREP(ASM_PORT_CFG_IGN_TAXI_ABORT_ENA, x)
+#define ASM_PORT_CFG_IGN_TAXI_ABORT_ENA_GET(x)\
+ FIELD_GET(ASM_PORT_CFG_IGN_TAXI_ABORT_ENA, x)
+
+#define ASM_PORT_CFG_NO_PREAMBLE_ENA BIT(9)
+#define ASM_PORT_CFG_NO_PREAMBLE_ENA_SET(x)\
+ FIELD_PREP(ASM_PORT_CFG_NO_PREAMBLE_ENA, x)
+#define ASM_PORT_CFG_NO_PREAMBLE_ENA_GET(x)\
+ FIELD_GET(ASM_PORT_CFG_NO_PREAMBLE_ENA, x)
+
+#define ASM_PORT_CFG_SKIP_PREAMBLE_ENA BIT(8)
+#define ASM_PORT_CFG_SKIP_PREAMBLE_ENA_SET(x)\
+ FIELD_PREP(ASM_PORT_CFG_SKIP_PREAMBLE_ENA, x)
+#define ASM_PORT_CFG_SKIP_PREAMBLE_ENA_GET(x)\
+ FIELD_GET(ASM_PORT_CFG_SKIP_PREAMBLE_ENA, x)
+
+#define ASM_PORT_CFG_FRM_AGING_DIS BIT(7)
+#define ASM_PORT_CFG_FRM_AGING_DIS_SET(x)\
+ FIELD_PREP(ASM_PORT_CFG_FRM_AGING_DIS, x)
+#define ASM_PORT_CFG_FRM_AGING_DIS_GET(x)\
+ FIELD_GET(ASM_PORT_CFG_FRM_AGING_DIS, x)
+
+#define ASM_PORT_CFG_PAD_ENA BIT(6)
+#define ASM_PORT_CFG_PAD_ENA_SET(x)\
+ FIELD_PREP(ASM_PORT_CFG_PAD_ENA, x)
+#define ASM_PORT_CFG_PAD_ENA_GET(x)\
+ FIELD_GET(ASM_PORT_CFG_PAD_ENA, x)
+
+#define ASM_PORT_CFG_INJ_DISCARD_CFG GENMASK(5, 4)
+#define ASM_PORT_CFG_INJ_DISCARD_CFG_SET(x)\
+ FIELD_PREP(ASM_PORT_CFG_INJ_DISCARD_CFG, x)
+#define ASM_PORT_CFG_INJ_DISCARD_CFG_GET(x)\
+ FIELD_GET(ASM_PORT_CFG_INJ_DISCARD_CFG, x)
+
+#define ASM_PORT_CFG_INJ_FORMAT_CFG GENMASK(3, 2)
+#define ASM_PORT_CFG_INJ_FORMAT_CFG_SET(x)\
+ FIELD_PREP(ASM_PORT_CFG_INJ_FORMAT_CFG, x)
+#define ASM_PORT_CFG_INJ_FORMAT_CFG_GET(x)\
+ FIELD_GET(ASM_PORT_CFG_INJ_FORMAT_CFG, x)
+
+#define ASM_PORT_CFG_VSTAX2_AWR_ENA BIT(1)
+#define ASM_PORT_CFG_VSTAX2_AWR_ENA_SET(x)\
+ FIELD_PREP(ASM_PORT_CFG_VSTAX2_AWR_ENA, x)
+#define ASM_PORT_CFG_VSTAX2_AWR_ENA_GET(x)\
+ FIELD_GET(ASM_PORT_CFG_VSTAX2_AWR_ENA, x)
+
+#define ASM_PORT_CFG_PFRM_FLUSH BIT(0)
+#define ASM_PORT_CFG_PFRM_FLUSH_SET(x)\
+ FIELD_PREP(ASM_PORT_CFG_PFRM_FLUSH, x)
+#define ASM_PORT_CFG_PFRM_FLUSH_GET(x)\
+ FIELD_GET(ASM_PORT_CFG_PFRM_FLUSH, x)
+
+/* ASM:RAM_CTRL:RAM_INIT */
+#define ASM_RAM_INIT __REG(TARGET_ASM,\
+ 0, 1, 34832, 0, 1, 4, 0, 0, 1, 4)
+
+#define ASM_RAM_INIT_RAM_INIT BIT(1)
+#define ASM_RAM_INIT_RAM_INIT_SET(x)\
+ FIELD_PREP(ASM_RAM_INIT_RAM_INIT, x)
+#define ASM_RAM_INIT_RAM_INIT_GET(x)\
+ FIELD_GET(ASM_RAM_INIT_RAM_INIT, x)
+
+#define ASM_RAM_INIT_RAM_CFG_HOOK BIT(0)
+#define ASM_RAM_INIT_RAM_CFG_HOOK_SET(x)\
+ FIELD_PREP(ASM_RAM_INIT_RAM_CFG_HOOK, x)
+#define ASM_RAM_INIT_RAM_CFG_HOOK_GET(x)\
+ FIELD_GET(ASM_RAM_INIT_RAM_CFG_HOOK, x)
+
+/* CLKGEN:LCPLL1:LCPLL1_CORE_CLK_CFG */
+#define CLKGEN_LCPLL1_CORE_CLK_CFG __REG(TARGET_CLKGEN,\
+ 0, 1, 12, 0, 1, 36, 0, 0, 1, 4)
+
+#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_DIV GENMASK(7, 0)
+#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_DIV_SET(x)\
+ FIELD_PREP(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_DIV, x)
+#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_DIV_GET(x)\
+ FIELD_GET(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_DIV, x)
+
+#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_PRE_DIV GENMASK(10, 8)
+#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_PRE_DIV_SET(x)\
+ FIELD_PREP(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_PRE_DIV, x)
+#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_PRE_DIV_GET(x)\
+ FIELD_GET(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_PRE_DIV, x)
+
+#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_DIR BIT(11)
+#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_DIR_SET(x)\
+ FIELD_PREP(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_DIR, x)
+#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_DIR_GET(x)\
+ FIELD_GET(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_DIR, x)
+
+#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_SEL GENMASK(13, 12)
+#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_SEL_SET(x)\
+ FIELD_PREP(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_SEL, x)
+#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_SEL_GET(x)\
+ FIELD_GET(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_SEL, x)
+
+#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_ENA BIT(14)
+#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_ENA_SET(x)\
+ FIELD_PREP(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_ENA, x)
+#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_ENA_GET(x)\
+ FIELD_GET(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_ENA, x)
+
+#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_ENA BIT(15)
+#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_ENA_SET(x)\
+ FIELD_PREP(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_ENA, x)
+#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_ENA_GET(x)\
+ FIELD_GET(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_ENA, x)
+
+/* CPU:CPU_REGS:PROC_CTRL */
+#define CPU_PROC_CTRL __REG(TARGET_CPU,\
+ 0, 1, 0, 0, 1, 204, 176, 0, 1, 4)
+
+#define CPU_PROC_CTRL_AARCH64_MODE_ENA BIT(12)
+#define CPU_PROC_CTRL_AARCH64_MODE_ENA_SET(x)\
+ FIELD_PREP(CPU_PROC_CTRL_AARCH64_MODE_ENA, x)
+#define CPU_PROC_CTRL_AARCH64_MODE_ENA_GET(x)\
+ FIELD_GET(CPU_PROC_CTRL_AARCH64_MODE_ENA, x)
+
+#define CPU_PROC_CTRL_L2_RST_INVALIDATE_DIS BIT(11)
+#define CPU_PROC_CTRL_L2_RST_INVALIDATE_DIS_SET(x)\
+ FIELD_PREP(CPU_PROC_CTRL_L2_RST_INVALIDATE_DIS, x)
+#define CPU_PROC_CTRL_L2_RST_INVALIDATE_DIS_GET(x)\
+ FIELD_GET(CPU_PROC_CTRL_L2_RST_INVALIDATE_DIS, x)
+
+#define CPU_PROC_CTRL_L1_RST_INVALIDATE_DIS BIT(10)
+#define CPU_PROC_CTRL_L1_RST_INVALIDATE_DIS_SET(x)\
+ FIELD_PREP(CPU_PROC_CTRL_L1_RST_INVALIDATE_DIS, x)
+#define CPU_PROC_CTRL_L1_RST_INVALIDATE_DIS_GET(x)\
+ FIELD_GET(CPU_PROC_CTRL_L1_RST_INVALIDATE_DIS, x)
+
+#define CPU_PROC_CTRL_BE_EXCEP_MODE BIT(9)
+#define CPU_PROC_CTRL_BE_EXCEP_MODE_SET(x)\
+ FIELD_PREP(CPU_PROC_CTRL_BE_EXCEP_MODE, x)
+#define CPU_PROC_CTRL_BE_EXCEP_MODE_GET(x)\
+ FIELD_GET(CPU_PROC_CTRL_BE_EXCEP_MODE, x)
+
+#define CPU_PROC_CTRL_VINITHI BIT(8)
+#define CPU_PROC_CTRL_VINITHI_SET(x)\
+ FIELD_PREP(CPU_PROC_CTRL_VINITHI, x)
+#define CPU_PROC_CTRL_VINITHI_GET(x)\
+ FIELD_GET(CPU_PROC_CTRL_VINITHI, x)
+
+#define CPU_PROC_CTRL_CFGTE BIT(7)
+#define CPU_PROC_CTRL_CFGTE_SET(x)\
+ FIELD_PREP(CPU_PROC_CTRL_CFGTE, x)
+#define CPU_PROC_CTRL_CFGTE_GET(x)\
+ FIELD_GET(CPU_PROC_CTRL_CFGTE, x)
+
+#define CPU_PROC_CTRL_CP15S_DISABLE BIT(6)
+#define CPU_PROC_CTRL_CP15S_DISABLE_SET(x)\
+ FIELD_PREP(CPU_PROC_CTRL_CP15S_DISABLE, x)
+#define CPU_PROC_CTRL_CP15S_DISABLE_GET(x)\
+ FIELD_GET(CPU_PROC_CTRL_CP15S_DISABLE, x)
+
+#define CPU_PROC_CTRL_PROC_CRYPTO_DISABLE BIT(5)
+#define CPU_PROC_CTRL_PROC_CRYPTO_DISABLE_SET(x)\
+ FIELD_PREP(CPU_PROC_CTRL_PROC_CRYPTO_DISABLE, x)
+#define CPU_PROC_CTRL_PROC_CRYPTO_DISABLE_GET(x)\
+ FIELD_GET(CPU_PROC_CTRL_PROC_CRYPTO_DISABLE, x)
+
+#define CPU_PROC_CTRL_ACP_CACHE_FORCE_ENA BIT(4)
+#define CPU_PROC_CTRL_ACP_CACHE_FORCE_ENA_SET(x)\
+ FIELD_PREP(CPU_PROC_CTRL_ACP_CACHE_FORCE_ENA, x)
+#define CPU_PROC_CTRL_ACP_CACHE_FORCE_ENA_GET(x)\
+ FIELD_GET(CPU_PROC_CTRL_ACP_CACHE_FORCE_ENA, x)
+
+#define CPU_PROC_CTRL_ACP_AWCACHE BIT(3)
+#define CPU_PROC_CTRL_ACP_AWCACHE_SET(x)\
+ FIELD_PREP(CPU_PROC_CTRL_ACP_AWCACHE, x)
+#define CPU_PROC_CTRL_ACP_AWCACHE_GET(x)\
+ FIELD_GET(CPU_PROC_CTRL_ACP_AWCACHE, x)
+
+#define CPU_PROC_CTRL_ACP_ARCACHE BIT(2)
+#define CPU_PROC_CTRL_ACP_ARCACHE_SET(x)\
+ FIELD_PREP(CPU_PROC_CTRL_ACP_ARCACHE, x)
+#define CPU_PROC_CTRL_ACP_ARCACHE_GET(x)\
+ FIELD_GET(CPU_PROC_CTRL_ACP_ARCACHE, x)
+
+#define CPU_PROC_CTRL_L2_FLUSH_REQ BIT(1)
+#define CPU_PROC_CTRL_L2_FLUSH_REQ_SET(x)\
+ FIELD_PREP(CPU_PROC_CTRL_L2_FLUSH_REQ, x)
+#define CPU_PROC_CTRL_L2_FLUSH_REQ_GET(x)\
+ FIELD_GET(CPU_PROC_CTRL_L2_FLUSH_REQ, x)
+
+#define CPU_PROC_CTRL_ACP_DISABLE BIT(0)
+#define CPU_PROC_CTRL_ACP_DISABLE_SET(x)\
+ FIELD_PREP(CPU_PROC_CTRL_ACP_DISABLE, x)
+#define CPU_PROC_CTRL_ACP_DISABLE_GET(x)\
+ FIELD_GET(CPU_PROC_CTRL_ACP_DISABLE, x)
+
+/* DEV10G:MAC_CFG_STATUS:MAC_ENA_CFG */
+#define DEV10G_MAC_ENA_CFG(t) __REG(TARGET_DEV10G,\
+ t, 12, 0, 0, 1, 60, 0, 0, 1, 4)
+
+#define DEV10G_MAC_ENA_CFG_RX_ENA BIT(4)
+#define DEV10G_MAC_ENA_CFG_RX_ENA_SET(x)\
+ FIELD_PREP(DEV10G_MAC_ENA_CFG_RX_ENA, x)
+#define DEV10G_MAC_ENA_CFG_RX_ENA_GET(x)\
+ FIELD_GET(DEV10G_MAC_ENA_CFG_RX_ENA, x)
+
+#define DEV10G_MAC_ENA_CFG_TX_ENA BIT(0)
+#define DEV10G_MAC_ENA_CFG_TX_ENA_SET(x)\
+ FIELD_PREP(DEV10G_MAC_ENA_CFG_TX_ENA, x)
+#define DEV10G_MAC_ENA_CFG_TX_ENA_GET(x)\
+ FIELD_GET(DEV10G_MAC_ENA_CFG_TX_ENA, x)
+
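+/* A sketch of enabling both directions of a 10G MAC using the macros
+ * above, assuming the spx5_rmw() accessor from sparx5_main.h (tidx names
+ * the DEV10G instance and is illustrative only):
+ *
+ *	spx5_rmw(DEV10G_MAC_ENA_CFG_RX_ENA_SET(1) |
+ *		 DEV10G_MAC_ENA_CFG_TX_ENA_SET(1),
+ *		 DEV10G_MAC_ENA_CFG_RX_ENA | DEV10G_MAC_ENA_CFG_TX_ENA,
+ *		 sparx5, DEV10G_MAC_ENA_CFG(tidx));
+ */
+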
+/* DEV10G:MAC_CFG_STATUS:MAC_MAXLEN_CFG */
+#define DEV10G_MAC_MAXLEN_CFG(t) __REG(TARGET_DEV10G,\
+ t, 12, 0, 0, 1, 60, 8, 0, 1, 4)
+
+#define DEV10G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK BIT(16)
+#define DEV10G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK_SET(x)\
+ FIELD_PREP(DEV10G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK, x)
+#define DEV10G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK_GET(x)\
+ FIELD_GET(DEV10G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK, x)
+
+#define DEV10G_MAC_MAXLEN_CFG_MAX_LEN GENMASK(15, 0)
+#define DEV10G_MAC_MAXLEN_CFG_MAX_LEN_SET(x)\
+ FIELD_PREP(DEV10G_MAC_MAXLEN_CFG_MAX_LEN, x)
+#define DEV10G_MAC_MAXLEN_CFG_MAX_LEN_GET(x)\
+ FIELD_GET(DEV10G_MAC_MAXLEN_CFG_MAX_LEN, x)
+
+/* DEV10G:MAC_CFG_STATUS:MAC_NUM_TAGS_CFG */
+#define DEV10G_MAC_NUM_TAGS_CFG(t) __REG(TARGET_DEV10G,\
+ t, 12, 0, 0, 1, 60, 12, 0, 1, 4)
+
+#define DEV10G_MAC_NUM_TAGS_CFG_NUM_TAGS GENMASK(1, 0)
+#define DEV10G_MAC_NUM_TAGS_CFG_NUM_TAGS_SET(x)\
+ FIELD_PREP(DEV10G_MAC_NUM_TAGS_CFG_NUM_TAGS, x)
+#define DEV10G_MAC_NUM_TAGS_CFG_NUM_TAGS_GET(x)\
+ FIELD_GET(DEV10G_MAC_NUM_TAGS_CFG_NUM_TAGS, x)
+
+/* DEV10G:MAC_CFG_STATUS:MAC_TAGS_CFG */
+#define DEV10G_MAC_TAGS_CFG(t, r) __REG(TARGET_DEV10G,\
+ t, 12, 0, 0, 1, 60, 16, r, 3, 4)
+
+#define DEV10G_MAC_TAGS_CFG_TAG_ID GENMASK(31, 16)
+#define DEV10G_MAC_TAGS_CFG_TAG_ID_SET(x)\
+ FIELD_PREP(DEV10G_MAC_TAGS_CFG_TAG_ID, x)
+#define DEV10G_MAC_TAGS_CFG_TAG_ID_GET(x)\
+ FIELD_GET(DEV10G_MAC_TAGS_CFG_TAG_ID, x)
+
+#define DEV10G_MAC_TAGS_CFG_TAG_ENA BIT(4)
+#define DEV10G_MAC_TAGS_CFG_TAG_ENA_SET(x)\
+ FIELD_PREP(DEV10G_MAC_TAGS_CFG_TAG_ENA, x)
+#define DEV10G_MAC_TAGS_CFG_TAG_ENA_GET(x)\
+ FIELD_GET(DEV10G_MAC_TAGS_CFG_TAG_ENA, x)
+
+/* DEV10G:MAC_CFG_STATUS:MAC_ADV_CHK_CFG */
+#define DEV10G_MAC_ADV_CHK_CFG(t) __REG(TARGET_DEV10G,\
+ t, 12, 0, 0, 1, 60, 28, 0, 1, 4)
+
+#define DEV10G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA BIT(24)
+#define DEV10G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA_SET(x)\
+ FIELD_PREP(DEV10G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA, x)
+#define DEV10G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA_GET(x)\
+ FIELD_GET(DEV10G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA, x)
+
+#define DEV10G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA BIT(20)
+#define DEV10G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA_SET(x)\
+ FIELD_PREP(DEV10G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA, x)
+#define DEV10G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA_GET(x)\
+ FIELD_GET(DEV10G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA, x)
+
+#define DEV10G_MAC_ADV_CHK_CFG_SFD_CHK_ENA BIT(16)
+#define DEV10G_MAC_ADV_CHK_CFG_SFD_CHK_ENA_SET(x)\
+ FIELD_PREP(DEV10G_MAC_ADV_CHK_CFG_SFD_CHK_ENA, x)
+#define DEV10G_MAC_ADV_CHK_CFG_SFD_CHK_ENA_GET(x)\
+ FIELD_GET(DEV10G_MAC_ADV_CHK_CFG_SFD_CHK_ENA, x)
+
+#define DEV10G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS BIT(12)
+#define DEV10G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS_SET(x)\
+ FIELD_PREP(DEV10G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS, x)
+#define DEV10G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS_GET(x)\
+ FIELD_GET(DEV10G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS, x)
+
+#define DEV10G_MAC_ADV_CHK_CFG_PRM_CHK_ENA BIT(8)
+#define DEV10G_MAC_ADV_CHK_CFG_PRM_CHK_ENA_SET(x)\
+ FIELD_PREP(DEV10G_MAC_ADV_CHK_CFG_PRM_CHK_ENA, x)
+#define DEV10G_MAC_ADV_CHK_CFG_PRM_CHK_ENA_GET(x)\
+ FIELD_GET(DEV10G_MAC_ADV_CHK_CFG_PRM_CHK_ENA, x)
+
+#define DEV10G_MAC_ADV_CHK_CFG_OOR_ERR_ENA BIT(4)
+#define DEV10G_MAC_ADV_CHK_CFG_OOR_ERR_ENA_SET(x)\
+ FIELD_PREP(DEV10G_MAC_ADV_CHK_CFG_OOR_ERR_ENA, x)
+#define DEV10G_MAC_ADV_CHK_CFG_OOR_ERR_ENA_GET(x)\
+ FIELD_GET(DEV10G_MAC_ADV_CHK_CFG_OOR_ERR_ENA, x)
+
+#define DEV10G_MAC_ADV_CHK_CFG_INR_ERR_ENA BIT(0)
+#define DEV10G_MAC_ADV_CHK_CFG_INR_ERR_ENA_SET(x)\
+ FIELD_PREP(DEV10G_MAC_ADV_CHK_CFG_INR_ERR_ENA, x)
+#define DEV10G_MAC_ADV_CHK_CFG_INR_ERR_ENA_GET(x)\
+ FIELD_GET(DEV10G_MAC_ADV_CHK_CFG_INR_ERR_ENA, x)
+
+/* DEV10G:MAC_CFG_STATUS:MAC_TX_MONITOR_STICKY */
+#define DEV10G_MAC_TX_MONITOR_STICKY(t) __REG(TARGET_DEV10G,\
+ t, 12, 0, 0, 1, 60, 48, 0, 1, 4)
+
+#define DEV10G_MAC_TX_MONITOR_STICKY_LOCAL_ERR_STATE_STICKY BIT(4)
+#define DEV10G_MAC_TX_MONITOR_STICKY_LOCAL_ERR_STATE_STICKY_SET(x)\
+ FIELD_PREP(DEV10G_MAC_TX_MONITOR_STICKY_LOCAL_ERR_STATE_STICKY, x)
+#define DEV10G_MAC_TX_MONITOR_STICKY_LOCAL_ERR_STATE_STICKY_GET(x)\
+ FIELD_GET(DEV10G_MAC_TX_MONITOR_STICKY_LOCAL_ERR_STATE_STICKY, x)
+
+#define DEV10G_MAC_TX_MONITOR_STICKY_REMOTE_ERR_STATE_STICKY BIT(3)
+#define DEV10G_MAC_TX_MONITOR_STICKY_REMOTE_ERR_STATE_STICKY_SET(x)\
+ FIELD_PREP(DEV10G_MAC_TX_MONITOR_STICKY_REMOTE_ERR_STATE_STICKY, x)
+#define DEV10G_MAC_TX_MONITOR_STICKY_REMOTE_ERR_STATE_STICKY_GET(x)\
+ FIELD_GET(DEV10G_MAC_TX_MONITOR_STICKY_REMOTE_ERR_STATE_STICKY, x)
+
+#define DEV10G_MAC_TX_MONITOR_STICKY_LINK_INTERRUPTION_STATE_STICKY BIT(2)
+#define DEV10G_MAC_TX_MONITOR_STICKY_LINK_INTERRUPTION_STATE_STICKY_SET(x)\
+ FIELD_PREP(DEV10G_MAC_TX_MONITOR_STICKY_LINK_INTERRUPTION_STATE_STICKY, x)
+#define DEV10G_MAC_TX_MONITOR_STICKY_LINK_INTERRUPTION_STATE_STICKY_GET(x)\
+ FIELD_GET(DEV10G_MAC_TX_MONITOR_STICKY_LINK_INTERRUPTION_STATE_STICKY, x)
+
+#define DEV10G_MAC_TX_MONITOR_STICKY_IDLE_STATE_STICKY BIT(1)
+#define DEV10G_MAC_TX_MONITOR_STICKY_IDLE_STATE_STICKY_SET(x)\
+ FIELD_PREP(DEV10G_MAC_TX_MONITOR_STICKY_IDLE_STATE_STICKY, x)
+#define DEV10G_MAC_TX_MONITOR_STICKY_IDLE_STATE_STICKY_GET(x)\
+ FIELD_GET(DEV10G_MAC_TX_MONITOR_STICKY_IDLE_STATE_STICKY, x)
+
+#define DEV10G_MAC_TX_MONITOR_STICKY_DIS_STATE_STICKY BIT(0)
+#define DEV10G_MAC_TX_MONITOR_STICKY_DIS_STATE_STICKY_SET(x)\
+ FIELD_PREP(DEV10G_MAC_TX_MONITOR_STICKY_DIS_STATE_STICKY, x)
+#define DEV10G_MAC_TX_MONITOR_STICKY_DIS_STATE_STICKY_GET(x)\
+ FIELD_GET(DEV10G_MAC_TX_MONITOR_STICKY_DIS_STATE_STICKY, x)
+
+/* DEV10G:DEV_CFG_STATUS:DEV_RST_CTRL */
+#define DEV10G_DEV_RST_CTRL(t) __REG(TARGET_DEV10G,\
+ t, 12, 436, 0, 1, 52, 0, 0, 1, 4)
+
+#define DEV10G_DEV_RST_CTRL_PARDET_MODE_ENA BIT(28)
+#define DEV10G_DEV_RST_CTRL_PARDET_MODE_ENA_SET(x)\
+ FIELD_PREP(DEV10G_DEV_RST_CTRL_PARDET_MODE_ENA, x)
+#define DEV10G_DEV_RST_CTRL_PARDET_MODE_ENA_GET(x)\
+ FIELD_GET(DEV10G_DEV_RST_CTRL_PARDET_MODE_ENA, x)
+
+#define DEV10G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS BIT(27)
+#define DEV10G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS_SET(x)\
+ FIELD_PREP(DEV10G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS, x)
+#define DEV10G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS_GET(x)\
+ FIELD_GET(DEV10G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS, x)
+
+#define DEV10G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS GENMASK(26, 25)
+#define DEV10G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS_SET(x)\
+ FIELD_PREP(DEV10G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS, x)
+#define DEV10G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS_GET(x)\
+ FIELD_GET(DEV10G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS, x)
+
+#define DEV10G_DEV_RST_CTRL_SERDES_SPEED_SEL GENMASK(24, 23)
+#define DEV10G_DEV_RST_CTRL_SERDES_SPEED_SEL_SET(x)\
+ FIELD_PREP(DEV10G_DEV_RST_CTRL_SERDES_SPEED_SEL, x)
+#define DEV10G_DEV_RST_CTRL_SERDES_SPEED_SEL_GET(x)\
+ FIELD_GET(DEV10G_DEV_RST_CTRL_SERDES_SPEED_SEL, x)
+
+#define DEV10G_DEV_RST_CTRL_SPEED_SEL GENMASK(22, 20)
+#define DEV10G_DEV_RST_CTRL_SPEED_SEL_SET(x)\
+ FIELD_PREP(DEV10G_DEV_RST_CTRL_SPEED_SEL, x)
+#define DEV10G_DEV_RST_CTRL_SPEED_SEL_GET(x)\
+ FIELD_GET(DEV10G_DEV_RST_CTRL_SPEED_SEL, x)
+
+#define DEV10G_DEV_RST_CTRL_PCS_TX_RST BIT(12)
+#define DEV10G_DEV_RST_CTRL_PCS_TX_RST_SET(x)\
+ FIELD_PREP(DEV10G_DEV_RST_CTRL_PCS_TX_RST, x)
+#define DEV10G_DEV_RST_CTRL_PCS_TX_RST_GET(x)\
+ FIELD_GET(DEV10G_DEV_RST_CTRL_PCS_TX_RST, x)
+
+#define DEV10G_DEV_RST_CTRL_PCS_RX_RST BIT(8)
+#define DEV10G_DEV_RST_CTRL_PCS_RX_RST_SET(x)\
+ FIELD_PREP(DEV10G_DEV_RST_CTRL_PCS_RX_RST, x)
+#define DEV10G_DEV_RST_CTRL_PCS_RX_RST_GET(x)\
+ FIELD_GET(DEV10G_DEV_RST_CTRL_PCS_RX_RST, x)
+
+#define DEV10G_DEV_RST_CTRL_MAC_TX_RST BIT(4)
+#define DEV10G_DEV_RST_CTRL_MAC_TX_RST_SET(x)\
+ FIELD_PREP(DEV10G_DEV_RST_CTRL_MAC_TX_RST, x)
+#define DEV10G_DEV_RST_CTRL_MAC_TX_RST_GET(x)\
+ FIELD_GET(DEV10G_DEV_RST_CTRL_MAC_TX_RST, x)
+
+#define DEV10G_DEV_RST_CTRL_MAC_RX_RST BIT(0)
+#define DEV10G_DEV_RST_CTRL_MAC_RX_RST_SET(x)\
+ FIELD_PREP(DEV10G_DEV_RST_CTRL_MAC_RX_RST, x)
+#define DEV10G_DEV_RST_CTRL_MAC_RX_RST_GET(x)\
+ FIELD_GET(DEV10G_DEV_RST_CTRL_MAC_RX_RST, x)
+
+/* DEV10G:PCS25G_CFG_STATUS:PCS25G_CFG */
+#define DEV10G_PCS25G_CFG(t) __REG(TARGET_DEV10G,\
+ t, 12, 488, 0, 1, 32, 0, 0, 1, 4)
+
+#define DEV10G_PCS25G_CFG_PCS25G_ENA BIT(0)
+#define DEV10G_PCS25G_CFG_PCS25G_ENA_SET(x)\
+ FIELD_PREP(DEV10G_PCS25G_CFG_PCS25G_ENA, x)
+#define DEV10G_PCS25G_CFG_PCS25G_ENA_GET(x)\
+ FIELD_GET(DEV10G_PCS25G_CFG_PCS25G_ENA, x)
+
+/* DEV10G:MAC_CFG_STATUS:MAC_ENA_CFG */
+#define DEV25G_MAC_ENA_CFG(t) __REG(TARGET_DEV25G,\
+ t, 8, 0, 0, 1, 60, 0, 0, 1, 4)
+
+#define DEV25G_MAC_ENA_CFG_RX_ENA BIT(4)
+#define DEV25G_MAC_ENA_CFG_RX_ENA_SET(x)\
+ FIELD_PREP(DEV25G_MAC_ENA_CFG_RX_ENA, x)
+#define DEV25G_MAC_ENA_CFG_RX_ENA_GET(x)\
+ FIELD_GET(DEV25G_MAC_ENA_CFG_RX_ENA, x)
+
+#define DEV25G_MAC_ENA_CFG_TX_ENA BIT(0)
+#define DEV25G_MAC_ENA_CFG_TX_ENA_SET(x)\
+ FIELD_PREP(DEV25G_MAC_ENA_CFG_TX_ENA, x)
+#define DEV25G_MAC_ENA_CFG_TX_ENA_GET(x)\
+ FIELD_GET(DEV25G_MAC_ENA_CFG_TX_ENA, x)
+
+/* DEV10G:MAC_CFG_STATUS:MAC_MAXLEN_CFG */
+#define DEV25G_MAC_MAXLEN_CFG(t) __REG(TARGET_DEV25G,\
+ t, 8, 0, 0, 1, 60, 8, 0, 1, 4)
+
+#define DEV25G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK BIT(16)
+#define DEV25G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK_SET(x)\
+ FIELD_PREP(DEV25G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK, x)
+#define DEV25G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK_GET(x)\
+ FIELD_GET(DEV25G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK, x)
+
+#define DEV25G_MAC_MAXLEN_CFG_MAX_LEN GENMASK(15, 0)
+#define DEV25G_MAC_MAXLEN_CFG_MAX_LEN_SET(x)\
+ FIELD_PREP(DEV25G_MAC_MAXLEN_CFG_MAX_LEN, x)
+#define DEV25G_MAC_MAXLEN_CFG_MAX_LEN_GET(x)\
+ FIELD_GET(DEV25G_MAC_MAXLEN_CFG_MAX_LEN, x)
+
+/* DEV10G:MAC_CFG_STATUS:MAC_ADV_CHK_CFG */
+#define DEV25G_MAC_ADV_CHK_CFG(t) __REG(TARGET_DEV25G,\
+ t, 8, 0, 0, 1, 60, 28, 0, 1, 4)
+
+#define DEV25G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA BIT(24)
+#define DEV25G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA_SET(x)\
+ FIELD_PREP(DEV25G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA, x)
+#define DEV25G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA_GET(x)\
+ FIELD_GET(DEV25G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA, x)
+
+#define DEV25G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA BIT(20)
+#define DEV25G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA_SET(x)\
+ FIELD_PREP(DEV25G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA, x)
+#define DEV25G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA_GET(x)\
+ FIELD_GET(DEV25G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA, x)
+
+#define DEV25G_MAC_ADV_CHK_CFG_SFD_CHK_ENA BIT(16)
+#define DEV25G_MAC_ADV_CHK_CFG_SFD_CHK_ENA_SET(x)\
+ FIELD_PREP(DEV25G_MAC_ADV_CHK_CFG_SFD_CHK_ENA, x)
+#define DEV25G_MAC_ADV_CHK_CFG_SFD_CHK_ENA_GET(x)\
+ FIELD_GET(DEV25G_MAC_ADV_CHK_CFG_SFD_CHK_ENA, x)
+
+#define DEV25G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS BIT(12)
+#define DEV25G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS_SET(x)\
+ FIELD_PREP(DEV25G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS, x)
+#define DEV25G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS_GET(x)\
+ FIELD_GET(DEV25G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS, x)
+
+#define DEV25G_MAC_ADV_CHK_CFG_PRM_CHK_ENA BIT(8)
+#define DEV25G_MAC_ADV_CHK_CFG_PRM_CHK_ENA_SET(x)\
+ FIELD_PREP(DEV25G_MAC_ADV_CHK_CFG_PRM_CHK_ENA, x)
+#define DEV25G_MAC_ADV_CHK_CFG_PRM_CHK_ENA_GET(x)\
+ FIELD_GET(DEV25G_MAC_ADV_CHK_CFG_PRM_CHK_ENA, x)
+
+#define DEV25G_MAC_ADV_CHK_CFG_OOR_ERR_ENA BIT(4)
+#define DEV25G_MAC_ADV_CHK_CFG_OOR_ERR_ENA_SET(x)\
+ FIELD_PREP(DEV25G_MAC_ADV_CHK_CFG_OOR_ERR_ENA, x)
+#define DEV25G_MAC_ADV_CHK_CFG_OOR_ERR_ENA_GET(x)\
+ FIELD_GET(DEV25G_MAC_ADV_CHK_CFG_OOR_ERR_ENA, x)
+
+#define DEV25G_MAC_ADV_CHK_CFG_INR_ERR_ENA BIT(0)
+#define DEV25G_MAC_ADV_CHK_CFG_INR_ERR_ENA_SET(x)\
+ FIELD_PREP(DEV25G_MAC_ADV_CHK_CFG_INR_ERR_ENA, x)
+#define DEV25G_MAC_ADV_CHK_CFG_INR_ERR_ENA_GET(x)\
+ FIELD_GET(DEV25G_MAC_ADV_CHK_CFG_INR_ERR_ENA, x)
+
+/* DEV10G:DEV_CFG_STATUS:DEV_RST_CTRL */
+#define DEV25G_DEV_RST_CTRL(t) __REG(TARGET_DEV25G,\
+ t, 8, 436, 0, 1, 52, 0, 0, 1, 4)
+
+#define DEV25G_DEV_RST_CTRL_PARDET_MODE_ENA BIT(28)
+#define DEV25G_DEV_RST_CTRL_PARDET_MODE_ENA_SET(x)\
+ FIELD_PREP(DEV25G_DEV_RST_CTRL_PARDET_MODE_ENA, x)
+#define DEV25G_DEV_RST_CTRL_PARDET_MODE_ENA_GET(x)\
+ FIELD_GET(DEV25G_DEV_RST_CTRL_PARDET_MODE_ENA, x)
+
+#define DEV25G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS BIT(27)
+#define DEV25G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS_SET(x)\
+ FIELD_PREP(DEV25G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS, x)
+#define DEV25G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS_GET(x)\
+ FIELD_GET(DEV25G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS, x)
+
+#define DEV25G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS GENMASK(26, 25)
+#define DEV25G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS_SET(x)\
+ FIELD_PREP(DEV25G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS, x)
+#define DEV25G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS_GET(x)\
+ FIELD_GET(DEV25G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS, x)
+
+#define DEV25G_DEV_RST_CTRL_SERDES_SPEED_SEL GENMASK(24, 23)
+#define DEV25G_DEV_RST_CTRL_SERDES_SPEED_SEL_SET(x)\
+ FIELD_PREP(DEV25G_DEV_RST_CTRL_SERDES_SPEED_SEL, x)
+#define DEV25G_DEV_RST_CTRL_SERDES_SPEED_SEL_GET(x)\
+ FIELD_GET(DEV25G_DEV_RST_CTRL_SERDES_SPEED_SEL, x)
+
+#define DEV25G_DEV_RST_CTRL_SPEED_SEL GENMASK(22, 20)
+#define DEV25G_DEV_RST_CTRL_SPEED_SEL_SET(x)\
+ FIELD_PREP(DEV25G_DEV_RST_CTRL_SPEED_SEL, x)
+#define DEV25G_DEV_RST_CTRL_SPEED_SEL_GET(x)\
+ FIELD_GET(DEV25G_DEV_RST_CTRL_SPEED_SEL, x)
+
+#define DEV25G_DEV_RST_CTRL_PCS_TX_RST BIT(12)
+#define DEV25G_DEV_RST_CTRL_PCS_TX_RST_SET(x)\
+ FIELD_PREP(DEV25G_DEV_RST_CTRL_PCS_TX_RST, x)
+#define DEV25G_DEV_RST_CTRL_PCS_TX_RST_GET(x)\
+ FIELD_GET(DEV25G_DEV_RST_CTRL_PCS_TX_RST, x)
+
+#define DEV25G_DEV_RST_CTRL_PCS_RX_RST BIT(8)
+#define DEV25G_DEV_RST_CTRL_PCS_RX_RST_SET(x)\
+ FIELD_PREP(DEV25G_DEV_RST_CTRL_PCS_RX_RST, x)
+#define DEV25G_DEV_RST_CTRL_PCS_RX_RST_GET(x)\
+ FIELD_GET(DEV25G_DEV_RST_CTRL_PCS_RX_RST, x)
+
+#define DEV25G_DEV_RST_CTRL_MAC_TX_RST BIT(4)
+#define DEV25G_DEV_RST_CTRL_MAC_TX_RST_SET(x)\
+ FIELD_PREP(DEV25G_DEV_RST_CTRL_MAC_TX_RST, x)
+#define DEV25G_DEV_RST_CTRL_MAC_TX_RST_GET(x)\
+ FIELD_GET(DEV25G_DEV_RST_CTRL_MAC_TX_RST, x)
+
+#define DEV25G_DEV_RST_CTRL_MAC_RX_RST BIT(0)
+#define DEV25G_DEV_RST_CTRL_MAC_RX_RST_SET(x)\
+ FIELD_PREP(DEV25G_DEV_RST_CTRL_MAC_RX_RST, x)
+#define DEV25G_DEV_RST_CTRL_MAC_RX_RST_GET(x)\
+ FIELD_GET(DEV25G_DEV_RST_CTRL_MAC_RX_RST, x)
+
+/* DEV10G:PCS25G_CFG_STATUS:PCS25G_CFG */
+#define DEV25G_PCS25G_CFG(t) __REG(TARGET_DEV25G,\
+ t, 8, 488, 0, 1, 32, 0, 0, 1, 4)
+
+#define DEV25G_PCS25G_CFG_PCS25G_ENA BIT(0)
+#define DEV25G_PCS25G_CFG_PCS25G_ENA_SET(x)\
+ FIELD_PREP(DEV25G_PCS25G_CFG_PCS25G_ENA, x)
+#define DEV25G_PCS25G_CFG_PCS25G_ENA_GET(x)\
+ FIELD_GET(DEV25G_PCS25G_CFG_PCS25G_ENA, x)
+
+/* DEV10G:PCS25G_CFG_STATUS:PCS25G_SD_CFG */
+#define DEV25G_PCS25G_SD_CFG(t) __REG(TARGET_DEV25G,\
+ t, 8, 488, 0, 1, 32, 4, 0, 1, 4)
+
+#define DEV25G_PCS25G_SD_CFG_SD_SEL BIT(8)
+#define DEV25G_PCS25G_SD_CFG_SD_SEL_SET(x)\
+ FIELD_PREP(DEV25G_PCS25G_SD_CFG_SD_SEL, x)
+#define DEV25G_PCS25G_SD_CFG_SD_SEL_GET(x)\
+ FIELD_GET(DEV25G_PCS25G_SD_CFG_SD_SEL, x)
+
+#define DEV25G_PCS25G_SD_CFG_SD_POL BIT(4)
+#define DEV25G_PCS25G_SD_CFG_SD_POL_SET(x)\
+ FIELD_PREP(DEV25G_PCS25G_SD_CFG_SD_POL, x)
+#define DEV25G_PCS25G_SD_CFG_SD_POL_GET(x)\
+ FIELD_GET(DEV25G_PCS25G_SD_CFG_SD_POL, x)
+
+#define DEV25G_PCS25G_SD_CFG_SD_ENA BIT(0)
+#define DEV25G_PCS25G_SD_CFG_SD_ENA_SET(x)\
+ FIELD_PREP(DEV25G_PCS25G_SD_CFG_SD_ENA, x)
+#define DEV25G_PCS25G_SD_CFG_SD_ENA_GET(x)\
+ FIELD_GET(DEV25G_PCS25G_SD_CFG_SD_ENA, x)
+
+/* DEV1G:DEV_CFG_STATUS:DEV_RST_CTRL */
+#define DEV2G5_DEV_RST_CTRL(t) __REG(TARGET_DEV2G5,\
+ t, 65, 0, 0, 1, 36, 0, 0, 1, 4)
+
+#define DEV2G5_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS BIT(23)
+#define DEV2G5_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS_SET(x)\
+ FIELD_PREP(DEV2G5_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS, x)
+#define DEV2G5_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS_GET(x)\
+ FIELD_GET(DEV2G5_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS, x)
+
+#define DEV2G5_DEV_RST_CTRL_SPEED_SEL GENMASK(22, 20)
+#define DEV2G5_DEV_RST_CTRL_SPEED_SEL_SET(x)\
+ FIELD_PREP(DEV2G5_DEV_RST_CTRL_SPEED_SEL, x)
+#define DEV2G5_DEV_RST_CTRL_SPEED_SEL_GET(x)\
+ FIELD_GET(DEV2G5_DEV_RST_CTRL_SPEED_SEL, x)
+
+#define DEV2G5_DEV_RST_CTRL_USX_PCS_TX_RST BIT(17)
+#define DEV2G5_DEV_RST_CTRL_USX_PCS_TX_RST_SET(x)\
+ FIELD_PREP(DEV2G5_DEV_RST_CTRL_USX_PCS_TX_RST, x)
+#define DEV2G5_DEV_RST_CTRL_USX_PCS_TX_RST_GET(x)\
+ FIELD_GET(DEV2G5_DEV_RST_CTRL_USX_PCS_TX_RST, x)
+
+#define DEV2G5_DEV_RST_CTRL_USX_PCS_RX_RST BIT(16)
+#define DEV2G5_DEV_RST_CTRL_USX_PCS_RX_RST_SET(x)\
+ FIELD_PREP(DEV2G5_DEV_RST_CTRL_USX_PCS_RX_RST, x)
+#define DEV2G5_DEV_RST_CTRL_USX_PCS_RX_RST_GET(x)\
+ FIELD_GET(DEV2G5_DEV_RST_CTRL_USX_PCS_RX_RST, x)
+
+#define DEV2G5_DEV_RST_CTRL_PCS_TX_RST BIT(12)
+#define DEV2G5_DEV_RST_CTRL_PCS_TX_RST_SET(x)\
+ FIELD_PREP(DEV2G5_DEV_RST_CTRL_PCS_TX_RST, x)
+#define DEV2G5_DEV_RST_CTRL_PCS_TX_RST_GET(x)\
+ FIELD_GET(DEV2G5_DEV_RST_CTRL_PCS_TX_RST, x)
+
+#define DEV2G5_DEV_RST_CTRL_PCS_RX_RST BIT(8)
+#define DEV2G5_DEV_RST_CTRL_PCS_RX_RST_SET(x)\
+ FIELD_PREP(DEV2G5_DEV_RST_CTRL_PCS_RX_RST, x)
+#define DEV2G5_DEV_RST_CTRL_PCS_RX_RST_GET(x)\
+ FIELD_GET(DEV2G5_DEV_RST_CTRL_PCS_RX_RST, x)
+
+#define DEV2G5_DEV_RST_CTRL_MAC_TX_RST BIT(4)
+#define DEV2G5_DEV_RST_CTRL_MAC_TX_RST_SET(x)\
+ FIELD_PREP(DEV2G5_DEV_RST_CTRL_MAC_TX_RST, x)
+#define DEV2G5_DEV_RST_CTRL_MAC_TX_RST_GET(x)\
+ FIELD_GET(DEV2G5_DEV_RST_CTRL_MAC_TX_RST, x)
+
+#define DEV2G5_DEV_RST_CTRL_MAC_RX_RST BIT(0)
+#define DEV2G5_DEV_RST_CTRL_MAC_RX_RST_SET(x)\
+ FIELD_PREP(DEV2G5_DEV_RST_CTRL_MAC_RX_RST, x)
+#define DEV2G5_DEV_RST_CTRL_MAC_RX_RST_GET(x)\
+ FIELD_GET(DEV2G5_DEV_RST_CTRL_MAC_RX_RST, x)
+
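+/* A sketch of releasing a 2G5 device from reset, assuming the *_RST bits
+ * are active high and the spx5_rmw() accessor from sparx5_main.h (port
+ * is the DEV2G5 instance, illustrative only):
+ *
+ *	spx5_rmw(DEV2G5_DEV_RST_CTRL_PCS_TX_RST_SET(0) |
+ *		 DEV2G5_DEV_RST_CTRL_PCS_RX_RST_SET(0) |
+ *		 DEV2G5_DEV_RST_CTRL_MAC_TX_RST_SET(0) |
+ *		 DEV2G5_DEV_RST_CTRL_MAC_RX_RST_SET(0),
+ *		 DEV2G5_DEV_RST_CTRL_PCS_TX_RST |
+ *		 DEV2G5_DEV_RST_CTRL_PCS_RX_RST |
+ *		 DEV2G5_DEV_RST_CTRL_MAC_TX_RST |
+ *		 DEV2G5_DEV_RST_CTRL_MAC_RX_RST,
+ *		 sparx5, DEV2G5_DEV_RST_CTRL(port));
+ */
+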
+/* DEV1G:MAC_CFG_STATUS:MAC_ENA_CFG */
+#define DEV2G5_MAC_ENA_CFG(t) __REG(TARGET_DEV2G5,\
+ t, 65, 52, 0, 1, 36, 0, 0, 1, 4)
+
+#define DEV2G5_MAC_ENA_CFG_RX_ENA BIT(4)
+#define DEV2G5_MAC_ENA_CFG_RX_ENA_SET(x)\
+ FIELD_PREP(DEV2G5_MAC_ENA_CFG_RX_ENA, x)
+#define DEV2G5_MAC_ENA_CFG_RX_ENA_GET(x)\
+ FIELD_GET(DEV2G5_MAC_ENA_CFG_RX_ENA, x)
+
+#define DEV2G5_MAC_ENA_CFG_TX_ENA BIT(0)
+#define DEV2G5_MAC_ENA_CFG_TX_ENA_SET(x)\
+ FIELD_PREP(DEV2G5_MAC_ENA_CFG_TX_ENA, x)
+#define DEV2G5_MAC_ENA_CFG_TX_ENA_GET(x)\
+ FIELD_GET(DEV2G5_MAC_ENA_CFG_TX_ENA, x)
+
+/* DEV1G:MAC_CFG_STATUS:MAC_MODE_CFG */
+#define DEV2G5_MAC_MODE_CFG(t) __REG(TARGET_DEV2G5,\
+ t, 65, 52, 0, 1, 36, 4, 0, 1, 4)
+
+#define DEV2G5_MAC_MODE_CFG_FC_WORD_SYNC_ENA BIT(8)
+#define DEV2G5_MAC_MODE_CFG_FC_WORD_SYNC_ENA_SET(x)\
+ FIELD_PREP(DEV2G5_MAC_MODE_CFG_FC_WORD_SYNC_ENA, x)
+#define DEV2G5_MAC_MODE_CFG_FC_WORD_SYNC_ENA_GET(x)\
+ FIELD_GET(DEV2G5_MAC_MODE_CFG_FC_WORD_SYNC_ENA, x)
+
+#define DEV2G5_MAC_MODE_CFG_GIGA_MODE_ENA BIT(4)
+#define DEV2G5_MAC_MODE_CFG_GIGA_MODE_ENA_SET(x)\
+ FIELD_PREP(DEV2G5_MAC_MODE_CFG_GIGA_MODE_ENA, x)
+#define DEV2G5_MAC_MODE_CFG_GIGA_MODE_ENA_GET(x)\
+ FIELD_GET(DEV2G5_MAC_MODE_CFG_GIGA_MODE_ENA, x)
+
+#define DEV2G5_MAC_MODE_CFG_FDX_ENA BIT(0)
+#define DEV2G5_MAC_MODE_CFG_FDX_ENA_SET(x)\
+ FIELD_PREP(DEV2G5_MAC_MODE_CFG_FDX_ENA, x)
+#define DEV2G5_MAC_MODE_CFG_FDX_ENA_GET(x)\
+ FIELD_GET(DEV2G5_MAC_MODE_CFG_FDX_ENA, x)
+
+/* DEV1G:MAC_CFG_STATUS:MAC_MAXLEN_CFG */
+#define DEV2G5_MAC_MAXLEN_CFG(t) __REG(TARGET_DEV2G5,\
+ t, 65, 52, 0, 1, 36, 8, 0, 1, 4)
+
+#define DEV2G5_MAC_MAXLEN_CFG_MAX_LEN GENMASK(15, 0)
+#define DEV2G5_MAC_MAXLEN_CFG_MAX_LEN_SET(x)\
+ FIELD_PREP(DEV2G5_MAC_MAXLEN_CFG_MAX_LEN, x)
+#define DEV2G5_MAC_MAXLEN_CFG_MAX_LEN_GET(x)\
+ FIELD_GET(DEV2G5_MAC_MAXLEN_CFG_MAX_LEN, x)
+
+/* DEV1G:MAC_CFG_STATUS:MAC_TAGS_CFG */
+#define DEV2G5_MAC_TAGS_CFG(t) __REG(TARGET_DEV2G5,\
+ t, 65, 52, 0, 1, 36, 12, 0, 1, 4)
+
+#define DEV2G5_MAC_TAGS_CFG_TAG_ID GENMASK(31, 16)
+#define DEV2G5_MAC_TAGS_CFG_TAG_ID_SET(x)\
+ FIELD_PREP(DEV2G5_MAC_TAGS_CFG_TAG_ID, x)
+#define DEV2G5_MAC_TAGS_CFG_TAG_ID_GET(x)\
+ FIELD_GET(DEV2G5_MAC_TAGS_CFG_TAG_ID, x)
+
+#define DEV2G5_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA BIT(3)
+#define DEV2G5_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA_SET(x)\
+ FIELD_PREP(DEV2G5_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA, x)
+#define DEV2G5_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA_GET(x)\
+ FIELD_GET(DEV2G5_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA, x)
+
+#define DEV2G5_MAC_TAGS_CFG_PB_ENA GENMASK(2, 1)
+#define DEV2G5_MAC_TAGS_CFG_PB_ENA_SET(x)\
+ FIELD_PREP(DEV2G5_MAC_TAGS_CFG_PB_ENA, x)
+#define DEV2G5_MAC_TAGS_CFG_PB_ENA_GET(x)\
+ FIELD_GET(DEV2G5_MAC_TAGS_CFG_PB_ENA, x)
+
+#define DEV2G5_MAC_TAGS_CFG_VLAN_AWR_ENA BIT(0)
+#define DEV2G5_MAC_TAGS_CFG_VLAN_AWR_ENA_SET(x)\
+ FIELD_PREP(DEV2G5_MAC_TAGS_CFG_VLAN_AWR_ENA, x)
+#define DEV2G5_MAC_TAGS_CFG_VLAN_AWR_ENA_GET(x)\
+ FIELD_GET(DEV2G5_MAC_TAGS_CFG_VLAN_AWR_ENA, x)
+
+/* DEV1G:MAC_CFG_STATUS:MAC_TAGS_CFG2 */
+#define DEV2G5_MAC_TAGS_CFG2(t) __REG(TARGET_DEV2G5,\
+ t, 65, 52, 0, 1, 36, 16, 0, 1, 4)
+
+#define DEV2G5_MAC_TAGS_CFG2_TAG_ID3 GENMASK(31, 16)
+#define DEV2G5_MAC_TAGS_CFG2_TAG_ID3_SET(x)\
+ FIELD_PREP(DEV2G5_MAC_TAGS_CFG2_TAG_ID3, x)
+#define DEV2G5_MAC_TAGS_CFG2_TAG_ID3_GET(x)\
+ FIELD_GET(DEV2G5_MAC_TAGS_CFG2_TAG_ID3, x)
+
+#define DEV2G5_MAC_TAGS_CFG2_TAG_ID2 GENMASK(15, 0)
+#define DEV2G5_MAC_TAGS_CFG2_TAG_ID2_SET(x)\
+ FIELD_PREP(DEV2G5_MAC_TAGS_CFG2_TAG_ID2, x)
+#define DEV2G5_MAC_TAGS_CFG2_TAG_ID2_GET(x)\
+ FIELD_GET(DEV2G5_MAC_TAGS_CFG2_TAG_ID2, x)
+
+/* DEV1G:MAC_CFG_STATUS:MAC_ADV_CHK_CFG */
+#define DEV2G5_MAC_ADV_CHK_CFG(t) __REG(TARGET_DEV2G5,\
+ t, 65, 52, 0, 1, 36, 20, 0, 1, 4)
+
+#define DEV2G5_MAC_ADV_CHK_CFG_LEN_DROP_ENA BIT(0)
+#define DEV2G5_MAC_ADV_CHK_CFG_LEN_DROP_ENA_SET(x)\
+ FIELD_PREP(DEV2G5_MAC_ADV_CHK_CFG_LEN_DROP_ENA, x)
+#define DEV2G5_MAC_ADV_CHK_CFG_LEN_DROP_ENA_GET(x)\
+ FIELD_GET(DEV2G5_MAC_ADV_CHK_CFG_LEN_DROP_ENA, x)
+
+/* DEV1G:MAC_CFG_STATUS:MAC_IFG_CFG */
+#define DEV2G5_MAC_IFG_CFG(t) __REG(TARGET_DEV2G5,\
+ t, 65, 52, 0, 1, 36, 24, 0, 1, 4)
+
+#define DEV2G5_MAC_IFG_CFG_RESTORE_OLD_IPG_CHECK BIT(17)
+#define DEV2G5_MAC_IFG_CFG_RESTORE_OLD_IPG_CHECK_SET(x)\
+ FIELD_PREP(DEV2G5_MAC_IFG_CFG_RESTORE_OLD_IPG_CHECK, x)
+#define DEV2G5_MAC_IFG_CFG_RESTORE_OLD_IPG_CHECK_GET(x)\
+ FIELD_GET(DEV2G5_MAC_IFG_CFG_RESTORE_OLD_IPG_CHECK, x)
+
+#define DEV2G5_MAC_IFG_CFG_TX_IFG GENMASK(12, 8)
+#define DEV2G5_MAC_IFG_CFG_TX_IFG_SET(x)\
+ FIELD_PREP(DEV2G5_MAC_IFG_CFG_TX_IFG, x)
+#define DEV2G5_MAC_IFG_CFG_TX_IFG_GET(x)\
+ FIELD_GET(DEV2G5_MAC_IFG_CFG_TX_IFG, x)
+
+#define DEV2G5_MAC_IFG_CFG_RX_IFG2 GENMASK(7, 4)
+#define DEV2G5_MAC_IFG_CFG_RX_IFG2_SET(x)\
+ FIELD_PREP(DEV2G5_MAC_IFG_CFG_RX_IFG2, x)
+#define DEV2G5_MAC_IFG_CFG_RX_IFG2_GET(x)\
+ FIELD_GET(DEV2G5_MAC_IFG_CFG_RX_IFG2, x)
+
+#define DEV2G5_MAC_IFG_CFG_RX_IFG1 GENMASK(3, 0)
+#define DEV2G5_MAC_IFG_CFG_RX_IFG1_SET(x)\
+ FIELD_PREP(DEV2G5_MAC_IFG_CFG_RX_IFG1, x)
+#define DEV2G5_MAC_IFG_CFG_RX_IFG1_GET(x)\
+ FIELD_GET(DEV2G5_MAC_IFG_CFG_RX_IFG1, x)
+
+/* DEV1G:MAC_CFG_STATUS:MAC_HDX_CFG */
+#define DEV2G5_MAC_HDX_CFG(t) __REG(TARGET_DEV2G5,\
+ t, 65, 52, 0, 1, 36, 28, 0, 1, 4)
+
+#define DEV2G5_MAC_HDX_CFG_BYPASS_COL_SYNC BIT(26)
+#define DEV2G5_MAC_HDX_CFG_BYPASS_COL_SYNC_SET(x)\
+ FIELD_PREP(DEV2G5_MAC_HDX_CFG_BYPASS_COL_SYNC, x)
+#define DEV2G5_MAC_HDX_CFG_BYPASS_COL_SYNC_GET(x)\
+ FIELD_GET(DEV2G5_MAC_HDX_CFG_BYPASS_COL_SYNC, x)
+
+#define DEV2G5_MAC_HDX_CFG_SEED GENMASK(23, 16)
+#define DEV2G5_MAC_HDX_CFG_SEED_SET(x)\
+ FIELD_PREP(DEV2G5_MAC_HDX_CFG_SEED, x)
+#define DEV2G5_MAC_HDX_CFG_SEED_GET(x)\
+ FIELD_GET(DEV2G5_MAC_HDX_CFG_SEED, x)
+
+#define DEV2G5_MAC_HDX_CFG_SEED_LOAD BIT(12)
+#define DEV2G5_MAC_HDX_CFG_SEED_LOAD_SET(x)\
+ FIELD_PREP(DEV2G5_MAC_HDX_CFG_SEED_LOAD, x)
+#define DEV2G5_MAC_HDX_CFG_SEED_LOAD_GET(x)\
+ FIELD_GET(DEV2G5_MAC_HDX_CFG_SEED_LOAD, x)
+
+#define DEV2G5_MAC_HDX_CFG_RETRY_AFTER_EXC_COL_ENA BIT(8)
+#define DEV2G5_MAC_HDX_CFG_RETRY_AFTER_EXC_COL_ENA_SET(x)\
+ FIELD_PREP(DEV2G5_MAC_HDX_CFG_RETRY_AFTER_EXC_COL_ENA, x)
+#define DEV2G5_MAC_HDX_CFG_RETRY_AFTER_EXC_COL_ENA_GET(x)\
+ FIELD_GET(DEV2G5_MAC_HDX_CFG_RETRY_AFTER_EXC_COL_ENA, x)
+
+#define DEV2G5_MAC_HDX_CFG_LATE_COL_POS GENMASK(6, 0)
+#define DEV2G5_MAC_HDX_CFG_LATE_COL_POS_SET(x)\
+ FIELD_PREP(DEV2G5_MAC_HDX_CFG_LATE_COL_POS, x)
+#define DEV2G5_MAC_HDX_CFG_LATE_COL_POS_GET(x)\
+ FIELD_GET(DEV2G5_MAC_HDX_CFG_LATE_COL_POS, x)
+
+/* DEV1G:PCS1G_CFG_STATUS:PCS1G_CFG */
+#define DEV2G5_PCS1G_CFG(t) __REG(TARGET_DEV2G5,\
+ t, 65, 88, 0, 1, 68, 0, 0, 1, 4)
+
+#define DEV2G5_PCS1G_CFG_LINK_STATUS_TYPE BIT(4)
+#define DEV2G5_PCS1G_CFG_LINK_STATUS_TYPE_SET(x)\
+ FIELD_PREP(DEV2G5_PCS1G_CFG_LINK_STATUS_TYPE, x)
+#define DEV2G5_PCS1G_CFG_LINK_STATUS_TYPE_GET(x)\
+ FIELD_GET(DEV2G5_PCS1G_CFG_LINK_STATUS_TYPE, x)
+
+#define DEV2G5_PCS1G_CFG_AN_LINK_CTRL_ENA BIT(1)
+#define DEV2G5_PCS1G_CFG_AN_LINK_CTRL_ENA_SET(x)\
+ FIELD_PREP(DEV2G5_PCS1G_CFG_AN_LINK_CTRL_ENA, x)
+#define DEV2G5_PCS1G_CFG_AN_LINK_CTRL_ENA_GET(x)\
+ FIELD_GET(DEV2G5_PCS1G_CFG_AN_LINK_CTRL_ENA, x)
+
+#define DEV2G5_PCS1G_CFG_PCS_ENA BIT(0)
+#define DEV2G5_PCS1G_CFG_PCS_ENA_SET(x)\
+ FIELD_PREP(DEV2G5_PCS1G_CFG_PCS_ENA, x)
+#define DEV2G5_PCS1G_CFG_PCS_ENA_GET(x)\
+ FIELD_GET(DEV2G5_PCS1G_CFG_PCS_ENA, x)
+
+/* DEV1G:PCS1G_CFG_STATUS:PCS1G_MODE_CFG */
+#define DEV2G5_PCS1G_MODE_CFG(t) __REG(TARGET_DEV2G5,\
+ t, 65, 88, 0, 1, 68, 4, 0, 1, 4)
+
+#define DEV2G5_PCS1G_MODE_CFG_UNIDIR_MODE_ENA BIT(4)
+#define DEV2G5_PCS1G_MODE_CFG_UNIDIR_MODE_ENA_SET(x)\
+ FIELD_PREP(DEV2G5_PCS1G_MODE_CFG_UNIDIR_MODE_ENA, x)
+#define DEV2G5_PCS1G_MODE_CFG_UNIDIR_MODE_ENA_GET(x)\
+ FIELD_GET(DEV2G5_PCS1G_MODE_CFG_UNIDIR_MODE_ENA, x)
+
+#define DEV2G5_PCS1G_MODE_CFG_SAVE_PREAMBLE_ENA BIT(1)
+#define DEV2G5_PCS1G_MODE_CFG_SAVE_PREAMBLE_ENA_SET(x)\
+ FIELD_PREP(DEV2G5_PCS1G_MODE_CFG_SAVE_PREAMBLE_ENA, x)
+#define DEV2G5_PCS1G_MODE_CFG_SAVE_PREAMBLE_ENA_GET(x)\
+ FIELD_GET(DEV2G5_PCS1G_MODE_CFG_SAVE_PREAMBLE_ENA, x)
+
+#define DEV2G5_PCS1G_MODE_CFG_SGMII_MODE_ENA BIT(0)
+#define DEV2G5_PCS1G_MODE_CFG_SGMII_MODE_ENA_SET(x)\
+ FIELD_PREP(DEV2G5_PCS1G_MODE_CFG_SGMII_MODE_ENA, x)
+#define DEV2G5_PCS1G_MODE_CFG_SGMII_MODE_ENA_GET(x)\
+ FIELD_GET(DEV2G5_PCS1G_MODE_CFG_SGMII_MODE_ENA, x)
+
+/* DEV1G:PCS1G_CFG_STATUS:PCS1G_SD_CFG */
+#define DEV2G5_PCS1G_SD_CFG(t) __REG(TARGET_DEV2G5,\
+ t, 65, 88, 0, 1, 68, 8, 0, 1, 4)
+
+#define DEV2G5_PCS1G_SD_CFG_SD_SEL BIT(8)
+#define DEV2G5_PCS1G_SD_CFG_SD_SEL_SET(x)\
+ FIELD_PREP(DEV2G5_PCS1G_SD_CFG_SD_SEL, x)
+#define DEV2G5_PCS1G_SD_CFG_SD_SEL_GET(x)\
+ FIELD_GET(DEV2G5_PCS1G_SD_CFG_SD_SEL, x)
+
+#define DEV2G5_PCS1G_SD_CFG_SD_POL BIT(4)
+#define DEV2G5_PCS1G_SD_CFG_SD_POL_SET(x)\
+ FIELD_PREP(DEV2G5_PCS1G_SD_CFG_SD_POL, x)
+#define DEV2G5_PCS1G_SD_CFG_SD_POL_GET(x)\
+ FIELD_GET(DEV2G5_PCS1G_SD_CFG_SD_POL, x)
+
+#define DEV2G5_PCS1G_SD_CFG_SD_ENA BIT(0)
+#define DEV2G5_PCS1G_SD_CFG_SD_ENA_SET(x)\
+ FIELD_PREP(DEV2G5_PCS1G_SD_CFG_SD_ENA, x)
+#define DEV2G5_PCS1G_SD_CFG_SD_ENA_GET(x)\
+ FIELD_GET(DEV2G5_PCS1G_SD_CFG_SD_ENA, x)
+
+/* DEV1G:PCS1G_CFG_STATUS:PCS1G_ANEG_CFG */
+#define DEV2G5_PCS1G_ANEG_CFG(t) __REG(TARGET_DEV2G5,\
+ t, 65, 88, 0, 1, 68, 12, 0, 1, 4)
+
+#define DEV2G5_PCS1G_ANEG_CFG_ADV_ABILITY GENMASK(31, 16)
+#define DEV2G5_PCS1G_ANEG_CFG_ADV_ABILITY_SET(x)\
+ FIELD_PREP(DEV2G5_PCS1G_ANEG_CFG_ADV_ABILITY, x)
+#define DEV2G5_PCS1G_ANEG_CFG_ADV_ABILITY_GET(x)\
+ FIELD_GET(DEV2G5_PCS1G_ANEG_CFG_ADV_ABILITY, x)
+
+#define DEV2G5_PCS1G_ANEG_CFG_SW_RESOLVE_ENA BIT(8)
+#define DEV2G5_PCS1G_ANEG_CFG_SW_RESOLVE_ENA_SET(x)\
+ FIELD_PREP(DEV2G5_PCS1G_ANEG_CFG_SW_RESOLVE_ENA, x)
+#define DEV2G5_PCS1G_ANEG_CFG_SW_RESOLVE_ENA_GET(x)\
+ FIELD_GET(DEV2G5_PCS1G_ANEG_CFG_SW_RESOLVE_ENA, x)
+
+#define DEV2G5_PCS1G_ANEG_CFG_ANEG_RESTART_ONE_SHOT BIT(1)
+#define DEV2G5_PCS1G_ANEG_CFG_ANEG_RESTART_ONE_SHOT_SET(x)\
+ FIELD_PREP(DEV2G5_PCS1G_ANEG_CFG_ANEG_RESTART_ONE_SHOT, x)
+#define DEV2G5_PCS1G_ANEG_CFG_ANEG_RESTART_ONE_SHOT_GET(x)\
+ FIELD_GET(DEV2G5_PCS1G_ANEG_CFG_ANEG_RESTART_ONE_SHOT, x)
+
+#define DEV2G5_PCS1G_ANEG_CFG_ANEG_ENA BIT(0)
+#define DEV2G5_PCS1G_ANEG_CFG_ANEG_ENA_SET(x)\
+ FIELD_PREP(DEV2G5_PCS1G_ANEG_CFG_ANEG_ENA, x)
+#define DEV2G5_PCS1G_ANEG_CFG_ANEG_ENA_GET(x)\
+ FIELD_GET(DEV2G5_PCS1G_ANEG_CFG_ANEG_ENA, x)
+
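+/* A sketch of programming and (re)starting PCS autonegotiation with the
+ * fields above, assuming the spx5_wr() accessor from sparx5_main.h; adv
+ * stands in for the 16-bit advertised ability word and is illustrative
+ * only:
+ *
+ *	spx5_wr(DEV2G5_PCS1G_ANEG_CFG_ADV_ABILITY_SET(adv) |
+ *		DEV2G5_PCS1G_ANEG_CFG_SW_RESOLVE_ENA_SET(1) |
+ *		DEV2G5_PCS1G_ANEG_CFG_ANEG_ENA_SET(1) |
+ *		DEV2G5_PCS1G_ANEG_CFG_ANEG_RESTART_ONE_SHOT_SET(1),
+ *		sparx5, DEV2G5_PCS1G_ANEG_CFG(t));
+ */
+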
+/* DEV1G:PCS1G_CFG_STATUS:PCS1G_LB_CFG */
+#define DEV2G5_PCS1G_LB_CFG(t) __REG(TARGET_DEV2G5,\
+ t, 65, 88, 0, 1, 68, 20, 0, 1, 4)
+
+#define DEV2G5_PCS1G_LB_CFG_RA_ENA BIT(4)
+#define DEV2G5_PCS1G_LB_CFG_RA_ENA_SET(x)\
+ FIELD_PREP(DEV2G5_PCS1G_LB_CFG_RA_ENA, x)
+#define DEV2G5_PCS1G_LB_CFG_RA_ENA_GET(x)\
+ FIELD_GET(DEV2G5_PCS1G_LB_CFG_RA_ENA, x)
+
+#define DEV2G5_PCS1G_LB_CFG_GMII_PHY_LB_ENA BIT(1)
+#define DEV2G5_PCS1G_LB_CFG_GMII_PHY_LB_ENA_SET(x)\
+ FIELD_PREP(DEV2G5_PCS1G_LB_CFG_GMII_PHY_LB_ENA, x)
+#define DEV2G5_PCS1G_LB_CFG_GMII_PHY_LB_ENA_GET(x)\
+ FIELD_GET(DEV2G5_PCS1G_LB_CFG_GMII_PHY_LB_ENA, x)
+
+#define DEV2G5_PCS1G_LB_CFG_TBI_HOST_LB_ENA BIT(0)
+#define DEV2G5_PCS1G_LB_CFG_TBI_HOST_LB_ENA_SET(x)\
+ FIELD_PREP(DEV2G5_PCS1G_LB_CFG_TBI_HOST_LB_ENA, x)
+#define DEV2G5_PCS1G_LB_CFG_TBI_HOST_LB_ENA_GET(x)\
+ FIELD_GET(DEV2G5_PCS1G_LB_CFG_TBI_HOST_LB_ENA, x)
+
+/* DEV1G:PCS1G_CFG_STATUS:PCS1G_ANEG_STATUS */
+#define DEV2G5_PCS1G_ANEG_STATUS(t) __REG(TARGET_DEV2G5,\
+ t, 65, 88, 0, 1, 68, 32, 0, 1, 4)
+
+#define DEV2G5_PCS1G_ANEG_STATUS_LP_ADV_ABILITY GENMASK(31, 16)
+#define DEV2G5_PCS1G_ANEG_STATUS_LP_ADV_ABILITY_SET(x)\
+ FIELD_PREP(DEV2G5_PCS1G_ANEG_STATUS_LP_ADV_ABILITY, x)
+#define DEV2G5_PCS1G_ANEG_STATUS_LP_ADV_ABILITY_GET(x)\
+ FIELD_GET(DEV2G5_PCS1G_ANEG_STATUS_LP_ADV_ABILITY, x)
+
+#define DEV2G5_PCS1G_ANEG_STATUS_PR BIT(4)
+#define DEV2G5_PCS1G_ANEG_STATUS_PR_SET(x)\
+ FIELD_PREP(DEV2G5_PCS1G_ANEG_STATUS_PR, x)
+#define DEV2G5_PCS1G_ANEG_STATUS_PR_GET(x)\
+ FIELD_GET(DEV2G5_PCS1G_ANEG_STATUS_PR, x)
+
+#define DEV2G5_PCS1G_ANEG_STATUS_PAGE_RX_STICKY BIT(3)
+#define DEV2G5_PCS1G_ANEG_STATUS_PAGE_RX_STICKY_SET(x)\
+ FIELD_PREP(DEV2G5_PCS1G_ANEG_STATUS_PAGE_RX_STICKY, x)
+#define DEV2G5_PCS1G_ANEG_STATUS_PAGE_RX_STICKY_GET(x)\
+ FIELD_GET(DEV2G5_PCS1G_ANEG_STATUS_PAGE_RX_STICKY, x)
+
+#define DEV2G5_PCS1G_ANEG_STATUS_ANEG_COMPLETE BIT(0)
+#define DEV2G5_PCS1G_ANEG_STATUS_ANEG_COMPLETE_SET(x)\
+ FIELD_PREP(DEV2G5_PCS1G_ANEG_STATUS_ANEG_COMPLETE, x)
+#define DEV2G5_PCS1G_ANEG_STATUS_ANEG_COMPLETE_GET(x)\
+ FIELD_GET(DEV2G5_PCS1G_ANEG_STATUS_ANEG_COMPLETE, x)
+
+/* DEV1G:PCS1G_CFG_STATUS:PCS1G_LINK_STATUS */
+#define DEV2G5_PCS1G_LINK_STATUS(t) __REG(TARGET_DEV2G5,\
+ t, 65, 88, 0, 1, 68, 40, 0, 1, 4)
+
+#define DEV2G5_PCS1G_LINK_STATUS_DELAY_VAR GENMASK(15, 12)
+#define DEV2G5_PCS1G_LINK_STATUS_DELAY_VAR_SET(x)\
+ FIELD_PREP(DEV2G5_PCS1G_LINK_STATUS_DELAY_VAR, x)
+#define DEV2G5_PCS1G_LINK_STATUS_DELAY_VAR_GET(x)\
+ FIELD_GET(DEV2G5_PCS1G_LINK_STATUS_DELAY_VAR, x)
+
+#define DEV2G5_PCS1G_LINK_STATUS_SIGNAL_DETECT BIT(8)
+#define DEV2G5_PCS1G_LINK_STATUS_SIGNAL_DETECT_SET(x)\
+ FIELD_PREP(DEV2G5_PCS1G_LINK_STATUS_SIGNAL_DETECT, x)
+#define DEV2G5_PCS1G_LINK_STATUS_SIGNAL_DETECT_GET(x)\
+ FIELD_GET(DEV2G5_PCS1G_LINK_STATUS_SIGNAL_DETECT, x)
+
+#define DEV2G5_PCS1G_LINK_STATUS_LINK_STATUS BIT(4)
+#define DEV2G5_PCS1G_LINK_STATUS_LINK_STATUS_SET(x)\
+ FIELD_PREP(DEV2G5_PCS1G_LINK_STATUS_LINK_STATUS, x)
+#define DEV2G5_PCS1G_LINK_STATUS_LINK_STATUS_GET(x)\
+ FIELD_GET(DEV2G5_PCS1G_LINK_STATUS_LINK_STATUS, x)
+
+#define DEV2G5_PCS1G_LINK_STATUS_SYNC_STATUS BIT(0)
+#define DEV2G5_PCS1G_LINK_STATUS_SYNC_STATUS_SET(x)\
+ FIELD_PREP(DEV2G5_PCS1G_LINK_STATUS_SYNC_STATUS, x)
+#define DEV2G5_PCS1G_LINK_STATUS_SYNC_STATUS_GET(x)\
+ FIELD_GET(DEV2G5_PCS1G_LINK_STATUS_SYNC_STATUS, x)
+
+/* DEV1G:PCS1G_CFG_STATUS:PCS1G_STICKY */
+#define DEV2G5_PCS1G_STICKY(t) __REG(TARGET_DEV2G5,\
+ t, 65, 88, 0, 1, 68, 48, 0, 1, 4)
+
+#define DEV2G5_PCS1G_STICKY_LINK_DOWN_STICKY BIT(4)
+#define DEV2G5_PCS1G_STICKY_LINK_DOWN_STICKY_SET(x)\
+ FIELD_PREP(DEV2G5_PCS1G_STICKY_LINK_DOWN_STICKY, x)
+#define DEV2G5_PCS1G_STICKY_LINK_DOWN_STICKY_GET(x)\
+ FIELD_GET(DEV2G5_PCS1G_STICKY_LINK_DOWN_STICKY, x)
+
+#define DEV2G5_PCS1G_STICKY_OUT_OF_SYNC_STICKY BIT(0)
+#define DEV2G5_PCS1G_STICKY_OUT_OF_SYNC_STICKY_SET(x)\
+ FIELD_PREP(DEV2G5_PCS1G_STICKY_OUT_OF_SYNC_STICKY, x)
+#define DEV2G5_PCS1G_STICKY_OUT_OF_SYNC_STICKY_GET(x)\
+ FIELD_GET(DEV2G5_PCS1G_STICKY_OUT_OF_SYNC_STICKY, x)
+
+/* DEV1G:PCS_FX100_CONFIGURATION:PCS_FX100_CFG */
+#define DEV2G5_PCS_FX100_CFG(t) __REG(TARGET_DEV2G5,\
+ t, 65, 164, 0, 1, 4, 0, 0, 1, 4)
+
+#define DEV2G5_PCS_FX100_CFG_SD_SEL BIT(26)
+#define DEV2G5_PCS_FX100_CFG_SD_SEL_SET(x)\
+ FIELD_PREP(DEV2G5_PCS_FX100_CFG_SD_SEL, x)
+#define DEV2G5_PCS_FX100_CFG_SD_SEL_GET(x)\
+ FIELD_GET(DEV2G5_PCS_FX100_CFG_SD_SEL, x)
+
+#define DEV2G5_PCS_FX100_CFG_SD_POL BIT(25)
+#define DEV2G5_PCS_FX100_CFG_SD_POL_SET(x)\
+ FIELD_PREP(DEV2G5_PCS_FX100_CFG_SD_POL, x)
+#define DEV2G5_PCS_FX100_CFG_SD_POL_GET(x)\
+ FIELD_GET(DEV2G5_PCS_FX100_CFG_SD_POL, x)
+
+#define DEV2G5_PCS_FX100_CFG_SD_ENA BIT(24)
+#define DEV2G5_PCS_FX100_CFG_SD_ENA_SET(x)\
+ FIELD_PREP(DEV2G5_PCS_FX100_CFG_SD_ENA, x)
+#define DEV2G5_PCS_FX100_CFG_SD_ENA_GET(x)\
+ FIELD_GET(DEV2G5_PCS_FX100_CFG_SD_ENA, x)
+
+#define DEV2G5_PCS_FX100_CFG_LOOPBACK_ENA BIT(20)
+#define DEV2G5_PCS_FX100_CFG_LOOPBACK_ENA_SET(x)\
+ FIELD_PREP(DEV2G5_PCS_FX100_CFG_LOOPBACK_ENA, x)
+#define DEV2G5_PCS_FX100_CFG_LOOPBACK_ENA_GET(x)\
+ FIELD_GET(DEV2G5_PCS_FX100_CFG_LOOPBACK_ENA, x)
+
+#define DEV2G5_PCS_FX100_CFG_SWAP_MII_ENA BIT(16)
+#define DEV2G5_PCS_FX100_CFG_SWAP_MII_ENA_SET(x)\
+ FIELD_PREP(DEV2G5_PCS_FX100_CFG_SWAP_MII_ENA, x)
+#define DEV2G5_PCS_FX100_CFG_SWAP_MII_ENA_GET(x)\
+ FIELD_GET(DEV2G5_PCS_FX100_CFG_SWAP_MII_ENA, x)
+
+#define DEV2G5_PCS_FX100_CFG_RXBITSEL GENMASK(15, 12)
+#define DEV2G5_PCS_FX100_CFG_RXBITSEL_SET(x)\
+ FIELD_PREP(DEV2G5_PCS_FX100_CFG_RXBITSEL, x)
+#define DEV2G5_PCS_FX100_CFG_RXBITSEL_GET(x)\
+ FIELD_GET(DEV2G5_PCS_FX100_CFG_RXBITSEL, x)
+
+#define DEV2G5_PCS_FX100_CFG_SIGDET_CFG GENMASK(10, 9)
+#define DEV2G5_PCS_FX100_CFG_SIGDET_CFG_SET(x)\
+ FIELD_PREP(DEV2G5_PCS_FX100_CFG_SIGDET_CFG, x)
+#define DEV2G5_PCS_FX100_CFG_SIGDET_CFG_GET(x)\
+ FIELD_GET(DEV2G5_PCS_FX100_CFG_SIGDET_CFG, x)
+
+#define DEV2G5_PCS_FX100_CFG_LINKHYST_TM_ENA BIT(8)
+#define DEV2G5_PCS_FX100_CFG_LINKHYST_TM_ENA_SET(x)\
+ FIELD_PREP(DEV2G5_PCS_FX100_CFG_LINKHYST_TM_ENA, x)
+#define DEV2G5_PCS_FX100_CFG_LINKHYST_TM_ENA_GET(x)\
+ FIELD_GET(DEV2G5_PCS_FX100_CFG_LINKHYST_TM_ENA, x)
+
+#define DEV2G5_PCS_FX100_CFG_LINKHYSTTIMER GENMASK(7, 4)
+#define DEV2G5_PCS_FX100_CFG_LINKHYSTTIMER_SET(x)\
+ FIELD_PREP(DEV2G5_PCS_FX100_CFG_LINKHYSTTIMER, x)
+#define DEV2G5_PCS_FX100_CFG_LINKHYSTTIMER_GET(x)\
+ FIELD_GET(DEV2G5_PCS_FX100_CFG_LINKHYSTTIMER, x)
+
+#define DEV2G5_PCS_FX100_CFG_UNIDIR_MODE_ENA BIT(3)
+#define DEV2G5_PCS_FX100_CFG_UNIDIR_MODE_ENA_SET(x)\
+ FIELD_PREP(DEV2G5_PCS_FX100_CFG_UNIDIR_MODE_ENA, x)
+#define DEV2G5_PCS_FX100_CFG_UNIDIR_MODE_ENA_GET(x)\
+ FIELD_GET(DEV2G5_PCS_FX100_CFG_UNIDIR_MODE_ENA, x)
+
+#define DEV2G5_PCS_FX100_CFG_FEFCHK_ENA BIT(2)
+#define DEV2G5_PCS_FX100_CFG_FEFCHK_ENA_SET(x)\
+ FIELD_PREP(DEV2G5_PCS_FX100_CFG_FEFCHK_ENA, x)
+#define DEV2G5_PCS_FX100_CFG_FEFCHK_ENA_GET(x)\
+ FIELD_GET(DEV2G5_PCS_FX100_CFG_FEFCHK_ENA, x)
+
+#define DEV2G5_PCS_FX100_CFG_FEFGEN_ENA BIT(1)
+#define DEV2G5_PCS_FX100_CFG_FEFGEN_ENA_SET(x)\
+ FIELD_PREP(DEV2G5_PCS_FX100_CFG_FEFGEN_ENA, x)
+#define DEV2G5_PCS_FX100_CFG_FEFGEN_ENA_GET(x)\
+ FIELD_GET(DEV2G5_PCS_FX100_CFG_FEFGEN_ENA, x)
+
+#define DEV2G5_PCS_FX100_CFG_PCS_ENA BIT(0)
+#define DEV2G5_PCS_FX100_CFG_PCS_ENA_SET(x)\
+ FIELD_PREP(DEV2G5_PCS_FX100_CFG_PCS_ENA, x)
+#define DEV2G5_PCS_FX100_CFG_PCS_ENA_GET(x)\
+ FIELD_GET(DEV2G5_PCS_FX100_CFG_PCS_ENA, x)
+
+/* DEV1G:PCS_FX100_STATUS:PCS_FX100_STATUS */
+#define DEV2G5_PCS_FX100_STATUS(t) __REG(TARGET_DEV2G5,\
+ t, 65, 168, 0, 1, 4, 0, 0, 1, 4)
+
+#define DEV2G5_PCS_FX100_STATUS_EDGE_POS_PTP GENMASK(11, 8)
+#define DEV2G5_PCS_FX100_STATUS_EDGE_POS_PTP_SET(x)\
+ FIELD_PREP(DEV2G5_PCS_FX100_STATUS_EDGE_POS_PTP, x)
+#define DEV2G5_PCS_FX100_STATUS_EDGE_POS_PTP_GET(x)\
+ FIELD_GET(DEV2G5_PCS_FX100_STATUS_EDGE_POS_PTP, x)
+
+#define DEV2G5_PCS_FX100_STATUS_PCS_ERROR_STICKY BIT(7)
+#define DEV2G5_PCS_FX100_STATUS_PCS_ERROR_STICKY_SET(x)\
+ FIELD_PREP(DEV2G5_PCS_FX100_STATUS_PCS_ERROR_STICKY, x)
+#define DEV2G5_PCS_FX100_STATUS_PCS_ERROR_STICKY_GET(x)\
+ FIELD_GET(DEV2G5_PCS_FX100_STATUS_PCS_ERROR_STICKY, x)
+
+#define DEV2G5_PCS_FX100_STATUS_FEF_FOUND_STICKY BIT(6)
+#define DEV2G5_PCS_FX100_STATUS_FEF_FOUND_STICKY_SET(x)\
+ FIELD_PREP(DEV2G5_PCS_FX100_STATUS_FEF_FOUND_STICKY, x)
+#define DEV2G5_PCS_FX100_STATUS_FEF_FOUND_STICKY_GET(x)\
+ FIELD_GET(DEV2G5_PCS_FX100_STATUS_FEF_FOUND_STICKY, x)
+
+#define DEV2G5_PCS_FX100_STATUS_SSD_ERROR_STICKY BIT(5)
+#define DEV2G5_PCS_FX100_STATUS_SSD_ERROR_STICKY_SET(x)\
+ FIELD_PREP(DEV2G5_PCS_FX100_STATUS_SSD_ERROR_STICKY, x)
+#define DEV2G5_PCS_FX100_STATUS_SSD_ERROR_STICKY_GET(x)\
+ FIELD_GET(DEV2G5_PCS_FX100_STATUS_SSD_ERROR_STICKY, x)
+
+#define DEV2G5_PCS_FX100_STATUS_SYNC_LOST_STICKY BIT(4)
+#define DEV2G5_PCS_FX100_STATUS_SYNC_LOST_STICKY_SET(x)\
+ FIELD_PREP(DEV2G5_PCS_FX100_STATUS_SYNC_LOST_STICKY, x)
+#define DEV2G5_PCS_FX100_STATUS_SYNC_LOST_STICKY_GET(x)\
+ FIELD_GET(DEV2G5_PCS_FX100_STATUS_SYNC_LOST_STICKY, x)
+
+#define DEV2G5_PCS_FX100_STATUS_FEF_STATUS BIT(2)
+#define DEV2G5_PCS_FX100_STATUS_FEF_STATUS_SET(x)\
+ FIELD_PREP(DEV2G5_PCS_FX100_STATUS_FEF_STATUS, x)
+#define DEV2G5_PCS_FX100_STATUS_FEF_STATUS_GET(x)\
+ FIELD_GET(DEV2G5_PCS_FX100_STATUS_FEF_STATUS, x)
+
+#define DEV2G5_PCS_FX100_STATUS_SIGNAL_DETECT BIT(1)
+#define DEV2G5_PCS_FX100_STATUS_SIGNAL_DETECT_SET(x)\
+ FIELD_PREP(DEV2G5_PCS_FX100_STATUS_SIGNAL_DETECT, x)
+#define DEV2G5_PCS_FX100_STATUS_SIGNAL_DETECT_GET(x)\
+ FIELD_GET(DEV2G5_PCS_FX100_STATUS_SIGNAL_DETECT, x)
+
+#define DEV2G5_PCS_FX100_STATUS_SYNC_STATUS BIT(0)
+#define DEV2G5_PCS_FX100_STATUS_SYNC_STATUS_SET(x)\
+ FIELD_PREP(DEV2G5_PCS_FX100_STATUS_SYNC_STATUS, x)
+#define DEV2G5_PCS_FX100_STATUS_SYNC_STATUS_GET(x)\
+ FIELD_GET(DEV2G5_PCS_FX100_STATUS_SYNC_STATUS, x)
+
+/* DEV10G:MAC_CFG_STATUS:MAC_ENA_CFG */
+#define DEV5G_MAC_ENA_CFG(t) __REG(TARGET_DEV5G,\
+ t, 13, 0, 0, 1, 60, 0, 0, 1, 4)
+
+#define DEV5G_MAC_ENA_CFG_RX_ENA BIT(4)
+#define DEV5G_MAC_ENA_CFG_RX_ENA_SET(x)\
+ FIELD_PREP(DEV5G_MAC_ENA_CFG_RX_ENA, x)
+#define DEV5G_MAC_ENA_CFG_RX_ENA_GET(x)\
+ FIELD_GET(DEV5G_MAC_ENA_CFG_RX_ENA, x)
+
+#define DEV5G_MAC_ENA_CFG_TX_ENA BIT(0)
+#define DEV5G_MAC_ENA_CFG_TX_ENA_SET(x)\
+ FIELD_PREP(DEV5G_MAC_ENA_CFG_TX_ENA, x)
+#define DEV5G_MAC_ENA_CFG_TX_ENA_GET(x)\
+ FIELD_GET(DEV5G_MAC_ENA_CFG_TX_ENA, x)
+
+/* DEV10G:MAC_CFG_STATUS:MAC_MAXLEN_CFG */
+#define DEV5G_MAC_MAXLEN_CFG(t) __REG(TARGET_DEV5G,\
+ t, 13, 0, 0, 1, 60, 8, 0, 1, 4)
+
+#define DEV5G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK BIT(16)
+#define DEV5G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK_SET(x)\
+ FIELD_PREP(DEV5G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK, x)
+#define DEV5G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK_GET(x)\
+ FIELD_GET(DEV5G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK, x)
+
+#define DEV5G_MAC_MAXLEN_CFG_MAX_LEN GENMASK(15, 0)
+#define DEV5G_MAC_MAXLEN_CFG_MAX_LEN_SET(x)\
+ FIELD_PREP(DEV5G_MAC_MAXLEN_CFG_MAX_LEN, x)
+#define DEV5G_MAC_MAXLEN_CFG_MAX_LEN_GET(x)\
+ FIELD_GET(DEV5G_MAC_MAXLEN_CFG_MAX_LEN, x)
+
+/* DEV10G:MAC_CFG_STATUS:MAC_ADV_CHK_CFG */
+#define DEV5G_MAC_ADV_CHK_CFG(t) __REG(TARGET_DEV5G,\
+ t, 13, 0, 0, 1, 60, 28, 0, 1, 4)
+
+#define DEV5G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA BIT(24)
+#define DEV5G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA_SET(x)\
+ FIELD_PREP(DEV5G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA, x)
+#define DEV5G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA_GET(x)\
+ FIELD_GET(DEV5G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA, x)
+
+#define DEV5G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA BIT(20)
+#define DEV5G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA_SET(x)\
+ FIELD_PREP(DEV5G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA, x)
+#define DEV5G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA_GET(x)\
+ FIELD_GET(DEV5G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA, x)
+
+#define DEV5G_MAC_ADV_CHK_CFG_SFD_CHK_ENA BIT(16)
+#define DEV5G_MAC_ADV_CHK_CFG_SFD_CHK_ENA_SET(x)\
+ FIELD_PREP(DEV5G_MAC_ADV_CHK_CFG_SFD_CHK_ENA, x)
+#define DEV5G_MAC_ADV_CHK_CFG_SFD_CHK_ENA_GET(x)\
+ FIELD_GET(DEV5G_MAC_ADV_CHK_CFG_SFD_CHK_ENA, x)
+
+#define DEV5G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS BIT(12)
+#define DEV5G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS_SET(x)\
+ FIELD_PREP(DEV5G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS, x)
+#define DEV5G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS_GET(x)\
+ FIELD_GET(DEV5G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS, x)
+
+#define DEV5G_MAC_ADV_CHK_CFG_PRM_CHK_ENA BIT(8)
+#define DEV5G_MAC_ADV_CHK_CFG_PRM_CHK_ENA_SET(x)\
+ FIELD_PREP(DEV5G_MAC_ADV_CHK_CFG_PRM_CHK_ENA, x)
+#define DEV5G_MAC_ADV_CHK_CFG_PRM_CHK_ENA_GET(x)\
+ FIELD_GET(DEV5G_MAC_ADV_CHK_CFG_PRM_CHK_ENA, x)
+
+#define DEV5G_MAC_ADV_CHK_CFG_OOR_ERR_ENA BIT(4)
+#define DEV5G_MAC_ADV_CHK_CFG_OOR_ERR_ENA_SET(x)\
+ FIELD_PREP(DEV5G_MAC_ADV_CHK_CFG_OOR_ERR_ENA, x)
+#define DEV5G_MAC_ADV_CHK_CFG_OOR_ERR_ENA_GET(x)\
+ FIELD_GET(DEV5G_MAC_ADV_CHK_CFG_OOR_ERR_ENA, x)
+
+#define DEV5G_MAC_ADV_CHK_CFG_INR_ERR_ENA BIT(0)
+#define DEV5G_MAC_ADV_CHK_CFG_INR_ERR_ENA_SET(x)\
+ FIELD_PREP(DEV5G_MAC_ADV_CHK_CFG_INR_ERR_ENA, x)
+#define DEV5G_MAC_ADV_CHK_CFG_INR_ERR_ENA_GET(x)\
+ FIELD_GET(DEV5G_MAC_ADV_CHK_CFG_INR_ERR_ENA, x)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_SYMBOL_ERR_CNT */
+#define DEV5G_RX_SYMBOL_ERR_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 0, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_PAUSE_CNT */
+#define DEV5G_RX_PAUSE_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 4, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_UNSUP_OPCODE_CNT */
+#define DEV5G_RX_UNSUP_OPCODE_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 8, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_UC_CNT */
+#define DEV5G_RX_UC_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 12, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_MC_CNT */
+#define DEV5G_RX_MC_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 16, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_BC_CNT */
+#define DEV5G_RX_BC_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 20, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_CRC_ERR_CNT */
+#define DEV5G_RX_CRC_ERR_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 24, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_UNDERSIZE_CNT */
+#define DEV5G_RX_UNDERSIZE_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 28, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_FRAGMENTS_CNT */
+#define DEV5G_RX_FRAGMENTS_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 32, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_IN_RANGE_LEN_ERR_CNT */
+#define DEV5G_RX_IN_RANGE_LEN_ERR_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 36, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_OUT_OF_RANGE_LEN_ERR_CNT */
+#define DEV5G_RX_OUT_OF_RANGE_LEN_ERR_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 40, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_OVERSIZE_CNT */
+#define DEV5G_RX_OVERSIZE_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 44, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_JABBERS_CNT */
+#define DEV5G_RX_JABBERS_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 48, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_SIZE64_CNT */
+#define DEV5G_RX_SIZE64_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 52, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_SIZE65TO127_CNT */
+#define DEV5G_RX_SIZE65TO127_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 56, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_SIZE128TO255_CNT */
+#define DEV5G_RX_SIZE128TO255_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 60, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_SIZE256TO511_CNT */
+#define DEV5G_RX_SIZE256TO511_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 64, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_SIZE512TO1023_CNT */
+#define DEV5G_RX_SIZE512TO1023_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 68, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_SIZE1024TO1518_CNT */
+#define DEV5G_RX_SIZE1024TO1518_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 72, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_SIZE1519TOMAX_CNT */
+#define DEV5G_RX_SIZE1519TOMAX_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 76, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_IPG_SHRINK_CNT */
+#define DEV5G_RX_IPG_SHRINK_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 80, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:TX_PAUSE_CNT */
+#define DEV5G_TX_PAUSE_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 84, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:TX_UC_CNT */
+#define DEV5G_TX_UC_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 88, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:TX_MC_CNT */
+#define DEV5G_TX_MC_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 92, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:TX_BC_CNT */
+#define DEV5G_TX_BC_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 96, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:TX_SIZE64_CNT */
+#define DEV5G_TX_SIZE64_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 100, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:TX_SIZE65TO127_CNT */
+#define DEV5G_TX_SIZE65TO127_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 104, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:TX_SIZE128TO255_CNT */
+#define DEV5G_TX_SIZE128TO255_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 108, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:TX_SIZE256TO511_CNT */
+#define DEV5G_TX_SIZE256TO511_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 112, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:TX_SIZE512TO1023_CNT */
+#define DEV5G_TX_SIZE512TO1023_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 116, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:TX_SIZE1024TO1518_CNT */
+#define DEV5G_TX_SIZE1024TO1518_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 120, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:TX_SIZE1519TOMAX_CNT */
+#define DEV5G_TX_SIZE1519TOMAX_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 124, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_ALIGNMENT_LOST_CNT */
+#define DEV5G_RX_ALIGNMENT_LOST_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 128, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_TAGGED_FRMS_CNT */
+#define DEV5G_RX_TAGGED_FRMS_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 132, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_UNTAGGED_FRMS_CNT */
+#define DEV5G_RX_UNTAGGED_FRMS_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 136, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:TX_TAGGED_FRMS_CNT */
+#define DEV5G_TX_TAGGED_FRMS_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 140, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:TX_UNTAGGED_FRMS_CNT */
+#define DEV5G_TX_UNTAGGED_FRMS_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 144, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SYMBOL_ERR_CNT */
+#define DEV5G_PMAC_RX_SYMBOL_ERR_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 148, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_PAUSE_CNT */
+#define DEV5G_PMAC_RX_PAUSE_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 152, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_UNSUP_OPCODE_CNT */
+#define DEV5G_PMAC_RX_UNSUP_OPCODE_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 156, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_UC_CNT */
+#define DEV5G_PMAC_RX_UC_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 160, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_MC_CNT */
+#define DEV5G_PMAC_RX_MC_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 164, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_BC_CNT */
+#define DEV5G_PMAC_RX_BC_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 168, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_CRC_ERR_CNT */
+#define DEV5G_PMAC_RX_CRC_ERR_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 172, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_UNDERSIZE_CNT */
+#define DEV5G_PMAC_RX_UNDERSIZE_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 176, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_FRAGMENTS_CNT */
+#define DEV5G_PMAC_RX_FRAGMENTS_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 180, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_IN_RANGE_LEN_ERR_CNT */
+#define DEV5G_PMAC_RX_IN_RANGE_LEN_ERR_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 184, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_OUT_OF_RANGE_LEN_ERR_CNT */
+#define DEV5G_PMAC_RX_OUT_OF_RANGE_LEN_ERR_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 188, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_OVERSIZE_CNT */
+#define DEV5G_PMAC_RX_OVERSIZE_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 192, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_JABBERS_CNT */
+#define DEV5G_PMAC_RX_JABBERS_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 196, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE64_CNT */
+#define DEV5G_PMAC_RX_SIZE64_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 200, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE65TO127_CNT */
+#define DEV5G_PMAC_RX_SIZE65TO127_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 204, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE128TO255_CNT */
+#define DEV5G_PMAC_RX_SIZE128TO255_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 208, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE256TO511_CNT */
+#define DEV5G_PMAC_RX_SIZE256TO511_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 212, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE512TO1023_CNT */
+#define DEV5G_PMAC_RX_SIZE512TO1023_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 216, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE1024TO1518_CNT */
+#define DEV5G_PMAC_RX_SIZE1024TO1518_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 220, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE1519TOMAX_CNT */
+#define DEV5G_PMAC_RX_SIZE1519TOMAX_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 224, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_PAUSE_CNT */
+#define DEV5G_PMAC_TX_PAUSE_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 228, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_UC_CNT */
+#define DEV5G_PMAC_TX_UC_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 232, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_MC_CNT */
+#define DEV5G_PMAC_TX_MC_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 236, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_BC_CNT */
+#define DEV5G_PMAC_TX_BC_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 240, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE64_CNT */
+#define DEV5G_PMAC_TX_SIZE64_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 244, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE65TO127_CNT */
+#define DEV5G_PMAC_TX_SIZE65TO127_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 248, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE128TO255_CNT */
+#define DEV5G_PMAC_TX_SIZE128TO255_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 252, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE256TO511_CNT */
+#define DEV5G_PMAC_TX_SIZE256TO511_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 256, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE512TO1023_CNT */
+#define DEV5G_PMAC_TX_SIZE512TO1023_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 260, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE1024TO1518_CNT */
+#define DEV5G_PMAC_TX_SIZE1024TO1518_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 264, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE1519TOMAX_CNT */
+#define DEV5G_PMAC_TX_SIZE1519TOMAX_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 268, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_ALIGNMENT_LOST_CNT */
+#define DEV5G_PMAC_RX_ALIGNMENT_LOST_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 272, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:MM_RX_ASSEMBLY_ERR_CNT */
+#define DEV5G_MM_RX_ASSEMBLY_ERR_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 276, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:MM_RX_SMD_ERR_CNT */
+#define DEV5G_MM_RX_SMD_ERR_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 280, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:MM_RX_ASSEMBLY_OK_CNT */
+#define DEV5G_MM_RX_ASSEMBLY_OK_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 284, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:MM_RX_MERGE_FRAG_CNT */
+#define DEV5G_MM_RX_MERGE_FRAG_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 288, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:MM_TX_PFRAGMENT_CNT */
+#define DEV5G_MM_TX_PFRAGMENT_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 292, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_HIH_CKSM_ERR_CNT */
+#define DEV5G_RX_HIH_CKSM_ERR_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 296, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_XGMII_PROT_ERR_CNT */
+#define DEV5G_RX_XGMII_PROT_ERR_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 300, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_HIH_CKSM_ERR_CNT */
+#define DEV5G_PMAC_RX_HIH_CKSM_ERR_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 304, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_XGMII_PROT_ERR_CNT */
+#define DEV5G_PMAC_RX_XGMII_PROT_ERR_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 308, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_40BIT:RX_IN_BYTES_CNT */
+#define DEV5G_RX_IN_BYTES_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 372, 0, 1, 64, 0, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_40BIT:RX_IN_BYTES_MSB_CNT */
+#define DEV5G_RX_IN_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 372, 0, 1, 64, 4, 0, 1, 4)
+
+#define DEV5G_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT GENMASK(7, 0)
+#define DEV5G_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT_SET(x)\
+ FIELD_PREP(DEV5G_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT, x)
+#define DEV5G_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT_GET(x)\
+ FIELD_GET(DEV5G_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT, x)
+
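+/*
+ * The 40-bit statistics are split across a 32-bit LSB register and an
+ * 8-bit MSB register. A hedged sketch of how the two halves combine,
+ * assuming the driver's spx5_rd() read accessor and no particular
+ * latching order between the two reads:
+ *
+ *	u64 bytes;
+ *
+ *	bytes = spx5_rd(sparx5, DEV5G_RX_IN_BYTES_CNT(t));
+ *	bytes |= (u64)DEV5G_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT_GET(
+ *			spx5_rd(sparx5, DEV5G_RX_IN_BYTES_MSB_CNT(t))) << 32;
+ */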
+/* DEV10G:DEV_STATISTICS_40BIT:RX_OK_BYTES_CNT */
+#define DEV5G_RX_OK_BYTES_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 372, 0, 1, 64, 8, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_40BIT:RX_OK_BYTES_MSB_CNT */
+#define DEV5G_RX_OK_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 372, 0, 1, 64, 12, 0, 1, 4)
+
+#define DEV5G_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT GENMASK(7, 0)
+#define DEV5G_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT_SET(x)\
+ FIELD_PREP(DEV5G_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT, x)
+#define DEV5G_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT_GET(x)\
+ FIELD_GET(DEV5G_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT, x)
+
+/* DEV10G:DEV_STATISTICS_40BIT:RX_BAD_BYTES_CNT */
+#define DEV5G_RX_BAD_BYTES_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 372, 0, 1, 64, 16, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_40BIT:RX_BAD_BYTES_MSB_CNT */
+#define DEV5G_RX_BAD_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 372, 0, 1, 64, 20, 0, 1, 4)
+
+#define DEV5G_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT GENMASK(7, 0)
+#define DEV5G_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT_SET(x)\
+ FIELD_PREP(DEV5G_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT, x)
+#define DEV5G_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT_GET(x)\
+ FIELD_GET(DEV5G_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT, x)
+
+/* DEV10G:DEV_STATISTICS_40BIT:TX_OUT_BYTES_CNT */
+#define DEV5G_TX_OUT_BYTES_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 372, 0, 1, 64, 24, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_40BIT:TX_OUT_BYTES_MSB_CNT */
+#define DEV5G_TX_OUT_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 372, 0, 1, 64, 28, 0, 1, 4)
+
+#define DEV5G_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT GENMASK(7, 0)
+#define DEV5G_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT_SET(x)\
+ FIELD_PREP(DEV5G_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT, x)
+#define DEV5G_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT_GET(x)\
+ FIELD_GET(DEV5G_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT, x)
+
+/* DEV10G:DEV_STATISTICS_40BIT:TX_OK_BYTES_CNT */
+#define DEV5G_TX_OK_BYTES_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 372, 0, 1, 64, 32, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_40BIT:TX_OK_BYTES_MSB_CNT */
+#define DEV5G_TX_OK_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 372, 0, 1, 64, 36, 0, 1, 4)
+
+#define DEV5G_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT GENMASK(7, 0)
+#define DEV5G_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT_SET(x)\
+ FIELD_PREP(DEV5G_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT, x)
+#define DEV5G_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT_GET(x)\
+ FIELD_GET(DEV5G_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT, x)
+
+/* DEV10G:DEV_STATISTICS_40BIT:PMAC_RX_OK_BYTES_CNT */
+#define DEV5G_PMAC_RX_OK_BYTES_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 372, 0, 1, 64, 40, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_40BIT:PMAC_RX_OK_BYTES_MSB_CNT */
+#define DEV5G_PMAC_RX_OK_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 372, 0, 1, 64, 44, 0, 1, 4)
+
+#define DEV5G_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT GENMASK(7, 0)
+#define DEV5G_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT_SET(x)\
+ FIELD_PREP(DEV5G_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT, x)
+#define DEV5G_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT_GET(x)\
+ FIELD_GET(DEV5G_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT, x)
+
+/* DEV10G:DEV_STATISTICS_40BIT:PMAC_RX_BAD_BYTES_CNT */
+#define DEV5G_PMAC_RX_BAD_BYTES_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 372, 0, 1, 64, 48, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_40BIT:PMAC_RX_BAD_BYTES_MSB_CNT */
+#define DEV5G_PMAC_RX_BAD_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 372, 0, 1, 64, 52, 0, 1, 4)
+
+#define DEV5G_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT GENMASK(7, 0)
+#define DEV5G_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT_SET(x)\
+ FIELD_PREP(DEV5G_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT, x)
+#define DEV5G_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT_GET(x)\
+ FIELD_GET(DEV5G_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT, x)
+
+/* DEV10G:DEV_STATISTICS_40BIT:PMAC_TX_OK_BYTES_CNT */
+#define DEV5G_PMAC_TX_OK_BYTES_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 372, 0, 1, 64, 56, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_40BIT:PMAC_TX_OK_BYTES_MSB_CNT */
+#define DEV5G_PMAC_TX_OK_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 372, 0, 1, 64, 60, 0, 1, 4)
+
+#define DEV5G_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT GENMASK(7, 0)
+#define DEV5G_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT_SET(x)\
+ FIELD_PREP(DEV5G_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT, x)
+#define DEV5G_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT_GET(x)\
+ FIELD_GET(DEV5G_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT, x)
+
+/* DEV10G:DEV_CFG_STATUS:DEV_RST_CTRL */
+#define DEV5G_DEV_RST_CTRL(t) __REG(TARGET_DEV5G,\
+ t, 13, 436, 0, 1, 52, 0, 0, 1, 4)
+
+#define DEV5G_DEV_RST_CTRL_PARDET_MODE_ENA BIT(28)
+#define DEV5G_DEV_RST_CTRL_PARDET_MODE_ENA_SET(x)\
+ FIELD_PREP(DEV5G_DEV_RST_CTRL_PARDET_MODE_ENA, x)
+#define DEV5G_DEV_RST_CTRL_PARDET_MODE_ENA_GET(x)\
+ FIELD_GET(DEV5G_DEV_RST_CTRL_PARDET_MODE_ENA, x)
+
+#define DEV5G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS BIT(27)
+#define DEV5G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS_SET(x)\
+ FIELD_PREP(DEV5G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS, x)
+#define DEV5G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS_GET(x)\
+ FIELD_GET(DEV5G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS, x)
+
+#define DEV5G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS GENMASK(26, 25)
+#define DEV5G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS_SET(x)\
+ FIELD_PREP(DEV5G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS, x)
+#define DEV5G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS_GET(x)\
+ FIELD_GET(DEV5G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS, x)
+
+#define DEV5G_DEV_RST_CTRL_SERDES_SPEED_SEL GENMASK(24, 23)
+#define DEV5G_DEV_RST_CTRL_SERDES_SPEED_SEL_SET(x)\
+ FIELD_PREP(DEV5G_DEV_RST_CTRL_SERDES_SPEED_SEL, x)
+#define DEV5G_DEV_RST_CTRL_SERDES_SPEED_SEL_GET(x)\
+ FIELD_GET(DEV5G_DEV_RST_CTRL_SERDES_SPEED_SEL, x)
+
+#define DEV5G_DEV_RST_CTRL_SPEED_SEL GENMASK(22, 20)
+#define DEV5G_DEV_RST_CTRL_SPEED_SEL_SET(x)\
+ FIELD_PREP(DEV5G_DEV_RST_CTRL_SPEED_SEL, x)
+#define DEV5G_DEV_RST_CTRL_SPEED_SEL_GET(x)\
+ FIELD_GET(DEV5G_DEV_RST_CTRL_SPEED_SEL, x)
+
+#define DEV5G_DEV_RST_CTRL_PCS_TX_RST BIT(12)
+#define DEV5G_DEV_RST_CTRL_PCS_TX_RST_SET(x)\
+ FIELD_PREP(DEV5G_DEV_RST_CTRL_PCS_TX_RST, x)
+#define DEV5G_DEV_RST_CTRL_PCS_TX_RST_GET(x)\
+ FIELD_GET(DEV5G_DEV_RST_CTRL_PCS_TX_RST, x)
+
+#define DEV5G_DEV_RST_CTRL_PCS_RX_RST BIT(8)
+#define DEV5G_DEV_RST_CTRL_PCS_RX_RST_SET(x)\
+ FIELD_PREP(DEV5G_DEV_RST_CTRL_PCS_RX_RST, x)
+#define DEV5G_DEV_RST_CTRL_PCS_RX_RST_GET(x)\
+ FIELD_GET(DEV5G_DEV_RST_CTRL_PCS_RX_RST, x)
+
+#define DEV5G_DEV_RST_CTRL_MAC_TX_RST BIT(4)
+#define DEV5G_DEV_RST_CTRL_MAC_TX_RST_SET(x)\
+ FIELD_PREP(DEV5G_DEV_RST_CTRL_MAC_TX_RST, x)
+#define DEV5G_DEV_RST_CTRL_MAC_TX_RST_GET(x)\
+ FIELD_GET(DEV5G_DEV_RST_CTRL_MAC_TX_RST, x)
+
+#define DEV5G_DEV_RST_CTRL_MAC_RX_RST BIT(0)
+#define DEV5G_DEV_RST_CTRL_MAC_RX_RST_SET(x)\
+ FIELD_PREP(DEV5G_DEV_RST_CTRL_MAC_RX_RST, x)
+#define DEV5G_DEV_RST_CTRL_MAC_RX_RST_GET(x)\
+ FIELD_GET(DEV5G_DEV_RST_CTRL_MAC_RX_RST, x)
+
+/* DSM:RAM_CTRL:RAM_INIT */
+#define DSM_RAM_INIT __REG(TARGET_DSM,\
+ 0, 1, 0, 0, 1, 4, 0, 0, 1, 4)
+
+#define DSM_RAM_INIT_RAM_INIT BIT(1)
+#define DSM_RAM_INIT_RAM_INIT_SET(x)\
+ FIELD_PREP(DSM_RAM_INIT_RAM_INIT, x)
+#define DSM_RAM_INIT_RAM_INIT_GET(x)\
+ FIELD_GET(DSM_RAM_INIT_RAM_INIT, x)
+
+#define DSM_RAM_INIT_RAM_CFG_HOOK BIT(0)
+#define DSM_RAM_INIT_RAM_CFG_HOOK_SET(x)\
+ FIELD_PREP(DSM_RAM_INIT_RAM_CFG_HOOK, x)
+#define DSM_RAM_INIT_RAM_CFG_HOOK_GET(x)\
+ FIELD_GET(DSM_RAM_INIT_RAM_CFG_HOOK, x)
+
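+/*
+ * RAM initialization follows a write-then-poll pattern: set RAM_INIT and
+ * wait for the hardware to clear it again. A hedged sketch, assuming
+ * spx5_wr() and spx5_rd() accessors:
+ *
+ *	spx5_wr(DSM_RAM_INIT_RAM_INIT_SET(1), sparx5, DSM_RAM_INIT);
+ *	while (DSM_RAM_INIT_RAM_INIT_GET(spx5_rd(sparx5, DSM_RAM_INIT)))
+ *		usleep_range(100, 200);
+ */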
+/* DSM:CFG:BUF_CFG */
+#define DSM_BUF_CFG(r) __REG(TARGET_DSM,\
+ 0, 1, 20, 0, 1, 3528, 0, r, 67, 4)
+
+#define DSM_BUF_CFG_CSC_STAT_DIS BIT(13)
+#define DSM_BUF_CFG_CSC_STAT_DIS_SET(x)\
+ FIELD_PREP(DSM_BUF_CFG_CSC_STAT_DIS, x)
+#define DSM_BUF_CFG_CSC_STAT_DIS_GET(x)\
+ FIELD_GET(DSM_BUF_CFG_CSC_STAT_DIS, x)
+
+#define DSM_BUF_CFG_AGING_ENA BIT(12)
+#define DSM_BUF_CFG_AGING_ENA_SET(x)\
+ FIELD_PREP(DSM_BUF_CFG_AGING_ENA, x)
+#define DSM_BUF_CFG_AGING_ENA_GET(x)\
+ FIELD_GET(DSM_BUF_CFG_AGING_ENA, x)
+
+#define DSM_BUF_CFG_UNDERFLOW_WATCHDOG_DIS BIT(11)
+#define DSM_BUF_CFG_UNDERFLOW_WATCHDOG_DIS_SET(x)\
+ FIELD_PREP(DSM_BUF_CFG_UNDERFLOW_WATCHDOG_DIS, x)
+#define DSM_BUF_CFG_UNDERFLOW_WATCHDOG_DIS_GET(x)\
+ FIELD_GET(DSM_BUF_CFG_UNDERFLOW_WATCHDOG_DIS, x)
+
+#define DSM_BUF_CFG_UNDERFLOW_WATCHDOG_TIMEOUT GENMASK(10, 0)
+#define DSM_BUF_CFG_UNDERFLOW_WATCHDOG_TIMEOUT_SET(x)\
+ FIELD_PREP(DSM_BUF_CFG_UNDERFLOW_WATCHDOG_TIMEOUT, x)
+#define DSM_BUF_CFG_UNDERFLOW_WATCHDOG_TIMEOUT_GET(x)\
+ FIELD_GET(DSM_BUF_CFG_UNDERFLOW_WATCHDOG_TIMEOUT, x)
+
+/* DSM:CFG:DEV_TX_STOP_WM_CFG */
+#define DSM_DEV_TX_STOP_WM_CFG(r) __REG(TARGET_DSM,\
+ 0, 1, 20, 0, 1, 3528, 1360, r, 67, 4)
+
+#define DSM_DEV_TX_STOP_WM_CFG_FAST_STARTUP_ENA BIT(9)
+#define DSM_DEV_TX_STOP_WM_CFG_FAST_STARTUP_ENA_SET(x)\
+ FIELD_PREP(DSM_DEV_TX_STOP_WM_CFG_FAST_STARTUP_ENA, x)
+#define DSM_DEV_TX_STOP_WM_CFG_FAST_STARTUP_ENA_GET(x)\
+ FIELD_GET(DSM_DEV_TX_STOP_WM_CFG_FAST_STARTUP_ENA, x)
+
+#define DSM_DEV_TX_STOP_WM_CFG_DEV10G_SHADOW_ENA BIT(8)
+#define DSM_DEV_TX_STOP_WM_CFG_DEV10G_SHADOW_ENA_SET(x)\
+ FIELD_PREP(DSM_DEV_TX_STOP_WM_CFG_DEV10G_SHADOW_ENA, x)
+#define DSM_DEV_TX_STOP_WM_CFG_DEV10G_SHADOW_ENA_GET(x)\
+ FIELD_GET(DSM_DEV_TX_STOP_WM_CFG_DEV10G_SHADOW_ENA, x)
+
+#define DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM GENMASK(7, 1)
+#define DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM_SET(x)\
+ FIELD_PREP(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM, x)
+#define DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM_GET(x)\
+ FIELD_GET(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM, x)
+
+#define DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR BIT(0)
+#define DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR_SET(x)\
+ FIELD_PREP(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR, x)
+#define DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR_GET(x)\
+ FIELD_GET(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR, x)
+
+/* DSM:CFG:RX_PAUSE_CFG */
+#define DSM_RX_PAUSE_CFG(r) __REG(TARGET_DSM,\
+ 0, 1, 20, 0, 1, 3528, 1628, r, 67, 4)
+
+#define DSM_RX_PAUSE_CFG_RX_PAUSE_EN BIT(1)
+#define DSM_RX_PAUSE_CFG_RX_PAUSE_EN_SET(x)\
+ FIELD_PREP(DSM_RX_PAUSE_CFG_RX_PAUSE_EN, x)
+#define DSM_RX_PAUSE_CFG_RX_PAUSE_EN_GET(x)\
+ FIELD_GET(DSM_RX_PAUSE_CFG_RX_PAUSE_EN, x)
+
+#define DSM_RX_PAUSE_CFG_FC_OBEY_LOCAL BIT(0)
+#define DSM_RX_PAUSE_CFG_FC_OBEY_LOCAL_SET(x)\
+ FIELD_PREP(DSM_RX_PAUSE_CFG_FC_OBEY_LOCAL, x)
+#define DSM_RX_PAUSE_CFG_FC_OBEY_LOCAL_GET(x)\
+ FIELD_GET(DSM_RX_PAUSE_CFG_FC_OBEY_LOCAL, x)
+
+/* DSM:CFG:MAC_CFG */
+#define DSM_MAC_CFG(r) __REG(TARGET_DSM,\
+ 0, 1, 20, 0, 1, 3528, 2432, r, 67, 4)
+
+#define DSM_MAC_CFG_TX_PAUSE_VAL GENMASK(31, 16)
+#define DSM_MAC_CFG_TX_PAUSE_VAL_SET(x)\
+ FIELD_PREP(DSM_MAC_CFG_TX_PAUSE_VAL, x)
+#define DSM_MAC_CFG_TX_PAUSE_VAL_GET(x)\
+ FIELD_GET(DSM_MAC_CFG_TX_PAUSE_VAL, x)
+
+#define DSM_MAC_CFG_HDX_BACKPREASSURE BIT(2)
+#define DSM_MAC_CFG_HDX_BACKPREASSURE_SET(x)\
+ FIELD_PREP(DSM_MAC_CFG_HDX_BACKPREASSURE, x)
+#define DSM_MAC_CFG_HDX_BACKPREASSURE_GET(x)\
+ FIELD_GET(DSM_MAC_CFG_HDX_BACKPREASSURE, x)
+
+#define DSM_MAC_CFG_SEND_PAUSE_FRM_TWICE BIT(1)
+#define DSM_MAC_CFG_SEND_PAUSE_FRM_TWICE_SET(x)\
+ FIELD_PREP(DSM_MAC_CFG_SEND_PAUSE_FRM_TWICE, x)
+#define DSM_MAC_CFG_SEND_PAUSE_FRM_TWICE_GET(x)\
+ FIELD_GET(DSM_MAC_CFG_SEND_PAUSE_FRM_TWICE, x)
+
+#define DSM_MAC_CFG_TX_PAUSE_XON_XOFF BIT(0)
+#define DSM_MAC_CFG_TX_PAUSE_XON_XOFF_SET(x)\
+ FIELD_PREP(DSM_MAC_CFG_TX_PAUSE_XON_XOFF, x)
+#define DSM_MAC_CFG_TX_PAUSE_XON_XOFF_GET(x)\
+ FIELD_GET(DSM_MAC_CFG_TX_PAUSE_XON_XOFF, x)
+
+/* DSM:CFG:MAC_ADDR_BASE_HIGH_CFG */
+#define DSM_MAC_ADDR_BASE_HIGH_CFG(r) __REG(TARGET_DSM,\
+ 0, 1, 20, 0, 1, 3528, 2700, r, 65, 4)
+
+#define DSM_MAC_ADDR_BASE_HIGH_CFG_MAC_ADDR_HIGH GENMASK(23, 0)
+#define DSM_MAC_ADDR_BASE_HIGH_CFG_MAC_ADDR_HIGH_SET(x)\
+ FIELD_PREP(DSM_MAC_ADDR_BASE_HIGH_CFG_MAC_ADDR_HIGH, x)
+#define DSM_MAC_ADDR_BASE_HIGH_CFG_MAC_ADDR_HIGH_GET(x)\
+ FIELD_GET(DSM_MAC_ADDR_BASE_HIGH_CFG_MAC_ADDR_HIGH, x)
+
+/* DSM:CFG:MAC_ADDR_BASE_LOW_CFG */
+#define DSM_MAC_ADDR_BASE_LOW_CFG(r) __REG(TARGET_DSM,\
+ 0, 1, 20, 0, 1, 3528, 2960, r, 65, 4)
+
+#define DSM_MAC_ADDR_BASE_LOW_CFG_MAC_ADDR_LOW GENMASK(23, 0)
+#define DSM_MAC_ADDR_BASE_LOW_CFG_MAC_ADDR_LOW_SET(x)\
+ FIELD_PREP(DSM_MAC_ADDR_BASE_LOW_CFG_MAC_ADDR_LOW, x)
+#define DSM_MAC_ADDR_BASE_LOW_CFG_MAC_ADDR_LOW_GET(x)\
+ FIELD_GET(DSM_MAC_ADDR_BASE_LOW_CFG_MAC_ADDR_LOW, x)
+
+/* DSM:CFG:TAXI_CAL_CFG */
+#define DSM_TAXI_CAL_CFG(r) __REG(TARGET_DSM,\
+ 0, 1, 20, 0, 1, 3528, 3224, r, 9, 4)
+
+#define DSM_TAXI_CAL_CFG_CAL_IDX GENMASK(20, 15)
+#define DSM_TAXI_CAL_CFG_CAL_IDX_SET(x)\
+ FIELD_PREP(DSM_TAXI_CAL_CFG_CAL_IDX, x)
+#define DSM_TAXI_CAL_CFG_CAL_IDX_GET(x)\
+ FIELD_GET(DSM_TAXI_CAL_CFG_CAL_IDX, x)
+
+#define DSM_TAXI_CAL_CFG_CAL_CUR_LEN GENMASK(14, 9)
+#define DSM_TAXI_CAL_CFG_CAL_CUR_LEN_SET(x)\
+ FIELD_PREP(DSM_TAXI_CAL_CFG_CAL_CUR_LEN, x)
+#define DSM_TAXI_CAL_CFG_CAL_CUR_LEN_GET(x)\
+ FIELD_GET(DSM_TAXI_CAL_CFG_CAL_CUR_LEN, x)
+
+#define DSM_TAXI_CAL_CFG_CAL_CUR_VAL GENMASK(8, 5)
+#define DSM_TAXI_CAL_CFG_CAL_CUR_VAL_SET(x)\
+ FIELD_PREP(DSM_TAXI_CAL_CFG_CAL_CUR_VAL, x)
+#define DSM_TAXI_CAL_CFG_CAL_CUR_VAL_GET(x)\
+ FIELD_GET(DSM_TAXI_CAL_CFG_CAL_CUR_VAL, x)
+
+#define DSM_TAXI_CAL_CFG_CAL_PGM_VAL GENMASK(4, 1)
+#define DSM_TAXI_CAL_CFG_CAL_PGM_VAL_SET(x)\
+ FIELD_PREP(DSM_TAXI_CAL_CFG_CAL_PGM_VAL, x)
+#define DSM_TAXI_CAL_CFG_CAL_PGM_VAL_GET(x)\
+ FIELD_GET(DSM_TAXI_CAL_CFG_CAL_PGM_VAL, x)
+
+#define DSM_TAXI_CAL_CFG_CAL_PGM_ENA BIT(0)
+#define DSM_TAXI_CAL_CFG_CAL_PGM_ENA_SET(x)\
+ FIELD_PREP(DSM_TAXI_CAL_CFG_CAL_PGM_ENA, x)
+#define DSM_TAXI_CAL_CFG_CAL_PGM_ENA_GET(x)\
+ FIELD_GET(DSM_TAXI_CAL_CFG_CAL_PGM_ENA, x)
+
+/* EACL:ES2_KEY_SELECT_PROFILE:VCAP_ES2_KEY_SEL */
+#define EACL_VCAP_ES2_KEY_SEL(g, r) __REG(TARGET_EACL,\
+ 0, 1, 149504, g, 138, 8, 0, r, 2, 4)
+
+#define EACL_VCAP_ES2_KEY_SEL_IP6_KEY_SEL GENMASK(7, 5)
+#define EACL_VCAP_ES2_KEY_SEL_IP6_KEY_SEL_SET(x)\
+ FIELD_PREP(EACL_VCAP_ES2_KEY_SEL_IP6_KEY_SEL, x)
+#define EACL_VCAP_ES2_KEY_SEL_IP6_KEY_SEL_GET(x)\
+ FIELD_GET(EACL_VCAP_ES2_KEY_SEL_IP6_KEY_SEL, x)
+
+#define EACL_VCAP_ES2_KEY_SEL_IP4_KEY_SEL GENMASK(4, 2)
+#define EACL_VCAP_ES2_KEY_SEL_IP4_KEY_SEL_SET(x)\
+ FIELD_PREP(EACL_VCAP_ES2_KEY_SEL_IP4_KEY_SEL, x)
+#define EACL_VCAP_ES2_KEY_SEL_IP4_KEY_SEL_GET(x)\
+ FIELD_GET(EACL_VCAP_ES2_KEY_SEL_IP4_KEY_SEL, x)
+
+#define EACL_VCAP_ES2_KEY_SEL_ARP_KEY_SEL BIT(1)
+#define EACL_VCAP_ES2_KEY_SEL_ARP_KEY_SEL_SET(x)\
+ FIELD_PREP(EACL_VCAP_ES2_KEY_SEL_ARP_KEY_SEL, x)
+#define EACL_VCAP_ES2_KEY_SEL_ARP_KEY_SEL_GET(x)\
+ FIELD_GET(EACL_VCAP_ES2_KEY_SEL_ARP_KEY_SEL, x)
+
+#define EACL_VCAP_ES2_KEY_SEL_KEY_ENA BIT(0)
+#define EACL_VCAP_ES2_KEY_SEL_KEY_ENA_SET(x)\
+ FIELD_PREP(EACL_VCAP_ES2_KEY_SEL_KEY_ENA, x)
+#define EACL_VCAP_ES2_KEY_SEL_KEY_ENA_GET(x)\
+ FIELD_GET(EACL_VCAP_ES2_KEY_SEL_KEY_ENA, x)
+
+/* EACL:CNT_TBL:ES2_CNT */
+#define EACL_ES2_CNT(g) __REG(TARGET_EACL,\
+ 0, 1, 122880, g, 2048, 4, 0, 0, 1, 4)
+
+/* EACL:POL_CFG:POL_EACL_CFG */
+#define EACL_POL_EACL_CFG __REG(TARGET_EACL,\
+ 0, 1, 150608, 0, 1, 780, 768, 0, 1, 4)
+
+#define EACL_POL_EACL_CFG_EACL_CNT_MARKED_AS_DROPPED BIT(5)
+#define EACL_POL_EACL_CFG_EACL_CNT_MARKED_AS_DROPPED_SET(x)\
+ FIELD_PREP(EACL_POL_EACL_CFG_EACL_CNT_MARKED_AS_DROPPED, x)
+#define EACL_POL_EACL_CFG_EACL_CNT_MARKED_AS_DROPPED_GET(x)\
+ FIELD_GET(EACL_POL_EACL_CFG_EACL_CNT_MARKED_AS_DROPPED, x)
+
+#define EACL_POL_EACL_CFG_EACL_ALLOW_FP_COPY BIT(4)
+#define EACL_POL_EACL_CFG_EACL_ALLOW_FP_COPY_SET(x)\
+ FIELD_PREP(EACL_POL_EACL_CFG_EACL_ALLOW_FP_COPY, x)
+#define EACL_POL_EACL_CFG_EACL_ALLOW_FP_COPY_GET(x)\
+ FIELD_GET(EACL_POL_EACL_CFG_EACL_ALLOW_FP_COPY, x)
+
+#define EACL_POL_EACL_CFG_EACL_ALLOW_CPU_COPY BIT(3)
+#define EACL_POL_EACL_CFG_EACL_ALLOW_CPU_COPY_SET(x)\
+ FIELD_PREP(EACL_POL_EACL_CFG_EACL_ALLOW_CPU_COPY, x)
+#define EACL_POL_EACL_CFG_EACL_ALLOW_CPU_COPY_GET(x)\
+ FIELD_GET(EACL_POL_EACL_CFG_EACL_ALLOW_CPU_COPY, x)
+
+#define EACL_POL_EACL_CFG_EACL_FORCE_CLOSE BIT(2)
+#define EACL_POL_EACL_CFG_EACL_FORCE_CLOSE_SET(x)\
+ FIELD_PREP(EACL_POL_EACL_CFG_EACL_FORCE_CLOSE, x)
+#define EACL_POL_EACL_CFG_EACL_FORCE_CLOSE_GET(x)\
+ FIELD_GET(EACL_POL_EACL_CFG_EACL_FORCE_CLOSE, x)
+
+#define EACL_POL_EACL_CFG_EACL_FORCE_OPEN BIT(1)
+#define EACL_POL_EACL_CFG_EACL_FORCE_OPEN_SET(x)\
+ FIELD_PREP(EACL_POL_EACL_CFG_EACL_FORCE_OPEN, x)
+#define EACL_POL_EACL_CFG_EACL_FORCE_OPEN_GET(x)\
+ FIELD_GET(EACL_POL_EACL_CFG_EACL_FORCE_OPEN, x)
+
+#define EACL_POL_EACL_CFG_EACL_FORCE_INIT BIT(0)
+#define EACL_POL_EACL_CFG_EACL_FORCE_INIT_SET(x)\
+ FIELD_PREP(EACL_POL_EACL_CFG_EACL_FORCE_INIT, x)
+#define EACL_POL_EACL_CFG_EACL_FORCE_INIT_GET(x)\
+ FIELD_GET(EACL_POL_EACL_CFG_EACL_FORCE_INIT, x)
+
+/* EACL:ES2_STICKY:SEC_LOOKUP_STICKY */
+#define EACL_SEC_LOOKUP_STICKY(r) __REG(TARGET_EACL,\
+ 0, 1, 118696, 0, 1, 8, 0, r, 2, 4)
+
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP_7TUPLE_STICKY BIT(7)
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP_7TUPLE_STICKY_SET(x)\
+ FIELD_PREP(EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP_7TUPLE_STICKY, x)
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP_7TUPLE_STICKY_GET(x)\
+ FIELD_GET(EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP_7TUPLE_STICKY, x)
+
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_VID_STICKY BIT(6)
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_VID_STICKY_SET(x)\
+ FIELD_PREP(EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_VID_STICKY, x)
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_VID_STICKY_GET(x)\
+ FIELD_GET(EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_VID_STICKY, x)
+
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_STD_STICKY BIT(5)
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_STD_STICKY_SET(x)\
+ FIELD_PREP(EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_STD_STICKY, x)
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_STD_STICKY_GET(x)\
+ FIELD_GET(EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_STD_STICKY, x)
+
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_TCPUDP_STICKY BIT(4)
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_TCPUDP_STICKY_SET(x)\
+ FIELD_PREP(EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_TCPUDP_STICKY, x)
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_TCPUDP_STICKY_GET(x)\
+ FIELD_GET(EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_TCPUDP_STICKY, x)
+
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_VID_STICKY BIT(3)
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_VID_STICKY_SET(x)\
+ FIELD_PREP(EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_VID_STICKY, x)
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_VID_STICKY_GET(x)\
+ FIELD_GET(EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_VID_STICKY, x)
+
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_OTHER_STICKY BIT(2)
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_OTHER_STICKY_SET(x)\
+ FIELD_PREP(EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_OTHER_STICKY, x)
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_OTHER_STICKY_GET(x)\
+ FIELD_GET(EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_OTHER_STICKY, x)
+
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_ARP_STICKY BIT(1)
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_ARP_STICKY_SET(x)\
+ FIELD_PREP(EACL_SEC_LOOKUP_STICKY_SEC_TYPE_ARP_STICKY, x)
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_ARP_STICKY_GET(x)\
+ FIELD_GET(EACL_SEC_LOOKUP_STICKY_SEC_TYPE_ARP_STICKY, x)
+
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_ETYPE_STICKY BIT(0)
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_ETYPE_STICKY_SET(x)\
+ FIELD_PREP(EACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_ETYPE_STICKY, x)
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_ETYPE_STICKY_GET(x)\
+ FIELD_GET(EACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_ETYPE_STICKY, x)
+
+/* EACL:RAM_CTRL:RAM_INIT */
+#define EACL_RAM_INIT __REG(TARGET_EACL,\
+ 0, 1, 118736, 0, 1, 4, 0, 0, 1, 4)
+
+#define EACL_RAM_INIT_RAM_INIT BIT(1)
+#define EACL_RAM_INIT_RAM_INIT_SET(x)\
+ FIELD_PREP(EACL_RAM_INIT_RAM_INIT, x)
+#define EACL_RAM_INIT_RAM_INIT_GET(x)\
+ FIELD_GET(EACL_RAM_INIT_RAM_INIT, x)
+
+#define EACL_RAM_INIT_RAM_CFG_HOOK BIT(0)
+#define EACL_RAM_INIT_RAM_CFG_HOOK_SET(x)\
+ FIELD_PREP(EACL_RAM_INIT_RAM_CFG_HOOK, x)
+#define EACL_RAM_INIT_RAM_CFG_HOOK_GET(x)\
+ FIELD_GET(EACL_RAM_INIT_RAM_CFG_HOOK, x)
+
+/* FDMA:FDMA:FDMA_CH_ACTIVATE */
+#define FDMA_CH_ACTIVATE __REG(TARGET_FDMA,\
+ 0, 1, 8, 0, 1, 428, 0, 0, 1, 4)
+
+#define FDMA_CH_ACTIVATE_CH_ACTIVATE GENMASK(7, 0)
+#define FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(x)\
+ FIELD_PREP(FDMA_CH_ACTIVATE_CH_ACTIVATE, x)
+#define FDMA_CH_ACTIVATE_CH_ACTIVATE_GET(x)\
+ FIELD_GET(FDMA_CH_ACTIVATE_CH_ACTIVATE, x)
+
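+/*
+ * FDMA_CH_ACTIVATE is a per-bit channel strobe: writing BIT(ch) starts
+ * DCB processing on that channel. A sketch, assuming spx5_wr():
+ *
+ *	spx5_wr(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(channel)),
+ *		sparx5, FDMA_CH_ACTIVATE);
+ */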
+/* FDMA:FDMA:FDMA_CH_RELOAD */
+#define FDMA_CH_RELOAD __REG(TARGET_FDMA,\
+ 0, 1, 8, 0, 1, 428, 4, 0, 1, 4)
+
+#define FDMA_CH_RELOAD_CH_RELOAD GENMASK(7, 0)
+#define FDMA_CH_RELOAD_CH_RELOAD_SET(x)\
+ FIELD_PREP(FDMA_CH_RELOAD_CH_RELOAD, x)
+#define FDMA_CH_RELOAD_CH_RELOAD_GET(x)\
+ FIELD_GET(FDMA_CH_RELOAD_CH_RELOAD, x)
+
+/* FDMA:FDMA:FDMA_CH_DISABLE */
+#define FDMA_CH_DISABLE __REG(TARGET_FDMA,\
+ 0, 1, 8, 0, 1, 428, 8, 0, 1, 4)
+
+#define FDMA_CH_DISABLE_CH_DISABLE GENMASK(7, 0)
+#define FDMA_CH_DISABLE_CH_DISABLE_SET(x)\
+ FIELD_PREP(FDMA_CH_DISABLE_CH_DISABLE, x)
+#define FDMA_CH_DISABLE_CH_DISABLE_GET(x)\
+ FIELD_GET(FDMA_CH_DISABLE_CH_DISABLE, x)
+
+/* FDMA:FDMA:FDMA_DCB_LLP */
+#define FDMA_DCB_LLP(r) __REG(TARGET_FDMA,\
+ 0, 1, 8, 0, 1, 428, 52, r, 8, 4)
+
+/* FDMA:FDMA:FDMA_DCB_LLP1 */
+#define FDMA_DCB_LLP1(r) __REG(TARGET_FDMA,\
+ 0, 1, 8, 0, 1, 428, 84, r, 8, 4)
+
+/* FDMA:FDMA:FDMA_DCB_LLP_PREV */
+#define FDMA_DCB_LLP_PREV(r) __REG(TARGET_FDMA,\
+ 0, 1, 8, 0, 1, 428, 116, r, 8, 4)
+
+/* FDMA:FDMA:FDMA_DCB_LLP_PREV1 */
+#define FDMA_DCB_LLP_PREV1(r) __REG(TARGET_FDMA,\
+ 0, 1, 8, 0, 1, 428, 148, r, 8, 4)
+
+/* FDMA:FDMA:FDMA_CH_CFG */
+#define FDMA_CH_CFG(r) __REG(TARGET_FDMA,\
+ 0, 1, 8, 0, 1, 428, 224, r, 8, 4)
+
+#define FDMA_CH_CFG_CH_XTR_STATUS_MODE BIT(7)
+#define FDMA_CH_CFG_CH_XTR_STATUS_MODE_SET(x)\
+ FIELD_PREP(FDMA_CH_CFG_CH_XTR_STATUS_MODE, x)
+#define FDMA_CH_CFG_CH_XTR_STATUS_MODE_GET(x)\
+ FIELD_GET(FDMA_CH_CFG_CH_XTR_STATUS_MODE, x)
+
+#define FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY BIT(6)
+#define FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(x)\
+ FIELD_PREP(FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY, x)
+#define FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_GET(x)\
+ FIELD_GET(FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY, x)
+
+#define FDMA_CH_CFG_CH_INJ_PORT BIT(5)
+#define FDMA_CH_CFG_CH_INJ_PORT_SET(x)\
+ FIELD_PREP(FDMA_CH_CFG_CH_INJ_PORT, x)
+#define FDMA_CH_CFG_CH_INJ_PORT_GET(x)\
+ FIELD_GET(FDMA_CH_CFG_CH_INJ_PORT, x)
+
+#define FDMA_CH_CFG_CH_DCB_DB_CNT GENMASK(4, 1)
+#define FDMA_CH_CFG_CH_DCB_DB_CNT_SET(x)\
+ FIELD_PREP(FDMA_CH_CFG_CH_DCB_DB_CNT, x)
+#define FDMA_CH_CFG_CH_DCB_DB_CNT_GET(x)\
+ FIELD_GET(FDMA_CH_CFG_CH_DCB_DB_CNT, x)
+
+#define FDMA_CH_CFG_CH_MEM BIT(0)
+#define FDMA_CH_CFG_CH_MEM_SET(x)\
+ FIELD_PREP(FDMA_CH_CFG_CH_MEM, x)
+#define FDMA_CH_CFG_CH_MEM_GET(x)\
+ FIELD_GET(FDMA_CH_CFG_CH_MEM, x)
+
+/* FDMA:FDMA:FDMA_CH_TRANSLATE */
+#define FDMA_CH_TRANSLATE(r) __REG(TARGET_FDMA,\
+ 0, 1, 8, 0, 1, 428, 256, r, 8, 4)
+
+#define FDMA_CH_TRANSLATE_OFFSET GENMASK(15, 0)
+#define FDMA_CH_TRANSLATE_OFFSET_SET(x)\
+ FIELD_PREP(FDMA_CH_TRANSLATE_OFFSET, x)
+#define FDMA_CH_TRANSLATE_OFFSET_GET(x)\
+ FIELD_GET(FDMA_CH_TRANSLATE_OFFSET, x)
+
+/* FDMA:FDMA:FDMA_XTR_CFG */
+#define FDMA_XTR_CFG __REG(TARGET_FDMA,\
+ 0, 1, 8, 0, 1, 428, 364, 0, 1, 4)
+
+#define FDMA_XTR_CFG_XTR_FIFO_WM GENMASK(15, 11)
+#define FDMA_XTR_CFG_XTR_FIFO_WM_SET(x)\
+ FIELD_PREP(FDMA_XTR_CFG_XTR_FIFO_WM, x)
+#define FDMA_XTR_CFG_XTR_FIFO_WM_GET(x)\
+ FIELD_GET(FDMA_XTR_CFG_XTR_FIFO_WM, x)
+
+#define FDMA_XTR_CFG_XTR_ARB_SAT GENMASK(10, 0)
+#define FDMA_XTR_CFG_XTR_ARB_SAT_SET(x)\
+ FIELD_PREP(FDMA_XTR_CFG_XTR_ARB_SAT, x)
+#define FDMA_XTR_CFG_XTR_ARB_SAT_GET(x)\
+ FIELD_GET(FDMA_XTR_CFG_XTR_ARB_SAT, x)
+
+/* FDMA:FDMA:FDMA_PORT_CTRL */
+#define FDMA_PORT_CTRL(r) __REG(TARGET_FDMA,\
+ 0, 1, 8, 0, 1, 428, 376, r, 2, 4)
+
+#define FDMA_PORT_CTRL_INJ_STOP BIT(4)
+#define FDMA_PORT_CTRL_INJ_STOP_SET(x)\
+ FIELD_PREP(FDMA_PORT_CTRL_INJ_STOP, x)
+#define FDMA_PORT_CTRL_INJ_STOP_GET(x)\
+ FIELD_GET(FDMA_PORT_CTRL_INJ_STOP, x)
+
+#define FDMA_PORT_CTRL_INJ_STOP_FORCE BIT(3)
+#define FDMA_PORT_CTRL_INJ_STOP_FORCE_SET(x)\
+ FIELD_PREP(FDMA_PORT_CTRL_INJ_STOP_FORCE, x)
+#define FDMA_PORT_CTRL_INJ_STOP_FORCE_GET(x)\
+ FIELD_GET(FDMA_PORT_CTRL_INJ_STOP_FORCE, x)
+
+#define FDMA_PORT_CTRL_XTR_STOP BIT(2)
+#define FDMA_PORT_CTRL_XTR_STOP_SET(x)\
+ FIELD_PREP(FDMA_PORT_CTRL_XTR_STOP, x)
+#define FDMA_PORT_CTRL_XTR_STOP_GET(x)\
+ FIELD_GET(FDMA_PORT_CTRL_XTR_STOP, x)
+
+#define FDMA_PORT_CTRL_XTR_BUF_IS_EMPTY BIT(1)
+#define FDMA_PORT_CTRL_XTR_BUF_IS_EMPTY_SET(x)\
+ FIELD_PREP(FDMA_PORT_CTRL_XTR_BUF_IS_EMPTY, x)
+#define FDMA_PORT_CTRL_XTR_BUF_IS_EMPTY_GET(x)\
+ FIELD_GET(FDMA_PORT_CTRL_XTR_BUF_IS_EMPTY, x)
+
+#define FDMA_PORT_CTRL_XTR_BUF_RST BIT(0)
+#define FDMA_PORT_CTRL_XTR_BUF_RST_SET(x)\
+ FIELD_PREP(FDMA_PORT_CTRL_XTR_BUF_RST, x)
+#define FDMA_PORT_CTRL_XTR_BUF_RST_GET(x)\
+ FIELD_GET(FDMA_PORT_CTRL_XTR_BUF_RST, x)
+
+/* FDMA:FDMA:FDMA_INTR_DCB */
+#define FDMA_INTR_DCB __REG(TARGET_FDMA,\
+ 0, 1, 8, 0, 1, 428, 384, 0, 1, 4)
+
+#define FDMA_INTR_DCB_INTR_DCB GENMASK(7, 0)
+#define FDMA_INTR_DCB_INTR_DCB_SET(x)\
+ FIELD_PREP(FDMA_INTR_DCB_INTR_DCB, x)
+#define FDMA_INTR_DCB_INTR_DCB_GET(x)\
+ FIELD_GET(FDMA_INTR_DCB_INTR_DCB, x)
+
+/* FDMA:FDMA:FDMA_INTR_DCB_ENA */
+#define FDMA_INTR_DCB_ENA __REG(TARGET_FDMA,\
+ 0, 1, 8, 0, 1, 428, 388, 0, 1, 4)
+
+#define FDMA_INTR_DCB_ENA_INTR_DCB_ENA GENMASK(7, 0)
+#define FDMA_INTR_DCB_ENA_INTR_DCB_ENA_SET(x)\
+ FIELD_PREP(FDMA_INTR_DCB_ENA_INTR_DCB_ENA, x)
+#define FDMA_INTR_DCB_ENA_INTR_DCB_ENA_GET(x)\
+ FIELD_GET(FDMA_INTR_DCB_ENA_INTR_DCB_ENA, x)
+
+/* FDMA:FDMA:FDMA_INTR_DB */
+#define FDMA_INTR_DB __REG(TARGET_FDMA,\
+ 0, 1, 8, 0, 1, 428, 392, 0, 1, 4)
+
+#define FDMA_INTR_DB_INTR_DB GENMASK(7, 0)
+#define FDMA_INTR_DB_INTR_DB_SET(x)\
+ FIELD_PREP(FDMA_INTR_DB_INTR_DB, x)
+#define FDMA_INTR_DB_INTR_DB_GET(x)\
+ FIELD_GET(FDMA_INTR_DB_INTR_DB, x)
+
+/* FDMA:FDMA:FDMA_INTR_DB_ENA */
+#define FDMA_INTR_DB_ENA __REG(TARGET_FDMA,\
+ 0, 1, 8, 0, 1, 428, 396, 0, 1, 4)
+
+#define FDMA_INTR_DB_ENA_INTR_DB_ENA GENMASK(7, 0)
+#define FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(x)\
+ FIELD_PREP(FDMA_INTR_DB_ENA_INTR_DB_ENA, x)
+#define FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(x)\
+ FIELD_GET(FDMA_INTR_DB_ENA_INTR_DB_ENA, x)
+
+/* FDMA:FDMA:FDMA_INTR_ERR */
+#define FDMA_INTR_ERR __REG(TARGET_FDMA,\
+ 0, 1, 8, 0, 1, 428, 400, 0, 1, 4)
+
+#define FDMA_INTR_ERR_INTR_PORT_ERR GENMASK(9, 8)
+#define FDMA_INTR_ERR_INTR_PORT_ERR_SET(x)\
+ FIELD_PREP(FDMA_INTR_ERR_INTR_PORT_ERR, x)
+#define FDMA_INTR_ERR_INTR_PORT_ERR_GET(x)\
+ FIELD_GET(FDMA_INTR_ERR_INTR_PORT_ERR, x)
+
+#define FDMA_INTR_ERR_INTR_CH_ERR GENMASK(7, 0)
+#define FDMA_INTR_ERR_INTR_CH_ERR_SET(x)\
+ FIELD_PREP(FDMA_INTR_ERR_INTR_CH_ERR, x)
+#define FDMA_INTR_ERR_INTR_CH_ERR_GET(x)\
+ FIELD_GET(FDMA_INTR_ERR_INTR_CH_ERR, x)
+
+/* FDMA:FDMA:FDMA_ERRORS */
+#define FDMA_ERRORS __REG(TARGET_FDMA,\
+ 0, 1, 8, 0, 1, 428, 412, 0, 1, 4)
+
+#define FDMA_ERRORS_ERR_XTR_WR GENMASK(31, 30)
+#define FDMA_ERRORS_ERR_XTR_WR_SET(x)\
+ FIELD_PREP(FDMA_ERRORS_ERR_XTR_WR, x)
+#define FDMA_ERRORS_ERR_XTR_WR_GET(x)\
+ FIELD_GET(FDMA_ERRORS_ERR_XTR_WR, x)
+
+#define FDMA_ERRORS_ERR_XTR_OVF GENMASK(29, 28)
+#define FDMA_ERRORS_ERR_XTR_OVF_SET(x)\
+ FIELD_PREP(FDMA_ERRORS_ERR_XTR_OVF, x)
+#define FDMA_ERRORS_ERR_XTR_OVF_GET(x)\
+ FIELD_GET(FDMA_ERRORS_ERR_XTR_OVF, x)
+
+#define FDMA_ERRORS_ERR_XTR_TAXI32_OVF GENMASK(27, 26)
+#define FDMA_ERRORS_ERR_XTR_TAXI32_OVF_SET(x)\
+ FIELD_PREP(FDMA_ERRORS_ERR_XTR_TAXI32_OVF, x)
+#define FDMA_ERRORS_ERR_XTR_TAXI32_OVF_GET(x)\
+ FIELD_GET(FDMA_ERRORS_ERR_XTR_TAXI32_OVF, x)
+
+#define FDMA_ERRORS_ERR_DCB_XTR_DATAL GENMASK(25, 24)
+#define FDMA_ERRORS_ERR_DCB_XTR_DATAL_SET(x)\
+ FIELD_PREP(FDMA_ERRORS_ERR_DCB_XTR_DATAL, x)
+#define FDMA_ERRORS_ERR_DCB_XTR_DATAL_GET(x)\
+ FIELD_GET(FDMA_ERRORS_ERR_DCB_XTR_DATAL, x)
+
+#define FDMA_ERRORS_ERR_DCB_RD GENMASK(23, 16)
+#define FDMA_ERRORS_ERR_DCB_RD_SET(x)\
+ FIELD_PREP(FDMA_ERRORS_ERR_DCB_RD, x)
+#define FDMA_ERRORS_ERR_DCB_RD_GET(x)\
+ FIELD_GET(FDMA_ERRORS_ERR_DCB_RD, x)
+
+#define FDMA_ERRORS_ERR_INJ_RD GENMASK(15, 10)
+#define FDMA_ERRORS_ERR_INJ_RD_SET(x)\
+ FIELD_PREP(FDMA_ERRORS_ERR_INJ_RD, x)
+#define FDMA_ERRORS_ERR_INJ_RD_GET(x)\
+ FIELD_GET(FDMA_ERRORS_ERR_INJ_RD, x)
+
+#define FDMA_ERRORS_ERR_INJ_OUT_OF_SYNC GENMASK(9, 8)
+#define FDMA_ERRORS_ERR_INJ_OUT_OF_SYNC_SET(x)\
+ FIELD_PREP(FDMA_ERRORS_ERR_INJ_OUT_OF_SYNC, x)
+#define FDMA_ERRORS_ERR_INJ_OUT_OF_SYNC_GET(x)\
+ FIELD_GET(FDMA_ERRORS_ERR_INJ_OUT_OF_SYNC, x)
+
+#define FDMA_ERRORS_ERR_CH_WR GENMASK(7, 0)
+#define FDMA_ERRORS_ERR_CH_WR_SET(x)\
+ FIELD_PREP(FDMA_ERRORS_ERR_CH_WR, x)
+#define FDMA_ERRORS_ERR_CH_WR_GET(x)\
+ FIELD_GET(FDMA_ERRORS_ERR_CH_WR, x)
+
+/* FDMA:FDMA:FDMA_ERRORS_2 */
+#define FDMA_ERRORS_2 __REG(TARGET_FDMA,\
+ 0, 1, 8, 0, 1, 428, 416, 0, 1, 4)
+
+#define FDMA_ERRORS_2_ERR_XTR_FRAG GENMASK(1, 0)
+#define FDMA_ERRORS_2_ERR_XTR_FRAG_SET(x)\
+ FIELD_PREP(FDMA_ERRORS_2_ERR_XTR_FRAG, x)
+#define FDMA_ERRORS_2_ERR_XTR_FRAG_GET(x)\
+ FIELD_GET(FDMA_ERRORS_2_ERR_XTR_FRAG, x)
+
+/* FDMA:FDMA:FDMA_CTRL */
+#define FDMA_CTRL __REG(TARGET_FDMA,\
+ 0, 1, 8, 0, 1, 428, 424, 0, 1, 4)
+
+#define FDMA_CTRL_NRESET BIT(0)
+#define FDMA_CTRL_NRESET_SET(x)\
+ FIELD_PREP(FDMA_CTRL_NRESET, x)
+#define FDMA_CTRL_NRESET_GET(x)\
+ FIELD_GET(FDMA_CTRL_NRESET, x)
+
+/* DEVCPU_GCB:CHIP_REGS:CHIP_ID */
+#define GCB_CHIP_ID __REG(TARGET_GCB,\
+ 0, 1, 0, 0, 1, 424, 0, 0, 1, 4)
+
+#define GCB_CHIP_ID_REV_ID GENMASK(31, 28)
+#define GCB_CHIP_ID_REV_ID_SET(x)\
+ FIELD_PREP(GCB_CHIP_ID_REV_ID, x)
+#define GCB_CHIP_ID_REV_ID_GET(x)\
+ FIELD_GET(GCB_CHIP_ID_REV_ID, x)
+
+#define GCB_CHIP_ID_PART_ID GENMASK(27, 12)
+#define GCB_CHIP_ID_PART_ID_SET(x)\
+ FIELD_PREP(GCB_CHIP_ID_PART_ID, x)
+#define GCB_CHIP_ID_PART_ID_GET(x)\
+ FIELD_GET(GCB_CHIP_ID_PART_ID, x)
+
+#define GCB_CHIP_ID_MFG_ID GENMASK(11, 1)
+#define GCB_CHIP_ID_MFG_ID_SET(x)\
+ FIELD_PREP(GCB_CHIP_ID_MFG_ID, x)
+#define GCB_CHIP_ID_MFG_ID_GET(x)\
+ FIELD_GET(GCB_CHIP_ID_MFG_ID, x)
+
+#define GCB_CHIP_ID_ONE BIT(0)
+#define GCB_CHIP_ID_ONE_SET(x)\
+ FIELD_PREP(GCB_CHIP_ID_ONE, x)
+#define GCB_CHIP_ID_ONE_GET(x)\
+ FIELD_GET(GCB_CHIP_ID_ONE, x)
+
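+/*
+ * Decoding the chip identification register, e.g. during probe. A sketch
+ * assuming spx5_rd(); the layout is rev(31:28) / part(27:12) / mfg(11:1):
+ *
+ *	u32 chip_id = spx5_rd(sparx5, GCB_CHIP_ID);
+ *	u16 part = GCB_CHIP_ID_PART_ID_GET(chip_id);
+ *	u8 rev = GCB_CHIP_ID_REV_ID_GET(chip_id);
+ */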
+/* DEVCPU_GCB:CHIP_REGS:SOFT_RST */
+#define GCB_SOFT_RST __REG(TARGET_GCB,\
+ 0, 1, 0, 0, 1, 424, 8, 0, 1, 4)
+
+#define GCB_SOFT_RST_SOFT_NON_CFG_RST BIT(2)
+#define GCB_SOFT_RST_SOFT_NON_CFG_RST_SET(x)\
+ FIELD_PREP(GCB_SOFT_RST_SOFT_NON_CFG_RST, x)
+#define GCB_SOFT_RST_SOFT_NON_CFG_RST_GET(x)\
+ FIELD_GET(GCB_SOFT_RST_SOFT_NON_CFG_RST, x)
+
+#define GCB_SOFT_RST_SOFT_SWC_RST BIT(1)
+#define GCB_SOFT_RST_SOFT_SWC_RST_SET(x)\
+ FIELD_PREP(GCB_SOFT_RST_SOFT_SWC_RST, x)
+#define GCB_SOFT_RST_SOFT_SWC_RST_GET(x)\
+ FIELD_GET(GCB_SOFT_RST_SOFT_SWC_RST, x)
+
+#define GCB_SOFT_RST_SOFT_CHIP_RST BIT(0)
+#define GCB_SOFT_RST_SOFT_CHIP_RST_SET(x)\
+ FIELD_PREP(GCB_SOFT_RST_SOFT_CHIP_RST, x)
+#define GCB_SOFT_RST_SOFT_CHIP_RST_GET(x)\
+ FIELD_GET(GCB_SOFT_RST_SOFT_CHIP_RST, x)
+
+/* DEVCPU_GCB:CHIP_REGS:HW_SGPIO_SD_CFG */
+#define GCB_HW_SGPIO_SD_CFG __REG(TARGET_GCB,\
+ 0, 1, 0, 0, 1, 424, 20, 0, 1, 4)
+
+#define GCB_HW_SGPIO_SD_CFG_SD_HIGH_ENA BIT(1)
+#define GCB_HW_SGPIO_SD_CFG_SD_HIGH_ENA_SET(x)\
+ FIELD_PREP(GCB_HW_SGPIO_SD_CFG_SD_HIGH_ENA, x)
+#define GCB_HW_SGPIO_SD_CFG_SD_HIGH_ENA_GET(x)\
+ FIELD_GET(GCB_HW_SGPIO_SD_CFG_SD_HIGH_ENA, x)
+
+#define GCB_HW_SGPIO_SD_CFG_SD_MAP_SEL BIT(0)
+#define GCB_HW_SGPIO_SD_CFG_SD_MAP_SEL_SET(x)\
+ FIELD_PREP(GCB_HW_SGPIO_SD_CFG_SD_MAP_SEL, x)
+#define GCB_HW_SGPIO_SD_CFG_SD_MAP_SEL_GET(x)\
+ FIELD_GET(GCB_HW_SGPIO_SD_CFG_SD_MAP_SEL, x)
+
+/* DEVCPU_GCB:CHIP_REGS:HW_SGPIO_TO_SD_MAP_CFG */
+#define GCB_HW_SGPIO_TO_SD_MAP_CFG(r) __REG(TARGET_GCB,\
+ 0, 1, 0, 0, 1, 424, 24, r, 65, 4)
+
+#define GCB_HW_SGPIO_TO_SD_MAP_CFG_SGPIO_TO_SD_SEL GENMASK(8, 0)
+#define GCB_HW_SGPIO_TO_SD_MAP_CFG_SGPIO_TO_SD_SEL_SET(x)\
+ FIELD_PREP(GCB_HW_SGPIO_TO_SD_MAP_CFG_SGPIO_TO_SD_SEL, x)
+#define GCB_HW_SGPIO_TO_SD_MAP_CFG_SGPIO_TO_SD_SEL_GET(x)\
+ FIELD_GET(GCB_HW_SGPIO_TO_SD_MAP_CFG_SGPIO_TO_SD_SEL, x)
+
+/* DEVCPU_GCB:SIO_CTRL:SIO_CLOCK */
+#define GCB_SIO_CLOCK(g) __REG(TARGET_GCB,\
+ 0, 1, 876, g, 3, 280, 20, 0, 1, 4)
+
+#define GCB_SIO_CLOCK_SIO_CLK_FREQ GENMASK(19, 8)
+#define GCB_SIO_CLOCK_SIO_CLK_FREQ_SET(x)\
+ FIELD_PREP(GCB_SIO_CLOCK_SIO_CLK_FREQ, x)
+#define GCB_SIO_CLOCK_SIO_CLK_FREQ_GET(x)\
+ FIELD_GET(GCB_SIO_CLOCK_SIO_CLK_FREQ, x)
+
+#define GCB_SIO_CLOCK_SYS_CLK_PERIOD GENMASK(7, 0)
+#define GCB_SIO_CLOCK_SYS_CLK_PERIOD_SET(x)\
+ FIELD_PREP(GCB_SIO_CLOCK_SYS_CLK_PERIOD, x)
+#define GCB_SIO_CLOCK_SYS_CLK_PERIOD_GET(x)\
+ FIELD_GET(GCB_SIO_CLOCK_SYS_CLK_PERIOD, x)
+
+/* HSCH:HSCH_CFG:CIR_CFG */
+#define HSCH_CIR_CFG(g) __REG(TARGET_HSCH,\
+ 0, 1, 0, g, 5040, 32, 0, 0, 1, 4)
+
+#define HSCH_CIR_CFG_CIR_RATE GENMASK(22, 6)
+#define HSCH_CIR_CFG_CIR_RATE_SET(x)\
+ FIELD_PREP(HSCH_CIR_CFG_CIR_RATE, x)
+#define HSCH_CIR_CFG_CIR_RATE_GET(x)\
+ FIELD_GET(HSCH_CIR_CFG_CIR_RATE, x)
+
+#define HSCH_CIR_CFG_CIR_BURST GENMASK(5, 0)
+#define HSCH_CIR_CFG_CIR_BURST_SET(x)\
+ FIELD_PREP(HSCH_CIR_CFG_CIR_BURST, x)
+#define HSCH_CIR_CFG_CIR_BURST_GET(x)\
+ FIELD_GET(HSCH_CIR_CFG_CIR_BURST, x)
+
+/* HSCH:HSCH_CFG:EIR_CFG */
+#define HSCH_EIR_CFG(g) __REG(TARGET_HSCH,\
+ 0, 1, 0, g, 5040, 32, 4, 0, 1, 4)
+
+#define HSCH_EIR_CFG_EIR_RATE GENMASK(22, 6)
+#define HSCH_EIR_CFG_EIR_RATE_SET(x)\
+ FIELD_PREP(HSCH_EIR_CFG_EIR_RATE, x)
+#define HSCH_EIR_CFG_EIR_RATE_GET(x)\
+ FIELD_GET(HSCH_EIR_CFG_EIR_RATE, x)
+
+#define HSCH_EIR_CFG_EIR_BURST GENMASK(5, 0)
+#define HSCH_EIR_CFG_EIR_BURST_SET(x)\
+ FIELD_PREP(HSCH_EIR_CFG_EIR_BURST, x)
+#define HSCH_EIR_CFG_EIR_BURST_GET(x)\
+ FIELD_GET(HSCH_EIR_CFG_EIR_BURST, x)
+
+/* HSCH:HSCH_CFG:SE_CFG */
+#define HSCH_SE_CFG(g) __REG(TARGET_HSCH,\
+ 0, 1, 0, g, 5040, 32, 8, 0, 1, 4)
+
+#define HSCH_SE_CFG_SE_DWRR_CNT GENMASK(12, 6)
+#define HSCH_SE_CFG_SE_DWRR_CNT_SET(x)\
+ FIELD_PREP(HSCH_SE_CFG_SE_DWRR_CNT, x)
+#define HSCH_SE_CFG_SE_DWRR_CNT_GET(x)\
+ FIELD_GET(HSCH_SE_CFG_SE_DWRR_CNT, x)
+
+#define HSCH_SE_CFG_SE_AVB_ENA BIT(5)
+#define HSCH_SE_CFG_SE_AVB_ENA_SET(x)\
+ FIELD_PREP(HSCH_SE_CFG_SE_AVB_ENA, x)
+#define HSCH_SE_CFG_SE_AVB_ENA_GET(x)\
+ FIELD_GET(HSCH_SE_CFG_SE_AVB_ENA, x)
+
+#define HSCH_SE_CFG_SE_FRM_MODE GENMASK(4, 3)
+#define HSCH_SE_CFG_SE_FRM_MODE_SET(x)\
+ FIELD_PREP(HSCH_SE_CFG_SE_FRM_MODE, x)
+#define HSCH_SE_CFG_SE_FRM_MODE_GET(x)\
+ FIELD_GET(HSCH_SE_CFG_SE_FRM_MODE, x)
+
+#define HSCH_SE_CFG_SE_DWRR_FRM_MODE GENMASK(2, 1)
+#define HSCH_SE_CFG_SE_DWRR_FRM_MODE_SET(x)\
+ FIELD_PREP(HSCH_SE_CFG_SE_DWRR_FRM_MODE, x)
+#define HSCH_SE_CFG_SE_DWRR_FRM_MODE_GET(x)\
+ FIELD_GET(HSCH_SE_CFG_SE_DWRR_FRM_MODE, x)
+
+#define HSCH_SE_CFG_SE_STOP BIT(0)
+#define HSCH_SE_CFG_SE_STOP_SET(x)\
+ FIELD_PREP(HSCH_SE_CFG_SE_STOP, x)
+#define HSCH_SE_CFG_SE_STOP_GET(x)\
+ FIELD_GET(HSCH_SE_CFG_SE_STOP, x)
+
+/* HSCH:HSCH_CFG:SE_CONNECT */
+#define HSCH_SE_CONNECT(g) __REG(TARGET_HSCH,\
+ 0, 1, 0, g, 5040, 32, 12, 0, 1, 4)
+
+#define HSCH_SE_CONNECT_SE_LEAK_LINK GENMASK(15, 0)
+#define HSCH_SE_CONNECT_SE_LEAK_LINK_SET(x)\
+ FIELD_PREP(HSCH_SE_CONNECT_SE_LEAK_LINK, x)
+#define HSCH_SE_CONNECT_SE_LEAK_LINK_GET(x)\
+ FIELD_GET(HSCH_SE_CONNECT_SE_LEAK_LINK, x)
+
+/* HSCH:HSCH_CFG:SE_DLB_SENSE */
+#define HSCH_SE_DLB_SENSE(g) __REG(TARGET_HSCH,\
+ 0, 1, 0, g, 5040, 32, 16, 0, 1, 4)
+
+#define HSCH_SE_DLB_SENSE_SE_DLB_PRIO GENMASK(12, 10)
+#define HSCH_SE_DLB_SENSE_SE_DLB_PRIO_SET(x)\
+ FIELD_PREP(HSCH_SE_DLB_SENSE_SE_DLB_PRIO, x)
+#define HSCH_SE_DLB_SENSE_SE_DLB_PRIO_GET(x)\
+ FIELD_GET(HSCH_SE_DLB_SENSE_SE_DLB_PRIO, x)
+
+#define HSCH_SE_DLB_SENSE_SE_DLB_DPORT GENMASK(9, 3)
+#define HSCH_SE_DLB_SENSE_SE_DLB_DPORT_SET(x)\
+ FIELD_PREP(HSCH_SE_DLB_SENSE_SE_DLB_DPORT, x)
+#define HSCH_SE_DLB_SENSE_SE_DLB_DPORT_GET(x)\
+ FIELD_GET(HSCH_SE_DLB_SENSE_SE_DLB_DPORT, x)
+
+#define HSCH_SE_DLB_SENSE_SE_DLB_SE_ENA BIT(2)
+#define HSCH_SE_DLB_SENSE_SE_DLB_SE_ENA_SET(x)\
+ FIELD_PREP(HSCH_SE_DLB_SENSE_SE_DLB_SE_ENA, x)
+#define HSCH_SE_DLB_SENSE_SE_DLB_SE_ENA_GET(x)\
+ FIELD_GET(HSCH_SE_DLB_SENSE_SE_DLB_SE_ENA, x)
+
+#define HSCH_SE_DLB_SENSE_SE_DLB_PRIO_ENA BIT(1)
+#define HSCH_SE_DLB_SENSE_SE_DLB_PRIO_ENA_SET(x)\
+ FIELD_PREP(HSCH_SE_DLB_SENSE_SE_DLB_PRIO_ENA, x)
+#define HSCH_SE_DLB_SENSE_SE_DLB_PRIO_ENA_GET(x)\
+ FIELD_GET(HSCH_SE_DLB_SENSE_SE_DLB_PRIO_ENA, x)
+
+#define HSCH_SE_DLB_SENSE_SE_DLB_DPORT_ENA BIT(0)
+#define HSCH_SE_DLB_SENSE_SE_DLB_DPORT_ENA_SET(x)\
+ FIELD_PREP(HSCH_SE_DLB_SENSE_SE_DLB_DPORT_ENA, x)
+#define HSCH_SE_DLB_SENSE_SE_DLB_DPORT_ENA_GET(x)\
+ FIELD_GET(HSCH_SE_DLB_SENSE_SE_DLB_DPORT_ENA, x)
+
+/* HSCH:HSCH_DWRR:DWRR_ENTRY */
+#define HSCH_DWRR_ENTRY(g) __REG(TARGET_HSCH,\
+ 0, 1, 162816, g, 72, 4, 0, 0, 1, 4)
+
+#define HSCH_DWRR_ENTRY_DWRR_COST GENMASK(24, 20)
+#define HSCH_DWRR_ENTRY_DWRR_COST_SET(x)\
+ FIELD_PREP(HSCH_DWRR_ENTRY_DWRR_COST, x)
+#define HSCH_DWRR_ENTRY_DWRR_COST_GET(x)\
+ FIELD_GET(HSCH_DWRR_ENTRY_DWRR_COST, x)
+
+#define HSCH_DWRR_ENTRY_DWRR_BALANCE GENMASK(19, 0)
+#define HSCH_DWRR_ENTRY_DWRR_BALANCE_SET(x)\
+ FIELD_PREP(HSCH_DWRR_ENTRY_DWRR_BALANCE, x)
+#define HSCH_DWRR_ENTRY_DWRR_BALANCE_GET(x)\
+ FIELD_GET(HSCH_DWRR_ENTRY_DWRR_BALANCE, x)
+
+/* HSCH:HSCH_MISC:HSCH_CFG_CFG */
+#define HSCH_HSCH_CFG_CFG __REG(TARGET_HSCH,\
+ 0, 1, 163104, 0, 1, 648, 284, 0, 1, 4)
+
+#define HSCH_HSCH_CFG_CFG_CFG_SE_IDX GENMASK(26, 14)
+#define HSCH_HSCH_CFG_CFG_CFG_SE_IDX_SET(x)\
+ FIELD_PREP(HSCH_HSCH_CFG_CFG_CFG_SE_IDX, x)
+#define HSCH_HSCH_CFG_CFG_CFG_SE_IDX_GET(x)\
+ FIELD_GET(HSCH_HSCH_CFG_CFG_CFG_SE_IDX, x)
+
+#define HSCH_HSCH_CFG_CFG_HSCH_LAYER GENMASK(13, 12)
+#define HSCH_HSCH_CFG_CFG_HSCH_LAYER_SET(x)\
+ FIELD_PREP(HSCH_HSCH_CFG_CFG_HSCH_LAYER, x)
+#define HSCH_HSCH_CFG_CFG_HSCH_LAYER_GET(x)\
+ FIELD_GET(HSCH_HSCH_CFG_CFG_HSCH_LAYER, x)
+
+#define HSCH_HSCH_CFG_CFG_CSR_GRANT GENMASK(11, 0)
+#define HSCH_HSCH_CFG_CFG_CSR_GRANT_SET(x)\
+ FIELD_PREP(HSCH_HSCH_CFG_CFG_CSR_GRANT, x)
+#define HSCH_HSCH_CFG_CFG_CSR_GRANT_GET(x)\
+ FIELD_GET(HSCH_HSCH_CFG_CFG_CSR_GRANT, x)
+
+/* HSCH:HSCH_MISC:SYS_CLK_PER */
+#define HSCH_SYS_CLK_PER __REG(TARGET_HSCH,\
+ 0, 1, 163104, 0, 1, 648, 640, 0, 1, 4)
+
+#define HSCH_SYS_CLK_PER_100PS GENMASK(7, 0)
+#define HSCH_SYS_CLK_PER_100PS_SET(x)\
+ FIELD_PREP(HSCH_SYS_CLK_PER_100PS, x)
+#define HSCH_SYS_CLK_PER_100PS_GET(x)\
+ FIELD_GET(HSCH_SYS_CLK_PER_100PS, x)
+
+/* HSCH:HSCH_LEAK_LISTS:HSCH_TIMER_CFG */
+#define HSCH_HSCH_TIMER_CFG(g, r) __REG(TARGET_HSCH,\
+ 0, 1, 161664, g, 4, 32, 0, r, 4, 4)
+
+#define HSCH_HSCH_TIMER_CFG_LEAK_TIME GENMASK(17, 0)
+#define HSCH_HSCH_TIMER_CFG_LEAK_TIME_SET(x)\
+ FIELD_PREP(HSCH_HSCH_TIMER_CFG_LEAK_TIME, x)
+#define HSCH_HSCH_TIMER_CFG_LEAK_TIME_GET(x)\
+ FIELD_GET(HSCH_HSCH_TIMER_CFG_LEAK_TIME, x)
+
+/* HSCH:HSCH_LEAK_LISTS:HSCH_LEAK_CFG */
+#define HSCH_HSCH_LEAK_CFG(g, r) __REG(TARGET_HSCH,\
+ 0, 1, 161664, g, 4, 32, 16, r, 4, 4)
+
+#define HSCH_HSCH_LEAK_CFG_LEAK_FIRST GENMASK(16, 1)
+#define HSCH_HSCH_LEAK_CFG_LEAK_FIRST_SET(x)\
+ FIELD_PREP(HSCH_HSCH_LEAK_CFG_LEAK_FIRST, x)
+#define HSCH_HSCH_LEAK_CFG_LEAK_FIRST_GET(x)\
+ FIELD_GET(HSCH_HSCH_LEAK_CFG_LEAK_FIRST, x)
+
+#define HSCH_HSCH_LEAK_CFG_LEAK_ERR BIT(0)
+#define HSCH_HSCH_LEAK_CFG_LEAK_ERR_SET(x)\
+ FIELD_PREP(HSCH_HSCH_LEAK_CFG_LEAK_ERR, x)
+#define HSCH_HSCH_LEAK_CFG_LEAK_ERR_GET(x)\
+ FIELD_GET(HSCH_HSCH_LEAK_CFG_LEAK_ERR, x)
+
+/* HSCH:SYSTEM:FLUSH_CTRL */
+#define HSCH_FLUSH_CTRL __REG(TARGET_HSCH,\
+ 0, 1, 184000, 0, 1, 312, 4, 0, 1, 4)
+
+#define HSCH_FLUSH_CTRL_FLUSH_ENA BIT(27)
+#define HSCH_FLUSH_CTRL_FLUSH_ENA_SET(x)\
+ FIELD_PREP(HSCH_FLUSH_CTRL_FLUSH_ENA, x)
+#define HSCH_FLUSH_CTRL_FLUSH_ENA_GET(x)\
+ FIELD_GET(HSCH_FLUSH_CTRL_FLUSH_ENA, x)
+
+#define HSCH_FLUSH_CTRL_FLUSH_SRC BIT(26)
+#define HSCH_FLUSH_CTRL_FLUSH_SRC_SET(x)\
+ FIELD_PREP(HSCH_FLUSH_CTRL_FLUSH_SRC, x)
+#define HSCH_FLUSH_CTRL_FLUSH_SRC_GET(x)\
+ FIELD_GET(HSCH_FLUSH_CTRL_FLUSH_SRC, x)
+
+#define HSCH_FLUSH_CTRL_FLUSH_DST BIT(25)
+#define HSCH_FLUSH_CTRL_FLUSH_DST_SET(x)\
+ FIELD_PREP(HSCH_FLUSH_CTRL_FLUSH_DST, x)
+#define HSCH_FLUSH_CTRL_FLUSH_DST_GET(x)\
+ FIELD_GET(HSCH_FLUSH_CTRL_FLUSH_DST, x)
+
+#define HSCH_FLUSH_CTRL_FLUSH_PORT GENMASK(24, 18)
+#define HSCH_FLUSH_CTRL_FLUSH_PORT_SET(x)\
+ FIELD_PREP(HSCH_FLUSH_CTRL_FLUSH_PORT, x)
+#define HSCH_FLUSH_CTRL_FLUSH_PORT_GET(x)\
+ FIELD_GET(HSCH_FLUSH_CTRL_FLUSH_PORT, x)
+
+#define HSCH_FLUSH_CTRL_FLUSH_QUEUE BIT(17)
+#define HSCH_FLUSH_CTRL_FLUSH_QUEUE_SET(x)\
+ FIELD_PREP(HSCH_FLUSH_CTRL_FLUSH_QUEUE, x)
+#define HSCH_FLUSH_CTRL_FLUSH_QUEUE_GET(x)\
+ FIELD_GET(HSCH_FLUSH_CTRL_FLUSH_QUEUE, x)
+
+#define HSCH_FLUSH_CTRL_FLUSH_SE BIT(16)
+#define HSCH_FLUSH_CTRL_FLUSH_SE_SET(x)\
+ FIELD_PREP(HSCH_FLUSH_CTRL_FLUSH_SE, x)
+#define HSCH_FLUSH_CTRL_FLUSH_SE_GET(x)\
+ FIELD_GET(HSCH_FLUSH_CTRL_FLUSH_SE, x)
+
+#define HSCH_FLUSH_CTRL_FLUSH_HIER GENMASK(15, 0)
+#define HSCH_FLUSH_CTRL_FLUSH_HIER_SET(x)\
+ FIELD_PREP(HSCH_FLUSH_CTRL_FLUSH_HIER, x)
+#define HSCH_FLUSH_CTRL_FLUSH_HIER_GET(x)\
+ FIELD_GET(HSCH_FLUSH_CTRL_FLUSH_HIER, x)
+
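+/*
+ * A scheduler flush is typically initiated by setting FLUSH_ENA together
+ * with the port/scope fields, then polling until the hardware clears
+ * FLUSH_ENA. A hedged sketch, assuming spx5_rmw() and spx5_rd():
+ *
+ *	spx5_rmw(HSCH_FLUSH_CTRL_FLUSH_ENA_SET(1) |
+ *		 HSCH_FLUSH_CTRL_FLUSH_PORT_SET(portno) |
+ *		 HSCH_FLUSH_CTRL_FLUSH_DST_SET(1),
+ *		 HSCH_FLUSH_CTRL_FLUSH_ENA |
+ *		 HSCH_FLUSH_CTRL_FLUSH_PORT |
+ *		 HSCH_FLUSH_CTRL_FLUSH_DST,
+ *		 sparx5, HSCH_FLUSH_CTRL);
+ *	while (HSCH_FLUSH_CTRL_FLUSH_ENA_GET(spx5_rd(sparx5, HSCH_FLUSH_CTRL)))
+ *		usleep_range(100, 200);
+ */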
+/* HSCH:SYSTEM:PORT_MODE */
+#define HSCH_PORT_MODE(r) __REG(TARGET_HSCH,\
+ 0, 1, 184000, 0, 1, 312, 8, r, 70, 4)
+
+#define HSCH_PORT_MODE_DEQUEUE_DIS BIT(4)
+#define HSCH_PORT_MODE_DEQUEUE_DIS_SET(x)\
+ FIELD_PREP(HSCH_PORT_MODE_DEQUEUE_DIS, x)
+#define HSCH_PORT_MODE_DEQUEUE_DIS_GET(x)\
+ FIELD_GET(HSCH_PORT_MODE_DEQUEUE_DIS, x)
+
+#define HSCH_PORT_MODE_AGE_DIS BIT(3)
+#define HSCH_PORT_MODE_AGE_DIS_SET(x)\
+ FIELD_PREP(HSCH_PORT_MODE_AGE_DIS, x)
+#define HSCH_PORT_MODE_AGE_DIS_GET(x)\
+ FIELD_GET(HSCH_PORT_MODE_AGE_DIS, x)
+
+#define HSCH_PORT_MODE_TRUNC_ENA BIT(2)
+#define HSCH_PORT_MODE_TRUNC_ENA_SET(x)\
+ FIELD_PREP(HSCH_PORT_MODE_TRUNC_ENA, x)
+#define HSCH_PORT_MODE_TRUNC_ENA_GET(x)\
+ FIELD_GET(HSCH_PORT_MODE_TRUNC_ENA, x)
+
+#define HSCH_PORT_MODE_EIR_REMARK_ENA BIT(1)
+#define HSCH_PORT_MODE_EIR_REMARK_ENA_SET(x)\
+ FIELD_PREP(HSCH_PORT_MODE_EIR_REMARK_ENA, x)
+#define HSCH_PORT_MODE_EIR_REMARK_ENA_GET(x)\
+ FIELD_GET(HSCH_PORT_MODE_EIR_REMARK_ENA, x)
+
+#define HSCH_PORT_MODE_CPU_PRIO_MODE BIT(0)
+#define HSCH_PORT_MODE_CPU_PRIO_MODE_SET(x)\
+ FIELD_PREP(HSCH_PORT_MODE_CPU_PRIO_MODE, x)
+#define HSCH_PORT_MODE_CPU_PRIO_MODE_GET(x)\
+ FIELD_GET(HSCH_PORT_MODE_CPU_PRIO_MODE, x)
+
+/* HSCH:SYSTEM:OUTB_SHARE_ENA */
+#define HSCH_OUTB_SHARE_ENA(r) __REG(TARGET_HSCH,\
+ 0, 1, 184000, 0, 1, 312, 288, r, 5, 4)
+
+#define HSCH_OUTB_SHARE_ENA_OUTB_SHARE_ENA GENMASK(7, 0)
+#define HSCH_OUTB_SHARE_ENA_OUTB_SHARE_ENA_SET(x)\
+ FIELD_PREP(HSCH_OUTB_SHARE_ENA_OUTB_SHARE_ENA, x)
+#define HSCH_OUTB_SHARE_ENA_OUTB_SHARE_ENA_GET(x)\
+ FIELD_GET(HSCH_OUTB_SHARE_ENA_OUTB_SHARE_ENA, x)
+
+/* HSCH:MMGT:RESET_CFG */
+#define HSCH_RESET_CFG __REG(TARGET_HSCH,\
+ 0, 1, 162368, 0, 1, 16, 8, 0, 1, 4)
+
+#define HSCH_RESET_CFG_CORE_ENA BIT(0)
+#define HSCH_RESET_CFG_CORE_ENA_SET(x)\
+ FIELD_PREP(HSCH_RESET_CFG_CORE_ENA, x)
+#define HSCH_RESET_CFG_CORE_ENA_GET(x)\
+ FIELD_GET(HSCH_RESET_CFG_CORE_ENA, x)
+
+/* HSCH:TAS_CONFIG:TAS_STATEMACHINE_CFG */
+#define HSCH_TAS_STATEMACHINE_CFG __REG(TARGET_HSCH,\
+ 0, 1, 162384, 0, 1, 12, 8, 0, 1, 4)
+
+#define HSCH_TAS_STATEMACHINE_CFG_REVISIT_DLY GENMASK(7, 0)
+#define HSCH_TAS_STATEMACHINE_CFG_REVISIT_DLY_SET(x)\
+ FIELD_PREP(HSCH_TAS_STATEMACHINE_CFG_REVISIT_DLY, x)
+#define HSCH_TAS_STATEMACHINE_CFG_REVISIT_DLY_GET(x)\
+ FIELD_GET(HSCH_TAS_STATEMACHINE_CFG_REVISIT_DLY, x)
+
+/* LRN:COMMON:COMMON_ACCESS_CTRL */
+#define LRN_COMMON_ACCESS_CTRL __REG(TARGET_LRN,\
+ 0, 1, 0, 0, 1, 72, 0, 0, 1, 4)
+
+#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_COL GENMASK(21, 20)
+#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_COL_SET(x)\
+ FIELD_PREP(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_COL, x)
+#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_COL_GET(x)\
+ FIELD_GET(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_COL, x)
+
+#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_TYPE BIT(19)
+#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_TYPE_SET(x)\
+ FIELD_PREP(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_TYPE, x)
+#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_TYPE_GET(x)\
+ FIELD_GET(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_TYPE, x)
+
+#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_ROW GENMASK(18, 5)
+#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_ROW_SET(x)\
+ FIELD_PREP(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_ROW, x)
+#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_ROW_GET(x)\
+ FIELD_GET(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_ROW, x)
+
+#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD GENMASK(4, 1)
+#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD_SET(x)\
+ FIELD_PREP(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD, x)
+#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD_GET(x)\
+ FIELD_GET(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD, x)
+
+#define LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT BIT(0)
+#define LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_SET(x)\
+ FIELD_PREP(LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT, x)
+#define LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_GET(x)\
+ FIELD_GET(LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT, x)
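+
+/* Usage sketch, assuming the spx5_wr() accessor from the sparx5
+ * driver: a MAC table operation is started by programming
+ * MAC_ACCESS_CFG_0..3 below, then writing the command together with
+ * the one-shot bit:
+ *
+ *   spx5_wr(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD_SET(cmd) |
+ *           LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_SET(1),
+ *           sparx5, LRN_COMMON_ACCESS_CTRL);
+ *
+ * followed by polling LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_GET()
+ * on the read-back value until it returns 0.
+ */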
+
+/* LRN:COMMON:MAC_ACCESS_CFG_0 */
+#define LRN_MAC_ACCESS_CFG_0 __REG(TARGET_LRN,\
+ 0, 1, 0, 0, 1, 72, 4, 0, 1, 4)
+
+#define LRN_MAC_ACCESS_CFG_0_MAC_ENTRY_FID GENMASK(28, 16)
+#define LRN_MAC_ACCESS_CFG_0_MAC_ENTRY_FID_SET(x)\
+ FIELD_PREP(LRN_MAC_ACCESS_CFG_0_MAC_ENTRY_FID, x)
+#define LRN_MAC_ACCESS_CFG_0_MAC_ENTRY_FID_GET(x)\
+ FIELD_GET(LRN_MAC_ACCESS_CFG_0_MAC_ENTRY_FID, x)
+
+#define LRN_MAC_ACCESS_CFG_0_MAC_ENTRY_MAC_MSB GENMASK(15, 0)
+#define LRN_MAC_ACCESS_CFG_0_MAC_ENTRY_MAC_MSB_SET(x)\
+ FIELD_PREP(LRN_MAC_ACCESS_CFG_0_MAC_ENTRY_MAC_MSB, x)
+#define LRN_MAC_ACCESS_CFG_0_MAC_ENTRY_MAC_MSB_GET(x)\
+ FIELD_GET(LRN_MAC_ACCESS_CFG_0_MAC_ENTRY_MAC_MSB, x)
+
+/* LRN:COMMON:MAC_ACCESS_CFG_1 */
+#define LRN_MAC_ACCESS_CFG_1 __REG(TARGET_LRN,\
+ 0, 1, 0, 0, 1, 72, 8, 0, 1, 4)
+
+/* LRN:COMMON:MAC_ACCESS_CFG_2 */
+#define LRN_MAC_ACCESS_CFG_2 __REG(TARGET_LRN,\
+ 0, 1, 0, 0, 1, 72, 12, 0, 1, 4)
+
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_SRC_KILL_FWD BIT(28)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_SRC_KILL_FWD_SET(x)\
+ FIELD_PREP(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_SRC_KILL_FWD, x)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_SRC_KILL_FWD_GET(x)\
+ FIELD_GET(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_SRC_KILL_FWD, x)
+
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_NXT_LRN_ALL BIT(27)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_NXT_LRN_ALL_SET(x)\
+ FIELD_PREP(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_NXT_LRN_ALL, x)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_NXT_LRN_ALL_GET(x)\
+ FIELD_GET(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_NXT_LRN_ALL, x)
+
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_CPU_QU GENMASK(26, 24)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_CPU_QU_SET(x)\
+ FIELD_PREP(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_CPU_QU, x)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_CPU_QU_GET(x)\
+ FIELD_GET(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_CPU_QU, x)
+
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_CPU_COPY BIT(23)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_CPU_COPY_SET(x)\
+ FIELD_PREP(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_CPU_COPY, x)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_CPU_COPY_GET(x)\
+ FIELD_GET(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_CPU_COPY, x)
+
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLAN_IGNORE BIT(22)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLAN_IGNORE_SET(x)\
+ FIELD_PREP(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLAN_IGNORE, x)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLAN_IGNORE_GET(x)\
+ FIELD_GET(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLAN_IGNORE, x)
+
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_MIRROR BIT(21)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_MIRROR_SET(x)\
+ FIELD_PREP(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_MIRROR, x)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_MIRROR_GET(x)\
+ FIELD_GET(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_MIRROR, x)
+
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_AGE_FLAG GENMASK(20, 19)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_AGE_FLAG_SET(x)\
+ FIELD_PREP(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_AGE_FLAG, x)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_AGE_FLAG_GET(x)\
+ FIELD_GET(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_AGE_FLAG, x)
+
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_AGE_INTERVAL GENMASK(18, 17)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_AGE_INTERVAL_SET(x)\
+ FIELD_PREP(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_AGE_INTERVAL, x)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_AGE_INTERVAL_GET(x)\
+ FIELD_GET(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_AGE_INTERVAL, x)
+
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_LOCKED BIT(16)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_LOCKED_SET(x)\
+ FIELD_PREP(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_LOCKED, x)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_LOCKED_GET(x)\
+ FIELD_GET(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_LOCKED, x)
+
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLD BIT(15)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLD_SET(x)\
+ FIELD_PREP(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLD, x)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLD_GET(x)\
+ FIELD_GET(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLD, x)
+
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_TYPE GENMASK(14, 12)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_TYPE_SET(x)\
+ FIELD_PREP(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_TYPE, x)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_TYPE_GET(x)\
+ FIELD_GET(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_TYPE, x)
+
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR GENMASK(11, 0)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_SET(x)\
+ FIELD_PREP(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR, x)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_GET(x)\
+ FIELD_GET(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR, x)
+
+/* LRN:COMMON:MAC_ACCESS_CFG_3 */
+#define LRN_MAC_ACCESS_CFG_3 __REG(TARGET_LRN,\
+ 0, 1, 0, 0, 1, 72, 16, 0, 1, 4)
+
+#define LRN_MAC_ACCESS_CFG_3_MAC_ENTRY_ISDX_LIMIT_IDX GENMASK(10, 0)
+#define LRN_MAC_ACCESS_CFG_3_MAC_ENTRY_ISDX_LIMIT_IDX_SET(x)\
+ FIELD_PREP(LRN_MAC_ACCESS_CFG_3_MAC_ENTRY_ISDX_LIMIT_IDX, x)
+#define LRN_MAC_ACCESS_CFG_3_MAC_ENTRY_ISDX_LIMIT_IDX_GET(x)\
+ FIELD_GET(LRN_MAC_ACCESS_CFG_3_MAC_ENTRY_ISDX_LIMIT_IDX, x)
+
+/* LRN:COMMON:SCAN_NEXT_CFG */
+#define LRN_SCAN_NEXT_CFG __REG(TARGET_LRN,\
+ 0, 1, 0, 0, 1, 72, 20, 0, 1, 4)
+
+#define LRN_SCAN_NEXT_CFG_SCAN_AGE_FLAG_UPDATE_SEL GENMASK(21, 19)
+#define LRN_SCAN_NEXT_CFG_SCAN_AGE_FLAG_UPDATE_SEL_SET(x)\
+ FIELD_PREP(LRN_SCAN_NEXT_CFG_SCAN_AGE_FLAG_UPDATE_SEL, x)
+#define LRN_SCAN_NEXT_CFG_SCAN_AGE_FLAG_UPDATE_SEL_GET(x)\
+ FIELD_GET(LRN_SCAN_NEXT_CFG_SCAN_AGE_FLAG_UPDATE_SEL, x)
+
+#define LRN_SCAN_NEXT_CFG_SCAN_NXT_LRN_ALL_UPDATE_SEL GENMASK(18, 17)
+#define LRN_SCAN_NEXT_CFG_SCAN_NXT_LRN_ALL_UPDATE_SEL_SET(x)\
+ FIELD_PREP(LRN_SCAN_NEXT_CFG_SCAN_NXT_LRN_ALL_UPDATE_SEL, x)
+#define LRN_SCAN_NEXT_CFG_SCAN_NXT_LRN_ALL_UPDATE_SEL_GET(x)\
+ FIELD_GET(LRN_SCAN_NEXT_CFG_SCAN_NXT_LRN_ALL_UPDATE_SEL, x)
+
+#define LRN_SCAN_NEXT_CFG_SCAN_AGE_FILTER_SEL GENMASK(16, 15)
+#define LRN_SCAN_NEXT_CFG_SCAN_AGE_FILTER_SEL_SET(x)\
+ FIELD_PREP(LRN_SCAN_NEXT_CFG_SCAN_AGE_FILTER_SEL, x)
+#define LRN_SCAN_NEXT_CFG_SCAN_AGE_FILTER_SEL_GET(x)\
+ FIELD_GET(LRN_SCAN_NEXT_CFG_SCAN_AGE_FILTER_SEL, x)
+
+#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_MOVE_FOUND_ENA BIT(14)
+#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_MOVE_FOUND_ENA_SET(x)\
+ FIELD_PREP(LRN_SCAN_NEXT_CFG_SCAN_NEXT_MOVE_FOUND_ENA, x)
+#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_MOVE_FOUND_ENA_GET(x)\
+ FIELD_GET(LRN_SCAN_NEXT_CFG_SCAN_NEXT_MOVE_FOUND_ENA, x)
+
+#define LRN_SCAN_NEXT_CFG_NXT_LRN_ALL_FILTER_ENA BIT(13)
+#define LRN_SCAN_NEXT_CFG_NXT_LRN_ALL_FILTER_ENA_SET(x)\
+ FIELD_PREP(LRN_SCAN_NEXT_CFG_NXT_LRN_ALL_FILTER_ENA, x)
+#define LRN_SCAN_NEXT_CFG_NXT_LRN_ALL_FILTER_ENA_GET(x)\
+ FIELD_GET(LRN_SCAN_NEXT_CFG_NXT_LRN_ALL_FILTER_ENA, x)
+
+#define LRN_SCAN_NEXT_CFG_SCAN_USE_PORT_FILTER_ENA BIT(12)
+#define LRN_SCAN_NEXT_CFG_SCAN_USE_PORT_FILTER_ENA_SET(x)\
+ FIELD_PREP(LRN_SCAN_NEXT_CFG_SCAN_USE_PORT_FILTER_ENA, x)
+#define LRN_SCAN_NEXT_CFG_SCAN_USE_PORT_FILTER_ENA_GET(x)\
+ FIELD_GET(LRN_SCAN_NEXT_CFG_SCAN_USE_PORT_FILTER_ENA, x)
+
+#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_REMOVE_FOUND_ENA BIT(11)
+#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_REMOVE_FOUND_ENA_SET(x)\
+ FIELD_PREP(LRN_SCAN_NEXT_CFG_SCAN_NEXT_REMOVE_FOUND_ENA, x)
+#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_REMOVE_FOUND_ENA_GET(x)\
+ FIELD_GET(LRN_SCAN_NEXT_CFG_SCAN_NEXT_REMOVE_FOUND_ENA, x)
+
+#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_UNTIL_FOUND_ENA BIT(10)
+#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_UNTIL_FOUND_ENA_SET(x)\
+ FIELD_PREP(LRN_SCAN_NEXT_CFG_SCAN_NEXT_UNTIL_FOUND_ENA, x)
+#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_UNTIL_FOUND_ENA_GET(x)\
+ FIELD_GET(LRN_SCAN_NEXT_CFG_SCAN_NEXT_UNTIL_FOUND_ENA, x)
+
+#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_INC_AGE_BITS_ENA BIT(9)
+#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_INC_AGE_BITS_ENA_SET(x)\
+ FIELD_PREP(LRN_SCAN_NEXT_CFG_SCAN_NEXT_INC_AGE_BITS_ENA, x)
+#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_INC_AGE_BITS_ENA_GET(x)\
+ FIELD_GET(LRN_SCAN_NEXT_CFG_SCAN_NEXT_INC_AGE_BITS_ENA, x)
+
+#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_AGED_ONLY_ENA BIT(8)
+#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_AGED_ONLY_ENA_SET(x)\
+ FIELD_PREP(LRN_SCAN_NEXT_CFG_SCAN_NEXT_AGED_ONLY_ENA, x)
+#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_AGED_ONLY_ENA_GET(x)\
+ FIELD_GET(LRN_SCAN_NEXT_CFG_SCAN_NEXT_AGED_ONLY_ENA, x)
+
+#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_IGNORE_LOCKED_ENA BIT(7)
+#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_IGNORE_LOCKED_ENA_SET(x)\
+ FIELD_PREP(LRN_SCAN_NEXT_CFG_SCAN_NEXT_IGNORE_LOCKED_ENA, x)
+#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_IGNORE_LOCKED_ENA_GET(x)\
+ FIELD_GET(LRN_SCAN_NEXT_CFG_SCAN_NEXT_IGNORE_LOCKED_ENA, x)
+
+#define LRN_SCAN_NEXT_CFG_SCAN_AGE_INTERVAL_MASK GENMASK(6, 3)
+#define LRN_SCAN_NEXT_CFG_SCAN_AGE_INTERVAL_MASK_SET(x)\
+ FIELD_PREP(LRN_SCAN_NEXT_CFG_SCAN_AGE_INTERVAL_MASK, x)
+#define LRN_SCAN_NEXT_CFG_SCAN_AGE_INTERVAL_MASK_GET(x)\
+ FIELD_GET(LRN_SCAN_NEXT_CFG_SCAN_AGE_INTERVAL_MASK, x)
+
+#define LRN_SCAN_NEXT_CFG_ISDX_LIMIT_IDX_FILTER_ENA BIT(2)
+#define LRN_SCAN_NEXT_CFG_ISDX_LIMIT_IDX_FILTER_ENA_SET(x)\
+ FIELD_PREP(LRN_SCAN_NEXT_CFG_ISDX_LIMIT_IDX_FILTER_ENA, x)
+#define LRN_SCAN_NEXT_CFG_ISDX_LIMIT_IDX_FILTER_ENA_GET(x)\
+ FIELD_GET(LRN_SCAN_NEXT_CFG_ISDX_LIMIT_IDX_FILTER_ENA, x)
+
+#define LRN_SCAN_NEXT_CFG_FID_FILTER_ENA BIT(1)
+#define LRN_SCAN_NEXT_CFG_FID_FILTER_ENA_SET(x)\
+ FIELD_PREP(LRN_SCAN_NEXT_CFG_FID_FILTER_ENA, x)
+#define LRN_SCAN_NEXT_CFG_FID_FILTER_ENA_GET(x)\
+ FIELD_GET(LRN_SCAN_NEXT_CFG_FID_FILTER_ENA, x)
+
+#define LRN_SCAN_NEXT_CFG_ADDR_FILTER_ENA BIT(0)
+#define LRN_SCAN_NEXT_CFG_ADDR_FILTER_ENA_SET(x)\
+ FIELD_PREP(LRN_SCAN_NEXT_CFG_ADDR_FILTER_ENA, x)
+#define LRN_SCAN_NEXT_CFG_ADDR_FILTER_ENA_GET(x)\
+ FIELD_GET(LRN_SCAN_NEXT_CFG_ADDR_FILTER_ENA, x)
+
+/* LRN:COMMON:SCAN_NEXT_CFG_1 */
+#define LRN_SCAN_NEXT_CFG_1 __REG(TARGET_LRN,\
+ 0, 1, 0, 0, 1, 72, 24, 0, 1, 4)
+
+#define LRN_SCAN_NEXT_CFG_1_PORT_MOVE_NEW_ADDR GENMASK(30, 16)
+#define LRN_SCAN_NEXT_CFG_1_PORT_MOVE_NEW_ADDR_SET(x)\
+ FIELD_PREP(LRN_SCAN_NEXT_CFG_1_PORT_MOVE_NEW_ADDR, x)
+#define LRN_SCAN_NEXT_CFG_1_PORT_MOVE_NEW_ADDR_GET(x)\
+ FIELD_GET(LRN_SCAN_NEXT_CFG_1_PORT_MOVE_NEW_ADDR, x)
+
+#define LRN_SCAN_NEXT_CFG_1_SCAN_ENTRY_ADDR_MASK GENMASK(14, 0)
+#define LRN_SCAN_NEXT_CFG_1_SCAN_ENTRY_ADDR_MASK_SET(x)\
+ FIELD_PREP(LRN_SCAN_NEXT_CFG_1_SCAN_ENTRY_ADDR_MASK, x)
+#define LRN_SCAN_NEXT_CFG_1_SCAN_ENTRY_ADDR_MASK_GET(x)\
+ FIELD_GET(LRN_SCAN_NEXT_CFG_1_SCAN_ENTRY_ADDR_MASK, x)
+
+/* LRN:COMMON:AUTOAGE_CFG */
+#define LRN_AUTOAGE_CFG(r) __REG(TARGET_LRN,\
+ 0, 1, 0, 0, 1, 72, 36, r, 4, 4)
+
+#define LRN_AUTOAGE_CFG_UNIT_SIZE GENMASK(29, 28)
+#define LRN_AUTOAGE_CFG_UNIT_SIZE_SET(x)\
+ FIELD_PREP(LRN_AUTOAGE_CFG_UNIT_SIZE, x)
+#define LRN_AUTOAGE_CFG_UNIT_SIZE_GET(x)\
+ FIELD_GET(LRN_AUTOAGE_CFG_UNIT_SIZE, x)
+
+#define LRN_AUTOAGE_CFG_PERIOD_VAL GENMASK(27, 0)
+#define LRN_AUTOAGE_CFG_PERIOD_VAL_SET(x)\
+ FIELD_PREP(LRN_AUTOAGE_CFG_PERIOD_VAL, x)
+#define LRN_AUTOAGE_CFG_PERIOD_VAL_GET(x)\
+ FIELD_GET(LRN_AUTOAGE_CFG_PERIOD_VAL, x)
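+
+/* Going by the field names, the auto-age scan period is PERIOD_VAL
+ * counted in the time unit selected by UNIT_SIZE; the unit encoding
+ * itself is chosen by the driver when programming ageing and is not
+ * spelled out in this header.
+ */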
+
+/* LRN:COMMON:AUTOAGE_CFG_1 */
+#define LRN_AUTOAGE_CFG_1 __REG(TARGET_LRN,\
+ 0, 1, 0, 0, 1, 72, 52, 0, 1, 4)
+
+#define LRN_AUTOAGE_CFG_1_PAUSE_AUTO_AGE_ENA BIT(25)
+#define LRN_AUTOAGE_CFG_1_PAUSE_AUTO_AGE_ENA_SET(x)\
+ FIELD_PREP(LRN_AUTOAGE_CFG_1_PAUSE_AUTO_AGE_ENA, x)
+#define LRN_AUTOAGE_CFG_1_PAUSE_AUTO_AGE_ENA_GET(x)\
+ FIELD_GET(LRN_AUTOAGE_CFG_1_PAUSE_AUTO_AGE_ENA, x)
+
+#define LRN_AUTOAGE_CFG_1_CELLS_BETWEEN_ENTRY_SCAN GENMASK(24, 15)
+#define LRN_AUTOAGE_CFG_1_CELLS_BETWEEN_ENTRY_SCAN_SET(x)\
+ FIELD_PREP(LRN_AUTOAGE_CFG_1_CELLS_BETWEEN_ENTRY_SCAN, x)
+#define LRN_AUTOAGE_CFG_1_CELLS_BETWEEN_ENTRY_SCAN_GET(x)\
+ FIELD_GET(LRN_AUTOAGE_CFG_1_CELLS_BETWEEN_ENTRY_SCAN, x)
+
+#define LRN_AUTOAGE_CFG_1_CLK_PERIOD_01NS GENMASK(14, 7)
+#define LRN_AUTOAGE_CFG_1_CLK_PERIOD_01NS_SET(x)\
+ FIELD_PREP(LRN_AUTOAGE_CFG_1_CLK_PERIOD_01NS, x)
+#define LRN_AUTOAGE_CFG_1_CLK_PERIOD_01NS_GET(x)\
+ FIELD_GET(LRN_AUTOAGE_CFG_1_CLK_PERIOD_01NS, x)
+
+#define LRN_AUTOAGE_CFG_1_USE_PORT_FILTER_ENA BIT(6)
+#define LRN_AUTOAGE_CFG_1_USE_PORT_FILTER_ENA_SET(x)\
+ FIELD_PREP(LRN_AUTOAGE_CFG_1_USE_PORT_FILTER_ENA, x)
+#define LRN_AUTOAGE_CFG_1_USE_PORT_FILTER_ENA_GET(x)\
+ FIELD_GET(LRN_AUTOAGE_CFG_1_USE_PORT_FILTER_ENA, x)
+
+#define LRN_AUTOAGE_CFG_1_FORCE_HW_SCAN_SHOT GENMASK(5, 2)
+#define LRN_AUTOAGE_CFG_1_FORCE_HW_SCAN_SHOT_SET(x)\
+ FIELD_PREP(LRN_AUTOAGE_CFG_1_FORCE_HW_SCAN_SHOT, x)
+#define LRN_AUTOAGE_CFG_1_FORCE_HW_SCAN_SHOT_GET(x)\
+ FIELD_GET(LRN_AUTOAGE_CFG_1_FORCE_HW_SCAN_SHOT, x)
+
+#define LRN_AUTOAGE_CFG_1_FORCE_HW_SCAN_STOP_SHOT BIT(1)
+#define LRN_AUTOAGE_CFG_1_FORCE_HW_SCAN_STOP_SHOT_SET(x)\
+ FIELD_PREP(LRN_AUTOAGE_CFG_1_FORCE_HW_SCAN_STOP_SHOT, x)
+#define LRN_AUTOAGE_CFG_1_FORCE_HW_SCAN_STOP_SHOT_GET(x)\
+ FIELD_GET(LRN_AUTOAGE_CFG_1_FORCE_HW_SCAN_STOP_SHOT, x)
+
+#define LRN_AUTOAGE_CFG_1_FORCE_IDLE_ENA BIT(0)
+#define LRN_AUTOAGE_CFG_1_FORCE_IDLE_ENA_SET(x)\
+ FIELD_PREP(LRN_AUTOAGE_CFG_1_FORCE_IDLE_ENA, x)
+#define LRN_AUTOAGE_CFG_1_FORCE_IDLE_ENA_GET(x)\
+ FIELD_GET(LRN_AUTOAGE_CFG_1_FORCE_IDLE_ENA, x)
+
+/* LRN:COMMON:AUTOAGE_CFG_2 */
+#define LRN_AUTOAGE_CFG_2 __REG(TARGET_LRN,\
+ 0, 1, 0, 0, 1, 72, 56, 0, 1, 4)
+
+#define LRN_AUTOAGE_CFG_2_NEXT_ROW GENMASK(17, 4)
+#define LRN_AUTOAGE_CFG_2_NEXT_ROW_SET(x)\
+ FIELD_PREP(LRN_AUTOAGE_CFG_2_NEXT_ROW, x)
+#define LRN_AUTOAGE_CFG_2_NEXT_ROW_GET(x)\
+ FIELD_GET(LRN_AUTOAGE_CFG_2_NEXT_ROW, x)
+
+#define LRN_AUTOAGE_CFG_2_SCAN_ONGOING_STATUS GENMASK(3, 0)
+#define LRN_AUTOAGE_CFG_2_SCAN_ONGOING_STATUS_SET(x)\
+ FIELD_PREP(LRN_AUTOAGE_CFG_2_SCAN_ONGOING_STATUS, x)
+#define LRN_AUTOAGE_CFG_2_SCAN_ONGOING_STATUS_GET(x)\
+ FIELD_GET(LRN_AUTOAGE_CFG_2_SCAN_ONGOING_STATUS, x)
+
+/* PCIE_DM_EP:PF0_ATU_CAP:IATU_REGION_CTRL_2_OFF_OUTBOUND_0 */
+#define PCEP_RCTRL_2_OUT_0 __REG(TARGET_PCEP,\
+ 0, 1, 3145728, 0, 1, 130852, 4, 0, 1, 4)
+
+#define PCEP_RCTRL_2_OUT_0_MSG_CODE GENMASK(7, 0)
+#define PCEP_RCTRL_2_OUT_0_MSG_CODE_SET(x)\
+ FIELD_PREP(PCEP_RCTRL_2_OUT_0_MSG_CODE, x)
+#define PCEP_RCTRL_2_OUT_0_MSG_CODE_GET(x)\
+ FIELD_GET(PCEP_RCTRL_2_OUT_0_MSG_CODE, x)
+
+#define PCEP_RCTRL_2_OUT_0_TAG GENMASK(15, 8)
+#define PCEP_RCTRL_2_OUT_0_TAG_SET(x)\
+ FIELD_PREP(PCEP_RCTRL_2_OUT_0_TAG, x)
+#define PCEP_RCTRL_2_OUT_0_TAG_GET(x)\
+ FIELD_GET(PCEP_RCTRL_2_OUT_0_TAG, x)
+
+#define PCEP_RCTRL_2_OUT_0_TAG_SUBSTITUTE_EN BIT(16)
+#define PCEP_RCTRL_2_OUT_0_TAG_SUBSTITUTE_EN_SET(x)\
+ FIELD_PREP(PCEP_RCTRL_2_OUT_0_TAG_SUBSTITUTE_EN, x)
+#define PCEP_RCTRL_2_OUT_0_TAG_SUBSTITUTE_EN_GET(x)\
+ FIELD_GET(PCEP_RCTRL_2_OUT_0_TAG_SUBSTITUTE_EN, x)
+
+#define PCEP_RCTRL_2_OUT_0_FUNC_BYPASS BIT(19)
+#define PCEP_RCTRL_2_OUT_0_FUNC_BYPASS_SET(x)\
+ FIELD_PREP(PCEP_RCTRL_2_OUT_0_FUNC_BYPASS, x)
+#define PCEP_RCTRL_2_OUT_0_FUNC_BYPASS_GET(x)\
+ FIELD_GET(PCEP_RCTRL_2_OUT_0_FUNC_BYPASS, x)
+
+#define PCEP_RCTRL_2_OUT_0_SNP BIT(20)
+#define PCEP_RCTRL_2_OUT_0_SNP_SET(x)\
+ FIELD_PREP(PCEP_RCTRL_2_OUT_0_SNP, x)
+#define PCEP_RCTRL_2_OUT_0_SNP_GET(x)\
+ FIELD_GET(PCEP_RCTRL_2_OUT_0_SNP, x)
+
+#define PCEP_RCTRL_2_OUT_0_INHIBIT_PAYLOAD BIT(22)
+#define PCEP_RCTRL_2_OUT_0_INHIBIT_PAYLOAD_SET(x)\
+ FIELD_PREP(PCEP_RCTRL_2_OUT_0_INHIBIT_PAYLOAD, x)
+#define PCEP_RCTRL_2_OUT_0_INHIBIT_PAYLOAD_GET(x)\
+ FIELD_GET(PCEP_RCTRL_2_OUT_0_INHIBIT_PAYLOAD, x)
+
+#define PCEP_RCTRL_2_OUT_0_HEADER_SUBSTITUTE_EN BIT(23)
+#define PCEP_RCTRL_2_OUT_0_HEADER_SUBSTITUTE_EN_SET(x)\
+ FIELD_PREP(PCEP_RCTRL_2_OUT_0_HEADER_SUBSTITUTE_EN, x)
+#define PCEP_RCTRL_2_OUT_0_HEADER_SUBSTITUTE_EN_GET(x)\
+ FIELD_GET(PCEP_RCTRL_2_OUT_0_HEADER_SUBSTITUTE_EN, x)
+
+#define PCEP_RCTRL_2_OUT_0_CFG_SHIFT_MODE BIT(28)
+#define PCEP_RCTRL_2_OUT_0_CFG_SHIFT_MODE_SET(x)\
+ FIELD_PREP(PCEP_RCTRL_2_OUT_0_CFG_SHIFT_MODE, x)
+#define PCEP_RCTRL_2_OUT_0_CFG_SHIFT_MODE_GET(x)\
+ FIELD_GET(PCEP_RCTRL_2_OUT_0_CFG_SHIFT_MODE, x)
+
+#define PCEP_RCTRL_2_OUT_0_INVERT_MODE BIT(29)
+#define PCEP_RCTRL_2_OUT_0_INVERT_MODE_SET(x)\
+ FIELD_PREP(PCEP_RCTRL_2_OUT_0_INVERT_MODE, x)
+#define PCEP_RCTRL_2_OUT_0_INVERT_MODE_GET(x)\
+ FIELD_GET(PCEP_RCTRL_2_OUT_0_INVERT_MODE, x)
+
+#define PCEP_RCTRL_2_OUT_0_REGION_EN BIT(31)
+#define PCEP_RCTRL_2_OUT_0_REGION_EN_SET(x)\
+ FIELD_PREP(PCEP_RCTRL_2_OUT_0_REGION_EN, x)
+#define PCEP_RCTRL_2_OUT_0_REGION_EN_GET(x)\
+ FIELD_GET(PCEP_RCTRL_2_OUT_0_REGION_EN, x)
+
+/* PCIE_DM_EP:PF0_ATU_CAP:IATU_LWR_BASE_ADDR_OFF_OUTBOUND_0 */
+#define PCEP_ADDR_LWR_OUT_0 __REG(TARGET_PCEP,\
+ 0, 1, 3145728, 0, 1, 130852, 8, 0, 1, 4)
+
+#define PCEP_ADDR_LWR_OUT_0_LWR_BASE_HW GENMASK(15, 0)
+#define PCEP_ADDR_LWR_OUT_0_LWR_BASE_HW_SET(x)\
+ FIELD_PREP(PCEP_ADDR_LWR_OUT_0_LWR_BASE_HW, x)
+#define PCEP_ADDR_LWR_OUT_0_LWR_BASE_HW_GET(x)\
+ FIELD_GET(PCEP_ADDR_LWR_OUT_0_LWR_BASE_HW, x)
+
+#define PCEP_ADDR_LWR_OUT_0_LWR_BASE_RW GENMASK(31, 16)
+#define PCEP_ADDR_LWR_OUT_0_LWR_BASE_RW_SET(x)\
+ FIELD_PREP(PCEP_ADDR_LWR_OUT_0_LWR_BASE_RW, x)
+#define PCEP_ADDR_LWR_OUT_0_LWR_BASE_RW_GET(x)\
+ FIELD_GET(PCEP_ADDR_LWR_OUT_0_LWR_BASE_RW, x)
+
+/* PCIE_DM_EP:PF0_ATU_CAP:IATU_UPPER_BASE_ADDR_OFF_OUTBOUND_0 */
+#define PCEP_ADDR_UPR_OUT_0 __REG(TARGET_PCEP,\
+ 0, 1, 3145728, 0, 1, 130852, 12, 0, 1, 4)
+
+/* PCIE_DM_EP:PF0_ATU_CAP:IATU_LIMIT_ADDR_OFF_OUTBOUND_0 */
+#define PCEP_ADDR_LIM_OUT_0 __REG(TARGET_PCEP,\
+ 0, 1, 3145728, 0, 1, 130852, 16, 0, 1, 4)
+
+#define PCEP_ADDR_LIM_OUT_0_LIMIT_ADDR_HW GENMASK(15, 0)
+#define PCEP_ADDR_LIM_OUT_0_LIMIT_ADDR_HW_SET(x)\
+ FIELD_PREP(PCEP_ADDR_LIM_OUT_0_LIMIT_ADDR_HW, x)
+#define PCEP_ADDR_LIM_OUT_0_LIMIT_ADDR_HW_GET(x)\
+ FIELD_GET(PCEP_ADDR_LIM_OUT_0_LIMIT_ADDR_HW, x)
+
+#define PCEP_ADDR_LIM_OUT_0_LIMIT_ADDR_RW GENMASK(31, 16)
+#define PCEP_ADDR_LIM_OUT_0_LIMIT_ADDR_RW_SET(x)\
+ FIELD_PREP(PCEP_ADDR_LIM_OUT_0_LIMIT_ADDR_RW, x)
+#define PCEP_ADDR_LIM_OUT_0_LIMIT_ADDR_RW_GET(x)\
+ FIELD_GET(PCEP_ADDR_LIM_OUT_0_LIMIT_ADDR_RW, x)
+
+/* PCIE_DM_EP:PF0_ATU_CAP:IATU_LWR_TARGET_ADDR_OFF_OUTBOUND_0 */
+#define PCEP_ADDR_LWR_TGT_OUT_0 __REG(TARGET_PCEP,\
+ 0, 1, 3145728, 0, 1, 130852, 20, 0, 1, 4)
+
+/* PCIE_DM_EP:PF0_ATU_CAP:IATU_UPPER_TARGET_ADDR_OFF_OUTBOUND_0 */
+#define PCEP_ADDR_UPR_TGT_OUT_0 __REG(TARGET_PCEP,\
+ 0, 1, 3145728, 0, 1, 130852, 24, 0, 1, 4)
+
+/* PCIE_DM_EP:PF0_ATU_CAP:IATU_UPPR_LIMIT_ADDR_OFF_OUTBOUND_0 */
+#define PCEP_ADDR_UPR_LIM_OUT_0 __REG(TARGET_PCEP,\
+ 0, 1, 3145728, 0, 1, 130852, 32, 0, 1, 4)
+
+#define PCEP_ADDR_UPR_LIM_OUT_0_UPPR_LIMIT_ADDR_RW GENMASK(1, 0)
+#define PCEP_ADDR_UPR_LIM_OUT_0_UPPR_LIMIT_ADDR_RW_SET(x)\
+ FIELD_PREP(PCEP_ADDR_UPR_LIM_OUT_0_UPPR_LIMIT_ADDR_RW, x)
+#define PCEP_ADDR_UPR_LIM_OUT_0_UPPR_LIMIT_ADDR_RW_GET(x)\
+ FIELD_GET(PCEP_ADDR_UPR_LIM_OUT_0_UPPR_LIMIT_ADDR_RW, x)
+
+#define PCEP_ADDR_UPR_LIM_OUT_0_UPPR_LIMIT_ADDR_HW GENMASK(31, 2)
+#define PCEP_ADDR_UPR_LIM_OUT_0_UPPR_LIMIT_ADDR_HW_SET(x)\
+ FIELD_PREP(PCEP_ADDR_UPR_LIM_OUT_0_UPPR_LIMIT_ADDR_HW, x)
+#define PCEP_ADDR_UPR_LIM_OUT_0_UPPR_LIMIT_ADDR_HW_GET(x)\
+ FIELD_GET(PCEP_ADDR_UPR_LIM_OUT_0_UPPR_LIMIT_ADDR_HW, x)
+
+/* PCS_10GBASE_R:PCS_10GBR_CFG:PCS_CFG */
+#define PCS10G_BR_PCS_CFG(t) __REG(TARGET_PCS10G_BR,\
+ t, 12, 0, 0, 1, 56, 0, 0, 1, 4)
+
+#define PCS10G_BR_PCS_CFG_PCS_ENA BIT(31)
+#define PCS10G_BR_PCS_CFG_PCS_ENA_SET(x)\
+ FIELD_PREP(PCS10G_BR_PCS_CFG_PCS_ENA, x)
+#define PCS10G_BR_PCS_CFG_PCS_ENA_GET(x)\
+ FIELD_GET(PCS10G_BR_PCS_CFG_PCS_ENA, x)
+
+#define PCS10G_BR_PCS_CFG_PMA_LOOPBACK_ENA BIT(30)
+#define PCS10G_BR_PCS_CFG_PMA_LOOPBACK_ENA_SET(x)\
+ FIELD_PREP(PCS10G_BR_PCS_CFG_PMA_LOOPBACK_ENA, x)
+#define PCS10G_BR_PCS_CFG_PMA_LOOPBACK_ENA_GET(x)\
+ FIELD_GET(PCS10G_BR_PCS_CFG_PMA_LOOPBACK_ENA, x)
+
+#define PCS10G_BR_PCS_CFG_SH_CNT_MAX GENMASK(29, 24)
+#define PCS10G_BR_PCS_CFG_SH_CNT_MAX_SET(x)\
+ FIELD_PREP(PCS10G_BR_PCS_CFG_SH_CNT_MAX, x)
+#define PCS10G_BR_PCS_CFG_SH_CNT_MAX_GET(x)\
+ FIELD_GET(PCS10G_BR_PCS_CFG_SH_CNT_MAX, x)
+
+#define PCS10G_BR_PCS_CFG_RX_DATA_FLIP BIT(18)
+#define PCS10G_BR_PCS_CFG_RX_DATA_FLIP_SET(x)\
+ FIELD_PREP(PCS10G_BR_PCS_CFG_RX_DATA_FLIP, x)
+#define PCS10G_BR_PCS_CFG_RX_DATA_FLIP_GET(x)\
+ FIELD_GET(PCS10G_BR_PCS_CFG_RX_DATA_FLIP, x)
+
+#define PCS10G_BR_PCS_CFG_RESYNC_ENA BIT(15)
+#define PCS10G_BR_PCS_CFG_RESYNC_ENA_SET(x)\
+ FIELD_PREP(PCS10G_BR_PCS_CFG_RESYNC_ENA, x)
+#define PCS10G_BR_PCS_CFG_RESYNC_ENA_GET(x)\
+ FIELD_GET(PCS10G_BR_PCS_CFG_RESYNC_ENA, x)
+
+#define PCS10G_BR_PCS_CFG_LF_GEN_DIS BIT(14)
+#define PCS10G_BR_PCS_CFG_LF_GEN_DIS_SET(x)\
+ FIELD_PREP(PCS10G_BR_PCS_CFG_LF_GEN_DIS, x)
+#define PCS10G_BR_PCS_CFG_LF_GEN_DIS_GET(x)\
+ FIELD_GET(PCS10G_BR_PCS_CFG_LF_GEN_DIS, x)
+
+#define PCS10G_BR_PCS_CFG_RX_TEST_MODE BIT(13)
+#define PCS10G_BR_PCS_CFG_RX_TEST_MODE_SET(x)\
+ FIELD_PREP(PCS10G_BR_PCS_CFG_RX_TEST_MODE, x)
+#define PCS10G_BR_PCS_CFG_RX_TEST_MODE_GET(x)\
+ FIELD_GET(PCS10G_BR_PCS_CFG_RX_TEST_MODE, x)
+
+#define PCS10G_BR_PCS_CFG_RX_SCR_DISABLE BIT(12)
+#define PCS10G_BR_PCS_CFG_RX_SCR_DISABLE_SET(x)\
+ FIELD_PREP(PCS10G_BR_PCS_CFG_RX_SCR_DISABLE, x)
+#define PCS10G_BR_PCS_CFG_RX_SCR_DISABLE_GET(x)\
+ FIELD_GET(PCS10G_BR_PCS_CFG_RX_SCR_DISABLE, x)
+
+#define PCS10G_BR_PCS_CFG_TX_DATA_FLIP BIT(7)
+#define PCS10G_BR_PCS_CFG_TX_DATA_FLIP_SET(x)\
+ FIELD_PREP(PCS10G_BR_PCS_CFG_TX_DATA_FLIP, x)
+#define PCS10G_BR_PCS_CFG_TX_DATA_FLIP_GET(x)\
+ FIELD_GET(PCS10G_BR_PCS_CFG_TX_DATA_FLIP, x)
+
+#define PCS10G_BR_PCS_CFG_AN_LINK_CTRL_ENA BIT(6)
+#define PCS10G_BR_PCS_CFG_AN_LINK_CTRL_ENA_SET(x)\
+ FIELD_PREP(PCS10G_BR_PCS_CFG_AN_LINK_CTRL_ENA, x)
+#define PCS10G_BR_PCS_CFG_AN_LINK_CTRL_ENA_GET(x)\
+ FIELD_GET(PCS10G_BR_PCS_CFG_AN_LINK_CTRL_ENA, x)
+
+#define PCS10G_BR_PCS_CFG_TX_TEST_MODE BIT(4)
+#define PCS10G_BR_PCS_CFG_TX_TEST_MODE_SET(x)\
+ FIELD_PREP(PCS10G_BR_PCS_CFG_TX_TEST_MODE, x)
+#define PCS10G_BR_PCS_CFG_TX_TEST_MODE_GET(x)\
+ FIELD_GET(PCS10G_BR_PCS_CFG_TX_TEST_MODE, x)
+
+#define PCS10G_BR_PCS_CFG_TX_SCR_DISABLE BIT(3)
+#define PCS10G_BR_PCS_CFG_TX_SCR_DISABLE_SET(x)\
+ FIELD_PREP(PCS10G_BR_PCS_CFG_TX_SCR_DISABLE, x)
+#define PCS10G_BR_PCS_CFG_TX_SCR_DISABLE_GET(x)\
+ FIELD_GET(PCS10G_BR_PCS_CFG_TX_SCR_DISABLE, x)
+
+/* PCS_10GBASE_R:PCS_10GBR_CFG:PCS_SD_CFG */
+#define PCS10G_BR_PCS_SD_CFG(t) __REG(TARGET_PCS10G_BR,\
+ t, 12, 0, 0, 1, 56, 4, 0, 1, 4)
+
+#define PCS10G_BR_PCS_SD_CFG_SD_SEL BIT(8)
+#define PCS10G_BR_PCS_SD_CFG_SD_SEL_SET(x)\
+ FIELD_PREP(PCS10G_BR_PCS_SD_CFG_SD_SEL, x)
+#define PCS10G_BR_PCS_SD_CFG_SD_SEL_GET(x)\
+ FIELD_GET(PCS10G_BR_PCS_SD_CFG_SD_SEL, x)
+
+#define PCS10G_BR_PCS_SD_CFG_SD_POL BIT(4)
+#define PCS10G_BR_PCS_SD_CFG_SD_POL_SET(x)\
+ FIELD_PREP(PCS10G_BR_PCS_SD_CFG_SD_POL, x)
+#define PCS10G_BR_PCS_SD_CFG_SD_POL_GET(x)\
+ FIELD_GET(PCS10G_BR_PCS_SD_CFG_SD_POL, x)
+
+#define PCS10G_BR_PCS_SD_CFG_SD_ENA BIT(0)
+#define PCS10G_BR_PCS_SD_CFG_SD_ENA_SET(x)\
+ FIELD_PREP(PCS10G_BR_PCS_SD_CFG_SD_ENA, x)
+#define PCS10G_BR_PCS_SD_CFG_SD_ENA_GET(x)\
+ FIELD_GET(PCS10G_BR_PCS_SD_CFG_SD_ENA, x)
+
+/* PCS_10GBASE_R:PCS_10GBR_CFG:PCS_CFG */
+#define PCS25G_BR_PCS_CFG(t) __REG(TARGET_PCS25G_BR,\
+ t, 8, 0, 0, 1, 56, 0, 0, 1, 4)
+
+#define PCS25G_BR_PCS_CFG_PCS_ENA BIT(31)
+#define PCS25G_BR_PCS_CFG_PCS_ENA_SET(x)\
+ FIELD_PREP(PCS25G_BR_PCS_CFG_PCS_ENA, x)
+#define PCS25G_BR_PCS_CFG_PCS_ENA_GET(x)\
+ FIELD_GET(PCS25G_BR_PCS_CFG_PCS_ENA, x)
+
+#define PCS25G_BR_PCS_CFG_PMA_LOOPBACK_ENA BIT(30)
+#define PCS25G_BR_PCS_CFG_PMA_LOOPBACK_ENA_SET(x)\
+ FIELD_PREP(PCS25G_BR_PCS_CFG_PMA_LOOPBACK_ENA, x)
+#define PCS25G_BR_PCS_CFG_PMA_LOOPBACK_ENA_GET(x)\
+ FIELD_GET(PCS25G_BR_PCS_CFG_PMA_LOOPBACK_ENA, x)
+
+#define PCS25G_BR_PCS_CFG_SH_CNT_MAX GENMASK(29, 24)
+#define PCS25G_BR_PCS_CFG_SH_CNT_MAX_SET(x)\
+ FIELD_PREP(PCS25G_BR_PCS_CFG_SH_CNT_MAX, x)
+#define PCS25G_BR_PCS_CFG_SH_CNT_MAX_GET(x)\
+ FIELD_GET(PCS25G_BR_PCS_CFG_SH_CNT_MAX, x)
+
+#define PCS25G_BR_PCS_CFG_RX_DATA_FLIP BIT(18)
+#define PCS25G_BR_PCS_CFG_RX_DATA_FLIP_SET(x)\
+ FIELD_PREP(PCS25G_BR_PCS_CFG_RX_DATA_FLIP, x)
+#define PCS25G_BR_PCS_CFG_RX_DATA_FLIP_GET(x)\
+ FIELD_GET(PCS25G_BR_PCS_CFG_RX_DATA_FLIP, x)
+
+#define PCS25G_BR_PCS_CFG_RESYNC_ENA BIT(15)
+#define PCS25G_BR_PCS_CFG_RESYNC_ENA_SET(x)\
+ FIELD_PREP(PCS25G_BR_PCS_CFG_RESYNC_ENA, x)
+#define PCS25G_BR_PCS_CFG_RESYNC_ENA_GET(x)\
+ FIELD_GET(PCS25G_BR_PCS_CFG_RESYNC_ENA, x)
+
+#define PCS25G_BR_PCS_CFG_LF_GEN_DIS BIT(14)
+#define PCS25G_BR_PCS_CFG_LF_GEN_DIS_SET(x)\
+ FIELD_PREP(PCS25G_BR_PCS_CFG_LF_GEN_DIS, x)
+#define PCS25G_BR_PCS_CFG_LF_GEN_DIS_GET(x)\
+ FIELD_GET(PCS25G_BR_PCS_CFG_LF_GEN_DIS, x)
+
+#define PCS25G_BR_PCS_CFG_RX_TEST_MODE BIT(13)
+#define PCS25G_BR_PCS_CFG_RX_TEST_MODE_SET(x)\
+ FIELD_PREP(PCS25G_BR_PCS_CFG_RX_TEST_MODE, x)
+#define PCS25G_BR_PCS_CFG_RX_TEST_MODE_GET(x)\
+ FIELD_GET(PCS25G_BR_PCS_CFG_RX_TEST_MODE, x)
+
+#define PCS25G_BR_PCS_CFG_RX_SCR_DISABLE BIT(12)
+#define PCS25G_BR_PCS_CFG_RX_SCR_DISABLE_SET(x)\
+ FIELD_PREP(PCS25G_BR_PCS_CFG_RX_SCR_DISABLE, x)
+#define PCS25G_BR_PCS_CFG_RX_SCR_DISABLE_GET(x)\
+ FIELD_GET(PCS25G_BR_PCS_CFG_RX_SCR_DISABLE, x)
+
+#define PCS25G_BR_PCS_CFG_TX_DATA_FLIP BIT(7)
+#define PCS25G_BR_PCS_CFG_TX_DATA_FLIP_SET(x)\
+ FIELD_PREP(PCS25G_BR_PCS_CFG_TX_DATA_FLIP, x)
+#define PCS25G_BR_PCS_CFG_TX_DATA_FLIP_GET(x)\
+ FIELD_GET(PCS25G_BR_PCS_CFG_TX_DATA_FLIP, x)
+
+#define PCS25G_BR_PCS_CFG_AN_LINK_CTRL_ENA BIT(6)
+#define PCS25G_BR_PCS_CFG_AN_LINK_CTRL_ENA_SET(x)\
+ FIELD_PREP(PCS25G_BR_PCS_CFG_AN_LINK_CTRL_ENA, x)
+#define PCS25G_BR_PCS_CFG_AN_LINK_CTRL_ENA_GET(x)\
+ FIELD_GET(PCS25G_BR_PCS_CFG_AN_LINK_CTRL_ENA, x)
+
+#define PCS25G_BR_PCS_CFG_TX_TEST_MODE BIT(4)
+#define PCS25G_BR_PCS_CFG_TX_TEST_MODE_SET(x)\
+ FIELD_PREP(PCS25G_BR_PCS_CFG_TX_TEST_MODE, x)
+#define PCS25G_BR_PCS_CFG_TX_TEST_MODE_GET(x)\
+ FIELD_GET(PCS25G_BR_PCS_CFG_TX_TEST_MODE, x)
+
+#define PCS25G_BR_PCS_CFG_TX_SCR_DISABLE BIT(3)
+#define PCS25G_BR_PCS_CFG_TX_SCR_DISABLE_SET(x)\
+ FIELD_PREP(PCS25G_BR_PCS_CFG_TX_SCR_DISABLE, x)
+#define PCS25G_BR_PCS_CFG_TX_SCR_DISABLE_GET(x)\
+ FIELD_GET(PCS25G_BR_PCS_CFG_TX_SCR_DISABLE, x)
+
+/* PCS_10GBASE_R:PCS_10GBR_CFG:PCS_SD_CFG */
+#define PCS25G_BR_PCS_SD_CFG(t) __REG(TARGET_PCS25G_BR,\
+ t, 8, 0, 0, 1, 56, 4, 0, 1, 4)
+
+#define PCS25G_BR_PCS_SD_CFG_SD_SEL BIT(8)
+#define PCS25G_BR_PCS_SD_CFG_SD_SEL_SET(x)\
+ FIELD_PREP(PCS25G_BR_PCS_SD_CFG_SD_SEL, x)
+#define PCS25G_BR_PCS_SD_CFG_SD_SEL_GET(x)\
+ FIELD_GET(PCS25G_BR_PCS_SD_CFG_SD_SEL, x)
+
+#define PCS25G_BR_PCS_SD_CFG_SD_POL BIT(4)
+#define PCS25G_BR_PCS_SD_CFG_SD_POL_SET(x)\
+ FIELD_PREP(PCS25G_BR_PCS_SD_CFG_SD_POL, x)
+#define PCS25G_BR_PCS_SD_CFG_SD_POL_GET(x)\
+ FIELD_GET(PCS25G_BR_PCS_SD_CFG_SD_POL, x)
+
+#define PCS25G_BR_PCS_SD_CFG_SD_ENA BIT(0)
+#define PCS25G_BR_PCS_SD_CFG_SD_ENA_SET(x)\
+ FIELD_PREP(PCS25G_BR_PCS_SD_CFG_SD_ENA, x)
+#define PCS25G_BR_PCS_SD_CFG_SD_ENA_GET(x)\
+ FIELD_GET(PCS25G_BR_PCS_SD_CFG_SD_ENA, x)
+
+/* PCS_10GBASE_R:PCS_10GBR_CFG:PCS_CFG */
+#define PCS5G_BR_PCS_CFG(t) __REG(TARGET_PCS5G_BR,\
+ t, 13, 0, 0, 1, 56, 0, 0, 1, 4)
+
+#define PCS5G_BR_PCS_CFG_PCS_ENA BIT(31)
+#define PCS5G_BR_PCS_CFG_PCS_ENA_SET(x)\
+ FIELD_PREP(PCS5G_BR_PCS_CFG_PCS_ENA, x)
+#define PCS5G_BR_PCS_CFG_PCS_ENA_GET(x)\
+ FIELD_GET(PCS5G_BR_PCS_CFG_PCS_ENA, x)
+
+#define PCS5G_BR_PCS_CFG_PMA_LOOPBACK_ENA BIT(30)
+#define PCS5G_BR_PCS_CFG_PMA_LOOPBACK_ENA_SET(x)\
+ FIELD_PREP(PCS5G_BR_PCS_CFG_PMA_LOOPBACK_ENA, x)
+#define PCS5G_BR_PCS_CFG_PMA_LOOPBACK_ENA_GET(x)\
+ FIELD_GET(PCS5G_BR_PCS_CFG_PMA_LOOPBACK_ENA, x)
+
+#define PCS5G_BR_PCS_CFG_SH_CNT_MAX GENMASK(29, 24)
+#define PCS5G_BR_PCS_CFG_SH_CNT_MAX_SET(x)\
+ FIELD_PREP(PCS5G_BR_PCS_CFG_SH_CNT_MAX, x)
+#define PCS5G_BR_PCS_CFG_SH_CNT_MAX_GET(x)\
+ FIELD_GET(PCS5G_BR_PCS_CFG_SH_CNT_MAX, x)
+
+#define PCS5G_BR_PCS_CFG_RX_DATA_FLIP BIT(18)
+#define PCS5G_BR_PCS_CFG_RX_DATA_FLIP_SET(x)\
+ FIELD_PREP(PCS5G_BR_PCS_CFG_RX_DATA_FLIP, x)
+#define PCS5G_BR_PCS_CFG_RX_DATA_FLIP_GET(x)\
+ FIELD_GET(PCS5G_BR_PCS_CFG_RX_DATA_FLIP, x)
+
+#define PCS5G_BR_PCS_CFG_RESYNC_ENA BIT(15)
+#define PCS5G_BR_PCS_CFG_RESYNC_ENA_SET(x)\
+ FIELD_PREP(PCS5G_BR_PCS_CFG_RESYNC_ENA, x)
+#define PCS5G_BR_PCS_CFG_RESYNC_ENA_GET(x)\
+ FIELD_GET(PCS5G_BR_PCS_CFG_RESYNC_ENA, x)
+
+#define PCS5G_BR_PCS_CFG_LF_GEN_DIS BIT(14)
+#define PCS5G_BR_PCS_CFG_LF_GEN_DIS_SET(x)\
+ FIELD_PREP(PCS5G_BR_PCS_CFG_LF_GEN_DIS, x)
+#define PCS5G_BR_PCS_CFG_LF_GEN_DIS_GET(x)\
+ FIELD_GET(PCS5G_BR_PCS_CFG_LF_GEN_DIS, x)
+
+#define PCS5G_BR_PCS_CFG_RX_TEST_MODE BIT(13)
+#define PCS5G_BR_PCS_CFG_RX_TEST_MODE_SET(x)\
+ FIELD_PREP(PCS5G_BR_PCS_CFG_RX_TEST_MODE, x)
+#define PCS5G_BR_PCS_CFG_RX_TEST_MODE_GET(x)\
+ FIELD_GET(PCS5G_BR_PCS_CFG_RX_TEST_MODE, x)
+
+#define PCS5G_BR_PCS_CFG_RX_SCR_DISABLE BIT(12)
+#define PCS5G_BR_PCS_CFG_RX_SCR_DISABLE_SET(x)\
+ FIELD_PREP(PCS5G_BR_PCS_CFG_RX_SCR_DISABLE, x)
+#define PCS5G_BR_PCS_CFG_RX_SCR_DISABLE_GET(x)\
+ FIELD_GET(PCS5G_BR_PCS_CFG_RX_SCR_DISABLE, x)
+
+#define PCS5G_BR_PCS_CFG_TX_DATA_FLIP BIT(7)
+#define PCS5G_BR_PCS_CFG_TX_DATA_FLIP_SET(x)\
+ FIELD_PREP(PCS5G_BR_PCS_CFG_TX_DATA_FLIP, x)
+#define PCS5G_BR_PCS_CFG_TX_DATA_FLIP_GET(x)\
+ FIELD_GET(PCS5G_BR_PCS_CFG_TX_DATA_FLIP, x)
+
+#define PCS5G_BR_PCS_CFG_AN_LINK_CTRL_ENA BIT(6)
+#define PCS5G_BR_PCS_CFG_AN_LINK_CTRL_ENA_SET(x)\
+ FIELD_PREP(PCS5G_BR_PCS_CFG_AN_LINK_CTRL_ENA, x)
+#define PCS5G_BR_PCS_CFG_AN_LINK_CTRL_ENA_GET(x)\
+ FIELD_GET(PCS5G_BR_PCS_CFG_AN_LINK_CTRL_ENA, x)
+
+#define PCS5G_BR_PCS_CFG_TX_TEST_MODE BIT(4)
+#define PCS5G_BR_PCS_CFG_TX_TEST_MODE_SET(x)\
+ FIELD_PREP(PCS5G_BR_PCS_CFG_TX_TEST_MODE, x)
+#define PCS5G_BR_PCS_CFG_TX_TEST_MODE_GET(x)\
+ FIELD_GET(PCS5G_BR_PCS_CFG_TX_TEST_MODE, x)
+
+#define PCS5G_BR_PCS_CFG_TX_SCR_DISABLE BIT(3)
+#define PCS5G_BR_PCS_CFG_TX_SCR_DISABLE_SET(x)\
+ FIELD_PREP(PCS5G_BR_PCS_CFG_TX_SCR_DISABLE, x)
+#define PCS5G_BR_PCS_CFG_TX_SCR_DISABLE_GET(x)\
+ FIELD_GET(PCS5G_BR_PCS_CFG_TX_SCR_DISABLE, x)
+
+/* PCS_10GBASE_R:PCS_10GBR_CFG:PCS_SD_CFG */
+#define PCS5G_BR_PCS_SD_CFG(t) __REG(TARGET_PCS5G_BR,\
+ t, 13, 0, 0, 1, 56, 4, 0, 1, 4)
+
+#define PCS5G_BR_PCS_SD_CFG_SD_SEL BIT(8)
+#define PCS5G_BR_PCS_SD_CFG_SD_SEL_SET(x)\
+ FIELD_PREP(PCS5G_BR_PCS_SD_CFG_SD_SEL, x)
+#define PCS5G_BR_PCS_SD_CFG_SD_SEL_GET(x)\
+ FIELD_GET(PCS5G_BR_PCS_SD_CFG_SD_SEL, x)
+
+#define PCS5G_BR_PCS_SD_CFG_SD_POL BIT(4)
+#define PCS5G_BR_PCS_SD_CFG_SD_POL_SET(x)\
+ FIELD_PREP(PCS5G_BR_PCS_SD_CFG_SD_POL, x)
+#define PCS5G_BR_PCS_SD_CFG_SD_POL_GET(x)\
+ FIELD_GET(PCS5G_BR_PCS_SD_CFG_SD_POL, x)
+
+#define PCS5G_BR_PCS_SD_CFG_SD_ENA BIT(0)
+#define PCS5G_BR_PCS_SD_CFG_SD_ENA_SET(x)\
+ FIELD_PREP(PCS5G_BR_PCS_SD_CFG_SD_ENA, x)
+#define PCS5G_BR_PCS_SD_CFG_SD_ENA_GET(x)\
+ FIELD_GET(PCS5G_BR_PCS_SD_CFG_SD_ENA, x)
+
+/* PORT_CONF:HW_CFG:DEV5G_MODES */
+#define PORT_CONF_DEV5G_MODES __REG(TARGET_PORT_CONF,\
+ 0, 1, 0, 0, 1, 24, 0, 0, 1, 4)
+
+#define PORT_CONF_DEV5G_MODES_DEV5G_D0_MODE BIT(0)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D0_MODE_SET(x)\
+ FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D0_MODE, x)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D0_MODE_GET(x)\
+ FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D0_MODE, x)
+
+#define PORT_CONF_DEV5G_MODES_DEV5G_D1_MODE BIT(1)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D1_MODE_SET(x)\
+ FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D1_MODE, x)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D1_MODE_GET(x)\
+ FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D1_MODE, x)
+
+#define PORT_CONF_DEV5G_MODES_DEV5G_D2_MODE BIT(2)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D2_MODE_SET(x)\
+ FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D2_MODE, x)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D2_MODE_GET(x)\
+ FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D2_MODE, x)
+
+#define PORT_CONF_DEV5G_MODES_DEV5G_D3_MODE BIT(3)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D3_MODE_SET(x)\
+ FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D3_MODE, x)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D3_MODE_GET(x)\
+ FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D3_MODE, x)
+
+#define PORT_CONF_DEV5G_MODES_DEV5G_D4_MODE BIT(4)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D4_MODE_SET(x)\
+ FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D4_MODE, x)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D4_MODE_GET(x)\
+ FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D4_MODE, x)
+
+#define PORT_CONF_DEV5G_MODES_DEV5G_D5_MODE BIT(5)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D5_MODE_SET(x)\
+ FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D5_MODE, x)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D5_MODE_GET(x)\
+ FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D5_MODE, x)
+
+#define PORT_CONF_DEV5G_MODES_DEV5G_D6_MODE BIT(6)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D6_MODE_SET(x)\
+ FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D6_MODE, x)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D6_MODE_GET(x)\
+ FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D6_MODE, x)
+
+#define PORT_CONF_DEV5G_MODES_DEV5G_D7_MODE BIT(7)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D7_MODE_SET(x)\
+ FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D7_MODE, x)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D7_MODE_GET(x)\
+ FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D7_MODE, x)
+
+#define PORT_CONF_DEV5G_MODES_DEV5G_D8_MODE BIT(8)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D8_MODE_SET(x)\
+ FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D8_MODE, x)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D8_MODE_GET(x)\
+ FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D8_MODE, x)
+
+#define PORT_CONF_DEV5G_MODES_DEV5G_D9_MODE BIT(9)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D9_MODE_SET(x)\
+ FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D9_MODE, x)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D9_MODE_GET(x)\
+ FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D9_MODE, x)
+
+#define PORT_CONF_DEV5G_MODES_DEV5G_D10_MODE BIT(10)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D10_MODE_SET(x)\
+ FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D10_MODE, x)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D10_MODE_GET(x)\
+ FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D10_MODE, x)
+
+#define PORT_CONF_DEV5G_MODES_DEV5G_D11_MODE BIT(11)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D11_MODE_SET(x)\
+ FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D11_MODE, x)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D11_MODE_GET(x)\
+ FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D11_MODE, x)
+
+#define PORT_CONF_DEV5G_MODES_DEV5G_D64_MODE BIT(12)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D64_MODE_SET(x)\
+ FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D64_MODE, x)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D64_MODE_GET(x)\
+ FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D64_MODE, x)
+
+/* PORT_CONF:HW_CFG:DEV10G_MODES */
+#define PORT_CONF_DEV10G_MODES __REG(TARGET_PORT_CONF,\
+ 0, 1, 0, 0, 1, 24, 4, 0, 1, 4)
+
+#define PORT_CONF_DEV10G_MODES_DEV10G_D12_MODE BIT(0)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D12_MODE_SET(x)\
+ FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D12_MODE, x)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D12_MODE_GET(x)\
+ FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D12_MODE, x)
+
+#define PORT_CONF_DEV10G_MODES_DEV10G_D13_MODE BIT(1)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D13_MODE_SET(x)\
+ FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D13_MODE, x)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D13_MODE_GET(x)\
+ FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D13_MODE, x)
+
+#define PORT_CONF_DEV10G_MODES_DEV10G_D14_MODE BIT(2)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D14_MODE_SET(x)\
+ FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D14_MODE, x)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D14_MODE_GET(x)\
+ FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D14_MODE, x)
+
+#define PORT_CONF_DEV10G_MODES_DEV10G_D15_MODE BIT(3)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D15_MODE_SET(x)\
+ FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D15_MODE, x)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D15_MODE_GET(x)\
+ FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D15_MODE, x)
+
+#define PORT_CONF_DEV10G_MODES_DEV10G_D48_MODE BIT(4)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D48_MODE_SET(x)\
+ FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D48_MODE, x)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D48_MODE_GET(x)\
+ FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D48_MODE, x)
+
+#define PORT_CONF_DEV10G_MODES_DEV10G_D49_MODE BIT(5)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D49_MODE_SET(x)\
+ FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D49_MODE, x)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D49_MODE_GET(x)\
+ FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D49_MODE, x)
+
+#define PORT_CONF_DEV10G_MODES_DEV10G_D50_MODE BIT(6)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D50_MODE_SET(x)\
+ FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D50_MODE, x)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D50_MODE_GET(x)\
+ FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D50_MODE, x)
+
+#define PORT_CONF_DEV10G_MODES_DEV10G_D51_MODE BIT(7)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D51_MODE_SET(x)\
+ FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D51_MODE, x)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D51_MODE_GET(x)\
+ FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D51_MODE, x)
+
+#define PORT_CONF_DEV10G_MODES_DEV10G_D52_MODE BIT(8)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D52_MODE_SET(x)\
+ FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D52_MODE, x)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D52_MODE_GET(x)\
+ FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D52_MODE, x)
+
+#define PORT_CONF_DEV10G_MODES_DEV10G_D53_MODE BIT(9)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D53_MODE_SET(x)\
+ FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D53_MODE, x)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D53_MODE_GET(x)\
+ FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D53_MODE, x)
+
+#define PORT_CONF_DEV10G_MODES_DEV10G_D54_MODE BIT(10)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D54_MODE_SET(x)\
+ FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D54_MODE, x)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D54_MODE_GET(x)\
+ FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D54_MODE, x)
+
+#define PORT_CONF_DEV10G_MODES_DEV10G_D55_MODE BIT(11)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D55_MODE_SET(x)\
+ FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D55_MODE, x)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D55_MODE_GET(x)\
+ FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D55_MODE, x)
+
+/* PORT_CONF:HW_CFG:DEV25G_MODES */
+#define PORT_CONF_DEV25G_MODES __REG(TARGET_PORT_CONF,\
+ 0, 1, 0, 0, 1, 24, 8, 0, 1, 4)
+
+#define PORT_CONF_DEV25G_MODES_DEV25G_D56_MODE BIT(0)
+#define PORT_CONF_DEV25G_MODES_DEV25G_D56_MODE_SET(x)\
+ FIELD_PREP(PORT_CONF_DEV25G_MODES_DEV25G_D56_MODE, x)
+#define PORT_CONF_DEV25G_MODES_DEV25G_D56_MODE_GET(x)\
+ FIELD_GET(PORT_CONF_DEV25G_MODES_DEV25G_D56_MODE, x)
+
+#define PORT_CONF_DEV25G_MODES_DEV25G_D57_MODE BIT(1)
+#define PORT_CONF_DEV25G_MODES_DEV25G_D57_MODE_SET(x)\
+ FIELD_PREP(PORT_CONF_DEV25G_MODES_DEV25G_D57_MODE, x)
+#define PORT_CONF_DEV25G_MODES_DEV25G_D57_MODE_GET(x)\
+ FIELD_GET(PORT_CONF_DEV25G_MODES_DEV25G_D57_MODE, x)
+
+#define PORT_CONF_DEV25G_MODES_DEV25G_D58_MODE BIT(2)
+#define PORT_CONF_DEV25G_MODES_DEV25G_D58_MODE_SET(x)\
+ FIELD_PREP(PORT_CONF_DEV25G_MODES_DEV25G_D58_MODE, x)
+#define PORT_CONF_DEV25G_MODES_DEV25G_D58_MODE_GET(x)\
+ FIELD_GET(PORT_CONF_DEV25G_MODES_DEV25G_D58_MODE, x)
+
+#define PORT_CONF_DEV25G_MODES_DEV25G_D59_MODE BIT(3)
+#define PORT_CONF_DEV25G_MODES_DEV25G_D59_MODE_SET(x)\
+ FIELD_PREP(PORT_CONF_DEV25G_MODES_DEV25G_D59_MODE, x)
+#define PORT_CONF_DEV25G_MODES_DEV25G_D59_MODE_GET(x)\
+ FIELD_GET(PORT_CONF_DEV25G_MODES_DEV25G_D59_MODE, x)
+
+#define PORT_CONF_DEV25G_MODES_DEV25G_D60_MODE BIT(4)
+#define PORT_CONF_DEV25G_MODES_DEV25G_D60_MODE_SET(x)\
+ FIELD_PREP(PORT_CONF_DEV25G_MODES_DEV25G_D60_MODE, x)
+#define PORT_CONF_DEV25G_MODES_DEV25G_D60_MODE_GET(x)\
+ FIELD_GET(PORT_CONF_DEV25G_MODES_DEV25G_D60_MODE, x)
+
+#define PORT_CONF_DEV25G_MODES_DEV25G_D61_MODE BIT(5)
+#define PORT_CONF_DEV25G_MODES_DEV25G_D61_MODE_SET(x)\
+ FIELD_PREP(PORT_CONF_DEV25G_MODES_DEV25G_D61_MODE, x)
+#define PORT_CONF_DEV25G_MODES_DEV25G_D61_MODE_GET(x)\
+ FIELD_GET(PORT_CONF_DEV25G_MODES_DEV25G_D61_MODE, x)
+
+#define PORT_CONF_DEV25G_MODES_DEV25G_D62_MODE BIT(6)
+#define PORT_CONF_DEV25G_MODES_DEV25G_D62_MODE_SET(x)\
+ FIELD_PREP(PORT_CONF_DEV25G_MODES_DEV25G_D62_MODE, x)
+#define PORT_CONF_DEV25G_MODES_DEV25G_D62_MODE_GET(x)\
+ FIELD_GET(PORT_CONF_DEV25G_MODES_DEV25G_D62_MODE, x)
+
+#define PORT_CONF_DEV25G_MODES_DEV25G_D63_MODE BIT(7)
+#define PORT_CONF_DEV25G_MODES_DEV25G_D63_MODE_SET(x)\
+ FIELD_PREP(PORT_CONF_DEV25G_MODES_DEV25G_D63_MODE, x)
+#define PORT_CONF_DEV25G_MODES_DEV25G_D63_MODE_GET(x)\
+ FIELD_GET(PORT_CONF_DEV25G_MODES_DEV25G_D63_MODE, x)
+
+/* PORT_CONF:HW_CFG:QSGMII_ENA */
+#define PORT_CONF_QSGMII_ENA __REG(TARGET_PORT_CONF,\
+ 0, 1, 0, 0, 1, 24, 12, 0, 1, 4)
+
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_0 BIT(0)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_0_SET(x)\
+ FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_0, x)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_0_GET(x)\
+ FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_0, x)
+
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_1 BIT(1)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_1_SET(x)\
+ FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_1, x)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_1_GET(x)\
+ FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_1, x)
+
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_2 BIT(2)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_2_SET(x)\
+ FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_2, x)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_2_GET(x)\
+ FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_2, x)
+
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_3 BIT(3)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_3_SET(x)\
+ FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_3, x)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_3_GET(x)\
+ FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_3, x)
+
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_4 BIT(4)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_4_SET(x)\
+ FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_4, x)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_4_GET(x)\
+ FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_4, x)
+
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_5 BIT(5)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_5_SET(x)\
+ FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_5, x)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_5_GET(x)\
+ FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_5, x)
+
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_6 BIT(6)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_6_SET(x)\
+ FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_6, x)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_6_GET(x)\
+ FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_6, x)
+
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_7 BIT(7)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_7_SET(x)\
+ FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_7, x)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_7_GET(x)\
+ FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_7, x)
+
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_8 BIT(8)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_8_SET(x)\
+ FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_8, x)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_8_GET(x)\
+ FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_8, x)
+
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_9 BIT(9)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_9_SET(x)\
+ FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_9, x)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_9_GET(x)\
+ FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_9, x)
+
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_10 BIT(10)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_10_SET(x)\
+ FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_10, x)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_10_GET(x)\
+ FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_10, x)
+
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_11 BIT(11)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_11_SET(x)\
+ FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_11, x)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_11_GET(x)\
+ FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_11, x)
+
+/* PORT_CONF:USGMII_CFG_STAT:USGMII_CFG */
+#define PORT_CONF_USGMII_CFG(g) __REG(TARGET_PORT_CONF,\
+ 0, 1, 72, g, 6, 8, 0, 0, 1, 4)
+
+#define PORT_CONF_USGMII_CFG_BYPASS_SCRAM BIT(9)
+#define PORT_CONF_USGMII_CFG_BYPASS_SCRAM_SET(x)\
+ FIELD_PREP(PORT_CONF_USGMII_CFG_BYPASS_SCRAM, x)
+#define PORT_CONF_USGMII_CFG_BYPASS_SCRAM_GET(x)\
+ FIELD_GET(PORT_CONF_USGMII_CFG_BYPASS_SCRAM, x)
+
+#define PORT_CONF_USGMII_CFG_BYPASS_DESCRAM BIT(8)
+#define PORT_CONF_USGMII_CFG_BYPASS_DESCRAM_SET(x)\
+ FIELD_PREP(PORT_CONF_USGMII_CFG_BYPASS_DESCRAM, x)
+#define PORT_CONF_USGMII_CFG_BYPASS_DESCRAM_GET(x)\
+ FIELD_GET(PORT_CONF_USGMII_CFG_BYPASS_DESCRAM, x)
+
+#define PORT_CONF_USGMII_CFG_FLIP_LANES BIT(7)
+#define PORT_CONF_USGMII_CFG_FLIP_LANES_SET(x)\
+ FIELD_PREP(PORT_CONF_USGMII_CFG_FLIP_LANES, x)
+#define PORT_CONF_USGMII_CFG_FLIP_LANES_GET(x)\
+ FIELD_GET(PORT_CONF_USGMII_CFG_FLIP_LANES, x)
+
+#define PORT_CONF_USGMII_CFG_SHYST_DIS BIT(6)
+#define PORT_CONF_USGMII_CFG_SHYST_DIS_SET(x)\
+ FIELD_PREP(PORT_CONF_USGMII_CFG_SHYST_DIS, x)
+#define PORT_CONF_USGMII_CFG_SHYST_DIS_GET(x)\
+ FIELD_GET(PORT_CONF_USGMII_CFG_SHYST_DIS, x)
+
+#define PORT_CONF_USGMII_CFG_E_DET_ENA BIT(5)
+#define PORT_CONF_USGMII_CFG_E_DET_ENA_SET(x)\
+ FIELD_PREP(PORT_CONF_USGMII_CFG_E_DET_ENA, x)
+#define PORT_CONF_USGMII_CFG_E_DET_ENA_GET(x)\
+ FIELD_GET(PORT_CONF_USGMII_CFG_E_DET_ENA, x)
+
+#define PORT_CONF_USGMII_CFG_USE_I1_ENA BIT(4)
+#define PORT_CONF_USGMII_CFG_USE_I1_ENA_SET(x)\
+ FIELD_PREP(PORT_CONF_USGMII_CFG_USE_I1_ENA, x)
+#define PORT_CONF_USGMII_CFG_USE_I1_ENA_GET(x)\
+ FIELD_GET(PORT_CONF_USGMII_CFG_USE_I1_ENA, x)
+
+#define PORT_CONF_USGMII_CFG_QUAD_MODE BIT(1)
+#define PORT_CONF_USGMII_CFG_QUAD_MODE_SET(x)\
+ FIELD_PREP(PORT_CONF_USGMII_CFG_QUAD_MODE, x)
+#define PORT_CONF_USGMII_CFG_QUAD_MODE_GET(x)\
+ FIELD_GET(PORT_CONF_USGMII_CFG_QUAD_MODE, x)
+
+/* DEVCPU_PTP:PTP_CFG:PTP_PIN_INTR */
+#define PTP_PTP_PIN_INTR __REG(TARGET_PTP,\
+ 0, 1, 320, 0, 1, 16, 0, 0, 1, 4)
+
+#define PTP_PTP_PIN_INTR_INTR_PTP GENMASK(4, 0)
+#define PTP_PTP_PIN_INTR_INTR_PTP_SET(x)\
+ FIELD_PREP(PTP_PTP_PIN_INTR_INTR_PTP, x)
+#define PTP_PTP_PIN_INTR_INTR_PTP_GET(x)\
+ FIELD_GET(PTP_PTP_PIN_INTR_INTR_PTP, x)
+
+/* DEVCPU_PTP:PTP_CFG:PTP_PIN_INTR_ENA */
+#define PTP_PTP_PIN_INTR_ENA __REG(TARGET_PTP,\
+ 0, 1, 320, 0, 1, 16, 4, 0, 1, 4)
+
+#define PTP_PTP_PIN_INTR_ENA_INTR_PTP_ENA GENMASK(4, 0)
+#define PTP_PTP_PIN_INTR_ENA_INTR_PTP_ENA_SET(x)\
+ FIELD_PREP(PTP_PTP_PIN_INTR_ENA_INTR_PTP_ENA, x)
+#define PTP_PTP_PIN_INTR_ENA_INTR_PTP_ENA_GET(x)\
+ FIELD_GET(PTP_PTP_PIN_INTR_ENA_INTR_PTP_ENA, x)
+
+/* DEVCPU_PTP:PTP_CFG:PTP_INTR_IDENT */
+#define PTP_PTP_INTR_IDENT __REG(TARGET_PTP,\
+ 0, 1, 320, 0, 1, 16, 8, 0, 1, 4)
+
+#define PTP_PTP_INTR_IDENT_INTR_PTP_IDENT GENMASK(4, 0)
+#define PTP_PTP_INTR_IDENT_INTR_PTP_IDENT_SET(x)\
+ FIELD_PREP(PTP_PTP_INTR_IDENT_INTR_PTP_IDENT, x)
+#define PTP_PTP_INTR_IDENT_INTR_PTP_IDENT_GET(x)\
+ FIELD_GET(PTP_PTP_INTR_IDENT_INTR_PTP_IDENT, x)
+
+/* DEVCPU_PTP:PTP_CFG:PTP_DOM_CFG */
+#define PTP_PTP_DOM_CFG __REG(TARGET_PTP,\
+ 0, 1, 320, 0, 1, 16, 12, 0, 1, 4)
+
+#define PTP_PTP_DOM_CFG_PTP_ENA GENMASK(11, 9)
+#define PTP_PTP_DOM_CFG_PTP_ENA_SET(x)\
+ FIELD_PREP(PTP_PTP_DOM_CFG_PTP_ENA, x)
+#define PTP_PTP_DOM_CFG_PTP_ENA_GET(x)\
+ FIELD_GET(PTP_PTP_DOM_CFG_PTP_ENA, x)
+
+#define PTP_PTP_DOM_CFG_PTP_HOLD GENMASK(8, 6)
+#define PTP_PTP_DOM_CFG_PTP_HOLD_SET(x)\
+ FIELD_PREP(PTP_PTP_DOM_CFG_PTP_HOLD, x)
+#define PTP_PTP_DOM_CFG_PTP_HOLD_GET(x)\
+ FIELD_GET(PTP_PTP_DOM_CFG_PTP_HOLD, x)
+
+#define PTP_PTP_DOM_CFG_PTP_TOD_FREEZE GENMASK(5, 3)
+#define PTP_PTP_DOM_CFG_PTP_TOD_FREEZE_SET(x)\
+ FIELD_PREP(PTP_PTP_DOM_CFG_PTP_TOD_FREEZE, x)
+#define PTP_PTP_DOM_CFG_PTP_TOD_FREEZE_GET(x)\
+ FIELD_GET(PTP_PTP_DOM_CFG_PTP_TOD_FREEZE, x)
+
+#define PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS GENMASK(2, 0)
+#define PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS_SET(x)\
+ FIELD_PREP(PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS, x)
+#define PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS_GET(x)\
+ FIELD_GET(PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS, x)
+
+/* DEVCPU_PTP:PTP_TOD_DOMAINS:CLK_PER_CFG */
+#define PTP_CLK_PER_CFG(g, r) __REG(TARGET_PTP,\
+ 0, 1, 336, g, 3, 28, 0, r, 2, 4)
+
+/* DEVCPU_PTP:PTP_TOD_DOMAINS:PTP_CUR_NSEC */
+#define PTP_PTP_CUR_NSEC(g) __REG(TARGET_PTP,\
+ 0, 1, 336, g, 3, 28, 8, 0, 1, 4)
+
+#define PTP_PTP_CUR_NSEC_PTP_CUR_NSEC GENMASK(29, 0)
+#define PTP_PTP_CUR_NSEC_PTP_CUR_NSEC_SET(x)\
+ FIELD_PREP(PTP_PTP_CUR_NSEC_PTP_CUR_NSEC, x)
+#define PTP_PTP_CUR_NSEC_PTP_CUR_NSEC_GET(x)\
+ FIELD_GET(PTP_PTP_CUR_NSEC_PTP_CUR_NSEC, x)
+
+/* DEVCPU_PTP:PTP_TOD_DOMAINS:PTP_CUR_NSEC_FRAC */
+#define PTP_PTP_CUR_NSEC_FRAC(g) __REG(TARGET_PTP,\
+ 0, 1, 336, g, 3, 28, 12, 0, 1, 4)
+
+#define PTP_PTP_CUR_NSEC_FRAC_PTP_CUR_NSEC_FRAC GENMASK(7, 0)
+#define PTP_PTP_CUR_NSEC_FRAC_PTP_CUR_NSEC_FRAC_SET(x)\
+ FIELD_PREP(PTP_PTP_CUR_NSEC_FRAC_PTP_CUR_NSEC_FRAC, x)
+#define PTP_PTP_CUR_NSEC_FRAC_PTP_CUR_NSEC_FRAC_GET(x)\
+ FIELD_GET(PTP_PTP_CUR_NSEC_FRAC_PTP_CUR_NSEC_FRAC, x)
+
+/* DEVCPU_PTP:PTP_TOD_DOMAINS:PTP_CUR_SEC_LSB */
+#define PTP_PTP_CUR_SEC_LSB(g) __REG(TARGET_PTP,\
+ 0, 1, 336, g, 3, 28, 16, 0, 1, 4)
+
+/* DEVCPU_PTP:PTP_TOD_DOMAINS:PTP_CUR_SEC_MSB */
+#define PTP_PTP_CUR_SEC_MSB(g) __REG(TARGET_PTP,\
+ 0, 1, 336, g, 3, 28, 20, 0, 1, 4)
+
+#define PTP_PTP_CUR_SEC_MSB_PTP_CUR_SEC_MSB GENMASK(15, 0)
+#define PTP_PTP_CUR_SEC_MSB_PTP_CUR_SEC_MSB_SET(x)\
+ FIELD_PREP(PTP_PTP_CUR_SEC_MSB_PTP_CUR_SEC_MSB, x)
+#define PTP_PTP_CUR_SEC_MSB_PTP_CUR_SEC_MSB_GET(x)\
+ FIELD_GET(PTP_PTP_CUR_SEC_MSB_PTP_CUR_SEC_MSB, x)
+
+/* DEVCPU_PTP:PTP_TOD_DOMAINS:NTP_CUR_NSEC */
+#define PTP_NTP_CUR_NSEC(g) __REG(TARGET_PTP,\
+ 0, 1, 336, g, 3, 28, 24, 0, 1, 4)
+
+/* DEVCPU_PTP:PTP_PINS:PTP_PIN_CFG */
+#define PTP_PTP_PIN_CFG(g) __REG(TARGET_PTP,\
+ 0, 1, 0, g, 5, 64, 0, 0, 1, 4)
+
+#define PTP_PTP_PIN_CFG_PTP_PIN_ACTION GENMASK(28, 26)
+#define PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(x)\
+ FIELD_PREP(PTP_PTP_PIN_CFG_PTP_PIN_ACTION, x)
+#define PTP_PTP_PIN_CFG_PTP_PIN_ACTION_GET(x)\
+ FIELD_GET(PTP_PTP_PIN_CFG_PTP_PIN_ACTION, x)
+
+#define PTP_PTP_PIN_CFG_PTP_PIN_SYNC GENMASK(25, 24)
+#define PTP_PTP_PIN_CFG_PTP_PIN_SYNC_SET(x)\
+ FIELD_PREP(PTP_PTP_PIN_CFG_PTP_PIN_SYNC, x)
+#define PTP_PTP_PIN_CFG_PTP_PIN_SYNC_GET(x)\
+ FIELD_GET(PTP_PTP_PIN_CFG_PTP_PIN_SYNC, x)
+
+#define PTP_PTP_PIN_CFG_PTP_PIN_INV_POL BIT(23)
+#define PTP_PTP_PIN_CFG_PTP_PIN_INV_POL_SET(x)\
+ FIELD_PREP(PTP_PTP_PIN_CFG_PTP_PIN_INV_POL, x)
+#define PTP_PTP_PIN_CFG_PTP_PIN_INV_POL_GET(x)\
+ FIELD_GET(PTP_PTP_PIN_CFG_PTP_PIN_INV_POL, x)
+
+#define PTP_PTP_PIN_CFG_PTP_PIN_SELECT GENMASK(22, 21)
+#define PTP_PTP_PIN_CFG_PTP_PIN_SELECT_SET(x)\
+ FIELD_PREP(PTP_PTP_PIN_CFG_PTP_PIN_SELECT, x)
+#define PTP_PTP_PIN_CFG_PTP_PIN_SELECT_GET(x)\
+ FIELD_GET(PTP_PTP_PIN_CFG_PTP_PIN_SELECT, x)
+
+#define PTP_PTP_PIN_CFG_PTP_CLK_SELECT GENMASK(20, 18)
+#define PTP_PTP_PIN_CFG_PTP_CLK_SELECT_SET(x)\
+ FIELD_PREP(PTP_PTP_PIN_CFG_PTP_CLK_SELECT, x)
+#define PTP_PTP_PIN_CFG_PTP_CLK_SELECT_GET(x)\
+ FIELD_GET(PTP_PTP_PIN_CFG_PTP_CLK_SELECT, x)
+
+#define PTP_PTP_PIN_CFG_PTP_PIN_DOM GENMASK(17, 16)
+#define PTP_PTP_PIN_CFG_PTP_PIN_DOM_SET(x)\
+ FIELD_PREP(PTP_PTP_PIN_CFG_PTP_PIN_DOM, x)
+#define PTP_PTP_PIN_CFG_PTP_PIN_DOM_GET(x)\
+ FIELD_GET(PTP_PTP_PIN_CFG_PTP_PIN_DOM, x)
+
+#define PTP_PTP_PIN_CFG_PTP_PIN_OPT GENMASK(15, 14)
+#define PTP_PTP_PIN_CFG_PTP_PIN_OPT_SET(x)\
+ FIELD_PREP(PTP_PTP_PIN_CFG_PTP_PIN_OPT, x)
+#define PTP_PTP_PIN_CFG_PTP_PIN_OPT_GET(x)\
+ FIELD_GET(PTP_PTP_PIN_CFG_PTP_PIN_OPT, x)
+
+#define PTP_PTP_PIN_CFG_PTP_PIN_EMBEDDED_CLK BIT(13)
+#define PTP_PTP_PIN_CFG_PTP_PIN_EMBEDDED_CLK_SET(x)\
+ FIELD_PREP(PTP_PTP_PIN_CFG_PTP_PIN_EMBEDDED_CLK, x)
+#define PTP_PTP_PIN_CFG_PTP_PIN_EMBEDDED_CLK_GET(x)\
+ FIELD_GET(PTP_PTP_PIN_CFG_PTP_PIN_EMBEDDED_CLK, x)
+
+#define PTP_PTP_PIN_CFG_PTP_PIN_OUTP_OFS GENMASK(12, 0)
+#define PTP_PTP_PIN_CFG_PTP_PIN_OUTP_OFS_SET(x)\
+ FIELD_PREP(PTP_PTP_PIN_CFG_PTP_PIN_OUTP_OFS, x)
+#define PTP_PTP_PIN_CFG_PTP_PIN_OUTP_OFS_GET(x)\
+ FIELD_GET(PTP_PTP_PIN_CFG_PTP_PIN_OUTP_OFS, x)
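+
+/* Usage sketch, assuming the spx5_rmw() accessor and the pin-action
+ * encoding (IDLE/LOAD/SAVE/CLOCK/DELTA/TOD) used by the sparx5 PTP
+ * code: the current time of a domain can be latched by triggering a
+ * SAVE action on a pin bound to that domain, after which the
+ * PTP_TOD_SEC_MSB/LSB and PTP_TOD_NSEC registers below hold the
+ * captured timestamp:
+ *
+ *   spx5_rmw(PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(PTP_PIN_ACTION_SAVE) |
+ *            PTP_PTP_PIN_CFG_PTP_PIN_DOM_SET(domain),
+ *            PTP_PTP_PIN_CFG_PTP_PIN_ACTION |
+ *            PTP_PTP_PIN_CFG_PTP_PIN_DOM,
+ *            sparx5, PTP_PTP_PIN_CFG(pin));
+ */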
+
+/* DEVCPU_PTP:PTP_PINS:PTP_TOD_SEC_MSB */
+#define PTP_PTP_TOD_SEC_MSB(g) __REG(TARGET_PTP,\
+ 0, 1, 0, g, 5, 64, 4, 0, 1, 4)
+
+#define PTP_PTP_TOD_SEC_MSB_PTP_TOD_SEC_MSB GENMASK(15, 0)
+#define PTP_PTP_TOD_SEC_MSB_PTP_TOD_SEC_MSB_SET(x)\
+ FIELD_PREP(PTP_PTP_TOD_SEC_MSB_PTP_TOD_SEC_MSB, x)
+#define PTP_PTP_TOD_SEC_MSB_PTP_TOD_SEC_MSB_GET(x)\
+ FIELD_GET(PTP_PTP_TOD_SEC_MSB_PTP_TOD_SEC_MSB, x)
+
+/* DEVCPU_PTP:PTP_PINS:PTP_TOD_SEC_LSB */
+#define PTP_PTP_TOD_SEC_LSB(g) __REG(TARGET_PTP,\
+ 0, 1, 0, g, 5, 64, 8, 0, 1, 4)
+
+/* DEVCPU_PTP:PTP_PINS:PTP_TOD_NSEC */
+#define PTP_PTP_TOD_NSEC(g) __REG(TARGET_PTP,\
+ 0, 1, 0, g, 5, 64, 12, 0, 1, 4)
+
+#define PTP_PTP_TOD_NSEC_PTP_TOD_NSEC GENMASK(29, 0)
+#define PTP_PTP_TOD_NSEC_PTP_TOD_NSEC_SET(x)\
+ FIELD_PREP(PTP_PTP_TOD_NSEC_PTP_TOD_NSEC, x)
+#define PTP_PTP_TOD_NSEC_PTP_TOD_NSEC_GET(x)\
+ FIELD_GET(PTP_PTP_TOD_NSEC_PTP_TOD_NSEC, x)
+
+/* DEVCPU_PTP:PTP_PINS:PTP_TOD_NSEC_FRAC */
+#define PTP_PTP_TOD_NSEC_FRAC(g) __REG(TARGET_PTP,\
+ 0, 1, 0, g, 5, 64, 16, 0, 1, 4)
+
+#define PTP_PTP_TOD_NSEC_FRAC_PTP_TOD_NSEC_FRAC GENMASK(7, 0)
+#define PTP_PTP_TOD_NSEC_FRAC_PTP_TOD_NSEC_FRAC_SET(x)\
+ FIELD_PREP(PTP_PTP_TOD_NSEC_FRAC_PTP_TOD_NSEC_FRAC, x)
+#define PTP_PTP_TOD_NSEC_FRAC_PTP_TOD_NSEC_FRAC_GET(x)\
+ FIELD_GET(PTP_PTP_TOD_NSEC_FRAC_PTP_TOD_NSEC_FRAC, x)
+
+/* DEVCPU_PTP:PTP_PINS:NTP_NSEC */
+#define PTP_NTP_NSEC(g) __REG(TARGET_PTP,\
+ 0, 1, 0, g, 5, 64, 20, 0, 1, 4)
+
+/* DEVCPU_PTP:PTP_PINS:PIN_WF_HIGH_PERIOD */
+#define PTP_PIN_WF_HIGH_PERIOD(g) __REG(TARGET_PTP,\
+ 0, 1, 0, g, 5, 64, 24, 0, 1, 4)
+
+#define PTP_PIN_WF_HIGH_PERIOD_PIN_WFH GENMASK(29, 0)
+#define PTP_PIN_WF_HIGH_PERIOD_PIN_WFH_SET(x)\
+ FIELD_PREP(PTP_PIN_WF_HIGH_PERIOD_PIN_WFH, x)
+#define PTP_PIN_WF_HIGH_PERIOD_PIN_WFH_GET(x)\
+ FIELD_GET(PTP_PIN_WF_HIGH_PERIOD_PIN_WFH, x)
+
+/* DEVCPU_PTP:PTP_PINS:PIN_WF_LOW_PERIOD */
+#define PTP_PIN_WF_LOW_PERIOD(g) __REG(TARGET_PTP,\
+ 0, 1, 0, g, 5, 64, 28, 0, 1, 4)
+
+#define PTP_PIN_WF_LOW_PERIOD_PIN_WFL GENMASK(29, 0)
+#define PTP_PIN_WF_LOW_PERIOD_PIN_WFL_SET(x)\
+ FIELD_PREP(PTP_PIN_WF_LOW_PERIOD_PIN_WFL, x)
+#define PTP_PIN_WF_LOW_PERIOD_PIN_WFL_GET(x)\
+ FIELD_GET(PTP_PIN_WF_LOW_PERIOD_PIN_WFL, x)
+
+/* DEVCPU_PTP:PTP_PINS:PIN_IOBOUNCH_DELAY */
+#define PTP_PIN_IOBOUNCH_DELAY(g) __REG(TARGET_PTP,\
+ 0, 1, 0, g, 5, 64, 32, 0, 1, 4)
+
+#define PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_VAL GENMASK(18, 3)
+#define PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_VAL_SET(x)\
+ FIELD_PREP(PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_VAL, x)
+#define PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_VAL_GET(x)\
+ FIELD_GET(PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_VAL, x)
+
+#define PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_CFG GENMASK(2, 0)
+#define PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_CFG_SET(x)\
+ FIELD_PREP(PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_CFG, x)
+#define PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_CFG_GET(x)\
+ FIELD_GET(PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_CFG, x)
+
+/* DEVCPU_PTP:PHASE_DETECTOR_CTRL:PHAD_CTRL */
+#define PTP_PHAD_CTRL(g) __REG(TARGET_PTP,\
+ 0, 1, 420, g, 5, 8, 0, 0, 1, 4)
+
+#define PTP_PHAD_CTRL_PHAD_ENA BIT(7)
+#define PTP_PHAD_CTRL_PHAD_ENA_SET(x)\
+ FIELD_PREP(PTP_PHAD_CTRL_PHAD_ENA, x)
+#define PTP_PHAD_CTRL_PHAD_ENA_GET(x)\
+ FIELD_GET(PTP_PHAD_CTRL_PHAD_ENA, x)
+
+#define PTP_PHAD_CTRL_PHAD_FAILED BIT(6)
+#define PTP_PHAD_CTRL_PHAD_FAILED_SET(x)\
+ FIELD_PREP(PTP_PHAD_CTRL_PHAD_FAILED, x)
+#define PTP_PHAD_CTRL_PHAD_FAILED_GET(x)\
+ FIELD_GET(PTP_PHAD_CTRL_PHAD_FAILED, x)
+
+#define PTP_PHAD_CTRL_REDUCED_RES GENMASK(5, 3)
+#define PTP_PHAD_CTRL_REDUCED_RES_SET(x)\
+ FIELD_PREP(PTP_PHAD_CTRL_REDUCED_RES, x)
+#define PTP_PHAD_CTRL_REDUCED_RES_GET(x)\
+ FIELD_GET(PTP_PHAD_CTRL_REDUCED_RES, x)
+
+#define PTP_PHAD_CTRL_LOCK_ACC GENMASK(2, 0)
+#define PTP_PHAD_CTRL_LOCK_ACC_SET(x)\
+ FIELD_PREP(PTP_PHAD_CTRL_LOCK_ACC, x)
+#define PTP_PHAD_CTRL_LOCK_ACC_GET(x)\
+ FIELD_GET(PTP_PHAD_CTRL_LOCK_ACC, x)
+
+/* DEVCPU_PTP:PHASE_DETECTOR_CTRL:PHAD_CYC_STAT */
+#define PTP_PHAD_CYC_STAT(g) __REG(TARGET_PTP,\
+ 0, 1, 420, g, 5, 8, 4, 0, 1, 4)
+
+/* QFWD:SYSTEM:SWITCH_PORT_MODE */
+#define QFWD_SWITCH_PORT_MODE(r) __REG(TARGET_QFWD,\
+ 0, 1, 0, 0, 1, 340, 0, r, 70, 4)
+
+#define QFWD_SWITCH_PORT_MODE_PORT_ENA BIT(19)
+#define QFWD_SWITCH_PORT_MODE_PORT_ENA_SET(x)\
+ FIELD_PREP(QFWD_SWITCH_PORT_MODE_PORT_ENA, x)
+#define QFWD_SWITCH_PORT_MODE_PORT_ENA_GET(x)\
+ FIELD_GET(QFWD_SWITCH_PORT_MODE_PORT_ENA, x)
+
+#define QFWD_SWITCH_PORT_MODE_FWD_URGENCY GENMASK(18, 10)
+#define QFWD_SWITCH_PORT_MODE_FWD_URGENCY_SET(x)\
+ FIELD_PREP(QFWD_SWITCH_PORT_MODE_FWD_URGENCY, x)
+#define QFWD_SWITCH_PORT_MODE_FWD_URGENCY_GET(x)\
+ FIELD_GET(QFWD_SWITCH_PORT_MODE_FWD_URGENCY, x)
+
+#define QFWD_SWITCH_PORT_MODE_YEL_RSRVD GENMASK(9, 6)
+#define QFWD_SWITCH_PORT_MODE_YEL_RSRVD_SET(x)\
+ FIELD_PREP(QFWD_SWITCH_PORT_MODE_YEL_RSRVD, x)
+#define QFWD_SWITCH_PORT_MODE_YEL_RSRVD_GET(x)\
+ FIELD_GET(QFWD_SWITCH_PORT_MODE_YEL_RSRVD, x)
+
+#define QFWD_SWITCH_PORT_MODE_INGRESS_DROP_MODE BIT(5)
+#define QFWD_SWITCH_PORT_MODE_INGRESS_DROP_MODE_SET(x)\
+ FIELD_PREP(QFWD_SWITCH_PORT_MODE_INGRESS_DROP_MODE, x)
+#define QFWD_SWITCH_PORT_MODE_INGRESS_DROP_MODE_GET(x)\
+ FIELD_GET(QFWD_SWITCH_PORT_MODE_INGRESS_DROP_MODE, x)
+
+#define QFWD_SWITCH_PORT_MODE_IGR_NO_SHARING BIT(4)
+#define QFWD_SWITCH_PORT_MODE_IGR_NO_SHARING_SET(x)\
+ FIELD_PREP(QFWD_SWITCH_PORT_MODE_IGR_NO_SHARING, x)
+#define QFWD_SWITCH_PORT_MODE_IGR_NO_SHARING_GET(x)\
+ FIELD_GET(QFWD_SWITCH_PORT_MODE_IGR_NO_SHARING, x)
+
+#define QFWD_SWITCH_PORT_MODE_EGR_NO_SHARING BIT(3)
+#define QFWD_SWITCH_PORT_MODE_EGR_NO_SHARING_SET(x)\
+ FIELD_PREP(QFWD_SWITCH_PORT_MODE_EGR_NO_SHARING, x)
+#define QFWD_SWITCH_PORT_MODE_EGR_NO_SHARING_GET(x)\
+ FIELD_GET(QFWD_SWITCH_PORT_MODE_EGR_NO_SHARING, x)
+
+#define QFWD_SWITCH_PORT_MODE_EGRESS_DROP_MODE BIT(2)
+#define QFWD_SWITCH_PORT_MODE_EGRESS_DROP_MODE_SET(x)\
+ FIELD_PREP(QFWD_SWITCH_PORT_MODE_EGRESS_DROP_MODE, x)
+#define QFWD_SWITCH_PORT_MODE_EGRESS_DROP_MODE_GET(x)\
+ FIELD_GET(QFWD_SWITCH_PORT_MODE_EGRESS_DROP_MODE, x)
+
+#define QFWD_SWITCH_PORT_MODE_EGRESS_RSRV_DIS BIT(1)
+#define QFWD_SWITCH_PORT_MODE_EGRESS_RSRV_DIS_SET(x)\
+ FIELD_PREP(QFWD_SWITCH_PORT_MODE_EGRESS_RSRV_DIS, x)
+#define QFWD_SWITCH_PORT_MODE_EGRESS_RSRV_DIS_GET(x)\
+ FIELD_GET(QFWD_SWITCH_PORT_MODE_EGRESS_RSRV_DIS, x)
+
+#define QFWD_SWITCH_PORT_MODE_LEARNALL_MORE BIT(0)
+#define QFWD_SWITCH_PORT_MODE_LEARNALL_MORE_SET(x)\
+ FIELD_PREP(QFWD_SWITCH_PORT_MODE_LEARNALL_MORE, x)
+#define QFWD_SWITCH_PORT_MODE_LEARNALL_MORE_GET(x)\
+ FIELD_GET(QFWD_SWITCH_PORT_MODE_LEARNALL_MORE, x)
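+
+/* A minimal usage sketch (illustrative, following the driver's
+ * spx5_rmw() pattern seen elsewhere in this series): the _SET wrapper
+ * positions a value with FIELD_PREP while the bare mask names the bits
+ * to update, e.g.
+ *
+ *   spx5_rmw(QFWD_SWITCH_PORT_MODE_PORT_ENA_SET(1),
+ *            QFWD_SWITCH_PORT_MODE_PORT_ENA,
+ *            sparx5, QFWD_SWITCH_PORT_MODE(portno));
+ *
+ * enables forwarding on port portno without disturbing the other fields.
+ */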
+
+/* QRES:RES_CTRL:RES_CFG */
+#define QRES_RES_CFG(g) __REG(TARGET_QRES,\
+ 0, 1, 0, g, 5120, 16, 0, 0, 1, 4)
+
+#define QRES_RES_CFG_WM_HIGH GENMASK(11, 0)
+#define QRES_RES_CFG_WM_HIGH_SET(x)\
+ FIELD_PREP(QRES_RES_CFG_WM_HIGH, x)
+#define QRES_RES_CFG_WM_HIGH_GET(x)\
+ FIELD_GET(QRES_RES_CFG_WM_HIGH, x)
+
+/* QRES:RES_CTRL:RES_STAT */
+#define QRES_RES_STAT(g) __REG(TARGET_QRES,\
+ 0, 1, 0, g, 5120, 16, 4, 0, 1, 4)
+
+#define QRES_RES_STAT_MAXUSE GENMASK(20, 0)
+#define QRES_RES_STAT_MAXUSE_SET(x)\
+ FIELD_PREP(QRES_RES_STAT_MAXUSE, x)
+#define QRES_RES_STAT_MAXUSE_GET(x)\
+ FIELD_GET(QRES_RES_STAT_MAXUSE, x)
+
+/* QRES:RES_CTRL:RES_STAT_CUR */
+#define QRES_RES_STAT_CUR(g) __REG(TARGET_QRES,\
+ 0, 1, 0, g, 5120, 16, 8, 0, 1, 4)
+
+#define QRES_RES_STAT_CUR_INUSE GENMASK(20, 0)
+#define QRES_RES_STAT_CUR_INUSE_SET(x)\
+ FIELD_PREP(QRES_RES_STAT_CUR_INUSE, x)
+#define QRES_RES_STAT_CUR_INUSE_GET(x)\
+ FIELD_GET(QRES_RES_STAT_CUR_INUSE, x)
+
+/* DEVCPU_QS:XTR:XTR_GRP_CFG */
+#define QS_XTR_GRP_CFG(r) __REG(TARGET_QS,\
+ 0, 1, 0, 0, 1, 36, 0, r, 2, 4)
+
+#define QS_XTR_GRP_CFG_MODE GENMASK(3, 2)
+#define QS_XTR_GRP_CFG_MODE_SET(x)\
+ FIELD_PREP(QS_XTR_GRP_CFG_MODE, x)
+#define QS_XTR_GRP_CFG_MODE_GET(x)\
+ FIELD_GET(QS_XTR_GRP_CFG_MODE, x)
+
+#define QS_XTR_GRP_CFG_STATUS_WORD_POS BIT(1)
+#define QS_XTR_GRP_CFG_STATUS_WORD_POS_SET(x)\
+ FIELD_PREP(QS_XTR_GRP_CFG_STATUS_WORD_POS, x)
+#define QS_XTR_GRP_CFG_STATUS_WORD_POS_GET(x)\
+ FIELD_GET(QS_XTR_GRP_CFG_STATUS_WORD_POS, x)
+
+#define QS_XTR_GRP_CFG_BYTE_SWAP BIT(0)
+#define QS_XTR_GRP_CFG_BYTE_SWAP_SET(x)\
+ FIELD_PREP(QS_XTR_GRP_CFG_BYTE_SWAP, x)
+#define QS_XTR_GRP_CFG_BYTE_SWAP_GET(x)\
+ FIELD_GET(QS_XTR_GRP_CFG_BYTE_SWAP, x)
+
+/* DEVCPU_QS:XTR:XTR_RD */
+#define QS_XTR_RD(r) __REG(TARGET_QS,\
+ 0, 1, 0, 0, 1, 36, 8, r, 2, 4)
+
+/* DEVCPU_QS:XTR:XTR_FLUSH */
+#define QS_XTR_FLUSH __REG(TARGET_QS,\
+ 0, 1, 0, 0, 1, 36, 24, 0, 1, 4)
+
+#define QS_XTR_FLUSH_FLUSH GENMASK(1, 0)
+#define QS_XTR_FLUSH_FLUSH_SET(x)\
+ FIELD_PREP(QS_XTR_FLUSH_FLUSH, x)
+#define QS_XTR_FLUSH_FLUSH_GET(x)\
+ FIELD_GET(QS_XTR_FLUSH_FLUSH, x)
+
+/* DEVCPU_QS:XTR:XTR_DATA_PRESENT */
+#define QS_XTR_DATA_PRESENT __REG(TARGET_QS,\
+ 0, 1, 0, 0, 1, 36, 28, 0, 1, 4)
+
+#define QS_XTR_DATA_PRESENT_DATA_PRESENT GENMASK(1, 0)
+#define QS_XTR_DATA_PRESENT_DATA_PRESENT_SET(x)\
+ FIELD_PREP(QS_XTR_DATA_PRESENT_DATA_PRESENT, x)
+#define QS_XTR_DATA_PRESENT_DATA_PRESENT_GET(x)\
+ FIELD_GET(QS_XTR_DATA_PRESENT_DATA_PRESENT, x)
+
+/* DEVCPU_QS:INJ:INJ_GRP_CFG */
+#define QS_INJ_GRP_CFG(r) __REG(TARGET_QS,\
+ 0, 1, 36, 0, 1, 40, 0, r, 2, 4)
+
+#define QS_INJ_GRP_CFG_MODE GENMASK(3, 2)
+#define QS_INJ_GRP_CFG_MODE_SET(x)\
+ FIELD_PREP(QS_INJ_GRP_CFG_MODE, x)
+#define QS_INJ_GRP_CFG_MODE_GET(x)\
+ FIELD_GET(QS_INJ_GRP_CFG_MODE, x)
+
+#define QS_INJ_GRP_CFG_BYTE_SWAP BIT(0)
+#define QS_INJ_GRP_CFG_BYTE_SWAP_SET(x)\
+ FIELD_PREP(QS_INJ_GRP_CFG_BYTE_SWAP, x)
+#define QS_INJ_GRP_CFG_BYTE_SWAP_GET(x)\
+ FIELD_GET(QS_INJ_GRP_CFG_BYTE_SWAP, x)
+
+/* DEVCPU_QS:INJ:INJ_WR */
+#define QS_INJ_WR(r) __REG(TARGET_QS,\
+ 0, 1, 36, 0, 1, 40, 8, r, 2, 4)
+
+/* DEVCPU_QS:INJ:INJ_CTRL */
+#define QS_INJ_CTRL(r) __REG(TARGET_QS,\
+ 0, 1, 36, 0, 1, 40, 16, r, 2, 4)
+
+#define QS_INJ_CTRL_GAP_SIZE GENMASK(24, 21)
+#define QS_INJ_CTRL_GAP_SIZE_SET(x)\
+ FIELD_PREP(QS_INJ_CTRL_GAP_SIZE, x)
+#define QS_INJ_CTRL_GAP_SIZE_GET(x)\
+ FIELD_GET(QS_INJ_CTRL_GAP_SIZE, x)
+
+#define QS_INJ_CTRL_ABORT BIT(20)
+#define QS_INJ_CTRL_ABORT_SET(x)\
+ FIELD_PREP(QS_INJ_CTRL_ABORT, x)
+#define QS_INJ_CTRL_ABORT_GET(x)\
+ FIELD_GET(QS_INJ_CTRL_ABORT, x)
+
+#define QS_INJ_CTRL_EOF BIT(19)
+#define QS_INJ_CTRL_EOF_SET(x)\
+ FIELD_PREP(QS_INJ_CTRL_EOF, x)
+#define QS_INJ_CTRL_EOF_GET(x)\
+ FIELD_GET(QS_INJ_CTRL_EOF, x)
+
+#define QS_INJ_CTRL_SOF BIT(18)
+#define QS_INJ_CTRL_SOF_SET(x)\
+ FIELD_PREP(QS_INJ_CTRL_SOF, x)
+#define QS_INJ_CTRL_SOF_GET(x)\
+ FIELD_GET(QS_INJ_CTRL_SOF, x)
+
+#define QS_INJ_CTRL_VLD_BYTES GENMASK(17, 16)
+#define QS_INJ_CTRL_VLD_BYTES_SET(x)\
+ FIELD_PREP(QS_INJ_CTRL_VLD_BYTES, x)
+#define QS_INJ_CTRL_VLD_BYTES_GET(x)\
+ FIELD_GET(QS_INJ_CTRL_VLD_BYTES, x)
+
+/* DEVCPU_QS:INJ:INJ_STATUS */
+#define QS_INJ_STATUS __REG(TARGET_QS,\
+ 0, 1, 36, 0, 1, 40, 24, 0, 1, 4)
+
+#define QS_INJ_STATUS_WMARK_REACHED GENMASK(5, 4)
+#define QS_INJ_STATUS_WMARK_REACHED_SET(x)\
+ FIELD_PREP(QS_INJ_STATUS_WMARK_REACHED, x)
+#define QS_INJ_STATUS_WMARK_REACHED_GET(x)\
+ FIELD_GET(QS_INJ_STATUS_WMARK_REACHED, x)
+
+#define QS_INJ_STATUS_FIFO_RDY GENMASK(3, 2)
+#define QS_INJ_STATUS_FIFO_RDY_SET(x)\
+ FIELD_PREP(QS_INJ_STATUS_FIFO_RDY, x)
+#define QS_INJ_STATUS_FIFO_RDY_GET(x)\
+ FIELD_GET(QS_INJ_STATUS_FIFO_RDY, x)
+
+#define QS_INJ_STATUS_INJ_IN_PROGRESS GENMASK(1, 0)
+#define QS_INJ_STATUS_INJ_IN_PROGRESS_SET(x)\
+ FIELD_PREP(QS_INJ_STATUS_INJ_IN_PROGRESS, x)
+#define QS_INJ_STATUS_INJ_IN_PROGRESS_GET(x)\
+ FIELD_GET(QS_INJ_STATUS_INJ_IN_PROGRESS, x)
+
+/* QSYS:PAUSE_CFG:PAUSE_CFG */
+#define QSYS_PAUSE_CFG(r) __REG(TARGET_QSYS,\
+ 0, 1, 544, 0, 1, 1128, 0, r, 70, 4)
+
+#define QSYS_PAUSE_CFG_PAUSE_START GENMASK(25, 14)
+#define QSYS_PAUSE_CFG_PAUSE_START_SET(x)\
+ FIELD_PREP(QSYS_PAUSE_CFG_PAUSE_START, x)
+#define QSYS_PAUSE_CFG_PAUSE_START_GET(x)\
+ FIELD_GET(QSYS_PAUSE_CFG_PAUSE_START, x)
+
+#define QSYS_PAUSE_CFG_PAUSE_STOP GENMASK(13, 2)
+#define QSYS_PAUSE_CFG_PAUSE_STOP_SET(x)\
+ FIELD_PREP(QSYS_PAUSE_CFG_PAUSE_STOP, x)
+#define QSYS_PAUSE_CFG_PAUSE_STOP_GET(x)\
+ FIELD_GET(QSYS_PAUSE_CFG_PAUSE_STOP, x)
+
+#define QSYS_PAUSE_CFG_PAUSE_ENA BIT(1)
+#define QSYS_PAUSE_CFG_PAUSE_ENA_SET(x)\
+ FIELD_PREP(QSYS_PAUSE_CFG_PAUSE_ENA, x)
+#define QSYS_PAUSE_CFG_PAUSE_ENA_GET(x)\
+ FIELD_GET(QSYS_PAUSE_CFG_PAUSE_ENA, x)
+
+#define QSYS_PAUSE_CFG_AGGRESSIVE_TAILDROP_ENA BIT(0)
+#define QSYS_PAUSE_CFG_AGGRESSIVE_TAILDROP_ENA_SET(x)\
+ FIELD_PREP(QSYS_PAUSE_CFG_AGGRESSIVE_TAILDROP_ENA, x)
+#define QSYS_PAUSE_CFG_AGGRESSIVE_TAILDROP_ENA_GET(x)\
+ FIELD_GET(QSYS_PAUSE_CFG_AGGRESSIVE_TAILDROP_ENA, x)
+
+/* QSYS:PAUSE_CFG:ATOP */
+#define QSYS_ATOP(r) __REG(TARGET_QSYS,\
+ 0, 1, 544, 0, 1, 1128, 284, r, 70, 4)
+
+#define QSYS_ATOP_ATOP GENMASK(11, 0)
+#define QSYS_ATOP_ATOP_SET(x)\
+ FIELD_PREP(QSYS_ATOP_ATOP, x)
+#define QSYS_ATOP_ATOP_GET(x)\
+ FIELD_GET(QSYS_ATOP_ATOP, x)
+
+/* QSYS:PAUSE_CFG:FWD_PRESSURE */
+#define QSYS_FWD_PRESSURE(r) __REG(TARGET_QSYS,\
+ 0, 1, 544, 0, 1, 1128, 564, r, 70, 4)
+
+#define QSYS_FWD_PRESSURE_FWD_PRESSURE GENMASK(11, 1)
+#define QSYS_FWD_PRESSURE_FWD_PRESSURE_SET(x)\
+ FIELD_PREP(QSYS_FWD_PRESSURE_FWD_PRESSURE, x)
+#define QSYS_FWD_PRESSURE_FWD_PRESSURE_GET(x)\
+ FIELD_GET(QSYS_FWD_PRESSURE_FWD_PRESSURE, x)
+
+#define QSYS_FWD_PRESSURE_FWD_PRESSURE_DIS BIT(0)
+#define QSYS_FWD_PRESSURE_FWD_PRESSURE_DIS_SET(x)\
+ FIELD_PREP(QSYS_FWD_PRESSURE_FWD_PRESSURE_DIS, x)
+#define QSYS_FWD_PRESSURE_FWD_PRESSURE_DIS_GET(x)\
+ FIELD_GET(QSYS_FWD_PRESSURE_FWD_PRESSURE_DIS, x)
+
+/* QSYS:PAUSE_CFG:ATOP_TOT_CFG */
+#define QSYS_ATOP_TOT_CFG __REG(TARGET_QSYS,\
+ 0, 1, 544, 0, 1, 1128, 844, 0, 1, 4)
+
+#define QSYS_ATOP_TOT_CFG_ATOP_TOT GENMASK(11, 0)
+#define QSYS_ATOP_TOT_CFG_ATOP_TOT_SET(x)\
+ FIELD_PREP(QSYS_ATOP_TOT_CFG_ATOP_TOT, x)
+#define QSYS_ATOP_TOT_CFG_ATOP_TOT_GET(x)\
+ FIELD_GET(QSYS_ATOP_TOT_CFG_ATOP_TOT, x)
+
+/* QSYS:CALCFG:CAL_AUTO */
+#define QSYS_CAL_AUTO(r) __REG(TARGET_QSYS,\
+ 0, 1, 2304, 0, 1, 40, 0, r, 7, 4)
+
+#define QSYS_CAL_AUTO_CAL_AUTO GENMASK(29, 0)
+#define QSYS_CAL_AUTO_CAL_AUTO_SET(x)\
+ FIELD_PREP(QSYS_CAL_AUTO_CAL_AUTO, x)
+#define QSYS_CAL_AUTO_CAL_AUTO_GET(x)\
+ FIELD_GET(QSYS_CAL_AUTO_CAL_AUTO, x)
+
+/* QSYS:CALCFG:CAL_CTRL */
+#define QSYS_CAL_CTRL __REG(TARGET_QSYS,\
+ 0, 1, 2304, 0, 1, 40, 36, 0, 1, 4)
+
+#define QSYS_CAL_CTRL_CAL_MODE GENMASK(14, 11)
+#define QSYS_CAL_CTRL_CAL_MODE_SET(x)\
+ FIELD_PREP(QSYS_CAL_CTRL_CAL_MODE, x)
+#define QSYS_CAL_CTRL_CAL_MODE_GET(x)\
+ FIELD_GET(QSYS_CAL_CTRL_CAL_MODE, x)
+
+#define QSYS_CAL_CTRL_CAL_AUTO_GRANT_RATE GENMASK(10, 1)
+#define QSYS_CAL_CTRL_CAL_AUTO_GRANT_RATE_SET(x)\
+ FIELD_PREP(QSYS_CAL_CTRL_CAL_AUTO_GRANT_RATE, x)
+#define QSYS_CAL_CTRL_CAL_AUTO_GRANT_RATE_GET(x)\
+ FIELD_GET(QSYS_CAL_CTRL_CAL_AUTO_GRANT_RATE, x)
+
+#define QSYS_CAL_CTRL_CAL_AUTO_ERROR BIT(0)
+#define QSYS_CAL_CTRL_CAL_AUTO_ERROR_SET(x)\
+ FIELD_PREP(QSYS_CAL_CTRL_CAL_AUTO_ERROR, x)
+#define QSYS_CAL_CTRL_CAL_AUTO_ERROR_GET(x)\
+ FIELD_GET(QSYS_CAL_CTRL_CAL_AUTO_ERROR, x)
+
+/* QSYS:RAM_CTRL:RAM_INIT */
+#define QSYS_RAM_INIT __REG(TARGET_QSYS,\
+ 0, 1, 2344, 0, 1, 4, 0, 0, 1, 4)
+
+#define QSYS_RAM_INIT_RAM_INIT BIT(1)
+#define QSYS_RAM_INIT_RAM_INIT_SET(x)\
+ FIELD_PREP(QSYS_RAM_INIT_RAM_INIT, x)
+#define QSYS_RAM_INIT_RAM_INIT_GET(x)\
+ FIELD_GET(QSYS_RAM_INIT_RAM_INIT, x)
+
+#define QSYS_RAM_INIT_RAM_CFG_HOOK BIT(0)
+#define QSYS_RAM_INIT_RAM_CFG_HOOK_SET(x)\
+ FIELD_PREP(QSYS_RAM_INIT_RAM_CFG_HOOK, x)
+#define QSYS_RAM_INIT_RAM_CFG_HOOK_GET(x)\
+ FIELD_GET(QSYS_RAM_INIT_RAM_CFG_HOOK, x)
+
+/* REW:COMMON:OWN_UPSID */
+#define REW_OWN_UPSID(r) __REG(TARGET_REW,\
+ 0, 1, 387264, 0, 1, 1232, 0, r, 3, 4)
+
+#define REW_OWN_UPSID_OWN_UPSID GENMASK(4, 0)
+#define REW_OWN_UPSID_OWN_UPSID_SET(x)\
+ FIELD_PREP(REW_OWN_UPSID_OWN_UPSID, x)
+#define REW_OWN_UPSID_OWN_UPSID_GET(x)\
+ FIELD_GET(REW_OWN_UPSID_OWN_UPSID, x)
+
+/* REW:COMMON:RTAG_ETAG_CTRL */
+#define REW_RTAG_ETAG_CTRL(r) __REG(TARGET_REW,\
+ 0, 1, 387264, 0, 1, 1232, 560, r, 70, 4)
+
+#define REW_RTAG_ETAG_CTRL_IPE_TBL GENMASK(9, 3)
+#define REW_RTAG_ETAG_CTRL_IPE_TBL_SET(x)\
+ FIELD_PREP(REW_RTAG_ETAG_CTRL_IPE_TBL, x)
+#define REW_RTAG_ETAG_CTRL_IPE_TBL_GET(x)\
+ FIELD_GET(REW_RTAG_ETAG_CTRL_IPE_TBL, x)
+
+#define REW_RTAG_ETAG_CTRL_ES0_ISDX_KEY_ENA GENMASK(2, 1)
+#define REW_RTAG_ETAG_CTRL_ES0_ISDX_KEY_ENA_SET(x)\
+ FIELD_PREP(REW_RTAG_ETAG_CTRL_ES0_ISDX_KEY_ENA, x)
+#define REW_RTAG_ETAG_CTRL_ES0_ISDX_KEY_ENA_GET(x)\
+ FIELD_GET(REW_RTAG_ETAG_CTRL_ES0_ISDX_KEY_ENA, x)
+
+#define REW_RTAG_ETAG_CTRL_KEEP_ETAG BIT(0)
+#define REW_RTAG_ETAG_CTRL_KEEP_ETAG_SET(x)\
+ FIELD_PREP(REW_RTAG_ETAG_CTRL_KEEP_ETAG, x)
+#define REW_RTAG_ETAG_CTRL_KEEP_ETAG_GET(x)\
+ FIELD_GET(REW_RTAG_ETAG_CTRL_KEEP_ETAG, x)
+
+/* REW:COMMON:ES0_CTRL */
+#define REW_ES0_CTRL __REG(TARGET_REW,\
+ 0, 1, 387264, 0, 1, 1232, 852, 0, 1, 4)
+
+#define REW_ES0_CTRL_ES0_BY_RT_FWD BIT(5)
+#define REW_ES0_CTRL_ES0_BY_RT_FWD_SET(x)\
+ FIELD_PREP(REW_ES0_CTRL_ES0_BY_RT_FWD, x)
+#define REW_ES0_CTRL_ES0_BY_RT_FWD_GET(x)\
+ FIELD_GET(REW_ES0_CTRL_ES0_BY_RT_FWD, x)
+
+#define REW_ES0_CTRL_ES0_BY_RLEG BIT(4)
+#define REW_ES0_CTRL_ES0_BY_RLEG_SET(x)\
+ FIELD_PREP(REW_ES0_CTRL_ES0_BY_RLEG, x)
+#define REW_ES0_CTRL_ES0_BY_RLEG_GET(x)\
+ FIELD_GET(REW_ES0_CTRL_ES0_BY_RLEG, x)
+
+#define REW_ES0_CTRL_ES0_DPORT_ENA BIT(3)
+#define REW_ES0_CTRL_ES0_DPORT_ENA_SET(x)\
+ FIELD_PREP(REW_ES0_CTRL_ES0_DPORT_ENA, x)
+#define REW_ES0_CTRL_ES0_DPORT_ENA_GET(x)\
+ FIELD_GET(REW_ES0_CTRL_ES0_DPORT_ENA, x)
+
+#define REW_ES0_CTRL_ES0_FRM_LBK_CFG BIT(2)
+#define REW_ES0_CTRL_ES0_FRM_LBK_CFG_SET(x)\
+ FIELD_PREP(REW_ES0_CTRL_ES0_FRM_LBK_CFG, x)
+#define REW_ES0_CTRL_ES0_FRM_LBK_CFG_GET(x)\
+ FIELD_GET(REW_ES0_CTRL_ES0_FRM_LBK_CFG, x)
+
+#define REW_ES0_CTRL_ES0_VD2_ENCAP_ID_ENA BIT(1)
+#define REW_ES0_CTRL_ES0_VD2_ENCAP_ID_ENA_SET(x)\
+ FIELD_PREP(REW_ES0_CTRL_ES0_VD2_ENCAP_ID_ENA, x)
+#define REW_ES0_CTRL_ES0_VD2_ENCAP_ID_ENA_GET(x)\
+ FIELD_GET(REW_ES0_CTRL_ES0_VD2_ENCAP_ID_ENA, x)
+
+#define REW_ES0_CTRL_ES0_LU_ENA BIT(0)
+#define REW_ES0_CTRL_ES0_LU_ENA_SET(x)\
+ FIELD_PREP(REW_ES0_CTRL_ES0_LU_ENA, x)
+#define REW_ES0_CTRL_ES0_LU_ENA_GET(x)\
+ FIELD_GET(REW_ES0_CTRL_ES0_LU_ENA, x)
+
+/* REW:PORT:PORT_VLAN_CFG */
+#define REW_PORT_VLAN_CFG(g) __REG(TARGET_REW,\
+ 0, 1, 360448, g, 70, 256, 0, 0, 1, 4)
+
+#define REW_PORT_VLAN_CFG_PORT_PCP GENMASK(15, 13)
+#define REW_PORT_VLAN_CFG_PORT_PCP_SET(x)\
+ FIELD_PREP(REW_PORT_VLAN_CFG_PORT_PCP, x)
+#define REW_PORT_VLAN_CFG_PORT_PCP_GET(x)\
+ FIELD_GET(REW_PORT_VLAN_CFG_PORT_PCP, x)
+
+#define REW_PORT_VLAN_CFG_PORT_DEI BIT(12)
+#define REW_PORT_VLAN_CFG_PORT_DEI_SET(x)\
+ FIELD_PREP(REW_PORT_VLAN_CFG_PORT_DEI, x)
+#define REW_PORT_VLAN_CFG_PORT_DEI_GET(x)\
+ FIELD_GET(REW_PORT_VLAN_CFG_PORT_DEI, x)
+
+#define REW_PORT_VLAN_CFG_PORT_VID GENMASK(11, 0)
+#define REW_PORT_VLAN_CFG_PORT_VID_SET(x)\
+ FIELD_PREP(REW_PORT_VLAN_CFG_PORT_VID, x)
+#define REW_PORT_VLAN_CFG_PORT_VID_GET(x)\
+ FIELD_GET(REW_PORT_VLAN_CFG_PORT_VID, x)
+
+/* REW:PORT:PCP_MAP_DE0 */
+#define REW_PCP_MAP_DE0(g, r) __REG(TARGET_REW,\
+ 0, 1, 360448, g, 70, 256, 4, r, 8, 4)
+
+#define REW_PCP_MAP_DE0_PCP_DE0 GENMASK(2, 0)
+#define REW_PCP_MAP_DE0_PCP_DE0_SET(x)\
+ FIELD_PREP(REW_PCP_MAP_DE0_PCP_DE0, x)
+#define REW_PCP_MAP_DE0_PCP_DE0_GET(x)\
+ FIELD_GET(REW_PCP_MAP_DE0_PCP_DE0, x)
+
+/* REW:PORT:PCP_MAP_DE1 */
+#define REW_PCP_MAP_DE1(g, r) __REG(TARGET_REW,\
+ 0, 1, 360448, g, 70, 256, 36, r, 8, 4)
+
+#define REW_PCP_MAP_DE1_PCP_DE1 GENMASK(2, 0)
+#define REW_PCP_MAP_DE1_PCP_DE1_SET(x)\
+ FIELD_PREP(REW_PCP_MAP_DE1_PCP_DE1, x)
+#define REW_PCP_MAP_DE1_PCP_DE1_GET(x)\
+ FIELD_GET(REW_PCP_MAP_DE1_PCP_DE1, x)
+
+/* REW:PORT:DEI_MAP_DE0 */
+#define REW_DEI_MAP_DE0(g, r) __REG(TARGET_REW,\
+ 0, 1, 360448, g, 70, 256, 68, r, 8, 4)
+
+#define REW_DEI_MAP_DE0_DEI_DE0 BIT(0)
+#define REW_DEI_MAP_DE0_DEI_DE0_SET(x)\
+ FIELD_PREP(REW_DEI_MAP_DE0_DEI_DE0, x)
+#define REW_DEI_MAP_DE0_DEI_DE0_GET(x)\
+ FIELD_GET(REW_DEI_MAP_DE0_DEI_DE0, x)
+
+/* REW:PORT:DEI_MAP_DE1 */
+#define REW_DEI_MAP_DE1(g, r) __REG(TARGET_REW,\
+ 0, 1, 360448, g, 70, 256, 100, r, 8, 4)
+
+#define REW_DEI_MAP_DE1_DEI_DE1 BIT(0)
+#define REW_DEI_MAP_DE1_DEI_DE1_SET(x)\
+ FIELD_PREP(REW_DEI_MAP_DE1_DEI_DE1, x)
+#define REW_DEI_MAP_DE1_DEI_DE1_GET(x)\
+ FIELD_GET(REW_DEI_MAP_DE1_DEI_DE1, x)
+
+/* REW:PORT:TAG_CTRL */
+#define REW_TAG_CTRL(g) __REG(TARGET_REW,\
+ 0, 1, 360448, g, 70, 256, 132, 0, 1, 4)
+
+#define REW_TAG_CTRL_TAG_CFG_OBEY_WAS_TAGGED BIT(13)
+#define REW_TAG_CTRL_TAG_CFG_OBEY_WAS_TAGGED_SET(x)\
+ FIELD_PREP(REW_TAG_CTRL_TAG_CFG_OBEY_WAS_TAGGED, x)
+#define REW_TAG_CTRL_TAG_CFG_OBEY_WAS_TAGGED_GET(x)\
+ FIELD_GET(REW_TAG_CTRL_TAG_CFG_OBEY_WAS_TAGGED, x)
+
+#define REW_TAG_CTRL_TAG_CFG GENMASK(12, 11)
+#define REW_TAG_CTRL_TAG_CFG_SET(x)\
+ FIELD_PREP(REW_TAG_CTRL_TAG_CFG, x)
+#define REW_TAG_CTRL_TAG_CFG_GET(x)\
+ FIELD_GET(REW_TAG_CTRL_TAG_CFG, x)
+
+#define REW_TAG_CTRL_TAG_TPID_CFG GENMASK(10, 8)
+#define REW_TAG_CTRL_TAG_TPID_CFG_SET(x)\
+ FIELD_PREP(REW_TAG_CTRL_TAG_TPID_CFG, x)
+#define REW_TAG_CTRL_TAG_TPID_CFG_GET(x)\
+ FIELD_GET(REW_TAG_CTRL_TAG_TPID_CFG, x)
+
+#define REW_TAG_CTRL_TAG_VID_CFG GENMASK(7, 6)
+#define REW_TAG_CTRL_TAG_VID_CFG_SET(x)\
+ FIELD_PREP(REW_TAG_CTRL_TAG_VID_CFG, x)
+#define REW_TAG_CTRL_TAG_VID_CFG_GET(x)\
+ FIELD_GET(REW_TAG_CTRL_TAG_VID_CFG, x)
+
+#define REW_TAG_CTRL_TAG_PCP_CFG GENMASK(5, 3)
+#define REW_TAG_CTRL_TAG_PCP_CFG_SET(x)\
+ FIELD_PREP(REW_TAG_CTRL_TAG_PCP_CFG, x)
+#define REW_TAG_CTRL_TAG_PCP_CFG_GET(x)\
+ FIELD_GET(REW_TAG_CTRL_TAG_PCP_CFG, x)
+
+#define REW_TAG_CTRL_TAG_DEI_CFG GENMASK(2, 0)
+#define REW_TAG_CTRL_TAG_DEI_CFG_SET(x)\
+ FIELD_PREP(REW_TAG_CTRL_TAG_DEI_CFG, x)
+#define REW_TAG_CTRL_TAG_DEI_CFG_GET(x)\
+ FIELD_GET(REW_TAG_CTRL_TAG_DEI_CFG, x)
+
+/* REW:PORT:DSCP_MAP */
+#define REW_DSCP_MAP(g) __REG(TARGET_REW,\
+ 0, 1, 360448, g, 70, 256, 136, 0, 1, 4)
+
+#define REW_DSCP_MAP_DSCP_UPDATE_ENA BIT(1)
+#define REW_DSCP_MAP_DSCP_UPDATE_ENA_SET(x)\
+ FIELD_PREP(REW_DSCP_MAP_DSCP_UPDATE_ENA, x)
+#define REW_DSCP_MAP_DSCP_UPDATE_ENA_GET(x)\
+ FIELD_GET(REW_DSCP_MAP_DSCP_UPDATE_ENA, x)
+
+#define REW_DSCP_MAP_DSCP_REMAP_ENA BIT(0)
+#define REW_DSCP_MAP_DSCP_REMAP_ENA_SET(x)\
+ FIELD_PREP(REW_DSCP_MAP_DSCP_REMAP_ENA, x)
+#define REW_DSCP_MAP_DSCP_REMAP_ENA_GET(x)\
+ FIELD_GET(REW_DSCP_MAP_DSCP_REMAP_ENA, x)
+
+/* REW:PTP_CTRL:PTP_TWOSTEP_CTRL */
+#define REW_PTP_TWOSTEP_CTRL __REG(TARGET_REW,\
+ 0, 1, 378368, 0, 1, 40, 0, 0, 1, 4)
+
+#define REW_PTP_TWOSTEP_CTRL_PTP_OVWR_ENA BIT(12)
+#define REW_PTP_TWOSTEP_CTRL_PTP_OVWR_ENA_SET(x)\
+ FIELD_PREP(REW_PTP_TWOSTEP_CTRL_PTP_OVWR_ENA, x)
+#define REW_PTP_TWOSTEP_CTRL_PTP_OVWR_ENA_GET(x)\
+ FIELD_GET(REW_PTP_TWOSTEP_CTRL_PTP_OVWR_ENA, x)
+
+#define REW_PTP_TWOSTEP_CTRL_PTP_NXT BIT(11)
+#define REW_PTP_TWOSTEP_CTRL_PTP_NXT_SET(x)\
+ FIELD_PREP(REW_PTP_TWOSTEP_CTRL_PTP_NXT, x)
+#define REW_PTP_TWOSTEP_CTRL_PTP_NXT_GET(x)\
+ FIELD_GET(REW_PTP_TWOSTEP_CTRL_PTP_NXT, x)
+
+#define REW_PTP_TWOSTEP_CTRL_PTP_VLD BIT(10)
+#define REW_PTP_TWOSTEP_CTRL_PTP_VLD_SET(x)\
+ FIELD_PREP(REW_PTP_TWOSTEP_CTRL_PTP_VLD, x)
+#define REW_PTP_TWOSTEP_CTRL_PTP_VLD_GET(x)\
+ FIELD_GET(REW_PTP_TWOSTEP_CTRL_PTP_VLD, x)
+
+#define REW_PTP_TWOSTEP_CTRL_STAMP_TX BIT(9)
+#define REW_PTP_TWOSTEP_CTRL_STAMP_TX_SET(x)\
+ FIELD_PREP(REW_PTP_TWOSTEP_CTRL_STAMP_TX, x)
+#define REW_PTP_TWOSTEP_CTRL_STAMP_TX_GET(x)\
+ FIELD_GET(REW_PTP_TWOSTEP_CTRL_STAMP_TX, x)
+
+#define REW_PTP_TWOSTEP_CTRL_STAMP_PORT GENMASK(8, 1)
+#define REW_PTP_TWOSTEP_CTRL_STAMP_PORT_SET(x)\
+ FIELD_PREP(REW_PTP_TWOSTEP_CTRL_STAMP_PORT, x)
+#define REW_PTP_TWOSTEP_CTRL_STAMP_PORT_GET(x)\
+ FIELD_GET(REW_PTP_TWOSTEP_CTRL_STAMP_PORT, x)
+
+#define REW_PTP_TWOSTEP_CTRL_PTP_OVFL BIT(0)
+#define REW_PTP_TWOSTEP_CTRL_PTP_OVFL_SET(x)\
+ FIELD_PREP(REW_PTP_TWOSTEP_CTRL_PTP_OVFL, x)
+#define REW_PTP_TWOSTEP_CTRL_PTP_OVFL_GET(x)\
+ FIELD_GET(REW_PTP_TWOSTEP_CTRL_PTP_OVFL, x)
+
+/* REW:PTP_CTRL:PTP_TWOSTEP_STAMP */
+#define REW_PTP_TWOSTEP_STAMP __REG(TARGET_REW,\
+ 0, 1, 378368, 0, 1, 40, 4, 0, 1, 4)
+
+#define REW_PTP_TWOSTEP_STAMP_STAMP_NSEC GENMASK(29, 0)
+#define REW_PTP_TWOSTEP_STAMP_STAMP_NSEC_SET(x)\
+ FIELD_PREP(REW_PTP_TWOSTEP_STAMP_STAMP_NSEC, x)
+#define REW_PTP_TWOSTEP_STAMP_STAMP_NSEC_GET(x)\
+ FIELD_GET(REW_PTP_TWOSTEP_STAMP_STAMP_NSEC, x)
+
+/* REW:PTP_CTRL:PTP_TWOSTEP_STAMP_SUBNS */
+#define REW_PTP_TWOSTEP_STAMP_SUBNS __REG(TARGET_REW,\
+ 0, 1, 378368, 0, 1, 40, 8, 0, 1, 4)
+
+#define REW_PTP_TWOSTEP_STAMP_SUBNS_STAMP_SUB_NSEC GENMASK(7, 0)
+#define REW_PTP_TWOSTEP_STAMP_SUBNS_STAMP_SUB_NSEC_SET(x)\
+ FIELD_PREP(REW_PTP_TWOSTEP_STAMP_SUBNS_STAMP_SUB_NSEC, x)
+#define REW_PTP_TWOSTEP_STAMP_SUBNS_STAMP_SUB_NSEC_GET(x)\
+ FIELD_GET(REW_PTP_TWOSTEP_STAMP_SUBNS_STAMP_SUB_NSEC, x)
+
+/* REW:PTP_CTRL:PTP_RSRV_NOT_ZERO */
+#define REW_PTP_RSRV_NOT_ZERO __REG(TARGET_REW,\
+ 0, 1, 378368, 0, 1, 40, 12, 0, 1, 4)
+
+/* REW:PTP_CTRL:PTP_RSRV_NOT_ZERO1 */
+#define REW_PTP_RSRV_NOT_ZERO1 __REG(TARGET_REW,\
+ 0, 1, 378368, 0, 1, 40, 16, 0, 1, 4)
+
+/* REW:PTP_CTRL:PTP_RSRV_NOT_ZERO2 */
+#define REW_PTP_RSRV_NOT_ZERO2 __REG(TARGET_REW,\
+ 0, 1, 378368, 0, 1, 40, 20, 0, 1, 4)
+
+#define REW_PTP_RSRV_NOT_ZERO2_PTP_RSRV_NOT_ZERO2 GENMASK(5, 0)
+#define REW_PTP_RSRV_NOT_ZERO2_PTP_RSRV_NOT_ZERO2_SET(x)\
+ FIELD_PREP(REW_PTP_RSRV_NOT_ZERO2_PTP_RSRV_NOT_ZERO2, x)
+#define REW_PTP_RSRV_NOT_ZERO2_PTP_RSRV_NOT_ZERO2_GET(x)\
+ FIELD_GET(REW_PTP_RSRV_NOT_ZERO2_PTP_RSRV_NOT_ZERO2, x)
+
+/* REW:PTP_CTRL:PTP_GEN_STAMP_FMT */
+#define REW_PTP_GEN_STAMP_FMT(r) __REG(TARGET_REW,\
+ 0, 1, 378368, 0, 1, 40, 24, r, 4, 4)
+
+#define REW_PTP_GEN_STAMP_FMT_RT_OFS GENMASK(6, 2)
+#define REW_PTP_GEN_STAMP_FMT_RT_OFS_SET(x)\
+ FIELD_PREP(REW_PTP_GEN_STAMP_FMT_RT_OFS, x)
+#define REW_PTP_GEN_STAMP_FMT_RT_OFS_GET(x)\
+ FIELD_GET(REW_PTP_GEN_STAMP_FMT_RT_OFS, x)
+
+#define REW_PTP_GEN_STAMP_FMT_RT_FMT GENMASK(1, 0)
+#define REW_PTP_GEN_STAMP_FMT_RT_FMT_SET(x)\
+ FIELD_PREP(REW_PTP_GEN_STAMP_FMT_RT_FMT, x)
+#define REW_PTP_GEN_STAMP_FMT_RT_FMT_GET(x)\
+ FIELD_GET(REW_PTP_GEN_STAMP_FMT_RT_FMT, x)
+
+/* REW:RAM_CTRL:RAM_INIT */
+#define REW_RAM_INIT __REG(TARGET_REW,\
+ 0, 1, 378696, 0, 1, 4, 0, 0, 1, 4)
+
+#define REW_RAM_INIT_RAM_INIT BIT(1)
+#define REW_RAM_INIT_RAM_INIT_SET(x)\
+ FIELD_PREP(REW_RAM_INIT_RAM_INIT, x)
+#define REW_RAM_INIT_RAM_INIT_GET(x)\
+ FIELD_GET(REW_RAM_INIT_RAM_INIT, x)
+
+#define REW_RAM_INIT_RAM_CFG_HOOK BIT(0)
+#define REW_RAM_INIT_RAM_CFG_HOOK_SET(x)\
+ FIELD_PREP(REW_RAM_INIT_RAM_CFG_HOOK, x)
+#define REW_RAM_INIT_RAM_CFG_HOOK_GET(x)\
+ FIELD_GET(REW_RAM_INIT_RAM_CFG_HOOK, x)
+
+/* VCAP_ES0:VCAP_CORE_CFG:VCAP_UPDATE_CTRL */
+#define VCAP_ES0_CTRL __REG(TARGET_VCAP_ES0,\
+ 0, 1, 0, 0, 1, 8, 0, 0, 1, 4)
+
+#define VCAP_ES0_CTRL_UPDATE_CMD GENMASK(24, 22)
+#define VCAP_ES0_CTRL_UPDATE_CMD_SET(x)\
+ FIELD_PREP(VCAP_ES0_CTRL_UPDATE_CMD, x)
+#define VCAP_ES0_CTRL_UPDATE_CMD_GET(x)\
+ FIELD_GET(VCAP_ES0_CTRL_UPDATE_CMD, x)
+
+#define VCAP_ES0_CTRL_UPDATE_ENTRY_DIS BIT(21)
+#define VCAP_ES0_CTRL_UPDATE_ENTRY_DIS_SET(x)\
+ FIELD_PREP(VCAP_ES0_CTRL_UPDATE_ENTRY_DIS, x)
+#define VCAP_ES0_CTRL_UPDATE_ENTRY_DIS_GET(x)\
+ FIELD_GET(VCAP_ES0_CTRL_UPDATE_ENTRY_DIS, x)
+
+#define VCAP_ES0_CTRL_UPDATE_ACTION_DIS BIT(20)
+#define VCAP_ES0_CTRL_UPDATE_ACTION_DIS_SET(x)\
+ FIELD_PREP(VCAP_ES0_CTRL_UPDATE_ACTION_DIS, x)
+#define VCAP_ES0_CTRL_UPDATE_ACTION_DIS_GET(x)\
+ FIELD_GET(VCAP_ES0_CTRL_UPDATE_ACTION_DIS, x)
+
+#define VCAP_ES0_CTRL_UPDATE_CNT_DIS BIT(19)
+#define VCAP_ES0_CTRL_UPDATE_CNT_DIS_SET(x)\
+ FIELD_PREP(VCAP_ES0_CTRL_UPDATE_CNT_DIS, x)
+#define VCAP_ES0_CTRL_UPDATE_CNT_DIS_GET(x)\
+ FIELD_GET(VCAP_ES0_CTRL_UPDATE_CNT_DIS, x)
+
+#define VCAP_ES0_CTRL_UPDATE_ADDR GENMASK(18, 3)
+#define VCAP_ES0_CTRL_UPDATE_ADDR_SET(x)\
+ FIELD_PREP(VCAP_ES0_CTRL_UPDATE_ADDR, x)
+#define VCAP_ES0_CTRL_UPDATE_ADDR_GET(x)\
+ FIELD_GET(VCAP_ES0_CTRL_UPDATE_ADDR, x)
+
+#define VCAP_ES0_CTRL_UPDATE_SHOT BIT(2)
+#define VCAP_ES0_CTRL_UPDATE_SHOT_SET(x)\
+ FIELD_PREP(VCAP_ES0_CTRL_UPDATE_SHOT, x)
+#define VCAP_ES0_CTRL_UPDATE_SHOT_GET(x)\
+ FIELD_GET(VCAP_ES0_CTRL_UPDATE_SHOT, x)
+
+#define VCAP_ES0_CTRL_CLEAR_CACHE BIT(1)
+#define VCAP_ES0_CTRL_CLEAR_CACHE_SET(x)\
+ FIELD_PREP(VCAP_ES0_CTRL_CLEAR_CACHE, x)
+#define VCAP_ES0_CTRL_CLEAR_CACHE_GET(x)\
+ FIELD_GET(VCAP_ES0_CTRL_CLEAR_CACHE, x)
+
+#define VCAP_ES0_CTRL_MV_TRAFFIC_IGN BIT(0)
+#define VCAP_ES0_CTRL_MV_TRAFFIC_IGN_SET(x)\
+ FIELD_PREP(VCAP_ES0_CTRL_MV_TRAFFIC_IGN, x)
+#define VCAP_ES0_CTRL_MV_TRAFFIC_IGN_GET(x)\
+ FIELD_GET(VCAP_ES0_CTRL_MV_TRAFFIC_IGN, x)
+
+/* VCAP_ES0:VCAP_CORE_CFG:VCAP_MV_CFG */
+#define VCAP_ES0_CFG __REG(TARGET_VCAP_ES0,\
+ 0, 1, 0, 0, 1, 8, 4, 0, 1, 4)
+
+#define VCAP_ES0_CFG_MV_NUM_POS GENMASK(31, 16)
+#define VCAP_ES0_CFG_MV_NUM_POS_SET(x)\
+ FIELD_PREP(VCAP_ES0_CFG_MV_NUM_POS, x)
+#define VCAP_ES0_CFG_MV_NUM_POS_GET(x)\
+ FIELD_GET(VCAP_ES0_CFG_MV_NUM_POS, x)
+
+#define VCAP_ES0_CFG_MV_SIZE GENMASK(15, 0)
+#define VCAP_ES0_CFG_MV_SIZE_SET(x)\
+ FIELD_PREP(VCAP_ES0_CFG_MV_SIZE, x)
+#define VCAP_ES0_CFG_MV_SIZE_GET(x)\
+ FIELD_GET(VCAP_ES0_CFG_MV_SIZE, x)
+
+/* VCAP_ES0:VCAP_CORE_CACHE:VCAP_ENTRY_DAT */
+#define VCAP_ES0_VCAP_ENTRY_DAT(r) __REG(TARGET_VCAP_ES0,\
+ 0, 1, 8, 0, 1, 904, 0, r, 64, 4)
+
+/* VCAP_ES0:VCAP_CORE_CACHE:VCAP_MASK_DAT */
+#define VCAP_ES0_VCAP_MASK_DAT(r) __REG(TARGET_VCAP_ES0,\
+ 0, 1, 8, 0, 1, 904, 256, r, 64, 4)
+
+/* VCAP_ES0:VCAP_CORE_CACHE:VCAP_ACTION_DAT */
+#define VCAP_ES0_VCAP_ACTION_DAT(r) __REG(TARGET_VCAP_ES0,\
+ 0, 1, 8, 0, 1, 904, 512, r, 64, 4)
+
+/* VCAP_ES0:VCAP_CORE_CACHE:VCAP_CNT_DAT */
+#define VCAP_ES0_VCAP_CNT_DAT(r) __REG(TARGET_VCAP_ES0,\
+ 0, 1, 8, 0, 1, 904, 768, r, 32, 4)
+
+/* VCAP_ES0:VCAP_CORE_CACHE:VCAP_CNT_FW_DAT */
+#define VCAP_ES0_VCAP_CNT_FW_DAT __REG(TARGET_VCAP_ES0,\
+ 0, 1, 8, 0, 1, 904, 896, 0, 1, 4)
+
+/* VCAP_ES0:VCAP_CORE_CACHE:VCAP_TG_DAT */
+#define VCAP_ES0_VCAP_TG_DAT __REG(TARGET_VCAP_ES0,\
+ 0, 1, 8, 0, 1, 904, 900, 0, 1, 4)
+
+/* VCAP_ES0:VCAP_CORE_MAP:VCAP_CORE_IDX */
+#define VCAP_ES0_IDX __REG(TARGET_VCAP_ES0,\
+ 0, 1, 912, 0, 1, 8, 0, 0, 1, 4)
+
+#define VCAP_ES0_IDX_CORE_IDX GENMASK(3, 0)
+#define VCAP_ES0_IDX_CORE_IDX_SET(x)\
+ FIELD_PREP(VCAP_ES0_IDX_CORE_IDX, x)
+#define VCAP_ES0_IDX_CORE_IDX_GET(x)\
+ FIELD_GET(VCAP_ES0_IDX_CORE_IDX, x)
+
+/* VCAP_ES0:VCAP_CORE_MAP:VCAP_CORE_MAP */
+#define VCAP_ES0_MAP __REG(TARGET_VCAP_ES0,\
+ 0, 1, 912, 0, 1, 8, 4, 0, 1, 4)
+
+#define VCAP_ES0_MAP_CORE_MAP GENMASK(2, 0)
+#define VCAP_ES0_MAP_CORE_MAP_SET(x)\
+ FIELD_PREP(VCAP_ES0_MAP_CORE_MAP, x)
+#define VCAP_ES0_MAP_CORE_MAP_GET(x)\
+ FIELD_GET(VCAP_ES0_MAP_CORE_MAP, x)
+
+/* VCAP_ES0:VCAP_CORE_STICKY:VCAP_STICKY */
+#define VCAP_ES0_VCAP_STICKY __REG(TARGET_VCAP_ES0,\
+ 0, 1, 920, 0, 1, 4, 0, 0, 1, 4)
+
+#define VCAP_ES0_VCAP_STICKY_VCAP_ROW_DELETED_STICKY BIT(0)
+#define VCAP_ES0_VCAP_STICKY_VCAP_ROW_DELETED_STICKY_SET(x)\
+ FIELD_PREP(VCAP_ES0_VCAP_STICKY_VCAP_ROW_DELETED_STICKY, x)
+#define VCAP_ES0_VCAP_STICKY_VCAP_ROW_DELETED_STICKY_GET(x)\
+ FIELD_GET(VCAP_ES0_VCAP_STICKY_VCAP_ROW_DELETED_STICKY, x)
+
+/* VCAP_ES0:VCAP_CONST:VCAP_VER */
+#define VCAP_ES0_VCAP_VER __REG(TARGET_VCAP_ES0,\
+ 0, 1, 924, 0, 1, 40, 0, 0, 1, 4)
+
+/* VCAP_ES0:VCAP_CONST:ENTRY_WIDTH */
+#define VCAP_ES0_ENTRY_WIDTH __REG(TARGET_VCAP_ES0,\
+ 0, 1, 924, 0, 1, 40, 4, 0, 1, 4)
+
+/* VCAP_ES0:VCAP_CONST:ENTRY_CNT */
+#define VCAP_ES0_ENTRY_CNT __REG(TARGET_VCAP_ES0,\
+ 0, 1, 924, 0, 1, 40, 8, 0, 1, 4)
+
+/* VCAP_ES0:VCAP_CONST:ENTRY_SWCNT */
+#define VCAP_ES0_ENTRY_SWCNT __REG(TARGET_VCAP_ES0,\
+ 0, 1, 924, 0, 1, 40, 12, 0, 1, 4)
+
+/* VCAP_ES0:VCAP_CONST:ENTRY_TG_WIDTH */
+#define VCAP_ES0_ENTRY_TG_WIDTH __REG(TARGET_VCAP_ES0,\
+ 0, 1, 924, 0, 1, 40, 16, 0, 1, 4)
+
+/* VCAP_ES0:VCAP_CONST:ACTION_DEF_CNT */
+#define VCAP_ES0_ACTION_DEF_CNT __REG(TARGET_VCAP_ES0,\
+ 0, 1, 924, 0, 1, 40, 20, 0, 1, 4)
+
+/* VCAP_ES0:VCAP_CONST:ACTION_WIDTH */
+#define VCAP_ES0_ACTION_WIDTH __REG(TARGET_VCAP_ES0,\
+ 0, 1, 924, 0, 1, 40, 24, 0, 1, 4)
+
+/* VCAP_ES0:VCAP_CONST:CNT_WIDTH */
+#define VCAP_ES0_CNT_WIDTH __REG(TARGET_VCAP_ES0,\
+ 0, 1, 924, 0, 1, 40, 28, 0, 1, 4)
+
+/* VCAP_ES0:VCAP_CONST:CORE_CNT */
+#define VCAP_ES0_CORE_CNT __REG(TARGET_VCAP_ES0,\
+ 0, 1, 924, 0, 1, 40, 32, 0, 1, 4)
+
+/* VCAP_ES0:VCAP_CONST:IF_CNT */
+#define VCAP_ES0_IF_CNT __REG(TARGET_VCAP_ES0,\
+ 0, 1, 924, 0, 1, 40, 36, 0, 1, 4)
+
+/* VCAP_ES2:VCAP_CORE_CFG:VCAP_UPDATE_CTRL */
+#define VCAP_ES2_CTRL __REG(TARGET_VCAP_ES2,\
+ 0, 1, 0, 0, 1, 8, 0, 0, 1, 4)
+
+#define VCAP_ES2_CTRL_UPDATE_CMD GENMASK(24, 22)
+#define VCAP_ES2_CTRL_UPDATE_CMD_SET(x)\
+ FIELD_PREP(VCAP_ES2_CTRL_UPDATE_CMD, x)
+#define VCAP_ES2_CTRL_UPDATE_CMD_GET(x)\
+ FIELD_GET(VCAP_ES2_CTRL_UPDATE_CMD, x)
+
+#define VCAP_ES2_CTRL_UPDATE_ENTRY_DIS BIT(21)
+#define VCAP_ES2_CTRL_UPDATE_ENTRY_DIS_SET(x)\
+ FIELD_PREP(VCAP_ES2_CTRL_UPDATE_ENTRY_DIS, x)
+#define VCAP_ES2_CTRL_UPDATE_ENTRY_DIS_GET(x)\
+ FIELD_GET(VCAP_ES2_CTRL_UPDATE_ENTRY_DIS, x)
+
+#define VCAP_ES2_CTRL_UPDATE_ACTION_DIS BIT(20)
+#define VCAP_ES2_CTRL_UPDATE_ACTION_DIS_SET(x)\
+ FIELD_PREP(VCAP_ES2_CTRL_UPDATE_ACTION_DIS, x)
+#define VCAP_ES2_CTRL_UPDATE_ACTION_DIS_GET(x)\
+ FIELD_GET(VCAP_ES2_CTRL_UPDATE_ACTION_DIS, x)
+
+#define VCAP_ES2_CTRL_UPDATE_CNT_DIS BIT(19)
+#define VCAP_ES2_CTRL_UPDATE_CNT_DIS_SET(x)\
+ FIELD_PREP(VCAP_ES2_CTRL_UPDATE_CNT_DIS, x)
+#define VCAP_ES2_CTRL_UPDATE_CNT_DIS_GET(x)\
+ FIELD_GET(VCAP_ES2_CTRL_UPDATE_CNT_DIS, x)
+
+#define VCAP_ES2_CTRL_UPDATE_ADDR GENMASK(18, 3)
+#define VCAP_ES2_CTRL_UPDATE_ADDR_SET(x)\
+ FIELD_PREP(VCAP_ES2_CTRL_UPDATE_ADDR, x)
+#define VCAP_ES2_CTRL_UPDATE_ADDR_GET(x)\
+ FIELD_GET(VCAP_ES2_CTRL_UPDATE_ADDR, x)
+
+#define VCAP_ES2_CTRL_UPDATE_SHOT BIT(2)
+#define VCAP_ES2_CTRL_UPDATE_SHOT_SET(x)\
+ FIELD_PREP(VCAP_ES2_CTRL_UPDATE_SHOT, x)
+#define VCAP_ES2_CTRL_UPDATE_SHOT_GET(x)\
+ FIELD_GET(VCAP_ES2_CTRL_UPDATE_SHOT, x)
+
+#define VCAP_ES2_CTRL_CLEAR_CACHE BIT(1)
+#define VCAP_ES2_CTRL_CLEAR_CACHE_SET(x)\
+ FIELD_PREP(VCAP_ES2_CTRL_CLEAR_CACHE, x)
+#define VCAP_ES2_CTRL_CLEAR_CACHE_GET(x)\
+ FIELD_GET(VCAP_ES2_CTRL_CLEAR_CACHE, x)
+
+#define VCAP_ES2_CTRL_MV_TRAFFIC_IGN BIT(0)
+#define VCAP_ES2_CTRL_MV_TRAFFIC_IGN_SET(x)\
+ FIELD_PREP(VCAP_ES2_CTRL_MV_TRAFFIC_IGN, x)
+#define VCAP_ES2_CTRL_MV_TRAFFIC_IGN_GET(x)\
+ FIELD_GET(VCAP_ES2_CTRL_MV_TRAFFIC_IGN, x)
+
+/* VCAP_ES2:VCAP_CORE_CFG:VCAP_MV_CFG */
+#define VCAP_ES2_CFG __REG(TARGET_VCAP_ES2,\
+ 0, 1, 0, 0, 1, 8, 4, 0, 1, 4)
+
+#define VCAP_ES2_CFG_MV_NUM_POS GENMASK(31, 16)
+#define VCAP_ES2_CFG_MV_NUM_POS_SET(x)\
+ FIELD_PREP(VCAP_ES2_CFG_MV_NUM_POS, x)
+#define VCAP_ES2_CFG_MV_NUM_POS_GET(x)\
+ FIELD_GET(VCAP_ES2_CFG_MV_NUM_POS, x)
+
+#define VCAP_ES2_CFG_MV_SIZE GENMASK(15, 0)
+#define VCAP_ES2_CFG_MV_SIZE_SET(x)\
+ FIELD_PREP(VCAP_ES2_CFG_MV_SIZE, x)
+#define VCAP_ES2_CFG_MV_SIZE_GET(x)\
+ FIELD_GET(VCAP_ES2_CFG_MV_SIZE, x)
+
+/* VCAP_ES2:VCAP_CORE_CACHE:VCAP_ENTRY_DAT */
+#define VCAP_ES2_VCAP_ENTRY_DAT(r) __REG(TARGET_VCAP_ES2,\
+ 0, 1, 8, 0, 1, 904, 0, r, 64, 4)
+
+/* VCAP_ES2:VCAP_CORE_CACHE:VCAP_MASK_DAT */
+#define VCAP_ES2_VCAP_MASK_DAT(r) __REG(TARGET_VCAP_ES2,\
+ 0, 1, 8, 0, 1, 904, 256, r, 64, 4)
+
+/* VCAP_ES2:VCAP_CORE_CACHE:VCAP_ACTION_DAT */
+#define VCAP_ES2_VCAP_ACTION_DAT(r) __REG(TARGET_VCAP_ES2,\
+ 0, 1, 8, 0, 1, 904, 512, r, 64, 4)
+
+/* VCAP_ES2:VCAP_CORE_CACHE:VCAP_CNT_DAT */
+#define VCAP_ES2_VCAP_CNT_DAT(r) __REG(TARGET_VCAP_ES2,\
+ 0, 1, 8, 0, 1, 904, 768, r, 32, 4)
+
+/* VCAP_ES2:VCAP_CORE_CACHE:VCAP_CNT_FW_DAT */
+#define VCAP_ES2_VCAP_CNT_FW_DAT __REG(TARGET_VCAP_ES2,\
+ 0, 1, 8, 0, 1, 904, 896, 0, 1, 4)
+
+/* VCAP_ES2:VCAP_CORE_CACHE:VCAP_TG_DAT */
+#define VCAP_ES2_VCAP_TG_DAT __REG(TARGET_VCAP_ES2,\
+ 0, 1, 8, 0, 1, 904, 900, 0, 1, 4)
+
+/* VCAP_ES2:VCAP_CORE_MAP:VCAP_CORE_IDX */
+#define VCAP_ES2_IDX __REG(TARGET_VCAP_ES2,\
+ 0, 1, 912, 0, 1, 8, 0, 0, 1, 4)
+
+#define VCAP_ES2_IDX_CORE_IDX GENMASK(3, 0)
+#define VCAP_ES2_IDX_CORE_IDX_SET(x)\
+ FIELD_PREP(VCAP_ES2_IDX_CORE_IDX, x)
+#define VCAP_ES2_IDX_CORE_IDX_GET(x)\
+ FIELD_GET(VCAP_ES2_IDX_CORE_IDX, x)
+
+/* VCAP_ES2:VCAP_CORE_MAP:VCAP_CORE_MAP */
+#define VCAP_ES2_MAP __REG(TARGET_VCAP_ES2,\
+ 0, 1, 912, 0, 1, 8, 4, 0, 1, 4)
+
+#define VCAP_ES2_MAP_CORE_MAP GENMASK(2, 0)
+#define VCAP_ES2_MAP_CORE_MAP_SET(x)\
+ FIELD_PREP(VCAP_ES2_MAP_CORE_MAP, x)
+#define VCAP_ES2_MAP_CORE_MAP_GET(x)\
+ FIELD_GET(VCAP_ES2_MAP_CORE_MAP, x)
+
+/* VCAP_ES2:VCAP_CORE_STICKY:VCAP_STICKY */
+#define VCAP_ES2_VCAP_STICKY __REG(TARGET_VCAP_ES2,\
+ 0, 1, 920, 0, 1, 4, 0, 0, 1, 4)
+
+#define VCAP_ES2_VCAP_STICKY_VCAP_ROW_DELETED_STICKY BIT(0)
+#define VCAP_ES2_VCAP_STICKY_VCAP_ROW_DELETED_STICKY_SET(x)\
+ FIELD_PREP(VCAP_ES2_VCAP_STICKY_VCAP_ROW_DELETED_STICKY, x)
+#define VCAP_ES2_VCAP_STICKY_VCAP_ROW_DELETED_STICKY_GET(x)\
+ FIELD_GET(VCAP_ES2_VCAP_STICKY_VCAP_ROW_DELETED_STICKY, x)
+
+/* VCAP_ES2:VCAP_CONST:VCAP_VER */
+#define VCAP_ES2_VCAP_VER __REG(TARGET_VCAP_ES2,\
+ 0, 1, 924, 0, 1, 40, 0, 0, 1, 4)
+
+/* VCAP_ES2:VCAP_CONST:ENTRY_WIDTH */
+#define VCAP_ES2_ENTRY_WIDTH __REG(TARGET_VCAP_ES2,\
+ 0, 1, 924, 0, 1, 40, 4, 0, 1, 4)
+
+/* VCAP_ES2:VCAP_CONST:ENTRY_CNT */
+#define VCAP_ES2_ENTRY_CNT __REG(TARGET_VCAP_ES2,\
+ 0, 1, 924, 0, 1, 40, 8, 0, 1, 4)
+
+/* VCAP_ES2:VCAP_CONST:ENTRY_SWCNT */
+#define VCAP_ES2_ENTRY_SWCNT __REG(TARGET_VCAP_ES2,\
+ 0, 1, 924, 0, 1, 40, 12, 0, 1, 4)
+
+/* VCAP_ES2:VCAP_CONST:ENTRY_TG_WIDTH */
+#define VCAP_ES2_ENTRY_TG_WIDTH __REG(TARGET_VCAP_ES2,\
+ 0, 1, 924, 0, 1, 40, 16, 0, 1, 4)
+
+/* VCAP_ES2:VCAP_CONST:ACTION_DEF_CNT */
+#define VCAP_ES2_ACTION_DEF_CNT __REG(TARGET_VCAP_ES2,\
+ 0, 1, 924, 0, 1, 40, 20, 0, 1, 4)
+
+/* VCAP_ES2:VCAP_CONST:ACTION_WIDTH */
+#define VCAP_ES2_ACTION_WIDTH __REG(TARGET_VCAP_ES2,\
+ 0, 1, 924, 0, 1, 40, 24, 0, 1, 4)
+
+/* VCAP_ES2:VCAP_CONST:CNT_WIDTH */
+#define VCAP_ES2_CNT_WIDTH __REG(TARGET_VCAP_ES2,\
+ 0, 1, 924, 0, 1, 40, 28, 0, 1, 4)
+
+/* VCAP_ES2:VCAP_CONST:CORE_CNT */
+#define VCAP_ES2_CORE_CNT __REG(TARGET_VCAP_ES2,\
+ 0, 1, 924, 0, 1, 40, 32, 0, 1, 4)
+
+/* VCAP_ES2:VCAP_CONST:IF_CNT */
+#define VCAP_ES2_IF_CNT __REG(TARGET_VCAP_ES2,\
+ 0, 1, 924, 0, 1, 40, 36, 0, 1, 4)
+
+/* VCAP_SUPER:VCAP_CORE_CFG:VCAP_UPDATE_CTRL */
+#define VCAP_SUPER_CTRL __REG(TARGET_VCAP_SUPER,\
+ 0, 1, 0, 0, 1, 8, 0, 0, 1, 4)
+
+#define VCAP_SUPER_CTRL_UPDATE_CMD GENMASK(24, 22)
+#define VCAP_SUPER_CTRL_UPDATE_CMD_SET(x)\
+ FIELD_PREP(VCAP_SUPER_CTRL_UPDATE_CMD, x)
+#define VCAP_SUPER_CTRL_UPDATE_CMD_GET(x)\
+ FIELD_GET(VCAP_SUPER_CTRL_UPDATE_CMD, x)
+
+#define VCAP_SUPER_CTRL_UPDATE_ENTRY_DIS BIT(21)
+#define VCAP_SUPER_CTRL_UPDATE_ENTRY_DIS_SET(x)\
+ FIELD_PREP(VCAP_SUPER_CTRL_UPDATE_ENTRY_DIS, x)
+#define VCAP_SUPER_CTRL_UPDATE_ENTRY_DIS_GET(x)\
+ FIELD_GET(VCAP_SUPER_CTRL_UPDATE_ENTRY_DIS, x)
+
+#define VCAP_SUPER_CTRL_UPDATE_ACTION_DIS BIT(20)
+#define VCAP_SUPER_CTRL_UPDATE_ACTION_DIS_SET(x)\
+ FIELD_PREP(VCAP_SUPER_CTRL_UPDATE_ACTION_DIS, x)
+#define VCAP_SUPER_CTRL_UPDATE_ACTION_DIS_GET(x)\
+ FIELD_GET(VCAP_SUPER_CTRL_UPDATE_ACTION_DIS, x)
+
+#define VCAP_SUPER_CTRL_UPDATE_CNT_DIS BIT(19)
+#define VCAP_SUPER_CTRL_UPDATE_CNT_DIS_SET(x)\
+ FIELD_PREP(VCAP_SUPER_CTRL_UPDATE_CNT_DIS, x)
+#define VCAP_SUPER_CTRL_UPDATE_CNT_DIS_GET(x)\
+ FIELD_GET(VCAP_SUPER_CTRL_UPDATE_CNT_DIS, x)
+
+#define VCAP_SUPER_CTRL_UPDATE_ADDR GENMASK(18, 3)
+#define VCAP_SUPER_CTRL_UPDATE_ADDR_SET(x)\
+ FIELD_PREP(VCAP_SUPER_CTRL_UPDATE_ADDR, x)
+#define VCAP_SUPER_CTRL_UPDATE_ADDR_GET(x)\
+ FIELD_GET(VCAP_SUPER_CTRL_UPDATE_ADDR, x)
+
+#define VCAP_SUPER_CTRL_UPDATE_SHOT BIT(2)
+#define VCAP_SUPER_CTRL_UPDATE_SHOT_SET(x)\
+ FIELD_PREP(VCAP_SUPER_CTRL_UPDATE_SHOT, x)
+#define VCAP_SUPER_CTRL_UPDATE_SHOT_GET(x)\
+ FIELD_GET(VCAP_SUPER_CTRL_UPDATE_SHOT, x)
+
+#define VCAP_SUPER_CTRL_CLEAR_CACHE BIT(1)
+#define VCAP_SUPER_CTRL_CLEAR_CACHE_SET(x)\
+ FIELD_PREP(VCAP_SUPER_CTRL_CLEAR_CACHE, x)
+#define VCAP_SUPER_CTRL_CLEAR_CACHE_GET(x)\
+ FIELD_GET(VCAP_SUPER_CTRL_CLEAR_CACHE, x)
+
+#define VCAP_SUPER_CTRL_MV_TRAFFIC_IGN BIT(0)
+#define VCAP_SUPER_CTRL_MV_TRAFFIC_IGN_SET(x)\
+ FIELD_PREP(VCAP_SUPER_CTRL_MV_TRAFFIC_IGN, x)
+#define VCAP_SUPER_CTRL_MV_TRAFFIC_IGN_GET(x)\
+ FIELD_GET(VCAP_SUPER_CTRL_MV_TRAFFIC_IGN, x)
+
+/* VCAP_SUPER:VCAP_CORE_CFG:VCAP_MV_CFG */
+#define VCAP_SUPER_CFG __REG(TARGET_VCAP_SUPER,\
+ 0, 1, 0, 0, 1, 8, 4, 0, 1, 4)
+
+#define VCAP_SUPER_CFG_MV_NUM_POS GENMASK(31, 16)
+#define VCAP_SUPER_CFG_MV_NUM_POS_SET(x)\
+ FIELD_PREP(VCAP_SUPER_CFG_MV_NUM_POS, x)
+#define VCAP_SUPER_CFG_MV_NUM_POS_GET(x)\
+ FIELD_GET(VCAP_SUPER_CFG_MV_NUM_POS, x)
+
+#define VCAP_SUPER_CFG_MV_SIZE GENMASK(15, 0)
+#define VCAP_SUPER_CFG_MV_SIZE_SET(x)\
+ FIELD_PREP(VCAP_SUPER_CFG_MV_SIZE, x)
+#define VCAP_SUPER_CFG_MV_SIZE_GET(x)\
+ FIELD_GET(VCAP_SUPER_CFG_MV_SIZE, x)
+
+/* VCAP_SUPER:VCAP_CORE_CACHE:VCAP_ENTRY_DAT */
+#define VCAP_SUPER_VCAP_ENTRY_DAT(r) __REG(TARGET_VCAP_SUPER,\
+ 0, 1, 8, 0, 1, 904, 0, r, 64, 4)
+
+/* VCAP_SUPER:VCAP_CORE_CACHE:VCAP_MASK_DAT */
+#define VCAP_SUPER_VCAP_MASK_DAT(r) __REG(TARGET_VCAP_SUPER,\
+ 0, 1, 8, 0, 1, 904, 256, r, 64, 4)
+
+/* VCAP_SUPER:VCAP_CORE_CACHE:VCAP_ACTION_DAT */
+#define VCAP_SUPER_VCAP_ACTION_DAT(r) __REG(TARGET_VCAP_SUPER,\
+ 0, 1, 8, 0, 1, 904, 512, r, 64, 4)
+
+/* VCAP_SUPER:VCAP_CORE_CACHE:VCAP_CNT_DAT */
+#define VCAP_SUPER_VCAP_CNT_DAT(r) __REG(TARGET_VCAP_SUPER,\
+ 0, 1, 8, 0, 1, 904, 768, r, 32, 4)
+
+/* VCAP_SUPER:VCAP_CORE_CACHE:VCAP_CNT_FW_DAT */
+#define VCAP_SUPER_VCAP_CNT_FW_DAT __REG(TARGET_VCAP_SUPER,\
+ 0, 1, 8, 0, 1, 904, 896, 0, 1, 4)
+
+/* VCAP_SUPER:VCAP_CORE_CACHE:VCAP_TG_DAT */
+#define VCAP_SUPER_VCAP_TG_DAT __REG(TARGET_VCAP_SUPER,\
+ 0, 1, 8, 0, 1, 904, 900, 0, 1, 4)
+
+/* VCAP_SUPER:VCAP_CORE_MAP:VCAP_CORE_IDX */
+#define VCAP_SUPER_IDX __REG(TARGET_VCAP_SUPER,\
+ 0, 1, 912, 0, 1, 8, 0, 0, 1, 4)
+
+#define VCAP_SUPER_IDX_CORE_IDX GENMASK(3, 0)
+#define VCAP_SUPER_IDX_CORE_IDX_SET(x)\
+ FIELD_PREP(VCAP_SUPER_IDX_CORE_IDX, x)
+#define VCAP_SUPER_IDX_CORE_IDX_GET(x)\
+ FIELD_GET(VCAP_SUPER_IDX_CORE_IDX, x)
+
+/* VCAP_SUPER:VCAP_CORE_MAP:VCAP_CORE_MAP */
+#define VCAP_SUPER_MAP __REG(TARGET_VCAP_SUPER,\
+ 0, 1, 912, 0, 1, 8, 4, 0, 1, 4)
+
+#define VCAP_SUPER_MAP_CORE_MAP GENMASK(2, 0)
+#define VCAP_SUPER_MAP_CORE_MAP_SET(x)\
+ FIELD_PREP(VCAP_SUPER_MAP_CORE_MAP, x)
+#define VCAP_SUPER_MAP_CORE_MAP_GET(x)\
+ FIELD_GET(VCAP_SUPER_MAP_CORE_MAP, x)
+
+/* VCAP_SUPER:VCAP_CONST:VCAP_VER */
+#define VCAP_SUPER_VCAP_VER __REG(TARGET_VCAP_SUPER,\
+ 0, 1, 924, 0, 1, 40, 0, 0, 1, 4)
+
+/* VCAP_SUPER:VCAP_CONST:ENTRY_WIDTH */
+#define VCAP_SUPER_ENTRY_WIDTH __REG(TARGET_VCAP_SUPER,\
+ 0, 1, 924, 0, 1, 40, 4, 0, 1, 4)
+
+/* VCAP_SUPER:VCAP_CONST:ENTRY_CNT */
+#define VCAP_SUPER_ENTRY_CNT __REG(TARGET_VCAP_SUPER,\
+ 0, 1, 924, 0, 1, 40, 8, 0, 1, 4)
+
+/* VCAP_SUPER:VCAP_CONST:ENTRY_SWCNT */
+#define VCAP_SUPER_ENTRY_SWCNT __REG(TARGET_VCAP_SUPER,\
+ 0, 1, 924, 0, 1, 40, 12, 0, 1, 4)
+
+/* VCAP_SUPER:VCAP_CONST:ENTRY_TG_WIDTH */
+#define VCAP_SUPER_ENTRY_TG_WIDTH __REG(TARGET_VCAP_SUPER,\
+ 0, 1, 924, 0, 1, 40, 16, 0, 1, 4)
+
+/* VCAP_SUPER:VCAP_CONST:ACTION_DEF_CNT */
+#define VCAP_SUPER_ACTION_DEF_CNT __REG(TARGET_VCAP_SUPER,\
+ 0, 1, 924, 0, 1, 40, 20, 0, 1, 4)
+
+/* VCAP_SUPER:VCAP_CONST:ACTION_WIDTH */
+#define VCAP_SUPER_ACTION_WIDTH __REG(TARGET_VCAP_SUPER,\
+ 0, 1, 924, 0, 1, 40, 24, 0, 1, 4)
+
+/* VCAP_SUPER:VCAP_CONST:CNT_WIDTH */
+#define VCAP_SUPER_CNT_WIDTH __REG(TARGET_VCAP_SUPER,\
+ 0, 1, 924, 0, 1, 40, 28, 0, 1, 4)
+
+/* VCAP_SUPER:VCAP_CONST:CORE_CNT */
+#define VCAP_SUPER_CORE_CNT __REG(TARGET_VCAP_SUPER,\
+ 0, 1, 924, 0, 1, 40, 32, 0, 1, 4)
+
+/* VCAP_SUPER:VCAP_CONST:IF_CNT */
+#define VCAP_SUPER_IF_CNT __REG(TARGET_VCAP_SUPER,\
+ 0, 1, 924, 0, 1, 40, 36, 0, 1, 4)
+
+/* VCAP_SUPER:RAM_CTRL:RAM_INIT */
+#define VCAP_SUPER_RAM_INIT __REG(TARGET_VCAP_SUPER,\
+ 0, 1, 1120, 0, 1, 4, 0, 0, 1, 4)
+
+#define VCAP_SUPER_RAM_INIT_RAM_INIT BIT(1)
+#define VCAP_SUPER_RAM_INIT_RAM_INIT_SET(x)\
+ FIELD_PREP(VCAP_SUPER_RAM_INIT_RAM_INIT, x)
+#define VCAP_SUPER_RAM_INIT_RAM_INIT_GET(x)\
+ FIELD_GET(VCAP_SUPER_RAM_INIT_RAM_INIT, x)
+
+#define VCAP_SUPER_RAM_INIT_RAM_CFG_HOOK BIT(0)
+#define VCAP_SUPER_RAM_INIT_RAM_CFG_HOOK_SET(x)\
+ FIELD_PREP(VCAP_SUPER_RAM_INIT_RAM_CFG_HOOK, x)
+#define VCAP_SUPER_RAM_INIT_RAM_CFG_HOOK_GET(x)\
+ FIELD_GET(VCAP_SUPER_RAM_INIT_RAM_CFG_HOOK, x)
+
+/* VOP:RAM_CTRL:RAM_INIT */
+#define VOP_RAM_INIT __REG(TARGET_VOP,\
+ 0, 1, 279176, 0, 1, 4, 0, 0, 1, 4)
+
+#define VOP_RAM_INIT_RAM_INIT BIT(1)
+#define VOP_RAM_INIT_RAM_INIT_SET(x)\
+ FIELD_PREP(VOP_RAM_INIT_RAM_INIT, x)
+#define VOP_RAM_INIT_RAM_INIT_GET(x)\
+ FIELD_GET(VOP_RAM_INIT_RAM_INIT, x)
+
+#define VOP_RAM_INIT_RAM_CFG_HOOK BIT(0)
+#define VOP_RAM_INIT_RAM_CFG_HOOK_SET(x)\
+ FIELD_PREP(VOP_RAM_INIT_RAM_CFG_HOOK, x)
+#define VOP_RAM_INIT_RAM_CFG_HOOK_GET(x)\
+ FIELD_GET(VOP_RAM_INIT_RAM_CFG_HOOK, x)
+
+/* XQS:SYSTEM:STAT_CFG */
+#define XQS_STAT_CFG __REG(TARGET_XQS,\
+ 0, 1, 6768, 0, 1, 872, 860, 0, 1, 4)
+
+#define XQS_STAT_CFG_STAT_CLEAR_SHOT GENMASK(21, 18)
+#define XQS_STAT_CFG_STAT_CLEAR_SHOT_SET(x)\
+ FIELD_PREP(XQS_STAT_CFG_STAT_CLEAR_SHOT, x)
+#define XQS_STAT_CFG_STAT_CLEAR_SHOT_GET(x)\
+ FIELD_GET(XQS_STAT_CFG_STAT_CLEAR_SHOT, x)
+
+#define XQS_STAT_CFG_STAT_VIEW GENMASK(17, 5)
+#define XQS_STAT_CFG_STAT_VIEW_SET(x)\
+ FIELD_PREP(XQS_STAT_CFG_STAT_VIEW, x)
+#define XQS_STAT_CFG_STAT_VIEW_GET(x)\
+ FIELD_GET(XQS_STAT_CFG_STAT_VIEW, x)
+
+#define XQS_STAT_CFG_STAT_SRV_PKT_ONLY BIT(4)
+#define XQS_STAT_CFG_STAT_SRV_PKT_ONLY_SET(x)\
+ FIELD_PREP(XQS_STAT_CFG_STAT_SRV_PKT_ONLY, x)
+#define XQS_STAT_CFG_STAT_SRV_PKT_ONLY_GET(x)\
+ FIELD_GET(XQS_STAT_CFG_STAT_SRV_PKT_ONLY, x)
+
+#define XQS_STAT_CFG_STAT_WRAP_DIS GENMASK(3, 0)
+#define XQS_STAT_CFG_STAT_WRAP_DIS_SET(x)\
+ FIELD_PREP(XQS_STAT_CFG_STAT_WRAP_DIS, x)
+#define XQS_STAT_CFG_STAT_WRAP_DIS_GET(x)\
+ FIELD_GET(XQS_STAT_CFG_STAT_WRAP_DIS, x)
+
+/* XQS:QLIMIT_SHR:QLIMIT_SHR_TOP_CFG */
+#define XQS_QLIMIT_SHR_TOP_CFG(g) __REG(TARGET_XQS,\
+ 0, 1, 7936, g, 4, 48, 0, 0, 1, 4)
+
+#define XQS_QLIMIT_SHR_TOP_CFG_QLIMIT_SHR_TOP GENMASK(14, 0)
+#define XQS_QLIMIT_SHR_TOP_CFG_QLIMIT_SHR_TOP_SET(x)\
+ FIELD_PREP(XQS_QLIMIT_SHR_TOP_CFG_QLIMIT_SHR_TOP, x)
+#define XQS_QLIMIT_SHR_TOP_CFG_QLIMIT_SHR_TOP_GET(x)\
+ FIELD_GET(XQS_QLIMIT_SHR_TOP_CFG_QLIMIT_SHR_TOP, x)
+
+/* XQS:QLIMIT_SHR:QLIMIT_SHR_ATOP_CFG */
+#define XQS_QLIMIT_SHR_ATOP_CFG(g) __REG(TARGET_XQS,\
+ 0, 1, 7936, g, 4, 48, 4, 0, 1, 4)
+
+#define XQS_QLIMIT_SHR_ATOP_CFG_QLIMIT_SHR_ATOP GENMASK(14, 0)
+#define XQS_QLIMIT_SHR_ATOP_CFG_QLIMIT_SHR_ATOP_SET(x)\
+ FIELD_PREP(XQS_QLIMIT_SHR_ATOP_CFG_QLIMIT_SHR_ATOP, x)
+#define XQS_QLIMIT_SHR_ATOP_CFG_QLIMIT_SHR_ATOP_GET(x)\
+ FIELD_GET(XQS_QLIMIT_SHR_ATOP_CFG_QLIMIT_SHR_ATOP, x)
+
+/* XQS:QLIMIT_SHR:QLIMIT_SHR_CTOP_CFG */
+#define XQS_QLIMIT_SHR_CTOP_CFG(g) __REG(TARGET_XQS,\
+ 0, 1, 7936, g, 4, 48, 8, 0, 1, 4)
+
+#define XQS_QLIMIT_SHR_CTOP_CFG_QLIMIT_SHR_CTOP GENMASK(14, 0)
+#define XQS_QLIMIT_SHR_CTOP_CFG_QLIMIT_SHR_CTOP_SET(x)\
+ FIELD_PREP(XQS_QLIMIT_SHR_CTOP_CFG_QLIMIT_SHR_CTOP, x)
+#define XQS_QLIMIT_SHR_CTOP_CFG_QLIMIT_SHR_CTOP_GET(x)\
+ FIELD_GET(XQS_QLIMIT_SHR_CTOP_CFG_QLIMIT_SHR_CTOP, x)
+
+/* XQS:QLIMIT_SHR:QLIMIT_SHR_QLIM_CFG */
+#define XQS_QLIMIT_SHR_QLIM_CFG(g) __REG(TARGET_XQS,\
+ 0, 1, 7936, g, 4, 48, 12, 0, 1, 4)
+
+#define XQS_QLIMIT_SHR_QLIM_CFG_QLIMIT_SHR_QLIM GENMASK(14, 0)
+#define XQS_QLIMIT_SHR_QLIM_CFG_QLIMIT_SHR_QLIM_SET(x)\
+ FIELD_PREP(XQS_QLIMIT_SHR_QLIM_CFG_QLIMIT_SHR_QLIM, x)
+#define XQS_QLIMIT_SHR_QLIM_CFG_QLIMIT_SHR_QLIM_GET(x)\
+ FIELD_GET(XQS_QLIMIT_SHR_QLIM_CFG_QLIMIT_SHR_QLIM, x)
+
+/* XQS:STAT:CNT */
+#define XQS_CNT(g) __REG(TARGET_XQS,\
+ 0, 1, 0, g, 1024, 4, 0, 0, 1, 4)
+
+#endif /* _SPARX5_MAIN_REGS_H_ */
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c b/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c
new file mode 100644
index 0000000000..705a004b32
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c
@@ -0,0 +1,334 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include "sparx5_main_regs.h"
+#include "sparx5_main.h"
+#include "sparx5_port.h"
+#include "sparx5_tc.h"
+
+/* The IFH bit position of the first VSTAX bit. This offset is needed
+ * because the VSTAX bit positions in the data sheet start from zero.
+ */
+#define VSTAX 73
+
+#define ifh_encode_bitfield(ifh, value, pos, _width) \
+ ({ \
+ u32 width = (_width); \
+ \
+ /* Max width is 5 bytes (40 bits). In the worst case this will
+ * spread over 6 bytes (48 bits)
+ */ \
+ compiletime_assert(width <= 40, \
+ "Unsupported width, must be <= 40"); \
+ __ifh_encode_bitfield((ifh), (value), (pos), width); \
+ })
+
+static void __ifh_encode_bitfield(void *ifh, u64 value, u32 pos, u32 width)
+{
+ u8 *ifh_hdr = ifh;
+ /* Calculate the start IFH byte holding this IFH bit position */
+ u32 byte = (35 - (pos / 8));
+ /* Calculate the start bit position within that start IFH byte */
+ u32 bit = (pos % 8);
+ u64 encode = GENMASK_ULL(bit + width - 1, bit) & (value << bit);
+
+ /* Bits b0-b7 go into the start IFH byte */
+ if (encode & 0xFF)
+ ifh_hdr[byte] |= (u8)((encode & 0xFF));
+ /* Bits b8-b15 go into the next IFH byte */
+ if (encode & 0xFF00)
+ ifh_hdr[byte - 1] |= (u8)((encode & 0xFF00) >> 8);
+ /* Bits b16-b23 go into the next IFH byte */
+ if (encode & 0xFF0000)
+ ifh_hdr[byte - 2] |= (u8)((encode & 0xFF0000) >> 16);
+ /* Bits b24-b31 go into the next IFH byte */
+ if (encode & 0xFF000000)
+ ifh_hdr[byte - 3] |= (u8)((encode & 0xFF000000) >> 24);
+ /* Bits b32-b39 go into the next IFH byte */
+ if (encode & 0xFF00000000)
+ ifh_hdr[byte - 4] |= (u8)((encode & 0xFF00000000) >> 32);
+ /* Bits b40-b47 go into the next IFH byte */
+ if (encode & 0xFF0000000000)
+ ifh_hdr[byte - 5] |= (u8)((encode & 0xFF0000000000) >> 40);
+}
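+
+/* Worked example (illustrative values only): encoding portno = 65 (0x41)
+ * at pos 29 with width 8 gives byte = 35 - (29 / 8) = 32 and
+ * bit = 29 % 8 = 5, so encode = 0x41 << 5 = 0x820; the low byte 0x20
+ * lands in ifh_hdr[32] and the spill-over 0x08 lands in ifh_hdr[31].
+ */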
+
+void sparx5_set_port_ifh(void *ifh_hdr, u16 portno)
+{
+ /* VSTAX.RSV = 1. MSBit must be 1 */
+ ifh_encode_bitfield(ifh_hdr, 1, VSTAX + 79, 1);
+ /* VSTAX.INGR_DROP_MODE = Enable. Avoid head-of-line blocking */
+ ifh_encode_bitfield(ifh_hdr, 1, VSTAX + 55, 1);
+ /* MISC.CPU_MASK/DPORT = Destination port */
+ ifh_encode_bitfield(ifh_hdr, portno, 29, 8);
+ /* MISC.PIPELINE_PT */
+ ifh_encode_bitfield(ifh_hdr, 16, 37, 5);
+ /* MISC.PIPELINE_ACT */
+ ifh_encode_bitfield(ifh_hdr, 1, 42, 3);
+ /* FWD.SRC_PORT = CPU */
+ ifh_encode_bitfield(ifh_hdr, SPX5_PORT_CPU, 46, 7);
+ /* FWD.SFLOW_ID (disable SFlow sampling) */
+ ifh_encode_bitfield(ifh_hdr, 124, 57, 7);
+ /* FWD.UPDATE_FCS = Enable. Enforce update of FCS. */
+ ifh_encode_bitfield(ifh_hdr, 1, 67, 1);
+}
+
+void sparx5_set_port_ifh_rew_op(void *ifh_hdr, u32 rew_op)
+{
+ ifh_encode_bitfield(ifh_hdr, rew_op, VSTAX + 32, 10);
+}
+
+void sparx5_set_port_ifh_pdu_type(void *ifh_hdr, u32 pdu_type)
+{
+ ifh_encode_bitfield(ifh_hdr, pdu_type, 191, 4);
+}
+
+void sparx5_set_port_ifh_pdu_w16_offset(void *ifh_hdr, u32 pdu_w16_offset)
+{
+ ifh_encode_bitfield(ifh_hdr, pdu_w16_offset, 195, 6);
+}
+
+void sparx5_set_port_ifh_timestamp(void *ifh_hdr, u64 timestamp)
+{
+ ifh_encode_bitfield(ifh_hdr, timestamp, 232, 40);
+}
+
+static int sparx5_port_open(struct net_device *ndev)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ int err = 0;
+
+ sparx5_port_enable(port, true);
+ err = phylink_of_phy_connect(port->phylink, port->of_node, 0);
+ if (err) {
+ netdev_err(ndev, "Could not attach to PHY\n");
+ goto err_connect;
+ }
+
+ phylink_start(port->phylink);
+
+ if (!ndev->phydev) {
+ /* power up serdes */
+ port->conf.power_down = false;
+ if (port->conf.serdes_reset)
+ err = sparx5_serdes_set(port->sparx5, port, &port->conf);
+ else
+ err = phy_power_on(port->serdes);
+ if (err) {
+ netdev_err(ndev, "%s failed\n", __func__);
+ goto out_power;
+ }
+ }
+
+ return 0;
+
+out_power:
+ phylink_stop(port->phylink);
+ phylink_disconnect_phy(port->phylink);
+err_connect:
+ sparx5_port_enable(port, false);
+
+ return err;
+}
+
+static int sparx5_port_stop(struct net_device *ndev)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ int err = 0;
+
+ sparx5_port_enable(port, false);
+ phylink_stop(port->phylink);
+ phylink_disconnect_phy(port->phylink);
+
+ if (!ndev->phydev) {
+ /* power down serdes */
+ port->conf.power_down = true;
+ if (port->conf.serdes_reset)
+ err = sparx5_serdes_set(port->sparx5, port, &port->conf);
+ else
+ err = phy_power_off(port->serdes);
+ if (err)
+ netdev_err(ndev, "%s failed\n", __func__);
+ }
+ return 0;
+}
+
+static void sparx5_set_rx_mode(struct net_device *dev)
+{
+ struct sparx5_port *port = netdev_priv(dev);
+ struct sparx5 *sparx5 = port->sparx5;
+
+ if (!test_bit(port->portno, sparx5->bridge_mask))
+ __dev_mc_sync(dev, sparx5_mc_sync, sparx5_mc_unsync);
+}
+
+static int sparx5_port_get_phys_port_name(struct net_device *dev,
+ char *buf, size_t len)
+{
+ struct sparx5_port *port = netdev_priv(dev);
+ int ret;
+
+ ret = snprintf(buf, len, "p%d", port->portno);
+ if (ret >= len)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int sparx5_set_mac_address(struct net_device *dev, void *p)
+{
+ struct sparx5_port *port = netdev_priv(dev);
+ struct sparx5 *sparx5 = port->sparx5;
+ const struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ /* Remove current */
+ sparx5_mact_forget(sparx5, dev->dev_addr, port->pvid);
+
+ /* Add new */
+ sparx5_mact_learn(sparx5, PGID_CPU, addr->sa_data, port->pvid);
+
+ /* Record the address */
+ eth_hw_addr_set(dev, addr->sa_data);
+
+ return 0;
+}
+
+static int sparx5_get_port_parent_id(struct net_device *dev,
+ struct netdev_phys_item_id *ppid)
+{
+ struct sparx5_port *sparx5_port = netdev_priv(dev);
+ struct sparx5 *sparx5 = sparx5_port->sparx5;
+
+ ppid->id_len = sizeof(sparx5->base_mac);
+ memcpy(&ppid->id, &sparx5->base_mac, ppid->id_len);
+
+ return 0;
+}
+
+static int sparx5_port_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg)
+{
+ struct sparx5_port *sparx5_port = netdev_priv(dev);
+ struct sparx5 *sparx5 = sparx5_port->sparx5;
+
+ if (!sparx5->ptp)
+ return -EOPNOTSUPP;
+
+ sparx5_ptp_hwtstamp_get(sparx5_port, cfg);
+
+ return 0;
+}
+
+static int sparx5_port_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack)
+{
+ struct sparx5_port *sparx5_port = netdev_priv(dev);
+ struct sparx5 *sparx5 = sparx5_port->sparx5;
+
+ if (!sparx5->ptp)
+ return -EOPNOTSUPP;
+
+ return sparx5_ptp_hwtstamp_set(sparx5_port, cfg, extack);
+}
+
+static const struct net_device_ops sparx5_port_netdev_ops = {
+ .ndo_open = sparx5_port_open,
+ .ndo_stop = sparx5_port_stop,
+ .ndo_start_xmit = sparx5_port_xmit_impl,
+ .ndo_set_rx_mode = sparx5_set_rx_mode,
+ .ndo_get_phys_port_name = sparx5_port_get_phys_port_name,
+ .ndo_set_mac_address = sparx5_set_mac_address,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_get_stats64 = sparx5_get_stats64,
+ .ndo_get_port_parent_id = sparx5_get_port_parent_id,
+ .ndo_eth_ioctl = phy_do_ioctl,
+ .ndo_setup_tc = sparx5_port_setup_tc,
+ .ndo_hwtstamp_get = sparx5_port_hwtstamp_get,
+ .ndo_hwtstamp_set = sparx5_port_hwtstamp_set,
+};
+
+bool sparx5_netdevice_check(const struct net_device *dev)
+{
+ return dev && (dev->netdev_ops == &sparx5_port_netdev_ops);
+}
+
+struct net_device *sparx5_create_netdev(struct sparx5 *sparx5, u32 portno)
+{
+ struct sparx5_port *spx5_port;
+ struct net_device *ndev;
+
+ ndev = devm_alloc_etherdev_mqs(sparx5->dev, sizeof(struct sparx5_port),
+ SPX5_PRIOS, 1);
+ if (!ndev)
+ return ERR_PTR(-ENOMEM);
+
+ ndev->hw_features |= NETIF_F_HW_TC;
+ ndev->features |= NETIF_F_HW_TC;
+
+ SET_NETDEV_DEV(ndev, sparx5->dev);
+ spx5_port = netdev_priv(ndev);
+ spx5_port->ndev = ndev;
+ spx5_port->sparx5 = sparx5;
+ spx5_port->portno = portno;
+
+ ndev->netdev_ops = &sparx5_port_netdev_ops;
+ ndev->ethtool_ops = &sparx5_ethtool_ops;
+
+ eth_hw_addr_gen(ndev, sparx5->base_mac, portno + 1);
+
+ return ndev;
+}
+
+int sparx5_register_netdevs(struct sparx5 *sparx5)
+{
+ int portno;
+ int err;
+
+ for (portno = 0; portno < SPX5_PORTS; portno++)
+ if (sparx5->ports[portno]) {
+ err = register_netdev(sparx5->ports[portno]->ndev);
+ if (err) {
+ dev_err(sparx5->dev,
+ "port: %02u: netdev registration failed\n",
+ portno);
+ return err;
+ }
+ sparx5_port_inj_timer_setup(sparx5->ports[portno]);
+ }
+ return 0;
+}
+
+void sparx5_destroy_netdevs(struct sparx5 *sparx5)
+{
+ struct sparx5_port *port;
+ int portno;
+
+ for (portno = 0; portno < SPX5_PORTS; portno++) {
+ port = sparx5->ports[portno];
+ if (port && port->phylink) {
+ /* Disconnect the phy */
+ rtnl_lock();
+ sparx5_port_stop(port->ndev);
+ phylink_disconnect_phy(port->phylink);
+ rtnl_unlock();
+ phylink_destroy(port->phylink);
+ port->phylink = NULL;
+ }
+ }
+}
+
+void sparx5_unregister_netdevs(struct sparx5 *sparx5)
+{
+ int portno;
+
+ for (portno = 0; portno < SPX5_PORTS; portno++)
+ if (sparx5->ports[portno])
+ unregister_netdev(sparx5->ports[portno]->ndev);
+}
+
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
new file mode 100644
index 0000000000..6db6ac6a3b
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
@@ -0,0 +1,359 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include "sparx5_main_regs.h"
+#include "sparx5_main.h"
+
+#define XTR_EOF_0 ntohl((__force __be32)0x80000000u)
+#define XTR_EOF_1 ntohl((__force __be32)0x80000001u)
+#define XTR_EOF_2 ntohl((__force __be32)0x80000002u)
+#define XTR_EOF_3 ntohl((__force __be32)0x80000003u)
+#define XTR_PRUNED ntohl((__force __be32)0x80000004u)
+#define XTR_ABORT ntohl((__force __be32)0x80000005u)
+#define XTR_ESCAPE ntohl((__force __be32)0x80000006u)
+#define XTR_NOT_READY ntohl((__force __be32)0x80000007u)
+
+#define XTR_VALID_BYTES(x) (4 - ((x) & 3))
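+
+/* A hedged reading of the EOF status words above: for XTR_EOF_n the two
+ * LSBs carry n, the number of unused bytes in the final data word, so
+ * XTR_VALID_BYTES() yields 4 - n valid frame bytes (4 for XTR_EOF_0 down
+ * to 1 for XTR_EOF_3).
+ */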
+
+#define INJ_TIMEOUT_NS 50000
+
+void sparx5_xtr_flush(struct sparx5 *sparx5, u8 grp)
+{
+ /* Start flush */
+ spx5_wr(QS_XTR_FLUSH_FLUSH_SET(BIT(grp)), sparx5, QS_XTR_FLUSH);
+
+ /* Allow the flush to drain */
+ mdelay(1);
+
+ /* All Queues normal */
+ spx5_wr(0, sparx5, QS_XTR_FLUSH);
+}
+
+void sparx5_ifh_parse(u32 *ifh, struct frame_info *info)
+{
+ u8 *xtr_hdr = (u8 *)ifh;
+
+ /* FWD spans bits 45-72 (28 bits), but we only read the 27 LSBs for now */
+ u32 fwd =
+ ((u32)xtr_hdr[27] << 24) |
+ ((u32)xtr_hdr[28] << 16) |
+ ((u32)xtr_hdr[29] << 8) |
+ ((u32)xtr_hdr[30] << 0);
+ fwd = (fwd >> 5);
+ info->src_port = FIELD_GET(GENMASK(7, 1), fwd);
+
+ info->timestamp =
+ ((u64)xtr_hdr[2] << 24) |
+ ((u64)xtr_hdr[3] << 16) |
+ ((u64)xtr_hdr[4] << 8) |
+ ((u64)xtr_hdr[5] << 0);
+}
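+
+/* Illustrative arithmetic for the parse above: bytes 27-30 of the
+ * extraction header hold IFH bits 40-71 in big-endian order, so shifting
+ * right by 5 aligns bit 45 (the start of FWD) at position 0, and
+ * GENMASK(7, 1) then extracts the 7-bit SRC_PORT field from FWD bits 1-7.
+ */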
+
+static void sparx5_xtr_grp(struct sparx5 *sparx5, u8 grp, bool byte_swap)
+{
+ bool eof_flag = false, pruned_flag = false, abort_flag = false;
+ struct net_device *netdev;
+ struct sparx5_port *port;
+ struct frame_info fi;
+ int i, byte_cnt = 0;
+ struct sk_buff *skb;
+ u32 ifh[IFH_LEN];
+ u32 *rxbuf;
+
+ /* Get IFH */
+ for (i = 0; i < IFH_LEN; i++)
+ ifh[i] = spx5_rd(sparx5, QS_XTR_RD(grp));
+
+ /* Decode IFH (just what's needed) */
+ sparx5_ifh_parse(ifh, &fi);
+
+ /* Map to port netdev */
+ port = fi.src_port < SPX5_PORTS ?
+ sparx5->ports[fi.src_port] : NULL;
+ if (!port || !port->ndev) {
+ dev_err(sparx5->dev, "Data on inactive port %d\n", fi.src_port);
+ sparx5_xtr_flush(sparx5, grp);
+ return;
+ }
+
+ /* Have netdev, get skb */
+ netdev = port->ndev;
+ skb = netdev_alloc_skb(netdev, netdev->mtu + ETH_HLEN);
+ if (!skb) {
+ sparx5_xtr_flush(sparx5, grp);
+ dev_err(sparx5->dev, "No skb allocated\n");
+ netdev->stats.rx_dropped++;
+ return;
+ }
+ rxbuf = (u32 *)skb->data;
+
+ /* Now, pull frame data */
+ while (!eof_flag) {
+ u32 val = spx5_rd(sparx5, QS_XTR_RD(grp));
+ u32 cmp = val;
+
+ if (byte_swap)
+ cmp = ntohl((__force __be32)val);
+
+ switch (cmp) {
+ case XTR_NOT_READY:
+ break;
+ case XTR_ABORT:
+ /* No accompanying data */
+ abort_flag = true;
+ eof_flag = true;
+ break;
+ case XTR_EOF_0:
+ case XTR_EOF_1:
+ case XTR_EOF_2:
+ case XTR_EOF_3:
+ /* This assumes STATUS_WORD_POS == 1, i.e. the status
+ * word comes just after the last data word
+ */
+ if (!byte_swap)
+ val = ntohl((__force __be32)val);
+ byte_cnt -= (4 - XTR_VALID_BYTES(val));
+ eof_flag = true;
+ break;
+ case XTR_PRUNED:
+ /* Frame was pruned, but read the last 4 bytes as well */
+ eof_flag = true;
+ pruned_flag = true;
+ fallthrough;
+ case XTR_ESCAPE:
+ *rxbuf = spx5_rd(sparx5, QS_XTR_RD(grp));
+ byte_cnt += 4;
+ rxbuf++;
+ break;
+ default:
+ *rxbuf = val;
+ byte_cnt += 4;
+ rxbuf++;
+ }
+ }
+
+ if (abort_flag || pruned_flag || !eof_flag) {
+ netdev_err(netdev, "Discarded frame: abort:%d pruned:%d eof:%d\n",
+ abort_flag, pruned_flag, eof_flag);
+ kfree_skb(skb);
+ netdev->stats.rx_dropped++;
+ return;
+ }
+
+ /* Everything we see on an interface that is in the HW bridge
+ * has already been forwarded
+ */
+ if (test_bit(port->portno, sparx5->bridge_mask))
+ skb->offload_fwd_mark = 1;
+
+ /* Finish up skb */
+ skb_put(skb, byte_cnt - ETH_FCS_LEN);
+ eth_skb_pad(skb);
+ sparx5_ptp_rxtstamp(sparx5, skb, fi.timestamp);
+ skb->protocol = eth_type_trans(skb, netdev);
+ netdev->stats.rx_bytes += skb->len;
+ netdev->stats.rx_packets++;
+ netif_rx(skb);
+}
+
+static int sparx5_inject(struct sparx5 *sparx5,
+ u32 *ifh,
+ struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ int grp = INJ_QUEUE;
+ u32 val, w, count;
+ u8 *buf;
+
+ val = spx5_rd(sparx5, QS_INJ_STATUS);
+ if (!(QS_INJ_STATUS_FIFO_RDY_GET(val) & BIT(grp))) {
+ pr_err_ratelimited("Injection: Queue not ready: 0x%lx\n",
+ QS_INJ_STATUS_FIFO_RDY_GET(val));
+ return -EBUSY;
+ }
+
+ /* Indicate SOF */
+ spx5_wr(QS_INJ_CTRL_SOF_SET(1) |
+ QS_INJ_CTRL_GAP_SIZE_SET(1),
+ sparx5, QS_INJ_CTRL(grp));
+
+ /* Write the IFH to the chip. */
+ for (w = 0; w < IFH_LEN; w++)
+ spx5_wr(ifh[w], sparx5, QS_INJ_WR(grp));
+
+ /* Write words, round up */
+ count = DIV_ROUND_UP(skb->len, 4);
+ buf = skb->data;
+ for (w = 0; w < count; w++, buf += 4) {
+ val = get_unaligned((const u32 *)buf);
+ spx5_wr(val, sparx5, QS_INJ_WR(grp));
+ }
+
+ /* Add padding */
+ while (w < (60 / 4)) {
+ spx5_wr(0, sparx5, QS_INJ_WR(grp));
+ w++;
+ }
+
+ /* Indicate EOF and valid bytes in last word */
+ spx5_wr(QS_INJ_CTRL_GAP_SIZE_SET(1) |
+ QS_INJ_CTRL_VLD_BYTES_SET(skb->len < 60 ? 0 : skb->len % 4) |
+ QS_INJ_CTRL_EOF_SET(1),
+ sparx5, QS_INJ_CTRL(grp));
+
+ /* Add dummy CRC */
+ spx5_wr(0, sparx5, QS_INJ_WR(grp));
+ w++;
+
+ val = spx5_rd(sparx5, QS_INJ_STATUS);
+ if (QS_INJ_STATUS_WMARK_REACHED_GET(val) & BIT(grp)) {
+ struct sparx5_port *port = netdev_priv(ndev);
+
+ pr_err_ratelimited("Injection: Watermark reached: 0x%lx\n",
+ QS_INJ_STATUS_WMARK_REACHED_GET(val));
+ netif_stop_queue(ndev);
+ hrtimer_start(&port->inj_timer, INJ_TIMEOUT_NS,
+ HRTIMER_MODE_REL);
+ }
+
+ return NETDEV_TX_OK;
+}
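+
+/* The manual injection sequence above, in brief: signal SOF via
+ * QS_INJ_CTRL, push the IFH words and then the frame data through
+ * QS_INJ_WR, zero-pad to the 60-byte minimum, signal EOF with the count
+ * of valid bytes in the last word, and write one dummy word in place of
+ * the FCS (FWD.UPDATE_FCS in the IFH has the hardware recompute it).
+ */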
+
+netdev_tx_t sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev)
+{
+ struct net_device_stats *stats = &dev->stats;
+ struct sparx5_port *port = netdev_priv(dev);
+ struct sparx5 *sparx5 = port->sparx5;
+ u32 ifh[IFH_LEN];
+ netdev_tx_t ret;
+
+ memset(ifh, 0, IFH_LEN * 4);
+ sparx5_set_port_ifh(ifh, port->portno);
+
+ if (sparx5->ptp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
+ if (sparx5_ptp_txtstamp_request(port, skb) < 0)
+ return NETDEV_TX_BUSY;
+
+ sparx5_set_port_ifh_rew_op(ifh, SPARX5_SKB_CB(skb)->rew_op);
+ sparx5_set_port_ifh_pdu_type(ifh, SPARX5_SKB_CB(skb)->pdu_type);
+ sparx5_set_port_ifh_pdu_w16_offset(ifh, SPARX5_SKB_CB(skb)->pdu_w16_offset);
+ sparx5_set_port_ifh_timestamp(ifh, SPARX5_SKB_CB(skb)->ts_id);
+ }
+
+ skb_tx_timestamp(skb);
+ if (sparx5->fdma_irq > 0)
+ ret = sparx5_fdma_xmit(sparx5, ifh, skb);
+ else
+ ret = sparx5_inject(sparx5, ifh, skb, dev);
+
+ if (ret == -EBUSY)
+ goto busy;
+ if (ret < 0)
+ goto drop;
+
+ stats->tx_bytes += skb->len;
+ stats->tx_packets++;
+ sparx5->tx.packets++;
+
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+ SPARX5_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
+ return NETDEV_TX_OK;
+
+ dev_consume_skb_any(skb);
+ return NETDEV_TX_OK;
+drop:
+ stats->tx_dropped++;
+ sparx5->tx.dropped++;
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+busy:
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+ SPARX5_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
+ sparx5_ptp_txtstamp_release(port, skb);
+ return NETDEV_TX_BUSY;
+}
+
+static enum hrtimer_restart sparx5_injection_timeout(struct hrtimer *tmr)
+{
+ struct sparx5_port *port = container_of(tmr, struct sparx5_port,
+ inj_timer);
+ int grp = INJ_QUEUE;
+ u32 val;
+
+ val = spx5_rd(port->sparx5, QS_INJ_STATUS);
+ if (QS_INJ_STATUS_WMARK_REACHED_GET(val) & BIT(grp)) {
+ pr_err_ratelimited("Injection: Reset watermark count\n");
+ /* Reset Watermark count to restart */
+ spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR_SET(1),
+ DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR,
+ port->sparx5,
+ DSM_DEV_TX_STOP_WM_CFG(port->portno));
+ }
+ netif_wake_queue(port->ndev);
+ return HRTIMER_NORESTART;
+}
+
+int sparx5_manual_injection_mode(struct sparx5 *sparx5)
+{
+ const int byte_swap = 1;
+ int portno;
+
+ /* Change mode to manual extraction and injection */
+ spx5_wr(QS_XTR_GRP_CFG_MODE_SET(1) |
+ QS_XTR_GRP_CFG_STATUS_WORD_POS_SET(1) |
+ QS_XTR_GRP_CFG_BYTE_SWAP_SET(byte_swap),
+ sparx5, QS_XTR_GRP_CFG(XTR_QUEUE));
+ spx5_wr(QS_INJ_GRP_CFG_MODE_SET(1) |
+ QS_INJ_GRP_CFG_BYTE_SWAP_SET(byte_swap),
+ sparx5, QS_INJ_GRP_CFG(INJ_QUEUE));
+
+ /* CPU ports capture setup */
+ for (portno = SPX5_PORT_CPU_0; portno <= SPX5_PORT_CPU_1; portno++) {
+ /* ASM CPU port: No preamble, IFH, enable padding */
+ spx5_wr(ASM_PORT_CFG_PAD_ENA_SET(1) |
+ ASM_PORT_CFG_NO_PREAMBLE_ENA_SET(1) |
+ ASM_PORT_CFG_INJ_FORMAT_CFG_SET(1), /* 1 = IFH */
+ sparx5, ASM_PORT_CFG(portno));
+
+ /* Reset WM cnt to unclog queued frames */
+ spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR_SET(1),
+ DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR,
+ sparx5,
+ DSM_DEV_TX_STOP_WM_CFG(portno));
+
+ /* Set Disassembler Stop Watermark level */
+ spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM_SET(0),
+ DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM,
+ sparx5,
+ DSM_DEV_TX_STOP_WM_CFG(portno));
+
+ /* Enable Disassembler buffer underrun watchdog */
+ spx5_rmw(DSM_BUF_CFG_UNDERFLOW_WATCHDOG_DIS_SET(0),
+ DSM_BUF_CFG_UNDERFLOW_WATCHDOG_DIS,
+ sparx5,
+ DSM_BUF_CFG(portno));
+ }
+ return 0;
+}
+
+irqreturn_t sparx5_xtr_handler(int irq, void *_sparx5)
+{
+ struct sparx5 *s5 = _sparx5;
+ int poll = 64;
+
+ /* Check data in queue */
+ while (spx5_rd(s5, QS_XTR_DATA_PRESENT) & BIT(XTR_QUEUE) && poll-- > 0)
+ sparx5_xtr_grp(s5, XTR_QUEUE, false);
+
+ return IRQ_HANDLED;
+}
+
+void sparx5_port_inj_timer_setup(struct sparx5_port *port)
+{
+ hrtimer_init(&port->inj_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ port->inj_timer.function = sparx5_injection_timeout;
+}
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_pgid.c b/drivers/net/ethernet/microchip/sparx5/sparx5_pgid.c
new file mode 100644
index 0000000000..af8b435009
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_pgid.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-2.0+
+#include "sparx5_main.h"
+
+void sparx5_pgid_init(struct sparx5 *spx5)
+{
+ int i;
+
+ for (i = 0; i < PGID_TABLE_SIZE; i++)
+ spx5->pgid_map[i] = SPX5_PGID_FREE;
+
+ /* Reserved for unicast, flood control, broadcast, and CPU.
+ * These cannot be freed.
+ */
+ for (i = 0; i <= PGID_CPU; i++)
+ spx5->pgid_map[i] = SPX5_PGID_RESERVED;
+}
+
+int sparx5_pgid_alloc_mcast(struct sparx5 *spx5, u16 *idx)
+{
+ int i;
+
+ /* The multicast area starts at index 65, but the first 7 entries
+ * are reserved for flood masks and CPU forwarding, so allocation
+ * starts after those.
+ */
+ for (i = PGID_MCAST_START; i < PGID_TABLE_SIZE; i++) {
+ if (spx5->pgid_map[i] == SPX5_PGID_FREE) {
+ spx5->pgid_map[i] = SPX5_PGID_MULTICAST;
+ *idx = i;
+ return 0;
+ }
+ }
+
+ return -EBUSY;
+}
+
+int sparx5_pgid_free(struct sparx5 *spx5, u16 idx)
+{
+ if (idx <= PGID_CPU || idx >= PGID_TABLE_SIZE)
+ return -EINVAL;
+
+ if (spx5->pgid_map[idx] == SPX5_PGID_FREE)
+ return -EINVAL;
+
+ spx5->pgid_map[idx] = SPX5_PGID_FREE;
+ return 0;
+}
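A minimal usage sketch of the PGID allocator above, with error handling trimmed: sparx5_pgid_alloc_mcast() hands out the first free entry at or above PGID_MCAST_START, and sparx5_pgid_free() rejects both reserved (idx <= PGID_CPU) and already-free entries.

u16 pgid;

if (!sparx5_pgid_alloc_mcast(spx5, &pgid)) {
	/* ... program the multicast port mask for 'pgid' ... */
	sparx5_pgid_free(spx5, pgid);	/* -EINVAL for reserved ids */
}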
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c b/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c
new file mode 100644
index 0000000000..f8562c1a89
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c
@@ -0,0 +1,145 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include <linux/module.h>
+#include <linux/phylink.h>
+#include <linux/device.h>
+#include <linux/netdevice.h>
+#include <linux/sfp.h>
+
+#include "sparx5_main_regs.h"
+#include "sparx5_main.h"
+#include "sparx5_port.h"
+
+static bool port_conf_has_changed(struct sparx5_port_config *a, struct sparx5_port_config *b)
+{
+ if (a->speed != b->speed ||
+ a->portmode != b->portmode ||
+ a->autoneg != b->autoneg ||
+ a->pause_adv != b->pause_adv ||
+ a->power_down != b->power_down ||
+ a->media != b->media)
+ return true;
+ return false;
+}
+
+static struct phylink_pcs *
+sparx5_phylink_mac_select_pcs(struct phylink_config *config,
+ phy_interface_t interface)
+{
+ struct sparx5_port *port = netdev_priv(to_net_dev(config->dev));
+
+ return &port->phylink_pcs;
+}
+
+static void sparx5_phylink_mac_config(struct phylink_config *config,
+ unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ /* Currently not used */
+}
+
+static void sparx5_phylink_mac_link_up(struct phylink_config *config,
+ struct phy_device *phy,
+ unsigned int mode,
+ phy_interface_t interface,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+ struct sparx5_port *port = netdev_priv(to_net_dev(config->dev));
+ struct sparx5_port_config conf;
+ int err;
+
+ conf = port->conf;
+ conf.duplex = duplex;
+ conf.pause = 0;
+ conf.pause |= tx_pause ? MLO_PAUSE_TX : 0;
+ conf.pause |= rx_pause ? MLO_PAUSE_RX : 0;
+ conf.speed = speed;
+ /* Configure the port to speed/duplex/pause */
+ err = sparx5_port_config(port->sparx5, port, &conf);
+ if (err)
+ netdev_err(port->ndev, "port config failed: %d\n", err);
+}
+
+static void sparx5_phylink_mac_link_down(struct phylink_config *config,
+ unsigned int mode,
+ phy_interface_t interface)
+{
+ /* Currently not used */
+}
+
+static struct sparx5_port *sparx5_pcs_to_port(struct phylink_pcs *pcs)
+{
+ return container_of(pcs, struct sparx5_port, phylink_pcs);
+}
+
+static void sparx5_pcs_get_state(struct phylink_pcs *pcs,
+ struct phylink_link_state *state)
+{
+ struct sparx5_port *port = sparx5_pcs_to_port(pcs);
+ struct sparx5_port_status status;
+
+ sparx5_get_port_status(port->sparx5, port, &status);
+ state->link = status.link && !status.link_down;
+ state->an_complete = status.an_complete;
+ state->speed = status.speed;
+ state->duplex = status.duplex;
+ state->pause = status.pause;
+}
+
+static int sparx5_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
+ phy_interface_t interface,
+ const unsigned long *advertising,
+ bool permit_pause_to_mac)
+{
+ struct sparx5_port *port = sparx5_pcs_to_port(pcs);
+ struct sparx5_port_config conf;
+ int ret = 0;
+
+ conf = port->conf;
+ conf.power_down = false;
+ conf.portmode = interface;
+ conf.inband = neg_mode == PHYLINK_PCS_NEG_INBAND_DISABLED ||
+ neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED;
+ conf.autoneg = neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED;
+ conf.pause_adv = 0;
+ if (phylink_test(advertising, Pause))
+ conf.pause_adv |= ADVERTISE_1000XPAUSE;
+ if (phylink_test(advertising, Asym_Pause))
+ conf.pause_adv |= ADVERTISE_1000XPSE_ASYM;
+ if (sparx5_is_baser(interface)) {
+ if (phylink_test(advertising, FIBRE))
+ conf.media = PHY_MEDIA_SR;
+ else
+ conf.media = PHY_MEDIA_DAC;
+ }
+ if (!port_conf_has_changed(&port->conf, &conf))
+ return ret;
+ /* Enable the PCS matching this interface type */
+ ret = sparx5_port_pcs_set(port->sparx5, port, &conf);
+ if (ret)
+ netdev_err(port->ndev, "port PCS config failed: %d\n", ret);
+ return ret;
+}
+
+static void sparx5_pcs_aneg_restart(struct phylink_pcs *pcs)
+{
+ /* Currently not used */
+}
+
+const struct phylink_pcs_ops sparx5_phylink_pcs_ops = {
+ .pcs_get_state = sparx5_pcs_get_state,
+ .pcs_config = sparx5_pcs_config,
+ .pcs_an_restart = sparx5_pcs_aneg_restart,
+};
+
+const struct phylink_mac_ops sparx5_phylink_mac_ops = {
+ .mac_select_pcs = sparx5_phylink_mac_select_pcs,
+ .mac_config = sparx5_phylink_mac_config,
+ .mac_link_down = sparx5_phylink_mac_link_down,
+ .mac_link_up = sparx5_phylink_mac_link_up,
+};
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_police.c b/drivers/net/ethernet/microchip/sparx5/sparx5_police.c
new file mode 100644
index 0000000000..8ada5cee13
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_police.c
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2023 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include "sparx5_main_regs.h"
+#include "sparx5_main.h"
+
+static int sparx5_policer_service_conf_set(struct sparx5 *sparx5,
+ struct sparx5_policer *pol)
+{
+ u32 idx, pup_tokens, max_pup_tokens, burst, thres;
+ struct sparx5_sdlb_group *g;
+ u64 rate;
+
+ g = &sdlb_groups[pol->group];
+ idx = pol->idx;
+
+ rate = pol->rate * 1000;
+ burst = pol->burst;
+
+ pup_tokens = sparx5_sdlb_pup_token_get(sparx5, g->pup_interval, rate);
+ max_pup_tokens =
+ sparx5_sdlb_pup_token_get(sparx5, g->pup_interval, g->max_rate);
+
+ thres = DIV_ROUND_UP(burst, g->min_burst);
+
+ spx5_wr(ANA_AC_SDLB_PUP_TOKENS_PUP_TOKENS_SET(pup_tokens), sparx5,
+ ANA_AC_SDLB_PUP_TOKENS(idx, 0));
+
+ spx5_rmw(ANA_AC_SDLB_INH_CTRL_PUP_TOKENS_MAX_SET(max_pup_tokens),
+ ANA_AC_SDLB_INH_CTRL_PUP_TOKENS_MAX, sparx5,
+ ANA_AC_SDLB_INH_CTRL(idx, 0));
+
+ spx5_rmw(ANA_AC_SDLB_THRES_THRES_SET(thres), ANA_AC_SDLB_THRES_THRES,
+ sparx5, ANA_AC_SDLB_THRES(idx, 0));
+
+ return 0;
+}
+
+int sparx5_policer_conf_set(struct sparx5 *sparx5, struct sparx5_policer *pol)
+{
+ /* More policer types will be added later */
+ switch (pol->type) {
+ case SPX5_POL_SERVICE:
+ return sparx5_policer_service_conf_set(sparx5, pol);
+ default:
+ break;
+ }
+
+ return 0;
+}
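For reference, a sketch of how a caller might fill in the policer configuration consumed above; the rate * 1000 scaling implies pol->rate is supplied in kbit/s, and all field values below are purely illustrative:

struct sparx5_policer pol = {
	.type  = SPX5_POL_SERVICE,
	.idx   = 42,		/* SDLB index, illustrative */
	.group = 0,		/* SDLB group, illustrative */
	.rate  = 10000,		/* 10 Mbit/s, given in kbit/s */
	.burst = 4096,		/* burst size in bytes */
};
int err = sparx5_policer_conf_set(sparx5, &pol);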
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_pool.c b/drivers/net/ethernet/microchip/sparx5/sparx5_pool.c
new file mode 100644
index 0000000000..b4b280c613
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_pool.c
@@ -0,0 +1,81 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2023 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include "sparx5_main_regs.h"
+#include "sparx5_main.h"
+
+static u32 sparx5_pool_id_to_idx(u32 id)
+{
+ return --id;
+}
+
+u32 sparx5_pool_idx_to_id(u32 idx)
+{
+ return ++idx;
+}
+
+/* Release resource from pool.
+ * Return reference count on success, otherwise return error.
+ */
+int sparx5_pool_put(struct sparx5_pool_entry *pool, int size, u32 id)
+{
+ struct sparx5_pool_entry *e_itr;
+
+ e_itr = (pool + sparx5_pool_id_to_idx(id));
+ if (e_itr->ref_cnt == 0)
+ return -EINVAL;
+
+ return --e_itr->ref_cnt;
+}
+
+/* Get resource from pool.
+ * Return reference count on success, otherwise return error.
+ */
+int sparx5_pool_get(struct sparx5_pool_entry *pool, int size, u32 *id)
+{
+ struct sparx5_pool_entry *e_itr;
+ int i;
+
+ for (i = 0, e_itr = pool; i < size; i++, e_itr++) {
+ if (e_itr->ref_cnt == 0) {
+ *id = sparx5_pool_idx_to_id(i);
+ return ++e_itr->ref_cnt;
+ }
+ }
+
+ return -ENOSPC;
+}
+
+/* Get resource from pool that matches index.
+ * Return reference count on success, otherwise return error.
+ */
+int sparx5_pool_get_with_idx(struct sparx5_pool_entry *pool, int size, u32 idx,
+ u32 *id)
+{
+ struct sparx5_pool_entry *e_itr;
+ int i, ret = -ENOSPC;
+
+ for (i = 0, e_itr = pool; i < size; i++, e_itr++) {
+ /* Pool index of first free entry */
+ if (e_itr->ref_cnt == 0 && ret == -ENOSPC)
+ ret = i;
+ /* TC index already in use? */
+ if (e_itr->idx == idx && e_itr->ref_cnt > 0) {
+ ret = i;
+ break;
+ }
+ }
+
+ /* Did we find a free entry? */
+ if (ret >= 0) {
+ *id = sparx5_pool_idx_to_id(ret);
+ e_itr = (pool + ret);
+ e_itr->idx = idx;
+ return ++e_itr->ref_cnt;
+ }
+
+ return ret;
+}
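Note the conventions of the pool helpers above: external ids are 1-based (array index 0 maps to id 1), and on success the functions return the post-operation reference count rather than 0, so a first get returns 1 and the matching put returns 0. A minimal sketch:

static struct sparx5_pool_entry example_pool[8];	/* illustrative */

u32 id;
int ref;

ref = sparx5_pool_get(example_pool, 8, &id);	/* ref == 1, id == 1 */
if (ref > 0)
	ref = sparx5_pool_put(example_pool, 8, id);	/* ref == 0 */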
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_port.c b/drivers/net/ethernet/microchip/sparx5/sparx5_port.c
new file mode 100644
index 0000000000..3a1b1a1f5a
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_port.c
@@ -0,0 +1,1347 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include <linux/module.h>
+#include <linux/phy/phy.h>
+#include <net/dcbnl.h>
+
+#include "sparx5_main_regs.h"
+#include "sparx5_main.h"
+#include "sparx5_port.h"
+
+#define SPX5_ETYPE_TAG_C 0x8100
+#define SPX5_ETYPE_TAG_S 0x88a8
+
+#define SPX5_WAIT_US 1000
+#define SPX5_WAIT_MAX_US 2000
+
+enum port_error {
+ SPX5_PERR_SPEED,
+ SPX5_PERR_IFTYPE,
+};
+
+#define PAUSE_DISCARD 0xC
+#define ETH_MAXLEN (ETH_DATA_LEN + ETH_HLEN + ETH_FCS_LEN)
+
+static void decode_sgmii_word(u16 lp_abil, struct sparx5_port_status *status)
+{
+ status->an_complete = true;
+ if (!(lp_abil & LPA_SGMII_LINK)) {
+ status->link = false;
+ return;
+ }
+
+ switch (lp_abil & LPA_SGMII_SPD_MASK) {
+ case LPA_SGMII_10:
+ status->speed = SPEED_10;
+ break;
+ case LPA_SGMII_100:
+ status->speed = SPEED_100;
+ break;
+ case LPA_SGMII_1000:
+ status->speed = SPEED_1000;
+ break;
+ default:
+ status->link = false;
+ return;
+ }
+ if (lp_abil & LPA_SGMII_FULL_DUPLEX)
+ status->duplex = DUPLEX_FULL;
+ else
+ status->duplex = DUPLEX_HALF;
+}
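As a worked example, using the LPA_SGMII_* definitions from linux/mii.h: a link-partner word of 0x9800 carries LPA_SGMII_LINK (0x8000), LPA_SGMII_1000 (0x0800) and LPA_SGMII_FULL_DUPLEX (0x1000), so the decode above reports link up at SPEED_1000 with DUPLEX_FULL.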
+
+static void decode_cl37_word(u16 lp_abil, u16 ld_abil, struct sparx5_port_status *status)
+{
+ status->link = !(lp_abil & ADVERTISE_RFAULT) && status->link;
+ status->an_complete = true;
+ status->duplex = (ADVERTISE_1000XFULL & lp_abil) ?
+ DUPLEX_FULL : DUPLEX_UNKNOWN; /* 1G HDX not supported */
+
+ if ((ld_abil & ADVERTISE_1000XPAUSE) &&
+ (lp_abil & ADVERTISE_1000XPAUSE)) {
+ status->pause = MLO_PAUSE_RX | MLO_PAUSE_TX;
+ } else if ((ld_abil & ADVERTISE_1000XPSE_ASYM) &&
+ (lp_abil & ADVERTISE_1000XPSE_ASYM)) {
+ status->pause |= (lp_abil & ADVERTISE_1000XPAUSE) ?
+ MLO_PAUSE_TX : 0;
+ status->pause |= (ld_abil & ADVERTISE_1000XPAUSE) ?
+ MLO_PAUSE_RX : 0;
+ } else {
+ status->pause = MLO_PAUSE_NONE;
+ }
+}
+
+static int sparx5_get_dev2g5_status(struct sparx5 *sparx5,
+ struct sparx5_port *port,
+ struct sparx5_port_status *status)
+{
+ u32 portno = port->portno;
+ u16 lp_adv, ld_adv;
+ u32 value;
+
+ /* Get PCS Link down sticky */
+ value = spx5_rd(sparx5, DEV2G5_PCS1G_STICKY(portno));
+ status->link_down = DEV2G5_PCS1G_STICKY_LINK_DOWN_STICKY_GET(value);
+ if (status->link_down) /* Clear the sticky */
+ spx5_wr(value, sparx5, DEV2G5_PCS1G_STICKY(portno));
+
+ /* Get both current Link and Sync status */
+ value = spx5_rd(sparx5, DEV2G5_PCS1G_LINK_STATUS(portno));
+ status->link = DEV2G5_PCS1G_LINK_STATUS_LINK_STATUS_GET(value) &&
+ DEV2G5_PCS1G_LINK_STATUS_SYNC_STATUS_GET(value);
+
+ if (port->conf.portmode == PHY_INTERFACE_MODE_1000BASEX)
+ status->speed = SPEED_1000;
+ else if (port->conf.portmode == PHY_INTERFACE_MODE_2500BASEX)
+ status->speed = SPEED_2500;
+
+ status->duplex = DUPLEX_FULL;
+
+ /* Get PCS ANEG status register */
+ value = spx5_rd(sparx5, DEV2G5_PCS1G_ANEG_STATUS(portno));
+
+ /* Aneg complete provides more information */
+ if (DEV2G5_PCS1G_ANEG_STATUS_ANEG_COMPLETE_GET(value)) {
+ lp_adv = DEV2G5_PCS1G_ANEG_STATUS_LP_ADV_ABILITY_GET(value);
+ if (port->conf.portmode == PHY_INTERFACE_MODE_SGMII) {
+ decode_sgmii_word(lp_adv, status);
+ } else {
+ value = spx5_rd(sparx5, DEV2G5_PCS1G_ANEG_CFG(portno));
+ ld_adv = DEV2G5_PCS1G_ANEG_CFG_ADV_ABILITY_GET(value);
+ decode_cl37_word(lp_adv, ld_adv, status);
+ }
+ }
+ return 0;
+}
+
+static int sparx5_get_sfi_status(struct sparx5 *sparx5,
+ struct sparx5_port *port,
+ struct sparx5_port_status *status)
+{
+ bool high_speed_dev = sparx5_is_baser(port->conf.portmode);
+ u32 portno = port->portno;
+ u32 value, dev, tinst;
+ void __iomem *inst;
+
+ if (!high_speed_dev) {
+ netdev_err(port->ndev, "error: low speed and SFI mode\n");
+ return -EINVAL;
+ }
+
+ dev = sparx5_to_high_dev(portno);
+ tinst = sparx5_port_dev_index(portno);
+ inst = spx5_inst_get(sparx5, dev, tinst);
+
+ value = spx5_inst_rd(inst, DEV10G_MAC_TX_MONITOR_STICKY(0));
+ if (value != DEV10G_MAC_TX_MONITOR_STICKY_IDLE_STATE_STICKY) {
+ /* The link is or has been down. Clear the sticky bit */
+ status->link_down = 1;
+ spx5_inst_wr(0xffffffff, inst, DEV10G_MAC_TX_MONITOR_STICKY(0));
+ value = spx5_inst_rd(inst, DEV10G_MAC_TX_MONITOR_STICKY(0));
+ }
+ status->link = (value == DEV10G_MAC_TX_MONITOR_STICKY_IDLE_STATE_STICKY);
+ status->duplex = DUPLEX_FULL;
+ if (port->conf.portmode == PHY_INTERFACE_MODE_5GBASER)
+ status->speed = SPEED_5000;
+ else if (port->conf.portmode == PHY_INTERFACE_MODE_10GBASER)
+ status->speed = SPEED_10000;
+ else
+ status->speed = SPEED_25000;
+
+ return 0;
+}
+
+/* Get link status of 1000Base-X/in-band and SFI ports */
+int sparx5_get_port_status(struct sparx5 *sparx5,
+ struct sparx5_port *port,
+ struct sparx5_port_status *status)
+{
+ memset(status, 0, sizeof(*status));
+ status->speed = port->conf.speed;
+ if (port->conf.power_down) {
+ status->link = false;
+ return 0;
+ }
+ switch (port->conf.portmode) {
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ return sparx5_get_dev2g5_status(sparx5, port, status);
+ case PHY_INTERFACE_MODE_5GBASER:
+ case PHY_INTERFACE_MODE_10GBASER:
+ case PHY_INTERFACE_MODE_25GBASER:
+ return sparx5_get_sfi_status(sparx5, port, status);
+ case PHY_INTERFACE_MODE_NA:
+ return 0;
+ default:
+ netdev_err(port->ndev, "Status not supported");
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static int sparx5_port_error(struct sparx5_port *port,
+ struct sparx5_port_config *conf,
+ enum port_error errtype)
+{
+ switch (errtype) {
+ case SPX5_PERR_SPEED:
+ netdev_err(port->ndev,
+ "Interface does not support speed: %u: for %s\n",
+ conf->speed, phy_modes(conf->portmode));
+ break;
+ case SPX5_PERR_IFTYPE:
+ netdev_err(port->ndev,
+ "Switch port does not support interface type: %s\n",
+ phy_modes(conf->portmode));
+ break;
+ default:
+ netdev_err(port->ndev,
+ "Interface configuration error\n");
+ }
+
+ return -EINVAL;
+}
+
+static int sparx5_port_verify_speed(struct sparx5 *sparx5,
+ struct sparx5_port *port,
+ struct sparx5_port_config *conf)
+{
+ if ((sparx5_port_is_2g5(port->portno) &&
+ conf->speed > SPEED_2500) ||
+ (sparx5_port_is_5g(port->portno) &&
+ conf->speed > SPEED_5000) ||
+ (sparx5_port_is_10g(port->portno) &&
+ conf->speed > SPEED_10000))
+ return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
+
+ switch (conf->portmode) {
+ case PHY_INTERFACE_MODE_NA:
+ return -EINVAL;
+ case PHY_INTERFACE_MODE_1000BASEX:
+ if (conf->speed != SPEED_1000)
+ return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
+ if (sparx5_port_is_2g5(port->portno))
+ return sparx5_port_error(port, conf, SPX5_PERR_IFTYPE);
+ break;
+ case PHY_INTERFACE_MODE_2500BASEX:
+ if (conf->speed != SPEED_2500 ||
+ sparx5_port_is_2g5(port->portno))
+ return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
+ break;
+ case PHY_INTERFACE_MODE_QSGMII:
+ if (port->portno > 47)
+ return sparx5_port_error(port, conf, SPX5_PERR_IFTYPE);
+ fallthrough;
+ case PHY_INTERFACE_MODE_SGMII:
+ if (conf->speed != SPEED_1000 &&
+ conf->speed != SPEED_100 &&
+ conf->speed != SPEED_10 &&
+ conf->speed != SPEED_2500)
+ return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
+ break;
+ case PHY_INTERFACE_MODE_5GBASER:
+ case PHY_INTERFACE_MODE_10GBASER:
+ case PHY_INTERFACE_MODE_25GBASER:
+ if ((conf->speed != SPEED_5000 &&
+ conf->speed != SPEED_10000 &&
+ conf->speed != SPEED_25000))
+ return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
+ break;
+ default:
+ return sparx5_port_error(port, conf, SPX5_PERR_IFTYPE);
+ }
+ return 0;
+}
+
+static bool sparx5_dev_change(struct sparx5 *sparx5,
+ struct sparx5_port *port,
+ struct sparx5_port_config *conf)
+{
+ return sparx5_is_baser(port->conf.portmode) ^
+ sparx5_is_baser(conf->portmode);
+}
+
+static int sparx5_port_flush_poll(struct sparx5 *sparx5, u32 portno)
+{
+ u32 value, resource, prio, delay_cnt = 0;
+ bool poll_src = true;
+ char *mem = "";
+
+ /* Resource == 0: Memory tracked per source (SRC-MEM)
+ * Resource == 1: Frame references tracked per source (SRC-REF)
+ * Resource == 2: Memory tracked per destination (DST-MEM)
+ * Resource == 3: Frame references tracked per destination. (DST-REF)
+ */
+ while (1) {
+ bool empty = true;
+
+ for (resource = 0; resource < (poll_src ? 2 : 1); resource++) {
+ u32 base;
+
+ base = (resource == 0 ? 2048 : 0) + SPX5_PRIOS * portno;
+ for (prio = 0; prio < SPX5_PRIOS; prio++) {
+ value = spx5_rd(sparx5,
+ QRES_RES_STAT(base + prio));
+ if (value) {
+ mem = resource == 0 ?
+ "DST-MEM" : "SRC-MEM";
+ empty = false;
+ }
+ }
+ }
+
+ if (empty)
+ break;
+
+ if (delay_cnt++ == 2000) {
+ dev_err(sparx5->dev,
+ "Flush timeout port %u. %s queue not empty\n",
+ portno, mem);
+ return -EINVAL;
+ }
+
+ usleep_range(SPX5_WAIT_US, SPX5_WAIT_MAX_US);
+ }
+ return 0;
+}
+
+static int sparx5_port_disable(struct sparx5 *sparx5, struct sparx5_port *port, bool high_spd_dev)
+{
+ u32 tinst = high_spd_dev ?
+ sparx5_port_dev_index(port->portno) : port->portno;
+ u32 dev = high_spd_dev ?
+ sparx5_to_high_dev(port->portno) : TARGET_DEV2G5;
+ void __iomem *devinst = spx5_inst_get(sparx5, dev, tinst);
+ u32 spd = port->conf.speed;
+ u32 spd_prm;
+ int err;
+
+ if (high_spd_dev) {
+ /* 1: Reset the PCS Rx clock domain */
+ spx5_inst_rmw(DEV10G_DEV_RST_CTRL_PCS_RX_RST,
+ DEV10G_DEV_RST_CTRL_PCS_RX_RST,
+ devinst,
+ DEV10G_DEV_RST_CTRL(0));
+
+ /* 2: Disable MAC frame reception */
+ spx5_inst_rmw(0,
+ DEV10G_MAC_ENA_CFG_RX_ENA,
+ devinst,
+ DEV10G_MAC_ENA_CFG(0));
+ } else {
+ /* 1: Reset the PCS Rx clock domain */
+ spx5_inst_rmw(DEV2G5_DEV_RST_CTRL_PCS_RX_RST,
+ DEV2G5_DEV_RST_CTRL_PCS_RX_RST,
+ devinst,
+ DEV2G5_DEV_RST_CTRL(0));
+ /* 2: Disable MAC frame reception */
+ spx5_inst_rmw(0,
+ DEV2G5_MAC_ENA_CFG_RX_ENA,
+ devinst,
+ DEV2G5_MAC_ENA_CFG(0));
+ }
+ /* 3: Disable traffic being sent to or from switch port->portno */
+ spx5_rmw(0,
+ QFWD_SWITCH_PORT_MODE_PORT_ENA,
+ sparx5,
+ QFWD_SWITCH_PORT_MODE(port->portno));
+
+ /* 4: Disable dequeuing from the egress queues */
+ spx5_rmw(HSCH_PORT_MODE_DEQUEUE_DIS,
+ HSCH_PORT_MODE_DEQUEUE_DIS,
+ sparx5,
+ HSCH_PORT_MODE(port->portno));
+
+ /* 5: Disable Flowcontrol */
+ spx5_rmw(QSYS_PAUSE_CFG_PAUSE_STOP_SET(0xFFF - 1),
+ QSYS_PAUSE_CFG_PAUSE_STOP,
+ sparx5,
+ QSYS_PAUSE_CFG(port->portno));
+
+ spd_prm = spd == SPEED_10 ? 1000 : spd == SPEED_100 ? 100 : 10;
+ /* 6: Wait while the last frame is exiting the queues */
+ usleep_range(8 * spd_prm, 10 * spd_prm);
+
+ /* 7: Flush the queues associated with port->portno */
+ spx5_rmw(HSCH_FLUSH_CTRL_FLUSH_PORT_SET(port->portno) |
+ HSCH_FLUSH_CTRL_FLUSH_DST_SET(1) |
+ HSCH_FLUSH_CTRL_FLUSH_SRC_SET(1) |
+ HSCH_FLUSH_CTRL_FLUSH_ENA_SET(1),
+ HSCH_FLUSH_CTRL_FLUSH_PORT |
+ HSCH_FLUSH_CTRL_FLUSH_DST |
+ HSCH_FLUSH_CTRL_FLUSH_SRC |
+ HSCH_FLUSH_CTRL_FLUSH_ENA,
+ sparx5,
+ HSCH_FLUSH_CTRL);
+
+ /* 8: Enable dequeuing from the egress queues */
+ spx5_rmw(0,
+ HSCH_PORT_MODE_DEQUEUE_DIS,
+ sparx5,
+ HSCH_PORT_MODE(port->portno));
+
+ /* 9: Wait until flushing is complete */
+ err = sparx5_port_flush_poll(sparx5, port->portno);
+ if (err)
+ return err;
+
+ /* 10: Reset the MAC clock domain */
+ if (high_spd_dev) {
+ spx5_inst_rmw(DEV10G_DEV_RST_CTRL_PCS_TX_RST_SET(1) |
+ DEV10G_DEV_RST_CTRL_MAC_RX_RST_SET(1) |
+ DEV10G_DEV_RST_CTRL_MAC_TX_RST_SET(1),
+ DEV10G_DEV_RST_CTRL_PCS_TX_RST |
+ DEV10G_DEV_RST_CTRL_MAC_RX_RST |
+ DEV10G_DEV_RST_CTRL_MAC_TX_RST,
+ devinst,
+ DEV10G_DEV_RST_CTRL(0));
+
+ } else {
+ spx5_inst_rmw(DEV2G5_DEV_RST_CTRL_SPEED_SEL_SET(3) |
+ DEV2G5_DEV_RST_CTRL_PCS_TX_RST_SET(1) |
+ DEV2G5_DEV_RST_CTRL_PCS_RX_RST_SET(1) |
+ DEV2G5_DEV_RST_CTRL_MAC_TX_RST_SET(1) |
+ DEV2G5_DEV_RST_CTRL_MAC_RX_RST_SET(1),
+ DEV2G5_DEV_RST_CTRL_SPEED_SEL |
+ DEV2G5_DEV_RST_CTRL_PCS_TX_RST |
+ DEV2G5_DEV_RST_CTRL_PCS_RX_RST |
+ DEV2G5_DEV_RST_CTRL_MAC_TX_RST |
+ DEV2G5_DEV_RST_CTRL_MAC_RX_RST,
+ devinst,
+ DEV2G5_DEV_RST_CTRL(0));
+ }
+ /* 11: Clear flushing */
+ spx5_rmw(HSCH_FLUSH_CTRL_FLUSH_PORT_SET(port->portno) |
+ HSCH_FLUSH_CTRL_FLUSH_ENA_SET(0),
+ HSCH_FLUSH_CTRL_FLUSH_PORT |
+ HSCH_FLUSH_CTRL_FLUSH_ENA,
+ sparx5,
+ HSCH_FLUSH_CTRL);
+
+ if (high_spd_dev) {
+ u32 pcs = sparx5_to_pcs_dev(port->portno);
+ void __iomem *pcsinst = spx5_inst_get(sparx5, pcs, tinst);
+
+ /* 12: Disable 5G/10G/25G BaseR PCS */
+ spx5_inst_rmw(PCS10G_BR_PCS_CFG_PCS_ENA_SET(0),
+ PCS10G_BR_PCS_CFG_PCS_ENA,
+ pcsinst,
+ PCS10G_BR_PCS_CFG(0));
+
+ if (sparx5_port_is_25g(port->portno))
+ /* Disable 25G PCS */
+ spx5_rmw(DEV25G_PCS25G_CFG_PCS25G_ENA_SET(0),
+ DEV25G_PCS25G_CFG_PCS25G_ENA,
+ sparx5,
+ DEV25G_PCS25G_CFG(tinst));
+ } else {
+ /* 12: Disable 1G PCS */
+ spx5_rmw(DEV2G5_PCS1G_CFG_PCS_ENA_SET(0),
+ DEV2G5_PCS1G_CFG_PCS_ENA,
+ sparx5,
+ DEV2G5_PCS1G_CFG(port->portno));
+ }
+
+ /* The port is now flushed and disabled */
+ return 0;
+}
+
+static int sparx5_port_fifo_sz(struct sparx5 *sparx5,
+ u32 portno, u32 speed)
+{
+ u32 sys_clk = sparx5_clk_period(sparx5->coreclock);
+ const u32 taxi_dist[SPX5_PORTS_ALL] = {
+ 6, 8, 10, 6, 8, 10, 6, 8, 10, 6, 8, 10,
+ 4, 4, 4, 4,
+ 11, 12, 13, 14, 15, 16, 17, 18,
+ 11, 12, 13, 14, 15, 16, 17, 18,
+ 11, 12, 13, 14, 15, 16, 17, 18,
+ 11, 12, 13, 14, 15, 16, 17, 18,
+ 4, 6, 8, 4, 6, 8, 6, 8,
+ 2, 2, 2, 2, 2, 2, 2, 4, 2
+ };
+ u32 mac_per = 6400, tmp1, tmp2, tmp3;
+ u32 fifo_width = 16;
+ u32 mac_width = 8;
+ u32 addition = 0;
+
+ switch (speed) {
+ case SPEED_25000:
+ return 0;
+ case SPEED_10000:
+ mac_per = 6400;
+ mac_width = 8;
+ addition = 1;
+ break;
+ case SPEED_5000:
+ mac_per = 12800;
+ mac_width = 8;
+ addition = 0;
+ break;
+ case SPEED_2500:
+ mac_per = 3200;
+ mac_width = 1;
+ addition = 0;
+ break;
+ case SPEED_1000:
+ mac_per = 8000;
+ mac_width = 1;
+ addition = 0;
+ break;
+ case SPEED_100:
+ case SPEED_10:
+ return 1;
+ default:
+ break;
+ }
+
+ tmp1 = 1000 * mac_width / fifo_width;
+ tmp2 = 3000 + ((12000 + 2 * taxi_dist[portno] * 1000)
+ * sys_clk / mac_per);
+ tmp3 = tmp1 * tmp2 / 1000;
+ return (tmp3 + 2000 + 999) / 1000 + addition;
+}
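A worked example of the integer arithmetic above, assuming sparx5_clk_period() returns the clock period in picoseconds (1600 ps at a 625 MHz core clock), for a 10G port with taxi_dist = 6: tmp1 = 1000 * 8 / 16 = 500; tmp2 = 3000 + (12000 + 2 * 6 * 1000) * 1600 / 6400 = 9000; tmp3 = 500 * 9000 / 1000 = 4500; the returned FIFO size is (4500 + 2000 + 999) / 1000 + 1 = 8.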
+
+/* Configure port muxing:
+ * QSGMII: 4x2G5 devices
+ */
+static int sparx5_port_mux_set(struct sparx5 *sparx5,
+ struct sparx5_port *port,
+ struct sparx5_port_config *conf)
+{
+ u32 portno = port->portno;
+ u32 inst;
+
+ if (port->conf.portmode == conf->portmode)
+ return 0; /* Nothing to do */
+
+ switch (conf->portmode) {
+ case PHY_INTERFACE_MODE_QSGMII: /* QSGMII: 4x2G5 devices. Mode Q' */
+ inst = (portno - portno % 4) / 4;
+ spx5_rmw(BIT(inst),
+ BIT(inst),
+ sparx5,
+ PORT_CONF_QSGMII_ENA);
+
+ if ((portno / 4 % 2) == 0) {
+ /* Affects d0-d3,d8-d11..d40-d43 */
+ spx5_rmw(PORT_CONF_USGMII_CFG_BYPASS_SCRAM_SET(1) |
+ PORT_CONF_USGMII_CFG_BYPASS_DESCRAM_SET(1) |
+ PORT_CONF_USGMII_CFG_QUAD_MODE_SET(1),
+ PORT_CONF_USGMII_CFG_BYPASS_SCRAM |
+ PORT_CONF_USGMII_CFG_BYPASS_DESCRAM |
+ PORT_CONF_USGMII_CFG_QUAD_MODE,
+ sparx5,
+ PORT_CONF_USGMII_CFG((portno / 8)));
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int sparx5_port_max_tags_set(struct sparx5 *sparx5,
+ struct sparx5_port *port)
+{
+ enum sparx5_port_max_tags max_tags = port->max_vlan_tags;
+ int tag_ct = max_tags == SPX5_PORT_MAX_TAGS_ONE ? 1 :
+ max_tags == SPX5_PORT_MAX_TAGS_TWO ? 2 : 0;
+ bool dtag = max_tags == SPX5_PORT_MAX_TAGS_TWO;
+ enum sparx5_vlan_port_type vlan_type = port->vlan_type;
+ bool dotag = max_tags != SPX5_PORT_MAX_TAGS_NONE;
+ u32 dev = sparx5_to_high_dev(port->portno);
+ u32 tinst = sparx5_port_dev_index(port->portno);
+ void __iomem *inst = spx5_inst_get(sparx5, dev, tinst);
+ u32 etype;
+
+ etype = (vlan_type == SPX5_VLAN_PORT_TYPE_S_CUSTOM ?
+ port->custom_etype :
+ vlan_type == SPX5_VLAN_PORT_TYPE_C ?
+ SPX5_ETYPE_TAG_C : SPX5_ETYPE_TAG_S);
+
+ spx5_wr(DEV2G5_MAC_TAGS_CFG_TAG_ID_SET(etype) |
+ DEV2G5_MAC_TAGS_CFG_PB_ENA_SET(dtag) |
+ DEV2G5_MAC_TAGS_CFG_VLAN_AWR_ENA_SET(dotag) |
+ DEV2G5_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA_SET(dotag),
+ sparx5,
+ DEV2G5_MAC_TAGS_CFG(port->portno));
+
+ if (sparx5_port_is_2g5(port->portno))
+ return 0;
+
+ spx5_inst_rmw(DEV10G_MAC_TAGS_CFG_TAG_ID_SET(etype) |
+ DEV10G_MAC_TAGS_CFG_TAG_ENA_SET(dotag),
+ DEV10G_MAC_TAGS_CFG_TAG_ID |
+ DEV10G_MAC_TAGS_CFG_TAG_ENA,
+ inst,
+ DEV10G_MAC_TAGS_CFG(0, 0));
+
+ spx5_inst_rmw(DEV10G_MAC_NUM_TAGS_CFG_NUM_TAGS_SET(tag_ct),
+ DEV10G_MAC_NUM_TAGS_CFG_NUM_TAGS,
+ inst,
+ DEV10G_MAC_NUM_TAGS_CFG(0));
+
+ spx5_inst_rmw(DEV10G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK_SET(dotag),
+ DEV10G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK,
+ inst,
+ DEV10G_MAC_MAXLEN_CFG(0));
+ return 0;
+}
+
+int sparx5_port_fwd_urg(struct sparx5 *sparx5, u32 speed)
+{
+ u32 clk_period_ps = 1600; /* 625 MHz for now */
+ u32 urg = 672000;
+
+ switch (speed) {
+ case SPEED_10:
+ case SPEED_100:
+ case SPEED_1000:
+ urg = 672000;
+ break;
+ case SPEED_2500:
+ urg = 270000;
+ break;
+ case SPEED_5000:
+ urg = 135000;
+ break;
+ case SPEED_10000:
+ urg = 67200;
+ break;
+ case SPEED_25000:
+ urg = 27000;
+ break;
+ }
+ return urg / clk_period_ps - 1;
+}
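Worked examples for the urgency value above, using the assumed 1600 ps clock period: SPEED_10000 gives 67200 / 1600 - 1 = 41, while SPEED_1000 gives 672000 / 1600 - 1 = 419.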
+
+static u16 sparx5_wm_enc(u16 value)
+{
+ if (value >= 2048)
+ return 2048 + value / 16;
+
+ return value;
+}
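The encoder above stores watermarks below 2048 directly in cells and anything larger in units of 16 cells; for example sparx5_wm_enc(4096) = 2048 + 4096 / 16 = 2304, which the hardware decodes back as (2304 - 2048) * 16 = 4096 cells.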
+
+static int sparx5_port_fc_setup(struct sparx5 *sparx5,
+ struct sparx5_port *port,
+ struct sparx5_port_config *conf)
+{
+ bool fc_obey = conf->pause & MLO_PAUSE_RX ? 1 : 0;
+ u32 pause_stop = 0xFFF - 1; /* FC gen disabled */
+
+ if (conf->pause & MLO_PAUSE_TX)
+ pause_stop = sparx5_wm_enc(4 * (ETH_MAXLEN /
+ SPX5_BUFFER_CELL_SZ));
+
+ /* Set HDX flowcontrol */
+ spx5_rmw(DSM_MAC_CFG_HDX_BACKPREASSURE_SET(conf->duplex == DUPLEX_HALF),
+ DSM_MAC_CFG_HDX_BACKPREASSURE,
+ sparx5,
+ DSM_MAC_CFG(port->portno));
+
+ /* Obey flowcontrol */
+ spx5_rmw(DSM_RX_PAUSE_CFG_RX_PAUSE_EN_SET(fc_obey),
+ DSM_RX_PAUSE_CFG_RX_PAUSE_EN,
+ sparx5,
+ DSM_RX_PAUSE_CFG(port->portno));
+
+ /* Disable forward pressure */
+ spx5_rmw(QSYS_FWD_PRESSURE_FWD_PRESSURE_DIS_SET(fc_obey),
+ QSYS_FWD_PRESSURE_FWD_PRESSURE_DIS,
+ sparx5,
+ QSYS_FWD_PRESSURE(port->portno));
+
+ /* Generate pause frames */
+ spx5_rmw(QSYS_PAUSE_CFG_PAUSE_STOP_SET(pause_stop),
+ QSYS_PAUSE_CFG_PAUSE_STOP,
+ sparx5,
+ QSYS_PAUSE_CFG(port->portno));
+
+ return 0;
+}
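Tracing the pause-generation watermark above, and assuming SPX5_BUFFER_CELL_SZ is 184 bytes: ETH_MAXLEN / 184 = 1518 / 184 = 8 cells per maximum-size frame, so pause_stop = sparx5_wm_enc(4 * 8) = 32 cells when MLO_PAUSE_TX is set. The 0xFFF - 1 default parks the watermark out of reach, which effectively disables pause-frame generation.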
+
+static u16 sparx5_get_aneg_word(struct sparx5_port_config *conf)
+{
+ if (conf->portmode == PHY_INTERFACE_MODE_1000BASEX) /* cl-37 aneg */
+ return (conf->pause_adv | ADVERTISE_LPACK | ADVERTISE_1000XFULL);
+ else
+ return 1; /* Enable SGMII Aneg */
+}
+
+int sparx5_serdes_set(struct sparx5 *sparx5,
+ struct sparx5_port *port,
+ struct sparx5_port_config *conf)
+{
+ int portmode, err, speed = conf->speed;
+
+ if (conf->portmode == PHY_INTERFACE_MODE_QSGMII &&
+ ((port->portno % 4) != 0)) {
+ return 0;
+ }
+ if (sparx5_is_baser(conf->portmode)) {
+ if (conf->portmode == PHY_INTERFACE_MODE_25GBASER)
+ speed = SPEED_25000;
+ else if (conf->portmode == PHY_INTERFACE_MODE_10GBASER)
+ speed = SPEED_10000;
+ else
+ speed = SPEED_5000;
+ }
+
+ err = phy_set_media(port->serdes, conf->media);
+ if (err)
+ return err;
+ if (speed > 0) {
+ err = phy_set_speed(port->serdes, speed);
+ if (err)
+ return err;
+ }
+ if (conf->serdes_reset) {
+ err = phy_reset(port->serdes);
+ if (err)
+ return err;
+ }
+
+ /* Configure SerDes with port parameters
+ * For BaseR, the serdes driver supports 10GBASE-R at speeds 5G/10G/25G
+ */
+ portmode = conf->portmode;
+ if (sparx5_is_baser(conf->portmode))
+ portmode = PHY_INTERFACE_MODE_10GBASER;
+ err = phy_set_mode_ext(port->serdes, PHY_MODE_ETHERNET, portmode);
+ if (err)
+ return err;
+ conf->serdes_reset = false;
+ return err;
+}
+
+static int sparx5_port_pcs_low_set(struct sparx5 *sparx5,
+ struct sparx5_port *port,
+ struct sparx5_port_config *conf)
+{
+ bool sgmii = false, inband_aneg = false;
+ int err;
+
+ if (port->conf.inband) {
+ if (conf->portmode == PHY_INTERFACE_MODE_SGMII ||
+ conf->portmode == PHY_INTERFACE_MODE_QSGMII)
+ inband_aneg = true; /* Cisco-SGMII in-band-aneg */
+ else if (conf->portmode == PHY_INTERFACE_MODE_1000BASEX &&
+ conf->autoneg)
+ inband_aneg = true; /* Clause-37 in-band-aneg */
+
+ err = sparx5_serdes_set(sparx5, port, conf);
+ if (err)
+ return -EINVAL;
+ } else {
+ sgmii = true; /* Phy is connected to the MAC */
+ }
+
+ /* Choose SGMII or 1000BaseX/2500BaseX PCS mode */
+ spx5_rmw(DEV2G5_PCS1G_MODE_CFG_SGMII_MODE_ENA_SET(sgmii),
+ DEV2G5_PCS1G_MODE_CFG_SGMII_MODE_ENA,
+ sparx5,
+ DEV2G5_PCS1G_MODE_CFG(port->portno));
+
+ /* Enable PCS */
+ spx5_wr(DEV2G5_PCS1G_CFG_PCS_ENA_SET(1),
+ sparx5,
+ DEV2G5_PCS1G_CFG(port->portno));
+
+ if (inband_aneg) {
+ u16 abil = sparx5_get_aneg_word(conf);
+
+ /* Enable in-band aneg */
+ spx5_wr(DEV2G5_PCS1G_ANEG_CFG_ADV_ABILITY_SET(abil) |
+ DEV2G5_PCS1G_ANEG_CFG_SW_RESOLVE_ENA_SET(1) |
+ DEV2G5_PCS1G_ANEG_CFG_ANEG_ENA_SET(1) |
+ DEV2G5_PCS1G_ANEG_CFG_ANEG_RESTART_ONE_SHOT_SET(1),
+ sparx5,
+ DEV2G5_PCS1G_ANEG_CFG(port->portno));
+ } else {
+ spx5_wr(0, sparx5, DEV2G5_PCS1G_ANEG_CFG(port->portno));
+ }
+
+ /* Take PCS out of reset */
+ spx5_rmw(DEV2G5_DEV_RST_CTRL_SPEED_SEL_SET(2) |
+ DEV2G5_DEV_RST_CTRL_PCS_TX_RST_SET(0) |
+ DEV2G5_DEV_RST_CTRL_PCS_RX_RST_SET(0),
+ DEV2G5_DEV_RST_CTRL_SPEED_SEL |
+ DEV2G5_DEV_RST_CTRL_PCS_TX_RST |
+ DEV2G5_DEV_RST_CTRL_PCS_RX_RST,
+ sparx5,
+ DEV2G5_DEV_RST_CTRL(port->portno));
+
+ return 0;
+}
+
+static int sparx5_port_pcs_high_set(struct sparx5 *sparx5,
+ struct sparx5_port *port,
+ struct sparx5_port_config *conf)
+{
+ u32 clk_spd = conf->portmode == PHY_INTERFACE_MODE_5GBASER ? 1 : 0;
+ u32 pix = sparx5_port_dev_index(port->portno);
+ u32 dev = sparx5_to_high_dev(port->portno);
+ u32 pcs = sparx5_to_pcs_dev(port->portno);
+ void __iomem *devinst;
+ void __iomem *pcsinst;
+ int err;
+
+ devinst = spx5_inst_get(sparx5, dev, pix);
+ pcsinst = spx5_inst_get(sparx5, pcs, pix);
+
+ /* SFI : No in-band-aneg. Speeds 5G/10G/25G */
+ err = sparx5_serdes_set(sparx5, port, conf);
+ if (err)
+ return -EINVAL;
+ if (conf->portmode == PHY_INTERFACE_MODE_25GBASER) {
+ /* Enable PCS for 25G device, speed 25G */
+ spx5_rmw(DEV25G_PCS25G_CFG_PCS25G_ENA_SET(1),
+ DEV25G_PCS25G_CFG_PCS25G_ENA,
+ sparx5,
+ DEV25G_PCS25G_CFG(pix));
+ } else {
+ /* Enable PCS for 5G/10G/25G devices, speed 5G/10G */
+ spx5_inst_rmw(PCS10G_BR_PCS_CFG_PCS_ENA_SET(1),
+ PCS10G_BR_PCS_CFG_PCS_ENA,
+ pcsinst,
+ PCS10G_BR_PCS_CFG(0));
+ }
+
+ /* Enable 5G/10G/25G MAC module */
+ spx5_inst_wr(DEV10G_MAC_ENA_CFG_RX_ENA_SET(1) |
+ DEV10G_MAC_ENA_CFG_TX_ENA_SET(1),
+ devinst,
+ DEV10G_MAC_ENA_CFG(0));
+
+ /* Take the device out of reset */
+ spx5_inst_rmw(DEV10G_DEV_RST_CTRL_PCS_RX_RST_SET(0) |
+ DEV10G_DEV_RST_CTRL_PCS_TX_RST_SET(0) |
+ DEV10G_DEV_RST_CTRL_MAC_RX_RST_SET(0) |
+ DEV10G_DEV_RST_CTRL_MAC_TX_RST_SET(0) |
+ DEV10G_DEV_RST_CTRL_SPEED_SEL_SET(clk_spd),
+ DEV10G_DEV_RST_CTRL_PCS_RX_RST |
+ DEV10G_DEV_RST_CTRL_PCS_TX_RST |
+ DEV10G_DEV_RST_CTRL_MAC_RX_RST |
+ DEV10G_DEV_RST_CTRL_MAC_TX_RST |
+ DEV10G_DEV_RST_CTRL_SPEED_SEL,
+ devinst,
+ DEV10G_DEV_RST_CTRL(0));
+
+ return 0;
+}
+
+/* Switch between 1G/2.5G and 5G/10G/25G devices */
+static void sparx5_dev_switch(struct sparx5 *sparx5, int port, bool hsd)
+{
+ int bt_indx = BIT(sparx5_port_dev_index(port));
+
+ if (sparx5_port_is_5g(port)) {
+ spx5_rmw(hsd ? 0 : bt_indx,
+ bt_indx,
+ sparx5,
+ PORT_CONF_DEV5G_MODES);
+ } else if (sparx5_port_is_10g(port)) {
+ spx5_rmw(hsd ? 0 : bt_indx,
+ bt_indx,
+ sparx5,
+ PORT_CONF_DEV10G_MODES);
+ } else if (sparx5_port_is_25g(port)) {
+ spx5_rmw(hsd ? 0 : bt_indx,
+ bt_indx,
+ sparx5,
+ PORT_CONF_DEV25G_MODES);
+ }
+}
+
+/* Configure speed/duplex dependent registers */
+static int sparx5_port_config_low_set(struct sparx5 *sparx5,
+ struct sparx5_port *port,
+ struct sparx5_port_config *conf)
+{
+ u32 clk_spd, gig_mode, tx_gap, hdx_gap_1, hdx_gap_2;
+ bool fdx = conf->duplex == DUPLEX_FULL;
+ int spd = conf->speed;
+
+ clk_spd = spd == SPEED_10 ? 0 : spd == SPEED_100 ? 1 : 2;
+ gig_mode = spd == SPEED_1000 || spd == SPEED_2500;
+ tx_gap = spd == SPEED_1000 ? 4 : fdx ? 6 : 5;
+ hdx_gap_1 = spd == SPEED_1000 ? 0 : spd == SPEED_100 ? 1 : 2;
+ hdx_gap_2 = spd == SPEED_1000 ? 0 : spd == SPEED_100 ? 4 : 1;
+
+ /* GIG/FDX mode */
+ spx5_rmw(DEV2G5_MAC_MODE_CFG_GIGA_MODE_ENA_SET(gig_mode) |
+ DEV2G5_MAC_MODE_CFG_FDX_ENA_SET(fdx),
+ DEV2G5_MAC_MODE_CFG_GIGA_MODE_ENA |
+ DEV2G5_MAC_MODE_CFG_FDX_ENA,
+ sparx5,
+ DEV2G5_MAC_MODE_CFG(port->portno));
+
+ /* Set MAC IFG Gaps */
+ spx5_wr(DEV2G5_MAC_IFG_CFG_TX_IFG_SET(tx_gap) |
+ DEV2G5_MAC_IFG_CFG_RX_IFG1_SET(hdx_gap_1) |
+ DEV2G5_MAC_IFG_CFG_RX_IFG2_SET(hdx_gap_2),
+ sparx5,
+ DEV2G5_MAC_IFG_CFG(port->portno));
+
+ /* Disabling frame aging when in HDX (due to HDX issue) */
+ spx5_rmw(HSCH_PORT_MODE_AGE_DIS_SET(fdx == 0),
+ HSCH_PORT_MODE_AGE_DIS,
+ sparx5,
+ HSCH_PORT_MODE(port->portno));
+
+ /* Enable MAC module */
+ spx5_wr(DEV2G5_MAC_ENA_CFG_RX_ENA |
+ DEV2G5_MAC_ENA_CFG_TX_ENA,
+ sparx5,
+ DEV2G5_MAC_ENA_CFG(port->portno));
+
+ /* Select speed and take MAC out of reset */
+ spx5_rmw(DEV2G5_DEV_RST_CTRL_SPEED_SEL_SET(clk_spd) |
+ DEV2G5_DEV_RST_CTRL_MAC_TX_RST_SET(0) |
+ DEV2G5_DEV_RST_CTRL_MAC_RX_RST_SET(0),
+ DEV2G5_DEV_RST_CTRL_SPEED_SEL |
+ DEV2G5_DEV_RST_CTRL_MAC_TX_RST |
+ DEV2G5_DEV_RST_CTRL_MAC_RX_RST,
+ sparx5,
+ DEV2G5_DEV_RST_CTRL(port->portno));
+
+ return 0;
+}
+
+int sparx5_port_pcs_set(struct sparx5 *sparx5,
+ struct sparx5_port *port,
+ struct sparx5_port_config *conf)
+
+{
+ bool high_speed_dev = sparx5_is_baser(conf->portmode);
+ int err;
+
+ if (sparx5_dev_change(sparx5, port, conf)) {
+ /* switch device */
+ sparx5_dev_switch(sparx5, port->portno, high_speed_dev);
+
+ /* Disable the not-in-use device */
+ err = sparx5_port_disable(sparx5, port, !high_speed_dev);
+ if (err)
+ return err;
+ }
+ /* Disable the port before re-configuring */
+ err = sparx5_port_disable(sparx5, port, high_speed_dev);
+ if (err)
+ return -EINVAL;
+
+ if (high_speed_dev)
+ err = sparx5_port_pcs_high_set(sparx5, port, conf);
+ else
+ err = sparx5_port_pcs_low_set(sparx5, port, conf);
+
+ if (err)
+ return -EINVAL;
+
+ if (port->conf.inband) {
+ /* Enable/disable 1G counters in ASM */
+ spx5_rmw(ASM_PORT_CFG_CSC_STAT_DIS_SET(high_speed_dev),
+ ASM_PORT_CFG_CSC_STAT_DIS,
+ sparx5,
+ ASM_PORT_CFG(port->portno));
+
+ /* Enable/disable 1G counters in DSM */
+ spx5_rmw(DSM_BUF_CFG_CSC_STAT_DIS_SET(high_speed_dev),
+ DSM_BUF_CFG_CSC_STAT_DIS,
+ sparx5,
+ DSM_BUF_CFG(port->portno));
+ }
+
+ port->conf = *conf;
+
+ return 0;
+}
+
+int sparx5_port_config(struct sparx5 *sparx5,
+ struct sparx5_port *port,
+ struct sparx5_port_config *conf)
+{
+ bool high_speed_dev = sparx5_is_baser(conf->portmode);
+ int err, urgency, stop_wm;
+
+ err = sparx5_port_verify_speed(sparx5, port, conf);
+ if (err)
+ return err;
+
+ /* high speed device is already configured */
+ if (!high_speed_dev)
+ sparx5_port_config_low_set(sparx5, port, conf);
+
+ /* Configure flow control */
+ err = sparx5_port_fc_setup(sparx5, port, conf);
+ if (err)
+ return err;
+
+ /* Set the DSM stop watermark */
+ stop_wm = sparx5_port_fifo_sz(sparx5, port->portno, conf->speed);
+ spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM_SET(stop_wm),
+ DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM,
+ sparx5,
+ DSM_DEV_TX_STOP_WM_CFG(port->portno));
+
+ /* Enable port in queue system */
+ urgency = sparx5_port_fwd_urg(sparx5, conf->speed);
+ spx5_rmw(QFWD_SWITCH_PORT_MODE_PORT_ENA_SET(1) |
+ QFWD_SWITCH_PORT_MODE_FWD_URGENCY_SET(urgency),
+ QFWD_SWITCH_PORT_MODE_PORT_ENA |
+ QFWD_SWITCH_PORT_MODE_FWD_URGENCY,
+ sparx5,
+ QFWD_SWITCH_PORT_MODE(port->portno));
+
+ /* Save the new values */
+ port->conf = *conf;
+
+ return 0;
+}
+
+/* Initialize port config to default */
+int sparx5_port_init(struct sparx5 *sparx5,
+ struct sparx5_port *port,
+ struct sparx5_port_config *conf)
+{
+ u32 pause_start = sparx5_wm_enc(6 * (ETH_MAXLEN / SPX5_BUFFER_CELL_SZ));
+ u32 atop = sparx5_wm_enc(20 * (ETH_MAXLEN / SPX5_BUFFER_CELL_SZ));
+ u32 devhigh = sparx5_to_high_dev(port->portno);
+ u32 pix = sparx5_port_dev_index(port->portno);
+ u32 pcs = sparx5_to_pcs_dev(port->portno);
+ bool sd_pol = port->signd_active_high;
+ bool sd_sel = !port->signd_internal;
+ bool sd_ena = port->signd_enable;
+ u32 pause_stop = 0xFFF - 1; /* FC generate disabled */
+ void __iomem *devinst;
+ void __iomem *pcsinst;
+ int err;
+
+ devinst = spx5_inst_get(sparx5, devhigh, pix);
+ pcsinst = spx5_inst_get(sparx5, pcs, pix);
+
+ /* Set the mux port mode */
+ err = sparx5_port_mux_set(sparx5, port, conf);
+ if (err)
+ return err;
+
+ /* Configure MAC vlan awareness */
+ err = sparx5_port_max_tags_set(sparx5, port);
+ if (err)
+ return err;
+
+ /* Set Max Length */
+ spx5_rmw(DEV2G5_MAC_MAXLEN_CFG_MAX_LEN_SET(ETH_MAXLEN),
+ DEV2G5_MAC_MAXLEN_CFG_MAX_LEN,
+ sparx5,
+ DEV2G5_MAC_MAXLEN_CFG(port->portno));
+
+ /* 1G/2G5: Signal Detect configuration */
+ spx5_wr(DEV2G5_PCS1G_SD_CFG_SD_POL_SET(sd_pol) |
+ DEV2G5_PCS1G_SD_CFG_SD_SEL_SET(sd_sel) |
+ DEV2G5_PCS1G_SD_CFG_SD_ENA_SET(sd_ena),
+ sparx5,
+ DEV2G5_PCS1G_SD_CFG(port->portno));
+
+ /* Set Pause WM hysteresis */
+ spx5_rmw(QSYS_PAUSE_CFG_PAUSE_START_SET(pause_start) |
+ QSYS_PAUSE_CFG_PAUSE_STOP_SET(pause_stop) |
+ QSYS_PAUSE_CFG_PAUSE_ENA_SET(1),
+ QSYS_PAUSE_CFG_PAUSE_START |
+ QSYS_PAUSE_CFG_PAUSE_STOP |
+ QSYS_PAUSE_CFG_PAUSE_ENA,
+ sparx5,
+ QSYS_PAUSE_CFG(port->portno));
+
+ /* Port ATOP. Frames are tail dropped when this WM is hit */
+ spx5_wr(QSYS_ATOP_ATOP_SET(atop),
+ sparx5,
+ QSYS_ATOP(port->portno));
+
+ /* Discard pause frame 01-80-C2-00-00-01 */
+ spx5_wr(PAUSE_DISCARD, sparx5, ANA_CL_CAPTURE_BPDU_CFG(port->portno));
+
+ /* Discard SMAC multicast */
+ spx5_rmw(ANA_CL_FILTER_CTRL_FILTER_SMAC_MC_DIS_SET(0),
+ ANA_CL_FILTER_CTRL_FILTER_SMAC_MC_DIS,
+ sparx5, ANA_CL_FILTER_CTRL(port->portno));
+
+ if (conf->portmode == PHY_INTERFACE_MODE_QSGMII ||
+ conf->portmode == PHY_INTERFACE_MODE_SGMII) {
+ err = sparx5_serdes_set(sparx5, port, conf);
+ if (err)
+ return err;
+
+ if (!sparx5_port_is_2g5(port->portno))
+ /* Enable shadow device */
+ spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV10G_SHADOW_ENA_SET(1),
+ DSM_DEV_TX_STOP_WM_CFG_DEV10G_SHADOW_ENA,
+ sparx5,
+ DSM_DEV_TX_STOP_WM_CFG(port->portno));
+
+ sparx5_dev_switch(sparx5, port->portno, false);
+ }
+ if (conf->portmode == PHY_INTERFACE_MODE_QSGMII) {
+ /* All ports must be PCS enabled in QSGMII mode */
+ spx5_rmw(DEV2G5_DEV_RST_CTRL_PCS_TX_RST_SET(0),
+ DEV2G5_DEV_RST_CTRL_PCS_TX_RST,
+ sparx5,
+ DEV2G5_DEV_RST_CTRL(port->portno));
+ }
+ /* Default IFGs for 1G */
+ spx5_wr(DEV2G5_MAC_IFG_CFG_TX_IFG_SET(6) |
+ DEV2G5_MAC_IFG_CFG_RX_IFG1_SET(0) |
+ DEV2G5_MAC_IFG_CFG_RX_IFG2_SET(0),
+ sparx5,
+ DEV2G5_MAC_IFG_CFG(port->portno));
+
+ if (sparx5_port_is_2g5(port->portno))
+ return 0; /* Low speed device only - return */
+
+ /* Now setup the high speed device */
+ if (conf->portmode == PHY_INTERFACE_MODE_NA)
+ conf->portmode = PHY_INTERFACE_MODE_10GBASER;
+
+ if (sparx5_is_baser(conf->portmode))
+ sparx5_dev_switch(sparx5, port->portno, true);
+
+ /* Set Max Length */
+ spx5_inst_rmw(DEV10G_MAC_MAXLEN_CFG_MAX_LEN_SET(ETH_MAXLEN),
+ DEV10G_MAC_MAXLEN_CFG_MAX_LEN,
+ devinst,
+ DEV10G_MAC_ENA_CFG(0));
+
+ /* Handle Signal Detect in 10G PCS */
+ spx5_inst_wr(PCS10G_BR_PCS_SD_CFG_SD_POL_SET(sd_pol) |
+ PCS10G_BR_PCS_SD_CFG_SD_SEL_SET(sd_sel) |
+ PCS10G_BR_PCS_SD_CFG_SD_ENA_SET(sd_ena),
+ pcsinst,
+ PCS10G_BR_PCS_SD_CFG(0));
+
+ if (sparx5_port_is_25g(port->portno)) {
+ /* Handle Signal Detect in 25G PCS */
+ spx5_wr(DEV25G_PCS25G_SD_CFG_SD_POL_SET(sd_pol) |
+ DEV25G_PCS25G_SD_CFG_SD_SEL_SET(sd_sel) |
+ DEV25G_PCS25G_SD_CFG_SD_ENA_SET(sd_ena),
+ sparx5,
+ DEV25G_PCS25G_SD_CFG(pix));
+ }
+
+ return 0;
+}
+
+void sparx5_port_enable(struct sparx5_port *port, bool enable)
+{
+ struct sparx5 *sparx5 = port->sparx5;
+
+ /* Enable port for frame transfer? */
+ spx5_rmw(QFWD_SWITCH_PORT_MODE_PORT_ENA_SET(enable),
+ QFWD_SWITCH_PORT_MODE_PORT_ENA,
+ sparx5,
+ QFWD_SWITCH_PORT_MODE(port->portno));
+}
+
+int sparx5_port_qos_set(struct sparx5_port *port,
+ struct sparx5_port_qos *qos)
+{
+ sparx5_port_qos_dscp_set(port, &qos->dscp);
+ sparx5_port_qos_pcp_set(port, &qos->pcp);
+ sparx5_port_qos_pcp_rewr_set(port, &qos->pcp_rewr);
+ sparx5_port_qos_dscp_rewr_set(port, &qos->dscp_rewr);
+ sparx5_port_qos_default_set(port, qos);
+
+ return 0;
+}
+
+int sparx5_port_qos_pcp_rewr_set(const struct sparx5_port *port,
+ struct sparx5_port_qos_pcp_rewr *qos)
+{
+ int i, mode = SPARX5_PORT_REW_TAG_CTRL_CLASSIFIED;
+ struct sparx5 *sparx5 = port->sparx5;
+ u8 pcp, dei;
+
+ /* Use mapping table, with classified QoS as index, to map QoS and DP
+ * to tagged PCP and DEI, if PCP is trusted. Otherwise use classified
+ * PCP. Classified PCP equals frame PCP.
+ */
+ if (qos->enable)
+ mode = SPARX5_PORT_REW_TAG_CTRL_MAPPED;
+
+ spx5_rmw(REW_TAG_CTRL_TAG_PCP_CFG_SET(mode) |
+ REW_TAG_CTRL_TAG_DEI_CFG_SET(mode),
+ REW_TAG_CTRL_TAG_PCP_CFG | REW_TAG_CTRL_TAG_DEI_CFG,
+ port->sparx5, REW_TAG_CTRL(port->portno));
+
+ for (i = 0; i < ARRAY_SIZE(qos->map.map); i++) {
+ /* Extract PCP and DEI */
+ pcp = qos->map.map[i];
+ if (pcp > SPARX5_PORT_QOS_PCP_COUNT)
+ dei = 1;
+ else
+ dei = 0;
+
+ /* Rewrite PCP and DEI, for each classified QoS class and DP
+ * level. This table is only used if tag ctrl mode is set to
+ * 'mapped'.
+ *
+ * 0:0nd - prio=0 and dp:0 => pcp=0 and dei=0
+ * 0:0de - prio=0 and dp:1 => pcp=0 and dei=1
+ */
+ if (dei) {
+ spx5_rmw(REW_PCP_MAP_DE1_PCP_DE1_SET(pcp),
+ REW_PCP_MAP_DE1_PCP_DE1, sparx5,
+ REW_PCP_MAP_DE1(port->portno, i));
+
+ spx5_rmw(REW_DEI_MAP_DE1_DEI_DE1_SET(dei),
+ REW_DEI_MAP_DE1_DEI_DE1, port->sparx5,
+ REW_DEI_MAP_DE1(port->portno, i));
+ } else {
+ spx5_rmw(REW_PCP_MAP_DE0_PCP_DE0_SET(pcp),
+ REW_PCP_MAP_DE0_PCP_DE0, sparx5,
+ REW_PCP_MAP_DE0(port->portno, i));
+
+ spx5_rmw(REW_DEI_MAP_DE0_DEI_DE0_SET(dei),
+ REW_DEI_MAP_DE0_DEI_DE0, port->sparx5,
+ REW_DEI_MAP_DE0(port->portno, i));
+ }
+ }
+
+ return 0;
+}
+
+int sparx5_port_qos_pcp_set(const struct sparx5_port *port,
+ struct sparx5_port_qos_pcp *qos)
+{
+ struct sparx5 *sparx5 = port->sparx5;
+ u8 *pcp_itr = qos->map.map;
+ u8 pcp, dp;
+ int i;
+
+ /* Enable/disable pcp and dp for qos classification. */
+ spx5_rmw(ANA_CL_QOS_CFG_PCP_DEI_QOS_ENA_SET(qos->qos_enable) |
+ ANA_CL_QOS_CFG_PCP_DEI_DP_ENA_SET(qos->dp_enable),
+ ANA_CL_QOS_CFG_PCP_DEI_QOS_ENA | ANA_CL_QOS_CFG_PCP_DEI_DP_ENA,
+ sparx5, ANA_CL_QOS_CFG(port->portno));
+
+ /* Map each pcp and dei value to priority and dp */
+ for (i = 0; i < ARRAY_SIZE(qos->map.map); i++) {
+ pcp = *(pcp_itr + i);
+ dp = (i < SPARX5_PORT_QOS_PCP_COUNT) ? 0 : 1;
+ spx5_rmw(ANA_CL_PCP_DEI_MAP_CFG_PCP_DEI_QOS_VAL_SET(pcp) |
+ ANA_CL_PCP_DEI_MAP_CFG_PCP_DEI_DP_VAL_SET(dp),
+ ANA_CL_PCP_DEI_MAP_CFG_PCP_DEI_QOS_VAL |
+ ANA_CL_PCP_DEI_MAP_CFG_PCP_DEI_DP_VAL, sparx5,
+ ANA_CL_PCP_DEI_MAP_CFG(port->portno, i));
+ }
+
+ return 0;
+}
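The classification table above is indexed by (DEI, PCP): entries 0-7 cover PCP 0-7 with DEI 0 and entries 8-15 the same PCP range with DEI 1, which is why dp is 0 for the first SPARX5_PORT_QOS_PCP_COUNT entries and 1 for the rest. A frame tagged PCP 5, DEI 1 therefore resolves through map index 8 + 5 = 13.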
+
+void sparx5_port_qos_dscp_rewr_mode_set(const struct sparx5_port *port,
+ int mode)
+{
+ spx5_rmw(ANA_CL_QOS_CFG_DSCP_REWR_MODE_SEL_SET(mode),
+ ANA_CL_QOS_CFG_DSCP_REWR_MODE_SEL, port->sparx5,
+ ANA_CL_QOS_CFG(port->portno));
+}
+
+int sparx5_port_qos_dscp_rewr_set(const struct sparx5_port *port,
+ struct sparx5_port_qos_dscp_rewr *qos)
+{
+ struct sparx5 *sparx5 = port->sparx5;
+ bool rewr = false;
+ u16 dscp;
+ int i;
+
+ /* On egress, rewrite the DSCP value to either the classified DSCP
+ * or the frame DSCP: classified DSCP when enabled, frame DSCP when
+ * disabled.
+ */
+ if (qos->enable)
+ rewr = true;
+
+ spx5_rmw(REW_DSCP_MAP_DSCP_UPDATE_ENA_SET(rewr),
+ REW_DSCP_MAP_DSCP_UPDATE_ENA, sparx5,
+ REW_DSCP_MAP(port->portno));
+
+ /* On ingress, map each classified QoS class and DP to classified DSCP
+ * value. This mapping table is global for all ports.
+ */
+ for (i = 0; i < ARRAY_SIZE(qos->map.map); i++) {
+ dscp = qos->map.map[i];
+ spx5_rmw(ANA_CL_QOS_MAP_CFG_DSCP_REWR_VAL_SET(dscp),
+ ANA_CL_QOS_MAP_CFG_DSCP_REWR_VAL, sparx5,
+ ANA_CL_QOS_MAP_CFG(i));
+ }
+
+ return 0;
+}
+
+int sparx5_port_qos_dscp_set(const struct sparx5_port *port,
+ struct sparx5_port_qos_dscp *qos)
+{
+ struct sparx5 *sparx5 = port->sparx5;
+ u8 *dscp = qos->map.map;
+ int i;
+
+ /* Enable/disable dscp and dp for qos classification.
+ * Disable rewrite of dscp values for now.
+ */
+ spx5_rmw(ANA_CL_QOS_CFG_DSCP_QOS_ENA_SET(qos->qos_enable) |
+ ANA_CL_QOS_CFG_DSCP_DP_ENA_SET(qos->dp_enable) |
+ ANA_CL_QOS_CFG_DSCP_KEEP_ENA_SET(1),
+ ANA_CL_QOS_CFG_DSCP_QOS_ENA | ANA_CL_QOS_CFG_DSCP_DP_ENA |
+ ANA_CL_QOS_CFG_DSCP_KEEP_ENA, sparx5,
+ ANA_CL_QOS_CFG(port->portno));
+
+ /* Map each dscp value to priority and dp */
+ for (i = 0; i < ARRAY_SIZE(qos->map.map); i++) {
+ spx5_rmw(ANA_CL_DSCP_CFG_DSCP_QOS_VAL_SET(*(dscp + i)) |
+ ANA_CL_DSCP_CFG_DSCP_DP_VAL_SET(0),
+ ANA_CL_DSCP_CFG_DSCP_QOS_VAL |
+ ANA_CL_DSCP_CFG_DSCP_DP_VAL, sparx5,
+ ANA_CL_DSCP_CFG(i));
+ }
+
+ /* Set per-dscp trust */
+ for (i = 0; i < ARRAY_SIZE(qos->map.map); i++) {
+ if (qos->qos_enable) {
+ spx5_rmw(ANA_CL_DSCP_CFG_DSCP_TRUST_ENA_SET(1),
+ ANA_CL_DSCP_CFG_DSCP_TRUST_ENA, sparx5,
+ ANA_CL_DSCP_CFG(i));
+ }
+ }
+
+ return 0;
+}
+
+int sparx5_port_qos_default_set(const struct sparx5_port *port,
+ const struct sparx5_port_qos *qos)
+{
+ struct sparx5 *sparx5 = port->sparx5;
+
+ /* Set default prio and dp level */
+ spx5_rmw(ANA_CL_QOS_CFG_DEFAULT_QOS_VAL_SET(qos->default_prio) |
+ ANA_CL_QOS_CFG_DEFAULT_DP_VAL_SET(0),
+ ANA_CL_QOS_CFG_DEFAULT_QOS_VAL |
+ ANA_CL_QOS_CFG_DEFAULT_DP_VAL,
+ sparx5, ANA_CL_QOS_CFG(port->portno));
+
+ /* Set default pcp and dei for untagged frames */
+ spx5_rmw(ANA_CL_VLAN_CTRL_PORT_PCP_SET(0) |
+ ANA_CL_VLAN_CTRL_PORT_DEI_SET(0),
+ ANA_CL_VLAN_CTRL_PORT_PCP |
+ ANA_CL_VLAN_CTRL_PORT_DEI,
+ sparx5, ANA_CL_VLAN_CTRL(port->portno));
+
+ return 0;
+}
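A minimal sketch of how a caller might populate the configuration consumed by sparx5_port_qos_set(); the values are illustrative, and any map entry left at zero classifies to priority 0:

struct sparx5_port_qos qos = {};

qos.default_prio = 2;		/* untagged/untrusted frames */
qos.pcp.qos_enable = true;	/* trust PCP/DEI on ingress */
qos.pcp.map.map[7] = 7;		/* PCP 7, DEI 0 -> priority 7 */

int err = sparx5_port_qos_set(port, &qos);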
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_port.h b/drivers/net/ethernet/microchip/sparx5/sparx5_port.h
new file mode 100644
index 0000000000..607c4ff1df
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_port.h
@@ -0,0 +1,177 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#ifndef __SPARX5_PORT_H__
+#define __SPARX5_PORT_H__
+
+#include "sparx5_main.h"
+
+/* Port PCP rewrite mode */
+#define SPARX5_PORT_REW_TAG_CTRL_CLASSIFIED 0
+#define SPARX5_PORT_REW_TAG_CTRL_DEFAULT 1
+#define SPARX5_PORT_REW_TAG_CTRL_MAPPED 2
+
+/* Port DSCP rewrite mode */
+#define SPARX5_PORT_REW_DSCP_NONE 0
+#define SPARX5_PORT_REW_DSCP_IF_ZERO 1
+#define SPARX5_PORT_REW_DSCP_SELECTED 2
+#define SPARX5_PORT_REW_DSCP_ALL 3
+
+static inline bool sparx5_port_is_2g5(int portno)
+{
+ return portno >= 16 && portno <= 47;
+}
+
+static inline bool sparx5_port_is_5g(int portno)
+{
+ return portno <= 11 || portno == 64;
+}
+
+static inline bool sparx5_port_is_10g(int portno)
+{
+ return (portno >= 12 && portno <= 15) || (portno >= 48 && portno <= 55);
+}
+
+static inline bool sparx5_port_is_25g(int portno)
+{
+ return portno >= 56 && portno <= 63;
+}
+
+static inline u32 sparx5_to_high_dev(int port)
+{
+ if (sparx5_port_is_5g(port))
+ return TARGET_DEV5G;
+ if (sparx5_port_is_10g(port))
+ return TARGET_DEV10G;
+ return TARGET_DEV25G;
+}
+
+static inline u32 sparx5_to_pcs_dev(int port)
+{
+ if (sparx5_port_is_5g(port))
+ return TARGET_PCS5G_BR;
+ if (sparx5_port_is_10g(port))
+ return TARGET_PCS10G_BR;
+ return TARGET_PCS25G_BR;
+}
+
+static inline int sparx5_port_dev_index(int port)
+{
+ if (sparx5_port_is_2g5(port))
+ return port;
+ if (sparx5_port_is_5g(port))
+ return (port <= 11 ? port : 12);
+ if (sparx5_port_is_10g(port))
+ return (port >= 12 && port <= 15) ?
+ port - 12 : port - 44;
+ return (port - 56);
+}
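Worked examples of the mapping above: 2G5 ports keep their own number (port 20 -> index 20); 5G ports 0-11 map to indices 0-11 and port 64 to index 12; 10G ports 12-15 map to indices 0-3 and ports 48-55 to 4-11; 25G ports 56-63 map to indices 0-7.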
+
+int sparx5_port_init(struct sparx5 *sparx5,
+ struct sparx5_port *spx5_port,
+ struct sparx5_port_config *conf);
+
+int sparx5_port_config(struct sparx5 *sparx5,
+ struct sparx5_port *spx5_port,
+ struct sparx5_port_config *conf);
+
+int sparx5_port_pcs_set(struct sparx5 *sparx5,
+ struct sparx5_port *port,
+ struct sparx5_port_config *conf);
+
+int sparx5_serdes_set(struct sparx5 *sparx5,
+ struct sparx5_port *spx5_port,
+ struct sparx5_port_config *conf);
+
+struct sparx5_port_status {
+ bool link;
+ bool link_down;
+ int speed;
+ bool an_complete;
+ int duplex;
+ int pause;
+};
+
+int sparx5_get_port_status(struct sparx5 *sparx5,
+ struct sparx5_port *port,
+ struct sparx5_port_status *status);
+
+void sparx5_port_enable(struct sparx5_port *port, bool enable);
+int sparx5_port_fwd_urg(struct sparx5 *sparx5, u32 speed);
+
+#define SPARX5_PORT_QOS_PCP_COUNT 8
+#define SPARX5_PORT_QOS_DEI_COUNT 8
+#define SPARX5_PORT_QOS_PCP_DEI_COUNT \
+ (SPARX5_PORT_QOS_PCP_COUNT + SPARX5_PORT_QOS_DEI_COUNT)
+struct sparx5_port_qos_pcp_map {
+ u8 map[SPARX5_PORT_QOS_PCP_DEI_COUNT];
+};
+
+struct sparx5_port_qos_pcp_rewr_map {
+ u16 map[SPX5_PRIOS];
+};
+
+#define SPARX5_PORT_QOS_DP_NUM 4
+struct sparx5_port_qos_dscp_rewr_map {
+ u16 map[SPX5_PRIOS * SPARX5_PORT_QOS_DP_NUM];
+};
+
+#define SPARX5_PORT_QOS_DSCP_COUNT 64
+struct sparx5_port_qos_dscp_map {
+ u8 map[SPARX5_PORT_QOS_DSCP_COUNT];
+};
+
+struct sparx5_port_qos_pcp {
+ struct sparx5_port_qos_pcp_map map;
+ bool qos_enable;
+ bool dp_enable;
+};
+
+struct sparx5_port_qos_pcp_rewr {
+ struct sparx5_port_qos_pcp_rewr_map map;
+ bool enable;
+};
+
+struct sparx5_port_qos_dscp {
+ struct sparx5_port_qos_dscp_map map;
+ bool qos_enable;
+ bool dp_enable;
+};
+
+struct sparx5_port_qos_dscp_rewr {
+ struct sparx5_port_qos_dscp_rewr_map map;
+ bool enable;
+};
+
+struct sparx5_port_qos {
+ struct sparx5_port_qos_pcp pcp;
+ struct sparx5_port_qos_pcp_rewr pcp_rewr;
+ struct sparx5_port_qos_dscp dscp;
+ struct sparx5_port_qos_dscp_rewr dscp_rewr;
+ u8 default_prio;
+};
+
+int sparx5_port_qos_set(struct sparx5_port *port, struct sparx5_port_qos *qos);
+
+int sparx5_port_qos_pcp_set(const struct sparx5_port *port,
+ struct sparx5_port_qos_pcp *qos);
+
+int sparx5_port_qos_pcp_rewr_set(const struct sparx5_port *port,
+ struct sparx5_port_qos_pcp_rewr *qos);
+
+int sparx5_port_qos_dscp_set(const struct sparx5_port *port,
+ struct sparx5_port_qos_dscp *qos);
+
+void sparx5_port_qos_dscp_rewr_mode_set(const struct sparx5_port *port,
+ int mode);
+
+int sparx5_port_qos_dscp_rewr_set(const struct sparx5_port *port,
+ struct sparx5_port_qos_dscp_rewr *qos);
+
+int sparx5_port_qos_default_set(const struct sparx5_port *port,
+ const struct sparx5_port_qos *qos);
+
+#endif /* __SPARX5_PORT_H__ */
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_psfp.c b/drivers/net/ethernet/microchip/sparx5/sparx5_psfp.c
new file mode 100644
index 0000000000..8dee1ab1fa
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_psfp.c
@@ -0,0 +1,332 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2023 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include "sparx5_main_regs.h"
+#include "sparx5_main.h"
+
+#define SPX5_PSFP_SF_CNT 1024
+#define SPX5_PSFP_SG_CONFIG_CHANGE_SLEEP 1000
+#define SPX5_PSFP_SG_CONFIG_CHANGE_TIMEO 100000
+
+/* Pool of available service policers */
+static struct sparx5_pool_entry sparx5_psfp_fm_pool[SPX5_SDLB_CNT];
+
+/* Pool of available stream gates */
+static struct sparx5_pool_entry sparx5_psfp_sg_pool[SPX5_PSFP_SG_CNT];
+
+/* Pool of available stream filters */
+static struct sparx5_pool_entry sparx5_psfp_sf_pool[SPX5_PSFP_SF_CNT];
+
+static int sparx5_psfp_sf_get(u32 *id)
+{
+ return sparx5_pool_get(sparx5_psfp_sf_pool, SPX5_PSFP_SF_CNT, id);
+}
+
+static int sparx5_psfp_sf_put(u32 id)
+{
+ return sparx5_pool_put(sparx5_psfp_sf_pool, SPX5_PSFP_SF_CNT, id);
+}
+
+static int sparx5_psfp_sg_get(u32 idx, u32 *id)
+{
+ return sparx5_pool_get_with_idx(sparx5_psfp_sg_pool, SPX5_PSFP_SG_CNT,
+ idx, id);
+}
+
+static int sparx5_psfp_sg_put(u32 id)
+{
+ return sparx5_pool_put(sparx5_psfp_sg_pool, SPX5_PSFP_SG_CNT, id);
+}
+
+static int sparx5_psfp_fm_get(u32 idx, u32 *id)
+{
+ return sparx5_pool_get_with_idx(sparx5_psfp_fm_pool, SPX5_SDLB_CNT, idx,
+ id);
+}
+
+static int sparx5_psfp_fm_put(u32 id)
+{
+ return sparx5_pool_put(sparx5_psfp_fm_pool, SPX5_SDLB_CNT, id);
+}
+
+u32 sparx5_psfp_isdx_get_sf(struct sparx5 *sparx5, u32 isdx)
+{
+ return ANA_L2_TSN_CFG_TSN_SFID_GET(spx5_rd(sparx5,
+ ANA_L2_TSN_CFG(isdx)));
+}
+
+u32 sparx5_psfp_isdx_get_fm(struct sparx5 *sparx5, u32 isdx)
+{
+ return ANA_L2_DLB_CFG_DLB_IDX_GET(spx5_rd(sparx5,
+ ANA_L2_DLB_CFG(isdx)));
+}
+
+u32 sparx5_psfp_sf_get_sg(struct sparx5 *sparx5, u32 sfid)
+{
+ return ANA_AC_TSN_SF_CFG_TSN_SGID_GET(spx5_rd(sparx5,
+ ANA_AC_TSN_SF_CFG(sfid)));
+}
+
+void sparx5_isdx_conf_set(struct sparx5 *sparx5, u32 isdx, u32 sfid, u32 fmid)
+{
+ spx5_rmw(ANA_L2_TSN_CFG_TSN_SFID_SET(sfid), ANA_L2_TSN_CFG_TSN_SFID,
+ sparx5, ANA_L2_TSN_CFG(isdx));
+
+ spx5_rmw(ANA_L2_DLB_CFG_DLB_IDX_SET(fmid), ANA_L2_DLB_CFG_DLB_IDX,
+ sparx5, ANA_L2_DLB_CFG(isdx));
+}
+
+/* Internal priority value to internal priority selector */
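+/* Example (illustrative): ipv = 3 yields 0xb - bit 3 marks the selector
+ * as valid and bits 0-2 carry the priority; ipv <= 0 yields 0, i.e. no
+ * internal priority is selected.
+ */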
+static u32 sparx5_psfp_ipv_to_ips(s32 ipv)
+{
+ return ipv > 0 ? (ipv | BIT(3)) : 0;
+}
+
+static int sparx5_psfp_sgid_get_status(struct sparx5 *sparx5)
+{
+ return spx5_rd(sparx5, ANA_AC_SG_ACCESS_CTRL);
+}
+
+static int sparx5_psfp_sgid_wait_for_completion(struct sparx5 *sparx5)
+{
+ u32 val;
+
+ return readx_poll_timeout(sparx5_psfp_sgid_get_status, sparx5, val,
+ !ANA_AC_SG_ACCESS_CTRL_CONFIG_CHANGE_GET(val),
+ SPX5_PSFP_SG_CONFIG_CHANGE_SLEEP,
+ SPX5_PSFP_SG_CONFIG_CHANGE_TIMEO);
+}
+
+static void sparx5_psfp_sg_config_change(struct sparx5 *sparx5, u32 id)
+{
+ spx5_wr(ANA_AC_SG_ACCESS_CTRL_SGID_SET(id), sparx5,
+ ANA_AC_SG_ACCESS_CTRL);
+
+ spx5_wr(ANA_AC_SG_ACCESS_CTRL_CONFIG_CHANGE_SET(1) |
+ ANA_AC_SG_ACCESS_CTRL_SGID_SET(id),
+ sparx5, ANA_AC_SG_ACCESS_CTRL);
+
+ if (sparx5_psfp_sgid_wait_for_completion(sparx5) < 0)
+ pr_debug("%s:%d timed out waiting for sgid completion",
+ __func__, __LINE__);
+}
+
+static void sparx5_psfp_sf_set(struct sparx5 *sparx5, u32 id,
+ const struct sparx5_psfp_sf *sf)
+{
+ /* Configure stream filter */
+ spx5_rmw(ANA_AC_TSN_SF_CFG_TSN_SGID_SET(sf->sgid) |
+ ANA_AC_TSN_SF_CFG_TSN_MAX_SDU_SET(sf->max_sdu) |
+ ANA_AC_TSN_SF_CFG_BLOCK_OVERSIZE_STATE_SET(sf->sblock_osize) |
+ ANA_AC_TSN_SF_CFG_BLOCK_OVERSIZE_ENA_SET(sf->sblock_osize_ena),
+ ANA_AC_TSN_SF_CFG_TSN_SGID | ANA_AC_TSN_SF_CFG_TSN_MAX_SDU |
+ ANA_AC_TSN_SF_CFG_BLOCK_OVERSIZE_STATE |
+ ANA_AC_TSN_SF_CFG_BLOCK_OVERSIZE_ENA,
+ sparx5, ANA_AC_TSN_SF_CFG(id));
+}
+
+static int sparx5_psfp_sg_set(struct sparx5 *sparx5, u32 id,
+ const struct sparx5_psfp_sg *sg)
+{
+ u32 ips, base_lsb, base_msb, accum_time_interval = 0;
+ const struct sparx5_psfp_gce *gce;
+ int i;
+
+ ips = sparx5_psfp_ipv_to_ips(sg->ipv);
+ base_lsb = sg->basetime.tv_sec & 0xffffffff;
+ base_msb = sg->basetime.tv_sec >> 32;
+
+ /* Set stream gate id */
+ spx5_wr(ANA_AC_SG_ACCESS_CTRL_SGID_SET(id), sparx5,
+ ANA_AC_SG_ACCESS_CTRL);
+
+ /* Write AdminPSFP values */
+ spx5_wr(sg->basetime.tv_nsec, sparx5, ANA_AC_SG_CONFIG_REG_1);
+ spx5_wr(base_lsb, sparx5, ANA_AC_SG_CONFIG_REG_2);
+
+ spx5_rmw(ANA_AC_SG_CONFIG_REG_3_BASE_TIME_SEC_MSB_SET(base_msb) |
+ ANA_AC_SG_CONFIG_REG_3_INIT_IPS_SET(ips) |
+ ANA_AC_SG_CONFIG_REG_3_LIST_LENGTH_SET(sg->num_entries) |
+ ANA_AC_SG_CONFIG_REG_3_INIT_GATE_STATE_SET(sg->gate_state) |
+ ANA_AC_SG_CONFIG_REG_3_GATE_ENABLE_SET(1),
+ ANA_AC_SG_CONFIG_REG_3_BASE_TIME_SEC_MSB |
+ ANA_AC_SG_CONFIG_REG_3_INIT_IPS |
+ ANA_AC_SG_CONFIG_REG_3_LIST_LENGTH |
+ ANA_AC_SG_CONFIG_REG_3_INIT_GATE_STATE |
+ ANA_AC_SG_CONFIG_REG_3_GATE_ENABLE,
+ sparx5, ANA_AC_SG_CONFIG_REG_3);
+
+ spx5_wr(sg->cycletime, sparx5, ANA_AC_SG_CONFIG_REG_4);
+ spx5_wr(sg->cycletimeext, sparx5, ANA_AC_SG_CONFIG_REG_5);
+
+ /* For each scheduling entry */
+ for (i = 0; i < sg->num_entries; i++) {
+ gce = &sg->gce[i];
+ ips = sparx5_psfp_ipv_to_ips(gce->ipv);
+ /* hardware needs TimeInterval to be cumulative */
+ accum_time_interval += gce->interval;
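+ /* (e.g. entry intervals of 1000, 2000 and 3000 ns are
+ * programmed as 1000, 3000 and 6000)
+ */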
+ /* Set gate state */
+ spx5_wr(ANA_AC_SG_GCL_GS_CONFIG_IPS_SET(ips) |
+ ANA_AC_SG_GCL_GS_CONFIG_GATE_STATE_SET(gce->gate_state),
+ sparx5, ANA_AC_SG_GCL_GS_CONFIG(i));
+
+ /* Set time interval */
+ spx5_wr(accum_time_interval, sparx5,
+ ANA_AC_SG_GCL_TI_CONFIG(i));
+
+ /* Set maximum octets */
+ spx5_wr(gce->maxoctets, sparx5, ANA_AC_SG_GCL_OCT_CONFIG(i));
+ }
+
+ return 0;
+}
+
+static int sparx5_sdlb_conf_set(struct sparx5 *sparx5,
+ struct sparx5_psfp_fm *fm)
+{
+ int (*sparx5_sdlb_group_action)(struct sparx5 *sparx5, u32 group,
+ u32 idx);
+
+ if (!fm->pol.rate && !fm->pol.burst)
+ sparx5_sdlb_group_action = &sparx5_sdlb_group_del;
+ else
+ sparx5_sdlb_group_action = &sparx5_sdlb_group_add;
+
+ sparx5_policer_conf_set(sparx5, &fm->pol);
+
+ return sparx5_sdlb_group_action(sparx5, fm->pol.group, fm->pol.idx);
+}
+
+int sparx5_psfp_sf_add(struct sparx5 *sparx5, const struct sparx5_psfp_sf *sf,
+ u32 *id)
+{
+ int ret;
+
+ ret = sparx5_psfp_sf_get(id);
+ if (ret < 0)
+ return ret;
+
+ sparx5_psfp_sf_set(sparx5, *id, sf);
+
+ return 0;
+}
+
+int sparx5_psfp_sf_del(struct sparx5 *sparx5, u32 id)
+{
+ const struct sparx5_psfp_sf sf = { 0 };
+
+ sparx5_psfp_sf_set(sparx5, id, &sf);
+
+ return sparx5_psfp_sf_put(id);
+}
+
+int sparx5_psfp_sg_add(struct sparx5 *sparx5, u32 uidx,
+ struct sparx5_psfp_sg *sg, u32 *id)
+{
+ ktime_t basetime;
+ int ret;
+
+ ret = sparx5_psfp_sg_get(uidx, id);
+ if (ret < 0)
+ return ret;
+ /* Was already in use, no need to reconfigure */
+ if (ret > 1)
+ return 0;
+
+ /* Calculate basetime for this stream gate */
+ sparx5_new_base_time(sparx5, sg->cycletime, 0, &basetime);
+ sg->basetime = ktime_to_timespec64(basetime);
+
+ sparx5_psfp_sg_set(sparx5, *id, sg);
+
+ /* Signal hardware to copy AdminPSFP values into OperPSFP values */
+ sparx5_psfp_sg_config_change(sparx5, *id);
+
+ return 0;
+}
+
+int sparx5_psfp_sg_del(struct sparx5 *sparx5, u32 id)
+{
+ const struct sparx5_psfp_sg sg = { 0 };
+ int ret;
+
+ ret = sparx5_psfp_sg_put(id);
+ if (ret < 0)
+ return ret;
+ /* Stream gate still in use? */
+ if (ret > 0)
+ return 0;
+
+ return sparx5_psfp_sg_set(sparx5, id, &sg);
+}
+
+int sparx5_psfp_fm_add(struct sparx5 *sparx5, u32 uidx,
+ struct sparx5_psfp_fm *fm, u32 *id)
+{
+ struct sparx5_policer *pol = &fm->pol;
+ int ret;
+
+ /* Get flow meter */
+ ret = sparx5_psfp_fm_get(uidx, &fm->pol.idx);
+ if (ret < 0)
+ return ret;
+ /* Was already in use, no need to reconfigure */
+ if (ret > 1)
+ return 0;
+
+ ret = sparx5_sdlb_group_get_by_rate(sparx5, pol->rate, pol->burst);
+ if (ret < 0)
+ return ret;
+
+ fm->pol.group = ret;
+
+ ret = sparx5_sdlb_conf_set(sparx5, fm);
+ if (ret < 0)
+ return ret;
+
+ *id = fm->pol.idx;
+
+ return 0;
+}
+
+int sparx5_psfp_fm_del(struct sparx5 *sparx5, u32 id)
+{
+ struct sparx5_psfp_fm fm = { .pol.idx = id,
+ .pol.type = SPX5_POL_SERVICE };
+ int ret;
+
+ /* Find the group that this lb belongs to */
+ ret = sparx5_sdlb_group_get_by_index(sparx5, id, &fm.pol.group);
+ if (ret < 0)
+ return ret;
+
+ ret = sparx5_psfp_fm_put(id);
+ if (ret < 0)
+ return ret;
+ /* Do not reset flow-meter if still in use. */
+ if (ret > 0)
+ return 0;
+
+ return sparx5_sdlb_conf_set(sparx5, &fm);
+}
+
+void sparx5_psfp_init(struct sparx5 *sparx5)
+{
+ const struct sparx5_sdlb_group *group;
+ int i;
+
+ for (i = 0; i < SPX5_SDLB_GROUP_CNT; i++) {
+ group = &sdlb_groups[i];
+ sparx5_sdlb_group_init(sparx5, group->max_rate,
+ group->min_burst, group->frame_size, i);
+ }
+
+ spx5_wr(ANA_AC_SG_CYCLETIME_UPDATE_PERIOD_SG_CT_UPDATE_ENA_SET(1),
+ sparx5, ANA_AC_SG_CYCLETIME_UPDATE_PERIOD);
+
+ spx5_rmw(ANA_L2_FWD_CFG_ISDX_LOOKUP_ENA_SET(1),
+ ANA_L2_FWD_CFG_ISDX_LOOKUP_ENA, sparx5, ANA_L2_FWD_CFG);
+}
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c b/drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c
new file mode 100644
index 0000000000..5a932460db
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c
@@ -0,0 +1,682 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
+ *
+ * The Sparx5 Chip Register Model can be browsed at this location:
+ * https://github.com/microchip-ung/sparx-5_reginfo
+ */
+#include <linux/ptp_classify.h>
+
+#include "sparx5_main_regs.h"
+#include "sparx5_main.h"
+
+#define SPARX5_MAX_PTP_ID 512
+
+#define TOD_ACC_PIN 0x4
+
+enum {
+ PTP_PIN_ACTION_IDLE = 0,
+ PTP_PIN_ACTION_LOAD,
+ PTP_PIN_ACTION_SAVE,
+ PTP_PIN_ACTION_CLOCK,
+ PTP_PIN_ACTION_DELTA,
+ PTP_PIN_ACTION_TOD
+};
+
+static u64 sparx5_ptp_get_1ppm(struct sparx5 *sparx5)
+{
+ /* Represents a 1ppm adjustment in 2^59 format, using the nominal TOD
+ * increments 1.59687500000 (625 MHz), 1.99609375000 (500 MHz) and
+ * 3.99218750000 (250 MHz) as reference.
+ * The value is calculated as follows:
+ * (1/1000000)/((2^-59)/X)
+ */
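+ /* Worked example (500 MHz core clock, X = 1.99609375):
+ * (1/1000000) / ((2^-59) / 1.99609375)
+ * = 1.99609375 * 2^59 / 10^6
+ * ~= 1150669704793, the value returned below for 500 MHz.
+ */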
+
+ u64 res = 0;
+
+ switch (sparx5->coreclock) {
+ case SPX5_CORE_CLOCK_250MHZ:
+ res = 2301339409586;
+ break;
+ case SPX5_CORE_CLOCK_500MHZ:
+ res = 1150669704793;
+ break;
+ case SPX5_CORE_CLOCK_625MHZ:
+ res = 920535763834;
+ break;
+ default:
+ WARN(1, "Invalid core clock");
+ break;
+ }
+
+ return res;
+}
+
+static u64 sparx5_ptp_get_nominal_value(struct sparx5 *sparx5)
+{
+ u64 res = 0;
+
+ switch (sparx5->coreclock) {
+ case SPX5_CORE_CLOCK_250MHZ:
+ res = 0x1FF0000000000000;
+ break;
+ case SPX5_CORE_CLOCK_500MHZ:
+ res = 0x0FF8000000000000;
+ break;
+ case SPX5_CORE_CLOCK_625MHZ:
+ res = 0x0CC6666666666666;
+ break;
+ default:
+ WARN(1, "Invalid core clock");
+ break;
+ }
+
+ return res;
+}
+
+int sparx5_ptp_hwtstamp_set(struct sparx5_port *port,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack)
+{
+ struct sparx5 *sparx5 = port->sparx5;
+ struct sparx5_phc *phc;
+
+ /* For now, don't allow PTP to run on ports that are part of a bridge:
+ * in the transparent clock case the HW would still forward the frames,
+ * which would result in duplicate frames.
+ */
+
+ if (test_bit(port->portno, sparx5->bridge_mask))
+ return -EINVAL;
+
+ switch (cfg->tx_type) {
+ case HWTSTAMP_TX_ON:
+ port->ptp_cmd = IFH_REW_OP_TWO_STEP_PTP;
+ break;
+ case HWTSTAMP_TX_ONESTEP_SYNC:
+ port->ptp_cmd = IFH_REW_OP_ONE_STEP_PTP;
+ break;
+ case HWTSTAMP_TX_OFF:
+ port->ptp_cmd = IFH_REW_OP_NOOP;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (cfg->rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_NTP_ALL:
+ cfg->rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ /* Commit back the result & save it */
+ mutex_lock(&sparx5->ptp_lock);
+ phc = &sparx5->phc[SPARX5_PHC_PORT];
+ phc->hwtstamp_config = *cfg;
+ mutex_unlock(&sparx5->ptp_lock);
+
+ return 0;
+}
+
+void sparx5_ptp_hwtstamp_get(struct sparx5_port *port,
+ struct kernel_hwtstamp_config *cfg)
+{
+ struct sparx5 *sparx5 = port->sparx5;
+ struct sparx5_phc *phc;
+
+ phc = &sparx5->phc[SPARX5_PHC_PORT];
+ *cfg = phc->hwtstamp_config;
+}
+
+static void sparx5_ptp_classify(struct sparx5_port *port, struct sk_buff *skb,
+ u8 *rew_op, u8 *pdu_type, u8 *pdu_w16_offset)
+{
+ struct ptp_header *header;
+ u8 msgtype;
+ int type;
+
+ if (port->ptp_cmd == IFH_REW_OP_NOOP) {
+ *rew_op = IFH_REW_OP_NOOP;
+ *pdu_type = IFH_PDU_TYPE_NONE;
+ *pdu_w16_offset = 0;
+ return;
+ }
+
+ type = ptp_classify_raw(skb);
+ if (type == PTP_CLASS_NONE) {
+ *rew_op = IFH_REW_OP_NOOP;
+ *pdu_type = IFH_PDU_TYPE_NONE;
+ *pdu_w16_offset = 0;
+ return;
+ }
+
+ header = ptp_parse_header(skb, type);
+ if (!header) {
+ *rew_op = IFH_REW_OP_NOOP;
+ *pdu_type = IFH_PDU_TYPE_NONE;
+ *pdu_w16_offset = 0;
+ return;
+ }
+
+ *pdu_w16_offset = 7;
+ if (type & PTP_CLASS_L2)
+ *pdu_type = IFH_PDU_TYPE_PTP;
+ if (type & PTP_CLASS_IPV4)
+ *pdu_type = IFH_PDU_TYPE_IPV4_UDP_PTP;
+ if (type & PTP_CLASS_IPV6)
+ *pdu_type = IFH_PDU_TYPE_IPV6_UDP_PTP;
+
+ if (port->ptp_cmd == IFH_REW_OP_TWO_STEP_PTP) {
+ *rew_op = IFH_REW_OP_TWO_STEP_PTP;
+ return;
+ }
+
+ /* If the frame is a sync message and one-step is configured, use the
+ * one-step operation; otherwise fall back to two-step.
+ */
+ msgtype = ptp_get_msgtype(header, type);
+ if ((msgtype & 0xf) == 0) {
+ *rew_op = IFH_REW_OP_ONE_STEP_PTP;
+ return;
+ }
+
+ *rew_op = IFH_REW_OP_TWO_STEP_PTP;
+}
+
+static void sparx5_ptp_txtstamp_old_release(struct sparx5_port *port)
+{
+ struct sk_buff *skb, *skb_tmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->tx_skbs.lock, flags);
+ skb_queue_walk_safe(&port->tx_skbs, skb, skb_tmp) {
+ if (time_after(SPARX5_SKB_CB(skb)->jiffies + SPARX5_PTP_TIMEOUT,
+ jiffies))
+ break;
+
+ __skb_unlink(skb, &port->tx_skbs);
+ dev_kfree_skb_any(skb);
+ }
+ spin_unlock_irqrestore(&port->tx_skbs.lock, flags);
+}
+
+int sparx5_ptp_txtstamp_request(struct sparx5_port *port,
+ struct sk_buff *skb)
+{
+ struct sparx5 *sparx5 = port->sparx5;
+ u8 rew_op, pdu_type, pdu_w16_offset;
+ unsigned long flags;
+
+ sparx5_ptp_classify(port, skb, &rew_op, &pdu_type, &pdu_w16_offset);
+ SPARX5_SKB_CB(skb)->rew_op = rew_op;
+ SPARX5_SKB_CB(skb)->pdu_type = pdu_type;
+ SPARX5_SKB_CB(skb)->pdu_w16_offset = pdu_w16_offset;
+
+ if (rew_op != IFH_REW_OP_TWO_STEP_PTP)
+ return 0;
+
+ sparx5_ptp_txtstamp_old_release(port);
+
+ spin_lock_irqsave(&sparx5->ptp_ts_id_lock, flags);
+ if (sparx5->ptp_skbs == SPARX5_MAX_PTP_ID) {
+ spin_unlock_irqrestore(&sparx5->ptp_ts_id_lock, flags);
+ return -EBUSY;
+ }
+
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+
+ skb_queue_tail(&port->tx_skbs, skb);
+ SPARX5_SKB_CB(skb)->ts_id = port->ts_id;
+ SPARX5_SKB_CB(skb)->jiffies = jiffies;
+
+ sparx5->ptp_skbs++;
+ port->ts_id++;
+ if (port->ts_id == SPARX5_MAX_PTP_ID)
+ port->ts_id = 0;
+
+ spin_unlock_irqrestore(&sparx5->ptp_ts_id_lock, flags);
+
+ return 0;
+}
+
+void sparx5_ptp_txtstamp_release(struct sparx5_port *port,
+ struct sk_buff *skb)
+{
+ struct sparx5 *sparx5 = port->sparx5;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sparx5->ptp_ts_id_lock, flags);
+ port->ts_id--;
+ sparx5->ptp_skbs--;
+ skb_unlink(skb, &port->tx_skbs);
+ spin_unlock_irqrestore(&sparx5->ptp_ts_id_lock, flags);
+}
+
+static void sparx5_get_hwtimestamp(struct sparx5 *sparx5,
+ struct timespec64 *ts,
+ u32 nsec)
+{
+ /* Read current PTP time to get seconds */
+ unsigned long flags;
+ u32 curr_nsec;
+
+ spin_lock_irqsave(&sparx5->ptp_clock_lock, flags);
+
+ spx5_rmw(PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(PTP_PIN_ACTION_SAVE) |
+ PTP_PTP_PIN_CFG_PTP_PIN_DOM_SET(SPARX5_PHC_PORT) |
+ PTP_PTP_PIN_CFG_PTP_PIN_SYNC_SET(0),
+ PTP_PTP_PIN_CFG_PTP_PIN_ACTION |
+ PTP_PTP_PIN_CFG_PTP_PIN_DOM |
+ PTP_PTP_PIN_CFG_PTP_PIN_SYNC,
+ sparx5, PTP_PTP_PIN_CFG(TOD_ACC_PIN));
+
+ ts->tv_sec = spx5_rd(sparx5, PTP_PTP_TOD_SEC_LSB(TOD_ACC_PIN));
+ curr_nsec = spx5_rd(sparx5, PTP_PTP_TOD_NSEC(TOD_ACC_PIN));
+
+ ts->tv_nsec = nsec;
+
+ /* Sec has incremented since the ts was registered */
+ if (curr_nsec < nsec)
+ ts->tv_sec--;
+
+ spin_unlock_irqrestore(&sparx5->ptp_clock_lock, flags);
+}
+
+irqreturn_t sparx5_ptp_irq_handler(int irq, void *args)
+{
+ int budget = SPARX5_MAX_PTP_ID;
+ struct sparx5 *sparx5 = args;
+
+ while (budget--) {
+ struct sk_buff *skb, *skb_tmp, *skb_match = NULL;
+ struct skb_shared_hwtstamps shhwtstamps;
+ struct sparx5_port *port;
+ struct timespec64 ts;
+ unsigned long flags;
+ u32 val, id, txport;
+ u32 delay;
+
+ val = spx5_rd(sparx5, REW_PTP_TWOSTEP_CTRL);
+
+ /* Check if a timestamp can be retrieved */
+ if (!(val & REW_PTP_TWOSTEP_CTRL_PTP_VLD))
+ break;
+
+ WARN_ON(val & REW_PTP_TWOSTEP_CTRL_PTP_OVFL);
+
+ if (!(val & REW_PTP_TWOSTEP_CTRL_STAMP_TX))
+ continue;
+
+ /* Retrieve the ts Tx port */
+ txport = REW_PTP_TWOSTEP_CTRL_STAMP_PORT_GET(val);
+
+ /* Retrieve its associated skb */
+ port = sparx5->ports[txport];
+
+ /* Retrieve the delay */
+ delay = spx5_rd(sparx5, REW_PTP_TWOSTEP_STAMP);
+ delay = REW_PTP_TWOSTEP_STAMP_STAMP_NSEC_GET(delay);
+
+ /* Get the next timestamp from the FIFO; it must be the Rx
+ * timestamp, which carries the ID of the frame.
+ */
+ spx5_rmw(REW_PTP_TWOSTEP_CTRL_PTP_NXT_SET(1),
+ REW_PTP_TWOSTEP_CTRL_PTP_NXT,
+ sparx5, REW_PTP_TWOSTEP_CTRL);
+
+ val = spx5_rd(sparx5, REW_PTP_TWOSTEP_CTRL);
+
+ /* Check if a timestamp can be retrieved */
+ if (!(val & REW_PTP_TWOSTEP_CTRL_PTP_VLD))
+ break;
+
+ /* Read RX timestamping to get the ID */
+ id = spx5_rd(sparx5, REW_PTP_TWOSTEP_STAMP);
+ id <<= 8;
+ id |= spx5_rd(sparx5, REW_PTP_TWOSTEP_STAMP_SUBNS);
+
+ spin_lock_irqsave(&port->tx_skbs.lock, flags);
+ skb_queue_walk_safe(&port->tx_skbs, skb, skb_tmp) {
+ if (SPARX5_SKB_CB(skb)->ts_id != id)
+ continue;
+
+ __skb_unlink(skb, &port->tx_skbs);
+ skb_match = skb;
+ break;
+ }
+ spin_unlock_irqrestore(&port->tx_skbs.lock, flags);
+
+ /* Next ts */
+ spx5_rmw(REW_PTP_TWOSTEP_CTRL_PTP_NXT_SET(1),
+ REW_PTP_TWOSTEP_CTRL_PTP_NXT,
+ sparx5, REW_PTP_TWOSTEP_CTRL);
+
+ if (WARN_ON(!skb_match))
+ continue;
+
+ spin_lock(&sparx5->ptp_ts_id_lock);
+ sparx5->ptp_skbs--;
+ spin_unlock(&sparx5->ptp_ts_id_lock);
+
+ /* Get the h/w timestamp */
+ sparx5_get_hwtimestamp(sparx5, &ts, delay);
+
+ /* Set the timestamp into the skb */
+ shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
+ skb_tstamp_tx(skb_match, &shhwtstamps);
+
+ dev_kfree_skb_any(skb_match);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int sparx5_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct sparx5_phc *phc = container_of(ptp, struct sparx5_phc, info);
+ struct sparx5 *sparx5 = phc->sparx5;
+ unsigned long flags;
+ bool neg_adj = false;
+ u64 tod_inc;
+ u64 ref;
+
+ if (!scaled_ppm)
+ return 0;
+
+ if (scaled_ppm < 0) {
+ neg_adj = true;
+ scaled_ppm = -scaled_ppm;
+ }
+
+ tod_inc = sparx5_ptp_get_nominal_value(sparx5);
+
+ /* The multiplication is split into two separate additions to avoid
+ * overflow: if scaled_ppm, which carries a 16-bit fractional part, were
+ * bigger than about 20 ppm, a direct multiplication would overflow.
+ */
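+ /* The split computes 1ppm * scaled_ppm as
+ * 1ppm * (scaled_ppm >> 16)  (integer ppm part)
+ * + (1ppm * (scaled_ppm & 0xffff)) >> 16  (fractional ppm part)
+ * so each 64-bit intermediate stays in range.
+ */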
+ ref = sparx5_ptp_get_1ppm(sparx5) * (scaled_ppm >> 16);
+ ref += (sparx5_ptp_get_1ppm(sparx5) * (0xffff & scaled_ppm)) >> 16;
+ tod_inc = neg_adj ? tod_inc - ref : tod_inc + ref;
+
+ spin_lock_irqsave(&sparx5->ptp_clock_lock, flags);
+
+ spx5_rmw(PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS_SET(BIT(phc->index)),
+ PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS,
+ sparx5, PTP_PTP_DOM_CFG);
+
+ spx5_wr(lower_32_bits(tod_inc), sparx5,
+ PTP_CLK_PER_CFG(phc->index, 0));
+ spx5_wr(upper_32_bits(tod_inc), sparx5,
+ PTP_CLK_PER_CFG(phc->index, 1));
+
+ spx5_rmw(PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS_SET(0),
+ PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS, sparx5,
+ PTP_PTP_DOM_CFG);
+
+ spin_unlock_irqrestore(&sparx5->ptp_clock_lock, flags);
+
+ return 0;
+}
+
+static int sparx5_ptp_settime64(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct sparx5_phc *phc = container_of(ptp, struct sparx5_phc, info);
+ struct sparx5 *sparx5 = phc->sparx5;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sparx5->ptp_clock_lock, flags);
+
+ /* Must be in IDLE mode before the time can be loaded */
+ spx5_rmw(PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(PTP_PIN_ACTION_IDLE) |
+ PTP_PTP_PIN_CFG_PTP_PIN_DOM_SET(phc->index) |
+ PTP_PTP_PIN_CFG_PTP_PIN_SYNC_SET(0),
+ PTP_PTP_PIN_CFG_PTP_PIN_ACTION |
+ PTP_PTP_PIN_CFG_PTP_PIN_DOM |
+ PTP_PTP_PIN_CFG_PTP_PIN_SYNC,
+ sparx5, PTP_PTP_PIN_CFG(TOD_ACC_PIN));
+
+ /* Set new value */
+ spx5_wr(PTP_PTP_TOD_SEC_MSB_PTP_TOD_SEC_MSB_SET(upper_32_bits(ts->tv_sec)),
+ sparx5, PTP_PTP_TOD_SEC_MSB(TOD_ACC_PIN));
+ spx5_wr(lower_32_bits(ts->tv_sec),
+ sparx5, PTP_PTP_TOD_SEC_LSB(TOD_ACC_PIN));
+ spx5_wr(ts->tv_nsec, sparx5, PTP_PTP_TOD_NSEC(TOD_ACC_PIN));
+
+ /* Apply new values */
+ spx5_rmw(PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(PTP_PIN_ACTION_LOAD) |
+ PTP_PTP_PIN_CFG_PTP_PIN_DOM_SET(phc->index) |
+ PTP_PTP_PIN_CFG_PTP_PIN_SYNC_SET(0),
+ PTP_PTP_PIN_CFG_PTP_PIN_ACTION |
+ PTP_PTP_PIN_CFG_PTP_PIN_DOM |
+ PTP_PTP_PIN_CFG_PTP_PIN_SYNC,
+ sparx5, PTP_PTP_PIN_CFG(TOD_ACC_PIN));
+
+ spin_unlock_irqrestore(&sparx5->ptp_clock_lock, flags);
+
+ return 0;
+}
+
+int sparx5_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts)
+{
+ struct sparx5_phc *phc = container_of(ptp, struct sparx5_phc, info);
+ struct sparx5 *sparx5 = phc->sparx5;
+ unsigned long flags;
+ time64_t s;
+ s64 ns;
+
+ spin_lock_irqsave(&sparx5->ptp_clock_lock, flags);
+
+ spx5_rmw(PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(PTP_PIN_ACTION_SAVE) |
+ PTP_PTP_PIN_CFG_PTP_PIN_DOM_SET(phc->index) |
+ PTP_PTP_PIN_CFG_PTP_PIN_SYNC_SET(0),
+ PTP_PTP_PIN_CFG_PTP_PIN_ACTION |
+ PTP_PTP_PIN_CFG_PTP_PIN_DOM |
+ PTP_PTP_PIN_CFG_PTP_PIN_SYNC,
+ sparx5, PTP_PTP_PIN_CFG(TOD_ACC_PIN));
+
+ s = spx5_rd(sparx5, PTP_PTP_TOD_SEC_MSB(TOD_ACC_PIN));
+ s <<= 32;
+ s |= spx5_rd(sparx5, PTP_PTP_TOD_SEC_LSB(TOD_ACC_PIN));
+ ns = spx5_rd(sparx5, PTP_PTP_TOD_NSEC(TOD_ACC_PIN));
+ ns &= PTP_PTP_TOD_NSEC_PTP_TOD_NSEC;
+
+ spin_unlock_irqrestore(&sparx5->ptp_clock_lock, flags);
+
+ /* Deal with negative values */
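+ /* (the nsec counter encodes -16..-1 ns as 0x3FFFFFF0..0x3FFFFFFF;
+ * fold these into the previous second: 999999984 = 10^9 - 16)
+ */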
+ if ((ns & 0xFFFFFFF0) == 0x3FFFFFF0) {
+ s--;
+ ns &= 0xf;
+ ns += 999999984;
+ }
+
+ set_normalized_timespec64(ts, s, ns);
+ return 0;
+}
+
+static int sparx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct sparx5_phc *phc = container_of(ptp, struct sparx5_phc, info);
+ struct sparx5 *sparx5 = phc->sparx5;
+
+ if (delta > -(NSEC_PER_SEC / 2) && delta < (NSEC_PER_SEC / 2)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&sparx5->ptp_clock_lock, flags);
+
+ /* Must be in IDLE mode before the time can be loaded */
+ spx5_rmw(PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(PTP_PIN_ACTION_IDLE) |
+ PTP_PTP_PIN_CFG_PTP_PIN_DOM_SET(phc->index) |
+ PTP_PTP_PIN_CFG_PTP_PIN_SYNC_SET(0),
+ PTP_PTP_PIN_CFG_PTP_PIN_ACTION |
+ PTP_PTP_PIN_CFG_PTP_PIN_DOM |
+ PTP_PTP_PIN_CFG_PTP_PIN_SYNC,
+ sparx5, PTP_PTP_PIN_CFG(TOD_ACC_PIN));
+
+ spx5_wr(PTP_PTP_TOD_NSEC_PTP_TOD_NSEC_SET(delta),
+ sparx5, PTP_PTP_TOD_NSEC(TOD_ACC_PIN));
+
+ /* Adjust time with the value of PTP_TOD_NSEC */
+ spx5_rmw(PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(PTP_PIN_ACTION_DELTA) |
+ PTP_PTP_PIN_CFG_PTP_PIN_DOM_SET(phc->index) |
+ PTP_PTP_PIN_CFG_PTP_PIN_SYNC_SET(0),
+ PTP_PTP_PIN_CFG_PTP_PIN_ACTION |
+ PTP_PTP_PIN_CFG_PTP_PIN_DOM |
+ PTP_PTP_PIN_CFG_PTP_PIN_SYNC,
+ sparx5, PTP_PTP_PIN_CFG(TOD_ACC_PIN));
+
+ spin_unlock_irqrestore(&sparx5->ptp_clock_lock, flags);
+ } else {
+ /* Fall back to sparx5_ptp_settime64, which is not exact */
+ struct timespec64 ts;
+ u64 now;
+
+ sparx5_ptp_gettime64(ptp, &ts);
+
+ now = ktime_to_ns(timespec64_to_ktime(ts));
+ ts = ns_to_timespec64(now + delta);
+
+ sparx5_ptp_settime64(ptp, &ts);
+ }
+
+ return 0;
+}
+
+static struct ptp_clock_info sparx5_ptp_clock_info = {
+ .owner = THIS_MODULE,
+ .name = "sparx5 ptp",
+ .max_adj = 200000,
+ .gettime64 = sparx5_ptp_gettime64,
+ .settime64 = sparx5_ptp_settime64,
+ .adjtime = sparx5_ptp_adjtime,
+ .adjfine = sparx5_ptp_adjfine,
+};
+
+static int sparx5_ptp_phc_init(struct sparx5 *sparx5,
+ int index,
+ struct ptp_clock_info *clock_info)
+{
+ struct sparx5_phc *phc = &sparx5->phc[index];
+
+ phc->info = *clock_info;
+ phc->clock = ptp_clock_register(&phc->info, sparx5->dev);
+ if (IS_ERR(phc->clock))
+ return PTR_ERR(phc->clock);
+
+ phc->index = index;
+ phc->sparx5 = sparx5;
+
+ /* PTP Rx stamping is always enabled. */
+ phc->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+
+ return 0;
+}
+
+int sparx5_ptp_init(struct sparx5 *sparx5)
+{
+ u64 tod_adj = sparx5_ptp_get_nominal_value(sparx5);
+ struct sparx5_port *port;
+ int err, i;
+
+ if (!sparx5->ptp)
+ return 0;
+
+ for (i = 0; i < SPARX5_PHC_COUNT; ++i) {
+ err = sparx5_ptp_phc_init(sparx5, i, &sparx5_ptp_clock_info);
+ if (err)
+ return err;
+ }
+
+ spin_lock_init(&sparx5->ptp_clock_lock);
+ spin_lock_init(&sparx5->ptp_ts_id_lock);
+ mutex_init(&sparx5->ptp_lock);
+
+ /* Disable master counters */
+ spx5_wr(PTP_PTP_DOM_CFG_PTP_ENA_SET(0), sparx5, PTP_PTP_DOM_CFG);
+
+ /* Configure the nominal TOD increment per clock cycle */
+ spx5_rmw(PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS_SET(0x7),
+ PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS,
+ sparx5, PTP_PTP_DOM_CFG);
+
+ for (i = 0; i < SPARX5_PHC_COUNT; ++i) {
+ spx5_wr(lower_32_bits(tod_adj), sparx5,
+ PTP_CLK_PER_CFG(i, 0));
+ spx5_wr(upper_32_bits(tod_adj), sparx5,
+ PTP_CLK_PER_CFG(i, 1));
+ }
+
+ spx5_rmw(PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS_SET(0),
+ PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS,
+ sparx5, PTP_PTP_DOM_CFG);
+
+ /* Enable master counters */
+ spx5_wr(PTP_PTP_DOM_CFG_PTP_ENA_SET(0x7), sparx5, PTP_PTP_DOM_CFG);
+
+ for (i = 0; i < SPX5_PORTS; i++) {
+ port = sparx5->ports[i];
+ if (!port)
+ continue;
+
+ skb_queue_head_init(&port->tx_skbs);
+ }
+
+ return 0;
+}
+
+void sparx5_ptp_deinit(struct sparx5 *sparx5)
+{
+ struct sparx5_port *port;
+ int i;
+
+ for (i = 0; i < SPX5_PORTS; i++) {
+ port = sparx5->ports[i];
+ if (!port)
+ continue;
+
+ skb_queue_purge(&port->tx_skbs);
+ }
+
+ for (i = 0; i < SPARX5_PHC_COUNT; ++i)
+ ptp_clock_unregister(sparx5->phc[i].clock);
+}
+
+void sparx5_ptp_rxtstamp(struct sparx5 *sparx5, struct sk_buff *skb,
+ u64 timestamp)
+{
+ struct skb_shared_hwtstamps *shhwtstamps;
+ struct sparx5_phc *phc;
+ struct timespec64 ts;
+ u64 full_ts_in_ns;
+
+ if (!sparx5->ptp)
+ return;
+
+ phc = &sparx5->phc[SPARX5_PHC_PORT];
+ sparx5_ptp_gettime64(&phc->info, &ts);
+
+ if (ts.tv_nsec < timestamp)
+ ts.tv_sec--;
+ ts.tv_nsec = timestamp;
+ full_ts_in_ns = ktime_set(ts.tv_sec, ts.tv_nsec);
+
+ shhwtstamps = skb_hwtstamps(skb);
+ shhwtstamps->hwtstamp = full_ts_in_ns;
+}
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_qos.c b/drivers/net/ethernet/microchip/sparx5/sparx5_qos.c
new file mode 100644
index 0000000000..5f34febaee
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_qos.c
@@ -0,0 +1,576 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2022 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include <net/pkt_cls.h>
+
+#include "sparx5_main.h"
+#include "sparx5_qos.h"
+
+/* Calculate new base_time based on cycle_time.
+ *
+ * The hardware requires a base_time that is always in the future.
+ * We define threshold_time as current_time + (2 * cycle_time).
+ * If base_time is below threshold_time this function recalculates it to be in
+ * the interval:
+ * threshold_time <= base_time < (threshold_time + cycle_time)
+ *
+ * A very simple algorithm could be like this:
+ * new_base_time = org_base_time + N * cycle_time
+ * using the lowest N so (new_base_time >= threshold_time)
+ */
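+/* Illustrative numbers: current_time = 10 s, cycle_time = 1 ms and
+ * org_base_time = 2 s give threshold_time = 10.002 s and N = 8002, so
+ * new_base_time = 2 s + 8002 * 1 ms = 10.002 s, which lies in
+ * [threshold_time; threshold_time + cycle_time).
+ */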
+void sparx5_new_base_time(struct sparx5 *sparx5, const u32 cycle_time,
+ const ktime_t org_base_time, ktime_t *new_base_time)
+{
+ ktime_t current_time, threshold_time, new_time;
+ struct timespec64 ts;
+ u64 nr_of_cycles_p2;
+ u64 nr_of_cycles;
+ u64 diff_time;
+
+ new_time = org_base_time;
+
+ sparx5_ptp_gettime64(&sparx5->phc[SPARX5_PHC_PORT].info, &ts);
+ current_time = timespec64_to_ktime(ts);
+ threshold_time = current_time + (2 * cycle_time);
+ diff_time = threshold_time - new_time;
+ nr_of_cycles = div_u64(diff_time, cycle_time);
+ nr_of_cycles_p2 = 1; /* Use 2^0 as start value */
+
+ if (new_time >= threshold_time) {
+ *new_base_time = new_time;
+ return;
+ }
+
+ /* Calculate the smallest power of 2 (nr_of_cycles_p2)
+ * that is larger than nr_of_cycles.
+ */
+ while (nr_of_cycles_p2 < nr_of_cycles)
+ nr_of_cycles_p2 <<= 1; /* Next (higher) power of 2 */
+
+ /* Add as big chunks (power of 2 * cycle_time)
+ * as possible for each power of 2
+ */
+ while (nr_of_cycles_p2) {
+ if (new_time < threshold_time) {
+ new_time += cycle_time * nr_of_cycles_p2;
+ while (new_time < threshold_time)
+ new_time += cycle_time * nr_of_cycles_p2;
+ new_time -= cycle_time * nr_of_cycles_p2;
+ }
+ nr_of_cycles_p2 >>= 1; /* Next (lower) power of 2 */
+ }
+ new_time += cycle_time;
+ *new_base_time = new_time;
+}
+
+/* Max rates for leak groups */
+static const u32 spx5_hsch_max_group_rate[SPX5_HSCH_LEAK_GRP_CNT] = {
+ 1048568, /* 1.049 Gbps */
+ 2621420, /* 2.621 Gbps */
+ 10485680, /* 10.486 Gbps */
+ 26214200 /* 26.214 Gbps */
+};
+
+static struct sparx5_layer layers[SPX5_HSCH_LAYER_CNT];
+
+static u32 sparx5_lg_get_leak_time(struct sparx5 *sparx5, u32 layer, u32 group)
+{
+ u32 value;
+
+ value = spx5_rd(sparx5, HSCH_HSCH_TIMER_CFG(layer, group));
+ return HSCH_HSCH_TIMER_CFG_LEAK_TIME_GET(value);
+}
+
+static void sparx5_lg_set_leak_time(struct sparx5 *sparx5, u32 layer, u32 group,
+ u32 leak_time)
+{
+ spx5_wr(HSCH_HSCH_TIMER_CFG_LEAK_TIME_SET(leak_time), sparx5,
+ HSCH_HSCH_TIMER_CFG(layer, group));
+}
+
+static u32 sparx5_lg_get_first(struct sparx5 *sparx5, u32 layer, u32 group)
+{
+ u32 value;
+
+ value = spx5_rd(sparx5, HSCH_HSCH_LEAK_CFG(layer, group));
+ return HSCH_HSCH_LEAK_CFG_LEAK_FIRST_GET(value);
+}
+
+static u32 sparx5_lg_get_next(struct sparx5 *sparx5, u32 layer, u32 group,
+ u32 idx)
+
+{
+ u32 value;
+
+ value = spx5_rd(sparx5, HSCH_SE_CONNECT(idx));
+ return HSCH_SE_CONNECT_SE_LEAK_LINK_GET(value);
+}
+
+static u32 sparx5_lg_get_last(struct sparx5 *sparx5, u32 layer, u32 group)
+{
+ u32 itr, next;
+
+ itr = sparx5_lg_get_first(sparx5, layer, group);
+
+ for (;;) {
+ next = sparx5_lg_get_next(sparx5, layer, group, itr);
+ if (itr == next)
+ return itr;
+
+ itr = next;
+ }
+}
+
+static bool sparx5_lg_is_last(struct sparx5 *sparx5, u32 layer, u32 group,
+ u32 idx)
+{
+ return idx == sparx5_lg_get_next(sparx5, layer, group, idx);
+}
+
+static bool sparx5_lg_is_first(struct sparx5 *sparx5, u32 layer, u32 group,
+ u32 idx)
+{
+ return idx == sparx5_lg_get_first(sparx5, layer, group);
+}
+
+static bool sparx5_lg_is_empty(struct sparx5 *sparx5, u32 layer, u32 group)
+{
+ return sparx5_lg_get_leak_time(sparx5, layer, group) == 0;
+}
+
+static bool sparx5_lg_is_singular(struct sparx5 *sparx5, u32 layer, u32 group)
+{
+ if (sparx5_lg_is_empty(sparx5, layer, group))
+ return false;
+
+ return sparx5_lg_get_first(sparx5, layer, group) ==
+ sparx5_lg_get_last(sparx5, layer, group);
+}
+
+static void sparx5_lg_enable(struct sparx5 *sparx5, u32 layer, u32 group,
+ u32 leak_time)
+{
+ sparx5_lg_set_leak_time(sparx5, layer, group, leak_time);
+}
+
+static void sparx5_lg_disable(struct sparx5 *sparx5, u32 layer, u32 group)
+{
+ sparx5_lg_set_leak_time(sparx5, layer, group, 0);
+}
+
+static int sparx5_lg_get_group_by_index(struct sparx5 *sparx5, u32 layer,
+ u32 idx, u32 *group)
+{
+ u32 itr, next;
+ int i;
+
+ for (i = 0; i < SPX5_HSCH_LEAK_GRP_CNT; i++) {
+ if (sparx5_lg_is_empty(sparx5, layer, i))
+ continue;
+
+ itr = sparx5_lg_get_first(sparx5, layer, i);
+
+ for (;;) {
+ next = sparx5_lg_get_next(sparx5, layer, i, itr);
+
+ if (itr == idx) {
+ *group = i;
+ return 0; /* Found it */
+ }
+ if (itr == next)
+ break; /* Was not found */
+
+ itr = next;
+ }
+ }
+
+ return -1;
+}
+
+static int sparx5_lg_get_group_by_rate(u32 layer, u32 rate, u32 *group)
+{
+ struct sparx5_layer *l = &layers[layer];
+ struct sparx5_lg *lg;
+ u32 i;
+
+ for (i = 0; i < SPX5_HSCH_LEAK_GRP_CNT; i++) {
+ lg = &l->leak_groups[i];
+ if (rate <= lg->max_rate) {
+ *group = i;
+ return 0;
+ }
+ }
+
+ return -1;
+}
+
+static int sparx5_lg_get_adjacent(struct sparx5 *sparx5, u32 layer, u32 group,
+ u32 idx, u32 *prev, u32 *next, u32 *first)
+{
+ u32 itr;
+
+ *first = sparx5_lg_get_first(sparx5, layer, group);
+ *prev = *first;
+ *next = *first;
+ itr = *first;
+
+ for (;;) {
+ *next = sparx5_lg_get_next(sparx5, layer, group, itr);
+
+ if (itr == idx)
+ return 0; /* Found it */
+
+ if (itr == *next)
+ return -1; /* Was not found */
+
+ *prev = itr;
+ itr = *next;
+ }
+}
+
+static int sparx5_lg_conf_set(struct sparx5 *sparx5, u32 layer, u32 group,
+ u32 se_first, u32 idx, u32 idx_next, bool empty)
+{
+ u32 leak_time = layers[layer].leak_groups[group].leak_time;
+
+ /* Stop leaking */
+ sparx5_lg_disable(sparx5, layer, group);
+
+ if (empty)
+ return 0;
+
+ /* Select layer */
+ spx5_rmw(HSCH_HSCH_CFG_CFG_HSCH_LAYER_SET(layer),
+ HSCH_HSCH_CFG_CFG_HSCH_LAYER, sparx5, HSCH_HSCH_CFG_CFG);
+
+ /* Link elements */
+ spx5_wr(HSCH_SE_CONNECT_SE_LEAK_LINK_SET(idx_next), sparx5,
+ HSCH_SE_CONNECT(idx));
+
+ /* Set the first element. */
+ spx5_rmw(HSCH_HSCH_LEAK_CFG_LEAK_FIRST_SET(se_first),
+ HSCH_HSCH_LEAK_CFG_LEAK_FIRST, sparx5,
+ HSCH_HSCH_LEAK_CFG(layer, group));
+
+ /* Start leaking */
+ sparx5_lg_enable(sparx5, layer, group, leak_time);
+
+ return 0;
+}
+
+static int sparx5_lg_del(struct sparx5 *sparx5, u32 layer, u32 group, u32 idx)
+{
+ u32 first, next, prev;
+ bool empty = false;
+
+ /* idx *must* be present in the leak group */
+ WARN_ON(sparx5_lg_get_adjacent(sparx5, layer, group, idx, &prev, &next,
+ &first) < 0);
+
+ if (sparx5_lg_is_singular(sparx5, layer, group)) {
+ empty = true;
+ } else if (sparx5_lg_is_last(sparx5, layer, group, idx)) {
+ /* idx is removed, prev is now last */
+ idx = prev;
+ next = prev;
+ } else if (sparx5_lg_is_first(sparx5, layer, group, idx)) {
+ /* idx is removed and points to itself, first is next */
+ first = next;
+ next = idx;
+ } else {
+ /* Next is not touched */
+ idx = prev;
+ }
+
+ return sparx5_lg_conf_set(sparx5, layer, group, first, idx, next,
+ empty);
+}
+
+static int sparx5_lg_add(struct sparx5 *sparx5, u32 layer, u32 new_group,
+ u32 idx)
+{
+ u32 first, next, old_group;
+
+ pr_debug("ADD: layer: %d, new_group: %d, idx: %d", layer, new_group,
+ idx);
+
+ /* Is this SE already shaping ? */
+ if (sparx5_lg_get_group_by_index(sparx5, layer, idx, &old_group) >= 0) {
+ if (old_group != new_group) {
+ /* Delete from old group */
+ sparx5_lg_del(sparx5, layer, old_group, idx);
+ } else {
+ /* Nothing to do here */
+ return 0;
+ }
+ }
+
+ /* We always add to head of the list */
+ first = idx;
+
+ if (sparx5_lg_is_empty(sparx5, layer, new_group))
+ next = idx;
+ else
+ next = sparx5_lg_get_first(sparx5, layer, new_group);
+
+ return sparx5_lg_conf_set(sparx5, layer, new_group, first, idx, next,
+ false);
+}
+
+static int sparx5_shaper_conf_set(struct sparx5_port *port,
+ const struct sparx5_shaper *sh, u32 layer,
+ u32 idx, u32 group)
+{
+ int (*sparx5_lg_action)(struct sparx5 *, u32, u32, u32);
+ struct sparx5 *sparx5 = port->sparx5;
+
+ if (!sh->rate && !sh->burst)
+ sparx5_lg_action = &sparx5_lg_del;
+ else
+ sparx5_lg_action = &sparx5_lg_add;
+
+ /* Select layer */
+ spx5_rmw(HSCH_HSCH_CFG_CFG_HSCH_LAYER_SET(layer),
+ HSCH_HSCH_CFG_CFG_HSCH_LAYER, sparx5, HSCH_HSCH_CFG_CFG);
+
+ /* Set frame mode */
+ spx5_rmw(HSCH_SE_CFG_SE_FRM_MODE_SET(sh->mode), HSCH_SE_CFG_SE_FRM_MODE,
+ sparx5, HSCH_SE_CFG(idx));
+
+ /* Set committed rate and burst */
+ spx5_wr(HSCH_CIR_CFG_CIR_RATE_SET(sh->rate) |
+ HSCH_CIR_CFG_CIR_BURST_SET(sh->burst),
+ sparx5, HSCH_CIR_CFG(idx));
+
+ /* This has to be done after the shaper configuration has been set */
+ sparx5_lg_action(sparx5, layer, group, idx);
+
+ return 0;
+}
+
+static u32 sparx5_weight_to_hw_cost(u32 weight_min, u32 weight)
+{
+ return ((((SPX5_DWRR_COST_MAX << 4) * weight_min / weight) + 8) >> 4) -
+ 1;
+}
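+/* With SPX5_DWRR_COST_MAX = 63, a band at the minimum weight maps to
+ * cost 62 and a band at twice the minimum weight to cost 31; the cost
+ * scales inversely with the relative weight.
+ */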
+
+static int sparx5_dwrr_conf_set(struct sparx5_port *port,
+ struct sparx5_dwrr *dwrr)
+{
+ int i;
+
+ spx5_rmw(HSCH_HSCH_CFG_CFG_HSCH_LAYER_SET(2) |
+ HSCH_HSCH_CFG_CFG_CFG_SE_IDX_SET(port->portno),
+ HSCH_HSCH_CFG_CFG_HSCH_LAYER | HSCH_HSCH_CFG_CFG_CFG_SE_IDX,
+ port->sparx5, HSCH_HSCH_CFG_CFG);
+
+ /* Number of *lower* indexes that are arbitrated dwrr */
+ spx5_rmw(HSCH_SE_CFG_SE_DWRR_CNT_SET(dwrr->count),
+ HSCH_SE_CFG_SE_DWRR_CNT, port->sparx5,
+ HSCH_SE_CFG(port->portno));
+
+ for (i = 0; i < dwrr->count; i++) {
+ spx5_rmw(HSCH_DWRR_ENTRY_DWRR_COST_SET(dwrr->cost[i]),
+ HSCH_DWRR_ENTRY_DWRR_COST, port->sparx5,
+ HSCH_DWRR_ENTRY(i));
+ }
+
+ return 0;
+}
+
+static int sparx5_leak_groups_init(struct sparx5 *sparx5)
+{
+ struct sparx5_layer *layer;
+ u32 sys_clk_per_100ps;
+ struct sparx5_lg *lg;
+ u32 leak_time_us;
+ int i, ii;
+
+ sys_clk_per_100ps = spx5_rd(sparx5, HSCH_SYS_CLK_PER);
+
+ for (i = 0; i < SPX5_HSCH_LAYER_CNT; i++) {
+ layer = &layers[i];
+ for (ii = 0; ii < SPX5_HSCH_LEAK_GRP_CNT; ii++) {
+ lg = &layer->leak_groups[ii];
+ lg->max_rate = spx5_hsch_max_group_rate[ii];
+
+ /* Calculate the leak time in us, to serve a maximum
+ * rate of 'max_rate' for this group
+ */
+ leak_time_us = (SPX5_SE_RATE_MAX * 1000) / lg->max_rate;
+
+ /* Hardware wants leak time in ns */
+ lg->leak_time = 1000 * leak_time_us;
+
+ /* Calculate resolution */
+ lg->resolution = 1000 / leak_time_us;
+
+ /* Maximum number of shapers that can be served by
+ * this leak group
+ */
+ lg->max_ses = (1000 * leak_time_us) / sys_clk_per_100ps;
+
+ /* Example:
+ * Wanted bandwidth is 100Mbit:
+ *
+ * 100 mbps can be served by leak group zero.
+ *
+ * leak_time is 125000 ns.
+ * resolution is: 8
+ *
+ * cir = 100000 / 8 = 12500
+ * leaks_pr_sec = 125000 / 10^9 = 8000
+ * bw = 12500 * 8000 = 10^8 (100 Mbit)
+ */
+
+ /* Disable by default - this also indicates an empty
+ * leak group
+ */
+ sparx5_lg_disable(sparx5, i, ii);
+ }
+ }
+
+ return 0;
+}
+
+int sparx5_qos_init(struct sparx5 *sparx5)
+{
+ int ret;
+
+ ret = sparx5_leak_groups_init(sparx5);
+ if (ret < 0)
+ return ret;
+
+ ret = sparx5_dcb_init(sparx5);
+ if (ret < 0)
+ return ret;
+
+ sparx5_psfp_init(sparx5);
+
+ return 0;
+}
+
+int sparx5_tc_mqprio_add(struct net_device *ndev, u8 num_tc)
+{
+ int i;
+
+ if (num_tc != SPX5_PRIOS) {
+ netdev_err(ndev, "Only %d traffic classes supported\n",
+ SPX5_PRIOS);
+ return -EINVAL;
+ }
+
+ netdev_set_num_tc(ndev, num_tc);
+
+ for (i = 0; i < num_tc; i++)
+ netdev_set_tc_queue(ndev, i, 1, i);
+
+ netdev_dbg(ndev, "dev->num_tc %u dev->real_num_tx_queues %u\n",
+ ndev->num_tc, ndev->real_num_tx_queues);
+
+ return 0;
+}
+
+int sparx5_tc_mqprio_del(struct net_device *ndev)
+{
+ netdev_reset_tc(ndev);
+
+ netdev_dbg(ndev, "dev->num_tc %u dev->real_num_tx_queues %u\n",
+ ndev->num_tc, ndev->real_num_tx_queues);
+
+ return 0;
+}
+
+int sparx5_tc_tbf_add(struct sparx5_port *port,
+ struct tc_tbf_qopt_offload_replace_params *params,
+ u32 layer, u32 idx)
+{
+ struct sparx5_shaper sh = {
+ .mode = SPX5_SE_MODE_DATARATE,
+ .rate = div_u64(params->rate.rate_bytes_ps, 1000) * 8,
+ .burst = params->max_size,
+ };
+ struct sparx5_lg *lg;
+ u32 group;
+
+ /* Find suitable group for this se */
+ if (sparx5_lg_get_group_by_rate(layer, sh.rate, &group) < 0) {
+ pr_debug("Could not find leak group for se with rate: %d",
+ sh.rate);
+ return -EINVAL;
+ }
+
+ lg = &layers[layer].leak_groups[group];
+
+ pr_debug("Found matching group (speed: %d)\n", lg->max_rate);
+
+ if (sh.rate < SPX5_SE_RATE_MIN || sh.burst < SPX5_SE_BURST_MIN)
+ return -EINVAL;
+
+ /* Calculate committed rate and burst */
+ sh.rate = DIV_ROUND_UP(sh.rate, lg->resolution);
+ sh.burst = DIV_ROUND_UP(sh.burst, SPX5_SE_BURST_UNIT);
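+ /* (e.g. 100 Mbit (sh.rate = 100000) in leak group 0 with
+ * resolution 8 gives a committed rate of 12500; a max_size of
+ * 8192 bytes gives a burst of 2 units of SPX5_SE_BURST_UNIT
+ * (4096) bytes)
+ */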
+
+ if (sh.rate > SPX5_SE_RATE_MAX || sh.burst > SPX5_SE_BURST_MAX)
+ return -EINVAL;
+
+ return sparx5_shaper_conf_set(port, &sh, layer, idx, group);
+}
+
+int sparx5_tc_tbf_del(struct sparx5_port *port, u32 layer, u32 idx)
+{
+ struct sparx5_shaper sh = {0};
+ u32 group;
+
+ sparx5_lg_get_group_by_index(port->sparx5, layer, idx, &group);
+
+ return sparx5_shaper_conf_set(port, &sh, layer, idx, group);
+}
+
+int sparx5_tc_ets_add(struct sparx5_port *port,
+ struct tc_ets_qopt_offload_replace_params *params)
+{
+ struct sparx5_dwrr dwrr = {0};
+ /* Minimum weight across all dwrr bands */
+ unsigned int w_min = 100;
+ int i;
+
+ /* Find minimum weight for all dwrr bands */
+ for (i = 0; i < SPX5_PRIOS; i++) {
+ if (params->quanta[i] == 0)
+ continue;
+ w_min = min(w_min, params->weights[i]);
+ }
+
+ for (i = 0; i < SPX5_PRIOS; i++) {
+ /* Strict band; skip */
+ if (params->quanta[i] == 0)
+ continue;
+
+ dwrr.count++;
+
+ /* On the sparx5, bands with higher indexes are preferred and
+ * arbitrated strict. tc assigns the strict bands to the lower
+ * indexes, so we reverse the bands here.
+ *
+ * Also convert the weight to something the hardware
+ * understands.
+ */
+ dwrr.cost[SPX5_PRIOS - i - 1] =
+ sparx5_weight_to_hw_cost(w_min, params->weights[i]);
+ }
+
+ return sparx5_dwrr_conf_set(port, &dwrr);
+}
+
+int sparx5_tc_ets_del(struct sparx5_port *port)
+{
+ struct sparx5_dwrr dwrr = {0};
+
+ return sparx5_dwrr_conf_set(port, &dwrr);
+}
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_qos.h b/drivers/net/ethernet/microchip/sparx5/sparx5_qos.h
new file mode 100644
index 0000000000..ced35033a6
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_qos.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2022 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#ifndef __SPARX5_QOS_H__
+#define __SPARX5_QOS_H__
+
+#include <linux/netdevice.h>
+
+/* Number of Layers */
+#define SPX5_HSCH_LAYER_CNT 3
+
+/* Scheduling elements per layer */
+#define SPX5_HSCH_L0_SE_CNT 5040
+#define SPX5_HSCH_L1_SE_CNT 64
+#define SPX5_HSCH_L2_SE_CNT 64
+
+/* Calculate Layer 0 Scheduler Element when using normal hierarchy */
+#define SPX5_HSCH_L0_GET_IDX(port, queue) ((64 * (port)) + (8 * (queue)))
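+/* e.g. port 2, queue 3 maps to L0 SE index 64 * 2 + 8 * 3 = 152 */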
+
+/* Number of leak groups */
+#define SPX5_HSCH_LEAK_GRP_CNT 4
+
+/* Scheduler modes */
+#define SPX5_SE_MODE_LINERATE 0
+#define SPX5_SE_MODE_DATARATE 1
+
+/* Rate and burst */
+#define SPX5_SE_RATE_MAX 262143
+#define SPX5_SE_BURST_MAX 127
+#define SPX5_SE_RATE_MIN 1
+#define SPX5_SE_BURST_MIN 1
+#define SPX5_SE_BURST_UNIT 4096
+
+/* Dwrr */
+#define SPX5_DWRR_COST_MAX 63
+
+struct sparx5_shaper {
+ u32 mode;
+ u32 rate;
+ u32 burst;
+};
+
+struct sparx5_lg {
+ u32 max_rate;
+ u32 resolution;
+ u32 leak_time;
+ u32 max_ses;
+};
+
+struct sparx5_layer {
+ struct sparx5_lg leak_groups[SPX5_HSCH_LEAK_GRP_CNT];
+};
+
+struct sparx5_dwrr {
+ u32 count; /* Number of inputs running dwrr */
+ u8 cost[SPX5_PRIOS];
+};
+
+int sparx5_qos_init(struct sparx5 *sparx5);
+
+/* Multi-Queue Priority */
+int sparx5_tc_mqprio_add(struct net_device *ndev, u8 num_tc);
+int sparx5_tc_mqprio_del(struct net_device *ndev);
+
+/* Token Bucket Filter */
+struct tc_tbf_qopt_offload_replace_params;
+int sparx5_tc_tbf_add(struct sparx5_port *port,
+ struct tc_tbf_qopt_offload_replace_params *params,
+ u32 layer, u32 idx);
+int sparx5_tc_tbf_del(struct sparx5_port *port, u32 layer, u32 idx);
+
+/* Enhanced Transmission Selection */
+struct tc_ets_qopt_offload_replace_params;
+int sparx5_tc_ets_add(struct sparx5_port *port,
+ struct tc_ets_qopt_offload_replace_params *params);
+
+int sparx5_tc_ets_del(struct sparx5_port *port);
+
+#endif /* __SPARX5_QOS_H__ */
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_sdlb.c b/drivers/net/ethernet/microchip/sparx5/sparx5_sdlb.c
new file mode 100644
index 0000000000..f5267218ca
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_sdlb.c
@@ -0,0 +1,335 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2023 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include "sparx5_main_regs.h"
+#include "sparx5_main.h"
+
+struct sparx5_sdlb_group sdlb_groups[SPX5_SDLB_GROUP_CNT] = {
+ { SPX5_SDLB_GROUP_RATE_MAX, 8192 / 1, 64 }, /* 25 G */
+ { 15000000000ULL, 8192 / 1, 64 }, /* 15 G */
+ { 10000000000ULL, 8192 / 1, 64 }, /* 10 G */
+ { 5000000000ULL, 8192 / 1, 64 }, /* 5 G */
+ { 2500000000ULL, 8192 / 1, 64 }, /* 2.5 G */
+ { 1000000000ULL, 8192 / 2, 64 }, /* 1 G */
+ { 500000000ULL, 8192 / 2, 64 }, /* 500 M */
+ { 100000000ULL, 8192 / 4, 64 }, /* 100 M */
+ { 50000000ULL, 8192 / 4, 64 }, /* 50 M */
+ { 5000000ULL, 8192 / 8, 64 } /* 5 M */
+};
+
+int sparx5_sdlb_clk_hz_get(struct sparx5 *sparx5)
+{
+ u32 clk_per_100ps;
+ u64 clk_hz;
+
+ clk_per_100ps = HSCH_SYS_CLK_PER_100PS_GET(spx5_rd(sparx5,
+ HSCH_SYS_CLK_PER));
+ if (!clk_per_100ps)
+ clk_per_100ps = SPX5_CLK_PER_100PS_DEFAULT;
+
+ clk_hz = (10 * 1000 * 1000) / clk_per_100ps;
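+ /* e.g. clk_per_100ps = 16 (a 1.6 ns cycle) yields 625 MHz */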
+ return clk_hz * 1000;
+}
+
+static int sparx5_sdlb_pup_interval_get(struct sparx5 *sparx5, u32 max_token,
+ u64 max_rate)
+{
+ u64 clk_hz;
+
+ clk_hz = sparx5_sdlb_clk_hz_get(sparx5);
+
+ return div64_u64((8 * clk_hz * max_token), max_rate);
+}
+
+int sparx5_sdlb_pup_token_get(struct sparx5 *sparx5, u32 pup_interval, u64 rate)
+{
+ u64 clk_hz;
+
+ if (!rate)
+ return SPX5_SDLB_PUP_TOKEN_DISABLE;
+
+ clk_hz = sparx5_sdlb_clk_hz_get(sparx5);
+
+ return DIV64_U64_ROUND_UP((rate * pup_interval), (clk_hz * 8));
+}
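+/* Illustrative numbers: with a 625 MHz core clock and a pup_interval of
+ * 8192 clock cycles (one update every ~13.1 us), sustaining 1 Gbit/s
+ * (rate = 10^9) requires DIV64_U64_ROUND_UP(10^9 * 8192, 625e6 * 8) =
+ * 1639 byte tokens per update.
+ */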
+
+static void sparx5_sdlb_group_disable(struct sparx5 *sparx5, u32 group)
+{
+ spx5_rmw(ANA_AC_SDLB_PUP_CTRL_PUP_ENA_SET(0),
+ ANA_AC_SDLB_PUP_CTRL_PUP_ENA, sparx5,
+ ANA_AC_SDLB_PUP_CTRL(group));
+}
+
+static void sparx5_sdlb_group_enable(struct sparx5 *sparx5, u32 group)
+{
+ spx5_rmw(ANA_AC_SDLB_PUP_CTRL_PUP_ENA_SET(1),
+ ANA_AC_SDLB_PUP_CTRL_PUP_ENA, sparx5,
+ ANA_AC_SDLB_PUP_CTRL(group));
+}
+
+static u32 sparx5_sdlb_group_get_first(struct sparx5 *sparx5, u32 group)
+{
+ u32 val;
+
+ val = spx5_rd(sparx5, ANA_AC_SDLB_XLB_START(group));
+
+ return ANA_AC_SDLB_XLB_START_LBSET_START_GET(val);
+}
+
+static u32 sparx5_sdlb_group_get_next(struct sparx5 *sparx5, u32 group,
+ u32 lb)
+{
+ u32 val;
+
+ val = spx5_rd(sparx5, ANA_AC_SDLB_XLB_NEXT(lb));
+
+ return ANA_AC_SDLB_XLB_NEXT_LBSET_NEXT_GET(val);
+}
+
+static bool sparx5_sdlb_group_is_first(struct sparx5 *sparx5, u32 group,
+ u32 lb)
+{
+ return lb == sparx5_sdlb_group_get_first(sparx5, group);
+}
+
+static bool sparx5_sdlb_group_is_last(struct sparx5 *sparx5, u32 group,
+ u32 lb)
+{
+ return lb == sparx5_sdlb_group_get_next(sparx5, group, lb);
+}
+
+static bool sparx5_sdlb_group_is_empty(struct sparx5 *sparx5, u32 group)
+{
+ u32 val;
+
+ val = spx5_rd(sparx5, ANA_AC_SDLB_PUP_CTRL(group));
+
+ return ANA_AC_SDLB_PUP_CTRL_PUP_ENA_GET(val) == 0;
+}
+
+static u32 sparx5_sdlb_group_get_last(struct sparx5 *sparx5, u32 group)
+{
+ u32 itr, next;
+
+ itr = sparx5_sdlb_group_get_first(sparx5, group);
+
+ for (;;) {
+ next = sparx5_sdlb_group_get_next(sparx5, group, itr);
+ if (itr == next)
+ return itr;
+
+ itr = next;
+ }
+}
+
+static bool sparx5_sdlb_group_is_singular(struct sparx5 *sparx5, u32 group)
+{
+ if (sparx5_sdlb_group_is_empty(sparx5, group))
+ return false;
+
+ return sparx5_sdlb_group_get_first(sparx5, group) ==
+ sparx5_sdlb_group_get_last(sparx5, group);
+}
+
+static int sparx5_sdlb_group_get_adjacent(struct sparx5 *sparx5, u32 group,
+ u32 idx, u32 *prev, u32 *next,
+ u32 *first)
+{
+ u32 itr;
+
+ *first = sparx5_sdlb_group_get_first(sparx5, group);
+ *prev = *first;
+ *next = *first;
+ itr = *first;
+
+ for (;;) {
+ *next = sparx5_sdlb_group_get_next(sparx5, group, itr);
+
+ if (itr == idx)
+ return 0; /* Found it */
+
+ if (itr == *next)
+ return -EINVAL; /* Was not found */
+
+ *prev = itr;
+ itr = *next;
+ }
+}
+
+static int sparx5_sdlb_group_get_count(struct sparx5 *sparx5, u32 group)
+{
+ u32 itr, next;
+ int count = 0;
+
+ itr = sparx5_sdlb_group_get_first(sparx5, group);
+
+ for (;;) {
+ next = sparx5_sdlb_group_get_next(sparx5, group, itr);
+ if (itr == next)
+ return count;
+
+ itr = next;
+ count++;
+ }
+}
+
+int sparx5_sdlb_group_get_by_rate(struct sparx5 *sparx5, u32 rate, u32 burst)
+{
+ const struct sparx5_sdlb_group *group;
+ u64 rate_bps;
+ int i, count;
+
+ rate_bps = (u64)rate * 1000;
+
+ for (i = SPX5_SDLB_GROUP_CNT - 1; i >= 0; i--) {
+ group = &sdlb_groups[i];
+
+ count = sparx5_sdlb_group_get_count(sparx5, i);
+
+ /* Check that this group is not full.
+ * According to LB group configuration rules: the number of XLBs
+ * in a group must not exceed PUP_INTERVAL/4 - 1.
+ */
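+ /* (e.g. a pup_interval of 8192 would allow at most
+ * 8192 / 4 - 1 = 2047 XLBs)
+ */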
+ if (count > ((group->pup_interval / 4) - 1))
+ continue;
+
+ if (rate_bps < group->max_rate)
+ return i;
+ }
+
+ return -ENOSPC;
+}
+
+int sparx5_sdlb_group_get_by_index(struct sparx5 *sparx5, u32 idx, u32 *group)
+{
+ u32 itr, next;
+ int i;
+
+ for (i = 0; i < SPX5_SDLB_GROUP_CNT; i++) {
+ if (sparx5_sdlb_group_is_empty(sparx5, i))
+ continue;
+
+ itr = sparx5_sdlb_group_get_first(sparx5, i);
+
+ for (;;) {
+ next = sparx5_sdlb_group_get_next(sparx5, i, itr);
+
+ if (itr == idx) {
+ *group = i;
+ return 0; /* Found it */
+ }
+ if (itr == next)
+ break; /* Was not found */
+
+ itr = next;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int sparx5_sdlb_group_link(struct sparx5 *sparx5, u32 group, u32 idx,
+ u32 first, u32 next, bool empty)
+{
+ /* Stop leaking */
+ sparx5_sdlb_group_disable(sparx5, group);
+
+ if (empty)
+ return 0;
+
+ /* Link insertion lb to next lb */
+ spx5_wr(ANA_AC_SDLB_XLB_NEXT_LBSET_NEXT_SET(next) |
+ ANA_AC_SDLB_XLB_NEXT_LBGRP_SET(group),
+ sparx5, ANA_AC_SDLB_XLB_NEXT(idx));
+
+ /* Set the first lb */
+ spx5_wr(ANA_AC_SDLB_XLB_START_LBSET_START_SET(first), sparx5,
+ ANA_AC_SDLB_XLB_START(group));
+
+ /* Start leaking */
+ sparx5_sdlb_group_enable(sparx5, group);
+
+ return 0;
+}
+
+int sparx5_sdlb_group_add(struct sparx5 *sparx5, u32 group, u32 idx)
+{
+ u32 first, next;
+
+ /* We always add to head of the list */
+ first = idx;
+
+ if (sparx5_sdlb_group_is_empty(sparx5, group))
+ next = idx;
+ else
+ next = sparx5_sdlb_group_get_first(sparx5, group);
+
+ return sparx5_sdlb_group_link(sparx5, group, idx, first, next, false);
+}
+
+int sparx5_sdlb_group_del(struct sparx5 *sparx5, u32 group, u32 idx)
+{
+ u32 first, next, prev;
+ bool empty = false;
+
+ if (sparx5_sdlb_group_get_adjacent(sparx5, group, idx, &prev, &next,
+ &first) < 0) {
+ pr_err("%s:%d Could not find idx: %d in group: %d", __func__,
+ __LINE__, idx, group);
+ return -EINVAL;
+ }
+
+ if (sparx5_sdlb_group_is_singular(sparx5, group)) {
+ empty = true;
+ } else if (sparx5_sdlb_group_is_last(sparx5, group, idx)) {
+ /* idx is removed, prev is now last */
+ idx = prev;
+ next = prev;
+ } else if (sparx5_sdlb_group_is_first(sparx5, group, idx)) {
+ /* idx is removed and points to itself, first is next */
+ first = next;
+ next = idx;
+ } else {
+ /* Next is not touched */
+ idx = prev;
+ }
+
+ return sparx5_sdlb_group_link(sparx5, group, idx, first, next, empty);
+}
+
+void sparx5_sdlb_group_init(struct sparx5 *sparx5, u64 max_rate, u32 min_burst,
+ u32 frame_size, u32 idx)
+{
+ u32 thres_shift, mask = 0x01, power = 0;
+ struct sparx5_sdlb_group *group;
+ u64 max_token;
+
+ group = &sdlb_groups[idx];
+
+ /* Number of positions to right-shift LB's threshold value. */
+ while ((min_burst & mask) == 0) {
+ power++;
+ mask <<= 1;
+ }
+ thres_shift = SPX5_SDLB_2CYCLES_TYPE2_THRES_OFFSET - power;
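+ /* (e.g. min_burst = 2048 = 2^11 gives power = 11, i.e. a threshold
+ * shift 11 below the type2 threshold offset)
+ */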
+
+ max_token = (min_burst > SPX5_SDLB_PUP_TOKEN_MAX) ?
+ SPX5_SDLB_PUP_TOKEN_MAX :
+ min_burst;
+ group->pup_interval =
+ sparx5_sdlb_pup_interval_get(sparx5, max_token, max_rate);
+
+ group->frame_size = frame_size;
+
+ spx5_wr(ANA_AC_SDLB_PUP_INTERVAL_PUP_INTERVAL_SET(group->pup_interval),
+ sparx5, ANA_AC_SDLB_PUP_INTERVAL(idx));
+
+ spx5_wr(ANA_AC_SDLB_FRM_RATE_TOKENS_FRM_RATE_TOKENS_SET(frame_size),
+ sparx5, ANA_AC_SDLB_FRM_RATE_TOKENS(idx));
+
+ spx5_wr(ANA_AC_SDLB_LBGRP_MISC_THRES_SHIFT_SET(thres_shift), sparx5,
+ ANA_AC_SDLB_LBGRP_MISC(idx));
+}
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c b/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c
new file mode 100644
index 0000000000..4af85d108a
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c
@@ -0,0 +1,763 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include <linux/if_bridge.h>
+#include <net/switchdev.h>
+
+#include "sparx5_main_regs.h"
+#include "sparx5_main.h"
+
+static struct workqueue_struct *sparx5_owq;
+
+struct sparx5_switchdev_event_work {
+ struct work_struct work;
+ struct switchdev_notifier_fdb_info fdb_info;
+ struct net_device *dev;
+ struct sparx5 *sparx5;
+ unsigned long event;
+};
+
+static int sparx5_port_attr_pre_bridge_flags(struct sparx5_port *port,
+ struct switchdev_brport_flags flags)
+{
+ if (flags.mask & ~(BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD))
+ return -EINVAL;
+
+ return 0;
+}
+
+static void sparx5_port_update_mcast_ip_flood(struct sparx5_port *port, bool flood_flag)
+{
+ bool should_flood = flood_flag || port->is_mrouter;
+ int pgid;
+
+ for (pgid = PGID_IPV4_MC_DATA; pgid <= PGID_IPV6_MC_CTRL; pgid++)
+ sparx5_pgid_update_mask(port, pgid, should_flood);
+}
+
+static void sparx5_port_attr_bridge_flags(struct sparx5_port *port,
+ struct switchdev_brport_flags flags)
+{
+ if (flags.mask & BR_MCAST_FLOOD) {
+ sparx5_pgid_update_mask(port, PGID_MC_FLOOD, !!(flags.val & BR_MCAST_FLOOD));
+ sparx5_port_update_mcast_ip_flood(port, !!(flags.val & BR_MCAST_FLOOD));
+ }
+
+ if (flags.mask & BR_FLOOD)
+ sparx5_pgid_update_mask(port, PGID_UC_FLOOD, !!(flags.val & BR_FLOOD));
+ if (flags.mask & BR_BCAST_FLOOD)
+ sparx5_pgid_update_mask(port, PGID_BCAST, !!(flags.val & BR_BCAST_FLOOD));
+}
+
+static void sparx5_attr_stp_state_set(struct sparx5_port *port,
+ u8 state)
+{
+ struct sparx5 *sparx5 = port->sparx5;
+
+ if (!test_bit(port->portno, sparx5->bridge_mask)) {
+ netdev_err(port->ndev,
+ "Controlling non-bridged port %d?\n", port->portno);
+ return;
+ }
+
+ switch (state) {
+ case BR_STATE_FORWARDING:
+ set_bit(port->portno, sparx5->bridge_fwd_mask);
+ fallthrough;
+ case BR_STATE_LEARNING:
+ set_bit(port->portno, sparx5->bridge_lrn_mask);
+ break;
+
+ default:
+ /* All other states treated as blocking */
+ clear_bit(port->portno, sparx5->bridge_fwd_mask);
+ clear_bit(port->portno, sparx5->bridge_lrn_mask);
+ break;
+ }
+
+ /* apply the bridge_fwd_mask to all the ports */
+ sparx5_update_fwd(sparx5);
+}
+
+static void sparx5_port_attr_ageing_set(struct sparx5_port *port,
+ unsigned long ageing_clock_t)
+{
+ unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
+ u32 ageing_time = jiffies_to_msecs(ageing_jiffies);
+
+ sparx5_set_ageing(port->sparx5, ageing_time);
+}
+
+static void sparx5_port_attr_mrouter_set(struct sparx5_port *port,
+ struct net_device *orig_dev,
+ bool enable)
+{
+ struct sparx5 *sparx5 = port->sparx5;
+ struct sparx5_mdb_entry *e;
+ bool flood_flag;
+
+ if ((enable && port->is_mrouter) || (!enable && !port->is_mrouter))
+ return;
+
+ /* Add/del mrouter port on all active mdb entries in HW.
+ * Don't change entry port mask, since that represents
+ * ports that actually joined that group.
+ */
+ mutex_lock(&sparx5->mdb_lock);
+ list_for_each_entry(e, &sparx5->mdb_entries, list) {
+ if (!test_bit(port->portno, e->port_mask) &&
+ ether_addr_is_ip_mcast(e->addr))
+ sparx5_pgid_update_mask(port, e->pgid_idx, enable);
+ }
+ mutex_unlock(&sparx5->mdb_lock);
+
+ /* Enable/disable flooding depending on if port is mrouter port
+ * or if mcast flood is enabled.
+ */
+ port->is_mrouter = enable;
+ flood_flag = br_port_flag_is_set(port->ndev, BR_MCAST_FLOOD);
+ sparx5_port_update_mcast_ip_flood(port, flood_flag);
+}
+
+static int sparx5_port_attr_set(struct net_device *dev, const void *ctx,
+ const struct switchdev_attr *attr,
+ struct netlink_ext_ack *extack)
+{
+ struct sparx5_port *port = netdev_priv(dev);
+
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
+ return sparx5_port_attr_pre_bridge_flags(port,
+ attr->u.brport_flags);
+ case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
+ sparx5_port_attr_bridge_flags(port, attr->u.brport_flags);
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
+ sparx5_attr_stp_state_set(port, attr->u.stp_state);
+ break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
+ sparx5_port_attr_ageing_set(port, attr->u.ageing_time);
+ break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
+ /* Use PVID 1 when the default_pvid is 0, to avoid
+ * collisions with non-bridged ports.
+ */
+ if (port->pvid == 0)
+ port->pvid = 1;
+ port->vlan_aware = attr->u.vlan_filtering;
+ sparx5_vlan_port_apply(port->sparx5, port);
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_MROUTER:
+ sparx5_port_attr_mrouter_set(port,
+ attr->orig_dev,
+ attr->u.mrouter);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int sparx5_port_bridge_join(struct sparx5_port *port,
+ struct net_device *bridge,
+ struct netlink_ext_ack *extack)
+{
+ struct sparx5 *sparx5 = port->sparx5;
+ struct net_device *ndev = port->ndev;
+ int err;
+
+ if (bitmap_empty(sparx5->bridge_mask, SPX5_PORTS))
+ /* First bridged port */
+ sparx5->hw_bridge_dev = bridge;
+ else if (sparx5->hw_bridge_dev != bridge)
+ /* Adding the port to a second bridge is unsupported */
+ return -ENODEV;
+
+ set_bit(port->portno, sparx5->bridge_mask);
+
+ err = switchdev_bridge_port_offload(ndev, ndev, NULL, NULL, NULL,
+ false, extack);
+ if (err)
+ goto err_switchdev_offload;
+
+ /* Remove standalone port entry */
+ sparx5_mact_forget(sparx5, ndev->dev_addr, 0);
+
+ /* The port enters bridge mode, so multicast frames no longer need
+ * to be copied to the CPU unless the bridge requests them.
+ */
+ __dev_mc_unsync(ndev, sparx5_mc_unsync);
+
+ return 0;
+
+err_switchdev_offload:
+ clear_bit(port->portno, sparx5->bridge_mask);
+ return err;
+}
+
+static void sparx5_port_bridge_leave(struct sparx5_port *port,
+ struct net_device *bridge)
+{
+ struct sparx5 *sparx5 = port->sparx5;
+
+ switchdev_bridge_port_unoffload(port->ndev, NULL, NULL, NULL);
+
+ clear_bit(port->portno, sparx5->bridge_mask);
+ if (bitmap_empty(sparx5->bridge_mask, SPX5_PORTS))
+ sparx5->hw_bridge_dev = NULL;
+
+ /* Clear bridge vlan settings before updating the port settings */
+ port->vlan_aware = 0;
+ port->pvid = NULL_VID;
+ port->vid = NULL_VID;
+
+ /* Forward frames to CPU */
+ sparx5_mact_learn(sparx5, PGID_CPU, port->ndev->dev_addr, 0);
+
+ /* The port returns to host mode, therefore restore the mc list */
+ __dev_mc_sync(port->ndev, sparx5_mc_sync, sparx5_mc_unsync);
+}
+
+static int sparx5_port_changeupper(struct net_device *dev,
+ struct netdev_notifier_changeupper_info *info)
+{
+ struct sparx5_port *port = netdev_priv(dev);
+ struct netlink_ext_ack *extack;
+ int err = 0;
+
+ extack = netdev_notifier_info_to_extack(&info->info);
+
+ if (netif_is_bridge_master(info->upper_dev)) {
+ if (info->linking)
+ err = sparx5_port_bridge_join(port, info->upper_dev,
+ extack);
+ else
+ sparx5_port_bridge_leave(port, info->upper_dev);
+
+ sparx5_vlan_port_apply(port->sparx5, port);
+ }
+
+ return err;
+}
+
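+/* Learn the port MAC address towards the CPU port group when the
+ * interface comes up, and forget it again when it goes down.
+ */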
+static int sparx5_port_add_addr(struct net_device *dev, bool up)
+{
+ struct sparx5_port *port = netdev_priv(dev);
+ struct sparx5 *sparx5 = port->sparx5;
+ u16 vid = port->pvid;
+
+ if (up)
+ sparx5_mact_learn(sparx5, PGID_CPU, port->ndev->dev_addr, vid);
+ else
+ sparx5_mact_forget(sparx5, port->ndev->dev_addr, vid);
+
+ return 0;
+}
+
+static int sparx5_netdevice_port_event(struct net_device *dev,
+ struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ int err = 0;
+
+ if (!sparx5_netdevice_check(dev))
+ return 0;
+
+ switch (event) {
+ case NETDEV_CHANGEUPPER:
+ err = sparx5_port_changeupper(dev, ptr);
+ break;
+ case NETDEV_PRE_UP:
+ err = sparx5_port_add_addr(dev, true);
+ break;
+ case NETDEV_DOWN:
+ err = sparx5_port_add_addr(dev, false);
+ break;
+ }
+
+ return err;
+}
+
+static int sparx5_netdevice_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ int ret = 0;
+
+ ret = sparx5_netdevice_port_event(dev, nb, event, ptr);
+
+ return notifier_from_errno(ret);
+}
+
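+/* Deferred FDB add/del handling; runs under rtnl on the ordered
+ * workqueue, since the switchdev FDB notifier itself may be called in
+ * atomic context.
+ */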
+static void sparx5_switchdev_bridge_fdb_event_work(struct work_struct *work)
+{
+ struct sparx5_switchdev_event_work *switchdev_work =
+ container_of(work, struct sparx5_switchdev_event_work, work);
+ struct net_device *dev = switchdev_work->dev;
+ struct switchdev_notifier_fdb_info *fdb_info;
+ struct sparx5_port *port;
+ struct sparx5 *sparx5;
+ bool host_addr;
+ u16 vid;
+
+ rtnl_lock();
+ if (!sparx5_netdevice_check(dev)) {
+ host_addr = true;
+ sparx5 = switchdev_work->sparx5;
+ } else {
+ host_addr = false;
+ sparx5 = switchdev_work->sparx5;
+ port = netdev_priv(dev);
+ }
+
+ fdb_info = &switchdev_work->fdb_info;
+
+ /* Use PVID 1 when the default_pvid is 0, to avoid
+ * collisions with non-bridged ports.
+ */
+ if (fdb_info->vid == 0)
+ vid = 1;
+ else
+ vid = fdb_info->vid;
+
+ switch (switchdev_work->event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ if (host_addr)
+ sparx5_add_mact_entry(sparx5, dev, PGID_CPU,
+ fdb_info->addr, vid);
+ else
+ sparx5_add_mact_entry(sparx5, port->ndev, port->portno,
+ fdb_info->addr, vid);
+ break;
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ sparx5_del_mact_entry(sparx5, fdb_info->addr, vid);
+ break;
+ }
+
+ rtnl_unlock();
+ kfree(switchdev_work->fdb_info.addr);
+ kfree(switchdev_work);
+ dev_put(dev);
+}
+
+static void sparx5_schedule_work(struct work_struct *work)
+{
+ queue_work(sparx5_owq, work);
+}
+
+static int sparx5_switchdev_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+ struct sparx5_switchdev_event_work *switchdev_work;
+ struct switchdev_notifier_fdb_info *fdb_info;
+ struct switchdev_notifier_info *info = ptr;
+ struct sparx5 *spx5;
+ int err;
+
+ spx5 = container_of(nb, struct sparx5, switchdev_nb);
+
+ switch (event) {
+ case SWITCHDEV_PORT_ATTR_SET:
+ err = switchdev_handle_port_attr_set(dev, ptr,
+ sparx5_netdevice_check,
+ sparx5_port_attr_set);
+ return notifier_from_errno(err);
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ fallthrough;
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
+ if (!switchdev_work)
+ return NOTIFY_BAD;
+
+ switchdev_work->dev = dev;
+ switchdev_work->event = event;
+ switchdev_work->sparx5 = spx5;
+
+ fdb_info = container_of(info,
+ struct switchdev_notifier_fdb_info,
+ info);
+ INIT_WORK(&switchdev_work->work,
+ sparx5_switchdev_bridge_fdb_event_work);
+ memcpy(&switchdev_work->fdb_info, ptr,
+ sizeof(switchdev_work->fdb_info));
+ switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
+ if (!switchdev_work->fdb_info.addr)
+ goto err_addr_alloc;
+
+ ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
+ fdb_info->addr);
+ dev_hold(dev);
+
+ sparx5_schedule_work(&switchdev_work->work);
+ break;
+ }
+
+ return NOTIFY_DONE;
+err_addr_alloc:
+ kfree(switchdev_work);
+ return NOTIFY_BAD;
+}
+
+static int sparx5_handle_port_vlan_add(struct net_device *dev,
+ struct notifier_block *nb,
+ const struct switchdev_obj_port_vlan *v)
+{
+ struct sparx5_port *port = netdev_priv(dev);
+
+ if (netif_is_bridge_master(dev)) {
+ struct sparx5 *sparx5 =
+ container_of(nb, struct sparx5,
+ switchdev_blocking_nb);
+
+ /* Flood broadcast to CPU */
+ sparx5_mact_learn(sparx5, PGID_BCAST, dev->broadcast,
+ v->vid);
+ return 0;
+ }
+
+ if (!sparx5_netdevice_check(dev))
+ return -EOPNOTSUPP;
+
+ return sparx5_vlan_vid_add(port, v->vid,
+ v->flags & BRIDGE_VLAN_INFO_PVID,
+ v->flags & BRIDGE_VLAN_INFO_UNTAGGED);
+}
+
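+/* Allocate an MDB entry backed by a newly allocated multicast PGID and
+ * add it to the list of MDB entries.
+ */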
+static int sparx5_alloc_mdb_entry(struct sparx5 *sparx5,
+ const unsigned char *addr,
+ u16 vid,
+ struct sparx5_mdb_entry **entry_out)
+{
+ struct sparx5_mdb_entry *entry;
+ u16 pgid_idx;
+ int err;
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ err = sparx5_pgid_alloc_mcast(sparx5, &pgid_idx);
+ if (err) {
+ kfree(entry);
+ return err;
+ }
+
+ memcpy(entry->addr, addr, ETH_ALEN);
+ entry->vid = vid;
+ entry->pgid_idx = pgid_idx;
+
+ mutex_lock(&sparx5->mdb_lock);
+ list_add_tail(&entry->list, &sparx5->mdb_entries);
+ mutex_unlock(&sparx5->mdb_lock);
+
+ *entry_out = entry;
+ return 0;
+}
+
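+/* Free the first MDB entry matching the address; a vid of 0 acts as a
+ * wildcard and matches any VLAN.
+ */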
+static void sparx5_free_mdb_entry(struct sparx5 *sparx5,
+ const unsigned char *addr,
+ u16 vid)
+{
+ struct sparx5_mdb_entry *entry, *tmp;
+
+ mutex_lock(&sparx5->mdb_lock);
+ list_for_each_entry_safe(entry, tmp, &sparx5->mdb_entries, list) {
+ if ((vid == 0 || entry->vid == vid) &&
+ ether_addr_equal(addr, entry->addr)) {
+ list_del(&entry->list);
+
+ sparx5_pgid_free(sparx5, entry->pgid_idx);
+ kfree(entry);
+ goto out;
+ }
+ }
+
+out:
+ mutex_unlock(&sparx5->mdb_lock);
+}
+
+static struct sparx5_mdb_entry *sparx5_mdb_get_entry(struct sparx5 *sparx5,
+ const unsigned char *addr,
+ u16 vid)
+{
+ struct sparx5_mdb_entry *e, *found = NULL;
+
+ mutex_lock(&sparx5->mdb_lock);
+ list_for_each_entry(e, &sparx5->mdb_entries, list) {
+ if (ether_addr_equal(e->addr, addr) && e->vid == vid) {
+ found = e;
+ goto out;
+ }
+ }
+
+out:
+ mutex_unlock(&sparx5->mdb_lock);
+ return found;
+}
+
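+/* Enable or disable the copy-to-CPU flag of a PGID entry */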
+static void sparx5_cpu_copy_ena(struct sparx5 *spx5, u16 pgid, bool enable)
+{
+ spx5_rmw(ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_SET(enable),
+ ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA, spx5,
+ ANA_AC_PGID_MISC_CFG(pgid));
+}
+
+static int sparx5_handle_port_mdb_add(struct net_device *dev,
+ struct notifier_block *nb,
+ const struct switchdev_obj_port_mdb *v)
+{
+ struct sparx5_port *port = netdev_priv(dev);
+ struct sparx5 *spx5 = port->sparx5;
+ struct sparx5_mdb_entry *entry;
+ bool is_host, is_new;
+ int err, i;
+ u16 vid;
+
+ if (!sparx5_netdevice_check(dev))
+ return -EOPNOTSUPP;
+
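+ /* Entries targeting the bridge device itself are host entries and
+ * are handled via CPU copy instead of a port mask bit.
+ */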
+ is_host = netif_is_bridge_master(v->obj.orig_dev);
+
+ /* When the bridge is VLAN unaware, the VLAN value is not parsed and
+ * we receive vid 0. Fall back to bridge vid 1.
+ */
+ if (!br_vlan_enabled(spx5->hw_bridge_dev))
+ vid = 1;
+ else
+ vid = v->vid;
+
+ is_new = false;
+ entry = sparx5_mdb_get_entry(spx5, v->addr, vid);
+ if (!entry) {
+ err = sparx5_alloc_mdb_entry(spx5, v->addr, vid, &entry);
+ if (err)
+ return err;
+ is_new = true;
+ }
+
+ mutex_lock(&spx5->mdb_lock);
+
+ /* Add any mrouter ports to the new entry */
+ if (is_new && ether_addr_is_ip_mcast(v->addr))
+ for (i = 0; i < SPX5_PORTS; i++)
+ if (spx5->ports[i] && spx5->ports[i]->is_mrouter)
+ sparx5_pgid_update_mask(spx5->ports[i],
+ entry->pgid_idx,
+ true);
+
+ if (is_host && !entry->cpu_copy) {
+ sparx5_cpu_copy_ena(spx5, entry->pgid_idx, true);
+ entry->cpu_copy = true;
+ } else if (!is_host) {
+ sparx5_pgid_update_mask(port, entry->pgid_idx, true);
+ set_bit(port->portno, entry->port_mask);
+ }
+ mutex_unlock(&spx5->mdb_lock);
+
+ sparx5_mact_learn(spx5, entry->pgid_idx, entry->addr, entry->vid);
+
+ return 0;
+}
+
+static int sparx5_handle_port_mdb_del(struct net_device *dev,
+ struct notifier_block *nb,
+ const struct switchdev_obj_port_mdb *v)
+{
+ struct sparx5_port *port = netdev_priv(dev);
+ struct sparx5 *spx5 = port->sparx5;
+ struct sparx5_mdb_entry *entry;
+ bool is_host;
+ u16 vid;
+
+ if (!sparx5_netdevice_check(dev))
+ return -EOPNOTSUPP;
+
+ is_host = netif_is_bridge_master(v->obj.orig_dev);
+
+ if (!br_vlan_enabled(spx5->hw_bridge_dev))
+ vid = 1;
+ else
+ vid = v->vid;
+
+ entry = sparx5_mdb_get_entry(spx5, v->addr, vid);
+ if (!entry)
+ return 0;
+
+ mutex_lock(&spx5->mdb_lock);
+ if (is_host && entry->cpu_copy) {
+ sparx5_cpu_copy_ena(spx5, entry->pgid_idx, false);
+ entry->cpu_copy = false;
+ } else if (!is_host) {
+ clear_bit(port->portno, entry->port_mask);
+
+ /* If the port is not an mrouter port, or the address is an L2
+ * multicast address, remove the port from the mask.
+ */
+ if (!port->is_mrouter || !ether_addr_is_ip_mcast(v->addr))
+ sparx5_pgid_update_mask(port, entry->pgid_idx, false);
+ }
+ mutex_unlock(&spx5->mdb_lock);
+
+ if (bitmap_empty(entry->port_mask, SPX5_PORTS) && !entry->cpu_copy) {
+ /* Clear the pgid in case mrouter ports exist
+ * that are not part of the group.
+ */
+ sparx5_pgid_clear(spx5, entry->pgid_idx);
+ sparx5_mact_forget(spx5, entry->addr, entry->vid);
+ sparx5_free_mdb_entry(spx5, entry->addr, entry->vid);
+ }
+ return 0;
+}
+
+static int sparx5_handle_port_obj_add(struct net_device *dev,
+ struct notifier_block *nb,
+ struct switchdev_notifier_port_obj_info *info)
+{
+ const struct switchdev_obj *obj = info->obj;
+ int err;
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ err = sparx5_handle_port_vlan_add(dev, nb,
+ SWITCHDEV_OBJ_PORT_VLAN(obj));
+ break;
+ case SWITCHDEV_OBJ_ID_PORT_MDB:
+ case SWITCHDEV_OBJ_ID_HOST_MDB:
+ err = sparx5_handle_port_mdb_add(dev, nb,
+ SWITCHDEV_OBJ_PORT_MDB(obj));
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ info->handled = true;
+ return err;
+}
+
+static int sparx5_handle_port_vlan_del(struct net_device *dev,
+ struct notifier_block *nb,
+ u16 vid)
+{
+ struct sparx5_port *port = netdev_priv(dev);
+ int ret;
+
+ /* Master bridge? */
+ if (netif_is_bridge_master(dev)) {
+ struct sparx5 *sparx5 =
+ container_of(nb, struct sparx5,
+ switchdev_blocking_nb);
+
+ sparx5_mact_forget(sparx5, dev->broadcast, vid);
+ return 0;
+ }
+
+ if (!sparx5_netdevice_check(dev))
+ return -EOPNOTSUPP;
+
+ ret = sparx5_vlan_vid_del(port, vid);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int sparx5_handle_port_obj_del(struct net_device *dev,
+ struct notifier_block *nb,
+ struct switchdev_notifier_port_obj_info *info)
+{
+ const struct switchdev_obj *obj = info->obj;
+ int err;
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ err = sparx5_handle_port_vlan_del(dev, nb,
+ SWITCHDEV_OBJ_PORT_VLAN(obj)->vid);
+ break;
+ case SWITCHDEV_OBJ_ID_PORT_MDB:
+ case SWITCHDEV_OBJ_ID_HOST_MDB:
+ err = sparx5_handle_port_mdb_del(dev, nb,
+ SWITCHDEV_OBJ_PORT_MDB(obj));
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ info->handled = true;
+ return err;
+}
+
+static int sparx5_switchdev_blocking_event(struct notifier_block *nb,
+ unsigned long event,
+ void *ptr)
+{
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+ int err;
+
+ switch (event) {
+ case SWITCHDEV_PORT_OBJ_ADD:
+ err = sparx5_handle_port_obj_add(dev, nb, ptr);
+ return notifier_from_errno(err);
+ case SWITCHDEV_PORT_OBJ_DEL:
+ err = sparx5_handle_port_obj_del(dev, nb, ptr);
+ return notifier_from_errno(err);
+ case SWITCHDEV_PORT_ATTR_SET:
+ err = switchdev_handle_port_attr_set(dev, ptr,
+ sparx5_netdevice_check,
+ sparx5_port_attr_set);
+ return notifier_from_errno(err);
+ }
+
+ return NOTIFY_DONE;
+}
+
+int sparx5_register_notifier_blocks(struct sparx5 *s5)
+{
+ int err;
+
+ s5->netdevice_nb.notifier_call = sparx5_netdevice_event;
+ err = register_netdevice_notifier(&s5->netdevice_nb);
+ if (err)
+ return err;
+
+ s5->switchdev_nb.notifier_call = sparx5_switchdev_event;
+ err = register_switchdev_notifier(&s5->switchdev_nb);
+ if (err)
+ goto err_switchdev_nb;
+
+ s5->switchdev_blocking_nb.notifier_call = sparx5_switchdev_blocking_event;
+ err = register_switchdev_blocking_notifier(&s5->switchdev_blocking_nb);
+ if (err)
+ goto err_switchdev_blocking_nb;
+
+ sparx5_owq = alloc_ordered_workqueue("sparx5_order", 0);
+ if (!sparx5_owq) {
+ err = -ENOMEM;
+ goto err_switchdev_blocking_nb;
+ }
+
+ return 0;
+
+err_switchdev_blocking_nb:
+ unregister_switchdev_notifier(&s5->switchdev_nb);
+err_switchdev_nb:
+ unregister_netdevice_notifier(&s5->netdevice_nb);
+
+ return err;
+}
+
+void sparx5_unregister_notifier_blocks(struct sparx5 *s5)
+{
+ destroy_workqueue(sparx5_owq);
+
+ unregister_switchdev_blocking_notifier(&s5->switchdev_blocking_nb);
+ unregister_switchdev_notifier(&s5->switchdev_nb);
+ unregister_netdevice_notifier(&s5->netdevice_nb);
+}
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_tc.c b/drivers/net/ethernet/microchip/sparx5/sparx5_tc.c
new file mode 100644
index 0000000000..e80f3166db
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_tc.c
@@ -0,0 +1,174 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2022 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include <net/pkt_cls.h>
+#include <net/pkt_sched.h>
+
+#include "sparx5_tc.h"
+#include "sparx5_main.h"
+#include "sparx5_qos.h"
+
+/* tc block handling */
+static LIST_HEAD(sparx5_block_cb_list);
+
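+/* Dispatch a tc block callback to the matchall or flower handler; the
+ * ingress/egress direction is bound by the two wrappers below.
+ */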
+static int sparx5_tc_block_cb(enum tc_setup_type type,
+ void *type_data,
+ void *cb_priv, bool ingress)
+{
+ struct net_device *ndev = cb_priv;
+
+ switch (type) {
+ case TC_SETUP_CLSMATCHALL:
+ return sparx5_tc_matchall(ndev, type_data, ingress);
+ case TC_SETUP_CLSFLOWER:
+ return sparx5_tc_flower(ndev, type_data, ingress);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int sparx5_tc_block_cb_ingress(enum tc_setup_type type,
+ void *type_data,
+ void *cb_priv)
+{
+ return sparx5_tc_block_cb(type, type_data, cb_priv, true);
+}
+
+static int sparx5_tc_block_cb_egress(enum tc_setup_type type,
+ void *type_data,
+ void *cb_priv)
+{
+ return sparx5_tc_block_cb(type, type_data, cb_priv, false);
+}
+
+static int sparx5_tc_setup_block(struct net_device *ndev,
+ struct flow_block_offload *fbo)
+{
+ flow_setup_cb_t *cb;
+
+ if (fbo->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+ cb = sparx5_tc_block_cb_ingress;
+ else if (fbo->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
+ cb = sparx5_tc_block_cb_egress;
+ else
+ return -EOPNOTSUPP;
+
+ return flow_block_cb_setup_simple(fbo, &sparx5_block_cb_list,
+ cb, ndev, ndev, false);
+}
+
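+/* Map a qdisc parent to an HSCH scheduler element: TC_H_ROOT attaches
+ * at the port level (layer 2, indexed by the port number), while any
+ * other parent selects a queue and maps to a layer 0 element.
+ */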
+static void sparx5_tc_get_layer_and_idx(u32 parent, u32 portno, u32 *layer,
+ u32 *idx)
+{
+ if (parent == TC_H_ROOT) {
+ *layer = 2;
+ *idx = portno;
+ } else {
+ u32 queue = TC_H_MIN(parent) - 1;
+ *layer = 0;
+ *idx = SPX5_HSCH_L0_GET_IDX(portno, queue);
+ }
+}
+
+static int sparx5_tc_setup_qdisc_mqprio(struct net_device *ndev,
+ struct tc_mqprio_qopt_offload *m)
+{
+ m->qopt.hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+
+ if (m->qopt.num_tc == 0)
+ return sparx5_tc_mqprio_del(ndev);
+
+ return sparx5_tc_mqprio_add(ndev, m->qopt.num_tc);
+}
+
+static int sparx5_tc_setup_qdisc_tbf(struct net_device *ndev,
+ struct tc_tbf_qopt_offload *qopt)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ u32 layer, se_idx;
+
+ sparx5_tc_get_layer_and_idx(qopt->parent, port->portno, &layer,
+ &se_idx);
+
+ switch (qopt->command) {
+ case TC_TBF_REPLACE:
+ return sparx5_tc_tbf_add(port, &qopt->replace_params, layer,
+ se_idx);
+ case TC_TBF_DESTROY:
+ return sparx5_tc_tbf_del(port, layer, se_idx);
+ case TC_TBF_STATS:
+ return -EOPNOTSUPP;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int sparx5_tc_setup_qdisc_ets(struct net_device *ndev,
+ struct tc_ets_qopt_offload *qopt)
+{
+ struct tc_ets_qopt_offload_replace_params *params =
+ &qopt->replace_params;
+ struct sparx5_port *port = netdev_priv(ndev);
+ int i;
+
+ /* Only allow ets attached at the port (root) level */
+ if (qopt->parent != TC_H_ROOT)
+ return -EOPNOTSUPP;
+
+ switch (qopt->command) {
+ case TC_ETS_REPLACE:
+
+ /* We support eight priorities */
+ if (params->bands != SPX5_PRIOS)
+ return -EOPNOTSUPP;
+
+ /* Sanity checks */
+ for (i = 0; i < SPX5_PRIOS; ++i) {
+ /* The priority map is *always* reversed, e.g. 7 6 5 .. 0 */
+ if (params->priomap[i] != (7 - i))
+ return -EOPNOTSUPP;
+ /* Reject a zero weight handed to us by tc */
+ if (params->quanta[i] && params->weights[i] == 0) {
+ pr_err("Invalid ets configuration; band %d has weight zero\n",
+ i);
+ return -EINVAL;
+ }
+ }
+
+ return sparx5_tc_ets_add(port, params);
+ case TC_ETS_DESTROY:
+
+ return sparx5_tc_ets_del(port);
+ case TC_ETS_GRAFT:
+ return -EOPNOTSUPP;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return -EOPNOTSUPP;
+}
+
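+/* An illustrative (not normative) user space command that satisfies the
+ * constraints of the ETS offload above, assuming a port named eth0:
+ *
+ *   tc qdisc replace dev eth0 root handle 1: ets bands 8 \
+ *      quanta 1000 1000 1000 1000 1000 1000 1000 1000 \
+ *      priomap 7 6 5 4 3 2 1 0
+ */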
+int sparx5_port_setup_tc(struct net_device *ndev, enum tc_setup_type type,
+ void *type_data)
+{
+ switch (type) {
+ case TC_SETUP_BLOCK:
+ return sparx5_tc_setup_block(ndev, type_data);
+ case TC_SETUP_QDISC_MQPRIO:
+ return sparx5_tc_setup_qdisc_mqprio(ndev, type_data);
+ case TC_SETUP_QDISC_TBF:
+ return sparx5_tc_setup_qdisc_tbf(ndev, type_data);
+ case TC_SETUP_QDISC_ETS:
+ return sparx5_tc_setup_qdisc_ets(ndev, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_tc.h b/drivers/net/ethernet/microchip/sparx5/sparx5_tc.h
new file mode 100644
index 0000000000..7ef470b285
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_tc.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2022 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#ifndef __SPARX5_TC_H__
+#define __SPARX5_TC_H__
+
+#include <net/flow_offload.h>
+#include <net/pkt_cls.h>
+#include <linux/netdevice.h>
+
+/* Controls how PORT_MASK is applied */
+enum SPX5_PORT_MASK_MODE {
+ SPX5_PMM_OR_DSTMASK,
+ SPX5_PMM_AND_VLANMASK,
+ SPX5_PMM_REPLACE_PGID,
+ SPX5_PMM_REPLACE_ALL,
+ SPX5_PMM_REDIR_PGID,
+ SPX5_PMM_OR_PGID_MASK,
+};
+
+/* Controls ES0 forwarding */
+enum SPX5_FORWARDING_SEL {
+ SPX5_FWSEL_NO_ACTION,
+ SPX5_FWSEL_COPY_TO_LOOPBACK,
+ SPX5_FWSEL_REDIRECT_TO_LOOPBACK,
+ SPX5_FWSEL_DISCARD,
+};
+
+/* Controls tag A (outer tagging) */
+enum SPX5_OUTER_TAG_SEL {
+ SPX5_OTAG_PORT,
+ SPX5_OTAG_TAG_A,
+ SPX5_OTAG_FORCED_PORT,
+ SPX5_OTAG_UNTAG,
+};
+
+/* Selects TPID for ES0 tag A */
+enum SPX5_TPID_A_SEL {
+ SPX5_TPID_A_8100,
+ SPX5_TPID_A_88A8,
+ SPX5_TPID_A_CUST1,
+ SPX5_TPID_A_CUST2,
+ SPX5_TPID_A_CUST3,
+ SPX5_TPID_A_CLASSIFIED,
+};
+
+/* Selects VID for ES0 tag A */
+enum SPX5_VID_A_SEL {
+ SPX5_VID_A_CLASSIFIED,
+ SPX5_VID_A_VAL,
+ SPX5_VID_A_IFH,
+ SPX5_VID_A_RESERVED,
+};
+
+/* Select PCP source for ES0 tag A */
+enum SPX5_PCP_A_SEL {
+ SPX5_PCP_A_CLASSIFIED,
+ SPX5_PCP_A_VAL,
+ SPX5_PCP_A_RESERVED,
+ SPX5_PCP_A_POPPED,
+ SPX5_PCP_A_MAPPED_0,
+ SPX5_PCP_A_MAPPED_1,
+ SPX5_PCP_A_MAPPED_2,
+ SPX5_PCP_A_MAPPED_3,
+};
+
+/* Select DEI source for ES0 tag A */
+enum SPX5_DEI_A_SEL {
+ SPX5_DEI_A_CLASSIFIED,
+ SPX5_DEI_A_VAL,
+ SPX5_DEI_A_REW,
+ SPX5_DEI_A_POPPED,
+ SPX5_DEI_A_MAPPED_0,
+ SPX5_DEI_A_MAPPED_1,
+ SPX5_DEI_A_MAPPED_2,
+ SPX5_DEI_A_MAPPED_3,
+};
+
+/* Controls tag B (inner tagging) */
+enum SPX5_INNER_TAG_SEL {
+ SPX5_ITAG_NO_PUSH,
+ SPX5_ITAG_PUSH_B_TAG,
+};
+
+/* Selects TPID for ES0 tag B. */
+enum SPX5_TPID_B_SEL {
+ SPX5_TPID_B_8100,
+ SPX5_TPID_B_88A8,
+ SPX5_TPID_B_CUST1,
+ SPX5_TPID_B_CUST2,
+ SPX5_TPID_B_CUST3,
+ SPX5_TPID_B_CLASSIFIED,
+};
+
+int sparx5_port_setup_tc(struct net_device *ndev, enum tc_setup_type type,
+ void *type_data);
+
+int sparx5_tc_matchall(struct net_device *ndev,
+ struct tc_cls_matchall_offload *tmo,
+ bool ingress);
+
+int sparx5_tc_flower(struct net_device *ndev, struct flow_cls_offload *fco,
+ bool ingress);
+
+#endif /* __SPARX5_TC_H__ */
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c b/drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c
new file mode 100644
index 0000000000..523e0c4708
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c
@@ -0,0 +1,1483 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip VCAP API
+ *
+ * Copyright (c) 2022 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include <net/tc_act/tc_gate.h>
+#include <net/tcp.h>
+
+#include "sparx5_tc.h"
+#include "vcap_api.h"
+#include "vcap_api_client.h"
+#include "vcap_tc.h"
+#include "sparx5_main.h"
+#include "sparx5_vcap_impl.h"
+
+#define SPX5_MAX_RULE_SIZE 13 /* allows X1, X2, X4, X6 and X12 rules */
+
+/* Collect keysets and type ids for multiple rules per size */
+struct sparx5_wildcard_rule {
+ bool selected;
+ u8 value;
+ u8 mask;
+ enum vcap_keyfield_set keyset;
+};
+
+struct sparx5_multiple_rules {
+ struct sparx5_wildcard_rule rule[SPX5_MAX_RULE_SIZE];
+};
+
+struct sparx5_tc_flower_template {
+ struct list_head list; /* for insertion in the list of templates */
+ int cid; /* chain id */
+ enum vcap_keyfield_set orig; /* keyset used before the template */
+ enum vcap_keyfield_set keyset; /* new keyset used by template */
+ u16 l3_proto; /* protocol specified in the template */
+};
+
+static int
+sparx5_tc_flower_es0_tpid(struct vcap_tc_flower_parse_usage *st)
+{
+ int err = 0;
+
+ switch (st->tpid) {
+ case ETH_P_8021Q:
+ err = vcap_rule_add_key_u32(st->vrule,
+ VCAP_KF_8021Q_TPID,
+ SPX5_TPID_SEL_8100, ~0);
+ break;
+ case ETH_P_8021AD:
+ err = vcap_rule_add_key_u32(st->vrule,
+ VCAP_KF_8021Q_TPID,
+ SPX5_TPID_SEL_88A8, ~0);
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack,
+ "Invalid vlan proto");
+ err = -EINVAL;
+ break;
+ }
+ return err;
+}
+
+static int
+sparx5_tc_flower_handler_basic_usage(struct vcap_tc_flower_parse_usage *st)
+{
+ struct flow_match_basic mt;
+ int err = 0;
+
+ flow_rule_match_basic(st->frule, &mt);
+
+ if (mt.mask->n_proto) {
+ st->l3_proto = be16_to_cpu(mt.key->n_proto);
+ if (!sparx5_vcap_is_known_etype(st->admin, st->l3_proto)) {
+ err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_ETYPE,
+ st->l3_proto, ~0);
+ if (err)
+ goto out;
+ } else if (st->l3_proto == ETH_P_IP) {
+ err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_IP4_IS,
+ VCAP_BIT_1);
+ if (err)
+ goto out;
+ } else if (st->l3_proto == ETH_P_IPV6) {
+ err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_IP4_IS,
+ VCAP_BIT_0);
+ if (err)
+ goto out;
+ if (st->admin->vtype == VCAP_TYPE_IS0) {
+ err = vcap_rule_add_key_bit(st->vrule,
+ VCAP_KF_IP_SNAP_IS,
+ VCAP_BIT_1);
+ if (err)
+ goto out;
+ }
+ }
+ }
+
+ if (mt.mask->ip_proto) {
+ st->l4_proto = mt.key->ip_proto;
+ if (st->l4_proto == IPPROTO_TCP) {
+ err = vcap_rule_add_key_bit(st->vrule,
+ VCAP_KF_TCP_IS,
+ VCAP_BIT_1);
+ if (err)
+ goto out;
+ } else if (st->l4_proto == IPPROTO_UDP) {
+ err = vcap_rule_add_key_bit(st->vrule,
+ VCAP_KF_TCP_IS,
+ VCAP_BIT_0);
+ if (err)
+ goto out;
+ if (st->admin->vtype == VCAP_TYPE_IS0) {
+ err = vcap_rule_add_key_bit(st->vrule,
+ VCAP_KF_TCP_UDP_IS,
+ VCAP_BIT_1);
+ if (err)
+ goto out;
+ }
+ } else {
+ err = vcap_rule_add_key_u32(st->vrule,
+ VCAP_KF_L3_IP_PROTO,
+ st->l4_proto, ~0);
+ if (err)
+ goto out;
+ }
+ }
+
+ st->used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_BASIC);
+
+ return err;
+
+out:
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ip_proto parse error");
+ return err;
+}
+
+static int
+sparx5_tc_flower_handler_control_usage(struct vcap_tc_flower_parse_usage *st)
+{
+ struct flow_match_control mt;
+ u32 value, mask;
+ int err = 0;
+
+ flow_rule_match_control(st->frule, &mt);
+
+ if (mt.mask->flags) {
+ if (mt.mask->flags & FLOW_DIS_FIRST_FRAG) {
+ if (mt.key->flags & FLOW_DIS_FIRST_FRAG) {
+ value = 1; /* initial fragment */
+ mask = 0x3;
+ } else {
+ if (mt.mask->flags & FLOW_DIS_IS_FRAGMENT) {
+ value = 3; /* follow up fragment */
+ mask = 0x3;
+ } else {
+ value = 0; /* no fragment */
+ mask = 0x3;
+ }
+ }
+ } else {
+ if (mt.mask->flags & FLOW_DIS_IS_FRAGMENT) {
+ value = 3; /* follow up fragment */
+ mask = 0x3;
+ } else {
+ value = 0; /* no fragment */
+ mask = 0x3;
+ }
+ }
+
+ err = vcap_rule_add_key_u32(st->vrule,
+ VCAP_KF_L3_FRAGMENT_TYPE,
+ value, mask);
+ if (err)
+ goto out;
+ }
+
+ st->used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL);
+
+ return err;
+
+out:
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ip_frag parse error");
+ return err;
+}
+
+static int
+sparx5_tc_flower_handler_cvlan_usage(struct vcap_tc_flower_parse_usage *st)
+{
+ if (st->admin->vtype != VCAP_TYPE_IS0) {
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack,
+ "cvlan not supported in this VCAP");
+ return -EINVAL;
+ }
+
+ return vcap_tc_flower_handler_cvlan_usage(st);
+}
+
+static int
+sparx5_tc_flower_handler_vlan_usage(struct vcap_tc_flower_parse_usage *st)
+{
+ enum vcap_key_field vid_key = VCAP_KF_8021Q_VID_CLS;
+ enum vcap_key_field pcp_key = VCAP_KF_8021Q_PCP_CLS;
+ int err;
+
+ if (st->admin->vtype == VCAP_TYPE_IS0) {
+ vid_key = VCAP_KF_8021Q_VID0;
+ pcp_key = VCAP_KF_8021Q_PCP0;
+ }
+
+ err = vcap_tc_flower_handler_vlan_usage(st, vid_key, pcp_key);
+ if (err)
+ return err;
+
+ if (st->admin->vtype == VCAP_TYPE_ES0 && st->tpid)
+ err = sparx5_tc_flower_es0_tpid(st);
+
+ return err;
+}
+
+static int (*sparx5_tc_flower_usage_handlers[])(struct vcap_tc_flower_parse_usage *st) = {
+ [FLOW_DISSECTOR_KEY_ETH_ADDRS] = vcap_tc_flower_handler_ethaddr_usage,
+ [FLOW_DISSECTOR_KEY_IPV4_ADDRS] = vcap_tc_flower_handler_ipv4_usage,
+ [FLOW_DISSECTOR_KEY_IPV6_ADDRS] = vcap_tc_flower_handler_ipv6_usage,
+ [FLOW_DISSECTOR_KEY_CONTROL] = sparx5_tc_flower_handler_control_usage,
+ [FLOW_DISSECTOR_KEY_PORTS] = vcap_tc_flower_handler_portnum_usage,
+ [FLOW_DISSECTOR_KEY_BASIC] = sparx5_tc_flower_handler_basic_usage,
+ [FLOW_DISSECTOR_KEY_CVLAN] = sparx5_tc_flower_handler_cvlan_usage,
+ [FLOW_DISSECTOR_KEY_VLAN] = sparx5_tc_flower_handler_vlan_usage,
+ [FLOW_DISSECTOR_KEY_TCP] = vcap_tc_flower_handler_tcp_usage,
+ [FLOW_DISSECTOR_KEY_ARP] = vcap_tc_flower_handler_arp_usage,
+ [FLOW_DISSECTOR_KEY_IP] = vcap_tc_flower_handler_ip_usage,
+};
+
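+/* Run the dissector handlers that apply to the flower rule and check
+ * that all dissector keys used by the filter were consumed.
+ */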
+static int sparx5_tc_use_dissectors(struct vcap_tc_flower_parse_usage *st,
+ struct vcap_admin *admin,
+ struct vcap_rule *vrule)
+{
+ int idx, err = 0;
+
+ for (idx = 0; idx < ARRAY_SIZE(sparx5_tc_flower_usage_handlers); ++idx) {
+ if (!flow_rule_match_key(st->frule, idx))
+ continue;
+ if (!sparx5_tc_flower_usage_handlers[idx])
+ continue;
+ err = sparx5_tc_flower_usage_handlers[idx](st);
+ if (err)
+ return err;
+ }
+
+ if (st->frule->match.dissector->used_keys ^ st->used_keys) {
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack,
+ "Unsupported match item");
+ return -ENOENT;
+ }
+
+ return err;
+}
+
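+/* Sanity check the tc action list: each action type may only appear
+ * once, the last action must be a valid goto unless this is the last
+ * lookup, and conflicting trap/accept and VLAN action combinations are
+ * rejected.
+ */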
+static int sparx5_tc_flower_action_check(struct vcap_control *vctrl,
+ struct net_device *ndev,
+ struct flow_cls_offload *fco,
+ bool ingress)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(fco);
+ struct flow_action_entry *actent, *last_actent = NULL;
+ struct flow_action *act = &rule->action;
+ u64 action_mask = 0;
+ int idx;
+
+ if (!flow_action_has_entries(act)) {
+ NL_SET_ERR_MSG_MOD(fco->common.extack, "No actions");
+ return -EINVAL;
+ }
+
+ if (!flow_action_basic_hw_stats_check(act, fco->common.extack))
+ return -EOPNOTSUPP;
+
+ flow_action_for_each(idx, actent, act) {
+ if (action_mask & BIT(actent->id)) {
+ NL_SET_ERR_MSG_MOD(fco->common.extack,
+ "More actions of the same type");
+ return -EINVAL;
+ }
+ action_mask |= BIT(actent->id);
+ last_actent = actent; /* Save last action for later check */
+ }
+
+ /* Check if the last action is a goto.
+ * The last chain/lookup does not need to have a goto action.
+ */
+ if (last_actent->id == FLOW_ACTION_GOTO) {
+ /* Check if the destination chain is in one of the VCAPs */
+ if (!vcap_is_next_lookup(vctrl, fco->common.chain_index,
+ last_actent->chain_index)) {
+ NL_SET_ERR_MSG_MOD(fco->common.extack,
+ "Invalid goto chain");
+ return -EINVAL;
+ }
+ } else if (!vcap_is_last_chain(vctrl, fco->common.chain_index,
+ ingress)) {
+ NL_SET_ERR_MSG_MOD(fco->common.extack,
+ "Last action must be 'goto'");
+ return -EINVAL;
+ }
+
+ /* Catch unsupported combinations of actions */
+ if (action_mask & BIT(FLOW_ACTION_TRAP) &&
+ action_mask & BIT(FLOW_ACTION_ACCEPT)) {
+ NL_SET_ERR_MSG_MOD(fco->common.extack,
+ "Cannot combine pass and trap action");
+ return -EOPNOTSUPP;
+ }
+
+ if (action_mask & BIT(FLOW_ACTION_VLAN_PUSH) &&
+ action_mask & BIT(FLOW_ACTION_VLAN_POP)) {
+ NL_SET_ERR_MSG_MOD(fco->common.extack,
+ "Cannot combine vlan push and pop action");
+ return -EOPNOTSUPP;
+ }
+
+ if (action_mask & BIT(FLOW_ACTION_VLAN_PUSH) &&
+ action_mask & BIT(FLOW_ACTION_VLAN_MANGLE)) {
+ NL_SET_ERR_MSG_MOD(fco->common.extack,
+ "Cannot combine vlan push and modify action");
+ return -EOPNOTSUPP;
+ }
+
+ if (action_mask & BIT(FLOW_ACTION_VLAN_POP) &&
+ action_mask & BIT(FLOW_ACTION_VLAN_MANGLE)) {
+ NL_SET_ERR_MSG_MOD(fco->common.extack,
+ "Cannot combine vlan pop and modify action");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+/* Add a rule counter action */
+static int sparx5_tc_add_rule_counter(struct vcap_admin *admin,
+ struct vcap_rule *vrule)
+{
+ int err;
+
+ switch (admin->vtype) {
+ case VCAP_TYPE_IS0:
+ break;
+ case VCAP_TYPE_ES0:
+ err = vcap_rule_mod_action_u32(vrule, VCAP_AF_ESDX,
+ vrule->id);
+ if (err)
+ return err;
+ vcap_rule_set_counter_id(vrule, vrule->id);
+ break;
+ case VCAP_TYPE_IS2:
+ case VCAP_TYPE_ES2:
+ err = vcap_rule_mod_action_u32(vrule, VCAP_AF_CNT_ID,
+ vrule->id);
+ if (err)
+ return err;
+ vcap_rule_set_counter_id(vrule, vrule->id);
+ break;
+ default:
+ pr_err("%s:%d: vcap type: %d not supported\n",
+ __func__, __LINE__, admin->vtype);
+ break;
+ }
+ return 0;
+}
+
+/* Collect all port keysets and apply the first of them, possibly wildcarded */
+static int sparx5_tc_select_protocol_keyset(struct net_device *ndev,
+ struct vcap_rule *vrule,
+ struct vcap_admin *admin,
+ u16 l3_proto,
+ struct sparx5_multiple_rules *multi)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ struct vcap_keyset_list portkeysetlist = {};
+ enum vcap_keyfield_set portkeysets[10] = {};
+ struct vcap_keyset_list matches = {};
+ enum vcap_keyfield_set keysets[10];
+ int idx, jdx, err = 0, count = 0;
+ struct sparx5_wildcard_rule *mru;
+ const struct vcap_set *kinfo;
+ struct vcap_control *vctrl;
+
+ vctrl = port->sparx5->vcap_ctrl;
+
+ /* Find the keysets that the rule can use */
+ matches.keysets = keysets;
+ matches.max = ARRAY_SIZE(keysets);
+ if (!vcap_rule_find_keysets(vrule, &matches))
+ return -EINVAL;
+
+ /* Find the keysets that the port configuration supports */
+ portkeysetlist.max = ARRAY_SIZE(portkeysets);
+ portkeysetlist.keysets = portkeysets;
+ err = sparx5_vcap_get_port_keyset(ndev,
+ admin, vrule->vcap_chain_id,
+ l3_proto,
+ &portkeysetlist);
+ if (err)
+ return err;
+
+ /* Find the intersection of the two sets of keysets */
+ for (idx = 0; idx < portkeysetlist.cnt; ++idx) {
+ kinfo = vcap_keyfieldset(vctrl, admin->vtype,
+ portkeysetlist.keysets[idx]);
+ if (!kinfo)
+ continue;
+
+ /* Find a port keyset that matches the required keys.
+ * If there are multiple keysets then compose a type id mask.
+ */
+ for (jdx = 0; jdx < matches.cnt; ++jdx) {
+ if (portkeysetlist.keysets[idx] != matches.keysets[jdx])
+ continue;
+
+ mru = &multi->rule[kinfo->sw_per_item];
+ if (!mru->selected) {
+ mru->selected = true;
+ mru->keyset = portkeysetlist.keysets[idx];
+ mru->value = kinfo->type_id;
+ }
+ mru->value &= kinfo->type_id;
+ mru->mask |= kinfo->type_id;
+ ++count;
+ }
+ }
+ if (count == 0)
+ return -EPROTO;
+
+ if (l3_proto == ETH_P_ALL && count < portkeysetlist.cnt)
+ return -ENOENT;
+
+ for (idx = 0; idx < SPX5_MAX_RULE_SIZE; ++idx) {
+ mru = &multi->rule[idx];
+ if (!mru->selected)
+ continue;
+
+ /* Align the mask to the combined value */
+ mru->mask ^= mru->value;
+ }
+
+ /* Set the chosen keyset on the rule and set a wildcarded type if
+ * there is more than one keyset.
+ */
+ for (idx = 0; idx < SPX5_MAX_RULE_SIZE; ++idx) {
+ mru = &multi->rule[idx];
+ if (!mru->selected)
+ continue;
+
+ vcap_set_rule_set_keyset(vrule, mru->keyset);
+ if (count > 1)
+ /* Some keysets do not have a type field */
+ vcap_rule_mod_key_u32(vrule, VCAP_KF_TYPE,
+ mru->value,
+ ~mru->mask);
+ mru->selected = false; /* mark as done */
+ break; /* Stop here and add more rules later */
+ }
+ return err;
+}
+
+static int sparx5_tc_add_rule_copy(struct vcap_control *vctrl,
+ struct flow_cls_offload *fco,
+ struct vcap_rule *erule,
+ struct vcap_admin *admin,
+ struct sparx5_wildcard_rule *rule)
+{
+ enum vcap_key_field keylist[] = {
+ VCAP_KF_IF_IGR_PORT_MASK,
+ VCAP_KF_IF_IGR_PORT_MASK_SEL,
+ VCAP_KF_IF_IGR_PORT_MASK_RNG,
+ VCAP_KF_LOOKUP_FIRST_IS,
+ VCAP_KF_TYPE,
+ };
+ struct vcap_rule *vrule;
+ int err;
+
+ /* Add an extra rule with a special user and the new keyset */
+ erule->user = VCAP_USER_TC_EXTRA;
+ vrule = vcap_copy_rule(erule);
+ if (IS_ERR(vrule))
+ return PTR_ERR(vrule);
+
+ /* Link the new rule to the existing rule with the cookie */
+ vrule->cookie = erule->cookie;
+ vcap_filter_rule_keys(vrule, keylist, ARRAY_SIZE(keylist), true);
+ err = vcap_set_rule_set_keyset(vrule, rule->keyset);
+ if (err) {
+ pr_err("%s:%d: could not set keyset %s in rule: %u\n",
+ __func__, __LINE__,
+ vcap_keyset_name(vctrl, rule->keyset),
+ vrule->id);
+ goto out;
+ }
+
+ /* Some keysets do not have a type field, so ignore return value */
+ vcap_rule_mod_key_u32(vrule, VCAP_KF_TYPE, rule->value, ~rule->mask);
+
+ err = vcap_set_rule_set_actionset(vrule, erule->actionset);
+ if (err)
+ goto out;
+
+ err = sparx5_tc_add_rule_counter(admin, vrule);
+ if (err)
+ goto out;
+
+ err = vcap_val_rule(vrule, ETH_P_ALL);
+ if (err) {
+ pr_err("%s:%d: could not validate rule: %u\n",
+ __func__, __LINE__, vrule->id);
+ vcap_set_tc_exterr(fco, vrule);
+ goto out;
+ }
+ err = vcap_add_rule(vrule);
+ if (err) {
+ pr_err("%s:%d: could not add rule: %u\n",
+ __func__, __LINE__, vrule->id);
+ goto out;
+ }
+out:
+ vcap_free_rule(vrule);
+ return err;
+}
+
+static int sparx5_tc_add_remaining_rules(struct vcap_control *vctrl,
+ struct flow_cls_offload *fco,
+ struct vcap_rule *erule,
+ struct vcap_admin *admin,
+ struct sparx5_multiple_rules *multi)
+{
+ int idx, err = 0;
+
+ for (idx = 0; idx < SPX5_MAX_RULE_SIZE; ++idx) {
+ if (!multi->rule[idx].selected)
+ continue;
+
+ err = sparx5_tc_add_rule_copy(vctrl, fco, erule, admin,
+ &multi->rule[idx]);
+ if (err)
+ break;
+ }
+ return err;
+}
+
+/* Add the actionset that is the default for the VCAP type */
+static int sparx5_tc_set_actionset(struct vcap_admin *admin,
+ struct vcap_rule *vrule)
+{
+ enum vcap_actionfield_set aset;
+ int err = 0;
+
+ switch (admin->vtype) {
+ case VCAP_TYPE_IS0:
+ aset = VCAP_AFS_CLASSIFICATION;
+ break;
+ case VCAP_TYPE_IS2:
+ aset = VCAP_AFS_BASE_TYPE;
+ break;
+ case VCAP_TYPE_ES0:
+ aset = VCAP_AFS_ES0;
+ break;
+ case VCAP_TYPE_ES2:
+ aset = VCAP_AFS_BASE_TYPE;
+ break;
+ default:
+ pr_err("%s:%d: %s\n", __func__, __LINE__, "Invalid VCAP type");
+ return -EINVAL;
+ }
+ /* Do not overwrite any current actionset */
+ if (vrule->actionset == VCAP_AFS_NO_VALUE)
+ err = vcap_set_rule_set_actionset(vrule, aset);
+ return err;
+}
+
+/* Add the VCAP key to match on for a rule target value */
+static int sparx5_tc_add_rule_link_target(struct vcap_admin *admin,
+ struct vcap_rule *vrule,
+ int target_cid)
+{
+ int link_val = target_cid % VCAP_CID_LOOKUP_SIZE;
+ int err;
+
+ if (!link_val)
+ return 0;
+
+ switch (admin->vtype) {
+ case VCAP_TYPE_IS0:
+ /* Add NXT_IDX key for chaining rules between IS0 instances */
+ err = vcap_rule_add_key_u32(vrule, VCAP_KF_LOOKUP_GEN_IDX_SEL,
+ 1, /* enable */
+ ~0);
+ if (err)
+ return err;
+ return vcap_rule_add_key_u32(vrule, VCAP_KF_LOOKUP_GEN_IDX,
+ link_val, /* target */
+ ~0);
+ case VCAP_TYPE_IS2:
+ /* Add PAG key for chaining rules from IS0 */
+ return vcap_rule_add_key_u32(vrule, VCAP_KF_LOOKUP_PAG,
+ link_val, /* target */
+ ~0);
+ case VCAP_TYPE_ES0:
+ case VCAP_TYPE_ES2:
+ /* Add ISDX key for chaining rules from IS0 */
+ return vcap_rule_add_key_u32(vrule, VCAP_KF_ISDX_CLS, link_val,
+ ~0);
+ default:
+ break;
+ }
+ return 0;
+}
+
+/* Add the VCAP action that adds a target value to a rule */
+static int sparx5_tc_add_rule_link(struct vcap_control *vctrl,
+ struct vcap_admin *admin,
+ struct vcap_rule *vrule,
+ int from_cid, int to_cid)
+{
+ struct vcap_admin *to_admin = vcap_find_admin(vctrl, to_cid);
+ int diff, err = 0;
+
+ if (!to_admin) {
+ pr_err("%s:%d: unsupported chain direction: %d\n",
+ __func__, __LINE__, to_cid);
+ return -EINVAL;
+ }
+
+ diff = vcap_chain_offset(vctrl, from_cid, to_cid);
+ if (!diff)
+ return 0;
+
+ if (admin->vtype == VCAP_TYPE_IS0 &&
+ to_admin->vtype == VCAP_TYPE_IS0) {
+ /* Between IS0 instances the G_IDX value is used */
+ err = vcap_rule_add_action_u32(vrule, VCAP_AF_NXT_IDX, diff);
+ if (err)
+ goto out;
+ err = vcap_rule_add_action_u32(vrule, VCAP_AF_NXT_IDX_CTRL,
+ 1); /* Replace */
+ if (err)
+ goto out;
+ } else if (admin->vtype == VCAP_TYPE_IS0 &&
+ to_admin->vtype == VCAP_TYPE_IS2) {
+ /* Between IS0 and IS2 the PAG value is used */
+ err = vcap_rule_add_action_u32(vrule, VCAP_AF_PAG_VAL, diff);
+ if (err)
+ goto out;
+ err = vcap_rule_add_action_u32(vrule,
+ VCAP_AF_PAG_OVERRIDE_MASK,
+ 0xff);
+ if (err)
+ goto out;
+ } else if (admin->vtype == VCAP_TYPE_IS0 &&
+ (to_admin->vtype == VCAP_TYPE_ES0 ||
+ to_admin->vtype == VCAP_TYPE_ES2)) {
+ /* Between IS0 and ES0/ES2 the ISDX value is used */
+ err = vcap_rule_add_action_u32(vrule, VCAP_AF_ISDX_VAL,
+ diff);
+ if (err)
+ goto out;
+ err = vcap_rule_add_action_bit(vrule,
+ VCAP_AF_ISDX_ADD_REPLACE_SEL,
+ VCAP_BIT_1);
+ if (err)
+ goto out;
+ } else {
+ pr_err("%s:%d: unsupported chain destination: %d\n",
+ __func__, __LINE__, to_cid);
+ err = -EOPNOTSUPP;
+ }
+out:
+ return err;
+}
+
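+/* Validate a tc gate action against the PSFP stream gate limits and
+ * copy its schedule into the driver stream gate representation.
+ */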
+static int sparx5_tc_flower_parse_act_gate(struct sparx5_psfp_sg *sg,
+ struct flow_action_entry *act,
+ struct netlink_ext_ack *extack)
+{
+ int i;
+
+ if (act->gate.prio < -1 || act->gate.prio > SPX5_PSFP_SG_MAX_IPV) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid gate priority");
+ return -EINVAL;
+ }
+
+ if (act->gate.cycletime < SPX5_PSFP_SG_MIN_CYCLE_TIME_NS ||
+ act->gate.cycletime > SPX5_PSFP_SG_MAX_CYCLE_TIME_NS) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid gate cycletime");
+ return -EINVAL;
+ }
+
+ if (act->gate.cycletimeext > SPX5_PSFP_SG_MAX_CYCLE_TIME_NS) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid gate cycletimeext");
+ return -EINVAL;
+ }
+
+ if (act->gate.num_entries >= SPX5_PSFP_GCE_CNT) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid number of gate entries");
+ return -EINVAL;
+ }
+
+ sg->gate_state = true;
+ sg->ipv = act->gate.prio;
+ sg->num_entries = act->gate.num_entries;
+ sg->cycletime = act->gate.cycletime;
+ sg->cycletimeext = act->gate.cycletimeext;
+
+ for (i = 0; i < sg->num_entries; i++) {
+ sg->gce[i].gate_state = !!act->gate.entries[i].gate_state;
+ sg->gce[i].interval = act->gate.entries[i].interval;
+ sg->gce[i].ipv = act->gate.entries[i].ipv;
+ sg->gce[i].maxoctets = act->gate.entries[i].maxoctets;
+ }
+
+ return 0;
+}
+
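+/* Map a tc police action to a service policer; the rate is converted
+ * from bytes/s to kbit/s before it is range checked.
+ */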
+static int sparx5_tc_flower_parse_act_police(struct sparx5_policer *pol,
+ struct flow_action_entry *act,
+ struct netlink_ext_ack *extack)
+{
+ pol->type = SPX5_POL_SERVICE;
+ pol->rate = div_u64(act->police.rate_bytes_ps, 1000) * 8;
+ pol->burst = act->police.burst;
+ pol->idx = act->hw_index;
+
+ /* rate is now in kbit */
+ if (pol->rate > DIV_ROUND_UP(SPX5_SDLB_GROUP_RATE_MAX, 1000)) {
+ NL_SET_ERR_MSG_MOD(extack, "Maximum rate exceeded");
+ return -EINVAL;
+ }
+
+ if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
+ NL_SET_ERR_MSG_MOD(extack, "Offload not supported when exceed action is not drop");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
+ act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
+ NL_SET_ERR_MSG_MOD(extack, "Offload not supported when conform action is not pipe or ok");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
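+/* Set up the PSFP chain for a rule: a stream gate, an optional flow
+ * meter and a stream filter, and let the rule assign the ISDX that
+ * selects the new stream filter.
+ */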
+static int sparx5_tc_flower_psfp_setup(struct sparx5 *sparx5,
+ struct vcap_rule *vrule, int sg_idx,
+ int pol_idx, struct sparx5_psfp_sg *sg,
+ struct sparx5_psfp_fm *fm,
+ struct sparx5_psfp_sf *sf)
+{
+ u32 psfp_sfid = 0, psfp_fmid = 0, psfp_sgid = 0;
+ int ret;
+
+ /* Must always have a stream gate - max sdu (filter option) is evaluated
+ * after frames have passed the gate, so in case of only a policer, we
+ * allocate a stream gate that is always open.
+ */
+ if (sg_idx < 0) {
+ sg_idx = sparx5_pool_idx_to_id(SPX5_PSFP_SG_OPEN);
+ sg->ipv = 0; /* Disabled */
+ sg->cycletime = SPX5_PSFP_SG_CYCLE_TIME_DEFAULT;
+ sg->num_entries = 1;
+ sg->gate_state = 1; /* Open */
+ sg->gate_enabled = 1;
+ sg->gce[0].gate_state = 1;
+ sg->gce[0].interval = SPX5_PSFP_SG_CYCLE_TIME_DEFAULT;
+ sg->gce[0].ipv = 0;
+ sg->gce[0].maxoctets = 0; /* Disabled */
+ }
+
+ ret = sparx5_psfp_sg_add(sparx5, sg_idx, sg, &psfp_sgid);
+ if (ret < 0)
+ return ret;
+
+ if (pol_idx >= 0) {
+ /* Add new flow-meter */
+ ret = sparx5_psfp_fm_add(sparx5, pol_idx, fm, &psfp_fmid);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Map stream filter to stream gate */
+ sf->sgid = psfp_sgid;
+
+ /* Add a new stream filter and map it to a stream gate */
+ ret = sparx5_psfp_sf_add(sparx5, sf, &psfp_sfid);
+ if (ret < 0)
+ return ret;
+
+ /* Streams are classified by ISDX - map ISDX 1:1 to sfid for now. */
+ sparx5_isdx_conf_set(sparx5, psfp_sfid, psfp_sfid, psfp_fmid);
+
+ ret = vcap_rule_add_action_bit(vrule, VCAP_AF_ISDX_ADD_REPLACE_SEL,
+ VCAP_BIT_1);
+ if (ret)
+ return ret;
+
+ ret = vcap_rule_add_action_u32(vrule, VCAP_AF_ISDX_VAL, psfp_sfid);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/* Handle the action trap for a VCAP rule */
+static int sparx5_tc_action_trap(struct vcap_admin *admin,
+ struct vcap_rule *vrule,
+ struct flow_cls_offload *fco)
+{
+ int err = 0;
+
+ switch (admin->vtype) {
+ case VCAP_TYPE_IS2:
+ err = vcap_rule_add_action_bit(vrule,
+ VCAP_AF_CPU_COPY_ENA,
+ VCAP_BIT_1);
+ if (err)
+ break;
+ err = vcap_rule_add_action_u32(vrule,
+ VCAP_AF_CPU_QUEUE_NUM, 0);
+ if (err)
+ break;
+ err = vcap_rule_add_action_u32(vrule,
+ VCAP_AF_MASK_MODE,
+ SPX5_PMM_REPLACE_ALL);
+ break;
+ case VCAP_TYPE_ES0:
+ err = vcap_rule_add_action_u32(vrule,
+ VCAP_AF_FWD_SEL,
+ SPX5_FWSEL_REDIRECT_TO_LOOPBACK);
+ break;
+ case VCAP_TYPE_ES2:
+ err = vcap_rule_add_action_bit(vrule,
+ VCAP_AF_CPU_COPY_ENA,
+ VCAP_BIT_1);
+ if (err)
+ break;
+ err = vcap_rule_add_action_u32(vrule,
+ VCAP_AF_CPU_QUEUE_NUM, 0);
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(fco->common.extack,
+ "Trap action not supported in this VCAP");
+ err = -EOPNOTSUPP;
+ break;
+ }
+ return err;
+}
+
+static int sparx5_tc_action_vlan_pop(struct vcap_admin *admin,
+ struct vcap_rule *vrule,
+ struct flow_cls_offload *fco,
+ u16 tpid)
+{
+ int err = 0;
+
+ switch (admin->vtype) {
+ case VCAP_TYPE_ES0:
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(fco->common.extack,
+ "VLAN pop action not supported in this VCAP");
+ return -EOPNOTSUPP;
+ }
+
+ switch (tpid) {
+ case ETH_P_8021Q:
+ case ETH_P_8021AD:
+ err = vcap_rule_add_action_u32(vrule,
+ VCAP_AF_PUSH_OUTER_TAG,
+ SPX5_OTAG_UNTAG);
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(fco->common.extack,
+ "Invalid vlan proto");
+ err = -EINVAL;
+ }
+ return err;
+}
+
+static int sparx5_tc_action_vlan_modify(struct vcap_admin *admin,
+ struct vcap_rule *vrule,
+ struct flow_cls_offload *fco,
+ struct flow_action_entry *act,
+ u16 tpid)
+{
+ int err = 0;
+
+ switch (admin->vtype) {
+ case VCAP_TYPE_ES0:
+ err = vcap_rule_add_action_u32(vrule,
+ VCAP_AF_PUSH_OUTER_TAG,
+ SPX5_OTAG_TAG_A);
+ if (err)
+ return err;
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(fco->common.extack,
+ "VLAN modify action not supported in this VCAP");
+ return -EOPNOTSUPP;
+ }
+
+ switch (tpid) {
+ case ETH_P_8021Q:
+ err = vcap_rule_add_action_u32(vrule,
+ VCAP_AF_TAG_A_TPID_SEL,
+ SPX5_TPID_A_8100);
+ break;
+ case ETH_P_8021AD:
+ err = vcap_rule_add_action_u32(vrule,
+ VCAP_AF_TAG_A_TPID_SEL,
+ SPX5_TPID_A_88A8);
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(fco->common.extack,
+ "Invalid vlan proto");
+ err = -EINVAL;
+ }
+ if (err)
+ return err;
+
+ err = vcap_rule_add_action_u32(vrule,
+ VCAP_AF_TAG_A_VID_SEL,
+ SPX5_VID_A_VAL);
+ if (err)
+ return err;
+
+ err = vcap_rule_add_action_u32(vrule,
+ VCAP_AF_VID_A_VAL,
+ act->vlan.vid);
+ if (err)
+ return err;
+
+ err = vcap_rule_add_action_u32(vrule,
+ VCAP_AF_TAG_A_PCP_SEL,
+ SPX5_PCP_A_VAL);
+ if (err)
+ return err;
+
+ err = vcap_rule_add_action_u32(vrule,
+ VCAP_AF_PCP_A_VAL,
+ act->vlan.prio);
+ if (err)
+ return err;
+
+ return vcap_rule_add_action_u32(vrule,
+ VCAP_AF_TAG_A_DEI_SEL,
+ SPX5_DEI_A_CLASSIFIED);
+}
+
+static int sparx5_tc_action_vlan_push(struct vcap_admin *admin,
+ struct vcap_rule *vrule,
+ struct flow_cls_offload *fco,
+ struct flow_action_entry *act,
+ u16 tpid)
+{
+ u16 act_tpid = be16_to_cpu(act->vlan.proto);
+ int err = 0;
+
+ switch (admin->vtype) {
+ case VCAP_TYPE_ES0:
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(fco->common.extack,
+ "VLAN push action not supported in this VCAP");
+ return -EOPNOTSUPP;
+ }
+
+ if (tpid == ETH_P_8021AD) {
+ NL_SET_ERR_MSG_MOD(fco->common.extack,
+ "Cannot push on double tagged frames");
+ return -EOPNOTSUPP;
+ }
+
+ err = sparx5_tc_action_vlan_modify(admin, vrule, fco, act, act_tpid);
+ if (err)
+ return err;
+
+ switch (act_tpid) {
+ case ETH_P_8021Q:
+ break;
+ case ETH_P_8021AD:
+ /* Push classified tag as inner tag */
+ err = vcap_rule_add_action_u32(vrule,
+ VCAP_AF_PUSH_INNER_TAG,
+ SPX5_ITAG_PUSH_B_TAG);
+ if (err)
+ break;
+ err = vcap_rule_add_action_u32(vrule,
+ VCAP_AF_TAG_B_TPID_SEL,
+ SPX5_TPID_B_CLASSIFIED);
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(fco->common.extack,
+ "Invalid vlan proto");
+ err = -EINVAL;
+ }
+ return err;
+}
+
+/* Remove rule keys that may prevent templates from matching a keyset */
+static void sparx5_tc_flower_simplify_rule(struct vcap_admin *admin,
+ struct vcap_rule *vrule,
+ u16 l3_proto)
+{
+ switch (admin->vtype) {
+ case VCAP_TYPE_IS0:
+ vcap_rule_rem_key(vrule, VCAP_KF_ETYPE);
+ switch (l3_proto) {
+ case ETH_P_IP:
+ break;
+ case ETH_P_IPV6:
+ vcap_rule_rem_key(vrule, VCAP_KF_IP_SNAP_IS);
+ break;
+ default:
+ break;
+ }
+ break;
+ case VCAP_TYPE_ES2:
+ switch (l3_proto) {
+ case ETH_P_IP:
+ if (vrule->keyset == VCAP_KFS_IP4_OTHER)
+ vcap_rule_rem_key(vrule, VCAP_KF_TCP_IS);
+ break;
+ case ETH_P_IPV6:
+ if (vrule->keyset == VCAP_KFS_IP6_STD)
+ vcap_rule_rem_key(vrule, VCAP_KF_TCP_IS);
+ vcap_rule_rem_key(vrule, VCAP_KF_IP4_IS);
+ break;
+ default:
+ break;
+ }
+ break;
+ case VCAP_TYPE_IS2:
+ switch (l3_proto) {
+ case ETH_P_IP:
+ case ETH_P_IPV6:
+ vcap_rule_rem_key(vrule, VCAP_KF_IP4_IS);
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+}
+
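+/* Apply a matching tc template to the rule: use the keyset selected by
+ * the template and remove keys that would prevent it from matching.
+ */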
+static bool sparx5_tc_flower_use_template(struct net_device *ndev,
+ struct flow_cls_offload *fco,
+ struct vcap_admin *admin,
+ struct vcap_rule *vrule)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ struct sparx5_tc_flower_template *ftp;
+
+ list_for_each_entry(ftp, &port->tc_templates, list) {
+ if (ftp->cid != fco->common.chain_index)
+ continue;
+
+ vcap_set_rule_set_keyset(vrule, ftp->keyset);
+ sparx5_tc_flower_simplify_rule(admin, vrule, ftp->l3_proto);
+ return true;
+ }
+ return false;
+}
+
+static int sparx5_tc_flower_replace(struct net_device *ndev,
+ struct flow_cls_offload *fco,
+ struct vcap_admin *admin,
+ bool ingress)
+{
+ struct sparx5_psfp_sf sf = { .max_sdu = SPX5_PSFP_SF_MAX_SDU };
+ struct netlink_ext_ack *extack = fco->common.extack;
+ int err, idx, tc_sg_idx = -1, tc_pol_idx = -1;
+ struct vcap_tc_flower_parse_usage state = {
+ .fco = fco,
+ .l3_proto = ETH_P_ALL,
+ .admin = admin,
+ };
+ struct sparx5_port *port = netdev_priv(ndev);
+ struct sparx5_multiple_rules multi = {};
+ struct sparx5 *sparx5 = port->sparx5;
+ struct sparx5_psfp_sg sg = { 0 };
+ struct sparx5_psfp_fm fm = { 0 };
+ struct flow_action_entry *act;
+ struct vcap_control *vctrl;
+ struct flow_rule *frule;
+ struct vcap_rule *vrule;
+
+ vctrl = port->sparx5->vcap_ctrl;
+
+ err = sparx5_tc_flower_action_check(vctrl, ndev, fco, ingress);
+ if (err)
+ return err;
+
+ vrule = vcap_alloc_rule(vctrl, ndev, fco->common.chain_index, VCAP_USER_TC,
+ fco->common.prio, 0);
+ if (IS_ERR(vrule))
+ return PTR_ERR(vrule);
+
+ vrule->cookie = fco->cookie;
+
+ state.vrule = vrule;
+ state.frule = flow_cls_offload_flow_rule(fco);
+ err = sparx5_tc_use_dissectors(&state, admin, vrule);
+ if (err)
+ goto out;
+
+ err = sparx5_tc_add_rule_counter(admin, vrule);
+ if (err)
+ goto out;
+
+ err = sparx5_tc_add_rule_link_target(admin, vrule,
+ fco->common.chain_index);
+ if (err)
+ goto out;
+
+ frule = flow_cls_offload_flow_rule(fco);
+ flow_action_for_each(idx, act, &frule->action) {
+ switch (act->id) {
+ case FLOW_ACTION_GATE: {
+ err = sparx5_tc_flower_parse_act_gate(&sg, act, extack);
+ if (err < 0)
+ goto out;
+
+ tc_sg_idx = act->hw_index;
+
+ break;
+ }
+ case FLOW_ACTION_POLICE: {
+ err = sparx5_tc_flower_parse_act_police(&fm.pol, act,
+ extack);
+ if (err < 0)
+ goto out;
+
+ tc_pol_idx = fm.pol.idx;
+ sf.max_sdu = act->police.mtu;
+
+ break;
+ }
+ case FLOW_ACTION_TRAP:
+ err = sparx5_tc_action_trap(admin, vrule, fco);
+ if (err)
+ goto out;
+ break;
+ case FLOW_ACTION_ACCEPT:
+ err = sparx5_tc_set_actionset(admin, vrule);
+ if (err)
+ goto out;
+ break;
+ case FLOW_ACTION_GOTO:
+ err = sparx5_tc_set_actionset(admin, vrule);
+ if (err)
+ goto out;
+ sparx5_tc_add_rule_link(vctrl, admin, vrule,
+ fco->common.chain_index,
+ act->chain_index);
+ break;
+ case FLOW_ACTION_VLAN_POP:
+ err = sparx5_tc_action_vlan_pop(admin, vrule, fco,
+ state.tpid);
+ if (err)
+ goto out;
+ break;
+ case FLOW_ACTION_VLAN_PUSH:
+ err = sparx5_tc_action_vlan_push(admin, vrule, fco,
+ act, state.tpid);
+ if (err)
+ goto out;
+ break;
+ case FLOW_ACTION_VLAN_MANGLE:
+ err = sparx5_tc_action_vlan_modify(admin, vrule, fco,
+ act, state.tpid);
+ if (err)
+ goto out;
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(fco->common.extack,
+ "Unsupported TC action");
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+ }
+
+ /* Setup PSFP */
+ if (tc_sg_idx >= 0 || tc_pol_idx >= 0) {
+ err = sparx5_tc_flower_psfp_setup(sparx5, vrule, tc_sg_idx,
+ tc_pol_idx, &sg, &fm, &sf);
+ if (err)
+ goto out;
+ }
+
+ if (!sparx5_tc_flower_use_template(ndev, fco, admin, vrule)) {
+ err = sparx5_tc_select_protocol_keyset(ndev, vrule, admin,
+ state.l3_proto, &multi);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(fco->common.extack,
+ "No matching port keyset for filter protocol and keys");
+ goto out;
+ }
+ }
+
+ /* provide the l3 protocol to guide the keyset selection */
+ err = vcap_val_rule(vrule, state.l3_proto);
+ if (err) {
+ vcap_set_tc_exterr(fco, vrule);
+ goto out;
+ }
+ err = vcap_add_rule(vrule);
+ if (err)
+ NL_SET_ERR_MSG_MOD(fco->common.extack,
+ "Could not add the filter");
+
+ if (state.l3_proto == ETH_P_ALL)
+ err = sparx5_tc_add_remaining_rules(vctrl, fco, vrule, admin,
+ &multi);
+
+out:
+ vcap_free_rule(vrule);
+ return err;
+}
+
+static void sparx5_tc_free_psfp_resources(struct sparx5 *sparx5,
+ struct vcap_rule *vrule)
+{
+ struct vcap_client_actionfield *afield;
+ u32 isdx, sfid, sgid, fmid;
+
+ /* Check if VCAP_AF_ISDX_VAL action is set for this rule - and if
+ * it is used for stream and/or flow-meter classification.
+ */
+ afield = vcap_find_actionfield(vrule, VCAP_AF_ISDX_VAL);
+ if (!afield)
+ return;
+
+ isdx = afield->data.u32.value;
+ sfid = sparx5_psfp_isdx_get_sf(sparx5, isdx);
+
+ if (!sfid)
+ return;
+
+ fmid = sparx5_psfp_isdx_get_fm(sparx5, isdx);
+ sgid = sparx5_psfp_sf_get_sg(sparx5, sfid);
+
+ if (fmid && sparx5_psfp_fm_del(sparx5, fmid) < 0)
+ pr_err("%s:%d Could not delete invalid fmid: %d", __func__,
+ __LINE__, fmid);
+
+ if (sgid && sparx5_psfp_sg_del(sparx5, sgid) < 0)
+ pr_err("%s:%d Could not delete invalid sgid: %d", __func__,
+ __LINE__, sgid);
+
+ if (sparx5_psfp_sf_del(sparx5, sfid) < 0)
+ pr_err("%s:%d Could not delete invalid sfid: %d", __func__,
+ __LINE__, sfid);
+
+ sparx5_isdx_conf_set(sparx5, isdx, 0, 0);
+}
+
+static int sparx5_tc_free_rule_resources(struct net_device *ndev,
+ struct vcap_control *vctrl,
+ int rule_id)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ struct sparx5 *sparx5 = port->sparx5;
+ struct vcap_rule *vrule;
+ int ret = 0;
+
+ vrule = vcap_get_rule(vctrl, rule_id);
+ if (IS_ERR(vrule))
+ return -EINVAL;
+
+ sparx5_tc_free_psfp_resources(sparx5, vrule);
+
+ vcap_free_rule(vrule);
+ return ret;
+}
+
+static int sparx5_tc_flower_destroy(struct net_device *ndev,
+ struct flow_cls_offload *fco,
+ struct vcap_admin *admin)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ int err = -ENOENT, count = 0, rule_id;
+ struct vcap_control *vctrl;
+
+ vctrl = port->sparx5->vcap_ctrl;
+ while (true) {
+ rule_id = vcap_lookup_rule_by_cookie(vctrl, fco->cookie);
+ if (rule_id <= 0)
+ break;
+ if (count == 0) {
+ /* Resources are attached to the first rule of
+ * a set of rules. Only works if the rules are
+ * in the correct order.
+ */
+ err = sparx5_tc_free_rule_resources(ndev, vctrl,
+ rule_id);
+ if (err)
+ pr_err("%s:%d: could not free resources %d\n",
+ __func__, __LINE__, rule_id);
+ ++count;
+ }
+ err = vcap_del_rule(vctrl, ndev, rule_id);
+ if (err) {
+ pr_err("%s:%d: could not delete rule %d\n",
+ __func__, __LINE__, rule_id);
+ break;
+ }
+ }
+ return err;
+}
+
+static int sparx5_tc_flower_stats(struct net_device *ndev,
+ struct flow_cls_offload *fco,
+ struct vcap_admin *admin)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ struct vcap_counter ctr = {};
+ struct vcap_control *vctrl;
+ ulong lastused = 0;
+ int err;
+
+ vctrl = port->sparx5->vcap_ctrl;
+ err = vcap_get_rule_count_by_cookie(vctrl, &ctr, fco->cookie);
+ if (err)
+ return err;
+ flow_stats_update(&fco->stats, 0x0, ctr.value, 0, lastused,
+ FLOW_ACTION_HW_STATS_IMMEDIATE);
+ return err;
+}
+
+static int sparx5_tc_flower_template_create(struct net_device *ndev,
+ struct flow_cls_offload *fco,
+ struct vcap_admin *admin)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ struct vcap_tc_flower_parse_usage state = {
+ .fco = fco,
+ .l3_proto = ETH_P_ALL,
+ .admin = admin,
+ };
+ struct sparx5_tc_flower_template *ftp;
+ struct vcap_keyset_list kslist = {};
+ enum vcap_keyfield_set keysets[10];
+ struct vcap_control *vctrl;
+ struct vcap_rule *vrule;
+ int count, err;
+
+ if (admin->vtype == VCAP_TYPE_ES0) {
+ pr_err("%s:%d: %s\n", __func__, __LINE__,
+ "VCAP does not support templates");
+ return -EINVAL;
+ }
+
+ count = vcap_admin_rule_count(admin, fco->common.chain_index);
+ if (count > 0) {
+ pr_err("%s:%d: %s\n", __func__, __LINE__,
+ "Filters are already present");
+ return -EBUSY;
+ }
+
+ ftp = kzalloc(sizeof(*ftp), GFP_KERNEL);
+ if (!ftp)
+ return -ENOMEM;
+
+ ftp->cid = fco->common.chain_index;
+ ftp->orig = VCAP_KFS_NO_VALUE;
+ ftp->keyset = VCAP_KFS_NO_VALUE;
+
+ vctrl = port->sparx5->vcap_ctrl;
+ vrule = vcap_alloc_rule(vctrl, ndev, fco->common.chain_index,
+ VCAP_USER_TC, fco->common.prio, 0);
+ if (IS_ERR(vrule)) {
+ err = PTR_ERR(vrule);
+ goto err_rule;
+ }
+
+ state.vrule = vrule;
+ state.frule = flow_cls_offload_flow_rule(fco);
+ err = sparx5_tc_use_dissectors(&state, admin, vrule);
+ if (err) {
+ pr_err("%s:%d: key error: %d\n", __func__, __LINE__, err);
+ goto out;
+ }
+
+ ftp->l3_proto = state.l3_proto;
+
+ sparx5_tc_flower_simplify_rule(admin, vrule, state.l3_proto);
+
+ /* Find the keysets that the rule can use */
+ kslist.keysets = keysets;
+ kslist.max = ARRAY_SIZE(keysets);
+ if (!vcap_rule_find_keysets(vrule, &kslist)) {
+ pr_err("%s:%d: %s\n", __func__, __LINE__,
+ "Could not find a suitable keyset");
+ err = -ENOENT;
+ goto out;
+ }
+
+ ftp->keyset = vcap_select_min_rule_keyset(vctrl, admin->vtype, &kslist);
+ kslist.cnt = 0;
+ sparx5_vcap_set_port_keyset(ndev, admin, fco->common.chain_index,
+ state.l3_proto,
+ ftp->keyset,
+ &kslist);
+
+ if (kslist.cnt > 0)
+ ftp->orig = kslist.keysets[0];
+
+ /* Store new template */
+ list_add_tail(&ftp->list, &port->tc_templates);
+ vcap_free_rule(vrule);
+ return 0;
+
+out:
+ vcap_free_rule(vrule);
+err_rule:
+ kfree(ftp);
+ return err;
+}
+
+static int sparx5_tc_flower_template_destroy(struct net_device *ndev,
+ struct flow_cls_offload *fco,
+ struct vcap_admin *admin)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ struct sparx5_tc_flower_template *ftp, *tmp;
+ int err = -ENOENT;
+
+ /* Rules using the template are removed by the tc framework */
+ list_for_each_entry_safe(ftp, tmp, &port->tc_templates, list) {
+ if (ftp->cid != fco->common.chain_index)
+ continue;
+
+ sparx5_vcap_set_port_keyset(ndev, admin,
+ fco->common.chain_index,
+ ftp->l3_proto, ftp->orig,
+ NULL);
+ list_del(&ftp->list);
+ kfree(ftp);
+ err = 0;
+ break;
+ }
+ return err;
+}
+
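+/* Entry point for TC flower offload: look up the VCAP instance that
+ * owns the chain id and dispatch on the flower command. A plausible
+ * user space sequence (chain numbers are illustrative only) first
+ * enables a lookup via a matchall goto, then adds filters on that
+ * chain:
+ *
+ *   tc qdisc add dev eth0 clsact
+ *   tc filter add dev eth0 ingress chain 0 prio 10 \
+ *      matchall skip_sw action goto chain 8000000
+ *   tc filter add dev eth0 ingress chain 8000000 prio 10 \
+ *      flower skip_sw dst_ip 10.0.0.1 action drop
+ */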
+int sparx5_tc_flower(struct net_device *ndev, struct flow_cls_offload *fco,
+ bool ingress)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ struct vcap_control *vctrl;
+ struct vcap_admin *admin;
+ int err = -EINVAL;
+
+ /* Get vcap instance from the chain id */
+ vctrl = port->sparx5->vcap_ctrl;
+ admin = vcap_find_admin(vctrl, fco->common.chain_index);
+ if (!admin) {
+ NL_SET_ERR_MSG_MOD(fco->common.extack, "Invalid chain");
+ return err;
+ }
+
+ switch (fco->command) {
+ case FLOW_CLS_REPLACE:
+ return sparx5_tc_flower_replace(ndev, fco, admin, ingress);
+ case FLOW_CLS_DESTROY:
+ return sparx5_tc_flower_destroy(ndev, fco, admin);
+ case FLOW_CLS_STATS:
+ return sparx5_tc_flower_stats(ndev, fco, admin);
+ case FLOW_CLS_TMPLT_CREATE:
+ return sparx5_tc_flower_template_create(ndev, fco, admin);
+ case FLOW_CLS_TMPLT_DESTROY:
+ return sparx5_tc_flower_template_destroy(ndev, fco, admin);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_tc_matchall.c b/drivers/net/ethernet/microchip/sparx5/sparx5_tc_matchall.c
new file mode 100644
index 0000000000..d88a93f226
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_tc_matchall.c
@@ -0,0 +1,97 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip VCAP API
+ *
+ * Copyright (c) 2022 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include "sparx5_tc.h"
+#include "vcap_api.h"
+#include "vcap_api_client.h"
+#include "sparx5_main_regs.h"
+#include "sparx5_main.h"
+#include "sparx5_vcap_impl.h"
+
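+/* A matchall filter with a single goto action enables the VCAP lookup
+ * serving the destination chain; the distinct error codes returned by
+ * vcap_enable_lookups() are translated into extack messages.
+ */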
+static int sparx5_tc_matchall_replace(struct net_device *ndev,
+ struct tc_cls_matchall_offload *tmo,
+ bool ingress)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ struct flow_action_entry *action;
+ struct sparx5 *sparx5;
+ int err;
+
+ if (!flow_offload_has_one_action(&tmo->rule->action)) {
+ NL_SET_ERR_MSG_MOD(tmo->common.extack,
+ "Only one action per filter is supported");
+ return -EOPNOTSUPP;
+ }
+ action = &tmo->rule->action.entries[0];
+
+ sparx5 = port->sparx5;
+ switch (action->id) {
+ case FLOW_ACTION_GOTO:
+ err = vcap_enable_lookups(sparx5->vcap_ctrl, ndev,
+ tmo->common.chain_index,
+ action->chain_index, tmo->cookie,
+ true);
+ if (err == -EFAULT) {
+ NL_SET_ERR_MSG_MOD(tmo->common.extack,
+ "Unsupported goto chain");
+ return -EOPNOTSUPP;
+ }
+ if (err == -EADDRINUSE) {
+ NL_SET_ERR_MSG_MOD(tmo->common.extack,
+ "VCAP already enabled");
+ return -EOPNOTSUPP;
+ }
+ if (err == -EADDRNOTAVAIL) {
+ NL_SET_ERR_MSG_MOD(tmo->common.extack,
+ "Already matching this chain");
+ return -EOPNOTSUPP;
+ }
+ if (err) {
+ NL_SET_ERR_MSG_MOD(tmo->common.extack,
+ "Could not enable VCAP lookups");
+ return err;
+ }
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(tmo->common.extack, "Unsupported action");
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
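+/* Matchall destroy arrives without a rule; a filter created by the
+ * goto action above is identified by its cookie alone, and removing
+ * it disables the VCAP lookups it enabled.
+ */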
+static int sparx5_tc_matchall_destroy(struct net_device *ndev,
+ struct tc_cls_matchall_offload *tmo,
+ bool ingress)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ struct sparx5 *sparx5;
+ int err;
+
+ sparx5 = port->sparx5;
+ if (!tmo->rule && tmo->cookie) {
+ err = vcap_enable_lookups(sparx5->vcap_ctrl, ndev,
+ 0, 0, tmo->cookie, false);
+ if (err)
+ return err;
+ return 0;
+ }
+ NL_SET_ERR_MSG_MOD(tmo->common.extack, "Unsupported action");
+ return -EOPNOTSUPP;
+}
+
+int sparx5_tc_matchall(struct net_device *ndev,
+ struct tc_cls_matchall_offload *tmo,
+ bool ingress)
+{
+ switch (tmo->command) {
+ case TC_CLSMATCHALL_REPLACE:
+ return sparx5_tc_matchall_replace(ndev, tmo, ingress);
+ case TC_CLSMATCHALL_DESTROY:
+ return sparx5_tc_matchall_destroy(ndev, tmo, ingress);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_ag_api.c b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_ag_api.c
new file mode 100644
index 0000000000..556d6ea0ac
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_ag_api.c
@@ -0,0 +1,3874 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/* Copyright (C) 2023 Microchip Technology Inc. and its subsidiaries.
+ * Microchip VCAP API
+ */
+
+/* This file is autogenerated by cml-utils 2023-02-10 11:15:56 +0100.
+ * Commit ID: c30fb4bf0281cd4a7133bdab6682f9e43c872ada
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+
+#include "vcap_api.h"
+#include "sparx5_vcap_ag_api.h"
+
+/* keyfields */
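+/* Each table below maps a VCAP_KF_* key field to its bit offset and
+ * width within the packed key of one keyset; the field type selects
+ * the value container used by the VCAP API (a single bit, a u32, or
+ * a wider byte-array field).
+ */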
+static const struct vcap_field is0_normal_7tuple_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 0,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 1,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_GEN_IDX_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 2,
+ .width = 2,
+ },
+ [VCAP_KF_LOOKUP_GEN_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 4,
+ .width = 12,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 16,
+ .width = 2,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U72,
+ .offset = 18,
+ .width = 65,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 83,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 84,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 85,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_TPID0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 88,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_PCP0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 91,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 94,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 95,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_TPID1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 107,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_PCP1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 110,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI1] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 113,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 114,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_TPID2] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 126,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_PCP2] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 129,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI2] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 132,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID2] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 133,
+ .width = 12,
+ },
+ [VCAP_KF_L2_DMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 145,
+ .width = 48,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 193,
+ .width = 48,
+ },
+ [VCAP_KF_IP_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 241,
+ .width = 1,
+ },
+ [VCAP_KF_ETYPE_LEN_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 242,
+ .width = 1,
+ },
+ [VCAP_KF_ETYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 243,
+ .width = 16,
+ },
+ [VCAP_KF_IP_SNAP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 259,
+ .width = 1,
+ },
+ [VCAP_KF_IP4_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 260,
+ .width = 1,
+ },
+ [VCAP_KF_L3_FRAGMENT_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 261,
+ .width = 2,
+ },
+ [VCAP_KF_L3_FRAG_INVLD_L4_LEN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 263,
+ .width = 1,
+ },
+ [VCAP_KF_L3_OPTIONS_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 264,
+ .width = 1,
+ },
+ [VCAP_KF_L3_DSCP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 265,
+ .width = 6,
+ },
+ [VCAP_KF_L3_IP6_DIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 271,
+ .width = 128,
+ },
+ [VCAP_KF_L3_IP6_SIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 399,
+ .width = 128,
+ },
+ [VCAP_KF_TCP_UDP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 527,
+ .width = 1,
+ },
+ [VCAP_KF_TCP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 528,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 529,
+ .width = 16,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 545,
+ .width = 8,
+ },
+};
+
+static const struct vcap_field is0_normal_5tuple_ip4_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 2,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 2,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_GEN_IDX_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 3,
+ .width = 2,
+ },
+ [VCAP_KF_LOOKUP_GEN_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 5,
+ .width = 12,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 17,
+ .width = 2,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U72,
+ .offset = 19,
+ .width = 65,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 84,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 85,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 86,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_TPID0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 89,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_PCP0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 92,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 95,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 96,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_TPID1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 108,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_PCP1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 111,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI1] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 114,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 115,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_TPID2] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 127,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_PCP2] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 130,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI2] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 133,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID2] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 134,
+ .width = 12,
+ },
+ [VCAP_KF_IP_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 146,
+ .width = 1,
+ },
+ [VCAP_KF_IP4_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 147,
+ .width = 1,
+ },
+ [VCAP_KF_L3_FRAGMENT_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 148,
+ .width = 2,
+ },
+ [VCAP_KF_L3_FRAG_INVLD_L4_LEN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 150,
+ .width = 1,
+ },
+ [VCAP_KF_L3_OPTIONS_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 151,
+ .width = 1,
+ },
+ [VCAP_KF_L3_DSCP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 152,
+ .width = 6,
+ },
+ [VCAP_KF_L3_IP4_DIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 158,
+ .width = 32,
+ },
+ [VCAP_KF_L3_IP4_SIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 190,
+ .width = 32,
+ },
+ [VCAP_KF_L3_IP_PROTO] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 222,
+ .width = 8,
+ },
+ [VCAP_KF_TCP_UDP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 230,
+ .width = 1,
+ },
+ [VCAP_KF_TCP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 231,
+ .width = 1,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 232,
+ .width = 8,
+ },
+ [VCAP_KF_IP_PAYLOAD_5TUPLE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 240,
+ .width = 32,
+ },
+};
+
+static const struct vcap_field is2_mac_etype_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 4,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 4,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_PAG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 5,
+ .width = 8,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_L3] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 14,
+ .width = 4,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 18,
+ .width = 2,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 20,
+ .width = 32,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 52,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 53,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 54,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 55,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 56,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 68,
+ .width = 13,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 81,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 82,
+ .width = 3,
+ },
+ [VCAP_KF_L2_FWD_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 85,
+ .width = 1,
+ },
+ [VCAP_KF_L3_RT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 88,
+ .width = 1,
+ },
+ [VCAP_KF_L3_DST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 89,
+ .width = 1,
+ },
+ [VCAP_KF_L2_DMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 90,
+ .width = 48,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 138,
+ .width = 48,
+ },
+ [VCAP_KF_ETYPE_LEN_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 186,
+ .width = 1,
+ },
+ [VCAP_KF_ETYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 187,
+ .width = 16,
+ },
+ [VCAP_KF_L2_PAYLOAD_ETYPE] = {
+ .type = VCAP_FIELD_U64,
+ .offset = 203,
+ .width = 64,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 267,
+ .width = 16,
+ },
+ [VCAP_KF_OAM_CCM_CNTS_EQ0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 283,
+ .width = 1,
+ },
+ [VCAP_KF_OAM_Y1731_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 284,
+ .width = 1,
+ },
+};
+
+static const struct vcap_field is2_arp_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 4,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 4,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_PAG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 5,
+ .width = 8,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_L3] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 14,
+ .width = 4,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 18,
+ .width = 2,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 20,
+ .width = 32,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 52,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 53,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 54,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 55,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 56,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 68,
+ .width = 13,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 81,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 82,
+ .width = 3,
+ },
+ [VCAP_KF_L2_FWD_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 85,
+ .width = 1,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 86,
+ .width = 48,
+ },
+ [VCAP_KF_ARP_ADDR_SPACE_OK_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 134,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_PROTO_SPACE_OK_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 135,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_LEN_OK_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 136,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_TGT_MATCH_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 137,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_SENDER_MATCH_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 138,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_OPCODE_UNKNOWN_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 139,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_OPCODE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 140,
+ .width = 2,
+ },
+ [VCAP_KF_L3_IP4_DIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 142,
+ .width = 32,
+ },
+ [VCAP_KF_L3_IP4_SIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 174,
+ .width = 32,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 206,
+ .width = 1,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 207,
+ .width = 16,
+ },
+};
+
+static const struct vcap_field is2_ip4_tcp_udp_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 4,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 4,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_PAG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 5,
+ .width = 8,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_L3] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 14,
+ .width = 4,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 18,
+ .width = 2,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 20,
+ .width = 32,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 52,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 53,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 54,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 55,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 56,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 68,
+ .width = 13,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 81,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 82,
+ .width = 3,
+ },
+ [VCAP_KF_L2_FWD_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 85,
+ .width = 1,
+ },
+ [VCAP_KF_L3_RT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 88,
+ .width = 1,
+ },
+ [VCAP_KF_L3_DST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 89,
+ .width = 1,
+ },
+ [VCAP_KF_IP4_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 90,
+ .width = 1,
+ },
+ [VCAP_KF_L3_FRAGMENT_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 91,
+ .width = 2,
+ },
+ [VCAP_KF_L3_FRAG_INVLD_L4_LEN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 93,
+ .width = 1,
+ },
+ [VCAP_KF_L3_OPTIONS_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 94,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TTL_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 95,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TOS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 96,
+ .width = 8,
+ },
+ [VCAP_KF_L3_IP4_DIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 104,
+ .width = 32,
+ },
+ [VCAP_KF_L3_IP4_SIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 136,
+ .width = 32,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 168,
+ .width = 1,
+ },
+ [VCAP_KF_TCP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 169,
+ .width = 1,
+ },
+ [VCAP_KF_L4_DPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 170,
+ .width = 16,
+ },
+ [VCAP_KF_L4_SPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 186,
+ .width = 16,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 202,
+ .width = 16,
+ },
+ [VCAP_KF_L4_SPORT_EQ_DPORT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 218,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SEQUENCE_EQ0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 219,
+ .width = 1,
+ },
+ [VCAP_KF_L4_FIN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 220,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SYN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 221,
+ .width = 1,
+ },
+ [VCAP_KF_L4_RST] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 222,
+ .width = 1,
+ },
+ [VCAP_KF_L4_PSH] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 223,
+ .width = 1,
+ },
+ [VCAP_KF_L4_ACK] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 224,
+ .width = 1,
+ },
+ [VCAP_KF_L4_URG] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 225,
+ .width = 1,
+ },
+ [VCAP_KF_L4_PAYLOAD] = {
+ .type = VCAP_FIELD_U64,
+ .offset = 226,
+ .width = 64,
+ },
+};
+
+static const struct vcap_field is2_ip4_other_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 4,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 4,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_PAG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 5,
+ .width = 8,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_L3] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 14,
+ .width = 4,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 18,
+ .width = 2,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 20,
+ .width = 32,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 52,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 53,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 54,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 55,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 56,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 68,
+ .width = 13,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 81,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 82,
+ .width = 3,
+ },
+ [VCAP_KF_L2_FWD_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 85,
+ .width = 1,
+ },
+ [VCAP_KF_L3_RT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 88,
+ .width = 1,
+ },
+ [VCAP_KF_L3_DST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 89,
+ .width = 1,
+ },
+ [VCAP_KF_IP4_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 90,
+ .width = 1,
+ },
+ [VCAP_KF_L3_FRAGMENT_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 91,
+ .width = 2,
+ },
+ [VCAP_KF_L3_FRAG_INVLD_L4_LEN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 93,
+ .width = 1,
+ },
+ [VCAP_KF_L3_OPTIONS_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 94,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TTL_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 95,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TOS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 96,
+ .width = 8,
+ },
+ [VCAP_KF_L3_IP4_DIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 104,
+ .width = 32,
+ },
+ [VCAP_KF_L3_IP4_SIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 136,
+ .width = 32,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 168,
+ .width = 1,
+ },
+ [VCAP_KF_L3_IP_PROTO] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 169,
+ .width = 8,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 177,
+ .width = 16,
+ },
+ [VCAP_KF_L3_PAYLOAD] = {
+ .type = VCAP_FIELD_U112,
+ .offset = 193,
+ .width = 96,
+ },
+};
+
+static const struct vcap_field is2_ip6_std_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 4,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 4,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_PAG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 5,
+ .width = 8,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_L3] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 14,
+ .width = 4,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 18,
+ .width = 2,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 20,
+ .width = 32,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 52,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 53,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 54,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 55,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 56,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 68,
+ .width = 13,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 81,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 82,
+ .width = 3,
+ },
+ [VCAP_KF_L2_FWD_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 85,
+ .width = 1,
+ },
+ [VCAP_KF_L3_RT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 88,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TTL_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 90,
+ .width = 1,
+ },
+ [VCAP_KF_L3_IP6_SIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 91,
+ .width = 128,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 219,
+ .width = 1,
+ },
+ [VCAP_KF_L3_IP_PROTO] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 220,
+ .width = 8,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 228,
+ .width = 16,
+ },
+ [VCAP_KF_L3_PAYLOAD] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 244,
+ .width = 40,
+ },
+};
+
+static const struct vcap_field is2_ip_7tuple_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 2,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 2,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_PAG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 3,
+ .width = 8,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_L3] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 11,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 12,
+ .width = 4,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 16,
+ .width = 2,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U72,
+ .offset = 18,
+ .width = 65,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 83,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 84,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 85,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 86,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 87,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 99,
+ .width = 13,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 112,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 113,
+ .width = 3,
+ },
+ [VCAP_KF_L2_FWD_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 116,
+ .width = 1,
+ },
+ [VCAP_KF_L3_RT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 119,
+ .width = 1,
+ },
+ [VCAP_KF_L3_DST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 120,
+ .width = 1,
+ },
+ [VCAP_KF_L2_DMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 121,
+ .width = 48,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 169,
+ .width = 48,
+ },
+ [VCAP_KF_IP4_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 217,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TTL_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 218,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TOS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 219,
+ .width = 8,
+ },
+ [VCAP_KF_L3_IP6_DIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 227,
+ .width = 128,
+ },
+ [VCAP_KF_L3_IP6_SIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 355,
+ .width = 128,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 483,
+ .width = 1,
+ },
+ [VCAP_KF_TCP_UDP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 484,
+ .width = 1,
+ },
+ [VCAP_KF_TCP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 485,
+ .width = 1,
+ },
+ [VCAP_KF_L4_DPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 486,
+ .width = 16,
+ },
+ [VCAP_KF_L4_SPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 502,
+ .width = 16,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 518,
+ .width = 16,
+ },
+ [VCAP_KF_L4_SPORT_EQ_DPORT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 534,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SEQUENCE_EQ0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 535,
+ .width = 1,
+ },
+ [VCAP_KF_L4_FIN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 536,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SYN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 537,
+ .width = 1,
+ },
+ [VCAP_KF_L4_RST] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 538,
+ .width = 1,
+ },
+ [VCAP_KF_L4_PSH] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 539,
+ .width = 1,
+ },
+ [VCAP_KF_L4_ACK] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 540,
+ .width = 1,
+ },
+ [VCAP_KF_L4_URG] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 541,
+ .width = 1,
+ },
+ [VCAP_KF_L4_PAYLOAD] = {
+ .type = VCAP_FIELD_U64,
+ .offset = 542,
+ .width = 64,
+ },
+};
+
+static const struct vcap_field es0_isdx_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 0,
+ .width = 1,
+ },
+ [VCAP_KF_IF_EGR_PORT_NO] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 1,
+ .width = 7,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 8,
+ .width = 13,
+ },
+ [VCAP_KF_COSID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 21,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_TPID] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 24,
+ .width = 3,
+ },
+ [VCAP_KF_L3_DPL_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 27,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 28,
+ .width = 1,
+ },
+ [VCAP_KF_PROT_ACTIVE] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 29,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 39,
+ .width = 12,
+ },
+};
+
+static const struct vcap_field es2_mac_etype_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 3,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 3,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 14,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 15,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 16,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 28,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 29,
+ .width = 13,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 42,
+ .width = 3,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 45,
+ .width = 32,
+ },
+ [VCAP_KF_IF_IGR_PORT_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 77,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 78,
+ .width = 9,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 87,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 90,
+ .width = 1,
+ },
+ [VCAP_KF_COSID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 91,
+ .width = 3,
+ },
+ [VCAP_KF_L3_DPL_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 94,
+ .width = 1,
+ },
+ [VCAP_KF_L3_RT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 95,
+ .width = 1,
+ },
+ [VCAP_KF_L2_DMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 99,
+ .width = 48,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 147,
+ .width = 48,
+ },
+ [VCAP_KF_ETYPE_LEN_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 195,
+ .width = 1,
+ },
+ [VCAP_KF_ETYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 196,
+ .width = 16,
+ },
+ [VCAP_KF_L2_PAYLOAD_ETYPE] = {
+ .type = VCAP_FIELD_U64,
+ .offset = 212,
+ .width = 64,
+ },
+ [VCAP_KF_OAM_CCM_CNTS_EQ0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 276,
+ .width = 1,
+ },
+ [VCAP_KF_OAM_Y1731_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 277,
+ .width = 1,
+ },
+};
+
+static const struct vcap_field es2_arp_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 3,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 3,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 14,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 15,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 16,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 28,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 29,
+ .width = 13,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 42,
+ .width = 3,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 45,
+ .width = 32,
+ },
+ [VCAP_KF_IF_IGR_PORT_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 77,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 78,
+ .width = 9,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 87,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 90,
+ .width = 1,
+ },
+ [VCAP_KF_COSID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 91,
+ .width = 3,
+ },
+ [VCAP_KF_L3_DPL_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 94,
+ .width = 1,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 98,
+ .width = 48,
+ },
+ [VCAP_KF_ARP_ADDR_SPACE_OK_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 146,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_PROTO_SPACE_OK_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 147,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_LEN_OK_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 148,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_TGT_MATCH_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 149,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_SENDER_MATCH_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 150,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_OPCODE_UNKNOWN_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 151,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_OPCODE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 152,
+ .width = 2,
+ },
+ [VCAP_KF_L3_IP4_DIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 154,
+ .width = 32,
+ },
+ [VCAP_KF_L3_IP4_SIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 186,
+ .width = 32,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 218,
+ .width = 1,
+ },
+};
+
+static const struct vcap_field es2_ip4_tcp_udp_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 3,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 3,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 14,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 15,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 16,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 28,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 29,
+ .width = 13,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 42,
+ .width = 3,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 45,
+ .width = 32,
+ },
+ [VCAP_KF_IF_IGR_PORT_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 77,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 78,
+ .width = 9,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 87,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 90,
+ .width = 1,
+ },
+ [VCAP_KF_COSID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 91,
+ .width = 3,
+ },
+ [VCAP_KF_L3_DPL_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 94,
+ .width = 1,
+ },
+ [VCAP_KF_L3_RT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 95,
+ .width = 1,
+ },
+ [VCAP_KF_IP4_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 99,
+ .width = 1,
+ },
+ [VCAP_KF_L3_FRAGMENT_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 100,
+ .width = 2,
+ },
+ [VCAP_KF_L3_OPTIONS_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 102,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TTL_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 103,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TOS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 104,
+ .width = 8,
+ },
+ [VCAP_KF_L3_IP4_DIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 112,
+ .width = 32,
+ },
+ [VCAP_KF_L3_IP4_SIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 144,
+ .width = 32,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 176,
+ .width = 1,
+ },
+ [VCAP_KF_TCP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 177,
+ .width = 1,
+ },
+ [VCAP_KF_L4_DPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 178,
+ .width = 16,
+ },
+ [VCAP_KF_L4_SPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 194,
+ .width = 16,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 210,
+ .width = 16,
+ },
+ [VCAP_KF_L4_SPORT_EQ_DPORT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 226,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SEQUENCE_EQ0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 227,
+ .width = 1,
+ },
+ [VCAP_KF_L4_FIN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 228,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SYN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 229,
+ .width = 1,
+ },
+ [VCAP_KF_L4_RST] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 230,
+ .width = 1,
+ },
+ [VCAP_KF_L4_PSH] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 231,
+ .width = 1,
+ },
+ [VCAP_KF_L4_ACK] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 232,
+ .width = 1,
+ },
+ [VCAP_KF_L4_URG] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 233,
+ .width = 1,
+ },
+ [VCAP_KF_L4_PAYLOAD] = {
+ .type = VCAP_FIELD_U64,
+ .offset = 234,
+ .width = 64,
+ },
+};
+
+static const struct vcap_field es2_ip4_other_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 3,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 3,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 14,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 15,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 16,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 28,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 29,
+ .width = 13,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 42,
+ .width = 3,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 45,
+ .width = 32,
+ },
+ [VCAP_KF_IF_IGR_PORT_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 77,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 78,
+ .width = 9,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 87,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 90,
+ .width = 1,
+ },
+ [VCAP_KF_COSID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 91,
+ .width = 3,
+ },
+ [VCAP_KF_L3_DPL_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 94,
+ .width = 1,
+ },
+ [VCAP_KF_L3_RT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 95,
+ .width = 1,
+ },
+ [VCAP_KF_IP4_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 99,
+ .width = 1,
+ },
+ [VCAP_KF_L3_FRAGMENT_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 100,
+ .width = 2,
+ },
+ [VCAP_KF_L3_OPTIONS_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 102,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TTL_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 103,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TOS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 104,
+ .width = 8,
+ },
+ [VCAP_KF_L3_IP4_DIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 112,
+ .width = 32,
+ },
+ [VCAP_KF_L3_IP4_SIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 144,
+ .width = 32,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 176,
+ .width = 1,
+ },
+ [VCAP_KF_L3_IP_PROTO] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 177,
+ .width = 8,
+ },
+ [VCAP_KF_L3_PAYLOAD] = {
+ .type = VCAP_FIELD_U112,
+ .offset = 185,
+ .width = 96,
+ },
+};
+
+static const struct vcap_field es2_ip_7tuple_keyfield[] = {
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 0,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 10,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 11,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 12,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 13,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 25,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 26,
+ .width = 13,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 39,
+ .width = 3,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 42,
+ .width = 32,
+ },
+ [VCAP_KF_IF_IGR_PORT_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 74,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 75,
+ .width = 9,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 84,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 87,
+ .width = 1,
+ },
+ [VCAP_KF_COSID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 88,
+ .width = 3,
+ },
+ [VCAP_KF_L3_DPL_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 91,
+ .width = 1,
+ },
+ [VCAP_KF_L3_RT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 92,
+ .width = 1,
+ },
+ [VCAP_KF_L2_DMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 96,
+ .width = 48,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 144,
+ .width = 48,
+ },
+ [VCAP_KF_IP4_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 192,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TTL_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 193,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TOS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 194,
+ .width = 8,
+ },
+ [VCAP_KF_L3_IP6_DIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 202,
+ .width = 128,
+ },
+ [VCAP_KF_L3_IP6_SIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 330,
+ .width = 128,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 458,
+ .width = 1,
+ },
+ [VCAP_KF_TCP_UDP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 459,
+ .width = 1,
+ },
+ [VCAP_KF_TCP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 460,
+ .width = 1,
+ },
+ [VCAP_KF_L4_DPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 461,
+ .width = 16,
+ },
+ [VCAP_KF_L4_SPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 477,
+ .width = 16,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 493,
+ .width = 16,
+ },
+ [VCAP_KF_L4_SPORT_EQ_DPORT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 509,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SEQUENCE_EQ0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 510,
+ .width = 1,
+ },
+ [VCAP_KF_L4_FIN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 511,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SYN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 512,
+ .width = 1,
+ },
+ [VCAP_KF_L4_RST] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 513,
+ .width = 1,
+ },
+ [VCAP_KF_L4_PSH] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 514,
+ .width = 1,
+ },
+ [VCAP_KF_L4_ACK] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 515,
+ .width = 1,
+ },
+ [VCAP_KF_L4_URG] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 516,
+ .width = 1,
+ },
+ [VCAP_KF_L4_PAYLOAD] = {
+ .type = VCAP_FIELD_U64,
+ .offset = 517,
+ .width = 64,
+ },
+};
+
+static const struct vcap_field es2_ip6_std_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 3,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 3,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 14,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 15,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 16,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 28,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 29,
+ .width = 13,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 42,
+ .width = 3,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 45,
+ .width = 32,
+ },
+ [VCAP_KF_IF_IGR_PORT_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 77,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 78,
+ .width = 9,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 87,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 90,
+ .width = 1,
+ },
+ [VCAP_KF_COSID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 91,
+ .width = 3,
+ },
+ [VCAP_KF_L3_DPL_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 94,
+ .width = 1,
+ },
+ [VCAP_KF_L3_RT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 95,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TTL_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 99,
+ .width = 1,
+ },
+ [VCAP_KF_L3_IP6_SIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 100,
+ .width = 128,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 228,
+ .width = 1,
+ },
+ [VCAP_KF_L3_IP_PROTO] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 229,
+ .width = 8,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 237,
+ .width = 16,
+ },
+ [VCAP_KF_L3_PAYLOAD] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 253,
+ .width = 40,
+ },
+};
+
+/* keyfield_set */
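+/* Properties of each keyset: the type id encoded in the key (-1 when
+ * the keyset fills a whole row and so needs no type field), the
+ * keyset size in subwords (sw_per_item), and the number of keys of
+ * that size that fit in one VCAP row (sw_cnt).
+ */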
+static const struct vcap_set is0_keyfield_set[] = {
+ [VCAP_KFS_NORMAL_7TUPLE] = {
+ .type_id = 0,
+ .sw_per_item = 12,
+ .sw_cnt = 1,
+ },
+ [VCAP_KFS_NORMAL_5TUPLE_IP4] = {
+ .type_id = 2,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+};
+
+static const struct vcap_set is2_keyfield_set[] = {
+ [VCAP_KFS_MAC_ETYPE] = {
+ .type_id = 0,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_ARP] = {
+ .type_id = 3,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_IP4_TCP_UDP] = {
+ .type_id = 4,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_IP4_OTHER] = {
+ .type_id = 5,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_IP6_STD] = {
+ .type_id = 6,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_IP_7TUPLE] = {
+ .type_id = 1,
+ .sw_per_item = 12,
+ .sw_cnt = 1,
+ },
+};
+
+static const struct vcap_set es0_keyfield_set[] = {
+ [VCAP_KFS_ISDX] = {
+ .type_id = 0,
+ .sw_per_item = 1,
+ .sw_cnt = 1,
+ },
+};
+
+static const struct vcap_set es2_keyfield_set[] = {
+ [VCAP_KFS_MAC_ETYPE] = {
+ .type_id = 0,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_ARP] = {
+ .type_id = 1,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_IP4_TCP_UDP] = {
+ .type_id = 2,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_IP4_OTHER] = {
+ .type_id = 3,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_IP_7TUPLE] = {
+ .type_id = -1,
+ .sw_per_item = 12,
+ .sw_cnt = 1,
+ },
+ [VCAP_KFS_IP6_STD] = {
+ .type_id = 4,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+};
+
+/* keyfield_set map */
+static const struct vcap_field *is0_keyfield_set_map[] = {
+ [VCAP_KFS_NORMAL_7TUPLE] = is0_normal_7tuple_keyfield,
+ [VCAP_KFS_NORMAL_5TUPLE_IP4] = is0_normal_5tuple_ip4_keyfield,
+};
+
+static const struct vcap_field *is2_keyfield_set_map[] = {
+ [VCAP_KFS_MAC_ETYPE] = is2_mac_etype_keyfield,
+ [VCAP_KFS_ARP] = is2_arp_keyfield,
+ [VCAP_KFS_IP4_TCP_UDP] = is2_ip4_tcp_udp_keyfield,
+ [VCAP_KFS_IP4_OTHER] = is2_ip4_other_keyfield,
+ [VCAP_KFS_IP6_STD] = is2_ip6_std_keyfield,
+ [VCAP_KFS_IP_7TUPLE] = is2_ip_7tuple_keyfield,
+};
+
+static const struct vcap_field *es0_keyfield_set_map[] = {
+ [VCAP_KFS_ISDX] = es0_isdx_keyfield,
+};
+
+static const struct vcap_field *es2_keyfield_set_map[] = {
+ [VCAP_KFS_MAC_ETYPE] = es2_mac_etype_keyfield,
+ [VCAP_KFS_ARP] = es2_arp_keyfield,
+ [VCAP_KFS_IP4_TCP_UDP] = es2_ip4_tcp_udp_keyfield,
+ [VCAP_KFS_IP4_OTHER] = es2_ip4_other_keyfield,
+ [VCAP_KFS_IP_7TUPLE] = es2_ip_7tuple_keyfield,
+ [VCAP_KFS_IP6_STD] = es2_ip6_std_keyfield,
+};
+
+/* keyfield_set map sizes */
+static int is0_keyfield_set_map_size[] = {
+ [VCAP_KFS_NORMAL_7TUPLE] = ARRAY_SIZE(is0_normal_7tuple_keyfield),
+ [VCAP_KFS_NORMAL_5TUPLE_IP4] = ARRAY_SIZE(is0_normal_5tuple_ip4_keyfield),
+};
+
+static int is2_keyfield_set_map_size[] = {
+ [VCAP_KFS_MAC_ETYPE] = ARRAY_SIZE(is2_mac_etype_keyfield),
+ [VCAP_KFS_ARP] = ARRAY_SIZE(is2_arp_keyfield),
+ [VCAP_KFS_IP4_TCP_UDP] = ARRAY_SIZE(is2_ip4_tcp_udp_keyfield),
+ [VCAP_KFS_IP4_OTHER] = ARRAY_SIZE(is2_ip4_other_keyfield),
+ [VCAP_KFS_IP6_STD] = ARRAY_SIZE(is2_ip6_std_keyfield),
+ [VCAP_KFS_IP_7TUPLE] = ARRAY_SIZE(is2_ip_7tuple_keyfield),
+};
+
+static int es0_keyfield_set_map_size[] = {
+ [VCAP_KFS_ISDX] = ARRAY_SIZE(es0_isdx_keyfield),
+};
+
+static int es2_keyfield_set_map_size[] = {
+ [VCAP_KFS_MAC_ETYPE] = ARRAY_SIZE(es2_mac_etype_keyfield),
+ [VCAP_KFS_ARP] = ARRAY_SIZE(es2_arp_keyfield),
+ [VCAP_KFS_IP4_TCP_UDP] = ARRAY_SIZE(es2_ip4_tcp_udp_keyfield),
+ [VCAP_KFS_IP4_OTHER] = ARRAY_SIZE(es2_ip4_other_keyfield),
+ [VCAP_KFS_IP_7TUPLE] = ARRAY_SIZE(es2_ip_7tuple_keyfield),
+ [VCAP_KFS_IP6_STD] = ARRAY_SIZE(es2_ip6_std_keyfield),
+};
+
+/* actionfields */
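+/* Like the key field tables above, each action field table maps a
+ * VCAP_AF_* action field to its bit offset and width within the
+ * packed action of one actionset.
+ */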
+static const struct vcap_field is0_classification_actionfield[] = {
+ [VCAP_AF_TYPE] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 0,
+ .width = 1,
+ },
+ [VCAP_AF_DSCP_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 1,
+ .width = 1,
+ },
+ [VCAP_AF_DSCP_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 2,
+ .width = 6,
+ },
+ [VCAP_AF_QOS_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 12,
+ .width = 1,
+ },
+ [VCAP_AF_QOS_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 13,
+ .width = 3,
+ },
+ [VCAP_AF_DP_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 16,
+ .width = 1,
+ },
+ [VCAP_AF_DP_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 17,
+ .width = 2,
+ },
+ [VCAP_AF_DEI_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 19,
+ .width = 1,
+ },
+ [VCAP_AF_DEI_VAL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 20,
+ .width = 1,
+ },
+ [VCAP_AF_PCP_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 21,
+ .width = 1,
+ },
+ [VCAP_AF_PCP_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 22,
+ .width = 3,
+ },
+ [VCAP_AF_MAP_LOOKUP_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 25,
+ .width = 2,
+ },
+ [VCAP_AF_MAP_KEY] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 27,
+ .width = 3,
+ },
+ [VCAP_AF_MAP_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 30,
+ .width = 9,
+ },
+ [VCAP_AF_CLS_VID_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 39,
+ .width = 3,
+ },
+ [VCAP_AF_VID_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 45,
+ .width = 13,
+ },
+ [VCAP_AF_ISDX_ADD_REPLACE_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 68,
+ .width = 1,
+ },
+ [VCAP_AF_ISDX_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 69,
+ .width = 12,
+ },
+ [VCAP_AF_PAG_OVERRIDE_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 109,
+ .width = 8,
+ },
+ [VCAP_AF_PAG_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 117,
+ .width = 8,
+ },
+ [VCAP_AF_NXT_IDX_CTRL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 171,
+ .width = 3,
+ },
+ [VCAP_AF_NXT_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 174,
+ .width = 12,
+ },
+};
+
+static const struct vcap_field is0_full_actionfield[] = {
+ [VCAP_AF_DSCP_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 0,
+ .width = 1,
+ },
+ [VCAP_AF_DSCP_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 1,
+ .width = 6,
+ },
+ [VCAP_AF_QOS_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 11,
+ .width = 1,
+ },
+ [VCAP_AF_QOS_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 12,
+ .width = 3,
+ },
+ [VCAP_AF_DP_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 15,
+ .width = 1,
+ },
+ [VCAP_AF_DP_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 16,
+ .width = 2,
+ },
+ [VCAP_AF_DEI_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 18,
+ .width = 1,
+ },
+ [VCAP_AF_DEI_VAL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 19,
+ .width = 1,
+ },
+ [VCAP_AF_PCP_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 20,
+ .width = 1,
+ },
+ [VCAP_AF_PCP_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 21,
+ .width = 3,
+ },
+ [VCAP_AF_MAP_LOOKUP_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 24,
+ .width = 2,
+ },
+ [VCAP_AF_MAP_KEY] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 26,
+ .width = 3,
+ },
+ [VCAP_AF_MAP_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 29,
+ .width = 9,
+ },
+ [VCAP_AF_CLS_VID_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 38,
+ .width = 3,
+ },
+ [VCAP_AF_VID_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 44,
+ .width = 13,
+ },
+ [VCAP_AF_ISDX_ADD_REPLACE_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 67,
+ .width = 1,
+ },
+ [VCAP_AF_ISDX_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 68,
+ .width = 12,
+ },
+ [VCAP_AF_MASK_MODE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 80,
+ .width = 3,
+ },
+ [VCAP_AF_PORT_MASK] = {
+ .type = VCAP_FIELD_U72,
+ .offset = 83,
+ .width = 65,
+ },
+ [VCAP_AF_PAG_OVERRIDE_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 204,
+ .width = 8,
+ },
+ [VCAP_AF_PAG_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 212,
+ .width = 8,
+ },
+ [VCAP_AF_NXT_IDX_CTRL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 298,
+ .width = 3,
+ },
+ [VCAP_AF_NXT_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 301,
+ .width = 12,
+ },
+};
+
+static const struct vcap_field is0_class_reduced_actionfield[] = {
+ [VCAP_AF_TYPE] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 0,
+ .width = 1,
+ },
+ [VCAP_AF_QOS_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 5,
+ .width = 1,
+ },
+ [VCAP_AF_QOS_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 6,
+ .width = 3,
+ },
+ [VCAP_AF_DP_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 9,
+ .width = 1,
+ },
+ [VCAP_AF_DP_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 10,
+ .width = 2,
+ },
+ [VCAP_AF_MAP_LOOKUP_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 12,
+ .width = 2,
+ },
+ [VCAP_AF_MAP_KEY] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 14,
+ .width = 3,
+ },
+ [VCAP_AF_CLS_VID_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 17,
+ .width = 3,
+ },
+ [VCAP_AF_VID_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 23,
+ .width = 13,
+ },
+ [VCAP_AF_ISDX_ADD_REPLACE_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 46,
+ .width = 1,
+ },
+ [VCAP_AF_ISDX_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 47,
+ .width = 12,
+ },
+ [VCAP_AF_NXT_IDX_CTRL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 90,
+ .width = 3,
+ },
+ [VCAP_AF_NXT_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 93,
+ .width = 12,
+ },
+};
+
+static const struct vcap_field is2_base_type_actionfield[] = {
+ [VCAP_AF_PIPELINE_FORCE_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 1,
+ .width = 1,
+ },
+ [VCAP_AF_PIPELINE_PT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 2,
+ .width = 5,
+ },
+ [VCAP_AF_HIT_ME_ONCE] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 7,
+ .width = 1,
+ },
+ [VCAP_AF_INTR_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 8,
+ .width = 1,
+ },
+ [VCAP_AF_CPU_COPY_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 9,
+ .width = 1,
+ },
+ [VCAP_AF_CPU_QUEUE_NUM] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 10,
+ .width = 3,
+ },
+ [VCAP_AF_LRN_DIS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 14,
+ .width = 1,
+ },
+ [VCAP_AF_RT_DIS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 15,
+ .width = 1,
+ },
+ [VCAP_AF_POLICE_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 16,
+ .width = 1,
+ },
+ [VCAP_AF_POLICE_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 17,
+ .width = 6,
+ },
+ [VCAP_AF_IGNORE_PIPELINE_CTRL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 23,
+ .width = 1,
+ },
+ [VCAP_AF_MASK_MODE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 27,
+ .width = 3,
+ },
+ [VCAP_AF_PORT_MASK] = {
+ .type = VCAP_FIELD_U72,
+ .offset = 30,
+ .width = 68,
+ },
+ [VCAP_AF_MIRROR_PROBE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 111,
+ .width = 2,
+ },
+ [VCAP_AF_MATCH_ID] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 159,
+ .width = 16,
+ },
+ [VCAP_AF_MATCH_ID_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 175,
+ .width = 16,
+ },
+ [VCAP_AF_CNT_ID] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 191,
+ .width = 12,
+ },
+};
+
+static const struct vcap_field es0_es0_actionfield[] = {
+ [VCAP_AF_PUSH_OUTER_TAG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 2,
+ },
+ [VCAP_AF_PUSH_INNER_TAG] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 2,
+ .width = 1,
+ },
+ [VCAP_AF_TAG_A_TPID_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 3,
+ .width = 3,
+ },
+ [VCAP_AF_TAG_A_VID_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 6,
+ .width = 2,
+ },
+ [VCAP_AF_TAG_A_PCP_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 8,
+ .width = 3,
+ },
+ [VCAP_AF_TAG_A_DEI_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 11,
+ .width = 3,
+ },
+ [VCAP_AF_TAG_B_TPID_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 14,
+ .width = 3,
+ },
+ [VCAP_AF_TAG_B_VID_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 17,
+ .width = 2,
+ },
+ [VCAP_AF_TAG_B_PCP_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 19,
+ .width = 3,
+ },
+ [VCAP_AF_TAG_B_DEI_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 22,
+ .width = 3,
+ },
+ [VCAP_AF_TAG_C_TPID_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 25,
+ .width = 3,
+ },
+ [VCAP_AF_TAG_C_PCP_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 28,
+ .width = 3,
+ },
+ [VCAP_AF_TAG_C_DEI_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 31,
+ .width = 3,
+ },
+ [VCAP_AF_VID_A_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 34,
+ .width = 12,
+ },
+ [VCAP_AF_PCP_A_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 46,
+ .width = 3,
+ },
+ [VCAP_AF_DEI_A_VAL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 49,
+ .width = 1,
+ },
+ [VCAP_AF_VID_B_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 50,
+ .width = 12,
+ },
+ [VCAP_AF_PCP_B_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 62,
+ .width = 3,
+ },
+ [VCAP_AF_DEI_B_VAL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 65,
+ .width = 1,
+ },
+ [VCAP_AF_VID_C_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 66,
+ .width = 12,
+ },
+ [VCAP_AF_PCP_C_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 78,
+ .width = 3,
+ },
+ [VCAP_AF_DEI_C_VAL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 81,
+ .width = 1,
+ },
+ [VCAP_AF_POP_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 82,
+ .width = 2,
+ },
+ [VCAP_AF_UNTAG_VID_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 84,
+ .width = 1,
+ },
+ [VCAP_AF_PUSH_CUSTOMER_TAG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 85,
+ .width = 2,
+ },
+ [VCAP_AF_TAG_C_VID_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 87,
+ .width = 2,
+ },
+ [VCAP_AF_DSCP_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 127,
+ .width = 3,
+ },
+ [VCAP_AF_DSCP_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 130,
+ .width = 6,
+ },
+ [VCAP_AF_ESDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 323,
+ .width = 13,
+ },
+ [VCAP_AF_FWD_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 459,
+ .width = 2,
+ },
+ [VCAP_AF_CPU_QU] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 461,
+ .width = 3,
+ },
+ [VCAP_AF_PIPELINE_PT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 464,
+ .width = 2,
+ },
+ [VCAP_AF_PIPELINE_ACT] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 466,
+ .width = 1,
+ },
+ [VCAP_AF_SWAP_MACS_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 475,
+ .width = 1,
+ },
+ [VCAP_AF_LOOP_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 476,
+ .width = 1,
+ },
+};
+
+static const struct vcap_field es2_base_type_actionfield[] = {
+ [VCAP_AF_HIT_ME_ONCE] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 0,
+ .width = 1,
+ },
+ [VCAP_AF_INTR_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 1,
+ .width = 1,
+ },
+ [VCAP_AF_FWD_MODE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 2,
+ .width = 2,
+ },
+ [VCAP_AF_COPY_QUEUE_NUM] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 4,
+ .width = 16,
+ },
+ [VCAP_AF_COPY_PORT_NUM] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 20,
+ .width = 7,
+ },
+ [VCAP_AF_MIRROR_PROBE_ID] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 27,
+ .width = 2,
+ },
+ [VCAP_AF_CPU_COPY_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 29,
+ .width = 1,
+ },
+ [VCAP_AF_CPU_QUEUE_NUM] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 30,
+ .width = 3,
+ },
+ [VCAP_AF_POLICE_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 33,
+ .width = 1,
+ },
+ [VCAP_AF_POLICE_REMARK] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 34,
+ .width = 1,
+ },
+ [VCAP_AF_POLICE_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 35,
+ .width = 6,
+ },
+ [VCAP_AF_ES2_REW_CMD] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 41,
+ .width = 3,
+ },
+ [VCAP_AF_CNT_ID] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 44,
+ .width = 11,
+ },
+ [VCAP_AF_IGNORE_PIPELINE_CTRL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 55,
+ .width = 1,
+ },
+};
+
+/* actionfield_set */
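+/* Note: in the sets below, sw_per_item gives the number of subwords one
+ * action occupies and sw_cnt how many such actions share a full row, so
+ * sw_per_item * sw_cnt matches the per-row subword count of the owning
+ * VCAP (12 for IS0/IS2/ES2, 1 for ES0; see sparx5_vcaps at the end of
+ * this file).  A type_id of -1 indicates a set without a TYPE field.
+ */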
+static const struct vcap_set is0_actionfield_set[] = {
+ [VCAP_AFS_CLASSIFICATION] = {
+ .type_id = 1,
+ .sw_per_item = 2,
+ .sw_cnt = 6,
+ },
+ [VCAP_AFS_FULL] = {
+ .type_id = -1,
+ .sw_per_item = 3,
+ .sw_cnt = 4,
+ },
+ [VCAP_AFS_CLASS_REDUCED] = {
+ .type_id = 1,
+ .sw_per_item = 1,
+ .sw_cnt = 12,
+ },
+};
+
+static const struct vcap_set is2_actionfield_set[] = {
+ [VCAP_AFS_BASE_TYPE] = {
+ .type_id = -1,
+ .sw_per_item = 3,
+ .sw_cnt = 4,
+ },
+};
+
+static const struct vcap_set es0_actionfield_set[] = {
+ [VCAP_AFS_ES0] = {
+ .type_id = -1,
+ .sw_per_item = 1,
+ .sw_cnt = 1,
+ },
+};
+
+static const struct vcap_set es2_actionfield_set[] = {
+ [VCAP_AFS_BASE_TYPE] = {
+ .type_id = -1,
+ .sw_per_item = 3,
+ .sw_cnt = 4,
+ },
+};
+
+/* actionfield_set map */
+static const struct vcap_field *is0_actionfield_set_map[] = {
+ [VCAP_AFS_CLASSIFICATION] = is0_classification_actionfield,
+ [VCAP_AFS_FULL] = is0_full_actionfield,
+ [VCAP_AFS_CLASS_REDUCED] = is0_class_reduced_actionfield,
+};
+
+static const struct vcap_field *is2_actionfield_set_map[] = {
+ [VCAP_AFS_BASE_TYPE] = is2_base_type_actionfield,
+};
+
+static const struct vcap_field *es0_actionfield_set_map[] = {
+ [VCAP_AFS_ES0] = es0_es0_actionfield,
+};
+
+static const struct vcap_field *es2_actionfield_set_map[] = {
+ [VCAP_AFS_BASE_TYPE] = es2_base_type_actionfield,
+};
+
+/* actionfield_set map size */
+static int is0_actionfield_set_map_size[] = {
+ [VCAP_AFS_CLASSIFICATION] = ARRAY_SIZE(is0_classification_actionfield),
+ [VCAP_AFS_FULL] = ARRAY_SIZE(is0_full_actionfield),
+ [VCAP_AFS_CLASS_REDUCED] = ARRAY_SIZE(is0_class_reduced_actionfield),
+};
+
+static int is2_actionfield_set_map_size[] = {
+ [VCAP_AFS_BASE_TYPE] = ARRAY_SIZE(is2_base_type_actionfield),
+};
+
+static int es0_actionfield_set_map_size[] = {
+ [VCAP_AFS_ES0] = ARRAY_SIZE(es0_es0_actionfield),
+};
+
+static int es2_actionfield_set_map_size[] = {
+ [VCAP_AFS_BASE_TYPE] = ARRAY_SIZE(es2_base_type_actionfield),
+};
+
+/* Type Groups */
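+/* Note: a typegroup is a short tag embedded at fixed positions in a rule's
+ * bitstream; its value identifies the rule size (X12/X6/X3/X2/X1) so that
+ * differently sized rules can share the same rows.  The offsets below
+ * advance in whole subwords, i.e. multiples of the 52-bit key width or of
+ * the action width listed in sparx5_vcaps at the end of this file.
+ */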
+static const struct vcap_typegroup is0_x12_keyfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 5,
+ .value = 16,
+ },
+ {
+ .offset = 52,
+ .width = 1,
+ .value = 0,
+ },
+ {
+ .offset = 104,
+ .width = 2,
+ .value = 0,
+ },
+ {
+ .offset = 156,
+ .width = 3,
+ .value = 0,
+ },
+ {
+ .offset = 208,
+ .width = 2,
+ .value = 0,
+ },
+ {
+ .offset = 260,
+ .width = 1,
+ .value = 0,
+ },
+ {
+ .offset = 312,
+ .width = 4,
+ .value = 0,
+ },
+ {
+ .offset = 364,
+ .width = 1,
+ .value = 0,
+ },
+ {
+ .offset = 416,
+ .width = 2,
+ .value = 0,
+ },
+ {
+ .offset = 468,
+ .width = 3,
+ .value = 0,
+ },
+ {
+ .offset = 520,
+ .width = 2,
+ .value = 0,
+ },
+ {
+ .offset = 572,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup is0_x6_keyfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 4,
+ .value = 8,
+ },
+ {
+ .offset = 52,
+ .width = 1,
+ .value = 0,
+ },
+ {
+ .offset = 104,
+ .width = 2,
+ .value = 0,
+ },
+ {
+ .offset = 156,
+ .width = 3,
+ .value = 0,
+ },
+ {
+ .offset = 208,
+ .width = 2,
+ .value = 0,
+ },
+ {
+ .offset = 260,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup is0_x3_keyfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 3,
+ .value = 4,
+ },
+ {
+ .offset = 52,
+ .width = 2,
+ .value = 0,
+ },
+ {
+ .offset = 104,
+ .width = 2,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup is0_x2_keyfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 2,
+ .value = 2,
+ },
+ {
+ .offset = 52,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup is0_x1_keyfield_set_typegroups[] = {
+ {}
+};
+
+static const struct vcap_typegroup is2_x12_keyfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 3,
+ .value = 4,
+ },
+ {
+ .offset = 156,
+ .width = 1,
+ .value = 0,
+ },
+ {
+ .offset = 312,
+ .width = 2,
+ .value = 0,
+ },
+ {
+ .offset = 468,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup is2_x6_keyfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 2,
+ .value = 2,
+ },
+ {
+ .offset = 156,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup is2_x3_keyfield_set_typegroups[] = {
+ {}
+};
+
+static const struct vcap_typegroup is2_x1_keyfield_set_typegroups[] = {
+ {}
+};
+
+static const struct vcap_typegroup es0_x1_keyfield_set_typegroups[] = {
+ {}
+};
+
+static const struct vcap_typegroup es2_x12_keyfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 3,
+ .value = 4,
+ },
+ {
+ .offset = 156,
+ .width = 1,
+ .value = 0,
+ },
+ {
+ .offset = 312,
+ .width = 2,
+ .value = 0,
+ },
+ {
+ .offset = 468,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup es2_x6_keyfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 2,
+ .value = 2,
+ },
+ {
+ .offset = 156,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup es2_x3_keyfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 1,
+ .value = 1,
+ },
+ {}
+};
+
+static const struct vcap_typegroup es2_x1_keyfield_set_typegroups[] = {
+ {}
+};
+
+static const struct vcap_typegroup *is0_keyfield_set_typegroups[] = {
+ [12] = is0_x12_keyfield_set_typegroups,
+ [6] = is0_x6_keyfield_set_typegroups,
+ [3] = is0_x3_keyfield_set_typegroups,
+ [2] = is0_x2_keyfield_set_typegroups,
+ [1] = is0_x1_keyfield_set_typegroups,
+ [13] = NULL,
+};
+
+static const struct vcap_typegroup *is2_keyfield_set_typegroups[] = {
+ [12] = is2_x12_keyfield_set_typegroups,
+ [6] = is2_x6_keyfield_set_typegroups,
+ [3] = is2_x3_keyfield_set_typegroups,
+ [1] = is2_x1_keyfield_set_typegroups,
+ [13] = NULL,
+};
+
+static const struct vcap_typegroup *es0_keyfield_set_typegroups[] = {
+ [1] = es0_x1_keyfield_set_typegroups,
+ [2] = NULL,
+};
+
+static const struct vcap_typegroup *es2_keyfield_set_typegroups[] = {
+ [12] = es2_x12_keyfield_set_typegroups,
+ [6] = es2_x6_keyfield_set_typegroups,
+ [3] = es2_x3_keyfield_set_typegroups,
+ [1] = es2_x1_keyfield_set_typegroups,
+ [13] = NULL,
+};
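+/* Note: the maps above are indexed by rule size in subwords; the NULL entry
+ * one past the largest valid size ([13], or [2] for the single-subword ES0)
+ * keeps size-based lookups within the array bounds.  The actionfield maps
+ * below follow the same scheme.
+ */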
+
+static const struct vcap_typegroup is0_x3_actionfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 3,
+ .value = 4,
+ },
+ {
+ .offset = 110,
+ .width = 2,
+ .value = 0,
+ },
+ {
+ .offset = 220,
+ .width = 2,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup is0_x2_actionfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 2,
+ .value = 2,
+ },
+ {
+ .offset = 110,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup is0_x1_actionfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 1,
+ .value = 1,
+ },
+ {}
+};
+
+static const struct vcap_typegroup is2_x3_actionfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 2,
+ .value = 2,
+ },
+ {
+ .offset = 110,
+ .width = 1,
+ .value = 0,
+ },
+ {
+ .offset = 220,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup is2_x1_actionfield_set_typegroups[] = {
+ {}
+};
+
+static const struct vcap_typegroup es0_x1_actionfield_set_typegroups[] = {
+ {}
+};
+
+static const struct vcap_typegroup es2_x3_actionfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 2,
+ .value = 2,
+ },
+ {
+ .offset = 21,
+ .width = 1,
+ .value = 0,
+ },
+ {
+ .offset = 42,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup es2_x1_actionfield_set_typegroups[] = {
+ {}
+};
+
+static const struct vcap_typegroup *is0_actionfield_set_typegroups[] = {
+ [3] = is0_x3_actionfield_set_typegroups,
+ [2] = is0_x2_actionfield_set_typegroups,
+ [1] = is0_x1_actionfield_set_typegroups,
+ [13] = NULL,
+};
+
+static const struct vcap_typegroup *is2_actionfield_set_typegroups[] = {
+ [3] = is2_x3_actionfield_set_typegroups,
+ [1] = is2_x1_actionfield_set_typegroups,
+ [13] = NULL,
+};
+
+static const struct vcap_typegroup *es0_actionfield_set_typegroups[] = {
+ [1] = es0_x1_actionfield_set_typegroups,
+ [2] = NULL,
+};
+
+static const struct vcap_typegroup *es2_actionfield_set_typegroups[] = {
+ [3] = es2_x3_actionfield_set_typegroups,
+ [1] = es2_x1_actionfield_set_typegroups,
+ [13] = NULL,
+};
+
+/* Keyfieldset names */
+static const char * const vcap_keyfield_set_names[] = {
+ [VCAP_KFS_NO_VALUE] = "(None)",
+ [VCAP_KFS_ARP] = "VCAP_KFS_ARP",
+ [VCAP_KFS_ETAG] = "VCAP_KFS_ETAG",
+ [VCAP_KFS_IP4_OTHER] = "VCAP_KFS_IP4_OTHER",
+ [VCAP_KFS_IP4_TCP_UDP] = "VCAP_KFS_IP4_TCP_UDP",
+ [VCAP_KFS_IP4_VID] = "VCAP_KFS_IP4_VID",
+ [VCAP_KFS_IP6_OTHER] = "VCAP_KFS_IP6_OTHER",
+ [VCAP_KFS_IP6_STD] = "VCAP_KFS_IP6_STD",
+ [VCAP_KFS_IP6_TCP_UDP] = "VCAP_KFS_IP6_TCP_UDP",
+ [VCAP_KFS_IP6_VID] = "VCAP_KFS_IP6_VID",
+ [VCAP_KFS_IP_7TUPLE] = "VCAP_KFS_IP_7TUPLE",
+ [VCAP_KFS_ISDX] = "VCAP_KFS_ISDX",
+ [VCAP_KFS_LL_FULL] = "VCAP_KFS_LL_FULL",
+ [VCAP_KFS_MAC_ETYPE] = "VCAP_KFS_MAC_ETYPE",
+ [VCAP_KFS_MAC_LLC] = "VCAP_KFS_MAC_LLC",
+ [VCAP_KFS_MAC_SNAP] = "VCAP_KFS_MAC_SNAP",
+ [VCAP_KFS_NORMAL_5TUPLE_IP4] = "VCAP_KFS_NORMAL_5TUPLE_IP4",
+ [VCAP_KFS_NORMAL_7TUPLE] = "VCAP_KFS_NORMAL_7TUPLE",
+ [VCAP_KFS_OAM] = "VCAP_KFS_OAM",
+ [VCAP_KFS_PURE_5TUPLE_IP4] = "VCAP_KFS_PURE_5TUPLE_IP4",
+ [VCAP_KFS_SMAC_SIP4] = "VCAP_KFS_SMAC_SIP4",
+ [VCAP_KFS_SMAC_SIP6] = "VCAP_KFS_SMAC_SIP6",
+};
+
+/* Actionfieldset names */
+static const char * const vcap_actionfield_set_names[] = {
+ [VCAP_AFS_NO_VALUE] = "(None)",
+ [VCAP_AFS_BASE_TYPE] = "VCAP_AFS_BASE_TYPE",
+ [VCAP_AFS_CLASSIFICATION] = "VCAP_AFS_CLASSIFICATION",
+ [VCAP_AFS_CLASS_REDUCED] = "VCAP_AFS_CLASS_REDUCED",
+ [VCAP_AFS_ES0] = "VCAP_AFS_ES0",
+ [VCAP_AFS_FULL] = "VCAP_AFS_FULL",
+ [VCAP_AFS_SMAC_SIP] = "VCAP_AFS_SMAC_SIP",
+};
+
+/* Keyfield names */
+static const char * const vcap_keyfield_names[] = {
+ [VCAP_KF_NO_VALUE] = "(None)",
+ [VCAP_KF_8021BR_ECID_BASE] = "8021BR_ECID_BASE",
+ [VCAP_KF_8021BR_ECID_EXT] = "8021BR_ECID_EXT",
+ [VCAP_KF_8021BR_E_TAGGED] = "8021BR_E_TAGGED",
+ [VCAP_KF_8021BR_GRP] = "8021BR_GRP",
+ [VCAP_KF_8021BR_IGR_ECID_BASE] = "8021BR_IGR_ECID_BASE",
+ [VCAP_KF_8021BR_IGR_ECID_EXT] = "8021BR_IGR_ECID_EXT",
+ [VCAP_KF_8021Q_DEI0] = "8021Q_DEI0",
+ [VCAP_KF_8021Q_DEI1] = "8021Q_DEI1",
+ [VCAP_KF_8021Q_DEI2] = "8021Q_DEI2",
+ [VCAP_KF_8021Q_DEI_CLS] = "8021Q_DEI_CLS",
+ [VCAP_KF_8021Q_PCP0] = "8021Q_PCP0",
+ [VCAP_KF_8021Q_PCP1] = "8021Q_PCP1",
+ [VCAP_KF_8021Q_PCP2] = "8021Q_PCP2",
+ [VCAP_KF_8021Q_PCP_CLS] = "8021Q_PCP_CLS",
+ [VCAP_KF_8021Q_TPID] = "8021Q_TPID",
+ [VCAP_KF_8021Q_TPID0] = "8021Q_TPID0",
+ [VCAP_KF_8021Q_TPID1] = "8021Q_TPID1",
+ [VCAP_KF_8021Q_TPID2] = "8021Q_TPID2",
+ [VCAP_KF_8021Q_VID0] = "8021Q_VID0",
+ [VCAP_KF_8021Q_VID1] = "8021Q_VID1",
+ [VCAP_KF_8021Q_VID2] = "8021Q_VID2",
+ [VCAP_KF_8021Q_VID_CLS] = "8021Q_VID_CLS",
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = "8021Q_VLAN_TAGGED_IS",
+ [VCAP_KF_8021Q_VLAN_TAGS] = "8021Q_VLAN_TAGS",
+ [VCAP_KF_ACL_GRP_ID] = "ACL_GRP_ID",
+ [VCAP_KF_ARP_ADDR_SPACE_OK_IS] = "ARP_ADDR_SPACE_OK_IS",
+ [VCAP_KF_ARP_LEN_OK_IS] = "ARP_LEN_OK_IS",
+ [VCAP_KF_ARP_OPCODE] = "ARP_OPCODE",
+ [VCAP_KF_ARP_OPCODE_UNKNOWN_IS] = "ARP_OPCODE_UNKNOWN_IS",
+ [VCAP_KF_ARP_PROTO_SPACE_OK_IS] = "ARP_PROTO_SPACE_OK_IS",
+ [VCAP_KF_ARP_SENDER_MATCH_IS] = "ARP_SENDER_MATCH_IS",
+ [VCAP_KF_ARP_TGT_MATCH_IS] = "ARP_TGT_MATCH_IS",
+ [VCAP_KF_COSID_CLS] = "COSID_CLS",
+ [VCAP_KF_ES0_ISDX_KEY_ENA] = "ES0_ISDX_KEY_ENA",
+ [VCAP_KF_ETYPE] = "ETYPE",
+ [VCAP_KF_ETYPE_LEN_IS] = "ETYPE_LEN_IS",
+ [VCAP_KF_HOST_MATCH] = "HOST_MATCH",
+ [VCAP_KF_IF_EGR_PORT_MASK] = "IF_EGR_PORT_MASK",
+ [VCAP_KF_IF_EGR_PORT_MASK_RNG] = "IF_EGR_PORT_MASK_RNG",
+ [VCAP_KF_IF_EGR_PORT_NO] = "IF_EGR_PORT_NO",
+ [VCAP_KF_IF_IGR_PORT] = "IF_IGR_PORT",
+ [VCAP_KF_IF_IGR_PORT_MASK] = "IF_IGR_PORT_MASK",
+ [VCAP_KF_IF_IGR_PORT_MASK_L3] = "IF_IGR_PORT_MASK_L3",
+ [VCAP_KF_IF_IGR_PORT_MASK_RNG] = "IF_IGR_PORT_MASK_RNG",
+ [VCAP_KF_IF_IGR_PORT_MASK_SEL] = "IF_IGR_PORT_MASK_SEL",
+ [VCAP_KF_IF_IGR_PORT_SEL] = "IF_IGR_PORT_SEL",
+ [VCAP_KF_IP4_IS] = "IP4_IS",
+ [VCAP_KF_IP_MC_IS] = "IP_MC_IS",
+ [VCAP_KF_IP_PAYLOAD_5TUPLE] = "IP_PAYLOAD_5TUPLE",
+ [VCAP_KF_IP_SNAP_IS] = "IP_SNAP_IS",
+ [VCAP_KF_ISDX_CLS] = "ISDX_CLS",
+ [VCAP_KF_ISDX_GT0_IS] = "ISDX_GT0_IS",
+ [VCAP_KF_L2_BC_IS] = "L2_BC_IS",
+ [VCAP_KF_L2_DMAC] = "L2_DMAC",
+ [VCAP_KF_L2_FRM_TYPE] = "L2_FRM_TYPE",
+ [VCAP_KF_L2_FWD_IS] = "L2_FWD_IS",
+ [VCAP_KF_L2_LLC] = "L2_LLC",
+ [VCAP_KF_L2_MC_IS] = "L2_MC_IS",
+ [VCAP_KF_L2_PAYLOAD0] = "L2_PAYLOAD0",
+ [VCAP_KF_L2_PAYLOAD1] = "L2_PAYLOAD1",
+ [VCAP_KF_L2_PAYLOAD2] = "L2_PAYLOAD2",
+ [VCAP_KF_L2_PAYLOAD_ETYPE] = "L2_PAYLOAD_ETYPE",
+ [VCAP_KF_L2_SMAC] = "L2_SMAC",
+ [VCAP_KF_L2_SNAP] = "L2_SNAP",
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = "L3_DIP_EQ_SIP_IS",
+ [VCAP_KF_L3_DPL_CLS] = "L3_DPL_CLS",
+ [VCAP_KF_L3_DSCP] = "L3_DSCP",
+ [VCAP_KF_L3_DST_IS] = "L3_DST_IS",
+ [VCAP_KF_L3_FRAGMENT] = "L3_FRAGMENT",
+ [VCAP_KF_L3_FRAGMENT_TYPE] = "L3_FRAGMENT_TYPE",
+ [VCAP_KF_L3_FRAG_INVLD_L4_LEN] = "L3_FRAG_INVLD_L4_LEN",
+ [VCAP_KF_L3_FRAG_OFS_GT0] = "L3_FRAG_OFS_GT0",
+ [VCAP_KF_L3_IP4_DIP] = "L3_IP4_DIP",
+ [VCAP_KF_L3_IP4_SIP] = "L3_IP4_SIP",
+ [VCAP_KF_L3_IP6_DIP] = "L3_IP6_DIP",
+ [VCAP_KF_L3_IP6_SIP] = "L3_IP6_SIP",
+ [VCAP_KF_L3_IP_PROTO] = "L3_IP_PROTO",
+ [VCAP_KF_L3_OPTIONS_IS] = "L3_OPTIONS_IS",
+ [VCAP_KF_L3_PAYLOAD] = "L3_PAYLOAD",
+ [VCAP_KF_L3_RT_IS] = "L3_RT_IS",
+ [VCAP_KF_L3_TOS] = "L3_TOS",
+ [VCAP_KF_L3_TTL_GT0] = "L3_TTL_GT0",
+ [VCAP_KF_L4_1588_DOM] = "L4_1588_DOM",
+ [VCAP_KF_L4_1588_VER] = "L4_1588_VER",
+ [VCAP_KF_L4_ACK] = "L4_ACK",
+ [VCAP_KF_L4_DPORT] = "L4_DPORT",
+ [VCAP_KF_L4_FIN] = "L4_FIN",
+ [VCAP_KF_L4_PAYLOAD] = "L4_PAYLOAD",
+ [VCAP_KF_L4_PSH] = "L4_PSH",
+ [VCAP_KF_L4_RNG] = "L4_RNG",
+ [VCAP_KF_L4_RST] = "L4_RST",
+ [VCAP_KF_L4_SEQUENCE_EQ0_IS] = "L4_SEQUENCE_EQ0_IS",
+ [VCAP_KF_L4_SPORT] = "L4_SPORT",
+ [VCAP_KF_L4_SPORT_EQ_DPORT_IS] = "L4_SPORT_EQ_DPORT_IS",
+ [VCAP_KF_L4_SYN] = "L4_SYN",
+ [VCAP_KF_L4_URG] = "L4_URG",
+ [VCAP_KF_LOOKUP_FIRST_IS] = "LOOKUP_FIRST_IS",
+ [VCAP_KF_LOOKUP_GEN_IDX] = "LOOKUP_GEN_IDX",
+ [VCAP_KF_LOOKUP_GEN_IDX_SEL] = "LOOKUP_GEN_IDX_SEL",
+ [VCAP_KF_LOOKUP_PAG] = "LOOKUP_PAG",
+ [VCAP_KF_MIRROR_PROBE] = "MIRROR_PROBE",
+ [VCAP_KF_OAM_CCM_CNTS_EQ0] = "OAM_CCM_CNTS_EQ0",
+ [VCAP_KF_OAM_DETECTED] = "OAM_DETECTED",
+ [VCAP_KF_OAM_FLAGS] = "OAM_FLAGS",
+ [VCAP_KF_OAM_MEL_FLAGS] = "OAM_MEL_FLAGS",
+ [VCAP_KF_OAM_MEPID] = "OAM_MEPID",
+ [VCAP_KF_OAM_OPCODE] = "OAM_OPCODE",
+ [VCAP_KF_OAM_VER] = "OAM_VER",
+ [VCAP_KF_OAM_Y1731_IS] = "OAM_Y1731_IS",
+ [VCAP_KF_PROT_ACTIVE] = "PROT_ACTIVE",
+ [VCAP_KF_TCP_IS] = "TCP_IS",
+ [VCAP_KF_TCP_UDP_IS] = "TCP_UDP_IS",
+ [VCAP_KF_TYPE] = "TYPE",
+};
+
+/* Actionfield names */
+static const char * const vcap_actionfield_names[] = {
+ [VCAP_AF_NO_VALUE] = "(None)",
+ [VCAP_AF_ACL_ID] = "ACL_ID",
+ [VCAP_AF_CLS_VID_SEL] = "CLS_VID_SEL",
+ [VCAP_AF_CNT_ID] = "CNT_ID",
+ [VCAP_AF_COPY_PORT_NUM] = "COPY_PORT_NUM",
+ [VCAP_AF_COPY_QUEUE_NUM] = "COPY_QUEUE_NUM",
+ [VCAP_AF_CPU_COPY_ENA] = "CPU_COPY_ENA",
+ [VCAP_AF_CPU_QU] = "CPU_QU",
+ [VCAP_AF_CPU_QUEUE_NUM] = "CPU_QUEUE_NUM",
+ [VCAP_AF_DEI_A_VAL] = "DEI_A_VAL",
+ [VCAP_AF_DEI_B_VAL] = "DEI_B_VAL",
+ [VCAP_AF_DEI_C_VAL] = "DEI_C_VAL",
+ [VCAP_AF_DEI_ENA] = "DEI_ENA",
+ [VCAP_AF_DEI_VAL] = "DEI_VAL",
+ [VCAP_AF_DP_ENA] = "DP_ENA",
+ [VCAP_AF_DP_VAL] = "DP_VAL",
+ [VCAP_AF_DSCP_ENA] = "DSCP_ENA",
+ [VCAP_AF_DSCP_SEL] = "DSCP_SEL",
+ [VCAP_AF_DSCP_VAL] = "DSCP_VAL",
+ [VCAP_AF_ES2_REW_CMD] = "ES2_REW_CMD",
+ [VCAP_AF_ESDX] = "ESDX",
+ [VCAP_AF_FWD_KILL_ENA] = "FWD_KILL_ENA",
+ [VCAP_AF_FWD_MODE] = "FWD_MODE",
+ [VCAP_AF_FWD_SEL] = "FWD_SEL",
+ [VCAP_AF_HIT_ME_ONCE] = "HIT_ME_ONCE",
+ [VCAP_AF_HOST_MATCH] = "HOST_MATCH",
+ [VCAP_AF_IGNORE_PIPELINE_CTRL] = "IGNORE_PIPELINE_CTRL",
+ [VCAP_AF_INTR_ENA] = "INTR_ENA",
+ [VCAP_AF_ISDX_ADD_REPLACE_SEL] = "ISDX_ADD_REPLACE_SEL",
+ [VCAP_AF_ISDX_ENA] = "ISDX_ENA",
+ [VCAP_AF_ISDX_VAL] = "ISDX_VAL",
+ [VCAP_AF_LOOP_ENA] = "LOOP_ENA",
+ [VCAP_AF_LRN_DIS] = "LRN_DIS",
+ [VCAP_AF_MAP_IDX] = "MAP_IDX",
+ [VCAP_AF_MAP_KEY] = "MAP_KEY",
+ [VCAP_AF_MAP_LOOKUP_SEL] = "MAP_LOOKUP_SEL",
+ [VCAP_AF_MASK_MODE] = "MASK_MODE",
+ [VCAP_AF_MATCH_ID] = "MATCH_ID",
+ [VCAP_AF_MATCH_ID_MASK] = "MATCH_ID_MASK",
+ [VCAP_AF_MIRROR_ENA] = "MIRROR_ENA",
+ [VCAP_AF_MIRROR_PROBE] = "MIRROR_PROBE",
+ [VCAP_AF_MIRROR_PROBE_ID] = "MIRROR_PROBE_ID",
+ [VCAP_AF_NXT_IDX] = "NXT_IDX",
+ [VCAP_AF_NXT_IDX_CTRL] = "NXT_IDX_CTRL",
+ [VCAP_AF_PAG_OVERRIDE_MASK] = "PAG_OVERRIDE_MASK",
+ [VCAP_AF_PAG_VAL] = "PAG_VAL",
+ [VCAP_AF_PCP_A_VAL] = "PCP_A_VAL",
+ [VCAP_AF_PCP_B_VAL] = "PCP_B_VAL",
+ [VCAP_AF_PCP_C_VAL] = "PCP_C_VAL",
+ [VCAP_AF_PCP_ENA] = "PCP_ENA",
+ [VCAP_AF_PCP_VAL] = "PCP_VAL",
+ [VCAP_AF_PIPELINE_ACT] = "PIPELINE_ACT",
+ [VCAP_AF_PIPELINE_FORCE_ENA] = "PIPELINE_FORCE_ENA",
+ [VCAP_AF_PIPELINE_PT] = "PIPELINE_PT",
+ [VCAP_AF_POLICE_ENA] = "POLICE_ENA",
+ [VCAP_AF_POLICE_IDX] = "POLICE_IDX",
+ [VCAP_AF_POLICE_REMARK] = "POLICE_REMARK",
+ [VCAP_AF_POLICE_VCAP_ONLY] = "POLICE_VCAP_ONLY",
+ [VCAP_AF_POP_VAL] = "POP_VAL",
+ [VCAP_AF_PORT_MASK] = "PORT_MASK",
+ [VCAP_AF_PUSH_CUSTOMER_TAG] = "PUSH_CUSTOMER_TAG",
+ [VCAP_AF_PUSH_INNER_TAG] = "PUSH_INNER_TAG",
+ [VCAP_AF_PUSH_OUTER_TAG] = "PUSH_OUTER_TAG",
+ [VCAP_AF_QOS_ENA] = "QOS_ENA",
+ [VCAP_AF_QOS_VAL] = "QOS_VAL",
+ [VCAP_AF_REW_OP] = "REW_OP",
+ [VCAP_AF_RT_DIS] = "RT_DIS",
+ [VCAP_AF_SWAP_MACS_ENA] = "SWAP_MACS_ENA",
+ [VCAP_AF_TAG_A_DEI_SEL] = "TAG_A_DEI_SEL",
+ [VCAP_AF_TAG_A_PCP_SEL] = "TAG_A_PCP_SEL",
+ [VCAP_AF_TAG_A_TPID_SEL] = "TAG_A_TPID_SEL",
+ [VCAP_AF_TAG_A_VID_SEL] = "TAG_A_VID_SEL",
+ [VCAP_AF_TAG_B_DEI_SEL] = "TAG_B_DEI_SEL",
+ [VCAP_AF_TAG_B_PCP_SEL] = "TAG_B_PCP_SEL",
+ [VCAP_AF_TAG_B_TPID_SEL] = "TAG_B_TPID_SEL",
+ [VCAP_AF_TAG_B_VID_SEL] = "TAG_B_VID_SEL",
+ [VCAP_AF_TAG_C_DEI_SEL] = "TAG_C_DEI_SEL",
+ [VCAP_AF_TAG_C_PCP_SEL] = "TAG_C_PCP_SEL",
+ [VCAP_AF_TAG_C_TPID_SEL] = "TAG_C_TPID_SEL",
+ [VCAP_AF_TAG_C_VID_SEL] = "TAG_C_VID_SEL",
+ [VCAP_AF_TYPE] = "TYPE",
+ [VCAP_AF_UNTAG_VID_ENA] = "UNTAG_VID_ENA",
+ [VCAP_AF_VID_A_VAL] = "VID_A_VAL",
+ [VCAP_AF_VID_B_VAL] = "VID_B_VAL",
+ [VCAP_AF_VID_C_VAL] = "VID_C_VAL",
+ [VCAP_AF_VID_VAL] = "VID_VAL",
+};
+
+/* VCAPs */
+const struct vcap_info sparx5_vcaps[] = {
+ [VCAP_TYPE_IS0] = {
+ .name = "is0",
+ .rows = 1024,
+ .sw_count = 12,
+ .sw_width = 52,
+ .sticky_width = 1,
+ .act_width = 110,
+ .default_cnt = 140,
+ .require_cnt_dis = 0,
+ .version = 1,
+ .keyfield_set = is0_keyfield_set,
+ .keyfield_set_size = ARRAY_SIZE(is0_keyfield_set),
+ .actionfield_set = is0_actionfield_set,
+ .actionfield_set_size = ARRAY_SIZE(is0_actionfield_set),
+ .keyfield_set_map = is0_keyfield_set_map,
+ .keyfield_set_map_size = is0_keyfield_set_map_size,
+ .actionfield_set_map = is0_actionfield_set_map,
+ .actionfield_set_map_size = is0_actionfield_set_map_size,
+ .keyfield_set_typegroups = is0_keyfield_set_typegroups,
+ .actionfield_set_typegroups = is0_actionfield_set_typegroups,
+ },
+ [VCAP_TYPE_IS2] = {
+ .name = "is2",
+ .rows = 256,
+ .sw_count = 12,
+ .sw_width = 52,
+ .sticky_width = 1,
+ .act_width = 110,
+ .default_cnt = 73,
+ .require_cnt_dis = 0,
+ .version = 1,
+ .keyfield_set = is2_keyfield_set,
+ .keyfield_set_size = ARRAY_SIZE(is2_keyfield_set),
+ .actionfield_set = is2_actionfield_set,
+ .actionfield_set_size = ARRAY_SIZE(is2_actionfield_set),
+ .keyfield_set_map = is2_keyfield_set_map,
+ .keyfield_set_map_size = is2_keyfield_set_map_size,
+ .actionfield_set_map = is2_actionfield_set_map,
+ .actionfield_set_map_size = is2_actionfield_set_map_size,
+ .keyfield_set_typegroups = is2_keyfield_set_typegroups,
+ .actionfield_set_typegroups = is2_actionfield_set_typegroups,
+ },
+ [VCAP_TYPE_ES0] = {
+ .name = "es0",
+ .rows = 4096,
+ .sw_count = 1,
+ .sw_width = 52,
+ .sticky_width = 1,
+ .act_width = 489,
+ .default_cnt = 70,
+ .require_cnt_dis = 0,
+ .version = 1,
+ .keyfield_set = es0_keyfield_set,
+ .keyfield_set_size = ARRAY_SIZE(es0_keyfield_set),
+ .actionfield_set = es0_actionfield_set,
+ .actionfield_set_size = ARRAY_SIZE(es0_actionfield_set),
+ .keyfield_set_map = es0_keyfield_set_map,
+ .keyfield_set_map_size = es0_keyfield_set_map_size,
+ .actionfield_set_map = es0_actionfield_set_map,
+ .actionfield_set_map_size = es0_actionfield_set_map_size,
+ .keyfield_set_typegroups = es0_keyfield_set_typegroups,
+ .actionfield_set_typegroups = es0_actionfield_set_typegroups,
+ },
+ [VCAP_TYPE_ES2] = {
+ .name = "es2",
+ .rows = 1024,
+ .sw_count = 12,
+ .sw_width = 52,
+ .sticky_width = 1,
+ .act_width = 21,
+ .default_cnt = 74,
+ .require_cnt_dis = 0,
+ .version = 1,
+ .keyfield_set = es2_keyfield_set,
+ .keyfield_set_size = ARRAY_SIZE(es2_keyfield_set),
+ .actionfield_set = es2_actionfield_set,
+ .actionfield_set_size = ARRAY_SIZE(es2_actionfield_set),
+ .keyfield_set_map = es2_keyfield_set_map,
+ .keyfield_set_map_size = es2_keyfield_set_map_size,
+ .actionfield_set_map = es2_actionfield_set_map,
+ .actionfield_set_map_size = es2_actionfield_set_map_size,
+ .keyfield_set_typegroups = es2_keyfield_set_typegroups,
+ .actionfield_set_typegroups = es2_actionfield_set_typegroups,
+ },
+};
+
+const struct vcap_statistics sparx5_vcap_stats = {
+ .name = "sparx5",
+ .count = 4,
+ .keyfield_set_names = vcap_keyfield_set_names,
+ .actionfield_set_names = vcap_actionfield_set_names,
+ .keyfield_names = vcap_keyfield_names,
+ .actionfield_names = vcap_actionfield_names,
+};
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_ag_api.h b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_ag_api.h
new file mode 100644
index 0000000000..7d106f1276
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_ag_api.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (C) 2022 Microchip Technology Inc. and its subsidiaries.
+ * Microchip VCAP API
+ */
+
+/* This file is autogenerated by cml-utils 2022-10-13 10:04:41 +0200.
+ * Commit ID: fd7cafd175899f0672c73afb3a30fc872500ae86
+ */
+
+#ifndef __SPARX5_VCAP_AG_API_H__
+#define __SPARX5_VCAP_AG_API_H__
+
+/* VCAPs */
+extern const struct vcap_info sparx5_vcaps[];
+extern const struct vcap_statistics sparx5_vcap_stats;
+
+#endif /* __SPARX5_VCAP_AG_API_H__ */
+
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_debugfs.c b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_debugfs.c
new file mode 100644
index 0000000000..12722f728e
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_debugfs.c
@@ -0,0 +1,471 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip Sparx5 Switch driver VCAP debugFS implementation
+ *
+ * Copyright (c) 2022 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include <linux/types.h>
+#include <linux/list.h>
+
+#include "sparx5_vcap_debugfs.h"
+#include "sparx5_main_regs.h"
+#include "sparx5_main.h"
+#include "sparx5_vcap_impl.h"
+#include "sparx5_vcap_ag_api.h"
+
+static const char *sparx5_vcap_is0_etype_str(u32 value)
+{
+ switch (value) {
+ case VCAP_IS0_PS_ETYPE_DEFAULT:
+ return "default";
+ case VCAP_IS0_PS_ETYPE_NORMAL_7TUPLE:
+ return "normal_7tuple";
+ case VCAP_IS0_PS_ETYPE_NORMAL_5TUPLE_IP4:
+ return "normal_5tuple_ip4";
+ case VCAP_IS0_PS_ETYPE_MLL:
+ return "mll";
+ case VCAP_IS0_PS_ETYPE_LL_FULL:
+ return "ll_full";
+ case VCAP_IS0_PS_ETYPE_PURE_5TUPLE_IP4:
+ return "pure_5tuple_ip4";
+ case VCAP_IS0_PS_ETYPE_ETAG:
+ return "etag";
+ case VCAP_IS0_PS_ETYPE_NO_LOOKUP:
+ return "no lookup";
+ default:
+ return "unknown";
+ }
+}
+
+static const char *sparx5_vcap_is0_mpls_str(u32 value)
+{
+ switch (value) {
+ case VCAP_IS0_PS_MPLS_FOLLOW_ETYPE:
+ return "follow_etype";
+ case VCAP_IS0_PS_MPLS_NORMAL_7TUPLE:
+ return "normal_7tuple";
+ case VCAP_IS0_PS_MPLS_NORMAL_5TUPLE_IP4:
+ return "normal_5tuple_ip4";
+ case VCAP_IS0_PS_MPLS_MLL:
+ return "mll";
+ case VCAP_IS0_PS_MPLS_LL_FULL:
+ return "ll_full";
+ case VCAP_IS0_PS_MPLS_PURE_5TUPLE_IP4:
+ return "pure_5tuple_ip4";
+ case VCAP_IS0_PS_MPLS_ETAG:
+ return "etag";
+ case VCAP_IS0_PS_MPLS_NO_LOOKUP:
+ return "no lookup";
+ default:
+ return "unknown";
+ }
+}
+
+static const char *sparx5_vcap_is0_mlbs_str(u32 value)
+{
+ switch (value) {
+ case VCAP_IS0_PS_MLBS_FOLLOW_ETYPE:
+ return "follow_etype";
+ case VCAP_IS0_PS_MLBS_NO_LOOKUP:
+ return "no lookup";
+ default:
+ return "unknown";
+ }
+}
+
+static void sparx5_vcap_is0_port_keys(struct sparx5 *sparx5,
+ struct vcap_admin *admin,
+ struct sparx5_port *port,
+ struct vcap_output_print *out)
+{
+ int lookup;
+ u32 value, val;
+
+ out->prf(out->dst, " port[%02d] (%s): ", port->portno,
+ netdev_name(port->ndev));
+ for (lookup = 0; lookup < admin->lookups; ++lookup) {
+ out->prf(out->dst, "\n Lookup %d: ", lookup);
+
+ /* Get lookup state */
+ value = spx5_rd(sparx5,
+ ANA_CL_ADV_CL_CFG(port->portno, lookup));
+ out->prf(out->dst, "\n state: ");
+ if (ANA_CL_ADV_CL_CFG_LOOKUP_ENA_GET(value))
+ out->prf(out->dst, "on");
+ else
+ out->prf(out->dst, "off");
+ val = ANA_CL_ADV_CL_CFG_ETYPE_CLM_KEY_SEL_GET(value);
+ out->prf(out->dst, "\n etype: %s",
+ sparx5_vcap_is0_etype_str(val));
+ val = ANA_CL_ADV_CL_CFG_IP4_CLM_KEY_SEL_GET(value);
+ out->prf(out->dst, "\n ipv4: %s",
+ sparx5_vcap_is0_etype_str(val));
+ val = ANA_CL_ADV_CL_CFG_IP6_CLM_KEY_SEL_GET(value);
+ out->prf(out->dst, "\n ipv6: %s",
+ sparx5_vcap_is0_etype_str(val));
+ val = ANA_CL_ADV_CL_CFG_MPLS_UC_CLM_KEY_SEL_GET(value);
+ out->prf(out->dst, "\n mpls_uc: %s",
+ sparx5_vcap_is0_mpls_str(val));
+ val = ANA_CL_ADV_CL_CFG_MPLS_MC_CLM_KEY_SEL_GET(value);
+ out->prf(out->dst, "\n mpls_mc: %s",
+ sparx5_vcap_is0_mpls_str(val));
+ val = ANA_CL_ADV_CL_CFG_MLBS_CLM_KEY_SEL_GET(value);
+ out->prf(out->dst, "\n mlbs: %s",
+ sparx5_vcap_is0_mlbs_str(val));
+ }
+ out->prf(out->dst, "\n");
+}
+
+static void sparx5_vcap_is2_port_keys(struct sparx5 *sparx5,
+ struct vcap_admin *admin,
+ struct sparx5_port *port,
+ struct vcap_output_print *out)
+{
+ int lookup;
+ u32 value;
+
+ out->prf(out->dst, " port[%02d] (%s): ", port->portno,
+ netdev_name(port->ndev));
+ for (lookup = 0; lookup < admin->lookups; ++lookup) {
+ out->prf(out->dst, "\n Lookup %d: ", lookup);
+
+ /* Get lookup state */
+ value = spx5_rd(sparx5, ANA_ACL_VCAP_S2_CFG(port->portno));
+ out->prf(out->dst, "\n state: ");
+ if (ANA_ACL_VCAP_S2_CFG_SEC_ENA_GET(value) & BIT(lookup))
+ out->prf(out->dst, "on");
+ else
+ out->prf(out->dst, "off");
+
+ /* Get key selection state */
+ value = spx5_rd(sparx5,
+ ANA_ACL_VCAP_S2_KEY_SEL(port->portno, lookup));
+
+ out->prf(out->dst, "\n noneth: ");
+ switch (ANA_ACL_VCAP_S2_KEY_SEL_NON_ETH_KEY_SEL_GET(value)) {
+ case VCAP_IS2_PS_NONETH_MAC_ETYPE:
+ out->prf(out->dst, "mac_etype");
+ break;
+ case VCAP_IS2_PS_NONETH_CUSTOM_1:
+ out->prf(out->dst, "custom1");
+ break;
+ case VCAP_IS2_PS_NONETH_CUSTOM_2:
+ out->prf(out->dst, "custom2");
+ break;
+ case VCAP_IS2_PS_NONETH_NO_LOOKUP:
+ out->prf(out->dst, "none");
+ break;
+ }
+ out->prf(out->dst, "\n ipv4_mc: ");
+ switch (ANA_ACL_VCAP_S2_KEY_SEL_IP4_MC_KEY_SEL_GET(value)) {
+ case VCAP_IS2_PS_IPV4_MC_MAC_ETYPE:
+ out->prf(out->dst, "mac_etype");
+ break;
+ case VCAP_IS2_PS_IPV4_MC_IP4_TCP_UDP_OTHER:
+ out->prf(out->dst, "ip4_tcp_udp ip4_other");
+ break;
+ case VCAP_IS2_PS_IPV4_MC_IP_7TUPLE:
+ out->prf(out->dst, "ip_7tuple");
+ break;
+ case VCAP_IS2_PS_IPV4_MC_IP4_VID:
+ out->prf(out->dst, "ip4_vid");
+ break;
+ }
+ out->prf(out->dst, "\n ipv4_uc: ");
+ switch (ANA_ACL_VCAP_S2_KEY_SEL_IP4_UC_KEY_SEL_GET(value)) {
+ case VCAP_IS2_PS_IPV4_UC_MAC_ETYPE:
+ out->prf(out->dst, "mac_etype");
+ break;
+ case VCAP_IS2_PS_IPV4_UC_IP4_TCP_UDP_OTHER:
+ out->prf(out->dst, "ip4_tcp_udp ip4_other");
+ break;
+ case VCAP_IS2_PS_IPV4_UC_IP_7TUPLE:
+ out->prf(out->dst, "ip_7tuple");
+ break;
+ }
+ out->prf(out->dst, "\n ipv6_mc: ");
+ switch (ANA_ACL_VCAP_S2_KEY_SEL_IP6_MC_KEY_SEL_GET(value)) {
+ case VCAP_IS2_PS_IPV6_MC_MAC_ETYPE:
+ out->prf(out->dst, "mac_etype");
+ break;
+ case VCAP_IS2_PS_IPV6_MC_IP_7TUPLE:
+ out->prf(out->dst, "ip_7tuple");
+ break;
+ case VCAP_IS2_PS_IPV6_MC_IP6_VID:
+ out->prf(out->dst, "ip6_vid");
+ break;
+ case VCAP_IS2_PS_IPV6_MC_IP6_STD:
+ out->prf(out->dst, "ip6_std");
+ break;
+ case VCAP_IS2_PS_IPV6_MC_IP4_TCP_UDP_OTHER:
+ out->prf(out->dst, "ip4_tcp_udp ip4_other");
+ break;
+ }
+ out->prf(out->dst, "\n ipv6_uc: ");
+ switch (ANA_ACL_VCAP_S2_KEY_SEL_IP6_UC_KEY_SEL_GET(value)) {
+ case VCAP_IS2_PS_IPV6_UC_MAC_ETYPE:
+ out->prf(out->dst, "mac_etype");
+ break;
+ case VCAP_IS2_PS_IPV6_UC_IP_7TUPLE:
+ out->prf(out->dst, "ip_7tuple");
+ break;
+ case VCAP_IS2_PS_IPV6_UC_IP6_STD:
+ out->prf(out->dst, "ip6_std");
+ break;
+ case VCAP_IS2_PS_IPV6_UC_IP4_TCP_UDP_OTHER:
+ out->prf(out->dst, "ip4_tcp_udp ip4_other");
+ break;
+ }
+ out->prf(out->dst, "\n arp: ");
+ switch (ANA_ACL_VCAP_S2_KEY_SEL_ARP_KEY_SEL_GET(value)) {
+ case VCAP_IS2_PS_ARP_MAC_ETYPE:
+ out->prf(out->dst, "mac_etype");
+ break;
+ case VCAP_IS2_PS_ARP_ARP:
+ out->prf(out->dst, "arp");
+ break;
+ }
+ }
+ out->prf(out->dst, "\n");
+}
+
+static void sparx5_vcap_is2_port_stickies(struct sparx5 *sparx5,
+ struct vcap_admin *admin,
+ struct vcap_output_print *out)
+{
+ int lookup;
+ u32 value;
+
+ out->prf(out->dst, " Sticky bits: ");
+ for (lookup = 0; lookup < admin->lookups; ++lookup) {
+ out->prf(out->dst, "\n Lookup %d: ", lookup);
+ /* Get lookup sticky bits */
+ value = spx5_rd(sparx5, ANA_ACL_SEC_LOOKUP_STICKY(lookup));
+
+ if (ANA_ACL_SEC_LOOKUP_STICKY_KEY_SEL_CLM_STICKY_GET(value))
+ out->prf(out->dst, " sel_clm");
+ if (ANA_ACL_SEC_LOOKUP_STICKY_KEY_SEL_IRLEG_STICKY_GET(value))
+ out->prf(out->dst, " sel_irleg");
+ if (ANA_ACL_SEC_LOOKUP_STICKY_KEY_SEL_ERLEG_STICKY_GET(value))
+ out->prf(out->dst, " sel_erleg");
+ if (ANA_ACL_SEC_LOOKUP_STICKY_KEY_SEL_PORT_STICKY_GET(value))
+ out->prf(out->dst, " sel_port");
+ if (ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_CUSTOM2_STICKY_GET(value))
+ out->prf(out->dst, " custom2");
+ if (ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_CUSTOM1_STICKY_GET(value))
+ out->prf(out->dst, " custom1");
+ if (ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_OAM_STICKY_GET(value))
+ out->prf(out->dst, " oam");
+ if (ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_VID_STICKY_GET(value))
+ out->prf(out->dst, " ip6_vid");
+ if (ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_STD_STICKY_GET(value))
+ out->prf(out->dst, " ip6_std");
+ if (ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_TCPUDP_STICKY_GET(value))
+ out->prf(out->dst, " ip6_tcpudp");
+ if (ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP_7TUPLE_STICKY_GET(value))
+ out->prf(out->dst, " ip_7tuple");
+ if (ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_VID_STICKY_GET(value))
+ out->prf(out->dst, " ip4_vid");
+ if (ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_TCPUDP_STICKY_GET(value))
+ out->prf(out->dst, " ip4_tcpudp");
+ if (ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_OTHER_STICKY_GET(value))
+ out->prf(out->dst, " ip4_other");
+ if (ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_ARP_STICKY_GET(value))
+ out->prf(out->dst, " arp");
+ if (ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_SNAP_STICKY_GET(value))
+ out->prf(out->dst, " mac_snap");
+ if (ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_LLC_STICKY_GET(value))
+ out->prf(out->dst, " mac_llc");
+ if (ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_ETYPE_STICKY_GET(value))
+ out->prf(out->dst, " mac_etype");
+ /* Clear stickies */
+ spx5_wr(value, sparx5, ANA_ACL_SEC_LOOKUP_STICKY(lookup));
+ }
+ out->prf(out->dst, "\n");
+}
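+/* Note: the sticky registers appear to be write-1-to-clear; echoing the
+ * value just read (here and in the ES2 variant below) then resets exactly
+ * the bits that were reported.
+ */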
+
+static void sparx5_vcap_es0_port_keys(struct sparx5 *sparx5,
+ struct vcap_admin *admin,
+ struct sparx5_port *port,
+ struct vcap_output_print *out)
+{
+ u32 value;
+
+ out->prf(out->dst, " port[%02d] (%s): ", port->portno,
+ netdev_name(port->ndev));
+ out->prf(out->dst, "\n Lookup 0: ");
+
+ /* Get lookup state */
+ value = spx5_rd(sparx5, REW_ES0_CTRL);
+ out->prf(out->dst, "\n state: ");
+ if (REW_ES0_CTRL_ES0_LU_ENA_GET(value))
+ out->prf(out->dst, "on");
+ else
+ out->prf(out->dst, "off");
+
+ out->prf(out->dst, "\n keyset: ");
+ value = spx5_rd(sparx5, REW_RTAG_ETAG_CTRL(port->portno));
+ switch (REW_RTAG_ETAG_CTRL_ES0_ISDX_KEY_ENA_GET(value)) {
+ case VCAP_ES0_PS_NORMAL_SELECTION:
+ out->prf(out->dst, "normal");
+ break;
+ case VCAP_ES0_PS_FORCE_ISDX_LOOKUPS:
+ out->prf(out->dst, "isdx");
+ break;
+ case VCAP_ES0_PS_FORCE_VID_LOOKUPS:
+ out->prf(out->dst, "vid");
+ break;
+ case VCAP_ES0_PS_RESERVED:
+ out->prf(out->dst, "reserved");
+ break;
+ }
+ out->prf(out->dst, "\n");
+}
+
+static void sparx5_vcap_es2_port_keys(struct sparx5 *sparx5,
+ struct vcap_admin *admin,
+ struct sparx5_port *port,
+ struct vcap_output_print *out)
+{
+ int lookup;
+ u32 value;
+
+ out->prf(out->dst, " port[%02d] (%s): ", port->portno,
+ netdev_name(port->ndev));
+ for (lookup = 0; lookup < admin->lookups; ++lookup) {
+ out->prf(out->dst, "\n Lookup %d: ", lookup);
+
+ /* Get lookup state */
+ value = spx5_rd(sparx5, EACL_VCAP_ES2_KEY_SEL(port->portno,
+ lookup));
+ out->prf(out->dst, "\n state: ");
+ if (EACL_VCAP_ES2_KEY_SEL_KEY_ENA_GET(value))
+ out->prf(out->dst, "on");
+ else
+ out->prf(out->dst, "off");
+
+ out->prf(out->dst, "\n arp: ");
+ switch (EACL_VCAP_ES2_KEY_SEL_ARP_KEY_SEL_GET(value)) {
+ case VCAP_ES2_PS_ARP_MAC_ETYPE:
+ out->prf(out->dst, "mac_etype");
+ break;
+ case VCAP_ES2_PS_ARP_ARP:
+ out->prf(out->dst, "arp");
+ break;
+ }
+ out->prf(out->dst, "\n ipv4: ");
+ switch (EACL_VCAP_ES2_KEY_SEL_IP4_KEY_SEL_GET(value)) {
+ case VCAP_ES2_PS_IPV4_MAC_ETYPE:
+ out->prf(out->dst, "mac_etype");
+ break;
+ case VCAP_ES2_PS_IPV4_IP_7TUPLE:
+ out->prf(out->dst, "ip_7tuple");
+ break;
+ case VCAP_ES2_PS_IPV4_IP4_TCP_UDP_VID:
+ out->prf(out->dst, "ip4_tcp_udp ip4_vid");
+ break;
+ case VCAP_ES2_PS_IPV4_IP4_TCP_UDP_OTHER:
+ out->prf(out->dst, "ip4_tcp_udp ip4_other");
+ break;
+ case VCAP_ES2_PS_IPV4_IP4_VID:
+ out->prf(out->dst, "ip4_vid");
+ break;
+ case VCAP_ES2_PS_IPV4_IP4_OTHER:
+ out->prf(out->dst, "ip4_other");
+ break;
+ }
+ out->prf(out->dst, "\n ipv6: ");
+ switch (EACL_VCAP_ES2_KEY_SEL_IP6_KEY_SEL_GET(value)) {
+ case VCAP_ES2_PS_IPV6_MAC_ETYPE:
+ out->prf(out->dst, "mac_etype");
+ break;
+ case VCAP_ES2_PS_IPV6_IP_7TUPLE:
+ out->prf(out->dst, "ip_7tuple");
+ break;
+ case VCAP_ES2_PS_IPV6_IP_7TUPLE_VID:
+ out->prf(out->dst, "ip_7tuple ip6_vid");
+ break;
+ case VCAP_ES2_PS_IPV6_IP_7TUPLE_STD:
+ out->prf(out->dst, "ip_7tuple ip6_std");
+ break;
+ case VCAP_ES2_PS_IPV6_IP6_VID:
+ out->prf(out->dst, "ip6_vid");
+ break;
+ case VCAP_ES2_PS_IPV6_IP6_STD:
+ out->prf(out->dst, "ip6_std");
+ break;
+ case VCAP_ES2_PS_IPV6_IP4_DOWNGRADE:
+ out->prf(out->dst, "ip4_downgrade");
+ break;
+ }
+ }
+ out->prf(out->dst, "\n");
+}
+
+static void sparx5_vcap_es2_port_stickies(struct sparx5 *sparx5,
+ struct vcap_admin *admin,
+ struct vcap_output_print *out)
+{
+ int lookup;
+ u32 value;
+
+ out->prf(out->dst, " Sticky bits: ");
+ for (lookup = 0; lookup < admin->lookups; ++lookup) {
+ value = spx5_rd(sparx5, EACL_SEC_LOOKUP_STICKY(lookup));
+ out->prf(out->dst, "\n Lookup %d: ", lookup);
+ if (EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP_7TUPLE_STICKY_GET(value))
+ out->prf(out->dst, " ip_7tuple");
+ if (EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_VID_STICKY_GET(value))
+ out->prf(out->dst, " ip6_vid");
+ if (EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_STD_STICKY_GET(value))
+ out->prf(out->dst, " ip6_std");
+ if (EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_TCPUDP_STICKY_GET(value))
+ out->prf(out->dst, " ip4_tcp_udp");
+ if (EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_VID_STICKY_GET(value))
+ out->prf(out->dst, " ip4_vid");
+ if (EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_OTHER_STICKY_GET(value))
+ out->prf(out->dst, " ip4_other");
+ if (EACL_SEC_LOOKUP_STICKY_SEC_TYPE_ARP_STICKY_GET(value))
+ out->prf(out->dst, " arp");
+ if (EACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_ETYPE_STICKY_GET(value))
+ out->prf(out->dst, " mac_etype");
+ /* Clear stickies */
+ spx5_wr(value, sparx5, EACL_SEC_LOOKUP_STICKY(lookup));
+ }
+ out->prf(out->dst, "\n");
+}
+
+/* Provide port information via a callback interface */
+int sparx5_port_info(struct net_device *ndev,
+ struct vcap_admin *admin,
+ struct vcap_output_print *out)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ struct sparx5 *sparx5 = port->sparx5;
+ const struct vcap_info *vcap;
+ struct vcap_control *vctrl;
+
+ vctrl = sparx5->vcap_ctrl;
+ vcap = &vctrl->vcaps[admin->vtype];
+ out->prf(out->dst, "%s:\n", vcap->name);
+ switch (admin->vtype) {
+ case VCAP_TYPE_IS0:
+ sparx5_vcap_is0_port_keys(sparx5, admin, port, out);
+ break;
+ case VCAP_TYPE_IS2:
+ sparx5_vcap_is2_port_keys(sparx5, admin, port, out);
+ sparx5_vcap_is2_port_stickies(sparx5, admin, out);
+ break;
+ case VCAP_TYPE_ES0:
+ sparx5_vcap_es0_port_keys(sparx5, admin, port, out);
+ break;
+ case VCAP_TYPE_ES2:
+ sparx5_vcap_es2_port_keys(sparx5, admin, port, out);
+ sparx5_vcap_es2_port_stickies(sparx5, admin, out);
+ break;
+ default:
+ out->prf(out->dst, " no info\n");
+ break;
+ }
+ return 0;
+}
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_debugfs.h b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_debugfs.h
new file mode 100644
index 0000000000..f9ede03441
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_debugfs.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Microchip Sparx5 Switch driver VCAP implementation
+ *
+ * Copyright (c) 2022 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#ifndef __SPARX5_VCAP_DEBUGFS_H__
+#define __SPARX5_VCAP_DEBUGFS_H__
+
+#include <linux/netdevice.h>
+
+#include <vcap_api.h>
+#include <vcap_api_client.h>
+
+#if defined(CONFIG_DEBUG_FS)
+
+/* Provide port information via a callback interface */
+int sparx5_port_info(struct net_device *ndev,
+ struct vcap_admin *admin,
+ struct vcap_output_print *out);
+
+#else
+
+static inline int sparx5_port_info(struct net_device *ndev,
+ struct vcap_admin *admin,
+ struct vcap_output_print *out)
+{
+ return 0;
+}
+
+#endif
+
+#endif /* __SPARX5_VCAP_DEBUGFS_H__ */
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c
new file mode 100644
index 0000000000..187efa1fc9
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c
@@ -0,0 +1,2111 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip Sparx5 Switch driver VCAP implementation
+ *
+ * Copyright (c) 2022 Microchip Technology Inc. and its subsidiaries.
+ *
+ * The Sparx5 Chip Register Model can be browsed at this location:
+ * https://github.com/microchip-ung/sparx-5_reginfo
+ */
+
+#include "vcap_api_debugfs.h"
+#include "sparx5_main_regs.h"
+#include "sparx5_main.h"
+#include "sparx5_vcap_impl.h"
+#include "sparx5_vcap_ag_api.h"
+#include "sparx5_vcap_debugfs.h"
+
+#define SUPER_VCAP_BLK_SIZE 3072 /* addresses per Super VCAP block */
+#define STREAMSIZE (64 * 4) /* bytes in the VCAP cache area */
+
+#define SPARX5_IS2_LOOKUPS 4
+#define VCAP_IS2_KEYSEL(_ena, _noneth, _v4_mc, _v4_uc, _v6_mc, _v6_uc, _arp) \
+ (ANA_ACL_VCAP_S2_KEY_SEL_KEY_SEL_ENA_SET(_ena) | \
+ ANA_ACL_VCAP_S2_KEY_SEL_NON_ETH_KEY_SEL_SET(_noneth) | \
+ ANA_ACL_VCAP_S2_KEY_SEL_IP4_MC_KEY_SEL_SET(_v4_mc) | \
+ ANA_ACL_VCAP_S2_KEY_SEL_IP4_UC_KEY_SEL_SET(_v4_uc) | \
+ ANA_ACL_VCAP_S2_KEY_SEL_IP6_MC_KEY_SEL_SET(_v6_mc) | \
+ ANA_ACL_VCAP_S2_KEY_SEL_IP6_UC_KEY_SEL_SET(_v6_uc) | \
+ ANA_ACL_VCAP_S2_KEY_SEL_ARP_KEY_SEL_SET(_arp))
+
+#define SPARX5_IS0_LOOKUPS 6
+#define VCAP_IS0_KEYSEL(_ena, _etype, _ipv4, _ipv6, _mpls_uc, _mpls_mc, _mlbs) \
+ (ANA_CL_ADV_CL_CFG_LOOKUP_ENA_SET(_ena) | \
+ ANA_CL_ADV_CL_CFG_ETYPE_CLM_KEY_SEL_SET(_etype) | \
+ ANA_CL_ADV_CL_CFG_IP4_CLM_KEY_SEL_SET(_ipv4) | \
+ ANA_CL_ADV_CL_CFG_IP6_CLM_KEY_SEL_SET(_ipv6) | \
+ ANA_CL_ADV_CL_CFG_MPLS_UC_CLM_KEY_SEL_SET(_mpls_uc) | \
+ ANA_CL_ADV_CL_CFG_MPLS_MC_CLM_KEY_SEL_SET(_mpls_mc) | \
+ ANA_CL_ADV_CL_CFG_MLBS_CLM_KEY_SEL_SET(_mlbs))
+
+#define SPARX5_ES0_LOOKUPS 1
+#define VCAP_ES0_KEYSEL(_key) (REW_RTAG_ETAG_CTRL_ES0_ISDX_KEY_ENA_SET(_key))
+#define SPARX5_STAT_ESDX_GRN_PKTS 0x300
+#define SPARX5_STAT_ESDX_YEL_PKTS 0x301
+
+#define SPARX5_ES2_LOOKUPS 2
+#define VCAP_ES2_KEYSEL(_ena, _arp, _ipv4, _ipv6) \
+ (EACL_VCAP_ES2_KEY_SEL_KEY_ENA_SET(_ena) | \
+ EACL_VCAP_ES2_KEY_SEL_ARP_KEY_SEL_SET(_arp) | \
+ EACL_VCAP_ES2_KEY_SEL_IP4_KEY_SEL_SET(_ipv4) | \
+ EACL_VCAP_ES2_KEY_SEL_IP6_KEY_SEL_SET(_ipv6))
+
+static struct sparx5_vcap_inst {
+ enum vcap_type vtype; /* type of vcap */
+ int vinst; /* instance number within the same type */
+ int lookups; /* number of lookups in this vcap type */
+ int lookups_per_instance; /* number of lookups in this instance */
+ int first_cid; /* first chain id in this vcap */
+ int last_cid; /* last chain id in this vcap */
+ int count; /* number of available addresses, not in super vcap */
+ int map_id; /* id in the super vcap block mapping (if applicable) */
+ int blockno; /* starting block in super vcap (if applicable) */
+ int blocks; /* number of blocks in super vcap (if applicable) */
+ bool ingress; /* is vcap in the ingress path */
+} sparx5_vcap_inst_cfg[] = {
+ {
+ .vtype = VCAP_TYPE_IS0, /* CLM-0 */
+ .vinst = 0,
+ .map_id = 1,
+ .lookups = SPARX5_IS0_LOOKUPS,
+ .lookups_per_instance = SPARX5_IS0_LOOKUPS / 3,
+ .first_cid = SPARX5_VCAP_CID_IS0_L0,
+ .last_cid = SPARX5_VCAP_CID_IS0_L2 - 1,
+ .blockno = 8, /* Maps block 8-9 */
+ .blocks = 2,
+ .ingress = true,
+ },
+ {
+ .vtype = VCAP_TYPE_IS0, /* CLM-1 */
+ .vinst = 1,
+ .map_id = 2,
+ .lookups = SPARX5_IS0_LOOKUPS,
+ .lookups_per_instance = SPARX5_IS0_LOOKUPS / 3,
+ .first_cid = SPARX5_VCAP_CID_IS0_L2,
+ .last_cid = SPARX5_VCAP_CID_IS0_L4 - 1,
+ .blockno = 6, /* Maps block 6-7 */
+ .blocks = 2,
+ .ingress = true,
+ },
+ {
+ .vtype = VCAP_TYPE_IS0, /* CLM-2 */
+ .vinst = 2,
+ .map_id = 3,
+ .lookups = SPARX5_IS0_LOOKUPS,
+ .lookups_per_instance = SPARX5_IS0_LOOKUPS / 3,
+ .first_cid = SPARX5_VCAP_CID_IS0_L4,
+ .last_cid = SPARX5_VCAP_CID_IS0_MAX,
+ .blockno = 4, /* Maps block 4-5 */
+ .blocks = 2,
+ .ingress = true,
+ },
+ {
+ .vtype = VCAP_TYPE_IS2, /* IS2-0 */
+ .vinst = 0,
+ .map_id = 4,
+ .lookups = SPARX5_IS2_LOOKUPS,
+ .lookups_per_instance = SPARX5_IS2_LOOKUPS / 2,
+ .first_cid = SPARX5_VCAP_CID_IS2_L0,
+ .last_cid = SPARX5_VCAP_CID_IS2_L2 - 1,
+ .blockno = 0, /* Maps block 0-1 */
+ .blocks = 2,
+ .ingress = true,
+ },
+ {
+ .vtype = VCAP_TYPE_IS2, /* IS2-1 */
+ .vinst = 1,
+ .map_id = 5,
+ .lookups = SPARX5_IS2_LOOKUPS,
+ .lookups_per_instance = SPARX5_IS2_LOOKUPS / 2,
+ .first_cid = SPARX5_VCAP_CID_IS2_L2,
+ .last_cid = SPARX5_VCAP_CID_IS2_MAX,
+ .blockno = 2, /* Maps block 2-3 */
+ .blocks = 2,
+ .ingress = true,
+ },
+ {
+ .vtype = VCAP_TYPE_ES0,
+ .lookups = SPARX5_ES0_LOOKUPS,
+ .lookups_per_instance = SPARX5_ES0_LOOKUPS,
+ .first_cid = SPARX5_VCAP_CID_ES0_L0,
+ .last_cid = SPARX5_VCAP_CID_ES0_MAX,
+ .count = 4096, /* Addresses according to datasheet */
+ .ingress = false,
+ },
+ {
+ .vtype = VCAP_TYPE_ES2,
+ .lookups = SPARX5_ES2_LOOKUPS,
+ .lookups_per_instance = SPARX5_ES2_LOOKUPS,
+ .first_cid = SPARX5_VCAP_CID_ES2_L0,
+ .last_cid = SPARX5_VCAP_CID_ES2_MAX,
+ .count = 12288, /* Addresses according to datasheet */
+ .ingress = false,
+ },
+};
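+/* Note: per the table above, each of the three CLM (IS0) and two IS2
+ * instances serves two of its type's lookups and maps two super VCAP
+ * blocks of SUPER_VCAP_BLK_SIZE addresses each, while ES0 and ES2 are not
+ * part of the super VCAP and therefore carry a fixed address count
+ * instead of a block mapping.
+ */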
+
+/* These protocols have dedicated keysets in IS0 and a TC dissector */
+static u16 sparx5_vcap_is0_known_etypes[] = {
+ ETH_P_ALL,
+ ETH_P_IP,
+ ETH_P_IPV6,
+};
+
+/* These protocols have dedicated keysets in IS2 and a TC dissector */
+static u16 sparx5_vcap_is2_known_etypes[] = {
+ ETH_P_ALL,
+ ETH_P_ARP,
+ ETH_P_IP,
+ ETH_P_IPV6,
+};
+
+/* These protocols have dedicated keysets in ES2 and a TC dissector */
+static u16 sparx5_vcap_es2_known_etypes[] = {
+ ETH_P_ALL,
+ ETH_P_ARP,
+ ETH_P_IP,
+ ETH_P_IPV6,
+};
+
+static void sparx5_vcap_type_err(struct sparx5 *sparx5,
+ struct vcap_admin *admin,
+ const char *fname)
+{
+ pr_err("%s: vcap type: %s not supported\n",
+ fname, sparx5_vcaps[admin->vtype].name);
+}
+
+/* Await the super VCAP completion of the current operation */
+static void sparx5_vcap_wait_super_update(struct sparx5 *sparx5)
+{
+ u32 value;
+
+ read_poll_timeout(spx5_rd, value,
+ !VCAP_SUPER_CTRL_UPDATE_SHOT_GET(value), 500, 10000,
+ false, sparx5, VCAP_SUPER_CTRL);
+}
+
+/* Await the ES0 VCAP completion of the current operation */
+static void sparx5_vcap_wait_es0_update(struct sparx5 *sparx5)
+{
+ u32 value;
+
+ read_poll_timeout(spx5_rd, value,
+ !VCAP_ES0_CTRL_UPDATE_SHOT_GET(value), 500, 10000,
+ false, sparx5, VCAP_ES0_CTRL);
+}
+
+/* Await the ES2 VCAP completion of the current operation */
+static void sparx5_vcap_wait_es2_update(struct sparx5 *sparx5)
+{
+ u32 value;
+
+ read_poll_timeout(spx5_rd, value,
+ !VCAP_ES2_CTRL_UPDATE_SHOT_GET(value), 500, 10000,
+ false, sparx5, VCAP_ES2_CTRL);
+}
+
+/* Initializing a VCAP address range */
+static void _sparx5_vcap_range_init(struct sparx5 *sparx5,
+ struct vcap_admin *admin,
+ u32 addr, u32 count)
+{
+ u32 size = count - 1;
+
+ switch (admin->vtype) {
+ case VCAP_TYPE_IS0:
+ case VCAP_TYPE_IS2:
+ spx5_wr(VCAP_SUPER_CFG_MV_NUM_POS_SET(0) |
+ VCAP_SUPER_CFG_MV_SIZE_SET(size),
+ sparx5, VCAP_SUPER_CFG);
+ spx5_wr(VCAP_SUPER_CTRL_UPDATE_CMD_SET(VCAP_CMD_INITIALIZE) |
+ VCAP_SUPER_CTRL_UPDATE_ENTRY_DIS_SET(0) |
+ VCAP_SUPER_CTRL_UPDATE_ACTION_DIS_SET(0) |
+ VCAP_SUPER_CTRL_UPDATE_CNT_DIS_SET(0) |
+ VCAP_SUPER_CTRL_UPDATE_ADDR_SET(addr) |
+ VCAP_SUPER_CTRL_CLEAR_CACHE_SET(true) |
+ VCAP_SUPER_CTRL_UPDATE_SHOT_SET(true),
+ sparx5, VCAP_SUPER_CTRL);
+ sparx5_vcap_wait_super_update(sparx5);
+ break;
+ case VCAP_TYPE_ES0:
+ spx5_wr(VCAP_ES0_CFG_MV_NUM_POS_SET(0) |
+ VCAP_ES0_CFG_MV_SIZE_SET(size),
+ sparx5, VCAP_ES0_CFG);
+ spx5_wr(VCAP_ES0_CTRL_UPDATE_CMD_SET(VCAP_CMD_INITIALIZE) |
+ VCAP_ES0_CTRL_UPDATE_ENTRY_DIS_SET(0) |
+ VCAP_ES0_CTRL_UPDATE_ACTION_DIS_SET(0) |
+ VCAP_ES0_CTRL_UPDATE_CNT_DIS_SET(0) |
+ VCAP_ES0_CTRL_UPDATE_ADDR_SET(addr) |
+ VCAP_ES0_CTRL_CLEAR_CACHE_SET(true) |
+ VCAP_ES0_CTRL_UPDATE_SHOT_SET(true),
+ sparx5, VCAP_ES0_CTRL);
+ sparx5_vcap_wait_es0_update(sparx5);
+ break;
+ case VCAP_TYPE_ES2:
+ spx5_wr(VCAP_ES2_CFG_MV_NUM_POS_SET(0) |
+ VCAP_ES2_CFG_MV_SIZE_SET(size),
+ sparx5, VCAP_ES2_CFG);
+ spx5_wr(VCAP_ES2_CTRL_UPDATE_CMD_SET(VCAP_CMD_INITIALIZE) |
+ VCAP_ES2_CTRL_UPDATE_ENTRY_DIS_SET(0) |
+ VCAP_ES2_CTRL_UPDATE_ACTION_DIS_SET(0) |
+ VCAP_ES2_CTRL_UPDATE_CNT_DIS_SET(0) |
+ VCAP_ES2_CTRL_UPDATE_ADDR_SET(addr) |
+ VCAP_ES2_CTRL_CLEAR_CACHE_SET(true) |
+ VCAP_ES2_CTRL_UPDATE_SHOT_SET(true),
+ sparx5, VCAP_ES2_CTRL);
+ sparx5_vcap_wait_es2_update(sparx5);
+ break;
+ default:
+ sparx5_vcap_type_err(sparx5, admin, __func__);
+ break;
+ }
+}
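+/* Note: the MV_SIZE fields are programmed with the range size minus one
+ * (hence the "count - 1" above); the INITIALIZE command is then issued
+ * with CLEAR_CACHE set and the SHOT bit is polled for completion.
+ */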
+
+/* Initializing VCAP rule data area */
+static void sparx5_vcap_block_init(struct sparx5 *sparx5,
+ struct vcap_admin *admin)
+{
+ _sparx5_vcap_range_init(sparx5, admin, admin->first_valid_addr,
+ admin->last_valid_addr -
+ admin->first_valid_addr);
+}
+
+/* Get the keyset name from the sparx5 VCAP model */
+static const char *sparx5_vcap_keyset_name(struct net_device *ndev,
+ enum vcap_keyfield_set keyset)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+
+ return vcap_keyset_name(port->sparx5->vcap_ctrl, keyset);
+}
+
+/* Check if this is the first lookup of IS0 */
+static bool sparx5_vcap_is0_is_first_chain(struct vcap_rule *rule)
+{
+ return (rule->vcap_chain_id >= SPARX5_VCAP_CID_IS0_L0 &&
+ rule->vcap_chain_id < SPARX5_VCAP_CID_IS0_L1) ||
+ ((rule->vcap_chain_id >= SPARX5_VCAP_CID_IS0_L2 &&
+ rule->vcap_chain_id < SPARX5_VCAP_CID_IS0_L3)) ||
+ ((rule->vcap_chain_id >= SPARX5_VCAP_CID_IS0_L4 &&
+ rule->vcap_chain_id < SPARX5_VCAP_CID_IS0_L5));
+}
+
+/* Check if this is the first lookup of IS2 */
+static bool sparx5_vcap_is2_is_first_chain(struct vcap_rule *rule)
+{
+ return (rule->vcap_chain_id >= SPARX5_VCAP_CID_IS2_L0 &&
+ rule->vcap_chain_id < SPARX5_VCAP_CID_IS2_L1) ||
+ ((rule->vcap_chain_id >= SPARX5_VCAP_CID_IS2_L2 &&
+ rule->vcap_chain_id < SPARX5_VCAP_CID_IS2_L3));
+}
+
+static bool sparx5_vcap_es2_is_first_chain(struct vcap_rule *rule)
+{
+ return (rule->vcap_chain_id >= SPARX5_VCAP_CID_ES2_L0 &&
+ rule->vcap_chain_id < SPARX5_VCAP_CID_ES2_L1);
+}
+
+/* Set the narrow range ingress port mask on a rule */
+static void sparx5_vcap_add_ingress_range_port_mask(struct vcap_rule *rule,
+ struct net_device *ndev)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ u32 port_mask;
+ u32 range;
+
+ range = port->portno / BITS_PER_TYPE(u32);
+ /* Port bit set to match-any */
+ port_mask = ~BIT(port->portno % BITS_PER_TYPE(u32));
+ vcap_rule_add_key_u32(rule, VCAP_KF_IF_IGR_PORT_MASK_SEL, 0, 0xf);
+ vcap_rule_add_key_u32(rule, VCAP_KF_IF_IGR_PORT_MASK_RNG, range, 0xf);
+ vcap_rule_add_key_u32(rule, VCAP_KF_IF_IGR_PORT_MASK, 0, port_mask);
+}
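+/* Note: the port mask keys used here and in the helpers below hold a
+ * one-hot encoding of the frame's port, so matching value 0 under a mask
+ * with only this port's bit cleared rejects frames on every other port
+ * (their set bit violates the zero requirement) while this port's own
+ * bit remains "don't care".
+ */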
+
+/* Set the wide range ingress port mask on a rule */
+static void sparx5_vcap_add_wide_port_mask(struct vcap_rule *rule,
+ struct net_device *ndev)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ struct vcap_u72_key port_mask;
+ u32 range;
+
+ /* Port bit set to match-any */
+ memset(port_mask.value, 0, sizeof(port_mask.value));
+ memset(port_mask.mask, 0xff, sizeof(port_mask.mask));
+ range = port->portno / BITS_PER_BYTE;
+ port_mask.mask[range] = ~BIT(port->portno % BITS_PER_BYTE);
+ vcap_rule_add_key_u72(rule, VCAP_KF_IF_IGR_PORT_MASK, &port_mask);
+}
+
+static void sparx5_vcap_add_egress_range_port_mask(struct vcap_rule *rule,
+ struct net_device *ndev)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ u32 port_mask;
+ u32 range;
+
+ /* Mask range selects:
+	 * 0-2: Physical/Logical egress port number 0-31, 32-63, 64.
+ * 3-5: Virtual Interface Number 0-31, 32-63, 64.
+ * 6: CPU queue Number 0-7.
+ *
+ * Use physical/logical port ranges (0-2)
+ */
+ range = port->portno / BITS_PER_TYPE(u32);
+ /* Port bit set to match-any */
+ port_mask = ~BIT(port->portno % BITS_PER_TYPE(u32));
+ vcap_rule_add_key_u32(rule, VCAP_KF_IF_EGR_PORT_MASK_RNG, range, 0xf);
+ vcap_rule_add_key_u32(rule, VCAP_KF_IF_EGR_PORT_MASK, 0, port_mask);
+}
+
+/* Convert IS0 chain id to vcap lookup id */
+static int sparx5_vcap_is0_cid_to_lookup(int cid)
+{
+ int lookup = 0;
+
+ if (cid >= SPARX5_VCAP_CID_IS0_L1 && cid < SPARX5_VCAP_CID_IS0_L2)
+ lookup = 1;
+ else if (cid >= SPARX5_VCAP_CID_IS0_L2 && cid < SPARX5_VCAP_CID_IS0_L3)
+ lookup = 2;
+ else if (cid >= SPARX5_VCAP_CID_IS0_L3 && cid < SPARX5_VCAP_CID_IS0_L4)
+ lookup = 3;
+ else if (cid >= SPARX5_VCAP_CID_IS0_L4 && cid < SPARX5_VCAP_CID_IS0_L5)
+ lookup = 4;
+ else if (cid >= SPARX5_VCAP_CID_IS0_L5 && cid < SPARX5_VCAP_CID_IS0_MAX)
+ lookup = 5;
+
+ return lookup;
+}
+
+/* Convert IS2 chain id to vcap lookup id */
+static int sparx5_vcap_is2_cid_to_lookup(int cid)
+{
+ int lookup = 0;
+
+ if (cid >= SPARX5_VCAP_CID_IS2_L1 && cid < SPARX5_VCAP_CID_IS2_L2)
+ lookup = 1;
+ else if (cid >= SPARX5_VCAP_CID_IS2_L2 && cid < SPARX5_VCAP_CID_IS2_L3)
+ lookup = 2;
+ else if (cid >= SPARX5_VCAP_CID_IS2_L3 && cid < SPARX5_VCAP_CID_IS2_MAX)
+ lookup = 3;
+
+ return lookup;
+}
+
+/* Convert ES2 chain id to vcap lookup id */
+static int sparx5_vcap_es2_cid_to_lookup(int cid)
+{
+ int lookup = 0;
+
+ if (cid >= SPARX5_VCAP_CID_ES2_L1)
+ lookup = 1;
+
+ return lookup;
+}
+
+/* Add ethernet type IS0 keyset to a list */
+static void
+sparx5_vcap_is0_get_port_etype_keysets(struct vcap_keyset_list *keysetlist,
+ u32 value)
+{
+ switch (ANA_CL_ADV_CL_CFG_ETYPE_CLM_KEY_SEL_GET(value)) {
+ case VCAP_IS0_PS_ETYPE_NORMAL_7TUPLE:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_NORMAL_7TUPLE);
+ break;
+ case VCAP_IS0_PS_ETYPE_NORMAL_5TUPLE_IP4:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_NORMAL_5TUPLE_IP4);
+ break;
+ }
+}
+
+/* Return the list of keysets for the vcap port configuration */
+static int sparx5_vcap_is0_get_port_keysets(struct net_device *ndev,
+ int lookup,
+ struct vcap_keyset_list *keysetlist,
+ u16 l3_proto)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ struct sparx5 *sparx5 = port->sparx5;
+ int portno = port->portno;
+ u32 value;
+
+ value = spx5_rd(sparx5, ANA_CL_ADV_CL_CFG(portno, lookup));
+
+ /* Collect all keysets for the port in a list */
+ if (l3_proto == ETH_P_ALL)
+ sparx5_vcap_is0_get_port_etype_keysets(keysetlist, value);
+
+ if (l3_proto == ETH_P_ALL || l3_proto == ETH_P_IP)
+ switch (ANA_CL_ADV_CL_CFG_IP4_CLM_KEY_SEL_GET(value)) {
+ case VCAP_IS0_PS_ETYPE_DEFAULT:
+ sparx5_vcap_is0_get_port_etype_keysets(keysetlist,
+ value);
+ break;
+ case VCAP_IS0_PS_ETYPE_NORMAL_7TUPLE:
+ vcap_keyset_list_add(keysetlist,
+ VCAP_KFS_NORMAL_7TUPLE);
+ break;
+ case VCAP_IS0_PS_ETYPE_NORMAL_5TUPLE_IP4:
+ vcap_keyset_list_add(keysetlist,
+ VCAP_KFS_NORMAL_5TUPLE_IP4);
+ break;
+ }
+
+ if (l3_proto == ETH_P_ALL || l3_proto == ETH_P_IPV6)
+ switch (ANA_CL_ADV_CL_CFG_IP6_CLM_KEY_SEL_GET(value)) {
+ case VCAP_IS0_PS_ETYPE_DEFAULT:
+ sparx5_vcap_is0_get_port_etype_keysets(keysetlist,
+ value);
+ break;
+ case VCAP_IS0_PS_ETYPE_NORMAL_7TUPLE:
+ vcap_keyset_list_add(keysetlist,
+ VCAP_KFS_NORMAL_7TUPLE);
+ break;
+ case VCAP_IS0_PS_ETYPE_NORMAL_5TUPLE_IP4:
+ vcap_keyset_list_add(keysetlist,
+ VCAP_KFS_NORMAL_5TUPLE_IP4);
+ break;
+ }
+
+ if (l3_proto != ETH_P_IP && l3_proto != ETH_P_IPV6)
+ sparx5_vcap_is0_get_port_etype_keysets(keysetlist, value);
+ return 0;
+}
+
+/* Return the list of keysets for the vcap port configuration */
+static int sparx5_vcap_is2_get_port_keysets(struct net_device *ndev,
+ int lookup,
+ struct vcap_keyset_list *keysetlist,
+ u16 l3_proto)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ struct sparx5 *sparx5 = port->sparx5;
+ int portno = port->portno;
+ u32 value;
+
+ value = spx5_rd(sparx5, ANA_ACL_VCAP_S2_KEY_SEL(portno, lookup));
+
+ /* Collect all keysets for the port in a list */
+ if (l3_proto == ETH_P_ALL || l3_proto == ETH_P_ARP) {
+ switch (ANA_ACL_VCAP_S2_KEY_SEL_ARP_KEY_SEL_GET(value)) {
+ case VCAP_IS2_PS_ARP_MAC_ETYPE:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_MAC_ETYPE);
+ break;
+ case VCAP_IS2_PS_ARP_ARP:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_ARP);
+ break;
+ }
+ }
+
+ if (l3_proto == ETH_P_ALL || l3_proto == ETH_P_IP) {
+ switch (ANA_ACL_VCAP_S2_KEY_SEL_IP4_UC_KEY_SEL_GET(value)) {
+ case VCAP_IS2_PS_IPV4_UC_MAC_ETYPE:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_MAC_ETYPE);
+ break;
+ case VCAP_IS2_PS_IPV4_UC_IP4_TCP_UDP_OTHER:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP4_TCP_UDP);
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP4_OTHER);
+ break;
+ case VCAP_IS2_PS_IPV4_UC_IP_7TUPLE:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP_7TUPLE);
+ break;
+ }
+
+ switch (ANA_ACL_VCAP_S2_KEY_SEL_IP4_MC_KEY_SEL_GET(value)) {
+ case VCAP_IS2_PS_IPV4_MC_MAC_ETYPE:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_MAC_ETYPE);
+ break;
+ case VCAP_IS2_PS_IPV4_MC_IP4_TCP_UDP_OTHER:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP4_TCP_UDP);
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP4_OTHER);
+ break;
+ case VCAP_IS2_PS_IPV4_MC_IP_7TUPLE:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP_7TUPLE);
+ break;
+ }
+ }
+
+ if (l3_proto == ETH_P_ALL || l3_proto == ETH_P_IPV6) {
+ switch (ANA_ACL_VCAP_S2_KEY_SEL_IP6_UC_KEY_SEL_GET(value)) {
+ case VCAP_IS2_PS_IPV6_UC_MAC_ETYPE:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_MAC_ETYPE);
+ break;
+ case VCAP_IS2_PS_IPV6_UC_IP_7TUPLE:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP_7TUPLE);
+ break;
+ case VCAP_IS2_PS_IPV6_UC_IP6_STD:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP6_STD);
+ break;
+ case VCAP_IS2_PS_IPV6_UC_IP4_TCP_UDP_OTHER:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP4_TCP_UDP);
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP4_OTHER);
+ break;
+ }
+
+ switch (ANA_ACL_VCAP_S2_KEY_SEL_IP6_MC_KEY_SEL_GET(value)) {
+ case VCAP_IS2_PS_IPV6_MC_MAC_ETYPE:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_MAC_ETYPE);
+ break;
+ case VCAP_IS2_PS_IPV6_MC_IP_7TUPLE:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP_7TUPLE);
+ break;
+ case VCAP_IS2_PS_IPV6_MC_IP6_STD:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP6_STD);
+ break;
+ case VCAP_IS2_PS_IPV6_MC_IP4_TCP_UDP_OTHER:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP4_TCP_UDP);
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP4_OTHER);
+ break;
+ case VCAP_IS2_PS_IPV6_MC_IP6_VID:
+ /* Not used */
+ break;
+ }
+ }
+
+ if (l3_proto != ETH_P_ARP && l3_proto != ETH_P_IP &&
+ l3_proto != ETH_P_IPV6) {
+ switch (ANA_ACL_VCAP_S2_KEY_SEL_NON_ETH_KEY_SEL_GET(value)) {
+ case VCAP_IS2_PS_NONETH_MAC_ETYPE:
+ /* IS2 non-classified frames generate MAC_ETYPE */
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_MAC_ETYPE);
+ break;
+ }
+ }
+ return 0;
+}
+
+/* Return the keysets for the vcap port IP4 traffic class configuration */
+static void
+sparx5_vcap_es2_get_port_ipv4_keysets(struct vcap_keyset_list *keysetlist,
+ u32 value)
+{
+ switch (EACL_VCAP_ES2_KEY_SEL_IP4_KEY_SEL_GET(value)) {
+ case VCAP_ES2_PS_IPV4_MAC_ETYPE:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_MAC_ETYPE);
+ break;
+ case VCAP_ES2_PS_IPV4_IP_7TUPLE:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP_7TUPLE);
+ break;
+ case VCAP_ES2_PS_IPV4_IP4_TCP_UDP_VID:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP4_TCP_UDP);
+ break;
+ case VCAP_ES2_PS_IPV4_IP4_TCP_UDP_OTHER:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP4_TCP_UDP);
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP4_OTHER);
+ break;
+ case VCAP_ES2_PS_IPV4_IP4_VID:
+ /* Not used */
+ break;
+ case VCAP_ES2_PS_IPV4_IP4_OTHER:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP4_OTHER);
+ break;
+ }
+}
+
+/* Return the list of keysets for the vcap port configuration */
+static int sparx5_vcap_es0_get_port_keysets(struct net_device *ndev,
+ struct vcap_keyset_list *keysetlist,
+ u16 l3_proto)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ struct sparx5 *sparx5 = port->sparx5;
+ int portno = port->portno;
+ u32 value;
+
+ value = spx5_rd(sparx5, REW_RTAG_ETAG_CTRL(portno));
+
+ /* Collect all keysets for the port in a list */
+ switch (REW_RTAG_ETAG_CTRL_ES0_ISDX_KEY_ENA_GET(value)) {
+ case VCAP_ES0_PS_NORMAL_SELECTION:
+ case VCAP_ES0_PS_FORCE_ISDX_LOOKUPS:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_ISDX);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+/* Return the list of keysets for the vcap port configuration */
+static int sparx5_vcap_es2_get_port_keysets(struct net_device *ndev,
+ int lookup,
+ struct vcap_keyset_list *keysetlist,
+ u16 l3_proto)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ struct sparx5 *sparx5 = port->sparx5;
+ int portno = port->portno;
+ u32 value;
+
+ value = spx5_rd(sparx5, EACL_VCAP_ES2_KEY_SEL(portno, lookup));
+
+ /* Collect all keysets for the port in a list */
+ if (l3_proto == ETH_P_ALL || l3_proto == ETH_P_ARP) {
+ switch (EACL_VCAP_ES2_KEY_SEL_ARP_KEY_SEL_GET(value)) {
+ case VCAP_ES2_PS_ARP_MAC_ETYPE:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_MAC_ETYPE);
+ break;
+ case VCAP_ES2_PS_ARP_ARP:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_ARP);
+ break;
+ }
+ }
+
+ if (l3_proto == ETH_P_ALL || l3_proto == ETH_P_IP)
+ sparx5_vcap_es2_get_port_ipv4_keysets(keysetlist, value);
+
+ if (l3_proto == ETH_P_ALL || l3_proto == ETH_P_IPV6) {
+ switch (EACL_VCAP_ES2_KEY_SEL_IP6_KEY_SEL_GET(value)) {
+ case VCAP_ES2_PS_IPV6_MAC_ETYPE:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_MAC_ETYPE);
+ break;
+ case VCAP_ES2_PS_IPV6_IP_7TUPLE:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP_7TUPLE);
+ break;
+ case VCAP_ES2_PS_IPV6_IP_7TUPLE_VID:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP_7TUPLE);
+ break;
+ case VCAP_ES2_PS_IPV6_IP_7TUPLE_STD:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP_7TUPLE);
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP6_STD);
+ break;
+ case VCAP_ES2_PS_IPV6_IP6_VID:
+ /* Not used */
+ break;
+ case VCAP_ES2_PS_IPV6_IP6_STD:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP6_STD);
+ break;
+ case VCAP_ES2_PS_IPV6_IP4_DOWNGRADE:
+ sparx5_vcap_es2_get_port_ipv4_keysets(keysetlist,
+ value);
+ break;
+ }
+ }
+
+ if (l3_proto != ETH_P_ARP && l3_proto != ETH_P_IP &&
+ l3_proto != ETH_P_IPV6) {
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_MAC_ETYPE);
+ }
+ return 0;
+}
+
+/* Get the port keyset for the vcap lookup */
+int sparx5_vcap_get_port_keyset(struct net_device *ndev,
+ struct vcap_admin *admin,
+ int cid,
+ u16 l3_proto,
+ struct vcap_keyset_list *kslist)
+{
+ int lookup, err = -EINVAL;
+ struct sparx5_port *port;
+
+ switch (admin->vtype) {
+ case VCAP_TYPE_IS0:
+ lookup = sparx5_vcap_is0_cid_to_lookup(cid);
+ err = sparx5_vcap_is0_get_port_keysets(ndev, lookup, kslist,
+ l3_proto);
+ break;
+ case VCAP_TYPE_IS2:
+ lookup = sparx5_vcap_is2_cid_to_lookup(cid);
+ err = sparx5_vcap_is2_get_port_keysets(ndev, lookup, kslist,
+ l3_proto);
+ break;
+ case VCAP_TYPE_ES0:
+ err = sparx5_vcap_es0_get_port_keysets(ndev, kslist, l3_proto);
+ break;
+ case VCAP_TYPE_ES2:
+ lookup = sparx5_vcap_es2_cid_to_lookup(cid);
+ err = sparx5_vcap_es2_get_port_keysets(ndev, lookup, kslist,
+ l3_proto);
+ break;
+ default:
+ port = netdev_priv(ndev);
+ sparx5_vcap_type_err(port->sparx5, admin, __func__);
+ break;
+ }
+ return err;
+}
+
+/* Check if the ethertype is supported by the vcap port classification */
+bool sparx5_vcap_is_known_etype(struct vcap_admin *admin, u16 etype)
+{
+ const u16 *known_etypes;
+ int size, idx;
+
+ switch (admin->vtype) {
+ case VCAP_TYPE_IS0:
+ known_etypes = sparx5_vcap_is0_known_etypes;
+ size = ARRAY_SIZE(sparx5_vcap_is0_known_etypes);
+ break;
+ case VCAP_TYPE_IS2:
+ known_etypes = sparx5_vcap_is2_known_etypes;
+ size = ARRAY_SIZE(sparx5_vcap_is2_known_etypes);
+ break;
+ case VCAP_TYPE_ES0:
+ return true;
+ case VCAP_TYPE_ES2:
+ known_etypes = sparx5_vcap_es2_known_etypes;
+ size = ARRAY_SIZE(sparx5_vcap_es2_known_etypes);
+ break;
+ default:
+ return false;
+ }
+ for (idx = 0; idx < size; ++idx)
+ if (known_etypes[idx] == etype)
+ return true;
+ return false;
+}
+
+/* API callback used for validating a field keyset (check the port keysets) */
+static enum vcap_keyfield_set
+sparx5_vcap_validate_keyset(struct net_device *ndev,
+ struct vcap_admin *admin,
+ struct vcap_rule *rule,
+ struct vcap_keyset_list *kslist,
+ u16 l3_proto)
+{
+ struct vcap_keyset_list keysetlist = {};
+ enum vcap_keyfield_set keysets[10] = {};
+ struct sparx5_port *port;
+ int idx, jdx, lookup;
+
+ if (!kslist || kslist->cnt == 0)
+ return VCAP_KFS_NO_VALUE;
+
+ keysetlist.max = ARRAY_SIZE(keysets);
+ keysetlist.keysets = keysets;
+
+ /* Get a list of currently configured keysets in the lookups */
+ switch (admin->vtype) {
+ case VCAP_TYPE_IS0:
+ lookup = sparx5_vcap_is0_cid_to_lookup(rule->vcap_chain_id);
+ sparx5_vcap_is0_get_port_keysets(ndev, lookup, &keysetlist,
+ l3_proto);
+ break;
+ case VCAP_TYPE_IS2:
+ lookup = sparx5_vcap_is2_cid_to_lookup(rule->vcap_chain_id);
+ sparx5_vcap_is2_get_port_keysets(ndev, lookup, &keysetlist,
+ l3_proto);
+ break;
+ case VCAP_TYPE_ES0:
+ sparx5_vcap_es0_get_port_keysets(ndev, &keysetlist, l3_proto);
+ break;
+ case VCAP_TYPE_ES2:
+ lookup = sparx5_vcap_es2_cid_to_lookup(rule->vcap_chain_id);
+ sparx5_vcap_es2_get_port_keysets(ndev, lookup, &keysetlist,
+ l3_proto);
+ break;
+ default:
+ port = netdev_priv(ndev);
+ sparx5_vcap_type_err(port->sparx5, admin, __func__);
+ break;
+ }
+
+ /* Check if there is a match and return the match */
+ for (idx = 0; idx < kslist->cnt; ++idx)
+ for (jdx = 0; jdx < keysetlist.cnt; ++jdx)
+ if (kslist->keysets[idx] == keysets[jdx])
+ return kslist->keysets[idx];
+
+ pr_err("%s:%d: %s not supported in port key selection\n",
+ __func__, __LINE__,
+ sparx5_vcap_keyset_name(ndev, kslist->keysets[0]));
+
+ return -ENOENT;
+}
+
+static void sparx5_vcap_ingress_add_default_fields(struct net_device *ndev,
+ struct vcap_admin *admin,
+ struct vcap_rule *rule)
+{
+ const struct vcap_field *field;
+ bool is_first;
+
+ /* Add ingress port mask matching the net device */
+ field = vcap_lookup_keyfield(rule, VCAP_KF_IF_IGR_PORT_MASK);
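+	/* A field as wide as the port count takes the wide port mask;
+	 * a 32-bit field uses the range-based variant
+	 */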
+ if (field && field->width == SPX5_PORTS)
+ sparx5_vcap_add_wide_port_mask(rule, ndev);
+ else if (field && field->width == BITS_PER_TYPE(u32))
+ sparx5_vcap_add_ingress_range_port_mask(rule, ndev);
+ else
+ pr_err("%s:%d: %s: could not add an ingress port mask for: %s\n",
+ __func__, __LINE__, netdev_name(ndev),
+ sparx5_vcap_keyset_name(ndev, rule->keyset));
+
+ if (admin->vtype == VCAP_TYPE_IS0)
+ is_first = sparx5_vcap_is0_is_first_chain(rule);
+ else
+ is_first = sparx5_vcap_is2_is_first_chain(rule);
+
+	/* Add the key that selects between the first and later lookups */
+ if (is_first)
+ vcap_rule_add_key_bit(rule, VCAP_KF_LOOKUP_FIRST_IS,
+ VCAP_BIT_1);
+ else
+ vcap_rule_add_key_bit(rule, VCAP_KF_LOOKUP_FIRST_IS,
+ VCAP_BIT_0);
+}
+
+static void sparx5_vcap_es0_add_default_fields(struct net_device *ndev,
+ struct vcap_admin *admin,
+ struct vcap_rule *rule)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+
+ vcap_rule_add_key_u32(rule, VCAP_KF_IF_EGR_PORT_NO, port->portno, ~0);
+ /* Match untagged frames if there was no VLAN key */
+ vcap_rule_add_key_u32(rule, VCAP_KF_8021Q_TPID, SPX5_TPID_SEL_UNTAGGED,
+ ~0);
+}
+
+static void sparx5_vcap_es2_add_default_fields(struct net_device *ndev,
+ struct vcap_admin *admin,
+ struct vcap_rule *rule)
+{
+ const struct vcap_field *field;
+ bool is_first;
+
+ /* Add egress port mask matching the net device */
+ field = vcap_lookup_keyfield(rule, VCAP_KF_IF_EGR_PORT_MASK);
+ if (field)
+ sparx5_vcap_add_egress_range_port_mask(rule, ndev);
+
+ /* Add key that selects the first/second lookup */
+ is_first = sparx5_vcap_es2_is_first_chain(rule);
+
+ if (is_first)
+ vcap_rule_add_key_bit(rule, VCAP_KF_LOOKUP_FIRST_IS,
+ VCAP_BIT_1);
+ else
+ vcap_rule_add_key_bit(rule, VCAP_KF_LOOKUP_FIRST_IS,
+ VCAP_BIT_0);
+}
+
+/* API callback used for adding default fields to a rule */
+static void sparx5_vcap_add_default_fields(struct net_device *ndev,
+ struct vcap_admin *admin,
+ struct vcap_rule *rule)
+{
+ struct sparx5_port *port;
+
+	/* Add the default fields for the VCAP type */
+ switch (admin->vtype) {
+ case VCAP_TYPE_IS0:
+ case VCAP_TYPE_IS2:
+ sparx5_vcap_ingress_add_default_fields(ndev, admin, rule);
+ break;
+ case VCAP_TYPE_ES0:
+ sparx5_vcap_es0_add_default_fields(ndev, admin, rule);
+ break;
+ case VCAP_TYPE_ES2:
+ sparx5_vcap_es2_add_default_fields(ndev, admin, rule);
+ break;
+ default:
+ port = netdev_priv(ndev);
+ sparx5_vcap_type_err(port->sparx5, admin, __func__);
+ break;
+ }
+}
+
+/* API callback used for erasing the vcap cache area (not the register area) */
+static void sparx5_vcap_cache_erase(struct vcap_admin *admin)
+{
+ memset(admin->cache.keystream, 0, STREAMSIZE);
+ memset(admin->cache.maskstream, 0, STREAMSIZE);
+ memset(admin->cache.actionstream, 0, STREAMSIZE);
+ memset(&admin->cache.counter, 0, sizeof(admin->cache.counter));
+}
+
+static void sparx5_vcap_is0_cache_write(struct sparx5 *sparx5,
+ struct vcap_admin *admin,
+ enum vcap_selection sel,
+ u32 start,
+ u32 count)
+{
+ u32 *keystr, *mskstr, *actstr;
+ int idx;
+
+ keystr = &admin->cache.keystream[start];
+ mskstr = &admin->cache.maskstream[start];
+ actstr = &admin->cache.actionstream[start];
+
+ switch (sel) {
+ case VCAP_SEL_ENTRY:
+ for (idx = 0; idx < count; ++idx) {
+ /* Avoid 'match-off' by setting value & mask */
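+			/* The mask is written inverted; the cache_read
+			 * counterpart inverts it back
+			 */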
+ spx5_wr(keystr[idx] & mskstr[idx], sparx5,
+ VCAP_SUPER_VCAP_ENTRY_DAT(idx));
+ spx5_wr(~mskstr[idx], sparx5,
+ VCAP_SUPER_VCAP_MASK_DAT(idx));
+ }
+ break;
+ case VCAP_SEL_ACTION:
+ for (idx = 0; idx < count; ++idx)
+ spx5_wr(actstr[idx], sparx5,
+ VCAP_SUPER_VCAP_ACTION_DAT(idx));
+ break;
+ case VCAP_SEL_ALL:
+ pr_err("%s:%d: cannot write all streams at once\n",
+ __func__, __LINE__);
+ break;
+ default:
+ break;
+ }
+
+ if (sel & VCAP_SEL_COUNTER)
+ spx5_wr(admin->cache.counter, sparx5,
+ VCAP_SUPER_VCAP_CNT_DAT(0));
+}
+
+static void sparx5_vcap_is2_cache_write(struct sparx5 *sparx5,
+ struct vcap_admin *admin,
+ enum vcap_selection sel,
+ u32 start,
+ u32 count)
+{
+ u32 *keystr, *mskstr, *actstr;
+ int idx;
+
+ keystr = &admin->cache.keystream[start];
+ mskstr = &admin->cache.maskstream[start];
+ actstr = &admin->cache.actionstream[start];
+
+ switch (sel) {
+ case VCAP_SEL_ENTRY:
+ for (idx = 0; idx < count; ++idx) {
+ /* Avoid 'match-off' by setting value & mask */
+ spx5_wr(keystr[idx] & mskstr[idx], sparx5,
+ VCAP_SUPER_VCAP_ENTRY_DAT(idx));
+ spx5_wr(~mskstr[idx], sparx5,
+ VCAP_SUPER_VCAP_MASK_DAT(idx));
+ }
+ break;
+ case VCAP_SEL_ACTION:
+ for (idx = 0; idx < count; ++idx)
+ spx5_wr(actstr[idx], sparx5,
+ VCAP_SUPER_VCAP_ACTION_DAT(idx));
+ break;
+ case VCAP_SEL_ALL:
+ pr_err("%s:%d: cannot write all streams at once\n",
+ __func__, __LINE__);
+ break;
+ default:
+ break;
+ }
+ if (sel & VCAP_SEL_COUNTER) {
+ start = start & 0xfff; /* counter limit */
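+		/* Each IS2 instance has its own counter bank: CNT_A for
+		 * the first instance and CNT_B for the second
+		 */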
+ if (admin->vinst == 0)
+ spx5_wr(admin->cache.counter, sparx5,
+ ANA_ACL_CNT_A(start));
+ else
+ spx5_wr(admin->cache.counter, sparx5,
+ ANA_ACL_CNT_B(start));
+ spx5_wr(admin->cache.sticky, sparx5,
+ VCAP_SUPER_VCAP_CNT_DAT(0));
+ }
+}
+
+/* Use ESDX counters located in the XQS */
+static void sparx5_es0_write_esdx_counter(struct sparx5 *sparx5,
+ struct vcap_admin *admin, u32 id)
+{
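+	/* Select the ESDX id as statistics view before accessing the
+	 * shared XQS counter registers
+	 */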
+ mutex_lock(&sparx5->queue_stats_lock);
+ spx5_wr(XQS_STAT_CFG_STAT_VIEW_SET(id), sparx5, XQS_STAT_CFG);
+ spx5_wr(admin->cache.counter, sparx5,
+ XQS_CNT(SPARX5_STAT_ESDX_GRN_PKTS));
+ spx5_wr(0, sparx5, XQS_CNT(SPARX5_STAT_ESDX_YEL_PKTS));
+ mutex_unlock(&sparx5->queue_stats_lock);
+}
+
+static void sparx5_vcap_es0_cache_write(struct sparx5 *sparx5,
+ struct vcap_admin *admin,
+ enum vcap_selection sel,
+ u32 start,
+ u32 count)
+{
+ u32 *keystr, *mskstr, *actstr;
+ int idx;
+
+ keystr = &admin->cache.keystream[start];
+ mskstr = &admin->cache.maskstream[start];
+ actstr = &admin->cache.actionstream[start];
+
+ switch (sel) {
+ case VCAP_SEL_ENTRY:
+ for (idx = 0; idx < count; ++idx) {
+ /* Avoid 'match-off' by setting value & mask */
+ spx5_wr(keystr[idx] & mskstr[idx], sparx5,
+ VCAP_ES0_VCAP_ENTRY_DAT(idx));
+ spx5_wr(~mskstr[idx], sparx5,
+ VCAP_ES0_VCAP_MASK_DAT(idx));
+ }
+ break;
+ case VCAP_SEL_ACTION:
+ for (idx = 0; idx < count; ++idx)
+ spx5_wr(actstr[idx], sparx5,
+ VCAP_ES0_VCAP_ACTION_DAT(idx));
+ break;
+ case VCAP_SEL_ALL:
+ pr_err("%s:%d: cannot write all streams at once\n",
+ __func__, __LINE__);
+ break;
+ default:
+ break;
+ }
+ if (sel & VCAP_SEL_COUNTER) {
+ spx5_wr(admin->cache.counter, sparx5, VCAP_ES0_VCAP_CNT_DAT(0));
+ sparx5_es0_write_esdx_counter(sparx5, admin, start);
+ }
+}
+
+static void sparx5_vcap_es2_cache_write(struct sparx5 *sparx5,
+ struct vcap_admin *admin,
+ enum vcap_selection sel,
+ u32 start,
+ u32 count)
+{
+ u32 *keystr, *mskstr, *actstr;
+ int idx;
+
+ keystr = &admin->cache.keystream[start];
+ mskstr = &admin->cache.maskstream[start];
+ actstr = &admin->cache.actionstream[start];
+
+ switch (sel) {
+ case VCAP_SEL_ENTRY:
+ for (idx = 0; idx < count; ++idx) {
+ /* Avoid 'match-off' by setting value & mask */
+ spx5_wr(keystr[idx] & mskstr[idx], sparx5,
+ VCAP_ES2_VCAP_ENTRY_DAT(idx));
+ spx5_wr(~mskstr[idx], sparx5,
+ VCAP_ES2_VCAP_MASK_DAT(idx));
+ }
+ break;
+ case VCAP_SEL_ACTION:
+ for (idx = 0; idx < count; ++idx)
+ spx5_wr(actstr[idx], sparx5,
+ VCAP_ES2_VCAP_ACTION_DAT(idx));
+ break;
+ case VCAP_SEL_ALL:
+ pr_err("%s:%d: cannot write all streams at once\n",
+ __func__, __LINE__);
+ break;
+ default:
+ break;
+ }
+ if (sel & VCAP_SEL_COUNTER) {
+ start = start & 0x7ff; /* counter limit */
+ spx5_wr(admin->cache.counter, sparx5, EACL_ES2_CNT(start));
+ spx5_wr(admin->cache.sticky, sparx5, VCAP_ES2_VCAP_CNT_DAT(0));
+ }
+}
+
+/* API callback used for writing to the VCAP cache */
+static void sparx5_vcap_cache_write(struct net_device *ndev,
+ struct vcap_admin *admin,
+ enum vcap_selection sel,
+ u32 start,
+ u32 count)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ struct sparx5 *sparx5 = port->sparx5;
+
+ switch (admin->vtype) {
+ case VCAP_TYPE_IS0:
+ sparx5_vcap_is0_cache_write(sparx5, admin, sel, start, count);
+ break;
+ case VCAP_TYPE_IS2:
+ sparx5_vcap_is2_cache_write(sparx5, admin, sel, start, count);
+ break;
+ case VCAP_TYPE_ES0:
+ sparx5_vcap_es0_cache_write(sparx5, admin, sel, start, count);
+ break;
+ case VCAP_TYPE_ES2:
+ sparx5_vcap_es2_cache_write(sparx5, admin, sel, start, count);
+ break;
+ default:
+ sparx5_vcap_type_err(sparx5, admin, __func__);
+ break;
+ }
+}
+
+static void sparx5_vcap_is0_cache_read(struct sparx5 *sparx5,
+ struct vcap_admin *admin,
+ enum vcap_selection sel,
+ u32 start,
+ u32 count)
+{
+ u32 *keystr, *mskstr, *actstr;
+ int idx;
+
+ keystr = &admin->cache.keystream[start];
+ mskstr = &admin->cache.maskstream[start];
+ actstr = &admin->cache.actionstream[start];
+
+ if (sel & VCAP_SEL_ENTRY) {
+ for (idx = 0; idx < count; ++idx) {
+ keystr[idx] = spx5_rd(sparx5,
+ VCAP_SUPER_VCAP_ENTRY_DAT(idx));
+ mskstr[idx] = ~spx5_rd(sparx5,
+ VCAP_SUPER_VCAP_MASK_DAT(idx));
+ }
+ }
+
+ if (sel & VCAP_SEL_ACTION)
+ for (idx = 0; idx < count; ++idx)
+ actstr[idx] = spx5_rd(sparx5,
+ VCAP_SUPER_VCAP_ACTION_DAT(idx));
+
+ if (sel & VCAP_SEL_COUNTER) {
+ admin->cache.counter =
+ spx5_rd(sparx5, VCAP_SUPER_VCAP_CNT_DAT(0));
+ admin->cache.sticky =
+ spx5_rd(sparx5, VCAP_SUPER_VCAP_CNT_DAT(0));
+ }
+}
+
+static void sparx5_vcap_is2_cache_read(struct sparx5 *sparx5,
+ struct vcap_admin *admin,
+ enum vcap_selection sel,
+ u32 start,
+ u32 count)
+{
+ u32 *keystr, *mskstr, *actstr;
+ int idx;
+
+ keystr = &admin->cache.keystream[start];
+ mskstr = &admin->cache.maskstream[start];
+ actstr = &admin->cache.actionstream[start];
+
+ if (sel & VCAP_SEL_ENTRY) {
+ for (idx = 0; idx < count; ++idx) {
+ keystr[idx] = spx5_rd(sparx5,
+ VCAP_SUPER_VCAP_ENTRY_DAT(idx));
+ mskstr[idx] = ~spx5_rd(sparx5,
+ VCAP_SUPER_VCAP_MASK_DAT(idx));
+ }
+ }
+
+ if (sel & VCAP_SEL_ACTION)
+ for (idx = 0; idx < count; ++idx)
+ actstr[idx] = spx5_rd(sparx5,
+ VCAP_SUPER_VCAP_ACTION_DAT(idx));
+
+ if (sel & VCAP_SEL_COUNTER) {
+ start = start & 0xfff; /* counter limit */
+ if (admin->vinst == 0)
+ admin->cache.counter =
+ spx5_rd(sparx5, ANA_ACL_CNT_A(start));
+ else
+ admin->cache.counter =
+ spx5_rd(sparx5, ANA_ACL_CNT_B(start));
+ admin->cache.sticky =
+ spx5_rd(sparx5, VCAP_SUPER_VCAP_CNT_DAT(0));
+ }
+}
+
+/* Use ESDX counters located in the XQS */
+static void sparx5_es0_read_esdx_counter(struct sparx5 *sparx5,
+ struct vcap_admin *admin, u32 id)
+{
+ u32 counter;
+
+ mutex_lock(&sparx5->queue_stats_lock);
+ spx5_wr(XQS_STAT_CFG_STAT_VIEW_SET(id), sparx5, XQS_STAT_CFG);
+ counter = spx5_rd(sparx5, XQS_CNT(SPARX5_STAT_ESDX_GRN_PKTS)) +
+ spx5_rd(sparx5, XQS_CNT(SPARX5_STAT_ESDX_YEL_PKTS));
+ mutex_unlock(&sparx5->queue_stats_lock);
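+	/* Only let a non-zero ESDX count overwrite the cached VCAP counter */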
+ if (counter)
+ admin->cache.counter = counter;
+}
+
+static void sparx5_vcap_es0_cache_read(struct sparx5 *sparx5,
+ struct vcap_admin *admin,
+ enum vcap_selection sel,
+ u32 start,
+ u32 count)
+{
+ u32 *keystr, *mskstr, *actstr;
+ int idx;
+
+ keystr = &admin->cache.keystream[start];
+ mskstr = &admin->cache.maskstream[start];
+ actstr = &admin->cache.actionstream[start];
+
+ if (sel & VCAP_SEL_ENTRY) {
+ for (idx = 0; idx < count; ++idx) {
+ keystr[idx] =
+ spx5_rd(sparx5, VCAP_ES0_VCAP_ENTRY_DAT(idx));
+ mskstr[idx] =
+ ~spx5_rd(sparx5, VCAP_ES0_VCAP_MASK_DAT(idx));
+ }
+ }
+
+ if (sel & VCAP_SEL_ACTION)
+ for (idx = 0; idx < count; ++idx)
+ actstr[idx] =
+ spx5_rd(sparx5, VCAP_ES0_VCAP_ACTION_DAT(idx));
+
+ if (sel & VCAP_SEL_COUNTER) {
+ admin->cache.counter =
+ spx5_rd(sparx5, VCAP_ES0_VCAP_CNT_DAT(0));
+ admin->cache.sticky = admin->cache.counter;
+ sparx5_es0_read_esdx_counter(sparx5, admin, start);
+ }
+}
+
+static void sparx5_vcap_es2_cache_read(struct sparx5 *sparx5,
+ struct vcap_admin *admin,
+ enum vcap_selection sel,
+ u32 start,
+ u32 count)
+{
+ u32 *keystr, *mskstr, *actstr;
+ int idx;
+
+ keystr = &admin->cache.keystream[start];
+ mskstr = &admin->cache.maskstream[start];
+ actstr = &admin->cache.actionstream[start];
+
+ if (sel & VCAP_SEL_ENTRY) {
+ for (idx = 0; idx < count; ++idx) {
+ keystr[idx] =
+ spx5_rd(sparx5, VCAP_ES2_VCAP_ENTRY_DAT(idx));
+ mskstr[idx] =
+ ~spx5_rd(sparx5, VCAP_ES2_VCAP_MASK_DAT(idx));
+ }
+ }
+
+ if (sel & VCAP_SEL_ACTION)
+ for (idx = 0; idx < count; ++idx)
+ actstr[idx] =
+ spx5_rd(sparx5, VCAP_ES2_VCAP_ACTION_DAT(idx));
+
+ if (sel & VCAP_SEL_COUNTER) {
+ start = start & 0x7ff; /* counter limit */
+ admin->cache.counter =
+ spx5_rd(sparx5, EACL_ES2_CNT(start));
+ admin->cache.sticky =
+ spx5_rd(sparx5, VCAP_ES2_VCAP_CNT_DAT(0));
+ }
+}
+
+/* API callback used for reading from the VCAP into the VCAP cache */
+static void sparx5_vcap_cache_read(struct net_device *ndev,
+ struct vcap_admin *admin,
+ enum vcap_selection sel,
+ u32 start,
+ u32 count)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ struct sparx5 *sparx5 = port->sparx5;
+
+ switch (admin->vtype) {
+ case VCAP_TYPE_IS0:
+ sparx5_vcap_is0_cache_read(sparx5, admin, sel, start, count);
+ break;
+ case VCAP_TYPE_IS2:
+ sparx5_vcap_is2_cache_read(sparx5, admin, sel, start, count);
+ break;
+ case VCAP_TYPE_ES0:
+ sparx5_vcap_es0_cache_read(sparx5, admin, sel, start, count);
+ break;
+ case VCAP_TYPE_ES2:
+ sparx5_vcap_es2_cache_read(sparx5, admin, sel, start, count);
+ break;
+ default:
+ sparx5_vcap_type_err(sparx5, admin, __func__);
+ break;
+ }
+}
+
+/* API callback used for initializing a VCAP address range */
+static void sparx5_vcap_range_init(struct net_device *ndev,
+ struct vcap_admin *admin, u32 addr,
+ u32 count)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ struct sparx5 *sparx5 = port->sparx5;
+
+ _sparx5_vcap_range_init(sparx5, admin, addr, count);
+}
+
+static void sparx5_vcap_super_update(struct sparx5 *sparx5,
+ enum vcap_command cmd,
+ enum vcap_selection sel, u32 addr)
+{
+ bool clear = (cmd == VCAP_CMD_INITIALIZE);
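+	/* Let an initialize command also clear the VCAP cache */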
+
+ spx5_wr(VCAP_SUPER_CFG_MV_NUM_POS_SET(0) |
+ VCAP_SUPER_CFG_MV_SIZE_SET(0), sparx5, VCAP_SUPER_CFG);
+ spx5_wr(VCAP_SUPER_CTRL_UPDATE_CMD_SET(cmd) |
+ VCAP_SUPER_CTRL_UPDATE_ENTRY_DIS_SET((VCAP_SEL_ENTRY & sel) == 0) |
+ VCAP_SUPER_CTRL_UPDATE_ACTION_DIS_SET((VCAP_SEL_ACTION & sel) == 0) |
+ VCAP_SUPER_CTRL_UPDATE_CNT_DIS_SET((VCAP_SEL_COUNTER & sel) == 0) |
+ VCAP_SUPER_CTRL_UPDATE_ADDR_SET(addr) |
+ VCAP_SUPER_CTRL_CLEAR_CACHE_SET(clear) |
+ VCAP_SUPER_CTRL_UPDATE_SHOT_SET(true),
+ sparx5, VCAP_SUPER_CTRL);
+ sparx5_vcap_wait_super_update(sparx5);
+}
+
+static void sparx5_vcap_es0_update(struct sparx5 *sparx5,
+ enum vcap_command cmd,
+ enum vcap_selection sel, u32 addr)
+{
+ bool clear = (cmd == VCAP_CMD_INITIALIZE);
+
+ spx5_wr(VCAP_ES0_CFG_MV_NUM_POS_SET(0) |
+ VCAP_ES0_CFG_MV_SIZE_SET(0), sparx5, VCAP_ES0_CFG);
+ spx5_wr(VCAP_ES0_CTRL_UPDATE_CMD_SET(cmd) |
+ VCAP_ES0_CTRL_UPDATE_ENTRY_DIS_SET((VCAP_SEL_ENTRY & sel) == 0) |
+ VCAP_ES0_CTRL_UPDATE_ACTION_DIS_SET((VCAP_SEL_ACTION & sel) == 0) |
+ VCAP_ES0_CTRL_UPDATE_CNT_DIS_SET((VCAP_SEL_COUNTER & sel) == 0) |
+ VCAP_ES0_CTRL_UPDATE_ADDR_SET(addr) |
+ VCAP_ES0_CTRL_CLEAR_CACHE_SET(clear) |
+ VCAP_ES0_CTRL_UPDATE_SHOT_SET(true),
+ sparx5, VCAP_ES0_CTRL);
+ sparx5_vcap_wait_es0_update(sparx5);
+}
+
+static void sparx5_vcap_es2_update(struct sparx5 *sparx5,
+ enum vcap_command cmd,
+ enum vcap_selection sel, u32 addr)
+{
+ bool clear = (cmd == VCAP_CMD_INITIALIZE);
+
+ spx5_wr(VCAP_ES2_CFG_MV_NUM_POS_SET(0) |
+ VCAP_ES2_CFG_MV_SIZE_SET(0), sparx5, VCAP_ES2_CFG);
+ spx5_wr(VCAP_ES2_CTRL_UPDATE_CMD_SET(cmd) |
+ VCAP_ES2_CTRL_UPDATE_ENTRY_DIS_SET((VCAP_SEL_ENTRY & sel) == 0) |
+ VCAP_ES2_CTRL_UPDATE_ACTION_DIS_SET((VCAP_SEL_ACTION & sel) == 0) |
+ VCAP_ES2_CTRL_UPDATE_CNT_DIS_SET((VCAP_SEL_COUNTER & sel) == 0) |
+ VCAP_ES2_CTRL_UPDATE_ADDR_SET(addr) |
+ VCAP_ES2_CTRL_CLEAR_CACHE_SET(clear) |
+ VCAP_ES2_CTRL_UPDATE_SHOT_SET(true),
+ sparx5, VCAP_ES2_CTRL);
+ sparx5_vcap_wait_es2_update(sparx5);
+}
+
+/* API callback used for updating the VCAP cache */
+static void sparx5_vcap_update(struct net_device *ndev,
+ struct vcap_admin *admin, enum vcap_command cmd,
+ enum vcap_selection sel, u32 addr)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ struct sparx5 *sparx5 = port->sparx5;
+
+ switch (admin->vtype) {
+ case VCAP_TYPE_IS0:
+ case VCAP_TYPE_IS2:
+ sparx5_vcap_super_update(sparx5, cmd, sel, addr);
+ break;
+ case VCAP_TYPE_ES0:
+ sparx5_vcap_es0_update(sparx5, cmd, sel, addr);
+ break;
+ case VCAP_TYPE_ES2:
+ sparx5_vcap_es2_update(sparx5, cmd, sel, addr);
+ break;
+ default:
+ sparx5_vcap_type_err(sparx5, admin, __func__);
+ break;
+ }
+}
+
+static void sparx5_vcap_super_move(struct sparx5 *sparx5,
+ u32 addr,
+ enum vcap_command cmd,
+ u16 mv_num_pos,
+ u16 mv_size)
+{
+ spx5_wr(VCAP_SUPER_CFG_MV_NUM_POS_SET(mv_num_pos) |
+ VCAP_SUPER_CFG_MV_SIZE_SET(mv_size),
+ sparx5, VCAP_SUPER_CFG);
+ spx5_wr(VCAP_SUPER_CTRL_UPDATE_CMD_SET(cmd) |
+ VCAP_SUPER_CTRL_UPDATE_ENTRY_DIS_SET(0) |
+ VCAP_SUPER_CTRL_UPDATE_ACTION_DIS_SET(0) |
+ VCAP_SUPER_CTRL_UPDATE_CNT_DIS_SET(0) |
+ VCAP_SUPER_CTRL_UPDATE_ADDR_SET(addr) |
+ VCAP_SUPER_CTRL_CLEAR_CACHE_SET(false) |
+ VCAP_SUPER_CTRL_UPDATE_SHOT_SET(true),
+ sparx5, VCAP_SUPER_CTRL);
+ sparx5_vcap_wait_super_update(sparx5);
+}
+
+static void sparx5_vcap_es0_move(struct sparx5 *sparx5,
+ u32 addr,
+ enum vcap_command cmd,
+ u16 mv_num_pos,
+ u16 mv_size)
+{
+ spx5_wr(VCAP_ES0_CFG_MV_NUM_POS_SET(mv_num_pos) |
+ VCAP_ES0_CFG_MV_SIZE_SET(mv_size),
+ sparx5, VCAP_ES0_CFG);
+ spx5_wr(VCAP_ES0_CTRL_UPDATE_CMD_SET(cmd) |
+ VCAP_ES0_CTRL_UPDATE_ENTRY_DIS_SET(0) |
+ VCAP_ES0_CTRL_UPDATE_ACTION_DIS_SET(0) |
+ VCAP_ES0_CTRL_UPDATE_CNT_DIS_SET(0) |
+ VCAP_ES0_CTRL_UPDATE_ADDR_SET(addr) |
+ VCAP_ES0_CTRL_CLEAR_CACHE_SET(false) |
+ VCAP_ES0_CTRL_UPDATE_SHOT_SET(true),
+ sparx5, VCAP_ES0_CTRL);
+ sparx5_vcap_wait_es0_update(sparx5);
+}
+
+static void sparx5_vcap_es2_move(struct sparx5 *sparx5,
+ u32 addr,
+ enum vcap_command cmd,
+ u16 mv_num_pos,
+ u16 mv_size)
+{
+ spx5_wr(VCAP_ES2_CFG_MV_NUM_POS_SET(mv_num_pos) |
+ VCAP_ES2_CFG_MV_SIZE_SET(mv_size),
+ sparx5, VCAP_ES2_CFG);
+ spx5_wr(VCAP_ES2_CTRL_UPDATE_CMD_SET(cmd) |
+ VCAP_ES2_CTRL_UPDATE_ENTRY_DIS_SET(0) |
+ VCAP_ES2_CTRL_UPDATE_ACTION_DIS_SET(0) |
+ VCAP_ES2_CTRL_UPDATE_CNT_DIS_SET(0) |
+ VCAP_ES2_CTRL_UPDATE_ADDR_SET(addr) |
+ VCAP_ES2_CTRL_CLEAR_CACHE_SET(false) |
+ VCAP_ES2_CTRL_UPDATE_SHOT_SET(true),
+ sparx5, VCAP_ES2_CTRL);
+ sparx5_vcap_wait_es2_update(sparx5);
+}
+
+/* API callback used for moving a block of rules in the VCAP */
+static void sparx5_vcap_move(struct net_device *ndev, struct vcap_admin *admin,
+ u32 addr, int offset, int count)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ struct sparx5 *sparx5 = port->sparx5;
+ enum vcap_command cmd;
+ u16 mv_num_pos;
+ u16 mv_size;
+
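+	/* The HW encodes the block size and the move distance as value
+	 * minus one; a positive offset moves rules towards higher
+	 * addresses (down) and a negative offset towards lower (up)
+	 */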
+ mv_size = count - 1;
+ if (offset > 0) {
+ mv_num_pos = offset - 1;
+ cmd = VCAP_CMD_MOVE_DOWN;
+ } else {
+ mv_num_pos = -offset - 1;
+ cmd = VCAP_CMD_MOVE_UP;
+ }
+
+ switch (admin->vtype) {
+ case VCAP_TYPE_IS0:
+ case VCAP_TYPE_IS2:
+ sparx5_vcap_super_move(sparx5, addr, cmd, mv_num_pos, mv_size);
+ break;
+ case VCAP_TYPE_ES0:
+ sparx5_vcap_es0_move(sparx5, addr, cmd, mv_num_pos, mv_size);
+ break;
+ case VCAP_TYPE_ES2:
+ sparx5_vcap_es2_move(sparx5, addr, cmd, mv_num_pos, mv_size);
+ break;
+ default:
+ sparx5_vcap_type_err(sparx5, admin, __func__);
+ break;
+ }
+}
+
+static struct vcap_operations sparx5_vcap_ops = {
+ .validate_keyset = sparx5_vcap_validate_keyset,
+ .add_default_fields = sparx5_vcap_add_default_fields,
+ .cache_erase = sparx5_vcap_cache_erase,
+ .cache_write = sparx5_vcap_cache_write,
+ .cache_read = sparx5_vcap_cache_read,
+ .init = sparx5_vcap_range_init,
+ .update = sparx5_vcap_update,
+ .move = sparx5_vcap_move,
+ .port_info = sparx5_port_info,
+};
+
+static u32 sparx5_vcap_is0_keyset_to_etype_ps(enum vcap_keyfield_set keyset)
+{
+ switch (keyset) {
+ case VCAP_KFS_NORMAL_7TUPLE:
+ return VCAP_IS0_PS_ETYPE_NORMAL_7TUPLE;
+ case VCAP_KFS_NORMAL_5TUPLE_IP4:
+ return VCAP_IS0_PS_ETYPE_NORMAL_5TUPLE_IP4;
+ default:
+ return VCAP_IS0_PS_ETYPE_NORMAL_7TUPLE;
+ }
+}
+
+static void sparx5_vcap_is0_set_port_keyset(struct net_device *ndev, int lookup,
+ enum vcap_keyfield_set keyset,
+ int l3_proto)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ struct sparx5 *sparx5 = port->sparx5;
+ int portno = port->portno;
+ u32 value;
+
+ switch (l3_proto) {
+ case ETH_P_IP:
+ value = sparx5_vcap_is0_keyset_to_etype_ps(keyset);
+ spx5_rmw(ANA_CL_ADV_CL_CFG_IP4_CLM_KEY_SEL_SET(value),
+ ANA_CL_ADV_CL_CFG_IP4_CLM_KEY_SEL,
+ sparx5,
+ ANA_CL_ADV_CL_CFG(portno, lookup));
+ break;
+ case ETH_P_IPV6:
+ value = sparx5_vcap_is0_keyset_to_etype_ps(keyset);
+ spx5_rmw(ANA_CL_ADV_CL_CFG_IP6_CLM_KEY_SEL_SET(value),
+ ANA_CL_ADV_CL_CFG_IP6_CLM_KEY_SEL,
+ sparx5,
+ ANA_CL_ADV_CL_CFG(portno, lookup));
+ break;
+ default:
+ value = sparx5_vcap_is0_keyset_to_etype_ps(keyset);
+ spx5_rmw(ANA_CL_ADV_CL_CFG_ETYPE_CLM_KEY_SEL_SET(value),
+ ANA_CL_ADV_CL_CFG_ETYPE_CLM_KEY_SEL,
+ sparx5,
+ ANA_CL_ADV_CL_CFG(portno, lookup));
+ break;
+ }
+}
+
+static u32 sparx5_vcap_is2_keyset_to_arp_ps(enum vcap_keyfield_set keyset)
+{
+ switch (keyset) {
+ case VCAP_KFS_ARP:
+ return VCAP_IS2_PS_ARP_ARP;
+ default:
+ return VCAP_IS2_PS_ARP_MAC_ETYPE;
+ }
+}
+
+static u32 sparx5_vcap_is2_keyset_to_ipv4_ps(enum vcap_keyfield_set keyset)
+{
+ switch (keyset) {
+ case VCAP_KFS_MAC_ETYPE:
+ return VCAP_IS2_PS_IPV4_UC_MAC_ETYPE;
+ case VCAP_KFS_IP4_OTHER:
+ case VCAP_KFS_IP4_TCP_UDP:
+ return VCAP_IS2_PS_IPV4_UC_IP4_TCP_UDP_OTHER;
+ case VCAP_KFS_IP_7TUPLE:
+ return VCAP_IS2_PS_IPV4_UC_IP_7TUPLE;
+ default:
+ return VCAP_KFS_NO_VALUE;
+ }
+}
+
+static u32 sparx5_vcap_is2_keyset_to_ipv6_uc_ps(enum vcap_keyfield_set keyset)
+{
+ switch (keyset) {
+ case VCAP_KFS_MAC_ETYPE:
+ return VCAP_IS2_PS_IPV6_UC_MAC_ETYPE;
+ case VCAP_KFS_IP4_OTHER:
+ case VCAP_KFS_IP4_TCP_UDP:
+ return VCAP_IS2_PS_IPV6_UC_IP4_TCP_UDP_OTHER;
+ case VCAP_KFS_IP_7TUPLE:
+ return VCAP_IS2_PS_IPV6_UC_IP_7TUPLE;
+ default:
+ return VCAP_KFS_NO_VALUE;
+ }
+}
+
+static u32 sparx5_vcap_is2_keyset_to_ipv6_mc_ps(enum vcap_keyfield_set keyset)
+{
+ switch (keyset) {
+ case VCAP_KFS_MAC_ETYPE:
+ return VCAP_IS2_PS_IPV6_MC_MAC_ETYPE;
+ case VCAP_KFS_IP4_OTHER:
+ case VCAP_KFS_IP4_TCP_UDP:
+ return VCAP_IS2_PS_IPV6_MC_IP4_TCP_UDP_OTHER;
+ case VCAP_KFS_IP_7TUPLE:
+ return VCAP_IS2_PS_IPV6_MC_IP_7TUPLE;
+ default:
+ return VCAP_KFS_NO_VALUE;
+ }
+}
+
+static void sparx5_vcap_is2_set_port_keyset(struct net_device *ndev, int lookup,
+ enum vcap_keyfield_set keyset,
+ int l3_proto)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ struct sparx5 *sparx5 = port->sparx5;
+ int portno = port->portno;
+ u32 value;
+
+ switch (l3_proto) {
+ case ETH_P_ARP:
+ value = sparx5_vcap_is2_keyset_to_arp_ps(keyset);
+ spx5_rmw(ANA_ACL_VCAP_S2_KEY_SEL_ARP_KEY_SEL_SET(value),
+ ANA_ACL_VCAP_S2_KEY_SEL_ARP_KEY_SEL,
+ sparx5,
+ ANA_ACL_VCAP_S2_KEY_SEL(portno, lookup));
+ break;
+ case ETH_P_IP:
+ value = sparx5_vcap_is2_keyset_to_ipv4_ps(keyset);
+ spx5_rmw(ANA_ACL_VCAP_S2_KEY_SEL_IP4_UC_KEY_SEL_SET(value),
+ ANA_ACL_VCAP_S2_KEY_SEL_IP4_UC_KEY_SEL,
+ sparx5,
+ ANA_ACL_VCAP_S2_KEY_SEL(portno, lookup));
+ spx5_rmw(ANA_ACL_VCAP_S2_KEY_SEL_IP4_MC_KEY_SEL_SET(value),
+ ANA_ACL_VCAP_S2_KEY_SEL_IP4_MC_KEY_SEL,
+ sparx5,
+ ANA_ACL_VCAP_S2_KEY_SEL(portno, lookup));
+ break;
+ case ETH_P_IPV6:
+ value = sparx5_vcap_is2_keyset_to_ipv6_uc_ps(keyset);
+ spx5_rmw(ANA_ACL_VCAP_S2_KEY_SEL_IP6_UC_KEY_SEL_SET(value),
+ ANA_ACL_VCAP_S2_KEY_SEL_IP6_UC_KEY_SEL,
+ sparx5,
+ ANA_ACL_VCAP_S2_KEY_SEL(portno, lookup));
+ value = sparx5_vcap_is2_keyset_to_ipv6_mc_ps(keyset);
+ spx5_rmw(ANA_ACL_VCAP_S2_KEY_SEL_IP6_MC_KEY_SEL_SET(value),
+ ANA_ACL_VCAP_S2_KEY_SEL_IP6_MC_KEY_SEL,
+ sparx5,
+ ANA_ACL_VCAP_S2_KEY_SEL(portno, lookup));
+ break;
+ default:
+ value = VCAP_IS2_PS_NONETH_MAC_ETYPE;
+ spx5_rmw(ANA_ACL_VCAP_S2_KEY_SEL_NON_ETH_KEY_SEL_SET(value),
+ ANA_ACL_VCAP_S2_KEY_SEL_NON_ETH_KEY_SEL,
+ sparx5,
+ ANA_ACL_VCAP_S2_KEY_SEL(portno, lookup));
+ break;
+ }
+}
+
+static u32 sparx5_vcap_es2_keyset_to_arp_ps(enum vcap_keyfield_set keyset)
+{
+ switch (keyset) {
+ case VCAP_KFS_ARP:
+ return VCAP_ES2_PS_ARP_ARP;
+ default:
+ return VCAP_ES2_PS_ARP_MAC_ETYPE;
+ }
+}
+
+static u32 sparx5_vcap_es2_keyset_to_ipv4_ps(enum vcap_keyfield_set keyset)
+{
+ switch (keyset) {
+ case VCAP_KFS_MAC_ETYPE:
+ return VCAP_ES2_PS_IPV4_MAC_ETYPE;
+ case VCAP_KFS_IP_7TUPLE:
+ return VCAP_ES2_PS_IPV4_IP_7TUPLE;
+ case VCAP_KFS_IP4_TCP_UDP:
+ return VCAP_ES2_PS_IPV4_IP4_TCP_UDP_OTHER;
+ case VCAP_KFS_IP4_OTHER:
+ return VCAP_ES2_PS_IPV4_IP4_OTHER;
+ default:
+ return VCAP_ES2_PS_IPV4_MAC_ETYPE;
+ }
+}
+
+static u32 sparx5_vcap_es2_keyset_to_ipv6_ps(enum vcap_keyfield_set keyset)
+{
+ switch (keyset) {
+ case VCAP_KFS_MAC_ETYPE:
+ return VCAP_ES2_PS_IPV6_MAC_ETYPE;
+ case VCAP_KFS_IP4_TCP_UDP:
+ case VCAP_KFS_IP4_OTHER:
+ return VCAP_ES2_PS_IPV6_IP4_DOWNGRADE;
+ case VCAP_KFS_IP_7TUPLE:
+ return VCAP_ES2_PS_IPV6_IP_7TUPLE;
+ case VCAP_KFS_IP6_STD:
+ return VCAP_ES2_PS_IPV6_IP6_STD;
+ default:
+ return VCAP_ES2_PS_IPV6_MAC_ETYPE;
+ }
+}
+
+static void sparx5_vcap_es2_set_port_keyset(struct net_device *ndev, int lookup,
+ enum vcap_keyfield_set keyset,
+ int l3_proto)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ struct sparx5 *sparx5 = port->sparx5;
+ int portno = port->portno;
+ u32 value;
+
+ switch (l3_proto) {
+ case ETH_P_IP:
+ value = sparx5_vcap_es2_keyset_to_ipv4_ps(keyset);
+ spx5_rmw(EACL_VCAP_ES2_KEY_SEL_IP4_KEY_SEL_SET(value),
+ EACL_VCAP_ES2_KEY_SEL_IP4_KEY_SEL,
+ sparx5,
+ EACL_VCAP_ES2_KEY_SEL(portno, lookup));
+ break;
+ case ETH_P_IPV6:
+ value = sparx5_vcap_es2_keyset_to_ipv6_ps(keyset);
+ spx5_rmw(EACL_VCAP_ES2_KEY_SEL_IP6_KEY_SEL_SET(value),
+ EACL_VCAP_ES2_KEY_SEL_IP6_KEY_SEL,
+ sparx5,
+ EACL_VCAP_ES2_KEY_SEL(portno, lookup));
+ break;
+ case ETH_P_ARP:
+ value = sparx5_vcap_es2_keyset_to_arp_ps(keyset);
+ spx5_rmw(EACL_VCAP_ES2_KEY_SEL_ARP_KEY_SEL_SET(value),
+ EACL_VCAP_ES2_KEY_SEL_ARP_KEY_SEL,
+ sparx5,
+ EACL_VCAP_ES2_KEY_SEL(portno, lookup));
+ break;
+ }
+}
+
+/* Change the port keyset for the lookup and protocol */
+void sparx5_vcap_set_port_keyset(struct net_device *ndev,
+ struct vcap_admin *admin,
+ int cid,
+ u16 l3_proto,
+ enum vcap_keyfield_set keyset,
+ struct vcap_keyset_list *orig)
+{
+ struct sparx5_port *port;
+ int lookup;
+
+ switch (admin->vtype) {
+ case VCAP_TYPE_IS0:
+ lookup = sparx5_vcap_is0_cid_to_lookup(cid);
+ if (orig)
+ sparx5_vcap_is0_get_port_keysets(ndev, lookup, orig,
+ l3_proto);
+ sparx5_vcap_is0_set_port_keyset(ndev, lookup, keyset, l3_proto);
+ break;
+ case VCAP_TYPE_IS2:
+ lookup = sparx5_vcap_is2_cid_to_lookup(cid);
+ if (orig)
+ sparx5_vcap_is2_get_port_keysets(ndev, lookup, orig,
+ l3_proto);
+ sparx5_vcap_is2_set_port_keyset(ndev, lookup, keyset, l3_proto);
+ break;
+ case VCAP_TYPE_ES0:
+ break;
+ case VCAP_TYPE_ES2:
+ lookup = sparx5_vcap_es2_cid_to_lookup(cid);
+ if (orig)
+ sparx5_vcap_es2_get_port_keysets(ndev, lookup, orig,
+ l3_proto);
+ sparx5_vcap_es2_set_port_keyset(ndev, lookup, keyset, l3_proto);
+ break;
+ default:
+ port = netdev_priv(ndev);
+ sparx5_vcap_type_err(port->sparx5, admin, __func__);
+ break;
+ }
+}
+
+/* Enable IS0 lookups per port and set the keyset generation */
+static void sparx5_vcap_is0_port_key_selection(struct sparx5 *sparx5,
+ struct vcap_admin *admin)
+{
+ int portno, lookup;
+ u32 keysel;
+
+ keysel = VCAP_IS0_KEYSEL(false,
+ VCAP_IS0_PS_ETYPE_NORMAL_7TUPLE,
+ VCAP_IS0_PS_ETYPE_NORMAL_5TUPLE_IP4,
+ VCAP_IS0_PS_ETYPE_NORMAL_7TUPLE,
+ VCAP_IS0_PS_MPLS_FOLLOW_ETYPE,
+ VCAP_IS0_PS_MPLS_FOLLOW_ETYPE,
+ VCAP_IS0_PS_MLBS_FOLLOW_ETYPE);
+ for (lookup = 0; lookup < admin->lookups; ++lookup) {
+ for (portno = 0; portno < SPX5_PORTS; ++portno) {
+ spx5_wr(keysel, sparx5,
+ ANA_CL_ADV_CL_CFG(portno, lookup));
+ spx5_rmw(ANA_CL_ADV_CL_CFG_LOOKUP_ENA,
+ ANA_CL_ADV_CL_CFG_LOOKUP_ENA,
+ sparx5,
+ ANA_CL_ADV_CL_CFG(portno, lookup));
+ }
+ }
+}
+
+/* Enable IS2 lookups per port and set the keyset generation */
+static void sparx5_vcap_is2_port_key_selection(struct sparx5 *sparx5,
+ struct vcap_admin *admin)
+{
+ int portno, lookup;
+ u32 keysel;
+
+ keysel = VCAP_IS2_KEYSEL(true, VCAP_IS2_PS_NONETH_MAC_ETYPE,
+ VCAP_IS2_PS_IPV4_MC_IP4_TCP_UDP_OTHER,
+ VCAP_IS2_PS_IPV4_UC_IP4_TCP_UDP_OTHER,
+ VCAP_IS2_PS_IPV6_MC_IP_7TUPLE,
+ VCAP_IS2_PS_IPV6_UC_IP_7TUPLE,
+ VCAP_IS2_PS_ARP_ARP);
+ for (lookup = 0; lookup < admin->lookups; ++lookup) {
+ for (portno = 0; portno < SPX5_PORTS; ++portno) {
+ spx5_wr(keysel, sparx5,
+ ANA_ACL_VCAP_S2_KEY_SEL(portno, lookup));
+ }
+ }
+	/* Enable all four IS2 lookups; the enable bits are bits 0:3 */
+ for (portno = 0; portno < SPX5_PORTS; ++portno)
+ spx5_rmw(ANA_ACL_VCAP_S2_CFG_SEC_ENA_SET(0xf),
+ ANA_ACL_VCAP_S2_CFG_SEC_ENA,
+ sparx5,
+ ANA_ACL_VCAP_S2_CFG(portno));
+}
+
+/* Enable ES0 lookups per port and set the keyset generation */
+static void sparx5_vcap_es0_port_key_selection(struct sparx5 *sparx5,
+ struct vcap_admin *admin)
+{
+ int portno;
+ u32 keysel;
+
+ keysel = VCAP_ES0_KEYSEL(VCAP_ES0_PS_FORCE_ISDX_LOOKUPS);
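+	/* Force ISDX lookups so ES0 always uses the ISDX keyset */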
+ for (portno = 0; portno < SPX5_PORTS; ++portno)
+ spx5_rmw(keysel, REW_RTAG_ETAG_CTRL_ES0_ISDX_KEY_ENA,
+ sparx5, REW_RTAG_ETAG_CTRL(portno));
+
+ spx5_rmw(REW_ES0_CTRL_ES0_LU_ENA_SET(1), REW_ES0_CTRL_ES0_LU_ENA,
+ sparx5, REW_ES0_CTRL);
+}
+
+/* Enable ES2 lookups per port and set the keyset generation */
+static void sparx5_vcap_es2_port_key_selection(struct sparx5 *sparx5,
+ struct vcap_admin *admin)
+{
+ int portno, lookup;
+ u32 keysel;
+
+ keysel = VCAP_ES2_KEYSEL(true, VCAP_ES2_PS_ARP_MAC_ETYPE,
+ VCAP_ES2_PS_IPV4_IP4_TCP_UDP_OTHER,
+ VCAP_ES2_PS_IPV6_IP_7TUPLE);
+ for (lookup = 0; lookup < admin->lookups; ++lookup)
+ for (portno = 0; portno < SPX5_PORTS; ++portno)
+ spx5_wr(keysel, sparx5,
+ EACL_VCAP_ES2_KEY_SEL(portno, lookup));
+}
+
+/* Enable lookups per port and set the keyset generation */
+static void sparx5_vcap_port_key_selection(struct sparx5 *sparx5,
+ struct vcap_admin *admin)
+{
+ switch (admin->vtype) {
+ case VCAP_TYPE_IS0:
+ sparx5_vcap_is0_port_key_selection(sparx5, admin);
+ break;
+ case VCAP_TYPE_IS2:
+ sparx5_vcap_is2_port_key_selection(sparx5, admin);
+ break;
+ case VCAP_TYPE_ES0:
+ sparx5_vcap_es0_port_key_selection(sparx5, admin);
+ break;
+ case VCAP_TYPE_ES2:
+ sparx5_vcap_es2_port_key_selection(sparx5, admin);
+ break;
+ default:
+ sparx5_vcap_type_err(sparx5, admin, __func__);
+ break;
+ }
+}
+
+/* Disable lookups per port */
+static void sparx5_vcap_port_key_deselection(struct sparx5 *sparx5,
+ struct vcap_admin *admin)
+{
+ int portno, lookup;
+
+ switch (admin->vtype) {
+ case VCAP_TYPE_IS0:
+ for (lookup = 0; lookup < admin->lookups; ++lookup)
+ for (portno = 0; portno < SPX5_PORTS; ++portno)
+ spx5_rmw(ANA_CL_ADV_CL_CFG_LOOKUP_ENA_SET(0),
+ ANA_CL_ADV_CL_CFG_LOOKUP_ENA,
+ sparx5,
+ ANA_CL_ADV_CL_CFG(portno, lookup));
+ break;
+ case VCAP_TYPE_IS2:
+ for (portno = 0; portno < SPX5_PORTS; ++portno)
+ spx5_rmw(ANA_ACL_VCAP_S2_CFG_SEC_ENA_SET(0),
+ ANA_ACL_VCAP_S2_CFG_SEC_ENA,
+ sparx5,
+ ANA_ACL_VCAP_S2_CFG(portno));
+ break;
+ case VCAP_TYPE_ES0:
+ spx5_rmw(REW_ES0_CTRL_ES0_LU_ENA_SET(0),
+ REW_ES0_CTRL_ES0_LU_ENA, sparx5, REW_ES0_CTRL);
+ break;
+ case VCAP_TYPE_ES2:
+ for (lookup = 0; lookup < admin->lookups; ++lookup)
+ for (portno = 0; portno < SPX5_PORTS; ++portno)
+ spx5_rmw(EACL_VCAP_ES2_KEY_SEL_KEY_ENA_SET(0),
+ EACL_VCAP_ES2_KEY_SEL_KEY_ENA,
+ sparx5,
+ EACL_VCAP_ES2_KEY_SEL(portno, lookup));
+ break;
+ default:
+ sparx5_vcap_type_err(sparx5, admin, __func__);
+ break;
+ }
+}
+
+static void sparx5_vcap_admin_free(struct vcap_admin *admin)
+{
+ if (!admin)
+ return;
+ mutex_destroy(&admin->lock);
+ kfree(admin->cache.keystream);
+ kfree(admin->cache.maskstream);
+ kfree(admin->cache.actionstream);
+ kfree(admin);
+}
+
+/* Allocate a vcap instance with a rule list and a cache area */
+static struct vcap_admin *
+sparx5_vcap_admin_alloc(struct sparx5 *sparx5, struct vcap_control *ctrl,
+ const struct sparx5_vcap_inst *cfg)
+{
+ struct vcap_admin *admin;
+
+ admin = kzalloc(sizeof(*admin), GFP_KERNEL);
+ if (!admin)
+ return ERR_PTR(-ENOMEM);
+ INIT_LIST_HEAD(&admin->list);
+ INIT_LIST_HEAD(&admin->rules);
+ INIT_LIST_HEAD(&admin->enabled);
+ mutex_init(&admin->lock);
+ admin->vtype = cfg->vtype;
+ admin->vinst = cfg->vinst;
+ admin->ingress = cfg->ingress;
+ admin->lookups = cfg->lookups;
+ admin->lookups_per_instance = cfg->lookups_per_instance;
+ admin->first_cid = cfg->first_cid;
+ admin->last_cid = cfg->last_cid;
+	admin->cache.keystream = kzalloc(STREAMSIZE, GFP_KERNEL);
+	admin->cache.maskstream = kzalloc(STREAMSIZE, GFP_KERNEL);
+	admin->cache.actionstream = kzalloc(STREAMSIZE, GFP_KERNEL);
+ if (!admin->cache.keystream || !admin->cache.maskstream ||
+ !admin->cache.actionstream) {
+ sparx5_vcap_admin_free(admin);
+ return ERR_PTR(-ENOMEM);
+ }
+ return admin;
+}
+
+/* Do block allocations and provide addresses for VCAP instances */
+static void sparx5_vcap_block_alloc(struct sparx5 *sparx5,
+ struct vcap_admin *admin,
+ const struct sparx5_vcap_inst *cfg)
+{
+ int idx, cores;
+
+ switch (admin->vtype) {
+ case VCAP_TYPE_IS0:
+ case VCAP_TYPE_IS2:
+ /* Super VCAP block mapping and address configuration. Block 0
+ * is assigned addresses 0 through 3071, block 1 is assigned
+		 * addresses 3072 through 6143, and so on.
+ */
+ for (idx = cfg->blockno; idx < cfg->blockno + cfg->blocks;
+ ++idx) {
+ spx5_wr(VCAP_SUPER_IDX_CORE_IDX_SET(idx), sparx5,
+ VCAP_SUPER_IDX);
+ spx5_wr(VCAP_SUPER_MAP_CORE_MAP_SET(cfg->map_id),
+ sparx5, VCAP_SUPER_MAP);
+ }
+ admin->first_valid_addr = cfg->blockno * SUPER_VCAP_BLK_SIZE;
+ admin->last_used_addr = admin->first_valid_addr +
+ cfg->blocks * SUPER_VCAP_BLK_SIZE;
+ admin->last_valid_addr = admin->last_used_addr - 1;
+ break;
+ case VCAP_TYPE_ES0:
+ admin->first_valid_addr = 0;
+ admin->last_used_addr = cfg->count;
+ admin->last_valid_addr = cfg->count - 1;
+ cores = spx5_rd(sparx5, VCAP_ES0_CORE_CNT);
+ for (idx = 0; idx < cores; ++idx) {
+ spx5_wr(VCAP_ES0_IDX_CORE_IDX_SET(idx), sparx5,
+ VCAP_ES0_IDX);
+ spx5_wr(VCAP_ES0_MAP_CORE_MAP_SET(1), sparx5,
+ VCAP_ES0_MAP);
+ }
+ break;
+ case VCAP_TYPE_ES2:
+ admin->first_valid_addr = 0;
+ admin->last_used_addr = cfg->count;
+ admin->last_valid_addr = cfg->count - 1;
+ cores = spx5_rd(sparx5, VCAP_ES2_CORE_CNT);
+ for (idx = 0; idx < cores; ++idx) {
+ spx5_wr(VCAP_ES2_IDX_CORE_IDX_SET(idx), sparx5,
+ VCAP_ES2_IDX);
+ spx5_wr(VCAP_ES2_MAP_CORE_MAP_SET(1), sparx5,
+ VCAP_ES2_MAP);
+ }
+ break;
+ default:
+ sparx5_vcap_type_err(sparx5, admin, __func__);
+ break;
+ }
+}
+
+/* Allocate a vcap control and vcap instances and configure the system */
+int sparx5_vcap_init(struct sparx5 *sparx5)
+{
+ const struct sparx5_vcap_inst *cfg;
+ struct vcap_control *ctrl;
+ struct vcap_admin *admin;
+ struct dentry *dir;
+ int err = 0, idx;
+
+ /* Create a VCAP control instance that owns the platform specific VCAP
+ * model with VCAP instances and information about keysets, keys,
+ * actionsets and actions
+ * - Create administrative state for each available VCAP
+ * - Lists of rules
+ * - Address information
+ * - Initialize VCAP blocks
+ * - Configure port keysets
+ */
+ ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
+ if (!ctrl)
+ return -ENOMEM;
+
+ sparx5->vcap_ctrl = ctrl;
+ /* select the sparx5 VCAP model */
+ ctrl->vcaps = sparx5_vcaps;
+ ctrl->stats = &sparx5_vcap_stats;
+ /* Setup callbacks to allow the API to use the VCAP HW */
+ ctrl->ops = &sparx5_vcap_ops;
+
+ INIT_LIST_HEAD(&ctrl->list);
+ for (idx = 0; idx < ARRAY_SIZE(sparx5_vcap_inst_cfg); ++idx) {
+ cfg = &sparx5_vcap_inst_cfg[idx];
+ admin = sparx5_vcap_admin_alloc(sparx5, ctrl, cfg);
+ if (IS_ERR(admin)) {
+ err = PTR_ERR(admin);
+ pr_err("%s:%d: vcap allocation failed: %d\n",
+ __func__, __LINE__, err);
+ return err;
+ }
+ sparx5_vcap_block_alloc(sparx5, admin, cfg);
+ sparx5_vcap_block_init(sparx5, admin);
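+		/* The port key selection is shared between the instances
+		 * of a VCAP, so only configure it for the first instance
+		 */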
+ if (cfg->vinst == 0)
+ sparx5_vcap_port_key_selection(sparx5, admin);
+ list_add_tail(&admin->list, &ctrl->list);
+ }
+ dir = vcap_debugfs(sparx5->dev, sparx5->debugfs_root, ctrl);
+ for (idx = 0; idx < SPX5_PORTS; ++idx)
+ if (sparx5->ports[idx])
+ vcap_port_debugfs(sparx5->dev, dir, ctrl,
+ sparx5->ports[idx]->ndev);
+
+ return err;
+}
+
+void sparx5_vcap_destroy(struct sparx5 *sparx5)
+{
+ struct vcap_control *ctrl = sparx5->vcap_ctrl;
+ struct vcap_admin *admin, *admin_next;
+
+ if (!ctrl)
+ return;
+
+ list_for_each_entry_safe(admin, admin_next, &ctrl->list, list) {
+ sparx5_vcap_port_key_deselection(sparx5, admin);
+ vcap_del_rules(ctrl, admin);
+ list_del(&admin->list);
+ sparx5_vcap_admin_free(admin);
+ }
+ kfree(ctrl);
+}
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.h b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.h
new file mode 100644
index 0000000000..2684d9199b
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.h
@@ -0,0 +1,207 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Microchip Sparx5 Switch driver VCAP implementation
+ *
+ * Copyright (c) 2022 Microchip Technology Inc. and its subsidiaries.
+ *
+ * The Sparx5 Chip Register Model can be browsed at this location:
+ * https://github.com/microchip-ung/sparx-5_reginfo
+ */
+
+#ifndef __SPARX5_VCAP_IMPL_H__
+#define __SPARX5_VCAP_IMPL_H__
+
+#include <linux/types.h>
+#include <linux/list.h>
+
+#include "vcap_api.h"
+#include "vcap_api_client.h"
+
+#define SPARX5_VCAP_CID_IS0_L0 VCAP_CID_INGRESS_L0 /* IS0/CLM lookup 0 */
+#define SPARX5_VCAP_CID_IS0_L1 VCAP_CID_INGRESS_L1 /* IS0/CLM lookup 1 */
+#define SPARX5_VCAP_CID_IS0_L2 VCAP_CID_INGRESS_L2 /* IS0/CLM lookup 2 */
+#define SPARX5_VCAP_CID_IS0_L3 VCAP_CID_INGRESS_L3 /* IS0/CLM lookup 3 */
+#define SPARX5_VCAP_CID_IS0_L4 VCAP_CID_INGRESS_L4 /* IS0/CLM lookup 4 */
+#define SPARX5_VCAP_CID_IS0_L5 VCAP_CID_INGRESS_L5 /* IS0/CLM lookup 5 */
+#define SPARX5_VCAP_CID_IS0_MAX \
+ (VCAP_CID_INGRESS_L5 + VCAP_CID_LOOKUP_SIZE - 1) /* IS0/CLM Max */
+
+#define SPARX5_VCAP_CID_IS2_L0 VCAP_CID_INGRESS_STAGE2_L0 /* IS2 lookup 0 */
+#define SPARX5_VCAP_CID_IS2_L1 VCAP_CID_INGRESS_STAGE2_L1 /* IS2 lookup 1 */
+#define SPARX5_VCAP_CID_IS2_L2 VCAP_CID_INGRESS_STAGE2_L2 /* IS2 lookup 2 */
+#define SPARX5_VCAP_CID_IS2_L3 VCAP_CID_INGRESS_STAGE2_L3 /* IS2 lookup 3 */
+#define SPARX5_VCAP_CID_IS2_MAX \
+ (VCAP_CID_INGRESS_STAGE2_L3 + VCAP_CID_LOOKUP_SIZE - 1) /* IS2 Max */
+
+#define SPARX5_VCAP_CID_ES0_L0 VCAP_CID_EGRESS_L0 /* ES0 lookup 0 */
+#define SPARX5_VCAP_CID_ES0_MAX (VCAP_CID_EGRESS_L1 - 1) /* ES0 Max */
+
+#define SPARX5_VCAP_CID_ES2_L0 VCAP_CID_EGRESS_STAGE2_L0 /* ES2 lookup 0 */
+#define SPARX5_VCAP_CID_ES2_L1 VCAP_CID_EGRESS_STAGE2_L1 /* ES2 lookup 1 */
+#define SPARX5_VCAP_CID_ES2_MAX \
+ (VCAP_CID_EGRESS_STAGE2_L1 + VCAP_CID_LOOKUP_SIZE - 1) /* ES2 Max */
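+
+/* Each lookup spans VCAP_CID_LOOKUP_SIZE chain ids; the *_MAX values
+ * mark the last valid chain id of a VCAP instance
+ */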
+
+/* IS0 port keyset selection control */
+
+/* IS0 ethernet, IPv4, IPv6 traffic type keyset generation */
+enum vcap_is0_port_sel_etype {
+ VCAP_IS0_PS_ETYPE_DEFAULT, /* None or follow depending on class */
+ VCAP_IS0_PS_ETYPE_MLL,
+ VCAP_IS0_PS_ETYPE_SGL_MLBS,
+ VCAP_IS0_PS_ETYPE_DBL_MLBS,
+ VCAP_IS0_PS_ETYPE_TRI_MLBS,
+ VCAP_IS0_PS_ETYPE_TRI_VID,
+ VCAP_IS0_PS_ETYPE_LL_FULL,
+ VCAP_IS0_PS_ETYPE_NORMAL_SRC,
+ VCAP_IS0_PS_ETYPE_NORMAL_DST,
+ VCAP_IS0_PS_ETYPE_NORMAL_7TUPLE,
+ VCAP_IS0_PS_ETYPE_NORMAL_5TUPLE_IP4,
+ VCAP_IS0_PS_ETYPE_PURE_5TUPLE_IP4,
+ VCAP_IS0_PS_ETYPE_DBL_VID_IDX,
+ VCAP_IS0_PS_ETYPE_ETAG,
+ VCAP_IS0_PS_ETYPE_NO_LOOKUP,
+};
+
+/* IS0 MPLS traffic type keyset generation */
+enum vcap_is0_port_sel_mpls_uc_mc {
+ VCAP_IS0_PS_MPLS_FOLLOW_ETYPE,
+ VCAP_IS0_PS_MPLS_MLL,
+ VCAP_IS0_PS_MPLS_SGL_MLBS,
+ VCAP_IS0_PS_MPLS_DBL_MLBS,
+ VCAP_IS0_PS_MPLS_TRI_MLBS,
+ VCAP_IS0_PS_MPLS_TRI_VID,
+ VCAP_IS0_PS_MPLS_LL_FULL,
+ VCAP_IS0_PS_MPLS_NORMAL_SRC,
+ VCAP_IS0_PS_MPLS_NORMAL_DST,
+ VCAP_IS0_PS_MPLS_NORMAL_7TUPLE,
+ VCAP_IS0_PS_MPLS_NORMAL_5TUPLE_IP4,
+ VCAP_IS0_PS_MPLS_PURE_5TUPLE_IP4,
+ VCAP_IS0_PS_MPLS_DBL_VID_IDX,
+ VCAP_IS0_PS_MPLS_ETAG,
+ VCAP_IS0_PS_MPLS_NO_LOOKUP,
+};
+
+/* IS0 MLBS traffic type keyset generation */
+enum vcap_is0_port_sel_mlbs {
+ VCAP_IS0_PS_MLBS_FOLLOW_ETYPE,
+ VCAP_IS0_PS_MLBS_SGL_MLBS,
+ VCAP_IS0_PS_MLBS_DBL_MLBS,
+ VCAP_IS0_PS_MLBS_TRI_MLBS,
+ VCAP_IS0_PS_MLBS_NO_LOOKUP = 17,
+};
+
+/* IS2 port keyset selection control */
+
+/* IS2 non-ethernet traffic type keyset generation */
+enum vcap_is2_port_sel_noneth {
+ VCAP_IS2_PS_NONETH_MAC_ETYPE,
+ VCAP_IS2_PS_NONETH_CUSTOM_1,
+ VCAP_IS2_PS_NONETH_CUSTOM_2,
+ VCAP_IS2_PS_NONETH_NO_LOOKUP
+};
+
+/* IS2 IPv4 unicast traffic type keyset generation */
+enum vcap_is2_port_sel_ipv4_uc {
+ VCAP_IS2_PS_IPV4_UC_MAC_ETYPE,
+ VCAP_IS2_PS_IPV4_UC_IP4_TCP_UDP_OTHER,
+ VCAP_IS2_PS_IPV4_UC_IP_7TUPLE,
+};
+
+/* IS2 IPv4 multicast traffic type keyset generation */
+enum vcap_is2_port_sel_ipv4_mc {
+ VCAP_IS2_PS_IPV4_MC_MAC_ETYPE,
+ VCAP_IS2_PS_IPV4_MC_IP4_TCP_UDP_OTHER,
+ VCAP_IS2_PS_IPV4_MC_IP_7TUPLE,
+ VCAP_IS2_PS_IPV4_MC_IP4_VID,
+};
+
+/* IS2 IPv6 unicast traffic type keyset generation */
+enum vcap_is2_port_sel_ipv6_uc {
+ VCAP_IS2_PS_IPV6_UC_MAC_ETYPE,
+ VCAP_IS2_PS_IPV6_UC_IP_7TUPLE,
+ VCAP_IS2_PS_IPV6_UC_IP6_STD,
+ VCAP_IS2_PS_IPV6_UC_IP4_TCP_UDP_OTHER,
+};
+
+/* IS2 IPv6 multicast traffic type keyset generation */
+enum vcap_is2_port_sel_ipv6_mc {
+ VCAP_IS2_PS_IPV6_MC_MAC_ETYPE,
+ VCAP_IS2_PS_IPV6_MC_IP_7TUPLE,
+ VCAP_IS2_PS_IPV6_MC_IP6_VID,
+ VCAP_IS2_PS_IPV6_MC_IP6_STD,
+ VCAP_IS2_PS_IPV6_MC_IP4_TCP_UDP_OTHER,
+};
+
+/* IS2 ARP traffic type keyset generation */
+enum vcap_is2_port_sel_arp {
+ VCAP_IS2_PS_ARP_MAC_ETYPE,
+ VCAP_IS2_PS_ARP_ARP,
+};
+
+/* ES0 port keyset selection control */
+
+/* ES0 Egress port traffic type classification */
+enum vcap_es0_port_sel {
+ VCAP_ES0_PS_NORMAL_SELECTION,
+ VCAP_ES0_PS_FORCE_ISDX_LOOKUPS,
+ VCAP_ES0_PS_FORCE_VID_LOOKUPS,
+ VCAP_ES0_PS_RESERVED,
+};
+
+/* ES2 port keyset selection control */
+
+/* ES2 IPv4 traffic type keyset generation */
+enum vcap_es2_port_sel_ipv4 {
+ VCAP_ES2_PS_IPV4_MAC_ETYPE,
+ VCAP_ES2_PS_IPV4_IP_7TUPLE,
+ VCAP_ES2_PS_IPV4_IP4_TCP_UDP_VID,
+ VCAP_ES2_PS_IPV4_IP4_TCP_UDP_OTHER,
+ VCAP_ES2_PS_IPV4_IP4_VID,
+ VCAP_ES2_PS_IPV4_IP4_OTHER,
+};
+
+/* ES2 IPv6 traffic type keyset generation */
+enum vcap_es2_port_sel_ipv6 {
+ VCAP_ES2_PS_IPV6_MAC_ETYPE,
+ VCAP_ES2_PS_IPV6_IP_7TUPLE,
+ VCAP_ES2_PS_IPV6_IP_7TUPLE_VID,
+ VCAP_ES2_PS_IPV6_IP_7TUPLE_STD,
+ VCAP_ES2_PS_IPV6_IP6_VID,
+ VCAP_ES2_PS_IPV6_IP6_STD,
+ VCAP_ES2_PS_IPV6_IP4_DOWNGRADE,
+};
+
+/* ES2 ARP traffic type keyset generation */
+enum vcap_es2_port_sel_arp {
+ VCAP_ES2_PS_ARP_MAC_ETYPE,
+ VCAP_ES2_PS_ARP_ARP,
+};
+
+/* Selects TPID for ES0 matching */
+enum SPX5_TPID_SEL {
+ SPX5_TPID_SEL_UNTAGGED,
+ SPX5_TPID_SEL_8100,
+ SPX5_TPID_SEL_UNUSED_0,
+ SPX5_TPID_SEL_UNUSED_1,
+ SPX5_TPID_SEL_88A8,
+ SPX5_TPID_SEL_TPIDCFG_1,
+ SPX5_TPID_SEL_TPIDCFG_2,
+ SPX5_TPID_SEL_TPIDCFG_3,
+};
+
+/* Get the port keyset for the vcap lookup */
+int sparx5_vcap_get_port_keyset(struct net_device *ndev,
+ struct vcap_admin *admin,
+ int cid,
+ u16 l3_proto,
+ struct vcap_keyset_list *kslist);
+
+/* Change the port keyset for the lookup and protocol */
+void sparx5_vcap_set_port_keyset(struct net_device *ndev,
+ struct vcap_admin *admin, int cid,
+ u16 l3_proto, enum vcap_keyfield_set keyset,
+ struct vcap_keyset_list *orig);
+
+/* Check if the ethertype is supported by the vcap port classification */
+bool sparx5_vcap_is_known_etype(struct vcap_admin *admin, u16 etype);
+
+#endif /* __SPARX5_VCAP_IMPL_H__ */
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c b/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c
new file mode 100644
index 0000000000..ac001ae59a
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c
@@ -0,0 +1,238 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include "sparx5_main_regs.h"
+#include "sparx5_main.h"
+
+static int sparx5_vlant_set_mask(struct sparx5 *sparx5, u16 vid)
+{
+ u32 mask[3];
+
+	/* Divide the mask into 32 bit words */
+ bitmap_to_arr32(mask, sparx5->vlan_mask[vid], SPX5_PORTS);
+
+ /* Output mask to respective registers */
+ spx5_wr(mask[0], sparx5, ANA_L3_VLAN_MASK_CFG(vid));
+ spx5_wr(mask[1], sparx5, ANA_L3_VLAN_MASK_CFG1(vid));
+ spx5_wr(mask[2], sparx5, ANA_L3_VLAN_MASK_CFG2(vid));
+
+ return 0;
+}
+
+void sparx5_vlan_init(struct sparx5 *sparx5)
+{
+ u16 vid;
+
+ spx5_rmw(ANA_L3_VLAN_CTRL_VLAN_ENA_SET(1),
+ ANA_L3_VLAN_CTRL_VLAN_ENA,
+ sparx5,
+ ANA_L3_VLAN_CTRL);
+
+ /* Map VLAN = FID */
+ for (vid = NULL_VID; vid < VLAN_N_VID; vid++)
+ spx5_rmw(ANA_L3_VLAN_CFG_VLAN_FID_SET(vid),
+ ANA_L3_VLAN_CFG_VLAN_FID,
+ sparx5,
+ ANA_L3_VLAN_CFG(vid));
+}
+
+void sparx5_vlan_port_setup(struct sparx5 *sparx5, int portno)
+{
+ struct sparx5_port *port = sparx5->ports[portno];
+
+ /* Configure PVID */
+ spx5_rmw(ANA_CL_VLAN_CTRL_VLAN_AWARE_ENA_SET(0) |
+ ANA_CL_VLAN_CTRL_PORT_VID_SET(port->pvid),
+ ANA_CL_VLAN_CTRL_VLAN_AWARE_ENA |
+ ANA_CL_VLAN_CTRL_PORT_VID,
+ sparx5,
+ ANA_CL_VLAN_CTRL(port->portno));
+}
+
+int sparx5_vlan_vid_add(struct sparx5_port *port, u16 vid, bool pvid,
+ bool untagged)
+{
+ struct sparx5 *sparx5 = port->sparx5;
+ int ret;
+
+ /* Untagged egress vlan classification */
+ if (untagged && port->vid != vid) {
+ if (port->vid) {
+ netdev_err(port->ndev,
+ "Port already has a native VLAN: %d\n",
+ port->vid);
+ return -EBUSY;
+ }
+ port->vid = vid;
+ }
+
+ /* Make the port a member of the VLAN */
+ set_bit(port->portno, sparx5->vlan_mask[vid]);
+ ret = sparx5_vlant_set_mask(sparx5, vid);
+ if (ret)
+ return ret;
+
+ /* Default ingress vlan classification */
+ if (pvid)
+ port->pvid = vid;
+
+ sparx5_vlan_port_apply(sparx5, port);
+
+ return 0;
+}
+
+int sparx5_vlan_vid_del(struct sparx5_port *port, u16 vid)
+{
+ struct sparx5 *sparx5 = port->sparx5;
+ int ret;
+
+	/* The 8021q module removes VID 0 on module unload for all
+	 * interfaces with the VLAN filtering feature. We need to keep
+	 * it to receive untagged traffic.
+	 */
+ if (vid == 0)
+ return 0;
+
+ /* Stop the port from being a member of the vlan */
+ clear_bit(port->portno, sparx5->vlan_mask[vid]);
+ ret = sparx5_vlant_set_mask(sparx5, vid);
+ if (ret)
+ return ret;
+
+ /* Ingress */
+ if (port->pvid == vid)
+ port->pvid = 0;
+
+ /* Egress */
+ if (port->vid == vid)
+ port->vid = 0;
+
+ sparx5_vlan_port_apply(sparx5, port);
+
+ return 0;
+}
+
+void sparx5_pgid_update_mask(struct sparx5_port *port, int pgid, bool enable)
+{
+ struct sparx5 *sparx5 = port->sparx5;
+ u32 val, mask;
+
+	/* The mask is spread across 3 registers of 32 bits each */
+ if (port->portno < 32) {
+ mask = BIT(port->portno);
+ val = enable ? mask : 0;
+ spx5_rmw(val, mask, sparx5, ANA_AC_PGID_CFG(pgid));
+ } else if (port->portno < 64) {
+ mask = BIT(port->portno - 32);
+ val = enable ? mask : 0;
+ spx5_rmw(val, mask, sparx5, ANA_AC_PGID_CFG1(pgid));
+ } else if (port->portno < SPX5_PORTS) {
+ mask = BIT(port->portno - 64);
+ val = enable ? mask : 0;
+ spx5_rmw(val, mask, sparx5, ANA_AC_PGID_CFG2(pgid));
+ } else {
+ netdev_err(port->ndev, "Invalid port no: %d\n", port->portno);
+ }
+}
+
+void sparx5_pgid_clear(struct sparx5 *spx5, int pgid)
+{
+ spx5_wr(0, spx5, ANA_AC_PGID_CFG(pgid));
+ spx5_wr(0, spx5, ANA_AC_PGID_CFG1(pgid));
+ spx5_wr(0, spx5, ANA_AC_PGID_CFG2(pgid));
+}
+
+void sparx5_pgid_read_mask(struct sparx5 *spx5, int pgid, u32 portmask[3])
+{
+ portmask[0] = spx5_rd(spx5, ANA_AC_PGID_CFG(pgid));
+ portmask[1] = spx5_rd(spx5, ANA_AC_PGID_CFG1(pgid));
+ portmask[2] = spx5_rd(spx5, ANA_AC_PGID_CFG2(pgid));
+}
+
+void sparx5_update_fwd(struct sparx5 *sparx5)
+{
+ DECLARE_BITMAP(workmask, SPX5_PORTS);
+ u32 mask[3];
+ int port;
+
+ /* Divide up the fwd mask into 32-bit words */
+ bitmap_to_arr32(mask, sparx5->bridge_fwd_mask, SPX5_PORTS);
+
+ /* Update flood masks */
+ for (port = PGID_UC_FLOOD; port <= PGID_BCAST; port++) {
+ spx5_wr(mask[0], sparx5, ANA_AC_PGID_CFG(port));
+ spx5_wr(mask[1], sparx5, ANA_AC_PGID_CFG1(port));
+ spx5_wr(mask[2], sparx5, ANA_AC_PGID_CFG2(port));
+ }
+
+ /* Update SRC masks */
+ for (port = 0; port < SPX5_PORTS; port++) {
+ if (test_bit(port, sparx5->bridge_fwd_mask)) {
+ /* Allow to send to all bridged but self */
+ bitmap_copy(workmask, sparx5->bridge_fwd_mask, SPX5_PORTS);
+ clear_bit(port, workmask);
+ bitmap_to_arr32(mask, workmask, SPX5_PORTS);
+ spx5_wr(mask[0], sparx5, ANA_AC_SRC_CFG(port));
+ spx5_wr(mask[1], sparx5, ANA_AC_SRC_CFG1(port));
+ spx5_wr(mask[2], sparx5, ANA_AC_SRC_CFG2(port));
+ } else {
+ spx5_wr(0, sparx5, ANA_AC_SRC_CFG(port));
+ spx5_wr(0, sparx5, ANA_AC_SRC_CFG1(port));
+ spx5_wr(0, sparx5, ANA_AC_SRC_CFG2(port));
+ }
+ }
+
+ /* Learning enabled only for bridged ports */
+ bitmap_and(workmask, sparx5->bridge_fwd_mask,
+ sparx5->bridge_lrn_mask, SPX5_PORTS);
+ bitmap_to_arr32(mask, workmask, SPX5_PORTS);
+
+ /* Apply learning mask */
+ spx5_wr(mask[0], sparx5, ANA_L2_AUTO_LRN_CFG);
+ spx5_wr(mask[1], sparx5, ANA_L2_AUTO_LRN_CFG1);
+ spx5_wr(mask[2], sparx5, ANA_L2_AUTO_LRN_CFG2);
+}
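
The SRC mask update above implements one rule per port: a bridged port may forward to every other bridged port but never back to itself, and a port outside the bridge forwards nowhere. A condensed sketch of that rule using a single 64-bit word in place of the kernel bitmap, so it models at most 64 of the ports; names are illustrative:

#include <stdint.h>
#include <stdio.h>

/* SRC mask for one port: empty if the port is not bridged, otherwise
 * all bridged ports with the port's own bit cleared (no self-forwarding).
 */
static uint64_t src_mask(uint64_t bridge_fwd_mask, int port)
{
	if (!(bridge_fwd_mask & (1ull << port)))
		return 0;
	return bridge_fwd_mask & ~(1ull << port);
}

int main(void)
{
	uint64_t bridged = 0x0b;	/* ports 0, 1 and 3 are bridged */

	for (int port = 0; port < 4; port++)
		printf("port %d -> 0x%llx\n", port,
		       (unsigned long long)src_mask(bridged, port));
	/* port 0 -> 0xa, port 1 -> 0x9, port 2 -> 0x0, port 3 -> 0x3 */
	return 0;
}
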
+
+void sparx5_vlan_port_apply(struct sparx5 *sparx5,
+ struct sparx5_port *port)
+
+{
+ u32 val;
+
+ /* Configure PVID, vlan aware */
+ val = ANA_CL_VLAN_CTRL_VLAN_AWARE_ENA_SET(port->vlan_aware) |
+ ANA_CL_VLAN_CTRL_VLAN_POP_CNT_SET(port->vlan_aware) |
+ ANA_CL_VLAN_CTRL_PORT_VID_SET(port->pvid);
+ spx5_wr(val, sparx5, ANA_CL_VLAN_CTRL(port->portno));
+
+ val = 0;
+ if (port->vlan_aware && !port->pvid)
+ /* If port is vlan-aware and tagged, drop untagged and
+ * priority tagged frames.
+ */
+ val = ANA_CL_VLAN_FILTER_CTRL_TAG_REQUIRED_ENA_SET(1) |
+ ANA_CL_VLAN_FILTER_CTRL_PRIO_CTAG_DIS_SET(1) |
+ ANA_CL_VLAN_FILTER_CTRL_PRIO_STAG_DIS_SET(1);
+ spx5_wr(val, sparx5,
+ ANA_CL_VLAN_FILTER_CTRL(port->portno, 0));
+
+ /* Egress configuration (REW_TAG_CFG): VLAN tag selected via IFH */
+ val = REW_TAG_CTRL_TAG_TPID_CFG_SET(5);
+ if (port->vlan_aware) {
+ if (port->vid)
+ /* Tag all frames except when VID == DEFAULT_VLAN */
+ val |= REW_TAG_CTRL_TAG_CFG_SET(1);
+ else
+ val |= REW_TAG_CTRL_TAG_CFG_SET(3);
+ }
+ spx5_wr(val, sparx5, REW_TAG_CTRL(port->portno));
+
+ /* Egress VID */
+ spx5_rmw(REW_PORT_VLAN_CFG_PORT_VID_SET(port->vid),
+ REW_PORT_VLAN_CFG_PORT_VID,
+ sparx5,
+ REW_PORT_VLAN_CFG(port->portno));
+}
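
The REW_TAG_CTRL write above reduces to a three-way tagging decision. A small sketch of that decision follows, with the encoding meanings inferred from the branches and comments in the function (0: port tagging disabled, 1: tag all frames except the native VID, 3: tag all frames); treat those meanings as assumptions rather than datasheet facts:

#include <stdbool.h>
#include <stdio.h>

/* TAG_CFG decision as made in sparx5_vlan_port_apply(); the encoding
 * meanings are inferred from the surrounding comments, not quoted
 * from a datasheet.
 */
static unsigned int egress_tag_cfg(bool vlan_aware, unsigned int native_vid)
{
	if (!vlan_aware)
		return 0;	/* VLAN-unaware port: no port tagging */
	if (native_vid)
		return 1;	/* untag only the native VLAN */
	return 3;		/* no native VLAN: tag everything */
}

int main(void)
{
	printf("%u %u %u\n",
	       egress_tag_cfg(false, 0),	/* 0 */
	       egress_tag_cfg(true, 100),	/* 1 */
	       egress_tag_cfg(true, 0));	/* 3 */
	return 0;
}
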
diff --git a/drivers/net/ethernet/microchip/vcap/Kconfig b/drivers/net/ethernet/microchip/vcap/Kconfig
new file mode 100644
index 0000000000..97f43fd447
--- /dev/null
+++ b/drivers/net/ethernet/microchip/vcap/Kconfig
@@ -0,0 +1,53 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Microchip VCAP API configuration
+#
+
+if NET_VENDOR_MICROCHIP
+
+config VCAP
+ bool "VCAP (Versatile Content-Aware Processor) library"
+ help
+ Provides the basic VCAP functionality for multiple Microchip switchcores
+
+ A VCAP is essentially a TCAM with rules consisting of
+
+ - Programmable key fields
+ - Programmable action fields
+ - A counter (which may be only one bit wide)
+
+ Besides this each VCAP has:
+
+ - A number of lookups
+ - A keyset configuration per port per lookup
+
+ The VCAP implementation provides switchcore independent handling of rules
+ and supports:
+
+ - Creating and deleting rules
+ - Updating and getting rules
+
+ The platform specific configuration as well as the platform specific model
+ of the VCAP instances are attached to the VCAP API and a client can then
+ access rules via the API in a platform independent way, with the
+ limitations that each VCAP has in terms of its supported keys and actions.
+
+ Different switchcores will have different VCAP instances with different
+ characteristics. Look in the datasheet for the VCAP specifications for the
+ specific switchcore.
+
+config VCAP_KUNIT_TEST
+ bool "KUnit test for VCAP library" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ depends on KUNIT=y && VCAP=y && y
+ select DEBUG_FS
+ default KUNIT_ALL_TESTS
+ help
+ This builds unit tests for the VCAP library.
+
+ For more information on KUnit and unit tests in general, please refer
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+ If unsure, say N.
+
+endif # NET_VENDOR_MICROCHIP
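
The help text describes a VCAP rule as TCAM-style content: programmable key fields matched under a mask, programmable action fields, and a counter. The sketch below is a conceptual model of that shape, a teaching aid only; the real API revolves around struct vcap_rule and the typed key/action field structs declared by the library, and all toy_* names here are invented:

#include <stdint.h>
#include <stdio.h>

/* TCAM-style key: only bits set in the mask take part in the match */
struct toy_key {
	uint32_t value;
	uint32_t mask;
};

struct toy_action {
	uint32_t field;		/* action field id */
	uint32_t value;		/* action field value */
};

struct toy_rule {
	struct toy_key key[4];		/* programmable key fields */
	struct toy_action action[4];	/* programmable action fields */
	uint64_t counter;		/* counter, may be only 1 bit wide */
};

static int toy_key_match(const struct toy_key *k, uint32_t frame_bits)
{
	return (frame_bits & k->mask) == (k->value & k->mask);
}

int main(void)
{
	struct toy_key etype = { .value = 0x0800, .mask = 0xffff };

	printf("%d %d\n",
	       toy_key_match(&etype, 0x0800),	/* 1: IPv4 ethertype */
	       toy_key_match(&etype, 0x86dd));	/* 0: IPv6 ethertype */
	return 0;
}
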
diff --git a/drivers/net/ethernet/microchip/vcap/Makefile b/drivers/net/ethernet/microchip/vcap/Makefile
new file mode 100644
index 0000000000..c86f20e649
--- /dev/null
+++ b/drivers/net/ethernet/microchip/vcap/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for the Microchip VCAP API
+#
+
+obj-$(CONFIG_VCAP) += vcap.o
+obj-$(CONFIG_VCAP_KUNIT_TEST) += vcap_model_kunit.o
+vcap-$(CONFIG_DEBUG_FS) += vcap_api_debugfs.o
+
+vcap-y += vcap_api.o vcap_tc.o
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_ag_api.h b/drivers/net/ethernet/microchip/vcap/vcap_ag_api.h
new file mode 100644
index 0000000000..c3569a4c7b
--- /dev/null
+++ b/drivers/net/ethernet/microchip/vcap/vcap_ag_api.h
@@ -0,0 +1,906 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (C) 2023 Microchip Technology Inc. and its subsidiaries.
+ * Microchip VCAP API
+ */
+
+/* This file is autogenerated by cml-utils 2023-03-13 10:16:42 +0100.
+ * Commit ID: 259f0efd6d6d91bfbf62858de153cc757b6bffa3 (dirty)
+ */
+
+#ifndef __VCAP_AG_API__
+#define __VCAP_AG_API__
+
+enum vcap_type {
+ VCAP_TYPE_ES0,
+ VCAP_TYPE_ES2,
+ VCAP_TYPE_IS0,
+ VCAP_TYPE_IS1,
+ VCAP_TYPE_IS2,
+ VCAP_TYPE_MAX
+};
+
+/* Keyfieldset names with origin information */
+enum vcap_keyfield_set {
+ VCAP_KFS_NO_VALUE, /* initial value */
+ VCAP_KFS_5TUPLE_IP4, /* lan966x is1 X2 */
+ VCAP_KFS_5TUPLE_IP6, /* lan966x is1 X4 */
+ VCAP_KFS_7TUPLE, /* lan966x is1 X4 */
+ VCAP_KFS_ARP, /* sparx5 is2 X6, sparx5 es2 X6, lan966x is2 X2 */
+ VCAP_KFS_DBL_VID, /* lan966x is1 X1 */
+ VCAP_KFS_DMAC_VID, /* lan966x is1 X1 */
+ VCAP_KFS_ETAG, /* sparx5 is0 X2 */
+ VCAP_KFS_IP4_OTHER, /* sparx5 is2 X6, sparx5 es2 X6, lan966x is2 X2 */
+ VCAP_KFS_IP4_TCP_UDP, /* sparx5 is2 X6, sparx5 es2 X6, lan966x is2 X2 */
+ VCAP_KFS_IP4_VID, /* sparx5 es2 X3 */
+ VCAP_KFS_IP6_OTHER, /* lan966x is2 X4 */
+ VCAP_KFS_IP6_STD, /* sparx5 is2 X6, sparx5 es2 X6, lan966x is2 X2 */
+ VCAP_KFS_IP6_TCP_UDP, /* lan966x is2 X4 */
+ VCAP_KFS_IP6_VID, /* sparx5 es2 X6 */
+ VCAP_KFS_IP_7TUPLE, /* sparx5 is2 X12, sparx5 es2 X12 */
+ VCAP_KFS_ISDX, /* sparx5 es0 X1 */
+ VCAP_KFS_LL_FULL, /* sparx5 is0 X6 */
+ VCAP_KFS_MAC_ETYPE, /* sparx5 is2 X6, sparx5 es2 X6, lan966x is2 X2 */
+ VCAP_KFS_MAC_LLC, /* lan966x is2 X2 */
+ VCAP_KFS_MAC_SNAP, /* lan966x is2 X2 */
+ VCAP_KFS_NORMAL, /* lan966x is1 X2 */
+ VCAP_KFS_NORMAL_5TUPLE_IP4, /* sparx5 is0 X6 */
+ VCAP_KFS_NORMAL_7TUPLE, /* sparx5 is0 X12 */
+ VCAP_KFS_NORMAL_IP6, /* lan966x is1 X4 */
+ VCAP_KFS_OAM, /* lan966x is2 X2 */
+ VCAP_KFS_PURE_5TUPLE_IP4, /* sparx5 is0 X3 */
+ VCAP_KFS_RT, /* lan966x is1 X1 */
+ VCAP_KFS_SMAC_SIP4, /* lan966x is2 X1 */
+ VCAP_KFS_SMAC_SIP6, /* lan966x is2 X2 */
+ VCAP_KFS_VID, /* lan966x es0 X1 */
+};
+
+/* List of keyfields with description
+ *
+ * Keys ending in _IS are booleans derived from frame data
+ * Keys ending in _CLS are classified frame data
+ *
+ * VCAP_KF_8021BR_ECID_BASE: W12, sparx5: is0
+ * Used by 802.1BR Bridge Port Extension in an E-Tag
+ * VCAP_KF_8021BR_ECID_EXT: W8, sparx5: is0
+ * Used by 802.1BR Bridge Port Extension in an E-Tag
+ * VCAP_KF_8021BR_E_TAGGED: W1, sparx5: is0
+ * Set for frames containing an E-TAG (802.1BR Ethertype 893f)
+ * VCAP_KF_8021BR_GRP: W2, sparx5: is0
+ * E-Tag group bits in 802.1BR Bridge Port Extension
+ * VCAP_KF_8021BR_IGR_ECID_BASE: W12, sparx5: is0
+ * Used by 802.1BR Bridge Port Extension in an E-Tag
+ * VCAP_KF_8021BR_IGR_ECID_EXT: W8, sparx5: is0
+ * Used by 802.1BR Bridge Port Extension in an E-Tag
+ * VCAP_KF_8021CB_R_TAGGED_IS: W1, lan966x: is1
+ * Set if frame contains an RTAG: IEEE 802.1CB (FRER Redundancy tag, Ethertype
+ * 0xf1c1)
+ * VCAP_KF_8021Q_DEI0: W1, sparx5: is0, lan966x: is1
+ * First DEI in multiple vlan tags (outer tag or default port tag)
+ * VCAP_KF_8021Q_DEI1: W1, sparx5: is0, lan966x: is1
+ * Second DEI in multiple vlan tags (inner tag)
+ * VCAP_KF_8021Q_DEI2: W1, sparx5: is0
+ * Third DEI in multiple vlan tags (not always available)
+ * VCAP_KF_8021Q_DEI_CLS: W1, sparx5: is2/es2, lan966x: is2/es0
+ * Classified DEI
+ * VCAP_KF_8021Q_PCP0: W3, sparx5: is0, lan966x: is1
+ * First PCP in multiple vlan tags (outer tag or default port tag)
+ * VCAP_KF_8021Q_PCP1: W3, sparx5: is0, lan966x: is1
+ * Second PCP in multiple vlan tags (inner tag)
+ * VCAP_KF_8021Q_PCP2: W3, sparx5: is0
+ * Third PCP in multiple vlan tags (not always available)
+ * VCAP_KF_8021Q_PCP_CLS: W3, sparx5: is2/es2, lan966x: is2/es0
+ * Classified PCP
+ * VCAP_KF_8021Q_TPID: W3, sparx5: es0
+ * TPID for outer tag: 0: Customer TPID 1: Service TPID (88A8 or programmable)
+ * VCAP_KF_8021Q_TPID0: sparx5 is0 W3, lan966x is1 W1
+ * First TPID in multiple vlan tags (outer tag or default port tag)
+ * VCAP_KF_8021Q_TPID1: sparx5 is0 W3, lan966x is1 W1
+ * Second TPID in multiple vlan tags (inner tag)
+ * VCAP_KF_8021Q_TPID2: W3, sparx5: is0
+ * Third TPID in multiple vlan tags (not always available)
+ * VCAP_KF_8021Q_VID0: W12, sparx5: is0, lan966x: is1
+ * First VID in multiple vlan tags (outer tag or default port tag)
+ * VCAP_KF_8021Q_VID1: W12, sparx5: is0, lan966x: is1
+ * Second VID in multiple vlan tags (inner tag)
+ * VCAP_KF_8021Q_VID2: W12, sparx5: is0
+ * Third VID in multiple vlan tags (not always available)
+ * VCAP_KF_8021Q_VID_CLS: sparx5 is2 W13, sparx5 es0 W13, sparx5 es2 W13,
+ * lan966x is2 W12, lan966x es0 W12
+ * Classified VID
+ * VCAP_KF_8021Q_VLAN_DBL_TAGGED_IS: W1, lan966x: is1
+ * Set if frame has two or more Q-tags. Independent of port VLAN awareness
+ * VCAP_KF_8021Q_VLAN_TAGGED_IS: W1, sparx5: is2/es2, lan966x: is1/is2
+ * Sparx5: Set if frame was received with a VLAN tag, LAN966x: Set if frame has
+ * one or more Q-tags. Independent of port VLAN awareness
+ * VCAP_KF_8021Q_VLAN_TAGS: W3, sparx5: is0
+ * Number of VLAN tags in frame: 0: Untagged, 1: Single tagged, 3: Double
+ * tagged, 7: Triple tagged
+ * VCAP_KF_ACL_GRP_ID: W8, sparx5: es2
+ * Used in interface map table
+ * VCAP_KF_ARP_ADDR_SPACE_OK_IS: W1, sparx5: is2/es2, lan966x: is2
+ * Set if hardware address is Ethernet
+ * VCAP_KF_ARP_LEN_OK_IS: W1, sparx5: is2/es2, lan966x: is2
+ * Set if hardware address length = 6 (Ethernet) and IP address length = 4 (IP).
+ * VCAP_KF_ARP_OPCODE: W2, sparx5: is2/es2, lan966x: is2
+ * ARP opcode
+ * VCAP_KF_ARP_OPCODE_UNKNOWN_IS: W1, sparx5: is2/es2, lan966x: is2
+ * Set if not one of the codes defined in VCAP_KF_ARP_OPCODE
+ * VCAP_KF_ARP_PROTO_SPACE_OK_IS: W1, sparx5: is2/es2, lan966x: is2
+ * Set if protocol address space is 0x0800
+ * VCAP_KF_ARP_SENDER_MATCH_IS: W1, sparx5: is2/es2, lan966x: is2
+ * Sender Hardware Address = SMAC (ARP)
+ * VCAP_KF_ARP_TGT_MATCH_IS: W1, sparx5: is2/es2, lan966x: is2
+ * Target Hardware Address = SMAC (RARP)
+ * VCAP_KF_COSID_CLS: W3, sparx5: es0/es2
+ * Class of service
+ * VCAP_KF_ES0_ISDX_KEY_ENA: W1, sparx5: es2
+ * The value taken from the IFH .FWD.ES0_ISDX_KEY_ENA
+ * VCAP_KF_ETYPE: W16, sparx5: is0/is2/es2, lan966x: is1/is2
+ * Ethernet type
+ * VCAP_KF_ETYPE_LEN_IS: W1, sparx5: is0/is2/es2, lan966x: is1
+ * Set if frame has EtherType >= 0x600
+ * VCAP_KF_HOST_MATCH: W1, lan966x: is2
+ * The action from the SMAC_SIP4 or SMAC_SIP6 lookups. Used for IP source
+ * guarding.
+ * VCAP_KF_IF_EGR_PORT_MASK: W32, sparx5: es2
+ * Egress port mask, one bit per port
+ * VCAP_KF_IF_EGR_PORT_MASK_RNG: W3, sparx5: es2
+ * Select which 32 port group is available in IF_EGR_PORT (or virtual ports or
+ * CPU queue)
+ * VCAP_KF_IF_EGR_PORT_NO: sparx5 es0 W7, lan966x es0 W4
+ * Egress port number
+ * VCAP_KF_IF_IGR_PORT: sparx5 is0 W7, sparx5 es2 W9, lan966x is1 W3, lan966x
+ * is2 W4, lan966x es0 W4
+ * Sparx5: Logical ingress port number retrieved from
+ * ANA_CL::PORT_ID_CFG.LPORT_NUM or ERLEG, LAN966x: ingress port number
+ * VCAP_KF_IF_IGR_PORT_MASK: sparx5 is0 W65, sparx5 is2 W32, sparx5 is2 W65,
+ * lan966x is1 W9, lan966x is2 W9
+ * Ingress port mask, one bit per port/erleg
+ * VCAP_KF_IF_IGR_PORT_MASK_L3: W1, sparx5: is2
+ * If set, IF_IGR_PORT_MASK, IF_IGR_PORT_MASK_RNG, and IF_IGR_PORT_MASK_SEL are
+ * used to specify L3 interfaces
+ * VCAP_KF_IF_IGR_PORT_MASK_RNG: W4, sparx5: is2
+ * Range selector for IF_IGR_PORT_MASK. Specifies which group of 32 ports are
+ * available in IF_IGR_PORT_MASK
+ * VCAP_KF_IF_IGR_PORT_MASK_SEL: W2, sparx5: is0/is2
+ * Mode selector for IF_IGR_PORT_MASK, applicable when IF_IGR_PORT_MASK_L3 == 0.
+ * Mapping: 0: DEFAULT 1: LOOPBACK 2: MASQUERADE 3: CPU_VD
+ * VCAP_KF_IF_IGR_PORT_SEL: W1, sparx5: es2
+ * Selector for IF_IGR_PORT: physical port number or ERLEG
+ * VCAP_KF_IP4_IS: W1, sparx5: is0/is2/es2, lan966x: is1/is2
+ * Set if frame has EtherType = 0x800 and IP version = 4
+ * VCAP_KF_IP_MC_IS: W1, sparx5: is0, lan966x: is1
+ * Set if frame is IPv4 frame and frame's destination MAC address is an IPv4
+ * multicast address (0x01005E0 /25). Set if frame is IPv6 frame and frame's
+ * destination MAC address is an IPv6 multicast address (0x3333/16).
+ * VCAP_KF_IP_PAYLOAD_5TUPLE: W32, sparx5: is0, lan966x: is1
+ * Payload bytes after IP header
+ * VCAP_KF_IP_PAYLOAD_S1_IP6: W112, lan966x: is1
+ * Payload after IPv6 header
+ * VCAP_KF_IP_SNAP_IS: W1, sparx5: is0, lan966x: is1
+ * Set if frame is IPv4, IPv6, or SNAP frame
+ * VCAP_KF_ISDX_CLS: sparx5 is2 W12, sparx5 es0 W12, sparx5 es2 W12, lan966x es0
+ * W8
+ * Classified ISDX
+ * VCAP_KF_ISDX_GT0_IS: W1, sparx5: is2/es0/es2, lan966x: is2/es0
+ * Set if classified ISDX > 0
+ * VCAP_KF_L2_BC_IS: W1, sparx5: is0/is2/es2, lan966x: is1/is2/es0
+ * Set if frame's destination MAC address is the broadcast address
+ * (FF-FF-FF-FF-FF-FF).
+ * VCAP_KF_L2_DMAC: W48, sparx5: is0/is2/es2, lan966x: is1/is2
+ * Destination MAC address
+ * VCAP_KF_L2_FRM_TYPE: W4, lan966x: is2
+ * Frame subtype for specific EtherTypes (MRP, DLR)
+ * VCAP_KF_L2_FWD_IS: W1, sparx5: is2
+ * Set if the frame is allowed to be forwarded to front ports
+ * VCAP_KF_L2_LLC: W40, lan966x: is2
+ * LLC header and data after up to two VLAN tags and the type/length field
+ * VCAP_KF_L2_MAC: W48, lan966x: is1
+ * MAC address (FIRST=1: SMAC, FIRST=0: DMAC)
+ * VCAP_KF_L2_MC_IS: W1, sparx5: is0/is2/es2, lan966x: is1/is2/es0
+ * Set if frame's destination MAC address is a multicast address (bit 40 = 1).
+ * VCAP_KF_L2_PAYLOAD0: W16, lan966x: is2
+ * Payload bytes 0-1 after the frame's EtherType
+ * VCAP_KF_L2_PAYLOAD1: W8, lan966x: is2
+ * Payload byte 4 after the frame's EtherType. This is specifically for PTP
+ * frames.
+ * VCAP_KF_L2_PAYLOAD2: W3, lan966x: is2
+ * Bits 7, 2, and 1 from payload byte 6 after the frame's EtherType. This is
+ * specifically for PTP frames.
+ * VCAP_KF_L2_PAYLOAD_ETYPE: W64, sparx5: is2/es2
+ * Byte 0-7 of L2 payload after Type/Len field and overloading for OAM
+ * VCAP_KF_L2_SMAC: W48, sparx5: is0/is2/es2, lan966x: is1/is2
+ * Source MAC address
+ * VCAP_KF_L2_SNAP: W40, lan966x: is2
+ * SNAP header after LLC header (AA-AA-03)
+ * VCAP_KF_L3_DIP_EQ_SIP_IS: W1, sparx5: is2/es2, lan966x: is2
+ * Set if Src IP matches Dst IP address
+ * VCAP_KF_L3_DPL_CLS: W1, sparx5: es0/es2, lan966x: es0
+ * The frames drop precedence level
+ * VCAP_KF_L3_DSCP: W6, sparx5: is0, lan966x: is1
+ * Frame's DSCP value
+ * VCAP_KF_L3_DST_IS: W1, sparx5: is2
+ * Set if lookup is done for egress router leg
+ * VCAP_KF_L3_FRAGMENT: W1, lan966x: is1/is2
+ * Set if IPv4 frame is fragmented
+ * VCAP_KF_L3_FRAGMENT_TYPE: W2, sparx5: is0/is2/es2
+ * L3 Fragmentation type (none, initial, suspicious, valid follow up)
+ * VCAP_KF_L3_FRAG_INVLD_L4_LEN: W1, sparx5: is0/is2
+ * Set if frame's L4 length is less than
+ * ANA_CL:COMMON:CLM_FRAGMENT_CFG.L4_MIN_LEN
+ * VCAP_KF_L3_FRAG_OFS_GT0: W1, lan966x: is1/is2
+ * Set if IPv4 frame is fragmented and it is not the first fragment
+ * VCAP_KF_L3_IP4_DIP: W32, sparx5: is0/is2/es2, lan966x: is1/is2
+ * Destination IPv4 Address
+ * VCAP_KF_L3_IP4_SIP: W32, sparx5: is0/is2/es2, lan966x: is1/is2
+ * Source IPv4 Address
+ * VCAP_KF_L3_IP6_DIP: sparx5 is0 W128, sparx5 is2 W128, sparx5 es2 W128,
+ * lan966x is1 W64, lan966x is1 W128, lan966x is2 W128
+ * Sparx5: Full IPv6 DIP, LAN966x: Either Full IPv6 DIP or a subset depending on
+ * frame type
+ * VCAP_KF_L3_IP6_DIP_MSB: W16, lan966x: is1
+ * MS 16 bits of IPv6 DIP
+ * VCAP_KF_L3_IP6_SIP: sparx5 is0 W128, sparx5 is2 W128, sparx5 es2 W128,
+ * lan966x is1 W128, lan966x is1 W64, lan966x is2 W128
+ * Sparx5: Full IPv6 SIP, LAN966x: Either Full IPv6 SIP or a subset depending on
+ * frame type
+ * VCAP_KF_L3_IP6_SIP_MSB: W16, lan966x: is1
+ * MS 16 bits of IPv6 SIP
+ * VCAP_KF_L3_IP_PROTO: W8, sparx5: is0/is2/es2, lan966x: is1/is2
+ * IPv4 frames: IP protocol. IPv6 frames: Next header, same as for IPV4
+ * VCAP_KF_L3_OPTIONS_IS: W1, sparx5: is0/is2/es2, lan966x: is1/is2
+ * Set if IPv4 frame contains options (IP len > 5)
+ * VCAP_KF_L3_PAYLOAD: sparx5 is2 W96, sparx5 is2 W40, sparx5 es2 W96, sparx5
+ * es2 W40, lan966x is2 W56
+ * Sparx5: Payload bytes after IP header. IPv4: IPv4 options are not parsed so
+ * payload is always taken 20 bytes after the start of the IPv4 header, LAN966x:
+ * Bytes 0-6 after IP header
+ * VCAP_KF_L3_RT_IS: W1, sparx5: is2/es2
+ * Set if frame has hit a router leg
+ * VCAP_KF_L3_TOS: W8, sparx5: is2/es2, lan966x: is2
+ * Sparx5: Frame's IPv4/IPv6 DSCP and ECN fields, LAN966x: IP TOS field
+ * VCAP_KF_L3_TTL_GT0: W1, sparx5: is2/es2, lan966x: is2
+ * Set if IPv4 TTL / IPv6 hop limit is greater than 0
+ * VCAP_KF_L4_1588_DOM: W8, lan966x: is2
+ * PTP over UDP: domainNumber
+ * VCAP_KF_L4_1588_VER: W4, lan966x: is2
+ * PTP over UDP: version
+ * VCAP_KF_L4_ACK: W1, sparx5: is2/es2, lan966x: is2
+ * Sparx5 and LAN966x: TCP flag ACK, LAN966x only: PTP over UDP: flagField bit 2
+ * (unicastFlag)
+ * VCAP_KF_L4_DPORT: W16, sparx5: is2/es2, lan966x: is2
+ * Sparx5: TCP/UDP destination port. Overloading for IP_7TUPLE: Non-TCP/UDP IP
+ * frames: L4_DPORT = L3_IP_PROTO, LAN966x: TCP/UDP destination port
+ * VCAP_KF_L4_FIN: W1, sparx5: is2/es2, lan966x: is2
+ * Sparx5: TCP flag FIN. LAN966x: TCP flag FIN, and for PTP over UDP: messageType bit 1
+ * VCAP_KF_L4_PAYLOAD: W64, sparx5: is2/es2
+ * Payload bytes after TCP/UDP header Overloading for IP_7TUPLE: Non TCP/UDP
+ * frames: Payload bytes 0-7 after IP header. IPv4 options are not parsed so
+ * payload is always taken 20 bytes after the start of the IPv4 header for non
+ * TCP/UDP IPv4 frames
+ * VCAP_KF_L4_PSH: W1, sparx5: is2/es2, lan966x: is2
+ * Sparx5: TCP flag PSH, LAN966x: TCP: TCP flag PSH. PTP over UDP: flagField bit
+ * 1 (twoStepFlag)
+ * VCAP_KF_L4_RNG: sparx5 is0 W8, sparx5 is2 W16, sparx5 es2 W16, lan966x is1
+ * W8, lan966x is2 W8
+ * Range checker bitmask (one for each range checker). Input into range checkers
+ * is taken from classified results (VID, DSCP) and frame (SPORT, DPORT, ETYPE,
+ * outer VID, inner VID)
+ * VCAP_KF_L4_RST: W1, sparx5: is2/es2, lan966x: is2
+ * Sparx5: TCP flag RST, LAN966x: TCP: TCP flag RST. PTP over UDP: messageType
+ * bit 3
+ * VCAP_KF_L4_SEQUENCE_EQ0_IS: W1, sparx5: is2/es2, lan966x: is2
+ * Set if TCP sequence number is 0, LAN966x: Overlaid with PTP over UDP:
+ * messageType bit 0
+ * VCAP_KF_L4_SPORT: W16, sparx5: is0/is2/es2, lan966x: is1/is2
+ * TCP/UDP source port
+ * VCAP_KF_L4_SPORT_EQ_DPORT_IS: W1, sparx5: is2/es2, lan966x: is2
+ * Set if UDP or TCP source port equals UDP or TCP destination port
+ * VCAP_KF_L4_SYN: W1, sparx5: is2/es2, lan966x: is2
+ * Sparx5: TCP flag SYN, LAN966x: TCP: TCP flag SYN. PTP over UDP: messageType
+ * bit 2
+ * VCAP_KF_L4_URG: W1, sparx5: is2/es2, lan966x: is2
+ * Sparx5: TCP flag URG, LAN966x: TCP: TCP flag URG. PTP over UDP: flagField bit
+ * 7 (reserved)
+ * VCAP_KF_LOOKUP_FIRST_IS: W1, sparx5: is0/is2/es2, lan966x: is1/is2
+ * Selects between entries relevant for first and second lookup. Set for first
+ * lookup, cleared for second lookup.
+ * VCAP_KF_LOOKUP_GEN_IDX: W12, sparx5: is0
+ * Generic index - for chaining CLM instances
+ * VCAP_KF_LOOKUP_GEN_IDX_SEL: W2, sparx5: is0
+ * Select the mode of the Generic Index
+ * VCAP_KF_LOOKUP_INDEX: W2, lan966x: is1
+ * 0: First lookup, 1: Second lookup, 2: Third lookup. Similar to VCAP_KF_FIRST
+ * but with extra info
+ * VCAP_KF_LOOKUP_PAG: W8, sparx5: is2, lan966x: is2
+ * Classified Policy Association Group: chains rules from IS1/CLM to IS2
+ * VCAP_KF_MIRROR_PROBE: W2, sparx5: es2
+ * Identifies frame copies generated as a result of mirroring
+ * VCAP_KF_OAM_CCM_CNTS_EQ0: W1, sparx5: is2/es2, lan966x: is2
+ * Dual-ended loss measurement counters in CCM frames are all zero
+ * VCAP_KF_OAM_DETECTED: W1, lan966x: is2
+ * This is missing in the datasheet, but present in the OAM keyset in XML
+ * VCAP_KF_OAM_FLAGS: W8, lan966x: is2
+ * Frame's OAM flags
+ * VCAP_KF_OAM_MEL_FLAGS: W7, lan966x: is2
+ * Encoding of MD level/MEG level (MEL)
+ * VCAP_KF_OAM_MEPID: W16, lan966x: is2
+ * CCM frame's OAM MEP ID
+ * VCAP_KF_OAM_OPCODE: W8, lan966x: is2
+ * Frame's OAM opcode
+ * VCAP_KF_OAM_VER: W5, lan966x: is2
+ * Frame's OAM version
+ * VCAP_KF_OAM_Y1731_IS: W1, sparx5: is2/es2, lan966x: is2
+ * Set if frame's EtherType = 0x8902
+ * VCAP_KF_PDU_TYPE: W4, lan966x: es0
+ * PDU type value (none, OAM CCM, MRP, DLR, RTE, IPv4, IPv6, OAM non-CCM)
+ * VCAP_KF_PROT_ACTIVE: W1, sparx5: es0/es2
+ * Protection is active
+ * VCAP_KF_RTP_ID: W10, lan966x: es0
+ * Classified RTP_ID
+ * VCAP_KF_RT_FRMID: W32, lan966x: is1
+ * Profinet or OPC-UA FrameId
+ * VCAP_KF_RT_TYPE: W2, lan966x: is1
+ * Encoding of frame's EtherType: 0: Other, 1: Profinet, 2: OPC-UA, 3: Custom
+ * (ANA::RT_CUSTOM)
+ * VCAP_KF_RT_VLAN_IDX: W3, lan966x: is1
+ * Real-time VLAN index from ANA::RT_VLAN_PCP
+ * VCAP_KF_TCP_IS: W1, sparx5: is0/is2/es2, lan966x: is1/is2
+ * Set if frame is IPv4 TCP frame (IP protocol = 6) or IPv6 TCP frames (Next
+ * header = 6)
+ * VCAP_KF_TCP_UDP_IS: W1, sparx5: is0/is2/es2, lan966x: is1
+ * Set if frame is IPv4/IPv6 TCP or UDP frame (IP protocol/next header equals 6
+ * or 17)
+ * VCAP_KF_TYPE: sparx5 is0 W2, sparx5 is0 W1, sparx5 is2 W4, sparx5 is2 W2,
+ * sparx5 es0 W1, sparx5 es2 W3, lan966x is1 W1, lan966x is1 W2, lan966x is2 W4,
+ * lan966x is2 W2
+ * Keyset type id - set by the API
+ */
+
+/* Keyfield names */
+enum vcap_key_field {
+ VCAP_KF_NO_VALUE, /* initial value */
+ VCAP_KF_8021BR_ECID_BASE,
+ VCAP_KF_8021BR_ECID_EXT,
+ VCAP_KF_8021BR_E_TAGGED,
+ VCAP_KF_8021BR_GRP,
+ VCAP_KF_8021BR_IGR_ECID_BASE,
+ VCAP_KF_8021BR_IGR_ECID_EXT,
+ VCAP_KF_8021CB_R_TAGGED_IS,
+ VCAP_KF_8021Q_DEI0,
+ VCAP_KF_8021Q_DEI1,
+ VCAP_KF_8021Q_DEI2,
+ VCAP_KF_8021Q_DEI_CLS,
+ VCAP_KF_8021Q_PCP0,
+ VCAP_KF_8021Q_PCP1,
+ VCAP_KF_8021Q_PCP2,
+ VCAP_KF_8021Q_PCP_CLS,
+ VCAP_KF_8021Q_TPID,
+ VCAP_KF_8021Q_TPID0,
+ VCAP_KF_8021Q_TPID1,
+ VCAP_KF_8021Q_TPID2,
+ VCAP_KF_8021Q_VID0,
+ VCAP_KF_8021Q_VID1,
+ VCAP_KF_8021Q_VID2,
+ VCAP_KF_8021Q_VID_CLS,
+ VCAP_KF_8021Q_VLAN_DBL_TAGGED_IS,
+ VCAP_KF_8021Q_VLAN_TAGGED_IS,
+ VCAP_KF_8021Q_VLAN_TAGS,
+ VCAP_KF_ACL_GRP_ID,
+ VCAP_KF_ARP_ADDR_SPACE_OK_IS,
+ VCAP_KF_ARP_LEN_OK_IS,
+ VCAP_KF_ARP_OPCODE,
+ VCAP_KF_ARP_OPCODE_UNKNOWN_IS,
+ VCAP_KF_ARP_PROTO_SPACE_OK_IS,
+ VCAP_KF_ARP_SENDER_MATCH_IS,
+ VCAP_KF_ARP_TGT_MATCH_IS,
+ VCAP_KF_COSID_CLS,
+ VCAP_KF_ES0_ISDX_KEY_ENA,
+ VCAP_KF_ETYPE,
+ VCAP_KF_ETYPE_LEN_IS,
+ VCAP_KF_HOST_MATCH,
+ VCAP_KF_IF_EGR_PORT_MASK,
+ VCAP_KF_IF_EGR_PORT_MASK_RNG,
+ VCAP_KF_IF_EGR_PORT_NO,
+ VCAP_KF_IF_IGR_PORT,
+ VCAP_KF_IF_IGR_PORT_MASK,
+ VCAP_KF_IF_IGR_PORT_MASK_L3,
+ VCAP_KF_IF_IGR_PORT_MASK_RNG,
+ VCAP_KF_IF_IGR_PORT_MASK_SEL,
+ VCAP_KF_IF_IGR_PORT_SEL,
+ VCAP_KF_IP4_IS,
+ VCAP_KF_IP_MC_IS,
+ VCAP_KF_IP_PAYLOAD_5TUPLE,
+ VCAP_KF_IP_PAYLOAD_S1_IP6,
+ VCAP_KF_IP_SNAP_IS,
+ VCAP_KF_ISDX_CLS,
+ VCAP_KF_ISDX_GT0_IS,
+ VCAP_KF_L2_BC_IS,
+ VCAP_KF_L2_DMAC,
+ VCAP_KF_L2_FRM_TYPE,
+ VCAP_KF_L2_FWD_IS,
+ VCAP_KF_L2_LLC,
+ VCAP_KF_L2_MAC,
+ VCAP_KF_L2_MC_IS,
+ VCAP_KF_L2_PAYLOAD0,
+ VCAP_KF_L2_PAYLOAD1,
+ VCAP_KF_L2_PAYLOAD2,
+ VCAP_KF_L2_PAYLOAD_ETYPE,
+ VCAP_KF_L2_SMAC,
+ VCAP_KF_L2_SNAP,
+ VCAP_KF_L3_DIP_EQ_SIP_IS,
+ VCAP_KF_L3_DPL_CLS,
+ VCAP_KF_L3_DSCP,
+ VCAP_KF_L3_DST_IS,
+ VCAP_KF_L3_FRAGMENT,
+ VCAP_KF_L3_FRAGMENT_TYPE,
+ VCAP_KF_L3_FRAG_INVLD_L4_LEN,
+ VCAP_KF_L3_FRAG_OFS_GT0,
+ VCAP_KF_L3_IP4_DIP,
+ VCAP_KF_L3_IP4_SIP,
+ VCAP_KF_L3_IP6_DIP,
+ VCAP_KF_L3_IP6_DIP_MSB,
+ VCAP_KF_L3_IP6_SIP,
+ VCAP_KF_L3_IP6_SIP_MSB,
+ VCAP_KF_L3_IP_PROTO,
+ VCAP_KF_L3_OPTIONS_IS,
+ VCAP_KF_L3_PAYLOAD,
+ VCAP_KF_L3_RT_IS,
+ VCAP_KF_L3_TOS,
+ VCAP_KF_L3_TTL_GT0,
+ VCAP_KF_L4_1588_DOM,
+ VCAP_KF_L4_1588_VER,
+ VCAP_KF_L4_ACK,
+ VCAP_KF_L4_DPORT,
+ VCAP_KF_L4_FIN,
+ VCAP_KF_L4_PAYLOAD,
+ VCAP_KF_L4_PSH,
+ VCAP_KF_L4_RNG,
+ VCAP_KF_L4_RST,
+ VCAP_KF_L4_SEQUENCE_EQ0_IS,
+ VCAP_KF_L4_SPORT,
+ VCAP_KF_L4_SPORT_EQ_DPORT_IS,
+ VCAP_KF_L4_SYN,
+ VCAP_KF_L4_URG,
+ VCAP_KF_LOOKUP_FIRST_IS,
+ VCAP_KF_LOOKUP_GEN_IDX,
+ VCAP_KF_LOOKUP_GEN_IDX_SEL,
+ VCAP_KF_LOOKUP_INDEX,
+ VCAP_KF_LOOKUP_PAG,
+ VCAP_KF_MIRROR_PROBE,
+ VCAP_KF_OAM_CCM_CNTS_EQ0,
+ VCAP_KF_OAM_DETECTED,
+ VCAP_KF_OAM_FLAGS,
+ VCAP_KF_OAM_MEL_FLAGS,
+ VCAP_KF_OAM_MEPID,
+ VCAP_KF_OAM_OPCODE,
+ VCAP_KF_OAM_VER,
+ VCAP_KF_OAM_Y1731_IS,
+ VCAP_KF_PDU_TYPE,
+ VCAP_KF_PROT_ACTIVE,
+ VCAP_KF_RTP_ID,
+ VCAP_KF_RT_FRMID,
+ VCAP_KF_RT_TYPE,
+ VCAP_KF_RT_VLAN_IDX,
+ VCAP_KF_TCP_IS,
+ VCAP_KF_TCP_UDP_IS,
+ VCAP_KF_TYPE,
+};
+
+/* Actionset names with origin information */
+enum vcap_actionfield_set {
+ VCAP_AFS_NO_VALUE, /* initial value */
+ VCAP_AFS_BASE_TYPE, /* sparx5 is2 X3, sparx5 es2 X3, lan966x is2 X2 */
+ VCAP_AFS_CLASSIFICATION, /* sparx5 is0 X2 */
+ VCAP_AFS_CLASS_REDUCED, /* sparx5 is0 X1 */
+ VCAP_AFS_ES0, /* sparx5 es0 X1 */
+ VCAP_AFS_FULL, /* sparx5 is0 X3 */
+ VCAP_AFS_S1, /* lan966x is1 X1 */
+ VCAP_AFS_SMAC_SIP, /* lan966x is2 X1 */
+ VCAP_AFS_VID, /* lan966x es0 X1 */
+};
+
+/* List of actionfields with description
+ *
+ * VCAP_AF_ACL_ID: W6, lan966x: is2
+ * Logical ID for the entry. This ID is extracted together with the frame in the
+ * CPU extraction header. Only applicable to actions with CPU_COPY_ENA or
+ * HIT_ME_ONCE set.
+ * VCAP_AF_CLS_VID_SEL: W3, sparx5: is0
+ * Controls the classified VID: 0: VID_NONE: No action. 1: VID_ADD: New VID =
+ * old VID + VID_VAL. 2: VID_REPLACE: New VID = VID_VAL. 3: VID_FIRST_TAG: New
+ * VID = VID from frame's first tag (outer tag) if available, otherwise VID_VAL.
+ * 4: VID_SECOND_TAG: New VID = VID from frame's second tag (middle tag) if
+ * available, otherwise VID_VAL. 5: VID_THIRD_TAG: New VID = VID from frame's
+ * third tag (inner tag) if available, otherwise VID_VAL.
+ * VCAP_AF_CNT_ID: sparx5 is2 W12, sparx5 es2 W11
+ * Counter ID, used per lookup to index the 4K frame counters (ANA_ACL:CNT_TBL).
+ * Multiple VCAP IS2 entries can use the same counter.
+ * VCAP_AF_COPY_PORT_NUM: W7, sparx5: es2
+ * QSYS port number when FWD_MODE is redirect or copy
+ * VCAP_AF_COPY_QUEUE_NUM: W16, sparx5: es2
+ * QSYS queue number when FWD_MODE is redirect or copy
+ * VCAP_AF_CPU_COPY_ENA: W1, sparx5: is2/es2, lan966x: is2
+ * Setting this bit to 1 causes all frames that hit this action to be copied to
+ * the CPU extraction queue specified in CPU_QUEUE_NUM.
+ * VCAP_AF_CPU_QU: W3, sparx5: es0
+ * CPU extraction queue. Used when FWD_SEL > 0 and PIPELINE_ACT = XTR.
+ * VCAP_AF_CPU_QUEUE_NUM: W3, sparx5: is2/es2, lan966x: is2
+ * CPU queue number. Used when CPU_COPY_ENA is set.
+ * VCAP_AF_CUSTOM_ACE_TYPE_ENA: W4, lan966x: is1
+ * Enables use of custom keys in IS2. Bits 3:2 control second lookup in IS2
+ * while bits 1:0 control first lookup. Encoding per lookup: 0: Disabled. 1:
+ * Extract 40 bytes after position corresponding to the location of the IPv4
+ * header and use as key. 2: Extract 40 bytes after SMAC and use as key
+ * VCAP_AF_DEI_A_VAL: W1, sparx5: es0, lan966x: es0
+ * DEI used in ES0 tag A. See TAG_A_DEI_SEL.
+ * VCAP_AF_DEI_B_VAL: W1, sparx5: es0, lan966x: es0
+ * DEI used in ES0 tag B. See TAG_B_DEI_SEL.
+ * VCAP_AF_DEI_C_VAL: W1, sparx5: es0
+ * DEI used in ES0 tag C. See TAG_C_DEI_SEL.
+ * VCAP_AF_DEI_ENA: W1, sparx5: is0, lan966x: is1
+ * If set, use DEI_VAL as classified DEI value. Otherwise, DEI from basic
+ * classification is used
+ * VCAP_AF_DEI_VAL: W1, sparx5: is0, lan966x: is1
+ * See DEI_ENA
+ * VCAP_AF_DLR_SEL: W2, lan966x: is1
+ * 0: No changes to port-based selection in ANA:PORT:OAM_CFG.DLR_ENA. 1: Enable
+ * DLR frame processing. 2: Disable DLR processing
+ * VCAP_AF_DP_ENA: W1, sparx5: is0, lan966x: is1
+ * If set, use DP_VAL as classified drop precedence level. Otherwise, drop
+ * precedence level from basic classification is used.
+ * VCAP_AF_DP_VAL: sparx5 is0 W2, lan966x is1 W1
+ * See DP_ENA.
+ * VCAP_AF_DSCP_ENA: W1, sparx5: is0, lan966x: is1
+ * If set, use DSCP_VAL as classified DSCP value. Otherwise, DSCP value from
+ * basic classification is used.
+ * VCAP_AF_DSCP_SEL: W3, sparx5: es0
+ * Selects source for DSCP. 0: Controlled by port configuration and IFH. 1:
+ * Classified DSCP via IFH. 2: DSCP_VAL. 3: Reserved. 4: Mapped using mapping
+ * table 0, otherwise use DSCP_VAL. 5: Mapped using mapping table 1, otherwise
+ * use mapping table 0. 6: Mapped using mapping table 2, otherwise use DSCP_VAL.
+ * 7: Mapped using mapping table 3, otherwise use mapping table 2
+ * VCAP_AF_DSCP_VAL: W6, sparx5: is0/es0, lan966x: is1
+ * See DSCP_ENA.
+ * VCAP_AF_ES2_REW_CMD: W3, sparx5: es2
+ * Command forwarded to REW: 0: No action. 1: SWAP MAC addresses. 2: Do L2CP
+ * DMAC translation when entering or leaving a tunnel.
+ * VCAP_AF_ESDX: sparx5 es0 W13, lan966x es0 W8
+ * Egress counter index. Used to index egress counter set as defined in
+ * REW::STAT_CFG.
+ * VCAP_AF_FWD_KILL_ENA: W1, lan966x: is2
+ * Setting this bit to 1 denies forwarding of the frame to any front
+ * port. The frame can still be copied to the CPU by other actions.
+ * VCAP_AF_FWD_MODE: W2, sparx5: es2
+ * Forward selector: 0: Forward. 1: Discard. 2: Redirect. 3: Copy.
+ * VCAP_AF_FWD_SEL: W2, sparx5: es0
+ * ES0 Forward selector. 0: No action. 1: Copy to loopback interface. 2:
+ * Redirect to loopback interface. 3: Discard
+ * VCAP_AF_HIT_ME_ONCE: W1, sparx5: is2/es2, lan966x: is2
+ * Setting this bit to 1 causes the first frame that hits this action where the
+ * HIT_CNT counter is zero to be copied to the CPU extraction queue specified in
+ * CPU_QUEUE_NUM. The HIT_CNT counter is then incremented and any frames that
+ * hit this action later are not copied to the CPU. To re-enable the HIT_ME_ONCE
+ * functionality, the HIT_CNT counter must be cleared.
+ * VCAP_AF_HOST_MATCH: W1, lan966x: is2
+ * Used for IP source guarding. If set, it signals that the host is valid (for
+ * instance a valid combination of source MAC address and source IP address).
+ * HOST_MATCH is input to the IS2 keys.
+ * VCAP_AF_IGNORE_PIPELINE_CTRL: W1, sparx5: is2/es2
+ * Ignore ingress pipeline control. This enforces the use of the VCAP IS2 action
+ * even when the pipeline control has terminated the frame before VCAP IS2.
+ * VCAP_AF_INTR_ENA: W1, sparx5: is2/es2
+ * If set, an interrupt is triggered when this rule is hit
+ * VCAP_AF_ISDX_ADD_REPLACE_SEL: W1, sparx5: is0
+ * Controls the classified ISDX. 0: New ISDX = old ISDX + ISDX_VAL. 1: New ISDX
+ * = ISDX_VAL.
+ * VCAP_AF_ISDX_ADD_VAL: W8, lan966x: is1
+ * If ISDX_REPLACE_ENA is set, ISDX_ADD_VAL is used directly as the new ISDX.
+ * Encoding: ISDX_REPLACE_ENA=0, ISDX_ADD_VAL=0: Disabled. ISDX_REPLACE_ENA=0,
+ * ISDX_ADD_VAL>0: Add value to classified ISDX. ISDX_REPLACE_ENA=1: Replace
+ * with ISDX_ADD_VAL value.
+ * VCAP_AF_ISDX_ENA: W1, lan966x: is2
+ * Setting this bit to 1 causes the classified ISDX to be set to the value of
+ * POLICE_IDX[8:0].
+ * VCAP_AF_ISDX_REPLACE_ENA: W1, lan966x: is1
+ * If set, classified ISDX is set to ISDX_ADD_VAL.
+ * VCAP_AF_ISDX_VAL: W12, sparx5: is0
+ * See isdx_add_replace_sel
+ * VCAP_AF_LOOP_ENA: W1, sparx5: es0
+ * 0: Forward based on PIPELINE_PT and FWD_SEL
+ * VCAP_AF_LRN_DIS: W1, sparx5: is2, lan966x: is2
+ * Setting this bit to 1 disables learning of frames hitting this action.
+ * VCAP_AF_MAP_IDX: W9, sparx5: is0
+ * Index for QoS mapping table lookup
+ * VCAP_AF_MAP_KEY: W3, sparx5: is0
+ * Key type for QoS mapping table lookup. 0: DEI0, PCP0 (outer tag). 1: DEI1,
+ * PCP1 (middle tag). 2: DEI2, PCP2 (inner tag). 3: MPLS TC. 4: PCP0 (outer
+ * tag). 5: E-DEI, E-PCP (E-TAG). 6: DSCP if available, otherwise none. 7: DSCP
+ * if available, otherwise DEI0, PCP0 (outer tag) if available using MAP_IDX+8,
+ * otherwise none
+ * VCAP_AF_MAP_LOOKUP_SEL: W2, sparx5: is0
+ * Selects which of the two QoS Mapping Table lookups that MAP_KEY and MAP_IDX
+ * are applied to. 0: No changes to the QoS Mapping Table lookup. 1: Update key
+ * type and index for QoS Mapping Table lookup #0. 2: Update key type and index
+ * for QoS Mapping Table lookup #1. 3: Reserved.
+ * VCAP_AF_MASK_MODE: sparx5 is0 W3, sparx5 is2 W3, lan966x is2 W2
+ * Controls the PORT_MASK use. Sparx5: 0: OR_DSTMASK, 1: AND_VLANMASK, 2:
+ * REPLACE_PGID, 3: REPLACE_ALL, 4: REDIR_PGID, 5: OR_PGID_MASK, 6: VSTAX, 7:
+ * Not applicable. LAN966X: 0: No action, 1: Permit/deny (AND), 2: Policy
+ * forwarding (DMAC lookup), 3: Redirect. The CPU port is untouched by
+ * MASK_MODE.
+ * VCAP_AF_MATCH_ID: W16, sparx5: is2
+ * Logical ID for the entry. The MATCH_ID is extracted together with the frame
+ * if the frame is forwarded to the CPU (CPU_COPY_ENA). The result is placed in
+ * IFH.CL_RSLT.
+ * VCAP_AF_MATCH_ID_MASK: W16, sparx5: is2
+ * Mask used by MATCH_ID.
+ * VCAP_AF_MIRROR_ENA: W1, lan966x: is2
+ * Setting this bit to 1 causes frames to be mirrored to the mirror target port
+ * (ANA::MIRRORPORTS).
+ * VCAP_AF_MIRROR_PROBE: W2, sparx5: is2
+ * Mirroring performed according to configuration of a mirror probe. 0: No
+ * mirroring. 1: Mirror probe 0. 2: Mirror probe 1. 3: Mirror probe 2
+ * VCAP_AF_MIRROR_PROBE_ID: W2, sparx5: es2
+ * Signals a mirror probe to be placed in the IFH. Only possible when FWD_MODE
+ * is copy. 0: No mirroring. 1-3: Use mirror probe 0-2.
+ * VCAP_AF_MRP_SEL: W2, lan966x: is1
+ * 0: No changes to port-based selection in ANA:PORT:OAM_CFG.MRP_ENA. 1: Enable
+ * MRP frame processing. 2: Disable MRP processing
+ * VCAP_AF_NXT_IDX: W12, sparx5: is0
+ * Index used as part of key (field G_IDX) in the next lookup.
+ * VCAP_AF_NXT_IDX_CTRL: W3, sparx5: is0
+ * Controls the generation of the G_IDX used in the VCAP CLM next lookup
+ * VCAP_AF_OAM_SEL: W3, lan966x: is1
+ * 0: No changes to port-based selection in ANA:PORT:OAM_CFG.OAM_CFG. 1: Enable
+ * OAM frame processing for untagged frames. 2: Enable OAM frame processing for
+ * single-tagged frames. 3: Enable OAM frame processing for double-tagged
+ * frames. 4: Disable OAM frame processing
+ * VCAP_AF_PAG_OVERRIDE_MASK: W8, sparx5: is0, lan966x: is1
+ * Bits set in this mask will override PAG_VAL from port profile. New PAG = (PAG
+ * (input) AND ~PAG_OVERRIDE_MASK) OR (PAG_VAL AND PAG_OVERRIDE_MASK)
+ * VCAP_AF_PAG_VAL: W8, sparx5: is0, lan966x: is1
+ * See PAG_OVERRIDE_MASK.
+ * VCAP_AF_PCP_A_VAL: W3, sparx5: es0, lan966x: es0
+ * PCP used in ES0 tag A. See TAG_A_PCP_SEL.
+ * VCAP_AF_PCP_B_VAL: W3, sparx5: es0, lan966x: es0
+ * PCP used in ES0 tag B. See TAG_B_PCP_SEL.
+ * VCAP_AF_PCP_C_VAL: W3, sparx5: es0
+ * PCP used in ES0 tag C. See TAG_C_PCP_SEL.
+ * VCAP_AF_PCP_ENA: W1, sparx5: is0, lan966x: is1
+ * If set, use PCP_VAL as classified PCP value. Otherwise, PCP from basic
+ * classification is used.
+ * VCAP_AF_PCP_VAL: W3, sparx5: is0, lan966x: is1
+ * See PCP_ENA.
+ * VCAP_AF_PIPELINE_ACT: W1, sparx5: es0
+ * Pipeline action when FWD_SEL > 0. 0: XTR. CPU_QU selects CPU extraction queue
+ * 1: LBK_ASM.
+ * VCAP_AF_PIPELINE_FORCE_ENA: W1, sparx5: is2
+ * If set, use PIPELINE_PT unconditionally and set PIPELINE_ACT = NONE if
+ * PIPELINE_PT == NONE. Overrules previous settings of pipeline point.
+ * VCAP_AF_PIPELINE_PT: sparx5 is2 W5, sparx5 es0 W2
+ * Pipeline point used if PIPELINE_FORCE_ENA is set
+ * VCAP_AF_POLICE_ENA: W1, sparx5: is2/es2, lan966x: is1/is2
+ * If set, POLICE_IDX is used to lookup ANA::POL.
+ * VCAP_AF_POLICE_IDX: sparx5 is2 W6, sparx5 es2 W6, lan966x is1 W9, lan966x is2
+ * W9
+ * Policer index.
+ * VCAP_AF_POLICE_REMARK: W1, sparx5: es2
+ * If set, frames exceeding policer rates are marked as yellow but not
+ * discarded.
+ * VCAP_AF_POLICE_VCAP_ONLY: W1, lan966x: is2
+ * Disable policing from QoS, and port policers. Only the VCAP policer selected
+ * by POLICE_IDX is active. Only applies to the second lookup.
+ * VCAP_AF_POP_VAL: W2, sparx5: es0
+ * Controls popping of Q-tags. The final number of Q-tags popped is calculated
+ * as shown in section 4.28.7.2 VLAN Pop Decision.
+ * VCAP_AF_PORT_MASK: sparx5 is0 W65, sparx5 is2 W68, lan966x is2 W8
+ * Port mask applied to the forwarding decision based on MASK_MODE.
+ * VCAP_AF_PUSH_CUSTOMER_TAG: W2, sparx5: es0
+ * Selects tag C mode: 0: Do not push tag C. 1: Push tag C if
+ * IFH.VSTAX.TAG.WAS_TAGGED = 1. 2: Push tag C if IFH.VSTAX.TAG.WAS_TAGGED = 0.
+ * 3: Push tag C if UNTAG_VID_ENA = 0 or (C-TAG.VID != VID_C_VAL).
+ * VCAP_AF_PUSH_INNER_TAG: W1, sparx5: es0, lan966x: es0
+ * Controls inner tagging. 0: Do not push ES0 tag B as inner tag. 1: Push ES0
+ * tag B as inner tag.
+ * VCAP_AF_PUSH_OUTER_TAG: W2, sparx5: es0, lan966x: es0
+ * Controls outer tagging. 0: No ES0 tag A: Port tag is allowed if enabled on
+ * port. 1: ES0 tag A: Push ES0 tag A. No port tag. 2: Force port tag: Always
+ * push port tag. No ES0 tag A. 3: Force untag: Never push port tag or ES0 tag
+ * A.
+ * VCAP_AF_QOS_ENA: W1, sparx5: is0, lan966x: is1
+ * If set, use QOS_VAL as classified QoS class. Otherwise, QoS class from basic
+ * classification is used.
+ * VCAP_AF_QOS_VAL: W3, sparx5: is0, lan966x: is1
+ * See QOS_ENA.
+ * VCAP_AF_REW_OP: W16, lan966x: is2
+ * Rewriter operation command.
+ * VCAP_AF_RT_DIS: W1, sparx5: is2
+ * If set, routing is disallowed. Only applies when IS_INNER_ACL is 0. See also
+ * IGR_ACL_ENA, EGR_ACL_ENA, and RLEG_STAT_IDX.
+ * VCAP_AF_SFID_ENA: W1, lan966x: is1
+ * If set, SFID_VAL is used to lookup ANA::SFID.
+ * VCAP_AF_SFID_VAL: W8, lan966x: is1
+ * Stream filter identifier.
+ * VCAP_AF_SGID_ENA: W1, lan966x: is1
+ * If set, SGID_VAL is used to lookup ANA::SGID.
+ * VCAP_AF_SGID_VAL: W8, lan966x: is1
+ * Stream gate identifier.
+ * VCAP_AF_SWAP_MACS_ENA: W1, sparx5: es0
+ * This setting is only active when FWD_SEL = 1 or FWD_SEL = 2 and PIPELINE_ACT
+ * = LBK_ASM. 0: No action. 1: Swap MACs and clear bit 40 in new SMAC.
+ * VCAP_AF_TAG_A_DEI_SEL: sparx5 es0 W3, lan966x es0 W2
+ * Selects DEI for ES0 tag A. 0: Classified DEI. 1: DEI_A_VAL. 2: DP and QoS
+ * mapped to DEI (per port table). 3: DP.
+ * VCAP_AF_TAG_A_PCP_SEL: sparx5 es0 W3, lan966x es0 W2
+ * Selects PCP for ES0 tag A. 0: Classified PCP. 1: PCP_A_VAL. 2: DP and QoS
+ * mapped to PCP (per port table). 3: QoS class.
+ * VCAP_AF_TAG_A_TPID_SEL: sparx5 es0 W3, lan966x es0 W2
+ * Selects TPID for ES0 tag A: 0: 0x8100. 1: 0x88A8. 2: Custom
+ * (REW:PORT:PORT_VLAN_CFG.PORT_TPID). 3: If IFH.TAG_TYPE = 0 then 0x8100 else
+ * custom.
+ * VCAP_AF_TAG_A_VID_SEL: sparx5 es0 W2, lan966x es0 W1
+ * Selects VID for ES0 tag A. 0: Classified VID + VID_A_VAL. 1: VID_A_VAL.
+ * VCAP_AF_TAG_B_DEI_SEL: sparx5 es0 W3, lan966x es0 W2
+ * Selects DEI for ES0 tag B. 0: Classified DEI. 1: DEI_B_VAL. 2: DP and QoS
+ * mapped to DEI (per port table). 3: DP.
+ * VCAP_AF_TAG_B_PCP_SEL: sparx5 es0 W3, lan966x es0 W2
+ * Selects PCP for ES0 tag B. 0: Classified PCP. 1: PCP_B_VAL. 2: DP and QoS
+ * mapped to PCP (per port table). 3: QoS class.
+ * VCAP_AF_TAG_B_TPID_SEL: sparx5 es0 W3, lan966x es0 W2
+ * Selects TPID for ES0 tag B. 0: 0x8100. 1: 0x88A8. 2: Custom
+ * (REW:PORT:PORT_VLAN_CFG.PORT_TPID). 3: If IFH.TAG_TYPE = 0 then 0x8100 else
+ * custom.
+ * VCAP_AF_TAG_B_VID_SEL: sparx5 es0 W2, lan966x es0 W1
+ * Selects VID for ES0 tag B. 0: Classified VID + VID_B_VAL. 1: VID_B_VAL.
+ * VCAP_AF_TAG_C_DEI_SEL: W3, sparx5: es0
+ * Selects DEI source for ES0 tag C. 0: Classified DEI. 1: DEI_C_VAL. 2:
+ * REW::DP_MAP.DP [IFH.VSTAX.QOS.DP]. 3: DEI of popped VLAN tag if available
+ * (IFH.VSTAX.TAG.WAS_TAGGED = 1 and tot_pop_cnt>0) else DEI_C_VAL. 4: Mapped
+ * using mapping table 0, otherwise use DEI_C_VAL. 5: Mapped using mapping table
+ * 1, otherwise use mapping table 0. 6: Mapped using mapping table 2, otherwise
+ * use DEI_C_VAL. 7: Mapped using mapping table 3, otherwise use mapping table
+ * 2.
+ * VCAP_AF_TAG_C_PCP_SEL: W3, sparx5: es0
+ * Selects PCP source for ES0 tag C. 0: Classified PCP. 1: PCP_C_VAL. 2:
+ * Reserved. 3: PCP of popped VLAN tag if available (IFH.VSTAX.TAG.WAS_TAGGED=1
+ * and tot_pop_cnt>0) else PCP_C_VAL. 4: Mapped using mapping table 0, otherwise
+ * use PCP_C_VAL. 5: Mapped using mapping table 1, otherwise use mapping table
+ * 0. 6: Mapped using mapping table 2, otherwise use PCP_C_VAL. 7: Mapped using
+ * mapping table 3, otherwise use mapping table 2.
+ * VCAP_AF_TAG_C_TPID_SEL: W3, sparx5: es0
+ * Selects TPID for ES0 tag C. 0: 0x8100. 1: 0x88A8. 2: Custom 1. 3: Custom 2.
+ * 4: Custom 3. 5: See TAG_A_TPID_SEL.
+ * VCAP_AF_TAG_C_VID_SEL: W2, sparx5: es0
+ * Selects VID for ES0 tag C. The resulting VID is termed C-TAG.VID. 0:
+ * Classified VID. 1: VID_C_VAL. 2: IFH.ENCAP.GVID. 3: Reserved.
+ * VCAP_AF_TYPE: W1, sparx5: is0, lan966x: is1
+ * Actionset type id - Set by the API
+ * VCAP_AF_UNTAG_VID_ENA: W1, sparx5: es0
+ * Controls insertion of tag C. Untag or insert mode can be selected. See
+ * PUSH_CUSTOMER_TAG.
+ * VCAP_AF_VID_A_VAL: W12, sparx5: es0, lan966x: es0
+ * VID used in ES0 tag A. See TAG_A_VID_SEL.
+ * VCAP_AF_VID_B_VAL: W12, sparx5: es0, lan966x: es0
+ * VID used in ES0 tag B. See TAG_B_VID_SEL.
+ * VCAP_AF_VID_C_VAL: W12, sparx5: es0
+ * VID used in ES0 tag C. See TAG_C_VID_SEL.
+ * VCAP_AF_VID_REPLACE_ENA: W1, lan966x: is1
+ * Controls the classified VID: VID_REPLACE_ENA=0: Add VID_ADD_VAL to basic
+ * classified VID and use result as new classified VID. VID_REPLACE_ENA = 1:
+ * Replace basic classified VID with VID_VAL value and use as new classified
+ * VID.
+ * VCAP_AF_VID_VAL: sparx5 is0 W13, lan966x is1 W12
+ * New VID Value
+ * VCAP_AF_VLAN_POP_CNT: W2, lan966x: is1
+ * See VLAN_POP_CNT_ENA
+ * VCAP_AF_VLAN_POP_CNT_ENA: W1, lan966x: is1
+ * If set, use VLAN_POP_CNT as the number of VLAN tags to pop from the incoming
+ * frame. This number is used by the Rewriter. Otherwise, VLAN_POP_CNT from
+ * ANA:PORT:VLAN_CFG.VLAN_POP_CNT is used
+ */
+
+/* Actionfield names */
+enum vcap_action_field {
+ VCAP_AF_NO_VALUE, /* initial value */
+ VCAP_AF_ACL_ID,
+ VCAP_AF_CLS_VID_SEL,
+ VCAP_AF_CNT_ID,
+ VCAP_AF_COPY_PORT_NUM,
+ VCAP_AF_COPY_QUEUE_NUM,
+ VCAP_AF_CPU_COPY_ENA,
+ VCAP_AF_CPU_QU,
+ VCAP_AF_CPU_QUEUE_NUM,
+ VCAP_AF_CUSTOM_ACE_TYPE_ENA,
+ VCAP_AF_DEI_A_VAL,
+ VCAP_AF_DEI_B_VAL,
+ VCAP_AF_DEI_C_VAL,
+ VCAP_AF_DEI_ENA,
+ VCAP_AF_DEI_VAL,
+ VCAP_AF_DLR_SEL,
+ VCAP_AF_DP_ENA,
+ VCAP_AF_DP_VAL,
+ VCAP_AF_DSCP_ENA,
+ VCAP_AF_DSCP_SEL,
+ VCAP_AF_DSCP_VAL,
+ VCAP_AF_ES2_REW_CMD,
+ VCAP_AF_ESDX,
+ VCAP_AF_FWD_KILL_ENA,
+ VCAP_AF_FWD_MODE,
+ VCAP_AF_FWD_SEL,
+ VCAP_AF_HIT_ME_ONCE,
+ VCAP_AF_HOST_MATCH,
+ VCAP_AF_IGNORE_PIPELINE_CTRL,
+ VCAP_AF_INTR_ENA,
+ VCAP_AF_ISDX_ADD_REPLACE_SEL,
+ VCAP_AF_ISDX_ADD_VAL,
+ VCAP_AF_ISDX_ENA,
+ VCAP_AF_ISDX_REPLACE_ENA,
+ VCAP_AF_ISDX_VAL,
+ VCAP_AF_LOOP_ENA,
+ VCAP_AF_LRN_DIS,
+ VCAP_AF_MAP_IDX,
+ VCAP_AF_MAP_KEY,
+ VCAP_AF_MAP_LOOKUP_SEL,
+ VCAP_AF_MASK_MODE,
+ VCAP_AF_MATCH_ID,
+ VCAP_AF_MATCH_ID_MASK,
+ VCAP_AF_MIRROR_ENA,
+ VCAP_AF_MIRROR_PROBE,
+ VCAP_AF_MIRROR_PROBE_ID,
+ VCAP_AF_MRP_SEL,
+ VCAP_AF_NXT_IDX,
+ VCAP_AF_NXT_IDX_CTRL,
+ VCAP_AF_OAM_SEL,
+ VCAP_AF_PAG_OVERRIDE_MASK,
+ VCAP_AF_PAG_VAL,
+ VCAP_AF_PCP_A_VAL,
+ VCAP_AF_PCP_B_VAL,
+ VCAP_AF_PCP_C_VAL,
+ VCAP_AF_PCP_ENA,
+ VCAP_AF_PCP_VAL,
+ VCAP_AF_PIPELINE_ACT,
+ VCAP_AF_PIPELINE_FORCE_ENA,
+ VCAP_AF_PIPELINE_PT,
+ VCAP_AF_POLICE_ENA,
+ VCAP_AF_POLICE_IDX,
+ VCAP_AF_POLICE_REMARK,
+ VCAP_AF_POLICE_VCAP_ONLY,
+ VCAP_AF_POP_VAL,
+ VCAP_AF_PORT_MASK,
+ VCAP_AF_PUSH_CUSTOMER_TAG,
+ VCAP_AF_PUSH_INNER_TAG,
+ VCAP_AF_PUSH_OUTER_TAG,
+ VCAP_AF_QOS_ENA,
+ VCAP_AF_QOS_VAL,
+ VCAP_AF_REW_OP,
+ VCAP_AF_RT_DIS,
+ VCAP_AF_SFID_ENA,
+ VCAP_AF_SFID_VAL,
+ VCAP_AF_SGID_ENA,
+ VCAP_AF_SGID_VAL,
+ VCAP_AF_SWAP_MACS_ENA,
+ VCAP_AF_TAG_A_DEI_SEL,
+ VCAP_AF_TAG_A_PCP_SEL,
+ VCAP_AF_TAG_A_TPID_SEL,
+ VCAP_AF_TAG_A_VID_SEL,
+ VCAP_AF_TAG_B_DEI_SEL,
+ VCAP_AF_TAG_B_PCP_SEL,
+ VCAP_AF_TAG_B_TPID_SEL,
+ VCAP_AF_TAG_B_VID_SEL,
+ VCAP_AF_TAG_C_DEI_SEL,
+ VCAP_AF_TAG_C_PCP_SEL,
+ VCAP_AF_TAG_C_TPID_SEL,
+ VCAP_AF_TAG_C_VID_SEL,
+ VCAP_AF_TYPE,
+ VCAP_AF_UNTAG_VID_ENA,
+ VCAP_AF_VID_A_VAL,
+ VCAP_AF_VID_B_VAL,
+ VCAP_AF_VID_C_VAL,
+ VCAP_AF_VID_REPLACE_ENA,
+ VCAP_AF_VID_VAL,
+ VCAP_AF_VLAN_POP_CNT,
+ VCAP_AF_VLAN_POP_CNT_ENA,
+};
+
+#endif /* __VCAP_AG_API__ */
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api.c b/drivers/net/ethernet/microchip/vcap/vcap_api.c
new file mode 100644
index 0000000000..ef980e4e5b
--- /dev/null
+++ b/drivers/net/ethernet/microchip/vcap/vcap_api.c
@@ -0,0 +1,3599 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip VCAP API
+ *
+ * Copyright (c) 2022 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include <linux/types.h>
+
+#include "vcap_api_private.h"
+
+static int keyfield_size_table[] = {
+ [VCAP_FIELD_BIT] = sizeof(struct vcap_u1_key),
+ [VCAP_FIELD_U32] = sizeof(struct vcap_u32_key),
+ [VCAP_FIELD_U48] = sizeof(struct vcap_u48_key),
+ [VCAP_FIELD_U56] = sizeof(struct vcap_u56_key),
+ [VCAP_FIELD_U64] = sizeof(struct vcap_u64_key),
+ [VCAP_FIELD_U72] = sizeof(struct vcap_u72_key),
+ [VCAP_FIELD_U112] = sizeof(struct vcap_u112_key),
+ [VCAP_FIELD_U128] = sizeof(struct vcap_u128_key),
+};
+
+static int actionfield_size_table[] = {
+ [VCAP_FIELD_BIT] = sizeof(struct vcap_u1_action),
+ [VCAP_FIELD_U32] = sizeof(struct vcap_u32_action),
+ [VCAP_FIELD_U48] = sizeof(struct vcap_u48_action),
+ [VCAP_FIELD_U56] = sizeof(struct vcap_u56_action),
+ [VCAP_FIELD_U64] = sizeof(struct vcap_u64_action),
+ [VCAP_FIELD_U72] = sizeof(struct vcap_u72_action),
+ [VCAP_FIELD_U112] = sizeof(struct vcap_u112_action),
+ [VCAP_FIELD_U128] = sizeof(struct vcap_u128_action),
+};
+
+/* Moving a rule in the VCAP address space */
+struct vcap_rule_move {
+ int addr; /* address to move */
+ int offset; /* change in address */
+ int count; /* blocksize of addresses to move */
+};
+
+/* Stores the filter cookie and chain id that enabled the port */
+struct vcap_enabled_port {
+ struct list_head list; /* for insertion in enabled ports list */
+ struct net_device *ndev; /* the enabled port */
+ unsigned long cookie; /* filter that enabled the port */
+ int src_cid; /* source chain id */
+ int dst_cid; /* destination chain id */
+};
+
+void vcap_iter_set(struct vcap_stream_iter *itr, int sw_width,
+ const struct vcap_typegroup *tg, u32 offset)
+{
+ memset(itr, 0, sizeof(*itr));
+ itr->offset = offset;
+ itr->sw_width = sw_width;
+ itr->regs_per_sw = DIV_ROUND_UP(sw_width, 32);
+ itr->tg = tg;
+}
+
+static void vcap_iter_skip_tg(struct vcap_stream_iter *itr)
+{
+ /* Compensate the field offset for preceding typegroups.
+ * A typegroup table ends with an all-zero terminator.
+ */
+ while (itr->tg->width && itr->offset >= itr->tg->offset) {
+ itr->offset += itr->tg->width;
+ itr->tg++; /* next typegroup */
+ }
+}
+
+void vcap_iter_update(struct vcap_stream_iter *itr)
+{
+ int sw_idx, sw_bitpos;
+
+ /* Calculate the subword index and bitposition for current bit */
+ sw_idx = itr->offset / itr->sw_width;
+ sw_bitpos = itr->offset % itr->sw_width;
+ /* Calculate the register index and bitposition for current bit */
+ itr->reg_idx = (sw_idx * itr->regs_per_sw) + (sw_bitpos / 32);
+ itr->reg_bitpos = sw_bitpos % 32;
+}
+
+void vcap_iter_init(struct vcap_stream_iter *itr, int sw_width,
+ const struct vcap_typegroup *tg, u32 offset)
+{
+ vcap_iter_set(itr, sw_width, tg, offset);
+ vcap_iter_skip_tg(itr);
+ vcap_iter_update(itr);
+}
+
+void vcap_iter_next(struct vcap_stream_iter *itr)
+{
+ itr->offset++;
+ vcap_iter_skip_tg(itr);
+ vcap_iter_update(itr);
+}
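
Ignoring the typegroup skipping, the iterator arithmetic in vcap_iter_update() maps a bit offset within a multi-subword entry to a 32-bit register index and bit position: subwords narrower than a whole number of registers still start on a fresh register boundary. A standalone model of just that arithmetic (illustrative names; the 52-bit subword width is only an example value):

#include <stdio.h>

/* Map a bit offset to a register index and bit position, mirroring
 * the computation in vcap_iter_update().
 */
static void offset_to_reg(int offset, int sw_width,
			  int *reg_idx, int *reg_bitpos)
{
	int regs_per_sw = (sw_width + 31) / 32;	/* DIV_ROUND_UP(sw_width, 32) */
	int sw_idx = offset / sw_width;		/* which subword */
	int sw_bitpos = offset % sw_width;	/* bit within the subword */

	*reg_idx = sw_idx * regs_per_sw + sw_bitpos / 32;
	*reg_bitpos = sw_bitpos % 32;
}

int main(void)
{
	int reg, bit;

	offset_to_reg(60, 52, &reg, &bit);	/* 52-bit subwords: 2 regs each */
	printf("offset 60 -> reg %d bit %d\n", reg, bit);	/* reg 2 bit 8 */
	return 0;
}
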
+
+static void vcap_set_bit(u32 *stream, struct vcap_stream_iter *itr, bool value)
+{
+ u32 mask = BIT(itr->reg_bitpos);
+ u32 *p = &stream[itr->reg_idx];
+
+ if (value)
+ *p |= mask;
+ else
+ *p &= ~mask;
+}
+
+static void vcap_encode_bit(u32 *stream, struct vcap_stream_iter *itr, bool val)
+{
+ /* When intersected by a type group field, stream the type group bits
+ * before continuing with the value bit
+ */
+ while (itr->tg->width &&
+ itr->offset >= itr->tg->offset &&
+ itr->offset < itr->tg->offset + itr->tg->width) {
+ int tg_bitpos = itr->tg->offset - itr->offset;
+
+ vcap_set_bit(stream, itr, (itr->tg->value >> tg_bitpos) & 0x1);
+ itr->offset++;
+ vcap_iter_update(itr);
+ }
+ vcap_set_bit(stream, itr, val);
+}
+
+static void vcap_encode_field(u32 *stream, struct vcap_stream_iter *itr,
+ int width, const u8 *value)
+{
+ int idx;
+
+ /* Loop over the field value bits and add the value bits one by one to
+ * the output stream.
+ */
+ for (idx = 0; idx < width; idx++) {
+ u8 bidx = idx & GENMASK(2, 0);
+
+ /* Encode one field value bit */
+ vcap_encode_bit(stream, itr, (value[idx / 8] >> bidx) & 0x1);
+ vcap_iter_next(itr);
+ }
+}
+
+static void vcap_encode_typegroups(u32 *stream, int sw_width,
+ const struct vcap_typegroup *tg,
+ bool mask)
+{
+ struct vcap_stream_iter iter;
+ int idx;
+
+ /* Mask bits must be set to zeros (inverted later when writing to the
+ * mask cache register), so that the mask typegroup bits consist of
+ * match-1 or match-0, or both
+ */
+ vcap_iter_set(&iter, sw_width, tg, 0);
+ while (iter.tg->width) {
+ /* Set position to current typegroup bit */
+ iter.offset = iter.tg->offset;
+ vcap_iter_update(&iter);
+ for (idx = 0; idx < iter.tg->width; idx++) {
+ /* Iterate over current typegroup bits. Mask typegroup
+ * bits are always set
+ */
+ if (mask)
+ vcap_set_bit(stream, &iter, 0x1);
+ else
+ vcap_set_bit(stream, &iter,
+ (iter.tg->value >> idx) & 0x1);
+ iter.offset++;
+ vcap_iter_update(&iter);
+ }
+ iter.tg++; /* next typegroup */
+ }
+}
+
+static bool vcap_bitarray_zero(int width, u8 *value)
+{
+ int bytes = DIV_ROUND_UP(width, BITS_PER_BYTE);
+ u8 total = 0, bmask = 0xff;
+ int rwidth = width;
+ int idx;
+
+ for (idx = 0; idx < bytes; ++idx, rwidth -= BITS_PER_BYTE) {
+ if (rwidth && rwidth < BITS_PER_BYTE)
+ bmask = (1 << rwidth) - 1;
+ total += value[idx] & bmask;
+ }
+ return total == 0;
+}
+
+static bool vcap_get_bit(u32 *stream, struct vcap_stream_iter *itr)
+{
+ u32 mask = BIT(itr->reg_bitpos);
+ u32 *p = &stream[itr->reg_idx];
+
+ return !!(*p & mask);
+}
+
+static void vcap_decode_field(u32 *stream, struct vcap_stream_iter *itr,
+ int width, u8 *value)
+{
+ int idx;
+
+ /* Loop over the field value bits and get the field bits and
+ * set them in the output value byte array
+ */
+ for (idx = 0; idx < width; idx++) {
+ u8 bidx = idx & 0x7;
+
+ /* Decode one field value bit */
+ if (vcap_get_bit(stream, itr))
+ *value |= 1 << bidx;
+ vcap_iter_next(itr);
+ if (bidx == 7)
+ value++;
+ }
+}
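
vcap_encode_field() and vcap_decode_field() are mirror images: both walk a field one bit at a time, indexing the little-endian value byte array with idx / 8 and idx & 7. A round-trip sketch with typegroups and subwords stripped away, so a stream bit position is simply offset + idx (illustrative userspace code):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stream a little-endian byte array into u32 words, bit by bit */
static void encode_bits(uint32_t *stream, int start, int width,
			const uint8_t *value)
{
	for (int idx = 0; idx < width; idx++) {
		int pos = start + idx;

		if ((value[idx / 8] >> (idx & 7)) & 1)
			stream[pos / 32] |= 1u << (pos % 32);
	}
}

/* Recover the byte array from the stream; 'value' must start zeroed */
static void decode_bits(const uint32_t *stream, int start, int width,
			uint8_t *value)
{
	for (int idx = 0; idx < width; idx++) {
		int pos = start + idx;

		if ((stream[pos / 32] >> (pos % 32)) & 1)
			value[idx / 8] |= 1 << (idx & 7);
	}
}

int main(void)
{
	uint32_t stream[4] = { 0 };
	uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint8_t out[6] = { 0 };

	encode_bits(stream, 17, 48, mac);	/* 48-bit field at bit 17 */
	decode_bits(stream, 17, 48, out);
	printf("%s\n", memcmp(mac, out, 6) == 0 ? "match" : "mismatch");
	return 0;
}
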
+
+/* Verify that the type id in the stream matches the type id of the keyset */
+static bool vcap_verify_keystream_keyset(struct vcap_control *vctrl,
+ enum vcap_type vt,
+ u32 *keystream,
+ u32 *mskstream,
+ enum vcap_keyfield_set keyset)
+{
+ const struct vcap_info *vcap = &vctrl->vcaps[vt];
+ const struct vcap_field *typefld;
+ const struct vcap_typegroup *tgt;
+ const struct vcap_field *fields;
+ struct vcap_stream_iter iter;
+ const struct vcap_set *info;
+ u32 value = 0;
+ u32 mask = 0;
+
+ if (vcap_keyfield_count(vctrl, vt, keyset) == 0)
+ return false;
+
+ info = vcap_keyfieldset(vctrl, vt, keyset);
+ /* Check that the keyset is valid */
+ if (!info)
+ return false;
+
+ /* a type_id of value -1 means that there is no type field */
+ if (info->type_id == (u8)-1)
+ return true;
+
+ /* Get a valid typegroup for the specific keyset */
+ tgt = vcap_keyfield_typegroup(vctrl, vt, keyset);
+ if (!tgt)
+ return false;
+
+ fields = vcap_keyfields(vctrl, vt, keyset);
+ if (!fields)
+ return false;
+
+ typefld = &fields[VCAP_KF_TYPE];
+ vcap_iter_init(&iter, vcap->sw_width, tgt, typefld->offset);
+ vcap_decode_field(mskstream, &iter, typefld->width, (u8 *)&mask);
+ /* no type info if there are no mask bits */
+ if (vcap_bitarray_zero(typefld->width, (u8 *)&mask))
+ return false;
+
+ /* Get the value of the type field in the stream and compare to the
+ * one defined in the vcap keyset
+ */
+ vcap_iter_init(&iter, vcap->sw_width, tgt, typefld->offset);
+ vcap_decode_field(keystream, &iter, typefld->width, (u8 *)&value);
+
+ return (value & mask) == (info->type_id & mask);
+}
+
+/* Verify that the typegroup bits have the correct values */
+static int vcap_verify_typegroups(u32 *stream, int sw_width,
+ const struct vcap_typegroup *tgt, bool mask,
+ int sw_max)
+{
+ struct vcap_stream_iter iter;
+ int sw_cnt, idx;
+
+ vcap_iter_set(&iter, sw_width, tgt, 0);
+ sw_cnt = 0;
+ while (iter.tg->width) {
+ u32 value = 0;
+ u32 tg_value = iter.tg->value;
+
+ if (mask)
+ tg_value = (1 << iter.tg->width) - 1;
+ /* Set position to current typegroup bit */
+ iter.offset = iter.tg->offset;
+ vcap_iter_update(&iter);
+ for (idx = 0; idx < iter.tg->width; idx++) {
+ /* Decode one typegroup bit */
+ if (vcap_get_bit(stream, &iter))
+ value |= 1 << idx;
+ iter.offset++;
+ vcap_iter_update(&iter);
+ }
+ if (value != tg_value)
+ return -EINVAL;
+ iter.tg++; /* next typegroup */
+ sw_cnt++;
+ /* Stop checking more typegroups */
+ if (sw_max && sw_cnt >= sw_max)
+ break;
+ }
+ return 0;
+}
+
+/* Find the subword width of the key typegroup that matches the stream data */
+static int vcap_find_keystream_typegroup_sw(struct vcap_control *vctrl,
+ enum vcap_type vt, u32 *stream,
+ bool mask, int sw_max)
+{
+ const struct vcap_typegroup **tgt;
+ int sw_idx, res;
+
+ tgt = vctrl->vcaps[vt].keyfield_set_typegroups;
+ /* Try the longest subword match first */
+ for (sw_idx = vctrl->vcaps[vt].sw_count; sw_idx >= 0; sw_idx--) {
+ if (!tgt[sw_idx])
+ continue;
+
+ res = vcap_verify_typegroups(stream, vctrl->vcaps[vt].sw_width,
+ tgt[sw_idx], mask, sw_max);
+ if (res == 0)
+ return sw_idx;
+ }
+ return -EINVAL;
+}
+
+/* Verify that the typegroup information, subword count, keyset and type id
+ * are in sync and correct, return the list of matching keysets
+ */
+int
+vcap_find_keystream_keysets(struct vcap_control *vctrl,
+ enum vcap_type vt,
+ u32 *keystream,
+ u32 *mskstream,
+ bool mask, int sw_max,
+ struct vcap_keyset_list *kslist)
+{
+ const struct vcap_set *keyfield_set;
+ int sw_count, idx;
+
+ sw_count = vcap_find_keystream_typegroup_sw(vctrl, vt, keystream, mask,
+ sw_max);
+ if (sw_count < 0)
+ return sw_count;
+
+ keyfield_set = vctrl->vcaps[vt].keyfield_set;
+ for (idx = 0; idx < vctrl->vcaps[vt].keyfield_set_size; ++idx) {
+ if (keyfield_set[idx].sw_per_item != sw_count)
+ continue;
+
+ if (vcap_verify_keystream_keyset(vctrl, vt, keystream,
+ mskstream, idx))
+ vcap_keyset_list_add(kslist, idx);
+ }
+ if (kslist->cnt > 0)
+ return 0;
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(vcap_find_keystream_keysets);
+
+/* Read key data from a VCAP address and discover if there are any rule keysets
+ * here
+ */
+int vcap_addr_keysets(struct vcap_control *vctrl,
+ struct net_device *ndev,
+ struct vcap_admin *admin,
+ int addr,
+ struct vcap_keyset_list *kslist)
+{
+ enum vcap_type vt = admin->vtype;
+ int keyset_sw_regs, idx;
+ u32 key = 0, mask = 0;
+
+ /* Read the cache at the specified address */
+ keyset_sw_regs = DIV_ROUND_UP(vctrl->vcaps[vt].sw_width, 32);
+ vctrl->ops->update(ndev, admin, VCAP_CMD_READ, VCAP_SEL_ALL, addr);
+ vctrl->ops->cache_read(ndev, admin, VCAP_SEL_ENTRY, 0,
+ keyset_sw_regs);
+ /* Skip uninitialized key/mask entries */
+ for (idx = 0; idx < keyset_sw_regs; ++idx) {
+ key |= ~admin->cache.keystream[idx];
+ mask |= admin->cache.maskstream[idx];
+ }
+ if (key == 0 && mask == 0)
+ return -EINVAL;
+ /* Decode and locate the keysets */
+ return vcap_find_keystream_keysets(vctrl, vt, admin->cache.keystream,
+ admin->cache.maskstream, false, 0,
+ kslist);
+}
+EXPORT_SYMBOL_GPL(vcap_addr_keysets);
+
+/* Return the list of keyfields for the keyset */
+const struct vcap_field *vcap_keyfields(struct vcap_control *vctrl,
+ enum vcap_type vt,
+ enum vcap_keyfield_set keyset)
+{
+ /* Check that the keyset exists in the vcap keyset list */
+ if (keyset >= vctrl->vcaps[vt].keyfield_set_size)
+ return NULL;
+ return vctrl->vcaps[vt].keyfield_set_map[keyset];
+}
+
+/* Return the keyset information for the keyset */
+const struct vcap_set *vcap_keyfieldset(struct vcap_control *vctrl,
+ enum vcap_type vt,
+ enum vcap_keyfield_set keyset)
+{
+ const struct vcap_set *kset;
+
+ /* Check that the keyset exists in the vcap keyset list */
+ if (keyset >= vctrl->vcaps[vt].keyfield_set_size)
+ return NULL;
+ kset = &vctrl->vcaps[vt].keyfield_set[keyset];
+ if (kset->sw_per_item == 0 || kset->sw_per_item > vctrl->vcaps[vt].sw_count)
+ return NULL;
+ return kset;
+}
+EXPORT_SYMBOL_GPL(vcap_keyfieldset);
+
+/* Return the typegroup table for the matching keyset (using subword size) */
+const struct vcap_typegroup *
+vcap_keyfield_typegroup(struct vcap_control *vctrl,
+ enum vcap_type vt, enum vcap_keyfield_set keyset)
+{
+ const struct vcap_set *kset = vcap_keyfieldset(vctrl, vt, keyset);
+
+ /* Check that the keyset is valid */
+ if (!kset)
+ return NULL;
+ return vctrl->vcaps[vt].keyfield_set_typegroups[kset->sw_per_item];
+}
+
+/* Return the number of keyfields in the keyset */
+int vcap_keyfield_count(struct vcap_control *vctrl,
+ enum vcap_type vt, enum vcap_keyfield_set keyset)
+{
+ /* Check that the keyset exists in the vcap keyset list */
+ if (keyset >= vctrl->vcaps[vt].keyfield_set_size)
+ return 0;
+ return vctrl->vcaps[vt].keyfield_set_map_size[keyset];
+}
+
+static void vcap_encode_keyfield(struct vcap_rule_internal *ri,
+ const struct vcap_client_keyfield *kf,
+ const struct vcap_field *rf,
+ const struct vcap_typegroup *tgt)
+{
+ int sw_width = ri->vctrl->vcaps[ri->admin->vtype].sw_width;
+ struct vcap_cache_data *cache = &ri->admin->cache;
+ struct vcap_stream_iter iter;
+ const u8 *value, *mask;
+
+ /* Encode the fields for the key and the mask in their respective
+ * streams, respecting the subword width.
+ */
+ switch (kf->ctrl.type) {
+ case VCAP_FIELD_BIT:
+ value = &kf->data.u1.value;
+ mask = &kf->data.u1.mask;
+ break;
+ case VCAP_FIELD_U32:
+ value = (const u8 *)&kf->data.u32.value;
+ mask = (const u8 *)&kf->data.u32.mask;
+ break;
+ case VCAP_FIELD_U48:
+ value = kf->data.u48.value;
+ mask = kf->data.u48.mask;
+ break;
+ case VCAP_FIELD_U56:
+ value = kf->data.u56.value;
+ mask = kf->data.u56.mask;
+ break;
+ case VCAP_FIELD_U64:
+ value = kf->data.u64.value;
+ mask = kf->data.u64.mask;
+ break;
+ case VCAP_FIELD_U72:
+ value = kf->data.u72.value;
+ mask = kf->data.u72.mask;
+ break;
+ case VCAP_FIELD_U112:
+ value = kf->data.u112.value;
+ mask = kf->data.u112.mask;
+ break;
+ case VCAP_FIELD_U128:
+ value = kf->data.u128.value;
+ mask = kf->data.u128.mask;
+ break;
+ }
+ vcap_iter_init(&iter, sw_width, tgt, rf->offset);
+ vcap_encode_field(cache->keystream, &iter, rf->width, value);
+ vcap_iter_init(&iter, sw_width, tgt, rf->offset);
+ vcap_encode_field(cache->maskstream, &iter, rf->width, mask);
+}
+
+static void vcap_encode_keyfield_typegroups(struct vcap_control *vctrl,
+ struct vcap_rule_internal *ri,
+ const struct vcap_typegroup *tgt)
+{
+ int sw_width = vctrl->vcaps[ri->admin->vtype].sw_width;
+ struct vcap_cache_data *cache = &ri->admin->cache;
+
+ /* Encode the typegroup bits for the key and the mask in their streams,
+ * respecting the subword width.
+ */
+ vcap_encode_typegroups(cache->keystream, sw_width, tgt, false);
+ vcap_encode_typegroups(cache->maskstream, sw_width, tgt, true);
+}
+
+/* Copy data from src to dst but reverse the data in chunks of 32 bits.
+ * For example, if src is 00:11:22:33:44:55 where 55 is the LSB, the dst
+ * will have the value 22:33:44:55:00:11 (bytes listed MSB first).
+ */
+static void vcap_copy_to_w32be(u8 *dst, const u8 *src, int size)
+{
+ for (int idx = 0; idx < size; ++idx) {
+ int first_byte_index = 0;
+ int nidx;
+
+ first_byte_index = size - (((idx >> 2) + 1) << 2);
+ if (first_byte_index < 0)
+ first_byte_index = 0;
+ nidx = idx + first_byte_index - (idx & ~0x3);
+ dst[nidx] = src[idx];
+ }
+}
+
+static void
+vcap_copy_from_client_keyfield(struct vcap_rule *rule,
+ struct vcap_client_keyfield *dst,
+ const struct vcap_client_keyfield *src)
+{
+ struct vcap_rule_internal *ri = to_intrule(rule);
+ const struct vcap_client_keyfield_data *sdata;
+ struct vcap_client_keyfield_data *ddata;
+ int size;
+
+ dst->ctrl.type = src->ctrl.type;
+ dst->ctrl.key = src->ctrl.key;
+ INIT_LIST_HEAD(&dst->ctrl.list);
+ sdata = &src->data;
+ ddata = &dst->data;
+
+ if (!ri->admin->w32be) {
+ memcpy(ddata, sdata, sizeof(dst->data));
+ return;
+ }
+
+ size = keyfield_size_table[dst->ctrl.type] / 2;
+
+ switch (dst->ctrl.type) {
+ case VCAP_FIELD_BIT:
+ case VCAP_FIELD_U32:
+ memcpy(ddata, sdata, sizeof(dst->data));
+ break;
+ case VCAP_FIELD_U48:
+ vcap_copy_to_w32be(ddata->u48.value, sdata->u48.value, size);
+ vcap_copy_to_w32be(ddata->u48.mask, sdata->u48.mask, size);
+ break;
+ case VCAP_FIELD_U56:
+ vcap_copy_to_w32be(ddata->u56.value, sdata->u56.value, size);
+ vcap_copy_to_w32be(ddata->u56.mask, sdata->u56.mask, size);
+ break;
+ case VCAP_FIELD_U64:
+ vcap_copy_to_w32be(ddata->u64.value, sdata->u64.value, size);
+ vcap_copy_to_w32be(ddata->u64.mask, sdata->u64.mask, size);
+ break;
+ case VCAP_FIELD_U72:
+ vcap_copy_to_w32be(ddata->u72.value, sdata->u72.value, size);
+ vcap_copy_to_w32be(ddata->u72.mask, sdata->u72.mask, size);
+ break;
+ case VCAP_FIELD_U112:
+ vcap_copy_to_w32be(ddata->u112.value, sdata->u112.value, size);
+ vcap_copy_to_w32be(ddata->u112.mask, sdata->u112.mask, size);
+ break;
+ case VCAP_FIELD_U128:
+ vcap_copy_to_w32be(ddata->u128.value, sdata->u128.value, size);
+ vcap_copy_to_w32be(ddata->u128.mask, sdata->u128.mask, size);
+ break;
+ }
+}
+
+static void
+vcap_copy_from_client_actionfield(struct vcap_rule *rule,
+ struct vcap_client_actionfield *dst,
+ const struct vcap_client_actionfield *src)
+{
+ struct vcap_rule_internal *ri = to_intrule(rule);
+ const struct vcap_client_actionfield_data *sdata;
+ struct vcap_client_actionfield_data *ddata;
+ int size;
+
+ dst->ctrl.type = src->ctrl.type;
+ dst->ctrl.action = src->ctrl.action;
+ INIT_LIST_HEAD(&dst->ctrl.list);
+ sdata = &src->data;
+ ddata = &dst->data;
+
+ if (!ri->admin->w32be) {
+ memcpy(ddata, sdata, sizeof(dst->data));
+ return;
+ }
+
+ size = actionfield_size_table[dst->ctrl.type];
+
+ switch (dst->ctrl.type) {
+ case VCAP_FIELD_BIT:
+ case VCAP_FIELD_U32:
+ memcpy(ddata, sdata, sizeof(dst->data));
+ break;
+ case VCAP_FIELD_U48:
+ vcap_copy_to_w32be(ddata->u48.value, sdata->u48.value, size);
+ break;
+ case VCAP_FIELD_U56:
+ vcap_copy_to_w32be(ddata->u56.value, sdata->u56.value, size);
+ break;
+ case VCAP_FIELD_U64:
+ vcap_copy_to_w32be(ddata->u64.value, sdata->u64.value, size);
+ break;
+ case VCAP_FIELD_U72:
+ vcap_copy_to_w32be(ddata->u72.value, sdata->u72.value, size);
+ break;
+ case VCAP_FIELD_U112:
+ vcap_copy_to_w32be(ddata->u112.value, sdata->u112.value, size);
+ break;
+ case VCAP_FIELD_U128:
+ vcap_copy_to_w32be(ddata->u128.value, sdata->u128.value, size);
+ break;
+ }
+}
+
+static int vcap_encode_rule_keyset(struct vcap_rule_internal *ri)
+{
+ const struct vcap_client_keyfield *ckf;
+ const struct vcap_typegroup *tg_table;
+ struct vcap_client_keyfield tempkf;
+ const struct vcap_field *kf_table;
+ int keyset_size;
+
+ /* Get a valid set of fields for the specific keyset */
+ kf_table = vcap_keyfields(ri->vctrl, ri->admin->vtype, ri->data.keyset);
+ if (!kf_table) {
+ pr_err("%s:%d: no fields available for this keyset: %d\n",
+ __func__, __LINE__, ri->data.keyset);
+ return -EINVAL;
+ }
+ /* Get a valid typegroup for the specific keyset */
+ tg_table = vcap_keyfield_typegroup(ri->vctrl, ri->admin->vtype,
+ ri->data.keyset);
+ if (!tg_table) {
+ pr_err("%s:%d: no typegroups available for this keyset: %d\n",
+ __func__, __LINE__, ri->data.keyset);
+ return -EINVAL;
+ }
+ /* Get a valid size for the specific keyset */
+ keyset_size = vcap_keyfield_count(ri->vctrl, ri->admin->vtype,
+ ri->data.keyset);
+ if (keyset_size == 0) {
+ pr_err("%s:%d: zero field count for this keyset: %d\n",
+ __func__, __LINE__, ri->data.keyset);
+ return -EINVAL;
+ }
+ /* Iterate over the keyfields (key, mask) in the rule
+ * and encode these bits
+ */
+ if (list_empty(&ri->data.keyfields)) {
+ pr_err("%s:%d: no keyfields in the rule\n", __func__, __LINE__);
+ return -EINVAL;
+ }
+ list_for_each_entry(ckf, &ri->data.keyfields, ctrl.list) {
+ /* Check that the client entry exists in the keyset */
+ if (ckf->ctrl.key >= keyset_size) {
+ pr_err("%s:%d: key %d is not in vcap\n",
+ __func__, __LINE__, ckf->ctrl.key);
+ return -EINVAL;
+ }
+ vcap_copy_from_client_keyfield(&ri->data, &tempkf, ckf);
+ vcap_encode_keyfield(ri, &tempkf, &kf_table[ckf->ctrl.key],
+ tg_table);
+ }
+ /* Add typegroup bits to the key/mask bitstreams */
+ vcap_encode_keyfield_typegroups(ri->vctrl, ri, tg_table);
+ return 0;
+}
+
+/* Return the list of actionfields for the actionset */
+const struct vcap_field *
+vcap_actionfields(struct vcap_control *vctrl,
+ enum vcap_type vt, enum vcap_actionfield_set actionset)
+{
+ /* Check that the actionset exists in the vcap actionset list */
+ if (actionset >= vctrl->vcaps[vt].actionfield_set_size)
+ return NULL;
+ return vctrl->vcaps[vt].actionfield_set_map[actionset];
+}
+
+const struct vcap_set *
+vcap_actionfieldset(struct vcap_control *vctrl,
+ enum vcap_type vt, enum vcap_actionfield_set actionset)
+{
+ const struct vcap_set *aset;
+
+ /* Check that the actionset exists in the vcap actionset list */
+ if (actionset >= vctrl->vcaps[vt].actionfield_set_size)
+ return NULL;
+ aset = &vctrl->vcaps[vt].actionfield_set[actionset];
+ if (aset->sw_per_item == 0 || aset->sw_per_item > vctrl->vcaps[vt].sw_count)
+ return NULL;
+ return aset;
+}
+
+/* Return the typegroup table for the matching actionset (using subword size) */
+const struct vcap_typegroup *
+vcap_actionfield_typegroup(struct vcap_control *vctrl,
+ enum vcap_type vt, enum vcap_actionfield_set actionset)
+{
+ const struct vcap_set *aset = vcap_actionfieldset(vctrl, vt, actionset);
+
+ /* Check that the actionset is valid */
+ if (!aset)
+ return NULL;
+ return vctrl->vcaps[vt].actionfield_set_typegroups[aset->sw_per_item];
+}
+
+/* Return the number of actionfields in the actionset */
+int vcap_actionfield_count(struct vcap_control *vctrl,
+ enum vcap_type vt,
+ enum vcap_actionfield_set actionset)
+{
+ /* Check that the actionset exists in the vcap actionset list */
+ if (actionset >= vctrl->vcaps[vt].actionfield_set_size)
+ return 0;
+ return vctrl->vcaps[vt].actionfield_set_map_size[actionset];
+}
+
+static void vcap_encode_actionfield(struct vcap_rule_internal *ri,
+ const struct vcap_client_actionfield *af,
+ const struct vcap_field *rf,
+ const struct vcap_typegroup *tgt)
+{
+ int act_width = ri->vctrl->vcaps[ri->admin->vtype].act_width;
+ struct vcap_cache_data *cache = &ri->admin->cache;
+ struct vcap_stream_iter iter;
+ const u8 *value;
+
+ /* Encode the action field in the stream, respecting the subword width */
+ switch (af->ctrl.type) {
+ case VCAP_FIELD_BIT:
+ value = &af->data.u1.value;
+ break;
+ case VCAP_FIELD_U32:
+ value = (const u8 *)&af->data.u32.value;
+ break;
+ case VCAP_FIELD_U48:
+ value = af->data.u48.value;
+ break;
+ case VCAP_FIELD_U56:
+ value = af->data.u56.value;
+ break;
+ case VCAP_FIELD_U64:
+ value = af->data.u64.value;
+ break;
+ case VCAP_FIELD_U72:
+ value = af->data.u72.value;
+ break;
+ case VCAP_FIELD_U112:
+ value = af->data.u112.value;
+ break;
+ case VCAP_FIELD_U128:
+ value = af->data.u128.value;
+ break;
+ }
+ vcap_iter_init(&iter, act_width, tgt, rf->offset);
+ vcap_encode_field(cache->actionstream, &iter, rf->width, value);
+}
+
+static void vcap_encode_actionfield_typegroups(struct vcap_rule_internal *ri,
+ const struct vcap_typegroup *tgt)
+{
+ int sw_width = ri->vctrl->vcaps[ri->admin->vtype].act_width;
+ struct vcap_cache_data *cache = &ri->admin->cache;
+
+ /* Encode the typegroup bits for the actionstream respecting the subword
+ * width.
+ */
+ vcap_encode_typegroups(cache->actionstream, sw_width, tgt, false);
+}
+
+static int vcap_encode_rule_actionset(struct vcap_rule_internal *ri)
+{
+ const struct vcap_client_actionfield *caf;
+ const struct vcap_typegroup *tg_table;
+ struct vcap_client_actionfield tempaf;
+ const struct vcap_field *af_table;
+ int actionset_size;
+
+ /* Get a valid set of actionset fields for the specific actionset */
+ af_table = vcap_actionfields(ri->vctrl, ri->admin->vtype,
+ ri->data.actionset);
+ if (!af_table) {
+ pr_err("%s:%d: no fields available for this actionset: %d\n",
+ __func__, __LINE__, ri->data.actionset);
+ return -EINVAL;
+ }
+ /* Get a valid typegroup for the specific actionset */
+ tg_table = vcap_actionfield_typegroup(ri->vctrl, ri->admin->vtype,
+ ri->data.actionset);
+ if (!tg_table) {
+ pr_err("%s:%d: no typegroups available for this actionset: %d\n",
+ __func__, __LINE__, ri->data.actionset);
+ return -EINVAL;
+ }
+ /* Get a valid actionset size for the specific actionset */
+ actionset_size = vcap_actionfield_count(ri->vctrl, ri->admin->vtype,
+ ri->data.actionset);
+ if (actionset_size == 0) {
+ pr_err("%s:%d: zero field count for this actionset: %d\n",
+ __func__, __LINE__, ri->data.actionset);
+ return -EINVAL;
+ }
+ /* Iterate over the actionfields in the rule
+ * and encode these bits
+ */
+ if (list_empty(&ri->data.actionfields))
+ pr_warn("%s:%d: no actionfields in the rule\n",
+ __func__, __LINE__);
+ list_for_each_entry(caf, &ri->data.actionfields, ctrl.list) {
+ /* Check that the client action exists in the actionset */
+ if (caf->ctrl.action >= actionset_size) {
+ pr_err("%s:%d: action %d is not in vcap\n",
+ __func__, __LINE__, caf->ctrl.action);
+ return -EINVAL;
+ }
+ vcap_copy_from_client_actionfield(&ri->data, &tempaf, caf);
+ vcap_encode_actionfield(ri, &tempaf,
+ &af_table[caf->ctrl.action], tg_table);
+ }
+ /* Add typegroup bits to the entry bitstreams */
+ vcap_encode_actionfield_typegroups(ri, tg_table);
+ return 0;
+}
+
+static int vcap_encode_rule(struct vcap_rule_internal *ri)
+{
+ int err;
+
+ err = vcap_encode_rule_keyset(ri);
+ if (err)
+ return err;
+ err = vcap_encode_rule_actionset(ri);
+ if (err)
+ return err;
+ return 0;
+}
+
+int vcap_api_check(struct vcap_control *ctrl)
+{
+ if (!ctrl) {
+ pr_err("%s:%d: vcap control is missing\n", __func__, __LINE__);
+ return -EINVAL;
+ }
+ if (!ctrl->ops || !ctrl->ops->validate_keyset ||
+ !ctrl->ops->add_default_fields || !ctrl->ops->cache_erase ||
+ !ctrl->ops->cache_write || !ctrl->ops->cache_read ||
+ !ctrl->ops->init || !ctrl->ops->update || !ctrl->ops->move ||
+ !ctrl->ops->port_info) {
+ pr_err("%s:%d: client operations are missing\n",
+ __func__, __LINE__);
+ return -ENOENT;
+ }
+ return 0;
+}
+
+void vcap_erase_cache(struct vcap_rule_internal *ri)
+{
+ ri->vctrl->ops->cache_erase(ri->admin);
+}
+
+/* Update the keyset for the rule */
+int vcap_set_rule_set_keyset(struct vcap_rule *rule,
+ enum vcap_keyfield_set keyset)
+{
+ struct vcap_rule_internal *ri = to_intrule(rule);
+ const struct vcap_set *kset;
+ int sw_width;
+
+ kset = vcap_keyfieldset(ri->vctrl, ri->admin->vtype, keyset);
+ /* Check that the keyset is valid */
+ if (!kset)
+ return -EINVAL;
+ ri->keyset_sw = kset->sw_per_item;
+ sw_width = ri->vctrl->vcaps[ri->admin->vtype].sw_width;
+ ri->keyset_sw_regs = DIV_ROUND_UP(sw_width, 32);
+ ri->data.keyset = keyset;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vcap_set_rule_set_keyset);
+
+/* Update the actionset for the rule */
+int vcap_set_rule_set_actionset(struct vcap_rule *rule,
+ enum vcap_actionfield_set actionset)
+{
+ struct vcap_rule_internal *ri = to_intrule(rule);
+ const struct vcap_set *aset;
+ int act_width;
+
+ aset = vcap_actionfieldset(ri->vctrl, ri->admin->vtype, actionset);
+ /* Check that the actionset is valid */
+ if (!aset)
+ return -EINVAL;
+ ri->actionset_sw = aset->sw_per_item;
+ act_width = ri->vctrl->vcaps[ri->admin->vtype].act_width;
+ ri->actionset_sw_regs = DIV_ROUND_UP(act_width, 32);
+ ri->data.actionset = actionset;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vcap_set_rule_set_actionset);
+
+/* Check if a rule with this id exists */
+static bool vcap_rule_exists(struct vcap_control *vctrl, u32 id)
+{
+ struct vcap_rule_internal *ri;
+ struct vcap_admin *admin;
+
+ /* Look for the rule id in all vcaps */
+ list_for_each_entry(admin, &vctrl->list, list)
+ list_for_each_entry(ri, &admin->rules, list)
+ if (ri->data.id == id)
+ return true;
+ return false;
+}
+
+/* Find a rule with a provided rule id and return it with the vcap locked */
+static struct vcap_rule_internal *
+vcap_get_locked_rule(struct vcap_control *vctrl, u32 id)
+{
+ struct vcap_rule_internal *ri;
+ struct vcap_admin *admin;
+
+ /* Look for the rule id in all vcaps */
+ list_for_each_entry(admin, &vctrl->list, list) {
+ mutex_lock(&admin->lock);
+ list_for_each_entry(ri, &admin->rules, list)
+ if (ri->data.id == id)
+ return ri;
+ mutex_unlock(&admin->lock);
+ }
+ return NULL;
+}
+
+/* Find a rule id with a provided cookie */
+int vcap_lookup_rule_by_cookie(struct vcap_control *vctrl, u64 cookie)
+{
+ struct vcap_rule_internal *ri;
+ struct vcap_admin *admin;
+ int id = 0;
+
+ /* Look for the rule id in all vcaps */
+ list_for_each_entry(admin, &vctrl->list, list) {
+ mutex_lock(&admin->lock);
+ list_for_each_entry(ri, &admin->rules, list) {
+ if (ri->data.cookie == cookie) {
+ id = ri->data.id;
+ break;
+ }
+ }
+ mutex_unlock(&admin->lock);
+ if (id)
+ return id;
+ }
+ return -ENOENT;
+}
+EXPORT_SYMBOL_GPL(vcap_lookup_rule_by_cookie);
+
+/* Get the number of rules in a vcap instance within a lookup chain id range */
+int vcap_admin_rule_count(struct vcap_admin *admin, int cid)
+{
+ int max_cid = roundup(cid + 1, VCAP_CID_LOOKUP_SIZE);
+ int min_cid = rounddown(cid, VCAP_CID_LOOKUP_SIZE);
+ struct vcap_rule_internal *elem;
+ int count = 0;
+
+ list_for_each_entry(elem, &admin->rules, list) {
+ mutex_lock(&admin->lock);
+ if (elem->data.vcap_chain_id >= min_cid &&
+ elem->data.vcap_chain_id < max_cid)
+ ++count;
+ mutex_unlock(&admin->lock);
+ }
+ return count;
+}
+EXPORT_SYMBOL_GPL(vcap_admin_rule_count);
+
+/* Make a copy of the rule, shallow or full */
+static struct vcap_rule_internal *vcap_dup_rule(struct vcap_rule_internal *ri,
+ bool full)
+{
+ struct vcap_client_actionfield *caf, *newcaf;
+ struct vcap_client_keyfield *ckf, *newckf;
+ struct vcap_rule_internal *duprule;
+
+ /* Allocate the client part */
+ duprule = kzalloc(sizeof(*duprule), GFP_KERNEL);
+ if (!duprule)
+ return ERR_PTR(-ENOMEM);
+ *duprule = *ri;
+ /* Not inserted in the VCAP */
+ INIT_LIST_HEAD(&duprule->list);
+ /* No elements in these lists */
+ INIT_LIST_HEAD(&duprule->data.keyfields);
+ INIT_LIST_HEAD(&duprule->data.actionfields);
+
+ /* A full rule copy includes keys and actions */
+ if (!full)
+ return duprule;
+
+ list_for_each_entry(ckf, &ri->data.keyfields, ctrl.list) {
+ newckf = kmemdup(ckf, sizeof(*newckf), GFP_KERNEL);
+ if (!newckf)
+ goto err;
+ list_add_tail(&newckf->ctrl.list, &duprule->data.keyfields);
+ }
+
+ list_for_each_entry(caf, &ri->data.actionfields, ctrl.list) {
+ newcaf = kmemdup(caf, sizeof(*newcaf), GFP_KERNEL);
+ if (!newcaf)
+ goto err;
+ list_add_tail(&newcaf->ctrl.list, &duprule->data.actionfields);
+ }
+
+ return duprule;
+
+err:
+ list_for_each_entry_safe(ckf, newckf, &duprule->data.keyfields, ctrl.list) {
+ list_del(&ckf->ctrl.list);
+ kfree(ckf);
+ }
+
+ list_for_each_entry_safe(caf, newcaf, &duprule->data.actionfields, ctrl.list) {
+ list_del(&caf->ctrl.list);
+ kfree(caf);
+ }
+
+ kfree(duprule);
+ return ERR_PTR(-ENOMEM);
+}
+
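+/* Clear any bits in dst beyond the field width. The width is counted from
+ * the least significant bit of byte 0 and upwards, 8 bits per byte, so e.g.
+ * a width of 12 over 2 bytes keeps all of dst[0], keeps the low nibble of
+ * dst[1] and clears any following bytes.
+ */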
+static void vcap_apply_width(u8 *dst, int width, int bytes)
+{
+ u8 bmask;
+ int idx;
+
+ for (idx = 0; idx < bytes; idx++) {
+ if (width > 0)
+ if (width < 8)
+ bmask = (1 << width) - 1;
+ else
+ bmask = ~0;
+ else
+ bmask = 0;
+ dst[idx] &= bmask;
+ width -= 8;
+ }
+}
+
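+/* Restore the original byte order of a field stored in w32be format; this is
+ * effectively the inverse of vcap_copy_to_w32be, and it also clears any bits
+ * beyond the field width.
+ */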
+static void vcap_copy_from_w32be(u8 *dst, u8 *src, int size, int width)
+{
+ int idx, ridx, wstart, nidx;
+ int tail_bytes = (((size + 4) >> 2) << 2) - size;
+
+ for (idx = 0, ridx = size - 1; idx < size; ++idx, --ridx) {
+ wstart = (idx >> 2) << 2;
+ nidx = wstart + 3 - (idx & 0x3);
+ if (nidx >= size)
+ nidx -= tail_bytes;
+ dst[nidx] = src[ridx];
+ }
+
+ vcap_apply_width(dst, width, size);
+}
+
+static void vcap_copy_action_bit_field(struct vcap_u1_action *field, u8 *value)
+{
+ field->value = (*value) & 0x1;
+}
+
+static void vcap_copy_limited_actionfield(u8 *dstvalue, u8 *srcvalue,
+ int width, int bytes)
+{
+ memcpy(dstvalue, srcvalue, bytes);
+ vcap_apply_width(dstvalue, width, bytes);
+}
+
+static void vcap_copy_to_client_actionfield(struct vcap_rule_internal *ri,
+ struct vcap_client_actionfield *field,
+ u8 *value, u16 width)
+{
+ int field_size = actionfield_size_table[field->ctrl.type];
+
+ if (ri->admin->w32be) {
+ switch (field->ctrl.type) {
+ case VCAP_FIELD_BIT:
+ vcap_copy_action_bit_field(&field->data.u1, value);
+ break;
+ case VCAP_FIELD_U32:
+ vcap_copy_limited_actionfield((u8 *)&field->data.u32.value,
+ value,
+ width, field_size);
+ break;
+ case VCAP_FIELD_U48:
+ vcap_copy_from_w32be(field->data.u48.value, value,
+ field_size, width);
+ break;
+ case VCAP_FIELD_U56:
+ vcap_copy_from_w32be(field->data.u56.value, value,
+ field_size, width);
+ break;
+ case VCAP_FIELD_U64:
+ vcap_copy_from_w32be(field->data.u64.value, value,
+ field_size, width);
+ break;
+ case VCAP_FIELD_U72:
+ vcap_copy_from_w32be(field->data.u72.value, value,
+ field_size, width);
+ break;
+ case VCAP_FIELD_U112:
+ vcap_copy_from_w32be(field->data.u112.value, value,
+ field_size, width);
+ break;
+ case VCAP_FIELD_U128:
+ vcap_copy_from_w32be(field->data.u128.value, value,
+ field_size, width);
+ break;
+ }
+ } else {
+ switch (field->ctrl.type) {
+ case VCAP_FIELD_BIT:
+ vcap_copy_action_bit_field(&field->data.u1, value);
+ break;
+ case VCAP_FIELD_U32:
+ vcap_copy_limited_actionfield((u8 *)&field->data.u32.value,
+ value,
+ width, field_size);
+ break;
+ case VCAP_FIELD_U48:
+ vcap_copy_limited_actionfield(field->data.u48.value,
+ value,
+ width, field_size);
+ break;
+ case VCAP_FIELD_U56:
+ vcap_copy_limited_actionfield(field->data.u56.value,
+ value,
+ width, field_size);
+ break;
+ case VCAP_FIELD_U64:
+ vcap_copy_limited_actionfield(field->data.u64.value,
+ value,
+ width, field_size);
+ break;
+ case VCAP_FIELD_U72:
+ vcap_copy_limited_actionfield(field->data.u72.value,
+ value,
+ width, field_size);
+ break;
+ case VCAP_FIELD_U112:
+ vcap_copy_limited_actionfield(field->data.u112.value,
+ value,
+ width, field_size);
+ break;
+ case VCAP_FIELD_U128:
+ vcap_copy_limited_actionfield(field->data.u128.value,
+ value,
+ width, field_size);
+ break;
+ }
+ }
+}
+
+static void vcap_copy_key_bit_field(struct vcap_u1_key *field,
+ u8 *value, u8 *mask)
+{
+ field->value = (*value) & 0x1;
+ field->mask = (*mask) & 0x1;
+}
+
+static void vcap_copy_limited_keyfield(u8 *dstvalue, u8 *dstmask,
+ u8 *srcvalue, u8 *srcmask,
+ int width, int bytes)
+{
+ memcpy(dstvalue, srcvalue, bytes);
+ vcap_apply_width(dstvalue, width, bytes);
+ memcpy(dstmask, srcmask, bytes);
+ vcap_apply_width(dstmask, width, bytes);
+}
+
+static void vcap_copy_to_client_keyfield(struct vcap_rule_internal *ri,
+ struct vcap_client_keyfield *field,
+ u8 *value, u8 *mask, u16 width)
+{
+ int field_size = keyfield_size_table[field->ctrl.type] / 2;
+
+ if (ri->admin->w32be) {
+ switch (field->ctrl.type) {
+ case VCAP_FIELD_BIT:
+ vcap_copy_key_bit_field(&field->data.u1, value, mask);
+ break;
+ case VCAP_FIELD_U32:
+ vcap_copy_limited_keyfield((u8 *)&field->data.u32.value,
+ (u8 *)&field->data.u32.mask,
+ value, mask,
+ width, field_size);
+ break;
+ case VCAP_FIELD_U48:
+ vcap_copy_from_w32be(field->data.u48.value, value,
+ field_size, width);
+ vcap_copy_from_w32be(field->data.u48.mask, mask,
+ field_size, width);
+ break;
+ case VCAP_FIELD_U56:
+ vcap_copy_from_w32be(field->data.u56.value, value,
+ field_size, width);
+ vcap_copy_from_w32be(field->data.u56.mask, mask,
+ field_size, width);
+ break;
+ case VCAP_FIELD_U64:
+ vcap_copy_from_w32be(field->data.u64.value, value,
+ field_size, width);
+ vcap_copy_from_w32be(field->data.u64.mask, mask,
+ field_size, width);
+ break;
+ case VCAP_FIELD_U72:
+ vcap_copy_from_w32be(field->data.u72.value, value,
+ field_size, width);
+ vcap_copy_from_w32be(field->data.u72.mask, mask,
+ field_size, width);
+ break;
+ case VCAP_FIELD_U112:
+ vcap_copy_from_w32be(field->data.u112.value, value,
+ field_size, width);
+ vcap_copy_from_w32be(field->data.u112.mask, mask,
+ field_size, width);
+ break;
+ case VCAP_FIELD_U128:
+ vcap_copy_from_w32be(field->data.u128.value, value,
+ field_size, width);
+ vcap_copy_from_w32be(field->data.u128.mask, mask,
+ field_size, width);
+ break;
+ }
+ } else {
+ switch (field->ctrl.type) {
+ case VCAP_FIELD_BIT:
+ vcap_copy_key_bit_field(&field->data.u1, value, mask);
+ break;
+ case VCAP_FIELD_U32:
+ vcap_copy_limited_keyfield((u8 *)&field->data.u32.value,
+ (u8 *)&field->data.u32.mask,
+ value, mask,
+ width, field_size);
+ break;
+ case VCAP_FIELD_U48:
+ vcap_copy_limited_keyfield(field->data.u48.value,
+ field->data.u48.mask,
+ value, mask,
+ width, field_size);
+ break;
+ case VCAP_FIELD_U56:
+ vcap_copy_limited_keyfield(field->data.u56.value,
+ field->data.u56.mask,
+ value, mask,
+ width, field_size);
+ break;
+ case VCAP_FIELD_U64:
+ vcap_copy_limited_keyfield(field->data.u64.value,
+ field->data.u64.mask,
+ value, mask,
+ width, field_size);
+ break;
+ case VCAP_FIELD_U72:
+ vcap_copy_limited_keyfield(field->data.u72.value,
+ field->data.u72.mask,
+ value, mask,
+ width, field_size);
+ break;
+ case VCAP_FIELD_U112:
+ vcap_copy_limited_keyfield(field->data.u112.value,
+ field->data.u112.mask,
+ value, mask,
+ width, field_size);
+ break;
+ case VCAP_FIELD_U128:
+ vcap_copy_limited_keyfield(field->data.u128.value,
+ field->data.u128.mask,
+ value, mask,
+ width, field_size);
+ break;
+ }
+ }
+}
+
+static void vcap_rule_alloc_keyfield(struct vcap_rule_internal *ri,
+ const struct vcap_field *keyfield,
+ enum vcap_key_field key,
+ u8 *value, u8 *mask)
+{
+ struct vcap_client_keyfield *field;
+
+ field = kzalloc(sizeof(*field), GFP_KERNEL);
+ if (!field)
+ return;
+ INIT_LIST_HEAD(&field->ctrl.list);
+ field->ctrl.key = key;
+ field->ctrl.type = keyfield->type;
+ vcap_copy_to_client_keyfield(ri, field, value, mask, keyfield->width);
+ list_add_tail(&field->ctrl.list, &ri->data.keyfields);
+}
+
+/* Check that the actionset is valid for the vcap type and has a valid
+ * typegroup table and field table, so the actionstream can be decoded
+ */
+static bool
+vcap_verify_actionstream_actionset(struct vcap_control *vctrl,
+ enum vcap_type vt,
+ u32 *actionstream,
+ enum vcap_actionfield_set actionset)
+{
+ const struct vcap_typegroup *tgt;
+ const struct vcap_field *fields;
+ const struct vcap_set *info;
+
+ if (vcap_actionfield_count(vctrl, vt, actionset) == 0)
+ return false;
+
+ info = vcap_actionfieldset(vctrl, vt, actionset);
+ /* Check that the actionset is valid */
+ if (!info)
+ return false;
+
+ /* a type_id of value -1 means that there is no type field */
+ if (info->type_id == (u8)-1)
+ return true;
+
+ /* Get a valid typegroup for the specific actionset */
+ tgt = vcap_actionfield_typegroup(vctrl, vt, actionset);
+ if (!tgt)
+ return false;
+
+ fields = vcap_actionfields(vctrl, vt, actionset);
+ if (!fields)
+ return false;
+
+ /* Later this will be expanded with a check of the type id */
+ return true;
+}
+
+/* Find the subword width of the action typegroup that matches the stream data
+ */
+static int vcap_find_actionstream_typegroup_sw(struct vcap_control *vctrl,
+ enum vcap_type vt, u32 *stream,
+ int sw_max)
+{
+ const struct vcap_typegroup **tgt;
+ int sw_idx, res;
+
+ tgt = vctrl->vcaps[vt].actionfield_set_typegroups;
+ /* Try the longest subword match first */
+ for (sw_idx = vctrl->vcaps[vt].sw_count; sw_idx >= 0; sw_idx--) {
+ if (!tgt[sw_idx])
+ continue;
+ res = vcap_verify_typegroups(stream, vctrl->vcaps[vt].act_width,
+ tgt[sw_idx], false, sw_max);
+ if (res == 0)
+ return sw_idx;
+ }
+ return -EINVAL;
+}
+
+/* Verify that the typegroup information, subword count, actionset and type id
+ * are in sync and correct, return the actionset
+ */
+static enum vcap_actionfield_set
+vcap_find_actionstream_actionset(struct vcap_control *vctrl,
+ enum vcap_type vt,
+ u32 *stream,
+ int sw_max)
+{
+ const struct vcap_set *actionfield_set;
+ int sw_count, idx;
+ bool res;
+
+ sw_count = vcap_find_actionstream_typegroup_sw(vctrl, vt, stream,
+ sw_max);
+ if (sw_count < 0)
+ return sw_count;
+
+ actionfield_set = vctrl->vcaps[vt].actionfield_set;
+ for (idx = 0; idx < vctrl->vcaps[vt].actionfield_set_size; ++idx) {
+ if (actionfield_set[idx].sw_per_item != sw_count)
+ continue;
+
+ res = vcap_verify_actionstream_actionset(vctrl, vt,
+ stream, idx);
+ if (res)
+ return idx;
+ }
+ return -EINVAL;
+}
+
+/* Store action value in an element in a list for the client */
+static void vcap_rule_alloc_actionfield(struct vcap_rule_internal *ri,
+ const struct vcap_field *actionfield,
+ enum vcap_action_field action,
+ u8 *value)
+{
+ struct vcap_client_actionfield *field;
+
+ field = kzalloc(sizeof(*field), GFP_KERNEL);
+ if (!field)
+ return;
+ INIT_LIST_HEAD(&field->ctrl.list);
+ field->ctrl.action = action;
+ field->ctrl.type = actionfield->type;
+ vcap_copy_to_client_actionfield(ri, field, value, actionfield->width);
+ list_add_tail(&field->ctrl.list, &ri->data.actionfields);
+}
+
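+/* Decode the actionstream in the VCAP cache into rule actionfields */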
+static int vcap_decode_actionset(struct vcap_rule_internal *ri)
+{
+ struct vcap_control *vctrl = ri->vctrl;
+ struct vcap_admin *admin = ri->admin;
+ const struct vcap_field *actionfield;
+ enum vcap_actionfield_set actionset;
+ enum vcap_type vt = admin->vtype;
+ const struct vcap_typegroup *tgt;
+ struct vcap_stream_iter iter;
+ int idx, res, actfield_count;
+ u32 *actstream;
+ u8 value[16];
+
+ actstream = admin->cache.actionstream;
+ res = vcap_find_actionstream_actionset(vctrl, vt, actstream, 0);
+ if (res < 0) {
+ pr_err("%s:%d: could not find valid actionset: %d\n",
+ __func__, __LINE__, res);
+ return -EINVAL;
+ }
+ actionset = res;
+ actfield_count = vcap_actionfield_count(vctrl, vt, actionset);
+ actionfield = vcap_actionfields(vctrl, vt, actionset);
+ tgt = vcap_actionfield_typegroup(vctrl, vt, actionset);
+ /* Start decoding the stream */
+ for (idx = 0; idx < actfield_count; ++idx) {
+ if (actionfield[idx].width <= 0)
+ continue;
+ /* Get the action */
+ memset(value, 0, DIV_ROUND_UP(actionfield[idx].width, 8));
+ vcap_iter_init(&iter, vctrl->vcaps[vt].act_width, tgt,
+ actionfield[idx].offset);
+ vcap_decode_field(actstream, &iter, actionfield[idx].width,
+ value);
+ /* Skip if no bits are set */
+ if (vcap_bitarray_zero(actionfield[idx].width, value))
+ continue;
+ vcap_rule_alloc_actionfield(ri, &actionfield[idx], idx, value);
+ /* Later the action id will also be checked */
+ }
+ return vcap_set_rule_set_actionset((struct vcap_rule *)ri, actionset);
+}
+
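+/* Decode the key and mask streams in the VCAP cache into rule keyfields */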
+static int vcap_decode_keyset(struct vcap_rule_internal *ri)
+{
+ struct vcap_control *vctrl = ri->vctrl;
+ struct vcap_stream_iter kiter, miter;
+ struct vcap_admin *admin = ri->admin;
+ enum vcap_keyfield_set keysets[10];
+ const struct vcap_field *keyfield;
+ enum vcap_type vt = admin->vtype;
+ const struct vcap_typegroup *tgt;
+ struct vcap_keyset_list matches;
+ enum vcap_keyfield_set keyset;
+ int idx, res, keyfield_count;
+ u32 *maskstream;
+ u32 *keystream;
+ u8 value[16];
+ u8 mask[16];
+
+ keystream = admin->cache.keystream;
+ maskstream = admin->cache.maskstream;
+ matches.keysets = keysets;
+ matches.cnt = 0;
+ matches.max = ARRAY_SIZE(keysets);
+ res = vcap_find_keystream_keysets(vctrl, vt, keystream, maskstream,
+ false, 0, &matches);
+ if (res < 0) {
+ pr_err("%s:%d: could not find valid keysets: %d\n",
+ __func__, __LINE__, res);
+ return -EINVAL;
+ }
+ keyset = matches.keysets[0];
+ keyfield_count = vcap_keyfield_count(vctrl, vt, keyset);
+ keyfield = vcap_keyfields(vctrl, vt, keyset);
+ tgt = vcap_keyfield_typegroup(vctrl, vt, keyset);
+ /* Start decoding the streams */
+ for (idx = 0; idx < keyfield_count; ++idx) {
+ if (keyfield[idx].width <= 0)
+ continue;
+ /* First get the mask */
+ memset(mask, 0, DIV_ROUND_UP(keyfield[idx].width, 8));
+ vcap_iter_init(&miter, vctrl->vcaps[vt].sw_width, tgt,
+ keyfield[idx].offset);
+ vcap_decode_field(maskstream, &miter, keyfield[idx].width,
+ mask);
+ /* Skip if no mask bits are set */
+ if (vcap_bitarray_zero(keyfield[idx].width, mask))
+ continue;
+ /* Get the key */
+ memset(value, 0, DIV_ROUND_UP(keyfield[idx].width, 8));
+ vcap_iter_init(&kiter, vctrl->vcaps[vt].sw_width, tgt,
+ keyfield[idx].offset);
+ vcap_decode_field(keystream, &kiter, keyfield[idx].width,
+ value);
+ vcap_rule_alloc_keyfield(ri, &keyfield[idx], idx, value, mask);
+ }
+ return vcap_set_rule_set_keyset((struct vcap_rule *)ri, keyset);
+}
+
+/* Read VCAP content into the VCAP cache */
+static int vcap_read_rule(struct vcap_rule_internal *ri)
+{
+ struct vcap_admin *admin = ri->admin;
+ int sw_idx, ent_idx = 0, act_idx = 0;
+ u32 addr = ri->addr;
+
+ if (!ri->size || !ri->keyset_sw_regs || !ri->actionset_sw_regs) {
+ pr_err("%s:%d: rule is empty\n", __func__, __LINE__);
+ return -EINVAL;
+ }
+ vcap_erase_cache(ri);
+ /* Use the values in the streams to read the VCAP cache */
+ for (sw_idx = 0; sw_idx < ri->size; sw_idx++, addr++) {
+ ri->vctrl->ops->update(ri->ndev, admin, VCAP_CMD_READ,
+ VCAP_SEL_ALL, addr);
+ ri->vctrl->ops->cache_read(ri->ndev, admin,
+ VCAP_SEL_ENTRY, ent_idx,
+ ri->keyset_sw_regs);
+ ri->vctrl->ops->cache_read(ri->ndev, admin,
+ VCAP_SEL_ACTION, act_idx,
+ ri->actionset_sw_regs);
+ if (sw_idx == 0)
+ ri->vctrl->ops->cache_read(ri->ndev, admin,
+ VCAP_SEL_COUNTER,
+ ri->counter_id, 0);
+ ent_idx += ri->keyset_sw_regs;
+ act_idx += ri->actionset_sw_regs;
+ }
+ return 0;
+}
+
+/* Write VCAP cache content to the VCAP HW instance */
+static int vcap_write_rule(struct vcap_rule_internal *ri)
+{
+ struct vcap_admin *admin = ri->admin;
+ int sw_idx, ent_idx = 0, act_idx = 0;
+ u32 addr = ri->addr;
+
+ if (!ri->size || !ri->keyset_sw_regs || !ri->actionset_sw_regs) {
+ pr_err("%s:%d: rule is empty\n", __func__, __LINE__);
+ return -EINVAL;
+ }
+ /* Use the values in the streams to write the VCAP cache */
+ for (sw_idx = 0; sw_idx < ri->size; sw_idx++, addr++) {
+ ri->vctrl->ops->cache_write(ri->ndev, admin,
+ VCAP_SEL_ENTRY, ent_idx,
+ ri->keyset_sw_regs);
+ ri->vctrl->ops->cache_write(ri->ndev, admin,
+ VCAP_SEL_ACTION, act_idx,
+ ri->actionset_sw_regs);
+ ri->vctrl->ops->update(ri->ndev, admin, VCAP_CMD_WRITE,
+ VCAP_SEL_ALL, addr);
+ ent_idx += ri->keyset_sw_regs;
+ act_idx += ri->actionset_sw_regs;
+ }
+ return 0;
+}
+
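+/* Write the rule counter value and sticky bit to the VCAP HW */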
+static int vcap_write_counter(struct vcap_rule_internal *ri,
+ struct vcap_counter *ctr)
+{
+ struct vcap_admin *admin = ri->admin;
+
+ admin->cache.counter = ctr->value;
+ admin->cache.sticky = ctr->sticky;
+ ri->vctrl->ops->cache_write(ri->ndev, admin, VCAP_SEL_COUNTER,
+ ri->counter_id, 0);
+ ri->vctrl->ops->update(ri->ndev, admin, VCAP_CMD_WRITE,
+ VCAP_SEL_COUNTER, ri->addr);
+ return 0;
+}
+
+/* Convert a chain id to a VCAP lookup index */
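+/* E.g. instance 1 of a VCAP with two lookups per instance serves lookups
+ * 2 and 3; a chain id falling in the second lookup range of that instance
+ * maps to lookup 3.
+ */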
+int vcap_chain_id_to_lookup(struct vcap_admin *admin, int cur_cid)
+{
+ int lookup_first = admin->vinst * admin->lookups_per_instance;
+ int lookup_last = lookup_first + admin->lookups_per_instance;
+ int cid_next = admin->first_cid + VCAP_CID_LOOKUP_SIZE;
+ int cid = admin->first_cid;
+ int lookup;
+
+ for (lookup = lookup_first; lookup < lookup_last; ++lookup,
+ cid += VCAP_CID_LOOKUP_SIZE, cid_next += VCAP_CID_LOOKUP_SIZE)
+ if (cur_cid >= cid && cur_cid < cid_next)
+ return lookup;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vcap_chain_id_to_lookup);
+
+/* Lookup a vcap instance using chain id */
+struct vcap_admin *vcap_find_admin(struct vcap_control *vctrl, int cid)
+{
+ struct vcap_admin *admin;
+
+ if (vcap_api_check(vctrl))
+ return NULL;
+
+ list_for_each_entry(admin, &vctrl->list, list) {
+ if (cid >= admin->first_cid && cid <= admin->last_cid)
+ return admin;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(vcap_find_admin);
+
+/* Is this the last admin instance ordered by chain id and direction */
+static bool vcap_admin_is_last(struct vcap_control *vctrl,
+ struct vcap_admin *admin,
+ bool ingress)
+{
+ struct vcap_admin *iter, *last = NULL;
+ int max_cid = 0;
+
+ list_for_each_entry(iter, &vctrl->list, list) {
+ if (iter->first_cid > max_cid &&
+ iter->ingress == ingress) {
+ last = iter;
+ max_cid = iter->first_cid;
+ }
+ }
+ if (!last)
+ return false;
+
+ return admin == last;
+}
+
+/* Calculate the value used for chaining VCAP rules */
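+/* E.g. with a lookup size of 100000, chaining from cid 1100000 to 1100010
+ * gives offset 10, while a destination cid aligned to the start of a lookup
+ * gives offset 0 (no chaining value needed).
+ */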
+int vcap_chain_offset(struct vcap_control *vctrl, int from_cid, int to_cid)
+{
+ int diff = to_cid - from_cid;
+
+ if (diff < 0) /* Wrong direction */
+ return diff;
+ to_cid %= VCAP_CID_LOOKUP_SIZE;
+ if (to_cid == 0) /* Destination aligned to a lookup == no chaining */
+ return 0;
+ diff %= VCAP_CID_LOOKUP_SIZE; /* Limit to a value within a lookup */
+ return diff;
+}
+EXPORT_SYMBOL_GPL(vcap_chain_offset);
+
+/* Is the next chain id in one of the following lookups
+ * For now this does not support filters linked to other filters using
+ * keys and actions. That will be added later.
+ */
+bool vcap_is_next_lookup(struct vcap_control *vctrl, int src_cid, int dst_cid)
+{
+ struct vcap_admin *admin;
+ int next_cid;
+
+ if (vcap_api_check(vctrl))
+ return false;
+
+ /* The offset must be at least one lookup, so round up to the next
+ * lookup boundary
+ */
+ next_cid = roundup(src_cid + 1, VCAP_CID_LOOKUP_SIZE);
+
+ if (dst_cid < next_cid)
+ return false;
+
+ admin = vcap_find_admin(vctrl, dst_cid);
+ if (!admin)
+ return false;
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(vcap_is_next_lookup);
+
+/* Check if there is room for a new rule */
+static int vcap_rule_space(struct vcap_admin *admin, int size)
+{
+ if (admin->last_used_addr - size < admin->first_valid_addr) {
+ pr_err("%s:%d: No room for rule size: %u, %u\n",
+ __func__, __LINE__, size, admin->first_valid_addr);
+ return -ENOSPC;
+ }
+ return 0;
+}
+
+/* Add the keyset typefield to the list of rule keyfields */
+static int vcap_add_type_keyfield(struct vcap_rule *rule)
+{
+ struct vcap_rule_internal *ri = to_intrule(rule);
+ enum vcap_keyfield_set keyset = rule->keyset;
+ enum vcap_type vt = ri->admin->vtype;
+ const struct vcap_field *fields;
+ const struct vcap_set *kset;
+ int ret = -EINVAL;
+
+ kset = vcap_keyfieldset(ri->vctrl, vt, keyset);
+ if (!kset)
+ return ret;
+ if (kset->type_id == (u8)-1) /* No type field is needed */
+ return 0;
+
+ fields = vcap_keyfields(ri->vctrl, vt, keyset);
+ if (!fields)
+ return -EINVAL;
+ if (fields[VCAP_KF_TYPE].width > 1) {
+ ret = vcap_rule_add_key_u32(rule, VCAP_KF_TYPE,
+ kset->type_id, 0xff);
+ } else {
+ if (kset->type_id)
+ ret = vcap_rule_add_key_bit(rule, VCAP_KF_TYPE,
+ VCAP_BIT_1);
+ else
+ ret = vcap_rule_add_key_bit(rule, VCAP_KF_TYPE,
+ VCAP_BIT_0);
+ }
+ return ret;
+}
+
+/* Add the actionset typefield to the list of rule actionfields */
+static int vcap_add_type_actionfield(struct vcap_rule *rule)
+{
+ enum vcap_actionfield_set actionset = rule->actionset;
+ struct vcap_rule_internal *ri = to_intrule(rule);
+ enum vcap_type vt = ri->admin->vtype;
+ const struct vcap_field *fields;
+ const struct vcap_set *aset;
+ int ret = -EINVAL;
+
+ aset = vcap_actionfieldset(ri->vctrl, vt, actionset);
+ if (!aset)
+ return ret;
+ if (aset->type_id == (u8)-1) /* No type field is needed */
+ return 0;
+
+ fields = vcap_actionfields(ri->vctrl, vt, actionset);
+ if (!fields)
+ return -EINVAL;
+ if (fields[VCAP_AF_TYPE].width > 1) {
+ ret = vcap_rule_add_action_u32(rule, VCAP_AF_TYPE,
+ aset->type_id);
+ } else {
+ if (aset->type_id)
+ ret = vcap_rule_add_action_bit(rule, VCAP_AF_TYPE,
+ VCAP_BIT_1);
+ else
+ ret = vcap_rule_add_action_bit(rule, VCAP_AF_TYPE,
+ VCAP_BIT_0);
+ }
+ return ret;
+}
+
+/* Add a keyset to a keyset list */
+bool vcap_keyset_list_add(struct vcap_keyset_list *keysetlist,
+ enum vcap_keyfield_set keyset)
+{
+ int idx;
+
+ if (keysetlist->cnt < keysetlist->max) {
+ /* Avoid duplicates */
+ for (idx = 0; idx < keysetlist->cnt; ++idx)
+ if (keysetlist->keysets[idx] == keyset)
+ return keysetlist->cnt < keysetlist->max;
+ keysetlist->keysets[keysetlist->cnt++] = keyset;
+ }
+ return keysetlist->cnt < keysetlist->max;
+}
+EXPORT_SYMBOL_GPL(vcap_keyset_list_add);
+
+/* Add an actionset to an actionset list */
+static bool vcap_actionset_list_add(struct vcap_actionset_list *actionsetlist,
+ enum vcap_actionfield_set actionset)
+{
+ int idx;
+
+ if (actionsetlist->cnt < actionsetlist->max) {
+ /* Avoid duplicates */
+ for (idx = 0; idx < actionsetlist->cnt; ++idx)
+ if (actionsetlist->actionsets[idx] == actionset)
+ return actionsetlist->cnt < actionsetlist->max;
+ actionsetlist->actionsets[actionsetlist->cnt++] = actionset;
+ }
+ return actionsetlist->cnt < actionsetlist->max;
+}
+
+/* map keyset id to a string with the keyset name */
+const char *vcap_keyset_name(struct vcap_control *vctrl,
+ enum vcap_keyfield_set keyset)
+{
+ return vctrl->stats->keyfield_set_names[keyset];
+}
+EXPORT_SYMBOL_GPL(vcap_keyset_name);
+
+/* map key field id to a string with the key name */
+const char *vcap_keyfield_name(struct vcap_control *vctrl,
+ enum vcap_key_field key)
+{
+ return vctrl->stats->keyfield_names[key];
+}
+EXPORT_SYMBOL_GPL(vcap_keyfield_name);
+
+/* map actionset id to a string with the actionset name */
+const char *vcap_actionset_name(struct vcap_control *vctrl,
+ enum vcap_actionfield_set actionset)
+{
+ return vctrl->stats->actionfield_set_names[actionset];
+}
+
+/* map action field id to a string with the action name */
+const char *vcap_actionfield_name(struct vcap_control *vctrl,
+ enum vcap_action_field action)
+{
+ return vctrl->stats->actionfield_names[action];
+}
+
+/* Return the keyfield that matches a key in a keyset */
+static const struct vcap_field *
+vcap_find_keyset_keyfield(struct vcap_control *vctrl,
+ enum vcap_type vtype,
+ enum vcap_keyfield_set keyset,
+ enum vcap_key_field key)
+{
+ const struct vcap_field *fields;
+ int idx, count;
+
+ fields = vcap_keyfields(vctrl, vtype, keyset);
+ if (!fields)
+ return NULL;
+
+ /* Iterate the keyfields of the keyset */
+ count = vcap_keyfield_count(vctrl, vtype, keyset);
+ for (idx = 0; idx < count; ++idx) {
+ if (fields[idx].width == 0)
+ continue;
+
+ if (key == idx)
+ return &fields[idx];
+ }
+
+ return NULL;
+}
+
+/* Match a list of keys against the keysets available in a vcap type */
+static bool _vcap_rule_find_keysets(struct vcap_rule_internal *ri,
+ struct vcap_keyset_list *matches)
+{
+ const struct vcap_client_keyfield *ckf;
+ int keyset, found, keycount, map_size;
+ const struct vcap_field **map;
+ enum vcap_type vtype;
+
+ vtype = ri->admin->vtype;
+ map = ri->vctrl->vcaps[vtype].keyfield_set_map;
+ map_size = ri->vctrl->vcaps[vtype].keyfield_set_size;
+
+ /* Get a count of the keyfields we want to match */
+ keycount = 0;
+ list_for_each_entry(ckf, &ri->data.keyfields, ctrl.list)
+ ++keycount;
+
+ matches->cnt = 0;
+ /* Iterate the keysets of the VCAP */
+ for (keyset = 0; keyset < map_size; ++keyset) {
+ if (!map[keyset])
+ continue;
+
+ /* Iterate the keys in the rule */
+ found = 0;
+ list_for_each_entry(ckf, &ri->data.keyfields, ctrl.list)
+ if (vcap_find_keyset_keyfield(ri->vctrl, vtype,
+ keyset, ckf->ctrl.key))
+ ++found;
+
+ /* Save the keyset if all keyfields were found */
+ if (found == keycount)
+ if (!vcap_keyset_list_add(matches, keyset))
+ /* bail out when the quota is filled */
+ break;
+ }
+
+ return matches->cnt > 0;
+}
+
+/* Match a list of keys against the keysets available in a vcap type */
+bool vcap_rule_find_keysets(struct vcap_rule *rule,
+ struct vcap_keyset_list *matches)
+{
+ struct vcap_rule_internal *ri = to_intrule(rule);
+
+ return _vcap_rule_find_keysets(ri, matches);
+}
+EXPORT_SYMBOL_GPL(vcap_rule_find_keysets);
+
+/* Return the actionfield that matches an action in an actionset */
+static const struct vcap_field *
+vcap_find_actionset_actionfield(struct vcap_control *vctrl,
+ enum vcap_type vtype,
+ enum vcap_actionfield_set actionset,
+ enum vcap_action_field action)
+{
+ const struct vcap_field *fields;
+ int idx, count;
+
+ fields = vcap_actionfields(vctrl, vtype, actionset);
+ if (!fields)
+ return NULL;
+
+ /* Iterate the actionfields of the actionset */
+ count = vcap_actionfield_count(vctrl, vtype, actionset);
+ for (idx = 0; idx < count; ++idx) {
+ if (fields[idx].width == 0)
+ continue;
+
+ if (action == idx)
+ return &fields[idx];
+ }
+
+ return NULL;
+}
+
+/* Match a list of actions against the actionsets available in a vcap type */
+static bool vcap_rule_find_actionsets(struct vcap_rule_internal *ri,
+ struct vcap_actionset_list *matches)
+{
+ int actionset, found, actioncount, map_size;
+ const struct vcap_client_actionfield *ckf;
+ const struct vcap_field **map;
+ enum vcap_type vtype;
+
+ vtype = ri->admin->vtype;
+ map = ri->vctrl->vcaps[vtype].actionfield_set_map;
+ map_size = ri->vctrl->vcaps[vtype].actionfield_set_size;
+
+ /* Get a count of the actionfields we want to match */
+ actioncount = 0;
+ list_for_each_entry(ckf, &ri->data.actionfields, ctrl.list)
+ ++actioncount;
+
+ matches->cnt = 0;
+ /* Iterate the actionsets of the VCAP */
+ for (actionset = 0; actionset < map_size; ++actionset) {
+ if (!map[actionset])
+ continue;
+
+ /* Iterate the actions in the rule */
+ found = 0;
+ list_for_each_entry(ckf, &ri->data.actionfields, ctrl.list)
+ if (vcap_find_actionset_actionfield(ri->vctrl, vtype,
+ actionset,
+ ckf->ctrl.action))
+ ++found;
+
+ /* Save the actionset if all actionfields were found */
+ if (found == actioncount)
+ if (!vcap_actionset_list_add(matches, actionset))
+ /* bail out when the quota is filled */
+ break;
+ }
+
+ return matches->cnt > 0;
+}
+
+/* Validate a rule with respect to available port keys */
+int vcap_val_rule(struct vcap_rule *rule, u16 l3_proto)
+{
+ struct vcap_rule_internal *ri = to_intrule(rule);
+ struct vcap_keyset_list matches = {};
+ enum vcap_keyfield_set keysets[10];
+ int ret;
+
+ ret = vcap_api_check(ri->vctrl);
+ if (ret)
+ return ret;
+ if (!ri->admin) {
+ ri->data.exterr = VCAP_ERR_NO_ADMIN;
+ return -EINVAL;
+ }
+ if (!ri->ndev) {
+ ri->data.exterr = VCAP_ERR_NO_NETDEV;
+ return -EINVAL;
+ }
+
+ matches.keysets = keysets;
+ matches.max = ARRAY_SIZE(keysets);
+ if (ri->data.keyset == VCAP_KFS_NO_VALUE) {
+ /* Iterate over rule keyfields and select keysets that fit */
+ if (!_vcap_rule_find_keysets(ri, &matches)) {
+ ri->data.exterr = VCAP_ERR_NO_KEYSET_MATCH;
+ return -EINVAL;
+ }
+ } else {
+ /* prepare for keyset validation */
+ keysets[0] = ri->data.keyset;
+ matches.cnt = 1;
+ }
+
+ /* Pick a keyset that is supported in the port lookups */
+ ret = ri->vctrl->ops->validate_keyset(ri->ndev, ri->admin, rule,
+ &matches, l3_proto);
+ if (ret < 0) {
+ pr_err("%s:%d: keyset validation failed: %d\n",
+ __func__, __LINE__, ret);
+ ri->data.exterr = VCAP_ERR_NO_PORT_KEYSET_MATCH;
+ return ret;
+ }
+ /* use the keyset that is supported in the port lookups */
+ ret = vcap_set_rule_set_keyset(rule, ret);
+ if (ret < 0) {
+ pr_err("%s:%d: keyset was not updated: %d\n",
+ __func__, __LINE__, ret);
+ return ret;
+ }
+ if (ri->data.actionset == VCAP_AFS_NO_VALUE) {
+ struct vcap_actionset_list matches = {};
+ enum vcap_actionfield_set actionsets[10];
+
+ matches.actionsets = actionsets;
+ matches.max = ARRAY_SIZE(actionsets);
+
+ /* Find an actionset that fits the rule actions */
+ if (!vcap_rule_find_actionsets(ri, &matches)) {
+ ri->data.exterr = VCAP_ERR_NO_ACTIONSET_MATCH;
+ return -EINVAL;
+ }
+ ret = vcap_set_rule_set_actionset(rule, actionsets[0]);
+ if (ret < 0) {
+ pr_err("%s:%d: actionset was not updated: %d\n",
+ __func__, __LINE__, ret);
+ return ret;
+ }
+ }
+ vcap_add_type_keyfield(rule);
+ vcap_add_type_actionfield(rule);
+ /* Add default fields to this rule */
+ ri->vctrl->ops->add_default_fields(ri->ndev, ri->admin, rule);
+
+ /* Rule size is the maximum of the entry and action subword count */
+ ri->size = max(ri->keyset_sw, ri->actionset_sw);
+
+ /* Finally check if there is room for the rule in the VCAP */
+ return vcap_rule_space(ri->admin, ri->size);
+}
+EXPORT_SYMBOL_GPL(vcap_val_rule);
+
+/* Entries are sorted with increasing values of sort_key, so the rule with
+ * the lowest numerical sort_key is first in the list. To place the largest
+ * rules first the rule size is inverted in the key as (max_size - size).
+ */
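+/* E.g. with max_size 12 a full-size rule contributes 0 in the top byte while
+ * a one-subword rule contributes 11 << 24, so larger rules get smaller sort
+ * keys and are placed first in the list.
+ */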
+static u32 vcap_sort_key(u32 max_size, u32 size, u8 user, u16 prio)
+{
+ return ((max_size - size) << 24) | (user << 16) | prio;
+}
+
+/* Calculate the address of the next rule after this one (lower address and prio) */
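+/* E.g. addr 90 and a rule size of 6 gives (90 - 6) / 6 * 6 = 84, i.e. the
+ * next free address rounded down to the rule size alignment.
+ */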
+static u32 vcap_next_rule_addr(u32 addr, struct vcap_rule_internal *ri)
+{
+ return ((addr - ri->size) / ri->size) * ri->size;
+}
+
+/* Assign a unique rule id and autogenerate one if id == 0 */
+static u32 vcap_set_rule_id(struct vcap_rule_internal *ri)
+{
+ if (ri->data.id != 0)
+ return ri->data.id;
+
+ for (u32 next_id = 1; next_id < U32_MAX; ++next_id) {
+ if (!vcap_rule_exists(ri->vctrl, next_id)) {
+ ri->data.id = next_id;
+ break;
+ }
+ }
+ return ri->data.id;
+}
+
+static int vcap_insert_rule(struct vcap_rule_internal *ri,
+ struct vcap_rule_move *move)
+{
+ int sw_count = ri->vctrl->vcaps[ri->admin->vtype].sw_count;
+ struct vcap_rule_internal *duprule, *iter, *elem = NULL;
+ struct vcap_admin *admin = ri->admin;
+ u32 addr;
+
+ ri->sort_key = vcap_sort_key(sw_count, ri->size, ri->data.user,
+ ri->data.priority);
+
+ /* Insert the new rule in the list of rules based on the sort key.
+ * If the rule needs to be inserted between existing rules then move
+ * these rules to make room for the new rule and update their start
+ * addresses.
+ */
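+ /* Addresses are allocated downwards from last_used_addr, so the first
+ * rule in the list also has the highest address
+ */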
+ list_for_each_entry(iter, &admin->rules, list) {
+ if (ri->sort_key < iter->sort_key) {
+ elem = iter;
+ break;
+ }
+ }
+
+ if (!elem) {
+ ri->addr = vcap_next_rule_addr(admin->last_used_addr, ri);
+ admin->last_used_addr = ri->addr;
+
+ /* Add a copy of the rule to the VCAP list */
+ duprule = vcap_dup_rule(ri, ri->state == VCAP_RS_DISABLED);
+ if (IS_ERR(duprule))
+ return PTR_ERR(duprule);
+
+ list_add_tail(&duprule->list, &admin->rules);
+ return 0;
+ }
+
+ /* Reuse the space of the current rule */
+ addr = elem->addr + elem->size;
+ ri->addr = vcap_next_rule_addr(addr, ri);
+ addr = ri->addr;
+
+ /* Add a copy of the rule to the VCAP list */
+ duprule = vcap_dup_rule(ri, ri->state == VCAP_RS_DISABLED);
+ if (IS_ERR(duprule))
+ return PTR_ERR(duprule);
+
+ /* Add before the current entry */
+ list_add_tail(&duprule->list, &elem->list);
+
+ /* Update the current rule */
+ elem->addr = vcap_next_rule_addr(addr, elem);
+ addr = elem->addr;
+
+ /* Update the address in the remaining rules in the list */
+ list_for_each_entry_continue(elem, &admin->rules, list) {
+ elem->addr = vcap_next_rule_addr(addr, elem);
+ addr = elem->addr;
+ }
+
+ /* Update the move info */
+ move->addr = admin->last_used_addr;
+ move->count = ri->addr - addr;
+ move->offset = admin->last_used_addr - addr;
+ admin->last_used_addr = addr;
+ return 0;
+}
+
+static void vcap_move_rules(struct vcap_rule_internal *ri,
+ struct vcap_rule_move *move)
+{
+ ri->vctrl->ops->move(ri->ndev, ri->admin, move->addr,
+ move->offset, move->count);
+}
+
+/* Check if the chain is already used to enable a VCAP lookup for this port */
+static bool vcap_is_chain_used(struct vcap_control *vctrl,
+ struct net_device *ndev, int src_cid)
+{
+ struct vcap_enabled_port *eport;
+ struct vcap_admin *admin;
+
+ list_for_each_entry(admin, &vctrl->list, list)
+ list_for_each_entry(eport, &admin->enabled, list)
+ if (eport->src_cid == src_cid && eport->ndev == ndev)
+ return true;
+
+ return false;
+}
+
+/* Fetch the next chain in the enabled list for the port */
+static int vcap_get_next_chain(struct vcap_control *vctrl,
+ struct net_device *ndev,
+ int dst_cid)
+{
+ struct vcap_enabled_port *eport;
+ struct vcap_admin *admin;
+
+ list_for_each_entry(admin, &vctrl->list, list) {
+ list_for_each_entry(eport, &admin->enabled, list) {
+ if (eport->ndev != ndev)
+ continue;
+ if (eport->src_cid == dst_cid)
+ return eport->dst_cid;
+ }
+ }
+
+ return 0;
+}
+
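+/* Check if the destination chain can be reached from chain 0 on this port by
+ * following the enabled source/destination chain links
+ */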
+static bool vcap_path_exist(struct vcap_control *vctrl, struct net_device *ndev,
+ int dst_cid)
+{
+ int cid = rounddown(dst_cid, VCAP_CID_LOOKUP_SIZE);
+ struct vcap_enabled_port *eport = NULL;
+ struct vcap_enabled_port *elem;
+ struct vcap_admin *admin;
+ int tmp;
+
+ if (cid == 0) /* Chain zero is always available */
+ return true;
+
+ /* Find the first entry that starts from chain 0 */
+ list_for_each_entry(admin, &vctrl->list, list) {
+ list_for_each_entry(elem, &admin->enabled, list) {
+ if (elem->src_cid == 0 && elem->ndev == ndev) {
+ eport = elem;
+ break;
+ }
+ }
+ if (eport)
+ break;
+ }
+
+ if (!eport)
+ return false;
+
+ tmp = eport->dst_cid;
+ while (tmp != cid && tmp != 0)
+ tmp = vcap_get_next_chain(vctrl, ndev, tmp);
+
+ return !!tmp;
+}
+
+/* Internal clients can always store their rules in HW
+ * External clients can store their rules if the chain is enabled all
+ * the way from chain 0, otherwise the rule will be cached until
+ * the chain is enabled.
+ */
+static void vcap_rule_set_state(struct vcap_rule_internal *ri)
+{
+ if (ri->data.user <= VCAP_USER_QOS)
+ ri->state = VCAP_RS_PERMANENT;
+ else if (vcap_path_exist(ri->vctrl, ri->ndev, ri->data.vcap_chain_id))
+ ri->state = VCAP_RS_ENABLED;
+ else
+ ri->state = VCAP_RS_DISABLED;
+}
+
+/* Encode and write a validated rule to the VCAP */
+int vcap_add_rule(struct vcap_rule *rule)
+{
+ struct vcap_rule_internal *ri = to_intrule(rule);
+ struct vcap_rule_move move = {0};
+ struct vcap_counter ctr = {0};
+ int ret;
+
+ ret = vcap_api_check(ri->vctrl);
+ if (ret)
+ return ret;
+ /* Insert the new rule in the list of vcap rules */
+ mutex_lock(&ri->admin->lock);
+
+ vcap_rule_set_state(ri);
+ ret = vcap_insert_rule(ri, &move);
+ if (ret < 0) {
+ pr_err("%s:%d: could not insert rule in vcap list: %d\n",
+ __func__, __LINE__, ret);
+ goto out;
+ }
+ if (move.count > 0)
+ vcap_move_rules(ri, &move);
+
+ /* Set the counter to zero */
+ ret = vcap_write_counter(ri, &ctr);
+ if (ret)
+ goto out;
+
+ if (ri->state == VCAP_RS_DISABLED) {
+ /* Erase the rule area */
+ ri->vctrl->ops->init(ri->ndev, ri->admin, ri->addr, ri->size);
+ goto out;
+ }
+
+ vcap_erase_cache(ri);
+ ret = vcap_encode_rule(ri);
+ if (ret) {
+ pr_err("%s:%d: rule encoding error: %d\n", __func__, __LINE__, ret);
+ goto out;
+ }
+
+ ret = vcap_write_rule(ri);
+ if (ret) {
+ pr_err("%s:%d: rule write error: %d\n", __func__, __LINE__, ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&ri->admin->lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(vcap_add_rule);
+
+/* Allocate a new rule with the provided arguments */
+struct vcap_rule *vcap_alloc_rule(struct vcap_control *vctrl,
+ struct net_device *ndev, int vcap_chain_id,
+ enum vcap_user user, u16 priority,
+ u32 id)
+{
+ struct vcap_rule_internal *ri;
+ struct vcap_admin *admin;
+ int err, maxsize;
+
+ err = vcap_api_check(vctrl);
+ if (err)
+ return ERR_PTR(err);
+ if (!ndev)
+ return ERR_PTR(-ENODEV);
+ /* Get the VCAP instance */
+ admin = vcap_find_admin(vctrl, vcap_chain_id);
+ if (!admin)
+ return ERR_PTR(-ENOENT);
+ /* Sanity check that this VCAP is supported on this platform */
+ if (vctrl->vcaps[admin->vtype].rows == 0)
+ return ERR_PTR(-EINVAL);
+
+ mutex_lock(&admin->lock);
+ /* Check if a rule with this id already exists */
+ if (vcap_rule_exists(vctrl, id)) {
+ err = -EINVAL;
+ goto out_unlock;
+ }
+
+ /* Check if there is room for the rule in the block(s) of the VCAP */
+ maxsize = vctrl->vcaps[admin->vtype].sw_count; /* worst case rule size */
+ if (vcap_rule_space(admin, maxsize)) {
+ err = -ENOSPC;
+ goto out_unlock;
+ }
+
+ /* Create a container for the rule and return it */
+ ri = kzalloc(sizeof(*ri), GFP_KERNEL);
+ if (!ri) {
+ err = -ENOMEM;
+ goto out_unlock;
+ }
+
+ ri->data.vcap_chain_id = vcap_chain_id;
+ ri->data.user = user;
+ ri->data.priority = priority;
+ ri->data.id = id;
+ ri->data.keyset = VCAP_KFS_NO_VALUE;
+ ri->data.actionset = VCAP_AFS_NO_VALUE;
+ INIT_LIST_HEAD(&ri->list);
+ INIT_LIST_HEAD(&ri->data.keyfields);
+ INIT_LIST_HEAD(&ri->data.actionfields);
+ ri->ndev = ndev;
+ ri->admin = admin; /* refer to the vcap instance */
+ ri->vctrl = vctrl; /* refer to the client */
+
+ if (vcap_set_rule_id(ri) == 0) {
+ err = -EINVAL;
+ goto out_free;
+ }
+
+ mutex_unlock(&admin->lock);
+ return (struct vcap_rule *)ri;
+
+out_free:
+ kfree(ri);
+out_unlock:
+ mutex_unlock(&admin->lock);
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL_GPL(vcap_alloc_rule);
+
+/* Free the memory of a rule owned by a client after the rule has been added to the VCAP */
+void vcap_free_rule(struct vcap_rule *rule)
+{
+ struct vcap_rule_internal *ri = to_intrule(rule);
+ struct vcap_client_actionfield *caf, *next_caf;
+ struct vcap_client_keyfield *ckf, *next_ckf;
+
+ /* Deallocate the list of keys and actions */
+ list_for_each_entry_safe(ckf, next_ckf, &ri->data.keyfields, ctrl.list) {
+ list_del(&ckf->ctrl.list);
+ kfree(ckf);
+ }
+ list_for_each_entry_safe(caf, next_caf, &ri->data.actionfields, ctrl.list) {
+ list_del(&caf->ctrl.list);
+ kfree(caf);
+ }
+ /* Deallocate the rule */
+ kfree(rule);
+}
+EXPORT_SYMBOL_GPL(vcap_free_rule);
+
+/* Decode a rule from the VCAP cache and return a copy */
+struct vcap_rule *vcap_decode_rule(struct vcap_rule_internal *elem)
+{
+ struct vcap_rule_internal *ri;
+ int err;
+
+ ri = vcap_dup_rule(elem, elem->state == VCAP_RS_DISABLED);
+ if (IS_ERR(ri))
+ return ERR_CAST(ri);
+
+ if (ri->state == VCAP_RS_DISABLED)
+ goto out;
+
+ err = vcap_read_rule(ri);
+ if (err)
+ goto err_free;
+
+ err = vcap_decode_keyset(ri);
+ if (err)
+ goto err_free;
+
+ err = vcap_decode_actionset(ri);
+ if (err)
+ goto err_free;
+
+out:
+ return &ri->data;
+
+err_free:
+ /* Drop the duplicated rule and its fields on decode errors */
+ vcap_free_rule(&ri->data);
+ return ERR_PTR(err);
+}
+
+struct vcap_rule *vcap_get_rule(struct vcap_control *vctrl, u32 id)
+{
+ struct vcap_rule_internal *elem;
+ struct vcap_rule *rule;
+ int err;
+
+ err = vcap_api_check(vctrl);
+ if (err)
+ return ERR_PTR(err);
+
+ elem = vcap_get_locked_rule(vctrl, id);
+ if (!elem)
+ return ERR_PTR(-ENOENT);
+
+ rule = vcap_decode_rule(elem);
+ mutex_unlock(&elem->admin->lock);
+ return rule;
+}
+EXPORT_SYMBOL_GPL(vcap_get_rule);
+
+/* Update existing rule */
+int vcap_mod_rule(struct vcap_rule *rule)
+{
+ struct vcap_rule_internal *ri = to_intrule(rule);
+ struct vcap_counter ctr;
+ int err;
+
+ err = vcap_api_check(ri->vctrl);
+ if (err)
+ return err;
+
+ if (!vcap_get_locked_rule(ri->vctrl, ri->data.id))
+ return -ENOENT;
+
+ vcap_rule_set_state(ri);
+ if (ri->state == VCAP_RS_DISABLED)
+ goto out;
+
+ /* Encode the bitstreams to the VCAP cache */
+ vcap_erase_cache(ri);
+ err = vcap_encode_rule(ri);
+ if (err)
+ goto out;
+
+ err = vcap_write_rule(ri);
+ if (err)
+ goto out;
+
+ memset(&ctr, 0, sizeof(ctr));
+ err = vcap_write_counter(ri, &ctr);
+
+out:
+ mutex_unlock(&ri->admin->lock);
+ return err;
+}
+EXPORT_SYMBOL_GPL(vcap_mod_rule);
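+
+/* Illustrative sketch (not part of the driver): fetch a decoded copy of
+ * an installed rule, change a 32 bit key and write the rule back.
+ * "my_vctrl" and "my_id" are hypothetical client values.
+ *
+ *	struct vcap_rule *rule;
+ *	int err;
+ *
+ *	rule = vcap_get_rule(my_vctrl, my_id);
+ *	if (IS_ERR(rule))
+ *		return PTR_ERR(rule);
+ *	err = vcap_rule_mod_key_u32(rule, VCAP_KF_L3_IP4_DIP,
+ *				    0x0a000001, ~0);
+ *	if (!err)
+ *		err = vcap_mod_rule(rule);
+ *	vcap_free_rule(rule);
+ *	return err;
+ */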
+
+/* Return the misalignment of a rule address after moving it by an offset;
+ * zero means the move keeps the rule aligned to its size
+ */
+static int vcap_valid_rule_move(struct vcap_rule_internal *el, int offset)
+{
+ return (el->addr + offset) % el->size;
+}
+
+/* Update the rule address with an offset */
+static void vcap_adjust_rule_addr(struct vcap_rule_internal *el, int offset)
+{
+ el->addr += offset;
+}
+
+/* Rules need to be moved to fill the gap left by the deleted rule */
+static int vcap_fill_rule_gap(struct vcap_rule_internal *ri)
+{
+ struct vcap_admin *admin = ri->admin;
+ struct vcap_rule_internal *elem;
+ struct vcap_rule_move move;
+ int gap = 0, offset = 0;
+
+ /* If the first rule is deleted: Move other rules to the top */
+ if (list_is_first(&ri->list, &admin->rules))
+ offset = admin->last_valid_addr + 1 - ri->addr - ri->size;
+
+ /* Locate gaps between odd size rules and adjust the move */
+ elem = ri;
+ list_for_each_entry_continue(elem, &admin->rules, list)
+ gap += vcap_valid_rule_move(elem, ri->size);
+
+ /* Update the address in the remaining rules in the list */
+ elem = ri;
+ list_for_each_entry_continue(elem, &admin->rules, list)
+ vcap_adjust_rule_addr(elem, ri->size + gap + offset);
+
+ /* Update the move info */
+ move.addr = admin->last_used_addr;
+ move.count = ri->addr - admin->last_used_addr - gap;
+ move.offset = -(ri->size + gap + offset);
+
+ /* Do the actual move operation */
+ vcap_move_rules(ri, &move);
+
+ return gap + offset;
+}
+
+/* Delete rule in a VCAP instance */
+int vcap_del_rule(struct vcap_control *vctrl, struct net_device *ndev, u32 id)
+{
+ struct vcap_rule_internal *ri, *elem;
+ struct vcap_admin *admin;
+ int gap = 0, err;
+
+ if (!ndev)
+ return -ENODEV;
+ err = vcap_api_check(vctrl);
+ if (err)
+ return err;
+ /* Look for the rule id in all vcaps */
+ ri = vcap_get_locked_rule(vctrl, id);
+ if (!ri)
+ return -ENOENT;
+
+ admin = ri->admin;
+
+ if (ri->addr > admin->last_used_addr)
+ gap = vcap_fill_rule_gap(ri);
+
+ /* Delete the rule from the list of rules and the cache */
+ list_del(&ri->list);
+ vctrl->ops->init(ndev, admin, admin->last_used_addr, ri->size + gap);
+ vcap_free_rule(&ri->data);
+
+ /* Update the last used address, set to default when no rules */
+ if (list_empty(&admin->rules)) {
+ admin->last_used_addr = admin->last_valid_addr + 1;
+ } else {
+ elem = list_last_entry(&admin->rules, struct vcap_rule_internal,
+ list);
+ admin->last_used_addr = elem->addr;
+ }
+
+ mutex_unlock(&admin->lock);
+ return err;
+}
+EXPORT_SYMBOL_GPL(vcap_del_rule);
+
+/* Delete all rules in the VCAP instance */
+int vcap_del_rules(struct vcap_control *vctrl, struct vcap_admin *admin)
+{
+ struct vcap_enabled_port *eport, *next_eport;
+ struct vcap_rule_internal *ri, *next_ri;
+ int ret = vcap_api_check(vctrl);
+
+ if (ret)
+ return ret;
+
+ mutex_lock(&admin->lock);
+ list_for_each_entry_safe(ri, next_ri, &admin->rules, list) {
+ vctrl->ops->init(ri->ndev, admin, ri->addr, ri->size);
+ list_del(&ri->list);
+ vcap_free_rule(&ri->data);
+ }
+ admin->last_used_addr = admin->last_valid_addr;
+
+ /* Remove list of enabled ports */
+ list_for_each_entry_safe(eport, next_eport, &admin->enabled, list) {
+ list_del(&eport->list);
+ kfree(eport);
+ }
+ mutex_unlock(&admin->lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vcap_del_rules);
+
+/* Find a client key field in a rule */
+static struct vcap_client_keyfield *
+vcap_find_keyfield(struct vcap_rule *rule, enum vcap_key_field key)
+{
+ struct vcap_rule_internal *ri = to_intrule(rule);
+ struct vcap_client_keyfield *ckf;
+
+ list_for_each_entry(ckf, &ri->data.keyfields, ctrl.list)
+ if (ckf->ctrl.key == key)
+ return ckf;
+ return NULL;
+}
+
+/* Find information on a key field in a rule */
+const struct vcap_field *vcap_lookup_keyfield(struct vcap_rule *rule,
+ enum vcap_key_field key)
+{
+ struct vcap_rule_internal *ri = to_intrule(rule);
+ enum vcap_keyfield_set keyset = rule->keyset;
+ enum vcap_type vt = ri->admin->vtype;
+ const struct vcap_field *fields;
+
+ if (keyset == VCAP_KFS_NO_VALUE)
+ return NULL;
+ fields = vcap_keyfields(ri->vctrl, vt, keyset);
+ if (!fields)
+ return NULL;
+ return &fields[key];
+}
+EXPORT_SYMBOL_GPL(vcap_lookup_keyfield);
+
+/* Check if the keyfield is already in the rule */
+static bool vcap_keyfield_unique(struct vcap_rule *rule,
+ enum vcap_key_field key)
+{
+ struct vcap_rule_internal *ri = to_intrule(rule);
+ const struct vcap_client_keyfield *ckf;
+
+ list_for_each_entry(ckf, &ri->data.keyfields, ctrl.list)
+ if (ckf->ctrl.key == key)
+ return false;
+ return true;
+}
+
+/* Check if the keyfield is in the keyset */
+static bool vcap_keyfield_match_keyset(struct vcap_rule *rule,
+ enum vcap_key_field key)
+{
+ struct vcap_rule_internal *ri = to_intrule(rule);
+ enum vcap_keyfield_set keyset = rule->keyset;
+ enum vcap_type vt = ri->admin->vtype;
+ const struct vcap_field *fields;
+
+ /* the field is accepted if the rule has no keyset yet */
+ if (keyset == VCAP_KFS_NO_VALUE)
+ return true;
+ fields = vcap_keyfields(ri->vctrl, vt, keyset);
+ if (!fields)
+ return false;
+ /* if there is a width there is a way */
+ return fields[key].width > 0;
+}
+
+static int vcap_rule_add_key(struct vcap_rule *rule,
+ enum vcap_key_field key,
+ enum vcap_field_type ftype,
+ struct vcap_client_keyfield_data *data)
+{
+ struct vcap_rule_internal *ri = to_intrule(rule);
+ struct vcap_client_keyfield *field;
+
+ if (!vcap_keyfield_unique(rule, key)) {
+ pr_warn("%s:%d: keyfield %s is already in the rule\n",
+ __func__, __LINE__,
+ vcap_keyfield_name(ri->vctrl, key));
+ return -EINVAL;
+ }
+
+ if (!vcap_keyfield_match_keyset(rule, key)) {
+ pr_err("%s:%d: keyfield %s does not belong in the rule keyset\n",
+ __func__, __LINE__,
+ vcap_keyfield_name(ri->vctrl, key));
+ return -EINVAL;
+ }
+
+ field = kzalloc(sizeof(*field), GFP_KERNEL);
+ if (!field)
+ return -ENOMEM;
+ memcpy(&field->data, data, sizeof(field->data));
+ field->ctrl.key = key;
+ field->ctrl.type = ftype;
+ list_add_tail(&field->ctrl.list, &rule->keyfields);
+ return 0;
+}
+
+static void vcap_rule_set_key_bitsize(struct vcap_u1_key *u1, enum vcap_bit val)
+{
+ switch (val) {
+ case VCAP_BIT_0:
+ u1->value = 0;
+ u1->mask = 1;
+ break;
+ case VCAP_BIT_1:
+ u1->value = 1;
+ u1->mask = 1;
+ break;
+ case VCAP_BIT_ANY:
+ u1->value = 0;
+ u1->mask = 0;
+ break;
+ }
+}
+
+/* Add a bit key with value and mask to the rule */
+int vcap_rule_add_key_bit(struct vcap_rule *rule, enum vcap_key_field key,
+ enum vcap_bit val)
+{
+ struct vcap_client_keyfield_data data;
+
+ vcap_rule_set_key_bitsize(&data.u1, val);
+ return vcap_rule_add_key(rule, key, VCAP_FIELD_BIT, &data);
+}
+EXPORT_SYMBOL_GPL(vcap_rule_add_key_bit);
+
+/* Add a 32 bit key field with value and mask to the rule */
+int vcap_rule_add_key_u32(struct vcap_rule *rule, enum vcap_key_field key,
+ u32 value, u32 mask)
+{
+ struct vcap_client_keyfield_data data;
+
+ data.u32.value = value;
+ data.u32.mask = mask;
+ return vcap_rule_add_key(rule, key, VCAP_FIELD_U32, &data);
+}
+EXPORT_SYMBOL_GPL(vcap_rule_add_key_u32);
+
+/* Add a 48 bit key with value and mask to the rule */
+int vcap_rule_add_key_u48(struct vcap_rule *rule, enum vcap_key_field key,
+ struct vcap_u48_key *fieldval)
+{
+ struct vcap_client_keyfield_data data;
+
+ memcpy(&data.u48, fieldval, sizeof(data.u48));
+ return vcap_rule_add_key(rule, key, VCAP_FIELD_U48, &data);
+}
+EXPORT_SYMBOL_GPL(vcap_rule_add_key_u48);
+
+/* Add a 72 bit key with value and mask to the rule */
+int vcap_rule_add_key_u72(struct vcap_rule *rule, enum vcap_key_field key,
+ struct vcap_u72_key *fieldval)
+{
+ struct vcap_client_keyfield_data data;
+
+ memcpy(&data.u72, fieldval, sizeof(data.u72));
+ return vcap_rule_add_key(rule, key, VCAP_FIELD_U72, &data);
+}
+EXPORT_SYMBOL_GPL(vcap_rule_add_key_u72);
+
+/* Add a 128 bit key with value and mask to the rule */
+int vcap_rule_add_key_u128(struct vcap_rule *rule, enum vcap_key_field key,
+ struct vcap_u128_key *fieldval)
+{
+ struct vcap_client_keyfield_data data;
+
+ memcpy(&data.u128, fieldval, sizeof(data.u128));
+ return vcap_rule_add_key(rule, key, VCAP_FIELD_U128, &data);
+}
+EXPORT_SYMBOL_GPL(vcap_rule_add_key_u128);
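+
+/* Example (illustrative): matching an exact IPv6 destination with a
+ * u128 key; "rule" and "dip_bytes" (16 bytes, network order) are
+ * hypothetical:
+ *
+ *	struct vcap_u128_key dip = {};
+ *
+ *	memcpy(dip.value, dip_bytes, sizeof(dip.value));
+ *	memset(dip.mask, 0xff, sizeof(dip.mask));
+ *	vcap_rule_add_key_u128(rule, VCAP_KF_L3_IP6_DIP, &dip);
+ */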
+
+int vcap_rule_get_key_u32(struct vcap_rule *rule, enum vcap_key_field key,
+ u32 *value, u32 *mask)
+{
+ struct vcap_client_keyfield *ckf;
+
+ ckf = vcap_find_keyfield(rule, key);
+ if (!ckf)
+ return -ENOENT;
+
+ *value = ckf->data.u32.value;
+ *mask = ckf->data.u32.mask;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vcap_rule_get_key_u32);
+
+/* Find a client action field in a rule */
+struct vcap_client_actionfield *
+vcap_find_actionfield(struct vcap_rule *rule, enum vcap_action_field act)
+{
+ struct vcap_rule_internal *ri = (struct vcap_rule_internal *)rule;
+ struct vcap_client_actionfield *caf;
+
+ list_for_each_entry(caf, &ri->data.actionfields, ctrl.list)
+ if (caf->ctrl.action == act)
+ return caf;
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(vcap_find_actionfield);
+
+/* Check if the actionfield is already in the rule */
+static bool vcap_actionfield_unique(struct vcap_rule *rule,
+ enum vcap_action_field act)
+{
+ struct vcap_rule_internal *ri = to_intrule(rule);
+ const struct vcap_client_actionfield *caf;
+
+ list_for_each_entry(caf, &ri->data.actionfields, ctrl.list)
+ if (caf->ctrl.action == act)
+ return false;
+ return true;
+}
+
+/* Check if the actionfield is in the actionset */
+static bool vcap_actionfield_match_actionset(struct vcap_rule *rule,
+ enum vcap_action_field action)
+{
+ enum vcap_actionfield_set actionset = rule->actionset;
+ struct vcap_rule_internal *ri = to_intrule(rule);
+ enum vcap_type vt = ri->admin->vtype;
+ const struct vcap_field *fields;
+
+ /* the field is accepted if the rule has no actionset yet */
+ if (actionset == VCAP_AFS_NO_VALUE)
+ return true;
+ fields = vcap_actionfields(ri->vctrl, vt, actionset);
+ if (!fields)
+ return false;
+ /* if there is a width there is a way */
+ return fields[action].width > 0;
+}
+
+static int vcap_rule_add_action(struct vcap_rule *rule,
+ enum vcap_action_field action,
+ enum vcap_field_type ftype,
+ struct vcap_client_actionfield_data *data)
+{
+ struct vcap_rule_internal *ri = to_intrule(rule);
+ struct vcap_client_actionfield *field;
+
+ if (!vcap_actionfield_unique(rule, action)) {
+ pr_warn("%s:%d: actionfield %s is already in the rule\n",
+ __func__, __LINE__,
+ vcap_actionfield_name(ri->vctrl, action));
+ return -EINVAL;
+ }
+
+ if (!vcap_actionfield_match_actionset(rule, action)) {
+ pr_err("%s:%d: actionfield %s does not belong in the rule actionset\n",
+ __func__, __LINE__,
+ vcap_actionfield_name(ri->vctrl, action));
+ return -EINVAL;
+ }
+
+ field = kzalloc(sizeof(*field), GFP_KERNEL);
+ if (!field)
+ return -ENOMEM;
+ memcpy(&field->data, data, sizeof(field->data));
+ field->ctrl.action = action;
+ field->ctrl.type = ftype;
+ list_add_tail(&field->ctrl.list, &rule->actionfields);
+ return 0;
+}
+
+static void vcap_rule_set_action_bitsize(struct vcap_u1_action *u1,
+ enum vcap_bit val)
+{
+ switch (val) {
+ case VCAP_BIT_0:
+ u1->value = 0;
+ break;
+ case VCAP_BIT_1:
+ u1->value = 1;
+ break;
+ case VCAP_BIT_ANY:
+ u1->value = 0;
+ break;
+ }
+}
+
+/* Add a bit action with value to the rule */
+int vcap_rule_add_action_bit(struct vcap_rule *rule,
+ enum vcap_action_field action,
+ enum vcap_bit val)
+{
+ struct vcap_client_actionfield_data data;
+
+ vcap_rule_set_action_bitsize(&data.u1, val);
+ return vcap_rule_add_action(rule, action, VCAP_FIELD_BIT, &data);
+}
+EXPORT_SYMBOL_GPL(vcap_rule_add_action_bit);
+
+/* Add a 32 bit action field with value to the rule */
+int vcap_rule_add_action_u32(struct vcap_rule *rule,
+ enum vcap_action_field action,
+ u32 value)
+{
+ struct vcap_client_actionfield_data data;
+
+ data.u32.value = value;
+ return vcap_rule_add_action(rule, action, VCAP_FIELD_U32, &data);
+}
+EXPORT_SYMBOL_GPL(vcap_rule_add_action_u32);
+
+static int vcap_read_counter(struct vcap_rule_internal *ri,
+ struct vcap_counter *ctr)
+{
+ struct vcap_admin *admin = ri->admin;
+
+ ri->vctrl->ops->update(ri->ndev, admin, VCAP_CMD_READ, VCAP_SEL_COUNTER,
+ ri->addr);
+ ri->vctrl->ops->cache_read(ri->ndev, admin, VCAP_SEL_COUNTER,
+ ri->counter_id, 0);
+ ctr->value = admin->cache.counter;
+ ctr->sticky = admin->cache.sticky;
+ return 0;
+}
+
+/* Copy to host byte order */
+void vcap_netbytes_copy(u8 *dst, u8 *src, int count)
+{
+ int idx;
+
+ for (idx = 0; idx < count; ++idx, ++dst)
+ *dst = src[count - idx - 1];
+}
+EXPORT_SYMBOL_GPL(vcap_netbytes_copy);
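+
+/* Example: copying the bytes {0x01, 0x02, 0x03, 0x04} with
+ * vcap_netbytes_copy() stores {0x04, 0x03, 0x02, 0x01} in the
+ * destination, i.e. the source byte order is reversed.
+ */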
+
+/* Convert validation error code into tc extack error message */
+void vcap_set_tc_exterr(struct flow_cls_offload *fco, struct vcap_rule *vrule)
+{
+ switch (vrule->exterr) {
+ case VCAP_ERR_NONE:
+ break;
+ case VCAP_ERR_NO_ADMIN:
+ NL_SET_ERR_MSG_MOD(fco->common.extack,
+ "Missing VCAP instance");
+ break;
+ case VCAP_ERR_NO_NETDEV:
+ NL_SET_ERR_MSG_MOD(fco->common.extack,
+ "Missing network interface");
+ break;
+ case VCAP_ERR_NO_KEYSET_MATCH:
+ NL_SET_ERR_MSG_MOD(fco->common.extack,
+ "No keyset matched the filter keys");
+ break;
+ case VCAP_ERR_NO_ACTIONSET_MATCH:
+ NL_SET_ERR_MSG_MOD(fco->common.extack,
+ "No actionset matched the filter actions");
+ break;
+ case VCAP_ERR_NO_PORT_KEYSET_MATCH:
+ NL_SET_ERR_MSG_MOD(fco->common.extack,
+ "No port keyset matched the filter keys");
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(vcap_set_tc_exterr);
+
+/* Write a rule to VCAP HW to enable it */
+static int vcap_enable_rule(struct vcap_rule_internal *ri)
+{
+ struct vcap_client_actionfield *af, *naf;
+ struct vcap_client_keyfield *kf, *nkf;
+ int err;
+
+ vcap_erase_cache(ri);
+ err = vcap_encode_rule(ri);
+ if (err)
+ goto out;
+ err = vcap_write_rule(ri);
+ if (err)
+ goto out;
+
+ /* Deallocate the list of keys and actions */
+ list_for_each_entry_safe(kf, nkf, &ri->data.keyfields, ctrl.list) {
+ list_del(&kf->ctrl.list);
+ kfree(kf);
+ }
+ list_for_each_entry_safe(af, naf, &ri->data.actionfields, ctrl.list) {
+ list_del(&af->ctrl.list);
+ kfree(af);
+ }
+ ri->state = VCAP_RS_ENABLED;
+out:
+ return err;
+}
+
+/* Enable all disabled rules for a specific chain/port in the VCAP HW */
+static int vcap_enable_rules(struct vcap_control *vctrl,
+ struct net_device *ndev, int chain)
+{
+ int next_chain = chain + VCAP_CID_LOOKUP_SIZE;
+ struct vcap_rule_internal *ri;
+ struct vcap_admin *admin;
+ int err = 0;
+
+ list_for_each_entry(admin, &vctrl->list, list) {
+ if (!(chain >= admin->first_cid && chain <= admin->last_cid))
+ continue;
+
+ /* Found the admin, now find the offloadable rules */
+ mutex_lock(&admin->lock);
+ list_for_each_entry(ri, &admin->rules, list) {
+ /* Is the rule in the lookup defined by the chain */
+ if (!(ri->data.vcap_chain_id >= chain &&
+ ri->data.vcap_chain_id < next_chain)) {
+ continue;
+ }
+
+ if (ri->ndev != ndev)
+ continue;
+
+ if (ri->state != VCAP_RS_DISABLED)
+ continue;
+
+ err = vcap_enable_rule(ri);
+ if (err)
+ break;
+ }
+ mutex_unlock(&admin->lock);
+ if (err)
+ break;
+ }
+ return err;
+}
+
+/* Read and erase a rule from VCAP HW to disable it */
+static int vcap_disable_rule(struct vcap_rule_internal *ri)
+{
+ int err;
+
+ err = vcap_read_rule(ri);
+ if (err)
+ return err;
+ err = vcap_decode_keyset(ri);
+ if (err)
+ return err;
+ err = vcap_decode_actionset(ri);
+ if (err)
+ return err;
+
+ ri->state = VCAP_RS_DISABLED;
+ ri->vctrl->ops->init(ri->ndev, ri->admin, ri->addr, ri->size);
+ return 0;
+}
+
+/* Disable all enabled rules for a specific chain/port in the VCAP HW */
+static int vcap_disable_rules(struct vcap_control *vctrl,
+ struct net_device *ndev, int chain)
+{
+ struct vcap_rule_internal *ri;
+ struct vcap_admin *admin;
+ int err = 0;
+
+ list_for_each_entry(admin, &vctrl->list, list) {
+ if (!(chain >= admin->first_cid && chain <= admin->last_cid))
+ continue;
+
+ /* Found the admin, now find the rules on the chain */
+ mutex_lock(&admin->lock);
+ list_for_each_entry(ri, &admin->rules, list) {
+ if (ri->data.vcap_chain_id != chain)
+ continue;
+
+ if (ri->ndev != ndev)
+ continue;
+
+ if (ri->state != VCAP_RS_ENABLED)
+ continue;
+
+ err = vcap_disable_rule(ri);
+ if (err)
+ break;
+ }
+ mutex_unlock(&admin->lock);
+ if (err)
+ break;
+ }
+ return err;
+}
+
+/* Check if this port is already enabled for this VCAP instance */
+static bool vcap_is_enabled(struct vcap_control *vctrl, struct net_device *ndev,
+ int dst_cid)
+{
+ struct vcap_enabled_port *eport;
+ struct vcap_admin *admin;
+
+ list_for_each_entry(admin, &vctrl->list, list)
+ list_for_each_entry(eport, &admin->enabled, list)
+ if (eport->dst_cid == dst_cid && eport->ndev == ndev)
+ return true;
+
+ return false;
+}
+
+/* Enable this port and chain id in a VCAP instance */
+static int vcap_enable(struct vcap_control *vctrl, struct net_device *ndev,
+ unsigned long cookie, int src_cid, int dst_cid)
+{
+ struct vcap_enabled_port *eport;
+ struct vcap_admin *admin;
+
+ if (src_cid >= dst_cid)
+ return -EFAULT;
+
+ admin = vcap_find_admin(vctrl, dst_cid);
+ if (!admin)
+ return -ENOENT;
+
+ eport = kzalloc(sizeof(*eport), GFP_KERNEL);
+ if (!eport)
+ return -ENOMEM;
+
+ eport->ndev = ndev;
+ eport->cookie = cookie;
+ eport->src_cid = src_cid;
+ eport->dst_cid = dst_cid;
+ mutex_lock(&admin->lock);
+ list_add_tail(&eport->list, &admin->enabled);
+ mutex_unlock(&admin->lock);
+
+ if (vcap_path_exist(vctrl, ndev, src_cid)) {
+ /* Enable chained lookups */
+ while (dst_cid) {
+ admin = vcap_find_admin(vctrl, dst_cid);
+ if (!admin)
+ return -ENOENT;
+
+ vcap_enable_rules(vctrl, ndev, dst_cid);
+ dst_cid = vcap_get_next_chain(vctrl, ndev, dst_cid);
+ }
+ }
+ return 0;
+}
+
+/* Disable this port and chain id for a VCAP instance */
+static int vcap_disable(struct vcap_control *vctrl, struct net_device *ndev,
+ unsigned long cookie)
+{
+ struct vcap_enabled_port *elem, *eport = NULL;
+ struct vcap_admin *found = NULL, *admin;
+ int dst_cid;
+
+ list_for_each_entry(admin, &vctrl->list, list) {
+ list_for_each_entry(elem, &admin->enabled, list) {
+ if (elem->cookie == cookie && elem->ndev == ndev) {
+ eport = elem;
+ found = admin;
+ break;
+ }
+ }
+ if (eport)
+ break;
+ }
+
+ if (!eport)
+ return -ENOENT;
+
+ /* Disable chained lookups */
+ dst_cid = eport->dst_cid;
+ while (dst_cid) {
+ admin = vcap_find_admin(vctrl, dst_cid);
+ if (!admin)
+ return -ENOENT;
+
+ vcap_disable_rules(vctrl, ndev, dst_cid);
+ dst_cid = vcap_get_next_chain(vctrl, ndev, dst_cid);
+ }
+
+ mutex_lock(&found->lock);
+ list_del(&eport->list);
+ mutex_unlock(&found->lock);
+ kfree(eport);
+ return 0;
+}
+
+/* Enable/Disable the VCAP instance lookups */
+int vcap_enable_lookups(struct vcap_control *vctrl, struct net_device *ndev,
+ int src_cid, int dst_cid, unsigned long cookie,
+ bool enable)
+{
+ int err;
+
+ err = vcap_api_check(vctrl);
+ if (err)
+ return err;
+
+ if (!ndev)
+ return -ENODEV;
+
+ /* Source and destination must be the first chain in a lookup */
+ if (src_cid % VCAP_CID_LOOKUP_SIZE)
+ return -EFAULT;
+ if (dst_cid % VCAP_CID_LOOKUP_SIZE)
+ return -EFAULT;
+
+ if (enable) {
+ if (vcap_is_enabled(vctrl, ndev, dst_cid))
+ return -EADDRINUSE;
+ if (vcap_is_chain_used(vctrl, ndev, src_cid))
+ return -EADDRNOTAVAIL;
+ err = vcap_enable(vctrl, ndev, cookie, src_cid, dst_cid);
+ } else {
+ err = vcap_disable(vctrl, ndev, cookie);
+ }
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(vcap_enable_lookups);
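+
+/* Illustrative sketch: enabling the path from chain 0 into the first
+ * ingress lookup for a port, and disabling it again with the same
+ * cookie.  "my_vctrl", "my_ndev" and "my_cookie" are hypothetical.
+ *
+ *	err = vcap_enable_lookups(my_vctrl, my_ndev, 0,
+ *				  VCAP_CID_INGRESS_L0, my_cookie, true);
+ *	...
+ *	err = vcap_enable_lookups(my_vctrl, my_ndev, 0,
+ *				  VCAP_CID_INGRESS_L0, my_cookie, false);
+ */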
+
+/* Is this chain id the last lookup of all VCAPs */
+bool vcap_is_last_chain(struct vcap_control *vctrl, int cid, bool ingress)
+{
+ struct vcap_admin *admin;
+ int lookup;
+
+ if (vcap_api_check(vctrl))
+ return false;
+
+ admin = vcap_find_admin(vctrl, cid);
+ if (!admin)
+ return false;
+
+ if (!vcap_admin_is_last(vctrl, admin, ingress))
+ return false;
+
+ /* This must be the last lookup in this VCAP type */
+ lookup = vcap_chain_id_to_lookup(admin, cid);
+ return lookup == admin->lookups - 1;
+}
+EXPORT_SYMBOL_GPL(vcap_is_last_chain);
+
+/* Set a rule counter id (for certain vcaps only) */
+void vcap_rule_set_counter_id(struct vcap_rule *rule, u32 counter_id)
+{
+ struct vcap_rule_internal *ri = to_intrule(rule);
+
+ ri->counter_id = counter_id;
+}
+EXPORT_SYMBOL_GPL(vcap_rule_set_counter_id);
+
+int vcap_rule_set_counter(struct vcap_rule *rule, struct vcap_counter *ctr)
+{
+ struct vcap_rule_internal *ri = to_intrule(rule);
+ int err;
+
+ err = vcap_api_check(ri->vctrl);
+ if (err)
+ return err;
+ if (!ctr) {
+ pr_err("%s:%d: counter is missing\n", __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ mutex_lock(&ri->admin->lock);
+ err = vcap_write_counter(ri, ctr);
+ mutex_unlock(&ri->admin->lock);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(vcap_rule_set_counter);
+
+int vcap_rule_get_counter(struct vcap_rule *rule, struct vcap_counter *ctr)
+{
+ struct vcap_rule_internal *ri = to_intrule(rule);
+ int err;
+
+ err = vcap_api_check(ri->vctrl);
+ if (err)
+ return err;
+ if (!ctr) {
+ pr_err("%s:%d: counter is missing\n", __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ mutex_lock(&ri->admin->lock);
+ err = vcap_read_counter(ri, ctr);
+ mutex_unlock(&ri->admin->lock);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(vcap_rule_get_counter);
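+
+/* Illustrative sketch: read a rule counter and then clear it so the
+ * next read only reports new hits.  "rule" is a rule obtained earlier
+ * by the client (hypothetical).
+ *
+ *	struct vcap_counter ctr = {};
+ *
+ *	if (!vcap_rule_get_counter(rule, &ctr))
+ *		pr_info("hits: %u, sticky: %d\n", ctr.value, ctr.sticky);
+ *	ctr.value = 0;
+ *	ctr.sticky = false;
+ *	vcap_rule_set_counter(rule, &ctr);
+ */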
+
+/* Get a copy of a client key field */
+static int vcap_rule_get_key(struct vcap_rule *rule,
+ enum vcap_key_field key,
+ struct vcap_client_keyfield *ckf)
+{
+ struct vcap_client_keyfield *field;
+
+ field = vcap_find_keyfield(rule, key);
+ if (!field)
+ return -EINVAL;
+ memcpy(ckf, field, sizeof(*ckf));
+ INIT_LIST_HEAD(&ckf->ctrl.list);
+ return 0;
+}
+
+/* Find a keyset having the same size as the provided rule, where the keyset
+ * does not have a type id.
+ */
+static int vcap_rule_get_untyped_keyset(struct vcap_rule_internal *ri,
+ struct vcap_keyset_list *matches)
+{
+ struct vcap_control *vctrl = ri->vctrl;
+ enum vcap_type vt = ri->admin->vtype;
+ const struct vcap_set *keyfield_set;
+ int idx;
+
+ keyfield_set = vctrl->vcaps[vt].keyfield_set;
+ for (idx = 0; idx < vctrl->vcaps[vt].keyfield_set_size; ++idx) {
+ if (keyfield_set[idx].sw_per_item == ri->keyset_sw &&
+ keyfield_set[idx].type_id == (u8)-1) {
+ vcap_keyset_list_add(matches, idx);
+ return 0;
+ }
+ }
+ return -EINVAL;
+}
+
+/* Get the keysets that matches the rule key type/mask */
+int vcap_rule_get_keysets(struct vcap_rule_internal *ri,
+ struct vcap_keyset_list *matches)
+{
+ struct vcap_control *vctrl = ri->vctrl;
+ enum vcap_type vt = ri->admin->vtype;
+ const struct vcap_set *keyfield_set;
+ struct vcap_client_keyfield kf = {};
+ u32 value, mask;
+ int err, idx;
+
+ err = vcap_rule_get_key(&ri->data, VCAP_KF_TYPE, &kf);
+ if (err)
+ return vcap_rule_get_untyped_keyset(ri, matches);
+
+ if (kf.ctrl.type == VCAP_FIELD_BIT) {
+ value = kf.data.u1.value;
+ mask = kf.data.u1.mask;
+ } else if (kf.ctrl.type == VCAP_FIELD_U32) {
+ value = kf.data.u32.value;
+ mask = kf.data.u32.mask;
+ } else {
+ return -EINVAL;
+ }
+
+ keyfield_set = vctrl->vcaps[vt].keyfield_set;
+ for (idx = 0; idx < vctrl->vcaps[vt].keyfield_set_size; ++idx) {
+ if (keyfield_set[idx].sw_per_item != ri->keyset_sw)
+ continue;
+
+ if (keyfield_set[idx].type_id == (u8)-1) {
+ vcap_keyset_list_add(matches, idx);
+ continue;
+ }
+
+ if ((keyfield_set[idx].type_id & mask) == value)
+ vcap_keyset_list_add(matches, idx);
+ }
+ if (matches->cnt > 0)
+ return 0;
+
+ return -EINVAL;
+}
+
+/* Collect packet counts from all rules with the same cookie */
+int vcap_get_rule_count_by_cookie(struct vcap_control *vctrl,
+ struct vcap_counter *ctr, u64 cookie)
+{
+ struct vcap_rule_internal *ri;
+ struct vcap_counter temp = {};
+ struct vcap_admin *admin;
+ int err;
+
+ err = vcap_api_check(vctrl);
+ if (err)
+ return err;
+
+ /* Iterate all rules in each VCAP instance */
+ list_for_each_entry(admin, &vctrl->list, list) {
+ mutex_lock(&admin->lock);
+ list_for_each_entry(ri, &admin->rules, list) {
+ if (ri->data.cookie != cookie)
+ continue;
+
+ err = vcap_read_counter(ri, &temp);
+ if (err)
+ goto unlock;
+ ctr->value += temp.value;
+
+ /* Reset the rule counter */
+ temp.value = 0;
+ temp.sticky = 0;
+ err = vcap_write_counter(ri, &temp);
+ if (err)
+ goto unlock;
+ }
+ mutex_unlock(&admin->lock);
+ }
+ return err;
+
+unlock:
+ mutex_unlock(&admin->lock);
+ return err;
+}
+EXPORT_SYMBOL_GPL(vcap_get_rule_count_by_cookie);
+
+static int vcap_rule_mod_key(struct vcap_rule *rule,
+ enum vcap_key_field key,
+ enum vcap_field_type ftype,
+ struct vcap_client_keyfield_data *data)
+{
+ struct vcap_client_keyfield *field;
+
+ field = vcap_find_keyfield(rule, key);
+ if (!field)
+ return vcap_rule_add_key(rule, key, ftype, data);
+ memcpy(&field->data, data, sizeof(field->data));
+ return 0;
+}
+
+/* Modify a 32 bit key field with value and mask in the rule */
+int vcap_rule_mod_key_u32(struct vcap_rule *rule, enum vcap_key_field key,
+ u32 value, u32 mask)
+{
+ struct vcap_client_keyfield_data data;
+
+ data.u32.value = value;
+ data.u32.mask = mask;
+ return vcap_rule_mod_key(rule, key, VCAP_FIELD_U32, &data);
+}
+EXPORT_SYMBOL_GPL(vcap_rule_mod_key_u32);
+
+/* Remove a key field with value and mask in the rule */
+int vcap_rule_rem_key(struct vcap_rule *rule, enum vcap_key_field key)
+{
+ struct vcap_rule_internal *ri = to_intrule(rule);
+ struct vcap_client_keyfield *field;
+
+ field = vcap_find_keyfield(rule, key);
+ if (!field) {
+ pr_err("%s:%d: key %s is not in the rule\n",
+ __func__, __LINE__, vcap_keyfield_name(ri->vctrl, key));
+ return -EINVAL;
+ }
+ /* Deallocate the key field */
+ list_del(&field->ctrl.list);
+ kfree(field);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vcap_rule_rem_key);
+
+static int vcap_rule_mod_action(struct vcap_rule *rule,
+ enum vcap_action_field action,
+ enum vcap_field_type ftype,
+ struct vcap_client_actionfield_data *data)
+{
+ struct vcap_client_actionfield *field;
+
+ field = vcap_find_actionfield(rule, action);
+ if (!field)
+ return vcap_rule_add_action(rule, action, ftype, data);
+ memcpy(&field->data, data, sizeof(field->data));
+ return 0;
+}
+
+/* Modify a 32 bit action field with value in the rule */
+int vcap_rule_mod_action_u32(struct vcap_rule *rule,
+ enum vcap_action_field action,
+ u32 value)
+{
+ struct vcap_client_actionfield_data data;
+
+ data.u32.value = value;
+ return vcap_rule_mod_action(rule, action, VCAP_FIELD_U32, &data);
+}
+EXPORT_SYMBOL_GPL(vcap_rule_mod_action_u32);
+
+/* Drop the keys found in the keylist and, optionally, any keys that are
+ * not supported by the rule keyset
+ */
+int vcap_filter_rule_keys(struct vcap_rule *rule,
+ enum vcap_key_field keylist[], int length,
+ bool drop_unsupported)
+{
+ struct vcap_rule_internal *ri = to_intrule(rule);
+ struct vcap_client_keyfield *ckf, *next_ckf;
+ const struct vcap_field *fields;
+ enum vcap_key_field key;
+ int err = 0;
+ int idx;
+
+ if (length > 0) {
+ err = -EEXIST;
+ list_for_each_entry_safe(ckf, next_ckf,
+ &ri->data.keyfields, ctrl.list) {
+ key = ckf->ctrl.key;
+ for (idx = 0; idx < length; ++idx)
+ if (key == keylist[idx]) {
+ list_del(&ckf->ctrl.list);
+ kfree(ckf);
+ err = 0;
+ /* a key occurs at most once in a rule */
+ break;
+ }
+ }
+ }
+ if (drop_unsupported) {
+ err = -EEXIST;
+ fields = vcap_keyfields(ri->vctrl, ri->admin->vtype,
+ rule->keyset);
+ if (!fields)
+ return err;
+ list_for_each_entry_safe(ckf, next_ckf,
+ &ri->data.keyfields, ctrl.list) {
+ key = ckf->ctrl.key;
+ if (fields[key].width == 0) {
+ list_del(&ckf->ctrl.list);
+ kfree(ckf);
+ err = 0;
+ }
+ }
+ }
+ return err;
+}
+EXPORT_SYMBOL_GPL(vcap_filter_rule_keys);
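+
+/* Illustrative sketch: after forcing a rule onto a narrower keyset, a
+ * client can strip the keys that no longer fit.  The key list below is
+ * a hypothetical example.
+ *
+ *	enum vcap_key_field unwanted[] = { VCAP_KF_8021Q_PCP_CLS };
+ *
+ *	vcap_filter_rule_keys(rule, unwanted, ARRAY_SIZE(unwanted), true);
+ */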
+
+/* Select the keyset from the list that results in the smallest rule size */
+enum vcap_keyfield_set
+vcap_select_min_rule_keyset(struct vcap_control *vctrl,
+ enum vcap_type vtype,
+ struct vcap_keyset_list *kslist)
+{
+ enum vcap_keyfield_set ret = VCAP_KFS_NO_VALUE;
+ const struct vcap_set *kset;
+ int max = 100, idx;
+
+ for (idx = 0; idx < kslist->cnt; ++idx) {
+ kset = vcap_keyfieldset(vctrl, vtype, kslist->keysets[idx]);
+ if (!kset)
+ continue;
+ if (kset->sw_per_item >= max)
+ continue;
+ max = kset->sw_per_item;
+ ret = kslist->keysets[idx];
+ }
+ return ret;
+}
+EXPORT_SYMBOL_GPL(vcap_select_min_rule_keyset);
+
+/* Make a full copy of an existing rule with a new rule id */
+struct vcap_rule *vcap_copy_rule(struct vcap_rule *erule)
+{
+ struct vcap_rule_internal *ri = to_intrule(erule);
+ struct vcap_client_actionfield *caf;
+ struct vcap_client_keyfield *ckf;
+ struct vcap_rule *rule;
+ int err;
+
+ err = vcap_api_check(ri->vctrl);
+ if (err)
+ return ERR_PTR(err);
+
+ rule = vcap_alloc_rule(ri->vctrl, ri->ndev, ri->data.vcap_chain_id,
+ ri->data.user, ri->data.priority, 0);
+ if (IS_ERR(rule))
+ return rule;
+
+ list_for_each_entry(ckf, &ri->data.keyfields, ctrl.list) {
+ /* Add a key duplicate in the new rule */
+ err = vcap_rule_add_key(rule,
+ ckf->ctrl.key,
+ ckf->ctrl.type,
+ &ckf->data);
+ if (err)
+ goto err;
+ }
+
+ list_for_each_entry(caf, &ri->data.actionfields, ctrl.list) {
+ /* Add an action duplicate in the new rule */
+ err = vcap_rule_add_action(rule,
+ caf->ctrl.action,
+ caf->ctrl.type,
+ &caf->data);
+ if (err)
+ goto err;
+ }
+ return rule;
+err:
+ vcap_free_rule(rule);
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL_GPL(vcap_copy_rule);
+
+#ifdef CONFIG_VCAP_KUNIT_TEST
+#include "vcap_api_kunit.c"
+#endif
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api.h b/drivers/net/ethernet/microchip/vcap/vcap_api.h
new file mode 100644
index 0000000000..9eccfa633c
--- /dev/null
+++ b/drivers/net/ethernet/microchip/vcap/vcap_api.h
@@ -0,0 +1,280 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (C) 2022 Microchip Technology Inc. and its subsidiaries.
+ * Microchip VCAP API
+ */
+
+#ifndef __VCAP_API__
+#define __VCAP_API__
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+
+/* Use the generated API model */
+#include "vcap_ag_api.h"
+
+#define VCAP_CID_LOOKUP_SIZE 100000 /* Chains in a lookup */
+#define VCAP_CID_INGRESS_L0 1000000 /* Ingress Stage 1 Lookup 0 */
+#define VCAP_CID_INGRESS_L1 1100000 /* Ingress Stage 1 Lookup 1 */
+#define VCAP_CID_INGRESS_L2 1200000 /* Ingress Stage 1 Lookup 2 */
+#define VCAP_CID_INGRESS_L3 1300000 /* Ingress Stage 1 Lookup 3 */
+#define VCAP_CID_INGRESS_L4 1400000 /* Ingress Stage 1 Lookup 4 */
+#define VCAP_CID_INGRESS_L5 1500000 /* Ingress Stage 1 Lookup 5 */
+
+#define VCAP_CID_PREROUTING_IPV6 3000000 /* Prerouting Stage */
+#define VCAP_CID_PREROUTING 6000000 /* Prerouting Stage */
+
+#define VCAP_CID_INGRESS_STAGE2_L0 8000000 /* Ingress Stage 2 Lookup 0 */
+#define VCAP_CID_INGRESS_STAGE2_L1 8100000 /* Ingress Stage 2 Lookup 1 */
+#define VCAP_CID_INGRESS_STAGE2_L2 8200000 /* Ingress Stage 2 Lookup 2 */
+#define VCAP_CID_INGRESS_STAGE2_L3 8300000 /* Ingress Stage 2 Lookup 3 */
+
+#define VCAP_CID_EGRESS_L0 10000000 /* Egress Lookup 0 */
+#define VCAP_CID_EGRESS_L1 10100000 /* Egress Lookup 1 */
+
+#define VCAP_CID_EGRESS_STAGE2_L0 20000000 /* Egress Stage 2 Lookup 0 */
+#define VCAP_CID_EGRESS_STAGE2_L1 20100000 /* Egress Stage 2 Lookup 1 */
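+
+/* A chain id in the range [base, base + VCAP_CID_LOOKUP_SIZE) selects a
+ * rule position within the lookup starting at "base"; e.g. chain id
+ * 1100050 falls in Ingress Stage 1 Lookup 1.
+ */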
+
+/* Known users of the VCAP API */
+enum vcap_user {
+ VCAP_USER_PTP,
+ VCAP_USER_MRP,
+ VCAP_USER_CFM,
+ VCAP_USER_VLAN,
+ VCAP_USER_QOS,
+ VCAP_USER_VCAP_UTIL,
+ VCAP_USER_TC,
+ VCAP_USER_TC_EXTRA,
+
+ /* add new users above here */
+
+ /* used to define VCAP_USER_MAX below */
+ __VCAP_USER_AFTER_LAST,
+ VCAP_USER_MAX = __VCAP_USER_AFTER_LAST - 1,
+};
+
+/* VCAP information used for displaying data */
+struct vcap_statistics {
+ char *name;
+ int count;
+ const char * const *keyfield_set_names;
+ const char * const *actionfield_set_names;
+ const char * const *keyfield_names;
+ const char * const *actionfield_names;
+};
+
+/* VCAP key/action field type, position and width */
+struct vcap_field {
+ u16 type;
+ u16 width;
+ u16 offset;
+};
+
+/* VCAP keyset or actionset type and width */
+struct vcap_set {
+ u8 type_id;
+ u8 sw_per_item;
+ u8 sw_cnt;
+};
+
+/* VCAP typegroup position and bitvalue */
+struct vcap_typegroup {
+ u16 offset;
+ u16 width;
+ u16 value;
+};
+
+/* VCAP model data */
+struct vcap_info {
+ char *name; /* user-friendly name */
+ u16 rows; /* number of row in instance */
+ u16 sw_count; /* maximum subwords used per rule */
+ u16 sw_width; /* bits per subword in a keyset */
+ u16 sticky_width; /* sticky bits per rule */
+ u16 act_width; /* bits per subword in an actionset */
+ u16 default_cnt; /* number of default rules */
+ u16 require_cnt_dis; /* not used */
+ u16 version; /* vcap rtl version */
+ const struct vcap_set *keyfield_set; /* keysets */
+ int keyfield_set_size; /* number of keysets */
+ const struct vcap_set *actionfield_set; /* actionsets */
+ int actionfield_set_size; /* number of actionsets */
+ /* map of keys per keyset */
+ const struct vcap_field **keyfield_set_map;
+ /* number of entries in the above map */
+ int *keyfield_set_map_size;
+ /* map of actions per actionset */
+ const struct vcap_field **actionfield_set_map;
+ /* number of entries in the above map */
+ int *actionfield_set_map_size;
+ /* map of keyset typegroups per subword size */
+ const struct vcap_typegroup **keyfield_set_typegroups;
+ /* map of actionset typegroups per subword size */
+ const struct vcap_typegroup **actionfield_set_typegroups;
+};
+
+enum vcap_field_type {
+ VCAP_FIELD_BIT,
+ VCAP_FIELD_U32,
+ VCAP_FIELD_U48,
+ VCAP_FIELD_U56,
+ VCAP_FIELD_U64,
+ VCAP_FIELD_U72,
+ VCAP_FIELD_U112,
+ VCAP_FIELD_U128,
+};
+
+/* VCAP rule data towards the VCAP cache */
+struct vcap_cache_data {
+ u32 *keystream;
+ u32 *maskstream;
+ u32 *actionstream;
+ u32 counter;
+ bool sticky;
+};
+
+/* Selects which part of the rule must be updated */
+enum vcap_selection {
+ VCAP_SEL_ENTRY = 0x01,
+ VCAP_SEL_ACTION = 0x02,
+ VCAP_SEL_COUNTER = 0x04,
+ VCAP_SEL_ALL = 0xff,
+};
+
+/* Commands towards the VCAP cache */
+enum vcap_command {
+ VCAP_CMD_WRITE = 0,
+ VCAP_CMD_READ = 1,
+ VCAP_CMD_MOVE_DOWN = 2,
+ VCAP_CMD_MOVE_UP = 3,
+ VCAP_CMD_INITIALIZE = 4,
+};
+
+enum vcap_rule_error {
+ VCAP_ERR_NONE = 0, /* No known error */
+ VCAP_ERR_NO_ADMIN, /* No admin instance */
+ VCAP_ERR_NO_NETDEV, /* No netdev instance */
+ VCAP_ERR_NO_KEYSET_MATCH, /* No keyset matched the rule keys */
+ VCAP_ERR_NO_ACTIONSET_MATCH, /* No actionset matched the rule actions */
+ VCAP_ERR_NO_PORT_KEYSET_MATCH, /* No port keyset matched the rule keys */
+};
+
+/* Administration of each VCAP instance */
+struct vcap_admin {
+ struct list_head list; /* for insertion in vcap_control */
+ struct list_head rules; /* list of rules */
+ struct list_head enabled; /* list of enabled ports */
+ struct mutex lock; /* control access to rules */
+ enum vcap_type vtype; /* type of vcap */
+ int vinst; /* instance number within the same type */
+ int first_cid; /* first chain id in this vcap */
+ int last_cid; /* last chain id in this vcap */
+ int tgt_inst; /* hardware instance number */
+ int lookups; /* number of lookups in this vcap type */
+ int lookups_per_instance; /* number of lookups in this instance */
+ int last_valid_addr; /* top of address range to be used */
+ int first_valid_addr; /* bottom of address range to be used */
+ int last_used_addr; /* address of lowest added rule */
+ bool w32be; /* vcap uses "32bit-word big-endian" encoding */
+ bool ingress; /* chain traffic direction */
+ struct vcap_cache_data cache; /* encoded rule data */
+};
+
+/* Client supplied VCAP rule data */
+struct vcap_rule {
+ int vcap_chain_id; /* chain used for this rule */
+ enum vcap_user user; /* rule owner */
+ u16 priority;
+ u32 id; /* vcap rule id, must be unique, 0 will auto-generate a value */
+ u64 cookie; /* used by the client to identify the rule */
+ struct list_head keyfields; /* list of vcap_client_keyfield */
+ struct list_head actionfields; /* list of vcap_client_actionfield */
+ enum vcap_keyfield_set keyset; /* keyset used: may be derived from fields */
+ enum vcap_actionfield_set actionset; /* actionset used: may be derived from fields */
+ enum vcap_rule_error exterr; /* extended error - used by TC */
+ u64 client; /* space for client defined data */
+};
+
+/* List of keysets */
+struct vcap_keyset_list {
+ int max; /* size of the keyset list */
+ int cnt; /* count of keysets actually in the list */
+ enum vcap_keyfield_set *keysets; /* the list of keysets */
+};
+
+/* List of actionsets */
+struct vcap_actionset_list {
+ int max; /* size of the actionset list */
+ int cnt; /* count of actionsets actually in the list */
+ enum vcap_actionfield_set *actionsets; /* the list of actionsets */
+};
+
+/* Client output printf-like function with destination */
+struct vcap_output_print {
+ __printf(2, 3)
+ void (*prf)(void *out, const char *fmt, ...);
+ void *dst;
+};
+
+/* Client supplied VCAP callback operations */
+struct vcap_operations {
+ /* validate port keyset operation */
+ enum vcap_keyfield_set (*validate_keyset)
+ (struct net_device *ndev,
+ struct vcap_admin *admin,
+ struct vcap_rule *rule,
+ struct vcap_keyset_list *kslist,
+ u16 l3_proto);
+ /* add default rule fields for the selected keyset operations */
+ void (*add_default_fields)
+ (struct net_device *ndev,
+ struct vcap_admin *admin,
+ struct vcap_rule *rule);
+ /* cache operations */
+ void (*cache_erase)
+ (struct vcap_admin *admin);
+ void (*cache_write)
+ (struct net_device *ndev,
+ struct vcap_admin *admin,
+ enum vcap_selection sel,
+ u32 idx, u32 count);
+ void (*cache_read)
+ (struct net_device *ndev,
+ struct vcap_admin *admin,
+ enum vcap_selection sel,
+ u32 idx,
+ u32 count);
+ /* block operations */
+ void (*init)
+ (struct net_device *ndev,
+ struct vcap_admin *admin,
+ u32 addr,
+ u32 count);
+ void (*update)
+ (struct net_device *ndev,
+ struct vcap_admin *admin,
+ enum vcap_command cmd,
+ enum vcap_selection sel,
+ u32 addr);
+ void (*move)
+ (struct net_device *ndev,
+ struct vcap_admin *admin,
+ u32 addr,
+ int offset,
+ int count);
+ /* informational */
+ int (*port_info)
+ (struct net_device *ndev,
+ struct vcap_admin *admin,
+ struct vcap_output_print *out);
+};
+
+/* VCAP API Client control interface */
+struct vcap_control {
+ struct vcap_operations *ops; /* client supplied operations */
+ const struct vcap_info *vcaps; /* client supplied vcap models */
+ const struct vcap_statistics *stats; /* client supplied vcap stats */
+ struct list_head list; /* list of vcap instances */
+};
+
+#endif /* __VCAP_API__ */
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api_client.h b/drivers/net/ethernet/microchip/vcap/vcap_api_client.h
new file mode 100644
index 0000000000..88641508f8
--- /dev/null
+++ b/drivers/net/ethernet/microchip/vcap/vcap_api_client.h
@@ -0,0 +1,282 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (C) 2022 Microchip Technology Inc. and its subsidiaries.
+ * Microchip VCAP API
+ */
+
+#ifndef __VCAP_API_CLIENT__
+#define __VCAP_API_CLIENT__
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <net/flow_offload.h>
+
+#include "vcap_api.h"
+
+/* Client supplied VCAP rule key control part */
+struct vcap_client_keyfield_ctrl {
+ struct list_head list; /* For insertion into a rule */
+ enum vcap_key_field key;
+ enum vcap_field_type type;
+};
+
+struct vcap_u1_key {
+ u8 value;
+ u8 mask;
+};
+
+struct vcap_u32_key {
+ u32 value;
+ u32 mask;
+};
+
+struct vcap_u48_key {
+ u8 value[6];
+ u8 mask[6];
+};
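+
+/* Example (illustrative): a u48 key typically carries a MAC address in
+ * network byte order; "rule" and "mac" are hypothetical:
+ *
+ *	struct vcap_u48_key smac = {};
+ *
+ *	memcpy(smac.value, mac, ETH_ALEN);
+ *	memset(smac.mask, 0xff, ETH_ALEN);
+ *	vcap_rule_add_key_u48(rule, VCAP_KF_L2_SMAC, &smac);
+ */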
+
+struct vcap_u56_key {
+ u8 value[7];
+ u8 mask[7];
+};
+
+struct vcap_u64_key {
+ u8 value[8];
+ u8 mask[8];
+};
+
+struct vcap_u72_key {
+ u8 value[9];
+ u8 mask[9];
+};
+
+struct vcap_u112_key {
+ u8 value[14];
+ u8 mask[14];
+};
+
+struct vcap_u128_key {
+ u8 value[16];
+ u8 mask[16];
+};
+
+/* Client supplied VCAP rule field data */
+struct vcap_client_keyfield_data {
+ union {
+ struct vcap_u1_key u1;
+ struct vcap_u32_key u32;
+ struct vcap_u48_key u48;
+ struct vcap_u56_key u56;
+ struct vcap_u64_key u64;
+ struct vcap_u72_key u72;
+ struct vcap_u112_key u112;
+ struct vcap_u128_key u128;
+ };
+};
+
+/* Client supplied VCAP rule key (value, mask) */
+struct vcap_client_keyfield {
+ struct vcap_client_keyfield_ctrl ctrl;
+ struct vcap_client_keyfield_data data;
+};
+
+/* Client supplied VCAP rule action control part */
+struct vcap_client_actionfield_ctrl {
+ struct list_head list; /* For insertion into a rule */
+ enum vcap_action_field action;
+ enum vcap_field_type type;
+};
+
+struct vcap_u1_action {
+ u8 value;
+};
+
+struct vcap_u32_action {
+ u32 value;
+};
+
+struct vcap_u48_action {
+ u8 value[6];
+};
+
+struct vcap_u56_action {
+ u8 value[7];
+};
+
+struct vcap_u64_action {
+ u8 value[8];
+};
+
+struct vcap_u72_action {
+ u8 value[9];
+};
+
+struct vcap_u112_action {
+ u8 value[14];
+};
+
+struct vcap_u128_action {
+ u8 value[16];
+};
+
+struct vcap_client_actionfield_data {
+ union {
+ struct vcap_u1_action u1;
+ struct vcap_u32_action u32;
+ struct vcap_u48_action u48;
+ struct vcap_u56_action u56;
+ struct vcap_u64_action u64;
+ struct vcap_u72_action u72;
+ struct vcap_u112_action u112;
+ struct vcap_u128_action u128;
+ };
+};
+
+struct vcap_client_actionfield {
+ struct vcap_client_actionfield_ctrl ctrl;
+ struct vcap_client_actionfield_data data;
+};
+
+enum vcap_bit {
+ VCAP_BIT_ANY,
+ VCAP_BIT_0,
+ VCAP_BIT_1
+};
+
+struct vcap_counter {
+ u32 value;
+ bool sticky;
+};
+
+/* Enable/Disable the VCAP instance lookups */
+int vcap_enable_lookups(struct vcap_control *vctrl, struct net_device *ndev,
+ int src_cid, int dst_cid, unsigned long cookie,
+ bool enable);
+
+/* VCAP rule operations */
+/* Allocate a rule and fill in the basic information */
+struct vcap_rule *vcap_alloc_rule(struct vcap_control *vctrl,
+ struct net_device *ndev,
+ int vcap_chain_id,
+ enum vcap_user user,
+ u16 priority,
+ u32 id);
+/* Free mem of a rule owned by client */
+void vcap_free_rule(struct vcap_rule *rule);
+/* Validate a rule before adding it to the VCAP */
+int vcap_val_rule(struct vcap_rule *rule, u16 l3_proto);
+/* Add rule to a VCAP instance */
+int vcap_add_rule(struct vcap_rule *rule);
+/* Delete rule in a VCAP instance */
+int vcap_del_rule(struct vcap_control *vctrl, struct net_device *ndev, u32 id);
+/* Make a full copy of an existing rule with a new rule id */
+struct vcap_rule *vcap_copy_rule(struct vcap_rule *rule);
+/* Get rule from a VCAP instance */
+struct vcap_rule *vcap_get_rule(struct vcap_control *vctrl, u32 id);
+/* Update existing rule */
+int vcap_mod_rule(struct vcap_rule *rule);
+
+/* Update the keyset for the rule */
+int vcap_set_rule_set_keyset(struct vcap_rule *rule,
+ enum vcap_keyfield_set keyset);
+/* Update the actionset for the rule */
+int vcap_set_rule_set_actionset(struct vcap_rule *rule,
+ enum vcap_actionfield_set actionset);
+/* Set a rule counter id (for certain VCAPs only) */
+void vcap_rule_set_counter_id(struct vcap_rule *rule, u32 counter_id);
+
+/* VCAP rule field operations */
+int vcap_rule_add_key_bit(struct vcap_rule *rule, enum vcap_key_field key,
+ enum vcap_bit val);
+int vcap_rule_add_key_u32(struct vcap_rule *rule, enum vcap_key_field key,
+ u32 value, u32 mask);
+int vcap_rule_add_key_u48(struct vcap_rule *rule, enum vcap_key_field key,
+ struct vcap_u48_key *fieldval);
+int vcap_rule_add_key_u72(struct vcap_rule *rule, enum vcap_key_field key,
+ struct vcap_u72_key *fieldval);
+int vcap_rule_add_key_u128(struct vcap_rule *rule, enum vcap_key_field key,
+ struct vcap_u128_key *fieldval);
+int vcap_rule_add_action_bit(struct vcap_rule *rule,
+ enum vcap_action_field action, enum vcap_bit val);
+int vcap_rule_add_action_u32(struct vcap_rule *rule,
+ enum vcap_action_field action, u32 value);
+
+/* Get number of rules in a vcap instance lookup chain id range */
+int vcap_admin_rule_count(struct vcap_admin *admin, int cid);
+
+/* VCAP rule counter operations */
+int vcap_get_rule_count_by_cookie(struct vcap_control *vctrl,
+ struct vcap_counter *ctr, u64 cookie);
+int vcap_rule_set_counter(struct vcap_rule *rule, struct vcap_counter *ctr);
+int vcap_rule_get_counter(struct vcap_rule *rule, struct vcap_counter *ctr);
+
+/* VCAP lookup operations */
+/* Convert a chain id to a VCAP lookup index */
+int vcap_chain_id_to_lookup(struct vcap_admin *admin, int cur_cid);
+/* Lookup a vcap instance using chain id */
+struct vcap_admin *vcap_find_admin(struct vcap_control *vctrl, int cid);
+/* Find information on a key field in a rule */
+const struct vcap_field *vcap_lookup_keyfield(struct vcap_rule *rule,
+ enum vcap_key_field key);
+/* Find a rule id with a provided cookie */
+int vcap_lookup_rule_by_cookie(struct vcap_control *vctrl, u64 cookie);
+/* Calculate the value used for chaining VCAP rules */
+int vcap_chain_offset(struct vcap_control *vctrl, int from_cid, int to_cid);
+/* Is the next chain id in the following lookup, possible in another VCAP */
+bool vcap_is_next_lookup(struct vcap_control *vctrl, int cur_cid, int next_cid);
+/* Is this chain id the last lookup of all VCAPs */
+bool vcap_is_last_chain(struct vcap_control *vctrl, int cid, bool ingress);
+/* Match a list of keys against the keysets available in a vcap type */
+bool vcap_rule_find_keysets(struct vcap_rule *rule,
+ struct vcap_keyset_list *matches);
+/* Return the keyset information for the keyset */
+const struct vcap_set *vcap_keyfieldset(struct vcap_control *vctrl,
+ enum vcap_type vt,
+ enum vcap_keyfield_set keyset);
+/* Copy to host byte order */
+void vcap_netbytes_copy(u8 *dst, u8 *src, int count);
+
+/* Convert validation error code into tc extack error message */
+void vcap_set_tc_exterr(struct flow_cls_offload *fco, struct vcap_rule *vrule);
+
+/* Cleanup a VCAP instance */
+int vcap_del_rules(struct vcap_control *vctrl, struct vcap_admin *admin);
+
+/* Add a keyset to a keyset list */
+bool vcap_keyset_list_add(struct vcap_keyset_list *keysetlist,
+ enum vcap_keyfield_set keyset);
+/* Drop the keys found in the keylist and, optionally, any keys that are
+ * not supported by the rule keyset
+ */
+int vcap_filter_rule_keys(struct vcap_rule *rule,
+ enum vcap_key_field keylist[], int length,
+ bool drop_unsupported);
+
+/* map keyset id to a string with the keyset name */
+const char *vcap_keyset_name(struct vcap_control *vctrl,
+ enum vcap_keyfield_set keyset);
+/* map key field id to a string with the key name */
+const char *vcap_keyfield_name(struct vcap_control *vctrl,
+ enum vcap_key_field key);
+
+/* Modify a 32 bit key field with value and mask in the rule */
+int vcap_rule_mod_key_u32(struct vcap_rule *rule, enum vcap_key_field key,
+ u32 value, u32 mask);
+/* Modify a 32 bit action field with value in the rule */
+int vcap_rule_mod_action_u32(struct vcap_rule *rule,
+ enum vcap_action_field action,
+ u32 value);
+
+/* Get a 32 bit key field value and mask from the rule */
+int vcap_rule_get_key_u32(struct vcap_rule *rule, enum vcap_key_field key,
+ u32 *value, u32 *mask);
+
+/* Remove a key field with value and mask in the rule */
+int vcap_rule_rem_key(struct vcap_rule *rule, enum vcap_key_field key);
+
+/* Select the keyset from the list that results in the smallest rule size */
+enum vcap_keyfield_set
+vcap_select_min_rule_keyset(struct vcap_control *vctrl, enum vcap_type vtype,
+ struct vcap_keyset_list *kslist);
+
+struct vcap_client_actionfield *
+vcap_find_actionfield(struct vcap_rule *rule, enum vcap_action_field act);
+#endif /* __VCAP_API_CLIENT__ */
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs.c b/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs.c
new file mode 100644
index 0000000000..c2c3397c58
--- /dev/null
+++ b/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs.c
@@ -0,0 +1,468 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip VCAP API debug file system support
+ *
+ * Copyright (c) 2022 Microchip Technology Inc. and its subsidiaries.
+ *
+ */
+
+#include "vcap_api_private.h"
+#include "vcap_api_debugfs.h"
+
+struct vcap_admin_debugfs_info {
+ struct vcap_control *vctrl;
+ struct vcap_admin *admin;
+};
+
+struct vcap_port_debugfs_info {
+ struct vcap_control *vctrl;
+ struct net_device *ndev;
+};
+
+/* Dump the keyfields value and mask values */
+static void vcap_debugfs_show_rule_keyfield(struct vcap_control *vctrl,
+ struct vcap_output_print *out,
+ enum vcap_key_field key,
+ const struct vcap_field *keyfield,
+ struct vcap_client_keyfield_data *data)
+{
+ bool hex = false;
+ u8 *value, *mask;
+ int idx, bytes;
+
+ out->prf(out->dst, " %s: W%d: ", vcap_keyfield_name(vctrl, key),
+ keyfield[key].width);
+
+ switch (keyfield[key].type) {
+ case VCAP_FIELD_BIT:
+ out->prf(out->dst, "%d/%d", data->u1.value, data->u1.mask);
+ break;
+ case VCAP_FIELD_U32:
+ value = (u8 *)(&data->u32.value);
+ mask = (u8 *)(&data->u32.mask);
+
+ if (key == VCAP_KF_L3_IP4_SIP || key == VCAP_KF_L3_IP4_DIP) {
+ out->prf(out->dst, "%pI4h/%pI4h", &data->u32.value,
+ &data->u32.mask);
+ } else if (key == VCAP_KF_ETYPE ||
+ key == VCAP_KF_IF_IGR_PORT_MASK ||
+ key == VCAP_KF_IF_EGR_PORT_MASK) {
+ hex = true;
+ } else {
+ u32 fmsk = (1 << keyfield[key].width) - 1;
+
+ if (keyfield[key].width == 32)
+ fmsk = ~0;
+ out->prf(out->dst, "%u/%u", data->u32.value & fmsk,
+ data->u32.mask & fmsk);
+ }
+ break;
+ case VCAP_FIELD_U48:
+ value = data->u48.value;
+ mask = data->u48.mask;
+ if (key == VCAP_KF_L2_SMAC || key == VCAP_KF_L2_DMAC)
+ out->prf(out->dst, "%pMR/%pMR", data->u48.value,
+ data->u48.mask);
+ else
+ hex = true;
+ break;
+ case VCAP_FIELD_U56:
+ value = data->u56.value;
+ mask = data->u56.mask;
+ hex = true;
+ break;
+ case VCAP_FIELD_U64:
+ value = data->u64.value;
+ mask = data->u64.mask;
+ hex = true;
+ break;
+ case VCAP_FIELD_U72:
+ value = data->u72.value;
+ mask = data->u72.mask;
+ hex = true;
+ break;
+ case VCAP_FIELD_U112:
+ value = data->u112.value;
+ mask = data->u112.mask;
+ hex = true;
+ break;
+ case VCAP_FIELD_U128:
+ value = data->u128.value;
+ mask = data->u128.mask;
+ if (key == VCAP_KF_L3_IP6_SIP || key == VCAP_KF_L3_IP6_DIP) {
+ u8 nvalue[16], nmask[16];
+
+ vcap_netbytes_copy(nvalue, data->u128.value,
+ sizeof(nvalue));
+ vcap_netbytes_copy(nmask, data->u128.mask,
+ sizeof(nmask));
+ out->prf(out->dst, "%pI6/%pI6", nvalue, nmask);
+ } else {
+ hex = true;
+ }
+ break;
+ }
+ if (hex) {
+ bytes = DIV_ROUND_UP(keyfield[key].width, BITS_PER_BYTE);
+ out->prf(out->dst, "0x");
+ for (idx = 0; idx < bytes; ++idx)
+ out->prf(out->dst, "%02x", value[bytes - idx - 1]);
+ out->prf(out->dst, "/0x");
+ for (idx = 0; idx < bytes; ++idx)
+ out->prf(out->dst, "%02x", mask[bytes - idx - 1]);
+ }
+ out->prf(out->dst, "\n");
+}
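+
+/* Example output (illustrative; actual field names come from the client
+ * supplied model):
+ *
+ *	L3_IP4_SIP: W32: 192.168.1.1/255.255.255.255
+ *	ETYPE: W16: 0x0800/0xffff
+ */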
+
+static void
+vcap_debugfs_show_rule_actionfield(struct vcap_control *vctrl,
+ struct vcap_output_print *out,
+ enum vcap_action_field action,
+ const struct vcap_field *actionfield,
+ u8 *value)
+{
+ bool hex = false;
+ int idx, bytes;
+ u32 fmsk, val;
+
+ out->prf(out->dst, " %s: W%d: ",
+ vcap_actionfield_name(vctrl, action),
+ actionfield[action].width);
+
+ switch (actionfield[action].type) {
+ case VCAP_FIELD_BIT:
+ out->prf(out->dst, "%d", value[0]);
+ break;
+ case VCAP_FIELD_U32:
+ fmsk = (1 << actionfield[action].width) - 1;
+ val = *(u32 *)value;
+ out->prf(out->dst, "%u", val & fmsk);
+ break;
+ case VCAP_FIELD_U48:
+ case VCAP_FIELD_U56:
+ case VCAP_FIELD_U64:
+ case VCAP_FIELD_U72:
+ case VCAP_FIELD_U112:
+ case VCAP_FIELD_U128:
+ hex = true;
+ break;
+ }
+ if (hex) {
+ bytes = DIV_ROUND_UP(actionfield[action].width, BITS_PER_BYTE);
+ out->prf(out->dst, "0x");
+ for (idx = 0; idx < bytes; ++idx)
+ out->prf(out->dst, "%02x", value[bytes - idx - 1]);
+ }
+ out->prf(out->dst, "\n");
+}
+
+static int vcap_debugfs_show_keysets(struct vcap_rule_internal *ri,
+ struct vcap_output_print *out)
+{
+ struct vcap_admin *admin = ri->admin;
+ enum vcap_keyfield_set keysets[10];
+ struct vcap_keyset_list matches;
+ int err;
+
+ matches.keysets = keysets;
+ matches.cnt = 0;
+ matches.max = ARRAY_SIZE(keysets);
+
+ if (ri->state == VCAP_RS_DISABLED)
+ err = vcap_rule_get_keysets(ri, &matches);
+ else
+ err = vcap_find_keystream_keysets(ri->vctrl, admin->vtype,
+ admin->cache.keystream,
+ admin->cache.maskstream,
+ false, 0, &matches);
+ if (err) {
+ pr_err("%s:%d: could not find valid keysets: %d\n",
+ __func__, __LINE__, err);
+ return err;
+ }
+
+ out->prf(out->dst, " keysets:");
+ for (int idx = 0; idx < matches.cnt; ++idx)
+ out->prf(out->dst, " %s",
+ vcap_keyset_name(ri->vctrl, matches.keysets[idx]));
+ out->prf(out->dst, "\n");
+ return 0;
+}
+
+static int vcap_debugfs_show_rule_keyset(struct vcap_rule_internal *ri,
+ struct vcap_output_print *out)
+{
+ struct vcap_control *vctrl = ri->vctrl;
+ struct vcap_admin *admin = ri->admin;
+ const struct vcap_field *keyfield;
+ struct vcap_client_keyfield *ckf;
+
+ vcap_debugfs_show_keysets(ri, out);
+ out->prf(out->dst, " keyset_sw: %d\n", ri->keyset_sw);
+ out->prf(out->dst, " keyset_sw_regs: %d\n", ri->keyset_sw_regs);
+
+ list_for_each_entry(ckf, &ri->data.keyfields, ctrl.list) {
+ keyfield = vcap_keyfields(vctrl, admin->vtype, ri->data.keyset);
+ vcap_debugfs_show_rule_keyfield(vctrl, out, ckf->ctrl.key,
+ keyfield, &ckf->data);
+ }
+
+ return 0;
+}
+
+static int vcap_debugfs_show_rule_actionset(struct vcap_rule_internal *ri,
+ struct vcap_output_print *out)
+{
+ struct vcap_control *vctrl = ri->vctrl;
+ struct vcap_admin *admin = ri->admin;
+ const struct vcap_field *actionfield;
+ struct vcap_client_actionfield *caf;
+
+ out->prf(out->dst, " actionset: %s\n",
+ vcap_actionset_name(vctrl, ri->data.actionset));
+ out->prf(out->dst, " actionset_sw: %d\n", ri->actionset_sw);
+ out->prf(out->dst, " actionset_sw_regs: %d\n", ri->actionset_sw_regs);
+
+ list_for_each_entry(caf, &ri->data.actionfields, ctrl.list) {
+ actionfield = vcap_actionfields(vctrl, admin->vtype,
+ ri->data.actionset);
+ vcap_debugfs_show_rule_actionfield(vctrl, out, caf->ctrl.action,
+ actionfield,
+ &caf->data.u1.value);
+ }
+
+ return 0;
+}
+
+static void vcap_show_admin_rule(struct vcap_control *vctrl,
+ struct vcap_admin *admin,
+ struct vcap_output_print *out,
+ struct vcap_rule_internal *ri)
+{
+ ri->counter.value = admin->cache.counter;
+ ri->counter.sticky = admin->cache.sticky;
+ out->prf(out->dst,
+ "rule: %u, addr: [%d,%d], X%d, ctr[%d]: %d, hit: %d\n",
+ ri->data.id, ri->addr, ri->addr + ri->size - 1, ri->size,
+ ri->counter_id, ri->counter.value, ri->counter.sticky);
+ out->prf(out->dst, " chain_id: %d\n", ri->data.vcap_chain_id);
+ out->prf(out->dst, " user: %d\n", ri->data.user);
+ out->prf(out->dst, " priority: %d\n", ri->data.priority);
+ out->prf(out->dst, " state: ");
+ switch (ri->state) {
+ case VCAP_RS_PERMANENT:
+ out->prf(out->dst, "permanent\n");
+ break;
+ case VCAP_RS_DISABLED:
+ out->prf(out->dst, "disabled\n");
+ break;
+ case VCAP_RS_ENABLED:
+ out->prf(out->dst, "enabled\n");
+ break;
+ }
+ vcap_debugfs_show_rule_keyset(ri, out);
+ vcap_debugfs_show_rule_actionset(ri, out);
+}
+
+static void vcap_show_admin_info(struct vcap_control *vctrl,
+ struct vcap_admin *admin,
+ struct vcap_output_print *out)
+{
+ const struct vcap_info *vcap = &vctrl->vcaps[admin->vtype];
+
+ out->prf(out->dst, "name: %s\n", vcap->name);
+ out->prf(out->dst, "rows: %d\n", vcap->rows);
+ out->prf(out->dst, "sw_count: %d\n", vcap->sw_count);
+ out->prf(out->dst, "sw_width: %d\n", vcap->sw_width);
+ out->prf(out->dst, "sticky_width: %d\n", vcap->sticky_width);
+ out->prf(out->dst, "act_width: %d\n", vcap->act_width);
+ out->prf(out->dst, "default_cnt: %d\n", vcap->default_cnt);
+ out->prf(out->dst, "require_cnt_dis: %d\n", vcap->require_cnt_dis);
+ out->prf(out->dst, "version: %d\n", vcap->version);
+ out->prf(out->dst, "vtype: %d\n", admin->vtype);
+ out->prf(out->dst, "vinst: %d\n", admin->vinst);
+ out->prf(out->dst, "ingress: %d\n", admin->ingress);
+ out->prf(out->dst, "first_cid: %d\n", admin->first_cid);
+ out->prf(out->dst, "last_cid: %d\n", admin->last_cid);
+ out->prf(out->dst, "lookups: %d\n", admin->lookups);
+ out->prf(out->dst, "first_valid_addr: %d\n", admin->first_valid_addr);
+ out->prf(out->dst, "last_valid_addr: %d\n", admin->last_valid_addr);
+ out->prf(out->dst, "last_used_addr: %d\n", admin->last_used_addr);
+}
+
+static int vcap_show_admin(struct vcap_control *vctrl,
+ struct vcap_admin *admin,
+ struct vcap_output_print *out)
+{
+ struct vcap_rule_internal *elem;
+ struct vcap_rule *vrule;
+ int ret = 0;
+
+ vcap_show_admin_info(vctrl, admin, out);
+ list_for_each_entry(elem, &admin->rules, list) {
+ vrule = vcap_decode_rule(elem);
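+		/* A NULL result here terminates the listing with ret == 0,
+		 * since PTR_ERR(NULL) evaluates to 0.
+		 */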
+ if (IS_ERR_OR_NULL(vrule)) {
+ ret = PTR_ERR(vrule);
+ break;
+ }
+
+ out->prf(out->dst, "\n");
+ vcap_show_admin_rule(vctrl, admin, out, to_intrule(vrule));
+ vcap_free_rule(vrule);
+ }
+ return ret;
+}
+
+static int vcap_show_admin_raw(struct vcap_control *vctrl,
+ struct vcap_admin *admin,
+ struct vcap_output_print *out)
+{
+ enum vcap_keyfield_set keysets[10];
+ enum vcap_type vt = admin->vtype;
+ struct vcap_keyset_list kslist;
+ struct vcap_rule_internal *ri;
+ const struct vcap_set *info;
+ int addr, idx;
+ int ret;
+
+ if (list_empty(&admin->rules))
+ return 0;
+
+ ret = vcap_api_check(vctrl);
+ if (ret)
+ return ret;
+
+ ri = list_first_entry(&admin->rules, struct vcap_rule_internal, list);
+
+ /* Go from higher to lower addresses searching for a keyset */
+ kslist.keysets = keysets;
+ kslist.max = ARRAY_SIZE(keysets);
+ for (addr = admin->last_valid_addr; addr >= admin->first_valid_addr;
+ --addr) {
+ kslist.cnt = 0;
+ ret = vcap_addr_keysets(vctrl, ri->ndev, admin, addr, &kslist);
+ if (ret < 0)
+ continue;
+ info = vcap_keyfieldset(vctrl, vt, kslist.keysets[0]);
+ if (!info)
+ continue;
+ if (addr % info->sw_per_item) {
+ pr_info("addr: %d X%d error rule, keyset: %s\n",
+ addr,
+ info->sw_per_item,
+ vcap_keyset_name(vctrl, kslist.keysets[0]));
+ } else {
+ out->prf(out->dst, " addr: %d, X%d rule, keysets:",
+ addr,
+ info->sw_per_item);
+ for (idx = 0; idx < kslist.cnt; ++idx)
+ out->prf(out->dst, " %s",
+ vcap_keyset_name(vctrl,
+ kslist.keysets[idx]));
+ out->prf(out->dst, "\n");
+ }
+ }
+ return 0;
+}
+
+/* Show the port configuration and status */
+static int vcap_port_debugfs_show(struct seq_file *m, void *unused)
+{
+ struct vcap_port_debugfs_info *info = m->private;
+ struct vcap_admin *admin;
+ struct vcap_output_print out = {
+ .prf = (void *)seq_printf,
+ .dst = m,
+ };
+
+ list_for_each_entry(admin, &info->vctrl->list, list) {
+ if (admin->vinst)
+ continue;
+ info->vctrl->ops->port_info(info->ndev, admin, &out);
+ }
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(vcap_port_debugfs);
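+/* DEFINE_SHOW_ATTRIBUTE() expands to the vcap_port_debugfs_fops file
+ * operations, wrapping vcap_port_debugfs_show() in the usual
+ * single_open()/seq_file boilerplate.
+ */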
+
+void vcap_port_debugfs(struct device *dev, struct dentry *parent,
+ struct vcap_control *vctrl,
+ struct net_device *ndev)
+{
+ struct vcap_port_debugfs_info *info;
+
+ info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return;
+
+ info->vctrl = vctrl;
+ info->ndev = ndev;
+ debugfs_create_file(netdev_name(ndev), 0444, parent, info,
+ &vcap_port_debugfs_fops);
+}
+EXPORT_SYMBOL_GPL(vcap_port_debugfs);
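+
+/* Typical wiring from a driver (a sketch only, not the exact lan966x or
+ * sparx5 call sites):
+ *
+ *	struct dentry *root = vcap_debugfs(dev, parent, vctrl);
+ *
+ *	if (root)
+ *		vcap_port_debugfs(dev, root, vctrl, ndev);
+ */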
+
+/* Show the full VCAP instance data (rules with all fields) */
+static int vcap_debugfs_show(struct seq_file *m, void *unused)
+{
+ struct vcap_admin_debugfs_info *info = m->private;
+ struct vcap_output_print out = {
+ .prf = (void *)seq_printf,
+ .dst = m,
+ };
+ int ret;
+
+ mutex_lock(&info->admin->lock);
+ ret = vcap_show_admin(info->vctrl, info->admin, &out);
+ mutex_unlock(&info->admin->lock);
+ return ret;
+}
+DEFINE_SHOW_ATTRIBUTE(vcap_debugfs);
+
+/* Show the raw VCAP instance data (rules with address info) */
+static int vcap_raw_debugfs_show(struct seq_file *m, void *unused)
+{
+ struct vcap_admin_debugfs_info *info = m->private;
+ struct vcap_output_print out = {
+ .prf = (void *)seq_printf,
+ .dst = m,
+ };
+ int ret;
+
+ mutex_lock(&info->admin->lock);
+ ret = vcap_show_admin_raw(info->vctrl, info->admin, &out);
+ mutex_unlock(&info->admin->lock);
+ return ret;
+}
+DEFINE_SHOW_ATTRIBUTE(vcap_raw_debugfs);
+
+struct dentry *vcap_debugfs(struct device *dev, struct dentry *parent,
+ struct vcap_control *vctrl)
+{
+ struct vcap_admin_debugfs_info *info;
+ struct vcap_admin *admin;
+ struct dentry *dir;
+ char name[50];
+
+ dir = debugfs_create_dir("vcaps", parent);
+ if (PTR_ERR_OR_ZERO(dir))
+ return NULL;
+ list_for_each_entry(admin, &vctrl->list, list) {
+ sprintf(name, "raw_%s_%d", vctrl->vcaps[admin->vtype].name,
+ admin->vinst);
+ info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return NULL;
+ info->vctrl = vctrl;
+ info->admin = admin;
+ debugfs_create_file(name, 0444, dir, info,
+ &vcap_raw_debugfs_fops);
+ sprintf(name, "%s_%d", vctrl->vcaps[admin->vtype].name,
+ admin->vinst);
+ debugfs_create_file(name, 0444, dir, info, &vcap_debugfs_fops);
+ }
+ return dir;
+}
+EXPORT_SYMBOL_GPL(vcap_debugfs);
+
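+/* The KUnit tests are included into this translation unit so they can
+ * exercise the static functions above directly.
+ */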
+#ifdef CONFIG_VCAP_KUNIT_TEST
+#include "vcap_api_debugfs_kunit.c"
+#endif
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs.h b/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs.h
new file mode 100644
index 0000000000..9f2c59b5f6
--- /dev/null
+++ b/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (C) 2022 Microchip Technology Inc. and its subsidiaries.
+ * Microchip VCAP API
+ */
+
+#ifndef __VCAP_API_DEBUGFS__
+#define __VCAP_API_DEBUGFS__
+
+#include <linux/types.h>
+#include <linux/debugfs.h>
+#include <linux/netdevice.h>
+
+#include "vcap_api.h"
+
+#if defined(CONFIG_DEBUG_FS)
+
+void vcap_port_debugfs(struct device *dev, struct dentry *parent,
+ struct vcap_control *vctrl,
+ struct net_device *ndev);
+
+/* Create a debugfs entry for a VCAP instance */
+struct dentry *vcap_debugfs(struct device *dev, struct dentry *parent,
+ struct vcap_control *vctrl);
+
+#else
+
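+/* No-op stubs keep the callers identical when CONFIG_DEBUG_FS is off */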
+static inline void vcap_port_debugfs(struct device *dev, struct dentry *parent,
+ struct vcap_control *vctrl,
+ struct net_device *ndev)
+{
+}
+
+static inline struct dentry *vcap_debugfs(struct device *dev,
+ struct dentry *parent,
+ struct vcap_control *vctrl)
+{
+ return NULL;
+}
+
+#endif
+#endif /* __VCAP_API_DEBUGFS__ */
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs_kunit.c b/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs_kunit.c
new file mode 100644
index 0000000000..b23c11b064
--- /dev/null
+++ b/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs_kunit.c
@@ -0,0 +1,554 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/* Copyright (C) 2022 Microchip Technology Inc. and its subsidiaries.
+ * Microchip VCAP API kunit test suite
+ */
+
+#include <kunit/test.h>
+#include "vcap_api.h"
+#include "vcap_api_client.h"
+#include "vcap_api_debugfs.h"
+#include "vcap_model_kunit.h"
+
+/* First we have the test infrastructure that emulates the platform
+ * implementation
+ */
+#define TEST_BUF_CNT 100
+#define TEST_BUF_SZ 350
+#define STREAMWSIZE 64
+
+static u32 test_updateaddr[STREAMWSIZE] = {};
+static int test_updateaddridx;
+static int test_cache_erase_count;
+static u32 test_init_start;
+static u32 test_init_count;
+static u32 test_hw_counter_id;
+static struct vcap_cache_data test_hw_cache;
+static struct net_device test_netdev = {};
+static int test_move_addr;
+static int test_move_offset;
+static int test_move_count;
+static char test_pr_buffer[TEST_BUF_CNT][TEST_BUF_SZ];
+static int test_pr_bufferidx;
+static int test_pr_idx;
+
+/* Callback used by the VCAP API: it must choose one keyset from the
+ * candidate list. This stub returns the first candidate it recognizes
+ * for the VCAP instance type, funneling -EINVAL through the enum
+ * return type when nothing matches.
+ */
+static enum vcap_keyfield_set test_val_keyset(struct net_device *ndev,
+ struct vcap_admin *admin,
+ struct vcap_rule *rule,
+ struct vcap_keyset_list *kslist,
+ u16 l3_proto)
+{
+ int idx;
+
+ if (kslist->cnt > 0) {
+ switch (admin->vtype) {
+ case VCAP_TYPE_IS0:
+ for (idx = 0; idx < kslist->cnt; idx++) {
+ if (kslist->keysets[idx] == VCAP_KFS_ETAG)
+ return kslist->keysets[idx];
+ if (kslist->keysets[idx] ==
+ VCAP_KFS_PURE_5TUPLE_IP4)
+ return kslist->keysets[idx];
+ if (kslist->keysets[idx] ==
+ VCAP_KFS_NORMAL_5TUPLE_IP4)
+ return kslist->keysets[idx];
+ if (kslist->keysets[idx] ==
+ VCAP_KFS_NORMAL_7TUPLE)
+ return kslist->keysets[idx];
+ }
+ break;
+ case VCAP_TYPE_IS2:
+ for (idx = 0; idx < kslist->cnt; idx++) {
+ if (kslist->keysets[idx] == VCAP_KFS_MAC_ETYPE)
+ return kslist->keysets[idx];
+ if (kslist->keysets[idx] == VCAP_KFS_ARP)
+ return kslist->keysets[idx];
+ if (kslist->keysets[idx] == VCAP_KFS_IP_7TUPLE)
+ return kslist->keysets[idx];
+ }
+ break;
+ default:
+ pr_info("%s:%d: no validation for VCAP %d\n",
+ __func__, __LINE__, admin->vtype);
+ break;
+ }
+ }
+ return -EINVAL;
+}
+
+/* Callback used by the VCAP API */
+static void test_add_def_fields(struct net_device *ndev,
+ struct vcap_admin *admin,
+ struct vcap_rule *rule)
+{
+ if (admin->vinst == 0 || admin->vinst == 2)
+ vcap_rule_add_key_bit(rule, VCAP_KF_LOOKUP_FIRST_IS,
+ VCAP_BIT_1);
+ else
+ vcap_rule_add_key_bit(rule, VCAP_KF_LOOKUP_FIRST_IS,
+ VCAP_BIT_0);
+}
+
+/* Callback used by the VCAP API */
+static void test_cache_erase(struct vcap_admin *admin)
+{
+ if (test_cache_erase_count) {
+ memset(admin->cache.keystream, 0, test_cache_erase_count);
+ memset(admin->cache.maskstream, 0, test_cache_erase_count);
+ memset(admin->cache.actionstream, 0, test_cache_erase_count);
+ test_cache_erase_count = 0;
+ }
+}
+
+/* Callback used by the VCAP API */
+static void test_cache_init(struct net_device *ndev, struct vcap_admin *admin,
+ u32 start, u32 count)
+{
+ test_init_start = start;
+ test_init_count = count;
+}
+
+/* Callback used by the VCAP API */
+static void test_cache_read(struct net_device *ndev, struct vcap_admin *admin,
+ enum vcap_selection sel, u32 start, u32 count)
+{
+ u32 *keystr, *mskstr, *actstr;
+ int idx;
+
+ pr_debug("%s:%d: %d %d\n", __func__, __LINE__, start, count);
+ switch (sel) {
+ case VCAP_SEL_ENTRY:
+ keystr = &admin->cache.keystream[start];
+ mskstr = &admin->cache.maskstream[start];
+ for (idx = 0; idx < count; ++idx) {
+ pr_debug("%s:%d: keydata[%02d]: 0x%08x\n", __func__,
+ __LINE__, start + idx, keystr[idx]);
+ }
+ for (idx = 0; idx < count; ++idx) {
+ /* Invert the mask before decoding starts */
+ mskstr[idx] = ~mskstr[idx];
+ pr_debug("%s:%d: mskdata[%02d]: 0x%08x\n", __func__,
+ __LINE__, start + idx, mskstr[idx]);
+ }
+ break;
+ case VCAP_SEL_ACTION:
+ actstr = &admin->cache.actionstream[start];
+ for (idx = 0; idx < count; ++idx) {
+ pr_debug("%s:%d: actdata[%02d]: 0x%08x\n", __func__,
+ __LINE__, start + idx, actstr[idx]);
+ }
+ break;
+ case VCAP_SEL_COUNTER:
+ pr_debug("%s:%d\n", __func__, __LINE__);
+ test_hw_counter_id = start;
+ admin->cache.counter = test_hw_cache.counter;
+ admin->cache.sticky = test_hw_cache.sticky;
+ break;
+ case VCAP_SEL_ALL:
+ pr_debug("%s:%d\n", __func__, __LINE__);
+ break;
+ }
+}
+
+/* Callback used by the VCAP API */
+static void test_cache_write(struct net_device *ndev, struct vcap_admin *admin,
+ enum vcap_selection sel, u32 start, u32 count)
+{
+ u32 *keystr, *mskstr, *actstr;
+ int idx;
+
+ switch (sel) {
+ case VCAP_SEL_ENTRY:
+ keystr = &admin->cache.keystream[start];
+ mskstr = &admin->cache.maskstream[start];
+ for (idx = 0; idx < count; ++idx) {
+ pr_debug("%s:%d: keydata[%02d]: 0x%08x\n", __func__,
+ __LINE__, start + idx, keystr[idx]);
+ }
+ for (idx = 0; idx < count; ++idx) {
+ /* Invert the mask before encoding starts */
+ mskstr[idx] = ~mskstr[idx];
+ pr_debug("%s:%d: mskdata[%02d]: 0x%08x\n", __func__,
+ __LINE__, start + idx, mskstr[idx]);
+ }
+ break;
+ case VCAP_SEL_ACTION:
+ actstr = &admin->cache.actionstream[start];
+ for (idx = 0; idx < count; ++idx) {
+ pr_debug("%s:%d: actdata[%02d]: 0x%08x\n", __func__,
+ __LINE__, start + idx, actstr[idx]);
+ }
+ break;
+ case VCAP_SEL_COUNTER:
+ pr_debug("%s:%d\n", __func__, __LINE__);
+ test_hw_counter_id = start;
+ test_hw_cache.counter = admin->cache.counter;
+ test_hw_cache.sticky = admin->cache.sticky;
+ break;
+ case VCAP_SEL_ALL:
+ pr_err("%s:%d: cannot write all streams at once\n",
+ __func__, __LINE__);
+ break;
+ }
+}
+
+/* Callback used by the VCAP API */
+static void test_cache_update(struct net_device *ndev, struct vcap_admin *admin,
+ enum vcap_command cmd,
+ enum vcap_selection sel, u32 addr)
+{
+ if (test_updateaddridx < ARRAY_SIZE(test_updateaddr))
+ test_updateaddr[test_updateaddridx] = addr;
+ else
+ pr_err("%s:%d: overflow: %d\n", __func__, __LINE__,
+ test_updateaddridx);
+ test_updateaddridx++;
+}
+
+static void test_cache_move(struct net_device *ndev, struct vcap_admin *admin,
+ u32 addr, int offset, int count)
+{
+ test_move_addr = addr;
+ test_move_offset = offset;
+ test_move_count = count;
+}
+
+/* Provide port information via a callback interface */
+static int vcap_test_port_info(struct net_device *ndev,
+ struct vcap_admin *admin,
+ struct vcap_output_print *out)
+{
+ return 0;
+}
+
+static struct vcap_operations test_callbacks = {
+ .validate_keyset = test_val_keyset,
+ .add_default_fields = test_add_def_fields,
+ .cache_erase = test_cache_erase,
+ .cache_write = test_cache_write,
+ .cache_read = test_cache_read,
+ .init = test_cache_init,
+ .update = test_cache_update,
+ .move = test_cache_move,
+ .port_info = vcap_test_port_info,
+};
+
+static struct vcap_control test_vctrl = {
+ .vcaps = kunit_test_vcaps,
+ .stats = &kunit_test_vcap_stats,
+ .ops = &test_callbacks,
+};
+
+static void vcap_test_api_init(struct vcap_admin *admin)
+{
+ /* Initialize the shared objects */
+ INIT_LIST_HEAD(&test_vctrl.list);
+ INIT_LIST_HEAD(&admin->list);
+ INIT_LIST_HEAD(&admin->rules);
+ INIT_LIST_HEAD(&admin->enabled);
+ mutex_init(&admin->lock);
+ list_add_tail(&admin->list, &test_vctrl.list);
+ memset(test_updateaddr, 0, sizeof(test_updateaddr));
+ test_updateaddridx = 0;
+ test_pr_bufferidx = 0;
+ test_pr_idx = 0;
+}
+
+/* Output callback used by the show_admin test cases: it captures the
+ * formatted output and splits it on '\n' into test_pr_buffer[] so the
+ * tests can compare the result line by line.
+ */
+static __printf(2, 3)
+int test_prf(void *out, const char *fmt, ...)
+{
+ static char test_buffer[TEST_BUF_SZ];
+ va_list args;
+ int idx, cnt;
+
+ if (test_pr_bufferidx >= TEST_BUF_CNT) {
+ pr_err("%s:%d: overflow: %d\n", __func__, __LINE__,
+ test_pr_bufferidx);
+ return 0;
+ }
+
+ va_start(args, fmt);
+ cnt = vscnprintf(test_buffer, TEST_BUF_SZ, fmt, args);
+ va_end(args);
+
+ for (idx = 0; idx < cnt; ++idx) {
+ test_pr_buffer[test_pr_bufferidx][test_pr_idx] =
+ test_buffer[idx];
+ if (test_buffer[idx] == '\n') {
+ test_pr_buffer[test_pr_bufferidx][++test_pr_idx] = 0;
+ test_pr_idx = 0;
+ test_pr_bufferidx++;
+ } else {
+ ++test_pr_idx;
+ }
+ }
+
+ return cnt;
+}
+
+/* Define the test cases. */
+
+static void vcap_api_addr_keyset_test(struct kunit *test)
+{
+ u32 keydata[12] = {
+ 0x40450042, 0x000feaf3, 0x00000003, 0x00050600,
+ 0x10203040, 0x00075880, 0x633c6864, 0x00040003,
+ 0x00000020, 0x00000008, 0x00000240, 0x00000000,
+ };
+ u32 mskdata[12] = {
+ 0x0030ff80, 0xfff00000, 0xfffffffc, 0xfff000ff,
+ 0x00000000, 0xfff00000, 0x00000000, 0xfff3fffc,
+ 0xffffffc0, 0xffffffff, 0xfffffc03, 0xffffffff,
+ };
+ u32 actdata[12] = {};
+ struct vcap_admin admin = {
+ .vtype = VCAP_TYPE_IS2,
+ .cache = {
+ .keystream = keydata,
+ .maskstream = mskdata,
+ .actionstream = actdata,
+ },
+ };
+ enum vcap_keyfield_set keysets[10];
+ struct vcap_keyset_list matches;
+ int ret, idx, addr;
+
+ vcap_test_api_init(&admin);
+
+ /* Go from higher to lower addresses searching for a keyset */
+ matches.keysets = keysets;
+ matches.cnt = 0;
+ matches.max = ARRAY_SIZE(keysets);
+ for (idx = ARRAY_SIZE(keydata) - 1, addr = 799; idx > 0;
+ --idx, --addr) {
+ admin.cache.keystream = &keydata[idx];
+ admin.cache.maskstream = &mskdata[idx];
+ ret = vcap_addr_keysets(&test_vctrl, &test_netdev, &admin,
+ addr, &matches);
+ KUNIT_EXPECT_EQ(test, -EINVAL, ret);
+ }
+
+ /* Finally we hit the start of the rule */
+ admin.cache.keystream = &keydata[idx];
+ admin.cache.maskstream = &mskdata[idx];
+ matches.cnt = 0;
+ ret = vcap_addr_keysets(&test_vctrl, &test_netdev, &admin,
+ addr, &matches);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ KUNIT_EXPECT_EQ(test, matches.cnt, 1);
+ KUNIT_EXPECT_EQ(test, matches.keysets[0], VCAP_KFS_MAC_ETYPE);
+}
+
+static void vcap_api_show_admin_raw_test(struct kunit *test)
+{
+ u32 keydata[4] = {
+ 0x40450042, 0x000feaf3, 0x00000003, 0x00050600,
+ };
+ u32 mskdata[4] = {
+ 0x0030ff80, 0xfff00000, 0xfffffffc, 0xfff000ff,
+ };
+ u32 actdata[12] = {};
+ struct vcap_admin admin = {
+ .vtype = VCAP_TYPE_IS2,
+ .cache = {
+ .keystream = keydata,
+ .maskstream = mskdata,
+ .actionstream = actdata,
+ },
+ .first_valid_addr = 786,
+ .last_valid_addr = 788,
+ };
+ struct vcap_rule_internal ri = {
+ .ndev = &test_netdev,
+ };
+ struct vcap_output_print out = {
+ .prf = (void *)test_prf,
+ };
+ const char *test_expected =
+ " addr: 786, X6 rule, keysets: VCAP_KFS_MAC_ETYPE\n";
+ int ret;
+
+ vcap_test_api_init(&admin);
+ list_add_tail(&ri.list, &admin.rules);
+
+ ret = vcap_show_admin_raw(&test_vctrl, &admin, &out);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ KUNIT_EXPECT_STREQ(test, test_expected, test_pr_buffer[0]);
+}
+
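+/* Expected vcap_show_admin_info() output, line by line, for the admin
+ * instance set up in vcap_api_show_admin_test() below.
+ */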
+static const char * const test_admin_info_expect[] = {
+ "name: is2\n",
+ "rows: 256\n",
+ "sw_count: 12\n",
+ "sw_width: 52\n",
+ "sticky_width: 1\n",
+ "act_width: 110\n",
+ "default_cnt: 73\n",
+ "require_cnt_dis: 0\n",
+ "version: 1\n",
+ "vtype: 4\n",
+ "vinst: 0\n",
+ "ingress: 1\n",
+ "first_cid: 10000\n",
+ "last_cid: 19999\n",
+ "lookups: 4\n",
+ "first_valid_addr: 0\n",
+ "last_valid_addr: 3071\n",
+ "last_used_addr: 794\n",
+};
+
+static void vcap_api_show_admin_test(struct kunit *test)
+{
+ struct vcap_admin admin = {
+ .vtype = VCAP_TYPE_IS2,
+ .first_cid = 10000,
+ .last_cid = 19999,
+ .lookups = 4,
+ .last_valid_addr = 3071,
+ .first_valid_addr = 0,
+ .last_used_addr = 794,
+ .ingress = true,
+ };
+ struct vcap_output_print out = {
+ .prf = (void *)test_prf,
+ };
+ int idx;
+
+ vcap_test_api_init(&admin);
+
+ vcap_show_admin_info(&test_vctrl, &admin, &out);
+ for (idx = 0; idx < test_pr_bufferidx; ++idx) {
+ /* pr_info("log[%02d]: %s", idx, test_pr_buffer[idx]); */
+ KUNIT_EXPECT_STREQ(test, test_admin_info_expect[idx],
+ test_pr_buffer[idx]);
+ }
+}
+
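+/* Expected vcap_show_admin() output for a decoded MAC_ETYPE rule at
+ * address 794, including the per-field key and action dumps.
+ */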
+static const char * const test_admin_expect[] = {
+ "name: is2\n",
+ "rows: 256\n",
+ "sw_count: 12\n",
+ "sw_width: 52\n",
+ "sticky_width: 1\n",
+ "act_width: 110\n",
+ "default_cnt: 73\n",
+ "require_cnt_dis: 0\n",
+ "version: 1\n",
+ "vtype: 4\n",
+ "vinst: 0\n",
+ "ingress: 1\n",
+ "first_cid: 8000000\n",
+ "last_cid: 8199999\n",
+ "lookups: 4\n",
+ "first_valid_addr: 0\n",
+ "last_valid_addr: 3071\n",
+ "last_used_addr: 794\n",
+ "\n",
+ "rule: 100, addr: [794,799], X6, ctr[0]: 0, hit: 0\n",
+ " chain_id: 0\n",
+ " user: 0\n",
+ " priority: 0\n",
+ " state: permanent\n",
+ " keysets: VCAP_KFS_MAC_ETYPE\n",
+ " keyset_sw: 6\n",
+ " keyset_sw_regs: 2\n",
+ " ETYPE_LEN_IS: W1: 1/1\n",
+ " IF_IGR_PORT_MASK: W32: 0xffabcd01/0xffffffff\n",
+ " IF_IGR_PORT_MASK_RNG: W4: 5/15\n",
+ " L2_DMAC: W48: 01:02:03:04:05:06/ff:ff:ff:ff:ff:ff\n",
+ " L2_PAYLOAD_ETYPE: W64: 0x9000002000000081/0xff000000000000ff\n",
+ " L2_SMAC: W48: b1:9e:34:32:75:88/ff:ff:ff:ff:ff:ff\n",
+ " LOOKUP_FIRST_IS: W1: 1/1\n",
+ " TYPE: W4: 0/15\n",
+ " actionset: VCAP_AFS_BASE_TYPE\n",
+ " actionset_sw: 3\n",
+ " actionset_sw_regs: 4\n",
+ " CNT_ID: W12: 100\n",
+ " MATCH_ID: W16: 1\n",
+ " MATCH_ID_MASK: W16: 1\n",
+ " POLICE_ENA: W1: 1\n",
+ " PORT_MASK: W68: 0x0514670115f3324589\n",
+};
+
+static void vcap_api_show_admin_rule_test(struct kunit *test)
+{
+ u32 keydata[] = {
+ 0x40450042, 0x000feaf3, 0x00000003, 0x00050600,
+ 0x10203040, 0x00075880, 0x633c6864, 0x00040003,
+ 0x00000020, 0x00000008, 0x00000240, 0x00000000,
+ };
+ u32 mskdata[] = {
+ 0x0030ff80, 0xfff00000, 0xfffffffc, 0xfff000ff,
+ 0x00000000, 0xfff00000, 0x00000000, 0xfff3fffc,
+ 0xffffffc0, 0xffffffff, 0xfffffc03, 0xffffffff,
+ };
+ u32 actdata[] = {
+ 0x00040002, 0xf3324589, 0x14670115, 0x00000005,
+ 0x00000000, 0x00100000, 0x06400010, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ };
+ struct vcap_admin admin = {
+ .vtype = VCAP_TYPE_IS2,
+ .first_cid = 8000000,
+ .last_cid = 8199999,
+ .lookups = 4,
+ .last_valid_addr = 3071,
+ .first_valid_addr = 0,
+ .last_used_addr = 794,
+ .ingress = true,
+ .cache = {
+ .keystream = keydata,
+ .maskstream = mskdata,
+ .actionstream = actdata,
+ },
+ };
+ struct vcap_rule_internal ri = {
+ .admin = &admin,
+ .data = {
+ .id = 100,
+ .keyset = VCAP_KFS_MAC_ETYPE,
+ .actionset = VCAP_AFS_BASE_TYPE,
+ },
+ .size = 6,
+ .keyset_sw = 6,
+ .keyset_sw_regs = 2,
+ .actionset_sw = 3,
+ .actionset_sw_regs = 4,
+ .addr = 794,
+ .vctrl = &test_vctrl,
+ };
+ struct vcap_output_print out = {
+ .prf = (void *)test_prf,
+ };
+ int ret, idx;
+
+ vcap_test_api_init(&admin);
+ list_add_tail(&ri.list, &admin.rules);
+
+ ret = vcap_show_admin(&test_vctrl, &admin, &out);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ for (idx = 0; idx < test_pr_bufferidx; ++idx) {
+ /* pr_info("log[%02d]: %s", idx, test_pr_buffer[idx]); */
+ KUNIT_EXPECT_STREQ(test, test_admin_expect[idx],
+ test_pr_buffer[idx]);
+ }
+}
+
+static struct kunit_case vcap_api_debugfs_test_cases[] = {
+ KUNIT_CASE(vcap_api_addr_keyset_test),
+ KUNIT_CASE(vcap_api_show_admin_raw_test),
+ KUNIT_CASE(vcap_api_show_admin_test),
+ KUNIT_CASE(vcap_api_show_admin_rule_test),
+ {}
+};
+
+static struct kunit_suite vcap_api_debugfs_test_suite = {
+ .name = "VCAP_API_DebugFS_Testsuite",
+ .test_cases = vcap_api_debugfs_test_cases,
+};
+
+kunit_test_suite(vcap_api_debugfs_test_suite);
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c b/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
new file mode 100644
index 0000000000..fe4e166de8
--- /dev/null
+++ b/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
@@ -0,0 +1,2357 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/* Copyright (C) 2022 Microchip Technology Inc. and its subsidiaries.
+ * Microchip VCAP API kunit test suite
+ */
+
+#include <kunit/test.h>
+#include "vcap_api.h"
+#include "vcap_api_client.h"
+#include "vcap_model_kunit.h"
+
+/* First we have the test infrastructure that emulates the platform
+ * implementation
+ */
+#define TEST_BUF_CNT 100
+#define TEST_BUF_SZ 350
+#define STREAMWSIZE 64
+
+static u32 test_updateaddr[STREAMWSIZE] = {};
+static int test_updateaddridx;
+static int test_cache_erase_count;
+static u32 test_init_start;
+static u32 test_init_count;
+static u32 test_hw_counter_id;
+static struct vcap_cache_data test_hw_cache;
+static struct net_device test_netdev = {};
+static int test_move_addr;
+static int test_move_offset;
+static int test_move_count;
+
+/* Callback used by the VCAP API */
+static enum vcap_keyfield_set test_val_keyset(struct net_device *ndev,
+ struct vcap_admin *admin,
+ struct vcap_rule *rule,
+ struct vcap_keyset_list *kslist,
+ u16 l3_proto)
+{
+ int idx;
+
+ if (kslist->cnt > 0) {
+ switch (admin->vtype) {
+ case VCAP_TYPE_IS0:
+ for (idx = 0; idx < kslist->cnt; idx++) {
+ if (kslist->keysets[idx] == VCAP_KFS_ETAG)
+ return kslist->keysets[idx];
+ if (kslist->keysets[idx] == VCAP_KFS_PURE_5TUPLE_IP4)
+ return kslist->keysets[idx];
+ if (kslist->keysets[idx] == VCAP_KFS_NORMAL_5TUPLE_IP4)
+ return kslist->keysets[idx];
+ if (kslist->keysets[idx] == VCAP_KFS_NORMAL_7TUPLE)
+ return kslist->keysets[idx];
+ }
+ break;
+ case VCAP_TYPE_IS2:
+ for (idx = 0; idx < kslist->cnt; idx++) {
+ if (kslist->keysets[idx] == VCAP_KFS_MAC_ETYPE)
+ return kslist->keysets[idx];
+ if (kslist->keysets[idx] == VCAP_KFS_ARP)
+ return kslist->keysets[idx];
+ if (kslist->keysets[idx] == VCAP_KFS_IP_7TUPLE)
+ return kslist->keysets[idx];
+ }
+ break;
+ default:
+ pr_info("%s:%d: no validation for VCAP %d\n",
+ __func__, __LINE__, admin->vtype);
+ break;
+ }
+ }
+ return -EINVAL;
+}
+
+/* Callback used by the VCAP API */
+static void test_add_def_fields(struct net_device *ndev,
+ struct vcap_admin *admin,
+ struct vcap_rule *rule)
+{
+ if (admin->vinst == 0 || admin->vinst == 2)
+ vcap_rule_add_key_bit(rule, VCAP_KF_LOOKUP_FIRST_IS, VCAP_BIT_1);
+ else
+ vcap_rule_add_key_bit(rule, VCAP_KF_LOOKUP_FIRST_IS, VCAP_BIT_0);
+}
+
+/* Callback used by the VCAP API */
+static void test_cache_erase(struct vcap_admin *admin)
+{
+ if (test_cache_erase_count) {
+ memset(admin->cache.keystream, 0, test_cache_erase_count);
+ memset(admin->cache.maskstream, 0, test_cache_erase_count);
+ memset(admin->cache.actionstream, 0, test_cache_erase_count);
+ test_cache_erase_count = 0;
+ }
+}
+
+/* Callback used by the VCAP API */
+static void test_cache_init(struct net_device *ndev, struct vcap_admin *admin,
+ u32 start, u32 count)
+{
+ test_init_start = start;
+ test_init_count = count;
+}
+
+/* Callback used by the VCAP API */
+static void test_cache_read(struct net_device *ndev, struct vcap_admin *admin,
+ enum vcap_selection sel, u32 start, u32 count)
+{
+ u32 *keystr, *mskstr, *actstr;
+ int idx;
+
+ pr_debug("%s:%d: %d %d\n", __func__, __LINE__, start, count);
+ switch (sel) {
+ case VCAP_SEL_ENTRY:
+ keystr = &admin->cache.keystream[start];
+ mskstr = &admin->cache.maskstream[start];
+ for (idx = 0; idx < count; ++idx) {
+ pr_debug("%s:%d: keydata[%02d]: 0x%08x\n", __func__,
+ __LINE__, start + idx, keystr[idx]);
+ }
+ for (idx = 0; idx < count; ++idx) {
+ /* Invert the mask before decoding starts */
+ mskstr[idx] = ~mskstr[idx];
+ pr_debug("%s:%d: mskdata[%02d]: 0x%08x\n", __func__,
+ __LINE__, start + idx, mskstr[idx]);
+ }
+ break;
+ case VCAP_SEL_ACTION:
+ actstr = &admin->cache.actionstream[start];
+ for (idx = 0; idx < count; ++idx) {
+ pr_debug("%s:%d: actdata[%02d]: 0x%08x\n", __func__,
+ __LINE__, start + idx, actstr[idx]);
+ }
+ break;
+ case VCAP_SEL_COUNTER:
+ pr_debug("%s:%d\n", __func__, __LINE__);
+ test_hw_counter_id = start;
+ admin->cache.counter = test_hw_cache.counter;
+ admin->cache.sticky = test_hw_cache.sticky;
+ break;
+ case VCAP_SEL_ALL:
+ pr_debug("%s:%d\n", __func__, __LINE__);
+ break;
+ }
+}
+
+/* Callback used by the VCAP API */
+static void test_cache_write(struct net_device *ndev, struct vcap_admin *admin,
+ enum vcap_selection sel, u32 start, u32 count)
+{
+ u32 *keystr, *mskstr, *actstr;
+ int idx;
+
+ switch (sel) {
+ case VCAP_SEL_ENTRY:
+ keystr = &admin->cache.keystream[start];
+ mskstr = &admin->cache.maskstream[start];
+ for (idx = 0; idx < count; ++idx) {
+ pr_debug("%s:%d: keydata[%02d]: 0x%08x\n", __func__,
+ __LINE__, start + idx, keystr[idx]);
+ }
+ for (idx = 0; idx < count; ++idx) {
+ /* Invert the mask before encoding starts */
+ mskstr[idx] = ~mskstr[idx];
+ pr_debug("%s:%d: mskdata[%02d]: 0x%08x\n", __func__,
+ __LINE__, start + idx, mskstr[idx]);
+ }
+ break;
+ case VCAP_SEL_ACTION:
+ actstr = &admin->cache.actionstream[start];
+ for (idx = 0; idx < count; ++idx) {
+ pr_debug("%s:%d: actdata[%02d]: 0x%08x\n", __func__,
+ __LINE__, start + idx, actstr[idx]);
+ }
+ break;
+ case VCAP_SEL_COUNTER:
+ pr_debug("%s:%d\n", __func__, __LINE__);
+ test_hw_counter_id = start;
+ test_hw_cache.counter = admin->cache.counter;
+ test_hw_cache.sticky = admin->cache.sticky;
+ break;
+ case VCAP_SEL_ALL:
+ pr_err("%s:%d: cannot write all streams at once\n",
+ __func__, __LINE__);
+ break;
+ }
+}
+
+/* Callback used by the VCAP API */
+static void test_cache_update(struct net_device *ndev, struct vcap_admin *admin,
+ enum vcap_command cmd,
+ enum vcap_selection sel, u32 addr)
+{
+ if (test_updateaddridx < ARRAY_SIZE(test_updateaddr))
+ test_updateaddr[test_updateaddridx] = addr;
+ else
+		pr_err("%s:%d: overflow: %d\n", __func__, __LINE__,
+		       test_updateaddridx);
+ test_updateaddridx++;
+}
+
+static void test_cache_move(struct net_device *ndev, struct vcap_admin *admin,
+ u32 addr, int offset, int count)
+{
+ test_move_addr = addr;
+ test_move_offset = offset;
+ test_move_count = count;
+}
+
+/* Provide port information via a callback interface */
+static int vcap_test_port_info(struct net_device *ndev,
+ struct vcap_admin *admin,
+ struct vcap_output_print *out)
+{
+ return 0;
+}
+
+static struct vcap_operations test_callbacks = {
+ .validate_keyset = test_val_keyset,
+ .add_default_fields = test_add_def_fields,
+ .cache_erase = test_cache_erase,
+ .cache_write = test_cache_write,
+ .cache_read = test_cache_read,
+ .init = test_cache_init,
+ .update = test_cache_update,
+ .move = test_cache_move,
+ .port_info = vcap_test_port_info,
+};
+
+static struct vcap_control test_vctrl = {
+ .vcaps = kunit_test_vcaps,
+ .stats = &kunit_test_vcap_stats,
+ .ops = &test_callbacks,
+};
+
+static void vcap_test_api_init(struct vcap_admin *admin)
+{
+ /* Initialize the shared objects */
+ INIT_LIST_HEAD(&test_vctrl.list);
+ INIT_LIST_HEAD(&admin->list);
+ INIT_LIST_HEAD(&admin->rules);
+ INIT_LIST_HEAD(&admin->enabled);
+ mutex_init(&admin->lock);
+ list_add_tail(&admin->list, &test_vctrl.list);
+ memset(test_updateaddr, 0, sizeof(test_updateaddr));
+ test_updateaddridx = 0;
+}
+
+/* Helper function to create a rule of a specific size */
+static void test_vcap_xn_rule_creator(struct kunit *test, int cid,
+ enum vcap_user user, u16 priority,
+ int id, int size, int expected_addr)
+{
+ struct vcap_rule *rule;
+ struct vcap_rule_internal *ri;
+ enum vcap_keyfield_set keyset = VCAP_KFS_NO_VALUE;
+ enum vcap_actionfield_set actionset = VCAP_AFS_NO_VALUE;
+ int ret;
+
+ /* init before testing */
+ memset(test_updateaddr, 0, sizeof(test_updateaddr));
+ test_updateaddridx = 0;
+ test_move_addr = 0;
+ test_move_offset = 0;
+ test_move_count = 0;
+
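+	/* The keyset determines the rule size in subwords, so X2, X3, X6
+	 * and X12 rules are exercised by picking keysets of those sizes.
+	 */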
+ switch (size) {
+ case 2:
+ keyset = VCAP_KFS_ETAG;
+ actionset = VCAP_AFS_CLASS_REDUCED;
+ break;
+ case 3:
+ keyset = VCAP_KFS_PURE_5TUPLE_IP4;
+ actionset = VCAP_AFS_CLASSIFICATION;
+ break;
+ case 6:
+ keyset = VCAP_KFS_NORMAL_5TUPLE_IP4;
+ actionset = VCAP_AFS_CLASSIFICATION;
+ break;
+ case 12:
+ keyset = VCAP_KFS_NORMAL_7TUPLE;
+ actionset = VCAP_AFS_FULL;
+ break;
+ default:
+ break;
+ }
+
+ /* Check that a valid size was used */
+ KUNIT_ASSERT_NE(test, VCAP_KFS_NO_VALUE, keyset);
+
+ /* Allocate the rule */
+ rule = vcap_alloc_rule(&test_vctrl, &test_netdev, cid, user, priority,
+ id);
+ KUNIT_EXPECT_PTR_NE(test, NULL, rule);
+
+ ri = (struct vcap_rule_internal *)rule;
+
+ /* Override rule keyset */
+ ret = vcap_set_rule_set_keyset(rule, keyset);
+
+	/* Add rule actions: there must be at least one action */
+ ret = vcap_rule_add_action_u32(rule, VCAP_AF_ISDX_VAL, 0);
+
+ /* Override rule actionset */
+ ret = vcap_set_rule_set_actionset(rule, actionset);
+
+ ret = vcap_val_rule(rule, ETH_P_ALL);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ KUNIT_EXPECT_EQ(test, keyset, rule->keyset);
+ KUNIT_EXPECT_EQ(test, actionset, rule->actionset);
+ KUNIT_EXPECT_EQ(test, size, ri->size);
+
+ /* Add rule with write callback */
+ ret = vcap_add_rule(rule);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ KUNIT_EXPECT_EQ(test, expected_addr, ri->addr);
+ vcap_free_rule(rule);
+}
+
+/* Prepare testing rule deletion */
+static void test_init_rule_deletion(void)
+{
+ test_move_addr = 0;
+ test_move_offset = 0;
+ test_move_count = 0;
+ test_init_start = 0;
+ test_init_count = 0;
+}
+
+/* Define the test cases. */
+
+static void vcap_api_set_bit_1_test(struct kunit *test)
+{
+ struct vcap_stream_iter iter = {
+ .offset = 35,
+ .sw_width = 52,
+ .reg_idx = 1,
+ .reg_bitpos = 20,
+ .tg = NULL,
+ };
+ u32 stream[2] = {0};
+
+ vcap_set_bit(stream, &iter, 1);
+
+ KUNIT_EXPECT_EQ(test, (u32)0x0, stream[0]);
+ KUNIT_EXPECT_EQ(test, (u32)BIT(20), stream[1]);
+}
+
+static void vcap_api_set_bit_0_test(struct kunit *test)
+{
+ struct vcap_stream_iter iter = {
+ .offset = 35,
+ .sw_width = 52,
+ .reg_idx = 2,
+ .reg_bitpos = 11,
+ .tg = NULL,
+ };
+ u32 stream[3] = {~0, ~0, ~0};
+
+ vcap_set_bit(stream, &iter, 0);
+
+ KUNIT_EXPECT_EQ(test, (u32)~0, stream[0]);
+ KUNIT_EXPECT_EQ(test, (u32)~0, stream[1]);
+ KUNIT_EXPECT_EQ(test, (u32)~BIT(11), stream[2]);
+}
+
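+/* The subword/register arithmetic that the expectations below encode:
+ * iter.offset is the requested bit offset plus the width of every
+ * typegroup field placed before it, and with
+ * regs_per_sw = DIV_ROUND_UP(sw_width, 32):
+ *
+ *	reg_idx    = (offset / sw_width) * regs_per_sw +
+ *		     (offset % sw_width) / 32
+ *	reg_bitpos = (offset % sw_width) % 32
+ *
+ * E.g. offset 86 + 2 typegroup bits = 88 with sw_width 52: subword 1,
+ * bit 36, giving reg_idx 1 * 2 + 1 = 3 and reg_bitpos 4.
+ */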
+static void vcap_api_iterator_init_test(struct kunit *test)
+{
+ struct vcap_stream_iter iter;
+ struct vcap_typegroup typegroups[] = {
+ { .offset = 0, .width = 2, .value = 2, },
+ { .offset = 156, .width = 1, .value = 0, },
+ { .offset = 0, .width = 0, .value = 0, },
+ };
+ struct vcap_typegroup typegroups2[] = {
+ { .offset = 0, .width = 3, .value = 4, },
+ { .offset = 49, .width = 2, .value = 0, },
+ { .offset = 98, .width = 2, .value = 0, },
+ };
+
+ vcap_iter_init(&iter, 52, typegroups, 86);
+
+ KUNIT_EXPECT_EQ(test, 52, iter.sw_width);
+ KUNIT_EXPECT_EQ(test, 86 + 2, iter.offset);
+ KUNIT_EXPECT_EQ(test, 3, iter.reg_idx);
+ KUNIT_EXPECT_EQ(test, 4, iter.reg_bitpos);
+
+ vcap_iter_init(&iter, 49, typegroups2, 134);
+
+ KUNIT_EXPECT_EQ(test, 49, iter.sw_width);
+ KUNIT_EXPECT_EQ(test, 134 + 7, iter.offset);
+ KUNIT_EXPECT_EQ(test, 5, iter.reg_idx);
+ KUNIT_EXPECT_EQ(test, 11, iter.reg_bitpos);
+}
+
+static void vcap_api_iterator_next_test(struct kunit *test)
+{
+ struct vcap_stream_iter iter;
+ struct vcap_typegroup typegroups[] = {
+ { .offset = 0, .width = 4, .value = 8, },
+ { .offset = 49, .width = 1, .value = 0, },
+ { .offset = 98, .width = 2, .value = 0, },
+ { .offset = 147, .width = 3, .value = 0, },
+ { .offset = 196, .width = 2, .value = 0, },
+ { .offset = 245, .width = 1, .value = 0, },
+ };
+ int idx;
+
+ vcap_iter_init(&iter, 49, typegroups, 86);
+
+ KUNIT_EXPECT_EQ(test, 49, iter.sw_width);
+ KUNIT_EXPECT_EQ(test, 86 + 5, iter.offset);
+ KUNIT_EXPECT_EQ(test, 3, iter.reg_idx);
+ KUNIT_EXPECT_EQ(test, 10, iter.reg_bitpos);
+
+ vcap_iter_next(&iter);
+
+ KUNIT_EXPECT_EQ(test, 91 + 1, iter.offset);
+ KUNIT_EXPECT_EQ(test, 3, iter.reg_idx);
+ KUNIT_EXPECT_EQ(test, 11, iter.reg_bitpos);
+
+ for (idx = 0; idx < 6; idx++)
+ vcap_iter_next(&iter);
+
+ KUNIT_EXPECT_EQ(test, 92 + 6 + 2, iter.offset);
+ KUNIT_EXPECT_EQ(test, 4, iter.reg_idx);
+ KUNIT_EXPECT_EQ(test, 2, iter.reg_bitpos);
+}
+
+static void vcap_api_encode_typegroups_test(struct kunit *test)
+{
+ u32 stream[12] = {0};
+ struct vcap_typegroup typegroups[] = {
+ { .offset = 0, .width = 4, .value = 8, },
+ { .offset = 49, .width = 1, .value = 1, },
+ { .offset = 98, .width = 2, .value = 3, },
+ { .offset = 147, .width = 3, .value = 5, },
+ { .offset = 196, .width = 2, .value = 2, },
+ { .offset = 245, .width = 5, .value = 27, },
+ { .offset = 0, .width = 0, .value = 0, },
+ };
+
+ vcap_encode_typegroups(stream, 49, typegroups, false);
+
+ KUNIT_EXPECT_EQ(test, (u32)0x8, stream[0]);
+ KUNIT_EXPECT_EQ(test, (u32)0x0, stream[1]);
+ KUNIT_EXPECT_EQ(test, (u32)0x1, stream[2]);
+ KUNIT_EXPECT_EQ(test, (u32)0x0, stream[3]);
+ KUNIT_EXPECT_EQ(test, (u32)0x3, stream[4]);
+ KUNIT_EXPECT_EQ(test, (u32)0x0, stream[5]);
+ KUNIT_EXPECT_EQ(test, (u32)0x5, stream[6]);
+ KUNIT_EXPECT_EQ(test, (u32)0x0, stream[7]);
+ KUNIT_EXPECT_EQ(test, (u32)0x2, stream[8]);
+ KUNIT_EXPECT_EQ(test, (u32)0x0, stream[9]);
+ KUNIT_EXPECT_EQ(test, (u32)27, stream[10]);
+ KUNIT_EXPECT_EQ(test, (u32)0x0, stream[11]);
+}
+
+static void vcap_api_encode_bit_test(struct kunit *test)
+{
+ struct vcap_stream_iter iter;
+ u32 stream[4] = {0};
+ struct vcap_typegroup typegroups[] = {
+ { .offset = 0, .width = 4, .value = 8, },
+ { .offset = 49, .width = 1, .value = 1, },
+ { .offset = 98, .width = 2, .value = 3, },
+ { .offset = 147, .width = 3, .value = 5, },
+ { .offset = 196, .width = 2, .value = 2, },
+ { .offset = 245, .width = 1, .value = 0, },
+ };
+
+ vcap_iter_init(&iter, 49, typegroups, 44);
+
+ KUNIT_EXPECT_EQ(test, 48, iter.offset);
+ KUNIT_EXPECT_EQ(test, 1, iter.reg_idx);
+ KUNIT_EXPECT_EQ(test, 16, iter.reg_bitpos);
+
+ vcap_encode_bit(stream, &iter, 1);
+
+ KUNIT_EXPECT_EQ(test, (u32)0x0, stream[0]);
+ KUNIT_EXPECT_EQ(test, (u32)BIT(16), stream[1]);
+ KUNIT_EXPECT_EQ(test, (u32)0x0, stream[2]);
+}
+
+static void vcap_api_encode_field_test(struct kunit *test)
+{
+ struct vcap_stream_iter iter;
+ u32 stream[16] = {0};
+ struct vcap_typegroup typegroups[] = {
+ { .offset = 0, .width = 4, .value = 8, },
+ { .offset = 49, .width = 1, .value = 1, },
+ { .offset = 98, .width = 2, .value = 3, },
+ { .offset = 147, .width = 3, .value = 5, },
+ { .offset = 196, .width = 2, .value = 2, },
+ { .offset = 245, .width = 5, .value = 27, },
+ { .offset = 0, .width = 0, .value = 0, },
+ };
+ struct vcap_field rf = {
+ .type = VCAP_FIELD_U32,
+ .offset = 86,
+ .width = 4,
+ };
+ u8 value[] = {0x5};
+
+ vcap_iter_init(&iter, 49, typegroups, rf.offset);
+
+ KUNIT_EXPECT_EQ(test, 91, iter.offset);
+ KUNIT_EXPECT_EQ(test, 3, iter.reg_idx);
+ KUNIT_EXPECT_EQ(test, 10, iter.reg_bitpos);
+
+ vcap_encode_field(stream, &iter, rf.width, value);
+
+ KUNIT_EXPECT_EQ(test, (u32)0x0, stream[0]);
+ KUNIT_EXPECT_EQ(test, (u32)0x0, stream[1]);
+ KUNIT_EXPECT_EQ(test, (u32)0x0, stream[2]);
+ KUNIT_EXPECT_EQ(test, (u32)(0x5 << 10), stream[3]);
+ KUNIT_EXPECT_EQ(test, (u32)0x0, stream[4]);
+
+ vcap_encode_typegroups(stream, 49, typegroups, false);
+
+ KUNIT_EXPECT_EQ(test, (u32)0x8, stream[0]);
+ KUNIT_EXPECT_EQ(test, (u32)0x0, stream[1]);
+ KUNIT_EXPECT_EQ(test, (u32)0x1, stream[2]);
+ KUNIT_EXPECT_EQ(test, (u32)(0x5 << 10), stream[3]);
+ KUNIT_EXPECT_EQ(test, (u32)0x3, stream[4]);
+ KUNIT_EXPECT_EQ(test, (u32)0x0, stream[5]);
+ KUNIT_EXPECT_EQ(test, (u32)0x5, stream[6]);
+ KUNIT_EXPECT_EQ(test, (u32)0x0, stream[7]);
+ KUNIT_EXPECT_EQ(test, (u32)0x2, stream[8]);
+ KUNIT_EXPECT_EQ(test, (u32)0x0, stream[9]);
+ KUNIT_EXPECT_EQ(test, (u32)27, stream[10]);
+ KUNIT_EXPECT_EQ(test, (u32)0x0, stream[11]);
+}
+
+/* In this test case the subword is smaller than a register */
+static void vcap_api_encode_short_field_test(struct kunit *test)
+{
+ struct vcap_stream_iter iter;
+ int sw_width = 21;
+ u32 stream[6] = {0};
+ struct vcap_typegroup tgt[] = {
+ { .offset = 0, .width = 3, .value = 7, },
+ { .offset = 21, .width = 2, .value = 3, },
+ { .offset = 42, .width = 1, .value = 1, },
+ { .offset = 0, .width = 0, .value = 0, },
+ };
+ struct vcap_field rf = {
+ .type = VCAP_FIELD_U32,
+ .offset = 25,
+ .width = 4,
+ };
+ u8 value[] = {0x5};
+
+ vcap_iter_init(&iter, sw_width, tgt, rf.offset);
+
+ KUNIT_EXPECT_EQ(test, 1, iter.regs_per_sw);
+ KUNIT_EXPECT_EQ(test, 21, iter.sw_width);
+ KUNIT_EXPECT_EQ(test, 25 + 3 + 2, iter.offset);
+ KUNIT_EXPECT_EQ(test, 1, iter.reg_idx);
+ KUNIT_EXPECT_EQ(test, 25 + 3 + 2 - sw_width, iter.reg_bitpos);
+
+ vcap_encode_field(stream, &iter, rf.width, value);
+
+ KUNIT_EXPECT_EQ(test, (u32)0x0, stream[0]);
+ KUNIT_EXPECT_EQ(test, (u32)(0x5 << (25 + 3 + 2 - sw_width)), stream[1]);
+ KUNIT_EXPECT_EQ(test, (u32)0x0, stream[2]);
+ KUNIT_EXPECT_EQ(test, (u32)0x0, stream[3]);
+ KUNIT_EXPECT_EQ(test, (u32)0x0, stream[4]);
+ KUNIT_EXPECT_EQ(test, (u32)0x0, stream[5]);
+
+ vcap_encode_typegroups(stream, sw_width, tgt, false);
+
+ KUNIT_EXPECT_EQ(test, (u32)7, stream[0]);
+ KUNIT_EXPECT_EQ(test, (u32)((0x5 << (25 + 3 + 2 - sw_width)) + 3), stream[1]);
+ KUNIT_EXPECT_EQ(test, (u32)1, stream[2]);
+ KUNIT_EXPECT_EQ(test, (u32)0, stream[3]);
+ KUNIT_EXPECT_EQ(test, (u32)0, stream[4]);
+ KUNIT_EXPECT_EQ(test, (u32)0, stream[5]);
+}
+
+static void vcap_api_encode_keyfield_test(struct kunit *test)
+{
+ u32 keywords[16] = {0};
+ u32 maskwords[16] = {0};
+ struct vcap_admin admin = {
+ .vtype = VCAP_TYPE_IS2,
+ .cache = {
+ .keystream = keywords,
+ .maskstream = maskwords,
+ .actionstream = keywords,
+ },
+ };
+ struct vcap_rule_internal rule = {
+ .admin = &admin,
+ .data = {
+ .keyset = VCAP_KFS_MAC_ETYPE,
+ },
+ .vctrl = &test_vctrl,
+ };
+ struct vcap_client_keyfield ckf = {
+ .ctrl.list = {},
+ .ctrl.key = VCAP_KF_ISDX_CLS,
+ .ctrl.type = VCAP_FIELD_U32,
+ .data.u32.value = 0xeef014a1,
+ .data.u32.mask = 0xfff,
+ };
+ struct vcap_field rf = {
+ .type = VCAP_FIELD_U32,
+ .offset = 56,
+ .width = 12,
+ };
+ struct vcap_typegroup tgt[] = {
+ { .offset = 0, .width = 2, .value = 2, },
+ { .offset = 156, .width = 1, .value = 1, },
+ { .offset = 0, .width = 0, .value = 0, },
+ };
+
+ vcap_test_api_init(&admin);
+ vcap_encode_keyfield(&rule, &ckf, &rf, tgt);
+
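+	/* ISDX_CLS is a 12-bit field at key offset 56; the 2-bit typegroup
+	 * in front shifts it to stream offset 58, i.e. bit 6 of register 2
+	 * (the first register of subword 1), so only the low 12 bits 0x4a1
+	 * of the value survive.
+	 */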
+ /* Key */
+ KUNIT_EXPECT_EQ(test, (u32)0x0, keywords[0]);
+ KUNIT_EXPECT_EQ(test, (u32)0x0, keywords[1]);
+ KUNIT_EXPECT_EQ(test, (u32)(0x04a1 << 6), keywords[2]);
+ KUNIT_EXPECT_EQ(test, (u32)0x0, keywords[3]);
+ KUNIT_EXPECT_EQ(test, (u32)0x0, keywords[4]);
+ KUNIT_EXPECT_EQ(test, (u32)0x0, keywords[5]);
+ KUNIT_EXPECT_EQ(test, (u32)0x0, keywords[6]);
+
+ /* Mask */
+ KUNIT_EXPECT_EQ(test, (u32)0x0, maskwords[0]);
+ KUNIT_EXPECT_EQ(test, (u32)0x0, maskwords[1]);
+ KUNIT_EXPECT_EQ(test, (u32)(0x0fff << 6), maskwords[2]);
+ KUNIT_EXPECT_EQ(test, (u32)0x0, maskwords[3]);
+ KUNIT_EXPECT_EQ(test, (u32)0x0, maskwords[4]);
+ KUNIT_EXPECT_EQ(test, (u32)0x0, maskwords[5]);
+ KUNIT_EXPECT_EQ(test, (u32)0x0, maskwords[6]);
+}
+
+static void vcap_api_encode_max_keyfield_test(struct kunit *test)
+{
+ int idx;
+ u32 keywords[6] = {0};
+ u32 maskwords[6] = {0};
+ struct vcap_admin admin = {
+ .vtype = VCAP_TYPE_IS2,
+ /* IS2 sw_width = 52 bit */
+ .cache = {
+ .keystream = keywords,
+ .maskstream = maskwords,
+ .actionstream = keywords,
+ },
+ };
+ struct vcap_rule_internal rule = {
+ .admin = &admin,
+ .data = {
+ .keyset = VCAP_KFS_IP_7TUPLE,
+ },
+ .vctrl = &test_vctrl,
+ };
+ struct vcap_client_keyfield ckf = {
+ .ctrl.list = {},
+ .ctrl.key = VCAP_KF_L3_IP6_DIP,
+ .ctrl.type = VCAP_FIELD_U128,
+ .data.u128.value = { 0xa1, 0xa2, 0xa3, 0xa4, 0, 0, 0x43, 0,
+ 0, 0, 0, 0, 0, 0, 0x78, 0x8e, },
+ .data.u128.mask = { 0xff, 0xff, 0xff, 0xff, 0, 0, 0xff, 0,
+ 0, 0, 0, 0, 0, 0, 0xff, 0xff },
+ };
+ struct vcap_field rf = {
+ .type = VCAP_FIELD_U128,
+ .offset = 0,
+ .width = 128,
+ };
+ struct vcap_typegroup tgt[] = {
+ { .offset = 0, .width = 2, .value = 2, },
+ { .offset = 156, .width = 1, .value = 1, },
+ { .offset = 0, .width = 0, .value = 0, },
+ };
+ u32 keyres[] = {
+ 0x928e8a84,
+ 0x000c0002,
+ 0x00000010,
+ 0x00000000,
+ 0x0239e000,
+ 0x00000000,
+ };
+ u32 mskres[] = {
+ 0xfffffffc,
+ 0x000c0003,
+ 0x0000003f,
+ 0x00000000,
+ 0x03fffc00,
+ 0x00000000,
+ };
+
+ vcap_encode_keyfield(&rule, &ckf, &rf, tgt);
+
+ /* Key */
+ for (idx = 0; idx < ARRAY_SIZE(keyres); ++idx)
+ KUNIT_EXPECT_EQ(test, keyres[idx], keywords[idx]);
+ /* Mask */
+ for (idx = 0; idx < ARRAY_SIZE(mskres); ++idx)
+ KUNIT_EXPECT_EQ(test, mskres[idx], maskwords[idx]);
+}
+
+static void vcap_api_encode_actionfield_test(struct kunit *test)
+{
+ u32 actwords[16] = {0};
+ int sw_width = 21;
+ struct vcap_admin admin = {
+ .vtype = VCAP_TYPE_ES2, /* act_width = 21 */
+ .cache = {
+ .actionstream = actwords,
+ },
+ };
+ struct vcap_rule_internal rule = {
+ .admin = &admin,
+ .data = {
+ .actionset = VCAP_AFS_BASE_TYPE,
+ },
+ .vctrl = &test_vctrl,
+ };
+ struct vcap_client_actionfield caf = {
+ .ctrl.list = {},
+ .ctrl.action = VCAP_AF_POLICE_IDX,
+ .ctrl.type = VCAP_FIELD_U32,
+ .data.u32.value = 0x67908032,
+ };
+ struct vcap_field rf = {
+ .type = VCAP_FIELD_U32,
+ .offset = 35,
+ .width = 6,
+ };
+ struct vcap_typegroup tgt[] = {
+ { .offset = 0, .width = 2, .value = 2, },
+ { .offset = 21, .width = 1, .value = 1, },
+ { .offset = 42, .width = 1, .value = 0, },
+ { .offset = 0, .width = 0, .value = 0, },
+ };
+
+ vcap_encode_actionfield(&rule, &caf, &rf, tgt);
+
+ /* Action */
+ KUNIT_EXPECT_EQ(test, (u32)0x0, actwords[0]);
+ KUNIT_EXPECT_EQ(test, (u32)((0x32 << (35 + 2 + 1 - sw_width)) & 0x1fffff), actwords[1]);
+ KUNIT_EXPECT_EQ(test, (u32)((0x32 >> ((2 * sw_width) - 38 - 1))), actwords[2]);
+ KUNIT_EXPECT_EQ(test, (u32)0x0, actwords[3]);
+ KUNIT_EXPECT_EQ(test, (u32)0x0, actwords[4]);
+ KUNIT_EXPECT_EQ(test, (u32)0x0, actwords[5]);
+ KUNIT_EXPECT_EQ(test, (u32)0x0, actwords[6]);
+}
+
+static void vcap_api_keyfield_typegroup_test(struct kunit *test)
+{
+ const struct vcap_typegroup *tg;
+
+ tg = vcap_keyfield_typegroup(&test_vctrl, VCAP_TYPE_IS2, VCAP_KFS_MAC_ETYPE);
+ KUNIT_EXPECT_PTR_NE(test, NULL, tg);
+ KUNIT_EXPECT_EQ(test, 0, tg[0].offset);
+ KUNIT_EXPECT_EQ(test, 2, tg[0].width);
+ KUNIT_EXPECT_EQ(test, 2, tg[0].value);
+ KUNIT_EXPECT_EQ(test, 156, tg[1].offset);
+ KUNIT_EXPECT_EQ(test, 1, tg[1].width);
+ KUNIT_EXPECT_EQ(test, 0, tg[1].value);
+ KUNIT_EXPECT_EQ(test, 0, tg[2].offset);
+ KUNIT_EXPECT_EQ(test, 0, tg[2].width);
+ KUNIT_EXPECT_EQ(test, 0, tg[2].value);
+
+ tg = vcap_keyfield_typegroup(&test_vctrl, VCAP_TYPE_ES2, VCAP_KFS_LL_FULL);
+ KUNIT_EXPECT_PTR_EQ(test, NULL, tg);
+}
+
+static void vcap_api_actionfield_typegroup_test(struct kunit *test)
+{
+ const struct vcap_typegroup *tg;
+
+ tg = vcap_actionfield_typegroup(&test_vctrl, VCAP_TYPE_IS0, VCAP_AFS_FULL);
+ KUNIT_EXPECT_PTR_NE(test, NULL, tg);
+ KUNIT_EXPECT_EQ(test, 0, tg[0].offset);
+ KUNIT_EXPECT_EQ(test, 3, tg[0].width);
+ KUNIT_EXPECT_EQ(test, 4, tg[0].value);
+ KUNIT_EXPECT_EQ(test, 110, tg[1].offset);
+ KUNIT_EXPECT_EQ(test, 2, tg[1].width);
+ KUNIT_EXPECT_EQ(test, 0, tg[1].value);
+ KUNIT_EXPECT_EQ(test, 220, tg[2].offset);
+ KUNIT_EXPECT_EQ(test, 2, tg[2].width);
+ KUNIT_EXPECT_EQ(test, 0, tg[2].value);
+ KUNIT_EXPECT_EQ(test, 0, tg[3].offset);
+ KUNIT_EXPECT_EQ(test, 0, tg[3].width);
+ KUNIT_EXPECT_EQ(test, 0, tg[3].value);
+
+ tg = vcap_actionfield_typegroup(&test_vctrl, VCAP_TYPE_IS2, VCAP_AFS_CLASSIFICATION);
+ KUNIT_EXPECT_PTR_EQ(test, NULL, tg);
+}
+
+static void vcap_api_vcap_keyfields_test(struct kunit *test)
+{
+ const struct vcap_field *ft;
+
+ ft = vcap_keyfields(&test_vctrl, VCAP_TYPE_IS2, VCAP_KFS_MAC_ETYPE);
+ KUNIT_EXPECT_PTR_NE(test, NULL, ft);
+
+ /* Keyset that is not available and within the maximum keyset enum value */
+ ft = vcap_keyfields(&test_vctrl, VCAP_TYPE_ES2, VCAP_KFS_PURE_5TUPLE_IP4);
+ KUNIT_EXPECT_PTR_EQ(test, NULL, ft);
+
+ /* Keyset that is not available and beyond the maximum keyset enum value */
+ ft = vcap_keyfields(&test_vctrl, VCAP_TYPE_ES2, VCAP_KFS_LL_FULL);
+ KUNIT_EXPECT_PTR_EQ(test, NULL, ft);
+}
+
+static void vcap_api_vcap_actionfields_test(struct kunit *test)
+{
+ const struct vcap_field *ft;
+
+ ft = vcap_actionfields(&test_vctrl, VCAP_TYPE_IS0, VCAP_AFS_FULL);
+ KUNIT_EXPECT_PTR_NE(test, NULL, ft);
+
+ ft = vcap_actionfields(&test_vctrl, VCAP_TYPE_IS2, VCAP_AFS_FULL);
+ KUNIT_EXPECT_PTR_EQ(test, NULL, ft);
+
+ ft = vcap_actionfields(&test_vctrl, VCAP_TYPE_IS2, VCAP_AFS_CLASSIFICATION);
+ KUNIT_EXPECT_PTR_EQ(test, NULL, ft);
+}
+
+static void vcap_api_encode_rule_keyset_test(struct kunit *test)
+{
+ u32 keywords[16] = {0};
+ u32 maskwords[16] = {0};
+ struct vcap_admin admin = {
+ .vtype = VCAP_TYPE_IS2,
+ .cache = {
+ .keystream = keywords,
+ .maskstream = maskwords,
+ },
+ };
+ struct vcap_rule_internal rule = {
+ .admin = &admin,
+ .data = {
+ .keyset = VCAP_KFS_MAC_ETYPE,
+ },
+ .vctrl = &test_vctrl,
+ };
+ struct vcap_client_keyfield ckf[] = {
+ {
+ .ctrl.key = VCAP_KF_TYPE,
+ .ctrl.type = VCAP_FIELD_U32,
+ .data.u32.value = 0x00,
+ .data.u32.mask = 0x0f,
+ },
+ {
+ .ctrl.key = VCAP_KF_LOOKUP_FIRST_IS,
+ .ctrl.type = VCAP_FIELD_BIT,
+ .data.u1.value = 0x01,
+ .data.u1.mask = 0x01,
+ },
+ {
+ .ctrl.key = VCAP_KF_IF_IGR_PORT_MASK_L3,
+ .ctrl.type = VCAP_FIELD_BIT,
+ .data.u1.value = 0x00,
+ .data.u1.mask = 0x01,
+ },
+ {
+ .ctrl.key = VCAP_KF_IF_IGR_PORT_MASK_RNG,
+ .ctrl.type = VCAP_FIELD_U32,
+ .data.u32.value = 0x00,
+ .data.u32.mask = 0x0f,
+ },
+ {
+ .ctrl.key = VCAP_KF_IF_IGR_PORT_MASK,
+ .ctrl.type = VCAP_FIELD_U72,
+ .data.u72.value = {0x0, 0x00, 0x00, 0x00},
+ .data.u72.mask = {0xfd, 0xff, 0xff, 0xff},
+ },
+ {
+ .ctrl.key = VCAP_KF_L2_DMAC,
+ .ctrl.type = VCAP_FIELD_U48,
+ /* Opposite endianness */
+ .data.u48.value = {0x01, 0x02, 0x03, 0x04, 0x05, 0x06},
+ .data.u48.mask = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
+ },
+ {
+ .ctrl.key = VCAP_KF_ETYPE_LEN_IS,
+ .ctrl.type = VCAP_FIELD_BIT,
+ .data.u1.value = 0x01,
+ .data.u1.mask = 0x01,
+ },
+ {
+ .ctrl.key = VCAP_KF_ETYPE,
+ .ctrl.type = VCAP_FIELD_U32,
+ .data.u32.value = 0xaabb,
+ .data.u32.mask = 0xffff,
+ },
+ };
+ int idx;
+ int ret;
+
+ /* Empty entry list */
+ INIT_LIST_HEAD(&rule.data.keyfields);
+ ret = vcap_encode_rule_keyset(&rule);
+ KUNIT_EXPECT_EQ(test, -EINVAL, ret);
+
+ for (idx = 0; idx < ARRAY_SIZE(ckf); idx++)
+ list_add_tail(&ckf[idx].ctrl.list, &rule.data.keyfields);
+ ret = vcap_encode_rule_keyset(&rule);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+
+ /* The key and mask values below are from an actual Sparx5 rule config */
+ /* Key */
+ KUNIT_EXPECT_EQ(test, (u32)0x00000042, keywords[0]);
+ KUNIT_EXPECT_EQ(test, (u32)0x00000000, keywords[1]);
+ KUNIT_EXPECT_EQ(test, (u32)0x00000000, keywords[2]);
+ KUNIT_EXPECT_EQ(test, (u32)0x00020100, keywords[3]);
+ KUNIT_EXPECT_EQ(test, (u32)0x60504030, keywords[4]);
+ KUNIT_EXPECT_EQ(test, (u32)0x00000000, keywords[5]);
+ KUNIT_EXPECT_EQ(test, (u32)0x00000000, keywords[6]);
+ KUNIT_EXPECT_EQ(test, (u32)0x0002aaee, keywords[7]);
+ KUNIT_EXPECT_EQ(test, (u32)0x00000000, keywords[8]);
+ KUNIT_EXPECT_EQ(test, (u32)0x00000000, keywords[9]);
+ KUNIT_EXPECT_EQ(test, (u32)0x00000000, keywords[10]);
+ KUNIT_EXPECT_EQ(test, (u32)0x00000000, keywords[11]);
+
+ /* Mask: they will be inverted when applied to the register */
+ KUNIT_EXPECT_EQ(test, (u32)~0x00b07f80, maskwords[0]);
+ KUNIT_EXPECT_EQ(test, (u32)~0xfff00000, maskwords[1]);
+ KUNIT_EXPECT_EQ(test, (u32)~0xfffffffc, maskwords[2]);
+ KUNIT_EXPECT_EQ(test, (u32)~0xfff000ff, maskwords[3]);
+ KUNIT_EXPECT_EQ(test, (u32)~0x00000000, maskwords[4]);
+ KUNIT_EXPECT_EQ(test, (u32)~0xfffffff0, maskwords[5]);
+ KUNIT_EXPECT_EQ(test, (u32)~0xfffffffe, maskwords[6]);
+ KUNIT_EXPECT_EQ(test, (u32)~0xfffc0001, maskwords[7]);
+ KUNIT_EXPECT_EQ(test, (u32)~0xffffffff, maskwords[8]);
+ KUNIT_EXPECT_EQ(test, (u32)~0xffffffff, maskwords[9]);
+ KUNIT_EXPECT_EQ(test, (u32)~0xffffffff, maskwords[10]);
+ KUNIT_EXPECT_EQ(test, (u32)~0xffffffff, maskwords[11]);
+}
+
+static void vcap_api_encode_rule_actionset_test(struct kunit *test)
+{
+ u32 actwords[16] = {0};
+ struct vcap_admin admin = {
+ .vtype = VCAP_TYPE_IS2,
+ .cache = {
+ .actionstream = actwords,
+ },
+ };
+ struct vcap_rule_internal rule = {
+ .admin = &admin,
+ .data = {
+ .actionset = VCAP_AFS_BASE_TYPE,
+ },
+ .vctrl = &test_vctrl,
+ };
+ struct vcap_client_actionfield caf[] = {
+ {
+ .ctrl.action = VCAP_AF_MATCH_ID,
+ .ctrl.type = VCAP_FIELD_U32,
+ .data.u32.value = 0x01,
+ },
+ {
+ .ctrl.action = VCAP_AF_MATCH_ID_MASK,
+ .ctrl.type = VCAP_FIELD_U32,
+ .data.u32.value = 0x01,
+ },
+ {
+ .ctrl.action = VCAP_AF_CNT_ID,
+ .ctrl.type = VCAP_FIELD_U32,
+ .data.u32.value = 0x64,
+ },
+ };
+ int idx;
+ int ret;
+
+ /* Empty entry list */
+ INIT_LIST_HEAD(&rule.data.actionfields);
+ ret = vcap_encode_rule_actionset(&rule);
+ /* We allow rules with no actions */
+ KUNIT_EXPECT_EQ(test, 0, ret);
+
+ for (idx = 0; idx < ARRAY_SIZE(caf); idx++)
+ list_add_tail(&caf[idx].ctrl.list, &rule.data.actionfields);
+ ret = vcap_encode_rule_actionset(&rule);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+
+ /* The action values below are from an actual Sparx5 rule config */
+ KUNIT_EXPECT_EQ(test, (u32)0x00000002, actwords[0]);
+ KUNIT_EXPECT_EQ(test, (u32)0x00000000, actwords[1]);
+ KUNIT_EXPECT_EQ(test, (u32)0x00000000, actwords[2]);
+ KUNIT_EXPECT_EQ(test, (u32)0x00000000, actwords[3]);
+ KUNIT_EXPECT_EQ(test, (u32)0x00000000, actwords[4]);
+ KUNIT_EXPECT_EQ(test, (u32)0x00100000, actwords[5]);
+ KUNIT_EXPECT_EQ(test, (u32)0x06400010, actwords[6]);
+ KUNIT_EXPECT_EQ(test, (u32)0x00000000, actwords[7]);
+ KUNIT_EXPECT_EQ(test, (u32)0x00000000, actwords[8]);
+ KUNIT_EXPECT_EQ(test, (u32)0x00000000, actwords[9]);
+ KUNIT_EXPECT_EQ(test, (u32)0x00000000, actwords[10]);
+ KUNIT_EXPECT_EQ(test, (u32)0x00000000, actwords[11]);
+}
+
+static void vcap_free_ckf(struct vcap_rule *rule)
+{
+ struct vcap_client_keyfield *ckf, *next_ckf;
+
+ list_for_each_entry_safe(ckf, next_ckf, &rule->keyfields, ctrl.list) {
+ list_del(&ckf->ctrl.list);
+ kfree(ckf);
+ }
+}
+
+static void vcap_api_rule_add_keyvalue_test(struct kunit *test)
+{
+ struct vcap_admin admin = {
+ .vtype = VCAP_TYPE_IS2,
+ };
+ struct vcap_rule_internal ri = {
+ .admin = &admin,
+ .data = {
+ .keyset = VCAP_KFS_NO_VALUE,
+ },
+ .vctrl = &test_vctrl,
+ };
+ struct vcap_rule *rule = (struct vcap_rule *)&ri;
+ struct vcap_client_keyfield *kf;
+ int ret;
+ struct vcap_u128_key dip = {
+ .value = {0x17, 0x26, 0x35, 0x44, 0x63, 0x62, 0x71},
+ .mask = {0xf1, 0xf2, 0xf3, 0xf4, 0x4f, 0x3f, 0x2f, 0x1f},
+ };
+ int idx;
+
+ INIT_LIST_HEAD(&rule->keyfields);
+ ret = vcap_rule_add_key_bit(rule, VCAP_KF_LOOKUP_FIRST_IS, VCAP_BIT_0);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ ret = list_empty(&rule->keyfields);
+ KUNIT_EXPECT_EQ(test, false, ret);
+ kf = list_first_entry(&rule->keyfields, struct vcap_client_keyfield,
+ ctrl.list);
+ KUNIT_EXPECT_EQ(test, VCAP_KF_LOOKUP_FIRST_IS, kf->ctrl.key);
+ KUNIT_EXPECT_EQ(test, VCAP_FIELD_BIT, kf->ctrl.type);
+ KUNIT_EXPECT_EQ(test, 0x0, kf->data.u1.value);
+ KUNIT_EXPECT_EQ(test, 0x1, kf->data.u1.mask);
+ vcap_free_ckf(rule);
+
+ INIT_LIST_HEAD(&rule->keyfields);
+ ret = vcap_rule_add_key_bit(rule, VCAP_KF_LOOKUP_FIRST_IS, VCAP_BIT_1);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ ret = list_empty(&rule->keyfields);
+ KUNIT_EXPECT_EQ(test, false, ret);
+ kf = list_first_entry(&rule->keyfields, struct vcap_client_keyfield,
+ ctrl.list);
+ KUNIT_EXPECT_EQ(test, VCAP_KF_LOOKUP_FIRST_IS, kf->ctrl.key);
+ KUNIT_EXPECT_EQ(test, VCAP_FIELD_BIT, kf->ctrl.type);
+ KUNIT_EXPECT_EQ(test, 0x1, kf->data.u1.value);
+ KUNIT_EXPECT_EQ(test, 0x1, kf->data.u1.mask);
+ vcap_free_ckf(rule);
+
+ INIT_LIST_HEAD(&rule->keyfields);
+ ret = vcap_rule_add_key_bit(rule, VCAP_KF_LOOKUP_FIRST_IS,
+ VCAP_BIT_ANY);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ ret = list_empty(&rule->keyfields);
+ KUNIT_EXPECT_EQ(test, false, ret);
+ kf = list_first_entry(&rule->keyfields, struct vcap_client_keyfield,
+ ctrl.list);
+ KUNIT_EXPECT_EQ(test, VCAP_KF_LOOKUP_FIRST_IS, kf->ctrl.key);
+ KUNIT_EXPECT_EQ(test, VCAP_FIELD_BIT, kf->ctrl.type);
+ KUNIT_EXPECT_EQ(test, 0x0, kf->data.u1.value);
+ KUNIT_EXPECT_EQ(test, 0x0, kf->data.u1.mask);
+ vcap_free_ckf(rule);
+
+ INIT_LIST_HEAD(&rule->keyfields);
+ ret = vcap_rule_add_key_u32(rule, VCAP_KF_TYPE, 0x98765432, 0xff00ffab);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ ret = list_empty(&rule->keyfields);
+ KUNIT_EXPECT_EQ(test, false, ret);
+ kf = list_first_entry(&rule->keyfields, struct vcap_client_keyfield,
+ ctrl.list);
+ KUNIT_EXPECT_EQ(test, VCAP_KF_TYPE, kf->ctrl.key);
+ KUNIT_EXPECT_EQ(test, VCAP_FIELD_U32, kf->ctrl.type);
+ KUNIT_EXPECT_EQ(test, 0x98765432, kf->data.u32.value);
+ KUNIT_EXPECT_EQ(test, 0xff00ffab, kf->data.u32.mask);
+ vcap_free_ckf(rule);
+
+ INIT_LIST_HEAD(&rule->keyfields);
+ ret = vcap_rule_add_key_u128(rule, VCAP_KF_L3_IP6_SIP, &dip);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ ret = list_empty(&rule->keyfields);
+ KUNIT_EXPECT_EQ(test, false, ret);
+ kf = list_first_entry(&rule->keyfields, struct vcap_client_keyfield,
+ ctrl.list);
+ KUNIT_EXPECT_EQ(test, VCAP_KF_L3_IP6_SIP, kf->ctrl.key);
+ KUNIT_EXPECT_EQ(test, VCAP_FIELD_U128, kf->ctrl.type);
+ for (idx = 0; idx < ARRAY_SIZE(dip.value); ++idx)
+ KUNIT_EXPECT_EQ(test, dip.value[idx], kf->data.u128.value[idx]);
+ for (idx = 0; idx < ARRAY_SIZE(dip.mask); ++idx)
+ KUNIT_EXPECT_EQ(test, dip.mask[idx], kf->data.u128.mask[idx]);
+ vcap_free_ckf(rule);
+}
+
+static void vcap_free_caf(struct vcap_rule *rule)
+{
+ struct vcap_client_actionfield *caf, *next_caf;
+
+ list_for_each_entry_safe(caf, next_caf,
+ &rule->actionfields, ctrl.list) {
+ list_del(&caf->ctrl.list);
+ kfree(caf);
+ }
+}
+
+static void vcap_api_rule_add_actionvalue_test(struct kunit *test)
+{
+ struct vcap_admin admin = {
+ .vtype = VCAP_TYPE_IS2,
+ };
+ struct vcap_rule_internal ri = {
+ .admin = &admin,
+ .data = {
+ .actionset = VCAP_AFS_NO_VALUE,
+ },
+ };
+ struct vcap_rule *rule = (struct vcap_rule *)&ri;
+ struct vcap_client_actionfield *af;
+ int ret;
+
+ INIT_LIST_HEAD(&rule->actionfields);
+ ret = vcap_rule_add_action_bit(rule, VCAP_AF_POLICE_ENA, VCAP_BIT_0);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ ret = list_empty(&rule->actionfields);
+ KUNIT_EXPECT_EQ(test, false, ret);
+ af = list_first_entry(&rule->actionfields,
+ struct vcap_client_actionfield, ctrl.list);
+ KUNIT_EXPECT_EQ(test, VCAP_AF_POLICE_ENA, af->ctrl.action);
+ KUNIT_EXPECT_EQ(test, VCAP_FIELD_BIT, af->ctrl.type);
+ KUNIT_EXPECT_EQ(test, 0x0, af->data.u1.value);
+ vcap_free_caf(rule);
+
+ INIT_LIST_HEAD(&rule->actionfields);
+ ret = vcap_rule_add_action_bit(rule, VCAP_AF_POLICE_ENA, VCAP_BIT_1);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ ret = list_empty(&rule->actionfields);
+ KUNIT_EXPECT_EQ(test, false, ret);
+ af = list_first_entry(&rule->actionfields,
+ struct vcap_client_actionfield, ctrl.list);
+ KUNIT_EXPECT_EQ(test, VCAP_AF_POLICE_ENA, af->ctrl.action);
+ KUNIT_EXPECT_EQ(test, VCAP_FIELD_BIT, af->ctrl.type);
+ KUNIT_EXPECT_EQ(test, 0x1, af->data.u1.value);
+ vcap_free_caf(rule);
+
+ INIT_LIST_HEAD(&rule->actionfields);
+ ret = vcap_rule_add_action_bit(rule, VCAP_AF_POLICE_ENA, VCAP_BIT_ANY);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ ret = list_empty(&rule->actionfields);
+ KUNIT_EXPECT_EQ(test, false, ret);
+ af = list_first_entry(&rule->actionfields,
+ struct vcap_client_actionfield, ctrl.list);
+ KUNIT_EXPECT_EQ(test, VCAP_AF_POLICE_ENA, af->ctrl.action);
+ KUNIT_EXPECT_EQ(test, VCAP_FIELD_BIT, af->ctrl.type);
+ KUNIT_EXPECT_EQ(test, 0x0, af->data.u1.value);
+ vcap_free_caf(rule);
+
+ INIT_LIST_HEAD(&rule->actionfields);
+ ret = vcap_rule_add_action_u32(rule, VCAP_AF_TYPE, 0x98765432);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ ret = list_empty(&rule->actionfields);
+ KUNIT_EXPECT_EQ(test, false, ret);
+ af = list_first_entry(&rule->actionfields,
+ struct vcap_client_actionfield, ctrl.list);
+ KUNIT_EXPECT_EQ(test, VCAP_AF_TYPE, af->ctrl.action);
+ KUNIT_EXPECT_EQ(test, VCAP_FIELD_U32, af->ctrl.type);
+ KUNIT_EXPECT_EQ(test, 0x98765432, af->data.u32.value);
+ vcap_free_caf(rule);
+
+ INIT_LIST_HEAD(&rule->actionfields);
+ ret = vcap_rule_add_action_u32(rule, VCAP_AF_MASK_MODE, 0xaabbccdd);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ ret = list_empty(&rule->actionfields);
+ KUNIT_EXPECT_EQ(test, false, ret);
+ af = list_first_entry(&rule->actionfields,
+ struct vcap_client_actionfield, ctrl.list);
+ KUNIT_EXPECT_EQ(test, VCAP_AF_MASK_MODE, af->ctrl.action);
+ KUNIT_EXPECT_EQ(test, VCAP_FIELD_U32, af->ctrl.type);
+ KUNIT_EXPECT_EQ(test, 0xaabbccdd, af->data.u32.value);
+ vcap_free_caf(rule);
+}
+
+static void vcap_api_rule_find_keyset_basic_test(struct kunit *test)
+{
+ struct vcap_keyset_list matches = {};
+ struct vcap_admin admin = {
+ .vtype = VCAP_TYPE_IS2,
+ };
+ struct vcap_rule_internal ri = {
+ .admin = &admin,
+ .vctrl = &test_vctrl,
+ };
+ struct vcap_client_keyfield ckf[] = {
+ {
+ .ctrl.key = VCAP_KF_TYPE,
+ }, {
+ .ctrl.key = VCAP_KF_LOOKUP_FIRST_IS,
+ }, {
+ .ctrl.key = VCAP_KF_IF_IGR_PORT_MASK_L3,
+ }, {
+ .ctrl.key = VCAP_KF_IF_IGR_PORT_MASK_RNG,
+ }, {
+ .ctrl.key = VCAP_KF_IF_IGR_PORT_MASK,
+ }, {
+ .ctrl.key = VCAP_KF_L2_DMAC,
+ }, {
+ .ctrl.key = VCAP_KF_ETYPE_LEN_IS,
+ }, {
+ .ctrl.key = VCAP_KF_ETYPE,
+ },
+ };
+ int idx;
+ bool ret;
+ enum vcap_keyfield_set keysets[10] = {};
+
+ matches.keysets = keysets;
+ matches.max = ARRAY_SIZE(keysets);
+
+ INIT_LIST_HEAD(&ri.data.keyfields);
+ for (idx = 0; idx < ARRAY_SIZE(ckf); idx++)
+ list_add_tail(&ckf[idx].ctrl.list, &ri.data.keyfields);
+
+ ret = vcap_rule_find_keysets(&ri.data, &matches);
+
+ KUNIT_EXPECT_EQ(test, true, ret);
+ KUNIT_EXPECT_EQ(test, 1, matches.cnt);
+ KUNIT_EXPECT_EQ(test, VCAP_KFS_MAC_ETYPE, matches.keysets[0]);
+}
+
+static void vcap_api_rule_find_keyset_failed_test(struct kunit *test)
+{
+ struct vcap_keyset_list matches = {};
+ struct vcap_admin admin = {
+ .vtype = VCAP_TYPE_IS2,
+ };
+ struct vcap_rule_internal ri = {
+ .admin = &admin,
+ .vctrl = &test_vctrl,
+ };
+ struct vcap_client_keyfield ckf[] = {
+ {
+ .ctrl.key = VCAP_KF_TYPE,
+ }, {
+ .ctrl.key = VCAP_KF_LOOKUP_FIRST_IS,
+ }, {
+ .ctrl.key = VCAP_KF_ARP_OPCODE,
+ }, {
+ .ctrl.key = VCAP_KF_L3_IP4_SIP,
+ }, {
+ .ctrl.key = VCAP_KF_L3_IP4_DIP,
+ }, {
+ .ctrl.key = VCAP_KF_8021Q_PCP_CLS,
+ }, {
+ .ctrl.key = VCAP_KF_ETYPE_LEN_IS, /* Not with ARP */
+ }, {
+ .ctrl.key = VCAP_KF_ETYPE, /* Not with ARP */
+ },
+ };
+ int idx;
+ bool ret;
+ enum vcap_keyfield_set keysets[10] = {};
+
+ matches.keysets = keysets;
+ matches.max = ARRAY_SIZE(keysets);
+
+ INIT_LIST_HEAD(&ri.data.keyfields);
+ for (idx = 0; idx < ARRAY_SIZE(ckf); idx++)
+ list_add_tail(&ckf[idx].ctrl.list, &ri.data.keyfields);
+
+ ret = vcap_rule_find_keysets(&ri.data, &matches);
+
+ KUNIT_EXPECT_EQ(test, false, ret);
+ KUNIT_EXPECT_EQ(test, 0, matches.cnt);
+ KUNIT_EXPECT_EQ(test, VCAP_KFS_NO_VALUE, matches.keysets[0]);
+}
+
+static void vcap_api_rule_find_keyset_many_test(struct kunit *test)
+{
+ struct vcap_keyset_list matches = {};
+ struct vcap_admin admin = {
+ .vtype = VCAP_TYPE_IS2,
+ };
+ struct vcap_rule_internal ri = {
+ .admin = &admin,
+ .vctrl = &test_vctrl,
+ };
+ struct vcap_client_keyfield ckf[] = {
+ {
+ .ctrl.key = VCAP_KF_TYPE,
+ }, {
+ .ctrl.key = VCAP_KF_LOOKUP_FIRST_IS,
+ }, {
+ .ctrl.key = VCAP_KF_8021Q_DEI_CLS,
+ }, {
+ .ctrl.key = VCAP_KF_8021Q_PCP_CLS,
+ }, {
+ .ctrl.key = VCAP_KF_8021Q_VID_CLS,
+ }, {
+ .ctrl.key = VCAP_KF_ISDX_CLS,
+ }, {
+ .ctrl.key = VCAP_KF_L2_MC_IS,
+ }, {
+ .ctrl.key = VCAP_KF_L2_BC_IS,
+ },
+ };
+ int idx;
+ bool ret;
+ enum vcap_keyfield_set keysets[10] = {};
+
+ matches.keysets = keysets;
+ matches.max = ARRAY_SIZE(keysets);
+
+ INIT_LIST_HEAD(&ri.data.keyfields);
+ for (idx = 0; idx < ARRAY_SIZE(ckf); idx++)
+ list_add_tail(&ckf[idx].ctrl.list, &ri.data.keyfields);
+
+ ret = vcap_rule_find_keysets(&ri.data, &matches);
+
+ KUNIT_EXPECT_EQ(test, true, ret);
+ KUNIT_EXPECT_EQ(test, 6, matches.cnt);
+ KUNIT_EXPECT_EQ(test, VCAP_KFS_ARP, matches.keysets[0]);
+ KUNIT_EXPECT_EQ(test, VCAP_KFS_IP4_OTHER, matches.keysets[1]);
+ KUNIT_EXPECT_EQ(test, VCAP_KFS_IP4_TCP_UDP, matches.keysets[2]);
+ KUNIT_EXPECT_EQ(test, VCAP_KFS_IP6_STD, matches.keysets[3]);
+ KUNIT_EXPECT_EQ(test, VCAP_KFS_IP_7TUPLE, matches.keysets[4]);
+ KUNIT_EXPECT_EQ(test, VCAP_KFS_MAC_ETYPE, matches.keysets[5]);
+}
+
+static void vcap_api_encode_rule_test(struct kunit *test)
+{
+ /* Data used by VCAP Library callback */
+ static u32 keydata[32] = {};
+ static u32 mskdata[32] = {};
+ static u32 actdata[32] = {};
+
+ struct vcap_admin is2_admin = {
+ .vtype = VCAP_TYPE_IS2,
+ .first_cid = 8000000,
+ .last_cid = 8099999,
+ .lookups = 4,
+ .last_valid_addr = 3071,
+ .first_valid_addr = 0,
+ .last_used_addr = 800,
+ .cache = {
+ .keystream = keydata,
+ .maskstream = mskdata,
+ .actionstream = actdata,
+ },
+ };
+ struct vcap_rule *rule;
+ struct vcap_rule_internal *ri;
+ int vcap_chain_id = 8000000;
+ enum vcap_user user = VCAP_USER_VCAP_UTIL;
+ u16 priority = 10;
+ int id = 100;
+ int ret;
+ struct vcap_u48_key smac = {
+ .value = { 0x88, 0x75, 0x32, 0x34, 0x9e, 0xb1 },
+ .mask = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }
+ };
+ struct vcap_u48_key dmac = {
+ .value = { 0x06, 0x05, 0x04, 0x03, 0x02, 0x01 },
+ .mask = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }
+ };
+ u32 port_mask_rng_value = 0x05;
+ u32 port_mask_rng_mask = 0x0f;
+ u32 igr_port_mask_value = 0xffabcd01;
+ u32 igr_port_mask_mask = ~0;
+ /* counter is written as the first operation */
+ u32 expwriteaddr[] = {792, 792, 793, 794, 795, 796, 797};
+ int idx;
+
+ vcap_test_api_init(&is2_admin);
+
+ /* Allocate the rule */
+ rule = vcap_alloc_rule(&test_vctrl, &test_netdev, vcap_chain_id, user,
+ priority, id);
+ KUNIT_EXPECT_PTR_NE(test, NULL, rule);
+ ri = (struct vcap_rule_internal *)rule;
+
+ /* Add rule keys */
+ ret = vcap_rule_add_key_u48(rule, VCAP_KF_L2_DMAC, &dmac);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ ret = vcap_rule_add_key_u48(rule, VCAP_KF_L2_SMAC, &smac);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ ret = vcap_rule_add_key_bit(rule, VCAP_KF_ETYPE_LEN_IS, VCAP_BIT_1);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ /* Cannot add the same field twice */
+ ret = vcap_rule_add_key_bit(rule, VCAP_KF_ETYPE_LEN_IS, VCAP_BIT_1);
+ KUNIT_EXPECT_EQ(test, -EINVAL, ret);
+ ret = vcap_rule_add_key_bit(rule, VCAP_KF_IF_IGR_PORT_MASK_L3,
+ VCAP_BIT_ANY);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ ret = vcap_rule_add_key_u32(rule, VCAP_KF_IF_IGR_PORT_MASK_RNG,
+ port_mask_rng_value, port_mask_rng_mask);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ ret = vcap_rule_add_key_u32(rule, VCAP_KF_IF_IGR_PORT_MASK,
+ igr_port_mask_value, igr_port_mask_mask);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+
+ /* Add rule actions */
+ ret = vcap_rule_add_action_bit(rule, VCAP_AF_POLICE_ENA, VCAP_BIT_1);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ ret = vcap_rule_add_action_u32(rule, VCAP_AF_CNT_ID, id);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ ret = vcap_rule_add_action_u32(rule, VCAP_AF_MATCH_ID, 1);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ ret = vcap_rule_add_action_u32(rule, VCAP_AF_MATCH_ID_MASK, 1);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+
+ /* For now the actionset is hardcoded */
+ ret = vcap_set_rule_set_actionset(rule, VCAP_AFS_BASE_TYPE);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+
+ /* Validation with validate keyset callback */
+ ret = vcap_val_rule(rule, ETH_P_ALL);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ KUNIT_EXPECT_EQ(test, VCAP_KFS_MAC_ETYPE, rule->keyset);
+ KUNIT_EXPECT_EQ(test, VCAP_AFS_BASE_TYPE, rule->actionset);
+ KUNIT_EXPECT_EQ(test, 6, ri->size);
+ KUNIT_EXPECT_EQ(test, 2, ri->keyset_sw_regs);
+ KUNIT_EXPECT_EQ(test, 4, ri->actionset_sw_regs);
+
+ /* Enable lookup, so the rule will be written */
+ ret = vcap_enable_lookups(&test_vctrl, &test_netdev, 0,
+ rule->vcap_chain_id, rule->cookie, true);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+
+ /* Add rule with write callback */
+ ret = vcap_add_rule(rule);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ KUNIT_EXPECT_EQ(test, 792, is2_admin.last_used_addr);
+ for (idx = 0; idx < ARRAY_SIZE(expwriteaddr); ++idx)
+ KUNIT_EXPECT_EQ(test, expwriteaddr[idx], test_updateaddr[idx]);
+
+ /* Check that the rule has been added */
+ ret = list_empty(&is2_admin.rules);
+ KUNIT_EXPECT_EQ(test, false, ret);
+
+ vcap_enable_lookups(&test_vctrl, &test_netdev, 0, 0,
+ rule->cookie, false);
+
+ vcap_free_rule(rule);
+
+ /* Check that the rule has been freed: tricky to access since this
+ * memory should not be accessible anymore
+ */
+ KUNIT_EXPECT_PTR_NE(test, NULL, rule);
+ ret = list_empty(&rule->keyfields);
+ KUNIT_EXPECT_EQ(test, true, ret);
+ ret = list_empty(&rule->actionfields);
+ KUNIT_EXPECT_EQ(test, true, ret);
+
+ vcap_del_rule(&test_vctrl, &test_netdev, id);
+}
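
Condensed, the client-side rule lifecycle exercised by the test above is
(a sketch only; error handling and the KUnit assertions are omitted, and
vctrl/ndev stand for the control and net_device used in the test):

	rule = vcap_alloc_rule(vctrl, ndev, chain_id, user, priority, id);
	vcap_rule_add_key_u48(rule, VCAP_KF_L2_DMAC, &dmac);
	vcap_rule_add_action_bit(rule, VCAP_AF_POLICE_ENA, VCAP_BIT_1);
	vcap_set_rule_set_actionset(rule, VCAP_AFS_BASE_TYPE);
	vcap_val_rule(rule, ETH_P_ALL);	/* selects a matching keyset */
	vcap_add_rule(rule);		/* encodes and writes to the VCAP */
	vcap_free_rule(rule);		/* frees the software copy */
	vcap_del_rule(vctrl, ndev, id);	/* removes the rule from HW again */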
+
+static void vcap_api_set_rule_counter_test(struct kunit *test)
+{
+ struct vcap_admin is2_admin = {
+ .cache = {
+ .counter = 100,
+ .sticky = true,
+ },
+ };
+ struct vcap_rule_internal ri = {
+ .data = {
+ .id = 1001,
+ },
+ .addr = 600,
+ .admin = &is2_admin,
+ .counter_id = 1002,
+ .vctrl = &test_vctrl,
+ };
+ struct vcap_rule_internal ri2 = {
+ .data = {
+ .id = 2001,
+ },
+ .addr = 700,
+ .admin = &is2_admin,
+ .counter_id = 2002,
+ .vctrl = &test_vctrl,
+ };
+ struct vcap_counter ctr = { .value = 0, .sticky = false };
+ struct vcap_counter ctr2 = { .value = 101, .sticky = true };
+ int ret;
+
+ vcap_test_api_init(&is2_admin);
+ list_add_tail(&ri.list, &is2_admin.rules);
+ list_add_tail(&ri2.list, &is2_admin.rules);
+
+ ret = vcap_rule_set_counter(&ri.data, &ctr);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+
+ KUNIT_EXPECT_EQ(test, 1002, test_hw_counter_id);
+ KUNIT_EXPECT_EQ(test, 0, test_hw_cache.counter);
+ KUNIT_EXPECT_EQ(test, false, test_hw_cache.sticky);
+ KUNIT_EXPECT_EQ(test, 600, test_updateaddr[0]);
+
+ ret = vcap_rule_set_counter(&ri2.data, &ctr2);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+
+ KUNIT_EXPECT_EQ(test, 2002, test_hw_counter_id);
+ KUNIT_EXPECT_EQ(test, 101, test_hw_cache.counter);
+ KUNIT_EXPECT_EQ(test, true, test_hw_cache.sticky);
+ KUNIT_EXPECT_EQ(test, 700, test_updateaddr[1]);
+}
+
+static void vcap_api_get_rule_counter_test(struct kunit *test)
+{
+ struct vcap_admin is2_admin = {
+ .cache = {
+ .counter = 100,
+ .sticky = true,
+ },
+ };
+ struct vcap_rule_internal ri = {
+ .data = {
+ .id = 1010,
+ },
+ .addr = 400,
+ .admin = &is2_admin,
+ .counter_id = 1011,
+ .vctrl = &test_vctrl,
+ };
+ struct vcap_rule_internal ri2 = {
+ .data = {
+ .id = 2011,
+ },
+ .addr = 300,
+ .admin = &is2_admin,
+ .counter_id = 2012,
+ .vctrl = &test_vctrl,
+ };
+ struct vcap_counter ctr = {};
+ struct vcap_counter ctr2 = {};
+ int ret;
+
+ vcap_test_api_init(&is2_admin);
+ test_hw_cache.counter = 55;
+ test_hw_cache.sticky = true;
+
+ list_add_tail(&ri.list, &is2_admin.rules);
+ list_add_tail(&ri2.list, &is2_admin.rules);
+
+ ret = vcap_rule_get_counter(&ri.data, &ctr);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+
+ KUNIT_EXPECT_EQ(test, 1011, test_hw_counter_id);
+ KUNIT_EXPECT_EQ(test, 55, ctr.value);
+ KUNIT_EXPECT_EQ(test, true, ctr.sticky);
+ KUNIT_EXPECT_EQ(test, 400, test_updateaddr[0]);
+
+ test_hw_cache.counter = 22;
+ test_hw_cache.sticky = false;
+
+ ret = vcap_rule_get_counter(&ri2.data, &ctr2);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+
+ KUNIT_EXPECT_EQ(test, 2012, test_hw_counter_id);
+ KUNIT_EXPECT_EQ(test, 22, ctr2.value);
+ KUNIT_EXPECT_EQ(test, false, ctr2.sticky);
+ KUNIT_EXPECT_EQ(test, 300, test_updateaddr[1]);
+}
+
+static void vcap_api_rule_insert_in_order_test(struct kunit *test)
+{
+ /* Data used by VCAP Library callback */
+ static u32 keydata[32] = {};
+ static u32 mskdata[32] = {};
+ static u32 actdata[32] = {};
+
+ struct vcap_admin admin = {
+ .vtype = VCAP_TYPE_IS0,
+ .first_cid = 10000,
+ .last_cid = 19999,
+ .lookups = 4,
+ .last_valid_addr = 3071,
+ .first_valid_addr = 0,
+ .last_used_addr = 800,
+ .cache = {
+ .keystream = keydata,
+ .maskstream = mskdata,
+ .actionstream = actdata,
+ },
+ };
+
+ vcap_test_api_init(&admin);
+
+ /* Create rules with different sizes and check that they are placed
+ * at the correct address in the VCAP according to size
+ */
+ test_vcap_xn_rule_creator(test, 10000, VCAP_USER_QOS, 10, 500, 12, 780);
+ test_vcap_xn_rule_creator(test, 10000, VCAP_USER_QOS, 20, 400, 6, 774);
+ test_vcap_xn_rule_creator(test, 10000, VCAP_USER_QOS, 30, 300, 3, 771);
+ test_vcap_xn_rule_creator(test, 10000, VCAP_USER_QOS, 40, 200, 2, 768);
+
+ vcap_del_rule(&test_vctrl, &test_netdev, 200);
+ vcap_del_rule(&test_vctrl, &test_netdev, 300);
+ vcap_del_rule(&test_vctrl, &test_netdev, 400);
+ vcap_del_rule(&test_vctrl, &test_netdev, 500);
+}
+
+static void vcap_api_rule_insert_reverse_order_test(struct kunit *test)
+{
+ /* Data used by VCAP Library callback */
+ static u32 keydata[32] = {};
+ static u32 mskdata[32] = {};
+ static u32 actdata[32] = {};
+
+ struct vcap_admin admin = {
+ .vtype = VCAP_TYPE_IS0,
+ .first_cid = 10000,
+ .last_cid = 19999,
+ .lookups = 4,
+ .last_valid_addr = 3071,
+ .first_valid_addr = 0,
+ .last_used_addr = 800,
+ .cache = {
+ .keystream = keydata,
+ .maskstream = mskdata,
+ .actionstream = actdata,
+ },
+ };
+ struct vcap_rule_internal *elem;
+ u32 exp_addr[] = {780, 774, 771, 768, 767};
+ int idx;
+
+ vcap_test_api_init(&admin);
+
+ /* Create rules with different sizes and check that they are placed
+ * at the correct address in the VCAP according to size
+ */
+ test_vcap_xn_rule_creator(test, 10000, VCAP_USER_QOS, 20, 200, 2, 798);
+ KUNIT_EXPECT_EQ(test, 0, test_move_offset);
+ KUNIT_EXPECT_EQ(test, 0, test_move_count);
+ KUNIT_EXPECT_EQ(test, 0, test_move_addr);
+
+ test_vcap_xn_rule_creator(test, 10000, VCAP_USER_QOS, 30, 300, 3, 795);
+ KUNIT_EXPECT_EQ(test, 6, test_move_offset);
+ KUNIT_EXPECT_EQ(test, 3, test_move_count);
+ KUNIT_EXPECT_EQ(test, 798, test_move_addr);
+
+ test_vcap_xn_rule_creator(test, 10000, VCAP_USER_QOS, 40, 400, 6, 792);
+ KUNIT_EXPECT_EQ(test, 6, test_move_offset);
+ KUNIT_EXPECT_EQ(test, 6, test_move_count);
+ KUNIT_EXPECT_EQ(test, 792, test_move_addr);
+
+ test_vcap_xn_rule_creator(test, 10000, VCAP_USER_QOS, 50, 500, 12, 780);
+ KUNIT_EXPECT_EQ(test, 18, test_move_offset);
+ KUNIT_EXPECT_EQ(test, 12, test_move_count);
+ KUNIT_EXPECT_EQ(test, 786, test_move_addr);
+
+ idx = 0;
+ list_for_each_entry(elem, &admin.rules, list) {
+ KUNIT_EXPECT_EQ(test, exp_addr[idx], elem->addr);
+ ++idx;
+ }
+ KUNIT_EXPECT_EQ(test, 768, admin.last_used_addr);
+
+ vcap_del_rule(&test_vctrl, &test_netdev, 500);
+ vcap_del_rule(&test_vctrl, &test_netdev, 400);
+ vcap_del_rule(&test_vctrl, &test_netdev, 300);
+ vcap_del_rule(&test_vctrl, &test_netdev, 200);
+}
+
+static void vcap_api_rule_remove_at_end_test(struct kunit *test)
+{
+ /* Data used by VCAP Library callback */
+ static u32 keydata[32] = {};
+ static u32 mskdata[32] = {};
+ static u32 actdata[32] = {};
+
+ struct vcap_admin admin = {
+ .vtype = VCAP_TYPE_IS0,
+ .first_cid = 10000,
+ .last_cid = 19999,
+ .lookups = 4,
+ .last_valid_addr = 3071,
+ .first_valid_addr = 0,
+ .last_used_addr = 800,
+ .cache = {
+ .keystream = keydata,
+ .maskstream = mskdata,
+ .actionstream = actdata,
+ },
+ };
+ int ret;
+
+ vcap_test_api_init(&admin);
+ test_init_rule_deletion();
+
+ /* Create rules with different sizes and check that they are placed
+ * at the correct address in the VCAP according to size
+ */
+ test_vcap_xn_rule_creator(test, 10000, VCAP_USER_QOS, 10, 500, 12, 780);
+ test_vcap_xn_rule_creator(test, 10000, VCAP_USER_QOS, 20, 400, 6, 774);
+ test_vcap_xn_rule_creator(test, 10000, VCAP_USER_QOS, 30, 300, 3, 771);
+ test_vcap_xn_rule_creator(test, 10000, VCAP_USER_QOS, 40, 200, 2, 768);
+
+ /* Remove rules again from the end */
+ ret = vcap_del_rule(&test_vctrl, &test_netdev, 200);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ KUNIT_EXPECT_EQ(test, 0, test_move_addr);
+ KUNIT_EXPECT_EQ(test, 0, test_move_offset);
+ KUNIT_EXPECT_EQ(test, 0, test_move_count);
+ KUNIT_EXPECT_EQ(test, 768, test_init_start);
+ KUNIT_EXPECT_EQ(test, 2, test_init_count);
+ KUNIT_EXPECT_EQ(test, 771, admin.last_used_addr);
+
+ ret = vcap_del_rule(&test_vctrl, &test_netdev, 300);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ KUNIT_EXPECT_EQ(test, 0, test_move_addr);
+ KUNIT_EXPECT_EQ(test, 0, test_move_offset);
+ KUNIT_EXPECT_EQ(test, 0, test_move_count);
+ KUNIT_EXPECT_EQ(test, 771, test_init_start);
+ KUNIT_EXPECT_EQ(test, 3, test_init_count);
+ KUNIT_EXPECT_EQ(test, 774, admin.last_used_addr);
+
+ ret = vcap_del_rule(&test_vctrl, &test_netdev, 400);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ KUNIT_EXPECT_EQ(test, 0, test_move_addr);
+ KUNIT_EXPECT_EQ(test, 0, test_move_offset);
+ KUNIT_EXPECT_EQ(test, 0, test_move_count);
+ KUNIT_EXPECT_EQ(test, 774, test_init_start);
+ KUNIT_EXPECT_EQ(test, 6, test_init_count);
+ KUNIT_EXPECT_EQ(test, 780, admin.last_used_addr);
+
+ ret = vcap_del_rule(&test_vctrl, &test_netdev, 500);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ KUNIT_EXPECT_EQ(test, 0, test_move_addr);
+ KUNIT_EXPECT_EQ(test, 0, test_move_offset);
+ KUNIT_EXPECT_EQ(test, 0, test_move_count);
+ KUNIT_EXPECT_EQ(test, 780, test_init_start);
+ KUNIT_EXPECT_EQ(test, 12, test_init_count);
+ KUNIT_EXPECT_EQ(test, 3072, admin.last_used_addr);
+}
+
+static void vcap_api_rule_remove_in_middle_test(struct kunit *test)
+{
+ /* Data used by VCAP Library callback */
+ static u32 keydata[32] = {};
+ static u32 mskdata[32] = {};
+ static u32 actdata[32] = {};
+
+ struct vcap_admin admin = {
+ .vtype = VCAP_TYPE_IS0,
+ .first_cid = 10000,
+ .last_cid = 19999,
+ .lookups = 4,
+ .first_valid_addr = 0,
+ .last_used_addr = 800,
+ .last_valid_addr = 800 - 1,
+ .cache = {
+ .keystream = keydata,
+ .maskstream = mskdata,
+ .actionstream = actdata,
+ },
+ };
+ int ret;
+
+ vcap_test_api_init(&admin);
+
+ /* Create rules with different sizes and check that they are placed
+ * at the correct address in the VCAP according to size
+ */
+ test_vcap_xn_rule_creator(test, 10000, VCAP_USER_QOS, 10, 500, 12, 780);
+ test_vcap_xn_rule_creator(test, 10000, VCAP_USER_QOS, 20, 400, 6, 774);
+ test_vcap_xn_rule_creator(test, 10000, VCAP_USER_QOS, 30, 300, 3, 771);
+ test_vcap_xn_rule_creator(test, 10000, VCAP_USER_QOS, 40, 200, 2, 768);
+
+ /* Remove rules in the middle */
+ test_init_rule_deletion();
+ ret = vcap_del_rule(&test_vctrl, &test_netdev, 400);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ KUNIT_EXPECT_EQ(test, 768, test_move_addr);
+ KUNIT_EXPECT_EQ(test, -6, test_move_offset);
+ KUNIT_EXPECT_EQ(test, 6, test_move_count);
+ KUNIT_EXPECT_EQ(test, 768, test_init_start);
+ KUNIT_EXPECT_EQ(test, 6, test_init_count);
+ KUNIT_EXPECT_EQ(test, 774, admin.last_used_addr);
+
+ test_init_rule_deletion();
+ ret = vcap_del_rule(&test_vctrl, &test_netdev, 300);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ KUNIT_EXPECT_EQ(test, 774, test_move_addr);
+ KUNIT_EXPECT_EQ(test, -4, test_move_offset);
+ KUNIT_EXPECT_EQ(test, 2, test_move_count);
+ KUNIT_EXPECT_EQ(test, 774, test_init_start);
+ KUNIT_EXPECT_EQ(test, 4, test_init_count);
+ KUNIT_EXPECT_EQ(test, 778, admin.last_used_addr);
+
+ test_init_rule_deletion();
+ ret = vcap_del_rule(&test_vctrl, &test_netdev, 500);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ KUNIT_EXPECT_EQ(test, 778, test_move_addr);
+ KUNIT_EXPECT_EQ(test, -20, test_move_offset);
+ KUNIT_EXPECT_EQ(test, 2, test_move_count);
+ KUNIT_EXPECT_EQ(test, 778, test_init_start);
+ KUNIT_EXPECT_EQ(test, 20, test_init_count);
+ KUNIT_EXPECT_EQ(test, 798, admin.last_used_addr);
+
+ test_init_rule_deletion();
+ ret = vcap_del_rule(&test_vctrl, &test_netdev, 200);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ KUNIT_EXPECT_EQ(test, 0, test_move_addr);
+ KUNIT_EXPECT_EQ(test, 0, test_move_offset);
+ KUNIT_EXPECT_EQ(test, 0, test_move_count);
+ KUNIT_EXPECT_EQ(test, 798, test_init_start);
+ KUNIT_EXPECT_EQ(test, 2, test_init_count);
+ KUNIT_EXPECT_EQ(test, 800, admin.last_used_addr);
+}
+
+static void vcap_api_rule_remove_in_front_test(struct kunit *test)
+{
+ /* Data used by VCAP Library callback */
+ static u32 keydata[32] = {};
+ static u32 mskdata[32] = {};
+ static u32 actdata[32] = {};
+
+ struct vcap_admin admin = {
+ .vtype = VCAP_TYPE_IS0,
+ .first_cid = 10000,
+ .last_cid = 19999,
+ .lookups = 4,
+ .first_valid_addr = 0,
+ .last_used_addr = 800,
+ .last_valid_addr = 800 - 1,
+ .cache = {
+ .keystream = keydata,
+ .maskstream = mskdata,
+ .actionstream = actdata,
+ },
+ };
+ int ret;
+
+ vcap_test_api_init(&admin);
+
+ test_vcap_xn_rule_creator(test, 10000, VCAP_USER_QOS, 10, 500, 12, 780);
+ KUNIT_EXPECT_EQ(test, 780, admin.last_used_addr);
+
+ test_init_rule_deletion();
+ ret = vcap_del_rule(&test_vctrl, &test_netdev, 500);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ KUNIT_EXPECT_EQ(test, 0, test_move_addr);
+ KUNIT_EXPECT_EQ(test, 0, test_move_offset);
+ KUNIT_EXPECT_EQ(test, 0, test_move_count);
+ KUNIT_EXPECT_EQ(test, 780, test_init_start);
+ KUNIT_EXPECT_EQ(test, 12, test_init_count);
+ KUNIT_EXPECT_EQ(test, 800, admin.last_used_addr);
+
+ test_vcap_xn_rule_creator(test, 10000, VCAP_USER_QOS, 20, 400, 6, 792);
+ test_vcap_xn_rule_creator(test, 10000, VCAP_USER_QOS, 30, 300, 3, 789);
+ test_vcap_xn_rule_creator(test, 10000, VCAP_USER_QOS, 40, 200, 2, 786);
+
+ test_init_rule_deletion();
+ ret = vcap_del_rule(&test_vctrl, &test_netdev, 400);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ KUNIT_EXPECT_EQ(test, 786, test_move_addr);
+ KUNIT_EXPECT_EQ(test, -8, test_move_offset);
+ KUNIT_EXPECT_EQ(test, 6, test_move_count);
+ KUNIT_EXPECT_EQ(test, 786, test_init_start);
+ KUNIT_EXPECT_EQ(test, 8, test_init_count);
+ KUNIT_EXPECT_EQ(test, 794, admin.last_used_addr);
+
+ vcap_del_rule(&test_vctrl, &test_netdev, 200);
+ vcap_del_rule(&test_vctrl, &test_netdev, 300);
+}
+
+static struct kunit_case vcap_api_rule_remove_test_cases[] = {
+ KUNIT_CASE(vcap_api_rule_remove_at_end_test),
+ KUNIT_CASE(vcap_api_rule_remove_in_middle_test),
+ KUNIT_CASE(vcap_api_rule_remove_in_front_test),
+ {}
+};
+
+static void vcap_api_next_lookup_basic_test(struct kunit *test)
+{
+ struct vcap_admin admin1 = {
+ .vtype = VCAP_TYPE_IS2,
+ .vinst = 0,
+ .first_cid = 8000000,
+ .last_cid = 8199999,
+ .lookups = 4,
+ .lookups_per_instance = 2,
+ };
+ struct vcap_admin admin2 = {
+ .vtype = VCAP_TYPE_IS2,
+ .vinst = 1,
+ .first_cid = 8200000,
+ .last_cid = 8399999,
+ .lookups = 4,
+ .lookups_per_instance = 2,
+ };
+ bool ret;
+
+ vcap_test_api_init(&admin1);
+ list_add_tail(&admin2.list, &test_vctrl.list);
+
+ ret = vcap_is_next_lookup(&test_vctrl, 8000000, 1001000);
+ KUNIT_EXPECT_EQ(test, false, ret);
+ ret = vcap_is_next_lookup(&test_vctrl, 8000000, 8001000);
+ KUNIT_EXPECT_EQ(test, false, ret);
+ ret = vcap_is_next_lookup(&test_vctrl, 8000000, 8101000);
+ KUNIT_EXPECT_EQ(test, true, ret);
+
+ ret = vcap_is_next_lookup(&test_vctrl, 8100000, 8101000);
+ KUNIT_EXPECT_EQ(test, false, ret);
+ ret = vcap_is_next_lookup(&test_vctrl, 8100000, 8201000);
+ KUNIT_EXPECT_EQ(test, true, ret);
+
+ ret = vcap_is_next_lookup(&test_vctrl, 8200000, 8201000);
+ KUNIT_EXPECT_EQ(test, false, ret);
+ ret = vcap_is_next_lookup(&test_vctrl, 8200000, 8301000);
+ KUNIT_EXPECT_EQ(test, true, ret);
+
+ ret = vcap_is_next_lookup(&test_vctrl, 8300000, 8301000);
+ KUNIT_EXPECT_EQ(test, false, ret);
+ ret = vcap_is_next_lookup(&test_vctrl, 8300000, 8401000);
+ KUNIT_EXPECT_EQ(test, false, ret);
+}
+
+static void vcap_api_next_lookup_advanced_test(struct kunit *test)
+{
+ struct vcap_admin admin[] = {
+ {
+ .vtype = VCAP_TYPE_IS0,
+ .vinst = 0,
+ .first_cid = 1000000,
+ .last_cid = 1199999,
+ .lookups = 6,
+ .lookups_per_instance = 2,
+ }, {
+ .vtype = VCAP_TYPE_IS0,
+ .vinst = 1,
+ .first_cid = 1200000,
+ .last_cid = 1399999,
+ .lookups = 6,
+ .lookups_per_instance = 2,
+ }, {
+ .vtype = VCAP_TYPE_IS0,
+ .vinst = 2,
+ .first_cid = 1400000,
+ .last_cid = 1599999,
+ .lookups = 6,
+ .lookups_per_instance = 2,
+ }, {
+ .vtype = VCAP_TYPE_IS2,
+ .vinst = 0,
+ .first_cid = 8000000,
+ .last_cid = 8199999,
+ .lookups = 4,
+ .lookups_per_instance = 2,
+ }, {
+ .vtype = VCAP_TYPE_IS2,
+ .vinst = 1,
+ .first_cid = 8200000,
+ .last_cid = 8399999,
+ .lookups = 4,
+ .lookups_per_instance = 2,
+ }
+ };
+ bool ret;
+
+ vcap_test_api_init(&admin[0]);
+ list_add_tail(&admin[1].list, &test_vctrl.list);
+ list_add_tail(&admin[2].list, &test_vctrl.list);
+ list_add_tail(&admin[3].list, &test_vctrl.list);
+ list_add_tail(&admin[4].list, &test_vctrl.list);
+
+ ret = vcap_is_next_lookup(&test_vctrl, 1000000, 1001000);
+ KUNIT_EXPECT_EQ(test, false, ret);
+ ret = vcap_is_next_lookup(&test_vctrl, 1000000, 1101000);
+ KUNIT_EXPECT_EQ(test, true, ret);
+
+ ret = vcap_is_next_lookup(&test_vctrl, 1100000, 1201000);
+ KUNIT_EXPECT_EQ(test, true, ret);
+ ret = vcap_is_next_lookup(&test_vctrl, 1100000, 1301000);
+ KUNIT_EXPECT_EQ(test, true, ret);
+ ret = vcap_is_next_lookup(&test_vctrl, 1100000, 8101000);
+ KUNIT_EXPECT_EQ(test, true, ret);
+ ret = vcap_is_next_lookup(&test_vctrl, 1300000, 1401000);
+ KUNIT_EXPECT_EQ(test, true, ret);
+ ret = vcap_is_next_lookup(&test_vctrl, 1400000, 1501000);
+ KUNIT_EXPECT_EQ(test, true, ret);
+ ret = vcap_is_next_lookup(&test_vctrl, 1500000, 8001000);
+ KUNIT_EXPECT_EQ(test, true, ret);
+
+ ret = vcap_is_next_lookup(&test_vctrl, 8000000, 8001000);
+ KUNIT_EXPECT_EQ(test, false, ret);
+ ret = vcap_is_next_lookup(&test_vctrl, 8000000, 8101000);
+ KUNIT_EXPECT_EQ(test, true, ret);
+
+ ret = vcap_is_next_lookup(&test_vctrl, 8300000, 8301000);
+ KUNIT_EXPECT_EQ(test, false, ret);
+ ret = vcap_is_next_lookup(&test_vctrl, 8300000, 8401000);
+ KUNIT_EXPECT_EQ(test, false, ret);
+}
+
+static void vcap_api_filter_unsupported_keys_test(struct kunit *test)
+{
+ struct vcap_admin admin = {
+ .vtype = VCAP_TYPE_IS2,
+ };
+ struct vcap_rule_internal ri = {
+ .admin = &admin,
+ .vctrl = &test_vctrl,
+ .data.keyset = VCAP_KFS_MAC_ETYPE,
+ };
+ enum vcap_key_field keylist[] = {
+ VCAP_KF_TYPE,
+ VCAP_KF_LOOKUP_FIRST_IS,
+ VCAP_KF_ARP_ADDR_SPACE_OK_IS, /* arp keys are not in keyset */
+ VCAP_KF_ARP_PROTO_SPACE_OK_IS,
+ VCAP_KF_ARP_LEN_OK_IS,
+ VCAP_KF_ARP_TGT_MATCH_IS,
+ VCAP_KF_ARP_SENDER_MATCH_IS,
+ VCAP_KF_ARP_OPCODE_UNKNOWN_IS,
+ VCAP_KF_ARP_OPCODE,
+ VCAP_KF_8021Q_DEI_CLS,
+ VCAP_KF_8021Q_PCP_CLS,
+ VCAP_KF_8021Q_VID_CLS,
+ VCAP_KF_L2_MC_IS,
+ VCAP_KF_L2_BC_IS,
+ };
+ enum vcap_key_field expected[] = {
+ VCAP_KF_TYPE,
+ VCAP_KF_LOOKUP_FIRST_IS,
+ VCAP_KF_8021Q_DEI_CLS,
+ VCAP_KF_8021Q_PCP_CLS,
+ VCAP_KF_8021Q_VID_CLS,
+ VCAP_KF_L2_MC_IS,
+ VCAP_KF_L2_BC_IS,
+ };
+ struct vcap_client_keyfield *ckf, *next;
+ bool ret;
+ int idx;
+
+ /* Add all keys to the rule */
+ INIT_LIST_HEAD(&ri.data.keyfields);
+ for (idx = 0; idx < ARRAY_SIZE(keylist); idx++) {
+ ckf = kzalloc(sizeof(*ckf), GFP_KERNEL);
+ if (ckf) {
+ ckf->ctrl.key = keylist[idx];
+ list_add_tail(&ckf->ctrl.list, &ri.data.keyfields);
+ }
+ }
+
+ KUNIT_EXPECT_EQ(test, 14, ARRAY_SIZE(keylist));
+
+ /* Drop unsupported keys from the rule */
+ ret = vcap_filter_rule_keys(&ri.data, NULL, 0, true);
+
+ KUNIT_EXPECT_EQ(test, 0, ret);
+
+ /* Check remaining keys in the rule */
+ idx = 0;
+ list_for_each_entry_safe(ckf, next, &ri.data.keyfields, ctrl.list) {
+ KUNIT_EXPECT_EQ(test, expected[idx], ckf->ctrl.key);
+ list_del(&ckf->ctrl.list);
+ kfree(ckf);
+ ++idx;
+ }
+ KUNIT_EXPECT_EQ(test, 7, idx);
+}
+
+static void vcap_api_filter_keylist_test(struct kunit *test)
+{
+ struct vcap_admin admin = {
+ .vtype = VCAP_TYPE_IS0,
+ };
+ struct vcap_rule_internal ri = {
+ .admin = &admin,
+ .vctrl = &test_vctrl,
+ .data.keyset = VCAP_KFS_NORMAL_7TUPLE,
+ };
+ enum vcap_key_field keylist[] = {
+ VCAP_KF_TYPE,
+ VCAP_KF_LOOKUP_FIRST_IS,
+ VCAP_KF_LOOKUP_GEN_IDX_SEL,
+ VCAP_KF_LOOKUP_GEN_IDX,
+ VCAP_KF_IF_IGR_PORT_MASK_SEL,
+ VCAP_KF_IF_IGR_PORT_MASK,
+ VCAP_KF_L2_MC_IS,
+ VCAP_KF_L2_BC_IS,
+ VCAP_KF_8021Q_VLAN_TAGS,
+ VCAP_KF_8021Q_TPID0,
+ VCAP_KF_8021Q_PCP0,
+ VCAP_KF_8021Q_DEI0,
+ VCAP_KF_8021Q_VID0,
+ VCAP_KF_8021Q_TPID1,
+ VCAP_KF_8021Q_PCP1,
+ VCAP_KF_8021Q_DEI1,
+ VCAP_KF_8021Q_VID1,
+ VCAP_KF_8021Q_TPID2,
+ VCAP_KF_8021Q_PCP2,
+ VCAP_KF_8021Q_DEI2,
+ VCAP_KF_8021Q_VID2,
+ VCAP_KF_L2_DMAC,
+ VCAP_KF_L2_SMAC,
+ VCAP_KF_IP_MC_IS,
+ VCAP_KF_ETYPE_LEN_IS,
+ VCAP_KF_ETYPE,
+ VCAP_KF_IP_SNAP_IS,
+ VCAP_KF_IP4_IS,
+ VCAP_KF_L3_FRAGMENT_TYPE,
+ VCAP_KF_L3_FRAG_INVLD_L4_LEN,
+ VCAP_KF_L3_OPTIONS_IS,
+ VCAP_KF_L3_DSCP,
+ VCAP_KF_L3_IP6_DIP,
+ VCAP_KF_L3_IP6_SIP,
+ VCAP_KF_TCP_UDP_IS,
+ VCAP_KF_TCP_IS,
+ VCAP_KF_L4_SPORT,
+ VCAP_KF_L4_RNG,
+ };
+ enum vcap_key_field droplist[] = {
+ VCAP_KF_8021Q_TPID1,
+ VCAP_KF_8021Q_PCP1,
+ VCAP_KF_8021Q_DEI1,
+ VCAP_KF_8021Q_VID1,
+ VCAP_KF_8021Q_TPID2,
+ VCAP_KF_8021Q_PCP2,
+ VCAP_KF_8021Q_DEI2,
+ VCAP_KF_8021Q_VID2,
+ VCAP_KF_L3_IP6_DIP,
+ VCAP_KF_L3_IP6_SIP,
+ VCAP_KF_L4_SPORT,
+ VCAP_KF_L4_RNG,
+ };
+ enum vcap_key_field expected[] = {
+ VCAP_KF_TYPE,
+ VCAP_KF_LOOKUP_FIRST_IS,
+ VCAP_KF_LOOKUP_GEN_IDX_SEL,
+ VCAP_KF_LOOKUP_GEN_IDX,
+ VCAP_KF_IF_IGR_PORT_MASK_SEL,
+ VCAP_KF_IF_IGR_PORT_MASK,
+ VCAP_KF_L2_MC_IS,
+ VCAP_KF_L2_BC_IS,
+ VCAP_KF_8021Q_VLAN_TAGS,
+ VCAP_KF_8021Q_TPID0,
+ VCAP_KF_8021Q_PCP0,
+ VCAP_KF_8021Q_DEI0,
+ VCAP_KF_8021Q_VID0,
+ VCAP_KF_L2_DMAC,
+ VCAP_KF_L2_SMAC,
+ VCAP_KF_IP_MC_IS,
+ VCAP_KF_ETYPE_LEN_IS,
+ VCAP_KF_ETYPE,
+ VCAP_KF_IP_SNAP_IS,
+ VCAP_KF_IP4_IS,
+ VCAP_KF_L3_FRAGMENT_TYPE,
+ VCAP_KF_L3_FRAG_INVLD_L4_LEN,
+ VCAP_KF_L3_OPTIONS_IS,
+ VCAP_KF_L3_DSCP,
+ VCAP_KF_TCP_UDP_IS,
+ VCAP_KF_TCP_IS,
+ };
+ struct vcap_client_keyfield *ckf, *next;
+ bool ret;
+ int idx;
+
+ /* Add all keys to the rule */
+ INIT_LIST_HEAD(&ri.data.keyfields);
+ for (idx = 0; idx < ARRAY_SIZE(keylist); idx++) {
+ ckf = kzalloc(sizeof(*ckf), GFP_KERNEL);
+ if (ckf) {
+ ckf->ctrl.key = keylist[idx];
+ list_add_tail(&ckf->ctrl.list, &ri.data.keyfields);
+ }
+ }
+
+ KUNIT_EXPECT_EQ(test, 38, ARRAY_SIZE(keylist));
+
+ /* Drop listed keys from the rule */
+ ret = vcap_filter_rule_keys(&ri.data, droplist, ARRAY_SIZE(droplist),
+ false);
+
+ KUNIT_EXPECT_EQ(test, 0, ret);
+
+ /* Check remaining keys in the rule */
+ idx = 0;
+ list_for_each_entry_safe(ckf, next, &ri.data.keyfields, ctrl.list) {
+ KUNIT_EXPECT_EQ(test, expected[idx], ckf->ctrl.key);
+ list_del(&ckf->ctrl.list);
+ kfree(ckf);
+ ++idx;
+ }
+ KUNIT_EXPECT_EQ(test, 26, idx);
+}
+
+static void vcap_api_rule_chain_path_test(struct kunit *test)
+{
+ struct vcap_admin admin1 = {
+ .vtype = VCAP_TYPE_IS0,
+ .vinst = 0,
+ .first_cid = 1000000,
+ .last_cid = 1199999,
+ .lookups = 6,
+ .lookups_per_instance = 2,
+ };
+ struct vcap_enabled_port eport3 = {
+ .ndev = &test_netdev,
+ .cookie = 0x100,
+ .src_cid = 0,
+ .dst_cid = 1000000,
+ };
+ struct vcap_enabled_port eport2 = {
+ .ndev = &test_netdev,
+ .cookie = 0x200,
+ .src_cid = 1000000,
+ .dst_cid = 1100000,
+ };
+ struct vcap_enabled_port eport1 = {
+ .ndev = &test_netdev,
+ .cookie = 0x300,
+ .src_cid = 1100000,
+ .dst_cid = 8000000,
+ };
+ bool ret;
+ int chain;
+
+ vcap_test_api_init(&admin1);
+ list_add_tail(&eport1.list, &admin1.enabled);
+ list_add_tail(&eport2.list, &admin1.enabled);
+ list_add_tail(&eport3.list, &admin1.enabled);
+
+ ret = vcap_path_exist(&test_vctrl, &test_netdev, 1000000);
+ KUNIT_EXPECT_EQ(test, true, ret);
+
+ ret = vcap_path_exist(&test_vctrl, &test_netdev, 1100000);
+ KUNIT_EXPECT_EQ(test, true, ret);
+
+ ret = vcap_path_exist(&test_vctrl, &test_netdev, 1200000);
+ KUNIT_EXPECT_EQ(test, false, ret);
+
+ chain = vcap_get_next_chain(&test_vctrl, &test_netdev, 0);
+ KUNIT_EXPECT_EQ(test, 1000000, chain);
+
+ chain = vcap_get_next_chain(&test_vctrl, &test_netdev, 1000000);
+ KUNIT_EXPECT_EQ(test, 1100000, chain);
+
+ chain = vcap_get_next_chain(&test_vctrl, &test_netdev, 1100000);
+ KUNIT_EXPECT_EQ(test, 8000000, chain);
+}
+
+static struct kunit_case vcap_api_rule_enable_test_cases[] = {
+ KUNIT_CASE(vcap_api_rule_chain_path_test),
+ {}
+};
+
+static struct kunit_suite vcap_api_rule_enable_test_suite = {
+ .name = "VCAP_API_Rule_Enable_Testsuite",
+ .test_cases = vcap_api_rule_enable_test_cases,
+};
+
+static struct kunit_suite vcap_api_rule_remove_test_suite = {
+ .name = "VCAP_API_Rule_Remove_Testsuite",
+ .test_cases = vcap_api_rule_remove_test_cases,
+};
+
+static struct kunit_case vcap_api_rule_insert_test_cases[] = {
+ KUNIT_CASE(vcap_api_rule_insert_in_order_test),
+ KUNIT_CASE(vcap_api_rule_insert_reverse_order_test),
+ {}
+};
+
+static struct kunit_suite vcap_api_rule_insert_test_suite = {
+ .name = "VCAP_API_Rule_Insert_Testsuite",
+ .test_cases = vcap_api_rule_insert_test_cases,
+};
+
+static struct kunit_case vcap_api_rule_counter_test_cases[] = {
+ KUNIT_CASE(vcap_api_set_rule_counter_test),
+ KUNIT_CASE(vcap_api_get_rule_counter_test),
+ {}
+};
+
+static struct kunit_suite vcap_api_rule_counter_test_suite = {
+ .name = "VCAP_API_Rule_Counter_Testsuite",
+ .test_cases = vcap_api_rule_counter_test_cases,
+};
+
+static struct kunit_case vcap_api_support_test_cases[] = {
+ KUNIT_CASE(vcap_api_next_lookup_basic_test),
+ KUNIT_CASE(vcap_api_next_lookup_advanced_test),
+ KUNIT_CASE(vcap_api_filter_unsupported_keys_test),
+ KUNIT_CASE(vcap_api_filter_keylist_test),
+ {}
+};
+
+static struct kunit_suite vcap_api_support_test_suite = {
+ .name = "VCAP_API_Support_Testsuite",
+ .test_cases = vcap_api_support_test_cases,
+};
+
+static struct kunit_case vcap_api_full_rule_test_cases[] = {
+ KUNIT_CASE(vcap_api_rule_find_keyset_basic_test),
+ KUNIT_CASE(vcap_api_rule_find_keyset_failed_test),
+ KUNIT_CASE(vcap_api_rule_find_keyset_many_test),
+ KUNIT_CASE(vcap_api_encode_rule_test),
+ {}
+};
+
+static struct kunit_suite vcap_api_full_rule_test_suite = {
+ .name = "VCAP_API_Full_Rule_Testsuite",
+ .test_cases = vcap_api_full_rule_test_cases,
+};
+
+static struct kunit_case vcap_api_rule_value_test_cases[] = {
+ KUNIT_CASE(vcap_api_rule_add_keyvalue_test),
+ KUNIT_CASE(vcap_api_rule_add_actionvalue_test),
+ {}
+};
+
+static struct kunit_suite vcap_api_rule_value_test_suite = {
+ .name = "VCAP_API_Rule_Value_Testsuite",
+ .test_cases = vcap_api_rule_value_test_cases,
+};
+
+static struct kunit_case vcap_api_encoding_test_cases[] = {
+ KUNIT_CASE(vcap_api_set_bit_1_test),
+ KUNIT_CASE(vcap_api_set_bit_0_test),
+ KUNIT_CASE(vcap_api_iterator_init_test),
+ KUNIT_CASE(vcap_api_iterator_next_test),
+ KUNIT_CASE(vcap_api_encode_typegroups_test),
+ KUNIT_CASE(vcap_api_encode_bit_test),
+ KUNIT_CASE(vcap_api_encode_field_test),
+ KUNIT_CASE(vcap_api_encode_short_field_test),
+ KUNIT_CASE(vcap_api_encode_keyfield_test),
+ KUNIT_CASE(vcap_api_encode_max_keyfield_test),
+ KUNIT_CASE(vcap_api_encode_actionfield_test),
+ KUNIT_CASE(vcap_api_keyfield_typegroup_test),
+ KUNIT_CASE(vcap_api_actionfield_typegroup_test),
+ KUNIT_CASE(vcap_api_vcap_keyfields_test),
+ KUNIT_CASE(vcap_api_vcap_actionfields_test),
+ KUNIT_CASE(vcap_api_encode_rule_keyset_test),
+ KUNIT_CASE(vcap_api_encode_rule_actionset_test),
+ {}
+};
+
+static struct kunit_suite vcap_api_encoding_test_suite = {
+ .name = "VCAP_API_Encoding_Testsuite",
+ .test_cases = vcap_api_encoding_test_cases,
+};
+
+kunit_test_suite(vcap_api_rule_enable_test_suite);
+kunit_test_suite(vcap_api_rule_remove_test_suite);
+kunit_test_suite(vcap_api_rule_insert_test_suite);
+kunit_test_suite(vcap_api_rule_counter_test_suite);
+kunit_test_suite(vcap_api_support_test_suite);
+kunit_test_suite(vcap_api_full_rule_test_suite);
+kunit_test_suite(vcap_api_rule_value_test_suite);
+kunit_test_suite(vcap_api_encoding_test_suite);
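
These suites register with KUnit and can be run through the standard
kunit.py wrapper. A minimal sketch of a .kunitconfig for them follows —
the VCAP Kconfig symbols are assumed from this series and not verified
here, while CONFIG_KUNIT and the wrapper script are standard kernel
tooling:

	CONFIG_KUNIT=y
	CONFIG_VCAP=y
	CONFIG_VCAP_KUNIT_TEST=y

	$ ./tools/testing/kunit/kunit.py run --kunitconfig=<dir-with-config>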
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api_private.h b/drivers/net/ethernet/microchip/vcap/vcap_api_private.h
new file mode 100644
index 0000000000..df81d9ff50
--- /dev/null
+++ b/drivers/net/ethernet/microchip/vcap/vcap_api_private.h
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (C) 2022 Microchip Technology Inc. and its subsidiaries.
+ * Microchip VCAP API
+ */
+
+#ifndef __VCAP_API_PRIVATE__
+#define __VCAP_API_PRIVATE__
+
+#include <linux/types.h>
+
+#include "vcap_api.h"
+#include "vcap_api_client.h"
+
+#define to_intrule(rule) container_of((rule), struct vcap_rule_internal, data)
+
+enum vcap_rule_state {
+ VCAP_RS_PERMANENT, /* the rule is always stored in HW */
+ VCAP_RS_ENABLED, /* enabled in HW but can be disabled */
+ VCAP_RS_DISABLED, /* disabled (stored in SW) and can be enabled */
+};
+
+/* Private VCAP API rule data */
+struct vcap_rule_internal {
+ struct vcap_rule data; /* provided by the client */
+ struct list_head list; /* the vcap admin list of rules */
+ struct vcap_admin *admin; /* vcap hw instance */
+ struct net_device *ndev; /* the interface that the rule applies to */
+ struct vcap_control *vctrl; /* the client control */
+ u32 sort_key; /* defines the position in the VCAP */
+ int keyset_sw; /* subwords in a keyset */
+ int actionset_sw; /* subwords in an actionset */
+ int keyset_sw_regs; /* registers in a subword in a keyset */
+ int actionset_sw_regs; /* registers in a subword in an actionset */
+ int size; /* the size of the rule: max(entry, action) */
+ u32 addr; /* address in the VCAP at insertion */
+ u32 counter_id; /* counter id (if a dedicated counter is available) */
+ struct vcap_counter counter; /* last read counter value */
+ enum vcap_rule_state state; /* rule storage state */
+};
+
+/* Bit iterator for the VCAP cache streams */
+struct vcap_stream_iter {
+ u32 offset; /* bit offset from the stream start */
+ u32 sw_width; /* subword width in bits */
+ u32 regs_per_sw; /* registers per subword */
+ u32 reg_idx; /* current register index */
+ u32 reg_bitpos; /* bit offset in current register */
+ const struct vcap_typegroup *tg; /* current typegroup */
+};
+
+/* Check that the control has a valid set of callbacks */
+int vcap_api_check(struct vcap_control *ctrl);
+/* Erase the VCAP cache area used for encoding and decoding */
+void vcap_erase_cache(struct vcap_rule_internal *ri);
+
+/* Iterator functionality */
+
+void vcap_iter_init(struct vcap_stream_iter *itr, int sw_width,
+ const struct vcap_typegroup *tg, u32 offset);
+void vcap_iter_next(struct vcap_stream_iter *itr);
+void vcap_iter_set(struct vcap_stream_iter *itr, int sw_width,
+ const struct vcap_typegroup *tg, u32 offset);
+void vcap_iter_update(struct vcap_stream_iter *itr);
+
+/* Keyset and keyfield functionality */
+
+/* Return the number of keyfields in the keyset */
+int vcap_keyfield_count(struct vcap_control *vctrl,
+ enum vcap_type vt, enum vcap_keyfield_set keyset);
+/* Return the typegroup table for the matching keyset (using subword size) */
+const struct vcap_typegroup *
+vcap_keyfield_typegroup(struct vcap_control *vctrl,
+ enum vcap_type vt, enum vcap_keyfield_set keyset);
+/* Return the list of keyfields for the keyset */
+const struct vcap_field *vcap_keyfields(struct vcap_control *vctrl,
+ enum vcap_type vt,
+ enum vcap_keyfield_set keyset);
+
+/* Actionset and actionfield functionality */
+
+/* Return the actionset information for the actionset */
+const struct vcap_set *
+vcap_actionfieldset(struct vcap_control *vctrl,
+ enum vcap_type vt, enum vcap_actionfield_set actionset);
+/* Return the number of actionfields in the actionset */
+int vcap_actionfield_count(struct vcap_control *vctrl,
+ enum vcap_type vt,
+ enum vcap_actionfield_set actionset);
+/* Return the typegroup table for the matching actionset (using subword size) */
+const struct vcap_typegroup *
+vcap_actionfield_typegroup(struct vcap_control *vctrl, enum vcap_type vt,
+ enum vcap_actionfield_set actionset);
+/* Return the list of actionfields for the actionset */
+const struct vcap_field *
+vcap_actionfields(struct vcap_control *vctrl,
+ enum vcap_type vt, enum vcap_actionfield_set actionset);
+/* Map actionset id to a string with the actionset name */
+const char *vcap_actionset_name(struct vcap_control *vctrl,
+ enum vcap_actionfield_set actionset);
+/* Map key field id to a string with the key name */
+const char *vcap_actionfield_name(struct vcap_control *vctrl,
+ enum vcap_action_field action);
+
+/* Read key data from a VCAP address and discover if any rule keysets
+ * are present at that address
+ */
+int vcap_addr_keysets(struct vcap_control *vctrl, struct net_device *ndev,
+ struct vcap_admin *admin, int addr,
+ struct vcap_keyset_list *kslist);
+
+/* Verify that the typegroup information, subword count, keyset and type id
+ * are in sync and correct, return the list of matching keysets
+ */
+int vcap_find_keystream_keysets(struct vcap_control *vctrl, enum vcap_type vt,
+ u32 *keystream, u32 *mskstream, bool mask,
+ int sw_max, struct vcap_keyset_list *kslist);
+
+/* Get the keysets that matches the rule key type/mask */
+int vcap_rule_get_keysets(struct vcap_rule_internal *ri,
+ struct vcap_keyset_list *matches);
+/* Decode a rule from the VCAP cache and return a copy */
+struct vcap_rule *vcap_decode_rule(struct vcap_rule_internal *elem);
+
+#endif /* __VCAP_API_PRIVATE__ */
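
The stream iterator above walks a field bit by bit across subword and
typegroup boundaries in the cache streams. A minimal usage sketch,
assuming a subword width and a typegroup table supplied by the caller
(sw_width, tgt, offset and width are illustrative inputs, not values
taken from a real model):

	/* Sketch: walk 'width' bits of a field starting at 'offset' */
	struct vcap_stream_iter itr;
	int i;

	vcap_iter_init(&itr, sw_width, tgt, offset);
	for (i = 0; i < width; i++) {
		/* itr.reg_idx / itr.reg_bitpos locate the current bit */
		vcap_iter_next(&itr);
	}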
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_model_kunit.c b/drivers/net/ethernet/microchip/vcap/vcap_model_kunit.c
new file mode 100644
index 0000000000..5dbfc0d0c3
--- /dev/null
+++ b/drivers/net/ethernet/microchip/vcap/vcap_model_kunit.c
@@ -0,0 +1,4062 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/* Copyright (C) 2023 Microchip Technology Inc. and its subsidiaries.
+ * Microchip VCAP test model interface for kunit testing
+ */
+
+/* This file is autogenerated by cml-utils 2023-02-10 11:16:00 +0100.
+ * Commit ID: c30fb4bf0281cd4a7133bdab6682f9e43c872ada
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+
+#include "vcap_api.h"
+#include "vcap_model_kunit.h"
+
+/* keyfields */
+static const struct vcap_field is0_ll_full_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 2,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 2,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 3,
+ .width = 7,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 10,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_TPID0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 13,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_PCP0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 16,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 19,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 20,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_TPID1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 32,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_PCP1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 35,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI1] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 38,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 39,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_TPID2] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 51,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_PCP2] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 54,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI2] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 57,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID2] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 58,
+ .width = 12,
+ },
+ [VCAP_KF_L2_DMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 70,
+ .width = 48,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 118,
+ .width = 48,
+ },
+ [VCAP_KF_ETYPE_LEN_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 166,
+ .width = 1,
+ },
+ [VCAP_KF_ETYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 167,
+ .width = 16,
+ },
+ [VCAP_KF_IP_SNAP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 183,
+ .width = 1,
+ },
+ [VCAP_KF_IP4_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 184,
+ .width = 1,
+ },
+ [VCAP_KF_L3_FRAGMENT_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 185,
+ .width = 2,
+ },
+ [VCAP_KF_L3_FRAG_INVLD_L4_LEN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 187,
+ .width = 1,
+ },
+ [VCAP_KF_L3_OPTIONS_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 188,
+ .width = 1,
+ },
+ [VCAP_KF_L3_DSCP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 189,
+ .width = 6,
+ },
+ [VCAP_KF_L3_IP4_DIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 195,
+ .width = 32,
+ },
+ [VCAP_KF_L3_IP4_SIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 227,
+ .width = 32,
+ },
+ [VCAP_KF_TCP_UDP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 259,
+ .width = 1,
+ },
+ [VCAP_KF_TCP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 260,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 261,
+ .width = 16,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 277,
+ .width = 8,
+ },
+};
+
+static const struct vcap_field is0_normal_7tuple_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 0,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 1,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_GEN_IDX_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 2,
+ .width = 2,
+ },
+ [VCAP_KF_LOOKUP_GEN_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 4,
+ .width = 12,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 16,
+ .width = 2,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U72,
+ .offset = 18,
+ .width = 65,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 83,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 84,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 85,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_TPID0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 88,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_PCP0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 91,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 94,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 95,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_TPID1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 107,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_PCP1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 110,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI1] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 113,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 114,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_TPID2] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 126,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_PCP2] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 129,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI2] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 132,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID2] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 133,
+ .width = 12,
+ },
+ [VCAP_KF_L2_DMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 145,
+ .width = 48,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 193,
+ .width = 48,
+ },
+ [VCAP_KF_IP_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 241,
+ .width = 1,
+ },
+ [VCAP_KF_ETYPE_LEN_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 242,
+ .width = 1,
+ },
+ [VCAP_KF_ETYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 243,
+ .width = 16,
+ },
+ [VCAP_KF_IP_SNAP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 259,
+ .width = 1,
+ },
+ [VCAP_KF_IP4_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 260,
+ .width = 1,
+ },
+ [VCAP_KF_L3_FRAGMENT_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 261,
+ .width = 2,
+ },
+ [VCAP_KF_L3_FRAG_INVLD_L4_LEN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 263,
+ .width = 1,
+ },
+ [VCAP_KF_L3_OPTIONS_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 264,
+ .width = 1,
+ },
+ [VCAP_KF_L3_DSCP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 265,
+ .width = 6,
+ },
+ [VCAP_KF_L3_IP6_DIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 271,
+ .width = 128,
+ },
+ [VCAP_KF_L3_IP6_SIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 399,
+ .width = 128,
+ },
+ [VCAP_KF_TCP_UDP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 527,
+ .width = 1,
+ },
+ [VCAP_KF_TCP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 528,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 529,
+ .width = 16,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 545,
+ .width = 8,
+ },
+};
+
+static const struct vcap_field is0_normal_5tuple_ip4_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 2,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 2,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_GEN_IDX_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 3,
+ .width = 2,
+ },
+ [VCAP_KF_LOOKUP_GEN_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 5,
+ .width = 12,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 17,
+ .width = 2,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U72,
+ .offset = 19,
+ .width = 65,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 84,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 85,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 86,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_TPID0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 89,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_PCP0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 92,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 95,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 96,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_TPID1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 108,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_PCP1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 111,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI1] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 114,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 115,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_TPID2] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 127,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_PCP2] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 130,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI2] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 133,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID2] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 134,
+ .width = 12,
+ },
+ [VCAP_KF_IP_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 146,
+ .width = 1,
+ },
+ [VCAP_KF_IP4_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 147,
+ .width = 1,
+ },
+ [VCAP_KF_L3_FRAGMENT_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 148,
+ .width = 2,
+ },
+ [VCAP_KF_L3_FRAG_INVLD_L4_LEN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 150,
+ .width = 1,
+ },
+ [VCAP_KF_L3_OPTIONS_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 151,
+ .width = 1,
+ },
+ [VCAP_KF_L3_DSCP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 152,
+ .width = 6,
+ },
+ [VCAP_KF_L3_IP4_DIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 158,
+ .width = 32,
+ },
+ [VCAP_KF_L3_IP4_SIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 190,
+ .width = 32,
+ },
+ [VCAP_KF_L3_IP_PROTO] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 222,
+ .width = 8,
+ },
+ [VCAP_KF_TCP_UDP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 230,
+ .width = 1,
+ },
+ [VCAP_KF_TCP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 231,
+ .width = 1,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 232,
+ .width = 8,
+ },
+ [VCAP_KF_IP_PAYLOAD_5TUPLE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 240,
+ .width = 32,
+ },
+};
+
+static const struct vcap_field is0_pure_5tuple_ip4_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 2,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 2,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_GEN_IDX_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 3,
+ .width = 2,
+ },
+ [VCAP_KF_LOOKUP_GEN_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 5,
+ .width = 12,
+ },
+ [VCAP_KF_L3_FRAGMENT_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 17,
+ .width = 2,
+ },
+ [VCAP_KF_L3_FRAG_INVLD_L4_LEN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 19,
+ .width = 1,
+ },
+ [VCAP_KF_L3_OPTIONS_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 20,
+ .width = 1,
+ },
+ [VCAP_KF_L3_DSCP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 21,
+ .width = 6,
+ },
+ [VCAP_KF_L3_IP4_DIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 27,
+ .width = 32,
+ },
+ [VCAP_KF_L3_IP4_SIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 59,
+ .width = 32,
+ },
+ [VCAP_KF_L3_IP_PROTO] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 91,
+ .width = 8,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 99,
+ .width = 8,
+ },
+ [VCAP_KF_IP_PAYLOAD_5TUPLE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 107,
+ .width = 32,
+ },
+};
+
+static const struct vcap_field is0_etag_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 2,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 2,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 3,
+ .width = 7,
+ },
+ [VCAP_KF_8021BR_E_TAGGED] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 10,
+ .width = 1,
+ },
+ [VCAP_KF_8021BR_GRP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 11,
+ .width = 2,
+ },
+ [VCAP_KF_8021BR_ECID_EXT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 13,
+ .width = 8,
+ },
+ [VCAP_KF_8021BR_ECID_BASE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 21,
+ .width = 12,
+ },
+ [VCAP_KF_8021BR_IGR_ECID_EXT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 33,
+ .width = 8,
+ },
+ [VCAP_KF_8021BR_IGR_ECID_BASE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 41,
+ .width = 12,
+ },
+};
+
+static const struct vcap_field is2_mac_etype_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 4,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 4,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_PAG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 5,
+ .width = 8,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_L3] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 14,
+ .width = 4,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 18,
+ .width = 2,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 20,
+ .width = 32,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 52,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 53,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 54,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 55,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 56,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 68,
+ .width = 13,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 81,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 82,
+ .width = 3,
+ },
+ [VCAP_KF_L2_FWD_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 85,
+ .width = 1,
+ },
+ [VCAP_KF_L3_RT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 88,
+ .width = 1,
+ },
+ [VCAP_KF_L3_DST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 89,
+ .width = 1,
+ },
+ [VCAP_KF_L2_DMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 90,
+ .width = 48,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 138,
+ .width = 48,
+ },
+ [VCAP_KF_ETYPE_LEN_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 186,
+ .width = 1,
+ },
+ [VCAP_KF_ETYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 187,
+ .width = 16,
+ },
+ [VCAP_KF_L2_PAYLOAD_ETYPE] = {
+ .type = VCAP_FIELD_U64,
+ .offset = 203,
+ .width = 64,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 267,
+ .width = 16,
+ },
+ [VCAP_KF_OAM_CCM_CNTS_EQ0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 283,
+ .width = 1,
+ },
+ [VCAP_KF_OAM_Y1731_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 284,
+ .width = 1,
+ },
+};
+
+static const struct vcap_field is2_arp_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 4,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 4,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_PAG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 5,
+ .width = 8,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_L3] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 14,
+ .width = 4,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 18,
+ .width = 2,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 20,
+ .width = 32,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 52,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 53,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 54,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 55,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 56,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 68,
+ .width = 13,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 81,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 82,
+ .width = 3,
+ },
+ [VCAP_KF_L2_FWD_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 85,
+ .width = 1,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 86,
+ .width = 48,
+ },
+ [VCAP_KF_ARP_ADDR_SPACE_OK_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 134,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_PROTO_SPACE_OK_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 135,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_LEN_OK_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 136,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_TGT_MATCH_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 137,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_SENDER_MATCH_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 138,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_OPCODE_UNKNOWN_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 139,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_OPCODE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 140,
+ .width = 2,
+ },
+ [VCAP_KF_L3_IP4_DIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 142,
+ .width = 32,
+ },
+ [VCAP_KF_L3_IP4_SIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 174,
+ .width = 32,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 206,
+ .width = 1,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 207,
+ .width = 16,
+ },
+};
+
+static const struct vcap_field is2_ip4_tcp_udp_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 4,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 4,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_PAG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 5,
+ .width = 8,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_L3] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 14,
+ .width = 4,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 18,
+ .width = 2,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 20,
+ .width = 32,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 52,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 53,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 54,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 55,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 56,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 68,
+ .width = 13,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 81,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 82,
+ .width = 3,
+ },
+ [VCAP_KF_L2_FWD_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 85,
+ .width = 1,
+ },
+ [VCAP_KF_L3_RT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 88,
+ .width = 1,
+ },
+ [VCAP_KF_L3_DST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 89,
+ .width = 1,
+ },
+ [VCAP_KF_IP4_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 90,
+ .width = 1,
+ },
+ [VCAP_KF_L3_FRAGMENT_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 91,
+ .width = 2,
+ },
+ [VCAP_KF_L3_FRAG_INVLD_L4_LEN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 93,
+ .width = 1,
+ },
+ [VCAP_KF_L3_OPTIONS_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 94,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TTL_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 95,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TOS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 96,
+ .width = 8,
+ },
+ [VCAP_KF_L3_IP4_DIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 104,
+ .width = 32,
+ },
+ [VCAP_KF_L3_IP4_SIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 136,
+ .width = 32,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 168,
+ .width = 1,
+ },
+ [VCAP_KF_TCP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 169,
+ .width = 1,
+ },
+ [VCAP_KF_L4_DPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 170,
+ .width = 16,
+ },
+ [VCAP_KF_L4_SPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 186,
+ .width = 16,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 202,
+ .width = 16,
+ },
+ [VCAP_KF_L4_SPORT_EQ_DPORT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 218,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SEQUENCE_EQ0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 219,
+ .width = 1,
+ },
+ [VCAP_KF_L4_FIN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 220,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SYN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 221,
+ .width = 1,
+ },
+ [VCAP_KF_L4_RST] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 222,
+ .width = 1,
+ },
+ [VCAP_KF_L4_PSH] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 223,
+ .width = 1,
+ },
+ [VCAP_KF_L4_ACK] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 224,
+ .width = 1,
+ },
+ [VCAP_KF_L4_URG] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 225,
+ .width = 1,
+ },
+ [VCAP_KF_L4_PAYLOAD] = {
+ .type = VCAP_FIELD_U64,
+ .offset = 226,
+ .width = 64,
+ },
+};
+
+static const struct vcap_field is2_ip4_other_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 4,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 4,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_PAG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 5,
+ .width = 8,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_L3] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 14,
+ .width = 4,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 18,
+ .width = 2,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 20,
+ .width = 32,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 52,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 53,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 54,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 55,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 56,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 68,
+ .width = 13,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 81,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 82,
+ .width = 3,
+ },
+ [VCAP_KF_L2_FWD_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 85,
+ .width = 1,
+ },
+ [VCAP_KF_L3_RT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 88,
+ .width = 1,
+ },
+ [VCAP_KF_L3_DST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 89,
+ .width = 1,
+ },
+ [VCAP_KF_IP4_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 90,
+ .width = 1,
+ },
+ [VCAP_KF_L3_FRAGMENT_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 91,
+ .width = 2,
+ },
+ [VCAP_KF_L3_FRAG_INVLD_L4_LEN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 93,
+ .width = 1,
+ },
+ [VCAP_KF_L3_OPTIONS_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 94,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TTL_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 95,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TOS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 96,
+ .width = 8,
+ },
+ [VCAP_KF_L3_IP4_DIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 104,
+ .width = 32,
+ },
+ [VCAP_KF_L3_IP4_SIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 136,
+ .width = 32,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 168,
+ .width = 1,
+ },
+ [VCAP_KF_L3_IP_PROTO] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 169,
+ .width = 8,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 177,
+ .width = 16,
+ },
+ [VCAP_KF_L3_PAYLOAD] = {
+ .type = VCAP_FIELD_U112,
+ .offset = 193,
+ .width = 96,
+ },
+};
+
+static const struct vcap_field is2_ip6_std_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 4,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 4,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_PAG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 5,
+ .width = 8,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_L3] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 14,
+ .width = 4,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 18,
+ .width = 2,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 20,
+ .width = 32,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 52,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 53,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 54,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 55,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 56,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 68,
+ .width = 13,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 81,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 82,
+ .width = 3,
+ },
+ [VCAP_KF_L2_FWD_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 85,
+ .width = 1,
+ },
+ [VCAP_KF_L3_RT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 88,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TTL_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 90,
+ .width = 1,
+ },
+ [VCAP_KF_L3_IP6_SIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 91,
+ .width = 128,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 219,
+ .width = 1,
+ },
+ [VCAP_KF_L3_IP_PROTO] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 220,
+ .width = 8,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 228,
+ .width = 16,
+ },
+ [VCAP_KF_L3_PAYLOAD] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 244,
+ .width = 40,
+ },
+};
+
+static const struct vcap_field is2_ip_7tuple_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 2,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 2,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_PAG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 3,
+ .width = 8,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_L3] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 11,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 12,
+ .width = 4,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 16,
+ .width = 2,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U72,
+ .offset = 18,
+ .width = 65,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 83,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 84,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 85,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 86,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 87,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 99,
+ .width = 13,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 112,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 113,
+ .width = 3,
+ },
+ [VCAP_KF_L2_FWD_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 116,
+ .width = 1,
+ },
+ [VCAP_KF_L3_RT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 119,
+ .width = 1,
+ },
+ [VCAP_KF_L3_DST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 120,
+ .width = 1,
+ },
+ [VCAP_KF_L2_DMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 121,
+ .width = 48,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 169,
+ .width = 48,
+ },
+ [VCAP_KF_IP4_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 217,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TTL_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 218,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TOS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 219,
+ .width = 8,
+ },
+ [VCAP_KF_L3_IP6_DIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 227,
+ .width = 128,
+ },
+ [VCAP_KF_L3_IP6_SIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 355,
+ .width = 128,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 483,
+ .width = 1,
+ },
+ [VCAP_KF_TCP_UDP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 484,
+ .width = 1,
+ },
+ [VCAP_KF_TCP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 485,
+ .width = 1,
+ },
+ [VCAP_KF_L4_DPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 486,
+ .width = 16,
+ },
+ [VCAP_KF_L4_SPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 502,
+ .width = 16,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 518,
+ .width = 16,
+ },
+ [VCAP_KF_L4_SPORT_EQ_DPORT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 534,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SEQUENCE_EQ0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 535,
+ .width = 1,
+ },
+ [VCAP_KF_L4_FIN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 536,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SYN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 537,
+ .width = 1,
+ },
+ [VCAP_KF_L4_RST] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 538,
+ .width = 1,
+ },
+ [VCAP_KF_L4_PSH] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 539,
+ .width = 1,
+ },
+ [VCAP_KF_L4_ACK] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 540,
+ .width = 1,
+ },
+ [VCAP_KF_L4_URG] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 541,
+ .width = 1,
+ },
+ [VCAP_KF_L4_PAYLOAD] = {
+ .type = VCAP_FIELD_U64,
+ .offset = 542,
+ .width = 64,
+ },
+};
+
+static const struct vcap_field es2_mac_etype_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 3,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 3,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 14,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 15,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 16,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 28,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 29,
+ .width = 13,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 42,
+ .width = 3,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 45,
+ .width = 32,
+ },
+ [VCAP_KF_IF_IGR_PORT_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 77,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 78,
+ .width = 9,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 87,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 90,
+ .width = 1,
+ },
+ [VCAP_KF_COSID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 91,
+ .width = 3,
+ },
+ [VCAP_KF_L3_DPL_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 94,
+ .width = 1,
+ },
+ [VCAP_KF_L3_RT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 95,
+ .width = 1,
+ },
+ [VCAP_KF_L2_DMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 99,
+ .width = 48,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 147,
+ .width = 48,
+ },
+ [VCAP_KF_ETYPE_LEN_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 195,
+ .width = 1,
+ },
+ [VCAP_KF_ETYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 196,
+ .width = 16,
+ },
+ [VCAP_KF_L2_PAYLOAD_ETYPE] = {
+ .type = VCAP_FIELD_U64,
+ .offset = 212,
+ .width = 64,
+ },
+ [VCAP_KF_OAM_CCM_CNTS_EQ0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 276,
+ .width = 1,
+ },
+ [VCAP_KF_OAM_Y1731_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 277,
+ .width = 1,
+ },
+};
+
+static const struct vcap_field es2_arp_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 3,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 3,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 14,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 15,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 16,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 28,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 29,
+ .width = 13,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 42,
+ .width = 3,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 45,
+ .width = 32,
+ },
+ [VCAP_KF_IF_IGR_PORT_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 77,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 78,
+ .width = 9,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 87,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 90,
+ .width = 1,
+ },
+ [VCAP_KF_COSID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 91,
+ .width = 3,
+ },
+ [VCAP_KF_L3_DPL_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 94,
+ .width = 1,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 98,
+ .width = 48,
+ },
+ [VCAP_KF_ARP_ADDR_SPACE_OK_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 146,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_PROTO_SPACE_OK_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 147,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_LEN_OK_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 148,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_TGT_MATCH_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 149,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_SENDER_MATCH_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 150,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_OPCODE_UNKNOWN_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 151,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_OPCODE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 152,
+ .width = 2,
+ },
+ [VCAP_KF_L3_IP4_DIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 154,
+ .width = 32,
+ },
+ [VCAP_KF_L3_IP4_SIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 186,
+ .width = 32,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 218,
+ .width = 1,
+ },
+};
+
+static const struct vcap_field es2_ip4_tcp_udp_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 3,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 3,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 14,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 15,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 16,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 28,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 29,
+ .width = 13,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 42,
+ .width = 3,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 45,
+ .width = 32,
+ },
+ [VCAP_KF_IF_IGR_PORT_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 77,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 78,
+ .width = 9,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 87,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 90,
+ .width = 1,
+ },
+ [VCAP_KF_COSID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 91,
+ .width = 3,
+ },
+ [VCAP_KF_L3_DPL_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 94,
+ .width = 1,
+ },
+ [VCAP_KF_L3_RT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 95,
+ .width = 1,
+ },
+ [VCAP_KF_IP4_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 99,
+ .width = 1,
+ },
+ [VCAP_KF_L3_FRAGMENT_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 100,
+ .width = 2,
+ },
+ [VCAP_KF_L3_OPTIONS_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 102,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TTL_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 103,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TOS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 104,
+ .width = 8,
+ },
+ [VCAP_KF_L3_IP4_DIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 112,
+ .width = 32,
+ },
+ [VCAP_KF_L3_IP4_SIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 144,
+ .width = 32,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 176,
+ .width = 1,
+ },
+ [VCAP_KF_TCP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 177,
+ .width = 1,
+ },
+ [VCAP_KF_L4_DPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 178,
+ .width = 16,
+ },
+ [VCAP_KF_L4_SPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 194,
+ .width = 16,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 210,
+ .width = 16,
+ },
+ [VCAP_KF_L4_SPORT_EQ_DPORT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 226,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SEQUENCE_EQ0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 227,
+ .width = 1,
+ },
+ [VCAP_KF_L4_FIN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 228,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SYN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 229,
+ .width = 1,
+ },
+ [VCAP_KF_L4_RST] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 230,
+ .width = 1,
+ },
+ [VCAP_KF_L4_PSH] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 231,
+ .width = 1,
+ },
+ [VCAP_KF_L4_ACK] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 232,
+ .width = 1,
+ },
+ [VCAP_KF_L4_URG] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 233,
+ .width = 1,
+ },
+ [VCAP_KF_L4_PAYLOAD] = {
+ .type = VCAP_FIELD_U64,
+ .offset = 234,
+ .width = 64,
+ },
+};
+
+static const struct vcap_field es2_ip4_other_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 3,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 3,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 14,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 15,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 16,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 28,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 29,
+ .width = 13,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 42,
+ .width = 3,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 45,
+ .width = 32,
+ },
+ [VCAP_KF_IF_IGR_PORT_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 77,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 78,
+ .width = 9,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 87,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 90,
+ .width = 1,
+ },
+ [VCAP_KF_COSID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 91,
+ .width = 3,
+ },
+ [VCAP_KF_L3_DPL_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 94,
+ .width = 1,
+ },
+ [VCAP_KF_L3_RT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 95,
+ .width = 1,
+ },
+ [VCAP_KF_IP4_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 99,
+ .width = 1,
+ },
+ [VCAP_KF_L3_FRAGMENT_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 100,
+ .width = 2,
+ },
+ [VCAP_KF_L3_OPTIONS_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 102,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TTL_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 103,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TOS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 104,
+ .width = 8,
+ },
+ [VCAP_KF_L3_IP4_DIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 112,
+ .width = 32,
+ },
+ [VCAP_KF_L3_IP4_SIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 144,
+ .width = 32,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 176,
+ .width = 1,
+ },
+ [VCAP_KF_L3_IP_PROTO] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 177,
+ .width = 8,
+ },
+ [VCAP_KF_L3_PAYLOAD] = {
+ .type = VCAP_FIELD_U112,
+ .offset = 185,
+ .width = 96,
+ },
+};
+
+static const struct vcap_field es2_ip_7tuple_keyfield[] = {
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 0,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 10,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 11,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 12,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 13,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 25,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 26,
+ .width = 13,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 39,
+ .width = 3,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 42,
+ .width = 32,
+ },
+ [VCAP_KF_IF_IGR_PORT_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 74,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 75,
+ .width = 9,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 84,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 87,
+ .width = 1,
+ },
+ [VCAP_KF_COSID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 88,
+ .width = 3,
+ },
+ [VCAP_KF_L3_DPL_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 91,
+ .width = 1,
+ },
+ [VCAP_KF_L3_RT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 92,
+ .width = 1,
+ },
+ [VCAP_KF_L2_DMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 96,
+ .width = 48,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 144,
+ .width = 48,
+ },
+ [VCAP_KF_IP4_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 192,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TTL_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 193,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TOS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 194,
+ .width = 8,
+ },
+ [VCAP_KF_L3_IP6_DIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 202,
+ .width = 128,
+ },
+ [VCAP_KF_L3_IP6_SIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 330,
+ .width = 128,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 458,
+ .width = 1,
+ },
+ [VCAP_KF_TCP_UDP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 459,
+ .width = 1,
+ },
+ [VCAP_KF_TCP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 460,
+ .width = 1,
+ },
+ [VCAP_KF_L4_DPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 461,
+ .width = 16,
+ },
+ [VCAP_KF_L4_SPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 477,
+ .width = 16,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 493,
+ .width = 16,
+ },
+ [VCAP_KF_L4_SPORT_EQ_DPORT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 509,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SEQUENCE_EQ0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 510,
+ .width = 1,
+ },
+ [VCAP_KF_L4_FIN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 511,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SYN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 512,
+ .width = 1,
+ },
+ [VCAP_KF_L4_RST] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 513,
+ .width = 1,
+ },
+ [VCAP_KF_L4_PSH] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 514,
+ .width = 1,
+ },
+ [VCAP_KF_L4_ACK] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 515,
+ .width = 1,
+ },
+ [VCAP_KF_L4_URG] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 516,
+ .width = 1,
+ },
+ [VCAP_KF_L4_PAYLOAD] = {
+ .type = VCAP_FIELD_U64,
+ .offset = 517,
+ .width = 64,
+ },
+};
+
+static const struct vcap_field es2_ip6_std_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 3,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 3,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 14,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 15,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 16,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 28,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 29,
+ .width = 13,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 42,
+ .width = 3,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 45,
+ .width = 32,
+ },
+ [VCAP_KF_IF_IGR_PORT_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 77,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 78,
+ .width = 9,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 87,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 90,
+ .width = 1,
+ },
+ [VCAP_KF_COSID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 91,
+ .width = 3,
+ },
+ [VCAP_KF_L3_DPL_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 94,
+ .width = 1,
+ },
+ [VCAP_KF_L3_RT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 95,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TTL_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 99,
+ .width = 1,
+ },
+ [VCAP_KF_L3_IP6_SIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 100,
+ .width = 128,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 228,
+ .width = 1,
+ },
+ [VCAP_KF_L3_IP_PROTO] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 229,
+ .width = 8,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 237,
+ .width = 16,
+ },
+ [VCAP_KF_L3_PAYLOAD] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 253,
+ .width = 40,
+ },
+};
+
+static const struct vcap_field es2_ip4_vid_keyfield[] = {
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 0,
+ .width = 1,
+ },
+ [VCAP_KF_ACL_GRP_ID] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 1,
+ .width = 8,
+ },
+ [VCAP_KF_PROT_ACTIVE] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 9,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 10,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 11,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 12,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 13,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 25,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 26,
+ .width = 13,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 39,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 42,
+ .width = 1,
+ },
+ [VCAP_KF_COSID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 43,
+ .width = 3,
+ },
+ [VCAP_KF_L3_DPL_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 46,
+ .width = 1,
+ },
+ [VCAP_KF_L3_RT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 47,
+ .width = 1,
+ },
+ [VCAP_KF_ES0_ISDX_KEY_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 48,
+ .width = 1,
+ },
+ [VCAP_KF_MIRROR_PROBE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 49,
+ .width = 2,
+ },
+ [VCAP_KF_IP4_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 51,
+ .width = 1,
+ },
+ [VCAP_KF_L3_IP4_DIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 52,
+ .width = 32,
+ },
+ [VCAP_KF_L3_IP4_SIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 84,
+ .width = 32,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 116,
+ .width = 16,
+ },
+};
+
+static const struct vcap_field es2_ip6_vid_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 3,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 3,
+ .width = 1,
+ },
+ [VCAP_KF_ACL_GRP_ID] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 4,
+ .width = 8,
+ },
+ [VCAP_KF_PROT_ACTIVE] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 12,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 14,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 15,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 16,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 28,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 29,
+ .width = 13,
+ },
+ [VCAP_KF_L3_RT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 42,
+ .width = 1,
+ },
+ [VCAP_KF_L3_IP6_DIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 43,
+ .width = 128,
+ },
+ [VCAP_KF_L3_IP6_SIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 171,
+ .width = 128,
+ },
+};
+
+/* keyfield_set */
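+/* Each vcap_set (key and action variants alike) describes how a field set
+ * is packed into a VCAP row: the set occupies sw_per_item subwords, and
+ * sw_cnt instances fit in one row (sw_per_item * sw_cnt == 12 for every
+ * set in this file, consistent with a 12-subword row). A type_id of -1
+ * marks a set that carries no TYPE field and is identified by its size
+ * alone; the matching field tables omit VCAP_KF_TYPE/VCAP_AF_TYPE.
+ */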
+static const struct vcap_set is0_keyfield_set[] = {
+ [VCAP_KFS_LL_FULL] = {
+ .type_id = 0,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_NORMAL_7TUPLE] = {
+ .type_id = 0,
+ .sw_per_item = 12,
+ .sw_cnt = 1,
+ },
+ [VCAP_KFS_NORMAL_5TUPLE_IP4] = {
+ .type_id = 2,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_PURE_5TUPLE_IP4] = {
+ .type_id = 2,
+ .sw_per_item = 3,
+ .sw_cnt = 4,
+ },
+ [VCAP_KFS_ETAG] = {
+ .type_id = 3,
+ .sw_per_item = 2,
+ .sw_cnt = 6,
+ },
+};
+
+static const struct vcap_set is2_keyfield_set[] = {
+ [VCAP_KFS_MAC_ETYPE] = {
+ .type_id = 0,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_ARP] = {
+ .type_id = 3,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_IP4_TCP_UDP] = {
+ .type_id = 4,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_IP4_OTHER] = {
+ .type_id = 5,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_IP6_STD] = {
+ .type_id = 6,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_IP_7TUPLE] = {
+ .type_id = 1,
+ .sw_per_item = 12,
+ .sw_cnt = 1,
+ },
+};
+
+static const struct vcap_set es2_keyfield_set[] = {
+ [VCAP_KFS_MAC_ETYPE] = {
+ .type_id = 0,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_ARP] = {
+ .type_id = 1,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_IP4_TCP_UDP] = {
+ .type_id = 2,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_IP4_OTHER] = {
+ .type_id = 3,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_IP_7TUPLE] = {
+ .type_id = -1,
+ .sw_per_item = 12,
+ .sw_cnt = 1,
+ },
+ [VCAP_KFS_IP6_STD] = {
+ .type_id = 4,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_IP4_VID] = {
+ .type_id = -1,
+ .sw_per_item = 3,
+ .sw_cnt = 4,
+ },
+ [VCAP_KFS_IP6_VID] = {
+ .type_id = 5,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+};
+
+/* keyfield_set map */
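+/* Indexed by the keyset enum, each map entry points at the field table
+ * that lays out that keyset; the companion *_map_size arrays below record
+ * the ARRAY_SIZE of each table so a consumer can bound-check a field enum
+ * before indexing into the table.
+ */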
+static const struct vcap_field *is0_keyfield_set_map[] = {
+ [VCAP_KFS_LL_FULL] = is0_ll_full_keyfield,
+ [VCAP_KFS_NORMAL_7TUPLE] = is0_normal_7tuple_keyfield,
+ [VCAP_KFS_NORMAL_5TUPLE_IP4] = is0_normal_5tuple_ip4_keyfield,
+ [VCAP_KFS_PURE_5TUPLE_IP4] = is0_pure_5tuple_ip4_keyfield,
+ [VCAP_KFS_ETAG] = is0_etag_keyfield,
+};
+
+static const struct vcap_field *is2_keyfield_set_map[] = {
+ [VCAP_KFS_MAC_ETYPE] = is2_mac_etype_keyfield,
+ [VCAP_KFS_ARP] = is2_arp_keyfield,
+ [VCAP_KFS_IP4_TCP_UDP] = is2_ip4_tcp_udp_keyfield,
+ [VCAP_KFS_IP4_OTHER] = is2_ip4_other_keyfield,
+ [VCAP_KFS_IP6_STD] = is2_ip6_std_keyfield,
+ [VCAP_KFS_IP_7TUPLE] = is2_ip_7tuple_keyfield,
+};
+
+static const struct vcap_field *es2_keyfield_set_map[] = {
+ [VCAP_KFS_MAC_ETYPE] = es2_mac_etype_keyfield,
+ [VCAP_KFS_ARP] = es2_arp_keyfield,
+ [VCAP_KFS_IP4_TCP_UDP] = es2_ip4_tcp_udp_keyfield,
+ [VCAP_KFS_IP4_OTHER] = es2_ip4_other_keyfield,
+ [VCAP_KFS_IP_7TUPLE] = es2_ip_7tuple_keyfield,
+ [VCAP_KFS_IP6_STD] = es2_ip6_std_keyfield,
+ [VCAP_KFS_IP4_VID] = es2_ip4_vid_keyfield,
+ [VCAP_KFS_IP6_VID] = es2_ip6_vid_keyfield,
+};
+
+/* keyfield_set map sizes */
+static int is0_keyfield_set_map_size[] = {
+ [VCAP_KFS_LL_FULL] = ARRAY_SIZE(is0_ll_full_keyfield),
+ [VCAP_KFS_NORMAL_7TUPLE] = ARRAY_SIZE(is0_normal_7tuple_keyfield),
+ [VCAP_KFS_NORMAL_5TUPLE_IP4] = ARRAY_SIZE(is0_normal_5tuple_ip4_keyfield),
+ [VCAP_KFS_PURE_5TUPLE_IP4] = ARRAY_SIZE(is0_pure_5tuple_ip4_keyfield),
+ [VCAP_KFS_ETAG] = ARRAY_SIZE(is0_etag_keyfield),
+};
+
+static int is2_keyfield_set_map_size[] = {
+ [VCAP_KFS_MAC_ETYPE] = ARRAY_SIZE(is2_mac_etype_keyfield),
+ [VCAP_KFS_ARP] = ARRAY_SIZE(is2_arp_keyfield),
+ [VCAP_KFS_IP4_TCP_UDP] = ARRAY_SIZE(is2_ip4_tcp_udp_keyfield),
+ [VCAP_KFS_IP4_OTHER] = ARRAY_SIZE(is2_ip4_other_keyfield),
+ [VCAP_KFS_IP6_STD] = ARRAY_SIZE(is2_ip6_std_keyfield),
+ [VCAP_KFS_IP_7TUPLE] = ARRAY_SIZE(is2_ip_7tuple_keyfield),
+};
+
+static int es2_keyfield_set_map_size[] = {
+ [VCAP_KFS_MAC_ETYPE] = ARRAY_SIZE(es2_mac_etype_keyfield),
+ [VCAP_KFS_ARP] = ARRAY_SIZE(es2_arp_keyfield),
+ [VCAP_KFS_IP4_TCP_UDP] = ARRAY_SIZE(es2_ip4_tcp_udp_keyfield),
+ [VCAP_KFS_IP4_OTHER] = ARRAY_SIZE(es2_ip4_other_keyfield),
+ [VCAP_KFS_IP_7TUPLE] = ARRAY_SIZE(es2_ip_7tuple_keyfield),
+ [VCAP_KFS_IP6_STD] = ARRAY_SIZE(es2_ip6_std_keyfield),
+ [VCAP_KFS_IP4_VID] = ARRAY_SIZE(es2_ip4_vid_keyfield),
+ [VCAP_KFS_IP6_VID] = ARRAY_SIZE(es2_ip6_vid_keyfield),
+};
+
+/* actionfields */
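+/* The action tables mirror the key tables above: for each actionfield set
+ * they give the bit offset and width of every action field within the
+ * action word.
+ */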
+static const struct vcap_field is0_classification_actionfield[] = {
+ [VCAP_AF_TYPE] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 0,
+ .width = 1,
+ },
+ [VCAP_AF_DSCP_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 1,
+ .width = 1,
+ },
+ [VCAP_AF_DSCP_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 2,
+ .width = 6,
+ },
+ [VCAP_AF_QOS_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 12,
+ .width = 1,
+ },
+ [VCAP_AF_QOS_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 13,
+ .width = 3,
+ },
+ [VCAP_AF_DP_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 16,
+ .width = 1,
+ },
+ [VCAP_AF_DP_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 17,
+ .width = 2,
+ },
+ [VCAP_AF_DEI_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 19,
+ .width = 1,
+ },
+ [VCAP_AF_DEI_VAL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 20,
+ .width = 1,
+ },
+ [VCAP_AF_PCP_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 21,
+ .width = 1,
+ },
+ [VCAP_AF_PCP_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 22,
+ .width = 3,
+ },
+ [VCAP_AF_MAP_LOOKUP_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 25,
+ .width = 2,
+ },
+ [VCAP_AF_MAP_KEY] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 27,
+ .width = 3,
+ },
+ [VCAP_AF_MAP_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 30,
+ .width = 9,
+ },
+ [VCAP_AF_CLS_VID_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 39,
+ .width = 3,
+ },
+ [VCAP_AF_VID_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 45,
+ .width = 13,
+ },
+ [VCAP_AF_ISDX_ADD_REPLACE_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 68,
+ .width = 1,
+ },
+ [VCAP_AF_ISDX_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 69,
+ .width = 12,
+ },
+ [VCAP_AF_PAG_OVERRIDE_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 109,
+ .width = 8,
+ },
+ [VCAP_AF_PAG_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 117,
+ .width = 8,
+ },
+ [VCAP_AF_NXT_IDX_CTRL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 171,
+ .width = 3,
+ },
+ [VCAP_AF_NXT_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 174,
+ .width = 12,
+ },
+};
+
+static const struct vcap_field is0_full_actionfield[] = {
+ [VCAP_AF_DSCP_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 0,
+ .width = 1,
+ },
+ [VCAP_AF_DSCP_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 1,
+ .width = 6,
+ },
+ [VCAP_AF_QOS_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 11,
+ .width = 1,
+ },
+ [VCAP_AF_QOS_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 12,
+ .width = 3,
+ },
+ [VCAP_AF_DP_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 15,
+ .width = 1,
+ },
+ [VCAP_AF_DP_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 16,
+ .width = 2,
+ },
+ [VCAP_AF_DEI_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 18,
+ .width = 1,
+ },
+ [VCAP_AF_DEI_VAL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 19,
+ .width = 1,
+ },
+ [VCAP_AF_PCP_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 20,
+ .width = 1,
+ },
+ [VCAP_AF_PCP_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 21,
+ .width = 3,
+ },
+ [VCAP_AF_MAP_LOOKUP_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 24,
+ .width = 2,
+ },
+ [VCAP_AF_MAP_KEY] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 26,
+ .width = 3,
+ },
+ [VCAP_AF_MAP_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 29,
+ .width = 9,
+ },
+ [VCAP_AF_CLS_VID_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 38,
+ .width = 3,
+ },
+ [VCAP_AF_VID_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 44,
+ .width = 13,
+ },
+ [VCAP_AF_ISDX_ADD_REPLACE_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 67,
+ .width = 1,
+ },
+ [VCAP_AF_ISDX_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 68,
+ .width = 12,
+ },
+ [VCAP_AF_MASK_MODE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 80,
+ .width = 3,
+ },
+ [VCAP_AF_PORT_MASK] = {
+ .type = VCAP_FIELD_U72,
+ .offset = 83,
+ .width = 65,
+ },
+ [VCAP_AF_PAG_OVERRIDE_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 204,
+ .width = 8,
+ },
+ [VCAP_AF_PAG_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 212,
+ .width = 8,
+ },
+ [VCAP_AF_NXT_IDX_CTRL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 298,
+ .width = 3,
+ },
+ [VCAP_AF_NXT_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 301,
+ .width = 12,
+ },
+};
+
+static const struct vcap_field is0_class_reduced_actionfield[] = {
+ [VCAP_AF_TYPE] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 0,
+ .width = 1,
+ },
+ [VCAP_AF_QOS_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 5,
+ .width = 1,
+ },
+ [VCAP_AF_QOS_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 6,
+ .width = 3,
+ },
+ [VCAP_AF_DP_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 9,
+ .width = 1,
+ },
+ [VCAP_AF_DP_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 10,
+ .width = 2,
+ },
+ [VCAP_AF_MAP_LOOKUP_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 12,
+ .width = 2,
+ },
+ [VCAP_AF_MAP_KEY] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 14,
+ .width = 3,
+ },
+ [VCAP_AF_CLS_VID_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 17,
+ .width = 3,
+ },
+ [VCAP_AF_VID_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 23,
+ .width = 13,
+ },
+ [VCAP_AF_ISDX_ADD_REPLACE_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 46,
+ .width = 1,
+ },
+ [VCAP_AF_ISDX_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 47,
+ .width = 12,
+ },
+ [VCAP_AF_NXT_IDX_CTRL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 90,
+ .width = 3,
+ },
+ [VCAP_AF_NXT_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 93,
+ .width = 12,
+ },
+};
+
+static const struct vcap_field is2_base_type_actionfield[] = {
+ [VCAP_AF_PIPELINE_FORCE_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 1,
+ .width = 1,
+ },
+ [VCAP_AF_PIPELINE_PT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 2,
+ .width = 5,
+ },
+ [VCAP_AF_HIT_ME_ONCE] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 7,
+ .width = 1,
+ },
+ [VCAP_AF_INTR_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 8,
+ .width = 1,
+ },
+ [VCAP_AF_CPU_COPY_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 9,
+ .width = 1,
+ },
+ [VCAP_AF_CPU_QUEUE_NUM] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 10,
+ .width = 3,
+ },
+ [VCAP_AF_LRN_DIS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 14,
+ .width = 1,
+ },
+ [VCAP_AF_RT_DIS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 15,
+ .width = 1,
+ },
+ [VCAP_AF_POLICE_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 16,
+ .width = 1,
+ },
+ [VCAP_AF_POLICE_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 17,
+ .width = 6,
+ },
+ [VCAP_AF_IGNORE_PIPELINE_CTRL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 23,
+ .width = 1,
+ },
+ [VCAP_AF_MASK_MODE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 27,
+ .width = 3,
+ },
+ [VCAP_AF_PORT_MASK] = {
+ .type = VCAP_FIELD_U72,
+ .offset = 30,
+ .width = 68,
+ },
+ [VCAP_AF_MIRROR_PROBE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 111,
+ .width = 2,
+ },
+ [VCAP_AF_MATCH_ID] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 159,
+ .width = 16,
+ },
+ [VCAP_AF_MATCH_ID_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 175,
+ .width = 16,
+ },
+ [VCAP_AF_CNT_ID] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 191,
+ .width = 12,
+ },
+};
+
+static const struct vcap_field es2_base_type_actionfield[] = {
+ [VCAP_AF_HIT_ME_ONCE] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 0,
+ .width = 1,
+ },
+ [VCAP_AF_INTR_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 1,
+ .width = 1,
+ },
+ [VCAP_AF_FWD_MODE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 2,
+ .width = 2,
+ },
+ [VCAP_AF_COPY_QUEUE_NUM] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 4,
+ .width = 16,
+ },
+ [VCAP_AF_COPY_PORT_NUM] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 20,
+ .width = 7,
+ },
+ [VCAP_AF_MIRROR_PROBE_ID] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 27,
+ .width = 2,
+ },
+ [VCAP_AF_CPU_COPY_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 29,
+ .width = 1,
+ },
+ [VCAP_AF_CPU_QUEUE_NUM] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 30,
+ .width = 3,
+ },
+ [VCAP_AF_POLICE_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 33,
+ .width = 1,
+ },
+ [VCAP_AF_POLICE_REMARK] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 34,
+ .width = 1,
+ },
+ [VCAP_AF_POLICE_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 35,
+ .width = 6,
+ },
+ [VCAP_AF_ES2_REW_CMD] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 41,
+ .width = 3,
+ },
+ [VCAP_AF_CNT_ID] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 44,
+ .width = 11,
+ },
+ [VCAP_AF_IGNORE_PIPELINE_CTRL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 55,
+ .width = 1,
+ },
+};
+
+/* actionfield_set */
+static const struct vcap_set is0_actionfield_set[] = {
+ [VCAP_AFS_CLASSIFICATION] = {
+ .type_id = 1,
+ .sw_per_item = 2,
+ .sw_cnt = 6,
+ },
+ [VCAP_AFS_FULL] = {
+ .type_id = -1,
+ .sw_per_item = 3,
+ .sw_cnt = 4,
+ },
+ [VCAP_AFS_CLASS_REDUCED] = {
+ .type_id = 1,
+ .sw_per_item = 1,
+ .sw_cnt = 12,
+ },
+};
+
+static const struct vcap_set is2_actionfield_set[] = {
+ [VCAP_AFS_BASE_TYPE] = {
+ .type_id = -1,
+ .sw_per_item = 3,
+ .sw_cnt = 4,
+ },
+};
+
+static const struct vcap_set es2_actionfield_set[] = {
+ [VCAP_AFS_BASE_TYPE] = {
+ .type_id = -1,
+ .sw_per_item = 3,
+ .sw_cnt = 4,
+ },
+};
+
+/* actionfield_set map */
+static const struct vcap_field *is0_actionfield_set_map[] = {
+ [VCAP_AFS_CLASSIFICATION] = is0_classification_actionfield,
+ [VCAP_AFS_FULL] = is0_full_actionfield,
+ [VCAP_AFS_CLASS_REDUCED] = is0_class_reduced_actionfield,
+};
+
+static const struct vcap_field *is2_actionfield_set_map[] = {
+ [VCAP_AFS_BASE_TYPE] = is2_base_type_actionfield,
+};
+
+static const struct vcap_field *es2_actionfield_set_map[] = {
+ [VCAP_AFS_BASE_TYPE] = es2_base_type_actionfield,
+};
+
+/* actionfield_set map sizes */
+static int is0_actionfield_set_map_size[] = {
+ [VCAP_AFS_CLASSIFICATION] = ARRAY_SIZE(is0_classification_actionfield),
+ [VCAP_AFS_FULL] = ARRAY_SIZE(is0_full_actionfield),
+ [VCAP_AFS_CLASS_REDUCED] = ARRAY_SIZE(is0_class_reduced_actionfield),
+};
+
+static int is2_actionfield_set_map_size[] = {
+ [VCAP_AFS_BASE_TYPE] = ARRAY_SIZE(is2_base_type_actionfield),
+};
+
+static int es2_actionfield_set_map_size[] = {
+ [VCAP_AFS_BASE_TYPE] = ARRAY_SIZE(es2_base_type_actionfield),
+};
+
+/* Type Groups */
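+/* A typegroup array describes the tag bits prepended to each subword of a
+ * multi-subword entry: each element places width tag bits holding value at
+ * the given bit offset. The offsets step by the subword width, e.g. 52
+ * bits for IS0 keys and 156 bits for IS2/ES2 keys; the actionfield
+ * typegroups further below step by their own action subword widths.
+ */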
+static const struct vcap_typegroup is0_x12_keyfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 5,
+ .value = 16,
+ },
+ {
+ .offset = 52,
+ .width = 1,
+ .value = 0,
+ },
+ {
+ .offset = 104,
+ .width = 2,
+ .value = 0,
+ },
+ {
+ .offset = 156,
+ .width = 3,
+ .value = 0,
+ },
+ {
+ .offset = 208,
+ .width = 2,
+ .value = 0,
+ },
+ {
+ .offset = 260,
+ .width = 1,
+ .value = 0,
+ },
+ {
+ .offset = 312,
+ .width = 4,
+ .value = 0,
+ },
+ {
+ .offset = 364,
+ .width = 1,
+ .value = 0,
+ },
+ {
+ .offset = 416,
+ .width = 2,
+ .value = 0,
+ },
+ {
+ .offset = 468,
+ .width = 3,
+ .value = 0,
+ },
+ {
+ .offset = 520,
+ .width = 2,
+ .value = 0,
+ },
+ {
+ .offset = 572,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup is0_x6_keyfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 4,
+ .value = 8,
+ },
+ {
+ .offset = 52,
+ .width = 1,
+ .value = 0,
+ },
+ {
+ .offset = 104,
+ .width = 2,
+ .value = 0,
+ },
+ {
+ .offset = 156,
+ .width = 3,
+ .value = 0,
+ },
+ {
+ .offset = 208,
+ .width = 2,
+ .value = 0,
+ },
+ {
+ .offset = 260,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup is0_x3_keyfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 3,
+ .value = 4,
+ },
+ {
+ .offset = 52,
+ .width = 2,
+ .value = 0,
+ },
+ {
+ .offset = 104,
+ .width = 2,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup is0_x2_keyfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 2,
+ .value = 2,
+ },
+ {
+ .offset = 52,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup is0_x1_keyfield_set_typegroups[] = {
+ {}
+};
+
+static const struct vcap_typegroup is2_x12_keyfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 3,
+ .value = 4,
+ },
+ {
+ .offset = 156,
+ .width = 1,
+ .value = 0,
+ },
+ {
+ .offset = 312,
+ .width = 2,
+ .value = 0,
+ },
+ {
+ .offset = 468,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup is2_x6_keyfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 2,
+ .value = 2,
+ },
+ {
+ .offset = 156,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup is2_x3_keyfield_set_typegroups[] = {
+ {}
+};
+
+static const struct vcap_typegroup is2_x1_keyfield_set_typegroups[] = {
+ {}
+};
+
+static const struct vcap_typegroup es2_x12_keyfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 3,
+ .value = 4,
+ },
+ {
+ .offset = 156,
+ .width = 1,
+ .value = 0,
+ },
+ {
+ .offset = 312,
+ .width = 2,
+ .value = 0,
+ },
+ {
+ .offset = 468,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup es2_x6_keyfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 2,
+ .value = 2,
+ },
+ {
+ .offset = 156,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup es2_x3_keyfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 1,
+ .value = 1,
+ },
+ {}
+};
+
+static const struct vcap_typegroup es2_x1_keyfield_set_typegroups[] = {
+ {}
+};
+
+static const struct vcap_typegroup *is0_keyfield_set_typegroups[] = {
+ [12] = is0_x12_keyfield_set_typegroups,
+ [6] = is0_x6_keyfield_set_typegroups,
+ [3] = is0_x3_keyfield_set_typegroups,
+ [2] = is0_x2_keyfield_set_typegroups,
+ [1] = is0_x1_keyfield_set_typegroups,
+ [13] = NULL,
+};
+
+static const struct vcap_typegroup *is2_keyfield_set_typegroups[] = {
+ [12] = is2_x12_keyfield_set_typegroups,
+ [6] = is2_x6_keyfield_set_typegroups,
+ [3] = is2_x3_keyfield_set_typegroups,
+ [1] = is2_x1_keyfield_set_typegroups,
+ [13] = NULL,
+};
+
+static const struct vcap_typegroup *es2_keyfield_set_typegroups[] = {
+ [12] = es2_x12_keyfield_set_typegroups,
+ [6] = es2_x6_keyfield_set_typegroups,
+ [3] = es2_x3_keyfield_set_typegroups,
+ [1] = es2_x1_keyfield_set_typegroups,
+ [13] = NULL,
+};
+
+static const struct vcap_typegroup is0_x3_actionfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 3,
+ .value = 4,
+ },
+ {
+ .offset = 110,
+ .width = 2,
+ .value = 0,
+ },
+ {
+ .offset = 220,
+ .width = 2,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup is0_x2_actionfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 2,
+ .value = 2,
+ },
+ {
+ .offset = 110,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup is0_x1_actionfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 1,
+ .value = 1,
+ },
+ {}
+};
+
+static const struct vcap_typegroup is2_x3_actionfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 2,
+ .value = 2,
+ },
+ {
+ .offset = 110,
+ .width = 1,
+ .value = 0,
+ },
+ {
+ .offset = 220,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup is2_x1_actionfield_set_typegroups[] = {
+ {}
+};
+
+static const struct vcap_typegroup es2_x3_actionfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 2,
+ .value = 2,
+ },
+ {
+ .offset = 21,
+ .width = 1,
+ .value = 0,
+ },
+ {
+ .offset = 42,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup es2_x1_actionfield_set_typegroups[] = {
+ {}
+};
+
+static const struct vcap_typegroup *is0_actionfield_set_typegroups[] = {
+ [3] = is0_x3_actionfield_set_typegroups,
+ [2] = is0_x2_actionfield_set_typegroups,
+ [1] = is0_x1_actionfield_set_typegroups,
+ [13] = NULL,
+};
+
+static const struct vcap_typegroup *is2_actionfield_set_typegroups[] = {
+ [3] = is2_x3_actionfield_set_typegroups,
+ [1] = is2_x1_actionfield_set_typegroups,
+ [13] = NULL,
+};
+
+static const struct vcap_typegroup *es2_actionfield_set_typegroups[] = {
+ [3] = es2_x3_actionfield_set_typegroups,
+ [1] = es2_x1_actionfield_set_typegroups,
+ [13] = NULL,
+};
+
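+/* Minimal sketch of how the tables above fit together; this helper is
+ * illustrative only and not part of the generated API. It assumes the
+ * shared VCAP definitions (struct vcap_field, enum vcap_keyfield_set,
+ * enum vcap_key_field) and relies on unused slots in the designated
+ * initializers being zero-filled, so a width of 0 means the field is not
+ * part of the keyset.
+ */
+static inline const struct vcap_field *
+is2_find_keyfield(enum vcap_keyfield_set kfs, enum vcap_key_field kf)
+{
+	const struct vcap_field *fields;
+
+	if (kfs >= ARRAY_SIZE(is2_keyfield_set_map))
+		return NULL;
+	fields = is2_keyfield_set_map[kfs];
+	if (!fields || kf >= is2_keyfield_set_map_size[kfs])
+		return NULL;
+	return fields[kf].width ? &fields[kf] : NULL;
+}
+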
+/* Keyfieldset names */
+static const char * const vcap_keyfield_set_names[] = {
+ [VCAP_KFS_NO_VALUE] = "(None)",
+ [VCAP_KFS_ARP] = "VCAP_KFS_ARP",
+ [VCAP_KFS_ETAG] = "VCAP_KFS_ETAG",
+ [VCAP_KFS_IP4_OTHER] = "VCAP_KFS_IP4_OTHER",
+ [VCAP_KFS_IP4_TCP_UDP] = "VCAP_KFS_IP4_TCP_UDP",
+ [VCAP_KFS_IP4_VID] = "VCAP_KFS_IP4_VID",
+ [VCAP_KFS_IP6_OTHER] = "VCAP_KFS_IP6_OTHER",
+ [VCAP_KFS_IP6_STD] = "VCAP_KFS_IP6_STD",
+ [VCAP_KFS_IP6_TCP_UDP] = "VCAP_KFS_IP6_TCP_UDP",
+ [VCAP_KFS_IP6_VID] = "VCAP_KFS_IP6_VID",
+ [VCAP_KFS_IP_7TUPLE] = "VCAP_KFS_IP_7TUPLE",
+ [VCAP_KFS_ISDX] = "VCAP_KFS_ISDX",
+ [VCAP_KFS_LL_FULL] = "VCAP_KFS_LL_FULL",
+ [VCAP_KFS_MAC_ETYPE] = "VCAP_KFS_MAC_ETYPE",
+ [VCAP_KFS_MAC_LLC] = "VCAP_KFS_MAC_LLC",
+ [VCAP_KFS_MAC_SNAP] = "VCAP_KFS_MAC_SNAP",
+ [VCAP_KFS_NORMAL_5TUPLE_IP4] = "VCAP_KFS_NORMAL_5TUPLE_IP4",
+ [VCAP_KFS_NORMAL_7TUPLE] = "VCAP_KFS_NORMAL_7TUPLE",
+ [VCAP_KFS_OAM] = "VCAP_KFS_OAM",
+ [VCAP_KFS_PURE_5TUPLE_IP4] = "VCAP_KFS_PURE_5TUPLE_IP4",
+ [VCAP_KFS_SMAC_SIP4] = "VCAP_KFS_SMAC_SIP4",
+ [VCAP_KFS_SMAC_SIP6] = "VCAP_KFS_SMAC_SIP6",
+};
+
+/* Actionfieldset names */
+static const char * const vcap_actionfield_set_names[] = {
+ [VCAP_AFS_NO_VALUE] = "(None)",
+ [VCAP_AFS_BASE_TYPE] = "VCAP_AFS_BASE_TYPE",
+ [VCAP_AFS_CLASSIFICATION] = "VCAP_AFS_CLASSIFICATION",
+ [VCAP_AFS_CLASS_REDUCED] = "VCAP_AFS_CLASS_REDUCED",
+ [VCAP_AFS_ES0] = "VCAP_AFS_ES0",
+ [VCAP_AFS_FULL] = "VCAP_AFS_FULL",
+ [VCAP_AFS_SMAC_SIP] = "VCAP_AFS_SMAC_SIP",
+};
+
+/* Keyfield names */
+static const char * const vcap_keyfield_names[] = {
+ [VCAP_KF_NO_VALUE] = "(None)",
+ [VCAP_KF_8021BR_ECID_BASE] = "8021BR_ECID_BASE",
+ [VCAP_KF_8021BR_ECID_EXT] = "8021BR_ECID_EXT",
+ [VCAP_KF_8021BR_E_TAGGED] = "8021BR_E_TAGGED",
+ [VCAP_KF_8021BR_GRP] = "8021BR_GRP",
+ [VCAP_KF_8021BR_IGR_ECID_BASE] = "8021BR_IGR_ECID_BASE",
+ [VCAP_KF_8021BR_IGR_ECID_EXT] = "8021BR_IGR_ECID_EXT",
+ [VCAP_KF_8021Q_DEI0] = "8021Q_DEI0",
+ [VCAP_KF_8021Q_DEI1] = "8021Q_DEI1",
+ [VCAP_KF_8021Q_DEI2] = "8021Q_DEI2",
+ [VCAP_KF_8021Q_DEI_CLS] = "8021Q_DEI_CLS",
+ [VCAP_KF_8021Q_PCP0] = "8021Q_PCP0",
+ [VCAP_KF_8021Q_PCP1] = "8021Q_PCP1",
+ [VCAP_KF_8021Q_PCP2] = "8021Q_PCP2",
+ [VCAP_KF_8021Q_PCP_CLS] = "8021Q_PCP_CLS",
+ [VCAP_KF_8021Q_TPID] = "8021Q_TPID",
+ [VCAP_KF_8021Q_TPID0] = "8021Q_TPID0",
+ [VCAP_KF_8021Q_TPID1] = "8021Q_TPID1",
+ [VCAP_KF_8021Q_TPID2] = "8021Q_TPID2",
+ [VCAP_KF_8021Q_VID0] = "8021Q_VID0",
+ [VCAP_KF_8021Q_VID1] = "8021Q_VID1",
+ [VCAP_KF_8021Q_VID2] = "8021Q_VID2",
+ [VCAP_KF_8021Q_VID_CLS] = "8021Q_VID_CLS",
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = "8021Q_VLAN_TAGGED_IS",
+ [VCAP_KF_8021Q_VLAN_TAGS] = "8021Q_VLAN_TAGS",
+ [VCAP_KF_ACL_GRP_ID] = "ACL_GRP_ID",
+ [VCAP_KF_ARP_ADDR_SPACE_OK_IS] = "ARP_ADDR_SPACE_OK_IS",
+ [VCAP_KF_ARP_LEN_OK_IS] = "ARP_LEN_OK_IS",
+ [VCAP_KF_ARP_OPCODE] = "ARP_OPCODE",
+ [VCAP_KF_ARP_OPCODE_UNKNOWN_IS] = "ARP_OPCODE_UNKNOWN_IS",
+ [VCAP_KF_ARP_PROTO_SPACE_OK_IS] = "ARP_PROTO_SPACE_OK_IS",
+ [VCAP_KF_ARP_SENDER_MATCH_IS] = "ARP_SENDER_MATCH_IS",
+ [VCAP_KF_ARP_TGT_MATCH_IS] = "ARP_TGT_MATCH_IS",
+ [VCAP_KF_COSID_CLS] = "COSID_CLS",
+ [VCAP_KF_ES0_ISDX_KEY_ENA] = "ES0_ISDX_KEY_ENA",
+ [VCAP_KF_ETYPE] = "ETYPE",
+ [VCAP_KF_ETYPE_LEN_IS] = "ETYPE_LEN_IS",
+ [VCAP_KF_HOST_MATCH] = "HOST_MATCH",
+ [VCAP_KF_IF_EGR_PORT_MASK] = "IF_EGR_PORT_MASK",
+ [VCAP_KF_IF_EGR_PORT_MASK_RNG] = "IF_EGR_PORT_MASK_RNG",
+ [VCAP_KF_IF_EGR_PORT_NO] = "IF_EGR_PORT_NO",
+ [VCAP_KF_IF_IGR_PORT] = "IF_IGR_PORT",
+ [VCAP_KF_IF_IGR_PORT_MASK] = "IF_IGR_PORT_MASK",
+ [VCAP_KF_IF_IGR_PORT_MASK_L3] = "IF_IGR_PORT_MASK_L3",
+ [VCAP_KF_IF_IGR_PORT_MASK_RNG] = "IF_IGR_PORT_MASK_RNG",
+ [VCAP_KF_IF_IGR_PORT_MASK_SEL] = "IF_IGR_PORT_MASK_SEL",
+ [VCAP_KF_IF_IGR_PORT_SEL] = "IF_IGR_PORT_SEL",
+ [VCAP_KF_IP4_IS] = "IP4_IS",
+ [VCAP_KF_IP_MC_IS] = "IP_MC_IS",
+ [VCAP_KF_IP_PAYLOAD_5TUPLE] = "IP_PAYLOAD_5TUPLE",
+ [VCAP_KF_IP_SNAP_IS] = "IP_SNAP_IS",
+ [VCAP_KF_ISDX_CLS] = "ISDX_CLS",
+ [VCAP_KF_ISDX_GT0_IS] = "ISDX_GT0_IS",
+ [VCAP_KF_L2_BC_IS] = "L2_BC_IS",
+ [VCAP_KF_L2_DMAC] = "L2_DMAC",
+ [VCAP_KF_L2_FRM_TYPE] = "L2_FRM_TYPE",
+ [VCAP_KF_L2_FWD_IS] = "L2_FWD_IS",
+ [VCAP_KF_L2_LLC] = "L2_LLC",
+ [VCAP_KF_L2_MC_IS] = "L2_MC_IS",
+ [VCAP_KF_L2_PAYLOAD0] = "L2_PAYLOAD0",
+ [VCAP_KF_L2_PAYLOAD1] = "L2_PAYLOAD1",
+ [VCAP_KF_L2_PAYLOAD2] = "L2_PAYLOAD2",
+ [VCAP_KF_L2_PAYLOAD_ETYPE] = "L2_PAYLOAD_ETYPE",
+ [VCAP_KF_L2_SMAC] = "L2_SMAC",
+ [VCAP_KF_L2_SNAP] = "L2_SNAP",
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = "L3_DIP_EQ_SIP_IS",
+ [VCAP_KF_L3_DPL_CLS] = "L3_DPL_CLS",
+ [VCAP_KF_L3_DSCP] = "L3_DSCP",
+ [VCAP_KF_L3_DST_IS] = "L3_DST_IS",
+ [VCAP_KF_L3_FRAGMENT] = "L3_FRAGMENT",
+ [VCAP_KF_L3_FRAGMENT_TYPE] = "L3_FRAGMENT_TYPE",
+ [VCAP_KF_L3_FRAG_INVLD_L4_LEN] = "L3_FRAG_INVLD_L4_LEN",
+ [VCAP_KF_L3_FRAG_OFS_GT0] = "L3_FRAG_OFS_GT0",
+ [VCAP_KF_L3_IP4_DIP] = "L3_IP4_DIP",
+ [VCAP_KF_L3_IP4_SIP] = "L3_IP4_SIP",
+ [VCAP_KF_L3_IP6_DIP] = "L3_IP6_DIP",
+ [VCAP_KF_L3_IP6_SIP] = "L3_IP6_SIP",
+ [VCAP_KF_L3_IP_PROTO] = "L3_IP_PROTO",
+ [VCAP_KF_L3_OPTIONS_IS] = "L3_OPTIONS_IS",
+ [VCAP_KF_L3_PAYLOAD] = "L3_PAYLOAD",
+ [VCAP_KF_L3_RT_IS] = "L3_RT_IS",
+ [VCAP_KF_L3_TOS] = "L3_TOS",
+ [VCAP_KF_L3_TTL_GT0] = "L3_TTL_GT0",
+ [VCAP_KF_L4_1588_DOM] = "L4_1588_DOM",
+ [VCAP_KF_L4_1588_VER] = "L4_1588_VER",
+ [VCAP_KF_L4_ACK] = "L4_ACK",
+ [VCAP_KF_L4_DPORT] = "L4_DPORT",
+ [VCAP_KF_L4_FIN] = "L4_FIN",
+ [VCAP_KF_L4_PAYLOAD] = "L4_PAYLOAD",
+ [VCAP_KF_L4_PSH] = "L4_PSH",
+ [VCAP_KF_L4_RNG] = "L4_RNG",
+ [VCAP_KF_L4_RST] = "L4_RST",
+ [VCAP_KF_L4_SEQUENCE_EQ0_IS] = "L4_SEQUENCE_EQ0_IS",
+ [VCAP_KF_L4_SPORT] = "L4_SPORT",
+ [VCAP_KF_L4_SPORT_EQ_DPORT_IS] = "L4_SPORT_EQ_DPORT_IS",
+ [VCAP_KF_L4_SYN] = "L4_SYN",
+ [VCAP_KF_L4_URG] = "L4_URG",
+ [VCAP_KF_LOOKUP_FIRST_IS] = "LOOKUP_FIRST_IS",
+ [VCAP_KF_LOOKUP_GEN_IDX] = "LOOKUP_GEN_IDX",
+ [VCAP_KF_LOOKUP_GEN_IDX_SEL] = "LOOKUP_GEN_IDX_SEL",
+ [VCAP_KF_LOOKUP_PAG] = "LOOKUP_PAG",
+ [VCAP_KF_MIRROR_PROBE] = "MIRROR_PROBE",
+ [VCAP_KF_OAM_CCM_CNTS_EQ0] = "OAM_CCM_CNTS_EQ0",
+ [VCAP_KF_OAM_DETECTED] = "OAM_DETECTED",
+ [VCAP_KF_OAM_FLAGS] = "OAM_FLAGS",
+ [VCAP_KF_OAM_MEL_FLAGS] = "OAM_MEL_FLAGS",
+ [VCAP_KF_OAM_MEPID] = "OAM_MEPID",
+ [VCAP_KF_OAM_OPCODE] = "OAM_OPCODE",
+ [VCAP_KF_OAM_VER] = "OAM_VER",
+ [VCAP_KF_OAM_Y1731_IS] = "OAM_Y1731_IS",
+ [VCAP_KF_PROT_ACTIVE] = "PROT_ACTIVE",
+ [VCAP_KF_TCP_IS] = "TCP_IS",
+ [VCAP_KF_TCP_UDP_IS] = "TCP_UDP_IS",
+ [VCAP_KF_TYPE] = "TYPE",
+};
+
+/* Actionfield names */
+static const char * const vcap_actionfield_names[] = {
+ [VCAP_AF_NO_VALUE] = "(None)",
+ [VCAP_AF_ACL_ID] = "ACL_ID",
+ [VCAP_AF_CLS_VID_SEL] = "CLS_VID_SEL",
+ [VCAP_AF_CNT_ID] = "CNT_ID",
+ [VCAP_AF_COPY_PORT_NUM] = "COPY_PORT_NUM",
+ [VCAP_AF_COPY_QUEUE_NUM] = "COPY_QUEUE_NUM",
+ [VCAP_AF_CPU_COPY_ENA] = "CPU_COPY_ENA",
+ [VCAP_AF_CPU_QU] = "CPU_QU",
+ [VCAP_AF_CPU_QUEUE_NUM] = "CPU_QUEUE_NUM",
+ [VCAP_AF_DEI_A_VAL] = "DEI_A_VAL",
+ [VCAP_AF_DEI_B_VAL] = "DEI_B_VAL",
+ [VCAP_AF_DEI_C_VAL] = "DEI_C_VAL",
+ [VCAP_AF_DEI_ENA] = "DEI_ENA",
+ [VCAP_AF_DEI_VAL] = "DEI_VAL",
+ [VCAP_AF_DP_ENA] = "DP_ENA",
+ [VCAP_AF_DP_VAL] = "DP_VAL",
+ [VCAP_AF_DSCP_ENA] = "DSCP_ENA",
+ [VCAP_AF_DSCP_SEL] = "DSCP_SEL",
+ [VCAP_AF_DSCP_VAL] = "DSCP_VAL",
+ [VCAP_AF_ES2_REW_CMD] = "ES2_REW_CMD",
+ [VCAP_AF_ESDX] = "ESDX",
+ [VCAP_AF_FWD_KILL_ENA] = "FWD_KILL_ENA",
+ [VCAP_AF_FWD_MODE] = "FWD_MODE",
+ [VCAP_AF_FWD_SEL] = "FWD_SEL",
+ [VCAP_AF_HIT_ME_ONCE] = "HIT_ME_ONCE",
+ [VCAP_AF_HOST_MATCH] = "HOST_MATCH",
+ [VCAP_AF_IGNORE_PIPELINE_CTRL] = "IGNORE_PIPELINE_CTRL",
+ [VCAP_AF_INTR_ENA] = "INTR_ENA",
+ [VCAP_AF_ISDX_ADD_REPLACE_SEL] = "ISDX_ADD_REPLACE_SEL",
+ [VCAP_AF_ISDX_ENA] = "ISDX_ENA",
+ [VCAP_AF_ISDX_VAL] = "ISDX_VAL",
+ [VCAP_AF_LOOP_ENA] = "LOOP_ENA",
+ [VCAP_AF_LRN_DIS] = "LRN_DIS",
+ [VCAP_AF_MAP_IDX] = "MAP_IDX",
+ [VCAP_AF_MAP_KEY] = "MAP_KEY",
+ [VCAP_AF_MAP_LOOKUP_SEL] = "MAP_LOOKUP_SEL",
+ [VCAP_AF_MASK_MODE] = "MASK_MODE",
+ [VCAP_AF_MATCH_ID] = "MATCH_ID",
+ [VCAP_AF_MATCH_ID_MASK] = "MATCH_ID_MASK",
+ [VCAP_AF_MIRROR_ENA] = "MIRROR_ENA",
+ [VCAP_AF_MIRROR_PROBE] = "MIRROR_PROBE",
+ [VCAP_AF_MIRROR_PROBE_ID] = "MIRROR_PROBE_ID",
+ [VCAP_AF_NXT_IDX] = "NXT_IDX",
+ [VCAP_AF_NXT_IDX_CTRL] = "NXT_IDX_CTRL",
+ [VCAP_AF_PAG_OVERRIDE_MASK] = "PAG_OVERRIDE_MASK",
+ [VCAP_AF_PAG_VAL] = "PAG_VAL",
+ [VCAP_AF_PCP_A_VAL] = "PCP_A_VAL",
+ [VCAP_AF_PCP_B_VAL] = "PCP_B_VAL",
+ [VCAP_AF_PCP_C_VAL] = "PCP_C_VAL",
+ [VCAP_AF_PCP_ENA] = "PCP_ENA",
+ [VCAP_AF_PCP_VAL] = "PCP_VAL",
+ [VCAP_AF_PIPELINE_ACT] = "PIPELINE_ACT",
+ [VCAP_AF_PIPELINE_FORCE_ENA] = "PIPELINE_FORCE_ENA",
+ [VCAP_AF_PIPELINE_PT] = "PIPELINE_PT",
+ [VCAP_AF_POLICE_ENA] = "POLICE_ENA",
+ [VCAP_AF_POLICE_IDX] = "POLICE_IDX",
+ [VCAP_AF_POLICE_REMARK] = "POLICE_REMARK",
+ [VCAP_AF_POLICE_VCAP_ONLY] = "POLICE_VCAP_ONLY",
+ [VCAP_AF_POP_VAL] = "POP_VAL",
+ [VCAP_AF_PORT_MASK] = "PORT_MASK",
+ [VCAP_AF_PUSH_CUSTOMER_TAG] = "PUSH_CUSTOMER_TAG",
+ [VCAP_AF_PUSH_INNER_TAG] = "PUSH_INNER_TAG",
+ [VCAP_AF_PUSH_OUTER_TAG] = "PUSH_OUTER_TAG",
+ [VCAP_AF_QOS_ENA] = "QOS_ENA",
+ [VCAP_AF_QOS_VAL] = "QOS_VAL",
+ [VCAP_AF_REW_OP] = "REW_OP",
+ [VCAP_AF_RT_DIS] = "RT_DIS",
+ [VCAP_AF_SWAP_MACS_ENA] = "SWAP_MACS_ENA",
+ [VCAP_AF_TAG_A_DEI_SEL] = "TAG_A_DEI_SEL",
+ [VCAP_AF_TAG_A_PCP_SEL] = "TAG_A_PCP_SEL",
+ [VCAP_AF_TAG_A_TPID_SEL] = "TAG_A_TPID_SEL",
+ [VCAP_AF_TAG_A_VID_SEL] = "TAG_A_VID_SEL",
+ [VCAP_AF_TAG_B_DEI_SEL] = "TAG_B_DEI_SEL",
+ [VCAP_AF_TAG_B_PCP_SEL] = "TAG_B_PCP_SEL",
+ [VCAP_AF_TAG_B_TPID_SEL] = "TAG_B_TPID_SEL",
+ [VCAP_AF_TAG_B_VID_SEL] = "TAG_B_VID_SEL",
+ [VCAP_AF_TAG_C_DEI_SEL] = "TAG_C_DEI_SEL",
+ [VCAP_AF_TAG_C_PCP_SEL] = "TAG_C_PCP_SEL",
+ [VCAP_AF_TAG_C_TPID_SEL] = "TAG_C_TPID_SEL",
+ [VCAP_AF_TAG_C_VID_SEL] = "TAG_C_VID_SEL",
+ [VCAP_AF_TYPE] = "TYPE",
+ [VCAP_AF_UNTAG_VID_ENA] = "UNTAG_VID_ENA",
+ [VCAP_AF_VID_A_VAL] = "VID_A_VAL",
+ [VCAP_AF_VID_B_VAL] = "VID_B_VAL",
+ [VCAP_AF_VID_C_VAL] = "VID_C_VAL",
+ [VCAP_AF_VID_VAL] = "VID_VAL",
+};
+
+/* VCAPs */
+const struct vcap_info kunit_test_vcaps[] = {
+ [VCAP_TYPE_IS0] = {
+ .name = "is0",
+ .rows = 1024,
+ .sw_count = 12,
+ .sw_width = 52,
+ .sticky_width = 1,
+ .act_width = 110,
+ .default_cnt = 140,
+ .require_cnt_dis = 0,
+ .version = 1,
+ .keyfield_set = is0_keyfield_set,
+ .keyfield_set_size = ARRAY_SIZE(is0_keyfield_set),
+ .actionfield_set = is0_actionfield_set,
+ .actionfield_set_size = ARRAY_SIZE(is0_actionfield_set),
+ .keyfield_set_map = is0_keyfield_set_map,
+ .keyfield_set_map_size = is0_keyfield_set_map_size,
+ .actionfield_set_map = is0_actionfield_set_map,
+ .actionfield_set_map_size = is0_actionfield_set_map_size,
+ .keyfield_set_typegroups = is0_keyfield_set_typegroups,
+ .actionfield_set_typegroups = is0_actionfield_set_typegroups,
+ },
+ [VCAP_TYPE_IS2] = {
+ .name = "is2",
+ .rows = 256,
+ .sw_count = 12,
+ .sw_width = 52,
+ .sticky_width = 1,
+ .act_width = 110,
+ .default_cnt = 73,
+ .require_cnt_dis = 0,
+ .version = 1,
+ .keyfield_set = is2_keyfield_set,
+ .keyfield_set_size = ARRAY_SIZE(is2_keyfield_set),
+ .actionfield_set = is2_actionfield_set,
+ .actionfield_set_size = ARRAY_SIZE(is2_actionfield_set),
+ .keyfield_set_map = is2_keyfield_set_map,
+ .keyfield_set_map_size = is2_keyfield_set_map_size,
+ .actionfield_set_map = is2_actionfield_set_map,
+ .actionfield_set_map_size = is2_actionfield_set_map_size,
+ .keyfield_set_typegroups = is2_keyfield_set_typegroups,
+ .actionfield_set_typegroups = is2_actionfield_set_typegroups,
+ },
+ [VCAP_TYPE_ES2] = {
+ .name = "es2",
+ .rows = 1024,
+ .sw_count = 12,
+ .sw_width = 52,
+ .sticky_width = 1,
+ .act_width = 21,
+ .default_cnt = 74,
+ .require_cnt_dis = 0,
+ .version = 1,
+ .keyfield_set = es2_keyfield_set,
+ .keyfield_set_size = ARRAY_SIZE(es2_keyfield_set),
+ .actionfield_set = es2_actionfield_set,
+ .actionfield_set_size = ARRAY_SIZE(es2_actionfield_set),
+ .keyfield_set_map = es2_keyfield_set_map,
+ .keyfield_set_map_size = es2_keyfield_set_map_size,
+ .actionfield_set_map = es2_actionfield_set_map,
+ .actionfield_set_map_size = es2_actionfield_set_map_size,
+ .keyfield_set_typegroups = es2_keyfield_set_typegroups,
+ .actionfield_set_typegroups = es2_actionfield_set_typegroups,
+ },
+};
+
+const struct vcap_statistics kunit_test_vcap_stats = {
+ .name = "kunit_test",
+ .count = 3,
+ .keyfield_set_names = vcap_keyfield_set_names,
+ .actionfield_set_names = vcap_actionfield_set_names,
+ .keyfield_names = vcap_keyfield_names,
+ .actionfield_names = vcap_actionfield_names,
+};
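The kunit_test_vcap_stats block ties the three models in kunit_test_vcaps (.count = 3) to the four name tables above, so debug output can print enum values symbolically. A minimal lookup sketch, assuming a helper local to this file (the function name and fallback choice are illustrative, not part of the generated model):

	static const char *test_keyfield_name(enum vcap_key_field kf)
	{
		/* The designated initializers leave NULL holes, so guard
		 * both the array bound and the entry before dereferencing.
		 */
		if (kf >= ARRAY_SIZE(vcap_keyfield_names) ||
		    !vcap_keyfield_names[kf])
			return "(None)";
		return vcap_keyfield_names[kf];
	}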
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_model_kunit.h b/drivers/net/ethernet/microchip/vcap/vcap_model_kunit.h
new file mode 100644
index 0000000000..55762f24e1
--- /dev/null
+++ b/drivers/net/ethernet/microchip/vcap/vcap_model_kunit.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (C) 2023 Microchip Technology Inc. and its subsidiaries.
+ * Microchip VCAP test model interface for kunit testing
+ */
+
+/* This file is autogenerated by cml-utils 2023-02-10 11:16:00 +0100.
+ * Commit ID: c30fb4bf0281cd4a7133bdab6682f9e43c872ada
+ */
+
+#ifndef __VCAP_MODEL_KUNIT_H__
+#define __VCAP_MODEL_KUNIT_H__
+
+/* VCAPs */
+extern const struct vcap_info kunit_test_vcaps[];
+extern const struct vcap_statistics kunit_test_vcap_stats;
+
+#endif /* __VCAP_MODEL_KUNIT_H__ */
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_tc.c b/drivers/net/ethernet/microchip/vcap/vcap_tc.c
new file mode 100644
index 0000000000..27e2dffb65
--- /dev/null
+++ b/drivers/net/ethernet/microchip/vcap/vcap_tc.c
@@ -0,0 +1,415 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip VCAP TC
+ *
+ * Copyright (c) 2023 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include <net/flow_offload.h>
+#include <net/ipv6.h>
+#include <net/tcp.h>
+
+#include "vcap_api_client.h"
+#include "vcap_tc.h"
+
+enum vcap_is2_arp_opcode {
+ VCAP_IS2_ARP_REQUEST,
+ VCAP_IS2_ARP_REPLY,
+ VCAP_IS2_RARP_REQUEST,
+ VCAP_IS2_RARP_REPLY,
+};
+
+enum vcap_arp_opcode {
+ VCAP_ARP_OP_RESERVED,
+ VCAP_ARP_OP_REQUEST,
+ VCAP_ARP_OP_REPLY,
+};
+
+int vcap_tc_flower_handler_ethaddr_usage(struct vcap_tc_flower_parse_usage *st)
+{
+ enum vcap_key_field smac_key = VCAP_KF_L2_SMAC;
+ enum vcap_key_field dmac_key = VCAP_KF_L2_DMAC;
+ struct flow_match_eth_addrs match;
+ struct vcap_u48_key smac, dmac;
+ int err = 0;
+
+ flow_rule_match_eth_addrs(st->frule, &match);
+
+ if (!is_zero_ether_addr(match.mask->src)) {
+ vcap_netbytes_copy(smac.value, match.key->src, ETH_ALEN);
+ vcap_netbytes_copy(smac.mask, match.mask->src, ETH_ALEN);
+ err = vcap_rule_add_key_u48(st->vrule, smac_key, &smac);
+ if (err)
+ goto out;
+ }
+
+ if (!is_zero_ether_addr(match.mask->dst)) {
+ vcap_netbytes_copy(dmac.value, match.key->dst, ETH_ALEN);
+ vcap_netbytes_copy(dmac.mask, match.mask->dst, ETH_ALEN);
+ err = vcap_rule_add_key_u48(st->vrule, dmac_key, &dmac);
+ if (err)
+ goto out;
+ }
+
+ st->used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS);
+
+ return err;
+
+out:
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack, "eth_addr parse error");
+ return err;
+}
+EXPORT_SYMBOL_GPL(vcap_tc_flower_handler_ethaddr_usage);
+
+int vcap_tc_flower_handler_ipv4_usage(struct vcap_tc_flower_parse_usage *st)
+{
+ int err = 0;
+
+ if (st->l3_proto == ETH_P_IP) {
+ struct flow_match_ipv4_addrs mt;
+
+ flow_rule_match_ipv4_addrs(st->frule, &mt);
+ if (mt.mask->src) {
+ err = vcap_rule_add_key_u32(st->vrule,
+ VCAP_KF_L3_IP4_SIP,
+ be32_to_cpu(mt.key->src),
+ be32_to_cpu(mt.mask->src));
+ if (err)
+ goto out;
+ }
+ if (mt.mask->dst) {
+ err = vcap_rule_add_key_u32(st->vrule,
+ VCAP_KF_L3_IP4_DIP,
+ be32_to_cpu(mt.key->dst),
+ be32_to_cpu(mt.mask->dst));
+ if (err)
+ goto out;
+ }
+ }
+
+ st->used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS);
+
+ return err;
+
+out:
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ipv4_addr parse error");
+ return err;
+}
+EXPORT_SYMBOL_GPL(vcap_tc_flower_handler_ipv4_usage);
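As a worked example: a flower match on 10.0.0.0/24 arrives as big-endian values, so after be32_to_cpu() the pair passed to vcap_rule_add_key_u32() above is value 0x0a000000 with mask 0xffffff00, i.e. the host-order form the rule key stores.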
+
+int vcap_tc_flower_handler_ipv6_usage(struct vcap_tc_flower_parse_usage *st)
+{
+ int err = 0;
+
+ if (st->l3_proto == ETH_P_IPV6) {
+ struct flow_match_ipv6_addrs mt;
+ struct vcap_u128_key sip;
+ struct vcap_u128_key dip;
+
+ flow_rule_match_ipv6_addrs(st->frule, &mt);
+ /* Check if address masks are non-zero */
+ if (!ipv6_addr_any(&mt.mask->src)) {
+ vcap_netbytes_copy(sip.value, mt.key->src.s6_addr, 16);
+ vcap_netbytes_copy(sip.mask, mt.mask->src.s6_addr, 16);
+ err = vcap_rule_add_key_u128(st->vrule,
+ VCAP_KF_L3_IP6_SIP, &sip);
+ if (err)
+ goto out;
+ }
+ if (!ipv6_addr_any(&mt.mask->dst)) {
+ vcap_netbytes_copy(dip.value, mt.key->dst.s6_addr, 16);
+ vcap_netbytes_copy(dip.mask, mt.mask->dst.s6_addr, 16);
+ err = vcap_rule_add_key_u128(st->vrule,
+ VCAP_KF_L3_IP6_DIP, &dip);
+ if (err)
+ goto out;
+ }
+ }
+
+	st->used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS);
+
+	return err;
+
+out:
+	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ipv6_addr parse error");
+	return err;
+}
+EXPORT_SYMBOL_GPL(vcap_tc_flower_handler_ipv6_usage);
+
+int vcap_tc_flower_handler_portnum_usage(struct vcap_tc_flower_parse_usage *st)
+{
+ struct flow_match_ports mt;
+ u16 value, mask;
+ int err = 0;
+
+ flow_rule_match_ports(st->frule, &mt);
+
+ if (mt.mask->src) {
+ value = be16_to_cpu(mt.key->src);
+ mask = be16_to_cpu(mt.mask->src);
+ err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L4_SPORT, value,
+ mask);
+ if (err)
+ goto out;
+ }
+
+ if (mt.mask->dst) {
+ value = be16_to_cpu(mt.key->dst);
+ mask = be16_to_cpu(mt.mask->dst);
+ err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L4_DPORT, value,
+ mask);
+ if (err)
+ goto out;
+ }
+
+ st->used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_PORTS);
+
+ return err;
+
+out:
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack, "port parse error");
+ return err;
+}
+EXPORT_SYMBOL_GPL(vcap_tc_flower_handler_portnum_usage);
+
+int vcap_tc_flower_handler_cvlan_usage(struct vcap_tc_flower_parse_usage *st)
+{
+ enum vcap_key_field vid_key = VCAP_KF_8021Q_VID0;
+ enum vcap_key_field pcp_key = VCAP_KF_8021Q_PCP0;
+ struct flow_match_vlan mt;
+ u16 tpid;
+ int err;
+
+ flow_rule_match_cvlan(st->frule, &mt);
+
+ tpid = be16_to_cpu(mt.key->vlan_tpid);
+
+ if (tpid == ETH_P_8021Q) {
+ vid_key = VCAP_KF_8021Q_VID1;
+ pcp_key = VCAP_KF_8021Q_PCP1;
+ }
+
+ if (mt.mask->vlan_id) {
+ err = vcap_rule_add_key_u32(st->vrule, vid_key,
+ mt.key->vlan_id,
+ mt.mask->vlan_id);
+ if (err)
+ goto out;
+ }
+
+ if (mt.mask->vlan_priority) {
+ err = vcap_rule_add_key_u32(st->vrule, pcp_key,
+ mt.key->vlan_priority,
+ mt.mask->vlan_priority);
+ if (err)
+ goto out;
+ }
+
+ st->used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_CVLAN);
+
+ return 0;
+out:
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack, "cvlan parse error");
+ return err;
+}
+EXPORT_SYMBOL_GPL(vcap_tc_flower_handler_cvlan_usage);
+
+int vcap_tc_flower_handler_vlan_usage(struct vcap_tc_flower_parse_usage *st,
+ enum vcap_key_field vid_key,
+ enum vcap_key_field pcp_key)
+{
+ struct flow_match_vlan mt;
+ int err;
+
+ flow_rule_match_vlan(st->frule, &mt);
+
+ if (mt.mask->vlan_id) {
+ err = vcap_rule_add_key_u32(st->vrule, vid_key,
+ mt.key->vlan_id,
+ mt.mask->vlan_id);
+ if (err)
+ goto out;
+ }
+
+ if (mt.mask->vlan_priority) {
+ err = vcap_rule_add_key_u32(st->vrule, pcp_key,
+ mt.key->vlan_priority,
+ mt.mask->vlan_priority);
+ if (err)
+ goto out;
+ }
+
+ if (mt.mask->vlan_tpid)
+ st->tpid = be16_to_cpu(mt.key->vlan_tpid);
+
+ st->used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_VLAN);
+
+ return 0;
+out:
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack, "vlan parse error");
+ return err;
+}
+EXPORT_SYMBOL_GPL(vcap_tc_flower_handler_vlan_usage);
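Unlike the cvlan handler, which picks the VID0/VID1 keyfields itself from the TPID, this handler leaves the keyfield choice to the caller. A hedged call-site sketch (the classified-field choice is illustrative; platform parsers differ):

	/* Match on the classified VID/PCP keyfields rather than tag 0 */
	err = vcap_tc_flower_handler_vlan_usage(st,
						VCAP_KF_8021Q_VID_CLS,
						VCAP_KF_8021Q_PCP_CLS);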
+
+int vcap_tc_flower_handler_tcp_usage(struct vcap_tc_flower_parse_usage *st)
+{
+ struct flow_match_tcp mt;
+ u16 tcp_flags_mask;
+ u16 tcp_flags_key;
+ enum vcap_bit val;
+ int err = 0;
+
+ flow_rule_match_tcp(st->frule, &mt);
+ tcp_flags_key = be16_to_cpu(mt.key->flags);
+ tcp_flags_mask = be16_to_cpu(mt.mask->flags);
+
+ if (tcp_flags_mask & TCPHDR_FIN) {
+ val = VCAP_BIT_0;
+ if (tcp_flags_key & TCPHDR_FIN)
+ val = VCAP_BIT_1;
+ err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_FIN, val);
+ if (err)
+ goto out;
+ }
+
+ if (tcp_flags_mask & TCPHDR_SYN) {
+ val = VCAP_BIT_0;
+ if (tcp_flags_key & TCPHDR_SYN)
+ val = VCAP_BIT_1;
+ err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_SYN, val);
+ if (err)
+ goto out;
+ }
+
+ if (tcp_flags_mask & TCPHDR_RST) {
+ val = VCAP_BIT_0;
+ if (tcp_flags_key & TCPHDR_RST)
+ val = VCAP_BIT_1;
+ err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_RST, val);
+ if (err)
+ goto out;
+ }
+
+ if (tcp_flags_mask & TCPHDR_PSH) {
+ val = VCAP_BIT_0;
+ if (tcp_flags_key & TCPHDR_PSH)
+ val = VCAP_BIT_1;
+ err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_PSH, val);
+ if (err)
+ goto out;
+ }
+
+ if (tcp_flags_mask & TCPHDR_ACK) {
+ val = VCAP_BIT_0;
+ if (tcp_flags_key & TCPHDR_ACK)
+ val = VCAP_BIT_1;
+ err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_ACK, val);
+ if (err)
+ goto out;
+ }
+
+ if (tcp_flags_mask & TCPHDR_URG) {
+ val = VCAP_BIT_0;
+ if (tcp_flags_key & TCPHDR_URG)
+ val = VCAP_BIT_1;
+ err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_URG, val);
+ if (err)
+ goto out;
+ }
+
+ st->used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_TCP);
+
+ return err;
+
+out:
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack, "tcp_flags parse error");
+ return err;
+}
+EXPORT_SYMBOL_GPL(vcap_tc_flower_handler_tcp_usage);
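The six flag checks repeat one pattern: if the mask selects a TCPHDR_* bit, add the matching one-bit key as VCAP_BIT_0 or VCAP_BIT_1 from the key. The same behaviour can be sketched table-driven; this is an illustration only (it assumes an extra loop counter i in the handler), while the file above keeps the explicit form:

	static const struct {
		u16 flag;			/* TCPHDR_* bit (net/tcp.h) */
		enum vcap_key_field field;	/* matching VCAP key */
	} tcp_flag_map[] = {
		{ TCPHDR_FIN, VCAP_KF_L4_FIN }, { TCPHDR_SYN, VCAP_KF_L4_SYN },
		{ TCPHDR_RST, VCAP_KF_L4_RST }, { TCPHDR_PSH, VCAP_KF_L4_PSH },
		{ TCPHDR_ACK, VCAP_KF_L4_ACK }, { TCPHDR_URG, VCAP_KF_L4_URG },
	};

	/* Inside the handler, replacing the six if-blocks: */
	for (i = 0; i < ARRAY_SIZE(tcp_flag_map); i++) {
		if (!(tcp_flags_mask & tcp_flag_map[i].flag))
			continue;
		val = (tcp_flags_key & tcp_flag_map[i].flag) ?
		      VCAP_BIT_1 : VCAP_BIT_0;
		err = vcap_rule_add_key_bit(st->vrule,
					    tcp_flag_map[i].field, val);
		if (err)
			goto out;
	}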
+
+int vcap_tc_flower_handler_arp_usage(struct vcap_tc_flower_parse_usage *st)
+{
+ struct flow_match_arp mt;
+ u16 value, mask;
+ u32 ipval, ipmsk;
+ int err;
+
+ flow_rule_match_arp(st->frule, &mt);
+
+ if (mt.mask->op) {
+ mask = 0x3;
+ if (st->l3_proto == ETH_P_ARP) {
+ value = mt.key->op == VCAP_ARP_OP_REQUEST ?
+ VCAP_IS2_ARP_REQUEST :
+ VCAP_IS2_ARP_REPLY;
+ } else { /* RARP */
+ value = mt.key->op == VCAP_ARP_OP_REQUEST ?
+ VCAP_IS2_RARP_REQUEST :
+ VCAP_IS2_RARP_REPLY;
+ }
+ err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_ARP_OPCODE,
+ value, mask);
+ if (err)
+ goto out;
+ }
+
+ /* The IS2 ARP keyset does not support ARP hardware addresses */
+ if (!is_zero_ether_addr(mt.mask->sha) ||
+ !is_zero_ether_addr(mt.mask->tha)) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (mt.mask->sip) {
+ ipval = be32_to_cpu((__force __be32)mt.key->sip);
+ ipmsk = be32_to_cpu((__force __be32)mt.mask->sip);
+
+ err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L3_IP4_SIP,
+ ipval, ipmsk);
+ if (err)
+ goto out;
+ }
+
+ if (mt.mask->tip) {
+ ipval = be32_to_cpu((__force __be32)mt.key->tip);
+ ipmsk = be32_to_cpu((__force __be32)mt.mask->tip);
+
+ err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L3_IP4_DIP,
+ ipval, ipmsk);
+ if (err)
+ goto out;
+ }
+
+ st->used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_ARP);
+
+ return 0;
+
+out:
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack, "arp parse error");
+ return err;
+}
+EXPORT_SYMBOL_GPL(vcap_tc_flower_handler_arp_usage);
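Note the opcode remapping above: flower carries the wire opcode (VCAP_ARP_OP_REQUEST/REPLY are 1/2), while the IS2 key packs ARP and RARP into two bits, hence the fixed 0x3 mask. Request/reply map to VCAP_IS2_ARP_REQUEST/REPLY (0/1) when l3_proto is ETH_P_ARP, and to VCAP_IS2_RARP_REQUEST/REPLY (2/3) otherwise.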
+
+int vcap_tc_flower_handler_ip_usage(struct vcap_tc_flower_parse_usage *st)
+{
+ struct flow_match_ip mt;
+ int err = 0;
+
+ flow_rule_match_ip(st->frule, &mt);
+
+ if (mt.mask->tos) {
+ err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L3_TOS,
+ mt.key->tos,
+ mt.mask->tos);
+ if (err)
+ goto out;
+ }
+
+ st->used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_IP);
+
+ return err;
+
+out:
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ip_tos parse error");
+ return err;
+}
+EXPORT_SYMBOL_GPL(vcap_tc_flower_handler_ip_usage);
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_tc.h b/drivers/net/ethernet/microchip/vcap/vcap_tc.h
new file mode 100644
index 0000000000..49b02d0329
--- /dev/null
+++ b/drivers/net/ethernet/microchip/vcap/vcap_tc.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (C) 2023 Microchip Technology Inc. and its subsidiaries.
+ * Microchip VCAP TC
+ */
+
+#ifndef __VCAP_TC__
+#define __VCAP_TC__
+
+struct vcap_tc_flower_parse_usage {
+	struct flow_cls_offload *fco;	/* offload request from TC */
+	struct flow_rule *frule;	/* flower rule being parsed */
+	struct vcap_rule *vrule;	/* VCAP rule under construction */
+	struct vcap_admin *admin;	/* target VCAP instance */
+	u16 l3_proto;			/* layer-3 protocol (EtherType) */
+	u8 l4_proto;			/* layer-4 protocol */
+	u16 tpid;			/* TPID captured from the VLAN match */
+	unsigned long long used_keys;	/* dissector keys consumed so far */
+};
+
+int vcap_tc_flower_handler_ethaddr_usage(struct vcap_tc_flower_parse_usage *st);
+int vcap_tc_flower_handler_ipv4_usage(struct vcap_tc_flower_parse_usage *st);
+int vcap_tc_flower_handler_ipv6_usage(struct vcap_tc_flower_parse_usage *st);
+int vcap_tc_flower_handler_portnum_usage(struct vcap_tc_flower_parse_usage *st);
+int vcap_tc_flower_handler_cvlan_usage(struct vcap_tc_flower_parse_usage *st);
+int vcap_tc_flower_handler_vlan_usage(struct vcap_tc_flower_parse_usage *st,
+ enum vcap_key_field vid_key,
+ enum vcap_key_field pcp_key);
+int vcap_tc_flower_handler_tcp_usage(struct vcap_tc_flower_parse_usage *st);
+int vcap_tc_flower_handler_arp_usage(struct vcap_tc_flower_parse_usage *st);
+int vcap_tc_flower_handler_ip_usage(struct vcap_tc_flower_parse_usage *st);
+
+#endif /* __VCAP_TC__ */
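The handlers declared above are consumed by the platform tc_flower parsers, which dispatch on the flower dissector key id. A minimal sketch of such a driver-side table, assuming names local to the driver (a real parser covers more keys and rejects anything it cannot handle):

	static int (*const usage_handlers[])(struct vcap_tc_flower_parse_usage *st) = {
		[FLOW_DISSECTOR_KEY_ETH_ADDRS] = vcap_tc_flower_handler_ethaddr_usage,
		[FLOW_DISSECTOR_KEY_IPV4_ADDRS] = vcap_tc_flower_handler_ipv4_usage,
		[FLOW_DISSECTOR_KEY_IPV6_ADDRS] = vcap_tc_flower_handler_ipv6_usage,
		[FLOW_DISSECTOR_KEY_PORTS] = vcap_tc_flower_handler_portnum_usage,
		[FLOW_DISSECTOR_KEY_TCP] = vcap_tc_flower_handler_tcp_usage,
	};

	static int parse_usage(struct vcap_tc_flower_parse_usage *st)
	{
		int idx, err;

		for (idx = 0; idx < ARRAY_SIZE(usage_handlers); idx++) {
			if (!usage_handlers[idx] ||
			    !flow_rule_match_key(st->frule, idx))
				continue;
			err = usage_handlers[idx](st);
			if (err)
				return err;
		}
		return 0;
	}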