author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
commit     ace9429bb58fd418f0c81d4c2835699bddf6bde6 (patch)
tree       b2d64bc10158fdd5497876388cd68142ca374ed3 /drivers/net/ethernet/marvell
parent     Initial commit. (diff)
download   linux-ace9429bb58fd418f0c81d4c2835699bddf6bde6.tar.xz
           linux-ace9429bb58fd418f0c81d4c2835699bddf6bde6.zip

Adding upstream version 6.6.15. (upstream/6.6.15)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/net/ethernet/marvell')
-rw-r--r-- drivers/net/ethernet/marvell/Kconfig | 185
-rw-r--r-- drivers/net/ethernet/marvell/Makefile | 16
-rw-r--r-- drivers/net/ethernet/marvell/mv643xx_eth.c | 3343
-rw-r--r-- drivers/net/ethernet/marvell/mvmdio.c | 442
-rw-r--r-- drivers/net/ethernet/marvell/mvneta.c | 5941
-rw-r--r-- drivers/net/ethernet/marvell/mvneta_bm.c | 501
-rw-r--r-- drivers/net/ethernet/marvell/mvneta_bm.h | 192
-rw-r--r-- drivers/net/ethernet/marvell/mvpp2/Makefile | 8
-rw-r--r-- drivers/net/ethernet/marvell/mvpp2/mvpp2.h | 1570
-rw-r--r-- drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c | 1748
-rw-r--r-- drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h | 314
-rw-r--r-- drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c | 734
-rw-r--r-- drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 7785
-rw-r--r-- drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c | 2519
-rw-r--r-- drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h | 335
-rw-r--r-- drivers/net/ethernet/marvell/mvpp2/mvpp2_tai.c | 457
-rw-r--r-- drivers/net/ethernet/marvell/octeon_ep/Kconfig | 20
-rw-r--r-- drivers/net/ethernet/marvell/octeon_ep/Makefile | 9
-rw-r--r-- drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c | 755
-rw-r--r-- drivers/net/ethernet/marvell/octeon_ep/octep_config.h | 210
-rw-r--r-- drivers/net/ethernet/marvell/octeon_ep/octep_cp_version.h | 11
-rw-r--r-- drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c | 279
-rw-r--r-- drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.h | 180
-rw-r--r-- drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c | 368
-rw-r--r-- drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h | 341
-rw-r--r-- drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c | 467
-rw-r--r-- drivers/net/ethernet/marvell/octeon_ep/octep_main.c | 1273
-rw-r--r-- drivers/net/ethernet/marvell/octeon_ep/octep_main.h | 375
-rw-r--r-- drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h | 373
-rw-r--r-- drivers/net/ethernet/marvell/octeon_ep/octep_rx.c | 506
-rw-r--r-- drivers/net/ethernet/marvell/octeon_ep/octep_rx.h | 199
-rw-r--r-- drivers/net/ethernet/marvell/octeon_ep/octep_tx.c | 334
-rw-r--r-- drivers/net/ethernet/marvell/octeon_ep/octep_tx.h | 298
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/Kconfig | 48
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/Makefile | 8
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/Makefile | 14
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/cgx.c | 1904
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/cgx.h | 187
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h | 266
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/common.h | 241
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h | 169
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/mbox.c | 416
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/mbox.h | 2214
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/mcs.c | 1617
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/mcs.h | 246
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/mcs_cnf10kb.c | 277
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/mcs_reg.h | 1129
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c | 926
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/npc.h | 632
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h | 15266
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/ptp.c | 645
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/ptp.h | 33
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rpm.c | 748
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rpm.h | 137
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu.c | 3434
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu.h | 972
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c | 1291
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c | 561
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c | 1221
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c | 3440
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c | 1695
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.h | 82
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c | 5709
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c | 601
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c | 3463
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c | 1700
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.h | 21
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c | 2036
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.h | 239
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c | 68
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h | 738
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c | 111
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h | 823
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c | 280
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c | 13
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h | 106
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/Makefile | 18
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c | 487
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h | 42
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c | 1816
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c | 1903
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h | 1128
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c | 474
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c | 125
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.h | 20
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c | 209
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c | 1480
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c | 1517
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c | 3264
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c | 509
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.h | 32
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h | 175
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h | 341
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c | 1456
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c | 1461
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h | 175
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c | 797
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/qos.c | 1689
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/qos.h | 78
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c | 296
-rw-r--r-- drivers/net/ethernet/marvell/prestera/Kconfig | 27
-rw-r--r-- drivers/net/ethernet/marvell/prestera/Makefile | 9
-rw-r--r-- drivers/net/ethernet/marvell/prestera/prestera.h | 417
-rw-r--r-- drivers/net/ethernet/marvell/prestera/prestera_acl.c | 927
-rw-r--r-- drivers/net/ethernet/marvell/prestera/prestera_acl.h | 209
-rw-r--r-- drivers/net/ethernet/marvell/prestera/prestera_counter.c | 475
-rw-r--r-- drivers/net/ethernet/marvell/prestera/prestera_counter.h | 30
-rw-r--r-- drivers/net/ethernet/marvell/prestera/prestera_devlink.c | 598
-rw-r--r-- drivers/net/ethernet/marvell/prestera/prestera_devlink.h | 23
-rw-r--r-- drivers/net/ethernet/marvell/prestera/prestera_dsa.c | 107
-rw-r--r-- drivers/net/ethernet/marvell/prestera/prestera_dsa.h | 36
-rw-r--r-- drivers/net/ethernet/marvell/prestera/prestera_ethtool.c | 802
-rw-r--r-- drivers/net/ethernet/marvell/prestera/prestera_ethtool.h | 14
-rw-r--r-- drivers/net/ethernet/marvell/prestera/prestera_flow.c | 315
-rw-r--r-- drivers/net/ethernet/marvell/prestera/prestera_flow.h | 37
-rw-r--r-- drivers/net/ethernet/marvell/prestera/prestera_flower.c | 581
-rw-r--r-- drivers/net/ethernet/marvell/prestera/prestera_flower.h | 25
-rw-r--r-- drivers/net/ethernet/marvell/prestera/prestera_hw.c | 2561
-rw-r--r-- drivers/net/ethernet/marvell/prestera/prestera_hw.h | 330
-rw-r--r-- drivers/net/ethernet/marvell/prestera/prestera_main.c | 1525
-rw-r--r-- drivers/net/ethernet/marvell/prestera/prestera_matchall.c | 127
-rw-r--r-- drivers/net/ethernet/marvell/prestera/prestera_matchall.h | 17
-rw-r--r-- drivers/net/ethernet/marvell/prestera/prestera_pci.c | 981
-rw-r--r-- drivers/net/ethernet/marvell/prestera/prestera_router.c | 1645
-rw-r--r-- drivers/net/ethernet/marvell/prestera/prestera_router_hw.c | 688
-rw-r--r-- drivers/net/ethernet/marvell/prestera/prestera_router_hw.h | 155
-rw-r--r-- drivers/net/ethernet/marvell/prestera/prestera_rxtx.c | 820
-rw-r--r-- drivers/net/ethernet/marvell/prestera/prestera_rxtx.h | 19
-rw-r--r-- drivers/net/ethernet/marvell/prestera/prestera_span.c | 191
-rw-r--r-- drivers/net/ethernet/marvell/prestera/prestera_span.h | 24
-rw-r--r-- drivers/net/ethernet/marvell/prestera/prestera_switchdev.c | 1918
-rw-r--r-- drivers/net/ethernet/marvell/prestera/prestera_switchdev.h | 17
-rw-r--r-- drivers/net/ethernet/marvell/pxa168_eth.c | 1597
-rw-r--r-- drivers/net/ethernet/marvell/skge.c | 4195
-rw-r--r-- drivers/net/ethernet/marvell/skge.h | 2579
-rw-r--r-- drivers/net/ethernet/marvell/sky2.c | 5164
-rw-r--r-- drivers/net/ethernet/marvell/sky2.h | 2428
137 files changed, 138920 insertions, 0 deletions
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
new file mode 100644
index 000000000..884d64114
--- /dev/null
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -0,0 +1,185 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Marvell device configuration
+#
+
+config NET_VENDOR_MARVELL
+ bool "Marvell devices"
+ default y
+ depends on PCI || CPU_PXA168 || PPC32 || PLAT_ORION || INET || COMPILE_TEST
+ help
+ If you have a network (Ethernet) card belonging to this class, say Y.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Marvell devices. If you say Y, you will be
+ asked for your specific card in the following questions.
+
+if NET_VENDOR_MARVELL
+
+config MV643XX_ETH
+ tristate "Marvell Discovery (643XX) and Orion ethernet support"
+ depends on PPC32 || PLAT_ORION || COMPILE_TEST
+ depends on INET
+ select PHYLIB
+ select MVMDIO
+ help
+ This driver supports the gigabit ethernet MACs in the
+ Marvell Discovery PPC/MIPS chipset family (MV643XX) and
+ in the Marvell Orion ARM SoC family.
+
+ Some boards that use the Discovery chipset are the Momenco
+ Ocelot C and Jaguar ATX and Pegasos II.
+
+config MVMDIO
+ tristate "Marvell MDIO interface support"
+ depends on HAS_IOMEM
+ select MDIO_DEVRES
+ select PHYLIB
+ help
+ This driver supports the MDIO interface found in the network
+ interface units of the Marvell EBU SoCs (Kirkwood, Orion5x,
+ Dove, Armada 370 and Armada XP).
+
+ This driver is used by the MV643XX_ETH and MVNETA drivers.
+
+config MVNETA_BM_ENABLE
+ tristate "Marvell Armada 38x/XP network interface BM support"
+ depends on MVNETA
+ depends on !64BIT
+ help
+ This driver supports the auxiliary block of the network
+ interface units in the Marvell ARMADA XP and ARMADA 38x SoC
+ family, which is called the buffer manager.
+
+ This driver, when enabled, cooperates closely with the mvneta
+ driver and is shared by all network ports of these devices,
+ even on the Armada 370 SoC, which doesn't support hardware
+ buffer management.
+
+config MVNETA
+ tristate "Marvell Armada 370/38x/XP/37xx network interface support"
+ depends on ARCH_MVEBU || COMPILE_TEST
+ select MVMDIO
+ select PHYLINK
+ select PAGE_POOL
+ select PAGE_POOL_STATS
+ help
+ This driver supports the network interface units in the
+ Marvell ARMADA XP, ARMADA 370, ARMADA 38x and
+ ARMADA 37xx SoC family.
+
+ Note that this driver is distinct from the mv643xx_eth
+ driver, which should be used for the older Marvell SoCs
+ (Dove, Orion, Discovery, Kirkwood).
+
+config MVNETA_BM
+ tristate
+ depends on !64BIT
+ default y if MVNETA=y && MVNETA_BM_ENABLE!=n
+ default MVNETA_BM_ENABLE
+ select HWBM
+ select GENERIC_ALLOCATOR
+ help
+ MVNETA_BM must not be 'm' if MVNETA=y, so this symbol ensures
+ that all dependencies are met.
+
+config MVPP2
+ tristate "Marvell Armada 375/7K/8K network interface support"
+ depends on ARCH_MVEBU || COMPILE_TEST
+ select MVMDIO
+ select PHYLINK
+ select PAGE_POOL
+ help
+ This driver supports the network interface units in the
+ Marvell ARMADA 375, 7K and 8K SoCs.
+
+config MVPP2_PTP
+ bool "Marvell Armada 8K Enable PTP support"
+ depends on (PTP_1588_CLOCK = y && MVPP2 = y) || \
+ (PTP_1588_CLOCK && MVPP2 = m)
+
+config PXA168_ETH
+ tristate "Marvell pxa168 ethernet support"
+ depends on HAS_IOMEM
+ depends on CPU_PXA168 || ARCH_BERLIN || COMPILE_TEST
+ select PHYLIB
+ help
+ This driver supports the pxa168 Ethernet ports.
+
+ To compile this driver as a module, choose M here. The module
+ will be called pxa168_eth.
+
+config SKGE
+ tristate "Marvell Yukon Gigabit Ethernet support"
+ depends on PCI
+ select CRC32
+ help
+ This driver supports the Marvell Yukon or SysKonnect SK-98xx/SK-95xx
+ and related Gigabit Ethernet adapters. It is a new, smaller driver
+ with better performance and more complete ethtool support.
+
+ It does not support the link failover and network management
+ features that the "portable" vendor-supplied sk98lin driver does.
+
+ This driver supports adapters based on the original Yukon chipset:
+ Marvell 88E8001, Belkin F5D5005, CNet GigaCard, DLink DGE-530T,
+ Linksys EG1032/EG1064, 3Com 3C940/3C940B, SysKonnect SK-9871/9872.
+
+ It does not support the newer Yukon2 chipset: a separate driver,
+ sky2, is provided for these adapters.
+
+ To compile this driver as a module, choose M here: the module
+ will be called skge. This is recommended.
+
+config SKGE_DEBUG
+ bool "Debugging interface"
+ depends on SKGE && DEBUG_FS
+ help
+ This option adds the ability to dump driver state for debugging.
+ The file /sys/kernel/debug/skge/ethX displays the state of the internal
+ transmit and receive rings.
+
+ If unsure, say N.
+
+config SKGE_GENESIS
+ bool "Support for older SysKonnect Genesis boards"
+ depends on SKGE
+ help
+ This enables support for the older and uncommon SysKonnect Genesis
+ chips, which support MII via an external transceiver, instead of
+ an internal one. Disabling this option will save some memory
+ by making the code smaller. If unsure, say Y.
+
+config SKY2
+ tristate "Marvell Yukon 2 support"
+ depends on PCI
+ select CRC32
+ help
+ This driver supports Gigabit Ethernet adapters based on the
+ Marvell Yukon 2 chipset:
+ Marvell 88E8021/88E8022/88E8035/88E8036/88E8038/88E8050/88E8052/
+ 88E8053/88E8055/88E8061/88E8062, SysKonnect SK-9E21D/SK-9S21
+
+ There is a companion driver for the older Marvell Yukon and
+ SysKonnect Genesis based adapters: skge.
+
+ To compile this driver as a module, choose M here: the module
+ will be called sky2. This is recommended.
+
+config SKY2_DEBUG
+ bool "Debugging interface"
+ depends on SKY2 && DEBUG_FS
+ help
+ This option adds the ability to dump driver state for debugging.
+ The file /sys/kernel/debug/sky2/ethX displays the state of the internal
+ transmit and receive rings.
+
+ If unsure, say N.
+
+
+source "drivers/net/ethernet/marvell/octeontx2/Kconfig"
+source "drivers/net/ethernet/marvell/octeon_ep/Kconfig"
+source "drivers/net/ethernet/marvell/prestera/Kconfig"
+
+endif # NET_VENDOR_MARVELL
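+
+# Editorial note, not part of the upstream Kconfig: an illustrative .config
+# fragment (hypothetical selection) showing how the symbols above typically
+# combine when the Armada 370/38x/XP support is built as modules:
+#
+#   CONFIG_NET_VENDOR_MARVELL=y
+#   CONFIG_MVMDIO=m
+#   CONFIG_MVNETA=m
+#   CONFIG_MVNETA_BM_ENABLE=m
+#   CONFIG_MVNETA_BM=m
+#
+# With MVNETA=y and MVNETA_BM_ENABLE enabled, MVNETA_BM instead defaults to y
+# rather than m, which is exactly what the MVNETA_BM entry above ensures.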
diff --git a/drivers/net/ethernet/marvell/Makefile b/drivers/net/ethernet/marvell/Makefile
new file mode 100644
index 000000000..ceba4aa4f
--- /dev/null
+++ b/drivers/net/ethernet/marvell/Makefile
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the Marvell device drivers.
+#
+
+obj-$(CONFIG_MVMDIO) += mvmdio.o
+obj-$(CONFIG_MV643XX_ETH) += mv643xx_eth.o
+obj-$(CONFIG_MVNETA_BM) += mvneta_bm.o
+obj-$(CONFIG_MVNETA) += mvneta.o
+obj-$(CONFIG_MVPP2) += mvpp2/
+obj-$(CONFIG_PXA168_ETH) += pxa168_eth.o
+obj-$(CONFIG_SKGE) += skge.o
+obj-$(CONFIG_SKY2) += sky2.o
+obj-y += octeon_ep/
+obj-y += octeontx2/
+obj-y += prestera/
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
new file mode 100644
index 000000000..3b129a1c3
--- /dev/null
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -0,0 +1,3343 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Driver for Marvell Discovery (MV643XX) and Marvell Orion ethernet ports
+ * Copyright (C) 2002 Matthew Dharm <mdharm@momenco.com>
+ *
+ * Based on the 64360 driver from:
+ * Copyright (C) 2002 Rabeeh Khoury <rabeeh@galileo.co.il>
+ * Rabeeh Khoury <rabeeh@marvell.com>
+ *
+ * Copyright (C) 2003 PMC-Sierra, Inc.,
+ * written by Manish Lachwani
+ *
+ * Copyright (C) 2003 Ralf Baechle <ralf@linux-mips.org>
+ *
+ * Copyright (C) 2004-2006 MontaVista Software, Inc.
+ * Dale Farnsworth <dale@farnsworth.org>
+ *
+ * Copyright (C) 2004 Steven J. Hill <sjhill1@rockwellcollins.com>
+ * <sjhill@realitydiluted.com>
+ *
+ * Copyright (C) 2007-2008 Marvell Semiconductor
+ * Lennert Buytenhek <buytenh@marvell.com>
+ *
+ * Copyright (C) 2013 Michael Stapelberg <michael@stapelberg.de>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/dma-mapping.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <net/tso.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+#include <linux/ethtool.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/phy.h>
+#include <linux/mv643xx_eth.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_net.h>
+#include <linux/of_mdio.h>
+
+static char mv643xx_eth_driver_name[] = "mv643xx_eth";
+static char mv643xx_eth_driver_version[] = "1.4";
+
+
+/*
+ * Registers shared between all ports.
+ */
+#define PHY_ADDR 0x0000
+#define WINDOW_BASE(w) (0x0200 + ((w) << 3))
+#define WINDOW_SIZE(w) (0x0204 + ((w) << 3))
+#define WINDOW_REMAP_HIGH(w) (0x0280 + ((w) << 2))
+#define WINDOW_BAR_ENABLE 0x0290
+#define WINDOW_PROTECT(w) (0x0294 + ((w) << 4))
+
+/*
+ * Main per-port registers. These live at offset 0x0400 for
+ * port #0, 0x0800 for port #1, and 0x0c00 for port #2.
+ */
+#define PORT_CONFIG 0x0000
+#define UNICAST_PROMISCUOUS_MODE 0x00000001
+#define PORT_CONFIG_EXT 0x0004
+#define MAC_ADDR_LOW 0x0014
+#define MAC_ADDR_HIGH 0x0018
+#define SDMA_CONFIG 0x001c
+#define TX_BURST_SIZE_16_64BIT 0x01000000
+#define TX_BURST_SIZE_4_64BIT 0x00800000
+#define BLM_TX_NO_SWAP 0x00000020
+#define BLM_RX_NO_SWAP 0x00000010
+#define RX_BURST_SIZE_16_64BIT 0x00000008
+#define RX_BURST_SIZE_4_64BIT 0x00000004
+#define PORT_SERIAL_CONTROL 0x003c
+#define SET_MII_SPEED_TO_100 0x01000000
+#define SET_GMII_SPEED_TO_1000 0x00800000
+#define SET_FULL_DUPLEX_MODE 0x00200000
+#define MAX_RX_PACKET_9700BYTE 0x000a0000
+#define DISABLE_AUTO_NEG_SPEED_GMII 0x00002000
+#define DO_NOT_FORCE_LINK_FAIL 0x00000400
+#define SERIAL_PORT_CONTROL_RESERVED 0x00000200
+#define DISABLE_AUTO_NEG_FOR_FLOW_CTRL 0x00000008
+#define DISABLE_AUTO_NEG_FOR_DUPLEX 0x00000004
+#define FORCE_LINK_PASS 0x00000002
+#define SERIAL_PORT_ENABLE 0x00000001
+#define PORT_STATUS 0x0044
+#define TX_FIFO_EMPTY 0x00000400
+#define TX_IN_PROGRESS 0x00000080
+#define PORT_SPEED_MASK 0x00000030
+#define PORT_SPEED_1000 0x00000010
+#define PORT_SPEED_100 0x00000020
+#define PORT_SPEED_10 0x00000000
+#define FLOW_CONTROL_ENABLED 0x00000008
+#define FULL_DUPLEX 0x00000004
+#define LINK_UP 0x00000002
+#define TXQ_COMMAND 0x0048
+#define TXQ_FIX_PRIO_CONF 0x004c
+#define PORT_SERIAL_CONTROL1 0x004c
+#define RGMII_EN 0x00000008
+#define CLK125_BYPASS_EN 0x00000010
+#define TX_BW_RATE 0x0050
+#define TX_BW_MTU 0x0058
+#define TX_BW_BURST 0x005c
+#define INT_CAUSE 0x0060
+#define INT_TX_END 0x07f80000
+#define INT_TX_END_0 0x00080000
+#define INT_RX 0x000003fc
+#define INT_RX_0 0x00000004
+#define INT_EXT 0x00000002
+#define INT_CAUSE_EXT 0x0064
+#define INT_EXT_LINK_PHY 0x00110000
+#define INT_EXT_TX 0x000000ff
+#define INT_MASK 0x0068
+#define INT_MASK_EXT 0x006c
+#define TX_FIFO_URGENT_THRESHOLD 0x0074
+#define RX_DISCARD_FRAME_CNT 0x0084
+#define RX_OVERRUN_FRAME_CNT 0x0088
+#define TXQ_FIX_PRIO_CONF_MOVED 0x00dc
+#define TX_BW_RATE_MOVED 0x00e0
+#define TX_BW_MTU_MOVED 0x00e8
+#define TX_BW_BURST_MOVED 0x00ec
+#define RXQ_CURRENT_DESC_PTR(q) (0x020c + ((q) << 4))
+#define RXQ_COMMAND 0x0280
+#define TXQ_CURRENT_DESC_PTR(q) (0x02c0 + ((q) << 2))
+#define TXQ_BW_TOKENS(q) (0x0300 + ((q) << 4))
+#define TXQ_BW_CONF(q) (0x0304 + ((q) << 4))
+#define TXQ_BW_WRR_CONF(q) (0x0308 + ((q) << 4))
+
+/*
+ * Misc per-port registers.
+ */
+#define MIB_COUNTERS(p) (0x1000 + ((p) << 7))
+#define SPECIAL_MCAST_TABLE(p) (0x1400 + ((p) << 10))
+#define OTHER_MCAST_TABLE(p) (0x1500 + ((p) << 10))
+#define UNICAST_TABLE(p) (0x1600 + ((p) << 10))
+
+
+/*
+ * SDMA configuration register default value.
+ */
+#if defined(__BIG_ENDIAN)
+#define PORT_SDMA_CONFIG_DEFAULT_VALUE \
+ (RX_BURST_SIZE_4_64BIT | \
+ TX_BURST_SIZE_4_64BIT)
+#elif defined(__LITTLE_ENDIAN)
+#define PORT_SDMA_CONFIG_DEFAULT_VALUE \
+ (RX_BURST_SIZE_4_64BIT | \
+ BLM_RX_NO_SWAP | \
+ BLM_TX_NO_SWAP | \
+ TX_BURST_SIZE_4_64BIT)
+#else
+#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
+#endif
+
+
+/*
+ * Misc definitions.
+ */
+#define DEFAULT_RX_QUEUE_SIZE 128
+#define DEFAULT_TX_QUEUE_SIZE 512
+#define SKB_DMA_REALIGN ((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES)
+
+/* Max number of allowed TCP segments for software TSO */
+#define MV643XX_MAX_TSO_SEGS 100
+#define MV643XX_MAX_SKB_DESCS (MV643XX_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
+
+#define IS_TSO_HEADER(txq, addr) \
+ ((addr >= txq->tso_hdrs_dma) && \
+ (addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE))
+
+#define DESC_DMA_MAP_SINGLE 0
+#define DESC_DMA_MAP_PAGE 1
+
+/*
+ * RX/TX descriptors.
+ */
+#if defined(__BIG_ENDIAN)
+struct rx_desc {
+ u16 byte_cnt; /* Descriptor buffer byte count */
+ u16 buf_size; /* Buffer size */
+ u32 cmd_sts; /* Descriptor command status */
+ u32 next_desc_ptr; /* Next descriptor pointer */
+ u32 buf_ptr; /* Descriptor buffer pointer */
+};
+
+struct tx_desc {
+ u16 byte_cnt; /* buffer byte count */
+ u16 l4i_chk; /* CPU provided TCP checksum */
+ u32 cmd_sts; /* Command/status field */
+ u32 next_desc_ptr; /* Pointer to next descriptor */
+ u32 buf_ptr; /* pointer to buffer for this descriptor*/
+};
+#elif defined(__LITTLE_ENDIAN)
+struct rx_desc {
+ u32 cmd_sts; /* Descriptor command status */
+ u16 buf_size; /* Buffer size */
+ u16 byte_cnt; /* Descriptor buffer byte count */
+ u32 buf_ptr; /* Descriptor buffer pointer */
+ u32 next_desc_ptr; /* Next descriptor pointer */
+};
+
+struct tx_desc {
+ u32 cmd_sts; /* Command/status field */
+ u16 l4i_chk; /* CPU provided TCP checksum */
+ u16 byte_cnt; /* buffer byte count */
+ u32 buf_ptr; /* pointer to buffer for this descriptor*/
+ u32 next_desc_ptr; /* Pointer to next descriptor */
+};
+#else
+#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
+#endif
+
+/* RX & TX descriptor command */
+#define BUFFER_OWNED_BY_DMA 0x80000000
+
+/* RX & TX descriptor status */
+#define ERROR_SUMMARY 0x00000001
+
+/* RX descriptor status */
+#define LAYER_4_CHECKSUM_OK 0x40000000
+#define RX_ENABLE_INTERRUPT 0x20000000
+#define RX_FIRST_DESC 0x08000000
+#define RX_LAST_DESC 0x04000000
+#define RX_IP_HDR_OK 0x02000000
+#define RX_PKT_IS_IPV4 0x01000000
+#define RX_PKT_IS_ETHERNETV2 0x00800000
+#define RX_PKT_LAYER4_TYPE_MASK 0x00600000
+#define RX_PKT_LAYER4_TYPE_TCP_IPV4 0x00000000
+#define RX_PKT_IS_VLAN_TAGGED 0x00080000
+
+/* TX descriptor command */
+#define TX_ENABLE_INTERRUPT 0x00800000
+#define GEN_CRC 0x00400000
+#define TX_FIRST_DESC 0x00200000
+#define TX_LAST_DESC 0x00100000
+#define ZERO_PADDING 0x00080000
+#define GEN_IP_V4_CHECKSUM 0x00040000
+#define GEN_TCP_UDP_CHECKSUM 0x00020000
+#define UDP_FRAME 0x00010000
+#define MAC_HDR_EXTRA_4_BYTES 0x00008000
+#define GEN_TCP_UDP_CHK_FULL 0x00000400
+#define MAC_HDR_EXTRA_8_BYTES 0x00000200
+
+#define TX_IHL_SHIFT 11
+
+
+/* global *******************************************************************/
+struct mv643xx_eth_shared_private {
+ /*
+ * Ethernet controller base address.
+ */
+ void __iomem *base;
+
+ /*
+ * Per-port MBUS window access register value.
+ */
+ u32 win_protect;
+
+ /*
+ * Hardware-specific parameters.
+ */
+ int extended_rx_coal_limit;
+ int tx_bw_control;
+ int tx_csum_limit;
+ struct clk *clk;
+};
+
+#define TX_BW_CONTROL_ABSENT 0
+#define TX_BW_CONTROL_OLD_LAYOUT 1
+#define TX_BW_CONTROL_NEW_LAYOUT 2
+
+static int mv643xx_eth_open(struct net_device *dev);
+static int mv643xx_eth_stop(struct net_device *dev);
+
+
+/* per-port *****************************************************************/
+struct mib_counters {
+ u64 good_octets_received;
+ u32 bad_octets_received;
+ u32 internal_mac_transmit_err;
+ u32 good_frames_received;
+ u32 bad_frames_received;
+ u32 broadcast_frames_received;
+ u32 multicast_frames_received;
+ u32 frames_64_octets;
+ u32 frames_65_to_127_octets;
+ u32 frames_128_to_255_octets;
+ u32 frames_256_to_511_octets;
+ u32 frames_512_to_1023_octets;
+ u32 frames_1024_to_max_octets;
+ u64 good_octets_sent;
+ u32 good_frames_sent;
+ u32 excessive_collision;
+ u32 multicast_frames_sent;
+ u32 broadcast_frames_sent;
+ u32 unrec_mac_control_received;
+ u32 fc_sent;
+ u32 good_fc_received;
+ u32 bad_fc_received;
+ u32 undersize_received;
+ u32 fragments_received;
+ u32 oversize_received;
+ u32 jabber_received;
+ u32 mac_receive_error;
+ u32 bad_crc_event;
+ u32 collision;
+ u32 late_collision;
+ /* Non MIB hardware counters */
+ u32 rx_discard;
+ u32 rx_overrun;
+};
+
+struct rx_queue {
+ int index;
+
+ int rx_ring_size;
+
+ int rx_desc_count;
+ int rx_curr_desc;
+ int rx_used_desc;
+
+ struct rx_desc *rx_desc_area;
+ dma_addr_t rx_desc_dma;
+ int rx_desc_area_size;
+ struct sk_buff **rx_skb;
+};
+
+struct tx_queue {
+ int index;
+
+ int tx_ring_size;
+
+ int tx_desc_count;
+ int tx_curr_desc;
+ int tx_used_desc;
+
+ int tx_stop_threshold;
+ int tx_wake_threshold;
+
+ char *tso_hdrs;
+ dma_addr_t tso_hdrs_dma;
+
+ struct tx_desc *tx_desc_area;
+ char *tx_desc_mapping; /* array to track the type of the dma mapping */
+ dma_addr_t tx_desc_dma;
+ int tx_desc_area_size;
+
+ struct sk_buff_head tx_skb;
+
+ unsigned long tx_packets;
+ unsigned long tx_bytes;
+ unsigned long tx_dropped;
+};
+
+struct mv643xx_eth_private {
+ struct mv643xx_eth_shared_private *shared;
+ void __iomem *base;
+ int port_num;
+
+ struct net_device *dev;
+
+ struct timer_list mib_counters_timer;
+ spinlock_t mib_counters_lock;
+ struct mib_counters mib_counters;
+
+ struct work_struct tx_timeout_task;
+
+ struct napi_struct napi;
+ u32 int_mask;
+ u8 oom;
+ u8 work_link;
+ u8 work_tx;
+ u8 work_tx_end;
+ u8 work_rx;
+ u8 work_rx_refill;
+
+ int skb_size;
+
+ /*
+ * RX state.
+ */
+ int rx_ring_size;
+ unsigned long rx_desc_sram_addr;
+ int rx_desc_sram_size;
+ int rxq_count;
+ struct timer_list rx_oom;
+ struct rx_queue rxq[8];
+
+ /*
+ * TX state.
+ */
+ int tx_ring_size;
+ unsigned long tx_desc_sram_addr;
+ int tx_desc_sram_size;
+ int txq_count;
+ struct tx_queue txq[8];
+
+ /*
+ * Hardware-specific parameters.
+ */
+ struct clk *clk;
+ unsigned int t_clk;
+};
+
+
+/* port register accessors **************************************************/
+static inline u32 rdl(struct mv643xx_eth_private *mp, int offset)
+{
+ return readl(mp->shared->base + offset);
+}
+
+static inline u32 rdlp(struct mv643xx_eth_private *mp, int offset)
+{
+ return readl(mp->base + offset);
+}
+
+static inline void wrl(struct mv643xx_eth_private *mp, int offset, u32 data)
+{
+ writel(data, mp->shared->base + offset);
+}
+
+static inline void wrlp(struct mv643xx_eth_private *mp, int offset, u32 data)
+{
+ writel(data, mp->base + offset);
+}
+
+
+/* rxq/txq helper functions *************************************************/
+static struct mv643xx_eth_private *rxq_to_mp(struct rx_queue *rxq)
+{
+ return container_of(rxq, struct mv643xx_eth_private, rxq[rxq->index]);
+}
+
+static struct mv643xx_eth_private *txq_to_mp(struct tx_queue *txq)
+{
+ return container_of(txq, struct mv643xx_eth_private, txq[txq->index]);
+}
+
+static void rxq_enable(struct rx_queue *rxq)
+{
+ struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
+ wrlp(mp, RXQ_COMMAND, 1 << rxq->index);
+}
+
+static void rxq_disable(struct rx_queue *rxq)
+{
+ struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
+ u8 mask = 1 << rxq->index;
+
+ wrlp(mp, RXQ_COMMAND, mask << 8);
+ while (rdlp(mp, RXQ_COMMAND) & mask)
+ udelay(10);
+}
+
+static void txq_reset_hw_ptr(struct tx_queue *txq)
+{
+ struct mv643xx_eth_private *mp = txq_to_mp(txq);
+ u32 addr;
+
+ addr = (u32)txq->tx_desc_dma;
+ addr += txq->tx_curr_desc * sizeof(struct tx_desc);
+ wrlp(mp, TXQ_CURRENT_DESC_PTR(txq->index), addr);
+}
+
+static void txq_enable(struct tx_queue *txq)
+{
+ struct mv643xx_eth_private *mp = txq_to_mp(txq);
+ wrlp(mp, TXQ_COMMAND, 1 << txq->index);
+}
+
+static void txq_disable(struct tx_queue *txq)
+{
+ struct mv643xx_eth_private *mp = txq_to_mp(txq);
+ u8 mask = 1 << txq->index;
+
+ wrlp(mp, TXQ_COMMAND, mask << 8);
+ while (rdlp(mp, TXQ_COMMAND) & mask)
+ udelay(10);
+}
+
+static void txq_maybe_wake(struct tx_queue *txq)
+{
+ struct mv643xx_eth_private *mp = txq_to_mp(txq);
+ struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
+
+ if (netif_tx_queue_stopped(nq)) {
+ __netif_tx_lock(nq, smp_processor_id());
+ if (txq->tx_desc_count <= txq->tx_wake_threshold)
+ netif_tx_wake_queue(nq);
+ __netif_tx_unlock(nq);
+ }
+}
+
+static int rxq_process(struct rx_queue *rxq, int budget)
+{
+ struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
+ struct net_device_stats *stats = &mp->dev->stats;
+ int rx;
+
+ rx = 0;
+ while (rx < budget && rxq->rx_desc_count) {
+ struct rx_desc *rx_desc;
+ unsigned int cmd_sts;
+ struct sk_buff *skb;
+ u16 byte_cnt;
+
+ rx_desc = &rxq->rx_desc_area[rxq->rx_curr_desc];
+
+ cmd_sts = rx_desc->cmd_sts;
+ if (cmd_sts & BUFFER_OWNED_BY_DMA)
+ break;
+ rmb();
+
+ skb = rxq->rx_skb[rxq->rx_curr_desc];
+ rxq->rx_skb[rxq->rx_curr_desc] = NULL;
+
+ rxq->rx_curr_desc++;
+ if (rxq->rx_curr_desc == rxq->rx_ring_size)
+ rxq->rx_curr_desc = 0;
+
+ dma_unmap_single(mp->dev->dev.parent, rx_desc->buf_ptr,
+ rx_desc->buf_size, DMA_FROM_DEVICE);
+ rxq->rx_desc_count--;
+ rx++;
+
+ mp->work_rx_refill |= 1 << rxq->index;
+
+ byte_cnt = rx_desc->byte_cnt;
+
+ /*
+ * Update statistics.
+ *
+ * Note that the descriptor byte count includes 2 dummy
+ * bytes automatically inserted by the hardware at the
+ * start of the packet (which we don't count), and a 4
+ * byte CRC at the end of the packet (which we do count).
+ */
+ stats->rx_packets++;
+ stats->rx_bytes += byte_cnt - 2;
+
+ /*
+ * In case we received a packet without first / last bits
+ * on, or the error summary bit is set, the packet needs
+ * to be dropped.
+ */
+ if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC | ERROR_SUMMARY))
+ != (RX_FIRST_DESC | RX_LAST_DESC))
+ goto err;
+
+ /*
+ * The -4 is for the CRC in the trailer of the
+ * received packet
+ */
+ skb_put(skb, byte_cnt - 2 - 4);
+
+ if (cmd_sts & LAYER_4_CHECKSUM_OK)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb->protocol = eth_type_trans(skb, mp->dev);
+
+ napi_gro_receive(&mp->napi, skb);
+
+ continue;
+
+err:
+ stats->rx_dropped++;
+
+ if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
+ (RX_FIRST_DESC | RX_LAST_DESC)) {
+ if (net_ratelimit())
+ netdev_err(mp->dev,
+ "received packet spanning multiple descriptors\n");
+ }
+
+ if (cmd_sts & ERROR_SUMMARY)
+ stats->rx_errors++;
+
+ dev_kfree_skb(skb);
+ }
+
+ if (rx < budget)
+ mp->work_rx &= ~(1 << rxq->index);
+
+ return rx;
+}
+
+static int rxq_refill(struct rx_queue *rxq, int budget)
+{
+ struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
+ int refilled;
+
+ refilled = 0;
+ while (refilled < budget && rxq->rx_desc_count < rxq->rx_ring_size) {
+ struct sk_buff *skb;
+ int rx;
+ struct rx_desc *rx_desc;
+ int size;
+
+ skb = netdev_alloc_skb(mp->dev, mp->skb_size);
+
+ if (skb == NULL) {
+ mp->oom = 1;
+ goto oom;
+ }
+
+ if (SKB_DMA_REALIGN)
+ skb_reserve(skb, SKB_DMA_REALIGN);
+
+ refilled++;
+ rxq->rx_desc_count++;
+
+ rx = rxq->rx_used_desc++;
+ if (rxq->rx_used_desc == rxq->rx_ring_size)
+ rxq->rx_used_desc = 0;
+
+ rx_desc = rxq->rx_desc_area + rx;
+
+ size = skb_end_pointer(skb) - skb->data;
+ rx_desc->buf_ptr = dma_map_single(mp->dev->dev.parent,
+ skb->data, size,
+ DMA_FROM_DEVICE);
+ rx_desc->buf_size = size;
+ rxq->rx_skb[rx] = skb;
+ wmb();
+ rx_desc->cmd_sts = BUFFER_OWNED_BY_DMA | RX_ENABLE_INTERRUPT;
+ wmb();
+
+ /*
+ * The hardware automatically prepends 2 bytes of
+ * dummy data to each received packet, so that the
+ * IP header ends up 16-byte aligned.
+ */
+ skb_reserve(skb, 2);
+ }
+
+ if (refilled < budget)
+ mp->work_rx_refill &= ~(1 << rxq->index);
+
+oom:
+ return refilled;
+}
+
+
+/* tx ***********************************************************************/
+static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb)
+{
+ int frag;
+
+ for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
+ const skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];
+
+ if (skb_frag_size(fragp) <= 8 && skb_frag_off(fragp) & 7)
+ return 1;
+ }
+
+ return 0;
+}
+
+static int skb_tx_csum(struct mv643xx_eth_private *mp, struct sk_buff *skb,
+ u16 *l4i_chk, u32 *command, int length)
+{
+ int ret;
+ u32 cmd = 0;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ int hdr_len;
+ int tag_bytes;
+
+ BUG_ON(skb->protocol != htons(ETH_P_IP) &&
+ skb->protocol != htons(ETH_P_8021Q));
+
+ hdr_len = (void *)ip_hdr(skb) - (void *)skb->data;
+ tag_bytes = hdr_len - ETH_HLEN;
+
+ if (length - hdr_len > mp->shared->tx_csum_limit ||
+ unlikely(tag_bytes & ~12)) {
+ ret = skb_checksum_help(skb);
+ if (!ret)
+ goto no_csum;
+ return ret;
+ }
+
+ if (tag_bytes & 4)
+ cmd |= MAC_HDR_EXTRA_4_BYTES;
+ if (tag_bytes & 8)
+ cmd |= MAC_HDR_EXTRA_8_BYTES;
+
+ cmd |= GEN_TCP_UDP_CHECKSUM | GEN_TCP_UDP_CHK_FULL |
+ GEN_IP_V4_CHECKSUM |
+ ip_hdr(skb)->ihl << TX_IHL_SHIFT;
+
+ /* TODO: Revisit this. With the usage of GEN_TCP_UDP_CHK_FULL
+ * it seems we don't need to pass the initial checksum.
+ */
+ switch (ip_hdr(skb)->protocol) {
+ case IPPROTO_UDP:
+ cmd |= UDP_FRAME;
+ *l4i_chk = 0;
+ break;
+ case IPPROTO_TCP:
+ *l4i_chk = 0;
+ break;
+ default:
+ WARN(1, "protocol not supported");
+ }
+ } else {
+no_csum:
+ /* Errata BTS #50, IHL must be 5 if no HW checksum */
+ cmd |= 5 << TX_IHL_SHIFT;
+ }
+ *command = cmd;
+ return 0;
+}
+
+static inline int
+txq_put_data_tso(struct net_device *dev, struct tx_queue *txq,
+ struct sk_buff *skb, char *data, int length,
+ bool last_tcp, bool is_last)
+{
+ int tx_index;
+ u32 cmd_sts;
+ struct tx_desc *desc;
+
+ tx_index = txq->tx_curr_desc++;
+ if (txq->tx_curr_desc == txq->tx_ring_size)
+ txq->tx_curr_desc = 0;
+ desc = &txq->tx_desc_area[tx_index];
+ txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE;
+
+ desc->l4i_chk = 0;
+ desc->byte_cnt = length;
+
+ if (length <= 8 && (uintptr_t)data & 0x7) {
+ /* Copy unaligned small data fragment to TSO header data area */
+ memcpy(txq->tso_hdrs + tx_index * TSO_HEADER_SIZE,
+ data, length);
+ desc->buf_ptr = txq->tso_hdrs_dma
+ + tx_index * TSO_HEADER_SIZE;
+ } else {
+ /* Alignment is okay, map buffer and hand off to hardware */
+ txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE;
+ desc->buf_ptr = dma_map_single(dev->dev.parent, data,
+ length, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev->dev.parent,
+ desc->buf_ptr))) {
+ WARN(1, "dma_map_single failed!\n");
+ return -ENOMEM;
+ }
+ }
+
+ cmd_sts = BUFFER_OWNED_BY_DMA;
+ if (last_tcp) {
+ /* last descriptor in the TCP packet */
+ cmd_sts |= ZERO_PADDING | TX_LAST_DESC;
+ /* last descriptor in SKB */
+ if (is_last)
+ cmd_sts |= TX_ENABLE_INTERRUPT;
+ }
+ desc->cmd_sts = cmd_sts;
+ return 0;
+}
+
+static inline void
+txq_put_hdr_tso(struct sk_buff *skb, struct tx_queue *txq, int length,
+ u32 *first_cmd_sts, bool first_desc)
+{
+ struct mv643xx_eth_private *mp = txq_to_mp(txq);
+ int hdr_len = skb_tcp_all_headers(skb);
+ int tx_index;
+ struct tx_desc *desc;
+ int ret;
+ u32 cmd_csum = 0;
+ u16 l4i_chk = 0;
+ u32 cmd_sts;
+
+ tx_index = txq->tx_curr_desc;
+ desc = &txq->tx_desc_area[tx_index];
+
+ ret = skb_tx_csum(mp, skb, &l4i_chk, &cmd_csum, length);
+ if (ret)
+ WARN(1, "failed to prepare checksum!");
+
+ /* Should we set this? Can't use the value from skb_tx_csum()
+ * as it's not the correct initial L4 checksum to use.
+ */
+ desc->l4i_chk = 0;
+
+ desc->byte_cnt = hdr_len;
+ desc->buf_ptr = txq->tso_hdrs_dma +
+ txq->tx_curr_desc * TSO_HEADER_SIZE;
+ cmd_sts = cmd_csum | BUFFER_OWNED_BY_DMA | TX_FIRST_DESC |
+ GEN_CRC;
+
+ /* Defer updating the first command descriptor until all
+ * following descriptors have been written.
+ */
+ if (first_desc)
+ *first_cmd_sts = cmd_sts;
+ else
+ desc->cmd_sts = cmd_sts;
+
+ txq->tx_curr_desc++;
+ if (txq->tx_curr_desc == txq->tx_ring_size)
+ txq->tx_curr_desc = 0;
+}
+
+static int txq_submit_tso(struct tx_queue *txq, struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct mv643xx_eth_private *mp = txq_to_mp(txq);
+ int hdr_len, total_len, data_left, ret;
+ int desc_count = 0;
+ struct tso_t tso;
+ struct tx_desc *first_tx_desc;
+ u32 first_cmd_sts = 0;
+
+ /* Count needed descriptors */
+ if ((txq->tx_desc_count + tso_count_descs(skb)) >= txq->tx_ring_size) {
+ netdev_dbg(dev, "not enough descriptors for TSO!\n");
+ return -EBUSY;
+ }
+
+ first_tx_desc = &txq->tx_desc_area[txq->tx_curr_desc];
+
+ /* Initialize the TSO handler, and prepare the first payload */
+ hdr_len = tso_start(skb, &tso);
+
+ total_len = skb->len - hdr_len;
+ while (total_len > 0) {
+ bool first_desc = (desc_count == 0);
+ char *hdr;
+
+ data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
+ total_len -= data_left;
+ desc_count++;
+
+ /* prepare packet headers: MAC + IP + TCP */
+ hdr = txq->tso_hdrs + txq->tx_curr_desc * TSO_HEADER_SIZE;
+ tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
+ txq_put_hdr_tso(skb, txq, data_left, &first_cmd_sts,
+ first_desc);
+
+ while (data_left > 0) {
+ int size;
+ desc_count++;
+
+ size = min_t(int, tso.size, data_left);
+ ret = txq_put_data_tso(dev, txq, skb, tso.data, size,
+ size == data_left,
+ total_len == 0);
+ if (ret)
+ goto err_release;
+ data_left -= size;
+ tso_build_data(skb, &tso, size);
+ }
+ }
+
+ __skb_queue_tail(&txq->tx_skb, skb);
+ skb_tx_timestamp(skb);
+
+ /* ensure all other descriptors are written before first cmd_sts */
+ wmb();
+ first_tx_desc->cmd_sts = first_cmd_sts;
+
+ /* clear TX_END status */
+ mp->work_tx_end &= ~(1 << txq->index);
+
+ /* ensure all descriptors are written before poking hardware */
+ wmb();
+ txq_enable(txq);
+ txq->tx_desc_count += desc_count;
+ return 0;
+err_release:
+ /* TODO: Release all used data descriptors; header descriptors must not
+ * be DMA-unmapped.
+ */
+ return ret;
+}
+
+static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
+{
+ struct mv643xx_eth_private *mp = txq_to_mp(txq);
+ int nr_frags = skb_shinfo(skb)->nr_frags;
+ int frag;
+
+ for (frag = 0; frag < nr_frags; frag++) {
+ skb_frag_t *this_frag;
+ int tx_index;
+ struct tx_desc *desc;
+
+ this_frag = &skb_shinfo(skb)->frags[frag];
+ tx_index = txq->tx_curr_desc++;
+ if (txq->tx_curr_desc == txq->tx_ring_size)
+ txq->tx_curr_desc = 0;
+ desc = &txq->tx_desc_area[tx_index];
+ txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_PAGE;
+
+ /*
+ * The last fragment will generate an interrupt
+ * which will free the skb on TX completion.
+ */
+ if (frag == nr_frags - 1) {
+ desc->cmd_sts = BUFFER_OWNED_BY_DMA |
+ ZERO_PADDING | TX_LAST_DESC |
+ TX_ENABLE_INTERRUPT;
+ } else {
+ desc->cmd_sts = BUFFER_OWNED_BY_DMA;
+ }
+
+ desc->l4i_chk = 0;
+ desc->byte_cnt = skb_frag_size(this_frag);
+ desc->buf_ptr = skb_frag_dma_map(mp->dev->dev.parent,
+ this_frag, 0, desc->byte_cnt,
+ DMA_TO_DEVICE);
+ }
+}
+
+static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct mv643xx_eth_private *mp = txq_to_mp(txq);
+ int nr_frags = skb_shinfo(skb)->nr_frags;
+ int tx_index;
+ struct tx_desc *desc;
+ u32 cmd_sts;
+ u16 l4i_chk;
+ int length, ret;
+
+ cmd_sts = 0;
+ l4i_chk = 0;
+
+ if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) {
+ if (net_ratelimit())
+ netdev_err(dev, "tx queue full?!\n");
+ return -EBUSY;
+ }
+
+ ret = skb_tx_csum(mp, skb, &l4i_chk, &cmd_sts, skb->len);
+ if (ret)
+ return ret;
+ cmd_sts |= TX_FIRST_DESC | GEN_CRC | BUFFER_OWNED_BY_DMA;
+
+ tx_index = txq->tx_curr_desc++;
+ if (txq->tx_curr_desc == txq->tx_ring_size)
+ txq->tx_curr_desc = 0;
+ desc = &txq->tx_desc_area[tx_index];
+ txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE;
+
+ if (nr_frags) {
+ txq_submit_frag_skb(txq, skb);
+ length = skb_headlen(skb);
+ } else {
+ cmd_sts |= ZERO_PADDING | TX_LAST_DESC | TX_ENABLE_INTERRUPT;
+ length = skb->len;
+ }
+
+ desc->l4i_chk = l4i_chk;
+ desc->byte_cnt = length;
+ desc->buf_ptr = dma_map_single(mp->dev->dev.parent, skb->data,
+ length, DMA_TO_DEVICE);
+
+ __skb_queue_tail(&txq->tx_skb, skb);
+
+ skb_tx_timestamp(skb);
+
+ /* ensure all other descriptors are written before first cmd_sts */
+ wmb();
+ desc->cmd_sts = cmd_sts;
+
+ /* clear TX_END status */
+ mp->work_tx_end &= ~(1 << txq->index);
+
+ /* ensure all descriptors are written before poking hardware */
+ wmb();
+ txq_enable(txq);
+
+ txq->tx_desc_count += nr_frags + 1;
+
+ return 0;
+}
+
+static netdev_tx_t mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct mv643xx_eth_private *mp = netdev_priv(dev);
+ int length, queue, ret;
+ struct tx_queue *txq;
+ struct netdev_queue *nq;
+
+ queue = skb_get_queue_mapping(skb);
+ txq = mp->txq + queue;
+ nq = netdev_get_tx_queue(dev, queue);
+
+ if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) {
+ netdev_printk(KERN_DEBUG, dev,
+ "failed to linearize skb with tiny unaligned fragment\n");
+ return NETDEV_TX_BUSY;
+ }
+
+ length = skb->len;
+
+ if (skb_is_gso(skb))
+ ret = txq_submit_tso(txq, skb, dev);
+ else
+ ret = txq_submit_skb(txq, skb, dev);
+ if (!ret) {
+ txq->tx_bytes += length;
+ txq->tx_packets++;
+
+ if (txq->tx_desc_count >= txq->tx_stop_threshold)
+ netif_tx_stop_queue(nq);
+ } else {
+ txq->tx_dropped++;
+ dev_kfree_skb_any(skb);
+ }
+
+ return NETDEV_TX_OK;
+}
+
+
+/* tx napi ******************************************************************/
+static void txq_kick(struct tx_queue *txq)
+{
+ struct mv643xx_eth_private *mp = txq_to_mp(txq);
+ struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
+ u32 hw_desc_ptr;
+ u32 expected_ptr;
+
+ __netif_tx_lock(nq, smp_processor_id());
+
+ if (rdlp(mp, TXQ_COMMAND) & (1 << txq->index))
+ goto out;
+
+ hw_desc_ptr = rdlp(mp, TXQ_CURRENT_DESC_PTR(txq->index));
+ expected_ptr = (u32)txq->tx_desc_dma +
+ txq->tx_curr_desc * sizeof(struct tx_desc);
+
+ if (hw_desc_ptr != expected_ptr)
+ txq_enable(txq);
+
+out:
+ __netif_tx_unlock(nq);
+
+ mp->work_tx_end &= ~(1 << txq->index);
+}
+
+static int txq_reclaim(struct tx_queue *txq, int budget, int force)
+{
+ struct mv643xx_eth_private *mp = txq_to_mp(txq);
+ struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
+ int reclaimed;
+
+ __netif_tx_lock_bh(nq);
+
+ reclaimed = 0;
+ while (reclaimed < budget && txq->tx_desc_count > 0) {
+ int tx_index;
+ struct tx_desc *desc;
+ u32 cmd_sts;
+ char desc_dma_map;
+
+ tx_index = txq->tx_used_desc;
+ desc = &txq->tx_desc_area[tx_index];
+ desc_dma_map = txq->tx_desc_mapping[tx_index];
+
+ cmd_sts = desc->cmd_sts;
+
+ if (cmd_sts & BUFFER_OWNED_BY_DMA) {
+ if (!force)
+ break;
+ desc->cmd_sts = cmd_sts & ~BUFFER_OWNED_BY_DMA;
+ }
+
+ txq->tx_used_desc = tx_index + 1;
+ if (txq->tx_used_desc == txq->tx_ring_size)
+ txq->tx_used_desc = 0;
+
+ reclaimed++;
+ txq->tx_desc_count--;
+
+ if (!IS_TSO_HEADER(txq, desc->buf_ptr)) {
+
+ if (desc_dma_map == DESC_DMA_MAP_PAGE)
+ dma_unmap_page(mp->dev->dev.parent,
+ desc->buf_ptr,
+ desc->byte_cnt,
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_single(mp->dev->dev.parent,
+ desc->buf_ptr,
+ desc->byte_cnt,
+ DMA_TO_DEVICE);
+ }
+
+ if (cmd_sts & TX_ENABLE_INTERRUPT) {
+ struct sk_buff *skb = __skb_dequeue(&txq->tx_skb);
+
+ if (!WARN_ON(!skb))
+ dev_consume_skb_any(skb);
+ }
+
+ if (cmd_sts & ERROR_SUMMARY) {
+ netdev_info(mp->dev, "tx error\n");
+ mp->dev->stats.tx_errors++;
+ }
+
+ }
+
+ __netif_tx_unlock_bh(nq);
+
+ if (reclaimed < budget)
+ mp->work_tx &= ~(1 << txq->index);
+
+ return reclaimed;
+}
+
+
+/* tx rate control **********************************************************/
+/*
+ * Set total maximum TX rate (shared by all TX queues for this port)
+ * to 'rate' bits per second, with a maximum burst of 'burst' bytes.
+ */
+static void tx_set_rate(struct mv643xx_eth_private *mp, int rate, int burst)
+{
+ int token_rate;
+ int mtu;
+ int bucket_size;
+
+ token_rate = ((rate / 1000) * 64) / (mp->t_clk / 1000);
+ if (token_rate > 1023)
+ token_rate = 1023;
+
+ mtu = (mp->dev->mtu + 255) >> 8;
+ if (mtu > 63)
+ mtu = 63;
+
+ bucket_size = (burst + 255) >> 8;
+ if (bucket_size > 65535)
+ bucket_size = 65535;
+
+ switch (mp->shared->tx_bw_control) {
+ case TX_BW_CONTROL_OLD_LAYOUT:
+ wrlp(mp, TX_BW_RATE, token_rate);
+ wrlp(mp, TX_BW_MTU, mtu);
+ wrlp(mp, TX_BW_BURST, bucket_size);
+ break;
+ case TX_BW_CONTROL_NEW_LAYOUT:
+ wrlp(mp, TX_BW_RATE_MOVED, token_rate);
+ wrlp(mp, TX_BW_MTU_MOVED, mtu);
+ wrlp(mp, TX_BW_BURST_MOVED, bucket_size);
+ break;
+ }
+}
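+
+/*
+ * Editorial note, not part of the upstream driver: a worked example of the
+ * conversion in tx_set_rate() above, under an assumed t_clk of 133000000 Hz.
+ * Requesting rate = 1000000000 bps with burst = 8192 bytes and an MTU of
+ * 1500 bytes gives
+ *
+ *	token_rate  = ((1000000000 / 1000) * 64) / (133000000 / 1000) = 481
+ *	mtu         = (1500 + 255) >> 8 = 6	(units of 256 bytes)
+ *	bucket_size = (8192 + 255) >> 8 = 32	(units of 256 bytes)
+ *
+ * all of which fall below the 1023/63/65535 caps applied above.
+ */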
+
+static void txq_set_rate(struct tx_queue *txq, int rate, int burst)
+{
+ struct mv643xx_eth_private *mp = txq_to_mp(txq);
+ int token_rate;
+ int bucket_size;
+
+ token_rate = ((rate / 1000) * 64) / (mp->t_clk / 1000);
+ if (token_rate > 1023)
+ token_rate = 1023;
+
+ bucket_size = (burst + 255) >> 8;
+ if (bucket_size > 65535)
+ bucket_size = 65535;
+
+ wrlp(mp, TXQ_BW_TOKENS(txq->index), token_rate << 14);
+ wrlp(mp, TXQ_BW_CONF(txq->index), (bucket_size << 10) | token_rate);
+}
+
+static void txq_set_fixed_prio_mode(struct tx_queue *txq)
+{
+ struct mv643xx_eth_private *mp = txq_to_mp(txq);
+ int off;
+ u32 val;
+
+ /*
+ * Turn on fixed priority mode.
+ */
+ off = 0;
+ switch (mp->shared->tx_bw_control) {
+ case TX_BW_CONTROL_OLD_LAYOUT:
+ off = TXQ_FIX_PRIO_CONF;
+ break;
+ case TX_BW_CONTROL_NEW_LAYOUT:
+ off = TXQ_FIX_PRIO_CONF_MOVED;
+ break;
+ }
+
+ if (off) {
+ val = rdlp(mp, off);
+ val |= 1 << txq->index;
+ wrlp(mp, off, val);
+ }
+}
+
+
+/* mii management interface *************************************************/
+static void mv643xx_eth_adjust_link(struct net_device *dev)
+{
+ struct mv643xx_eth_private *mp = netdev_priv(dev);
+ u32 pscr = rdlp(mp, PORT_SERIAL_CONTROL);
+ u32 autoneg_disable = FORCE_LINK_PASS |
+ DISABLE_AUTO_NEG_SPEED_GMII |
+ DISABLE_AUTO_NEG_FOR_FLOW_CTRL |
+ DISABLE_AUTO_NEG_FOR_DUPLEX;
+
+ if (dev->phydev->autoneg == AUTONEG_ENABLE) {
+ /* enable auto negotiation */
+ pscr &= ~autoneg_disable;
+ goto out_write;
+ }
+
+ pscr |= autoneg_disable;
+
+ if (dev->phydev->speed == SPEED_1000) {
+ /* force gigabit, half duplex not supported */
+ pscr |= SET_GMII_SPEED_TO_1000;
+ pscr |= SET_FULL_DUPLEX_MODE;
+ goto out_write;
+ }
+
+ pscr &= ~SET_GMII_SPEED_TO_1000;
+
+ if (dev->phydev->speed == SPEED_100)
+ pscr |= SET_MII_SPEED_TO_100;
+ else
+ pscr &= ~SET_MII_SPEED_TO_100;
+
+ if (dev->phydev->duplex == DUPLEX_FULL)
+ pscr |= SET_FULL_DUPLEX_MODE;
+ else
+ pscr &= ~SET_FULL_DUPLEX_MODE;
+
+out_write:
+ wrlp(mp, PORT_SERIAL_CONTROL, pscr);
+}
+
+/* statistics ***************************************************************/
+static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *dev)
+{
+ struct mv643xx_eth_private *mp = netdev_priv(dev);
+ struct net_device_stats *stats = &dev->stats;
+ unsigned long tx_packets = 0;
+ unsigned long tx_bytes = 0;
+ unsigned long tx_dropped = 0;
+ int i;
+
+ for (i = 0; i < mp->txq_count; i++) {
+ struct tx_queue *txq = mp->txq + i;
+
+ tx_packets += txq->tx_packets;
+ tx_bytes += txq->tx_bytes;
+ tx_dropped += txq->tx_dropped;
+ }
+
+ stats->tx_packets = tx_packets;
+ stats->tx_bytes = tx_bytes;
+ stats->tx_dropped = tx_dropped;
+
+ return stats;
+}
+
+static inline u32 mib_read(struct mv643xx_eth_private *mp, int offset)
+{
+ return rdl(mp, MIB_COUNTERS(mp->port_num) + offset);
+}
+
+static void mib_counters_clear(struct mv643xx_eth_private *mp)
+{
+ int i;
+
+ for (i = 0; i < 0x80; i += 4)
+ mib_read(mp, i);
+
+ /* Clear non MIB hw counters also */
+ rdlp(mp, RX_DISCARD_FRAME_CNT);
+ rdlp(mp, RX_OVERRUN_FRAME_CNT);
+}
+
+static void mib_counters_update(struct mv643xx_eth_private *mp)
+{
+ struct mib_counters *p = &mp->mib_counters;
+
+ spin_lock_bh(&mp->mib_counters_lock);
+ p->good_octets_received += mib_read(mp, 0x00);
+ p->bad_octets_received += mib_read(mp, 0x08);
+ p->internal_mac_transmit_err += mib_read(mp, 0x0c);
+ p->good_frames_received += mib_read(mp, 0x10);
+ p->bad_frames_received += mib_read(mp, 0x14);
+ p->broadcast_frames_received += mib_read(mp, 0x18);
+ p->multicast_frames_received += mib_read(mp, 0x1c);
+ p->frames_64_octets += mib_read(mp, 0x20);
+ p->frames_65_to_127_octets += mib_read(mp, 0x24);
+ p->frames_128_to_255_octets += mib_read(mp, 0x28);
+ p->frames_256_to_511_octets += mib_read(mp, 0x2c);
+ p->frames_512_to_1023_octets += mib_read(mp, 0x30);
+ p->frames_1024_to_max_octets += mib_read(mp, 0x34);
+ p->good_octets_sent += mib_read(mp, 0x38);
+ p->good_frames_sent += mib_read(mp, 0x40);
+ p->excessive_collision += mib_read(mp, 0x44);
+ p->multicast_frames_sent += mib_read(mp, 0x48);
+ p->broadcast_frames_sent += mib_read(mp, 0x4c);
+ p->unrec_mac_control_received += mib_read(mp, 0x50);
+ p->fc_sent += mib_read(mp, 0x54);
+ p->good_fc_received += mib_read(mp, 0x58);
+ p->bad_fc_received += mib_read(mp, 0x5c);
+ p->undersize_received += mib_read(mp, 0x60);
+ p->fragments_received += mib_read(mp, 0x64);
+ p->oversize_received += mib_read(mp, 0x68);
+ p->jabber_received += mib_read(mp, 0x6c);
+ p->mac_receive_error += mib_read(mp, 0x70);
+ p->bad_crc_event += mib_read(mp, 0x74);
+ p->collision += mib_read(mp, 0x78);
+ p->late_collision += mib_read(mp, 0x7c);
+ /* Non MIB hardware counters */
+ p->rx_discard += rdlp(mp, RX_DISCARD_FRAME_CNT);
+ p->rx_overrun += rdlp(mp, RX_OVERRUN_FRAME_CNT);
+ spin_unlock_bh(&mp->mib_counters_lock);
+}
+
+static void mib_counters_timer_wrapper(struct timer_list *t)
+{
+ struct mv643xx_eth_private *mp = from_timer(mp, t, mib_counters_timer);
+ mib_counters_update(mp);
+ mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ);
+}
+
+
+/* interrupt coalescing *****************************************************/
+/*
+ * Hardware coalescing parameters are set in units of 64 t_clk
+ * cycles. I.e.:
+ *
+ * coal_delay_in_usec = 64000000 * register_value / t_clk_rate
+ *
+ * register_value = coal_delay_in_usec * t_clk_rate / 64000000
+ *
+ * In the ->set*() methods, we round the computed register value
+ * to the nearest integer.
+ */
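+
+/*
+ * Editorial sketch, not part of the upstream driver: a self-contained helper
+ * mirroring the usec -> register conversion used by set_rx_coal() and
+ * set_tx_coal() below. The 64000000 constant and the +31999999 rounding
+ * follow the formula above; the hardware-specific clamping of the result is
+ * omitted, and the block is kept under #if 0 so it is never compiled.
+ */
+#if 0
+static unsigned int coal_usec_to_reg(unsigned int usec, unsigned int t_clk_hz)
+{
+	unsigned long long temp = (unsigned long long)usec * t_clk_hz;
+
+	/* e.g. coal_usec_to_reg(100, 133000000) == 208 for a 133 MHz t_clk */
+	return (unsigned int)((temp + 31999999) / 64000000);
+}
+#endif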
+static unsigned int get_rx_coal(struct mv643xx_eth_private *mp)
+{
+ u32 val = rdlp(mp, SDMA_CONFIG);
+ u64 temp;
+
+ if (mp->shared->extended_rx_coal_limit)
+ temp = ((val & 0x02000000) >> 10) | ((val & 0x003fff80) >> 7);
+ else
+ temp = (val & 0x003fff00) >> 8;
+
+ temp *= 64000000;
+ temp += mp->t_clk / 2;
+ do_div(temp, mp->t_clk);
+
+ return (unsigned int)temp;
+}
+
+static void set_rx_coal(struct mv643xx_eth_private *mp, unsigned int usec)
+{
+ u64 temp;
+ u32 val;
+
+ temp = (u64)usec * mp->t_clk;
+ temp += 31999999;
+ do_div(temp, 64000000);
+
+ val = rdlp(mp, SDMA_CONFIG);
+ if (mp->shared->extended_rx_coal_limit) {
+ if (temp > 0xffff)
+ temp = 0xffff;
+ val &= ~0x023fff80;
+ val |= (temp & 0x8000) << 10;
+ val |= (temp & 0x7fff) << 7;
+ } else {
+ if (temp > 0x3fff)
+ temp = 0x3fff;
+ val &= ~0x003fff00;
+ val |= (temp & 0x3fff) << 8;
+ }
+ wrlp(mp, SDMA_CONFIG, val);
+}
+
+static unsigned int get_tx_coal(struct mv643xx_eth_private *mp)
+{
+ u64 temp;
+
+ temp = (rdlp(mp, TX_FIFO_URGENT_THRESHOLD) & 0x3fff0) >> 4;
+ temp *= 64000000;
+ temp += mp->t_clk / 2;
+ do_div(temp, mp->t_clk);
+
+ return (unsigned int)temp;
+}
+
+static void set_tx_coal(struct mv643xx_eth_private *mp, unsigned int usec)
+{
+ u64 temp;
+
+ temp = (u64)usec * mp->t_clk;
+ temp += 31999999;
+ do_div(temp, 64000000);
+
+ if (temp > 0x3fff)
+ temp = 0x3fff;
+
+ wrlp(mp, TX_FIFO_URGENT_THRESHOLD, temp << 4);
+}
+
+
+/* ethtool ******************************************************************/
+struct mv643xx_eth_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int sizeof_stat;
+ int netdev_off;
+ int mp_off;
+};
+
+#define SSTAT(m) \
+ { #m, sizeof_field(struct net_device_stats, m), \
+ offsetof(struct net_device, stats.m), -1 }
+
+#define MIBSTAT(m) \
+ { #m, sizeof_field(struct mib_counters, m), \
+ -1, offsetof(struct mv643xx_eth_private, mib_counters.m) }
+
+static const struct mv643xx_eth_stats mv643xx_eth_stats[] = {
+ SSTAT(rx_packets),
+ SSTAT(tx_packets),
+ SSTAT(rx_bytes),
+ SSTAT(tx_bytes),
+ SSTAT(rx_errors),
+ SSTAT(tx_errors),
+ SSTAT(rx_dropped),
+ SSTAT(tx_dropped),
+ MIBSTAT(good_octets_received),
+ MIBSTAT(bad_octets_received),
+ MIBSTAT(internal_mac_transmit_err),
+ MIBSTAT(good_frames_received),
+ MIBSTAT(bad_frames_received),
+ MIBSTAT(broadcast_frames_received),
+ MIBSTAT(multicast_frames_received),
+ MIBSTAT(frames_64_octets),
+ MIBSTAT(frames_65_to_127_octets),
+ MIBSTAT(frames_128_to_255_octets),
+ MIBSTAT(frames_256_to_511_octets),
+ MIBSTAT(frames_512_to_1023_octets),
+ MIBSTAT(frames_1024_to_max_octets),
+ MIBSTAT(good_octets_sent),
+ MIBSTAT(good_frames_sent),
+ MIBSTAT(excessive_collision),
+ MIBSTAT(multicast_frames_sent),
+ MIBSTAT(broadcast_frames_sent),
+ MIBSTAT(unrec_mac_control_received),
+ MIBSTAT(fc_sent),
+ MIBSTAT(good_fc_received),
+ MIBSTAT(bad_fc_received),
+ MIBSTAT(undersize_received),
+ MIBSTAT(fragments_received),
+ MIBSTAT(oversize_received),
+ MIBSTAT(jabber_received),
+ MIBSTAT(mac_receive_error),
+ MIBSTAT(bad_crc_event),
+ MIBSTAT(collision),
+ MIBSTAT(late_collision),
+ MIBSTAT(rx_discard),
+ MIBSTAT(rx_overrun),
+};
+
+static int
+mv643xx_eth_get_link_ksettings_phy(struct mv643xx_eth_private *mp,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct net_device *dev = mp->dev;
+
+ phy_ethtool_ksettings_get(dev->phydev, cmd);
+
+ /*
+ * The MAC does not support 1000baseT_Half.
+ */
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+ cmd->link_modes.supported);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+ cmd->link_modes.advertising);
+
+ return 0;
+}
+
+static int
+mv643xx_eth_get_link_ksettings_phyless(struct mv643xx_eth_private *mp,
+ struct ethtool_link_ksettings *cmd)
+{
+ u32 port_status;
+ u32 supported, advertising;
+
+ port_status = rdlp(mp, PORT_STATUS);
+
+ supported = SUPPORTED_MII;
+ advertising = ADVERTISED_MII;
+ switch (port_status & PORT_SPEED_MASK) {
+ case PORT_SPEED_10:
+ cmd->base.speed = SPEED_10;
+ break;
+ case PORT_SPEED_100:
+ cmd->base.speed = SPEED_100;
+ break;
+ case PORT_SPEED_1000:
+ cmd->base.speed = SPEED_1000;
+ break;
+ default:
+ cmd->base.speed = -1;
+ break;
+ }
+ cmd->base.duplex = (port_status & FULL_DUPLEX) ?
+ DUPLEX_FULL : DUPLEX_HALF;
+ cmd->base.port = PORT_MII;
+ cmd->base.phy_address = 0;
+ cmd->base.autoneg = AUTONEG_DISABLE;
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
+
+ return 0;
+}
+
+static void
+mv643xx_eth_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ wol->supported = 0;
+ wol->wolopts = 0;
+ if (dev->phydev)
+ phy_ethtool_get_wol(dev->phydev, wol);
+}
+
+static int
+mv643xx_eth_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ int err;
+
+ if (!dev->phydev)
+ return -EOPNOTSUPP;
+
+ err = phy_ethtool_set_wol(dev->phydev, wol);
+ /* Given that mv643xx_eth works without the marvell-specific PHY driver,
+ * this debugging hint is useful to have.
+ */
+ if (err == -EOPNOTSUPP)
+ netdev_info(dev, "The PHY does not support set_wol, was CONFIG_MARVELL_PHY enabled?\n");
+ return err;
+}
+
+static int
+mv643xx_eth_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct mv643xx_eth_private *mp = netdev_priv(dev);
+
+ if (dev->phydev)
+ return mv643xx_eth_get_link_ksettings_phy(mp, cmd);
+ else
+ return mv643xx_eth_get_link_ksettings_phyless(mp, cmd);
+}
+
+static int
+mv643xx_eth_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct ethtool_link_ksettings c = *cmd;
+ u32 advertising;
+ int ret;
+
+ if (!dev->phydev)
+ return -EINVAL;
+
+ /*
+ * The MAC does not support 1000baseT_Half.
+ */
+ ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ c.link_modes.advertising);
+ advertising &= ~ADVERTISED_1000baseT_Half;
+ ethtool_convert_legacy_u32_to_link_mode(c.link_modes.advertising,
+ advertising);
+
+ ret = phy_ethtool_ksettings_set(dev->phydev, &c);
+ if (!ret)
+ mv643xx_eth_adjust_link(dev);
+ return ret;
+}
+
+static void mv643xx_eth_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ strscpy(drvinfo->driver, mv643xx_eth_driver_name,
+ sizeof(drvinfo->driver));
+ strscpy(drvinfo->version, mv643xx_eth_driver_version,
+ sizeof(drvinfo->version));
+ strscpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
+ strscpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info));
+}
+
+static int mv643xx_eth_get_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *ec,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct mv643xx_eth_private *mp = netdev_priv(dev);
+
+ ec->rx_coalesce_usecs = get_rx_coal(mp);
+ ec->tx_coalesce_usecs = get_tx_coal(mp);
+
+ return 0;
+}
+
+static int mv643xx_eth_set_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *ec,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct mv643xx_eth_private *mp = netdev_priv(dev);
+
+ set_rx_coal(mp, ec->rx_coalesce_usecs);
+ set_tx_coal(mp, ec->tx_coalesce_usecs);
+
+ return 0;
+}
+
+static void
+mv643xx_eth_get_ringparam(struct net_device *dev, struct ethtool_ringparam *er,
+ struct kernel_ethtool_ringparam *kernel_er,
+ struct netlink_ext_ack *extack)
+{
+ struct mv643xx_eth_private *mp = netdev_priv(dev);
+
+ er->rx_max_pending = 4096;
+ er->tx_max_pending = 4096;
+
+ er->rx_pending = mp->rx_ring_size;
+ er->tx_pending = mp->tx_ring_size;
+}
+
+static int
+mv643xx_eth_set_ringparam(struct net_device *dev, struct ethtool_ringparam *er,
+ struct kernel_ethtool_ringparam *kernel_er,
+ struct netlink_ext_ack *extack)
+{
+ struct mv643xx_eth_private *mp = netdev_priv(dev);
+
+ if (er->rx_mini_pending || er->rx_jumbo_pending)
+ return -EINVAL;
+
+ mp->rx_ring_size = min(er->rx_pending, 4096U);
+ mp->tx_ring_size = clamp_t(unsigned int, er->tx_pending,
+ MV643XX_MAX_SKB_DESCS * 2, 4096);
+ if (mp->tx_ring_size != er->tx_pending)
+ netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
+ mp->tx_ring_size, er->tx_pending);
+
+ if (netif_running(dev)) {
+ mv643xx_eth_stop(dev);
+ if (mv643xx_eth_open(dev)) {
+ netdev_err(dev,
+ "fatal error on re-opening device after ring param change\n");
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+
+static int
+mv643xx_eth_set_features(struct net_device *dev, netdev_features_t features)
+{
+ struct mv643xx_eth_private *mp = netdev_priv(dev);
+ bool rx_csum = features & NETIF_F_RXCSUM;
+
+ wrlp(mp, PORT_CONFIG, rx_csum ? 0x02000000 : 0x00000000);
+
+ return 0;
+}
+
+static void mv643xx_eth_get_strings(struct net_device *dev,
+ uint32_t stringset, uint8_t *data)
+{
+ int i;
+
+ if (stringset == ETH_SS_STATS) {
+ for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) {
+ memcpy(data + i * ETH_GSTRING_LEN,
+ mv643xx_eth_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ }
+ }
+}
+
+static void mv643xx_eth_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats,
+ uint64_t *data)
+{
+ struct mv643xx_eth_private *mp = netdev_priv(dev);
+ int i;
+
+ mv643xx_eth_get_stats(dev);
+ mib_counters_update(mp);
+
+ for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) {
+ const struct mv643xx_eth_stats *stat;
+ void *p;
+
+ stat = mv643xx_eth_stats + i;
+
+ if (stat->netdev_off >= 0)
+ p = ((void *)mp->dev) + stat->netdev_off;
+ else
+ p = ((void *)mp) + stat->mp_off;
+
+ data[i] = (stat->sizeof_stat == 8) ?
+ *(uint64_t *)p : *(uint32_t *)p;
+ }
+}
+
+static int mv643xx_eth_get_sset_count(struct net_device *dev, int sset)
+{
+ if (sset == ETH_SS_STATS)
+ return ARRAY_SIZE(mv643xx_eth_stats);
+
+ return -EOPNOTSUPP;
+}
+
+static const struct ethtool_ops mv643xx_eth_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS,
+ .get_drvinfo = mv643xx_eth_get_drvinfo,
+ .nway_reset = phy_ethtool_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_coalesce = mv643xx_eth_get_coalesce,
+ .set_coalesce = mv643xx_eth_set_coalesce,
+ .get_ringparam = mv643xx_eth_get_ringparam,
+ .set_ringparam = mv643xx_eth_set_ringparam,
+ .get_strings = mv643xx_eth_get_strings,
+ .get_ethtool_stats = mv643xx_eth_get_ethtool_stats,
+ .get_sset_count = mv643xx_eth_get_sset_count,
+ .get_ts_info = ethtool_op_get_ts_info,
+ .get_wol = mv643xx_eth_get_wol,
+ .set_wol = mv643xx_eth_set_wol,
+ .get_link_ksettings = mv643xx_eth_get_link_ksettings,
+ .set_link_ksettings = mv643xx_eth_set_link_ksettings,
+};
+
+
+/* address handling *********************************************************/
+static void uc_addr_get(struct mv643xx_eth_private *mp, unsigned char *addr)
+{
+ unsigned int mac_h = rdlp(mp, MAC_ADDR_HIGH);
+ unsigned int mac_l = rdlp(mp, MAC_ADDR_LOW);
+
+ addr[0] = (mac_h >> 24) & 0xff;
+ addr[1] = (mac_h >> 16) & 0xff;
+ addr[2] = (mac_h >> 8) & 0xff;
+ addr[3] = mac_h & 0xff;
+ addr[4] = (mac_l >> 8) & 0xff;
+ addr[5] = mac_l & 0xff;
+}
+
+static void uc_addr_set(struct mv643xx_eth_private *mp, const u8 *addr)
+{
+ wrlp(mp, MAC_ADDR_HIGH,
+ (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3]);
+ wrlp(mp, MAC_ADDR_LOW, (addr[4] << 8) | addr[5]);
+}
+
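+/* Build a 16-bit mask of which low-nibble values of the last address byte
+ * to accept.  The hardware unicast filter can only distinguish addresses
+ * that differ in those four bits, so return 0 (fall back to unicast
+ * promiscuous mode) if the interface is promiscuous or any secondary
+ * address differs elsewhere.
+ */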
+static u32 uc_addr_filter_mask(struct net_device *dev)
+{
+ struct netdev_hw_addr *ha;
+ u32 nibbles;
+
+ if (dev->flags & IFF_PROMISC)
+ return 0;
+
+ nibbles = 1 << (dev->dev_addr[5] & 0x0f);
+ netdev_for_each_uc_addr(ha, dev) {
+ if (memcmp(dev->dev_addr, ha->addr, 5))
+ return 0;
+ if ((dev->dev_addr[5] ^ ha->addr[5]) & 0xf0)
+ return 0;
+
+ nibbles |= 1 << (ha->addr[5] & 0x0f);
+ }
+
+ return nibbles;
+}
+
+static void mv643xx_eth_program_unicast_filter(struct net_device *dev)
+{
+ struct mv643xx_eth_private *mp = netdev_priv(dev);
+ u32 port_config;
+ u32 nibbles;
+ int i;
+
+ uc_addr_set(mp, dev->dev_addr);
+
+ port_config = rdlp(mp, PORT_CONFIG) & ~UNICAST_PROMISCUOUS_MODE;
+
+ nibbles = uc_addr_filter_mask(dev);
+ if (!nibbles) {
+ port_config |= UNICAST_PROMISCUOUS_MODE;
+ nibbles = 0xffff;
+ }
+
+ for (i = 0; i < 16; i += 4) {
+ int off = UNICAST_TABLE(mp->port_num) + i;
+ u32 v;
+
+ v = 0;
+ if (nibbles & 1)
+ v |= 0x00000001;
+ if (nibbles & 2)
+ v |= 0x00000100;
+ if (nibbles & 4)
+ v |= 0x00010000;
+ if (nibbles & 8)
+ v |= 0x01000000;
+ nibbles >>= 4;
+
+ wrl(mp, off, v);
+ }
+
+ wrlp(mp, PORT_CONFIG, port_config);
+}
+
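+/* 8-bit CRC (polynomial x^8 + x^2 + x + 1) of a MAC address, used to index
+ * the 256-entry "other multicast" hash table.
+ */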
+static int addr_crc(unsigned char *addr)
+{
+ int crc = 0;
+ int i;
+
+ for (i = 0; i < 6; i++) {
+ int j;
+
+ crc = (crc ^ addr[i]) << 8;
+ for (j = 7; j >= 0; j--) {
+ if (crc & (0x100 << j))
+ crc ^= 0x107 << j;
+ }
+ }
+
+ return crc;
+}
+
+static void mv643xx_eth_program_multicast_filter(struct net_device *dev)
+{
+ struct mv643xx_eth_private *mp = netdev_priv(dev);
+ u32 *mc_spec;
+ u32 *mc_other;
+ struct netdev_hw_addr *ha;
+ int i;
+
+ if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI))
+ goto promiscuous;
+
+ /* Allocate both mc_spec and mc_other tables */
+ mc_spec = kcalloc(128, sizeof(u32), GFP_ATOMIC);
+ if (!mc_spec)
+ goto promiscuous;
+ mc_other = &mc_spec[64];
+
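+	/* Addresses of the form 01:00:5e:00:00:xx hit the "special" table,
+	 * indexed directly by the last byte; all other multicast addresses
+	 * go through the CRC-hashed "other" table.
+	 */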
+ netdev_for_each_mc_addr(ha, dev) {
+ u8 *a = ha->addr;
+ u32 *table;
+ u8 entry;
+
+ if (memcmp(a, "\x01\x00\x5e\x00\x00", 5) == 0) {
+ table = mc_spec;
+ entry = a[5];
+ } else {
+ table = mc_other;
+ entry = addr_crc(a);
+ }
+
+ table[entry >> 2] |= 1 << (8 * (entry & 3));
+ }
+
+ for (i = 0; i < 64; i++) {
+ wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i * sizeof(u32),
+ mc_spec[i]);
+ wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i * sizeof(u32),
+ mc_other[i]);
+ }
+
+ kfree(mc_spec);
+ return;
+
+promiscuous:
+ for (i = 0; i < 64; i++) {
+ wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i * sizeof(u32),
+ 0x01010101u);
+ wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i * sizeof(u32),
+ 0x01010101u);
+ }
+}
+
+static void mv643xx_eth_set_rx_mode(struct net_device *dev)
+{
+ mv643xx_eth_program_unicast_filter(dev);
+ mv643xx_eth_program_multicast_filter(dev);
+}
+
+static int mv643xx_eth_set_mac_address(struct net_device *dev, void *addr)
+{
+ struct sockaddr *sa = addr;
+
+ if (!is_valid_ether_addr(sa->sa_data))
+ return -EADDRNOTAVAIL;
+
+ eth_hw_addr_set(dev, sa->sa_data);
+
+ netif_addr_lock_bh(dev);
+ mv643xx_eth_program_unicast_filter(dev);
+ netif_addr_unlock_bh(dev);
+
+ return 0;
+}
+
+
+/* rx/tx queue initialisation ***********************************************/
+static int rxq_init(struct mv643xx_eth_private *mp, int index)
+{
+ struct rx_queue *rxq = mp->rxq + index;
+ struct rx_desc *rx_desc;
+ int size;
+ int i;
+
+ rxq->index = index;
+
+ rxq->rx_ring_size = mp->rx_ring_size;
+
+ rxq->rx_desc_count = 0;
+ rxq->rx_curr_desc = 0;
+ rxq->rx_used_desc = 0;
+
+ size = rxq->rx_ring_size * sizeof(struct rx_desc);
+
+ if (index == 0 && size <= mp->rx_desc_sram_size) {
+ rxq->rx_desc_area = ioremap(mp->rx_desc_sram_addr,
+ mp->rx_desc_sram_size);
+ rxq->rx_desc_dma = mp->rx_desc_sram_addr;
+ } else {
+ rxq->rx_desc_area = dma_alloc_coherent(mp->dev->dev.parent,
+ size, &rxq->rx_desc_dma,
+ GFP_KERNEL);
+ }
+
+ if (rxq->rx_desc_area == NULL) {
+ netdev_err(mp->dev,
+ "can't allocate rx ring (%d bytes)\n", size);
+ goto out;
+ }
+ memset(rxq->rx_desc_area, 0, size);
+
+ rxq->rx_desc_area_size = size;
+ rxq->rx_skb = kcalloc(rxq->rx_ring_size, sizeof(*rxq->rx_skb),
+ GFP_KERNEL);
+ if (rxq->rx_skb == NULL)
+ goto out_free;
+
+ rx_desc = rxq->rx_desc_area;
+ for (i = 0; i < rxq->rx_ring_size; i++) {
+ int nexti;
+
+ nexti = i + 1;
+ if (nexti == rxq->rx_ring_size)
+ nexti = 0;
+
+ rx_desc[i].next_desc_ptr = rxq->rx_desc_dma +
+ nexti * sizeof(struct rx_desc);
+ }
+
+ return 0;
+
+
+out_free:
+ if (index == 0 && size <= mp->rx_desc_sram_size)
+ iounmap(rxq->rx_desc_area);
+ else
+ dma_free_coherent(mp->dev->dev.parent, size,
+ rxq->rx_desc_area,
+ rxq->rx_desc_dma);
+
+out:
+ return -ENOMEM;
+}
+
+static void rxq_deinit(struct rx_queue *rxq)
+{
+ struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
+ int i;
+
+ rxq_disable(rxq);
+
+ for (i = 0; i < rxq->rx_ring_size; i++) {
+ if (rxq->rx_skb[i]) {
+ dev_consume_skb_any(rxq->rx_skb[i]);
+ rxq->rx_desc_count--;
+ }
+ }
+
+ if (rxq->rx_desc_count) {
+ netdev_err(mp->dev, "error freeing rx ring -- %d skbs stuck\n",
+ rxq->rx_desc_count);
+ }
+
+ if (rxq->index == 0 &&
+ rxq->rx_desc_area_size <= mp->rx_desc_sram_size)
+ iounmap(rxq->rx_desc_area);
+ else
+ dma_free_coherent(mp->dev->dev.parent, rxq->rx_desc_area_size,
+ rxq->rx_desc_area, rxq->rx_desc_dma);
+
+ kfree(rxq->rx_skb);
+}
+
+static int txq_init(struct mv643xx_eth_private *mp, int index)
+{
+ struct tx_queue *txq = mp->txq + index;
+ struct tx_desc *tx_desc;
+ int size;
+ int ret;
+ int i;
+
+ txq->index = index;
+
+ txq->tx_ring_size = mp->tx_ring_size;
+
+ /* A queue must always have room for at least one skb.
+	 * Therefore, stop the queue when the number of free entries
+	 * reaches the maximum number of descriptors per skb.
+ */
+ txq->tx_stop_threshold = txq->tx_ring_size - MV643XX_MAX_SKB_DESCS;
+ txq->tx_wake_threshold = txq->tx_stop_threshold / 2;
+
+ txq->tx_desc_count = 0;
+ txq->tx_curr_desc = 0;
+ txq->tx_used_desc = 0;
+
+ size = txq->tx_ring_size * sizeof(struct tx_desc);
+
+ if (index == 0 && size <= mp->tx_desc_sram_size) {
+ txq->tx_desc_area = ioremap(mp->tx_desc_sram_addr,
+ mp->tx_desc_sram_size);
+ txq->tx_desc_dma = mp->tx_desc_sram_addr;
+ } else {
+ txq->tx_desc_area = dma_alloc_coherent(mp->dev->dev.parent,
+ size, &txq->tx_desc_dma,
+ GFP_KERNEL);
+ }
+
+ if (txq->tx_desc_area == NULL) {
+ netdev_err(mp->dev,
+ "can't allocate tx ring (%d bytes)\n", size);
+ return -ENOMEM;
+ }
+ memset(txq->tx_desc_area, 0, size);
+
+ txq->tx_desc_area_size = size;
+
+ tx_desc = txq->tx_desc_area;
+ for (i = 0; i < txq->tx_ring_size; i++) {
+ struct tx_desc *txd = tx_desc + i;
+ int nexti;
+
+ nexti = i + 1;
+ if (nexti == txq->tx_ring_size)
+ nexti = 0;
+
+ txd->cmd_sts = 0;
+ txd->next_desc_ptr = txq->tx_desc_dma +
+ nexti * sizeof(struct tx_desc);
+ }
+
+ txq->tx_desc_mapping = kcalloc(txq->tx_ring_size, sizeof(char),
+ GFP_KERNEL);
+ if (!txq->tx_desc_mapping) {
+ ret = -ENOMEM;
+ goto err_free_desc_area;
+ }
+
+ /* Allocate DMA buffers for TSO MAC/IP/TCP headers */
+ txq->tso_hdrs = dma_alloc_coherent(mp->dev->dev.parent,
+ txq->tx_ring_size * TSO_HEADER_SIZE,
+ &txq->tso_hdrs_dma, GFP_KERNEL);
+ if (txq->tso_hdrs == NULL) {
+ ret = -ENOMEM;
+ goto err_free_desc_mapping;
+ }
+ skb_queue_head_init(&txq->tx_skb);
+
+ return 0;
+
+err_free_desc_mapping:
+ kfree(txq->tx_desc_mapping);
+err_free_desc_area:
+ if (index == 0 && size <= mp->tx_desc_sram_size)
+ iounmap(txq->tx_desc_area);
+ else
+ dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
+ txq->tx_desc_area, txq->tx_desc_dma);
+ return ret;
+}
+
+static void txq_deinit(struct tx_queue *txq)
+{
+ struct mv643xx_eth_private *mp = txq_to_mp(txq);
+
+ txq_disable(txq);
+ txq_reclaim(txq, txq->tx_ring_size, 1);
+
+ BUG_ON(txq->tx_used_desc != txq->tx_curr_desc);
+
+ if (txq->index == 0 &&
+ txq->tx_desc_area_size <= mp->tx_desc_sram_size)
+ iounmap(txq->tx_desc_area);
+ else
+ dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
+ txq->tx_desc_area, txq->tx_desc_dma);
+ kfree(txq->tx_desc_mapping);
+
+ if (txq->tso_hdrs)
+ dma_free_coherent(mp->dev->dev.parent,
+ txq->tx_ring_size * TSO_HEADER_SIZE,
+ txq->tso_hdrs, txq->tso_hdrs_dma);
+}
+
+
+/* netdev ops and related ***************************************************/
+static int mv643xx_eth_collect_events(struct mv643xx_eth_private *mp)
+{
+ u32 int_cause;
+ u32 int_cause_ext;
+
+ int_cause = rdlp(mp, INT_CAUSE) & mp->int_mask;
+ if (int_cause == 0)
+ return 0;
+
+ int_cause_ext = 0;
+ if (int_cause & INT_EXT) {
+ int_cause &= ~INT_EXT;
+ int_cause_ext = rdlp(mp, INT_CAUSE_EXT);
+ }
+
+ if (int_cause) {
+ wrlp(mp, INT_CAUSE, ~int_cause);
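+		/* Record end-of-TX work only for queues whose enable bit in
+		 * TXQ_COMMAND has already cleared; shift the per-queue RX
+		 * cause bits down into a plain queue bitmap.
+		 */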
+ mp->work_tx_end |= ((int_cause & INT_TX_END) >> 19) &
+ ~(rdlp(mp, TXQ_COMMAND) & 0xff);
+ mp->work_rx |= (int_cause & INT_RX) >> 2;
+ }
+
+ int_cause_ext &= INT_EXT_LINK_PHY | INT_EXT_TX;
+ if (int_cause_ext) {
+ wrlp(mp, INT_CAUSE_EXT, ~int_cause_ext);
+ if (int_cause_ext & INT_EXT_LINK_PHY)
+ mp->work_link = 1;
+ mp->work_tx |= int_cause_ext & INT_EXT_TX;
+ }
+
+ return 1;
+}
+
+static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id)
+{
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct mv643xx_eth_private *mp = netdev_priv(dev);
+
+ if (unlikely(!mv643xx_eth_collect_events(mp)))
+ return IRQ_NONE;
+
+ wrlp(mp, INT_MASK, 0);
+ napi_schedule(&mp->napi);
+
+ return IRQ_HANDLED;
+}
+
+static void handle_link_event(struct mv643xx_eth_private *mp)
+{
+ struct net_device *dev = mp->dev;
+ u32 port_status;
+ int speed;
+ int duplex;
+ int fc;
+
+ port_status = rdlp(mp, PORT_STATUS);
+ if (!(port_status & LINK_UP)) {
+ if (netif_carrier_ok(dev)) {
+ int i;
+
+ netdev_info(dev, "link down\n");
+
+ netif_carrier_off(dev);
+
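+			/* The link is gone, so force-reclaim everything that
+			 * is still queued for transmit and resync the
+			 * hardware descriptor pointers.
+			 */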
+ for (i = 0; i < mp->txq_count; i++) {
+ struct tx_queue *txq = mp->txq + i;
+
+ txq_reclaim(txq, txq->tx_ring_size, 1);
+ txq_reset_hw_ptr(txq);
+ }
+ }
+ return;
+ }
+
+ switch (port_status & PORT_SPEED_MASK) {
+ case PORT_SPEED_10:
+ speed = 10;
+ break;
+ case PORT_SPEED_100:
+ speed = 100;
+ break;
+ case PORT_SPEED_1000:
+ speed = 1000;
+ break;
+ default:
+ speed = -1;
+ break;
+ }
+ duplex = (port_status & FULL_DUPLEX) ? 1 : 0;
+ fc = (port_status & FLOW_CONTROL_ENABLED) ? 1 : 0;
+
+ netdev_info(dev, "link up, %d Mb/s, %s duplex, flow control %sabled\n",
+ speed, duplex ? "full" : "half", fc ? "en" : "dis");
+
+ if (!netif_carrier_ok(dev))
+ netif_carrier_on(dev);
+}
+
+static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
+{
+ struct mv643xx_eth_private *mp;
+ int work_done;
+
+ mp = container_of(napi, struct mv643xx_eth_private, napi);
+
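+	/* If an earlier RX refill failure left the OOM flag set, clear it
+	 * and cancel the retry timer; both are re-armed below if refills
+	 * keep failing.
+	 */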
+ if (unlikely(mp->oom)) {
+ mp->oom = 0;
+ del_timer(&mp->rx_oom);
+ }
+
+ work_done = 0;
+ while (work_done < budget) {
+ u8 queue_mask;
+ int queue;
+ int work_tbd;
+
+ if (mp->work_link) {
+ mp->work_link = 0;
+ handle_link_event(mp);
+ work_done++;
+ continue;
+ }
+
+ queue_mask = mp->work_tx | mp->work_tx_end | mp->work_rx;
+ if (likely(!mp->oom))
+ queue_mask |= mp->work_rx_refill;
+
+ if (!queue_mask) {
+ if (mv643xx_eth_collect_events(mp))
+ continue;
+ break;
+ }
+
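+		/* Service the highest-numbered queue with pending work
+		 * first.
+		 */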
+ queue = fls(queue_mask) - 1;
+ queue_mask = 1 << queue;
+
+ work_tbd = budget - work_done;
+ if (work_tbd > 16)
+ work_tbd = 16;
+
+ if (mp->work_tx_end & queue_mask) {
+ txq_kick(mp->txq + queue);
+ } else if (mp->work_tx & queue_mask) {
+ work_done += txq_reclaim(mp->txq + queue, work_tbd, 0);
+ txq_maybe_wake(mp->txq + queue);
+ } else if (mp->work_rx & queue_mask) {
+ work_done += rxq_process(mp->rxq + queue, work_tbd);
+ } else if (!mp->oom && (mp->work_rx_refill & queue_mask)) {
+ work_done += rxq_refill(mp->rxq + queue, work_tbd);
+ } else {
+ BUG();
+ }
+ }
+
+ if (work_done < budget) {
+ if (mp->oom)
+ mod_timer(&mp->rx_oom, jiffies + (HZ / 10));
+ napi_complete_done(napi, work_done);
+ wrlp(mp, INT_MASK, mp->int_mask);
+ }
+
+ return work_done;
+}
+
+static inline void oom_timer_wrapper(struct timer_list *t)
+{
+ struct mv643xx_eth_private *mp = from_timer(mp, t, rx_oom);
+
+ napi_schedule(&mp->napi);
+}
+
+static void port_start(struct mv643xx_eth_private *mp)
+{
+ struct net_device *dev = mp->dev;
+ u32 pscr;
+ int i;
+
+ /*
+ * Perform PHY reset, if there is a PHY.
+ */
+ if (dev->phydev) {
+ struct ethtool_link_ksettings cmd;
+
+ mv643xx_eth_get_link_ksettings(dev, &cmd);
+ phy_init_hw(dev->phydev);
+ mv643xx_eth_set_link_ksettings(
+ dev, (const struct ethtool_link_ksettings *)&cmd);
+ phy_start(dev->phydev);
+ }
+
+ /*
+ * Configure basic link parameters.
+ */
+ pscr = rdlp(mp, PORT_SERIAL_CONTROL);
+
+ pscr |= SERIAL_PORT_ENABLE;
+ wrlp(mp, PORT_SERIAL_CONTROL, pscr);
+
+ pscr |= DO_NOT_FORCE_LINK_FAIL;
+ if (!dev->phydev)
+ pscr |= FORCE_LINK_PASS;
+ wrlp(mp, PORT_SERIAL_CONTROL, pscr);
+
+ /*
+ * Configure TX path and queues.
+ */
+ tx_set_rate(mp, 1000000000, 16777216);
+ for (i = 0; i < mp->txq_count; i++) {
+ struct tx_queue *txq = mp->txq + i;
+
+ txq_reset_hw_ptr(txq);
+ txq_set_rate(txq, 1000000000, 16777216);
+ txq_set_fixed_prio_mode(txq);
+ }
+
+ /*
+ * Receive all unmatched unicast, TCP, UDP, BPDU and broadcast
+ * frames to RX queue #0, and include the pseudo-header when
+ * calculating receive checksums.
+ */
+ mv643xx_eth_set_features(mp->dev, mp->dev->features);
+
+ /*
+ * Treat BPDUs as normal multicasts, and disable partition mode.
+ */
+ wrlp(mp, PORT_CONFIG_EXT, 0x00000000);
+
+ /*
+ * Add configured unicast addresses to address filter table.
+ */
+ mv643xx_eth_program_unicast_filter(mp->dev);
+
+ /*
+ * Enable the receive queues.
+ */
+ for (i = 0; i < mp->rxq_count; i++) {
+ struct rx_queue *rxq = mp->rxq + i;
+ u32 addr;
+
+ addr = (u32)rxq->rx_desc_dma;
+ addr += rxq->rx_curr_desc * sizeof(struct rx_desc);
+ wrlp(mp, RXQ_CURRENT_DESC_PTR(i), addr);
+
+ rxq_enable(rxq);
+ }
+}
+
+static void mv643xx_eth_recalc_skb_size(struct mv643xx_eth_private *mp)
+{
+ int skb_size;
+
+ /*
+ * Reserve 2+14 bytes for an ethernet header (the hardware
+ * automatically prepends 2 bytes of dummy data to each
+ * received packet), 16 bytes for up to four VLAN tags, and
+ * 4 bytes for the trailing FCS -- 36 bytes total.
+ */
+ skb_size = mp->dev->mtu + 36;
+
+ /*
+ * Make sure that the skb size is a multiple of 8 bytes, as
+ * the lower three bits of the receive descriptor's buffer
+ * size field are ignored by the hardware.
+ */
+ mp->skb_size = (skb_size + 7) & ~7;
+
+ /*
+ * If NET_SKB_PAD is smaller than a cache line,
+ * netdev_alloc_skb() will cause skb->data to be misaligned
+ * to a cache line boundary. If this is the case, include
+ * some extra space to allow re-aligning the data area.
+ */
+ mp->skb_size += SKB_DMA_REALIGN;
+}
+
+static int mv643xx_eth_open(struct net_device *dev)
+{
+ struct mv643xx_eth_private *mp = netdev_priv(dev);
+ int err;
+ int i;
+
+ wrlp(mp, INT_CAUSE, 0);
+ wrlp(mp, INT_CAUSE_EXT, 0);
+ rdlp(mp, INT_CAUSE_EXT);
+
+ err = request_irq(dev->irq, mv643xx_eth_irq,
+ IRQF_SHARED, dev->name, dev);
+ if (err) {
+ netdev_err(dev, "can't assign irq\n");
+ return -EAGAIN;
+ }
+
+ mv643xx_eth_recalc_skb_size(mp);
+
+ napi_enable(&mp->napi);
+
+ mp->int_mask = INT_EXT;
+
+ for (i = 0; i < mp->rxq_count; i++) {
+ err = rxq_init(mp, i);
+ if (err) {
+ while (--i >= 0)
+ rxq_deinit(mp->rxq + i);
+ goto out;
+ }
+
+ rxq_refill(mp->rxq + i, INT_MAX);
+ mp->int_mask |= INT_RX_0 << i;
+ }
+
+ if (mp->oom) {
+ mp->rx_oom.expires = jiffies + (HZ / 10);
+ add_timer(&mp->rx_oom);
+ }
+
+ for (i = 0; i < mp->txq_count; i++) {
+ err = txq_init(mp, i);
+ if (err) {
+ while (--i >= 0)
+ txq_deinit(mp->txq + i);
+ goto out_free;
+ }
+ mp->int_mask |= INT_TX_END_0 << i;
+ }
+
+ add_timer(&mp->mib_counters_timer);
+ port_start(mp);
+
+ wrlp(mp, INT_MASK_EXT, INT_EXT_LINK_PHY | INT_EXT_TX);
+ wrlp(mp, INT_MASK, mp->int_mask);
+
+ return 0;
+
+
+out_free:
+ for (i = 0; i < mp->rxq_count; i++)
+ rxq_deinit(mp->rxq + i);
+out:
+ napi_disable(&mp->napi);
+ free_irq(dev->irq, dev);
+
+ return err;
+}
+
+static void port_reset(struct mv643xx_eth_private *mp)
+{
+ unsigned int data;
+ int i;
+
+ for (i = 0; i < mp->rxq_count; i++)
+ rxq_disable(mp->rxq + i);
+ for (i = 0; i < mp->txq_count; i++)
+ txq_disable(mp->txq + i);
+
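+	/* Wait until the TX engine is idle and the TX FIFO has drained. */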
+ while (1) {
+ u32 ps = rdlp(mp, PORT_STATUS);
+
+ if ((ps & (TX_IN_PROGRESS | TX_FIFO_EMPTY)) == TX_FIFO_EMPTY)
+ break;
+ udelay(10);
+ }
+
+ /* Reset the Enable bit in the Configuration Register */
+ data = rdlp(mp, PORT_SERIAL_CONTROL);
+ data &= ~(SERIAL_PORT_ENABLE |
+ DO_NOT_FORCE_LINK_FAIL |
+ FORCE_LINK_PASS);
+ wrlp(mp, PORT_SERIAL_CONTROL, data);
+}
+
+static int mv643xx_eth_stop(struct net_device *dev)
+{
+ struct mv643xx_eth_private *mp = netdev_priv(dev);
+ int i;
+
+ wrlp(mp, INT_MASK_EXT, 0x00000000);
+ wrlp(mp, INT_MASK, 0x00000000);
+ rdlp(mp, INT_MASK);
+
+ napi_disable(&mp->napi);
+
+ del_timer_sync(&mp->rx_oom);
+
+ netif_carrier_off(dev);
+ if (dev->phydev)
+ phy_stop(dev->phydev);
+ free_irq(dev->irq, dev);
+
+ port_reset(mp);
+ mv643xx_eth_get_stats(dev);
+ mib_counters_update(mp);
+ del_timer_sync(&mp->mib_counters_timer);
+
+ for (i = 0; i < mp->rxq_count; i++)
+ rxq_deinit(mp->rxq + i);
+ for (i = 0; i < mp->txq_count; i++)
+ txq_deinit(mp->txq + i);
+
+ return 0;
+}
+
+static int mv643xx_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ int ret;
+
+ if (!dev->phydev)
+ return -ENOTSUPP;
+
+ ret = phy_mii_ioctl(dev->phydev, ifr, cmd);
+ if (!ret)
+ mv643xx_eth_adjust_link(dev);
+ return ret;
+}
+
+static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct mv643xx_eth_private *mp = netdev_priv(dev);
+
+ dev->mtu = new_mtu;
+ mv643xx_eth_recalc_skb_size(mp);
+ tx_set_rate(mp, 1000000000, 16777216);
+
+ if (!netif_running(dev))
+ return 0;
+
+ /*
+ * Stop and then re-open the interface. This will allocate RX
+ * skbs of the new MTU.
+	 * There is a risk that the open will not succeed if memory
+	 * is exhausted.
+ */
+ mv643xx_eth_stop(dev);
+ if (mv643xx_eth_open(dev)) {
+ netdev_err(dev,
+ "fatal error on re-opening device after MTU change\n");
+ }
+
+ return 0;
+}
+
+static void tx_timeout_task(struct work_struct *ugly)
+{
+ struct mv643xx_eth_private *mp;
+
+ mp = container_of(ugly, struct mv643xx_eth_private, tx_timeout_task);
+ if (netif_running(mp->dev)) {
+ netif_tx_stop_all_queues(mp->dev);
+ port_reset(mp);
+ port_start(mp);
+ netif_tx_wake_all_queues(mp->dev);
+ }
+}
+
+static void mv643xx_eth_tx_timeout(struct net_device *dev, unsigned int txqueue)
+{
+ struct mv643xx_eth_private *mp = netdev_priv(dev);
+
+ netdev_info(dev, "tx timeout\n");
+
+ schedule_work(&mp->tx_timeout_task);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void mv643xx_eth_netpoll(struct net_device *dev)
+{
+ struct mv643xx_eth_private *mp = netdev_priv(dev);
+
+ wrlp(mp, INT_MASK, 0x00000000);
+ rdlp(mp, INT_MASK);
+
+ mv643xx_eth_irq(dev->irq, dev);
+
+ wrlp(mp, INT_MASK, mp->int_mask);
+}
+#endif
+
+
+/* platform glue ************************************************************/
+static void
+mv643xx_eth_conf_mbus_windows(struct mv643xx_eth_shared_private *msp,
+ const struct mbus_dram_target_info *dram)
+{
+ void __iomem *base = msp->base;
+ u32 win_enable;
+ u32 win_protect;
+ int i;
+
+ for (i = 0; i < 6; i++) {
+ writel(0, base + WINDOW_BASE(i));
+ writel(0, base + WINDOW_SIZE(i));
+ if (i < 4)
+ writel(0, base + WINDOW_REMAP_HIGH(i));
+ }
+
+ win_enable = 0x3f;
+ win_protect = 0;
+
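+	/* A set bit in WINDOW_BAR_ENABLE disables the corresponding window,
+	 * so start with all six windows disabled and clear the bit for each
+	 * DRAM chip-select window programmed below.
+	 */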
+ for (i = 0; i < dram->num_cs; i++) {
+ const struct mbus_dram_window *cs = dram->cs + i;
+
+ writel((cs->base & 0xffff0000) |
+ (cs->mbus_attr << 8) |
+ dram->mbus_dram_target_id, base + WINDOW_BASE(i));
+ writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));
+
+ win_enable &= ~(1 << i);
+ win_protect |= 3 << (2 * i);
+ }
+
+ writel(win_enable, base + WINDOW_BAR_ENABLE);
+ msp->win_protect = win_protect;
+}
+
+static void infer_hw_params(struct mv643xx_eth_shared_private *msp)
+{
+ /*
+ * Check whether we have a 14-bit coal limit field in bits
+ * [21:8], or a 16-bit coal limit in bits [25,21:7] of the
+ * SDMA config register.
+ */
+ writel(0x02000000, msp->base + 0x0400 + SDMA_CONFIG);
+ if (readl(msp->base + 0x0400 + SDMA_CONFIG) & 0x02000000)
+ msp->extended_rx_coal_limit = 1;
+ else
+ msp->extended_rx_coal_limit = 0;
+
+ /*
+ * Check whether the MAC supports TX rate control, and if
+ * yes, whether its associated registers are in the old or
+ * the new place.
+ */
+ writel(1, msp->base + 0x0400 + TX_BW_MTU_MOVED);
+ if (readl(msp->base + 0x0400 + TX_BW_MTU_MOVED) & 1) {
+ msp->tx_bw_control = TX_BW_CONTROL_NEW_LAYOUT;
+ } else {
+ writel(7, msp->base + 0x0400 + TX_BW_RATE);
+ if (readl(msp->base + 0x0400 + TX_BW_RATE) & 7)
+ msp->tx_bw_control = TX_BW_CONTROL_OLD_LAYOUT;
+ else
+ msp->tx_bw_control = TX_BW_CONTROL_ABSENT;
+ }
+}
+
+#if defined(CONFIG_OF)
+static const struct of_device_id mv643xx_eth_shared_ids[] = {
+ { .compatible = "marvell,orion-eth", },
+ { .compatible = "marvell,kirkwood-eth", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, mv643xx_eth_shared_ids);
+#endif
+
+#ifdef CONFIG_OF_IRQ
+#define mv643xx_eth_property(_np, _name, _v) \
+ do { \
+ u32 tmp; \
+ if (!of_property_read_u32(_np, "marvell," _name, &tmp)) \
+ _v = tmp; \
+ } while (0)
+
+static struct platform_device *port_platdev[3];
+
+static void mv643xx_eth_shared_of_remove(void)
+{
+ int n;
+
+ for (n = 0; n < 3; n++) {
+ platform_device_del(port_platdev[n]);
+ port_platdev[n] = NULL;
+ }
+}
+
+static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
+ struct device_node *pnp)
+{
+ struct platform_device *ppdev;
+ struct mv643xx_eth_platform_data ppd;
+ struct resource res;
+ int ret;
+ int dev_num = 0;
+
+ memset(&ppd, 0, sizeof(ppd));
+ ppd.shared = pdev;
+
+ memset(&res, 0, sizeof(res));
+ if (of_irq_to_resource(pnp, 0, &res) <= 0) {
+ dev_err(&pdev->dev, "missing interrupt on %pOFn\n", pnp);
+ return -EINVAL;
+ }
+
+ if (of_property_read_u32(pnp, "reg", &ppd.port_number)) {
+ dev_err(&pdev->dev, "missing reg property on %pOFn\n", pnp);
+ return -EINVAL;
+ }
+
+ if (ppd.port_number >= 3) {
+ dev_err(&pdev->dev, "invalid reg property on %pOFn\n", pnp);
+ return -EINVAL;
+ }
+
+ while (dev_num < 3 && port_platdev[dev_num])
+ dev_num++;
+
+ if (dev_num == 3) {
+ dev_err(&pdev->dev, "too many ports registered\n");
+ return -EINVAL;
+ }
+
+ ret = of_get_mac_address(pnp, ppd.mac_addr);
+ if (ret == -EPROBE_DEFER)
+ return ret;
+
+ mv643xx_eth_property(pnp, "tx-queue-size", ppd.tx_queue_size);
+ mv643xx_eth_property(pnp, "tx-sram-addr", ppd.tx_sram_addr);
+ mv643xx_eth_property(pnp, "tx-sram-size", ppd.tx_sram_size);
+ mv643xx_eth_property(pnp, "rx-queue-size", ppd.rx_queue_size);
+ mv643xx_eth_property(pnp, "rx-sram-addr", ppd.rx_sram_addr);
+ mv643xx_eth_property(pnp, "rx-sram-size", ppd.rx_sram_size);
+
+ of_get_phy_mode(pnp, &ppd.interface);
+
+ ppd.phy_node = of_parse_phandle(pnp, "phy-handle", 0);
+ if (!ppd.phy_node) {
+ ppd.phy_addr = MV643XX_ETH_PHY_NONE;
+ of_property_read_u32(pnp, "speed", &ppd.speed);
+ of_property_read_u32(pnp, "duplex", &ppd.duplex);
+ }
+
+ ppdev = platform_device_alloc(MV643XX_ETH_NAME, dev_num);
+ if (!ppdev)
+ return -ENOMEM;
+ ppdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ ppdev->dev.of_node = pnp;
+
+ ret = platform_device_add_resources(ppdev, &res, 1);
+ if (ret)
+ goto port_err;
+
+ ret = platform_device_add_data(ppdev, &ppd, sizeof(ppd));
+ if (ret)
+ goto port_err;
+
+ ret = platform_device_add(ppdev);
+ if (ret)
+ goto port_err;
+
+ port_platdev[dev_num] = ppdev;
+
+ return 0;
+
+port_err:
+ platform_device_put(ppdev);
+ return ret;
+}
+
+static int mv643xx_eth_shared_of_probe(struct platform_device *pdev)
+{
+ struct mv643xx_eth_shared_platform_data *pd;
+ struct device_node *pnp, *np = pdev->dev.of_node;
+ int ret;
+
+ /* bail out if not registered from DT */
+ if (!np)
+ return 0;
+
+ pd = devm_kzalloc(&pdev->dev, sizeof(*pd), GFP_KERNEL);
+ if (!pd)
+ return -ENOMEM;
+ pdev->dev.platform_data = pd;
+
+ mv643xx_eth_property(np, "tx-checksum-limit", pd->tx_csum_limit);
+
+ for_each_available_child_of_node(np, pnp) {
+ ret = mv643xx_eth_shared_of_add_port(pdev, pnp);
+ if (ret) {
+ of_node_put(pnp);
+ mv643xx_eth_shared_of_remove();
+ return ret;
+ }
+ }
+ return 0;
+}
+
+#else
+static inline int mv643xx_eth_shared_of_probe(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static inline void mv643xx_eth_shared_of_remove(void)
+{
+}
+#endif
+
+static int mv643xx_eth_shared_probe(struct platform_device *pdev)
+{
+ static int mv643xx_eth_version_printed;
+ struct mv643xx_eth_shared_platform_data *pd;
+ struct mv643xx_eth_shared_private *msp;
+ const struct mbus_dram_target_info *dram;
+ struct resource *res;
+ int ret;
+
+ if (!mv643xx_eth_version_printed++)
+ pr_notice("MV-643xx 10/100/1000 ethernet driver version %s\n",
+ mv643xx_eth_driver_version);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL)
+ return -EINVAL;
+
+ msp = devm_kzalloc(&pdev->dev, sizeof(*msp), GFP_KERNEL);
+ if (msp == NULL)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, msp);
+
+ msp->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (msp->base == NULL)
+ return -ENOMEM;
+
+ msp->clk = devm_clk_get(&pdev->dev, NULL);
+ if (!IS_ERR(msp->clk))
+ clk_prepare_enable(msp->clk);
+
+ /*
+ * (Re-)program MBUS remapping windows if we are asked to.
+ */
+ dram = mv_mbus_dram_info();
+ if (dram)
+ mv643xx_eth_conf_mbus_windows(msp, dram);
+
+ ret = mv643xx_eth_shared_of_probe(pdev);
+ if (ret)
+ goto err_put_clk;
+ pd = dev_get_platdata(&pdev->dev);
+
+ msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ?
+ pd->tx_csum_limit : 9 * 1024;
+ infer_hw_params(msp);
+
+ return 0;
+
+err_put_clk:
+ if (!IS_ERR(msp->clk))
+ clk_disable_unprepare(msp->clk);
+ return ret;
+}
+
+static int mv643xx_eth_shared_remove(struct platform_device *pdev)
+{
+ struct mv643xx_eth_shared_private *msp = platform_get_drvdata(pdev);
+
+ mv643xx_eth_shared_of_remove();
+ if (!IS_ERR(msp->clk))
+ clk_disable_unprepare(msp->clk);
+ return 0;
+}
+
+static struct platform_driver mv643xx_eth_shared_driver = {
+ .probe = mv643xx_eth_shared_probe,
+ .remove = mv643xx_eth_shared_remove,
+ .driver = {
+ .name = MV643XX_ETH_SHARED_NAME,
+ .of_match_table = of_match_ptr(mv643xx_eth_shared_ids),
+ },
+};
+
+static void phy_addr_set(struct mv643xx_eth_private *mp, int phy_addr)
+{
+ int addr_shift = 5 * mp->port_num;
+ u32 data;
+
+ data = rdl(mp, PHY_ADDR);
+ data &= ~(0x1f << addr_shift);
+ data |= (phy_addr & 0x1f) << addr_shift;
+ wrl(mp, PHY_ADDR, data);
+}
+
+static int phy_addr_get(struct mv643xx_eth_private *mp)
+{
+ unsigned int data;
+
+ data = rdl(mp, PHY_ADDR);
+
+ return (data >> (5 * mp->port_num)) & 0x1f;
+}
+
+static void set_params(struct mv643xx_eth_private *mp,
+ struct mv643xx_eth_platform_data *pd)
+{
+ struct net_device *dev = mp->dev;
+ unsigned int tx_ring_size;
+
+ if (is_valid_ether_addr(pd->mac_addr)) {
+ eth_hw_addr_set(dev, pd->mac_addr);
+ } else {
+ u8 addr[ETH_ALEN];
+
+ uc_addr_get(mp, addr);
+ eth_hw_addr_set(dev, addr);
+ }
+
+ mp->rx_ring_size = DEFAULT_RX_QUEUE_SIZE;
+ if (pd->rx_queue_size)
+ mp->rx_ring_size = pd->rx_queue_size;
+ mp->rx_desc_sram_addr = pd->rx_sram_addr;
+ mp->rx_desc_sram_size = pd->rx_sram_size;
+
+ mp->rxq_count = pd->rx_queue_count ? : 1;
+
+ tx_ring_size = DEFAULT_TX_QUEUE_SIZE;
+ if (pd->tx_queue_size)
+ tx_ring_size = pd->tx_queue_size;
+
+ mp->tx_ring_size = clamp_t(unsigned int, tx_ring_size,
+ MV643XX_MAX_SKB_DESCS * 2, 4096);
+ if (mp->tx_ring_size != tx_ring_size)
+ netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
+ mp->tx_ring_size, tx_ring_size);
+
+ mp->tx_desc_sram_addr = pd->tx_sram_addr;
+ mp->tx_desc_sram_size = pd->tx_sram_size;
+
+ mp->txq_count = pd->tx_queue_count ? : 1;
+}
+
+static int get_phy_mode(struct mv643xx_eth_private *mp)
+{
+ struct device *dev = mp->dev->dev.parent;
+ phy_interface_t iface;
+ int err;
+
+ if (dev->of_node)
+ err = of_get_phy_mode(dev->of_node, &iface);
+
+ /* Historical default if unspecified. We could also read/write
+	 * the interface state in the PSC1.
+ */
+ if (!dev->of_node || err)
+ iface = PHY_INTERFACE_MODE_GMII;
+ return iface;
+}
+
+static struct phy_device *phy_scan(struct mv643xx_eth_private *mp,
+ int phy_addr)
+{
+ struct phy_device *phydev;
+ int start;
+ int num;
+ int i;
+ char phy_id[MII_BUS_ID_SIZE + 3];
+
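+	/* With the default "auto" PHY address, scan all 32 MDIO addresses,
+	 * starting from the one currently programmed in the port's PHY_ADDR
+	 * register field.
+	 */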
+ if (phy_addr == MV643XX_ETH_PHY_ADDR_DEFAULT) {
+ start = phy_addr_get(mp) & 0x1f;
+ num = 32;
+ } else {
+ start = phy_addr & 0x1f;
+ num = 1;
+ }
+
+ /* Attempt to connect to the PHY using orion-mdio */
+ phydev = ERR_PTR(-ENODEV);
+ for (i = 0; i < num; i++) {
+ int addr = (start + i) & 0x1f;
+
+ snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
+ "orion-mdio-mii", addr);
+
+ phydev = phy_connect(mp->dev, phy_id, mv643xx_eth_adjust_link,
+ get_phy_mode(mp));
+ if (!IS_ERR(phydev)) {
+ phy_addr_set(mp, addr);
+ break;
+ }
+ }
+
+ return phydev;
+}
+
+static void phy_init(struct mv643xx_eth_private *mp, int speed, int duplex)
+{
+ struct net_device *dev = mp->dev;
+ struct phy_device *phy = dev->phydev;
+
+ if (speed == 0) {
+ phy->autoneg = AUTONEG_ENABLE;
+ phy->speed = 0;
+ phy->duplex = 0;
+ linkmode_copy(phy->advertising, phy->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ phy->advertising);
+ } else {
+ phy->autoneg = AUTONEG_DISABLE;
+ linkmode_zero(phy->advertising);
+ phy->speed = speed;
+ phy->duplex = duplex;
+ }
+ phy_start_aneg(phy);
+}
+
+static void init_pscr(struct mv643xx_eth_private *mp, int speed, int duplex)
+{
+ struct net_device *dev = mp->dev;
+ u32 pscr;
+
+ pscr = rdlp(mp, PORT_SERIAL_CONTROL);
+ if (pscr & SERIAL_PORT_ENABLE) {
+ pscr &= ~SERIAL_PORT_ENABLE;
+ wrlp(mp, PORT_SERIAL_CONTROL, pscr);
+ }
+
+ pscr = MAX_RX_PACKET_9700BYTE | SERIAL_PORT_CONTROL_RESERVED;
+ if (!dev->phydev) {
+ pscr |= DISABLE_AUTO_NEG_SPEED_GMII;
+ if (speed == SPEED_1000)
+ pscr |= SET_GMII_SPEED_TO_1000;
+ else if (speed == SPEED_100)
+ pscr |= SET_MII_SPEED_TO_100;
+
+ pscr |= DISABLE_AUTO_NEG_FOR_FLOW_CTRL;
+
+ pscr |= DISABLE_AUTO_NEG_FOR_DUPLEX;
+ if (duplex == DUPLEX_FULL)
+ pscr |= SET_FULL_DUPLEX_MODE;
+ }
+
+ wrlp(mp, PORT_SERIAL_CONTROL, pscr);
+}
+
+static const struct net_device_ops mv643xx_eth_netdev_ops = {
+ .ndo_open = mv643xx_eth_open,
+ .ndo_stop = mv643xx_eth_stop,
+ .ndo_start_xmit = mv643xx_eth_xmit,
+ .ndo_set_rx_mode = mv643xx_eth_set_rx_mode,
+ .ndo_set_mac_address = mv643xx_eth_set_mac_address,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_eth_ioctl = mv643xx_eth_ioctl,
+ .ndo_change_mtu = mv643xx_eth_change_mtu,
+ .ndo_set_features = mv643xx_eth_set_features,
+ .ndo_tx_timeout = mv643xx_eth_tx_timeout,
+ .ndo_get_stats = mv643xx_eth_get_stats,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = mv643xx_eth_netpoll,
+#endif
+};
+
+static int mv643xx_eth_probe(struct platform_device *pdev)
+{
+ struct mv643xx_eth_platform_data *pd;
+ struct mv643xx_eth_private *mp;
+ struct net_device *dev;
+ struct phy_device *phydev = NULL;
+ u32 psc1r;
+ int err, irq;
+
+ pd = dev_get_platdata(&pdev->dev);
+ if (pd == NULL) {
+ dev_err(&pdev->dev, "no mv643xx_eth_platform_data\n");
+ return -ENODEV;
+ }
+
+ if (pd->shared == NULL) {
+ dev_err(&pdev->dev, "no mv643xx_eth_platform_data->shared\n");
+ return -ENODEV;
+ }
+
+ dev = alloc_etherdev_mq(sizeof(struct mv643xx_eth_private), 8);
+ if (!dev)
+ return -ENOMEM;
+
+ SET_NETDEV_DEV(dev, &pdev->dev);
+ mp = netdev_priv(dev);
+ platform_set_drvdata(pdev, mp);
+
+ mp->shared = platform_get_drvdata(pd->shared);
+ mp->base = mp->shared->base + 0x0400 + (pd->port_number << 10);
+ mp->port_num = pd->port_number;
+
+ mp->dev = dev;
+
+ if (of_device_is_compatible(pdev->dev.of_node,
+ "marvell,kirkwood-eth-port")) {
+ psc1r = rdlp(mp, PORT_SERIAL_CONTROL1);
+
+		/* Kirkwood resets some registers on gated clocks. In
+		 * particular, CLK125_BYPASS_EN must be cleared here, but the
+		 * bit is not present on the other SoCs/system controllers
+		 * using this driver.
+ */
+ psc1r &= ~CLK125_BYPASS_EN;
+
+ /* On Kirkwood with two Ethernet controllers, if both of them
+ * have RGMII_EN disabled, the first controller will be in GMII
+ * mode and the second one is effectively disabled, instead of
+ * two MII interfaces.
+ *
+ * To enable GMII in the first controller, the second one must
+ * also be configured (and may be enabled) with RGMII_EN
+ * disabled too, even though it cannot be used at all.
+ */
+ switch (pd->interface) {
+ /* Use internal to denote second controller being disabled */
+ case PHY_INTERFACE_MODE_INTERNAL:
+ case PHY_INTERFACE_MODE_MII:
+ case PHY_INTERFACE_MODE_GMII:
+ psc1r &= ~RGMII_EN;
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ psc1r |= RGMII_EN;
+ break;
+ default:
+ /* Unknown; don't touch */
+ break;
+ }
+
+ wrlp(mp, PORT_SERIAL_CONTROL1, psc1r);
+ }
+
+ /*
+ * Start with a default rate, and if there is a clock, allow
+ * it to override the default.
+ */
+ mp->t_clk = 133000000;
+ mp->clk = devm_clk_get(&pdev->dev, NULL);
+ if (!IS_ERR(mp->clk)) {
+ clk_prepare_enable(mp->clk);
+ mp->t_clk = clk_get_rate(mp->clk);
+ } else if (!IS_ERR(mp->shared->clk)) {
+ mp->t_clk = clk_get_rate(mp->shared->clk);
+ }
+
+ set_params(mp, pd);
+ netif_set_real_num_tx_queues(dev, mp->txq_count);
+ netif_set_real_num_rx_queues(dev, mp->rxq_count);
+
+ err = 0;
+ if (pd->phy_node) {
+ phydev = of_phy_connect(mp->dev, pd->phy_node,
+ mv643xx_eth_adjust_link, 0,
+ get_phy_mode(mp));
+ if (!phydev)
+ err = -ENODEV;
+ else
+ phy_addr_set(mp, phydev->mdio.addr);
+ } else if (pd->phy_addr != MV643XX_ETH_PHY_NONE) {
+ phydev = phy_scan(mp, pd->phy_addr);
+
+ if (IS_ERR(phydev))
+ err = PTR_ERR(phydev);
+ else
+ phy_init(mp, pd->speed, pd->duplex);
+ }
+ if (err == -ENODEV) {
+ err = -EPROBE_DEFER;
+ goto out;
+ }
+ if (err)
+ goto out;
+
+ dev->ethtool_ops = &mv643xx_eth_ethtool_ops;
+
+ init_pscr(mp, pd->speed, pd->duplex);
+
+
+ mib_counters_clear(mp);
+
+ timer_setup(&mp->mib_counters_timer, mib_counters_timer_wrapper, 0);
+ mp->mib_counters_timer.expires = jiffies + 30 * HZ;
+
+ spin_lock_init(&mp->mib_counters_lock);
+
+ INIT_WORK(&mp->tx_timeout_task, tx_timeout_task);
+
+ netif_napi_add(dev, &mp->napi, mv643xx_eth_poll);
+
+ timer_setup(&mp->rx_oom, oom_timer_wrapper, 0);
+
+
+ irq = platform_get_irq(pdev, 0);
+ if (WARN_ON(irq < 0)) {
+ err = irq;
+ goto out;
+ }
+ dev->irq = irq;
+
+ dev->netdev_ops = &mv643xx_eth_netdev_ops;
+
+ dev->watchdog_timeo = 2 * HZ;
+ dev->base_addr = 0;
+
+ dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
+ dev->vlan_features = dev->features;
+
+ dev->features |= NETIF_F_RXCSUM;
+ dev->hw_features = dev->features;
+
+ dev->priv_flags |= IFF_UNICAST_FLT;
+ netif_set_tso_max_segs(dev, MV643XX_MAX_TSO_SEGS);
+
+ /* MTU range: 64 - 9500 */
+ dev->min_mtu = 64;
+ dev->max_mtu = 9500;
+
+ if (mp->shared->win_protect)
+ wrl(mp, WINDOW_PROTECT(mp->port_num), mp->shared->win_protect);
+
+ netif_carrier_off(dev);
+
+ wrlp(mp, SDMA_CONFIG, PORT_SDMA_CONFIG_DEFAULT_VALUE);
+
+ set_rx_coal(mp, 250);
+ set_tx_coal(mp, 0);
+
+ err = register_netdev(dev);
+ if (err)
+ goto out;
+
+ netdev_notice(dev, "port %d with MAC address %pM\n",
+ mp->port_num, dev->dev_addr);
+
+ if (mp->tx_desc_sram_size > 0)
+ netdev_notice(dev, "configured with sram\n");
+
+ return 0;
+
+out:
+ if (!IS_ERR(mp->clk))
+ clk_disable_unprepare(mp->clk);
+ free_netdev(dev);
+
+ return err;
+}
+
+static int mv643xx_eth_remove(struct platform_device *pdev)
+{
+ struct mv643xx_eth_private *mp = platform_get_drvdata(pdev);
+ struct net_device *dev = mp->dev;
+
+ unregister_netdev(mp->dev);
+ if (dev->phydev)
+ phy_disconnect(dev->phydev);
+ cancel_work_sync(&mp->tx_timeout_task);
+
+ if (!IS_ERR(mp->clk))
+ clk_disable_unprepare(mp->clk);
+
+ free_netdev(mp->dev);
+
+ return 0;
+}
+
+static void mv643xx_eth_shutdown(struct platform_device *pdev)
+{
+ struct mv643xx_eth_private *mp = platform_get_drvdata(pdev);
+
+ /* Mask all interrupts on ethernet port */
+ wrlp(mp, INT_MASK, 0);
+ rdlp(mp, INT_MASK);
+
+ if (netif_running(mp->dev))
+ port_reset(mp);
+}
+
+static struct platform_driver mv643xx_eth_driver = {
+ .probe = mv643xx_eth_probe,
+ .remove = mv643xx_eth_remove,
+ .shutdown = mv643xx_eth_shutdown,
+ .driver = {
+ .name = MV643XX_ETH_NAME,
+ },
+};
+
+static struct platform_driver * const drivers[] = {
+ &mv643xx_eth_shared_driver,
+ &mv643xx_eth_driver,
+};
+
+static int __init mv643xx_eth_init_module(void)
+{
+ return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
+}
+module_init(mv643xx_eth_init_module);
+
+static void __exit mv643xx_eth_cleanup_module(void)
+{
+ platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
+}
+module_exit(mv643xx_eth_cleanup_module);
+
+MODULE_AUTHOR("Rabeeh Khoury, Assaf Hoffman, Matthew Dharm, "
+ "Manish Lachwani, Dale Farnsworth and Lennert Buytenhek");
+MODULE_DESCRIPTION("Ethernet driver for Marvell MV643XX");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" MV643XX_ETH_SHARED_NAME);
+MODULE_ALIAS("platform:" MV643XX_ETH_NAME);
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
new file mode 100644
index 000000000..674913184
--- /dev/null
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -0,0 +1,442 @@
+/*
+ * Driver for the MDIO interface of Marvell network interfaces.
+ *
+ * Since the MDIO interface of Marvell network interfaces is shared
+ * between all network interfaces, having a single driver allows
+ * concurrent accesses to be handled properly (you may have four Ethernet
+ * ports, but they in fact share the same SMI interface to access
+ * the MDIO bus). This driver is currently used by the mvneta and
+ * mv643xx_eth drivers.
+ *
+ * Copyright (C) 2012 Marvell
+ *
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/acpi.h>
+#include <linux/acpi_mdio.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of_mdio.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+
+#define MVMDIO_SMI_DATA_SHIFT 0
+#define MVMDIO_SMI_PHY_ADDR_SHIFT 16
+#define MVMDIO_SMI_PHY_REG_SHIFT 21
+#define MVMDIO_SMI_READ_OPERATION BIT(26)
+#define MVMDIO_SMI_WRITE_OPERATION 0
+#define MVMDIO_SMI_READ_VALID BIT(27)
+#define MVMDIO_SMI_BUSY BIT(28)
+#define MVMDIO_ERR_INT_CAUSE 0x007C
+#define MVMDIO_ERR_INT_SMI_DONE 0x00000010
+#define MVMDIO_ERR_INT_MASK 0x0080
+
+#define MVMDIO_XSMI_MGNT_REG 0x0
+#define MVMDIO_XSMI_PHYADDR_SHIFT 16
+#define MVMDIO_XSMI_DEVADDR_SHIFT 21
+#define MVMDIO_XSMI_WRITE_OPERATION (0x5 << 26)
+#define MVMDIO_XSMI_READ_OPERATION (0x7 << 26)
+#define MVMDIO_XSMI_READ_VALID BIT(29)
+#define MVMDIO_XSMI_BUSY BIT(30)
+#define MVMDIO_XSMI_ADDR_REG 0x8
+
+/*
+ * SMI Timeout measurements:
+ * - Kirkwood 88F6281 (Globalscale Dreamplug): 45us to 95us (Interrupt)
+ * - Armada 370 (Globalscale Mirabox): 41us to 43us (Polled)
+ */
+#define MVMDIO_SMI_TIMEOUT 1000 /* 1000us = 1ms */
+#define MVMDIO_SMI_POLL_INTERVAL_MIN 45
+#define MVMDIO_SMI_POLL_INTERVAL_MAX 55
+
+#define MVMDIO_XSMI_POLL_INTERVAL_MIN 150
+#define MVMDIO_XSMI_POLL_INTERVAL_MAX 160
+
+struct orion_mdio_dev {
+ void __iomem *regs;
+ struct clk *clk[4];
+ /*
+ * If we have access to the error interrupt pin (which is
+ * somewhat misnamed as it not only reflects internal errors
+ * but also reflects SMI completion), use that to wait for
+ * SMI access completion instead of polling the SMI busy bit.
+ */
+ int err_interrupt;
+ wait_queue_head_t smi_busy_wait;
+};
+
+enum orion_mdio_bus_type {
+ BUS_TYPE_SMI,
+ BUS_TYPE_XSMI
+};
+
+struct orion_mdio_ops {
+ int (*is_done)(struct orion_mdio_dev *);
+ unsigned int poll_interval_min;
+ unsigned int poll_interval_max;
+};
+
+/* Wait for the SMI unit to be ready for another operation
+ */
+static int orion_mdio_wait_ready(const struct orion_mdio_ops *ops,
+ struct mii_bus *bus)
+{
+ struct orion_mdio_dev *dev = bus->priv;
+ unsigned long timeout = usecs_to_jiffies(MVMDIO_SMI_TIMEOUT);
+ unsigned long end = jiffies + timeout;
+ int timedout = 0;
+
+ while (1) {
+ if (ops->is_done(dev))
+ return 0;
+ else if (timedout)
+ break;
+
+ if (dev->err_interrupt <= 0) {
+ usleep_range(ops->poll_interval_min,
+ ops->poll_interval_max);
+
+ if (time_is_before_jiffies(end))
+ ++timedout;
+ } else {
+ /* wait_event_timeout does not guarantee a delay of at
+ * least one whole jiffie, so timeout must be no less
+ * than two.
+ */
+ if (timeout < 2)
+ timeout = 2;
+ wait_event_timeout(dev->smi_busy_wait,
+ ops->is_done(dev), timeout);
+
+ ++timedout;
+ }
+ }
+
+ dev_err(bus->parent, "Timeout: SMI busy for too long\n");
+ return -ETIMEDOUT;
+}
+
+static int orion_mdio_smi_is_done(struct orion_mdio_dev *dev)
+{
+ return !(readl(dev->regs) & MVMDIO_SMI_BUSY);
+}
+
+static const struct orion_mdio_ops orion_mdio_smi_ops = {
+ .is_done = orion_mdio_smi_is_done,
+ .poll_interval_min = MVMDIO_SMI_POLL_INTERVAL_MIN,
+ .poll_interval_max = MVMDIO_SMI_POLL_INTERVAL_MAX,
+};
+
+static int orion_mdio_smi_read(struct mii_bus *bus, int mii_id,
+ int regnum)
+{
+ struct orion_mdio_dev *dev = bus->priv;
+ u32 val;
+ int ret;
+
+ ret = orion_mdio_wait_ready(&orion_mdio_smi_ops, bus);
+ if (ret < 0)
+ return ret;
+
+ writel(((mii_id << MVMDIO_SMI_PHY_ADDR_SHIFT) |
+ (regnum << MVMDIO_SMI_PHY_REG_SHIFT) |
+ MVMDIO_SMI_READ_OPERATION),
+ dev->regs);
+
+ ret = orion_mdio_wait_ready(&orion_mdio_smi_ops, bus);
+ if (ret < 0)
+ return ret;
+
+ val = readl(dev->regs);
+ if (!(val & MVMDIO_SMI_READ_VALID)) {
+ dev_err(bus->parent, "SMI bus read not valid\n");
+ return -ENODEV;
+ }
+
+ return val & GENMASK(15, 0);
+}
+
+static int orion_mdio_smi_write(struct mii_bus *bus, int mii_id,
+ int regnum, u16 value)
+{
+ struct orion_mdio_dev *dev = bus->priv;
+ int ret;
+
+ ret = orion_mdio_wait_ready(&orion_mdio_smi_ops, bus);
+ if (ret < 0)
+ return ret;
+
+ writel(((mii_id << MVMDIO_SMI_PHY_ADDR_SHIFT) |
+ (regnum << MVMDIO_SMI_PHY_REG_SHIFT) |
+ MVMDIO_SMI_WRITE_OPERATION |
+ (value << MVMDIO_SMI_DATA_SHIFT)),
+ dev->regs);
+
+ return 0;
+}
+
+static int orion_mdio_xsmi_is_done(struct orion_mdio_dev *dev)
+{
+ return !(readl(dev->regs + MVMDIO_XSMI_MGNT_REG) & MVMDIO_XSMI_BUSY);
+}
+
+static const struct orion_mdio_ops orion_mdio_xsmi_ops = {
+ .is_done = orion_mdio_xsmi_is_done,
+ .poll_interval_min = MVMDIO_XSMI_POLL_INTERVAL_MIN,
+ .poll_interval_max = MVMDIO_XSMI_POLL_INTERVAL_MAX,
+};
+
+static int orion_mdio_xsmi_read_c45(struct mii_bus *bus, int mii_id,
+ int dev_addr, int regnum)
+{
+ struct orion_mdio_dev *dev = bus->priv;
+ int ret;
+
+ ret = orion_mdio_wait_ready(&orion_mdio_xsmi_ops, bus);
+ if (ret < 0)
+ return ret;
+
+ writel(regnum, dev->regs + MVMDIO_XSMI_ADDR_REG);
+ writel((mii_id << MVMDIO_XSMI_PHYADDR_SHIFT) |
+ (dev_addr << MVMDIO_XSMI_DEVADDR_SHIFT) |
+ MVMDIO_XSMI_READ_OPERATION,
+ dev->regs + MVMDIO_XSMI_MGNT_REG);
+
+ ret = orion_mdio_wait_ready(&orion_mdio_xsmi_ops, bus);
+ if (ret < 0)
+ return ret;
+
+ if (!(readl(dev->regs + MVMDIO_XSMI_MGNT_REG) &
+ MVMDIO_XSMI_READ_VALID)) {
+ dev_err(bus->parent, "XSMI bus read not valid\n");
+ return -ENODEV;
+ }
+
+ return readl(dev->regs + MVMDIO_XSMI_MGNT_REG) & GENMASK(15, 0);
+}
+
+static int orion_mdio_xsmi_write_c45(struct mii_bus *bus, int mii_id,
+ int dev_addr, int regnum, u16 value)
+{
+ struct orion_mdio_dev *dev = bus->priv;
+ int ret;
+
+ ret = orion_mdio_wait_ready(&orion_mdio_xsmi_ops, bus);
+ if (ret < 0)
+ return ret;
+
+ writel(regnum, dev->regs + MVMDIO_XSMI_ADDR_REG);
+ writel((mii_id << MVMDIO_XSMI_PHYADDR_SHIFT) |
+ (dev_addr << MVMDIO_XSMI_DEVADDR_SHIFT) |
+ MVMDIO_XSMI_WRITE_OPERATION | value,
+ dev->regs + MVMDIO_XSMI_MGNT_REG);
+
+ return 0;
+}
+
+static irqreturn_t orion_mdio_err_irq(int irq, void *dev_id)
+{
+ struct orion_mdio_dev *dev = dev_id;
+
+ if (readl(dev->regs + MVMDIO_ERR_INT_CAUSE) &
+ MVMDIO_ERR_INT_SMI_DONE) {
+ writel(~MVMDIO_ERR_INT_SMI_DONE,
+ dev->regs + MVMDIO_ERR_INT_CAUSE);
+ wake_up(&dev->smi_busy_wait);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static int orion_mdio_probe(struct platform_device *pdev)
+{
+ enum orion_mdio_bus_type type;
+ struct resource *r;
+ struct mii_bus *bus;
+ struct orion_mdio_dev *dev;
+ int i, ret;
+
+ type = (uintptr_t)device_get_match_data(&pdev->dev);
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r) {
+ dev_err(&pdev->dev, "No SMI register address given\n");
+ return -ENODEV;
+ }
+
+ bus = devm_mdiobus_alloc_size(&pdev->dev,
+ sizeof(struct orion_mdio_dev));
+ if (!bus)
+ return -ENOMEM;
+
+ switch (type) {
+ case BUS_TYPE_SMI:
+ bus->read = orion_mdio_smi_read;
+ bus->write = orion_mdio_smi_write;
+ break;
+ case BUS_TYPE_XSMI:
+ bus->read_c45 = orion_mdio_xsmi_read_c45;
+ bus->write_c45 = orion_mdio_xsmi_write_c45;
+ break;
+ }
+
+ bus->name = "orion_mdio_bus";
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii",
+ dev_name(&pdev->dev));
+ bus->parent = &pdev->dev;
+
+ dev = bus->priv;
+ dev->regs = devm_ioremap(&pdev->dev, r->start, resource_size(r));
+ if (!dev->regs) {
+ dev_err(&pdev->dev, "Unable to remap SMI register\n");
+ return -ENODEV;
+ }
+
+ init_waitqueue_head(&dev->smi_busy_wait);
+
+ if (pdev->dev.of_node) {
+ for (i = 0; i < ARRAY_SIZE(dev->clk); i++) {
+ dev->clk[i] = of_clk_get(pdev->dev.of_node, i);
+ if (PTR_ERR(dev->clk[i]) == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto out_clk;
+ }
+ if (IS_ERR(dev->clk[i]))
+ break;
+ clk_prepare_enable(dev->clk[i]);
+ }
+
+ if (!IS_ERR(of_clk_get(pdev->dev.of_node,
+ ARRAY_SIZE(dev->clk))))
+ dev_warn(&pdev->dev,
+ "unsupported number of clocks, limiting to the first "
+ __stringify(ARRAY_SIZE(dev->clk)) "\n");
+ } else {
+ dev->clk[0] = clk_get(&pdev->dev, NULL);
+ if (PTR_ERR(dev->clk[0]) == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto out_clk;
+ }
+ if (!IS_ERR(dev->clk[0]))
+ clk_prepare_enable(dev->clk[0]);
+ }
+
+
+ dev->err_interrupt = platform_get_irq_optional(pdev, 0);
+ if (dev->err_interrupt > 0 &&
+ resource_size(r) < MVMDIO_ERR_INT_MASK + 4) {
+ dev_err(&pdev->dev,
+ "disabling interrupt, resource size is too small\n");
+ dev->err_interrupt = 0;
+ }
+ if (dev->err_interrupt > 0) {
+ ret = devm_request_irq(&pdev->dev, dev->err_interrupt,
+ orion_mdio_err_irq,
+ IRQF_SHARED, pdev->name, dev);
+ if (ret)
+ goto out_mdio;
+
+ writel(MVMDIO_ERR_INT_SMI_DONE,
+ dev->regs + MVMDIO_ERR_INT_MASK);
+
+ } else if (dev->err_interrupt == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto out_mdio;
+ }
+
+	/* On platforms without DT/ACPI support, of_mdiobus_register()
+	 * falls back to plain mdiobus_register().
+ */
+ if (is_acpi_node(pdev->dev.fwnode))
+ ret = acpi_mdiobus_register(bus, pdev->dev.fwnode);
+ else
+ ret = of_mdiobus_register(bus, pdev->dev.of_node);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Cannot register MDIO bus (%d)\n", ret);
+ goto out_mdio;
+ }
+
+ platform_set_drvdata(pdev, bus);
+
+ return 0;
+
+out_mdio:
+ if (dev->err_interrupt > 0)
+ writel(0, dev->regs + MVMDIO_ERR_INT_MASK);
+
+out_clk:
+ for (i = 0; i < ARRAY_SIZE(dev->clk); i++) {
+ if (IS_ERR(dev->clk[i]))
+ break;
+ clk_disable_unprepare(dev->clk[i]);
+ clk_put(dev->clk[i]);
+ }
+
+ return ret;
+}
+
+static int orion_mdio_remove(struct platform_device *pdev)
+{
+ struct mii_bus *bus = platform_get_drvdata(pdev);
+ struct orion_mdio_dev *dev = bus->priv;
+ int i;
+
+ if (dev->err_interrupt > 0)
+ writel(0, dev->regs + MVMDIO_ERR_INT_MASK);
+ mdiobus_unregister(bus);
+
+ for (i = 0; i < ARRAY_SIZE(dev->clk); i++) {
+ if (IS_ERR(dev->clk[i]))
+ break;
+ clk_disable_unprepare(dev->clk[i]);
+ clk_put(dev->clk[i]);
+ }
+
+ return 0;
+}
+
+static const struct of_device_id orion_mdio_match[] = {
+ { .compatible = "marvell,orion-mdio", .data = (void *)BUS_TYPE_SMI },
+ { .compatible = "marvell,xmdio", .data = (void *)BUS_TYPE_XSMI },
+ { }
+};
+MODULE_DEVICE_TABLE(of, orion_mdio_match);
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id orion_mdio_acpi_match[] = {
+ { "MRVL0100", BUS_TYPE_SMI },
+ { "MRVL0101", BUS_TYPE_XSMI },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, orion_mdio_acpi_match);
+#endif
+
+static struct platform_driver orion_mdio_driver = {
+ .probe = orion_mdio_probe,
+ .remove = orion_mdio_remove,
+ .driver = {
+ .name = "orion-mdio",
+ .of_match_table = orion_mdio_match,
+ .acpi_match_table = ACPI_PTR(orion_mdio_acpi_match),
+ },
+};
+
+module_platform_driver(orion_mdio_driver);
+
+MODULE_DESCRIPTION("Marvell MDIO interface driver");
+MODULE_AUTHOR("Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:orion-mdio");
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
new file mode 100644
index 000000000..165f76d12
--- /dev/null
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -0,0 +1,5941 @@
+/*
+ * Driver for Marvell NETA network card for Armada XP and Armada 370 SoCs.
+ *
+ * Copyright (C) 2012 Marvell
+ *
+ * Rami Rosen <rosenr@marvell.com>
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/cpu.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/inetdevice.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mbus.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/phy/phy.h>
+#include <linux/phy.h>
+#include <linux/phylink.h>
+#include <linux/platform_device.h>
+#include <linux/skbuff.h>
+#include <net/hwbm.h>
+#include "mvneta_bm.h"
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/tso.h>
+#include <net/page_pool/helpers.h>
+#include <net/pkt_sched.h>
+#include <linux/bpf_trace.h>
+
+/* Registers */
+#define MVNETA_RXQ_CONFIG_REG(q) (0x1400 + ((q) << 2))
+#define MVNETA_RXQ_HW_BUF_ALLOC BIT(0)
+#define MVNETA_RXQ_SHORT_POOL_ID_SHIFT 4
+#define MVNETA_RXQ_SHORT_POOL_ID_MASK 0x30
+#define MVNETA_RXQ_LONG_POOL_ID_SHIFT 6
+#define MVNETA_RXQ_LONG_POOL_ID_MASK 0xc0
+#define MVNETA_RXQ_PKT_OFFSET_ALL_MASK (0xf << 8)
+#define MVNETA_RXQ_PKT_OFFSET_MASK(offs) ((offs) << 8)
+#define MVNETA_RXQ_THRESHOLD_REG(q) (0x14c0 + ((q) << 2))
+#define MVNETA_RXQ_NON_OCCUPIED(v) ((v) << 16)
+#define MVNETA_RXQ_BASE_ADDR_REG(q) (0x1480 + ((q) << 2))
+#define MVNETA_RXQ_SIZE_REG(q) (0x14a0 + ((q) << 2))
+#define MVNETA_RXQ_BUF_SIZE_SHIFT 19
+#define MVNETA_RXQ_BUF_SIZE_MASK (0x1fff << 19)
+#define MVNETA_RXQ_STATUS_REG(q) (0x14e0 + ((q) << 2))
+#define MVNETA_RXQ_OCCUPIED_ALL_MASK 0x3fff
+#define MVNETA_RXQ_STATUS_UPDATE_REG(q) (0x1500 + ((q) << 2))
+#define MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT 16
+#define MVNETA_RXQ_ADD_NON_OCCUPIED_MAX 255
+#define MVNETA_PORT_POOL_BUFFER_SZ_REG(pool) (0x1700 + ((pool) << 2))
+#define MVNETA_PORT_POOL_BUFFER_SZ_SHIFT 3
+#define MVNETA_PORT_POOL_BUFFER_SZ_MASK 0xfff8
+#define MVNETA_PORT_RX_RESET 0x1cc0
+#define MVNETA_PORT_RX_DMA_RESET BIT(0)
+#define MVNETA_PHY_ADDR 0x2000
+#define MVNETA_PHY_ADDR_MASK 0x1f
+#define MVNETA_MBUS_RETRY 0x2010
+#define MVNETA_UNIT_INTR_CAUSE 0x2080
+#define MVNETA_UNIT_CONTROL 0x20B0
+#define MVNETA_PHY_POLLING_ENABLE BIT(1)
+#define MVNETA_WIN_BASE(w) (0x2200 + ((w) << 3))
+#define MVNETA_WIN_SIZE(w) (0x2204 + ((w) << 3))
+#define MVNETA_WIN_REMAP(w) (0x2280 + ((w) << 2))
+#define MVNETA_BASE_ADDR_ENABLE 0x2290
+#define MVNETA_AC5_CNM_DDR_TARGET 0x2
+#define MVNETA_AC5_CNM_DDR_ATTR 0xb
+#define MVNETA_ACCESS_PROTECT_ENABLE 0x2294
+#define MVNETA_PORT_CONFIG 0x2400
+#define MVNETA_UNI_PROMISC_MODE BIT(0)
+#define MVNETA_DEF_RXQ(q) ((q) << 1)
+#define MVNETA_DEF_RXQ_ARP(q) ((q) << 4)
+#define MVNETA_TX_UNSET_ERR_SUM BIT(12)
+#define MVNETA_DEF_RXQ_TCP(q) ((q) << 16)
+#define MVNETA_DEF_RXQ_UDP(q) ((q) << 19)
+#define MVNETA_DEF_RXQ_BPDU(q) ((q) << 22)
+#define MVNETA_RX_CSUM_WITH_PSEUDO_HDR BIT(25)
+#define MVNETA_PORT_CONFIG_DEFL_VALUE(q) (MVNETA_DEF_RXQ(q) | \
+ MVNETA_DEF_RXQ_ARP(q) | \
+ MVNETA_DEF_RXQ_TCP(q) | \
+ MVNETA_DEF_RXQ_UDP(q) | \
+ MVNETA_DEF_RXQ_BPDU(q) | \
+ MVNETA_TX_UNSET_ERR_SUM | \
+ MVNETA_RX_CSUM_WITH_PSEUDO_HDR)
+#define MVNETA_PORT_CONFIG_EXTEND 0x2404
+#define MVNETA_MAC_ADDR_LOW 0x2414
+#define MVNETA_MAC_ADDR_HIGH 0x2418
+#define MVNETA_SDMA_CONFIG 0x241c
+#define MVNETA_SDMA_BRST_SIZE_16 4
+#define MVNETA_RX_BRST_SZ_MASK(burst) ((burst) << 1)
+#define MVNETA_RX_NO_DATA_SWAP BIT(4)
+#define MVNETA_TX_NO_DATA_SWAP BIT(5)
+#define MVNETA_DESC_SWAP BIT(6)
+#define MVNETA_TX_BRST_SZ_MASK(burst) ((burst) << 22)
+#define MVNETA_VLAN_PRIO_TO_RXQ 0x2440
+#define MVNETA_VLAN_PRIO_RXQ_MAP(prio, rxq) ((rxq) << ((prio) * 3))
+#define MVNETA_PORT_STATUS 0x2444
+#define MVNETA_TX_IN_PRGRS BIT(0)
+#define MVNETA_TX_FIFO_EMPTY BIT(8)
+#define MVNETA_RX_MIN_FRAME_SIZE 0x247c
+/* Only exists on Armada XP and Armada 370 */
+#define MVNETA_SERDES_CFG 0x24A0
+#define MVNETA_SGMII_SERDES_PROTO 0x0cc7
+#define MVNETA_QSGMII_SERDES_PROTO 0x0667
+#define MVNETA_HSGMII_SERDES_PROTO 0x1107
+#define MVNETA_TYPE_PRIO 0x24bc
+#define MVNETA_FORCE_UNI BIT(21)
+#define MVNETA_TXQ_CMD_1 0x24e4
+#define MVNETA_TXQ_CMD 0x2448
+#define MVNETA_TXQ_DISABLE_SHIFT 8
+#define MVNETA_TXQ_ENABLE_MASK 0x000000ff
+#define MVNETA_RX_DISCARD_FRAME_COUNT 0x2484
+#define MVNETA_OVERRUN_FRAME_COUNT 0x2488
+#define MVNETA_GMAC_CLOCK_DIVIDER 0x24f4
+#define MVNETA_GMAC_1MS_CLOCK_ENABLE BIT(31)
+#define MVNETA_ACC_MODE 0x2500
+#define MVNETA_BM_ADDRESS 0x2504
+#define MVNETA_CPU_MAP(cpu) (0x2540 + ((cpu) << 2))
+#define MVNETA_CPU_RXQ_ACCESS_ALL_MASK 0x000000ff
+#define MVNETA_CPU_TXQ_ACCESS_ALL_MASK 0x0000ff00
+#define MVNETA_CPU_RXQ_ACCESS(rxq) BIT(rxq)
+#define MVNETA_CPU_TXQ_ACCESS(txq) BIT(txq + 8)
+#define MVNETA_RXQ_TIME_COAL_REG(q) (0x2580 + ((q) << 2))
+
+/* Exception Interrupt Port/Queue Cause register
+ *
+ * Their behavior depends on the mapping done using the PCPX2Q
+ * registers. For a given CPU, if the bit associated with a queue is
+ * not set, then reads of this register from that CPU always return 0
+ * and writes have no effect.
+ */
+
+#define MVNETA_INTR_NEW_CAUSE 0x25a0
+#define MVNETA_INTR_NEW_MASK 0x25a4
+
+/* bits 0..7 = TXQ SENT, one bit per queue.
+ * bits 8..15 = RXQ OCCUP, one bit per queue.
+ * bits 16..23 = RXQ FREE, one bit per queue.
+ * bit 29 = OLD_REG_SUM, see old reg ?
+ * bit 30 = TX_ERR_SUM, one bit for 4 ports
+ * bit 31 = MISC_SUM, one bit for 4 ports
+ */
+#define MVNETA_TX_INTR_MASK(nr_txqs) (((1 << nr_txqs) - 1) << 0)
+#define MVNETA_TX_INTR_MASK_ALL (0xff << 0)
+#define MVNETA_RX_INTR_MASK(nr_rxqs) (((1 << nr_rxqs) - 1) << 8)
+#define MVNETA_RX_INTR_MASK_ALL (0xff << 8)
+#define MVNETA_MISCINTR_INTR_MASK BIT(31)
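+/* For example, with the driver's default of eight TX and eight RX queues,
+ * MVNETA_TX_INTR_MASK(8) expands to ((1 << 8) - 1) << 0 = 0xff and
+ * MVNETA_RX_INTR_MASK(8) to ((1 << 8) - 1) << 8 = 0xff00, i.e. one bit per
+ * queue in the layout described above.
+ */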
+
+#define MVNETA_INTR_OLD_CAUSE 0x25a8
+#define MVNETA_INTR_OLD_MASK 0x25ac
+
+/* Data Path Port/Queue Cause Register */
+#define MVNETA_INTR_MISC_CAUSE 0x25b0
+#define MVNETA_INTR_MISC_MASK 0x25b4
+
+#define MVNETA_CAUSE_PHY_STATUS_CHANGE BIT(0)
+#define MVNETA_CAUSE_LINK_CHANGE BIT(1)
+#define MVNETA_CAUSE_PTP BIT(4)
+
+#define MVNETA_CAUSE_INTERNAL_ADDR_ERR BIT(7)
+#define MVNETA_CAUSE_RX_OVERRUN BIT(8)
+#define MVNETA_CAUSE_RX_CRC_ERROR BIT(9)
+#define MVNETA_CAUSE_RX_LARGE_PKT BIT(10)
+#define MVNETA_CAUSE_TX_UNDERUN BIT(11)
+#define MVNETA_CAUSE_PRBS_ERR BIT(12)
+#define MVNETA_CAUSE_PSC_SYNC_CHANGE BIT(13)
+#define MVNETA_CAUSE_SERDES_SYNC_ERR BIT(14)
+
+#define MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT 16
+#define MVNETA_CAUSE_BMU_ALLOC_ERR_ALL_MASK (0xF << MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT)
+#define MVNETA_CAUSE_BMU_ALLOC_ERR_MASK(pool) (1 << (MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT + (pool)))
+
+#define MVNETA_CAUSE_TXQ_ERROR_SHIFT 24
+#define MVNETA_CAUSE_TXQ_ERROR_ALL_MASK (0xFF << MVNETA_CAUSE_TXQ_ERROR_SHIFT)
+#define MVNETA_CAUSE_TXQ_ERROR_MASK(q) (1 << (MVNETA_CAUSE_TXQ_ERROR_SHIFT + (q)))
+
+#define MVNETA_INTR_ENABLE 0x25b8
+#define MVNETA_TXQ_INTR_ENABLE_ALL_MASK 0x0000ff00
+#define MVNETA_RXQ_INTR_ENABLE_ALL_MASK 0x000000ff
+
+#define MVNETA_RXQ_CMD 0x2680
+#define MVNETA_RXQ_DISABLE_SHIFT 8
+#define MVNETA_RXQ_ENABLE_MASK 0x000000ff
+#define MVETH_TXQ_TOKEN_COUNT_REG(q) (0x2700 + ((q) << 4))
+#define MVETH_TXQ_TOKEN_CFG_REG(q) (0x2704 + ((q) << 4))
+#define MVNETA_GMAC_CTRL_0 0x2c00
+#define MVNETA_GMAC_MAX_RX_SIZE_SHIFT 2
+#define MVNETA_GMAC_MAX_RX_SIZE_MASK 0x7ffc
+#define MVNETA_GMAC0_PORT_1000BASE_X BIT(1)
+#define MVNETA_GMAC0_PORT_ENABLE BIT(0)
+#define MVNETA_GMAC_CTRL_2 0x2c08
+#define MVNETA_GMAC2_INBAND_AN_ENABLE BIT(0)
+#define MVNETA_GMAC2_PCS_ENABLE BIT(3)
+#define MVNETA_GMAC2_PORT_RGMII BIT(4)
+#define MVNETA_GMAC2_PORT_RESET BIT(6)
+#define MVNETA_GMAC_STATUS 0x2c10
+#define MVNETA_GMAC_LINK_UP BIT(0)
+#define MVNETA_GMAC_SPEED_1000 BIT(1)
+#define MVNETA_GMAC_SPEED_100 BIT(2)
+#define MVNETA_GMAC_FULL_DUPLEX BIT(3)
+#define MVNETA_GMAC_RX_FLOW_CTRL_ENABLE BIT(4)
+#define MVNETA_GMAC_TX_FLOW_CTRL_ENABLE BIT(5)
+#define MVNETA_GMAC_RX_FLOW_CTRL_ACTIVE BIT(6)
+#define MVNETA_GMAC_TX_FLOW_CTRL_ACTIVE BIT(7)
+#define MVNETA_GMAC_AN_COMPLETE BIT(11)
+#define MVNETA_GMAC_SYNC_OK BIT(14)
+#define MVNETA_GMAC_AUTONEG_CONFIG 0x2c0c
+#define MVNETA_GMAC_FORCE_LINK_DOWN BIT(0)
+#define MVNETA_GMAC_FORCE_LINK_PASS BIT(1)
+#define MVNETA_GMAC_INBAND_AN_ENABLE BIT(2)
+#define MVNETA_GMAC_AN_BYPASS_ENABLE BIT(3)
+#define MVNETA_GMAC_INBAND_RESTART_AN BIT(4)
+#define MVNETA_GMAC_CONFIG_MII_SPEED BIT(5)
+#define MVNETA_GMAC_CONFIG_GMII_SPEED BIT(6)
+#define MVNETA_GMAC_AN_SPEED_EN BIT(7)
+#define MVNETA_GMAC_CONFIG_FLOW_CTRL BIT(8)
+#define MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL BIT(9)
+#define MVNETA_GMAC_AN_FLOW_CTRL_EN BIT(11)
+#define MVNETA_GMAC_CONFIG_FULL_DUPLEX BIT(12)
+#define MVNETA_GMAC_AN_DUPLEX_EN BIT(13)
+#define MVNETA_GMAC_CTRL_4 0x2c90
+#define MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE BIT(1)
+#define MVNETA_MIB_COUNTERS_BASE 0x3000
+#define MVNETA_MIB_LATE_COLLISION 0x7c
+#define MVNETA_DA_FILT_SPEC_MCAST 0x3400
+#define MVNETA_DA_FILT_OTH_MCAST 0x3500
+#define MVNETA_DA_FILT_UCAST_BASE 0x3600
+#define MVNETA_TXQ_BASE_ADDR_REG(q) (0x3c00 + ((q) << 2))
+#define MVNETA_TXQ_SIZE_REG(q) (0x3c20 + ((q) << 2))
+#define MVNETA_TXQ_SENT_THRESH_ALL_MASK 0x3fff0000
+#define MVNETA_TXQ_SENT_THRESH_MASK(coal) ((coal) << 16)
+#define MVNETA_TXQ_UPDATE_REG(q) (0x3c60 + ((q) << 2))
+#define MVNETA_TXQ_DEC_SENT_SHIFT 16
+#define MVNETA_TXQ_DEC_SENT_MASK 0xff
+#define MVNETA_TXQ_STATUS_REG(q) (0x3c40 + ((q) << 2))
+#define MVNETA_TXQ_SENT_DESC_SHIFT 16
+#define MVNETA_TXQ_SENT_DESC_MASK 0x3fff0000
+#define MVNETA_PORT_TX_RESET 0x3cf0
+#define MVNETA_PORT_TX_DMA_RESET BIT(0)
+#define MVNETA_TXQ_CMD1_REG 0x3e00
+#define MVNETA_TXQ_CMD1_BW_LIM_SEL_V1 BIT(3)
+#define MVNETA_TXQ_CMD1_BW_LIM_EN BIT(0)
+#define MVNETA_REFILL_NUM_CLK_REG 0x3e08
+#define MVNETA_REFILL_MAX_NUM_CLK 0x0000ffff
+#define MVNETA_TX_MTU 0x3e0c
+#define MVNETA_TX_TOKEN_SIZE 0x3e14
+#define MVNETA_TX_TOKEN_SIZE_MAX 0xffffffff
+#define MVNETA_TXQ_BUCKET_REFILL_REG(q) (0x3e20 + ((q) << 2))
+#define MVNETA_TXQ_BUCKET_REFILL_PERIOD_MASK 0x3ff00000
+#define MVNETA_TXQ_BUCKET_REFILL_PERIOD_SHIFT 20
+#define MVNETA_TXQ_BUCKET_REFILL_VALUE_MAX 0x0007ffff
+#define MVNETA_TXQ_TOKEN_SIZE_REG(q) (0x3e40 + ((q) << 2))
+#define MVNETA_TXQ_TOKEN_SIZE_MAX 0x7fffffff
+
+/* The values of the bucket refill base period and refill period are taken from
+ * the reference manual, and together give a base resolution of 10Kbps. This
+ * allows covering all rate-limit values from 10Kbps up to 5Gbps.
+ */
+
+/* Base period for the rate limit algorithm */
+#define MVNETA_TXQ_BUCKET_REFILL_BASE_PERIOD_NS 100
+
+/* Number of Base Period to wait between each bucket refill */
+#define MVNETA_TXQ_BUCKET_REFILL_PERIOD 1000
+
+/* The base resolution for rate limiting, in bps. Any max_rate value should be
+ * a multiple of that value.
+ */
+#define MVNETA_TXQ_RATE_LIMIT_RESOLUTION (NSEC_PER_SEC / \
+ (MVNETA_TXQ_BUCKET_REFILL_BASE_PERIOD_NS * \
+ MVNETA_TXQ_BUCKET_REFILL_PERIOD))
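+/* With the values above, the refill interval is 100 ns * 1000 = 100 us, so the
+ * resolution works out to 10^9 / (100 * 1000) = 10000 bps = 10 Kbps. Assuming
+ * the refill value is expressed in units of this resolution, the maximum
+ * refill value (0x7ffff) corresponds to roughly 5.2 Gbps, which matches the
+ * 5 Gbps upper bound mentioned above.
+ */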
+
+#define MVNETA_LPI_CTRL_0 0x2cc0
+#define MVNETA_LPI_CTRL_1 0x2cc4
+#define MVNETA_LPI_REQUEST_ENABLE BIT(0)
+#define MVNETA_LPI_CTRL_2 0x2cc8
+#define MVNETA_LPI_STATUS 0x2ccc
+
+#define MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff
+
+/* Descriptor ring Macros */
+#define MVNETA_QUEUE_NEXT_DESC(q, index) \
+ (((index) < (q)->last_desc) ? ((index) + 1) : 0)
+
+/* Various constants */
+
+/* Coalescing */
+#define MVNETA_TXDONE_COAL_PKTS 0 /* interrupt per packet */
+#define MVNETA_RX_COAL_PKTS 32
+#define MVNETA_RX_COAL_USEC 100
+
+/* The two-byte Marvell header. It either contains a special value used
+ * by Marvell switches when a specific hardware mode is enabled (not
+ * supported by this driver) or is filled automatically with zeroes on
+ * the RX side. Since those two bytes sit in front of the Ethernet
+ * header, they automatically leave the IP header aligned on a 4-byte
+ * boundary: the hardware skips those two bytes on its own.
+ */
+#define MVNETA_MH_SIZE 2
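+/* For example, with the 2-byte Marvell header in front of the 14-byte
+ * Ethernet header, the IP header starts at offset 2 + 14 = 16 from the
+ * beginning of the buffer and is therefore 4-byte aligned.
+ */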
+
+#define MVNETA_VLAN_TAG_LEN 4
+
+#define MVNETA_TX_CSUM_DEF_SIZE 1600
+#define MVNETA_TX_CSUM_MAX_SIZE 9800
+#define MVNETA_ACC_MODE_EXT1 1
+#define MVNETA_ACC_MODE_EXT2 2
+
+#define MVNETA_MAX_DECODE_WIN 6
+
+/* Timeout constants */
+#define MVNETA_TX_DISABLE_TIMEOUT_MSEC 1000
+#define MVNETA_RX_DISABLE_TIMEOUT_MSEC 1000
+#define MVNETA_TX_FIFO_EMPTY_TIMEOUT 10000
+
+#define MVNETA_TX_MTU_MAX 0x3ffff
+
+/* The RSS lookup table actually has 256 entries but we do not use
+ * them yet
+ */
+#define MVNETA_RSS_LU_TABLE_SIZE 1
+
+/* Max number of Rx descriptors */
+#define MVNETA_MAX_RXD 512
+
+/* Max number of Tx descriptors */
+#define MVNETA_MAX_TXD 1024
+
+/* Max number of allowed TCP segments for software TSO */
+#define MVNETA_MAX_TSO_SEGS 100
+
+#define MVNETA_MAX_SKB_DESCS (MVNETA_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
+
+/* The size of a TSO header page */
+#define MVNETA_TSO_PAGE_SIZE (2 * PAGE_SIZE)
+
+/* Number of TSO headers per page. This should be a power of 2 */
+#define MVNETA_TSO_PER_PAGE (MVNETA_TSO_PAGE_SIZE / TSO_HEADER_SIZE)
+
+/* Maximum number of TSO header pages */
+#define MVNETA_MAX_TSO_PAGES (MVNETA_MAX_TXD / MVNETA_TSO_PER_PAGE)
+
+/* descriptor aligned size */
+#define MVNETA_DESC_ALIGNED_SIZE 32
+
+/* Number of bytes to be taken into account by HW when putting incoming data
+ * to the buffers. It is needed in case NET_SKB_PAD exceeds maximum packet
+ * offset supported in MVNETA_RXQ_CONFIG_REG(q) registers.
+ */
+#define MVNETA_RX_PKT_OFFSET_CORRECTION 64
+
+#define MVNETA_RX_PKT_SIZE(mtu) \
+ ALIGN((mtu) + MVNETA_MH_SIZE + MVNETA_VLAN_TAG_LEN + \
+ ETH_HLEN + ETH_FCS_LEN, \
+ cache_line_size())
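+/* As a rough example, assuming a 64-byte cache line, a 1500-byte MTU gives
+ * MVNETA_RX_PKT_SIZE(1500) = ALIGN(1500 + 2 + 4 + 14 + 4, 64) = 1536 bytes.
+ */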
+
+/* Driver assumes that the last 3 bits are 0 */
+#define MVNETA_SKB_HEADROOM ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8)
+#define MVNETA_SKB_PAD (SKB_DATA_ALIGN(sizeof(struct skb_shared_info) + \
+ MVNETA_SKB_HEADROOM))
+#define MVNETA_MAX_RX_BUF_SIZE (PAGE_SIZE - MVNETA_SKB_PAD)
+
+#define MVNETA_RX_GET_BM_POOL_ID(rxd) \
+ (((rxd)->status & MVNETA_RXD_BM_POOL_MASK) >> MVNETA_RXD_BM_POOL_SHIFT)
+
+enum {
+ ETHTOOL_STAT_EEE_WAKEUP,
+ ETHTOOL_STAT_SKB_ALLOC_ERR,
+ ETHTOOL_STAT_REFILL_ERR,
+ ETHTOOL_XDP_REDIRECT,
+ ETHTOOL_XDP_PASS,
+ ETHTOOL_XDP_DROP,
+ ETHTOOL_XDP_TX,
+ ETHTOOL_XDP_TX_ERR,
+ ETHTOOL_XDP_XMIT,
+ ETHTOOL_XDP_XMIT_ERR,
+ ETHTOOL_MAX_STATS,
+};
+
+struct mvneta_statistic {
+ unsigned short offset;
+ unsigned short type;
+ const char name[ETH_GSTRING_LEN];
+};
+
+#define T_REG_32 32
+#define T_REG_64 64
+#define T_SW 1
+
+#define MVNETA_XDP_PASS 0
+#define MVNETA_XDP_DROPPED BIT(0)
+#define MVNETA_XDP_TX BIT(1)
+#define MVNETA_XDP_REDIR BIT(2)
+
+static const struct mvneta_statistic mvneta_statistics[] = {
+ { 0x3000, T_REG_64, "good_octets_received", },
+ { 0x3010, T_REG_32, "good_frames_received", },
+ { 0x3008, T_REG_32, "bad_octets_received", },
+ { 0x3014, T_REG_32, "bad_frames_received", },
+ { 0x3018, T_REG_32, "broadcast_frames_received", },
+ { 0x301c, T_REG_32, "multicast_frames_received", },
+ { 0x3050, T_REG_32, "unrec_mac_control_received", },
+ { 0x3058, T_REG_32, "good_fc_received", },
+ { 0x305c, T_REG_32, "bad_fc_received", },
+ { 0x3060, T_REG_32, "undersize_received", },
+ { 0x3064, T_REG_32, "fragments_received", },
+ { 0x3068, T_REG_32, "oversize_received", },
+ { 0x306c, T_REG_32, "jabber_received", },
+ { 0x3070, T_REG_32, "mac_receive_error", },
+ { 0x3074, T_REG_32, "bad_crc_event", },
+ { 0x3078, T_REG_32, "collision", },
+ { 0x307c, T_REG_32, "late_collision", },
+ { 0x2484, T_REG_32, "rx_discard", },
+ { 0x2488, T_REG_32, "rx_overrun", },
+ { 0x3020, T_REG_32, "frames_64_octets", },
+ { 0x3024, T_REG_32, "frames_65_to_127_octets", },
+ { 0x3028, T_REG_32, "frames_128_to_255_octets", },
+ { 0x302c, T_REG_32, "frames_256_to_511_octets", },
+ { 0x3030, T_REG_32, "frames_512_to_1023_octets", },
+ { 0x3034, T_REG_32, "frames_1024_to_max_octets", },
+ { 0x3038, T_REG_64, "good_octets_sent", },
+ { 0x3040, T_REG_32, "good_frames_sent", },
+ { 0x3044, T_REG_32, "excessive_collision", },
+ { 0x3048, T_REG_32, "multicast_frames_sent", },
+ { 0x304c, T_REG_32, "broadcast_frames_sent", },
+ { 0x3054, T_REG_32, "fc_sent", },
+ { 0x300c, T_REG_32, "internal_mac_transmit_err", },
+ { ETHTOOL_STAT_EEE_WAKEUP, T_SW, "eee_wakeup_errors", },
+ { ETHTOOL_STAT_SKB_ALLOC_ERR, T_SW, "skb_alloc_errors", },
+ { ETHTOOL_STAT_REFILL_ERR, T_SW, "refill_errors", },
+ { ETHTOOL_XDP_REDIRECT, T_SW, "rx_xdp_redirect", },
+ { ETHTOOL_XDP_PASS, T_SW, "rx_xdp_pass", },
+ { ETHTOOL_XDP_DROP, T_SW, "rx_xdp_drop", },
+ { ETHTOOL_XDP_TX, T_SW, "rx_xdp_tx", },
+ { ETHTOOL_XDP_TX_ERR, T_SW, "rx_xdp_tx_errors", },
+ { ETHTOOL_XDP_XMIT, T_SW, "tx_xdp_xmit", },
+ { ETHTOOL_XDP_XMIT_ERR, T_SW, "tx_xdp_xmit_errors", },
+};
+
+struct mvneta_stats {
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 tx_packets;
+ u64 tx_bytes;
+ /* xdp */
+ u64 xdp_redirect;
+ u64 xdp_pass;
+ u64 xdp_drop;
+ u64 xdp_xmit;
+ u64 xdp_xmit_err;
+ u64 xdp_tx;
+ u64 xdp_tx_err;
+};
+
+struct mvneta_ethtool_stats {
+ struct mvneta_stats ps;
+ u64 skb_alloc_error;
+ u64 refill_error;
+};
+
+struct mvneta_pcpu_stats {
+ struct u64_stats_sync syncp;
+
+ struct mvneta_ethtool_stats es;
+ u64 rx_dropped;
+ u64 rx_errors;
+};
+
+struct mvneta_pcpu_port {
+ /* Pointer to the shared port */
+ struct mvneta_port *pp;
+
+ /* Pointer to the CPU-local NAPI struct */
+ struct napi_struct napi;
+
+ /* Cause of the previous interrupt */
+ u32 cause_rx_tx;
+};
+
+enum {
+ __MVNETA_DOWN,
+};
+
+struct mvneta_port {
+ u8 id;
+ struct mvneta_pcpu_port __percpu *ports;
+ struct mvneta_pcpu_stats __percpu *stats;
+
+ unsigned long state;
+
+ int pkt_size;
+ void __iomem *base;
+ struct mvneta_rx_queue *rxqs;
+ struct mvneta_tx_queue *txqs;
+ struct net_device *dev;
+ struct hlist_node node_online;
+ struct hlist_node node_dead;
+ int rxq_def;
+ /* Protect the access to the percpu interrupt registers,
+ * ensuring that the configuration remains coherent.
+ */
+ spinlock_t lock;
+ bool is_stopped;
+
+ u32 cause_rx_tx;
+ struct napi_struct napi;
+
+ struct bpf_prog *xdp_prog;
+
+ /* Core clock */
+ struct clk *clk;
+ /* AXI clock */
+ struct clk *clk_bus;
+ u8 mcast_count[256];
+ u16 tx_ring_size;
+ u16 rx_ring_size;
+
+ phy_interface_t phy_interface;
+ struct device_node *dn;
+ unsigned int tx_csum_limit;
+ struct phylink *phylink;
+ struct phylink_config phylink_config;
+ struct phylink_pcs phylink_pcs;
+ struct phy *comphy;
+
+ struct mvneta_bm *bm_priv;
+ struct mvneta_bm_pool *pool_long;
+ struct mvneta_bm_pool *pool_short;
+ int bm_win_id;
+
+ bool eee_enabled;
+ bool eee_active;
+ bool tx_lpi_enabled;
+
+ u64 ethtool_stats[ARRAY_SIZE(mvneta_statistics)];
+
+ u32 indir[MVNETA_RSS_LU_TABLE_SIZE];
+
+ /* Flags for special SoC configurations */
+ bool neta_armada3700;
+ bool neta_ac5;
+ u16 rx_offset_correction;
+ const struct mbus_dram_target_info *dram_target_info;
+};
+
+/* The mvneta_tx_desc and mvneta_rx_desc structures describe the
+ * layout of the transmit and reception DMA descriptors, and their
+ * layout is therefore defined by the hardware design
+ */
+
+#define MVNETA_TX_L3_OFF_SHIFT 0
+#define MVNETA_TX_IP_HLEN_SHIFT 8
+#define MVNETA_TX_L4_UDP BIT(16)
+#define MVNETA_TX_L3_IP6 BIT(17)
+#define MVNETA_TXD_IP_CSUM BIT(18)
+#define MVNETA_TXD_Z_PAD BIT(19)
+#define MVNETA_TXD_L_DESC BIT(20)
+#define MVNETA_TXD_F_DESC BIT(21)
+#define MVNETA_TXD_FLZ_DESC (MVNETA_TXD_Z_PAD | \
+ MVNETA_TXD_L_DESC | \
+ MVNETA_TXD_F_DESC)
+#define MVNETA_TX_L4_CSUM_FULL BIT(30)
+#define MVNETA_TX_L4_CSUM_NOT BIT(31)
+
+#define MVNETA_RXD_ERR_CRC 0x0
+#define MVNETA_RXD_BM_POOL_SHIFT 13
+#define MVNETA_RXD_BM_POOL_MASK (BIT(13) | BIT(14))
+#define MVNETA_RXD_ERR_SUMMARY BIT(16)
+#define MVNETA_RXD_ERR_OVERRUN BIT(17)
+#define MVNETA_RXD_ERR_LEN BIT(18)
+#define MVNETA_RXD_ERR_RESOURCE (BIT(17) | BIT(18))
+#define MVNETA_RXD_ERR_CODE_MASK (BIT(17) | BIT(18))
+#define MVNETA_RXD_L3_IP4 BIT(25)
+#define MVNETA_RXD_LAST_DESC BIT(26)
+#define MVNETA_RXD_FIRST_DESC BIT(27)
+#define MVNETA_RXD_FIRST_LAST_DESC (MVNETA_RXD_FIRST_DESC | \
+ MVNETA_RXD_LAST_DESC)
+#define MVNETA_RXD_L4_CSUM_OK BIT(30)
+
+#if defined(__LITTLE_ENDIAN)
+struct mvneta_tx_desc {
+ u32 command; /* Options used by HW for packet transmitting.*/
+ u16 reserved1; /* csum_l4 (for future use) */
+ u16 data_size; /* Data size of transmitted packet in bytes */
+ u32 buf_phys_addr; /* Physical addr of transmitted buffer */
+ u32 reserved2; /* hw_cmd - (for future use, PMT) */
+ u32 reserved3[4]; /* Reserved - (for future use) */
+};
+
+struct mvneta_rx_desc {
+ u32 status; /* Info about received packet */
+ u16 reserved1; /* pnc_info - (for future use, PnC) */
+ u16 data_size; /* Size of received packet in bytes */
+
+ u32 buf_phys_addr; /* Physical address of the buffer */
+ u32 reserved2; /* pnc_flow_id (for future use, PnC) */
+
+ u32 buf_cookie; /* cookie for access to RX buffer in rx path */
+ u16 reserved3; /* prefetch_cmd, for future use */
+ u16 reserved4; /* csum_l4 - (for future use, PnC) */
+
+ u32 reserved5; /* pnc_extra PnC (for future use, PnC) */
+ u32 reserved6; /* hw_cmd (for future use, PnC and HWF) */
+};
+#else
+struct mvneta_tx_desc {
+ u16 data_size; /* Data size of transmitted packet in bytes */
+ u16 reserved1; /* csum_l4 (for future use) */
+ u32 command; /* Options used by HW for packet transmitting.*/
+ u32 reserved2; /* hw_cmd - (for future use, PMT) */
+ u32 buf_phys_addr; /* Physical addr of transmitted buffer */
+ u32 reserved3[4]; /* Reserved - (for future use) */
+};
+
+struct mvneta_rx_desc {
+ u16 data_size; /* Size of received packet in bytes */
+ u16 reserved1; /* pnc_info - (for future use, PnC) */
+ u32 status; /* Info about received packet */
+
+ u32 reserved2; /* pnc_flow_id (for future use, PnC) */
+ u32 buf_phys_addr; /* Physical address of the buffer */
+
+ u16 reserved4; /* csum_l4 - (for future use, PnC) */
+ u16 reserved3; /* prefetch_cmd, for future use */
+ u32 buf_cookie; /* cookie for access to RX buffer in rx path */
+
+ u32 reserved5; /* pnc_extra PnC (for future use, PnC) */
+ u32 reserved6; /* hw_cmd (for future use, PnC and HWF) */
+};
+#endif
+
+enum mvneta_tx_buf_type {
+ MVNETA_TYPE_TSO,
+ MVNETA_TYPE_SKB,
+ MVNETA_TYPE_XDP_TX,
+ MVNETA_TYPE_XDP_NDO,
+};
+
+struct mvneta_tx_buf {
+ enum mvneta_tx_buf_type type;
+ union {
+ struct xdp_frame *xdpf;
+ struct sk_buff *skb;
+ };
+};
+
+struct mvneta_tx_queue {
+ /* Number of this TX queue, in the range 0-7 */
+ u8 id;
+
+ /* Number of TX DMA descriptors in the descriptor ring */
+ int size;
+
+ /* Number of currently used TX DMA descriptor in the
+ * descriptor ring
+ */
+ int count;
+ int pending;
+ int tx_stop_threshold;
+ int tx_wake_threshold;
+
+ /* Array of transmitted buffers */
+ struct mvneta_tx_buf *buf;
+
+ /* Index of last TX DMA descriptor that was inserted */
+ int txq_put_index;
+
+ /* Index of the TX DMA descriptor to be cleaned up */
+ int txq_get_index;
+
+ u32 done_pkts_coal;
+
+ /* Virtual address of the TX DMA descriptors array */
+ struct mvneta_tx_desc *descs;
+
+ /* DMA address of the TX DMA descriptors array */
+ dma_addr_t descs_phys;
+
+ /* Index of the last TX DMA descriptor */
+ int last_desc;
+
+ /* Index of the next TX DMA descriptor to process */
+ int next_desc_to_proc;
+
+ /* DMA buffers for TSO headers */
+ char *tso_hdrs[MVNETA_MAX_TSO_PAGES];
+
+ /* DMA address of TSO headers */
+ dma_addr_t tso_hdrs_phys[MVNETA_MAX_TSO_PAGES];
+
+ /* Affinity mask for CPUs*/
+ cpumask_t affinity_mask;
+};
+
+struct mvneta_rx_queue {
+ /* rx queue number, in the range 0-7 */
+ u8 id;
+
+ /* num of rx descriptors in the rx descriptor ring */
+ int size;
+
+ u32 pkts_coal;
+ u32 time_coal;
+
+ /* page_pool */
+ struct page_pool *page_pool;
+ struct xdp_rxq_info xdp_rxq;
+
+ /* Virtual address of the RX buffer */
+ void **buf_virt_addr;
+
+ /* Virtual address of the RX DMA descriptors array */
+ struct mvneta_rx_desc *descs;
+
+ /* DMA address of the RX DMA descriptors array */
+ dma_addr_t descs_phys;
+
+ /* Index of the last RX DMA descriptor */
+ int last_desc;
+
+ /* Index of the next RX DMA descriptor to process */
+ int next_desc_to_proc;
+
+ /* Index of first RX DMA descriptor to refill */
+ int first_to_refill;
+ u32 refill_num;
+};
+
+static enum cpuhp_state online_hpstate;
+/* The hardware supports eight (8) RX queues. All of them are allocated,
+ * but by default only the queue selected by rxq_def is used for reception.
+ */
+static int rxq_number = 8;
+static int txq_number = 8;
+
+static int rxq_def;
+
+static int rx_copybreak __read_mostly = 256;
+
+/* HW BM requires that each port be identified by a unique ID */
+static int global_port_id;
+
+#define MVNETA_DRIVER_NAME "mvneta"
+#define MVNETA_DRIVER_VERSION "1.0"
+
+/* Utility/helper methods */
+
+/* Write helper method */
+static void mvreg_write(struct mvneta_port *pp, u32 offset, u32 data)
+{
+ writel(data, pp->base + offset);
+}
+
+/* Read helper method */
+static u32 mvreg_read(struct mvneta_port *pp, u32 offset)
+{
+ return readl(pp->base + offset);
+}
+
+/* Increment txq get counter */
+static void mvneta_txq_inc_get(struct mvneta_tx_queue *txq)
+{
+ txq->txq_get_index++;
+ if (txq->txq_get_index == txq->size)
+ txq->txq_get_index = 0;
+}
+
+/* Increment txq put counter */
+static void mvneta_txq_inc_put(struct mvneta_tx_queue *txq)
+{
+ txq->txq_put_index++;
+ if (txq->txq_put_index == txq->size)
+ txq->txq_put_index = 0;
+}
+
+
+/* Clear all MIB counters */
+static void mvneta_mib_counters_clear(struct mvneta_port *pp)
+{
+ int i;
+
+ /* Perform dummy reads from MIB counters */
+ for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4)
+ mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
+ mvreg_read(pp, MVNETA_RX_DISCARD_FRAME_COUNT);
+ mvreg_read(pp, MVNETA_OVERRUN_FRAME_COUNT);
+}
+
+/* Get System Network Statistics */
+static void
+mvneta_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+ unsigned int start;
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ struct mvneta_pcpu_stats *cpu_stats;
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 rx_dropped;
+ u64 rx_errors;
+ u64 tx_packets;
+ u64 tx_bytes;
+
+ cpu_stats = per_cpu_ptr(pp->stats, cpu);
+ do {
+ start = u64_stats_fetch_begin(&cpu_stats->syncp);
+ rx_packets = cpu_stats->es.ps.rx_packets;
+ rx_bytes = cpu_stats->es.ps.rx_bytes;
+ rx_dropped = cpu_stats->rx_dropped;
+ rx_errors = cpu_stats->rx_errors;
+ tx_packets = cpu_stats->es.ps.tx_packets;
+ tx_bytes = cpu_stats->es.ps.tx_bytes;
+ } while (u64_stats_fetch_retry(&cpu_stats->syncp, start));
+
+ stats->rx_packets += rx_packets;
+ stats->rx_bytes += rx_bytes;
+ stats->rx_dropped += rx_dropped;
+ stats->rx_errors += rx_errors;
+ stats->tx_packets += tx_packets;
+ stats->tx_bytes += tx_bytes;
+ }
+
+ stats->tx_dropped = dev->stats.tx_dropped;
+}
+
+/* Rx descriptors helper methods */
+
+/* Checks whether the RX descriptor having this status is both the first
+ * and the last descriptor for the RX packet. Each RX packet is currently
+ * received through a single RX descriptor, so an RX descriptor without
+ * both its first and last bits set indicates an error.
+ */
+static int mvneta_rxq_desc_is_first_last(u32 status)
+{
+ return (status & MVNETA_RXD_FIRST_LAST_DESC) ==
+ MVNETA_RXD_FIRST_LAST_DESC;
+}
+
+/* Add number of descriptors ready to receive new packets */
+static void mvneta_rxq_non_occup_desc_add(struct mvneta_port *pp,
+ struct mvneta_rx_queue *rxq,
+ int ndescs)
+{
+ /* Only MVNETA_RXQ_ADD_NON_OCCUPIED_MAX (255) descriptors can
+ * be added at once
+ */
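+ /* For instance, adding 600 new descriptors results in three writes
+ * to the status update register: 255, 255 and finally 90.
+ */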
+ while (ndescs > MVNETA_RXQ_ADD_NON_OCCUPIED_MAX) {
+ mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
+ (MVNETA_RXQ_ADD_NON_OCCUPIED_MAX <<
+ MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
+ ndescs -= MVNETA_RXQ_ADD_NON_OCCUPIED_MAX;
+ }
+
+ mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
+ (ndescs << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
+}
+
+/* Get number of RX descriptors occupied by received packets */
+static int mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp,
+ struct mvneta_rx_queue *rxq)
+{
+ u32 val;
+
+ val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id));
+ return val & MVNETA_RXQ_OCCUPIED_ALL_MASK;
+}
+
+/* Update the number of processed and refilled RX descriptors. Called upon
+ * return from the RX path or from mvneta_rxq_drop_pkts().
+ */
+static void mvneta_rxq_desc_num_update(struct mvneta_port *pp,
+ struct mvneta_rx_queue *rxq,
+ int rx_done, int rx_filled)
+{
+ u32 val;
+
+ if ((rx_done <= 0xff) && (rx_filled <= 0xff)) {
+ val = rx_done |
+ (rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT);
+ mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
+ return;
+ }
+
+ /* Only 255 descriptors can be added at once */
+ while ((rx_done > 0) || (rx_filled > 0)) {
+ if (rx_done <= 0xff) {
+ val = rx_done;
+ rx_done = 0;
+ } else {
+ val = 0xff;
+ rx_done -= 0xff;
+ }
+ if (rx_filled <= 0xff) {
+ val |= rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
+ rx_filled = 0;
+ } else {
+ val |= 0xff << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
+ rx_filled -= 0xff;
+ }
+ mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
+ }
+}
+
+/* Get pointer to next RX descriptor to be processed by SW */
+static struct mvneta_rx_desc *
+mvneta_rxq_next_desc_get(struct mvneta_rx_queue *rxq)
+{
+ int rx_desc = rxq->next_desc_to_proc;
+
+ rxq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(rxq, rx_desc);
+ prefetch(rxq->descs + rxq->next_desc_to_proc);
+ return rxq->descs + rx_desc;
+}
+
+/* Change maximum receive size of the port. */
+static void mvneta_max_rx_size_set(struct mvneta_port *pp, int max_rx_size)
+{
+ u32 val;
+
+ val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
+ val &= ~MVNETA_GMAC_MAX_RX_SIZE_MASK;
+ val |= ((max_rx_size - MVNETA_MH_SIZE) / 2) <<
+ MVNETA_GMAC_MAX_RX_SIZE_SHIFT;
+ mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
+}
+
+
+/* Set rx queue offset */
+static void mvneta_rxq_offset_set(struct mvneta_port *pp,
+ struct mvneta_rx_queue *rxq,
+ int offset)
+{
+ u32 val;
+
+ val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
+ val &= ~MVNETA_RXQ_PKT_OFFSET_ALL_MASK;
+
+ /* Offset is programmed in units of 8 bytes */
+ val |= MVNETA_RXQ_PKT_OFFSET_MASK(offset >> 3);
+ mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
+}
+
+
+/* Tx descriptors helper methods */
+
+/* Update HW with number of TX descriptors to be sent */
+static void mvneta_txq_pend_desc_add(struct mvneta_port *pp,
+ struct mvneta_tx_queue *txq,
+ int pend_desc)
+{
+ u32 val;
+
+ pend_desc += txq->pending;
+
+ /* Only 255 Tx descriptors can be added at once */
+ do {
+ val = min(pend_desc, 255);
+ mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
+ pend_desc -= val;
+ } while (pend_desc > 0);
+ txq->pending = 0;
+}
+
+/* Get pointer to next TX descriptor to be processed (send) by HW */
+static struct mvneta_tx_desc *
+mvneta_txq_next_desc_get(struct mvneta_tx_queue *txq)
+{
+ int tx_desc = txq->next_desc_to_proc;
+
+ txq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(txq, tx_desc);
+ return txq->descs + tx_desc;
+}
+
+/* Release the last allocated TX descriptor. Useful to handle DMA
+ * mapping failures in the TX path.
+ */
+static void mvneta_txq_desc_put(struct mvneta_tx_queue *txq)
+{
+ if (txq->next_desc_to_proc == 0)
+ txq->next_desc_to_proc = txq->last_desc - 1;
+ else
+ txq->next_desc_to_proc--;
+}
+
+/* Set rxq buf size */
+static void mvneta_rxq_buf_size_set(struct mvneta_port *pp,
+ struct mvneta_rx_queue *rxq,
+ int buf_size)
+{
+ u32 val;
+
+ val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id));
+
+ val &= ~MVNETA_RXQ_BUF_SIZE_MASK;
+ val |= ((buf_size >> 3) << MVNETA_RXQ_BUF_SIZE_SHIFT);
+
+ mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val);
+}
+
+/* Disable buffer management (BM) */
+static void mvneta_rxq_bm_disable(struct mvneta_port *pp,
+ struct mvneta_rx_queue *rxq)
+{
+ u32 val;
+
+ val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
+ val &= ~MVNETA_RXQ_HW_BUF_ALLOC;
+ mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
+}
+
+/* Enable buffer management (BM) */
+static void mvneta_rxq_bm_enable(struct mvneta_port *pp,
+ struct mvneta_rx_queue *rxq)
+{
+ u32 val;
+
+ val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
+ val |= MVNETA_RXQ_HW_BUF_ALLOC;
+ mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
+}
+
+/* Notify HW about port's assignment of pool for bigger packets */
+static void mvneta_rxq_long_pool_set(struct mvneta_port *pp,
+ struct mvneta_rx_queue *rxq)
+{
+ u32 val;
+
+ val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
+ val &= ~MVNETA_RXQ_LONG_POOL_ID_MASK;
+ val |= (pp->pool_long->id << MVNETA_RXQ_LONG_POOL_ID_SHIFT);
+
+ mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
+}
+
+/* Notify HW about port's assignment of pool for smaller packets */
+static void mvneta_rxq_short_pool_set(struct mvneta_port *pp,
+ struct mvneta_rx_queue *rxq)
+{
+ u32 val;
+
+ val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
+ val &= ~MVNETA_RXQ_SHORT_POOL_ID_MASK;
+ val |= (pp->pool_short->id << MVNETA_RXQ_SHORT_POOL_ID_SHIFT);
+
+ mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
+}
+
+/* Set port's receive buffer size for assigned BM pool */
+static inline void mvneta_bm_pool_bufsize_set(struct mvneta_port *pp,
+ int buf_size,
+ u8 pool_id)
+{
+ u32 val;
+
+ if (!IS_ALIGNED(buf_size, 8)) {
+ dev_warn(pp->dev->dev.parent,
+ "illegal buf_size value %d, round to %d\n",
+ buf_size, ALIGN(buf_size, 8));
+ buf_size = ALIGN(buf_size, 8);
+ }
+
+ val = mvreg_read(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id));
+ val |= buf_size & MVNETA_PORT_POOL_BUFFER_SZ_MASK;
+ mvreg_write(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id), val);
+}
+
+/* Configure an MBUS window in order to enable access to the BM internal SRAM */
+static int mvneta_mbus_io_win_set(struct mvneta_port *pp, u32 base, u32 wsize,
+ u8 target, u8 attr)
+{
+ u32 win_enable, win_protect;
+ int i;
+
+ win_enable = mvreg_read(pp, MVNETA_BASE_ADDR_ENABLE);
+
+ if (pp->bm_win_id < 0) {
+ /* Find first not occupied window */
+ for (i = 0; i < MVNETA_MAX_DECODE_WIN; i++) {
+ if (win_enable & (1 << i)) {
+ pp->bm_win_id = i;
+ break;
+ }
+ }
+ if (i == MVNETA_MAX_DECODE_WIN)
+ return -ENOMEM;
+ } else {
+ i = pp->bm_win_id;
+ }
+
+ mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
+ mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);
+
+ if (i < 4)
+ mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);
+
+ mvreg_write(pp, MVNETA_WIN_BASE(i), (base & 0xffff0000) |
+ (attr << 8) | target);
+
+ mvreg_write(pp, MVNETA_WIN_SIZE(i), (wsize - 1) & 0xffff0000);
+
+ win_protect = mvreg_read(pp, MVNETA_ACCESS_PROTECT_ENABLE);
+ win_protect |= 3 << (2 * i);
+ mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect);
+
+ win_enable &= ~(1 << i);
+ mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
+
+ return 0;
+}
+
+static int mvneta_bm_port_mbus_init(struct mvneta_port *pp)
+{
+ u32 wsize;
+ u8 target, attr;
+ int err;
+
+ /* Get BM window information */
+ err = mvebu_mbus_get_io_win_info(pp->bm_priv->bppi_phys_addr, &wsize,
+ &target, &attr);
+ if (err < 0)
+ return err;
+
+ pp->bm_win_id = -1;
+
+ /* Open NETA -> BM window */
+ err = mvneta_mbus_io_win_set(pp, pp->bm_priv->bppi_phys_addr, wsize,
+ target, attr);
+ if (err < 0) {
+ netdev_info(pp->dev, "fail to configure mbus window to BM\n");
+ return err;
+ }
+ return 0;
+}
+
+/* Assign and initialize pools for the port. In case of failure, the
+ * buffer manager will remain disabled for the current port.
+ */
+static int mvneta_bm_port_init(struct platform_device *pdev,
+ struct mvneta_port *pp)
+{
+ struct device_node *dn = pdev->dev.of_node;
+ u32 long_pool_id, short_pool_id;
+
+ if (!pp->neta_armada3700) {
+ int ret;
+
+ ret = mvneta_bm_port_mbus_init(pp);
+ if (ret)
+ return ret;
+ }
+
+ if (of_property_read_u32(dn, "bm,pool-long", &long_pool_id)) {
+ netdev_info(pp->dev, "missing long pool id\n");
+ return -EINVAL;
+ }
+
+ /* Create port's long pool depending on mtu */
+ pp->pool_long = mvneta_bm_pool_use(pp->bm_priv, long_pool_id,
+ MVNETA_BM_LONG, pp->id,
+ MVNETA_RX_PKT_SIZE(pp->dev->mtu));
+ if (!pp->pool_long) {
+ netdev_info(pp->dev, "fail to obtain long pool for port\n");
+ return -ENOMEM;
+ }
+
+ pp->pool_long->port_map |= 1 << pp->id;
+
+ mvneta_bm_pool_bufsize_set(pp, pp->pool_long->buf_size,
+ pp->pool_long->id);
+
+ /* If short pool id is not defined, assume using single pool */
+ if (of_property_read_u32(dn, "bm,pool-short", &short_pool_id))
+ short_pool_id = long_pool_id;
+
+ /* Create port's short pool */
+ pp->pool_short = mvneta_bm_pool_use(pp->bm_priv, short_pool_id,
+ MVNETA_BM_SHORT, pp->id,
+ MVNETA_BM_SHORT_PKT_SIZE);
+ if (!pp->pool_short) {
+ netdev_info(pp->dev, "fail to obtain short pool for port\n");
+ mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
+ return -ENOMEM;
+ }
+
+ if (short_pool_id != long_pool_id) {
+ pp->pool_short->port_map |= 1 << pp->id;
+ mvneta_bm_pool_bufsize_set(pp, pp->pool_short->buf_size,
+ pp->pool_short->id);
+ }
+
+ return 0;
+}
+
+/* Update settings of a pool for bigger packets */
+static void mvneta_bm_update_mtu(struct mvneta_port *pp, int mtu)
+{
+ struct mvneta_bm_pool *bm_pool = pp->pool_long;
+ struct hwbm_pool *hwbm_pool = &bm_pool->hwbm_pool;
+ int num;
+
+ /* Release all buffers from long pool */
+ mvneta_bm_bufs_free(pp->bm_priv, bm_pool, 1 << pp->id);
+ if (hwbm_pool->buf_num) {
+ WARN(1, "cannot free all buffers in pool %d\n",
+ bm_pool->id);
+ goto bm_mtu_err;
+ }
+
+ bm_pool->pkt_size = MVNETA_RX_PKT_SIZE(mtu);
+ bm_pool->buf_size = MVNETA_RX_BUF_SIZE(bm_pool->pkt_size);
+ hwbm_pool->frag_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
+ SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(bm_pool->pkt_size));
+
+ /* Fill entire long pool */
+ num = hwbm_pool_add(hwbm_pool, hwbm_pool->size);
+ if (num != hwbm_pool->size) {
+ WARN(1, "pool %d: %d of %d allocated\n",
+ bm_pool->id, num, hwbm_pool->size);
+ goto bm_mtu_err;
+ }
+ mvneta_bm_pool_bufsize_set(pp, bm_pool->buf_size, bm_pool->id);
+
+ return;
+
+bm_mtu_err:
+ mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
+ mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short, 1 << pp->id);
+
+ pp->bm_priv = NULL;
+ pp->rx_offset_correction = MVNETA_SKB_HEADROOM;
+ mvreg_write(pp, MVNETA_ACC_MODE, MVNETA_ACC_MODE_EXT1);
+ netdev_info(pp->dev, "fail to update MTU, fall back to software BM\n");
+}
+
+/* Start the Ethernet port RX and TX activity */
+static void mvneta_port_up(struct mvneta_port *pp)
+{
+ int queue;
+ u32 q_map;
+
+ /* Enable all initialized TXs. */
+ q_map = 0;
+ for (queue = 0; queue < txq_number; queue++) {
+ struct mvneta_tx_queue *txq = &pp->txqs[queue];
+ if (txq->descs)
+ q_map |= (1 << queue);
+ }
+ mvreg_write(pp, MVNETA_TXQ_CMD, q_map);
+
+ q_map = 0;
+ /* Enable all initialized RXQs. */
+ for (queue = 0; queue < rxq_number; queue++) {
+ struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
+
+ if (rxq->descs)
+ q_map |= (1 << queue);
+ }
+ mvreg_write(pp, MVNETA_RXQ_CMD, q_map);
+}
+
+/* Stop the Ethernet port activity */
+static void mvneta_port_down(struct mvneta_port *pp)
+{
+ u32 val;
+ int count;
+
+ /* Stop Rx port activity. Check port Rx activity. */
+ val = mvreg_read(pp, MVNETA_RXQ_CMD) & MVNETA_RXQ_ENABLE_MASK;
+
+ /* Issue stop command for active channels only */
+ if (val != 0)
+ mvreg_write(pp, MVNETA_RXQ_CMD,
+ val << MVNETA_RXQ_DISABLE_SHIFT);
+
+ /* Wait for all Rx activity to terminate. */
+ count = 0;
+ do {
+ if (count++ >= MVNETA_RX_DISABLE_TIMEOUT_MSEC) {
+ netdev_warn(pp->dev,
+ "TIMEOUT for RX stopped ! rx_queue_cmd: 0x%08x\n",
+ val);
+ break;
+ }
+ mdelay(1);
+
+ val = mvreg_read(pp, MVNETA_RXQ_CMD);
+ } while (val & MVNETA_RXQ_ENABLE_MASK);
+
+ /* Stop Tx port activity. Check port Tx activity. Issue stop
+ * command for active channels only
+ */
+ val = (mvreg_read(pp, MVNETA_TXQ_CMD)) & MVNETA_TXQ_ENABLE_MASK;
+
+ if (val != 0)
+ mvreg_write(pp, MVNETA_TXQ_CMD,
+ (val << MVNETA_TXQ_DISABLE_SHIFT));
+
+ /* Wait for all Tx activity to terminate. */
+ count = 0;
+ do {
+ if (count++ >= MVNETA_TX_DISABLE_TIMEOUT_MSEC) {
+ netdev_warn(pp->dev,
+ "TIMEOUT for TX stopped status=0x%08x\n",
+ val);
+ break;
+ }
+ mdelay(1);
+
+ /* Check TX Command reg that all Txqs are stopped */
+ val = mvreg_read(pp, MVNETA_TXQ_CMD);
+
+ } while (val & MVNETA_TXQ_ENABLE_MASK);
+
+ /* Double check to verify that TX FIFO is empty */
+ count = 0;
+ do {
+ if (count++ >= MVNETA_TX_FIFO_EMPTY_TIMEOUT) {
+ netdev_warn(pp->dev,
+ "TX FIFO empty timeout status=0x%08x\n",
+ val);
+ break;
+ }
+ mdelay(1);
+
+ val = mvreg_read(pp, MVNETA_PORT_STATUS);
+ } while (!(val & MVNETA_TX_FIFO_EMPTY) &&
+ (val & MVNETA_TX_IN_PRGRS));
+
+ udelay(200);
+}
+
+/* Enable the port by setting the port enable bit of the MAC control register */
+static void mvneta_port_enable(struct mvneta_port *pp)
+{
+ u32 val;
+
+ /* Enable port */
+ val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
+ val |= MVNETA_GMAC0_PORT_ENABLE;
+ mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
+}
+
+/* Disable the port and wait for about 200 usec before returning */
+static void mvneta_port_disable(struct mvneta_port *pp)
+{
+ u32 val;
+
+ /* Reset the Enable bit in the Serial Control Register */
+ val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
+ val &= ~MVNETA_GMAC0_PORT_ENABLE;
+ mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
+
+ udelay(200);
+}
+
+/* Multicast tables methods */
+
+/* Set all entries in Unicast MAC Table; queue==-1 means reject all */
+static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue)
+{
+ int offset;
+ u32 val;
+
+ if (queue == -1) {
+ val = 0;
+ } else {
+ val = 0x1 | (queue << 1);
+ val |= (val << 24) | (val << 16) | (val << 8);
+ }
+
+ for (offset = 0; offset <= 0xc; offset += 4)
+ mvreg_write(pp, MVNETA_DA_FILT_UCAST_BASE + offset, val);
+}
+
+/* Set all entries in Special Multicast MAC Table; queue==-1 means reject all */
+static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue)
+{
+ int offset;
+ u32 val;
+
+ if (queue == -1) {
+ val = 0;
+ } else {
+ val = 0x1 | (queue << 1);
+ val |= (val << 24) | (val << 16) | (val << 8);
+ }
+
+ for (offset = 0; offset <= 0xfc; offset += 4)
+ mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + offset, val);
+
+}
+
+/* Set all entries in Other Multicast MAC Table. queue==-1 means reject all */
+static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue)
+{
+ int offset;
+ u32 val;
+
+ if (queue == -1) {
+ memset(pp->mcast_count, 0, sizeof(pp->mcast_count));
+ val = 0;
+ } else {
+ memset(pp->mcast_count, 1, sizeof(pp->mcast_count));
+ val = 0x1 | (queue << 1);
+ val |= (val << 24) | (val << 16) | (val << 8);
+ }
+
+ for (offset = 0; offset <= 0xfc; offset += 4)
+ mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val);
+}
+
+static void mvneta_percpu_unmask_interrupt(void *arg)
+{
+ struct mvneta_port *pp = arg;
+
+ /* All the queues are unmasked, but actually only the ones
+ * mapped to this CPU will be unmasked
+ */
+ mvreg_write(pp, MVNETA_INTR_NEW_MASK,
+ MVNETA_RX_INTR_MASK_ALL |
+ MVNETA_TX_INTR_MASK_ALL |
+ MVNETA_MISCINTR_INTR_MASK);
+}
+
+static void mvneta_percpu_mask_interrupt(void *arg)
+{
+ struct mvneta_port *pp = arg;
+
+ /* All the queues are masked, but actually only the ones
+ * mapped to this CPU will be masked
+ */
+ mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
+ mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
+ mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
+}
+
+static void mvneta_percpu_clear_intr_cause(void *arg)
+{
+ struct mvneta_port *pp = arg;
+
+ /* All the queues are cleared, but actually only the ones
+ * mapped to this CPU will be cleared
+ */
+ mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0);
+ mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
+ mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
+}
+
+/* This method sets default values for the NETA port:
+ * Clears interrupt Cause and Mask registers.
+ * Clears all MAC tables.
+ * Sets defaults to all registers.
+ * Resets RX and TX descriptor rings.
+ * Resets PHY.
+ * This method can be called after mvneta_port_down() to return the port
+ * settings to defaults.
+ */
+static void mvneta_defaults_set(struct mvneta_port *pp)
+{
+ int cpu;
+ int queue;
+ u32 val;
+ int max_cpu = num_present_cpus();
+
+ /* Clear all Cause registers */
+ on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true);
+
+ /* Mask all interrupts */
+ on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
+ mvreg_write(pp, MVNETA_INTR_ENABLE, 0);
+
+ /* Enable MBUS Retry bit16 */
+ mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20);
+
+ /* Set CPU queue access map. CPUs are assigned to the RX and
+ * TX queues modulo their number. If there is only one TX
+ * queue then it is assigned to the CPU associated with the
+ * default RX queue.
+ */
+ for_each_present_cpu(cpu) {
+ int rxq_map = 0, txq_map = 0;
+ int rxq, txq;
+ if (!pp->neta_armada3700) {
+ for (rxq = 0; rxq < rxq_number; rxq++)
+ if ((rxq % max_cpu) == cpu)
+ rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);
+
+ for (txq = 0; txq < txq_number; txq++)
+ if ((txq % max_cpu) == cpu)
+ txq_map |= MVNETA_CPU_TXQ_ACCESS(txq);
+
+ /* With only one TX queue we configure a special case
+ * which allows getting all the IRQs on a single
+ * CPU
+ */
+ if (txq_number == 1)
+ txq_map = (cpu == pp->rxq_def) ?
+ MVNETA_CPU_TXQ_ACCESS(0) : 0;
+
+ } else {
+ txq_map = MVNETA_CPU_TXQ_ACCESS_ALL_MASK;
+ rxq_map = MVNETA_CPU_RXQ_ACCESS_ALL_MASK;
+ }
+
+ mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);
+ }
+
+ /* Reset RX and TX DMAs */
+ mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
+ mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);
+
+ /* Disable Legacy WRR, Disable EJP, Release from reset */
+ mvreg_write(pp, MVNETA_TXQ_CMD_1, 0);
+ for (queue = 0; queue < txq_number; queue++) {
+ mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(queue), 0);
+ mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(queue), 0);
+ }
+
+ mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
+ mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
+
+ /* Set Port Acceleration Mode */
+ if (pp->bm_priv)
+ /* HW buffer management + legacy parser */
+ val = MVNETA_ACC_MODE_EXT2;
+ else
+ /* SW buffer management + legacy parser */
+ val = MVNETA_ACC_MODE_EXT1;
+ mvreg_write(pp, MVNETA_ACC_MODE, val);
+
+ if (pp->bm_priv)
+ mvreg_write(pp, MVNETA_BM_ADDRESS, pp->bm_priv->bppi_phys_addr);
+
+ /* Update the portCfg register according to all RX queue types */
+ val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def);
+ mvreg_write(pp, MVNETA_PORT_CONFIG, val);
+
+ val = 0;
+ mvreg_write(pp, MVNETA_PORT_CONFIG_EXTEND, val);
+ mvreg_write(pp, MVNETA_RX_MIN_FRAME_SIZE, 64);
+
+ /* Build PORT_SDMA_CONFIG_REG */
+ val = 0;
+
+ /* Default burst size */
+ val |= MVNETA_TX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
+ val |= MVNETA_RX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
+ val |= MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP;
+
+#if defined(__BIG_ENDIAN)
+ val |= MVNETA_DESC_SWAP;
+#endif
+
+ /* Assign port SDMA configuration */
+ mvreg_write(pp, MVNETA_SDMA_CONFIG, val);
+
+ /* Disable PHY polling in hardware, since we're using the
+ * kernel phylib to do this.
+ */
+ val = mvreg_read(pp, MVNETA_UNIT_CONTROL);
+ val &= ~MVNETA_PHY_POLLING_ENABLE;
+ mvreg_write(pp, MVNETA_UNIT_CONTROL, val);
+
+ mvneta_set_ucast_table(pp, -1);
+ mvneta_set_special_mcast_table(pp, -1);
+ mvneta_set_other_mcast_table(pp, -1);
+
+ /* Set port interrupt enable register - default enable all */
+ mvreg_write(pp, MVNETA_INTR_ENABLE,
+ (MVNETA_RXQ_INTR_ENABLE_ALL_MASK
+ | MVNETA_TXQ_INTR_ENABLE_ALL_MASK));
+
+ mvneta_mib_counters_clear(pp);
+}
+
+/* Set max sizes for tx queues */
+static void mvneta_txq_max_tx_size_set(struct mvneta_port *pp, int max_tx_size)
+{
+ u32 val, size, mtu;
+ int queue;
+
+ mtu = max_tx_size * 8;
+ if (mtu > MVNETA_TX_MTU_MAX)
+ mtu = MVNETA_TX_MTU_MAX;
+
+ /* Set MTU */
+ val = mvreg_read(pp, MVNETA_TX_MTU);
+ val &= ~MVNETA_TX_MTU_MAX;
+ val |= mtu;
+ mvreg_write(pp, MVNETA_TX_MTU, val);
+
+ /* TX token size and all TXQs' token sizes must be larger than the MTU */
+ val = mvreg_read(pp, MVNETA_TX_TOKEN_SIZE);
+
+ size = val & MVNETA_TX_TOKEN_SIZE_MAX;
+ if (size < mtu) {
+ size = mtu;
+ val &= ~MVNETA_TX_TOKEN_SIZE_MAX;
+ val |= size;
+ mvreg_write(pp, MVNETA_TX_TOKEN_SIZE, val);
+ }
+ for (queue = 0; queue < txq_number; queue++) {
+ val = mvreg_read(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue));
+
+ size = val & MVNETA_TXQ_TOKEN_SIZE_MAX;
+ if (size < mtu) {
+ size = mtu;
+ val &= ~MVNETA_TXQ_TOKEN_SIZE_MAX;
+ val |= size;
+ mvreg_write(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue), val);
+ }
+ }
+}
+
+/* Set unicast address */
+static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble,
+ int queue)
+{
+ unsigned int unicast_reg;
+ unsigned int tbl_offset;
+ unsigned int reg_offset;
+
+ /* Locate the Unicast table entry */
+ last_nibble = (0xf & last_nibble);
+
+ /* offset from unicast tbl base */
+ tbl_offset = (last_nibble / 4) * 4;
+
+ /* offset within the above reg */
+ reg_offset = last_nibble % 4;
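+ /* e.g. a last nibble of 0xa gives tbl_offset = 8 (the third table
+ * register) and reg_offset = 2 (the third byte within it)
+ */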
+
+ unicast_reg = mvreg_read(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset));
+
+ if (queue == -1) {
+ /* Clear the accept-frame bits at the specified unicast DA tbl entry */
+ unicast_reg &= ~(0xff << (8 * reg_offset));
+ } else {
+ unicast_reg &= ~(0xff << (8 * reg_offset));
+ unicast_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
+ }
+
+ mvreg_write(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset), unicast_reg);
+}
+
+/* Set mac address */
+static void mvneta_mac_addr_set(struct mvneta_port *pp,
+ const unsigned char *addr, int queue)
+{
+ unsigned int mac_h;
+ unsigned int mac_l;
+
+ if (queue != -1) {
+ mac_l = (addr[4] << 8) | (addr[5]);
+ mac_h = (addr[0] << 24) | (addr[1] << 16) |
+ (addr[2] << 8) | (addr[3] << 0);
+
+ mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l);
+ mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h);
+ }
+
+ /* Accept frames of this address */
+ mvneta_set_ucast_addr(pp, addr[5], queue);
+}
+
+/* Set the number of packets that will be received before an RX interrupt
+ * is generated by the HW.
+ */
+static void mvneta_rx_pkts_coal_set(struct mvneta_port *pp,
+ struct mvneta_rx_queue *rxq, u32 value)
+{
+ mvreg_write(pp, MVNETA_RXQ_THRESHOLD_REG(rxq->id),
+ value | MVNETA_RXQ_NON_OCCUPIED(0));
+}
+
+/* Set the time delay in usec before an RX interrupt is generated by the
+ * HW.
+ */
+static void mvneta_rx_time_coal_set(struct mvneta_port *pp,
+ struct mvneta_rx_queue *rxq, u32 value)
+{
+ u32 val;
+ unsigned long clk_rate;
+
+ clk_rate = clk_get_rate(pp->clk);
+ val = (clk_rate / 1000000) * value;
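+ /* For example, with an assumed 250 MHz core clock (the actual rate
+ * comes from clk_get_rate() above), a value of 100 usec is programmed
+ * as (250000000 / 1000000) * 100 = 25000 clock cycles.
+ */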
+
+ mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val);
+}
+
+/* Set threshold for TX_DONE pkts coalescing */
+static void mvneta_tx_done_pkts_coal_set(struct mvneta_port *pp,
+ struct mvneta_tx_queue *txq, u32 value)
+{
+ u32 val;
+
+ val = mvreg_read(pp, MVNETA_TXQ_SIZE_REG(txq->id));
+
+ val &= ~MVNETA_TXQ_SENT_THRESH_ALL_MASK;
+ val |= MVNETA_TXQ_SENT_THRESH_MASK(value);
+
+ mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), val);
+}
+
+/* Handle RX descriptor fill by setting the buffer's physical address and
+ * recording its virtual address in the RX queue's buf_virt_addr array
+ */
+static void mvneta_rx_desc_fill(struct mvneta_rx_desc *rx_desc,
+ u32 phys_addr, void *virt_addr,
+ struct mvneta_rx_queue *rxq)
+{
+ int i;
+
+ rx_desc->buf_phys_addr = phys_addr;
+ i = rx_desc - rxq->descs;
+ rxq->buf_virt_addr[i] = virt_addr;
+}
+
+/* Decrement sent descriptors counter */
+static void mvneta_txq_sent_desc_dec(struct mvneta_port *pp,
+ struct mvneta_tx_queue *txq,
+ int sent_desc)
+{
+ u32 val;
+
+ /* Only 255 TX descriptors can be updated at once */
+ while (sent_desc > 0xff) {
+ val = 0xff << MVNETA_TXQ_DEC_SENT_SHIFT;
+ mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
+ sent_desc = sent_desc - 0xff;
+ }
+
+ val = sent_desc << MVNETA_TXQ_DEC_SENT_SHIFT;
+ mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
+}
+
+/* Get number of TX descriptors already sent by HW */
+static int mvneta_txq_sent_desc_num_get(struct mvneta_port *pp,
+ struct mvneta_tx_queue *txq)
+{
+ u32 val;
+ int sent_desc;
+
+ val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id));
+ sent_desc = (val & MVNETA_TXQ_SENT_DESC_MASK) >>
+ MVNETA_TXQ_SENT_DESC_SHIFT;
+
+ return sent_desc;
+}
+
+/* Get number of sent descriptors and decrement counter.
+ * The number of sent descriptors is returned.
+ */
+static int mvneta_txq_sent_desc_proc(struct mvneta_port *pp,
+ struct mvneta_tx_queue *txq)
+{
+ int sent_desc;
+
+ /* Get number of sent descriptors */
+ sent_desc = mvneta_txq_sent_desc_num_get(pp, txq);
+
+ /* Decrement sent descriptors counter */
+ if (sent_desc)
+ mvneta_txq_sent_desc_dec(pp, txq, sent_desc);
+
+ return sent_desc;
+}
+
+/* Set TXQ descriptors fields relevant for CSUM calculation */
+static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto,
+ int ip_hdr_len, int l4_proto)
+{
+ u32 command;
+
+ /* Fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
+ * G_L4_chk, L4_type; required only for checksum
+ * calculation
+ */
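+ /* For a plain TCP/IPv4 frame without a VLAN tag this typically means
+ * l3_offs = 14 and ip_hdr_len = 5 (in 32-bit words), i.e. a command of
+ * (14 << MVNETA_TX_L3_OFF_SHIFT) | (5 << MVNETA_TX_IP_HLEN_SHIFT) |
+ * MVNETA_TXD_IP_CSUM | MVNETA_TX_L4_CSUM_FULL.
+ */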
+ command = l3_offs << MVNETA_TX_L3_OFF_SHIFT;
+ command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT;
+
+ if (l3_proto == htons(ETH_P_IP))
+ command |= MVNETA_TXD_IP_CSUM;
+ else
+ command |= MVNETA_TX_L3_IP6;
+
+ if (l4_proto == IPPROTO_TCP)
+ command |= MVNETA_TX_L4_CSUM_FULL;
+ else if (l4_proto == IPPROTO_UDP)
+ command |= MVNETA_TX_L4_UDP | MVNETA_TX_L4_CSUM_FULL;
+ else
+ command |= MVNETA_TX_L4_CSUM_NOT;
+
+ return command;
+}
+
+
+/* Display more error info */
+static void mvneta_rx_error(struct mvneta_port *pp,
+ struct mvneta_rx_desc *rx_desc)
+{
+ struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
+ u32 status = rx_desc->status;
+
+ /* update per-cpu counter */
+ u64_stats_update_begin(&stats->syncp);
+ stats->rx_errors++;
+ u64_stats_update_end(&stats->syncp);
+
+ switch (status & MVNETA_RXD_ERR_CODE_MASK) {
+ case MVNETA_RXD_ERR_CRC:
+ netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n",
+ status, rx_desc->data_size);
+ break;
+ case MVNETA_RXD_ERR_OVERRUN:
+ netdev_err(pp->dev, "bad rx status %08x (overrun error), size=%d\n",
+ status, rx_desc->data_size);
+ break;
+ case MVNETA_RXD_ERR_LEN:
+ netdev_err(pp->dev, "bad rx status %08x (max frame length error), size=%d\n",
+ status, rx_desc->data_size);
+ break;
+ case MVNETA_RXD_ERR_RESOURCE:
+ netdev_err(pp->dev, "bad rx status %08x (resource error), size=%d\n",
+ status, rx_desc->data_size);
+ break;
+ }
+}
+
+/* Handle RX checksum offload based on the descriptor's status */
+static int mvneta_rx_csum(struct mvneta_port *pp, u32 status)
+{
+ if ((pp->dev->features & NETIF_F_RXCSUM) &&
+ (status & MVNETA_RXD_L3_IP4) &&
+ (status & MVNETA_RXD_L4_CSUM_OK))
+ return CHECKSUM_UNNECESSARY;
+
+ return CHECKSUM_NONE;
+}
+
+/* Return tx queue pointer (find last set bit) according to <cause> returned
+ * from the tx_done reg. <cause> must not be null. The return value is always
+ * a valid queue, matching the highest-numbered queue whose bit is set in
+ * <cause>.
+ */
+static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp,
+ u32 cause)
+{
+ int queue = fls(cause) - 1;
+
+ return &pp->txqs[queue];
+}
+
+/* Free tx queue skbuffs */
+static void mvneta_txq_bufs_free(struct mvneta_port *pp,
+ struct mvneta_tx_queue *txq, int num,
+ struct netdev_queue *nq, bool napi)
+{
+ unsigned int bytes_compl = 0, pkts_compl = 0;
+ struct xdp_frame_bulk bq;
+ int i;
+
+ xdp_frame_bulk_init(&bq);
+
+ rcu_read_lock(); /* need for xdp_return_frame_bulk */
+
+ for (i = 0; i < num; i++) {
+ struct mvneta_tx_buf *buf = &txq->buf[txq->txq_get_index];
+ struct mvneta_tx_desc *tx_desc = txq->descs +
+ txq->txq_get_index;
+
+ mvneta_txq_inc_get(txq);
+
+ if (buf->type == MVNETA_TYPE_XDP_NDO ||
+ buf->type == MVNETA_TYPE_SKB)
+ dma_unmap_single(pp->dev->dev.parent,
+ tx_desc->buf_phys_addr,
+ tx_desc->data_size, DMA_TO_DEVICE);
+ if ((buf->type == MVNETA_TYPE_TSO ||
+ buf->type == MVNETA_TYPE_SKB) && buf->skb) {
+ bytes_compl += buf->skb->len;
+ pkts_compl++;
+ dev_kfree_skb_any(buf->skb);
+ } else if ((buf->type == MVNETA_TYPE_XDP_TX ||
+ buf->type == MVNETA_TYPE_XDP_NDO) && buf->xdpf) {
+ if (napi && buf->type == MVNETA_TYPE_XDP_TX)
+ xdp_return_frame_rx_napi(buf->xdpf);
+ else
+ xdp_return_frame_bulk(buf->xdpf, &bq);
+ }
+ }
+ xdp_flush_frame_bulk(&bq);
+
+ rcu_read_unlock();
+
+ netdev_tx_completed_queue(nq, pkts_compl, bytes_compl);
+}
+
+/* Handle end of transmission */
+static void mvneta_txq_done(struct mvneta_port *pp,
+ struct mvneta_tx_queue *txq)
+{
+ struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
+ int tx_done;
+
+ tx_done = mvneta_txq_sent_desc_proc(pp, txq);
+ if (!tx_done)
+ return;
+
+ mvneta_txq_bufs_free(pp, txq, tx_done, nq, true);
+
+ txq->count -= tx_done;
+
+ if (netif_tx_queue_stopped(nq)) {
+ if (txq->count <= txq->tx_wake_threshold)
+ netif_tx_wake_queue(nq);
+ }
+}
+
+/* Refill processing for SW buffer management */
+/* Allocate page per descriptor */
+static int mvneta_rx_refill(struct mvneta_port *pp,
+ struct mvneta_rx_desc *rx_desc,
+ struct mvneta_rx_queue *rxq,
+ gfp_t gfp_mask)
+{
+ dma_addr_t phys_addr;
+ struct page *page;
+
+ page = page_pool_alloc_pages(rxq->page_pool,
+ gfp_mask | __GFP_NOWARN);
+ if (!page)
+ return -ENOMEM;
+
+ phys_addr = page_pool_get_dma_addr(page) + pp->rx_offset_correction;
+ mvneta_rx_desc_fill(rx_desc, phys_addr, page, rxq);
+
+ return 0;
+}
+
+/* Handle tx checksum */
+static u32 mvneta_skb_tx_csum(struct sk_buff *skb)
+{
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ int ip_hdr_len = 0;
+ __be16 l3_proto = vlan_get_protocol(skb);
+ u8 l4_proto;
+
+ if (l3_proto == htons(ETH_P_IP)) {
+ struct iphdr *ip4h = ip_hdr(skb);
+
+ /* Calculate IPv4 checksum and L4 checksum */
+ ip_hdr_len = ip4h->ihl;
+ l4_proto = ip4h->protocol;
+ } else if (l3_proto == htons(ETH_P_IPV6)) {
+ struct ipv6hdr *ip6h = ipv6_hdr(skb);
+
+ /* Read l4_protocol from one of IPv6 extra headers */
+ if (skb_network_header_len(skb) > 0)
+ ip_hdr_len = (skb_network_header_len(skb) >> 2);
+ l4_proto = ip6h->nexthdr;
+ } else
+ return MVNETA_TX_L4_CSUM_NOT;
+
+ return mvneta_txq_desc_csum(skb_network_offset(skb),
+ l3_proto, ip_hdr_len, l4_proto);
+ }
+
+ return MVNETA_TX_L4_CSUM_NOT;
+}
+
+/* Drop packets received by the RXQ and free buffers */
+static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
+ struct mvneta_rx_queue *rxq)
+{
+ int rx_done, i;
+
+ rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
+ if (rx_done)
+ mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
+
+ if (pp->bm_priv) {
+ for (i = 0; i < rx_done; i++) {
+ struct mvneta_rx_desc *rx_desc =
+ mvneta_rxq_next_desc_get(rxq);
+ u8 pool_id = MVNETA_RX_GET_BM_POOL_ID(rx_desc);
+ struct mvneta_bm_pool *bm_pool;
+
+ bm_pool = &pp->bm_priv->bm_pools[pool_id];
+ /* Return dropped buffer to the pool */
+ mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
+ rx_desc->buf_phys_addr);
+ }
+ return;
+ }
+
+ for (i = 0; i < rxq->size; i++) {
+ struct mvneta_rx_desc *rx_desc = rxq->descs + i;
+ void *data = rxq->buf_virt_addr[i];
+ if (!data || !(rx_desc->buf_phys_addr))
+ continue;
+
+ page_pool_put_full_page(rxq->page_pool, data, false);
+ }
+ if (xdp_rxq_info_is_reg(&rxq->xdp_rxq))
+ xdp_rxq_info_unreg(&rxq->xdp_rxq);
+ page_pool_destroy(rxq->page_pool);
+ rxq->page_pool = NULL;
+}
+
+static void
+mvneta_update_stats(struct mvneta_port *pp,
+ struct mvneta_stats *ps)
+{
+ struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
+
+ u64_stats_update_begin(&stats->syncp);
+ stats->es.ps.rx_packets += ps->rx_packets;
+ stats->es.ps.rx_bytes += ps->rx_bytes;
+ /* xdp */
+ stats->es.ps.xdp_redirect += ps->xdp_redirect;
+ stats->es.ps.xdp_pass += ps->xdp_pass;
+ stats->es.ps.xdp_drop += ps->xdp_drop;
+ u64_stats_update_end(&stats->syncp);
+}
+
+static inline
+int mvneta_rx_refill_queue(struct mvneta_port *pp, struct mvneta_rx_queue *rxq)
+{
+ struct mvneta_rx_desc *rx_desc;
+ int curr_desc = rxq->first_to_refill;
+ int i;
+
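+ /* Refill at most 64 descriptors per call */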
+ for (i = 0; (i < rxq->refill_num) && (i < 64); i++) {
+ rx_desc = rxq->descs + curr_desc;
+ if (!(rx_desc->buf_phys_addr)) {
+ if (mvneta_rx_refill(pp, rx_desc, rxq, GFP_ATOMIC)) {
+ struct mvneta_pcpu_stats *stats;
+
+ pr_err("Can't refill queue %d. Done %d from %d\n",
+ rxq->id, i, rxq->refill_num);
+
+ stats = this_cpu_ptr(pp->stats);
+ u64_stats_update_begin(&stats->syncp);
+ stats->es.refill_error++;
+ u64_stats_update_end(&stats->syncp);
+ break;
+ }
+ }
+ curr_desc = MVNETA_QUEUE_NEXT_DESC(rxq, curr_desc);
+ }
+ rxq->refill_num -= i;
+ rxq->first_to_refill = curr_desc;
+
+ return i;
+}
+
+static void
+mvneta_xdp_put_buff(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
+ struct xdp_buff *xdp, int sync_len)
+{
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
+ int i;
+
+ if (likely(!xdp_buff_has_frags(xdp)))
+ goto out;
+
+ for (i = 0; i < sinfo->nr_frags; i++)
+ page_pool_put_full_page(rxq->page_pool,
+ skb_frag_page(&sinfo->frags[i]), true);
+
+out:
+ page_pool_put_page(rxq->page_pool, virt_to_head_page(xdp->data),
+ sync_len, true);
+}
+
+static int
+mvneta_xdp_submit_frame(struct mvneta_port *pp, struct mvneta_tx_queue *txq,
+ struct xdp_frame *xdpf, int *nxmit_byte, bool dma_map)
+{
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
+ struct device *dev = pp->dev->dev.parent;
+ struct mvneta_tx_desc *tx_desc;
+ int i, num_frames = 1;
+ struct page *page;
+
+ if (unlikely(xdp_frame_has_frags(xdpf)))
+ num_frames += sinfo->nr_frags;
+
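+ /* Make sure the queue can hold all descriptors needed for this frame */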
+ if (txq->count + num_frames >= txq->size)
+ return MVNETA_XDP_DROPPED;
+
+ for (i = 0; i < num_frames; i++) {
+ struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
+ skb_frag_t *frag = NULL;
+ int len = xdpf->len;
+ dma_addr_t dma_addr;
+
+ if (unlikely(i)) { /* paged area */
+ frag = &sinfo->frags[i - 1];
+ len = skb_frag_size(frag);
+ }
+
+ tx_desc = mvneta_txq_next_desc_get(txq);
+ if (dma_map) {
+ /* ndo_xdp_xmit */
+ void *data;
+
+ data = unlikely(frag) ? skb_frag_address(frag)
+ : xdpf->data;
+ dma_addr = dma_map_single(dev, data, len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, dma_addr)) {
+ mvneta_txq_desc_put(txq);
+ goto unmap;
+ }
+
+ buf->type = MVNETA_TYPE_XDP_NDO;
+ } else {
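+ /* XDP_TX: the frame already sits in a page_pool page, so reuse
+ * its DMA mapping and only sync it for the device.
+ */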
+ page = unlikely(frag) ? skb_frag_page(frag)
+ : virt_to_page(xdpf->data);
+ dma_addr = page_pool_get_dma_addr(page);
+ if (unlikely(frag))
+ dma_addr += skb_frag_off(frag);
+ else
+ dma_addr += sizeof(*xdpf) + xdpf->headroom;
+ dma_sync_single_for_device(dev, dma_addr, len,
+ DMA_BIDIRECTIONAL);
+ buf->type = MVNETA_TYPE_XDP_TX;
+ }
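+ /* Only the first descriptor carries the xdp_frame pointer; it is
+ * released once at tx completion.
+ */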
+ buf->xdpf = unlikely(i) ? NULL : xdpf;
+
+ tx_desc->command = unlikely(i) ? 0 : MVNETA_TXD_F_DESC;
+ tx_desc->buf_phys_addr = dma_addr;
+ tx_desc->data_size = len;
+ *nxmit_byte += len;
+
+ mvneta_txq_inc_put(txq);
+ }
+ /* last descriptor */
+ tx_desc->command |= MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD;
+
+ txq->pending += num_frames;
+ txq->count += num_frames;
+
+ return MVNETA_XDP_TX;
+
+unmap:
+ for (i--; i >= 0; i--) {
+ mvneta_txq_desc_put(txq);
+ tx_desc = txq->descs + txq->next_desc_to_proc;
+ dma_unmap_single(dev, tx_desc->buf_phys_addr,
+ tx_desc->data_size,
+ DMA_TO_DEVICE);
+ }
+
+ return MVNETA_XDP_DROPPED;
+}
+
+static int
+mvneta_xdp_xmit_back(struct mvneta_port *pp, struct xdp_buff *xdp)
+{
+ struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
+ struct mvneta_tx_queue *txq;
+ struct netdev_queue *nq;
+ int cpu, nxmit_byte = 0;
+ struct xdp_frame *xdpf;
+ u32 ret;
+
+ xdpf = xdp_convert_buff_to_frame(xdp);
+ if (unlikely(!xdpf))
+ return MVNETA_XDP_DROPPED;
+
+ cpu = smp_processor_id();
+ txq = &pp->txqs[cpu % txq_number];
+ nq = netdev_get_tx_queue(pp->dev, txq->id);
+
+ __netif_tx_lock(nq, cpu);
+ ret = mvneta_xdp_submit_frame(pp, txq, xdpf, &nxmit_byte, false);
+ if (ret == MVNETA_XDP_TX) {
+ u64_stats_update_begin(&stats->syncp);
+ stats->es.ps.tx_bytes += nxmit_byte;
+ stats->es.ps.tx_packets++;
+ stats->es.ps.xdp_tx++;
+ u64_stats_update_end(&stats->syncp);
+
+ mvneta_txq_pend_desc_add(pp, txq, 0);
+ } else {
+ u64_stats_update_begin(&stats->syncp);
+ stats->es.ps.xdp_tx_err++;
+ u64_stats_update_end(&stats->syncp);
+ }
+ __netif_tx_unlock(nq);
+
+ return ret;
+}
+
+static int
+mvneta_xdp_xmit(struct net_device *dev, int num_frame,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+ struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
+ int i, nxmit_byte = 0, nxmit = 0;
+ int cpu = smp_processor_id();
+ struct mvneta_tx_queue *txq;
+ struct netdev_queue *nq;
+ u32 ret;
+
+ if (unlikely(test_bit(__MVNETA_DOWN, &pp->state)))
+ return -ENETDOWN;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ txq = &pp->txqs[cpu % txq_number];
+ nq = netdev_get_tx_queue(pp->dev, txq->id);
+
+ __netif_tx_lock(nq, cpu);
+ for (i = 0; i < num_frame; i++) {
+ ret = mvneta_xdp_submit_frame(pp, txq, frames[i], &nxmit_byte,
+ true);
+ if (ret != MVNETA_XDP_TX)
+ break;
+
+ nxmit++;
+ }
+
+ if (unlikely(flags & XDP_XMIT_FLUSH))
+ mvneta_txq_pend_desc_add(pp, txq, 0);
+ __netif_tx_unlock(nq);
+
+ u64_stats_update_begin(&stats->syncp);
+ stats->es.ps.tx_bytes += nxmit_byte;
+ stats->es.ps.tx_packets += nxmit;
+ stats->es.ps.xdp_xmit += nxmit;
+ stats->es.ps.xdp_xmit_err += num_frame - nxmit;
+ u64_stats_update_end(&stats->syncp);
+
+ return nxmit;
+}
+
+static int
+mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
+ struct bpf_prog *prog, struct xdp_buff *xdp,
+ u32 frame_sz, struct mvneta_stats *stats)
+{
+ unsigned int len, data_len, sync;
+ u32 ret, act;
+
+ len = xdp->data_end - xdp->data_hard_start - pp->rx_offset_correction;
+ data_len = xdp->data_end - xdp->data;
+ act = bpf_prog_run_xdp(prog, xdp);
+
+ /* Due to xdp_adjust_tail, the DMA sync for_device must cover the max length the CPU touched */
+ sync = xdp->data_end - xdp->data_hard_start - pp->rx_offset_correction;
+ sync = max(sync, len);
+
+ switch (act) {
+ case XDP_PASS:
+ stats->xdp_pass++;
+ return MVNETA_XDP_PASS;
+ case XDP_REDIRECT: {
+ int err;
+
+ err = xdp_do_redirect(pp->dev, xdp, prog);
+ if (unlikely(err)) {
+ mvneta_xdp_put_buff(pp, rxq, xdp, sync);
+ ret = MVNETA_XDP_DROPPED;
+ } else {
+ ret = MVNETA_XDP_REDIR;
+ stats->xdp_redirect++;
+ }
+ break;
+ }
+ case XDP_TX:
+ ret = mvneta_xdp_xmit_back(pp, xdp);
+ if (ret != MVNETA_XDP_TX)
+ mvneta_xdp_put_buff(pp, rxq, xdp, sync);
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(pp->dev, prog, act);
+ fallthrough;
+ case XDP_ABORTED:
+ trace_xdp_exception(pp->dev, prog, act);
+ fallthrough;
+ case XDP_DROP:
+ mvneta_xdp_put_buff(pp, rxq, xdp, sync);
+ ret = MVNETA_XDP_DROPPED;
+ stats->xdp_drop++;
+ break;
+ }
+
+ stats->rx_bytes += frame_sz + xdp->data_end - xdp->data - data_len;
+ stats->rx_packets++;
+
+ return ret;
+}
+
+static void
+mvneta_swbm_rx_frame(struct mvneta_port *pp,
+ struct mvneta_rx_desc *rx_desc,
+ struct mvneta_rx_queue *rxq,
+ struct xdp_buff *xdp, int *size,
+ struct page *page)
+{
+ unsigned char *data = page_address(page);
+ int data_len = -MVNETA_MH_SIZE, len;
+ struct net_device *dev = pp->dev;
+ enum dma_data_direction dma_dir;
+
+ if (*size > MVNETA_MAX_RX_BUF_SIZE) {
+ len = MVNETA_MAX_RX_BUF_SIZE;
+ data_len += len;
+ } else {
+ len = *size;
+ data_len += len - ETH_FCS_LEN;
+ }
+ *size = *size - len;
+
+ dma_dir = page_pool_get_dma_dir(rxq->page_pool);
+ dma_sync_single_for_cpu(dev->dev.parent,
+ rx_desc->buf_phys_addr,
+ len, dma_dir);
+
+ rx_desc->buf_phys_addr = 0;
+
+ /* Prefetch header */
+ prefetch(data);
+ xdp_buff_clear_frags_flag(xdp);
+ xdp_prepare_buff(xdp, data, pp->rx_offset_correction + MVNETA_MH_SIZE,
+ data_len, false);
+}
+
+static void
+mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
+ struct mvneta_rx_desc *rx_desc,
+ struct mvneta_rx_queue *rxq,
+ struct xdp_buff *xdp, int *size,
+ struct page *page)
+{
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
+ struct net_device *dev = pp->dev;
+ enum dma_data_direction dma_dir;
+ int data_len, len;
+
+ if (*size > MVNETA_MAX_RX_BUF_SIZE) {
+ len = MVNETA_MAX_RX_BUF_SIZE;
+ data_len = len;
+ } else {
+ len = *size;
+ data_len = len - ETH_FCS_LEN;
+ }
+ dma_dir = page_pool_get_dma_dir(rxq->page_pool);
+ dma_sync_single_for_cpu(dev->dev.parent,
+ rx_desc->buf_phys_addr,
+ len, dma_dir);
+ rx_desc->buf_phys_addr = 0;
+
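+ /* No fragments attached yet: start with an empty fragment list */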
+ if (!xdp_buff_has_frags(xdp))
+ sinfo->nr_frags = 0;
+
+ if (data_len > 0 && sinfo->nr_frags < MAX_SKB_FRAGS) {
+ skb_frag_t *frag = &sinfo->frags[sinfo->nr_frags++];
+
+ skb_frag_fill_page_desc(frag, page,
+ pp->rx_offset_correction, data_len);
+
+ if (!xdp_buff_has_frags(xdp)) {
+ sinfo->xdp_frags_size = *size;
+ xdp_buff_set_frags_flag(xdp);
+ }
+ if (page_is_pfmemalloc(page))
+ xdp_buff_set_frag_pfmemalloc(xdp);
+ } else {
+ page_pool_put_full_page(rxq->page_pool, page, true);
+ }
+ *size -= len;
+}
+
+static struct sk_buff *
+mvneta_swbm_build_skb(struct mvneta_port *pp, struct page_pool *pool,
+ struct xdp_buff *xdp, u32 desc_status)
+{
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
+ struct sk_buff *skb;
+ u8 num_frags;
+
+ if (unlikely(xdp_buff_has_frags(xdp)))
+ num_frags = sinfo->nr_frags;
+
+ skb = build_skb(xdp->data_hard_start, PAGE_SIZE);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ skb_mark_for_recycle(skb);
+
+ skb_reserve(skb, xdp->data - xdp->data_hard_start);
+ skb_put(skb, xdp->data_end - xdp->data);
+ skb->ip_summed = mvneta_rx_csum(pp, desc_status);
+
+ if (unlikely(xdp_buff_has_frags(xdp)))
+ xdp_update_skb_shared_info(skb, num_frags,
+ sinfo->xdp_frags_size,
+ num_frags * xdp->frame_sz,
+ xdp_buff_is_frag_pfmemalloc(xdp));
+
+ return skb;
+}
+
+/* Main rx processing when using software buffer management */
+static int mvneta_rx_swbm(struct napi_struct *napi,
+ struct mvneta_port *pp, int budget,
+ struct mvneta_rx_queue *rxq)
+{
+ int rx_proc = 0, rx_todo, refill, size = 0;
+ struct net_device *dev = pp->dev;
+ struct mvneta_stats ps = {};
+ struct bpf_prog *xdp_prog;
+ u32 desc_status, frame_sz;
+ struct xdp_buff xdp_buf;
+
+ xdp_init_buff(&xdp_buf, PAGE_SIZE, &rxq->xdp_rxq);
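+ /* A NULL data_hard_start means no frame is currently being assembled */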
+ xdp_buf.data_hard_start = NULL;
+
+ /* Get number of received packets */
+ rx_todo = mvneta_rxq_busy_desc_num_get(pp, rxq);
+
+ xdp_prog = READ_ONCE(pp->xdp_prog);
+
+ /* Fairness NAPI loop */
+ while (rx_proc < budget && rx_proc < rx_todo) {
+ struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
+ u32 rx_status, index;
+ struct sk_buff *skb;
+ struct page *page;
+
+ index = rx_desc - rxq->descs;
+ page = (struct page *)rxq->buf_virt_addr[index];
+
+ rx_status = rx_desc->status;
+ rx_proc++;
+ rxq->refill_num++;
+
+ if (rx_status & MVNETA_RXD_FIRST_DESC) {
+ /* Check errors only for FIRST descriptor */
+ if (rx_status & MVNETA_RXD_ERR_SUMMARY) {
+ mvneta_rx_error(pp, rx_desc);
+ goto next;
+ }
+
+ size = rx_desc->data_size;
+ frame_sz = size - ETH_FCS_LEN;
+ desc_status = rx_status;
+
+ mvneta_swbm_rx_frame(pp, rx_desc, rxq, &xdp_buf,
+ &size, page);
+ } else {
+ if (unlikely(!xdp_buf.data_hard_start)) {
+ rx_desc->buf_phys_addr = 0;
+ page_pool_put_full_page(rxq->page_pool, page,
+ true);
+ goto next;
+ }
+
+ mvneta_swbm_add_rx_fragment(pp, rx_desc, rxq, &xdp_buf,
+ &size, page);
+ } /* Middle or Last descriptor */
+
+ if (!(rx_status & MVNETA_RXD_LAST_DESC))
+ /* no last descriptor this time */
+ continue;
+
+ if (size) {
+ mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1);
+ goto next;
+ }
+
+ if (xdp_prog &&
+ mvneta_run_xdp(pp, rxq, xdp_prog, &xdp_buf, frame_sz, &ps))
+ goto next;
+
+ skb = mvneta_swbm_build_skb(pp, rxq->page_pool, &xdp_buf, desc_status);
+ if (IS_ERR(skb)) {
+ struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
+
+ mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1);
+
+ u64_stats_update_begin(&stats->syncp);
+ stats->es.skb_alloc_error++;
+ stats->rx_dropped++;
+ u64_stats_update_end(&stats->syncp);
+
+ goto next;
+ }
+
+ ps.rx_bytes += skb->len;
+ ps.rx_packets++;
+
+ skb->protocol = eth_type_trans(skb, dev);
+ napi_gro_receive(napi, skb);
+next:
+ xdp_buf.data_hard_start = NULL;
+ }
+
+ if (xdp_buf.data_hard_start)
+ mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1);
+
+ if (ps.xdp_redirect)
+ xdp_do_flush_map();
+
+ if (ps.rx_packets)
+ mvneta_update_stats(pp, &ps);
+
+ /* Return some buffers to the hardware queue; refilling one at a time is too slow */
+ refill = mvneta_rx_refill_queue(pp, rxq);
+
+ /* Update rxq management counters */
+ mvneta_rxq_desc_num_update(pp, rxq, rx_proc, refill);
+
+ return ps.rx_packets;
+}
+
+/* Main rx processing when using hardware buffer management */
+static int mvneta_rx_hwbm(struct napi_struct *napi,
+ struct mvneta_port *pp, int rx_todo,
+ struct mvneta_rx_queue *rxq)
+{
+ struct net_device *dev = pp->dev;
+ int rx_done;
+ u32 rcvd_pkts = 0;
+ u32 rcvd_bytes = 0;
+
+ /* Get number of received packets */
+ rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
+
+ if (rx_todo > rx_done)
+ rx_todo = rx_done;
+
+ rx_done = 0;
+
+ /* Fairness NAPI loop */
+ while (rx_done < rx_todo) {
+ struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
+ struct mvneta_bm_pool *bm_pool = NULL;
+ struct sk_buff *skb;
+ unsigned char *data;
+ dma_addr_t phys_addr;
+ u32 rx_status, frag_size;
+ int rx_bytes, err;
+ u8 pool_id;
+
+ rx_done++;
+ rx_status = rx_desc->status;
+ rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
+ data = (u8 *)(uintptr_t)rx_desc->buf_cookie;
+ phys_addr = rx_desc->buf_phys_addr;
+ pool_id = MVNETA_RX_GET_BM_POOL_ID(rx_desc);
+ bm_pool = &pp->bm_priv->bm_pools[pool_id];
+
+ if (!mvneta_rxq_desc_is_first_last(rx_status) ||
+ (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
+err_drop_frame_ret_pool:
+ /* Return the buffer to the pool */
+ mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
+ rx_desc->buf_phys_addr);
+err_drop_frame:
+ mvneta_rx_error(pp, rx_desc);
+ /* leave the descriptor untouched */
+ continue;
+ }
+
+ if (rx_bytes <= rx_copybreak) {
+ /* better copy a small frame and not unmap the DMA region */
+ skb = netdev_alloc_skb_ip_align(dev, rx_bytes);
+ if (unlikely(!skb))
+ goto err_drop_frame_ret_pool;
+
+ dma_sync_single_range_for_cpu(&pp->bm_priv->pdev->dev,
+ rx_desc->buf_phys_addr,
+ MVNETA_MH_SIZE + NET_SKB_PAD,
+ rx_bytes,
+ DMA_FROM_DEVICE);
+ skb_put_data(skb, data + MVNETA_MH_SIZE + NET_SKB_PAD,
+ rx_bytes);
+
+ skb->protocol = eth_type_trans(skb, dev);
+ skb->ip_summed = mvneta_rx_csum(pp, rx_status);
+ napi_gro_receive(napi, skb);
+
+ rcvd_pkts++;
+ rcvd_bytes += rx_bytes;
+
+ /* Return the buffer to the pool */
+ mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
+ rx_desc->buf_phys_addr);
+
+ /* leave the descriptor and buffer untouched */
+ continue;
+ }
+
+ /* Refill processing */
+ err = hwbm_pool_refill(&bm_pool->hwbm_pool, GFP_ATOMIC);
+ if (err) {
+ struct mvneta_pcpu_stats *stats;
+
+ netdev_err(dev, "Linux processing - Can't refill\n");
+
+ stats = this_cpu_ptr(pp->stats);
+ u64_stats_update_begin(&stats->syncp);
+ stats->es.refill_error++;
+ u64_stats_update_end(&stats->syncp);
+
+ goto err_drop_frame_ret_pool;
+ }
+
+ frag_size = bm_pool->hwbm_pool.frag_size;
+
+ skb = build_skb(data, frag_size > PAGE_SIZE ? 0 : frag_size);
+
+ /* After the refill, the old buffer has to be unmapped regardless
+ * of whether the skb was built successfully or not.
+ */
+ dma_unmap_single(&pp->bm_priv->pdev->dev, phys_addr,
+ bm_pool->buf_size, DMA_FROM_DEVICE);
+ if (!skb)
+ goto err_drop_frame;
+
+ rcvd_pkts++;
+ rcvd_bytes += rx_bytes;
+
+ /* Linux processing */
+ skb_reserve(skb, MVNETA_MH_SIZE + NET_SKB_PAD);
+ skb_put(skb, rx_bytes);
+
+ skb->protocol = eth_type_trans(skb, dev);
+ skb->ip_summed = mvneta_rx_csum(pp, rx_status);
+
+ napi_gro_receive(napi, skb);
+ }
+
+ if (rcvd_pkts) {
+ struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
+
+ u64_stats_update_begin(&stats->syncp);
+ stats->es.ps.rx_packets += rcvd_pkts;
+ stats->es.ps.rx_bytes += rcvd_bytes;
+ u64_stats_update_end(&stats->syncp);
+ }
+
+ /* Update rxq management counters */
+ mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
+
+ return rx_done;
+}
+
+static void mvneta_free_tso_hdrs(struct mvneta_port *pp,
+ struct mvneta_tx_queue *txq)
+{
+ struct device *dev = pp->dev->dev.parent;
+ int i;
+
+ for (i = 0; i < MVNETA_MAX_TSO_PAGES; i++) {
+ if (txq->tso_hdrs[i]) {
+ dma_free_coherent(dev, MVNETA_TSO_PAGE_SIZE,
+ txq->tso_hdrs[i],
+ txq->tso_hdrs_phys[i]);
+ txq->tso_hdrs[i] = NULL;
+ }
+ }
+}
+
+static int mvneta_alloc_tso_hdrs(struct mvneta_port *pp,
+ struct mvneta_tx_queue *txq)
+{
+ struct device *dev = pp->dev->dev.parent;
+ int i, num;
+
+ num = DIV_ROUND_UP(txq->size, MVNETA_TSO_PER_PAGE);
+ for (i = 0; i < num; i++) {
+ txq->tso_hdrs[i] = dma_alloc_coherent(dev, MVNETA_TSO_PAGE_SIZE,
+ &txq->tso_hdrs_phys[i],
+ GFP_KERNEL);
+ if (!txq->tso_hdrs[i]) {
+ mvneta_free_tso_hdrs(pp, txq);
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+static char *mvneta_get_tso_hdr(struct mvneta_tx_queue *txq, dma_addr_t *dma)
+{
+ int index, offset;
+
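+ /* TSO headers live in preallocated per-queue pages; locate the page
+ * and offset that correspond to the current put index.
+ */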
+ index = txq->txq_put_index / MVNETA_TSO_PER_PAGE;
+ offset = (txq->txq_put_index % MVNETA_TSO_PER_PAGE) * TSO_HEADER_SIZE;
+
+ *dma = txq->tso_hdrs_phys[index] + offset;
+
+ return txq->tso_hdrs[index] + offset;
+}
+
+static void mvneta_tso_put_hdr(struct sk_buff *skb, struct mvneta_tx_queue *txq,
+ struct tso_t *tso, int size, bool is_last)
+{
+ struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
+ int hdr_len = skb_tcp_all_headers(skb);
+ struct mvneta_tx_desc *tx_desc;
+ dma_addr_t hdr_phys;
+ char *hdr;
+
+ hdr = mvneta_get_tso_hdr(txq, &hdr_phys);
+ tso_build_hdr(skb, hdr, tso, size, is_last);
+
+ tx_desc = mvneta_txq_next_desc_get(txq);
+ tx_desc->data_size = hdr_len;
+ tx_desc->command = mvneta_skb_tx_csum(skb);
+ tx_desc->command |= MVNETA_TXD_F_DESC;
+ tx_desc->buf_phys_addr = hdr_phys;
+ buf->type = MVNETA_TYPE_TSO;
+ buf->skb = NULL;
+
+ mvneta_txq_inc_put(txq);
+}
+
+static inline int
+mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq,
+ struct sk_buff *skb, char *data, int size,
+ bool last_tcp, bool is_last)
+{
+ struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
+ struct mvneta_tx_desc *tx_desc;
+
+ tx_desc = mvneta_txq_next_desc_get(txq);
+ tx_desc->data_size = size;
+ tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, data,
+ size, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev->dev.parent,
+ tx_desc->buf_phys_addr))) {
+ mvneta_txq_desc_put(txq);
+ return -ENOMEM;
+ }
+
+ tx_desc->command = 0;
+ buf->type = MVNETA_TYPE_SKB;
+ buf->skb = NULL;
+
+ if (last_tcp) {
+ /* last descriptor in the TCP packet */
+ tx_desc->command = MVNETA_TXD_L_DESC;
+
+ /* last descriptor in SKB */
+ if (is_last)
+ buf->skb = skb;
+ }
+ mvneta_txq_inc_put(txq);
+ return 0;
+}
+
+static void mvneta_release_descs(struct mvneta_port *pp,
+ struct mvneta_tx_queue *txq,
+ int first, int num)
+{
+ int desc_idx, i;
+
+ desc_idx = first + num;
+ if (desc_idx >= txq->size)
+ desc_idx -= txq->size;
+
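+ /* Release the descriptors in reverse order, newest first */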
+ for (i = num; i >= 0; i--) {
+ struct mvneta_tx_desc *tx_desc = txq->descs + desc_idx;
+ struct mvneta_tx_buf *buf = &txq->buf[desc_idx];
+
+ if (buf->type == MVNETA_TYPE_SKB)
+ dma_unmap_single(pp->dev->dev.parent,
+ tx_desc->buf_phys_addr,
+ tx_desc->data_size,
+ DMA_TO_DEVICE);
+
+ mvneta_txq_desc_put(txq);
+
+ if (desc_idx == 0)
+ desc_idx = txq->size;
+ desc_idx -= 1;
+ }
+}
+
+static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev,
+ struct mvneta_tx_queue *txq)
+{
+ int hdr_len, total_len, data_left;
+ int first_desc, desc_count = 0;
+ struct mvneta_port *pp = netdev_priv(dev);
+ struct tso_t tso;
+
+ /* Count needed descriptors */
+ if ((txq->count + tso_count_descs(skb)) >= txq->size)
+ return 0;
+
+ if (skb_headlen(skb) < skb_tcp_all_headers(skb)) {
+ pr_info("*** Is this even possible?\n");
+ return 0;
+ }
+
+ first_desc = txq->txq_put_index;
+
+ /* Initialize the TSO handler, and prepare the first payload */
+ hdr_len = tso_start(skb, &tso);
+
+ total_len = skb->len - hdr_len;
+ while (total_len > 0) {
+ data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
+ total_len -= data_left;
+ desc_count++;
+
+ /* prepare packet headers: MAC + IP + TCP */
+ mvneta_tso_put_hdr(skb, txq, &tso, data_left, total_len == 0);
+
+ while (data_left > 0) {
+ int size;
+ desc_count++;
+
+ size = min_t(int, tso.size, data_left);
+
+ if (mvneta_tso_put_data(dev, txq, skb,
+ tso.data, size,
+ size == data_left,
+ total_len == 0))
+ goto err_release;
+ data_left -= size;
+
+ tso_build_data(skb, &tso, size);
+ }
+ }
+
+ return desc_count;
+
+err_release:
+ /* Release all used data descriptors; header descriptors must not
+ * be DMA-unmapped.
+ */
+ mvneta_release_descs(pp, txq, first_desc, desc_count - 1);
+ return 0;
+}
+
+/* Handle tx fragmentation processing */
+static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
+ struct mvneta_tx_queue *txq)
+{
+ struct mvneta_tx_desc *tx_desc;
+ int i, nr_frags = skb_shinfo(skb)->nr_frags;
+ int first_desc = txq->txq_put_index;
+
+ for (i = 0; i < nr_frags; i++) {
+ struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ void *addr = skb_frag_address(frag);
+
+ tx_desc = mvneta_txq_next_desc_get(txq);
+ tx_desc->data_size = skb_frag_size(frag);
+
+ tx_desc->buf_phys_addr =
+ dma_map_single(pp->dev->dev.parent, addr,
+ tx_desc->data_size, DMA_TO_DEVICE);
+
+ if (dma_mapping_error(pp->dev->dev.parent,
+ tx_desc->buf_phys_addr)) {
+ mvneta_txq_desc_put(txq);
+ goto error;
+ }
+
+ if (i == nr_frags - 1) {
+ /* Last descriptor */
+ tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD;
+ buf->skb = skb;
+ } else {
+ /* Descriptor in the middle: Not First, Not Last */
+ tx_desc->command = 0;
+ buf->skb = NULL;
+ }
+ buf->type = MVNETA_TYPE_SKB;
+ mvneta_txq_inc_put(txq);
+ }
+
+ return 0;
+
+error:
+ /* Release all descriptors that were used to map fragments of
+ * this packet, as well as the corresponding DMA mappings
+ */
+ mvneta_release_descs(pp, txq, first_desc, i - 1);
+ return -ENOMEM;
+}
+
+/* Main tx processing */
+static netdev_tx_t mvneta_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+ u16 txq_id = skb_get_queue_mapping(skb);
+ struct mvneta_tx_queue *txq = &pp->txqs[txq_id];
+ struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
+ struct mvneta_tx_desc *tx_desc;
+ int len = skb->len;
+ int frags = 0;
+ u32 tx_cmd;
+
+ if (!netif_running(dev))
+ goto out;
+
+ if (skb_is_gso(skb)) {
+ frags = mvneta_tx_tso(skb, dev, txq);
+ goto out;
+ }
+
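+ /* One descriptor for the linear part plus one per fragment */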
+ frags = skb_shinfo(skb)->nr_frags + 1;
+
+ /* Get a descriptor for the first part of the packet */
+ tx_desc = mvneta_txq_next_desc_get(txq);
+
+ tx_cmd = mvneta_skb_tx_csum(skb);
+
+ tx_desc->data_size = skb_headlen(skb);
+
+ tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, skb->data,
+ tx_desc->data_size,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev->dev.parent,
+ tx_desc->buf_phys_addr))) {
+ mvneta_txq_desc_put(txq);
+ frags = 0;
+ goto out;
+ }
+
+ buf->type = MVNETA_TYPE_SKB;
+ if (frags == 1) {
+ /* First and Last descriptor */
+ tx_cmd |= MVNETA_TXD_FLZ_DESC;
+ tx_desc->command = tx_cmd;
+ buf->skb = skb;
+ mvneta_txq_inc_put(txq);
+ } else {
+ /* First but not Last */
+ tx_cmd |= MVNETA_TXD_F_DESC;
+ buf->skb = NULL;
+ mvneta_txq_inc_put(txq);
+ tx_desc->command = tx_cmd;
+ /* Continue with other skb fragments */
+ if (mvneta_tx_frag_process(pp, skb, txq)) {
+ dma_unmap_single(dev->dev.parent,
+ tx_desc->buf_phys_addr,
+ tx_desc->data_size,
+ DMA_TO_DEVICE);
+ mvneta_txq_desc_put(txq);
+ frags = 0;
+ goto out;
+ }
+ }
+
+out:
+ if (frags > 0) {
+ struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
+ struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
+
+ netdev_tx_sent_queue(nq, len);
+
+ txq->count += frags;
+ if (txq->count >= txq->tx_stop_threshold)
+ netif_tx_stop_queue(nq);
+
+ if (!netdev_xmit_more() || netif_xmit_stopped(nq) ||
+ txq->pending + frags > MVNETA_TXQ_DEC_SENT_MASK)
+ mvneta_txq_pend_desc_add(pp, txq, frags);
+ else
+ txq->pending += frags;
+
+ u64_stats_update_begin(&stats->syncp);
+ stats->es.ps.tx_bytes += len;
+ stats->es.ps.tx_packets++;
+ u64_stats_update_end(&stats->syncp);
+ } else {
+ dev->stats.tx_dropped++;
+ dev_kfree_skb_any(skb);
+ }
+
+ return NETDEV_TX_OK;
+}
+
+/* Free tx resources when resetting a port */
+static void mvneta_txq_done_force(struct mvneta_port *pp,
+ struct mvneta_tx_queue *txq)
+{
+ struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
+ int tx_done = txq->count;
+
+ mvneta_txq_bufs_free(pp, txq, tx_done, nq, false);
+
+ /* reset txq */
+ txq->count = 0;
+ txq->txq_put_index = 0;
+ txq->txq_get_index = 0;
+}
+
+/* Handle tx done - called in softirq context. The <cause_tx_done> argument
+ * must be a valid cause according to MVNETA_TXQ_INTR_MASK_ALL.
+ */
+static void mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done)
+{
+ struct mvneta_tx_queue *txq;
+ struct netdev_queue *nq;
+ int cpu = smp_processor_id();
+
+ while (cause_tx_done) {
+ txq = mvneta_tx_done_policy(pp, cause_tx_done);
+
+ nq = netdev_get_tx_queue(pp->dev, txq->id);
+ __netif_tx_lock(nq, cpu);
+
+ if (txq->count)
+ mvneta_txq_done(pp, txq);
+
+ __netif_tx_unlock(nq);
+ cause_tx_done &= ~((1 << txq->id));
+ }
+}
+
+/* Compute the CRC-8 of the specified address, using a unique
+ * algorithm defined by the hardware spec, different from the generic
+ * CRC-8 algorithm.
+ */
+static int mvneta_addr_crc(unsigned char *addr)
+{
+ int crc = 0;
+ int i;
+
+ for (i = 0; i < ETH_ALEN; i++) {
+ int j;
+
+ crc = (crc ^ addr[i]) << 8;
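+ /* Reduce modulo the polynomial 0x107 (x^8 + x^2 + x + 1), MSB first */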
+ for (j = 7; j >= 0; j--) {
+ if (crc & (0x100 << j))
+ crc ^= 0x107 << j;
+ }
+ }
+
+ return crc;
+}
+
+/* This method controls the net device's special MAC multicast support.
+ * The Special Multicast Table supports MAC addresses of the form
+ * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
+ * The MAC DA[7:0] bits are used as a pointer to the Special Multicast
+ * Table entries in the DA-Filter table. This method sets the
+ * appropriate Special Multicast Table entry.
+ */
+static void mvneta_set_special_mcast_addr(struct mvneta_port *pp,
+ unsigned char last_byte,
+ int queue)
+{
+ unsigned int smc_table_reg;
+ unsigned int tbl_offset;
+ unsigned int reg_offset;
+
+ /* Register offset from SMC table base */
+ tbl_offset = (last_byte / 4);
+ /* Entry offset within the above reg */
+ reg_offset = last_byte % 4;
+
+ smc_table_reg = mvreg_read(pp, (MVNETA_DA_FILT_SPEC_MCAST
+ + tbl_offset * 4));
+
+ if (queue == -1) {
+ smc_table_reg &= ~(0xff << (8 * reg_offset));
+ } else {
+ smc_table_reg &= ~(0xff << (8 * reg_offset));
+ smc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
+ }
+
+ mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + tbl_offset * 4,
+ smc_table_reg);
+}
+
+/* This method controls the network device Other MAC multicast support.
+ * The Other Multicast Table is used for multicast of another type.
+ * A CRC-8 is used as an index to the Other Multicast Table entries
+ * in the DA-Filter table.
+ * The method gets the CRC-8 value from the calling routine and
+ * sets the appropriate Other Multicast Table entry according to the
+ * specified CRC-8.
+ */
+static void mvneta_set_other_mcast_addr(struct mvneta_port *pp,
+ unsigned char crc8,
+ int queue)
+{
+ unsigned int omc_table_reg;
+ unsigned int tbl_offset;
+ unsigned int reg_offset;
+
+ tbl_offset = (crc8 / 4) * 4; /* Register offset from OMC table base */
+ reg_offset = crc8 % 4; /* Entry offset within the above reg */
+
+ omc_table_reg = mvreg_read(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset);
+
+ if (queue == -1) {
+ /* Clear accepts frame bit at specified Other DA table entry */
+ omc_table_reg &= ~(0xff << (8 * reg_offset));
+ } else {
+ omc_table_reg &= ~(0xff << (8 * reg_offset));
+ omc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
+ }
+
+ mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset, omc_table_reg);
+}
+
+/* The network device supports multicast using two tables:
+ * 1) Special Multicast Table for MAC addresses of the form
+ * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
+ * The MAC DA[7:0] bits are used as a pointer to the Special Multicast
+ * Table entries in the DA-Filter table.
+ * 2) Other Multicast Table for multicast of another type. A CRC-8 value
+ * is used as an index to the Other Multicast Table entries in the
+ * DA-Filter table.
+ */
+static int mvneta_mcast_addr_set(struct mvneta_port *pp, unsigned char *p_addr,
+ int queue)
+{
+ unsigned char crc_result = 0;
+
+ if (memcmp(p_addr, "\x01\x00\x5e\x00\x00", 5) == 0) {
+ mvneta_set_special_mcast_addr(pp, p_addr[5], queue);
+ return 0;
+ }
+
+ crc_result = mvneta_addr_crc(p_addr);
+ if (queue == -1) {
+ if (pp->mcast_count[crc_result] == 0) {
+ netdev_info(pp->dev, "No valid Mcast for crc8=0x%02x\n",
+ crc_result);
+ return -EINVAL;
+ }
+
+ pp->mcast_count[crc_result]--;
+ if (pp->mcast_count[crc_result] != 0) {
+ netdev_info(pp->dev,
+ "After delete there are %d valid Mcast for crc8=0x%02x\n",
+ pp->mcast_count[crc_result], crc_result);
+ return -EINVAL;
+ }
+ } else
+ pp->mcast_count[crc_result]++;
+
+ mvneta_set_other_mcast_addr(pp, crc_result, queue);
+
+ return 0;
+}
+
+/* Configure the filtering mode of the Ethernet port */
+static void mvneta_rx_unicast_promisc_set(struct mvneta_port *pp,
+ int is_promisc)
+{
+ u32 port_cfg_reg, val;
+
+ port_cfg_reg = mvreg_read(pp, MVNETA_PORT_CONFIG);
+
+ val = mvreg_read(pp, MVNETA_TYPE_PRIO);
+
+ /* Set / Clear UPM bit in port configuration register */
+ if (is_promisc) {
+ /* Accept all Unicast addresses */
+ port_cfg_reg |= MVNETA_UNI_PROMISC_MODE;
+ val |= MVNETA_FORCE_UNI;
+ mvreg_write(pp, MVNETA_MAC_ADDR_LOW, 0xffff);
+ mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, 0xffffffff);
+ } else {
+ /* Reject all Unicast addresses */
+ port_cfg_reg &= ~MVNETA_UNI_PROMISC_MODE;
+ val &= ~MVNETA_FORCE_UNI;
+ }
+
+ mvreg_write(pp, MVNETA_PORT_CONFIG, port_cfg_reg);
+ mvreg_write(pp, MVNETA_TYPE_PRIO, val);
+}
+
+/* register unicast and multicast addresses */
+static void mvneta_set_rx_mode(struct net_device *dev)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+ struct netdev_hw_addr *ha;
+
+ if (dev->flags & IFF_PROMISC) {
+ /* Accept all: Multicast + Unicast */
+ mvneta_rx_unicast_promisc_set(pp, 1);
+ mvneta_set_ucast_table(pp, pp->rxq_def);
+ mvneta_set_special_mcast_table(pp, pp->rxq_def);
+ mvneta_set_other_mcast_table(pp, pp->rxq_def);
+ } else {
+ /* Accept single Unicast */
+ mvneta_rx_unicast_promisc_set(pp, 0);
+ mvneta_set_ucast_table(pp, -1);
+ mvneta_mac_addr_set(pp, dev->dev_addr, pp->rxq_def);
+
+ if (dev->flags & IFF_ALLMULTI) {
+ /* Accept all multicast */
+ mvneta_set_special_mcast_table(pp, pp->rxq_def);
+ mvneta_set_other_mcast_table(pp, pp->rxq_def);
+ } else {
+ /* Accept only initialized multicast */
+ mvneta_set_special_mcast_table(pp, -1);
+ mvneta_set_other_mcast_table(pp, -1);
+
+ if (!netdev_mc_empty(dev)) {
+ netdev_for_each_mc_addr(ha, dev) {
+ mvneta_mcast_addr_set(pp, ha->addr,
+ pp->rxq_def);
+ }
+ }
+ }
+ }
+}
+
+/* Interrupt handling - the callback for request_irq() */
+static irqreturn_t mvneta_isr(int irq, void *dev_id)
+{
+ struct mvneta_port *pp = (struct mvneta_port *)dev_id;
+
+ mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
+ napi_schedule(&pp->napi);
+
+ return IRQ_HANDLED;
+}
+
+/* Interrupt handling - the callback for request_percpu_irq() */
+static irqreturn_t mvneta_percpu_isr(int irq, void *dev_id)
+{
+ struct mvneta_pcpu_port *port = (struct mvneta_pcpu_port *)dev_id;
+
+ disable_percpu_irq(port->pp->dev->irq);
+ napi_schedule(&port->napi);
+
+ return IRQ_HANDLED;
+}
+
+static void mvneta_link_change(struct mvneta_port *pp)
+{
+ u32 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS);
+
+ phylink_mac_change(pp->phylink, !!(gmac_stat & MVNETA_GMAC_LINK_UP));
+}
+
+/* NAPI handler
+ * Bits 0-7 of the causeRxTx register indicate that packets were
+ * transmitted on the corresponding TXQ (Bit 0 is for TX queue 1).
+ * Bits 8-15 of the causeRxTx register indicate that packets were
+ * received on the corresponding RXQ (Bit 8 is for RX queue 0).
+ * Each CPU has its own causeRxTx register
+ */
+static int mvneta_poll(struct napi_struct *napi, int budget)
+{
+ int rx_done = 0;
+ u32 cause_rx_tx;
+ int rx_queue;
+ struct mvneta_port *pp = netdev_priv(napi->dev);
+ struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
+
+ if (!netif_running(pp->dev)) {
+ napi_complete(napi);
+ return rx_done;
+ }
+
+ /* Read cause register */
+ cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE);
+ if (cause_rx_tx & MVNETA_MISCINTR_INTR_MASK) {
+ u32 cause_misc = mvreg_read(pp, MVNETA_INTR_MISC_CAUSE);
+
+ mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
+
+ if (cause_misc & (MVNETA_CAUSE_PHY_STATUS_CHANGE |
+ MVNETA_CAUSE_LINK_CHANGE))
+ mvneta_link_change(pp);
+ }
+
+ /* Release Tx descriptors */
+ if (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL) {
+ mvneta_tx_done_gbe(pp, (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL));
+ cause_rx_tx &= ~MVNETA_TX_INTR_MASK_ALL;
+ }
+
+ /* For the case where the last mvneta_poll did not process all
+ * RX packets
+ */
+ cause_rx_tx |= pp->neta_armada3700 ? pp->cause_rx_tx :
+ port->cause_rx_tx;
+
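+ /* Serve the highest-numbered RX queue that has work pending */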
+ rx_queue = fls(((cause_rx_tx >> 8) & 0xff));
+ if (rx_queue) {
+ rx_queue = rx_queue - 1;
+ if (pp->bm_priv)
+ rx_done = mvneta_rx_hwbm(napi, pp, budget,
+ &pp->rxqs[rx_queue]);
+ else
+ rx_done = mvneta_rx_swbm(napi, pp, budget,
+ &pp->rxqs[rx_queue]);
+ }
+
+ if (rx_done < budget) {
+ cause_rx_tx = 0;
+ napi_complete_done(napi, rx_done);
+
+ if (pp->neta_armada3700) {
+ unsigned long flags;
+
+ local_irq_save(flags);
+ mvreg_write(pp, MVNETA_INTR_NEW_MASK,
+ MVNETA_RX_INTR_MASK(rxq_number) |
+ MVNETA_TX_INTR_MASK(txq_number) |
+ MVNETA_MISCINTR_INTR_MASK);
+ local_irq_restore(flags);
+ } else {
+ enable_percpu_irq(pp->dev->irq, 0);
+ }
+ }
+
+ if (pp->neta_armada3700)
+ pp->cause_rx_tx = cause_rx_tx;
+ else
+ port->cause_rx_tx = cause_rx_tx;
+
+ return rx_done;
+}
+
+static int mvneta_create_page_pool(struct mvneta_port *pp,
+ struct mvneta_rx_queue *rxq, int size)
+{
+ struct bpf_prog *xdp_prog = READ_ONCE(pp->xdp_prog);
+ struct page_pool_params pp_params = {
+ .order = 0,
+ .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+ .pool_size = size,
+ .nid = NUMA_NO_NODE,
+ .dev = pp->dev->dev.parent,
+ .dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE,
+ .offset = pp->rx_offset_correction,
+ .max_len = MVNETA_MAX_RX_BUF_SIZE,
+ };
+ int err;
+
+ rxq->page_pool = page_pool_create(&pp_params);
+ if (IS_ERR(rxq->page_pool)) {
+ err = PTR_ERR(rxq->page_pool);
+ rxq->page_pool = NULL;
+ return err;
+ }
+
+ err = __xdp_rxq_info_reg(&rxq->xdp_rxq, pp->dev, rxq->id, 0,
+ PAGE_SIZE);
+ if (err < 0)
+ goto err_free_pp;
+
+ err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
+ rxq->page_pool);
+ if (err)
+ goto err_unregister_rxq;
+
+ return 0;
+
+err_unregister_rxq:
+ xdp_rxq_info_unreg(&rxq->xdp_rxq);
+err_free_pp:
+ page_pool_destroy(rxq->page_pool);
+ rxq->page_pool = NULL;
+ return err;
+}
+
+/* Handle rxq fill: allocate rxq buffers; called when initializing a port */
+static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
+ int num)
+{
+ int i, err;
+
+ err = mvneta_create_page_pool(pp, rxq, num);
+ if (err < 0)
+ return err;
+
+ for (i = 0; i < num; i++) {
+ memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc));
+ if (mvneta_rx_refill(pp, rxq->descs + i, rxq,
+ GFP_KERNEL) != 0) {
+ netdev_err(pp->dev,
+ "%s:rxq %d, %d of %d buffs filled\n",
+ __func__, rxq->id, i, num);
+ break;
+ }
+ }
+
+ /* Add this number of RX descriptors as non-occupied (ready to
+ * receive packets)
+ */
+ mvneta_rxq_non_occup_desc_add(pp, rxq, i);
+
+ return i;
+}
+
+/* Free all packets pending transmit from all TXQs and reset TX port */
+static void mvneta_tx_reset(struct mvneta_port *pp)
+{
+ int queue;
+
+ /* free the skbs in the tx ring */
+ for (queue = 0; queue < txq_number; queue++)
+ mvneta_txq_done_force(pp, &pp->txqs[queue]);
+
+ mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);
+ mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
+}
+
+static void mvneta_rx_reset(struct mvneta_port *pp)
+{
+ mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
+ mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
+}
+
+/* Rx/Tx queue initialization/cleanup methods */
+
+static int mvneta_rxq_sw_init(struct mvneta_port *pp,
+ struct mvneta_rx_queue *rxq)
+{
+ rxq->size = pp->rx_ring_size;
+
+ /* Allocate memory for RX descriptors */
+ rxq->descs = dma_alloc_coherent(pp->dev->dev.parent,
+ rxq->size * MVNETA_DESC_ALIGNED_SIZE,
+ &rxq->descs_phys, GFP_KERNEL);
+ if (!rxq->descs)
+ return -ENOMEM;
+
+ rxq->last_desc = rxq->size - 1;
+
+ return 0;
+}
+
+static void mvneta_rxq_hw_init(struct mvneta_port *pp,
+ struct mvneta_rx_queue *rxq)
+{
+ /* Set Rx descriptors queue starting address */
+ mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys);
+ mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size);
+
+ /* Set coalescing pkts and time */
+ mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
+ mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
+
+ if (!pp->bm_priv) {
+ /* Set Offset */
+ mvneta_rxq_offset_set(pp, rxq, 0);
+ mvneta_rxq_buf_size_set(pp, rxq, PAGE_SIZE < SZ_64K ?
+ MVNETA_MAX_RX_BUF_SIZE :
+ MVNETA_RX_BUF_SIZE(pp->pkt_size));
+ mvneta_rxq_bm_disable(pp, rxq);
+ mvneta_rxq_fill(pp, rxq, rxq->size);
+ } else {
+ /* Set Offset */
+ mvneta_rxq_offset_set(pp, rxq,
+ NET_SKB_PAD - pp->rx_offset_correction);
+
+ mvneta_rxq_bm_enable(pp, rxq);
+ /* Fill RXQ with buffers from RX pool */
+ mvneta_rxq_long_pool_set(pp, rxq);
+ mvneta_rxq_short_pool_set(pp, rxq);
+ mvneta_rxq_non_occup_desc_add(pp, rxq, rxq->size);
+ }
+}
+
+/* Create a specified RX queue */
+static int mvneta_rxq_init(struct mvneta_port *pp,
+ struct mvneta_rx_queue *rxq)
+{
+ int ret;
+
+ ret = mvneta_rxq_sw_init(pp, rxq);
+ if (ret < 0)
+ return ret;
+
+ mvneta_rxq_hw_init(pp, rxq);
+
+ return 0;
+}
+
+/* Cleanup Rx queue */
+static void mvneta_rxq_deinit(struct mvneta_port *pp,
+ struct mvneta_rx_queue *rxq)
+{
+ mvneta_rxq_drop_pkts(pp, rxq);
+
+ if (rxq->descs)
+ dma_free_coherent(pp->dev->dev.parent,
+ rxq->size * MVNETA_DESC_ALIGNED_SIZE,
+ rxq->descs,
+ rxq->descs_phys);
+
+ rxq->descs = NULL;
+ rxq->last_desc = 0;
+ rxq->next_desc_to_proc = 0;
+ rxq->descs_phys = 0;
+ rxq->first_to_refill = 0;
+ rxq->refill_num = 0;
+}
+
+static int mvneta_txq_sw_init(struct mvneta_port *pp,
+ struct mvneta_tx_queue *txq)
+{
+ int cpu, err;
+
+ txq->size = pp->tx_ring_size;
+
+ /* A queue must always have room for at least one skb.
+ * Therefore, stop the queue when the number of free entries
+ * reaches the maximum number of descriptors per skb.
+ */
+ txq->tx_stop_threshold = txq->size - MVNETA_MAX_SKB_DESCS;
+ txq->tx_wake_threshold = txq->tx_stop_threshold / 2;
+
+ /* Allocate memory for TX descriptors */
+ txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
+ txq->size * MVNETA_DESC_ALIGNED_SIZE,
+ &txq->descs_phys, GFP_KERNEL);
+ if (!txq->descs)
+ return -ENOMEM;
+
+ txq->last_desc = txq->size - 1;
+
+ txq->buf = kmalloc_array(txq->size, sizeof(*txq->buf), GFP_KERNEL);
+ if (!txq->buf)
+ return -ENOMEM;
+
+ /* Allocate DMA buffers for TSO MAC/IP/TCP headers */
+ err = mvneta_alloc_tso_hdrs(pp, txq);
+ if (err)
+ return err;
+
+ /* Setup XPS mapping */
+ if (pp->neta_armada3700)
+ cpu = 0;
+ else if (txq_number > 1)
+ cpu = txq->id % num_present_cpus();
+ else
+ cpu = pp->rxq_def % num_present_cpus();
+ cpumask_set_cpu(cpu, &txq->affinity_mask);
+ netif_set_xps_queue(pp->dev, &txq->affinity_mask, txq->id);
+
+ return 0;
+}
+
+static void mvneta_txq_hw_init(struct mvneta_port *pp,
+ struct mvneta_tx_queue *txq)
+{
+ /* Set maximum bandwidth for enabled TXQs */
+ mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff);
+ mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff);
+
+ /* Set Tx descriptors queue starting address */
+ mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys);
+ mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size);
+
+ mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
+}
+
+/* Create and initialize a tx queue */
+static int mvneta_txq_init(struct mvneta_port *pp,
+ struct mvneta_tx_queue *txq)
+{
+ int ret;
+
+ ret = mvneta_txq_sw_init(pp, txq);
+ if (ret < 0)
+ return ret;
+
+ mvneta_txq_hw_init(pp, txq);
+
+ return 0;
+}
+
+/* Free allocated resources when mvneta_txq_init() fails to allocate memory */
+static void mvneta_txq_sw_deinit(struct mvneta_port *pp,
+ struct mvneta_tx_queue *txq)
+{
+ struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
+
+ kfree(txq->buf);
+
+ mvneta_free_tso_hdrs(pp, txq);
+ if (txq->descs)
+ dma_free_coherent(pp->dev->dev.parent,
+ txq->size * MVNETA_DESC_ALIGNED_SIZE,
+ txq->descs, txq->descs_phys);
+
+ netdev_tx_reset_queue(nq);
+
+ txq->buf = NULL;
+ txq->descs = NULL;
+ txq->last_desc = 0;
+ txq->next_desc_to_proc = 0;
+ txq->descs_phys = 0;
+}
+
+static void mvneta_txq_hw_deinit(struct mvneta_port *pp,
+ struct mvneta_tx_queue *txq)
+{
+ /* Set minimum bandwidth for disabled TXQs */
+ mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0);
+ mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0);
+
+ /* Set Tx descriptors queue starting address and size */
+ mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0);
+ mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0);
+}
+
+static void mvneta_txq_deinit(struct mvneta_port *pp,
+ struct mvneta_tx_queue *txq)
+{
+ mvneta_txq_sw_deinit(pp, txq);
+ mvneta_txq_hw_deinit(pp, txq);
+}
+
+/* Cleanup all Tx queues */
+static void mvneta_cleanup_txqs(struct mvneta_port *pp)
+{
+ int queue;
+
+ for (queue = 0; queue < txq_number; queue++)
+ mvneta_txq_deinit(pp, &pp->txqs[queue]);
+}
+
+/* Cleanup all Rx queues */
+static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
+{
+ int queue;
+
+ for (queue = 0; queue < rxq_number; queue++)
+ mvneta_rxq_deinit(pp, &pp->rxqs[queue]);
+}
+
+/* Init all Rx queues */
+static int mvneta_setup_rxqs(struct mvneta_port *pp)
+{
+ int queue;
+
+ for (queue = 0; queue < rxq_number; queue++) {
+ int err = mvneta_rxq_init(pp, &pp->rxqs[queue]);
+
+ if (err) {
+ netdev_err(pp->dev, "%s: can't create rxq=%d\n",
+ __func__, queue);
+ mvneta_cleanup_rxqs(pp);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+/* Init all tx queues */
+static int mvneta_setup_txqs(struct mvneta_port *pp)
+{
+ int queue;
+
+ for (queue = 0; queue < txq_number; queue++) {
+ int err = mvneta_txq_init(pp, &pp->txqs[queue]);
+ if (err) {
+ netdev_err(pp->dev, "%s: can't create txq=%d\n",
+ __func__, queue);
+ mvneta_cleanup_txqs(pp);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int mvneta_comphy_init(struct mvneta_port *pp, phy_interface_t interface)
+{
+ int ret;
+
+ ret = phy_set_mode_ext(pp->comphy, PHY_MODE_ETHERNET, interface);
+ if (ret)
+ return ret;
+
+ return phy_power_on(pp->comphy);
+}
+
+static int mvneta_config_interface(struct mvneta_port *pp,
+ phy_interface_t interface)
+{
+ int ret = 0;
+
+ if (pp->comphy) {
+ if (interface == PHY_INTERFACE_MODE_SGMII ||
+ interface == PHY_INTERFACE_MODE_1000BASEX ||
+ interface == PHY_INTERFACE_MODE_2500BASEX) {
+ ret = mvneta_comphy_init(pp, interface);
+ }
+ } else {
+ switch (interface) {
+ case PHY_INTERFACE_MODE_QSGMII:
+ mvreg_write(pp, MVNETA_SERDES_CFG,
+ MVNETA_QSGMII_SERDES_PROTO);
+ break;
+
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ mvreg_write(pp, MVNETA_SERDES_CFG,
+ MVNETA_SGMII_SERDES_PROTO);
+ break;
+
+ case PHY_INTERFACE_MODE_2500BASEX:
+ mvreg_write(pp, MVNETA_SERDES_CFG,
+ MVNETA_HSGMII_SERDES_PROTO);
+ break;
+ default:
+ break;
+ }
+ }
+
+ pp->phy_interface = interface;
+
+ return ret;
+}
+
+static void mvneta_start_dev(struct mvneta_port *pp)
+{
+ int cpu;
+
+ WARN_ON(mvneta_config_interface(pp, pp->phy_interface));
+
+ mvneta_max_rx_size_set(pp, pp->pkt_size);
+ mvneta_txq_max_tx_size_set(pp, pp->pkt_size);
+
+ /* start the Rx/Tx activity */
+ mvneta_port_enable(pp);
+
+ if (!pp->neta_armada3700) {
+ /* Enable polling on the port */
+ for_each_online_cpu(cpu) {
+ struct mvneta_pcpu_port *port =
+ per_cpu_ptr(pp->ports, cpu);
+
+ napi_enable(&port->napi);
+ }
+ } else {
+ napi_enable(&pp->napi);
+ }
+
+ /* Unmask interrupts. It has to be done from each CPU */
+ on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
+
+ mvreg_write(pp, MVNETA_INTR_MISC_MASK,
+ MVNETA_CAUSE_PHY_STATUS_CHANGE |
+ MVNETA_CAUSE_LINK_CHANGE);
+
+ phylink_start(pp->phylink);
+
+ /* We may have called phylink_speed_down before */
+ phylink_speed_up(pp->phylink);
+
+ netif_tx_start_all_queues(pp->dev);
+
+ clear_bit(__MVNETA_DOWN, &pp->state);
+}
+
+static void mvneta_stop_dev(struct mvneta_port *pp)
+{
+ unsigned int cpu;
+
+ set_bit(__MVNETA_DOWN, &pp->state);
+
+ if (device_may_wakeup(&pp->dev->dev))
+ phylink_speed_down(pp->phylink, false);
+
+ phylink_stop(pp->phylink);
+
+ if (!pp->neta_armada3700) {
+ for_each_online_cpu(cpu) {
+ struct mvneta_pcpu_port *port =
+ per_cpu_ptr(pp->ports, cpu);
+
+ napi_disable(&port->napi);
+ }
+ } else {
+ napi_disable(&pp->napi);
+ }
+
+ netif_carrier_off(pp->dev);
+
+ mvneta_port_down(pp);
+ netif_tx_stop_all_queues(pp->dev);
+
+ /* Stop the port activity */
+ mvneta_port_disable(pp);
+
+ /* Clear all ethernet port interrupts */
+ on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true);
+
+ /* Mask all ethernet port interrupts */
+ on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
+
+ mvneta_tx_reset(pp);
+ mvneta_rx_reset(pp);
+
+ WARN_ON(phy_power_off(pp->comphy));
+}
+
+static void mvneta_percpu_enable(void *arg)
+{
+ struct mvneta_port *pp = arg;
+
+ enable_percpu_irq(pp->dev->irq, IRQ_TYPE_NONE);
+}
+
+static void mvneta_percpu_disable(void *arg)
+{
+ struct mvneta_port *pp = arg;
+
+ disable_percpu_irq(pp->dev->irq);
+}
+
+/* Change the device mtu */
+static int mvneta_change_mtu(struct net_device *dev, int mtu)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+ struct bpf_prog *prog = pp->xdp_prog;
+ int ret;
+
+ if (!IS_ALIGNED(MVNETA_RX_PKT_SIZE(mtu), 8)) {
+ netdev_info(dev, "Illegal MTU value %d, rounding to %d\n",
+ mtu, ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8));
+ mtu = ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8);
+ }
+
+ if (prog && !prog->aux->xdp_has_frags &&
+ mtu > MVNETA_MAX_RX_BUF_SIZE) {
+ netdev_info(dev, "Illegal MTU %d for XDP prog without frags\n",
+ mtu);
+
+ return -EINVAL;
+ }
+
+ dev->mtu = mtu;
+
+ if (!netif_running(dev)) {
+ if (pp->bm_priv)
+ mvneta_bm_update_mtu(pp, mtu);
+
+ netdev_update_features(dev);
+ return 0;
+ }
+
+ /* The interface is running, so we have to force a
+ * reallocation of the queues
+ */
+ mvneta_stop_dev(pp);
+ on_each_cpu(mvneta_percpu_disable, pp, true);
+
+ mvneta_cleanup_txqs(pp);
+ mvneta_cleanup_rxqs(pp);
+
+ if (pp->bm_priv)
+ mvneta_bm_update_mtu(pp, mtu);
+
+ pp->pkt_size = MVNETA_RX_PKT_SIZE(dev->mtu);
+
+ ret = mvneta_setup_rxqs(pp);
+ if (ret) {
+ netdev_err(dev, "unable to setup rxqs after MTU change\n");
+ return ret;
+ }
+
+ ret = mvneta_setup_txqs(pp);
+ if (ret) {
+ netdev_err(dev, "unable to setup txqs after MTU change\n");
+ return ret;
+ }
+
+ on_each_cpu(mvneta_percpu_enable, pp, true);
+ mvneta_start_dev(pp);
+
+ netdev_update_features(dev);
+
+ return 0;
+}
+
+static netdev_features_t mvneta_fix_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+
+ if (pp->tx_csum_limit && dev->mtu > pp->tx_csum_limit) {
+ features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
+ netdev_info(dev,
+ "Disable IP checksum for MTU greater than %dB\n",
+ pp->tx_csum_limit);
+ }
+
+ return features;
+}
+
+/* Get mac address */
+static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr)
+{
+ u32 mac_addr_l, mac_addr_h;
+
+ mac_addr_l = mvreg_read(pp, MVNETA_MAC_ADDR_LOW);
+ mac_addr_h = mvreg_read(pp, MVNETA_MAC_ADDR_HIGH);
+ addr[0] = (mac_addr_h >> 24) & 0xFF;
+ addr[1] = (mac_addr_h >> 16) & 0xFF;
+ addr[2] = (mac_addr_h >> 8) & 0xFF;
+ addr[3] = mac_addr_h & 0xFF;
+ addr[4] = (mac_addr_l >> 8) & 0xFF;
+ addr[5] = mac_addr_l & 0xFF;
+}
+
+/* Handle setting mac address */
+static int mvneta_set_mac_addr(struct net_device *dev, void *addr)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+ struct sockaddr *sockaddr = addr;
+ int ret;
+
+ ret = eth_prepare_mac_addr_change(dev, addr);
+ if (ret < 0)
+ return ret;
+ /* Remove previous address table entry */
+ mvneta_mac_addr_set(pp, dev->dev_addr, -1);
+
+ /* Set new addr in hw */
+ mvneta_mac_addr_set(pp, sockaddr->sa_data, pp->rxq_def);
+
+ eth_commit_mac_addr_change(dev, addr);
+ return 0;
+}
+
+static struct mvneta_port *mvneta_pcs_to_port(struct phylink_pcs *pcs)
+{
+ return container_of(pcs, struct mvneta_port, phylink_pcs);
+}
+
+static int mvneta_pcs_validate(struct phylink_pcs *pcs,
+ unsigned long *supported,
+ const struct phylink_link_state *state)
+{
+ /* We only support QSGMII, SGMII, 802.3z and RGMII modes.
+ * When in 802.3z mode, we must have AN enabled:
+ * "Bit 2 Field InBandAnEn In-band Auto-Negotiation enable. ...
+ * When <PortType> = 1 (1000BASE-X) this field must be set to 1."
+ */
+ if (phy_interface_mode_is_8023z(state->interface) &&
+ !phylink_test(state->advertising, Autoneg))
+ return -EINVAL;
+
+ return 0;
+}
+
+static void mvneta_pcs_get_state(struct phylink_pcs *pcs,
+ struct phylink_link_state *state)
+{
+ struct mvneta_port *pp = mvneta_pcs_to_port(pcs);
+ u32 gmac_stat;
+
+ gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS);
+
+ if (gmac_stat & MVNETA_GMAC_SPEED_1000)
+ state->speed =
+ state->interface == PHY_INTERFACE_MODE_2500BASEX ?
+ SPEED_2500 : SPEED_1000;
+ else if (gmac_stat & MVNETA_GMAC_SPEED_100)
+ state->speed = SPEED_100;
+ else
+ state->speed = SPEED_10;
+
+ state->an_complete = !!(gmac_stat & MVNETA_GMAC_AN_COMPLETE);
+ state->link = !!(gmac_stat & MVNETA_GMAC_LINK_UP);
+ state->duplex = !!(gmac_stat & MVNETA_GMAC_FULL_DUPLEX);
+
+ if (gmac_stat & MVNETA_GMAC_RX_FLOW_CTRL_ENABLE)
+ state->pause |= MLO_PAUSE_RX;
+ if (gmac_stat & MVNETA_GMAC_TX_FLOW_CTRL_ENABLE)
+ state->pause |= MLO_PAUSE_TX;
+}
+
+static int mvneta_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
+ phy_interface_t interface,
+ const unsigned long *advertising,
+ bool permit_pause_to_mac)
+{
+ struct mvneta_port *pp = mvneta_pcs_to_port(pcs);
+ u32 mask, val, an, old_an, changed;
+
+ mask = MVNETA_GMAC_INBAND_AN_ENABLE |
+ MVNETA_GMAC_INBAND_RESTART_AN |
+ MVNETA_GMAC_AN_SPEED_EN |
+ MVNETA_GMAC_AN_FLOW_CTRL_EN |
+ MVNETA_GMAC_AN_DUPLEX_EN;
+
+ if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED) {
+ mask |= MVNETA_GMAC_CONFIG_MII_SPEED |
+ MVNETA_GMAC_CONFIG_GMII_SPEED |
+ MVNETA_GMAC_CONFIG_FULL_DUPLEX;
+ val = MVNETA_GMAC_INBAND_AN_ENABLE;
+
+ if (interface == PHY_INTERFACE_MODE_SGMII) {
+ /* SGMII mode receives the speed and duplex from PHY */
+ val |= MVNETA_GMAC_AN_SPEED_EN |
+ MVNETA_GMAC_AN_DUPLEX_EN;
+ } else {
+ /* 802.3z mode has fixed speed and duplex */
+ val |= MVNETA_GMAC_CONFIG_GMII_SPEED |
+ MVNETA_GMAC_CONFIG_FULL_DUPLEX;
+
+ /* The FLOW_CTRL_EN bit selects whether the GMAC pause mode is
+ * controlled automatically by the hardware or manually via the
+ * CONFIG_FLOW_CTRL bit.
+ */
+ if (permit_pause_to_mac)
+ val |= MVNETA_GMAC_AN_FLOW_CTRL_EN;
+
+ /* Update the advertisement bits */
+ mask |= MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL;
+ if (phylink_test(advertising, Pause))
+ val |= MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL;
+ }
+ } else {
+ /* Phy or fixed speed - disable in-band AN modes */
+ val = 0;
+ }
+
+ old_an = an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
+ an = (an & ~mask) | val;
+ changed = old_an ^ an;
+ if (changed)
+ mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, an);
+
+ /* We are only interested in the advertisement bits changing */
+ return !!(changed & MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL);
+}
+
+static void mvneta_pcs_an_restart(struct phylink_pcs *pcs)
+{
+ struct mvneta_port *pp = mvneta_pcs_to_port(pcs);
+ u32 gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
+
+ mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
+ gmac_an | MVNETA_GMAC_INBAND_RESTART_AN);
+ mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
+ gmac_an & ~MVNETA_GMAC_INBAND_RESTART_AN);
+}
+
+static const struct phylink_pcs_ops mvneta_phylink_pcs_ops = {
+ .pcs_validate = mvneta_pcs_validate,
+ .pcs_get_state = mvneta_pcs_get_state,
+ .pcs_config = mvneta_pcs_config,
+ .pcs_an_restart = mvneta_pcs_an_restart,
+};
+
+static struct phylink_pcs *mvneta_mac_select_pcs(struct phylink_config *config,
+ phy_interface_t interface)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct mvneta_port *pp = netdev_priv(ndev);
+
+ return &pp->phylink_pcs;
+}
+
+static int mvneta_mac_prepare(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct mvneta_port *pp = netdev_priv(ndev);
+ u32 val;
+
+ if (pp->phy_interface != interface ||
+ phylink_autoneg_inband(mode)) {
+ /* Force the link down when changing the interface or if in
+ * in-band mode. According to Armada 370 documentation, we
+ * can only change the port mode and in-band enable when the
+ * link is down.
+ */
+ val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
+ val &= ~MVNETA_GMAC_FORCE_LINK_PASS;
+ val |= MVNETA_GMAC_FORCE_LINK_DOWN;
+ mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
+ }
+
+ if (pp->phy_interface != interface)
+ WARN_ON(phy_power_off(pp->comphy));
+
+ /* Enable the 1ms clock */
+ if (phylink_autoneg_inband(mode)) {
+ unsigned long rate = clk_get_rate(pp->clk);
+
+ mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER,
+ MVNETA_GMAC_1MS_CLOCK_ENABLE | (rate / 1000));
+ }
+
+ return 0;
+}
+
+static void mvneta_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct mvneta_port *pp = netdev_priv(ndev);
+ u32 new_ctrl0, gmac_ctrl0 = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
+ u32 new_ctrl2, gmac_ctrl2 = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
+ u32 new_ctrl4, gmac_ctrl4 = mvreg_read(pp, MVNETA_GMAC_CTRL_4);
+
+ new_ctrl0 = gmac_ctrl0 & ~MVNETA_GMAC0_PORT_1000BASE_X;
+ new_ctrl2 = gmac_ctrl2 & ~(MVNETA_GMAC2_INBAND_AN_ENABLE |
+ MVNETA_GMAC2_PORT_RESET);
+ new_ctrl4 = gmac_ctrl4 & ~(MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE);
+
+ /* Even though it might look weird, when we're configured in
+ * SGMII or QSGMII mode, the RGMII bit needs to be set.
+ */
+ new_ctrl2 |= MVNETA_GMAC2_PORT_RGMII;
+
+ if (state->interface == PHY_INTERFACE_MODE_QSGMII ||
+ state->interface == PHY_INTERFACE_MODE_SGMII ||
+ phy_interface_mode_is_8023z(state->interface))
+ new_ctrl2 |= MVNETA_GMAC2_PCS_ENABLE;
+
+ if (!phylink_autoneg_inband(mode)) {
+ /* Phy or fixed speed - nothing to do, leave the
+ * configured speed, duplex and flow control as-is.
+ */
+ } else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
+ /* SGMII mode receives the state from the PHY */
+ new_ctrl2 |= MVNETA_GMAC2_INBAND_AN_ENABLE;
+ } else {
+ /* 802.3z negotiation - only 1000base-X */
+ new_ctrl0 |= MVNETA_GMAC0_PORT_1000BASE_X;
+ }
+
+ /* When at 2.5G, the link partner can send frames with shortened
+ * preambles.
+ */
+ if (state->interface == PHY_INTERFACE_MODE_2500BASEX)
+ new_ctrl4 |= MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE;
+
+ if (new_ctrl0 != gmac_ctrl0)
+ mvreg_write(pp, MVNETA_GMAC_CTRL_0, new_ctrl0);
+ if (new_ctrl2 != gmac_ctrl2)
+ mvreg_write(pp, MVNETA_GMAC_CTRL_2, new_ctrl2);
+ if (new_ctrl4 != gmac_ctrl4)
+ mvreg_write(pp, MVNETA_GMAC_CTRL_4, new_ctrl4);
+
+ if (gmac_ctrl2 & MVNETA_GMAC2_PORT_RESET) {
+ while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) &
+ MVNETA_GMAC2_PORT_RESET) != 0)
+ continue;
+ }
+}
+
+static int mvneta_mac_finish(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct mvneta_port *pp = netdev_priv(ndev);
+ u32 val, clk;
+
+ /* Disable 1ms clock if not in in-band mode */
+ if (!phylink_autoneg_inband(mode)) {
+ clk = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
+ clk &= ~MVNETA_GMAC_1MS_CLOCK_ENABLE;
+ mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, clk);
+ }
+
+ if (pp->phy_interface != interface)
+ /* Enable the Serdes PHY */
+ WARN_ON(mvneta_config_interface(pp, interface));
+
+ /* Allow the link to come up if in in-band mode, otherwise the
+ * link is forced via mac_link_down()/mac_link_up()
+ */
+ if (phylink_autoneg_inband(mode)) {
+ val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
+ val &= ~MVNETA_GMAC_FORCE_LINK_DOWN;
+ mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
+ }
+
+ return 0;
+}
+
+static void mvneta_set_eee(struct mvneta_port *pp, bool enable)
+{
+ u32 lpi_ctl1;
+
+ lpi_ctl1 = mvreg_read(pp, MVNETA_LPI_CTRL_1);
+ if (enable)
+ lpi_ctl1 |= MVNETA_LPI_REQUEST_ENABLE;
+ else
+ lpi_ctl1 &= ~MVNETA_LPI_REQUEST_ENABLE;
+ mvreg_write(pp, MVNETA_LPI_CTRL_1, lpi_ctl1);
+}
+
+static void mvneta_mac_link_down(struct phylink_config *config,
+ unsigned int mode, phy_interface_t interface)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct mvneta_port *pp = netdev_priv(ndev);
+ u32 val;
+
+ mvneta_port_down(pp);
+
+ if (!phylink_autoneg_inband(mode)) {
+ val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
+ val &= ~MVNETA_GMAC_FORCE_LINK_PASS;
+ val |= MVNETA_GMAC_FORCE_LINK_DOWN;
+ mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
+ }
+
+ pp->eee_active = false;
+ mvneta_set_eee(pp, false);
+}
+
+static void mvneta_mac_link_up(struct phylink_config *config,
+ struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct mvneta_port *pp = netdev_priv(ndev);
+ u32 val;
+
+ if (!phylink_autoneg_inband(mode)) {
+ val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
+ val &= ~(MVNETA_GMAC_FORCE_LINK_DOWN |
+ MVNETA_GMAC_CONFIG_MII_SPEED |
+ MVNETA_GMAC_CONFIG_GMII_SPEED |
+ MVNETA_GMAC_CONFIG_FLOW_CTRL |
+ MVNETA_GMAC_CONFIG_FULL_DUPLEX);
+ val |= MVNETA_GMAC_FORCE_LINK_PASS;
+
+ if (speed == SPEED_1000 || speed == SPEED_2500)
+ val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
+ else if (speed == SPEED_100)
+ val |= MVNETA_GMAC_CONFIG_MII_SPEED;
+
+ if (duplex == DUPLEX_FULL)
+ val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
+
+ if (tx_pause || rx_pause)
+ val |= MVNETA_GMAC_CONFIG_FLOW_CTRL;
+
+ mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
+ } else {
+ /* When inband doesn't cover flow control or flow control is
+ * disabled, we need to manually configure it. This bit will
+ * only have effect if MVNETA_GMAC_AN_FLOW_CTRL_EN is unset.
+ */
+ val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
+ val &= ~MVNETA_GMAC_CONFIG_FLOW_CTRL;
+
+ if (tx_pause || rx_pause)
+ val |= MVNETA_GMAC_CONFIG_FLOW_CTRL;
+
+ mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
+ }
+
+ mvneta_port_up(pp);
+
+ if (phy && pp->eee_enabled) {
+ pp->eee_active = phy_init_eee(phy, false) >= 0;
+ mvneta_set_eee(pp, pp->eee_active && pp->tx_lpi_enabled);
+ }
+}
+
+static const struct phylink_mac_ops mvneta_phylink_ops = {
+ .mac_select_pcs = mvneta_mac_select_pcs,
+ .mac_prepare = mvneta_mac_prepare,
+ .mac_config = mvneta_mac_config,
+ .mac_finish = mvneta_mac_finish,
+ .mac_link_down = mvneta_mac_link_down,
+ .mac_link_up = mvneta_mac_link_up,
+};
+
+static int mvneta_mdio_probe(struct mvneta_port *pp)
+{
+ struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
+ int err = phylink_of_phy_connect(pp->phylink, pp->dn, 0);
+
+ if (err)
+ netdev_err(pp->dev, "could not attach PHY: %d\n", err);
+
+ phylink_ethtool_get_wol(pp->phylink, &wol);
+ device_set_wakeup_capable(&pp->dev->dev, !!wol.supported);
+
+ /* PHY WoL may be enabled but device wakeup disabled */
+ if (wol.supported)
+ device_set_wakeup_enable(&pp->dev->dev, !!wol.wolopts);
+
+ return err;
+}
+
+static void mvneta_mdio_remove(struct mvneta_port *pp)
+{
+ phylink_disconnect_phy(pp->phylink);
+}
+
+/* Electing a CPU must be done in an atomic way: it must happen either
+ * before or after the removal/insertion of a CPU, and this function is
+ * not reentrant.
+ */
+static void mvneta_percpu_elect(struct mvneta_port *pp)
+{
+ int elected_cpu = 0, max_cpu, cpu;
+
+ /* Use the CPU associated with the default RXQ when it is online;
+ * in all other cases, use CPU 0, which can't be offline.
+ */
+ if (pp->rxq_def < nr_cpu_ids && cpu_online(pp->rxq_def))
+ elected_cpu = pp->rxq_def;
+
+ max_cpu = num_present_cpus();
+
+ for_each_online_cpu(cpu) {
+ int rxq_map = 0, txq_map = 0;
+ int rxq;
+
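+ /* Spread the RX queues across the present CPUs: RXQ n is
+ * handled by CPU (n % max_cpu) when that CPU is online.
+ */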
+ for (rxq = 0; rxq < rxq_number; rxq++)
+ if ((rxq % max_cpu) == cpu)
+ rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);
+
+ if (cpu == elected_cpu)
+ /* Map the default receive queue to the elected CPU */
+ rxq_map |= MVNETA_CPU_RXQ_ACCESS(pp->rxq_def);
+
+ /* We update the TX queue map only if we have one
+ * queue. In this case we associate the TX queue with
+ * the CPU bound to the default RX queue.
+ */
+ if (txq_number == 1)
+ txq_map = (cpu == elected_cpu) ?
+ MVNETA_CPU_TXQ_ACCESS(0) : 0;
+ else
+ txq_map = mvreg_read(pp, MVNETA_CPU_MAP(cpu)) &
+ MVNETA_CPU_TXQ_ACCESS_ALL_MASK;
+
+ mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);
+
+ /* Update the interrupt mask on each CPU according to the
+ * new mapping.
+ */
+ smp_call_function_single(cpu, mvneta_percpu_unmask_interrupt,
+ pp, true);
+ }
+}
+
+static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node)
+{
+ int other_cpu;
+ struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
+ node_online);
+ struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
+
+ /* Armada 3700's per-cpu interrupt for mvneta is broken, all interrupts
+ * are routed to CPU 0, so we don't need all the cpu-hotplug support
+ */
+ if (pp->neta_armada3700)
+ return 0;
+
+ spin_lock(&pp->lock);
+ /*
+ * Configuring the driver for a new CPU while the driver is
+ * stopping is racy, so just avoid it.
+ */
+ if (pp->is_stopped) {
+ spin_unlock(&pp->lock);
+ return 0;
+ }
+ netif_tx_stop_all_queues(pp->dev);
+
+ /*
+ * We have to synchronise on the napi of each CPU except the one
+ * just being woken up.
+ */
+ for_each_online_cpu(other_cpu) {
+ if (other_cpu != cpu) {
+ struct mvneta_pcpu_port *other_port =
+ per_cpu_ptr(pp->ports, other_cpu);
+
+ napi_synchronize(&other_port->napi);
+ }
+ }
+
+ /* Mask all ethernet port interrupts */
+ on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
+ napi_enable(&port->napi);
+
+ /*
+ * Enable per-CPU interrupts on the CPU that is
+ * brought up.
+ */
+ mvneta_percpu_enable(pp);
+
+ /*
+ * Elect the CPU handling the default RX queue and update the
+ * per-CPU queue and interrupt mappings accordingly.
+ */
+ mvneta_percpu_elect(pp);
+
+ /* Unmask all ethernet port interrupts */
+ on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
+ mvreg_write(pp, MVNETA_INTR_MISC_MASK,
+ MVNETA_CAUSE_PHY_STATUS_CHANGE |
+ MVNETA_CAUSE_LINK_CHANGE);
+ netif_tx_start_all_queues(pp->dev);
+ spin_unlock(&pp->lock);
+ return 0;
+}
+
+static int mvneta_cpu_down_prepare(unsigned int cpu, struct hlist_node *node)
+{
+ struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
+ node_online);
+ struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
+
+ /*
+ * Thanks to this lock we are sure that any pending cpu election is
+ * done.
+ */
+ spin_lock(&pp->lock);
+ /* Mask all ethernet port interrupts */
+ on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
+ spin_unlock(&pp->lock);
+
+ napi_synchronize(&port->napi);
+ napi_disable(&port->napi);
+ /* Disable per-CPU interrupts on the CPU that is brought down. */
+ mvneta_percpu_disable(pp);
+ return 0;
+}
+
+static int mvneta_cpu_dead(unsigned int cpu, struct hlist_node *node)
+{
+ struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
+ node_dead);
+
+ /* Check if a new CPU must be elected now that this one is down */
+ spin_lock(&pp->lock);
+ mvneta_percpu_elect(pp);
+ spin_unlock(&pp->lock);
+ /* Unmask all ethernet port interrupts */
+ on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
+ mvreg_write(pp, MVNETA_INTR_MISC_MASK,
+ MVNETA_CAUSE_PHY_STATUS_CHANGE |
+ MVNETA_CAUSE_LINK_CHANGE);
+ netif_tx_start_all_queues(pp->dev);
+ return 0;
+}
+
+static int mvneta_open(struct net_device *dev)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+ int ret;
+
+ pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
+
+ ret = mvneta_setup_rxqs(pp);
+ if (ret)
+ return ret;
+
+ ret = mvneta_setup_txqs(pp);
+ if (ret)
+ goto err_cleanup_rxqs;
+
+ /* Connect to port interrupt line */
+ if (pp->neta_armada3700)
+ ret = request_irq(pp->dev->irq, mvneta_isr, 0,
+ dev->name, pp);
+ else
+ ret = request_percpu_irq(pp->dev->irq, mvneta_percpu_isr,
+ dev->name, pp->ports);
+ if (ret) {
+ netdev_err(pp->dev, "cannot request irq %d\n", pp->dev->irq);
+ goto err_cleanup_txqs;
+ }
+
+ if (!pp->neta_armada3700) {
+ /* Enable per-CPU interrupts on all CPUs to handle our RX
+ * queue interrupts.
+ */
+ on_each_cpu(mvneta_percpu_enable, pp, true);
+
+ pp->is_stopped = false;
+ /* Register a CPU notifier to handle the case where our CPU
+ * might be taken offline.
+ */
+ ret = cpuhp_state_add_instance_nocalls(online_hpstate,
+ &pp->node_online);
+ if (ret)
+ goto err_free_irq;
+
+ ret = cpuhp_state_add_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
+ &pp->node_dead);
+ if (ret)
+ goto err_free_online_hp;
+ }
+
+ ret = mvneta_mdio_probe(pp);
+ if (ret < 0) {
+ netdev_err(dev, "cannot probe MDIO bus\n");
+ goto err_free_dead_hp;
+ }
+
+ mvneta_start_dev(pp);
+
+ return 0;
+
+err_free_dead_hp:
+ if (!pp->neta_armada3700)
+ cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
+ &pp->node_dead);
+err_free_online_hp:
+ if (!pp->neta_armada3700)
+ cpuhp_state_remove_instance_nocalls(online_hpstate,
+ &pp->node_online);
+err_free_irq:
+ if (pp->neta_armada3700) {
+ free_irq(pp->dev->irq, pp);
+ } else {
+ on_each_cpu(mvneta_percpu_disable, pp, true);
+ free_percpu_irq(pp->dev->irq, pp->ports);
+ }
+err_cleanup_txqs:
+ mvneta_cleanup_txqs(pp);
+err_cleanup_rxqs:
+ mvneta_cleanup_rxqs(pp);
+ return ret;
+}
+
+/* Stop the port, free port interrupt line */
+static int mvneta_stop(struct net_device *dev)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+
+ if (!pp->neta_armada3700) {
+ /* Inform that we are stopping so we don't want to setup the
+ * driver for new CPUs in the notifiers. The code of the
+ * notifier for CPU online is protected by the same spinlock,
+ * so when we get the lock, the notifier work is done.
+ */
+ spin_lock(&pp->lock);
+ pp->is_stopped = true;
+ spin_unlock(&pp->lock);
+
+ mvneta_stop_dev(pp);
+ mvneta_mdio_remove(pp);
+
+ cpuhp_state_remove_instance_nocalls(online_hpstate,
+ &pp->node_online);
+ cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
+ &pp->node_dead);
+ on_each_cpu(mvneta_percpu_disable, pp, true);
+ free_percpu_irq(dev->irq, pp->ports);
+ } else {
+ mvneta_stop_dev(pp);
+ mvneta_mdio_remove(pp);
+ free_irq(dev->irq, pp);
+ }
+
+ mvneta_cleanup_rxqs(pp);
+ mvneta_cleanup_txqs(pp);
+
+ return 0;
+}
+
+static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+
+ return phylink_mii_ioctl(pp->phylink, ifr, cmd);
+}
+
+static int mvneta_xdp_setup(struct net_device *dev, struct bpf_prog *prog,
+ struct netlink_ext_ack *extack)
+{
+ bool need_update, running = netif_running(dev);
+ struct mvneta_port *pp = netdev_priv(dev);
+ struct bpf_prog *old_prog;
+
+ if (prog && !prog->aux->xdp_has_frags &&
+ dev->mtu > MVNETA_MAX_RX_BUF_SIZE) {
+ NL_SET_ERR_MSG_MOD(extack, "prog does not support XDP frags");
+ return -EOPNOTSUPP;
+ }
+
+ if (pp->bm_priv) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Hardware Buffer Management not supported on XDP");
+ return -EOPNOTSUPP;
+ }
+
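+ /* The port only needs to be restarted when attaching the first
+ * program or detaching the last one, not when replacing one
+ * program with another.
+ */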
+ need_update = !!pp->xdp_prog != !!prog;
+ if (running && need_update)
+ mvneta_stop(dev);
+
+ old_prog = xchg(&pp->xdp_prog, prog);
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ if (running && need_update)
+ return mvneta_open(dev);
+
+ return 0;
+}
+
+static int mvneta_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+{
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return mvneta_xdp_setup(dev, xdp->prog, xdp->extack);
+ default:
+ return -EINVAL;
+ }
+}
+
+/* Ethtool methods */
+
+/* Set link ksettings (phy address, speed) for ethtools */
+static int
+mvneta_ethtool_set_link_ksettings(struct net_device *ndev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct mvneta_port *pp = netdev_priv(ndev);
+
+ return phylink_ethtool_ksettings_set(pp->phylink, cmd);
+}
+
+/* Get link ksettings for ethtools */
+static int
+mvneta_ethtool_get_link_ksettings(struct net_device *ndev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct mvneta_port *pp = netdev_priv(ndev);
+
+ return phylink_ethtool_ksettings_get(pp->phylink, cmd);
+}
+
+static int mvneta_ethtool_nway_reset(struct net_device *dev)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+
+ return phylink_ethtool_nway_reset(pp->phylink);
+}
+
+/* Set interrupt coalescing for ethtools */
+static int
+mvneta_ethtool_set_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *c,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+ int queue;
+
+ for (queue = 0; queue < rxq_number; queue++) {
+ struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
+ rxq->time_coal = c->rx_coalesce_usecs;
+ rxq->pkts_coal = c->rx_max_coalesced_frames;
+ mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
+ mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
+ }
+
+ for (queue = 0; queue < txq_number; queue++) {
+ struct mvneta_tx_queue *txq = &pp->txqs[queue];
+ txq->done_pkts_coal = c->tx_max_coalesced_frames;
+ mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
+ }
+
+ return 0;
+}
+
+/* get coalescing for ethtools */
+static int
+mvneta_ethtool_get_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *c,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+
+ c->rx_coalesce_usecs = pp->rxqs[0].time_coal;
+ c->rx_max_coalesced_frames = pp->rxqs[0].pkts_coal;
+
+ c->tx_max_coalesced_frames = pp->txqs[0].done_pkts_coal;
+ return 0;
+}
+
+static void mvneta_ethtool_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ strscpy(drvinfo->driver, MVNETA_DRIVER_NAME,
+ sizeof(drvinfo->driver));
+ strscpy(drvinfo->version, MVNETA_DRIVER_VERSION,
+ sizeof(drvinfo->version));
+ strscpy(drvinfo->bus_info, dev_name(&dev->dev),
+ sizeof(drvinfo->bus_info));
+}
+
+static void
+mvneta_ethtool_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
+{
+ struct mvneta_port *pp = netdev_priv(netdev);
+
+ ring->rx_max_pending = MVNETA_MAX_RXD;
+ ring->tx_max_pending = MVNETA_MAX_TXD;
+ ring->rx_pending = pp->rx_ring_size;
+ ring->tx_pending = pp->tx_ring_size;
+}
+
+static int
+mvneta_ethtool_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+
+ if ((ring->rx_pending == 0) || (ring->tx_pending == 0))
+ return -EINVAL;
+ pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ?
+ ring->rx_pending : MVNETA_MAX_RXD;
+
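+ /* Ensure the TX ring can hold at least two maximally-fragmented
+ * packets.
+ */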
+ pp->tx_ring_size = clamp_t(u16, ring->tx_pending,
+ MVNETA_MAX_SKB_DESCS * 2, MVNETA_MAX_TXD);
+ if (pp->tx_ring_size != ring->tx_pending)
+ netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
+ pp->tx_ring_size, ring->tx_pending);
+
+ if (netif_running(dev)) {
+ mvneta_stop(dev);
+ if (mvneta_open(dev)) {
+ netdev_err(dev,
+ "error on opening device after ring param change\n");
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+static void mvneta_ethtool_get_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+
+ phylink_ethtool_get_pauseparam(pp->phylink, pause);
+}
+
+static int mvneta_ethtool_set_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+
+ return phylink_ethtool_set_pauseparam(pp->phylink, pause);
+}
+
+static void mvneta_ethtool_get_strings(struct net_device *netdev, u32 sset,
+ u8 *data)
+{
+ if (sset == ETH_SS_STATS) {
+ struct mvneta_port *pp = netdev_priv(netdev);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
+ memcpy(data + i * ETH_GSTRING_LEN,
+ mvneta_statistics[i].name, ETH_GSTRING_LEN);
+
+ if (!pp->bm_priv) {
+ data += ETH_GSTRING_LEN * ARRAY_SIZE(mvneta_statistics);
+ page_pool_ethtool_stats_get_strings(data);
+ }
+ }
+}
+
+static void
+mvneta_ethtool_update_pcpu_stats(struct mvneta_port *pp,
+ struct mvneta_ethtool_stats *es)
+{
+ unsigned int start;
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ struct mvneta_pcpu_stats *stats;
+ u64 skb_alloc_error;
+ u64 refill_error;
+ u64 xdp_redirect;
+ u64 xdp_xmit_err;
+ u64 xdp_tx_err;
+ u64 xdp_pass;
+ u64 xdp_drop;
+ u64 xdp_xmit;
+ u64 xdp_tx;
+
+ stats = per_cpu_ptr(pp->stats, cpu);
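+ /* Take a consistent snapshot of the per-CPU counters */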
+ do {
+ start = u64_stats_fetch_begin(&stats->syncp);
+ skb_alloc_error = stats->es.skb_alloc_error;
+ refill_error = stats->es.refill_error;
+ xdp_redirect = stats->es.ps.xdp_redirect;
+ xdp_pass = stats->es.ps.xdp_pass;
+ xdp_drop = stats->es.ps.xdp_drop;
+ xdp_xmit = stats->es.ps.xdp_xmit;
+ xdp_xmit_err = stats->es.ps.xdp_xmit_err;
+ xdp_tx = stats->es.ps.xdp_tx;
+ xdp_tx_err = stats->es.ps.xdp_tx_err;
+ } while (u64_stats_fetch_retry(&stats->syncp, start));
+
+ es->skb_alloc_error += skb_alloc_error;
+ es->refill_error += refill_error;
+ es->ps.xdp_redirect += xdp_redirect;
+ es->ps.xdp_pass += xdp_pass;
+ es->ps.xdp_drop += xdp_drop;
+ es->ps.xdp_xmit += xdp_xmit;
+ es->ps.xdp_xmit_err += xdp_xmit_err;
+ es->ps.xdp_tx += xdp_tx;
+ es->ps.xdp_tx_err += xdp_tx_err;
+ }
+}
+
+static void mvneta_ethtool_update_stats(struct mvneta_port *pp)
+{
+ struct mvneta_ethtool_stats stats = {};
+ const struct mvneta_statistic *s;
+ void __iomem *base = pp->base;
+ u32 high, low;
+ u64 val;
+ int i;
+
+ mvneta_ethtool_update_pcpu_stats(pp, &stats);
+ for (i = 0, s = mvneta_statistics;
+ s < mvneta_statistics + ARRAY_SIZE(mvneta_statistics);
+ s++, i++) {
+ switch (s->type) {
+ case T_REG_32:
+ val = readl_relaxed(base + s->offset);
+ pp->ethtool_stats[i] += val;
+ break;
+ case T_REG_64:
+ /* Docs say to read low 32-bit then high */
+ low = readl_relaxed(base + s->offset);
+ high = readl_relaxed(base + s->offset + 4);
+ val = (u64)high << 32 | low;
+ pp->ethtool_stats[i] += val;
+ break;
+ case T_SW:
+ switch (s->offset) {
+ case ETHTOOL_STAT_EEE_WAKEUP:
+ val = phylink_get_eee_err(pp->phylink);
+ pp->ethtool_stats[i] += val;
+ break;
+ case ETHTOOL_STAT_SKB_ALLOC_ERR:
+ pp->ethtool_stats[i] = stats.skb_alloc_error;
+ break;
+ case ETHTOOL_STAT_REFILL_ERR:
+ pp->ethtool_stats[i] = stats.refill_error;
+ break;
+ case ETHTOOL_XDP_REDIRECT:
+ pp->ethtool_stats[i] = stats.ps.xdp_redirect;
+ break;
+ case ETHTOOL_XDP_PASS:
+ pp->ethtool_stats[i] = stats.ps.xdp_pass;
+ break;
+ case ETHTOOL_XDP_DROP:
+ pp->ethtool_stats[i] = stats.ps.xdp_drop;
+ break;
+ case ETHTOOL_XDP_TX:
+ pp->ethtool_stats[i] = stats.ps.xdp_tx;
+ break;
+ case ETHTOOL_XDP_TX_ERR:
+ pp->ethtool_stats[i] = stats.ps.xdp_tx_err;
+ break;
+ case ETHTOOL_XDP_XMIT:
+ pp->ethtool_stats[i] = stats.ps.xdp_xmit;
+ break;
+ case ETHTOOL_XDP_XMIT_ERR:
+ pp->ethtool_stats[i] = stats.ps.xdp_xmit_err;
+ break;
+ }
+ break;
+ }
+ }
+}
+
+static void mvneta_ethtool_pp_stats(struct mvneta_port *pp, u64 *data)
+{
+ struct page_pool_stats stats = {};
+ int i;
+
+ for (i = 0; i < rxq_number; i++) {
+ if (pp->rxqs[i].page_pool)
+ page_pool_get_stats(pp->rxqs[i].page_pool, &stats);
+ }
+
+ page_pool_ethtool_stats_get(data, &stats);
+}
+
+static void mvneta_ethtool_get_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+ int i;
+
+ mvneta_ethtool_update_stats(pp);
+
+ for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
+ *data++ = pp->ethtool_stats[i];
+
+ if (!pp->bm_priv)
+ mvneta_ethtool_pp_stats(pp, data);
+}
+
+static int mvneta_ethtool_get_sset_count(struct net_device *dev, int sset)
+{
+ if (sset == ETH_SS_STATS) {
+ int count = ARRAY_SIZE(mvneta_statistics);
+ struct mvneta_port *pp = netdev_priv(dev);
+
+ if (!pp->bm_priv)
+ count += page_pool_ethtool_stats_get_count();
+
+ return count;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static u32 mvneta_ethtool_get_rxfh_indir_size(struct net_device *dev)
+{
+ return MVNETA_RSS_LU_TABLE_SIZE;
+}
+
+static int mvneta_ethtool_get_rxnfc(struct net_device *dev,
+ struct ethtool_rxnfc *info,
+ u32 *rules __always_unused)
+{
+ switch (info->cmd) {
+ case ETHTOOL_GRXRINGS:
+ info->data = rxq_number;
+ return 0;
+ case ETHTOOL_GRXFH:
+ return -EOPNOTSUPP;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int mvneta_config_rss(struct mvneta_port *pp)
+{
+ int cpu;
+ u32 val;
+
+ netif_tx_stop_all_queues(pp->dev);
+
+ on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
+
+ if (!pp->neta_armada3700) {
+ /* We have to synchronise on the napi of each CPU */
+ for_each_online_cpu(cpu) {
+ struct mvneta_pcpu_port *pcpu_port =
+ per_cpu_ptr(pp->ports, cpu);
+
+ napi_synchronize(&pcpu_port->napi);
+ napi_disable(&pcpu_port->napi);
+ }
+ } else {
+ napi_synchronize(&pp->napi);
+ napi_disable(&pp->napi);
+ }
+
+ pp->rxq_def = pp->indir[0];
+
+ /* Update unicast mapping */
+ mvneta_set_rx_mode(pp->dev);
+
+ /* Update the portCfg register so all packet types use the new default RXQ */
+ val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def);
+ mvreg_write(pp, MVNETA_PORT_CONFIG, val);
+
+ /* Update the elected CPU matching the new rxq_def */
+ spin_lock(&pp->lock);
+ mvneta_percpu_elect(pp);
+ spin_unlock(&pp->lock);
+
+ if (!pp->neta_armada3700) {
+ /* We have to synchronise on the napi of each CPU */
+ for_each_online_cpu(cpu) {
+ struct mvneta_pcpu_port *pcpu_port =
+ per_cpu_ptr(pp->ports, cpu);
+
+ napi_enable(&pcpu_port->napi);
+ }
+ } else {
+ napi_enable(&pp->napi);
+ }
+
+ netif_tx_start_all_queues(pp->dev);
+
+ return 0;
+}
+
+static int mvneta_ethtool_set_rxfh(struct net_device *dev, const u32 *indir,
+ const u8 *key, const u8 hfunc)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+
+ /* Current code for Armada 3700 doesn't support RSS features yet */
+ if (pp->neta_armada3700)
+ return -EOPNOTSUPP;
+
+ /* We require at least one supported parameter to be changed
+ * and no change in any of the unsupported parameters
+ */
+ if (key ||
+ (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
+ return -EOPNOTSUPP;
+
+ if (!indir)
+ return 0;
+
+ memcpy(pp->indir, indir, MVNETA_RSS_LU_TABLE_SIZE);
+
+ return mvneta_config_rss(pp);
+}
+
+static int mvneta_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
+ u8 *hfunc)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+
+ /* Current code for Armada 3700 doesn't support RSS features yet */
+ if (pp->neta_armada3700)
+ return -EOPNOTSUPP;
+
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+
+ if (!indir)
+ return 0;
+
+ memcpy(indir, pp->indir, MVNETA_RSS_LU_TABLE_SIZE);
+
+ return 0;
+}
+
+static void mvneta_ethtool_get_wol(struct net_device *dev,
+ struct ethtool_wolinfo *wol)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+
+ phylink_ethtool_get_wol(pp->phylink, wol);
+}
+
+static int mvneta_ethtool_set_wol(struct net_device *dev,
+ struct ethtool_wolinfo *wol)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+ int ret;
+
+ ret = phylink_ethtool_set_wol(pp->phylink, wol);
+ if (!ret)
+ device_set_wakeup_enable(&dev->dev, !!wol->wolopts);
+
+ return ret;
+}
+
+static int mvneta_ethtool_get_eee(struct net_device *dev,
+ struct ethtool_eee *eee)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+ u32 lpi_ctl0;
+
+ lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0);
+
+ eee->eee_enabled = pp->eee_enabled;
+ eee->eee_active = pp->eee_active;
+ eee->tx_lpi_enabled = pp->tx_lpi_enabled;
+ eee->tx_lpi_timer = (lpi_ctl0) >> 8; // * scale;
+
+ return phylink_ethtool_get_eee(pp->phylink, eee);
+}
+
+static int mvneta_ethtool_set_eee(struct net_device *dev,
+ struct ethtool_eee *eee)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+ u32 lpi_ctl0;
+
+ /* The Armada 37x documents do not give limits for this other than
+ * it being an 8-bit register.
+ */
+ if (eee->tx_lpi_enabled && eee->tx_lpi_timer > 255)
+ return -EINVAL;
+
+ lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0);
+ lpi_ctl0 &= ~(0xff << 8);
+ lpi_ctl0 |= eee->tx_lpi_timer << 8;
+ mvreg_write(pp, MVNETA_LPI_CTRL_0, lpi_ctl0);
+
+ pp->eee_enabled = eee->eee_enabled;
+ pp->tx_lpi_enabled = eee->tx_lpi_enabled;
+
+ mvneta_set_eee(pp, eee->tx_lpi_enabled && eee->eee_enabled);
+
+ return phylink_ethtool_set_eee(pp->phylink, eee);
+}
+
+static void mvneta_clear_rx_prio_map(struct mvneta_port *pp)
+{
+ mvreg_write(pp, MVNETA_VLAN_PRIO_TO_RXQ, 0);
+}
+
+static void mvneta_map_vlan_prio_to_rxq(struct mvneta_port *pp, u8 pri, u8 rxq)
+{
+ u32 val = mvreg_read(pp, MVNETA_VLAN_PRIO_TO_RXQ);
+
+ val &= ~MVNETA_VLAN_PRIO_RXQ_MAP(pri, 0x7);
+ val |= MVNETA_VLAN_PRIO_RXQ_MAP(pri, rxq);
+
+ mvreg_write(pp, MVNETA_VLAN_PRIO_TO_RXQ, val);
+}
+
+static int mvneta_enable_per_queue_rate_limit(struct mvneta_port *pp)
+{
+ unsigned long core_clk_rate;
+ u32 refill_cycles;
+ u32 val;
+
+ core_clk_rate = clk_get_rate(pp->clk);
+ if (!core_clk_rate)
+ return -EINVAL;
+
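+ /* Number of core clock cycles in one bucket refill base period */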
+ refill_cycles = MVNETA_TXQ_BUCKET_REFILL_BASE_PERIOD_NS /
+ (NSEC_PER_SEC / core_clk_rate);
+
+ if (refill_cycles > MVNETA_REFILL_MAX_NUM_CLK)
+ return -EINVAL;
+
+ /* Enable bw limit algorithm version 3 */
+ val = mvreg_read(pp, MVNETA_TXQ_CMD1_REG);
+ val &= ~(MVNETA_TXQ_CMD1_BW_LIM_SEL_V1 | MVNETA_TXQ_CMD1_BW_LIM_EN);
+ mvreg_write(pp, MVNETA_TXQ_CMD1_REG, val);
+
+ /* Set the base refill rate */
+ mvreg_write(pp, MVNETA_REFILL_NUM_CLK_REG, refill_cycles);
+
+ return 0;
+}
+
+static void mvneta_disable_per_queue_rate_limit(struct mvneta_port *pp)
+{
+ u32 val = mvreg_read(pp, MVNETA_TXQ_CMD1_REG);
+
+ val |= (MVNETA_TXQ_CMD1_BW_LIM_SEL_V1 | MVNETA_TXQ_CMD1_BW_LIM_EN);
+ mvreg_write(pp, MVNETA_TXQ_CMD1_REG, val);
+}
+
+static int mvneta_setup_queue_rates(struct mvneta_port *pp, int queue,
+ u64 min_rate, u64 max_rate)
+{
+ u32 refill_val, rem;
+ u32 val = 0;
+
+ /* Convert from Bps to bps */
+ max_rate *= 8;
+
+ if (min_rate)
+ return -EINVAL;
+
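+ /* The hardware expresses the rate as a refill value in units of
+ * MVNETA_TXQ_RATE_LIMIT_RESOLUTION; reject rates that are not an
+ * exact multiple or that exceed the maximum refill value.
+ */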
+ refill_val = div_u64_rem(max_rate, MVNETA_TXQ_RATE_LIMIT_RESOLUTION,
+ &rem);
+
+ if (rem || !refill_val ||
+ refill_val > MVNETA_TXQ_BUCKET_REFILL_VALUE_MAX)
+ return -EINVAL;
+
+ val = refill_val;
+ val |= (MVNETA_TXQ_BUCKET_REFILL_PERIOD <<
+ MVNETA_TXQ_BUCKET_REFILL_PERIOD_SHIFT);
+
+ mvreg_write(pp, MVNETA_TXQ_BUCKET_REFILL_REG(queue), val);
+
+ return 0;
+}
+
+static int mvneta_setup_mqprio(struct net_device *dev,
+ struct tc_mqprio_qopt_offload *mqprio)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+ int rxq, txq, tc, ret;
+ u8 num_tc;
+
+ if (mqprio->qopt.hw != TC_MQPRIO_HW_OFFLOAD_TCS)
+ return 0;
+
+ num_tc = mqprio->qopt.num_tc;
+
+ if (num_tc > rxq_number)
+ return -EINVAL;
+
+ mvneta_clear_rx_prio_map(pp);
+
+ if (!num_tc) {
+ mvneta_disable_per_queue_rate_limit(pp);
+ netdev_reset_tc(dev);
+ return 0;
+ }
+
+ netdev_set_num_tc(dev, mqprio->qopt.num_tc);
+
+ for (tc = 0; tc < mqprio->qopt.num_tc; tc++) {
+ netdev_set_tc_queue(dev, tc, mqprio->qopt.count[tc],
+ mqprio->qopt.offset[tc]);
+
+ for (rxq = mqprio->qopt.offset[tc];
+ rxq < mqprio->qopt.count[tc] + mqprio->qopt.offset[tc];
+ rxq++) {
+ if (rxq >= rxq_number)
+ return -EINVAL;
+
+ mvneta_map_vlan_prio_to_rxq(pp, tc, rxq);
+ }
+ }
+
+ if (mqprio->shaper != TC_MQPRIO_SHAPER_BW_RATE) {
+ mvneta_disable_per_queue_rate_limit(pp);
+ return 0;
+ }
+
+ if (mqprio->qopt.num_tc > txq_number)
+ return -EINVAL;
+
+ ret = mvneta_enable_per_queue_rate_limit(pp);
+ if (ret)
+ return ret;
+
+ for (tc = 0; tc < mqprio->qopt.num_tc; tc++) {
+ for (txq = mqprio->qopt.offset[tc];
+ txq < mqprio->qopt.count[tc] + mqprio->qopt.offset[tc];
+ txq++) {
+ if (txq >= txq_number)
+ return -EINVAL;
+
+ ret = mvneta_setup_queue_rates(pp, txq,
+ mqprio->min_rate[tc],
+ mqprio->max_rate[tc]);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int mvneta_setup_tc(struct net_device *dev, enum tc_setup_type type,
+ void *type_data)
+{
+ switch (type) {
+ case TC_SETUP_QDISC_MQPRIO:
+ return mvneta_setup_mqprio(dev, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static const struct net_device_ops mvneta_netdev_ops = {
+ .ndo_open = mvneta_open,
+ .ndo_stop = mvneta_stop,
+ .ndo_start_xmit = mvneta_tx,
+ .ndo_set_rx_mode = mvneta_set_rx_mode,
+ .ndo_set_mac_address = mvneta_set_mac_addr,
+ .ndo_change_mtu = mvneta_change_mtu,
+ .ndo_fix_features = mvneta_fix_features,
+ .ndo_get_stats64 = mvneta_get_stats64,
+ .ndo_eth_ioctl = mvneta_ioctl,
+ .ndo_bpf = mvneta_xdp,
+ .ndo_xdp_xmit = mvneta_xdp_xmit,
+ .ndo_setup_tc = mvneta_setup_tc,
+};
+
+static const struct ethtool_ops mvneta_eth_tool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES,
+ .nway_reset = mvneta_ethtool_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .set_coalesce = mvneta_ethtool_set_coalesce,
+ .get_coalesce = mvneta_ethtool_get_coalesce,
+ .get_drvinfo = mvneta_ethtool_get_drvinfo,
+ .get_ringparam = mvneta_ethtool_get_ringparam,
+ .set_ringparam = mvneta_ethtool_set_ringparam,
+ .get_pauseparam = mvneta_ethtool_get_pauseparam,
+ .set_pauseparam = mvneta_ethtool_set_pauseparam,
+ .get_strings = mvneta_ethtool_get_strings,
+ .get_ethtool_stats = mvneta_ethtool_get_stats,
+ .get_sset_count = mvneta_ethtool_get_sset_count,
+ .get_rxfh_indir_size = mvneta_ethtool_get_rxfh_indir_size,
+ .get_rxnfc = mvneta_ethtool_get_rxnfc,
+ .get_rxfh = mvneta_ethtool_get_rxfh,
+ .set_rxfh = mvneta_ethtool_set_rxfh,
+ .get_link_ksettings = mvneta_ethtool_get_link_ksettings,
+ .set_link_ksettings = mvneta_ethtool_set_link_ksettings,
+ .get_wol = mvneta_ethtool_get_wol,
+ .set_wol = mvneta_ethtool_set_wol,
+ .get_eee = mvneta_ethtool_get_eee,
+ .set_eee = mvneta_ethtool_set_eee,
+};
+
+/* Initialize hw */
+static int mvneta_init(struct device *dev, struct mvneta_port *pp)
+{
+ int queue;
+
+ /* Disable port */
+ mvneta_port_disable(pp);
+
+ /* Set port default values */
+ mvneta_defaults_set(pp);
+
+ pp->txqs = devm_kcalloc(dev, txq_number, sizeof(*pp->txqs), GFP_KERNEL);
+ if (!pp->txqs)
+ return -ENOMEM;
+
+ /* Initialize TX descriptor rings */
+ for (queue = 0; queue < txq_number; queue++) {
+ struct mvneta_tx_queue *txq = &pp->txqs[queue];
+ txq->id = queue;
+ txq->size = pp->tx_ring_size;
+ txq->done_pkts_coal = MVNETA_TXDONE_COAL_PKTS;
+ }
+
+ pp->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*pp->rxqs), GFP_KERNEL);
+ if (!pp->rxqs)
+ return -ENOMEM;
+
+ /* Create Rx descriptor rings */
+ for (queue = 0; queue < rxq_number; queue++) {
+ struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
+ rxq->id = queue;
+ rxq->size = pp->rx_ring_size;
+ rxq->pkts_coal = MVNETA_RX_COAL_PKTS;
+ rxq->time_coal = MVNETA_RX_COAL_USEC;
+ rxq->buf_virt_addr
+ = devm_kmalloc_array(pp->dev->dev.parent,
+ rxq->size,
+ sizeof(*rxq->buf_virt_addr),
+ GFP_KERNEL);
+ if (!rxq->buf_virt_addr)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/* platform glue: initialize decoding windows */
+static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
+ const struct mbus_dram_target_info *dram)
+{
+ u32 win_enable;
+ u32 win_protect;
+ int i;
+
+ for (i = 0; i < 6; i++) {
+ mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
+ mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);
+
+ if (i < 4)
+ mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);
+ }
+
+ win_enable = 0x3f;
+ win_protect = 0;
+
+ if (dram) {
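+ /* Open one MBus window per DRAM chip select and grant it
+ * full read/write access.
+ */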
+ for (i = 0; i < dram->num_cs; i++) {
+ const struct mbus_dram_window *cs = dram->cs + i;
+
+ mvreg_write(pp, MVNETA_WIN_BASE(i),
+ (cs->base & 0xffff0000) |
+ (cs->mbus_attr << 8) |
+ dram->mbus_dram_target_id);
+
+ mvreg_write(pp, MVNETA_WIN_SIZE(i),
+ (cs->size - 1) & 0xffff0000);
+
+ win_enable &= ~(1 << i);
+ win_protect |= 3 << (2 * i);
+ }
+ } else {
+ if (pp->neta_ac5)
+ mvreg_write(pp, MVNETA_WIN_BASE(0),
+ (MVNETA_AC5_CNM_DDR_ATTR << 8) |
+ MVNETA_AC5_CNM_DDR_TARGET);
+ /* For Armada3700, open a default 4GB MBus window, leaving
+ * arbitration of target/attribute to a different layer
+ * of configuration.
+ */
+ mvreg_write(pp, MVNETA_WIN_SIZE(0), 0xffff0000);
+ win_enable &= ~BIT(0);
+ win_protect = 3;
+ }
+
+ mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
+ mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect);
+}
+
+/* Power up the port */
+static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
+{
+ /* MAC Cause register should be cleared */
+ mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);
+
+ if (phy_mode != PHY_INTERFACE_MODE_QSGMII &&
+ phy_mode != PHY_INTERFACE_MODE_SGMII &&
+ !phy_interface_mode_is_8023z(phy_mode) &&
+ !phy_interface_mode_is_rgmii(phy_mode))
+ return -EINVAL;
+
+ return 0;
+}
+
+/* Device initialization routine */
+static int mvneta_probe(struct platform_device *pdev)
+{
+ struct device_node *dn = pdev->dev.of_node;
+ struct device_node *bm_node;
+ struct mvneta_port *pp;
+ struct net_device *dev;
+ struct phylink *phylink;
+ struct phy *comphy;
+ char hw_mac_addr[ETH_ALEN];
+ phy_interface_t phy_mode;
+ const char *mac_from;
+ int tx_csum_limit;
+ int err;
+ int cpu;
+
+ dev = devm_alloc_etherdev_mqs(&pdev->dev, sizeof(struct mvneta_port),
+ txq_number, rxq_number);
+ if (!dev)
+ return -ENOMEM;
+
+ dev->tx_queue_len = MVNETA_MAX_TXD;
+ dev->watchdog_timeo = 5 * HZ;
+ dev->netdev_ops = &mvneta_netdev_ops;
+ dev->ethtool_ops = &mvneta_eth_tool_ops;
+
+ pp = netdev_priv(dev);
+ spin_lock_init(&pp->lock);
+ pp->dn = dn;
+
+ pp->rxq_def = rxq_def;
+ pp->indir[0] = rxq_def;
+
+ err = of_get_phy_mode(dn, &phy_mode);
+ if (err) {
+ dev_err(&pdev->dev, "incorrect phy-mode\n");
+ return err;
+ }
+
+ pp->phy_interface = phy_mode;
+
+ comphy = devm_of_phy_get(&pdev->dev, dn, NULL);
+ if (comphy == ERR_PTR(-EPROBE_DEFER))
+ return -EPROBE_DEFER;
+
+ if (IS_ERR(comphy))
+ comphy = NULL;
+
+ pp->comphy = comphy;
+
+ pp->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(pp->base))
+ return PTR_ERR(pp->base);
+
+ /* Get special SoC configurations */
+ if (of_device_is_compatible(dn, "marvell,armada-3700-neta"))
+ pp->neta_armada3700 = true;
+ if (of_device_is_compatible(dn, "marvell,armada-ac5-neta")) {
+ pp->neta_armada3700 = true;
+ pp->neta_ac5 = true;
+ }
+
+ dev->irq = irq_of_parse_and_map(dn, 0);
+ if (dev->irq == 0)
+ return -EINVAL;
+
+ pp->clk = devm_clk_get(&pdev->dev, "core");
+ if (IS_ERR(pp->clk))
+ pp->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(pp->clk)) {
+ err = PTR_ERR(pp->clk);
+ goto err_free_irq;
+ }
+
+ clk_prepare_enable(pp->clk);
+
+ pp->clk_bus = devm_clk_get(&pdev->dev, "bus");
+ if (!IS_ERR(pp->clk_bus))
+ clk_prepare_enable(pp->clk_bus);
+
+ pp->phylink_pcs.ops = &mvneta_phylink_pcs_ops;
+ pp->phylink_pcs.neg_mode = true;
+
+ pp->phylink_config.dev = &dev->dev;
+ pp->phylink_config.type = PHYLINK_NETDEV;
+ pp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_10 |
+ MAC_100 | MAC_1000FD | MAC_2500FD;
+
+ phy_interface_set_rgmii(pp->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_QSGMII,
+ pp->phylink_config.supported_interfaces);
+ if (comphy) {
+ /* If a COMPHY is present, we can support any of the serdes
+ * modes and switch between them.
+ */
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ pp->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ pp->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX,
+ pp->phylink_config.supported_interfaces);
+ } else if (phy_mode == PHY_INTERFACE_MODE_2500BASEX) {
+ /* No COMPHY, with only 2500BASE-X mode supported */
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX,
+ pp->phylink_config.supported_interfaces);
+ } else if (phy_mode == PHY_INTERFACE_MODE_1000BASEX ||
+ phy_mode == PHY_INTERFACE_MODE_SGMII) {
+ /* No COMPHY, we can switch between 1000BASE-X and SGMII */
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ pp->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ pp->phylink_config.supported_interfaces);
+ }
+
+ phylink = phylink_create(&pp->phylink_config, pdev->dev.fwnode,
+ phy_mode, &mvneta_phylink_ops);
+ if (IS_ERR(phylink)) {
+ err = PTR_ERR(phylink);
+ goto err_clk;
+ }
+
+ pp->phylink = phylink;
+
+ /* Alloc per-cpu port structure */
+ pp->ports = alloc_percpu(struct mvneta_pcpu_port);
+ if (!pp->ports) {
+ err = -ENOMEM;
+ goto err_free_phylink;
+ }
+
+ /* Alloc per-cpu stats */
+ pp->stats = netdev_alloc_pcpu_stats(struct mvneta_pcpu_stats);
+ if (!pp->stats) {
+ err = -ENOMEM;
+ goto err_free_ports;
+ }
+
+ err = of_get_ethdev_address(dn, dev);
+ if (!err) {
+ mac_from = "device tree";
+ } else {
+ mvneta_get_mac_addr(pp, hw_mac_addr);
+ if (is_valid_ether_addr(hw_mac_addr)) {
+ mac_from = "hardware";
+ eth_hw_addr_set(dev, hw_mac_addr);
+ } else {
+ mac_from = "random";
+ eth_hw_addr_random(dev);
+ }
+ }
+
+ if (!of_property_read_u32(dn, "tx-csum-limit", &tx_csum_limit)) {
+ if (tx_csum_limit < 0 ||
+ tx_csum_limit > MVNETA_TX_CSUM_MAX_SIZE) {
+ tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE;
+ dev_info(&pdev->dev,
+ "Wrong TX csum limit in DT, set to %dB\n",
+ MVNETA_TX_CSUM_DEF_SIZE);
+ }
+ } else if (of_device_is_compatible(dn, "marvell,armada-370-neta")) {
+ tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE;
+ } else {
+ tx_csum_limit = MVNETA_TX_CSUM_MAX_SIZE;
+ }
+
+ pp->tx_csum_limit = tx_csum_limit;
+
+ pp->dram_target_info = mv_mbus_dram_info();
+ /* Armada3700 requires setting a default MBus window
+ * configuration, but without using a filled
+ * mbus_dram_target_info structure.
+ */
+ if (pp->dram_target_info || pp->neta_armada3700)
+ mvneta_conf_mbus_windows(pp, pp->dram_target_info);
+
+ pp->tx_ring_size = MVNETA_MAX_TXD;
+ pp->rx_ring_size = MVNETA_MAX_RXD;
+
+ pp->dev = dev;
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ pp->id = global_port_id++;
+
+ /* Obtain access to BM resources if enabled and already initialized */
+ bm_node = of_parse_phandle(dn, "buffer-manager", 0);
+ if (bm_node) {
+ pp->bm_priv = mvneta_bm_get(bm_node);
+ if (pp->bm_priv) {
+ err = mvneta_bm_port_init(pdev, pp);
+ if (err < 0) {
+ dev_info(&pdev->dev,
+ "use SW buffer management\n");
+ mvneta_bm_put(pp->bm_priv);
+ pp->bm_priv = NULL;
+ }
+ }
+ /* Set RX packet offset correction for platforms whose
+ * NET_SKB_PAD exceeds 64B. It should be 64B for 64-bit
+ * platforms and 0B for 32-bit ones.
+ */
+ pp->rx_offset_correction = max(0,
+ NET_SKB_PAD -
+ MVNETA_RX_PKT_OFFSET_CORRECTION);
+ }
+ of_node_put(bm_node);
+
+ /* sw buffer management */
+ if (!pp->bm_priv)
+ pp->rx_offset_correction = MVNETA_SKB_HEADROOM;
+
+ err = mvneta_init(&pdev->dev, pp);
+ if (err < 0)
+ goto err_netdev;
+
+ err = mvneta_port_power_up(pp, pp->phy_interface);
+ if (err < 0) {
+ dev_err(&pdev->dev, "can't power up port\n");
+ goto err_netdev;
+ }
+
+ /* Armada3700 network controller does not support per-cpu
+ * operation, so only a single NAPI context should be initialized.
+ */
+ if (pp->neta_armada3700) {
+ netif_napi_add(dev, &pp->napi, mvneta_poll);
+ } else {
+ for_each_present_cpu(cpu) {
+ struct mvneta_pcpu_port *port =
+ per_cpu_ptr(pp->ports, cpu);
+
+ netif_napi_add(dev, &port->napi, mvneta_poll);
+ port->pp = pp;
+ }
+ }
+
+ dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_TSO | NETIF_F_RXCSUM;
+ dev->hw_features |= dev->features;
+ dev->vlan_features |= dev->features;
+ if (!pp->bm_priv)
+ dev->xdp_features = NETDEV_XDP_ACT_BASIC |
+ NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT |
+ NETDEV_XDP_ACT_RX_SG |
+ NETDEV_XDP_ACT_NDO_XMIT_SG;
+ dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+ netif_set_tso_max_segs(dev, MVNETA_MAX_TSO_SEGS);
+
+ /* MTU range: 68 - 9676 */
+ dev->min_mtu = ETH_MIN_MTU;
+ /* 9676 == 9700 - 20 and rounding to 8 */
+ dev->max_mtu = 9676;
+
+ err = register_netdev(dev);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to register\n");
+ goto err_netdev;
+ }
+
+ netdev_info(dev, "Using %s mac address %pM\n", mac_from,
+ dev->dev_addr);
+
+ platform_set_drvdata(pdev, pp->dev);
+
+ return 0;
+
+err_netdev:
+ if (pp->bm_priv) {
+ mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
+ mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short,
+ 1 << pp->id);
+ mvneta_bm_put(pp->bm_priv);
+ }
+ free_percpu(pp->stats);
+err_free_ports:
+ free_percpu(pp->ports);
+err_free_phylink:
+ if (pp->phylink)
+ phylink_destroy(pp->phylink);
+err_clk:
+ clk_disable_unprepare(pp->clk_bus);
+ clk_disable_unprepare(pp->clk);
+err_free_irq:
+ irq_dispose_mapping(dev->irq);
+ return err;
+}
+
+/* Device removal routine */
+static int mvneta_remove(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct mvneta_port *pp = netdev_priv(dev);
+
+ unregister_netdev(dev);
+ clk_disable_unprepare(pp->clk_bus);
+ clk_disable_unprepare(pp->clk);
+ free_percpu(pp->ports);
+ free_percpu(pp->stats);
+ irq_dispose_mapping(dev->irq);
+ phylink_destroy(pp->phylink);
+
+ if (pp->bm_priv) {
+ mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
+ mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short,
+ 1 << pp->id);
+ mvneta_bm_put(pp->bm_priv);
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int mvneta_suspend(struct device *device)
+{
+ int queue;
+ struct net_device *dev = dev_get_drvdata(device);
+ struct mvneta_port *pp = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ goto clean_exit;
+
+ if (!pp->neta_armada3700) {
+ spin_lock(&pp->lock);
+ pp->is_stopped = true;
+ spin_unlock(&pp->lock);
+
+ cpuhp_state_remove_instance_nocalls(online_hpstate,
+ &pp->node_online);
+ cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
+ &pp->node_dead);
+ }
+
+ rtnl_lock();
+ mvneta_stop_dev(pp);
+ rtnl_unlock();
+
+ for (queue = 0; queue < rxq_number; queue++) {
+ struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
+
+ mvneta_rxq_drop_pkts(pp, rxq);
+ }
+
+ for (queue = 0; queue < txq_number; queue++) {
+ struct mvneta_tx_queue *txq = &pp->txqs[queue];
+
+ mvneta_txq_hw_deinit(pp, txq);
+ }
+
+clean_exit:
+ netif_device_detach(dev);
+ clk_disable_unprepare(pp->clk_bus);
+ clk_disable_unprepare(pp->clk);
+
+ return 0;
+}
+
+static int mvneta_resume(struct device *device)
+{
+ struct platform_device *pdev = to_platform_device(device);
+ struct net_device *dev = dev_get_drvdata(device);
+ struct mvneta_port *pp = netdev_priv(dev);
+ int err, queue;
+
+ clk_prepare_enable(pp->clk);
+ if (!IS_ERR(pp->clk_bus))
+ clk_prepare_enable(pp->clk_bus);
+ if (pp->dram_target_info || pp->neta_armada3700)
+ mvneta_conf_mbus_windows(pp, pp->dram_target_info);
+ if (pp->bm_priv) {
+ err = mvneta_bm_port_init(pdev, pp);
+ if (err < 0) {
+ dev_info(&pdev->dev, "use SW buffer management\n");
+ pp->rx_offset_correction = MVNETA_SKB_HEADROOM;
+ pp->bm_priv = NULL;
+ }
+ }
+ mvneta_defaults_set(pp);
+ err = mvneta_port_power_up(pp, pp->phy_interface);
+ if (err < 0) {
+ dev_err(device, "can't power up port\n");
+ return err;
+ }
+
+ netif_device_attach(dev);
+
+ if (!netif_running(dev))
+ return 0;
+
+ for (queue = 0; queue < rxq_number; queue++) {
+ struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
+
+ rxq->next_desc_to_proc = 0;
+ mvneta_rxq_hw_init(pp, rxq);
+ }
+
+ for (queue = 0; queue < txq_number; queue++) {
+ struct mvneta_tx_queue *txq = &pp->txqs[queue];
+
+ txq->next_desc_to_proc = 0;
+ mvneta_txq_hw_init(pp, txq);
+ }
+
+ if (!pp->neta_armada3700) {
+ spin_lock(&pp->lock);
+ pp->is_stopped = false;
+ spin_unlock(&pp->lock);
+ cpuhp_state_add_instance_nocalls(online_hpstate,
+ &pp->node_online);
+ cpuhp_state_add_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
+ &pp->node_dead);
+ }
+
+ rtnl_lock();
+ mvneta_start_dev(pp);
+ rtnl_unlock();
+ mvneta_set_rx_mode(dev);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(mvneta_pm_ops, mvneta_suspend, mvneta_resume);
+
+static const struct of_device_id mvneta_match[] = {
+ { .compatible = "marvell,armada-370-neta" },
+ { .compatible = "marvell,armada-xp-neta" },
+ { .compatible = "marvell,armada-3700-neta" },
+ { .compatible = "marvell,armada-ac5-neta" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, mvneta_match);
+
+static struct platform_driver mvneta_driver = {
+ .probe = mvneta_probe,
+ .remove = mvneta_remove,
+ .driver = {
+ .name = MVNETA_DRIVER_NAME,
+ .of_match_table = mvneta_match,
+ .pm = &mvneta_pm_ops,
+ },
+};
+
+static int __init mvneta_driver_init(void)
+{
+ int ret;
+
+ BUILD_BUG_ON_NOT_POWER_OF_2(MVNETA_TSO_PER_PAGE);
+
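+ /* Reserve a dynamic CPU hotplug state for the per-port
+ * online/offline callbacks.
+ */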
+ ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "net/mvneta:online",
+ mvneta_cpu_online,
+ mvneta_cpu_down_prepare);
+ if (ret < 0)
+ goto out;
+ online_hpstate = ret;
+ ret = cpuhp_setup_state_multi(CPUHP_NET_MVNETA_DEAD, "net/mvneta:dead",
+ NULL, mvneta_cpu_dead);
+ if (ret)
+ goto err_dead;
+
+ ret = platform_driver_register(&mvneta_driver);
+ if (ret)
+ goto err;
+ return 0;
+
+err:
+ cpuhp_remove_multi_state(CPUHP_NET_MVNETA_DEAD);
+err_dead:
+ cpuhp_remove_multi_state(online_hpstate);
+out:
+ return ret;
+}
+module_init(mvneta_driver_init);
+
+static void __exit mvneta_driver_exit(void)
+{
+ platform_driver_unregister(&mvneta_driver);
+ cpuhp_remove_multi_state(CPUHP_NET_MVNETA_DEAD);
+ cpuhp_remove_multi_state(online_hpstate);
+}
+module_exit(mvneta_driver_exit);
+
+MODULE_DESCRIPTION("Marvell NETA Ethernet Driver - www.marvell.com");
+MODULE_AUTHOR("Rami Rosen <rosenr@marvell.com>, Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
+MODULE_LICENSE("GPL");
+
+module_param(rxq_number, int, 0444);
+module_param(txq_number, int, 0444);
+
+module_param(rxq_def, int, 0444);
+module_param(rx_copybreak, int, 0644);
diff --git a/drivers/net/ethernet/marvell/mvneta_bm.c b/drivers/net/ethernet/marvell/mvneta_bm.c
new file mode 100644
index 000000000..46c942ef2
--- /dev/null
+++ b/drivers/net/ethernet/marvell/mvneta_bm.c
@@ -0,0 +1,501 @@
+/*
+ * Driver for Marvell NETA network controller Buffer Manager.
+ *
+ * Copyright (C) 2015 Marvell
+ *
+ * Marcin Wojtas <mw@semihalf.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/genalloc.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mbus.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/skbuff.h>
+#include <net/hwbm.h>
+#include "mvneta_bm.h"
+
+#define MVNETA_BM_DRIVER_NAME "mvneta_bm"
+#define MVNETA_BM_DRIVER_VERSION "1.0"
+
+static void mvneta_bm_write(struct mvneta_bm *priv, u32 offset, u32 data)
+{
+ writel(data, priv->reg_base + offset);
+}
+
+static u32 mvneta_bm_read(struct mvneta_bm *priv, u32 offset)
+{
+ return readl(priv->reg_base + offset);
+}
+
+static void mvneta_bm_pool_enable(struct mvneta_bm *priv, int pool_id)
+{
+ u32 val;
+
+ val = mvneta_bm_read(priv, MVNETA_BM_POOL_BASE_REG(pool_id));
+ val |= MVNETA_BM_POOL_ENABLE_MASK;
+ mvneta_bm_write(priv, MVNETA_BM_POOL_BASE_REG(pool_id), val);
+
+ /* Clear BM cause register */
+ mvneta_bm_write(priv, MVNETA_BM_INTR_CAUSE_REG, 0);
+}
+
+static void mvneta_bm_pool_disable(struct mvneta_bm *priv, int pool_id)
+{
+ u32 val;
+
+ val = mvneta_bm_read(priv, MVNETA_BM_POOL_BASE_REG(pool_id));
+ val &= ~MVNETA_BM_POOL_ENABLE_MASK;
+ mvneta_bm_write(priv, MVNETA_BM_POOL_BASE_REG(pool_id), val);
+}
+
+static inline void mvneta_bm_config_set(struct mvneta_bm *priv, u32 mask)
+{
+ u32 val;
+
+ val = mvneta_bm_read(priv, MVNETA_BM_CONFIG_REG);
+ val |= mask;
+ mvneta_bm_write(priv, MVNETA_BM_CONFIG_REG, val);
+}
+
+static inline void mvneta_bm_config_clear(struct mvneta_bm *priv, u32 mask)
+{
+ u32 val;
+
+ val = mvneta_bm_read(priv, MVNETA_BM_CONFIG_REG);
+ val &= ~mask;
+ mvneta_bm_write(priv, MVNETA_BM_CONFIG_REG, val);
+}
+
+static void mvneta_bm_pool_target_set(struct mvneta_bm *priv, int pool_id,
+ u8 target_id, u8 attr)
+{
+ u32 val;
+
+ val = mvneta_bm_read(priv, MVNETA_BM_XBAR_POOL_REG(pool_id));
+ val &= ~MVNETA_BM_TARGET_ID_MASK(pool_id);
+ val &= ~MVNETA_BM_XBAR_ATTR_MASK(pool_id);
+ val |= MVNETA_BM_TARGET_ID_VAL(pool_id, target_id);
+ val |= MVNETA_BM_XBAR_ATTR_VAL(pool_id, attr);
+
+ mvneta_bm_write(priv, MVNETA_BM_XBAR_POOL_REG(pool_id), val);
+}
+
+int mvneta_bm_construct(struct hwbm_pool *hwbm_pool, void *buf)
+{
+ struct mvneta_bm_pool *bm_pool =
+ (struct mvneta_bm_pool *)hwbm_pool->priv;
+ struct mvneta_bm *priv = bm_pool->priv;
+ dma_addr_t phys_addr;
+
+ /* In order to update the buf_cookie field of the RX descriptor
+ * properly, the BM hardware expects the buffer's virtual address
+ * to be placed in the first four bytes of the mapped buffer.
+ */
+ *(u32 *)buf = (u32)buf;
+ phys_addr = dma_map_single(&priv->pdev->dev, buf, bm_pool->buf_size,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(&priv->pdev->dev, phys_addr)))
+ return -ENOMEM;
+
+ mvneta_bm_pool_put_bp(priv, bm_pool, phys_addr);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mvneta_bm_construct);
+
+/* Create pool */
+static int mvneta_bm_pool_create(struct mvneta_bm *priv,
+ struct mvneta_bm_pool *bm_pool)
+{
+ struct platform_device *pdev = priv->pdev;
+ u8 target_id, attr;
+ int size_bytes, err;
+ size_bytes = sizeof(u32) * bm_pool->hwbm_pool.size;
+ bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, size_bytes,
+ &bm_pool->phys_addr,
+ GFP_KERNEL);
+ if (!bm_pool->virt_addr)
+ return -ENOMEM;
+
+ if (!IS_ALIGNED((u32)bm_pool->virt_addr, MVNETA_BM_POOL_PTR_ALIGN)) {
+ dma_free_coherent(&pdev->dev, size_bytes, bm_pool->virt_addr,
+ bm_pool->phys_addr);
+ dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n",
+ bm_pool->id, MVNETA_BM_POOL_PTR_ALIGN);
+ return -ENOMEM;
+ }
+
+ err = mvebu_mbus_get_dram_win_info(bm_pool->phys_addr, &target_id,
+ &attr);
+ if (err < 0) {
+ dma_free_coherent(&pdev->dev, size_bytes, bm_pool->virt_addr,
+ bm_pool->phys_addr);
+ return err;
+ }
+
+ /* Set pool address */
+ mvneta_bm_write(priv, MVNETA_BM_POOL_BASE_REG(bm_pool->id),
+ bm_pool->phys_addr);
+
+ mvneta_bm_pool_target_set(priv, bm_pool->id, target_id, attr);
+ mvneta_bm_pool_enable(priv, bm_pool->id);
+
+ return 0;
+}
+
+/* Notify the driver that a BM pool is being used as a specific type and
+ * return the pool pointer on success.
+ */
+struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv, u8 pool_id,
+ enum mvneta_bm_type type, u8 port_id,
+ int pkt_size)
+{
+ struct mvneta_bm_pool *new_pool = &priv->bm_pools[pool_id];
+ int num, err;
+
+ if (new_pool->type == MVNETA_BM_LONG &&
+ new_pool->port_map != 1 << port_id) {
+ dev_err(&priv->pdev->dev,
+ "long pool cannot be shared by the ports\n");
+ return NULL;
+ }
+
+ if (new_pool->type == MVNETA_BM_SHORT && new_pool->type != type) {
+ dev_err(&priv->pdev->dev,
+ "mixing pools' types between the ports is forbidden\n");
+ return NULL;
+ }
+
+ if (new_pool->pkt_size == 0 || type != MVNETA_BM_SHORT)
+ new_pool->pkt_size = pkt_size;
+
+ /* Allocate buffers in case BM pool hasn't been used yet */
+ if (new_pool->type == MVNETA_BM_FREE) {
+ struct hwbm_pool *hwbm_pool = &new_pool->hwbm_pool;
+
+ new_pool->priv = priv;
+ new_pool->type = type;
+ new_pool->buf_size = MVNETA_RX_BUF_SIZE(new_pool->pkt_size);
+ hwbm_pool->frag_size =
+ SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(new_pool->pkt_size)) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ hwbm_pool->construct = mvneta_bm_construct;
+ hwbm_pool->priv = new_pool;
+ mutex_init(&hwbm_pool->buf_lock);
+
+ /* Create new pool */
+ err = mvneta_bm_pool_create(priv, new_pool);
+ if (err) {
+ dev_err(&priv->pdev->dev, "fail to create pool %d\n",
+ new_pool->id);
+ return NULL;
+ }
+
+ /* Allocate buffers for this pool */
+ num = hwbm_pool_add(hwbm_pool, hwbm_pool->size);
+ if (num != hwbm_pool->size) {
+ WARN(1, "pool %d: %d of %d allocated\n",
+ new_pool->id, num, hwbm_pool->size);
+ return NULL;
+ }
+ }
+
+ return new_pool;
+}
+EXPORT_SYMBOL_GPL(mvneta_bm_pool_use);
+
+/* Free all buffers from the pool */
+void mvneta_bm_bufs_free(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool,
+ u8 port_map)
+{
+ int i;
+
+ bm_pool->port_map &= ~port_map;
+ if (bm_pool->port_map)
+ return;
+
+ mvneta_bm_config_set(priv, MVNETA_BM_EMPTY_LIMIT_MASK);
+
+ for (i = 0; i < bm_pool->hwbm_pool.buf_num; i++) {
+ dma_addr_t buf_phys_addr;
+ u32 *vaddr;
+
+ /* Get buffer physical address (indirect access) */
+ buf_phys_addr = mvneta_bm_pool_get_bp(priv, bm_pool);
+
+ /* Work around a problem seen when destroying the pool,
+ * where a read access to the BPPI can return 0.
+ */
+ if (buf_phys_addr == 0)
+ continue;
+
+ vaddr = phys_to_virt(buf_phys_addr);
+ if (!vaddr)
+ break;
+
+ dma_unmap_single(&priv->pdev->dev, buf_phys_addr,
+ bm_pool->buf_size, DMA_FROM_DEVICE);
+ hwbm_buf_free(&bm_pool->hwbm_pool, vaddr);
+ }
+
+ mvneta_bm_config_clear(priv, MVNETA_BM_EMPTY_LIMIT_MASK);
+
+ /* Update BM driver with number of buffers removed from pool */
+ bm_pool->hwbm_pool.buf_num -= i;
+}
+EXPORT_SYMBOL_GPL(mvneta_bm_bufs_free);
+
+/* Cleanup pool */
+void mvneta_bm_pool_destroy(struct mvneta_bm *priv,
+ struct mvneta_bm_pool *bm_pool, u8 port_map)
+{
+ struct hwbm_pool *hwbm_pool = &bm_pool->hwbm_pool;
+ bm_pool->port_map &= ~port_map;
+ if (bm_pool->port_map)
+ return;
+
+ bm_pool->type = MVNETA_BM_FREE;
+
+ mvneta_bm_bufs_free(priv, bm_pool, port_map);
+ if (hwbm_pool->buf_num)
+ WARN(1, "cannot free all buffers in pool %d\n", bm_pool->id);
+
+ if (bm_pool->virt_addr) {
+ dma_free_coherent(&priv->pdev->dev,
+ sizeof(u32) * hwbm_pool->size,
+ bm_pool->virt_addr, bm_pool->phys_addr);
+ bm_pool->virt_addr = NULL;
+ }
+
+ mvneta_bm_pool_disable(priv, bm_pool->id);
+}
+EXPORT_SYMBOL_GPL(mvneta_bm_pool_destroy);
+
+static void mvneta_bm_pools_init(struct mvneta_bm *priv)
+{
+ struct device_node *dn = priv->pdev->dev.of_node;
+ struct mvneta_bm_pool *bm_pool;
+ char prop[15];
+ u32 size;
+ int i;
+
+ /* Activate BM unit */
+ mvneta_bm_write(priv, MVNETA_BM_COMMAND_REG, MVNETA_BM_START_MASK);
+
+ /* Create all pools with maximum size */
+ for (i = 0; i < MVNETA_BM_POOLS_NUM; i++) {
+ bm_pool = &priv->bm_pools[i];
+ bm_pool->id = i;
+ bm_pool->type = MVNETA_BM_FREE;
+
+ /* Reset read pointer */
+ mvneta_bm_write(priv, MVNETA_BM_POOL_READ_PTR_REG(i), 0);
+
+ /* Reset write pointer */
+ mvneta_bm_write(priv, MVNETA_BM_POOL_WRITE_PTR_REG(i), 0);
+
+ /* Configure pool size according to DT or use default value */
+ sprintf(prop, "pool%d,capacity", i);
+ if (of_property_read_u32(dn, prop, &size)) {
+ size = MVNETA_BM_POOL_CAP_DEF;
+ } else if (size > MVNETA_BM_POOL_CAP_MAX) {
+ dev_warn(&priv->pdev->dev,
+ "Illegal pool %d capacity %d, set to %d\n",
+ i, size, MVNETA_BM_POOL_CAP_MAX);
+ size = MVNETA_BM_POOL_CAP_MAX;
+ } else if (size < MVNETA_BM_POOL_CAP_MIN) {
+ dev_warn(&priv->pdev->dev,
+ "Illegal pool %d capacity %d, set to %d\n",
+ i, size, MVNETA_BM_POOL_CAP_MIN);
+ size = MVNETA_BM_POOL_CAP_MIN;
+ } else if (!IS_ALIGNED(size, MVNETA_BM_POOL_CAP_ALIGN)) {
+ dev_warn(&priv->pdev->dev,
+ "Illegal pool %d capacity %d, round to %d\n",
+ i, size, ALIGN(size,
+ MVNETA_BM_POOL_CAP_ALIGN));
+ size = ALIGN(size, MVNETA_BM_POOL_CAP_ALIGN);
+ }
+ bm_pool->hwbm_pool.size = size;
+
+ mvneta_bm_write(priv, MVNETA_BM_POOL_SIZE_REG(i),
+ bm_pool->hwbm_pool.size);
+
+ /* Obtain custom pkt_size from DT */
+ sprintf(prop, "pool%d,pkt-size", i);
+ if (of_property_read_u32(dn, prop, &bm_pool->pkt_size))
+ bm_pool->pkt_size = 0;
+ }
+}
+
+static void mvneta_bm_default_set(struct mvneta_bm *priv)
+{
+ u32 val;
+
+ /* Mask BM all interrupts */
+ mvneta_bm_write(priv, MVNETA_BM_INTR_MASK_REG, 0);
+
+ /* Clear BM cause register */
+ mvneta_bm_write(priv, MVNETA_BM_INTR_CAUSE_REG, 0);
+
+ /* Set BM configuration register */
+ val = mvneta_bm_read(priv, MVNETA_BM_CONFIG_REG);
+
+ /* Reduce MaxInBurstSize from 32 BPs to 16 BPs */
+ val &= ~MVNETA_BM_MAX_IN_BURST_SIZE_MASK;
+ val |= MVNETA_BM_MAX_IN_BURST_SIZE_16BP;
+ mvneta_bm_write(priv, MVNETA_BM_CONFIG_REG, val);
+}
+
+static int mvneta_bm_init(struct mvneta_bm *priv)
+{
+ mvneta_bm_default_set(priv);
+
+ /* Allocate and initialize BM pools structures */
+ priv->bm_pools = devm_kcalloc(&priv->pdev->dev, MVNETA_BM_POOLS_NUM,
+ sizeof(struct mvneta_bm_pool),
+ GFP_KERNEL);
+ if (!priv->bm_pools)
+ return -ENOMEM;
+
+ mvneta_bm_pools_init(priv);
+
+ return 0;
+}
+
+static int mvneta_bm_get_sram(struct device_node *dn,
+ struct mvneta_bm *priv)
+{
+ priv->bppi_pool = of_gen_pool_get(dn, "internal-mem", 0);
+ if (!priv->bppi_pool)
+ return -ENOMEM;
+
+ priv->bppi_virt_addr = gen_pool_dma_alloc(priv->bppi_pool,
+ MVNETA_BM_BPPI_SIZE,
+ &priv->bppi_phys_addr);
+ if (!priv->bppi_virt_addr)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void mvneta_bm_put_sram(struct mvneta_bm *priv)
+{
+ gen_pool_free(priv->bppi_pool, priv->bppi_phys_addr,
+ MVNETA_BM_BPPI_SIZE);
+}
+
+struct mvneta_bm *mvneta_bm_get(struct device_node *node)
+{
+ struct platform_device *pdev = of_find_device_by_node(node);
+
+ return pdev ? platform_get_drvdata(pdev) : NULL;
+}
+EXPORT_SYMBOL_GPL(mvneta_bm_get);
+
+void mvneta_bm_put(struct mvneta_bm *priv)
+{
+ platform_device_put(priv->pdev);
+}
+EXPORT_SYMBOL_GPL(mvneta_bm_put);
+
+static int mvneta_bm_probe(struct platform_device *pdev)
+{
+ struct device_node *dn = pdev->dev.of_node;
+ struct mvneta_bm *priv;
+ int err;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(struct mvneta_bm), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->reg_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->reg_base))
+ return PTR_ERR(priv->reg_base);
+
+ priv->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(priv->clk))
+ return PTR_ERR(priv->clk);
+ err = clk_prepare_enable(priv->clk);
+ if (err < 0)
+ return err;
+
+ err = mvneta_bm_get_sram(dn, priv);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to allocate internal memory\n");
+ goto err_clk;
+ }
+
+ priv->pdev = pdev;
+
+ /* Initialize buffer manager internals */
+ err = mvneta_bm_init(priv);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to initialize controller\n");
+ goto err_sram;
+ }
+
+ dn->data = priv;
+ platform_set_drvdata(pdev, priv);
+
+ dev_info(&pdev->dev, "Buffer Manager for network controller enabled\n");
+
+ return 0;
+
+err_sram:
+ mvneta_bm_put_sram(priv);
+err_clk:
+ clk_disable_unprepare(priv->clk);
+ return err;
+}
+
+static int mvneta_bm_remove(struct platform_device *pdev)
+{
+ struct mvneta_bm *priv = platform_get_drvdata(pdev);
+ u8 all_ports_map = 0xff;
+ int i = 0;
+
+ for (i = 0; i < MVNETA_BM_POOLS_NUM; i++) {
+ struct mvneta_bm_pool *bm_pool = &priv->bm_pools[i];
+
+ mvneta_bm_pool_destroy(priv, bm_pool, all_ports_map);
+ }
+
+ mvneta_bm_put_sram(priv);
+
+	/* Deactivate BM unit */
+ mvneta_bm_write(priv, MVNETA_BM_COMMAND_REG, MVNETA_BM_STOP_MASK);
+
+ clk_disable_unprepare(priv->clk);
+
+ return 0;
+}
+
+static const struct of_device_id mvneta_bm_match[] = {
+ { .compatible = "marvell,armada-380-neta-bm" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, mvneta_bm_match);
+
+static struct platform_driver mvneta_bm_driver = {
+ .probe = mvneta_bm_probe,
+ .remove = mvneta_bm_remove,
+ .driver = {
+ .name = MVNETA_BM_DRIVER_NAME,
+ .of_match_table = mvneta_bm_match,
+ },
+};
+
+module_platform_driver(mvneta_bm_driver);
+
+MODULE_DESCRIPTION("Marvell NETA Buffer Manager Driver - www.marvell.com");
+MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/marvell/mvneta_bm.h b/drivers/net/ethernet/marvell/mvneta_bm.h
new file mode 100644
index 000000000..e47783ce7
--- /dev/null
+++ b/drivers/net/ethernet/marvell/mvneta_bm.h
@@ -0,0 +1,192 @@
+/*
+ * Driver for Marvell NETA network controller Buffer Manager.
+ *
+ * Copyright (C) 2015 Marvell
+ *
+ * Marcin Wojtas <mw@semihalf.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef _MVNETA_BM_H_
+#define _MVNETA_BM_H_
+
+/* BM Configuration Register */
+#define MVNETA_BM_CONFIG_REG 0x0
+#define MVNETA_BM_STATUS_MASK 0x30
+#define MVNETA_BM_ACTIVE_MASK BIT(4)
+#define MVNETA_BM_MAX_IN_BURST_SIZE_MASK 0x60000
+#define MVNETA_BM_MAX_IN_BURST_SIZE_16BP BIT(18)
+#define MVNETA_BM_EMPTY_LIMIT_MASK BIT(19)
+
+/* BM Activation Register */
+#define MVNETA_BM_COMMAND_REG 0x4
+#define MVNETA_BM_START_MASK BIT(0)
+#define MVNETA_BM_STOP_MASK BIT(1)
+#define MVNETA_BM_PAUSE_MASK BIT(2)
+
+/* BM Xbar interface Register */
+#define MVNETA_BM_XBAR_01_REG 0x8
+#define MVNETA_BM_XBAR_23_REG 0xc
+#define MVNETA_BM_XBAR_POOL_REG(pool) \
+ (((pool) < 2) ? MVNETA_BM_XBAR_01_REG : MVNETA_BM_XBAR_23_REG)
+#define MVNETA_BM_TARGET_ID_OFFS(pool) (((pool) & 1) ? 16 : 0)
+#define MVNETA_BM_TARGET_ID_MASK(pool) \
+ (0xf << MVNETA_BM_TARGET_ID_OFFS(pool))
+#define MVNETA_BM_TARGET_ID_VAL(pool, id) \
+ ((id) << MVNETA_BM_TARGET_ID_OFFS(pool))
+#define MVNETA_BM_XBAR_ATTR_OFFS(pool) (((pool) & 1) ? 20 : 4)
+#define MVNETA_BM_XBAR_ATTR_MASK(pool) \
+ (0xff << MVNETA_BM_XBAR_ATTR_OFFS(pool))
+#define MVNETA_BM_XBAR_ATTR_VAL(pool, attr) \
+ ((attr) << MVNETA_BM_XBAR_ATTR_OFFS(pool))
+
+/* Address of External Buffer Pointers Pool Register */
+#define MVNETA_BM_POOL_BASE_REG(pool) (0x10 + ((pool) << 4))
+#define MVNETA_BM_POOL_ENABLE_MASK BIT(0)
+
+/* External Buffer Pointers Pool RD pointer Register */
+#define MVNETA_BM_POOL_READ_PTR_REG(pool) (0x14 + ((pool) << 4))
+#define MVNETA_BM_POOL_SET_READ_PTR_MASK 0xfffc
+#define MVNETA_BM_POOL_GET_READ_PTR_OFFS 16
+#define MVNETA_BM_POOL_GET_READ_PTR_MASK 0xfffc0000
+
+/* External Buffer Pointers Pool WR pointer */
+#define MVNETA_BM_POOL_WRITE_PTR_REG(pool) (0x18 + ((pool) << 4))
+#define MVNETA_BM_POOL_SET_WRITE_PTR_OFFS 0
+#define MVNETA_BM_POOL_SET_WRITE_PTR_MASK 0xfffc
+#define MVNETA_BM_POOL_GET_WRITE_PTR_OFFS 16
+#define MVNETA_BM_POOL_GET_WRITE_PTR_MASK 0xfffc0000
+
+/* External Buffer Pointers Pool Size Register */
+#define MVNETA_BM_POOL_SIZE_REG(pool) (0x1c + ((pool) << 4))
+#define MVNETA_BM_POOL_SIZE_MASK 0x3fff
+
+/* BM Interrupt Cause Register */
+#define MVNETA_BM_INTR_CAUSE_REG (0x50)
+
+/* BM interrupt Mask Register */
+#define MVNETA_BM_INTR_MASK_REG (0x54)
+
+/* Other definitions */
+#define MVNETA_BM_SHORT_PKT_SIZE 256
+#define MVNETA_BM_POOLS_NUM 4
+#define MVNETA_BM_POOL_CAP_MIN 128
+#define MVNETA_BM_POOL_CAP_DEF 2048
+#define MVNETA_BM_POOL_CAP_MAX \
+ (16 * 1024 - MVNETA_BM_POOL_CAP_ALIGN)
+#define MVNETA_BM_POOL_CAP_ALIGN 32
+#define MVNETA_BM_POOL_PTR_ALIGN 32
+
+#define MVNETA_BM_POOL_ACCESS_OFFS 8
+
+#define MVNETA_BM_BPPI_SIZE 0x100000
+
+#define MVNETA_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD)
+
+enum mvneta_bm_type {
+ MVNETA_BM_FREE,
+ MVNETA_BM_LONG,
+ MVNETA_BM_SHORT
+};
+
+struct mvneta_bm {
+ void __iomem *reg_base;
+ struct clk *clk;
+ struct platform_device *pdev;
+
+ struct gen_pool *bppi_pool;
+ /* BPPI virtual base address */
+ void __iomem *bppi_virt_addr;
+ /* BPPI physical base address */
+ dma_addr_t bppi_phys_addr;
+
+ /* BM pools */
+ struct mvneta_bm_pool *bm_pools;
+};
+
+struct mvneta_bm_pool {
+ struct hwbm_pool hwbm_pool;
+ /* Pool number in the range 0-3 */
+ u8 id;
+ enum mvneta_bm_type type;
+
+ /* Packet size */
+ int pkt_size;
+	/* Size of the buffer accessed through DMA */
+ u32 buf_size;
+
+ /* BPPE virtual base address */
+ u32 *virt_addr;
+ /* BPPE physical base address */
+ dma_addr_t phys_addr;
+
+ /* Ports using BM pool */
+ u8 port_map;
+
+ struct mvneta_bm *priv;
+};
+
+/* Declarations and definitions */
+#if IS_ENABLED(CONFIG_MVNETA_BM)
+struct mvneta_bm *mvneta_bm_get(struct device_node *node);
+void mvneta_bm_put(struct mvneta_bm *priv);
+
+void mvneta_bm_pool_destroy(struct mvneta_bm *priv,
+ struct mvneta_bm_pool *bm_pool, u8 port_map);
+void mvneta_bm_bufs_free(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool,
+ u8 port_map);
+int mvneta_bm_construct(struct hwbm_pool *hwbm_pool, void *buf);
+int mvneta_bm_pool_refill(struct mvneta_bm *priv,
+ struct mvneta_bm_pool *bm_pool);
+struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv, u8 pool_id,
+ enum mvneta_bm_type type, u8 port_id,
+ int pkt_size);
+
+static inline void mvneta_bm_pool_put_bp(struct mvneta_bm *priv,
+ struct mvneta_bm_pool *bm_pool,
+ dma_addr_t buf_phys_addr)
+{
+ writel_relaxed(buf_phys_addr, priv->bppi_virt_addr +
+ (bm_pool->id << MVNETA_BM_POOL_ACCESS_OFFS));
+}
+
+static inline u32 mvneta_bm_pool_get_bp(struct mvneta_bm *priv,
+ struct mvneta_bm_pool *bm_pool)
+{
+ return readl_relaxed(priv->bppi_virt_addr +
+ (bm_pool->id << MVNETA_BM_POOL_ACCESS_OFFS));
+}
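+
+/* The two accessors above address one 32-bit access window per pool in
+ * the BPPI SRAM, spaced id << MVNETA_BM_POOL_ACCESS_OFFS (i.e. id * 256)
+ * bytes from bppi_virt_addr; for instance, pool 2 is reached at
+ * bppi_virt_addr + 0x200.
+ */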
+#else
+static inline void mvneta_bm_pool_destroy(struct mvneta_bm *priv,
+ struct mvneta_bm_pool *bm_pool,
+ u8 port_map) {}
+static inline void mvneta_bm_bufs_free(struct mvneta_bm *priv,
+ struct mvneta_bm_pool *bm_pool,
+ u8 port_map) {}
+static inline int mvneta_bm_construct(struct hwbm_pool *hwbm_pool, void *buf)
+{ return 0; }
+static inline int mvneta_bm_pool_refill(struct mvneta_bm *priv,
+ struct mvneta_bm_pool *bm_pool)
+{ return 0; }
+static inline struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv,
+ u8 pool_id,
+ enum mvneta_bm_type type,
+ u8 port_id,
+ int pkt_size)
+{ return NULL; }
+
+static inline void mvneta_bm_pool_put_bp(struct mvneta_bm *priv,
+ struct mvneta_bm_pool *bm_pool,
+ dma_addr_t buf_phys_addr) {}
+
+static inline u32 mvneta_bm_pool_get_bp(struct mvneta_bm *priv,
+ struct mvneta_bm_pool *bm_pool)
+{ return 0; }
+static inline struct mvneta_bm *mvneta_bm_get(struct device_node *node)
+{ return NULL; }
+static inline void mvneta_bm_put(struct mvneta_bm *priv) {}
+#endif /* CONFIG_MVNETA_BM */
+#endif
diff --git a/drivers/net/ethernet/marvell/mvpp2/Makefile b/drivers/net/ethernet/marvell/mvpp2/Makefile
new file mode 100644
index 000000000..9bd8e7964
--- /dev/null
+++ b/drivers/net/ethernet/marvell/mvpp2/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the Marvell PPv2 driver.
+#
+obj-$(CONFIG_MVPP2) := mvpp2.o
+
+mvpp2-y := mvpp2_main.o mvpp2_prs.o mvpp2_cls.o mvpp2_debugfs.o
+mvpp2-$(CONFIG_MVPP2_PTP) += mvpp2_tai.o
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
new file mode 100644
index 000000000..e809f91c0
--- /dev/null
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
@@ -0,0 +1,1570 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Definitions for Marvell PPv2 network controller for Armada 375 SoC.
+ *
+ * Copyright (C) 2014 Marvell
+ *
+ * Marcin Wojtas <mw@semihalf.com>
+ */
+#ifndef _MVPP2_H_
+#define _MVPP2_H_
+
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/net_tstamp.h>
+#include <linux/phy.h>
+#include <linux/phylink.h>
+#include <net/flow_offload.h>
+#include <net/page_pool/types.h>
+#include <linux/bpf.h>
+#include <net/xdp.h>
+
+/* The PacketOffset field is measured in units of 32 bytes and is 3 bits wide,
+ * so the maximum offset is 7 * 32 = 224
+ */
+#define MVPP2_SKB_HEADROOM min(max(XDP_PACKET_HEADROOM, NET_SKB_PAD), 224)
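+
+/* Worked example of the clamp above, assuming the common values
+ * XDP_PACKET_HEADROOM = 256 and NET_SKB_PAD = 32 or 64 (both depend on
+ * the kernel configuration): min(max(256, NET_SKB_PAD), 224) =
+ * min(256, 224) = 224, so the headroom never exceeds what the 3-bit
+ * PacketOffset field can express.
+ */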
+
+#define MVPP2_XDP_PASS 0
+#define MVPP2_XDP_DROPPED BIT(0)
+#define MVPP2_XDP_TX BIT(1)
+#define MVPP2_XDP_REDIR BIT(2)
+
+/* Fifo Registers */
+#define MVPP2_RX_DATA_FIFO_SIZE_REG(port) (0x00 + 4 * (port))
+#define MVPP2_RX_ATTR_FIFO_SIZE_REG(port) (0x20 + 4 * (port))
+#define MVPP2_RX_MIN_PKT_SIZE_REG 0x60
+#define MVPP2_RX_FIFO_INIT_REG 0x64
+#define MVPP22_TX_FIFO_THRESH_REG(port) (0x8840 + 4 * (port))
+#define MVPP22_TX_FIFO_SIZE_REG(port) (0x8860 + 4 * (port))
+
+/* RX DMA Top Registers */
+#define MVPP2_RX_CTRL_REG(port) (0x140 + 4 * (port))
+#define MVPP2_RX_LOW_LATENCY_PKT_SIZE(s) (((s) & 0xfff) << 16)
+#define MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK BIT(31)
+#define MVPP2_POOL_BUF_SIZE_REG(pool) (0x180 + 4 * (pool))
+#define MVPP2_POOL_BUF_SIZE_OFFSET 5
+#define MVPP2_RXQ_CONFIG_REG(rxq) (0x800 + 4 * (rxq))
+#define MVPP2_SNOOP_PKT_SIZE_MASK 0x1ff
+#define MVPP2_SNOOP_BUF_HDR_MASK BIT(9)
+#define MVPP2_RXQ_POOL_SHORT_OFFS 20
+#define MVPP21_RXQ_POOL_SHORT_MASK 0x700000
+#define MVPP22_RXQ_POOL_SHORT_MASK 0xf00000
+#define MVPP2_RXQ_POOL_LONG_OFFS 24
+#define MVPP21_RXQ_POOL_LONG_MASK 0x7000000
+#define MVPP22_RXQ_POOL_LONG_MASK 0xf000000
+#define MVPP2_RXQ_PACKET_OFFSET_OFFS 28
+#define MVPP2_RXQ_PACKET_OFFSET_MASK 0x70000000
+#define MVPP2_RXQ_DISABLE_MASK BIT(31)
+
+/* Top Registers */
+#define MVPP2_MH_REG(port) (0x5040 + 4 * (port))
+#define MVPP2_DSA_EXTENDED BIT(5)
+#define MVPP2_VER_ID_REG 0x50b0
+#define MVPP2_VER_PP22 0x10
+#define MVPP2_VER_PP23 0x11
+
+/* Parser Registers */
+#define MVPP2_PRS_INIT_LOOKUP_REG 0x1000
+#define MVPP2_PRS_PORT_LU_MAX 0xf
+#define MVPP2_PRS_PORT_LU_MASK(port) (0xff << ((port) * 4))
+#define MVPP2_PRS_PORT_LU_VAL(port, val) ((val) << ((port) * 4))
+#define MVPP2_PRS_INIT_OFFS_REG(port) (0x1004 + ((port) & 4))
+#define MVPP2_PRS_INIT_OFF_MASK(port) (0x3f << (((port) % 4) * 8))
+#define MVPP2_PRS_INIT_OFF_VAL(port, val) ((val) << (((port) % 4) * 8))
+#define MVPP2_PRS_MAX_LOOP_REG(port) (0x100c + ((port) & 4))
+#define MVPP2_PRS_MAX_LOOP_MASK(port) (0xff << (((port) % 4) * 8))
+#define MVPP2_PRS_MAX_LOOP_VAL(port, val) ((val) << (((port) % 4) * 8))
+#define MVPP2_PRS_TCAM_IDX_REG 0x1100
+#define MVPP2_PRS_TCAM_DATA_REG(idx) (0x1104 + (idx) * 4)
+#define MVPP2_PRS_TCAM_INV_MASK BIT(31)
+#define MVPP2_PRS_SRAM_IDX_REG 0x1200
+#define MVPP2_PRS_SRAM_DATA_REG(idx) (0x1204 + (idx) * 4)
+#define MVPP2_PRS_TCAM_CTRL_REG 0x1230
+#define MVPP2_PRS_TCAM_EN_MASK BIT(0)
+#define MVPP2_PRS_TCAM_HIT_IDX_REG 0x1240
+#define MVPP2_PRS_TCAM_HIT_CNT_REG 0x1244
+#define MVPP2_PRS_TCAM_HIT_CNT_MASK GENMASK(15, 0)
+
+/* RSS Registers */
+#define MVPP22_RSS_INDEX 0x1500
+#define MVPP22_RSS_INDEX_TABLE_ENTRY(idx) (idx)
+#define MVPP22_RSS_INDEX_TABLE(idx) ((idx) << 8)
+#define MVPP22_RSS_INDEX_QUEUE(idx) ((idx) << 16)
+#define MVPP22_RXQ2RSS_TABLE 0x1504
+#define MVPP22_RSS_TABLE_POINTER(p) (p)
+#define MVPP22_RSS_TABLE_ENTRY 0x1508
+#define MVPP22_RSS_WIDTH 0x150c
+
+/* Classifier Registers */
+#define MVPP2_CLS_MODE_REG 0x1800
+#define MVPP2_CLS_MODE_ACTIVE_MASK BIT(0)
+#define MVPP2_CLS_PORT_WAY_REG 0x1810
+#define MVPP2_CLS_PORT_WAY_MASK(port) (1 << (port))
+#define MVPP2_CLS_LKP_INDEX_REG 0x1814
+#define MVPP2_CLS_LKP_INDEX_WAY_OFFS 6
+#define MVPP2_CLS_LKP_TBL_REG 0x1818
+#define MVPP2_CLS_LKP_TBL_RXQ_MASK 0xff
+#define MVPP2_CLS_LKP_FLOW_PTR(flow) ((flow) << 16)
+#define MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK BIT(25)
+#define MVPP2_CLS_FLOW_INDEX_REG 0x1820
+#define MVPP2_CLS_FLOW_TBL0_REG 0x1824
+#define MVPP2_CLS_FLOW_TBL0_LAST BIT(0)
+#define MVPP2_CLS_FLOW_TBL0_ENG_MASK 0x7
+#define MVPP2_CLS_FLOW_TBL0_OFFS 1
+#define MVPP2_CLS_FLOW_TBL0_ENG(x) ((x) << 1)
+#define MVPP2_CLS_FLOW_TBL0_PORT_ID_MASK 0xff
+#define MVPP2_CLS_FLOW_TBL0_PORT_ID(port) ((port) << 4)
+#define MVPP2_CLS_FLOW_TBL0_PORT_ID_SEL BIT(23)
+#define MVPP2_CLS_FLOW_TBL1_REG 0x1828
+#define MVPP2_CLS_FLOW_TBL1_N_FIELDS_MASK 0x7
+#define MVPP2_CLS_FLOW_TBL1_N_FIELDS(x) (x)
+#define MVPP2_CLS_FLOW_TBL1_LU_TYPE(lu) (((lu) & 0x3f) << 3)
+#define MVPP2_CLS_FLOW_TBL1_PRIO_MASK 0x3f
+#define MVPP2_CLS_FLOW_TBL1_PRIO(x) ((x) << 9)
+#define MVPP2_CLS_FLOW_TBL1_SEQ_MASK 0x7
+#define MVPP2_CLS_FLOW_TBL1_SEQ(x) ((x) << 15)
+#define MVPP2_CLS_FLOW_TBL2_REG 0x182c
+#define MVPP2_CLS_FLOW_TBL2_FLD_MASK 0x3f
+#define MVPP2_CLS_FLOW_TBL2_FLD_OFFS(n) ((n) * 6)
+#define MVPP2_CLS_FLOW_TBL2_FLD(n, x) ((x) << ((n) * 6))
+#define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port) (0x1980 + ((port) * 4))
+#define MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS 3
+#define MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK 0x7
+#define MVPP2_CLS_SWFWD_P2HQ_REG(port) (0x19b0 + ((port) * 4))
+#define MVPP2_CLS_SWFWD_PCTRL_REG 0x19d0
+#define MVPP2_CLS_SWFWD_PCTRL_MASK(port) (1 << (port))
+
+/* Classifier C2 engine Registers */
+#define MVPP22_CLS_C2_TCAM_IDX 0x1b00
+#define MVPP22_CLS_C2_TCAM_DATA0 0x1b10
+#define MVPP22_CLS_C2_TCAM_DATA1 0x1b14
+#define MVPP22_CLS_C2_TCAM_DATA2 0x1b18
+#define MVPP22_CLS_C2_TCAM_DATA3 0x1b1c
+#define MVPP22_CLS_C2_TCAM_DATA4 0x1b20
+#define MVPP22_CLS_C2_LU_TYPE(lu) ((lu) & 0x3f)
+#define MVPP22_CLS_C2_PORT_ID(port) ((port) << 8)
+#define MVPP22_CLS_C2_PORT_MASK (0xff << 8)
+#define MVPP22_CLS_C2_TCAM_INV 0x1b24
+#define MVPP22_CLS_C2_TCAM_INV_BIT BIT(31)
+#define MVPP22_CLS_C2_HIT_CTR 0x1b50
+#define MVPP22_CLS_C2_ACT 0x1b60
+#define MVPP22_CLS_C2_ACT_RSS_EN(act) (((act) & 0x3) << 19)
+#define MVPP22_CLS_C2_ACT_FWD(act) (((act) & 0x7) << 13)
+#define MVPP22_CLS_C2_ACT_QHIGH(act) (((act) & 0x3) << 11)
+#define MVPP22_CLS_C2_ACT_QLOW(act) (((act) & 0x3) << 9)
+#define MVPP22_CLS_C2_ACT_COLOR(act) ((act) & 0x7)
+#define MVPP22_CLS_C2_ATTR0 0x1b64
+#define MVPP22_CLS_C2_ATTR0_QHIGH(qh) (((qh) & 0x1f) << 24)
+#define MVPP22_CLS_C2_ATTR0_QHIGH_MASK 0x1f
+#define MVPP22_CLS_C2_ATTR0_QHIGH_OFFS 24
+#define MVPP22_CLS_C2_ATTR0_QLOW(ql) (((ql) & 0x7) << 21)
+#define MVPP22_CLS_C2_ATTR0_QLOW_MASK 0x7
+#define MVPP22_CLS_C2_ATTR0_QLOW_OFFS 21
+#define MVPP22_CLS_C2_ATTR1 0x1b68
+#define MVPP22_CLS_C2_ATTR2 0x1b6c
+#define MVPP22_CLS_C2_ATTR2_RSS_EN BIT(30)
+#define MVPP22_CLS_C2_ATTR3 0x1b70
+#define MVPP22_CLS_C2_TCAM_CTRL 0x1b90
+#define MVPP22_CLS_C2_TCAM_BYPASS_FIFO BIT(0)
+
+/* Descriptor Manager Top Registers */
+#define MVPP2_RXQ_NUM_REG 0x2040
+#define MVPP2_RXQ_DESC_ADDR_REG 0x2044
+#define MVPP22_DESC_ADDR_OFFS 8
+#define MVPP2_RXQ_DESC_SIZE_REG 0x2048
+#define MVPP2_RXQ_DESC_SIZE_MASK 0x3ff0
+#define MVPP2_RXQ_STATUS_UPDATE_REG(rxq) (0x3000 + 4 * (rxq))
+#define MVPP2_RXQ_NUM_PROCESSED_OFFSET 0
+#define MVPP2_RXQ_NUM_NEW_OFFSET 16
+#define MVPP2_RXQ_STATUS_REG(rxq) (0x3400 + 4 * (rxq))
+#define MVPP2_RXQ_OCCUPIED_MASK 0x3fff
+#define MVPP2_RXQ_NON_OCCUPIED_OFFSET 16
+#define MVPP2_RXQ_NON_OCCUPIED_MASK 0x3fff0000
+#define MVPP2_RXQ_THRESH_REG 0x204c
+#define MVPP2_OCCUPIED_THRESH_OFFSET 0
+#define MVPP2_OCCUPIED_THRESH_MASK 0x3fff
+#define MVPP2_RXQ_INDEX_REG 0x2050
+#define MVPP2_TXQ_NUM_REG 0x2080
+#define MVPP2_TXQ_DESC_ADDR_REG 0x2084
+#define MVPP2_TXQ_DESC_SIZE_REG 0x2088
+#define MVPP2_TXQ_DESC_SIZE_MASK 0x3ff0
+#define MVPP2_TXQ_THRESH_REG 0x2094
+#define MVPP2_TXQ_THRESH_OFFSET 16
+#define MVPP2_TXQ_THRESH_MASK 0x3fff
+#define MVPP2_AGGR_TXQ_UPDATE_REG 0x2090
+#define MVPP2_TXQ_INDEX_REG 0x2098
+#define MVPP2_TXQ_PREF_BUF_REG 0x209c
+#define MVPP2_PREF_BUF_PTR(desc) ((desc) & 0xfff)
+#define MVPP2_PREF_BUF_SIZE_4 (BIT(12) | BIT(13))
+#define MVPP2_PREF_BUF_SIZE_16 (BIT(12) | BIT(14))
+#define MVPP2_PREF_BUF_THRESH(val) ((val) << 17)
+#define MVPP2_TXQ_DRAIN_EN_MASK BIT(31)
+#define MVPP2_TXQ_PENDING_REG 0x20a0
+#define MVPP2_TXQ_PENDING_MASK 0x3fff
+#define MVPP2_TXQ_INT_STATUS_REG 0x20a4
+#define MVPP2_TXQ_SENT_REG(txq) (0x3c00 + 4 * (txq))
+#define MVPP2_TRANSMITTED_COUNT_OFFSET 16
+#define MVPP2_TRANSMITTED_COUNT_MASK 0x3fff0000
+#define MVPP2_TXQ_RSVD_REQ_REG 0x20b0
+#define MVPP2_TXQ_RSVD_REQ_Q_OFFSET 16
+#define MVPP2_TXQ_RSVD_RSLT_REG 0x20b4
+#define MVPP2_TXQ_RSVD_RSLT_MASK 0x3fff
+#define MVPP2_TXQ_RSVD_CLR_REG 0x20b8
+#define MVPP2_TXQ_RSVD_CLR_OFFSET 16
+#define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu) (0x2100 + 4 * (cpu))
+#define MVPP22_AGGR_TXQ_DESC_ADDR_OFFS 8
+#define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu) (0x2140 + 4 * (cpu))
+#define MVPP2_AGGR_TXQ_DESC_SIZE_MASK 0x3ff0
+#define MVPP2_AGGR_TXQ_STATUS_REG(cpu) (0x2180 + 4 * (cpu))
+#define MVPP2_AGGR_TXQ_PENDING_MASK 0x3fff
+#define MVPP2_AGGR_TXQ_INDEX_REG(cpu) (0x21c0 + 4 * (cpu))
+
+/* MBUS bridge registers */
+#define MVPP2_WIN_BASE(w) (0x4000 + ((w) << 2))
+#define MVPP2_WIN_SIZE(w) (0x4020 + ((w) << 2))
+#define MVPP2_WIN_REMAP(w) (0x4040 + ((w) << 2))
+#define MVPP2_BASE_ADDR_ENABLE 0x4060
+
+/* AXI Bridge Registers */
+#define MVPP22_AXI_BM_WR_ATTR_REG 0x4100
+#define MVPP22_AXI_BM_RD_ATTR_REG 0x4104
+#define MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG 0x4110
+#define MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG 0x4114
+#define MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG 0x4118
+#define MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG 0x411c
+#define MVPP22_AXI_RX_DATA_WR_ATTR_REG 0x4120
+#define MVPP22_AXI_TX_DATA_RD_ATTR_REG 0x4130
+#define MVPP22_AXI_RD_NORMAL_CODE_REG 0x4150
+#define MVPP22_AXI_RD_SNOOP_CODE_REG 0x4154
+#define MVPP22_AXI_WR_NORMAL_CODE_REG 0x4160
+#define MVPP22_AXI_WR_SNOOP_CODE_REG 0x4164
+
+/* Values for AXI Bridge registers */
+#define MVPP22_AXI_ATTR_CACHE_OFFS 0
+#define MVPP22_AXI_ATTR_DOMAIN_OFFS 12
+
+#define MVPP22_AXI_CODE_CACHE_OFFS 0
+#define MVPP22_AXI_CODE_DOMAIN_OFFS 4
+
+#define MVPP22_AXI_CODE_CACHE_NON_CACHE 0x3
+#define MVPP22_AXI_CODE_CACHE_WR_CACHE 0x7
+#define MVPP22_AXI_CODE_CACHE_RD_CACHE 0xb
+
+#define MVPP22_AXI_CODE_DOMAIN_OUTER_DOM 2
+#define MVPP22_AXI_CODE_DOMAIN_SYSTEM 3
+
+/* Interrupt Cause and Mask registers */
+#define MVPP2_ISR_TX_THRESHOLD_REG(port) (0x5140 + 4 * (port))
+#define MVPP2_MAX_ISR_TX_THRESHOLD 0xfffff0
+
+#define MVPP2_ISR_RX_THRESHOLD_REG(rxq) (0x5200 + 4 * (rxq))
+#define MVPP2_MAX_ISR_RX_THRESHOLD 0xfffff0
+#define MVPP21_ISR_RXQ_GROUP_REG(port) (0x5400 + 4 * (port))
+
+#define MVPP22_ISR_RXQ_GROUP_INDEX_REG 0x5400
+#define MVPP22_ISR_RXQ_GROUP_INDEX_SUBGROUP_MASK 0xf
+#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK 0x380
+#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET 7
+
+#define MVPP22_ISR_RXQ_GROUP_INDEX_SUBGROUP_MASK 0xf
+#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK 0x380
+
+#define MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG 0x5404
+#define MVPP22_ISR_RXQ_SUB_GROUP_STARTQ_MASK 0x1f
+#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_MASK 0xf00
+#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET 8
+
+#define MVPP2_ISR_ENABLE_REG(port) (0x5420 + 4 * (port))
+#define MVPP2_ISR_ENABLE_INTERRUPT(mask) ((mask) & 0xffff)
+#define MVPP2_ISR_DISABLE_INTERRUPT(mask) (((mask) << 16) & 0xffff0000)
+#define MVPP2_ISR_RX_TX_CAUSE_REG(port) (0x5480 + 4 * (port))
+#define MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(version) \
+ ((version) == MVPP21 ? 0xffff : 0xff)
+#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK 0xff0000
+#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET 16
+#define MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK BIT(24)
+#define MVPP2_CAUSE_FCS_ERR_MASK BIT(25)
+#define MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK BIT(26)
+#define MVPP2_CAUSE_TX_EXCEPTION_SUM_MASK BIT(29)
+#define MVPP2_CAUSE_RX_EXCEPTION_SUM_MASK BIT(30)
+#define MVPP2_CAUSE_MISC_SUM_MASK BIT(31)
+#define MVPP2_ISR_RX_TX_MASK_REG(port) (0x54a0 + 4 * (port))
+#define MVPP2_ISR_PON_RX_TX_MASK_REG 0x54bc
+#define MVPP2_PON_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff
+#define MVPP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK 0x3fc00000
+#define MVPP2_PON_CAUSE_MISC_SUM_MASK BIT(31)
+#define MVPP2_ISR_MISC_CAUSE_REG 0x55b0
+#define MVPP2_ISR_RX_ERR_CAUSE_REG(port) (0x5520 + 4 * (port))
+#define MVPP2_ISR_RX_ERR_CAUSE_NONOCC_MASK 0x00ff
+
+/* Buffer Manager registers */
+#define MVPP2_BM_POOL_BASE_REG(pool) (0x6000 + ((pool) * 4))
+#define MVPP2_BM_POOL_BASE_ADDR_MASK 0xfffff80
+#define MVPP2_BM_POOL_SIZE_REG(pool) (0x6040 + ((pool) * 4))
+#define MVPP2_BM_POOL_SIZE_MASK 0xfff0
+#define MVPP2_BM_POOL_READ_PTR_REG(pool) (0x6080 + ((pool) * 4))
+#define MVPP2_BM_POOL_GET_READ_PTR_MASK 0xfff0
+#define MVPP2_BM_POOL_PTRS_NUM_REG(pool) (0x60c0 + ((pool) * 4))
+#define MVPP2_BM_POOL_PTRS_NUM_MASK 0xfff0
+#define MVPP2_BM_BPPI_READ_PTR_REG(pool) (0x6100 + ((pool) * 4))
+#define MVPP2_BM_BPPI_PTRS_NUM_REG(pool) (0x6140 + ((pool) * 4))
+#define MVPP2_BM_BPPI_PTR_NUM_MASK 0x7ff
+#define MVPP22_BM_POOL_PTRS_NUM_MASK 0xfff8
+#define MVPP2_BM_BPPI_PREFETCH_FULL_MASK BIT(16)
+#define MVPP2_BM_POOL_CTRL_REG(pool) (0x6200 + ((pool) * 4))
+#define MVPP2_BM_START_MASK BIT(0)
+#define MVPP2_BM_STOP_MASK BIT(1)
+#define MVPP2_BM_STATE_MASK BIT(4)
+#define MVPP2_BM_LOW_THRESH_OFFS 8
+#define MVPP2_BM_LOW_THRESH_MASK 0x7f00
+#define MVPP2_BM_LOW_THRESH_VALUE(val) ((val) << \
+ MVPP2_BM_LOW_THRESH_OFFS)
+#define MVPP2_BM_HIGH_THRESH_OFFS 16
+#define MVPP2_BM_HIGH_THRESH_MASK 0x7f0000
+#define MVPP2_BM_HIGH_THRESH_VALUE(val) ((val) << \
+ MVPP2_BM_HIGH_THRESH_OFFS)
+#define MVPP2_BM_BPPI_HIGH_THRESH 0x1E
+#define MVPP2_BM_BPPI_LOW_THRESH 0x1C
+#define MVPP23_BM_BPPI_HIGH_THRESH 0x34
+#define MVPP23_BM_BPPI_LOW_THRESH 0x28
+#define MVPP2_BM_INTR_CAUSE_REG(pool) (0x6240 + ((pool) * 4))
+#define MVPP2_BM_RELEASED_DELAY_MASK BIT(0)
+#define MVPP2_BM_ALLOC_FAILED_MASK BIT(1)
+#define MVPP2_BM_BPPE_EMPTY_MASK BIT(2)
+#define MVPP2_BM_BPPE_FULL_MASK BIT(3)
+#define MVPP2_BM_AVAILABLE_BP_LOW_MASK BIT(4)
+#define MVPP2_BM_INTR_MASK_REG(pool) (0x6280 + ((pool) * 4))
+#define MVPP2_BM_PHY_ALLOC_REG(pool) (0x6400 + ((pool) * 4))
+#define MVPP2_BM_PHY_ALLOC_GRNTD_MASK BIT(0)
+#define MVPP2_BM_VIRT_ALLOC_REG 0x6440
+#define MVPP22_BM_ADDR_HIGH_ALLOC 0x6444
+#define MVPP22_BM_ADDR_HIGH_PHYS_MASK 0xff
+#define MVPP22_BM_ADDR_HIGH_VIRT_MASK 0xff00
+#define MVPP22_BM_ADDR_HIGH_VIRT_SHIFT 8
+#define MVPP2_BM_PHY_RLS_REG(pool) (0x6480 + ((pool) * 4))
+#define MVPP2_BM_PHY_RLS_MC_BUFF_MASK BIT(0)
+#define MVPP2_BM_PHY_RLS_PRIO_EN_MASK BIT(1)
+#define MVPP2_BM_PHY_RLS_GRNTD_MASK BIT(2)
+#define MVPP2_BM_VIRT_RLS_REG 0x64c0
+#define MVPP22_BM_ADDR_HIGH_RLS_REG 0x64c4
+#define MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK 0xff
+#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK 0xff00
+#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT 8
+
+/* Packet Processor per-port counters */
+#define MVPP2_OVERRUN_ETH_DROP 0x7000
+#define MVPP2_CLS_ETH_DROP 0x7020
+
+#define MVPP22_BM_POOL_BASE_ADDR_HIGH_REG 0x6310
+#define MVPP22_BM_POOL_BASE_ADDR_HIGH_MASK 0xff
+#define MVPP23_BM_8POOL_MODE BIT(8)
+
+/* Hit counters registers */
+#define MVPP2_CTRS_IDX 0x7040
+#define MVPP22_CTRS_TX_CTR(port, txq) ((txq) | ((port) << 3) | BIT(7))
+#define MVPP2_TX_DESC_ENQ_CTR 0x7100
+#define MVPP2_TX_DESC_ENQ_TO_DDR_CTR 0x7104
+#define MVPP2_TX_BUFF_ENQ_TO_DDR_CTR 0x7108
+#define MVPP2_TX_DESC_ENQ_HW_FWD_CTR 0x710c
+#define MVPP2_RX_DESC_ENQ_CTR 0x7120
+#define MVPP2_TX_PKTS_DEQ_CTR 0x7130
+#define MVPP2_TX_PKTS_FULL_QUEUE_DROP_CTR 0x7200
+#define MVPP2_TX_PKTS_EARLY_DROP_CTR 0x7204
+#define MVPP2_TX_PKTS_BM_DROP_CTR 0x7208
+#define MVPP2_TX_PKTS_BM_MC_DROP_CTR 0x720c
+#define MVPP2_RX_PKTS_FULL_QUEUE_DROP_CTR 0x7220
+#define MVPP2_RX_PKTS_EARLY_DROP_CTR 0x7224
+#define MVPP2_RX_PKTS_BM_DROP_CTR 0x7228
+#define MVPP2_CLS_DEC_TBL_HIT_CTR 0x7700
+#define MVPP2_CLS_FLOW_TBL_HIT_CTR 0x7704
+
+/* TX Scheduler registers */
+#define MVPP2_TXP_SCHED_PORT_INDEX_REG 0x8000
+#define MVPP2_TXP_SCHED_Q_CMD_REG 0x8004
+#define MVPP2_TXP_SCHED_ENQ_MASK 0xff
+#define MVPP2_TXP_SCHED_DISQ_OFFSET 8
+#define MVPP2_TXP_SCHED_CMD_1_REG 0x8010
+#define MVPP2_TXP_SCHED_FIXED_PRIO_REG 0x8014
+#define MVPP2_TXP_SCHED_PERIOD_REG 0x8018
+#define MVPP2_TXP_SCHED_MTU_REG 0x801c
+#define MVPP2_TXP_MTU_MAX 0x7FFFF
+#define MVPP2_TXP_SCHED_REFILL_REG 0x8020
+#define MVPP2_TXP_REFILL_TOKENS_ALL_MASK 0x7ffff
+#define MVPP2_TXP_REFILL_PERIOD_ALL_MASK 0x3ff00000
+#define MVPP2_TXP_REFILL_PERIOD_MASK(v) ((v) << 20)
+#define MVPP2_TXP_SCHED_TOKEN_SIZE_REG 0x8024
+#define MVPP2_TXP_TOKEN_SIZE_MAX 0xffffffff
+#define MVPP2_TXQ_SCHED_REFILL_REG(q) (0x8040 + ((q) << 2))
+#define MVPP2_TXQ_REFILL_TOKENS_ALL_MASK 0x7ffff
+#define MVPP2_TXQ_REFILL_PERIOD_ALL_MASK 0x3ff00000
+#define MVPP2_TXQ_REFILL_PERIOD_MASK(v) ((v) << 20)
+#define MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(q) (0x8060 + ((q) << 2))
+#define MVPP2_TXQ_TOKEN_SIZE_MAX 0x7fffffff
+#define MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(q) (0x8080 + ((q) << 2))
+#define MVPP2_TXQ_TOKEN_CNTR_MAX 0xffffffff
+
+/* TX general registers */
+#define MVPP2_TX_SNOOP_REG 0x8800
+#define MVPP2_TX_PORT_FLUSH_REG 0x8810
+#define MVPP2_TX_PORT_FLUSH_MASK(port) (1 << (port))
+
+/* LMS registers */
+#define MVPP2_SRC_ADDR_MIDDLE 0x24
+#define MVPP2_SRC_ADDR_HIGH 0x28
+#define MVPP2_PHY_AN_CFG0_REG 0x34
+#define MVPP2_PHY_AN_STOP_SMI0_MASK BIT(7)
+#define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG 0x305c
+#define MVPP2_EXT_GLOBAL_CTRL_DEFAULT 0x27
+
+/* Per-port registers */
+#define MVPP2_GMAC_CTRL_0_REG 0x0
+#define MVPP2_GMAC_PORT_EN_MASK BIT(0)
+#define MVPP2_GMAC_PORT_TYPE_MASK BIT(1)
+#define MVPP2_GMAC_MAX_RX_SIZE_OFFS 2
+#define MVPP2_GMAC_MAX_RX_SIZE_MASK 0x7ffc
+#define MVPP2_GMAC_MIB_CNTR_EN_MASK BIT(15)
+#define MVPP2_GMAC_CTRL_1_REG 0x4
+#define MVPP2_GMAC_PERIODIC_XON_EN_MASK BIT(1)
+#define MVPP2_GMAC_GMII_LB_EN_MASK BIT(5)
+#define MVPP2_GMAC_PCS_LB_EN_BIT 6
+#define MVPP2_GMAC_PCS_LB_EN_MASK BIT(6)
+#define MVPP2_GMAC_SA_LOW_OFFS 7
+#define MVPP2_GMAC_CTRL_2_REG 0x8
+#define MVPP2_GMAC_INBAND_AN_MASK BIT(0)
+#define MVPP2_GMAC_FLOW_CTRL_MASK GENMASK(2, 1)
+#define MVPP2_GMAC_PCS_ENABLE_MASK BIT(3)
+#define MVPP2_GMAC_INTERNAL_CLK_MASK BIT(4)
+#define MVPP2_GMAC_DISABLE_PADDING BIT(5)
+#define MVPP2_GMAC_PORT_RESET_MASK BIT(6)
+#define MVPP2_GMAC_AUTONEG_CONFIG 0xc
+#define MVPP2_GMAC_FORCE_LINK_DOWN BIT(0)
+#define MVPP2_GMAC_FORCE_LINK_PASS BIT(1)
+#define MVPP2_GMAC_IN_BAND_AUTONEG BIT(2)
+#define MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS BIT(3)
+#define MVPP2_GMAC_IN_BAND_RESTART_AN BIT(4)
+#define MVPP2_GMAC_CONFIG_MII_SPEED BIT(5)
+#define MVPP2_GMAC_CONFIG_GMII_SPEED BIT(6)
+#define MVPP2_GMAC_AN_SPEED_EN BIT(7)
+#define MVPP2_GMAC_FC_ADV_EN BIT(9)
+#define MVPP2_GMAC_FC_ADV_ASM_EN BIT(10)
+#define MVPP2_GMAC_FLOW_CTRL_AUTONEG BIT(11)
+#define MVPP2_GMAC_CONFIG_FULL_DUPLEX BIT(12)
+#define MVPP2_GMAC_AN_DUPLEX_EN BIT(13)
+#define MVPP2_GMAC_STATUS0 0x10
+#define MVPP2_GMAC_STATUS0_LINK_UP BIT(0)
+#define MVPP2_GMAC_STATUS0_GMII_SPEED BIT(1)
+#define MVPP2_GMAC_STATUS0_MII_SPEED BIT(2)
+#define MVPP2_GMAC_STATUS0_FULL_DUPLEX BIT(3)
+#define MVPP2_GMAC_STATUS0_RX_PAUSE BIT(4)
+#define MVPP2_GMAC_STATUS0_TX_PAUSE BIT(5)
+#define MVPP2_GMAC_STATUS0_AN_COMPLETE BIT(11)
+#define MVPP2_GMAC_PORT_FIFO_CFG_1_REG 0x1c
+#define MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS 6
+#define MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK 0x1fc0
+#define MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v) (((v) << 6) & \
+ MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK)
+#define MVPP22_GMAC_INT_STAT 0x20
+#define MVPP22_GMAC_INT_STAT_LINK BIT(1)
+#define MVPP22_GMAC_INT_MASK 0x24
+#define MVPP22_GMAC_INT_MASK_LINK_STAT BIT(1)
+#define MVPP22_GMAC_CTRL_4_REG 0x90
+#define MVPP22_CTRL4_EXT_PIN_GMII_SEL BIT(0)
+#define MVPP22_CTRL4_RX_FC_EN BIT(3)
+#define MVPP22_CTRL4_TX_FC_EN BIT(4)
+#define MVPP22_CTRL4_DP_CLK_SEL BIT(5)
+#define MVPP22_CTRL4_SYNC_BYPASS_DIS BIT(6)
+#define MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE BIT(7)
+#define MVPP22_GMAC_INT_SUM_STAT 0xa0
+#define MVPP22_GMAC_INT_SUM_STAT_INTERNAL BIT(1)
+#define MVPP22_GMAC_INT_SUM_STAT_PTP BIT(2)
+#define MVPP22_GMAC_INT_SUM_MASK 0xa4
+#define MVPP22_GMAC_INT_SUM_MASK_LINK_STAT BIT(1)
+#define MVPP22_GMAC_INT_SUM_MASK_PTP BIT(2)
+
+/* Per-port XGMAC registers. PPv2.2 and PPv2.3, only for GOP port 0,
+ * relative to port->base.
+ */
+#define MVPP22_XLG_CTRL0_REG 0x100
+#define MVPP22_XLG_CTRL0_PORT_EN BIT(0)
+#define MVPP22_XLG_CTRL0_MAC_RESET_DIS BIT(1)
+#define MVPP22_XLG_CTRL0_FORCE_LINK_DOWN BIT(2)
+#define MVPP22_XLG_CTRL0_FORCE_LINK_PASS BIT(3)
+#define MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN BIT(7)
+#define MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN BIT(8)
+#define MVPP22_XLG_CTRL0_MIB_CNT_DIS BIT(14)
+#define MVPP22_XLG_CTRL1_REG 0x104
+#define MVPP22_XLG_CTRL1_FRAMESIZELIMIT_OFFS 0
+#define MVPP22_XLG_CTRL1_FRAMESIZELIMIT_MASK 0x1fff
+#define MVPP22_XLG_STATUS 0x10c
+#define MVPP22_XLG_STATUS_LINK_UP BIT(0)
+#define MVPP22_XLG_INT_STAT 0x114
+#define MVPP22_XLG_INT_STAT_LINK BIT(1)
+#define MVPP22_XLG_INT_MASK 0x118
+#define MVPP22_XLG_INT_MASK_LINK BIT(1)
+#define MVPP22_XLG_CTRL3_REG 0x11c
+#define MVPP22_XLG_CTRL3_MACMODESELECT_MASK (7 << 13)
+#define MVPP22_XLG_CTRL3_MACMODESELECT_GMAC (0 << 13)
+#define MVPP22_XLG_CTRL3_MACMODESELECT_10G (1 << 13)
+#define MVPP22_XLG_EXT_INT_STAT 0x158
+#define MVPP22_XLG_EXT_INT_STAT_XLG BIT(1)
+#define MVPP22_XLG_EXT_INT_STAT_PTP BIT(7)
+#define MVPP22_XLG_EXT_INT_MASK 0x15c
+#define MVPP22_XLG_EXT_INT_MASK_XLG BIT(1)
+#define MVPP22_XLG_EXT_INT_MASK_GIG BIT(2)
+#define MVPP22_XLG_EXT_INT_MASK_PTP BIT(7)
+#define MVPP22_XLG_CTRL4_REG 0x184
+#define MVPP22_XLG_CTRL4_FWD_FC BIT(5)
+#define MVPP22_XLG_CTRL4_FWD_PFC BIT(6)
+#define MVPP22_XLG_CTRL4_MACMODSELECT_GMAC BIT(12)
+#define MVPP22_XLG_CTRL4_EN_IDLE_CHECK BIT(14)
+
+/* SMI registers. PPv2.2 and PPv2.3, relative to priv->iface_base. */
+#define MVPP22_SMI_MISC_CFG_REG 0x1204
+#define MVPP22_SMI_POLLING_EN BIT(10)
+
+/* TAI registers, PPv2.2 only, relative to priv->iface_base */
+#define MVPP22_TAI_INT_CAUSE 0x1400
+#define MVPP22_TAI_INT_MASK 0x1404
+#define MVPP22_TAI_CR0 0x1408
+#define MVPP22_TAI_CR1 0x140c
+#define MVPP22_TAI_TCFCR0 0x1410
+#define MVPP22_TAI_TCFCR1 0x1414
+#define MVPP22_TAI_TCFCR2 0x1418
+#define MVPP22_TAI_FATWR 0x141c
+#define MVPP22_TAI_TOD_STEP_NANO_CR 0x1420
+#define MVPP22_TAI_TOD_STEP_FRAC_HIGH 0x1424
+#define MVPP22_TAI_TOD_STEP_FRAC_LOW 0x1428
+#define MVPP22_TAI_TAPDC_HIGH 0x142c
+#define MVPP22_TAI_TAPDC_LOW 0x1430
+#define MVPP22_TAI_TGTOD_SEC_HIGH 0x1434
+#define MVPP22_TAI_TGTOD_SEC_MED 0x1438
+#define MVPP22_TAI_TGTOD_SEC_LOW 0x143c
+#define MVPP22_TAI_TGTOD_NANO_HIGH 0x1440
+#define MVPP22_TAI_TGTOD_NANO_LOW 0x1444
+#define MVPP22_TAI_TGTOD_FRAC_HIGH 0x1448
+#define MVPP22_TAI_TGTOD_FRAC_LOW 0x144c
+#define MVPP22_TAI_TLV_SEC_HIGH 0x1450
+#define MVPP22_TAI_TLV_SEC_MED 0x1454
+#define MVPP22_TAI_TLV_SEC_LOW 0x1458
+#define MVPP22_TAI_TLV_NANO_HIGH 0x145c
+#define MVPP22_TAI_TLV_NANO_LOW 0x1460
+#define MVPP22_TAI_TLV_FRAC_HIGH 0x1464
+#define MVPP22_TAI_TLV_FRAC_LOW 0x1468
+#define MVPP22_TAI_TCV0_SEC_HIGH 0x146c
+#define MVPP22_TAI_TCV0_SEC_MED 0x1470
+#define MVPP22_TAI_TCV0_SEC_LOW 0x1474
+#define MVPP22_TAI_TCV0_NANO_HIGH 0x1478
+#define MVPP22_TAI_TCV0_NANO_LOW 0x147c
+#define MVPP22_TAI_TCV0_FRAC_HIGH 0x1480
+#define MVPP22_TAI_TCV0_FRAC_LOW 0x1484
+#define MVPP22_TAI_TCV1_SEC_HIGH 0x1488
+#define MVPP22_TAI_TCV1_SEC_MED 0x148c
+#define MVPP22_TAI_TCV1_SEC_LOW 0x1490
+#define MVPP22_TAI_TCV1_NANO_HIGH 0x1494
+#define MVPP22_TAI_TCV1_NANO_LOW 0x1498
+#define MVPP22_TAI_TCV1_FRAC_HIGH 0x149c
+#define MVPP22_TAI_TCV1_FRAC_LOW 0x14a0
+#define MVPP22_TAI_TCSR 0x14a4
+#define MVPP22_TAI_TUC_LSB 0x14a8
+#define MVPP22_TAI_GFM_SEC_HIGH 0x14ac
+#define MVPP22_TAI_GFM_SEC_MED 0x14b0
+#define MVPP22_TAI_GFM_SEC_LOW 0x14b4
+#define MVPP22_TAI_GFM_NANO_HIGH 0x14b8
+#define MVPP22_TAI_GFM_NANO_LOW 0x14bc
+#define MVPP22_TAI_GFM_FRAC_HIGH 0x14c0
+#define MVPP22_TAI_GFM_FRAC_LOW 0x14c4
+#define MVPP22_TAI_PCLK_DA_HIGH 0x14c8
+#define MVPP22_TAI_PCLK_DA_LOW 0x14cc
+#define MVPP22_TAI_CTCR 0x14d0
+#define MVPP22_TAI_PCLK_CCC_HIGH 0x14d4
+#define MVPP22_TAI_PCLK_CCC_LOW 0x14d8
+#define MVPP22_TAI_DTC_HIGH 0x14dc
+#define MVPP22_TAI_DTC_LOW 0x14e0
+#define MVPP22_TAI_CCC_HIGH 0x14e4
+#define MVPP22_TAI_CCC_LOW 0x14e8
+#define MVPP22_TAI_ICICE 0x14f4
+#define MVPP22_TAI_ICICC_LOW 0x14f8
+#define MVPP22_TAI_TUC_MSB 0x14fc
+
+#define MVPP22_GMAC_BASE(port) (0x7000 + (port) * 0x1000 + 0xe00)
+
+#define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff
+
+/* Descriptor ring Macros */
+#define MVPP2_QUEUE_NEXT_DESC(q, index) \
+ (((index) < (q)->last_desc) ? ((index) + 1) : 0)
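+
+/* For example, on a ring with last_desc = 255 this advances
+ * 253 -> 254 -> 255 -> 0, wrapping around without a modulo operation.
+ */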
+
+/* MPCS registers. PPv2.2 and PPv2.3 */
+#define MVPP22_MPCS_BASE(port) (0x7000 + (port) * 0x1000)
+#define MVPP22_MPCS_CTRL 0x14
+#define MVPP22_MPCS_CTRL_FWD_ERR_CONN BIT(10)
+#define MVPP22_MPCS_CLK_RESET 0x14c
+#define MAC_CLK_RESET_SD_TX BIT(0)
+#define MAC_CLK_RESET_SD_RX BIT(1)
+#define MAC_CLK_RESET_MAC BIT(2)
+#define MVPP22_MPCS_CLK_RESET_DIV_RATIO(n) ((n) << 4)
+#define MVPP22_MPCS_CLK_RESET_DIV_SET BIT(11)
+
+/* FCA registers. PPv2.2 and PPv2.3 */
+#define MVPP22_FCA_BASE(port) (0x7600 + (port) * 0x1000)
+#define MVPP22_FCA_REG_SIZE 16
+#define MVPP22_FCA_REG_MASK 0xFFFF
+#define MVPP22_FCA_CONTROL_REG 0x0
+#define MVPP22_FCA_ENABLE_PERIODIC BIT(11)
+#define MVPP22_PERIODIC_COUNTER_LSB_REG (0x110)
+#define MVPP22_PERIODIC_COUNTER_MSB_REG (0x114)
+
+/* XPCS registers. PPv2.2 and PPv2.3 */
+#define MVPP22_XPCS_BASE(port) (0x7400 + (port) * 0x1000)
+#define MVPP22_XPCS_CFG0 0x0
+#define MVPP22_XPCS_CFG0_RESET_DIS BIT(0)
+#define MVPP22_XPCS_CFG0_PCS_MODE(n) ((n) << 3)
+#define MVPP22_XPCS_CFG0_ACTIVE_LANE(n) ((n) << 5)
+
+/* PTP registers. PPv2.2 only */
+#define MVPP22_PTP_BASE(port) (0x7800 + (port * 0x1000))
+#define MVPP22_PTP_INT_CAUSE 0x00
+#define MVPP22_PTP_INT_CAUSE_QUEUE1 BIT(6)
+#define MVPP22_PTP_INT_CAUSE_QUEUE0 BIT(5)
+#define MVPP22_PTP_INT_MASK 0x04
+#define MVPP22_PTP_INT_MASK_QUEUE1 BIT(6)
+#define MVPP22_PTP_INT_MASK_QUEUE0 BIT(5)
+#define MVPP22_PTP_GCR 0x08
+#define MVPP22_PTP_GCR_RX_RESET BIT(13)
+#define MVPP22_PTP_GCR_TX_RESET BIT(1)
+#define MVPP22_PTP_GCR_TSU_ENABLE BIT(0)
+#define MVPP22_PTP_TX_Q0_R0 0x0c
+#define MVPP22_PTP_TX_Q0_R1 0x10
+#define MVPP22_PTP_TX_Q0_R2 0x14
+#define MVPP22_PTP_TX_Q1_R0 0x18
+#define MVPP22_PTP_TX_Q1_R1 0x1c
+#define MVPP22_PTP_TX_Q1_R2 0x20
+#define MVPP22_PTP_TPCR 0x24
+#define MVPP22_PTP_V1PCR 0x28
+#define MVPP22_PTP_V2PCR 0x2c
+#define MVPP22_PTP_Y1731PCR 0x30
+#define MVPP22_PTP_NTPTSPCR 0x34
+#define MVPP22_PTP_NTPRXPCR 0x38
+#define MVPP22_PTP_NTPTXPCR 0x3c
+#define MVPP22_PTP_WAMPPCR 0x40
+#define MVPP22_PTP_NAPCR 0x44
+#define MVPP22_PTP_FAPCR 0x48
+#define MVPP22_PTP_CAPCR 0x50
+#define MVPP22_PTP_ATAPCR 0x54
+#define MVPP22_PTP_ACTAPCR 0x58
+#define MVPP22_PTP_CATAPCR 0x5c
+#define MVPP22_PTP_CACTAPCR 0x60
+#define MVPP22_PTP_AITAPCR 0x64
+#define MVPP22_PTP_CAITAPCR 0x68
+#define MVPP22_PTP_CITAPCR 0x6c
+#define MVPP22_PTP_NTP_OFF_HIGH 0x70
+#define MVPP22_PTP_NTP_OFF_LOW 0x74
+#define MVPP22_PTP_TX_PIPE_STATUS_DELAY 0x78
+
+/* System controller registers. Accessed through a regmap. */
+#define GENCONF_SOFT_RESET1 0x1108
+#define GENCONF_SOFT_RESET1_GOP BIT(6)
+#define GENCONF_PORT_CTRL0 0x1110
+#define GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT BIT(1)
+#define GENCONF_PORT_CTRL0_RX_DATA_SAMPLE BIT(29)
+#define GENCONF_PORT_CTRL0_CLK_DIV_PHASE_CLR BIT(31)
+#define GENCONF_PORT_CTRL1 0x1114
+#define GENCONF_PORT_CTRL1_EN(p) BIT(p)
+#define GENCONF_PORT_CTRL1_RESET(p) (BIT(p) << 28)
+#define GENCONF_CTRL0 0x1120
+#define GENCONF_CTRL0_PORT2_RGMII BIT(0)
+#define GENCONF_CTRL0_PORT3_RGMII_MII BIT(1)
+#define GENCONF_CTRL0_PORT3_RGMII BIT(2)
+
+/* Various constants */
+
+/* Coalescing */
+#define MVPP2_TXDONE_COAL_PKTS_THRESH 64
+#define MVPP2_TXDONE_HRTIMER_PERIOD_NS 1000000UL
+#define MVPP2_TXDONE_COAL_USEC 1000
+#define MVPP2_RX_COAL_PKTS 32
+#define MVPP2_RX_COAL_USEC 64
+
+/* The two-byte Marvell header. Either contains a special value used
+ * by Marvell switches when a specific hardware mode is enabled (not
+ * supported by this driver) or is filled automatically with zeroes on
+ * the RX side. Since those two bytes sit at the front of the Ethernet
+ * header, they allow the IP header to be aligned on a 4-byte boundary
+ * automatically: the hardware skips those two bytes on its own.
+ */
+#define MVPP2_MH_SIZE 2
+#define MVPP2_ETH_TYPE_LEN 2
+#define MVPP2_PPPOE_HDR_SIZE 8
+#define MVPP2_VLAN_TAG_LEN 4
+#define MVPP2_VLAN_TAG_EDSA_LEN 8
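+
+/* With the standard 14-byte Ethernet header, the 2-byte Marvell header
+ * makes the L2 prefix 2 + 14 = 16 bytes, a multiple of 4, which is how
+ * the IP header ends up 4-byte aligned as described above.
+ */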
+
+/* Lbtd 802.3 type */
+#define MVPP2_IP_LBDT_TYPE 0xfffa
+
+#define MVPP2_TX_CSUM_MAX_SIZE 9800
+
+/* Timeout constants */
+#define MVPP2_TX_DISABLE_TIMEOUT_MSEC 1000
+#define MVPP2_TX_PENDING_TIMEOUT_MSEC 1000
+
+#define MVPP2_TX_MTU_MAX 0x7ffff
+
+/* Maximum number of T-CONTs of PON port */
+#define MVPP2_MAX_TCONT 16
+
+/* Maximum number of supported ports */
+#define MVPP2_MAX_PORTS 4
+
+/* Loopback port index */
+#define MVPP2_LOOPBACK_PORT_INDEX 3
+
+/* Maximum number of TXQs used by single port */
+#define MVPP2_MAX_TXQ 8
+
+/* MVPP2_MAX_TSO_SEGS is the maximum number of fragments to allow in the GSO
+ * skb. As we need a maximum of two descriptors per fragment (1 header, 1 data),
+ * multiply this value by two to count the maximum number of skb descs needed.
+ */
+#define MVPP2_MAX_TSO_SEGS 300
+#define MVPP2_MAX_SKB_DESCS (MVPP2_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
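+
+/* Worked example: with MAX_SKB_FRAGS at its common default of 17
+ * (CONFIG_MAX_SKB_FRAGS), this reserves up to 300 * 2 + 17 = 617
+ * descriptors for a single TSO skb in the worst case.
+ */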
+
+/* Max number of RXQs per port */
+#define MVPP2_PORT_MAX_RXQ 32
+
+/* Max number of Rx descriptors */
+#define MVPP2_MAX_RXD_MAX 2048
+#define MVPP2_MAX_RXD_DFLT 1024
+
+/* Max number of Tx descriptors */
+#define MVPP2_MAX_TXD_MAX 2048
+#define MVPP2_MAX_TXD_DFLT 1024
+
+/* Number of Tx descriptors that can be reserved at once by a CPU */
+#define MVPP2_CPU_DESC_CHUNK 64
+
+/* Max number of Tx descriptors in each aggregated queue */
+#define MVPP2_AGGR_TXQ_SIZE 256
+
+/* Descriptor aligned size */
+#define MVPP2_DESC_ALIGNED_SIZE 32
+
+/* Descriptor alignment mask */
+#define MVPP2_TX_DESC_ALIGN (MVPP2_DESC_ALIGNED_SIZE - 1)
+
+/* RX FIFO constants */
+#define MVPP2_RX_FIFO_PORT_DATA_SIZE_44KB 0xb000
+#define MVPP2_RX_FIFO_PORT_DATA_SIZE_32KB 0x8000
+#define MVPP2_RX_FIFO_PORT_DATA_SIZE_8KB 0x2000
+#define MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB 0x1000
+#define MVPP2_RX_FIFO_PORT_ATTR_SIZE(data_size) ((data_size) >> 6)
+#define MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB 0x40
+#define MVPP2_RX_FIFO_PORT_MIN_PKT 0x80
+
+/* TX FIFO constants */
+#define MVPP22_TX_FIFO_DATA_SIZE_18KB 18
+#define MVPP22_TX_FIFO_DATA_SIZE_10KB 10
+#define MVPP22_TX_FIFO_DATA_SIZE_1KB 1
+#define MVPP2_TX_FIFO_THRESHOLD_MIN 256 /* Bytes */
+#define MVPP2_TX_FIFO_THRESHOLD(kb) \
+ ((kb) * 1024 - MVPP2_TX_FIFO_THRESHOLD_MIN)
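+
+/* For instance, a 10 KB TX FIFO gives MVPP2_TX_FIFO_THRESHOLD(10) =
+ * 10 * 1024 - 256 = 9984 bytes.
+ */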
+
+/* RX FIFO threshold in 1KB granularity */
+#define MVPP23_PORT0_FIFO_TRSH (9 * 1024)
+#define MVPP23_PORT1_FIFO_TRSH (4 * 1024)
+#define MVPP23_PORT2_FIFO_TRSH (2 * 1024)
+
+/* RX Flow Control Registers */
+#define MVPP2_RX_FC_REG(port) (0x150 + 4 * (port))
+#define MVPP2_RX_FC_EN BIT(24)
+#define MVPP2_RX_FC_TRSH_OFFS 16
+#define MVPP2_RX_FC_TRSH_MASK (0xFF << MVPP2_RX_FC_TRSH_OFFS)
+#define MVPP2_RX_FC_TRSH_UNIT 256
+
+/* MSS Flow control */
+#define MSS_FC_COM_REG 0
+#define FLOW_CONTROL_ENABLE_BIT BIT(0)
+#define FLOW_CONTROL_UPDATE_COMMAND_BIT BIT(31)
+#define FC_QUANTA 0xFFFF
+#define FC_CLK_DIVIDER 100
+
+#define MSS_RXQ_TRESH_BASE 0x200
+#define MSS_RXQ_TRESH_OFFS 4
+#define MSS_RXQ_TRESH_REG(q, fq) (MSS_RXQ_TRESH_BASE + (((q) + (fq)) \
+ * MSS_RXQ_TRESH_OFFS))
+
+#define MSS_BUF_POOL_BASE 0x40
+#define MSS_BUF_POOL_OFFS 4
+#define MSS_BUF_POOL_REG(id) (MSS_BUF_POOL_BASE \
+ + (id) * MSS_BUF_POOL_OFFS)
+
+#define MSS_BUF_POOL_STOP_MASK 0xFFF
+#define MSS_BUF_POOL_START_MASK (0xFFF << MSS_BUF_POOL_START_OFFS)
+#define MSS_BUF_POOL_START_OFFS 12
+#define MSS_BUF_POOL_PORTS_MASK (0xF << MSS_BUF_POOL_PORTS_OFFS)
+#define MSS_BUF_POOL_PORTS_OFFS 24
+#define MSS_BUF_POOL_PORT_OFFS(id) (0x1 << \
+ ((id) + MSS_BUF_POOL_PORTS_OFFS))
+
+#define MSS_RXQ_TRESH_START_MASK 0xFFFF
+#define MSS_RXQ_TRESH_STOP_MASK (0xFFFF << MSS_RXQ_TRESH_STOP_OFFS)
+#define MSS_RXQ_TRESH_STOP_OFFS 16
+
+#define MSS_RXQ_ASS_BASE 0x80
+#define MSS_RXQ_ASS_OFFS 4
+#define MSS_RXQ_ASS_PER_REG 4
+#define MSS_RXQ_ASS_PER_OFFS 8
+#define MSS_RXQ_ASS_PORTID_OFFS 0
+#define MSS_RXQ_ASS_PORTID_MASK 0x3
+#define MSS_RXQ_ASS_HOSTID_OFFS 2
+#define MSS_RXQ_ASS_HOSTID_MASK 0x3F
+
+#define MSS_RXQ_ASS_Q_BASE(q, fq) ((((q) + (fq)) % MSS_RXQ_ASS_PER_REG) \
+ * MSS_RXQ_ASS_PER_OFFS)
+#define MSS_RXQ_ASS_PQ_BASE(q, fq) ((((q) + (fq)) / MSS_RXQ_ASS_PER_REG) \
+ * MSS_RXQ_ASS_OFFS)
+#define MSS_RXQ_ASS_REG(q, fq) (MSS_RXQ_ASS_BASE + MSS_RXQ_ASS_PQ_BASE(q, fq))
+
+#define MSS_THRESHOLD_STOP 768
+#define MSS_THRESHOLD_START 1024
+#define MSS_FC_MAX_TIMEOUT 5000
+
+/* RX buffer constants */
+#define MVPP2_SKB_SHINFO_SIZE \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
+
+#define MVPP2_RX_PKT_SIZE(mtu) \
+ ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \
+ ETH_HLEN + ETH_FCS_LEN, cache_line_size())
+
+#define MVPP2_RX_BUF_SIZE(pkt_size) ((pkt_size) + MVPP2_SKB_HEADROOM)
+#define MVPP2_RX_TOTAL_SIZE(buf_size) ((buf_size) + MVPP2_SKB_SHINFO_SIZE)
+#define MVPP2_RX_MAX_PKT_SIZE(total_size) \
+ ((total_size) - MVPP2_SKB_HEADROOM - MVPP2_SKB_SHINFO_SIZE)
+
+#define MVPP2_MAX_RX_BUF_SIZE (PAGE_SIZE - MVPP2_SKB_SHINFO_SIZE - MVPP2_SKB_HEADROOM)
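+
+/* A rough sketch of how the sizing macros above chain together (local
+ * names are illustrative, "mtu" is a caller-provided value):
+ *
+ *	int pkt_size = MVPP2_RX_PKT_SIZE(mtu);
+ *	int buf_size = MVPP2_RX_BUF_SIZE(pkt_size);
+ *	int total_size = MVPP2_RX_TOTAL_SIZE(buf_size);
+ *
+ * For a 1500-byte MTU and a 64-byte cache line, pkt_size =
+ * ALIGN(1500 + 2 + 4 + 14 + 4, 64) = 1536 bytes.
+ */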
+
+#define MVPP2_BIT_TO_BYTE(bit) ((bit) / 8)
+#define MVPP2_BIT_TO_WORD(bit) ((bit) / 32)
+#define MVPP2_BIT_IN_WORD(bit) ((bit) % 32)
+
+#define MVPP2_N_PRS_FLOWS 52
+#define MVPP2_N_RFS_ENTRIES_PER_FLOW 4
+
+/* There are 7 supported high-level flows */
+#define MVPP2_N_RFS_RULES (MVPP2_N_RFS_ENTRIES_PER_FLOW * 7)
+
+/* RSS constants */
+#define MVPP22_N_RSS_TABLES 8
+#define MVPP22_RSS_TABLE_ENTRIES 32
+
+/* IPv6 max L3 address size */
+#define MVPP2_MAX_L3_ADDR_SIZE 16
+
+/* Port flags */
+#define MVPP2_F_LOOPBACK BIT(0)
+#define MVPP2_F_DT_COMPAT BIT(1)
+
+/* Marvell tag types */
+enum mvpp2_tag_type {
+ MVPP2_TAG_TYPE_NONE = 0,
+ MVPP2_TAG_TYPE_MH = 1,
+ MVPP2_TAG_TYPE_DSA = 2,
+ MVPP2_TAG_TYPE_EDSA = 3,
+ MVPP2_TAG_TYPE_VLAN = 4,
+ MVPP2_TAG_TYPE_LAST = 5
+};
+
+/* L2 cast enum */
+enum mvpp2_prs_l2_cast {
+ MVPP2_PRS_L2_UNI_CAST,
+ MVPP2_PRS_L2_MULTI_CAST,
+};
+
+/* L3 cast enum */
+enum mvpp2_prs_l3_cast {
+ MVPP2_PRS_L3_UNI_CAST,
+ MVPP2_PRS_L3_MULTI_CAST,
+ MVPP2_PRS_L3_BROAD_CAST
+};
+
+/* PTP descriptor constants. The low bits of the descriptor are stored
+ * separately from the high bits.
+ */
+#define MVPP22_PTP_DESC_MASK_LOW 0xfff
+
+/* PTPAction */
+enum mvpp22_ptp_action {
+ MVPP22_PTP_ACTION_NONE = 0,
+ MVPP22_PTP_ACTION_FORWARD = 1,
+ MVPP22_PTP_ACTION_CAPTURE = 3,
+ /* The following have not been verified */
+ MVPP22_PTP_ACTION_ADDTIME = 4,
+ MVPP22_PTP_ACTION_ADDCORRECTEDTIME = 5,
+ MVPP22_PTP_ACTION_CAPTUREADDTIME = 6,
+ MVPP22_PTP_ACTION_CAPTUREADDCORRECTEDTIME = 7,
+ MVPP22_PTP_ACTION_ADDINGRESSTIME = 8,
+ MVPP22_PTP_ACTION_CAPTUREADDINGRESSTIME = 9,
+ MVPP22_PTP_ACTION_CAPTUREINGRESSTIME = 10,
+};
+
+/* PTPPacketFormat */
+enum mvpp22_ptp_packet_format {
+ MVPP22_PTP_PKT_FMT_PTPV2 = 0,
+ MVPP22_PTP_PKT_FMT_PTPV1 = 1,
+ MVPP22_PTP_PKT_FMT_Y1731 = 2,
+ MVPP22_PTP_PKT_FMT_NTPTS = 3,
+ MVPP22_PTP_PKT_FMT_NTPRX = 4,
+ MVPP22_PTP_PKT_FMT_NTPTX = 5,
+ MVPP22_PTP_PKT_FMT_TWAMP = 6,
+};
+
+#define MVPP22_PTP_ACTION(x) (((x) & 15) << 0)
+#define MVPP22_PTP_PACKETFORMAT(x) (((x) & 7) << 4)
+#define MVPP22_PTP_MACTIMESTAMPINGEN BIT(11)
+#define MVPP22_PTP_TIMESTAMPENTRYID(x) (((x) & 31) << 12)
+#define MVPP22_PTP_TIMESTAMPQUEUESELECT BIT(18)
+
+/* BM constants */
+#define MVPP2_BM_JUMBO_BUF_NUM 2048
+#define MVPP2_BM_LONG_BUF_NUM 2048
+#define MVPP2_BM_SHORT_BUF_NUM 2048
+#define MVPP2_BM_POOL_SIZE_MAX (16*1024 - MVPP2_BM_POOL_PTR_ALIGN/4)
+#define MVPP2_BM_POOL_PTR_ALIGN 128
+#define MVPP2_BM_MAX_POOLS 8
+
+/* BM cookie (32 bits) definition */
+#define MVPP2_BM_COOKIE_POOL_OFFS 8
+#define MVPP2_BM_COOKIE_CPU_OFFS 24
+
+#define MVPP2_BM_SHORT_FRAME_SIZE 736 /* frame size 128 */
+#define MVPP2_BM_LONG_FRAME_SIZE 2240 /* frame size 1664 */
+#define MVPP2_BM_JUMBO_FRAME_SIZE 10432 /* frame size 9856 */
+/* BM short pool packet size
+ * These values ensure that for SWF the total number
+ * of bytes allocated for each buffer will be 512
+ */
+#define MVPP2_BM_SHORT_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(MVPP2_BM_SHORT_FRAME_SIZE)
+#define MVPP2_BM_LONG_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(MVPP2_BM_LONG_FRAME_SIZE)
+#define MVPP2_BM_JUMBO_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(MVPP2_BM_JUMBO_FRAME_SIZE)
+
+#define MVPP21_ADDR_SPACE_SZ 0
+#define MVPP22_ADDR_SPACE_SZ SZ_64K
+
+#define MVPP2_MAX_THREADS 9
+#define MVPP2_MAX_QVECS MVPP2_MAX_THREADS
+
+/* GMAC MIB Counters register definitions */
+#define MVPP21_MIB_COUNTERS_OFFSET 0x1000
+#define MVPP21_MIB_COUNTERS_PORT_SZ 0x400
+#define MVPP22_MIB_COUNTERS_OFFSET 0x0
+#define MVPP22_MIB_COUNTERS_PORT_SZ 0x100
+
+#define MVPP2_MIB_GOOD_OCTETS_RCVD 0x0
+#define MVPP2_MIB_BAD_OCTETS_RCVD 0x8
+#define MVPP2_MIB_CRC_ERRORS_SENT 0xc
+#define MVPP2_MIB_UNICAST_FRAMES_RCVD 0x10
+#define MVPP2_MIB_BROADCAST_FRAMES_RCVD 0x18
+#define MVPP2_MIB_MULTICAST_FRAMES_RCVD 0x1c
+#define MVPP2_MIB_FRAMES_64_OCTETS 0x20
+#define MVPP2_MIB_FRAMES_65_TO_127_OCTETS 0x24
+#define MVPP2_MIB_FRAMES_128_TO_255_OCTETS 0x28
+#define MVPP2_MIB_FRAMES_256_TO_511_OCTETS 0x2c
+#define MVPP2_MIB_FRAMES_512_TO_1023_OCTETS 0x30
+#define MVPP2_MIB_FRAMES_1024_TO_MAX_OCTETS 0x34
+#define MVPP2_MIB_GOOD_OCTETS_SENT 0x38
+#define MVPP2_MIB_UNICAST_FRAMES_SENT 0x40
+#define MVPP2_MIB_MULTICAST_FRAMES_SENT 0x48
+#define MVPP2_MIB_BROADCAST_FRAMES_SENT 0x4c
+#define MVPP2_MIB_FC_SENT 0x54
+#define MVPP2_MIB_FC_RCVD 0x58
+#define MVPP2_MIB_RX_FIFO_OVERRUN 0x5c
+#define MVPP2_MIB_UNDERSIZE_RCVD 0x60
+#define MVPP2_MIB_FRAGMENTS_RCVD 0x64
+#define MVPP2_MIB_OVERSIZE_RCVD 0x68
+#define MVPP2_MIB_JABBER_RCVD 0x6c
+#define MVPP2_MIB_MAC_RCV_ERROR 0x70
+#define MVPP2_MIB_BAD_CRC_EVENT 0x74
+#define MVPP2_MIB_COLLISION 0x78
+#define MVPP2_MIB_LATE_COLLISION 0x7c
+
+#define MVPP2_MIB_COUNTERS_STATS_DELAY (1 * HZ)
+
+#define MVPP2_DESC_DMA_MASK DMA_BIT_MASK(40)
+
+/* Buffer header info bits */
+#define MVPP2_B_HDR_INFO_MC_ID_MASK 0xfff
+#define MVPP2_B_HDR_INFO_MC_ID(info) ((info) & MVPP2_B_HDR_INFO_MC_ID_MASK)
+#define MVPP2_B_HDR_INFO_LAST_OFFS 12
+#define MVPP2_B_HDR_INFO_LAST_MASK BIT(12)
+#define MVPP2_B_HDR_INFO_IS_LAST(info) \
+ (((info) & MVPP2_B_HDR_INFO_LAST_MASK) >> MVPP2_B_HDR_INFO_LAST_OFFS)
+
+struct mvpp2_tai;
+
+/* Definitions */
+struct mvpp2_dbgfs_entries;
+
+struct mvpp2_rss_table {
+ u32 indir[MVPP22_RSS_TABLE_ENTRIES];
+};
+
+struct mvpp2_buff_hdr {
+ __le32 next_phys_addr;
+ __le32 next_dma_addr;
+ __le16 byte_count;
+ __le16 info;
+ __le16 reserved1; /* bm_qset (for future use, BM) */
+ u8 next_phys_addr_high;
+ u8 next_dma_addr_high;
+ __le16 reserved2;
+ __le16 reserved3;
+ __le16 reserved4;
+ __le16 reserved5;
+};
+
+/* Shared Packet Processor resources */
+struct mvpp2 {
+ /* Shared registers' base addresses */
+ void __iomem *lms_base;
+ void __iomem *iface_base;
+ void __iomem *cm3_base;
+
+ /* On PPv2.2 and PPv2.3, each "software thread" can access the base
+ * register through a separate address space, each 64 KB apart
+ * from each other. Typically, such address spaces will be
+ * used per CPU.
+ */
+ void __iomem *swth_base[MVPP2_MAX_THREADS];
+
+ /* On PPv2.2 and PPv2.3, some port control registers are located into
+ * the system controller space. These registers are accessible
+ * through a regmap.
+ */
+ struct regmap *sysctrl_base;
+
+ /* Common clocks */
+ struct clk *pp_clk;
+ struct clk *gop_clk;
+ struct clk *mg_clk;
+ struct clk *mg_core_clk;
+ struct clk *axi_clk;
+
+ /* List of pointers to port structures */
+ int port_count;
+ struct mvpp2_port *port_list[MVPP2_MAX_PORTS];
+ /* Map of enabled ports */
+ unsigned long port_map;
+
+ struct mvpp2_tai *tai;
+
+ /* Number of Tx threads used */
+ unsigned int nthreads;
+ /* Map of threads needing locking */
+ unsigned long lock_map;
+
+ /* Aggregated TXQs */
+ struct mvpp2_tx_queue *aggr_txqs;
+
+ /* Are we using page_pool with per-cpu pools? */
+ int percpu_pools;
+
+ /* BM pools */
+ struct mvpp2_bm_pool *bm_pools;
+
+ /* PRS shadow table */
+ struct mvpp2_prs_shadow *prs_shadow;
+ /* PRS auxiliary table for double vlan entries control */
+ bool *prs_double_vlans;
+
+ /* Tclk value */
+ u32 tclk;
+
+ /* HW version */
+ enum { MVPP21, MVPP22, MVPP23 } hw_version;
+
+ /* Maximum number of RXQs per port */
+ unsigned int max_port_rxqs;
+
+ /* Workqueue to gather hardware statistics */
+ char queue_name[30];
+ struct workqueue_struct *stats_queue;
+
+ /* Debugfs root entry */
+ struct dentry *dbgfs_dir;
+
+ /* Debugfs entries private data */
+ struct mvpp2_dbgfs_entries *dbgfs_entries;
+
+ /* RSS Indirection tables */
+ struct mvpp2_rss_table *rss_tables[MVPP22_N_RSS_TABLES];
+
+ /* page_pool allocator */
+ struct page_pool *page_pool[MVPP2_PORT_MAX_RXQ];
+
+ /* Global TX Flow Control config */
+ bool global_tx_fc;
+
+ /* Spinlocks for CM3 shared memory configuration */
+ spinlock_t mss_spinlock;
+};
+
+struct mvpp2_pcpu_stats {
+ struct u64_stats_sync syncp;
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 tx_packets;
+ u64 tx_bytes;
+ /* XDP */
+ u64 xdp_redirect;
+ u64 xdp_pass;
+ u64 xdp_drop;
+ u64 xdp_xmit;
+ u64 xdp_xmit_err;
+ u64 xdp_tx;
+ u64 xdp_tx_err;
+};
+
+/* Per-CPU port control */
+struct mvpp2_port_pcpu {
+ struct hrtimer tx_done_timer;
+ struct net_device *dev;
+ bool timer_scheduled;
+};
+
+struct mvpp2_queue_vector {
+ int irq;
+ struct napi_struct napi;
+ enum { MVPP2_QUEUE_VECTOR_SHARED, MVPP2_QUEUE_VECTOR_PRIVATE } type;
+ int sw_thread_id;
+ u16 sw_thread_mask;
+ int first_rxq;
+ int nrxqs;
+ u32 pending_cause_rx;
+ struct mvpp2_port *port;
+ struct cpumask *mask;
+};
+
+/* Internal representation of a Flow Steering rule */
+struct mvpp2_rfs_rule {
+	/* Rule location inside the flow */
+ int loc;
+
+ /* Flow type, such as TCP_V4_FLOW, IP6_FLOW, etc. */
+ int flow_type;
+
+ /* Index of the C2 TCAM entry handling this rule */
+ int c2_index;
+
+	/* Header fields that need to be extracted to match this flow */
+ u16 hek_fields;
+
+	/* CLS engine: only C2 is supported for now. */
+ u8 engine;
+
+ /* TCAM key and mask for C2-based steering. These fields should be
+ * encapsulated in a union should we add more engines.
+ */
+ u64 c2_tcam;
+ u64 c2_tcam_mask;
+
+ struct flow_rule *flow;
+};
+
+struct mvpp2_ethtool_fs {
+ struct mvpp2_rfs_rule rule;
+ struct ethtool_rxnfc rxnfc;
+};
+
+struct mvpp2_hwtstamp_queue {
+ struct sk_buff *skb[32];
+ u8 next;
+};
+
+struct mvpp2_port {
+ u8 id;
+
+ /* Index of the port from the "group of ports" complex point
+ * of view. This is specific to PPv2.2.
+ */
+ int gop_id;
+
+ int port_irq;
+
+ struct mvpp2 *priv;
+
+ /* Firmware node associated to the port */
+ struct fwnode_handle *fwnode;
+
+ /* Per-port registers' base address */
+ void __iomem *base;
+ void __iomem *stats_base;
+
+ struct mvpp2_rx_queue **rxqs;
+ unsigned int nrxqs;
+ struct mvpp2_tx_queue **txqs;
+ unsigned int ntxqs;
+ struct net_device *dev;
+
+ struct bpf_prog *xdp_prog;
+
+ int pkt_size;
+
+ /* Per-CPU port control */
+ struct mvpp2_port_pcpu __percpu *pcpu;
+
+ /* Protect the BM refills and the Tx paths when a thread is used on more
+ * than a single CPU.
+ */
+ spinlock_t bm_lock[MVPP2_MAX_THREADS];
+ spinlock_t tx_lock[MVPP2_MAX_THREADS];
+
+ /* Flags */
+ unsigned long flags;
+
+ u16 tx_ring_size;
+ u16 rx_ring_size;
+ struct mvpp2_pcpu_stats __percpu *stats;
+ u64 *ethtool_stats;
+
+ unsigned long state;
+
+ /* Per-port work and its lock to gather hardware statistics */
+ struct mutex gather_stats_lock;
+ struct delayed_work stats_work;
+
+ struct device_node *of_node;
+
+ phy_interface_t phy_interface;
+ struct phylink *phylink;
+ struct phylink_config phylink_config;
+ struct phylink_pcs pcs_gmac;
+ struct phylink_pcs pcs_xlg;
+ struct phy *comphy;
+
+ struct mvpp2_bm_pool *pool_long;
+ struct mvpp2_bm_pool *pool_short;
+
+ /* Index of first port's physical RXQ */
+ u8 first_rxq;
+
+ struct mvpp2_queue_vector qvecs[MVPP2_MAX_QVECS];
+ unsigned int nqvecs;
+ bool has_tx_irqs;
+
+ u32 tx_time_coal;
+
+ /* List of steering rules active on that port */
+ struct mvpp2_ethtool_fs *rfs_rules[MVPP2_N_RFS_ENTRIES_PER_FLOW];
+ int n_rfs_rules;
+
+ /* Each port has its own view of the rss contexts, so that it can number
+ * them from 0
+ */
+ int rss_ctx[MVPP22_N_RSS_TABLES];
+
+ bool hwtstamp;
+ bool rx_hwtstamp;
+ enum hwtstamp_tx_types tx_hwtstamp_type;
+ struct mvpp2_hwtstamp_queue tx_hwtstamp_queue[2];
+
+ /* Firmware TX flow control */
+ bool tx_fc;
+};
+
+/* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the
+ * layout of the transmit and receive DMA descriptors, which is
+ * therefore dictated by the hardware design.
+ */
+
+#define MVPP2_TXD_L3_OFF_SHIFT 0
+#define MVPP2_TXD_IP_HLEN_SHIFT 8
+#define MVPP2_TXD_L4_CSUM_FRAG BIT(13)
+#define MVPP2_TXD_L4_CSUM_NOT BIT(14)
+#define MVPP2_TXD_IP_CSUM_DISABLE BIT(15)
+#define MVPP2_TXD_PADDING_DISABLE BIT(23)
+#define MVPP2_TXD_L4_UDP BIT(24)
+#define MVPP2_TXD_L3_IP6 BIT(26)
+#define MVPP2_TXD_L_DESC BIT(28)
+#define MVPP2_TXD_F_DESC BIT(29)
+
+#define MVPP2_RXD_ERR_SUMMARY BIT(15)
+#define MVPP2_RXD_ERR_CODE_MASK (BIT(13) | BIT(14))
+#define MVPP2_RXD_ERR_CRC 0x0
+#define MVPP2_RXD_ERR_OVERRUN BIT(13)
+#define MVPP2_RXD_ERR_RESOURCE (BIT(13) | BIT(14))
+#define MVPP2_RXD_BM_POOL_ID_OFFS 16
+#define MVPP2_RXD_BM_POOL_ID_MASK (BIT(16) | BIT(17) | BIT(18))
+#define MVPP2_RXD_HWF_SYNC BIT(21)
+#define MVPP2_RXD_L4_CSUM_OK BIT(22)
+#define MVPP2_RXD_IP4_HEADER_ERR BIT(24)
+#define MVPP2_RXD_L4_TCP BIT(25)
+#define MVPP2_RXD_L4_UDP BIT(26)
+#define MVPP2_RXD_L3_IP4 BIT(28)
+#define MVPP2_RXD_L3_IP6 BIT(30)
+#define MVPP2_RXD_BUF_HDR BIT(31)
+
+/* HW TX descriptor for PPv2.1 */
+struct mvpp21_tx_desc {
+ __le32 command; /* Options used by HW for packet transmitting.*/
+ u8 packet_offset; /* the offset from the buffer beginning */
+ u8 phys_txq; /* destination queue ID */
+ __le16 data_size; /* data size of transmitted packet in bytes */
+ __le32 buf_dma_addr; /* physical addr of transmitted buffer */
+ __le32 buf_cookie; /* cookie for access to TX buffer in tx path */
+ __le32 reserved1[3]; /* hw_cmd (for future use, BM, PON, PNC) */
+ __le32 reserved2; /* reserved (for future use) */
+};
+
+/* HW RX descriptor for PPv2.1 */
+struct mvpp21_rx_desc {
+ __le32 status; /* info about received packet */
+ __le16 reserved1; /* parser_info (for future use, PnC) */
+ __le16 data_size; /* size of received packet in bytes */
+ __le32 buf_dma_addr; /* physical address of the buffer */
+ __le32 buf_cookie; /* cookie for access to RX buffer in rx path */
+ __le16 reserved2; /* gem_port_id (for future use, PON) */
+ __le16 reserved3; /* csum_l4 (for future use, PnC) */
+ u8 reserved4; /* bm_qset (for future use, BM) */
+ u8 reserved5;
+ __le16 reserved6; /* classify_info (for future use, PnC) */
+ __le32 reserved7; /* flow_id (for future use, PnC) */
+ __le32 reserved8;
+};
+
+/* HW TX descriptor for PPv2.2 and PPv2.3 */
+struct mvpp22_tx_desc {
+ __le32 command;
+ u8 packet_offset;
+ u8 phys_txq;
+ __le16 data_size;
+ __le32 ptp_descriptor;
+ __le32 reserved2;
+ __le64 buf_dma_addr_ptp;
+ __le64 buf_cookie_misc;
+};
+
+/* HW RX descriptor for PPv2.2 and PPv2.3 */
+struct mvpp22_rx_desc {
+ __le32 status;
+ __le16 reserved1;
+ __le16 data_size;
+ __le32 reserved2;
+ __le32 timestamp;
+ __le64 buf_dma_addr_key_hash;
+ __le64 buf_cookie_misc;
+};
+
+/* Opaque type used by the driver to manipulate the HW TX and RX
+ * descriptors
+ */
+struct mvpp2_tx_desc {
+ union {
+ struct mvpp21_tx_desc pp21;
+ struct mvpp22_tx_desc pp22;
+ };
+};
+
+struct mvpp2_rx_desc {
+ union {
+ struct mvpp21_rx_desc pp21;
+ struct mvpp22_rx_desc pp22;
+ };
+};
+
+enum mvpp2_tx_buf_type {
+ MVPP2_TYPE_SKB,
+ MVPP2_TYPE_XDP_TX,
+ MVPP2_TYPE_XDP_NDO,
+};
+
+struct mvpp2_txq_pcpu_buf {
+ enum mvpp2_tx_buf_type type;
+
+ /* Transmitted SKB */
+ union {
+ struct xdp_frame *xdpf;
+ struct sk_buff *skb;
+ };
+
+ /* Physical address of transmitted buffer */
+ dma_addr_t dma;
+
+ /* Size transmitted */
+ size_t size;
+};
+
+/* Per-CPU Tx queue control */
+struct mvpp2_txq_pcpu {
+ unsigned int thread;
+
+ /* Number of Tx DMA descriptors in the descriptor ring */
+ int size;
+
+ /* Number of currently used Tx DMA descriptors in the
+ * descriptor ring
+ */
+ int count;
+
+ int wake_threshold;
+ int stop_threshold;
+
+ /* Number of Tx DMA descriptors reserved for each CPU */
+ int reserved_num;
+
+ /* Info about transmitted buffers */
+ struct mvpp2_txq_pcpu_buf *buffs;
+
+ /* Index of last TX DMA descriptor that was inserted */
+ int txq_put_index;
+
+ /* Index of the TX DMA descriptor to be cleaned up */
+ int txq_get_index;
+
+ /* DMA buffer for TSO headers */
+ char *tso_headers;
+ dma_addr_t tso_headers_dma;
+};
+
+struct mvpp2_tx_queue {
+ /* Physical number of this Tx queue */
+ u8 id;
+
+ /* Logical number of this Tx queue */
+ u8 log_id;
+
+ /* Number of Tx DMA descriptors in the descriptor ring */
+ int size;
+
+ /* Number of currently used Tx DMA descriptors in the descriptor ring */
+ int count;
+
+ /* Per-CPU control of physical Tx queues */
+ struct mvpp2_txq_pcpu __percpu *pcpu;
+
+ u32 done_pkts_coal;
+
+ /* Virtual address of the Tx DMA descriptors array */
+ struct mvpp2_tx_desc *descs;
+
+ /* DMA address of the Tx DMA descriptors array */
+ dma_addr_t descs_dma;
+
+ /* Index of the last Tx DMA descriptor */
+ int last_desc;
+
+ /* Index of the next Tx DMA descriptor to process */
+ int next_desc_to_proc;
+};
+
+struct mvpp2_rx_queue {
+ /* RX queue number, in the range 0-31 for physical RXQs */
+ u8 id;
+
+ /* Num of rx descriptors in the rx descriptor ring */
+ int size;
+
+ u32 pkts_coal;
+ u32 time_coal;
+
+ /* Virtual address of the RX DMA descriptors array */
+ struct mvpp2_rx_desc *descs;
+
+ /* DMA address of the RX DMA descriptors array */
+ dma_addr_t descs_dma;
+
+ /* Index of the last RX DMA descriptor */
+ int last_desc;
+
+ /* Index of the next RX DMA descriptor to process */
+ int next_desc_to_proc;
+
+ /* ID of port to which physical RXQ is mapped */
+ int port;
+
+ /* Port's logic RXQ number to which physical RXQ is mapped */
+ int logic_rxq;
+
+ /* XDP memory accounting */
+ struct xdp_rxq_info xdp_rxq_short;
+ struct xdp_rxq_info xdp_rxq_long;
+};
+
+struct mvpp2_bm_pool {
+ /* Pool number in the range 0-7 */
+ int id;
+
+ /* Buffer Pointers Pool External (BPPE) size */
+ int size;
+ /* BPPE size in bytes */
+ int size_bytes;
+ /* Number of buffers for this pool */
+ int buf_num;
+ /* Pool buffer size */
+ int buf_size;
+ /* Packet size */
+ int pkt_size;
+ int frag_size;
+
+ /* BPPE virtual base address */
+ u32 *virt_addr;
+ /* BPPE DMA base address */
+ dma_addr_t dma_addr;
+
+ /* Ports using BM pool */
+ u32 port_map;
+};
+
+#define IS_TSO_HEADER(txq_pcpu, addr) \
+ ((addr) >= (txq_pcpu)->tso_headers_dma && \
+ (addr) < (txq_pcpu)->tso_headers_dma + \
+ (txq_pcpu)->size * TSO_HEADER_SIZE)
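+
+/* Note (illustrative, not part of the original sources): TSO headers are
+ * carved out of the per-queue tso_headers DMA region rather than mapped per
+ * packet, so the IS_TSO_HEADER() check above lets the TX completion path
+ * recognise such addresses and treat them differently from regular,
+ * individually mapped buffers.
+ */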
+
+#define MVPP2_DRIVER_NAME "mvpp2"
+#define MVPP2_DRIVER_VERSION "1.0"
+
+void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data);
+u32 mvpp2_read(struct mvpp2 *priv, u32 offset);
+
+void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name);
+
+void mvpp2_dbgfs_cleanup(struct mvpp2 *priv);
+void mvpp2_dbgfs_exit(void);
+
+void mvpp23_rx_fifo_fc_en(struct mvpp2 *priv, int port, bool en);
+
+#ifdef CONFIG_MVPP2_PTP
+int mvpp22_tai_probe(struct device *dev, struct mvpp2 *priv);
+void mvpp22_tai_tstamp(struct mvpp2_tai *tai, u32 tstamp,
+ struct skb_shared_hwtstamps *hwtstamp);
+void mvpp22_tai_start(struct mvpp2_tai *tai);
+void mvpp22_tai_stop(struct mvpp2_tai *tai);
+int mvpp22_tai_ptp_clock_index(struct mvpp2_tai *tai);
+#else
+static inline int mvpp22_tai_probe(struct device *dev, struct mvpp2 *priv)
+{
+ return 0;
+}
+static inline void mvpp22_tai_tstamp(struct mvpp2_tai *tai, u32 tstamp,
+ struct skb_shared_hwtstamps *hwtstamp)
+{
+}
+static inline void mvpp22_tai_start(struct mvpp2_tai *tai)
+{
+}
+static inline void mvpp22_tai_stop(struct mvpp2_tai *tai)
+{
+}
+static inline int mvpp22_tai_ptp_clock_index(struct mvpp2_tai *tai)
+{
+ return -1;
+}
+#endif
+
+static inline bool mvpp22_rx_hwtstamping(struct mvpp2_port *port)
+{
+ return IS_ENABLED(CONFIG_MVPP2_PTP) && port->rx_hwtstamp;
+}
+
+#endif
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
new file mode 100644
index 000000000..40aeaa7bd
--- /dev/null
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
@@ -0,0 +1,1748 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * RSS and Classifier helpers for Marvell PPv2 Network Controller
+ *
+ * Copyright (C) 2014 Marvell
+ *
+ * Marcin Wojtas <mw@semihalf.com>
+ */
+
+#include "mvpp2.h"
+#include "mvpp2_cls.h"
+#include "mvpp2_prs.h"
+
+#define MVPP2_DEF_FLOW(_type, _id, _opts, _ri, _ri_mask) \
+{ \
+ .flow_type = _type, \
+ .flow_id = _id, \
+ .supported_hash_opts = _opts, \
+ .prs_ri = { \
+ .ri = _ri, \
+ .ri_mask = _ri_mask \
+ } \
+}
+
+static const struct mvpp2_cls_flow cls_flows[MVPP2_N_PRS_FLOWS] = {
+ /* TCP over IPv4 flows, Not fragmented, no vlan tag */
+ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_NF_UNTAG,
+ MVPP22_CLS_HEK_IP4_5T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
+ MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_NF_UNTAG,
+ MVPP22_CLS_HEK_IP4_5T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
+ MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_NF_UNTAG,
+ MVPP22_CLS_HEK_IP4_5T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
+ MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ /* TCP over IPv4 flows, Not fragmented, with vlan tag */
+ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_NF_TAG,
+ MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_TAGGED,
+ MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK),
+
+ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_NF_TAG,
+ MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_TAGGED,
+ MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK),
+
+ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_NF_TAG,
+ MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_TAGGED,
+ MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK),
+
+ /* TCP over IPv4 flows, fragmented, no vlan tag */
+ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
+ MVPP22_CLS_HEK_IP4_2T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
+ MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
+ MVPP22_CLS_HEK_IP4_2T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
+ MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
+ MVPP22_CLS_HEK_IP4_2T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
+ MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ /* TCP over IPv4 flows, fragmented, with vlan tag */
+ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_TAG,
+ MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
+ MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_IP_FRAG_TRUE |
+ MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK),
+
+ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_TAG,
+ MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
+ MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_IP_FRAG_TRUE |
+ MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK),
+
+ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_TAG,
+ MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
+ MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_IP_FRAG_TRUE |
+ MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK),
+
+ /* UDP over IPv4 flows, Not fragmented, no vlan tag */
+ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_UNTAG,
+ MVPP22_CLS_HEK_IP4_5T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
+ MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_UNTAG,
+ MVPP22_CLS_HEK_IP4_5T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
+ MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_UNTAG,
+ MVPP22_CLS_HEK_IP4_5T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
+ MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ /* UDP over IPv4 flows, Not fragmented, with vlan tag */
+ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_TAG,
+ MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_TAGGED,
+ MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK),
+
+ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_TAG,
+ MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_TAGGED,
+ MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK),
+
+ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_TAG,
+ MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_TAGGED,
+ MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK),
+
+ /* UDP over IPv4 flows, fragmented, no vlan tag */
+ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
+ MVPP22_CLS_HEK_IP4_2T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
+ MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
+ MVPP22_CLS_HEK_IP4_2T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
+ MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
+ MVPP22_CLS_HEK_IP4_2T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
+ MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ /* UDP over IPv4 flows, fragmented, with vlan tag */
+ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_TAG,
+ MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
+ MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_IP_FRAG_TRUE |
+ MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK),
+
+ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_TAG,
+ MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
+ MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_IP_FRAG_TRUE |
+ MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK),
+
+ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_TAG,
+ MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
+ MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_IP_FRAG_TRUE |
+ MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK),
+
+ /* TCP over IPv6 flows, not fragmented, no vlan tag */
+ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_NF_UNTAG,
+ MVPP22_CLS_HEK_IP6_5T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 |
+ MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_NF_UNTAG,
+ MVPP22_CLS_HEK_IP6_5T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT |
+ MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ /* TCP over IPv6 flows, not fragmented, with vlan tag */
+ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_NF_TAG,
+ MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_TAGGED,
+ MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK),
+
+ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_NF_TAG,
+ MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_TAGGED,
+ MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK),
+
+ /* TCP over IPv6 flows, fragmented, no vlan tag */
+ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_FRAG_UNTAG,
+ MVPP22_CLS_HEK_IP6_2T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 |
+ MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_FRAG_UNTAG,
+ MVPP22_CLS_HEK_IP6_2T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT |
+ MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ /* TCP over IPv6 flows, fragmented, with vlan tag */
+ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_FRAG_TAG,
+ MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_TAGGED,
+ MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_IP_FRAG_TRUE |
+ MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK),
+
+ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_FRAG_TAG,
+ MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_TAGGED,
+ MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_IP_FRAG_TRUE |
+ MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK),
+
+ /* UDP over IPv6 flows, not fragmented, no vlan tag */
+ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_NF_UNTAG,
+ MVPP22_CLS_HEK_IP6_5T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 |
+ MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_NF_UNTAG,
+ MVPP22_CLS_HEK_IP6_5T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT |
+ MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ /* UDP over IPv6 flows, not fragmented, with vlan tag */
+ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_NF_TAG,
+ MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_TAGGED,
+ MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK),
+
+ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_NF_TAG,
+ MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_TAGGED,
+ MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK),
+
+ /* UDP over IPv6 flows, fragmented, no vlan tag */
+ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_FRAG_UNTAG,
+ MVPP22_CLS_HEK_IP6_2T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 |
+ MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_FRAG_UNTAG,
+ MVPP22_CLS_HEK_IP6_2T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT |
+ MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ /* UDP over IPv6 flows, fragmented, with vlan tag */
+ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_FRAG_TAG,
+ MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_TAGGED,
+ MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_IP_FRAG_TRUE |
+ MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK),
+
+ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_FRAG_TAG,
+ MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_TAGGED,
+ MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_IP_FRAG_TRUE |
+ MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK),
+
+ /* IPv4 flows, no vlan tag */
+ MVPP2_DEF_FLOW(MVPP22_FLOW_IP4, MVPP2_FL_IP4_UNTAG,
+ MVPP22_CLS_HEK_IP4_2T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4,
+ MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
+ MVPP2_DEF_FLOW(MVPP22_FLOW_IP4, MVPP2_FL_IP4_UNTAG,
+ MVPP22_CLS_HEK_IP4_2T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT,
+ MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
+ MVPP2_DEF_FLOW(MVPP22_FLOW_IP4, MVPP2_FL_IP4_UNTAG,
+ MVPP22_CLS_HEK_IP4_2T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER,
+ MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
+
+ /* IPv4 flows, with vlan tag */
+ MVPP2_DEF_FLOW(MVPP22_FLOW_IP4, MVPP2_FL_IP4_TAG,
+ MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
+ MVPP2_PRS_RI_L3_IP4,
+ MVPP2_PRS_RI_L3_PROTO_MASK),
+ MVPP2_DEF_FLOW(MVPP22_FLOW_IP4, MVPP2_FL_IP4_TAG,
+ MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
+ MVPP2_PRS_RI_L3_IP4_OPT,
+ MVPP2_PRS_RI_L3_PROTO_MASK),
+ MVPP2_DEF_FLOW(MVPP22_FLOW_IP4, MVPP2_FL_IP4_TAG,
+ MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
+ MVPP2_PRS_RI_L3_IP4_OTHER,
+ MVPP2_PRS_RI_L3_PROTO_MASK),
+
+ /* IPv6 flows, no vlan tag */
+ MVPP2_DEF_FLOW(MVPP22_FLOW_IP6, MVPP2_FL_IP6_UNTAG,
+ MVPP22_CLS_HEK_IP6_2T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6,
+ MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
+ MVPP2_DEF_FLOW(MVPP22_FLOW_IP6, MVPP2_FL_IP6_UNTAG,
+ MVPP22_CLS_HEK_IP6_2T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6,
+ MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
+
+ /* IPv6 flows, with vlan tag */
+ MVPP2_DEF_FLOW(MVPP22_FLOW_IP6, MVPP2_FL_IP6_TAG,
+ MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_TAGGED,
+ MVPP2_PRS_RI_L3_IP6,
+ MVPP2_PRS_RI_L3_PROTO_MASK),
+ MVPP2_DEF_FLOW(MVPP22_FLOW_IP6, MVPP2_FL_IP6_TAG,
+ MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_TAGGED,
+ MVPP2_PRS_RI_L3_IP6,
+ MVPP2_PRS_RI_L3_PROTO_MASK),
+
+ /* Non IP flow, no vlan tag */
+ MVPP2_DEF_FLOW(MVPP22_FLOW_ETHERNET, MVPP2_FL_NON_IP_UNTAG,
+ 0,
+ MVPP2_PRS_RI_VLAN_NONE,
+ MVPP2_PRS_RI_VLAN_MASK),
+ /* Non IP flow, with vlan tag */
+ MVPP2_DEF_FLOW(MVPP22_FLOW_ETHERNET, MVPP2_FL_NON_IP_TAG,
+ MVPP22_CLS_HEK_OPT_VLAN,
+ 0, 0),
+};
+
+u32 mvpp2_cls_flow_hits(struct mvpp2 *priv, int index)
+{
+ mvpp2_write(priv, MVPP2_CTRS_IDX, index);
+
+ return mvpp2_read(priv, MVPP2_CLS_FLOW_TBL_HIT_CTR);
+}
+
+void mvpp2_cls_flow_read(struct mvpp2 *priv, int index,
+ struct mvpp2_cls_flow_entry *fe)
+{
+ fe->index = index;
+ mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, index);
+ fe->data[0] = mvpp2_read(priv, MVPP2_CLS_FLOW_TBL0_REG);
+ fe->data[1] = mvpp2_read(priv, MVPP2_CLS_FLOW_TBL1_REG);
+ fe->data[2] = mvpp2_read(priv, MVPP2_CLS_FLOW_TBL2_REG);
+}
+
+/* Update classification flow table registers */
+static void mvpp2_cls_flow_write(struct mvpp2 *priv,
+ struct mvpp2_cls_flow_entry *fe)
+{
+ mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index);
+ mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]);
+ mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]);
+ mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]);
+}
+
+u32 mvpp2_cls_lookup_hits(struct mvpp2 *priv, int index)
+{
+ mvpp2_write(priv, MVPP2_CTRS_IDX, index);
+
+ return mvpp2_read(priv, MVPP2_CLS_DEC_TBL_HIT_CTR);
+}
+
+void mvpp2_cls_lookup_read(struct mvpp2 *priv, int lkpid, int way,
+ struct mvpp2_cls_lookup_entry *le)
+{
+ u32 val;
+
+ val = (way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | lkpid;
+ mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val);
+ le->way = way;
+ le->lkpid = lkpid;
+ le->data = mvpp2_read(priv, MVPP2_CLS_LKP_TBL_REG);
+}
+
+/* Update classification lookup table register */
+static void mvpp2_cls_lookup_write(struct mvpp2 *priv,
+ struct mvpp2_cls_lookup_entry *le)
+{
+ u32 val;
+
+ val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid;
+ mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val);
+ mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data);
+}
+
+/* Operations on flow entry */
+static int mvpp2_cls_flow_hek_num_get(struct mvpp2_cls_flow_entry *fe)
+{
+ return fe->data[1] & MVPP2_CLS_FLOW_TBL1_N_FIELDS_MASK;
+}
+
+static void mvpp2_cls_flow_hek_num_set(struct mvpp2_cls_flow_entry *fe,
+ int num_of_fields)
+{
+ fe->data[1] &= ~MVPP2_CLS_FLOW_TBL1_N_FIELDS_MASK;
+ fe->data[1] |= MVPP2_CLS_FLOW_TBL1_N_FIELDS(num_of_fields);
+}
+
+static int mvpp2_cls_flow_hek_get(struct mvpp2_cls_flow_entry *fe,
+ int field_index)
+{
+ return (fe->data[2] >> MVPP2_CLS_FLOW_TBL2_FLD_OFFS(field_index)) &
+ MVPP2_CLS_FLOW_TBL2_FLD_MASK;
+}
+
+static void mvpp2_cls_flow_hek_set(struct mvpp2_cls_flow_entry *fe,
+ int field_index, int field_id)
+{
+ fe->data[2] &= ~MVPP2_CLS_FLOW_TBL2_FLD(field_index,
+ MVPP2_CLS_FLOW_TBL2_FLD_MASK);
+ fe->data[2] |= MVPP2_CLS_FLOW_TBL2_FLD(field_index, field_id);
+}
+
+static void mvpp2_cls_flow_eng_set(struct mvpp2_cls_flow_entry *fe,
+ int engine)
+{
+ fe->data[0] &= ~MVPP2_CLS_FLOW_TBL0_ENG(MVPP2_CLS_FLOW_TBL0_ENG_MASK);
+ fe->data[0] |= MVPP2_CLS_FLOW_TBL0_ENG(engine);
+}
+
+int mvpp2_cls_flow_eng_get(struct mvpp2_cls_flow_entry *fe)
+{
+ return (fe->data[0] >> MVPP2_CLS_FLOW_TBL0_OFFS) &
+ MVPP2_CLS_FLOW_TBL0_ENG_MASK;
+}
+
+static void mvpp2_cls_flow_port_id_sel(struct mvpp2_cls_flow_entry *fe,
+ bool from_packet)
+{
+ if (from_packet)
+ fe->data[0] |= MVPP2_CLS_FLOW_TBL0_PORT_ID_SEL;
+ else
+ fe->data[0] &= ~MVPP2_CLS_FLOW_TBL0_PORT_ID_SEL;
+}
+
+static void mvpp2_cls_flow_last_set(struct mvpp2_cls_flow_entry *fe,
+ bool is_last)
+{
+ fe->data[0] &= ~MVPP2_CLS_FLOW_TBL0_LAST;
+ fe->data[0] |= !!is_last;
+}
+
+static void mvpp2_cls_flow_pri_set(struct mvpp2_cls_flow_entry *fe, int prio)
+{
+ fe->data[1] &= ~MVPP2_CLS_FLOW_TBL1_PRIO(MVPP2_CLS_FLOW_TBL1_PRIO_MASK);
+ fe->data[1] |= MVPP2_CLS_FLOW_TBL1_PRIO(prio);
+}
+
+static void mvpp2_cls_flow_port_add(struct mvpp2_cls_flow_entry *fe,
+ u32 port)
+{
+ fe->data[0] |= MVPP2_CLS_FLOW_TBL0_PORT_ID(port);
+}
+
+static void mvpp2_cls_flow_port_remove(struct mvpp2_cls_flow_entry *fe,
+ u32 port)
+{
+ fe->data[0] &= ~MVPP2_CLS_FLOW_TBL0_PORT_ID(port);
+}
+
+static void mvpp2_cls_flow_lu_type_set(struct mvpp2_cls_flow_entry *fe,
+ u8 lu_type)
+{
+ fe->data[1] &= ~MVPP2_CLS_FLOW_TBL1_LU_TYPE(MVPP2_CLS_LU_TYPE_MASK);
+ fe->data[1] |= MVPP2_CLS_FLOW_TBL1_LU_TYPE(lu_type);
+}
+
+/* Initialize the parser entry for the given flow */
+static void mvpp2_cls_flow_prs_init(struct mvpp2 *priv,
+ const struct mvpp2_cls_flow *flow)
+{
+ mvpp2_prs_add_flow(priv, flow->flow_id, flow->prs_ri.ri,
+ flow->prs_ri.ri_mask);
+}
+
+/* Initialize the Lookup Id table entry for the given flow */
+static void mvpp2_cls_flow_lkp_init(struct mvpp2 *priv,
+ const struct mvpp2_cls_flow *flow)
+{
+ struct mvpp2_cls_lookup_entry le;
+
+ le.way = 0;
+ le.lkpid = flow->flow_id;
+
+ /* The default RxQ for this port is set in the C2 lookup */
+ le.data = 0;
+
+ /* We point to the first lookup in the sequence for this flow, which is
+ * the C2 lookup.
+ */
+ le.data |= MVPP2_CLS_LKP_FLOW_PTR(MVPP2_CLS_FLT_FIRST(flow->flow_id));
+
+ /* CLS is always enabled, RSS is enabled/disabled in C2 lookup */
+ le.data |= MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;
+
+ mvpp2_cls_lookup_write(priv, &le);
+}
+
+static void mvpp2_cls_c2_write(struct mvpp2 *priv,
+ struct mvpp2_cls_c2_entry *c2)
+{
+ u32 val;
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, c2->index);
+
+ val = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_INV);
+ if (c2->valid)
+ val &= ~MVPP22_CLS_C2_TCAM_INV_BIT;
+ else
+ val |= MVPP22_CLS_C2_TCAM_INV_BIT;
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_INV, val);
+
+ mvpp2_write(priv, MVPP22_CLS_C2_ACT, c2->act);
+
+ mvpp2_write(priv, MVPP22_CLS_C2_ATTR0, c2->attr[0]);
+ mvpp2_write(priv, MVPP22_CLS_C2_ATTR1, c2->attr[1]);
+ mvpp2_write(priv, MVPP22_CLS_C2_ATTR2, c2->attr[2]);
+ mvpp2_write(priv, MVPP22_CLS_C2_ATTR3, c2->attr[3]);
+
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA0, c2->tcam[0]);
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA1, c2->tcam[1]);
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA2, c2->tcam[2]);
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA3, c2->tcam[3]);
+ /* Writing TCAM_DATA4 flushes writes to TCAM_DATA0-4 and INV to HW */
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA4, c2->tcam[4]);
+}
+
+void mvpp2_cls_c2_read(struct mvpp2 *priv, int index,
+ struct mvpp2_cls_c2_entry *c2)
+{
+ u32 val;
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, index);
+
+ c2->index = index;
+
+ c2->tcam[0] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA0);
+ c2->tcam[1] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA1);
+ c2->tcam[2] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA2);
+ c2->tcam[3] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA3);
+ c2->tcam[4] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA4);
+
+ c2->act = mvpp2_read(priv, MVPP22_CLS_C2_ACT);
+
+ c2->attr[0] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR0);
+ c2->attr[1] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR1);
+ c2->attr[2] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR2);
+ c2->attr[3] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR3);
+
+ val = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_INV);
+ c2->valid = !(val & MVPP22_CLS_C2_TCAM_INV_BIT);
+}
+
+static int mvpp2_cls_ethtool_flow_to_type(int flow_type)
+{
+ switch (flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS)) {
+ case ETHER_FLOW:
+ return MVPP22_FLOW_ETHERNET;
+ case TCP_V4_FLOW:
+ return MVPP22_FLOW_TCP4;
+ case TCP_V6_FLOW:
+ return MVPP22_FLOW_TCP6;
+ case UDP_V4_FLOW:
+ return MVPP22_FLOW_UDP4;
+ case UDP_V6_FLOW:
+ return MVPP22_FLOW_UDP6;
+ case IPV4_FLOW:
+ return MVPP22_FLOW_IP4;
+ case IPV6_FLOW:
+ return MVPP22_FLOW_IP6;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int mvpp2_cls_c2_port_flow_index(struct mvpp2_port *port, int loc)
+{
+ return MVPP22_CLS_C2_RFS_LOC(port->id, loc);
+}
+
+/* Initialize the flow table entries for the given flow */
+static void mvpp2_cls_flow_init(struct mvpp2 *priv,
+ const struct mvpp2_cls_flow *flow)
+{
+ struct mvpp2_cls_flow_entry fe;
+ int i, pri = 0;
+
+ /* Assign default values to all entries in the flow */
+ for (i = MVPP2_CLS_FLT_FIRST(flow->flow_id);
+ i <= MVPP2_CLS_FLT_LAST(flow->flow_id); i++) {
+ memset(&fe, 0, sizeof(fe));
+ fe.index = i;
+ mvpp2_cls_flow_pri_set(&fe, pri++);
+
+ if (i == MVPP2_CLS_FLT_LAST(flow->flow_id))
+ mvpp2_cls_flow_last_set(&fe, 1);
+
+ mvpp2_cls_flow_write(priv, &fe);
+ }
+
+ /* RSS config C2 lookup */
+ mvpp2_cls_flow_read(priv, MVPP2_CLS_FLT_C2_RSS_ENTRY(flow->flow_id),
+ &fe);
+
+ mvpp2_cls_flow_eng_set(&fe, MVPP22_CLS_ENGINE_C2);
+ mvpp2_cls_flow_port_id_sel(&fe, true);
+ mvpp2_cls_flow_lu_type_set(&fe, MVPP22_CLS_LU_TYPE_ALL);
+
+ /* Add all ports */
+ for (i = 0; i < MVPP2_MAX_PORTS; i++)
+ mvpp2_cls_flow_port_add(&fe, BIT(i));
+
+ mvpp2_cls_flow_write(priv, &fe);
+
+ /* C3Hx lookups */
+ for (i = 0; i < MVPP2_MAX_PORTS; i++) {
+ mvpp2_cls_flow_read(priv,
+ MVPP2_CLS_FLT_HASH_ENTRY(i, flow->flow_id),
+ &fe);
+
+ /* Set a default engine. Will be overwritten when setting the
+ * real HEK parameters
+ */
+ mvpp2_cls_flow_eng_set(&fe, MVPP22_CLS_ENGINE_C3HA);
+ mvpp2_cls_flow_port_id_sel(&fe, true);
+ mvpp2_cls_flow_port_add(&fe, BIT(i));
+
+ mvpp2_cls_flow_write(priv, &fe);
+ }
+}
+
+/* Adds a field to the Header Extracted Key generation parameters */
+static int mvpp2_flow_add_hek_field(struct mvpp2_cls_flow_entry *fe,
+ u32 field_id)
+{
+ int nb_fields = mvpp2_cls_flow_hek_num_get(fe);
+
+ if (nb_fields == MVPP2_FLOW_N_FIELDS)
+ return -EINVAL;
+
+ mvpp2_cls_flow_hek_set(fe, nb_fields, field_id);
+
+ mvpp2_cls_flow_hek_num_set(fe, nb_fields + 1);
+
+ return 0;
+}
+
+static int mvpp2_flow_set_hek_fields(struct mvpp2_cls_flow_entry *fe,
+ unsigned long hash_opts)
+{
+ u32 field_id;
+ int i;
+
+ /* Clear old fields */
+ mvpp2_cls_flow_hek_num_set(fe, 0);
+ fe->data[2] = 0;
+
+ for_each_set_bit(i, &hash_opts, MVPP22_CLS_HEK_N_FIELDS) {
+ switch (BIT(i)) {
+ case MVPP22_CLS_HEK_OPT_MAC_DA:
+ field_id = MVPP22_CLS_FIELD_MAC_DA;
+ break;
+ case MVPP22_CLS_HEK_OPT_VLAN:
+ field_id = MVPP22_CLS_FIELD_VLAN;
+ break;
+ case MVPP22_CLS_HEK_OPT_VLAN_PRI:
+ field_id = MVPP22_CLS_FIELD_VLAN_PRI;
+ break;
+ case MVPP22_CLS_HEK_OPT_IP4SA:
+ field_id = MVPP22_CLS_FIELD_IP4SA;
+ break;
+ case MVPP22_CLS_HEK_OPT_IP4DA:
+ field_id = MVPP22_CLS_FIELD_IP4DA;
+ break;
+ case MVPP22_CLS_HEK_OPT_IP6SA:
+ field_id = MVPP22_CLS_FIELD_IP6SA;
+ break;
+ case MVPP22_CLS_HEK_OPT_IP6DA:
+ field_id = MVPP22_CLS_FIELD_IP6DA;
+ break;
+ case MVPP22_CLS_HEK_OPT_L4SIP:
+ field_id = MVPP22_CLS_FIELD_L4SIP;
+ break;
+ case MVPP22_CLS_HEK_OPT_L4DIP:
+ field_id = MVPP22_CLS_FIELD_L4DIP;
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (mvpp2_flow_add_hek_field(fe, field_id))
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Returns the size, in bits, of the corresponding HEK field */
+static int mvpp2_cls_hek_field_size(u32 field)
+{
+ switch (field) {
+ case MVPP22_CLS_HEK_OPT_MAC_DA:
+ return 48;
+ case MVPP22_CLS_HEK_OPT_VLAN:
+ return 12;
+ case MVPP22_CLS_HEK_OPT_VLAN_PRI:
+ return 3;
+ case MVPP22_CLS_HEK_OPT_IP4SA:
+ case MVPP22_CLS_HEK_OPT_IP4DA:
+ return 32;
+ case MVPP22_CLS_HEK_OPT_IP6SA:
+ case MVPP22_CLS_HEK_OPT_IP6DA:
+ return 128;
+ case MVPP22_CLS_HEK_OPT_L4SIP:
+ case MVPP22_CLS_HEK_OPT_L4DIP:
+ return 16;
+ default:
+ return -1;
+ }
+}
+
+const struct mvpp2_cls_flow *mvpp2_cls_flow_get(int flow)
+{
+ if (flow >= MVPP2_N_PRS_FLOWS)
+ return NULL;
+
+ return &cls_flows[flow];
+}
+
+/* Set the hash generation options for the given traffic flow.
+ * One traffic flow (in the ethtool sense) has multiple classification flows,
+ * to handle specific cases such as fragmentation, or the presence of a
+ * VLAN / DSA Tag.
+ *
+ * Each of these individual flows has different constraints, for example we
+ * can't hash fragmented packets on L4 data (else we would risk packet
+ * re-ordering), so each classification flow masks the options with its
+ * supported ones.
+ */
+static int mvpp2_port_rss_hash_opts_set(struct mvpp2_port *port, int flow_type,
+ u16 requested_opts)
+{
+ const struct mvpp2_cls_flow *flow;
+ struct mvpp2_cls_flow_entry fe;
+ int i, engine, flow_index;
+ u16 hash_opts;
+
+ for_each_cls_flow_id_with_type(i, flow_type) {
+ flow = mvpp2_cls_flow_get(i);
+ if (!flow)
+ return -EINVAL;
+
+ flow_index = MVPP2_CLS_FLT_HASH_ENTRY(port->id, flow->flow_id);
+
+ mvpp2_cls_flow_read(port->priv, flow_index, &fe);
+
+ hash_opts = flow->supported_hash_opts & requested_opts;
+
+ /* Use the C3HB engine to access L4 info. This adds the L4 fields to
+ * the hash parameters.
+ */
+ if (hash_opts & MVPP22_CLS_HEK_L4_OPTS)
+ engine = MVPP22_CLS_ENGINE_C3HB;
+ else
+ engine = MVPP22_CLS_ENGINE_C3HA;
+
+ if (mvpp2_flow_set_hek_fields(&fe, hash_opts))
+ return -EINVAL;
+
+ mvpp2_cls_flow_eng_set(&fe, engine);
+
+ mvpp2_cls_flow_write(port->priv, &fe);
+ }
+
+ return 0;
+}
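+
+/* Worked example (illustrative, not part of the original sources): requesting
+ * MVPP22_CLS_HEK_IP4_5T for MVPP22_FLOW_TCP4 programs the C3HB engine on the
+ * non-fragmented TCP4 classification flows, while the fragmented ones only
+ * support MVPP22_CLS_HEK_IP4_2T, so the mask reduces them to a 2-tuple hash
+ * on the C3HA engine.
+ */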
+
+u16 mvpp2_flow_get_hek_fields(struct mvpp2_cls_flow_entry *fe)
+{
+ u16 hash_opts = 0;
+ int n_fields, i, field;
+
+ n_fields = mvpp2_cls_flow_hek_num_get(fe);
+
+ for (i = 0; i < n_fields; i++) {
+ field = mvpp2_cls_flow_hek_get(fe, i);
+
+ switch (field) {
+ case MVPP22_CLS_FIELD_MAC_DA:
+ hash_opts |= MVPP22_CLS_HEK_OPT_MAC_DA;
+ break;
+ case MVPP22_CLS_FIELD_VLAN:
+ hash_opts |= MVPP22_CLS_HEK_OPT_VLAN;
+ break;
+ case MVPP22_CLS_FIELD_VLAN_PRI:
+ hash_opts |= MVPP22_CLS_HEK_OPT_VLAN_PRI;
+ break;
+ case MVPP22_CLS_FIELD_L3_PROTO:
+ hash_opts |= MVPP22_CLS_HEK_OPT_L3_PROTO;
+ break;
+ case MVPP22_CLS_FIELD_IP4SA:
+ hash_opts |= MVPP22_CLS_HEK_OPT_IP4SA;
+ break;
+ case MVPP22_CLS_FIELD_IP4DA:
+ hash_opts |= MVPP22_CLS_HEK_OPT_IP4DA;
+ break;
+ case MVPP22_CLS_FIELD_IP6SA:
+ hash_opts |= MVPP22_CLS_HEK_OPT_IP6SA;
+ break;
+ case MVPP22_CLS_FIELD_IP6DA:
+ hash_opts |= MVPP22_CLS_HEK_OPT_IP6DA;
+ break;
+ case MVPP22_CLS_FIELD_L4SIP:
+ hash_opts |= MVPP22_CLS_HEK_OPT_L4SIP;
+ break;
+ case MVPP22_CLS_FIELD_L4DIP:
+ hash_opts |= MVPP22_CLS_HEK_OPT_L4DIP;
+ break;
+ default:
+ break;
+ }
+ }
+ return hash_opts;
+}
+
+/* Returns the hash opts for this flow. There are several classifier flows
+ * for one traffic flow; this returns an aggregation of all their
+ * configurations.
+ */
+static u16 mvpp2_port_rss_hash_opts_get(struct mvpp2_port *port, int flow_type)
+{
+ const struct mvpp2_cls_flow *flow;
+ struct mvpp2_cls_flow_entry fe;
+ int i, flow_index;
+ u16 hash_opts = 0;
+
+ for_each_cls_flow_id_with_type(i, flow_type) {
+ flow = mvpp2_cls_flow_get(i);
+ if (!flow)
+ return 0;
+
+ flow_index = MVPP2_CLS_FLT_HASH_ENTRY(port->id, flow->flow_id);
+
+ mvpp2_cls_flow_read(port->priv, flow_index, &fe);
+
+ hash_opts |= mvpp2_flow_get_hek_fields(&fe);
+ }
+
+ return hash_opts;
+}
+
+static void mvpp2_cls_port_init_flows(struct mvpp2 *priv)
+{
+ const struct mvpp2_cls_flow *flow;
+ int i;
+
+ for (i = 0; i < MVPP2_N_PRS_FLOWS; i++) {
+ flow = mvpp2_cls_flow_get(i);
+ if (!flow)
+ break;
+
+ mvpp2_cls_flow_prs_init(priv, flow);
+ mvpp2_cls_flow_lkp_init(priv, flow);
+ mvpp2_cls_flow_init(priv, flow);
+ }
+}
+
+static void mvpp2_port_c2_cls_init(struct mvpp2_port *port)
+{
+ struct mvpp2_cls_c2_entry c2;
+ u8 qh, ql, pmap;
+
+ memset(&c2, 0, sizeof(c2));
+
+ c2.index = MVPP22_CLS_C2_RSS_ENTRY(port->id);
+
+ pmap = BIT(port->id);
+ c2.tcam[4] = MVPP22_CLS_C2_PORT_ID(pmap);
+ c2.tcam[4] |= MVPP22_CLS_C2_TCAM_EN(MVPP22_CLS_C2_PORT_ID(pmap));
+
+ /* Match on Lookup Type */
+ c2.tcam[4] |= MVPP22_CLS_C2_TCAM_EN(MVPP22_CLS_C2_LU_TYPE(MVPP2_CLS_LU_TYPE_MASK));
+ c2.tcam[4] |= MVPP22_CLS_C2_LU_TYPE(MVPP22_CLS_LU_TYPE_ALL);
+
+ /* Update RSS status after matching this entry */
+ c2.act = MVPP22_CLS_C2_ACT_RSS_EN(MVPP22_C2_UPD_LOCK);
+
+ /* Mark packet as "forwarded to software", needed for RSS */
+ c2.act |= MVPP22_CLS_C2_ACT_FWD(MVPP22_C2_FWD_SW_LOCK);
+
+ /* Configure the default rx queue: update Queue Low and Queue High, but
+ * don't lock, since the rx queue selection might be overridden by RSS
+ */
+ c2.act |= MVPP22_CLS_C2_ACT_QHIGH(MVPP22_C2_UPD) |
+ MVPP22_CLS_C2_ACT_QLOW(MVPP22_C2_UPD);
+
+ qh = (port->first_rxq >> 3) & MVPP22_CLS_C2_ATTR0_QHIGH_MASK;
+ ql = port->first_rxq & MVPP22_CLS_C2_ATTR0_QLOW_MASK;
+
+ c2.attr[0] = MVPP22_CLS_C2_ATTR0_QHIGH(qh) |
+ MVPP22_CLS_C2_ATTR0_QLOW(ql);
+
+ c2.valid = true;
+
+ mvpp2_cls_c2_write(port->priv, &c2);
+}
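+
+/* Worked example (illustrative, not part of the original sources): the RxQ
+ * number is split across the QLOW and QHIGH attribute fields (the >> 3 split
+ * implies a 3-bit QLOW), so for e.g. first_rxq = 12 the entry is programmed
+ * with qh = 12 >> 3 = 1 and ql = 12 & 7 = 4.
+ */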
+
+/* Classifier default initialization */
+void mvpp2_cls_init(struct mvpp2 *priv)
+{
+ struct mvpp2_cls_lookup_entry le;
+ struct mvpp2_cls_flow_entry fe;
+ struct mvpp2_cls_c2_entry c2;
+ int index;
+
+ /* Enable classifier */
+ mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK);
+
+ /* Clear classifier flow table */
+ memset(&fe.data, 0, sizeof(fe.data));
+ for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) {
+ fe.index = index;
+ mvpp2_cls_flow_write(priv, &fe);
+ }
+
+ /* Clear classifier lookup table */
+ le.data = 0;
+ for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) {
+ le.lkpid = index;
+ le.way = 0;
+ mvpp2_cls_lookup_write(priv, &le);
+
+ le.way = 1;
+ mvpp2_cls_lookup_write(priv, &le);
+ }
+
+ /* Clear C2 TCAM engine table */
+ memset(&c2, 0, sizeof(c2));
+ c2.valid = false;
+ for (index = 0; index < MVPP22_CLS_C2_N_ENTRIES; index++) {
+ c2.index = index;
+ mvpp2_cls_c2_write(priv, &c2);
+ }
+
+ /* Disable the FIFO stages in C2 engine, which are only used in BIST
+ * mode
+ */
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_CTRL,
+ MVPP22_CLS_C2_TCAM_BYPASS_FIFO);
+
+ mvpp2_cls_port_init_flows(priv);
+}
+
+void mvpp2_cls_port_config(struct mvpp2_port *port)
+{
+ struct mvpp2_cls_lookup_entry le;
+ u32 val;
+
+ /* Set way for the port */
+ val = mvpp2_read(port->priv, MVPP2_CLS_PORT_WAY_REG);
+ val &= ~MVPP2_CLS_PORT_WAY_MASK(port->id);
+ mvpp2_write(port->priv, MVPP2_CLS_PORT_WAY_REG, val);
+
+ /* Pick the entry to be accessed in lookup ID decoding table
+ * according to the way and lkpid.
+ */
+ le.lkpid = port->id;
+ le.way = 0;
+ le.data = 0;
+
+ /* Set initial CPU queue for receiving packets */
+ le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK;
+ le.data |= port->first_rxq;
+
+ /* Disable classification engines */
+ le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;
+
+ /* Update lookup ID table entry */
+ mvpp2_cls_lookup_write(port->priv, &le);
+
+ mvpp2_port_c2_cls_init(port);
+}
+
+u32 mvpp2_cls_c2_hit_count(struct mvpp2 *priv, int c2_index)
+{
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, c2_index);
+
+ return mvpp2_read(priv, MVPP22_CLS_C2_HIT_CTR);
+}
+
+static void mvpp2_rss_port_c2_enable(struct mvpp2_port *port, u32 ctx)
+{
+ struct mvpp2_cls_c2_entry c2;
+ u8 qh, ql;
+
+ mvpp2_cls_c2_read(port->priv, MVPP22_CLS_C2_RSS_ENTRY(port->id), &c2);
+
+ /* The RxQ number is used to select the RSS table. In that case, we set
+ * it to be the ctx number.
+ */
+ qh = (ctx >> 3) & MVPP22_CLS_C2_ATTR0_QHIGH_MASK;
+ ql = ctx & MVPP22_CLS_C2_ATTR0_QLOW_MASK;
+
+ c2.attr[0] = MVPP22_CLS_C2_ATTR0_QHIGH(qh) |
+ MVPP22_CLS_C2_ATTR0_QLOW(ql);
+
+ c2.attr[2] |= MVPP22_CLS_C2_ATTR2_RSS_EN;
+
+ mvpp2_cls_c2_write(port->priv, &c2);
+}
+
+static void mvpp2_rss_port_c2_disable(struct mvpp2_port *port)
+{
+ struct mvpp2_cls_c2_entry c2;
+ u8 qh, ql;
+
+ mvpp2_cls_c2_read(port->priv, MVPP22_CLS_C2_RSS_ENTRY(port->id), &c2);
+
+ /* Reset the default destination RxQ to the port's first rx queue. */
+ qh = (port->first_rxq >> 3) & MVPP22_CLS_C2_ATTR0_QHIGH_MASK;
+ ql = port->first_rxq & MVPP22_CLS_C2_ATTR0_QLOW_MASK;
+
+ c2.attr[0] = MVPP22_CLS_C2_ATTR0_QHIGH(qh) |
+ MVPP22_CLS_C2_ATTR0_QLOW(ql);
+
+ c2.attr[2] &= ~MVPP22_CLS_C2_ATTR2_RSS_EN;
+
+ mvpp2_cls_c2_write(port->priv, &c2);
+}
+
+static inline int mvpp22_rss_ctx(struct mvpp2_port *port, int port_rss_ctx)
+{
+ return port->rss_ctx[port_rss_ctx];
+}
+
+int mvpp22_port_rss_enable(struct mvpp2_port *port)
+{
+ if (mvpp22_rss_ctx(port, 0) < 0)
+ return -EINVAL;
+
+ mvpp2_rss_port_c2_enable(port, mvpp22_rss_ctx(port, 0));
+
+ return 0;
+}
+
+int mvpp22_port_rss_disable(struct mvpp2_port *port)
+{
+ if (mvpp22_rss_ctx(port, 0) < 0)
+ return -EINVAL;
+
+ mvpp2_rss_port_c2_disable(port);
+
+ return 0;
+}
+
+static void mvpp22_port_c2_lookup_disable(struct mvpp2_port *port, int entry)
+{
+ struct mvpp2_cls_c2_entry c2;
+
+ mvpp2_cls_c2_read(port->priv, entry, &c2);
+
+ /* Clear the port map so that the entry doesn't match anymore */
+ c2.tcam[4] &= ~(MVPP22_CLS_C2_PORT_ID(BIT(port->id)));
+
+ mvpp2_cls_c2_write(port->priv, &c2);
+}
+
+/* Set CPU queue number for oversize packets */
+void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
+{
+ u32 val;
+
+ mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id),
+ port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK);
+
+ mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id),
+ (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS));
+
+ val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG);
+ val &= ~MVPP2_CLS_SWFWD_PCTRL_MASK(port->id);
+ mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
+}
+
+static int mvpp2_port_c2_tcam_rule_add(struct mvpp2_port *port,
+ struct mvpp2_rfs_rule *rule)
+{
+ struct flow_action_entry *act;
+ struct mvpp2_cls_c2_entry c2;
+ u8 qh, ql, pmap;
+ int index, ctx;
+
+ if (!flow_action_basic_hw_stats_check(&rule->flow->action, NULL))
+ return -EOPNOTSUPP;
+
+ memset(&c2, 0, sizeof(c2));
+
+ index = mvpp2_cls_c2_port_flow_index(port, rule->loc);
+ if (index < 0)
+ return -EINVAL;
+ c2.index = index;
+
+ act = &rule->flow->action.entries[0];
+
+ rule->c2_index = c2.index;
+
+ c2.tcam[3] = (rule->c2_tcam & 0xffff) |
+ ((rule->c2_tcam_mask & 0xffff) << 16);
+ c2.tcam[2] = ((rule->c2_tcam >> 16) & 0xffff) |
+ (((rule->c2_tcam_mask >> 16) & 0xffff) << 16);
+ c2.tcam[1] = ((rule->c2_tcam >> 32) & 0xffff) |
+ (((rule->c2_tcam_mask >> 32) & 0xffff) << 16);
+ c2.tcam[0] = ((rule->c2_tcam >> 48) & 0xffff) |
+ (((rule->c2_tcam_mask >> 48) & 0xffff) << 16);
+
+ pmap = BIT(port->id);
+ c2.tcam[4] = MVPP22_CLS_C2_PORT_ID(pmap);
+ c2.tcam[4] |= MVPP22_CLS_C2_TCAM_EN(MVPP22_CLS_C2_PORT_ID(pmap));
+
+ /* Match on Lookup Type */
+ c2.tcam[4] |= MVPP22_CLS_C2_TCAM_EN(MVPP22_CLS_C2_LU_TYPE(MVPP2_CLS_LU_TYPE_MASK));
+ c2.tcam[4] |= MVPP22_CLS_C2_LU_TYPE(rule->loc);
+
+ if (act->id == FLOW_ACTION_DROP) {
+ c2.act = MVPP22_CLS_C2_ACT_COLOR(MVPP22_C2_COL_RED_LOCK);
+ } else {
+ /* We want to keep the default color derived from the Header
+ * Parser drop entries, for VLAN and MAC filtering. This will
+ * assign a default color of Green or Red, and we want matches
+ * with a non-drop action to keep that color.
+ */
+ c2.act = MVPP22_CLS_C2_ACT_COLOR(MVPP22_C2_COL_NO_UPD_LOCK);
+
+ /* Update RSS status after matching this entry */
+ if (act->queue.ctx)
+ c2.attr[2] |= MVPP22_CLS_C2_ATTR2_RSS_EN;
+
+ /* Always lock the RSS_EN decision. We might have high prio
+ * rules steering to an RXQ and a lower prio one steering to RSS;
+ * we don't want the low prio RSS rule to overwrite this flag.
+ */
+ c2.act = MVPP22_CLS_C2_ACT_RSS_EN(MVPP22_C2_UPD_LOCK);
+
+ /* Mark packet as "forwarded to software", needed for RSS */
+ c2.act |= MVPP22_CLS_C2_ACT_FWD(MVPP22_C2_FWD_SW_LOCK);
+
+ c2.act |= MVPP22_CLS_C2_ACT_QHIGH(MVPP22_C2_UPD_LOCK) |
+ MVPP22_CLS_C2_ACT_QLOW(MVPP22_C2_UPD_LOCK);
+
+ if (act->queue.ctx) {
+ /* Get the global ctx number */
+ ctx = mvpp22_rss_ctx(port, act->queue.ctx);
+ if (ctx < 0)
+ return -EINVAL;
+
+ qh = (ctx >> 3) & MVPP22_CLS_C2_ATTR0_QHIGH_MASK;
+ ql = ctx & MVPP22_CLS_C2_ATTR0_QLOW_MASK;
+ } else {
+ qh = ((act->queue.index + port->first_rxq) >> 3) &
+ MVPP22_CLS_C2_ATTR0_QHIGH_MASK;
+ ql = (act->queue.index + port->first_rxq) &
+ MVPP22_CLS_C2_ATTR0_QLOW_MASK;
+ }
+
+ c2.attr[0] = MVPP22_CLS_C2_ATTR0_QHIGH(qh) |
+ MVPP22_CLS_C2_ATTR0_QLOW(ql);
+ }
+
+ c2.valid = true;
+
+ mvpp2_cls_c2_write(port->priv, &c2);
+
+ return 0;
+}
+
+static int mvpp2_port_c2_rfs_rule_insert(struct mvpp2_port *port,
+ struct mvpp2_rfs_rule *rule)
+{
+ return mvpp2_port_c2_tcam_rule_add(port, rule);
+}
+
+static int mvpp2_port_cls_rfs_rule_remove(struct mvpp2_port *port,
+ struct mvpp2_rfs_rule *rule)
+{
+ const struct mvpp2_cls_flow *flow;
+ struct mvpp2_cls_flow_entry fe;
+ int index, i;
+
+ for_each_cls_flow_id_containing_type(i, rule->flow_type) {
+ flow = mvpp2_cls_flow_get(i);
+ if (!flow)
+ return 0;
+
+ index = MVPP2_CLS_FLT_C2_RFS(port->id, flow->flow_id, rule->loc);
+
+ mvpp2_cls_flow_read(port->priv, index, &fe);
+ mvpp2_cls_flow_port_remove(&fe, BIT(port->id));
+ mvpp2_cls_flow_write(port->priv, &fe);
+ }
+
+ if (rule->c2_index >= 0)
+ mvpp22_port_c2_lookup_disable(port, rule->c2_index);
+
+ return 0;
+}
+
+static int mvpp2_port_flt_rfs_rule_insert(struct mvpp2_port *port,
+ struct mvpp2_rfs_rule *rule)
+{
+ const struct mvpp2_cls_flow *flow;
+ struct mvpp2 *priv = port->priv;
+ struct mvpp2_cls_flow_entry fe;
+ int index, ret, i;
+
+ if (rule->engine != MVPP22_CLS_ENGINE_C2)
+ return -EOPNOTSUPP;
+
+ ret = mvpp2_port_c2_rfs_rule_insert(port, rule);
+ if (ret)
+ return ret;
+
+ for_each_cls_flow_id_containing_type(i, rule->flow_type) {
+ flow = mvpp2_cls_flow_get(i);
+ if (!flow)
+ return 0;
+
+ if ((rule->hek_fields & flow->supported_hash_opts) != rule->hek_fields)
+ continue;
+
+ index = MVPP2_CLS_FLT_C2_RFS(port->id, flow->flow_id, rule->loc);
+
+ mvpp2_cls_flow_read(priv, index, &fe);
+ mvpp2_cls_flow_eng_set(&fe, rule->engine);
+ mvpp2_cls_flow_port_id_sel(&fe, true);
+ mvpp2_flow_set_hek_fields(&fe, rule->hek_fields);
+ mvpp2_cls_flow_lu_type_set(&fe, rule->loc);
+ mvpp2_cls_flow_port_add(&fe, 0xf);
+
+ mvpp2_cls_flow_write(priv, &fe);
+ }
+
+ return 0;
+}
+
+static int mvpp2_cls_c2_build_match(struct mvpp2_rfs_rule *rule)
+{
+ struct flow_rule *flow = rule->flow;
+ int offs = 0;
+
+ /* The order of insertion in C2 tcam must match the order in which
+ * the fields are found in the header
+ */
+ if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_match_vlan match;
+
+ flow_rule_match_vlan(flow, &match);
+ if (match.mask->vlan_id) {
+ rule->hek_fields |= MVPP22_CLS_HEK_OPT_VLAN;
+
+ rule->c2_tcam |= ((u64)match.key->vlan_id) << offs;
+ rule->c2_tcam_mask |= ((u64)match.mask->vlan_id) << offs;
+
+ /* Don't update the offset yet */
+ }
+
+ if (match.mask->vlan_priority) {
+ rule->hek_fields |= MVPP22_CLS_HEK_OPT_VLAN_PRI;
+
+ /* VLAN pri is always at offset 13 relative to the
+ * current offset
+ */
+ rule->c2_tcam |= ((u64)match.key->vlan_priority) <<
+ (offs + 13);
+ rule->c2_tcam_mask |= ((u64)match.mask->vlan_priority) <<
+ (offs + 13);
+ }
+
+ if (match.mask->vlan_dei)
+ return -EOPNOTSUPP;
+
+ /* vlan id and prio always seem to take a full 16-bit slot in
+ * the Header Extracted Key.
+ */
+ offs += 16;
+ }
+
+ if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_PORTS)) {
+ struct flow_match_ports match;
+
+ flow_rule_match_ports(flow, &match);
+ if (match.mask->src) {
+ rule->hek_fields |= MVPP22_CLS_HEK_OPT_L4SIP;
+
+ rule->c2_tcam |= ((u64)ntohs(match.key->src)) << offs;
+ rule->c2_tcam_mask |= ((u64)ntohs(match.mask->src)) << offs;
+ offs += mvpp2_cls_hek_field_size(MVPP22_CLS_HEK_OPT_L4SIP);
+ }
+
+ if (match.mask->dst) {
+ rule->hek_fields |= MVPP22_CLS_HEK_OPT_L4DIP;
+
+ rule->c2_tcam |= ((u64)ntohs(match.key->dst)) << offs;
+ rule->c2_tcam_mask |= ((u64)ntohs(match.mask->dst)) << offs;
+ offs += mvpp2_cls_hek_field_size(MVPP22_CLS_HEK_OPT_L4DIP);
+ }
+ }
+
+ if (hweight16(rule->hek_fields) > MVPP2_FLOW_N_FIELDS)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
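+
+/* Worked example (illustrative, not part of the original sources): a rule
+ * matching only the L4 destination port, say 443, sets
+ * MVPP22_CLS_HEK_OPT_L4DIP and packs the value at the current offset (0 here,
+ * since no preceding field is matched): c2_tcam = 0x01bb and
+ * c2_tcam_mask = 0xffff, both in bits [15:0].
+ */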
+
+static int mvpp2_cls_rfs_parse_rule(struct mvpp2_rfs_rule *rule)
+{
+ struct flow_rule *flow = rule->flow;
+ struct flow_action_entry *act;
+
+ if (!flow_action_basic_hw_stats_check(&rule->flow->action, NULL))
+ return -EOPNOTSUPP;
+
+ act = &flow->action.entries[0];
+ if (act->id != FLOW_ACTION_QUEUE && act->id != FLOW_ACTION_DROP)
+ return -EOPNOTSUPP;
+
+ /* When both an RSS context and a queue index are set, the index
+ * is considered as an offset to be added to the indirection table
+ * entries. We don't support this, so reject this rule.
+ */
+ if (act->queue.ctx && act->queue.index)
+ return -EOPNOTSUPP;
+
+ /* For now, only use the C2 engine which has a HEK size limited to 64
+ * bits for TCAM matching.
+ */
+ rule->engine = MVPP22_CLS_ENGINE_C2;
+
+ if (mvpp2_cls_c2_build_match(rule))
+ return -EINVAL;
+
+ return 0;
+}
+
+int mvpp2_ethtool_cls_rule_get(struct mvpp2_port *port,
+ struct ethtool_rxnfc *rxnfc)
+{
+ struct mvpp2_ethtool_fs *efs;
+
+ if (rxnfc->fs.location >= MVPP2_N_RFS_ENTRIES_PER_FLOW)
+ return -EINVAL;
+
+ efs = port->rfs_rules[rxnfc->fs.location];
+ if (!efs)
+ return -ENOENT;
+
+ memcpy(rxnfc, &efs->rxnfc, sizeof(efs->rxnfc));
+
+ return 0;
+}
+
+int mvpp2_ethtool_cls_rule_ins(struct mvpp2_port *port,
+ struct ethtool_rxnfc *info)
+{
+ struct ethtool_rx_flow_spec_input input = {};
+ struct ethtool_rx_flow_rule *ethtool_rule;
+ struct mvpp2_ethtool_fs *efs, *old_efs;
+ int ret = 0;
+
+ if (info->fs.location >= MVPP2_N_RFS_ENTRIES_PER_FLOW)
+ return -EINVAL;
+
+ efs = kzalloc(sizeof(*efs), GFP_KERNEL);
+ if (!efs)
+ return -ENOMEM;
+
+ input.fs = &info->fs;
+
+ /* We need to manually set the rss_ctx, since this info isn't present
+ * in info->fs
+ */
+ if (info->fs.flow_type & FLOW_RSS)
+ input.rss_ctx = info->rss_context;
+
+ ethtool_rule = ethtool_rx_flow_rule_create(&input);
+ if (IS_ERR(ethtool_rule)) {
+ ret = PTR_ERR(ethtool_rule);
+ goto clean_rule;
+ }
+
+ efs->rule.flow = ethtool_rule->rule;
+ efs->rule.flow_type = mvpp2_cls_ethtool_flow_to_type(info->fs.flow_type);
+ if (efs->rule.flow_type < 0) {
+ ret = efs->rule.flow_type;
+ goto clean_rule;
+ }
+
+ ret = mvpp2_cls_rfs_parse_rule(&efs->rule);
+ if (ret)
+ goto clean_eth_rule;
+
+ efs->rule.loc = info->fs.location;
+
+ /* Replace an already existing rule */
+ if (port->rfs_rules[efs->rule.loc]) {
+ old_efs = port->rfs_rules[efs->rule.loc];
+ ret = mvpp2_port_cls_rfs_rule_remove(port, &old_efs->rule);
+ if (ret)
+ goto clean_eth_rule;
+ kfree(old_efs);
+ port->n_rfs_rules--;
+ }
+
+ ret = mvpp2_port_flt_rfs_rule_insert(port, &efs->rule);
+ if (ret)
+ goto clean_eth_rule;
+
+ ethtool_rx_flow_rule_destroy(ethtool_rule);
+ efs->rule.flow = NULL;
+
+ memcpy(&efs->rxnfc, info, sizeof(*info));
+ port->rfs_rules[efs->rule.loc] = efs;
+ port->n_rfs_rules++;
+
+ return ret;
+
+clean_eth_rule:
+ ethtool_rx_flow_rule_destroy(ethtool_rule);
+clean_rule:
+ kfree(efs);
+ return ret;
+}
+
+int mvpp2_ethtool_cls_rule_del(struct mvpp2_port *port,
+ struct ethtool_rxnfc *info)
+{
+ struct mvpp2_ethtool_fs *efs;
+ int ret;
+
+ if (info->fs.location >= MVPP2_N_RFS_ENTRIES_PER_FLOW)
+ return -EINVAL;
+
+ efs = port->rfs_rules[info->fs.location];
+ if (!efs)
+ return -EINVAL;
+
+ /* Remove the rule from the engines. */
+ ret = mvpp2_port_cls_rfs_rule_remove(port, &efs->rule);
+ if (ret)
+ return ret;
+
+ port->n_rfs_rules--;
+ port->rfs_rules[info->fs.location] = NULL;
+ kfree(efs);
+
+ return 0;
+}
+
+static inline u32 mvpp22_rxfh_indir(struct mvpp2_port *port, u32 rxq)
+{
+ int nrxqs, cpu, cpus = num_possible_cpus();
+
+ /* Number of RXQs per CPU */
+ nrxqs = port->nrxqs / cpus;
+
+ /* CPU that will handle this rx queue */
+ cpu = rxq / nrxqs;
+
+ if (!cpu_online(cpu))
+ return port->first_rxq;
+
+ /* Indirection to better distribute the packets across the CPUs when
+ * configuring the RSS queues.
+ */
+ return port->first_rxq + ((rxq * nrxqs + rxq / cpus) % port->nrxqs);
+}
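+
+/* Worked example (illustrative, not part of the original sources): with
+ * port->nrxqs = 8 and 4 possible CPUs, nrxqs = 2 per CPU, so an indirection
+ * table value of rxq = 3 maps to first_rxq + ((3 * 2 + 3 / 4) % 8), i.e.
+ * first_rxq + 6.
+ */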
+
+static void mvpp22_rss_fill_table(struct mvpp2_port *port,
+ struct mvpp2_rss_table *table,
+ u32 rss_ctx)
+{
+ struct mvpp2 *priv = port->priv;
+ int i;
+
+ for (i = 0; i < MVPP22_RSS_TABLE_ENTRIES; i++) {
+ u32 sel = MVPP22_RSS_INDEX_TABLE(rss_ctx) |
+ MVPP22_RSS_INDEX_TABLE_ENTRY(i);
+ mvpp2_write(priv, MVPP22_RSS_INDEX, sel);
+
+ mvpp2_write(priv, MVPP22_RSS_TABLE_ENTRY,
+ mvpp22_rxfh_indir(port, table->indir[i]));
+ }
+}
+
+static int mvpp22_rss_context_create(struct mvpp2_port *port, u32 *rss_ctx)
+{
+ struct mvpp2 *priv = port->priv;
+ u32 ctx;
+
+ /* Find the first free RSS table */
+ for (ctx = 0; ctx < MVPP22_N_RSS_TABLES; ctx++) {
+ if (!priv->rss_tables[ctx])
+ break;
+ }
+
+ if (ctx == MVPP22_N_RSS_TABLES)
+ return -EINVAL;
+
+ priv->rss_tables[ctx] = kzalloc(sizeof(*priv->rss_tables[ctx]),
+ GFP_KERNEL);
+ if (!priv->rss_tables[ctx])
+ return -ENOMEM;
+
+ *rss_ctx = ctx;
+
+ /* Set the table width: replace the whole classifier Rx queue number
+ * with the value configured in the RSS table entries.
+ */
+ mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_TABLE(ctx));
+ mvpp2_write(priv, MVPP22_RSS_WIDTH, 8);
+
+ mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_QUEUE(ctx));
+ mvpp2_write(priv, MVPP22_RXQ2RSS_TABLE, MVPP22_RSS_TABLE_POINTER(ctx));
+
+ return 0;
+}
+
+int mvpp22_port_rss_ctx_create(struct mvpp2_port *port, u32 *port_ctx)
+{
+ u32 rss_ctx;
+ int ret, i;
+
+ ret = mvpp22_rss_context_create(port, &rss_ctx);
+ if (ret)
+ return ret;
+
+ /* Find the first available context number in the port, starting from 1.
+ * Context 0 on each port is reserved for the default context.
+ */
+ for (i = 1; i < MVPP22_N_RSS_TABLES; i++) {
+ if (port->rss_ctx[i] < 0)
+ break;
+ }
+
+ if (i == MVPP22_N_RSS_TABLES)
+ return -EINVAL;
+
+ port->rss_ctx[i] = rss_ctx;
+ *port_ctx = i;
+
+ return 0;
+}
+
+static struct mvpp2_rss_table *mvpp22_rss_table_get(struct mvpp2 *priv,
+ int rss_ctx)
+{
+ if (rss_ctx < 0 || rss_ctx >= MVPP22_N_RSS_TABLES)
+ return NULL;
+
+ return priv->rss_tables[rss_ctx];
+}
+
+int mvpp22_port_rss_ctx_delete(struct mvpp2_port *port, u32 port_ctx)
+{
+ struct mvpp2 *priv = port->priv;
+ struct ethtool_rxnfc *rxnfc;
+ int i, rss_ctx, ret;
+
+ rss_ctx = mvpp22_rss_ctx(port, port_ctx);
+
+ if (rss_ctx < 0 || rss_ctx >= MVPP22_N_RSS_TABLES)
+ return -EINVAL;
+
+ /* Invalidate any active classification rules that use this context */
+ for (i = 0; i < MVPP2_N_RFS_ENTRIES_PER_FLOW; i++) {
+ if (!port->rfs_rules[i])
+ continue;
+
+ rxnfc = &port->rfs_rules[i]->rxnfc;
+ if (!(rxnfc->fs.flow_type & FLOW_RSS) ||
+ rxnfc->rss_context != port_ctx)
+ continue;
+
+ ret = mvpp2_ethtool_cls_rule_del(port, rxnfc);
+ if (ret) {
+ netdev_warn(port->dev,
+ "couldn't remove classification rule %d associated to this context",
+ rxnfc->fs.location);
+ }
+ }
+
+ kfree(priv->rss_tables[rss_ctx]);
+
+ priv->rss_tables[rss_ctx] = NULL;
+ port->rss_ctx[port_ctx] = -1;
+
+ return 0;
+}
+
+int mvpp22_port_rss_ctx_indir_set(struct mvpp2_port *port, u32 port_ctx,
+ const u32 *indir)
+{
+ int rss_ctx = mvpp22_rss_ctx(port, port_ctx);
+ struct mvpp2_rss_table *rss_table = mvpp22_rss_table_get(port->priv,
+ rss_ctx);
+
+ if (!rss_table)
+ return -EINVAL;
+
+ memcpy(rss_table->indir, indir,
+ MVPP22_RSS_TABLE_ENTRIES * sizeof(rss_table->indir[0]));
+
+ mvpp22_rss_fill_table(port, rss_table, rss_ctx);
+
+ return 0;
+}
+
+int mvpp22_port_rss_ctx_indir_get(struct mvpp2_port *port, u32 port_ctx,
+ u32 *indir)
+{
+ int rss_ctx = mvpp22_rss_ctx(port, port_ctx);
+ struct mvpp2_rss_table *rss_table = mvpp22_rss_table_get(port->priv,
+ rss_ctx);
+
+ if (!rss_table)
+ return -EINVAL;
+
+ memcpy(indir, rss_table->indir,
+ MVPP22_RSS_TABLE_ENTRIES * sizeof(rss_table->indir[0]));
+
+ return 0;
+}
+
+int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port, struct ethtool_rxnfc *info)
+{
+ u16 hash_opts = 0;
+ u32 flow_type;
+
+ flow_type = mvpp2_cls_ethtool_flow_to_type(info->flow_type);
+
+ switch (flow_type) {
+ case MVPP22_FLOW_TCP4:
+ case MVPP22_FLOW_UDP4:
+ case MVPP22_FLOW_TCP6:
+ case MVPP22_FLOW_UDP6:
+ if (info->data & RXH_L4_B_0_1)
+ hash_opts |= MVPP22_CLS_HEK_OPT_L4SIP;
+ if (info->data & RXH_L4_B_2_3)
+ hash_opts |= MVPP22_CLS_HEK_OPT_L4DIP;
+ fallthrough;
+ case MVPP22_FLOW_IP4:
+ case MVPP22_FLOW_IP6:
+ if (info->data & RXH_L2DA)
+ hash_opts |= MVPP22_CLS_HEK_OPT_MAC_DA;
+ if (info->data & RXH_VLAN)
+ hash_opts |= MVPP22_CLS_HEK_OPT_VLAN;
+ if (info->data & RXH_L3_PROTO)
+ hash_opts |= MVPP22_CLS_HEK_OPT_L3_PROTO;
+ if (info->data & RXH_IP_SRC)
+ hash_opts |= (MVPP22_CLS_HEK_OPT_IP4SA |
+ MVPP22_CLS_HEK_OPT_IP6SA);
+ if (info->data & RXH_IP_DST)
+ hash_opts |= (MVPP22_CLS_HEK_OPT_IP4DA |
+ MVPP22_CLS_HEK_OPT_IP6DA);
+ break;
+ default: return -EOPNOTSUPP;
+ }
+
+ return mvpp2_port_rss_hash_opts_set(port, flow_type, hash_opts);
+}
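+
+/* Usage note (illustrative, not part of the original sources): this is the
+ * backend for ethtool's rx-flow-hash configuration, e.g. something like
+ * "ethtool -N eth0 rx-flow-hash tcp4 sdfn" requests hashing on the IP
+ * source/destination and both L4 port halves (RXH_IP_SRC | RXH_IP_DST |
+ * RXH_L4_B_0_1 | RXH_L4_B_2_3) for the TCP4 flow.
+ */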
+
+int mvpp2_ethtool_rxfh_get(struct mvpp2_port *port, struct ethtool_rxnfc *info)
+{
+ unsigned long hash_opts;
+ u32 flow_type;
+ int i;
+
+ flow_type = mvpp2_cls_ethtool_flow_to_type(info->flow_type);
+
+ hash_opts = mvpp2_port_rss_hash_opts_get(port, flow_type);
+ info->data = 0;
+
+ for_each_set_bit(i, &hash_opts, MVPP22_CLS_HEK_N_FIELDS) {
+ switch (BIT(i)) {
+ case MVPP22_CLS_HEK_OPT_MAC_DA:
+ info->data |= RXH_L2DA;
+ break;
+ case MVPP22_CLS_HEK_OPT_VLAN:
+ info->data |= RXH_VLAN;
+ break;
+ case MVPP22_CLS_HEK_OPT_L3_PROTO:
+ info->data |= RXH_L3_PROTO;
+ break;
+ case MVPP22_CLS_HEK_OPT_IP4SA:
+ case MVPP22_CLS_HEK_OPT_IP6SA:
+ info->data |= RXH_IP_SRC;
+ break;
+ case MVPP22_CLS_HEK_OPT_IP4DA:
+ case MVPP22_CLS_HEK_OPT_IP6DA:
+ info->data |= RXH_IP_DST;
+ break;
+ case MVPP22_CLS_HEK_OPT_L4SIP:
+ info->data |= RXH_L4_B_0_1;
+ break;
+ case MVPP22_CLS_HEK_OPT_L4DIP:
+ info->data |= RXH_L4_B_2_3;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+int mvpp22_port_rss_init(struct mvpp2_port *port)
+{
+ struct mvpp2_rss_table *table;
+ u32 context = 0;
+ int i, ret;
+
+ for (i = 0; i < MVPP22_N_RSS_TABLES; i++)
+ port->rss_ctx[i] = -1;
+
+ ret = mvpp22_rss_context_create(port, &context);
+ if (ret)
+ return ret;
+
+ table = mvpp22_rss_table_get(port->priv, context);
+ if (!table)
+ return -EINVAL;
+
+ port->rss_ctx[0] = context;
+
+ /* Configure the first table to evenly distribute the packets across
+ * real Rx Queues. The table entries map a hash to a port Rx Queue.
+ */
+ for (i = 0; i < MVPP22_RSS_TABLE_ENTRIES; i++)
+ table->indir[i] = ethtool_rxfh_indir_default(i, port->nrxqs);
+
+ mvpp22_rss_fill_table(port, table, mvpp22_rss_ctx(port, 0));
+
+ /* Configure default flows */
+ mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_IP4, MVPP22_CLS_HEK_IP4_2T);
+ mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_IP6, MVPP22_CLS_HEK_IP6_2T);
+ mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_TCP4, MVPP22_CLS_HEK_IP4_5T);
+ mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_TCP6, MVPP22_CLS_HEK_IP6_5T);
+ mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_UDP4, MVPP22_CLS_HEK_IP4_5T);
+ mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_UDP6, MVPP22_CLS_HEK_IP6_5T);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h
new file mode 100644
index 000000000..663157dc8
--- /dev/null
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h
@@ -0,0 +1,314 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * RSS and Classifier definitions for Marvell PPv2 Network Controller
+ *
+ * Copyright (C) 2014 Marvell
+ *
+ * Marcin Wojtas <mw@semihalf.com>
+ */
+
+#ifndef _MVPP2_CLS_H_
+#define _MVPP2_CLS_H_
+
+#include "mvpp2.h"
+#include "mvpp2_prs.h"
+
+/* Classifier constants */
+#define MVPP2_CLS_FLOWS_TBL_SIZE 512
+#define MVPP2_CLS_FLOWS_TBL_DATA_WORDS 3
+#define MVPP2_CLS_LKP_TBL_SIZE 64
+#define MVPP2_CLS_RX_QUEUES 256
+
+/* Classifier flow constants */
+
+#define MVPP2_FLOW_N_FIELDS 4
+
+enum mvpp2_cls_engine {
+ MVPP22_CLS_ENGINE_C2 = 1,
+ MVPP22_CLS_ENGINE_C3A,
+ MVPP22_CLS_ENGINE_C3B,
+ MVPP22_CLS_ENGINE_C4,
+ MVPP22_CLS_ENGINE_C3HA = 6,
+ MVPP22_CLS_ENGINE_C3HB = 7,
+};
+
+#define MVPP22_CLS_HEK_OPT_MAC_DA BIT(0)
+#define MVPP22_CLS_HEK_OPT_VLAN_PRI BIT(1)
+#define MVPP22_CLS_HEK_OPT_VLAN BIT(2)
+#define MVPP22_CLS_HEK_OPT_L3_PROTO BIT(3)
+#define MVPP22_CLS_HEK_OPT_IP4SA BIT(4)
+#define MVPP22_CLS_HEK_OPT_IP4DA BIT(5)
+#define MVPP22_CLS_HEK_OPT_IP6SA BIT(6)
+#define MVPP22_CLS_HEK_OPT_IP6DA BIT(7)
+#define MVPP22_CLS_HEK_OPT_L4SIP BIT(8)
+#define MVPP22_CLS_HEK_OPT_L4DIP BIT(9)
+#define MVPP22_CLS_HEK_N_FIELDS 10
+
+#define MVPP22_CLS_HEK_L4_OPTS (MVPP22_CLS_HEK_OPT_L4SIP | \
+ MVPP22_CLS_HEK_OPT_L4DIP)
+
+#define MVPP22_CLS_HEK_IP4_2T (MVPP22_CLS_HEK_OPT_IP4SA | \
+ MVPP22_CLS_HEK_OPT_IP4DA)
+
+#define MVPP22_CLS_HEK_IP6_2T (MVPP22_CLS_HEK_OPT_IP6SA | \
+ MVPP22_CLS_HEK_OPT_IP6DA)
+
+/* The fifth tuple in "5T" is the L4_Info field */
+#define MVPP22_CLS_HEK_IP4_5T (MVPP22_CLS_HEK_IP4_2T | \
+ MVPP22_CLS_HEK_L4_OPTS)
+
+#define MVPP22_CLS_HEK_IP6_5T (MVPP22_CLS_HEK_IP6_2T | \
+ MVPP22_CLS_HEK_L4_OPTS)
+
+#define MVPP22_CLS_HEK_TAGGED (MVPP22_CLS_HEK_OPT_VLAN | \
+ MVPP22_CLS_HEK_OPT_VLAN_PRI)
+
+enum mvpp2_cls_field_id {
+ MVPP22_CLS_FIELD_MAC_DA = 0x03,
+ MVPP22_CLS_FIELD_VLAN_PRI = 0x05,
+ MVPP22_CLS_FIELD_VLAN = 0x06,
+ MVPP22_CLS_FIELD_L3_PROTO = 0x0f,
+ MVPP22_CLS_FIELD_IP4SA = 0x10,
+ MVPP22_CLS_FIELD_IP4DA = 0x11,
+ MVPP22_CLS_FIELD_IP6SA = 0x17,
+ MVPP22_CLS_FIELD_IP6DA = 0x1a,
+ MVPP22_CLS_FIELD_L4SIP = 0x1d,
+ MVPP22_CLS_FIELD_L4DIP = 0x1e,
+};
+
+/* Classifier C2 engine constants */
+#define MVPP22_CLS_C2_TCAM_EN(data) ((data) << 16)
+
+enum mvpp22_cls_c2_action {
+ MVPP22_C2_NO_UPD = 0,
+ MVPP22_C2_NO_UPD_LOCK,
+ MVPP22_C2_UPD,
+ MVPP22_C2_UPD_LOCK,
+};
+
+enum mvpp22_cls_c2_fwd_action {
+ MVPP22_C2_FWD_NO_UPD = 0,
+ MVPP22_C2_FWD_NO_UPD_LOCK,
+ MVPP22_C2_FWD_SW,
+ MVPP22_C2_FWD_SW_LOCK,
+ MVPP22_C2_FWD_HW,
+ MVPP22_C2_FWD_HW_LOCK,
+ MVPP22_C2_FWD_HW_LOW_LAT,
+ MVPP22_C2_FWD_HW_LOW_LAT_LOCK,
+};
+
+enum mvpp22_cls_c2_color_action {
+ MVPP22_C2_COL_NO_UPD = 0,
+ MVPP22_C2_COL_NO_UPD_LOCK,
+ MVPP22_C2_COL_GREEN,
+ MVPP22_C2_COL_GREEN_LOCK,
+ MVPP22_C2_COL_YELLOW,
+ MVPP22_C2_COL_YELLOW_LOCK,
+ MVPP22_C2_COL_RED, /* Drop */
+ MVPP22_C2_COL_RED_LOCK, /* Drop */
+};
+
+#define MVPP2_CLS_C2_TCAM_WORDS 5
+#define MVPP2_CLS_C2_ATTR_WORDS 5
+
+struct mvpp2_cls_c2_entry {
+ u32 index;
+ /* TCAM lookup key */
+ u32 tcam[MVPP2_CLS_C2_TCAM_WORDS];
+ /* Actions to perform upon TCAM match */
+ u32 act;
+ /* Attributes relative to the actions to perform */
+ u32 attr[MVPP2_CLS_C2_ATTR_WORDS];
+ /* Entry validity */
+ u8 valid;
+};
+
+#define MVPP22_FLOW_ETHER_BIT BIT(0)
+#define MVPP22_FLOW_IP4_BIT BIT(1)
+#define MVPP22_FLOW_IP6_BIT BIT(2)
+#define MVPP22_FLOW_TCP_BIT BIT(3)
+#define MVPP22_FLOW_UDP_BIT BIT(4)
+
+#define MVPP22_FLOW_TCP4 (MVPP22_FLOW_ETHER_BIT | MVPP22_FLOW_IP4_BIT | MVPP22_FLOW_TCP_BIT)
+#define MVPP22_FLOW_TCP6 (MVPP22_FLOW_ETHER_BIT | MVPP22_FLOW_IP6_BIT | MVPP22_FLOW_TCP_BIT)
+#define MVPP22_FLOW_UDP4 (MVPP22_FLOW_ETHER_BIT | MVPP22_FLOW_IP4_BIT | MVPP22_FLOW_UDP_BIT)
+#define MVPP22_FLOW_UDP6 (MVPP22_FLOW_ETHER_BIT | MVPP22_FLOW_IP6_BIT | MVPP22_FLOW_UDP_BIT)
+#define MVPP22_FLOW_IP4 (MVPP22_FLOW_ETHER_BIT | MVPP22_FLOW_IP4_BIT)
+#define MVPP22_FLOW_IP6 (MVPP22_FLOW_ETHER_BIT | MVPP22_FLOW_IP6_BIT)
+#define MVPP22_FLOW_ETHERNET (MVPP22_FLOW_ETHER_BIT)
+
+/* Classifier C2 engine entries */
+#define MVPP22_CLS_C2_N_ENTRIES 256
+
+/* Number of per-port dedicated entries in the C2 TCAM */
+#define MVPP22_CLS_C2_PORT_N_FLOWS MVPP2_N_RFS_ENTRIES_PER_FLOW
+
+/* Each port has a dedicated range in the C2 TCAM: one entry per RFS rule plus
+ * one entry controlling the global RSS setting and the default rx queue
+ */
+#define MVPP22_CLS_C2_PORT_RANGE (MVPP22_CLS_C2_PORT_N_FLOWS + 1)
+#define MVPP22_CLS_C2_PORT_FIRST(p) ((p) * MVPP22_CLS_C2_PORT_RANGE)
+#define MVPP22_CLS_C2_RSS_ENTRY(p) (MVPP22_CLS_C2_PORT_FIRST((p) + 1) - 1)
+
+#define MVPP22_CLS_C2_PORT_FLOW_FIRST(p) (MVPP22_CLS_C2_PORT_FIRST(p))
+
+#define MVPP22_CLS_C2_RFS_LOC(p, loc) (MVPP22_CLS_C2_PORT_FLOW_FIRST(p) + (loc))
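+
+/* For instance, with MVPP22_CLS_C2_PORT_N_FLOWS == N, port p owns the C2
+ * entries p * (N + 1) .. p * (N + 1) + N: the rule at offset 'loc' sits at
+ * MVPP22_CLS_C2_RFS_LOC(p, loc), and the last entry of the range,
+ * MVPP22_CLS_C2_RSS_ENTRY(p), is the port's RSS/default rx queue entry.
+ */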
+
+/* Packet flow ID */
+enum mvpp2_prs_flow {
+ MVPP2_FL_START = 8,
+ MVPP2_FL_IP4_TCP_NF_UNTAG = MVPP2_FL_START,
+ MVPP2_FL_IP4_UDP_NF_UNTAG,
+ MVPP2_FL_IP4_TCP_NF_TAG,
+ MVPP2_FL_IP4_UDP_NF_TAG,
+ MVPP2_FL_IP6_TCP_NF_UNTAG,
+ MVPP2_FL_IP6_UDP_NF_UNTAG,
+ MVPP2_FL_IP6_TCP_NF_TAG,
+ MVPP2_FL_IP6_UDP_NF_TAG,
+ MVPP2_FL_IP4_TCP_FRAG_UNTAG,
+ MVPP2_FL_IP4_UDP_FRAG_UNTAG,
+ MVPP2_FL_IP4_TCP_FRAG_TAG,
+ MVPP2_FL_IP4_UDP_FRAG_TAG,
+ MVPP2_FL_IP6_TCP_FRAG_UNTAG,
+ MVPP2_FL_IP6_UDP_FRAG_UNTAG,
+ MVPP2_FL_IP6_TCP_FRAG_TAG,
+ MVPP2_FL_IP6_UDP_FRAG_TAG,
+ MVPP2_FL_IP4_UNTAG, /* non-TCP, non-UDP, same for below */
+ MVPP2_FL_IP4_TAG,
+ MVPP2_FL_IP6_UNTAG,
+ MVPP2_FL_IP6_TAG,
+ MVPP2_FL_NON_IP_UNTAG,
+ MVPP2_FL_NON_IP_TAG,
+ MVPP2_FL_LAST,
+};
+
+/* LU Type defined for all engines, and specified in the flow table */
+#define MVPP2_CLS_LU_TYPE_MASK 0x3f
+
+enum mvpp2_cls_lu_type {
+ /* rule->loc is used as a lu-type for the entries 0 - 62. */
+ MVPP22_CLS_LU_TYPE_ALL = 63,
+};
+
+#define MVPP2_N_FLOWS (MVPP2_FL_LAST - MVPP2_FL_START)
+
+struct mvpp2_cls_flow {
+ /* The L2-L4 traffic flow type */
+ int flow_type;
+
+ /* The first id in the flow table for this flow */
+ u16 flow_id;
+
+ /* The supported HEK fields for this flow */
+ u16 supported_hash_opts;
+
+ /* The Header Parser result_info that matches this flow */
+ struct mvpp2_prs_result_info prs_ri;
+};
+
+#define MVPP2_CLS_FLT_ENTRIES_PER_FLOW (MVPP2_MAX_PORTS + 1 + 16)
+#define MVPP2_CLS_FLT_FIRST(id) (((id) - MVPP2_FL_START) * \
+ MVPP2_CLS_FLT_ENTRIES_PER_FLOW)
+
+#define MVPP2_CLS_FLT_C2_RFS(port, id, rfs_n) (MVPP2_CLS_FLT_FIRST(id) + \
+ ((port) * MVPP2_MAX_PORTS) + \
+ (rfs_n))
+
+#define MVPP2_CLS_FLT_C2_RSS_ENTRY(id) (MVPP2_CLS_FLT_C2_RFS(MVPP2_MAX_PORTS, id, 0))
+#define MVPP2_CLS_FLT_HASH_ENTRY(port, id) (MVPP2_CLS_FLT_C2_RSS_ENTRY(id) + 1 + (port))
+#define MVPP2_CLS_FLT_LAST(id) (MVPP2_CLS_FLT_FIRST(id) + \
+ MVPP2_CLS_FLT_ENTRIES_PER_FLOW - 1)
+
+/* Iterate on each classifier flow id. Sets 'i' to be the index of the first
+ * entry in the cls_flows table for each different flow_id.
+ * This relies on entries having the same flow_id in the cls_flows table being
+ * contiguous.
+ */
+#define for_each_cls_flow_id(i) \
+ for ((i) = 0; (i) < MVPP2_N_PRS_FLOWS; (i)++) \
+ if ((i) > 0 && \
+ cls_flows[(i)].flow_id == cls_flows[(i) - 1].flow_id) \
+ continue; \
+ else
+
+/* Iterate on each classifier flow that has a given flow_type. Sets 'i' to be
+ * the index of the first entry in the cls_flows table for each different
+ * flow_id that has the given flow_type. This allows operating on all flows
+ * that match a given ethtool flow type.
+ */
+#define for_each_cls_flow_id_with_type(i, type) \
+ for_each_cls_flow_id((i)) \
+ if (cls_flows[(i)].flow_type != (type)) \
+ continue; \
+ else
+
+#define for_each_cls_flow_id_containing_type(i, type) \
+ for_each_cls_flow_id((i)) \
+ if ((cls_flows[(i)].flow_type & (type)) != (type)) \
+ continue; \
+ else
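+
+/* Usage sketch (cls_flows is the static flow description table defined in
+ * mvpp2_cls.c):
+ *
+ *	int i;
+ *
+ *	for_each_cls_flow_id(i)
+ *		pr_debug("cls_flows[%d] starts flow id %d\n",
+ *			 i, cls_flows[i].flow_id);
+ */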
+
+struct mvpp2_cls_flow_entry {
+ u32 index;
+ u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS];
+};
+
+struct mvpp2_cls_lookup_entry {
+ u32 lkpid;
+ u32 way;
+ u32 data;
+};
+
+int mvpp22_port_rss_init(struct mvpp2_port *port);
+
+int mvpp22_port_rss_enable(struct mvpp2_port *port);
+int mvpp22_port_rss_disable(struct mvpp2_port *port);
+
+int mvpp22_port_rss_ctx_create(struct mvpp2_port *port, u32 *rss_ctx);
+int mvpp22_port_rss_ctx_delete(struct mvpp2_port *port, u32 rss_ctx);
+
+int mvpp22_port_rss_ctx_indir_set(struct mvpp2_port *port, u32 rss_ctx,
+ const u32 *indir);
+int mvpp22_port_rss_ctx_indir_get(struct mvpp2_port *port, u32 rss_ctx,
+ u32 *indir);
+
+int mvpp2_ethtool_rxfh_get(struct mvpp2_port *port, struct ethtool_rxnfc *info);
+int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port, struct ethtool_rxnfc *info);
+
+void mvpp2_cls_init(struct mvpp2 *priv);
+
+void mvpp2_cls_port_config(struct mvpp2_port *port);
+
+void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port);
+
+int mvpp2_cls_flow_eng_get(struct mvpp2_cls_flow_entry *fe);
+
+u16 mvpp2_flow_get_hek_fields(struct mvpp2_cls_flow_entry *fe);
+
+const struct mvpp2_cls_flow *mvpp2_cls_flow_get(int flow);
+
+u32 mvpp2_cls_flow_hits(struct mvpp2 *priv, int index);
+
+void mvpp2_cls_flow_read(struct mvpp2 *priv, int index,
+ struct mvpp2_cls_flow_entry *fe);
+
+u32 mvpp2_cls_lookup_hits(struct mvpp2 *priv, int index);
+
+void mvpp2_cls_lookup_read(struct mvpp2 *priv, int lkpid, int way,
+ struct mvpp2_cls_lookup_entry *le);
+
+u32 mvpp2_cls_c2_hit_count(struct mvpp2 *priv, int c2_index);
+
+void mvpp2_cls_c2_read(struct mvpp2 *priv, int index,
+ struct mvpp2_cls_c2_entry *c2);
+
+int mvpp2_ethtool_cls_rule_get(struct mvpp2_port *port,
+ struct ethtool_rxnfc *rxnfc);
+
+int mvpp2_ethtool_cls_rule_ins(struct mvpp2_port *port,
+ struct ethtool_rxnfc *info);
+
+int mvpp2_ethtool_cls_rule_del(struct mvpp2_port *port,
+ struct ethtool_rxnfc *info);
+
+#endif
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c
new file mode 100644
index 000000000..0f9bc4f8e
--- /dev/null
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c
@@ -0,0 +1,734 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for Marvell PPv2 network controller for Armada 375 SoC.
+ *
+ * Copyright (C) 2018 Marvell
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/debugfs.h>
+
+#include "mvpp2.h"
+#include "mvpp2_prs.h"
+#include "mvpp2_cls.h"
+
+struct mvpp2_dbgfs_prs_entry {
+ int tid;
+ struct mvpp2 *priv;
+};
+
+struct mvpp2_dbgfs_c2_entry {
+ int id;
+ struct mvpp2 *priv;
+};
+
+struct mvpp2_dbgfs_flow_entry {
+ int flow;
+ struct mvpp2 *priv;
+};
+
+struct mvpp2_dbgfs_flow_tbl_entry {
+ int id;
+ struct mvpp2 *priv;
+};
+
+struct mvpp2_dbgfs_port_flow_entry {
+ struct mvpp2_port *port;
+ struct mvpp2_dbgfs_flow_entry *dbg_fe;
+};
+
+struct mvpp2_dbgfs_entries {
+ /* Entries for Header Parser debug info */
+ struct mvpp2_dbgfs_prs_entry prs_entries[MVPP2_PRS_TCAM_SRAM_SIZE];
+
+ /* Entries for Classifier C2 engine debug info */
+ struct mvpp2_dbgfs_c2_entry c2_entries[MVPP22_CLS_C2_N_ENTRIES];
+
+ /* Entries for Classifier Flow Table debug info */
+ struct mvpp2_dbgfs_flow_tbl_entry flt_entries[MVPP2_CLS_FLOWS_TBL_SIZE];
+
+ /* Entries for Classifier flows debug info */
+ struct mvpp2_dbgfs_flow_entry flow_entries[MVPP2_N_PRS_FLOWS];
+
+ /* Entries for per-port flows debug info */
+ struct mvpp2_dbgfs_port_flow_entry port_flow_entries[MVPP2_MAX_PORTS];
+};
+
+static int mvpp2_dbgfs_flow_flt_hits_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_dbgfs_flow_tbl_entry *entry = s->private;
+
+ u32 hits = mvpp2_cls_flow_hits(entry->priv, entry->id);
+
+ seq_printf(s, "%u\n", hits);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_flt_hits);
+
+static int mvpp2_dbgfs_flow_dec_hits_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_dbgfs_flow_entry *entry = s->private;
+
+ u32 hits = mvpp2_cls_lookup_hits(entry->priv, entry->flow);
+
+ seq_printf(s, "%u\n", hits);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_dec_hits);
+
+static int mvpp2_dbgfs_flow_type_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_dbgfs_flow_entry *entry = s->private;
+ const struct mvpp2_cls_flow *f;
+ const char *flow_name;
+
+ f = mvpp2_cls_flow_get(entry->flow);
+ if (!f)
+ return -EINVAL;
+
+ switch (f->flow_type) {
+ case IPV4_FLOW:
+ flow_name = "ipv4";
+ break;
+ case IPV6_FLOW:
+ flow_name = "ipv6";
+ break;
+ case TCP_V4_FLOW:
+ flow_name = "tcp4";
+ break;
+ case TCP_V6_FLOW:
+ flow_name = "tcp6";
+ break;
+ case UDP_V4_FLOW:
+ flow_name = "udp4";
+ break;
+ case UDP_V6_FLOW:
+ flow_name = "udp6";
+ break;
+ default:
+ flow_name = "other";
+ }
+
+ seq_printf(s, "%s\n", flow_name);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_type);
+
+static int mvpp2_dbgfs_flow_id_show(struct seq_file *s, void *unused)
+{
+ const struct mvpp2_dbgfs_flow_entry *entry = s->private;
+ const struct mvpp2_cls_flow *f;
+
+ f = mvpp2_cls_flow_get(entry->flow);
+ if (!f)
+ return -EINVAL;
+
+ seq_printf(s, "%d\n", f->flow_id);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_id);
+
+static int mvpp2_dbgfs_port_flow_hash_opt_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_dbgfs_port_flow_entry *entry = s->private;
+ struct mvpp2_port *port = entry->port;
+ struct mvpp2_cls_flow_entry fe;
+ const struct mvpp2_cls_flow *f;
+ int flow_index;
+ u16 hash_opts;
+
+ f = mvpp2_cls_flow_get(entry->dbg_fe->flow);
+ if (!f)
+ return -EINVAL;
+
+ flow_index = MVPP2_CLS_FLT_HASH_ENTRY(entry->port->id, f->flow_id);
+
+ mvpp2_cls_flow_read(port->priv, flow_index, &fe);
+
+ hash_opts = mvpp2_flow_get_hek_fields(&fe);
+
+ seq_printf(s, "0x%04x\n", hash_opts);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_port_flow_hash_opt);
+
+static int mvpp2_dbgfs_port_flow_engine_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_dbgfs_port_flow_entry *entry = s->private;
+ struct mvpp2_port *port = entry->port;
+ struct mvpp2_cls_flow_entry fe;
+ const struct mvpp2_cls_flow *f;
+ int flow_index, engine;
+
+ f = mvpp2_cls_flow_get(entry->dbg_fe->flow);
+ if (!f)
+ return -EINVAL;
+
+ flow_index = MVPP2_CLS_FLT_HASH_ENTRY(entry->port->id, f->flow_id);
+
+ mvpp2_cls_flow_read(port->priv, flow_index, &fe);
+
+ engine = mvpp2_cls_flow_eng_get(&fe);
+
+ seq_printf(s, "%d\n", engine);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_port_flow_engine);
+
+static int mvpp2_dbgfs_flow_c2_hits_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_dbgfs_c2_entry *entry = s->private;
+ u32 hits;
+
+ hits = mvpp2_cls_c2_hit_count(entry->priv, entry->id);
+
+ seq_printf(s, "%u\n", hits);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_c2_hits);
+
+static int mvpp2_dbgfs_flow_c2_rxq_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_dbgfs_c2_entry *entry = s->private;
+ struct mvpp2_cls_c2_entry c2;
+ u8 qh, ql;
+
+ mvpp2_cls_c2_read(entry->priv, entry->id, &c2);
+
+ qh = (c2.attr[0] >> MVPP22_CLS_C2_ATTR0_QHIGH_OFFS) &
+ MVPP22_CLS_C2_ATTR0_QHIGH_MASK;
+
+ ql = (c2.attr[0] >> MVPP22_CLS_C2_ATTR0_QLOW_OFFS) &
+ MVPP22_CLS_C2_ATTR0_QLOW_MASK;
+
+ seq_printf(s, "%d\n", (qh << 3 | ql));
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_c2_rxq);
+
+static int mvpp2_dbgfs_flow_c2_enable_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_dbgfs_c2_entry *entry = s->private;
+ struct mvpp2_cls_c2_entry c2;
+ int enabled;
+
+ mvpp2_cls_c2_read(entry->priv, entry->id, &c2);
+
+ enabled = !!(c2.attr[2] & MVPP22_CLS_C2_ATTR2_RSS_EN);
+
+ seq_printf(s, "%d\n", enabled);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_c2_enable);
+
+static int mvpp2_dbgfs_port_vid_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_port *port = s->private;
+ unsigned char byte[2], enable[2];
+ struct mvpp2 *priv = port->priv;
+ struct mvpp2_prs_entry pe;
+ unsigned long pmap;
+ u16 rvid;
+ int tid;
+
+ for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id);
+ tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) {
+ mvpp2_prs_init_from_hw(priv, &pe, tid);
+
+ pmap = mvpp2_prs_tcam_port_map_get(&pe);
+
+ if (!priv->prs_shadow[tid].valid)
+ continue;
+
+ if (!test_bit(port->id, &pmap))
+ continue;
+
+ mvpp2_prs_tcam_data_byte_get(&pe, 2, &byte[0], &enable[0]);
+ mvpp2_prs_tcam_data_byte_get(&pe, 3, &byte[1], &enable[1]);
+
+ rvid = ((byte[0] & 0xf) << 8) + byte[1];
+
+ seq_printf(s, "%u\n", rvid);
+ }
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_port_vid);
+
+static int mvpp2_dbgfs_port_parser_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_port *port = s->private;
+ struct mvpp2 *priv = port->priv;
+ struct mvpp2_prs_entry pe;
+ unsigned long pmap;
+ int i;
+
+ for (i = 0; i < MVPP2_PRS_TCAM_SRAM_SIZE; i++) {
+ mvpp2_prs_init_from_hw(port->priv, &pe, i);
+
+ pmap = mvpp2_prs_tcam_port_map_get(&pe);
+ if (priv->prs_shadow[i].valid && test_bit(port->id, &pmap))
+ seq_printf(s, "%03d\n", i);
+ }
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_port_parser);
+
+static int mvpp2_dbgfs_filter_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_port *port = s->private;
+ struct mvpp2 *priv = port->priv;
+ struct mvpp2_prs_entry pe;
+ unsigned long pmap;
+ int index, tid;
+
+ for (tid = MVPP2_PE_MAC_RANGE_START;
+ tid <= MVPP2_PE_MAC_RANGE_END; tid++) {
+ unsigned char da[ETH_ALEN], da_mask[ETH_ALEN];
+
+ if (!priv->prs_shadow[tid].valid ||
+ priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC ||
+ priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF)
+ continue;
+
+ mvpp2_prs_init_from_hw(priv, &pe, tid);
+
+ pmap = mvpp2_prs_tcam_port_map_get(&pe);
+
+ /* We only want entries active on this port */
+ if (!test_bit(port->id, &pmap))
+ continue;
+
+ /* Read mac addr from entry */
+ for (index = 0; index < ETH_ALEN; index++)
+ mvpp2_prs_tcam_data_byte_get(&pe, index, &da[index],
+ &da_mask[index]);
+
+ seq_printf(s, "%pM\n", da);
+ }
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_filter);
+
+static int mvpp2_dbgfs_prs_lu_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_dbgfs_prs_entry *entry = s->private;
+ struct mvpp2 *priv = entry->priv;
+
+ seq_printf(s, "%x\n", priv->prs_shadow[entry->tid].lu);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_prs_lu);
+
+static int mvpp2_dbgfs_prs_pmap_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_dbgfs_prs_entry *entry = s->private;
+ struct mvpp2_prs_entry pe;
+ unsigned int pmap;
+
+ mvpp2_prs_init_from_hw(entry->priv, &pe, entry->tid);
+
+ pmap = mvpp2_prs_tcam_port_map_get(&pe);
+ pmap &= MVPP2_PRS_PORT_MASK;
+
+ seq_printf(s, "%02x\n", pmap);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_prs_pmap);
+
+static int mvpp2_dbgfs_prs_ai_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_dbgfs_prs_entry *entry = s->private;
+ struct mvpp2_prs_entry pe;
+ unsigned char ai, ai_mask;
+
+ mvpp2_prs_init_from_hw(entry->priv, &pe, entry->tid);
+
+ ai = pe.tcam[MVPP2_PRS_TCAM_AI_WORD] & MVPP2_PRS_AI_MASK;
+ ai_mask = (pe.tcam[MVPP2_PRS_TCAM_AI_WORD] >> 16) & MVPP2_PRS_AI_MASK;
+
+ seq_printf(s, "%02x %02x\n", ai, ai_mask);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_prs_ai);
+
+static int mvpp2_dbgfs_prs_hdata_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_dbgfs_prs_entry *entry = s->private;
+ struct mvpp2_prs_entry pe;
+ unsigned char data[8], mask[8];
+ int i;
+
+ mvpp2_prs_init_from_hw(entry->priv, &pe, entry->tid);
+
+ for (i = 0; i < 8; i++)
+ mvpp2_prs_tcam_data_byte_get(&pe, i, &data[i], &mask[i]);
+
+ seq_printf(s, "%*phN %*phN\n", 8, data, 8, mask);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_prs_hdata);
+
+static int mvpp2_dbgfs_prs_sram_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_dbgfs_prs_entry *entry = s->private;
+ struct mvpp2_prs_entry pe;
+
+ mvpp2_prs_init_from_hw(entry->priv, &pe, entry->tid);
+
+ seq_printf(s, "%*phN\n", 14, pe.sram);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_prs_sram);
+
+static int mvpp2_dbgfs_prs_hits_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_dbgfs_prs_entry *entry = s->private;
+ int val;
+
+ val = mvpp2_prs_hits(entry->priv, entry->tid);
+ if (val < 0)
+ return val;
+
+ seq_printf(s, "%d\n", val);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_prs_hits);
+
+static int mvpp2_dbgfs_prs_valid_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_dbgfs_prs_entry *entry = s->private;
+ struct mvpp2 *priv = entry->priv;
+ int tid = entry->tid;
+
+ seq_printf(s, "%d\n", priv->prs_shadow[tid].valid ? 1 : 0);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_prs_valid);
+
+static int mvpp2_dbgfs_flow_port_init(struct dentry *parent,
+ struct mvpp2_port *port,
+ struct mvpp2_dbgfs_flow_entry *entry)
+{
+ struct mvpp2_dbgfs_port_flow_entry *port_entry;
+ struct dentry *port_dir;
+
+ port_dir = debugfs_create_dir(port->dev->name, parent);
+
+ port_entry = &port->priv->dbgfs_entries->port_flow_entries[port->id];
+
+ port_entry->port = port;
+ port_entry->dbg_fe = entry;
+
+ debugfs_create_file("hash_opts", 0444, port_dir, port_entry,
+ &mvpp2_dbgfs_port_flow_hash_opt_fops);
+
+ debugfs_create_file("engine", 0444, port_dir, port_entry,
+ &mvpp2_dbgfs_port_flow_engine_fops);
+
+ return 0;
+}
+
+static int mvpp2_dbgfs_flow_entry_init(struct dentry *parent,
+ struct mvpp2 *priv, int flow)
+{
+ struct mvpp2_dbgfs_flow_entry *entry;
+ struct dentry *flow_entry_dir;
+ char flow_entry_name[10];
+ int i, ret;
+
+ sprintf(flow_entry_name, "%02d", flow);
+
+ flow_entry_dir = debugfs_create_dir(flow_entry_name, parent);
+
+ entry = &priv->dbgfs_entries->flow_entries[flow];
+
+ entry->flow = flow;
+ entry->priv = priv;
+
+ debugfs_create_file("dec_hits", 0444, flow_entry_dir, entry,
+ &mvpp2_dbgfs_flow_dec_hits_fops);
+
+ debugfs_create_file("type", 0444, flow_entry_dir, entry,
+ &mvpp2_dbgfs_flow_type_fops);
+
+ debugfs_create_file("id", 0444, flow_entry_dir, entry,
+ &mvpp2_dbgfs_flow_id_fops);
+
+ /* Create entry for each port */
+ for (i = 0; i < priv->port_count; i++) {
+ ret = mvpp2_dbgfs_flow_port_init(flow_entry_dir,
+ priv->port_list[i], entry);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mvpp2_dbgfs_flow_init(struct dentry *parent, struct mvpp2 *priv)
+{
+ struct dentry *flow_dir;
+ int i, ret;
+
+ flow_dir = debugfs_create_dir("flows", parent);
+
+ for (i = 0; i < MVPP2_N_PRS_FLOWS; i++) {
+ ret = mvpp2_dbgfs_flow_entry_init(flow_dir, priv, i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mvpp2_dbgfs_prs_entry_init(struct dentry *parent,
+ struct mvpp2 *priv, int tid)
+{
+ struct mvpp2_dbgfs_prs_entry *entry;
+ struct dentry *prs_entry_dir;
+ char prs_entry_name[10];
+
+ if (tid >= MVPP2_PRS_TCAM_SRAM_SIZE)
+ return -EINVAL;
+
+ sprintf(prs_entry_name, "%03d", tid);
+
+ prs_entry_dir = debugfs_create_dir(prs_entry_name, parent);
+
+ entry = &priv->dbgfs_entries->prs_entries[tid];
+
+ entry->tid = tid;
+ entry->priv = priv;
+
+ /* Create each attr */
+ debugfs_create_file("sram", 0444, prs_entry_dir, entry,
+ &mvpp2_dbgfs_prs_sram_fops);
+
+ debugfs_create_file("valid", 0644, prs_entry_dir, entry,
+ &mvpp2_dbgfs_prs_valid_fops);
+
+ debugfs_create_file("lookup_id", 0644, prs_entry_dir, entry,
+ &mvpp2_dbgfs_prs_lu_fops);
+
+ debugfs_create_file("ai", 0644, prs_entry_dir, entry,
+ &mvpp2_dbgfs_prs_ai_fops);
+
+ debugfs_create_file("header_data", 0644, prs_entry_dir, entry,
+ &mvpp2_dbgfs_prs_hdata_fops);
+
+ debugfs_create_file("hits", 0444, prs_entry_dir, entry,
+ &mvpp2_dbgfs_prs_hits_fops);
+
+ debugfs_create_file("pmap", 0444, prs_entry_dir, entry,
+ &mvpp2_dbgfs_prs_pmap_fops);
+
+ return 0;
+}
+
+static int mvpp2_dbgfs_prs_init(struct dentry *parent, struct mvpp2 *priv)
+{
+ struct dentry *prs_dir;
+ int i, ret;
+
+ prs_dir = debugfs_create_dir("parser", parent);
+
+ for (i = 0; i < MVPP2_PRS_TCAM_SRAM_SIZE; i++) {
+ ret = mvpp2_dbgfs_prs_entry_init(prs_dir, priv, i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mvpp2_dbgfs_c2_entry_init(struct dentry *parent,
+ struct mvpp2 *priv, int id)
+{
+ struct mvpp2_dbgfs_c2_entry *entry;
+ struct dentry *c2_entry_dir;
+ char c2_entry_name[10];
+
+ if (id >= MVPP22_CLS_C2_N_ENTRIES)
+ return -EINVAL;
+
+ sprintf(c2_entry_name, "%03d", id);
+
+ c2_entry_dir = debugfs_create_dir(c2_entry_name, parent);
+
+ entry = &priv->dbgfs_entries->c2_entries[id];
+
+ entry->id = id;
+ entry->priv = priv;
+
+ debugfs_create_file("hits", 0444, c2_entry_dir, entry,
+ &mvpp2_dbgfs_flow_c2_hits_fops);
+
+ debugfs_create_file("default_rxq", 0444, c2_entry_dir, entry,
+ &mvpp2_dbgfs_flow_c2_rxq_fops);
+
+ debugfs_create_file("rss_enable", 0444, c2_entry_dir, entry,
+ &mvpp2_dbgfs_flow_c2_enable_fops);
+
+ return 0;
+}
+
+static int mvpp2_dbgfs_flow_tbl_entry_init(struct dentry *parent,
+ struct mvpp2 *priv, int id)
+{
+ struct mvpp2_dbgfs_flow_tbl_entry *entry;
+ struct dentry *flow_tbl_entry_dir;
+ char flow_tbl_entry_name[10];
+
+ if (id >= MVPP2_CLS_FLOWS_TBL_SIZE)
+ return -EINVAL;
+
+ sprintf(flow_tbl_entry_name, "%03d", id);
+
+ flow_tbl_entry_dir = debugfs_create_dir(flow_tbl_entry_name, parent);
+
+ entry = &priv->dbgfs_entries->flt_entries[id];
+
+ entry->id = id;
+ entry->priv = priv;
+
+ debugfs_create_file("hits", 0444, flow_tbl_entry_dir, entry,
+ &mvpp2_dbgfs_flow_flt_hits_fops);
+
+ return 0;
+}
+
+static int mvpp2_dbgfs_cls_init(struct dentry *parent, struct mvpp2 *priv)
+{
+ struct dentry *cls_dir, *c2_dir, *flow_tbl_dir;
+ int i, ret;
+
+ cls_dir = debugfs_create_dir("classifier", parent);
+
+ c2_dir = debugfs_create_dir("c2", cls_dir);
+
+ for (i = 0; i < MVPP22_CLS_C2_N_ENTRIES; i++) {
+ ret = mvpp2_dbgfs_c2_entry_init(c2_dir, priv, i);
+ if (ret)
+ return ret;
+ }
+
+ flow_tbl_dir = debugfs_create_dir("flow_table", cls_dir);
+
+ for (i = 0; i < MVPP2_CLS_FLOWS_TBL_SIZE; i++) {
+ ret = mvpp2_dbgfs_flow_tbl_entry_init(flow_tbl_dir, priv, i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mvpp2_dbgfs_port_init(struct dentry *parent,
+ struct mvpp2_port *port)
+{
+ struct dentry *port_dir;
+
+ port_dir = debugfs_create_dir(port->dev->name, parent);
+
+ debugfs_create_file("parser_entries", 0444, port_dir, port,
+ &mvpp2_dbgfs_port_parser_fops);
+
+ debugfs_create_file("mac_filter", 0444, port_dir, port,
+ &mvpp2_dbgfs_filter_fops);
+
+ debugfs_create_file("vid_filter", 0444, port_dir, port,
+ &mvpp2_dbgfs_port_vid_fops);
+
+ return 0;
+}
+
+static struct dentry *mvpp2_root;
+
+void mvpp2_dbgfs_exit(void)
+{
+ debugfs_remove(mvpp2_root);
+}
+
+void mvpp2_dbgfs_cleanup(struct mvpp2 *priv)
+{
+ debugfs_remove_recursive(priv->dbgfs_dir);
+
+ kfree(priv->dbgfs_entries);
+}
+
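+/* mvpp2_dbgfs_init() creates the following hierarchy, rooted in a debugfs
+ * directory named MVPP2_DRIVER_NAME:
+ *
+ *	<root>/<controller>/parser/<tid>/{sram,valid,lookup_id,ai,header_data,hits,pmap}
+ *	<root>/<controller>/classifier/c2/<id>/{hits,default_rxq,rss_enable}
+ *	<root>/<controller>/classifier/flow_table/<id>/hits
+ *	<root>/<controller>/flows/<flow>/{dec_hits,type,id}
+ *	<root>/<controller>/flows/<flow>/<port>/{hash_opts,engine}
+ *	<root>/<controller>/<port>/{parser_entries,mac_filter,vid_filter}
+ */
+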
+void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name)
+{
+ struct dentry *mvpp2_dir;
+ int ret, i;
+
+ if (!mvpp2_root)
+ mvpp2_root = debugfs_create_dir(MVPP2_DRIVER_NAME, NULL);
+
+ mvpp2_dir = debugfs_create_dir(name, mvpp2_root);
+
+ priv->dbgfs_dir = mvpp2_dir;
+ priv->dbgfs_entries = kzalloc(sizeof(*priv->dbgfs_entries), GFP_KERNEL);
+ if (!priv->dbgfs_entries)
+ goto err;
+
+ ret = mvpp2_dbgfs_prs_init(mvpp2_dir, priv);
+ if (ret)
+ goto err;
+
+ ret = mvpp2_dbgfs_cls_init(mvpp2_dir, priv);
+ if (ret)
+ goto err;
+
+ for (i = 0; i < priv->port_count; i++) {
+ ret = mvpp2_dbgfs_port_init(mvpp2_dir, priv->port_list[i]);
+ if (ret)
+ goto err;
+ }
+
+ ret = mvpp2_dbgfs_flow_init(mvpp2_dir, priv);
+ if (ret)
+ goto err;
+
+ return;
+err:
+ mvpp2_dbgfs_cleanup(priv);
+}
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
new file mode 100644
index 000000000..aca17082b
--- /dev/null
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -0,0 +1,7785 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for Marvell PPv2 network controller for Armada 375 SoC.
+ *
+ * Copyright (C) 2014 Marvell
+ *
+ * Marcin Wojtas <mw@semihalf.com>
+ */
+
+#include <linux/acpi.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include <linux/skbuff.h>
+#include <linux/inetdevice.h>
+#include <linux/mbus.h>
+#include <linux/module.h>
+#include <linux/mfd/syscon.h>
+#include <linux/interrupt.h>
+#include <linux/cpumask.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/of_address.h>
+#include <linux/phy.h>
+#include <linux/phylink.h>
+#include <linux/phy/phy.h>
+#include <linux/ptp_classify.h>
+#include <linux/clk.h>
+#include <linux/hrtimer.h>
+#include <linux/ktime.h>
+#include <linux/regmap.h>
+#include <uapi/linux/ppp_defs.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/page_pool/helpers.h>
+#include <net/tso.h>
+#include <linux/bpf_trace.h>
+
+#include "mvpp2.h"
+#include "mvpp2_prs.h"
+#include "mvpp2_cls.h"
+
+enum mvpp2_bm_pool_log_num {
+ MVPP2_BM_SHORT,
+ MVPP2_BM_LONG,
+ MVPP2_BM_JUMBO,
+ MVPP2_BM_POOLS_NUM
+};
+
+static struct {
+ int pkt_size;
+ int buf_num;
+} mvpp2_pools[MVPP2_BM_POOLS_NUM];
+
+/* The prototype is added here to be used in start_dev when using ACPI. This
+ * will be removed once phylink is used for all modes (dt+ACPI).
+ */
+static void mvpp2_acpi_start(struct mvpp2_port *port);
+
+/* Queue modes */
+#define MVPP2_QDIST_SINGLE_MODE 0
+#define MVPP2_QDIST_MULTI_MODE 1
+
+static int queue_mode = MVPP2_QDIST_MULTI_MODE;
+
+module_param(queue_mode, int, 0444);
+MODULE_PARM_DESC(queue_mode, "Set queue_mode (single=0, multi=1)");
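+
+/* When the driver is built in, the mode can be selected on the kernel command
+ * line, e.g. "mvpp2.queue_mode=0" for single-queue mode; when built as a
+ * module, pass "queue_mode=0" to modprobe.
+ */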
+
+/* Utility/helper methods */
+
+void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
+{
+ writel(data, priv->swth_base[0] + offset);
+}
+
+u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
+{
+ return readl(priv->swth_base[0] + offset);
+}
+
+static u32 mvpp2_read_relaxed(struct mvpp2 *priv, u32 offset)
+{
+ return readl_relaxed(priv->swth_base[0] + offset);
+}
+
+static inline u32 mvpp2_cpu_to_thread(struct mvpp2 *priv, int cpu)
+{
+ return cpu % priv->nthreads;
+}
+
+static void mvpp2_cm3_write(struct mvpp2 *priv, u32 offset, u32 data)
+{
+ writel(data, priv->cm3_base + offset);
+}
+
+static u32 mvpp2_cm3_read(struct mvpp2 *priv, u32 offset)
+{
+ return readl(priv->cm3_base + offset);
+}
+
+static struct page_pool *
+mvpp2_create_page_pool(struct device *dev, int num, int len,
+ enum dma_data_direction dma_dir)
+{
+ struct page_pool_params pp_params = {
+ /* internal DMA mapping in page_pool */
+ .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+ .pool_size = num,
+ .nid = NUMA_NO_NODE,
+ .dev = dev,
+ .dma_dir = dma_dir,
+ .offset = MVPP2_SKB_HEADROOM,
+ .max_len = len,
+ };
+
+ return page_pool_create(&pp_params);
+}
+
+/* These accessors should be used to access:
+ *
+ * - per-thread registers, where each thread has its own copy of the
+ * register.
+ *
+ * MVPP2_BM_VIRT_ALLOC_REG
+ * MVPP2_BM_ADDR_HIGH_ALLOC
+ * MVPP22_BM_ADDR_HIGH_RLS_REG
+ * MVPP2_BM_VIRT_RLS_REG
+ * MVPP2_ISR_RX_TX_CAUSE_REG
+ * MVPP2_ISR_RX_TX_MASK_REG
+ * MVPP2_TXQ_NUM_REG
+ * MVPP2_AGGR_TXQ_UPDATE_REG
+ * MVPP2_TXQ_RSVD_REQ_REG
+ * MVPP2_TXQ_RSVD_RSLT_REG
+ * MVPP2_TXQ_SENT_REG
+ * MVPP2_RXQ_NUM_REG
+ *
+ * - global registers that must be accessed through a specific thread
+ * window, because they are related to an access to a per-thread
+ * register
+ *
+ * MVPP2_BM_PHY_ALLOC_REG (related to MVPP2_BM_VIRT_ALLOC_REG)
+ * MVPP2_BM_PHY_RLS_REG (related to MVPP2_BM_VIRT_RLS_REG)
+ * MVPP2_RXQ_THRESH_REG (related to MVPP2_RXQ_NUM_REG)
+ * MVPP2_RXQ_DESC_ADDR_REG (related to MVPP2_RXQ_NUM_REG)
+ * MVPP2_RXQ_DESC_SIZE_REG (related to MVPP2_RXQ_NUM_REG)
+ * MVPP2_RXQ_INDEX_REG (related to MVPP2_RXQ_NUM_REG)
+ *   MVPP2_TXQ_PENDING_REG    (related to MVPP2_TXQ_NUM_REG)
+ *   MVPP2_TXQ_DESC_ADDR_REG  (related to MVPP2_TXQ_NUM_REG)
+ *   MVPP2_TXQ_DESC_SIZE_REG  (related to MVPP2_TXQ_NUM_REG)
+ *   MVPP2_TXQ_INDEX_REG      (related to MVPP2_TXQ_NUM_REG)
+ *   MVPP2_TXQ_PREF_BUF_REG   (related to MVPP2_TXQ_NUM_REG)
+ */
+static void mvpp2_thread_write(struct mvpp2 *priv, unsigned int thread,
+ u32 offset, u32 data)
+{
+ writel(data, priv->swth_base[thread] + offset);
+}
+
+static u32 mvpp2_thread_read(struct mvpp2 *priv, unsigned int thread,
+ u32 offset)
+{
+ return readl(priv->swth_base[thread] + offset);
+}
+
+static void mvpp2_thread_write_relaxed(struct mvpp2 *priv, unsigned int thread,
+ u32 offset, u32 data)
+{
+ writel_relaxed(data, priv->swth_base[thread] + offset);
+}
+
+static u32 mvpp2_thread_read_relaxed(struct mvpp2 *priv, unsigned int thread,
+ u32 offset)
+{
+ return readl_relaxed(priv->swth_base[thread] + offset);
+}
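+
+/* These accessors are typically used together with mvpp2_cpu_to_thread() and
+ * a get_cpu()/put_cpu() pair, as done by the BM helpers below, e.g.:
+ *
+ *	unsigned int thread = mvpp2_cpu_to_thread(priv, get_cpu());
+ *
+ *	mvpp2_thread_read(priv, thread, MVPP2_BM_VIRT_ALLOC_REG);
+ *	put_cpu();
+ */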
+
+static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port,
+ struct mvpp2_tx_desc *tx_desc)
+{
+ if (port->priv->hw_version == MVPP21)
+ return le32_to_cpu(tx_desc->pp21.buf_dma_addr);
+ else
+ return le64_to_cpu(tx_desc->pp22.buf_dma_addr_ptp) &
+ MVPP2_DESC_DMA_MASK;
+}
+
+static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
+ struct mvpp2_tx_desc *tx_desc,
+ dma_addr_t dma_addr)
+{
+ dma_addr_t addr, offset;
+
+ addr = dma_addr & ~MVPP2_TX_DESC_ALIGN;
+ offset = dma_addr & MVPP2_TX_DESC_ALIGN;
+
+ if (port->priv->hw_version == MVPP21) {
+ tx_desc->pp21.buf_dma_addr = cpu_to_le32(addr);
+ tx_desc->pp21.packet_offset = offset;
+ } else {
+ __le64 val = cpu_to_le64(addr);
+
+ tx_desc->pp22.buf_dma_addr_ptp &= ~cpu_to_le64(MVPP2_DESC_DMA_MASK);
+ tx_desc->pp22.buf_dma_addr_ptp |= val;
+ tx_desc->pp22.packet_offset = offset;
+ }
+}
+
+static size_t mvpp2_txdesc_size_get(struct mvpp2_port *port,
+ struct mvpp2_tx_desc *tx_desc)
+{
+ if (port->priv->hw_version == MVPP21)
+ return le16_to_cpu(tx_desc->pp21.data_size);
+ else
+ return le16_to_cpu(tx_desc->pp22.data_size);
+}
+
+static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
+ struct mvpp2_tx_desc *tx_desc,
+ size_t size)
+{
+ if (port->priv->hw_version == MVPP21)
+ tx_desc->pp21.data_size = cpu_to_le16(size);
+ else
+ tx_desc->pp22.data_size = cpu_to_le16(size);
+}
+
+static void mvpp2_txdesc_txq_set(struct mvpp2_port *port,
+ struct mvpp2_tx_desc *tx_desc,
+ unsigned int txq)
+{
+ if (port->priv->hw_version == MVPP21)
+ tx_desc->pp21.phys_txq = txq;
+ else
+ tx_desc->pp22.phys_txq = txq;
+}
+
+static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port,
+ struct mvpp2_tx_desc *tx_desc,
+ unsigned int command)
+{
+ if (port->priv->hw_version == MVPP21)
+ tx_desc->pp21.command = cpu_to_le32(command);
+ else
+ tx_desc->pp22.command = cpu_to_le32(command);
+}
+
+static unsigned int mvpp2_txdesc_offset_get(struct mvpp2_port *port,
+ struct mvpp2_tx_desc *tx_desc)
+{
+ if (port->priv->hw_version == MVPP21)
+ return tx_desc->pp21.packet_offset;
+ else
+ return tx_desc->pp22.packet_offset;
+}
+
+static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
+ struct mvpp2_rx_desc *rx_desc)
+{
+ if (port->priv->hw_version == MVPP21)
+ return le32_to_cpu(rx_desc->pp21.buf_dma_addr);
+ else
+ return le64_to_cpu(rx_desc->pp22.buf_dma_addr_key_hash) &
+ MVPP2_DESC_DMA_MASK;
+}
+
+static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
+ struct mvpp2_rx_desc *rx_desc)
+{
+ if (port->priv->hw_version == MVPP21)
+ return le32_to_cpu(rx_desc->pp21.buf_cookie);
+ else
+ return le64_to_cpu(rx_desc->pp22.buf_cookie_misc) &
+ MVPP2_DESC_DMA_MASK;
+}
+
+static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
+ struct mvpp2_rx_desc *rx_desc)
+{
+ if (port->priv->hw_version == MVPP21)
+ return le16_to_cpu(rx_desc->pp21.data_size);
+ else
+ return le16_to_cpu(rx_desc->pp22.data_size);
+}
+
+static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port,
+ struct mvpp2_rx_desc *rx_desc)
+{
+ if (port->priv->hw_version == MVPP21)
+ return le32_to_cpu(rx_desc->pp21.status);
+ else
+ return le32_to_cpu(rx_desc->pp22.status);
+}
+
+static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
+{
+ txq_pcpu->txq_get_index++;
+ if (txq_pcpu->txq_get_index == txq_pcpu->size)
+ txq_pcpu->txq_get_index = 0;
+}
+
+static void mvpp2_txq_inc_put(struct mvpp2_port *port,
+ struct mvpp2_txq_pcpu *txq_pcpu,
+ void *data,
+ struct mvpp2_tx_desc *tx_desc,
+ enum mvpp2_tx_buf_type buf_type)
+{
+ struct mvpp2_txq_pcpu_buf *tx_buf =
+ txq_pcpu->buffs + txq_pcpu->txq_put_index;
+ tx_buf->type = buf_type;
+ if (buf_type == MVPP2_TYPE_SKB)
+ tx_buf->skb = data;
+ else
+ tx_buf->xdpf = data;
+ tx_buf->size = mvpp2_txdesc_size_get(port, tx_desc);
+ tx_buf->dma = mvpp2_txdesc_dma_addr_get(port, tx_desc) +
+ mvpp2_txdesc_offset_get(port, tx_desc);
+ txq_pcpu->txq_put_index++;
+ if (txq_pcpu->txq_put_index == txq_pcpu->size)
+ txq_pcpu->txq_put_index = 0;
+}
+
+/* Get the maximum number of RXQs */
+static int mvpp2_get_nrxqs(struct mvpp2 *priv)
+{
+ unsigned int nrxqs;
+
+ if (priv->hw_version >= MVPP22 && queue_mode == MVPP2_QDIST_SINGLE_MODE)
+ return 1;
+
+	/* According to the PPv2.2 datasheet and our experiments on
+	 * PPv2.1, RX queues have an allocation granularity of 4 (when
+	 * more than a single queue is used on PPv2.2).
+	 * Round up to the nearest multiple of 4.
+	 */
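+	/* e.g. 6 possible CPUs -> nrxqs = (6 + 3) & ~0x3 = 8 */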
+ nrxqs = (num_possible_cpus() + 3) & ~0x3;
+ if (nrxqs > MVPP2_PORT_MAX_RXQ)
+ nrxqs = MVPP2_PORT_MAX_RXQ;
+
+ return nrxqs;
+}
+
+/* Get the physical egress port number */
+static inline int mvpp2_egress_port(struct mvpp2_port *port)
+{
+ return MVPP2_MAX_TCONT + port->id;
+}
+
+/* Get the physical TXQ number */
+static inline int mvpp2_txq_phys(int port, int txq)
+{
+ return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
+}
+
+/* Returns a struct page if page_pool is set, otherwise a buffer */
+static void *mvpp2_frag_alloc(const struct mvpp2_bm_pool *pool,
+ struct page_pool *page_pool)
+{
+ if (page_pool)
+ return page_pool_dev_alloc_pages(page_pool);
+
+ if (likely(pool->frag_size <= PAGE_SIZE))
+ return netdev_alloc_frag(pool->frag_size);
+
+ return kmalloc(pool->frag_size, GFP_ATOMIC);
+}
+
+static void mvpp2_frag_free(const struct mvpp2_bm_pool *pool,
+ struct page_pool *page_pool, void *data)
+{
+ if (page_pool)
+ page_pool_put_full_page(page_pool, virt_to_head_page(data), false);
+ else if (likely(pool->frag_size <= PAGE_SIZE))
+ skb_free_frag(data);
+ else
+ kfree(data);
+}
+
+/* Buffer Manager configuration routines */
+
+/* Create pool */
+static int mvpp2_bm_pool_create(struct device *dev, struct mvpp2 *priv,
+ struct mvpp2_bm_pool *bm_pool, int size)
+{
+ u32 val;
+
+ /* Number of buffer pointers must be a multiple of 16, as per
+ * hardware constraints
+ */
+ if (!IS_ALIGNED(size, 16))
+ return -EINVAL;
+
+	/* PPv2.1 needs 8 bytes per buffer pointer, PPv2.2 and PPv2.3 need 16
+	 * bytes per buffer pointer
+	 */
+ if (priv->hw_version == MVPP21)
+ bm_pool->size_bytes = 2 * sizeof(u32) * size;
+ else
+ bm_pool->size_bytes = 2 * sizeof(u64) * size;
+
+ bm_pool->virt_addr = dma_alloc_coherent(dev, bm_pool->size_bytes,
+ &bm_pool->dma_addr,
+ GFP_KERNEL);
+ if (!bm_pool->virt_addr)
+ return -ENOMEM;
+
+ if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr,
+ MVPP2_BM_POOL_PTR_ALIGN)) {
+ dma_free_coherent(dev, bm_pool->size_bytes,
+ bm_pool->virt_addr, bm_pool->dma_addr);
+ dev_err(dev, "BM pool %d is not %d bytes aligned\n",
+ bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
+ return -ENOMEM;
+ }
+
+ mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
+ lower_32_bits(bm_pool->dma_addr));
+ mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);
+
+ val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
+ val |= MVPP2_BM_START_MASK;
+
+ val &= ~MVPP2_BM_LOW_THRESH_MASK;
+ val &= ~MVPP2_BM_HIGH_THRESH_MASK;
+
+	/* Set the 8-pool BPPI thresholds for MVPP23 */
+ if (priv->hw_version == MVPP23) {
+ val |= MVPP2_BM_LOW_THRESH_VALUE(MVPP23_BM_BPPI_LOW_THRESH);
+ val |= MVPP2_BM_HIGH_THRESH_VALUE(MVPP23_BM_BPPI_HIGH_THRESH);
+ } else {
+ val |= MVPP2_BM_LOW_THRESH_VALUE(MVPP2_BM_BPPI_LOW_THRESH);
+ val |= MVPP2_BM_HIGH_THRESH_VALUE(MVPP2_BM_BPPI_HIGH_THRESH);
+ }
+
+ mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
+
+ bm_pool->size = size;
+ bm_pool->pkt_size = 0;
+ bm_pool->buf_num = 0;
+
+ return 0;
+}
+
+/* Set pool buffer size */
+static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
+ struct mvpp2_bm_pool *bm_pool,
+ int buf_size)
+{
+ u32 val;
+
+ bm_pool->buf_size = buf_size;
+
+ val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
+ mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
+}
+
+static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv,
+ struct mvpp2_bm_pool *bm_pool,
+ dma_addr_t *dma_addr,
+ phys_addr_t *phys_addr)
+{
+ unsigned int thread = mvpp2_cpu_to_thread(priv, get_cpu());
+
+ *dma_addr = mvpp2_thread_read(priv, thread,
+ MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
+ *phys_addr = mvpp2_thread_read(priv, thread, MVPP2_BM_VIRT_ALLOC_REG);
+
+ if (priv->hw_version >= MVPP22) {
+ u32 val;
+ u32 dma_addr_highbits, phys_addr_highbits;
+
+ val = mvpp2_thread_read(priv, thread, MVPP22_BM_ADDR_HIGH_ALLOC);
+ dma_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_PHYS_MASK);
+ phys_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_VIRT_MASK) >>
+ MVPP22_BM_ADDR_HIGH_VIRT_SHIFT;
+
+ if (sizeof(dma_addr_t) == 8)
+ *dma_addr |= (u64)dma_addr_highbits << 32;
+
+ if (sizeof(phys_addr_t) == 8)
+ *phys_addr |= (u64)phys_addr_highbits << 32;
+ }
+
+ put_cpu();
+}
+
+/* Free all buffers from the pool */
+static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
+ struct mvpp2_bm_pool *bm_pool, int buf_num)
+{
+ struct page_pool *pp = NULL;
+ int i;
+
+ if (buf_num > bm_pool->buf_num) {
+ WARN(1, "Pool does not have so many bufs pool(%d) bufs(%d)\n",
+ bm_pool->id, buf_num);
+ buf_num = bm_pool->buf_num;
+ }
+
+ if (priv->percpu_pools)
+ pp = priv->page_pool[bm_pool->id];
+
+ for (i = 0; i < buf_num; i++) {
+ dma_addr_t buf_dma_addr;
+ phys_addr_t buf_phys_addr;
+ void *data;
+
+ mvpp2_bm_bufs_get_addrs(dev, priv, bm_pool,
+ &buf_dma_addr, &buf_phys_addr);
+
+ if (!pp)
+ dma_unmap_single(dev, buf_dma_addr,
+ bm_pool->buf_size, DMA_FROM_DEVICE);
+
+ data = (void *)phys_to_virt(buf_phys_addr);
+ if (!data)
+ break;
+
+ mvpp2_frag_free(bm_pool, pp, data);
+ }
+
+ /* Update BM driver with number of buffers removed from pool */
+ bm_pool->buf_num -= i;
+}
+
+/* Check number of buffers in BM pool */
+static int mvpp2_check_hw_buf_num(struct mvpp2 *priv, struct mvpp2_bm_pool *bm_pool)
+{
+ int buf_num = 0;
+
+ buf_num += mvpp2_read(priv, MVPP2_BM_POOL_PTRS_NUM_REG(bm_pool->id)) &
+ MVPP22_BM_POOL_PTRS_NUM_MASK;
+ buf_num += mvpp2_read(priv, MVPP2_BM_BPPI_PTRS_NUM_REG(bm_pool->id)) &
+ MVPP2_BM_BPPI_PTR_NUM_MASK;
+
+ /* HW has one buffer ready which is not reflected in the counters */
+ if (buf_num)
+ buf_num += 1;
+
+ return buf_num;
+}
+
+/* Cleanup pool */
+static int mvpp2_bm_pool_destroy(struct device *dev, struct mvpp2 *priv,
+ struct mvpp2_bm_pool *bm_pool)
+{
+ int buf_num;
+ u32 val;
+
+ buf_num = mvpp2_check_hw_buf_num(priv, bm_pool);
+ mvpp2_bm_bufs_free(dev, priv, bm_pool, buf_num);
+
+ /* Check buffer counters after free */
+ buf_num = mvpp2_check_hw_buf_num(priv, bm_pool);
+ if (buf_num) {
+ WARN(1, "cannot free all buffers in pool %d, buf_num left %d\n",
+ bm_pool->id, bm_pool->buf_num);
+ return 0;
+ }
+
+ val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
+ val |= MVPP2_BM_STOP_MASK;
+ mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
+
+ if (priv->percpu_pools) {
+ page_pool_destroy(priv->page_pool[bm_pool->id]);
+ priv->page_pool[bm_pool->id] = NULL;
+ }
+
+ dma_free_coherent(dev, bm_pool->size_bytes,
+ bm_pool->virt_addr,
+ bm_pool->dma_addr);
+ return 0;
+}
+
+static int mvpp2_bm_pools_init(struct device *dev, struct mvpp2 *priv)
+{
+ int i, err, size, poolnum = MVPP2_BM_POOLS_NUM;
+ struct mvpp2_bm_pool *bm_pool;
+
+ if (priv->percpu_pools)
+ poolnum = mvpp2_get_nrxqs(priv) * 2;
+
+ /* Create all pools with maximum size */
+ size = MVPP2_BM_POOL_SIZE_MAX;
+ for (i = 0; i < poolnum; i++) {
+ bm_pool = &priv->bm_pools[i];
+ bm_pool->id = i;
+ err = mvpp2_bm_pool_create(dev, priv, bm_pool, size);
+ if (err)
+ goto err_unroll_pools;
+ mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0);
+ }
+ return 0;
+
+err_unroll_pools:
+ dev_err(dev, "failed to create BM pool %d, size %d\n", i, size);
+ for (i = i - 1; i >= 0; i--)
+ mvpp2_bm_pool_destroy(dev, priv, &priv->bm_pools[i]);
+ return err;
+}
+
+/* Enable the PPv23 8-pool mode */
+static void mvpp23_bm_set_8pool_mode(struct mvpp2 *priv)
+{
+ int val;
+
+ val = mvpp2_read(priv, MVPP22_BM_POOL_BASE_ADDR_HIGH_REG);
+ val |= MVPP23_BM_8POOL_MODE;
+ mvpp2_write(priv, MVPP22_BM_POOL_BASE_ADDR_HIGH_REG, val);
+}
+
+/* Cleanup pool before actual initialization in the OS */
+static void mvpp2_bm_pool_cleanup(struct mvpp2 *priv, int pool_id)
+{
+ unsigned int thread = mvpp2_cpu_to_thread(priv, get_cpu());
+ u32 val;
+ int i;
+
+ /* Drain the BM from all possible residues left by firmware */
+ for (i = 0; i < MVPP2_BM_POOL_SIZE_MAX; i++)
+ mvpp2_thread_read(priv, thread, MVPP2_BM_PHY_ALLOC_REG(pool_id));
+
+ put_cpu();
+
+ /* Stop the BM pool */
+ val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(pool_id));
+ val |= MVPP2_BM_STOP_MASK;
+ mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(pool_id), val);
+}
+
+static int mvpp2_bm_init(struct device *dev, struct mvpp2 *priv)
+{
+ enum dma_data_direction dma_dir = DMA_FROM_DEVICE;
+ int i, err, poolnum = MVPP2_BM_POOLS_NUM;
+ struct mvpp2_port *port;
+
+ if (priv->percpu_pools)
+ poolnum = mvpp2_get_nrxqs(priv) * 2;
+
+ /* Clean up the pool state in case it contains stale state */
+ for (i = 0; i < poolnum; i++)
+ mvpp2_bm_pool_cleanup(priv, i);
+
+ if (priv->percpu_pools) {
+ for (i = 0; i < priv->port_count; i++) {
+ port = priv->port_list[i];
+ if (port->xdp_prog) {
+ dma_dir = DMA_BIDIRECTIONAL;
+ break;
+ }
+ }
+
+ for (i = 0; i < poolnum; i++) {
+ /* the pool in use */
+ int pn = i / (poolnum / 2);
+
+ priv->page_pool[i] =
+ mvpp2_create_page_pool(dev,
+ mvpp2_pools[pn].buf_num,
+ mvpp2_pools[pn].pkt_size,
+ dma_dir);
+ if (IS_ERR(priv->page_pool[i])) {
+ int j;
+
+ for (j = 0; j < i; j++) {
+ page_pool_destroy(priv->page_pool[j]);
+ priv->page_pool[j] = NULL;
+ }
+ return PTR_ERR(priv->page_pool[i]);
+ }
+ }
+ }
+
+ dev_info(dev, "using %d %s buffers\n", poolnum,
+ priv->percpu_pools ? "per-cpu" : "shared");
+
+ for (i = 0; i < poolnum; i++) {
+ /* Mask BM all interrupts */
+ mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
+ /* Clear BM cause register */
+ mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0);
+ }
+
+ /* Allocate and initialize BM pools */
+ priv->bm_pools = devm_kcalloc(dev, poolnum,
+ sizeof(*priv->bm_pools), GFP_KERNEL);
+ if (!priv->bm_pools)
+ return -ENOMEM;
+
+ if (priv->hw_version == MVPP23)
+ mvpp23_bm_set_8pool_mode(priv);
+
+ err = mvpp2_bm_pools_init(dev, priv);
+ if (err < 0)
+ return err;
+ return 0;
+}
+
+static void mvpp2_setup_bm_pool(void)
+{
+ /* Short pool */
+ mvpp2_pools[MVPP2_BM_SHORT].buf_num = MVPP2_BM_SHORT_BUF_NUM;
+ mvpp2_pools[MVPP2_BM_SHORT].pkt_size = MVPP2_BM_SHORT_PKT_SIZE;
+
+ /* Long pool */
+ mvpp2_pools[MVPP2_BM_LONG].buf_num = MVPP2_BM_LONG_BUF_NUM;
+ mvpp2_pools[MVPP2_BM_LONG].pkt_size = MVPP2_BM_LONG_PKT_SIZE;
+
+ /* Jumbo pool */
+ mvpp2_pools[MVPP2_BM_JUMBO].buf_num = MVPP2_BM_JUMBO_BUF_NUM;
+ mvpp2_pools[MVPP2_BM_JUMBO].pkt_size = MVPP2_BM_JUMBO_PKT_SIZE;
+}
+
+/* Attach long pool to rxq */
+static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
+ int lrxq, int long_pool)
+{
+ u32 val, mask;
+ int prxq;
+
+ /* Get queue physical ID */
+ prxq = port->rxqs[lrxq]->id;
+
+ if (port->priv->hw_version == MVPP21)
+ mask = MVPP21_RXQ_POOL_LONG_MASK;
+ else
+ mask = MVPP22_RXQ_POOL_LONG_MASK;
+
+ val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
+ val &= ~mask;
+ val |= (long_pool << MVPP2_RXQ_POOL_LONG_OFFS) & mask;
+ mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
+}
+
+/* Attach short pool to rxq */
+static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port,
+ int lrxq, int short_pool)
+{
+ u32 val, mask;
+ int prxq;
+
+ /* Get queue physical ID */
+ prxq = port->rxqs[lrxq]->id;
+
+ if (port->priv->hw_version == MVPP21)
+ mask = MVPP21_RXQ_POOL_SHORT_MASK;
+ else
+ mask = MVPP22_RXQ_POOL_SHORT_MASK;
+
+ val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
+ val &= ~mask;
+ val |= (short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) & mask;
+ mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
+}
+
+static void *mvpp2_buf_alloc(struct mvpp2_port *port,
+ struct mvpp2_bm_pool *bm_pool,
+ struct page_pool *page_pool,
+ dma_addr_t *buf_dma_addr,
+ phys_addr_t *buf_phys_addr,
+ gfp_t gfp_mask)
+{
+ dma_addr_t dma_addr;
+ struct page *page;
+ void *data;
+
+ data = mvpp2_frag_alloc(bm_pool, page_pool);
+ if (!data)
+ return NULL;
+
+ if (page_pool) {
+ page = (struct page *)data;
+ dma_addr = page_pool_get_dma_addr(page);
+ data = page_to_virt(page);
+ } else {
+ dma_addr = dma_map_single(port->dev->dev.parent, data,
+ MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) {
+ mvpp2_frag_free(bm_pool, NULL, data);
+ return NULL;
+ }
+ }
+ *buf_dma_addr = dma_addr;
+ *buf_phys_addr = virt_to_phys(data);
+
+ return data;
+}
+
+/* Enable flow control for the port's RXQs */
+static void mvpp2_rxq_enable_fc(struct mvpp2_port *port)
+{
+ int val, cm3_state, host_id, q;
+ int fq = port->first_rxq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->priv->mss_spinlock, flags);
+
+	/* Clear the flow control enable bit to prevent a race between FW and
+	 * kernel. If flow control was enabled, it is re-enabled below.
+	 */
+ val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
+ cm3_state = (val & FLOW_CONTROL_ENABLE_BIT);
+ val &= ~FLOW_CONTROL_ENABLE_BIT;
+ mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);
+
+ /* Set same Flow control for all RXQs */
+ for (q = 0; q < port->nrxqs; q++) {
+ /* Set stop and start Flow control RXQ thresholds */
+ val = MSS_THRESHOLD_START;
+ val |= (MSS_THRESHOLD_STOP << MSS_RXQ_TRESH_STOP_OFFS);
+ mvpp2_cm3_write(port->priv, MSS_RXQ_TRESH_REG(q, fq), val);
+
+ val = mvpp2_cm3_read(port->priv, MSS_RXQ_ASS_REG(q, fq));
+ /* Set RXQ port ID */
+ val &= ~(MSS_RXQ_ASS_PORTID_MASK << MSS_RXQ_ASS_Q_BASE(q, fq));
+ val |= (port->id << MSS_RXQ_ASS_Q_BASE(q, fq));
+ val &= ~(MSS_RXQ_ASS_HOSTID_MASK << (MSS_RXQ_ASS_Q_BASE(q, fq)
+ + MSS_RXQ_ASS_HOSTID_OFFS));
+
+		/* Calculate the RXQ host ID:
+		 * In single queue mode: host ID equals the host ID used
+		 * for the shared RX interrupt
+		 * In multi queue mode: host ID equals RXQ ID / number of
+		 * CoS queues
+		 * In single resource mode: host ID is always 0
+		 */
+ if (queue_mode == MVPP2_QDIST_SINGLE_MODE)
+ host_id = port->nqvecs;
+ else if (queue_mode == MVPP2_QDIST_MULTI_MODE)
+ host_id = q;
+ else
+ host_id = 0;
+
+ /* Set RXQ host ID */
+ val |= (host_id << (MSS_RXQ_ASS_Q_BASE(q, fq)
+ + MSS_RXQ_ASS_HOSTID_OFFS));
+
+ mvpp2_cm3_write(port->priv, MSS_RXQ_ASS_REG(q, fq), val);
+ }
+
+ /* Notify Firmware that Flow control config space ready for update */
+ val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
+ val |= FLOW_CONTROL_UPDATE_COMMAND_BIT;
+ val |= cm3_state;
+ mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);
+
+ spin_unlock_irqrestore(&port->priv->mss_spinlock, flags);
+}
+
+/* Disable flow control for the port's RXQs */
+static void mvpp2_rxq_disable_fc(struct mvpp2_port *port)
+{
+ int val, cm3_state, q;
+ unsigned long flags;
+ int fq = port->first_rxq;
+
+ spin_lock_irqsave(&port->priv->mss_spinlock, flags);
+
+	/* Clear the flow control enable bit to prevent a race between FW and
+	 * kernel. If flow control was enabled, it is re-enabled below.
+	 */
+ val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
+ cm3_state = (val & FLOW_CONTROL_ENABLE_BIT);
+ val &= ~FLOW_CONTROL_ENABLE_BIT;
+ mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);
+
+ /* Disable Flow control for all RXQs */
+ for (q = 0; q < port->nrxqs; q++) {
+ /* Set threshold 0 to disable Flow control */
+ val = 0;
+ val |= (0 << MSS_RXQ_TRESH_STOP_OFFS);
+ mvpp2_cm3_write(port->priv, MSS_RXQ_TRESH_REG(q, fq), val);
+
+ val = mvpp2_cm3_read(port->priv, MSS_RXQ_ASS_REG(q, fq));
+
+ val &= ~(MSS_RXQ_ASS_PORTID_MASK << MSS_RXQ_ASS_Q_BASE(q, fq));
+
+ val &= ~(MSS_RXQ_ASS_HOSTID_MASK << (MSS_RXQ_ASS_Q_BASE(q, fq)
+ + MSS_RXQ_ASS_HOSTID_OFFS));
+
+ mvpp2_cm3_write(port->priv, MSS_RXQ_ASS_REG(q, fq), val);
+ }
+
+ /* Notify Firmware that Flow control config space ready for update */
+ val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
+ val |= FLOW_CONTROL_UPDATE_COMMAND_BIT;
+ val |= cm3_state;
+ mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);
+
+ spin_unlock_irqrestore(&port->priv->mss_spinlock, flags);
+}
+
+/* Enable/disable flow control for a BM pool */
+static void mvpp2_bm_pool_update_fc(struct mvpp2_port *port,
+ struct mvpp2_bm_pool *pool,
+ bool en)
+{
+ int val, cm3_state;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->priv->mss_spinlock, flags);
+
+	/* Clear the flow control enable bit to prevent a race between FW and
+	 * kernel. If flow control was enabled, it is re-enabled below.
+	 */
+ val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
+ cm3_state = (val & FLOW_CONTROL_ENABLE_BIT);
+ val &= ~FLOW_CONTROL_ENABLE_BIT;
+ mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);
+
+	/* Check if the BM pool should be enabled or disabled */
+ if (en) {
+ /* Set BM pool start and stop thresholds per port */
+ val = mvpp2_cm3_read(port->priv, MSS_BUF_POOL_REG(pool->id));
+ val |= MSS_BUF_POOL_PORT_OFFS(port->id);
+ val &= ~MSS_BUF_POOL_START_MASK;
+ val |= (MSS_THRESHOLD_START << MSS_BUF_POOL_START_OFFS);
+ val &= ~MSS_BUF_POOL_STOP_MASK;
+ val |= MSS_THRESHOLD_STOP;
+ mvpp2_cm3_write(port->priv, MSS_BUF_POOL_REG(pool->id), val);
+ } else {
+ /* Remove BM pool from the port */
+ val = mvpp2_cm3_read(port->priv, MSS_BUF_POOL_REG(pool->id));
+ val &= ~MSS_BUF_POOL_PORT_OFFS(port->id);
+
+ /* Zero BM pool start and stop thresholds to disable pool
+ * flow control if pool empty (not used by any port)
+ */
+ if (!pool->buf_num) {
+ val &= ~MSS_BUF_POOL_START_MASK;
+ val &= ~MSS_BUF_POOL_STOP_MASK;
+ }
+
+ mvpp2_cm3_write(port->priv, MSS_BUF_POOL_REG(pool->id), val);
+ }
+
+ /* Notify Firmware that Flow control config space ready for update */
+ val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
+ val |= FLOW_CONTROL_UPDATE_COMMAND_BIT;
+ val |= cm3_state;
+ mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);
+
+ spin_unlock_irqrestore(&port->priv->mss_spinlock, flags);
+}
+
+/* Disable/enable flow control for the BM pools on all ports */
+static void mvpp2_bm_pool_update_priv_fc(struct mvpp2 *priv, bool en)
+{
+	struct mvpp2_port *port;
+	int i, j;
+
+	for (i = 0; i < priv->port_count; i++) {
+		port = priv->port_list[i];
+		if (port->priv->percpu_pools) {
+			/* Use a separate index for the per-CPU pools so the
+			 * outer port loop is not clobbered.
+			 */
+			for (j = 0; j < port->nrxqs; j++)
+				mvpp2_bm_pool_update_fc(port, &port->priv->bm_pools[j],
+							port->tx_fc & en);
+		} else {
+			mvpp2_bm_pool_update_fc(port, port->pool_long, port->tx_fc & en);
+			mvpp2_bm_pool_update_fc(port, port->pool_short, port->tx_fc & en);
+		}
+	}
+}
+
+static int mvpp2_enable_global_fc(struct mvpp2 *priv)
+{
+ int val, timeout = 0;
+
+	/* Enable global flow control. At this stage global flow control is
+	 * enabled, but still disabled per port.
+	 */
+ val = mvpp2_cm3_read(priv, MSS_FC_COM_REG);
+ val |= FLOW_CONTROL_ENABLE_BIT;
+ mvpp2_cm3_write(priv, MSS_FC_COM_REG, val);
+
+	/* Check if the firmware is running and disable FC if not */
+ val |= FLOW_CONTROL_UPDATE_COMMAND_BIT;
+ mvpp2_cm3_write(priv, MSS_FC_COM_REG, val);
+
+ while (timeout < MSS_FC_MAX_TIMEOUT) {
+ val = mvpp2_cm3_read(priv, MSS_FC_COM_REG);
+
+ if (!(val & FLOW_CONTROL_UPDATE_COMMAND_BIT))
+ return 0;
+ usleep_range(10, 20);
+ timeout++;
+ }
+
+ priv->global_tx_fc = false;
+ return -EOPNOTSUPP;
+}
+
+/* Release buffer to BM */
+static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
+ dma_addr_t buf_dma_addr,
+ phys_addr_t buf_phys_addr)
+{
+ unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
+ unsigned long flags = 0;
+
+ if (test_bit(thread, &port->priv->lock_map))
+ spin_lock_irqsave(&port->bm_lock[thread], flags);
+
+ if (port->priv->hw_version >= MVPP22) {
+ u32 val = 0;
+
+ if (sizeof(dma_addr_t) == 8)
+ val |= upper_32_bits(buf_dma_addr) &
+ MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK;
+
+ if (sizeof(phys_addr_t) == 8)
+ val |= (upper_32_bits(buf_phys_addr)
+ << MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) &
+ MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK;
+
+ mvpp2_thread_write_relaxed(port->priv, thread,
+ MVPP22_BM_ADDR_HIGH_RLS_REG, val);
+ }
+
+	/* The value written to MVPP2_BM_VIRT_RLS_REG is not interpreted by
+	 * the HW; it is simply returned in the "cookie" field of the RX
+	 * descriptor. Instead of storing the virtual address, we store the
+	 * physical address.
+	 */
+ mvpp2_thread_write_relaxed(port->priv, thread,
+ MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
+ mvpp2_thread_write_relaxed(port->priv, thread,
+ MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);
+
+ if (test_bit(thread, &port->priv->lock_map))
+ spin_unlock_irqrestore(&port->bm_lock[thread], flags);
+
+ put_cpu();
+}
+
+/* Allocate buffers for the pool */
+static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
+ struct mvpp2_bm_pool *bm_pool, int buf_num)
+{
+ int i, buf_size, total_size;
+ dma_addr_t dma_addr;
+ phys_addr_t phys_addr;
+ struct page_pool *pp = NULL;
+ void *buf;
+
+ if (port->priv->percpu_pools &&
+ bm_pool->pkt_size > MVPP2_BM_LONG_PKT_SIZE) {
+ netdev_err(port->dev,
+ "attempted to use jumbo frames with per-cpu pools");
+ return 0;
+ }
+
+ buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size);
+ total_size = MVPP2_RX_TOTAL_SIZE(buf_size);
+
+ if (buf_num < 0 ||
+ (buf_num + bm_pool->buf_num > bm_pool->size)) {
+ netdev_err(port->dev,
+ "cannot allocate %d buffers for pool %d\n",
+ buf_num, bm_pool->id);
+ return 0;
+ }
+
+ if (port->priv->percpu_pools)
+ pp = port->priv->page_pool[bm_pool->id];
+ for (i = 0; i < buf_num; i++) {
+ buf = mvpp2_buf_alloc(port, bm_pool, pp, &dma_addr,
+ &phys_addr, GFP_KERNEL);
+ if (!buf)
+ break;
+
+ mvpp2_bm_pool_put(port, bm_pool->id, dma_addr,
+ phys_addr);
+ }
+
+ /* Update BM driver with number of buffers added to pool */
+ bm_pool->buf_num += i;
+
+ netdev_dbg(port->dev,
+ "pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n",
+ bm_pool->id, bm_pool->pkt_size, buf_size, total_size);
+
+ netdev_dbg(port->dev,
+ "pool %d: %d of %d buffers added\n",
+ bm_pool->id, i, buf_num);
+ return i;
+}
+
+/* Notify the driver that BM pool is being used as specific type and return the
+ * pool pointer on success
+ */
+static struct mvpp2_bm_pool *
+mvpp2_bm_pool_use(struct mvpp2_port *port, unsigned pool, int pkt_size)
+{
+ struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
+ int num;
+
+ if ((port->priv->percpu_pools && pool > mvpp2_get_nrxqs(port->priv) * 2) ||
+ (!port->priv->percpu_pools && pool >= MVPP2_BM_POOLS_NUM)) {
+ netdev_err(port->dev, "Invalid pool %d\n", pool);
+ return NULL;
+ }
+
+	/* Allocate buffers in case BM pool is used as long pool, but packet
+	 * size doesn't match MTU or BM pool hasn't been used yet
+	 */
+ if (new_pool->pkt_size == 0) {
+ int pkts_num;
+
+ /* Set default buffer number or free all the buffers in case
+ * the pool is not empty
+ */
+ pkts_num = new_pool->buf_num;
+ if (pkts_num == 0) {
+ if (port->priv->percpu_pools) {
+ if (pool < port->nrxqs)
+ pkts_num = mvpp2_pools[MVPP2_BM_SHORT].buf_num;
+ else
+ pkts_num = mvpp2_pools[MVPP2_BM_LONG].buf_num;
+ } else {
+ pkts_num = mvpp2_pools[pool].buf_num;
+ }
+ } else {
+ mvpp2_bm_bufs_free(port->dev->dev.parent,
+ port->priv, new_pool, pkts_num);
+ }
+
+ new_pool->pkt_size = pkt_size;
+ new_pool->frag_size =
+ SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
+ MVPP2_SKB_SHINFO_SIZE;
+
+ /* Allocate buffers for this pool */
+ num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
+ if (num != pkts_num) {
+ WARN(1, "pool %d: %d of %d allocated\n",
+ new_pool->id, num, pkts_num);
+ return NULL;
+ }
+ }
+
+ mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
+ MVPP2_RX_BUF_SIZE(new_pool->pkt_size));
+
+ return new_pool;
+}
+
+static struct mvpp2_bm_pool *
+mvpp2_bm_pool_use_percpu(struct mvpp2_port *port, int type,
+ unsigned int pool, int pkt_size)
+{
+ struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
+ int num;
+
+ if (pool > port->nrxqs * 2) {
+ netdev_err(port->dev, "Invalid pool %d\n", pool);
+ return NULL;
+ }
+
+ /* Allocate buffers in case BM pool is used as long pool, but packet
+ * size doesn't match MTU or BM pool hasn't been used yet
+ */
+ if (new_pool->pkt_size == 0) {
+ int pkts_num;
+
+ /* Set default buffer number or free all the buffers in case
+ * the pool is not empty
+ */
+ pkts_num = new_pool->buf_num;
+ if (pkts_num == 0)
+ pkts_num = mvpp2_pools[type].buf_num;
+ else
+ mvpp2_bm_bufs_free(port->dev->dev.parent,
+ port->priv, new_pool, pkts_num);
+
+ new_pool->pkt_size = pkt_size;
+ new_pool->frag_size =
+ SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
+ MVPP2_SKB_SHINFO_SIZE;
+
+ /* Allocate buffers for this pool */
+ num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
+ if (num != pkts_num) {
+ WARN(1, "pool %d: %d of %d allocated\n",
+ new_pool->id, num, pkts_num);
+ return NULL;
+ }
+ }
+
+ mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
+ MVPP2_RX_BUF_SIZE(new_pool->pkt_size));
+
+ return new_pool;
+}
+
+/* Initialize pools for swf, shared buffers variant */
+static int mvpp2_swf_bm_pool_init_shared(struct mvpp2_port *port)
+{
+ enum mvpp2_bm_pool_log_num long_log_pool, short_log_pool;
+ int rxq;
+
+ /* If port pkt_size is higher than 1518B:
+ * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool
+ * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool
+ */
+ if (port->pkt_size > MVPP2_BM_LONG_PKT_SIZE) {
+ long_log_pool = MVPP2_BM_JUMBO;
+ short_log_pool = MVPP2_BM_LONG;
+ } else {
+ long_log_pool = MVPP2_BM_LONG;
+ short_log_pool = MVPP2_BM_SHORT;
+ }
+
+ if (!port->pool_long) {
+ port->pool_long =
+ mvpp2_bm_pool_use(port, long_log_pool,
+ mvpp2_pools[long_log_pool].pkt_size);
+ if (!port->pool_long)
+ return -ENOMEM;
+
+ port->pool_long->port_map |= BIT(port->id);
+
+ for (rxq = 0; rxq < port->nrxqs; rxq++)
+ mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
+ }
+
+ if (!port->pool_short) {
+ port->pool_short =
+ mvpp2_bm_pool_use(port, short_log_pool,
+ mvpp2_pools[short_log_pool].pkt_size);
+ if (!port->pool_short)
+ return -ENOMEM;
+
+ port->pool_short->port_map |= BIT(port->id);
+
+ for (rxq = 0; rxq < port->nrxqs; rxq++)
+ mvpp2_rxq_short_pool_set(port, rxq,
+ port->pool_short->id);
+ }
+
+ return 0;
+}
+
+/* Initialize pools for swf, percpu buffers variant */
+static int mvpp2_swf_bm_pool_init_percpu(struct mvpp2_port *port)
+{
+ struct mvpp2_bm_pool *bm_pool;
+ int i;
+
+ for (i = 0; i < port->nrxqs; i++) {
+ bm_pool = mvpp2_bm_pool_use_percpu(port, MVPP2_BM_SHORT, i,
+ mvpp2_pools[MVPP2_BM_SHORT].pkt_size);
+ if (!bm_pool)
+ return -ENOMEM;
+
+ bm_pool->port_map |= BIT(port->id);
+ mvpp2_rxq_short_pool_set(port, i, bm_pool->id);
+ }
+
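+ /* Long-packet pools occupy indices nrxqs..2*nrxqs-1, right after the
+ * per-rxq short-packet pools.
+ */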
+ for (i = 0; i < port->nrxqs; i++) {
+ bm_pool = mvpp2_bm_pool_use_percpu(port, MVPP2_BM_LONG, i + port->nrxqs,
+ mvpp2_pools[MVPP2_BM_LONG].pkt_size);
+ if (!bm_pool)
+ return -ENOMEM;
+
+ bm_pool->port_map |= BIT(port->id);
+ mvpp2_rxq_long_pool_set(port, i, bm_pool->id);
+ }
+
+ port->pool_long = NULL;
+ port->pool_short = NULL;
+
+ return 0;
+}
+
+static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
+{
+ if (port->priv->percpu_pools)
+ return mvpp2_swf_bm_pool_init_percpu(port);
+ else
+ return mvpp2_swf_bm_pool_init_shared(port);
+}
+
+static void mvpp2_set_hw_csum(struct mvpp2_port *port,
+ enum mvpp2_bm_pool_log_num new_long_pool)
+{
+ const netdev_features_t csums = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+
+ /* Update L4 checksum when jumbo enable/disable on port.
+ * Only port 0 supports hardware checksum offload due to
+ * the Tx FIFO size limitation.
+ * Also, don't set NETIF_F_HW_CSUM because L3_offset in TX descriptor
+ * has 7 bits, so the maximum L3 offset is 128.
+ */
+ if (new_long_pool == MVPP2_BM_JUMBO && port->id != 0) {
+ port->dev->features &= ~csums;
+ port->dev->hw_features &= ~csums;
+ } else {
+ port->dev->features |= csums;
+ port->dev->hw_features |= csums;
+ }
+}
+
+static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+ enum mvpp2_bm_pool_log_num new_long_pool;
+ int pkt_size = MVPP2_RX_PKT_SIZE(mtu);
+
+ if (port->priv->percpu_pools)
+ goto out_set;
+
+ /* If port MTU is higher than 1518B:
+ * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool
+ * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool
+ */
+ if (pkt_size > MVPP2_BM_LONG_PKT_SIZE)
+ new_long_pool = MVPP2_BM_JUMBO;
+ else
+ new_long_pool = MVPP2_BM_LONG;
+
+ if (new_long_pool != port->pool_long->id) {
+ if (port->tx_fc) {
+ if (pkt_size > MVPP2_BM_LONG_PKT_SIZE)
+ mvpp2_bm_pool_update_fc(port,
+ port->pool_short,
+ false);
+ else
+ mvpp2_bm_pool_update_fc(port, port->pool_long,
+ false);
+ }
+
+ /* Remove port from old short & long pool */
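+ /* pkt_size is already set, so mvpp2_bm_pool_use() just returns the
+ * existing pools and the port bit can be cleared from their maps.
+ */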
+ port->pool_long = mvpp2_bm_pool_use(port, port->pool_long->id,
+ port->pool_long->pkt_size);
+ port->pool_long->port_map &= ~BIT(port->id);
+ port->pool_long = NULL;
+
+ port->pool_short = mvpp2_bm_pool_use(port, port->pool_short->id,
+ port->pool_short->pkt_size);
+ port->pool_short->port_map &= ~BIT(port->id);
+ port->pool_short = NULL;
+
+ port->pkt_size = pkt_size;
+
+ /* Add port to new short & long pool */
+ mvpp2_swf_bm_pool_init(port);
+
+ mvpp2_set_hw_csum(port, new_long_pool);
+
+ if (port->tx_fc) {
+ if (pkt_size > MVPP2_BM_LONG_PKT_SIZE)
+ mvpp2_bm_pool_update_fc(port, port->pool_long,
+ true);
+ else
+ mvpp2_bm_pool_update_fc(port, port->pool_short,
+ true);
+ }
+
+ /* Update L4 checksum when jumbo enable/disable on port */
+ if (new_long_pool == MVPP2_BM_JUMBO && port->id != 0) {
+ dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
+ dev->hw_features &= ~(NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM);
+ } else {
+ dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ }
+ }
+
+out_set:
+ dev->mtu = mtu;
+ dev->wanted_features = dev->features;
+
+ netdev_update_features(dev);
+ return 0;
+}
+
+static inline void mvpp2_interrupts_enable(struct mvpp2_port *port)
+{
+ int i, sw_thread_mask = 0;
+
+ for (i = 0; i < port->nqvecs; i++)
+ sw_thread_mask |= port->qvecs[i].sw_thread_mask;
+
+ mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
+ MVPP2_ISR_ENABLE_INTERRUPT(sw_thread_mask));
+}
+
+static inline void mvpp2_interrupts_disable(struct mvpp2_port *port)
+{
+ int i, sw_thread_mask = 0;
+
+ for (i = 0; i < port->nqvecs; i++)
+ sw_thread_mask |= port->qvecs[i].sw_thread_mask;
+
+ mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
+ MVPP2_ISR_DISABLE_INTERRUPT(sw_thread_mask));
+}
+
+static inline void mvpp2_qvec_interrupt_enable(struct mvpp2_queue_vector *qvec)
+{
+ struct mvpp2_port *port = qvec->port;
+
+ mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
+ MVPP2_ISR_ENABLE_INTERRUPT(qvec->sw_thread_mask));
+}
+
+static inline void mvpp2_qvec_interrupt_disable(struct mvpp2_queue_vector *qvec)
+{
+ struct mvpp2_port *port = qvec->port;
+
+ mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
+ MVPP2_ISR_DISABLE_INTERRUPT(qvec->sw_thread_mask));
+}
+
+/* Mask the current thread's Rx/Tx interrupts
+ * Called by on_each_cpu(), guaranteed to run with migration disabled,
+ * using smp_processor_id() is OK.
+ */
+static void mvpp2_interrupts_mask(void *arg)
+{
+ struct mvpp2_port *port = arg;
+ int cpu = smp_processor_id();
+ u32 thread;
+
+ /* If the thread isn't used, don't do anything */
+ if (cpu >= port->priv->nthreads)
+ return;
+
+ thread = mvpp2_cpu_to_thread(port->priv, cpu);
+
+ mvpp2_thread_write(port->priv, thread,
+ MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
+ mvpp2_thread_write(port->priv, thread,
+ MVPP2_ISR_RX_ERR_CAUSE_REG(port->id), 0);
+}
+
+/* Unmask the current thread's Rx/Tx interrupts.
+ * Called by on_each_cpu(), guaranteed to run with migration disabled,
+ * using smp_processor_id() is OK.
+ */
+static void mvpp2_interrupts_unmask(void *arg)
+{
+ struct mvpp2_port *port = arg;
+ int cpu = smp_processor_id();
+ u32 val, thread;
+
+ /* If the thread isn't used, don't do anything */
+ if (cpu >= port->priv->nthreads)
+ return;
+
+ thread = mvpp2_cpu_to_thread(port->priv, cpu);
+
+ val = MVPP2_CAUSE_MISC_SUM_MASK |
+ MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(port->priv->hw_version);
+ if (port->has_tx_irqs)
+ val |= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
+
+ mvpp2_thread_write(port->priv, thread,
+ MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
+ mvpp2_thread_write(port->priv, thread,
+ MVPP2_ISR_RX_ERR_CAUSE_REG(port->id),
+ MVPP2_ISR_RX_ERR_CAUSE_NONOCC_MASK);
+}
+
+static void
+mvpp2_shared_interrupt_mask_unmask(struct mvpp2_port *port, bool mask)
+{
+ u32 val;
+ int i;
+
+ if (port->priv->hw_version == MVPP21)
+ return;
+
+ if (mask)
+ val = 0;
+ else
+ val = MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(MVPP22);
+
+ for (i = 0; i < port->nqvecs; i++) {
+ struct mvpp2_queue_vector *v = port->qvecs + i;
+
+ if (v->type != MVPP2_QUEUE_VECTOR_SHARED)
+ continue;
+
+ mvpp2_thread_write(port->priv, v->sw_thread_id,
+ MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
+ mvpp2_thread_write(port->priv, v->sw_thread_id,
+ MVPP2_ISR_RX_ERR_CAUSE_REG(port->id),
+ MVPP2_ISR_RX_ERR_CAUSE_NONOCC_MASK);
+ }
+}
+
+/* Only GOP port 0 has an XLG MAC */
+static bool mvpp2_port_supports_xlg(struct mvpp2_port *port)
+{
+ return port->gop_id == 0;
+}
+
+static bool mvpp2_port_supports_rgmii(struct mvpp2_port *port)
+{
+ return !(port->priv->hw_version >= MVPP22 && port->gop_id == 0);
+}
+
+/* Port configuration routines */
+static bool mvpp2_is_xlg(phy_interface_t interface)
+{
+ return interface == PHY_INTERFACE_MODE_10GBASER ||
+ interface == PHY_INTERFACE_MODE_5GBASER ||
+ interface == PHY_INTERFACE_MODE_XAUI;
+}
+
+static void mvpp2_modify(void __iomem *ptr, u32 mask, u32 set)
+{
+ u32 old, val;
+
+ old = val = readl(ptr);
+ val &= ~mask;
+ val |= set;
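+ /* Skip the register write when the value is unchanged */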
+ if (old != val)
+ writel(val, ptr);
+}
+
+static void mvpp22_gop_init_rgmii(struct mvpp2_port *port)
+{
+ struct mvpp2 *priv = port->priv;
+ u32 val;
+
+ regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
+ val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT;
+ regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);
+
+ regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
+ if (port->gop_id == 2)
+ val |= GENCONF_CTRL0_PORT2_RGMII;
+ else if (port->gop_id == 3)
+ val |= GENCONF_CTRL0_PORT3_RGMII_MII;
+ regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
+}
+
+static void mvpp22_gop_init_sgmii(struct mvpp2_port *port)
+{
+ struct mvpp2 *priv = port->priv;
+ u32 val;
+
+ regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
+ val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT |
+ GENCONF_PORT_CTRL0_RX_DATA_SAMPLE;
+ regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);
+
+ if (port->gop_id > 1) {
+ regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
+ if (port->gop_id == 2)
+ val &= ~GENCONF_CTRL0_PORT2_RGMII;
+ else if (port->gop_id == 3)
+ val &= ~GENCONF_CTRL0_PORT3_RGMII_MII;
+ regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
+ }
+}
+
+static void mvpp22_gop_init_10gkr(struct mvpp2_port *port)
+{
+ struct mvpp2 *priv = port->priv;
+ void __iomem *mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
+ void __iomem *xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);
+ u32 val;
+
+ val = readl(xpcs + MVPP22_XPCS_CFG0);
+ val &= ~(MVPP22_XPCS_CFG0_PCS_MODE(0x3) |
+ MVPP22_XPCS_CFG0_ACTIVE_LANE(0x3));
+ val |= MVPP22_XPCS_CFG0_ACTIVE_LANE(2);
+ writel(val, xpcs + MVPP22_XPCS_CFG0);
+
+ val = readl(mpcs + MVPP22_MPCS_CTRL);
+ val &= ~MVPP22_MPCS_CTRL_FWD_ERR_CONN;
+ writel(val, mpcs + MVPP22_MPCS_CTRL);
+
+ val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
+ val &= ~MVPP22_MPCS_CLK_RESET_DIV_RATIO(0x7);
+ val |= MVPP22_MPCS_CLK_RESET_DIV_RATIO(1);
+ writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
+}
+
+static void mvpp22_gop_fca_enable_periodic(struct mvpp2_port *port, bool en)
+{
+ struct mvpp2 *priv = port->priv;
+ void __iomem *fca = priv->iface_base + MVPP22_FCA_BASE(port->gop_id);
+ u32 val;
+
+ val = readl(fca + MVPP22_FCA_CONTROL_REG);
+ val &= ~MVPP22_FCA_ENABLE_PERIODIC;
+ if (en)
+ val |= MVPP22_FCA_ENABLE_PERIODIC;
+ writel(val, fca + MVPP22_FCA_CONTROL_REG);
+}
+
+static void mvpp22_gop_fca_set_timer(struct mvpp2_port *port, u32 timer)
+{
+ struct mvpp2 *priv = port->priv;
+ void __iomem *fca = priv->iface_base + MVPP22_FCA_BASE(port->gop_id);
+ u32 lsb, msb;
+
+ lsb = timer & MVPP22_FCA_REG_MASK;
+ msb = timer >> MVPP22_FCA_REG_SIZE;
+
+ writel(lsb, fca + MVPP22_PERIODIC_COUNTER_LSB_REG);
+ writel(msb, fca + MVPP22_PERIODIC_COUNTER_MSB_REG);
+}
+
+/* Set Flow Control timer x100 faster than pause quanta to ensure that link
+ * partner won't send traffic if port is in XOFF mode.
+ */
+static void mvpp22_gop_fca_set_periodic_timer(struct mvpp2_port *port)
+{
+ u32 timer;
+
+ timer = (port->priv->tclk / (USEC_PER_SEC * FC_CLK_DIVIDER))
+ * FC_QUANTA;
+
+ mvpp22_gop_fca_enable_periodic(port, false);
+
+ mvpp22_gop_fca_set_timer(port, timer);
+
+ mvpp22_gop_fca_enable_periodic(port, true);
+}
+
+static int mvpp22_gop_init(struct mvpp2_port *port, phy_interface_t interface)
+{
+ struct mvpp2 *priv = port->priv;
+ u32 val;
+
+ if (!priv->sysctrl_base)
+ return 0;
+
+ switch (interface) {
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ if (!mvpp2_port_supports_rgmii(port))
+ goto invalid_conf;
+ mvpp22_gop_init_rgmii(port);
+ break;
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ mvpp22_gop_init_sgmii(port);
+ break;
+ case PHY_INTERFACE_MODE_5GBASER:
+ case PHY_INTERFACE_MODE_10GBASER:
+ if (!mvpp2_port_supports_xlg(port))
+ goto invalid_conf;
+ mvpp22_gop_init_10gkr(port);
+ break;
+ default:
+ goto unsupported_conf;
+ }
+
+ regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL1, &val);
+ val |= GENCONF_PORT_CTRL1_RESET(port->gop_id) |
+ GENCONF_PORT_CTRL1_EN(port->gop_id);
+ regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL1, val);
+
+ regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
+ val |= GENCONF_PORT_CTRL0_CLK_DIV_PHASE_CLR;
+ regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);
+
+ regmap_read(priv->sysctrl_base, GENCONF_SOFT_RESET1, &val);
+ val |= GENCONF_SOFT_RESET1_GOP;
+ regmap_write(priv->sysctrl_base, GENCONF_SOFT_RESET1, val);
+
+ mvpp22_gop_fca_set_periodic_timer(port);
+
+unsupported_conf:
+ return 0;
+
+invalid_conf:
+ netdev_err(port->dev, "Invalid port configuration\n");
+ return -EINVAL;
+}
+
+static void mvpp22_gop_unmask_irq(struct mvpp2_port *port)
+{
+ u32 val;
+
+ if (phy_interface_mode_is_rgmii(port->phy_interface) ||
+ phy_interface_mode_is_8023z(port->phy_interface) ||
+ port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
+ /* Enable the GMAC link status irq for this port */
+ val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK);
+ val |= MVPP22_GMAC_INT_SUM_MASK_LINK_STAT;
+ writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK);
+ }
+
+ if (mvpp2_port_supports_xlg(port)) {
+ /* Enable the XLG/GIG irqs for this port */
+ val = readl(port->base + MVPP22_XLG_EXT_INT_MASK);
+ if (mvpp2_is_xlg(port->phy_interface))
+ val |= MVPP22_XLG_EXT_INT_MASK_XLG;
+ else
+ val |= MVPP22_XLG_EXT_INT_MASK_GIG;
+ writel(val, port->base + MVPP22_XLG_EXT_INT_MASK);
+ }
+}
+
+static void mvpp22_gop_mask_irq(struct mvpp2_port *port)
+{
+ u32 val;
+
+ if (mvpp2_port_supports_xlg(port)) {
+ val = readl(port->base + MVPP22_XLG_EXT_INT_MASK);
+ val &= ~(MVPP22_XLG_EXT_INT_MASK_XLG |
+ MVPP22_XLG_EXT_INT_MASK_GIG);
+ writel(val, port->base + MVPP22_XLG_EXT_INT_MASK);
+ }
+
+ if (phy_interface_mode_is_rgmii(port->phy_interface) ||
+ phy_interface_mode_is_8023z(port->phy_interface) ||
+ port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
+ val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK);
+ val &= ~MVPP22_GMAC_INT_SUM_MASK_LINK_STAT;
+ writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK);
+ }
+}
+
+static void mvpp22_gop_setup_irq(struct mvpp2_port *port)
+{
+ u32 val;
+
+ mvpp2_modify(port->base + MVPP22_GMAC_INT_SUM_MASK,
+ MVPP22_GMAC_INT_SUM_MASK_PTP,
+ MVPP22_GMAC_INT_SUM_MASK_PTP);
+
+ if (port->phylink ||
+ phy_interface_mode_is_rgmii(port->phy_interface) ||
+ phy_interface_mode_is_8023z(port->phy_interface) ||
+ port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
+ val = readl(port->base + MVPP22_GMAC_INT_MASK);
+ val |= MVPP22_GMAC_INT_MASK_LINK_STAT;
+ writel(val, port->base + MVPP22_GMAC_INT_MASK);
+ }
+
+ if (mvpp2_port_supports_xlg(port)) {
+ val = readl(port->base + MVPP22_XLG_INT_MASK);
+ val |= MVPP22_XLG_INT_MASK_LINK;
+ writel(val, port->base + MVPP22_XLG_INT_MASK);
+
+ mvpp2_modify(port->base + MVPP22_XLG_EXT_INT_MASK,
+ MVPP22_XLG_EXT_INT_MASK_PTP,
+ MVPP22_XLG_EXT_INT_MASK_PTP);
+ }
+
+ mvpp22_gop_unmask_irq(port);
+}
+
+/* Sets the PHY mode of the COMPHY (which configures the serdes lanes).
+ *
+ * The PHY mode used by the PPv2 driver comes from the network subsystem, while
+ * the one given to the COMPHY comes from the generic PHY subsystem. Hence they
+ * differ.
+ *
+ * The COMPHY configures the serdes lanes regardless of the actual use of the
+ * lanes by the physical layer. This is why configurations like
+ * "PPv2 (2500BaseX) - COMPHY (2500SGMII)" are valid.
+ */
+static int mvpp22_comphy_init(struct mvpp2_port *port,
+ phy_interface_t interface)
+{
+ int ret;
+
+ if (!port->comphy)
+ return 0;
+
+ ret = phy_set_mode_ext(port->comphy, PHY_MODE_ETHERNET, interface);
+ if (ret)
+ return ret;
+
+ return phy_power_on(port->comphy);
+}
+
+static void mvpp2_port_enable(struct mvpp2_port *port)
+{
+ u32 val;
+
+ if (mvpp2_port_supports_xlg(port) &&
+ mvpp2_is_xlg(port->phy_interface)) {
+ val = readl(port->base + MVPP22_XLG_CTRL0_REG);
+ val |= MVPP22_XLG_CTRL0_PORT_EN;
+ val &= ~MVPP22_XLG_CTRL0_MIB_CNT_DIS;
+ writel(val, port->base + MVPP22_XLG_CTRL0_REG);
+ } else {
+ val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
+ val |= MVPP2_GMAC_PORT_EN_MASK;
+ val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
+ writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
+ }
+}
+
+static void mvpp2_port_disable(struct mvpp2_port *port)
+{
+ u32 val;
+
+ if (mvpp2_port_supports_xlg(port) &&
+ mvpp2_is_xlg(port->phy_interface)) {
+ val = readl(port->base + MVPP22_XLG_CTRL0_REG);
+ val &= ~MVPP22_XLG_CTRL0_PORT_EN;
+ writel(val, port->base + MVPP22_XLG_CTRL0_REG);
+ }
+
+ val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
+ val &= ~(MVPP2_GMAC_PORT_EN_MASK);
+ writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
+}
+
+/* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
+static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
+{
+ u32 val;
+
+ val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) &
+ ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
+ writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
+}
+
+/* Configure loopback port */
+static void mvpp2_port_loopback_set(struct mvpp2_port *port,
+ const struct phylink_link_state *state)
+{
+ u32 val;
+
+ val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
+
+ if (state->speed == 1000)
+ val |= MVPP2_GMAC_GMII_LB_EN_MASK;
+ else
+ val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;
+
+ if (phy_interface_mode_is_8023z(state->interface) ||
+ state->interface == PHY_INTERFACE_MODE_SGMII)
+ val |= MVPP2_GMAC_PCS_LB_EN_MASK;
+ else
+ val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;
+
+ writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
+}
+
+enum {
+ ETHTOOL_XDP_REDIRECT,
+ ETHTOOL_XDP_PASS,
+ ETHTOOL_XDP_DROP,
+ ETHTOOL_XDP_TX,
+ ETHTOOL_XDP_TX_ERR,
+ ETHTOOL_XDP_XMIT,
+ ETHTOOL_XDP_XMIT_ERR,
+};
+
+struct mvpp2_ethtool_counter {
+ unsigned int offset;
+ const char string[ETH_GSTRING_LEN];
+ bool reg_is_64b;
+};
+
+static u64 mvpp2_read_count(struct mvpp2_port *port,
+ const struct mvpp2_ethtool_counter *counter)
+{
+ u64 val;
+
+ val = readl(port->stats_base + counter->offset);
+ if (counter->reg_is_64b)
+ val += (u64)readl(port->stats_base + counter->offset + 4) << 32;
+
+ return val;
+}
+
+/* Some counters are accessed indirectly by first writing an index to
+ * MVPP2_CTRS_IDX. The index can represent various resources depending on the
+ * register we access: it can be a hit counter for some classification tables,
+ * or a counter specific to an rxq, a txq or a buffer pool.
+ */
+static u32 mvpp2_read_index(struct mvpp2 *priv, u32 index, u32 reg)
+{
+ mvpp2_write(priv, MVPP2_CTRS_IDX, index);
+ return mvpp2_read(priv, reg);
+}
+
+/* Software and hardware statistics are, by design, incremented at different
+ * points in the packet processing chain, so they cannot be expected to match:
+ * incoming packets may be dropped after being counted by hardware but before
+ * reaching the software statistics (most likely multicast packets), while on
+ * transmission FCS bytes are added along the way and TSO skbs are split,
+ * adding header bytes. Hence, statistics gathered from userspace with
+ * ifconfig (software) and ethtool (hardware) cannot be compared.
+ */
+static const struct mvpp2_ethtool_counter mvpp2_ethtool_mib_regs[] = {
+ { MVPP2_MIB_GOOD_OCTETS_RCVD, "good_octets_received", true },
+ { MVPP2_MIB_BAD_OCTETS_RCVD, "bad_octets_received" },
+ { MVPP2_MIB_CRC_ERRORS_SENT, "crc_errors_sent" },
+ { MVPP2_MIB_UNICAST_FRAMES_RCVD, "unicast_frames_received" },
+ { MVPP2_MIB_BROADCAST_FRAMES_RCVD, "broadcast_frames_received" },
+ { MVPP2_MIB_MULTICAST_FRAMES_RCVD, "multicast_frames_received" },
+ { MVPP2_MIB_FRAMES_64_OCTETS, "frames_64_octets" },
+ { MVPP2_MIB_FRAMES_65_TO_127_OCTETS, "frames_65_to_127_octet" },
+ { MVPP2_MIB_FRAMES_128_TO_255_OCTETS, "frames_128_to_255_octet" },
+ { MVPP2_MIB_FRAMES_256_TO_511_OCTETS, "frames_256_to_511_octet" },
+ { MVPP2_MIB_FRAMES_512_TO_1023_OCTETS, "frames_512_to_1023_octet" },
+ { MVPP2_MIB_FRAMES_1024_TO_MAX_OCTETS, "frames_1024_to_max_octet" },
+ { MVPP2_MIB_GOOD_OCTETS_SENT, "good_octets_sent", true },
+ { MVPP2_MIB_UNICAST_FRAMES_SENT, "unicast_frames_sent" },
+ { MVPP2_MIB_MULTICAST_FRAMES_SENT, "multicast_frames_sent" },
+ { MVPP2_MIB_BROADCAST_FRAMES_SENT, "broadcast_frames_sent" },
+ { MVPP2_MIB_FC_SENT, "fc_sent" },
+ { MVPP2_MIB_FC_RCVD, "fc_received" },
+ { MVPP2_MIB_RX_FIFO_OVERRUN, "rx_fifo_overrun" },
+ { MVPP2_MIB_UNDERSIZE_RCVD, "undersize_received" },
+ { MVPP2_MIB_FRAGMENTS_RCVD, "fragments_received" },
+ { MVPP2_MIB_OVERSIZE_RCVD, "oversize_received" },
+ { MVPP2_MIB_JABBER_RCVD, "jabber_received" },
+ { MVPP2_MIB_MAC_RCV_ERROR, "mac_receive_error" },
+ { MVPP2_MIB_BAD_CRC_EVENT, "bad_crc_event" },
+ { MVPP2_MIB_COLLISION, "collision" },
+ { MVPP2_MIB_LATE_COLLISION, "late_collision" },
+};
+
+static const struct mvpp2_ethtool_counter mvpp2_ethtool_port_regs[] = {
+ { MVPP2_OVERRUN_ETH_DROP, "rx_fifo_or_parser_overrun_drops" },
+ { MVPP2_CLS_ETH_DROP, "rx_classifier_drops" },
+};
+
+static const struct mvpp2_ethtool_counter mvpp2_ethtool_txq_regs[] = {
+ { MVPP2_TX_DESC_ENQ_CTR, "txq_%d_desc_enqueue" },
+ { MVPP2_TX_DESC_ENQ_TO_DDR_CTR, "txq_%d_desc_enqueue_to_ddr" },
+ { MVPP2_TX_BUFF_ENQ_TO_DDR_CTR, "txq_%d_buff_euqueue_to_ddr" },
+ { MVPP2_TX_DESC_ENQ_HW_FWD_CTR, "txq_%d_desc_hardware_forwarded" },
+ { MVPP2_TX_PKTS_DEQ_CTR, "txq_%d_packets_dequeued" },
+ { MVPP2_TX_PKTS_FULL_QUEUE_DROP_CTR, "txq_%d_queue_full_drops" },
+ { MVPP2_TX_PKTS_EARLY_DROP_CTR, "txq_%d_packets_early_drops" },
+ { MVPP2_TX_PKTS_BM_DROP_CTR, "txq_%d_packets_bm_drops" },
+ { MVPP2_TX_PKTS_BM_MC_DROP_CTR, "txq_%d_packets_rep_bm_drops" },
+};
+
+static const struct mvpp2_ethtool_counter mvpp2_ethtool_rxq_regs[] = {
+ { MVPP2_RX_DESC_ENQ_CTR, "rxq_%d_desc_enqueue" },
+ { MVPP2_RX_PKTS_FULL_QUEUE_DROP_CTR, "rxq_%d_queue_full_drops" },
+ { MVPP2_RX_PKTS_EARLY_DROP_CTR, "rxq_%d_packets_early_drops" },
+ { MVPP2_RX_PKTS_BM_DROP_CTR, "rxq_%d_packets_bm_drops" },
+};
+
+static const struct mvpp2_ethtool_counter mvpp2_ethtool_xdp[] = {
+ { ETHTOOL_XDP_REDIRECT, "rx_xdp_redirect", },
+ { ETHTOOL_XDP_PASS, "rx_xdp_pass", },
+ { ETHTOOL_XDP_DROP, "rx_xdp_drop", },
+ { ETHTOOL_XDP_TX, "rx_xdp_tx", },
+ { ETHTOOL_XDP_TX_ERR, "rx_xdp_tx_errors", },
+ { ETHTOOL_XDP_XMIT, "tx_xdp_xmit", },
+ { ETHTOOL_XDP_XMIT_ERR, "tx_xdp_xmit_errors", },
+};
+
+#define MVPP2_N_ETHTOOL_STATS(ntxqs, nrxqs) (ARRAY_SIZE(mvpp2_ethtool_mib_regs) + \
+ ARRAY_SIZE(mvpp2_ethtool_port_regs) + \
+ (ARRAY_SIZE(mvpp2_ethtool_txq_regs) * (ntxqs)) + \
+ (ARRAY_SIZE(mvpp2_ethtool_rxq_regs) * (nrxqs)) + \
+ ARRAY_SIZE(mvpp2_ethtool_xdp))
+
+static void mvpp2_ethtool_get_strings(struct net_device *netdev, u32 sset,
+ u8 *data)
+{
+ struct mvpp2_port *port = netdev_priv(netdev);
+ int i, q;
+
+ if (sset != ETH_SS_STATS)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_mib_regs); i++) {
+ strscpy(data, mvpp2_ethtool_mib_regs[i].string,
+ ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_port_regs); i++) {
+ strscpy(data, mvpp2_ethtool_port_regs[i].string,
+ ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+
+ for (q = 0; q < port->ntxqs; q++) {
+ for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_txq_regs); i++) {
+ snprintf(data, ETH_GSTRING_LEN,
+ mvpp2_ethtool_txq_regs[i].string, q);
+ data += ETH_GSTRING_LEN;
+ }
+ }
+
+ for (q = 0; q < port->nrxqs; q++) {
+ for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_rxq_regs); i++) {
+ snprintf(data, ETH_GSTRING_LEN,
+ mvpp2_ethtool_rxq_regs[i].string,
+ q);
+ data += ETH_GSTRING_LEN;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_xdp); i++) {
+ strscpy(data, mvpp2_ethtool_xdp[i].string,
+ ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+}
+
+static void
+mvpp2_get_xdp_stats(struct mvpp2_port *port, struct mvpp2_pcpu_stats *xdp_stats)
+{
+ unsigned int start;
+ unsigned int cpu;
+
+ /* Gather XDP Statistics */
+ for_each_possible_cpu(cpu) {
+ struct mvpp2_pcpu_stats *cpu_stats;
+ u64 xdp_redirect;
+ u64 xdp_pass;
+ u64 xdp_drop;
+ u64 xdp_xmit;
+ u64 xdp_xmit_err;
+ u64 xdp_tx;
+ u64 xdp_tx_err;
+
+ cpu_stats = per_cpu_ptr(port->stats, cpu);
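+ /* Retry the snapshot if a writer updated the stats concurrently */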
+ do {
+ start = u64_stats_fetch_begin(&cpu_stats->syncp);
+ xdp_redirect = cpu_stats->xdp_redirect;
+ xdp_pass = cpu_stats->xdp_pass;
+ xdp_drop = cpu_stats->xdp_drop;
+ xdp_xmit = cpu_stats->xdp_xmit;
+ xdp_xmit_err = cpu_stats->xdp_xmit_err;
+ xdp_tx = cpu_stats->xdp_tx;
+ xdp_tx_err = cpu_stats->xdp_tx_err;
+ } while (u64_stats_fetch_retry(&cpu_stats->syncp, start));
+
+ xdp_stats->xdp_redirect += xdp_redirect;
+ xdp_stats->xdp_pass += xdp_pass;
+ xdp_stats->xdp_drop += xdp_drop;
+ xdp_stats->xdp_xmit += xdp_xmit;
+ xdp_stats->xdp_xmit_err += xdp_xmit_err;
+ xdp_stats->xdp_tx += xdp_tx;
+ xdp_stats->xdp_tx_err += xdp_tx_err;
+ }
+}
+
+static void mvpp2_read_stats(struct mvpp2_port *port)
+{
+ struct mvpp2_pcpu_stats xdp_stats = {};
+ const struct mvpp2_ethtool_counter *s;
+ u64 *pstats;
+ int i, q;
+
+ pstats = port->ethtool_stats;
+
+ for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_mib_regs); i++)
+ *pstats++ += mvpp2_read_count(port, &mvpp2_ethtool_mib_regs[i]);
+
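+ /* Per-port drop counters are laid out as one 32-bit register per port */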
+ for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_port_regs); i++)
+ *pstats++ += mvpp2_read(port->priv,
+ mvpp2_ethtool_port_regs[i].offset +
+ 4 * port->id);
+
+ for (q = 0; q < port->ntxqs; q++)
+ for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_txq_regs); i++)
+ *pstats++ += mvpp2_read_index(port->priv,
+ MVPP22_CTRS_TX_CTR(port->id, q),
+ mvpp2_ethtool_txq_regs[i].offset);
+
+ /* Rxqs are numbered from 0 from the user standpoint, but not from the
+ * driver's. We need to add the port->first_rxq offset.
+ */
+ for (q = 0; q < port->nrxqs; q++)
+ for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_rxq_regs); i++)
+ *pstats++ += mvpp2_read_index(port->priv,
+ port->first_rxq + q,
+ mvpp2_ethtool_rxq_regs[i].offset);
+
+ /* Gather XDP Statistics */
+ mvpp2_get_xdp_stats(port, &xdp_stats);
+
+ for (i = 0, s = mvpp2_ethtool_xdp;
+ s < mvpp2_ethtool_xdp + ARRAY_SIZE(mvpp2_ethtool_xdp);
+ s++, i++) {
+ switch (s->offset) {
+ case ETHTOOL_XDP_REDIRECT:
+ *pstats++ = xdp_stats.xdp_redirect;
+ break;
+ case ETHTOOL_XDP_PASS:
+ *pstats++ = xdp_stats.xdp_pass;
+ break;
+ case ETHTOOL_XDP_DROP:
+ *pstats++ = xdp_stats.xdp_drop;
+ break;
+ case ETHTOOL_XDP_TX:
+ *pstats++ = xdp_stats.xdp_tx;
+ break;
+ case ETHTOOL_XDP_TX_ERR:
+ *pstats++ = xdp_stats.xdp_tx_err;
+ break;
+ case ETHTOOL_XDP_XMIT:
+ *pstats++ = xdp_stats.xdp_xmit;
+ break;
+ case ETHTOOL_XDP_XMIT_ERR:
+ *pstats++ = xdp_stats.xdp_xmit_err;
+ break;
+ }
+ }
+}
+
+static void mvpp2_gather_hw_statistics(struct work_struct *work)
+{
+ struct delayed_work *del_work = to_delayed_work(work);
+ struct mvpp2_port *port = container_of(del_work, struct mvpp2_port,
+ stats_work);
+
+ mutex_lock(&port->gather_stats_lock);
+
+ mvpp2_read_stats(port);
+
+ /* There is no need to read the counters again right after this function
+ * if it was called asynchronously by the user (i.e. via ethtool).
+ */
+ cancel_delayed_work(&port->stats_work);
+ queue_delayed_work(port->priv->stats_queue, &port->stats_work,
+ MVPP2_MIB_COUNTERS_STATS_DELAY);
+
+ mutex_unlock(&port->gather_stats_lock);
+}
+
+static void mvpp2_ethtool_get_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+
+ /* Update statistics for the given port, then take the lock to avoid
+ * concurrent accesses on the ethtool_stats structure during its copy.
+ */
+ mvpp2_gather_hw_statistics(&port->stats_work.work);
+
+ mutex_lock(&port->gather_stats_lock);
+ memcpy(data, port->ethtool_stats,
+ sizeof(u64) * MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs));
+ mutex_unlock(&port->gather_stats_lock);
+}
+
+static int mvpp2_ethtool_get_sset_count(struct net_device *dev, int sset)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+
+ if (sset == ETH_SS_STATS)
+ return MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs);
+
+ return -EOPNOTSUPP;
+}
+
+static void mvpp2_mac_reset_assert(struct mvpp2_port *port)
+{
+ u32 val;
+
+ val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) |
+ MVPP2_GMAC_PORT_RESET_MASK;
+ writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
+
+ if (port->priv->hw_version >= MVPP22 && port->gop_id == 0) {
+ val = readl(port->base + MVPP22_XLG_CTRL0_REG) &
+ ~MVPP22_XLG_CTRL0_MAC_RESET_DIS;
+ writel(val, port->base + MVPP22_XLG_CTRL0_REG);
+ }
+}
+
+static void mvpp22_pcs_reset_assert(struct mvpp2_port *port)
+{
+ struct mvpp2 *priv = port->priv;
+ void __iomem *mpcs, *xpcs;
+ u32 val;
+
+ if (port->priv->hw_version == MVPP21 || port->gop_id != 0)
+ return;
+
+ mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
+ xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);
+
+ val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
+ val &= ~(MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX);
+ val |= MVPP22_MPCS_CLK_RESET_DIV_SET;
+ writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
+
+ val = readl(xpcs + MVPP22_XPCS_CFG0);
+ writel(val & ~MVPP22_XPCS_CFG0_RESET_DIS, xpcs + MVPP22_XPCS_CFG0);
+}
+
+static void mvpp22_pcs_reset_deassert(struct mvpp2_port *port,
+ phy_interface_t interface)
+{
+ struct mvpp2 *priv = port->priv;
+ void __iomem *mpcs, *xpcs;
+ u32 val;
+
+ if (port->priv->hw_version == MVPP21 || port->gop_id != 0)
+ return;
+
+ mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
+ xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);
+
+ switch (interface) {
+ case PHY_INTERFACE_MODE_5GBASER:
+ case PHY_INTERFACE_MODE_10GBASER:
+ val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
+ val |= MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX |
+ MAC_CLK_RESET_SD_TX;
+ val &= ~MVPP22_MPCS_CLK_RESET_DIV_SET;
+ writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
+ break;
+ case PHY_INTERFACE_MODE_XAUI:
+ case PHY_INTERFACE_MODE_RXAUI:
+ val = readl(xpcs + MVPP22_XPCS_CFG0);
+ writel(val | MVPP22_XPCS_CFG0_RESET_DIS, xpcs + MVPP22_XPCS_CFG0);
+ break;
+ default:
+ break;
+ }
+}
+
+/* Change maximum receive size of the port */
+static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
+{
+ u32 val;
+
+ val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
+ val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
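+ /* The frame size limit field counts in units of 2 bytes */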
+ val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
+ MVPP2_GMAC_MAX_RX_SIZE_OFFS);
+ writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
+}
+
+/* Change maximum receive size of the port */
+static inline void mvpp2_xlg_max_rx_size_set(struct mvpp2_port *port)
+{
+ u32 val;
+
+ val = readl(port->base + MVPP22_XLG_CTRL1_REG);
+ val &= ~MVPP22_XLG_CTRL1_FRAMESIZELIMIT_MASK;
+ val |= ((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
+ MVPP22_XLG_CTRL1_FRAMESIZELIMIT_OFFS;
+ writel(val, port->base + MVPP22_XLG_CTRL1_REG);
+}
+
+/* Set defaults for the MVPP2 port */
+static void mvpp2_defaults_set(struct mvpp2_port *port)
+{
+ int tx_port_num, val, queue, lrxq;
+
+ if (port->priv->hw_version == MVPP21) {
+ /* Update TX FIFO MIN Threshold */
+ val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
+ val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
+ /* Min. TX threshold must be less than minimal packet length */
+ val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
+ writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
+ }
+
+ /* Disable Legacy WRR, Disable EJP, Release from reset */
+ tx_port_num = mvpp2_egress_port(port);
+ mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG,
+ tx_port_num);
+ mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);
+
+ /* Set TXQ scheduling to Round-Robin */
+ mvpp2_write(port->priv, MVPP2_TXP_SCHED_FIXED_PRIO_REG, 0);
+
+ /* Close bandwidth for all queues */
+ for (queue = 0; queue < MVPP2_MAX_TXQ; queue++)
+ mvpp2_write(port->priv,
+ MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(queue), 0);
+
+ /* Set refill period to 1 usec, refill tokens
+ * and bucket size to maximum
+ */
+ mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG,
+ port->priv->tclk / USEC_PER_SEC);
+ val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG);
+ val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
+ val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
+ val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
+ mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val);
+ val = MVPP2_TXP_TOKEN_SIZE_MAX;
+ mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
+
+ /* Set MaximumLowLatencyPacketSize value to 256 */
+ mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id),
+ MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
+ MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));
+
+ /* Enable Rx cache snoop */
+ for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
+ queue = port->rxqs[lrxq]->id;
+ val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
+ val |= MVPP2_SNOOP_PKT_SIZE_MASK |
+ MVPP2_SNOOP_BUF_HDR_MASK;
+ mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
+ }
+
+ /* By default, mask all interrupts for all present CPUs */
+ mvpp2_interrupts_disable(port);
+}
+
+/* Enable/disable receiving packets */
+static void mvpp2_ingress_enable(struct mvpp2_port *port)
+{
+ u32 val;
+ int lrxq, queue;
+
+ for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
+ queue = port->rxqs[lrxq]->id;
+ val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
+ val &= ~MVPP2_RXQ_DISABLE_MASK;
+ mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
+ }
+}
+
+static void mvpp2_ingress_disable(struct mvpp2_port *port)
+{
+ u32 val;
+ int lrxq, queue;
+
+ for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
+ queue = port->rxqs[lrxq]->id;
+ val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
+ val |= MVPP2_RXQ_DISABLE_MASK;
+ mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
+ }
+}
+
+/* Enable transmit via physical egress queue
+ * - HW starts taking descriptors from DRAM
+ */
+static void mvpp2_egress_enable(struct mvpp2_port *port)
+{
+ u32 qmap;
+ int queue;
+ int tx_port_num = mvpp2_egress_port(port);
+
+ /* Enable all initialized TXs. */
+ qmap = 0;
+ for (queue = 0; queue < port->ntxqs; queue++) {
+ struct mvpp2_tx_queue *txq = port->txqs[queue];
+
+ if (txq->descs)
+ qmap |= (1 << queue);
+ }
+
+ mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
+ mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
+}
+
+/* Disable transmit via physical egress queue
+ * - HW doesn't take descriptors from DRAM
+ */
+static void mvpp2_egress_disable(struct mvpp2_port *port)
+{
+ u32 reg_data;
+ int delay;
+ int tx_port_num = mvpp2_egress_port(port);
+
+ /* Issue stop command for active channels only */
+ mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
+ reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) &
+ MVPP2_TXP_SCHED_ENQ_MASK;
+ if (reg_data != 0)
+ mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG,
+ (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET));
+
+ /* Wait for all Tx activity to terminate. */
+ delay = 0;
+ do {
+ if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
+ netdev_warn(port->dev,
+ "Tx stop timed out, status=0x%08x\n",
+ reg_data);
+ break;
+ }
+ mdelay(1);
+ delay++;
+
+ /* Check the port TX Command register to verify that all
+ * Tx queues are stopped
+ */
+ reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG);
+ } while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
+}
+
+/* Rx descriptors helper methods */
+
+/* Get number of Rx descriptors occupied by received packets */
+static inline int
+mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
+{
+ u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id));
+
+ return val & MVPP2_RXQ_OCCUPIED_MASK;
+}
+
+/* Update Rx queue status with the number of occupied and available
+ * Rx descriptor slots.
+ */
+static inline void
+mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
+ int used_count, int free_count)
+{
+ /* Decrement the number of used descriptors and increment the
+ * number of free descriptors.
+ */
+ u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);
+
+ mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
+}
+
+/* Get pointer to next RX descriptor to be processed by SW */
+static inline struct mvpp2_rx_desc *
+mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq)
+{
+ int rx_desc = rxq->next_desc_to_proc;
+
+ rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc);
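+ /* Prefetch the next descriptor while the current one is being processed */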
+ prefetch(rxq->descs + rxq->next_desc_to_proc);
+ return rxq->descs + rx_desc;
+}
+
+/* Set rx queue offset */
+static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
+ int prxq, int offset)
+{
+ u32 val;
+
+ /* Convert offset from bytes to units of 32 bytes */
+ offset = offset >> 5;
+
+ val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
+ val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;
+
+ /* Offset is in units of 32 bytes */
+ val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
+ MVPP2_RXQ_PACKET_OFFSET_MASK);
+
+ mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
+}
+
+/* Tx descriptors helper methods */
+
+/* Get pointer to next Tx descriptor to be processed (send) by HW */
+static struct mvpp2_tx_desc *
+mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
+{
+ int tx_desc = txq->next_desc_to_proc;
+
+ txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc);
+ return txq->descs + tx_desc;
+}
+
+/* Update HW with number of aggregated Tx descriptors to be sent
+ *
+ * Called only from mvpp2_tx(), so migration is disabled, using
+ * smp_processor_id() is OK.
+ */
+static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
+{
+ /* aggregated access - relevant TXQ number is written in TX desc */
+ mvpp2_thread_write(port->priv,
+ mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
+ MVPP2_AGGR_TXQ_UPDATE_REG, pending);
+}
+
+/* Check if there are enough free descriptors in aggregated txq.
+ * If not, update the number of occupied descriptors and repeat the check.
+ *
+ * Called only from mvpp2_tx(), so migration is disabled, using
+ * smp_processor_id() is OK.
+ */
+static int mvpp2_aggr_desc_num_check(struct mvpp2_port *port,
+ struct mvpp2_tx_queue *aggr_txq, int num)
+{
+ if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE) {
+ /* Update number of occupied aggregated Tx descriptors */
+ unsigned int thread =
+ mvpp2_cpu_to_thread(port->priv, smp_processor_id());
+ u32 val = mvpp2_read_relaxed(port->priv,
+ MVPP2_AGGR_TXQ_STATUS_REG(thread));
+
+ aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK;
+
+ if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE)
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+/* Reserved Tx descriptors allocation request
+ *
+ * Called only from mvpp2_txq_reserved_desc_num_proc(), itself called
+ * only by mvpp2_tx(), so migration is disabled, using
+ * smp_processor_id() is OK.
+ */
+static int mvpp2_txq_alloc_reserved_desc(struct mvpp2_port *port,
+ struct mvpp2_tx_queue *txq, int num)
+{
+ unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
+ struct mvpp2 *priv = port->priv;
+ u32 val;
+
+ val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num;
+ mvpp2_thread_write_relaxed(priv, thread, MVPP2_TXQ_RSVD_REQ_REG, val);
+
+ val = mvpp2_thread_read_relaxed(priv, thread, MVPP2_TXQ_RSVD_RSLT_REG);
+
+ return val & MVPP2_TXQ_RSVD_RSLT_MASK;
+}
+
+/* Check if there are enough reserved descriptors for transmission.
+ * If not, request a chunk of reserved descriptors and check again.
+ */
+static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2_port *port,
+ struct mvpp2_tx_queue *txq,
+ struct mvpp2_txq_pcpu *txq_pcpu,
+ int num)
+{
+ int req, desc_count;
+ unsigned int thread;
+
+ if (txq_pcpu->reserved_num >= num)
+ return 0;
+
+ /* Not enough descriptors reserved! Update the reserved descriptor
+ * count and check again.
+ */
+
+ desc_count = 0;
+ /* Compute total of used descriptors */
+ for (thread = 0; thread < port->priv->nthreads; thread++) {
+ struct mvpp2_txq_pcpu *txq_pcpu_aux;
+
+ txq_pcpu_aux = per_cpu_ptr(txq->pcpu, thread);
+ desc_count += txq_pcpu_aux->count;
+ desc_count += txq_pcpu_aux->reserved_num;
+ }
+
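+ /* Reserve at least a full chunk so that later packets on this thread can
+ * reuse the reservation without another HW request.
+ */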
+ req = max(MVPP2_CPU_DESC_CHUNK, num - txq_pcpu->reserved_num);
+ desc_count += req;
+
+ if (desc_count >
+ (txq->size - (MVPP2_MAX_THREADS * MVPP2_CPU_DESC_CHUNK)))
+ return -ENOMEM;
+
+ txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(port, txq, req);
+
+ /* The reserved count may have been updated: check again. */
+ if (txq_pcpu->reserved_num < num)
+ return -ENOMEM;
+ return 0;
+}
+
+/* Release the last allocated Tx descriptor. Useful to handle DMA
+ * mapping failures in the Tx path.
+ */
+static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq)
+{
+ if (txq->next_desc_to_proc == 0)
+ txq->next_desc_to_proc = txq->last_desc - 1;
+ else
+ txq->next_desc_to_proc--;
+}
+
+/* Set Tx descriptors fields relevant for CSUM calculation */
+static u32 mvpp2_txq_desc_csum(int l3_offs, __be16 l3_proto,
+ int ip_hdr_len, int l4_proto)
+{
+ u32 command;
+
+ /* fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
+ * G_L4_chk, L4_type required only for checksum calculation
+ */
+ command = (l3_offs << MVPP2_TXD_L3_OFF_SHIFT);
+ command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT);
+ command |= MVPP2_TXD_IP_CSUM_DISABLE;
+
+ if (l3_proto == htons(ETH_P_IP)) {
+ command &= ~MVPP2_TXD_IP_CSUM_DISABLE; /* enable IPv4 csum */
+ command &= ~MVPP2_TXD_L3_IP6; /* enable IPv4 */
+ } else {
+ command |= MVPP2_TXD_L3_IP6; /* enable IPv6 */
+ }
+
+ if (l4_proto == IPPROTO_TCP) {
+ command &= ~MVPP2_TXD_L4_UDP; /* enable TCP */
+ command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */
+ } else if (l4_proto == IPPROTO_UDP) {
+ command |= MVPP2_TXD_L4_UDP; /* enable UDP */
+ command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */
+ } else {
+ command |= MVPP2_TXD_L4_CSUM_NOT;
+ }
+
+ return command;
+}
+
+/* Get number of sent descriptors and decrement counter.
+ * The number of sent descriptors is returned.
+ * Per-thread access
+ *
+ * Called only from mvpp2_txq_done(), called from mvpp2_tx()
+ * (migration disabled) and from the TX completion tasklet (migration
+ * disabled) so using smp_processor_id() is OK.
+ */
+static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
+ struct mvpp2_tx_queue *txq)
+{
+ u32 val;
+
+ /* Reading status reg resets transmitted descriptor counter */
+ val = mvpp2_thread_read_relaxed(port->priv,
+ mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
+ MVPP2_TXQ_SENT_REG(txq->id));
+
+ return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
+ MVPP2_TRANSMITTED_COUNT_OFFSET;
+}
+
+/* Called through on_each_cpu(), so runs on all CPUs, with migration
+ * disabled, therefore using smp_processor_id() is OK.
+ */
+static void mvpp2_txq_sent_counter_clear(void *arg)
+{
+ struct mvpp2_port *port = arg;
+ int queue;
+
+ /* If the thread isn't used, don't do anything */
+ if (smp_processor_id() >= port->priv->nthreads)
+ return;
+
+ for (queue = 0; queue < port->ntxqs; queue++) {
+ int id = port->txqs[queue]->id;
+
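+ /* The sent-descriptor counter is cleared by the read itself */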
+ mvpp2_thread_read(port->priv,
+ mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
+ MVPP2_TXQ_SENT_REG(id));
+ }
+}
+
+/* Set max sizes for Tx queues */
+static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
+{
+ u32 val, size, mtu;
+ int txq, tx_port_num;
+
+ mtu = port->pkt_size * 8;
+ if (mtu > MVPP2_TXP_MTU_MAX)
+ mtu = MVPP2_TXP_MTU_MAX;
+
+ /* Workaround for wrong token bucket update: set MTU value to 3x the real MTU */
+ mtu = 3 * mtu;
+
+ /* Indirect access to registers */
+ tx_port_num = mvpp2_egress_port(port);
+ mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
+
+ /* Set MTU */
+ val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG);
+ val &= ~MVPP2_TXP_MTU_MAX;
+ val |= mtu;
+ mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val);
+
+ /* TXP token size and all TXQs token size must be larger than the MTU */
+ val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
+ size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
+ if (size < mtu) {
+ size = mtu;
+ val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
+ val |= size;
+ mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
+ }
+
+ for (txq = 0; txq < port->ntxqs; txq++) {
+ val = mvpp2_read(port->priv,
+ MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
+ size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;
+
+ if (size < mtu) {
+ size = mtu;
+ val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
+ val |= size;
+ mvpp2_write(port->priv,
+ MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq),
+ val);
+ }
+ }
+}
+
+/* Set the number of non-occupied descriptors threshold */
+static void mvpp2_set_rxq_free_tresh(struct mvpp2_port *port,
+ struct mvpp2_rx_queue *rxq)
+{
+ u32 val;
+
+ mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
+
+ val = mvpp2_read(port->priv, MVPP2_RXQ_THRESH_REG);
+ val &= ~MVPP2_RXQ_NON_OCCUPIED_MASK;
+ val |= MSS_THRESHOLD_STOP << MVPP2_RXQ_NON_OCCUPIED_OFFSET;
+ mvpp2_write(port->priv, MVPP2_RXQ_THRESH_REG, val);
+}
+
+/* Set the number of packets that will be received before Rx interrupt
+ * will be generated by HW.
+ */
+static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
+ struct mvpp2_rx_queue *rxq)
+{
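+ /* get_cpu() disables preemption so the indirect, per-thread register
+ * accesses below stay on the same thread until put_cpu().
+ */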
+ unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
+
+ if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK)
+ rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK;
+
+ mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id);
+ mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_THRESH_REG,
+ rxq->pkts_coal);
+
+ put_cpu();
+}
+
+/* Set the number of completed Tx packets before a Tx-done interrupt is
+ * raised. The threshold registers are per-queue and per-thread, so every
+ * thread is programmed.
+ */
+static void mvpp2_tx_pkts_coal_set(struct mvpp2_port *port,
+ struct mvpp2_tx_queue *txq)
+{
+ unsigned int thread;
+ u32 val;
+
+ if (txq->done_pkts_coal > MVPP2_TXQ_THRESH_MASK)
+ txq->done_pkts_coal = MVPP2_TXQ_THRESH_MASK;
+
+ val = (txq->done_pkts_coal << MVPP2_TXQ_THRESH_OFFSET);
+ /* PKT-coalescing registers are per-queue + per-thread */
+ for (thread = 0; thread < MVPP2_MAX_THREADS; thread++) {
+ mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
+ mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_THRESH_REG, val);
+ }
+}
+
+static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz)
+{
+ u64 tmp = (u64)clk_hz * usec;
+
+ do_div(tmp, USEC_PER_SEC);
+
+ return tmp > U32_MAX ? U32_MAX : tmp;
+}
+
+static u32 mvpp2_cycles_to_usec(u32 cycles, unsigned long clk_hz)
+{
+ u64 tmp = (u64)cycles * USEC_PER_SEC;
+
+ do_div(tmp, clk_hz);
+
+ return tmp > U32_MAX ? U32_MAX : tmp;
+}
+
+/* Set the time delay in usec before Rx interrupt */
+static void mvpp2_rx_time_coal_set(struct mvpp2_port *port,
+ struct mvpp2_rx_queue *rxq)
+{
+ unsigned long freq = port->priv->tclk;
+ u32 val = mvpp2_usec_to_cycles(rxq->time_coal, freq);
+
+ if (val > MVPP2_MAX_ISR_RX_THRESHOLD) {
+ rxq->time_coal =
+ mvpp2_cycles_to_usec(MVPP2_MAX_ISR_RX_THRESHOLD, freq);
+
+ /* re-evaluate to get actual register value */
+ val = mvpp2_usec_to_cycles(rxq->time_coal, freq);
+ }
+
+ mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val);
+}
+
+static void mvpp2_tx_time_coal_set(struct mvpp2_port *port)
+{
+ unsigned long freq = port->priv->tclk;
+ u32 val = mvpp2_usec_to_cycles(port->tx_time_coal, freq);
+
+ if (val > MVPP2_MAX_ISR_TX_THRESHOLD) {
+ port->tx_time_coal =
+ mvpp2_cycles_to_usec(MVPP2_MAX_ISR_TX_THRESHOLD, freq);
+
+ /* re-evaluate to get actual register value */
+ val = mvpp2_usec_to_cycles(port->tx_time_coal, freq);
+ }
+
+ mvpp2_write(port->priv, MVPP2_ISR_TX_THRESHOLD_REG(port->id), val);
+}
+
+/* Free Tx queue skbuffs */
+static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
+ struct mvpp2_tx_queue *txq,
+ struct mvpp2_txq_pcpu *txq_pcpu, int num)
+{
+ struct xdp_frame_bulk bq;
+ int i;
+
+ xdp_frame_bulk_init(&bq);
+
+ rcu_read_lock(); /* need for xdp_return_frame_bulk */
+
+ for (i = 0; i < num; i++) {
+ struct mvpp2_txq_pcpu_buf *tx_buf =
+ txq_pcpu->buffs + txq_pcpu->txq_get_index;
+
+ if (!IS_TSO_HEADER(txq_pcpu, tx_buf->dma) &&
+ tx_buf->type != MVPP2_TYPE_XDP_TX)
+ dma_unmap_single(port->dev->dev.parent, tx_buf->dma,
+ tx_buf->size, DMA_TO_DEVICE);
+ if (tx_buf->type == MVPP2_TYPE_SKB && tx_buf->skb)
+ dev_kfree_skb_any(tx_buf->skb);
+ else if (tx_buf->type == MVPP2_TYPE_XDP_TX ||
+ tx_buf->type == MVPP2_TYPE_XDP_NDO)
+ xdp_return_frame_bulk(tx_buf->xdpf, &bq);
+
+ mvpp2_txq_inc_get(txq_pcpu);
+ }
+ xdp_flush_frame_bulk(&bq);
+
+ rcu_read_unlock();
+}
+
+static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
+ u32 cause)
+{
+ int queue = fls(cause) - 1;
+
+ return port->rxqs[queue];
+}
+
+static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
+ u32 cause)
+{
+ int queue = fls(cause) - 1;
+
+ return port->txqs[queue];
+}
+
+/* Handle end of transmission */
+static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
+ struct mvpp2_txq_pcpu *txq_pcpu)
+{
+ struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id);
+ int tx_done;
+
+ if (txq_pcpu->thread != mvpp2_cpu_to_thread(port->priv, smp_processor_id()))
+ netdev_err(port->dev, "wrong cpu on the end of Tx processing\n");
+
+ tx_done = mvpp2_txq_sent_desc_proc(port, txq);
+ if (!tx_done)
+ return;
+ mvpp2_txq_bufs_free(port, txq, txq_pcpu, tx_done);
+
+ txq_pcpu->count -= tx_done;
+
+ if (netif_tx_queue_stopped(nq))
+ if (txq_pcpu->count <= txq_pcpu->wake_threshold)
+ netif_tx_wake_queue(nq);
+}
+
+static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause,
+ unsigned int thread)
+{
+ struct mvpp2_tx_queue *txq;
+ struct mvpp2_txq_pcpu *txq_pcpu;
+ unsigned int tx_todo = 0;
+
+ while (cause) {
+ txq = mvpp2_get_tx_queue(port, cause);
+ if (!txq)
+ break;
+
+ txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
+
+ if (txq_pcpu->count) {
+ mvpp2_txq_done(port, txq, txq_pcpu);
+ tx_todo += txq_pcpu->count;
+ }
+
+ cause &= ~(1 << txq->log_id);
+ }
+ return tx_todo;
+}
+
+/* Rx/Tx queue initialization/cleanup methods */
+
+/* Allocate and initialize descriptors for aggr TXQ */
+static int mvpp2_aggr_txq_init(struct platform_device *pdev,
+ struct mvpp2_tx_queue *aggr_txq,
+ unsigned int thread, struct mvpp2 *priv)
+{
+ u32 txq_dma;
+
+ /* Allocate memory for TX descriptors */
+ aggr_txq->descs = dma_alloc_coherent(&pdev->dev,
+ MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
+ &aggr_txq->descs_dma, GFP_KERNEL);
+ if (!aggr_txq->descs)
+ return -ENOMEM;
+
+ aggr_txq->last_desc = MVPP2_AGGR_TXQ_SIZE - 1;
+
+ /* Aggregated TXQ "no reset" workaround: resume from the current HW index */
+ aggr_txq->next_desc_to_proc = mvpp2_read(priv,
+ MVPP2_AGGR_TXQ_INDEX_REG(thread));
+
+ /* Set Tx descriptors queue starting address indirect
+ * access
+ */
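+ /* PPv2.2 and newer support DMA addresses wider than the 32-bit register,
+ * so the descriptor base is programmed shifted right by
+ * MVPP22_AGGR_TXQ_DESC_ADDR_OFFS bits.
+ */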
+ if (priv->hw_version == MVPP21)
+ txq_dma = aggr_txq->descs_dma;
+ else
+ txq_dma = aggr_txq->descs_dma >>
+ MVPP22_AGGR_TXQ_DESC_ADDR_OFFS;
+
+ mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(thread), txq_dma);
+ mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(thread),
+ MVPP2_AGGR_TXQ_SIZE);
+
+ return 0;
+}
+
+/* Create a specified Rx queue */
+static int mvpp2_rxq_init(struct mvpp2_port *port,
+ struct mvpp2_rx_queue *rxq)
+{
+ struct mvpp2 *priv = port->priv;
+ unsigned int thread;
+ u32 rxq_dma;
+ int err;
+
+ rxq->size = port->rx_ring_size;
+
+ /* Allocate memory for RX descriptors */
+ rxq->descs = dma_alloc_coherent(port->dev->dev.parent,
+ rxq->size * MVPP2_DESC_ALIGNED_SIZE,
+ &rxq->descs_dma, GFP_KERNEL);
+ if (!rxq->descs)
+ return -ENOMEM;
+
+ rxq->last_desc = rxq->size - 1;
+
+ /* Zero occupied and non-occupied counters - direct access */
+ mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
+
+ /* Set Rx descriptors queue starting address - indirect access */
+ thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
+ mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id);
+ if (port->priv->hw_version == MVPP21)
+ rxq_dma = rxq->descs_dma;
+ else
+ rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS;
+ mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma);
+ mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
+ mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_INDEX_REG, 0);
+ put_cpu();
+
+ /* Set Offset */
+ mvpp2_rxq_offset_set(port, rxq->id, MVPP2_SKB_HEADROOM);
+
+ /* Set coalescing pkts and time */
+ mvpp2_rx_pkts_coal_set(port, rxq);
+ mvpp2_rx_time_coal_set(port, rxq);
+
+ /* Set the number of non occupied descriptors threshold */
+ mvpp2_set_rxq_free_tresh(port, rxq);
+
+ /* Add number of descriptors ready for receiving packets */
+ mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);
+
+ if (priv->percpu_pools) {
+ err = xdp_rxq_info_reg(&rxq->xdp_rxq_short, port->dev, rxq->logic_rxq, 0);
+ if (err < 0)
+ goto err_free_dma;
+
+ err = xdp_rxq_info_reg(&rxq->xdp_rxq_long, port->dev, rxq->logic_rxq, 0);
+ if (err < 0)
+ goto err_unregister_rxq_short;
+
+ /* Every RXQ has a pool for short and another for long packets */
+ err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq_short,
+ MEM_TYPE_PAGE_POOL,
+ priv->page_pool[rxq->logic_rxq]);
+ if (err < 0)
+ goto err_unregister_rxq_long;
+
+ err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq_long,
+ MEM_TYPE_PAGE_POOL,
+ priv->page_pool[rxq->logic_rxq +
+ port->nrxqs]);
+ if (err < 0)
+ goto err_unregister_mem_rxq_short;
+ }
+
+ return 0;
+
+err_unregister_mem_rxq_short:
+ xdp_rxq_info_unreg_mem_model(&rxq->xdp_rxq_short);
+err_unregister_rxq_long:
+ xdp_rxq_info_unreg(&rxq->xdp_rxq_long);
+err_unregister_rxq_short:
+ xdp_rxq_info_unreg(&rxq->xdp_rxq_short);
+err_free_dma:
+ dma_free_coherent(port->dev->dev.parent,
+ rxq->size * MVPP2_DESC_ALIGNED_SIZE,
+ rxq->descs, rxq->descs_dma);
+ return err;
+}
+
+/* Push packets received by the RXQ to BM pool */
+static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
+ struct mvpp2_rx_queue *rxq)
+{
+ int rx_received, i;
+
+ rx_received = mvpp2_rxq_received(port, rxq->id);
+ if (!rx_received)
+ return;
+
+ for (i = 0; i < rx_received; i++) {
+ struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
+ u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
+ int pool;
+
+ pool = (status & MVPP2_RXD_BM_POOL_ID_MASK) >>
+ MVPP2_RXD_BM_POOL_ID_OFFS;
+
+ mvpp2_bm_pool_put(port, pool,
+ mvpp2_rxdesc_dma_addr_get(port, rx_desc),
+ mvpp2_rxdesc_cookie_get(port, rx_desc));
+ }
+ mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
+}
+
+/* Cleanup Rx queue */
+static void mvpp2_rxq_deinit(struct mvpp2_port *port,
+ struct mvpp2_rx_queue *rxq)
+{
+ unsigned int thread;
+
+ if (xdp_rxq_info_is_reg(&rxq->xdp_rxq_short))
+ xdp_rxq_info_unreg(&rxq->xdp_rxq_short);
+
+ if (xdp_rxq_info_is_reg(&rxq->xdp_rxq_long))
+ xdp_rxq_info_unreg(&rxq->xdp_rxq_long);
+
+ mvpp2_rxq_drop_pkts(port, rxq);
+
+ if (rxq->descs)
+ dma_free_coherent(port->dev->dev.parent,
+ rxq->size * MVPP2_DESC_ALIGNED_SIZE,
+ rxq->descs,
+ rxq->descs_dma);
+
+ rxq->descs = NULL;
+ rxq->last_desc = 0;
+ rxq->next_desc_to_proc = 0;
+ rxq->descs_dma = 0;
+
+ /* Clear Rx descriptors queue starting address and size;
+ * free descriptor number
+ */
+ mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
+ thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
+ mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id);
+ mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_ADDR_REG, 0);
+ mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_SIZE_REG, 0);
+ put_cpu();
+}
+
+/* Create and initialize a Tx queue */
+static int mvpp2_txq_init(struct mvpp2_port *port,
+ struct mvpp2_tx_queue *txq)
+{
+ u32 val;
+ unsigned int thread;
+ int desc, desc_per_txq, tx_port_num;
+ struct mvpp2_txq_pcpu *txq_pcpu;
+
+ txq->size = port->tx_ring_size;
+
+ /* Allocate memory for Tx descriptors */
+ txq->descs = dma_alloc_coherent(port->dev->dev.parent,
+ txq->size * MVPP2_DESC_ALIGNED_SIZE,
+ &txq->descs_dma, GFP_KERNEL);
+ if (!txq->descs)
+ return -ENOMEM;
+
+ txq->last_desc = txq->size - 1;
+
+ /* Set Tx descriptors queue starting address - indirect access */
+ thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
+ mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
+ mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_ADDR_REG,
+ txq->descs_dma);
+ mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_SIZE_REG,
+ txq->size & MVPP2_TXQ_DESC_SIZE_MASK);
+ mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_INDEX_REG, 0);
+ mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_RSVD_CLR_REG,
+ txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
+ val = mvpp2_thread_read(port->priv, thread, MVPP2_TXQ_PENDING_REG);
+ val &= ~MVPP2_TXQ_PENDING_MASK;
+ mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PENDING_REG, val);
+
+ /* Calculate base address in prefetch buffer. We reserve 16 descriptors
+ * for each existing TXQ.
+ * TCONTs for the PON port must be contiguous from 0 to MVPP2_MAX_TCONT.
+ * GbE ports are assumed to be contiguous from 0 to MVPP2_MAX_PORTS.
+ */
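+ /* Illustrative example (assuming MVPP2_MAX_TXQ is 8): port 1,
+ * logical queue 2 gets its 16-descriptor window at offset
+ * (1 * 8 + 2) * 16 = 160 within the prefetch buffer.
+ */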
+ desc_per_txq = 16;
+ desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
+ (txq->log_id * desc_per_txq);
+
+ mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG,
+ MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
+ MVPP2_PREF_BUF_THRESH(desc_per_txq / 2));
+ put_cpu();
+
+ /* WRR / EJP configuration - indirect access */
+ tx_port_num = mvpp2_egress_port(port);
+ mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
+
+ val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id));
+ val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
+ val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1);
+ val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
+ mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val);
+
+ val = MVPP2_TXQ_TOKEN_SIZE_MAX;
+ mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
+ val);
+
+ for (thread = 0; thread < port->priv->nthreads; thread++) {
+ txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
+ txq_pcpu->size = txq->size;
+ txq_pcpu->buffs = kmalloc_array(txq_pcpu->size,
+ sizeof(*txq_pcpu->buffs),
+ GFP_KERNEL);
+ if (!txq_pcpu->buffs)
+ return -ENOMEM;
+
+ txq_pcpu->count = 0;
+ txq_pcpu->reserved_num = 0;
+ txq_pcpu->txq_put_index = 0;
+ txq_pcpu->txq_get_index = 0;
+ txq_pcpu->tso_headers = NULL;
+
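+ /* Stop the netdev TX queue once only MVPP2_MAX_SKB_DESCS or fewer
+ * descriptors remain free, and wake it again when the in-flight
+ * count drops to half of the stop threshold.
+ */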
+ txq_pcpu->stop_threshold = txq->size - MVPP2_MAX_SKB_DESCS;
+ txq_pcpu->wake_threshold = txq_pcpu->stop_threshold / 2;
+
+ txq_pcpu->tso_headers =
+ dma_alloc_coherent(port->dev->dev.parent,
+ txq_pcpu->size * TSO_HEADER_SIZE,
+ &txq_pcpu->tso_headers_dma,
+ GFP_KERNEL);
+ if (!txq_pcpu->tso_headers)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/* Free allocated TXQ resources */
+static void mvpp2_txq_deinit(struct mvpp2_port *port,
+ struct mvpp2_tx_queue *txq)
+{
+ struct mvpp2_txq_pcpu *txq_pcpu;
+ unsigned int thread;
+
+ for (thread = 0; thread < port->priv->nthreads; thread++) {
+ txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
+ kfree(txq_pcpu->buffs);
+
+ if (txq_pcpu->tso_headers)
+ dma_free_coherent(port->dev->dev.parent,
+ txq_pcpu->size * TSO_HEADER_SIZE,
+ txq_pcpu->tso_headers,
+ txq_pcpu->tso_headers_dma);
+
+ txq_pcpu->tso_headers = NULL;
+ }
+
+ if (txq->descs)
+ dma_free_coherent(port->dev->dev.parent,
+ txq->size * MVPP2_DESC_ALIGNED_SIZE,
+ txq->descs, txq->descs_dma);
+
+ txq->descs = NULL;
+ txq->last_desc = 0;
+ txq->next_desc_to_proc = 0;
+ txq->descs_dma = 0;
+
+ /* Set minimum bandwidth for disabled TXQs */
+ mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->log_id), 0);
+
+ /* Clear the Tx descriptor queue starting address and size */
+ thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
+ mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
+ mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_ADDR_REG, 0);
+ mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_SIZE_REG, 0);
+ put_cpu();
+}
+
+ /* Drain and clean up a Tx queue */
+static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
+{
+ struct mvpp2_txq_pcpu *txq_pcpu;
+ int delay, pending;
+ unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
+ u32 val;
+
+ mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
+ val = mvpp2_thread_read(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG);
+ val |= MVPP2_TXQ_DRAIN_EN_MASK;
+ mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, val);
+
+ /* The NAPI queue has been stopped, so wait for all packets
+ * to be transmitted.
+ */
+ delay = 0;
+ do {
+ if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) {
+ netdev_warn(port->dev,
+ "port %d: cleaning queue %d timed out\n",
+ port->id, txq->log_id);
+ break;
+ }
+ mdelay(1);
+ delay++;
+
+ pending = mvpp2_thread_read(port->priv, thread,
+ MVPP2_TXQ_PENDING_REG);
+ pending &= MVPP2_TXQ_PENDING_MASK;
+ } while (pending);
+
+ val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
+ mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, val);
+ put_cpu();
+
+ for (thread = 0; thread < port->priv->nthreads; thread++) {
+ txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
+
+ /* Release all packets */
+ mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count);
+
+ /* Reset queue */
+ txq_pcpu->count = 0;
+ txq_pcpu->txq_put_index = 0;
+ txq_pcpu->txq_get_index = 0;
+ }
+}
+
+/* Cleanup all Tx queues */
+static void mvpp2_cleanup_txqs(struct mvpp2_port *port)
+{
+ struct mvpp2_tx_queue *txq;
+ int queue;
+ u32 val;
+
+ val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG);
+
+ /* Reset Tx ports and delete Tx queues */
+ val |= MVPP2_TX_PORT_FLUSH_MASK(port->id);
+ mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
+
+ for (queue = 0; queue < port->ntxqs; queue++) {
+ txq = port->txqs[queue];
+ mvpp2_txq_clean(port, txq);
+ mvpp2_txq_deinit(port, txq);
+ }
+
+ on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
+
+ val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id);
+ mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
+}
+
+/* Cleanup all Rx queues */
+static void mvpp2_cleanup_rxqs(struct mvpp2_port *port)
+{
+ int queue;
+
+ for (queue = 0; queue < port->nrxqs; queue++)
+ mvpp2_rxq_deinit(port, port->rxqs[queue]);
+
+ if (port->tx_fc)
+ mvpp2_rxq_disable_fc(port);
+}
+
+/* Init all Rx queues for port */
+static int mvpp2_setup_rxqs(struct mvpp2_port *port)
+{
+ int queue, err;
+
+ for (queue = 0; queue < port->nrxqs; queue++) {
+ err = mvpp2_rxq_init(port, port->rxqs[queue]);
+ if (err)
+ goto err_cleanup;
+ }
+
+ if (port->tx_fc)
+ mvpp2_rxq_enable_fc(port);
+
+ return 0;
+
+err_cleanup:
+ mvpp2_cleanup_rxqs(port);
+ return err;
+}
+
+/* Init all tx queues for port */
+static int mvpp2_setup_txqs(struct mvpp2_port *port)
+{
+ struct mvpp2_tx_queue *txq;
+ int queue, err;
+
+ for (queue = 0; queue < port->ntxqs; queue++) {
+ txq = port->txqs[queue];
+ err = mvpp2_txq_init(port, txq);
+ if (err)
+ goto err_cleanup;
+
+ /* Assign this queue to a CPU */
+ if (queue < num_possible_cpus())
+ netif_set_xps_queue(port->dev, cpumask_of(queue), queue);
+ }
+
+ if (port->has_tx_irqs) {
+ mvpp2_tx_time_coal_set(port);
+ for (queue = 0; queue < port->ntxqs; queue++) {
+ txq = port->txqs[queue];
+ mvpp2_tx_pkts_coal_set(port, txq);
+ }
+ }
+
+ on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
+ return 0;
+
+err_cleanup:
+ mvpp2_cleanup_txqs(port);
+ return err;
+}
+
+/* The callback for per-port interrupt */
+static irqreturn_t mvpp2_isr(int irq, void *dev_id)
+{
+ struct mvpp2_queue_vector *qv = dev_id;
+
+ mvpp2_qvec_interrupt_disable(qv);
+
+ napi_schedule(&qv->napi);
+
+ return IRQ_HANDLED;
+}
+
+static void mvpp2_isr_handle_ptp_queue(struct mvpp2_port *port, int nq)
+{
+ struct skb_shared_hwtstamps shhwtstamps;
+ struct mvpp2_hwtstamp_queue *queue;
+ struct sk_buff *skb;
+ void __iomem *ptp_q;
+ unsigned int id;
+ u32 r0, r1, r2;
+
+ ptp_q = port->priv->iface_base + MVPP22_PTP_BASE(port->gop_id);
+ if (nq)
+ ptp_q += MVPP22_PTP_TX_Q1_R0 - MVPP22_PTP_TX_Q0_R0;
+
+ queue = &port->tx_hwtstamp_queue[nq];
+
+ while (1) {
+ r0 = readl_relaxed(ptp_q + MVPP22_PTP_TX_Q0_R0) & 0xffff;
+ if (!r0)
+ break;
+
+ r1 = readl_relaxed(ptp_q + MVPP22_PTP_TX_Q0_R1) & 0xffff;
+ r2 = readl_relaxed(ptp_q + MVPP22_PTP_TX_Q0_R2) & 0xffff;
+
+ id = (r0 >> 1) & 31;
+
+ skb = queue->skb[id];
+ queue->skb[id] = NULL;
+ if (skb) {
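+ /* The hardware splits the TX timestamp across the three
+ * 16-bit queue registers; reassemble it into one 32-bit
+ * value before handing it to the TAI for conversion.
+ */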
+ u32 ts = r2 << 19 | r1 << 3 | r0 >> 13;
+
+ mvpp22_tai_tstamp(port->priv->tai, ts, &shhwtstamps);
+ skb_tstamp_tx(skb, &shhwtstamps);
+ dev_kfree_skb_any(skb);
+ }
+ }
+}
+
+static void mvpp2_isr_handle_ptp(struct mvpp2_port *port)
+{
+ void __iomem *ptp;
+ u32 val;
+
+ ptp = port->priv->iface_base + MVPP22_PTP_BASE(port->gop_id);
+ val = readl(ptp + MVPP22_PTP_INT_CAUSE);
+ if (val & MVPP22_PTP_INT_CAUSE_QUEUE0)
+ mvpp2_isr_handle_ptp_queue(port, 0);
+ if (val & MVPP22_PTP_INT_CAUSE_QUEUE1)
+ mvpp2_isr_handle_ptp_queue(port, 1);
+}
+
+static void mvpp2_isr_handle_link(struct mvpp2_port *port, bool link)
+{
+ struct net_device *dev = port->dev;
+
+ if (port->phylink) {
+ phylink_mac_change(port->phylink, link);
+ return;
+ }
+
+ if (!netif_running(dev))
+ return;
+
+ if (link) {
+ mvpp2_interrupts_enable(port);
+
+ mvpp2_egress_enable(port);
+ mvpp2_ingress_enable(port);
+ netif_carrier_on(dev);
+ netif_tx_wake_all_queues(dev);
+ } else {
+ netif_tx_stop_all_queues(dev);
+ netif_carrier_off(dev);
+ mvpp2_ingress_disable(port);
+ mvpp2_egress_disable(port);
+
+ mvpp2_interrupts_disable(port);
+ }
+}
+
+static void mvpp2_isr_handle_xlg(struct mvpp2_port *port)
+{
+ bool link;
+ u32 val;
+
+ val = readl(port->base + MVPP22_XLG_INT_STAT);
+ if (val & MVPP22_XLG_INT_STAT_LINK) {
+ val = readl(port->base + MVPP22_XLG_STATUS);
+ link = (val & MVPP22_XLG_STATUS_LINK_UP);
+ mvpp2_isr_handle_link(port, link);
+ }
+}
+
+static void mvpp2_isr_handle_gmac_internal(struct mvpp2_port *port)
+{
+ bool link;
+ u32 val;
+
+ if (phy_interface_mode_is_rgmii(port->phy_interface) ||
+ phy_interface_mode_is_8023z(port->phy_interface) ||
+ port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
+ val = readl(port->base + MVPP22_GMAC_INT_STAT);
+ if (val & MVPP22_GMAC_INT_STAT_LINK) {
+ val = readl(port->base + MVPP2_GMAC_STATUS0);
+ link = (val & MVPP2_GMAC_STATUS0_LINK_UP);
+ mvpp2_isr_handle_link(port, link);
+ }
+ }
+}
+
+/* Per-port interrupt for link status changes */
+static irqreturn_t mvpp2_port_isr(int irq, void *dev_id)
+{
+ struct mvpp2_port *port = (struct mvpp2_port *)dev_id;
+ u32 val;
+
+ mvpp22_gop_mask_irq(port);
+
+ if (mvpp2_port_supports_xlg(port) &&
+ mvpp2_is_xlg(port->phy_interface)) {
+ /* Check the external status register */
+ val = readl(port->base + MVPP22_XLG_EXT_INT_STAT);
+ if (val & MVPP22_XLG_EXT_INT_STAT_XLG)
+ mvpp2_isr_handle_xlg(port);
+ if (val & MVPP22_XLG_EXT_INT_STAT_PTP)
+ mvpp2_isr_handle_ptp(port);
+ } else {
+ /* If it's not the XLG, we must be using the GMAC.
+ * Check the summary status.
+ */
+ val = readl(port->base + MVPP22_GMAC_INT_SUM_STAT);
+ if (val & MVPP22_GMAC_INT_SUM_STAT_INTERNAL)
+ mvpp2_isr_handle_gmac_internal(port);
+ if (val & MVPP22_GMAC_INT_SUM_STAT_PTP)
+ mvpp2_isr_handle_ptp(port);
+ }
+
+ mvpp22_gop_unmask_irq(port);
+ return IRQ_HANDLED;
+}
+
+static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
+{
+ struct net_device *dev;
+ struct mvpp2_port *port;
+ struct mvpp2_port_pcpu *port_pcpu;
+ unsigned int tx_todo, cause;
+
+ port_pcpu = container_of(timer, struct mvpp2_port_pcpu, tx_done_timer);
+ dev = port_pcpu->dev;
+
+ if (!netif_running(dev))
+ return HRTIMER_NORESTART;
+
+ port_pcpu->timer_scheduled = false;
+ port = netdev_priv(dev);
+
+ /* Process all the Tx queues */
+ cause = (1 << port->ntxqs) - 1;
+ tx_todo = mvpp2_tx_done(port, cause,
+ mvpp2_cpu_to_thread(port->priv, smp_processor_id()));
+
+ /* Set the timer in case not all the packets were processed */
+ if (tx_todo && !port_pcpu->timer_scheduled) {
+ port_pcpu->timer_scheduled = true;
+ hrtimer_forward_now(&port_pcpu->tx_done_timer,
+ MVPP2_TXDONE_HRTIMER_PERIOD_NS);
+
+ return HRTIMER_RESTART;
+ }
+ return HRTIMER_NORESTART;
+}
+
+/* Main RX/TX processing routines */
+
+/* Display more error info */
+static void mvpp2_rx_error(struct mvpp2_port *port,
+ struct mvpp2_rx_desc *rx_desc)
+{
+ u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
+ size_t sz = mvpp2_rxdesc_size_get(port, rx_desc);
+ char *err_str = NULL;
+
+ switch (status & MVPP2_RXD_ERR_CODE_MASK) {
+ case MVPP2_RXD_ERR_CRC:
+ err_str = "crc";
+ break;
+ case MVPP2_RXD_ERR_OVERRUN:
+ err_str = "overrun";
+ break;
+ case MVPP2_RXD_ERR_RESOURCE:
+ err_str = "resource";
+ break;
+ }
+ if (err_str && net_ratelimit())
+ netdev_err(port->dev,
+ "bad rx status %08x (%s error), size=%zu\n",
+ status, err_str, sz);
+}
+
+/* Handle RX checksum offload */
+static int mvpp2_rx_csum(struct mvpp2_port *port, u32 status)
+{
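+ /* Report CHECKSUM_UNNECESSARY only when L3 is IPv4 without header
+ * errors or IPv6, and L4 is TCP or UDP with a checksum the
+ * hardware validated; anything else falls back to CHECKSUM_NONE.
+ */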
+ if (((status & MVPP2_RXD_L3_IP4) &&
+ !(status & MVPP2_RXD_IP4_HEADER_ERR)) ||
+ (status & MVPP2_RXD_L3_IP6))
+ if (((status & MVPP2_RXD_L4_UDP) ||
+ (status & MVPP2_RXD_L4_TCP)) &&
+ (status & MVPP2_RXD_L4_CSUM_OK))
+ return CHECKSUM_UNNECESSARY;
+
+ return CHECKSUM_NONE;
+}
+
+ /* Allocate a new buffer and add it to the BM pool */
+static int mvpp2_rx_refill(struct mvpp2_port *port,
+ struct mvpp2_bm_pool *bm_pool,
+ struct page_pool *page_pool, int pool)
+{
+ dma_addr_t dma_addr;
+ phys_addr_t phys_addr;
+ void *buf;
+
+ buf = mvpp2_buf_alloc(port, bm_pool, page_pool,
+ &dma_addr, &phys_addr, GFP_ATOMIC);
+ if (!buf)
+ return -ENOMEM;
+
+ mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
+
+ return 0;
+}
+
+/* Handle tx checksum */
+static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
+{
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ int ip_hdr_len = 0;
+ u8 l4_proto;
+ __be16 l3_proto = vlan_get_protocol(skb);
+
+ if (l3_proto == htons(ETH_P_IP)) {
+ struct iphdr *ip4h = ip_hdr(skb);
+
+ /* Calculate IPv4 checksum and L4 checksum */
+ ip_hdr_len = ip4h->ihl;
+ l4_proto = ip4h->protocol;
+ } else if (l3_proto == htons(ETH_P_IPV6)) {
+ struct ipv6hdr *ip6h = ipv6_hdr(skb);
+
+ /* Read l4_protocol from one of IPv6 extra headers */
+ if (skb_network_header_len(skb) > 0)
+ ip_hdr_len = (skb_network_header_len(skb) >> 2);
+ l4_proto = ip6h->nexthdr;
+ } else {
+ return MVPP2_TXD_L4_CSUM_NOT;
+ }
+
+ return mvpp2_txq_desc_csum(skb_network_offset(skb),
+ l3_proto, ip_hdr_len, l4_proto);
+ }
+
+ return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
+}
+
+static void mvpp2_xdp_finish_tx(struct mvpp2_port *port, u16 txq_id, int nxmit, int nxmit_byte)
+{
+ unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
+ struct mvpp2_tx_queue *aggr_txq;
+ struct mvpp2_txq_pcpu *txq_pcpu;
+ struct mvpp2_tx_queue *txq;
+ struct netdev_queue *nq;
+
+ txq = port->txqs[txq_id];
+ txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
+ nq = netdev_get_tx_queue(port->dev, txq_id);
+ aggr_txq = &port->priv->aggr_txqs[thread];
+
+ txq_pcpu->reserved_num -= nxmit;
+ txq_pcpu->count += nxmit;
+ aggr_txq->count += nxmit;
+
+ /* Enable transmit */
+ wmb();
+ mvpp2_aggr_txq_pend_desc_add(port, nxmit);
+
+ if (txq_pcpu->count >= txq_pcpu->stop_threshold)
+ netif_tx_stop_queue(nq);
+
+ /* Finalize TX processing */
+ if (!port->has_tx_irqs && txq_pcpu->count >= txq->done_pkts_coal)
+ mvpp2_txq_done(port, txq, txq_pcpu);
+}
+
+static int
+mvpp2_xdp_submit_frame(struct mvpp2_port *port, u16 txq_id,
+ struct xdp_frame *xdpf, bool dma_map)
+{
+ unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
+ u32 tx_cmd = MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE |
+ MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
+ enum mvpp2_tx_buf_type buf_type;
+ struct mvpp2_txq_pcpu *txq_pcpu;
+ struct mvpp2_tx_queue *aggr_txq;
+ struct mvpp2_tx_desc *tx_desc;
+ struct mvpp2_tx_queue *txq;
+ int ret = MVPP2_XDP_TX;
+ dma_addr_t dma_addr;
+
+ txq = port->txqs[txq_id];
+ txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
+ aggr_txq = &port->priv->aggr_txqs[thread];
+
+ /* Check number of available descriptors */
+ if (mvpp2_aggr_desc_num_check(port, aggr_txq, 1) ||
+ mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu, 1)) {
+ ret = MVPP2_XDP_DROPPED;
+ goto out;
+ }
+
+ /* Get a descriptor for the first part of the packet */
+ tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
+ mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
+ mvpp2_txdesc_size_set(port, tx_desc, xdpf->len);
+
+ if (dma_map) {
+ /* XDP_REDIRECT or AF_XDP */
+ dma_addr = dma_map_single(port->dev->dev.parent, xdpf->data,
+ xdpf->len, DMA_TO_DEVICE);
+
+ if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) {
+ mvpp2_txq_desc_put(txq);
+ ret = MVPP2_XDP_DROPPED;
+ goto out;
+ }
+
+ buf_type = MVPP2_TYPE_XDP_NDO;
+ } else {
+ /* XDP_TX */
+ struct page *page = virt_to_page(xdpf->data);
+
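+ /* The frame was built by xdp_convert_buff_to_frame(), so its
+ * payload lives in the page_pool page right after the xdp_frame
+ * struct and the headroom; derive the DMA address from that
+ * offset instead of mapping the buffer again.
+ */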
+ dma_addr = page_pool_get_dma_addr(page) +
+ sizeof(*xdpf) + xdpf->headroom;
+ dma_sync_single_for_device(port->dev->dev.parent, dma_addr,
+ xdpf->len, DMA_BIDIRECTIONAL);
+
+ buf_type = MVPP2_TYPE_XDP_TX;
+ }
+
+ mvpp2_txdesc_dma_addr_set(port, tx_desc, dma_addr);
+
+ mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
+ mvpp2_txq_inc_put(port, txq_pcpu, xdpf, tx_desc, buf_type);
+
+out:
+ return ret;
+}
+
+static int
+mvpp2_xdp_xmit_back(struct mvpp2_port *port, struct xdp_buff *xdp)
+{
+ struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
+ struct xdp_frame *xdpf;
+ u16 txq_id;
+ int ret;
+
+ xdpf = xdp_convert_buff_to_frame(xdp);
+ if (unlikely(!xdpf))
+ return MVPP2_XDP_DROPPED;
+
+ /* The first half of the TX queues is used for XPS,
+ * the second half for XDP_TX.
+ */
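+ /* For example, assuming threads map 1:1 to CPUs, with 4 CPUs and
+ * ntxqs == 8 the thread on CPU 1 sends XDP_TX frames on TX queue
+ * 1 + (8 / 2) = 5.
+ */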
+ txq_id = mvpp2_cpu_to_thread(port->priv, smp_processor_id()) + (port->ntxqs / 2);
+
+ ret = mvpp2_xdp_submit_frame(port, txq_id, xdpf, false);
+ if (ret == MVPP2_XDP_TX) {
+ u64_stats_update_begin(&stats->syncp);
+ stats->tx_bytes += xdpf->len;
+ stats->tx_packets++;
+ stats->xdp_tx++;
+ u64_stats_update_end(&stats->syncp);
+
+ mvpp2_xdp_finish_tx(port, txq_id, 1, xdpf->len);
+ } else {
+ u64_stats_update_begin(&stats->syncp);
+ stats->xdp_tx_err++;
+ u64_stats_update_end(&stats->syncp);
+ }
+
+ return ret;
+}
+
+static int
+mvpp2_xdp_xmit(struct net_device *dev, int num_frame,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+ int i, nxmit_byte = 0, nxmit = 0;
+ struct mvpp2_pcpu_stats *stats;
+ u16 txq_id;
+ u32 ret;
+
+ if (unlikely(test_bit(0, &port->state)))
+ return -ENETDOWN;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ /* The first half of the TX queues is used for XPS,
+ * the second half for XDP_TX.
+ */
+ txq_id = mvpp2_cpu_to_thread(port->priv, smp_processor_id()) + (port->ntxqs / 2);
+
+ for (i = 0; i < num_frame; i++) {
+ ret = mvpp2_xdp_submit_frame(port, txq_id, frames[i], true);
+ if (ret != MVPP2_XDP_TX)
+ break;
+
+ nxmit_byte += frames[i]->len;
+ nxmit++;
+ }
+
+ if (likely(nxmit > 0))
+ mvpp2_xdp_finish_tx(port, txq_id, nxmit, nxmit_byte);
+
+ stats = this_cpu_ptr(port->stats);
+ u64_stats_update_begin(&stats->syncp);
+ stats->tx_bytes += nxmit_byte;
+ stats->tx_packets += nxmit;
+ stats->xdp_xmit += nxmit;
+ stats->xdp_xmit_err += num_frame - nxmit;
+ u64_stats_update_end(&stats->syncp);
+
+ return nxmit;
+}
+
+static int
+mvpp2_run_xdp(struct mvpp2_port *port, struct bpf_prog *prog,
+ struct xdp_buff *xdp, struct page_pool *pp,
+ struct mvpp2_pcpu_stats *stats)
+{
+ unsigned int len, sync, err;
+ struct page *page;
+ u32 ret, act;
+
+ len = xdp->data_end - xdp->data_hard_start - MVPP2_SKB_HEADROOM;
+ act = bpf_prog_run_xdp(prog, xdp);
+
+ /* Due to xdp_adjust_tail: the DMA sync for_device must cover the
+ * maximum length the CPU touched.
+ */
+ sync = xdp->data_end - xdp->data_hard_start - MVPP2_SKB_HEADROOM;
+ sync = max(sync, len);
+
+ switch (act) {
+ case XDP_PASS:
+ stats->xdp_pass++;
+ ret = MVPP2_XDP_PASS;
+ break;
+ case XDP_REDIRECT:
+ err = xdp_do_redirect(port->dev, xdp, prog);
+ if (unlikely(err)) {
+ ret = MVPP2_XDP_DROPPED;
+ page = virt_to_head_page(xdp->data);
+ page_pool_put_page(pp, page, sync, true);
+ } else {
+ ret = MVPP2_XDP_REDIR;
+ stats->xdp_redirect++;
+ }
+ break;
+ case XDP_TX:
+ ret = mvpp2_xdp_xmit_back(port, xdp);
+ if (ret != MVPP2_XDP_TX) {
+ page = virt_to_head_page(xdp->data);
+ page_pool_put_page(pp, page, sync, true);
+ }
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(port->dev, prog, act);
+ fallthrough;
+ case XDP_ABORTED:
+ trace_xdp_exception(port->dev, prog, act);
+ fallthrough;
+ case XDP_DROP:
+ page = virt_to_head_page(xdp->data);
+ page_pool_put_page(pp, page, sync, true);
+ ret = MVPP2_XDP_DROPPED;
+ stats->xdp_drop++;
+ break;
+ }
+
+ return ret;
+}
+
+static void mvpp2_buff_hdr_pool_put(struct mvpp2_port *port, struct mvpp2_rx_desc *rx_desc,
+ int pool, u32 rx_status)
+{
+ phys_addr_t phys_addr, phys_addr_next;
+ dma_addr_t dma_addr, dma_addr_next;
+ struct mvpp2_buff_hdr *buff_hdr;
+
+ phys_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);
+ dma_addr = mvpp2_rxdesc_cookie_get(port, rx_desc);
+
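+ /* Multi-buffer frames are linked through buffer headers; walk the
+ * chain and return every buffer to its BM pool until the header
+ * flagged as last is reached.
+ */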
+ do {
+ buff_hdr = (struct mvpp2_buff_hdr *)phys_to_virt(phys_addr);
+
+ phys_addr_next = le32_to_cpu(buff_hdr->next_phys_addr);
+ dma_addr_next = le32_to_cpu(buff_hdr->next_dma_addr);
+
+ if (port->priv->hw_version >= MVPP22) {
+ phys_addr_next |= ((u64)buff_hdr->next_phys_addr_high << 32);
+ dma_addr_next |= ((u64)buff_hdr->next_dma_addr_high << 32);
+ }
+
+ mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
+
+ phys_addr = phys_addr_next;
+ dma_addr = dma_addr_next;
+
+ } while (!MVPP2_B_HDR_INFO_IS_LAST(le16_to_cpu(buff_hdr->info)));
+}
+
+/* Main rx processing */
+static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
+ int rx_todo, struct mvpp2_rx_queue *rxq)
+{
+ struct net_device *dev = port->dev;
+ struct mvpp2_pcpu_stats ps = {};
+ enum dma_data_direction dma_dir;
+ struct bpf_prog *xdp_prog;
+ struct xdp_buff xdp;
+ int rx_received;
+ int rx_done = 0;
+ u32 xdp_ret = 0;
+
+ xdp_prog = READ_ONCE(port->xdp_prog);
+
+ /* Get the number of received packets and clamp the work to do accordingly */
+ rx_received = mvpp2_rxq_received(port, rxq->id);
+ if (rx_todo > rx_received)
+ rx_todo = rx_received;
+
+ while (rx_done < rx_todo) {
+ struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
+ struct mvpp2_bm_pool *bm_pool;
+ struct page_pool *pp = NULL;
+ struct sk_buff *skb;
+ unsigned int frag_size;
+ dma_addr_t dma_addr;
+ phys_addr_t phys_addr;
+ u32 rx_status, timestamp;
+ int pool, rx_bytes, err, ret;
+ struct page *page;
+ void *data;
+
+ phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc);
+ data = (void *)phys_to_virt(phys_addr);
+ page = virt_to_page(data);
+ prefetch(page);
+
+ rx_done++;
+ rx_status = mvpp2_rxdesc_status_get(port, rx_desc);
+ rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc);
+ rx_bytes -= MVPP2_MH_SIZE;
+ dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);
+
+ pool = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >>
+ MVPP2_RXD_BM_POOL_ID_OFFS;
+ bm_pool = &port->priv->bm_pools[pool];
+
+ if (port->priv->percpu_pools) {
+ pp = port->priv->page_pool[pool];
+ dma_dir = page_pool_get_dma_dir(pp);
+ } else {
+ dma_dir = DMA_FROM_DEVICE;
+ }
+
+ dma_sync_single_for_cpu(dev->dev.parent, dma_addr,
+ rx_bytes + MVPP2_MH_SIZE,
+ dma_dir);
+
+ /* Buffer header not supported */
+ if (rx_status & MVPP2_RXD_BUF_HDR)
+ goto err_drop_frame;
+
+ /* In case of an error, release the buffer back to the Buffer
+ * Manager. The release goes through the hardware BM, using the
+ * buffer information carried in the RX descriptor.
+ */
+ if (rx_status & MVPP2_RXD_ERR_SUMMARY)
+ goto err_drop_frame;
+
+ /* Prefetch header */
+ prefetch(data + MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM);
+
+ if (bm_pool->frag_size > PAGE_SIZE)
+ frag_size = 0;
+ else
+ frag_size = bm_pool->frag_size;
+
+ if (xdp_prog) {
+ struct xdp_rxq_info *xdp_rxq;
+
+ if (bm_pool->pkt_size == MVPP2_BM_SHORT_PKT_SIZE)
+ xdp_rxq = &rxq->xdp_rxq_short;
+ else
+ xdp_rxq = &rxq->xdp_rxq_long;
+
+ xdp_init_buff(&xdp, PAGE_SIZE, xdp_rxq);
+ xdp_prepare_buff(&xdp, data,
+ MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM,
+ rx_bytes, false);
+
+ ret = mvpp2_run_xdp(port, xdp_prog, &xdp, pp, &ps);
+
+ if (ret) {
+ xdp_ret |= ret;
+ err = mvpp2_rx_refill(port, bm_pool, pp, pool);
+ if (err) {
+ netdev_err(port->dev, "failed to refill BM pools\n");
+ goto err_drop_frame;
+ }
+
+ ps.rx_packets++;
+ ps.rx_bytes += rx_bytes;
+ continue;
+ }
+ }
+
+ skb = build_skb(data, frag_size);
+ if (!skb) {
+ netdev_warn(port->dev, "skb build failed\n");
+ goto err_drop_frame;
+ }
+
+ /* If we have RX hardware timestamping enabled, grab the
+ * timestamp from the descriptor and convert it.
+ */
+ if (mvpp22_rx_hwtstamping(port)) {
+ timestamp = le32_to_cpu(rx_desc->pp22.timestamp);
+ mvpp22_tai_tstamp(port->priv->tai, timestamp,
+ skb_hwtstamps(skb));
+ }
+
+ err = mvpp2_rx_refill(port, bm_pool, pp, pool);
+ if (err) {
+ netdev_err(port->dev, "failed to refill BM pools\n");
+ dev_kfree_skb_any(skb);
+ goto err_drop_frame;
+ }
+
+ if (pp)
+ skb_mark_for_recycle(skb);
+ else
+ dma_unmap_single_attrs(dev->dev.parent, dma_addr,
+ bm_pool->buf_size, DMA_FROM_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
+
+ ps.rx_packets++;
+ ps.rx_bytes += rx_bytes;
+
+ skb_reserve(skb, MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM);
+ skb_put(skb, rx_bytes);
+ skb->ip_summed = mvpp2_rx_csum(port, rx_status);
+ skb->protocol = eth_type_trans(skb, dev);
+
+ napi_gro_receive(napi, skb);
+ continue;
+
+err_drop_frame:
+ dev->stats.rx_errors++;
+ mvpp2_rx_error(port, rx_desc);
+ /* Return the buffer to the pool */
+ if (rx_status & MVPP2_RXD_BUF_HDR)
+ mvpp2_buff_hdr_pool_put(port, rx_desc, pool, rx_status);
+ else
+ mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
+ }
+
+ if (xdp_ret & MVPP2_XDP_REDIR)
+ xdp_do_flush_map();
+
+ if (ps.rx_packets) {
+ struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
+
+ u64_stats_update_begin(&stats->syncp);
+ stats->rx_packets += ps.rx_packets;
+ stats->rx_bytes += ps.rx_bytes;
+ /* xdp */
+ stats->xdp_redirect += ps.xdp_redirect;
+ stats->xdp_pass += ps.xdp_pass;
+ stats->xdp_drop += ps.xdp_drop;
+ u64_stats_update_end(&stats->syncp);
+ }
+
+ /* Update Rx queue management counters */
+ wmb();
+ mvpp2_rxq_status_update(port, rxq->id, rx_done, rx_done);
+
+ return rx_todo;
+}
+
+static inline void
+tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
+ struct mvpp2_tx_desc *desc)
+{
+ unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
+ struct mvpp2_txq_pcpu *txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
+
+ dma_addr_t buf_dma_addr =
+ mvpp2_txdesc_dma_addr_get(port, desc);
+ size_t buf_sz =
+ mvpp2_txdesc_size_get(port, desc);
+ if (!IS_TSO_HEADER(txq_pcpu, buf_dma_addr))
+ dma_unmap_single(port->dev->dev.parent, buf_dma_addr,
+ buf_sz, DMA_TO_DEVICE);
+ mvpp2_txq_desc_put(txq);
+}
+
+static void mvpp2_txdesc_clear_ptp(struct mvpp2_port *port,
+ struct mvpp2_tx_desc *desc)
+{
+ /* We only need to clear the low bits */
+ if (port->priv->hw_version >= MVPP22)
+ desc->pp22.ptp_descriptor &=
+ cpu_to_le32(~MVPP22_PTP_DESC_MASK_LOW);
+}
+
+static bool mvpp2_tx_hw_tstamp(struct mvpp2_port *port,
+ struct mvpp2_tx_desc *tx_desc,
+ struct sk_buff *skb)
+{
+ struct mvpp2_hwtstamp_queue *queue;
+ unsigned int mtype, type, i;
+ struct ptp_header *hdr;
+ u64 ptpdesc;
+
+ if (port->priv->hw_version == MVPP21 ||
+ port->tx_hwtstamp_type == HWTSTAMP_TX_OFF)
+ return false;
+
+ type = ptp_classify_raw(skb);
+ if (!type)
+ return false;
+
+ hdr = ptp_parse_header(skb, type);
+ if (!hdr)
+ return false;
+
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+
+ ptpdesc = MVPP22_PTP_MACTIMESTAMPINGEN |
+ MVPP22_PTP_ACTION_CAPTURE;
+ queue = &port->tx_hwtstamp_queue[0];
+
+ switch (type & PTP_CLASS_VMASK) {
+ case PTP_CLASS_V1:
+ ptpdesc |= MVPP22_PTP_PACKETFORMAT(MVPP22_PTP_PKT_FMT_PTPV1);
+ break;
+
+ case PTP_CLASS_V2:
+ ptpdesc |= MVPP22_PTP_PACKETFORMAT(MVPP22_PTP_PKT_FMT_PTPV2);
+ mtype = hdr->tsmt & 15;
+ /* Direct PTP Sync messages to queue 1 */
+ if (mtype == 0) {
+ ptpdesc |= MVPP22_PTP_TIMESTAMPQUEUESELECT;
+ queue = &port->tx_hwtstamp_queue[1];
+ }
+ break;
+ }
+
+ /* Take a reference on the skb and insert into our queue */
+ i = queue->next;
+ queue->next = (i + 1) & 31;
+ if (queue->skb[i])
+ dev_kfree_skb_any(queue->skb[i]);
+ queue->skb[i] = skb_get(skb);
+
+ ptpdesc |= MVPP22_PTP_TIMESTAMPENTRYID(i);
+
+ /*
+ * 3:0 - PTPAction
+ * 6:4 - PTPPacketFormat
+ * 7 - PTP_CF_WraparoundCheckEn
+ * 9:8 - IngressTimestampSeconds[1:0]
+ * 10 - Reserved
+ * 11 - MACTimestampingEn
+ * 17:12 - PTP_TimestampQueueEntryID[5:0]
+ * 18 - PTPTimestampQueueSelect
+ * 19 - UDPChecksumUpdateEn
+ * 27:20 - TimestampOffset
+ * PTP, NTPTransmit, OWAMP/TWAMP - L3 to PTP header
+ * NTPTs, Y.1731 - L3 to timestamp entry
+ * 35:28 - UDP Checksum Offset
+ *
+ * stored in tx descriptor bits 75:64 (11:0) and 191:168 (35:12)
+ */
+ tx_desc->pp22.ptp_descriptor &=
+ cpu_to_le32(~MVPP22_PTP_DESC_MASK_LOW);
+ tx_desc->pp22.ptp_descriptor |=
+ cpu_to_le32(ptpdesc & MVPP22_PTP_DESC_MASK_LOW);
+ tx_desc->pp22.buf_dma_addr_ptp &= cpu_to_le64(~0xffffff0000000000ULL);
+ tx_desc->pp22.buf_dma_addr_ptp |= cpu_to_le64((ptpdesc >> 12) << 40);
+
+ return true;
+}
+
+/* Handle tx fragmentation processing */
+static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
+ struct mvpp2_tx_queue *aggr_txq,
+ struct mvpp2_tx_queue *txq)
+{
+ unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
+ struct mvpp2_txq_pcpu *txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
+ struct mvpp2_tx_desc *tx_desc;
+ int i;
+ dma_addr_t buf_dma_addr;
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ void *addr = skb_frag_address(frag);
+
+ tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
+ mvpp2_txdesc_clear_ptp(port, tx_desc);
+ mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
+ mvpp2_txdesc_size_set(port, tx_desc, skb_frag_size(frag));
+
+ buf_dma_addr = dma_map_single(port->dev->dev.parent, addr,
+ skb_frag_size(frag),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(port->dev->dev.parent, buf_dma_addr)) {
+ mvpp2_txq_desc_put(txq);
+ goto cleanup;
+ }
+
+ mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);
+
+ if (i == (skb_shinfo(skb)->nr_frags - 1)) {
+ /* Last descriptor */
+ mvpp2_txdesc_cmd_set(port, tx_desc,
+ MVPP2_TXD_L_DESC);
+ mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc, MVPP2_TYPE_SKB);
+ } else {
+ /* Descriptor in the middle: Not First, Not Last */
+ mvpp2_txdesc_cmd_set(port, tx_desc, 0);
+ mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB);
+ }
+ }
+
+ return 0;
+cleanup:
+ /* Release all descriptors that were used to map fragments of
+ * this packet, as well as the corresponding DMA mappings
+ */
+ for (i = i - 1; i >= 0; i--) {
+ tx_desc = txq->descs + i;
+ tx_desc_unmap_put(port, txq, tx_desc);
+ }
+
+ return -ENOMEM;
+}
+
+static inline void mvpp2_tso_put_hdr(struct sk_buff *skb,
+ struct net_device *dev,
+ struct mvpp2_tx_queue *txq,
+ struct mvpp2_tx_queue *aggr_txq,
+ struct mvpp2_txq_pcpu *txq_pcpu,
+ int hdr_sz)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+ struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
+ dma_addr_t addr;
+
+ mvpp2_txdesc_clear_ptp(port, tx_desc);
+ mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
+ mvpp2_txdesc_size_set(port, tx_desc, hdr_sz);
+
+ addr = txq_pcpu->tso_headers_dma +
+ txq_pcpu->txq_put_index * TSO_HEADER_SIZE;
+ mvpp2_txdesc_dma_addr_set(port, tx_desc, addr);
+
+ mvpp2_txdesc_cmd_set(port, tx_desc, mvpp2_skb_tx_csum(port, skb) |
+ MVPP2_TXD_F_DESC |
+ MVPP2_TXD_PADDING_DISABLE);
+ mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB);
+}
+
+static inline int mvpp2_tso_put_data(struct sk_buff *skb,
+ struct net_device *dev, struct tso_t *tso,
+ struct mvpp2_tx_queue *txq,
+ struct mvpp2_tx_queue *aggr_txq,
+ struct mvpp2_txq_pcpu *txq_pcpu,
+ int sz, bool left, bool last)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+ struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
+ dma_addr_t buf_dma_addr;
+
+ mvpp2_txdesc_clear_ptp(port, tx_desc);
+ mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
+ mvpp2_txdesc_size_set(port, tx_desc, sz);
+
+ buf_dma_addr = dma_map_single(dev->dev.parent, tso->data, sz,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) {
+ mvpp2_txq_desc_put(txq);
+ return -ENOMEM;
+ }
+
+ mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);
+
+ if (!left) {
+ mvpp2_txdesc_cmd_set(port, tx_desc, MVPP2_TXD_L_DESC);
+ if (last) {
+ mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc, MVPP2_TYPE_SKB);
+ return 0;
+ }
+ } else {
+ mvpp2_txdesc_cmd_set(port, tx_desc, 0);
+ }
+
+ mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB);
+ return 0;
+}
+
+static int mvpp2_tx_tso(struct sk_buff *skb, struct net_device *dev,
+ struct mvpp2_tx_queue *txq,
+ struct mvpp2_tx_queue *aggr_txq,
+ struct mvpp2_txq_pcpu *txq_pcpu)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+ int hdr_sz, i, len, descs = 0;
+ struct tso_t tso;
+
+ /* Check number of available descriptors */
+ if (mvpp2_aggr_desc_num_check(port, aggr_txq, tso_count_descs(skb)) ||
+ mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu,
+ tso_count_descs(skb)))
+ return 0;
+
+ hdr_sz = tso_start(skb, &tso);
+
+ len = skb->len - hdr_sz;
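+ /* Cut the payload into gso_size-sized segments: each segment gets
+ * a header descriptor built from the per-CPU TSO header buffer,
+ * followed by one or more data descriptors covering its payload.
+ */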
+ while (len > 0) {
+ int left = min_t(int, skb_shinfo(skb)->gso_size, len);
+ char *hdr = txq_pcpu->tso_headers +
+ txq_pcpu->txq_put_index * TSO_HEADER_SIZE;
+
+ len -= left;
+ descs++;
+
+ tso_build_hdr(skb, hdr, &tso, left, len == 0);
+ mvpp2_tso_put_hdr(skb, dev, txq, aggr_txq, txq_pcpu, hdr_sz);
+
+ while (left > 0) {
+ int sz = min_t(int, tso.size, left);
+ left -= sz;
+ descs++;
+
+ if (mvpp2_tso_put_data(skb, dev, &tso, txq, aggr_txq,
+ txq_pcpu, sz, left, len == 0))
+ goto release;
+ tso_build_data(skb, &tso, sz);
+ }
+ }
+
+ return descs;
+
+release:
+ for (i = descs - 1; i >= 0; i--) {
+ struct mvpp2_tx_desc *tx_desc = txq->descs + i;
+ tx_desc_unmap_put(port, txq, tx_desc);
+ }
+ return 0;
+}
+
+/* Main tx processing */
+static netdev_tx_t mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+ struct mvpp2_tx_queue *txq, *aggr_txq;
+ struct mvpp2_txq_pcpu *txq_pcpu;
+ struct mvpp2_tx_desc *tx_desc;
+ dma_addr_t buf_dma_addr;
+ unsigned long flags = 0;
+ unsigned int thread;
+ int frags = 0;
+ u16 txq_id;
+ u32 tx_cmd;
+
+ thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
+
+ txq_id = skb_get_queue_mapping(skb);
+ txq = port->txqs[txq_id];
+ txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
+ aggr_txq = &port->priv->aggr_txqs[thread];
+
+ if (test_bit(thread, &port->priv->lock_map))
+ spin_lock_irqsave(&port->tx_lock[thread], flags);
+
+ if (skb_is_gso(skb)) {
+ frags = mvpp2_tx_tso(skb, dev, txq, aggr_txq, txq_pcpu);
+ goto out;
+ }
+ frags = skb_shinfo(skb)->nr_frags + 1;
+
+ /* Check number of available descriptors */
+ if (mvpp2_aggr_desc_num_check(port, aggr_txq, frags) ||
+ mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu, frags)) {
+ frags = 0;
+ goto out;
+ }
+
+ /* Get a descriptor for the first part of the packet */
+ tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
+ if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ||
+ !mvpp2_tx_hw_tstamp(port, tx_desc, skb))
+ mvpp2_txdesc_clear_ptp(port, tx_desc);
+ mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
+ mvpp2_txdesc_size_set(port, tx_desc, skb_headlen(skb));
+
+ buf_dma_addr = dma_map_single(dev->dev.parent, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) {
+ mvpp2_txq_desc_put(txq);
+ frags = 0;
+ goto out;
+ }
+
+ mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);
+
+ tx_cmd = mvpp2_skb_tx_csum(port, skb);
+
+ if (frags == 1) {
+ /* First and Last descriptor */
+ tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
+ mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
+ mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc, MVPP2_TYPE_SKB);
+ } else {
+ /* First but not Last */
+ tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE;
+ mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
+ mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB);
+
+ /* Continue with other skb fragments */
+ if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) {
+ tx_desc_unmap_put(port, txq, tx_desc);
+ frags = 0;
+ }
+ }
+
+out:
+ if (frags > 0) {
+ struct mvpp2_pcpu_stats *stats = per_cpu_ptr(port->stats, thread);
+ struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
+
+ txq_pcpu->reserved_num -= frags;
+ txq_pcpu->count += frags;
+ aggr_txq->count += frags;
+
+ /* Enable transmit */
+ wmb();
+ mvpp2_aggr_txq_pend_desc_add(port, frags);
+
+ if (txq_pcpu->count >= txq_pcpu->stop_threshold)
+ netif_tx_stop_queue(nq);
+
+ u64_stats_update_begin(&stats->syncp);
+ stats->tx_packets++;
+ stats->tx_bytes += skb->len;
+ u64_stats_update_end(&stats->syncp);
+ } else {
+ dev->stats.tx_dropped++;
+ dev_kfree_skb_any(skb);
+ }
+
+ /* Finalize TX processing */
+ if (!port->has_tx_irqs && txq_pcpu->count >= txq->done_pkts_coal)
+ mvpp2_txq_done(port, txq, txq_pcpu);
+
+ /* Set the timer in case not all frags were processed */
+ if (!port->has_tx_irqs && txq_pcpu->count <= frags &&
+ txq_pcpu->count > 0) {
+ struct mvpp2_port_pcpu *port_pcpu = per_cpu_ptr(port->pcpu, thread);
+
+ if (!port_pcpu->timer_scheduled) {
+ port_pcpu->timer_scheduled = true;
+ hrtimer_start(&port_pcpu->tx_done_timer,
+ MVPP2_TXDONE_HRTIMER_PERIOD_NS,
+ HRTIMER_MODE_REL_PINNED_SOFT);
+ }
+ }
+
+ if (test_bit(thread, &port->priv->lock_map))
+ spin_unlock_irqrestore(&port->tx_lock[thread], flags);
+
+ return NETDEV_TX_OK;
+}
+
+static inline void mvpp2_cause_error(struct net_device *dev, int cause)
+{
+ if (cause & MVPP2_CAUSE_FCS_ERR_MASK)
+ netdev_err(dev, "FCS error\n");
+ if (cause & MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK)
+ netdev_err(dev, "rx fifo overrun error\n");
+ if (cause & MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK)
+ netdev_err(dev, "tx fifo underrun error\n");
+}
+
+static int mvpp2_poll(struct napi_struct *napi, int budget)
+{
+ u32 cause_rx_tx, cause_rx, cause_tx, cause_misc;
+ int rx_done = 0;
+ struct mvpp2_port *port = netdev_priv(napi->dev);
+ struct mvpp2_queue_vector *qv;
+ unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
+
+ qv = container_of(napi, struct mvpp2_queue_vector, napi);
+
+ /* Rx/Tx cause register
+ *
+ * Bits 0-15: each bit indicates received packets on the Rx queue
+ * (bit 0 is for Rx queue 0).
+ *
+ * Bits 16-23: each bit indicates transmitted packets on the Tx queue
+ * (bit 16 is for Tx queue 0).
+ *
+ * Each CPU has its own Rx/Tx cause register
+ */
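+ /* For example, a cause value of 0x00010003 reports transmitted
+ * packets on Tx queue 0 and received packets on Rx queues 0 and 1.
+ */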
+ cause_rx_tx = mvpp2_thread_read_relaxed(port->priv, qv->sw_thread_id,
+ MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
+
+ cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
+ if (cause_misc) {
+ mvpp2_cause_error(port->dev, cause_misc);
+
+ /* Clear the cause register */
+ mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0);
+ mvpp2_thread_write(port->priv, thread,
+ MVPP2_ISR_RX_TX_CAUSE_REG(port->id),
+ cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
+ }
+
+ if (port->has_tx_irqs) {
+ cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
+ if (cause_tx) {
+ cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET;
+ mvpp2_tx_done(port, cause_tx, qv->sw_thread_id);
+ }
+ }
+
+ /* Process RX packets */
+ cause_rx = cause_rx_tx &
+ MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(port->priv->hw_version);
+ cause_rx <<= qv->first_rxq;
+ cause_rx |= qv->pending_cause_rx;
+ while (cause_rx && budget > 0) {
+ int count;
+ struct mvpp2_rx_queue *rxq;
+
+ rxq = mvpp2_get_rx_queue(port, cause_rx);
+ if (!rxq)
+ break;
+
+ count = mvpp2_rx(port, napi, budget, rxq);
+ rx_done += count;
+ budget -= count;
+ if (budget > 0) {
+ /* Clear the bit associated with this Rx queue
+ * so that next iteration will continue from
+ * the next Rx queue.
+ */
+ cause_rx &= ~(1 << rxq->logic_rxq);
+ }
+ }
+
+ if (budget > 0) {
+ cause_rx = 0;
+ napi_complete_done(napi, rx_done);
+
+ mvpp2_qvec_interrupt_enable(qv);
+ }
+ qv->pending_cause_rx = cause_rx;
+ return rx_done;
+}
+
+static void mvpp22_mode_reconfigure(struct mvpp2_port *port,
+ phy_interface_t interface)
+{
+ u32 ctrl3;
+
+ /* Set the GMAC & XLG MAC in reset */
+ mvpp2_mac_reset_assert(port);
+
+ /* Set the MPCS and XPCS in reset */
+ mvpp22_pcs_reset_assert(port);
+
+ /* comphy reconfiguration */
+ mvpp22_comphy_init(port, interface);
+
+ /* gop reconfiguration */
+ mvpp22_gop_init(port, interface);
+
+ mvpp22_pcs_reset_deassert(port, interface);
+
+ if (mvpp2_port_supports_xlg(port)) {
+ ctrl3 = readl(port->base + MVPP22_XLG_CTRL3_REG);
+ ctrl3 &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK;
+
+ if (mvpp2_is_xlg(interface))
+ ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_10G;
+ else
+ ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC;
+
+ writel(ctrl3, port->base + MVPP22_XLG_CTRL3_REG);
+ }
+
+ if (mvpp2_port_supports_xlg(port) && mvpp2_is_xlg(interface))
+ mvpp2_xlg_max_rx_size_set(port);
+ else
+ mvpp2_gmac_max_rx_size_set(port);
+}
+
+/* Set hw internals when starting port */
+static void mvpp2_start_dev(struct mvpp2_port *port)
+{
+ int i;
+
+ mvpp2_txp_max_tx_size_set(port);
+
+ for (i = 0; i < port->nqvecs; i++)
+ napi_enable(&port->qvecs[i].napi);
+
+ /* Enable interrupts on all threads */
+ mvpp2_interrupts_enable(port);
+
+ if (port->priv->hw_version >= MVPP22)
+ mvpp22_mode_reconfigure(port, port->phy_interface);
+
+ if (port->phylink) {
+ phylink_start(port->phylink);
+ } else {
+ mvpp2_acpi_start(port);
+ }
+
+ netif_tx_start_all_queues(port->dev);
+
+ clear_bit(0, &port->state);
+}
+
+/* Set hw internals when stopping port */
+static void mvpp2_stop_dev(struct mvpp2_port *port)
+{
+ int i;
+
+ set_bit(0, &port->state);
+
+ /* Disable interrupts on all threads */
+ mvpp2_interrupts_disable(port);
+
+ for (i = 0; i < port->nqvecs; i++)
+ napi_disable(&port->qvecs[i].napi);
+
+ if (port->phylink)
+ phylink_stop(port->phylink);
+ phy_power_off(port->comphy);
+}
+
+static int mvpp2_check_ringparam_valid(struct net_device *dev,
+ struct ethtool_ringparam *ring)
+{
+ u16 new_rx_pending = ring->rx_pending;
+ u16 new_tx_pending = ring->tx_pending;
+
+ if (ring->rx_pending == 0 || ring->tx_pending == 0)
+ return -EINVAL;
+
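+ /* Round the requested sizes to values the hardware accepts: the Rx
+ * ring is clamped to MVPP2_MAX_RXD_MAX, raised to at least
+ * MSS_THRESHOLD_START and aligned to 16 descriptors; the Tx ring is
+ * clamped to MVPP2_MAX_TXD_MAX, aligned to 32 and kept large enough
+ * for a worst-case TSO skb.
+ */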
+ if (ring->rx_pending > MVPP2_MAX_RXD_MAX)
+ new_rx_pending = MVPP2_MAX_RXD_MAX;
+ else if (ring->rx_pending < MSS_THRESHOLD_START)
+ new_rx_pending = MSS_THRESHOLD_START;
+ else if (!IS_ALIGNED(ring->rx_pending, 16))
+ new_rx_pending = ALIGN(ring->rx_pending, 16);
+
+ if (ring->tx_pending > MVPP2_MAX_TXD_MAX)
+ new_tx_pending = MVPP2_MAX_TXD_MAX;
+ else if (!IS_ALIGNED(ring->tx_pending, 32))
+ new_tx_pending = ALIGN(ring->tx_pending, 32);
+
+ /* The Tx ring size cannot be smaller than the minimum number of
+ * descriptors needed for TSO.
+ */
+ if (new_tx_pending < MVPP2_MAX_SKB_DESCS)
+ new_tx_pending = ALIGN(MVPP2_MAX_SKB_DESCS, 32);
+
+ if (ring->rx_pending != new_rx_pending) {
+ netdev_info(dev, "illegal Rx ring size value %d, round to %d\n",
+ ring->rx_pending, new_rx_pending);
+ ring->rx_pending = new_rx_pending;
+ }
+
+ if (ring->tx_pending != new_tx_pending) {
+ netdev_info(dev, "illegal Tx ring size value %d, round to %d\n",
+ ring->tx_pending, new_tx_pending);
+ ring->tx_pending = new_tx_pending;
+ }
+
+ return 0;
+}
+
+static void mvpp21_get_mac_address(struct mvpp2_port *port, unsigned char *addr)
+{
+ u32 mac_addr_l, mac_addr_m, mac_addr_h;
+
+ mac_addr_l = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
+ mac_addr_m = readl(port->priv->lms_base + MVPP2_SRC_ADDR_MIDDLE);
+ mac_addr_h = readl(port->priv->lms_base + MVPP2_SRC_ADDR_HIGH);
+ addr[0] = (mac_addr_h >> 24) & 0xFF;
+ addr[1] = (mac_addr_h >> 16) & 0xFF;
+ addr[2] = (mac_addr_h >> 8) & 0xFF;
+ addr[3] = mac_addr_h & 0xFF;
+ addr[4] = mac_addr_m & 0xFF;
+ addr[5] = (mac_addr_l >> MVPP2_GMAC_SA_LOW_OFFS) & 0xFF;
+}
+
+static int mvpp2_irqs_init(struct mvpp2_port *port)
+{
+ int err, i;
+
+ for (i = 0; i < port->nqvecs; i++) {
+ struct mvpp2_queue_vector *qv = port->qvecs + i;
+
+ if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) {
+ qv->mask = kzalloc(cpumask_size(), GFP_KERNEL);
+ if (!qv->mask) {
+ err = -ENOMEM;
+ goto err;
+ }
+
+ irq_set_status_flags(qv->irq, IRQ_NO_BALANCING);
+ }
+
+ err = request_irq(qv->irq, mvpp2_isr, 0, port->dev->name, qv);
+ if (err)
+ goto err;
+
+ if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) {
+ unsigned int cpu;
+
+ for_each_present_cpu(cpu) {
+ if (mvpp2_cpu_to_thread(port->priv, cpu) ==
+ qv->sw_thread_id)
+ cpumask_set_cpu(cpu, qv->mask);
+ }
+
+ irq_set_affinity_hint(qv->irq, qv->mask);
+ }
+ }
+
+ return 0;
+err:
+ for (i = 0; i < port->nqvecs; i++) {
+ struct mvpp2_queue_vector *qv = port->qvecs + i;
+
+ irq_set_affinity_hint(qv->irq, NULL);
+ kfree(qv->mask);
+ qv->mask = NULL;
+ free_irq(qv->irq, qv);
+ }
+
+ return err;
+}
+
+static void mvpp2_irqs_deinit(struct mvpp2_port *port)
+{
+ int i;
+
+ for (i = 0; i < port->nqvecs; i++) {
+ struct mvpp2_queue_vector *qv = port->qvecs + i;
+
+ irq_set_affinity_hint(qv->irq, NULL);
+ kfree(qv->mask);
+ qv->mask = NULL;
+ irq_clear_status_flags(qv->irq, IRQ_NO_BALANCING);
+ free_irq(qv->irq, qv);
+ }
+}
+
+static bool mvpp22_rss_is_supported(struct mvpp2_port *port)
+{
+ return (queue_mode == MVPP2_QDIST_MULTI_MODE) &&
+ !(port->flags & MVPP2_F_LOOPBACK);
+}
+
+static int mvpp2_open(struct net_device *dev)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+ struct mvpp2 *priv = port->priv;
+ unsigned char mac_bcast[ETH_ALEN] = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+ bool valid = false;
+ int err;
+
+ err = mvpp2_prs_mac_da_accept(port, mac_bcast, true);
+ if (err) {
+ netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n");
+ return err;
+ }
+ err = mvpp2_prs_mac_da_accept(port, dev->dev_addr, true);
+ if (err) {
+ netdev_err(dev, "mvpp2_prs_mac_da_accept own addr failed\n");
+ return err;
+ }
+ err = mvpp2_prs_tag_mode_set(port->priv, port->id, MVPP2_TAG_TYPE_MH);
+ if (err) {
+ netdev_err(dev, "mvpp2_prs_tag_mode_set failed\n");
+ return err;
+ }
+ err = mvpp2_prs_def_flow(port);
+ if (err) {
+ netdev_err(dev, "mvpp2_prs_def_flow failed\n");
+ return err;
+ }
+
+ /* Allocate the Rx/Tx queues */
+ err = mvpp2_setup_rxqs(port);
+ if (err) {
+ netdev_err(port->dev, "cannot allocate Rx queues\n");
+ return err;
+ }
+
+ err = mvpp2_setup_txqs(port);
+ if (err) {
+ netdev_err(port->dev, "cannot allocate Tx queues\n");
+ goto err_cleanup_rxqs;
+ }
+
+ err = mvpp2_irqs_init(port);
+ if (err) {
+ netdev_err(port->dev, "cannot init IRQs\n");
+ goto err_cleanup_txqs;
+ }
+
+ if (port->phylink) {
+ err = phylink_fwnode_phy_connect(port->phylink, port->fwnode, 0);
+ if (err) {
+ netdev_err(port->dev, "could not attach PHY (%d)\n",
+ err);
+ goto err_free_irq;
+ }
+
+ valid = true;
+ }
+
+ if (priv->hw_version >= MVPP22 && port->port_irq) {
+ err = request_irq(port->port_irq, mvpp2_port_isr, 0,
+ dev->name, port);
+ if (err) {
+ netdev_err(port->dev,
+ "cannot request port link/ptp IRQ %d\n",
+ port->port_irq);
+ goto err_free_irq;
+ }
+
+ mvpp22_gop_setup_irq(port);
+
+ /* By default the link is down */
+ netif_carrier_off(port->dev);
+
+ valid = true;
+ } else {
+ port->port_irq = 0;
+ }
+
+ if (!valid) {
+ netdev_err(port->dev,
+ "invalid configuration: no dt or link IRQ");
+ err = -ENOENT;
+ goto err_free_irq;
+ }
+
+ /* Unmask interrupts on all CPUs */
+ on_each_cpu(mvpp2_interrupts_unmask, port, 1);
+ mvpp2_shared_interrupt_mask_unmask(port, false);
+
+ mvpp2_start_dev(port);
+
+ /* Start hardware statistics gathering */
+ queue_delayed_work(priv->stats_queue, &port->stats_work,
+ MVPP2_MIB_COUNTERS_STATS_DELAY);
+
+ return 0;
+
+err_free_irq:
+ mvpp2_irqs_deinit(port);
+err_cleanup_txqs:
+ mvpp2_cleanup_txqs(port);
+err_cleanup_rxqs:
+ mvpp2_cleanup_rxqs(port);
+ return err;
+}
+
+static int mvpp2_stop(struct net_device *dev)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+ struct mvpp2_port_pcpu *port_pcpu;
+ unsigned int thread;
+
+ mvpp2_stop_dev(port);
+
+ /* Mask interrupts on all threads */
+ on_each_cpu(mvpp2_interrupts_mask, port, 1);
+ mvpp2_shared_interrupt_mask_unmask(port, true);
+
+ if (port->phylink)
+ phylink_disconnect_phy(port->phylink);
+ if (port->port_irq)
+ free_irq(port->port_irq, port);
+
+ mvpp2_irqs_deinit(port);
+ if (!port->has_tx_irqs) {
+ for (thread = 0; thread < port->priv->nthreads; thread++) {
+ port_pcpu = per_cpu_ptr(port->pcpu, thread);
+
+ hrtimer_cancel(&port_pcpu->tx_done_timer);
+ port_pcpu->timer_scheduled = false;
+ }
+ }
+ mvpp2_cleanup_rxqs(port);
+ mvpp2_cleanup_txqs(port);
+
+ cancel_delayed_work_sync(&port->stats_work);
+
+ mvpp2_mac_reset_assert(port);
+ mvpp22_pcs_reset_assert(port);
+
+ return 0;
+}
+
+static int mvpp2_prs_mac_da_accept_list(struct mvpp2_port *port,
+ struct netdev_hw_addr_list *list)
+{
+ struct netdev_hw_addr *ha;
+ int ret;
+
+ netdev_hw_addr_list_for_each(ha, list) {
+ ret = mvpp2_prs_mac_da_accept(port, ha->addr, true);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void mvpp2_set_rx_promisc(struct mvpp2_port *port, bool enable)
+{
+ if (!enable && (port->dev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
+ mvpp2_prs_vid_enable_filtering(port);
+ else
+ mvpp2_prs_vid_disable_filtering(port);
+
+ mvpp2_prs_mac_promisc_set(port->priv, port->id,
+ MVPP2_PRS_L2_UNI_CAST, enable);
+
+ mvpp2_prs_mac_promisc_set(port->priv, port->id,
+ MVPP2_PRS_L2_MULTI_CAST, enable);
+}
+
+static void mvpp2_set_rx_mode(struct net_device *dev)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+
+ /* Clear the whole UC and MC list */
+ mvpp2_prs_mac_del_all(port);
+
+ if (dev->flags & IFF_PROMISC) {
+ mvpp2_set_rx_promisc(port, true);
+ return;
+ }
+
+ mvpp2_set_rx_promisc(port, false);
+
+ if (netdev_uc_count(dev) > MVPP2_PRS_MAC_UC_FILT_MAX ||
+ mvpp2_prs_mac_da_accept_list(port, &dev->uc))
+ mvpp2_prs_mac_promisc_set(port->priv, port->id,
+ MVPP2_PRS_L2_UNI_CAST, true);
+
+ if (dev->flags & IFF_ALLMULTI) {
+ mvpp2_prs_mac_promisc_set(port->priv, port->id,
+ MVPP2_PRS_L2_MULTI_CAST, true);
+ return;
+ }
+
+ if (netdev_mc_count(dev) > MVPP2_PRS_MAC_MC_FILT_MAX ||
+ mvpp2_prs_mac_da_accept_list(port, &dev->mc))
+ mvpp2_prs_mac_promisc_set(port->priv, port->id,
+ MVPP2_PRS_L2_MULTI_CAST, true);
+}
+
+static int mvpp2_set_mac_address(struct net_device *dev, void *p)
+{
+ const struct sockaddr *addr = p;
+ int err;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
+ if (err) {
+ /* Reconfigure the parser to accept the original MAC address */
+ mvpp2_prs_update_mac_da(dev, dev->dev_addr);
+ netdev_err(dev, "failed to change MAC address\n");
+ }
+ return err;
+}
+
+/* Shut down all the ports, reconfigure the pools as percpu or shared,
+ * then bring all ports back up.
+ */
+static int mvpp2_bm_switch_buffers(struct mvpp2 *priv, bool percpu)
+{
+ bool change_percpu = (percpu != priv->percpu_pools);
+ int numbufs = MVPP2_BM_POOLS_NUM, i;
+ struct mvpp2_port *port = NULL;
+ bool status[MVPP2_MAX_PORTS];
+
+ for (i = 0; i < priv->port_count; i++) {
+ port = priv->port_list[i];
+ status[i] = netif_running(port->dev);
+ if (status[i])
+ mvpp2_stop(port->dev);
+ }
+
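+ /* In per-CPU mode every Rx queue owns a short-packet and a
+ * long-packet pool, so nrxqs * 2 pools must be torn down; shared
+ * mode uses the fixed set of MVPP2_BM_POOLS_NUM pools.
+ */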
+ /* nrxqs is the same for all ports */
+ if (priv->percpu_pools)
+ numbufs = port->nrxqs * 2;
+
+ if (change_percpu)
+ mvpp2_bm_pool_update_priv_fc(priv, false);
+
+ for (i = 0; i < numbufs; i++)
+ mvpp2_bm_pool_destroy(port->dev->dev.parent, priv, &priv->bm_pools[i]);
+
+ devm_kfree(port->dev->dev.parent, priv->bm_pools);
+ priv->percpu_pools = percpu;
+ mvpp2_bm_init(port->dev->dev.parent, priv);
+
+ for (i = 0; i < priv->port_count; i++) {
+ port = priv->port_list[i];
+ if (percpu && port->ntxqs >= num_possible_cpus() * 2)
+ xdp_set_features_flag(port->dev,
+ NETDEV_XDP_ACT_BASIC |
+ NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT);
+ else
+ xdp_clear_features_flag(port->dev);
+
+ mvpp2_swf_bm_pool_init(port);
+ if (status[i])
+ mvpp2_open(port->dev);
+ }
+
+ if (change_percpu)
+ mvpp2_bm_pool_update_priv_fc(priv, true);
+
+ return 0;
+}
+
+static int mvpp2_change_mtu(struct net_device *dev, int mtu)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+ bool running = netif_running(dev);
+ struct mvpp2 *priv = port->priv;
+ int err;
+
+ if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) {
+ netdev_info(dev, "illegal MTU value %d, round to %d\n", mtu,
+ ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8));
+ mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
+ }
+
+ if (port->xdp_prog && mtu > MVPP2_MAX_RX_BUF_SIZE) {
+ netdev_err(dev, "Illegal MTU value %d (> %d) for XDP mode\n",
+ mtu, (int)MVPP2_MAX_RX_BUF_SIZE);
+ return -EINVAL;
+ }
+
+ if (MVPP2_RX_PKT_SIZE(mtu) > MVPP2_BM_LONG_PKT_SIZE) {
+ if (priv->percpu_pools) {
+ netdev_warn(dev, "mtu %d too high, switching to shared buffers", mtu);
+ mvpp2_bm_switch_buffers(priv, false);
+ }
+ } else {
+ bool jumbo = false;
+ int i;
+
+ for (i = 0; i < priv->port_count; i++)
+ if (priv->port_list[i] != port &&
+ MVPP2_RX_PKT_SIZE(priv->port_list[i]->dev->mtu) >
+ MVPP2_BM_LONG_PKT_SIZE) {
+ jumbo = true;
+ break;
+ }
+
+ /* No port is using jumbo frames */
+ if (!jumbo) {
+ dev_info(port->dev->dev.parent,
+ "all ports have a low MTU, switching to per-cpu buffers");
+ mvpp2_bm_switch_buffers(priv, true);
+ }
+ }
+
+ if (running)
+ mvpp2_stop_dev(port);
+
+ err = mvpp2_bm_update_mtu(dev, mtu);
+ if (err) {
+ netdev_err(dev, "failed to change MTU\n");
+ /* Reconfigure BM to the original MTU */
+ mvpp2_bm_update_mtu(dev, dev->mtu);
+ } else {
+ port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
+ }
+
+ if (running) {
+ mvpp2_start_dev(port);
+ mvpp2_egress_enable(port);
+ mvpp2_ingress_enable(port);
+ }
+
+ return err;
+}
+
+static int mvpp2_check_pagepool_dma(struct mvpp2_port *port)
+{
+ enum dma_data_direction dma_dir = DMA_FROM_DEVICE;
+ struct mvpp2 *priv = port->priv;
+ int err = -1, i;
+
+ if (!priv->percpu_pools)
+ return err;
+
+ if (!priv->page_pool[0])
+ return -ENOMEM;
+
+ for (i = 0; i < priv->port_count; i++) {
+ port = priv->port_list[i];
+ if (port->xdp_prog) {
+ dma_dir = DMA_BIDIRECTIONAL;
+ break;
+ }
+ }
+
+ /* All pools are equal in terms of DMA direction */
+ if (priv->page_pool[0]->p.dma_dir != dma_dir)
+ err = mvpp2_bm_switch_buffers(priv, true);
+
+ return err;
+}
+
+static void
+mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+ unsigned int start;
+ unsigned int cpu;
+
+ for_each_possible_cpu(cpu) {
+ struct mvpp2_pcpu_stats *cpu_stats;
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 tx_packets;
+ u64 tx_bytes;
+
+ cpu_stats = per_cpu_ptr(port->stats, cpu);
+ do {
+ start = u64_stats_fetch_begin(&cpu_stats->syncp);
+ rx_packets = cpu_stats->rx_packets;
+ rx_bytes = cpu_stats->rx_bytes;
+ tx_packets = cpu_stats->tx_packets;
+ tx_bytes = cpu_stats->tx_bytes;
+ } while (u64_stats_fetch_retry(&cpu_stats->syncp, start));
+
+ stats->rx_packets += rx_packets;
+ stats->rx_bytes += rx_bytes;
+ stats->tx_packets += tx_packets;
+ stats->tx_bytes += tx_bytes;
+ }
+
+ stats->rx_errors = dev->stats.rx_errors;
+ stats->rx_dropped = dev->stats.rx_dropped;
+ stats->tx_dropped = dev->stats.tx_dropped;
+}
+
+static int mvpp2_set_ts_config(struct mvpp2_port *port, struct ifreq *ifr)
+{
+ struct hwtstamp_config config;
+ void __iomem *ptp;
+ u32 gcr, int_mask;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ if (config.tx_type != HWTSTAMP_TX_OFF &&
+ config.tx_type != HWTSTAMP_TX_ON)
+ return -ERANGE;
+
+ ptp = port->priv->iface_base + MVPP22_PTP_BASE(port->gop_id);
+
+ int_mask = gcr = 0;
+ if (config.tx_type != HWTSTAMP_TX_OFF) {
+ gcr |= MVPP22_PTP_GCR_TSU_ENABLE | MVPP22_PTP_GCR_TX_RESET;
+ int_mask |= MVPP22_PTP_INT_MASK_QUEUE1 |
+ MVPP22_PTP_INT_MASK_QUEUE0;
+ }
+
+ /* It seems we must also release the TX reset when enabling the TSU */
+ if (config.rx_filter != HWTSTAMP_FILTER_NONE)
+ gcr |= MVPP22_PTP_GCR_TSU_ENABLE | MVPP22_PTP_GCR_RX_RESET |
+ MVPP22_PTP_GCR_TX_RESET;
+
+ if (gcr & MVPP22_PTP_GCR_TSU_ENABLE)
+ mvpp22_tai_start(port->priv->tai);
+
+ if (config.rx_filter != HWTSTAMP_FILTER_NONE) {
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ mvpp2_modify(ptp + MVPP22_PTP_GCR,
+ MVPP22_PTP_GCR_RX_RESET |
+ MVPP22_PTP_GCR_TX_RESET |
+ MVPP22_PTP_GCR_TSU_ENABLE, gcr);
+ port->rx_hwtstamp = true;
+ } else {
+ port->rx_hwtstamp = false;
+ mvpp2_modify(ptp + MVPP22_PTP_GCR,
+ MVPP22_PTP_GCR_RX_RESET |
+ MVPP22_PTP_GCR_TX_RESET |
+ MVPP22_PTP_GCR_TSU_ENABLE, gcr);
+ }
+
+ mvpp2_modify(ptp + MVPP22_PTP_INT_MASK,
+ MVPP22_PTP_INT_MASK_QUEUE1 |
+ MVPP22_PTP_INT_MASK_QUEUE0, int_mask);
+
+ if (!(gcr & MVPP22_PTP_GCR_TSU_ENABLE))
+ mvpp22_tai_stop(port->priv->tai);
+
+ port->tx_hwtstamp_type = config.tx_type;
+
+ if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int mvpp2_get_ts_config(struct mvpp2_port *port, struct ifreq *ifr)
+{
+ struct hwtstamp_config config;
+
+ memset(&config, 0, sizeof(config));
+
+ config.tx_type = port->tx_hwtstamp_type;
+ config.rx_filter = port->rx_hwtstamp ?
+ HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;
+
+ if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int mvpp2_ethtool_get_ts_info(struct net_device *dev,
+ struct ethtool_ts_info *info)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+
+ if (!port->hwtstamp)
+ return -EOPNOTSUPP;
+
+ info->phc_index = mvpp22_tai_ptp_clock_index(port->priv->tai);
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) |
+ BIT(HWTSTAMP_TX_ON);
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_ALL);
+
+ return 0;
+}
+
+static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ if (port->hwtstamp)
+ return mvpp2_set_ts_config(port, ifr);
+ break;
+
+ case SIOCGHWTSTAMP:
+ if (port->hwtstamp)
+ return mvpp2_get_ts_config(port, ifr);
+ break;
+ }
+
+ if (!port->phylink)
+ return -ENOTSUPP;
+
+ return phylink_mii_ioctl(port->phylink, ifr, cmd);
+}
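
The SIOCSHWTSTAMP/SIOCGHWTSTAMP branches above implement the standard hardware-timestamping ioctls; note that mvpp2_set_ts_config() upgrades any non-none rx_filter to HWTSTAMP_FILTER_ALL and copies the granted configuration back to the caller. A hedged sketch of the userspace side (the interface name "eth2" is a placeholder and error handling is minimal):

/* Sketch of the userspace side of the SIOCSHWTSTAMP path handled above. */
#include <string.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <linux/net_tstamp.h>

int main(void)
{
    struct hwtstamp_config cfg = {
        .tx_type   = HWTSTAMP_TX_ON,
        .rx_filter = HWTSTAMP_FILTER_ALL,
    };
    struct ifreq ifr;
    int fd = socket(AF_INET, SOCK_DGRAM, 0);

    if (fd < 0)
        return 1;
    memset(&ifr, 0, sizeof(ifr));
    strncpy(ifr.ifr_name, "eth2", IFNAMSIZ - 1);
    ifr.ifr_data = (char *)&cfg;

    if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
        perror("SIOCSHWTSTAMP");
    else
        /* the driver may have adjusted the granted rx_filter */
        printf("rx_filter granted: %d\n", cfg.rx_filter);

    close(fd);
    return 0;
}
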
+
+static int mvpp2_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+ int ret;
+
+ ret = mvpp2_prs_vid_entry_add(port, vid);
+ if (ret)
+ netdev_err(dev, "rx-vlan-filter offloading cannot accept more than %d VIDs per port\n",
+ MVPP2_PRS_VLAN_FILT_MAX - 1);
+ return ret;
+}
+
+static int mvpp2_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+
+ mvpp2_prs_vid_entry_remove(port, vid);
+ return 0;
+}
+
+static int mvpp2_set_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ netdev_features_t changed = dev->features ^ features;
+ struct mvpp2_port *port = netdev_priv(dev);
+
+ if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
+ if (features & NETIF_F_HW_VLAN_CTAG_FILTER) {
+ mvpp2_prs_vid_enable_filtering(port);
+ } else {
+ /* Invalidate all registered VID filters for this
+ * port
+ */
+ mvpp2_prs_vid_remove_all(port);
+
+ mvpp2_prs_vid_disable_filtering(port);
+ }
+ }
+
+ if (changed & NETIF_F_RXHASH) {
+ if (features & NETIF_F_RXHASH)
+ mvpp22_port_rss_enable(port);
+ else
+ mvpp22_port_rss_disable(port);
+ }
+
+ return 0;
+}
+
+static int mvpp2_xdp_setup(struct mvpp2_port *port, struct netdev_bpf *bpf)
+{
+ struct bpf_prog *prog = bpf->prog, *old_prog;
+ bool running = netif_running(port->dev);
+ bool reset = !prog != !port->xdp_prog;
+
+ if (port->dev->mtu > MVPP2_MAX_RX_BUF_SIZE) {
+ NL_SET_ERR_MSG_MOD(bpf->extack, "MTU too large for XDP");
+ return -EOPNOTSUPP;
+ }
+
+ if (!port->priv->percpu_pools) {
+ NL_SET_ERR_MSG_MOD(bpf->extack, "Per CPU Pools required for XDP");
+ return -EOPNOTSUPP;
+ }
+
+ if (port->ntxqs < num_possible_cpus() * 2) {
+ NL_SET_ERR_MSG_MOD(bpf->extack, "XDP_TX needs two TX queues per CPU");
+ return -EOPNOTSUPP;
+ }
+
+ /* device is up and bpf is added/removed, must set up the RX queues */
+ if (running && reset)
+ mvpp2_stop(port->dev);
+
+ old_prog = xchg(&port->xdp_prog, prog);
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ /* bpf is just replaced, RXQ and MTU are already set up */
+ if (!reset)
+ return 0;
+
+ /* device was up, restore the link */
+ if (running)
+ mvpp2_open(port->dev);
+
+ /* Check Page Pool DMA Direction */
+ mvpp2_check_pagepool_dma(port);
+
+ return 0;
+}
+
+static int mvpp2_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return mvpp2_xdp_setup(port, xdp);
+ default:
+ return -EINVAL;
+ }
+}
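
mvpp2_xdp_setup() only accepts a program when per-CPU pools are in use, the MTU fits within MVPP2_MAX_RX_BUF_SIZE, and there are two TX queues per CPU for XDP_TX. For reference, a program of the kind that gets attached here can be as small as the sketch below (plain BPF C, built with something like clang -O2 -target bpf -c; attaching it is done from userspace, e.g. via libbpf or iproute2, not by the driver):

/* Minimal XDP program: pass every frame up the stack unchanged. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_pass_all(struct xdp_md *ctx)
{
    return XDP_PASS;
}

char _license[] SEC("license") = "GPL";
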
+
+/* Ethtool methods */
+
+static int mvpp2_ethtool_nway_reset(struct net_device *dev)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+
+ if (!port->phylink)
+ return -ENOTSUPP;
+
+ return phylink_ethtool_nway_reset(port->phylink);
+}
+
+/* Set interrupt coalescing for ethtools */
+static int
+mvpp2_ethtool_set_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *c,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+ int queue;
+
+ for (queue = 0; queue < port->nrxqs; queue++) {
+ struct mvpp2_rx_queue *rxq = port->rxqs[queue];
+
+ rxq->time_coal = c->rx_coalesce_usecs;
+ rxq->pkts_coal = c->rx_max_coalesced_frames;
+ mvpp2_rx_pkts_coal_set(port, rxq);
+ mvpp2_rx_time_coal_set(port, rxq);
+ }
+
+ if (port->has_tx_irqs) {
+ port->tx_time_coal = c->tx_coalesce_usecs;
+ mvpp2_tx_time_coal_set(port);
+ }
+
+ for (queue = 0; queue < port->ntxqs; queue++) {
+ struct mvpp2_tx_queue *txq = port->txqs[queue];
+
+ txq->done_pkts_coal = c->tx_max_coalesced_frames;
+
+ if (port->has_tx_irqs)
+ mvpp2_tx_pkts_coal_set(port, txq);
+ }
+
+ return 0;
+}
+
+/* get coalescing for ethtools */
+static int
+mvpp2_ethtool_get_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *c,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+
+ c->rx_coalesce_usecs = port->rxqs[0]->time_coal;
+ c->rx_max_coalesced_frames = port->rxqs[0]->pkts_coal;
+ c->tx_max_coalesced_frames = port->txqs[0]->done_pkts_coal;
+ c->tx_coalesce_usecs = port->tx_time_coal;
+ return 0;
+}
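
The two callbacks above simply mirror the ethtool coalescing parameters into the per-queue time/packet thresholds programmed by mvpp2_rx_pkts_coal_set() and friends. A sketch of driving them through the legacy SIOCETHTOOL ioctl (the netlink ethtool interface reaches the same callbacks; "eth2" is a placeholder name):

/* Read the current coalescing settings, then raise the RX time threshold. */
#include <string.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <linux/ethtool.h>

int main(void)
{
    struct ethtool_coalesce ec = { .cmd = ETHTOOL_GCOALESCE };
    struct ifreq ifr;
    int fd = socket(AF_INET, SOCK_DGRAM, 0);

    if (fd < 0)
        return 1;
    memset(&ifr, 0, sizeof(ifr));
    strncpy(ifr.ifr_name, "eth2", IFNAMSIZ - 1);
    ifr.ifr_data = (char *)&ec;

    if (ioctl(fd, SIOCETHTOOL, &ifr) == 0) {
        /* keep the other fields as returned, only bump rx-usecs */
        ec.cmd = ETHTOOL_SCOALESCE;
        ec.rx_coalesce_usecs = 64;
        if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
            perror("ETHTOOL_SCOALESCE");
    }

    close(fd);
    return 0;
}

The command-line equivalent would be something like `ethtool -C eth2 rx-usecs 64`.
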
+
+static void mvpp2_ethtool_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ strscpy(drvinfo->driver, MVPP2_DRIVER_NAME,
+ sizeof(drvinfo->driver));
+ strscpy(drvinfo->version, MVPP2_DRIVER_VERSION,
+ sizeof(drvinfo->version));
+ strscpy(drvinfo->bus_info, dev_name(&dev->dev),
+ sizeof(drvinfo->bus_info));
+}
+
+static void
+mvpp2_ethtool_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+
+ ring->rx_max_pending = MVPP2_MAX_RXD_MAX;
+ ring->tx_max_pending = MVPP2_MAX_TXD_MAX;
+ ring->rx_pending = port->rx_ring_size;
+ ring->tx_pending = port->tx_ring_size;
+}
+
+static int
+mvpp2_ethtool_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+ u16 prev_rx_ring_size = port->rx_ring_size;
+ u16 prev_tx_ring_size = port->tx_ring_size;
+ int err;
+
+ err = mvpp2_check_ringparam_valid(dev, ring);
+ if (err)
+ return err;
+
+ if (!netif_running(dev)) {
+ port->rx_ring_size = ring->rx_pending;
+ port->tx_ring_size = ring->tx_pending;
+ return 0;
+ }
+
+ /* The interface is running, so we have to force a
+ * reallocation of the queues
+ */
+ mvpp2_stop_dev(port);
+ mvpp2_cleanup_rxqs(port);
+ mvpp2_cleanup_txqs(port);
+
+ port->rx_ring_size = ring->rx_pending;
+ port->tx_ring_size = ring->tx_pending;
+
+ err = mvpp2_setup_rxqs(port);
+ if (err) {
+ /* Reallocate Rx queues with the original ring size */
+ port->rx_ring_size = prev_rx_ring_size;
+ ring->rx_pending = prev_rx_ring_size;
+ err = mvpp2_setup_rxqs(port);
+ if (err)
+ goto err_out;
+ }
+ err = mvpp2_setup_txqs(port);
+ if (err) {
+ /* Reallocate Tx queues with the original ring size */
+ port->tx_ring_size = prev_tx_ring_size;
+ ring->tx_pending = prev_tx_ring_size;
+ err = mvpp2_setup_txqs(port);
+ if (err)
+ goto err_clean_rxqs;
+ }
+
+ mvpp2_start_dev(port);
+ mvpp2_egress_enable(port);
+ mvpp2_ingress_enable(port);
+
+ return 0;
+
+err_clean_rxqs:
+ mvpp2_cleanup_rxqs(port);
+err_out:
+ netdev_err(dev, "failed to change ring parameters");
+ return err;
+}
+
+static void mvpp2_ethtool_get_pause_param(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+
+ if (!port->phylink)
+ return;
+
+ phylink_ethtool_get_pauseparam(port->phylink, pause);
+}
+
+static int mvpp2_ethtool_set_pause_param(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+
+ if (!port->phylink)
+ return -ENOTSUPP;
+
+ return phylink_ethtool_set_pauseparam(port->phylink, pause);
+}
+
+static int mvpp2_ethtool_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+
+ if (!port->phylink)
+ return -ENOTSUPP;
+
+ return phylink_ethtool_ksettings_get(port->phylink, cmd);
+}
+
+static int mvpp2_ethtool_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+
+ if (!port->phylink)
+ return -ENOTSUPP;
+
+ return phylink_ethtool_ksettings_set(port->phylink, cmd);
+}
+
+static int mvpp2_ethtool_get_rxnfc(struct net_device *dev,
+ struct ethtool_rxnfc *info, u32 *rules)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+ int ret = 0, i, loc = 0;
+
+ if (!mvpp22_rss_is_supported(port))
+ return -EOPNOTSUPP;
+
+ switch (info->cmd) {
+ case ETHTOOL_GRXFH:
+ ret = mvpp2_ethtool_rxfh_get(port, info);
+ break;
+ case ETHTOOL_GRXRINGS:
+ info->data = port->nrxqs;
+ break;
+ case ETHTOOL_GRXCLSRLCNT:
+ info->rule_cnt = port->n_rfs_rules;
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ ret = mvpp2_ethtool_cls_rule_get(port, info);
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ for (i = 0; i < MVPP2_N_RFS_ENTRIES_PER_FLOW; i++) {
+ if (loc == info->rule_cnt) {
+ ret = -EMSGSIZE;
+ break;
+ }
+
+ if (port->rfs_rules[i])
+ rules[loc++] = i;
+ }
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ return ret;
+}
+
+static int mvpp2_ethtool_set_rxnfc(struct net_device *dev,
+ struct ethtool_rxnfc *info)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+ int ret = 0;
+
+ if (!mvpp22_rss_is_supported(port))
+ return -EOPNOTSUPP;
+
+ switch (info->cmd) {
+ case ETHTOOL_SRXFH:
+ ret = mvpp2_ethtool_rxfh_set(port, info);
+ break;
+ case ETHTOOL_SRXCLSRLINS:
+ ret = mvpp2_ethtool_cls_rule_ins(port, info);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ ret = mvpp2_ethtool_cls_rule_del(port, info);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return ret;
+}
+
+static u32 mvpp2_ethtool_get_rxfh_indir_size(struct net_device *dev)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+
+ return mvpp22_rss_is_supported(port) ? MVPP22_RSS_TABLE_ENTRIES : 0;
+}
+
+static int mvpp2_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
+ u8 *hfunc)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+ int ret = 0;
+
+ if (!mvpp22_rss_is_supported(port))
+ return -EOPNOTSUPP;
+
+ if (indir)
+ ret = mvpp22_port_rss_ctx_indir_get(port, 0, indir);
+
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_CRC32;
+
+ return ret;
+}
+
+static int mvpp2_ethtool_set_rxfh(struct net_device *dev, const u32 *indir,
+ const u8 *key, const u8 hfunc)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+ int ret = 0;
+
+ if (!mvpp22_rss_is_supported(port))
+ return -EOPNOTSUPP;
+
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_CRC32)
+ return -EOPNOTSUPP;
+
+ if (key)
+ return -EOPNOTSUPP;
+
+ if (indir)
+ ret = mvpp22_port_rss_ctx_indir_set(port, 0, indir);
+
+ return ret;
+}
+
+static int mvpp2_ethtool_get_rxfh_context(struct net_device *dev, u32 *indir,
+ u8 *key, u8 *hfunc, u32 rss_context)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+ int ret = 0;
+
+ if (!mvpp22_rss_is_supported(port))
+ return -EOPNOTSUPP;
+ if (rss_context >= MVPP22_N_RSS_TABLES)
+ return -EINVAL;
+
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_CRC32;
+
+ if (indir)
+ ret = mvpp22_port_rss_ctx_indir_get(port, rss_context, indir);
+
+ return ret;
+}
+
+static int mvpp2_ethtool_set_rxfh_context(struct net_device *dev,
+ const u32 *indir, const u8 *key,
+ const u8 hfunc, u32 *rss_context,
+ bool delete)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+ int ret;
+
+ if (!mvpp22_rss_is_supported(port))
+ return -EOPNOTSUPP;
+
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_CRC32)
+ return -EOPNOTSUPP;
+
+ if (key)
+ return -EOPNOTSUPP;
+
+ if (delete)
+ return mvpp22_port_rss_ctx_delete(port, *rss_context);
+
+ if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) {
+ ret = mvpp22_port_rss_ctx_create(port, rss_context);
+ if (ret)
+ return ret;
+ }
+
+ return mvpp22_port_rss_ctx_indir_set(port, *rss_context, indir);
+}
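
The get/set_rxfh{,_context} callbacks only expose an indirection table per RSS context: the hash function is fixed to CRC32 and no hash key is accepted. Conceptually, the hardware then picks the RX queue by indexing that table with the packet hash. A standalone sketch of that mapping (the table size of 32 is illustrative, standing in for MVPP22_RSS_TABLE_ENTRIES; this is not driver code):

/* Conceptual sketch of RSS indirection: the hash indexes a table whose
 * entries name RX queues.
 */
#include <stdio.h>

#define RSS_TABLE_ENTRIES 32 /* illustrative table size */

static unsigned int rss_pick_rxq(const unsigned int *indir, unsigned int hash)
{
    return indir[hash % RSS_TABLE_ENTRIES];
}

int main(void)
{
    unsigned int indir[RSS_TABLE_ENTRIES];
    unsigned int i;

    /* default spread: round-robin over four RX queues */
    for (i = 0; i < RSS_TABLE_ENTRIES; i++)
        indir[i] = i % 4;

    printf("hash 0x1234 -> rxq %u\n", rss_pick_rxq(indir, 0x1234));
    return 0;
}
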
+/* Device ops */
+
+static const struct net_device_ops mvpp2_netdev_ops = {
+ .ndo_open = mvpp2_open,
+ .ndo_stop = mvpp2_stop,
+ .ndo_start_xmit = mvpp2_tx,
+ .ndo_set_rx_mode = mvpp2_set_rx_mode,
+ .ndo_set_mac_address = mvpp2_set_mac_address,
+ .ndo_change_mtu = mvpp2_change_mtu,
+ .ndo_get_stats64 = mvpp2_get_stats64,
+ .ndo_eth_ioctl = mvpp2_ioctl,
+ .ndo_vlan_rx_add_vid = mvpp2_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = mvpp2_vlan_rx_kill_vid,
+ .ndo_set_features = mvpp2_set_features,
+ .ndo_bpf = mvpp2_xdp,
+ .ndo_xdp_xmit = mvpp2_xdp_xmit,
+};
+
+static const struct ethtool_ops mvpp2_eth_tool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES,
+ .nway_reset = mvpp2_ethtool_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_ts_info = mvpp2_ethtool_get_ts_info,
+ .set_coalesce = mvpp2_ethtool_set_coalesce,
+ .get_coalesce = mvpp2_ethtool_get_coalesce,
+ .get_drvinfo = mvpp2_ethtool_get_drvinfo,
+ .get_ringparam = mvpp2_ethtool_get_ringparam,
+ .set_ringparam = mvpp2_ethtool_set_ringparam,
+ .get_strings = mvpp2_ethtool_get_strings,
+ .get_ethtool_stats = mvpp2_ethtool_get_stats,
+ .get_sset_count = mvpp2_ethtool_get_sset_count,
+ .get_pauseparam = mvpp2_ethtool_get_pause_param,
+ .set_pauseparam = mvpp2_ethtool_set_pause_param,
+ .get_link_ksettings = mvpp2_ethtool_get_link_ksettings,
+ .set_link_ksettings = mvpp2_ethtool_set_link_ksettings,
+ .get_rxnfc = mvpp2_ethtool_get_rxnfc,
+ .set_rxnfc = mvpp2_ethtool_set_rxnfc,
+ .get_rxfh_indir_size = mvpp2_ethtool_get_rxfh_indir_size,
+ .get_rxfh = mvpp2_ethtool_get_rxfh,
+ .set_rxfh = mvpp2_ethtool_set_rxfh,
+ .get_rxfh_context = mvpp2_ethtool_get_rxfh_context,
+ .set_rxfh_context = mvpp2_ethtool_set_rxfh_context,
+};
+
+/* Used for PPv2.1, or PPv2.2 with the old Device Tree binding that
+ * had a single IRQ defined per-port.
+ */
+static int mvpp2_simple_queue_vectors_init(struct mvpp2_port *port,
+ struct device_node *port_node)
+{
+ struct mvpp2_queue_vector *v = &port->qvecs[0];
+
+ v->first_rxq = 0;
+ v->nrxqs = port->nrxqs;
+ v->type = MVPP2_QUEUE_VECTOR_SHARED;
+ v->sw_thread_id = 0;
+ v->sw_thread_mask = *cpumask_bits(cpu_online_mask);
+ v->port = port;
+ v->irq = irq_of_parse_and_map(port_node, 0);
+ if (v->irq <= 0)
+ return -EINVAL;
+ netif_napi_add(port->dev, &v->napi, mvpp2_poll);
+
+ port->nqvecs = 1;
+
+ return 0;
+}
+
+static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port,
+ struct device_node *port_node)
+{
+ struct mvpp2 *priv = port->priv;
+ struct mvpp2_queue_vector *v;
+ int i, ret;
+
+ switch (queue_mode) {
+ case MVPP2_QDIST_SINGLE_MODE:
+ port->nqvecs = priv->nthreads + 1;
+ break;
+ case MVPP2_QDIST_MULTI_MODE:
+ port->nqvecs = priv->nthreads;
+ break;
+ }
+
+ for (i = 0; i < port->nqvecs; i++) {
+ char irqname[16];
+
+ v = port->qvecs + i;
+
+ v->port = port;
+ v->type = MVPP2_QUEUE_VECTOR_PRIVATE;
+ v->sw_thread_id = i;
+ v->sw_thread_mask = BIT(i);
+
+ if (port->flags & MVPP2_F_DT_COMPAT)
+ snprintf(irqname, sizeof(irqname), "tx-cpu%d", i);
+ else
+ snprintf(irqname, sizeof(irqname), "hif%d", i);
+
+ if (queue_mode == MVPP2_QDIST_MULTI_MODE) {
+ v->first_rxq = i;
+ v->nrxqs = 1;
+ } else if (queue_mode == MVPP2_QDIST_SINGLE_MODE &&
+ i == (port->nqvecs - 1)) {
+ v->first_rxq = 0;
+ v->nrxqs = port->nrxqs;
+ v->type = MVPP2_QUEUE_VECTOR_SHARED;
+
+ if (port->flags & MVPP2_F_DT_COMPAT)
+ strncpy(irqname, "rx-shared", sizeof(irqname));
+ }
+
+ if (port_node)
+ v->irq = of_irq_get_byname(port_node, irqname);
+ else
+ v->irq = fwnode_irq_get(port->fwnode, i);
+ if (v->irq <= 0) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ netif_napi_add(port->dev, &v->napi, mvpp2_poll);
+ }
+
+ return 0;
+
+err:
+ for (i = 0; i < port->nqvecs; i++)
+ irq_dispose_mapping(port->qvecs[i].irq);
+ return ret;
+}
+
+static int mvpp2_queue_vectors_init(struct mvpp2_port *port,
+ struct device_node *port_node)
+{
+ if (port->has_tx_irqs)
+ return mvpp2_multi_queue_vectors_init(port, port_node);
+ else
+ return mvpp2_simple_queue_vectors_init(port, port_node);
+}
+
+static void mvpp2_queue_vectors_deinit(struct mvpp2_port *port)
+{
+ int i;
+
+ for (i = 0; i < port->nqvecs; i++)
+ irq_dispose_mapping(port->qvecs[i].irq);
+}
+
+/* Configure Rx queue group interrupt for this port */
+static void mvpp2_rx_irqs_setup(struct mvpp2_port *port)
+{
+ struct mvpp2 *priv = port->priv;
+ u32 val;
+ int i;
+
+ if (priv->hw_version == MVPP21) {
+ mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(port->id),
+ port->nrxqs);
+ return;
+ }
+
+ /* Handle the more complicated PPv2.2 and PPv2.3 case */
+ for (i = 0; i < port->nqvecs; i++) {
+ struct mvpp2_queue_vector *qv = port->qvecs + i;
+
+ if (!qv->nrxqs)
+ continue;
+
+ val = qv->sw_thread_id;
+ val |= port->id << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET;
+ mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val);
+
+ val = qv->first_rxq;
+ val |= qv->nrxqs << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET;
+ mvpp2_write(priv, MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val);
+ }
+}
+
+/* Initialize port HW */
+static int mvpp2_port_init(struct mvpp2_port *port)
+{
+ struct device *dev = port->dev->dev.parent;
+ struct mvpp2 *priv = port->priv;
+ struct mvpp2_txq_pcpu *txq_pcpu;
+ unsigned int thread;
+ int queue, err, val;
+
+ /* Checks for hardware constraints */
+ if (port->first_rxq + port->nrxqs >
+ MVPP2_MAX_PORTS * priv->max_port_rxqs)
+ return -EINVAL;
+
+ if (port->nrxqs > priv->max_port_rxqs || port->ntxqs > MVPP2_MAX_TXQ)
+ return -EINVAL;
+
+ /* Disable port */
+ mvpp2_egress_disable(port);
+ mvpp2_port_disable(port);
+
+ if (mvpp2_is_xlg(port->phy_interface)) {
+ val = readl(port->base + MVPP22_XLG_CTRL0_REG);
+ val &= ~MVPP22_XLG_CTRL0_FORCE_LINK_PASS;
+ val |= MVPP22_XLG_CTRL0_FORCE_LINK_DOWN;
+ writel(val, port->base + MVPP22_XLG_CTRL0_REG);
+ } else {
+ val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+ val &= ~MVPP2_GMAC_FORCE_LINK_PASS;
+ val |= MVPP2_GMAC_FORCE_LINK_DOWN;
+ writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+ }
+
+ port->tx_time_coal = MVPP2_TXDONE_COAL_USEC;
+
+ port->txqs = devm_kcalloc(dev, port->ntxqs, sizeof(*port->txqs),
+ GFP_KERNEL);
+ if (!port->txqs)
+ return -ENOMEM;
+
+ /* Associate physical Tx queues to this port and initialize.
+ * The mapping is predefined.
+ */
+ for (queue = 0; queue < port->ntxqs; queue++) {
+ int queue_phy_id = mvpp2_txq_phys(port->id, queue);
+ struct mvpp2_tx_queue *txq;
+
+ txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL);
+ if (!txq) {
+ err = -ENOMEM;
+ goto err_free_percpu;
+ }
+
+ txq->pcpu = alloc_percpu(struct mvpp2_txq_pcpu);
+ if (!txq->pcpu) {
+ err = -ENOMEM;
+ goto err_free_percpu;
+ }
+
+ txq->id = queue_phy_id;
+ txq->log_id = queue;
+ txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
+ for (thread = 0; thread < priv->nthreads; thread++) {
+ txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
+ txq_pcpu->thread = thread;
+ }
+
+ port->txqs[queue] = txq;
+ }
+
+ port->rxqs = devm_kcalloc(dev, port->nrxqs, sizeof(*port->rxqs),
+ GFP_KERNEL);
+ if (!port->rxqs) {
+ err = -ENOMEM;
+ goto err_free_percpu;
+ }
+
+ /* Allocate and initialize Rx queue for this port */
+ for (queue = 0; queue < port->nrxqs; queue++) {
+ struct mvpp2_rx_queue *rxq;
+
+ /* Map physical Rx queue to port's logical Rx queue */
+ rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
+ if (!rxq) {
+ err = -ENOMEM;
+ goto err_free_percpu;
+ }
+ /* Map this Rx queue to a physical queue */
+ rxq->id = port->first_rxq + queue;
+ rxq->port = port->id;
+ rxq->logic_rxq = queue;
+
+ port->rxqs[queue] = rxq;
+ }
+
+ mvpp2_rx_irqs_setup(port);
+
+ /* Create Rx descriptor rings */
+ for (queue = 0; queue < port->nrxqs; queue++) {
+ struct mvpp2_rx_queue *rxq = port->rxqs[queue];
+
+ rxq->size = port->rx_ring_size;
+ rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
+ rxq->time_coal = MVPP2_RX_COAL_USEC;
+ }
+
+ mvpp2_ingress_disable(port);
+
+ /* Port default configuration */
+ mvpp2_defaults_set(port);
+
+ /* Port's classifier configuration */
+ mvpp2_cls_oversize_rxq_set(port);
+ mvpp2_cls_port_config(port);
+
+ if (mvpp22_rss_is_supported(port))
+ mvpp22_port_rss_init(port);
+
+ /* Provide an initial Rx packet size */
+ port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu);
+
+ /* Initialize pools for swf */
+ err = mvpp2_swf_bm_pool_init(port);
+ if (err)
+ goto err_free_percpu;
+
+ /* Clear all port stats */
+ mvpp2_read_stats(port);
+ memset(port->ethtool_stats, 0,
+ MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs) * sizeof(u64));
+
+ return 0;
+
+err_free_percpu:
+ for (queue = 0; queue < port->ntxqs; queue++) {
+ if (!port->txqs[queue])
+ continue;
+ free_percpu(port->txqs[queue]->pcpu);
+ }
+ return err;
+}
+
+static bool mvpp22_port_has_legacy_tx_irqs(struct device_node *port_node,
+ unsigned long *flags)
+{
+ char *irqs[5] = { "rx-shared", "tx-cpu0", "tx-cpu1", "tx-cpu2",
+ "tx-cpu3" };
+ int i;
+
+ for (i = 0; i < 5; i++)
+ if (of_property_match_string(port_node, "interrupt-names",
+ irqs[i]) < 0)
+ return false;
+
+ *flags |= MVPP2_F_DT_COMPAT;
+ return true;
+}
+
+/* Checks if the port dt description has the required Tx interrupts:
+ * - PPv2.1: there are no such interrupts.
+ * - PPv2.2 and PPv2.3:
+ * - The old DTs have: "rx-shared", "tx-cpuX" with X in [0...3]
+ * - The new ones have: "hifX" with X in [0..8]
+ *
+ * All those variants are supported to keep the backward compatibility.
+ */
+static bool mvpp2_port_has_irqs(struct mvpp2 *priv,
+ struct device_node *port_node,
+ unsigned long *flags)
+{
+ char name[5];
+ int i;
+
+ /* ACPI */
+ if (!port_node)
+ return true;
+
+ if (priv->hw_version == MVPP21)
+ return false;
+
+ if (mvpp22_port_has_legacy_tx_irqs(port_node, flags))
+ return true;
+
+ for (i = 0; i < MVPP2_MAX_THREADS; i++) {
+ snprintf(name, 5, "hif%d", i);
+ if (of_property_match_string(port_node, "interrupt-names",
+ name) < 0)
+ return false;
+ }
+
+ return true;
+}
+
+static int mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv,
+ struct fwnode_handle *fwnode,
+ char **mac_from)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+ char hw_mac_addr[ETH_ALEN] = {0};
+ char fw_mac_addr[ETH_ALEN];
+ int ret;
+
+ if (!fwnode_get_mac_address(fwnode, fw_mac_addr)) {
+ *mac_from = "firmware node";
+ eth_hw_addr_set(dev, fw_mac_addr);
+ return 0;
+ }
+
+ if (priv->hw_version == MVPP21) {
+ mvpp21_get_mac_address(port, hw_mac_addr);
+ if (is_valid_ether_addr(hw_mac_addr)) {
+ *mac_from = "hardware";
+ eth_hw_addr_set(dev, hw_mac_addr);
+ return 0;
+ }
+ }
+
+ /* Only valid on OF enabled platforms */
+ ret = of_get_mac_address_nvmem(to_of_node(fwnode), fw_mac_addr);
+ if (ret == -EPROBE_DEFER)
+ return ret;
+ if (!ret) {
+ *mac_from = "nvmem cell";
+ eth_hw_addr_set(dev, fw_mac_addr);
+ return 0;
+ }
+
+ *mac_from = "random";
+ eth_hw_addr_random(dev);
+
+ return 0;
+}
+
+static struct mvpp2_port *mvpp2_phylink_to_port(struct phylink_config *config)
+{
+ return container_of(config, struct mvpp2_port, phylink_config);
+}
+
+static struct mvpp2_port *mvpp2_pcs_xlg_to_port(struct phylink_pcs *pcs)
+{
+ return container_of(pcs, struct mvpp2_port, pcs_xlg);
+}
+
+static struct mvpp2_port *mvpp2_pcs_gmac_to_port(struct phylink_pcs *pcs)
+{
+ return container_of(pcs, struct mvpp2_port, pcs_gmac);
+}
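
The three helpers above recover the owning mvpp2_port from a structure embedded inside it (the phylink_config or one of the two PCS instances) using container_of(). A self-contained illustration of that pattern outside the kernel (the type and field names here are made up for the example):

/* Standalone illustration of the container_of() pattern used by the
 * mvpp2_phylink_to_port()/mvpp2_pcs_*_to_port() helpers.
 */
#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct pcs {
    int id;
};

struct port {
    const char *name;
    struct pcs pcs_gmac; /* embedded member, like pcs_gmac in mvpp2_port */
};

static struct port *pcs_to_port(struct pcs *pcs)
{
    return container_of(pcs, struct port, pcs_gmac);
}

int main(void)
{
    struct port p = { .name = "port0", .pcs_gmac = { .id = 1 } };
    struct pcs *pcs = &p.pcs_gmac;

    printf("recovered: %s\n", pcs_to_port(pcs)->name);
    return 0;
}
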
+
+static void mvpp2_xlg_pcs_get_state(struct phylink_pcs *pcs,
+ struct phylink_link_state *state)
+{
+ struct mvpp2_port *port = mvpp2_pcs_xlg_to_port(pcs);
+ u32 val;
+
+ if (port->phy_interface == PHY_INTERFACE_MODE_5GBASER)
+ state->speed = SPEED_5000;
+ else
+ state->speed = SPEED_10000;
+ state->duplex = 1;
+ state->an_complete = 1;
+
+ val = readl(port->base + MVPP22_XLG_STATUS);
+ state->link = !!(val & MVPP22_XLG_STATUS_LINK_UP);
+
+ state->pause = 0;
+ val = readl(port->base + MVPP22_XLG_CTRL0_REG);
+ if (val & MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN)
+ state->pause |= MLO_PAUSE_TX;
+ if (val & MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN)
+ state->pause |= MLO_PAUSE_RX;
+}
+
+static int mvpp2_xlg_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
+ phy_interface_t interface,
+ const unsigned long *advertising,
+ bool permit_pause_to_mac)
+{
+ return 0;
+}
+
+static const struct phylink_pcs_ops mvpp2_phylink_xlg_pcs_ops = {
+ .pcs_get_state = mvpp2_xlg_pcs_get_state,
+ .pcs_config = mvpp2_xlg_pcs_config,
+};
+
+static int mvpp2_gmac_pcs_validate(struct phylink_pcs *pcs,
+ unsigned long *supported,
+ const struct phylink_link_state *state)
+{
+ /* When in 802.3z mode, we must have AN enabled:
+ * Bit 2 Field InBandAnEn In-band Auto-Negotiation enable. ...
+ * When <PortType> = 1 (1000BASE-X) this field must be set to 1.
+ */
+ if (phy_interface_mode_is_8023z(state->interface) &&
+ !phylink_test(state->advertising, Autoneg))
+ return -EINVAL;
+
+ return 0;
+}
+
+static void mvpp2_gmac_pcs_get_state(struct phylink_pcs *pcs,
+ struct phylink_link_state *state)
+{
+ struct mvpp2_port *port = mvpp2_pcs_gmac_to_port(pcs);
+ u32 val;
+
+ val = readl(port->base + MVPP2_GMAC_STATUS0);
+
+ state->an_complete = !!(val & MVPP2_GMAC_STATUS0_AN_COMPLETE);
+ state->link = !!(val & MVPP2_GMAC_STATUS0_LINK_UP);
+ state->duplex = !!(val & MVPP2_GMAC_STATUS0_FULL_DUPLEX);
+
+ switch (port->phy_interface) {
+ case PHY_INTERFACE_MODE_1000BASEX:
+ state->speed = SPEED_1000;
+ break;
+ case PHY_INTERFACE_MODE_2500BASEX:
+ state->speed = SPEED_2500;
+ break;
+ default:
+ if (val & MVPP2_GMAC_STATUS0_GMII_SPEED)
+ state->speed = SPEED_1000;
+ else if (val & MVPP2_GMAC_STATUS0_MII_SPEED)
+ state->speed = SPEED_100;
+ else
+ state->speed = SPEED_10;
+ }
+
+ state->pause = 0;
+ if (val & MVPP2_GMAC_STATUS0_RX_PAUSE)
+ state->pause |= MLO_PAUSE_RX;
+ if (val & MVPP2_GMAC_STATUS0_TX_PAUSE)
+ state->pause |= MLO_PAUSE_TX;
+}
+
+static int mvpp2_gmac_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
+ phy_interface_t interface,
+ const unsigned long *advertising,
+ bool permit_pause_to_mac)
+{
+ struct mvpp2_port *port = mvpp2_pcs_gmac_to_port(pcs);
+ u32 mask, val, an, old_an, changed;
+
+ mask = MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS |
+ MVPP2_GMAC_IN_BAND_AUTONEG |
+ MVPP2_GMAC_AN_SPEED_EN |
+ MVPP2_GMAC_FLOW_CTRL_AUTONEG |
+ MVPP2_GMAC_AN_DUPLEX_EN;
+
+ if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED) {
+ mask |= MVPP2_GMAC_CONFIG_MII_SPEED |
+ MVPP2_GMAC_CONFIG_GMII_SPEED |
+ MVPP2_GMAC_CONFIG_FULL_DUPLEX;
+ val = MVPP2_GMAC_IN_BAND_AUTONEG;
+
+ if (interface == PHY_INTERFACE_MODE_SGMII) {
+ /* SGMII mode receives the speed and duplex from PHY */
+ val |= MVPP2_GMAC_AN_SPEED_EN |
+ MVPP2_GMAC_AN_DUPLEX_EN;
+ } else {
+ /* 802.3z mode has fixed speed and duplex */
+ val |= MVPP2_GMAC_CONFIG_GMII_SPEED |
+ MVPP2_GMAC_CONFIG_FULL_DUPLEX;
+
+ /* The FLOW_CTRL_AUTONEG bit selects whether the GMAC pause
+ * modes are controlled automatically by the hardware or
+ * manually via the bits in MVPP22_GMAC_CTRL_4_REG.
+ */
+ if (permit_pause_to_mac)
+ val |= MVPP2_GMAC_FLOW_CTRL_AUTONEG;
+
+ /* Configure advertisement bits */
+ mask |= MVPP2_GMAC_FC_ADV_EN | MVPP2_GMAC_FC_ADV_ASM_EN;
+ if (phylink_test(advertising, Pause))
+ val |= MVPP2_GMAC_FC_ADV_EN;
+ if (phylink_test(advertising, Asym_Pause))
+ val |= MVPP2_GMAC_FC_ADV_ASM_EN;
+ }
+ } else {
+ val = 0;
+ }
+
+ old_an = an = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+ an = (an & ~mask) | val;
+ changed = an ^ old_an;
+ if (changed)
+ writel(an, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+
+ /* We are only interested in the advertisement bits changing */
+ return changed & (MVPP2_GMAC_FC_ADV_EN | MVPP2_GMAC_FC_ADV_ASM_EN);
+}
+
+static void mvpp2_gmac_pcs_an_restart(struct phylink_pcs *pcs)
+{
+ struct mvpp2_port *port = mvpp2_pcs_gmac_to_port(pcs);
+ u32 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+
+ writel(val | MVPP2_GMAC_IN_BAND_RESTART_AN,
+ port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+ writel(val & ~MVPP2_GMAC_IN_BAND_RESTART_AN,
+ port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+}
+
+static const struct phylink_pcs_ops mvpp2_phylink_gmac_pcs_ops = {
+ .pcs_validate = mvpp2_gmac_pcs_validate,
+ .pcs_get_state = mvpp2_gmac_pcs_get_state,
+ .pcs_config = mvpp2_gmac_pcs_config,
+ .pcs_an_restart = mvpp2_gmac_pcs_an_restart,
+};
+
+static void mvpp2_xlg_config(struct mvpp2_port *port, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ u32 val;
+
+ mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG,
+ MVPP22_XLG_CTRL0_MAC_RESET_DIS,
+ MVPP22_XLG_CTRL0_MAC_RESET_DIS);
+ mvpp2_modify(port->base + MVPP22_XLG_CTRL4_REG,
+ MVPP22_XLG_CTRL4_MACMODSELECT_GMAC |
+ MVPP22_XLG_CTRL4_EN_IDLE_CHECK |
+ MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC,
+ MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC);
+
+ /* Wait for reset to deassert */
+ do {
+ val = readl(port->base + MVPP22_XLG_CTRL0_REG);
+ } while (!(val & MVPP22_XLG_CTRL0_MAC_RESET_DIS));
+}
+
+static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ u32 old_ctrl0, ctrl0;
+ u32 old_ctrl2, ctrl2;
+ u32 old_ctrl4, ctrl4;
+
+ old_ctrl0 = ctrl0 = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
+ old_ctrl2 = ctrl2 = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
+ old_ctrl4 = ctrl4 = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
+
+ ctrl0 &= ~MVPP2_GMAC_PORT_TYPE_MASK;
+ ctrl2 &= ~(MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PCS_ENABLE_MASK | MVPP2_GMAC_FLOW_CTRL_MASK);
+
+ /* Configure port type */
+ if (phy_interface_mode_is_8023z(state->interface)) {
+ ctrl2 |= MVPP2_GMAC_PCS_ENABLE_MASK;
+ ctrl4 &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL;
+ ctrl4 |= MVPP22_CTRL4_SYNC_BYPASS_DIS |
+ MVPP22_CTRL4_DP_CLK_SEL |
+ MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
+ } else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
+ ctrl2 |= MVPP2_GMAC_PCS_ENABLE_MASK | MVPP2_GMAC_INBAND_AN_MASK;
+ ctrl4 &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL;
+ ctrl4 |= MVPP22_CTRL4_SYNC_BYPASS_DIS |
+ MVPP22_CTRL4_DP_CLK_SEL |
+ MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
+ } else if (phy_interface_mode_is_rgmii(state->interface)) {
+ ctrl4 &= ~MVPP22_CTRL4_DP_CLK_SEL;
+ ctrl4 |= MVPP22_CTRL4_EXT_PIN_GMII_SEL |
+ MVPP22_CTRL4_SYNC_BYPASS_DIS |
+ MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
+ }
+
+ /* Configure negotiation style */
+ if (!phylink_autoneg_inband(mode)) {
+ /* Phy or fixed speed - no in-band AN, nothing to do, leave the
+ * configured speed, duplex and flow control as-is.
+ */
+ } else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
+ /* SGMII in-band mode receives the speed and duplex from
+ * the PHY. Flow control information is not received. */
+ } else if (phy_interface_mode_is_8023z(state->interface)) {
+ /* 1000BaseX and 2500BaseX ports cannot negotiate speed nor can
+ * they negotiate duplex: they are always operating with a fixed
+ * speed of 1000/2500Mbps in full duplex, so force 1000/2500
+ * speed and full duplex here.
+ */
+ ctrl0 |= MVPP2_GMAC_PORT_TYPE_MASK;
+ }
+
+ if (old_ctrl0 != ctrl0)
+ writel(ctrl0, port->base + MVPP2_GMAC_CTRL_0_REG);
+ if (old_ctrl2 != ctrl2)
+ writel(ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG);
+ if (old_ctrl4 != ctrl4)
+ writel(ctrl4, port->base + MVPP22_GMAC_CTRL_4_REG);
+}
+
+static struct phylink_pcs *mvpp2_select_pcs(struct phylink_config *config,
+ phy_interface_t interface)
+{
+ struct mvpp2_port *port = mvpp2_phylink_to_port(config);
+
+ /* Select the appropriate PCS operations depending on the
+ * configured interface mode. We will only switch to a mode
+ * that the validate() checks have already passed.
+ */
+ if (mvpp2_is_xlg(interface))
+ return &port->pcs_xlg;
+ else
+ return &port->pcs_gmac;
+}
+
+static int mvpp2_mac_prepare(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface)
+{
+ struct mvpp2_port *port = mvpp2_phylink_to_port(config);
+
+ /* Check for invalid configuration */
+ if (mvpp2_is_xlg(interface) && port->gop_id != 0) {
+ netdev_err(port->dev, "Invalid mode on %s\n", port->dev->name);
+ return -EINVAL;
+ }
+
+ if (port->phy_interface != interface ||
+ phylink_autoneg_inband(mode)) {
+ /* Force the link down when changing the interface or if in
+ * in-band mode to ensure we do not change the configuration
+ * while the hardware is indicating link is up. We force both
+ * XLG and GMAC down to ensure that they're both in a known
+ * state.
+ */
+ mvpp2_modify(port->base + MVPP2_GMAC_AUTONEG_CONFIG,
+ MVPP2_GMAC_FORCE_LINK_PASS |
+ MVPP2_GMAC_FORCE_LINK_DOWN,
+ MVPP2_GMAC_FORCE_LINK_DOWN);
+
+ if (mvpp2_port_supports_xlg(port))
+ mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG,
+ MVPP22_XLG_CTRL0_FORCE_LINK_PASS |
+ MVPP22_XLG_CTRL0_FORCE_LINK_DOWN,
+ MVPP22_XLG_CTRL0_FORCE_LINK_DOWN);
+ }
+
+ /* Make sure the port is disabled when reconfiguring the mode */
+ mvpp2_port_disable(port);
+
+ if (port->phy_interface != interface) {
+ /* Place GMAC into reset */
+ mvpp2_modify(port->base + MVPP2_GMAC_CTRL_2_REG,
+ MVPP2_GMAC_PORT_RESET_MASK,
+ MVPP2_GMAC_PORT_RESET_MASK);
+
+ if (port->priv->hw_version >= MVPP22) {
+ mvpp22_gop_mask_irq(port);
+
+ phy_power_off(port->comphy);
+
+ /* Reconfigure the serdes lanes */
+ mvpp22_mode_reconfigure(port, interface);
+ }
+ }
+
+ return 0;
+}
+
+static void mvpp2_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct mvpp2_port *port = mvpp2_phylink_to_port(config);
+
+ /* mac (re)configuration */
+ if (mvpp2_is_xlg(state->interface))
+ mvpp2_xlg_config(port, mode, state);
+ else if (phy_interface_mode_is_rgmii(state->interface) ||
+ phy_interface_mode_is_8023z(state->interface) ||
+ state->interface == PHY_INTERFACE_MODE_SGMII)
+ mvpp2_gmac_config(port, mode, state);
+
+ if (port->priv->hw_version == MVPP21 && port->flags & MVPP2_F_LOOPBACK)
+ mvpp2_port_loopback_set(port, state);
+}
+
+static int mvpp2_mac_finish(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface)
+{
+ struct mvpp2_port *port = mvpp2_phylink_to_port(config);
+
+ if (port->priv->hw_version >= MVPP22 &&
+ port->phy_interface != interface) {
+ port->phy_interface = interface;
+
+ /* Unmask interrupts */
+ mvpp22_gop_unmask_irq(port);
+ }
+
+ if (!mvpp2_is_xlg(interface)) {
+ /* Release GMAC reset and wait */
+ mvpp2_modify(port->base + MVPP2_GMAC_CTRL_2_REG,
+ MVPP2_GMAC_PORT_RESET_MASK, 0);
+
+ while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
+ MVPP2_GMAC_PORT_RESET_MASK)
+ continue;
+ }
+
+ mvpp2_port_enable(port);
+
+ /* Allow the link to come up if in in-band mode, otherwise the
+ * link is forced via mac_link_down()/mac_link_up()
+ */
+ if (phylink_autoneg_inband(mode)) {
+ if (mvpp2_is_xlg(interface))
+ mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG,
+ MVPP22_XLG_CTRL0_FORCE_LINK_PASS |
+ MVPP22_XLG_CTRL0_FORCE_LINK_DOWN, 0);
+ else
+ mvpp2_modify(port->base + MVPP2_GMAC_AUTONEG_CONFIG,
+ MVPP2_GMAC_FORCE_LINK_PASS |
+ MVPP2_GMAC_FORCE_LINK_DOWN, 0);
+ }
+
+ return 0;
+}
+
+static void mvpp2_mac_link_up(struct phylink_config *config,
+ struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+ struct mvpp2_port *port = mvpp2_phylink_to_port(config);
+ u32 val;
+ int i;
+
+ if (mvpp2_is_xlg(interface)) {
+ if (!phylink_autoneg_inband(mode)) {
+ val = MVPP22_XLG_CTRL0_FORCE_LINK_PASS;
+ if (tx_pause)
+ val |= MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN;
+ if (rx_pause)
+ val |= MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN;
+
+ mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG,
+ MVPP22_XLG_CTRL0_FORCE_LINK_DOWN |
+ MVPP22_XLG_CTRL0_FORCE_LINK_PASS |
+ MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN |
+ MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN, val);
+ }
+ } else {
+ if (!phylink_autoneg_inband(mode)) {
+ val = MVPP2_GMAC_FORCE_LINK_PASS;
+
+ if (speed == SPEED_1000 || speed == SPEED_2500)
+ val |= MVPP2_GMAC_CONFIG_GMII_SPEED;
+ else if (speed == SPEED_100)
+ val |= MVPP2_GMAC_CONFIG_MII_SPEED;
+
+ if (duplex == DUPLEX_FULL)
+ val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
+
+ mvpp2_modify(port->base + MVPP2_GMAC_AUTONEG_CONFIG,
+ MVPP2_GMAC_FORCE_LINK_DOWN |
+ MVPP2_GMAC_FORCE_LINK_PASS |
+ MVPP2_GMAC_CONFIG_MII_SPEED |
+ MVPP2_GMAC_CONFIG_GMII_SPEED |
+ MVPP2_GMAC_CONFIG_FULL_DUPLEX, val);
+ }
+
+ /* We can always update the flow control enable bits;
+ * these will only be effective if flow control AN
+ * (MVPP2_GMAC_FLOW_CTRL_AUTONEG) is disabled.
+ */
+ val = 0;
+ if (tx_pause)
+ val |= MVPP22_CTRL4_TX_FC_EN;
+ if (rx_pause)
+ val |= MVPP22_CTRL4_RX_FC_EN;
+
+ mvpp2_modify(port->base + MVPP22_GMAC_CTRL_4_REG,
+ MVPP22_CTRL4_RX_FC_EN | MVPP22_CTRL4_TX_FC_EN,
+ val);
+ }
+
+ if (port->priv->global_tx_fc) {
+ port->tx_fc = tx_pause;
+ if (tx_pause)
+ mvpp2_rxq_enable_fc(port);
+ else
+ mvpp2_rxq_disable_fc(port);
+ if (port->priv->percpu_pools) {
+ for (i = 0; i < port->nrxqs; i++)
+ mvpp2_bm_pool_update_fc(port, &port->priv->bm_pools[i], tx_pause);
+ } else {
+ mvpp2_bm_pool_update_fc(port, port->pool_long, tx_pause);
+ mvpp2_bm_pool_update_fc(port, port->pool_short, tx_pause);
+ }
+ if (port->priv->hw_version == MVPP23)
+ mvpp23_rx_fifo_fc_en(port->priv, port->id, tx_pause);
+ }
+
+ mvpp2_port_enable(port);
+
+ mvpp2_egress_enable(port);
+ mvpp2_ingress_enable(port);
+ netif_tx_wake_all_queues(port->dev);
+}
+
+static void mvpp2_mac_link_down(struct phylink_config *config,
+ unsigned int mode, phy_interface_t interface)
+{
+ struct mvpp2_port *port = mvpp2_phylink_to_port(config);
+ u32 val;
+
+ if (!phylink_autoneg_inband(mode)) {
+ if (mvpp2_is_xlg(interface)) {
+ val = readl(port->base + MVPP22_XLG_CTRL0_REG);
+ val &= ~MVPP22_XLG_CTRL0_FORCE_LINK_PASS;
+ val |= MVPP22_XLG_CTRL0_FORCE_LINK_DOWN;
+ writel(val, port->base + MVPP22_XLG_CTRL0_REG);
+ } else {
+ val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+ val &= ~MVPP2_GMAC_FORCE_LINK_PASS;
+ val |= MVPP2_GMAC_FORCE_LINK_DOWN;
+ writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+ }
+ }
+
+ netif_tx_stop_all_queues(port->dev);
+ mvpp2_egress_disable(port);
+ mvpp2_ingress_disable(port);
+
+ mvpp2_port_disable(port);
+}
+
+static const struct phylink_mac_ops mvpp2_phylink_ops = {
+ .mac_select_pcs = mvpp2_select_pcs,
+ .mac_prepare = mvpp2_mac_prepare,
+ .mac_config = mvpp2_mac_config,
+ .mac_finish = mvpp2_mac_finish,
+ .mac_link_up = mvpp2_mac_link_up,
+ .mac_link_down = mvpp2_mac_link_down,
+};
+
+/* Work-around for ACPI */
+static void mvpp2_acpi_start(struct mvpp2_port *port)
+{
+ /* Phylink isn't used for ACPI as of now, so the MAC has to be
+ * configured manually when the interface is started. This will
+ * be removed as soon as phylink ACPI support lands.
+ */
+ struct phylink_link_state state = {
+ .interface = port->phy_interface,
+ };
+ struct phylink_pcs *pcs;
+
+ pcs = mvpp2_select_pcs(&port->phylink_config, port->phy_interface);
+
+ mvpp2_mac_prepare(&port->phylink_config, MLO_AN_INBAND,
+ port->phy_interface);
+ mvpp2_mac_config(&port->phylink_config, MLO_AN_INBAND, &state);
+ pcs->ops->pcs_config(pcs, PHYLINK_PCS_NEG_INBAND_ENABLED,
+ port->phy_interface, state.advertising,
+ false);
+ mvpp2_mac_finish(&port->phylink_config, MLO_AN_INBAND,
+ port->phy_interface);
+ mvpp2_mac_link_up(&port->phylink_config, NULL,
+ MLO_AN_INBAND, port->phy_interface,
+ SPEED_UNKNOWN, DUPLEX_UNKNOWN, false, false);
+}
+
+/* In order to ensure backward compatibility for ACPI, check if the port
+ * firmware node comprises the necessary description allowing to use phylink.
+ */
+static bool mvpp2_use_acpi_compat_mode(struct fwnode_handle *port_fwnode)
+{
+ if (!is_acpi_node(port_fwnode))
+ return false;
+
+ return (!fwnode_property_present(port_fwnode, "phy-handle") &&
+ !fwnode_property_present(port_fwnode, "managed") &&
+ !fwnode_get_named_child_node(port_fwnode, "fixed-link"));
+}
+
+/* Ports initialization */
+static int mvpp2_port_probe(struct platform_device *pdev,
+ struct fwnode_handle *port_fwnode,
+ struct mvpp2 *priv)
+{
+ struct phy *comphy = NULL;
+ struct mvpp2_port *port;
+ struct mvpp2_port_pcpu *port_pcpu;
+ struct device_node *port_node = to_of_node(port_fwnode);
+ netdev_features_t features;
+ struct net_device *dev;
+ struct phylink *phylink;
+ char *mac_from = "";
+ unsigned int ntxqs, nrxqs, thread;
+ unsigned long flags = 0;
+ bool has_tx_irqs;
+ u32 id;
+ int phy_mode;
+ int err, i;
+
+ has_tx_irqs = mvpp2_port_has_irqs(priv, port_node, &flags);
+ if (!has_tx_irqs && queue_mode == MVPP2_QDIST_MULTI_MODE) {
+ dev_err(&pdev->dev,
+ "not enough IRQs to support multi queue mode\n");
+ return -EINVAL;
+ }
+
+ ntxqs = MVPP2_MAX_TXQ;
+ nrxqs = mvpp2_get_nrxqs(priv);
+
+ dev = alloc_etherdev_mqs(sizeof(*port), ntxqs, nrxqs);
+ if (!dev)
+ return -ENOMEM;
+
+ phy_mode = fwnode_get_phy_mode(port_fwnode);
+ if (phy_mode < 0) {
+ dev_err(&pdev->dev, "incorrect phy mode\n");
+ err = phy_mode;
+ goto err_free_netdev;
+ }
+
+ /*
+ * Rewrite 10GBASE-KR to 10GBASE-R for compatibility with existing DT.
+ * Existing usage of 10GBASE-KR is not correct; no backplane
+ * negotiation is done, and this driver does not actually support
+ * 10GBASE-KR.
+ */
+ if (phy_mode == PHY_INTERFACE_MODE_10GKR)
+ phy_mode = PHY_INTERFACE_MODE_10GBASER;
+
+ if (port_node) {
+ comphy = devm_of_phy_get(&pdev->dev, port_node, NULL);
+ if (IS_ERR(comphy)) {
+ if (PTR_ERR(comphy) == -EPROBE_DEFER) {
+ err = -EPROBE_DEFER;
+ goto err_free_netdev;
+ }
+ comphy = NULL;
+ }
+ }
+
+ if (fwnode_property_read_u32(port_fwnode, "port-id", &id)) {
+ err = -EINVAL;
+ dev_err(&pdev->dev, "missing port-id value\n");
+ goto err_free_netdev;
+ }
+
+ dev->tx_queue_len = MVPP2_MAX_TXD_MAX;
+ dev->watchdog_timeo = 5 * HZ;
+ dev->netdev_ops = &mvpp2_netdev_ops;
+ dev->ethtool_ops = &mvpp2_eth_tool_ops;
+
+ port = netdev_priv(dev);
+ port->dev = dev;
+ port->fwnode = port_fwnode;
+ port->ntxqs = ntxqs;
+ port->nrxqs = nrxqs;
+ port->priv = priv;
+ port->has_tx_irqs = has_tx_irqs;
+ port->flags = flags;
+
+ err = mvpp2_queue_vectors_init(port, port_node);
+ if (err)
+ goto err_free_netdev;
+
+ if (port_node)
+ port->port_irq = of_irq_get_byname(port_node, "link");
+ else
+ port->port_irq = fwnode_irq_get(port_fwnode, port->nqvecs + 1);
+ if (port->port_irq == -EPROBE_DEFER) {
+ err = -EPROBE_DEFER;
+ goto err_deinit_qvecs;
+ }
+ if (port->port_irq <= 0)
+ /* the link irq is optional */
+ port->port_irq = 0;
+
+ if (fwnode_property_read_bool(port_fwnode, "marvell,loopback"))
+ port->flags |= MVPP2_F_LOOPBACK;
+
+ port->id = id;
+ if (priv->hw_version == MVPP21)
+ port->first_rxq = port->id * port->nrxqs;
+ else
+ port->first_rxq = port->id * priv->max_port_rxqs;
+
+ port->of_node = port_node;
+ port->phy_interface = phy_mode;
+ port->comphy = comphy;
+
+ if (priv->hw_version == MVPP21) {
+ port->base = devm_platform_ioremap_resource(pdev, 2 + id);
+ if (IS_ERR(port->base)) {
+ err = PTR_ERR(port->base);
+ goto err_free_irq;
+ }
+
+ port->stats_base = port->priv->lms_base +
+ MVPP21_MIB_COUNTERS_OFFSET +
+ port->gop_id * MVPP21_MIB_COUNTERS_PORT_SZ;
+ } else {
+ if (fwnode_property_read_u32(port_fwnode, "gop-port-id",
+ &port->gop_id)) {
+ err = -EINVAL;
+ dev_err(&pdev->dev, "missing gop-port-id value\n");
+ goto err_deinit_qvecs;
+ }
+
+ port->base = priv->iface_base + MVPP22_GMAC_BASE(port->gop_id);
+ port->stats_base = port->priv->iface_base +
+ MVPP22_MIB_COUNTERS_OFFSET +
+ port->gop_id * MVPP22_MIB_COUNTERS_PORT_SZ;
+
+ /* We may want a property to describe whether we should use
+ * MAC hardware timestamping.
+ */
+ if (priv->tai)
+ port->hwtstamp = true;
+ }
+
+ /* Alloc per-cpu and ethtool stats */
+ port->stats = netdev_alloc_pcpu_stats(struct mvpp2_pcpu_stats);
+ if (!port->stats) {
+ err = -ENOMEM;
+ goto err_free_irq;
+ }
+
+ port->ethtool_stats = devm_kcalloc(&pdev->dev,
+ MVPP2_N_ETHTOOL_STATS(ntxqs, nrxqs),
+ sizeof(u64), GFP_KERNEL);
+ if (!port->ethtool_stats) {
+ err = -ENOMEM;
+ goto err_free_stats;
+ }
+
+ mutex_init(&port->gather_stats_lock);
+ INIT_DELAYED_WORK(&port->stats_work, mvpp2_gather_hw_statistics);
+
+ err = mvpp2_port_copy_mac_addr(dev, priv, port_fwnode, &mac_from);
+ if (err < 0)
+ goto err_free_stats;
+
+ port->tx_ring_size = MVPP2_MAX_TXD_DFLT;
+ port->rx_ring_size = MVPP2_MAX_RXD_DFLT;
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ err = mvpp2_port_init(port);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to init port %d\n", id);
+ goto err_free_stats;
+ }
+
+ mvpp2_port_periodic_xon_disable(port);
+
+ mvpp2_mac_reset_assert(port);
+ mvpp22_pcs_reset_assert(port);
+
+ port->pcpu = alloc_percpu(struct mvpp2_port_pcpu);
+ if (!port->pcpu) {
+ err = -ENOMEM;
+ goto err_free_txq_pcpu;
+ }
+
+ if (!port->has_tx_irqs) {
+ for (thread = 0; thread < priv->nthreads; thread++) {
+ port_pcpu = per_cpu_ptr(port->pcpu, thread);
+
+ hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_PINNED_SOFT);
+ port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
+ port_pcpu->timer_scheduled = false;
+ port_pcpu->dev = dev;
+ }
+ }
+
+ features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_TSO;
+ dev->features = features | NETIF_F_RXCSUM;
+ dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO |
+ NETIF_F_HW_VLAN_CTAG_FILTER;
+
+ if (mvpp22_rss_is_supported(port)) {
+ dev->hw_features |= NETIF_F_RXHASH;
+ dev->features |= NETIF_F_NTUPLE;
+ }
+
+ if (!port->priv->percpu_pools)
+ mvpp2_set_hw_csum(port, port->pool_long->id);
+ else if (port->ntxqs >= num_possible_cpus() * 2)
+ dev->xdp_features = NETDEV_XDP_ACT_BASIC |
+ NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT;
+
+ dev->vlan_features |= features;
+ netif_set_tso_max_segs(dev, MVPP2_MAX_TSO_SEGS);
+
+ dev->priv_flags |= IFF_UNICAST_FLT;
+
+ /* MTU range: 68 - 9704 */
+ dev->min_mtu = ETH_MIN_MTU;
+ /* 9704 == 9728 - 20, rounded down to a multiple of 8 */
+ dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE;
+ dev->dev.of_node = port_node;
+
+ port->pcs_gmac.ops = &mvpp2_phylink_gmac_pcs_ops;
+ port->pcs_gmac.neg_mode = true;
+ port->pcs_xlg.ops = &mvpp2_phylink_xlg_pcs_ops;
+ port->pcs_xlg.neg_mode = true;
+
+ if (!mvpp2_use_acpi_compat_mode(port_fwnode)) {
+ port->phylink_config.dev = &dev->dev;
+ port->phylink_config.type = PHYLINK_NETDEV;
+ port->phylink_config.mac_capabilities =
+ MAC_2500FD | MAC_1000FD | MAC_100 | MAC_10;
+
+ if (port->priv->global_tx_fc)
+ port->phylink_config.mac_capabilities |=
+ MAC_SYM_PAUSE | MAC_ASYM_PAUSE;
+
+ if (mvpp2_port_supports_xlg(port)) {
+ /* If a COMPHY is present, we can support any of
+ * the serdes modes and switch between them.
+ */
+ if (comphy) {
+ __set_bit(PHY_INTERFACE_MODE_5GBASER,
+ port->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_10GBASER,
+ port->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_XAUI,
+ port->phylink_config.supported_interfaces);
+ } else if (phy_mode == PHY_INTERFACE_MODE_5GBASER) {
+ __set_bit(PHY_INTERFACE_MODE_5GBASER,
+ port->phylink_config.supported_interfaces);
+ } else if (phy_mode == PHY_INTERFACE_MODE_10GBASER) {
+ __set_bit(PHY_INTERFACE_MODE_10GBASER,
+ port->phylink_config.supported_interfaces);
+ } else if (phy_mode == PHY_INTERFACE_MODE_XAUI) {
+ __set_bit(PHY_INTERFACE_MODE_XAUI,
+ port->phylink_config.supported_interfaces);
+ }
+
+ if (comphy)
+ port->phylink_config.mac_capabilities |=
+ MAC_10000FD | MAC_5000FD;
+ else if (phy_mode == PHY_INTERFACE_MODE_5GBASER)
+ port->phylink_config.mac_capabilities |=
+ MAC_5000FD;
+ else
+ port->phylink_config.mac_capabilities |=
+ MAC_10000FD;
+ }
+
+ if (mvpp2_port_supports_rgmii(port))
+ phy_interface_set_rgmii(port->phylink_config.supported_interfaces);
+
+ if (comphy) {
+ /* If a COMPHY is present, we can support any of the
+ * serdes modes and switch between them.
+ */
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ port->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ port->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX,
+ port->phylink_config.supported_interfaces);
+ } else if (phy_mode == PHY_INTERFACE_MODE_2500BASEX) {
+ /* No COMPHY, with only 2500BASE-X mode supported */
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX,
+ port->phylink_config.supported_interfaces);
+ } else if (phy_mode == PHY_INTERFACE_MODE_1000BASEX ||
+ phy_mode == PHY_INTERFACE_MODE_SGMII) {
+ /* No COMPHY, we can switch between 1000BASE-X and SGMII
+ */
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ port->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ port->phylink_config.supported_interfaces);
+ }
+
+ phylink = phylink_create(&port->phylink_config, port_fwnode,
+ phy_mode, &mvpp2_phylink_ops);
+ if (IS_ERR(phylink)) {
+ err = PTR_ERR(phylink);
+ goto err_free_port_pcpu;
+ }
+ port->phylink = phylink;
+ } else {
+ dev_warn(&pdev->dev, "Use link irqs for port#%d. FW update required\n", port->id);
+ port->phylink = NULL;
+ }
+
+ /* Cycle the comphy to power it down, saving 270mW per port -
+ * don't worry about an error powering it up. When the comphy
+ * driver does this, we can remove this code.
+ */
+ if (port->comphy) {
+ err = mvpp22_comphy_init(port, port->phy_interface);
+ if (err == 0)
+ phy_power_off(port->comphy);
+ }
+
+ err = register_netdev(dev);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to register netdev\n");
+ goto err_phylink;
+ }
+ netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);
+
+ priv->port_list[priv->port_count++] = port;
+
+ return 0;
+
+err_phylink:
+ if (port->phylink)
+ phylink_destroy(port->phylink);
+err_free_port_pcpu:
+ free_percpu(port->pcpu);
+err_free_txq_pcpu:
+ for (i = 0; i < port->ntxqs; i++)
+ free_percpu(port->txqs[i]->pcpu);
+err_free_stats:
+ free_percpu(port->stats);
+err_free_irq:
+ if (port->port_irq)
+ irq_dispose_mapping(port->port_irq);
+err_deinit_qvecs:
+ mvpp2_queue_vectors_deinit(port);
+err_free_netdev:
+ free_netdev(dev);
+ return err;
+}
+
+/* Ports removal routine */
+static void mvpp2_port_remove(struct mvpp2_port *port)
+{
+ int i;
+
+ unregister_netdev(port->dev);
+ if (port->phylink)
+ phylink_destroy(port->phylink);
+ free_percpu(port->pcpu);
+ free_percpu(port->stats);
+ for (i = 0; i < port->ntxqs; i++)
+ free_percpu(port->txqs[i]->pcpu);
+ mvpp2_queue_vectors_deinit(port);
+ if (port->port_irq)
+ irq_dispose_mapping(port->port_irq);
+ free_netdev(port->dev);
+}
+
+/* Initialize decoding windows */
+static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram,
+ struct mvpp2 *priv)
+{
+ u32 win_enable;
+ int i;
+
+ for (i = 0; i < 6; i++) {
+ mvpp2_write(priv, MVPP2_WIN_BASE(i), 0);
+ mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0);
+
+ if (i < 4)
+ mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0);
+ }
+
+ win_enable = 0;
+
+ for (i = 0; i < dram->num_cs; i++) {
+ const struct mbus_dram_window *cs = dram->cs + i;
+
+ mvpp2_write(priv, MVPP2_WIN_BASE(i),
+ (cs->base & 0xffff0000) | (cs->mbus_attr << 8) |
+ dram->mbus_dram_target_id);
+
+ mvpp2_write(priv, MVPP2_WIN_SIZE(i),
+ (cs->size - 1) & 0xffff0000);
+
+ win_enable |= (1 << i);
+ }
+
+ mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable);
+}
+
+/* Initialize Rx FIFOs */
+static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
+{
+ int port;
+
+ for (port = 0; port < MVPP2_MAX_PORTS; port++) {
+ mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
+ MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB);
+ mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
+ MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB);
+ }
+
+ mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
+ MVPP2_RX_FIFO_PORT_MIN_PKT);
+ mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
+}
+
+static void mvpp22_rx_fifo_set_hw(struct mvpp2 *priv, int port, int data_size)
+{
+ int attr_size = MVPP2_RX_FIFO_PORT_ATTR_SIZE(data_size);
+
+ mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port), data_size);
+ mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port), attr_size);
+}
+
+/* Initialize Rx FIFOs: the total FIFO size is 48kB on PPv2.2 and PPv2.3.
+ * 4kB of fixed space must be assigned for the loopback port.
+ * Redistribute the remaining available 44kB among all active ports.
+ * Guarantee a minimum of 32kB for the 10G port and 8kB for port 1, which is
+ * capable of a 2.5G SGMII link.
+ */
+static void mvpp22_rx_fifo_init(struct mvpp2 *priv)
+{
+ int remaining_ports_count;
+ unsigned long port_map;
+ int size_remainder;
+ int port, size;
+
+ /* The loopback requires fixed 4kB of the FIFO space assignment. */
+ mvpp22_rx_fifo_set_hw(priv, MVPP2_LOOPBACK_PORT_INDEX,
+ MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB);
+ port_map = priv->port_map & ~BIT(MVPP2_LOOPBACK_PORT_INDEX);
+
+ /* Set RX FIFO size to 0 for inactive ports. */
+ for_each_clear_bit(port, &port_map, MVPP2_LOOPBACK_PORT_INDEX)
+ mvpp22_rx_fifo_set_hw(priv, port, 0);
+
+ /* Assign remaining RX FIFO space among all active ports. */
+ size_remainder = MVPP2_RX_FIFO_PORT_DATA_SIZE_44KB;
+ remaining_ports_count = hweight_long(port_map);
+
+ for_each_set_bit(port, &port_map, MVPP2_LOOPBACK_PORT_INDEX) {
+ if (remaining_ports_count == 1)
+ size = size_remainder;
+ else if (port == 0)
+ size = max(size_remainder / remaining_ports_count,
+ MVPP2_RX_FIFO_PORT_DATA_SIZE_32KB);
+ else if (port == 1)
+ size = max(size_remainder / remaining_ports_count,
+ MVPP2_RX_FIFO_PORT_DATA_SIZE_8KB);
+ else
+ size = size_remainder / remaining_ports_count;
+
+ size_remainder -= size;
+ remaining_ports_count--;
+
+ mvpp22_rx_fifo_set_hw(priv, port, size);
+ }
+
+ mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
+ MVPP2_RX_FIFO_PORT_MIN_PKT);
+ mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
+}
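
The loop above distributes the 44kB left after the loopback port's fixed 4kB, enforcing the 32kB/8kB floors for ports 0 and 1 and giving the last active port whatever remains. A standalone sketch of the same arithmetic, in kB, assuming ports 0-2 are the active ones:

/* Standalone version of the RX FIFO split done by mvpp22_rx_fifo_init():
 * 44kB shared among active ports, at least 32kB for port 0 and 8kB for
 * port 1. Sizes are in kB; the three-port map is illustrative.
 */
#include <stdio.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
    int active[] = { 0, 1, 2 }; /* assume ports 0..2 are active */
    int nports = 3;
    int remainder = 44;
    int i;

    for (i = 0; i < nports; i++) {
        int port = active[i], size;

        if (nports - i == 1)
            size = remainder;
        else if (port == 0)
            size = MAX(remainder / (nports - i), 32);
        else if (port == 1)
            size = MAX(remainder / (nports - i), 8);
        else
            size = remainder / (nports - i);

        remainder -= size;
        printf("port %d: %d kB\n", port, size);
    }
    return 0;
}

With that port map it yields 32kB, 8kB and 4kB respectively, which sums back to the 44kB being distributed.
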
+
+/* Configure Rx FIFO Flow control thresholds */
+static void mvpp23_rx_fifo_fc_set_tresh(struct mvpp2 *priv)
+{
+ int port, val;
+
+ /* Port 0: maximum speed 10Gb/s, spec requires an RX FIFO threshold of 9KB
+ * Port 1: maximum speed 5Gb/s, spec requires an RX FIFO threshold of 4KB
+ * Port 2: maximum speed 1Gb/s, spec requires an RX FIFO threshold of 2KB
+ */
+
+ /* Without loopback port */
+ for (port = 0; port < (MVPP2_MAX_PORTS - 1); port++) {
+ if (port == 0) {
+ val = (MVPP23_PORT0_FIFO_TRSH / MVPP2_RX_FC_TRSH_UNIT)
+ << MVPP2_RX_FC_TRSH_OFFS;
+ val &= MVPP2_RX_FC_TRSH_MASK;
+ mvpp2_write(priv, MVPP2_RX_FC_REG(port), val);
+ } else if (port == 1) {
+ val = (MVPP23_PORT1_FIFO_TRSH / MVPP2_RX_FC_TRSH_UNIT)
+ << MVPP2_RX_FC_TRSH_OFFS;
+ val &= MVPP2_RX_FC_TRSH_MASK;
+ mvpp2_write(priv, MVPP2_RX_FC_REG(port), val);
+ } else {
+ val = (MVPP23_PORT2_FIFO_TRSH / MVPP2_RX_FC_TRSH_UNIT)
+ << MVPP2_RX_FC_TRSH_OFFS;
+ val &= MVPP2_RX_FC_TRSH_MASK;
+ mvpp2_write(priv, MVPP2_RX_FC_REG(port), val);
+ }
+ }
+}
+
+/* Configure Rx FIFO Flow control thresholds */
+void mvpp23_rx_fifo_fc_en(struct mvpp2 *priv, int port, bool en)
+{
+ int val;
+
+ val = mvpp2_read(priv, MVPP2_RX_FC_REG(port));
+
+ if (en)
+ val |= MVPP2_RX_FC_EN;
+ else
+ val &= ~MVPP2_RX_FC_EN;
+
+ mvpp2_write(priv, MVPP2_RX_FC_REG(port), val);
+}
+
+static void mvpp22_tx_fifo_set_hw(struct mvpp2 *priv, int port, int size)
+{
+ int threshold = MVPP2_TX_FIFO_THRESHOLD(size);
+
+ mvpp2_write(priv, MVPP22_TX_FIFO_SIZE_REG(port), size);
+ mvpp2_write(priv, MVPP22_TX_FIFO_THRESH_REG(port), threshold);
+}
+
+/* Initialize Tx FIFOs: the total FIFO size is 19kB on PPv2.2 and PPv2.3.
+ * 1kB of fixed space must be assigned for the loopback port.
+ * Redistribute the remaining available 18kB among all active ports.
+ * The 10G interface should use 10kB (the maximum possible size per
+ * single port).
+ */
+static void mvpp22_tx_fifo_init(struct mvpp2 *priv)
+{
+ int remaining_ports_count;
+ unsigned long port_map;
+ int size_remainder;
+ int port, size;
+
+ /* The loopback requires fixed 1kB of the FIFO space assignment. */
+ mvpp22_tx_fifo_set_hw(priv, MVPP2_LOOPBACK_PORT_INDEX,
+ MVPP22_TX_FIFO_DATA_SIZE_1KB);
+ port_map = priv->port_map & ~BIT(MVPP2_LOOPBACK_PORT_INDEX);
+
+ /* Set TX FIFO size to 0 for inactive ports. */
+ for_each_clear_bit(port, &port_map, MVPP2_LOOPBACK_PORT_INDEX)
+ mvpp22_tx_fifo_set_hw(priv, port, 0);
+
+ /* Assign remaining TX FIFO space among all active ports. */
+ size_remainder = MVPP22_TX_FIFO_DATA_SIZE_18KB;
+ remaining_ports_count = hweight_long(port_map);
+
+ for_each_set_bit(port, &port_map, MVPP2_LOOPBACK_PORT_INDEX) {
+ if (remaining_ports_count == 1)
+ size = min(size_remainder,
+ MVPP22_TX_FIFO_DATA_SIZE_10KB);
+ else if (port == 0)
+ size = MVPP22_TX_FIFO_DATA_SIZE_10KB;
+ else
+ size = size_remainder / remaining_ports_count;
+
+ size_remainder -= size;
+ remaining_ports_count--;
+
+ mvpp22_tx_fifo_set_hw(priv, port, size);
+ }
+}
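
Walking the same kind of loop for the TX side with ports 0-2 active: port 0 takes its fixed 10kB, leaving 8kB for two ports, so ports 1 and 2 get 4kB each and the full 18kB is consumed. The min() applied to the last remaining port only matters when that port's share of the remainder would otherwise exceed the 10kB per-port maximum (for example, with a single active port).
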
+
+static void mvpp2_axi_init(struct mvpp2 *priv)
+{
+ u32 val, rdval, wrval;
+
+ mvpp2_write(priv, MVPP22_BM_ADDR_HIGH_RLS_REG, 0x0);
+
+ /* AXI Bridge Configuration */
+
+ rdval = MVPP22_AXI_CODE_CACHE_RD_CACHE
+ << MVPP22_AXI_ATTR_CACHE_OFFS;
+ rdval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
+ << MVPP22_AXI_ATTR_DOMAIN_OFFS;
+
+ wrval = MVPP22_AXI_CODE_CACHE_WR_CACHE
+ << MVPP22_AXI_ATTR_CACHE_OFFS;
+ wrval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
+ << MVPP22_AXI_ATTR_DOMAIN_OFFS;
+
+ /* BM */
+ mvpp2_write(priv, MVPP22_AXI_BM_WR_ATTR_REG, wrval);
+ mvpp2_write(priv, MVPP22_AXI_BM_RD_ATTR_REG, rdval);
+
+ /* Descriptors */
+ mvpp2_write(priv, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, rdval);
+ mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, wrval);
+ mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, rdval);
+ mvpp2_write(priv, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, wrval);
+
+ /* Buffer Data */
+ mvpp2_write(priv, MVPP22_AXI_TX_DATA_RD_ATTR_REG, rdval);
+ mvpp2_write(priv, MVPP22_AXI_RX_DATA_WR_ATTR_REG, wrval);
+
+ val = MVPP22_AXI_CODE_CACHE_NON_CACHE
+ << MVPP22_AXI_CODE_CACHE_OFFS;
+ val |= MVPP22_AXI_CODE_DOMAIN_SYSTEM
+ << MVPP22_AXI_CODE_DOMAIN_OFFS;
+ mvpp2_write(priv, MVPP22_AXI_RD_NORMAL_CODE_REG, val);
+ mvpp2_write(priv, MVPP22_AXI_WR_NORMAL_CODE_REG, val);
+
+ val = MVPP22_AXI_CODE_CACHE_RD_CACHE
+ << MVPP22_AXI_CODE_CACHE_OFFS;
+ val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
+ << MVPP22_AXI_CODE_DOMAIN_OFFS;
+
+ mvpp2_write(priv, MVPP22_AXI_RD_SNOOP_CODE_REG, val);
+
+ val = MVPP22_AXI_CODE_CACHE_WR_CACHE
+ << MVPP22_AXI_CODE_CACHE_OFFS;
+ val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
+ << MVPP22_AXI_CODE_DOMAIN_OFFS;
+
+ mvpp2_write(priv, MVPP22_AXI_WR_SNOOP_CODE_REG, val);
+}
+
+/* Initialize network controller common part HW */
+static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
+{
+ const struct mbus_dram_target_info *dram_target_info;
+ int err, i;
+ u32 val;
+
+ /* MBUS windows configuration */
+ dram_target_info = mv_mbus_dram_info();
+ if (dram_target_info)
+ mvpp2_conf_mbus_windows(dram_target_info, priv);
+
+ if (priv->hw_version >= MVPP22)
+ mvpp2_axi_init(priv);
+
+ /* Disable HW PHY polling */
+ if (priv->hw_version == MVPP21) {
+ val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
+ val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
+ writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
+ } else {
+ val = readl(priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
+ val &= ~MVPP22_SMI_POLLING_EN;
+ writel(val, priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
+ }
+
+ /* Allocate and initialize aggregated TXQs */
+ priv->aggr_txqs = devm_kcalloc(&pdev->dev, MVPP2_MAX_THREADS,
+ sizeof(*priv->aggr_txqs),
+ GFP_KERNEL);
+ if (!priv->aggr_txqs)
+ return -ENOMEM;
+
+ for (i = 0; i < MVPP2_MAX_THREADS; i++) {
+ priv->aggr_txqs[i].id = i;
+ priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
+ err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i], i, priv);
+ if (err < 0)
+ return err;
+ }
+
+ /* Fifo Init */
+ if (priv->hw_version == MVPP21) {
+ mvpp2_rx_fifo_init(priv);
+ } else {
+ mvpp22_rx_fifo_init(priv);
+ mvpp22_tx_fifo_init(priv);
+ if (priv->hw_version == MVPP23)
+ mvpp23_rx_fifo_fc_set_tresh(priv);
+ }
+
+ if (priv->hw_version == MVPP21)
+ writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
+ priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);
+
+ /* Allow cache snooping when transmitting packets */
+ mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);
+
+ /* Buffer Manager initialization */
+ err = mvpp2_bm_init(&pdev->dev, priv);
+ if (err < 0)
+ return err;
+
+ /* Parser default initialization */
+ err = mvpp2_prs_default_init(pdev, priv);
+ if (err < 0)
+ return err;
+
+ /* Classifier default initialization */
+ mvpp2_cls_init(priv);
+
+ return 0;
+}
+
+static int mvpp2_get_sram(struct platform_device *pdev,
+ struct mvpp2 *priv)
+{
+ struct resource *res;
+ void __iomem *base;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+ if (!res) {
+ if (has_acpi_companion(&pdev->dev))
+ dev_warn(&pdev->dev, "ACPI is too old, Flow control not supported\n");
+ else
+ dev_warn(&pdev->dev, "DT is too old, Flow control not supported\n");
+ return 0;
+ }
+
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ priv->cm3_base = base;
+ return 0;
+}
+
+static int mvpp2_probe(struct platform_device *pdev)
+{
+ struct fwnode_handle *fwnode = pdev->dev.fwnode;
+ struct fwnode_handle *port_fwnode;
+ struct mvpp2 *priv;
+ struct resource *res;
+ void __iomem *base;
+ int i, shared;
+ int err;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->hw_version = (unsigned long)device_get_match_data(&pdev->dev);
+
+ /* Multi queue mode isn't supported on PPv2.1, fall back to single
+ * mode
+ */
+ if (priv->hw_version == MVPP21)
+ queue_mode = MVPP2_QDIST_SINGLE_MODE;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ if (priv->hw_version == MVPP21) {
+ priv->lms_base = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(priv->lms_base))
+ return PTR_ERR(priv->lms_base);
+ } else {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res) {
+ dev_err(&pdev->dev, "Invalid resource\n");
+ return -EINVAL;
+ }
+ if (has_acpi_companion(&pdev->dev)) {
+ /* In case the MDIO memory region is declared in
+ * the ACPI, it can already appear as 'in-use'
+ * in the OS. Because it is overlapped by the second
+ * region of the network controller, make sure it is
+ * released before requesting it again. Care is taken
+ * by the mvpp2 driver to avoid concurrent access to
+ * this memory region.
+ */
+ release_resource(res);
+ }
+ priv->iface_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->iface_base))
+ return PTR_ERR(priv->iface_base);
+
+ /* Map CM3 SRAM */
+ err = mvpp2_get_sram(pdev, priv);
+ if (err)
+ dev_warn(&pdev->dev, "Fail to alloc CM3 SRAM\n");
+
+ /* Enable global Flow Control only if the handle to the CM3 SRAM is not NULL */
+ if (priv->cm3_base)
+ priv->global_tx_fc = true;
+ }
+
+ if (priv->hw_version >= MVPP22 && dev_of_node(&pdev->dev)) {
+ priv->sysctrl_base =
+ syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+ "marvell,system-controller");
+ if (IS_ERR(priv->sysctrl_base))
+ /* The system controller regmap is optional for dt
+ * compatibility reasons. When not provided, the
+ * configuration of the GoP relies on the
+ * firmware/bootloader.
+ */
+ priv->sysctrl_base = NULL;
+ }
+
+ if (priv->hw_version >= MVPP22 &&
+ mvpp2_get_nrxqs(priv) * 2 <= MVPP2_BM_MAX_POOLS)
+ priv->percpu_pools = 1;
+
+ mvpp2_setup_bm_pool();
+
+ priv->nthreads = min_t(unsigned int, num_present_cpus(),
+ MVPP2_MAX_THREADS);
+
+ shared = num_present_cpus() - priv->nthreads;
+ if (shared > 0)
+ bitmap_set(&priv->lock_map, 0,
+ min_t(int, shared, MVPP2_MAX_THREADS));
+
+ for (i = 0; i < MVPP2_MAX_THREADS; i++) {
+ u32 addr_space_sz;
+
+ addr_space_sz = (priv->hw_version == MVPP21 ?
+ MVPP21_ADDR_SPACE_SZ : MVPP22_ADDR_SPACE_SZ);
+ priv->swth_base[i] = base + i * addr_space_sz;
+ }
+
+ if (priv->hw_version == MVPP21)
+ priv->max_port_rxqs = 8;
+ else
+ priv->max_port_rxqs = 32;
+
+ if (dev_of_node(&pdev->dev)) {
+ priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk");
+ if (IS_ERR(priv->pp_clk))
+ return PTR_ERR(priv->pp_clk);
+ err = clk_prepare_enable(priv->pp_clk);
+ if (err < 0)
+ return err;
+
+ priv->gop_clk = devm_clk_get(&pdev->dev, "gop_clk");
+ if (IS_ERR(priv->gop_clk)) {
+ err = PTR_ERR(priv->gop_clk);
+ goto err_pp_clk;
+ }
+ err = clk_prepare_enable(priv->gop_clk);
+ if (err < 0)
+ goto err_pp_clk;
+
+ if (priv->hw_version >= MVPP22) {
+ priv->mg_clk = devm_clk_get(&pdev->dev, "mg_clk");
+ if (IS_ERR(priv->mg_clk)) {
+ err = PTR_ERR(priv->mg_clk);
+ goto err_gop_clk;
+ }
+
+ err = clk_prepare_enable(priv->mg_clk);
+ if (err < 0)
+ goto err_gop_clk;
+
+ priv->mg_core_clk = devm_clk_get_optional(&pdev->dev, "mg_core_clk");
+ if (IS_ERR(priv->mg_core_clk)) {
+ err = PTR_ERR(priv->mg_core_clk);
+ goto err_mg_clk;
+ }
+
+ err = clk_prepare_enable(priv->mg_core_clk);
+ if (err < 0)
+ goto err_mg_clk;
+ }
+
+ priv->axi_clk = devm_clk_get_optional(&pdev->dev, "axi_clk");
+ if (IS_ERR(priv->axi_clk)) {
+ err = PTR_ERR(priv->axi_clk);
+ goto err_mg_core_clk;
+ }
+
+ err = clk_prepare_enable(priv->axi_clk);
+ if (err < 0)
+ goto err_mg_core_clk;
+
+ /* Get system's tclk rate */
+ priv->tclk = clk_get_rate(priv->pp_clk);
+ } else {
+ err = device_property_read_u32(&pdev->dev, "clock-frequency", &priv->tclk);
+ if (err) {
+ dev_err(&pdev->dev, "missing clock-frequency value\n");
+ return err;
+ }
+ }
+
+ if (priv->hw_version >= MVPP22) {
+ err = dma_set_mask(&pdev->dev, MVPP2_DESC_DMA_MASK);
+ if (err)
+ goto err_axi_clk;
+ /* Sadly, the BM pools all share the same register to
+ * store the high 32 bits of their address. So they
+ * must all have the same high 32 bits, which forces
+ * us to restrict coherent memory to DMA_BIT_MASK(32).
+ */
+ err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (err)
+ goto err_axi_clk;
+ }
+
+ /* Map DTS-active ports. Must be done before the FIFO setup in mvpp2_init() */
+ fwnode_for_each_available_child_node(fwnode, port_fwnode) {
+ if (!fwnode_property_read_u32(port_fwnode, "port-id", &i))
+ priv->port_map |= BIT(i);
+ }
+
+ if (mvpp2_read(priv, MVPP2_VER_ID_REG) == MVPP2_VER_PP23)
+ priv->hw_version = MVPP23;
+
+ /* Init mss lock */
+ spin_lock_init(&priv->mss_spinlock);
+
+ /* Initialize network controller */
+ err = mvpp2_init(pdev, priv);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to initialize controller\n");
+ goto err_axi_clk;
+ }
+
+ err = mvpp22_tai_probe(&pdev->dev, priv);
+ if (err < 0)
+ goto err_axi_clk;
+
+ /* Initialize ports */
+ fwnode_for_each_available_child_node(fwnode, port_fwnode) {
+ err = mvpp2_port_probe(pdev, port_fwnode, priv);
+ if (err < 0)
+ goto err_port_probe;
+ }
+
+ if (priv->port_count == 0) {
+ dev_err(&pdev->dev, "no ports enabled\n");
+ err = -ENODEV;
+ goto err_axi_clk;
+ }
+
+ /* Statistics must be gathered regularly because some of them (like
+ * packet counters) are 32-bit registers and could overflow quite
+ * quickly. For instance, a 10Gb link used at full bandwidth with the
+ * smallest packets (64B) carries roughly 14.9 Mpps, which wraps a
+ * 32-bit packet counter in under five minutes. Hence, use a workqueue
+ * to accumulate the values into 64-bit counters.
+ */
+ snprintf(priv->queue_name, sizeof(priv->queue_name),
+ "stats-wq-%s%s", netdev_name(priv->port_list[0]->dev),
+ priv->port_count > 1 ? "+" : "");
+ priv->stats_queue = create_singlethread_workqueue(priv->queue_name);
+ if (!priv->stats_queue) {
+ err = -ENOMEM;
+ goto err_port_probe;
+ }
+
+ if (priv->global_tx_fc && priv->hw_version >= MVPP22) {
+ err = mvpp2_enable_global_fc(priv);
+ if (err)
+ dev_warn(&pdev->dev, "Minimum of CM3 firmware 18.09 and chip revision B0 required for flow control\n");
+ }
+
+ mvpp2_dbgfs_init(priv, pdev->name);
+
+ platform_set_drvdata(pdev, priv);
+ return 0;
+
+err_port_probe:
+ fwnode_handle_put(port_fwnode);
+
+ i = 0;
+ fwnode_for_each_available_child_node(fwnode, port_fwnode) {
+ if (priv->port_list[i])
+ mvpp2_port_remove(priv->port_list[i]);
+ i++;
+ }
+err_axi_clk:
+ clk_disable_unprepare(priv->axi_clk);
+err_mg_core_clk:
+ clk_disable_unprepare(priv->mg_core_clk);
+err_mg_clk:
+ clk_disable_unprepare(priv->mg_clk);
+err_gop_clk:
+ clk_disable_unprepare(priv->gop_clk);
+err_pp_clk:
+ clk_disable_unprepare(priv->pp_clk);
+ return err;
+}
+
+static int mvpp2_remove(struct platform_device *pdev)
+{
+ struct mvpp2 *priv = platform_get_drvdata(pdev);
+ struct fwnode_handle *fwnode = pdev->dev.fwnode;
+ int i = 0, poolnum = MVPP2_BM_POOLS_NUM;
+ struct fwnode_handle *port_fwnode;
+
+ mvpp2_dbgfs_cleanup(priv);
+
+ fwnode_for_each_available_child_node(fwnode, port_fwnode) {
+ if (priv->port_list[i]) {
+ mutex_destroy(&priv->port_list[i]->gather_stats_lock);
+ mvpp2_port_remove(priv->port_list[i]);
+ }
+ i++;
+ }
+
+ destroy_workqueue(priv->stats_queue);
+
+ if (priv->percpu_pools)
+ poolnum = mvpp2_get_nrxqs(priv) * 2;
+
+ for (i = 0; i < poolnum; i++) {
+ struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i];
+
+ mvpp2_bm_pool_destroy(&pdev->dev, priv, bm_pool);
+ }
+
+ for (i = 0; i < MVPP2_MAX_THREADS; i++) {
+ struct mvpp2_tx_queue *aggr_txq = &priv->aggr_txqs[i];
+
+ dma_free_coherent(&pdev->dev,
+ MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
+ aggr_txq->descs,
+ aggr_txq->descs_dma);
+ }
+
+ if (is_acpi_node(port_fwnode))
+ return 0;
+
+ clk_disable_unprepare(priv->axi_clk);
+ clk_disable_unprepare(priv->mg_core_clk);
+ clk_disable_unprepare(priv->mg_clk);
+ clk_disable_unprepare(priv->pp_clk);
+ clk_disable_unprepare(priv->gop_clk);
+
+ return 0;
+}
+
+static const struct of_device_id mvpp2_match[] = {
+ {
+ .compatible = "marvell,armada-375-pp2",
+ .data = (void *)MVPP21,
+ },
+ {
+ .compatible = "marvell,armada-7k-pp22",
+ .data = (void *)MVPP22,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, mvpp2_match);
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id mvpp2_acpi_match[] = {
+ { "MRVL0110", MVPP22 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, mvpp2_acpi_match);
+#endif
+
+static struct platform_driver mvpp2_driver = {
+ .probe = mvpp2_probe,
+ .remove = mvpp2_remove,
+ .driver = {
+ .name = MVPP2_DRIVER_NAME,
+ .of_match_table = mvpp2_match,
+ .acpi_match_table = ACPI_PTR(mvpp2_acpi_match),
+ },
+};
+
+static int __init mvpp2_driver_init(void)
+{
+ return platform_driver_register(&mvpp2_driver);
+}
+module_init(mvpp2_driver_init);
+
+static void __exit mvpp2_driver_exit(void)
+{
+ platform_driver_unregister(&mvpp2_driver);
+ mvpp2_dbgfs_exit();
+}
+module_exit(mvpp2_driver_exit);
+
+MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com");
+MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
new file mode 100644
index 000000000..9af22f497
--- /dev/null
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
@@ -0,0 +1,2519 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Header Parser helpers for Marvell PPv2 Network Controller
+ *
+ * Copyright (C) 2014 Marvell
+ *
+ * Marcin Wojtas <mw@semihalf.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include <uapi/linux/ppp_defs.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+
+#include "mvpp2.h"
+#include "mvpp2_prs.h"
+
+/* Update parser tcam and sram hw entries */
+static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
+{
+ int i;
+
+ if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
+ return -EINVAL;
+
+ /* Clear entry invalidation bit */
+ pe->tcam[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;
+
+ /* Write sram index - indirect access */
+ mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
+ for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
+ mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram[i]);
+
+ /* Write tcam index - indirect access */
+ mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
+ for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
+ mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam[i]);
+
+ return 0;
+}
+
+/* Initialize tcam entry from hw */
+int mvpp2_prs_init_from_hw(struct mvpp2 *priv, struct mvpp2_prs_entry *pe,
+ int tid)
+{
+ int i;
+
+ if (tid > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
+ return -EINVAL;
+
+ memset(pe, 0, sizeof(*pe));
+ pe->index = tid;
+
+ /* Write tcam index - indirect access */
+ mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
+
+ pe->tcam[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv,
+ MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
+ if (pe->tcam[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
+ return MVPP2_PRS_TCAM_ENTRY_INVALID;
+
+ for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
+ pe->tcam[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));
+
+ /* Write sram index - indirect access */
+ mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
+ for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
+ pe->sram[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i));
+
+ return 0;
+}
+
+/* Invalidate tcam hw entry */
+static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index)
+{
+ /* Write index - indirect access */
+ mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
+ mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
+ MVPP2_PRS_TCAM_INV_MASK);
+}
+
+/* Enable shadow table entry and set its lookup ID */
+static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu)
+{
+ priv->prs_shadow[index].valid = true;
+ priv->prs_shadow[index].lu = lu;
+}
+
+/* Update ri fields in shadow table entry */
+static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index,
+ unsigned int ri, unsigned int ri_mask)
+{
+ priv->prs_shadow[index].ri_mask = ri_mask;
+ priv->prs_shadow[index].ri = ri;
+}
+
+/* Update lookup field in tcam sw entry */
+static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu)
+{
+ pe->tcam[MVPP2_PRS_TCAM_LU_WORD] &= ~MVPP2_PRS_TCAM_LU(MVPP2_PRS_LU_MASK);
+ pe->tcam[MVPP2_PRS_TCAM_LU_WORD] &= ~MVPP2_PRS_TCAM_LU_EN(MVPP2_PRS_LU_MASK);
+ pe->tcam[MVPP2_PRS_TCAM_LU_WORD] |= MVPP2_PRS_TCAM_LU(lu & MVPP2_PRS_LU_MASK);
+ pe->tcam[MVPP2_PRS_TCAM_LU_WORD] |= MVPP2_PRS_TCAM_LU_EN(MVPP2_PRS_LU_MASK);
+}
+
+/* Update mask for single port in tcam sw entry */
+static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe,
+ unsigned int port, bool add)
+{
+ if (add)
+ pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] &= ~MVPP2_PRS_TCAM_PORT_EN(BIT(port));
+ else
+ pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] |= MVPP2_PRS_TCAM_PORT_EN(BIT(port));
+}
+
+/* Update port map in tcam sw entry */
+static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe,
+ unsigned int ports)
+{
+ pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] &= ~MVPP2_PRS_TCAM_PORT(MVPP2_PRS_PORT_MASK);
+ pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] &= ~MVPP2_PRS_TCAM_PORT_EN(MVPP2_PRS_PORT_MASK);
+ pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] |= MVPP2_PRS_TCAM_PORT_EN(~ports & MVPP2_PRS_PORT_MASK);
+}
+
+/* Obtain port map from tcam sw entry */
+unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
+{
+ return (~pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] >> 24) & MVPP2_PRS_PORT_MASK;
+}
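+
+/* Note that the port enable bits are stored inverted: a cleared EN bit means
+ * the port takes part in the lookup. E.g. mvpp2_prs_tcam_port_map_set(pe,
+ * BIT(0) | BIT(1)) programs ~0x3 (masked) into the EN field, so only ports 0
+ * and 1 are matched.
+ */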
+
+/* Set byte of data and its enable bits in tcam sw entry */
+static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe,
+ unsigned int offs, unsigned char byte,
+ unsigned char enable)
+{
+ int pos = MVPP2_PRS_BYTE_IN_WORD(offs) * BITS_PER_BYTE;
+
+ pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] &= ~(0xff << pos);
+ pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] &= ~(MVPP2_PRS_TCAM_EN(0xff) << pos);
+ pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] |= byte << pos;
+ pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] |= MVPP2_PRS_TCAM_EN(enable << pos);
+}
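+
+/* Within a tcam word the per-byte enable bits sit 16 bits above their data
+ * byte, hence the "pos + 16" shift in the _get helper below.
+ */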
+
+/* Get byte of data and its enable bits from tcam sw entry */
+void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
+ unsigned int offs, unsigned char *byte,
+ unsigned char *enable)
+{
+ int pos = MVPP2_PRS_BYTE_IN_WORD(offs) * BITS_PER_BYTE;
+
+ *byte = (pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] >> pos) & 0xff;
+ *enable = (pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] >> (pos + 16)) & 0xff;
+}
+
+/* Compare tcam data bytes with a pattern */
+static bool mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offs,
+ u16 data)
+{
+ u16 tcam_data;
+
+ tcam_data = pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] & 0xffff;
+ return tcam_data == data;
+}
+
+/* Update ai bits in tcam sw entry */
+static void mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe,
+ unsigned int bits, unsigned int enable)
+{
+ int i;
+
+ for (i = 0; i < MVPP2_PRS_AI_BITS; i++) {
+ if (!(enable & BIT(i)))
+ continue;
+
+ if (bits & BIT(i))
+ pe->tcam[MVPP2_PRS_TCAM_AI_WORD] |= BIT(i);
+ else
+ pe->tcam[MVPP2_PRS_TCAM_AI_WORD] &= ~BIT(i);
+ }
+
+ pe->tcam[MVPP2_PRS_TCAM_AI_WORD] |= MVPP2_PRS_TCAM_AI_EN(enable);
+}
+
+/* Get ai bits from tcam sw entry */
+static int mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *pe)
+{
+ return pe->tcam[MVPP2_PRS_TCAM_AI_WORD] & MVPP2_PRS_AI_MASK;
+}
+
+/* Set ethertype in tcam sw entry */
+static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset,
+ unsigned short ethertype)
+{
+ mvpp2_prs_tcam_data_byte_set(pe, offset + 0, ethertype >> 8, 0xff);
+ mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff);
+}
+
+/* Set vid in tcam sw entry */
+static void mvpp2_prs_match_vid(struct mvpp2_prs_entry *pe, int offset,
+ unsigned short vid)
+{
+ mvpp2_prs_tcam_data_byte_set(pe, offset + 0, (vid & 0xf00) >> 8, 0xf);
+ mvpp2_prs_tcam_data_byte_set(pe, offset + 1, vid & 0xff, 0xff);
+}
+
+/* Set bits in sram sw entry */
+static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num,
+ u32 val)
+{
+ pe->sram[MVPP2_BIT_TO_WORD(bit_num)] |= (val << (MVPP2_BIT_IN_WORD(bit_num)));
+}
+
+/* Clear bits in sram sw entry */
+static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num,
+ u32 val)
+{
+ pe->sram[MVPP2_BIT_TO_WORD(bit_num)] &= ~(val << (MVPP2_BIT_IN_WORD(bit_num)));
+}
+
+/* Update ri bits in sram sw entry */
+static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
+ unsigned int bits, unsigned int mask)
+{
+ unsigned int i;
+
+ for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
+ if (!(mask & BIT(i)))
+ continue;
+
+ if (bits & BIT(i))
+ mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_OFFS + i,
+ 1);
+ else
+ mvpp2_prs_sram_bits_clear(pe,
+ MVPP2_PRS_SRAM_RI_OFFS + i,
+ 1);
+
+ mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
+ }
+}
+
+/* Obtain ri bits from sram sw entry */
+static int mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe)
+{
+ return pe->sram[MVPP2_PRS_SRAM_RI_WORD];
+}
+
+/* Update ai bits in sram sw entry */
+static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
+ unsigned int bits, unsigned int mask)
+{
+ unsigned int i;
+
+ for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
+ if (!(mask & BIT(i)))
+ continue;
+
+ if (bits & BIT(i))
+ mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_OFFS + i,
+ 1);
+ else
+ mvpp2_prs_sram_bits_clear(pe,
+ MVPP2_PRS_SRAM_AI_OFFS + i,
+ 1);
+
+ mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
+ }
+}
+
+/* Read ai bits from sram sw entry */
+static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
+{
+ u8 bits;
+ /* ai is stored in bits 90..97, so it spreads across two u32 words */
+ int ai_off = MVPP2_BIT_TO_WORD(MVPP2_PRS_SRAM_AI_OFFS);
+ int ai_shift = MVPP2_BIT_IN_WORD(MVPP2_PRS_SRAM_AI_OFFS);
+
+ bits = (pe->sram[ai_off] >> ai_shift) |
+ (pe->sram[ai_off + 1] << (32 - ai_shift));
+
+ return bits;
+}
+
+/* In sram sw entry set the lookup ID field of the tcam key to be used in the
+ * next lookup iteration
+ */
+static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe,
+ unsigned int lu)
+{
+ int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;
+
+ mvpp2_prs_sram_bits_clear(pe, sram_next_off,
+ MVPP2_PRS_SRAM_NEXT_LU_MASK);
+ mvpp2_prs_sram_bits_set(pe, sram_next_off, lu);
+}
+
+/* In the sram sw entry set sign and value of the next lookup offset
+ * and the offset value generated to the classifier
+ */
+static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
+ unsigned int op)
+{
+ /* Set sign */
+ if (shift < 0) {
+ mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
+ shift = 0 - shift;
+ } else {
+ mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
+ }
+
+ /* Set value */
+ pe->sram[MVPP2_BIT_TO_WORD(MVPP2_PRS_SRAM_SHIFT_OFFS)] |=
+ shift & MVPP2_PRS_SRAM_SHIFT_MASK;
+
+ /* Reset and set operation */
+ mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
+ MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
+ mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);
+
+ /* Set base offset as current */
+ mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
+}
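+
+/* E.g. mvpp2_prs_sram_shift_set(pe, -12, ...) sets the shift sign bit and
+ * stores a magnitude of 12, as done in mvpp2_prs_ip4_cast() to shift back to
+ * the IPv4 proto field.
+ */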
+
+/* In the sram sw entry set sign and value of the user defined offset
+ * generated to the classifier
+ */
+static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
+ unsigned int type, int offset,
+ unsigned int op)
+{
+ /* Set sign */
+ if (offset < 0) {
+ mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
+ offset = 0 - offset;
+ } else {
+ mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
+ }
+
+ /* Set value */
+ mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
+ MVPP2_PRS_SRAM_UDF_MASK);
+ mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS,
+ offset & MVPP2_PRS_SRAM_UDF_MASK);
+
+ /* Set offset type */
+ mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
+ MVPP2_PRS_SRAM_UDF_TYPE_MASK);
+ mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);
+
+ /* Set offset operation */
+ mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
+ MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
+ mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
+ op & MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
+
+ /* Set base offset as current */
+ mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
+}
+
+/* Find parser flow entry */
+static int mvpp2_prs_flow_find(struct mvpp2 *priv, int flow)
+{
+ struct mvpp2_prs_entry pe;
+ int tid;
+
+ /* Go through all the entries with MVPP2_PRS_LU_FLOWS */
+ for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
+ u8 bits;
+
+ if (!priv->prs_shadow[tid].valid ||
+ priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
+ continue;
+
+ mvpp2_prs_init_from_hw(priv, &pe, tid);
+ bits = mvpp2_prs_sram_ai_get(&pe);
+
+ /* Sram stores the classification lookup ID in AI bits [5:0] */
+ if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
+ return tid;
+ }
+
+ return -ENOENT;
+}
+
+/* Return first free tcam index, seeking from start to end */
+static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
+ unsigned char end)
+{
+ int tid;
+
+ if (start > end)
+ swap(start, end);
+
+ for (tid = start; tid <= end; tid++) {
+ if (!priv->prs_shadow[tid].valid)
+ return tid;
+ }
+
+ return -EINVAL;
+}
+
+/* Drop flow control pause frames */
+static void mvpp2_prs_drop_fc(struct mvpp2 *priv)
+{
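+ /* 01:80:C2:00:00:01 is the IEEE 802.3 MAC Control (PAUSE) destination
+ * address used by flow control frames.
+ */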
+ unsigned char da[ETH_ALEN] = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x01 };
+ struct mvpp2_prs_entry pe;
+ unsigned int len;
+
+ memset(&pe, 0, sizeof(pe));
+
+ /* For all ports - drop flow control frames */
+ pe.index = MVPP2_PE_FC_DROP;
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
+
+ /* Set match on DA */
+ len = ETH_ALEN;
+ while (len--)
+ mvpp2_prs_tcam_data_byte_set(&pe, len, da[len], 0xff);
+
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
+ MVPP2_PRS_RI_DROP_MASK);
+
+ mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+
+ /* Unmask all ports */
+ mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+ /* Update shadow table and hw entry */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
+ mvpp2_prs_hw_write(priv, &pe);
+}
+
+/* Enable/disable dropping all mac da's */
+static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
+{
+ struct mvpp2_prs_entry pe;
+
+ if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) {
+ /* Entry exists - update port only */
+ mvpp2_prs_init_from_hw(priv, &pe, MVPP2_PE_DROP_ALL);
+ } else {
+ /* Entry doesn't exist - create new */
+ memset(&pe, 0, sizeof(pe));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
+ pe.index = MVPP2_PE_DROP_ALL;
+
+ /* Non-promiscuous mode for all ports - DROP unknown packets */
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
+ MVPP2_PRS_RI_DROP_MASK);
+
+ mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+
+ /* Update shadow table */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
+
+ /* Mask all ports */
+ mvpp2_prs_tcam_port_map_set(&pe, 0);
+ }
+
+ /* Update port mask */
+ mvpp2_prs_tcam_port_set(&pe, port, add);
+
+ mvpp2_prs_hw_write(priv, &pe);
+}
+
+/* Set port to unicast or multicast promiscuous mode */
+void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port,
+ enum mvpp2_prs_l2_cast l2_cast, bool add)
+{
+ struct mvpp2_prs_entry pe;
+ unsigned char cast_match;
+ unsigned int ri;
+ int tid;
+
+ if (l2_cast == MVPP2_PRS_L2_UNI_CAST) {
+ cast_match = MVPP2_PRS_UCAST_VAL;
+ tid = MVPP2_PE_MAC_UC_PROMISCUOUS;
+ ri = MVPP2_PRS_RI_L2_UCAST;
+ } else {
+ cast_match = MVPP2_PRS_MCAST_VAL;
+ tid = MVPP2_PE_MAC_MC_PROMISCUOUS;
+ ri = MVPP2_PRS_RI_L2_MCAST;
+ }
+
+ /* promiscuous mode - Accept unknown unicast or multicast packets */
+ if (priv->prs_shadow[tid].valid) {
+ mvpp2_prs_init_from_hw(priv, &pe, tid);
+ } else {
+ memset(&pe, 0, sizeof(pe));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
+ pe.index = tid;
+
+ /* Continue - set next lookup */
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
+
+ /* Set result info bits */
+ mvpp2_prs_sram_ri_update(&pe, ri, MVPP2_PRS_RI_L2_CAST_MASK);
+
+ /* Match UC or MC addresses */
+ mvpp2_prs_tcam_data_byte_set(&pe, 0, cast_match,
+ MVPP2_PRS_CAST_MASK);
+
+ /* Shift to ethertype */
+ mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
+ MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+ /* Mask all ports */
+ mvpp2_prs_tcam_port_map_set(&pe, 0);
+
+ /* Update shadow table */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
+ }
+
+ /* Update port mask */
+ mvpp2_prs_tcam_port_set(&pe, port, add);
+
+ mvpp2_prs_hw_write(priv, &pe);
+}
+
+/* Set entry for dsa packets */
+static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add,
+ bool tagged, bool extend)
+{
+ struct mvpp2_prs_entry pe;
+ int tid, shift;
+
+ if (extend) {
+ tid = tagged ? MVPP2_PE_EDSA_TAGGED : MVPP2_PE_EDSA_UNTAGGED;
+ shift = 8;
+ } else {
+ tid = tagged ? MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED;
+ shift = 4;
+ }
+
+ if (priv->prs_shadow[tid].valid) {
+ /* Entry exists - update port only */
+ mvpp2_prs_init_from_hw(priv, &pe, tid);
+ } else {
+ /* Entry doesn't exist - create new */
+ memset(&pe, 0, sizeof(pe));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
+ pe.index = tid;
+
+ /* Update shadow table */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);
+
+ if (tagged) {
+ /* Set tagged bit in DSA tag */
+ mvpp2_prs_tcam_data_byte_set(&pe, 0,
+ MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
+ MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
+
+ /* Set ai bits for next iteration */
+ if (extend)
+ mvpp2_prs_sram_ai_update(&pe, 1,
+ MVPP2_PRS_SRAM_AI_MASK);
+ else
+ mvpp2_prs_sram_ai_update(&pe, 0,
+ MVPP2_PRS_SRAM_AI_MASK);
+
+ /* Set result info bits to 'single vlan' */
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_SINGLE,
+ MVPP2_PRS_RI_VLAN_MASK);
+ /* If the packet is tagged, continue with VID filtering */
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID);
+ } else {
+ /* Shift 4 bytes for DSA tag or 8 bytes for EDSA tag */
+ mvpp2_prs_sram_shift_set(&pe, shift,
+ MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+ /* Set result info bits to 'no vlans' */
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
+ MVPP2_PRS_RI_VLAN_MASK);
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
+ }
+
+ /* Mask all ports */
+ mvpp2_prs_tcam_port_map_set(&pe, 0);
+ }
+
+ /* Update port mask */
+ mvpp2_prs_tcam_port_set(&pe, port, add);
+
+ mvpp2_prs_hw_write(priv, &pe);
+}
+
+/* Set entry for dsa ethertype */
+static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port,
+ bool add, bool tagged, bool extend)
+{
+ struct mvpp2_prs_entry pe;
+ int tid, shift, port_mask;
+
+ if (extend) {
+ tid = tagged ? MVPP2_PE_ETYPE_EDSA_TAGGED :
+ MVPP2_PE_ETYPE_EDSA_UNTAGGED;
+ port_mask = 0;
+ shift = 8;
+ } else {
+ tid = tagged ? MVPP2_PE_ETYPE_DSA_TAGGED :
+ MVPP2_PE_ETYPE_DSA_UNTAGGED;
+ port_mask = MVPP2_PRS_PORT_MASK;
+ shift = 4;
+ }
+
+ if (priv->prs_shadow[tid].valid) {
+ /* Entry exists - update port only */
+ mvpp2_prs_init_from_hw(priv, &pe, tid);
+ } else {
+ /* Entry doesn't exist - create new */
+ memset(&pe, 0, sizeof(pe));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
+ pe.index = tid;
+
+ /* Set ethertype */
+ mvpp2_prs_match_etype(&pe, 0, ETH_P_EDSA);
+ mvpp2_prs_match_etype(&pe, 2, 0);
+
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DSA_MASK,
+ MVPP2_PRS_RI_DSA_MASK);
+ /* Shift ethertype + 2 reserved bytes + tag */
+ mvpp2_prs_sram_shift_set(&pe, 2 + MVPP2_ETH_TYPE_LEN + shift,
+ MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+ /* Update shadow table */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);
+
+ if (tagged) {
+ /* Set tagged bit in DSA tag */
+ mvpp2_prs_tcam_data_byte_set(&pe,
+ MVPP2_ETH_TYPE_LEN + 2 + 3,
+ MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
+ MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
+ /* Clear all ai bits for next iteration */
+ mvpp2_prs_sram_ai_update(&pe, 0,
+ MVPP2_PRS_SRAM_AI_MASK);
+ /* If the packet is tagged, continue checking VLANs */
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
+ } else {
+ /* Set result info bits to 'no vlans' */
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
+ MVPP2_PRS_RI_VLAN_MASK);
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
+ }
+ /* Mask/unmask all ports, depending on dsa type */
+ mvpp2_prs_tcam_port_map_set(&pe, port_mask);
+ }
+
+ /* Update port mask */
+ mvpp2_prs_tcam_port_set(&pe, port, add);
+
+ mvpp2_prs_hw_write(priv, &pe);
+}
+
+/* Search for existing single/triple vlan entry */
+static int mvpp2_prs_vlan_find(struct mvpp2 *priv, unsigned short tpid, int ai)
+{
+ struct mvpp2_prs_entry pe;
+ int tid;
+
+ /* Go through all the entries with MVPP2_PRS_LU_VLAN */
+ for (tid = MVPP2_PE_FIRST_FREE_TID;
+ tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
+ unsigned int ri_bits, ai_bits;
+ bool match;
+
+ if (!priv->prs_shadow[tid].valid ||
+ priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
+ continue;
+
+ mvpp2_prs_init_from_hw(priv, &pe, tid);
+ match = mvpp2_prs_tcam_data_cmp(&pe, 0, tpid);
+ if (!match)
+ continue;
+
+ /* Get vlan type */
+ ri_bits = mvpp2_prs_sram_ri_get(&pe);
+ ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
+
+ /* Get current ai value from tcam */
+ ai_bits = mvpp2_prs_tcam_ai_get(&pe);
+ /* Clear double vlan bit */
+ ai_bits &= ~MVPP2_PRS_DBL_VLAN_AI_BIT;
+
+ if (ai != ai_bits)
+ continue;
+
+ if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
+ ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
+ return tid;
+ }
+
+ return -ENOENT;
+}
+
+/* Add/update single/triple vlan entry */
+static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
+ unsigned int port_map)
+{
+ struct mvpp2_prs_entry pe;
+ int tid_aux, tid;
+ int ret = 0;
+
+ memset(&pe, 0, sizeof(pe));
+
+ tid = mvpp2_prs_vlan_find(priv, tpid, ai);
+
+ if (tid < 0) {
+ /* Create new tcam entry */
+ tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_LAST_FREE_TID,
+ MVPP2_PE_FIRST_FREE_TID);
+ if (tid < 0)
+ return tid;
+
+ /* Get last double vlan tid */
+ for (tid_aux = MVPP2_PE_LAST_FREE_TID;
+ tid_aux >= MVPP2_PE_FIRST_FREE_TID; tid_aux--) {
+ unsigned int ri_bits;
+
+ if (!priv->prs_shadow[tid_aux].valid ||
+ priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
+ continue;
+
+ mvpp2_prs_init_from_hw(priv, &pe, tid_aux);
+ ri_bits = mvpp2_prs_sram_ri_get(&pe);
+ if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) ==
+ MVPP2_PRS_RI_VLAN_DOUBLE)
+ break;
+ }
+
+ if (tid <= tid_aux)
+ return -EINVAL;
+
+ memset(&pe, 0, sizeof(pe));
+ pe.index = tid;
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
+
+ mvpp2_prs_match_etype(&pe, 0, tpid);
+
+ /* VLAN tag detected, proceed with VID filtering */
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID);
+
+ /* Clear all ai bits for next iteration */
+ mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
+
+ if (ai == MVPP2_PRS_SINGLE_VLAN_AI) {
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_SINGLE,
+ MVPP2_PRS_RI_VLAN_MASK);
+ } else {
+ ai |= MVPP2_PRS_DBL_VLAN_AI_BIT;
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_TRIPLE,
+ MVPP2_PRS_RI_VLAN_MASK);
+ }
+ mvpp2_prs_tcam_ai_update(&pe, ai, MVPP2_PRS_SRAM_AI_MASK);
+
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
+ } else {
+ mvpp2_prs_init_from_hw(priv, &pe, tid);
+ }
+ /* Update ports' mask */
+ mvpp2_prs_tcam_port_map_set(&pe, port_map);
+
+ mvpp2_prs_hw_write(priv, &pe);
+
+ return ret;
+}
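+
+/* Note the ordering enforced above: single/triple VLAN entries always end up
+ * at higher tids than double VLAN ones (mvpp2_prs_double_vlan_add() below
+ * applies the converse check), presumably so that a QinQ frame hits the more
+ * specific double VLAN entry first.
+ */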
+
+/* Get first free double vlan ai number */
+static int mvpp2_prs_double_vlan_ai_free_get(struct mvpp2 *priv)
+{
+ int i;
+
+ for (i = 1; i < MVPP2_PRS_DBL_VLANS_MAX; i++) {
+ if (!priv->prs_double_vlans[i])
+ return i;
+ }
+
+ return -EINVAL;
+}
+
+/* Search for existing double vlan entry */
+static int mvpp2_prs_double_vlan_find(struct mvpp2 *priv, unsigned short tpid1,
+ unsigned short tpid2)
+{
+ struct mvpp2_prs_entry pe;
+ int tid;
+
+ /* Go through all the entries with MVPP2_PRS_LU_VLAN */
+ for (tid = MVPP2_PE_FIRST_FREE_TID;
+ tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
+ unsigned int ri_mask;
+ bool match;
+
+ if (!priv->prs_shadow[tid].valid ||
+ priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
+ continue;
+
+ mvpp2_prs_init_from_hw(priv, &pe, tid);
+
+ match = mvpp2_prs_tcam_data_cmp(&pe, 0, tpid1) &&
+ mvpp2_prs_tcam_data_cmp(&pe, 4, tpid2);
+
+ if (!match)
+ continue;
+
+ ri_mask = mvpp2_prs_sram_ri_get(&pe) & MVPP2_PRS_RI_VLAN_MASK;
+ if (ri_mask == MVPP2_PRS_RI_VLAN_DOUBLE)
+ return tid;
+ }
+
+ return -ENOENT;
+}
+
+/* Add or update double vlan entry */
+static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
+ unsigned short tpid2,
+ unsigned int port_map)
+{
+ int tid_aux, tid, ai, ret = 0;
+ struct mvpp2_prs_entry pe;
+
+ memset(&pe, 0, sizeof(pe));
+
+ tid = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2);
+
+ if (tid < 0) {
+ /* Create new tcam entry */
+ tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+ MVPP2_PE_LAST_FREE_TID);
+ if (tid < 0)
+ return tid;
+
+ /* Set ai value for new double vlan entry */
+ ai = mvpp2_prs_double_vlan_ai_free_get(priv);
+ if (ai < 0)
+ return ai;
+
+ /* Get first single/triple vlan tid */
+ for (tid_aux = MVPP2_PE_FIRST_FREE_TID;
+ tid_aux <= MVPP2_PE_LAST_FREE_TID; tid_aux++) {
+ unsigned int ri_bits;
+
+ if (!priv->prs_shadow[tid_aux].valid ||
+ priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
+ continue;
+
+ mvpp2_prs_init_from_hw(priv, &pe, tid_aux);
+ ri_bits = mvpp2_prs_sram_ri_get(&pe);
+ ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
+ if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
+ ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
+ break;
+ }
+
+ if (tid >= tid_aux)
+ return -ERANGE;
+
+ memset(&pe, 0, sizeof(pe));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
+ pe.index = tid;
+
+ priv->prs_double_vlans[ai] = true;
+
+ mvpp2_prs_match_etype(&pe, 0, tpid1);
+ mvpp2_prs_match_etype(&pe, 4, tpid2);
+
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
+ /* Shift 4 bytes - skip outer vlan tag */
+ mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_LEN,
+ MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
+ MVPP2_PRS_RI_VLAN_MASK);
+ mvpp2_prs_sram_ai_update(&pe, ai | MVPP2_PRS_DBL_VLAN_AI_BIT,
+ MVPP2_PRS_SRAM_AI_MASK);
+
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
+ } else {
+ mvpp2_prs_init_from_hw(priv, &pe, tid);
+ }
+
+ /* Update ports' mask */
+ mvpp2_prs_tcam_port_map_set(&pe, port_map);
+ mvpp2_prs_hw_write(priv, &pe);
+
+ return ret;
+}
+
+/* IPv4 header parsing for fragmentation and L4 offset */
+static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
+ unsigned int ri, unsigned int ri_mask)
+{
+ struct mvpp2_prs_entry pe;
+ int tid;
+
+ if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
+ (proto != IPPROTO_IGMP))
+ return -EINVAL;
+
+ /* Not fragmented packet */
+ tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+ MVPP2_PE_LAST_FREE_TID);
+ if (tid < 0)
+ return tid;
+
+ memset(&pe, 0, sizeof(pe));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
+ pe.index = tid;
+
+ /* Finished: go to flowid generation */
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+ mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+
+ /* Set L3 offset */
+ mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, -4,
+ MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+ mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
+ mvpp2_prs_sram_ri_update(&pe, ri, ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);
+
+ mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00,
+ MVPP2_PRS_TCAM_PROTO_MASK_L);
+ mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00,
+ MVPP2_PRS_TCAM_PROTO_MASK);
+
+ mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK);
+ mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
+ MVPP2_PRS_IPV4_DIP_AI_BIT);
+ /* Unmask all ports */
+ mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+ /* Update shadow table and hw entry */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
+ mvpp2_prs_hw_write(priv, &pe);
+
+ /* Fragmented packet */
+ tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+ MVPP2_PE_LAST_FREE_TID);
+ if (tid < 0)
+ return tid;
+
+ pe.index = tid;
+ /* Clear ri before updating */
+ pe.sram[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
+ pe.sram[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
+ mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
+
+ mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_TRUE,
+ ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);
+
+ mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, 0x0);
+ mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, 0x0);
+
+ /* Update shadow table and hw entry */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
+ mvpp2_prs_hw_write(priv, &pe);
+
+ return 0;
+}
+
+/* IPv4 L3 multicast or broadcast */
+static int mvpp2_prs_ip4_cast(struct mvpp2 *priv, unsigned short l3_cast)
+{
+ struct mvpp2_prs_entry pe;
+ int mask, tid;
+
+ tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+ MVPP2_PE_LAST_FREE_TID);
+ if (tid < 0)
+ return tid;
+
+ memset(&pe, 0, sizeof(pe));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
+ pe.index = tid;
+
+ switch (l3_cast) {
+ case MVPP2_PRS_L3_MULTI_CAST:
+ mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV4_MC,
+ MVPP2_PRS_IPV4_MC_MASK);
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
+ MVPP2_PRS_RI_L3_ADDR_MASK);
+ break;
+ case MVPP2_PRS_L3_BROAD_CAST:
+ mask = MVPP2_PRS_IPV4_BC_MASK;
+ mvpp2_prs_tcam_data_byte_set(&pe, 0, mask, mask);
+ mvpp2_prs_tcam_data_byte_set(&pe, 1, mask, mask);
+ mvpp2_prs_tcam_data_byte_set(&pe, 2, mask, mask);
+ mvpp2_prs_tcam_data_byte_set(&pe, 3, mask, mask);
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_BCAST,
+ MVPP2_PRS_RI_L3_ADDR_MASK);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Go again to ipv4 */
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
+
+ mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
+ MVPP2_PRS_IPV4_DIP_AI_BIT);
+
+ /* Shift back to IPv4 proto */
+ mvpp2_prs_sram_shift_set(&pe, -12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+ mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
+
+ /* Unmask all ports */
+ mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+ /* Update shadow table and hw entry */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
+ mvpp2_prs_hw_write(priv, &pe);
+
+ return 0;
+}
+
+/* Set entries for protocols over IPv6 */
+static int mvpp2_prs_ip6_proto(struct mvpp2 *priv, unsigned short proto,
+ unsigned int ri, unsigned int ri_mask)
+{
+ struct mvpp2_prs_entry pe;
+ int tid;
+
+ if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
+ (proto != IPPROTO_ICMPV6) && (proto != IPPROTO_IPIP))
+ return -EINVAL;
+
+ tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+ MVPP2_PE_LAST_FREE_TID);
+ if (tid < 0)
+ return tid;
+
+ memset(&pe, 0, sizeof(pe));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
+ pe.index = tid;
+
+ /* Finished: go to flowid generation */
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+ mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+ mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
+ mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
+ sizeof(struct ipv6hdr) - 6,
+ MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+ mvpp2_prs_tcam_data_byte_set(&pe, 0, proto, MVPP2_PRS_TCAM_PROTO_MASK);
+ mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
+ MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
+ /* Unmask all ports */
+ mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+ /* Write HW */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
+ mvpp2_prs_hw_write(priv, &pe);
+
+ return 0;
+}
+
+/* IPv6 L3 multicast entry */
+static int mvpp2_prs_ip6_cast(struct mvpp2 *priv, unsigned short l3_cast)
+{
+ struct mvpp2_prs_entry pe;
+ int tid;
+
+ if (l3_cast != MVPP2_PRS_L3_MULTI_CAST)
+ return -EINVAL;
+
+ tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+ MVPP2_PE_LAST_FREE_TID);
+ if (tid < 0)
+ return tid;
+
+ memset(&pe, 0, sizeof(pe));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
+ pe.index = tid;
+
+ /* Finished: go to flowid generation */
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
+ MVPP2_PRS_RI_L3_ADDR_MASK);
+ mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
+ MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
+ /* Shift back to IPv6 NH */
+ mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+ mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV6_MC,
+ MVPP2_PRS_IPV6_MC_MASK);
+ mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
+ /* Unmask all ports */
+ mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+ /* Update shadow table and hw entry */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
+ mvpp2_prs_hw_write(priv, &pe);
+
+ return 0;
+}
+
+/* Parser per-port initialization */
+static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first,
+ int lu_max, int offset)
+{
+ u32 val;
+
+ /* Set lookup ID */
+ val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG);
+ val &= ~MVPP2_PRS_PORT_LU_MASK(port);
+ val |= MVPP2_PRS_PORT_LU_VAL(port, lu_first);
+ mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val);
+
+ /* Set maximum number of loops for packet received from port */
+ val = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port));
+ val &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
+ val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
+ mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val);
+
+ /* Set initial offset for packet header extraction for the first
+ * searching loop
+ */
+ val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port));
+ val &= ~MVPP2_PRS_INIT_OFF_MASK(port);
+ val |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
+ mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val);
+}
+
+/* Default flow entries initialization for all ports */
+static void mvpp2_prs_def_flow_init(struct mvpp2 *priv)
+{
+ struct mvpp2_prs_entry pe;
+ int port;
+
+ for (port = 0; port < MVPP2_MAX_PORTS; port++) {
+ memset(&pe, 0, sizeof(pe));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+ pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port;
+
+ /* Mask all ports */
+ mvpp2_prs_tcam_port_map_set(&pe, 0);
+
+ /* Set flow ID */
+ mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK);
+ mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
+
+ /* Update shadow table and hw entry */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
+ mvpp2_prs_hw_write(priv, &pe);
+ }
+}
+
+/* Set default entry for Marvell Header field */
+static void mvpp2_prs_mh_init(struct mvpp2 *priv)
+{
+ struct mvpp2_prs_entry pe;
+
+ memset(&pe, 0, sizeof(pe));
+
+ pe.index = MVPP2_PE_MH_DEFAULT;
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
+ mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
+ MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);
+
+ /* Unmask all ports */
+ mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+ /* Update shadow table and hw entry */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
+ mvpp2_prs_hw_write(priv, &pe);
+
+ /* Set MH entry that skips the parser */
+ pe.index = MVPP2_PE_MH_SKIP_PRS;
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
+ mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
+ MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+ mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+
+ /* Mask all ports */
+ mvpp2_prs_tcam_port_map_set(&pe, 0);
+
+ /* Update shadow table and hw entry */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
+ mvpp2_prs_hw_write(priv, &pe);
+}
+
+/* Set default entries (placeholders) for promiscuous, non-promiscuous and
+ * multicast MAC addresses
+ */
+static void mvpp2_prs_mac_init(struct mvpp2 *priv)
+{
+ struct mvpp2_prs_entry pe;
+
+ memset(&pe, 0, sizeof(pe));
+
+ /* Non-promiscuous mode for all ports - DROP unknown packets */
+ pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
+
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
+ MVPP2_PRS_RI_DROP_MASK);
+ mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+
+ /* Unmask all ports */
+ mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+ /* Update shadow table and hw entry */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
+ mvpp2_prs_hw_write(priv, &pe);
+
+ /* Create dummy entries for drop all and promiscuous modes */
+ mvpp2_prs_drop_fc(priv);
+ mvpp2_prs_mac_drop_all_set(priv, 0, false);
+ mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_UNI_CAST, false);
+ mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_MULTI_CAST, false);
+}
+
+/* Set default entries for various types of dsa packets */
+static void mvpp2_prs_dsa_init(struct mvpp2 *priv)
+{
+ struct mvpp2_prs_entry pe;
+
+ /* Non-tagged EDSA entry - placeholder */
+ mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
+ MVPP2_PRS_EDSA);
+
+ /* Tagged EDSA entry - placeholder */
+ mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
+
+ /* Non-tagged DSA entry - placeholder */
+ mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
+ MVPP2_PRS_DSA);
+
+ /* Tagged DSA entry - placeholder */
+ mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
+
+ /* Non-tagged EDSA ethertype entry - placeholder */
+ mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
+ MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
+
+ /* Tagged EDSA ethertype entry - placeholder */
+ mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
+ MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
+
+ /* Non-tagged DSA ethertype entry */
+ mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
+ MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
+
+ /* Tagged DSA ethertype entry */
+ mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
+ MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
+
+ /* Set default entry, in case DSA or EDSA tag not found */
+ memset(&pe, 0, sizeof(pe));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
+ pe.index = MVPP2_PE_DSA_DEFAULT;
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
+
+ /* Shift 0 bytes */
+ mvpp2_prs_sram_shift_set(&pe, 0, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
+
+ /* Clear all sram ai bits for next iteration */
+ mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
+
+ /* Unmask all ports */
+ mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+ mvpp2_prs_hw_write(priv, &pe);
+}
+
+/* Initialize parser entries for VID filtering */
+static void mvpp2_prs_vid_init(struct mvpp2 *priv)
+{
+ struct mvpp2_prs_entry pe;
+
+ memset(&pe, 0, sizeof(pe));
+
+ /* Set default vid entry */
+ pe.index = MVPP2_PE_VID_FLTR_DEFAULT;
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);
+
+ mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_EDSA_VID_AI_BIT);
+
+ /* Skip VLAN header - Set offset to 4 bytes */
+ mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_LEN,
+ MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+ /* Clear all ai bits for next iteration */
+ mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
+
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
+
+ /* Unmask all ports */
+ mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+ /* Update shadow table and hw entry */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
+ mvpp2_prs_hw_write(priv, &pe);
+
+ /* Set default vid entry for extended DSA */
+ memset(&pe, 0, sizeof(pe));
+
+ /* Set default vid entry */
+ pe.index = MVPP2_PE_VID_EDSA_FLTR_DEFAULT;
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);
+
+ mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_EDSA_VID_AI_BIT,
+ MVPP2_PRS_EDSA_VID_AI_BIT);
+
+ /* Skip VLAN header - Set offset to 8 bytes */
+ mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_EDSA_LEN,
+ MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+ /* Clear all ai bits for next iteration */
+ mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
+
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
+
+ /* Unmask all ports */
+ mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+ /* Update shadow table and hw entry */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
+ mvpp2_prs_hw_write(priv, &pe);
+}
+
+/* Match basic ethertypes */
+static int mvpp2_prs_etype_init(struct mvpp2 *priv)
+{
+ struct mvpp2_prs_entry pe;
+ int tid, ihl;
+
+ /* Ethertype: PPPoE */
+ tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+ MVPP2_PE_LAST_FREE_TID);
+ if (tid < 0)
+ return tid;
+
+ memset(&pe, 0, sizeof(pe));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
+ pe.index = tid;
+
+ mvpp2_prs_match_etype(&pe, 0, ETH_P_PPP_SES);
+
+ mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
+ MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
+ MVPP2_PRS_RI_PPPOE_MASK);
+
+ /* Update shadow table and hw entry */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
+ priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
+ priv->prs_shadow[pe.index].finish = false;
+ mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
+ MVPP2_PRS_RI_PPPOE_MASK);
+ mvpp2_prs_hw_write(priv, &pe);
+
+ /* Ethertype: ARP */
+ tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+ MVPP2_PE_LAST_FREE_TID);
+ if (tid < 0)
+ return tid;
+
+ memset(&pe, 0, sizeof(pe));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
+ pe.index = tid;
+
+ mvpp2_prs_match_etype(&pe, 0, ETH_P_ARP);
+
+ /* Generate flow in the next iteration */
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+ mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
+ MVPP2_PRS_RI_L3_PROTO_MASK);
+ /* Set L3 offset */
+ mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
+ MVPP2_ETH_TYPE_LEN,
+ MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+ /* Update shadow table and hw entry */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
+ priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
+ priv->prs_shadow[pe.index].finish = true;
+ mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP,
+ MVPP2_PRS_RI_L3_PROTO_MASK);
+ mvpp2_prs_hw_write(priv, &pe);
+
+ /* Ethertype: LBTD */
+ tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+ MVPP2_PE_LAST_FREE_TID);
+ if (tid < 0)
+ return tid;
+
+ memset(&pe, 0, sizeof(pe));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
+ pe.index = tid;
+
+ mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);
+
+ /* Generate flow in the next iteration */
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+ mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
+ MVPP2_PRS_RI_UDF3_RX_SPECIAL,
+ MVPP2_PRS_RI_CPU_CODE_MASK |
+ MVPP2_PRS_RI_UDF3_MASK);
+ /* Set L3 offset */
+ mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
+ MVPP2_ETH_TYPE_LEN,
+ MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+ /* Update shadow table and hw entry */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
+ priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
+ priv->prs_shadow[pe.index].finish = true;
+ mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
+ MVPP2_PRS_RI_UDF3_RX_SPECIAL,
+ MVPP2_PRS_RI_CPU_CODE_MASK |
+ MVPP2_PRS_RI_UDF3_MASK);
+ mvpp2_prs_hw_write(priv, &pe);
+
+ /* Ethertype: IPv4 with header length >= 5 */
+ for (ihl = MVPP2_PRS_IPV4_IHL_MIN; ihl <= MVPP2_PRS_IPV4_IHL_MAX; ihl++) {
+ tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+ MVPP2_PE_LAST_FREE_TID);
+ if (tid < 0)
+ return tid;
+
+ memset(&pe, 0, sizeof(pe));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
+ pe.index = tid;
+
+ mvpp2_prs_match_etype(&pe, 0, ETH_P_IP);
+ mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
+ MVPP2_PRS_IPV4_HEAD | ihl,
+ MVPP2_PRS_IPV4_HEAD_MASK |
+ MVPP2_PRS_IPV4_IHL_MASK);
+
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
+ MVPP2_PRS_RI_L3_PROTO_MASK);
+ /* goto ipv4 dst-address (skip eth_type + IP-header-size - 4) */
+ mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN +
+ sizeof(struct iphdr) - 4,
+ MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+ /* Set L4 offset */
+ mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
+ MVPP2_ETH_TYPE_LEN + (ihl * 4),
+ MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+ /* Update shadow table and hw entry */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
+ priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
+ priv->prs_shadow[pe.index].finish = false;
+ mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4,
+ MVPP2_PRS_RI_L3_PROTO_MASK);
+ mvpp2_prs_hw_write(priv, &pe);
+ }
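+
+ /* E.g. for ihl = 5 (a plain 20-byte IPv4 header) the entry above shifts
+ * by MVPP2_ETH_TYPE_LEN + 16 bytes, landing on the destination address,
+ * and reports an L4 offset of MVPP2_ETH_TYPE_LEN + 20 bytes.
+ */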
+
+ /* Ethertype: IPv6 without options */
+ tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+ MVPP2_PE_LAST_FREE_TID);
+ if (tid < 0)
+ return tid;
+
+ memset(&pe, 0, sizeof(pe));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
+ pe.index = tid;
+
+ mvpp2_prs_match_etype(&pe, 0, ETH_P_IPV6);
+
+ /* Skip DIP of IPV6 header */
+ mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
+ MVPP2_MAX_L3_ADDR_SIZE,
+ MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
+ MVPP2_PRS_RI_L3_PROTO_MASK);
+ /* Set L3 offset */
+ mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
+ MVPP2_ETH_TYPE_LEN,
+ MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
+ priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
+ priv->prs_shadow[pe.index].finish = false;
+ mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6,
+ MVPP2_PRS_RI_L3_PROTO_MASK);
+ mvpp2_prs_hw_write(priv, &pe);
+
+ /* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */
+ memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
+ pe.index = MVPP2_PE_ETH_TYPE_UN;
+
+ /* Unmask all ports */
+ mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+ /* Generate flow in the next iteration */
+ mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
+ MVPP2_PRS_RI_L3_PROTO_MASK);
+ /* Set L3 offset even if it's unknown L3 */
+ mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
+ MVPP2_ETH_TYPE_LEN,
+ MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+ /* Update shadow table and hw entry */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
+ priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
+ priv->prs_shadow[pe.index].finish = true;
+ mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN,
+ MVPP2_PRS_RI_L3_PROTO_MASK);
+ mvpp2_prs_hw_write(priv, &pe);
+
+ return 0;
+}
+
+/* Configure vlan entries and detect up to 2 successive VLAN tags.
+ * Possible options:
+ * 0x8100, 0x88A8
+ * 0x8100, 0x8100
+ * 0x8100
+ * 0x88A8
+ */
+static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv)
+{
+ struct mvpp2_prs_entry pe;
+ int err;
+
+ priv->prs_double_vlans = devm_kcalloc(&pdev->dev, sizeof(bool),
+ MVPP2_PRS_DBL_VLANS_MAX,
+ GFP_KERNEL);
+ if (!priv->prs_double_vlans)
+ return -ENOMEM;
+
+ /* Double VLAN: 0x88A8, 0x8100 */
+ err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021AD, ETH_P_8021Q,
+ MVPP2_PRS_PORT_MASK);
+ if (err)
+ return err;
+
+ /* Double VLAN: 0x8100, 0x8100 */
+ err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021Q,
+ MVPP2_PRS_PORT_MASK);
+ if (err)
+ return err;
+
+ /* Single VLAN: 0x88a8 */
+ err = mvpp2_prs_vlan_add(priv, ETH_P_8021AD, MVPP2_PRS_SINGLE_VLAN_AI,
+ MVPP2_PRS_PORT_MASK);
+ if (err)
+ return err;
+
+ /* Single VLAN: 0x8100 */
+ err = mvpp2_prs_vlan_add(priv, ETH_P_8021Q, MVPP2_PRS_SINGLE_VLAN_AI,
+ MVPP2_PRS_PORT_MASK);
+ if (err)
+ return err;
+
+ /* Set default double vlan entry */
+ memset(&pe, 0, sizeof(pe));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
+ pe.index = MVPP2_PE_VLAN_DBL;
+
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID);
+
+ /* Clear ai for next iterations */
+ mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
+ MVPP2_PRS_RI_VLAN_MASK);
+
+ mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_DBL_VLAN_AI_BIT,
+ MVPP2_PRS_DBL_VLAN_AI_BIT);
+ /* Unmask all ports */
+ mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+ /* Update shadow table and hw entry */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
+ mvpp2_prs_hw_write(priv, &pe);
+
+ /* Set default vlan none entry */
+ memset(&pe, 0, sizeof(pe));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
+ pe.index = MVPP2_PE_VLAN_NONE;
+
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
+ MVPP2_PRS_RI_VLAN_MASK);
+
+ /* Unmask all ports */
+ mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+ /* Update shadow table and hw entry */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
+ mvpp2_prs_hw_write(priv, &pe);
+
+ return 0;
+}
+
+/* Set entries for PPPoE ethertype */
+static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
+{
+ struct mvpp2_prs_entry pe;
+ int tid, ihl;
+
+ /* IPv4 over PPPoE with header length >= 5 */
+ for (ihl = MVPP2_PRS_IPV4_IHL_MIN; ihl <= MVPP2_PRS_IPV4_IHL_MAX; ihl++) {
+ tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+ MVPP2_PE_LAST_FREE_TID);
+ if (tid < 0)
+ return tid;
+
+ memset(&pe, 0, sizeof(pe));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
+ pe.index = tid;
+
+ mvpp2_prs_match_etype(&pe, 0, PPP_IP);
+ mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
+ MVPP2_PRS_IPV4_HEAD | ihl,
+ MVPP2_PRS_IPV4_HEAD_MASK |
+ MVPP2_PRS_IPV4_IHL_MASK);
+
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
+ MVPP2_PRS_RI_L3_PROTO_MASK);
+ /* goto ipv4 dst-address (skip eth_type + IP-header-size - 4) */
+ mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN +
+ sizeof(struct iphdr) - 4,
+ MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+ /* Set L3 offset */
+ mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
+ MVPP2_ETH_TYPE_LEN,
+ MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+ /* Set L4 offset */
+ mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
+ MVPP2_ETH_TYPE_LEN + (ihl * 4),
+ MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+ /* Update shadow table and hw entry */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
+ mvpp2_prs_hw_write(priv, &pe);
+ }
+
+ /* IPv6 over PPPoE */
+ tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+ MVPP2_PE_LAST_FREE_TID);
+ if (tid < 0)
+ return tid;
+
+ memset(&pe, 0, sizeof(pe));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
+ pe.index = tid;
+
+ mvpp2_prs_match_etype(&pe, 0, PPP_IPV6);
+
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
+ MVPP2_PRS_RI_L3_PROTO_MASK);
+ /* Jump to DIP of IPV6 header */
+ mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
+ MVPP2_MAX_L3_ADDR_SIZE,
+ MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+ /* Set L3 offset */
+ mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
+ MVPP2_ETH_TYPE_LEN,
+ MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+ /* Update shadow table and hw entry */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
+ mvpp2_prs_hw_write(priv, &pe);
+
+ /* Non-IP over PPPoE */
+ tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+ MVPP2_PE_LAST_FREE_TID);
+ if (tid < 0)
+ return tid;
+
+ memset(&pe, 0, sizeof(pe));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
+ pe.index = tid;
+
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
+ MVPP2_PRS_RI_L3_PROTO_MASK);
+
+ /* Finished: go to flowid generation */
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+ mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+ /* Set L3 offset even if it's unknown L3 */
+ mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
+ MVPP2_ETH_TYPE_LEN,
+ MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+ /* Update shadow table and hw entry */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
+ mvpp2_prs_hw_write(priv, &pe);
+
+ return 0;
+}
+
+/* Initialize entries for IPv4 */
+static int mvpp2_prs_ip4_init(struct mvpp2 *priv)
+{
+ struct mvpp2_prs_entry pe;
+ int err;
+
+ /* Set entries for TCP, UDP and IGMP over IPv4 */
+ err = mvpp2_prs_ip4_proto(priv, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_RI_L4_PROTO_MASK);
+ if (err)
+ return err;
+
+ err = mvpp2_prs_ip4_proto(priv, IPPROTO_UDP, MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_RI_L4_PROTO_MASK);
+ if (err)
+ return err;
+
+ err = mvpp2_prs_ip4_proto(priv, IPPROTO_IGMP,
+ MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
+ MVPP2_PRS_RI_UDF3_RX_SPECIAL,
+ MVPP2_PRS_RI_CPU_CODE_MASK |
+ MVPP2_PRS_RI_UDF3_MASK);
+ if (err)
+ return err;
+
+ /* IPv4 Broadcast */
+ err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_BROAD_CAST);
+ if (err)
+ return err;
+
+ /* IPv4 Multicast */
+ err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
+ if (err)
+ return err;
+
+ /* Default IPv4 entry for unknown protocols */
+ memset(&pe, 0, sizeof(pe));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
+ pe.index = MVPP2_PE_IP4_PROTO_UN;
+
+ /* Finished: go to flowid generation */
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+ mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+
+ /* Set L3 offset */
+ mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, -4,
+ MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+ mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
+ MVPP2_PRS_RI_L4_PROTO_MASK);
+
+ mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
+ MVPP2_PRS_IPV4_DIP_AI_BIT);
+ /* Unmask all ports */
+ mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+ /* Update shadow table and hw entry */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
+ mvpp2_prs_hw_write(priv, &pe);
+
+ /* Default IPv4 entry for unicast address */
+ memset(&pe, 0, sizeof(pe));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
+ pe.index = MVPP2_PE_IP4_ADDR_UN;
+
+ /* Go again to ipv4 */
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
+
+ mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
+ MVPP2_PRS_IPV4_DIP_AI_BIT);
+
+ /* Shift back to IPv4 proto */
+ mvpp2_prs_sram_shift_set(&pe, -12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
+ MVPP2_PRS_RI_L3_ADDR_MASK);
+ mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
+
+ /* Unmask all ports */
+ mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+ /* Update shadow table and hw entry */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
+ mvpp2_prs_hw_write(priv, &pe);
+
+ return 0;
+}
+
+/* Initialize entries for IPv6 */
+static int mvpp2_prs_ip6_init(struct mvpp2 *priv)
+{
+ struct mvpp2_prs_entry pe;
+ int tid, err;
+
+ /* Set entries for TCP, UDP and ICMP over IPv6 */
+ err = mvpp2_prs_ip6_proto(priv, IPPROTO_TCP,
+ MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_RI_L4_PROTO_MASK);
+ if (err)
+ return err;
+
+ err = mvpp2_prs_ip6_proto(priv, IPPROTO_UDP,
+ MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_RI_L4_PROTO_MASK);
+ if (err)
+ return err;
+
+ err = mvpp2_prs_ip6_proto(priv, IPPROTO_ICMPV6,
+ MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
+ MVPP2_PRS_RI_UDF3_RX_SPECIAL,
+ MVPP2_PRS_RI_CPU_CODE_MASK |
+ MVPP2_PRS_RI_UDF3_MASK);
+ if (err)
+ return err;
+
+ /* IPv4 is the last header. This is a similar case to 6-TCP or 17-UDP */
+ /* Result Info: UDF7=1, DS lite */
+ err = mvpp2_prs_ip6_proto(priv, IPPROTO_IPIP,
+ MVPP2_PRS_RI_UDF7_IP6_LITE,
+ MVPP2_PRS_RI_UDF7_MASK);
+ if (err)
+ return err;
+
+ /* IPv6 multicast */
+ err = mvpp2_prs_ip6_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
+ if (err)
+ return err;
+
+ /* Entry for checking hop limit */
+ tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+ MVPP2_PE_LAST_FREE_TID);
+ if (tid < 0)
+ return tid;
+
+ memset(&pe, 0, sizeof(pe));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
+ pe.index = tid;
+
+ /* Finished: go to flowid generation */
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+ mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN |
+ MVPP2_PRS_RI_DROP_MASK,
+ MVPP2_PRS_RI_L3_PROTO_MASK |
+ MVPP2_PRS_RI_DROP_MASK);
+
+ mvpp2_prs_tcam_data_byte_set(&pe, 1, 0x00, MVPP2_PRS_IPV6_HOP_MASK);
+ mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
+ MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
+
+ /* Update shadow table and hw entry */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
+ mvpp2_prs_hw_write(priv, &pe);
+
+ /* Default IPv6 entry for unknown protocols */
+ memset(&pe, 0, sizeof(pe));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
+ pe.index = MVPP2_PE_IP6_PROTO_UN;
+
+ /* Finished: go to flowid generation */
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+ mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
+ MVPP2_PRS_RI_L4_PROTO_MASK);
+ /* Set L4 offset relative to our current place */
+ mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
+ sizeof(struct ipv6hdr) - 4,
+ MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+ mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
+ MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
+ /* Unmask all ports */
+ mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+ /* Update shadow table and hw entry */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
+ mvpp2_prs_hw_write(priv, &pe);
+
+ /* Default IPv6 entry for unknown ext protocols */
+ memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
+ pe.index = MVPP2_PE_IP6_EXT_PROTO_UN;
+
+ /* Finished: go to flowid generation */
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+ mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
+ MVPP2_PRS_RI_L4_PROTO_MASK);
+
+ mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_EXT_AI_BIT,
+ MVPP2_PRS_IPV6_EXT_AI_BIT);
+ /* Unmask all ports */
+ mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+ /* Update shadow table and hw entry */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
+ mvpp2_prs_hw_write(priv, &pe);
+
+ /* Default IPv6 entry for unicast address */
+ memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
+ pe.index = MVPP2_PE_IP6_ADDR_UN;
+
+ /* Finished: go to IPv6 again */
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
+ MVPP2_PRS_RI_L3_ADDR_MASK);
+ mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
+ MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
+ /* Shift back to IPV6 NH */
+ mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+ mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
+ /* Unmask all ports */
+ mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+ /* Update shadow table and hw entry */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
+ mvpp2_prs_hw_write(priv, &pe);
+
+ return 0;
+}
+
+/* Find tcam entry with matched pair <vid,port> */
+static int mvpp2_prs_vid_range_find(struct mvpp2_port *port, u16 vid, u16 mask)
+{
+ unsigned char byte[2], enable[2];
+ struct mvpp2_prs_entry pe;
+ u16 rvid, rmask;
+ int tid;
+
+ /* Go through all the entries with MVPP2_PRS_LU_VID */
+ for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id);
+ tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) {
+ if (!port->priv->prs_shadow[tid].valid ||
+ port->priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VID)
+ continue;
+
+ mvpp2_prs_init_from_hw(port->priv, &pe, tid);
+
+ mvpp2_prs_tcam_data_byte_get(&pe, 2, &byte[0], &enable[0]);
+ mvpp2_prs_tcam_data_byte_get(&pe, 3, &byte[1], &enable[1]);
+
+ rvid = ((byte[0] & 0xf) << 8) + byte[1];
+ rmask = ((enable[0] & 0xf) << 8) + enable[1];
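+ /* Worked example of the reconstruction above (illustrative only):
+ * for VID 0x123 matched with a full 12-bit mask, byte[1] = 0x23 and
+ * the low nibble of byte[0] = 0x1, giving rvid = 0x123 and
+ * rmask = 0xfff.
+ */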
+
+ if (rvid != vid || rmask != mask)
+ continue;
+
+ return tid;
+ }
+
+ return -ENOENT;
+}
+
+/* Write parser entry for VID filtering */
+int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid)
+{
+ unsigned int vid_start = MVPP2_PE_VID_FILT_RANGE_START +
+ port->id * MVPP2_PRS_VLAN_FILT_MAX;
+ unsigned int mask = 0xfff, reg_val, shift;
+ struct mvpp2 *priv = port->priv;
+ struct mvpp2_prs_entry pe;
+ int tid;
+
+ memset(&pe, 0, sizeof(pe));
+
+ /* Scan TCAM and see if an entry with this <vid,port> already exists */
+ tid = mvpp2_prs_vid_range_find(port, vid, mask);
+
+ reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id));
+ if (reg_val & MVPP2_DSA_EXTENDED)
+ shift = MVPP2_VLAN_TAG_EDSA_LEN;
+ else
+ shift = MVPP2_VLAN_TAG_LEN;
+
+ /* No such entry */
+ if (tid < 0) {
+
+ /* Go through all entries from first to last in vlan range */
+ tid = mvpp2_prs_tcam_first_free(priv, vid_start,
+ vid_start +
+ MVPP2_PRS_VLAN_FILT_MAX_ENTRY);
+
+ /* There isn't room for a new VID filter */
+ if (tid < 0)
+ return tid;
+
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);
+ pe.index = tid;
+
+ /* Mask all ports */
+ mvpp2_prs_tcam_port_map_set(&pe, 0);
+ } else {
+ mvpp2_prs_init_from_hw(priv, &pe, tid);
+ }
+
+ /* Enable the current port */
+ mvpp2_prs_tcam_port_set(&pe, port->id, true);
+
+ /* Continue - set next lookup */
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
+
+ /* Skip VLAN header - Set offset to 4 or 8 bytes */
+ mvpp2_prs_sram_shift_set(&pe, shift, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+ /* Set match on VID */
+ mvpp2_prs_match_vid(&pe, MVPP2_PRS_VID_TCAM_BYTE, vid);
+
+ /* Clear all ai bits for next iteration */
+ mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
+
+ /* Update shadow table */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
+ mvpp2_prs_hw_write(priv, &pe);
+
+ return 0;
+}
+
+/* Remove parser entry for VID filtering */
+void mvpp2_prs_vid_entry_remove(struct mvpp2_port *port, u16 vid)
+{
+ struct mvpp2 *priv = port->priv;
+ int tid;
+
+ /* Scan TCAM and see if an entry with this <vid,port> already exists */
+ tid = mvpp2_prs_vid_range_find(port, vid, 0xfff);
+
+ /* No such entry */
+ if (tid < 0)
+ return;
+
+ mvpp2_prs_hw_inv(priv, tid);
+ priv->prs_shadow[tid].valid = false;
+}
+
+/* Remove all existing VID filters on this port */
+void mvpp2_prs_vid_remove_all(struct mvpp2_port *port)
+{
+ struct mvpp2 *priv = port->priv;
+ int tid;
+
+ for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id);
+ tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) {
+ if (priv->prs_shadow[tid].valid) {
+ mvpp2_prs_hw_inv(priv, tid);
+ priv->prs_shadow[tid].valid = false;
+ }
+ }
+}
+
+/* Remove VID filtering entry for this port */
+void mvpp2_prs_vid_disable_filtering(struct mvpp2_port *port)
+{
+ unsigned int tid = MVPP2_PRS_VID_PORT_DFLT(port->id);
+ struct mvpp2 *priv = port->priv;
+
+ /* Invalidate the guard entry */
+ mvpp2_prs_hw_inv(priv, tid);
+
+ priv->prs_shadow[tid].valid = false;
+}
+
+/* Add guard entry that drops packets when no VID is matched on this port */
+void mvpp2_prs_vid_enable_filtering(struct mvpp2_port *port)
+{
+ unsigned int tid = MVPP2_PRS_VID_PORT_DFLT(port->id);
+ struct mvpp2 *priv = port->priv;
+ unsigned int reg_val, shift;
+ struct mvpp2_prs_entry pe;
+
+ if (priv->prs_shadow[tid].valid)
+ return;
+
+ memset(&pe, 0, sizeof(pe));
+
+ pe.index = tid;
+
+ reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id));
+ if (reg_val & MVPP2_DSA_EXTENDED)
+ shift = MVPP2_VLAN_TAG_EDSA_LEN;
+ else
+ shift = MVPP2_VLAN_TAG_LEN;
+
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);
+
+ /* Mask all ports */
+ mvpp2_prs_tcam_port_map_set(&pe, 0);
+
+ /* Update port mask */
+ mvpp2_prs_tcam_port_set(&pe, port->id, true);
+
+ /* Continue - set next lookup */
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
+
+ /* Skip VLAN header - Set offset to 4 or 8 bytes */
+ mvpp2_prs_sram_shift_set(&pe, shift, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+ /* Drop VLAN packets that don't belong to any VIDs on this port */
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
+ MVPP2_PRS_RI_DROP_MASK);
+
+ /* Clear all ai bits for next iteration */
+ mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
+
+ /* Update shadow table */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
+ mvpp2_prs_hw_write(priv, &pe);
+}
+
+/* Parser default initialization */
+int mvpp2_prs_default_init(struct platform_device *pdev, struct mvpp2 *priv)
+{
+ int err, index, i;
+
+ /* Enable tcam table */
+ mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);
+
+ /* Clear all tcam and sram entries */
+ for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) {
+ mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
+ for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
+ mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0);
+
+ mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index);
+ for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
+ mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0);
+ }
+
+ /* Invalidate all tcam entries */
+ for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++)
+ mvpp2_prs_hw_inv(priv, index);
+
+ priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE,
+ sizeof(*priv->prs_shadow),
+ GFP_KERNEL);
+ if (!priv->prs_shadow)
+ return -ENOMEM;
+
+ /* Always start from lookup = 0 */
+ for (index = 0; index < MVPP2_MAX_PORTS; index++)
+ mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH,
+ MVPP2_PRS_PORT_LU_MAX, 0);
+
+ mvpp2_prs_def_flow_init(priv);
+
+ mvpp2_prs_mh_init(priv);
+
+ mvpp2_prs_mac_init(priv);
+
+ mvpp2_prs_dsa_init(priv);
+
+ mvpp2_prs_vid_init(priv);
+
+ err = mvpp2_prs_etype_init(priv);
+ if (err)
+ return err;
+
+ err = mvpp2_prs_vlan_init(pdev, priv);
+ if (err)
+ return err;
+
+ err = mvpp2_prs_pppoe_init(priv);
+ if (err)
+ return err;
+
+ err = mvpp2_prs_ip6_init(priv);
+ if (err)
+ return err;
+
+ err = mvpp2_prs_ip4_init(priv);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/* Compare MAC DA with tcam entry data */
+static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe,
+ const u8 *da, unsigned char *mask)
+{
+ unsigned char tcam_byte, tcam_mask;
+ int index;
+
+ for (index = 0; index < ETH_ALEN; index++) {
+ mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask);
+ if (tcam_mask != mask[index])
+ return false;
+
+ if ((tcam_mask & tcam_byte) != (da[index] & mask[index]))
+ return false;
+ }
+
+ return true;
+}
+
+/* Find tcam entry with matched pair <MAC DA, port> */
+static int
+mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
+ unsigned char *mask, int udf_type)
+{
+ struct mvpp2_prs_entry pe;
+ int tid;
+
+ /* Go through all the entries with MVPP2_PRS_LU_MAC */
+ for (tid = MVPP2_PE_MAC_RANGE_START;
+ tid <= MVPP2_PE_MAC_RANGE_END; tid++) {
+ unsigned int entry_pmap;
+
+ if (!priv->prs_shadow[tid].valid ||
+ (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
+ (priv->prs_shadow[tid].udf != udf_type))
+ continue;
+
+ mvpp2_prs_init_from_hw(priv, &pe, tid);
+ entry_pmap = mvpp2_prs_tcam_port_map_get(&pe);
+
+ if (mvpp2_prs_mac_range_equals(&pe, da, mask) &&
+ entry_pmap == pmap)
+ return tid;
+ }
+
+ return -ENOENT;
+}
+
+/* Update parser's mac da entry */
+int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da, bool add)
+{
+ unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+ struct mvpp2 *priv = port->priv;
+ unsigned int pmap, len, ri;
+ struct mvpp2_prs_entry pe;
+ int tid;
+
+ memset(&pe, 0, sizeof(pe));
+
+ /* Scan TCAM and see if an entry with this <MAC DA, port> already exists */
+ tid = mvpp2_prs_mac_da_range_find(priv, BIT(port->id), da, mask,
+ MVPP2_PRS_UDF_MAC_DEF);
+
+ /* No such entry */
+ if (tid < 0) {
+ if (!add)
+ return 0;
+
+ /* Create new TCAM entry */
+ /* Go through all the entries from first to last */
+ tid = mvpp2_prs_tcam_first_free(priv,
+ MVPP2_PE_MAC_RANGE_START,
+ MVPP2_PE_MAC_RANGE_END);
+ if (tid < 0)
+ return tid;
+
+ pe.index = tid;
+
+ /* Mask all ports */
+ mvpp2_prs_tcam_port_map_set(&pe, 0);
+ } else {
+ mvpp2_prs_init_from_hw(priv, &pe, tid);
+ }
+
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
+
+ /* Update port mask */
+ mvpp2_prs_tcam_port_set(&pe, port->id, add);
+
+ /* Invalidate the entry if no ports are left enabled */
+ pmap = mvpp2_prs_tcam_port_map_get(&pe);
+ if (pmap == 0) {
+ if (add)
+ return -EINVAL;
+
+ mvpp2_prs_hw_inv(priv, pe.index);
+ priv->prs_shadow[pe.index].valid = false;
+ return 0;
+ }
+
+ /* Continue - set next lookup */
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
+
+ /* Set match on DA */
+ len = ETH_ALEN;
+ while (len--)
+ mvpp2_prs_tcam_data_byte_set(&pe, len, da[len], 0xff);
+
+ /* Set result info bits */
+ if (is_broadcast_ether_addr(da)) {
+ ri = MVPP2_PRS_RI_L2_BCAST;
+ } else if (is_multicast_ether_addr(da)) {
+ ri = MVPP2_PRS_RI_L2_MCAST;
+ } else {
+ ri = MVPP2_PRS_RI_L2_UCAST;
+
+ if (ether_addr_equal(da, port->dev->dev_addr))
+ ri |= MVPP2_PRS_RI_MAC_ME_MASK;
+ }
+
+ mvpp2_prs_sram_ri_update(&pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
+ MVPP2_PRS_RI_MAC_ME_MASK);
+ mvpp2_prs_shadow_ri_set(priv, pe.index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
+ MVPP2_PRS_RI_MAC_ME_MASK);
+
+ /* Shift to ethertype */
+ mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
+ MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+ /* Update shadow table and hw entry */
+ priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_MAC_DEF;
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
+ mvpp2_prs_hw_write(priv, &pe);
+
+ return 0;
+}
+
+int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+ int err;
+
+ /* Remove old parser entry */
+ err = mvpp2_prs_mac_da_accept(port, dev->dev_addr, false);
+ if (err)
+ return err;
+
+ /* Add new parser entry */
+ err = mvpp2_prs_mac_da_accept(port, da, true);
+ if (err)
+ return err;
+
+ /* Set addr in the device */
+ eth_hw_addr_set(dev, da);
+
+ return 0;
+}
+
+void mvpp2_prs_mac_del_all(struct mvpp2_port *port)
+{
+ struct mvpp2 *priv = port->priv;
+ struct mvpp2_prs_entry pe;
+ unsigned long pmap;
+ int index, tid;
+
+ for (tid = MVPP2_PE_MAC_RANGE_START;
+ tid <= MVPP2_PE_MAC_RANGE_END; tid++) {
+ unsigned char da[ETH_ALEN], da_mask[ETH_ALEN];
+
+ if (!priv->prs_shadow[tid].valid ||
+ (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
+ (priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF))
+ continue;
+
+ mvpp2_prs_init_from_hw(priv, &pe, tid);
+
+ pmap = mvpp2_prs_tcam_port_map_get(&pe);
+
+ /* We only want entries active on this port */
+ if (!test_bit(port->id, &pmap))
+ continue;
+
+ /* Read mac addr from entry */
+ for (index = 0; index < ETH_ALEN; index++)
+ mvpp2_prs_tcam_data_byte_get(&pe, index, &da[index],
+ &da_mask[index]);
+
+ /* Special cases: don't remove the broadcast or the port's own
+ * address
+ */
+ if (is_broadcast_ether_addr(da) ||
+ ether_addr_equal(da, port->dev->dev_addr))
+ continue;
+
+ /* Remove entry from TCAM */
+ mvpp2_prs_mac_da_accept(port, da, false);
+ }
+}
+
+int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type)
+{
+ switch (type) {
+ case MVPP2_TAG_TYPE_EDSA:
+ /* Add port to EDSA entries */
+ mvpp2_prs_dsa_tag_set(priv, port, true,
+ MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
+ mvpp2_prs_dsa_tag_set(priv, port, true,
+ MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
+ /* Remove port from DSA entries */
+ mvpp2_prs_dsa_tag_set(priv, port, false,
+ MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
+ mvpp2_prs_dsa_tag_set(priv, port, false,
+ MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
+ break;
+
+ case MVPP2_TAG_TYPE_DSA:
+ /* Add port to DSA entries */
+ mvpp2_prs_dsa_tag_set(priv, port, true,
+ MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
+ mvpp2_prs_dsa_tag_set(priv, port, true,
+ MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
+ /* Remove port from EDSA entries */
+ mvpp2_prs_dsa_tag_set(priv, port, false,
+ MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
+ mvpp2_prs_dsa_tag_set(priv, port, false,
+ MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
+ break;
+
+ case MVPP2_TAG_TYPE_MH:
+ case MVPP2_TAG_TYPE_NONE:
+ /* Remove port from EDSA and DSA entries */
+ mvpp2_prs_dsa_tag_set(priv, port, false,
+ MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
+ mvpp2_prs_dsa_tag_set(priv, port, false,
+ MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
+ mvpp2_prs_dsa_tag_set(priv, port, false,
+ MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
+ mvpp2_prs_dsa_tag_set(priv, port, false,
+ MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
+ break;
+
+ default:
+ if ((type < 0) || (type > MVPP2_TAG_TYPE_EDSA))
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int mvpp2_prs_add_flow(struct mvpp2 *priv, int flow, u32 ri, u32 ri_mask)
+{
+ struct mvpp2_prs_entry pe;
+ u8 *ri_byte, *ri_byte_mask;
+ int tid, i;
+
+ memset(&pe, 0, sizeof(pe));
+
+ tid = mvpp2_prs_tcam_first_free(priv,
+ MVPP2_PE_LAST_FREE_TID,
+ MVPP2_PE_FIRST_FREE_TID);
+ if (tid < 0)
+ return tid;
+
+ pe.index = tid;
+
+ ri_byte = (u8 *)&ri;
+ ri_byte_mask = (u8 *)&ri_mask;
+
+ mvpp2_prs_sram_ai_update(&pe, flow, MVPP2_PRS_FLOW_ID_MASK);
+ mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
+
+ for (i = 0; i < 4; i++) {
+ mvpp2_prs_tcam_data_byte_set(&pe, i, ri_byte[i],
+ ri_byte_mask[i]);
+ }
+
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+ mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+ mvpp2_prs_hw_write(priv, &pe);
+
+ return 0;
+}
+
+/* Set prs flow for the port */
+int mvpp2_prs_def_flow(struct mvpp2_port *port)
+{
+ struct mvpp2_prs_entry pe;
+ int tid;
+
+ memset(&pe, 0, sizeof(pe));
+
+ tid = mvpp2_prs_flow_find(port->priv, port->id);
+
+ /* No such entry exists */
+ if (tid < 0) {
+ /* Go through all the entries from last to first */
+ tid = mvpp2_prs_tcam_first_free(port->priv,
+ MVPP2_PE_LAST_FREE_TID,
+ MVPP2_PE_FIRST_FREE_TID);
+ if (tid < 0)
+ return tid;
+
+ pe.index = tid;
+
+ /* Set flow ID */
+ mvpp2_prs_sram_ai_update(&pe, port->id, MVPP2_PRS_FLOW_ID_MASK);
+ mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
+
+ /* Update shadow table */
+ mvpp2_prs_shadow_set(port->priv, pe.index, MVPP2_PRS_LU_FLOWS);
+ } else {
+ mvpp2_prs_init_from_hw(port->priv, &pe, tid);
+ }
+
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+ mvpp2_prs_tcam_port_map_set(&pe, (1 << port->id));
+ mvpp2_prs_hw_write(port->priv, &pe);
+
+ return 0;
+}
+
+int mvpp2_prs_hits(struct mvpp2 *priv, int index)
+{
+ u32 val;
+
+ if (index > MVPP2_PRS_TCAM_SRAM_SIZE)
+ return -EINVAL;
+
+ mvpp2_write(priv, MVPP2_PRS_TCAM_HIT_IDX_REG, index);
+
+ val = mvpp2_read(priv, MVPP2_PRS_TCAM_HIT_CNT_REG);
+
+ val &= MVPP2_PRS_TCAM_HIT_CNT_MASK;
+
+ return val;
+}
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h
new file mode 100644
index 000000000..5ce5907be
--- /dev/null
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h
@@ -0,0 +1,335 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Header Parser definitions for Marvell PPv2 Network Controller
+ *
+ * Copyright (C) 2014 Marvell
+ *
+ * Marcin Wojtas <mw@semihalf.com>
+ */
+#ifndef _MVPP2_PRS_H_
+#define _MVPP2_PRS_H_
+
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/platform_device.h>
+
+#include "mvpp2.h"
+
+/* Parser constants */
+#define MVPP2_PRS_TCAM_SRAM_SIZE 256
+#define MVPP2_PRS_TCAM_WORDS 6
+#define MVPP2_PRS_SRAM_WORDS 4
+#define MVPP2_PRS_FLOW_ID_SIZE 64
+#define MVPP2_PRS_FLOW_ID_MASK 0x3f
+#define MVPP2_PRS_TCAM_ENTRY_INVALID 1
+#define MVPP2_PRS_TCAM_DSA_TAGGED_BIT BIT(5)
+#define MVPP2_PRS_IPV4_HEAD 0x40
+#define MVPP2_PRS_IPV4_HEAD_MASK 0xf0
+#define MVPP2_PRS_IPV4_MC 0xe0
+#define MVPP2_PRS_IPV4_MC_MASK 0xf0
+#define MVPP2_PRS_IPV4_BC_MASK 0xff
+#define MVPP2_PRS_IPV4_IHL_MIN 0x5
+#define MVPP2_PRS_IPV4_IHL_MAX 0xf
+#define MVPP2_PRS_IPV4_IHL_MASK 0xf
+#define MVPP2_PRS_IPV6_MC 0xff
+#define MVPP2_PRS_IPV6_MC_MASK 0xff
+#define MVPP2_PRS_IPV6_HOP_MASK 0xff
+#define MVPP2_PRS_TCAM_PROTO_MASK 0xff
+#define MVPP2_PRS_TCAM_PROTO_MASK_L 0x3f
+#define MVPP2_PRS_DBL_VLANS_MAX 100
+#define MVPP2_PRS_CAST_MASK BIT(0)
+#define MVPP2_PRS_MCAST_VAL BIT(0)
+#define MVPP2_PRS_UCAST_VAL 0x0
+
+/* Tcam structure:
+ * - lookup ID - 4 bits
+ * - port ID - 1 byte
+ * - additional information - 1 byte
+ * - header data - 8 bytes
+ * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0).
+ */
+#define MVPP2_PRS_AI_BITS 8
+#define MVPP2_PRS_AI_MASK 0xff
+#define MVPP2_PRS_PORT_MASK 0xff
+#define MVPP2_PRS_LU_MASK 0xf
+
+/* TCAM entries in registers are accessed using 16 data bits + 16 enable bits */
+#define MVPP2_PRS_BYTE_TO_WORD(byte) ((byte) / 2)
+#define MVPP2_PRS_BYTE_IN_WORD(byte) ((byte) % 2)
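+/* For example (illustrative only): TCAM header-data byte 5 lands in data word
+ * MVPP2_PRS_BYTE_TO_WORD(5) = 2 at byte position MVPP2_PRS_BYTE_IN_WORD(5) = 1,
+ * with its matching enable bits held in the upper 16 bits of the same word.
+ */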
+
+#define MVPP2_PRS_TCAM_EN(data) ((data) << 16)
+#define MVPP2_PRS_TCAM_AI_WORD 4
+#define MVPP2_PRS_TCAM_AI(ai) (ai)
+#define MVPP2_PRS_TCAM_AI_EN(ai) MVPP2_PRS_TCAM_EN(MVPP2_PRS_TCAM_AI(ai))
+#define MVPP2_PRS_TCAM_PORT_WORD 4
+#define MVPP2_PRS_TCAM_PORT(p) ((p) << 8)
+#define MVPP2_PRS_TCAM_PORT_EN(p) MVPP2_PRS_TCAM_EN(MVPP2_PRS_TCAM_PORT(p))
+#define MVPP2_PRS_TCAM_LU_WORD 5
+#define MVPP2_PRS_TCAM_LU(lu) (lu)
+#define MVPP2_PRS_TCAM_LU_EN(lu) MVPP2_PRS_TCAM_EN(MVPP2_PRS_TCAM_LU(lu))
+#define MVPP2_PRS_TCAM_INV_WORD 5
+
+#define MVPP2_PRS_VID_TCAM_BYTE 2
+
+/* TCAM range for unicast and multicast filtering. We have 25 entries per port,
+ * with 4 dedicated to UC filtering and the rest to multicast filtering.
+ * Additionally we reserve one entry for the broadcast address, and one for
+ * each port's own address.
+ */
+#define MVPP2_PRS_MAC_UC_MC_FILT_MAX 25
+#define MVPP2_PRS_MAC_RANGE_SIZE 80
+
+/* Number of entries per port dedicated to UC and MC filtering */
+#define MVPP2_PRS_MAC_UC_FILT_MAX 4
+#define MVPP2_PRS_MAC_MC_FILT_MAX (MVPP2_PRS_MAC_UC_MC_FILT_MAX - \
+ MVPP2_PRS_MAC_UC_FILT_MAX)
+
+/* There is a TCAM range reserved for VLAN filtering entries; the range size is
+ * 33: 10 VLAN ID filter entries plus 1 default VLAN filter entry per port.
+ * It is assumed that there are 3 ports to filter, not including the loopback
+ * port.
+ */
+#define MVPP2_PRS_VLAN_FILT_MAX 11
+#define MVPP2_PRS_VLAN_FILT_RANGE_SIZE 33
+
+#define MVPP2_PRS_VLAN_FILT_MAX_ENTRY (MVPP2_PRS_VLAN_FILT_MAX - 2)
+#define MVPP2_PRS_VLAN_FILT_DFLT_ENTRY (MVPP2_PRS_VLAN_FILT_MAX - 1)
+
+/* Tcam entries ID */
+#define MVPP2_PE_DROP_ALL 0
+#define MVPP2_PE_FIRST_FREE_TID 1
+
+/* MAC filtering range */
+#define MVPP2_PE_MAC_RANGE_END (MVPP2_PE_VID_FILT_RANGE_START - 1)
+#define MVPP2_PE_MAC_RANGE_START (MVPP2_PE_MAC_RANGE_END - \
+ MVPP2_PRS_MAC_RANGE_SIZE + 1)
+/* VLAN filtering range */
+#define MVPP2_PE_VID_FILT_RANGE_END (MVPP2_PRS_TCAM_SRAM_SIZE - 32)
+#define MVPP2_PE_VID_FILT_RANGE_START (MVPP2_PE_VID_FILT_RANGE_END - \
+ MVPP2_PRS_VLAN_FILT_RANGE_SIZE + 1)
+#define MVPP2_PE_LAST_FREE_TID (MVPP2_PE_MAC_RANGE_START - 1)
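+/* Worked values for the ranges above, assuming the 256-entry TCAM: the VID
+ * filter range covers entries 192..224, the MAC filter range covers entries
+ * 112..191, and the last free TID available for dynamically allocated entries
+ * is therefore 111.
+ */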
+#define MVPP2_PE_MH_SKIP_PRS (MVPP2_PRS_TCAM_SRAM_SIZE - 31)
+#define MVPP2_PE_IP6_EXT_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 30)
+#define MVPP2_PE_IP6_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 29)
+#define MVPP2_PE_IP4_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 28)
+#define MVPP2_PE_LAST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 27)
+#define MVPP2_PE_FIRST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 22)
+#define MVPP2_PE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 21)
+#define MVPP2_PE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 20)
+#define MVPP2_PE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 19)
+#define MVPP2_PE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 18)
+#define MVPP2_PE_ETYPE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 17)
+#define MVPP2_PE_ETYPE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 16)
+#define MVPP2_PE_ETYPE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 15)
+#define MVPP2_PE_ETYPE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 14)
+#define MVPP2_PE_MH_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 13)
+#define MVPP2_PE_DSA_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 12)
+#define MVPP2_PE_IP6_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 11)
+#define MVPP2_PE_IP4_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 10)
+#define MVPP2_PE_ETH_TYPE_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 9)
+#define MVPP2_PE_VID_FLTR_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 8)
+#define MVPP2_PE_VID_EDSA_FLTR_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 7)
+#define MVPP2_PE_VLAN_DBL (MVPP2_PRS_TCAM_SRAM_SIZE - 6)
+#define MVPP2_PE_VLAN_NONE (MVPP2_PRS_TCAM_SRAM_SIZE - 5)
+#define MVPP2_PE_FC_DROP (MVPP2_PRS_TCAM_SRAM_SIZE - 4)
+#define MVPP2_PE_MAC_MC_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 3)
+#define MVPP2_PE_MAC_UC_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 2)
+#define MVPP2_PE_MAC_NON_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 1)
+
+#define MVPP2_PRS_VID_PORT_FIRST(port) (MVPP2_PE_VID_FILT_RANGE_START + \
+ ((port) * MVPP2_PRS_VLAN_FILT_MAX))
+#define MVPP2_PRS_VID_PORT_LAST(port) (MVPP2_PRS_VID_PORT_FIRST(port) \
+ + MVPP2_PRS_VLAN_FILT_MAX_ENTRY)
+/* Index of default vid filter for given port */
+#define MVPP2_PRS_VID_PORT_DFLT(port) (MVPP2_PRS_VID_PORT_FIRST(port) \
+ + MVPP2_PRS_VLAN_FILT_DFLT_ENTRY)
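+/* For example, with the values above, port 1 owns TCAM entries 203..212 for
+ * its VID filters and entry 213 as its default (guard) VID filter entry.
+ */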
+
+/* Sram structure
+ * The fields are represented by MVPP2_PRS_SRAM_DATA_REG(3)->(0).
+ */
+#define MVPP2_PRS_SRAM_RI_OFFS 0
+#define MVPP2_PRS_SRAM_RI_WORD 0
+#define MVPP2_PRS_SRAM_RI_CTRL_OFFS 32
+#define MVPP2_PRS_SRAM_RI_CTRL_WORD 1
+#define MVPP2_PRS_SRAM_RI_CTRL_BITS 32
+#define MVPP2_PRS_SRAM_SHIFT_OFFS 64
+#define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT 72
+#define MVPP2_PRS_SRAM_SHIFT_MASK 0xff
+#define MVPP2_PRS_SRAM_UDF_OFFS 73
+#define MVPP2_PRS_SRAM_UDF_BITS 8
+#define MVPP2_PRS_SRAM_UDF_MASK 0xff
+#define MVPP2_PRS_SRAM_UDF_SIGN_BIT 81
+#define MVPP2_PRS_SRAM_UDF_TYPE_OFFS 82
+#define MVPP2_PRS_SRAM_UDF_TYPE_MASK 0x7
+#define MVPP2_PRS_SRAM_UDF_TYPE_L3 1
+#define MVPP2_PRS_SRAM_UDF_TYPE_L4 4
+#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS 85
+#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK 0x3
+#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD 1
+#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP4_ADD 2
+#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP6_ADD 3
+#define MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS 87
+#define MVPP2_PRS_SRAM_OP_SEL_UDF_BITS 2
+#define MVPP2_PRS_SRAM_OP_SEL_UDF_MASK 0x3
+#define MVPP2_PRS_SRAM_OP_SEL_UDF_ADD 0
+#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP4_ADD 2
+#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP6_ADD 3
+#define MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS 89
+#define MVPP2_PRS_SRAM_AI_OFFS 90
+#define MVPP2_PRS_SRAM_AI_CTRL_OFFS 98
+#define MVPP2_PRS_SRAM_AI_CTRL_BITS 8
+#define MVPP2_PRS_SRAM_AI_MASK 0xff
+#define MVPP2_PRS_SRAM_NEXT_LU_OFFS 106
+#define MVPP2_PRS_SRAM_NEXT_LU_MASK 0xf
+#define MVPP2_PRS_SRAM_LU_DONE_BIT 110
+#define MVPP2_PRS_SRAM_LU_GEN_BIT 111
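+/* The offsets above index bits within the 128-bit SRAM entry, stored by the
+ * driver as four 32-bit words (see struct mvpp2_prs_entry below). As an
+ * illustration, assuming the usual word = offset / 32, bit = offset % 32
+ * mapping, MVPP2_PRS_SRAM_NEXT_LU_OFFS (106) falls in sram word 3 at bit 10,
+ * and MVPP2_PRS_SRAM_LU_GEN_BIT (111) is bit 15 of the same word.
+ */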
+
+/* Sram result info bits assignment */
+#define MVPP2_PRS_RI_MAC_ME_MASK 0x1
+#define MVPP2_PRS_RI_DSA_MASK 0x2
+#define MVPP2_PRS_RI_VLAN_MASK (BIT(2) | BIT(3))
+#define MVPP2_PRS_RI_VLAN_NONE 0x0
+#define MVPP2_PRS_RI_VLAN_SINGLE BIT(2)
+#define MVPP2_PRS_RI_VLAN_DOUBLE BIT(3)
+#define MVPP2_PRS_RI_VLAN_TRIPLE (BIT(2) | BIT(3))
+#define MVPP2_PRS_RI_CPU_CODE_MASK 0x70
+#define MVPP2_PRS_RI_CPU_CODE_RX_SPEC BIT(4)
+#define MVPP2_PRS_RI_L2_CAST_MASK (BIT(9) | BIT(10))
+#define MVPP2_PRS_RI_L2_UCAST 0x0
+#define MVPP2_PRS_RI_L2_MCAST BIT(9)
+#define MVPP2_PRS_RI_L2_BCAST BIT(10)
+#define MVPP2_PRS_RI_PPPOE_MASK 0x800
+#define MVPP2_PRS_RI_L3_PROTO_MASK (BIT(12) | BIT(13) | BIT(14))
+#define MVPP2_PRS_RI_L3_UN 0x0
+#define MVPP2_PRS_RI_L3_IP4 BIT(12)
+#define MVPP2_PRS_RI_L3_IP4_OPT BIT(13)
+#define MVPP2_PRS_RI_L3_IP4_OTHER (BIT(12) | BIT(13))
+#define MVPP2_PRS_RI_L3_IP6 BIT(14)
+#define MVPP2_PRS_RI_L3_IP6_EXT (BIT(12) | BIT(14))
+#define MVPP2_PRS_RI_L3_ARP (BIT(13) | BIT(14))
+#define MVPP2_PRS_RI_L3_ADDR_MASK (BIT(15) | BIT(16))
+#define MVPP2_PRS_RI_L3_UCAST 0x0
+#define MVPP2_PRS_RI_L3_MCAST BIT(15)
+#define MVPP2_PRS_RI_L3_BCAST (BIT(15) | BIT(16))
+#define MVPP2_PRS_RI_IP_FRAG_MASK 0x20000
+#define MVPP2_PRS_RI_IP_FRAG_TRUE BIT(17)
+#define MVPP2_PRS_RI_UDF3_MASK 0x300000
+#define MVPP2_PRS_RI_UDF3_RX_SPECIAL BIT(21)
+#define MVPP2_PRS_RI_L4_PROTO_MASK 0x1c00000
+#define MVPP2_PRS_RI_L4_TCP BIT(22)
+#define MVPP2_PRS_RI_L4_UDP BIT(23)
+#define MVPP2_PRS_RI_L4_OTHER (BIT(22) | BIT(23))
+#define MVPP2_PRS_RI_UDF7_MASK 0x60000000
+#define MVPP2_PRS_RI_UDF7_IP6_LITE BIT(29)
+#define MVPP2_PRS_RI_DROP_MASK 0x80000000
+
+#define MVPP2_PRS_IP_MASK (MVPP2_PRS_RI_L3_PROTO_MASK | \
+ MVPP2_PRS_RI_IP_FRAG_MASK | \
+ MVPP2_PRS_RI_L4_PROTO_MASK)
+
+/* Sram additional info bits assignment */
+#define MVPP2_PRS_IPV4_DIP_AI_BIT BIT(0)
+#define MVPP2_PRS_IPV6_NO_EXT_AI_BIT BIT(0)
+#define MVPP2_PRS_IPV6_EXT_AI_BIT BIT(1)
+#define MVPP2_PRS_IPV6_EXT_AH_AI_BIT BIT(2)
+#define MVPP2_PRS_IPV6_EXT_AH_LEN_AI_BIT BIT(3)
+#define MVPP2_PRS_IPV6_EXT_AH_L4_AI_BIT BIT(4)
+#define MVPP2_PRS_SINGLE_VLAN_AI 0
+#define MVPP2_PRS_DBL_VLAN_AI_BIT BIT(7)
+#define MVPP2_PRS_EDSA_VID_AI_BIT BIT(0)
+
+/* DSA/EDSA type */
+#define MVPP2_PRS_TAGGED true
+#define MVPP2_PRS_UNTAGGED false
+#define MVPP2_PRS_EDSA true
+#define MVPP2_PRS_DSA false
+
+/* MAC entries, shadow udf */
+enum mvpp2_prs_udf {
+ MVPP2_PRS_UDF_MAC_DEF,
+ MVPP2_PRS_UDF_MAC_RANGE,
+ MVPP2_PRS_UDF_L2_DEF,
+ MVPP2_PRS_UDF_L2_DEF_COPY,
+ MVPP2_PRS_UDF_L2_USER,
+};
+
+/* Lookup ID */
+enum mvpp2_prs_lookup {
+ MVPP2_PRS_LU_MH,
+ MVPP2_PRS_LU_MAC,
+ MVPP2_PRS_LU_DSA,
+ MVPP2_PRS_LU_VLAN,
+ MVPP2_PRS_LU_VID,
+ MVPP2_PRS_LU_L2,
+ MVPP2_PRS_LU_PPPOE,
+ MVPP2_PRS_LU_IP4,
+ MVPP2_PRS_LU_IP6,
+ MVPP2_PRS_LU_FLOWS,
+ MVPP2_PRS_LU_LAST,
+};
+
+struct mvpp2_prs_entry {
+ u32 index;
+ u32 tcam[MVPP2_PRS_TCAM_WORDS];
+ u32 sram[MVPP2_PRS_SRAM_WORDS];
+};
+
+struct mvpp2_prs_result_info {
+ u32 ri;
+ u32 ri_mask;
+};
+
+struct mvpp2_prs_shadow {
+ bool valid;
+ bool finish;
+
+ /* Lookup ID */
+ int lu;
+
+ /* User defined offset */
+ int udf;
+
+ /* Result info */
+ u32 ri;
+ u32 ri_mask;
+};
+
+int mvpp2_prs_default_init(struct platform_device *pdev, struct mvpp2 *priv);
+
+int mvpp2_prs_init_from_hw(struct mvpp2 *priv, struct mvpp2_prs_entry *pe,
+ int tid);
+
+unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe);
+
+void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
+ unsigned int offs, unsigned char *byte,
+ unsigned char *enable);
+
+int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da, bool add);
+
+int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type);
+
+int mvpp2_prs_add_flow(struct mvpp2 *priv, int flow, u32 ri, u32 ri_mask);
+
+int mvpp2_prs_def_flow(struct mvpp2_port *port);
+
+void mvpp2_prs_vid_enable_filtering(struct mvpp2_port *port);
+
+void mvpp2_prs_vid_disable_filtering(struct mvpp2_port *port);
+
+int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid);
+
+void mvpp2_prs_vid_entry_remove(struct mvpp2_port *port, u16 vid);
+
+void mvpp2_prs_vid_remove_all(struct mvpp2_port *port);
+
+void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port,
+ enum mvpp2_prs_l2_cast l2_cast, bool add);
+
+void mvpp2_prs_mac_del_all(struct mvpp2_port *port);
+
+int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da);
+
+int mvpp2_prs_hits(struct mvpp2 *priv, int index);
+
+#endif
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_tai.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_tai.c
new file mode 100644
index 000000000..95862aff4
--- /dev/null
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_tai.c
@@ -0,0 +1,457 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Marvell PP2.2 TAI support
+ *
+ * Note:
+ * Do NOT use the event capture support.
+ * Do Not even set the MPP muxes to allow PTP_EVENT_REQ to be used.
+ * It will disrupt the operation of this driver, and there is nothing
+ * that this driver can do to prevent that. Even using PTP_EVENT_REQ
+ * as an output will be seen as a trigger input, which can't be masked.
+ * Whenever a trigger input is seen, the action in the TCFCR0_TCF
+ * field will be performed - whether it is a set, increment, decrement,
+ * read, or frequency update.
+ *
+ * Other notes (useful, not specified in the documentation):
+ * - PTP_PULSE_OUT (PTP_EVENT_REQ MPP)
+ * It looks like the hardware can't generate a pulse at nsec=0. (The
+ * output doesn't trigger if the nsec field is zero.)
+ * Note: when configured as an output via the register at 0xfX441120,
+ * the input is still very much alive, and will trigger the current TCF
+ * function.
+ * - PTP_CLK_OUT (PTP_TRIG_GEN MPP)
+ * This generates a "PPS" signal determined by the CCC registers. It
+ * seems this is not aligned to the TOD counter in any way (it may be
+ * initially, but if you specify a non-round second interval, it won't,
+ * and you can't easily get it back.)
+ * - PTP_PCLK_OUT
+ * This generates a 50% duty cycle clock based on the TOD counter, and
+ * it seems it can be set to any period with 1ns resolution. It is probably
+ * limited by the TOD step size. Its period is defined by the PCLK_CCC
+ * registers. Again, its alignment to the second is questionable.
+ *
+ * Consequently, we support none of these.
+ */
+#include <linux/io.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/slab.h>
+
+#include "mvpp2.h"
+
+#define CR0_SW_NRESET BIT(0)
+
+#define TCFCR0_PHASE_UPDATE_ENABLE BIT(8)
+#define TCFCR0_TCF_MASK (7 << 2)
+#define TCFCR0_TCF_UPDATE (0 << 2)
+#define TCFCR0_TCF_FREQUPDATE (1 << 2)
+#define TCFCR0_TCF_INCREMENT (2 << 2)
+#define TCFCR0_TCF_DECREMENT (3 << 2)
+#define TCFCR0_TCF_CAPTURE (4 << 2)
+#define TCFCR0_TCF_NOP (7 << 2)
+#define TCFCR0_TCF_TRIGGER BIT(0)
+
+#define TCSR_CAPTURE_1_VALID BIT(1)
+#define TCSR_CAPTURE_0_VALID BIT(0)
+
+struct mvpp2_tai {
+ struct ptp_clock_info caps;
+ struct ptp_clock *ptp_clock;
+ void __iomem *base;
+ spinlock_t lock;
+ u64 period; // nanosecond period in 32.32 fixed point
+ /* This timestamp is updated every two seconds */
+ struct timespec64 stamp;
+};
+
+static void mvpp2_tai_modify(void __iomem *reg, u32 mask, u32 set)
+{
+ u32 val;
+
+ val = readl_relaxed(reg) & ~mask;
+ val |= set & mask;
+ writel(val, reg);
+}
+
+static void mvpp2_tai_write(u32 val, void __iomem *reg)
+{
+ writel_relaxed(val & 0xffff, reg);
+}
+
+static u32 mvpp2_tai_read(void __iomem *reg)
+{
+ return readl_relaxed(reg) & 0xffff;
+}
+
+static struct mvpp2_tai *ptp_to_tai(struct ptp_clock_info *ptp)
+{
+ return container_of(ptp, struct mvpp2_tai, caps);
+}
+
+static void mvpp22_tai_read_ts(struct timespec64 *ts, void __iomem *base)
+{
+ ts->tv_sec = (u64)mvpp2_tai_read(base + 0) << 32 |
+ mvpp2_tai_read(base + 4) << 16 |
+ mvpp2_tai_read(base + 8);
+
+ ts->tv_nsec = mvpp2_tai_read(base + 12) << 16 |
+ mvpp2_tai_read(base + 16);
+
+ /* Read and discard fractional part */
+ readl_relaxed(base + 20);
+ readl_relaxed(base + 24);
+}
+
+static void mvpp2_tai_write_tlv(const struct timespec64 *ts, u32 frac,
+ void __iomem *base)
+{
+ mvpp2_tai_write(ts->tv_sec >> 32, base + MVPP22_TAI_TLV_SEC_HIGH);
+ mvpp2_tai_write(ts->tv_sec >> 16, base + MVPP22_TAI_TLV_SEC_MED);
+ mvpp2_tai_write(ts->tv_sec, base + MVPP22_TAI_TLV_SEC_LOW);
+ mvpp2_tai_write(ts->tv_nsec >> 16, base + MVPP22_TAI_TLV_NANO_HIGH);
+ mvpp2_tai_write(ts->tv_nsec, base + MVPP22_TAI_TLV_NANO_LOW);
+ mvpp2_tai_write(frac >> 16, base + MVPP22_TAI_TLV_FRAC_HIGH);
+ mvpp2_tai_write(frac, base + MVPP22_TAI_TLV_FRAC_LOW);
+}
+
+static void mvpp2_tai_op(u32 op, void __iomem *base)
+{
+ /* Trigger the operation. Note that an external unmaskable
+ * event on PTP_EVENT_REQ will also trigger this action.
+ */
+ mvpp2_tai_modify(base + MVPP22_TAI_TCFCR0,
+ TCFCR0_TCF_MASK | TCFCR0_TCF_TRIGGER,
+ op | TCFCR0_TCF_TRIGGER);
+ mvpp2_tai_modify(base + MVPP22_TAI_TCFCR0, TCFCR0_TCF_MASK,
+ TCFCR0_TCF_NOP);
+}
+
+/* The adjustment has a range of +0.5ns to -0.5ns in 2^32 steps, so it has units
+ * of 2^-32 ns.
+ *
+ * units(s) = 1 / (2^32 * 10^9)
+ * fractional = abs_scaled_ppm / (2^16 * 10^6)
+ *
+ * What we want to achieve:
+ * freq_adjusted = freq_nominal * (1 + fractional)
+ * freq_delta = freq_adjusted - freq_nominal => positive = faster
+ * freq_delta = freq_nominal * (1 + fractional) - freq_nominal
+ * So: freq_delta = freq_nominal * fractional
+ *
+ * However, we are dealing with periods, so:
+ * period_adjusted = period_nominal / (1 + fractional)
+ * period_delta = period_nominal - period_adjusted => positive = faster
+ * period_delta = period_nominal * fractional / (1 + fractional)
+ *
+ * Hence:
+ * period_delta = period_nominal * abs_scaled_ppm /
+ * (2^16 * 10^6 + abs_scaled_ppm)
+ *
+ * To avoid overflow, we reduce both sides of the divide operation by a factor
+ * of 16.
+ */
+static u64 mvpp22_calc_frac_ppm(struct mvpp2_tai *tai, long abs_scaled_ppm)
+{
+ u64 val = tai->period * abs_scaled_ppm >> 4;
+
+ return div_u64(val, (1000000 << 12) + (abs_scaled_ppm >> 4));
+}
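+/* Rough worked example (assuming the default 3 ns period set in
+ * mvpp22_tai_probe(), i.e. tai->period = 3ULL << 32): scaled_ppm = 65536
+ * (1 ppm) gives val of roughly 3 * 2^32 / 10^6, i.e. about 12885, which is a
+ * period correction of about 3e-6 ns per TOD step - as expected for 1 ppm of
+ * a 3 ns period.
+ */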
+
+static s32 mvpp22_calc_max_adj(struct mvpp2_tai *tai)
+{
+ return 1000000;
+}
+
+static int mvpp22_tai_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct mvpp2_tai *tai = ptp_to_tai(ptp);
+ unsigned long flags;
+ void __iomem *base;
+ bool neg_adj;
+ s32 frac;
+ u64 val;
+
+ neg_adj = scaled_ppm < 0;
+ if (neg_adj)
+ scaled_ppm = -scaled_ppm;
+
+ val = mvpp22_calc_frac_ppm(tai, scaled_ppm);
+
+ /* Convert to a signed 32-bit adjustment */
+ if (neg_adj) {
+ /* -S32_MIN warns, -val < S32_MIN fails, so go for the easy
+ * solution.
+ */
+ if (val > 0x80000000)
+ return -ERANGE;
+
+ frac = -val;
+ } else {
+ if (val > S32_MAX)
+ return -ERANGE;
+
+ frac = val;
+ }
+
+ base = tai->base;
+ spin_lock_irqsave(&tai->lock, flags);
+ mvpp2_tai_write(frac >> 16, base + MVPP22_TAI_TLV_FRAC_HIGH);
+ mvpp2_tai_write(frac, base + MVPP22_TAI_TLV_FRAC_LOW);
+ mvpp2_tai_op(TCFCR0_TCF_FREQUPDATE, base);
+ spin_unlock_irqrestore(&tai->lock, flags);
+
+ return 0;
+}
+
+static int mvpp22_tai_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct mvpp2_tai *tai = ptp_to_tai(ptp);
+ struct timespec64 ts;
+ unsigned long flags;
+ void __iomem *base;
+ u32 tcf;
+
+ /* We can't deal with S64_MIN */
+ if (delta == S64_MIN)
+ return -ERANGE;
+
+ if (delta < 0) {
+ delta = -delta;
+ tcf = TCFCR0_TCF_DECREMENT;
+ } else {
+ tcf = TCFCR0_TCF_INCREMENT;
+ }
+
+ ts = ns_to_timespec64(delta);
+
+ base = tai->base;
+ spin_lock_irqsave(&tai->lock, flags);
+ mvpp2_tai_write_tlv(&ts, 0, base);
+ mvpp2_tai_op(tcf, base);
+ spin_unlock_irqrestore(&tai->lock, flags);
+
+ return 0;
+}
+
+static int mvpp22_tai_gettimex64(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct mvpp2_tai *tai = ptp_to_tai(ptp);
+ unsigned long flags;
+ void __iomem *base;
+ u32 tcsr;
+ int ret;
+
+ base = tai->base;
+ spin_lock_irqsave(&tai->lock, flags);
+ /* XXX: the only way to read the PTP time is for the CPU to trigger
+ * an event. However, there is no way to distinguish between the CPU
+ * triggered event, and an external event on PTP_EVENT_REQ. So this
+ * is incompatible with external use of PTP_EVENT_REQ.
+ */
+ ptp_read_system_prets(sts);
+ mvpp2_tai_modify(base + MVPP22_TAI_TCFCR0,
+ TCFCR0_TCF_MASK | TCFCR0_TCF_TRIGGER,
+ TCFCR0_TCF_CAPTURE | TCFCR0_TCF_TRIGGER);
+ ptp_read_system_postts(sts);
+ mvpp2_tai_modify(base + MVPP22_TAI_TCFCR0, TCFCR0_TCF_MASK,
+ TCFCR0_TCF_NOP);
+
+ tcsr = readl(base + MVPP22_TAI_TCSR);
+ if (tcsr & TCSR_CAPTURE_1_VALID) {
+ mvpp22_tai_read_ts(ts, base + MVPP22_TAI_TCV1_SEC_HIGH);
+ ret = 0;
+ } else if (tcsr & TCSR_CAPTURE_0_VALID) {
+ mvpp22_tai_read_ts(ts, base + MVPP22_TAI_TCV0_SEC_HIGH);
+ ret = 0;
+ } else {
+ /* We don't seem to have a reading... */
+ ret = -EBUSY;
+ }
+ spin_unlock_irqrestore(&tai->lock, flags);
+
+ return ret;
+}
+
+static int mvpp22_tai_settime64(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct mvpp2_tai *tai = ptp_to_tai(ptp);
+ unsigned long flags;
+ void __iomem *base;
+
+ base = tai->base;
+ spin_lock_irqsave(&tai->lock, flags);
+ mvpp2_tai_write_tlv(ts, 0, base);
+
+ /* Trigger an update to load the value from the TLV registers
+ * into the TOD counter. Note that an external unmaskable event on
+ * PTP_EVENT_REQ will also trigger this action.
+ */
+ mvpp2_tai_modify(base + MVPP22_TAI_TCFCR0,
+ TCFCR0_PHASE_UPDATE_ENABLE |
+ TCFCR0_TCF_MASK | TCFCR0_TCF_TRIGGER,
+ TCFCR0_TCF_UPDATE | TCFCR0_TCF_TRIGGER);
+ mvpp2_tai_modify(base + MVPP22_TAI_TCFCR0, TCFCR0_TCF_MASK,
+ TCFCR0_TCF_NOP);
+ spin_unlock_irqrestore(&tai->lock, flags);
+
+ return 0;
+}
+
+static long mvpp22_tai_aux_work(struct ptp_clock_info *ptp)
+{
+ struct mvpp2_tai *tai = ptp_to_tai(ptp);
+
+ mvpp22_tai_gettimex64(ptp, &tai->stamp, NULL);
+
+ return msecs_to_jiffies(2000);
+}
+
+static void mvpp22_tai_set_step(struct mvpp2_tai *tai)
+{
+ void __iomem *base = tai->base;
+ u32 nano, frac;
+
+ nano = upper_32_bits(tai->period);
+ frac = lower_32_bits(tai->period);
+
+ /* As the fractional nanosecond is a signed offset, if the MSB (sign)
+ * bit is set, we have to increment the whole nanoseconds.
+ */
+ if (frac >= 0x80000000)
+ nano += 1;
+
+ mvpp2_tai_write(nano, base + MVPP22_TAI_TOD_STEP_NANO_CR);
+ mvpp2_tai_write(frac >> 16, base + MVPP22_TAI_TOD_STEP_FRAC_HIGH);
+ mvpp2_tai_write(frac, base + MVPP22_TAI_TOD_STEP_FRAC_LOW);
+}
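+/* For example (assuming the default 3 ns period set in mvpp22_tai_probe(),
+ * i.e. tai->period = 3ULL << 32): nano = 3 and frac = 0, so the TOD step is
+ * programmed as 3 whole nanoseconds with a zero fractional step.
+ */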
+
+static void mvpp22_tai_init(struct mvpp2_tai *tai)
+{
+ void __iomem *base = tai->base;
+
+ mvpp22_tai_set_step(tai);
+
+ /* Release the TAI reset */
+ mvpp2_tai_modify(base + MVPP22_TAI_CR0, CR0_SW_NRESET, CR0_SW_NRESET);
+}
+
+int mvpp22_tai_ptp_clock_index(struct mvpp2_tai *tai)
+{
+ return ptp_clock_index(tai->ptp_clock);
+}
+
+void mvpp22_tai_tstamp(struct mvpp2_tai *tai, u32 tstamp,
+ struct skb_shared_hwtstamps *hwtstamp)
+{
+ struct timespec64 ts;
+ int delta;
+
+ /* The tstamp consists of 2 bits of seconds and 30 bits of nanoseconds.
+ * We use our stored timestamp (tai->stamp) to form a full timestamp,
+ * and we must read the seconds exactly once.
+ */
+ ts.tv_sec = READ_ONCE(tai->stamp.tv_sec);
+ ts.tv_nsec = tstamp & 0x3fffffff;
+
+ /* Calculate the delta in seconds between our stored timestamp and
+ * the value read from the queue. Allow timestamps one second in the
+ * past, otherwise consider them to be in the future.
+ */
+ delta = ((tstamp >> 30) - (ts.tv_sec & 3)) & 3;
+ if (delta == 3)
+ delta -= 4;
+ ts.tv_sec += delta;
+
+ memset(hwtstamp, 0, sizeof(*hwtstamp));
+ hwtstamp->hwtstamp = timespec64_to_ktime(ts);
+}
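+/* Worked example of the wrap handling above (illustrative only): if the
+ * stored seconds end in binary 10 (2) and the queue timestamp carries 2-bit
+ * seconds of 01 (1), then delta = (1 - 2) & 3 = 3, which is folded to -1,
+ * i.e. the packet was stamped one second before the stored timestamp.
+ */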
+
+void mvpp22_tai_start(struct mvpp2_tai *tai)
+{
+ long delay;
+
+ delay = mvpp22_tai_aux_work(&tai->caps);
+
+ ptp_schedule_worker(tai->ptp_clock, delay);
+}
+
+void mvpp22_tai_stop(struct mvpp2_tai *tai)
+{
+ ptp_cancel_worker_sync(tai->ptp_clock);
+}
+
+static void mvpp22_tai_remove(void *priv)
+{
+ struct mvpp2_tai *tai = priv;
+
+ if (!IS_ERR(tai->ptp_clock))
+ ptp_clock_unregister(tai->ptp_clock);
+}
+
+int mvpp22_tai_probe(struct device *dev, struct mvpp2 *priv)
+{
+ struct mvpp2_tai *tai;
+ int ret;
+
+ tai = devm_kzalloc(dev, sizeof(*tai), GFP_KERNEL);
+ if (!tai)
+ return -ENOMEM;
+
+ spin_lock_init(&tai->lock);
+
+ tai->base = priv->iface_base;
+
+ /* The step size consists of three registers - a 16-bit nanosecond step
+ * size, and a 32-bit fractional nanosecond step size split over two
+ * registers. The fractional nanosecond step size has units of 2^-32ns.
+ *
+ * To calculate this, we calculate:
+ * (10^9 + freq / 2) / (freq * 2^-32)
+ * which gives us the nanosecond step to the nearest integer in 16.32
+ * fixed point format, and the fractional part of the step size with
+ * the MSB inverted. With rounding of the fractional nanosecond, and
+ * simplification, this becomes:
+ * (10^9 << 32 + freq << 31 + (freq + 1) >> 1) / freq
+ *
+ * So:
+ * div = (10^9 << 32 + freq << 31 + (freq + 1) >> 1) / freq
+ * nano = upper_32_bits(div);
+ * frac = lower_32_bits(div) ^ 0x80000000;
+ * Will give the values for the registers.
+ *
+ * This all seems perfect, but alas it is not when considering the
+ * whole story. The system is clocked from 25MHz, which is multiplied
+ * by a PLL to 1GHz, and then divided by three, giving 333333333Hz
+ * (recurring). This gives exactly 3ns, but using 333333333Hz with
+ * the above gives an error of 13*2^-32ns.
+ *
+ * Consequently, we use the period rather than calculating from the
+ * frequency.
+ */
+ tai->period = 3ULL << 32;
+
+ mvpp22_tai_init(tai);
+
+ tai->caps.owner = THIS_MODULE;
+ strscpy(tai->caps.name, "Marvell PP2.2", sizeof(tai->caps.name));
+ tai->caps.max_adj = mvpp22_calc_max_adj(tai);
+ tai->caps.adjfine = mvpp22_tai_adjfine;
+ tai->caps.adjtime = mvpp22_tai_adjtime;
+ tai->caps.gettimex64 = mvpp22_tai_gettimex64;
+ tai->caps.settime64 = mvpp22_tai_settime64;
+ tai->caps.do_aux_work = mvpp22_tai_aux_work;
+
+ ret = devm_add_action(dev, mvpp22_tai_remove, tai);
+ if (ret)
+ return ret;
+
+ tai->ptp_clock = ptp_clock_register(&tai->caps, dev);
+ if (IS_ERR(tai->ptp_clock))
+ return PTR_ERR(tai->ptp_clock);
+
+ priv->tai = tai;
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeon_ep/Kconfig b/drivers/net/ethernet/marvell/octeon_ep/Kconfig
new file mode 100644
index 000000000..0d7db8153
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/Kconfig
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Marvell's Octeon PCI Endpoint NIC Driver Configuration
+#
+
+config OCTEON_EP
+ tristate "Marvell Octeon PCI Endpoint NIC Driver"
+ depends on 64BIT
+ depends on PCI
+ depends on PTP_1588_CLOCK_OPTIONAL
+ help
+ This driver supports networking functionality of Marvell's
+ Octeon PCI Endpoint NIC.
+
+ To know the list of devices supported by this driver, refer to
+ the documentation in
+ <file:Documentation/networking/device_drivers/ethernet/marvell/octeon_ep.rst>.
+
+ To compile this driver as a module, choose M here. The name of
+ the module is octeon_ep.
diff --git a/drivers/net/ethernet/marvell/octeon_ep/Makefile b/drivers/net/ethernet/marvell/octeon_ep/Makefile
new file mode 100644
index 000000000..2026c8118
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Network driver for Marvell's Octeon PCI Endpoint NIC
+#
+
+obj-$(CONFIG_OCTEON_EP) += octeon_ep.o
+
+octeon_ep-y := octep_main.o octep_cn9k_pf.o octep_tx.o octep_rx.o \
+ octep_ethtool.o octep_ctrl_mbox.o octep_ctrl_net.o
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c b/drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c
new file mode 100644
index 000000000..90c3a4199
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c
@@ -0,0 +1,755 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+
+#include "octep_config.h"
+#include "octep_main.h"
+#include "octep_regs_cn9k_pf.h"
+
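+/* The 4MB control mailbox region is shared by up to 128 PFs, giving each
+ * PF a 32KB (0x400000 / 128) slice.
+ */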
+#define CTRL_MBOX_MAX_PF 128
+#define CTRL_MBOX_SZ ((size_t)(0x400000 / CTRL_MBOX_MAX_PF))
+
+#define FW_HB_INTERVAL_IN_SECS 1
+#define FW_HB_MISS_COUNT 10
+
+/* Names of Hardware non-queue generic interrupts */
+static char *cn93_non_ioq_msix_names[] = {
+ "epf_ire_rint",
+ "epf_ore_rint",
+ "epf_vfire_rint0",
+ "epf_vfire_rint1",
+ "epf_vfore_rint0",
+ "epf_vfore_rint1",
+ "epf_mbox_rint0",
+ "epf_mbox_rint1",
+ "epf_oei_rint",
+ "epf_dma_rint",
+ "epf_dma_vf_rint0",
+ "epf_dma_vf_rint1",
+ "epf_pp_vf_rint0",
+ "epf_pp_vf_rint1",
+ "epf_misc_rint",
+ "epf_rsvd",
+};
+
+/* Dump useful hardware CSRs for debugging purposes */
+static void cn93_dump_regs(struct octep_device *oct, int qno)
+{
+ struct device *dev = &oct->pdev->dev;
+
+ dev_info(dev, "IQ-%d register dump\n", qno);
+ dev_info(dev, "R[%d]_IN_INSTR_DBELL[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_IN_INSTR_DBELL(qno),
+ octep_read_csr64(oct, CN93_SDP_R_IN_INSTR_DBELL(qno)));
+ dev_info(dev, "R[%d]_IN_CONTROL[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_IN_CONTROL(qno),
+ octep_read_csr64(oct, CN93_SDP_R_IN_CONTROL(qno)));
+ dev_info(dev, "R[%d]_IN_ENABLE[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_IN_ENABLE(qno),
+ octep_read_csr64(oct, CN93_SDP_R_IN_ENABLE(qno)));
+ dev_info(dev, "R[%d]_IN_INSTR_BADDR[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_IN_INSTR_BADDR(qno),
+ octep_read_csr64(oct, CN93_SDP_R_IN_INSTR_BADDR(qno)));
+ dev_info(dev, "R[%d]_IN_INSTR_RSIZE[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_IN_INSTR_RSIZE(qno),
+ octep_read_csr64(oct, CN93_SDP_R_IN_INSTR_RSIZE(qno)));
+ dev_info(dev, "R[%d]_IN_CNTS[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_IN_CNTS(qno),
+ octep_read_csr64(oct, CN93_SDP_R_IN_CNTS(qno)));
+ dev_info(dev, "R[%d]_IN_INT_LEVELS[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_IN_INT_LEVELS(qno),
+ octep_read_csr64(oct, CN93_SDP_R_IN_INT_LEVELS(qno)));
+ dev_info(dev, "R[%d]_IN_PKT_CNT[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_IN_PKT_CNT(qno),
+ octep_read_csr64(oct, CN93_SDP_R_IN_PKT_CNT(qno)));
+ dev_info(dev, "R[%d]_IN_BYTE_CNT[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_IN_BYTE_CNT(qno),
+ octep_read_csr64(oct, CN93_SDP_R_IN_BYTE_CNT(qno)));
+
+ dev_info(dev, "OQ-%d register dump\n", qno);
+ dev_info(dev, "R[%d]_OUT_SLIST_DBELL[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_OUT_SLIST_DBELL(qno),
+ octep_read_csr64(oct, CN93_SDP_R_OUT_SLIST_DBELL(qno)));
+ dev_info(dev, "R[%d]_OUT_CONTROL[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_OUT_CONTROL(qno),
+ octep_read_csr64(oct, CN93_SDP_R_OUT_CONTROL(qno)));
+ dev_info(dev, "R[%d]_OUT_ENABLE[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_OUT_ENABLE(qno),
+ octep_read_csr64(oct, CN93_SDP_R_OUT_ENABLE(qno)));
+ dev_info(dev, "R[%d]_OUT_SLIST_BADDR[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_OUT_SLIST_BADDR(qno),
+ octep_read_csr64(oct, CN93_SDP_R_OUT_SLIST_BADDR(qno)));
+ dev_info(dev, "R[%d]_OUT_SLIST_RSIZE[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_OUT_SLIST_RSIZE(qno),
+ octep_read_csr64(oct, CN93_SDP_R_OUT_SLIST_RSIZE(qno)));
+ dev_info(dev, "R[%d]_OUT_CNTS[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_OUT_CNTS(qno),
+ octep_read_csr64(oct, CN93_SDP_R_OUT_CNTS(qno)));
+ dev_info(dev, "R[%d]_OUT_INT_LEVELS[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_OUT_INT_LEVELS(qno),
+ octep_read_csr64(oct, CN93_SDP_R_OUT_INT_LEVELS(qno)));
+ dev_info(dev, "R[%d]_OUT_PKT_CNT[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_OUT_PKT_CNT(qno),
+ octep_read_csr64(oct, CN93_SDP_R_OUT_PKT_CNT(qno)));
+ dev_info(dev, "R[%d]_OUT_BYTE_CNT[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_OUT_BYTE_CNT(qno),
+ octep_read_csr64(oct, CN93_SDP_R_OUT_BYTE_CNT(qno)));
+ dev_info(dev, "R[%d]_ERR_TYPE[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_ERR_TYPE(qno),
+ octep_read_csr64(oct, CN93_SDP_R_ERR_TYPE(qno)));
+}
+
+/* Reset Hardware Tx queue */
+static int cn93_reset_iq(struct octep_device *oct, int q_no)
+{
+ struct octep_config *conf = oct->conf;
+ u64 val = 0ULL;
+
+ dev_dbg(&oct->pdev->dev, "Reset PF IQ-%d\n", q_no);
+
+ /* Get absolute queue number */
+ q_no += conf->pf_ring_cfg.srn;
+
+ /* Disable the Tx/Instruction Ring */
+ octep_write_csr64(oct, CN93_SDP_R_IN_ENABLE(q_no), val);
+
+ /* clear the Instruction Ring packet/byte counts and doorbell CSRs */
+ octep_write_csr64(oct, CN93_SDP_R_IN_CNTS(q_no), val);
+ octep_write_csr64(oct, CN93_SDP_R_IN_INT_LEVELS(q_no), val);
+ octep_write_csr64(oct, CN93_SDP_R_IN_PKT_CNT(q_no), val);
+ octep_write_csr64(oct, CN93_SDP_R_IN_BYTE_CNT(q_no), val);
+ octep_write_csr64(oct, CN93_SDP_R_IN_INSTR_BADDR(q_no), val);
+ octep_write_csr64(oct, CN93_SDP_R_IN_INSTR_RSIZE(q_no), val);
+
+ val = 0xFFFFFFFF;
+ octep_write_csr64(oct, CN93_SDP_R_IN_INSTR_DBELL(q_no), val);
+
+ return 0;
+}
+
+/* Reset Hardware Rx queue */
+static void cn93_reset_oq(struct octep_device *oct, int q_no)
+{
+ u64 val = 0ULL;
+
+ q_no += CFG_GET_PORTS_PF_SRN(oct->conf);
+
+ /* Disable Output (Rx) Ring */
+ octep_write_csr64(oct, CN93_SDP_R_OUT_ENABLE(q_no), val);
+
+ /* Clear count CSRs */
+ val = octep_read_csr(oct, CN93_SDP_R_OUT_CNTS(q_no));
+ octep_write_csr(oct, CN93_SDP_R_OUT_CNTS(q_no), val);
+
+ octep_write_csr64(oct, CN93_SDP_R_OUT_PKT_CNT(q_no), 0xFFFFFFFFFULL);
+ octep_write_csr64(oct, CN93_SDP_R_OUT_SLIST_DBELL(q_no), 0xFFFFFFFF);
+}
+
+/* Reset all hardware Tx/Rx queues */
+static void octep_reset_io_queues_cn93_pf(struct octep_device *oct)
+{
+ struct pci_dev *pdev = oct->pdev;
+ int q;
+
+ dev_dbg(&pdev->dev, "Reset OCTEP_CN93 PF IO Queues\n");
+
+ for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
+ cn93_reset_iq(oct, q);
+ cn93_reset_oq(oct, q);
+ }
+}
+
+/* Initialize windowed addresses to access some hardware registers */
+static void octep_setup_pci_window_regs_cn93_pf(struct octep_device *oct)
+{
+ u8 __iomem *bar0_pciaddr = oct->mmio[0].hw_addr;
+
+ oct->pci_win_regs.pci_win_wr_addr = (u8 __iomem *)(bar0_pciaddr + CN93_SDP_WIN_WR_ADDR64);
+ oct->pci_win_regs.pci_win_rd_addr = (u8 __iomem *)(bar0_pciaddr + CN93_SDP_WIN_RD_ADDR64);
+ oct->pci_win_regs.pci_win_wr_data = (u8 __iomem *)(bar0_pciaddr + CN93_SDP_WIN_WR_DATA64);
+ oct->pci_win_regs.pci_win_rd_data = (u8 __iomem *)(bar0_pciaddr + CN93_SDP_WIN_RD_DATA64);
+}
+
+/* Configure Hardware mapping: inform hardware which rings belong to PF. */
+static void octep_configure_ring_mapping_cn93_pf(struct octep_device *oct)
+{
+ struct octep_config *conf = oct->conf;
+ struct pci_dev *pdev = oct->pdev;
+ u64 pf_srn = CFG_GET_PORTS_PF_SRN(oct->conf);
+ int q;
+
+ for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(conf); q++) {
+ u64 regval = 0;
+
+ if (oct->pcie_port)
+ regval = 8 << CN93_SDP_FUNC_SEL_EPF_BIT_POS;
+
+ octep_write_csr64(oct, CN93_SDP_EPVF_RING(pf_srn + q), regval);
+
+ regval = octep_read_csr64(oct, CN93_SDP_EPVF_RING(pf_srn + q));
+ dev_dbg(&pdev->dev, "Write SDP_EPVF_RING[0x%llx] = 0x%llx\n",
+ CN93_SDP_EPVF_RING(pf_srn + q), regval);
+ }
+}
+
+/* Initialize configuration limits and initial active config for 93xx PF. */
+static void octep_init_config_cn93_pf(struct octep_device *oct)
+{
+ struct octep_config *conf = oct->conf;
+ struct pci_dev *pdev = oct->pdev;
+ u8 link = 0;
+ u64 val;
+ int pos;
+
+ /* Read ring configuration:
+ * PF ring count, number of VFs and rings per VF supported
+ */
+ val = octep_read_csr64(oct, CN93_SDP_EPF_RINFO);
+ conf->sriov_cfg.max_rings_per_vf = CN93_SDP_EPF_RINFO_RPVF(val);
+ conf->sriov_cfg.active_rings_per_vf = conf->sriov_cfg.max_rings_per_vf;
+ conf->sriov_cfg.max_vfs = CN93_SDP_EPF_RINFO_NVFS(val);
+ conf->sriov_cfg.active_vfs = conf->sriov_cfg.max_vfs;
+ conf->sriov_cfg.vf_srn = CN93_SDP_EPF_RINFO_SRN(val);
+
+ val = octep_read_csr64(oct, CN93_SDP_MAC_PF_RING_CTL(oct->pcie_port));
+ conf->pf_ring_cfg.srn = CN93_SDP_MAC_PF_RING_CTL_SRN(val);
+ conf->pf_ring_cfg.max_io_rings = CN93_SDP_MAC_PF_RING_CTL_RPPF(val);
+ conf->pf_ring_cfg.active_io_rings = conf->pf_ring_cfg.max_io_rings;
+ dev_info(&pdev->dev, "pf_srn=%u rpvf=%u nvfs=%u rppf=%u\n",
+ conf->pf_ring_cfg.srn, conf->sriov_cfg.active_rings_per_vf,
+ conf->sriov_cfg.active_vfs, conf->pf_ring_cfg.active_io_rings);
+
+ conf->iq.num_descs = OCTEP_IQ_MAX_DESCRIPTORS;
+ conf->iq.instr_type = OCTEP_64BYTE_INSTR;
+ conf->iq.pkind = 0;
+ conf->iq.db_min = OCTEP_DB_MIN;
+ conf->iq.intr_threshold = OCTEP_IQ_INTR_THRESHOLD;
+
+ conf->oq.num_descs = OCTEP_OQ_MAX_DESCRIPTORS;
+ conf->oq.buf_size = OCTEP_OQ_BUF_SIZE;
+ conf->oq.refill_threshold = OCTEP_OQ_REFILL_THRESHOLD;
+ conf->oq.oq_intr_pkt = OCTEP_OQ_INTR_PKT_THRESHOLD;
+ conf->oq.oq_intr_time = OCTEP_OQ_INTR_TIME_THRESHOLD;
+
+ conf->msix_cfg.non_ioq_msix = CN93_NUM_NON_IOQ_INTR;
+ conf->msix_cfg.ioq_msix = conf->pf_ring_cfg.active_io_rings;
+ conf->msix_cfg.non_ioq_msix_names = cn93_non_ioq_msix_names;
+
+ pos = pci_find_ext_capability(oct->pdev, PCI_EXT_CAP_ID_SRIOV);
+ if (pos) {
+ pci_read_config_byte(oct->pdev,
+ pos + PCI_SRIOV_FUNC_LINK,
+ &link);
+ link = PCI_DEVFN(PCI_SLOT(oct->pdev->devfn), link);
+ }
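+	/* The control mailbox lives in the BAR mapped at oct->mmio[2], at a
+	 * fixed 7 * 4MB offset; each function owns a CTRL_MBOX_SZ slice
+	 * selected by the SR-IOV function link number read above (0 when the
+	 * SR-IOV capability is absent).
+	 */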
+ conf->ctrl_mbox_cfg.barmem_addr = (void __iomem *)oct->mmio[2].hw_addr +
+ (0x400000ull * 7) +
+ (link * CTRL_MBOX_SZ);
+
+ conf->hb_interval = FW_HB_INTERVAL_IN_SECS;
+ conf->max_hb_miss_cnt = FW_HB_MISS_COUNT;
+
+}
+
+/* Setup registers for a hardware Tx Queue */
+static void octep_setup_iq_regs_cn93_pf(struct octep_device *oct, int iq_no)
+{
+ struct octep_iq *iq = oct->iq[iq_no];
+ u32 reset_instr_cnt;
+ u64 reg_val;
+
+ iq_no += CFG_GET_PORTS_PF_SRN(oct->conf);
+ reg_val = octep_read_csr64(oct, CN93_SDP_R_IN_CONTROL(iq_no));
+
+ /* wait for IDLE to set to 1 */
+ if (!(reg_val & CN93_R_IN_CTL_IDLE)) {
+ do {
+ reg_val = octep_read_csr64(oct, CN93_SDP_R_IN_CONTROL(iq_no));
+ } while (!(reg_val & CN93_R_IN_CTL_IDLE));
+ }
+
+ reg_val |= CN93_R_IN_CTL_RDSIZE;
+ reg_val |= CN93_R_IN_CTL_IS_64B;
+ reg_val |= CN93_R_IN_CTL_ESR;
+ octep_write_csr64(oct, CN93_SDP_R_IN_CONTROL(iq_no), reg_val);
+
+ /* Write the start of the input queue's ring and its size */
+ octep_write_csr64(oct, CN93_SDP_R_IN_INSTR_BADDR(iq_no),
+ iq->desc_ring_dma);
+ octep_write_csr64(oct, CN93_SDP_R_IN_INSTR_RSIZE(iq_no),
+ iq->max_count);
+
+ /* Remember the doorbell & instruction count register addr
+ * for this queue
+ */
+ iq->doorbell_reg = oct->mmio[0].hw_addr +
+ CN93_SDP_R_IN_INSTR_DBELL(iq_no);
+ iq->inst_cnt_reg = oct->mmio[0].hw_addr +
+ CN93_SDP_R_IN_CNTS(iq_no);
+ iq->intr_lvl_reg = oct->mmio[0].hw_addr +
+ CN93_SDP_R_IN_INT_LEVELS(iq_no);
+
+ /* Store the current instruction counter (used in flush_iq calculation) */
+ reset_instr_cnt = readl(iq->inst_cnt_reg);
+ writel(reset_instr_cnt, iq->inst_cnt_reg);
+
+ /* INTR_THRESHOLD is set to max(FFFFFFFF) to disable the INTR */
+ reg_val = CFG_GET_IQ_INTR_THRESHOLD(oct->conf) & 0xffffffff;
+ octep_write_csr64(oct, CN93_SDP_R_IN_INT_LEVELS(iq_no), reg_val);
+}
+
+/* Setup registers for a hardware Rx Queue */
+static void octep_setup_oq_regs_cn93_pf(struct octep_device *oct, int oq_no)
+{
+ u64 reg_val;
+ u64 oq_ctl = 0ULL;
+ u32 time_threshold = 0;
+ struct octep_oq *oq = oct->oq[oq_no];
+
+ oq_no += CFG_GET_PORTS_PF_SRN(oct->conf);
+ reg_val = octep_read_csr64(oct, CN93_SDP_R_OUT_CONTROL(oq_no));
+
+ /* wait for IDLE to set to 1 */
+ if (!(reg_val & CN93_R_OUT_CTL_IDLE)) {
+ do {
+ reg_val = octep_read_csr64(oct, CN93_SDP_R_OUT_CONTROL(oq_no));
+ } while (!(reg_val & CN93_R_OUT_CTL_IDLE));
+ }
+
+ reg_val &= ~(CN93_R_OUT_CTL_IMODE);
+ reg_val &= ~(CN93_R_OUT_CTL_ROR_P);
+ reg_val &= ~(CN93_R_OUT_CTL_NSR_P);
+ reg_val &= ~(CN93_R_OUT_CTL_ROR_I);
+ reg_val &= ~(CN93_R_OUT_CTL_NSR_I);
+ reg_val &= ~(CN93_R_OUT_CTL_ES_I);
+ reg_val &= ~(CN93_R_OUT_CTL_ROR_D);
+ reg_val &= ~(CN93_R_OUT_CTL_NSR_D);
+ reg_val &= ~(CN93_R_OUT_CTL_ES_D);
+ reg_val |= (CN93_R_OUT_CTL_ES_P);
+
+ octep_write_csr64(oct, CN93_SDP_R_OUT_CONTROL(oq_no), reg_val);
+ octep_write_csr64(oct, CN93_SDP_R_OUT_SLIST_BADDR(oq_no),
+ oq->desc_ring_dma);
+ octep_write_csr64(oct, CN93_SDP_R_OUT_SLIST_RSIZE(oq_no),
+ oq->max_count);
+
+ oq_ctl = octep_read_csr64(oct, CN93_SDP_R_OUT_CONTROL(oq_no));
+	oq_ctl &= ~0x7fffffULL;	/* clear the ISIZE and BSIZE (bits 22:0) */
+	oq_ctl |= (oq->buffer_size & 0xffff);	/* populate the BSIZE (bits 15:0) */
+ octep_write_csr64(oct, CN93_SDP_R_OUT_CONTROL(oq_no), oq_ctl);
+
+ /* Get the mapped address of the pkt_sent and pkts_credit regs */
+ oq->pkts_sent_reg = oct->mmio[0].hw_addr + CN93_SDP_R_OUT_CNTS(oq_no);
+ oq->pkts_credit_reg = oct->mmio[0].hw_addr +
+ CN93_SDP_R_OUT_SLIST_DBELL(oq_no);
+
+ time_threshold = CFG_GET_OQ_INTR_TIME(oct->conf);
+ reg_val = ((u64)time_threshold << 32) |
+ CFG_GET_OQ_INTR_PKT(oct->conf);
+ octep_write_csr64(oct, CN93_SDP_R_OUT_INT_LEVELS(oq_no), reg_val);
+}
+
+/* Setup registers for a PF mailbox */
+static void octep_setup_mbox_regs_cn93_pf(struct octep_device *oct, int q_no)
+{
+ struct octep_mbox *mbox = oct->mbox[q_no];
+
+ mbox->q_no = q_no;
+
+ /* PF mbox interrupt reg */
+ mbox->mbox_int_reg = oct->mmio[0].hw_addr + CN93_SDP_EPF_MBOX_RINT(0);
+
+ /* PF to VF DATA reg. PF writes into this reg */
+ mbox->mbox_write_reg = oct->mmio[0].hw_addr + CN93_SDP_R_MBOX_PF_VF_DATA(q_no);
+
+ /* VF to PF DATA reg. PF reads from this reg */
+ mbox->mbox_read_reg = oct->mmio[0].hw_addr + CN93_SDP_R_MBOX_VF_PF_DATA(q_no);
+}
+
+/* Process non-IOQ interrupts required to keep the PF interface running.
+ * OEI_RINT is needed for the control mailbox.
+ */
+static bool octep_poll_non_ioq_interrupts_cn93_pf(struct octep_device *oct)
+{
+ bool handled = false;
+ u64 reg0;
+
+ /* Check for OEI INTR */
+ reg0 = octep_read_csr64(oct, CN93_SDP_EPF_OEI_RINT);
+ if (reg0) {
+ dev_info(&oct->pdev->dev,
+ "Received OEI_RINT intr: 0x%llx\n",
+ reg0);
+ octep_write_csr64(oct, CN93_SDP_EPF_OEI_RINT, reg0);
+ if (reg0 & CN93_SDP_EPF_OEI_RINT_DATA_BIT_MBOX)
+ queue_work(octep_wq, &oct->ctrl_mbox_task);
+ else if (reg0 & CN93_SDP_EPF_OEI_RINT_DATA_BIT_HBEAT)
+ atomic_set(&oct->hb_miss_cnt, 0);
+
+ handled = true;
+ }
+
+ return handled;
+}
+
+/* Interrupts handler for all non-queue generic interrupts. */
+static irqreturn_t octep_non_ioq_intr_handler_cn93_pf(void *dev)
+{
+ struct octep_device *oct = (struct octep_device *)dev;
+ struct pci_dev *pdev = oct->pdev;
+ u64 reg_val = 0;
+ int i = 0;
+
+ /* Check for IRERR INTR */
+ reg_val = octep_read_csr64(oct, CN93_SDP_EPF_IRERR_RINT);
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "received IRERR_RINT intr: 0x%llx\n", reg_val);
+ octep_write_csr64(oct, CN93_SDP_EPF_IRERR_RINT, reg_val);
+
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) {
+ reg_val = octep_read_csr64(oct,
+ CN93_SDP_R_ERR_TYPE(i));
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "Received err type on IQ-%d: 0x%llx\n",
+ i, reg_val);
+ octep_write_csr64(oct, CN93_SDP_R_ERR_TYPE(i),
+ reg_val);
+ }
+ }
+ goto irq_handled;
+ }
+
+ /* Check for ORERR INTR */
+ reg_val = octep_read_csr64(oct, CN93_SDP_EPF_ORERR_RINT);
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "Received ORERR_RINT intr: 0x%llx\n", reg_val);
+ octep_write_csr64(oct, CN93_SDP_EPF_ORERR_RINT, reg_val);
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) {
+ reg_val = octep_read_csr64(oct, CN93_SDP_R_ERR_TYPE(i));
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "Received err type on OQ-%d: 0x%llx\n",
+ i, reg_val);
+ octep_write_csr64(oct, CN93_SDP_R_ERR_TYPE(i),
+ reg_val);
+ }
+ }
+
+ goto irq_handled;
+ }
+
+ /* Check for VFIRE INTR */
+ reg_val = octep_read_csr64(oct, CN93_SDP_EPF_VFIRE_RINT(0));
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "Received VFIRE_RINT intr: 0x%llx\n", reg_val);
+ octep_write_csr64(oct, CN93_SDP_EPF_VFIRE_RINT(0), reg_val);
+ goto irq_handled;
+ }
+
+ /* Check for VFORE INTR */
+ reg_val = octep_read_csr64(oct, CN93_SDP_EPF_VFORE_RINT(0));
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "Received VFORE_RINT intr: 0x%llx\n", reg_val);
+ octep_write_csr64(oct, CN93_SDP_EPF_VFORE_RINT(0), reg_val);
+ goto irq_handled;
+ }
+
+ /* Check for MBOX INTR and OEI INTR */
+ if (octep_poll_non_ioq_interrupts_cn93_pf(oct))
+ goto irq_handled;
+
+ /* Check for DMA INTR */
+ reg_val = octep_read_csr64(oct, CN93_SDP_EPF_DMA_RINT);
+ if (reg_val) {
+ octep_write_csr64(oct, CN93_SDP_EPF_DMA_RINT, reg_val);
+ goto irq_handled;
+ }
+
+ /* Check for DMA VF INTR */
+ reg_val = octep_read_csr64(oct, CN93_SDP_EPF_DMA_VF_RINT(0));
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "Received DMA_VF_RINT intr: 0x%llx\n", reg_val);
+ octep_write_csr64(oct, CN93_SDP_EPF_DMA_VF_RINT(0), reg_val);
+ goto irq_handled;
+ }
+
+ /* Check for PPVF INTR */
+ reg_val = octep_read_csr64(oct, CN93_SDP_EPF_PP_VF_RINT(0));
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "Received PP_VF_RINT intr: 0x%llx\n", reg_val);
+ octep_write_csr64(oct, CN93_SDP_EPF_PP_VF_RINT(0), reg_val);
+ goto irq_handled;
+ }
+
+ /* Check for MISC INTR */
+ reg_val = octep_read_csr64(oct, CN93_SDP_EPF_MISC_RINT);
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "Received MISC_RINT intr: 0x%llx\n", reg_val);
+ octep_write_csr64(oct, CN93_SDP_EPF_MISC_RINT, reg_val);
+ goto irq_handled;
+ }
+
+ dev_info(&pdev->dev, "Reserved interrupts raised; Ignore\n");
+irq_handled:
+ return IRQ_HANDLED;
+}
+
+/* Tx/Rx queue interrupt handler */
+static irqreturn_t octep_ioq_intr_handler_cn93_pf(void *data)
+{
+ struct octep_ioq_vector *vector = (struct octep_ioq_vector *)data;
+ struct octep_oq *oq = vector->oq;
+
+ napi_schedule_irqoff(oq->napi);
+ return IRQ_HANDLED;
+}
+
+/* soft reset of 93xx */
+static int octep_soft_reset_cn93_pf(struct octep_device *oct)
+{
+ dev_info(&oct->pdev->dev, "CN93XX: Doing soft reset\n");
+
+ octep_write_csr64(oct, CN93_SDP_WIN_WR_MASK_REG, 0xFF);
+
+ /* Set core domain reset bit */
+ OCTEP_PCI_WIN_WRITE(oct, CN93_RST_CORE_DOMAIN_W1S, 1);
+ /* Wait for 100ms as Octeon resets. */
+ mdelay(100);
+ /* clear core domain reset bit */
+ OCTEP_PCI_WIN_WRITE(oct, CN93_RST_CORE_DOMAIN_W1C, 1);
+
+ return 0;
+}
+
+/* Re-initialize Octeon hardware registers */
+static void octep_reinit_regs_cn93_pf(struct octep_device *oct)
+{
+ u32 i;
+
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++)
+ oct->hw_ops.setup_iq_regs(oct, i);
+
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++)
+ oct->hw_ops.setup_oq_regs(oct, i);
+
+ oct->hw_ops.enable_interrupts(oct);
+ oct->hw_ops.enable_io_queues(oct);
+
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++)
+ writel(oct->oq[i]->max_count, oct->oq[i]->pkts_credit_reg);
+}
+
+/* Enable all interrupts */
+static void octep_enable_interrupts_cn93_pf(struct octep_device *oct)
+{
+ u64 intr_mask = 0ULL;
+ int srn, num_rings, i;
+
+ srn = CFG_GET_PORTS_PF_SRN(oct->conf);
+ num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+
+ for (i = 0; i < num_rings; i++)
+ intr_mask |= (0x1ULL << (srn + i));
+
+ octep_write_csr64(oct, CN93_SDP_EPF_IRERR_RINT_ENA_W1S, intr_mask);
+ octep_write_csr64(oct, CN93_SDP_EPF_ORERR_RINT_ENA_W1S, intr_mask);
+ octep_write_csr64(oct, CN93_SDP_EPF_OEI_RINT_ENA_W1S, -1ULL);
+ octep_write_csr64(oct, CN93_SDP_EPF_MISC_RINT_ENA_W1S, intr_mask);
+ octep_write_csr64(oct, CN93_SDP_EPF_DMA_RINT_ENA_W1S, intr_mask);
+}
+
+/* Disable all interrupts */
+static void octep_disable_interrupts_cn93_pf(struct octep_device *oct)
+{
+ u64 intr_mask = 0ULL;
+ int srn, num_rings, i;
+
+ srn = CFG_GET_PORTS_PF_SRN(oct->conf);
+ num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+
+ for (i = 0; i < num_rings; i++)
+ intr_mask |= (0x1ULL << (srn + i));
+
+ octep_write_csr64(oct, CN93_SDP_EPF_IRERR_RINT_ENA_W1C, intr_mask);
+ octep_write_csr64(oct, CN93_SDP_EPF_ORERR_RINT_ENA_W1C, intr_mask);
+ octep_write_csr64(oct, CN93_SDP_EPF_OEI_RINT_ENA_W1C, -1ULL);
+ octep_write_csr64(oct, CN93_SDP_EPF_MISC_RINT_ENA_W1C, intr_mask);
+ octep_write_csr64(oct, CN93_SDP_EPF_DMA_RINT_ENA_W1C, intr_mask);
+}
+
+/* Get new Octeon Read Index: index of descriptor that Octeon reads next. */
+static u32 octep_update_iq_read_index_cn93_pf(struct octep_iq *iq)
+{
+ u32 pkt_in_done = readl(iq->inst_cnt_reg);
+ u32 last_done, new_idx;
+
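+	/* Unsigned 32-bit subtraction gives the number of instructions the
+	 * hardware completed since the last call, even if the counter wrapped.
+	 */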
+ last_done = pkt_in_done - iq->pkt_in_done;
+ iq->pkt_in_done = pkt_in_done;
+
+ new_idx = (iq->octep_read_index + last_done) % iq->max_count;
+
+ return new_idx;
+}
+
+/* Enable a hardware Tx Queue */
+static void octep_enable_iq_cn93_pf(struct octep_device *oct, int iq_no)
+{
+ u64 loop = HZ;
+ u64 reg_val;
+
+ iq_no += CFG_GET_PORTS_PF_SRN(oct->conf);
+
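+	/* Writing all-ones clears the doorbell count (as in cn93_reset_iq);
+	 * poll for up to ~1 second (HZ one-jiffy sleeps) for it to read back
+	 * zero before enabling the ring.
+	 */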
+ octep_write_csr64(oct, CN93_SDP_R_IN_INSTR_DBELL(iq_no), 0xFFFFFFFF);
+
+ while (octep_read_csr64(oct, CN93_SDP_R_IN_INSTR_DBELL(iq_no)) &&
+ loop--) {
+ schedule_timeout_interruptible(1);
+ }
+
+ reg_val = octep_read_csr64(oct, CN93_SDP_R_IN_INT_LEVELS(iq_no));
+ reg_val |= (0x1ULL << 62);
+ octep_write_csr64(oct, CN93_SDP_R_IN_INT_LEVELS(iq_no), reg_val);
+
+ reg_val = octep_read_csr64(oct, CN93_SDP_R_IN_ENABLE(iq_no));
+ reg_val |= 0x1ULL;
+ octep_write_csr64(oct, CN93_SDP_R_IN_ENABLE(iq_no), reg_val);
+}
+
+/* Enable a hardware Rx Queue */
+static void octep_enable_oq_cn93_pf(struct octep_device *oct, int oq_no)
+{
+ u64 reg_val = 0ULL;
+
+ oq_no += CFG_GET_PORTS_PF_SRN(oct->conf);
+
+ reg_val = octep_read_csr64(oct, CN93_SDP_R_OUT_INT_LEVELS(oq_no));
+ reg_val |= (0x1ULL << 62);
+ octep_write_csr64(oct, CN93_SDP_R_OUT_INT_LEVELS(oq_no), reg_val);
+
+ octep_write_csr64(oct, CN93_SDP_R_OUT_SLIST_DBELL(oq_no), 0xFFFFFFFF);
+
+ reg_val = octep_read_csr64(oct, CN93_SDP_R_OUT_ENABLE(oq_no));
+ reg_val |= 0x1ULL;
+ octep_write_csr64(oct, CN93_SDP_R_OUT_ENABLE(oq_no), reg_val);
+}
+
+/* Enable all hardware Tx/Rx Queues assigned to PF */
+static void octep_enable_io_queues_cn93_pf(struct octep_device *oct)
+{
+ u8 q;
+
+ for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
+ octep_enable_iq_cn93_pf(oct, q);
+ octep_enable_oq_cn93_pf(oct, q);
+ }
+}
+
+/* Disable a hardware Tx Queue assigned to PF */
+static void octep_disable_iq_cn93_pf(struct octep_device *oct, int iq_no)
+{
+ u64 reg_val = 0ULL;
+
+ iq_no += CFG_GET_PORTS_PF_SRN(oct->conf);
+
+ reg_val = octep_read_csr64(oct, CN93_SDP_R_IN_ENABLE(iq_no));
+ reg_val &= ~0x1ULL;
+ octep_write_csr64(oct, CN93_SDP_R_IN_ENABLE(iq_no), reg_val);
+}
+
+/* Disable a hardware Rx Queue assigned to PF */
+static void octep_disable_oq_cn93_pf(struct octep_device *oct, int oq_no)
+{
+ u64 reg_val = 0ULL;
+
+ oq_no += CFG_GET_PORTS_PF_SRN(oct->conf);
+ reg_val = octep_read_csr64(oct, CN93_SDP_R_OUT_ENABLE(oq_no));
+ reg_val &= ~0x1ULL;
+ octep_write_csr64(oct, CN93_SDP_R_OUT_ENABLE(oq_no), reg_val);
+}
+
+/* Disable all hardware Tx/Rx Queues assigned to PF */
+static void octep_disable_io_queues_cn93_pf(struct octep_device *oct)
+{
+ int q = 0;
+
+ for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
+ octep_disable_iq_cn93_pf(oct, q);
+ octep_disable_oq_cn93_pf(oct, q);
+ }
+}
+
+/* Dump hardware registers (including Tx/Rx queues) for debugging. */
+static void octep_dump_registers_cn93_pf(struct octep_device *oct)
+{
+ u8 srn, num_rings, q;
+
+ srn = CFG_GET_PORTS_PF_SRN(oct->conf);
+ num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+
+ for (q = srn; q < srn + num_rings; q++)
+ cn93_dump_regs(oct, q);
+}
+
+/**
+ * octep_device_setup_cn93_pf() - Setup Octeon device.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * - initialize hardware operations.
+ * - get target side pcie port number for the device.
+ * - setup window access to hardware registers.
+ * - set initial configuration and max limits.
+ * - setup hardware mapping of rings to the PF device.
+ */
+void octep_device_setup_cn93_pf(struct octep_device *oct)
+{
+ oct->hw_ops.setup_iq_regs = octep_setup_iq_regs_cn93_pf;
+ oct->hw_ops.setup_oq_regs = octep_setup_oq_regs_cn93_pf;
+ oct->hw_ops.setup_mbox_regs = octep_setup_mbox_regs_cn93_pf;
+
+ oct->hw_ops.non_ioq_intr_handler = octep_non_ioq_intr_handler_cn93_pf;
+ oct->hw_ops.ioq_intr_handler = octep_ioq_intr_handler_cn93_pf;
+ oct->hw_ops.soft_reset = octep_soft_reset_cn93_pf;
+ oct->hw_ops.reinit_regs = octep_reinit_regs_cn93_pf;
+
+ oct->hw_ops.enable_interrupts = octep_enable_interrupts_cn93_pf;
+ oct->hw_ops.disable_interrupts = octep_disable_interrupts_cn93_pf;
+ oct->hw_ops.poll_non_ioq_interrupts = octep_poll_non_ioq_interrupts_cn93_pf;
+
+ oct->hw_ops.update_iq_read_idx = octep_update_iq_read_index_cn93_pf;
+
+ oct->hw_ops.enable_iq = octep_enable_iq_cn93_pf;
+ oct->hw_ops.enable_oq = octep_enable_oq_cn93_pf;
+ oct->hw_ops.enable_io_queues = octep_enable_io_queues_cn93_pf;
+
+ oct->hw_ops.disable_iq = octep_disable_iq_cn93_pf;
+ oct->hw_ops.disable_oq = octep_disable_oq_cn93_pf;
+ oct->hw_ops.disable_io_queues = octep_disable_io_queues_cn93_pf;
+ oct->hw_ops.reset_io_queues = octep_reset_io_queues_cn93_pf;
+
+ oct->hw_ops.dump_registers = octep_dump_registers_cn93_pf;
+
+ octep_setup_pci_window_regs_cn93_pf(oct);
+
+ oct->pcie_port = octep_read_csr64(oct, CN93_SDP_MAC_NUMBER) & 0xff;
+ dev_info(&oct->pdev->dev,
+ "Octeon device using PCIE Port %d\n", oct->pcie_port);
+
+ octep_init_config_cn93_pf(oct);
+ octep_configure_ring_mapping_cn93_pf(oct);
+}
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_config.h b/drivers/net/ethernet/marvell/octeon_ep/octep_config.h
new file mode 100644
index 000000000..df7cd39d9
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_config.h
@@ -0,0 +1,210 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#ifndef _OCTEP_CONFIG_H_
+#define _OCTEP_CONFIG_H_
+
+/* Tx instruction types by length */
+#define OCTEP_32BYTE_INSTR 32
+#define OCTEP_64BYTE_INSTR 64
+
+/* Tx Queue: maximum descriptors per ring */
+#define OCTEP_IQ_MAX_DESCRIPTORS 1024
+/* Minimum input (Tx) requests to be enqueued to ring doorbell */
+#define OCTEP_DB_MIN 1
+/* Packet threshold for Tx queue interrupt */
+#define OCTEP_IQ_INTR_THRESHOLD 0x0
+
+/* Rx Queue: maximum descriptors per ring */
+#define OCTEP_OQ_MAX_DESCRIPTORS 1024
+
+/* Rx buffer size: Use page size buffers.
+ * Build skb from allocated page buffer once the packet is received.
+ * When a gathered packet is received, make head page as skb head and
+ * page buffers in consecutive Rx descriptors as fragments.
+ */
+#define OCTEP_OQ_BUF_SIZE (SKB_WITH_OVERHEAD(PAGE_SIZE))
+#define OCTEP_OQ_PKTS_PER_INTR 128
+#define OCTEP_OQ_REFILL_THRESHOLD (OCTEP_OQ_MAX_DESCRIPTORS / 4)
+
+#define OCTEP_OQ_INTR_PKT_THRESHOLD 1
+#define OCTEP_OQ_INTR_TIME_THRESHOLD 10
+
+#define OCTEP_MSIX_NAME_SIZE (IFNAMSIZ + 32)
+
+/* Tx Queue wake threshold
+ * Wake up a stopped Tx queue if at least 2 descriptors are available.
+ * Even an skb with fragments consumes only one Tx queue descriptor entry.
+ */
+#define OCTEP_WAKE_QUEUE_THRESHOLD 2
+
+/* Minimum MTU supported by Octeon network interface */
+#define OCTEP_MIN_MTU ETH_MIN_MTU
+/* Maximum MTU supported by Octeon interface */
+#define OCTEP_MAX_MTU (10000 - (ETH_HLEN + ETH_FCS_LEN))
+/* Default MTU */
+#define OCTEP_DEFAULT_MTU 1500
+
+/* Macros to get octeon config params */
+#define CFG_GET_IQ_CFG(cfg) ((cfg)->iq)
+#define CFG_GET_IQ_NUM_DESC(cfg) ((cfg)->iq.num_descs)
+#define CFG_GET_IQ_INSTR_TYPE(cfg) ((cfg)->iq.instr_type)
+#define CFG_GET_IQ_PKIND(cfg) ((cfg)->iq.pkind)
+#define CFG_GET_IQ_INSTR_SIZE(cfg) (64)
+#define CFG_GET_IQ_DB_MIN(cfg) ((cfg)->iq.db_min)
+#define CFG_GET_IQ_INTR_THRESHOLD(cfg) ((cfg)->iq.intr_threshold)
+
+#define CFG_GET_OQ_NUM_DESC(cfg) ((cfg)->oq.num_descs)
+#define CFG_GET_OQ_BUF_SIZE(cfg) ((cfg)->oq.buf_size)
+#define CFG_GET_OQ_REFILL_THRESHOLD(cfg) ((cfg)->oq.refill_threshold)
+#define CFG_GET_OQ_INTR_PKT(cfg) ((cfg)->oq.oq_intr_pkt)
+#define CFG_GET_OQ_INTR_TIME(cfg) ((cfg)->oq.oq_intr_time)
+
+#define CFG_GET_PORTS_MAX_IO_RINGS(cfg) ((cfg)->pf_ring_cfg.max_io_rings)
+#define CFG_GET_PORTS_ACTIVE_IO_RINGS(cfg) ((cfg)->pf_ring_cfg.active_io_rings)
+#define CFG_GET_PORTS_PF_SRN(cfg) ((cfg)->pf_ring_cfg.srn)
+
+#define CFG_GET_DPI_PKIND(cfg) ((cfg)->core_cfg.dpi_pkind)
+#define CFG_GET_CORE_TICS_PER_US(cfg) ((cfg)->core_cfg.core_tics_per_us)
+#define CFG_GET_COPROC_TICS_PER_US(cfg) ((cfg)->core_cfg.coproc_tics_per_us)
+
+#define CFG_GET_MAX_VFS(cfg) ((cfg)->sriov_cfg.max_vfs)
+#define CFG_GET_ACTIVE_VFS(cfg) ((cfg)->sriov_cfg.active_vfs)
+#define CFG_GET_MAX_RPVF(cfg) ((cfg)->sriov_cfg.max_rings_per_vf)
+#define CFG_GET_ACTIVE_RPVF(cfg) ((cfg)->sriov_cfg.active_rings_per_vf)
+#define CFG_GET_VF_SRN(cfg) ((cfg)->sriov_cfg.vf_srn)
+
+#define CFG_GET_IOQ_MSIX(cfg) ((cfg)->msix_cfg.ioq_msix)
+#define CFG_GET_NON_IOQ_MSIX(cfg) ((cfg)->msix_cfg.non_ioq_msix)
+#define CFG_GET_NON_IOQ_MSIX_NAMES(cfg) ((cfg)->msix_cfg.non_ioq_msix_names)
+
+#define CFG_GET_CTRL_MBOX_MEM_ADDR(cfg) ((cfg)->ctrl_mbox_cfg.barmem_addr)
+
+/* Hardware Tx Queue configuration. */
+struct octep_iq_config {
+ /* Size of the Input queue (number of commands) */
+ u16 num_descs;
+
+ /* Command size - 32 or 64 bytes */
+ u16 instr_type;
+
+ /* pkind for packets sent to Octeon */
+ u16 pkind;
+
+	/* Minimum number of commands pending to be posted to Octeon before
+	 * the driver rings the Input queue doorbell.
+	 */
+ u16 db_min;
+
+ /* Trigger the IQ interrupt when processed cmd count reaches
+ * this level.
+ */
+ u32 intr_threshold;
+};
+
+/* Hardware Rx Queue configuration. */
+struct octep_oq_config {
+ /* Size of Output queue (number of descriptors) */
+ u16 num_descs;
+
+ /* Size of buffer in this Output queue. */
+ u16 buf_size;
+
+ /* The number of buffers that were consumed during packet processing
+ * by the driver on this Output queue before the driver attempts to
+ * replenish the descriptor ring with new buffers.
+ */
+ u16 refill_threshold;
+
+ /* Interrupt Coalescing (Packet Count). Octeon will interrupt the host
+ * only if it sent as many packets as specified by this field.
+ * The driver usually does not use packet count interrupt coalescing.
+ */
+ u32 oq_intr_pkt;
+
+ /* Interrupt Coalescing (Time Interval). Octeon will interrupt the host
+ * if at least one packet was sent in the time interval specified by
+ * this field. The driver uses time interval interrupt coalescing by
+ * default. The time is specified in microseconds.
+ */
+ u32 oq_intr_time;
+};
+
+/* Tx/Rx configuration */
+struct octep_pf_ring_config {
+ /* Max number of IOQs */
+ u16 max_io_rings;
+
+ /* Number of active IOQs */
+ u16 active_io_rings;
+
+ /* Starting IOQ number: this changes based on which PEM is used */
+ u16 srn;
+};
+
+/* Octeon Hardware SRIOV config */
+struct octep_sriov_config {
+ /* Max number of VF devices supported */
+ u16 max_vfs;
+
+ /* Number of VF devices enabled */
+ u16 active_vfs;
+
+ /* Max number of rings assigned to VF */
+ u8 max_rings_per_vf;
+
+ /* Number of rings enabled per VF */
+ u8 active_rings_per_vf;
+
+	/* starting ring number of the VFs: ring-0 of VF-0 of the PF */
+ u16 vf_srn;
+};
+
+/* Octeon MSI-x config. */
+struct octep_msix_config {
+ /* Number of IOQ interrupts */
+ u16 ioq_msix;
+
+ /* Number of Non IOQ interrupts */
+ u16 non_ioq_msix;
+
+ /* Names of Non IOQ interrupts */
+ char **non_ioq_msix_names;
+};
+
+struct octep_ctrl_mbox_config {
+ /* Barmem address for control mbox */
+ void __iomem *barmem_addr;
+};
+
+/* Data Structure to hold configuration limits and active config */
+struct octep_config {
+ /* Input Queue attributes. */
+ struct octep_iq_config iq;
+
+ /* Output Queue attributes. */
+ struct octep_oq_config oq;
+
+ /* NIC Port Configuration */
+ struct octep_pf_ring_config pf_ring_cfg;
+
+ /* SRIOV configuration of the PF */
+ struct octep_sriov_config sriov_cfg;
+
+ /* MSI-X interrupt config */
+ struct octep_msix_config msix_cfg;
+
+ /* ctrl mbox config */
+ struct octep_ctrl_mbox_config ctrl_mbox_cfg;
+
+ /* Configured maximum heartbeat miss count */
+ u32 max_hb_miss_cnt;
+
+ /* Configured firmware heartbeat interval in secs */
+ u32 hb_interval;
+};
+#endif /* _OCTEP_CONFIG_H_ */
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_cp_version.h b/drivers/net/ethernet/marvell/octeon_ep/octep_cp_version.h
new file mode 100644
index 000000000..0c741e752
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_cp_version.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2022 Marvell.
+ */
+#ifndef __OCTEP_CP_VERSION_H__
+#define __OCTEP_CP_VERSION_H__
+
+#define OCTEP_CP_VERSION(a, b, c) ((((a) & 0xff) << 16) + \
+ (((b) & 0xff) << 8) + \
+ ((c) & 0xff))
+
+#endif /* __OCTEP_CP_VERSION_H__ */
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c
new file mode 100644
index 000000000..9d53c1402
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c
@@ -0,0 +1,279 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/mutex.h>
+#include <linux/jiffies.h>
+#include <linux/sched.h>
+#include <linux/sched/signal.h>
+#include <linux/io.h>
+#include <linux/pci.h>
+#include <linux/etherdevice.h>
+
+#include "octep_ctrl_mbox.h"
+#include "octep_config.h"
+#include "octep_main.h"
+
+/* Timeout in msecs for message response */
+#define OCTEP_CTRL_MBOX_MSG_TIMEOUT_MS 100
+/* Time in msecs to wait for message response */
+#define OCTEP_CTRL_MBOX_MSG_WAIT_MS 10
+
+/* Size of mbox info in bytes */
+#define OCTEP_CTRL_MBOX_INFO_SZ 256
+/* Size of mbox host to fw queue info in bytes */
+#define OCTEP_CTRL_MBOX_H2FQ_INFO_SZ 16
+/* Size of mbox fw to host queue info in bytes */
+#define OCTEP_CTRL_MBOX_F2HQ_INFO_SZ 16
+
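+/* 256 + 16 + 16 = 288 bytes of fixed info at the start of bar memory; the
+ * host-to-fw and fw-to-host data queues follow it (see the layout comment
+ * in octep_ctrl_mbox.h).
+ */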
+#define OCTEP_CTRL_MBOX_TOTAL_INFO_SZ (OCTEP_CTRL_MBOX_INFO_SZ + \
+ OCTEP_CTRL_MBOX_H2FQ_INFO_SZ + \
+ OCTEP_CTRL_MBOX_F2HQ_INFO_SZ)
+
+#define OCTEP_CTRL_MBOX_INFO_MAGIC_NUM(m) (m)
+#define OCTEP_CTRL_MBOX_INFO_BARMEM_SZ(m) ((m) + 8)
+#define OCTEP_CTRL_MBOX_INFO_HOST_VERSION(m) ((m) + 16)
+#define OCTEP_CTRL_MBOX_INFO_HOST_STATUS(m) ((m) + 24)
+#define OCTEP_CTRL_MBOX_INFO_FW_VERSION(m) ((m) + 136)
+#define OCTEP_CTRL_MBOX_INFO_FW_STATUS(m) ((m) + 144)
+
+#define OCTEP_CTRL_MBOX_H2FQ_INFO(m) ((m) + OCTEP_CTRL_MBOX_INFO_SZ)
+#define OCTEP_CTRL_MBOX_H2FQ_PROD(m) (OCTEP_CTRL_MBOX_H2FQ_INFO(m))
+#define OCTEP_CTRL_MBOX_H2FQ_CONS(m) ((OCTEP_CTRL_MBOX_H2FQ_INFO(m)) + 4)
+#define OCTEP_CTRL_MBOX_H2FQ_SZ(m) ((OCTEP_CTRL_MBOX_H2FQ_INFO(m)) + 8)
+
+#define OCTEP_CTRL_MBOX_F2HQ_INFO(m) ((m) + \
+ OCTEP_CTRL_MBOX_INFO_SZ + \
+ OCTEP_CTRL_MBOX_H2FQ_INFO_SZ)
+#define OCTEP_CTRL_MBOX_F2HQ_PROD(m) (OCTEP_CTRL_MBOX_F2HQ_INFO(m))
+#define OCTEP_CTRL_MBOX_F2HQ_CONS(m) ((OCTEP_CTRL_MBOX_F2HQ_INFO(m)) + 4)
+#define OCTEP_CTRL_MBOX_F2HQ_SZ(m) ((OCTEP_CTRL_MBOX_F2HQ_INFO(m)) + 8)
+
+static const u32 mbox_hdr_sz = sizeof(union octep_ctrl_mbox_msg_hdr);
+
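+/* Helpers for the producer/consumer circular queues kept in bar memory:
+ * indices advance modulo the queue size, "depth" is the number of bytes
+ * queued and "space" is the number of bytes that can still be written.
+ */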
+static u32 octep_ctrl_mbox_circq_inc(u32 index, u32 inc, u32 sz)
+{
+ return (index + inc) % sz;
+}
+
+static u32 octep_ctrl_mbox_circq_space(u32 pi, u32 ci, u32 sz)
+{
+ return sz - (abs(pi - ci) % sz);
+}
+
+static u32 octep_ctrl_mbox_circq_depth(u32 pi, u32 ci, u32 sz)
+{
+ return (abs(pi - ci) % sz);
+}
+
+int octep_ctrl_mbox_init(struct octep_ctrl_mbox *mbox)
+{
+ u64 magic_num, status, fw_versions;
+
+ if (!mbox)
+ return -EINVAL;
+
+ if (!mbox->barmem) {
+ pr_info("octep_ctrl_mbox : Invalid barmem %p\n", mbox->barmem);
+ return -EINVAL;
+ }
+
+ magic_num = readq(OCTEP_CTRL_MBOX_INFO_MAGIC_NUM(mbox->barmem));
+ if (magic_num != OCTEP_CTRL_MBOX_MAGIC_NUMBER) {
+ pr_info("octep_ctrl_mbox : Invalid magic number %llx\n", magic_num);
+ return -EINVAL;
+ }
+
+ status = readq(OCTEP_CTRL_MBOX_INFO_FW_STATUS(mbox->barmem));
+ if (status != OCTEP_CTRL_MBOX_STATUS_READY) {
+ pr_info("octep_ctrl_mbox : Firmware is not ready.\n");
+ return -EINVAL;
+ }
+
+ fw_versions = readq(OCTEP_CTRL_MBOX_INFO_FW_VERSION(mbox->barmem));
+ mbox->min_fw_version = ((fw_versions & 0xffffffff00000000ull) >> 32);
+ mbox->max_fw_version = (fw_versions & 0xffffffff);
+ mbox->barmem_sz = readl(OCTEP_CTRL_MBOX_INFO_BARMEM_SZ(mbox->barmem));
+
+ writeq(OCTEP_CTRL_MBOX_STATUS_INIT,
+ OCTEP_CTRL_MBOX_INFO_HOST_STATUS(mbox->barmem));
+
+ mutex_init(&mbox->h2fq_lock);
+ mutex_init(&mbox->f2hq_lock);
+
+ mbox->h2fq.sz = readl(OCTEP_CTRL_MBOX_H2FQ_SZ(mbox->barmem));
+ mbox->h2fq.hw_prod = OCTEP_CTRL_MBOX_H2FQ_PROD(mbox->barmem);
+ mbox->h2fq.hw_cons = OCTEP_CTRL_MBOX_H2FQ_CONS(mbox->barmem);
+ mbox->h2fq.hw_q = mbox->barmem + OCTEP_CTRL_MBOX_TOTAL_INFO_SZ;
+
+ mbox->f2hq.sz = readl(OCTEP_CTRL_MBOX_F2HQ_SZ(mbox->barmem));
+ mbox->f2hq.hw_prod = OCTEP_CTRL_MBOX_F2HQ_PROD(mbox->barmem);
+ mbox->f2hq.hw_cons = OCTEP_CTRL_MBOX_F2HQ_CONS(mbox->barmem);
+ mbox->f2hq.hw_q = mbox->barmem +
+ OCTEP_CTRL_MBOX_TOTAL_INFO_SZ +
+ mbox->h2fq.sz;
+
+ writeq(mbox->version, OCTEP_CTRL_MBOX_INFO_HOST_VERSION(mbox->barmem));
+ /* ensure ready state is seen after everything is initialized */
+ wmb();
+ writeq(OCTEP_CTRL_MBOX_STATUS_READY,
+ OCTEP_CTRL_MBOX_INFO_HOST_STATUS(mbox->barmem));
+
+ pr_info("Octep ctrl mbox : Init successful.\n");
+
+ return 0;
+}
+
+static void
+octep_write_mbox_data(struct octep_ctrl_mbox_q *q, u32 *pi, u32 ci, void *buf, u32 w_sz)
+{
+ u8 __iomem *qbuf;
+ u32 cp_sz;
+
+ /* Assumption: Caller has ensured enough write space */
+ qbuf = (q->hw_q + *pi);
+ if (*pi < ci) {
+ /* copy entire w_sz */
+ memcpy_toio(qbuf, buf, w_sz);
+ *pi = octep_ctrl_mbox_circq_inc(*pi, w_sz, q->sz);
+ } else {
+ /* copy up to end of queue */
+ cp_sz = min((q->sz - *pi), w_sz);
+ memcpy_toio(qbuf, buf, cp_sz);
+ w_sz -= cp_sz;
+ *pi = octep_ctrl_mbox_circq_inc(*pi, cp_sz, q->sz);
+ if (w_sz) {
+ /* roll over and copy remaining w_sz */
+ buf += cp_sz;
+ qbuf = (q->hw_q + *pi);
+ memcpy_toio(qbuf, buf, w_sz);
+ *pi = octep_ctrl_mbox_circq_inc(*pi, w_sz, q->sz);
+ }
+ }
+}
+
+int octep_ctrl_mbox_send(struct octep_ctrl_mbox *mbox, struct octep_ctrl_mbox_msg *msg)
+{
+ struct octep_ctrl_mbox_msg_buf *sg;
+ struct octep_ctrl_mbox_q *q;
+ u32 pi, ci, buf_sz, w_sz;
+ int s;
+
+ if (!mbox || !msg)
+ return -EINVAL;
+
+ if (readq(OCTEP_CTRL_MBOX_INFO_FW_STATUS(mbox->barmem)) != OCTEP_CTRL_MBOX_STATUS_READY)
+ return -EIO;
+
+ mutex_lock(&mbox->h2fq_lock);
+ q = &mbox->h2fq;
+ pi = readl(q->hw_prod);
+ ci = readl(q->hw_cons);
+
+ if (octep_ctrl_mbox_circq_space(pi, ci, q->sz) < (msg->hdr.s.sz + mbox_hdr_sz)) {
+ mutex_unlock(&mbox->h2fq_lock);
+ return -EAGAIN;
+ }
+
+ octep_write_mbox_data(q, &pi, ci, (void *)&msg->hdr, mbox_hdr_sz);
+ buf_sz = msg->hdr.s.sz;
+ for (s = 0; ((s < msg->sg_num) && (buf_sz > 0)); s++) {
+ sg = &msg->sg_list[s];
+ w_sz = (sg->sz <= buf_sz) ? sg->sz : buf_sz;
+ octep_write_mbox_data(q, &pi, ci, sg->msg, w_sz);
+ buf_sz -= w_sz;
+ }
+ writel(pi, q->hw_prod);
+ mutex_unlock(&mbox->h2fq_lock);
+
+ return 0;
+}
+
+static void
+octep_read_mbox_data(struct octep_ctrl_mbox_q *q, u32 pi, u32 *ci, void *buf, u32 r_sz)
+{
+ u8 __iomem *qbuf;
+ u32 cp_sz;
+
+ /* Assumption: Caller has ensured enough read space */
+ qbuf = (q->hw_q + *ci);
+ if (*ci < pi) {
+ /* copy entire r_sz */
+ memcpy_fromio(buf, qbuf, r_sz);
+ *ci = octep_ctrl_mbox_circq_inc(*ci, r_sz, q->sz);
+ } else {
+ /* copy up to end of queue */
+ cp_sz = min((q->sz - *ci), r_sz);
+ memcpy_fromio(buf, qbuf, cp_sz);
+ r_sz -= cp_sz;
+ *ci = octep_ctrl_mbox_circq_inc(*ci, cp_sz, q->sz);
+ if (r_sz) {
+ /* roll over and copy remaining r_sz */
+ buf += cp_sz;
+ qbuf = (q->hw_q + *ci);
+ memcpy_fromio(buf, qbuf, r_sz);
+ *ci = octep_ctrl_mbox_circq_inc(*ci, r_sz, q->sz);
+ }
+ }
+}
+
+int octep_ctrl_mbox_recv(struct octep_ctrl_mbox *mbox, struct octep_ctrl_mbox_msg *msg)
+{
+ struct octep_ctrl_mbox_msg_buf *sg;
+ u32 pi, ci, r_sz, buf_sz, q_depth;
+ struct octep_ctrl_mbox_q *q;
+ int s;
+
+ if (readq(OCTEP_CTRL_MBOX_INFO_FW_STATUS(mbox->barmem)) != OCTEP_CTRL_MBOX_STATUS_READY)
+ return -EIO;
+
+ mutex_lock(&mbox->f2hq_lock);
+ q = &mbox->f2hq;
+ pi = readl(q->hw_prod);
+ ci = readl(q->hw_cons);
+
+ q_depth = octep_ctrl_mbox_circq_depth(pi, ci, q->sz);
+ if (q_depth < mbox_hdr_sz) {
+ mutex_unlock(&mbox->f2hq_lock);
+ return -EAGAIN;
+ }
+
+ octep_read_mbox_data(q, pi, &ci, (void *)&msg->hdr, mbox_hdr_sz);
+ buf_sz = msg->hdr.s.sz;
+ for (s = 0; ((s < msg->sg_num) && (buf_sz > 0)); s++) {
+ sg = &msg->sg_list[s];
+ r_sz = (sg->sz <= buf_sz) ? sg->sz : buf_sz;
+ octep_read_mbox_data(q, pi, &ci, sg->msg, r_sz);
+ buf_sz -= r_sz;
+ }
+ writel(ci, q->hw_cons);
+ mutex_unlock(&mbox->f2hq_lock);
+
+ return 0;
+}
+
+int octep_ctrl_mbox_uninit(struct octep_ctrl_mbox *mbox)
+{
+ if (!mbox)
+ return -EINVAL;
+ if (!mbox->barmem)
+ return -EINVAL;
+
+ writeq(0, OCTEP_CTRL_MBOX_INFO_HOST_VERSION(mbox->barmem));
+ writeq(OCTEP_CTRL_MBOX_STATUS_INVALID,
+ OCTEP_CTRL_MBOX_INFO_HOST_STATUS(mbox->barmem));
+ /* ensure uninit state is written before uninitialization */
+ wmb();
+
+ mutex_destroy(&mbox->h2fq_lock);
+ mutex_destroy(&mbox->f2hq_lock);
+
+ pr_info("Octep ctrl mbox : Uninit successful.\n");
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.h b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.h
new file mode 100644
index 000000000..7f8135788
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.h
@@ -0,0 +1,180 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+#ifndef __OCTEP_CTRL_MBOX_H__
+#define __OCTEP_CTRL_MBOX_H__
+
+/* barmem structure
+ * |===========================================|
+ * |Info (16 + 120 + 120 = 256 bytes) |
+ * |-------------------------------------------|
+ * |magic number (8 bytes) |
+ * |bar memory size (4 bytes) |
+ * |reserved (4 bytes) |
+ * |-------------------------------------------|
+ * |host version (8 bytes) |
+ * |host status (8 bytes) |
+ * |host reserved (104 bytes) |
+ * |-------------------------------------------|
+ * |fw version (8 bytes) |
+ * |fw status (8 bytes) |
+ * |fw reserved (104 bytes) |
+ * |===========================================|
+ * |Host to Fw Queue info (16 bytes) |
+ * |-------------------------------------------|
+ * |producer index (4 bytes) |
+ * |consumer index (4 bytes) |
+ * |max element size (4 bytes) |
+ * |reserved (4 bytes) |
+ * |===========================================|
+ * |Fw to Host Queue info (16 bytes) |
+ * |-------------------------------------------|
+ * |producer index (4 bytes) |
+ * |consumer index (4 bytes) |
+ * |max element size (4 bytes) |
+ * |reserved (4 bytes) |
+ * |===========================================|
+ * |Host to Fw Queue ((total size-288)/2 bytes)|
+ * |-------------------------------------------|
+ * | |
+ * |===========================================|
+ * |===========================================|
+ * |Fw to Host Queue ((total size-288)/2 bytes)|
+ * |-------------------------------------------|
+ * | |
+ * |===========================================|
+ */
+
+#define OCTEP_CTRL_MBOX_MAGIC_NUMBER 0xdeaddeadbeefbeefull
+
+/* Valid request message */
+#define OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ BIT(0)
+/* Valid response message */
+#define OCTEP_CTRL_MBOX_MSG_HDR_FLAG_RESP BIT(1)
+/* Valid notification, no response required */
+#define OCTEP_CTRL_MBOX_MSG_HDR_FLAG_NOTIFY BIT(2)
+/* Valid custom message */
+#define OCTEP_CTRL_MBOX_MSG_HDR_FLAG_CUSTOM BIT(3)
+
+#define OCTEP_CTRL_MBOX_MSG_DESC_MAX 4
+
+enum octep_ctrl_mbox_status {
+ OCTEP_CTRL_MBOX_STATUS_INVALID = 0,
+ OCTEP_CTRL_MBOX_STATUS_INIT,
+ OCTEP_CTRL_MBOX_STATUS_READY,
+ OCTEP_CTRL_MBOX_STATUS_UNINIT
+};
+
+/* mbox message */
+union octep_ctrl_mbox_msg_hdr {
+ u64 words[2];
+ struct {
+ /* must be 0 */
+ u16 reserved1:15;
+ /* vf_idx is valid if 1 */
+ u16 is_vf:1;
+ /* sender vf index 0-(n-1), 0 if (is_vf==0) */
+ u16 vf_idx;
+ /* total size of message excluding header */
+ u32 sz;
+ /* OCTEP_CTRL_MBOX_MSG_HDR_FLAG_* */
+ u32 flags;
+ /* identifier to match responses */
+ u16 msg_id;
+ u16 reserved2;
+ } s;
+};
+
+/* mbox message buffer */
+struct octep_ctrl_mbox_msg_buf {
+ u32 reserved1;
+ u16 reserved2;
+ /* size of buffer */
+ u16 sz;
+ /* pointer to message buffer */
+ void *msg;
+};
+
+/* mbox message */
+struct octep_ctrl_mbox_msg {
+ /* mbox transaction header */
+ union octep_ctrl_mbox_msg_hdr hdr;
+	/* number of sg buffers */
+	int sg_num;
+	/* message buffers */
+ struct octep_ctrl_mbox_msg_buf sg_list[OCTEP_CTRL_MBOX_MSG_DESC_MAX];
+};
+
+/* Mbox queue */
+struct octep_ctrl_mbox_q {
+ /* size of queue buffer */
+ u32 sz;
+ /* producer address in bar mem */
+ u8 __iomem *hw_prod;
+ /* consumer address in bar mem */
+ u8 __iomem *hw_cons;
+ /* q base address in bar mem */
+ u8 __iomem *hw_q;
+};
+
+struct octep_ctrl_mbox {
+ /* control plane version */
+ u64 version;
+ /* size of bar memory */
+ u32 barmem_sz;
+ /* pointer to BAR memory */
+ u8 __iomem *barmem;
+ /* host-to-fw queue */
+ struct octep_ctrl_mbox_q h2fq;
+ /* fw-to-host queue */
+ struct octep_ctrl_mbox_q f2hq;
+ /* lock for h2fq */
+ struct mutex h2fq_lock;
+ /* lock for f2hq */
+ struct mutex f2hq_lock;
+ /* Min control plane version supported by firmware */
+ u32 min_fw_version;
+ /* Max control plane version supported by firmware */
+ u32 max_fw_version;
+};
+
+/* Initialize control mbox.
+ *
+ * @param mbox: non-null pointer to struct octep_ctrl_mbox.
+ *
+ * return value: 0 on success, -errno on failure.
+ */
+int octep_ctrl_mbox_init(struct octep_ctrl_mbox *mbox);
+
+/* Send mbox message.
+ *
+ * @param mbox: non-null pointer to struct octep_ctrl_mbox.
+ * @param msg: non-null pointer to struct octep_ctrl_mbox_msg.
+ *             Caller should fill msg.hdr.s.sz and msg.sg_list[i].sz for each message.
+ *
+ * return value: 0 on success, -errno on failure.
+ */
+int octep_ctrl_mbox_send(struct octep_ctrl_mbox *mbox, struct octep_ctrl_mbox_msg *msg);
+
+/* Retrieve mbox message.
+ *
+ * @param mbox: non-null pointer to struct octep_ctrl_mbox.
+ * @param msg: non-null pointer to struct octep_ctrl_mbox_msg.
+ *             Caller should fill msg.hdr.s.sz and msg.sg_list[i].sz for each message.
+ *
+ * return value: 0 on success, -errno on failure.
+ */
+int octep_ctrl_mbox_recv(struct octep_ctrl_mbox *mbox, struct octep_ctrl_mbox_msg *msg);
+
+/* Uninitialize control mbox.
+ *
+ * @param mbox: non-null pointer to struct octep_ctrl_mbox.
+ *
+ * return value: 0 on success, -errno on failure.
+ */
+int octep_ctrl_mbox_uninit(struct octep_ctrl_mbox *mbox);
+
+#endif /* __OCTEP_CTRL_MBOX_H__ */
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c
new file mode 100644
index 000000000..17bfd5cdf
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c
@@ -0,0 +1,368 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/etherdevice.h>
+#include <linux/pci.h>
+#include <linux/wait.h>
+
+#include "octep_config.h"
+#include "octep_main.h"
+#include "octep_ctrl_net.h"
+
+/* Control plane version */
+#define OCTEP_CP_VERSION_CURRENT OCTEP_CP_VERSION(1, 0, 0)
+
+static const u32 req_hdr_sz = sizeof(union octep_ctrl_net_req_hdr);
+static const u32 mtu_sz = sizeof(struct octep_ctrl_net_h2f_req_cmd_mtu);
+static const u32 mac_sz = sizeof(struct octep_ctrl_net_h2f_req_cmd_mac);
+static const u32 state_sz = sizeof(struct octep_ctrl_net_h2f_req_cmd_state);
+static const u32 link_info_sz = sizeof(struct octep_ctrl_net_link_info);
+static atomic_t ctrl_net_msg_id;
+
+/* Control plane version in which OCTEP_CTRL_NET_H2F_CMD was added */
+static const u32 octep_ctrl_net_h2f_cmd_versions[OCTEP_CTRL_NET_H2F_CMD_MAX] = {
+ [OCTEP_CTRL_NET_H2F_CMD_INVALID ... OCTEP_CTRL_NET_H2F_CMD_LINK_INFO] =
+ OCTEP_CP_VERSION(1, 0, 0)
+};
+
+/* Control plane version in which OCTEP_CTRL_NET_F2H_CMD was added */
+static const u32 octep_ctrl_net_f2h_cmd_versions[OCTEP_CTRL_NET_F2H_CMD_MAX] = {
+ [OCTEP_CTRL_NET_F2H_CMD_INVALID ... OCTEP_CTRL_NET_F2H_CMD_LINK_STATUS] =
+ OCTEP_CP_VERSION(1, 0, 0)
+};
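+/* The range designators above mark every command defined so far as
+ * available since control plane version 1.0.0; commands added later would
+ * carry a higher minimum version.
+ */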
+
+static void init_send_req(struct octep_ctrl_mbox_msg *msg, void *buf,
+ u16 sz, int vfid)
+{
+ msg->hdr.s.flags = OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ;
+ msg->hdr.s.msg_id = atomic_inc_return(&ctrl_net_msg_id) &
+ GENMASK(sizeof(msg->hdr.s.msg_id) * BITS_PER_BYTE, 0);
+ msg->hdr.s.sz = req_hdr_sz + sz;
+ msg->sg_num = 1;
+ msg->sg_list[0].msg = buf;
+ msg->sg_list[0].sz = msg->hdr.s.sz;
+ if (vfid != OCTEP_CTRL_NET_INVALID_VFID) {
+ msg->hdr.s.is_vf = 1;
+ msg->hdr.s.vf_idx = vfid;
+ }
+}
+
+static int octep_send_mbox_req(struct octep_device *oct,
+ struct octep_ctrl_net_wait_data *d,
+ bool wait_for_response)
+{
+ int err, ret, cmd;
+
+ /* check if firmware is compatible for this request */
+ cmd = d->data.req.hdr.s.cmd;
+ if (octep_ctrl_net_h2f_cmd_versions[cmd] > oct->ctrl_mbox.max_fw_version ||
+ octep_ctrl_net_h2f_cmd_versions[cmd] < oct->ctrl_mbox.min_fw_version)
+ return -EOPNOTSUPP;
+
+ err = octep_ctrl_mbox_send(&oct->ctrl_mbox, &d->msg);
+ if (err < 0)
+ return err;
+
+ if (!wait_for_response)
+ return 0;
+
+ d->done = 0;
+ INIT_LIST_HEAD(&d->list);
+ list_add_tail(&d->list, &oct->ctrl_req_wait_list);
+ ret = wait_event_interruptible_timeout(oct->ctrl_req_wait_q,
+ (d->done != 0),
+ msecs_to_jiffies(500));
+ list_del(&d->list);
+ if (ret == 0 || ret == 1)
+ return -EAGAIN;
+
+	/* wait_event_interruptible_timeout() returns:
+	 *   0  - timed out with the condition still false
+	 *   <0 - interrupted by a signal
+	 *   1  - condition became true right at the timeout
+	 *   >1 - condition became true before the timeout
+	 * 0 and 1 are treated as failure above and mapped to -EAGAIN.
+	 */
+
+ if (d->data.resp.hdr.s.reply != OCTEP_CTRL_NET_REPLY_OK)
+ return -EAGAIN;
+
+ return 0;
+}
+
+int octep_ctrl_net_init(struct octep_device *oct)
+{
+ struct octep_ctrl_mbox *ctrl_mbox;
+ struct pci_dev *pdev = oct->pdev;
+ int ret;
+
+ init_waitqueue_head(&oct->ctrl_req_wait_q);
+ INIT_LIST_HEAD(&oct->ctrl_req_wait_list);
+
+ /* Initialize control mbox */
+ ctrl_mbox = &oct->ctrl_mbox;
+ ctrl_mbox->version = OCTEP_CP_VERSION_CURRENT;
+ ctrl_mbox->barmem = CFG_GET_CTRL_MBOX_MEM_ADDR(oct->conf);
+ ret = octep_ctrl_mbox_init(ctrl_mbox);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to initialize control mbox\n");
+ return ret;
+ }
+ dev_info(&pdev->dev, "Control plane versions host: %llx, firmware: %x:%x\n",
+ ctrl_mbox->version, ctrl_mbox->min_fw_version,
+ ctrl_mbox->max_fw_version);
+ oct->ctrl_mbox_ifstats_offset = ctrl_mbox->barmem_sz;
+
+ return 0;
+}
+
+int octep_ctrl_net_get_link_status(struct octep_device *oct, int vfid)
+{
+ struct octep_ctrl_net_wait_data d = {0};
+ struct octep_ctrl_net_h2f_req *req = &d.data.req;
+ int err;
+
+ init_send_req(&d.msg, (void *)req, state_sz, vfid);
+ req->hdr.s.cmd = OCTEP_CTRL_NET_H2F_CMD_LINK_STATUS;
+ req->link.cmd = OCTEP_CTRL_NET_CMD_GET;
+ err = octep_send_mbox_req(oct, &d, true);
+ if (err < 0)
+ return err;
+
+ return d.data.resp.link.state;
+}
+
+int octep_ctrl_net_set_link_status(struct octep_device *oct, int vfid, bool up,
+ bool wait_for_response)
+{
+ struct octep_ctrl_net_wait_data d = {0};
+ struct octep_ctrl_net_h2f_req *req = &d.data.req;
+
+ init_send_req(&d.msg, req, state_sz, vfid);
+ req->hdr.s.cmd = OCTEP_CTRL_NET_H2F_CMD_LINK_STATUS;
+ req->link.cmd = OCTEP_CTRL_NET_CMD_SET;
+ req->link.state = (up) ? OCTEP_CTRL_NET_STATE_UP :
+ OCTEP_CTRL_NET_STATE_DOWN;
+
+ return octep_send_mbox_req(oct, &d, wait_for_response);
+}
+
+int octep_ctrl_net_set_rx_state(struct octep_device *oct, int vfid, bool up,
+ bool wait_for_response)
+{
+ struct octep_ctrl_net_wait_data d = {0};
+ struct octep_ctrl_net_h2f_req *req = &d.data.req;
+
+ init_send_req(&d.msg, req, state_sz, vfid);
+ req->hdr.s.cmd = OCTEP_CTRL_NET_H2F_CMD_RX_STATE;
+ req->link.cmd = OCTEP_CTRL_NET_CMD_SET;
+ req->link.state = (up) ? OCTEP_CTRL_NET_STATE_UP :
+ OCTEP_CTRL_NET_STATE_DOWN;
+
+ return octep_send_mbox_req(oct, &d, wait_for_response);
+}
+
+int octep_ctrl_net_get_mac_addr(struct octep_device *oct, int vfid, u8 *addr)
+{
+ struct octep_ctrl_net_wait_data d = {0};
+ struct octep_ctrl_net_h2f_req *req = &d.data.req;
+ int err;
+
+ init_send_req(&d.msg, req, mac_sz, vfid);
+ req->hdr.s.cmd = OCTEP_CTRL_NET_H2F_CMD_MAC;
+ req->link.cmd = OCTEP_CTRL_NET_CMD_GET;
+ err = octep_send_mbox_req(oct, &d, true);
+ if (err < 0)
+ return err;
+
+ memcpy(addr, d.data.resp.mac.addr, ETH_ALEN);
+
+ return 0;
+}
+
+int octep_ctrl_net_set_mac_addr(struct octep_device *oct, int vfid, u8 *addr,
+ bool wait_for_response)
+{
+ struct octep_ctrl_net_wait_data d = {0};
+ struct octep_ctrl_net_h2f_req *req = &d.data.req;
+
+ init_send_req(&d.msg, req, mac_sz, vfid);
+ req->hdr.s.cmd = OCTEP_CTRL_NET_H2F_CMD_MAC;
+ req->mac.cmd = OCTEP_CTRL_NET_CMD_SET;
+ memcpy(&req->mac.addr, addr, ETH_ALEN);
+
+ return octep_send_mbox_req(oct, &d, wait_for_response);
+}
+
+int octep_ctrl_net_set_mtu(struct octep_device *oct, int vfid, int mtu,
+ bool wait_for_response)
+{
+ struct octep_ctrl_net_wait_data d = {0};
+ struct octep_ctrl_net_h2f_req *req = &d.data.req;
+
+ init_send_req(&d.msg, req, mtu_sz, vfid);
+ req->hdr.s.cmd = OCTEP_CTRL_NET_H2F_CMD_MTU;
+ req->mtu.cmd = OCTEP_CTRL_NET_CMD_SET;
+ req->mtu.val = mtu;
+
+ return octep_send_mbox_req(oct, &d, wait_for_response);
+}
+
+int octep_ctrl_net_get_if_stats(struct octep_device *oct, int vfid,
+ struct octep_iface_rx_stats *rx_stats,
+ struct octep_iface_tx_stats *tx_stats)
+{
+ struct octep_ctrl_net_wait_data d = {0};
+ struct octep_ctrl_net_h2f_req *req = &d.data.req;
+ struct octep_ctrl_net_h2f_resp *resp;
+ int err;
+
+ init_send_req(&d.msg, req, 0, vfid);
+ req->hdr.s.cmd = OCTEP_CTRL_NET_H2F_CMD_GET_IF_STATS;
+ err = octep_send_mbox_req(oct, &d, true);
+ if (err < 0)
+ return err;
+
+ resp = &d.data.resp;
+ memcpy(rx_stats, &resp->if_stats.rx_stats, sizeof(struct octep_iface_rx_stats));
+ memcpy(tx_stats, &resp->if_stats.tx_stats, sizeof(struct octep_iface_tx_stats));
+ return 0;
+}
+
+int octep_ctrl_net_get_link_info(struct octep_device *oct, int vfid,
+ struct octep_iface_link_info *link_info)
+{
+ struct octep_ctrl_net_wait_data d = {0};
+ struct octep_ctrl_net_h2f_req *req = &d.data.req;
+ struct octep_ctrl_net_h2f_resp *resp;
+ int err;
+
+ init_send_req(&d.msg, req, link_info_sz, vfid);
+ req->hdr.s.cmd = OCTEP_CTRL_NET_H2F_CMD_LINK_INFO;
+ req->link_info.cmd = OCTEP_CTRL_NET_CMD_GET;
+ err = octep_send_mbox_req(oct, &d, true);
+ if (err < 0)
+ return err;
+
+ resp = &d.data.resp;
+ link_info->supported_modes = resp->link_info.supported_modes;
+ link_info->advertised_modes = resp->link_info.advertised_modes;
+ link_info->autoneg = resp->link_info.autoneg;
+ link_info->pause = resp->link_info.pause;
+ link_info->speed = resp->link_info.speed;
+
+ return 0;
+}
+
+int octep_ctrl_net_set_link_info(struct octep_device *oct, int vfid,
+ struct octep_iface_link_info *link_info,
+ bool wait_for_response)
+{
+ struct octep_ctrl_net_wait_data d = {0};
+ struct octep_ctrl_net_h2f_req *req = &d.data.req;
+
+ init_send_req(&d.msg, req, link_info_sz, vfid);
+ req->hdr.s.cmd = OCTEP_CTRL_NET_H2F_CMD_LINK_INFO;
+ req->link_info.cmd = OCTEP_CTRL_NET_CMD_SET;
+ req->link_info.info.advertised_modes = link_info->advertised_modes;
+ req->link_info.info.autoneg = link_info->autoneg;
+ req->link_info.info.pause = link_info->pause;
+ req->link_info.info.speed = link_info->speed;
+
+ return octep_send_mbox_req(oct, &d, wait_for_response);
+}
+
+static void process_mbox_resp(struct octep_device *oct,
+ struct octep_ctrl_mbox_msg *msg)
+{
+ struct octep_ctrl_net_wait_data *pos, *n;
+
+ list_for_each_entry_safe(pos, n, &oct->ctrl_req_wait_list, list) {
+ if (pos->msg.hdr.s.msg_id == msg->hdr.s.msg_id) {
+ memcpy(&pos->data.resp,
+ msg->sg_list[0].msg,
+ msg->hdr.s.sz);
+ pos->done = 1;
+ wake_up_interruptible_all(&oct->ctrl_req_wait_q);
+ break;
+ }
+ }
+}
+
+static int process_mbox_notify(struct octep_device *oct,
+ struct octep_ctrl_mbox_msg *msg)
+{
+ struct net_device *netdev = oct->netdev;
+ struct octep_ctrl_net_f2h_req *req;
+ int cmd;
+
+ req = (struct octep_ctrl_net_f2h_req *)msg->sg_list[0].msg;
+ cmd = req->hdr.s.cmd;
+
+ /* check if we support this command */
+ if (octep_ctrl_net_f2h_cmd_versions[cmd] > OCTEP_CP_VERSION_CURRENT ||
+ octep_ctrl_net_f2h_cmd_versions[cmd] < OCTEP_CP_VERSION_CURRENT)
+ return -EOPNOTSUPP;
+
+ switch (cmd) {
+ case OCTEP_CTRL_NET_F2H_CMD_LINK_STATUS:
+ if (netif_running(netdev)) {
+ if (req->link.state) {
+ dev_info(&oct->pdev->dev, "netif_carrier_on\n");
+ netif_carrier_on(netdev);
+ } else {
+ dev_info(&oct->pdev->dev, "netif_carrier_off\n");
+ netif_carrier_off(netdev);
+ }
+ }
+ break;
+ default:
+ pr_info("Unknown mbox req : %u\n", req->hdr.s.cmd);
+ break;
+ }
+
+ return 0;
+}
+
+void octep_ctrl_net_recv_fw_messages(struct octep_device *oct)
+{
+ static u16 msg_sz = sizeof(union octep_ctrl_net_max_data);
+ union octep_ctrl_net_max_data data = {0};
+ struct octep_ctrl_mbox_msg msg = {0};
+ int ret;
+
+ msg.hdr.s.sz = msg_sz;
+ msg.sg_num = 1;
+ msg.sg_list[0].sz = msg_sz;
+ msg.sg_list[0].msg = &data;
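+	/* Drain every pending firmware message; octep_ctrl_mbox_recv()
+	 * returns a negative value once the queue is empty (or the firmware
+	 * is not ready).
+	 */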
+ while (true) {
+ /* mbox will overwrite msg.hdr.s.sz so initialize it */
+ msg.hdr.s.sz = msg_sz;
+ ret = octep_ctrl_mbox_recv(&oct->ctrl_mbox, (struct octep_ctrl_mbox_msg *)&msg);
+ if (ret < 0)
+ break;
+
+ if (msg.hdr.s.flags & OCTEP_CTRL_MBOX_MSG_HDR_FLAG_RESP)
+ process_mbox_resp(oct, &msg);
+ else if (msg.hdr.s.flags & OCTEP_CTRL_MBOX_MSG_HDR_FLAG_NOTIFY)
+ process_mbox_notify(oct, &msg);
+ }
+}
+
+int octep_ctrl_net_uninit(struct octep_device *oct)
+{
+ struct octep_ctrl_net_wait_data *pos, *n;
+
+ list_for_each_entry_safe(pos, n, &oct->ctrl_req_wait_list, list)
+ pos->done = 1;
+
+ wake_up_interruptible_all(&oct->ctrl_req_wait_q);
+
+ octep_ctrl_mbox_uninit(&oct->ctrl_mbox);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h
new file mode 100644
index 000000000..1c2ef4ee3
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h
@@ -0,0 +1,341 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+#ifndef __OCTEP_CTRL_NET_H__
+#define __OCTEP_CTRL_NET_H__
+
+#include "octep_cp_version.h"
+
+#define OCTEP_CTRL_NET_INVALID_VFID (-1)
+
+/* Supported commands */
+enum octep_ctrl_net_cmd {
+ OCTEP_CTRL_NET_CMD_GET = 0,
+ OCTEP_CTRL_NET_CMD_SET,
+};
+
+/* Supported states */
+enum octep_ctrl_net_state {
+ OCTEP_CTRL_NET_STATE_DOWN = 0,
+ OCTEP_CTRL_NET_STATE_UP,
+};
+
+/* Supported replies */
+enum octep_ctrl_net_reply {
+ OCTEP_CTRL_NET_REPLY_OK = 0,
+ OCTEP_CTRL_NET_REPLY_GENERIC_FAIL,
+ OCTEP_CTRL_NET_REPLY_INVALID_PARAM,
+};
+
+/* Supported host to fw commands */
+enum octep_ctrl_net_h2f_cmd {
+ OCTEP_CTRL_NET_H2F_CMD_INVALID = 0,
+ OCTEP_CTRL_NET_H2F_CMD_MTU,
+ OCTEP_CTRL_NET_H2F_CMD_MAC,
+ OCTEP_CTRL_NET_H2F_CMD_GET_IF_STATS,
+ OCTEP_CTRL_NET_H2F_CMD_GET_XSTATS,
+ OCTEP_CTRL_NET_H2F_CMD_GET_Q_STATS,
+ OCTEP_CTRL_NET_H2F_CMD_LINK_STATUS,
+ OCTEP_CTRL_NET_H2F_CMD_RX_STATE,
+ OCTEP_CTRL_NET_H2F_CMD_LINK_INFO,
+ OCTEP_CTRL_NET_H2F_CMD_MAX
+};
+
+/* Supported fw to host commands */
+enum octep_ctrl_net_f2h_cmd {
+ OCTEP_CTRL_NET_F2H_CMD_INVALID = 0,
+ OCTEP_CTRL_NET_F2H_CMD_LINK_STATUS,
+ OCTEP_CTRL_NET_F2H_CMD_MAX
+};
+
+union octep_ctrl_net_req_hdr {
+ u64 words[1];
+ struct {
+ /* sender id */
+ u16 sender;
+ /* receiver id */
+ u16 receiver;
+ /* enum octep_ctrl_net_h2f_cmd or octep_ctrl_net_f2h_cmd */
+ u16 cmd;
+ /* reserved */
+ u16 rsvd0;
+ } s;
+};
+
+/* get/set mtu request */
+struct octep_ctrl_net_h2f_req_cmd_mtu {
+ /* enum octep_ctrl_net_cmd */
+ u16 cmd;
+ /* 0-65535 */
+ u16 val;
+};
+
+/* get/set mac request */
+struct octep_ctrl_net_h2f_req_cmd_mac {
+ /* enum octep_ctrl_net_cmd */
+ u16 cmd;
+ /* xx:xx:xx:xx:xx:xx */
+ u8 addr[ETH_ALEN];
+};
+
+/* get/set link state, rx state */
+struct octep_ctrl_net_h2f_req_cmd_state {
+ /* enum octep_ctrl_net_cmd */
+ u16 cmd;
+ /* enum octep_ctrl_net_state */
+ u16 state;
+};
+
+/* link info */
+struct octep_ctrl_net_link_info {
+ /* Bitmap of Supported link speeds/modes */
+ u64 supported_modes;
+ /* Bitmap of Advertised link speeds/modes */
+ u64 advertised_modes;
+ /* Autonegotiation state; bit 0=disabled; bit 1=enabled */
+ u8 autoneg;
+ /* Pause frames setting. bit 0=disabled; bit 1=enabled */
+ u8 pause;
+ /* Negotiated link speed in Mbps */
+ u32 speed;
+};
+
+/* get/set link info */
+struct octep_ctrl_net_h2f_req_cmd_link_info {
+ /* enum octep_ctrl_net_cmd */
+ u16 cmd;
+ /* struct octep_ctrl_net_link_info */
+ struct octep_ctrl_net_link_info info;
+};
+
+/* Host to fw request data */
+struct octep_ctrl_net_h2f_req {
+ union octep_ctrl_net_req_hdr hdr;
+ union {
+ struct octep_ctrl_net_h2f_req_cmd_mtu mtu;
+ struct octep_ctrl_net_h2f_req_cmd_mac mac;
+ struct octep_ctrl_net_h2f_req_cmd_state link;
+ struct octep_ctrl_net_h2f_req_cmd_state rx;
+ struct octep_ctrl_net_h2f_req_cmd_link_info link_info;
+ };
+} __packed;
+
+union octep_ctrl_net_resp_hdr {
+ u64 words[1];
+ struct {
+ /* sender id */
+ u16 sender;
+ /* receiver id */
+ u16 receiver;
+ /* enum octep_ctrl_net_h2f_cmd or octep_ctrl_net_f2h_cmd */
+ u16 cmd;
+ /* octep_ctrl_net_reply */
+ u16 reply;
+ } s;
+};
+
+/* get mtu response */
+struct octep_ctrl_net_h2f_resp_cmd_mtu {
+ /* 0-65535 */
+ u16 val;
+};
+
+/* get mac response */
+struct octep_ctrl_net_h2f_resp_cmd_mac {
+ /* xx:xx:xx:xx:xx:xx */
+ u8 addr[ETH_ALEN];
+};
+
+/* get if_stats, xstats, q_stats response */
+struct octep_ctrl_net_h2f_resp_cmd_get_stats {
+ struct octep_iface_rx_stats rx_stats;
+ struct octep_iface_tx_stats tx_stats;
+};
+
+/* get link state, rx state response */
+struct octep_ctrl_net_h2f_resp_cmd_state {
+ /* enum octep_ctrl_net_state */
+ u16 state;
+};
+
+/* Host to fw response data */
+struct octep_ctrl_net_h2f_resp {
+ union octep_ctrl_net_resp_hdr hdr;
+ union {
+ struct octep_ctrl_net_h2f_resp_cmd_mtu mtu;
+ struct octep_ctrl_net_h2f_resp_cmd_mac mac;
+ struct octep_ctrl_net_h2f_resp_cmd_get_stats if_stats;
+ struct octep_ctrl_net_h2f_resp_cmd_state link;
+ struct octep_ctrl_net_h2f_resp_cmd_state rx;
+ struct octep_ctrl_net_link_info link_info;
+ };
+} __packed;
+
+/* link state notification */
+struct octep_ctrl_net_f2h_req_cmd_state {
+ /* enum octep_ctrl_net_state */
+ u16 state;
+};
+
+/* Fw to host request data */
+struct octep_ctrl_net_f2h_req {
+ union octep_ctrl_net_req_hdr hdr;
+ union {
+ struct octep_ctrl_net_f2h_req_cmd_state link;
+ };
+};
+
+/* Fw to host response data */
+struct octep_ctrl_net_f2h_resp {
+ union octep_ctrl_net_resp_hdr hdr;
+};
+
+/* Max data size to be transferred over mbox */
+union octep_ctrl_net_max_data {
+ struct octep_ctrl_net_h2f_req h2f_req;
+ struct octep_ctrl_net_h2f_resp h2f_resp;
+ struct octep_ctrl_net_f2h_req f2h_req;
+ struct octep_ctrl_net_f2h_resp f2h_resp;
+};
+
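+/* Per-request wait context; queued on the device's control request wait list
+ * until the matching response arrives or the control net is uninitialized.
+ */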
+struct octep_ctrl_net_wait_data {
+ struct list_head list;
+ int done;
+ struct octep_ctrl_mbox_msg msg;
+ union {
+ struct octep_ctrl_net_h2f_req req;
+ struct octep_ctrl_net_h2f_resp resp;
+ } data;
+};
+
+/** Initialize data for ctrl net.
+ *
+ * @param oct: non-null pointer to struct octep_device.
+ *
+ * return value: 0 on success, -errno on error.
+ */
+int octep_ctrl_net_init(struct octep_device *oct);
+
+/** Get link status from firmware.
+ *
+ * @param oct: non-null pointer to struct octep_device.
+ * @param vfid: Index of virtual function.
+ *
+ * return value: link status 0=down, 1=up.
+ */
+int octep_ctrl_net_get_link_status(struct octep_device *oct, int vfid);
+
+/** Set link status in firmware.
+ *
+ * @param oct: non-null pointer to struct octep_device.
+ * @param vfid: Index of virtual function.
+ * @param up: boolean status.
+ * @param wait_for_response: poll for response.
+ *
+ * return value: 0 on success, -errno on failure
+ */
+int octep_ctrl_net_set_link_status(struct octep_device *oct, int vfid, bool up,
+ bool wait_for_response);
+
+/** Set rx state in firmware.
+ *
+ * @param oct: non-null pointer to struct octep_device.
+ * @param vfid: Index of virtual function.
+ * @param up: boolean status.
+ * @param wait_for_response: poll for response.
+ *
+ * return value: 0 on success, -errno on failure.
+ */
+int octep_ctrl_net_set_rx_state(struct octep_device *oct, int vfid, bool up,
+ bool wait_for_response);
+
+/** Get mac address from firmware.
+ *
+ * @param oct: non-null pointer to struct octep_device.
+ * @param vfid: Index of virtual function.
+ * @param addr: non-null pointer to mac address.
+ *
+ * return value: 0 on success, -errno on failure.
+ */
+int octep_ctrl_net_get_mac_addr(struct octep_device *oct, int vfid, u8 *addr);
+
+/** Set mac address in firmware.
+ *
+ * @param oct: non-null pointer to struct octep_device.
+ * @param vfid: Index of virtual function.
+ * @param addr: non-null pointer to mac address.
+ * @param wait_for_response: poll for response.
+ *
+ * return value: 0 on success, -errno on failure.
+ */
+int octep_ctrl_net_set_mac_addr(struct octep_device *oct, int vfid, u8 *addr,
+ bool wait_for_response);
+
+/** Set mtu in firmware.
+ *
+ * @param oct: non-null pointer to struct octep_device.
+ * @param vfid: Index of virtual function.
+ * @param mtu: mtu.
+ * @param wait_for_response: poll for response.
+ *
+ * return value: 0 on success, -errno on failure.
+ */
+int octep_ctrl_net_set_mtu(struct octep_device *oct, int vfid, int mtu,
+ bool wait_for_response);
+
+/** Get interface statistics from firmware.
+ *
+ * @param oct: non-null pointer to struct octep_device.
+ * @param vfid: Index of virtual function.
+ * @param rx_stats: non-null pointer to struct octep_iface_rx_stats.
+ * @param tx_stats: non-null pointer to struct octep_iface_tx_stats.
+ *
+ * return value: 0 on success, -errno on failure.
+ */
+int octep_ctrl_net_get_if_stats(struct octep_device *oct, int vfid,
+ struct octep_iface_rx_stats *rx_stats,
+ struct octep_iface_tx_stats *tx_stats);
+
+/** Get link info from firmware.
+ *
+ * @param oct: non-null pointer to struct octep_device.
+ * @param vfid: Index of virtual function.
+ * @param link_info: non-null pointer to struct octep_iface_link_info.
+ *
+ * return value: 0 on success, -errno on failure.
+ */
+int octep_ctrl_net_get_link_info(struct octep_device *oct, int vfid,
+ struct octep_iface_link_info *link_info);
+
+/** Set link info in firmware.
+ *
+ * @param oct: non-null pointer to struct octep_device.
+ * @param vfid: Index of virtual function.
+ * @param link_info: non-null pointer to struct octep_iface_link_info.
+ * @param wait_for_response: poll for response.
+ *
+ * return value: 0 on success, -errno on failure.
+ */
+int octep_ctrl_net_set_link_info(struct octep_device *oct,
+ int vfid,
+ struct octep_iface_link_info *link_info,
+ bool wait_for_response);
+
+/** Poll for firmware messages and process them.
+ *
+ * @param oct: non-null pointer to struct octep_device.
+ */
+void octep_ctrl_net_recv_fw_messages(struct octep_device *oct);
+
+/** Uninitialize data for ctrl net.
+ *
+ * @param oct: non-null pointer to struct octep_device.
+ *
+ * return value: 0 on success, -errno on error.
+ */
+int octep_ctrl_net_uninit(struct octep_device *oct);
+
+#endif /* __OCTEP_CTRL_NET_H__ */
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c b/drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c
new file mode 100644
index 000000000..7d0124b28
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c
@@ -0,0 +1,467 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+
+#include "octep_config.h"
+#include "octep_main.h"
+#include "octep_ctrl_net.h"
+
+static const char octep_gstrings_global_stats[][ETH_GSTRING_LEN] = {
+ "rx_packets",
+ "tx_packets",
+ "rx_bytes",
+ "tx_bytes",
+ "rx_alloc_errors",
+ "tx_busy_errors",
+ "rx_dropped",
+ "tx_dropped",
+ "tx_hw_pkts",
+ "tx_hw_octs",
+ "tx_hw_bcast",
+ "tx_hw_mcast",
+ "tx_hw_underflow",
+ "tx_hw_control",
+ "tx_less_than_64",
+ "tx_equal_64",
+ "tx_equal_65_to_127",
+ "tx_equal_128_to_255",
+ "tx_equal_256_to_511",
+ "tx_equal_512_to_1023",
+ "tx_equal_1024_to_1518",
+ "tx_greater_than_1518",
+ "rx_hw_pkts",
+ "rx_hw_bytes",
+ "rx_hw_bcast",
+ "rx_hw_mcast",
+ "rx_pause_pkts",
+ "rx_pause_bytes",
+ "rx_dropped_pkts_fifo_full",
+ "rx_dropped_bytes_fifo_full",
+ "rx_err_pkts",
+};
+
+#define OCTEP_GLOBAL_STATS_CNT (sizeof(octep_gstrings_global_stats) / ETH_GSTRING_LEN)
+
+static const char octep_gstrings_tx_q_stats[][ETH_GSTRING_LEN] = {
+ "tx_packets_posted[Q-%u]",
+ "tx_packets_completed[Q-%u]",
+ "tx_bytes[Q-%u]",
+ "tx_busy[Q-%u]",
+};
+
+#define OCTEP_TX_Q_STATS_CNT (sizeof(octep_gstrings_tx_q_stats) / ETH_GSTRING_LEN)
+
+static const char octep_gstrings_rx_q_stats[][ETH_GSTRING_LEN] = {
+ "rx_packets[Q-%u]",
+ "rx_bytes[Q-%u]",
+ "rx_alloc_errors[Q-%u]",
+};
+
+#define OCTEP_RX_Q_STATS_CNT (sizeof(octep_gstrings_rx_q_stats) / ETH_GSTRING_LEN)
+
+static void octep_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *info)
+{
+ struct octep_device *oct = netdev_priv(netdev);
+
+ strscpy(info->driver, OCTEP_DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(oct->pdev), sizeof(info->bus_info));
+}
+
+static void octep_get_strings(struct net_device *netdev,
+ u32 stringset, u8 *data)
+{
+ struct octep_device *oct = netdev_priv(netdev);
+ u16 num_queues = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+ char *strings = (char *)data;
+ int i, j;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < OCTEP_GLOBAL_STATS_CNT; i++) {
+ snprintf(strings, ETH_GSTRING_LEN, "%s",
+ octep_gstrings_global_stats[i]);
+ strings += ETH_GSTRING_LEN;
+ }
+
+ for (i = 0; i < num_queues; i++) {
+ for (j = 0; j < OCTEP_TX_Q_STATS_CNT; j++) {
+ snprintf(strings, ETH_GSTRING_LEN,
+ octep_gstrings_tx_q_stats[j], i);
+ strings += ETH_GSTRING_LEN;
+ }
+ }
+
+ for (i = 0; i < num_queues; i++) {
+ for (j = 0; j < OCTEP_RX_Q_STATS_CNT; j++) {
+ snprintf(strings, ETH_GSTRING_LEN,
+ octep_gstrings_rx_q_stats[j], i);
+ strings += ETH_GSTRING_LEN;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static int octep_get_sset_count(struct net_device *netdev, int sset)
+{
+ struct octep_device *oct = netdev_priv(netdev);
+ u16 num_queues = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+
+ switch (sset) {
+ case ETH_SS_STATS:
+ return OCTEP_GLOBAL_STATS_CNT + (num_queues *
+ (OCTEP_TX_Q_STATS_CNT + OCTEP_RX_Q_STATS_CNT));
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void
+octep_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct octep_device *oct = netdev_priv(netdev);
+ struct octep_iface_tx_stats *iface_tx_stats;
+ struct octep_iface_rx_stats *iface_rx_stats;
+ u64 rx_packets, rx_bytes;
+ u64 tx_packets, tx_bytes;
+ u64 rx_alloc_errors, tx_busy_errors;
+ int q, i;
+
+ rx_packets = 0;
+ rx_bytes = 0;
+ tx_packets = 0;
+ tx_bytes = 0;
+ rx_alloc_errors = 0;
+ tx_busy_errors = 0;
+
+ iface_tx_stats = &oct->iface_tx_stats;
+ iface_rx_stats = &oct->iface_rx_stats;
+ octep_ctrl_net_get_if_stats(oct,
+ OCTEP_CTRL_NET_INVALID_VFID,
+ iface_rx_stats,
+ iface_tx_stats);
+
+ for (q = 0; q < oct->num_oqs; q++) {
+ struct octep_iq *iq = oct->iq[q];
+ struct octep_oq *oq = oct->oq[q];
+
+ tx_packets += iq->stats.instr_completed;
+ tx_bytes += iq->stats.bytes_sent;
+ tx_busy_errors += iq->stats.tx_busy;
+
+ rx_packets += oq->stats.packets;
+ rx_bytes += oq->stats.bytes;
+ rx_alloc_errors += oq->stats.alloc_failures;
+ }
+ i = 0;
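+ /* Global stats are filled in the same order as the
+ * octep_gstrings_global_stats[] strings above.
+ */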
+ data[i++] = rx_packets;
+ data[i++] = tx_packets;
+ data[i++] = rx_bytes;
+ data[i++] = tx_bytes;
+ data[i++] = rx_alloc_errors;
+ data[i++] = tx_busy_errors;
+ data[i++] = iface_rx_stats->dropped_pkts_fifo_full +
+ iface_rx_stats->err_pkts;
+ data[i++] = iface_tx_stats->xscol +
+ iface_tx_stats->xsdef;
+ data[i++] = iface_tx_stats->pkts;
+ data[i++] = iface_tx_stats->octs;
+ data[i++] = iface_tx_stats->bcst;
+ data[i++] = iface_tx_stats->mcst;
+ data[i++] = iface_tx_stats->undflw;
+ data[i++] = iface_tx_stats->ctl;
+ data[i++] = iface_tx_stats->hist_lt64;
+ data[i++] = iface_tx_stats->hist_eq64;
+ data[i++] = iface_tx_stats->hist_65to127;
+ data[i++] = iface_tx_stats->hist_128to255;
+ data[i++] = iface_tx_stats->hist_256to511;
+ data[i++] = iface_tx_stats->hist_512to1023;
+ data[i++] = iface_tx_stats->hist_1024to1518;
+ data[i++] = iface_tx_stats->hist_gt1518;
+ data[i++] = iface_rx_stats->pkts;
+ data[i++] = iface_rx_stats->octets;
+ data[i++] = iface_rx_stats->mcast_pkts;
+ data[i++] = iface_rx_stats->bcast_pkts;
+ data[i++] = iface_rx_stats->pause_pkts;
+ data[i++] = iface_rx_stats->pause_octets;
+ data[i++] = iface_rx_stats->dropped_pkts_fifo_full;
+ data[i++] = iface_rx_stats->dropped_octets_fifo_full;
+ data[i++] = iface_rx_stats->err_pkts;
+
+ /* Per Tx Queue stats */
+ for (q = 0; q < oct->num_iqs; q++) {
+ struct octep_iq *iq = oct->iq[q];
+
+ data[i++] = iq->stats.instr_posted;
+ data[i++] = iq->stats.instr_completed;
+ data[i++] = iq->stats.bytes_sent;
+ data[i++] = iq->stats.tx_busy;
+ }
+
+ /* Per Rx Queue stats */
+ for (q = 0; q < oct->num_oqs; q++) {
+ struct octep_oq *oq = oct->oq[q];
+
+ data[i++] = oq->stats.packets;
+ data[i++] = oq->stats.bytes;
+ data[i++] = oq->stats.alloc_failures;
+ }
+}
+
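+/* Translate the driver's OCTEP_LINK_MODE_* speed/mode bitmap into ethtool
+ * link mode bits on the given ksettings field (supported or advertising).
+ */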
+#define OCTEP_SET_ETHTOOL_LINK_MODES_BITMAP(octep_speeds, ksettings, name) \
+{ \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_10GBASE_T)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseT_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_10GBASE_R)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseR_FEC); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_10GBASE_CR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseCR_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_10GBASE_KR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseKR_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_10GBASE_LR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseLR_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_10GBASE_SR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseSR_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_25GBASE_CR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 25000baseCR_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_25GBASE_KR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 25000baseKR_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_25GBASE_SR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 25000baseSR_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_40GBASE_CR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 40000baseCR4_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_40GBASE_KR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 40000baseKR4_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_40GBASE_LR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 40000baseLR4_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_40GBASE_SR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 40000baseSR4_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_50GBASE_CR2)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseCR2_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_50GBASE_KR2)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseKR2_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_50GBASE_SR2)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseSR2_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_50GBASE_CR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseCR_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_50GBASE_KR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseKR_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_50GBASE_LR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseLR_ER_FR_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_50GBASE_SR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseSR_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_100GBASE_CR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 100000baseCR4_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_100GBASE_KR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 100000baseKR4_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_100GBASE_LR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 100000baseLR4_ER4_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_100GBASE_SR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 100000baseSR4_Full); \
+}
+
+static int octep_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct octep_device *oct = netdev_priv(netdev);
+ struct octep_iface_link_info *link_info;
+ u32 advertised_modes, supported_modes;
+
+ ethtool_link_ksettings_zero_link_mode(cmd, supported);
+ ethtool_link_ksettings_zero_link_mode(cmd, advertising);
+
+ link_info = &oct->link_info;
+ octep_ctrl_net_get_link_info(oct, OCTEP_CTRL_NET_INVALID_VFID, link_info);
+
+ advertised_modes = oct->link_info.advertised_modes;
+ supported_modes = oct->link_info.supported_modes;
+
+ OCTEP_SET_ETHTOOL_LINK_MODES_BITMAP(supported_modes, cmd, supported);
+ OCTEP_SET_ETHTOOL_LINK_MODES_BITMAP(advertised_modes, cmd, advertising);
+
+ if (link_info->autoneg) {
+ if (link_info->autoneg & OCTEP_LINK_MODE_AUTONEG_SUPPORTED)
+ ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
+ if (link_info->autoneg & OCTEP_LINK_MODE_AUTONEG_ADVERTISED) {
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
+ cmd->base.autoneg = AUTONEG_ENABLE;
+ } else {
+ cmd->base.autoneg = AUTONEG_DISABLE;
+ }
+ } else {
+ cmd->base.autoneg = AUTONEG_DISABLE;
+ }
+
+ if (link_info->pause) {
+ if (link_info->pause & OCTEP_LINK_MODE_PAUSE_SUPPORTED)
+ ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
+ if (link_info->pause & OCTEP_LINK_MODE_PAUSE_ADVERTISED)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
+ }
+
+ cmd->base.port = PORT_FIBRE;
+ ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
+
+ if (netif_carrier_ok(netdev)) {
+ cmd->base.speed = link_info->speed;
+ cmd->base.duplex = DUPLEX_FULL;
+ } else {
+ cmd->base.speed = SPEED_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
+ }
+ return 0;
+}
+
+static int octep_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct octep_device *oct = netdev_priv(netdev);
+ struct octep_iface_link_info link_info_new;
+ struct octep_iface_link_info *link_info;
+ u64 advertised = 0;
+ u8 autoneg = 0;
+ int err;
+
+ link_info = &oct->link_info;
+ memcpy(&link_info_new, link_info, sizeof(struct octep_iface_link_info));
+
+ /* Only Full duplex is supported;
+ * Assume full duplex when duplex is unknown.
+ */
+ if (cmd->base.duplex != DUPLEX_FULL &&
+ cmd->base.duplex != DUPLEX_UNKNOWN)
+ return -EOPNOTSUPP;
+
+ if (cmd->base.autoneg == AUTONEG_ENABLE) {
+ if (!(link_info->autoneg & OCTEP_LINK_MODE_AUTONEG_SUPPORTED))
+ return -EOPNOTSUPP;
+ autoneg = 1;
+ }
+
+ if (!bitmap_subset(cmd->link_modes.advertising,
+ cmd->link_modes.supported,
+ __ETHTOOL_LINK_MODE_MASK_NBITS))
+ return -EINVAL;
+
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 10000baseT_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_10GBASE_T);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 10000baseR_FEC))
+ advertised |= BIT(OCTEP_LINK_MODE_10GBASE_R);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 10000baseCR_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_10GBASE_CR);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 10000baseKR_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_10GBASE_KR);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 10000baseLR_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_10GBASE_LR);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 10000baseSR_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_10GBASE_SR);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 25000baseCR_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_25GBASE_CR);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 25000baseKR_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_25GBASE_KR);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 25000baseSR_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_25GBASE_SR);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 40000baseCR4_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_40GBASE_CR4);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 40000baseKR4_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_40GBASE_KR4);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 40000baseLR4_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_40GBASE_LR4);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 40000baseSR4_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_40GBASE_SR4);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 50000baseCR2_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_50GBASE_CR2);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 50000baseKR2_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_50GBASE_KR2);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 50000baseSR2_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_50GBASE_SR2);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 50000baseCR_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_50GBASE_CR);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 50000baseKR_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_50GBASE_KR);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 50000baseLR_ER_FR_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_50GBASE_LR);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 50000baseSR_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_50GBASE_SR);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 100000baseCR4_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_100GBASE_CR4);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 100000baseKR4_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_100GBASE_KR4);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 100000baseLR4_ER4_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_100GBASE_LR4);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 100000baseSR4_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_100GBASE_SR4);
+
+ if (advertised == link_info->advertised_modes &&
+ cmd->base.speed == link_info->speed &&
+ cmd->base.autoneg == link_info->autoneg)
+ return 0;
+
+ link_info_new.advertised_modes = advertised;
+ link_info_new.speed = cmd->base.speed;
+ link_info_new.autoneg = autoneg;
+
+ err = octep_ctrl_net_set_link_info(oct, OCTEP_CTRL_NET_INVALID_VFID,
+ &link_info_new, true);
+ if (err)
+ return err;
+
+ memcpy(link_info, &link_info_new, sizeof(struct octep_iface_link_info));
+ return 0;
+}
+
+static const struct ethtool_ops octep_ethtool_ops = {
+ .get_drvinfo = octep_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_strings = octep_get_strings,
+ .get_sset_count = octep_get_sset_count,
+ .get_ethtool_stats = octep_get_ethtool_stats,
+ .get_link_ksettings = octep_get_link_ksettings,
+ .set_link_ksettings = octep_set_link_ksettings,
+};
+
+void octep_set_ethtool_ops(struct net_device *netdev)
+{
+ netdev->ethtool_ops = &octep_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
new file mode 100644
index 000000000..2ee1374db
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
@@ -0,0 +1,1273 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/vmalloc.h>
+
+#include "octep_config.h"
+#include "octep_main.h"
+#include "octep_ctrl_net.h"
+
+#define OCTEP_INTR_POLL_TIME_MSECS 100
+struct workqueue_struct *octep_wq;
+
+/* Supported Devices */
+static const struct pci_device_id octep_pci_id_tbl[] = {
+ {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CN93_PF)},
+ {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CNF95N_PF)},
+ {0, },
+};
+MODULE_DEVICE_TABLE(pci, octep_pci_id_tbl);
+
+MODULE_AUTHOR("Veerasenareddy Burru <vburru@marvell.com>");
+MODULE_DESCRIPTION(OCTEP_DRV_STRING);
+MODULE_LICENSE("GPL");
+
+/**
+ * octep_alloc_ioq_vectors() - Allocate Tx/Rx Queue interrupt info.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Allocate resources to hold per Tx/Rx queue interrupt info.
+ * This information is passed to the interrupt handler, from which NAPI
+ * polling is scheduled; it gives quick access to the private data of the
+ * Tx/Rx queue corresponding to the interrupt being handled.
+ *
+ * Return: 0, on successful allocation of resources for all queue interrupts.
+ * -1, if failed to allocate any resource.
+ */
+static int octep_alloc_ioq_vectors(struct octep_device *oct)
+{
+ int i;
+ struct octep_ioq_vector *ioq_vector;
+
+ for (i = 0; i < oct->num_oqs; i++) {
+ oct->ioq_vector[i] = vzalloc(sizeof(*oct->ioq_vector[i]));
+ if (!oct->ioq_vector[i])
+ goto free_ioq_vector;
+
+ ioq_vector = oct->ioq_vector[i];
+ ioq_vector->iq = oct->iq[i];
+ ioq_vector->oq = oct->oq[i];
+ ioq_vector->octep_dev = oct;
+ }
+
+ dev_info(&oct->pdev->dev, "Allocated %d IOQ vectors\n", oct->num_oqs);
+ return 0;
+
+free_ioq_vector:
+ while (i) {
+ i--;
+ vfree(oct->ioq_vector[i]);
+ oct->ioq_vector[i] = NULL;
+ }
+ return -1;
+}
+
+/**
+ * octep_free_ioq_vectors() - Free Tx/Rx Queue interrupt vector info.
+ *
+ * @oct: Octeon device private data structure.
+ */
+static void octep_free_ioq_vectors(struct octep_device *oct)
+{
+ int i;
+
+ for (i = 0; i < oct->num_oqs; i++) {
+ if (oct->ioq_vector[i]) {
+ vfree(oct->ioq_vector[i]);
+ oct->ioq_vector[i] = NULL;
+ }
+ }
+ netdev_info(oct->netdev, "Freed IOQ Vectors\n");
+}
+
+/**
+ * octep_enable_msix_range() - enable MSI-x interrupts.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Allocate and enable all MSI-x interrupts (queue and non-queue interrupts)
+ * for the Octeon device.
+ *
+ * Return: 0, on successfully enabling all MSI-x interrupts.
+ * -1, if failed to enable any MSI-x interrupt.
+ */
+static int octep_enable_msix_range(struct octep_device *oct)
+{
+ int num_msix, msix_allocated;
+ int i;
+
+ /* One vector per Tx/Rx queue pair plus the non-queue (generic) vectors */
+ num_msix = oct->num_oqs + CFG_GET_NON_IOQ_MSIX(oct->conf);
+ oct->msix_entries = kcalloc(num_msix,
+ sizeof(struct msix_entry), GFP_KERNEL);
+ if (!oct->msix_entries)
+ goto msix_alloc_err;
+
+ for (i = 0; i < num_msix; i++)
+ oct->msix_entries[i].entry = i;
+
+ msix_allocated = pci_enable_msix_range(oct->pdev, oct->msix_entries,
+ num_msix, num_msix);
+ if (msix_allocated != num_msix) {
+ dev_err(&oct->pdev->dev,
+ "Failed to enable %d msix irqs; got only %d\n",
+ num_msix, msix_allocated);
+ goto enable_msix_err;
+ }
+ oct->num_irqs = msix_allocated;
+ dev_info(&oct->pdev->dev, "MSI-X enabled successfully\n");
+
+ return 0;
+
+enable_msix_err:
+ if (msix_allocated > 0)
+ pci_disable_msix(oct->pdev);
+ kfree(oct->msix_entries);
+ oct->msix_entries = NULL;
+msix_alloc_err:
+ return -1;
+}
+
+/**
+ * octep_disable_msix() - disable MSI-x interrupts.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Disable MSI-x on the Octeon device.
+ */
+static void octep_disable_msix(struct octep_device *oct)
+{
+ pci_disable_msix(oct->pdev);
+ kfree(oct->msix_entries);
+ oct->msix_entries = NULL;
+ dev_info(&oct->pdev->dev, "Disabled MSI-X\n");
+}
+
+/**
+ * octep_non_ioq_intr_handler() - common handler for all generic interrupts.
+ *
+ * @irq: Interrupt number.
+ * @data: interrupt data.
+ *
+ * This is the common handler for all non-queue (generic) interrupts.
+ */
+static irqreturn_t octep_non_ioq_intr_handler(int irq, void *data)
+{
+ struct octep_device *oct = data;
+
+ return oct->hw_ops.non_ioq_intr_handler(oct);
+}
+
+/**
+ * octep_ioq_intr_handler() - handler for all Tx/Rx queue interrupts.
+ *
+ * @irq: Interrupt number.
+ * @data: interrupt data contains pointers to Tx/Rx queue private data
+ *        and corresponding NAPI context.
+ *
+ * This is the common handler for all Tx/Rx queue interrupts.
+ */
+static irqreturn_t octep_ioq_intr_handler(int irq, void *data)
+{
+ struct octep_ioq_vector *ioq_vector = data;
+ struct octep_device *oct = ioq_vector->octep_dev;
+
+ return oct->hw_ops.ioq_intr_handler(ioq_vector);
+}
+
+/**
+ * octep_request_irqs() - Register interrupt handlers.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Register handlers for all queue and non-queue interrupts.
+ *
+ * Return: 0, on successful registration of all interrupt handlers.
+ * -1, on any error.
+ */
+static int octep_request_irqs(struct octep_device *oct)
+{
+ struct net_device *netdev = oct->netdev;
+ struct octep_ioq_vector *ioq_vector;
+ struct msix_entry *msix_entry;
+ char **non_ioq_msix_names;
+ int num_non_ioq_msix;
+ int ret, i, j;
+
+ num_non_ioq_msix = CFG_GET_NON_IOQ_MSIX(oct->conf);
+ non_ioq_msix_names = CFG_GET_NON_IOQ_MSIX_NAMES(oct->conf);
+
+ oct->non_ioq_irq_names = kcalloc(num_non_ioq_msix,
+ OCTEP_MSIX_NAME_SIZE, GFP_KERNEL);
+ if (!oct->non_ioq_irq_names)
+ goto alloc_err;
+
+ /* First few MSI-X interrupts are non-queue interrupts */
+ for (i = 0; i < num_non_ioq_msix; i++) {
+ char *irq_name;
+
+ irq_name = &oct->non_ioq_irq_names[i * OCTEP_MSIX_NAME_SIZE];
+ msix_entry = &oct->msix_entries[i];
+
+ snprintf(irq_name, OCTEP_MSIX_NAME_SIZE,
+ "%s-%s", netdev->name, non_ioq_msix_names[i]);
+ ret = request_irq(msix_entry->vector,
+ octep_non_ioq_intr_handler, 0,
+ irq_name, oct);
+ if (ret) {
+ netdev_err(netdev,
+ "request_irq failed for %s; err=%d",
+ irq_name, ret);
+ goto non_ioq_irq_err;
+ }
+ }
+
+ /* Request IRQs for Tx/Rx queues */
+ for (j = 0; j < oct->num_oqs; j++) {
+ ioq_vector = oct->ioq_vector[j];
+ msix_entry = &oct->msix_entries[j + num_non_ioq_msix];
+
+ snprintf(ioq_vector->name, sizeof(ioq_vector->name),
+ "%s-q%d", netdev->name, j);
+ ret = request_irq(msix_entry->vector,
+ octep_ioq_intr_handler, 0,
+ ioq_vector->name, ioq_vector);
+ if (ret) {
+ netdev_err(netdev,
+ "request_irq failed for Q-%d; err=%d",
+ j, ret);
+ goto ioq_irq_err;
+ }
+
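+ /* Spread queue interrupt affinity across online CPUs round-robin */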
+ cpumask_set_cpu(j % num_online_cpus(),
+ &ioq_vector->affinity_mask);
+ irq_set_affinity_hint(msix_entry->vector,
+ &ioq_vector->affinity_mask);
+ }
+
+ return 0;
+ioq_irq_err:
+ while (j) {
+ --j;
+ ioq_vector = oct->ioq_vector[j];
+ msix_entry = &oct->msix_entries[j + num_non_ioq_msix];
+
+ irq_set_affinity_hint(msix_entry->vector, NULL);
+ free_irq(msix_entry->vector, ioq_vector);
+ }
+non_ioq_irq_err:
+ while (i) {
+ --i;
+ free_irq(oct->msix_entries[i].vector, oct);
+ }
+ kfree(oct->non_ioq_irq_names);
+ oct->non_ioq_irq_names = NULL;
+alloc_err:
+ return -1;
+}
+
+/**
+ * octep_free_irqs() - free all registered interrupts.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Free all queue and non-queue interrupts of the Octeon device.
+ */
+static void octep_free_irqs(struct octep_device *oct)
+{
+ int i;
+
+ /* First few MSI-X interrupts are non queue interrupts; free them */
+ for (i = 0; i < CFG_GET_NON_IOQ_MSIX(oct->conf); i++)
+ free_irq(oct->msix_entries[i].vector, oct);
+ kfree(oct->non_ioq_irq_names);
+
+ /* Free IRQs for Input/Output (Tx/Rx) queues */
+ for (i = CFG_GET_NON_IOQ_MSIX(oct->conf); i < oct->num_irqs; i++) {
+ irq_set_affinity_hint(oct->msix_entries[i].vector, NULL);
+ free_irq(oct->msix_entries[i].vector,
+ oct->ioq_vector[i - CFG_GET_NON_IOQ_MSIX(oct->conf)]);
+ }
+ netdev_info(oct->netdev, "IRQs freed\n");
+}
+
+/**
+ * octep_setup_irqs() - setup interrupts for the Octeon device.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Allocate data structures to hold per interrupt information, allocate/enable
+ * MSI-x interrupt and register interrupt handlers.
+ *
+ * Return: 0, on successful allocation and registration of all interrupts.
+ * -1, on any error.
+ */
+static int octep_setup_irqs(struct octep_device *oct)
+{
+ if (octep_alloc_ioq_vectors(oct))
+ goto ioq_vector_err;
+
+ if (octep_enable_msix_range(oct))
+ goto enable_msix_err;
+
+ if (octep_request_irqs(oct))
+ goto request_irq_err;
+
+ return 0;
+
+request_irq_err:
+ octep_disable_msix(oct);
+enable_msix_err:
+ octep_free_ioq_vectors(oct);
+ioq_vector_err:
+ return -1;
+}
+
+/**
+ * octep_clean_irqs() - free all interrupts and its resources.
+ *
+ * @oct: Octeon device private data structure.
+ */
+static void octep_clean_irqs(struct octep_device *oct)
+{
+ octep_free_irqs(oct);
+ octep_disable_msix(oct);
+ octep_free_ioq_vectors(oct);
+}
+
+/**
+ * octep_enable_ioq_irq() - Enable MSI-x interrupt of a Tx/Rx queue.
+ *
+ * @iq: Octeon Tx queue data structure.
+ * @oq: Octeon Rx queue data structure.
+ */
+static void octep_enable_ioq_irq(struct octep_iq *iq, struct octep_oq *oq)
+{
+ u32 pkts_pend = oq->pkts_pending;
+
+ netdev_dbg(iq->netdev, "enabling intr for Q-%u\n", iq->q_no);
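+ /* Acknowledge the Tx/Rx work processed since the last interrupt before
+ * re-arming it via the RESEND bit below.
+ */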
+ if (iq->pkts_processed) {
+ writel(iq->pkts_processed, iq->inst_cnt_reg);
+ iq->pkt_in_done -= iq->pkts_processed;
+ iq->pkts_processed = 0;
+ }
+ if (oq->last_pkt_count - pkts_pend) {
+ writel(oq->last_pkt_count - pkts_pend, oq->pkts_sent_reg);
+ oq->last_pkt_count = pkts_pend;
+ }
+
+ /* Flush the previous writes before writing to the RESEND bit */
+ wmb();
+ writeq(1UL << OCTEP_OQ_INTR_RESEND_BIT, oq->pkts_sent_reg);
+ writeq(1UL << OCTEP_IQ_INTR_RESEND_BIT, iq->inst_cnt_reg);
+}
+
+/**
+ * octep_napi_poll() - NAPI poll function for Tx/Rx.
+ *
+ * @napi: pointer to napi context.
+ * @budget: max number of packets to be processed in single invocation.
+ */
+static int octep_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct octep_ioq_vector *ioq_vector =
+ container_of(napi, struct octep_ioq_vector, napi);
+ u32 tx_pending, rx_done;
+
+ tx_pending = octep_iq_process_completions(ioq_vector->iq, budget);
+ rx_done = octep_oq_process_rx(ioq_vector->oq, budget);
+
+ /* need more polling if Tx completion processing is still pending or we
+ * processed at least 'budget' Rx packets.
+ */
+ if (tx_pending || rx_done >= budget)
+ return budget;
+
+ napi_complete(napi);
+ octep_enable_ioq_irq(ioq_vector->iq, ioq_vector->oq);
+ return rx_done;
+}
+
+/**
+ * octep_napi_add() - Add NAPI poll for all Tx/Rx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+static void octep_napi_add(struct octep_device *oct)
+{
+ int i;
+
+ for (i = 0; i < oct->num_oqs; i++) {
+ netdev_dbg(oct->netdev, "Adding NAPI on Q-%d\n", i);
+ netif_napi_add(oct->netdev, &oct->ioq_vector[i]->napi,
+ octep_napi_poll);
+ oct->oq[i]->napi = &oct->ioq_vector[i]->napi;
+ }
+}
+
+/**
+ * octep_napi_delete() - delete NAPI poll callback for all Tx/Rx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+static void octep_napi_delete(struct octep_device *oct)
+{
+ int i;
+
+ for (i = 0; i < oct->num_oqs; i++) {
+ netdev_dbg(oct->netdev, "Deleting NAPI on Q-%d\n", i);
+ netif_napi_del(&oct->ioq_vector[i]->napi);
+ oct->oq[i]->napi = NULL;
+ }
+}
+
+/**
+ * octep_napi_enable() - enable NAPI for all Tx/Rx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+static void octep_napi_enable(struct octep_device *oct)
+{
+ int i;
+
+ for (i = 0; i < oct->num_oqs; i++) {
+ netdev_dbg(oct->netdev, "Enabling NAPI on Q-%d\n", i);
+ napi_enable(&oct->ioq_vector[i]->napi);
+ }
+}
+
+/**
+ * octep_napi_disable() - disable NAPI for all Tx/Rx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+static void octep_napi_disable(struct octep_device *oct)
+{
+ int i;
+
+ for (i = 0; i < oct->num_oqs; i++) {
+ netdev_dbg(oct->netdev, "Disabling NAPI on Q-%d\n", i);
+ napi_disable(&oct->ioq_vector[i]->napi);
+ }
+}
+
+static void octep_link_up(struct net_device *netdev)
+{
+ netif_carrier_on(netdev);
+ netif_tx_start_all_queues(netdev);
+}
+
+/**
+ * octep_open() - start the octeon network device.
+ *
+ * @netdev: pointer to kernel network device.
+ *
+ * Set up Tx/Rx queues and interrupts, and enable hardware operation of the
+ * Tx/Rx queues and interrupts.
+ *
+ * Return: 0, on successfully setting up device and bring it up.
+ * -1, on any error.
+ */
+static int octep_open(struct net_device *netdev)
+{
+ struct octep_device *oct = netdev_priv(netdev);
+ int err, ret;
+
+ netdev_info(netdev, "Starting netdev ...\n");
+ netif_carrier_off(netdev);
+
+ oct->hw_ops.reset_io_queues(oct);
+
+ if (octep_setup_iqs(oct))
+ goto setup_iq_err;
+ if (octep_setup_oqs(oct))
+ goto setup_oq_err;
+ if (octep_setup_irqs(oct))
+ goto setup_irq_err;
+
+ err = netif_set_real_num_tx_queues(netdev, oct->num_oqs);
+ if (err)
+ goto set_queues_err;
+ err = netif_set_real_num_rx_queues(netdev, oct->num_iqs);
+ if (err)
+ goto set_queues_err;
+
+ octep_napi_add(oct);
+ octep_napi_enable(oct);
+
+ oct->link_info.admin_up = 1;
+ octep_ctrl_net_set_rx_state(oct, OCTEP_CTRL_NET_INVALID_VFID, true,
+ false);
+ octep_ctrl_net_set_link_status(oct, OCTEP_CTRL_NET_INVALID_VFID, true,
+ false);
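+ /* Queue and non-queue MSI-X interrupts are in use while the device is
+ * up, so stop the non-queue interrupt polling task.
+ */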
+ oct->poll_non_ioq_intr = false;
+
+ /* Enable the input and output queues for this Octeon device */
+ oct->hw_ops.enable_io_queues(oct);
+
+ /* Enable Octeon device interrupts */
+ oct->hw_ops.enable_interrupts(oct);
+
+ octep_oq_dbell_init(oct);
+
+ ret = octep_ctrl_net_get_link_status(oct, OCTEP_CTRL_NET_INVALID_VFID);
+ if (ret > 0)
+ octep_link_up(netdev);
+
+ return 0;
+
+set_queues_err:
+ octep_clean_irqs(oct);
+setup_irq_err:
+ octep_free_oqs(oct);
+setup_oq_err:
+ octep_free_iqs(oct);
+setup_iq_err:
+ return -1;
+}
+
+/**
+ * octep_stop() - stop the octeon network device.
+ *
+ * @netdev: pointer to kernel network device.
+ *
+ * Stop the device Tx/Rx operations, bring down the link, and
+ * free up all resources allocated for Tx/Rx queues and interrupts.
+ */
+static int octep_stop(struct net_device *netdev)
+{
+ struct octep_device *oct = netdev_priv(netdev);
+
+ netdev_info(netdev, "Stopping the device ...\n");
+
+ octep_ctrl_net_set_link_status(oct, OCTEP_CTRL_NET_INVALID_VFID, false,
+ false);
+ octep_ctrl_net_set_rx_state(oct, OCTEP_CTRL_NET_INVALID_VFID, false,
+ false);
+
+ /* Stop Tx from stack */
+ netif_tx_stop_all_queues(netdev);
+ netif_carrier_off(netdev);
+ netif_tx_disable(netdev);
+
+ oct->link_info.admin_up = 0;
+ oct->link_info.oper_up = 0;
+
+ oct->hw_ops.disable_interrupts(oct);
+ octep_napi_disable(oct);
+ octep_napi_delete(oct);
+
+ octep_clean_irqs(oct);
+ octep_clean_iqs(oct);
+
+ oct->hw_ops.disable_io_queues(oct);
+ oct->hw_ops.reset_io_queues(oct);
+ octep_free_oqs(oct);
+ octep_free_iqs(oct);
+
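+ /* With MSI-X interrupts torn down, fall back to periodically polling
+ * for non-queue events such as control mailbox messages.
+ */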
+ oct->poll_non_ioq_intr = true;
+ queue_delayed_work(octep_wq, &oct->intr_poll_task,
+ msecs_to_jiffies(OCTEP_INTR_POLL_TIME_MSECS));
+
+ netdev_info(netdev, "Device stopped !!\n");
+ return 0;
+}
+
+/**
+ * octep_iq_full_check() - check if a Tx queue is full.
+ *
+ * @iq: Octeon Tx queue data structure.
+ *
+ * Return: 0, if the Tx queue is not full.
+ * 1, if the Tx queue is full.
+ */
+static inline int octep_iq_full_check(struct octep_iq *iq)
+{
+ if (likely((iq->max_count - atomic_read(&iq->instr_pending)) >=
+ OCTEP_WAKE_QUEUE_THRESHOLD))
+ return 0;
+
+ /* Stop the queue if unable to send */
+ netif_stop_subqueue(iq->netdev, iq->q_no);
+
+ /* check again and restart the queue, in case NAPI has just freed
+ * enough Tx ring entries.
+ */
+ if (unlikely((iq->max_count - atomic_read(&iq->instr_pending)) >=
+ OCTEP_WAKE_QUEUE_THRESHOLD)) {
+ netif_start_subqueue(iq->netdev, iq->q_no);
+ iq->stats.restart_cnt++;
+ return 0;
+ }
+
+ return 1;
+}
+
+/**
+ * octep_start_xmit() - Enqueue packet to Octeon hardware Tx Queue.
+ *
+ * @skb: packet skbuff pointer.
+ * @netdev: kernel network device.
+ *
+ * Return: NETDEV_TX_BUSY, if Tx Queue is full.
+ * NETDEV_TX_OK, if successfully enqueued to hardware Tx queue.
+ */
+static netdev_tx_t octep_start_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct octep_device *oct = netdev_priv(netdev);
+ struct octep_tx_sglist_desc *sglist;
+ struct octep_tx_buffer *tx_buffer;
+ struct octep_tx_desc_hw *hw_desc;
+ struct skb_shared_info *shinfo;
+ struct octep_instr_hdr *ih;
+ struct octep_iq *iq;
+ skb_frag_t *frag;
+ u16 nr_frags, si;
+ u16 q_no, wi;
+
+ q_no = skb_get_queue_mapping(skb);
+ if (q_no >= oct->num_iqs) {
+ netdev_err(netdev, "Invalid Tx skb->queue_mapping=%d\n", q_no);
+ q_no = q_no % oct->num_iqs;
+ }
+
+ iq = oct->iq[q_no];
+ if (octep_iq_full_check(iq)) {
+ iq->stats.tx_busy++;
+ return NETDEV_TX_BUSY;
+ }
+
+ shinfo = skb_shinfo(skb);
+ nr_frags = shinfo->nr_frags;
+
+ wi = iq->host_write_index;
+ hw_desc = &iq->desc_ring[wi];
+ hw_desc->ih64 = 0;
+
+ tx_buffer = iq->buff_info + wi;
+ tx_buffer->skb = skb;
+
+ ih = &hw_desc->ih;
+ ih->tlen = skb->len;
+ ih->pkind = oct->pkind;
+
+ if (!nr_frags) {
+ tx_buffer->gather = 0;
+ tx_buffer->dma = dma_map_single(iq->dev, skb->data,
+ skb->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(iq->dev, tx_buffer->dma))
+ goto dma_map_err;
+ hw_desc->dptr = tx_buffer->dma;
+ } else {
+ /* Scatter/Gather */
+ dma_addr_t dma;
+ u16 len;
+
+ sglist = tx_buffer->sglist;
+
+ ih->gsz = nr_frags + 1;
+ ih->gather = 1;
+ tx_buffer->gather = 1;
+
+ len = skb_headlen(skb);
+ dma = dma_map_single(iq->dev, skb->data, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(iq->dev, dma))
+ goto dma_map_err;
+
+ dma_sync_single_for_cpu(iq->dev, tx_buffer->sglist_dma,
+ OCTEP_SGLIST_SIZE_PER_PKT,
+ DMA_TO_DEVICE);
+ memset(sglist, 0, OCTEP_SGLIST_SIZE_PER_PKT);
+ sglist[0].len[3] = len;
+ sglist[0].dma_ptr[0] = dma;
+
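+ /* Each sglist descriptor packs four buffer lengths and four DMA
+ * pointers; the lengths within a descriptor are stored in reverse
+ * order, hence the 3 - (si & 3) indexing below.
+ */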
+ si = 1; /* entry 0 is main skb, mapped above */
+ frag = &shinfo->frags[0];
+ while (nr_frags--) {
+ len = skb_frag_size(frag);
+ dma = skb_frag_dma_map(iq->dev, frag, 0,
+ len, DMA_TO_DEVICE);
+ if (dma_mapping_error(iq->dev, dma))
+ goto dma_map_sg_err;
+
+ sglist[si >> 2].len[3 - (si & 3)] = len;
+ sglist[si >> 2].dma_ptr[si & 3] = dma;
+
+ frag++;
+ si++;
+ }
+ dma_sync_single_for_device(iq->dev, tx_buffer->sglist_dma,
+ OCTEP_SGLIST_SIZE_PER_PKT,
+ DMA_TO_DEVICE);
+
+ hw_desc->dptr = tx_buffer->sglist_dma;
+ }
+
+ netdev_tx_sent_queue(iq->netdev_q, skb->len);
+ skb_tx_timestamp(skb);
+ atomic_inc(&iq->instr_pending);
+ wi++;
+ if (wi == iq->max_count)
+ wi = 0;
+ iq->host_write_index = wi;
+ /* Flush the hw descriptor before writing to doorbell */
+ wmb();
+
+ /* Ring Doorbell to notify the NIC there is a new packet */
+ writel(1, iq->doorbell_reg);
+ iq->stats.instr_posted++;
+ return NETDEV_TX_OK;
+
+dma_map_sg_err:
+ if (si > 0) {
+ dma_unmap_single(iq->dev, sglist[0].dma_ptr[0],
+ sglist[0].len[3], DMA_TO_DEVICE);
+ sglist[0].len[3] = 0;
+ }
+ while (si > 1) {
+ dma_unmap_page(iq->dev, sglist[si >> 2].dma_ptr[si & 3],
+ sglist[si >> 2].len[3 - (si & 3)], DMA_TO_DEVICE);
+ sglist[si >> 2].len[3 - (si & 3)] = 0;
+ si--;
+ }
+ tx_buffer->gather = 0;
+dma_map_err:
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+
+/**
+ * octep_get_stats64() - Get Octeon network device statistics.
+ *
+ * @netdev: kernel network device.
+ * @stats: pointer to stats structure to be filled in.
+ */
+static void octep_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ u64 tx_packets, tx_bytes, rx_packets, rx_bytes;
+ struct octep_device *oct = netdev_priv(netdev);
+ int q;
+
+ if (netif_running(netdev))
+ octep_ctrl_net_get_if_stats(oct,
+ OCTEP_CTRL_NET_INVALID_VFID,
+ &oct->iface_rx_stats,
+ &oct->iface_tx_stats);
+
+ tx_packets = 0;
+ tx_bytes = 0;
+ rx_packets = 0;
+ rx_bytes = 0;
+ for (q = 0; q < oct->num_oqs; q++) {
+ struct octep_iq *iq = oct->iq[q];
+ struct octep_oq *oq = oct->oq[q];
+
+ tx_packets += iq->stats.instr_completed;
+ tx_bytes += iq->stats.bytes_sent;
+ rx_packets += oq->stats.packets;
+ rx_bytes += oq->stats.bytes;
+ }
+ stats->tx_packets = tx_packets;
+ stats->tx_bytes = tx_bytes;
+ stats->rx_packets = rx_packets;
+ stats->rx_bytes = rx_bytes;
+ stats->multicast = oct->iface_rx_stats.mcast_pkts;
+ stats->rx_errors = oct->iface_rx_stats.err_pkts;
+ stats->collisions = oct->iface_tx_stats.xscol;
+ stats->tx_fifo_errors = oct->iface_tx_stats.undflw;
+}
+
+/**
+ * octep_tx_timeout_task - work queue task to handle Tx queue timeout.
+ *
+ * @work: pointer to Tx queue timeout work_struct
+ *
+ * Stop and start the device so that it frees up all queue resources
+ * and restarts the queues, which potentially clears a Tx queue timeout
+ * condition.
+ **/
+static void octep_tx_timeout_task(struct work_struct *work)
+{
+ struct octep_device *oct = container_of(work, struct octep_device,
+ tx_timeout_task);
+ struct net_device *netdev = oct->netdev;
+
+ rtnl_lock();
+ if (netif_running(netdev)) {
+ octep_stop(netdev);
+ octep_open(netdev);
+ }
+ rtnl_unlock();
+}
+
+/**
+ * octep_tx_timeout() - Handle Tx Queue timeout.
+ *
+ * @netdev: pointer to kernel network device.
+ * @txqueue: Timed out Tx queue number.
+ *
+ * Schedule a work to handle Tx queue timeout.
+ */
+static void octep_tx_timeout(struct net_device *netdev, unsigned int txqueue)
+{
+ struct octep_device *oct = netdev_priv(netdev);
+
+ queue_work(octep_wq, &oct->tx_timeout_task);
+}
+
+static int octep_set_mac(struct net_device *netdev, void *p)
+{
+ struct octep_device *oct = netdev_priv(netdev);
+ struct sockaddr *addr = (struct sockaddr *)p;
+ int err;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ err = octep_ctrl_net_set_mac_addr(oct, OCTEP_CTRL_NET_INVALID_VFID,
+ addr->sa_data, true);
+ if (err)
+ return err;
+
+ memcpy(oct->mac_addr, addr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(netdev, addr->sa_data);
+
+ return 0;
+}
+
+static int octep_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct octep_device *oct = netdev_priv(netdev);
+ struct octep_iface_link_info *link_info;
+ int err = 0;
+
+ link_info = &oct->link_info;
+ if (link_info->mtu == new_mtu)
+ return 0;
+
+ err = octep_ctrl_net_set_mtu(oct, OCTEP_CTRL_NET_INVALID_VFID, new_mtu,
+ true);
+ if (!err) {
+ oct->link_info.mtu = new_mtu;
+ netdev->mtu = new_mtu;
+ }
+
+ return err;
+}
+
+static const struct net_device_ops octep_netdev_ops = {
+ .ndo_open = octep_open,
+ .ndo_stop = octep_stop,
+ .ndo_start_xmit = octep_start_xmit,
+ .ndo_get_stats64 = octep_get_stats64,
+ .ndo_tx_timeout = octep_tx_timeout,
+ .ndo_set_mac_address = octep_set_mac,
+ .ndo_change_mtu = octep_change_mtu,
+};
+
+/**
+ * octep_intr_poll_task - work queue task to process non-ioq interrupts.
+ *
+ * @work: pointer to mbox work_struct
+ *
+ * Process non-ioq interrupts to handle the control mailbox and PF-VF mailbox.
+ **/
+static void octep_intr_poll_task(struct work_struct *work)
+{
+ struct octep_device *oct = container_of(work, struct octep_device,
+ intr_poll_task.work);
+
+ if (!oct->poll_non_ioq_intr) {
+ dev_info(&oct->pdev->dev, "Interrupt poll task stopped.\n");
+ return;
+ }
+
+ oct->hw_ops.poll_non_ioq_interrupts(oct);
+ queue_delayed_work(octep_wq, &oct->intr_poll_task,
+ msecs_to_jiffies(OCTEP_INTR_POLL_TIME_MSECS));
+}
+
+/**
+ * octep_hb_timeout_task - work queue task to check firmware heartbeat.
+ *
+ * @work: pointer to hb work_struct
+ *
+ * Increment the heartbeat miss count; if it reaches the configured maximum,
+ * bring the network device down.
+ *
+ **/
+static void octep_hb_timeout_task(struct work_struct *work)
+{
+ struct octep_device *oct = container_of(work, struct octep_device,
+ hb_task.work);
+
+ int miss_cnt;
+
+ miss_cnt = atomic_inc_return(&oct->hb_miss_cnt);
+ if (miss_cnt < oct->conf->max_hb_miss_cnt) {
+ queue_delayed_work(octep_wq, &oct->hb_task,
+ msecs_to_jiffies(oct->conf->hb_interval * 1000));
+ return;
+ }
+
+ dev_err(&oct->pdev->dev, "Missed %u heartbeats. Uninitializing\n",
+ miss_cnt);
+ rtnl_lock();
+ if (netif_running(oct->netdev))
+ octep_stop(oct->netdev);
+ rtnl_unlock();
+}
+
+/**
+ * octep_ctrl_mbox_task - work queue task to handle ctrl mbox messages.
+ *
+ * @work: pointer to ctrl mbox work_struct
+ *
+ * Poll ctrl mbox message queue and handle control messages from firmware.
+ **/
+static void octep_ctrl_mbox_task(struct work_struct *work)
+{
+ struct octep_device *oct = container_of(work, struct octep_device,
+ ctrl_mbox_task);
+
+ octep_ctrl_net_recv_fw_messages(oct);
+}
+
+static const char *octep_devid_to_str(struct octep_device *oct)
+{
+ switch (oct->chip_id) {
+ case OCTEP_PCI_DEVICE_ID_CN93_PF:
+ return "CN93XX";
+ case OCTEP_PCI_DEVICE_ID_CNF95N_PF:
+ return "CNF95N";
+ default:
+ return "Unsupported";
+ }
+}
+
+/**
+ * octep_device_setup() - Setup Octeon Device.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Setup Octeon device hardware operations, configuration, etc ...
+ */
+int octep_device_setup(struct octep_device *oct)
+{
+ struct pci_dev *pdev = oct->pdev;
+ int i, ret;
+
+ /* allocate memory for oct->conf */
+ oct->conf = kzalloc(sizeof(*oct->conf), GFP_KERNEL);
+ if (!oct->conf)
+ return -ENOMEM;
+
+ /* Map BAR regions; 64-bit BARs occupy two slots, hence the even indices */
+ for (i = 0; i < OCTEP_MMIO_REGIONS; i++) {
+ oct->mmio[i].hw_addr =
+ ioremap(pci_resource_start(oct->pdev, i * 2),
+ pci_resource_len(oct->pdev, i * 2));
+ if (!oct->mmio[i].hw_addr)
+ goto unmap_prev;
+
+ oct->mmio[i].mapped = 1;
+ }
+
+ oct->chip_id = pdev->device;
+ oct->rev_id = pdev->revision;
+ dev_info(&pdev->dev, "chip_id = 0x%x\n", pdev->device);
+
+ switch (oct->chip_id) {
+ case OCTEP_PCI_DEVICE_ID_CN93_PF:
+ case OCTEP_PCI_DEVICE_ID_CNF95N_PF:
+ dev_info(&pdev->dev, "Setting up OCTEON %s PF PASS%d.%d\n",
+ octep_devid_to_str(oct), OCTEP_MAJOR_REV(oct),
+ OCTEP_MINOR_REV(oct));
+ octep_device_setup_cn93_pf(oct);
+ break;
+ default:
+ dev_err(&pdev->dev,
+ "%s: unsupported device\n", __func__);
+ goto unsupported_dev;
+ }
+
+ oct->pkind = CFG_GET_IQ_PKIND(oct->conf);
+
+ ret = octep_ctrl_net_init(oct);
+ if (ret)
+ return ret;
+
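+ /* Start monitoring the firmware heartbeat */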
+ atomic_set(&oct->hb_miss_cnt, 0);
+ INIT_DELAYED_WORK(&oct->hb_task, octep_hb_timeout_task);
+ queue_delayed_work(octep_wq, &oct->hb_task,
+ msecs_to_jiffies(oct->conf->hb_interval * 1000));
+ return 0;
+
+unsupported_dev:
+ i = OCTEP_MMIO_REGIONS;
+unmap_prev:
+ while (i--)
+ iounmap(oct->mmio[i].hw_addr);
+
+ kfree(oct->conf);
+ return -1;
+}
+
+/**
+ * octep_device_cleanup() - Cleanup Octeon Device.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Cleanup Octeon device allocated resources.
+ */
+static void octep_device_cleanup(struct octep_device *oct)
+{
+ int i;
+
+ oct->poll_non_ioq_intr = false;
+ cancel_delayed_work_sync(&oct->intr_poll_task);
+ cancel_work_sync(&oct->ctrl_mbox_task);
+
+ dev_info(&oct->pdev->dev, "Cleaning up Octeon Device ...\n");
+
+ for (i = 0; i < OCTEP_MAX_VF; i++) {
+ vfree(oct->mbox[i]);
+ oct->mbox[i] = NULL;
+ }
+
+ octep_ctrl_net_uninit(oct);
+ cancel_delayed_work_sync(&oct->hb_task);
+
+ oct->hw_ops.soft_reset(oct);
+ for (i = 0; i < OCTEP_MMIO_REGIONS; i++) {
+ if (oct->mmio[i].mapped)
+ iounmap(oct->mmio[i].hw_addr);
+ }
+
+ kfree(oct->conf);
+ oct->conf = NULL;
+}
+
+static bool get_fw_ready_status(struct pci_dev *pdev)
+{
+ u32 pos = 0;
+ u16 vsec_id;
+ u8 status;
+
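+ /* Firmware readiness is reported through a PCI vendor-specific
+ * extended capability; walk the capability chain for the matching
+ * VSEC id.
+ */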
+ while ((pos = pci_find_next_ext_capability(pdev, pos,
+ PCI_EXT_CAP_ID_VNDR))) {
+ pci_read_config_word(pdev, pos + 4, &vsec_id);
+#define FW_STATUS_VSEC_ID 0xA3
+ if (vsec_id != FW_STATUS_VSEC_ID)
+ continue;
+
+ pci_read_config_byte(pdev, (pos + 8), &status);
+ dev_info(&pdev->dev, "Firmware ready status = %u\n", status);
+#define FW_STATUS_READY 1ULL
+ return status == FW_STATUS_READY;
+ }
+ return false;
+}
+
+/**
+ * octep_probe() - Octeon PCI device probe handler.
+ *
+ * @pdev: PCI device structure.
+ * @ent: entry in Octeon PCI device ID table.
+ *
+ * Initializes and enables the Octeon PCI device for network operations.
+ * Initializes Octeon private data structure and registers a network device.
+ */
+static int octep_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct octep_device *octep_dev = NULL;
+ struct net_device *netdev;
+ int err;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to enable PCI device\n");
+ return err;
+ }
+
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_err(&pdev->dev, "Failed to set DMA mask !!\n");
+ goto err_dma_mask;
+ }
+
+ err = pci_request_mem_regions(pdev, OCTEP_DRV_NAME);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to map PCI memory regions\n");
+ goto err_pci_regions;
+ }
+
+ pci_set_master(pdev);
+
+ if (!get_fw_ready_status(pdev)) {
+ dev_notice(&pdev->dev, "Firmware not ready; defer probe.\n");
+ err = -EPROBE_DEFER;
+ goto err_alloc_netdev;
+ }
+
+ netdev = alloc_etherdev_mq(sizeof(struct octep_device),
+ OCTEP_MAX_QUEUES);
+ if (!netdev) {
+ dev_err(&pdev->dev, "Failed to allocate netdev\n");
+ err = -ENOMEM;
+ goto err_alloc_netdev;
+ }
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ octep_dev = netdev_priv(netdev);
+ octep_dev->netdev = netdev;
+ octep_dev->pdev = pdev;
+ octep_dev->dev = &pdev->dev;
+ pci_set_drvdata(pdev, octep_dev);
+
+ err = octep_device_setup(octep_dev);
+ if (err) {
+ dev_err(&pdev->dev, "Device setup failed\n");
+ goto err_octep_config;
+ }
+ INIT_WORK(&octep_dev->tx_timeout_task, octep_tx_timeout_task);
+ INIT_WORK(&octep_dev->ctrl_mbox_task, octep_ctrl_mbox_task);
+ INIT_DELAYED_WORK(&octep_dev->intr_poll_task, octep_intr_poll_task);
+ octep_dev->poll_non_ioq_intr = true;
+ queue_delayed_work(octep_wq, &octep_dev->intr_poll_task,
+ msecs_to_jiffies(OCTEP_INTR_POLL_TIME_MSECS));
+
+ netdev->netdev_ops = &octep_netdev_ops;
+ octep_set_ethtool_ops(netdev);
+ netif_carrier_off(netdev);
+
+ netdev->hw_features = NETIF_F_SG;
+ netdev->features |= netdev->hw_features;
+ netdev->min_mtu = OCTEP_MIN_MTU;
+ netdev->max_mtu = OCTEP_MAX_MTU;
+ netdev->mtu = OCTEP_DEFAULT_MTU;
+
+ err = octep_ctrl_net_get_mac_addr(octep_dev, OCTEP_CTRL_NET_INVALID_VFID,
+ octep_dev->mac_addr);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to get mac address\n");
+ goto register_dev_err;
+ }
+ eth_hw_addr_set(netdev, octep_dev->mac_addr);
+
+ err = register_netdev(netdev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to register netdev\n");
+ goto register_dev_err;
+ }
+ dev_info(&pdev->dev, "Device probe successful\n");
+ return 0;
+
+register_dev_err:
+ octep_device_cleanup(octep_dev);
+err_octep_config:
+ free_netdev(netdev);
+err_alloc_netdev:
+ pci_release_mem_regions(pdev);
+err_pci_regions:
+err_dma_mask:
+ pci_disable_device(pdev);
+ return err;
+}
+
+/**
+ * octep_remove() - Remove Octeon PCI device from driver control.
+ *
+ * @pdev: PCI device structure of the Octeon device.
+ *
+ * Clean up all resources allocated for the Octeon device.
+ * Unregister the network device and disable the PCI device.
+ */
+static void octep_remove(struct pci_dev *pdev)
+{
+ struct octep_device *oct = pci_get_drvdata(pdev);
+ struct net_device *netdev;
+
+ if (!oct)
+ return;
+
+ netdev = oct->netdev;
+ if (netdev->reg_state == NETREG_REGISTERED)
+ unregister_netdev(netdev);
+
+ cancel_work_sync(&oct->tx_timeout_task);
+ octep_device_cleanup(oct);
+ pci_release_mem_regions(pdev);
+ free_netdev(netdev);
+ pci_disable_device(pdev);
+}
+
+static struct pci_driver octep_driver = {
+ .name = OCTEP_DRV_NAME,
+ .id_table = octep_pci_id_tbl,
+ .probe = octep_probe,
+ .remove = octep_remove,
+};
+
+/**
+ * octep_init_module() - Module initialization.
+ *
+ * Create common resources for the driver and register the PCI driver.
+ */
+static int __init octep_init_module(void)
+{
+ int ret;
+
+ pr_info("%s: Loading %s ...\n", OCTEP_DRV_NAME, OCTEP_DRV_STRING);
+
+ /* work queue for all deferred tasks */
+ octep_wq = create_singlethread_workqueue(OCTEP_DRV_NAME);
+ if (!octep_wq) {
+ pr_err("%s: Failed to create common workqueue\n",
+ OCTEP_DRV_NAME);
+ return -ENOMEM;
+ }
+
+ ret = pci_register_driver(&octep_driver);
+ if (ret < 0) {
+ pr_err("%s: Failed to register PCI driver; err=%d\n",
+ OCTEP_DRV_NAME, ret);
+ destroy_workqueue(octep_wq);
+ return ret;
+ }
+
+ pr_info("%s: Loaded successfully !\n", OCTEP_DRV_NAME);
+
+ return ret;
+}
+
+/**
+ * octep_exit_module() - Module exit routine.
+ *
+ * Unregister the driver from the PCI subsystem and clean up common resources.
+ */
+static void __exit octep_exit_module(void)
+{
+ pr_info("%s: Unloading ...\n", OCTEP_DRV_NAME);
+
+ pci_unregister_driver(&octep_driver);
+ destroy_workqueue(octep_wq);
+
+ pr_info("%s: Unloading complete\n", OCTEP_DRV_NAME);
+}
+
+module_init(octep_init_module);
+module_exit(octep_exit_module);
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.h b/drivers/net/ethernet/marvell/octeon_ep/octep_main.h
new file mode 100644
index 000000000..e0907a719
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.h
@@ -0,0 +1,375 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#ifndef _OCTEP_MAIN_H_
+#define _OCTEP_MAIN_H_
+
+#include "octep_tx.h"
+#include "octep_rx.h"
+#include "octep_ctrl_mbox.h"
+
+#define OCTEP_DRV_NAME "octeon_ep"
+#define OCTEP_DRV_STRING "Marvell Octeon EndPoint NIC Driver"
+
+#define OCTEP_PCIID_CN93_PF 0xB200177d
+#define OCTEP_PCIID_CN93_VF 0xB203177d
+
+#define OCTEP_PCI_DEVICE_ID_CN93_PF 0xB200
+#define OCTEP_PCI_DEVICE_ID_CN93_VF 0xB203
+
+#define OCTEP_PCI_DEVICE_ID_CNF95N_PF 0xB400 /* 95N PF */
+
+#define OCTEP_MAX_QUEUES 63
+#define OCTEP_MAX_IQ OCTEP_MAX_QUEUES
+#define OCTEP_MAX_OQ OCTEP_MAX_QUEUES
+#define OCTEP_MAX_VF 64
+
+#define OCTEP_MAX_MSIX_VECTORS OCTEP_MAX_OQ
+
+/* Flags to disable and enable Interrupts */
+#define OCTEP_INPUT_INTR (1)
+#define OCTEP_OUTPUT_INTR (2)
+#define OCTEP_MBOX_INTR (4)
+#define OCTEP_ALL_INTR 0xff
+
+#define OCTEP_IQ_INTR_RESEND_BIT 59
+#define OCTEP_OQ_INTR_RESEND_BIT 59
+
+#define OCTEP_MMIO_REGIONS 3
+/* PCI address space mapping information.
+ * Each of the 3 address spaces given by BAR0, BAR2 and BAR4 of
+ * Octeon is mapped to a distinct address range in the kernel's
+ * virtual address space.
+ */
+struct octep_mmio {
+ /* The ioremapped address to which the PCI address space is mapped. */
+ u8 __iomem *hw_addr;
+
+ /* Flag indicating the mapping was successful. */
+ int mapped;
+};
+
+struct octep_pci_win_regs {
+ u8 __iomem *pci_win_wr_addr;
+ u8 __iomem *pci_win_rd_addr;
+ u8 __iomem *pci_win_wr_data;
+ u8 __iomem *pci_win_rd_data;
+};
+
+struct octep_hw_ops {
+ void (*setup_iq_regs)(struct octep_device *oct, int q);
+ void (*setup_oq_regs)(struct octep_device *oct, int q);
+ void (*setup_mbox_regs)(struct octep_device *oct, int mbox);
+
+ irqreturn_t (*non_ioq_intr_handler)(void *ioq_vector);
+ irqreturn_t (*ioq_intr_handler)(void *ioq_vector);
+ int (*soft_reset)(struct octep_device *oct);
+ void (*reinit_regs)(struct octep_device *oct);
+ u32 (*update_iq_read_idx)(struct octep_iq *iq);
+
+ void (*enable_interrupts)(struct octep_device *oct);
+ void (*disable_interrupts)(struct octep_device *oct);
+ bool (*poll_non_ioq_interrupts)(struct octep_device *oct);
+
+ void (*enable_io_queues)(struct octep_device *oct);
+ void (*disable_io_queues)(struct octep_device *oct);
+ void (*enable_iq)(struct octep_device *oct, int q);
+ void (*disable_iq)(struct octep_device *oct, int q);
+ void (*enable_oq)(struct octep_device *oct, int q);
+ void (*disable_oq)(struct octep_device *oct, int q);
+ void (*reset_io_queues)(struct octep_device *oct);
+ void (*dump_registers)(struct octep_device *oct);
+};
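A brief illustrative sketch (not from the patch itself) of how this ops table is used: the chip-specific setup code, octep_device_setup_cn93_pf() in octep_cn9k_pf.c, fills in these callbacks, and the generic driver code dispatches through them. The helper name below is hypothetical.

```c
/* Hypothetical helper, for illustration only: enable every configured
 * Tx queue through the chip-specific callback in the ops table.
 */
static void octep_enable_all_iqs_sketch(struct octep_device *oct)
{
	int q;

	for (q = 0; q < oct->num_iqs; q++)
		oct->hw_ops.enable_iq(oct, q);
}
```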
+
+/* Octeon mailbox data */
+struct octep_mbox_data {
+ u32 cmd;
+ u32 total_len;
+ u32 recv_len;
+ u32 rsvd;
+ u64 *data;
+};
+
+/* Octeon device mailbox */
+struct octep_mbox {
+ /* A spinlock to protect access to this q_mbox. */
+ spinlock_t lock;
+
+ u32 q_no;
+ u32 state;
+
+ /* SLI_MAC_PF_MBOX_INT for PF, SLI_PKT_MBOX_INT for VF. */
+ u8 __iomem *mbox_int_reg;
+
+ /* SLI_PKT_PF_VF_MBOX_SIG(0) for PF,
+ * SLI_PKT_PF_VF_MBOX_SIG(1) for VF.
+ */
+ u8 __iomem *mbox_write_reg;
+
+ /* SLI_PKT_PF_VF_MBOX_SIG(1) for PF,
+ * SLI_PKT_PF_VF_MBOX_SIG(0) for VF.
+ */
+ u8 __iomem *mbox_read_reg;
+
+ struct octep_mbox_data mbox_data;
+};
+
+/* Tx/Rx queue vector per interrupt. */
+struct octep_ioq_vector {
+ char name[OCTEP_MSIX_NAME_SIZE];
+ struct napi_struct napi;
+ struct octep_device *octep_dev;
+ struct octep_iq *iq;
+ struct octep_oq *oq;
+ cpumask_t affinity_mask;
+};
+
+/* Octeon hardware/firmware offload capability flags. */
+#define OCTEP_CAP_TX_CHECKSUM BIT(0)
+#define OCTEP_CAP_RX_CHECKSUM BIT(1)
+#define OCTEP_CAP_TSO BIT(2)
+
+/* Link modes */
+enum octep_link_mode_bit_indices {
+ OCTEP_LINK_MODE_10GBASE_T = 0,
+ OCTEP_LINK_MODE_10GBASE_R,
+ OCTEP_LINK_MODE_10GBASE_CR,
+ OCTEP_LINK_MODE_10GBASE_KR,
+ OCTEP_LINK_MODE_10GBASE_LR,
+ OCTEP_LINK_MODE_10GBASE_SR,
+ OCTEP_LINK_MODE_25GBASE_CR,
+ OCTEP_LINK_MODE_25GBASE_KR,
+ OCTEP_LINK_MODE_25GBASE_SR,
+ OCTEP_LINK_MODE_40GBASE_CR4,
+ OCTEP_LINK_MODE_40GBASE_KR4,
+ OCTEP_LINK_MODE_40GBASE_LR4,
+ OCTEP_LINK_MODE_40GBASE_SR4,
+ OCTEP_LINK_MODE_50GBASE_CR2,
+ OCTEP_LINK_MODE_50GBASE_KR2,
+ OCTEP_LINK_MODE_50GBASE_SR2,
+ OCTEP_LINK_MODE_50GBASE_CR,
+ OCTEP_LINK_MODE_50GBASE_KR,
+ OCTEP_LINK_MODE_50GBASE_LR,
+ OCTEP_LINK_MODE_50GBASE_SR,
+ OCTEP_LINK_MODE_100GBASE_CR4,
+ OCTEP_LINK_MODE_100GBASE_KR4,
+ OCTEP_LINK_MODE_100GBASE_LR4,
+ OCTEP_LINK_MODE_100GBASE_SR4,
+ OCTEP_LINK_MODE_NBITS
+};
+
+/* Hardware interface link state information. */
+struct octep_iface_link_info {
+ /* Bitmap of Supported link speeds/modes. */
+ u64 supported_modes;
+
+ /* Bitmap of Advertised link speeds/modes. */
+ u64 advertised_modes;
+
+ /* Negotiated link speed in Mbps. */
+ u32 speed;
+
+ /* MTU */
+ u16 mtu;
+
+ /* Autonegotiation state. */
+#define OCTEP_LINK_MODE_AUTONEG_SUPPORTED BIT(0)
+#define OCTEP_LINK_MODE_AUTONEG_ADVERTISED BIT(1)
+ u8 autoneg;
+
+ /* Pause frames setting. */
+#define OCTEP_LINK_MODE_PAUSE_SUPPORTED BIT(0)
+#define OCTEP_LINK_MODE_PAUSE_ADVERTISED BIT(1)
+ u8 pause;
+
+ /* Admin state of the link (ifconfig <iface> up/down). */
+ u8 admin_up;
+
+ /* Operational state of the link: whether the physical link is up or down. */
+ u8 oper_up;
+};
+
+/* The Octeon device specific private data structure.
+ * Each Octeon device has this structure to represent all its components.
+ */
+struct octep_device {
+ struct octep_config *conf;
+
+ /* Octeon Chip type. */
+ u16 chip_id;
+ u16 rev_id;
+
+ /* Device capabilities enabled */
+ u64 caps_enabled;
+ /* Device capabilities supported */
+ u64 caps_supported;
+
+ /* Pointer to basic Linux device */
+ struct device *dev;
+ /* Linux PCI device pointer */
+ struct pci_dev *pdev;
+ /* Netdev corresponding to the Octeon device */
+ struct net_device *netdev;
+
+ /* memory mapped io range */
+ struct octep_mmio mmio[OCTEP_MMIO_REGIONS];
+
+ /* MAC address */
+ u8 mac_addr[ETH_ALEN];
+
+ /* Tx queues (IQ: Instruction Queue) */
+ u16 num_iqs;
+ /* pkind value to be used in every Tx hardware descriptor */
+ u8 pkind;
+ /* Pointers to Octeon Tx queues */
+ struct octep_iq *iq[OCTEP_MAX_IQ];
+
+ /* Rx queues (OQ: Output Queue) */
+ u16 num_oqs;
+ /* Pointers to Octeon Rx queues */
+ struct octep_oq *oq[OCTEP_MAX_OQ];
+
+ /* Hardware port number of the PCIe interface */
+ u16 pcie_port;
+
+ /* PCI Window registers to access some hardware CSRs */
+ struct octep_pci_win_regs pci_win_regs;
+ /* Hardware operations */
+ struct octep_hw_ops hw_ops;
+
+ /* IRQ info */
+ u16 num_irqs;
+ u16 num_non_ioq_irqs;
+ char *non_ioq_irq_names;
+ struct msix_entry *msix_entries;
+ /* IOQ information for its corresponding MSI-X interrupt. */
+ struct octep_ioq_vector *ioq_vector[OCTEP_MAX_QUEUES];
+
+ /* Hardware Interface Tx statistics */
+ struct octep_iface_tx_stats iface_tx_stats;
+ /* Hardware Interface Rx statistics */
+ struct octep_iface_rx_stats iface_rx_stats;
+
+ /* Hardware Interface Link info like supported modes, aneg support */
+ struct octep_iface_link_info link_info;
+
+ /* Mailbox to talk to VFs */
+ struct octep_mbox *mbox[OCTEP_MAX_VF];
+
+ /* Work entry to handle Tx timeout */
+ struct work_struct tx_timeout_task;
+
+ /* control mbox over pf */
+ struct octep_ctrl_mbox ctrl_mbox;
+
+ /* offset for iface stats */
+ u32 ctrl_mbox_ifstats_offset;
+
+ /* Work entry to handle ctrl mbox interrupt */
+ struct work_struct ctrl_mbox_task;
+ /* Wait queue for host to firmware requests */
+ wait_queue_head_t ctrl_req_wait_q;
+ /* List of objects waiting for h2f response */
+ struct list_head ctrl_req_wait_list;
+
+ /* Enable non-ioq interrupt polling */
+ bool poll_non_ioq_intr;
+ /* Work entry to poll non-ioq interrupts */
+ struct delayed_work intr_poll_task;
+
+ /* Firmware heartbeat timer */
+ struct timer_list hb_timer;
+ /* Firmware heartbeat miss count tracked by timer */
+ atomic_t hb_miss_cnt;
+ /* Task to reset device on heartbeat miss */
+ struct delayed_work hb_task;
+};
+
+static inline u16 OCTEP_MAJOR_REV(struct octep_device *oct)
+{
+ u16 rev = (oct->rev_id & 0xC) >> 2;
+
+ return (rev == 0) ? 1 : rev;
+}
+
+static inline u16 OCTEP_MINOR_REV(struct octep_device *oct)
+{
+ return (oct->rev_id & 0x3);
+}
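Worked example (illustrative, not from the patch): a rev_id of 0x0B decodes to a raw major of (0xB & 0xC) >> 2 = 2 and a minor of 0xB & 0x3 = 3, i.e. major revision 2, minor revision 3; a raw major of 0 is reported as 1.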
+
+/* Octeon CSR read/write access APIs */
+#define octep_write_csr(octep_dev, reg_off, value) \
+ writel(value, (octep_dev)->mmio[0].hw_addr + (reg_off))
+
+#define octep_write_csr64(octep_dev, reg_off, val64) \
+ writeq(val64, (octep_dev)->mmio[0].hw_addr + (reg_off))
+
+#define octep_read_csr(octep_dev, reg_off) \
+ readl((octep_dev)->mmio[0].hw_addr + (reg_off))
+
+#define octep_read_csr64(octep_dev, reg_off) \
+ readq((octep_dev)->mmio[0].hw_addr + (reg_off))
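A minimal usage sketch (not from the patch itself), assuming the CN9K register header is also included so that CN93_SDP_R_IN_ENABLE() is visible; the helper name is hypothetical.

```c
/* Hypothetical helper, for illustration only: report whether Tx ring 'q'
 * is enabled by reading its per-ring enable CSR through the BAR0 mapping.
 */
static inline bool octep_iq_is_enabled_sketch(struct octep_device *oct, int q)
{
	return octep_read_csr64(oct, CN93_SDP_R_IN_ENABLE(q)) & 0x1ULL;
}
```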
+
+/* Read windowed register.
+ * @param oct - pointer to the Octeon device.
+ * @param addr - Address of the register to read.
+ *
+ * This routine is called to read from the indirectly accessed
+ * Octeon registers that are visible through a PCI BAR0 mapped window
+ * register.
+ * @return - 64 bit value read from the register.
+ */
+static inline u64
+OCTEP_PCI_WIN_READ(struct octep_device *oct, u64 addr)
+{
+ u64 val64;
+
+ addr |= 1ull << 53; /* read 8 bytes */
+ writeq(addr, oct->pci_win_regs.pci_win_rd_addr);
+ val64 = readq(oct->pci_win_regs.pci_win_rd_data);
+
+ dev_dbg(&oct->pdev->dev,
+ "%s: reg: 0x%016llx val: 0x%016llx\n", __func__, addr, val64);
+
+ return val64;
+}
+
+/* Write windowed register.
+ * @param oct - pointer to the Octeon device.
+ * @param addr - Address of the register to write
+ * @param val - Value to write
+ *
+ * This routine is called to write to the indirectly accessed
+ * Octeon registers that are visible through a PCI BAR0 mapped window
+ * register.
+ * @return Nothing.
+ */
+static inline void
+OCTEP_PCI_WIN_WRITE(struct octep_device *oct, u64 addr, u64 val)
+{
+ writeq(addr, oct->pci_win_regs.pci_win_wr_addr);
+ writeq(val, oct->pci_win_regs.pci_win_wr_data);
+
+ dev_dbg(&oct->pdev->dev,
+ "%s: reg: 0x%016llx val: 0x%016llx\n", __func__, addr, val);
+}
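A short illustrative sketch (not from the patch itself) of how the two window accessors combine for a read-modify-write of an indirectly accessed CSR; the helper name is hypothetical and 'addr' would be a full 64-bit hardware address such as CN93_RST_BOOT from the CN9K register header.

```c
/* Hypothetical helper, for illustration only: set 'bits' in an indirectly
 * accessed CSR via the BAR0 window registers.
 */
static inline void octep_pci_win_set_bits_sketch(struct octep_device *oct,
						 u64 addr, u64 bits)
{
	u64 val = OCTEP_PCI_WIN_READ(oct, addr);

	OCTEP_PCI_WIN_WRITE(oct, addr, val | bits);
}
```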
+
+extern struct workqueue_struct *octep_wq;
+
+int octep_device_setup(struct octep_device *oct);
+int octep_setup_iqs(struct octep_device *oct);
+void octep_free_iqs(struct octep_device *oct);
+void octep_clean_iqs(struct octep_device *oct);
+int octep_setup_oqs(struct octep_device *oct);
+void octep_free_oqs(struct octep_device *oct);
+void octep_oq_dbell_init(struct octep_device *oct);
+void octep_device_setup_cn93_pf(struct octep_device *oct);
+int octep_iq_process_completions(struct octep_iq *iq, u16 budget);
+int octep_oq_process_rx(struct octep_oq *oq, int budget);
+void octep_set_ethtool_ops(struct net_device *netdev);
+
+#endif /* _OCTEP_MAIN_H_ */
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h b/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h
new file mode 100644
index 000000000..b25c3093d
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h
@@ -0,0 +1,373 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#ifndef _OCTEP_REGS_CN9K_PF_H_
+#define _OCTEP_REGS_CN9K_PF_H_
+
+/* ############################ RST ######################### */
+#define CN93_RST_BOOT 0x000087E006001600ULL
+#define CN93_RST_CORE_DOMAIN_W1S 0x000087E006001820ULL
+#define CN93_RST_CORE_DOMAIN_W1C 0x000087E006001828ULL
+
+#define CN93_CONFIG_XPANSION_BAR 0x38
+#define CN93_CONFIG_PCIE_CAP 0x70
+#define CN93_CONFIG_PCIE_DEVCAP 0x74
+#define CN93_CONFIG_PCIE_DEVCTL 0x78
+#define CN93_CONFIG_PCIE_LINKCAP 0x7C
+#define CN93_CONFIG_PCIE_LINKCTL 0x80
+#define CN93_CONFIG_PCIE_SLOTCAP 0x84
+#define CN93_CONFIG_PCIE_SLOTCTL 0x88
+
+#define CN93_PCIE_SRIOV_FDL 0x188 /* 0x98 */
+#define CN93_PCIE_SRIOV_FDL_BIT_POS 0x10
+#define CN93_PCIE_SRIOV_FDL_MASK 0xFF
+
+#define CN93_CONFIG_PCIE_FLTMSK 0x720
+
+/* ################# Offsets of RING, EPF, MAC ######################### */
+#define CN93_RING_OFFSET (0x1ULL << 17)
+#define CN93_EPF_OFFSET (0x1ULL << 25)
+#define CN93_MAC_OFFSET (0x1ULL << 4)
+#define CN93_BIT_ARRAY_OFFSET (0x1ULL << 4)
+#define CN93_EPVF_RING_OFFSET (0x1ULL << 4)
+
+/* ################# Scratch Registers ######################### */
+#define CN93_SDP_EPF_SCRATCH 0x205E0
+
+/* ################# Window Registers ######################### */
+#define CN93_SDP_WIN_WR_ADDR64 0x20000
+#define CN93_SDP_WIN_RD_ADDR64 0x20010
+#define CN93_SDP_WIN_WR_DATA64 0x20020
+#define CN93_SDP_WIN_WR_MASK_REG 0x20030
+#define CN93_SDP_WIN_RD_DATA64 0x20040
+
+#define CN93_SDP_MAC_NUMBER 0x2C100
+
+/* ################# Global Privileged registers ######################### */
+#define CN93_SDP_EPF_RINFO 0x205F0
+
+#define CN93_SDP_EPF_RINFO_SRN(val) ((val) & 0xFF)
+#define CN93_SDP_EPF_RINFO_RPVF(val) (((val) >> 32) & 0xF)
+#define CN93_SDP_EPF_RINFO_NVFS(val) (((val) >> 48) & 0xFF)
+
+/* SDP Function select */
+#define CN93_SDP_FUNC_SEL_EPF_BIT_POS 8
+#define CN93_SDP_FUNC_SEL_FUNC_BIT_POS 0
+
+/* ##### RING IN (Into device from PCI: Tx Ring) REGISTERS #### */
+#define CN93_SDP_R_IN_CONTROL_START 0x10000
+#define CN93_SDP_R_IN_ENABLE_START 0x10010
+#define CN93_SDP_R_IN_INSTR_BADDR_START 0x10020
+#define CN93_SDP_R_IN_INSTR_RSIZE_START 0x10030
+#define CN93_SDP_R_IN_INSTR_DBELL_START 0x10040
+#define CN93_SDP_R_IN_CNTS_START 0x10050
+#define CN93_SDP_R_IN_INT_LEVELS_START 0x10060
+#define CN93_SDP_R_IN_PKT_CNT_START 0x10080
+#define CN93_SDP_R_IN_BYTE_CNT_START 0x10090
+
+#define CN93_SDP_R_IN_CONTROL(ring) \
+ (CN93_SDP_R_IN_CONTROL_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_IN_ENABLE(ring) \
+ (CN93_SDP_R_IN_ENABLE_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_IN_INSTR_BADDR(ring) \
+ (CN93_SDP_R_IN_INSTR_BADDR_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_IN_INSTR_RSIZE(ring) \
+ (CN93_SDP_R_IN_INSTR_RSIZE_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_IN_INSTR_DBELL(ring) \
+ (CN93_SDP_R_IN_INSTR_DBELL_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_IN_CNTS(ring) \
+ (CN93_SDP_R_IN_CNTS_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_IN_INT_LEVELS(ring) \
+ (CN93_SDP_R_IN_INT_LEVELS_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_IN_PKT_CNT(ring) \
+ (CN93_SDP_R_IN_PKT_CNT_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_IN_BYTE_CNT(ring) \
+ (CN93_SDP_R_IN_BYTE_CNT_START + ((ring) * CN93_RING_OFFSET))
+
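Worked example (illustrative, not from the patch): with CN93_RING_OFFSET = 1 << 17 = 0x20000, ring 2's Tx enable register resolves to CN93_SDP_R_IN_ENABLE(2) = 0x10010 + 2 * 0x20000 = 0x50010; every other per-ring register in this block is strided the same way.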
+/* Rings per Virtual Function */
+#define CN93_R_IN_CTL_RPVF_MASK (0xF)
+#define CN93_R_IN_CTL_RPVF_POS (48)
+
+/* Number of instructions to be read in one MAC read request.
+ * Set to the maximum value (4).
+ */
+#define CN93_R_IN_CTL_IDLE (0x1ULL << 28)
+#define CN93_R_IN_CTL_RDSIZE (0x3ULL << 25)
+#define CN93_R_IN_CTL_IS_64B (0x1ULL << 24)
+#define CN93_R_IN_CTL_D_NSR (0x1ULL << 8)
+#define CN93_R_IN_CTL_D_ESR (0x1ULL << 6)
+#define CN93_R_IN_CTL_D_ROR (0x1ULL << 5)
+#define CN93_R_IN_CTL_NSR (0x1ULL << 3)
+#define CN93_R_IN_CTL_ESR (0x1ULL << 1)
+#define CN93_R_IN_CTL_ROR (0x1ULL << 0)
+
+#define CN93_R_IN_CTL_MASK (CN93_R_IN_CTL_RDSIZE | CN93_R_IN_CTL_IS_64B)
+
+/* ##### RING OUT (out from device to PCI host: Rx Ring) REGISTERS #### */
+#define CN93_SDP_R_OUT_CNTS_START 0x10100
+#define CN93_SDP_R_OUT_INT_LEVELS_START 0x10110
+#define CN93_SDP_R_OUT_SLIST_BADDR_START 0x10120
+#define CN93_SDP_R_OUT_SLIST_RSIZE_START 0x10130
+#define CN93_SDP_R_OUT_SLIST_DBELL_START 0x10140
+#define CN93_SDP_R_OUT_CONTROL_START 0x10150
+#define CN93_SDP_R_OUT_ENABLE_START 0x10160
+#define CN93_SDP_R_OUT_PKT_CNT_START 0x10180
+#define CN93_SDP_R_OUT_BYTE_CNT_START 0x10190
+
+#define CN93_SDP_R_OUT_CONTROL(ring) \
+ (CN93_SDP_R_OUT_CONTROL_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_OUT_ENABLE(ring) \
+ (CN93_SDP_R_OUT_ENABLE_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_OUT_SLIST_BADDR(ring) \
+ (CN93_SDP_R_OUT_SLIST_BADDR_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_OUT_SLIST_RSIZE(ring) \
+ (CN93_SDP_R_OUT_SLIST_RSIZE_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_OUT_SLIST_DBELL(ring) \
+ (CN93_SDP_R_OUT_SLIST_DBELL_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_OUT_CNTS(ring) \
+ (CN93_SDP_R_OUT_CNTS_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_OUT_INT_LEVELS(ring) \
+ (CN93_SDP_R_OUT_INT_LEVELS_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_OUT_PKT_CNT(ring) \
+ (CN93_SDP_R_OUT_PKT_CNT_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_OUT_BYTE_CNT(ring) \
+ (CN93_SDP_R_OUT_BYTE_CNT_START + ((ring) * CN93_RING_OFFSET))
+
+/*------------------ R_OUT Masks ----------------*/
+#define CN93_R_OUT_INT_LEVELS_BMODE BIT_ULL(63)
+#define CN93_R_OUT_INT_LEVELS_TIMET (32)
+
+#define CN93_R_OUT_CTL_IDLE BIT_ULL(40)
+#define CN93_R_OUT_CTL_ES_I BIT_ULL(34)
+#define CN93_R_OUT_CTL_NSR_I BIT_ULL(33)
+#define CN93_R_OUT_CTL_ROR_I BIT_ULL(32)
+#define CN93_R_OUT_CTL_ES_D BIT_ULL(30)
+#define CN93_R_OUT_CTL_NSR_D BIT_ULL(29)
+#define CN93_R_OUT_CTL_ROR_D BIT_ULL(28)
+#define CN93_R_OUT_CTL_ES_P BIT_ULL(26)
+#define CN93_R_OUT_CTL_NSR_P BIT_ULL(25)
+#define CN93_R_OUT_CTL_ROR_P BIT_ULL(24)
+#define CN93_R_OUT_CTL_IMODE BIT_ULL(23)
+
+/* ############### Interrupt Moderation Registers ############### */
+#define CN93_SDP_R_IN_INT_MDRT_CTL0_START 0x10280
+#define CN93_SDP_R_IN_INT_MDRT_CTL1_START 0x102A0
+#define CN93_SDP_R_IN_INT_MDRT_DBG_START 0x102C0
+
+#define CN93_SDP_R_OUT_INT_MDRT_CTL0_START 0x10380
+#define CN93_SDP_R_OUT_INT_MDRT_CTL1_START 0x103A0
+#define CN93_SDP_R_OUT_INT_MDRT_DBG_START 0x103C0
+
+#define CN93_SDP_R_IN_INT_MDRT_CTL0(ring) \
+ (CN93_SDP_R_IN_INT_MDRT_CTL0_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_IN_INT_MDRT_CTL1(ring) \
+ (CN93_SDP_R_IN_INT_MDRT_CTL1_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_IN_INT_MDRT_DBG(ring) \
+ (CN93_SDP_R_IN_INT_MDRT_DBG_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_OUT_INT_MDRT_CTL0(ring) \
+ (CN93_SDP_R_OUT_INT_MDRT_CTL0_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_OUT_INT_MDRT_CTL1(ring) \
+ (CN93_SDP_R_OUT_INT_MDRT_CTL1_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_OUT_INT_MDRT_DBG(ring) \
+ (CN93_SDP_R_OUT_INT_MDRT_DBG_START + ((ring) * CN93_RING_OFFSET))
+
+/* ##################### Mail Box Registers ########################## */
+/* INT register for VF. When an MBOX write from the PF happens to a VF,
+ * the corresponding bit is set in this register as well as in the
+ * PF_VF_INT register.
+ *
+ * This is an RO register; the interrupt can be cleared by writing 1 to
+ * PF_VF_INT.
+ */
+/* The first two registers are for PF to VF; the last one carries data from VF to PF */
+#define CN93_SDP_R_MBOX_PF_VF_DATA_START 0x10210
+#define CN93_SDP_R_MBOX_PF_VF_INT_START 0x10220
+#define CN93_SDP_R_MBOX_VF_PF_DATA_START 0x10230
+
+#define CN93_SDP_R_MBOX_PF_VF_DATA(ring) \
+ (CN93_SDP_R_MBOX_PF_VF_DATA_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_MBOX_PF_VF_INT(ring) \
+ (CN93_SDP_R_MBOX_PF_VF_INT_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_MBOX_VF_PF_DATA(ring) \
+ (CN93_SDP_R_MBOX_VF_PF_DATA_START + ((ring) * CN93_RING_OFFSET))
+
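An illustrative sketch (not from the patch itself) of how these per-ring mailbox registers pair with the CSR helpers in octep_main.h; the function name is hypothetical and the driver's real PF/VF mailbox protocol carries more state than a single write.

```c
/* Hypothetical helper, for illustration only: PF posts one 64-bit word
 * to the VF that owns ring 'r'; per the comment above, hardware then sets
 * the corresponding bit in CN93_SDP_R_MBOX_PF_VF_INT(r).
 */
static inline void octep_pf_mbox_post_sketch(struct octep_device *oct,
					     int r, u64 data)
{
	octep_write_csr64(oct, CN93_SDP_R_MBOX_PF_VF_DATA(r), data);
}
```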
+/* ##################### Interrupt Registers ########################## */
+#define CN93_SDP_R_ERR_TYPE_START 0x10400
+
+#define CN93_SDP_R_ERR_TYPE(ring) \
+ (CN93_SDP_R_ERR_TYPE_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_MBOX_ISM_START 0x10500
+#define CN93_SDP_R_OUT_CNTS_ISM_START 0x10510
+#define CN93_SDP_R_IN_CNTS_ISM_START 0x10520
+
+#define CN93_SDP_R_MBOX_ISM(ring) \
+ (CN93_SDP_R_MBOX_ISM_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_OUT_CNTS_ISM(ring) \
+ (CN93_SDP_R_OUT_CNTS_ISM_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_IN_CNTS_ISM(ring) \
+ (CN93_SDP_R_IN_CNTS_ISM_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_EPF_MBOX_RINT_START 0x20100
+#define CN93_SDP_EPF_MBOX_RINT_W1S_START 0x20120
+#define CN93_SDP_EPF_MBOX_RINT_ENA_W1C_START 0x20140
+#define CN93_SDP_EPF_MBOX_RINT_ENA_W1S_START 0x20160
+
+#define CN93_SDP_EPF_VFIRE_RINT_START 0x20180
+#define CN93_SDP_EPF_VFIRE_RINT_W1S_START 0x201A0
+#define CN93_SDP_EPF_VFIRE_RINT_ENA_W1C_START 0x201C0
+#define CN93_SDP_EPF_VFIRE_RINT_ENA_W1S_START 0x201E0
+
+#define CN93_SDP_EPF_IRERR_RINT 0x20200
+#define CN93_SDP_EPF_IRERR_RINT_W1S 0x20210
+#define CN93_SDP_EPF_IRERR_RINT_ENA_W1C 0x20220
+#define CN93_SDP_EPF_IRERR_RINT_ENA_W1S 0x20230
+
+#define CN93_SDP_EPF_VFORE_RINT_START 0x20240
+#define CN93_SDP_EPF_VFORE_RINT_W1S_START 0x20260
+#define CN93_SDP_EPF_VFORE_RINT_ENA_W1C_START 0x20280
+#define CN93_SDP_EPF_VFORE_RINT_ENA_W1S_START 0x202A0
+
+#define CN93_SDP_EPF_ORERR_RINT 0x20320
+#define CN93_SDP_EPF_ORERR_RINT_W1S 0x20330
+#define CN93_SDP_EPF_ORERR_RINT_ENA_W1C 0x20340
+#define CN93_SDP_EPF_ORERR_RINT_ENA_W1S 0x20350
+
+#define CN93_SDP_EPF_OEI_RINT 0x20360
+#define CN93_SDP_EPF_OEI_RINT_W1S 0x20370
+#define CN93_SDP_EPF_OEI_RINT_ENA_W1C 0x20380
+#define CN93_SDP_EPF_OEI_RINT_ENA_W1S 0x20390
+
+#define CN93_SDP_EPF_DMA_RINT 0x20400
+#define CN93_SDP_EPF_DMA_RINT_W1S 0x20410
+#define CN93_SDP_EPF_DMA_RINT_ENA_W1C 0x20420
+#define CN93_SDP_EPF_DMA_RINT_ENA_W1S 0x20430
+
+#define CN93_SDP_EPF_DMA_INT_LEVEL_START 0x20440
+#define CN93_SDP_EPF_DMA_CNT_START 0x20460
+#define CN93_SDP_EPF_DMA_TIM_START 0x20480
+
+#define CN93_SDP_EPF_MISC_RINT 0x204A0
+#define CN93_SDP_EPF_MISC_RINT_W1S 0x204B0
+#define CN93_SDP_EPF_MISC_RINT_ENA_W1C 0x204C0
+#define CN93_SDP_EPF_MISC_RINT_ENA_W1S 0x204D0
+
+#define CN93_SDP_EPF_DMA_VF_RINT_START 0x204E0
+#define CN93_SDP_EPF_DMA_VF_RINT_W1S_START 0x20500
+#define CN93_SDP_EPF_DMA_VF_RINT_ENA_W1C_START 0x20520
+#define CN93_SDP_EPF_DMA_VF_RINT_ENA_W1S_START 0x20540
+
+#define CN93_SDP_EPF_PP_VF_RINT_START 0x20560
+#define CN93_SDP_EPF_PP_VF_RINT_W1S_START 0x20580
+#define CN93_SDP_EPF_PP_VF_RINT_ENA_W1C_START 0x205A0
+#define CN93_SDP_EPF_PP_VF_RINT_ENA_W1S_START 0x205C0
+
+#define CN93_SDP_EPF_MBOX_RINT(index) \
+ (CN93_SDP_EPF_MBOX_RINT_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_MBOX_RINT_W1S(index) \
+ (CN93_SDP_EPF_MBOX_RINT_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_MBOX_RINT_ENA_W1C(index) \
+ (CN93_SDP_EPF_MBOX_RINT_ENA_W1C_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_MBOX_RINT_ENA_W1S(index) \
+ (CN93_SDP_EPF_MBOX_RINT_ENA_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+
+#define CN93_SDP_EPF_VFIRE_RINT(index) \
+ (CN93_SDP_EPF_VFIRE_RINT_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_VFIRE_RINT_W1S(index) \
+ (CN93_SDP_EPF_VFIRE_RINT_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_VFIRE_RINT_ENA_W1C(index) \
+ (CN93_SDP_EPF_VFIRE_RINT_ENA_W1C_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_VFIRE_RINT_ENA_W1S(index) \
+ (CN93_SDP_EPF_VFIRE_RINT_ENA_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+
+#define CN93_SDP_EPF_VFORE_RINT(index) \
+ (CN93_SDP_EPF_VFORE_RINT_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_VFORE_RINT_W1S(index) \
+ (CN93_SDP_EPF_VFORE_RINT_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_VFORE_RINT_ENA_W1C(index) \
+ (CN93_SDP_EPF_VFORE_RINT_ENA_W1C_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_VFORE_RINT_ENA_W1S(index) \
+ (CN93_SDP_EPF_VFORE_RINT_ENA_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+
+#define CN93_SDP_EPF_DMA_VF_RINT(index) \
+ (CN93_SDP_EPF_DMA_VF_RINT_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_DMA_VF_RINT_W1S(index) \
+ (CN93_SDP_EPF_DMA_VF_RINT_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_DMA_VF_RINT_ENA_W1C(index) \
+ (CN93_SDP_EPF_DMA_VF_RINT_ENA_W1C_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_DMA_VF_RINT_ENA_W1S(index) \
+ (CN93_SDP_EPF_DMA_VF_RINT_ENA_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+
+#define CN93_SDP_EPF_PP_VF_RINT(index) \
+ (CN93_SDP_EPF_PP_VF_RINT_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_PP_VF_RINT_W1S(index) \
+ (CN93_SDP_EPF_PP_VF_RINT_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_PP_VF_RINT_ENA_W1C(index) \
+ (CN93_SDP_EPF_PP_VF_RINT_ENA_W1C_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_PP_VF_RINT_ENA_W1S(index) \
+ (CN93_SDP_EPF_PP_VF_RINT_ENA_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+
+/*------------------ Interrupt Masks ----------------*/
+#define CN93_INTR_R_SEND_ISM BIT_ULL(63)
+#define CN93_INTR_R_OUT_INT BIT_ULL(62)
+#define CN93_INTR_R_IN_INT BIT_ULL(61)
+#define CN93_INTR_R_MBOX_INT BIT_ULL(60)
+#define CN93_INTR_R_RESEND BIT_ULL(59)
+#define CN93_INTR_R_CLR_TIM BIT_ULL(58)
+
+/* ####################### Ring Mapping Registers ################################## */
+#define CN93_SDP_EPVF_RING_START 0x26000
+#define CN93_SDP_IN_RING_TB_MAP_START 0x28000
+#define CN93_SDP_IN_RATE_LIMIT_START 0x2A000
+#define CN93_SDP_MAC_PF_RING_CTL_START 0x2C000
+
+#define CN93_SDP_EPVF_RING(ring) \
+ (CN93_SDP_EPVF_RING_START + ((ring) * CN93_EPVF_RING_OFFSET))
+#define CN93_SDP_IN_RING_TB_MAP(ring) \
+ (CN93_SDP_IN_RING_TB_MAP_START + ((ring) * CN93_EPVF_RING_OFFSET))
+#define CN93_SDP_IN_RATE_LIMIT(ring) \
+ (CN93_SDP_IN_RATE_LIMIT_START + ((ring) * CN93_EPVF_RING_OFFSET))
+#define CN93_SDP_MAC_PF_RING_CTL(mac) \
+ (CN93_SDP_MAC_PF_RING_CTL_START + ((mac) * CN93_MAC_OFFSET))
+
+#define CN93_SDP_MAC_PF_RING_CTL_NPFS(val) ((val) & 0xF)
+#define CN93_SDP_MAC_PF_RING_CTL_SRN(val) (((val) >> 8) & 0xFF)
+#define CN93_SDP_MAC_PF_RING_CTL_RPPF(val) (((val) >> 16) & 0x3F)
+
+/* Number of non-queue interrupts in CN93xx */
+#define CN93_NUM_NON_IOQ_INTR 16
+
+/* bit 0 for control mbox interrupt */
+#define CN93_SDP_EPF_OEI_RINT_DATA_BIT_MBOX BIT_ULL(0)
+/* bit 1 for firmware heartbeat interrupt */
+#define CN93_SDP_EPF_OEI_RINT_DATA_BIT_HBEAT BIT_ULL(1)
+
+#endif /* _OCTEP_REGS_CN9K_PF_H_ */
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
new file mode 100644
index 000000000..3c43f8078
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
@@ -0,0 +1,506 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/etherdevice.h>
+#include <linux/vmalloc.h>
+
+#include "octep_config.h"
+#include "octep_main.h"
+
+static void octep_oq_reset_indices(struct octep_oq *oq)
+{
+ oq->host_read_idx = 0;
+ oq->host_refill_idx = 0;
+ oq->refill_count = 0;
+ oq->last_pkt_count = 0;
+ oq->pkts_pending = 0;
+}
+
+/**
+ * octep_oq_fill_ring_buffers() - fill initial receive buffers for Rx ring.
+ *
+ * @oq: Octeon Rx queue data structure.
+ *
+ * Return: 0, if successfully filled receive buffers for all descriptors.
+ * -1, if failed to allocate a buffer or failed to map for DMA.
+ */
+static int octep_oq_fill_ring_buffers(struct octep_oq *oq)
+{
+ struct octep_oq_desc_hw *desc_ring = oq->desc_ring;
+ struct page *page;
+ u32 i;
+
+ for (i = 0; i < oq->max_count; i++) {
+ page = dev_alloc_page();
+ if (unlikely(!page)) {
+ dev_err(oq->dev, "Rx buffer alloc failed\n");
+ goto rx_buf_alloc_err;
+ }
+ desc_ring[i].buffer_ptr = dma_map_page(oq->dev, page, 0,
+ PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(oq->dev, desc_ring[i].buffer_ptr)) {
+ dev_err(oq->dev,
+ "OQ-%d buffer alloc: DMA mapping error!\n",
+ oq->q_no);
+ put_page(page);
+ goto dma_map_err;
+ }
+ oq->buff_info[i].page = page;
+ }
+
+ return 0;
+
+dma_map_err:
+rx_buf_alloc_err:
+ while (i) {
+ i--;
+ dma_unmap_page(oq->dev, desc_ring[i].buffer_ptr, PAGE_SIZE, DMA_FROM_DEVICE);
+ put_page(oq->buff_info[i].page);
+ oq->buff_info[i].page = NULL;
+ }
+
+ return -1;
+}
+
+/**
+ * octep_oq_refill() - refill buffers for used Rx ring descriptors.
+ *
+ * @oct: Octeon device private data structure.
+ * @oq: Octeon Rx queue data structure.
+ *
+ * Return: number of descriptors successfully refilled with receive buffers.
+ */
+static int octep_oq_refill(struct octep_device *oct, struct octep_oq *oq)
+{
+ struct octep_oq_desc_hw *desc_ring = oq->desc_ring;
+ struct page *page;
+ u32 refill_idx, i;
+
+ refill_idx = oq->host_refill_idx;
+ for (i = 0; i < oq->refill_count; i++) {
+ page = dev_alloc_page();
+ if (unlikely(!page)) {
+ dev_err(oq->dev, "refill: rx buffer alloc failed\n");
+ oq->stats.alloc_failures++;
+ break;
+ }
+
+ desc_ring[refill_idx].buffer_ptr = dma_map_page(oq->dev, page, 0,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ if (dma_mapping_error(oq->dev, desc_ring[refill_idx].buffer_ptr)) {
+ dev_err(oq->dev,
+ "OQ-%d buffer refill: DMA mapping error!\n",
+ oq->q_no);
+ put_page(page);
+ oq->stats.alloc_failures++;
+ break;
+ }
+ oq->buff_info[refill_idx].page = page;
+ refill_idx++;
+ if (refill_idx == oq->max_count)
+ refill_idx = 0;
+ }
+ oq->host_refill_idx = refill_idx;
+ oq->refill_count -= i;
+
+ return i;
+}
+
+/**
+ * octep_setup_oq() - Setup a Rx queue.
+ *
+ * @oct: Octeon device private data structure.
+ * @q_no: Rx queue number to be setup.
+ *
+ * Allocate resources for a Rx queue.
+ */
+static int octep_setup_oq(struct octep_device *oct, int q_no)
+{
+ struct octep_oq *oq;
+ u32 desc_ring_size;
+
+ oq = vzalloc(sizeof(*oq));
+ if (!oq)
+ goto create_oq_fail;
+ oct->oq[q_no] = oq;
+
+ oq->octep_dev = oct;
+ oq->netdev = oct->netdev;
+ oq->dev = &oct->pdev->dev;
+ oq->q_no = q_no;
+ oq->max_count = CFG_GET_OQ_NUM_DESC(oct->conf);
+ oq->ring_size_mask = oq->max_count - 1;
+ oq->buffer_size = CFG_GET_OQ_BUF_SIZE(oct->conf);
+ oq->max_single_buffer_size = oq->buffer_size - OCTEP_OQ_RESP_HW_SIZE;
+
+ /* When the hardware/firmware supports additional capabilities,
+ * an additional header is filled in by Octeon after the length field
+ * in Rx packets. This header contains extra packet information.
+ */
+ if (oct->caps_enabled)
+ oq->max_single_buffer_size -= OCTEP_OQ_RESP_HW_EXT_SIZE;
+
+ oq->refill_threshold = CFG_GET_OQ_REFILL_THRESHOLD(oct->conf);
+
+ desc_ring_size = oq->max_count * OCTEP_OQ_DESC_SIZE;
+ oq->desc_ring = dma_alloc_coherent(oq->dev, desc_ring_size,
+ &oq->desc_ring_dma, GFP_KERNEL);
+
+ if (unlikely(!oq->desc_ring)) {
+ dev_err(oq->dev,
+ "Failed to allocate DMA memory for OQ-%d !!\n", q_no);
+ goto desc_dma_alloc_err;
+ }
+
+ oq->buff_info = vcalloc(oq->max_count, OCTEP_OQ_RECVBUF_SIZE);
+ if (unlikely(!oq->buff_info)) {
+ dev_err(&oct->pdev->dev,
+ "Failed to allocate buffer info for OQ-%d\n", q_no);
+ goto buf_list_err;
+ }
+
+ if (octep_oq_fill_ring_buffers(oq))
+ goto oq_fill_buff_err;
+
+ octep_oq_reset_indices(oq);
+ oct->hw_ops.setup_oq_regs(oct, q_no);
+ oct->num_oqs++;
+
+ return 0;
+
+oq_fill_buff_err:
+ vfree(oq->buff_info);
+ oq->buff_info = NULL;
+buf_list_err:
+ dma_free_coherent(oq->dev, desc_ring_size,
+ oq->desc_ring, oq->desc_ring_dma);
+ oq->desc_ring = NULL;
+desc_dma_alloc_err:
+ vfree(oq);
+ oct->oq[q_no] = NULL;
+create_oq_fail:
+ return -1;
+}
+
+/**
+ * octep_oq_free_ring_buffers() - Free ring buffers.
+ *
+ * @oq: Octeon Rx queue data structure.
+ *
+ * Free receive buffers in unused Rx queue descriptors.
+ */
+static void octep_oq_free_ring_buffers(struct octep_oq *oq)
+{
+ struct octep_oq_desc_hw *desc_ring = oq->desc_ring;
+ int i;
+
+ if (!oq->desc_ring || !oq->buff_info)
+ return;
+
+ for (i = 0; i < oq->max_count; i++) {
+ if (oq->buff_info[i].page) {
+ dma_unmap_page(oq->dev, desc_ring[i].buffer_ptr,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ put_page(oq->buff_info[i].page);
+ oq->buff_info[i].page = NULL;
+ desc_ring[i].buffer_ptr = 0;
+ }
+ }
+ octep_oq_reset_indices(oq);
+}
+
+/**
+ * octep_free_oq() - Free Rx queue resources.
+ *
+ * @oq: Octeon Rx queue data structure.
+ *
+ * Free all resources of a Rx queue.
+ */
+static int octep_free_oq(struct octep_oq *oq)
+{
+ struct octep_device *oct = oq->octep_dev;
+ int q_no = oq->q_no;
+
+ octep_oq_free_ring_buffers(oq);
+
+ vfree(oq->buff_info);
+
+ if (oq->desc_ring)
+ dma_free_coherent(oq->dev,
+ oq->max_count * OCTEP_OQ_DESC_SIZE,
+ oq->desc_ring, oq->desc_ring_dma);
+
+ vfree(oq);
+ oct->oq[q_no] = NULL;
+ oct->num_oqs--;
+ return 0;
+}
+
+/**
+ * octep_setup_oqs() - setup resources for all Rx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+int octep_setup_oqs(struct octep_device *oct)
+{
+ int i, retval = 0;
+
+ oct->num_oqs = 0;
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) {
+ retval = octep_setup_oq(oct, i);
+ if (retval) {
+ dev_err(&oct->pdev->dev,
+ "Failed to setup OQ(RxQ)-%d.\n", i);
+ goto oq_setup_err;
+ }
+ dev_dbg(&oct->pdev->dev, "Successfully setup OQ(RxQ)-%d.\n", i);
+ }
+
+ return 0;
+
+oq_setup_err:
+ while (i) {
+ i--;
+ octep_free_oq(oct->oq[i]);
+ }
+ return -1;
+}
+
+/**
+ * octep_oq_dbell_init() - Initialize Rx queue doorbell.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Write number of descriptors to Rx queue doorbell register.
+ */
+void octep_oq_dbell_init(struct octep_device *oct)
+{
+ int i;
+
+ for (i = 0; i < oct->num_oqs; i++)
+ writel(oct->oq[i]->max_count, oct->oq[i]->pkts_credit_reg);
+}
+
+/**
+ * octep_free_oqs() - Free resources of all Rx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+void octep_free_oqs(struct octep_device *oct)
+{
+ int i;
+
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) {
+ if (!oct->oq[i])
+ continue;
+ octep_free_oq(oct->oq[i]);
+ dev_dbg(&oct->pdev->dev,
+ "Successfully freed OQ(RxQ)-%d.\n", i);
+ }
+}
+
+/**
+ * octep_oq_check_hw_for_pkts() - Check for new Rx packets.
+ *
+ * @oct: Octeon device private data structure.
+ * @oq: Octeon Rx queue data structure.
+ *
+ * Return: packets received after previous check.
+ */
+static int octep_oq_check_hw_for_pkts(struct octep_device *oct,
+ struct octep_oq *oq)
+{
+ u32 pkt_count, new_pkts;
+
+ pkt_count = readl(oq->pkts_sent_reg);
+ new_pkts = pkt_count - oq->last_pkt_count;
+
+ /* Clear the hardware packet counter register if the Rx queue is
+ * being processed continuously within a single interrupt and the
+ * counter is nearing its maximum value.
+ * The counter is not cleared on every read, to save write cycles.
+ */
+ if (unlikely(pkt_count > 0xF0000000U)) {
+ writel(pkt_count, oq->pkts_sent_reg);
+ pkt_count = readl(oq->pkts_sent_reg);
+ new_pkts += pkt_count;
+ }
+ oq->last_pkt_count = pkt_count;
+ oq->pkts_pending += new_pkts;
+ return new_pkts;
+}
+
+/**
+ * __octep_oq_process_rx() - Process hardware Rx queue and push to stack.
+ *
+ * @oct: Octeon device private data structure.
+ * @oq: Octeon Rx queue data structure.
+ * @pkts_to_process: number of packets to be processed.
+ *
+ * Process the new packets in Rx queue.
+ * Packets larger than a single Rx buffer arrive in consecutive descriptors,
+ * but the reported packet count accounts only for full packets, not
+ * fragments.
+ *
+ * Return: number of packets processed and pushed to stack.
+ */
+static int __octep_oq_process_rx(struct octep_device *oct,
+ struct octep_oq *oq, u16 pkts_to_process)
+{
+ struct octep_oq_resp_hw_ext *resp_hw_ext = NULL;
+ struct octep_rx_buffer *buff_info;
+ struct octep_oq_resp_hw *resp_hw;
+ u32 pkt, rx_bytes, desc_used;
+ struct sk_buff *skb;
+ u16 data_offset;
+ u32 read_idx;
+
+ read_idx = oq->host_read_idx;
+ rx_bytes = 0;
+ desc_used = 0;
+ for (pkt = 0; pkt < pkts_to_process; pkt++) {
+ buff_info = (struct octep_rx_buffer *)&oq->buff_info[read_idx];
+ dma_unmap_page(oq->dev, oq->desc_ring[read_idx].buffer_ptr,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ resp_hw = page_address(buff_info->page);
+ buff_info->page = NULL;
+
+ /* Swap the length field that is in Big-Endian to CPU */
+ buff_info->len = be64_to_cpu(resp_hw->length);
+ if (oct->caps_enabled & OCTEP_CAP_RX_CHECKSUM) {
+ /* Extended response header is immediately after
+ * response header (resp_hw)
+ */
+ resp_hw_ext = (struct octep_oq_resp_hw_ext *)
+ (resp_hw + 1);
+ buff_info->len -= OCTEP_OQ_RESP_HW_EXT_SIZE;
+ /* Packet Data is immediately after
+ * extended response header.
+ */
+ data_offset = OCTEP_OQ_RESP_HW_SIZE +
+ OCTEP_OQ_RESP_HW_EXT_SIZE;
+ } else {
+ /* Data is immediately after
+ * Hardware Rx response header.
+ */
+ data_offset = OCTEP_OQ_RESP_HW_SIZE;
+ }
+ rx_bytes += buff_info->len;
+
+ if (buff_info->len <= oq->max_single_buffer_size) {
+ skb = build_skb((void *)resp_hw, PAGE_SIZE);
+ skb_reserve(skb, data_offset);
+ skb_put(skb, buff_info->len);
+ read_idx++;
+ desc_used++;
+ if (read_idx == oq->max_count)
+ read_idx = 0;
+ } else {
+ struct skb_shared_info *shinfo;
+ u16 data_len;
+
+ skb = build_skb((void *)resp_hw, PAGE_SIZE);
+ skb_reserve(skb, data_offset);
+ /* Head fragment includes response header(s);
+ * subsequent fragments contain only data.
+ */
+ skb_put(skb, oq->max_single_buffer_size);
+ read_idx++;
+ desc_used++;
+ if (read_idx == oq->max_count)
+ read_idx = 0;
+
+ shinfo = skb_shinfo(skb);
+ data_len = buff_info->len - oq->max_single_buffer_size;
+ while (data_len) {
+ dma_unmap_page(oq->dev, oq->desc_ring[read_idx].buffer_ptr,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ buff_info = (struct octep_rx_buffer *)
+ &oq->buff_info[read_idx];
+ if (data_len < oq->buffer_size) {
+ buff_info->len = data_len;
+ data_len = 0;
+ } else {
+ buff_info->len = oq->buffer_size;
+ data_len -= oq->buffer_size;
+ }
+
+ skb_add_rx_frag(skb, shinfo->nr_frags,
+ buff_info->page, 0,
+ buff_info->len,
+ buff_info->len);
+ buff_info->page = NULL;
+ read_idx++;
+ desc_used++;
+ if (read_idx == oq->max_count)
+ read_idx = 0;
+ }
+ }
+
+ skb->dev = oq->netdev;
+ skb->protocol = eth_type_trans(skb, skb->dev);
+ if (resp_hw_ext &&
+ resp_hw_ext->csum_verified == OCTEP_CSUM_VERIFIED)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else
+ skb->ip_summed = CHECKSUM_NONE;
+ napi_gro_receive(oq->napi, skb);
+ }
+
+ oq->host_read_idx = read_idx;
+ oq->refill_count += desc_used;
+ oq->stats.packets += pkt;
+ oq->stats.bytes += rx_bytes;
+
+ return pkt;
+}
+
+/**
+ * octep_oq_process_rx() - Process Rx queue.
+ *
+ * @oq: Octeon Rx queue data structure.
+ * @budget: max number of packets can be processed in one invocation.
+ *
+ * Check for newly received packets and process them.
+ * Keeps checking for new packets until budget is used or no new packets seen.
+ *
+ * Return: number of packets processed.
+ */
+int octep_oq_process_rx(struct octep_oq *oq, int budget)
+{
+ u32 pkts_available, pkts_processed, total_pkts_processed;
+ struct octep_device *oct = oq->octep_dev;
+
+ pkts_available = 0;
+ pkts_processed = 0;
+ total_pkts_processed = 0;
+ while (total_pkts_processed < budget) {
+ /* update pending count only when current one exhausted */
+ if (oq->pkts_pending == 0)
+ octep_oq_check_hw_for_pkts(oct, oq);
+ pkts_available = min(budget - total_pkts_processed,
+ oq->pkts_pending);
+ if (!pkts_available)
+ break;
+
+ pkts_processed = __octep_oq_process_rx(oct, oq,
+ pkts_available);
+ oq->pkts_pending -= pkts_processed;
+ total_pkts_processed += pkts_processed;
+ }
+
+ if (oq->refill_count >= oq->refill_threshold) {
+ u32 desc_refilled = octep_oq_refill(oct, oq);
+
+ /* flush pending writes before updating credits */
+ wmb();
+ writel(desc_refilled, oq->pkts_credit_reg);
+ }
+
+ return total_pkts_processed;
+}
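For orientation (not from the patch itself), a minimal sketch of how this routine would sit inside a NAPI poll callback; the driver's real handler lives in octep_main.c and also reaps Tx completions and re-enables queue interrupts, so this is only an approximation.

```c
/* Hypothetical NAPI poll wrapper, for illustration only. */
static int octep_napi_poll_sketch(struct napi_struct *napi, int budget)
{
	struct octep_ioq_vector *vector =
		container_of(napi, struct octep_ioq_vector, napi);
	int done = octep_oq_process_rx(vector->oq, budget);

	if (done < budget)
		napi_complete_done(napi, done);

	return done;
}
```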
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h
new file mode 100644
index 000000000..782a24f27
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h
@@ -0,0 +1,199 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#ifndef _OCTEP_RX_H_
+#define _OCTEP_RX_H_
+
+/* struct octep_oq_desc_hw - Octeon Hardware OQ descriptor format.
+ *
+ * The descriptor ring is made of descriptors which have 2 64-bit values:
+ *
+ * @buffer_ptr: DMA address of the skb->data
+ * @info_ptr: DMA address of host memory, used by hardware to update the
+ * packet count. This is currently unused, to save PCI writes.
+ */
+struct octep_oq_desc_hw {
+ dma_addr_t buffer_ptr;
+ u64 info_ptr;
+};
+
+#define OCTEP_OQ_DESC_SIZE (sizeof(struct octep_oq_desc_hw))
+
+#define OCTEP_CSUM_L4_VERIFIED 0x1
+#define OCTEP_CSUM_IP_VERIFIED 0x2
+#define OCTEP_CSUM_VERIFIED (OCTEP_CSUM_L4_VERIFIED | OCTEP_CSUM_IP_VERIFIED)
+
+/* Extended Response Header in packet data received from Hardware.
+ * Includes metadata like checksum status.
+ * This is valid only if the hardware/firmware has published support for it.
+ * This is at offset 0 of packet data (skb->data).
+ */
+struct octep_oq_resp_hw_ext {
+ /* Reserved. */
+ u64 reserved:62;
+
+ /* checksum verified. */
+ u64 csum_verified:2;
+};
+
+#define OCTEP_OQ_RESP_HW_EXT_SIZE (sizeof(struct octep_oq_resp_hw_ext))
+
+/* Length of Rx packet DMA'ed by Octeon to Host.
+ * This is in big-endian, so it needs to be converted to CPU endianness.
+ * Octeon writes this at the beginning of Rx buffer (skb->data).
+ */
+struct octep_oq_resp_hw {
+ /* The Length of the packet. */
+ __be64 length;
+};
+
+#define OCTEP_OQ_RESP_HW_SIZE (sizeof(struct octep_oq_resp_hw))
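Worked example (illustrative): struct octep_oq_resp_hw is 8 bytes and struct octep_oq_resp_hw_ext (above) is another 8, so the Rx path in octep_rx.c starts packet data 8 bytes into the buffer, or 16 bytes when the extended response header is advertised by the firmware.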
+
+/* Pointer to data buffer.
+ * Driver keeps a pointer to the data buffer that it made available to
+ * the Octeon device. Since the descriptor ring keeps physical (bus)
+ * addresses, this field is required for the driver to keep track of
+ * the virtual address pointers. The fields are operated by
+ * OS-dependent routines.
+ */
+struct octep_rx_buffer {
+ struct page *page;
+
+ /* length from rx hardware descriptor after converting to cpu endian */
+ u64 len;
+};
+
+#define OCTEP_OQ_RECVBUF_SIZE (sizeof(struct octep_rx_buffer))
+
+/* Output Queue statistics. Each output queue has its own set of stats fields. */
+struct octep_oq_stats {
+ /* Number of packets received from the Device. */
+ u64 packets;
+
+ /* Number of bytes received from the Device. */
+ u64 bytes;
+
+ /* Number of times failed to allocate buffers. */
+ u64 alloc_failures;
+};
+
+#define OCTEP_OQ_STATS_SIZE (sizeof(struct octep_oq_stats))
+
+/* Hardware interface Rx statistics */
+struct octep_iface_rx_stats {
+ /* Received packets */
+ u64 pkts;
+
+ /* Octets of received packets */
+ u64 octets;
+
+ /* Received PAUSE and Control packets */
+ u64 pause_pkts;
+
+ /* Received PAUSE and Control octets */
+ u64 pause_octets;
+
+ /* Filtered DMAC0 packets */
+ u64 dmac0_pkts;
+
+ /* Filtered DMAC0 octets */
+ u64 dmac0_octets;
+
+ /* Packets dropped due to RX FIFO full */
+ u64 dropped_pkts_fifo_full;
+
+ /* Octets dropped due to RX FIFO full */
+ u64 dropped_octets_fifo_full;
+
+ /* Error packets */
+ u64 err_pkts;
+
+ /* Filtered DMAC1 packets */
+ u64 dmac1_pkts;
+
+ /* Filtered DMAC1 octets */
+ u64 dmac1_octets;
+
+ /* NCSI-bound packets dropped */
+ u64 ncsi_dropped_pkts;
+
+ /* NCSI-bound octets dropped */
+ u64 ncsi_dropped_octets;
+
+ /* Multicast packets received. */
+ u64 mcast_pkts;
+
+ /* Broadcast packets received. */
+ u64 bcast_pkts;
+
+};
+
+/* The Descriptor Ring Output Queue structure.
+ * This structure has all the information required to implement an
+ * Octeon OQ.
+ */
+struct octep_oq {
+ u32 q_no;
+
+ struct octep_device *octep_dev;
+ struct net_device *netdev;
+ struct device *dev;
+
+ struct napi_struct *napi;
+
+ /* The receive buffer list. This list has the virtual addresses
+ * of the buffers.
+ */
+ struct octep_rx_buffer *buff_info;
+
+ /* Pointer to the mapped packet credit register.
+ * Host writes number of info/buffer ptrs available to this register
+ */
+ u8 __iomem *pkts_credit_reg;
+
+ /* Pointer to the mapped packet sent register.
+ * Octeon writes the number of packets DMA'ed to host memory
+ * in this register.
+ */
+ u8 __iomem *pkts_sent_reg;
+
+ /* Statistics for this OQ. */
+ struct octep_oq_stats stats;
+
+ /* Packets pending to be processed */
+ u32 pkts_pending;
+ u32 last_pkt_count;
+
+ /* Index in the ring where the driver should read the next packet */
+ u32 host_read_idx;
+
+ /* Number of descriptors in this ring. */
+ u32 max_count;
+ u32 ring_size_mask;
+
+ /* The number of descriptors pending refill. */
+ u32 refill_count;
+
+ /* Index in the ring where the driver will refill the
+ * descriptor's buffer
+ */
+ u32 host_refill_idx;
+ u32 refill_threshold;
+
+ /* The size of each buffer pointed by the buffer pointer. */
+ u32 buffer_size;
+ u32 max_single_buffer_size;
+
+ /* The 8B aligned descriptor ring starts at this address. */
+ struct octep_oq_desc_hw *desc_ring;
+
+ /* DMA mapped address of the OQ descriptor ring. */
+ dma_addr_t desc_ring_dma;
+};
+
+#define OCTEP_OQ_SIZE (sizeof(struct octep_oq))
+#endif /* _OCTEP_RX_H_ */
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c
new file mode 100644
index 000000000..d0adb82d6
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c
@@ -0,0 +1,334 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/etherdevice.h>
+#include <linux/vmalloc.h>
+
+#include "octep_config.h"
+#include "octep_main.h"
+
+/* Reset various index of Tx queue data structure. */
+static void octep_iq_reset_indices(struct octep_iq *iq)
+{
+ iq->fill_cnt = 0;
+ iq->host_write_index = 0;
+ iq->octep_read_index = 0;
+ iq->flush_index = 0;
+ iq->pkts_processed = 0;
+ iq->pkt_in_done = 0;
+ atomic_set(&iq->instr_pending, 0);
+}
+
+/**
+ * octep_iq_process_completions() - Process Tx queue completions.
+ *
+ * @iq: Octeon Tx queue data structure.
+ * @budget: max number of completions to be processed in one invocation.
+ */
+int octep_iq_process_completions(struct octep_iq *iq, u16 budget)
+{
+ u32 compl_pkts, compl_bytes, compl_sg;
+ struct octep_device *oct = iq->octep_dev;
+ struct octep_tx_buffer *tx_buffer;
+ struct skb_shared_info *shinfo;
+ u32 fi = iq->flush_index;
+ struct sk_buff *skb;
+ u8 frags, i;
+
+ compl_pkts = 0;
+ compl_sg = 0;
+ compl_bytes = 0;
+ iq->octep_read_index = oct->hw_ops.update_iq_read_idx(iq);
+
+ while (likely(budget && (fi != iq->octep_read_index))) {
+ tx_buffer = iq->buff_info + fi;
+ skb = tx_buffer->skb;
+
+ fi++;
+ if (unlikely(fi == iq->max_count))
+ fi = 0;
+ compl_bytes += skb->len;
+ compl_pkts++;
+ budget--;
+
+ if (!tx_buffer->gather) {
+ dma_unmap_single(iq->dev, tx_buffer->dma,
+ tx_buffer->skb->len, DMA_TO_DEVICE);
+ dev_kfree_skb_any(skb);
+ continue;
+ }
+
+ /* Scatter/Gather */
+ shinfo = skb_shinfo(skb);
+ frags = shinfo->nr_frags;
+ compl_sg++;
+
+ dma_unmap_single(iq->dev, tx_buffer->sglist[0].dma_ptr[0],
+ tx_buffer->sglist[0].len[3], DMA_TO_DEVICE);
+
+ i = 1; /* entry 0 is main skb, unmapped above */
+ while (frags--) {
+ dma_unmap_page(iq->dev, tx_buffer->sglist[i >> 2].dma_ptr[i & 3],
+ tx_buffer->sglist[i >> 2].len[3 - (i & 3)], DMA_TO_DEVICE);
+ i++;
+ }
+
+ dev_kfree_skb_any(skb);
+ }
+
+ iq->pkts_processed += compl_pkts;
+ atomic_sub(compl_pkts, &iq->instr_pending);
+ iq->stats.instr_completed += compl_pkts;
+ iq->stats.bytes_sent += compl_bytes;
+ iq->stats.sgentry_sent += compl_sg;
+ iq->flush_index = fi;
+
+ netdev_tx_completed_queue(iq->netdev_q, compl_pkts, compl_bytes);
+
+ if (unlikely(__netif_subqueue_stopped(iq->netdev, iq->q_no)) &&
+ ((iq->max_count - atomic_read(&iq->instr_pending)) >
+ OCTEP_WAKE_QUEUE_THRESHOLD))
+ netif_wake_subqueue(iq->netdev, iq->q_no);
+ return !budget;
+}
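A note on the sglist indexing above (editorial observation, not from the patch): fragment i is unmapped from 64-bit scatter/gather entry i >> 2, pointer slot i & 3. Per the hardware layout documented in octep_tx.h, Len0 occupies the most-significant 16 bits of the length word, so on a little-endian host the u16 length array is effectively reversed and length slot k is read back as len[3 - k], which is why entry 0 (the linear skb data) uses len[3].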
+
+/**
+ * octep_iq_free_pending() - Free Tx buffers for pending completions.
+ *
+ * @iq: Octeon Tx queue data structure.
+ */
+static void octep_iq_free_pending(struct octep_iq *iq)
+{
+ struct octep_tx_buffer *tx_buffer;
+ struct skb_shared_info *shinfo;
+ u32 fi = iq->flush_index;
+ struct sk_buff *skb;
+ u8 frags, i;
+
+ while (fi != iq->host_write_index) {
+ tx_buffer = iq->buff_info + fi;
+ skb = tx_buffer->skb;
+
+ fi++;
+ if (unlikely(fi == iq->max_count))
+ fi = 0;
+
+ if (!tx_buffer->gather) {
+ dma_unmap_single(iq->dev, tx_buffer->dma,
+ tx_buffer->skb->len, DMA_TO_DEVICE);
+ dev_kfree_skb_any(skb);
+ continue;
+ }
+
+ /* Scatter/Gather */
+ shinfo = skb_shinfo(skb);
+ frags = shinfo->nr_frags;
+
+ dma_unmap_single(iq->dev,
+ tx_buffer->sglist[0].dma_ptr[0],
+ tx_buffer->sglist[0].len[3],
+ DMA_TO_DEVICE);
+
+ i = 1; /* entry 0 is main skb, unmapped above */
+ while (frags--) {
+ dma_unmap_page(iq->dev, tx_buffer->sglist[i >> 2].dma_ptr[i & 3],
+ tx_buffer->sglist[i >> 2].len[3 - (i & 3)], DMA_TO_DEVICE);
+ i++;
+ }
+
+ dev_kfree_skb_any(skb);
+ }
+
+ atomic_set(&iq->instr_pending, 0);
+ iq->flush_index = fi;
+ netdev_tx_reset_queue(netdev_get_tx_queue(iq->netdev, iq->q_no));
+}
+
+/**
+ * octep_clean_iqs() - Clean Tx queues to shutdown the device.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Free the buffers in Tx queue descriptors pending completion and
+ * reset queue indices
+ */
+void octep_clean_iqs(struct octep_device *oct)
+{
+ int i;
+
+ for (i = 0; i < oct->num_iqs; i++) {
+ octep_iq_free_pending(oct->iq[i]);
+ octep_iq_reset_indices(oct->iq[i]);
+ }
+}
+
+/**
+ * octep_setup_iq() - Setup a Tx queue.
+ *
+ * @oct: Octeon device private data structure.
+ * @q_no: Tx queue number to be setup.
+ *
+ * Allocate resources for a Tx queue.
+ */
+static int octep_setup_iq(struct octep_device *oct, int q_no)
+{
+ u32 desc_ring_size, buff_info_size, sglist_size;
+ struct octep_iq *iq;
+ int i;
+
+ iq = vzalloc(sizeof(*iq));
+ if (!iq)
+ goto iq_alloc_err;
+ oct->iq[q_no] = iq;
+
+ iq->octep_dev = oct;
+ iq->netdev = oct->netdev;
+ iq->dev = &oct->pdev->dev;
+ iq->q_no = q_no;
+ iq->max_count = CFG_GET_IQ_NUM_DESC(oct->conf);
+ iq->ring_size_mask = iq->max_count - 1;
+ iq->fill_threshold = CFG_GET_IQ_DB_MIN(oct->conf);
+ iq->netdev_q = netdev_get_tx_queue(iq->netdev, q_no);
+
+ /* Allocate memory for hardware queue descriptors */
+ desc_ring_size = OCTEP_IQ_DESC_SIZE * CFG_GET_IQ_NUM_DESC(oct->conf);
+ iq->desc_ring = dma_alloc_coherent(iq->dev, desc_ring_size,
+ &iq->desc_ring_dma, GFP_KERNEL);
+ if (unlikely(!iq->desc_ring)) {
+ dev_err(iq->dev,
+ "Failed to allocate DMA memory for IQ-%d\n", q_no);
+ goto desc_dma_alloc_err;
+ }
+
+ /* Allocate memory for hardware SGLIST descriptors */
+ sglist_size = OCTEP_SGLIST_SIZE_PER_PKT *
+ CFG_GET_IQ_NUM_DESC(oct->conf);
+ iq->sglist = dma_alloc_coherent(iq->dev, sglist_size,
+ &iq->sglist_dma, GFP_KERNEL);
+ if (unlikely(!iq->sglist)) {
+ dev_err(iq->dev,
+ "Failed to allocate DMA memory for IQ-%d SGLIST\n",
+ q_no);
+ goto sglist_alloc_err;
+ }
+
+ /* allocate memory to manage Tx packets pending completion */
+ buff_info_size = OCTEP_IQ_TXBUFF_INFO_SIZE * iq->max_count;
+ iq->buff_info = vzalloc(buff_info_size);
+ if (!iq->buff_info) {
+ dev_err(iq->dev,
+ "Failed to allocate buff info for IQ-%d\n", q_no);
+ goto buff_info_err;
+ }
+
+ /* Setup sglist addresses in tx_buffer entries */
+ for (i = 0; i < CFG_GET_IQ_NUM_DESC(oct->conf); i++) {
+ struct octep_tx_buffer *tx_buffer;
+
+ tx_buffer = &iq->buff_info[i];
+ tx_buffer->sglist =
+ &iq->sglist[i * OCTEP_SGLIST_ENTRIES_PER_PKT];
+ tx_buffer->sglist_dma =
+ iq->sglist_dma + (i * OCTEP_SGLIST_SIZE_PER_PKT);
+ }
+
+ octep_iq_reset_indices(iq);
+ oct->hw_ops.setup_iq_regs(oct, q_no);
+
+ oct->num_iqs++;
+ return 0;
+
+buff_info_err:
+ dma_free_coherent(iq->dev, sglist_size, iq->sglist, iq->sglist_dma);
+sglist_alloc_err:
+ dma_free_coherent(iq->dev, desc_ring_size,
+ iq->desc_ring, iq->desc_ring_dma);
+desc_dma_alloc_err:
+ vfree(iq);
+ oct->iq[q_no] = NULL;
+iq_alloc_err:
+ return -1;
+}
+
+/**
+ * octep_free_iq() - Free Tx queue resources.
+ *
+ * @iq: Octeon Tx queue data structure.
+ *
+ * Free all the resources allocated for a Tx queue.
+ */
+static void octep_free_iq(struct octep_iq *iq)
+{
+ struct octep_device *oct = iq->octep_dev;
+ u64 desc_ring_size, sglist_size;
+ int q_no = iq->q_no;
+
+ desc_ring_size = OCTEP_IQ_DESC_SIZE * CFG_GET_IQ_NUM_DESC(oct->conf);
+
+ vfree(iq->buff_info);
+
+ if (iq->desc_ring)
+ dma_free_coherent(iq->dev, desc_ring_size,
+ iq->desc_ring, iq->desc_ring_dma);
+
+ sglist_size = OCTEP_SGLIST_SIZE_PER_PKT *
+ CFG_GET_IQ_NUM_DESC(oct->conf);
+ if (iq->sglist)
+ dma_free_coherent(iq->dev, sglist_size,
+ iq->sglist, iq->sglist_dma);
+
+ vfree(iq);
+ oct->iq[q_no] = NULL;
+ oct->num_iqs--;
+}
+
+/**
+ * octep_setup_iqs() - setup resources for all Tx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+int octep_setup_iqs(struct octep_device *oct)
+{
+ int i;
+
+ oct->num_iqs = 0;
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) {
+ if (octep_setup_iq(oct, i)) {
+ dev_err(&oct->pdev->dev,
+ "Failed to setup IQ(TxQ)-%d.\n", i);
+ goto iq_setup_err;
+ }
+ dev_dbg(&oct->pdev->dev, "Successfully setup IQ(TxQ)-%d.\n", i);
+ }
+
+ return 0;
+
+iq_setup_err:
+ while (i) {
+ i--;
+ octep_free_iq(oct->iq[i]);
+ }
+ return -1;
+}
+
+/**
+ * octep_free_iqs() - Free resources of all Tx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+void octep_free_iqs(struct octep_device *oct)
+{
+ int i;
+
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) {
+ octep_free_iq(oct->iq[i]);
+ dev_dbg(&oct->pdev->dev,
+ "Successfully destroyed IQ(TxQ)-%d.\n", i);
+ }
+ oct->num_iqs = 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h
new file mode 100644
index 000000000..21e75ff9f
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h
@@ -0,0 +1,298 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#ifndef _OCTEP_TX_H_
+#define _OCTEP_TX_H_
+
+#define IQ_SEND_OK 0
+#define IQ_SEND_STOP 1
+#define IQ_SEND_FAILED -1
+
+#define TX_BUFTYPE_NONE 0
+#define TX_BUFTYPE_NET 1
+#define TX_BUFTYPE_NET_SG 2
+#define NUM_TX_BUFTYPES 3
+
+/* Hardware format for Scatter/Gather list
+ *
+ * 63 48|47 32|31 16|15 0
+ * -----------------------------------------
+ * | Len 0 | Len 1 | Len 2 | Len 3 |
+ * -----------------------------------------
+ * | Ptr 0 |
+ * -----------------------------------------
+ * | Ptr 1 |
+ * -----------------------------------------
+ * | Ptr 2 |
+ * -----------------------------------------
+ * | Ptr 3 |
+ * -----------------------------------------
+ */
+struct octep_tx_sglist_desc {
+ u16 len[4];
+ dma_addr_t dma_ptr[4];
+};
+
+/* Each Scatter/Gather entry sent to hardware holds four pointers.
+ * So, the number of entries required is (MAX_SKB_FRAGS + 1)/4, where '+1'
+ * is for the main skb, which also goes to Octeon hardware as a gather buffer.
+ * To allocate sufficient SGLIST entries for a packet with max fragments,
+ * round up by adding 3 before calculating max SGLIST entries per packet.
+ */
+#define OCTEP_SGLIST_ENTRIES_PER_PKT ((MAX_SKB_FRAGS + 1 + 3) / 4)
+#define OCTEP_SGLIST_SIZE_PER_PKT \
+ (OCTEP_SGLIST_ENTRIES_PER_PKT * sizeof(struct octep_tx_sglist_desc))
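+
+/* Worked example (illustrative; the exact value of MAX_SKB_FRAGS is kernel
+ * configuration dependent and dma_addr_t is assumed to be 8 bytes here):
+ * with MAX_SKB_FRAGS == 17, OCTEP_SGLIST_ENTRIES_PER_PKT works out to
+ * (17 + 1 + 3) / 4 == 5 entries, so OCTEP_SGLIST_SIZE_PER_PKT is
+ * 5 * sizeof(struct octep_tx_sglist_desc) == 5 * 40 == 200 bytes.
+ */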
+
+struct octep_tx_buffer {
+ struct sk_buff *skb;
+ dma_addr_t dma;
+ struct octep_tx_sglist_desc *sglist;
+ dma_addr_t sglist_dma;
+ u8 gather;
+};
+
+#define OCTEP_IQ_TXBUFF_INFO_SIZE (sizeof(struct octep_tx_buffer))
+
+/* Hardware interface Tx statistics */
+struct octep_iface_tx_stats {
+ /* Packets dropped due to excessive collisions */
+ u64 xscol;
+
+ /* Packets dropped due to excessive deferral */
+ u64 xsdef;
+
+ /* Packets sent that experienced multiple collisions before successful
+ * transmission
+ */
+ u64 mcol;
+
+ /* Packets sent that experienced a single collision before successful
+ * transmission
+ */
+ u64 scol;
+
+ /* Total octets sent on the interface */
+ u64 octs;
+
+ /* Total frames sent on the interface */
+ u64 pkts;
+
+ /* Packets sent with an octet count < 64 */
+ u64 hist_lt64;
+
+ /* Packets sent with an octet count == 64 */
+ u64 hist_eq64;
+
+ /* Packets sent with an octet count of 65-127 */
+ u64 hist_65to127;
+
+ /* Packets sent with an octet count of 128-255 */
+ u64 hist_128to255;
+
+ /* Packets sent with an octet count of 256-511 */
+ u64 hist_256to511;
+
+ /* Packets sent with an octet count of 512-1023 */
+ u64 hist_512to1023;
+
+ /* Packets sent with an octet count of 1024-1518 */
+ u64 hist_1024to1518;
+
+ /* Packets sent with an octet count of > 1518 */
+ u64 hist_gt1518;
+
+ /* Packets sent to a broadcast DMAC */
+ u64 bcst;
+
+ /* Packets sent to the multicast DMAC */
+ u64 mcst;
+
+ /* Packets sent that experienced a transmit underflow and were
+ * truncated
+ */
+ u64 undflw;
+
+ /* Control/PAUSE packets sent */
+ u64 ctl;
+};
+
+/* Input Queue statistics. One instance is maintained per input queue. */
+struct octep_iq_stats {
+ /* Instructions posted to this queue. */
+ u64 instr_posted;
+
+ /* Instructions copied by hardware for processing. */
+ u64 instr_completed;
+
+ /* Instructions that could not be processed. */
+ u64 instr_dropped;
+
+ /* Bytes sent through this queue. */
+ u64 bytes_sent;
+
+ /* Gather entries sent through this queue. */
+ u64 sgentry_sent;
+
+ /* Number of transmit failures due to TX_BUSY */
+ u64 tx_busy;
+
+ /* Number of times the queue is restarted */
+ u64 restart_cnt;
+};
+
+/* The instruction (input) queue.
+ * The input queue is used to post raw (instruction) mode data or packet
+ * data to the Octeon device from the host. Each input queue (up to 4) for
+ * an Octeon device has one such structure to represent it.
+ */
+struct octep_iq {
+ u32 q_no;
+
+ struct octep_device *octep_dev;
+ struct net_device *netdev;
+ struct device *dev;
+ struct netdev_queue *netdev_q;
+
+ /* Index in input ring where driver should write the next packet */
+ u16 host_write_index;
+
+ /* Index in input ring where Octeon is expected to read next packet */
+ u16 octep_read_index;
+
+ /* This index aids in finding the window in the queue where Octeon
+ * has read the commands.
+ */
+ u16 flush_index;
+
+ /* Statistics for this input queue. */
+ struct octep_iq_stats stats;
+
+ /* This field keeps track of the instructions pending in this queue. */
+ atomic_t instr_pending;
+
+ /* Pointer to the Virtual Base addr of the input ring. */
+ struct octep_tx_desc_hw *desc_ring;
+
+ /* DMA mapped base address of the input descriptor ring. */
+ dma_addr_t desc_ring_dma;
+
+ /* Info of Tx buffers pending completion. */
+ struct octep_tx_buffer *buff_info;
+
+ /* Base pointer to Scatter/Gather lists for all ring descriptors. */
+ struct octep_tx_sglist_desc *sglist;
+
+ /* DMA mapped addr of Scatter Gather Lists */
+ dma_addr_t sglist_dma;
+
+ /* Octeon doorbell register for the ring. */
+ u8 __iomem *doorbell_reg;
+
+ /* Octeon instruction count register for this ring. */
+ u8 __iomem *inst_cnt_reg;
+
+ /* interrupt level register for this ring */
+ u8 __iomem *intr_lvl_reg;
+
+ /* Maximum no. of instructions in this queue. */
+ u32 max_count;
+ u32 ring_size_mask;
+
+ u32 pkt_in_done;
+ u32 pkts_processed;
+
+ u32 status;
+
+ /* Number of instructions pending to be posted to Octeon. */
+ u32 fill_cnt;
+
+ /* The max. number of instructions that can be held pending by the
+ * driver before ringing doorbell.
+ */
+ u32 fill_threshold;
+};
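+
+/* Illustrative sketch (not the actual transmit path, which lives in
+ * octep_tx.c/octep_main.c): assuming max_count is a power of two and
+ * ring_size_mask == max_count - 1, a producer step would look like:
+ *
+ *	iq->host_write_index = (iq->host_write_index + 1) & iq->ring_size_mask;
+ *	iq->fill_cnt++;
+ *	if (iq->fill_cnt >= iq->fill_threshold) {
+ *		writel(iq->fill_cnt, iq->doorbell_reg);
+ *		iq->fill_cnt = 0;
+ *	}
+ *
+ * This only shows how host_write_index, ring_size_mask, fill_cnt and
+ * fill_threshold relate to each other.
+ */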
+
+/* Hardware Tx Instruction Header */
+struct octep_instr_hdr {
+ /* Data Len */
+ u64 tlen:16;
+
+ /* Reserved */
+ u64 rsvd:20;
+
+ /* PKIND for SDP */
+ u64 pkind:6;
+
+ /* Front Data size */
+ u64 fsz:6;
+
+ /* No. of entries in gather list */
+ u64 gsz:14;
+
+ /* Gather indicator; 1 = gather */
+ u64 gather:1;
+
+ /* Reserved3 */
+ u64 reserved3:1;
+};
+
+/* Hardware Tx completion response header */
+struct octep_instr_resp_hdr {
+ /* Request ID */
+ u64 rid:16;
+
+ /* PCIe port to use for response */
+ u64 pcie_port:3;
+
+ /* Scatter indicator 1=scatter */
+ u64 scatter:1;
+
+ /* Size of Expected result OR no. of entries in scatter list */
+ u64 rlenssz:14;
+
+ /* Desired destination port for result */
+ u64 dport:6;
+
+ /* Opcode Specific parameters */
+ u64 param:8;
+
+ /* Opcode for the return packet */
+ u64 opcode:16;
+};
+
+/* 64-byte Tx instruction format.
+ * Format of instruction for a 64-byte mode input queue.
+ *
+ * Only the first 16 bytes (dptr and ih) are mandatory; the rest are
+ * optional and are filled by the driver based on firmware/hardware
+ * capabilities. These optional headers are together called Front Data,
+ * and their size is described by ih->fsz.
+ */
+struct octep_tx_desc_hw {
+ /* Pointer where the input data is available. */
+ u64 dptr;
+
+ /* Instruction Header. */
+ union {
+ struct octep_instr_hdr ih;
+ u64 ih64;
+ };
+
+ /* Pointer where the response for a RAW mode packet will be written
+ * by Octeon.
+ */
+ u64 rptr;
+
+ /* Input Instruction Response Header. */
+ struct octep_instr_resp_hdr irh;
+
+ /* Additional headers available in a 64-byte instruction. */
+ u64 exhdr[4];
+};
+
+#define OCTEP_IQ_DESC_SIZE (sizeof(struct octep_tx_desc_hw))
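+
+/* Illustrative sketch (not the driver's transmit routine): a minimal 64-byte
+ * instruction for a single linear buffer could be filled as below, assuming
+ * 'dma' is a DMA-mapped buffer of 'len' bytes, 'pkind' is the firmware
+ * provided port kind and no Front Data headers are used:
+ *
+ *	struct octep_tx_desc_hw *hw_desc;
+ *
+ *	hw_desc = &iq->desc_ring[iq->host_write_index];
+ *	hw_desc->ih64 = 0;
+ *	hw_desc->dptr = dma;
+ *	hw_desc->ih.tlen = len;
+ *	hw_desc->ih.pkind = pkind;
+ *	hw_desc->ih.fsz = 0;
+ *	hw_desc->ih.gather = 0;
+ *
+ * For scatter/gather, ih.gather is set, ih.gsz holds the number of gather
+ * entries and dptr points at the DMA address of the octep_tx_sglist_desc
+ * array instead of the packet data.
+ */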
+#endif /* _OCTEP_TX_H_ */
diff --git a/drivers/net/ethernet/marvell/octeontx2/Kconfig b/drivers/net/ethernet/marvell/octeontx2/Kconfig
new file mode 100644
index 000000000..a32d85d6f
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/Kconfig
@@ -0,0 +1,48 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Marvell RVU Network drivers configuration
+#
+
+config OCTEONTX2_MBOX
+ tristate
+
+config OCTEONTX2_AF
+ tristate "Marvell OcteonTX2 RVU Admin Function driver"
+ select OCTEONTX2_MBOX
+ select NET_DEVLINK
+ depends on (64BIT && COMPILE_TEST) || ARM64
+ depends on PCI
+ depends on PTP_1588_CLOCK_OPTIONAL
+ help
+ This driver supports Marvell's OcteonTX2 Resource Virtualization
+ Unit's admin function manager which manages all RVU HW resources
+ and provides a channel for other PFs/VFs to configure the HW. It must
+ be enabled for the other RVU device drivers to work.
+
+config NDC_DIS_DYNAMIC_CACHING
+ bool "Disable caching of dynamic entries in NDC"
+ depends on OCTEONTX2_AF
+ default n
+ help
+ This config option disables caching of dynamic entries such as NIX SQEs,
+ NPA stack pages, etc. in NDC. It also locks down NIX SQ/CQ/RQ/RSS and
+ NPA Aura/Pool contexts.
+
+config OCTEONTX2_PF
+ tristate "Marvell OcteonTX2 NIC Physical Function driver"
+ select OCTEONTX2_MBOX
+ select NET_DEVLINK
+ select PAGE_POOL
+ depends on (64BIT && COMPILE_TEST) || ARM64
+ select DIMLIB
+ depends on PCI
+ depends on PTP_1588_CLOCK_OPTIONAL
+ depends on MACSEC || !MACSEC
+ help
+ This driver supports Marvell's OcteonTX2 NIC physical function.
+
+config OCTEONTX2_VF
+ tristate "Marvell OcteonTX2 NIC Virtual Function driver"
+ depends on OCTEONTX2_PF
+ help
+ This driver supports Marvell's OcteonTX2 NIC virtual function.
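+
+# Example (illustrative only): a configuration fragment that builds the full
+# RVU stack as modules. OCTEONTX2_MBOX is selected automatically by the AF
+# and PF drivers, and OCTEONTX2_AF must be enabled for the PF/VF drivers to
+# be usable on real hardware.
+#
+#   CONFIG_OCTEONTX2_AF=m
+#   CONFIG_OCTEONTX2_PF=m
+#   CONFIG_OCTEONTX2_VF=m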
diff --git a/drivers/net/ethernet/marvell/octeontx2/Makefile b/drivers/net/ethernet/marvell/octeontx2/Makefile
new file mode 100644
index 000000000..0064a69e0
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for Marvell OcteonTX2 device drivers.
+#
+
+obj-$(CONFIG_OCTEONTX2_MBOX) += af/
+obj-$(CONFIG_OCTEONTX2_AF) += af/
+obj-$(CONFIG_OCTEONTX2_PF) += nic/
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/Makefile b/drivers/net/ethernet/marvell/octeontx2/af/Makefile
new file mode 100644
index 000000000..3cf4c8285
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/Makefile
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for Marvell's RVU Admin Function driver
+#
+
+ccflags-y += -I$(src)
+obj-$(CONFIG_OCTEONTX2_MBOX) += rvu_mbox.o
+obj-$(CONFIG_OCTEONTX2_AF) += rvu_af.o
+
+rvu_mbox-y := mbox.o rvu_trace.o
+rvu_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \
+ rvu_reg.o rvu_npc.o rvu_debugfs.o ptp.o rvu_npc_fs.o \
+ rvu_cpt.o rvu_devlink.o rpm.o rvu_cn10k.o rvu_switch.o \
+ rvu_sdp.o rvu_npc_hash.o mcs.o mcs_rvu_if.o mcs_cnf10kb.o
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
new file mode 100644
index 000000000..e06f77ad6
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -0,0 +1,1904 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 CGX driver
+ *
+ * Copyright (C) 2018 Marvell.
+ *
+ */
+
+#include <linux/acpi.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/phy.h>
+#include <linux/of.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+
+#include "cgx.h"
+#include "rvu.h"
+#include "lmac_common.h"
+
+#define DRV_NAME "Marvell-CGX/RPM"
+#define DRV_STRING "Marvell CGX/RPM Driver"
+
+static LIST_HEAD(cgx_list);
+
+/* Convert firmware speed encoding to user format (Mbps) */
+static const u32 cgx_speed_mbps[CGX_LINK_SPEED_MAX] = {
+ [CGX_LINK_NONE] = 0,
+ [CGX_LINK_10M] = 10,
+ [CGX_LINK_100M] = 100,
+ [CGX_LINK_1G] = 1000,
+ [CGX_LINK_2HG] = 2500,
+ [CGX_LINK_5G] = 5000,
+ [CGX_LINK_10G] = 10000,
+ [CGX_LINK_20G] = 20000,
+ [CGX_LINK_25G] = 25000,
+ [CGX_LINK_40G] = 40000,
+ [CGX_LINK_50G] = 50000,
+ [CGX_LINK_80G] = 80000,
+ [CGX_LINK_100G] = 100000,
+};
+
+/* Convert firmware lmac type encoding to string */
+static const char *cgx_lmactype_string[LMAC_MODE_MAX] = {
+ [LMAC_MODE_SGMII] = "SGMII",
+ [LMAC_MODE_XAUI] = "XAUI",
+ [LMAC_MODE_RXAUI] = "RXAUI",
+ [LMAC_MODE_10G_R] = "10G_R",
+ [LMAC_MODE_40G_R] = "40G_R",
+ [LMAC_MODE_QSGMII] = "QSGMII",
+ [LMAC_MODE_25G_R] = "25G_R",
+ [LMAC_MODE_50G_R] = "50G_R",
+ [LMAC_MODE_100G_R] = "100G_R",
+ [LMAC_MODE_USXGMII] = "USXGMII",
+ [LMAC_MODE_USGMII] = "USGMII",
+};
+
+/* CGX PHY management internal APIs */
+static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool en);
+
+/* Supported devices */
+static const struct pci_device_id cgx_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_CGX) },
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RPM) },
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10KB_RPM) },
+ { 0, } /* end of table */
+};
+
+MODULE_DEVICE_TABLE(pci, cgx_id_table);
+
+static bool is_dev_rpm(void *cgxd)
+{
+ struct cgx *cgx = cgxd;
+
+ return (cgx->pdev->device == PCI_DEVID_CN10K_RPM) ||
+ (cgx->pdev->device == PCI_DEVID_CN10KB_RPM);
+}
+
+bool is_lmac_valid(struct cgx *cgx, int lmac_id)
+{
+ if (!cgx || lmac_id < 0 || lmac_id >= cgx->max_lmac_per_mac)
+ return false;
+ return test_bit(lmac_id, &cgx->lmac_bmap);
+}
+
+/* Helper function to get the sequential index
+ * of an enabled LMAC within a CGX
+ */
+static int get_sequence_id_of_lmac(struct cgx *cgx, int lmac_id)
+{
+ int tmp, id = 0;
+
+ for_each_set_bit(tmp, &cgx->lmac_bmap, cgx->max_lmac_per_mac) {
+ if (tmp == lmac_id)
+ break;
+ id++;
+ }
+
+ return id;
+}
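+
+/* Example (illustrative): with lmac_bmap == 0b1010, the enabled LMACs are
+ * 1 and 3; get_sequence_id_of_lmac(cgx, 3) walks the set bits and returns 1,
+ * i.e. LMAC3 is the second enabled LMAC of this CGX.
+ */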
+
+struct mac_ops *get_mac_ops(void *cgxd)
+{
+ if (!cgxd)
+ return cgxd;
+
+ return ((struct cgx *)cgxd)->mac_ops;
+}
+
+void cgx_write(struct cgx *cgx, u64 lmac, u64 offset, u64 val)
+{
+ writeq(val, cgx->reg_base + (lmac << cgx->mac_ops->lmac_offset) +
+ offset);
+}
+
+u64 cgx_read(struct cgx *cgx, u64 lmac, u64 offset)
+{
+ return readq(cgx->reg_base + (lmac << cgx->mac_ops->lmac_offset) +
+ offset);
+}
+
+struct lmac *lmac_pdata(u8 lmac_id, struct cgx *cgx)
+{
+ if (!cgx || lmac_id >= cgx->max_lmac_per_mac)
+ return NULL;
+
+ return cgx->lmac_idmap[lmac_id];
+}
+
+int cgx_get_cgxcnt_max(void)
+{
+ struct cgx *cgx_dev;
+ int idmax = -ENODEV;
+
+ list_for_each_entry(cgx_dev, &cgx_list, cgx_list)
+ if (cgx_dev->cgx_id > idmax)
+ idmax = cgx_dev->cgx_id;
+
+ if (idmax < 0)
+ return 0;
+
+ return idmax + 1;
+}
+
+int cgx_get_lmac_cnt(void *cgxd)
+{
+ struct cgx *cgx = cgxd;
+
+ if (!cgx)
+ return -ENODEV;
+
+ return cgx->lmac_count;
+}
+
+void *cgx_get_pdata(int cgx_id)
+{
+ struct cgx *cgx_dev;
+
+ list_for_each_entry(cgx_dev, &cgx_list, cgx_list) {
+ if (cgx_dev->cgx_id == cgx_id)
+ return cgx_dev;
+ }
+ return NULL;
+}
+
+void cgx_lmac_write(int cgx_id, int lmac_id, u64 offset, u64 val)
+{
+ struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+
+ /* Software must not access disabled LMAC registers */
+ if (!is_lmac_valid(cgx_dev, lmac_id))
+ return;
+ cgx_write(cgx_dev, lmac_id, offset, val);
+}
+
+u64 cgx_lmac_read(int cgx_id, int lmac_id, u64 offset)
+{
+ struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+
+ /* Software must not access disabled LMAC registers */
+ if (!is_lmac_valid(cgx_dev, lmac_id))
+ return 0;
+
+ return cgx_read(cgx_dev, lmac_id, offset);
+}
+
+int cgx_get_cgxid(void *cgxd)
+{
+ struct cgx *cgx = cgxd;
+
+ if (!cgx)
+ return -EINVAL;
+
+ return cgx->cgx_id;
+}
+
+u8 cgx_lmac_get_p2x(int cgx_id, int lmac_id)
+{
+ struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+ u64 cfg;
+
+ cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_CFG);
+
+ return (cfg & CMR_P2X_SEL_MASK) >> CMR_P2X_SEL_SHIFT;
+}
+
+/* Ensure the required lock for the event queue (where asynchronous events
+ * are posted) is acquired before calling this API. Otherwise an asynchronous
+ * event (with the latest link status) can reach the destination before this
+ * function returns and make the link status appear wrong.
+ */
+int cgx_get_link_info(void *cgxd, int lmac_id,
+ struct cgx_link_user_info *linfo)
+{
+ struct lmac *lmac = lmac_pdata(lmac_id, cgxd);
+
+ if (!lmac)
+ return -ENODEV;
+
+ *linfo = lmac->link_info;
+ return 0;
+}
+
+int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr)
+{
+ struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+ struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
+ struct mac_ops *mac_ops;
+ int index, id;
+ u64 cfg;
+
+ if (!lmac)
+ return -ENODEV;
+
+ /* access mac_ops to know csr_offset */
+ mac_ops = cgx_dev->mac_ops;
+
+ /* copy the 6-byte MAC address into the low 48 bits of cfg */
+
+ cfg = ether_addr_to_u64(mac_addr);
+
+ id = get_sequence_id_of_lmac(cgx_dev, lmac_id);
+
+ index = id * lmac->mac_to_index_bmap.max;
+
+ cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)),
+ cfg | CGX_DMAC_CAM_ADDR_ENABLE | ((u64)lmac_id << 49));
+
+ cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
+ cfg |= (CGX_DMAC_CTL0_CAM_ENABLE | CGX_DMAC_BCAST_MODE |
+ CGX_DMAC_MCAST_MODE);
+ cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
+
+ return 0;
+}
+
+u64 cgx_read_dmac_ctrl(void *cgxd, int lmac_id)
+{
+ struct mac_ops *mac_ops;
+ struct cgx *cgx = cgxd;
+
+ if (!cgxd || !is_lmac_valid(cgxd, lmac_id))
+ return 0;
+
+ cgx = cgxd;
+ /* Get mac_ops to know csr offset */
+ mac_ops = cgx->mac_ops;
+
+ return cgx_read(cgxd, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
+}
+
+u64 cgx_read_dmac_entry(void *cgxd, int index)
+{
+ struct mac_ops *mac_ops;
+ struct cgx *cgx;
+
+ if (!cgxd)
+ return 0;
+
+ cgx = cgxd;
+ mac_ops = cgx->mac_ops;
+ return cgx_read(cgx, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 8)));
+}
+
+int cgx_lmac_addr_add(u8 cgx_id, u8 lmac_id, u8 *mac_addr)
+{
+ struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+ struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
+ struct mac_ops *mac_ops;
+ int index, idx;
+ u64 cfg = 0;
+ int id;
+
+ if (!lmac)
+ return -ENODEV;
+
+ mac_ops = cgx_dev->mac_ops;
+ /* Get available index where entry is to be installed */
+ idx = rvu_alloc_rsrc(&lmac->mac_to_index_bmap);
+ if (idx < 0)
+ return idx;
+
+ id = get_sequence_id_of_lmac(cgx_dev, lmac_id);
+
+ index = id * lmac->mac_to_index_bmap.max + idx;
+
+ cfg = ether_addr_to_u64(mac_addr);
+ cfg |= CGX_DMAC_CAM_ADDR_ENABLE;
+ cfg |= ((u64)lmac_id << 49);
+ cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), cfg);
+
+ cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
+ cfg |= (CGX_DMAC_BCAST_MODE | CGX_DMAC_CAM_ACCEPT);
+
+ if (is_multicast_ether_addr(mac_addr)) {
+ cfg &= ~GENMASK_ULL(2, 1);
+ cfg |= CGX_DMAC_MCAST_MODE_CAM;
+ lmac->mcast_filters_count++;
+ } else if (!lmac->mcast_filters_count) {
+ cfg |= CGX_DMAC_MCAST_MODE;
+ }
+
+ cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
+
+ return idx;
+}
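+
+/* Worked example (illustrative, using the defaults from cgx_mac_ops below
+ * where dmac_filter_count is 32): with 4 enabled LMACs each LMAC gets
+ * mac_to_index_bmap.max == 32 / 4 == 8 CAM slots. For the LMAC whose
+ * sequence id is 2 and an allocated slot idx of 3, the global CAM index is
+ * 2 * 8 + 3 == 19, so the entry is written at
+ * CGXX_CMRX_RX_DMAC_CAM0 + 19 * 0x8.
+ */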
+
+int cgx_lmac_addr_reset(u8 cgx_id, u8 lmac_id)
+{
+ struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+ struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
+ struct mac_ops *mac_ops;
+ u8 index = 0, id;
+ u64 cfg;
+
+ if (!lmac)
+ return -ENODEV;
+
+ mac_ops = cgx_dev->mac_ops;
+ /* Restore index 0 to its default init value as done during
+ * cgx_lmac_init
+ */
+ set_bit(0, lmac->mac_to_index_bmap.bmap);
+
+ id = get_sequence_id_of_lmac(cgx_dev, lmac_id);
+
+ index = id * lmac->mac_to_index_bmap.max + index;
+ cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), 0);
+
+ /* Reset CGXX_CMRX_RX_DMAC_CTL0 register to default state */
+ cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
+ cfg &= ~CGX_DMAC_CAM_ACCEPT;
+ cfg |= (CGX_DMAC_BCAST_MODE | CGX_DMAC_MCAST_MODE);
+ cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
+
+ return 0;
+}
+
+/* Allows the caller to change the MAC address associated with an index
+ * in the DMAC filter table, including index 0, which is reserved for
+ * the interface MAC address
+ */
+int cgx_lmac_addr_update(u8 cgx_id, u8 lmac_id, u8 *mac_addr, u8 index)
+{
+ struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+ struct mac_ops *mac_ops;
+ struct lmac *lmac;
+ u64 cfg;
+ int id;
+
+ lmac = lmac_pdata(lmac_id, cgx_dev);
+ if (!lmac)
+ return -ENODEV;
+
+ mac_ops = cgx_dev->mac_ops;
+ /* Validate the index */
+ if (index >= lmac->mac_to_index_bmap.max)
+ return -EINVAL;
+
+ /* ensure index is already set */
+ if (!test_bit(index, lmac->mac_to_index_bmap.bmap))
+ return -EINVAL;
+
+ id = get_sequence_id_of_lmac(cgx_dev, lmac_id);
+
+ index = id * lmac->mac_to_index_bmap.max + index;
+
+ cfg = cgx_read(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)));
+ cfg &= ~CGX_RX_DMAC_ADR_MASK;
+ cfg |= ether_addr_to_u64(mac_addr);
+
+ cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), cfg);
+ return 0;
+}
+
+int cgx_lmac_addr_del(u8 cgx_id, u8 lmac_id, u8 index)
+{
+ struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+ struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
+ struct mac_ops *mac_ops;
+ u8 mac[ETH_ALEN];
+ u64 cfg;
+ int id;
+
+ if (!lmac)
+ return -ENODEV;
+
+ mac_ops = cgx_dev->mac_ops;
+ /* Validate the index */
+ if (index >= lmac->mac_to_index_bmap.max)
+ return -EINVAL;
+
+ /* Skip deletion for reserved index i.e. index 0 */
+ if (index == 0)
+ return 0;
+
+ rvu_free_rsrc(&lmac->mac_to_index_bmap, index);
+
+ id = get_sequence_id_of_lmac(cgx_dev, lmac_id);
+
+ index = id * lmac->mac_to_index_bmap.max + index;
+
+ /* Read MAC address to check whether it is ucast or mcast */
+ cfg = cgx_read(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)));
+
+ u64_to_ether_addr(cfg, mac);
+ if (is_multicast_ether_addr(mac))
+ lmac->mcast_filters_count--;
+
+ if (!lmac->mcast_filters_count) {
+ cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
+ cfg &= ~GENMASK_ULL(2, 1);
+ cfg |= CGX_DMAC_MCAST_MODE;
+ cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
+ }
+
+ cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), 0);
+
+ return 0;
+}
+
+int cgx_lmac_addr_max_entries_get(u8 cgx_id, u8 lmac_id)
+{
+ struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+ struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
+
+ if (lmac)
+ return lmac->mac_to_index_bmap.max;
+
+ return 0;
+}
+
+u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id)
+{
+ struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+ struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
+ struct mac_ops *mac_ops;
+ int index;
+ u64 cfg;
+ int id;
+
+ mac_ops = cgx_dev->mac_ops;
+
+ id = get_sequence_id_of_lmac(cgx_dev, lmac_id);
+
+ index = id * lmac->mac_to_index_bmap.max;
+
+ cfg = cgx_read(cgx_dev, 0, CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8);
+ return cfg & CGX_RX_DMAC_ADR_MASK;
+}
+
+int cgx_set_pkind(void *cgxd, u8 lmac_id, int pkind)
+{
+ struct cgx *cgx = cgxd;
+
+ if (!is_lmac_valid(cgx, lmac_id))
+ return -ENODEV;
+
+ cgx_write(cgx, lmac_id, cgx->mac_ops->rxid_map_offset, (pkind & 0x3F));
+ return 0;
+}
+
+static u8 cgx_get_lmac_type(void *cgxd, int lmac_id)
+{
+ struct cgx *cgx = cgxd;
+ u64 cfg;
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
+ return (cfg >> CGX_LMAC_TYPE_SHIFT) & CGX_LMAC_TYPE_MASK;
+}
+
+static u32 cgx_get_lmac_fifo_len(void *cgxd, int lmac_id)
+{
+ struct cgx *cgx = cgxd;
+ u8 num_lmacs;
+ u32 fifo_len;
+
+ fifo_len = cgx->mac_ops->fifo_len;
+ num_lmacs = cgx->mac_ops->get_nr_lmacs(cgx);
+
+ switch (num_lmacs) {
+ case 1:
+ return fifo_len;
+ case 2:
+ return fifo_len / 2;
+ case 3:
+ /* LMAC0 gets half of the FIFO, the rest get 1/4th each */
+ if (lmac_id == 0)
+ return fifo_len / 2;
+ return fifo_len / 4;
+ case 4:
+ default:
+ return fifo_len / 4;
+ }
+ return 0;
+}
+
+/* Configure CGX LMAC in internal loopback mode */
+int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable)
+{
+ struct cgx *cgx = cgxd;
+ struct lmac *lmac;
+ u64 cfg;
+
+ if (!is_lmac_valid(cgx, lmac_id))
+ return -ENODEV;
+
+ lmac = lmac_pdata(lmac_id, cgx);
+ if (lmac->lmac_type == LMAC_MODE_SGMII ||
+ lmac->lmac_type == LMAC_MODE_QSGMII) {
+ cfg = cgx_read(cgx, lmac_id, CGXX_GMP_PCS_MRX_CTL);
+ if (enable)
+ cfg |= CGXX_GMP_PCS_MRX_CTL_LBK;
+ else
+ cfg &= ~CGXX_GMP_PCS_MRX_CTL_LBK;
+ cgx_write(cgx, lmac_id, CGXX_GMP_PCS_MRX_CTL, cfg);
+ } else {
+ cfg = cgx_read(cgx, lmac_id, CGXX_SPUX_CONTROL1);
+ if (enable)
+ cfg |= CGXX_SPUX_CONTROL1_LBK;
+ else
+ cfg &= ~CGXX_SPUX_CONTROL1_LBK;
+ cgx_write(cgx, lmac_id, CGXX_SPUX_CONTROL1, cfg);
+ }
+ return 0;
+}
+
+void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable)
+{
+ struct cgx *cgx = cgx_get_pdata(cgx_id);
+ struct lmac *lmac = lmac_pdata(lmac_id, cgx);
+ struct mac_ops *mac_ops;
+ u16 max_dmac;
+ int index, i;
+ u64 cfg = 0;
+ int id;
+
+ if (!cgx || !lmac)
+ return;
+
+ max_dmac = lmac->mac_to_index_bmap.max;
+ id = get_sequence_id_of_lmac(cgx, lmac_id);
+
+ mac_ops = cgx->mac_ops;
+ if (enable) {
+ /* Enable promiscuous mode on LMAC */
+ cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
+ cfg &= ~CGX_DMAC_CAM_ACCEPT;
+ cfg |= (CGX_DMAC_BCAST_MODE | CGX_DMAC_MCAST_MODE);
+ cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
+
+ for (i = 0; i < max_dmac; i++) {
+ index = id * max_dmac + i;
+ cfg = cgx_read(cgx, 0,
+ (CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8));
+ cfg &= ~CGX_DMAC_CAM_ADDR_ENABLE;
+ cgx_write(cgx, 0,
+ (CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8), cfg);
+ }
+ } else {
+ /* Disable promiscuous mode */
+ cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
+ cfg |= CGX_DMAC_CAM_ACCEPT | CGX_DMAC_MCAST_MODE;
+ cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
+ for (i = 0; i < max_dmac; i++) {
+ index = id * max_dmac + i;
+ cfg = cgx_read(cgx, 0,
+ (CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8));
+ if ((cfg & CGX_RX_DMAC_ADR_MASK) != 0) {
+ cfg |= CGX_DMAC_CAM_ADDR_ENABLE;
+ cgx_write(cgx, 0,
+ (CGXX_CMRX_RX_DMAC_CAM0 +
+ index * 0x8),
+ cfg);
+ }
+ }
+ }
+}
+
+static int cgx_lmac_get_pause_frm_status(void *cgxd, int lmac_id,
+ u8 *tx_pause, u8 *rx_pause)
+{
+ struct cgx *cgx = cgxd;
+ u64 cfg;
+
+ if (is_dev_rpm(cgx))
+ return 0;
+
+ if (!is_lmac_valid(cgx, lmac_id))
+ return -ENODEV;
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
+ *rx_pause = !!(cfg & CGX_SMUX_RX_FRM_CTL_CTL_BCK);
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
+ *tx_pause = !!(cfg & CGX_SMUX_TX_CTL_L2P_BP_CONV);
+ return 0;
+}
+
+/* Enable or disable forwarding received pause frames to Tx block */
+void cgx_lmac_enadis_rx_pause_fwding(void *cgxd, int lmac_id, bool enable)
+{
+ struct cgx *cgx = cgxd;
+ u8 rx_pause, tx_pause;
+ bool is_pfc_enabled;
+ struct lmac *lmac;
+ u64 cfg;
+
+ if (!cgx)
+ return;
+
+ lmac = lmac_pdata(lmac_id, cgx);
+ if (!lmac)
+ return;
+
+ /* Pause frames are not enabled, just return */
+ if (!bitmap_weight(lmac->rx_fc_pfvf_bmap.bmap, lmac->rx_fc_pfvf_bmap.max))
+ return;
+
+ cgx_lmac_get_pause_frm_status(cgx, lmac_id, &rx_pause, &tx_pause);
+ is_pfc_enabled = rx_pause ? false : true;
+
+ if (enable) {
+ if (!is_pfc_enabled) {
+ cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
+ cfg |= CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
+ cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
+ cfg |= CGX_SMUX_RX_FRM_CTL_CTL_BCK;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+ } else {
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL);
+ cfg |= CGXX_SMUX_CBFC_CTL_BCK_EN;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_CBFC_CTL, cfg);
+ }
+ } else {
+ if (!is_pfc_enabled) {
+ cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
+ cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
+ cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
+ cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+ } else {
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL);
+ cfg &= ~CGXX_SMUX_CBFC_CTL_BCK_EN;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_CBFC_CTL, cfg);
+ }
+ }
+}
+
+int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat)
+{
+ struct cgx *cgx = cgxd;
+
+ if (!is_lmac_valid(cgx, lmac_id))
+ return -ENODEV;
+ *rx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_STAT0 + (idx * 8));
+ return 0;
+}
+
+int cgx_get_tx_stats(void *cgxd, int lmac_id, int idx, u64 *tx_stat)
+{
+ struct cgx *cgx = cgxd;
+
+ if (!is_lmac_valid(cgx, lmac_id))
+ return -ENODEV;
+ *tx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_TX_STAT0 + (idx * 8));
+ return 0;
+}
+
+u64 cgx_features_get(void *cgxd)
+{
+ return ((struct cgx *)cgxd)->hw_features;
+}
+
+static int cgx_set_fec_stats_count(struct cgx_link_user_info *linfo)
+{
+ if (!linfo->fec)
+ return 0;
+
+ switch (linfo->lmac_type_id) {
+ case LMAC_MODE_SGMII:
+ case LMAC_MODE_XAUI:
+ case LMAC_MODE_RXAUI:
+ case LMAC_MODE_QSGMII:
+ return 0;
+ case LMAC_MODE_10G_R:
+ case LMAC_MODE_25G_R:
+ case LMAC_MODE_100G_R:
+ case LMAC_MODE_USXGMII:
+ return 1;
+ case LMAC_MODE_40G_R:
+ return 4;
+ case LMAC_MODE_50G_R:
+ if (linfo->fec == OTX2_FEC_BASER)
+ return 2;
+ else
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+int cgx_get_fec_stats(void *cgxd, int lmac_id, struct cgx_fec_stats_rsp *rsp)
+{
+ int stats, fec_stats_count = 0;
+ int corr_reg, uncorr_reg;
+ struct cgx *cgx = cgxd;
+
+ if (!is_lmac_valid(cgx, lmac_id))
+ return -ENODEV;
+
+ if (cgx->lmac_idmap[lmac_id]->link_info.fec == OTX2_FEC_NONE)
+ return 0;
+
+ fec_stats_count =
+ cgx_set_fec_stats_count(&cgx->lmac_idmap[lmac_id]->link_info);
+ if (cgx->lmac_idmap[lmac_id]->link_info.fec == OTX2_FEC_BASER) {
+ corr_reg = CGXX_SPUX_LNX_FEC_CORR_BLOCKS;
+ uncorr_reg = CGXX_SPUX_LNX_FEC_UNCORR_BLOCKS;
+ } else {
+ corr_reg = CGXX_SPUX_RSFEC_CORR;
+ uncorr_reg = CGXX_SPUX_RSFEC_UNCORR;
+ }
+ for (stats = 0; stats < fec_stats_count; stats++) {
+ rsp->fec_corr_blks +=
+ cgx_read(cgx, lmac_id, corr_reg + (stats * 8));
+ rsp->fec_uncorr_blks +=
+ cgx_read(cgx, lmac_id, uncorr_reg + (stats * 8));
+ }
+ return 0;
+}
+
+int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable)
+{
+ struct cgx *cgx = cgxd;
+ u64 cfg;
+
+ if (!is_lmac_valid(cgx, lmac_id))
+ return -ENODEV;
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
+ if (enable)
+ cfg |= DATA_PKT_RX_EN | DATA_PKT_TX_EN;
+ else
+ cfg &= ~(DATA_PKT_RX_EN | DATA_PKT_TX_EN);
+ cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg);
+ return 0;
+}
+
+int cgx_lmac_tx_enable(void *cgxd, int lmac_id, bool enable)
+{
+ struct cgx *cgx = cgxd;
+ u64 cfg, last;
+
+ if (!is_lmac_valid(cgx, lmac_id))
+ return -ENODEV;
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
+ last = cfg;
+ if (enable)
+ cfg |= DATA_PKT_TX_EN;
+ else
+ cfg &= ~DATA_PKT_TX_EN;
+
+ if (cfg != last)
+ cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg);
+ return !!(last & DATA_PKT_TX_EN);
+}
+
+static int cgx_lmac_enadis_pause_frm(void *cgxd, int lmac_id,
+ u8 tx_pause, u8 rx_pause)
+{
+ struct cgx *cgx = cgxd;
+ u64 cfg;
+
+ if (is_dev_rpm(cgx))
+ return 0;
+
+ if (!is_lmac_valid(cgx, lmac_id))
+ return -ENODEV;
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
+ cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
+ cfg |= rx_pause ? CGX_SMUX_RX_FRM_CTL_CTL_BCK : 0x0;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
+ cfg &= ~CGX_SMUX_TX_CTL_L2P_BP_CONV;
+ cfg |= tx_pause ? CGX_SMUX_TX_CTL_L2P_BP_CONV : 0x0;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg);
+
+ cfg = cgx_read(cgx, 0, CGXX_CMR_RX_OVR_BP);
+ if (tx_pause) {
+ cfg &= ~CGX_CMR_RX_OVR_BP_EN(lmac_id);
+ } else {
+ cfg |= CGX_CMR_RX_OVR_BP_EN(lmac_id);
+ cfg &= ~CGX_CMR_RX_OVR_BP_BP(lmac_id);
+ }
+ cgx_write(cgx, 0, CGXX_CMR_RX_OVR_BP, cfg);
+ return 0;
+}
+
+static void cgx_lmac_pause_frm_config(void *cgxd, int lmac_id, bool enable)
+{
+ struct cgx *cgx = cgxd;
+ u64 cfg;
+
+ if (!is_lmac_valid(cgx, lmac_id))
+ return;
+
+ if (enable) {
+ /* Set pause time and interval */
+ cgx_write(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_TIME,
+ DEFAULT_PAUSE_TIME);
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_INTERVAL);
+ cfg &= ~0xFFFFULL;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_INTERVAL,
+ cfg | (DEFAULT_PAUSE_TIME / 2));
+
+ cgx_write(cgx, lmac_id, CGXX_GMP_GMI_TX_PAUSE_PKT_TIME,
+ DEFAULT_PAUSE_TIME);
+
+ cfg = cgx_read(cgx, lmac_id,
+ CGXX_GMP_GMI_TX_PAUSE_PKT_INTERVAL);
+ cfg &= ~0xFFFFULL;
+ cgx_write(cgx, lmac_id, CGXX_GMP_GMI_TX_PAUSE_PKT_INTERVAL,
+ cfg | (DEFAULT_PAUSE_TIME / 2));
+ }
+
+ /* ALL pause frames received are completely ignored */
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
+ cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
+ cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
+ cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
+
+ /* Disable pause frames transmission */
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
+ cfg &= ~CGX_SMUX_TX_CTL_L2P_BP_CONV;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg);
+
+ cfg = cgx_read(cgx, 0, CGXX_CMR_RX_OVR_BP);
+ cfg |= CGX_CMR_RX_OVR_BP_EN(lmac_id);
+ cfg &= ~CGX_CMR_RX_OVR_BP_BP(lmac_id);
+ cgx_write(cgx, 0, CGXX_CMR_RX_OVR_BP, cfg);
+
+ /* Disable all PFC classes by default */
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL);
+ cfg = FIELD_SET(CGX_PFC_CLASS_MASK, 0, cfg);
+ cgx_write(cgx, lmac_id, CGXX_SMUX_CBFC_CTL, cfg);
+}
+
+int verify_lmac_fc_cfg(void *cgxd, int lmac_id, u8 tx_pause, u8 rx_pause,
+ int pfvf_idx)
+{
+ struct cgx *cgx = cgxd;
+ struct lmac *lmac;
+
+ lmac = lmac_pdata(lmac_id, cgx);
+ if (!lmac)
+ return -ENODEV;
+
+ if (!rx_pause)
+ clear_bit(pfvf_idx, lmac->rx_fc_pfvf_bmap.bmap);
+ else
+ set_bit(pfvf_idx, lmac->rx_fc_pfvf_bmap.bmap);
+
+ if (!tx_pause)
+ clear_bit(pfvf_idx, lmac->tx_fc_pfvf_bmap.bmap);
+ else
+ set_bit(pfvf_idx, lmac->tx_fc_pfvf_bmap.bmap);
+
+ /* check if other pfvfs are using flow control */
+ if (!rx_pause && bitmap_weight(lmac->rx_fc_pfvf_bmap.bmap, lmac->rx_fc_pfvf_bmap.max)) {
+ dev_warn(&cgx->pdev->dev,
+ "Receive Flow control disable not permitted as its used by other PFVFs\n");
+ return -EPERM;
+ }
+
+ if (!tx_pause && bitmap_weight(lmac->tx_fc_pfvf_bmap.bmap, lmac->tx_fc_pfvf_bmap.max)) {
+ dev_warn(&cgx->pdev->dev,
+ "Transmit Flow control disable not permitted as its used by other PFVFs\n");
+ return -EPERM;
+ }
+
+ return 0;
+}
+
+int cgx_lmac_pfc_config(void *cgxd, int lmac_id, u8 tx_pause,
+ u8 rx_pause, u16 pfc_en)
+{
+ struct cgx *cgx = cgxd;
+ u64 cfg;
+
+ if (!is_lmac_valid(cgx, lmac_id))
+ return -ENODEV;
+
+ /* Return as no traffic classes are requested */
+ if (tx_pause && !pfc_en)
+ return 0;
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL);
+ pfc_en |= FIELD_GET(CGX_PFC_CLASS_MASK, cfg);
+
+ if (rx_pause) {
+ cfg |= (CGXX_SMUX_CBFC_CTL_RX_EN |
+ CGXX_SMUX_CBFC_CTL_BCK_EN |
+ CGXX_SMUX_CBFC_CTL_DRP_EN);
+ } else {
+ cfg &= ~(CGXX_SMUX_CBFC_CTL_RX_EN |
+ CGXX_SMUX_CBFC_CTL_BCK_EN |
+ CGXX_SMUX_CBFC_CTL_DRP_EN);
+ }
+
+ if (tx_pause) {
+ cfg |= CGXX_SMUX_CBFC_CTL_TX_EN;
+ cfg = FIELD_SET(CGX_PFC_CLASS_MASK, pfc_en, cfg);
+ } else {
+ cfg &= ~CGXX_SMUX_CBFC_CTL_TX_EN;
+ cfg = FIELD_SET(CGX_PFC_CLASS_MASK, 0, cfg);
+ }
+
+ cgx_write(cgx, lmac_id, CGXX_SMUX_CBFC_CTL, cfg);
+
+ /* Write source MAC address which will be filled into PFC packet */
+ cfg = cgx_lmac_addr_get(cgx->cgx_id, lmac_id);
+ cgx_write(cgx, lmac_id, CGXX_SMUX_SMAC, cfg);
+
+ return 0;
+}
+
+int cgx_lmac_get_pfc_frm_cfg(void *cgxd, int lmac_id, u8 *tx_pause,
+ u8 *rx_pause)
+{
+ struct cgx *cgx = cgxd;
+ u64 cfg;
+
+ if (!is_lmac_valid(cgx, lmac_id))
+ return -ENODEV;
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL);
+
+ *rx_pause = !!(cfg & CGXX_SMUX_CBFC_CTL_RX_EN);
+ *tx_pause = !!(cfg & CGXX_SMUX_CBFC_CTL_TX_EN);
+
+ return 0;
+}
+
+void cgx_lmac_ptp_config(void *cgxd, int lmac_id, bool enable)
+{
+ struct cgx *cgx = cgxd;
+ u64 cfg;
+
+ if (!cgx)
+ return;
+
+ if (enable) {
+ /* Enable inbound PTP timestamping */
+ cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
+ cfg |= CGX_GMP_GMI_RXX_FRM_CTL_PTP_MODE;
+ cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
+ cfg |= CGX_SMUX_RX_FRM_CTL_PTP_MODE;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+ } else {
+ /* Disable inbound PTP stamping */
+ cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
+ cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_PTP_MODE;
+ cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
+ cfg &= ~CGX_SMUX_RX_FRM_CTL_PTP_MODE;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+ }
+}
+
+/* CGX Firmware interface low level support */
+int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac)
+{
+ struct cgx *cgx = lmac->cgx;
+ struct device *dev;
+ int err = 0;
+ u64 cmd;
+
+ /* Ensure no other command is in progress */
+ err = mutex_lock_interruptible(&lmac->cmd_lock);
+ if (err)
+ return err;
+
+ /* Ensure command register is free */
+ cmd = cgx_read(cgx, lmac->lmac_id, CGX_COMMAND_REG);
+ if (FIELD_GET(CMDREG_OWN, cmd) != CGX_CMD_OWN_NS) {
+ err = -EBUSY;
+ goto unlock;
+ }
+
+ /* Update ownership in command request */
+ req = FIELD_SET(CMDREG_OWN, CGX_CMD_OWN_FIRMWARE, req);
+
+ /* Mark this lmac as pending, before we start */
+ lmac->cmd_pend = true;
+
+ /* Start command in hardware */
+ cgx_write(cgx, lmac->lmac_id, CGX_COMMAND_REG, req);
+
+ /* Ensure command is completed without errors */
+ if (!wait_event_timeout(lmac->wq_cmd_cmplt, !lmac->cmd_pend,
+ msecs_to_jiffies(CGX_CMD_TIMEOUT))) {
+ dev = &cgx->pdev->dev;
+ dev_err(dev, "cgx port %d:%d cmd %lld timeout\n",
+ cgx->cgx_id, lmac->lmac_id, FIELD_GET(CMDREG_ID, req));
+ err = LMAC_AF_ERR_CMD_TIMEOUT;
+ goto unlock;
+ }
+
+ /* we have a valid command response */
+ smp_rmb(); /* Ensure the latest updates are visible */
+ *resp = lmac->resp;
+
+unlock:
+ mutex_unlock(&lmac->cmd_lock);
+
+ return err;
+}
+
+int cgx_fwi_cmd_generic(u64 req, u64 *resp, struct cgx *cgx, int lmac_id)
+{
+ struct lmac *lmac;
+ int err;
+
+ lmac = lmac_pdata(lmac_id, cgx);
+ if (!lmac)
+ return -ENODEV;
+
+ err = cgx_fwi_cmd_send(req, resp, lmac);
+
+ /* Check for valid response */
+ if (!err) {
+ if (FIELD_GET(EVTREG_STAT, *resp) == CGX_STAT_FAIL)
+ return -EIO;
+ else
+ return 0;
+ }
+
+ return err;
+}
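+
+/* Usage sketch (illustrative only; real callers such as
+ * cgx_fwi_read_version() and cgx_get_fwdata_base() appear later in this
+ * file): a firmware command is a 64-bit word built with FIELD_SET() and
+ * issued through cgx_fwi_cmd_generic():
+ *
+ *	u64 req = 0, resp;
+ *	int err;
+ *
+ *	req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FW_VER, req);
+ *	err = cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
+ *	if (!err)
+ *		major_ver = FIELD_GET(RESP_MAJOR_VER, resp);
+ */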
+
+static int cgx_link_usertable_index_map(int speed)
+{
+ switch (speed) {
+ case SPEED_10:
+ return CGX_LINK_10M;
+ case SPEED_100:
+ return CGX_LINK_100M;
+ case SPEED_1000:
+ return CGX_LINK_1G;
+ case SPEED_2500:
+ return CGX_LINK_2HG;
+ case SPEED_5000:
+ return CGX_LINK_5G;
+ case SPEED_10000:
+ return CGX_LINK_10G;
+ case SPEED_20000:
+ return CGX_LINK_20G;
+ case SPEED_25000:
+ return CGX_LINK_25G;
+ case SPEED_40000:
+ return CGX_LINK_40G;
+ case SPEED_50000:
+ return CGX_LINK_50G;
+ case 80000:
+ return CGX_LINK_80G;
+ case SPEED_100000:
+ return CGX_LINK_100G;
+ case SPEED_UNKNOWN:
+ return CGX_LINK_NONE;
+ }
+ return CGX_LINK_NONE;
+}
+
+static void set_mod_args(struct cgx_set_link_mode_args *args,
+ u32 speed, u8 duplex, u8 autoneg, u64 mode)
+{
+ /* Fill default values in case the user did not pass
+ * valid parameters
+ */
+ if (args->duplex == DUPLEX_UNKNOWN)
+ args->duplex = duplex;
+ if (args->speed == SPEED_UNKNOWN)
+ args->speed = speed;
+ if (args->an == AUTONEG_UNKNOWN)
+ args->an = autoneg;
+ args->mode = mode;
+ args->ports = 0;
+}
+
+static void otx2_map_ethtool_link_modes(u64 bitmask,
+ struct cgx_set_link_mode_args *args)
+{
+ switch (bitmask) {
+ case ETHTOOL_LINK_MODE_10baseT_Half_BIT:
+ set_mod_args(args, 10, 1, 1, BIT_ULL(CGX_MODE_SGMII));
+ break;
+ case ETHTOOL_LINK_MODE_10baseT_Full_BIT:
+ set_mod_args(args, 10, 0, 1, BIT_ULL(CGX_MODE_SGMII));
+ break;
+ case ETHTOOL_LINK_MODE_100baseT_Half_BIT:
+ set_mod_args(args, 100, 1, 1, BIT_ULL(CGX_MODE_SGMII));
+ break;
+ case ETHTOOL_LINK_MODE_100baseT_Full_BIT:
+ set_mod_args(args, 100, 0, 1, BIT_ULL(CGX_MODE_SGMII));
+ break;
+ case ETHTOOL_LINK_MODE_1000baseT_Half_BIT:
+ set_mod_args(args, 1000, 1, 1, BIT_ULL(CGX_MODE_SGMII));
+ break;
+ case ETHTOOL_LINK_MODE_1000baseT_Full_BIT:
+ set_mod_args(args, 1000, 0, 1, BIT_ULL(CGX_MODE_SGMII));
+ break;
+ case ETHTOOL_LINK_MODE_1000baseX_Full_BIT:
+ set_mod_args(args, 1000, 0, 0, BIT_ULL(CGX_MODE_1000_BASEX));
+ break;
+ case ETHTOOL_LINK_MODE_10000baseT_Full_BIT:
+ set_mod_args(args, 1000, 0, 1, BIT_ULL(CGX_MODE_QSGMII));
+ break;
+ case ETHTOOL_LINK_MODE_10000baseSR_Full_BIT:
+ set_mod_args(args, 10000, 0, 0, BIT_ULL(CGX_MODE_10G_C2C));
+ break;
+ case ETHTOOL_LINK_MODE_10000baseLR_Full_BIT:
+ set_mod_args(args, 10000, 0, 0, BIT_ULL(CGX_MODE_10G_C2M));
+ break;
+ case ETHTOOL_LINK_MODE_10000baseKR_Full_BIT:
+ set_mod_args(args, 10000, 0, 1, BIT_ULL(CGX_MODE_10G_KR));
+ break;
+ case ETHTOOL_LINK_MODE_25000baseSR_Full_BIT:
+ set_mod_args(args, 25000, 0, 0, BIT_ULL(CGX_MODE_25G_C2C));
+ break;
+ case ETHTOOL_LINK_MODE_25000baseCR_Full_BIT:
+ set_mod_args(args, 25000, 0, 1, BIT_ULL(CGX_MODE_25G_CR));
+ break;
+ case ETHTOOL_LINK_MODE_25000baseKR_Full_BIT:
+ set_mod_args(args, 25000, 0, 1, BIT_ULL(CGX_MODE_25G_KR));
+ break;
+ case ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT:
+ set_mod_args(args, 40000, 0, 0, BIT_ULL(CGX_MODE_40G_C2C));
+ break;
+ case ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT:
+ set_mod_args(args, 40000, 0, 0, BIT_ULL(CGX_MODE_40G_C2M));
+ break;
+ case ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT:
+ set_mod_args(args, 40000, 0, 1, BIT_ULL(CGX_MODE_40G_CR4));
+ break;
+ case ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT:
+ set_mod_args(args, 40000, 0, 1, BIT_ULL(CGX_MODE_40G_KR4));
+ break;
+ case ETHTOOL_LINK_MODE_50000baseSR_Full_BIT:
+ set_mod_args(args, 50000, 0, 0, BIT_ULL(CGX_MODE_50G_C2C));
+ break;
+ case ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT:
+ set_mod_args(args, 50000, 0, 0, BIT_ULL(CGX_MODE_50G_C2M));
+ break;
+ case ETHTOOL_LINK_MODE_50000baseCR_Full_BIT:
+ set_mod_args(args, 50000, 0, 1, BIT_ULL(CGX_MODE_50G_CR));
+ break;
+ case ETHTOOL_LINK_MODE_50000baseKR_Full_BIT:
+ set_mod_args(args, 50000, 0, 1, BIT_ULL(CGX_MODE_50G_KR));
+ break;
+ case ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT:
+ set_mod_args(args, 100000, 0, 0, BIT_ULL(CGX_MODE_100G_C2C));
+ break;
+ case ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT:
+ set_mod_args(args, 100000, 0, 0, BIT_ULL(CGX_MODE_100G_C2M));
+ break;
+ case ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT:
+ set_mod_args(args, 100000, 0, 1, BIT_ULL(CGX_MODE_100G_CR4));
+ break;
+ case ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT:
+ set_mod_args(args, 100000, 0, 1, BIT_ULL(CGX_MODE_100G_KR4));
+ break;
+ default:
+ set_mod_args(args, 0, 1, 0, BIT_ULL(CGX_MODE_MAX));
+ break;
+ }
+}
+
+static inline void link_status_user_format(u64 lstat,
+ struct cgx_link_user_info *linfo,
+ struct cgx *cgx, u8 lmac_id)
+{
+ const char *lmac_string;
+
+ linfo->link_up = FIELD_GET(RESP_LINKSTAT_UP, lstat);
+ linfo->full_duplex = FIELD_GET(RESP_LINKSTAT_FDUPLEX, lstat);
+ linfo->speed = cgx_speed_mbps[FIELD_GET(RESP_LINKSTAT_SPEED, lstat)];
+ linfo->an = FIELD_GET(RESP_LINKSTAT_AN, lstat);
+ linfo->fec = FIELD_GET(RESP_LINKSTAT_FEC, lstat);
+ linfo->lmac_type_id = FIELD_GET(RESP_LINKSTAT_LMAC_TYPE, lstat);
+
+ if (linfo->lmac_type_id >= LMAC_MODE_MAX) {
+ dev_err(&cgx->pdev->dev, "Unknown lmac_type_id %d reported by firmware on cgx port%d:%d",
+ linfo->lmac_type_id, cgx->cgx_id, lmac_id);
+ strncpy(linfo->lmac_type, "Unknown", LMACTYPE_STR_LEN - 1);
+ return;
+ }
+
+ lmac_string = cgx_lmactype_string[linfo->lmac_type_id];
+ strncpy(linfo->lmac_type, lmac_string, LMACTYPE_STR_LEN - 1);
+}
+
+/* Hardware event handlers */
+static inline void cgx_link_change_handler(u64 lstat,
+ struct lmac *lmac)
+{
+ struct cgx_link_user_info *linfo;
+ struct cgx *cgx = lmac->cgx;
+ struct cgx_link_event event;
+ struct device *dev;
+ int err_type;
+
+ dev = &cgx->pdev->dev;
+
+ link_status_user_format(lstat, &event.link_uinfo, cgx, lmac->lmac_id);
+ err_type = FIELD_GET(RESP_LINKSTAT_ERRTYPE, lstat);
+
+ event.cgx_id = cgx->cgx_id;
+ event.lmac_id = lmac->lmac_id;
+
+ /* update the local copy of link status */
+ lmac->link_info = event.link_uinfo;
+ linfo = &lmac->link_info;
+
+ if (err_type == CGX_ERR_SPEED_CHANGE_INVALID)
+ return;
+
+ /* Ensure callback doesn't get unregistered until we finish it */
+ spin_lock(&lmac->event_cb_lock);
+
+ if (!lmac->event_cb.notify_link_chg) {
+ dev_dbg(dev, "cgx port %d:%d Link change handler null",
+ cgx->cgx_id, lmac->lmac_id);
+ if (err_type != CGX_ERR_NONE) {
+ dev_err(dev, "cgx port %d:%d Link error %d\n",
+ cgx->cgx_id, lmac->lmac_id, err_type);
+ }
+ dev_info(dev, "cgx port %d:%d Link is %s %d Mbps\n",
+ cgx->cgx_id, lmac->lmac_id,
+ linfo->link_up ? "UP" : "DOWN", linfo->speed);
+ goto err;
+ }
+
+ if (lmac->event_cb.notify_link_chg(&event, lmac->event_cb.data))
+ dev_err(dev, "event notification failure\n");
+err:
+ spin_unlock(&lmac->event_cb_lock);
+}
+
+static inline bool cgx_cmdresp_is_linkevent(u64 event)
+{
+ u8 id;
+
+ id = FIELD_GET(EVTREG_ID, event);
+ if (id == CGX_CMD_LINK_BRING_UP ||
+ id == CGX_CMD_LINK_BRING_DOWN ||
+ id == CGX_CMD_MODE_CHANGE)
+ return true;
+ else
+ return false;
+}
+
+static inline bool cgx_event_is_linkevent(u64 event)
+{
+ if (FIELD_GET(EVTREG_ID, event) == CGX_EVT_LINK_CHANGE)
+ return true;
+ else
+ return false;
+}
+
+static irqreturn_t cgx_fwi_event_handler(int irq, void *data)
+{
+ u64 event, offset, clear_bit;
+ struct lmac *lmac = data;
+ struct cgx *cgx;
+
+ cgx = lmac->cgx;
+
+ /* Clear SW_INT for RPM and CMR_INT for CGX */
+ offset = cgx->mac_ops->int_register;
+ clear_bit = cgx->mac_ops->int_ena_bit;
+
+ event = cgx_read(cgx, lmac->lmac_id, CGX_EVENT_REG);
+
+ if (!FIELD_GET(EVTREG_ACK, event))
+ return IRQ_NONE;
+
+ switch (FIELD_GET(EVTREG_EVT_TYPE, event)) {
+ case CGX_EVT_CMD_RESP:
+ /* Copy the response. Since only one command is active at a
+ * time, there is no way a response can get overwritten
+ */
+ lmac->resp = event;
+ /* Ensure response is updated before thread context starts */
+ smp_wmb();
+
+ /* There won't be separate events for link changes initiated from
+ * software; hence report the command responses as events
+ */
+ if (cgx_cmdresp_is_linkevent(event))
+ cgx_link_change_handler(event, lmac);
+
+ /* Release thread waiting for completion */
+ lmac->cmd_pend = false;
+ wake_up_interruptible(&lmac->wq_cmd_cmplt);
+ break;
+ case CGX_EVT_ASYNC:
+ if (cgx_event_is_linkevent(event))
+ cgx_link_change_handler(event, lmac);
+ break;
+ }
+
+ /* Any new event or command response will be posted by firmware
+ * only after the current status is acked.
+ * Ack the interrupt register as well.
+ */
+ cgx_write(lmac->cgx, lmac->lmac_id, CGX_EVENT_REG, 0);
+ cgx_write(lmac->cgx, lmac->lmac_id, offset, clear_bit);
+
+ return IRQ_HANDLED;
+}
+
+/* APIs for PHY management using CGX firmware interface */
+
+/* callback registration for hardware events like link change */
+int cgx_lmac_evh_register(struct cgx_event_cb *cb, void *cgxd, int lmac_id)
+{
+ struct cgx *cgx = cgxd;
+ struct lmac *lmac;
+
+ lmac = lmac_pdata(lmac_id, cgx);
+ if (!lmac)
+ return -ENODEV;
+
+ lmac->event_cb = *cb;
+
+ return 0;
+}
+
+int cgx_lmac_evh_unregister(void *cgxd, int lmac_id)
+{
+ struct lmac *lmac;
+ unsigned long flags;
+ struct cgx *cgx = cgxd;
+
+ lmac = lmac_pdata(lmac_id, cgx);
+ if (!lmac)
+ return -ENODEV;
+
+ spin_lock_irqsave(&lmac->event_cb_lock, flags);
+ lmac->event_cb.notify_link_chg = NULL;
+ lmac->event_cb.data = NULL;
+ spin_unlock_irqrestore(&lmac->event_cb_lock, flags);
+
+ return 0;
+}
+
+int cgx_get_fwdata_base(u64 *base)
+{
+ u64 req = 0, resp;
+ struct cgx *cgx;
+ int first_lmac;
+ int err;
+
+ cgx = list_first_entry_or_null(&cgx_list, struct cgx, cgx_list);
+ if (!cgx)
+ return -ENXIO;
+
+ first_lmac = find_first_bit(&cgx->lmac_bmap, cgx->max_lmac_per_mac);
+ req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FWD_BASE, req);
+ err = cgx_fwi_cmd_generic(req, &resp, cgx, first_lmac);
+ if (!err)
+ *base = FIELD_GET(RESP_FWD_BASE, resp);
+
+ return err;
+}
+
+int cgx_set_link_mode(void *cgxd, struct cgx_set_link_mode_args args,
+ int cgx_id, int lmac_id)
+{
+ struct cgx *cgx = cgxd;
+ u64 req = 0, resp;
+
+ if (!cgx)
+ return -ENODEV;
+
+ if (args.mode)
+ otx2_map_ethtool_link_modes(args.mode, &args);
+ if (!args.speed && args.duplex && !args.an)
+ return -EINVAL;
+
+ req = FIELD_SET(CMDREG_ID, CGX_CMD_MODE_CHANGE, req);
+ req = FIELD_SET(CMDMODECHANGE_SPEED,
+ cgx_link_usertable_index_map(args.speed), req);
+ req = FIELD_SET(CMDMODECHANGE_DUPLEX, args.duplex, req);
+ req = FIELD_SET(CMDMODECHANGE_AN, args.an, req);
+ req = FIELD_SET(CMDMODECHANGE_PORT, args.ports, req);
+ req = FIELD_SET(CMDMODECHANGE_FLAGS, args.mode, req);
+
+ return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
+}
+
+int cgx_set_fec(u64 fec, int cgx_id, int lmac_id)
+{
+ u64 req = 0, resp;
+ struct cgx *cgx;
+ int err = 0;
+
+ cgx = cgx_get_pdata(cgx_id);
+ if (!cgx)
+ return -ENXIO;
+
+ req = FIELD_SET(CMDREG_ID, CGX_CMD_SET_FEC, req);
+ req = FIELD_SET(CMDSETFEC, fec, req);
+ err = cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
+ if (err)
+ return err;
+
+ cgx->lmac_idmap[lmac_id]->link_info.fec =
+ FIELD_GET(RESP_LINKSTAT_FEC, resp);
+ return cgx->lmac_idmap[lmac_id]->link_info.fec;
+}
+
+int cgx_get_phy_fec_stats(void *cgxd, int lmac_id)
+{
+ struct cgx *cgx = cgxd;
+ u64 req = 0, resp;
+
+ if (!cgx)
+ return -ENODEV;
+
+ req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_PHY_FEC_STATS, req);
+ return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
+}
+
+static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool enable)
+{
+ u64 req = 0;
+ u64 resp;
+
+ if (enable) {
+ req = FIELD_SET(CMDREG_ID, CGX_CMD_LINK_BRING_UP, req);
+ /* On CN10K, firmware offloads link bring up/down operations to ECP.
+ * On OcteonTx2, link operations are handled by the firmware itself,
+ * which can cause mbox errors, so configure the maximum time the
+ * firmware polls for the link to 1000 ms.
+ */
+ if (!is_dev_rpm(cgx))
+ req = FIELD_SET(LINKCFG_TIMEOUT, 1000, req);
+
+ } else {
+ req = FIELD_SET(CMDREG_ID, CGX_CMD_LINK_BRING_DOWN, req);
+ }
+ return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
+}
+
+static inline int cgx_fwi_read_version(u64 *resp, struct cgx *cgx)
+{
+ int first_lmac = find_first_bit(&cgx->lmac_bmap, cgx->max_lmac_per_mac);
+ u64 req = 0;
+
+ req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FW_VER, req);
+ return cgx_fwi_cmd_generic(req, resp, cgx, first_lmac);
+}
+
+static int cgx_lmac_verify_fwi_version(struct cgx *cgx)
+{
+ struct device *dev = &cgx->pdev->dev;
+ int major_ver, minor_ver;
+ u64 resp;
+ int err;
+
+ if (!cgx->lmac_count)
+ return 0;
+
+ err = cgx_fwi_read_version(&resp, cgx);
+ if (err)
+ return err;
+
+ major_ver = FIELD_GET(RESP_MAJOR_VER, resp);
+ minor_ver = FIELD_GET(RESP_MINOR_VER, resp);
+ dev_dbg(dev, "Firmware command interface version = %d.%d\n",
+ major_ver, minor_ver);
+ if (major_ver != CGX_FIRMWARE_MAJOR_VER)
+ return -EIO;
+ else
+ return 0;
+}
+
+static void cgx_lmac_linkup_work(struct work_struct *work)
+{
+ struct cgx *cgx = container_of(work, struct cgx, cgx_cmd_work);
+ struct device *dev = &cgx->pdev->dev;
+ int i, err;
+
+ /* Do Link up for all the enabled lmacs */
+ for_each_set_bit(i, &cgx->lmac_bmap, cgx->max_lmac_per_mac) {
+ err = cgx_fwi_link_change(cgx, i, true);
+ if (err)
+ dev_info(dev, "cgx port %d:%d Link up command failed\n",
+ cgx->cgx_id, i);
+ }
+}
+
+int cgx_lmac_linkup_start(void *cgxd)
+{
+ struct cgx *cgx = cgxd;
+
+ if (!cgx)
+ return -ENODEV;
+
+ queue_work(cgx->cgx_cmd_workq, &cgx->cgx_cmd_work);
+
+ return 0;
+}
+
+int cgx_lmac_reset(void *cgxd, int lmac_id, u8 pf_req_flr)
+{
+ struct cgx *cgx = cgxd;
+ u64 cfg;
+
+ if (!is_lmac_valid(cgx, lmac_id))
+ return -ENODEV;
+
+ /* Resetting PFC related CSRs */
+ cfg = 0xff;
+ cgx_write(cgxd, lmac_id, CGXX_CMRX_RX_LOGL_XON, cfg);
+
+ if (pf_req_flr)
+ cgx_lmac_internal_loopback(cgxd, lmac_id, false);
+ return 0;
+}
+
+static int cgx_configure_interrupt(struct cgx *cgx, struct lmac *lmac,
+ int cnt, bool req_free)
+{
+ struct mac_ops *mac_ops = cgx->mac_ops;
+ u64 offset, ena_bit;
+ unsigned int irq;
+ int err;
+
+ irq = pci_irq_vector(cgx->pdev, mac_ops->lmac_fwi +
+ cnt * mac_ops->irq_offset);
+ offset = mac_ops->int_set_reg;
+ ena_bit = mac_ops->int_ena_bit;
+
+ if (req_free) {
+ free_irq(irq, lmac);
+ return 0;
+ }
+
+ err = request_irq(irq, cgx_fwi_event_handler, 0, lmac->name, lmac);
+ if (err)
+ return err;
+
+ /* Enable interrupt */
+ cgx_write(cgx, lmac->lmac_id, offset, ena_bit);
+ return 0;
+}
+
+int cgx_get_nr_lmacs(void *cgxd)
+{
+ struct cgx *cgx = cgxd;
+
+ return cgx_read(cgx, 0, CGXX_CMRX_RX_LMACS) & 0x7ULL;
+}
+
+u8 cgx_get_lmacid(void *cgxd, u8 lmac_index)
+{
+ struct cgx *cgx = cgxd;
+
+ return cgx->lmac_idmap[lmac_index]->lmac_id;
+}
+
+unsigned long cgx_get_lmac_bmap(void *cgxd)
+{
+ struct cgx *cgx = cgxd;
+
+ return cgx->lmac_bmap;
+}
+
+static int cgx_lmac_init(struct cgx *cgx)
+{
+ struct lmac *lmac;
+ u64 lmac_list;
+ int i, err;
+
+ /* lmac_list specifies which LMACs are enabled:
+ * when bit n is set to 1, LMAC[n] is enabled
+ */
+ if (cgx->mac_ops->non_contiguous_serdes_lane) {
+ if (is_dev_rpm2(cgx))
+ lmac_list =
+ cgx_read(cgx, 0, RPM2_CMRX_RX_LMACS) & 0xFFULL;
+ else
+ lmac_list =
+ cgx_read(cgx, 0, CGXX_CMRX_RX_LMACS) & 0xFULL;
+ }
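+
+ /* Example (illustrative): when non_contiguous_serdes_lane is set and
+ * lmac_list == 0x5, LMAC0 and LMAC2 are enabled; the loop below then
+ * picks lmac_id 0 on its first iteration and 2 on its second via
+ * __ffs64().
+ */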
+
+ if (cgx->lmac_count > cgx->max_lmac_per_mac)
+ cgx->lmac_count = cgx->max_lmac_per_mac;
+
+ for (i = 0; i < cgx->lmac_count; i++) {
+ lmac = kzalloc(sizeof(struct lmac), GFP_KERNEL);
+ if (!lmac)
+ return -ENOMEM;
+ lmac->name = kcalloc(1, sizeof("cgx_fwi_xxx_yyy"), GFP_KERNEL);
+ if (!lmac->name) {
+ err = -ENOMEM;
+ goto err_lmac_free;
+ }
+ sprintf(lmac->name, "cgx_fwi_%d_%d", cgx->cgx_id, i);
+ if (cgx->mac_ops->non_contiguous_serdes_lane) {
+ lmac->lmac_id = __ffs64(lmac_list);
+ lmac_list &= ~BIT_ULL(lmac->lmac_id);
+ } else {
+ lmac->lmac_id = i;
+ }
+
+ lmac->cgx = cgx;
+ lmac->mac_to_index_bmap.max =
+ cgx->mac_ops->dmac_filter_count /
+ cgx->lmac_count;
+
+ err = rvu_alloc_bitmap(&lmac->mac_to_index_bmap);
+ if (err)
+ goto err_name_free;
+
+ /* Reserve first entry for default MAC address */
+ set_bit(0, lmac->mac_to_index_bmap.bmap);
+
+ lmac->rx_fc_pfvf_bmap.max = 128;
+ err = rvu_alloc_bitmap(&lmac->rx_fc_pfvf_bmap);
+ if (err)
+ goto err_dmac_bmap_free;
+
+ lmac->tx_fc_pfvf_bmap.max = 128;
+ err = rvu_alloc_bitmap(&lmac->tx_fc_pfvf_bmap);
+ if (err)
+ goto err_rx_fc_bmap_free;
+
+ init_waitqueue_head(&lmac->wq_cmd_cmplt);
+ mutex_init(&lmac->cmd_lock);
+ spin_lock_init(&lmac->event_cb_lock);
+ err = cgx_configure_interrupt(cgx, lmac, lmac->lmac_id, false);
+ if (err)
+ goto err_bitmap_free;
+
+ /* Add reference */
+ cgx->lmac_idmap[lmac->lmac_id] = lmac;
+ set_bit(lmac->lmac_id, &cgx->lmac_bmap);
+ cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, true);
+ lmac->lmac_type = cgx->mac_ops->get_lmac_type(cgx, lmac->lmac_id);
+ }
+
+ return cgx_lmac_verify_fwi_version(cgx);
+
+err_bitmap_free:
+ rvu_free_bitmap(&lmac->tx_fc_pfvf_bmap);
+err_rx_fc_bmap_free:
+ rvu_free_bitmap(&lmac->rx_fc_pfvf_bmap);
+err_dmac_bmap_free:
+ rvu_free_bitmap(&lmac->mac_to_index_bmap);
+err_name_free:
+ kfree(lmac->name);
+err_lmac_free:
+ kfree(lmac);
+ return err;
+}
+
+static int cgx_lmac_exit(struct cgx *cgx)
+{
+ struct lmac *lmac;
+ int i;
+
+ if (cgx->cgx_cmd_workq) {
+ destroy_workqueue(cgx->cgx_cmd_workq);
+ cgx->cgx_cmd_workq = NULL;
+ }
+
+ /* Free all lmac related resources */
+ for_each_set_bit(i, &cgx->lmac_bmap, cgx->max_lmac_per_mac) {
+ lmac = cgx->lmac_idmap[i];
+ if (!lmac)
+ continue;
+ cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, false);
+ cgx_configure_interrupt(cgx, lmac, lmac->lmac_id, true);
+ kfree(lmac->mac_to_index_bmap.bmap);
+ kfree(lmac->name);
+ kfree(lmac);
+ }
+
+ return 0;
+}
+
+static void cgx_populate_features(struct cgx *cgx)
+{
+ u64 cfg;
+
+ cfg = cgx_read(cgx, 0, CGX_CONST);
+ cgx->mac_ops->fifo_len = FIELD_GET(CGX_CONST_RXFIFO_SIZE, cfg);
+ cgx->max_lmac_per_mac = FIELD_GET(CGX_CONST_MAX_LMACS, cfg);
+
+ if (is_dev_rpm(cgx))
+ cgx->hw_features = (RVU_LMAC_FEAT_DMACF | RVU_MAC_RPM |
+ RVU_LMAC_FEAT_FC | RVU_LMAC_FEAT_PTP);
+ else
+ cgx->hw_features = (RVU_LMAC_FEAT_FC | RVU_LMAC_FEAT_HIGIG2 |
+ RVU_LMAC_FEAT_PTP | RVU_LMAC_FEAT_DMACF);
+}
+
+static u8 cgx_get_rxid_mapoffset(struct cgx *cgx)
+{
+ if (cgx->pdev->subsystem_device == PCI_SUBSYS_DEVID_CNF10KB_RPM ||
+ is_dev_rpm2(cgx))
+ return 0x80;
+ else
+ return 0x60;
+}
+
+static struct mac_ops cgx_mac_ops = {
+ .name = "cgx",
+ .csr_offset = 0,
+ .lmac_offset = 18,
+ .int_register = CGXX_CMRX_INT,
+ .int_set_reg = CGXX_CMRX_INT_ENA_W1S,
+ .irq_offset = 9,
+ .int_ena_bit = FW_CGX_INT,
+ .lmac_fwi = CGX_LMAC_FWI,
+ .non_contiguous_serdes_lane = false,
+ .rx_stats_cnt = 9,
+ .tx_stats_cnt = 18,
+ .dmac_filter_count = 32,
+ .get_nr_lmacs = cgx_get_nr_lmacs,
+ .get_lmac_type = cgx_get_lmac_type,
+ .lmac_fifo_len = cgx_get_lmac_fifo_len,
+ .mac_lmac_intl_lbk = cgx_lmac_internal_loopback,
+ .mac_get_rx_stats = cgx_get_rx_stats,
+ .mac_get_tx_stats = cgx_get_tx_stats,
+ .get_fec_stats = cgx_get_fec_stats,
+ .mac_enadis_rx_pause_fwding = cgx_lmac_enadis_rx_pause_fwding,
+ .mac_get_pause_frm_status = cgx_lmac_get_pause_frm_status,
+ .mac_enadis_pause_frm = cgx_lmac_enadis_pause_frm,
+ .mac_pause_frm_config = cgx_lmac_pause_frm_config,
+ .mac_enadis_ptp_config = cgx_lmac_ptp_config,
+ .mac_rx_tx_enable = cgx_lmac_rx_tx_enable,
+ .mac_tx_enable = cgx_lmac_tx_enable,
+ .pfc_config = cgx_lmac_pfc_config,
+ .mac_get_pfc_frm_cfg = cgx_lmac_get_pfc_frm_cfg,
+ .mac_reset = cgx_lmac_reset,
+};
+
+static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct device *dev = &pdev->dev;
+ struct cgx *cgx;
+ int err, nvec;
+
+ cgx = devm_kzalloc(dev, sizeof(*cgx), GFP_KERNEL);
+ if (!cgx)
+ return -ENOMEM;
+ cgx->pdev = pdev;
+
+ pci_set_drvdata(pdev, cgx);
+
+ /* Use mac_ops to get MAC specific features */
+ if (is_dev_rpm(cgx))
+ cgx->mac_ops = rpm_get_mac_ops(cgx);
+ else
+ cgx->mac_ops = &cgx_mac_ops;
+
+ cgx->mac_ops->rxid_map_offset = cgx_get_rxid_mapoffset(cgx);
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(dev, "Failed to enable PCI device\n");
+ pci_set_drvdata(pdev, NULL);
+ return err;
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ dev_err(dev, "PCI request regions failed 0x%x\n", err);
+ goto err_disable_device;
+ }
+
+ /* MAP configuration registers */
+ cgx->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
+ if (!cgx->reg_base) {
+ dev_err(dev, "CGX: Cannot map CSR memory space, aborting\n");
+ err = -ENOMEM;
+ goto err_release_regions;
+ }
+
+ cgx->lmac_count = cgx->mac_ops->get_nr_lmacs(cgx);
+ if (!cgx->lmac_count) {
+ dev_notice(dev, "CGX %d LMAC count is zero, skipping probe\n", cgx->cgx_id);
+ err = -EOPNOTSUPP;
+ goto err_release_regions;
+ }
+
+ nvec = pci_msix_vec_count(cgx->pdev);
+ err = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
+ if (err < 0 || err != nvec) {
+ dev_err(dev, "Request for %d msix vectors failed, err %d\n",
+ nvec, err);
+ goto err_release_regions;
+ }
+
+ cgx->cgx_id = (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24)
+ & CGX_ID_MASK;
+
+ /* init wq for processing linkup requests */
+ INIT_WORK(&cgx->cgx_cmd_work, cgx_lmac_linkup_work);
+ cgx->cgx_cmd_workq = alloc_workqueue("cgx_cmd_workq", 0, 0);
+ if (!cgx->cgx_cmd_workq) {
+ dev_err(dev, "alloc workqueue failed for cgx cmd");
+ err = -ENOMEM;
+ goto err_free_irq_vectors;
+ }
+
+ list_add(&cgx->cgx_list, &cgx_list);
+
+ cgx_populate_features(cgx);
+
+ mutex_init(&cgx->lock);
+
+ err = cgx_lmac_init(cgx);
+ if (err)
+ goto err_release_lmac;
+
+ return 0;
+
+err_release_lmac:
+ cgx_lmac_exit(cgx);
+ list_del(&cgx->cgx_list);
+err_free_irq_vectors:
+ pci_free_irq_vectors(pdev);
+err_release_regions:
+ pci_release_regions(pdev);
+err_disable_device:
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ return err;
+}
+
+static void cgx_remove(struct pci_dev *pdev)
+{
+ struct cgx *cgx = pci_get_drvdata(pdev);
+
+ if (cgx) {
+ cgx_lmac_exit(cgx);
+ list_del(&cgx->cgx_list);
+ }
+ pci_free_irq_vectors(pdev);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+}
+
+struct pci_driver cgx_driver = {
+ .name = DRV_NAME,
+ .id_table = cgx_id_table,
+ .probe = cgx_probe,
+ .remove = cgx_remove,
+};
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
new file mode 100644
index 000000000..6f7d1dee5
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
@@ -0,0 +1,187 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell OcteonTx2 CGX driver
+ *
+ * Copyright (C) 2018 Marvell.
+ *
+ */
+
+#ifndef CGX_H
+#define CGX_H
+
+#include "mbox.h"
+#include "cgx_fw_if.h"
+#include "rpm.h"
+
+/* PCI device IDs */
+#define PCI_DEVID_OCTEONTX2_CGX 0xA059
+
+/* PCI BAR nos */
+#define PCI_CFG_REG_BAR_NUM 0
+
+#define CGX_ID_MASK 0xF
+
+/* Registers */
+#define CGXX_CMRX_CFG 0x00
+#define CMR_P2X_SEL_MASK GENMASK_ULL(61, 59)
+#define CMR_P2X_SEL_SHIFT 59ULL
+#define CMR_P2X_SEL_NIX0 1ULL
+#define CMR_P2X_SEL_NIX1 2ULL
+#define DATA_PKT_TX_EN BIT_ULL(53)
+#define DATA_PKT_RX_EN BIT_ULL(54)
+#define CGX_LMAC_TYPE_SHIFT 40
+#define CGX_LMAC_TYPE_MASK 0xF
+#define CGXX_CMRX_INT 0x040
+#define FW_CGX_INT BIT_ULL(1)
+#define CGXX_CMRX_INT_ENA_W1S 0x058
+#define CGXX_CMRX_RX_ID_MAP 0x060
+#define CGXX_CMRX_RX_STAT0 0x070
+#define CGXX_CMRX_RX_LOGL_XON 0x100
+#define CGXX_CMRX_RX_LMACS 0x128
+#define CGXX_CMRX_RX_DMAC_CTL0 (0x1F8 + mac_ops->csr_offset)
+#define CGX_DMAC_CTL0_CAM_ENABLE BIT_ULL(3)
+#define CGX_DMAC_CAM_ACCEPT BIT_ULL(3)
+#define CGX_DMAC_MCAST_MODE_CAM BIT_ULL(2)
+#define CGX_DMAC_MCAST_MODE BIT_ULL(1)
+#define CGX_DMAC_BCAST_MODE BIT_ULL(0)
+#define CGXX_CMRX_RX_DMAC_CAM0 (0x200 + mac_ops->csr_offset)
+#define CGX_DMAC_CAM_ADDR_ENABLE BIT_ULL(48)
+#define CGX_DMAC_CAM_ENTRY_LMACID GENMASK_ULL(50, 49)
+#define CGXX_CMRX_RX_DMAC_CAM1 0x400
+#define CGX_RX_DMAC_ADR_MASK GENMASK_ULL(47, 0)
+#define CGXX_CMRX_TX_STAT0 0x700
+#define CGXX_SCRATCH0_REG 0x1050
+#define CGXX_SCRATCH1_REG 0x1058
+#define CGX_CONST 0x2000
+#define CGX_CONST_RXFIFO_SIZE GENMASK_ULL(55, 32)
+#define CGX_CONST_MAX_LMACS GENMASK_ULL(31, 24)
+#define CGXX_SPUX_CONTROL1 0x10000
+#define CGXX_SPUX_LNX_FEC_CORR_BLOCKS 0x10700
+#define CGXX_SPUX_LNX_FEC_UNCORR_BLOCKS 0x10800
+#define CGXX_SPUX_RSFEC_CORR 0x10088
+#define CGXX_SPUX_RSFEC_UNCORR 0x10090
+
+#define CGXX_SPUX_CONTROL1_LBK BIT_ULL(14)
+#define CGXX_GMP_PCS_MRX_CTL 0x30000
+#define CGXX_GMP_PCS_MRX_CTL_LBK BIT_ULL(14)
+
+#define CGXX_SMUX_RX_FRM_CTL 0x20020
+#define CGX_SMUX_RX_FRM_CTL_CTL_BCK BIT_ULL(3)
+#define CGX_SMUX_RX_FRM_CTL_PTP_MODE BIT_ULL(12)
+#define CGXX_GMP_GMI_RXX_FRM_CTL 0x38028
+#define CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK BIT_ULL(3)
+#define CGX_GMP_GMI_RXX_FRM_CTL_PTP_MODE BIT_ULL(12)
+#define CGXX_SMUX_TX_CTL 0x20178
+#define CGXX_SMUX_TX_PAUSE_PKT_TIME 0x20110
+#define CGXX_SMUX_TX_PAUSE_PKT_INTERVAL 0x20120
+#define CGXX_SMUX_SMAC 0x20108
+#define CGXX_SMUX_CBFC_CTL 0x20218
+#define CGXX_SMUX_CBFC_CTL_RX_EN BIT_ULL(0)
+#define CGXX_SMUX_CBFC_CTL_TX_EN BIT_ULL(1)
+#define CGXX_SMUX_CBFC_CTL_DRP_EN BIT_ULL(2)
+#define CGXX_SMUX_CBFC_CTL_BCK_EN BIT_ULL(3)
+#define CGX_PFC_CLASS_MASK GENMASK_ULL(47, 32)
+#define CGXX_GMP_GMI_TX_PAUSE_PKT_TIME 0x38230
+#define CGXX_GMP_GMI_TX_PAUSE_PKT_INTERVAL 0x38248
+#define CGX_SMUX_TX_CTL_L2P_BP_CONV BIT_ULL(7)
+#define CGXX_CMR_RX_OVR_BP 0x130
+#define CGX_CMR_RX_OVR_BP_EN(X) BIT_ULL(((X) + 8))
+#define CGX_CMR_RX_OVR_BP_BP(X) BIT_ULL(((X) + 4))
+
+#define CGX_COMMAND_REG CGXX_SCRATCH1_REG
+#define CGX_EVENT_REG CGXX_SCRATCH0_REG
+#define CGX_CMD_TIMEOUT 5000 /* msecs */
+#define DEFAULT_PAUSE_TIME 0x7FF
+
+#define CGX_LMAC_FWI 0
+
+enum cgx_nix_stat_type {
+ NIX_STATS_RX,
+ NIX_STATS_TX,
+};
+
+enum LMAC_TYPE {
+ LMAC_MODE_SGMII = 0,
+ LMAC_MODE_XAUI = 1,
+ LMAC_MODE_RXAUI = 2,
+ LMAC_MODE_10G_R = 3,
+ LMAC_MODE_40G_R = 4,
+ LMAC_MODE_QSGMII = 6,
+ LMAC_MODE_25G_R = 7,
+ LMAC_MODE_50G_R = 8,
+ LMAC_MODE_100G_R = 9,
+ LMAC_MODE_USXGMII = 10,
+ LMAC_MODE_USGMII = 11,
+ LMAC_MODE_MAX,
+};
+
+struct cgx_link_event {
+ struct cgx_link_user_info link_uinfo;
+ u8 cgx_id;
+ u8 lmac_id;
+};
+
+/**
+ * struct cgx_event_cb
+ * @notify_link_chg: callback for link change notification
+ * @data: data passed to callback function
+ */
+struct cgx_event_cb {
+ int (*notify_link_chg)(struct cgx_link_event *event, void *data);
+ void *data;
+};
+
+extern struct pci_driver cgx_driver;
+
+int cgx_get_cgxcnt_max(void);
+int cgx_get_cgxid(void *cgxd);
+int cgx_get_lmac_cnt(void *cgxd);
+void *cgx_get_pdata(int cgx_id);
+int cgx_set_pkind(void *cgxd, u8 lmac_id, int pkind);
+int cgx_lmac_evh_register(struct cgx_event_cb *cb, void *cgxd, int lmac_id);
+int cgx_lmac_evh_unregister(void *cgxd, int lmac_id);
+int cgx_get_tx_stats(void *cgxd, int lmac_id, int idx, u64 *tx_stat);
+int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat);
+int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable);
+int cgx_lmac_tx_enable(void *cgxd, int lmac_id, bool enable);
+int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr);
+int cgx_lmac_addr_reset(u8 cgx_id, u8 lmac_id);
+u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id);
+int cgx_lmac_addr_add(u8 cgx_id, u8 lmac_id, u8 *mac_addr);
+int cgx_lmac_addr_del(u8 cgx_id, u8 lmac_id, u8 index);
+int cgx_lmac_addr_max_entries_get(u8 cgx_id, u8 lmac_id);
+void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable);
+void cgx_lmac_enadis_rx_pause_fwding(void *cgxd, int lmac_id, bool enable);
+int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable);
+int cgx_get_link_info(void *cgxd, int lmac_id,
+ struct cgx_link_user_info *linfo);
+int cgx_lmac_linkup_start(void *cgxd);
+int cgx_get_fwdata_base(u64 *base);
+int cgx_lmac_get_pause_frm(void *cgxd, int lmac_id,
+ u8 *tx_pause, u8 *rx_pause);
+int cgx_lmac_set_pause_frm(void *cgxd, int lmac_id,
+ u8 tx_pause, u8 rx_pause);
+void cgx_lmac_ptp_config(void *cgxd, int lmac_id, bool enable);
+u8 cgx_lmac_get_p2x(int cgx_id, int lmac_id);
+int cgx_set_fec(u64 fec, int cgx_id, int lmac_id);
+int cgx_get_fec_stats(void *cgxd, int lmac_id, struct cgx_fec_stats_rsp *rsp);
+int cgx_get_phy_fec_stats(void *cgxd, int lmac_id);
+int cgx_set_link_mode(void *cgxd, struct cgx_set_link_mode_args args,
+ int cgx_id, int lmac_id);
+u64 cgx_features_get(void *cgxd);
+struct mac_ops *get_mac_ops(void *cgxd);
+int cgx_get_nr_lmacs(void *cgxd);
+u8 cgx_get_lmacid(void *cgxd, u8 lmac_index);
+unsigned long cgx_get_lmac_bmap(void *cgxd);
+void cgx_lmac_write(int cgx_id, int lmac_id, u64 offset, u64 val);
+u64 cgx_lmac_read(int cgx_id, int lmac_id, u64 offset);
+int cgx_lmac_addr_update(u8 cgx_id, u8 lmac_id, u8 *mac_addr, u8 index);
+u64 cgx_read_dmac_ctrl(void *cgxd, int lmac_id);
+u64 cgx_read_dmac_entry(void *cgxd, int index);
+int cgx_lmac_pfc_config(void *cgxd, int lmac_id, u8 tx_pause, u8 rx_pause,
+ u16 pfc_en);
+int cgx_lmac_get_pfc_frm_cfg(void *cgxd, int lmac_id, u8 *tx_pause,
+ u8 *rx_pause);
+int verify_lmac_fc_cfg(void *cgxd, int lmac_id, u8 tx_pause, u8 rx_pause,
+ int pfvf_idx);
+int cgx_lmac_reset(void *cgxd, int lmac_id, u8 pf_req_flr);
+#endif /* CGX_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
new file mode 100644
index 000000000..d4a27c882
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
@@ -0,0 +1,266 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell OcteonTx2 CGX driver
+ *
+ * Copyright (C) 2018 Marvell.
+ *
+ */
+
+#ifndef __CGX_FW_INTF_H__
+#define __CGX_FW_INTF_H__
+
+#include <linux/bitops.h>
+#include <linux/bitfield.h>
+
+#define CGX_FIRMWARE_MAJOR_VER 1
+#define CGX_FIRMWARE_MINOR_VER 0
+
+#define CGX_EVENT_ACK 1UL
+
+/* CGX error types. Set when the cmd response status is CGX_STAT_FAIL */
+enum cgx_error_type {
+ CGX_ERR_NONE,
+ CGX_ERR_LMAC_NOT_ENABLED,
+ CGX_ERR_LMAC_MODE_INVALID,
+ CGX_ERR_REQUEST_ID_INVALID,
+ CGX_ERR_PREV_ACK_NOT_CLEAR,
+ CGX_ERR_PHY_LINK_DOWN,
+ CGX_ERR_PCS_RESET_FAIL,
+ CGX_ERR_AN_CPT_FAIL,
+ CGX_ERR_TX_NOT_IDLE,
+ CGX_ERR_RX_NOT_IDLE,
+ CGX_ERR_SPUX_BR_BLKLOCK_FAIL,
+ CGX_ERR_SPUX_RX_ALIGN_FAIL,
+ CGX_ERR_SPUX_TX_FAULT,
+ CGX_ERR_SPUX_RX_FAULT,
+ CGX_ERR_SPUX_RESET_FAIL,
+ CGX_ERR_SPUX_AN_RESET_FAIL,
+ CGX_ERR_SPUX_USX_AN_RESET_FAIL,
+ CGX_ERR_SMUX_RX_LINK_NOT_OK,
+ CGX_ERR_PCS_RECV_LINK_FAIL,
+ CGX_ERR_TRAINING_FAIL,
+ CGX_ERR_RX_EQU_FAIL,
+ CGX_ERR_SPUX_BER_FAIL,
+ CGX_ERR_SPUX_RSFEC_ALGN_FAIL,
+ CGX_ERR_SPUX_MARKER_LOCK_FAIL,
+ CGX_ERR_SET_FEC_INVALID,
+ CGX_ERR_SET_FEC_FAIL,
+ CGX_ERR_MODULE_INVALID,
+ CGX_ERR_MODULE_NOT_PRESENT,
+ CGX_ERR_SPEED_CHANGE_INVALID,
+};
+
+/* LINK speed types */
+enum cgx_link_speed {
+ CGX_LINK_NONE,
+ CGX_LINK_10M,
+ CGX_LINK_100M,
+ CGX_LINK_1G,
+ CGX_LINK_2HG,
+ CGX_LINK_5G,
+ CGX_LINK_10G,
+ CGX_LINK_20G,
+ CGX_LINK_25G,
+ CGX_LINK_40G,
+ CGX_LINK_50G,
+ CGX_LINK_80G,
+ CGX_LINK_100G,
+ CGX_LINK_SPEED_MAX,
+};
+
+enum CGX_MODE_ {
+ CGX_MODE_SGMII,
+ CGX_MODE_1000_BASEX,
+ CGX_MODE_QSGMII,
+ CGX_MODE_10G_C2C,
+ CGX_MODE_10G_C2M,
+ CGX_MODE_10G_KR,
+ CGX_MODE_20G_C2C,
+ CGX_MODE_25G_C2C,
+ CGX_MODE_25G_C2M,
+ CGX_MODE_25G_2_C2C,
+ CGX_MODE_25G_CR,
+ CGX_MODE_25G_KR,
+ CGX_MODE_40G_C2C,
+ CGX_MODE_40G_C2M,
+ CGX_MODE_40G_CR4,
+ CGX_MODE_40G_KR4,
+ CGX_MODE_40GAUI_C2C,
+ CGX_MODE_50G_C2C,
+ CGX_MODE_50G_C2M,
+ CGX_MODE_50G_4_C2C,
+ CGX_MODE_50G_CR,
+ CGX_MODE_50G_KR,
+ CGX_MODE_80GAUI_C2C,
+ CGX_MODE_100G_C2C,
+ CGX_MODE_100G_C2M,
+ CGX_MODE_100G_CR4,
+ CGX_MODE_100G_KR4,
+ CGX_MODE_MAX /* = 29 */
+};
+
+/* REQUEST ID types. Input to firmware */
+enum cgx_cmd_id {
+ CGX_CMD_NONE,
+ CGX_CMD_GET_FW_VER,
+ CGX_CMD_GET_MAC_ADDR,
+ CGX_CMD_SET_MTU,
+ CGX_CMD_GET_LINK_STS, /* optional to user */
+ CGX_CMD_LINK_BRING_UP,
+ CGX_CMD_LINK_BRING_DOWN,
+ CGX_CMD_INTERNAL_LBK,
+ CGX_CMD_EXTERNAL_LBK,
+ CGX_CMD_HIGIG,
+ CGX_CMD_LINK_STAT_CHANGE,
+ CGX_CMD_MODE_CHANGE, /* hot plug support */
+ CGX_CMD_INTF_SHUTDOWN,
+ CGX_CMD_GET_MKEX_PRFL_SIZE,
+ CGX_CMD_GET_MKEX_PRFL_ADDR,
+ CGX_CMD_GET_FWD_BASE, /* get base address of shared FW data */
+ CGX_CMD_GET_LINK_MODES, /* Supported Link Modes */
+ CGX_CMD_SET_LINK_MODE,
+ CGX_CMD_GET_SUPPORTED_FEC,
+ CGX_CMD_SET_FEC,
+ CGX_CMD_GET_AN,
+ CGX_CMD_SET_AN,
+ CGX_CMD_GET_ADV_LINK_MODES,
+ CGX_CMD_GET_ADV_FEC,
+ CGX_CMD_GET_PHY_MOD_TYPE, /* line-side modulation type: NRZ or PAM4 */
+ CGX_CMD_SET_PHY_MOD_TYPE,
+ CGX_CMD_PRBS,
+ CGX_CMD_DISPLAY_EYE,
+ CGX_CMD_GET_PHY_FEC_STATS,
+};
+
+/* async event ids */
+enum cgx_evt_id {
+ CGX_EVT_NONE,
+ CGX_EVT_LINK_CHANGE,
+};
+
+/* event types - cause of interrupt */
+enum cgx_evt_type {
+ CGX_EVT_ASYNC,
+ CGX_EVT_CMD_RESP
+};
+
+enum cgx_stat {
+ CGX_STAT_SUCCESS,
+ CGX_STAT_FAIL
+};
+
+enum cgx_cmd_own {
+ CGX_CMD_OWN_NS,
+ CGX_CMD_OWN_FIRMWARE,
+};
+
+/* m - bit mask
+ * y - value to be written in the bit range
+ * x - input value whose bit range is to be modified
+ */
+#define FIELD_SET(m, y, x) \
+ (((x) & ~(m)) | \
+ FIELD_PREP((m), (y)))
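+
+/* Example usage (illustrative sketch only, not taken from this patch):
+ * building a command word by setting the command ID and ownership fields,
+ * assuming the CMDREG_* masks and cgx_cmd_own/cgx_cmd_id values defined
+ * in this file:
+ *
+ *     u64 req = 0;
+ *
+ *     req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FW_VER, req);
+ *     req = FIELD_SET(CMDREG_OWN, CGX_CMD_OWN_FIRMWARE, req);
+ */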
+
+/* scratchx(0) CSR used for ATF->non-secure SW communication.
+ * This acts as the status register.
+ * It provides details on command ack/status, command response and error details
+ */
+#define EVTREG_ACK BIT_ULL(0)
+#define EVTREG_EVT_TYPE BIT_ULL(1)
+#define EVTREG_STAT BIT_ULL(2)
+#define EVTREG_ID GENMASK_ULL(8, 3)
+
+/* Response to command IDs with command status as CGX_STAT_FAIL
+ *
+ * Not applicable for commands:
+ * CGX_CMD_LINK_BRING_UP/DOWN/CGX_EVT_LINK_CHANGE
+ */
+#define EVTREG_ERRTYPE GENMASK_ULL(18, 9)
+
+/* Response to cmd ID as CGX_CMD_GET_FW_VER with cmd status as
+ * CGX_STAT_SUCCESS
+ */
+#define RESP_MAJOR_VER GENMASK_ULL(12, 9)
+#define RESP_MINOR_VER GENMASK_ULL(16, 13)
+
+/* Response to cmd ID as CGX_CMD_GET_MAC_ADDR with cmd status as
+ * CGX_STAT_SUCCESS
+ */
+#define RESP_MAC_ADDR GENMASK_ULL(56, 9)
+
+/* Response to cmd ID as CGX_CMD_GET_MKEX_PRFL_SIZE with cmd status as
+ * CGX_STAT_SUCCESS
+ */
+#define RESP_MKEX_PRFL_SIZE GENMASK_ULL(63, 9)
+
+/* Response to cmd ID as CGX_CMD_GET_MKEX_PRFL_ADDR with cmd status as
+ * CGX_STAT_SUCCESS
+ */
+#define RESP_MKEX_PRFL_ADDR GENMASK_ULL(63, 9)
+
+/* Response to cmd ID as CGX_CMD_GET_FWD_BASE with cmd status as
+ * CGX_STAT_SUCCESS
+ */
+#define RESP_FWD_BASE GENMASK_ULL(56, 9)
+#define RESP_LINKSTAT_LMAC_TYPE GENMASK_ULL(35, 28)
+
+/* Response to cmd ID - CGX_CMD_LINK_BRING_UP/DOWN, event ID CGX_EVT_LINK_CHANGE
+ * status can be either CGX_STAT_FAIL or CGX_STAT_SUCCESS
+ *
+ * In case of CGX_STAT_FAIL, it indicates CGX configuration failed
+ * when processing link up/down/change command.
+ * Both err_type and current link status will be updated
+ *
+ * In case of CGX_STAT_SUCCESS, err_type will be CGX_ERR_NONE and current
+ * link status will be updated
+ */
+struct cgx_lnk_sts {
+ uint64_t reserved1:9;
+ uint64_t link_up:1;
+ uint64_t full_duplex:1;
+ uint64_t speed:4; /* cgx_link_speed */
+ uint64_t err_type:10;
+ uint64_t an:1; /* AN supported or not */
+ uint64_t fec:2; /* FEC type if enabled, if not 0 */
+ uint64_t port:8;
+ uint64_t reserved2:28;
+};
+
+#define RESP_LINKSTAT_UP GENMASK_ULL(9, 9)
+#define RESP_LINKSTAT_FDUPLEX GENMASK_ULL(10, 10)
+#define RESP_LINKSTAT_SPEED GENMASK_ULL(14, 11)
+#define RESP_LINKSTAT_ERRTYPE GENMASK_ULL(24, 15)
+#define RESP_LINKSTAT_AN GENMASK_ULL(25, 25)
+#define RESP_LINKSTAT_FEC GENMASK_ULL(27, 26)
+#define RESP_LINKSTAT_PORT GENMASK_ULL(35, 28)
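+
+/* Illustrative sketch (assumed usage, not part of the original patch):
+ * decoding a link status word 'lstat' read back from the event register
+ * with the RESP_LINKSTAT_* fields above. FIELD_GET() comes from
+ * linux/bitfield.h, which this header already includes:
+ *
+ *     link_up  = FIELD_GET(RESP_LINKSTAT_UP, lstat);
+ *     speed    = FIELD_GET(RESP_LINKSTAT_SPEED, lstat);    - enum cgx_link_speed
+ *     err_type = FIELD_GET(RESP_LINKSTAT_ERRTYPE, lstat);  - enum cgx_error_type
+ */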
+
+/* scratchx(1) CSR used for non-secure SW->ATF communication
+ * This CSR acts as a command register
+ */
+#define CMDREG_OWN BIT_ULL(0)
+#define CMDREG_ID GENMASK_ULL(7, 2)
+
+/* Any command using enable/disable as an argument needs
+ * to set this bitfield.
+ * Ex: Loopback, HiGig...
+ */
+#define CMDREG_ENABLE BIT_ULL(8)
+
+/* command argument to be passed for cmd ID - CGX_CMD_SET_MTU */
+#define CMDMTU_SIZE GENMASK_ULL(23, 8)
+
+/* command argument to be passed for cmd ID - CGX_CMD_LINK_CHANGE */
+#define CMDLINKCHANGE_LINKUP BIT_ULL(8)
+#define CMDLINKCHANGE_FULLDPLX BIT_ULL(9)
+#define CMDLINKCHANGE_SPEED GENMASK_ULL(13, 10)
+
+#define CMDSETFEC GENMASK_ULL(9, 8)
+/* command argument to be passed for cmd ID - CGX_CMD_MODE_CHANGE */
+#define CMDMODECHANGE_SPEED GENMASK_ULL(11, 8)
+#define CMDMODECHANGE_DUPLEX GENMASK_ULL(12, 12)
+#define CMDMODECHANGE_AN GENMASK_ULL(13, 13)
+#define CMDMODECHANGE_PORT GENMASK_ULL(21, 14)
+#define CMDMODECHANGE_FLAGS GENMASK_ULL(63, 22)
+
+/* LINK_BRING_UP command timeout */
+#define LINKCFG_TIMEOUT GENMASK_ULL(21, 8)
+#endif /* __CGX_FW_INTF_H__ */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/common.h b/drivers/net/ethernet/marvell/octeontx2/af/common.h
new file mode 100644
index 000000000..2436c1ff9
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/common.h
@@ -0,0 +1,241 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell.
+ */
+
+#ifndef COMMON_H
+#define COMMON_H
+
+#include "rvu_struct.h"
+
+#define OTX2_ALIGN 128 /* Align to cacheline */
+
+#define Q_SIZE_16 0ULL /* 16 entries */
+#define Q_SIZE_64 1ULL /* 64 entries */
+#define Q_SIZE_256 2ULL
+#define Q_SIZE_1K 3ULL
+#define Q_SIZE_4K 4ULL
+#define Q_SIZE_16K 5ULL
+#define Q_SIZE_64K 6ULL
+#define Q_SIZE_256K 7ULL
+#define Q_SIZE_1M 8ULL /* Million entries */
+#define Q_SIZE_MIN Q_SIZE_16
+#define Q_SIZE_MAX Q_SIZE_1M
+
+#define Q_COUNT(x) (16ULL << (2 * x))
+#define Q_SIZE(x, n) ((ilog2(x) - (n)) / 2)
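+
+/* Worked examples of the encodings above (illustrative, not from the patch):
+ * Q_COUNT(Q_SIZE_16) = 16ULL << 0 = 16 entries and
+ * Q_COUNT(Q_SIZE_256) = 16ULL << 4 = 256 entries, while Q_SIZE() is the
+ * inverse for a base of 16 entries: Q_SIZE(256, 4) = (8 - 4) / 2 = Q_SIZE_256.
+ */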
+
+/* Admin queue info */
+
+/* Since we intend to add only one instruction at a time,
+ * keep queue size to its minimum.
+ */
+#define AQ_SIZE Q_SIZE_16
+/* HW head & tail pointer mask */
+#define AQ_PTR_MASK 0xFFFFF
+
+struct qmem {
+ void *base;
+ dma_addr_t iova;
+ int alloc_sz;
+ u16 entry_sz;
+ u8 align;
+ u32 qsize;
+};
+
+static inline int qmem_alloc(struct device *dev, struct qmem **q,
+ int qsize, int entry_sz)
+{
+ struct qmem *qmem;
+ int aligned_addr;
+
+ if (!qsize)
+ return -EINVAL;
+
+ *q = devm_kzalloc(dev, sizeof(*qmem), GFP_KERNEL);
+ if (!*q)
+ return -ENOMEM;
+ qmem = *q;
+
+ qmem->entry_sz = entry_sz;
+ qmem->alloc_sz = (qsize * entry_sz) + OTX2_ALIGN;
+ qmem->base = dma_alloc_attrs(dev, qmem->alloc_sz, &qmem->iova,
+ GFP_KERNEL, DMA_ATTR_FORCE_CONTIGUOUS);
+ if (!qmem->base)
+ return -ENOMEM;
+
+ qmem->qsize = qsize;
+
+ aligned_addr = ALIGN((u64)qmem->iova, OTX2_ALIGN);
+ qmem->align = (aligned_addr - qmem->iova);
+ qmem->base += qmem->align;
+ qmem->iova += qmem->align;
+ return 0;
+}
+
+static inline void qmem_free(struct device *dev, struct qmem *qmem)
+{
+ if (!qmem)
+ return;
+
+ if (qmem->base)
+ dma_free_attrs(dev, qmem->alloc_sz,
+ qmem->base - qmem->align,
+ qmem->iova - qmem->align,
+ DMA_ATTR_FORCE_CONTIGUOUS);
+ devm_kfree(dev, qmem);
+}
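+
+/* Usage sketch (illustrative only; 'dev' and the 128-byte entry size are
+ * assumptions, not taken from this file): allocate a cacheline-aligned,
+ * DMA'able queue of Q_COUNT(AQ_SIZE) entries and free it again:
+ *
+ *     struct qmem *q;
+ *     int err;
+ *
+ *     err = qmem_alloc(dev, &q, Q_COUNT(AQ_SIZE), 128);
+ *     if (err)
+ *             return err;
+ *     ...
+ *     qmem_free(dev, q);
+ */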
+
+struct admin_queue {
+ struct qmem *inst;
+ struct qmem *res;
+ spinlock_t lock; /* Serialize inst enqueue from PFs */
+};
+
+/* NPA aura count */
+enum npa_aura_sz {
+ NPA_AURA_SZ_0,
+ NPA_AURA_SZ_128,
+ NPA_AURA_SZ_256,
+ NPA_AURA_SZ_512,
+ NPA_AURA_SZ_1K,
+ NPA_AURA_SZ_2K,
+ NPA_AURA_SZ_4K,
+ NPA_AURA_SZ_8K,
+ NPA_AURA_SZ_16K,
+ NPA_AURA_SZ_32K,
+ NPA_AURA_SZ_64K,
+ NPA_AURA_SZ_128K,
+ NPA_AURA_SZ_256K,
+ NPA_AURA_SZ_512K,
+ NPA_AURA_SZ_1M,
+ NPA_AURA_SZ_MAX,
+};
+
+#define NPA_AURA_COUNT(x) (1ULL << ((x) + 6))
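+
+/* Worked example (illustrative arithmetic, not part of the original comment):
+ * NPA_AURA_COUNT(NPA_AURA_SZ_128) = 1ULL << (1 + 6) = 128 auras and
+ * NPA_AURA_COUNT(NPA_AURA_SZ_1M) = 1ULL << (14 + 6) = 1M auras.
+ */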
+
+/* NPA AQ result structure for init/read/write of aura HW contexts */
+struct npa_aq_aura_res {
+ struct npa_aq_res_s res;
+ struct npa_aura_s aura_ctx;
+ struct npa_aura_s ctx_mask;
+};
+
+/* NPA AQ result structure for init/read/write of pool HW contexts */
+struct npa_aq_pool_res {
+ struct npa_aq_res_s res;
+ struct npa_pool_s pool_ctx;
+ struct npa_pool_s ctx_mask;
+};
+
+/* NIX Transmit schedulers */
+enum nix_scheduler {
+ NIX_TXSCH_LVL_SMQ = 0x0,
+ NIX_TXSCH_LVL_MDQ = 0x0,
+ NIX_TXSCH_LVL_TL4 = 0x1,
+ NIX_TXSCH_LVL_TL3 = 0x2,
+ NIX_TXSCH_LVL_TL2 = 0x3,
+ NIX_TXSCH_LVL_TL1 = 0x4,
+ NIX_TXSCH_LVL_CNT = 0x5,
+};
+
+#define TXSCH_RR_QTM_MAX ((1 << 24) - 1)
+#define TXSCH_TL1_DFLT_RR_QTM TXSCH_RR_QTM_MAX
+#define TXSCH_TL1_DFLT_RR_PRIO (0x7ull)
+#define CN10K_MAX_DWRR_WEIGHT 16384 /* Weight is 14bit on CN10K */
+
+/* Don't change the order, as on CN10K (except CN10KB)
+ * the SMQX_CFG[SDP] value should be 1 for SDP flows.
+ */
+#define SMQ_LINK_TYPE_RPM 0
+#define SMQ_LINK_TYPE_SDP 1
+#define SMQ_LINK_TYPE_LBK 2
+
+/* Min/Max packet sizes, excluding FCS */
+#define NIC_HW_MIN_FRS 40
+#define NIC_HW_MAX_FRS 9212
+#define SDP_HW_MAX_FRS 65535
+#define CN10K_LMAC_LINK_MAX_FRS 16380 /* 16k - FCS */
+#define CN10K_LBK_LINK_MAX_FRS 65535 /* 64k */
+
+/* NIX RX action operation */
+#define NIX_RX_ACTIONOP_DROP (0x0ull)
+#define NIX_RX_ACTIONOP_UCAST (0x1ull)
+#define NIX_RX_ACTIONOP_UCAST_IPSEC (0x2ull)
+#define NIX_RX_ACTIONOP_MCAST (0x3ull)
+#define NIX_RX_ACTIONOP_RSS (0x4ull)
+/* Use the RX action set in the default unicast entry */
+#define NIX_RX_ACTION_DEFAULT (0xfull)
+
+/* NIX TX action operation */
+#define NIX_TX_ACTIONOP_DROP (0x0ull)
+#define NIX_TX_ACTIONOP_UCAST_DEFAULT (0x1ull)
+#define NIX_TX_ACTIONOP_UCAST_CHAN (0x2ull)
+#define NIX_TX_ACTIONOP_MCAST (0x3ull)
+#define NIX_TX_ACTIONOP_DROP_VIOL (0x5ull)
+
+#define NPC_MCAM_KEY_X1 0
+#define NPC_MCAM_KEY_X2 1
+#define NPC_MCAM_KEY_X4 2
+
+#define NIX_INTFX_RX(a) (0x0ull | (a) << 1)
+#define NIX_INTFX_TX(a) (0x1ull | (a) << 1)
+
+/* Default interfaces are NIX0_RX and NIX0_TX */
+#define NIX_INTF_RX NIX_INTFX_RX(0)
+#define NIX_INTF_TX NIX_INTFX_TX(0)
+
+#define NIX_INTF_TYPE_CGX 0
+#define NIX_INTF_TYPE_LBK 1
+#define NIX_INTF_TYPE_SDP 2
+
+#define MAX_LMAC_PKIND 12
+#define NIX_LINK_CGX_LMAC(a, b) (0 + 4 * (a) + (b))
+#define NIX_LINK_LBK(a) (12 + (a))
+#define NIX_CHAN_CGX_LMAC_CHX(a, b, c) (0x800 + 0x100 * (a) + 0x10 * (b) + (c))
+#define NIX_CHAN_LBK_CHX(a, b) (0 + 0x100 * (a) + (b))
+#define NIX_CHAN_SDP_CH_START (0x700ull)
+#define NIX_CHAN_SDP_CHX(a) (NIX_CHAN_SDP_CH_START + (a))
+#define NIX_CHAN_SDP_NUM_CHANS 256
+#define NIX_CHAN_CPT_CH_START (0x800ull)
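+
+/* Worked example (illustrative, not from the original patch): channel 0 of
+ * LMAC 2 on CGX 1 maps to NIX channel
+ * NIX_CHAN_CGX_LMAC_CHX(1, 2, 0) = 0x800 + 0x100 + 0x20 = 0x920,
+ * and its NIX link is NIX_LINK_CGX_LMAC(1, 2) = 4 * 1 + 2 = 6.
+ */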
+
+/* The mask is to extract lower 10-bits of channel number
+ * which CPT will pass to X2P.
+ */
+#define NIX_CHAN_CPT_X2P_MASK (0x3ffull)
+
+/* NIX LSO format indices.
+ * As of now TSO is the only user, so indices are statically assigned.
+ */
+#define NIX_LSO_FORMAT_IDX_TSOV4 0
+#define NIX_LSO_FORMAT_IDX_TSOV6 1
+
+/* RSS info */
+#define MAX_RSS_GROUPS 8
+/* Group 0 has to be used in default pkt forwarding MCAM entries
+ * reserved for NIXLFs. Groups 1-7 can be used for RSS for ntuple
+ * filters.
+ */
+#define DEFAULT_RSS_CONTEXT_GROUP 0
+#define MAX_RSS_INDIR_TBL_SIZE 256 /* 1 << Max adder bits */
+
+/* NDC info */
+enum ndc_idx_e {
+ NIX0_RX = 0x0,
+ NIX0_TX = 0x1,
+ NPA0_U = 0x2,
+ NIX1_RX = 0x4,
+ NIX1_TX = 0x5,
+};
+
+enum ndc_ctype_e {
+ CACHING = 0x0,
+ BYPASS = 0x1,
+};
+
+#define NDC_MAX_PORT 6
+#define NDC_READ_TRANS 0
+#define NDC_WRITE_TRANS 1
+
+#endif /* COMMON_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
new file mode 100644
index 000000000..0b4cba03f
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
@@ -0,0 +1,169 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell CN10K RPM driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#ifndef LMAC_COMMON_H
+#define LMAC_COMMON_H
+
+#include "rvu.h"
+#include "cgx.h"
+/**
+ * struct lmac - per lmac locks and properties
+ * @wq_cmd_cmplt: waitq to keep the process blocked until cmd completion
+ * @cmd_lock: Lock to serialize the command interface
+ * @resp: command response
+ * @link_info: link related information
+ * @mac_to_index_bmap: Mac address to CGX table index mapping
+ * @rx_fc_pfvf_bmap: Receive flow control enabled netdev mapping
+ * @tx_fc_pfvf_bmap: Transmit flow control enabled netdev mapping
+ * @event_cb: callback for linkchange events
+ * @event_cb_lock: lock for serializing callback with unregister
+ * @cgx: parent cgx port
+ * @mcast_filters_count: Number of multicast filters installed
+ * @lmac_id: lmac port id
+ * @lmac_type: lmac type like SGMII/XAUI
+ * @cmd_pend: flag set before new command is started
+ * flag cleared after command response is received
+ * @name: lmac port name
+ */
+struct lmac {
+ wait_queue_head_t wq_cmd_cmplt;
+ /* Lock to serialize the command interface */
+ struct mutex cmd_lock;
+ u64 resp;
+ struct cgx_link_user_info link_info;
+ struct rsrc_bmap mac_to_index_bmap;
+ struct rsrc_bmap rx_fc_pfvf_bmap;
+ struct rsrc_bmap tx_fc_pfvf_bmap;
+ struct cgx_event_cb event_cb;
+ /* lock for serializing callback with unregister */
+ spinlock_t event_cb_lock;
+ struct cgx *cgx;
+ u8 mcast_filters_count;
+ u8 lmac_id;
+ u8 lmac_type;
+ bool cmd_pend;
+ char *name;
+};
+
+/* CGX & RPM have different feature sets, so the structure
+ * fields below are filled with the appropriate values for each.
+ */
+struct mac_ops {
+ char *name;
+ /* CSRs for features like RXSTAT, TXSTAT and DMAC FILTER differ by a
+ * fixed BAR offset, for example:
+ * CGX DMAC_CTL0 0x1f8
+ * RPM DMAC_CTL0 0x4ff8
+ */
+ u64 csr_offset;
+ /* There is no dedicated interrupt defined for ATF to send events to the
+ * kernel, hence CGX uses the OVERFLOW bit in CMR_INT. The RPM block
+ * supports SW_INT, so ATF triggers that interrupt after processing the
+ * requested command.
+ */
+ u64 int_register;
+ u64 int_set_reg;
+ /* lmac offset is different in RPM */
+ u8 lmac_offset;
+ u8 irq_offset;
+ u8 int_ena_bit;
+ u8 lmac_fwi;
+ u32 fifo_len;
+ bool non_contiguous_serdes_lane;
+ /* RPM & CGX differ in the number of receive/transmit stats */
+ u8 rx_stats_cnt;
+ u8 tx_stats_cnt;
+ /* Unlike CN10K, which shares the same CSR offset with CGX,
+ * CNF10KB has a different CSR offset
+ */
+ u64 rxid_map_offset;
+ u8 dmac_filter_count;
+ /* In case of RPM, get the number of LMACs from RPMX_CMR_RX_LMACS[LMAC_EXIST];
+ * the number of set bits in LMAC_EXIST gives the number of LMACs
+ */
+ int (*get_nr_lmacs)(void *cgx);
+ u8 (*get_lmac_type)(void *cgx, int lmac_id);
+ u32 (*lmac_fifo_len)(void *cgx, int lmac_id);
+ int (*mac_lmac_intl_lbk)(void *cgx, int lmac_id,
+ bool enable);
+ /* Register Stats related functions */
+ int (*mac_get_rx_stats)(void *cgx, int lmac_id,
+ int idx, u64 *rx_stat);
+ int (*mac_get_tx_stats)(void *cgx, int lmac_id,
+ int idx, u64 *tx_stat);
+
+ /* Enable LMAC Pause Frame Configuration */
+ void (*mac_enadis_rx_pause_fwding)(void *cgxd,
+ int lmac_id,
+ bool enable);
+
+ int (*mac_get_pause_frm_status)(void *cgxd,
+ int lmac_id,
+ u8 *tx_pause,
+ u8 *rx_pause);
+
+ int (*mac_enadis_pause_frm)(void *cgxd,
+ int lmac_id,
+ u8 tx_pause,
+ u8 rx_pause);
+
+ void (*mac_pause_frm_config)(void *cgxd,
+ int lmac_id,
+ bool enable);
+
+ /* Enable/Disable Inbound PTP */
+ void (*mac_enadis_ptp_config)(void *cgxd,
+ int lmac_id,
+ bool enable);
+
+ int (*mac_rx_tx_enable)(void *cgxd, int lmac_id, bool enable);
+ int (*mac_tx_enable)(void *cgxd, int lmac_id, bool enable);
+ int (*pfc_config)(void *cgxd, int lmac_id,
+ u8 tx_pause, u8 rx_pause, u16 pfc_en);
+
+ int (*mac_get_pfc_frm_cfg)(void *cgxd, int lmac_id,
+ u8 *tx_pause, u8 *rx_pause);
+ int (*mac_reset)(void *cgxd, int lmac_id, u8 pf_req_flr);
+
+ /* FEC stats */
+ int (*get_fec_stats)(void *cgxd, int lmac_id,
+ struct cgx_fec_stats_rsp *rsp);
+};
+
+struct cgx {
+ void __iomem *reg_base;
+ struct pci_dev *pdev;
+ u8 cgx_id;
+ u8 lmac_count;
+ /* number of LMACs per MAC could be 4 or 8 */
+ u8 max_lmac_per_mac;
+#define MAX_LMAC_COUNT 8
+ struct lmac *lmac_idmap[MAX_LMAC_COUNT];
+ struct work_struct cgx_cmd_work;
+ struct workqueue_struct *cgx_cmd_workq;
+ struct list_head cgx_list;
+ u64 hw_features;
+ struct mac_ops *mac_ops;
+ unsigned long lmac_bmap; /* bitmap of enabled lmacs */
+ /* Lock to serialize read/write of global csrs like
+ * RPMX_MTI_STAT_DATA_HI_CDC etc
+ */
+ struct mutex lock;
+};
+
+typedef struct cgx rpm_t;
+
+/* Function Declarations */
+void cgx_write(struct cgx *cgx, u64 lmac, u64 offset, u64 val);
+u64 cgx_read(struct cgx *cgx, u64 lmac, u64 offset);
+struct lmac *lmac_pdata(u8 lmac_id, struct cgx *cgx);
+int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac);
+int cgx_fwi_cmd_generic(u64 req, u64 *resp, struct cgx *cgx, int lmac_id);
+bool is_lmac_valid(struct cgx *cgx, int lmac_id);
+struct mac_ops *rpm_get_mac_ops(struct cgx *cgx);
+
+#endif /* LMAC_COMMON_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
new file mode 100644
index 000000000..9690ac01f
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
@@ -0,0 +1,416 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+
+#include "rvu_reg.h"
+#include "mbox.h"
+#include "rvu_trace.h"
+
+static const u16 msgs_offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
+
+void __otx2_mbox_reset(struct otx2_mbox *mbox, int devid)
+{
+ struct otx2_mbox_dev *mdev = &mbox->dev[devid];
+ struct mbox_hdr *tx_hdr, *rx_hdr;
+ void *hw_mbase = mdev->hwbase;
+
+ tx_hdr = hw_mbase + mbox->tx_start;
+ rx_hdr = hw_mbase + mbox->rx_start;
+
+ mdev->msg_size = 0;
+ mdev->rsp_size = 0;
+ tx_hdr->num_msgs = 0;
+ tx_hdr->msg_size = 0;
+ rx_hdr->num_msgs = 0;
+ rx_hdr->msg_size = 0;
+}
+EXPORT_SYMBOL(__otx2_mbox_reset);
+
+void otx2_mbox_reset(struct otx2_mbox *mbox, int devid)
+{
+ struct otx2_mbox_dev *mdev = &mbox->dev[devid];
+
+ spin_lock(&mdev->mbox_lock);
+ __otx2_mbox_reset(mbox, devid);
+ spin_unlock(&mdev->mbox_lock);
+}
+EXPORT_SYMBOL(otx2_mbox_reset);
+
+void otx2_mbox_destroy(struct otx2_mbox *mbox)
+{
+ mbox->reg_base = NULL;
+ mbox->hwbase = NULL;
+
+ kfree(mbox->dev);
+ mbox->dev = NULL;
+}
+EXPORT_SYMBOL(otx2_mbox_destroy);
+
+static int otx2_mbox_setup(struct otx2_mbox *mbox, struct pci_dev *pdev,
+ void *reg_base, int direction, int ndevs)
+{
+ switch (direction) {
+ case MBOX_DIR_AFPF:
+ case MBOX_DIR_PFVF:
+ mbox->tx_start = MBOX_DOWN_TX_START;
+ mbox->rx_start = MBOX_DOWN_RX_START;
+ mbox->tx_size = MBOX_DOWN_TX_SIZE;
+ mbox->rx_size = MBOX_DOWN_RX_SIZE;
+ break;
+ case MBOX_DIR_PFAF:
+ case MBOX_DIR_VFPF:
+ mbox->tx_start = MBOX_DOWN_RX_START;
+ mbox->rx_start = MBOX_DOWN_TX_START;
+ mbox->tx_size = MBOX_DOWN_RX_SIZE;
+ mbox->rx_size = MBOX_DOWN_TX_SIZE;
+ break;
+ case MBOX_DIR_AFPF_UP:
+ case MBOX_DIR_PFVF_UP:
+ mbox->tx_start = MBOX_UP_TX_START;
+ mbox->rx_start = MBOX_UP_RX_START;
+ mbox->tx_size = MBOX_UP_TX_SIZE;
+ mbox->rx_size = MBOX_UP_RX_SIZE;
+ break;
+ case MBOX_DIR_PFAF_UP:
+ case MBOX_DIR_VFPF_UP:
+ mbox->tx_start = MBOX_UP_RX_START;
+ mbox->rx_start = MBOX_UP_TX_START;
+ mbox->tx_size = MBOX_UP_RX_SIZE;
+ mbox->rx_size = MBOX_UP_TX_SIZE;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ switch (direction) {
+ case MBOX_DIR_AFPF:
+ case MBOX_DIR_AFPF_UP:
+ mbox->trigger = RVU_AF_AFPF_MBOX0;
+ mbox->tr_shift = 4;
+ break;
+ case MBOX_DIR_PFAF:
+ case MBOX_DIR_PFAF_UP:
+ mbox->trigger = RVU_PF_PFAF_MBOX1;
+ mbox->tr_shift = 0;
+ break;
+ case MBOX_DIR_PFVF:
+ case MBOX_DIR_PFVF_UP:
+ mbox->trigger = RVU_PF_VFX_PFVF_MBOX0;
+ mbox->tr_shift = 12;
+ break;
+ case MBOX_DIR_VFPF:
+ case MBOX_DIR_VFPF_UP:
+ mbox->trigger = RVU_VF_VFPF_MBOX1;
+ mbox->tr_shift = 0;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ mbox->reg_base = reg_base;
+ mbox->pdev = pdev;
+
+ mbox->dev = kcalloc(ndevs, sizeof(struct otx2_mbox_dev), GFP_KERNEL);
+ if (!mbox->dev) {
+ otx2_mbox_destroy(mbox);
+ return -ENOMEM;
+ }
+ mbox->ndevs = ndevs;
+
+ return 0;
+}
+
+int otx2_mbox_init(struct otx2_mbox *mbox, void *hwbase, struct pci_dev *pdev,
+ void *reg_base, int direction, int ndevs)
+{
+ struct otx2_mbox_dev *mdev;
+ int devid, err;
+
+ err = otx2_mbox_setup(mbox, pdev, reg_base, direction, ndevs);
+ if (err)
+ return err;
+
+ mbox->hwbase = hwbase;
+
+ for (devid = 0; devid < ndevs; devid++) {
+ mdev = &mbox->dev[devid];
+ mdev->mbase = mbox->hwbase + (devid * MBOX_SIZE);
+ mdev->hwbase = mdev->mbase;
+ spin_lock_init(&mdev->mbox_lock);
+ /* Init header to reset value */
+ otx2_mbox_reset(mbox, devid);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(otx2_mbox_init);
+
+/* Initialize mailbox with the set of mailbox region addresses
+ * in the array hwbase.
+ */
+int otx2_mbox_regions_init(struct otx2_mbox *mbox, void **hwbase,
+ struct pci_dev *pdev, void *reg_base,
+ int direction, int ndevs, unsigned long *pf_bmap)
+{
+ struct otx2_mbox_dev *mdev;
+ int devid, err;
+
+ err = otx2_mbox_setup(mbox, pdev, reg_base, direction, ndevs);
+ if (err)
+ return err;
+
+ mbox->hwbase = hwbase[0];
+
+ for (devid = 0; devid < ndevs; devid++) {
+ if (!test_bit(devid, pf_bmap))
+ continue;
+
+ mdev = &mbox->dev[devid];
+ mdev->mbase = hwbase[devid];
+ mdev->hwbase = hwbase[devid];
+ spin_lock_init(&mdev->mbox_lock);
+ /* Init header to reset value */
+ otx2_mbox_reset(mbox, devid);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(otx2_mbox_regions_init);
+
+int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(MBOX_RSP_TIMEOUT);
+ struct otx2_mbox_dev *mdev = &mbox->dev[devid];
+ struct device *sender = &mbox->pdev->dev;
+
+ while (!time_after(jiffies, timeout)) {
+ if (mdev->num_msgs == mdev->msgs_acked)
+ return 0;
+ usleep_range(800, 1000);
+ }
+ dev_dbg(sender, "timed out while waiting for rsp\n");
+ return -EIO;
+}
+EXPORT_SYMBOL(otx2_mbox_wait_for_rsp);
+
+int otx2_mbox_busy_poll_for_rsp(struct otx2_mbox *mbox, int devid)
+{
+ struct otx2_mbox_dev *mdev = &mbox->dev[devid];
+ unsigned long timeout = jiffies + 1 * HZ;
+
+ while (!time_after(jiffies, timeout)) {
+ if (mdev->num_msgs == mdev->msgs_acked)
+ return 0;
+ cpu_relax();
+ }
+ return -EIO;
+}
+EXPORT_SYMBOL(otx2_mbox_busy_poll_for_rsp);
+
+void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid)
+{
+ struct otx2_mbox_dev *mdev = &mbox->dev[devid];
+ struct mbox_hdr *tx_hdr, *rx_hdr;
+ void *hw_mbase = mdev->hwbase;
+
+ tx_hdr = hw_mbase + mbox->tx_start;
+ rx_hdr = hw_mbase + mbox->rx_start;
+
+ /* If a bounce buffer is implemented, copy mbox messages from
+ * the bounce buffer to HW mbox memory.
+ */
+ if (mdev->mbase != hw_mbase)
+ memcpy(hw_mbase + mbox->tx_start + msgs_offset,
+ mdev->mbase + mbox->tx_start + msgs_offset,
+ mdev->msg_size);
+
+ spin_lock(&mdev->mbox_lock);
+
+ tx_hdr->msg_size = mdev->msg_size;
+
+ /* Reset header for next messages */
+ mdev->msg_size = 0;
+ mdev->rsp_size = 0;
+ mdev->msgs_acked = 0;
+
+ /* Sync mbox data into memory */
+ smp_wmb();
+
+ /* num_msgs != 0 signals to the peer that the buffer has a number of
+ * messages. So this should be written after writing all the messages
+ * to the shared memory.
+ */
+ tx_hdr->num_msgs = mdev->num_msgs;
+ rx_hdr->num_msgs = 0;
+
+ trace_otx2_msg_send(mbox->pdev, tx_hdr->num_msgs, tx_hdr->msg_size);
+
+ spin_unlock(&mdev->mbox_lock);
+
+ /* The interrupt should be fired after num_msgs is written
+ * to the shared memory
+ */
+ writeq(1, (void __iomem *)mbox->reg_base +
+ (mbox->trigger | (devid << mbox->tr_shift)));
+}
+EXPORT_SYMBOL(otx2_mbox_msg_send);
+
+struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid,
+ int size, int size_rsp)
+{
+ struct otx2_mbox_dev *mdev = &mbox->dev[devid];
+ struct mbox_msghdr *msghdr = NULL;
+
+ spin_lock(&mdev->mbox_lock);
+ size = ALIGN(size, MBOX_MSG_ALIGN);
+ size_rsp = ALIGN(size_rsp, MBOX_MSG_ALIGN);
+ /* Check if there is space in mailbox */
+ if ((mdev->msg_size + size) > mbox->tx_size - msgs_offset)
+ goto exit;
+ if ((mdev->rsp_size + size_rsp) > mbox->rx_size - msgs_offset)
+ goto exit;
+
+ if (mdev->msg_size == 0)
+ mdev->num_msgs = 0;
+ mdev->num_msgs++;
+
+ msghdr = mdev->mbase + mbox->tx_start + msgs_offset + mdev->msg_size;
+
+ /* Clear the whole msg region */
+ memset(msghdr, 0, size);
+ /* Init message header with reset values */
+ msghdr->ver = OTX2_MBOX_VERSION;
+ mdev->msg_size += size;
+ mdev->rsp_size += size_rsp;
+ msghdr->next_msgoff = mdev->msg_size + msgs_offset;
+exit:
+ spin_unlock(&mdev->mbox_lock);
+
+ return msghdr;
+}
+EXPORT_SYMBOL(otx2_mbox_alloc_msg_rsp);
+
+struct mbox_msghdr *otx2_mbox_get_rsp(struct otx2_mbox *mbox, int devid,
+ struct mbox_msghdr *msg)
+{
+ unsigned long imsg = mbox->tx_start + msgs_offset;
+ unsigned long irsp = mbox->rx_start + msgs_offset;
+ struct otx2_mbox_dev *mdev = &mbox->dev[devid];
+ u16 msgs;
+
+ spin_lock(&mdev->mbox_lock);
+
+ if (mdev->num_msgs != mdev->msgs_acked)
+ goto error;
+
+ for (msgs = 0; msgs < mdev->msgs_acked; msgs++) {
+ struct mbox_msghdr *pmsg = mdev->mbase + imsg;
+ struct mbox_msghdr *prsp = mdev->mbase + irsp;
+
+ if (msg == pmsg) {
+ if (pmsg->id != prsp->id)
+ goto error;
+ spin_unlock(&mdev->mbox_lock);
+ return prsp;
+ }
+
+ imsg = mbox->tx_start + pmsg->next_msgoff;
+ irsp = mbox->rx_start + prsp->next_msgoff;
+ }
+
+error:
+ spin_unlock(&mdev->mbox_lock);
+ return ERR_PTR(-ENODEV);
+}
+EXPORT_SYMBOL(otx2_mbox_get_rsp);
+
+int otx2_mbox_check_rsp_msgs(struct otx2_mbox *mbox, int devid)
+{
+ unsigned long ireq = mbox->tx_start + msgs_offset;
+ unsigned long irsp = mbox->rx_start + msgs_offset;
+ struct otx2_mbox_dev *mdev = &mbox->dev[devid];
+ int rc = -ENODEV;
+ u16 msgs;
+
+ spin_lock(&mdev->mbox_lock);
+
+ if (mdev->num_msgs != mdev->msgs_acked)
+ goto exit;
+
+ for (msgs = 0; msgs < mdev->msgs_acked; msgs++) {
+ struct mbox_msghdr *preq = mdev->mbase + ireq;
+ struct mbox_msghdr *prsp = mdev->mbase + irsp;
+
+ if (preq->id != prsp->id) {
+ trace_otx2_msg_check(mbox->pdev, preq->id,
+ prsp->id, prsp->rc);
+ goto exit;
+ }
+ if (prsp->rc) {
+ rc = prsp->rc;
+ trace_otx2_msg_check(mbox->pdev, preq->id,
+ prsp->id, prsp->rc);
+ goto exit;
+ }
+
+ ireq = mbox->tx_start + preq->next_msgoff;
+ irsp = mbox->rx_start + prsp->next_msgoff;
+ }
+ rc = 0;
+exit:
+ spin_unlock(&mdev->mbox_lock);
+ return rc;
+}
+EXPORT_SYMBOL(otx2_mbox_check_rsp_msgs);
+
+int
+otx2_reply_invalid_msg(struct otx2_mbox *mbox, int devid, u16 pcifunc, u16 id)
+{
+ struct msg_rsp *rsp;
+
+ rsp = (struct msg_rsp *)
+ otx2_mbox_alloc_msg(mbox, devid, sizeof(*rsp));
+ if (!rsp)
+ return -ENOMEM;
+ rsp->hdr.id = id;
+ rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
+ rsp->hdr.rc = MBOX_MSG_INVALID;
+ rsp->hdr.pcifunc = pcifunc;
+ return 0;
+}
+EXPORT_SYMBOL(otx2_reply_invalid_msg);
+
+bool otx2_mbox_nonempty(struct otx2_mbox *mbox, int devid)
+{
+ struct otx2_mbox_dev *mdev = &mbox->dev[devid];
+ bool ret;
+
+ spin_lock(&mdev->mbox_lock);
+ ret = mdev->num_msgs != 0;
+ spin_unlock(&mdev->mbox_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(otx2_mbox_nonempty);
+
+const char *otx2_mbox_id2name(u16 id)
+{
+ switch (id) {
+#define M(_name, _id, _1, _2, _3) case _id: return # _name;
+ MBOX_MESSAGES
+#undef M
+ default:
+ return "INVALID ID";
+ }
+}
+EXPORT_SYMBOL(otx2_mbox_id2name);
+
+MODULE_AUTHOR("Marvell.");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
new file mode 100644
index 000000000..31bd9aeb4
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -0,0 +1,2214 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell.
+ *
+ */
+
+#ifndef MBOX_H
+#define MBOX_H
+
+#include <linux/etherdevice.h>
+#include <linux/sizes.h>
+
+#include "rvu_struct.h"
+#include "common.h"
+
+#define MBOX_SIZE SZ_64K
+
+/* AF/PF: PF initiated, PF/VF: VF initiated */
+#define MBOX_DOWN_RX_START 0
+#define MBOX_DOWN_RX_SIZE (46 * SZ_1K)
+#define MBOX_DOWN_TX_START (MBOX_DOWN_RX_START + MBOX_DOWN_RX_SIZE)
+#define MBOX_DOWN_TX_SIZE (16 * SZ_1K)
+/* AF/PF: AF initiated, PF/VF: PF initiated */
+#define MBOX_UP_RX_START (MBOX_DOWN_TX_START + MBOX_DOWN_TX_SIZE)
+#define MBOX_UP_RX_SIZE SZ_1K
+#define MBOX_UP_TX_START (MBOX_UP_RX_START + MBOX_UP_RX_SIZE)
+#define MBOX_UP_TX_SIZE SZ_1K
+
+#if MBOX_UP_TX_SIZE + MBOX_UP_TX_START != MBOX_SIZE
+# error "incorrect mailbox area sizes"
+#endif
+
+#define INTR_MASK(pfvfs) ((pfvfs < 64) ? (BIT_ULL(pfvfs) - 1) : (~0ull))
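+
+/* e.g. INTR_MASK(3) = BIT_ULL(3) - 1 = 0x7, i.e. one bit per PF/VF
+ * (illustrative expansion of the macro above).
+ */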
+
+#define MBOX_RSP_TIMEOUT 6000 /* Time(ms) to wait for mbox response */
+
+#define MBOX_MSG_ALIGN 16 /* Align mbox msg start to 16bytes */
+
+/* Mailbox directions */
+#define MBOX_DIR_AFPF 0 /* AF replies to PF */
+#define MBOX_DIR_PFAF 1 /* PF sends messages to AF */
+#define MBOX_DIR_PFVF 2 /* PF replies to VF */
+#define MBOX_DIR_VFPF 3 /* VF sends messages to PF */
+#define MBOX_DIR_AFPF_UP 4 /* AF sends messages to PF */
+#define MBOX_DIR_PFAF_UP 5 /* PF replies to AF */
+#define MBOX_DIR_PFVF_UP 6 /* PF sends messages to VF */
+#define MBOX_DIR_VFPF_UP 7 /* VF replies to PF */
+
+struct otx2_mbox_dev {
+ void *mbase; /* This dev's mbox region */
+ void *hwbase;
+ spinlock_t mbox_lock;
+ u16 msg_size; /* Total msg size to be sent */
+ u16 rsp_size; /* Total expected rsp size, to ensure replies fit in the mbox */
+ u16 num_msgs; /* No of msgs sent or waiting for response */
+ u16 msgs_acked; /* No of msgs for which response is received */
+};
+
+struct otx2_mbox {
+ struct pci_dev *pdev;
+ void *hwbase; /* Mbox region advertised by HW */
+ void *reg_base;/* CSR base for this dev */
+ u64 trigger; /* Trigger mbox notification */
+ u16 tr_shift; /* Mbox trigger shift */
+ u64 rx_start; /* Offset of Rx region in mbox memory */
+ u64 tx_start; /* Offset of Tx region in mbox memory */
+ u16 rx_size; /* Size of Rx region */
+ u16 tx_size; /* Size of Tx region */
+ u16 ndevs; /* The number of peers */
+ struct otx2_mbox_dev *dev;
+};
+
+/* Header which precedes all mbox messages */
+struct mbox_hdr {
+ u64 msg_size; /* Total msgs size embedded */
+ u16 num_msgs; /* No of msgs embedded */
+};
+
+/* Header which precedes every msg and is also part of it */
+struct mbox_msghdr {
+ u16 pcifunc; /* Who's sending this msg */
+ u16 id; /* Mbox message ID */
+#define OTX2_MBOX_REQ_SIG (0xdead)
+#define OTX2_MBOX_RSP_SIG (0xbeef)
+ u16 sig; /* Signature, for validating corrupted msgs */
+#define OTX2_MBOX_VERSION (0x000a)
+ u16 ver; /* Version of msg's structure for this ID */
+ u16 next_msgoff; /* Offset of next msg within mailbox region */
+ int rc; /* Msg process'ed response code */
+};
+
+void otx2_mbox_reset(struct otx2_mbox *mbox, int devid);
+void __otx2_mbox_reset(struct otx2_mbox *mbox, int devid);
+void otx2_mbox_destroy(struct otx2_mbox *mbox);
+int otx2_mbox_init(struct otx2_mbox *mbox, void __force *hwbase,
+ struct pci_dev *pdev, void __force *reg_base,
+ int direction, int ndevs);
+
+int otx2_mbox_regions_init(struct otx2_mbox *mbox, void __force **hwbase,
+ struct pci_dev *pdev, void __force *reg_base,
+ int direction, int ndevs, unsigned long *bmap);
+void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid);
+int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid);
+int otx2_mbox_busy_poll_for_rsp(struct otx2_mbox *mbox, int devid);
+struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid,
+ int size, int size_rsp);
+struct mbox_msghdr *otx2_mbox_get_rsp(struct otx2_mbox *mbox, int devid,
+ struct mbox_msghdr *msg);
+int otx2_mbox_check_rsp_msgs(struct otx2_mbox *mbox, int devid);
+int otx2_reply_invalid_msg(struct otx2_mbox *mbox, int devid,
+ u16 pcifunc, u16 id);
+bool otx2_mbox_nonempty(struct otx2_mbox *mbox, int devid);
+const char *otx2_mbox_id2name(u16 id);
+static inline struct mbox_msghdr *otx2_mbox_alloc_msg(struct otx2_mbox *mbox,
+ int devid, int size)
+{
+ return otx2_mbox_alloc_msg_rsp(mbox, devid, size, 0);
+}
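+
+/* Typical request/response flow using the helpers declared above
+ * (illustrative sketch only; 'mbox', 'devid' and the READY message are
+ * placeholder assumptions, and real callers also check the ERR_PTR()
+ * returned by otx2_mbox_get_rsp()):
+ *
+ *     struct msg_req *req;
+ *     struct msg_rsp *rsp;
+ *
+ *     req = (struct msg_req *)otx2_mbox_alloc_msg(mbox, devid, sizeof(*req));
+ *     if (!req)
+ *             return -ENOMEM;
+ *     req->hdr.id = MBOX_MSG_READY;
+ *     req->hdr.sig = OTX2_MBOX_REQ_SIG;
+ *     otx2_mbox_msg_send(mbox, devid);
+ *     if (otx2_mbox_wait_for_rsp(mbox, devid))
+ *             return -EIO;
+ *     rsp = (struct msg_rsp *)otx2_mbox_get_rsp(mbox, devid, &req->hdr);
+ */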
+
+/* Mailbox message types */
+#define MBOX_MSG_MASK 0xFFFF
+#define MBOX_MSG_INVALID 0xFFFE
+#define MBOX_MSG_MAX 0xFFFF
+
+#define MBOX_MESSAGES \
+/* Generic mbox IDs (range 0x000 - 0x1FF) */ \
+M(READY, 0x001, ready, msg_req, ready_msg_rsp) \
+M(ATTACH_RESOURCES, 0x002, attach_resources, rsrc_attach, msg_rsp) \
+M(DETACH_RESOURCES, 0x003, detach_resources, rsrc_detach, msg_rsp) \
+M(FREE_RSRC_CNT, 0x004, free_rsrc_cnt, msg_req, free_rsrcs_rsp) \
+M(MSIX_OFFSET, 0x005, msix_offset, msg_req, msix_offset_rsp) \
+M(VF_FLR, 0x006, vf_flr, msg_req, msg_rsp) \
+M(PTP_OP, 0x007, ptp_op, ptp_req, ptp_rsp) \
+M(GET_HW_CAP, 0x008, get_hw_cap, msg_req, get_hw_cap_rsp) \
+M(LMTST_TBL_SETUP, 0x00a, lmtst_tbl_setup, lmtst_tbl_setup_req, \
+ msg_rsp) \
+M(SET_VF_PERM, 0x00b, set_vf_perm, set_vf_perm, msg_rsp) \
+M(PTP_GET_CAP, 0x00c, ptp_get_cap, msg_req, ptp_get_cap_rsp) \
+/* CGX mbox IDs (range 0x200 - 0x3FF) */ \
+M(CGX_START_RXTX, 0x200, cgx_start_rxtx, msg_req, msg_rsp) \
+M(CGX_STOP_RXTX, 0x201, cgx_stop_rxtx, msg_req, msg_rsp) \
+M(CGX_STATS, 0x202, cgx_stats, msg_req, cgx_stats_rsp) \
+M(CGX_MAC_ADDR_SET, 0x203, cgx_mac_addr_set, cgx_mac_addr_set_or_get, \
+ cgx_mac_addr_set_or_get) \
+M(CGX_MAC_ADDR_GET, 0x204, cgx_mac_addr_get, cgx_mac_addr_set_or_get, \
+ cgx_mac_addr_set_or_get) \
+M(CGX_PROMISC_ENABLE, 0x205, cgx_promisc_enable, msg_req, msg_rsp) \
+M(CGX_PROMISC_DISABLE, 0x206, cgx_promisc_disable, msg_req, msg_rsp) \
+M(CGX_START_LINKEVENTS, 0x207, cgx_start_linkevents, msg_req, msg_rsp) \
+M(CGX_STOP_LINKEVENTS, 0x208, cgx_stop_linkevents, msg_req, msg_rsp) \
+M(CGX_GET_LINKINFO, 0x209, cgx_get_linkinfo, msg_req, cgx_link_info_msg) \
+M(CGX_INTLBK_ENABLE, 0x20A, cgx_intlbk_enable, msg_req, msg_rsp) \
+M(CGX_INTLBK_DISABLE, 0x20B, cgx_intlbk_disable, msg_req, msg_rsp) \
+M(CGX_PTP_RX_ENABLE, 0x20C, cgx_ptp_rx_enable, msg_req, msg_rsp) \
+M(CGX_PTP_RX_DISABLE, 0x20D, cgx_ptp_rx_disable, msg_req, msg_rsp) \
+M(CGX_CFG_PAUSE_FRM, 0x20E, cgx_cfg_pause_frm, cgx_pause_frm_cfg, \
+ cgx_pause_frm_cfg) \
+M(CGX_FW_DATA_GET, 0x20F, cgx_get_aux_link_info, msg_req, cgx_fw_data) \
+M(CGX_FEC_SET, 0x210, cgx_set_fec_param, fec_mode, fec_mode) \
+M(CGX_MAC_ADDR_ADD, 0x211, cgx_mac_addr_add, cgx_mac_addr_add_req, \
+ cgx_mac_addr_add_rsp) \
+M(CGX_MAC_ADDR_DEL, 0x212, cgx_mac_addr_del, cgx_mac_addr_del_req, \
+ msg_rsp) \
+M(CGX_MAC_MAX_ENTRIES_GET, 0x213, cgx_mac_max_entries_get, msg_req, \
+ cgx_max_dmac_entries_get_rsp) \
+M(CGX_FEC_STATS, 0x217, cgx_fec_stats, msg_req, cgx_fec_stats_rsp) \
+M(CGX_SET_LINK_MODE, 0x218, cgx_set_link_mode, cgx_set_link_mode_req,\
+ cgx_set_link_mode_rsp) \
+M(CGX_GET_PHY_FEC_STATS, 0x219, cgx_get_phy_fec_stats, msg_req, msg_rsp) \
+M(CGX_FEATURES_GET, 0x21B, cgx_features_get, msg_req, \
+ cgx_features_info_msg) \
+M(RPM_STATS, 0x21C, rpm_stats, msg_req, rpm_stats_rsp) \
+M(CGX_MAC_ADDR_RESET, 0x21D, cgx_mac_addr_reset, cgx_mac_addr_reset_req, \
+ msg_rsp) \
+M(CGX_MAC_ADDR_UPDATE, 0x21E, cgx_mac_addr_update, cgx_mac_addr_update_req, \
+ cgx_mac_addr_update_rsp) \
+M(CGX_PRIO_FLOW_CTRL_CFG, 0x21F, cgx_prio_flow_ctrl_cfg, cgx_pfc_cfg, \
+ cgx_pfc_rsp) \
+/* NPA mbox IDs (range 0x400 - 0x5FF) */ \
+M(NPA_LF_ALLOC, 0x400, npa_lf_alloc, \
+ npa_lf_alloc_req, npa_lf_alloc_rsp) \
+M(NPA_LF_FREE, 0x401, npa_lf_free, msg_req, msg_rsp) \
+M(NPA_AQ_ENQ, 0x402, npa_aq_enq, npa_aq_enq_req, npa_aq_enq_rsp) \
+M(NPA_HWCTX_DISABLE, 0x403, npa_hwctx_disable, hwctx_disable_req, msg_rsp)\
+/* SSO/SSOW mbox IDs (range 0x600 - 0x7FF) */ \
+/* TIM mbox IDs (range 0x800 - 0x9FF) */ \
+/* CPT mbox IDs (range 0xA00 - 0xBFF) */ \
+M(CPT_LF_ALLOC, 0xA00, cpt_lf_alloc, cpt_lf_alloc_req_msg, \
+ msg_rsp) \
+M(CPT_LF_FREE, 0xA01, cpt_lf_free, msg_req, msg_rsp) \
+M(CPT_RD_WR_REGISTER, 0xA02, cpt_rd_wr_register, cpt_rd_wr_reg_msg, \
+ cpt_rd_wr_reg_msg) \
+M(CPT_INLINE_IPSEC_CFG, 0xA04, cpt_inline_ipsec_cfg, \
+ cpt_inline_ipsec_cfg_msg, msg_rsp) \
+M(CPT_STATS, 0xA05, cpt_sts, cpt_sts_req, cpt_sts_rsp) \
+M(CPT_RXC_TIME_CFG, 0xA06, cpt_rxc_time_cfg, cpt_rxc_time_cfg_req, \
+ msg_rsp) \
+M(CPT_CTX_CACHE_SYNC, 0xA07, cpt_ctx_cache_sync, msg_req, msg_rsp) \
+M(CPT_LF_RESET, 0xA08, cpt_lf_reset, cpt_lf_rst_req, msg_rsp) \
+M(CPT_FLT_ENG_INFO, 0xA09, cpt_flt_eng_info, cpt_flt_eng_info_req, \
+ cpt_flt_eng_info_rsp) \
+/* SDP mbox IDs (range 0x1000 - 0x11FF) */ \
+M(SET_SDP_CHAN_INFO, 0x1000, set_sdp_chan_info, sdp_chan_info_msg, msg_rsp) \
+M(GET_SDP_CHAN_INFO, 0x1001, get_sdp_chan_info, msg_req, sdp_get_chan_info_msg) \
+/* NPC mbox IDs (range 0x6000 - 0x7FFF) */ \
+M(NPC_MCAM_ALLOC_ENTRY, 0x6000, npc_mcam_alloc_entry, npc_mcam_alloc_entry_req,\
+ npc_mcam_alloc_entry_rsp) \
+M(NPC_MCAM_FREE_ENTRY, 0x6001, npc_mcam_free_entry, \
+ npc_mcam_free_entry_req, msg_rsp) \
+M(NPC_MCAM_WRITE_ENTRY, 0x6002, npc_mcam_write_entry, \
+ npc_mcam_write_entry_req, msg_rsp) \
+M(NPC_MCAM_ENA_ENTRY, 0x6003, npc_mcam_ena_entry, \
+ npc_mcam_ena_dis_entry_req, msg_rsp) \
+M(NPC_MCAM_DIS_ENTRY, 0x6004, npc_mcam_dis_entry, \
+ npc_mcam_ena_dis_entry_req, msg_rsp) \
+M(NPC_MCAM_SHIFT_ENTRY, 0x6005, npc_mcam_shift_entry, npc_mcam_shift_entry_req,\
+ npc_mcam_shift_entry_rsp) \
+M(NPC_MCAM_ALLOC_COUNTER, 0x6006, npc_mcam_alloc_counter, \
+ npc_mcam_alloc_counter_req, \
+ npc_mcam_alloc_counter_rsp) \
+M(NPC_MCAM_FREE_COUNTER, 0x6007, npc_mcam_free_counter, \
+ npc_mcam_oper_counter_req, msg_rsp) \
+M(NPC_MCAM_UNMAP_COUNTER, 0x6008, npc_mcam_unmap_counter, \
+ npc_mcam_unmap_counter_req, msg_rsp) \
+M(NPC_MCAM_CLEAR_COUNTER, 0x6009, npc_mcam_clear_counter, \
+ npc_mcam_oper_counter_req, msg_rsp) \
+M(NPC_MCAM_COUNTER_STATS, 0x600a, npc_mcam_counter_stats, \
+ npc_mcam_oper_counter_req, \
+ npc_mcam_oper_counter_rsp) \
+M(NPC_MCAM_ALLOC_AND_WRITE_ENTRY, 0x600b, npc_mcam_alloc_and_write_entry, \
+ npc_mcam_alloc_and_write_entry_req, \
+ npc_mcam_alloc_and_write_entry_rsp) \
+M(NPC_GET_KEX_CFG, 0x600c, npc_get_kex_cfg, \
+ msg_req, npc_get_kex_cfg_rsp) \
+M(NPC_INSTALL_FLOW, 0x600d, npc_install_flow, \
+ npc_install_flow_req, npc_install_flow_rsp) \
+M(NPC_DELETE_FLOW, 0x600e, npc_delete_flow, \
+ npc_delete_flow_req, npc_delete_flow_rsp) \
+M(NPC_MCAM_READ_ENTRY, 0x600f, npc_mcam_read_entry, \
+ npc_mcam_read_entry_req, \
+ npc_mcam_read_entry_rsp) \
+M(NPC_SET_PKIND, 0x6010, npc_set_pkind, \
+ npc_set_pkind, msg_rsp) \
+M(NPC_MCAM_READ_BASE_RULE, 0x6011, npc_read_base_steer_rule, \
+ msg_req, npc_mcam_read_base_rule_rsp) \
+M(NPC_MCAM_GET_STATS, 0x6012, npc_mcam_entry_stats, \
+ npc_mcam_get_stats_req, \
+ npc_mcam_get_stats_rsp) \
+M(NPC_GET_FIELD_HASH_INFO, 0x6013, npc_get_field_hash_info, \
+ npc_get_field_hash_info_req, \
+ npc_get_field_hash_info_rsp) \
+M(NPC_GET_FIELD_STATUS, 0x6014, npc_get_field_status, \
+ npc_get_field_status_req, \
+ npc_get_field_status_rsp) \
+/* NIX mbox IDs (range 0x8000 - 0xFFFF) */ \
+M(NIX_LF_ALLOC, 0x8000, nix_lf_alloc, \
+ nix_lf_alloc_req, nix_lf_alloc_rsp) \
+M(NIX_LF_FREE, 0x8001, nix_lf_free, nix_lf_free_req, msg_rsp) \
+M(NIX_AQ_ENQ, 0x8002, nix_aq_enq, nix_aq_enq_req, nix_aq_enq_rsp) \
+M(NIX_HWCTX_DISABLE, 0x8003, nix_hwctx_disable, \
+ hwctx_disable_req, msg_rsp) \
+M(NIX_TXSCH_ALLOC, 0x8004, nix_txsch_alloc, \
+ nix_txsch_alloc_req, nix_txsch_alloc_rsp) \
+M(NIX_TXSCH_FREE, 0x8005, nix_txsch_free, nix_txsch_free_req, msg_rsp) \
+M(NIX_TXSCHQ_CFG, 0x8006, nix_txschq_cfg, nix_txschq_config, \
+ nix_txschq_config) \
+M(NIX_STATS_RST, 0x8007, nix_stats_rst, msg_req, msg_rsp) \
+M(NIX_VTAG_CFG, 0x8008, nix_vtag_cfg, nix_vtag_config, \
+ nix_vtag_config_rsp) \
+M(NIX_RSS_FLOWKEY_CFG, 0x8009, nix_rss_flowkey_cfg, \
+ nix_rss_flowkey_cfg, \
+ nix_rss_flowkey_cfg_rsp) \
+M(NIX_SET_MAC_ADDR, 0x800a, nix_set_mac_addr, nix_set_mac_addr, msg_rsp) \
+M(NIX_SET_RX_MODE, 0x800b, nix_set_rx_mode, nix_rx_mode, msg_rsp) \
+M(NIX_SET_HW_FRS, 0x800c, nix_set_hw_frs, nix_frs_cfg, msg_rsp) \
+M(NIX_LF_START_RX, 0x800d, nix_lf_start_rx, msg_req, msg_rsp) \
+M(NIX_LF_STOP_RX, 0x800e, nix_lf_stop_rx, msg_req, msg_rsp) \
+M(NIX_MARK_FORMAT_CFG, 0x800f, nix_mark_format_cfg, \
+ nix_mark_format_cfg, \
+ nix_mark_format_cfg_rsp) \
+M(NIX_SET_RX_CFG, 0x8010, nix_set_rx_cfg, nix_rx_cfg, msg_rsp) \
+M(NIX_LSO_FORMAT_CFG, 0x8011, nix_lso_format_cfg, \
+ nix_lso_format_cfg, \
+ nix_lso_format_cfg_rsp) \
+M(NIX_LF_PTP_TX_ENABLE, 0x8013, nix_lf_ptp_tx_enable, msg_req, msg_rsp) \
+M(NIX_LF_PTP_TX_DISABLE, 0x8014, nix_lf_ptp_tx_disable, msg_req, msg_rsp) \
+M(NIX_BP_ENABLE, 0x8016, nix_bp_enable, nix_bp_cfg_req, \
+ nix_bp_cfg_rsp) \
+M(NIX_BP_DISABLE, 0x8017, nix_bp_disable, nix_bp_cfg_req, msg_rsp) \
+M(NIX_GET_MAC_ADDR, 0x8018, nix_get_mac_addr, msg_req, nix_get_mac_addr_rsp) \
+M(NIX_INLINE_IPSEC_CFG, 0x8019, nix_inline_ipsec_cfg, \
+ nix_inline_ipsec_cfg, msg_rsp) \
+M(NIX_INLINE_IPSEC_LF_CFG, 0x801a, nix_inline_ipsec_lf_cfg, \
+ nix_inline_ipsec_lf_cfg, msg_rsp) \
+M(NIX_CN10K_AQ_ENQ, 0x801b, nix_cn10k_aq_enq, nix_cn10k_aq_enq_req, \
+ nix_cn10k_aq_enq_rsp) \
+M(NIX_GET_HW_INFO, 0x801c, nix_get_hw_info, msg_req, nix_hw_info) \
+M(NIX_BANDPROF_ALLOC, 0x801d, nix_bandprof_alloc, nix_bandprof_alloc_req, \
+ nix_bandprof_alloc_rsp) \
+M(NIX_BANDPROF_FREE, 0x801e, nix_bandprof_free, nix_bandprof_free_req, \
+ msg_rsp) \
+M(NIX_BANDPROF_GET_HWINFO, 0x801f, nix_bandprof_get_hwinfo, msg_req, \
+ nix_bandprof_get_hwinfo_rsp) \
+M(NIX_READ_INLINE_IPSEC_CFG, 0x8023, nix_read_inline_ipsec_cfg, \
+ msg_req, nix_inline_ipsec_cfg) \
+/* MCS mbox IDs (range 0xA000 - 0xBFFF) */ \
+M(MCS_ALLOC_RESOURCES, 0xa000, mcs_alloc_resources, mcs_alloc_rsrc_req, \
+ mcs_alloc_rsrc_rsp) \
+M(MCS_FREE_RESOURCES, 0xa001, mcs_free_resources, mcs_free_rsrc_req, msg_rsp) \
+M(MCS_FLOWID_ENTRY_WRITE, 0xa002, mcs_flowid_entry_write, mcs_flowid_entry_write_req, \
+ msg_rsp) \
+M(MCS_SECY_PLCY_WRITE, 0xa003, mcs_secy_plcy_write, mcs_secy_plcy_write_req, \
+ msg_rsp) \
+M(MCS_RX_SC_CAM_WRITE, 0xa004, mcs_rx_sc_cam_write, mcs_rx_sc_cam_write_req, \
+ msg_rsp) \
+M(MCS_SA_PLCY_WRITE, 0xa005, mcs_sa_plcy_write, mcs_sa_plcy_write_req, \
+ msg_rsp) \
+M(MCS_TX_SC_SA_MAP_WRITE, 0xa006, mcs_tx_sc_sa_map_write, mcs_tx_sc_sa_map, \
+ msg_rsp) \
+M(MCS_RX_SC_SA_MAP_WRITE, 0xa007, mcs_rx_sc_sa_map_write, mcs_rx_sc_sa_map, \
+ msg_rsp) \
+M(MCS_FLOWID_ENA_ENTRY, 0xa008, mcs_flowid_ena_entry, mcs_flowid_ena_dis_entry, \
+ msg_rsp) \
+M(MCS_PN_TABLE_WRITE, 0xa009, mcs_pn_table_write, mcs_pn_table_write_req, \
+ msg_rsp) \
+M(MCS_SET_ACTIVE_LMAC, 0xa00a, mcs_set_active_lmac, mcs_set_active_lmac, \
+ msg_rsp) \
+M(MCS_GET_HW_INFO, 0xa00b, mcs_get_hw_info, msg_req, mcs_hw_info) \
+M(MCS_GET_FLOWID_STATS, 0xa00c, mcs_get_flowid_stats, mcs_stats_req, \
+ mcs_flowid_stats) \
+M(MCS_GET_SECY_STATS, 0xa00d, mcs_get_secy_stats, mcs_stats_req, \
+ mcs_secy_stats) \
+M(MCS_GET_SC_STATS, 0xa00e, mcs_get_sc_stats, mcs_stats_req, mcs_sc_stats) \
+M(MCS_GET_SA_STATS, 0xa00f, mcs_get_sa_stats, mcs_stats_req, mcs_sa_stats) \
+M(MCS_GET_PORT_STATS, 0xa010, mcs_get_port_stats, mcs_stats_req, \
+ mcs_port_stats) \
+M(MCS_CLEAR_STATS, 0xa011, mcs_clear_stats, mcs_clear_stats, msg_rsp) \
+M(MCS_INTR_CFG, 0xa012, mcs_intr_cfg, mcs_intr_cfg, msg_rsp) \
+M(MCS_SET_LMAC_MODE, 0xa013, mcs_set_lmac_mode, mcs_set_lmac_mode, msg_rsp) \
+M(MCS_SET_PN_THRESHOLD, 0xa014, mcs_set_pn_threshold, mcs_set_pn_threshold, \
+ msg_rsp) \
+M(MCS_ALLOC_CTRL_PKT_RULE, 0xa015, mcs_alloc_ctrl_pkt_rule, \
+ mcs_alloc_ctrl_pkt_rule_req, \
+ mcs_alloc_ctrl_pkt_rule_rsp) \
+M(MCS_FREE_CTRL_PKT_RULE, 0xa016, mcs_free_ctrl_pkt_rule, \
+ mcs_free_ctrl_pkt_rule_req, msg_rsp) \
+M(MCS_CTRL_PKT_RULE_WRITE, 0xa017, mcs_ctrl_pkt_rule_write, \
+ mcs_ctrl_pkt_rule_write_req, msg_rsp) \
+M(MCS_PORT_RESET, 0xa018, mcs_port_reset, mcs_port_reset_req, msg_rsp) \
+M(MCS_PORT_CFG_SET, 0xa019, mcs_port_cfg_set, mcs_port_cfg_set_req, msg_rsp)\
+M(MCS_PORT_CFG_GET, 0xa020, mcs_port_cfg_get, mcs_port_cfg_get_req, \
+ mcs_port_cfg_get_rsp) \
+M(MCS_CUSTOM_TAG_CFG_GET, 0xa021, mcs_custom_tag_cfg_get, \
+ mcs_custom_tag_cfg_get_req, \
+ mcs_custom_tag_cfg_get_rsp)
+
+/* Messages initiated by AF (range 0xC00 - 0xEFF) */
+#define MBOX_UP_CGX_MESSAGES \
+M(CGX_LINK_EVENT, 0xC00, cgx_link_event, cgx_link_info_msg, msg_rsp)
+
+#define MBOX_UP_CPT_MESSAGES \
+M(CPT_INST_LMTST, 0xD00, cpt_inst_lmtst, cpt_inst_lmtst_req, msg_rsp)
+
+#define MBOX_UP_MCS_MESSAGES \
+M(MCS_INTR_NOTIFY, 0xE00, mcs_intr_notify, mcs_intr_info, msg_rsp)
+
+enum {
+#define M(_name, _id, _1, _2, _3) MBOX_MSG_ ## _name = _id,
+MBOX_MESSAGES
+MBOX_UP_CGX_MESSAGES
+MBOX_UP_CPT_MESSAGES
+MBOX_UP_MCS_MESSAGES
+#undef M
+};
+
+/* Mailbox message formats */
+
+#define RVU_DEFAULT_PF_FUNC 0xFFFF
+
+/* Generic request msg used for those mbox messages which
+ * don't send any data in the request.
+ */
+struct msg_req {
+ struct mbox_msghdr hdr;
+};
+
+/* Generic response msg used as an ack or response for those mbox
+ * messages which don't have a specific rsp msg format.
+ */
+struct msg_rsp {
+ struct mbox_msghdr hdr;
+};
+
+/* RVU mailbox error codes
+ * Range 256 - 300.
+ */
+enum rvu_af_status {
+ RVU_INVALID_VF_ID = -256,
+};
+
+struct ready_msg_rsp {
+ struct mbox_msghdr hdr;
+ u16 sclk_freq; /* SCLK frequency (in MHz) */
+ u16 rclk_freq; /* RCLK frequency (in MHz) */
+};
+
+/* Structure for requesting resource provisioning.
+ * 'modify' flag to be used when either requesting more resources
+ * or detaching only part of a certain resource type.
+ * The rest of the fields specify how many of which type are to
+ * be attached.
+ * To request LFs from two blocks of the same type, this mailbox
+ * can be sent twice, as below:
+ * struct rsrc_attach *attach;
+ * .. Allocate memory for message ..
+ * attach->cptlfs = 3; <3 LFs from CPT0>
+ * .. Send message ..
+ * .. Allocate memory for message ..
+ * attach->modify = 1;
+ * attach->cpt_blkaddr = BLKADDR_CPT1;
+ * attach->cptlfs = 2; <2 LFs from CPT1>
+ * .. Send message ..
+ */
+struct rsrc_attach {
+ struct mbox_msghdr hdr;
+ u8 modify:1;
+ u8 npalf:1;
+ u8 nixlf:1;
+ u16 sso;
+ u16 ssow;
+ u16 timlfs;
+ u16 cptlfs;
+ int cpt_blkaddr; /* BLKADDR_CPT0/BLKADDR_CPT1 or 0 for BLKADDR_CPT0 */
+};
+
+/* Structure for relinquishing resources.
+ * 'partial' flag to be used when relinquishing all resources
+ * but only of a certain type. If not set, all resources of all
+ * types provisioned to the RVU function will be detached.
+ */
+struct rsrc_detach {
+ struct mbox_msghdr hdr;
+ u8 partial:1;
+ u8 npalf:1;
+ u8 nixlf:1;
+ u8 sso:1;
+ u8 ssow:1;
+ u8 timlfs:1;
+ u8 cptlfs:1;
+};
+
+/* Number of resources available to the caller.
+ * In reply to MBOX_MSG_FREE_RSRC_CNT.
+ */
+struct free_rsrcs_rsp {
+ struct mbox_msghdr hdr;
+ u16 schq[NIX_TXSCH_LVL_CNT];
+ u16 sso;
+ u16 tim;
+ u16 ssow;
+ u16 cpt;
+ u8 npa;
+ u8 nix;
+ u16 schq_nix1[NIX_TXSCH_LVL_CNT];
+ u8 nix1;
+ u8 cpt1;
+ u8 ree0;
+ u8 ree1;
+};
+
+#define MSIX_VECTOR_INVALID 0xFFFF
+#define MAX_RVU_BLKLF_CNT 256
+
+struct msix_offset_rsp {
+ struct mbox_msghdr hdr;
+ u16 npa_msixoff;
+ u16 nix_msixoff;
+ u16 sso;
+ u16 ssow;
+ u16 timlfs;
+ u16 cptlfs;
+ u16 sso_msixoff[MAX_RVU_BLKLF_CNT];
+ u16 ssow_msixoff[MAX_RVU_BLKLF_CNT];
+ u16 timlf_msixoff[MAX_RVU_BLKLF_CNT];
+ u16 cptlf_msixoff[MAX_RVU_BLKLF_CNT];
+ u16 cpt1_lfs;
+ u16 ree0_lfs;
+ u16 ree1_lfs;
+ u16 cpt1_lf_msixoff[MAX_RVU_BLKLF_CNT];
+ u16 ree0_lf_msixoff[MAX_RVU_BLKLF_CNT];
+ u16 ree1_lf_msixoff[MAX_RVU_BLKLF_CNT];
+};
+
+struct get_hw_cap_rsp {
+ struct mbox_msghdr hdr;
+ u8 nix_fixed_txschq_mapping; /* Schq mapping fixed or flexible */
+ u8 nix_shaping; /* Is shaping and coloring supported */
+ u8 npc_hash_extract; /* Is hash extract supported */
+};
+
+/* CGX mbox message formats */
+
+struct cgx_stats_rsp {
+ struct mbox_msghdr hdr;
+#define CGX_RX_STATS_COUNT 9
+#define CGX_TX_STATS_COUNT 18
+ u64 rx_stats[CGX_RX_STATS_COUNT];
+ u64 tx_stats[CGX_TX_STATS_COUNT];
+};
+
+struct cgx_fec_stats_rsp {
+ struct mbox_msghdr hdr;
+ u64 fec_corr_blks;
+ u64 fec_uncorr_blks;
+};
+/* Structure for requesting the operation for
+ * setting/getting mac address in the CGX interface
+ */
+struct cgx_mac_addr_set_or_get {
+ struct mbox_msghdr hdr;
+ u8 mac_addr[ETH_ALEN];
+ u32 index;
+};
+
+/* Structure for requesting the operation to
+ * add DMAC filter entry into CGX interface
+ */
+struct cgx_mac_addr_add_req {
+ struct mbox_msghdr hdr;
+ u8 mac_addr[ETH_ALEN];
+};
+
+/* Structure for response against the operation to
+ * add DMAC filter entry into CGX interface
+ */
+struct cgx_mac_addr_add_rsp {
+ struct mbox_msghdr hdr;
+ u32 index;
+};
+
+/* Structure for requesting the operation to
+ * delete DMAC filter entry from CGX interface
+ */
+struct cgx_mac_addr_del_req {
+ struct mbox_msghdr hdr;
+ u32 index;
+};
+
+/* Structure for response against the operation to
+ * get maximum supported DMAC filter entries
+ */
+struct cgx_max_dmac_entries_get_rsp {
+ struct mbox_msghdr hdr;
+ u32 max_dmac_filters;
+};
+
+struct cgx_link_user_info {
+ uint64_t link_up:1;
+ uint64_t full_duplex:1;
+ uint64_t lmac_type_id:4;
+ uint64_t speed:20; /* speed in Mbps */
+ uint64_t an:1; /* AN supported or not */
+ uint64_t fec:2; /* FEC type if enabled else 0 */
+#define LMACTYPE_STR_LEN 16
+ char lmac_type[LMACTYPE_STR_LEN];
+};
+
+struct cgx_link_info_msg {
+ struct mbox_msghdr hdr;
+ struct cgx_link_user_info link_info;
+};
+
+struct cgx_pause_frm_cfg {
+ struct mbox_msghdr hdr;
+ u8 set;
+ /* set = 1 if the request is to config pause frames */
+ /* set = 0 if the request is to fetch pause frames config */
+ u8 rx_pause;
+ u8 tx_pause;
+};
+
+enum fec_type {
+ OTX2_FEC_NONE,
+ OTX2_FEC_BASER,
+ OTX2_FEC_RS,
+ OTX2_FEC_STATS_CNT = 2,
+ OTX2_FEC_OFF,
+};
+
+struct fec_mode {
+ struct mbox_msghdr hdr;
+ int fec;
+};
+
+struct sfp_eeprom_s {
+#define SFP_EEPROM_SIZE 256
+ u16 sff_id;
+ u8 buf[SFP_EEPROM_SIZE];
+ u64 reserved;
+};
+
+struct phy_s {
+ struct {
+ u64 can_change_mod_type:1;
+ u64 mod_type:1;
+ u64 has_fec_stats:1;
+ } misc;
+ struct fec_stats_s {
+ u32 rsfec_corr_cws;
+ u32 rsfec_uncorr_cws;
+ u32 brfec_corr_blks;
+ u32 brfec_uncorr_blks;
+ } fec_stats;
+};
+
+struct cgx_lmac_fwdata_s {
+ u16 rw_valid;
+ u64 supported_fec;
+ u64 supported_an;
+ u64 supported_link_modes;
+ /* only applicable if AN is supported */
+ u64 advertised_fec;
+ u64 advertised_link_modes;
+ /* Only applicable if SFP/QSFP slot is present */
+ struct sfp_eeprom_s sfp_eeprom;
+ struct phy_s phy;
+#define LMAC_FWDATA_RESERVED_MEM 1021
+ u64 reserved[LMAC_FWDATA_RESERVED_MEM];
+};
+
+struct cgx_fw_data {
+ struct mbox_msghdr hdr;
+ struct cgx_lmac_fwdata_s fwdata;
+};
+
+struct cgx_set_link_mode_args {
+ u32 speed;
+ u8 duplex;
+ u8 an;
+ u8 ports;
+ u64 mode;
+};
+
+struct cgx_set_link_mode_req {
+#define AUTONEG_UNKNOWN 0xff
+ struct mbox_msghdr hdr;
+ struct cgx_set_link_mode_args args;
+};
+
+struct cgx_set_link_mode_rsp {
+ struct mbox_msghdr hdr;
+ int status;
+};
+
+struct cgx_mac_addr_reset_req {
+ struct mbox_msghdr hdr;
+ u32 index;
+};
+
+struct cgx_mac_addr_update_req {
+ struct mbox_msghdr hdr;
+ u8 mac_addr[ETH_ALEN];
+ u32 index;
+};
+
+struct cgx_mac_addr_update_rsp {
+ struct mbox_msghdr hdr;
+ u32 index;
+};
+
+#define RVU_LMAC_FEAT_FC BIT_ULL(0) /* pause frames */
+#define RVU_LMAC_FEAT_HIGIG2 BIT_ULL(1)
+ /* flow control from physical link higig2 messages */
+#define RVU_LMAC_FEAT_PTP		BIT_ULL(2) /* precision time protocol */
+#define RVU_LMAC_FEAT_DMACF BIT_ULL(3) /* DMAC FILTER */
+#define RVU_MAC_VERSION BIT_ULL(4)
+#define RVU_MAC_CGX BIT_ULL(5)
+#define RVU_MAC_RPM BIT_ULL(6)
+
+struct cgx_features_info_msg {
+ struct mbox_msghdr hdr;
+ u64 lmac_features;
+};
+
+struct rpm_stats_rsp {
+ struct mbox_msghdr hdr;
+#define RPM_RX_STATS_COUNT 43
+#define RPM_TX_STATS_COUNT 34
+ u64 rx_stats[RPM_RX_STATS_COUNT];
+ u64 tx_stats[RPM_TX_STATS_COUNT];
+};
+
+struct cgx_pfc_cfg {
+ struct mbox_msghdr hdr;
+ u8 rx_pause;
+ u8 tx_pause;
+ u16 pfc_en; /* bitmap indicating pfc enabled traffic classes */
+};
+
+struct cgx_pfc_rsp {
+ struct mbox_msghdr hdr;
+ u8 rx_pause;
+ u8 tx_pause;
+};
+
+/* NPC set pkind mbox message format */
+
+struct npc_set_pkind {
+ struct mbox_msghdr hdr;
+#define OTX2_PRIV_FLAGS_DEFAULT BIT_ULL(0)
+#define OTX2_PRIV_FLAGS_CUSTOM BIT_ULL(63)
+ u64 mode;
+#define PKIND_TX BIT_ULL(0)
+#define PKIND_RX BIT_ULL(1)
+ u8 dir;
+	u8 pkind; /* valid only if the custom flag is set */
+ u8 var_len_off; /* Offset of custom header length field.
+ * Valid only for pkind NPC_RX_CUSTOM_PRE_L2_PKIND
+ */
+	u8 var_len_off_mask; /* Mask for the length field within the offset */
+ u8 shift_dir; /* shift direction to get length of the header at var_len_off */
+};
+
+/* NPA mbox message formats */
+
+/* NPA mailbox error codes
+ * Range 301 - 400.
+ */
+enum npa_af_status {
+ NPA_AF_ERR_PARAM = -301,
+ NPA_AF_ERR_AQ_FULL = -302,
+ NPA_AF_ERR_AQ_ENQUEUE = -303,
+ NPA_AF_ERR_AF_LF_INVALID = -304,
+ NPA_AF_ERR_AF_LF_ALLOC = -305,
+ NPA_AF_ERR_LF_RESET = -306,
+};
+
+/* For NPA LF context alloc and init */
+struct npa_lf_alloc_req {
+ struct mbox_msghdr hdr;
+ int node;
+ int aura_sz; /* No of auras */
+ u32 nr_pools; /* No of pools */
+ u64 way_mask;
+};
+
+struct npa_lf_alloc_rsp {
+ struct mbox_msghdr hdr;
+ u32 stack_pg_ptrs; /* No of ptrs per stack page */
+ u32 stack_pg_bytes; /* Size of stack page */
+ u16 qints; /* NPA_AF_CONST::QINTS */
+	u8 cache_lines; /* BATCH ALLOC DMA */
+};
+
+/* NPA AQ enqueue msg */
+struct npa_aq_enq_req {
+ struct mbox_msghdr hdr;
+ u32 aura_id;
+ u8 ctype;
+ u8 op;
+ union {
+ /* Valid when op == WRITE/INIT and ctype == AURA.
+ * LF fills the pool_id in aura.pool_addr. AF will translate
+ * the pool_id to pool context pointer.
+ */
+ struct npa_aura_s aura;
+ /* Valid when op == WRITE/INIT and ctype == POOL */
+ struct npa_pool_s pool;
+ };
+ /* Mask data when op == WRITE (1=write, 0=don't write) */
+ union {
+ /* Valid when op == WRITE and ctype == AURA */
+ struct npa_aura_s aura_mask;
+ /* Valid when op == WRITE and ctype == POOL */
+ struct npa_pool_s pool_mask;
+ };
+};
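
A hedged sketch of the WRITE-with-mask semantics noted in the comments above: only fields whose mask bits are set get written. NPA_AQ_CTYPE_AURA, NPA_AQ_INSTOP_WRITE and the 'ena' field of struct npa_aura_s are assumed to come from the companion NPA headers not shown here:

    /* Sketch: selectively clear the 'ena' bit of an aura context. */
    static void example_disable_aura(struct npa_aq_enq_req *aq, u32 aura_id)
    {
    	aq->aura_id = aura_id;
    	aq->ctype = NPA_AQ_CTYPE_AURA;	/* aura context (assumed enum) */
    	aq->op = NPA_AQ_INSTOP_WRITE;	/* selective field update (assumed enum) */
    	aq->aura.ena = 0;		/* value to write */
    	aq->aura_mask.ena = 1;		/* 1 = write this field, 0 = leave as-is */
    }
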
+
+struct npa_aq_enq_rsp {
+ struct mbox_msghdr hdr;
+ union {
+ /* Valid when op == READ and ctype == AURA */
+ struct npa_aura_s aura;
+ /* Valid when op == READ and ctype == POOL */
+ struct npa_pool_s pool;
+ };
+};
+
+/* Disable all contexts of type 'ctype' */
+struct hwctx_disable_req {
+ struct mbox_msghdr hdr;
+ u8 ctype;
+};
+
+/* NIX mbox message formats */
+
+/* NIX mailbox error codes
+ * Range 401 - 500.
+ */
+enum nix_af_status {
+ NIX_AF_ERR_PARAM = -401,
+ NIX_AF_ERR_AQ_FULL = -402,
+ NIX_AF_ERR_AQ_ENQUEUE = -403,
+ NIX_AF_ERR_AF_LF_INVALID = -404,
+ NIX_AF_ERR_AF_LF_ALLOC = -405,
+ NIX_AF_ERR_TLX_ALLOC_FAIL = -406,
+ NIX_AF_ERR_TLX_INVALID = -407,
+ NIX_AF_ERR_RSS_SIZE_INVALID = -408,
+ NIX_AF_ERR_RSS_GRPS_INVALID = -409,
+ NIX_AF_ERR_FRS_INVALID = -410,
+ NIX_AF_ERR_RX_LINK_INVALID = -411,
+ NIX_AF_INVAL_TXSCHQ_CFG = -412,
+ NIX_AF_SMQ_FLUSH_FAILED = -413,
+ NIX_AF_ERR_LF_RESET = -414,
+ NIX_AF_ERR_RSS_NOSPC_FIELD = -415,
+ NIX_AF_ERR_RSS_NOSPC_ALGO = -416,
+ NIX_AF_ERR_MARK_CFG_FAIL = -417,
+ NIX_AF_ERR_LSO_CFG_FAIL = -418,
+ NIX_AF_INVAL_NPA_PF_FUNC = -419,
+ NIX_AF_INVAL_SSO_PF_FUNC = -420,
+ NIX_AF_ERR_TX_VTAG_NOSPC = -421,
+ NIX_AF_ERR_RX_VTAG_INUSE = -422,
+ NIX_AF_ERR_PTP_CONFIG_FAIL = -423,
+ NIX_AF_ERR_NPC_KEY_NOT_SUPP = -424,
+ NIX_AF_ERR_INVALID_NIXBLK = -425,
+ NIX_AF_ERR_INVALID_BANDPROF = -426,
+ NIX_AF_ERR_IPOLICER_NOTSUPP = -427,
+ NIX_AF_ERR_BANDPROF_INVAL_REQ = -428,
+ NIX_AF_ERR_CQ_CTX_WRITE_ERR = -429,
+ NIX_AF_ERR_AQ_CTX_RETRY_WRITE = -430,
+ NIX_AF_ERR_LINK_CREDITS = -431,
+};
+
+/* For NIX RX vtag action */
+enum nix_rx_vtag0_type {
+ NIX_AF_LFX_RX_VTAG_TYPE0, /* reserved for rx vlan offload */
+ NIX_AF_LFX_RX_VTAG_TYPE1,
+ NIX_AF_LFX_RX_VTAG_TYPE2,
+ NIX_AF_LFX_RX_VTAG_TYPE3,
+ NIX_AF_LFX_RX_VTAG_TYPE4,
+ NIX_AF_LFX_RX_VTAG_TYPE5,
+ NIX_AF_LFX_RX_VTAG_TYPE6,
+ NIX_AF_LFX_RX_VTAG_TYPE7,
+};
+
+/* For NIX LF context alloc and init */
+struct nix_lf_alloc_req {
+ struct mbox_msghdr hdr;
+ int node;
+ u32 rq_cnt; /* No of receive queues */
+ u32 sq_cnt; /* No of send queues */
+ u32 cq_cnt; /* No of completion queues */
+ u8 xqe_sz;
+ u16 rss_sz;
+ u8 rss_grps;
+ u16 npa_func;
+ u16 sso_func;
+ u64 rx_cfg; /* See NIX_AF_LF(0..127)_RX_CFG */
+ u64 way_mask;
+#define NIX_LF_RSS_TAG_LSB_AS_ADDER BIT_ULL(0)
+#define NIX_LF_LBK_BLK_SEL BIT_ULL(1)
+ u64 flags;
+};
+
+struct nix_lf_alloc_rsp {
+ struct mbox_msghdr hdr;
+ u16 sqb_size;
+ u16 rx_chan_base;
+ u16 tx_chan_base;
+ u8 rx_chan_cnt; /* total number of RX channels */
+ u8 tx_chan_cnt; /* total number of TX channels */
+ u8 lso_tsov4_idx;
+ u8 lso_tsov6_idx;
+ u8 mac_addr[ETH_ALEN];
+ u8 lf_rx_stats; /* NIX_AF_CONST1::LF_RX_STATS */
+ u8 lf_tx_stats; /* NIX_AF_CONST1::LF_TX_STATS */
+ u16 cints; /* NIX_AF_CONST2::CINTS */
+ u16 qints; /* NIX_AF_CONST2::QINTS */
+ u8 cgx_links; /* No. of CGX links present in HW */
+ u8 lbk_links; /* No. of LBK links present in HW */
+ u8 sdp_links; /* No. of SDP links present in HW */
+ u8 tx_link; /* Transmit channel link number */
+};
+
+struct nix_lf_free_req {
+ struct mbox_msghdr hdr;
+#define NIX_LF_DISABLE_FLOWS BIT_ULL(0)
+#define NIX_LF_DONT_FREE_TX_VTAG BIT_ULL(1)
+ u64 flags;
+};
+
+/* CN10K NIX AQ enqueue msg */
+struct nix_cn10k_aq_enq_req {
+ struct mbox_msghdr hdr;
+ u32 qidx;
+ u8 ctype;
+ u8 op;
+ union {
+ struct nix_cn10k_rq_ctx_s rq;
+ struct nix_cn10k_sq_ctx_s sq;
+ struct nix_cq_ctx_s cq;
+ struct nix_rsse_s rss;
+ struct nix_rx_mce_s mce;
+ struct nix_bandprof_s prof;
+ };
+ union {
+ struct nix_cn10k_rq_ctx_s rq_mask;
+ struct nix_cn10k_sq_ctx_s sq_mask;
+ struct nix_cq_ctx_s cq_mask;
+ struct nix_rsse_s rss_mask;
+ struct nix_rx_mce_s mce_mask;
+ struct nix_bandprof_s prof_mask;
+ };
+};
+
+struct nix_cn10k_aq_enq_rsp {
+ struct mbox_msghdr hdr;
+ union {
+ struct nix_cn10k_rq_ctx_s rq;
+ struct nix_cn10k_sq_ctx_s sq;
+ struct nix_cq_ctx_s cq;
+ struct nix_rsse_s rss;
+ struct nix_rx_mce_s mce;
+ struct nix_bandprof_s prof;
+ };
+};
+
+/* NIX AQ enqueue msg */
+struct nix_aq_enq_req {
+ struct mbox_msghdr hdr;
+ u32 qidx;
+ u8 ctype;
+ u8 op;
+ union {
+ struct nix_rq_ctx_s rq;
+ struct nix_sq_ctx_s sq;
+ struct nix_cq_ctx_s cq;
+ struct nix_rsse_s rss;
+ struct nix_rx_mce_s mce;
+ struct nix_bandprof_s prof;
+ };
+ union {
+ struct nix_rq_ctx_s rq_mask;
+ struct nix_sq_ctx_s sq_mask;
+ struct nix_cq_ctx_s cq_mask;
+ struct nix_rsse_s rss_mask;
+ struct nix_rx_mce_s mce_mask;
+ struct nix_bandprof_s prof_mask;
+ };
+};
+
+struct nix_aq_enq_rsp {
+ struct mbox_msghdr hdr;
+ union {
+ struct nix_rq_ctx_s rq;
+ struct nix_sq_ctx_s sq;
+ struct nix_cq_ctx_s cq;
+ struct nix_rsse_s rss;
+ struct nix_rx_mce_s mce;
+ struct nix_bandprof_s prof;
+ };
+};
+
+/* Tx scheduler/shaper mailbox messages */
+
+#define MAX_TXSCHQ_PER_FUNC 128
+
+struct nix_txsch_alloc_req {
+ struct mbox_msghdr hdr;
+ /* Scheduler queue count request at each level */
+ u16 schq_contig[NIX_TXSCH_LVL_CNT]; /* No of contiguous queues */
+ u16 schq[NIX_TXSCH_LVL_CNT]; /* No of non-contiguous queues */
+};
+
+struct nix_txsch_alloc_rsp {
+ struct mbox_msghdr hdr;
+ /* Scheduler queue count allocated at each level */
+ u16 schq_contig[NIX_TXSCH_LVL_CNT];
+ u16 schq[NIX_TXSCH_LVL_CNT];
+ /* Scheduler queue list allocated at each level */
+ u16 schq_contig_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
+ u16 schq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
+ u8 aggr_level; /* Traffic aggregation scheduler level */
+ u8 aggr_lvl_rr_prio; /* Aggregation lvl's RR_PRIO config */
+ u8 link_cfg_lvl; /* LINKX_CFG CSRs mapped to TL3 or TL2's index ? */
+};
+
+struct nix_txsch_free_req {
+ struct mbox_msghdr hdr;
+#define TXSCHQ_FREE_ALL BIT_ULL(0)
+ u16 flags;
+ /* Scheduler queue level to be freed */
+ u16 schq_lvl;
+ /* List of scheduler queues to be freed */
+ u16 schq;
+};
+
+struct nix_txschq_config {
+ struct mbox_msghdr hdr;
+ u8 lvl; /* SMQ/MDQ/TL4/TL3/TL2/TL1 */
+ u8 read;
+#define TXSCHQ_IDX_SHIFT 16
+#define TXSCHQ_IDX_MASK (BIT_ULL(10) - 1)
+#define TXSCHQ_IDX(reg, shift) (((reg) >> (shift)) & TXSCHQ_IDX_MASK)
+ u8 num_regs;
+#define MAX_REGS_PER_MBOX_MSG 20
+ u64 reg[MAX_REGS_PER_MBOX_MSG];
+ u64 regval[MAX_REGS_PER_MBOX_MSG];
+ /* All 0's => overwrite with new value */
+ u64 regval_mask[MAX_REGS_PER_MBOX_MSG];
+};
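
A small sketch of batching one register write through this message; NIX_TXSCH_LVL_SMQ is assumed from the companion common header, and the CSR offset/value are caller-supplied placeholders. Leaving regval_mask at zero requests a plain overwrite, as the comment above states:

    static void example_txschq_write(struct nix_txschq_config *cfg,
    				     u64 csr_offset, u64 csr_value)
    {
    	cfg->lvl = NIX_TXSCH_LVL_SMQ;	/* level of the queue being programmed */
    	cfg->read = 0;			/* this is a write request */
    	cfg->num_regs = 1;
    	cfg->reg[0] = csr_offset;	/* placeholder CSR offset */
    	cfg->regval[0] = csr_value;
    	/* regval_mask[0] left at 0: overwrite the register with regval */
    }
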
+
+struct nix_vtag_config {
+ struct mbox_msghdr hdr;
+ /* '0' for 4 octet VTAG, '1' for 8 octet VTAG */
+ u8 vtag_size;
+ /* cfg_type is '0' for tx vlan cfg
+ * cfg_type is '1' for rx vlan cfg
+ */
+ u8 cfg_type;
+ union {
+ /* valid when cfg_type is '0' */
+ struct {
+ u64 vtag0;
+ u64 vtag1;
+
+ /* cfg_vtag0 & cfg_vtag1 fields are valid
+ * when free_vtag0 & free_vtag1 are '0's.
+ */
+ /* cfg_vtag0 = 1 to configure vtag0 */
+ u8 cfg_vtag0 :1;
+ /* cfg_vtag1 = 1 to configure vtag1 */
+ u8 cfg_vtag1 :1;
+
+			/* vtag0_idx & vtag1_idx are only valid when
+			 * both cfg_vtag0 & cfg_vtag1 are '0's.
+			 * These fields are used along with free_vtag0
+			 * & free_vtag1 to free the nix lf's tx_vlan
+			 * configuration.
+			 *
+			 * They denote the indices of the tx_vtag def
+			 * registers that need to be cleared and freed.
+			 */
+ int vtag0_idx;
+ int vtag1_idx;
+
+ /* free_vtag0 & free_vtag1 fields are valid
+ * when cfg_vtag0 & cfg_vtag1 are '0's.
+ */
+ /* free_vtag0 = 1 clears vtag0 configuration
+ * vtag0_idx denotes the index to be cleared.
+ */
+ u8 free_vtag0 :1;
+ /* free_vtag1 = 1 clears vtag1 configuration
+ * vtag1_idx denotes the index to be cleared.
+ */
+ u8 free_vtag1 :1;
+ } tx;
+
+ /* valid when cfg_type is '1' */
+ struct {
+ /* rx vtag type index, valid values are in 0..7 range */
+ u8 vtag_type;
+ /* rx vtag strip */
+ u8 strip_vtag :1;
+ /* rx vtag capture */
+ u8 capture_vtag :1;
+ } rx;
+ };
+};
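
As one hedged example of the RX half of this message, stripping (without capturing) a 4-octet outer VTAG could be requested as below; only fields declared in this header are used:

    static void example_cfg_rx_vtag_strip(struct nix_vtag_config *req)
    {
    	req->vtag_size = 0;		/* '0' => 4 octet VTAG */
    	req->cfg_type = 1;		/* '1' => rx vlan cfg */
    	req->rx.vtag_type = NIX_AF_LFX_RX_VTAG_TYPE0; /* reserved for rx vlan offload */
    	req->rx.strip_vtag = 1;		/* strip the outer tag */
    	req->rx.capture_vtag = 0;	/* do not capture the tag */
    }
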
+
+struct nix_vtag_config_rsp {
+ struct mbox_msghdr hdr;
+ int vtag0_idx;
+ int vtag1_idx;
+	/* Indices of the tx_vtag def registers used to configure
+	 * tx vtag0 & vtag1 headers. These indices are valid only
+	 * when the nix_vtag_config mbox requested vtag0 and/or
+	 * vtag1 configuration.
+	 */
+};
+
+#define NIX_FLOW_KEY_TYPE_L3_L4_MASK (~(0xf << 28))
+
+struct nix_rss_flowkey_cfg {
+ struct mbox_msghdr hdr;
+ int mcam_index; /* MCAM entry index to modify */
+#define NIX_FLOW_KEY_TYPE_PORT BIT(0)
+#define NIX_FLOW_KEY_TYPE_IPV4 BIT(1)
+#define NIX_FLOW_KEY_TYPE_IPV6 BIT(2)
+#define NIX_FLOW_KEY_TYPE_TCP BIT(3)
+#define NIX_FLOW_KEY_TYPE_UDP BIT(4)
+#define NIX_FLOW_KEY_TYPE_SCTP BIT(5)
+#define NIX_FLOW_KEY_TYPE_NVGRE BIT(6)
+#define NIX_FLOW_KEY_TYPE_VXLAN BIT(7)
+#define NIX_FLOW_KEY_TYPE_GENEVE BIT(8)
+#define NIX_FLOW_KEY_TYPE_ETH_DMAC BIT(9)
+#define NIX_FLOW_KEY_TYPE_IPV6_EXT BIT(10)
+#define NIX_FLOW_KEY_TYPE_GTPU BIT(11)
+#define NIX_FLOW_KEY_TYPE_INNR_IPV4 BIT(12)
+#define NIX_FLOW_KEY_TYPE_INNR_IPV6 BIT(13)
+#define NIX_FLOW_KEY_TYPE_INNR_TCP BIT(14)
+#define NIX_FLOW_KEY_TYPE_INNR_UDP BIT(15)
+#define NIX_FLOW_KEY_TYPE_INNR_SCTP BIT(16)
+#define NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC BIT(17)
+#define NIX_FLOW_KEY_TYPE_VLAN BIT(20)
+#define NIX_FLOW_KEY_TYPE_IPV4_PROTO BIT(21)
+#define NIX_FLOW_KEY_TYPE_AH BIT(22)
+#define NIX_FLOW_KEY_TYPE_ESP BIT(23)
+#define NIX_FLOW_KEY_TYPE_L4_DST_ONLY BIT(28)
+#define NIX_FLOW_KEY_TYPE_L4_SRC_ONLY BIT(29)
+#define NIX_FLOW_KEY_TYPE_L3_DST_ONLY BIT(30)
+#define NIX_FLOW_KEY_TYPE_L3_SRC_ONLY BIT(31)
+ u32 flowkey_cfg; /* Flowkey types selected */
+ u8 group; /* RSS context or group */
+};
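
A short sketch of combining the flowkey bits for a common IPv4/IPv6 + TCP/UDP hash; setting mcam_index to -1 (meaning no specific MCAM entry to modify) is an assumption borrowed from typical PF driver usage, not something mandated by this header:

    static void example_rss_flowkey(struct nix_rss_flowkey_cfg *req, u8 rss_group)
    {
    	req->mcam_index = -1;	/* assumption: no specific MCAM entry */
    	req->group = rss_group;	/* RSS context/group chosen by the caller */
    	req->flowkey_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6 |
    			   NIX_FLOW_KEY_TYPE_TCP | NIX_FLOW_KEY_TYPE_UDP;
    }
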
+
+struct nix_rss_flowkey_cfg_rsp {
+ struct mbox_msghdr hdr;
+ u8 alg_idx; /* Selected algo index */
+};
+
+struct nix_set_mac_addr {
+ struct mbox_msghdr hdr;
+ u8 mac_addr[ETH_ALEN]; /* MAC address to be set for this pcifunc */
+};
+
+struct nix_get_mac_addr_rsp {
+ struct mbox_msghdr hdr;
+ u8 mac_addr[ETH_ALEN];
+};
+
+struct nix_mark_format_cfg {
+ struct mbox_msghdr hdr;
+ u8 offset;
+ u8 y_mask;
+ u8 y_val;
+ u8 r_mask;
+ u8 r_val;
+};
+
+struct nix_mark_format_cfg_rsp {
+ struct mbox_msghdr hdr;
+ u8 mark_format_idx;
+};
+
+struct nix_rx_mode {
+ struct mbox_msghdr hdr;
+#define NIX_RX_MODE_UCAST BIT(0)
+#define NIX_RX_MODE_PROMISC BIT(1)
+#define NIX_RX_MODE_ALLMULTI BIT(2)
+#define NIX_RX_MODE_USE_MCE BIT(3)
+ u16 mode;
+};
+
+struct nix_rx_cfg {
+ struct mbox_msghdr hdr;
+#define NIX_RX_OL3_VERIFY BIT(0)
+#define NIX_RX_OL4_VERIFY BIT(1)
+#define NIX_RX_DROP_RE BIT(2)
+ u8 len_verify; /* Outer L3/L4 len check */
+#define NIX_RX_CSUM_OL4_VERIFY BIT(0)
+ u8 csum_verify; /* Outer L4 checksum verification */
+};
+
+struct nix_frs_cfg {
+ struct mbox_msghdr hdr;
+ u8 update_smq; /* Update SMQ's min/max lens */
+ u8 update_minlen; /* Set minlen also */
+ u8 sdp_link; /* Set SDP RX link */
+ u16 maxlen;
+ u16 minlen;
+};
+
+struct nix_lso_format_cfg {
+ struct mbox_msghdr hdr;
+ u64 field_mask;
+#define NIX_LSO_FIELD_MAX 8
+ u64 fields[NIX_LSO_FIELD_MAX];
+};
+
+struct nix_lso_format_cfg_rsp {
+ struct mbox_msghdr hdr;
+ u8 lso_format_idx;
+};
+
+struct nix_bp_cfg_req {
+ struct mbox_msghdr hdr;
+ u16 chan_base; /* Starting channel number */
+ u8 chan_cnt; /* Number of channels */
+ u8 bpid_per_chan;
+ /* bpid_per_chan = 0 assigns single bp id for range of channels */
+ /* bpid_per_chan = 1 assigns separate bp id for each channel */
+};
+
+/* PF can be mapped to either CGX or LBK interface,
+ * so maximum 64 channels are possible.
+ */
+#define NIX_MAX_BPID_CHAN 64
+struct nix_bp_cfg_rsp {
+ struct mbox_msghdr hdr;
+ u16 chan_bpid[NIX_MAX_BPID_CHAN]; /* Channel and bpid mapping */
+ u8 chan_cnt; /* Number of channel for which bpids are assigned */
+};
+
+/* Global NIX inline IPSec configuration */
+struct nix_inline_ipsec_cfg {
+ struct mbox_msghdr hdr;
+ u32 cpt_credit;
+ struct {
+ u8 egrp;
+ u16 opcode;
+ u16 param1;
+ u16 param2;
+ } gen_cfg;
+ struct {
+ u16 cpt_pf_func;
+ u8 cpt_slot;
+ } inst_qsel;
+ u8 enable;
+ u16 bpid;
+ u32 credit_th;
+};
+
+/* Per NIX LF inline IPSec configuration */
+struct nix_inline_ipsec_lf_cfg {
+ struct mbox_msghdr hdr;
+ u64 sa_base_addr;
+ struct {
+ u32 tag_const;
+ u16 lenm1_max;
+ u8 sa_pow2_size;
+ u8 tt;
+ } ipsec_cfg0;
+ struct {
+ u32 sa_idx_max;
+ u8 sa_idx_w;
+ } ipsec_cfg1;
+ u8 enable;
+};
+
+struct nix_hw_info {
+ struct mbox_msghdr hdr;
+ u16 rsvs16;
+ u16 max_mtu;
+ u16 min_mtu;
+ u32 rpm_dwrr_mtu;
+ u32 sdp_dwrr_mtu;
+ u32 lbk_dwrr_mtu;
+ u32 rsvd32[1];
+ u64 rsvd[15]; /* Add reserved fields for future expansion */
+};
+
+struct nix_bandprof_alloc_req {
+ struct mbox_msghdr hdr;
+ /* Count of profiles needed per layer */
+ u16 prof_count[BAND_PROF_NUM_LAYERS];
+};
+
+struct nix_bandprof_alloc_rsp {
+ struct mbox_msghdr hdr;
+ u16 prof_count[BAND_PROF_NUM_LAYERS];
+
+	/* There is no need to allocate more than 1 bandwidth profile
+ * per RQ of a PF_FUNC's NIXLF. So limit the maximum
+ * profiles to 64 per PF_FUNC.
+ */
+#define MAX_BANDPROF_PER_PFFUNC 64
+ u16 prof_idx[BAND_PROF_NUM_LAYERS][MAX_BANDPROF_PER_PFFUNC];
+};
+
+struct nix_bandprof_free_req {
+ struct mbox_msghdr hdr;
+ u8 free_all;
+ u16 prof_count[BAND_PROF_NUM_LAYERS];
+ u16 prof_idx[BAND_PROF_NUM_LAYERS][MAX_BANDPROF_PER_PFFUNC];
+};
+
+struct nix_bandprof_get_hwinfo_rsp {
+ struct mbox_msghdr hdr;
+ u16 prof_count[BAND_PROF_NUM_LAYERS];
+ u32 policer_timeunit;
+};
+
+/* NPC mbox message structs */
+
+#define NPC_MCAM_ENTRY_INVALID 0xFFFF
+#define NPC_MCAM_INVALID_MAP 0xFFFF
+
+/* NPC mailbox error codes
+ * Range 701 - 800.
+ */
+enum npc_af_status {
+ NPC_MCAM_INVALID_REQ = -701,
+ NPC_MCAM_ALLOC_DENIED = -702,
+ NPC_MCAM_ALLOC_FAILED = -703,
+ NPC_MCAM_PERM_DENIED = -704,
+ NPC_FLOW_INTF_INVALID = -707,
+ NPC_FLOW_CHAN_INVALID = -708,
+ NPC_FLOW_NO_NIXLF = -709,
+ NPC_FLOW_NOT_SUPPORTED = -710,
+ NPC_FLOW_VF_PERM_DENIED = -711,
+ NPC_FLOW_VF_NOT_INIT = -712,
+ NPC_FLOW_VF_OVERLAP = -713,
+};
+
+struct npc_mcam_alloc_entry_req {
+ struct mbox_msghdr hdr;
+#define NPC_MAX_NONCONTIG_ENTRIES 256
+ u8 contig; /* Contiguous entries ? */
+#define NPC_MCAM_ANY_PRIO 0
+#define NPC_MCAM_LOWER_PRIO 1
+#define NPC_MCAM_HIGHER_PRIO 2
+ u8 priority; /* Lower or higher w.r.t ref_entry */
+ u16 ref_entry;
+ u16 count; /* Number of entries requested */
+};
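
A minimal sketch of requesting a single MCAM entry with no ordering constraint; ref_entry is assumed to be ignored when priority is NPC_MCAM_ANY_PRIO:

    static void example_alloc_one_mcam_entry(struct npc_mcam_alloc_entry_req *req)
    {
    	req->contig = 1;			/* contiguous block (of one) */
    	req->count = 1;				/* a single entry */
    	req->priority = NPC_MCAM_ANY_PRIO;	/* no ordering vs. ref_entry */
    	req->ref_entry = 0;			/* assumed unused with ANY_PRIO */
    }
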
+
+struct npc_mcam_alloc_entry_rsp {
+ struct mbox_msghdr hdr;
+	u16 entry; /* Entry allocated or start index if contiguous.
+		    * Invalid in case of non-contiguous allocation.
+		    */
+ u16 count; /* Number of entries allocated */
+ u16 free_count; /* Number of entries available */
+ u16 entry_list[NPC_MAX_NONCONTIG_ENTRIES];
+};
+
+struct npc_mcam_free_entry_req {
+ struct mbox_msghdr hdr;
+ u16 entry; /* Entry index to be freed */
+ u8 all; /* If all entries allocated to this PFVF to be freed */
+};
+
+struct mcam_entry {
+#define NPC_MAX_KWS_IN_KEY 7 /* Number of keywords in max keywidth */
+ u64 kw[NPC_MAX_KWS_IN_KEY];
+ u64 kw_mask[NPC_MAX_KWS_IN_KEY];
+ u64 action;
+ u64 vtag_action;
+};
+
+struct npc_mcam_write_entry_req {
+ struct mbox_msghdr hdr;
+ struct mcam_entry entry_data;
+ u16 entry; /* MCAM entry to write this match key */
+ u16 cntr; /* Counter for this MCAM entry */
+ u8 intf; /* Rx or Tx interface */
+	u8 enable_entry; /* Enable this MCAM entry ? */
+ u8 set_cntr; /* Set counter for this entry ? */
+};
+
+/* Enable/Disable a given entry */
+struct npc_mcam_ena_dis_entry_req {
+ struct mbox_msghdr hdr;
+ u16 entry;
+};
+
+struct npc_mcam_shift_entry_req {
+ struct mbox_msghdr hdr;
+#define NPC_MCAM_MAX_SHIFTS 64
+ u16 curr_entry[NPC_MCAM_MAX_SHIFTS];
+ u16 new_entry[NPC_MCAM_MAX_SHIFTS];
+ u16 shift_count; /* Number of entries to shift */
+};
+
+struct npc_mcam_shift_entry_rsp {
+ struct mbox_msghdr hdr;
+ u16 failed_entry_idx; /* Index in 'curr_entry', not entry itself */
+};
+
+struct npc_mcam_alloc_counter_req {
+ struct mbox_msghdr hdr;
+ u8 contig; /* Contiguous counters ? */
+#define NPC_MAX_NONCONTIG_COUNTERS 64
+ u16 count; /* Number of counters requested */
+};
+
+struct npc_mcam_alloc_counter_rsp {
+ struct mbox_msghdr hdr;
+	u16 cntr; /* Counter allocated or start index if contiguous.
+		   * Invalid in case of non-contiguous allocation.
+		   */
+ u16 count; /* Number of counters allocated */
+ u16 cntr_list[NPC_MAX_NONCONTIG_COUNTERS];
+};
+
+struct npc_mcam_oper_counter_req {
+ struct mbox_msghdr hdr;
+	u16 cntr; /* Free a counter or clear/fetch its stats */
+};
+
+struct npc_mcam_oper_counter_rsp {
+ struct mbox_msghdr hdr;
+ u64 stat; /* valid only while fetching counter's stats */
+};
+
+struct npc_mcam_unmap_counter_req {
+ struct mbox_msghdr hdr;
+ u16 cntr;
+ u16 entry; /* Entry and counter to be unmapped */
+ u8 all; /* Unmap all entries using this counter ? */
+};
+
+struct npc_mcam_alloc_and_write_entry_req {
+ struct mbox_msghdr hdr;
+ struct mcam_entry entry_data;
+ u16 ref_entry;
+ u8 priority; /* Lower or higher w.r.t ref_entry */
+ u8 intf; /* Rx or Tx interface */
+	u8 enable_entry; /* Enable this MCAM entry ? */
+ u8 alloc_cntr; /* Allocate counter and map ? */
+};
+
+struct npc_mcam_alloc_and_write_entry_rsp {
+ struct mbox_msghdr hdr;
+ u16 entry;
+ u16 cntr;
+};
+
+struct npc_get_kex_cfg_rsp {
+ struct mbox_msghdr hdr;
+ u64 rx_keyx_cfg; /* NPC_AF_INTF(0)_KEX_CFG */
+ u64 tx_keyx_cfg; /* NPC_AF_INTF(1)_KEX_CFG */
+#define NPC_MAX_INTF 2
+#define NPC_MAX_LID 8
+#define NPC_MAX_LT 16
+#define NPC_MAX_LD 2
+#define NPC_MAX_LFL 16
+ /* NPC_AF_KEX_LDATA(0..1)_FLAGS_CFG */
+ u64 kex_ld_flags[NPC_MAX_LD];
+ /* NPC_AF_INTF(0..1)_LID(0..7)_LT(0..15)_LD(0..1)_CFG */
+ u64 intf_lid_lt_ld[NPC_MAX_INTF][NPC_MAX_LID][NPC_MAX_LT][NPC_MAX_LD];
+ /* NPC_AF_INTF(0..1)_LDATA(0..1)_FLAGS(0..15)_CFG */
+ u64 intf_ld_flags[NPC_MAX_INTF][NPC_MAX_LD][NPC_MAX_LFL];
+#define MKEX_NAME_LEN 128
+ u8 mkex_pfl_name[MKEX_NAME_LEN];
+};
+
+struct ptp_get_cap_rsp {
+ struct mbox_msghdr hdr;
+#define PTP_CAP_HW_ATOMIC_UPDATE BIT_ULL(0)
+ u64 cap;
+};
+
+struct flow_msg {
+ unsigned char dmac[6];
+ unsigned char smac[6];
+ __be16 etype;
+ __be16 vlan_etype;
+ __be16 vlan_tci;
+ union {
+ __be32 ip4src;
+ __be32 ip6src[4];
+ };
+ union {
+ __be32 ip4dst;
+ __be32 ip6dst[4];
+ };
+ union {
+ __be32 spi;
+ };
+
+ u8 tos;
+ u8 ip_ver;
+ u8 ip_proto;
+ u8 tc;
+ __be16 sport;
+ __be16 dport;
+ union {
+ u8 ip_flag;
+ u8 next_header;
+ };
+ __be16 vlan_itci;
+};
+
+struct npc_install_flow_req {
+ struct mbox_msghdr hdr;
+ struct flow_msg packet;
+ struct flow_msg mask;
+ u64 features;
+ u16 entry;
+ u16 channel;
+ u16 chan_mask;
+ u8 intf;
+	u8 set_cntr; /* Set a counter for this entry if one is available ? */
+ u8 default_rule;
+ u8 append; /* overwrite(0) or append(1) flow to default rule? */
+ u16 vf;
+ /* action */
+ u32 index;
+ u16 match_id;
+ u8 flow_key_alg;
+ u8 op;
+ /* vtag rx action */
+ u8 vtag0_type;
+ u8 vtag0_valid;
+ u8 vtag1_type;
+ u8 vtag1_valid;
+ /* vtag tx action */
+ u16 vtag0_def;
+ u8 vtag0_op;
+ u16 vtag1_def;
+ u8 vtag1_op;
+ /* old counter value */
+ u16 cntr_val;
+};
+
+struct npc_install_flow_rsp {
+ struct mbox_msghdr hdr;
+ int counter; /* negative if no counter else counter number */
+};
+
+struct npc_delete_flow_req {
+ struct mbox_msghdr hdr;
+ u16 entry;
+	u16 start; /* Disable range of entries */
+ u16 end;
+ u8 all; /* PF + VFs */
+};
+
+struct npc_delete_flow_rsp {
+ struct mbox_msghdr hdr;
+ u16 cntr_val;
+};
+
+struct npc_mcam_read_entry_req {
+ struct mbox_msghdr hdr;
+ u16 entry; /* MCAM entry to read */
+};
+
+struct npc_mcam_read_entry_rsp {
+ struct mbox_msghdr hdr;
+ struct mcam_entry entry_data;
+ u8 intf;
+ u8 enable;
+};
+
+struct npc_mcam_read_base_rule_rsp {
+ struct mbox_msghdr hdr;
+ struct mcam_entry entry;
+};
+
+struct npc_mcam_get_stats_req {
+ struct mbox_msghdr hdr;
+ u16 entry; /* mcam entry */
+};
+
+struct npc_mcam_get_stats_rsp {
+ struct mbox_msghdr hdr;
+ u64 stat; /* counter stats */
+ u8 stat_ena; /* enabled */
+};
+
+struct npc_get_field_hash_info_req {
+ struct mbox_msghdr hdr;
+ u8 intf;
+};
+
+struct npc_get_field_hash_info_rsp {
+ struct mbox_msghdr hdr;
+ u64 secret_key[3];
+#define NPC_MAX_HASH 2
+#define NPC_MAX_HASH_MASK 2
+ /* NPC_AF_INTF(0..1)_HASH(0..1)_MASK(0..1) */
+ u64 hash_mask[NPC_MAX_INTF][NPC_MAX_HASH][NPC_MAX_HASH_MASK];
+ /* NPC_AF_INTF(0..1)_HASH(0..1)_RESULT_CTRL */
+ u64 hash_ctrl[NPC_MAX_INTF][NPC_MAX_HASH];
+};
+
+enum ptp_op {
+ PTP_OP_ADJFINE = 0,
+ PTP_OP_GET_CLOCK = 1,
+ PTP_OP_GET_TSTMP = 2,
+ PTP_OP_SET_THRESH = 3,
+ PTP_OP_EXTTS_ON = 4,
+ PTP_OP_ADJTIME = 5,
+ PTP_OP_SET_CLOCK = 6,
+};
+
+struct ptp_req {
+ struct mbox_msghdr hdr;
+ u8 op;
+ s64 scaled_ppm;
+ u64 thresh;
+ int extts_on;
+ s64 delta;
+ u64 clk;
+};
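
A tiny sketch of a frequency-adjust request; scaled_ppm follows the kernel PTP core convention of parts per million with a 16-bit binary fraction:

    static void example_ptp_adjfine(struct ptp_req *req, s64 scaled_ppm)
    {
    	req->op = PTP_OP_ADJFINE;	/* frequency adjustment */
    	req->scaled_ppm = scaled_ppm;	/* ppm with a 16-bit binary fraction */
    }
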
+
+struct ptp_rsp {
+ struct mbox_msghdr hdr;
+ u64 clk;
+ u64 tsc;
+};
+
+struct npc_get_field_status_req {
+ struct mbox_msghdr hdr;
+ u8 intf;
+ u8 field;
+};
+
+struct npc_get_field_status_rsp {
+ struct mbox_msghdr hdr;
+ u8 enable;
+};
+
+struct set_vf_perm {
+ struct mbox_msghdr hdr;
+ u16 vf;
+#define RESET_VF_PERM BIT_ULL(0)
+#define VF_TRUSTED BIT_ULL(1)
+ u64 flags;
+};
+
+struct lmtst_tbl_setup_req {
+ struct mbox_msghdr hdr;
+ u64 dis_sched_early_comp :1;
+ u64 sch_ena :1;
+ u64 dis_line_pref :1;
+ u64 ssow_pf_func :13;
+ u16 base_pcifunc;
+ u8 use_local_lmt_region;
+ u64 lmt_iova;
+ u64 rsvd[4];
+};
+
+/* CPT mailbox error codes
+ * Range 901 - 1000.
+ */
+enum cpt_af_status {
+ CPT_AF_ERR_PARAM = -901,
+ CPT_AF_ERR_GRP_INVALID = -902,
+ CPT_AF_ERR_LF_INVALID = -903,
+ CPT_AF_ERR_ACCESS_DENIED = -904,
+ CPT_AF_ERR_SSO_PF_FUNC_INVALID = -905,
+ CPT_AF_ERR_NIX_PF_FUNC_INVALID = -906,
+ CPT_AF_ERR_INLINE_IPSEC_INB_ENA = -907,
+ CPT_AF_ERR_INLINE_IPSEC_OUT_ENA = -908
+};
+
+/* CPT mbox message formats */
+struct cpt_rd_wr_reg_msg {
+ struct mbox_msghdr hdr;
+ u64 reg_offset;
+ u64 *ret_val;
+ u64 val;
+ u8 is_write;
+ int blkaddr;
+};
+
+struct cpt_lf_alloc_req_msg {
+ struct mbox_msghdr hdr;
+ u16 nix_pf_func;
+ u16 sso_pf_func;
+ u16 eng_grpmsk;
+ int blkaddr;
+ u8 ctx_ilen_valid : 1;
+ u8 ctx_ilen : 7;
+};
+
+#define CPT_INLINE_INBOUND 0
+#define CPT_INLINE_OUTBOUND 1
+
+/* Mailbox message request format for CPT IPsec
+ * inline inbound and outbound configuration.
+ */
+struct cpt_inline_ipsec_cfg_msg {
+ struct mbox_msghdr hdr;
+ u8 enable;
+ u8 slot;
+ u8 dir;
+ u8 sso_pf_func_ovrd;
+ u16 sso_pf_func; /* inbound path SSO_PF_FUNC */
+ u16 nix_pf_func; /* outbound path NIX_PF_FUNC */
+};
+
+/* Mailbox message request and response format for CPT stats. */
+struct cpt_sts_req {
+ struct mbox_msghdr hdr;
+ u8 blkaddr;
+};
+
+struct cpt_sts_rsp {
+ struct mbox_msghdr hdr;
+ u64 inst_req_pc;
+ u64 inst_lat_pc;
+ u64 rd_req_pc;
+ u64 rd_lat_pc;
+ u64 rd_uc_pc;
+ u64 active_cycles_pc;
+ u64 ctx_mis_pc;
+ u64 ctx_hit_pc;
+ u64 ctx_aop_pc;
+ u64 ctx_aop_lat_pc;
+ u64 ctx_ifetch_pc;
+ u64 ctx_ifetch_lat_pc;
+ u64 ctx_ffetch_pc;
+ u64 ctx_ffetch_lat_pc;
+ u64 ctx_wback_pc;
+ u64 ctx_wback_lat_pc;
+ u64 ctx_psh_pc;
+ u64 ctx_psh_lat_pc;
+ u64 ctx_err;
+ u64 ctx_enc_id;
+ u64 ctx_flush_timer;
+ u64 rxc_time;
+ u64 rxc_time_cfg;
+ u64 rxc_active_sts;
+ u64 rxc_zombie_sts;
+ u64 busy_sts_ae;
+ u64 free_sts_ae;
+ u64 busy_sts_se;
+ u64 free_sts_se;
+ u64 busy_sts_ie;
+ u64 free_sts_ie;
+ u64 exe_err_info;
+ u64 cptclk_cnt;
+ u64 diag;
+ u64 rxc_dfrg;
+ u64 x2p_link_cfg0;
+ u64 x2p_link_cfg1;
+};
+
+/* Mailbox message request format to configure reassembly timeout. */
+struct cpt_rxc_time_cfg_req {
+ struct mbox_msghdr hdr;
+ int blkaddr;
+ u32 step;
+ u16 zombie_thres;
+ u16 zombie_limit;
+ u16 active_thres;
+ u16 active_limit;
+};
+
+/* Mailbox message request format to request for CPT_INST_S lmtst. */
+struct cpt_inst_lmtst_req {
+ struct mbox_msghdr hdr;
+ u64 inst[8];
+ u64 rsvd;
+};
+
+/* Mailbox message format to request for CPT LF reset */
+struct cpt_lf_rst_req {
+ struct mbox_msghdr hdr;
+ u32 slot;
+ u32 rsvd;
+};
+
+/* Mailbox message format to request for CPT faulted engines */
+struct cpt_flt_eng_info_req {
+ struct mbox_msghdr hdr;
+ int blkaddr;
+ bool reset;
+ u32 rsvd;
+};
+
+struct cpt_flt_eng_info_rsp {
+ struct mbox_msghdr hdr;
+ u64 flt_eng_map[CPT_10K_AF_INT_VEC_RVU];
+ u64 rcvrd_eng_map[CPT_10K_AF_INT_VEC_RVU];
+ u64 rsvd;
+};
+
+struct sdp_node_info {
+	/* Node to which this PF belongs */
+ u8 node_id;
+ u8 max_vfs;
+ u8 num_pf_rings;
+ u8 pf_srn;
+#define SDP_MAX_VFS 128
+ u8 vf_rings[SDP_MAX_VFS];
+};
+
+struct sdp_chan_info_msg {
+ struct mbox_msghdr hdr;
+ struct sdp_node_info info;
+};
+
+struct sdp_get_chan_info_msg {
+ struct mbox_msghdr hdr;
+ u16 chan_base;
+ u16 num_chan;
+};
+
+/* CGX mailbox error codes
+ * Range 1101 - 1200.
+ */
+enum cgx_af_status {
+ LMAC_AF_ERR_INVALID_PARAM = -1101,
+ LMAC_AF_ERR_PF_NOT_MAPPED = -1102,
+ LMAC_AF_ERR_PERM_DENIED = -1103,
+ LMAC_AF_ERR_PFC_ENADIS_PERM_DENIED = -1104,
+ LMAC_AF_ERR_8023PAUSE_ENADIS_PERM_DENIED = -1105,
+ LMAC_AF_ERR_CMD_TIMEOUT = -1106,
+ LMAC_AF_ERR_FIRMWARE_DATA_NOT_MAPPED = -1107,
+ LMAC_AF_ERR_EXACT_MATCH_TBL_ADD_FAILED = -1108,
+ LMAC_AF_ERR_EXACT_MATCH_TBL_DEL_FAILED = -1109,
+ LMAC_AF_ERR_EXACT_MATCH_TBL_LOOK_UP_FAILED = -1110,
+};
+
+enum mcs_direction {
+ MCS_RX,
+ MCS_TX,
+};
+
+enum mcs_rsrc_type {
+ MCS_RSRC_TYPE_FLOWID,
+ MCS_RSRC_TYPE_SECY,
+ MCS_RSRC_TYPE_SC,
+ MCS_RSRC_TYPE_SA,
+};
+
+struct mcs_alloc_rsrc_req {
+ struct mbox_msghdr hdr;
+ u8 rsrc_type;
+ u8 rsrc_cnt; /* Resources count */
+ u8 mcs_id; /* MCS block ID */
+ u8 dir; /* Macsec ingress or egress side */
+	u8 all; /* Allocate one of each resource type */
+ u64 rsvd;
+};
+
+struct mcs_alloc_rsrc_rsp {
+ struct mbox_msghdr hdr;
+ u8 flow_ids[128]; /* Index of reserved entries */
+ u8 secy_ids[128];
+ u8 sc_ids[128];
+ u8 sa_ids[256];
+ u8 rsrc_type;
+ u8 rsrc_cnt; /* No of entries reserved */
+ u8 mcs_id;
+ u8 dir;
+ u8 all;
+ u8 rsvd[256]; /* reserved fields for future expansion */
+};
+
+struct mcs_free_rsrc_req {
+ struct mbox_msghdr hdr;
+ u8 rsrc_id; /* Index of the entry to be freed */
+ u8 rsrc_type;
+ u8 mcs_id;
+ u8 dir;
+ u8 all; /* Free all the cam resources */
+ u64 rsvd;
+};
+
+struct mcs_flowid_entry_write_req {
+ struct mbox_msghdr hdr;
+ u64 data[4];
+ u64 mask[4];
+ u64 sci; /* CNF10K-B for tx_secy_mem_map */
+ u8 flow_id;
+ u8 secy_id; /* secyid for which flowid is mapped */
+ u8 sc_id; /* Valid if dir = MCS_TX, SC_CAM id mapped to flowid */
+ u8 ena; /* Enable tcam entry */
+ u8 ctrl_pkt;
+ u8 mcs_id;
+ u8 dir;
+ u64 rsvd;
+};
+
+struct mcs_secy_plcy_write_req {
+ struct mbox_msghdr hdr;
+ u64 plcy;
+ u8 secy_id;
+ u8 mcs_id;
+ u8 dir;
+ u64 rsvd;
+};
+
+/* RX SC_CAM mapping */
+struct mcs_rx_sc_cam_write_req {
+ struct mbox_msghdr hdr;
+ u64 sci; /* SCI */
+ u64 secy_id; /* secy index mapped to SC */
+ u8 sc_id; /* SC CAM entry index */
+ u8 mcs_id;
+ u64 rsvd;
+};
+
+struct mcs_sa_plcy_write_req {
+ struct mbox_msghdr hdr;
+ u64 plcy[2][9]; /* Support 2 SA policy */
+ u8 sa_index[2];
+ u8 sa_cnt;
+ u8 mcs_id;
+ u8 dir;
+ u64 rsvd;
+};
+
+struct mcs_tx_sc_sa_map {
+ struct mbox_msghdr hdr;
+ u8 sa_index0;
+ u8 sa_index1;
+ u8 rekey_ena;
+ u8 sa_index0_vld;
+ u8 sa_index1_vld;
+ u8 tx_sa_active;
+ u64 sectag_sci;
+ u8 sc_id; /* used as index for SA_MEM_MAP */
+ u8 mcs_id;
+ u64 rsvd;
+};
+
+struct mcs_rx_sc_sa_map {
+ struct mbox_msghdr hdr;
+ u8 sa_index;
+ u8 sa_in_use;
+ u8 sc_id;
+ u8 an; /* value range 0-3, sc_id + an used as index SA_MEM_MAP */
+ u8 mcs_id;
+ u64 rsvd;
+};
+
+struct mcs_flowid_ena_dis_entry {
+ struct mbox_msghdr hdr;
+ u8 flow_id;
+ u8 ena;
+ u8 mcs_id;
+ u8 dir;
+ u64 rsvd;
+};
+
+struct mcs_pn_table_write_req {
+ struct mbox_msghdr hdr;
+ u64 next_pn;
+ u8 pn_id;
+ u8 mcs_id;
+ u8 dir;
+ u64 rsvd;
+};
+
+struct mcs_hw_info {
+ struct mbox_msghdr hdr;
+ u8 num_mcs_blks; /* Number of MCS blocks */
+ u8 tcam_entries; /* RX/TX Tcam entries per mcs block */
+ u8 secy_entries; /* RX/TX SECY entries per mcs block */
+ u8 sc_entries; /* RX/TX SC CAM entries per mcs block */
+ u16 sa_entries; /* PN table entries = SA entries */
+ u64 rsvd[16];
+};
+
+struct mcs_set_active_lmac {
+ struct mbox_msghdr hdr;
+ u32 lmac_bmap; /* bitmap of active lmac per mcs block */
+ u8 mcs_id;
+ u16 chan_base; /* MCS channel base */
+ u64 rsvd;
+};
+
+struct mcs_set_lmac_mode {
+ struct mbox_msghdr hdr;
+ u8 mode; /* 1:Bypass 0:Operational */
+ u8 lmac_id;
+ u8 mcs_id;
+ u64 rsvd;
+};
+
+struct mcs_port_reset_req {
+ struct mbox_msghdr hdr;
+ u8 reset;
+ u8 mcs_id;
+ u8 port_id;
+ u64 rsvd;
+};
+
+struct mcs_port_cfg_set_req {
+ struct mbox_msghdr hdr;
+ u8 cstm_tag_rel_mode_sel;
+ u8 custom_hdr_enb;
+ u8 fifo_skid;
+ u8 port_mode;
+ u8 port_id;
+ u8 mcs_id;
+ u64 rsvd;
+};
+
+struct mcs_port_cfg_get_req {
+ struct mbox_msghdr hdr;
+ u8 port_id;
+ u8 mcs_id;
+ u64 rsvd;
+};
+
+struct mcs_port_cfg_get_rsp {
+ struct mbox_msghdr hdr;
+ u8 cstm_tag_rel_mode_sel;
+ u8 custom_hdr_enb;
+ u8 fifo_skid;
+ u8 port_mode;
+ u8 port_id;
+ u8 mcs_id;
+ u64 rsvd;
+};
+
+struct mcs_custom_tag_cfg_get_req {
+ struct mbox_msghdr hdr;
+ u8 mcs_id;
+ u8 dir;
+ u64 rsvd;
+};
+
+struct mcs_custom_tag_cfg_get_rsp {
+ struct mbox_msghdr hdr;
+ u16 cstm_etype[8];
+ u8 cstm_indx[8];
+ u8 cstm_etype_en;
+ u8 mcs_id;
+ u8 dir;
+ u64 rsvd;
+};
+
+/* MCS mailbox error codes
+ * Range 1201 - 1300.
+ */
+enum mcs_af_status {
+ MCS_AF_ERR_INVALID_MCSID = -1201,
+ MCS_AF_ERR_NOT_MAPPED = -1202,
+};
+
+struct mcs_set_pn_threshold {
+ struct mbox_msghdr hdr;
+ u64 threshold;
+ u8 xpn; /* '1' for setting xpn threshold */
+ u8 mcs_id;
+ u8 dir;
+ u64 rsvd;
+};
+
+enum mcs_ctrl_pkt_rulew_type {
+ MCS_CTRL_PKT_RULE_TYPE_ETH,
+ MCS_CTRL_PKT_RULE_TYPE_DA,
+ MCS_CTRL_PKT_RULE_TYPE_RANGE,
+ MCS_CTRL_PKT_RULE_TYPE_COMBO,
+ MCS_CTRL_PKT_RULE_TYPE_MAC,
+};
+
+struct mcs_alloc_ctrl_pkt_rule_req {
+ struct mbox_msghdr hdr;
+ u8 rule_type;
+ u8 mcs_id; /* MCS block ID */
+ u8 dir; /* Macsec ingress or egress side */
+ u64 rsvd;
+};
+
+struct mcs_alloc_ctrl_pkt_rule_rsp {
+ struct mbox_msghdr hdr;
+ u8 rule_idx;
+ u8 rule_type;
+ u8 mcs_id;
+ u8 dir;
+ u64 rsvd;
+};
+
+struct mcs_free_ctrl_pkt_rule_req {
+ struct mbox_msghdr hdr;
+ u8 rule_idx;
+ u8 rule_type;
+ u8 mcs_id;
+ u8 dir;
+ u8 all;
+ u64 rsvd;
+};
+
+struct mcs_ctrl_pkt_rule_write_req {
+ struct mbox_msghdr hdr;
+ u64 data0;
+ u64 data1;
+ u64 data2;
+ u8 rule_idx;
+ u8 rule_type;
+ u8 mcs_id;
+ u8 dir;
+ u64 rsvd;
+};
+
+struct mcs_stats_req {
+ struct mbox_msghdr hdr;
+ u8 id;
+ u8 mcs_id;
+ u8 dir;
+ u64 rsvd;
+};
+
+struct mcs_flowid_stats {
+ struct mbox_msghdr hdr;
+ u64 tcam_hit_cnt;
+ u64 rsvd;
+};
+
+struct mcs_secy_stats {
+ struct mbox_msghdr hdr;
+ u64 ctl_pkt_bcast_cnt;
+ u64 ctl_pkt_mcast_cnt;
+ u64 ctl_pkt_ucast_cnt;
+ u64 ctl_octet_cnt;
+ u64 unctl_pkt_bcast_cnt;
+ u64 unctl_pkt_mcast_cnt;
+ u64 unctl_pkt_ucast_cnt;
+ u64 unctl_octet_cnt;
+ /* Valid only for RX */
+ u64 octet_decrypted_cnt;
+ u64 octet_validated_cnt;
+ u64 pkt_port_disabled_cnt;
+ u64 pkt_badtag_cnt;
+ u64 pkt_nosa_cnt;
+ u64 pkt_nosaerror_cnt;
+ u64 pkt_tagged_ctl_cnt;
+ u64 pkt_untaged_cnt;
+ u64 pkt_ctl_cnt; /* CN10K-B */
+ u64 pkt_notag_cnt; /* CNF10K-B */
+ /* Valid only for TX */
+ u64 octet_encrypted_cnt;
+ u64 octet_protected_cnt;
+ u64 pkt_noactivesa_cnt;
+ u64 pkt_toolong_cnt;
+ u64 pkt_untagged_cnt;
+ u64 rsvd[4];
+};
+
+struct mcs_port_stats {
+ struct mbox_msghdr hdr;
+ u64 tcam_miss_cnt;
+ u64 parser_err_cnt;
+ u64 preempt_err_cnt; /* CNF10K-B */
+ u64 sectag_insert_err_cnt;
+ u64 rsvd[4];
+};
+
+/* Only for CN10K-B */
+struct mcs_sa_stats {
+ struct mbox_msghdr hdr;
+ /* RX */
+ u64 pkt_invalid_cnt;
+ u64 pkt_nosaerror_cnt;
+ u64 pkt_notvalid_cnt;
+ u64 pkt_ok_cnt;
+ u64 pkt_nosa_cnt;
+ /* TX */
+ u64 pkt_encrypt_cnt;
+ u64 pkt_protected_cnt;
+ u64 rsvd[4];
+};
+
+struct mcs_sc_stats {
+ struct mbox_msghdr hdr;
+ /* RX */
+ u64 hit_cnt;
+ u64 pkt_invalid_cnt;
+ u64 pkt_late_cnt;
+ u64 pkt_notvalid_cnt;
+ u64 pkt_unchecked_cnt;
+ u64 pkt_delay_cnt; /* CNF10K-B */
+ u64 pkt_ok_cnt; /* CNF10K-B */
+ u64 octet_decrypt_cnt; /* CN10K-B */
+ u64 octet_validate_cnt; /* CN10K-B */
+ /* TX */
+ u64 pkt_encrypt_cnt;
+ u64 pkt_protected_cnt;
+ u64 octet_encrypt_cnt; /* CN10K-B */
+ u64 octet_protected_cnt; /* CN10K-B */
+ u64 rsvd[4];
+};
+
+struct mcs_clear_stats {
+ struct mbox_msghdr hdr;
+#define MCS_FLOWID_STATS 0
+#define MCS_SECY_STATS 1
+#define MCS_SC_STATS 2
+#define MCS_SA_STATS 3
+#define MCS_PORT_STATS 4
+ u8 type; /* FLOWID, SECY, SC, SA, PORT */
+	u8 id; /* For type = PORT; if id = 0xFF (invalid), port number is derived from pcifunc */
+ u8 mcs_id;
+ u8 dir;
+ u8 all; /* All resources stats mapped to PF are cleared */
+};
+
+struct mcs_intr_cfg {
+ struct mbox_msghdr hdr;
+#define MCS_CPM_RX_SECTAG_V_EQ1_INT BIT_ULL(0)
+#define MCS_CPM_RX_SECTAG_E_EQ0_C_EQ1_INT BIT_ULL(1)
+#define MCS_CPM_RX_SECTAG_SL_GTE48_INT BIT_ULL(2)
+#define MCS_CPM_RX_SECTAG_ES_EQ1_SC_EQ1_INT BIT_ULL(3)
+#define MCS_CPM_RX_SECTAG_SC_EQ1_SCB_EQ1_INT BIT_ULL(4)
+#define MCS_CPM_RX_PACKET_XPN_EQ0_INT BIT_ULL(5)
+#define MCS_CPM_RX_PN_THRESH_REACHED_INT BIT_ULL(6)
+#define MCS_CPM_TX_PACKET_XPN_EQ0_INT BIT_ULL(7)
+#define MCS_CPM_TX_PN_THRESH_REACHED_INT BIT_ULL(8)
+#define MCS_CPM_TX_SA_NOT_VALID_INT BIT_ULL(9)
+#define MCS_BBE_RX_DFIFO_OVERFLOW_INT BIT_ULL(10)
+#define MCS_BBE_RX_PLFIFO_OVERFLOW_INT BIT_ULL(11)
+#define MCS_BBE_TX_DFIFO_OVERFLOW_INT BIT_ULL(12)
+#define MCS_BBE_TX_PLFIFO_OVERFLOW_INT BIT_ULL(13)
+#define MCS_PAB_RX_CHAN_OVERFLOW_INT BIT_ULL(14)
+#define MCS_PAB_TX_CHAN_OVERFLOW_INT BIT_ULL(15)
+ u64 intr_mask; /* Interrupt enable mask */
+ u8 mcs_id;
+ u8 lmac_id;
+ u64 rsvd;
+};
+
+struct mcs_intr_info {
+ struct mbox_msghdr hdr;
+ u64 intr_mask;
+ int sa_id;
+ u8 mcs_id;
+ u8 lmac_id;
+ u64 rsvd;
+};
+
+#endif /* MBOX_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs.c b/drivers/net/ethernet/marvell/octeontx2/af/mcs.c
new file mode 100644
index 000000000..c1775bd01
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs.c
@@ -0,0 +1,1617 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell MCS driver
+ *
+ * Copyright (C) 2022 Marvell.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "mcs.h"
+#include "mcs_reg.h"
+
+#define DRV_NAME "Marvell MCS Driver"
+
+#define PCI_CFG_REG_BAR_NUM 0
+
+static const struct pci_device_id mcs_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_MCS) },
+ { 0, } /* end of table */
+};
+
+static LIST_HEAD(mcs_list);
+
+void mcs_get_tx_secy_stats(struct mcs *mcs, struct mcs_secy_stats *stats, int id)
+{
+ u64 reg;
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLBCPKTSX(id);
+ stats->ctl_pkt_bcast_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLMCPKTSX(id);
+ stats->ctl_pkt_mcast_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLOCTETSX(id);
+ stats->ctl_octet_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLUCPKTSX(id);
+ stats->ctl_pkt_ucast_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLBCPKTSX(id);
+ stats->unctl_pkt_bcast_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLMCPKTSX(id);
+ stats->unctl_pkt_mcast_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLOCTETSX(id);
+ stats->unctl_octet_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLUCPKTSX(id);
+ stats->unctl_pkt_ucast_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSECYENCRYPTEDX(id);
+ stats->octet_encrypted_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSECYPROTECTEDX(id);
+ stats->octet_protected_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECYNOACTIVESAX(id);
+ stats->pkt_noactivesa_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECYTOOLONGX(id);
+ stats->pkt_toolong_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECYUNTAGGEDX(id);
+ stats->pkt_untagged_cnt = mcs_reg_read(mcs, reg);
+}
+
+void mcs_get_rx_secy_stats(struct mcs *mcs, struct mcs_secy_stats *stats, int id)
+{
+ u64 reg;
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_IFINCTLBCPKTSX(id);
+ stats->ctl_pkt_bcast_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_IFINCTLMCPKTSX(id);
+ stats->ctl_pkt_mcast_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_IFINCTLOCTETSX(id);
+ stats->ctl_octet_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_IFINCTLUCPKTSX(id);
+ stats->ctl_pkt_ucast_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLBCPKTSX(id);
+ stats->unctl_pkt_bcast_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLMCPKTSX(id);
+ stats->unctl_pkt_mcast_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLOCTETSX(id);
+ stats->unctl_octet_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLUCPKTSX(id);
+ stats->unctl_pkt_ucast_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INOCTETSSECYDECRYPTEDX(id);
+ stats->octet_decrypted_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INOCTETSSECYVALIDATEX(id);
+ stats->octet_validated_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSCTRLPORTDISABLEDX(id);
+ stats->pkt_port_disabled_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYBADTAGX(id);
+ stats->pkt_badtag_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYNOSAX(id);
+ stats->pkt_nosa_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYNOSAERRORX(id);
+ stats->pkt_nosaerror_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYTAGGEDCTLX(id);
+ stats->pkt_tagged_ctl_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYUNTAGGEDX(id);
+ stats->pkt_untaged_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYCTLX(id);
+ stats->pkt_ctl_cnt = mcs_reg_read(mcs, reg);
+
+ if (mcs->hw->mcs_blks > 1) {
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYNOTAGX(id);
+ stats->pkt_notag_cnt = mcs_reg_read(mcs, reg);
+ }
+}
+
+void mcs_get_flowid_stats(struct mcs *mcs, struct mcs_flowid_stats *stats,
+ int id, int dir)
+{
+ u64 reg;
+
+ if (dir == MCS_RX)
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSFLOWIDTCAMHITX(id);
+ else
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSFLOWIDTCAMHITX(id);
+
+ stats->tcam_hit_cnt = mcs_reg_read(mcs, reg);
+}
+
+void mcs_get_port_stats(struct mcs *mcs, struct mcs_port_stats *stats,
+ int id, int dir)
+{
+ u64 reg;
+
+ if (dir == MCS_RX) {
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSFLOWIDTCAMMISSX(id);
+ stats->tcam_miss_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSPARSEERRX(id);
+ stats->parser_err_cnt = mcs_reg_read(mcs, reg);
+ if (mcs->hw->mcs_blks > 1) {
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSEARLYPREEMPTERRX(id);
+ stats->preempt_err_cnt = mcs_reg_read(mcs, reg);
+ }
+ } else {
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSFLOWIDTCAMMISSX(id);
+ stats->tcam_miss_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSPARSEERRX(id);
+ stats->parser_err_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECTAGINSERTIONERRX(id);
+ stats->sectag_insert_err_cnt = mcs_reg_read(mcs, reg);
+ }
+}
+
+void mcs_get_sa_stats(struct mcs *mcs, struct mcs_sa_stats *stats, int id, int dir)
+{
+ u64 reg;
+
+ if (dir == MCS_RX) {
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSAINVALIDX(id);
+ stats->pkt_invalid_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSANOTUSINGSAERRORX(id);
+ stats->pkt_nosaerror_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSANOTVALIDX(id);
+ stats->pkt_notvalid_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSAOKX(id);
+ stats->pkt_ok_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSAUNUSEDSAX(id);
+ stats->pkt_nosa_cnt = mcs_reg_read(mcs, reg);
+ } else {
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSAENCRYPTEDX(id);
+ stats->pkt_encrypt_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSAPROTECTEDX(id);
+ stats->pkt_protected_cnt = mcs_reg_read(mcs, reg);
+ }
+}
+
+void mcs_get_sc_stats(struct mcs *mcs, struct mcs_sc_stats *stats,
+ int id, int dir)
+{
+ u64 reg;
+
+ if (dir == MCS_RX) {
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCCAMHITX(id);
+ stats->hit_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCINVALIDX(id);
+ stats->pkt_invalid_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCLATEORDELAYEDX(id);
+ stats->pkt_late_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCNOTVALIDX(id);
+ stats->pkt_notvalid_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCUNCHECKEDX(id);
+ stats->pkt_unchecked_cnt = mcs_reg_read(mcs, reg);
+
+ if (mcs->hw->mcs_blks > 1) {
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCDELAYEDX(id);
+ stats->pkt_delay_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCOKX(id);
+ stats->pkt_ok_cnt = mcs_reg_read(mcs, reg);
+ }
+ if (mcs->hw->mcs_blks == 1) {
+ reg = MCSX_CSE_RX_MEM_SLAVE_INOCTETSSCDECRYPTEDX(id);
+ stats->octet_decrypt_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INOCTETSSCVALIDATEX(id);
+ stats->octet_validate_cnt = mcs_reg_read(mcs, reg);
+ }
+ } else {
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSCENCRYPTEDX(id);
+ stats->pkt_encrypt_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSCPROTECTEDX(id);
+ stats->pkt_protected_cnt = mcs_reg_read(mcs, reg);
+
+ if (mcs->hw->mcs_blks == 1) {
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSCENCRYPTEDX(id);
+ stats->octet_encrypt_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSCPROTECTEDX(id);
+ stats->octet_protected_cnt = mcs_reg_read(mcs, reg);
+ }
+ }
+}
+
+void mcs_clear_stats(struct mcs *mcs, u8 type, u8 id, int dir)
+{
+ struct mcs_flowid_stats flowid_st;
+ struct mcs_port_stats port_st;
+ struct mcs_secy_stats secy_st;
+ struct mcs_sc_stats sc_st;
+ struct mcs_sa_stats sa_st;
+ u64 reg;
+
+ if (dir == MCS_RX)
+ reg = MCSX_CSE_RX_SLAVE_CTRL;
+ else
+ reg = MCSX_CSE_TX_SLAVE_CTRL;
+
+ mcs_reg_write(mcs, reg, BIT_ULL(0));
+
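+	/* With the slave CTRL bit set, the counter reads below perform the
+	 * clear; the values read into the local structs are discarded.
+	 */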
+ switch (type) {
+ case MCS_FLOWID_STATS:
+ mcs_get_flowid_stats(mcs, &flowid_st, id, dir);
+ break;
+ case MCS_SECY_STATS:
+ if (dir == MCS_RX)
+ mcs_get_rx_secy_stats(mcs, &secy_st, id);
+ else
+ mcs_get_tx_secy_stats(mcs, &secy_st, id);
+ break;
+ case MCS_SC_STATS:
+ mcs_get_sc_stats(mcs, &sc_st, id, dir);
+ break;
+ case MCS_SA_STATS:
+ mcs_get_sa_stats(mcs, &sa_st, id, dir);
+ break;
+ case MCS_PORT_STATS:
+ mcs_get_port_stats(mcs, &port_st, id, dir);
+ break;
+ }
+
+ mcs_reg_write(mcs, reg, 0x0);
+}
+
+int mcs_clear_all_stats(struct mcs *mcs, u16 pcifunc, int dir)
+{
+ struct mcs_rsrc_map *map;
+ int id;
+
+ if (dir == MCS_RX)
+ map = &mcs->rx;
+ else
+ map = &mcs->tx;
+
+ /* Clear FLOWID stats */
+ for (id = 0; id < map->flow_ids.max; id++) {
+ if (map->flowid2pf_map[id] != pcifunc)
+ continue;
+ mcs_clear_stats(mcs, MCS_FLOWID_STATS, id, dir);
+ }
+
+ /* Clear SECY stats */
+ for (id = 0; id < map->secy.max; id++) {
+ if (map->secy2pf_map[id] != pcifunc)
+ continue;
+ mcs_clear_stats(mcs, MCS_SECY_STATS, id, dir);
+ }
+
+ /* Clear SC stats */
+ for (id = 0; id < map->secy.max; id++) {
+ if (map->sc2pf_map[id] != pcifunc)
+ continue;
+ mcs_clear_stats(mcs, MCS_SC_STATS, id, dir);
+ }
+
+ /* Clear SA stats */
+ for (id = 0; id < map->sa.max; id++) {
+ if (map->sa2pf_map[id] != pcifunc)
+ continue;
+ mcs_clear_stats(mcs, MCS_SA_STATS, id, dir);
+ }
+ return 0;
+}
+
+void mcs_pn_table_write(struct mcs *mcs, u8 pn_id, u64 next_pn, u8 dir)
+{
+ u64 reg;
+
+ if (dir == MCS_RX)
+ reg = MCSX_CPM_RX_SLAVE_SA_PN_TABLE_MEMX(pn_id);
+ else
+ reg = MCSX_CPM_TX_SLAVE_SA_PN_TABLE_MEMX(pn_id);
+ mcs_reg_write(mcs, reg, next_pn);
+}
+
+void cn10kb_mcs_tx_sa_mem_map_write(struct mcs *mcs, struct mcs_tx_sc_sa_map *map)
+{
+ u64 reg, val;
+
+ val = (map->sa_index0 & 0xFF) |
+ (map->sa_index1 & 0xFF) << 9 |
+ (map->rekey_ena & 0x1) << 18 |
+ (map->sa_index0_vld & 0x1) << 19 |
+ (map->sa_index1_vld & 0x1) << 20 |
+ (map->tx_sa_active & 0x1) << 21 |
+ map->sectag_sci << 22;
+ reg = MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(map->sc_id);
+ mcs_reg_write(mcs, reg, val);
+
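+	/* SECTAG SCI bits [63:42] do not fit in SA_MAP_MEM_0 and are
+	 * written to SA_MAP_MEM_1 below.
+	 */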
+ val = map->sectag_sci >> 42;
+ reg = MCSX_CPM_TX_SLAVE_SA_MAP_MEM_1X(map->sc_id);
+ mcs_reg_write(mcs, reg, val);
+}
+
+void cn10kb_mcs_rx_sa_mem_map_write(struct mcs *mcs, struct mcs_rx_sc_sa_map *map)
+{
+ u64 val, reg;
+
+ val = (map->sa_index & 0xFF) | map->sa_in_use << 9;
+
+ reg = MCSX_CPM_RX_SLAVE_SA_MAP_MEMX((4 * map->sc_id) + map->an);
+ mcs_reg_write(mcs, reg, val);
+}
+
+void mcs_sa_plcy_write(struct mcs *mcs, u64 *plcy, int sa_id, int dir)
+{
+ int reg_id;
+ u64 reg;
+
+ if (dir == MCS_RX) {
+ for (reg_id = 0; reg_id < 8; reg_id++) {
+ reg = MCSX_CPM_RX_SLAVE_SA_PLCY_MEMX(reg_id, sa_id);
+ mcs_reg_write(mcs, reg, plcy[reg_id]);
+ }
+ } else {
+ for (reg_id = 0; reg_id < 9; reg_id++) {
+ reg = MCSX_CPM_TX_SLAVE_SA_PLCY_MEMX(reg_id, sa_id);
+ mcs_reg_write(mcs, reg, plcy[reg_id]);
+ }
+ }
+}
+
+void mcs_ena_dis_sc_cam_entry(struct mcs *mcs, int sc_id, int ena)
+{
+ u64 reg, val;
+
+ reg = MCSX_CPM_RX_SLAVE_SC_CAM_ENA(0);
+ if (sc_id > 63)
+ reg = MCSX_CPM_RX_SLAVE_SC_CAM_ENA(1);
+
+ if (ena)
+ val = mcs_reg_read(mcs, reg) | BIT_ULL(sc_id);
+ else
+ val = mcs_reg_read(mcs, reg) & ~BIT_ULL(sc_id);
+
+ mcs_reg_write(mcs, reg, val);
+}
+
+void mcs_rx_sc_cam_write(struct mcs *mcs, u64 sci, u64 secy, int sc_id)
+{
+ mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_SC_CAMX(0, sc_id), sci);
+ mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_SC_CAMX(1, sc_id), secy);
+ /* Enable SC CAM */
+ mcs_ena_dis_sc_cam_entry(mcs, sc_id, true);
+}
+
+void mcs_secy_plcy_write(struct mcs *mcs, u64 plcy, int secy_id, int dir)
+{
+ u64 reg;
+
+ if (dir == MCS_RX)
+ reg = MCSX_CPM_RX_SLAVE_SECY_PLCY_MEM_0X(secy_id);
+ else
+ reg = MCSX_CPM_TX_SLAVE_SECY_PLCY_MEMX(secy_id);
+
+ mcs_reg_write(mcs, reg, plcy);
+
+ if (mcs->hw->mcs_blks == 1 && dir == MCS_RX)
+ mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_SECY_PLCY_MEM_1X(secy_id), 0x0ull);
+}
+
+void cn10kb_mcs_flowid_secy_map(struct mcs *mcs, struct secy_mem_map *map, int dir)
+{
+ u64 reg, val;
+
+ val = (map->secy & 0x7F) | (map->ctrl_pkt & 0x1) << 8;
+ if (dir == MCS_RX) {
+ reg = MCSX_CPM_RX_SLAVE_SECY_MAP_MEMX(map->flow_id);
+ } else {
+ val |= (map->sc & 0x7F) << 9;
+ reg = MCSX_CPM_TX_SLAVE_SECY_MAP_MEM_0X(map->flow_id);
+ }
+
+ mcs_reg_write(mcs, reg, val);
+}
+
+void mcs_ena_dis_flowid_entry(struct mcs *mcs, int flow_id, int dir, int ena)
+{
+ u64 reg, val;
+
+ if (dir == MCS_RX) {
+ reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_ENA_0;
+ if (flow_id > 63)
+ reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_ENA_1;
+ } else {
+ reg = MCSX_CPM_TX_SLAVE_FLOWID_TCAM_ENA_0;
+ if (flow_id > 63)
+ reg = MCSX_CPM_TX_SLAVE_FLOWID_TCAM_ENA_1;
+ }
+
+ /* Enable/Disable the tcam entry */
+ if (ena)
+ val = mcs_reg_read(mcs, reg) | BIT_ULL(flow_id);
+ else
+ val = mcs_reg_read(mcs, reg) & ~BIT_ULL(flow_id);
+
+ mcs_reg_write(mcs, reg, val);
+}
+
+void mcs_flowid_entry_write(struct mcs *mcs, u64 *data, u64 *mask, int flow_id, int dir)
+{
+ int reg_id;
+ u64 reg;
+
+ if (dir == MCS_RX) {
+ for (reg_id = 0; reg_id < 4; reg_id++) {
+ reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_DATAX(reg_id, flow_id);
+ mcs_reg_write(mcs, reg, data[reg_id]);
+ }
+ for (reg_id = 0; reg_id < 4; reg_id++) {
+ reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_MASKX(reg_id, flow_id);
+ mcs_reg_write(mcs, reg, mask[reg_id]);
+ }
+ } else {
+ for (reg_id = 0; reg_id < 4; reg_id++) {
+ reg = MCSX_CPM_TX_SLAVE_FLOWID_TCAM_DATAX(reg_id, flow_id);
+ mcs_reg_write(mcs, reg, data[reg_id]);
+ }
+ for (reg_id = 0; reg_id < 4; reg_id++) {
+ reg = MCSX_CPM_TX_SLAVE_FLOWID_TCAM_MASKX(reg_id, flow_id);
+ mcs_reg_write(mcs, reg, mask[reg_id]);
+ }
+ }
+}
+
+int mcs_install_flowid_bypass_entry(struct mcs *mcs)
+{
+ int flow_id, secy_id, reg_id;
+ struct secy_mem_map map;
+ u64 reg, plcy = 0;
+
+ /* Flow entry */
+ flow_id = mcs->hw->tcam_entries - MCS_RSRC_RSVD_CNT;
+ __set_bit(flow_id, mcs->rx.flow_ids.bmap);
+ __set_bit(flow_id, mcs->tx.flow_ids.bmap);
+
+ for (reg_id = 0; reg_id < 4; reg_id++) {
+ reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_MASKX(reg_id, flow_id);
+ mcs_reg_write(mcs, reg, GENMASK_ULL(63, 0));
+ }
+ for (reg_id = 0; reg_id < 4; reg_id++) {
+ reg = MCSX_CPM_TX_SLAVE_FLOWID_TCAM_MASKX(reg_id, flow_id);
+ mcs_reg_write(mcs, reg, GENMASK_ULL(63, 0));
+ }
+ /* secy */
+ secy_id = mcs->hw->secy_entries - MCS_RSRC_RSVD_CNT;
+ __set_bit(secy_id, mcs->rx.secy.bmap);
+ __set_bit(secy_id, mcs->tx.secy.bmap);
+
+ /* Set validate frames to NULL and enable control port */
+ plcy = 0x7ull;
+ if (mcs->hw->mcs_blks > 1)
+ plcy = BIT_ULL(0) | 0x3ull << 4;
+ mcs_secy_plcy_write(mcs, plcy, secy_id, MCS_RX);
+
+ /* Enable control port and set mtu to max */
+ plcy = BIT_ULL(0) | GENMASK_ULL(43, 28);
+ if (mcs->hw->mcs_blks > 1)
+ plcy = BIT_ULL(0) | GENMASK_ULL(63, 48);
+ mcs_secy_plcy_write(mcs, plcy, secy_id, MCS_TX);
+
+ /* Map flowid to secy */
+ map.secy = secy_id;
+ map.ctrl_pkt = 0;
+ map.flow_id = flow_id;
+ mcs->mcs_ops->mcs_flowid_secy_map(mcs, &map, MCS_RX);
+ map.sc = secy_id;
+ mcs->mcs_ops->mcs_flowid_secy_map(mcs, &map, MCS_TX);
+
+ /* Enable Flowid entry */
+ mcs_ena_dis_flowid_entry(mcs, flow_id, MCS_RX, true);
+ mcs_ena_dis_flowid_entry(mcs, flow_id, MCS_TX, true);
+
+ return 0;
+}
+
+void mcs_clear_secy_plcy(struct mcs *mcs, int secy_id, int dir)
+{
+ struct mcs_rsrc_map *map;
+ int flow_id;
+
+ if (dir == MCS_RX)
+ map = &mcs->rx;
+ else
+ map = &mcs->tx;
+
+ /* Clear secy memory to zero */
+ mcs_secy_plcy_write(mcs, 0, secy_id, dir);
+
+ /* Disable the tcam entry using this secy */
+ for (flow_id = 0; flow_id < map->flow_ids.max; flow_id++) {
+ if (map->flowid2secy_map[flow_id] != secy_id)
+ continue;
+ mcs_ena_dis_flowid_entry(mcs, flow_id, dir, false);
+ }
+}
+
+int mcs_alloc_ctrlpktrule(struct rsrc_bmap *rsrc, u16 *pf_map, u16 offset, u16 pcifunc)
+{
+ int rsrc_id;
+
+ if (!rsrc->bmap)
+ return -EINVAL;
+
+ rsrc_id = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, offset, 1, 0);
+ if (rsrc_id >= rsrc->max)
+ return -ENOSPC;
+
+ bitmap_set(rsrc->bmap, rsrc_id, 1);
+ pf_map[rsrc_id] = pcifunc;
+
+ return rsrc_id;
+}
+
+int mcs_free_ctrlpktrule(struct mcs *mcs, struct mcs_free_ctrl_pkt_rule_req *req)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct mcs_rsrc_map *map;
+ u64 dis, reg;
+ int id, rc;
+
+ reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_RULE_ENABLE : MCSX_PEX_TX_SLAVE_RULE_ENABLE;
+ map = (req->dir == MCS_RX) ? &mcs->rx : &mcs->tx;
+
+ if (req->all) {
+ for (id = 0; id < map->ctrlpktrule.max; id++) {
+ if (map->ctrlpktrule2pf_map[id] != pcifunc)
+ continue;
+ mcs_free_rsrc(&map->ctrlpktrule, map->ctrlpktrule2pf_map, id, pcifunc);
+ dis = mcs_reg_read(mcs, reg);
+ dis &= ~BIT_ULL(id);
+ mcs_reg_write(mcs, reg, dis);
+ }
+ return 0;
+ }
+
+ rc = mcs_free_rsrc(&map->ctrlpktrule, map->ctrlpktrule2pf_map, req->rule_idx, pcifunc);
+ dis = mcs_reg_read(mcs, reg);
+ dis &= ~BIT_ULL(req->rule_idx);
+ mcs_reg_write(mcs, reg, dis);
+
+ return rc;
+}
+
+int mcs_ctrlpktrule_write(struct mcs *mcs, struct mcs_ctrl_pkt_rule_write_req *req)
+{
+ u64 reg, enb;
+ u64 idx;
+
+ switch (req->rule_type) {
+ case MCS_CTRL_PKT_RULE_TYPE_ETH:
+ req->data0 &= GENMASK(15, 0);
+ if (req->data0 != ETH_P_PAE)
+ return -EINVAL;
+
+ idx = req->rule_idx - MCS_CTRLPKT_ETYPE_RULE_OFFSET;
+ reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_RULE_ETYPE_CFGX(idx) :
+ MCSX_PEX_TX_SLAVE_RULE_ETYPE_CFGX(idx);
+
+ mcs_reg_write(mcs, reg, req->data0);
+ break;
+ case MCS_CTRL_PKT_RULE_TYPE_DA:
+ if (!(req->data0 & BIT_ULL(40)))
+ return -EINVAL;
+
+ idx = req->rule_idx - MCS_CTRLPKT_DA_RULE_OFFSET;
+ reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_RULE_DAX(idx) :
+ MCSX_PEX_TX_SLAVE_RULE_DAX(idx);
+
+ mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0));
+ break;
+ case MCS_CTRL_PKT_RULE_TYPE_RANGE:
+ if (!(req->data0 & BIT_ULL(40)) || !(req->data1 & BIT_ULL(40)))
+ return -EINVAL;
+
+ idx = req->rule_idx - MCS_CTRLPKT_DA_RANGE_RULE_OFFSET;
+ if (req->dir == MCS_RX) {
+ reg = MCSX_PEX_RX_SLAVE_RULE_DA_RANGE_MINX(idx);
+ mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0));
+ reg = MCSX_PEX_RX_SLAVE_RULE_DA_RANGE_MAXX(idx);
+ mcs_reg_write(mcs, reg, req->data1 & GENMASK_ULL(47, 0));
+ } else {
+ reg = MCSX_PEX_TX_SLAVE_RULE_DA_RANGE_MINX(idx);
+ mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0));
+ reg = MCSX_PEX_TX_SLAVE_RULE_DA_RANGE_MAXX(idx);
+ mcs_reg_write(mcs, reg, req->data1 & GENMASK_ULL(47, 0));
+ }
+ break;
+ case MCS_CTRL_PKT_RULE_TYPE_COMBO:
+ req->data2 &= GENMASK(15, 0);
+ if (req->data2 != ETH_P_PAE || !(req->data0 & BIT_ULL(40)) ||
+ !(req->data1 & BIT_ULL(40)))
+ return -EINVAL;
+
+ idx = req->rule_idx - MCS_CTRLPKT_COMBO_RULE_OFFSET;
+ if (req->dir == MCS_RX) {
+ reg = MCSX_PEX_RX_SLAVE_RULE_COMBO_MINX(idx);
+ mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0));
+ reg = MCSX_PEX_RX_SLAVE_RULE_COMBO_MAXX(idx);
+ mcs_reg_write(mcs, reg, req->data1 & GENMASK_ULL(47, 0));
+ reg = MCSX_PEX_RX_SLAVE_RULE_COMBO_ETX(idx);
+ mcs_reg_write(mcs, reg, req->data2);
+ } else {
+ reg = MCSX_PEX_TX_SLAVE_RULE_COMBO_MINX(idx);
+ mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0));
+ reg = MCSX_PEX_TX_SLAVE_RULE_COMBO_MAXX(idx);
+ mcs_reg_write(mcs, reg, req->data1 & GENMASK_ULL(47, 0));
+ reg = MCSX_PEX_TX_SLAVE_RULE_COMBO_ETX(idx);
+ mcs_reg_write(mcs, reg, req->data2);
+ }
+ break;
+ case MCS_CTRL_PKT_RULE_TYPE_MAC:
+ if (!(req->data0 & BIT_ULL(40)))
+ return -EINVAL;
+
+ idx = req->rule_idx - MCS_CTRLPKT_MAC_EN_RULE_OFFSET;
+ reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_RULE_MAC :
+ MCSX_PEX_TX_SLAVE_RULE_MAC;
+
+ mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0));
+ break;
+ }
+
+ reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_RULE_ENABLE : MCSX_PEX_TX_SLAVE_RULE_ENABLE;
+
+ enb = mcs_reg_read(mcs, reg);
+ enb |= BIT_ULL(req->rule_idx);
+ mcs_reg_write(mcs, reg, enb);
+
+ return 0;
+}
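+
+/* Sketch only: a minimal request that steers EAPOL (802.1X) frames to the
+ * control channel via the writer above. Only the fields referenced in
+ * mcs_ctrlpktrule_write() are filled in; any other members of
+ * struct mcs_ctrl_pkt_rule_write_req are assumed to default to zero.
+ */
+static int example_write_eapol_rule(struct mcs *mcs)
+{
+	struct mcs_ctrl_pkt_rule_write_req req = {
+		.rule_type = MCS_CTRL_PKT_RULE_TYPE_ETH,
+		.rule_idx = MCS_CTRLPKT_ETYPE_RULE_OFFSET,	/* first ETYPE slot */
+		.data0 = ETH_P_PAE,				/* 0x888e */
+		.dir = MCS_RX,
+	};
+
+	return mcs_ctrlpktrule_write(mcs, &req);
+}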
+
+int mcs_free_rsrc(struct rsrc_bmap *rsrc, u16 *pf_map, int rsrc_id, u16 pcifunc)
+{
+ /* Check if the rsrc_id is mapped to PF/VF */
+ if (pf_map[rsrc_id] != pcifunc)
+ return -EINVAL;
+
+ rvu_free_rsrc(rsrc, rsrc_id);
+ pf_map[rsrc_id] = 0;
+ return 0;
+}
+
+/* Free all the CAM resources mapped to the PF */
+int mcs_free_all_rsrc(struct mcs *mcs, int dir, u16 pcifunc)
+{
+ struct mcs_rsrc_map *map;
+ int id;
+
+ if (dir == MCS_RX)
+ map = &mcs->rx;
+ else
+ map = &mcs->tx;
+
+ /* free tcam entries */
+ for (id = 0; id < map->flow_ids.max; id++) {
+ if (map->flowid2pf_map[id] != pcifunc)
+ continue;
+ mcs_free_rsrc(&map->flow_ids, map->flowid2pf_map,
+ id, pcifunc);
+ mcs_ena_dis_flowid_entry(mcs, id, dir, false);
+ }
+
+ /* free secy entries */
+ for (id = 0; id < map->secy.max; id++) {
+ if (map->secy2pf_map[id] != pcifunc)
+ continue;
+ mcs_free_rsrc(&map->secy, map->secy2pf_map,
+ id, pcifunc);
+ mcs_clear_secy_plcy(mcs, id, dir);
+ }
+
+ /* free sc entries */
+ for (id = 0; id < map->secy.max; id++) {
+ if (map->sc2pf_map[id] != pcifunc)
+ continue;
+ mcs_free_rsrc(&map->sc, map->sc2pf_map, id, pcifunc);
+
+ /* Disable SC CAM only on RX side */
+ if (dir == MCS_RX)
+ mcs_ena_dis_sc_cam_entry(mcs, id, false);
+ }
+
+ /* free sa entries */
+ for (id = 0; id < map->sa.max; id++) {
+ if (map->sa2pf_map[id] != pcifunc)
+ continue;
+ mcs_free_rsrc(&map->sa, map->sa2pf_map, id, pcifunc);
+ }
+ return 0;
+}
+
+int mcs_alloc_rsrc(struct rsrc_bmap *rsrc, u16 *pf_map, u16 pcifunc)
+{
+ int rsrc_id;
+
+ rsrc_id = rvu_alloc_rsrc(rsrc);
+ if (rsrc_id < 0)
+ return -ENOMEM;
+ pf_map[rsrc_id] = pcifunc;
+ return rsrc_id;
+}
+
+int mcs_alloc_all_rsrc(struct mcs *mcs, u8 *flow_id, u8 *secy_id,
+ u8 *sc_id, u8 *sa1_id, u8 *sa2_id, u16 pcifunc, int dir)
+{
+ struct mcs_rsrc_map *map;
+ int id;
+
+ if (dir == MCS_RX)
+ map = &mcs->rx;
+ else
+ map = &mcs->tx;
+
+ id = mcs_alloc_rsrc(&map->flow_ids, map->flowid2pf_map, pcifunc);
+ if (id < 0)
+ return -ENOMEM;
+ *flow_id = id;
+
+ id = mcs_alloc_rsrc(&map->secy, map->secy2pf_map, pcifunc);
+ if (id < 0)
+ return -ENOMEM;
+ *secy_id = id;
+
+ id = mcs_alloc_rsrc(&map->sc, map->sc2pf_map, pcifunc);
+ if (id < 0)
+ return -ENOMEM;
+ *sc_id = id;
+
+ id = mcs_alloc_rsrc(&map->sa, map->sa2pf_map, pcifunc);
+ if (id < 0)
+ return -ENOMEM;
+ *sa1_id = id;
+
+ id = mcs_alloc_rsrc(&map->sa, map->sa2pf_map, pcifunc);
+ if (id < 0)
+ return -ENOMEM;
+ *sa2_id = id;
+
+ return 0;
+}
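+
+/* Sketch only: a hypothetical consumer grabbing a full TX resource set for
+ * one PCI function; rollback of partially allocated resources is omitted.
+ */
+static int example_alloc_tx_set(struct mcs *mcs, u16 pcifunc)
+{
+	u8 flow_id, secy_id, sc_id, sa1_id, sa2_id;
+	int err;
+
+	err = mcs_alloc_all_rsrc(mcs, &flow_id, &secy_id, &sc_id,
+				 &sa1_id, &sa2_id, pcifunc, MCS_TX);
+	if (err)
+		return err;
+
+	/* sa1_id/sa2_id form the auto-rekey pair that gets mapped to sc_id */
+	return 0;
+}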
+
+static void cn10kb_mcs_tx_pn_wrapped_handler(struct mcs *mcs)
+{
+ struct mcs_intr_event event = { 0 };
+ struct rsrc_bmap *sc_bmap;
+ u64 val;
+ int sc;
+
+ sc_bmap = &mcs->tx.sc;
+
+ event.mcs_id = mcs->mcs_id;
+ event.intr_mask = MCS_CPM_TX_PACKET_XPN_EQ0_INT;
+
+ for_each_set_bit(sc, sc_bmap->bmap, mcs->hw->sc_entries) {
+ val = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(sc));
+
+ if (mcs->tx_sa_active[sc])
+ /* SA_index1 was used and got expired */
+ event.sa_id = (val >> 9) & 0xFF;
+ else
+ /* SA_index0 was used and got expired */
+ event.sa_id = val & 0xFF;
+
+ event.pcifunc = mcs->tx.sa2pf_map[event.sa_id];
+ mcs_add_intr_wq_entry(mcs, &event);
+ }
+}
+
+static void cn10kb_mcs_tx_pn_thresh_reached_handler(struct mcs *mcs)
+{
+ struct mcs_intr_event event = { 0 };
+ struct rsrc_bmap *sc_bmap;
+ u64 val, status;
+ int sc;
+
+ sc_bmap = &mcs->tx.sc;
+
+ event.mcs_id = mcs->mcs_id;
+ event.intr_mask = MCS_CPM_TX_PN_THRESH_REACHED_INT;
+
+	/* The TX SA interrupt is raised only if auto rekey is enabled.
+	 * MCS_CPM_TX_SLAVE_SA_MAP_MEM_0X[sc].tx_sa_active gets toggled when
+	 * one of the two SAs mapped to the SC expires: tx_sa_active=0 means
+	 * the SA in SA_index1 expired, otherwise the SA in SA_index0 expired.
+	 */
+ for_each_set_bit(sc, sc_bmap->bmap, mcs->hw->sc_entries) {
+ val = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(sc));
+		/* Skip SCs without auto rekey enabled */
+ if (!((val >> 18) & 0x1))
+ continue;
+
+ status = (val >> 21) & 0x1;
+
+		/* Check if the tx_sa_active status has changed */
+ if (status == mcs->tx_sa_active[sc])
+ continue;
+		/* status=1: SA_index0 expired, else SA_index1 expired */
+ if (status)
+ event.sa_id = val & 0xFF;
+ else
+ event.sa_id = (val >> 9) & 0xFF;
+
+ event.pcifunc = mcs->tx.sa2pf_map[event.sa_id];
+ mcs_add_intr_wq_entry(mcs, &event);
+ }
+}
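+
+/* Sketch only: bit positions inferred from the two handlers above for the
+ * CN10K-B MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X word; they are not taken from a
+ * register manual, so treat these helpers as illustrative.
+ */
+static inline u8 example_sa_map_sa_index0(u64 val)
+{
+	return val & 0xFF;		/* bits 7:0   - SA_index0 */
+}
+
+static inline u8 example_sa_map_sa_index1(u64 val)
+{
+	return (val >> 9) & 0xFF;	/* bits 16:9  - SA_index1 */
+}
+
+static inline bool example_sa_map_rekey_ena(u64 val)
+{
+	return (val >> 18) & 0x1;	/* bit 18     - auto rekey enable */
+}
+
+static inline bool example_sa_map_tx_sa_active(u64 val)
+{
+	return (val >> 21) & 0x1;	/* bit 21     - tx_sa_active */
+}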
+
+static void mcs_rx_pn_thresh_reached_handler(struct mcs *mcs)
+{
+ struct mcs_intr_event event = { 0 };
+ int sa, reg;
+ u64 intr;
+
+ /* Check expired SAs */
+ for (reg = 0; reg < (mcs->hw->sa_entries / 64); reg++) {
+ /* Bit high in *PN_THRESH_REACHEDX implies
+ * corresponding SAs are expired.
+ */
+ intr = mcs_reg_read(mcs, MCSX_CPM_RX_SLAVE_PN_THRESH_REACHEDX(reg));
+ for (sa = 0; sa < 64; sa++) {
+ if (!(intr & BIT_ULL(sa)))
+ continue;
+
+ event.mcs_id = mcs->mcs_id;
+ event.intr_mask = MCS_CPM_RX_PN_THRESH_REACHED_INT;
+ event.sa_id = sa + (reg * 64);
+ event.pcifunc = mcs->rx.sa2pf_map[event.sa_id];
+ mcs_add_intr_wq_entry(mcs, &event);
+ }
+ }
+}
+
+static void mcs_rx_misc_intr_handler(struct mcs *mcs, u64 intr)
+{
+ struct mcs_intr_event event = { 0 };
+
+ event.mcs_id = mcs->mcs_id;
+ event.pcifunc = mcs->pf_map[0];
+
+ if (intr & MCS_CPM_RX_INT_SECTAG_V_EQ1)
+ event.intr_mask = MCS_CPM_RX_SECTAG_V_EQ1_INT;
+ if (intr & MCS_CPM_RX_INT_SECTAG_E_EQ0_C_EQ1)
+ event.intr_mask |= MCS_CPM_RX_SECTAG_E_EQ0_C_EQ1_INT;
+ if (intr & MCS_CPM_RX_INT_SL_GTE48)
+ event.intr_mask |= MCS_CPM_RX_SECTAG_SL_GTE48_INT;
+ if (intr & MCS_CPM_RX_INT_ES_EQ1_SC_EQ1)
+ event.intr_mask |= MCS_CPM_RX_SECTAG_ES_EQ1_SC_EQ1_INT;
+ if (intr & MCS_CPM_RX_INT_SC_EQ1_SCB_EQ1)
+ event.intr_mask |= MCS_CPM_RX_SECTAG_SC_EQ1_SCB_EQ1_INT;
+ if (intr & MCS_CPM_RX_INT_PACKET_XPN_EQ0)
+ event.intr_mask |= MCS_CPM_RX_PACKET_XPN_EQ0_INT;
+
+ mcs_add_intr_wq_entry(mcs, &event);
+}
+
+static void mcs_tx_misc_intr_handler(struct mcs *mcs, u64 intr)
+{
+ struct mcs_intr_event event = { 0 };
+
+ if (!(intr & MCS_CPM_TX_INT_SA_NOT_VALID))
+ return;
+
+ event.mcs_id = mcs->mcs_id;
+ event.pcifunc = mcs->pf_map[0];
+
+ event.intr_mask = MCS_CPM_TX_SA_NOT_VALID_INT;
+
+ mcs_add_intr_wq_entry(mcs, &event);
+}
+
+void cn10kb_mcs_bbe_intr_handler(struct mcs *mcs, u64 intr,
+ enum mcs_direction dir)
+{
+ u64 val, reg;
+ int lmac;
+
+ if (!(intr & 0x6ULL))
+ return;
+
+ if (intr & BIT_ULL(1))
+ reg = (dir == MCS_RX) ? MCSX_BBE_RX_SLAVE_DFIFO_OVERFLOW_0 :
+ MCSX_BBE_TX_SLAVE_DFIFO_OVERFLOW_0;
+ else
+ reg = (dir == MCS_RX) ? MCSX_BBE_RX_SLAVE_PLFIFO_OVERFLOW_0 :
+ MCSX_BBE_TX_SLAVE_PLFIFO_OVERFLOW_0;
+ val = mcs_reg_read(mcs, reg);
+
+	/* Policy or data FIFO overflow occurred */
+ for (lmac = 0; lmac < mcs->hw->lmac_cnt; lmac++) {
+ if (!(val & BIT_ULL(lmac)))
+ continue;
+		dev_warn(mcs->dev, "BBE: Policy or data overflow occurred on lmac:%d\n", lmac);
+ }
+}
+
+void cn10kb_mcs_pab_intr_handler(struct mcs *mcs, u64 intr,
+ enum mcs_direction dir)
+{
+ int lmac;
+
+ if (!(intr & 0xFFFFFULL))
+ return;
+
+ for (lmac = 0; lmac < mcs->hw->lmac_cnt; lmac++) {
+ if (intr & BIT_ULL(lmac))
+ dev_warn(mcs->dev, "PAB: overflow occurred on lmac:%d\n", lmac);
+ }
+}
+
+static irqreturn_t mcs_ip_intr_handler(int irq, void *mcs_irq)
+{
+ struct mcs *mcs = (struct mcs *)mcs_irq;
+ u64 intr, cpm_intr, bbe_intr, pab_intr;
+
+ /* Disable the interrupt */
+ mcs_reg_write(mcs, MCSX_IP_INT_ENA_W1C, BIT_ULL(0));
+
+	/* Check which block raised the interrupt */
+ intr = mcs_reg_read(mcs, MCSX_TOP_SLAVE_INT_SUM);
+
+ /* CPM RX */
+ if (intr & MCS_CPM_RX_INT_ENA) {
+ /* Check for PN thresh interrupt bit */
+ cpm_intr = mcs_reg_read(mcs, MCSX_CPM_RX_SLAVE_RX_INT);
+
+ if (cpm_intr & MCS_CPM_RX_INT_PN_THRESH_REACHED)
+ mcs_rx_pn_thresh_reached_handler(mcs);
+
+ if (cpm_intr & MCS_CPM_RX_INT_ALL)
+ mcs_rx_misc_intr_handler(mcs, cpm_intr);
+
+ /* Clear the interrupt */
+ mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_RX_INT, cpm_intr);
+ }
+
+ /* CPM TX */
+ if (intr & MCS_CPM_TX_INT_ENA) {
+ cpm_intr = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_TX_INT);
+
+ if (cpm_intr & MCS_CPM_TX_INT_PN_THRESH_REACHED) {
+ if (mcs->hw->mcs_blks > 1)
+ cnf10kb_mcs_tx_pn_thresh_reached_handler(mcs);
+ else
+ cn10kb_mcs_tx_pn_thresh_reached_handler(mcs);
+ }
+
+ if (cpm_intr & MCS_CPM_TX_INT_SA_NOT_VALID)
+ mcs_tx_misc_intr_handler(mcs, cpm_intr);
+
+ if (cpm_intr & MCS_CPM_TX_INT_PACKET_XPN_EQ0) {
+ if (mcs->hw->mcs_blks > 1)
+ cnf10kb_mcs_tx_pn_wrapped_handler(mcs);
+ else
+ cn10kb_mcs_tx_pn_wrapped_handler(mcs);
+ }
+ /* Clear the interrupt */
+ mcs_reg_write(mcs, MCSX_CPM_TX_SLAVE_TX_INT, cpm_intr);
+ }
+
+ /* BBE RX */
+ if (intr & MCS_BBE_RX_INT_ENA) {
+ bbe_intr = mcs_reg_read(mcs, MCSX_BBE_RX_SLAVE_BBE_INT);
+ mcs->mcs_ops->mcs_bbe_intr_handler(mcs, bbe_intr, MCS_RX);
+
+ /* Clear the interrupt */
+ mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_BBE_INT_INTR_RW, 0);
+ mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_BBE_INT, bbe_intr);
+ }
+
+ /* BBE TX */
+ if (intr & MCS_BBE_TX_INT_ENA) {
+ bbe_intr = mcs_reg_read(mcs, MCSX_BBE_TX_SLAVE_BBE_INT);
+ mcs->mcs_ops->mcs_bbe_intr_handler(mcs, bbe_intr, MCS_TX);
+
+ /* Clear the interrupt */
+ mcs_reg_write(mcs, MCSX_BBE_TX_SLAVE_BBE_INT_INTR_RW, 0);
+ mcs_reg_write(mcs, MCSX_BBE_TX_SLAVE_BBE_INT, bbe_intr);
+ }
+
+ /* PAB RX */
+ if (intr & MCS_PAB_RX_INT_ENA) {
+ pab_intr = mcs_reg_read(mcs, MCSX_PAB_RX_SLAVE_PAB_INT);
+ mcs->mcs_ops->mcs_pab_intr_handler(mcs, pab_intr, MCS_RX);
+
+ /* Clear the interrupt */
+ mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_PAB_INT_INTR_RW, 0);
+ mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_PAB_INT, pab_intr);
+ }
+
+ /* PAB TX */
+ if (intr & MCS_PAB_TX_INT_ENA) {
+ pab_intr = mcs_reg_read(mcs, MCSX_PAB_TX_SLAVE_PAB_INT);
+ mcs->mcs_ops->mcs_pab_intr_handler(mcs, pab_intr, MCS_TX);
+
+ /* Clear the interrupt */
+ mcs_reg_write(mcs, MCSX_PAB_TX_SLAVE_PAB_INT_INTR_RW, 0);
+ mcs_reg_write(mcs, MCSX_PAB_TX_SLAVE_PAB_INT, pab_intr);
+ }
+
+ /* Clear and enable the interrupt */
+ mcs_reg_write(mcs, MCSX_IP_INT, BIT_ULL(0));
+ mcs_reg_write(mcs, MCSX_IP_INT_ENA_W1S, BIT_ULL(0));
+
+ return IRQ_HANDLED;
+}
+
+static void *alloc_mem(struct mcs *mcs, int n)
+{
+ return devm_kcalloc(mcs->dev, n, sizeof(u16), GFP_KERNEL);
+}
+
+static int mcs_alloc_struct_mem(struct mcs *mcs, struct mcs_rsrc_map *res)
+{
+ struct hwinfo *hw = mcs->hw;
+ int err;
+
+ res->flowid2pf_map = alloc_mem(mcs, hw->tcam_entries);
+ if (!res->flowid2pf_map)
+ return -ENOMEM;
+
+ res->secy2pf_map = alloc_mem(mcs, hw->secy_entries);
+ if (!res->secy2pf_map)
+ return -ENOMEM;
+
+ res->sc2pf_map = alloc_mem(mcs, hw->sc_entries);
+ if (!res->sc2pf_map)
+ return -ENOMEM;
+
+ res->sa2pf_map = alloc_mem(mcs, hw->sa_entries);
+ if (!res->sa2pf_map)
+ return -ENOMEM;
+
+ res->flowid2secy_map = alloc_mem(mcs, hw->tcam_entries);
+ if (!res->flowid2secy_map)
+ return -ENOMEM;
+
+ res->ctrlpktrule2pf_map = alloc_mem(mcs, MCS_MAX_CTRLPKT_RULES);
+ if (!res->ctrlpktrule2pf_map)
+ return -ENOMEM;
+
+ res->flow_ids.max = hw->tcam_entries - MCS_RSRC_RSVD_CNT;
+ err = rvu_alloc_bitmap(&res->flow_ids);
+ if (err)
+ return err;
+
+ res->secy.max = hw->secy_entries - MCS_RSRC_RSVD_CNT;
+ err = rvu_alloc_bitmap(&res->secy);
+ if (err)
+ return err;
+
+ res->sc.max = hw->sc_entries;
+ err = rvu_alloc_bitmap(&res->sc);
+ if (err)
+ return err;
+
+ res->sa.max = hw->sa_entries;
+ err = rvu_alloc_bitmap(&res->sa);
+ if (err)
+ return err;
+
+ res->ctrlpktrule.max = MCS_MAX_CTRLPKT_RULES;
+ err = rvu_alloc_bitmap(&res->ctrlpktrule);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int mcs_register_interrupts(struct mcs *mcs)
+{
+ int ret = 0;
+
+ mcs->num_vec = pci_msix_vec_count(mcs->pdev);
+
+ ret = pci_alloc_irq_vectors(mcs->pdev, mcs->num_vec,
+ mcs->num_vec, PCI_IRQ_MSIX);
+ if (ret < 0) {
+		dev_err(mcs->dev, "MCS request for %d MSIX vectors failed, err:%d\n",
+ mcs->num_vec, ret);
+ return ret;
+ }
+
+ ret = request_irq(pci_irq_vector(mcs->pdev, mcs->hw->ip_vec),
+ mcs_ip_intr_handler, 0, "MCS_IP", mcs);
+ if (ret) {
+ dev_err(mcs->dev, "MCS IP irq registration failed\n");
+ goto exit;
+ }
+
+	/* Enable MCS IP interrupts */
+ mcs_reg_write(mcs, MCSX_IP_INT_ENA_W1S, BIT_ULL(0));
+
+ /* Enable CPM Rx/Tx interrupts */
+ mcs_reg_write(mcs, MCSX_TOP_SLAVE_INT_SUM_ENB,
+ MCS_CPM_RX_INT_ENA | MCS_CPM_TX_INT_ENA |
+ MCS_BBE_RX_INT_ENA | MCS_BBE_TX_INT_ENA |
+ MCS_PAB_RX_INT_ENA | MCS_PAB_TX_INT_ENA);
+
+ mcs_reg_write(mcs, MCSX_CPM_TX_SLAVE_TX_INT_ENB, 0x7ULL);
+ mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_RX_INT_ENB, 0x7FULL);
+
+ mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_BBE_INT_ENB, 0xFFULL);
+ mcs_reg_write(mcs, MCSX_BBE_TX_SLAVE_BBE_INT_ENB, 0xFFULL);
+
+ mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_PAB_INT_ENB, 0xFFFFFULL);
+ mcs_reg_write(mcs, MCSX_PAB_TX_SLAVE_PAB_INT_ENB, 0xFFFFFULL);
+
+ mcs->tx_sa_active = alloc_mem(mcs, mcs->hw->sc_entries);
+ if (!mcs->tx_sa_active) {
+ ret = -ENOMEM;
+ goto free_irq;
+ }
+
+ return ret;
+
+free_irq:
+ free_irq(pci_irq_vector(mcs->pdev, mcs->hw->ip_vec), mcs);
+exit:
+ pci_free_irq_vectors(mcs->pdev);
+ mcs->num_vec = 0;
+ return ret;
+}
+
+int mcs_get_blkcnt(void)
+{
+ struct mcs *mcs;
+ int idmax = -ENODEV;
+
+	/* Check if an MCS block is present in hardware */
+ if (!pci_dev_present(mcs_id_table))
+ return 0;
+
+ list_for_each_entry(mcs, &mcs_list, mcs_list)
+ if (mcs->mcs_id > idmax)
+ idmax = mcs->mcs_id;
+
+ if (idmax < 0)
+ return 0;
+
+ return idmax + 1;
+}
+
+struct mcs *mcs_get_pdata(int mcs_id)
+{
+ struct mcs *mcs_dev;
+
+ list_for_each_entry(mcs_dev, &mcs_list, mcs_list) {
+ if (mcs_dev->mcs_id == mcs_id)
+ return mcs_dev;
+ }
+ return NULL;
+}
+
+bool is_mcs_bypass(int mcs_id)
+{
+ struct mcs *mcs_dev;
+
+ list_for_each_entry(mcs_dev, &mcs_list, mcs_list) {
+ if (mcs_dev->mcs_id == mcs_id)
+ return mcs_dev->bypass;
+ }
+ return true;
+}
+
+void mcs_set_port_cfg(struct mcs *mcs, struct mcs_port_cfg_set_req *req)
+{
+ u64 val = 0;
+
+ mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_PORT_CFGX(req->port_id),
+ req->port_mode & MCS_PORT_MODE_MASK);
+
+ req->cstm_tag_rel_mode_sel &= 0x3;
+
+ if (mcs->hw->mcs_blks > 1) {
+ req->fifo_skid &= MCS_PORT_FIFO_SKID_MASK;
+ val = (u32)req->fifo_skid << 0x10;
+ val |= req->fifo_skid;
+ mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_FIFO_SKID_CFGX(req->port_id), val);
+ mcs_reg_write(mcs, MCSX_PEX_TX_SLAVE_CUSTOM_TAG_REL_MODE_SEL(req->port_id),
+ req->cstm_tag_rel_mode_sel);
+ val = mcs_reg_read(mcs, MCSX_PEX_RX_SLAVE_PEX_CONFIGURATION);
+
+ if (req->custom_hdr_enb)
+ val |= BIT_ULL(req->port_id);
+ else
+ val &= ~BIT_ULL(req->port_id);
+
+ mcs_reg_write(mcs, MCSX_PEX_RX_SLAVE_PEX_CONFIGURATION, val);
+ } else {
+ val = mcs_reg_read(mcs, MCSX_PEX_TX_SLAVE_PORT_CONFIG(req->port_id));
+ val |= (req->cstm_tag_rel_mode_sel << 2);
+ mcs_reg_write(mcs, MCSX_PEX_TX_SLAVE_PORT_CONFIG(req->port_id), val);
+ }
+}
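+
+/* Sketch only: on multi-block silicon the FIFO skid write above packs the
+ * same skid count into the low and high 16-bit halves of FIFO_SKID_CFG (the
+ * 0xe000e default programmed in mcs_lmac_init() below follows the same
+ * pattern), which can be expressed as:
+ */
+static inline u64 example_fifo_skid_cfg(u16 skid)
+{
+	skid &= MCS_PORT_FIFO_SKID_MASK;
+	return ((u64)skid << 16) | skid;
+}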
+
+void mcs_get_port_cfg(struct mcs *mcs, struct mcs_port_cfg_get_req *req,
+ struct mcs_port_cfg_get_rsp *rsp)
+{
+ u64 reg = 0;
+
+ rsp->port_mode = mcs_reg_read(mcs, MCSX_PAB_RX_SLAVE_PORT_CFGX(req->port_id)) &
+ MCS_PORT_MODE_MASK;
+
+ if (mcs->hw->mcs_blks > 1) {
+ reg = MCSX_PAB_RX_SLAVE_FIFO_SKID_CFGX(req->port_id);
+ rsp->fifo_skid = mcs_reg_read(mcs, reg) & MCS_PORT_FIFO_SKID_MASK;
+ reg = MCSX_PEX_TX_SLAVE_CUSTOM_TAG_REL_MODE_SEL(req->port_id);
+ rsp->cstm_tag_rel_mode_sel = mcs_reg_read(mcs, reg) & 0x3;
+ if (mcs_reg_read(mcs, MCSX_PEX_RX_SLAVE_PEX_CONFIGURATION) & BIT_ULL(req->port_id))
+ rsp->custom_hdr_enb = 1;
+ } else {
+ reg = MCSX_PEX_TX_SLAVE_PORT_CONFIG(req->port_id);
+ rsp->cstm_tag_rel_mode_sel = mcs_reg_read(mcs, reg) >> 2;
+ }
+
+ rsp->port_id = req->port_id;
+ rsp->mcs_id = req->mcs_id;
+}
+
+void mcs_get_custom_tag_cfg(struct mcs *mcs, struct mcs_custom_tag_cfg_get_req *req,
+ struct mcs_custom_tag_cfg_get_rsp *rsp)
+{
+ u64 reg = 0, val = 0;
+ u8 idx;
+
+ for (idx = 0; idx < MCS_MAX_CUSTOM_TAGS; idx++) {
+ if (mcs->hw->mcs_blks > 1)
+ reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_CUSTOM_TAGX(idx) :
+ MCSX_PEX_TX_SLAVE_CUSTOM_TAGX(idx);
+ else
+ reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_VLAN_CFGX(idx) :
+ MCSX_PEX_TX_SLAVE_VLAN_CFGX(idx);
+
+ val = mcs_reg_read(mcs, reg);
+ if (mcs->hw->mcs_blks > 1) {
+ rsp->cstm_etype[idx] = val & GENMASK(15, 0);
+ rsp->cstm_indx[idx] = (val >> 0x16) & 0x3;
+ reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_ETYPE_ENABLE :
+ MCSX_PEX_TX_SLAVE_ETYPE_ENABLE;
+ rsp->cstm_etype_en = mcs_reg_read(mcs, reg) & 0xFF;
+ } else {
+ rsp->cstm_etype[idx] = (val >> 0x1) & GENMASK(15, 0);
+ rsp->cstm_indx[idx] = (val >> 0x11) & 0x3;
+ rsp->cstm_etype_en |= (val & 0x1) << idx;
+ }
+ }
+
+ rsp->mcs_id = req->mcs_id;
+ rsp->dir = req->dir;
+}
+
+void mcs_reset_port(struct mcs *mcs, u8 port_id, u8 reset)
+{
+ u64 reg = MCSX_MCS_TOP_SLAVE_PORT_RESET(port_id);
+
+ mcs_reg_write(mcs, reg, reset & 0x1);
+}
+
+/* Set lmac to bypass/operational mode */
+void mcs_set_lmac_mode(struct mcs *mcs, int lmac_id, u8 mode)
+{
+ u64 reg;
+ int id = lmac_id * 2;
+
+ reg = MCSX_MCS_TOP_SLAVE_CHANNEL_CFG(id);
+ mcs_reg_write(mcs, reg, (u64)mode);
+ reg = MCSX_MCS_TOP_SLAVE_CHANNEL_CFG((id + 1));
+ mcs_reg_write(mcs, reg, (u64)mode);
+}
+
+void mcs_pn_threshold_set(struct mcs *mcs, struct mcs_set_pn_threshold *pn)
+{
+ u64 reg;
+
+ if (pn->dir == MCS_RX)
+ reg = pn->xpn ? MCSX_CPM_RX_SLAVE_XPN_THRESHOLD : MCSX_CPM_RX_SLAVE_PN_THRESHOLD;
+ else
+ reg = pn->xpn ? MCSX_CPM_TX_SLAVE_XPN_THRESHOLD : MCSX_CPM_TX_SLAVE_PN_THRESHOLD;
+
+ mcs_reg_write(mcs, reg, pn->threshold);
+}
+
+void cn10kb_mcs_parser_cfg(struct mcs *mcs)
+{
+ u64 reg, val;
+
+ /* VLAN CTag */
+ val = BIT_ULL(0) | (0x8100ull & 0xFFFF) << 1 | BIT_ULL(17);
+ /* RX */
+ reg = MCSX_PEX_RX_SLAVE_VLAN_CFGX(0);
+ mcs_reg_write(mcs, reg, val);
+
+ /* TX */
+ reg = MCSX_PEX_TX_SLAVE_VLAN_CFGX(0);
+ mcs_reg_write(mcs, reg, val);
+
+ /* VLAN STag */
+ val = BIT_ULL(0) | (0x88a8ull & 0xFFFF) << 1 | BIT_ULL(18);
+ /* RX */
+ reg = MCSX_PEX_RX_SLAVE_VLAN_CFGX(1);
+ mcs_reg_write(mcs, reg, val);
+
+ /* TX */
+ reg = MCSX_PEX_TX_SLAVE_VLAN_CFGX(1);
+ mcs_reg_write(mcs, reg, val);
+}
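+
+/* Sketch only: the VLAN_CFG encoding implied by the two values written above
+ * (CN10K-B) - bit 0 enables the entry, bits 16:1 carry the TPID and bits
+ * 17/18 mark the tag as CTag/STag. The field meaning is inferred from the
+ * writes, not from a register manual.
+ */
+static u64 example_vlan_cfg(u16 tpid, bool is_stag)
+{
+	u64 val = BIT_ULL(0) | ((u64)tpid << 1);
+
+	return val | (is_stag ? BIT_ULL(18) : BIT_ULL(17));
+}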
+
+static void mcs_lmac_init(struct mcs *mcs, int lmac_id)
+{
+ u64 reg;
+
+ /* Port mode 25GB */
+ reg = MCSX_PAB_RX_SLAVE_PORT_CFGX(lmac_id);
+ mcs_reg_write(mcs, reg, 0);
+
+ if (mcs->hw->mcs_blks > 1) {
+ reg = MCSX_PAB_RX_SLAVE_FIFO_SKID_CFGX(lmac_id);
+ mcs_reg_write(mcs, reg, 0xe000e);
+ return;
+ }
+
+ reg = MCSX_PAB_TX_SLAVE_PORT_CFGX(lmac_id);
+ mcs_reg_write(mcs, reg, 0);
+}
+
+int mcs_set_lmac_channels(int mcs_id, u16 base)
+{
+ struct mcs *mcs;
+ int lmac;
+ u64 cfg;
+
+ mcs = mcs_get_pdata(mcs_id);
+ if (!mcs)
+ return -ENODEV;
+ for (lmac = 0; lmac < mcs->hw->lmac_cnt; lmac++) {
+ cfg = mcs_reg_read(mcs, MCSX_LINK_LMACX_CFG(lmac));
+ cfg &= ~(MCSX_LINK_LMAC_BASE_MASK | MCSX_LINK_LMAC_RANGE_MASK);
+ cfg |= FIELD_PREP(MCSX_LINK_LMAC_RANGE_MASK, ilog2(16));
+ cfg |= FIELD_PREP(MCSX_LINK_LMAC_BASE_MASK, base);
+ mcs_reg_write(mcs, MCSX_LINK_LMACX_CFG(lmac), cfg);
+ base += 16;
+ }
+ return 0;
+}
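+
+/* Sketch only: every LMAC link is handed a window of 16 channels starting at
+ * 'base', so a channel number can be mapped back to its LMAC as below.
+ */
+static inline int example_chan_to_lmac(u16 chan, u16 base)
+{
+	return (chan - base) / 16;	/* 16 channels per LMAC window */
+}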
+
+static int mcs_x2p_calibration(struct mcs *mcs)
+{
+ unsigned long timeout = jiffies + usecs_to_jiffies(20000);
+ int i, err = 0;
+ u64 val;
+
+ /* set X2P calibration */
+ val = mcs_reg_read(mcs, MCSX_MIL_GLOBAL);
+ val |= BIT_ULL(5);
+ mcs_reg_write(mcs, MCSX_MIL_GLOBAL, val);
+
+ /* Wait for calibration to complete */
+ while (!(mcs_reg_read(mcs, MCSX_MIL_RX_GBL_STATUS) & BIT_ULL(0))) {
+ if (time_before(jiffies, timeout)) {
+ usleep_range(80, 100);
+ continue;
+ } else {
+ err = -EBUSY;
+			dev_err(mcs->dev, "MCS X2P calibration failed, ignoring\n");
+ return err;
+ }
+ }
+
+ val = mcs_reg_read(mcs, MCSX_MIL_RX_GBL_STATUS);
+ for (i = 0; i < mcs->hw->mcs_x2p_intf; i++) {
+ if (val & BIT_ULL(1 + i))
+ continue;
+ err = -EBUSY;
+ dev_err(mcs->dev, "MCS:%d didn't respond to X2P calibration\n", i);
+ }
+ /* Clear X2P calibrate */
+ mcs_reg_write(mcs, MCSX_MIL_GLOBAL, mcs_reg_read(mcs, MCSX_MIL_GLOBAL) & ~BIT_ULL(5));
+
+ return err;
+}
+
+static void mcs_set_external_bypass(struct mcs *mcs, bool bypass)
+{
+ u64 val;
+
+ /* Set MCS to external bypass */
+ val = mcs_reg_read(mcs, MCSX_MIL_GLOBAL);
+ if (bypass)
+ val |= BIT_ULL(6);
+ else
+ val &= ~BIT_ULL(6);
+ mcs_reg_write(mcs, MCSX_MIL_GLOBAL, val);
+ mcs->bypass = bypass;
+}
+
+static void mcs_global_cfg(struct mcs *mcs)
+{
+ /* Disable external bypass */
+ mcs_set_external_bypass(mcs, false);
+
+ /* Reset TX/RX stats memory */
+ mcs_reg_write(mcs, MCSX_CSE_RX_SLAVE_STATS_CLEAR, 0x1F);
+ mcs_reg_write(mcs, MCSX_CSE_TX_SLAVE_STATS_CLEAR, 0x1F);
+
+ /* Set MCS to perform standard IEEE802.1AE macsec processing */
+ if (mcs->hw->mcs_blks == 1) {
+ mcs_reg_write(mcs, MCSX_IP_MODE, BIT_ULL(3));
+ return;
+ }
+
+ mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_CAL_ENTRY, 0xe4);
+ mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_CAL_LEN, 4);
+}
+
+void cn10kb_mcs_set_hw_capabilities(struct mcs *mcs)
+{
+ struct hwinfo *hw = mcs->hw;
+
+ hw->tcam_entries = 128; /* TCAM entries */
+ hw->secy_entries = 128; /* SecY entries */
+ hw->sc_entries = 128; /* SC CAM entries */
+ hw->sa_entries = 256; /* SA entries */
+ hw->lmac_cnt = 20; /* lmacs/ports per mcs block */
+	hw->mcs_x2p_intf = 5;		/* x2p calibration intf */
+ hw->mcs_blks = 1; /* MCS blocks */
+ hw->ip_vec = MCS_CN10KB_INT_VEC_IP; /* IP vector */
+}
+
+static struct mcs_ops cn10kb_mcs_ops = {
+ .mcs_set_hw_capabilities = cn10kb_mcs_set_hw_capabilities,
+ .mcs_parser_cfg = cn10kb_mcs_parser_cfg,
+ .mcs_tx_sa_mem_map_write = cn10kb_mcs_tx_sa_mem_map_write,
+ .mcs_rx_sa_mem_map_write = cn10kb_mcs_rx_sa_mem_map_write,
+ .mcs_flowid_secy_map = cn10kb_mcs_flowid_secy_map,
+ .mcs_bbe_intr_handler = cn10kb_mcs_bbe_intr_handler,
+ .mcs_pab_intr_handler = cn10kb_mcs_pab_intr_handler,
+};
+
+static int mcs_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct device *dev = &pdev->dev;
+ int lmac, err = 0;
+ struct mcs *mcs;
+
+ mcs = devm_kzalloc(dev, sizeof(*mcs), GFP_KERNEL);
+ if (!mcs)
+ return -ENOMEM;
+
+ mcs->hw = devm_kzalloc(dev, sizeof(struct hwinfo), GFP_KERNEL);
+ if (!mcs->hw)
+ return -ENOMEM;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(dev, "Failed to enable PCI device\n");
+ pci_set_drvdata(pdev, NULL);
+ return err;
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ dev_err(dev, "PCI request regions failed 0x%x\n", err);
+ goto exit;
+ }
+
+ mcs->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
+ if (!mcs->reg_base) {
+ dev_err(dev, "mcs: Cannot map CSR memory space, aborting\n");
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ pci_set_drvdata(pdev, mcs);
+ mcs->pdev = pdev;
+ mcs->dev = &pdev->dev;
+
+ if (pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_B)
+ mcs->mcs_ops = &cn10kb_mcs_ops;
+ else
+ mcs->mcs_ops = cnf10kb_get_mac_ops();
+
+ /* Set hardware capabilities */
+ mcs->mcs_ops->mcs_set_hw_capabilities(mcs);
+
+ mcs_global_cfg(mcs);
+
+	/* Perform X2P calibration */
+ err = mcs_x2p_calibration(mcs);
+ if (err)
+ goto err_x2p;
+
+ mcs->mcs_id = (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24)
+ & MCS_ID_MASK;
+
+ /* Set mcs tx side resources */
+ err = mcs_alloc_struct_mem(mcs, &mcs->tx);
+ if (err)
+ goto err_x2p;
+
+ /* Set mcs rx side resources */
+ err = mcs_alloc_struct_mem(mcs, &mcs->rx);
+ if (err)
+ goto err_x2p;
+
+ /* per port config */
+ for (lmac = 0; lmac < mcs->hw->lmac_cnt; lmac++)
+ mcs_lmac_init(mcs, lmac);
+
+ /* Parser configuration */
+ mcs->mcs_ops->mcs_parser_cfg(mcs);
+
+ err = mcs_register_interrupts(mcs);
+ if (err)
+ goto exit;
+
+ list_add(&mcs->mcs_list, &mcs_list);
+ mutex_init(&mcs->stats_lock);
+
+ return 0;
+
+err_x2p:
+ /* Enable external bypass */
+ mcs_set_external_bypass(mcs, true);
+exit:
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ return err;
+}
+
+static void mcs_remove(struct pci_dev *pdev)
+{
+ struct mcs *mcs = pci_get_drvdata(pdev);
+
+ /* Set MCS to external bypass */
+ mcs_set_external_bypass(mcs, true);
+ free_irq(pci_irq_vector(pdev, mcs->hw->ip_vec), mcs);
+ pci_free_irq_vectors(pdev);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+}
+
+struct pci_driver mcs_driver = {
+ .name = DRV_NAME,
+ .id_table = mcs_id_table,
+ .probe = mcs_probe,
+ .remove = mcs_remove,
+};
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs.h b/drivers/net/ethernet/marvell/octeontx2/af/mcs.h
new file mode 100644
index 000000000..f927cc61d
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs.h
@@ -0,0 +1,246 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell CN10K MCS driver
+ *
+ * Copyright (C) 2022 Marvell.
+ */
+
+#ifndef MCS_H
+#define MCS_H
+
+#include <linux/bits.h>
+#include "rvu.h"
+
+#define PCI_DEVID_CN10K_MCS 0xA096
+
+#define MCSX_LINK_LMAC_RANGE_MASK GENMASK_ULL(19, 16)
+#define MCSX_LINK_LMAC_BASE_MASK GENMASK_ULL(11, 0)
+
+#define MCS_ID_MASK 0x7
+#define MCS_MAX_PFS 128
+
+#define MCS_PORT_MODE_MASK 0x3
+#define MCS_PORT_FIFO_SKID_MASK 0x3F
+#define MCS_MAX_CUSTOM_TAGS 0x8
+
+#define MCS_CTRLPKT_ETYPE_RULE_MAX 8
+#define MCS_CTRLPKT_DA_RULE_MAX 8
+#define MCS_CTRLPKT_DA_RANGE_RULE_MAX 4
+#define MCS_CTRLPKT_COMBO_RULE_MAX 4
+#define MCS_CTRLPKT_MAC_RULE_MAX 1
+
+#define MCS_MAX_CTRLPKT_RULES (MCS_CTRLPKT_ETYPE_RULE_MAX + \
+ MCS_CTRLPKT_DA_RULE_MAX + \
+ MCS_CTRLPKT_DA_RANGE_RULE_MAX + \
+ MCS_CTRLPKT_COMBO_RULE_MAX + \
+ MCS_CTRLPKT_MAC_RULE_MAX)
+
+#define MCS_CTRLPKT_ETYPE_RULE_OFFSET 0
+#define MCS_CTRLPKT_DA_RULE_OFFSET 8
+#define MCS_CTRLPKT_DA_RANGE_RULE_OFFSET 16
+#define MCS_CTRLPKT_COMBO_RULE_OFFSET 20
+#define MCS_CTRLPKT_MAC_EN_RULE_OFFSET 24
+
+/* Reserved resources for default bypass entry */
+#define MCS_RSRC_RSVD_CNT 1
+
+/* MCS Interrupt Vector */
+#define MCS_CNF10KB_INT_VEC_IP 0x13
+#define MCS_CN10KB_INT_VEC_IP 0x53
+
+#define MCS_MAX_BBE_INT 8ULL
+#define MCS_BBE_INT_MASK 0xFFULL
+
+#define MCS_MAX_PAB_INT 8ULL
+#define MCS_PAB_INT_MASK 0xFULL
+
+#define MCS_BBE_RX_INT_ENA BIT_ULL(0)
+#define MCS_BBE_TX_INT_ENA BIT_ULL(1)
+#define MCS_CPM_RX_INT_ENA BIT_ULL(2)
+#define MCS_CPM_TX_INT_ENA BIT_ULL(3)
+#define MCS_PAB_RX_INT_ENA BIT_ULL(4)
+#define MCS_PAB_TX_INT_ENA BIT_ULL(5)
+
+#define MCS_CPM_TX_INT_PACKET_XPN_EQ0 BIT_ULL(0)
+#define MCS_CPM_TX_INT_PN_THRESH_REACHED BIT_ULL(1)
+#define MCS_CPM_TX_INT_SA_NOT_VALID BIT_ULL(2)
+
+#define MCS_CPM_RX_INT_SECTAG_V_EQ1 BIT_ULL(0)
+#define MCS_CPM_RX_INT_SECTAG_E_EQ0_C_EQ1 BIT_ULL(1)
+#define MCS_CPM_RX_INT_SL_GTE48 BIT_ULL(2)
+#define MCS_CPM_RX_INT_ES_EQ1_SC_EQ1 BIT_ULL(3)
+#define MCS_CPM_RX_INT_SC_EQ1_SCB_EQ1 BIT_ULL(4)
+#define MCS_CPM_RX_INT_PACKET_XPN_EQ0 BIT_ULL(5)
+#define MCS_CPM_RX_INT_PN_THRESH_REACHED BIT_ULL(6)
+
+#define MCS_CPM_RX_INT_ALL (MCS_CPM_RX_INT_SECTAG_V_EQ1 | \
+ MCS_CPM_RX_INT_SECTAG_E_EQ0_C_EQ1 | \
+ MCS_CPM_RX_INT_SL_GTE48 | \
+ MCS_CPM_RX_INT_ES_EQ1_SC_EQ1 | \
+ MCS_CPM_RX_INT_SC_EQ1_SCB_EQ1 | \
+ MCS_CPM_RX_INT_PACKET_XPN_EQ0 | \
+ MCS_CPM_RX_INT_PN_THRESH_REACHED)
+
+struct mcs_pfvf {
+ u64 intr_mask; /* Enabled Interrupt mask */
+};
+
+struct mcs_intr_event {
+ u16 pcifunc;
+ u64 intr_mask;
+ u64 sa_id;
+ u8 mcs_id;
+ u8 lmac_id;
+};
+
+struct mcs_intrq_entry {
+ struct list_head node;
+ struct mcs_intr_event intr_event;
+};
+
+struct secy_mem_map {
+ u8 flow_id;
+ u8 secy;
+ u8 ctrl_pkt;
+ u8 sc;
+ u64 sci;
+};
+
+struct mcs_rsrc_map {
+ u16 *flowid2pf_map;
+ u16 *secy2pf_map;
+ u16 *sc2pf_map;
+ u16 *sa2pf_map;
+	u16 *flowid2secy_map;	/* flow id to secy mapping */
+ u16 *ctrlpktrule2pf_map;
+ struct rsrc_bmap flow_ids;
+ struct rsrc_bmap secy;
+ struct rsrc_bmap sc;
+ struct rsrc_bmap sa;
+ struct rsrc_bmap ctrlpktrule;
+};
+
+struct hwinfo {
+ u8 tcam_entries;
+ u8 secy_entries;
+ u8 sc_entries;
+ u16 sa_entries;
+ u8 mcs_x2p_intf;
+ u8 lmac_cnt;
+ u8 mcs_blks;
+ unsigned long lmac_bmap; /* bitmap of enabled mcs lmac */
+ u16 ip_vec;
+};
+
+struct mcs {
+ void __iomem *reg_base;
+ struct pci_dev *pdev;
+ struct device *dev;
+ struct hwinfo *hw;
+ struct mcs_rsrc_map tx;
+ struct mcs_rsrc_map rx;
+ u16 pf_map[MCS_MAX_PFS]; /* List of PCIFUNC mapped to MCS */
+ u8 mcs_id;
+ struct mcs_ops *mcs_ops;
+ struct list_head mcs_list;
+ /* Lock for mcs stats */
+ struct mutex stats_lock;
+ struct mcs_pfvf *pf;
+ struct mcs_pfvf *vf;
+ u16 num_vec;
+ void *rvu;
+ u16 *tx_sa_active;
+ bool bypass;
+};
+
+struct mcs_ops {
+ void (*mcs_set_hw_capabilities)(struct mcs *mcs);
+ void (*mcs_parser_cfg)(struct mcs *mcs);
+ void (*mcs_tx_sa_mem_map_write)(struct mcs *mcs, struct mcs_tx_sc_sa_map *map);
+ void (*mcs_rx_sa_mem_map_write)(struct mcs *mcs, struct mcs_rx_sc_sa_map *map);
+ void (*mcs_flowid_secy_map)(struct mcs *mcs, struct secy_mem_map *map, int dir);
+ void (*mcs_bbe_intr_handler)(struct mcs *mcs, u64 intr, enum mcs_direction dir);
+ void (*mcs_pab_intr_handler)(struct mcs *mcs, u64 intr, enum mcs_direction dir);
+};
+
+extern struct pci_driver mcs_driver;
+
+static inline void mcs_reg_write(struct mcs *mcs, u64 offset, u64 val)
+{
+ writeq(val, mcs->reg_base + offset);
+}
+
+static inline u64 mcs_reg_read(struct mcs *mcs, u64 offset)
+{
+ return readq(mcs->reg_base + offset);
+}
+
+/* MCS APIs */
+struct mcs *mcs_get_pdata(int mcs_id);
+int mcs_get_blkcnt(void);
+int mcs_set_lmac_channels(int mcs_id, u16 base);
+int mcs_alloc_rsrc(struct rsrc_bmap *rsrc, u16 *pf_map, u16 pcifunc);
+int mcs_free_rsrc(struct rsrc_bmap *rsrc, u16 *pf_map, int rsrc_id, u16 pcifunc);
+int mcs_alloc_all_rsrc(struct mcs *mcs, u8 *flowid, u8 *secy_id,
+ u8 *sc_id, u8 *sa1_id, u8 *sa2_id, u16 pcifunc, int dir);
+int mcs_free_all_rsrc(struct mcs *mcs, int dir, u16 pcifunc);
+void mcs_clear_secy_plcy(struct mcs *mcs, int secy_id, int dir);
+void mcs_ena_dis_flowid_entry(struct mcs *mcs, int id, int dir, int ena);
+void mcs_ena_dis_sc_cam_entry(struct mcs *mcs, int id, int ena);
+void mcs_flowid_entry_write(struct mcs *mcs, u64 *data, u64 *mask, int id, int dir);
+void mcs_secy_plcy_write(struct mcs *mcs, u64 plcy, int id, int dir);
+void mcs_rx_sc_cam_write(struct mcs *mcs, u64 sci, u64 secy, int sc_id);
+void mcs_sa_plcy_write(struct mcs *mcs, u64 *plcy, int sa, int dir);
+void mcs_map_sc_to_sa(struct mcs *mcs, u64 *sa_map, int sc, int dir);
+void mcs_pn_table_write(struct mcs *mcs, u8 pn_id, u64 next_pn, u8 dir);
+void mcs_tx_sa_mem_map_write(struct mcs *mcs, struct mcs_tx_sc_sa_map *map);
+void mcs_flowid_secy_map(struct mcs *mcs, struct secy_mem_map *map, int dir);
+void mcs_rx_sa_mem_map_write(struct mcs *mcs, struct mcs_rx_sc_sa_map *map);
+void mcs_pn_threshold_set(struct mcs *mcs, struct mcs_set_pn_threshold *pn);
+int mcs_install_flowid_bypass_entry(struct mcs *mcs);
+void mcs_set_lmac_mode(struct mcs *mcs, int lmac_id, u8 mode);
+void mcs_reset_port(struct mcs *mcs, u8 port_id, u8 reset);
+void mcs_set_port_cfg(struct mcs *mcs, struct mcs_port_cfg_set_req *req);
+void mcs_get_port_cfg(struct mcs *mcs, struct mcs_port_cfg_get_req *req,
+ struct mcs_port_cfg_get_rsp *rsp);
+void mcs_get_custom_tag_cfg(struct mcs *mcs, struct mcs_custom_tag_cfg_get_req *req,
+ struct mcs_custom_tag_cfg_get_rsp *rsp);
+int mcs_alloc_ctrlpktrule(struct rsrc_bmap *rsrc, u16 *pf_map, u16 offset, u16 pcifunc);
+int mcs_free_ctrlpktrule(struct mcs *mcs, struct mcs_free_ctrl_pkt_rule_req *req);
+int mcs_ctrlpktrule_write(struct mcs *mcs, struct mcs_ctrl_pkt_rule_write_req *req);
+bool is_mcs_bypass(int mcs_id);
+
+/* CN10K-B APIs */
+void cn10kb_mcs_set_hw_capabilities(struct mcs *mcs);
+void cn10kb_mcs_tx_sa_mem_map_write(struct mcs *mcs, struct mcs_tx_sc_sa_map *map);
+void cn10kb_mcs_flowid_secy_map(struct mcs *mcs, struct secy_mem_map *map, int dir);
+void cn10kb_mcs_rx_sa_mem_map_write(struct mcs *mcs, struct mcs_rx_sc_sa_map *map);
+void cn10kb_mcs_parser_cfg(struct mcs *mcs);
+void cn10kb_mcs_pab_intr_handler(struct mcs *mcs, u64 intr, enum mcs_direction dir);
+void cn10kb_mcs_bbe_intr_handler(struct mcs *mcs, u64 intr, enum mcs_direction dir);
+
+/* CNF10K-B APIs */
+struct mcs_ops *cnf10kb_get_mac_ops(void);
+void cnf10kb_mcs_set_hw_capabilities(struct mcs *mcs);
+void cnf10kb_mcs_tx_sa_mem_map_write(struct mcs *mcs, struct mcs_tx_sc_sa_map *map);
+void cnf10kb_mcs_flowid_secy_map(struct mcs *mcs, struct secy_mem_map *map, int dir);
+void cnf10kb_mcs_rx_sa_mem_map_write(struct mcs *mcs, struct mcs_rx_sc_sa_map *map);
+void cnf10kb_mcs_parser_cfg(struct mcs *mcs);
+void cnf10kb_mcs_tx_pn_thresh_reached_handler(struct mcs *mcs);
+void cnf10kb_mcs_tx_pn_wrapped_handler(struct mcs *mcs);
+void cnf10kb_mcs_bbe_intr_handler(struct mcs *mcs, u64 intr, enum mcs_direction dir);
+void cnf10kb_mcs_pab_intr_handler(struct mcs *mcs, u64 intr, enum mcs_direction dir);
+
+/* Stats APIs */
+void mcs_get_sc_stats(struct mcs *mcs, struct mcs_sc_stats *stats, int id, int dir);
+void mcs_get_sa_stats(struct mcs *mcs, struct mcs_sa_stats *stats, int id, int dir);
+void mcs_get_port_stats(struct mcs *mcs, struct mcs_port_stats *stats, int id, int dir);
+void mcs_get_flowid_stats(struct mcs *mcs, struct mcs_flowid_stats *stats, int id, int dir);
+void mcs_get_rx_secy_stats(struct mcs *mcs, struct mcs_secy_stats *stats, int id);
+void mcs_get_tx_secy_stats(struct mcs *mcs, struct mcs_secy_stats *stats, int id);
+void mcs_clear_stats(struct mcs *mcs, u8 type, u8 id, int dir);
+int mcs_clear_all_stats(struct mcs *mcs, u16 pcifunc, int dir);
+int mcs_set_force_clk_en(struct mcs *mcs, bool set);
+
+int mcs_add_intr_wq_entry(struct mcs *mcs, struct mcs_intr_event *event);
+
+#endif /* MCS_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs_cnf10kb.c b/drivers/net/ethernet/marvell/octeontx2/af/mcs_cnf10kb.c
new file mode 100644
index 000000000..9f9b904ab
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs_cnf10kb.c
@@ -0,0 +1,277 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell MCS driver
+ *
+ * Copyright (C) 2022 Marvell.
+ */
+
+#include "mcs.h"
+#include "mcs_reg.h"
+
+static struct mcs_ops cnf10kb_mcs_ops = {
+ .mcs_set_hw_capabilities = cnf10kb_mcs_set_hw_capabilities,
+ .mcs_parser_cfg = cnf10kb_mcs_parser_cfg,
+ .mcs_tx_sa_mem_map_write = cnf10kb_mcs_tx_sa_mem_map_write,
+ .mcs_rx_sa_mem_map_write = cnf10kb_mcs_rx_sa_mem_map_write,
+ .mcs_flowid_secy_map = cnf10kb_mcs_flowid_secy_map,
+ .mcs_bbe_intr_handler = cnf10kb_mcs_bbe_intr_handler,
+ .mcs_pab_intr_handler = cnf10kb_mcs_pab_intr_handler,
+};
+
+struct mcs_ops *cnf10kb_get_mac_ops(void)
+{
+ return &cnf10kb_mcs_ops;
+}
+
+void cnf10kb_mcs_set_hw_capabilities(struct mcs *mcs)
+{
+ struct hwinfo *hw = mcs->hw;
+
+ hw->tcam_entries = 64; /* TCAM entries */
+ hw->secy_entries = 64; /* SecY entries */
+ hw->sc_entries = 64; /* SC CAM entries */
+ hw->sa_entries = 128; /* SA entries */
+ hw->lmac_cnt = 4; /* lmacs/ports per mcs block */
+	hw->mcs_x2p_intf = 1;		/* x2p calibration intf */
+ hw->mcs_blks = 7; /* MCS blocks */
+ hw->ip_vec = MCS_CNF10KB_INT_VEC_IP; /* IP vector */
+}
+
+void cnf10kb_mcs_parser_cfg(struct mcs *mcs)
+{
+ u64 reg, val;
+
+	/* VLAN CTag */
+ val = (0x8100ull & 0xFFFF) | BIT_ULL(20) | BIT_ULL(22);
+
+ reg = MCSX_PEX_RX_SLAVE_CUSTOM_TAGX(0);
+ mcs_reg_write(mcs, reg, val);
+
+ reg = MCSX_PEX_TX_SLAVE_CUSTOM_TAGX(0);
+ mcs_reg_write(mcs, reg, val);
+
+ /* VLAN STag */
+ val = (0x88a8ull & 0xFFFF) | BIT_ULL(20) | BIT_ULL(23);
+
+ /* RX */
+ reg = MCSX_PEX_RX_SLAVE_CUSTOM_TAGX(1);
+ mcs_reg_write(mcs, reg, val);
+
+ /* TX */
+ reg = MCSX_PEX_TX_SLAVE_CUSTOM_TAGX(1);
+ mcs_reg_write(mcs, reg, val);
+
+	/* Enable custom tags 0 and 1 and sectag */
+ val = BIT_ULL(0) | BIT_ULL(1) | BIT_ULL(12);
+
+ reg = MCSX_PEX_RX_SLAVE_ETYPE_ENABLE;
+ mcs_reg_write(mcs, reg, val);
+
+ reg = MCSX_PEX_TX_SLAVE_ETYPE_ENABLE;
+ mcs_reg_write(mcs, reg, val);
+}
+
+void cnf10kb_mcs_flowid_secy_map(struct mcs *mcs, struct secy_mem_map *map, int dir)
+{
+ u64 reg, val;
+
+ val = (map->secy & 0x3F) | (map->ctrl_pkt & 0x1) << 6;
+ if (dir == MCS_RX) {
+ reg = MCSX_CPM_RX_SLAVE_SECY_MAP_MEMX(map->flow_id);
+ } else {
+ reg = MCSX_CPM_TX_SLAVE_SECY_MAP_MEM_0X(map->flow_id);
+ mcs_reg_write(mcs, reg, map->sci);
+ val |= (map->sc & 0x3F) << 7;
+ reg = MCSX_CPM_TX_SLAVE_SECY_MAP_MEM_1X(map->flow_id);
+ }
+
+ mcs_reg_write(mcs, reg, val);
+}
+
+void cnf10kb_mcs_tx_sa_mem_map_write(struct mcs *mcs, struct mcs_tx_sc_sa_map *map)
+{
+ u64 reg, val;
+
+ val = (map->sa_index0 & 0x7F) | (map->sa_index1 & 0x7F) << 7;
+
+ reg = MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(map->sc_id);
+ mcs_reg_write(mcs, reg, val);
+
+ reg = MCSX_CPM_TX_SLAVE_AUTO_REKEY_ENABLE_0;
+ val = mcs_reg_read(mcs, reg);
+
+ if (map->rekey_ena)
+ val |= BIT_ULL(map->sc_id);
+ else
+ val &= ~BIT_ULL(map->sc_id);
+
+ mcs_reg_write(mcs, reg, val);
+
+ mcs_reg_write(mcs, MCSX_CPM_TX_SLAVE_SA_INDEX0_VLDX(map->sc_id), map->sa_index0_vld);
+ mcs_reg_write(mcs, MCSX_CPM_TX_SLAVE_SA_INDEX1_VLDX(map->sc_id), map->sa_index1_vld);
+
+ mcs_reg_write(mcs, MCSX_CPM_TX_SLAVE_TX_SA_ACTIVEX(map->sc_id), map->tx_sa_active);
+}
+
+void cnf10kb_mcs_rx_sa_mem_map_write(struct mcs *mcs, struct mcs_rx_sc_sa_map *map)
+{
+ u64 val, reg;
+
+ val = (map->sa_index & 0x7F) | (map->sa_in_use << 7);
+
+ reg = MCSX_CPM_RX_SLAVE_SA_MAP_MEMX((4 * map->sc_id) + map->an);
+ mcs_reg_write(mcs, reg, val);
+}
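+
+/* Sketch only: on CNF10K-B the RX SA map keeps one entry per (SC, AN) pair,
+ * so the table index used above can be derived with a helper like this.
+ */
+static inline int example_rx_sa_map_index(int sc_id, int an)
+{
+	return (4 * sc_id) + an;	/* four association numbers per SC */
+}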
+
+int mcs_set_force_clk_en(struct mcs *mcs, bool set)
+{
+ unsigned long timeout = jiffies + usecs_to_jiffies(2000);
+ u64 val;
+
+ val = mcs_reg_read(mcs, MCSX_MIL_GLOBAL);
+
+ if (set) {
+ val |= BIT_ULL(4);
+ mcs_reg_write(mcs, MCSX_MIL_GLOBAL, val);
+
+ /* Poll till mcsx_mil_ip_gbl_status.mcs_ip_stats_ready value is 1 */
+ while (!(mcs_reg_read(mcs, MCSX_MIL_IP_GBL_STATUS) & BIT_ULL(0))) {
+ if (time_after(jiffies, timeout)) {
+ dev_err(mcs->dev, "MCS set force clk enable failed\n");
+ break;
+ }
+ }
+ } else {
+ val &= ~BIT_ULL(4);
+ mcs_reg_write(mcs, MCSX_MIL_GLOBAL, val);
+ }
+
+ return 0;
+}
+
+/* The TX SA interrupt is raised only if auto rekey is enabled.
+ * MCS_CPM_TX_SLAVE_SA_MAP_MEM_0X[sc].tx_sa_active gets toggled when one of
+ * the two SAs mapped to the SC expires: tx_sa_active=0 means the SA in
+ * SA_index1 expired, otherwise the SA in SA_index0 expired.
+ */
+void cnf10kb_mcs_tx_pn_thresh_reached_handler(struct mcs *mcs)
+{
+ struct mcs_intr_event event;
+ struct rsrc_bmap *sc_bmap;
+ unsigned long rekey_ena;
+ u64 val, sa_status;
+ int sc;
+
+ sc_bmap = &mcs->tx.sc;
+
+ event.mcs_id = mcs->mcs_id;
+ event.intr_mask = MCS_CPM_TX_PN_THRESH_REACHED_INT;
+
+ rekey_ena = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_AUTO_REKEY_ENABLE_0);
+
+ for_each_set_bit(sc, sc_bmap->bmap, mcs->hw->sc_entries) {
+		/* Skip SCs without auto rekey enabled */
+ if (!test_bit(sc, &rekey_ena))
+ continue;
+ sa_status = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_TX_SA_ACTIVEX(sc));
+		/* Check if the tx_sa_active status has changed */
+ if (sa_status == mcs->tx_sa_active[sc])
+ continue;
+
+		/* sa_status=1: SA_index0 expired, else SA_index1 expired */
+ val = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(sc));
+ if (sa_status)
+ event.sa_id = val & 0x7F;
+ else
+ event.sa_id = (val >> 7) & 0x7F;
+
+ event.pcifunc = mcs->tx.sa2pf_map[event.sa_id];
+ mcs_add_intr_wq_entry(mcs, &event);
+ }
+}
+
+void cnf10kb_mcs_tx_pn_wrapped_handler(struct mcs *mcs)
+{
+ struct mcs_intr_event event = { 0 };
+ struct rsrc_bmap *sc_bmap;
+ u64 val;
+ int sc;
+
+ sc_bmap = &mcs->tx.sc;
+
+ event.mcs_id = mcs->mcs_id;
+ event.intr_mask = MCS_CPM_TX_PACKET_XPN_EQ0_INT;
+
+ for_each_set_bit(sc, sc_bmap->bmap, mcs->hw->sc_entries) {
+ val = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(sc));
+
+ if (mcs->tx_sa_active[sc])
+ /* SA_index1 was used and got expired */
+ event.sa_id = (val >> 7) & 0x7F;
+ else
+ /* SA_index0 was used and got expired */
+ event.sa_id = val & 0x7F;
+
+ event.pcifunc = mcs->tx.sa2pf_map[event.sa_id];
+ mcs_add_intr_wq_entry(mcs, &event);
+ }
+}
+
+void cnf10kb_mcs_bbe_intr_handler(struct mcs *mcs, u64 intr,
+ enum mcs_direction dir)
+{
+ struct mcs_intr_event event = { 0 };
+ int i;
+
+ if (!(intr & MCS_BBE_INT_MASK))
+ return;
+
+ event.mcs_id = mcs->mcs_id;
+ event.pcifunc = mcs->pf_map[0];
+
+ for (i = 0; i < MCS_MAX_BBE_INT; i++) {
+ if (!(intr & BIT_ULL(i)))
+ continue;
+
+ /* Lower nibble denotes data fifo overflow interrupts and
+ * upper nibble indicates policy fifo overflow interrupts.
+ */
+ if (intr & 0xFULL)
+ event.intr_mask = (dir == MCS_RX) ?
+ MCS_BBE_RX_DFIFO_OVERFLOW_INT :
+ MCS_BBE_TX_DFIFO_OVERFLOW_INT;
+ else
+ event.intr_mask = (dir == MCS_RX) ?
+ MCS_BBE_RX_PLFIFO_OVERFLOW_INT :
+ MCS_BBE_TX_PLFIFO_OVERFLOW_INT;
+
+ /* Notify the lmac_id info which ran into BBE fatal error */
+ event.lmac_id = i & 0x3ULL;
+ mcs_add_intr_wq_entry(mcs, &event);
+ }
+}
+
+void cnf10kb_mcs_pab_intr_handler(struct mcs *mcs, u64 intr,
+ enum mcs_direction dir)
+{
+ struct mcs_intr_event event = { 0 };
+ int i;
+
+ if (!(intr & MCS_PAB_INT_MASK))
+ return;
+
+ event.mcs_id = mcs->mcs_id;
+ event.pcifunc = mcs->pf_map[0];
+
+ for (i = 0; i < MCS_MAX_PAB_INT; i++) {
+ if (!(intr & BIT_ULL(i)))
+ continue;
+
+ event.intr_mask = (dir == MCS_RX) ?
+ MCS_PAB_RX_CHAN_OVERFLOW_INT :
+ MCS_PAB_TX_CHAN_OVERFLOW_INT;
+
+ /* Notify the lmac_id info which ran into PAB fatal error */
+ event.lmac_id = i;
+ mcs_add_intr_wq_entry(mcs, &event);
+ }
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/mcs_reg.h
new file mode 100644
index 000000000..f4c6de890
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs_reg.h
@@ -0,0 +1,1129 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell MCS driver
+ *
+ * Copyright (C) 2022 Marvell.
+ */
+
+#ifndef MCS_REG_H
+#define MCS_REG_H
+
+#include <linux/bits.h>
+
+/* Registers */
+#define MCSX_IP_MODE 0x900c8ull
+#define MCSX_MCS_TOP_SLAVE_PORT_RESET(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x408ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xa28ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
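+
+/* Note: these register macros are GCC statement expressions that select the
+ * CN10K-B or CNF10K-B offset at runtime, and they implicitly require a local
+ * variable named 'mcs' to be in scope at the call site. A hypothetical
+ * caller, for illustration only:
+ *
+ *	void example_reset_port(struct mcs *mcs, u8 port_id)
+ *	{
+ *		mcs_reg_write(mcs, MCSX_MCS_TOP_SLAVE_PORT_RESET(port_id), 1);
+ *	}
+ */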
+
+
+#define MCSX_MCS_TOP_SLAVE_CHANNEL_CFG(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x808ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xa68ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_MIL_GLOBAL ({ \
+ u64 offset; \
+ \
+ offset = 0x80000ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x60000ull; \
+ offset; })
+
+#define MCSX_MIL_RX_LMACX_CFG(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x900a8ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x700a8ull; \
+ offset += (a) * 0x800ull; \
+ offset; })
+
+#define MCSX_HIL_GLOBAL ({ \
+ u64 offset; \
+ \
+ offset = 0xc0000ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xa0000ull; \
+ offset; })
+
+#define MCSX_LINK_LMACX_CFG(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x90000ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x70000ull; \
+ offset += (a) * 0x800ull; \
+ offset; })
+
+#define MCSX_MIL_RX_GBL_STATUS ({ \
+ u64 offset; \
+ \
+ offset = 0x800c8ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x600c8ull; \
+ offset; })
+
+#define MCSX_MIL_IP_GBL_STATUS ({ \
+ u64 offset; \
+ \
+ offset = 0x800d0ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x600d0ull; \
+ offset; })
+
+/* PAB */
+#define MCSX_PAB_RX_SLAVE_PORT_CFGX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x1718ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x280ull; \
+ offset += (a) * 0x40ull; \
+ offset; })
+
+#define MCSX_PAB_TX_SLAVE_PORT_CFGX(a) (0x2930ull + (a) * 0x40ull)
+
+/* PEX registers */
+#define MCSX_PEX_RX_SLAVE_VLAN_CFGX(a) (0x3b58ull + (a) * 0x8ull)
+#define MCSX_PEX_TX_SLAVE_VLAN_CFGX(a) (0x46f8ull + (a) * 0x8ull)
+#define MCSX_PEX_TX_SLAVE_CUSTOM_TAG_REL_MODE_SEL(a) (0x788ull + (a) * 0x8ull)
+#define MCSX_PEX_TX_SLAVE_PORT_CONFIG(a) (0x4738ull + (a) * 0x8ull)
+#define MCSX_PEX_RX_SLAVE_PORT_CFGX(a) (0x3b98ull + (a) * 0x8ull)
+#define MCSX_PEX_RX_SLAVE_RULE_ETYPE_CFGX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x3fc0ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x558ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_RX_SLAVE_RULE_DAX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4000ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x598ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_RX_SLAVE_RULE_DA_RANGE_MINX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4040ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x5d8ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_RX_SLAVE_RULE_DA_RANGE_MAXX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4048ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x5e0ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_RX_SLAVE_RULE_COMBO_MINX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4080ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x648ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_RX_SLAVE_RULE_COMBO_MAXX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4088ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x650ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_RX_SLAVE_RULE_COMBO_ETX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4090ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x658ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_RX_SLAVE_RULE_MAC ({ \
+ u64 offset; \
+ \
+ offset = 0x40e0ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x6d8ull; \
+ offset; })
+
+#define MCSX_PEX_RX_SLAVE_RULE_ENABLE ({ \
+ u64 offset; \
+ \
+ offset = 0x40e8ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x6e0ull; \
+ offset; })
+
+#define MCSX_PEX_TX_SLAVE_RULE_ETYPE_CFGX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4b60ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x7d8ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_TX_SLAVE_RULE_DAX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4ba0ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x818ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_TX_SLAVE_RULE_DA_RANGE_MINX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4be0ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x858ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_TX_SLAVE_RULE_DA_RANGE_MAXX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4be8ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x860ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_TX_SLAVE_RULE_COMBO_MINX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4c20ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x8c8ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_TX_SLAVE_RULE_COMBO_MAXX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4c28ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x8d0ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_TX_SLAVE_RULE_COMBO_ETX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4c30ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x8d8ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_TX_SLAVE_RULE_MAC ({ \
+ u64 offset; \
+ \
+ offset = 0x4c80ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x958ull; \
+ offset; })
+
+#define MCSX_PEX_TX_SLAVE_RULE_ENABLE ({ \
+ u64 offset; \
+ \
+ offset = 0x4c88ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x960ull; \
+ offset; })
+
+#define MCSX_PEX_RX_SLAVE_PEX_CONFIGURATION ({ \
+ u64 offset; \
+ \
+ offset = 0x3b50ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x4c0ull; \
+ offset; })
+
+/* CNF10K-B */
+#define MCSX_PEX_RX_SLAVE_CUSTOM_TAGX(a) (0x4c8ull + (a) * 0x8ull)
+#define MCSX_PEX_TX_SLAVE_CUSTOM_TAGX(a) (0x748ull + (a) * 0x8ull)
+#define MCSX_PEX_RX_SLAVE_ETYPE_ENABLE 0x6e8ull
+#define MCSX_PEX_TX_SLAVE_ETYPE_ENABLE 0x968ull
+
+/* BBE */
+#define MCSX_BBE_RX_SLAVE_PADDING_CTL 0xe08ull
+#define MCSX_BBE_TX_SLAVE_PADDING_CTL 0x12f8ull
+#define MCSX_BBE_RX_SLAVE_CAL_ENTRY 0x180ull
+#define MCSX_BBE_RX_SLAVE_CAL_LEN 0x188ull
+#define MCSX_PAB_RX_SLAVE_FIFO_SKID_CFGX(a) (0x290ull + (a) * 0x40ull)
+#define MCSX_BBE_RX_SLAVE_DFIFO_OVERFLOW_0 0xe20
+#define MCSX_BBE_TX_SLAVE_DFIFO_OVERFLOW_0 0x1298
+#define MCSX_BBE_RX_SLAVE_PLFIFO_OVERFLOW_0 0xe40
+#define MCSX_BBE_TX_SLAVE_PLFIFO_OVERFLOW_0 0x12b8
+#define MCSX_BBE_RX_SLAVE_BBE_INT ({ \
+ u64 offset; \
+ \
+ offset = 0xe00ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x160ull; \
+ offset; })
+
+#define MCSX_BBE_RX_SLAVE_BBE_INT_ENB ({ \
+ u64 offset; \
+ \
+ offset = 0xe08ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x168ull; \
+ offset; })
+
+#define MCSX_BBE_RX_SLAVE_BBE_INT_INTR_RW ({ \
+ u64 offset; \
+ \
+ offset = 0xe08ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x178ull; \
+ offset; })
+
+#define MCSX_BBE_TX_SLAVE_BBE_INT ({ \
+ u64 offset; \
+ \
+ offset = 0x1278ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x1e0ull; \
+ offset; })
+
+#define MCSX_BBE_TX_SLAVE_BBE_INT_INTR_RW ({ \
+ u64 offset; \
+ \
+ offset = 0x1278ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x1f8ull; \
+ offset; })
+
+#define MCSX_BBE_TX_SLAVE_BBE_INT_ENB ({ \
+ u64 offset; \
+ \
+ offset = 0x1280ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x1e8ull; \
+ offset; })
+
+#define MCSX_PAB_RX_SLAVE_PAB_INT ({ \
+ u64 offset; \
+ \
+ offset = 0x16f0ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x260ull; \
+ offset; })
+
+#define MCSX_PAB_RX_SLAVE_PAB_INT_ENB ({ \
+ u64 offset; \
+ \
+ offset = 0x16f8ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x268ull; \
+ offset; })
+
+#define MCSX_PAB_RX_SLAVE_PAB_INT_INTR_RW ({ \
+ u64 offset; \
+ \
+ offset = 0x16f8ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x278ull; \
+ offset; })
+
+#define MCSX_PAB_TX_SLAVE_PAB_INT ({ \
+ u64 offset; \
+ \
+ offset = 0x2908ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x380ull; \
+ offset; })
+
+#define MCSX_PAB_TX_SLAVE_PAB_INT_ENB ({ \
+ u64 offset; \
+ \
+ offset = 0x2910ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x388ull; \
+ offset; })
+
+#define MCSX_PAB_TX_SLAVE_PAB_INT_INTR_RW ({ \
+ u64 offset; \
+ \
+ offset = 0x16f8ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x398ull; \
+ offset; })
+
+/* CPM registers */
+#define MCSX_CPM_RX_SLAVE_FLOWID_TCAM_DATAX(a, b) ({ \
+ u64 offset; \
+ \
+ offset = 0x30740ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x3bf8ull; \
+ offset += (a) * 0x8ull + (b) * 0x20ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_FLOWID_TCAM_MASKX(a, b) ({ \
+ u64 offset; \
+ \
+ offset = 0x34740ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x43f8ull; \
+ offset += (a) * 0x8ull + (b) * 0x20ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_FLOWID_TCAM_ENA_0 ({ \
+ u64 offset; \
+ \
+ offset = 0x30700ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x3bd8ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_SC_CAMX(a, b) ({ \
+ u64 offset; \
+ \
+ offset = 0x38780ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x4c08ull; \
+ offset += (a) * 0x8ull + (b) * 0x10ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_SC_CAM_ENA(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x38740ull + (a) * 0x8ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x4bf8ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_SECY_MAP_MEMX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x23ee0ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xbd0ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_SECY_PLCY_MEM_0X(a) ({ \
+ u64 offset; \
+ \
+ offset = (0x246e0ull + (a) * 0x10ull); \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = (0xdd0ull + (a) * 0x8ull); \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_SA_KEY_LOCKOUTX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x23E90ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xbb0ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_SA_MAP_MEMX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x256e0ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xfd0ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_SA_PLCY_MEMX(a, b) ({ \
+ u64 offset; \
+ \
+ offset = 0x27700ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x17d8ull; \
+ offset += (a) * 0x8ull + (b) * 0x40ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_SA_PN_TABLE_MEMX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x2f700ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x37d8; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_XPN_THRESHOLD ({ \
+ u64 offset; \
+ \
+ offset = 0x23e40ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xb90ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_PN_THRESHOLD ({ \
+ u64 offset; \
+ \
+ offset = 0x23e48ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xb98ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_PN_THRESH_REACHEDX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x23e50ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xba0ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_FLOWID_TCAM_ENA_1 0x30708ull
+#define MCSX_CPM_RX_SLAVE_SECY_PLCY_MEM_1X(a) (0x246e8ull + (a) * 0x10ull)
+
+/* TX registers */
+#define MCSX_CPM_TX_SLAVE_FLOWID_TCAM_DATAX(a, b) ({ \
+ u64 offset; \
+ \
+ offset = 0x51d50ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xa7c0ull; \
+ offset += (a) * 0x8ull + (b) * 0x20ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_FLOWID_TCAM_MASKX(a, b) ({ \
+ u64 offset; \
+ \
+ offset = 0x55d50ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xafc0ull; \
+ offset += (a) * 0x8ull + (b) * 0x20ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_FLOWID_TCAM_ENA_0 ({ \
+ u64 offset; \
+ \
+ offset = 0x51d10ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xa7a0ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_SECY_MAP_MEM_0X(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x3e508ull + (a) * 0x8ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x5550ull + (a) * 0x10ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_SECY_PLCY_MEMX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x3ed08ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x5950ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_SA_KEY_LOCKOUTX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x3e4c0ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x5538ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x3fd10ull + (a) * 0x10ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x6150ull + (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_SA_PLCY_MEMX(a, b) ({ \
+ u64 offset; \
+ \
+ offset = 0x40d10ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x63a0ull; \
+ offset += (a) * 0x8ull + (b) * 0x80ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_SA_PN_TABLE_MEMX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x50d10ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xa3a0ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_XPN_THRESHOLD ({ \
+ u64 offset; \
+ \
+ offset = 0x3e4b0ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x5528ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_PN_THRESHOLD ({ \
+ u64 offset; \
+ \
+ offset = 0x3e4b8ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x5530ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_SA_MAP_MEM_1X(a) (0x3fd18ull + (a) * 0x10ull)
+#define MCSX_CPM_TX_SLAVE_SECY_MAP_MEM_1X(a) (0x5558ull + (a) * 0x10ull)
+#define MCSX_CPM_TX_SLAVE_FLOWID_TCAM_ENA_1 0x51d18ull
+#define MCSX_CPM_TX_SLAVE_TX_SA_ACTIVEX(a) (0x5b50ull + (a) * 0x8ull)
+#define MCSX_CPM_TX_SLAVE_SA_INDEX0_VLDX(a) (0x5d50ull + (a) * 0x8ull)
+#define MCSX_CPM_TX_SLAVE_SA_INDEX1_VLDX(a) (0x5f50ull + (a) * 0x8ull)
+#define MCSX_CPM_TX_SLAVE_AUTO_REKEY_ENABLE_0 0x5500ull
+
+/* CSE */
+#define MCSX_CSE_RX_MEM_SLAVE_IFINCTLBCPKTSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x9e80ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xc218ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_IFINCTLMCPKTSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x9680ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xc018ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_IFINCTLOCTETSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x6e80ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xbc18ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_IFINCTLUCPKTSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x8e80ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xbe18ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLBCPKTSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x8680ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xca18ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLMCPKTSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x7e80ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xc818ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLOCTETSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x6680ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xc418ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLUCPKTSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x7680ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xc618ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INOCTETSSECYDECRYPTEDX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x5e80ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xdc18ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INOCTETSSECYVALIDATEX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x5680ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xda18ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSCTRLPORTDISABLEDX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0xd680ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xce18ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSFLOWIDTCAMHITX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x16a80ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xec78ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSFLOWIDTCAMMISSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x16680ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xec38ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSPARSEERRX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x16880ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xec18ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCCAMHITX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0xfe80ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xde18ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCINVALIDX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x10680ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xe418ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCNOTVALIDX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x10e80ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xe218ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYBADTAGX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0xae80ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xd418ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYNOSAX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0xc680ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xd618ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYNOSAERRORX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0xce80ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xd818ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYTAGGEDCTLX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0xbe80ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xcc18ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_SLAVE_CTRL ({ \
+ u64 offset; \
+ \
+ offset = 0x52a0ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x9c0ull; \
+ offset; })
+
+#define MCSX_CSE_RX_SLAVE_STATS_CLEAR ({ \
+ u64 offset; \
+ \
+ offset = 0x52b8ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x9d8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCUNCHECKEDX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0xee80ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xe818ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYUNTAGGEDX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0xa680ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xd018ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCLATEORDELAYEDX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0xf680ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xe018ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INOCTETSSCDECRYPTEDX(a) (0xe680ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INOCTETSSCVALIDATEX(a) (0xde80ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYNOTAGX(a) (0xd218ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYCTLX(a) (0xb680ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSAINVALIDX(a) (0x12680ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSANOTUSINGSAERRORX(a) (0x15680ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSANOTVALIDX(a) (0x13680ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSAOKX(a) (0x11680ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSAUNUSEDSAX(a) (0x14680ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSEARLYPREEMPTERRX(a) (0xec58ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCOKX(a) (0xea18ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCDELAYEDX(a) (0xe618ull + (a) * 0x8ull)
+
+/* CSE TX */
+#define MCSX_CSE_TX_MEM_SLAVE_IFOUTCOMMONOCTETSX(a) (0x18440ull + (a) * 0x8ull)
+#define MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLBCPKTSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x1c440ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xf478ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLMCPKTSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x1bc40ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xf278ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLOCTETSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x19440ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xee78ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLUCPKTSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x1b440ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xf078ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLBCPKTSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x1ac40ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xfc78ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLMCPKTSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x1a440ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xfa78ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLOCTETSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x18c40ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xf678ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLUCPKTSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x19c40ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xf878ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSECYENCRYPTEDX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x17c40ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x10878ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSECYPROTECTEDX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x17440ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x10678ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSCTRLPORTDISABLEDX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x1e440ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xfe78ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSFLOWIDTCAMHITX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x23240ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x10ed8ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSFLOWIDTCAMMISSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x22c40ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x10e98ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSPARSEERRX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x22e40ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x10e78ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSCENCRYPTEDX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x20440ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x10c78ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSCPROTECTEDX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x1fc40ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x10a78ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECTAGINSERTIONERRX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x23040ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x110d8ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECYNOACTIVESAX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x1dc40ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x10278ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECYTOOLONGX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x1d440ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x10478ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECYUNTAGGEDX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x1cc40ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x10078ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_SLAVE_CTRL ({ \
+ u64 offset; \
+ \
+ offset = 0x54a0ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xa00ull; \
+ offset; })
+
+#define MCSX_CSE_TX_SLAVE_STATS_CLEAR ({ \
+ u64 offset; \
+ \
+ offset = 0x54b8ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xa18ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSCENCRYPTEDX(a) (0x1f440ull + (a) * 0x8ull)
+#define MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSCPROTECTEDX(a) (0x1ec40ull + (a) * 0x8ull)
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSEARLYPREEMPTERRX(a) (0x10eb8ull + (a) * 0x8ull)
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSAENCRYPTEDX(a) (0x21c40ull + (a) * 0x8ull)
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSAPROTECTEDX(a) (0x20c40ull + (a) * 0x8ull)
+
+#define MCSX_IP_INT ({ \
+ u64 offset; \
+ \
+ offset = 0x80028ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x60028ull; \
+ offset; })
+
+#define MCSX_IP_INT_ENA_W1S ({ \
+ u64 offset; \
+ \
+ offset = 0x80040ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x60040ull; \
+ offset; })
+
+#define MCSX_IP_INT_ENA_W1C ({ \
+ u64 offset; \
+ \
+ offset = 0x80038ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x60038ull; \
+ offset; })
+
+#define MCSX_TOP_SLAVE_INT_SUM ({ \
+ u64 offset; \
+ \
+ offset = 0xc20ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xab8ull; \
+ offset; })
+
+#define MCSX_TOP_SLAVE_INT_SUM_ENB ({ \
+ u64 offset; \
+ \
+ offset = 0xc28ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xac0ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_RX_INT ({ \
+ u64 offset; \
+ \
+ offset = 0x23c00ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x0ad8ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_RX_INT_ENB ({ \
+ u64 offset; \
+ \
+ offset = 0x23c08ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xae0ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_TX_INT ({ \
+ u64 offset; \
+ \
+ offset = 0x3d490ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x54a0ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_TX_INT_ENB ({ \
+ u64 offset; \
+ \
+ offset = 0x3d498ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x54a8ull; \
+ offset; })
+
+#endif
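
Note on the register macros in mcs_reg.h above: each GCC statement expression evaluates to the CN10K-B register offset by default and switches to the CNF10K-B offset when more than one MCS block is present (mcs->hw->mcs_blks > 1), implicitly using the local `mcs` pointer in scope at the call site. A minimal, illustrative sketch of how such a macro is consumed, assuming only the mcs_reg_read()/mcs_reg_write() helpers already used elsewhere in this patch (the function name and readback check are illustrative, not part of the driver):

/* Illustrative only: shows how the offset-selecting macros resolve per
 * silicon variant when expanded against the 'mcs' pointer in scope.
 */
static void mcs_rx_pn_threshold_sketch(struct mcs *mcs, u64 threshold)
{
	u64 reg;

	/* Expands to 0x23e48 on CN10K-B (single MCS block), 0xb98 on CNF10K-B */
	reg = MCSX_CPM_RX_SLAVE_PN_THRESHOLD;

	mcs_reg_write(mcs, reg, threshold);
	if (mcs_reg_read(mcs, reg) != threshold)
		dev_warn(mcs->dev, "PN threshold readback mismatch\n");
}
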
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c b/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
new file mode 100644
index 000000000..dfd23580e
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
@@ -0,0 +1,926 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell CN10K MCS driver
+ *
+ * Copyright (C) 2022 Marvell.
+ */
+
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "mcs.h"
+#include "rvu.h"
+#include "mcs_reg.h"
+#include "lmac_common.h"
+
+#define M(_name, _id, _fn_name, _req_type, _rsp_type) \
+static struct _req_type __maybe_unused \
+*otx2_mbox_alloc_msg_ ## _fn_name(struct rvu *rvu, int devid) \
+{ \
+ struct _req_type *req; \
+ \
+ req = (struct _req_type *)otx2_mbox_alloc_msg_rsp( \
+ &rvu->afpf_wq_info.mbox_up, devid, sizeof(struct _req_type), \
+ sizeof(struct _rsp_type)); \
+ if (!req) \
+ return NULL; \
+ req->hdr.sig = OTX2_MBOX_REQ_SIG; \
+ req->hdr.id = _id; \
+ return req; \
+}
+
+MBOX_UP_MCS_MESSAGES
+#undef M
+
+void rvu_mcs_ptp_cfg(struct rvu *rvu, u8 rpm_id, u8 lmac_id, bool ena)
+{
+ struct mcs *mcs;
+ u64 cfg;
+ u8 port;
+
+ if (!rvu->mcs_blk_cnt)
+ return;
+
+ /* When PTP is enabled, RPM adds an 8B header to all
+ * RX packets. The MCS PEX needs to be configured to skip
+ * these 8 bytes during packet parsing.
+ */
+
+ /* CNF10K-B */
+ if (rvu->mcs_blk_cnt > 1) {
+ mcs = mcs_get_pdata(rpm_id);
+ cfg = mcs_reg_read(mcs, MCSX_PEX_RX_SLAVE_PEX_CONFIGURATION);
+ if (ena)
+ cfg |= BIT_ULL(lmac_id);
+ else
+ cfg &= ~BIT_ULL(lmac_id);
+ mcs_reg_write(mcs, MCSX_PEX_RX_SLAVE_PEX_CONFIGURATION, cfg);
+ return;
+ }
+ /* CN10K-B */
+ mcs = mcs_get_pdata(0);
+ port = (rpm_id * rvu->hw->lmac_per_cgx) + lmac_id;
+ cfg = mcs_reg_read(mcs, MCSX_PEX_RX_SLAVE_PORT_CFGX(port));
+ if (ena)
+ cfg |= BIT_ULL(0);
+ else
+ cfg &= ~BIT_ULL(0);
+ mcs_reg_write(mcs, MCSX_PEX_RX_SLAVE_PORT_CFGX(port), cfg);
+}
+
+int rvu_mbox_handler_mcs_set_lmac_mode(struct rvu *rvu,
+ struct mcs_set_lmac_mode *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ if (BIT_ULL(req->lmac_id) & mcs->hw->lmac_bmap)
+ mcs_set_lmac_mode(mcs, req->lmac_id, req->mode);
+
+ return 0;
+}
+
+int mcs_add_intr_wq_entry(struct mcs *mcs, struct mcs_intr_event *event)
+{
+ struct mcs_intrq_entry *qentry;
+ u16 pcifunc = event->pcifunc;
+ struct rvu *rvu = mcs->rvu;
+ struct mcs_pfvf *pfvf;
+
+ /* Check if it is PF or VF */
+ if (pcifunc & RVU_PFVF_FUNC_MASK)
+ pfvf = &mcs->vf[rvu_get_hwvf(rvu, pcifunc)];
+ else
+ pfvf = &mcs->pf[rvu_get_pf(pcifunc)];
+
+ event->intr_mask &= pfvf->intr_mask;
+
+ /* Check if PF/VF interrupt notification is enabled */
+ if (!(pfvf->intr_mask && event->intr_mask))
+ return 0;
+
+ qentry = kmalloc(sizeof(*qentry), GFP_ATOMIC);
+ if (!qentry)
+ return -ENOMEM;
+
+ qentry->intr_event = *event;
+ spin_lock(&rvu->mcs_intrq_lock);
+ list_add_tail(&qentry->node, &rvu->mcs_intrq_head);
+ spin_unlock(&rvu->mcs_intrq_lock);
+ queue_work(rvu->mcs_intr_wq, &rvu->mcs_intr_work);
+
+ return 0;
+}
+
+static int mcs_notify_pfvf(struct mcs_intr_event *event, struct rvu *rvu)
+{
+ struct mcs_intr_info *req;
+ int err, pf;
+
+ pf = rvu_get_pf(event->pcifunc);
+
+ req = otx2_mbox_alloc_msg_mcs_intr_notify(rvu, pf);
+ if (!req)
+ return -ENOMEM;
+
+ req->mcs_id = event->mcs_id;
+ req->intr_mask = event->intr_mask;
+ req->sa_id = event->sa_id;
+ req->hdr.pcifunc = event->pcifunc;
+ req->lmac_id = event->lmac_id;
+
+ otx2_mbox_msg_send(&rvu->afpf_wq_info.mbox_up, pf);
+ err = otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pf);
+ if (err)
+ dev_warn(rvu->dev, "MCS notification to pf %d failed\n", pf);
+
+ return 0;
+}
+
+static void mcs_intr_handler_task(struct work_struct *work)
+{
+ struct rvu *rvu = container_of(work, struct rvu, mcs_intr_work);
+ struct mcs_intrq_entry *qentry;
+ struct mcs_intr_event *event;
+ unsigned long flags;
+
+ do {
+ spin_lock_irqsave(&rvu->mcs_intrq_lock, flags);
+ qentry = list_first_entry_or_null(&rvu->mcs_intrq_head,
+ struct mcs_intrq_entry,
+ node);
+ if (qentry)
+ list_del(&qentry->node);
+
+ spin_unlock_irqrestore(&rvu->mcs_intrq_lock, flags);
+ if (!qentry)
+ break; /* nothing more to process */
+
+ event = &qentry->intr_event;
+
+ mcs_notify_pfvf(event, rvu);
+ kfree(qentry);
+ } while (1);
+}
+
+int rvu_mbox_handler_mcs_intr_cfg(struct rvu *rvu,
+ struct mcs_intr_cfg *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct mcs_pfvf *pfvf;
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ /* Check if it is PF or VF */
+ if (pcifunc & RVU_PFVF_FUNC_MASK)
+ pfvf = &mcs->vf[rvu_get_hwvf(rvu, pcifunc)];
+ else
+ pfvf = &mcs->pf[rvu_get_pf(pcifunc)];
+
+ mcs->pf_map[0] = pcifunc;
+ pfvf->intr_mask = req->intr_mask;
+
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_get_hw_info(struct rvu *rvu,
+ struct msg_req *req,
+ struct mcs_hw_info *rsp)
+{
+ struct mcs *mcs;
+
+ if (!rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_NOT_MAPPED;
+
+ /* MCS resources are the same across all blocks */
+ mcs = mcs_get_pdata(0);
+ rsp->num_mcs_blks = rvu->mcs_blk_cnt;
+ rsp->tcam_entries = mcs->hw->tcam_entries;
+ rsp->secy_entries = mcs->hw->secy_entries;
+ rsp->sc_entries = mcs->hw->sc_entries;
+ rsp->sa_entries = mcs->hw->sa_entries;
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_port_reset(struct rvu *rvu, struct mcs_port_reset_req *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ mcs_reset_port(mcs, req->port_id, req->reset);
+
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_clear_stats(struct rvu *rvu,
+ struct mcs_clear_stats *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ mutex_lock(&mcs->stats_lock);
+ if (req->all)
+ mcs_clear_all_stats(mcs, pcifunc, req->dir);
+ else
+ mcs_clear_stats(mcs, req->type, req->id, req->dir);
+
+ mutex_unlock(&mcs->stats_lock);
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_get_flowid_stats(struct rvu *rvu,
+ struct mcs_stats_req *req,
+ struct mcs_flowid_stats *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ /* In CNF10K-B, before reading the statistics,
+ * MCSX_MIL_GLOBAL.FORCE_CLK_EN_IP needs to be set
+ * to get accurate statistics
+ */
+ if (mcs->hw->mcs_blks > 1)
+ mcs_set_force_clk_en(mcs, true);
+
+ mutex_lock(&mcs->stats_lock);
+ mcs_get_flowid_stats(mcs, rsp, req->id, req->dir);
+ mutex_unlock(&mcs->stats_lock);
+
+ /* Clear MCSX_MIL_GLOBAL.FORCE_CLK_EN_IP after reading
+ * the statistics
+ */
+ if (mcs->hw->mcs_blks > 1)
+ mcs_set_force_clk_en(mcs, false);
+
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_get_secy_stats(struct rvu *rvu,
+ struct mcs_stats_req *req,
+ struct mcs_secy_stats *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ if (mcs->hw->mcs_blks > 1)
+ mcs_set_force_clk_en(mcs, true);
+
+ mutex_lock(&mcs->stats_lock);
+
+ if (req->dir == MCS_RX)
+ mcs_get_rx_secy_stats(mcs, rsp, req->id);
+ else
+ mcs_get_tx_secy_stats(mcs, rsp, req->id);
+
+ mutex_unlock(&mcs->stats_lock);
+
+ if (mcs->hw->mcs_blks > 1)
+ mcs_set_force_clk_en(mcs, false);
+
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_get_sc_stats(struct rvu *rvu,
+ struct mcs_stats_req *req,
+ struct mcs_sc_stats *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ if (mcs->hw->mcs_blks > 1)
+ mcs_set_force_clk_en(mcs, true);
+
+ mutex_lock(&mcs->stats_lock);
+ mcs_get_sc_stats(mcs, rsp, req->id, req->dir);
+ mutex_unlock(&mcs->stats_lock);
+
+ if (mcs->hw->mcs_blks > 1)
+ mcs_set_force_clk_en(mcs, false);
+
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_get_sa_stats(struct rvu *rvu,
+ struct mcs_stats_req *req,
+ struct mcs_sa_stats *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ if (mcs->hw->mcs_blks > 1)
+ mcs_set_force_clk_en(mcs, true);
+
+ mutex_lock(&mcs->stats_lock);
+ mcs_get_sa_stats(mcs, rsp, req->id, req->dir);
+ mutex_unlock(&mcs->stats_lock);
+
+ if (mcs->hw->mcs_blks > 1)
+ mcs_set_force_clk_en(mcs, false);
+
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_get_port_stats(struct rvu *rvu,
+ struct mcs_stats_req *req,
+ struct mcs_port_stats *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ if (mcs->hw->mcs_blks > 1)
+ mcs_set_force_clk_en(mcs, true);
+
+ mutex_lock(&mcs->stats_lock);
+ mcs_get_port_stats(mcs, rsp, req->id, req->dir);
+ mutex_unlock(&mcs->stats_lock);
+
+ if (mcs->hw->mcs_blks > 1)
+ mcs_set_force_clk_en(mcs, false);
+
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_set_active_lmac(struct rvu *rvu,
+ struct mcs_set_active_lmac *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+ if (!mcs)
+ return MCS_AF_ERR_NOT_MAPPED;
+
+ mcs->hw->lmac_bmap = req->lmac_bmap;
+ mcs_set_lmac_channels(req->mcs_id, req->chan_base);
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_port_cfg_set(struct rvu *rvu, struct mcs_port_cfg_set_req *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ if (mcs->hw->lmac_cnt <= req->port_id || !(mcs->hw->lmac_bmap & BIT_ULL(req->port_id)))
+ return -EINVAL;
+
+ mcs_set_port_cfg(mcs, req);
+
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_port_cfg_get(struct rvu *rvu, struct mcs_port_cfg_get_req *req,
+ struct mcs_port_cfg_get_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ if (mcs->hw->lmac_cnt <= req->port_id || !(mcs->hw->lmac_bmap & BIT_ULL(req->port_id)))
+ return -EINVAL;
+
+ mcs_get_port_cfg(mcs, req, rsp);
+
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_custom_tag_cfg_get(struct rvu *rvu, struct mcs_custom_tag_cfg_get_req *req,
+ struct mcs_custom_tag_cfg_get_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ mcs_get_custom_tag_cfg(mcs, req, rsp);
+
+ return 0;
+}
+
+int rvu_mcs_flr_handler(struct rvu *rvu, u16 pcifunc)
+{
+ struct mcs *mcs;
+ int mcs_id;
+
+ /* CNF10K-B mcs0-6 are mapped to RPM2-8 */
+ if (rvu->mcs_blk_cnt > 1) {
+ for (mcs_id = 0; mcs_id < rvu->mcs_blk_cnt; mcs_id++) {
+ mcs = mcs_get_pdata(mcs_id);
+ mcs_free_all_rsrc(mcs, MCS_RX, pcifunc);
+ mcs_free_all_rsrc(mcs, MCS_TX, pcifunc);
+ }
+ } else {
+ /* CN10K-B has only one mcs block */
+ mcs = mcs_get_pdata(0);
+ mcs_free_all_rsrc(mcs, MCS_RX, pcifunc);
+ mcs_free_all_rsrc(mcs, MCS_TX, pcifunc);
+ }
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_flowid_ena_entry(struct rvu *rvu,
+ struct mcs_flowid_ena_dis_entry *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+ mcs_ena_dis_flowid_entry(mcs, req->flow_id, req->dir, req->ena);
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_pn_table_write(struct rvu *rvu,
+ struct mcs_pn_table_write_req *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+ mcs_pn_table_write(mcs, req->pn_id, req->next_pn, req->dir);
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_set_pn_threshold(struct rvu *rvu,
+ struct mcs_set_pn_threshold *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ mcs_pn_threshold_set(mcs, req);
+
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_rx_sc_sa_map_write(struct rvu *rvu,
+ struct mcs_rx_sc_sa_map *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+ mcs->mcs_ops->mcs_rx_sa_mem_map_write(mcs, req);
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_tx_sc_sa_map_write(struct rvu *rvu,
+ struct mcs_tx_sc_sa_map *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+ mcs->mcs_ops->mcs_tx_sa_mem_map_write(mcs, req);
+ mcs->tx_sa_active[req->sc_id] = req->tx_sa_active;
+
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_sa_plcy_write(struct rvu *rvu,
+ struct mcs_sa_plcy_write_req *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+ int i;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ for (i = 0; i < req->sa_cnt; i++)
+ mcs_sa_plcy_write(mcs, &req->plcy[i][0],
+ req->sa_index[i], req->dir);
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_rx_sc_cam_write(struct rvu *rvu,
+ struct mcs_rx_sc_cam_write_req *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+ mcs_rx_sc_cam_write(mcs, req->sci, req->secy_id, req->sc_id);
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_secy_plcy_write(struct rvu *rvu,
+ struct mcs_secy_plcy_write_req *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ mcs_secy_plcy_write(mcs, req->plcy,
+ req->secy_id, req->dir);
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_flowid_entry_write(struct rvu *rvu,
+ struct mcs_flowid_entry_write_req *req,
+ struct msg_rsp *rsp)
+{
+ struct secy_mem_map map;
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ /* TODO validate the flowid */
+ mcs_flowid_entry_write(mcs, req->data, req->mask,
+ req->flow_id, req->dir);
+ map.secy = req->secy_id;
+ map.sc = req->sc_id;
+ map.ctrl_pkt = req->ctrl_pkt;
+ map.flow_id = req->flow_id;
+ map.sci = req->sci;
+ mcs->mcs_ops->mcs_flowid_secy_map(mcs, &map, req->dir);
+ if (req->ena)
+ mcs_ena_dis_flowid_entry(mcs, req->flow_id,
+ req->dir, true);
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_free_resources(struct rvu *rvu,
+ struct mcs_free_rsrc_req *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct mcs_rsrc_map *map;
+ struct mcs *mcs;
+ int rc = 0;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ if (req->dir == MCS_RX)
+ map = &mcs->rx;
+ else
+ map = &mcs->tx;
+
+ mutex_lock(&rvu->rsrc_lock);
+ /* Free all the cam resources mapped to PF/VF */
+ if (req->all) {
+ rc = mcs_free_all_rsrc(mcs, req->dir, pcifunc);
+ goto exit;
+ }
+
+ switch (req->rsrc_type) {
+ case MCS_RSRC_TYPE_FLOWID:
+ rc = mcs_free_rsrc(&map->flow_ids, map->flowid2pf_map, req->rsrc_id, pcifunc);
+ mcs_ena_dis_flowid_entry(mcs, req->rsrc_id, req->dir, false);
+ break;
+ case MCS_RSRC_TYPE_SECY:
+ rc = mcs_free_rsrc(&map->secy, map->secy2pf_map, req->rsrc_id, pcifunc);
+ mcs_clear_secy_plcy(mcs, req->rsrc_id, req->dir);
+ break;
+ case MCS_RSRC_TYPE_SC:
+ rc = mcs_free_rsrc(&map->sc, map->sc2pf_map, req->rsrc_id, pcifunc);
+ /* Disable SC CAM only on RX side */
+ if (req->dir == MCS_RX)
+ mcs_ena_dis_sc_cam_entry(mcs, req->rsrc_id, false);
+ break;
+ case MCS_RSRC_TYPE_SA:
+ rc = mcs_free_rsrc(&map->sa, map->sa2pf_map, req->rsrc_id, pcifunc);
+ break;
+ }
+exit:
+ mutex_unlock(&rvu->rsrc_lock);
+ return rc;
+}
+
+int rvu_mbox_handler_mcs_alloc_resources(struct rvu *rvu,
+ struct mcs_alloc_rsrc_req *req,
+ struct mcs_alloc_rsrc_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct mcs_rsrc_map *map;
+ struct mcs *mcs;
+ int rsrc_id, i;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ if (req->dir == MCS_RX)
+ map = &mcs->rx;
+ else
+ map = &mcs->tx;
+
+ mutex_lock(&rvu->rsrc_lock);
+
+ if (req->all) {
+ rsrc_id = mcs_alloc_all_rsrc(mcs, &rsp->flow_ids[0],
+ &rsp->secy_ids[0],
+ &rsp->sc_ids[0],
+ &rsp->sa_ids[0],
+ &rsp->sa_ids[1],
+ pcifunc, req->dir);
+ goto exit;
+ }
+
+ switch (req->rsrc_type) {
+ case MCS_RSRC_TYPE_FLOWID:
+ for (i = 0; i < req->rsrc_cnt; i++) {
+ rsrc_id = mcs_alloc_rsrc(&map->flow_ids, map->flowid2pf_map, pcifunc);
+ if (rsrc_id < 0)
+ goto exit;
+ rsp->flow_ids[i] = rsrc_id;
+ rsp->rsrc_cnt++;
+ }
+ break;
+ case MCS_RSRC_TYPE_SECY:
+ for (i = 0; i < req->rsrc_cnt; i++) {
+ rsrc_id = mcs_alloc_rsrc(&map->secy, map->secy2pf_map, pcifunc);
+ if (rsrc_id < 0)
+ goto exit;
+ rsp->secy_ids[i] = rsrc_id;
+ rsp->rsrc_cnt++;
+ }
+ break;
+ case MCS_RSRC_TYPE_SC:
+ for (i = 0; i < req->rsrc_cnt; i++) {
+ rsrc_id = mcs_alloc_rsrc(&map->sc, map->sc2pf_map, pcifunc);
+ if (rsrc_id < 0)
+ goto exit;
+ rsp->sc_ids[i] = rsrc_id;
+ rsp->rsrc_cnt++;
+ }
+ break;
+ case MCS_RSRC_TYPE_SA:
+ for (i = 0; i < req->rsrc_cnt; i++) {
+ rsrc_id = mcs_alloc_rsrc(&map->sa, map->sa2pf_map, pcifunc);
+ if (rsrc_id < 0)
+ goto exit;
+ rsp->sa_ids[i] = rsrc_id;
+ rsp->rsrc_cnt++;
+ }
+ break;
+ }
+
+ rsp->rsrc_type = req->rsrc_type;
+ rsp->dir = req->dir;
+ rsp->mcs_id = req->mcs_id;
+ rsp->all = req->all;
+
+exit:
+ if (rsrc_id < 0)
+ dev_err(rvu->dev, "Failed to allocate the mcs resources for PCIFUNC:%d\n", pcifunc);
+ mutex_unlock(&rvu->rsrc_lock);
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_alloc_ctrl_pkt_rule(struct rvu *rvu,
+ struct mcs_alloc_ctrl_pkt_rule_req *req,
+ struct mcs_alloc_ctrl_pkt_rule_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct mcs_rsrc_map *map;
+ struct mcs *mcs;
+ int rsrc_id;
+ u16 offset;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ map = (req->dir == MCS_RX) ? &mcs->rx : &mcs->tx;
+
+ mutex_lock(&rvu->rsrc_lock);
+
+ switch (req->rule_type) {
+ case MCS_CTRL_PKT_RULE_TYPE_ETH:
+ offset = MCS_CTRLPKT_ETYPE_RULE_OFFSET;
+ break;
+ case MCS_CTRL_PKT_RULE_TYPE_DA:
+ offset = MCS_CTRLPKT_DA_RULE_OFFSET;
+ break;
+ case MCS_CTRL_PKT_RULE_TYPE_RANGE:
+ offset = MCS_CTRLPKT_DA_RANGE_RULE_OFFSET;
+ break;
+ case MCS_CTRL_PKT_RULE_TYPE_COMBO:
+ offset = MCS_CTRLPKT_COMBO_RULE_OFFSET;
+ break;
+ case MCS_CTRL_PKT_RULE_TYPE_MAC:
+ offset = MCS_CTRLPKT_MAC_EN_RULE_OFFSET;
+ break;
+ }
+
+ rsrc_id = mcs_alloc_ctrlpktrule(&map->ctrlpktrule, map->ctrlpktrule2pf_map, offset,
+ pcifunc);
+ if (rsrc_id < 0)
+ goto exit;
+
+ rsp->rule_idx = rsrc_id;
+ rsp->rule_type = req->rule_type;
+ rsp->dir = req->dir;
+ rsp->mcs_id = req->mcs_id;
+
+ mutex_unlock(&rvu->rsrc_lock);
+ return 0;
+exit:
+ if (rsrc_id < 0)
+ dev_err(rvu->dev, "Failed to allocate the mcs ctrl pkt rule for PCIFUNC:%d\n",
+ pcifunc);
+ mutex_unlock(&rvu->rsrc_lock);
+ return rsrc_id;
+}
+
+int rvu_mbox_handler_mcs_free_ctrl_pkt_rule(struct rvu *rvu,
+ struct mcs_free_ctrl_pkt_rule_req *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+ int rc;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ mutex_lock(&rvu->rsrc_lock);
+
+ rc = mcs_free_ctrlpktrule(mcs, req);
+
+ mutex_unlock(&rvu->rsrc_lock);
+
+ return rc;
+}
+
+int rvu_mbox_handler_mcs_ctrl_pkt_rule_write(struct rvu *rvu,
+ struct mcs_ctrl_pkt_rule_write_req *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+ int rc;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ rc = mcs_ctrlpktrule_write(mcs, req);
+
+ return rc;
+}
+
+static void rvu_mcs_set_lmac_bmap(struct rvu *rvu)
+{
+ struct mcs *mcs = mcs_get_pdata(0);
+ unsigned long lmac_bmap = 0;
+ int cgx, lmac, port;
+
+ for (port = 0; port < mcs->hw->lmac_cnt; port++) {
+ cgx = port / rvu->hw->lmac_per_cgx;
+ lmac = port % rvu->hw->lmac_per_cgx;
+ if (!is_lmac_valid(rvu_cgx_pdata(cgx, rvu), lmac))
+ continue;
+ set_bit(port, &lmac_bmap);
+ }
+ mcs->hw->lmac_bmap = lmac_bmap;
+}
+
+int rvu_mcs_init(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ int lmac, err = 0, mcs_id;
+ struct mcs *mcs;
+
+ rvu->mcs_blk_cnt = mcs_get_blkcnt();
+
+ if (!rvu->mcs_blk_cnt)
+ return 0;
+
+ /* Needed only for CN10K-B */
+ if (rvu->mcs_blk_cnt == 1) {
+ err = mcs_set_lmac_channels(0, hw->cgx_chan_base);
+ if (err)
+ return err;
+ /* Set active lmacs */
+ rvu_mcs_set_lmac_bmap(rvu);
+ }
+
+ /* Install default tcam bypass entry and set port to operational mode */
+ for (mcs_id = 0; mcs_id < rvu->mcs_blk_cnt; mcs_id++) {
+ mcs = mcs_get_pdata(mcs_id);
+ mcs_install_flowid_bypass_entry(mcs);
+ for (lmac = 0; lmac < mcs->hw->lmac_cnt; lmac++)
+ mcs_set_lmac_mode(mcs, lmac, 0);
+
+ mcs->rvu = rvu;
+
+ /* Allocate memory for PFVF data */
+ mcs->pf = devm_kcalloc(mcs->dev, hw->total_pfs,
+ sizeof(struct mcs_pfvf), GFP_KERNEL);
+ if (!mcs->pf)
+ return -ENOMEM;
+
+ mcs->vf = devm_kcalloc(mcs->dev, hw->total_vfs,
+ sizeof(struct mcs_pfvf), GFP_KERNEL);
+ if (!mcs->vf)
+ return -ENOMEM;
+ }
+
+ /* Initialize the wq for handling mcs interrupts */
+ INIT_LIST_HEAD(&rvu->mcs_intrq_head);
+ INIT_WORK(&rvu->mcs_intr_work, mcs_intr_handler_task);
+ rvu->mcs_intr_wq = alloc_workqueue("mcs_intr_wq", 0, 0);
+ if (!rvu->mcs_intr_wq) {
+ dev_err(rvu->dev, "mcs alloc workqueue failed\n");
+ return -ENOMEM;
+ }
+
+ return err;
+}
+
+void rvu_mcs_exit(struct rvu *rvu)
+{
+ if (!rvu->mcs_intr_wq)
+ return;
+
+ flush_workqueue(rvu->mcs_intr_wq);
+ destroy_workqueue(rvu->mcs_intr_wq);
+ rvu->mcs_intr_wq = NULL;
+}
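
The mailbox handlers in mcs_rvu_if.c above share one shape: validate the requested MCS block id against rvu->mcs_blk_cnt, look up the per-block state with mcs_get_pdata(), and, for statistics requests on CNF10K-B (more than one block), assert MCSX_MIL_GLOBAL.FORCE_CLK_EN_IP around the counter reads. An illustrative skeleton of that pattern, not part of the patch, using only names that appear in the file (the skeleton function name is hypothetical):

/* Illustrative skeleton of the statistics handler pattern used above. */
static int mcs_stats_handler_skeleton(struct rvu *rvu, struct mcs_stats_req *req)
{
	struct mcs *mcs;

	if (req->mcs_id >= rvu->mcs_blk_cnt)
		return MCS_AF_ERR_INVALID_MCSID;

	mcs = mcs_get_pdata(req->mcs_id);

	/* CNF10K-B needs the IP clock forced on for accurate counters */
	if (mcs->hw->mcs_blks > 1)
		mcs_set_force_clk_en(mcs, true);

	mutex_lock(&mcs->stats_lock);
	/* ... copy the requested RX/TX counters into the response here ... */
	mutex_unlock(&mcs->stats_lock);

	if (mcs->hw->mcs_blks > 1)
		mcs_set_force_clk_en(mcs, false);

	return 0;
}
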
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
new file mode 100644
index 000000000..c92c3f463
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
@@ -0,0 +1,632 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell.
+ *
+ */
+
+#ifndef NPC_H
+#define NPC_H
+
+#define NPC_KEX_CHAN_MASK 0xFFFULL
+
+#define SET_KEX_LD(intf, lid, ltype, ld, cfg) \
+ rvu_write64(rvu, blkaddr, \
+ NPC_AF_INTFX_LIDX_LTX_LDX_CFG(intf, lid, ltype, ld), cfg)
+
+#define SET_KEX_LDFLAGS(intf, ld, flags, cfg) \
+ rvu_write64(rvu, blkaddr, \
+ NPC_AF_INTFX_LDATAX_FLAGSX_CFG(intf, ld, flags), cfg)
+
+enum NPC_LID_E {
+ NPC_LID_LA = 0,
+ NPC_LID_LB,
+ NPC_LID_LC,
+ NPC_LID_LD,
+ NPC_LID_LE,
+ NPC_LID_LF,
+ NPC_LID_LG,
+ NPC_LID_LH,
+};
+
+#define NPC_LT_NA 0
+
+enum npc_kpu_la_ltype {
+ NPC_LT_LA_8023 = 1,
+ NPC_LT_LA_ETHER,
+ NPC_LT_LA_IH_NIX_ETHER,
+ NPC_LT_LA_HIGIG2_ETHER = 7,
+ NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_LT_LA_CUSTOM_L2_90B_ETHER,
+ NPC_LT_LA_CPT_HDR,
+ NPC_LT_LA_CUSTOM_L2_24B_ETHER,
+ NPC_LT_LA_CUSTOM_PRE_L2_ETHER,
+ NPC_LT_LA_CUSTOM0 = 0xE,
+ NPC_LT_LA_CUSTOM1 = 0xF,
+};
+
+enum npc_kpu_lb_ltype {
+ NPC_LT_LB_ETAG = 1,
+ NPC_LT_LB_CTAG,
+ NPC_LT_LB_STAG_QINQ,
+ NPC_LT_LB_BTAG,
+ NPC_LT_LB_PPPOE,
+ NPC_LT_LB_DSA,
+ NPC_LT_LB_DSA_VLAN,
+ NPC_LT_LB_EDSA,
+ NPC_LT_LB_EDSA_VLAN,
+ NPC_LT_LB_EXDSA,
+ NPC_LT_LB_EXDSA_VLAN,
+ NPC_LT_LB_FDSA,
+ NPC_LT_LB_VLAN_EXDSA,
+ NPC_LT_LB_CUSTOM0 = 0xE,
+ NPC_LT_LB_CUSTOM1 = 0xF,
+};
+
+enum npc_kpu_lc_ltype {
+ NPC_LT_LC_IP = 1,
+ NPC_LT_LC_IP_OPT,
+ NPC_LT_LC_IP6,
+ NPC_LT_LC_IP6_EXT,
+ NPC_LT_LC_ARP,
+ NPC_LT_LC_RARP,
+ NPC_LT_LC_MPLS,
+ NPC_LT_LC_NSH,
+ NPC_LT_LC_PTP,
+ NPC_LT_LC_FCOE,
+ NPC_LT_LC_NGIO,
+ NPC_LT_LC_CUSTOM0 = 0xE,
+ NPC_LT_LC_CUSTOM1 = 0xF,
+};
+
+/* Don't modify Ltypes up to SCTP, otherwise it will
+ * affect flow tag calculation and thus RSS.
+ */
+enum npc_kpu_ld_ltype {
+ NPC_LT_LD_TCP = 1,
+ NPC_LT_LD_UDP,
+ NPC_LT_LD_ICMP,
+ NPC_LT_LD_SCTP,
+ NPC_LT_LD_ICMP6,
+ NPC_LT_LD_CUSTOM0,
+ NPC_LT_LD_CUSTOM1,
+ NPC_LT_LD_IGMP = 8,
+ NPC_LT_LD_AH,
+ NPC_LT_LD_GRE,
+ NPC_LT_LD_NVGRE,
+ NPC_LT_LD_NSH,
+ NPC_LT_LD_TU_MPLS_IN_NSH,
+ NPC_LT_LD_TU_MPLS_IN_IP,
+};
+
+enum npc_kpu_le_ltype {
+ NPC_LT_LE_VXLAN = 1,
+ NPC_LT_LE_GENEVE,
+ NPC_LT_LE_ESP,
+ NPC_LT_LE_GTPU = 4,
+ NPC_LT_LE_VXLANGPE,
+ NPC_LT_LE_GTPC,
+ NPC_LT_LE_NSH,
+ NPC_LT_LE_TU_MPLS_IN_GRE,
+ NPC_LT_LE_TU_NSH_IN_GRE,
+ NPC_LT_LE_TU_MPLS_IN_UDP,
+ NPC_LT_LE_CUSTOM0 = 0xE,
+ NPC_LT_LE_CUSTOM1 = 0xF,
+};
+
+enum npc_kpu_lf_ltype {
+ NPC_LT_LF_TU_ETHER = 1,
+ NPC_LT_LF_TU_PPP,
+ NPC_LT_LF_TU_MPLS_IN_VXLANGPE,
+ NPC_LT_LF_TU_NSH_IN_VXLANGPE,
+ NPC_LT_LF_TU_MPLS_IN_NSH,
+ NPC_LT_LF_TU_3RD_NSH,
+ NPC_LT_LF_CUSTOM0 = 0xE,
+ NPC_LT_LF_CUSTOM1 = 0xF,
+};
+
+enum npc_kpu_lg_ltype {
+ NPC_LT_LG_TU_IP = 1,
+ NPC_LT_LG_TU_IP6,
+ NPC_LT_LG_TU_ARP,
+ NPC_LT_LG_TU_ETHER_IN_NSH,
+ NPC_LT_LG_CUSTOM0 = 0xE,
+ NPC_LT_LG_CUSTOM1 = 0xF,
+};
+
+/* Don't modify Ltypes up to SCTP, otherwise it will
+ * affect flow tag calculation and thus RSS.
+ */
+enum npc_kpu_lh_ltype {
+ NPC_LT_LH_TU_TCP = 1,
+ NPC_LT_LH_TU_UDP,
+ NPC_LT_LH_TU_ICMP,
+ NPC_LT_LH_TU_SCTP,
+ NPC_LT_LH_TU_ICMP6,
+ NPC_LT_LH_TU_IGMP = 8,
+ NPC_LT_LH_TU_ESP,
+ NPC_LT_LH_TU_AH,
+ NPC_LT_LH_CUSTOM0 = 0xE,
+ NPC_LT_LH_CUSTOM1 = 0xF,
+};
+
+/* NPC port kind defines how incoming or outgoing packets
+ * are processed. NPC accepts packets from up to 64 pkinds.
+ * Software assigns a pkind to each incoming port, such as CGX
+ * Ethernet interfaces, LBK interfaces, etc.
+ */
+#define NPC_UNRESERVED_PKIND_COUNT NPC_RX_CUSTOM_PRE_L2_PKIND
+
+enum npc_pkind_type {
+ NPC_RX_LBK_PKIND = 0ULL,
+ NPC_RX_CUSTOM_PRE_L2_PKIND = 55ULL,
+ NPC_RX_VLAN_EXDSA_PKIND = 56ULL,
+ NPC_RX_CHLEN24B_PKIND = 57ULL,
+ NPC_RX_CPT_HDR_PKIND,
+ NPC_RX_CHLEN90B_PKIND,
+ NPC_TX_HIGIG_PKIND,
+ NPC_RX_HIGIG_PKIND,
+ NPC_RX_EDSA_PKIND,
+ NPC_TX_DEF_PKIND, /* NIX-TX PKIND */
+};
+
+enum npc_interface_type {
+ NPC_INTF_MODE_DEF,
+};
+
+/* list of known and supported fields in packet header and
+ * fields present in key structure.
+ */
+enum key_fields {
+ NPC_DMAC,
+ NPC_SMAC,
+ NPC_ETYPE,
+ NPC_VLAN_ETYPE_CTAG, /* 0x8100 */
+ NPC_VLAN_ETYPE_STAG, /* 0x88A8 */
+ NPC_OUTER_VID,
+ NPC_INNER_VID,
+ NPC_TOS,
+ NPC_IPFRAG_IPV4,
+ NPC_SIP_IPV4,
+ NPC_DIP_IPV4,
+ NPC_IPFRAG_IPV6,
+ NPC_SIP_IPV6,
+ NPC_DIP_IPV6,
+ NPC_IPPROTO_TCP,
+ NPC_IPPROTO_UDP,
+ NPC_IPPROTO_SCTP,
+ NPC_IPPROTO_AH,
+ NPC_IPPROTO_ESP,
+ NPC_IPPROTO_ICMP,
+ NPC_IPPROTO_ICMP6,
+ NPC_SPORT_TCP,
+ NPC_DPORT_TCP,
+ NPC_SPORT_UDP,
+ NPC_DPORT_UDP,
+ NPC_SPORT_SCTP,
+ NPC_DPORT_SCTP,
+ NPC_IPSEC_SPI,
+ NPC_HEADER_FIELDS_MAX,
+ NPC_CHAN = NPC_HEADER_FIELDS_MAX, /* Valid when Rx */
+ NPC_PF_FUNC, /* Valid when Tx */
+ NPC_ERRLEV,
+ NPC_ERRCODE,
+ NPC_LXMB,
+ NPC_EXACT_RESULT,
+ NPC_LA,
+ NPC_LB,
+ NPC_LC,
+ NPC_LD,
+ NPC_LE,
+ NPC_LF,
+ NPC_LG,
+ NPC_LH,
+ /* Ethertype for untagged frame */
+ NPC_ETYPE_ETHER,
+ /* Ethertype for single tagged frame */
+ NPC_ETYPE_TAG1,
+ /* Ethertype for double tagged frame */
+ NPC_ETYPE_TAG2,
+ /* outer vlan tci for single tagged frame */
+ NPC_VLAN_TAG1,
+ /* outer vlan tci for double tagged frame */
+ NPC_VLAN_TAG2,
+ /* inner vlan tci for double tagged frame */
+ NPC_VLAN_TAG3,
+ /* other header fields programmed for extraction but not of interest to us */
+ NPC_UNKNOWN,
+ NPC_KEY_FIELDS_MAX,
+};
+
+struct npc_kpu_profile_cam {
+ u8 state;
+ u8 state_mask;
+ u16 dp0;
+ u16 dp0_mask;
+ u16 dp1;
+ u16 dp1_mask;
+ u16 dp2;
+ u16 dp2_mask;
+} __packed;
+
+struct npc_kpu_profile_action {
+ u8 errlev;
+ u8 errcode;
+ u8 dp0_offset;
+ u8 dp1_offset;
+ u8 dp2_offset;
+ u8 bypass_count;
+ u8 parse_done;
+ u8 next_state;
+ u8 ptr_advance;
+ u8 cap_ena;
+ u8 lid;
+ u8 ltype;
+ u8 flags;
+ u8 offset;
+ u8 mask;
+ u8 right;
+ u8 shift;
+} __packed;
+
+struct npc_kpu_profile {
+ int cam_entries;
+ int action_entries;
+ struct npc_kpu_profile_cam *cam;
+ struct npc_kpu_profile_action *action;
+};
+
+/* NPC KPU register formats */
+struct npc_kpu_cam {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 rsvd_63_56 : 8;
+ u64 state : 8;
+ u64 dp2_data : 16;
+ u64 dp1_data : 16;
+ u64 dp0_data : 16;
+#else
+ u64 dp0_data : 16;
+ u64 dp1_data : 16;
+ u64 dp2_data : 16;
+ u64 state : 8;
+ u64 rsvd_63_56 : 8;
+#endif
+};
+
+struct npc_kpu_action0 {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 rsvd_63_57 : 7;
+ u64 byp_count : 3;
+ u64 capture_ena : 1;
+ u64 parse_done : 1;
+ u64 next_state : 8;
+ u64 rsvd_43 : 1;
+ u64 capture_lid : 3;
+ u64 capture_ltype : 4;
+ u64 capture_flags : 8;
+ u64 ptr_advance : 8;
+ u64 var_len_offset : 8;
+ u64 var_len_mask : 8;
+ u64 var_len_right : 1;
+ u64 var_len_shift : 3;
+#else
+ u64 var_len_shift : 3;
+ u64 var_len_right : 1;
+ u64 var_len_mask : 8;
+ u64 var_len_offset : 8;
+ u64 ptr_advance : 8;
+ u64 capture_flags : 8;
+ u64 capture_ltype : 4;
+ u64 capture_lid : 3;
+ u64 rsvd_43 : 1;
+ u64 next_state : 8;
+ u64 parse_done : 1;
+ u64 capture_ena : 1;
+ u64 byp_count : 3;
+ u64 rsvd_63_57 : 7;
+#endif
+};
+
+struct npc_kpu_action1 {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 rsvd_63_36 : 28;
+ u64 errlev : 4;
+ u64 errcode : 8;
+ u64 dp2_offset : 8;
+ u64 dp1_offset : 8;
+ u64 dp0_offset : 8;
+#else
+ u64 dp0_offset : 8;
+ u64 dp1_offset : 8;
+ u64 dp2_offset : 8;
+ u64 errcode : 8;
+ u64 errlev : 4;
+ u64 rsvd_63_36 : 28;
+#endif
+};
+
+struct npc_kpu_pkind_cpi_def {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 ena : 1;
+ u64 rsvd_62_59 : 4;
+ u64 lid : 3;
+ u64 ltype_match : 4;
+ u64 ltype_mask : 4;
+ u64 flags_match : 8;
+ u64 flags_mask : 8;
+ u64 add_offset : 8;
+ u64 add_mask : 8;
+ u64 rsvd_15 : 1;
+ u64 add_shift : 3;
+ u64 rsvd_11_10 : 2;
+ u64 cpi_base : 10;
+#else
+ u64 cpi_base : 10;
+ u64 rsvd_11_10 : 2;
+ u64 add_shift : 3;
+ u64 rsvd_15 : 1;
+ u64 add_mask : 8;
+ u64 add_offset : 8;
+ u64 flags_mask : 8;
+ u64 flags_match : 8;
+ u64 ltype_mask : 4;
+ u64 ltype_match : 4;
+ u64 lid : 3;
+ u64 rsvd_62_59 : 4;
+ u64 ena : 1;
+#endif
+};
+
+struct nix_rx_action {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 rsvd_63_61 :3;
+ u64 flow_key_alg :5;
+ u64 match_id :16;
+ u64 index :20;
+ u64 pf_func :16;
+ u64 op :4;
+#else
+ u64 op :4;
+ u64 pf_func :16;
+ u64 index :20;
+ u64 match_id :16;
+ u64 flow_key_alg :5;
+ u64 rsvd_63_61 :3;
+#endif
+};
+
+/* NPC_AF_INTFX_KEX_CFG field masks */
+#define NPC_EXACT_NIBBLE_START 40
+#define NPC_EXACT_NIBBLE_END 43
+#define NPC_EXACT_NIBBLE GENMASK_ULL(43, 40)
+
+/* NPC_EXACT_KEX_S nibble definitions for each field */
+#define NPC_EXACT_NIBBLE_HIT BIT_ULL(40)
+#define NPC_EXACT_NIBBLE_OPC BIT_ULL(40)
+#define NPC_EXACT_NIBBLE_WAY BIT_ULL(40)
+#define NPC_EXACT_NIBBLE_INDEX GENMASK_ULL(43, 41)
+
+#define NPC_EXACT_RESULT_HIT BIT_ULL(0)
+#define NPC_EXACT_RESULT_OPC GENMASK_ULL(2, 1)
+#define NPC_EXACT_RESULT_WAY GENMASK_ULL(4, 3)
+#define NPC_EXACT_RESULT_IDX GENMASK_ULL(15, 5)
+
+/* NPC_AF_INTFX_KEX_CFG field masks */
+#define NPC_PARSE_NIBBLE GENMASK_ULL(30, 0)
+
+/* NPC_PARSE_KEX_S nibble definitions for each field */
+#define NPC_PARSE_NIBBLE_CHAN GENMASK_ULL(2, 0)
+#define NPC_PARSE_NIBBLE_ERRLEV BIT_ULL(3)
+#define NPC_PARSE_NIBBLE_ERRCODE GENMASK_ULL(5, 4)
+#define NPC_PARSE_NIBBLE_L2L3_BCAST BIT_ULL(6)
+#define NPC_PARSE_NIBBLE_LA_FLAGS GENMASK_ULL(8, 7)
+#define NPC_PARSE_NIBBLE_LA_LTYPE BIT_ULL(9)
+#define NPC_PARSE_NIBBLE_LB_FLAGS GENMASK_ULL(11, 10)
+#define NPC_PARSE_NIBBLE_LB_LTYPE BIT_ULL(12)
+#define NPC_PARSE_NIBBLE_LC_FLAGS GENMASK_ULL(14, 13)
+#define NPC_PARSE_NIBBLE_LC_LTYPE BIT_ULL(15)
+#define NPC_PARSE_NIBBLE_LD_FLAGS GENMASK_ULL(17, 16)
+#define NPC_PARSE_NIBBLE_LD_LTYPE BIT_ULL(18)
+#define NPC_PARSE_NIBBLE_LE_FLAGS GENMASK_ULL(20, 19)
+#define NPC_PARSE_NIBBLE_LE_LTYPE BIT_ULL(21)
+#define NPC_PARSE_NIBBLE_LF_FLAGS GENMASK_ULL(23, 22)
+#define NPC_PARSE_NIBBLE_LF_LTYPE BIT_ULL(24)
+#define NPC_PARSE_NIBBLE_LG_FLAGS GENMASK_ULL(26, 25)
+#define NPC_PARSE_NIBBLE_LG_LTYPE BIT_ULL(27)
+#define NPC_PARSE_NIBBLE_LH_FLAGS GENMASK_ULL(29, 28)
+#define NPC_PARSE_NIBBLE_LH_LTYPE BIT_ULL(30)
+
+struct nix_tx_action {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 rsvd_63_48 :16;
+ u64 match_id :16;
+ u64 index :20;
+ u64 rsvd_11_8 :8;
+ u64 op :4;
+#else
+ u64 op :4;
+ u64 rsvd_11_8 :8;
+ u64 index :20;
+ u64 match_id :16;
+ u64 rsvd_63_48 :16;
+#endif
+};
+
+/* NIX Receive Vtag Action Structure */
+#define RX_VTAG0_VALID_BIT BIT_ULL(15)
+#define RX_VTAG0_TYPE_MASK GENMASK_ULL(14, 12)
+#define RX_VTAG0_LID_MASK GENMASK_ULL(10, 8)
+#define RX_VTAG0_RELPTR_MASK GENMASK_ULL(7, 0)
+#define RX_VTAG1_VALID_BIT BIT_ULL(47)
+#define RX_VTAG1_TYPE_MASK GENMASK_ULL(46, 44)
+#define RX_VTAG1_LID_MASK GENMASK_ULL(42, 40)
+#define RX_VTAG1_RELPTR_MASK GENMASK_ULL(39, 32)
+
+/* NIX Transmit Vtag Action Structure */
+#define TX_VTAG0_DEF_MASK GENMASK_ULL(25, 16)
+#define TX_VTAG0_OP_MASK GENMASK_ULL(13, 12)
+#define TX_VTAG0_LID_MASK GENMASK_ULL(10, 8)
+#define TX_VTAG0_RELPTR_MASK GENMASK_ULL(7, 0)
+#define TX_VTAG1_DEF_MASK GENMASK_ULL(57, 48)
+#define TX_VTAG1_OP_MASK GENMASK_ULL(45, 44)
+#define TX_VTAG1_LID_MASK GENMASK_ULL(42, 40)
+#define TX_VTAG1_RELPTR_MASK GENMASK_ULL(39, 32)
+
+/* NPC MCAM reserved entry index per nixlf */
+#define NIXLF_UCAST_ENTRY 0
+#define NIXLF_BCAST_ENTRY 1
+#define NIXLF_ALLMULTI_ENTRY 2
+#define NIXLF_PROMISC_ENTRY 3
+
+struct npc_coalesced_kpu_prfl {
+#define NPC_SIGN 0x00666f727063706e
+#define NPC_PRFL_NAME "npc_prfls_array"
+#define NPC_NAME_LEN 32
+ __le64 signature; /* "npcprof\0" (8 bytes/ASCII characters) */
+ u8 name[NPC_NAME_LEN]; /* KPU Profile name */
+ u64 version; /* KPU firmware/profile version */
+ u8 num_prfl; /* No of NPC profiles. */
+ u16 prfl_sz[];
+};
+
+struct npc_mcam_kex {
+ /* MKEX Profile Header */
+ u64 mkex_sign; /* "mcam-kex-profile" (8 bytes/ASCII characters) */
+ u8 name[MKEX_NAME_LEN]; /* MKEX Profile name */
+ u64 cpu_model; /* Format as profiled by CPU hardware */
+ u64 kpu_version; /* KPU firmware/profile version */
+ u64 reserved; /* Reserved for extension */
+
+ /* MKEX Profile Data */
+ u64 keyx_cfg[NPC_MAX_INTF]; /* NPC_AF_INTF(0..1)_KEX_CFG */
+ /* NPC_AF_KEX_LDATA(0..1)_FLAGS_CFG */
+ u64 kex_ld_flags[NPC_MAX_LD];
+ /* NPC_AF_INTF(0..1)_LID(0..7)_LT(0..15)_LD(0..1)_CFG */
+ u64 intf_lid_lt_ld[NPC_MAX_INTF][NPC_MAX_LID][NPC_MAX_LT][NPC_MAX_LD];
+ /* NPC_AF_INTF(0..1)_LDATA(0..1)_FLAGS(0..15)_CFG */
+ u64 intf_ld_flags[NPC_MAX_INTF][NPC_MAX_LD][NPC_MAX_LFL];
+} __packed;
+
+struct npc_kpu_fwdata {
+ int entries;
+ /* What follows is:
+ * struct npc_kpu_profile_cam[entries];
+ * struct npc_kpu_profile_action[entries];
+ */
+ u8 data[];
+} __packed;
+
+struct npc_lt_def {
+ u8 ltype_mask;
+ u8 ltype_match;
+ u8 lid;
+} __packed;
+
+struct npc_lt_def_ipsec {
+ u8 ltype_mask;
+ u8 ltype_match;
+ u8 lid;
+ u8 spi_offset;
+ u8 spi_nz;
+} __packed;
+
+struct npc_lt_def_apad {
+ u8 ltype_mask;
+ u8 ltype_match;
+ u8 lid;
+ u8 valid;
+} __packed;
+
+struct npc_lt_def_color {
+ u8 ltype_mask;
+ u8 ltype_match;
+ u8 lid;
+ u8 noffset;
+ u8 offset;
+} __packed;
+
+struct npc_lt_def_et {
+ u8 ltype_mask;
+ u8 ltype_match;
+ u8 lid;
+ u8 valid;
+ u8 offset;
+} __packed;
+
+struct npc_lt_def_cfg {
+ struct npc_lt_def rx_ol2;
+ struct npc_lt_def rx_oip4;
+ struct npc_lt_def rx_iip4;
+ struct npc_lt_def rx_oip6;
+ struct npc_lt_def rx_iip6;
+ struct npc_lt_def rx_otcp;
+ struct npc_lt_def rx_itcp;
+ struct npc_lt_def rx_oudp;
+ struct npc_lt_def rx_iudp;
+ struct npc_lt_def rx_osctp;
+ struct npc_lt_def rx_isctp;
+ struct npc_lt_def_ipsec rx_ipsec[2];
+ struct npc_lt_def pck_ol2;
+ struct npc_lt_def pck_oip4;
+ struct npc_lt_def pck_oip6;
+ struct npc_lt_def pck_iip4;
+ struct npc_lt_def_apad rx_apad0;
+ struct npc_lt_def_apad rx_apad1;
+ struct npc_lt_def_color ovlan;
+ struct npc_lt_def_color ivlan;
+ struct npc_lt_def_color rx_gen0_color;
+ struct npc_lt_def_color rx_gen1_color;
+ struct npc_lt_def_et rx_et[2];
+} __packed;
+
+/* Loadable KPU profile firmware data */
+struct npc_kpu_profile_fwdata {
+#define KPU_SIGN 0x00666f727075706b
+#define KPU_NAME_LEN 32
+/** Maximum number of custom KPU entries supported by the built-in profile. */
+#define KPU_MAX_CST_ENT 6
+ /* KPU Profile Header */
+ __le64 signature; /* "kpuprof\0" (8 bytes/ASCII characters) */
+ u8 name[KPU_NAME_LEN]; /* KPU Profile name */
+ __le64 version; /* KPU profile version */
+ u8 kpus;
+ u8 reserved[7];
+
+ /* Default MKEX profile to be used with this KPU profile. May be
+ * overridden with mkex_profile module parameter. Format is same as for
+ * the MKEX profile to streamline processing.
+ */
+ struct npc_mcam_kex mkex;
+ /* LTYPE values for specific HW offloaded protocols. */
+ struct npc_lt_def_cfg lt_def;
+ /* Dynamically sized data:
+ * Custom KPU CAM and ACTION configuration entries.
+ * struct npc_kpu_fwdata kpu[kpus];
+ */
+ u8 data[];
+} __packed;
+
+struct rvu_npc_mcam_rule {
+ struct flow_msg packet;
+ struct flow_msg mask;
+ u8 intf;
+ union {
+ struct nix_tx_action tx_action;
+ struct nix_rx_action rx_action;
+ };
+ u64 vtag_action;
+ struct list_head list;
+ u64 features;
+ u16 owner;
+ u16 entry;
+ u16 cntr;
+ bool has_cntr;
+ u8 default_rule;
+ bool enable;
+ bool vfvlan_cfg;
+ u16 chan;
+ u16 chan_mask;
+ u8 lxmb;
+};
+
+#endif /* NPC_H */
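
The NPC_EXACT_RESULT_* masks defined in npc.h above describe how an exact-match lookup result word is packed. An illustrative sketch of unpacking such a word with FIELD_GET from <linux/bitfield.h>; the function below is hypothetical and not part of the driver:

#include <linux/bitfield.h>

/* Illustrative only: decode an NPC exact-match result word. */
static void npc_decode_exact_result_sketch(u64 result)
{
	bool hit = FIELD_GET(NPC_EXACT_RESULT_HIT, result);
	u8 opc = FIELD_GET(NPC_EXACT_RESULT_OPC, result);
	u8 way = FIELD_GET(NPC_EXACT_RESULT_WAY, result);
	u16 idx = FIELD_GET(NPC_EXACT_RESULT_IDX, result);

	pr_debug("exact match: hit=%d opc=%u way=%u idx=%u\n", hit, opc, way, idx);
}
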
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
new file mode 100644
index 000000000..a820bad3a
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
@@ -0,0 +1,15266 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell.
+ *
+ */
+
+#ifndef NPC_PROFILE_H
+#define NPC_PROFILE_H
+
+#define NPC_KPU_PROFILE_VER 0x0000000100070000
+#define NPC_KPU_VER_MAJ(ver) ((u16)(((ver) >> 32) & 0xFFFF))
+#define NPC_KPU_VER_MIN(ver) ((u16)(((ver) >> 16) & 0xFFFF))
+#define NPC_KPU_VER_PATCH(ver) ((u16)((ver) & 0xFFFF))
+
+#define NPC_IH_W 0x8000
+#define NPC_IH_UTAG 0x2000
+
+#define NPC_ETYPE_IP 0x0800
+#define NPC_ETYPE_IP6 0x86dd
+#define NPC_ETYPE_ARP 0x0806
+#define NPC_ETYPE_RARP 0x8035
+#define NPC_ETYPE_NGIO 0x8842
+#define NPC_ETYPE_MPLSU 0x8847
+#define NPC_ETYPE_MPLSM 0x8848
+#define NPC_ETYPE_ETAG 0x893f
+#define NPC_ETYPE_CTAG 0x8100
+#define NPC_ETYPE_SBTAG 0x88a8
+#define NPC_ETYPE_ITAG 0x88e7
+#define NPC_ETYPE_PTP 0x88f7
+#define NPC_ETYPE_FCOE 0x8906
+#define NPC_ETYPE_QINQ 0x9100
+#define NPC_ETYPE_TRANS_ETH_BR 0x6558
+#define NPC_ETYPE_PPP 0x880b
+#define NPC_ETYPE_NSH 0x894f
+#define NPC_ETYPE_DSA 0xdada
+#define NPC_ETYPE_PPPOE 0x8864
+
+#define NPC_PPP_IP 0x0021
+#define NPC_PPP_IP6 0x0057
+
+#define NPC_IPNH_HOP 0
+#define NPC_IPNH_ICMP 1
+#define NPC_IPNH_IGMP 2
+#define NPC_IPNH_IP 4
+#define NPC_IPNH_TCP 6
+#define NPC_IPNH_UDP 17
+#define NPC_IPNH_IP6 41
+#define NPC_IPNH_ROUT 43
+#define NPC_IPNH_FRAG 44
+#define NPC_IPNH_GRE 47
+#define NPC_IPNH_ESP 50
+#define NPC_IPNH_AH 51
+#define NPC_IPNH_ICMP6 58
+#define NPC_IPNH_NONH 59
+#define NPC_IPNH_DEST 60
+#define NPC_IPNH_SCTP 132
+#define NPC_IPNH_MOBILITY 135
+#define NPC_IPNH_MPLS 137
+#define NPC_IPNH_HOSTID 139
+#define NPC_IPNH_SHIM6 140
+
+#define NPC_UDP_PORT_PTP_E 319
+#define NPC_UDP_PORT_PTP_G 320
+#define NPC_UDP_PORT_GTPC 2123
+#define NPC_UDP_PORT_GTPU 2152
+#define NPC_UDP_PORT_VXLAN 4789
+#define NPC_UDP_PORT_VXLANGPE 4790
+#define NPC_UDP_PORT_GENEVE 6081
+#define NPC_UDP_PORT_MPLS 6635
+#define NPC_UDP_PORT_ESP 4500
+
+#define NPC_VXLANGPE_NP_IP 0x1
+#define NPC_VXLANGPE_NP_IP6 0x2
+#define NPC_VXLANGPE_NP_ETH 0x3
+#define NPC_VXLANGPE_NP_NSH 0x4
+#define NPC_VXLANGPE_NP_MPLS 0x5
+#define NPC_VXLANGPE_NP_GBP 0x6
+#define NPC_VXLANGPE_NP_VBNG 0x7
+
+#define NPC_NSH_NP_IP 0x1
+#define NPC_NSH_NP_IP6 0x2
+#define NPC_NSH_NP_ETH 0x3
+#define NPC_NSH_NP_NSH 0x4
+#define NPC_NSH_NP_MPLS 0x5
+
+#define NPC_TCP_PORT_HTTP 80
+#define NPC_TCP_PORT_HTTPS 443
+#define NPC_TCP_PORT_PPTP 1723
+
+#define NPC_MPLS_S 0x0100
+
+#define NPC_IP_TTL_MASK 0xff00
+#define NPC_IP_VER_4 0x4000
+#define NPC_IP_VER_6 0x6000
+#define NPC_IP_VER_MASK 0xf000
+#define NPC_IP_HDR_LEN_5 0x0500
+#define NPC_IP_HDR_LEN_MASK 0x0f00
+#define NPC_IP_HDR_MF 0x2000
+#define NPC_IP_HDR_FRAGOFF 0x1fff
+
+#define NPC_IP6_HOP_MASK 0x00ff
+#define NPC_IP6_FRAG_FRAGOFF 0xfff8
+
+#define NPC_GRE_F_CSUM (0x1 << 15)
+#define NPC_GRE_F_ROUTE (0x1 << 14)
+#define NPC_GRE_F_KEY (0x1 << 13)
+#define NPC_GRE_F_SEQ (0x1 << 12)
+#define NPC_GRE_F_ACK (0x1 << 7)
+#define NPC_GRE_FLAG_MASK (NPC_GRE_F_CSUM | NPC_GRE_F_ROUTE | \
+ NPC_GRE_F_KEY | NPC_GRE_F_SEQ | NPC_GRE_F_ACK)
+#define NPC_GRE_VER_MASK 0x0003
+#define NPC_GRE_VER_1 0x0001
+
+#define NPC_VXLAN_I 0x0800
+
+#define NPC_VXLANGPE_VER (0x3 << 12)
+#define NPC_VXLANGPE_I (0x1 << 11)
+#define NPC_VXLANGPE_P (0x1 << 10)
+#define NPC_VXLANGPE_B (0x1 << 9)
+#define NPC_VXLANGPE_NP_MASK 0x00ff
+
+#define NPC_NSH_NP_MASK 0x00ff
+
+#define NPC_GENEVE_F_OAM (0x1 << 7)
+#define NPC_GENEVE_F_CRI_OPT (0x1 << 6)
+
+#define NPC_GTP_PT_GTP (0x1 << 12)
+#define NPC_GTP_PT_MASK (0x1 << 12)
+#define NPC_GTP_VER1 (0x1 << 13)
+#define NPC_GTP_VER_MASK (0x7 << 13)
+#define NPC_GTP_MT_G_PDU 0xff
+#define NPC_GTP_MT_MASK 0xff
+
+#define NPC_TCP_FLAGS_FIN 0x0001
+#define NPC_TCP_FLAGS_SYN 0x0002
+#define NPC_TCP_FLAGS_RST 0x0004
+#define NPC_TCP_FLAGS_PSH 0x0008
+#define NPC_TCP_FLAGS_ACK 0x0010
+#define NPC_TCP_FLAGS_URG 0x0020
+#define NPC_TCP_FLAGS_MASK 0x003f
+
+#define NPC_TCP_DATA_OFFSET_5 0x5000
+#define NPC_TCP_DATA_OFFSET_MASK 0xf000
+
+#define NPC_DSA_EXTEND 0x1000
+#define NPC_DSA_EDSA 0x8000
+#define NPC_DSA_FDSA 0xc000
+
+#define NPC_KEXOF_DMAC 9
+#define MKEX_SIGN 0x19bbfdbd15f
+#define KEX_LD_CFG(bytesm1, hdr_ofs, ena, flags_ena, key_ofs) \
+ (((bytesm1) << 16) | ((hdr_ofs) << 8) | ((ena) << 7) | \
+ ((flags_ena) << 6) | ((key_ofs) & 0x3F))
+
+/* Rx parse key extract nibble enable */
+#define NPC_PARSE_NIBBLE_INTF_RX (NPC_PARSE_NIBBLE_CHAN | \
+ NPC_PARSE_NIBBLE_L2L3_BCAST | \
+ NPC_PARSE_NIBBLE_LA_LTYPE | \
+ NPC_PARSE_NIBBLE_LB_LTYPE | \
+ NPC_PARSE_NIBBLE_LC_LTYPE | \
+ NPC_PARSE_NIBBLE_LD_LTYPE | \
+ NPC_PARSE_NIBBLE_LE_LTYPE)
+/* Tx parse key extract nibble enable */
+#define NPC_PARSE_NIBBLE_INTF_TX (NPC_PARSE_NIBBLE_LA_LTYPE | \
+ NPC_PARSE_NIBBLE_LB_LTYPE | \
+ NPC_PARSE_NIBBLE_LC_LTYPE | \
+ NPC_PARSE_NIBBLE_LD_LTYPE | \
+ NPC_PARSE_NIBBLE_LE_LTYPE)
+
+enum npc_kpu_parser_state {
+ NPC_S_NA = 0,
+ NPC_S_KPU1_ETHER,
+ NPC_S_KPU1_IH_NIX,
+ NPC_S_KPU1_IH,
+ NPC_S_KPU1_EXDSA,
+ NPC_S_KPU1_HIGIG2,
+ NPC_S_KPU1_IH_NIX_HIGIG2,
+ NPC_S_KPU1_CUSTOM_PRE_L2,
+ NPC_S_KPU1_CPT_HDR,
+ NPC_S_KPU1_VLAN_EXDSA,
+ NPC_S_KPU2_CTAG,
+ NPC_S_KPU2_CTAG2,
+ NPC_S_KPU2_SBTAG,
+ NPC_S_KPU2_QINQ,
+ NPC_S_KPU2_ETAG,
+ NPC_S_KPU2_EXDSA,
+ NPC_S_KPU2_CPT_CTAG,
+ NPC_S_KPU2_CPT_QINQ,
+ NPC_S_KPU3_CTAG,
+ NPC_S_KPU3_STAG,
+ NPC_S_KPU3_QINQ,
+ NPC_S_KPU3_CTAG_C,
+ NPC_S_KPU3_STAG_C,
+ NPC_S_KPU3_QINQ_C,
+ NPC_S_KPU3_DSA,
+ NPC_S_KPU3_VLAN_EXDSA,
+ NPC_S_KPU4_MPLS,
+ NPC_S_KPU4_NSH,
+ NPC_S_KPU4_FDSA,
+ NPC_S_KPU4_VLAN_EXDSA,
+ NPC_S_KPU4_PPPOE,
+ NPC_S_KPU5_IP,
+ NPC_S_KPU5_IP6,
+ NPC_S_KPU5_ARP,
+ NPC_S_KPU5_RARP,
+ NPC_S_KPU5_PTP,
+ NPC_S_KPU5_FCOE,
+ NPC_S_KPU5_MPLS,
+ NPC_S_KPU5_MPLS_PL,
+ NPC_S_KPU5_NSH,
+ NPC_S_KPU5_CPT_IP,
+ NPC_S_KPU5_CPT_IP6,
+ NPC_S_KPU5_NGIO,
+ NPC_S_KPU6_IP6_EXT,
+ NPC_S_KPU6_IP6_HOP_DEST,
+ NPC_S_KPU6_IP6_ROUT,
+ NPC_S_KPU6_IP6_FRAG,
+ NPC_S_KPU6_IP6_CPT_FRAG,
+ NPC_S_KPU6_IP6_CPT_HOP_DEST,
+ NPC_S_KPU6_IP6_CPT_ROUT,
+ NPC_S_KPU7_IP6_EXT,
+ NPC_S_KPU7_IP6_ROUT,
+ NPC_S_KPU7_IP6_FRAG,
+ NPC_S_KPU7_CPT_IP6_FRAG,
+ NPC_S_KPU8_TCP,
+ NPC_S_KPU8_UDP,
+ NPC_S_KPU8_SCTP,
+ NPC_S_KPU8_ICMP,
+ NPC_S_KPU8_IGMP,
+ NPC_S_KPU8_ICMP6,
+ NPC_S_KPU8_GRE,
+ NPC_S_KPU8_AH,
+ NPC_S_KPU9_TU_MPLS_IN_GRE,
+ NPC_S_KPU9_TU_MPLS_IN_NSH,
+ NPC_S_KPU9_TU_MPLS_IN_IP,
+ NPC_S_KPU9_TU_MPLS_IN_UDP,
+ NPC_S_KPU9_TU_NSH_IN_GRE,
+ NPC_S_KPU9_VXLAN,
+ NPC_S_KPU9_VXLANGPE,
+ NPC_S_KPU9_GENEVE,
+ NPC_S_KPU9_GTPC,
+ NPC_S_KPU9_GTPU,
+ NPC_S_KPU9_ESP,
+ NPC_S_KPU10_TU_MPLS_IN_VXLANGPE,
+ NPC_S_KPU10_TU_MPLS_PL,
+ NPC_S_KPU10_TU_MPLS,
+ NPC_S_KPU10_TU_NSH_IN_VXLANGPE,
+ NPC_S_KPU11_TU_ETHER,
+ NPC_S_KPU11_TU_PPP,
+ NPC_S_KPU11_TU_MPLS_IN_NSH,
+ NPC_S_KPU11_TU_MPLS_PL,
+ NPC_S_KPU11_TU_MPLS,
+ NPC_S_KPU11_TU_ETHER_IN_NSH,
+ NPC_S_KPU12_TU_IP,
+ NPC_S_KPU12_TU_IP6,
+ NPC_S_KPU12_TU_ARP,
+ NPC_S_KPU13_TU_IP6_EXT,
+ NPC_S_KPU14_TU_IP6_EXT,
+ NPC_S_KPU15_TU_TCP,
+ NPC_S_KPU15_TU_UDP,
+ NPC_S_KPU15_TU_SCTP,
+ NPC_S_KPU15_TU_ICMP,
+ NPC_S_KPU15_TU_IGMP,
+ NPC_S_KPU15_TU_ICMP6,
+ NPC_S_KPU15_TU_ESP,
+ NPC_S_KPU15_TU_AH,
+ NPC_S_KPU16_HTTP_DATA,
+ NPC_S_KPU16_HTTPS_DATA,
+ NPC_S_KPU16_PPTP_DATA,
+ NPC_S_KPU16_TCP_DATA,
+ NPC_S_KPU16_UDP_DATA,
+ NPC_S_KPU16_UDP_PTP,
+ NPC_S_LAST /* has to be the last item */
+};
+
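+/*
+ * Per-layer flag values: the *_U_* entries (presumably "upper" flags) are
+ * one-hot bit masks in the high bits of a layer's flag field, while the
+ * *_L_* ("lower") entries are small sequential codes sharing the low bits.
+ */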
+enum npc_kpu_la_uflag {
+ NPC_F_LA_U_HAS_TAG = 0x10,
+ NPC_F_LA_U_HAS_IH_NIX = 0x20,
+ NPC_F_LA_U_HAS_HIGIG2 = 0x40,
+};
+enum npc_kpu_la_lflag {
+ NPC_F_LA_L_UNK_ETYPE = 1,
+ NPC_F_LA_L_WITH_VLAN,
+ NPC_F_LA_L_WITH_ETAG,
+ NPC_F_LA_L_WITH_MPLS,
+ NPC_F_LA_L_WITH_NSH,
+};
+
+enum npc_kpu_lb_uflag {
+ NPC_F_LB_U_UNK_ETYPE = 0x80,
+ NPC_F_LB_U_MORE_TAG = 0x40,
+};
+enum npc_kpu_lb_lflag {
+ NPC_F_LB_L_WITH_CTAG = 1,
+ NPC_F_LB_L_WITH_CTAG_UNK,
+ NPC_F_LB_L_WITH_STAG_CTAG,
+ NPC_F_LB_L_WITH_STAG_STAG,
+ NPC_F_LB_L_WITH_QINQ_CTAG,
+ NPC_F_LB_L_WITH_QINQ_QINQ,
+ NPC_F_LB_L_WITH_ITAG,
+ NPC_F_LB_L_WITH_ITAG_STAG,
+ NPC_F_LB_L_WITH_ITAG_CTAG,
+ NPC_F_LB_L_WITH_ITAG_UNK,
+ NPC_F_LB_L_WITH_BTAG_ITAG,
+ NPC_F_LB_L_WITH_STAG,
+ NPC_F_LB_L_WITH_QINQ,
+ NPC_F_LB_L_DSA,
+ NPC_F_LB_L_DSA_VLAN,
+ NPC_F_LB_L_EDSA,
+ NPC_F_LB_L_EDSA_VLAN,
+ NPC_F_LB_L_EXDSA,
+ NPC_F_LB_L_EXDSA_VLAN,
+ NPC_F_LB_L_FDSA,
+};
+
+enum npc_kpu_lc_uflag {
+ NPC_F_LC_U_UNK_PROTO = 0x10,
+ NPC_F_LC_U_IP_FRAG = 0x20,
+ NPC_F_LC_U_IP6_FRAG = 0x40,
+};
+enum npc_kpu_lc_lflag {
+ NPC_F_LC_L_IP_IN_IP = 1,
+ NPC_F_LC_L_6TO4,
+ NPC_F_LC_L_MPLS_IN_IP,
+ NPC_F_LC_L_IP6_TUN_IP6,
+ NPC_F_LC_L_IP6_MPLS_IN_IP,
+ NPC_F_LC_L_MPLS_4_LABELS,
+ NPC_F_LC_L_MPLS_3_LABELS,
+ NPC_F_LC_L_MPLS_2_LABELS,
+ NPC_F_LC_L_EXT_HOP,
+ NPC_F_LC_L_EXT_DEST,
+ NPC_F_LC_L_EXT_ROUT,
+ NPC_F_LC_L_EXT_MOBILITY,
+ NPC_F_LC_L_EXT_HOSTID,
+ NPC_F_LC_L_EXT_SHIM6,
+};
+
+enum npc_kpu_ld_lflag {
+ NPC_F_LD_L_TCP_UNK_PORT = 1,
+ NPC_F_LD_L_TCP_HAS_OPTIONS,
+ NPC_F_LD_L_TCP_UNK_PORT_HAS_OPTIONS,
+ NPC_F_LD_L_UDP_UNK_PORT,
+ NPC_F_LD_L_GRE_NVGRE,
+ NPC_F_LD_L_GRE_HAS_SRE,
+ NPC_F_LD_L_GRE_HAS_CSUM,
+ NPC_F_LD_L_GRE_HAS_KEY,
+ NPC_F_LD_L_GRE_HAS_SEQ,
+ NPC_F_LD_L_GRE_HAS_CSUM_KEY,
+ NPC_F_LD_L_GRE_HAS_CSUM_SEQ,
+ NPC_F_LD_L_GRE_HAS_KEY_SEQ,
+ NPC_F_LD_L_GRE_HAS_CSUM_KEY_SEQ,
+ NPC_F_LD_L_GRE_HAS_ROUTE,
+ NPC_F_LD_L_GRE_UNK_PROTO,
+ NPC_F_LD_L_GRE_VER1,
+ NPC_F_LD_L_GRE_VER1_HAS_SEQ,
+ NPC_F_LD_L_GRE_VER1_HAS_ACK,
+ NPC_F_LD_L_GRE_VER1_HAS_SEQ_ACK,
+ NPC_F_LD_L_GRE_VER1_UNK_PROTO,
+ NPC_F_LD_L_MPLS_4_LABELS,
+ NPC_F_LD_L_MPLS_3_LABELS,
+ NPC_F_LD_L_MPLS_2_LABELS,
+};
+
+enum npc_kpu_le_lflag {
+ NPC_F_LE_L_VXLAN_NOVNI,
+ NPC_F_LE_L_VXLANGPE_NOVNI,
+ NPC_F_LE_L_VXLANGPE_UNK,
+ NPC_F_LE_L_VXLANGPE_NONP,
+ NPC_F_LE_L_GENEVE_OAM,
+ NPC_F_LE_L_GENEVE_CRI_OPT,
+ NPC_F_LE_L_GENEVE_OAM_CRI_OPT,
+ NPC_F_LE_L_GTPU_G_PDU,
+ NPC_F_LE_L_GTPU_UNK,
+};
+
+enum npc_kpu_lf_uflag {
+ NPC_F_LF_U_UNK_ETYPE = 0x10,
+ NPC_F_LF_U_HAS_TAG = 0x20,
+};
+
+enum npc_kpu_lf_lflag {
+ NPC_F_LF_L_WITH_CTAG = 1,
+ NPC_F_LF_L_WITH_STAG_CTAG,
+ NPC_F_LF_L_WITH_STAG,
+ NPC_F_LF_L_WITH_QINQ_CTAG,
+ NPC_F_LF_L_WITH_QINQ,
+};
+
+enum npc_kpu_lg_uflag {
+ NPC_F_LG_U_UNK_IP_PROTO = 0x10,
+ NPC_F_LG_U_IP_HAS_OPTIONS = 0x20,
+ NPC_F_LG_U_IP6_HAS_EXT = 0x40,
+};
+
+enum npc_kpu_lh_uflag {
+ NPC_F_LH_U_TCP_HAS_OPTIONS = 0x80,
+};
+
+enum npc_kpu_lh_lflag {
+ NPC_F_LH_L_TCP_HTTP = 1,
+ NPC_F_LH_L_TCP_HTTPS,
+ NPC_F_LH_L_TCP_PPTP,
+ NPC_F_LH_L_TCP_UNK_PORT,
+ NPC_F_LH_L_UDP_UNK_PORT,
+};
+
+enum npc_kpu_err_code {
+ NPC_EC_NOERR = 0, /* has to be zero */
+ NPC_EC_UNK,
+ NPC_EC_IH_LENGTH,
+ NPC_EC_EDSA_UNK,
+ NPC_EC_L2_K1,
+ NPC_EC_L2_K2,
+ NPC_EC_L2_K3,
+ NPC_EC_L2_K3_ETYPE_UNK,
+ NPC_EC_L2_K4,
+ NPC_EC_MPLS_2MANY,
+ NPC_EC_MPLS_UNK,
+ NPC_EC_NSH_UNK,
+ NPC_EC_IP_TTL_0,
+ NPC_EC_IP_FRAG_OFFSET_1,
+ NPC_EC_IP_VER,
+ NPC_EC_IP6_HOP_0,
+ NPC_EC_IP6_VER,
+ NPC_EC_TCP_FLAGS_FIN_ONLY,
+ NPC_EC_TCP_FLAGS_ZERO,
+ NPC_EC_TCP_FLAGS_RST_FIN,
+ NPC_EC_TCP_FLAGS_URG_SYN,
+ NPC_EC_TCP_FLAGS_RST_SYN,
+ NPC_EC_TCP_FLAGS_SYN_FIN,
+ NPC_EC_VXLAN,
+ NPC_EC_NVGRE,
+ NPC_EC_GRE,
+ NPC_EC_GRE_VER1,
+ NPC_EC_L4,
+ NPC_EC_OIP4_CSUM,
+ NPC_EC_IIP4_CSUM,
+ NPC_EC_LAST /* has to be the last item */
+};
+
+enum NPC_ERRLEV_E {
+ NPC_ERRLEV_RE = 0,
+ NPC_ERRLEV_LA = 1,
+ NPC_ERRLEV_LB = 2,
+ NPC_ERRLEV_LC = 3,
+ NPC_ERRLEV_LD = 4,
+ NPC_ERRLEV_LE = 5,
+ NPC_ERRLEV_LF = 6,
+ NPC_ERRLEV_LG = 7,
+ NPC_ERRLEV_LH = 8,
+ NPC_ERRLEV_R9 = 9,
+ NPC_ERRLEV_R10 = 10,
+ NPC_ERRLEV_R11 = 11,
+ NPC_ERRLEV_R12 = 12,
+ NPC_ERRLEV_R13 = 13,
+ NPC_ERRLEV_R14 = 14,
+ NPC_ERRLEV_NIX = 15,
+ NPC_ERRLEV_ENUM_LAST = 16,
+};
+
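+/*
+ * Each CAM entry below appears to carry eight values: the parser state and
+ * its mask followed by three 16-bit data words (dp0/dp1/dp2) with their
+ * masks; an all-zero mask, as in this NOP entry, presumably matches any
+ * value.
+ */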
+#define NPC_KPU_NOP_CAM \
+ { \
+ NPC_S_NA, 0xff, \
+ 0x0000, \
+ 0x0000, \
+ 0x0000, \
+ 0x0000, \
+ 0x0000, \
+ 0x0000, \
+ }
+
+#define NPC_KPU_NOP_ACTION \
+ { \
+ NPC_ERRLEV_RE, NPC_EC_NOERR, \
+ 0, 0, 0, 0, 0, \
+ NPC_S_NA, 0, 0, \
+ NPC_LID_LA, NPC_LT_NA, \
+ 0, \
+ 0, 0, 0, 0, \
+ }
+
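+/*
+ * Initial per-port-kind actions: most entries are intentionally identical
+ * and start parsing at NPC_S_KPU1_ETHER. The 12/16/20 values look like the
+ * byte offsets of the data words handed to KPU1 (EtherType and possible
+ * inner-tag positions of a plain Ethernet frame); the exact field order of
+ * struct npc_kpu_profile_action is assumed here, not shown in this hunk.
+ */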
+static struct npc_kpu_profile_action ikpu_action_entries[] = {
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_PRE_L2_ETHER,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_VLAN_EXDSA, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_CUSTOM_PRE_L2, 24, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_CPT_HDR, 40, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 7, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_CUSTOM_PRE_L2, 90, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 36, 40, 44, 0, 0,
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 28, 32, 36, 0, 0,
+ NPC_S_KPU1_HIGIG2, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 14, 20, 0, 0,
+ NPC_S_KPU1_EXDSA, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 20, 24, 28, 0, 0,
+ NPC_S_KPU1_IH_NIX, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+};
+
+static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ NPC_ETYPE_ETAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ NPC_ETYPE_DSA,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ NPC_ETYPE_PPPOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ 0x0000,
+ 0xfc00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ 0x0400,
+ 0xfe00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX, 0xff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX, 0xff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX, 0xff,
+ NPC_ETYPE_ETAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_EXDSA, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_DSA_EXTEND,
+ NPC_DSA_EXTEND,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_EXDSA, 0xff,
+ NPC_DSA_FDSA,
+ NPC_DSA_FDSA,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_EXDSA, 0xff,
+ 0x0000,
+ NPC_DSA_EXTEND,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_ETAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_ETAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
+ NPC_ETYPE_ETAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CPT_HDR, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CPT_HDR, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CPT_HDR, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CPT_HDR, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_VLAN_EXDSA, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_NA, 0x00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+};
+
+static struct npc_kpu_profile_cam kpu2_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ {
+ NPC_S_KPU2_CTAG, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff,
+ NPC_ETYPE_NGIO,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff,
+ NPC_ETYPE_PPPOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_PPP_IP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff,
+ NPC_ETYPE_PPPOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_PPP_IP6,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_IP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_IP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG2, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG2, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG2, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG2, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG2, 0xff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG2, 0xff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG2, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG2, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG2, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG2, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG2, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_EXDSA, 0xff,
+ NPC_DSA_EDSA,
+ NPC_DSA_EDSA,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_IP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_EXDSA, 0xff,
+ NPC_DSA_EDSA,
+ NPC_DSA_EDSA,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_EXDSA, 0xff,
+ NPC_DSA_EDSA,
+ NPC_DSA_EDSA,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_EXDSA, 0xff,
+ NPC_DSA_EDSA,
+ NPC_DSA_EDSA,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_EXDSA, 0xff,
+ NPC_DSA_EDSA,
+ NPC_DSA_EDSA,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_EXDSA, 0xff,
+ NPC_DSA_EDSA,
+ NPC_DSA_EDSA,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_EXDSA, 0xff,
+ NPC_DSA_EDSA,
+ NPC_DSA_EDSA,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_EXDSA, 0xff,
+ NPC_DSA_EDSA,
+ NPC_DSA_EDSA,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_EXDSA, 0xff,
+ 0x0000,
+ NPC_DSA_EDSA,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_EXDSA, 0xff,
+ 0x0000,
+ NPC_DSA_EDSA,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_EXDSA, 0xff,
+ 0x0000,
+ NPC_DSA_EDSA,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_EXDSA, 0xff,
+ 0x0000,
+ NPC_DSA_EDSA,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_EXDSA, 0xff,
+ 0x0000,
+ NPC_DSA_EDSA,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_EXDSA, 0xff,
+ 0x0000,
+ NPC_DSA_EDSA,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_EXDSA, 0xff,
+ 0x0000,
+ NPC_DSA_EDSA,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_EXDSA, 0xff,
+ 0x0000,
+ NPC_DSA_EDSA,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CPT_CTAG, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CPT_CTAG, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CPT_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CPT_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_NA, 0x00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+};
+
+static struct npc_kpu_profile_cam kpu3_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ {
+ NPC_S_KPU3_CTAG, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG, 0xff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG, 0xff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG_C, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG_C, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG_C, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG_C, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG_C, 0xff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG_C, 0xff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG_C, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG_C, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG_C, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG_C, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG_C, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG_C, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG_C, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG_C, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG_C, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG_C, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG_C, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG_C, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_DSA, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_DSA, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_DSA, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_DSA, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_DSA, 0xff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_DSA, 0xff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_DSA, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_DSA, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_DSA, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_DSA, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_DSA, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_DSA, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_DSA, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_DSA, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_VLAN_EXDSA, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_NA, 0x00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+};
+
+static struct npc_kpu_profile_cam kpu4_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ {
+ NPC_S_KPU4_MPLS, 0xff,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_MPLS, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_MPLS, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ },
+ {
+ NPC_S_KPU4_MPLS, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ },
+ {
+ NPC_S_KPU4_NSH, 0xff,
+ NPC_NSH_NP_IP,
+ NPC_NSH_NP_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_NSH, 0xff,
+ NPC_NSH_NP_IP6,
+ NPC_NSH_NP_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_NSH, 0xff,
+ NPC_NSH_NP_ETH,
+ NPC_NSH_NP_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_NSH, 0xff,
+ NPC_NSH_NP_MPLS,
+ NPC_NSH_NP_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_NSH, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
+ NPC_ETYPE_PPPOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_PPP_IP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
+ NPC_ETYPE_PPPOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_PPP_IP6,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
+ 0x0000,
+ NPC_DSA_FDSA,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_VLAN_EXDSA, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_VLAN_EXDSA, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_VLAN_EXDSA, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_VLAN_EXDSA, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_VLAN_EXDSA, 0xff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_VLAN_EXDSA, 0xff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_VLAN_EXDSA, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_PPPOE, 0xff,
+ NPC_PPP_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_PPPOE, 0xff,
+ NPC_PPP_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_NA, 0x00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+};
+
+static struct npc_kpu_profile_cam kpu5_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ {
+ NPC_S_KPU5_IP, 0xff,
+ 0x0000,
+ NPC_IP_TTL_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0001,
+ NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_TCP,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_UDP,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_SCTP,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_ICMP,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_IGMP,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_ESP,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_AH,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_GRE,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_IP,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_IP6,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_MPLS,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_TCP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_UDP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_SCTP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_ICMP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_IGMP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_ESP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_AH,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_GRE,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_IP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_IP6,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_MPLS,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_ARP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_RARP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_PTP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_FCOE, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ 0x0000,
+ NPC_IP6_HOP_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_TCP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_UDP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_SCTP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_ICMP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_ICMP6 << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_GRE << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_IP6 << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_MPLS << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_HOP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_DEST << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_ROUT << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_FRAG << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_ESP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_AH << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_MOBILITY << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_HOSTID << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_SHIM6 << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_MPLS, 0xff,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_MPLS, 0xff,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_MPLS, 0xff,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ 0x0000,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_MPLS, 0xff,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_MPLS, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_MPLS_PL, 0xff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_MPLS_PL, 0xff,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_MPLS_PL, 0xff,
+ 0x0000,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_MPLS_PL, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ 0x0000,
+ NPC_IP_TTL_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0001,
+ NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_TCP,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_UDP,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_SCTP,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_ICMP,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_IGMP,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_ESP,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_AH,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_GRE,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_IP,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_IP6,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_MPLS,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_TCP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_UDP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_SCTP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_ICMP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_IGMP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_ESP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_AH,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_GRE,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_IP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_IP6,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_MPLS,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ 0x0000,
+ NPC_IP6_HOP_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_TCP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_UDP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_SCTP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_ICMP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_ICMP6 << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_GRE << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_IP6 << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_MPLS << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_HOP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_DEST << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_ROUT << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_FRAG << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_ESP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_AH << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_MOBILITY << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_HOSTID << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_SHIM6 << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_NGIO, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_NA, 0x00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+};
+
+static struct npc_kpu_profile_cam kpu6_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ {
+ NPC_S_KPU6_IP6_EXT, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_FRAG, 0xff,
+ NPC_IPNH_TCP << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_FRAG, 0xff,
+ NPC_IPNH_UDP << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_FRAG, 0xff,
+ NPC_IPNH_SCTP << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_FRAG, 0xff,
+ NPC_IPNH_ICMP << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_FRAG, 0xff,
+ NPC_IPNH_ICMP6 << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_FRAG, 0xff,
+ NPC_IPNH_ESP << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_FRAG, 0xff,
+ NPC_IPNH_AH << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_FRAG, 0xff,
+ NPC_IPNH_GRE << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_FRAG, 0xff,
+ NPC_IPNH_IP6 << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_FRAG, 0xff,
+ NPC_IPNH_MPLS << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_FRAG, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_HOP_DEST, 0xff,
+ NPC_IPNH_TCP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_HOP_DEST, 0xff,
+ NPC_IPNH_UDP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_HOP_DEST, 0xff,
+ NPC_IPNH_SCTP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_HOP_DEST, 0xff,
+ NPC_IPNH_ICMP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_HOP_DEST, 0xff,
+ NPC_IPNH_ICMP6 << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_HOP_DEST, 0xff,
+ NPC_IPNH_ESP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_HOP_DEST, 0xff,
+ NPC_IPNH_AH << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_HOP_DEST, 0xff,
+ NPC_IPNH_GRE << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_HOP_DEST, 0xff,
+ NPC_IPNH_IP6 << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_HOP_DEST, 0xff,
+ NPC_IPNH_MPLS << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_HOP_DEST, 0xff,
+ NPC_IPNH_ROUT << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_HOP_DEST, 0xff,
+ NPC_IPNH_FRAG << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_HOP_DEST, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_ROUT, 0xff,
+ NPC_IPNH_TCP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_ROUT, 0xff,
+ NPC_IPNH_UDP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_ROUT, 0xff,
+ NPC_IPNH_SCTP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_ROUT, 0xff,
+ NPC_IPNH_ICMP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_ROUT, 0xff,
+ NPC_IPNH_ICMP6 << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_ROUT, 0xff,
+ NPC_IPNH_ESP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_ROUT, 0xff,
+ NPC_IPNH_AH << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_ROUT, 0xff,
+ NPC_IPNH_GRE << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_ROUT, 0xff,
+ NPC_IPNH_IP6 << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_ROUT, 0xff,
+ NPC_IPNH_MPLS << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_ROUT, 0xff,
+ NPC_IPNH_FRAG << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_ROUT, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_FRAG, 0xff,
+ NPC_IPNH_TCP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_FRAG, 0xff,
+ NPC_IPNH_UDP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_FRAG, 0xff,
+ NPC_IPNH_SCTP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_FRAG, 0xff,
+ NPC_IPNH_ICMP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_FRAG, 0xff,
+ NPC_IPNH_ICMP6 << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_FRAG, 0xff,
+ NPC_IPNH_ESP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_FRAG, 0xff,
+ NPC_IPNH_AH << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_FRAG, 0xff,
+ NPC_IPNH_GRE << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_FRAG, 0xff,
+ NPC_IPNH_IP6 << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_FRAG, 0xff,
+ NPC_IPNH_MPLS << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_FRAG, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_TCP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_UDP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_SCTP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_ICMP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_ICMP6 << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_ESP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_AH << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_GRE << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_IP6 << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_MPLS << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_ROUT << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_FRAG << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ NPC_IPNH_TCP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ NPC_IPNH_UDP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ NPC_IPNH_SCTP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ NPC_IPNH_ICMP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ NPC_IPNH_ICMP6 << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ NPC_IPNH_ESP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ NPC_IPNH_AH << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ NPC_IPNH_GRE << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ NPC_IPNH_IP6 << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ NPC_IPNH_MPLS << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ NPC_IPNH_FRAG << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_NA, 0x00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+};
+
+static struct npc_kpu_profile_cam kpu7_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ {
+ NPC_S_KPU7_IP6_EXT, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_ROUT, 0xff,
+ NPC_IPNH_TCP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_ROUT, 0xff,
+ NPC_IPNH_UDP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_ROUT, 0xff,
+ NPC_IPNH_SCTP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_ROUT, 0xff,
+ NPC_IPNH_ICMP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_ROUT, 0xff,
+ NPC_IPNH_ICMP6 << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_ROUT, 0xff,
+ NPC_IPNH_ESP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_ROUT, 0xff,
+ NPC_IPNH_AH << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_ROUT, 0xff,
+ NPC_IPNH_GRE << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_ROUT, 0xff,
+ NPC_IPNH_IP6 << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_ROUT, 0xff,
+ NPC_IPNH_MPLS << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_ROUT, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_FRAG, 0xff,
+ NPC_IPNH_TCP << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_FRAG, 0xff,
+ NPC_IPNH_UDP << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_FRAG, 0xff,
+ NPC_IPNH_SCTP << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_FRAG, 0xff,
+ NPC_IPNH_ICMP << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_FRAG, 0xff,
+ NPC_IPNH_ICMP6 << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_FRAG, 0xff,
+ NPC_IPNH_ESP << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_FRAG, 0xff,
+ NPC_IPNH_AH << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_FRAG, 0xff,
+ NPC_IPNH_GRE << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_FRAG, 0xff,
+ NPC_IPNH_IP6 << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_FRAG, 0xff,
+ NPC_IPNH_MPLS << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_FRAG, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_CPT_IP6_FRAG, 0xff,
+ NPC_IPNH_TCP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_CPT_IP6_FRAG, 0xff,
+ NPC_IPNH_UDP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_CPT_IP6_FRAG, 0xff,
+ NPC_IPNH_SCTP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_CPT_IP6_FRAG, 0xff,
+ NPC_IPNH_ICMP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_CPT_IP6_FRAG, 0xff,
+ NPC_IPNH_ICMP6 << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_CPT_IP6_FRAG, 0xff,
+ NPC_IPNH_ESP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_CPT_IP6_FRAG, 0xff,
+ NPC_IPNH_AH << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_CPT_IP6_FRAG, 0xff,
+ NPC_IPNH_GRE << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_CPT_IP6_FRAG, 0xff,
+ NPC_IPNH_IP6 << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_CPT_IP6_FRAG, 0xff,
+ NPC_IPNH_MPLS << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_CPT_IP6_FRAG, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_NA, 0x00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+};
+
+static struct npc_kpu_profile_cam kpu8_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ {
+ NPC_S_KPU8_TCP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_TCP_FLAGS_FIN,
+ NPC_TCP_FLAGS_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_TCP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ NPC_TCP_FLAGS_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_TCP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_TCP_FLAGS_RST | NPC_TCP_FLAGS_FIN,
+ NPC_TCP_FLAGS_RST | NPC_TCP_FLAGS_FIN,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_TCP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_TCP_FLAGS_URG | NPC_TCP_FLAGS_SYN,
+ NPC_TCP_FLAGS_URG | NPC_TCP_FLAGS_SYN,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_TCP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_TCP_FLAGS_RST | NPC_TCP_FLAGS_SYN,
+ NPC_TCP_FLAGS_RST | NPC_TCP_FLAGS_SYN,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_TCP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_TCP_FLAGS_SYN | NPC_TCP_FLAGS_FIN,
+ NPC_TCP_FLAGS_SYN | NPC_TCP_FLAGS_FIN,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_TCP, 0xff,
+ NPC_TCP_PORT_HTTP,
+ 0xffff,
+ NPC_TCP_DATA_OFFSET_5,
+ NPC_TCP_DATA_OFFSET_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_TCP, 0xff,
+ NPC_TCP_PORT_HTTPS,
+ 0xffff,
+ NPC_TCP_DATA_OFFSET_5,
+ NPC_TCP_DATA_OFFSET_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_TCP, 0xff,
+ NPC_TCP_PORT_PPTP,
+ 0xffff,
+ NPC_TCP_DATA_OFFSET_5,
+ NPC_TCP_DATA_OFFSET_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_TCP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_TCP_DATA_OFFSET_5,
+ NPC_TCP_DATA_OFFSET_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_TCP, 0xff,
+ NPC_TCP_PORT_HTTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_TCP, 0xff,
+ NPC_TCP_PORT_HTTPS,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_TCP, 0xff,
+ NPC_TCP_PORT_PPTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_TCP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff,
+ NPC_UDP_PORT_VXLAN,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff,
+ NPC_UDP_PORT_VXLANGPE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff,
+ NPC_UDP_PORT_GENEVE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff,
+ NPC_UDP_PORT_GTPC,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff,
+ NPC_UDP_PORT_GTPU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff,
+ NPC_UDP_PORT_PTP_E,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff,
+ NPC_UDP_PORT_PTP_G,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff,
+ NPC_UDP_PORT_MPLS,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff,
+ NPC_UDP_PORT_ESP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_UDP_PORT_ESP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_SCTP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_ICMP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_IGMP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_ICMP6, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_AH, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_TRANS_ETH_BR,
+ 0xffff,
+ NPC_GRE_F_KEY,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_TRANS_ETH_BR,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ NPC_GRE_F_CSUM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ NPC_GRE_F_KEY,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ NPC_GRE_F_CSUM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ NPC_GRE_F_KEY,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ NPC_GRE_F_CSUM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ NPC_GRE_F_KEY,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ NPC_GRE_F_CSUM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ NPC_GRE_F_KEY,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ NPC_GRE_F_CSUM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ NPC_GRE_F_KEY,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ 0x0000,
+ 0xffff,
+ NPC_GRE_F_ROUTE,
+ 0x4fff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ 0x0000,
+ 0xffff,
+ 0x0000,
+ 0x4fff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ 0x0000,
+ 0xffff,
+ 0x0000,
+ 0x0003,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_PPP,
+ 0xffff,
+ NPC_GRE_F_KEY | NPC_GRE_VER_1,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_PPP,
+ 0xffff,
+ NPC_GRE_F_KEY | NPC_GRE_F_SEQ | NPC_GRE_VER_1,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_PPP,
+ 0xffff,
+ NPC_GRE_F_KEY | NPC_GRE_F_ACK | NPC_GRE_VER_1,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_PPP,
+ 0xffff,
+ NPC_GRE_F_KEY | NPC_GRE_F_SEQ | NPC_GRE_F_ACK | NPC_GRE_VER_1,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ 0x0000,
+ 0xffff,
+ 0x2001,
+ 0xef7f,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ 0x0000,
+ 0xffff,
+ 0x0001,
+ 0x0003,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_NA, 0x00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+};
+
+static struct npc_kpu_profile_cam kpu9_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ {
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 0xff,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS_IN_NSH, 0xff,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS_IN_NSH, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS_IN_NSH, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS_IN_NSH, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS_IN_IP, 0xff,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS_IN_IP, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS_IN_IP, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS_IN_IP, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ },
+ {
+ NPC_S_KPU9_TU_NSH_IN_GRE, 0xff,
+ NPC_NSH_NP_IP,
+ NPC_NSH_NP_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU9_TU_NSH_IN_GRE, 0xff,
+ NPC_NSH_NP_IP6,
+ NPC_NSH_NP_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU9_TU_NSH_IN_GRE, 0xff,
+ NPC_NSH_NP_ETH,
+ NPC_NSH_NP_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU9_TU_NSH_IN_GRE, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU9_VXLAN, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_VXLAN_I,
+ NPC_VXLAN_I,
+ 0x0000,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU9_VXLAN, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0xffff,
+ 0x0000,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU9_VXLAN, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU9_VXLANGPE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_NP_IP,
+ NPC_VXLANGPE_NP_MASK,
+ },
+ {
+ NPC_S_KPU9_VXLANGPE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_NP_IP6,
+ NPC_VXLANGPE_NP_MASK,
+ },
+ {
+ NPC_S_KPU9_VXLANGPE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_NP_ETH,
+ NPC_VXLANGPE_NP_MASK,
+ },
+ {
+ NPC_S_KPU9_VXLANGPE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_NP_NSH,
+ NPC_VXLANGPE_NP_MASK,
+ },
+ {
+ NPC_S_KPU9_VXLANGPE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_NP_MPLS,
+ NPC_VXLANGPE_NP_MASK,
+ },
+ {
+ NPC_S_KPU9_VXLANGPE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_VXLANGPE_P,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_NP_IP,
+ NPC_VXLANGPE_NP_MASK,
+ },
+ {
+ NPC_S_KPU9_VXLANGPE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_VXLANGPE_P,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_NP_IP6,
+ NPC_VXLANGPE_NP_MASK,
+ },
+ {
+ NPC_S_KPU9_VXLANGPE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_VXLANGPE_P,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_NP_ETH,
+ NPC_VXLANGPE_NP_MASK,
+ },
+ {
+ NPC_S_KPU9_VXLANGPE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_VXLANGPE_P,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_NP_NSH,
+ NPC_VXLANGPE_NP_MASK,
+ },
+ {
+ NPC_S_KPU9_VXLANGPE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_VXLANGPE_P,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_NP_MPLS,
+ NPC_VXLANGPE_NP_MASK,
+ },
+ {
+ NPC_S_KPU9_VXLANGPE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_VXLANGPE_P,
+ NPC_VXLANGPE_P,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU9_VXLANGPE, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ NPC_VXLANGPE_P,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU9_GENEVE, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_ETYPE_TRANS_ETH_BR,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU9_GENEVE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_GENEVE_F_OAM,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_ETYPE_TRANS_ETH_BR,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU9_GENEVE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_GENEVE_F_CRI_OPT,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_ETYPE_TRANS_ETH_BR,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU9_GENEVE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_ETYPE_TRANS_ETH_BR,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU9_GENEVE, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_ETYPE_IP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU9_GENEVE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_GENEVE_F_OAM,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_ETYPE_IP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU9_GENEVE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_GENEVE_F_CRI_OPT,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_ETYPE_IP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU9_GENEVE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_ETYPE_IP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU9_GENEVE, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU9_GENEVE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_GENEVE_F_OAM,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU9_GENEVE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_GENEVE_F_CRI_OPT,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU9_GENEVE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU9_GTPC, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU9_GTPU, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_GTP_PT_GTP | NPC_GTP_VER1,
+ NPC_GTP_PT_MASK | NPC_GTP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS_IN_UDP, 0xff,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS_IN_UDP, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS_IN_UDP, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS_IN_UDP, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ },
+ {
+ NPC_S_KPU9_ESP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_NA, 0x00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+};
+
+static struct npc_kpu_profile_cam kpu10_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ {
+ NPC_S_KPU10_TU_MPLS, 0xff,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_MPLS, 0xff,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_MPLS, 0xff,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ 0x0000,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_MPLS, 0xff,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_MPLS, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_MPLS_PL, 0xff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_MPLS_PL, 0xff,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_MPLS_PL, 0xff,
+ 0x0000,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_MPLS_PL, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_MPLS_IN_VXLANGPE, 0xff,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_MPLS_IN_VXLANGPE, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_MPLS_IN_VXLANGPE, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ },
+ {
+ NPC_S_KPU10_TU_MPLS_IN_VXLANGPE, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ },
+ {
+ NPC_S_KPU10_TU_NSH_IN_VXLANGPE, 0xff,
+ NPC_NSH_NP_IP,
+ NPC_NSH_NP_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_NSH_IN_VXLANGPE, 0xff,
+ NPC_NSH_NP_IP6,
+ NPC_NSH_NP_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_NSH_IN_VXLANGPE, 0xff,
+ NPC_NSH_NP_ETH,
+ NPC_NSH_NP_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_NSH_IN_VXLANGPE, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_NA, 0x00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+};
+
+static struct npc_kpu_profile_cam kpu11_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_PPP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_MPLS, 0xff,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_MPLS, 0xff,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_MPLS, 0xff,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_MPLS, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_MPLS_PL, 0xff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_MPLS_PL, 0xff,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_MPLS_PL, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER_IN_NSH, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_NA, 0x00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+};
+
+static struct npc_kpu_profile_cam kpu12_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ {
+ NPC_S_KPU12_TU_IP, 0xff,
+ NPC_IPNH_TCP,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff,
+ NPC_IPNH_UDP,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff,
+ NPC_IPNH_SCTP,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff,
+ NPC_IPNH_ICMP,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff,
+ NPC_IPNH_IGMP,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff,
+ NPC_IPNH_ESP,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff,
+ NPC_IPNH_AH,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff,
+ NPC_IPNH_TCP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff,
+ NPC_IPNH_UDP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff,
+ NPC_IPNH_SCTP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff,
+ NPC_IPNH_ICMP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff,
+ NPC_IPNH_IGMP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff,
+ NPC_IPNH_ESP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff,
+ NPC_IPNH_AH,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_ARP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP6, 0xff,
+ NPC_IPNH_TCP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP6, 0xff,
+ NPC_IPNH_UDP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP6, 0xff,
+ NPC_IPNH_SCTP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP6, 0xff,
+ NPC_IPNH_ICMP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP6, 0xff,
+ NPC_IPNH_ICMP6 << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP6, 0xff,
+ NPC_IPNH_ESP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP6, 0xff,
+ NPC_IPNH_AH << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP6, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP6, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_NA, 0x00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+};
+
+static struct npc_kpu_profile_cam kpu13_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ {
+ NPC_S_KPU13_TU_IP6_EXT, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+};
+
+static struct npc_kpu_profile_cam kpu14_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ {
+ NPC_S_KPU14_TU_IP6_EXT, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+};
+
+static struct npc_kpu_profile_cam kpu15_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ {
+ NPC_S_KPU15_TU_TCP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_TCP_FLAGS_FIN,
+ NPC_TCP_FLAGS_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_TCP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ NPC_TCP_FLAGS_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_TCP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_TCP_FLAGS_RST | NPC_TCP_FLAGS_FIN,
+ NPC_TCP_FLAGS_RST | NPC_TCP_FLAGS_FIN,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_TCP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_TCP_FLAGS_URG | NPC_TCP_FLAGS_SYN,
+ NPC_TCP_FLAGS_URG | NPC_TCP_FLAGS_SYN,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_TCP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_TCP_FLAGS_RST | NPC_TCP_FLAGS_SYN,
+ NPC_TCP_FLAGS_RST | NPC_TCP_FLAGS_SYN,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_TCP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_TCP_FLAGS_SYN | NPC_TCP_FLAGS_FIN,
+ NPC_TCP_FLAGS_SYN | NPC_TCP_FLAGS_FIN,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_TCP, 0xff,
+ NPC_TCP_PORT_HTTP,
+ 0xffff,
+ NPC_TCP_DATA_OFFSET_5,
+ NPC_TCP_DATA_OFFSET_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_TCP, 0xff,
+ NPC_TCP_PORT_HTTPS,
+ 0xffff,
+ NPC_TCP_DATA_OFFSET_5,
+ NPC_TCP_DATA_OFFSET_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_TCP, 0xff,
+ NPC_TCP_PORT_PPTP,
+ 0xffff,
+ NPC_TCP_DATA_OFFSET_5,
+ NPC_TCP_DATA_OFFSET_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_TCP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_TCP_DATA_OFFSET_5,
+ NPC_TCP_DATA_OFFSET_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_TCP, 0xff,
+ NPC_TCP_PORT_HTTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_TCP, 0xff,
+ NPC_TCP_PORT_HTTPS,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_TCP, 0xff,
+ NPC_TCP_PORT_PPTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_TCP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_UDP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_SCTP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_ICMP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_IGMP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_ICMP6, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_ESP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_AH, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_NA, 0x00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+};
+
+static struct npc_kpu_profile_cam kpu16_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ {
+ NPC_S_KPU16_TCP_DATA, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU16_HTTP_DATA, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU16_HTTPS_DATA, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU16_PPTP_DATA, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU16_UDP_DATA, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU16_UDP_PTP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+};
+
+static struct npc_kpu_profile_action kpu1_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 3, 0,
+ NPC_S_KPU5_IP, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 3, 0,
+ NPC_S_KPU5_IP6, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_ARP, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_RARP, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_PTP, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_FCOE, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 12, 0, 0, 0,
+ NPC_S_KPU2_CTAG2, 12, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 12, 0, 0,
+ NPC_S_KPU2_CTAG, 12, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 22, 0, 0,
+ NPC_S_KPU2_SBTAG, 12, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU2_QINQ, 12, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 12, 26, 0, 0,
+ NPC_S_KPU2_ETAG, 12, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_ETAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU4_MPLS, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ NPC_F_LA_L_WITH_MPLS,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU4_MPLS, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ NPC_F_LA_L_WITH_MPLS,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU4_NSH, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ NPC_F_LA_L_WITH_NSH,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 12, 0, 1, 0,
+ NPC_S_KPU3_DSA, 12, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 12, 0, 2, 0,
+ NPC_S_KPU4_PPPOE, 12, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LA, NPC_LT_LA_8023,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LA, NPC_LT_LA_8023,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ NPC_F_LA_L_UNK_ETYPE,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 3, 0,
+ NPC_S_KPU5_IP, 22, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 3, 0,
+ NPC_S_KPU5_IP6, 22, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_ARP, 22, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_RARP, 22, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_PTP, 22, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_FCOE, 22, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 12, 0, 0, 0,
+ NPC_S_KPU2_CTAG2, 20, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_TAG
+ | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU2_CTAG, 20, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_TAG
+ | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 22, 0, 0,
+ NPC_S_KPU2_SBTAG, 20, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_TAG
+ | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU2_QINQ, 20, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_TAG
+ | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 12, 26, 0, 0,
+ NPC_S_KPU2_ETAG, 20, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_TAG
+ | NPC_F_LA_L_WITH_ETAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU4_MPLS, 22, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_L_WITH_MPLS,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU4_MPLS, 22, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_L_WITH_MPLS,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU4_NSH, 22, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_L_WITH_NSH,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_L_UNK_ETYPE,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LA, NPC_EC_IH_LENGTH,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ NPC_F_LA_L_UNK_ETYPE,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 16, 0, 0,
+ NPC_S_KPU2_EXDSA, 12, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 12, 2, 0,
+ NPC_S_KPU4_FDSA, 12, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LA, NPC_EC_EDSA_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 3, 0,
+ NPC_S_KPU5_IP, 30, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 3, 0,
+ NPC_S_KPU5_IP6, 30, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_ARP, 30, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_RARP, 30, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_PTP, 30, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_FCOE, 30, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 12, 0, 0, 0,
+ NPC_S_KPU2_CTAG2, 28, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_U_HAS_TAG
+ | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU2_CTAG, 28, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_U_HAS_TAG
+ | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 22, 0, 0,
+ NPC_S_KPU2_SBTAG, 28, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_U_HAS_TAG
+ | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU2_QINQ, 28, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_U_HAS_TAG
+ | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 12, 26, 0, 0,
+ NPC_S_KPU2_ETAG, 28, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_U_HAS_TAG
+ | NPC_F_LA_L_WITH_ETAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU4_MPLS, 30, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_L_WITH_MPLS,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU4_MPLS, 30, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_L_WITH_MPLS,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU4_NSH, 30, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_L_WITH_NSH,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_L_UNK_ETYPE,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 3, 0,
+ NPC_S_KPU5_IP, 38, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 3, 0,
+ NPC_S_KPU5_IP6, 38, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_ARP, 38, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_RARP, 38, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_PTP, 38, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_FCOE, 38, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 12, 0, 0, 0,
+ NPC_S_KPU2_CTAG2, 36, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2
+ | NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU2_CTAG, 36, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2
+ | NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 22, 0, 0,
+ NPC_S_KPU2_SBTAG, 36, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2
+ | NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU2_QINQ, 36, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2
+ | NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 12, 26, 0, 0,
+ NPC_S_KPU2_ETAG, 36, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2
+ | NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_ETAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU4_MPLS, 38, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2
+ | NPC_F_LA_L_WITH_MPLS,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU4_MPLS, 38, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2
+ | NPC_F_LA_L_WITH_MPLS,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU4_NSH, 38, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2
+ | NPC_F_LA_L_WITH_NSH,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2
+ | NPC_F_LA_L_UNK_ETYPE,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 3, 0,
+ NPC_S_KPU5_IP, 14, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 3, 0,
+ NPC_S_KPU5_IP6, 14, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_ARP, 14, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_RARP, 14, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_PTP, 14, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_FCOE, 14, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 12, 0, 0, 0,
+ NPC_S_KPU2_CTAG2, 12, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU2_CTAG, 12, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 22, 0, 0,
+ NPC_S_KPU2_SBTAG, 12, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU2_QINQ, 12, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 12, 26, 0, 0,
+ NPC_S_KPU2_ETAG, 12, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU4_MPLS, 14, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU4_MPLS, 14, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU4_NSH, 14, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 3, 0,
+ NPC_S_KPU5_CPT_IP, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_CPT_HDR,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 3, 0,
+ NPC_S_KPU5_CPT_IP6, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_CPT_HDR,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU2_CPT_CTAG, 12, 1,
+ NPC_LID_LA, NPC_LT_LA_CPT_HDR,
+ NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU2_CPT_QINQ, 12, 1,
+ NPC_LID_LA, NPC_LT_LA_CPT_HDR,
+ NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 0, 0, 1, 0,
+ NPC_S_KPU3_VLAN_EXDSA, 12, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LA, NPC_EC_L2_K1,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+};
+
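+/*
+ * Field order in every kpu*_action_entries entry below matches
+ * struct npc_kpu_profile_action:
+ *   errlev, errcode,
+ *   dp0_offset, dp1_offset, dp2_offset, bypass_count, parse_done,
+ *   next_state, ptr_advance, cap_ena,
+ *   lid, ltype,
+ *   flags,
+ *   offset, mask, right, shift.
+ *
+ * KPU2 resolves the outer LB tag layer seen in the entries that follow:
+ * CTAG, STAG/QINQ, ETAG, BTAG, PPPoE and EDSA/EXDSA headers.
+ */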
+static struct npc_kpu_profile_action kpu2_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_IP, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_IP6, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_ARP, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_RARP, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_PTP, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_FCOE, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 1, 0,
+ NPC_S_KPU4_MPLS, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 1, 0,
+ NPC_S_KPU4_MPLS, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 1, 0,
+ NPC_S_KPU4_NSH, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_NGIO, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_IP, 14, 1,
+ NPC_LID_LB, NPC_LT_LB_PPPOE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_IP6, 14, 1,
+ NPC_LID_LB, NPC_LT_LB_PPPOE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ NPC_F_LB_U_UNK_ETYPE,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_IP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_IP6, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_ARP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_RARP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_PTP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_FCOE, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 1, 0,
+ NPC_S_KPU4_MPLS, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 1, 0,
+ NPC_S_KPU4_MPLS, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 1, 0,
+ NPC_S_KPU4_NSH, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG_UNK,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 0, 0, 0,
+ NPC_S_KPU3_CTAG, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_STAG_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 0, 0, 0,
+ NPC_S_KPU3_STAG, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_STAG_STAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_IP, 24, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_IP6, 24, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_ARP, 24, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_RARP, 24, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_PTP, 24, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_FCOE, 24, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 1, 0,
+ NPC_S_KPU4_MPLS, 24, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 1, 0,
+ NPC_S_KPU4_MPLS, 24, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 1, 0,
+ NPC_S_KPU4_NSH, 24, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU3_STAG, 24, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG_STAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU3_CTAG, 24, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG_UNK,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_UNK_ETYPE,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_IP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_IP6, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_ARP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_RARP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_PTP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_FCOE, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 1, 0,
+ NPC_S_KPU4_MPLS, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 1, 0,
+ NPC_S_KPU4_MPLS, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 1, 0,
+ NPC_S_KPU4_NSH, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG_UNK,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 0, 0, 0,
+ NPC_S_KPU3_CTAG, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_QINQ_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 0, 0, 0,
+ NPC_S_KPU3_QINQ, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_QINQ_QINQ,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_UNK_ETYPE,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_IP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_IP6, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_ARP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_RARP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_PTP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_FCOE, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 1, 0,
+ NPC_S_KPU4_MPLS, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ 1,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 1, 0,
+ NPC_S_KPU4_MPLS, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ 2,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 1, 0,
+ NPC_S_KPU4_NSH, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ 2,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU3_CTAG, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 0, 0, 0,
+ NPC_S_KPU3_STAG, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_STAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 0, 0, 0,
+ NPC_S_KPU3_QINQ, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_QINQ,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_IP, 28, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_IP6, 28, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_ARP, 28, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU3_STAG, 28, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG_STAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU3_CTAG, 28, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG_UNK,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ NPC_F_LB_U_UNK_ETYPE,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_IP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_IP6, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_ARP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_RARP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_PTP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_FCOE, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 1, 0,
+ NPC_S_KPU4_MPLS, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 1, 0,
+ NPC_S_KPU4_MPLS, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 1, 0,
+ NPC_S_KPU4_NSH, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 0, 0, 0,
+ NPC_S_KPU3_QINQ, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_QINQ_QINQ,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_UNK_ETYPE,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_IP, 18, 1,
+ NPC_LID_LB, NPC_LT_LB_EDSA,
+ NPC_F_LB_L_EDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_IP6, 18, 1,
+ NPC_LID_LB, NPC_LT_LB_EDSA,
+ NPC_F_LB_L_EDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_ARP, 18, 1,
+ NPC_LID_LB, NPC_LT_LB_EDSA,
+ NPC_F_LB_L_EDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_RARP, 18, 1,
+ NPC_LID_LB, NPC_LT_LB_EDSA,
+ NPC_F_LB_L_EDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_PTP, 18, 1,
+ NPC_LID_LB, NPC_LT_LB_EDSA,
+ NPC_F_LB_L_EDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_FCOE, 18, 1,
+ NPC_LID_LB, NPC_LT_LB_EDSA,
+ NPC_F_LB_L_EDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU3_CTAG, 16, 1,
+ NPC_LID_LB, NPC_LT_LB_EDSA_VLAN,
+ NPC_F_LB_L_EDSA_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_EDSA,
+ NPC_F_LB_U_UNK_ETYPE | NPC_F_LB_L_EDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_IP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_EXDSA,
+ NPC_F_LB_L_EXDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_IP6, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_EXDSA,
+ NPC_F_LB_L_EXDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_ARP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_EXDSA,
+ NPC_F_LB_L_EXDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_RARP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_EXDSA,
+ NPC_F_LB_L_EXDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_PTP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_EXDSA,
+ NPC_F_LB_L_EXDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_FCOE, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_EXDSA,
+ NPC_F_LB_L_EXDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU3_CTAG, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_EXDSA_VLAN,
+ NPC_F_LB_L_EXDSA_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_EXDSA,
+ NPC_F_LB_U_UNK_ETYPE | NPC_F_LB_L_EXDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_CPT_IP, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_CPT_IP6, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_CPT_IP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_CPT_IP6, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LB, NPC_EC_L2_K3,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+};
+
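+/*
+ * KPU3 handles the LB tag combinations reached from KPU2 (inner
+ * CTAG/STAG/QINQ) plus DSA and DSA_VLAN headers, then hands off to
+ * KPU4 (MPLS/NSH) or KPU5 (network layer).
+ */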
+static struct npc_kpu_profile_action kpu3_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 1, 0,
+ NPC_S_KPU5_IP, 6, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 1, 0,
+ NPC_S_KPU5_IP6, 6, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_ARP, 6, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_RARP, 6, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_PTP, 6, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_FCOE, 6, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 6, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 6, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU4_NSH, 6, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 1, 0,
+ NPC_S_KPU5_IP, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 1, 0,
+ NPC_S_KPU5_IP6, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_ARP, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_RARP, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_PTP, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_FCOE, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU4_NSH, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 1, 0,
+ NPC_S_KPU5_IP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 1, 0,
+ NPC_S_KPU5_IP6, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_ARP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_RARP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU4_NSH, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 1, 0,
+ NPC_S_KPU5_IP, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 1, 0,
+ NPC_S_KPU5_IP6, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_ARP, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_RARP, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_PTP, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_FCOE, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU4_NSH, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 1, 0,
+ NPC_S_KPU5_IP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 1, 0,
+ NPC_S_KPU5_IP6, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_ARP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_RARP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_PTP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_FCOE, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU4_NSH, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 1, 0,
+ NPC_S_KPU5_IP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 1, 0,
+ NPC_S_KPU5_IP6, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_ARP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_RARP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_PTP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_FCOE, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU4_NSH, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 1, 0,
+ NPC_S_KPU5_IP, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 1, 0,
+ NPC_S_KPU5_IP6, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_ARP, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_RARP, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_PTP, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_FCOE, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU4_NSH, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 1, 0,
+ NPC_S_KPU5_IP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 1, 0,
+ NPC_S_KPU5_IP6, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_ARP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_RARP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU4_NSH, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 1, 0,
+ NPC_S_KPU5_IP, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 1, 0,
+ NPC_S_KPU5_IP6, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_ARP, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_RARP, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_PTP, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_FCOE, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU4_NSH, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 1, 0,
+ NPC_S_KPU5_IP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 1, 0,
+ NPC_S_KPU5_IP6, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_ARP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_RARP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_PTP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_FCOE, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU4_NSH, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 1, 0,
+ NPC_S_KPU5_IP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_DSA,
+ NPC_F_LB_L_DSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 1, 0,
+ NPC_S_KPU5_IP6, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_DSA,
+ NPC_F_LB_L_DSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_ARP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_DSA,
+ NPC_F_LB_L_DSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_RARP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_DSA,
+ NPC_F_LB_L_DSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_PTP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_DSA,
+ NPC_F_LB_L_DSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_FCOE, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_DSA,
+ NPC_F_LB_L_DSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 1, 0,
+ NPC_S_KPU5_IP, 14, 1,
+ NPC_LID_LB, NPC_LT_LB_DSA_VLAN,
+ NPC_F_LB_L_DSA_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 1, 0,
+ NPC_S_KPU5_IP6, 14, 1,
+ NPC_LID_LB, NPC_LT_LB_DSA_VLAN,
+ NPC_F_LB_L_DSA_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_ARP, 14, 1,
+ NPC_LID_LB, NPC_LT_LB_DSA_VLAN,
+ NPC_F_LB_L_DSA_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_RARP, 14, 1,
+ NPC_LID_LB, NPC_LT_LB_DSA_VLAN,
+ NPC_F_LB_L_DSA_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_PTP, 14, 1,
+ NPC_LID_LB, NPC_LT_LB_DSA_VLAN,
+ NPC_F_LB_L_DSA_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_FCOE, 14, 1,
+ NPC_LID_LB, NPC_LT_LB_DSA_VLAN,
+ NPC_F_LB_L_DSA_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_DSA_VLAN,
+ NPC_F_LB_U_UNK_ETYPE | NPC_F_LB_L_DSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_DSA,
+ NPC_F_LB_U_UNK_ETYPE | NPC_F_LB_L_DSA_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU4_VLAN_EXDSA, 12, 1,
+ NPC_LID_LB, NPC_LT_LB_VLAN_EXDSA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LB, NPC_EC_L2_K3,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+};
+
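+/*
+ * KPU4 consumes MPLS label stacks (LC_MPLS with the 2/3/4 label
+ * flags), NSH, FDSA and PPPoE payloads, dispatching either to KPU5
+ * or directly to the tunnel KPUs (KPU9/KPU11/KPU12) for the inner
+ * packet.
+ */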
+static struct npc_kpu_profile_action kpu4_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU5_MPLS_PL, 4, 1,
+ NPC_LID_LC, NPC_LT_LC_MPLS,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU5_MPLS_PL, 8, 1,
+ NPC_LID_LC, NPC_LT_LC_MPLS,
+ NPC_F_LC_L_MPLS_2_LABELS,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU5_MPLS_PL, 12, 1,
+ NPC_LID_LC, NPC_LT_LC_MPLS,
+ NPC_F_LC_L_MPLS_3_LABELS,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 4, 0, 0, 0,
+ NPC_S_KPU5_MPLS, 12, 1,
+ NPC_LID_LC, NPC_LT_LC_MPLS,
+ NPC_F_LC_L_MPLS_4_LABELS,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 7, 0,
+ NPC_S_KPU12_TU_IP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_NSH,
+ 0,
+ 1, 0x3f, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 7, 0,
+ NPC_S_KPU12_TU_IP6, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_NSH,
+ 0,
+ 1, 0x3f, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 6, 0,
+ NPC_S_KPU11_TU_ETHER, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_NSH,
+ 0,
+ 1, 0x3f, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 4, 0,
+ NPC_S_KPU9_TU_MPLS_IN_NSH, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_NSH,
+ 0,
+ 1, 0x3f, 0, 2,
+ },
+ {
+ NPC_ERRLEV_LC, NPC_EC_NSH_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_NSH,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU5_IP, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_FDSA,
+ NPC_F_LB_L_FDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU5_IP6, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_FDSA,
+ NPC_F_LB_L_FDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU5_ARP, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_FDSA,
+ NPC_F_LB_L_FDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU5_RARP, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_FDSA,
+ NPC_F_LB_L_FDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU5_PTP, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_FDSA,
+ NPC_F_LB_L_FDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU5_FCOE, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_FDSA,
+ NPC_F_LB_L_FDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU5_IP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_FDSA,
+ NPC_F_LB_L_FDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU5_IP6, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_FDSA,
+ NPC_F_LB_L_FDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU5_ARP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_FDSA,
+ NPC_F_LB_L_FDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU5_RARP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_FDSA,
+ NPC_F_LB_L_FDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU5_PTP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_FDSA,
+ NPC_F_LB_L_FDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU5_FCOE, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_FDSA,
+ NPC_F_LB_L_FDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU5_IP, 14, 1,
+ NPC_LID_LB, NPC_LT_LB_PPPOE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU5_IP6, 14, 1,
+ NPC_LID_LB, NPC_LT_LB_PPPOE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_FDSA,
+ NPC_F_LB_U_UNK_ETYPE | NPC_F_LB_L_FDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU5_IP, 2, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU5_IP6, 2, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU5_ARP, 2, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU5_RARP, 2, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU5_PTP, 2, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU5_FCOE, 2, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU5_IP, 10, 0,
+ NPC_LID_LB, NPC_LT_LB_PPPOE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU5_IP6, 10, 0,
+ NPC_LID_LB, NPC_LT_LB_PPPOE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LB, NPC_EC_L2_K4,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+};
+
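+/*
+ * KPU5 classifies the LC (network) layer: IPv4 with and without
+ * options, IPv6 and its extension headers, ARP, RARP, PTP, FCoE and
+ * NGIO. It flags TTL/hop-limit zero, fragment and version errors and
+ * then advances to the L4 KPUs (TCP, UDP, SCTP, ICMP, ESP, AH, GRE).
+ */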
+static struct npc_kpu_profile_action kpu5_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ {
+ NPC_ERRLEV_LC, NPC_EC_IP_TTL_0,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LC, NPC_EC_IP_FRAG_OFFSET_1,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ NPC_F_LC_U_IP_FRAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 2, 0,
+ NPC_S_KPU8_TCP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU8_UDP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_SCTP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_ICMP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_IGMP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU9_ESP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_AH, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU8_GRE, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 6, 0,
+ NPC_S_KPU12_TU_IP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ NPC_F_LC_L_IP_IN_IP,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 6, 0,
+ NPC_S_KPU12_TU_IP6, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ NPC_F_LC_L_6TO4,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 3, 0,
+ NPC_S_KPU9_TU_MPLS_IN_IP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ NPC_F_LC_L_MPLS_IN_IP,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ NPC_F_LC_U_UNK_PROTO,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ NPC_F_LC_U_IP_FRAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 2, 0,
+ NPC_S_KPU8_TCP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 8, 10, 2, 0,
+ NPC_S_KPU8_UDP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_SCTP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_ICMP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_IGMP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU9_ESP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_AH, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU8_GRE, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 6, 0,
+ NPC_S_KPU12_TU_IP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ NPC_F_LC_L_IP_IN_IP,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 6, 0,
+ NPC_S_KPU12_TU_IP6, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ NPC_F_LC_L_6TO4,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 3, 0,
+ NPC_S_KPU9_TU_MPLS_IN_IP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ NPC_F_LC_L_MPLS_IN_IP,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ NPC_F_LC_U_UNK_PROTO,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ NPC_F_LC_U_IP_FRAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LC, NPC_EC_IP_VER,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_ARP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_RARP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_PTP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_FCOE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LC, NPC_EC_IP6_HOP_0,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 2, 0,
+ NPC_S_KPU8_TCP, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU8_UDP, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_SCTP, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_ICMP, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_ICMP6, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_GRE, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 6, 0,
+ NPC_S_KPU12_TU_IP6, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ NPC_F_LC_L_IP6_TUN_IP6,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 3, 0,
+ NPC_S_KPU9_TU_MPLS_IN_IP, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ NPC_F_LC_L_IP6_MPLS_IN_IP,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU6_IP6_HOP_DEST, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_L_EXT_HOP,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU6_IP6_HOP_DEST, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_L_EXT_DEST,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU6_IP6_ROUT, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_L_EXT_ROUT,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 2, 0, 0, 0,
+ NPC_S_KPU6_IP6_FRAG, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_U_IP6_FRAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU9_ESP, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_AH, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_L_EXT_MOBILITY,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_L_EXT_HOSTID,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_L_EXT_SHIM6,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ NPC_F_LC_U_UNK_PROTO,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LC, NPC_EC_IP6_VER,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 6, 0,
+ NPC_S_KPU12_TU_IP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 6, 0,
+ NPC_S_KPU12_TU_IP6, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 5, 0,
+ NPC_S_KPU11_TU_ETHER, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 5, 0,
+ NPC_S_KPU11_TU_ETHER, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LB, NPC_EC_MPLS_2MANY,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 6, 0,
+ NPC_S_KPU12_TU_IP, 0, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 6, 0,
+ NPC_S_KPU12_TU_IP6, 0, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 5, 0,
+ NPC_S_KPU11_TU_ETHER, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 5, 0,
+ NPC_S_KPU11_TU_ETHER, 0, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LC, NPC_EC_IP_TTL_0,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LC, NPC_EC_IP_FRAG_OFFSET_1,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ NPC_F_LC_U_IP_FRAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 2, 0,
+ NPC_S_KPU8_TCP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU8_UDP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_SCTP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_ICMP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_IGMP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU9_ESP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_AH, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU8_GRE, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 6, 0,
+ NPC_S_KPU12_TU_IP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ NPC_F_LC_L_IP_IN_IP,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 6, 0,
+ NPC_S_KPU12_TU_IP6, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ NPC_F_LC_L_6TO4,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 3, 0,
+ NPC_S_KPU9_TU_MPLS_IN_IP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ NPC_F_LC_L_MPLS_IN_IP,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ NPC_F_LC_U_UNK_PROTO,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 2, 0,
+ NPC_S_KPU8_TCP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 8, 10, 2, 0,
+ NPC_S_KPU8_UDP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_SCTP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_ICMP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_IGMP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU9_ESP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_AH, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU8_GRE, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 6, 0,
+ NPC_S_KPU12_TU_IP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ NPC_F_LC_L_IP_IN_IP,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 6, 0,
+ NPC_S_KPU12_TU_IP6, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ NPC_F_LC_L_6TO4,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 3, 0,
+ NPC_S_KPU9_TU_MPLS_IN_IP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ NPC_F_LC_L_MPLS_IN_IP,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ NPC_F_LC_U_UNK_PROTO,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LC, NPC_EC_IP_VER,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LC, NPC_EC_IP6_HOP_0,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 2, 0,
+ NPC_S_KPU8_TCP, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU8_UDP, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_SCTP, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_ICMP, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_ICMP6, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_GRE, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 6, 0,
+ NPC_S_KPU12_TU_IP6, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ NPC_F_LC_L_IP6_TUN_IP6,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 3, 0,
+ NPC_S_KPU9_TU_MPLS_IN_IP, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ NPC_F_LC_L_IP6_MPLS_IN_IP,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_L_EXT_HOP,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_L_EXT_DEST,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU6_IP6_CPT_ROUT, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_L_EXT_ROUT,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 2, 0, 0, 0,
+ NPC_S_KPU6_IP6_CPT_FRAG, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_U_IP6_FRAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU9_ESP, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_AH, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_L_EXT_MOBILITY,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_L_EXT_HOSTID,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_L_EXT_SHIM6,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ NPC_F_LC_U_UNK_PROTO,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LC, NPC_EC_IP6_VER,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_NGIO,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LC, NPC_EC_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+};
+
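For anyone tracing the tables by hand: each brace-enclosed initializer above (and in the kpuN_action_entries arrays that follow) fills the fields of struct npc_kpu_profile_action in order, and the six-row grouping of every entry follows the field grouping. The annotated copy below is an illustrative aside for reading the tables, not part of this patch; the field names are assumed to mirror the struct definition in the driver's npc.h, and the short glosses are best-effort readings of how the entries use them.

    #include <stdint.h>

    /* Assumed field order, mirroring struct npc_kpu_profile_action in npc.h. */
    struct kpu_action_sketch {
        uint8_t errlev;        /* row 1: NPC_ERRLEV_* reported on a parse error */
        uint8_t errcode;       /*        NPC_EC_* error code                    */
        uint8_t dp0_offset;    /* row 2: offsets of the data words extracted    */
        uint8_t dp1_offset;    /*        from this header for the next KPU's    */
        uint8_t dp2_offset;    /*        CAM match                              */
        uint8_t bypass_count;  /*        KPU stages to skip before next_state   */
        uint8_t parse_done;    /*        1 = parsing stops with this entry      */
        uint8_t next_state;    /* row 3: next parser state, NPC_S_KPU*_*        */
        uint8_t ptr_advance;   /*        bytes to step past the current header  */
        uint8_t cap_ena;       /*        record this layer in the parse result  */
        uint8_t lid;           /* row 4: layer id, NPC_LID_*                    */
        uint8_t ltype;         /*        layer type, NPC_LT_*                   */
        uint8_t flags;         /* row 5: NPC_F_* layer flags                    */
        uint8_t offset;        /* row 6: variable-advance source byte,          */
        uint8_t mask;          /*        mask,                                  */
        uint8_t right;         /*        shift direction flag and               */
        uint8_t shift;         /*        shift amount                           */
    };

Read this way, the first IPv4 entry above that hands off to NPC_S_KPU8_TCP advances 20 bytes (an option-less IPv4 header) and records the layer as NPC_LID_LC/NPC_LT_LC_IP, while the NPC_LT_LC_IP_OPT variants advance 0 bytes and use the trailing offset/mask/shift fields instead, which presumably lets the advance track the IP header's IHL field.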
+static struct npc_kpu_profile_action kpu6_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 1, 0,
+ NPC_S_KPU8_TCP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 8, 10, 1, 0,
+ NPC_S_KPU8_UDP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_SCTP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_ICMP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_ICMP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU9_ESP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_AH, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_GRE, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 5, 0,
+ NPC_S_KPU12_TU_IP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU9_TU_MPLS_IN_IP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU7_IP6_ROUT, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 2, 0, 0, 0,
+ NPC_S_KPU7_IP6_FRAG, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 1, 0,
+ NPC_S_KPU8_TCP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 8, 10, 1, 0,
+ NPC_S_KPU8_UDP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_SCTP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_ICMP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_ICMP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU9_ESP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_AH, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_GRE, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 5, 0,
+ NPC_S_KPU12_TU_IP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU9_TU_MPLS_IN_IP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 2, 0, 0, 0,
+ NPC_S_KPU7_IP6_FRAG, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 1, 0,
+ NPC_S_KPU8_TCP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 8, 10, 1, 0,
+ NPC_S_KPU8_UDP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_SCTP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_ICMP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_ICMP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU9_ESP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_AH, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_GRE, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 5, 0,
+ NPC_S_KPU12_TU_IP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU9_TU_MPLS_IN_IP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 1, 0,
+ NPC_S_KPU8_TCP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 8, 10, 1, 0,
+ NPC_S_KPU8_UDP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_SCTP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_ICMP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_ICMP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU9_ESP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_AH, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_GRE, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 5, 0,
+ NPC_S_KPU12_TU_IP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU9_TU_MPLS_IN_IP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU7_IP6_ROUT, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 2, 0, 0, 0,
+ NPC_S_KPU7_CPT_IP6_FRAG, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 1, 0,
+ NPC_S_KPU8_TCP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 8, 10, 1, 0,
+ NPC_S_KPU8_UDP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_SCTP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_ICMP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_ICMP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU9_ESP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_AH, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_GRE, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 5, 0,
+ NPC_S_KPU12_TU_IP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU9_TU_MPLS_IN_IP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 2, 0, 0, 0,
+ NPC_S_KPU7_CPT_IP6_FRAG, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LC, NPC_EC_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+};
+
+static struct npc_kpu_profile_action kpu7_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 0, 0,
+ NPC_S_KPU8_TCP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 8, 10, 0, 0,
+ NPC_S_KPU8_UDP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_SCTP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_ICMP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_ICMP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU9_ESP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_AH, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_GRE, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 4, 0,
+ NPC_S_KPU12_TU_IP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 1, 0,
+ NPC_S_KPU9_TU_MPLS_IN_IP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 0, 0,
+ NPC_S_KPU8_TCP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 8, 10, 0, 0,
+ NPC_S_KPU8_UDP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_SCTP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_ICMP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_ICMP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU9_ESP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_AH, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_GRE, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 4, 0,
+ NPC_S_KPU12_TU_IP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 1, 0,
+ NPC_S_KPU9_TU_MPLS_IN_IP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LC, NPC_EC_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+};
+
+static struct npc_kpu_profile_action kpu8_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ {
+ NPC_ERRLEV_LD, NPC_EC_TCP_FLAGS_FIN_ONLY,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LD, NPC_EC_TCP_FLAGS_ZERO,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LD, NPC_EC_TCP_FLAGS_RST_FIN,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LD, NPC_EC_TCP_FLAGS_URG_SYN,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LD, NPC_EC_TCP_FLAGS_RST_SYN,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LD, NPC_EC_TCP_FLAGS_SYN_FIN,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 7, 0,
+ NPC_S_KPU16_HTTP_DATA, 20, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 7, 0,
+ NPC_S_KPU16_HTTPS_DATA, 20, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 7, 0,
+ NPC_S_KPU16_PPTP_DATA, 20, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 7, 0,
+ NPC_S_KPU16_TCP_DATA, 20, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP,
+ NPC_F_LD_L_TCP_UNK_PORT,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 7, 0,
+ NPC_S_KPU16_HTTP_DATA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP,
+ NPC_F_LD_L_TCP_HAS_OPTIONS,
+ 12, 0xf0, 1, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 7, 0,
+ NPC_S_KPU16_HTTPS_DATA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP,
+ NPC_F_LD_L_TCP_HAS_OPTIONS,
+ 12, 0xf0, 1, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 7, 0,
+ NPC_S_KPU16_PPTP_DATA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP,
+ NPC_F_LD_L_TCP_HAS_OPTIONS,
+ 12, 0xf0, 1, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 7, 0,
+ NPC_S_KPU16_TCP_DATA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP,
+ NPC_F_LD_L_TCP_UNK_PORT_HAS_OPTIONS,
+ 12, 0xf0, 1, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 2, 0, 0,
+ NPC_S_KPU9_VXLAN, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 2, 0, 0,
+ NPC_S_KPU9_VXLANGPE, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 2, 0, 0,
+ NPC_S_KPU9_GENEVE, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 2, 0, 0,
+ NPC_S_KPU9_GTPC, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 2, 0, 0,
+ NPC_S_KPU9_GTPU, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_KPU16_UDP_PTP, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_KPU16_UDP_PTP, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU9_TU_MPLS_IN_UDP, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU9_ESP, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU9_ESP, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 7, 0,
+ NPC_S_KPU16_UDP_DATA, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_SCTP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_ICMP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_IGMP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_ICMP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_AH,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 2, 0,
+ NPC_S_KPU11_TU_ETHER, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_NVGRE,
+ NPC_F_LD_L_GRE_NVGRE,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LD, NPC_EC_NVGRE,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LD, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 4, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_KEY,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_SEQ,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_KEY,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_SEQ,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_KEY_SEQ,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_KEY_SEQ,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 4, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_KEY,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_SEQ,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_KEY,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_SEQ,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_KEY_SEQ,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_KEY_SEQ,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU9_TU_NSH_IN_GRE, 4, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU9_TU_NSH_IN_GRE, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU9_TU_NSH_IN_GRE, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_KEY,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU9_TU_NSH_IN_GRE, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_SEQ,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU9_TU_NSH_IN_GRE, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_KEY,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU9_TU_NSH_IN_GRE, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_SEQ,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU9_TU_NSH_IN_GRE, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_KEY_SEQ,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU9_TU_NSH_IN_GRE, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_KEY_SEQ,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 3, 0,
+ NPC_S_KPU12_TU_IP, 4, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 3, 0,
+ NPC_S_KPU12_TU_IP, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 3, 0,
+ NPC_S_KPU12_TU_IP, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_KEY,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 3, 0,
+ NPC_S_KPU12_TU_IP, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_SEQ,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 3, 0,
+ NPC_S_KPU12_TU_IP, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_KEY,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 3, 0,
+ NPC_S_KPU12_TU_IP, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_SEQ,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 3, 0,
+ NPC_S_KPU12_TU_IP, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_KEY_SEQ,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 3, 0,
+ NPC_S_KPU12_TU_IP, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_KEY_SEQ,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 3, 0,
+ NPC_S_KPU12_TU_IP6, 4, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 3, 0,
+ NPC_S_KPU12_TU_IP6, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 3, 0,
+ NPC_S_KPU12_TU_IP6, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_KEY,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 3, 0,
+ NPC_S_KPU12_TU_IP6, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_SEQ,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 3, 0,
+ NPC_S_KPU12_TU_IP6, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_KEY,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 3, 0,
+ NPC_S_KPU12_TU_IP6, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_SEQ,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 3, 0,
+ NPC_S_KPU12_TU_IP6, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_KEY_SEQ,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 3, 0,
+ NPC_S_KPU12_TU_IP6, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_KEY_SEQ,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_ROUTE,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_UNK_PROTO,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LD, NPC_EC_GRE,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LD, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU11_TU_PPP, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_VER1,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU11_TU_PPP, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_VER1_HAS_SEQ,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU11_TU_PPP, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_VER1_HAS_ACK,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU11_TU_PPP, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_VER1_HAS_SEQ_ACK,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_VER1_UNK_PROTO,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LD, NPC_EC_GRE_VER1,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LD, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LD, NPC_EC_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LD, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+};
+
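A pattern worth calling out in the GRE entries just above: they differ mainly in ptr_advance, which steps through 4, 8, 12 and 16 bytes depending on which NPC_F_LD_L_GRE_HAS_* flags are set. That is just the GRE header arithmetic from RFC 2784/2890: a 4-byte base header, plus 4 bytes for each of the checksum (with its reserved word), key and sequence-number options. A minimal standalone sketch of that calculation (the helper name and flag macros are mine, not the driver's):

    #include <stdint.h>

    /* GRE flag bits in the first header byte (RFC 2784/2890). */
    #define GRE_FLAG_CSUM  0x80   /* C: checksum + reserved word present */
    #define GRE_FLAG_KEY   0x20   /* K: key word present                 */
    #define GRE_FLAG_SEQ   0x10   /* S: sequence number word present     */

    /* Header length the parser must skip: 4-byte base plus 4 per option. */
    static inline unsigned int gre_hdr_len(uint8_t flags)
    {
        unsigned int len = 4;

        if (flags & GRE_FLAG_CSUM)
            len += 4;
        if (flags & GRE_FLAG_KEY)
            len += 4;
        if (flags & GRE_FLAG_SEQ)
            len += 4;
        return len;   /* 4, 8, 12 or 16 -- the ptr_advance values above */
    }

For example, gre_hdr_len(GRE_FLAG_CSUM | GRE_FLAG_KEY) returns 12, matching the NPC_F_LD_L_GRE_HAS_CSUM_KEY entries.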
+static struct npc_kpu_profile_action kpu9_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU10_TU_MPLS_PL, 4, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_MPLS_IN_GRE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU10_TU_MPLS_PL, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_MPLS_IN_GRE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU10_TU_MPLS_PL, 12, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_MPLS_IN_GRE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 4, 0, 0, 0,
+ NPC_S_KPU10_TU_MPLS, 12, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_MPLS_IN_GRE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU10_TU_MPLS_PL, 4, 1,
+ NPC_LID_LD, NPC_LT_LD_TU_MPLS_IN_NSH,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU10_TU_MPLS_PL, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_TU_MPLS_IN_NSH,
+ NPC_F_LD_L_MPLS_2_LABELS,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU10_TU_MPLS_PL, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_TU_MPLS_IN_NSH,
+ NPC_F_LD_L_MPLS_3_LABELS,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 4, 0, 0, 0,
+ NPC_S_KPU10_TU_MPLS, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_TU_MPLS_IN_NSH,
+ NPC_F_LD_L_MPLS_4_LABELS,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU10_TU_MPLS_PL, 4, 1,
+ NPC_LID_LD, NPC_LT_LD_TU_MPLS_IN_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU10_TU_MPLS_PL, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_TU_MPLS_IN_IP,
+ NPC_F_LD_L_MPLS_2_LABELS,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU10_TU_MPLS_PL, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_TU_MPLS_IN_IP,
+ NPC_F_LD_L_MPLS_3_LABELS,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 4, 0, 0, 0,
+ NPC_S_KPU10_TU_MPLS, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_TU_MPLS_IN_IP,
+ NPC_F_LD_L_MPLS_4_LABELS,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU12_TU_IP, 0, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_NSH_IN_GRE,
+ 0,
+ 1, 0x3f, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU12_TU_IP6, 0, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_NSH_IN_GRE,
+ 0,
+ 1, 0x3f, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 1, 0,
+ NPC_S_KPU11_TU_ETHER, 0, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_NSH_IN_GRE,
+ 0,
+ 1, 0x3f, 0, 2,
+ },
+ {
+ NPC_ERRLEV_LE, NPC_EC_NSH_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_NSH_IN_GRE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 1, 0,
+ NPC_S_KPU11_TU_ETHER, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_VXLAN,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 1, 0,
+ NPC_S_KPU11_TU_ETHER, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_VXLAN,
+ NPC_F_LE_L_VXLAN_NOVNI,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LE, NPC_EC_VXLAN,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LE, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU12_TU_IP, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_VXLANGPE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU12_TU_IP6, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_VXLANGPE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 1, 0,
+ NPC_S_KPU11_TU_ETHER, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_VXLANGPE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU10_TU_NSH_IN_VXLANGPE, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_VXLANGPE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU10_TU_MPLS_IN_VXLANGPE, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_VXLANGPE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU12_TU_IP, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_VXLANGPE,
+ NPC_F_LE_L_VXLANGPE_NOVNI,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU12_TU_IP6, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_VXLANGPE,
+ NPC_F_LE_L_VXLANGPE_NOVNI,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 1, 0,
+ NPC_S_KPU11_TU_ETHER, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_VXLANGPE,
+ NPC_F_LE_L_VXLANGPE_NOVNI,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU10_TU_NSH_IN_VXLANGPE, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_VXLANGPE,
+ NPC_F_LE_L_VXLANGPE_NOVNI,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU10_TU_MPLS_IN_VXLANGPE, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_VXLANGPE,
+ NPC_F_LE_L_VXLANGPE_NOVNI,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LE, NPC_LT_LE_VXLANGPE,
+ NPC_F_LE_L_VXLANGPE_UNK,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LE, NPC_LT_LE_VXLANGPE,
+ NPC_F_LE_L_VXLANGPE_NONP,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 1, 0,
+ NPC_S_KPU11_TU_ETHER, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_GENEVE,
+ 0,
+ 0, 0x3f, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 1, 0,
+ NPC_S_KPU11_TU_ETHER, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_GENEVE,
+ NPC_F_LE_L_GENEVE_OAM,
+ 0, 0x3f, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 1, 0,
+ NPC_S_KPU11_TU_ETHER, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_GENEVE,
+ NPC_F_LE_L_GENEVE_CRI_OPT,
+ 0, 0x3f, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 1, 0,
+ NPC_S_KPU11_TU_ETHER, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_GENEVE,
+ NPC_F_LE_L_GENEVE_OAM_CRI_OPT,
+ 0, 0x3f, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU12_TU_IP, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_GENEVE,
+ 0,
+ 0, 0x3f, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU12_TU_IP, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_GENEVE,
+ NPC_F_LE_L_GENEVE_OAM,
+ 0, 0x3f, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU12_TU_IP, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_GENEVE,
+ NPC_F_LE_L_GENEVE_CRI_OPT,
+ 0, 0x3f, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU12_TU_IP, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_GENEVE,
+ NPC_F_LE_L_GENEVE_OAM_CRI_OPT,
+ 0, 0x3f, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU12_TU_IP6, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_GENEVE,
+ 0,
+ 0, 0x3f, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU12_TU_IP6, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_GENEVE,
+ NPC_F_LE_L_GENEVE_OAM,
+ 0, 0x3f, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU12_TU_IP6, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_GENEVE,
+ NPC_F_LE_L_GENEVE_CRI_OPT,
+ 0, 0x3f, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU12_TU_IP6, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_GENEVE,
+ NPC_F_LE_L_GENEVE_OAM_CRI_OPT,
+ 0, 0x3f, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LE, NPC_LT_LE_GTPC,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LE, NPC_LT_LE_GTPU,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU10_TU_MPLS_PL, 4, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_MPLS_IN_UDP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU10_TU_MPLS_PL, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_MPLS_IN_UDP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU10_TU_MPLS_PL, 12, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_MPLS_IN_UDP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 4, 0, 0, 0,
+ NPC_S_KPU10_TU_MPLS, 12, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_MPLS_IN_UDP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LE, NPC_LT_LE_ESP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LE, NPC_EC_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LE, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+};
+
+static struct npc_kpu_profile_action kpu10_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 1, 0,
+ NPC_S_KPU12_TU_IP, 4, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 1, 0,
+ NPC_S_KPU12_TU_IP6, 4, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU11_TU_ETHER, 8, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU11_TU_ETHER, 4, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LE, NPC_EC_MPLS_2MANY,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 1, 0,
+ NPC_S_KPU12_TU_IP, 0, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 1, 0,
+ NPC_S_KPU12_TU_IP6, 0, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU11_TU_ETHER, 4, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU11_TU_ETHER, 0, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU11_TU_MPLS_PL, 4, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_MPLS_IN_VXLANGPE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU11_TU_MPLS_PL, 8, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_MPLS_IN_VXLANGPE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU11_TU_MPLS_PL, 12, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_MPLS_IN_VXLANGPE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 4, 0, 0, 0,
+ NPC_S_KPU11_TU_MPLS, 12, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_MPLS_IN_VXLANGPE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 1, 0,
+ NPC_S_KPU12_TU_IP, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_NSH_IN_VXLANGPE,
+ 0,
+ 1, 0x3f, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 1, 0,
+ NPC_S_KPU12_TU_IP6, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_NSH_IN_VXLANGPE,
+ 0,
+ 1, 0x3f, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU11_TU_ETHER_IN_NSH, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_NSH_IN_VXLANGPE,
+ 0,
+ 1, 0x3f, 0, 2,
+ },
+ {
+ NPC_ERRLEV_LF, NPC_EC_NSH_UNK,
+ 6, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_NSH_IN_VXLANGPE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LE, NPC_EC_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+};
+
+static struct npc_kpu_profile_action kpu11_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU12_TU_IP, 14, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU12_TU_IP6, 14, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU12_TU_ARP, 14, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU12_TU_IP, 18, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_L_WITH_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU12_TU_IP6, 18, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_L_WITH_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU12_TU_ARP, 18, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_L_WITH_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_U_UNK_ETYPE | NPC_F_LF_L_WITH_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU12_TU_IP, 22, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_L_WITH_STAG_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU12_TU_IP6, 22, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_L_WITH_STAG_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU12_TU_ARP, 22, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_L_WITH_STAG_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_U_UNK_ETYPE | NPC_F_LF_L_WITH_STAG_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU12_TU_IP, 18, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_L_WITH_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU12_TU_IP6, 18, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_L_WITH_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU12_TU_ARP, 18, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_L_WITH_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_U_UNK_ETYPE | NPC_F_LF_L_WITH_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU12_TU_IP, 22, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_L_WITH_QINQ_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU12_TU_IP6, 22, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_L_WITH_QINQ_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU12_TU_ARP, 22, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_L_WITH_QINQ_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_U_UNK_ETYPE | NPC_F_LF_L_WITH_QINQ_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU12_TU_IP, 18, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_L_WITH_QINQ,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU12_TU_IP6, 18, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_L_WITH_QINQ,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU12_TU_ARP, 18, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_L_WITH_QINQ,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_U_UNK_ETYPE | NPC_F_LF_L_WITH_QINQ,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_U_UNK_ETYPE,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_PPP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU12_TU_IP, 4, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU12_TU_IP6, 4, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LF, NPC_EC_MPLS_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LF, NPC_EC_MPLS_2MANY,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU12_TU_IP, 0, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU12_TU_IP6, 0, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LF, NPC_EC_MPLS_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_ETHER_IN_NSH,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LF, NPC_EC_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+};
+
+static struct npc_kpu_profile_action kpu12_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 2, 0,
+ NPC_S_KPU15_TU_TCP, 20, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_UDP, 20, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_SCTP, 20, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_ICMP, 20, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_IGMP, 20, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_ESP, 20, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_AH, 20, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP,
+ NPC_F_LG_U_UNK_IP_PROTO,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 2, 0,
+ NPC_S_KPU15_TU_TCP, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP,
+ NPC_F_LG_U_IP_HAS_OPTIONS,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_UDP, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP,
+ NPC_F_LG_U_IP_HAS_OPTIONS,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_SCTP, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP,
+ NPC_F_LG_U_IP_HAS_OPTIONS,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_ICMP, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP,
+ NPC_F_LG_U_IP_HAS_OPTIONS,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_IGMP, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP,
+ NPC_F_LG_U_IP_HAS_OPTIONS,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_ESP, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP,
+ NPC_F_LG_U_IP_HAS_OPTIONS,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_AH, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP,
+ NPC_F_LG_U_IP_HAS_OPTIONS,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP,
+ NPC_F_LG_U_IP_HAS_OPTIONS | NPC_F_LG_U_UNK_IP_PROTO,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LF, NPC_EC_IP_VER,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_ARP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 2, 0,
+ NPC_S_KPU15_TU_TCP, 40, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_UDP, 40, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_SCTP, 40, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_ICMP, 40, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_ICMP6, 40, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_ESP, 40, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_AH, 40, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU13_TU_IP6_EXT, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP6,
+ NPC_F_LG_U_IP6_HAS_EXT,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LF, NPC_EC_IP6_VER,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LF, NPC_EC_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LG, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+};
+
+static struct npc_kpu_profile_action kpu13_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+};
+
+static struct npc_kpu_profile_action kpu14_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+};
+
+static struct npc_kpu_profile_action kpu15_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ {
+ NPC_ERRLEV_LG, NPC_EC_TCP_FLAGS_FIN_ONLY,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_TCP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LG, NPC_EC_TCP_FLAGS_ZERO,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_TCP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LG, NPC_EC_TCP_FLAGS_RST_FIN,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_TCP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LG, NPC_EC_TCP_FLAGS_URG_SYN,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_TCP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LG, NPC_EC_TCP_FLAGS_RST_SYN,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_TCP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LG, NPC_EC_TCP_FLAGS_SYN_FIN,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_TCP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU16_HTTP_DATA, 20, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_TCP,
+ NPC_F_LH_L_TCP_HTTP,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU16_HTTPS_DATA, 20, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_TCP,
+ NPC_F_LH_L_TCP_HTTP,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU16_PPTP_DATA, 20, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_TCP,
+ NPC_F_LH_L_TCP_PPTP,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU16_TCP_DATA, 20, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_TCP,
+ NPC_F_LH_L_TCP_UNK_PORT,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU16_HTTP_DATA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_TCP,
+ NPC_F_LH_U_TCP_HAS_OPTIONS | NPC_F_LH_L_TCP_HTTP,
+ 12, 0xf0, 1, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU16_HTTPS_DATA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_TCP,
+ NPC_F_LH_U_TCP_HAS_OPTIONS | NPC_F_LH_L_TCP_HTTPS,
+ 12, 0xf0, 1, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU16_PPTP_DATA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_TCP,
+ NPC_F_LH_U_TCP_HAS_OPTIONS | NPC_F_LH_L_TCP_PPTP,
+ 12, 0xf0, 1, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU16_TCP_DATA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_TCP,
+ NPC_F_LH_U_TCP_HAS_OPTIONS | NPC_F_LH_L_TCP_UNK_PORT,
+ 12, 0xf0, 1, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU16_UDP_DATA, 8, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_UDP,
+ NPC_F_LH_L_UDP_UNK_PORT,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_SCTP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_ICMP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_IGMP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_ICMP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_ESP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_AH,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LG, NPC_EC_L4,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LH, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+};
+
+static struct npc_kpu_profile_action kpu16_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LH, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LH, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LH, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LH, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LH, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LH, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+};
+
+static struct npc_kpu_profile npc_kpu_profiles[] = {
+ {
+ ARRAY_SIZE(kpu1_cam_entries),
+ ARRAY_SIZE(kpu1_action_entries),
+ &kpu1_cam_entries[0],
+ &kpu1_action_entries[0],
+ },
+ {
+ ARRAY_SIZE(kpu2_cam_entries),
+ ARRAY_SIZE(kpu2_action_entries),
+ &kpu2_cam_entries[0],
+ &kpu2_action_entries[0],
+ },
+ {
+ ARRAY_SIZE(kpu3_cam_entries),
+ ARRAY_SIZE(kpu3_action_entries),
+ &kpu3_cam_entries[0],
+ &kpu3_action_entries[0],
+ },
+ {
+ ARRAY_SIZE(kpu4_cam_entries),
+ ARRAY_SIZE(kpu4_action_entries),
+ &kpu4_cam_entries[0],
+ &kpu4_action_entries[0],
+ },
+ {
+ ARRAY_SIZE(kpu5_cam_entries),
+ ARRAY_SIZE(kpu5_action_entries),
+ &kpu5_cam_entries[0],
+ &kpu5_action_entries[0],
+ },
+ {
+ ARRAY_SIZE(kpu6_cam_entries),
+ ARRAY_SIZE(kpu6_action_entries),
+ &kpu6_cam_entries[0],
+ &kpu6_action_entries[0],
+ },
+ {
+ ARRAY_SIZE(kpu7_cam_entries),
+ ARRAY_SIZE(kpu7_action_entries),
+ &kpu7_cam_entries[0],
+ &kpu7_action_entries[0],
+ },
+ {
+ ARRAY_SIZE(kpu8_cam_entries),
+ ARRAY_SIZE(kpu8_action_entries),
+ &kpu8_cam_entries[0],
+ &kpu8_action_entries[0],
+ },
+ {
+ ARRAY_SIZE(kpu9_cam_entries),
+ ARRAY_SIZE(kpu9_action_entries),
+ &kpu9_cam_entries[0],
+ &kpu9_action_entries[0],
+ },
+ {
+ ARRAY_SIZE(kpu10_cam_entries),
+ ARRAY_SIZE(kpu10_action_entries),
+ &kpu10_cam_entries[0],
+ &kpu10_action_entries[0],
+ },
+ {
+ ARRAY_SIZE(kpu11_cam_entries),
+ ARRAY_SIZE(kpu11_action_entries),
+ &kpu11_cam_entries[0],
+ &kpu11_action_entries[0],
+ },
+ {
+ ARRAY_SIZE(kpu12_cam_entries),
+ ARRAY_SIZE(kpu12_action_entries),
+ &kpu12_cam_entries[0],
+ &kpu12_action_entries[0],
+ },
+ {
+ ARRAY_SIZE(kpu13_cam_entries),
+ ARRAY_SIZE(kpu13_action_entries),
+ &kpu13_cam_entries[0],
+ &kpu13_action_entries[0],
+ },
+ {
+ ARRAY_SIZE(kpu14_cam_entries),
+ ARRAY_SIZE(kpu14_action_entries),
+ &kpu14_cam_entries[0],
+ &kpu14_action_entries[0],
+ },
+ {
+ ARRAY_SIZE(kpu15_cam_entries),
+ ARRAY_SIZE(kpu15_action_entries),
+ &kpu15_cam_entries[0],
+ &kpu15_action_entries[0],
+ },
+ {
+ ARRAY_SIZE(kpu16_cam_entries),
+ ARRAY_SIZE(kpu16_action_entries),
+ &kpu16_cam_entries[0],
+ &kpu16_action_entries[0],
+ },
+};
+
+static struct npc_lt_def_cfg npc_lt_defaults = {
+ .rx_ol2 = {
+ .lid = NPC_LID_LA,
+ .ltype_match = NPC_LT_LA_ETHER,
+ .ltype_mask = 0x0F,
+ },
+ .ovlan = {
+ .lid = NPC_LID_LB,
+ .ltype_match = NPC_LT_LB_CTAG,
+ .ltype_mask = 0x0F,
+ },
+ .ivlan = {
+ .lid = NPC_LID_LB,
+ .ltype_match = NPC_LT_LB_STAG_QINQ,
+ .ltype_mask = 0x0F,
+ },
+ .rx_oip4 = {
+ .lid = NPC_LID_LC,
+ .ltype_match = NPC_LT_LC_IP,
+ .ltype_mask = 0x0E,
+ },
+ .rx_iip4 = {
+ .lid = NPC_LID_LG,
+ .ltype_match = NPC_LT_LG_TU_IP,
+ .ltype_mask = 0x0F,
+ },
+ .rx_oip6 = {
+ .lid = NPC_LID_LC,
+ .ltype_match = NPC_LT_LC_IP6,
+ .ltype_mask = 0x0E,
+ },
+ .rx_iip6 = {
+ .lid = NPC_LID_LG,
+ .ltype_match = NPC_LT_LG_TU_IP6,
+ .ltype_mask = 0x0F,
+ },
+ .rx_otcp = {
+ .lid = NPC_LID_LD,
+ .ltype_match = NPC_LT_LD_TCP,
+ .ltype_mask = 0x0F,
+ },
+ .rx_itcp = {
+ .lid = NPC_LID_LH,
+ .ltype_match = NPC_LT_LH_TU_TCP,
+ .ltype_mask = 0x0F,
+ },
+ .rx_oudp = {
+ .lid = NPC_LID_LD,
+ .ltype_match = NPC_LT_LD_UDP,
+ .ltype_mask = 0x0F,
+ },
+ .rx_iudp = {
+ .lid = NPC_LID_LH,
+ .ltype_match = NPC_LT_LH_TU_UDP,
+ .ltype_mask = 0x0F,
+ },
+ .rx_osctp = {
+ .lid = NPC_LID_LD,
+ .ltype_match = NPC_LT_LD_SCTP,
+ .ltype_mask = 0x0F,
+ },
+ .rx_isctp = {
+ .lid = NPC_LID_LH,
+ .ltype_match = NPC_LT_LH_TU_SCTP,
+ .ltype_mask = 0x0F,
+ },
+ .rx_ipsec = {
+ {
+ .lid = NPC_LID_LE,
+ .ltype_match = NPC_LT_LE_ESP,
+ .ltype_mask = 0x0F,
+ },
+ {
+ .spi_offset = 8,
+ .lid = NPC_LID_LH,
+ .ltype_match = NPC_LT_LH_TU_ESP,
+ .ltype_mask = 0x0F,
+ },
+ },
+ .pck_ol2 = {
+ .lid = NPC_LID_LA,
+ .ltype_match = NPC_LT_LA_ETHER,
+ .ltype_mask = 0x0F,
+ },
+ .pck_oip4 = {
+ .lid = NPC_LID_LC,
+ .ltype_match = NPC_LT_LC_IP,
+ .ltype_mask = 0x0E,
+ },
+ .pck_iip4 = {
+ .lid = NPC_LID_LG,
+ .ltype_match = NPC_LT_LG_TU_IP,
+ .ltype_mask = 0x0F,
+ },
+ .rx_apad0 = {
+ .valid = 0,
+ .lid = NPC_LID_LC,
+ .ltype_match = NPC_LT_LC_IP6,
+ .ltype_mask = 0x0F,
+ },
+ .rx_apad1 = {
+ .valid = 0,
+ .lid = NPC_LID_LC,
+ .ltype_match = NPC_LT_LC_IP6,
+ .ltype_mask = 0x0F,
+ },
+ .rx_et = {
+ {
+ .lid = NPC_LID_LB,
+ .ltype_match = NPC_LT_NA,
+ .ltype_mask = 0x0,
+ },
+ {
+ .lid = NPC_LID_LB,
+ .ltype_match = NPC_LT_NA,
+ .ltype_mask = 0x0,
+ },
+ },
+};
+
+static struct npc_mcam_kex npc_mkex_default = {
+ .mkex_sign = MKEX_SIGN,
+ .name = "default",
+ .kpu_version = NPC_KPU_PROFILE_VER,
+ .keyx_cfg = {
+ /* nibble: LA..LE (ltype only) + Error code + Channel */
+ [NIX_INTF_RX] = ((u64)NPC_MCAM_KEY_X2 << 32) | NPC_PARSE_NIBBLE_INTF_RX |
+ (u64)NPC_EXACT_NIBBLE_HIT,
+ /* nibble: LA..LE (ltype only) */
+ [NIX_INTF_TX] = ((u64)NPC_MCAM_KEY_X2 << 32) | NPC_PARSE_NIBBLE_INTF_TX,
+ },
+ .intf_lid_lt_ld = {
+ /* Default RX MCAM KEX profile */
+ [NIX_INTF_RX] = {
+ [NPC_LID_LA] = {
+ /* Layer A: Ethernet: */
+ [NPC_LT_LA_ETHER] = {
+ /* DMAC: 6 bytes, KW1[55:8] */
+ KEX_LD_CFG(0x05, 0x0, 0x1, 0x0, NPC_KEXOF_DMAC),
+ /* Ethertype: 2 bytes, KW0[55:40] */
+ KEX_LD_CFG(0x01, 0xc, 0x1, 0x0, 0x5),
+ },
+ /* Layer A: HiGig2: */
+ [NPC_LT_LA_HIGIG2_ETHER] = {
+ /* Classification: 2 bytes, KW1[23:8] */
+ KEX_LD_CFG(0x01, 0x8, 0x1, 0x0, NPC_KEXOF_DMAC),
+ /* VID: 2 bytes, KW1[39:24] */
+ KEX_LD_CFG(0x01, 0xc, 0x1, 0x0,
+ NPC_KEXOF_DMAC + 2),
+ },
+ },
+ [NPC_LID_LB] = {
+ /* Layer B: Single VLAN (CTAG) */
+ [NPC_LT_LB_CTAG] = {
+ /* CTAG VLAN: 2 bytes, KW1[7:0], KW0[63:56] */
+ KEX_LD_CFG(0x01, 0x2, 0x1, 0x0, 0x7),
+ /* Ethertype: 2 bytes, KW0[55:40] */
+ KEX_LD_CFG(0x01, 0x4, 0x1, 0x0, 0x5),
+ },
+ /* Layer B: Stacked VLAN (STAG|QinQ) */
+ [NPC_LT_LB_STAG_QINQ] = {
+ /* Outer VLAN: 2 bytes, KW1[7:0], KW0[63:56] */
+ KEX_LD_CFG(0x01, 0x2, 0x1, 0x0, 0x7),
+ /* Ethertype: 2 bytes, KW0[55:40] */
+ KEX_LD_CFG(0x01, 0x8, 0x1, 0x0, 0x5),
+ },
+ [NPC_LT_LB_FDSA] = {
+ /* SWITCH PORT: 1 byte, KW0[63:56] */
+ KEX_LD_CFG(0x0, 0x1, 0x1, 0x0, 0x7),
+ /* Ethertype: 2 bytes, KW0[55:40] */
+ KEX_LD_CFG(0x01, 0x4, 0x1, 0x0, 0x5),
+ },
+ },
+ [NPC_LID_LC] = {
+ /* Layer C: IPv4 */
+ [NPC_LT_LC_IP] = {
+ /* SIP+DIP: 8 bytes, KW2[63:0] */
+ KEX_LD_CFG(0x07, 0xc, 0x1, 0x0, 0x10),
+ /* TOS: 1 byte, KW1[63:56] */
+ KEX_LD_CFG(0x0, 0x1, 0x1, 0x0, 0xf),
+ },
+ /* Layer C: IPv6 */
+ [NPC_LT_LC_IP6] = {
+ /* Everything up to SADDR: 8 bytes, KW2[63:0] */
+ KEX_LD_CFG(0x07, 0x0, 0x1, 0x0, 0x10),
+ },
+ },
+ [NPC_LID_LD] = {
+ /* Layer D: UDP */
+ [NPC_LT_LD_UDP] = {
+ /* SPORT+DPORT: 4 bytes, KW3[31:0] */
+ KEX_LD_CFG(0x3, 0x0, 0x1, 0x0, 0x18),
+ },
+ /* Layer D: TCP */
+ [NPC_LT_LD_TCP] = {
+ /* SPORT+DPORT: 4 bytes, KW3[31:0] */
+ KEX_LD_CFG(0x3, 0x0, 0x1, 0x0, 0x18),
+ },
+ },
+ },
+
+ /* Default TX MCAM KEX profile */
+ [NIX_INTF_TX] = {
+ [NPC_LID_LA] = {
+ /* Layer A: NIX_INST_HDR_S + Ethernet */
+ /* NIX appends 8 bytes of NIX_INST_HDR_S at the
+ * start of each TX packet supplied to NPC.
+ */
+ [NPC_LT_LA_IH_NIX_ETHER] = {
+ /* PF_FUNC: 2 bytes, KW0[47:32] */
+ KEX_LD_CFG(0x01, 0x0, 0x1, 0x0, 0x4),
+ /* DMAC: 6 bytes, KW1[63:16] */
+ KEX_LD_CFG(0x05, 0x8, 0x1, 0x0, 0xa),
+ },
+ /* Layer A: HiGig2: */
+ [NPC_LT_LA_IH_NIX_HIGIG2_ETHER] = {
+ /* PF_FUNC: 2 bytes, KW0[47:32] */
+ KEX_LD_CFG(0x01, 0x0, 0x1, 0x0, 0x4),
+ /* VID: 2 bytes, KW1[31:16] */
+ KEX_LD_CFG(0x01, 0x10, 0x1, 0x0, 0xa),
+ },
+ },
+ [NPC_LID_LB] = {
+ /* Layer B: Single VLAN (CTAG) */
+ [NPC_LT_LB_CTAG] = {
+ /* CTAG VLAN[2..3] KW0[63:48] */
+ KEX_LD_CFG(0x01, 0x2, 0x1, 0x0, 0x6),
+ /* CTAG VLAN[2..3] KW1[15:0] */
+ KEX_LD_CFG(0x01, 0x4, 0x1, 0x0, 0x8),
+ },
+ /* Layer B: Stacked VLAN (STAG|QinQ) */
+ [NPC_LT_LB_STAG_QINQ] = {
+ /* Outer VLAN: 2 bytes, KW0[63:48] */
+ KEX_LD_CFG(0x01, 0x2, 0x1, 0x0, 0x6),
+ /* Outer VLAN: 2 Bytes, KW1[15:0] */
+ KEX_LD_CFG(0x01, 0x8, 0x1, 0x0, 0x8),
+ },
+ },
+ [NPC_LID_LC] = {
+ /* Layer C: IPv4 */
+ [NPC_LT_LC_IP] = {
+ /* SIP+DIP: 8 bytes, KW2[63:0] */
+ KEX_LD_CFG(0x07, 0xc, 0x1, 0x0, 0x10),
+ },
+ /* Layer C: IPv6 */
+ [NPC_LT_LC_IP6] = {
+ /* Everything up to SADDR: 8 bytes, KW2[63:0] */
+ KEX_LD_CFG(0x07, 0x0, 0x1, 0x0, 0x10),
+ },
+ },
+ [NPC_LID_LD] = {
+ /* Layer D: UDP */
+ [NPC_LT_LD_UDP] = {
+ /* SPORT+DPORT: 4 bytes, KW3[31:0] */
+ KEX_LD_CFG(0x3, 0x0, 0x1, 0x0, 0x18),
+ },
+ /* Layer D: TCP */
+ [NPC_LT_LD_TCP] = {
+ /* SPORT+DPORT: 4 bytes, KW3[31:0] */
+ KEX_LD_CFG(0x3, 0x0, 0x1, 0x0, 0x18),
+ },
+ },
+ },
+ },
+};
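+
+/* Reading the entries above: each KEX_LD_CFG() line appears to pack, in
+ * order, (number of bytes - 1) to extract, the byte offset within the
+ * parsed header, an enable bit, a flags-enable bit and the destination
+ * offset within the MCAM search key. For example, the RX Ethernet DMAC
+ * entry KEX_LD_CFG(0x05, 0x0, 0x1, 0x0, NPC_KEXOF_DMAC) extracts
+ * 0x05 + 1 = 6 bytes starting at header offset 0 (the destination MAC)
+ * into the key at offset NPC_KEXOF_DMAC, matching the "DMAC: 6 bytes"
+ * annotation, while the Ethertype entry pulls 2 bytes from offset 0xc.
+ */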
+
+#endif /* NPC_PROFILE_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
new file mode 100644
index 000000000..ffbd22797
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
@@ -0,0 +1,645 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell PTP driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/hrtimer.h>
+#include <linux/ktime.h>
+
+#include "mbox.h"
+#include "ptp.h"
+#include "rvu.h"
+
+#define DRV_NAME "Marvell PTP Driver"
+
+#define PCI_DEVID_OCTEONTX2_PTP 0xA00C
+#define PCI_SUBSYS_DEVID_OCTX2_98xx_PTP 0xB100
+#define PCI_SUBSYS_DEVID_OCTX2_96XX_PTP 0xB200
+#define PCI_SUBSYS_DEVID_OCTX2_95XX_PTP 0xB300
+#define PCI_SUBSYS_DEVID_OCTX2_95XXN_PTP 0xB400
+#define PCI_SUBSYS_DEVID_OCTX2_95MM_PTP 0xB500
+#define PCI_SUBSYS_DEVID_OCTX2_95XXO_PTP 0xB600
+#define PCI_DEVID_OCTEONTX2_RST 0xA085
+#define PCI_DEVID_CN10K_PTP 0xA09E
+#define PCI_SUBSYS_DEVID_CN10K_A_PTP 0xB900
+#define PCI_SUBSYS_DEVID_CNF10K_A_PTP 0xBA00
+#define PCI_SUBSYS_DEVID_CNF10K_B_PTP 0xBC00
+
+#define PCI_PTP_BAR_NO 0
+
+#define PTP_CLOCK_CFG 0xF00ULL
+#define PTP_CLOCK_CFG_PTP_EN BIT_ULL(0)
+#define PTP_CLOCK_CFG_EXT_CLK_EN BIT_ULL(1)
+#define PTP_CLOCK_CFG_EXT_CLK_IN_MASK GENMASK_ULL(7, 2)
+#define PTP_CLOCK_CFG_TSTMP_EDGE BIT_ULL(9)
+#define PTP_CLOCK_CFG_TSTMP_EN BIT_ULL(8)
+#define PTP_CLOCK_CFG_TSTMP_IN_MASK GENMASK_ULL(15, 10)
+#define PTP_CLOCK_CFG_ATOMIC_OP_MASK GENMASK_ULL(28, 26)
+#define PTP_CLOCK_CFG_PPS_EN BIT_ULL(30)
+#define PTP_CLOCK_CFG_PPS_INV BIT_ULL(31)
+
+#define PTP_PPS_HI_INCR 0xF60ULL
+#define PTP_PPS_LO_INCR 0xF68ULL
+#define PTP_PPS_THRESH_HI 0xF58ULL
+
+#define PTP_CLOCK_LO 0xF08ULL
+#define PTP_CLOCK_HI 0xF10ULL
+#define PTP_CLOCK_COMP 0xF18ULL
+#define PTP_TIMESTAMP 0xF20ULL
+#define PTP_CLOCK_SEC 0xFD0ULL
+#define PTP_SEC_ROLLOVER 0xFD8ULL
+/* Atomic update related CSRs */
+#define PTP_FRNS_TIMESTAMP 0xFE0ULL
+#define PTP_NXT_ROLLOVER_SET 0xFE8ULL
+#define PTP_CURR_ROLLOVER_SET 0xFF0ULL
+#define PTP_NANO_TIMESTAMP 0xFF8ULL
+#define PTP_SEC_TIMESTAMP 0x1000ULL
+
+#define CYCLE_MULT 1000
+
+#define is_rev_A0(ptp) (((ptp)->pdev->revision & 0x0F) == 0x0)
+#define is_rev_A1(ptp) (((ptp)->pdev->revision & 0x0F) == 0x1)
+
+/* PTP atomic update operation type */
+enum atomic_opcode {
+ ATOMIC_SET = 1,
+ ATOMIC_INC = 3,
+ ATOMIC_DEC = 4
+};
+
+static struct ptp *first_ptp_block;
+static const struct pci_device_id ptp_id_table[];
+
+static bool is_ptp_dev_cnf10ka(struct ptp *ptp)
+{
+ return ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CNF10K_A_PTP;
+}
+
+static bool is_ptp_dev_cn10ka(struct ptp *ptp)
+{
+ return ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_A_PTP;
+}
+
+static bool cn10k_ptp_errata(struct ptp *ptp)
+{
+ if ((is_ptp_dev_cn10ka(ptp) || is_ptp_dev_cnf10ka(ptp)) &&
+ (is_rev_A0(ptp) || is_rev_A1(ptp)))
+ return true;
+
+ return false;
+}
+
+static bool is_tstmp_atomic_update_supported(struct rvu *rvu)
+{
+ struct ptp *ptp = rvu->ptp;
+
+ if (is_rvu_otx2(rvu))
+ return false;
+
+ /* On older silicon variants of CN10K, the atomic update feature
+ * is not available.
+ */
+ if ((is_ptp_dev_cn10ka(ptp) || is_ptp_dev_cnf10ka(ptp)) &&
+ (is_rev_A0(ptp) || is_rev_A1(ptp)))
+ return false;
+
+ return true;
+}
+
+static enum hrtimer_restart ptp_reset_thresh(struct hrtimer *hrtimer)
+{
+ struct ptp *ptp = container_of(hrtimer, struct ptp, hrtimer);
+ ktime_t curr_ts = ktime_get();
+ ktime_t delta_ns, period_ns;
+ u64 ptp_clock_hi;
+
+ /* calculate the elapsed time since last restart */
+ delta_ns = ktime_to_ns(ktime_sub(curr_ts, ptp->last_ts));
+
+ /* If the ptp clock value has crossed 0.5 seconds,
+ * it is too late to update the pps threshold value, so
+ * update the threshold after 1 second.
+ */
+ ptp_clock_hi = readq(ptp->reg_base + PTP_CLOCK_HI);
+ if (ptp_clock_hi > 500000000) {
+ period_ns = ktime_set(0, (NSEC_PER_SEC + 100 - ptp_clock_hi));
+ } else {
+ writeq(500000000, ptp->reg_base + PTP_PPS_THRESH_HI);
+ period_ns = ktime_set(0, (NSEC_PER_SEC + 100 - delta_ns));
+ }
+
+ hrtimer_forward_now(hrtimer, period_ns);
+ ptp->last_ts = curr_ts;
+
+ return HRTIMER_RESTART;
+}
+
+static void ptp_hrtimer_start(struct ptp *ptp, ktime_t start_ns)
+{
+ ktime_t period_ns;
+
+ period_ns = ktime_set(0, (NSEC_PER_SEC + 100 - start_ns));
+ hrtimer_start(&ptp->hrtimer, period_ns, HRTIMER_MODE_REL);
+ ptp->last_ts = ktime_get();
+}
+
+static u64 read_ptp_tstmp_sec_nsec(struct ptp *ptp)
+{
+ u64 sec, sec1, nsec;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ptp->ptp_lock, flags);
+ sec = readq(ptp->reg_base + PTP_CLOCK_SEC) & 0xFFFFFFFFUL;
+ nsec = readq(ptp->reg_base + PTP_CLOCK_HI);
+ sec1 = readq(ptp->reg_base + PTP_CLOCK_SEC) & 0xFFFFFFFFUL;
+ /* check nsec rollover */
+ if (sec1 > sec) {
+ nsec = readq(ptp->reg_base + PTP_CLOCK_HI);
+ sec = sec1;
+ }
+ spin_unlock_irqrestore(&ptp->ptp_lock, flags);
+
+ return sec * NSEC_PER_SEC + nsec;
+}
+
+static u64 read_ptp_tstmp_nsec(struct ptp *ptp)
+{
+ return readq(ptp->reg_base + PTP_CLOCK_HI);
+}
+
+static u64 ptp_calc_adjusted_comp(u64 ptp_clock_freq)
+{
+ u64 comp, adj = 0, cycles_per_sec, ns_drift = 0;
+ u32 ptp_clock_nsec, cycle_time;
+ int cycle;
+
+ /* Errata:
+ * Issue #1: At the time of 1 sec rollover of the nano-second counter,
+ * the nano-second counter is set to 0. However, it should be set to
+ * (existing counter_value - 10^9).
+ *
+ * Issue #2: The nano-second counter rolls over at 0x3B9A_C9FF.
+ * It should roll over at 0x3B9A_CA00.
+ */
+
+ /* calculate ptp_clock_comp value */
+ comp = ((u64)1000000000ULL << 32) / ptp_clock_freq;
+ /* use CYCLE_MULT to avoid accuracy loss due to integer arithmetic */
+ cycle_time = NSEC_PER_SEC * CYCLE_MULT / ptp_clock_freq;
+ /* cycles per sec */
+ cycles_per_sec = ptp_clock_freq;
+
+ /* check whether ptp nanosecond counter rolls over early */
+ cycle = cycles_per_sec - 1;
+ ptp_clock_nsec = (cycle * comp) >> 32;
+ while (ptp_clock_nsec < NSEC_PER_SEC) {
+ if (ptp_clock_nsec == 0x3B9AC9FF)
+ goto calc_adj_comp;
+ cycle++;
+ ptp_clock_nsec = (cycle * comp) >> 32;
+ }
+ /* compute nanoseconds lost per second when nsec counter rolls over */
+ ns_drift = ptp_clock_nsec - NSEC_PER_SEC;
+ /* calculate ptp_clock_comp adjustment */
+ if (ns_drift > 0) {
+ adj = comp * ns_drift;
+ adj = adj / 1000000000ULL;
+ }
+ /* speed up the ptp clock to account for nanoseconds lost */
+ comp += adj;
+ return comp;
+
+calc_adj_comp:
+ /* slow down the ptp clock so it does not roll over early */
+ adj = comp * cycle_time;
+ adj = adj / 1000000000ULL;
+ adj = adj / CYCLE_MULT;
+ comp -= adj;
+
+ return comp;
+}
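+
+/* A rough numeric illustration of the compensation format used above:
+ * for a hypothetical 1 GHz coprocessor clock, comp = (10^9 << 32) / 10^9
+ * = 2^32, i.e. exactly 1.0 ns is added to the PTP counter per cycle, and
+ * for an 800 MHz clock comp = 1.25 * 2^32, i.e. 1.25 ns per cycle. The
+ * adjustments above then trim or add a small fraction of comp so that the
+ * nanosecond counter neither rolls over one count early nor drifts past
+ * 10^9 at the one second boundary.
+ */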
+
+struct ptp *ptp_get(void)
+{
+ struct ptp *ptp = first_ptp_block;
+
+ /* Check if the PTP block is present in hardware */
+ if (!pci_dev_present(ptp_id_table))
+ return ERR_PTR(-ENODEV);
+ /* Check if a driver is bound to the PTP block */
+ if (!ptp)
+ ptp = ERR_PTR(-EPROBE_DEFER);
+ else if (!IS_ERR(ptp))
+ pci_dev_get(ptp->pdev);
+
+ return ptp;
+}
+
+void ptp_put(struct ptp *ptp)
+{
+ if (!ptp)
+ return;
+
+ pci_dev_put(ptp->pdev);
+}
+
+static void ptp_atomic_update(struct ptp *ptp, u64 timestamp)
+{
+ u64 regval, curr_rollover_set, nxt_rollover_set;
+
+ /* First set up the nanoseconds and seconds counters */
+ writeq(timestamp, ptp->reg_base + PTP_NANO_TIMESTAMP);
+ writeq(0, ptp->reg_base + PTP_FRNS_TIMESTAMP);
+ writeq(timestamp / NSEC_PER_SEC,
+ ptp->reg_base + PTP_SEC_TIMESTAMP);
+
+ nxt_rollover_set = roundup(timestamp, NSEC_PER_SEC);
+ curr_rollover_set = nxt_rollover_set - NSEC_PER_SEC;
+ writeq(nxt_rollover_set, ptp->reg_base + PTP_NXT_ROLLOVER_SET);
+ writeq(curr_rollover_set, ptp->reg_base + PTP_CURR_ROLLOVER_SET);
+
+ /* Now, initiate atomic update */
+ regval = readq(ptp->reg_base + PTP_CLOCK_CFG);
+ regval &= ~PTP_CLOCK_CFG_ATOMIC_OP_MASK;
+ regval |= (ATOMIC_SET << 26);
+ writeq(regval, ptp->reg_base + PTP_CLOCK_CFG);
+}
+
+static void ptp_atomic_adjtime(struct ptp *ptp, s64 delta)
+{
+ bool neg_adj = false, atomic_inc_dec = false;
+ u64 regval, ptp_clock_hi;
+
+ if (delta < 0) {
+ delta = -delta;
+ neg_adj = true;
+ }
+
+ /* use atomic inc/dec when delta < 1 second */
+ if (delta < NSEC_PER_SEC)
+ atomic_inc_dec = true;
+
+ if (!atomic_inc_dec) {
+ ptp_clock_hi = readq(ptp->reg_base + PTP_CLOCK_HI);
+ if (neg_adj) {
+ if (ptp_clock_hi > delta)
+ ptp_clock_hi -= delta;
+ else
+ ptp_clock_hi = delta - ptp_clock_hi;
+ } else {
+ ptp_clock_hi += delta;
+ }
+ ptp_atomic_update(ptp, ptp_clock_hi);
+ } else {
+ writeq(delta, ptp->reg_base + PTP_NANO_TIMESTAMP);
+ writeq(0, ptp->reg_base + PTP_FRNS_TIMESTAMP);
+
+ /* initiate atomic inc/dec */
+ regval = readq(ptp->reg_base + PTP_CLOCK_CFG);
+ regval &= ~PTP_CLOCK_CFG_ATOMIC_OP_MASK;
+ regval |= neg_adj ? (ATOMIC_DEC << 26) : (ATOMIC_INC << 26);
+ writeq(regval, ptp->reg_base + PTP_CLOCK_CFG);
+ }
+}
+
+static int ptp_adjfine(struct ptp *ptp, long scaled_ppm)
+{
+ bool neg_adj = false;
+ u32 freq, freq_adj;
+ u64 comp, adj;
+ s64 ppb;
+
+ if (scaled_ppm < 0) {
+ neg_adj = true;
+ scaled_ppm = -scaled_ppm;
+ }
+
+ /* The hardware adds the clock compensation value to the PTP clock
+ * on every coprocessor clock cycle. The typical convention is that it
+ * represents the number of nanoseconds between cycles. In this
+ * convention the compensation value is a 64-bit fixed-point number
+ * whose upper 32 bits are whole nanoseconds and whose lower 32 bits
+ * are fractions of a nanosecond.
+ * The scaled_ppm represents the ratio in "parts per million" by which
+ * the compensation value should be corrected.
+ * To calculate the new compensation value we use 64-bit fixed-point
+ * arithmetic on the following formula
+ * comp = tbase + tbase * scaled_ppm / (1M * 2^16)
+ * where tbase is the basic compensation value calculated
+ * initially in the probe function.
+ */
+ /* convert scaled_ppm to ppb */
+ ppb = 1 + scaled_ppm;
+ ppb *= 125;
+ ppb >>= 13;
+
+ if (cn10k_ptp_errata(ptp)) {
+ /* calculate the new frequency based on ppb */
+ freq_adj = (ptp->clock_rate * ppb) / 1000000000ULL;
+ freq = neg_adj ? ptp->clock_rate + freq_adj : ptp->clock_rate - freq_adj;
+ comp = ptp_calc_adjusted_comp(freq);
+ } else {
+ comp = ((u64)1000000000ull << 32) / ptp->clock_rate;
+ adj = comp * ppb;
+ adj = div_u64(adj, 1000000000ull);
+ comp = neg_adj ? comp - adj : comp + adj;
+ }
+ writeq(comp, ptp->reg_base + PTP_CLOCK_COMP);
+
+ return 0;
+}
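+
+/* Worked example for the scaled_ppm conversion above (values hypothetical):
+ * scaled_ppm is ppm expressed in 16.16 fixed point, so 1 ppm arrives as
+ * 65536. Then ppb = ((1 + 65536) * 125) >> 13 = 8192125 / 8192 ~= 1000,
+ * i.e. 1 ppm maps to 1000 ppb as expected. With a 1 GHz clock
+ * (comp = 2^32) the non-errata path then computes
+ * adj = (2^32 * 1000) / 10^9 ~= 4295, nudging comp by roughly one part
+ * per million.
+ */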
+
+static int ptp_get_clock(struct ptp *ptp, u64 *clk)
+{
+ /* Return the current PTP clock */
+ *clk = ptp->read_ptp_tstmp(ptp);
+
+ return 0;
+}
+
+void ptp_start(struct rvu *rvu, u64 sclk, u32 ext_clk_freq, u32 extts)
+{
+ struct ptp *ptp = rvu->ptp;
+ struct pci_dev *pdev;
+ u64 clock_comp;
+ u64 clock_cfg;
+
+ if (!ptp)
+ return;
+
+ pdev = ptp->pdev;
+
+ if (!sclk) {
+ dev_err(&pdev->dev, "PTP input clock cannot be zero\n");
+ return;
+ }
+
+ /* sclk is in MHz */
+ ptp->clock_rate = sclk * 1000000;
+
+ /* Program the seconds rollover value to 1 second */
+ if (is_tstmp_atomic_update_supported(rvu)) {
+ writeq(0, ptp->reg_base + PTP_NANO_TIMESTAMP);
+ writeq(0, ptp->reg_base + PTP_FRNS_TIMESTAMP);
+ writeq(0, ptp->reg_base + PTP_SEC_TIMESTAMP);
+ writeq(0, ptp->reg_base + PTP_CURR_ROLLOVER_SET);
+ writeq(0x3b9aca00, ptp->reg_base + PTP_NXT_ROLLOVER_SET);
+ writeq(0x3b9aca00, ptp->reg_base + PTP_SEC_ROLLOVER);
+ }
+
+ /* Enable PTP clock */
+ clock_cfg = readq(ptp->reg_base + PTP_CLOCK_CFG);
+
+ if (ext_clk_freq) {
+ ptp->clock_rate = ext_clk_freq;
+ /* Set GPIO as PTP clock source */
+ clock_cfg &= ~PTP_CLOCK_CFG_EXT_CLK_IN_MASK;
+ clock_cfg |= PTP_CLOCK_CFG_EXT_CLK_EN;
+ }
+
+ if (extts) {
+ clock_cfg |= PTP_CLOCK_CFG_TSTMP_EDGE;
+ /* Set GPIO as timestamping source */
+ clock_cfg &= ~PTP_CLOCK_CFG_TSTMP_IN_MASK;
+ clock_cfg |= PTP_CLOCK_CFG_TSTMP_EN;
+ }
+
+ clock_cfg |= PTP_CLOCK_CFG_PTP_EN;
+ clock_cfg |= PTP_CLOCK_CFG_PPS_EN | PTP_CLOCK_CFG_PPS_INV;
+ writeq(clock_cfg, ptp->reg_base + PTP_CLOCK_CFG);
+ clock_cfg = readq(ptp->reg_base + PTP_CLOCK_CFG);
+ clock_cfg &= ~PTP_CLOCK_CFG_ATOMIC_OP_MASK;
+ clock_cfg |= (ATOMIC_SET << 26);
+ writeq(clock_cfg, ptp->reg_base + PTP_CLOCK_CFG);
+
+ /* Set 50% duty cycle for 1Hz output */
+ writeq(0x1dcd650000000000, ptp->reg_base + PTP_PPS_HI_INCR);
+ writeq(0x1dcd650000000000, ptp->reg_base + PTP_PPS_LO_INCR);
+ if (cn10k_ptp_errata(ptp)) {
+ /* The ptp_clock_hi counter rolls over to zero one clock cycle before
+ * it reaches the one second boundary, so program the pps_lo_incr in
+ * such a way that the pps threshold comparison at the one second
+ * boundary still succeeds and the pps edge changes. After each
+ * one second boundary, the hrtimer handler is invoked and
+ * reprograms the pps threshold value.
+ */
+ ptp->clock_period = NSEC_PER_SEC / ptp->clock_rate;
+ writeq((0x1dcd6500ULL - ptp->clock_period) << 32,
+ ptp->reg_base + PTP_PPS_LO_INCR);
+ }
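+
+ /* For reference, 0x1dcd6500 is 500,000,000, so the PPS increment
+ * registers above hold half a second in what looks like the same
+ * ns << 32 fixed-point format as the compensation value, which is
+ * what yields the 50% duty cycle on the 1 Hz output. On the errata
+ * parts the low increment is trimmed by one clock period so the
+ * threshold comparison still fires at the second boundary.
+ */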
+
+ if (cn10k_ptp_errata(ptp))
+ clock_comp = ptp_calc_adjusted_comp(ptp->clock_rate);
+ else
+ clock_comp = ((u64)1000000000ull << 32) / ptp->clock_rate;
+
+ /* Initial compensation value to start the nanosecs counter */
+ writeq(clock_comp, ptp->reg_base + PTP_CLOCK_COMP);
+}
+
+static int ptp_get_tstmp(struct ptp *ptp, u64 *clk)
+{
+ u64 timestamp;
+
+ if (is_ptp_dev_cn10ka(ptp) || is_ptp_dev_cnf10ka(ptp)) {
+ timestamp = readq(ptp->reg_base + PTP_TIMESTAMP);
+ *clk = (timestamp >> 32) * NSEC_PER_SEC + (timestamp & 0xFFFFFFFF);
+ } else {
+ *clk = readq(ptp->reg_base + PTP_TIMESTAMP);
+ }
+
+ return 0;
+}
+
+static int ptp_set_thresh(struct ptp *ptp, u64 thresh)
+{
+ if (!cn10k_ptp_errata(ptp))
+ writeq(thresh, ptp->reg_base + PTP_PPS_THRESH_HI);
+
+ return 0;
+}
+
+static int ptp_extts_on(struct ptp *ptp, int on)
+{
+ u64 ptp_clock_hi;
+
+ if (cn10k_ptp_errata(ptp)) {
+ if (on) {
+ ptp_clock_hi = readq(ptp->reg_base + PTP_CLOCK_HI);
+ ptp_hrtimer_start(ptp, (ktime_t)ptp_clock_hi);
+ } else {
+ if (hrtimer_active(&ptp->hrtimer))
+ hrtimer_cancel(&ptp->hrtimer);
+ }
+ }
+
+ return 0;
+}
+
+static int ptp_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct ptp *ptp;
+ int err;
+
+ ptp = kzalloc(sizeof(*ptp), GFP_KERNEL);
+ if (!ptp) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ ptp->pdev = pdev;
+
+ err = pcim_enable_device(pdev);
+ if (err)
+ goto error_free;
+
+ err = pcim_iomap_regions(pdev, 1 << PCI_PTP_BAR_NO, pci_name(pdev));
+ if (err)
+ goto error_free;
+
+ ptp->reg_base = pcim_iomap_table(pdev)[PCI_PTP_BAR_NO];
+
+ pci_set_drvdata(pdev, ptp);
+ if (!first_ptp_block)
+ first_ptp_block = ptp;
+
+ spin_lock_init(&ptp->ptp_lock);
+ if (cn10k_ptp_errata(ptp)) {
+ ptp->read_ptp_tstmp = &read_ptp_tstmp_sec_nsec;
+ hrtimer_init(&ptp->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ ptp->hrtimer.function = ptp_reset_thresh;
+ } else {
+ ptp->read_ptp_tstmp = &read_ptp_tstmp_nsec;
+ }
+
+ return 0;
+
+error_free:
+ kfree(ptp);
+
+error:
+ /* For `ptp_get()` we need to differentiate between the case
+ * when the core has not tried to probe this device and the case when
+ * the probe failed. In the latter case we keep the error in
+ * `dev->driver_data`.
+ */
+ pci_set_drvdata(pdev, ERR_PTR(err));
+ if (!first_ptp_block)
+ first_ptp_block = ERR_PTR(err);
+
+ return err;
+}
+
+static void ptp_remove(struct pci_dev *pdev)
+{
+ struct ptp *ptp = pci_get_drvdata(pdev);
+ u64 clock_cfg;
+
+ if (IS_ERR_OR_NULL(ptp))
+ return;
+
+ if (cn10k_ptp_errata(ptp) && hrtimer_active(&ptp->hrtimer))
+ hrtimer_cancel(&ptp->hrtimer);
+
+ /* Disable PTP clock */
+ clock_cfg = readq(ptp->reg_base + PTP_CLOCK_CFG);
+ clock_cfg &= ~PTP_CLOCK_CFG_PTP_EN;
+ writeq(clock_cfg, ptp->reg_base + PTP_CLOCK_CFG);
+ kfree(ptp);
+}
+
+static const struct pci_device_id ptp_id_table[] = {
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
+ PCI_VENDOR_ID_CAVIUM,
+ PCI_SUBSYS_DEVID_OCTX2_98xx_PTP) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
+ PCI_VENDOR_ID_CAVIUM,
+ PCI_SUBSYS_DEVID_OCTX2_96XX_PTP) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
+ PCI_VENDOR_ID_CAVIUM,
+ PCI_SUBSYS_DEVID_OCTX2_95XX_PTP) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
+ PCI_VENDOR_ID_CAVIUM,
+ PCI_SUBSYS_DEVID_OCTX2_95XXN_PTP) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
+ PCI_VENDOR_ID_CAVIUM,
+ PCI_SUBSYS_DEVID_OCTX2_95MM_PTP) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
+ PCI_VENDOR_ID_CAVIUM,
+ PCI_SUBSYS_DEVID_OCTX2_95XXO_PTP) },
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_PTP) },
+ { 0, }
+};
+
+struct pci_driver ptp_driver = {
+ .name = DRV_NAME,
+ .id_table = ptp_id_table,
+ .probe = ptp_probe,
+ .remove = ptp_remove,
+};
+
+int rvu_mbox_handler_ptp_op(struct rvu *rvu, struct ptp_req *req,
+ struct ptp_rsp *rsp)
+{
+ int err = 0;
+
+ /* This function is the PTP mailbox handler, invoked when AF
+ * consumers/netdev drivers call it via the mailbox mechanism.
+ * It is used by the netdev driver to get the PTP clock and to set
+ * frequency adjustments. Since the mailbox can be called without
+ * knowing whether this driver is bound to the ptp device, the
+ * validation below is needed as a first step.
+ */
+ if (!rvu->ptp)
+ return -ENODEV;
+
+ switch (req->op) {
+ case PTP_OP_ADJFINE:
+ err = ptp_adjfine(rvu->ptp, req->scaled_ppm);
+ break;
+ case PTP_OP_GET_CLOCK:
+ err = ptp_get_clock(rvu->ptp, &rsp->clk);
+ break;
+ case PTP_OP_GET_TSTMP:
+ err = ptp_get_tstmp(rvu->ptp, &rsp->clk);
+ break;
+ case PTP_OP_SET_THRESH:
+ err = ptp_set_thresh(rvu->ptp, req->thresh);
+ break;
+ case PTP_OP_EXTTS_ON:
+ err = ptp_extts_on(rvu->ptp, req->extts_on);
+ break;
+ case PTP_OP_ADJTIME:
+ ptp_atomic_adjtime(rvu->ptp, req->delta);
+ break;
+ case PTP_OP_SET_CLOCK:
+ ptp_atomic_update(rvu->ptp, (u64)req->clk);
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ return err;
+}
+
+int rvu_mbox_handler_ptp_get_cap(struct rvu *rvu, struct msg_req *req,
+ struct ptp_get_cap_rsp *rsp)
+{
+ if (!rvu->ptp)
+ return -ENODEV;
+
+ if (is_tstmp_atomic_update_supported(rvu))
+ rsp->cap |= PTP_CAP_HW_ATOMIC_UPDATE;
+ else
+ rsp->cap &= ~BIT_ULL_MASK(0);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.h b/drivers/net/ethernet/marvell/octeontx2/af/ptp.h
new file mode 100644
index 000000000..1229344c7
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell PTP driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#ifndef PTP_H
+#define PTP_H
+
+#include <linux/timecounter.h>
+#include <linux/time64.h>
+#include <linux/spinlock.h>
+
+struct ptp {
+ struct pci_dev *pdev;
+ void __iomem *reg_base;
+ u64 (*read_ptp_tstmp)(struct ptp *ptp);
+ spinlock_t ptp_lock; /* lock */
+ struct hrtimer hrtimer;
+ ktime_t last_ts;
+ u32 clock_rate;
+ u32 clock_period;
+};
+
+struct rvu;
+struct ptp *ptp_get(void);
+void ptp_put(struct ptp *ptp);
+void ptp_start(struct rvu *rvu, u64 sclk, u32 ext_clk_freq, u32 extts);
+
+extern struct pci_driver ptp_driver;
+
+#endif
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
new file mode 100644
index 000000000..76218f1cb
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
@@ -0,0 +1,748 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell CN10K RPM driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include "cgx.h"
+#include "lmac_common.h"
+
+static struct mac_ops rpm_mac_ops = {
+ .name = "rpm",
+ .csr_offset = 0x4e00,
+ .lmac_offset = 20,
+ .int_register = RPMX_CMRX_SW_INT,
+ .int_set_reg = RPMX_CMRX_SW_INT_ENA_W1S,
+ .irq_offset = 1,
+ .int_ena_bit = BIT_ULL(0),
+ .lmac_fwi = RPM_LMAC_FWI,
+ .non_contiguous_serdes_lane = true,
+ .rx_stats_cnt = 43,
+ .tx_stats_cnt = 34,
+ .dmac_filter_count = 32,
+ .get_nr_lmacs = rpm_get_nr_lmacs,
+ .get_lmac_type = rpm_get_lmac_type,
+ .lmac_fifo_len = rpm_get_lmac_fifo_len,
+ .mac_lmac_intl_lbk = rpm_lmac_internal_loopback,
+ .mac_get_rx_stats = rpm_get_rx_stats,
+ .mac_get_tx_stats = rpm_get_tx_stats,
+ .get_fec_stats = rpm_get_fec_stats,
+ .mac_enadis_rx_pause_fwding = rpm_lmac_enadis_rx_pause_fwding,
+ .mac_get_pause_frm_status = rpm_lmac_get_pause_frm_status,
+ .mac_enadis_pause_frm = rpm_lmac_enadis_pause_frm,
+ .mac_pause_frm_config = rpm_lmac_pause_frm_config,
+ .mac_enadis_ptp_config = rpm_lmac_ptp_config,
+ .mac_rx_tx_enable = rpm_lmac_rx_tx_enable,
+ .mac_tx_enable = rpm_lmac_tx_enable,
+ .pfc_config = rpm_lmac_pfc_config,
+ .mac_get_pfc_frm_cfg = rpm_lmac_get_pfc_frm_cfg,
+ .mac_reset = rpm_lmac_reset,
+};
+
+static struct mac_ops rpm2_mac_ops = {
+ .name = "rpm",
+ .csr_offset = RPM2_CSR_OFFSET,
+ .lmac_offset = 20,
+ .int_register = RPM2_CMRX_SW_INT,
+ .int_set_reg = RPM2_CMRX_SW_INT_ENA_W1S,
+ .irq_offset = 1,
+ .int_ena_bit = BIT_ULL(0),
+ .lmac_fwi = RPM2_LMAC_FWI,
+ .non_contiguous_serdes_lane = true,
+ .rx_stats_cnt = 43,
+ .tx_stats_cnt = 34,
+ .dmac_filter_count = 64,
+ .get_nr_lmacs = rpm2_get_nr_lmacs,
+ .get_lmac_type = rpm_get_lmac_type,
+ .lmac_fifo_len = rpm2_get_lmac_fifo_len,
+ .mac_lmac_intl_lbk = rpm_lmac_internal_loopback,
+ .mac_get_rx_stats = rpm_get_rx_stats,
+ .mac_get_tx_stats = rpm_get_tx_stats,
+ .get_fec_stats = rpm_get_fec_stats,
+ .mac_enadis_rx_pause_fwding = rpm_lmac_enadis_rx_pause_fwding,
+ .mac_get_pause_frm_status = rpm_lmac_get_pause_frm_status,
+ .mac_enadis_pause_frm = rpm_lmac_enadis_pause_frm,
+ .mac_pause_frm_config = rpm_lmac_pause_frm_config,
+ .mac_enadis_ptp_config = rpm_lmac_ptp_config,
+ .mac_rx_tx_enable = rpm_lmac_rx_tx_enable,
+ .mac_tx_enable = rpm_lmac_tx_enable,
+ .pfc_config = rpm_lmac_pfc_config,
+ .mac_get_pfc_frm_cfg = rpm_lmac_get_pfc_frm_cfg,
+ .mac_reset = rpm_lmac_reset,
+};
+
+bool is_dev_rpm2(void *rpmd)
+{
+ rpm_t *rpm = rpmd;
+
+ return (rpm->pdev->device == PCI_DEVID_CN10KB_RPM);
+}
+
+struct mac_ops *rpm_get_mac_ops(rpm_t *rpm)
+{
+ if (is_dev_rpm2(rpm))
+ return &rpm2_mac_ops;
+ else
+ return &rpm_mac_ops;
+}
+
+static void rpm_write(rpm_t *rpm, u64 lmac, u64 offset, u64 val)
+{
+ cgx_write(rpm, lmac, offset, val);
+}
+
+static u64 rpm_read(rpm_t *rpm, u64 lmac, u64 offset)
+{
+ return cgx_read(rpm, lmac, offset);
+}
+
+/* Read the HW major version to determine the RPM
+ * MAC type (100/USX)
+ */
+static bool is_mac_rpmusx(void *rpmd)
+{
+ rpm_t *rpm = rpmd;
+
+ return rpm_read(rpm, 0, RPMX_CONST1) & 0x700ULL;
+}
+
+int rpm_get_nr_lmacs(void *rpmd)
+{
+ rpm_t *rpm = rpmd;
+
+ return hweight8(rpm_read(rpm, 0, CGXX_CMRX_RX_LMACS) & 0xFULL);
+}
+
+int rpm2_get_nr_lmacs(void *rpmd)
+{
+ rpm_t *rpm = rpmd;
+
+ return hweight8(rpm_read(rpm, 0, RPM2_CMRX_RX_LMACS) & 0xFFULL);
+}
+
+int rpm_lmac_tx_enable(void *rpmd, int lmac_id, bool enable)
+{
+ rpm_t *rpm = rpmd;
+ u64 cfg, last;
+
+ if (!is_lmac_valid(rpm, lmac_id))
+ return -ENODEV;
+
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ last = cfg;
+ if (enable)
+ cfg |= RPM_TX_EN;
+ else
+ cfg &= ~(RPM_TX_EN);
+
+ if (cfg != last)
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+ return !!(last & RPM_TX_EN);
+}
+
+int rpm_lmac_rx_tx_enable(void *rpmd, int lmac_id, bool enable)
+{
+ rpm_t *rpm = rpmd;
+ u64 cfg;
+
+ if (!is_lmac_valid(rpm, lmac_id))
+ return -ENODEV;
+
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ if (enable)
+ cfg |= RPM_RX_EN | RPM_TX_EN;
+ else
+ cfg &= ~(RPM_RX_EN | RPM_TX_EN);
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+ return 0;
+}
+
+void rpm_lmac_enadis_rx_pause_fwding(void *rpmd, int lmac_id, bool enable)
+{
+ rpm_t *rpm = rpmd;
+ struct lmac *lmac;
+ u64 cfg;
+
+ if (!rpm)
+ return;
+
+ lmac = lmac_pdata(lmac_id, rpm);
+ if (!lmac)
+ return;
+
+ /* If pause frames are not enabled, just return */
+ if (!bitmap_weight(lmac->rx_fc_pfvf_bmap.bmap, lmac->rx_fc_pfvf_bmap.max))
+ return;
+
+ if (enable) {
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE;
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+ } else {
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE;
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+ }
+}
+
+int rpm_lmac_get_pause_frm_status(void *rpmd, int lmac_id,
+ u8 *tx_pause, u8 *rx_pause)
+{
+ rpm_t *rpm = rpmd;
+ u64 cfg;
+
+ if (!is_lmac_valid(rpm, lmac_id))
+ return -ENODEV;
+
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ if (!(cfg & RPMX_MTI_MAC100X_COMMAND_CONFIG_PFC_MODE)) {
+ *rx_pause = !(cfg & RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE);
+ *tx_pause = !(cfg & RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE);
+ }
+
+ return 0;
+}
+
+static void rpm_cfg_pfc_quanta_thresh(rpm_t *rpm, int lmac_id,
+ unsigned long pfc_en,
+ bool enable)
+{
+ u64 quanta_offset = 0, quanta_thresh = 0, cfg;
+ int i, shift;
+
+ /* Set pause time and interval */
+ for_each_set_bit(i, &pfc_en, 16) {
+ switch (i) {
+ case 0:
+ case 1:
+ quanta_offset = RPMX_MTI_MAC100X_CL01_PAUSE_QUANTA;
+ quanta_thresh = RPMX_MTI_MAC100X_CL01_QUANTA_THRESH;
+ break;
+ case 2:
+ case 3:
+ quanta_offset = RPMX_MTI_MAC100X_CL23_PAUSE_QUANTA;
+ quanta_thresh = RPMX_MTI_MAC100X_CL23_QUANTA_THRESH;
+ break;
+ case 4:
+ case 5:
+ quanta_offset = RPMX_MTI_MAC100X_CL45_PAUSE_QUANTA;
+ quanta_thresh = RPMX_MTI_MAC100X_CL45_QUANTA_THRESH;
+ break;
+ case 6:
+ case 7:
+ quanta_offset = RPMX_MTI_MAC100X_CL67_PAUSE_QUANTA;
+ quanta_thresh = RPMX_MTI_MAC100X_CL67_QUANTA_THRESH;
+ break;
+ case 8:
+ case 9:
+ quanta_offset = RPMX_MTI_MAC100X_CL89_PAUSE_QUANTA;
+ quanta_thresh = RPMX_MTI_MAC100X_CL89_QUANTA_THRESH;
+ break;
+ case 10:
+ case 11:
+ quanta_offset = RPMX_MTI_MAC100X_CL1011_PAUSE_QUANTA;
+ quanta_thresh = RPMX_MTI_MAC100X_CL1011_QUANTA_THRESH;
+ break;
+ case 12:
+ case 13:
+ quanta_offset = RPMX_MTI_MAC100X_CL1213_PAUSE_QUANTA;
+ quanta_thresh = RPMX_MTI_MAC100X_CL1213_QUANTA_THRESH;
+ break;
+ case 14:
+ case 15:
+ quanta_offset = RPMX_MTI_MAC100X_CL1415_PAUSE_QUANTA;
+ quanta_thresh = RPMX_MTI_MAC100X_CL1415_QUANTA_THRESH;
+ break;
+ }
+
+ if (!quanta_offset || !quanta_thresh)
+ continue;
+
+ shift = (i % 2) ? 1 : 0;
+ cfg = rpm_read(rpm, lmac_id, quanta_offset);
+ if (enable) {
+ cfg |= ((u64)RPM_DEFAULT_PAUSE_TIME << shift * 16);
+ } else {
+ if (!shift)
+ cfg &= ~GENMASK_ULL(15, 0);
+ else
+ cfg &= ~GENMASK_ULL(31, 16);
+ }
+ rpm_write(rpm, lmac_id, quanta_offset, cfg);
+
+ cfg = rpm_read(rpm, lmac_id, quanta_thresh);
+ if (enable) {
+ cfg |= ((u64)(RPM_DEFAULT_PAUSE_TIME / 2) << shift * 16);
+ } else {
+ if (!shift)
+ cfg &= ~GENMASK_ULL(15, 0);
+ else
+ cfg &= ~GENMASK_ULL(31, 16);
+ }
+ rpm_write(rpm, lmac_id, quanta_thresh, cfg);
+ }
+}
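+
+/* Illustration of the per-class packing above (register names as defined
+ * in rpm.h): each *_PAUSE_QUANTA/_QUANTA_THRESH CSR covers a pair of PFC
+ * classes, with the even class of the pair in bits 15:0 and the odd class
+ * in bits 31:16. Enabling class 5, for instance, lands in
+ * RPMX_MTI_MAC100X_CL45_PAUSE_QUANTA with shift = 1, i.e. bits 31:16 get
+ * RPM_DEFAULT_PAUSE_TIME and the matching threshold register gets half
+ * of that value.
+ */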
+
+static void rpm2_lmac_cfg_bp(rpm_t *rpm, int lmac_id, u8 tx_pause, u8 rx_pause)
+{
+ u64 cfg;
+
+ cfg = rpm_read(rpm, lmac_id, RPM2_CMR_RX_OVR_BP);
+ if (tx_pause) {
+ /* Configure CL0 Pause Quanta & threshold
+ * for 802.3X frames
+ */
+ rpm_cfg_pfc_quanta_thresh(rpm, lmac_id, 1, true);
+ cfg &= ~RPM2_CMR_RX_OVR_BP_EN;
+ } else {
+ /* Disable all Pause Quanta & threshold values */
+ rpm_cfg_pfc_quanta_thresh(rpm, lmac_id, 0xffff, false);
+ cfg |= RPM2_CMR_RX_OVR_BP_EN;
+ cfg &= ~RPM2_CMR_RX_OVR_BP_BP;
+ }
+ rpm_write(rpm, lmac_id, RPM2_CMR_RX_OVR_BP, cfg);
+}
+
+static void rpm_lmac_cfg_bp(rpm_t *rpm, int lmac_id, u8 tx_pause, u8 rx_pause)
+{
+ u64 cfg;
+
+ cfg = rpm_read(rpm, 0, RPMX_CMR_RX_OVR_BP);
+ if (tx_pause) {
+ /* Configure CL0 Pause Quanta & threshold for
+ * 802.3X frames
+ */
+ rpm_cfg_pfc_quanta_thresh(rpm, lmac_id, 1, true);
+ cfg &= ~RPMX_CMR_RX_OVR_BP_EN(lmac_id);
+ } else {
+ /* Disable all Pause Quanta & threshold values */
+ rpm_cfg_pfc_quanta_thresh(rpm, lmac_id, 0xffff, false);
+ cfg |= RPMX_CMR_RX_OVR_BP_EN(lmac_id);
+ cfg &= ~RPMX_CMR_RX_OVR_BP_BP(lmac_id);
+ }
+ rpm_write(rpm, 0, RPMX_CMR_RX_OVR_BP, cfg);
+}
+
+int rpm_lmac_enadis_pause_frm(void *rpmd, int lmac_id, u8 tx_pause,
+ u8 rx_pause)
+{
+ rpm_t *rpm = rpmd;
+ u64 cfg;
+
+ if (!is_lmac_valid(rpm, lmac_id))
+ return -ENODEV;
+
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE;
+ cfg |= rx_pause ? 0x0 : RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE;
+ cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE;
+ cfg |= rx_pause ? 0x0 : RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE;
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE;
+ cfg |= tx_pause ? 0x0 : RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE;
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+
+ if (is_dev_rpm2(rpm))
+ rpm2_lmac_cfg_bp(rpm, lmac_id, tx_pause, rx_pause);
+ else
+ rpm_lmac_cfg_bp(rpm, lmac_id, tx_pause, rx_pause);
+
+ return 0;
+}
+
+void rpm_lmac_pause_frm_config(void *rpmd, int lmac_id, bool enable)
+{
+ u64 cfg, pfc_class_mask_cfg;
+ rpm_t *rpm = rpmd;
+
+ /* ALL pause frames received are completely ignored */
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE;
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+
+ /* Disable forward pause to TX block */
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE;
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+
+ /* Disable pause frames transmission */
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE;
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+
+ /* Disable forward pause to driver */
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_FWD;
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+
+ /* Enable channel mask for all LMACs */
+ if (is_dev_rpm2(rpm))
+ rpm_write(rpm, lmac_id, RPM2_CMR_CHAN_MSK_OR, 0xffff);
+ else
+ rpm_write(rpm, 0, RPMX_CMR_CHAN_MSK_OR, ~0ULL);
+
+ /* Disable all PFC classes */
+ pfc_class_mask_cfg = is_dev_rpm2(rpm) ? RPM2_CMRX_PRT_CBFC_CTL :
+ RPMX_CMRX_PRT_CBFC_CTL;
+ cfg = rpm_read(rpm, lmac_id, pfc_class_mask_cfg);
+ cfg = FIELD_SET(RPM_PFC_CLASS_MASK, 0, cfg);
+ rpm_write(rpm, lmac_id, pfc_class_mask_cfg, cfg);
+}
+
+int rpm_get_rx_stats(void *rpmd, int lmac_id, int idx, u64 *rx_stat)
+{
+ rpm_t *rpm = rpmd;
+ u64 val_lo, val_hi;
+
+ if (!is_lmac_valid(rpm, lmac_id))
+ return -ENODEV;
+
+ mutex_lock(&rpm->lock);
+
+ /* Update idx to point to the per-LMAC Rx statistics page */
+ idx += lmac_id * rpm->mac_ops->rx_stats_cnt;
+
+ /* Read lower 32 bits of counter */
+ val_lo = rpm_read(rpm, 0, RPMX_MTI_STAT_RX_STAT_PAGES_COUNTERX +
+ (idx * 8));
+
+ /* Upon a read of the lower 32 bits, the higher 32 bits are written
+ * to RPMX_MTI_STAT_DATA_HI_CDC
+ */
+ val_hi = rpm_read(rpm, 0, RPMX_MTI_STAT_DATA_HI_CDC);
+
+ *rx_stat = (val_hi << 32 | val_lo);
+
+ mutex_unlock(&rpm->lock);
+ return 0;
+}
+
+int rpm_get_tx_stats(void *rpmd, int lmac_id, int idx, u64 *tx_stat)
+{
+ rpm_t *rpm = rpmd;
+ u64 val_lo, val_hi;
+
+ if (!is_lmac_valid(rpm, lmac_id))
+ return -ENODEV;
+
+ mutex_lock(&rpm->lock);
+
+ /* Update idx to point to the per-LMAC Tx statistics page */
+ idx += lmac_id * rpm->mac_ops->tx_stats_cnt;
+
+ val_lo = rpm_read(rpm, 0, RPMX_MTI_STAT_TX_STAT_PAGES_COUNTERX +
+ (idx * 8));
+ val_hi = rpm_read(rpm, 0, RPMX_MTI_STAT_DATA_HI_CDC);
+
+ *tx_stat = (val_hi << 32 | val_lo);
+
+ mutex_unlock(&rpm->lock);
+ return 0;
+}
+
+u8 rpm_get_lmac_type(void *rpmd, int lmac_id)
+{
+ rpm_t *rpm = rpmd;
+ u64 req = 0, resp;
+ int err;
+
+ req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_LINK_STS, req);
+ err = cgx_fwi_cmd_generic(req, &resp, rpm, 0);
+ if (!err)
+ return FIELD_GET(RESP_LINKSTAT_LMAC_TYPE, resp);
+ return err;
+}
+
+u32 rpm_get_lmac_fifo_len(void *rpmd, int lmac_id)
+{
+ rpm_t *rpm = rpmd;
+ u64 hi_perf_lmac;
+ u8 num_lmacs;
+ u32 fifo_len;
+
+ fifo_len = rpm->mac_ops->fifo_len;
+ num_lmacs = rpm->mac_ops->get_nr_lmacs(rpm);
+
+ switch (num_lmacs) {
+ case 1:
+ return fifo_len;
+ case 2:
+ return fifo_len / 2;
+ case 3:
+ /* The LMAC marked as hi_perf gets half of the FIFO and the rest get 1/4th each */
+ hi_perf_lmac = rpm_read(rpm, 0, CGXX_CMRX_RX_LMACS);
+ hi_perf_lmac = (hi_perf_lmac >> 4) & 0x3ULL;
+ if (lmac_id == hi_perf_lmac)
+ return fifo_len / 2;
+ return fifo_len / 4;
+ case 4:
+ default:
+ return fifo_len / 4;
+ }
+ return 0;
+}
+
+static int rpmusx_lmac_internal_loopback(rpm_t *rpm, int lmac_id, bool enable)
+{
+ u64 cfg;
+
+ cfg = rpm_read(rpm, lmac_id, RPM2_USX_PCSX_CONTROL1);
+
+ if (enable)
+ cfg |= RPM2_USX_PCS_LBK;
+ else
+ cfg &= ~RPM2_USX_PCS_LBK;
+ rpm_write(rpm, lmac_id, RPM2_USX_PCSX_CONTROL1, cfg);
+
+ return 0;
+}
+
+u32 rpm2_get_lmac_fifo_len(void *rpmd, int lmac_id)
+{
+ u64 hi_perf_lmac, lmac_info;
+ rpm_t *rpm = rpmd;
+ u8 num_lmacs;
+ u32 fifo_len;
+ u16 max_lmac;
+
+ lmac_info = rpm_read(rpm, 0, RPM2_CMRX_RX_LMACS);
+ /* LMACs are divided into two groups and each group
+ * gets half of the FIFO
+ * Group0 lmac_id range {0..3}
+ * Group1 lmac_id range {4..7}
+ */
+ max_lmac = (rpm_read(rpm, 0, CGX_CONST) >> 24) & 0xFF;
+ if (max_lmac > 4)
+ fifo_len = rpm->mac_ops->fifo_len / 2;
+ else
+ fifo_len = rpm->mac_ops->fifo_len;
+
+ if (lmac_id < 4) {
+ num_lmacs = hweight8(lmac_info & 0xF);
+ hi_perf_lmac = (lmac_info >> 8) & 0x3ULL;
+ } else {
+ num_lmacs = hweight8(lmac_info & 0xF0);
+ hi_perf_lmac = (lmac_info >> 10) & 0x3ULL;
+ hi_perf_lmac += 4;
+ }
+
+ switch (num_lmacs) {
+ case 1:
+ return fifo_len;
+ case 2:
+ return fifo_len / 2;
+ case 3:
+ /* The LMAC marked as hi_perf gets half of the FIFO
+ * and the rest get 1/4th each
+ */
+ if (lmac_id == hi_perf_lmac)
+ return fifo_len / 2;
+ return fifo_len / 4;
+ case 4:
+ default:
+ return fifo_len / 4;
+ }
+ return 0;
+}
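+
+/* Rough example of the split implemented above, with hypothetical numbers:
+ * if the per-group FIFO is 64 KB and a group has three LMACs of which
+ * LMAC1 is marked hi_perf, LMAC1 is reported as 32 KB and the other two
+ * get 16 KB each; with one, two or four LMACs the group FIFO is simply
+ * shared as the whole, a half, or a quarter per LMAC respectively.
+ */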
+
+int rpm_lmac_internal_loopback(void *rpmd, int lmac_id, bool enable)
+{
+ rpm_t *rpm = rpmd;
+ struct lmac *lmac;
+ u64 cfg;
+
+ if (!is_lmac_valid(rpm, lmac_id))
+ return -ENODEV;
+
+ lmac = lmac_pdata(lmac_id, rpm);
+ if (lmac->lmac_type == LMAC_MODE_QSGMII ||
+ lmac->lmac_type == LMAC_MODE_SGMII) {
+ dev_err(&rpm->pdev->dev, "loopback not supported for LPC mode\n");
+ return 0;
+ }
+
+ if (is_dev_rpm2(rpm) && is_mac_rpmusx(rpm))
+ return rpmusx_lmac_internal_loopback(rpm, lmac_id, enable);
+
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_PCS100X_CONTROL1);
+
+ if (enable)
+ cfg |= RPMX_MTI_PCS_LBK;
+ else
+ cfg &= ~RPMX_MTI_PCS_LBK;
+ rpm_write(rpm, lmac_id, RPMX_MTI_PCS100X_CONTROL1, cfg);
+
+ return 0;
+}
+
+void rpm_lmac_ptp_config(void *rpmd, int lmac_id, bool enable)
+{
+ rpm_t *rpm = rpmd;
+ u64 cfg;
+
+ if (!is_lmac_valid(rpm, lmac_id))
+ return;
+
+ cfg = rpm_read(rpm, lmac_id, RPMX_CMRX_CFG);
+ if (enable) {
+ cfg |= RPMX_RX_TS_PREPEND;
+ cfg |= RPMX_TX_PTP_1S_SUPPORT;
+ } else {
+ cfg &= ~RPMX_RX_TS_PREPEND;
+ cfg &= ~RPMX_TX_PTP_1S_SUPPORT;
+ }
+
+ rpm_write(rpm, lmac_id, RPMX_CMRX_CFG, cfg);
+
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_XIF_MODE);
+
+ if (enable) {
+ cfg |= RPMX_ONESTEP_ENABLE;
+ cfg &= ~RPMX_TS_BINARY_MODE;
+ } else {
+ cfg &= ~RPMX_ONESTEP_ENABLE;
+ }
+
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_XIF_MODE, cfg);
+}
+
+int rpm_lmac_pfc_config(void *rpmd, int lmac_id, u8 tx_pause, u8 rx_pause, u16 pfc_en)
+{
+ u64 cfg, class_en, pfc_class_mask_cfg;
+ rpm_t *rpm = rpmd;
+
+ if (!is_lmac_valid(rpm, lmac_id))
+ return -ENODEV;
+
+ pfc_class_mask_cfg = is_dev_rpm2(rpm) ? RPM2_CMRX_PRT_CBFC_CTL :
+ RPMX_CMRX_PRT_CBFC_CTL;
+
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ class_en = rpm_read(rpm, lmac_id, pfc_class_mask_cfg);
+ pfc_en |= FIELD_GET(RPM_PFC_CLASS_MASK, class_en);
+
+ if (rx_pause) {
+ cfg &= ~(RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE |
+ RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE);
+ } else {
+ cfg |= (RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE |
+ RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE);
+ }
+
+ if (tx_pause) {
+ rpm_cfg_pfc_quanta_thresh(rpm, lmac_id, pfc_en, true);
+ cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE;
+ class_en = FIELD_SET(RPM_PFC_CLASS_MASK, pfc_en, class_en);
+ } else {
+ rpm_cfg_pfc_quanta_thresh(rpm, lmac_id, 0xfff, false);
+ cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE;
+ class_en = FIELD_SET(RPM_PFC_CLASS_MASK, 0, class_en);
+ }
+
+ if (!rx_pause && !tx_pause)
+ cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_PFC_MODE;
+ else
+ cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_PFC_MODE;
+
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+ rpm_write(rpm, lmac_id, pfc_class_mask_cfg, class_en);
+
+ return 0;
+}
+
+int rpm_lmac_get_pfc_frm_cfg(void *rpmd, int lmac_id, u8 *tx_pause, u8 *rx_pause)
+{
+ rpm_t *rpm = rpmd;
+ u64 cfg;
+
+ if (!is_lmac_valid(rpm, lmac_id))
+ return -ENODEV;
+
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ if (cfg & RPMX_MTI_MAC100X_COMMAND_CONFIG_PFC_MODE) {
+ *rx_pause = !(cfg & RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE);
+ *tx_pause = !(cfg & RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE);
+ }
+
+ return 0;
+}
+
+int rpm_get_fec_stats(void *rpmd, int lmac_id, struct cgx_fec_stats_rsp *rsp)
+{
+ u64 val_lo, val_hi;
+ rpm_t *rpm = rpmd;
+ u64 cfg;
+
+ if (!is_lmac_valid(rpm, lmac_id))
+ return -ENODEV;
+
+ if (rpm->lmac_idmap[lmac_id]->link_info.fec == OTX2_FEC_NONE)
+ return 0;
+
+ if (rpm->lmac_idmap[lmac_id]->link_info.fec == OTX2_FEC_BASER) {
+ val_lo = rpm_read(rpm, lmac_id, RPMX_MTI_FCFECX_VL0_CCW_LO);
+ val_hi = rpm_read(rpm, lmac_id, RPMX_MTI_FCFECX_CW_HI);
+ rsp->fec_corr_blks = (val_hi << 16 | val_lo);
+
+ val_lo = rpm_read(rpm, lmac_id, RPMX_MTI_FCFECX_VL0_NCCW_LO);
+ val_hi = rpm_read(rpm, lmac_id, RPMX_MTI_FCFECX_CW_HI);
+ rsp->fec_uncorr_blks = (val_hi << 16 | val_lo);
+
+ /* 50G uses two physical SerDes lanes */
+ if (rpm->lmac_idmap[lmac_id]->link_info.lmac_type_id ==
+ LMAC_MODE_50G_R) {
+ val_lo = rpm_read(rpm, lmac_id,
+ RPMX_MTI_FCFECX_VL1_CCW_LO);
+ val_hi = rpm_read(rpm, lmac_id,
+ RPMX_MTI_FCFECX_CW_HI);
+ rsp->fec_corr_blks += (val_hi << 16 | val_lo);
+
+ val_lo = rpm_read(rpm, lmac_id,
+ RPMX_MTI_FCFECX_VL1_NCCW_LO);
+ val_hi = rpm_read(rpm, lmac_id,
+ RPMX_MTI_FCFECX_CW_HI);
+ rsp->fec_uncorr_blks += (val_hi << 16 | val_lo);
+ }
+ } else {
+ /* enable RS-FEC capture */
+ cfg = rpm_read(rpm, 0, RPMX_MTI_STAT_STATN_CONTROL);
+ cfg |= RPMX_RSFEC_RX_CAPTURE | BIT(lmac_id);
+ rpm_write(rpm, 0, RPMX_MTI_STAT_STATN_CONTROL, cfg);
+
+ val_lo = rpm_read(rpm, 0,
+ RPMX_MTI_RSFEC_STAT_COUNTER_CAPTURE_2);
+ val_hi = rpm_read(rpm, 0, RPMX_MTI_STAT_DATA_HI_CDC);
+ rsp->fec_corr_blks = (val_hi << 32 | val_lo);
+
+ val_lo = rpm_read(rpm, 0,
+ RPMX_MTI_RSFEC_STAT_COUNTER_CAPTURE_3);
+ val_hi = rpm_read(rpm, 0, RPMX_MTI_STAT_DATA_HI_CDC);
+ rsp->fec_uncorr_blks = (val_hi << 32 | val_lo);
+ }
+
+ return 0;
+}
+
+int rpm_lmac_reset(void *rpmd, int lmac_id, u8 pf_req_flr)
+{
+ u64 rx_logl_xon, cfg;
+ rpm_t *rpm = rpmd;
+
+ if (!is_lmac_valid(rpm, lmac_id))
+ return -ENODEV;
+
+ /* Reset the PFC-related CSRs */
+ rx_logl_xon = is_dev_rpm2(rpm) ? RPM2_CMRX_RX_LOGL_XON :
+ RPMX_CMRX_RX_LOGL_XON;
+ cfg = 0xff;
+
+ rpm_write(rpm, lmac_id, rx_logl_xon, cfg);
+
+ if (pf_req_flr)
+ rpm_lmac_internal_loopback(rpm, lmac_id, false);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.h b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
new file mode 100644
index 000000000..b79cfbc6f
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
@@ -0,0 +1,137 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell CN10K RPM driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#ifndef RPM_H
+#define RPM_H
+
+#include <linux/bits.h>
+
+/* PCI device IDs */
+#define PCI_DEVID_CN10K_RPM 0xA060
+#define PCI_SUBSYS_DEVID_CNF10KB_RPM 0xBC00
+#define PCI_DEVID_CN10KB_RPM 0xA09F
+
+/* Registers */
+#define RPMX_CMRX_CFG 0x00
+#define RPMX_RX_TS_PREPEND BIT_ULL(22)
+#define RPMX_TX_PTP_1S_SUPPORT BIT_ULL(17)
+#define RPMX_CMRX_RX_ID_MAP 0x80
+#define RPMX_CMRX_SW_INT 0x180
+#define RPMX_CMRX_SW_INT_W1S 0x188
+#define RPMX_CMRX_SW_INT_ENA_W1S 0x198
+#define RPMX_CMRX_LINK_CFG 0x1070
+#define RPMX_MTI_PCS100X_CONTROL1 0x20000
+#define RPMX_MTI_PCS_LBK BIT_ULL(14)
+#define RPMX_MTI_LPCSX_CONTROL(id) (0x30000 | ((id) * 0x100))
+
+#define RPMX_CMRX_LINK_RANGE_MASK GENMASK_ULL(19, 16)
+#define RPMX_CMRX_LINK_BASE_MASK GENMASK_ULL(11, 0)
+#define RPMX_MTI_MAC100X_COMMAND_CONFIG 0x8010
+#define RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE BIT_ULL(29)
+#define RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE BIT_ULL(28)
+#define RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE BIT_ULL(8)
+#define RPMX_MTI_MAC100X_COMMAND_CONFIG_PFC_MODE BIT_ULL(19)
+#define RPMX_MTI_MAC100X_CL01_PAUSE_QUANTA 0x80A8
+#define RPMX_MTI_MAC100X_CL23_PAUSE_QUANTA 0x80B0
+#define RPMX_MTI_MAC100X_CL45_PAUSE_QUANTA 0x80B8
+#define RPMX_MTI_MAC100X_CL67_PAUSE_QUANTA 0x80C0
+#define RPMX_MTI_MAC100X_CL01_QUANTA_THRESH 0x80C8
+#define RPMX_MTI_MAC100X_CL23_QUANTA_THRESH 0x80D0
+#define RPMX_MTI_MAC100X_CL45_QUANTA_THRESH 0x80D8
+#define RPMX_MTI_MAC100X_CL67_QUANTA_THRESH 0x80E0
+#define RPMX_MTI_MAC100X_CL89_PAUSE_QUANTA 0x8108
+#define RPMX_MTI_MAC100X_CL1011_PAUSE_QUANTA 0x8110
+#define RPMX_MTI_MAC100X_CL1213_PAUSE_QUANTA 0x8118
+#define RPMX_MTI_MAC100X_CL1415_PAUSE_QUANTA 0x8120
+#define RPMX_MTI_MAC100X_CL89_QUANTA_THRESH 0x8128
+#define RPMX_MTI_MAC100X_CL1011_QUANTA_THRESH 0x8130
+#define RPMX_MTI_MAC100X_CL1213_QUANTA_THRESH 0x8138
+#define RPMX_MTI_MAC100X_CL1415_QUANTA_THRESH 0x8140
+#define RPMX_CMR_RX_OVR_BP 0x4120
+#define RPMX_CMR_RX_OVR_BP_EN(x) BIT_ULL((x) + 8)
+#define RPMX_CMR_RX_OVR_BP_BP(x) BIT_ULL((x) + 4)
+#define RPMX_CMR_CHAN_MSK_OR 0x4118
+#define RPMX_MTI_STAT_RX_STAT_PAGES_COUNTERX 0x12000
+#define RPMX_MTI_STAT_TX_STAT_PAGES_COUNTERX 0x13000
+#define RPMX_MTI_STAT_DATA_HI_CDC 0x10038
+
+#define RPM_LMAC_FWI 0xa
+#define RPM_TX_EN BIT_ULL(0)
+#define RPM_RX_EN BIT_ULL(1)
+#define RPMX_CMRX_PRT_CBFC_CTL 0x5B08
+#define RPMX_CMRX_PRT_CBFC_CTL_LOGL_EN_RX_SHIFT 33
+#define RPMX_CMRX_PRT_CBFC_CTL_PHYS_BP_SHIFT 16
+#define RPMX_CMRX_PRT_CBFC_CTL_LOGL_EN_TX_SHIFT 0
+#define RPM_PFC_CLASS_MASK GENMASK_ULL(48, 33)
+#define RPMX_MTI_MAC100X_CL89_QUANTA_THRESH 0x8128
+#define RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_PAD_EN BIT_ULL(11)
+#define RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE BIT_ULL(8)
+#define RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_FWD BIT_ULL(7)
+#define RPMX_MTI_MAC100X_CL01_PAUSE_QUANTA 0x80A8
+#define RPMX_MTI_MAC100X_CL89_PAUSE_QUANTA 0x8108
+#define RPM_DEFAULT_PAUSE_TIME 0x7FF
+#define RPMX_CMRX_RX_LOGL_XON 0x4100
+
+#define RPMX_MTI_MAC100X_XIF_MODE 0x8100
+#define RPMX_ONESTEP_ENABLE BIT_ULL(5)
+#define RPMX_TS_BINARY_MODE BIT_ULL(11)
+#define RPMX_CONST1 0x2008
+
+/* FEC stats */
+#define RPMX_MTI_STAT_STATN_CONTROL 0x10018
+#define RPMX_MTI_STAT_DATA_HI_CDC 0x10038
+#define RPMX_RSFEC_RX_CAPTURE BIT_ULL(27)
+#define RPMX_MTI_RSFEC_STAT_COUNTER_CAPTURE_2 0x40050
+#define RPMX_MTI_RSFEC_STAT_COUNTER_CAPTURE_3 0x40058
+#define RPMX_MTI_FCFECX_VL0_CCW_LO 0x38618
+#define RPMX_MTI_FCFECX_VL0_NCCW_LO 0x38620
+#define RPMX_MTI_FCFECX_VL1_CCW_LO 0x38628
+#define RPMX_MTI_FCFECX_VL1_NCCW_LO 0x38630
+#define RPMX_MTI_FCFECX_CW_HI 0x38638
+
+/* CN10KB CSR Declaration */
+#define RPM2_CMRX_SW_INT 0x1b0
+#define RPM2_CMRX_SW_INT_ENA_W1S 0x1c8
+#define RPM2_LMAC_FWI 0x12
+#define RPM2_CMR_CHAN_MSK_OR 0x3120
+#define RPM2_CMR_RX_OVR_BP_EN BIT_ULL(2)
+#define RPM2_CMR_RX_OVR_BP_BP BIT_ULL(1)
+#define RPM2_CMR_RX_OVR_BP 0x3130
+#define RPM2_CSR_OFFSET 0x3e00
+#define RPM2_CMRX_PRT_CBFC_CTL 0x6510
+#define RPM2_CMRX_RX_LMACS 0x100
+#define RPM2_CMRX_RX_LOGL_XON 0x3100
+#define RPM2_CMRX_RX_STAT2 0x3010
+#define RPM2_USX_PCSX_CONTROL1 0x80000
+#define RPM2_USX_PCS_LBK BIT_ULL(14)
+
+/* Function Declarations */
+int rpm_get_nr_lmacs(void *rpmd);
+u8 rpm_get_lmac_type(void *rpmd, int lmac_id);
+u32 rpm_get_lmac_fifo_len(void *rpmd, int lmac_id);
+u32 rpm2_get_lmac_fifo_len(void *rpmd, int lmac_id);
+int rpm_lmac_internal_loopback(void *rpmd, int lmac_id, bool enable);
+void rpm_lmac_enadis_rx_pause_fwding(void *rpmd, int lmac_id, bool enable);
+int rpm_lmac_get_pause_frm_status(void *cgxd, int lmac_id, u8 *tx_pause,
+ u8 *rx_pause);
+void rpm_lmac_pause_frm_config(void *rpmd, int lmac_id, bool enable);
+int rpm_lmac_enadis_pause_frm(void *rpmd, int lmac_id, u8 tx_pause,
+ u8 rx_pause);
+int rpm_get_tx_stats(void *rpmd, int lmac_id, int idx, u64 *tx_stat);
+int rpm_get_rx_stats(void *rpmd, int lmac_id, int idx, u64 *rx_stat);
+void rpm_lmac_ptp_config(void *rpmd, int lmac_id, bool enable);
+int rpm_lmac_rx_tx_enable(void *rpmd, int lmac_id, bool enable);
+int rpm_lmac_tx_enable(void *rpmd, int lmac_id, bool enable);
+int rpm_lmac_pfc_config(void *rpmd, int lmac_id, u8 tx_pause, u8 rx_pause,
+ u16 pfc_en);
+int rpm_lmac_get_pfc_frm_cfg(void *rpmd, int lmac_id, u8 *tx_pause,
+ u8 *rx_pause);
+int rpm2_get_nr_lmacs(void *rpmd);
+bool is_dev_rpm2(void *rpmd);
+int rpm_get_fec_stats(void *cgxd, int lmac_id, struct cgx_fec_stats_rsp *rsp);
+int rpm_lmac_reset(void *rpmd, int lmac_id, u8 pf_req_flr);
+#endif /* RPM_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
new file mode 100644
index 000000000..731bb82b5
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -0,0 +1,3434 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/pci.h>
+#include <linux/sysfs.h>
+
+#include "cgx.h"
+#include "rvu.h"
+#include "rvu_reg.h"
+#include "ptp.h"
+#include "mcs.h"
+
+#include "rvu_trace.h"
+#include "rvu_npc_hash.h"
+
+#define DRV_NAME "rvu_af"
+#define DRV_STRING "Marvell OcteonTX2 RVU Admin Function Driver"
+
+static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
+ struct rvu_block *block, int lf);
+static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
+ struct rvu_block *block, int lf);
+static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc);
+
+static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
+ int type, int num,
+ void (mbox_handler)(struct work_struct *),
+ void (mbox_up_handler)(struct work_struct *));
+enum {
+ TYPE_AFVF,
+ TYPE_AFPF,
+};
+
+/* Supported devices */
+static const struct pci_device_id rvu_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_AF) },
+ { 0, } /* end of table */
+};
+
+MODULE_AUTHOR("Sunil Goutham <sgoutham@marvell.com>");
+MODULE_DESCRIPTION(DRV_STRING);
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(pci, rvu_id_table);
+
+static char *mkex_profile; /* MKEX profile name */
+module_param(mkex_profile, charp, 0000);
+MODULE_PARM_DESC(mkex_profile, "MKEX profile name string");
+
+static char *kpu_profile; /* KPU profile name */
+module_param(kpu_profile, charp, 0000);
+MODULE_PARM_DESC(kpu_profile, "KPU profile name string");
+
+static void rvu_setup_hw_capabilities(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+
+ hw->cap.nix_tx_aggr_lvl = NIX_TXSCH_LVL_TL1;
+ hw->cap.nix_fixed_txschq_mapping = false;
+ hw->cap.nix_shaping = true;
+ hw->cap.nix_tx_link_bp = true;
+ hw->cap.nix_rx_multicast = true;
+ hw->cap.nix_shaper_toggle_wait = false;
+ hw->cap.npc_hash_extract = false;
+ hw->cap.npc_exact_match_enabled = false;
+ hw->rvu = rvu;
+
+ if (is_rvu_pre_96xx_C0(rvu)) {
+ hw->cap.nix_fixed_txschq_mapping = true;
+ hw->cap.nix_txsch_per_cgx_lmac = 4;
+ hw->cap.nix_txsch_per_lbk_lmac = 132;
+ hw->cap.nix_txsch_per_sdp_lmac = 76;
+ hw->cap.nix_shaping = false;
+ hw->cap.nix_tx_link_bp = false;
+ if (is_rvu_96xx_A0(rvu) || is_rvu_95xx_A0(rvu))
+ hw->cap.nix_rx_multicast = false;
+ }
+ if (!is_rvu_pre_96xx_C0(rvu))
+ hw->cap.nix_shaper_toggle_wait = true;
+
+ if (!is_rvu_otx2(rvu))
+ hw->cap.per_pf_mbox_regs = true;
+
+ if (is_rvu_npc_hash_extract_en(rvu))
+ hw->cap.npc_hash_extract = true;
+}
+
+/* Poll an RVU block's register 'offset' for a 'zero'
+ * or 'nonzero' value at the bits specified by 'mask'
+ */
+int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero)
+{
+ unsigned long timeout = jiffies + usecs_to_jiffies(20000);
+ bool twice = false;
+ void __iomem *reg;
+ u64 reg_val;
+
+ reg = rvu->afreg_base + ((block << 28) | offset);
+again:
+ reg_val = readq(reg);
+ if (zero && !(reg_val & mask))
+ return 0;
+ if (!zero && (reg_val & mask))
+ return 0;
+ if (time_before(jiffies, timeout)) {
+ usleep_range(1, 5);
+ goto again;
+ }
+ /* If the CPU was scheduled out before the 'time_before' check
+ * above and scheduled back in only after jiffies went past the
+ * timeout value, check once more whether the HW has completed
+ * the operation in the meantime.
+ */
+ if (!twice) {
+ twice = true;
+ goto again;
+ }
+ return -EBUSY;
+}
+
+int rvu_alloc_rsrc(struct rsrc_bmap *rsrc)
+{
+ int id;
+
+ if (!rsrc->bmap)
+ return -EINVAL;
+
+ id = find_first_zero_bit(rsrc->bmap, rsrc->max);
+ if (id >= rsrc->max)
+ return -ENOSPC;
+
+ __set_bit(id, rsrc->bmap);
+
+ return id;
+}
+
+int rvu_alloc_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc)
+{
+ int start;
+
+ if (!rsrc->bmap)
+ return -EINVAL;
+
+ start = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, 0, nrsrc, 0);
+ if (start >= rsrc->max)
+ return -ENOSPC;
+
+ bitmap_set(rsrc->bmap, start, nrsrc);
+ return start;
+}
+
+static void rvu_free_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc, int start)
+{
+ if (!rsrc->bmap)
+ return;
+ if (start >= rsrc->max)
+ return;
+
+ bitmap_clear(rsrc->bmap, start, nrsrc);
+}
+
+bool rvu_rsrc_check_contig(struct rsrc_bmap *rsrc, int nrsrc)
+{
+ int start;
+
+ if (!rsrc->bmap)
+ return false;
+
+ start = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, 0, nrsrc, 0);
+ if (start >= rsrc->max)
+ return false;
+
+ return true;
+}
+
+void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id)
+{
+ if (!rsrc->bmap)
+ return;
+
+ __clear_bit(id, rsrc->bmap);
+}
+
+int rvu_rsrc_free_count(struct rsrc_bmap *rsrc)
+{
+ int used;
+
+ if (!rsrc->bmap)
+ return 0;
+
+ used = bitmap_weight(rsrc->bmap, rsrc->max);
+ return (rsrc->max - used);
+}
+
+bool is_rsrc_free(struct rsrc_bmap *rsrc, int id)
+{
+ if (!rsrc->bmap)
+ return false;
+
+ return !test_bit(id, rsrc->bmap);
+}
+
+int rvu_alloc_bitmap(struct rsrc_bmap *rsrc)
+{
+ rsrc->bmap = kcalloc(BITS_TO_LONGS(rsrc->max),
+ sizeof(long), GFP_KERNEL);
+ if (!rsrc->bmap)
+ return -ENOMEM;
+ return 0;
+}
+
+void rvu_free_bitmap(struct rsrc_bmap *rsrc)
+{
+ kfree(rsrc->bmap);
+}
+
+/* Get block LF's HW index from a PF_FUNC's block slot number */
+int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot)
+{
+ u16 match = 0;
+ int lf;
+
+ mutex_lock(&rvu->rsrc_lock);
+ for (lf = 0; lf < block->lf.max; lf++) {
+ if (block->fn_map[lf] == pcifunc) {
+ if (slot == match) {
+ mutex_unlock(&rvu->rsrc_lock);
+ return lf;
+ }
+ match++;
+ }
+ }
+ mutex_unlock(&rvu->rsrc_lock);
+ return -ENODEV;
+}
+
+/* Convert BLOCK_TYPE_E to a BLOCK_ADDR_E.
+ * Some silicon variants of OcteonTX2 support
+ * multiple blocks of the same type.
+ *
+ * @pcifunc has to be zero when no LF is yet attached.
+ *
+ * If a pcifunc has LFs attached from multiple blocks of the same type,
+ * the blkaddr of the first encountered block is returned.
+ */
+int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc)
+{
+ int devnum, blkaddr = -ENODEV;
+ u64 cfg, reg;
+ bool is_pf;
+
+ switch (blktype) {
+ case BLKTYPE_NPC:
+ blkaddr = BLKADDR_NPC;
+ goto exit;
+ case BLKTYPE_NPA:
+ blkaddr = BLKADDR_NPA;
+ goto exit;
+ case BLKTYPE_NIX:
+ /* For now assume NIX0 */
+ if (!pcifunc) {
+ blkaddr = BLKADDR_NIX0;
+ goto exit;
+ }
+ break;
+ case BLKTYPE_SSO:
+ blkaddr = BLKADDR_SSO;
+ goto exit;
+ case BLKTYPE_SSOW:
+ blkaddr = BLKADDR_SSOW;
+ goto exit;
+ case BLKTYPE_TIM:
+ blkaddr = BLKADDR_TIM;
+ goto exit;
+ case BLKTYPE_CPT:
+ /* For now assume CPT0 */
+ if (!pcifunc) {
+ blkaddr = BLKADDR_CPT0;
+ goto exit;
+ }
+ break;
+ }
+
+ /* Check if this is a RVU PF or VF */
+ if (pcifunc & RVU_PFVF_FUNC_MASK) {
+ is_pf = false;
+ devnum = rvu_get_hwvf(rvu, pcifunc);
+ } else {
+ is_pf = true;
+ devnum = rvu_get_pf(pcifunc);
+ }
+
+ /* Check if the 'pcifunc' has a NIX LF from 'BLKADDR_NIX0' or
+ * 'BLKADDR_NIX1'.
+ */
+ if (blktype == BLKTYPE_NIX) {
+ reg = is_pf ? RVU_PRIV_PFX_NIXX_CFG(0) :
+ RVU_PRIV_HWVFX_NIXX_CFG(0);
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
+ if (cfg) {
+ blkaddr = BLKADDR_NIX0;
+ goto exit;
+ }
+
+ reg = is_pf ? RVU_PRIV_PFX_NIXX_CFG(1) :
+ RVU_PRIV_HWVFX_NIXX_CFG(1);
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
+ if (cfg)
+ blkaddr = BLKADDR_NIX1;
+ }
+
+ if (blktype == BLKTYPE_CPT) {
+ reg = is_pf ? RVU_PRIV_PFX_CPTX_CFG(0) :
+ RVU_PRIV_HWVFX_CPTX_CFG(0);
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
+ if (cfg) {
+ blkaddr = BLKADDR_CPT0;
+ goto exit;
+ }
+
+ reg = is_pf ? RVU_PRIV_PFX_CPTX_CFG(1) :
+ RVU_PRIV_HWVFX_CPTX_CFG(1);
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
+ if (cfg)
+ blkaddr = BLKADDR_CPT1;
+ }
+
+exit:
+ if (is_block_implemented(rvu->hw, blkaddr))
+ return blkaddr;
+ return -ENODEV;
+}
+
+static void rvu_update_rsrc_map(struct rvu *rvu, struct rvu_pfvf *pfvf,
+ struct rvu_block *block, u16 pcifunc,
+ u16 lf, bool attach)
+{
+ int devnum, num_lfs = 0;
+ bool is_pf;
+ u64 reg;
+
+ if (lf >= block->lf.max) {
+ dev_err(&rvu->pdev->dev,
+ "%s: FATAL: LF %d is >= %s's max lfs i.e %d\n",
+ __func__, lf, block->name, block->lf.max);
+ return;
+ }
+
+ /* Check if this is for a RVU PF or VF */
+ if (pcifunc & RVU_PFVF_FUNC_MASK) {
+ is_pf = false;
+ devnum = rvu_get_hwvf(rvu, pcifunc);
+ } else {
+ is_pf = true;
+ devnum = rvu_get_pf(pcifunc);
+ }
+
+ block->fn_map[lf] = attach ? pcifunc : 0;
+
+ switch (block->addr) {
+ case BLKADDR_NPA:
+ pfvf->npalf = attach ? true : false;
+ num_lfs = pfvf->npalf;
+ break;
+ case BLKADDR_NIX0:
+ case BLKADDR_NIX1:
+ pfvf->nixlf = attach ? true : false;
+ num_lfs = pfvf->nixlf;
+ break;
+ case BLKADDR_SSO:
+ attach ? pfvf->sso++ : pfvf->sso--;
+ num_lfs = pfvf->sso;
+ break;
+ case BLKADDR_SSOW:
+ attach ? pfvf->ssow++ : pfvf->ssow--;
+ num_lfs = pfvf->ssow;
+ break;
+ case BLKADDR_TIM:
+ attach ? pfvf->timlfs++ : pfvf->timlfs--;
+ num_lfs = pfvf->timlfs;
+ break;
+ case BLKADDR_CPT0:
+ attach ? pfvf->cptlfs++ : pfvf->cptlfs--;
+ num_lfs = pfvf->cptlfs;
+ break;
+ case BLKADDR_CPT1:
+ attach ? pfvf->cpt1_lfs++ : pfvf->cpt1_lfs--;
+ num_lfs = pfvf->cpt1_lfs;
+ break;
+ }
+
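+ /* Update the per-PF/VF LF count register HW maintains for this block */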
+ reg = is_pf ? block->pf_lfcnt_reg : block->vf_lfcnt_reg;
+ rvu_write64(rvu, BLKADDR_RVUM, reg | (devnum << 16), num_lfs);
+}
+
+inline int rvu_get_pf(u16 pcifunc)
+{
+ return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
+}
+
+void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf)
+{
+ u64 cfg;
+
+ /* Get numVFs attached to this PF and first HWVF */
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
+ if (numvfs)
+ *numvfs = (cfg >> 12) & 0xFF;
+ if (hwvf)
+ *hwvf = cfg & 0xFFF;
+}
+
+int rvu_get_hwvf(struct rvu *rvu, int pcifunc)
+{
+ int pf, func;
+ u64 cfg;
+
+ pf = rvu_get_pf(pcifunc);
+ func = pcifunc & RVU_PFVF_FUNC_MASK;
+
+ /* Get first HWVF attached to this PF */
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
+
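+ /* HWVF index = first HWVF of this PF + (func - 1); 'func' is 1-based for VFs */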
+ return ((cfg & 0xFFF) + func - 1);
+}
+
+struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc)
+{
+ /* Check if it is a PF or VF */
+ if (pcifunc & RVU_PFVF_FUNC_MASK)
+ return &rvu->hwvf[rvu_get_hwvf(rvu, pcifunc)];
+ else
+ return &rvu->pf[rvu_get_pf(pcifunc)];
+}
+
+static bool is_pf_func_valid(struct rvu *rvu, u16 pcifunc)
+{
+ int pf, vf, nvfs;
+ u64 cfg;
+
+ pf = rvu_get_pf(pcifunc);
+ if (pf >= rvu->hw->total_pfs)
+ return false;
+
+ if (!(pcifunc & RVU_PFVF_FUNC_MASK))
+ return true;
+
+ /* Check if VF is within number of VFs attached to this PF */
+ vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
+ nvfs = (cfg >> 12) & 0xFF;
+ if (vf >= nvfs)
+ return false;
+
+ return true;
+}
+
+bool is_block_implemented(struct rvu_hwinfo *hw, int blkaddr)
+{
+ struct rvu_block *block;
+
+ if (blkaddr < BLKADDR_RVUM || blkaddr >= BLK_COUNT)
+ return false;
+
+ block = &hw->block[blkaddr];
+ return block->implemented;
+}
+
+static void rvu_check_block_implemented(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int blkid;
+ u64 cfg;
+
+ /* For each block check if 'implemented' bit is set */
+ for (blkid = 0; blkid < BLK_COUNT; blkid++) {
+ block = &hw->block[blkid];
+ cfg = rvupf_read64(rvu, RVU_PF_BLOCK_ADDRX_DISC(blkid));
+ if (cfg & BIT_ULL(11))
+ block->implemented = true;
+ }
+}
+
+static void rvu_setup_rvum_blk_revid(struct rvu *rvu)
+{
+ rvu_write64(rvu, BLKADDR_RVUM,
+ RVU_PRIV_BLOCK_TYPEX_REV(BLKTYPE_RVUM),
+ RVU_BLK_RVUM_REVID);
+}
+
+static void rvu_clear_rvum_blk_revid(struct rvu *rvu)
+{
+ rvu_write64(rvu, BLKADDR_RVUM,
+ RVU_PRIV_BLOCK_TYPEX_REV(BLKTYPE_RVUM), 0x00);
+}
+
+int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf)
+{
+ int err;
+
+ if (!block->implemented)
+ return 0;
+
+ rvu_write64(rvu, block->addr, block->lfreset_reg, lf | BIT_ULL(12));
+ err = rvu_poll_reg(rvu, block->addr, block->lfreset_reg, BIT_ULL(12),
+ true);
+ return err;
+}
+
+static void rvu_block_reset(struct rvu *rvu, int blkaddr, u64 rst_reg)
+{
+ struct rvu_block *block = &rvu->hw->block[blkaddr];
+ int err;
+
+ if (!block->implemented)
+ return;
+
+ rvu_write64(rvu, blkaddr, rst_reg, BIT_ULL(0));
+ err = rvu_poll_reg(rvu, blkaddr, rst_reg, BIT_ULL(63), true);
+ if (err) {
+ dev_err(rvu->dev, "HW block:%d reset timeout retrying again\n", blkaddr);
+ while (rvu_poll_reg(rvu, blkaddr, rst_reg, BIT_ULL(63), true) == -EBUSY)
+ ;
+ }
+}
+
+static void rvu_reset_all_blocks(struct rvu *rvu)
+{
+ /* Do a HW reset of all RVU blocks */
+ rvu_block_reset(rvu, BLKADDR_NPA, NPA_AF_BLK_RST);
+ rvu_block_reset(rvu, BLKADDR_NIX0, NIX_AF_BLK_RST);
+ rvu_block_reset(rvu, BLKADDR_NIX1, NIX_AF_BLK_RST);
+ rvu_block_reset(rvu, BLKADDR_NPC, NPC_AF_BLK_RST);
+ rvu_block_reset(rvu, BLKADDR_SSO, SSO_AF_BLK_RST);
+ rvu_block_reset(rvu, BLKADDR_TIM, TIM_AF_BLK_RST);
+ rvu_block_reset(rvu, BLKADDR_CPT0, CPT_AF_BLK_RST);
+ rvu_block_reset(rvu, BLKADDR_CPT1, CPT_AF_BLK_RST);
+ rvu_block_reset(rvu, BLKADDR_NDC_NIX0_RX, NDC_AF_BLK_RST);
+ rvu_block_reset(rvu, BLKADDR_NDC_NIX0_TX, NDC_AF_BLK_RST);
+ rvu_block_reset(rvu, BLKADDR_NDC_NIX1_RX, NDC_AF_BLK_RST);
+ rvu_block_reset(rvu, BLKADDR_NDC_NIX1_TX, NDC_AF_BLK_RST);
+ rvu_block_reset(rvu, BLKADDR_NDC_NPA0, NDC_AF_BLK_RST);
+}
+
+static void rvu_scan_block(struct rvu *rvu, struct rvu_block *block)
+{
+ struct rvu_pfvf *pfvf;
+ u64 cfg;
+ int lf;
+
+ for (lf = 0; lf < block->lf.max; lf++) {
+ cfg = rvu_read64(rvu, block->addr,
+ block->lfcfg_reg | (lf << block->lfshift));
+ if (!(cfg & BIT_ULL(63)))
+ continue;
+
+ /* Set this resource as being used */
+ __set_bit(lf, block->lf.bmap);
+
+ /* Get to whom this LF is attached */
+ pfvf = rvu_get_pfvf(rvu, (cfg >> 8) & 0xFFFF);
+ rvu_update_rsrc_map(rvu, pfvf, block,
+ (cfg >> 8) & 0xFFFF, lf, true);
+
+ /* Set start MSIX vector for this LF within this PF/VF */
+ rvu_set_msix_offset(rvu, pfvf, block, lf);
+ }
+}
+
+static void rvu_check_min_msix_vec(struct rvu *rvu, int nvecs, int pf, int vf)
+{
+ int min_vecs;
+
+ if (!vf)
+ goto check_pf;
+
+ if (!nvecs) {
+ dev_warn(rvu->dev,
+ "PF%d:VF%d is configured with zero msix vectors, %d\n",
+ pf, vf - 1, nvecs);
+ }
+ return;
+
+check_pf:
+ if (pf == 0)
+ min_vecs = RVU_AF_INT_VEC_CNT + RVU_PF_INT_VEC_CNT;
+ else
+ min_vecs = RVU_PF_INT_VEC_CNT;
+
+ if (!(nvecs < min_vecs))
+ return;
+ dev_warn(rvu->dev,
+ "PF%d is configured with too few vectors, %d, min is %d\n",
+ pf, nvecs, min_vecs);
+}
+
+static int rvu_setup_msix_resources(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ int pf, vf, numvfs, hwvf, err;
+ int nvecs, offset, max_msix;
+ struct rvu_pfvf *pfvf;
+ u64 cfg, phy_addr;
+ dma_addr_t iova;
+
+ for (pf = 0; pf < hw->total_pfs; pf++) {
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
+ /* If PF is not enabled, nothing to do */
+ if (!((cfg >> 20) & 0x01))
+ continue;
+
+ rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
+
+ pfvf = &rvu->pf[pf];
+ /* Get num of MSIX vectors attached to this PF */
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_MSIX_CFG(pf));
+ pfvf->msix.max = ((cfg >> 32) & 0xFFF) + 1;
+ rvu_check_min_msix_vec(rvu, pfvf->msix.max, pf, 0);
+
+ /* Alloc msix bitmap for this PF */
+ err = rvu_alloc_bitmap(&pfvf->msix);
+ if (err)
+ return err;
+
+ /* Allocate memory for MSIX vector to RVU block LF mapping */
+ pfvf->msix_lfmap = devm_kcalloc(rvu->dev, pfvf->msix.max,
+ sizeof(u16), GFP_KERNEL);
+ if (!pfvf->msix_lfmap)
+ return -ENOMEM;
+
+ /* For PF0 (AF), firmware will set the MSIX vector offsets for
+ * the AF, block AF and PF0_INT vectors, so jump to the VFs.
+ */
+ if (!pf)
+ goto setup_vfmsix;
+
+ /* Set MSIX offset for PF's 'RVU_PF_INT_VEC' vectors.
+ * These are allocated on driver init and never freed,
+ * so no need to set 'msix_lfmap' for these.
+ */
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_INT_CFG(pf));
+ nvecs = (cfg >> 12) & 0xFF;
+ cfg &= ~0x7FFULL;
+ offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);
+ rvu_write64(rvu, BLKADDR_RVUM,
+ RVU_PRIV_PFX_INT_CFG(pf), cfg | offset);
+setup_vfmsix:
+ /* Alloc msix bitmap for VFs */
+ for (vf = 0; vf < numvfs; vf++) {
+ pfvf = &rvu->hwvf[hwvf + vf];
+ /* Get num of MSIX vectors attached to this VF */
+ cfg = rvu_read64(rvu, BLKADDR_RVUM,
+ RVU_PRIV_PFX_MSIX_CFG(pf));
+ pfvf->msix.max = (cfg & 0xFFF) + 1;
+ rvu_check_min_msix_vec(rvu, pfvf->msix.max, pf, vf + 1);
+
+ /* Alloc msix bitmap for this VF */
+ err = rvu_alloc_bitmap(&pfvf->msix);
+ if (err)
+ return err;
+
+ pfvf->msix_lfmap =
+ devm_kcalloc(rvu->dev, pfvf->msix.max,
+ sizeof(u16), GFP_KERNEL);
+ if (!pfvf->msix_lfmap)
+ return -ENOMEM;
+
+ /* Set MSIX offset for HWVF's 'RVU_VF_INT_VEC' vectors.
+ * These are allocated on driver init and never freed,
+ * so no need to set 'msix_lfmap' for these.
+ */
+ cfg = rvu_read64(rvu, BLKADDR_RVUM,
+ RVU_PRIV_HWVFX_INT_CFG(hwvf + vf));
+ nvecs = (cfg >> 12) & 0xFF;
+ cfg &= ~0x7FFULL;
+ offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);
+ rvu_write64(rvu, BLKADDR_RVUM,
+ RVU_PRIV_HWVFX_INT_CFG(hwvf + vf),
+ cfg | offset);
+ }
+ }
+
+ /* HW interprets the RVU_AF_MSIXTR_BASE address as an IOVA, hence
+ * create an IOMMU mapping for the physical address configured by
+ * firmware and reconfigure RVU_AF_MSIXTR_BASE with the IOVA.
+ */
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
+ max_msix = cfg & 0xFFFFF;
+ if (rvu->fwdata && rvu->fwdata->msixtr_base)
+ phy_addr = rvu->fwdata->msixtr_base;
+ else
+ phy_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE);
+
+ iova = dma_map_resource(rvu->dev, phy_addr,
+ max_msix * PCI_MSIX_ENTRY_SIZE,
+ DMA_BIDIRECTIONAL, 0);
+
+ if (dma_mapping_error(rvu->dev, iova))
+ return -ENOMEM;
+
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE, (u64)iova);
+ rvu->msix_base_iova = iova;
+ rvu->msixtr_base_phy = phy_addr;
+
+ return 0;
+}
+
+static void rvu_reset_msix(struct rvu *rvu)
+{
+ /* Restore msixtr base register */
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE,
+ rvu->msixtr_base_phy);
+}
+
+static void rvu_free_hw_resources(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ struct rvu_pfvf *pfvf;
+ int id, max_msix;
+ u64 cfg;
+
+ rvu_npa_freemem(rvu);
+ rvu_npc_freemem(rvu);
+ rvu_nix_freemem(rvu);
+
+ /* Free block LF bitmaps */
+ for (id = 0; id < BLK_COUNT; id++) {
+ block = &hw->block[id];
+ kfree(block->lf.bmap);
+ }
+
+ /* Free MSIX bitmaps */
+ for (id = 0; id < hw->total_pfs; id++) {
+ pfvf = &rvu->pf[id];
+ kfree(pfvf->msix.bmap);
+ }
+
+ for (id = 0; id < hw->total_vfs; id++) {
+ pfvf = &rvu->hwvf[id];
+ kfree(pfvf->msix.bmap);
+ }
+
+ /* Unmap MSIX vector base IOVA mapping */
+ if (!rvu->msix_base_iova)
+ return;
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
+ max_msix = cfg & 0xFFFFF;
+ dma_unmap_resource(rvu->dev, rvu->msix_base_iova,
+ max_msix * PCI_MSIX_ENTRY_SIZE,
+ DMA_BIDIRECTIONAL, 0);
+
+ rvu_reset_msix(rvu);
+ mutex_destroy(&rvu->rsrc_lock);
+}
+
+static void rvu_setup_pfvf_macaddress(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ int pf, vf, numvfs, hwvf;
+ struct rvu_pfvf *pfvf;
+ u64 *mac;
+
+ for (pf = 0; pf < hw->total_pfs; pf++) {
+ /* For PF0 (AF), assign MAC addresses only to its VFs (LBK VFs) */
+ if (!pf)
+ goto lbkvf;
+
+ if (!is_pf_cgxmapped(rvu, pf))
+ continue;
+ /* Assign MAC address to PF */
+ pfvf = &rvu->pf[pf];
+ if (rvu->fwdata && pf < PF_MACNUM_MAX) {
+ mac = &rvu->fwdata->pf_macs[pf];
+ if (*mac)
+ u64_to_ether_addr(*mac, pfvf->mac_addr);
+ else
+ eth_random_addr(pfvf->mac_addr);
+ } else {
+ eth_random_addr(pfvf->mac_addr);
+ }
+ ether_addr_copy(pfvf->default_mac, pfvf->mac_addr);
+
+lbkvf:
+ /* Assign MAC addresses to VFs */
+ rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
+ for (vf = 0; vf < numvfs; vf++, hwvf++) {
+ pfvf = &rvu->hwvf[hwvf];
+ if (rvu->fwdata && hwvf < VF_MACNUM_MAX) {
+ mac = &rvu->fwdata->vf_macs[hwvf];
+ if (*mac)
+ u64_to_ether_addr(*mac, pfvf->mac_addr);
+ else
+ eth_random_addr(pfvf->mac_addr);
+ } else {
+ eth_random_addr(pfvf->mac_addr);
+ }
+ ether_addr_copy(pfvf->default_mac, pfvf->mac_addr);
+ }
+ }
+}
+
+static int rvu_fwdata_init(struct rvu *rvu)
+{
+ u64 fwdbase;
+ int err;
+
+ /* Get firmware data base address */
+ err = cgx_get_fwdata_base(&fwdbase);
+ if (err)
+ goto fail;
+ rvu->fwdata = ioremap_wc(fwdbase, sizeof(struct rvu_fwdata));
+ if (!rvu->fwdata)
+ goto fail;
+ if (!is_rvu_fwdata_valid(rvu)) {
+ dev_err(rvu->dev,
+ "Mismatch in 'fwdata' struct btw kernel and firmware\n");
+ iounmap(rvu->fwdata);
+ rvu->fwdata = NULL;
+ return -EINVAL;
+ }
+ return 0;
+fail:
+ dev_info(rvu->dev, "Unable to fetch 'fwdata' from firmware\n");
+ return -EIO;
+}
+
+static void rvu_fwdata_exit(struct rvu *rvu)
+{
+ if (rvu->fwdata)
+ iounmap(rvu->fwdata);
+}
+
+static int rvu_setup_nix_hw_resource(struct rvu *rvu, int blkaddr)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int blkid;
+ u64 cfg;
+
+ /* Init NIX LF's bitmap */
+ block = &hw->block[blkaddr];
+ if (!block->implemented)
+ return 0;
+ blkid = (blkaddr == BLKADDR_NIX0) ? 0 : 1;
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
+ block->lf.max = cfg & 0xFFF;
+ block->addr = blkaddr;
+ block->type = BLKTYPE_NIX;
+ block->lfshift = 8;
+ block->lookup_reg = NIX_AF_RVU_LF_CFG_DEBUG;
+ block->pf_lfcnt_reg = RVU_PRIV_PFX_NIXX_CFG(blkid);
+ block->vf_lfcnt_reg = RVU_PRIV_HWVFX_NIXX_CFG(blkid);
+ block->lfcfg_reg = NIX_PRIV_LFX_CFG;
+ block->msixcfg_reg = NIX_PRIV_LFX_INT_CFG;
+ block->lfreset_reg = NIX_AF_LF_RST;
+ block->rvu = rvu;
+ sprintf(block->name, "NIX%d", blkid);
+ rvu->nix_blkaddr[blkid] = blkaddr;
+ return rvu_alloc_bitmap(&block->lf);
+}
+
+static int rvu_setup_cpt_hw_resource(struct rvu *rvu, int blkaddr)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int blkid;
+ u64 cfg;
+
+ /* Init CPT LF's bitmap */
+ block = &hw->block[blkaddr];
+ if (!block->implemented)
+ return 0;
+ blkid = (blkaddr == BLKADDR_CPT0) ? 0 : 1;
+ cfg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS0);
+ block->lf.max = cfg & 0xFF;
+ block->addr = blkaddr;
+ block->type = BLKTYPE_CPT;
+ block->multislot = true;
+ block->lfshift = 3;
+ block->lookup_reg = CPT_AF_RVU_LF_CFG_DEBUG;
+ block->pf_lfcnt_reg = RVU_PRIV_PFX_CPTX_CFG(blkid);
+ block->vf_lfcnt_reg = RVU_PRIV_HWVFX_CPTX_CFG(blkid);
+ block->lfcfg_reg = CPT_PRIV_LFX_CFG;
+ block->msixcfg_reg = CPT_PRIV_LFX_INT_CFG;
+ block->lfreset_reg = CPT_AF_LF_RST;
+ block->rvu = rvu;
+ sprintf(block->name, "CPT%d", blkid);
+ return rvu_alloc_bitmap(&block->lf);
+}
+
+static void rvu_get_lbk_bufsize(struct rvu *rvu)
+{
+ struct pci_dev *pdev = NULL;
+ void __iomem *base;
+ u64 lbk_const;
+
+ pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVID_OCTEONTX2_LBK, pdev);
+ if (!pdev)
+ return;
+
+ base = pci_ioremap_bar(pdev, 0);
+ if (!base)
+ goto err_put;
+
+ lbk_const = readq(base + LBK_CONST);
+
+ /* Cache the LBK FIFO size */
+ rvu->hw->lbk_bufsize = FIELD_GET(LBK_CONST_BUF_SIZE, lbk_const);
+
+ iounmap(base);
+err_put:
+ pci_dev_put(pdev);
+}
+
+static int rvu_setup_hw_resources(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int blkid, err;
+ u64 cfg;
+
+ /* Get HW supported max RVU PF & VF count */
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
+ hw->total_pfs = (cfg >> 32) & 0xFF;
+ hw->total_vfs = (cfg >> 20) & 0xFFF;
+ hw->max_vfs_per_pf = (cfg >> 40) & 0xFF;
+
+ /* Init NPA LF's bitmap */
+ block = &hw->block[BLKADDR_NPA];
+ if (!block->implemented)
+ goto nix;
+ cfg = rvu_read64(rvu, BLKADDR_NPA, NPA_AF_CONST);
+ block->lf.max = (cfg >> 16) & 0xFFF;
+ block->addr = BLKADDR_NPA;
+ block->type = BLKTYPE_NPA;
+ block->lfshift = 8;
+ block->lookup_reg = NPA_AF_RVU_LF_CFG_DEBUG;
+ block->pf_lfcnt_reg = RVU_PRIV_PFX_NPA_CFG;
+ block->vf_lfcnt_reg = RVU_PRIV_HWVFX_NPA_CFG;
+ block->lfcfg_reg = NPA_PRIV_LFX_CFG;
+ block->msixcfg_reg = NPA_PRIV_LFX_INT_CFG;
+ block->lfreset_reg = NPA_AF_LF_RST;
+ block->rvu = rvu;
+ sprintf(block->name, "NPA");
+ err = rvu_alloc_bitmap(&block->lf);
+ if (err) {
+ dev_err(rvu->dev,
+ "%s: Failed to allocate NPA LF bitmap\n", __func__);
+ return err;
+ }
+
+nix:
+ err = rvu_setup_nix_hw_resource(rvu, BLKADDR_NIX0);
+ if (err) {
+ dev_err(rvu->dev,
+ "%s: Failed to allocate NIX0 LFs bitmap\n", __func__);
+ return err;
+ }
+
+ err = rvu_setup_nix_hw_resource(rvu, BLKADDR_NIX1);
+ if (err) {
+ dev_err(rvu->dev,
+ "%s: Failed to allocate NIX1 LFs bitmap\n", __func__);
+ return err;
+ }
+
+ /* Init SSO group's bitmap */
+ block = &hw->block[BLKADDR_SSO];
+ if (!block->implemented)
+ goto ssow;
+ cfg = rvu_read64(rvu, BLKADDR_SSO, SSO_AF_CONST);
+ block->lf.max = cfg & 0xFFFF;
+ block->addr = BLKADDR_SSO;
+ block->type = BLKTYPE_SSO;
+ block->multislot = true;
+ block->lfshift = 3;
+ block->lookup_reg = SSO_AF_RVU_LF_CFG_DEBUG;
+ block->pf_lfcnt_reg = RVU_PRIV_PFX_SSO_CFG;
+ block->vf_lfcnt_reg = RVU_PRIV_HWVFX_SSO_CFG;
+ block->lfcfg_reg = SSO_PRIV_LFX_HWGRP_CFG;
+ block->msixcfg_reg = SSO_PRIV_LFX_HWGRP_INT_CFG;
+ block->lfreset_reg = SSO_AF_LF_HWGRP_RST;
+ block->rvu = rvu;
+ sprintf(block->name, "SSO GROUP");
+ err = rvu_alloc_bitmap(&block->lf);
+ if (err) {
+ dev_err(rvu->dev,
+ "%s: Failed to allocate SSO LF bitmap\n", __func__);
+ return err;
+ }
+
+ssow:
+ /* Init SSO workslot's bitmap */
+ block = &hw->block[BLKADDR_SSOW];
+ if (!block->implemented)
+ goto tim;
+ block->lf.max = (cfg >> 56) & 0xFF;
+ block->addr = BLKADDR_SSOW;
+ block->type = BLKTYPE_SSOW;
+ block->multislot = true;
+ block->lfshift = 3;
+ block->lookup_reg = SSOW_AF_RVU_LF_HWS_CFG_DEBUG;
+ block->pf_lfcnt_reg = RVU_PRIV_PFX_SSOW_CFG;
+ block->vf_lfcnt_reg = RVU_PRIV_HWVFX_SSOW_CFG;
+ block->lfcfg_reg = SSOW_PRIV_LFX_HWS_CFG;
+ block->msixcfg_reg = SSOW_PRIV_LFX_HWS_INT_CFG;
+ block->lfreset_reg = SSOW_AF_LF_HWS_RST;
+ block->rvu = rvu;
+ sprintf(block->name, "SSOWS");
+ err = rvu_alloc_bitmap(&block->lf);
+ if (err) {
+ dev_err(rvu->dev,
+ "%s: Failed to allocate SSOW LF bitmap\n", __func__);
+ return err;
+ }
+
+tim:
+ /* Init TIM LF's bitmap */
+ block = &hw->block[BLKADDR_TIM];
+ if (!block->implemented)
+ goto cpt;
+ cfg = rvu_read64(rvu, BLKADDR_TIM, TIM_AF_CONST);
+ block->lf.max = cfg & 0xFFFF;
+ block->addr = BLKADDR_TIM;
+ block->type = BLKTYPE_TIM;
+ block->multislot = true;
+ block->lfshift = 3;
+ block->lookup_reg = TIM_AF_RVU_LF_CFG_DEBUG;
+ block->pf_lfcnt_reg = RVU_PRIV_PFX_TIM_CFG;
+ block->vf_lfcnt_reg = RVU_PRIV_HWVFX_TIM_CFG;
+ block->lfcfg_reg = TIM_PRIV_LFX_CFG;
+ block->msixcfg_reg = TIM_PRIV_LFX_INT_CFG;
+ block->lfreset_reg = TIM_AF_LF_RST;
+ block->rvu = rvu;
+ sprintf(block->name, "TIM");
+ err = rvu_alloc_bitmap(&block->lf);
+ if (err) {
+ dev_err(rvu->dev,
+ "%s: Failed to allocate TIM LF bitmap\n", __func__);
+ return err;
+ }
+
+cpt:
+ err = rvu_setup_cpt_hw_resource(rvu, BLKADDR_CPT0);
+ if (err) {
+ dev_err(rvu->dev,
+ "%s: Failed to allocate CPT0 LF bitmap\n", __func__);
+ return err;
+ }
+ err = rvu_setup_cpt_hw_resource(rvu, BLKADDR_CPT1);
+ if (err) {
+ dev_err(rvu->dev,
+ "%s: Failed to allocate CPT1 LF bitmap\n", __func__);
+ return err;
+ }
+
+ /* Allocate memory for PFVF data */
+ rvu->pf = devm_kcalloc(rvu->dev, hw->total_pfs,
+ sizeof(struct rvu_pfvf), GFP_KERNEL);
+ if (!rvu->pf) {
+ dev_err(rvu->dev,
+ "%s: Failed to allocate memory for PF's rvu_pfvf struct\n", __func__);
+ return -ENOMEM;
+ }
+
+ rvu->hwvf = devm_kcalloc(rvu->dev, hw->total_vfs,
+ sizeof(struct rvu_pfvf), GFP_KERNEL);
+ if (!rvu->hwvf) {
+ dev_err(rvu->dev,
+ "%s: Failed to allocate memory for VF's rvu_pfvf struct\n", __func__);
+ return -ENOMEM;
+ }
+
+ mutex_init(&rvu->rsrc_lock);
+
+ rvu_fwdata_init(rvu);
+
+ err = rvu_setup_msix_resources(rvu);
+ if (err) {
+ dev_err(rvu->dev,
+ "%s: Failed to setup MSIX resources\n", __func__);
+ return err;
+ }
+
+ for (blkid = 0; blkid < BLK_COUNT; blkid++) {
+ block = &hw->block[blkid];
+ if (!block->lf.bmap)
+ continue;
+
+ /* Allocate memory for block LF/slot to pcifunc mapping info */
+ block->fn_map = devm_kcalloc(rvu->dev, block->lf.max,
+ sizeof(u16), GFP_KERNEL);
+ if (!block->fn_map) {
+ err = -ENOMEM;
+ goto msix_err;
+ }
+
+ /* Scan all blocks to check if low level firmware has
+ * already provisioned any of the resources to a PF/VF.
+ */
+ rvu_scan_block(rvu, block);
+ }
+
+ err = rvu_set_channels_base(rvu);
+ if (err)
+ goto msix_err;
+
+ err = rvu_npc_init(rvu);
+ if (err) {
+ dev_err(rvu->dev, "%s: Failed to initialize npc\n", __func__);
+ goto npc_err;
+ }
+
+ err = rvu_cgx_init(rvu);
+ if (err) {
+ dev_err(rvu->dev, "%s: Failed to initialize cgx\n", __func__);
+ goto cgx_err;
+ }
+
+ err = rvu_npc_exact_init(rvu);
+ if (err) {
+ dev_err(rvu->dev, "failed to initialize exact match table\n");
+ return err;
+ }
+
+ /* Assign MACs for CGX mapped functions */
+ rvu_setup_pfvf_macaddress(rvu);
+
+ err = rvu_npa_init(rvu);
+ if (err) {
+ dev_err(rvu->dev, "%s: Failed to initialize npa\n", __func__);
+ goto npa_err;
+ }
+
+ rvu_get_lbk_bufsize(rvu);
+
+ err = rvu_nix_init(rvu);
+ if (err) {
+ dev_err(rvu->dev, "%s: Failed to initialize nix\n", __func__);
+ goto nix_err;
+ }
+
+ err = rvu_sdp_init(rvu);
+ if (err) {
+ dev_err(rvu->dev, "%s: Failed to initialize sdp\n", __func__);
+ goto nix_err;
+ }
+
+ rvu_program_channels(rvu);
+
+ err = rvu_mcs_init(rvu);
+ if (err) {
+ dev_err(rvu->dev, "%s: Failed to initialize mcs\n", __func__);
+ goto nix_err;
+ }
+
+ err = rvu_cpt_init(rvu);
+ if (err) {
+ dev_err(rvu->dev, "%s: Failed to initialize cpt\n", __func__);
+ goto mcs_err;
+ }
+
+ return 0;
+
+mcs_err:
+ rvu_mcs_exit(rvu);
+nix_err:
+ rvu_nix_freemem(rvu);
+npa_err:
+ rvu_npa_freemem(rvu);
+cgx_err:
+ rvu_cgx_exit(rvu);
+npc_err:
+ rvu_npc_freemem(rvu);
+ rvu_fwdata_exit(rvu);
+msix_err:
+ rvu_reset_msix(rvu);
+ return err;
+}
+
+/* NPA and NIX admin queue APIs */
+void rvu_aq_free(struct rvu *rvu, struct admin_queue *aq)
+{
+ if (!aq)
+ return;
+
+ qmem_free(rvu->dev, aq->inst);
+ qmem_free(rvu->dev, aq->res);
+ devm_kfree(rvu->dev, aq);
+}
+
+int rvu_aq_alloc(struct rvu *rvu, struct admin_queue **ad_queue,
+ int qsize, int inst_size, int res_size)
+{
+ struct admin_queue *aq;
+ int err;
+
+ *ad_queue = devm_kzalloc(rvu->dev, sizeof(*aq), GFP_KERNEL);
+ if (!*ad_queue)
+ return -ENOMEM;
+ aq = *ad_queue;
+
+ /* Alloc memory for instructions i.e. the AQ */
+ err = qmem_alloc(rvu->dev, &aq->inst, qsize, inst_size);
+ if (err) {
+ devm_kfree(rvu->dev, aq);
+ return err;
+ }
+
+ /* Alloc memory for results */
+ err = qmem_alloc(rvu->dev, &aq->res, qsize, res_size);
+ if (err) {
+ rvu_aq_free(rvu, aq);
+ return err;
+ }
+
+ spin_lock_init(&aq->lock);
+ return 0;
+}
+
+int rvu_mbox_handler_ready(struct rvu *rvu, struct msg_req *req,
+ struct ready_msg_rsp *rsp)
+{
+ if (rvu->fwdata) {
+ rsp->rclk_freq = rvu->fwdata->rclk;
+ rsp->sclk_freq = rvu->fwdata->sclk;
+ }
+ return 0;
+}
+
+/* Get the current count of an RVU block's LFs/slots
+ * provisioned to a given RVU func.
+ */
+u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blkaddr)
+{
+ switch (blkaddr) {
+ case BLKADDR_NPA:
+ return pfvf->npalf ? 1 : 0;
+ case BLKADDR_NIX0:
+ case BLKADDR_NIX1:
+ return pfvf->nixlf ? 1 : 0;
+ case BLKADDR_SSO:
+ return pfvf->sso;
+ case BLKADDR_SSOW:
+ return pfvf->ssow;
+ case BLKADDR_TIM:
+ return pfvf->timlfs;
+ case BLKADDR_CPT0:
+ return pfvf->cptlfs;
+ case BLKADDR_CPT1:
+ return pfvf->cpt1_lfs;
+ }
+ return 0;
+}
+
+/* Return true if LFs of block type are attached to pcifunc */
+static bool is_blktype_attached(struct rvu_pfvf *pfvf, int blktype)
+{
+ switch (blktype) {
+ case BLKTYPE_NPA:
+ return pfvf->npalf ? 1 : 0;
+ case BLKTYPE_NIX:
+ return pfvf->nixlf ? 1 : 0;
+ case BLKTYPE_SSO:
+ return !!pfvf->sso;
+ case BLKTYPE_SSOW:
+ return !!pfvf->ssow;
+ case BLKTYPE_TIM:
+ return !!pfvf->timlfs;
+ case BLKTYPE_CPT:
+ return pfvf->cptlfs || pfvf->cpt1_lfs;
+ }
+
+ return false;
+}
+
+bool is_pffunc_map_valid(struct rvu *rvu, u16 pcifunc, int blktype)
+{
+ struct rvu_pfvf *pfvf;
+
+ if (!is_pf_func_valid(rvu, pcifunc))
+ return false;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+
+ /* Check if this PFFUNC has a LF of type blktype attached */
+ if (!is_blktype_attached(pfvf, blktype))
+ return false;
+
+ return true;
+}
+
+static int rvu_lookup_rsrc(struct rvu *rvu, struct rvu_block *block,
+ int pcifunc, int slot)
+{
+ u64 val;
+
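+ /* Trigger a <pcifunc, slot> to LF lookup; HW clears bit 13 when the result is ready */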
+ val = ((u64)pcifunc << 24) | (slot << 16) | (1ULL << 13);
+ rvu_write64(rvu, block->addr, block->lookup_reg, val);
+ /* Wait for the lookup to finish */
+ /* TODO: put some timeout here */
+ while (rvu_read64(rvu, block->addr, block->lookup_reg) & (1ULL << 13))
+ ;
+
+ val = rvu_read64(rvu, block->addr, block->lookup_reg);
+
+ /* Check LF valid bit */
+ if (!(val & (1ULL << 12)))
+ return -1;
+
+ return (val & 0xFFF);
+}
+
+int rvu_get_blkaddr_from_slot(struct rvu *rvu, int blktype, u16 pcifunc,
+ u16 global_slot, u16 *slot_in_block)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ int numlfs, total_lfs = 0, nr_blocks = 0;
+ int i, num_blkaddr[BLK_COUNT] = { 0 };
+ struct rvu_block *block;
+ int blkaddr;
+ u16 start_slot;
+
+ if (!is_blktype_attached(pfvf, blktype))
+ return -ENODEV;
+
+ /* Get all the block addresses from which LFs are attached to
+ * the given pcifunc in num_blkaddr[].
+ */
+ for (blkaddr = BLKADDR_RVUM; blkaddr < BLK_COUNT; blkaddr++) {
+ block = &rvu->hw->block[blkaddr];
+ if (block->type != blktype)
+ continue;
+ if (!is_block_implemented(rvu->hw, blkaddr))
+ continue;
+
+ numlfs = rvu_get_rsrc_mapcount(pfvf, blkaddr);
+ if (numlfs) {
+ total_lfs += numlfs;
+ num_blkaddr[nr_blocks] = blkaddr;
+ nr_blocks++;
+ }
+ }
+
+ if (global_slot >= total_lfs)
+ return -ENODEV;
+
+ /* Based on the given global slot number retrieve the
+ * correct block address out of all attached block
+ * addresses and slot number in that block.
+ */
+ total_lfs = 0;
+ blkaddr = -ENODEV;
+ for (i = 0; i < nr_blocks; i++) {
+ numlfs = rvu_get_rsrc_mapcount(pfvf, num_blkaddr[i]);
+ total_lfs += numlfs;
+ if (global_slot < total_lfs) {
+ blkaddr = num_blkaddr[i];
+ start_slot = total_lfs - numlfs;
+ *slot_in_block = global_slot - start_slot;
+ break;
+ }
+ }
+
+ return blkaddr;
+}
+
+static void rvu_detach_block(struct rvu *rvu, int pcifunc, int blktype)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int slot, lf, num_lfs;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, blktype, pcifunc);
+ if (blkaddr < 0)
+ return;
+
+ if (blktype == BLKTYPE_NIX)
+ rvu_nix_reset_mac(pfvf, pcifunc);
+
+ block = &hw->block[blkaddr];
+
+ num_lfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
+ if (!num_lfs)
+ return;
+
+ for (slot = 0; slot < num_lfs; slot++) {
+ lf = rvu_lookup_rsrc(rvu, block, pcifunc, slot);
+ if (lf < 0) /* This should never happen */
+ continue;
+
+ /* Disable the LF */
+ rvu_write64(rvu, blkaddr, block->lfcfg_reg |
+ (lf << block->lfshift), 0x00ULL);
+
+ /* Update SW maintained mapping info as well */
+ rvu_update_rsrc_map(rvu, pfvf, block,
+ pcifunc, lf, false);
+
+ /* Free the resource */
+ rvu_free_rsrc(&block->lf, lf);
+
+ /* Clear MSIX vector offset for this LF */
+ rvu_clear_msix_offset(rvu, pfvf, block, lf);
+ }
+}
+
+static int rvu_detach_rsrcs(struct rvu *rvu, struct rsrc_detach *detach,
+ u16 pcifunc)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ bool detach_all = true;
+ struct rvu_block *block;
+ int blkid;
+
+ mutex_lock(&rvu->rsrc_lock);
+
+ /* Check for partial resource detach */
+ if (detach && detach->partial)
+ detach_all = false;
+
+ /* Check for RVU block's LFs attached to this func,
+ * if so, detach them.
+ */
+ for (blkid = 0; blkid < BLK_COUNT; blkid++) {
+ block = &hw->block[blkid];
+ if (!block->lf.bmap)
+ continue;
+ if (!detach_all && detach) {
+ if (blkid == BLKADDR_NPA && !detach->npalf)
+ continue;
+ else if ((blkid == BLKADDR_NIX0) && !detach->nixlf)
+ continue;
+ else if ((blkid == BLKADDR_NIX1) && !detach->nixlf)
+ continue;
+ else if ((blkid == BLKADDR_SSO) && !detach->sso)
+ continue;
+ else if ((blkid == BLKADDR_SSOW) && !detach->ssow)
+ continue;
+ else if ((blkid == BLKADDR_TIM) && !detach->timlfs)
+ continue;
+ else if ((blkid == BLKADDR_CPT0) && !detach->cptlfs)
+ continue;
+ else if ((blkid == BLKADDR_CPT1) && !detach->cptlfs)
+ continue;
+ }
+ rvu_detach_block(rvu, pcifunc, block->type);
+ }
+
+ mutex_unlock(&rvu->rsrc_lock);
+ return 0;
+}
+
+int rvu_mbox_handler_detach_resources(struct rvu *rvu,
+ struct rsrc_detach *detach,
+ struct msg_rsp *rsp)
+{
+ return rvu_detach_rsrcs(rvu, detach, detach->hdr.pcifunc);
+}
+
+int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ int blkaddr = BLKADDR_NIX0, vf;
+ struct rvu_pfvf *pf;
+
+ pf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
+
+ /* All CGX mapped PFs are assigned a NIX block during init */
+ if (is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) {
+ blkaddr = pf->nix_blkaddr;
+ } else if (is_afvf(pcifunc)) {
+ vf = pcifunc - 1;
+ /* Assign NIX based on VF number. All even numbered VFs get
+ * NIX0 and odd numbered VFs get NIX1.
+ */
+ blkaddr = (vf & 1) ? BLKADDR_NIX1 : BLKADDR_NIX0;
+ /* NIX1 is not present on all silicons */
+ if (!is_block_implemented(rvu->hw, BLKADDR_NIX1))
+ blkaddr = BLKADDR_NIX0;
+ }
+
+ /* if SDP1 then the blkaddr is NIX1 */
+ if (is_sdp_pfvf(pcifunc) && pf->sdp_info->node_id == 1)
+ blkaddr = BLKADDR_NIX1;
+
+ switch (blkaddr) {
+ case BLKADDR_NIX1:
+ pfvf->nix_blkaddr = BLKADDR_NIX1;
+ pfvf->nix_rx_intf = NIX_INTFX_RX(1);
+ pfvf->nix_tx_intf = NIX_INTFX_TX(1);
+ break;
+ case BLKADDR_NIX0:
+ default:
+ pfvf->nix_blkaddr = BLKADDR_NIX0;
+ pfvf->nix_rx_intf = NIX_INTFX_RX(0);
+ pfvf->nix_tx_intf = NIX_INTFX_TX(0);
+ break;
+ }
+
+ return pfvf->nix_blkaddr;
+}
+
+static int rvu_get_attach_blkaddr(struct rvu *rvu, int blktype,
+ u16 pcifunc, struct rsrc_attach *attach)
+{
+ int blkaddr;
+
+ switch (blktype) {
+ case BLKTYPE_NIX:
+ blkaddr = rvu_get_nix_blkaddr(rvu, pcifunc);
+ break;
+ case BLKTYPE_CPT:
+ if (attach->hdr.ver < RVU_MULTI_BLK_VER)
+ return rvu_get_blkaddr(rvu, blktype, 0);
+ blkaddr = attach->cpt_blkaddr ? attach->cpt_blkaddr :
+ BLKADDR_CPT0;
+ if (blkaddr != BLKADDR_CPT0 && blkaddr != BLKADDR_CPT1)
+ return -ENODEV;
+ break;
+ default:
+ return rvu_get_blkaddr(rvu, blktype, 0);
+ }
+
+ if (is_block_implemented(rvu->hw, blkaddr))
+ return blkaddr;
+
+ return -ENODEV;
+}
+
+static void rvu_attach_block(struct rvu *rvu, int pcifunc, int blktype,
+ int num_lfs, struct rsrc_attach *attach)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int slot, lf;
+ int blkaddr;
+ u64 cfg;
+
+ if (!num_lfs)
+ return;
+
+ blkaddr = rvu_get_attach_blkaddr(rvu, blktype, pcifunc, attach);
+ if (blkaddr < 0)
+ return;
+
+ block = &hw->block[blkaddr];
+ if (!block->lf.bmap)
+ return;
+
+ for (slot = 0; slot < num_lfs; slot++) {
+ /* Allocate the resource */
+ lf = rvu_alloc_rsrc(&block->lf);
+ if (lf < 0)
+ return;
+
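+ /* LF config: bit 63 = enable, bits [23:8] = owning pcifunc, slot in the low bits */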
+ cfg = (1ULL << 63) | (pcifunc << 8) | slot;
+ rvu_write64(rvu, blkaddr, block->lfcfg_reg |
+ (lf << block->lfshift), cfg);
+ rvu_update_rsrc_map(rvu, pfvf, block,
+ pcifunc, lf, true);
+
+ /* Set start MSIX vector for this LF within this PF/VF */
+ rvu_set_msix_offset(rvu, pfvf, block, lf);
+ }
+}
+
+static int rvu_check_rsrc_availability(struct rvu *rvu,
+ struct rsrc_attach *req, u16 pcifunc)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ int free_lfs, mappedlfs, blkaddr;
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+
+ /* Only one NPA LF can be attached */
+ if (req->npalf && !is_blktype_attached(pfvf, BLKTYPE_NPA)) {
+ block = &hw->block[BLKADDR_NPA];
+ free_lfs = rvu_rsrc_free_count(&block->lf);
+ if (!free_lfs)
+ goto fail;
+ } else if (req->npalf) {
+ dev_err(&rvu->pdev->dev,
+ "Func 0x%x: Invalid req, already has NPA\n",
+ pcifunc);
+ return -EINVAL;
+ }
+
+ /* Only one NIX LF can be attached */
+ if (req->nixlf && !is_blktype_attached(pfvf, BLKTYPE_NIX)) {
+ blkaddr = rvu_get_attach_blkaddr(rvu, BLKTYPE_NIX,
+ pcifunc, req);
+ if (blkaddr < 0)
+ return blkaddr;
+ block = &hw->block[blkaddr];
+ free_lfs = rvu_rsrc_free_count(&block->lf);
+ if (!free_lfs)
+ goto fail;
+ } else if (req->nixlf) {
+ dev_err(&rvu->pdev->dev,
+ "Func 0x%x: Invalid req, already has NIX\n",
+ pcifunc);
+ return -EINVAL;
+ }
+
+ if (req->sso) {
+ block = &hw->block[BLKADDR_SSO];
+ /* Is the request within limits? */
+ if (req->sso > block->lf.max) {
+ dev_err(&rvu->pdev->dev,
+ "Func 0x%x: Invalid SSO req, %d > max %d\n",
+ pcifunc, req->sso, block->lf.max);
+ return -EINVAL;
+ }
+ mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
+ free_lfs = rvu_rsrc_free_count(&block->lf);
+ /* Check if additional resources are available */
+ if (req->sso > mappedlfs &&
+ ((req->sso - mappedlfs) > free_lfs))
+ goto fail;
+ }
+
+ if (req->ssow) {
+ block = &hw->block[BLKADDR_SSOW];
+ if (req->ssow > block->lf.max) {
+ dev_err(&rvu->pdev->dev,
+ "Func 0x%x: Invalid SSOW req, %d > max %d\n",
+ pcifunc, req->ssow, block->lf.max);
+ return -EINVAL;
+ }
+ mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
+ free_lfs = rvu_rsrc_free_count(&block->lf);
+ if (req->ssow > mappedlfs &&
+ ((req->ssow - mappedlfs) > free_lfs))
+ goto fail;
+ }
+
+ if (req->timlfs) {
+ block = &hw->block[BLKADDR_TIM];
+ if (req->timlfs > block->lf.max) {
+ dev_err(&rvu->pdev->dev,
+ "Func 0x%x: Invalid TIMLF req, %d > max %d\n",
+ pcifunc, req->timlfs, block->lf.max);
+ return -EINVAL;
+ }
+ mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
+ free_lfs = rvu_rsrc_free_count(&block->lf);
+ if (req->timlfs > mappedlfs &&
+ ((req->timlfs - mappedlfs) > free_lfs))
+ goto fail;
+ }
+
+ if (req->cptlfs) {
+ blkaddr = rvu_get_attach_blkaddr(rvu, BLKTYPE_CPT,
+ pcifunc, req);
+ if (blkaddr < 0)
+ return blkaddr;
+ block = &hw->block[blkaddr];
+ if (req->cptlfs > block->lf.max) {
+ dev_err(&rvu->pdev->dev,
+ "Func 0x%x: Invalid CPTLF req, %d > max %d\n",
+ pcifunc, req->cptlfs, block->lf.max);
+ return -EINVAL;
+ }
+ mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
+ free_lfs = rvu_rsrc_free_count(&block->lf);
+ if (req->cptlfs > mappedlfs &&
+ ((req->cptlfs - mappedlfs) > free_lfs))
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ dev_info(rvu->dev, "Request for %s failed\n", block->name);
+ return -ENOSPC;
+}
+
+static bool rvu_attach_from_same_block(struct rvu *rvu, int blktype,
+ struct rsrc_attach *attach)
+{
+ int blkaddr, num_lfs;
+
+ blkaddr = rvu_get_attach_blkaddr(rvu, blktype,
+ attach->hdr.pcifunc, attach);
+ if (blkaddr < 0)
+ return false;
+
+ num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, attach->hdr.pcifunc),
+ blkaddr);
+ /* Does the requester already have LFs from the given block? */
+ return !!num_lfs;
+}
+
+int rvu_mbox_handler_attach_resources(struct rvu *rvu,
+ struct rsrc_attach *attach,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = attach->hdr.pcifunc;
+ int err;
+
+ /* If first request, detach all existing attached resources */
+ if (!attach->modify)
+ rvu_detach_rsrcs(rvu, NULL, pcifunc);
+
+ mutex_lock(&rvu->rsrc_lock);
+
+ /* Check if the request can be accommodated */
+ err = rvu_check_rsrc_availability(rvu, attach, pcifunc);
+ if (err)
+ goto exit;
+
+ /* Now attach the requested resources */
+ if (attach->npalf)
+ rvu_attach_block(rvu, pcifunc, BLKTYPE_NPA, 1, attach);
+
+ if (attach->nixlf)
+ rvu_attach_block(rvu, pcifunc, BLKTYPE_NIX, 1, attach);
+
+ if (attach->sso) {
+ /* An RVU func doesn't know which exact LF or slot is attached
+ * to it, it always sees them as slots 0, 1, 2... So for a
+ * 'modify' request, simply detach all existing attached
+ * LFs/slots and attach afresh.
+ */
+ if (attach->modify)
+ rvu_detach_block(rvu, pcifunc, BLKTYPE_SSO);
+ rvu_attach_block(rvu, pcifunc, BLKTYPE_SSO,
+ attach->sso, attach);
+ }
+
+ if (attach->ssow) {
+ if (attach->modify)
+ rvu_detach_block(rvu, pcifunc, BLKTYPE_SSOW);
+ rvu_attach_block(rvu, pcifunc, BLKTYPE_SSOW,
+ attach->ssow, attach);
+ }
+
+ if (attach->timlfs) {
+ if (attach->modify)
+ rvu_detach_block(rvu, pcifunc, BLKTYPE_TIM);
+ rvu_attach_block(rvu, pcifunc, BLKTYPE_TIM,
+ attach->timlfs, attach);
+ }
+
+ if (attach->cptlfs) {
+ if (attach->modify &&
+ rvu_attach_from_same_block(rvu, BLKTYPE_CPT, attach))
+ rvu_detach_block(rvu, pcifunc, BLKTYPE_CPT);
+ rvu_attach_block(rvu, pcifunc, BLKTYPE_CPT,
+ attach->cptlfs, attach);
+ }
+
+exit:
+ mutex_unlock(&rvu->rsrc_lock);
+ return err;
+}
+
+static u16 rvu_get_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
+ int blkaddr, int lf)
+{
+ u16 vec;
+
+ if (lf < 0)
+ return MSIX_VECTOR_INVALID;
+
+ for (vec = 0; vec < pfvf->msix.max; vec++) {
+ if (pfvf->msix_lfmap[vec] == MSIX_BLKLF(blkaddr, lf))
+ return vec;
+ }
+ return MSIX_VECTOR_INVALID;
+}
+
+static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
+ struct rvu_block *block, int lf)
+{
+ u16 nvecs, vec, offset;
+ u64 cfg;
+
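+ /* LF's MSIX config: vector count in bits [19:12], vector offset in bits [10:0] */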
+ cfg = rvu_read64(rvu, block->addr, block->msixcfg_reg |
+ (lf << block->lfshift));
+ nvecs = (cfg >> 12) & 0xFF;
+
+ /* Check and alloc MSIX vectors, must be contiguous */
+ if (!rvu_rsrc_check_contig(&pfvf->msix, nvecs))
+ return;
+
+ offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);
+
+ /* Config MSIX offset in LF */
+ rvu_write64(rvu, block->addr, block->msixcfg_reg |
+ (lf << block->lfshift), (cfg & ~0x7FFULL) | offset);
+
+ /* Update the bitmap as well */
+ for (vec = 0; vec < nvecs; vec++)
+ pfvf->msix_lfmap[offset + vec] = MSIX_BLKLF(block->addr, lf);
+}
+
+static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
+ struct rvu_block *block, int lf)
+{
+ u16 nvecs, vec, offset;
+ u64 cfg;
+
+ cfg = rvu_read64(rvu, block->addr, block->msixcfg_reg |
+ (lf << block->lfshift));
+ nvecs = (cfg >> 12) & 0xFF;
+
+ /* Clear MSIX offset in LF */
+ rvu_write64(rvu, block->addr, block->msixcfg_reg |
+ (lf << block->lfshift), cfg & ~0x7FFULL);
+
+ offset = rvu_get_msix_offset(rvu, pfvf, block->addr, lf);
+
+ /* Update the mapping */
+ for (vec = 0; vec < nvecs; vec++)
+ pfvf->msix_lfmap[offset + vec] = 0;
+
+ /* Free the same in MSIX bitmap */
+ rvu_free_rsrc_contig(&pfvf->msix, nvecs, offset);
+}
+
+int rvu_mbox_handler_msix_offset(struct rvu *rvu, struct msg_req *req,
+ struct msix_offset_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_pfvf *pfvf;
+ int lf, slot, blkaddr;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ if (!pfvf->msix.bmap)
+ return 0;
+
+ /* Set MSIX offsets for each block's LFs attached to this PF/VF */
+ lf = rvu_get_lf(rvu, &hw->block[BLKADDR_NPA], pcifunc, 0);
+ rsp->npa_msixoff = rvu_get_msix_offset(rvu, pfvf, BLKADDR_NPA, lf);
+
+ /* Get BLKADDR from which LFs are attached to pcifunc */
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (blkaddr < 0) {
+ rsp->nix_msixoff = MSIX_VECTOR_INVALID;
+ } else {
+ lf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
+ rsp->nix_msixoff = rvu_get_msix_offset(rvu, pfvf, blkaddr, lf);
+ }
+
+ rsp->sso = pfvf->sso;
+ for (slot = 0; slot < rsp->sso; slot++) {
+ lf = rvu_get_lf(rvu, &hw->block[BLKADDR_SSO], pcifunc, slot);
+ rsp->sso_msixoff[slot] =
+ rvu_get_msix_offset(rvu, pfvf, BLKADDR_SSO, lf);
+ }
+
+ rsp->ssow = pfvf->ssow;
+ for (slot = 0; slot < rsp->ssow; slot++) {
+ lf = rvu_get_lf(rvu, &hw->block[BLKADDR_SSOW], pcifunc, slot);
+ rsp->ssow_msixoff[slot] =
+ rvu_get_msix_offset(rvu, pfvf, BLKADDR_SSOW, lf);
+ }
+
+ rsp->timlfs = pfvf->timlfs;
+ for (slot = 0; slot < rsp->timlfs; slot++) {
+ lf = rvu_get_lf(rvu, &hw->block[BLKADDR_TIM], pcifunc, slot);
+ rsp->timlf_msixoff[slot] =
+ rvu_get_msix_offset(rvu, pfvf, BLKADDR_TIM, lf);
+ }
+
+ rsp->cptlfs = pfvf->cptlfs;
+ for (slot = 0; slot < rsp->cptlfs; slot++) {
+ lf = rvu_get_lf(rvu, &hw->block[BLKADDR_CPT0], pcifunc, slot);
+ rsp->cptlf_msixoff[slot] =
+ rvu_get_msix_offset(rvu, pfvf, BLKADDR_CPT0, lf);
+ }
+
+ rsp->cpt1_lfs = pfvf->cpt1_lfs;
+ for (slot = 0; slot < rsp->cpt1_lfs; slot++) {
+ lf = rvu_get_lf(rvu, &hw->block[BLKADDR_CPT1], pcifunc, slot);
+ rsp->cpt1_lf_msixoff[slot] =
+ rvu_get_msix_offset(rvu, pfvf, BLKADDR_CPT1, lf);
+ }
+
+ return 0;
+}
+
+int rvu_mbox_handler_free_rsrc_cnt(struct rvu *rvu, struct msg_req *req,
+ struct free_rsrcs_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ struct nix_txsch *txsch;
+ struct nix_hw *nix_hw;
+
+ mutex_lock(&rvu->rsrc_lock);
+
+ block = &hw->block[BLKADDR_NPA];
+ rsp->npa = rvu_rsrc_free_count(&block->lf);
+
+ block = &hw->block[BLKADDR_NIX0];
+ rsp->nix = rvu_rsrc_free_count(&block->lf);
+
+ block = &hw->block[BLKADDR_NIX1];
+ rsp->nix1 = rvu_rsrc_free_count(&block->lf);
+
+ block = &hw->block[BLKADDR_SSO];
+ rsp->sso = rvu_rsrc_free_count(&block->lf);
+
+ block = &hw->block[BLKADDR_SSOW];
+ rsp->ssow = rvu_rsrc_free_count(&block->lf);
+
+ block = &hw->block[BLKADDR_TIM];
+ rsp->tim = rvu_rsrc_free_count(&block->lf);
+
+ block = &hw->block[BLKADDR_CPT0];
+ rsp->cpt = rvu_rsrc_free_count(&block->lf);
+
+ block = &hw->block[BLKADDR_CPT1];
+ rsp->cpt1 = rvu_rsrc_free_count(&block->lf);
+
+ if (rvu->hw->cap.nix_fixed_txschq_mapping) {
+ rsp->schq[NIX_TXSCH_LVL_SMQ] = 1;
+ rsp->schq[NIX_TXSCH_LVL_TL4] = 1;
+ rsp->schq[NIX_TXSCH_LVL_TL3] = 1;
+ rsp->schq[NIX_TXSCH_LVL_TL2] = 1;
+ /* NIX1 */
+ if (!is_block_implemented(rvu->hw, BLKADDR_NIX1))
+ goto out;
+ rsp->schq_nix1[NIX_TXSCH_LVL_SMQ] = 1;
+ rsp->schq_nix1[NIX_TXSCH_LVL_TL4] = 1;
+ rsp->schq_nix1[NIX_TXSCH_LVL_TL3] = 1;
+ rsp->schq_nix1[NIX_TXSCH_LVL_TL2] = 1;
+ } else {
+ nix_hw = get_nix_hw(hw, BLKADDR_NIX0);
+ txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
+ rsp->schq[NIX_TXSCH_LVL_SMQ] =
+ rvu_rsrc_free_count(&txsch->schq);
+
+ txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL4];
+ rsp->schq[NIX_TXSCH_LVL_TL4] =
+ rvu_rsrc_free_count(&txsch->schq);
+
+ txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL3];
+ rsp->schq[NIX_TXSCH_LVL_TL3] =
+ rvu_rsrc_free_count(&txsch->schq);
+
+ txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2];
+ rsp->schq[NIX_TXSCH_LVL_TL2] =
+ rvu_rsrc_free_count(&txsch->schq);
+
+ if (!is_block_implemented(rvu->hw, BLKADDR_NIX1))
+ goto out;
+
+ nix_hw = get_nix_hw(hw, BLKADDR_NIX1);
+ txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
+ rsp->schq_nix1[NIX_TXSCH_LVL_SMQ] =
+ rvu_rsrc_free_count(&txsch->schq);
+
+ txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL4];
+ rsp->schq_nix1[NIX_TXSCH_LVL_TL4] =
+ rvu_rsrc_free_count(&txsch->schq);
+
+ txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL3];
+ rsp->schq_nix1[NIX_TXSCH_LVL_TL3] =
+ rvu_rsrc_free_count(&txsch->schq);
+
+ txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2];
+ rsp->schq_nix1[NIX_TXSCH_LVL_TL2] =
+ rvu_rsrc_free_count(&txsch->schq);
+ }
+
+ rsp->schq_nix1[NIX_TXSCH_LVL_TL1] = 1;
+out:
+ rsp->schq[NIX_TXSCH_LVL_TL1] = 1;
+ mutex_unlock(&rvu->rsrc_lock);
+
+ return 0;
+}
+
+int rvu_mbox_handler_vf_flr(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ u16 vf, numvfs;
+ u64 cfg;
+
+ vf = pcifunc & RVU_PFVF_FUNC_MASK;
+ cfg = rvu_read64(rvu, BLKADDR_RVUM,
+ RVU_PRIV_PFX_CFG(rvu_get_pf(pcifunc)));
+ numvfs = (cfg >> 12) & 0xFF;
+
+ if (vf && vf <= numvfs)
+ __rvu_flr_handler(rvu, pcifunc);
+ else
+ return RVU_INVALID_VF_ID;
+
+ return 0;
+}
+
+int rvu_mbox_handler_get_hw_cap(struct rvu *rvu, struct msg_req *req,
+ struct get_hw_cap_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+
+ rsp->nix_fixed_txschq_mapping = hw->cap.nix_fixed_txschq_mapping;
+ rsp->nix_shaping = hw->cap.nix_shaping;
+ rsp->npc_hash_extract = hw->cap.npc_hash_extract;
+
+ return 0;
+}
+
+int rvu_mbox_handler_set_vf_perm(struct rvu *rvu, struct set_vf_perm *req,
+ struct msg_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_pfvf *pfvf;
+ int blkaddr, nixlf;
+ u16 target;
+
+ /* Only PF can add VF permissions */
+ if ((pcifunc & RVU_PFVF_FUNC_MASK) || is_afvf(pcifunc))
+ return -EOPNOTSUPP;
+
+ target = (pcifunc & ~RVU_PFVF_FUNC_MASK) | (req->vf + 1);
+ pfvf = rvu_get_pfvf(rvu, target);
+
+ if (req->flags & RESET_VF_PERM) {
+ pfvf->flags &= RVU_CLEAR_VF_PERM;
+ } else if (test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) ^
+ (req->flags & VF_TRUSTED)) {
+ change_bit(PF_SET_VF_TRUSTED, &pfvf->flags);
+ /* disable multicast and promisc entries */
+ if (!test_bit(PF_SET_VF_TRUSTED, &pfvf->flags)) {
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, target);
+ if (blkaddr < 0)
+ return 0;
+ nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
+ target, 0);
+ if (nixlf < 0)
+ return 0;
+ npc_enadis_default_mce_entry(rvu, target, nixlf,
+ NIXLF_ALLMULTI_ENTRY,
+ false);
+ npc_enadis_default_mce_entry(rvu, target, nixlf,
+ NIXLF_PROMISC_ENTRY,
+ false);
+ }
+ }
+
+ return 0;
+}
+
+static int rvu_process_mbox_msg(struct otx2_mbox *mbox, int devid,
+ struct mbox_msghdr *req)
+{
+ struct rvu *rvu = pci_get_drvdata(mbox->pdev);
+
+ /* Check if valid, if not reply with an invalid msg */
+ if (req->sig != OTX2_MBOX_REQ_SIG)
+ goto bad_message;
+
+ switch (req->id) {
+#define M(_name, _id, _fn_name, _req_type, _rsp_type) \
+ case _id: { \
+ struct _rsp_type *rsp; \
+ int err; \
+ \
+ rsp = (struct _rsp_type *)otx2_mbox_alloc_msg( \
+ mbox, devid, \
+ sizeof(struct _rsp_type)); \
+ /* some handlers should complete even if reply */ \
+ /* could not be allocated */ \
+ if (!rsp && \
+ _id != MBOX_MSG_DETACH_RESOURCES && \
+ _id != MBOX_MSG_NIX_TXSCH_FREE && \
+ _id != MBOX_MSG_VF_FLR) \
+ return -ENOMEM; \
+ if (rsp) { \
+ rsp->hdr.id = _id; \
+ rsp->hdr.sig = OTX2_MBOX_RSP_SIG; \
+ rsp->hdr.pcifunc = req->pcifunc; \
+ rsp->hdr.rc = 0; \
+ } \
+ \
+ err = rvu_mbox_handler_ ## _fn_name(rvu, \
+ (struct _req_type *)req, \
+ rsp); \
+ if (rsp && err) \
+ rsp->hdr.rc = err; \
+ \
+ trace_otx2_msg_process(mbox->pdev, _id, err); \
+ return rsp ? err : -ENOMEM; \
+ }
+MBOX_MESSAGES
+#undef M
+
+bad_message:
+ default:
+ otx2_reply_invalid_msg(mbox, devid, req->pcifunc, req->id);
+ return -ENODEV;
+ }
+}
+
+static void __rvu_mbox_handler(struct rvu_work *mwork, int type)
+{
+ struct rvu *rvu = mwork->rvu;
+ int offset, err, id, devid;
+ struct otx2_mbox_dev *mdev;
+ struct mbox_hdr *req_hdr;
+ struct mbox_msghdr *msg;
+ struct mbox_wq_info *mw;
+ struct otx2_mbox *mbox;
+
+ switch (type) {
+ case TYPE_AFPF:
+ mw = &rvu->afpf_wq_info;
+ break;
+ case TYPE_AFVF:
+ mw = &rvu->afvf_wq_info;
+ break;
+ default:
+ return;
+ }
+
+ devid = mwork - mw->mbox_wrk;
+ mbox = &mw->mbox;
+ mdev = &mbox->dev[devid];
+
+ /* Process received mbox messages */
+ req_hdr = mdev->mbase + mbox->rx_start;
+ if (mw->mbox_wrk[devid].num_msgs == 0)
+ return;
+
+ offset = mbox->rx_start + ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
+
+ for (id = 0; id < mw->mbox_wrk[devid].num_msgs; id++) {
+ msg = mdev->mbase + offset;
+
+ /* Set which PF/VF sent this message based on mbox IRQ */
+ switch (type) {
+ case TYPE_AFPF:
+ msg->pcifunc &=
+ ~(RVU_PFVF_PF_MASK << RVU_PFVF_PF_SHIFT);
+ msg->pcifunc |= (devid << RVU_PFVF_PF_SHIFT);
+ break;
+ case TYPE_AFVF:
+ msg->pcifunc &=
+ ~(RVU_PFVF_FUNC_MASK << RVU_PFVF_FUNC_SHIFT);
+ msg->pcifunc |= (devid << RVU_PFVF_FUNC_SHIFT) + 1;
+ break;
+ }
+
+ err = rvu_process_mbox_msg(mbox, devid, msg);
+ if (!err) {
+ offset = mbox->rx_start + msg->next_msgoff;
+ continue;
+ }
+
+ if (msg->pcifunc & RVU_PFVF_FUNC_MASK)
+ dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d:VF%d\n",
+ err, otx2_mbox_id2name(msg->id),
+ msg->id, rvu_get_pf(msg->pcifunc),
+ (msg->pcifunc & RVU_PFVF_FUNC_MASK) - 1);
+ else
+ dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d\n",
+ err, otx2_mbox_id2name(msg->id),
+ msg->id, devid);
+ }
+ mw->mbox_wrk[devid].num_msgs = 0;
+
+ /* Send mbox responses to VF/PF */
+ otx2_mbox_msg_send(mbox, devid);
+}
+
+static inline void rvu_afpf_mbox_handler(struct work_struct *work)
+{
+ struct rvu_work *mwork = container_of(work, struct rvu_work, work);
+
+ __rvu_mbox_handler(mwork, TYPE_AFPF);
+}
+
+static inline void rvu_afvf_mbox_handler(struct work_struct *work)
+{
+ struct rvu_work *mwork = container_of(work, struct rvu_work, work);
+
+ __rvu_mbox_handler(mwork, TYPE_AFVF);
+}
+
+static void __rvu_mbox_up_handler(struct rvu_work *mwork, int type)
+{
+ struct rvu *rvu = mwork->rvu;
+ struct otx2_mbox_dev *mdev;
+ struct mbox_hdr *rsp_hdr;
+ struct mbox_msghdr *msg;
+ struct mbox_wq_info *mw;
+ struct otx2_mbox *mbox;
+ int offset, id, devid;
+
+ switch (type) {
+ case TYPE_AFPF:
+ mw = &rvu->afpf_wq_info;
+ break;
+ case TYPE_AFVF:
+ mw = &rvu->afvf_wq_info;
+ break;
+ default:
+ return;
+ }
+
+ devid = mwork - mw->mbox_wrk_up;
+ mbox = &mw->mbox_up;
+ mdev = &mbox->dev[devid];
+
+ rsp_hdr = mdev->mbase + mbox->rx_start;
+ if (mw->mbox_wrk_up[devid].up_num_msgs == 0) {
+ dev_warn(rvu->dev, "mbox up handler: num_msgs = 0\n");
+ return;
+ }
+
+ offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
+
+ for (id = 0; id < mw->mbox_wrk_up[devid].up_num_msgs; id++) {
+ msg = mdev->mbase + offset;
+
+ if (msg->id >= MBOX_MSG_MAX) {
+ dev_err(rvu->dev,
+ "Mbox msg with unknown ID 0x%x\n", msg->id);
+ goto end;
+ }
+
+ if (msg->sig != OTX2_MBOX_RSP_SIG) {
+ dev_err(rvu->dev,
+ "Mbox msg with wrong signature %x, ID 0x%x\n",
+ msg->sig, msg->id);
+ goto end;
+ }
+
+ switch (msg->id) {
+ case MBOX_MSG_CGX_LINK_EVENT:
+ break;
+ default:
+ if (msg->rc)
+ dev_err(rvu->dev,
+ "Mbox msg response has err %d, ID 0x%x\n",
+ msg->rc, msg->id);
+ break;
+ }
+end:
+ offset = mbox->rx_start + msg->next_msgoff;
+ mdev->msgs_acked++;
+ }
+ mw->mbox_wrk_up[devid].up_num_msgs = 0;
+
+ otx2_mbox_reset(mbox, devid);
+}
+
+static inline void rvu_afpf_mbox_up_handler(struct work_struct *work)
+{
+ struct rvu_work *mwork = container_of(work, struct rvu_work, work);
+
+ __rvu_mbox_up_handler(mwork, TYPE_AFPF);
+}
+
+static inline void rvu_afvf_mbox_up_handler(struct work_struct *work)
+{
+ struct rvu_work *mwork = container_of(work, struct rvu_work, work);
+
+ __rvu_mbox_up_handler(mwork, TYPE_AFVF);
+}
+
+static int rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr,
+ int num, int type, unsigned long *pf_bmap)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ int region;
+ u64 bar4;
+
+ /* On the cn10k platform the VF mailbox regions of a PF follow the
+ * PF <-> AF mailbox region, whereas on OcteonTx2 the VF mailbox base
+ * is read from the RVU_PF_VF_BAR4_ADDR register.
+ */
+ if (type == TYPE_AFVF) {
+ for (region = 0; region < num; region++) {
+ if (!test_bit(region, pf_bmap))
+ continue;
+
+ if (hw->cap.per_pf_mbox_regs) {
+ bar4 = rvu_read64(rvu, BLKADDR_RVUM,
+ RVU_AF_PFX_BAR4_ADDR(0)) +
+ MBOX_SIZE;
+ bar4 += region * MBOX_SIZE;
+ } else {
+ bar4 = rvupf_read64(rvu, RVU_PF_VF_BAR4_ADDR);
+ bar4 += region * MBOX_SIZE;
+ }
+ mbox_addr[region] = (void *)ioremap_wc(bar4, MBOX_SIZE);
+ if (!mbox_addr[region])
+ goto error;
+ }
+ return 0;
+ }
+
+ /* On the cn10k platform the AF <-> PF mailbox region of a PF is read
+ * from per-PF registers, whereas on OcteonTx2 it is read from the
+ * RVU_AF_PF_BAR4_ADDR register.
+ */
+ for (region = 0; region < num; region++) {
+ if (!test_bit(region, pf_bmap))
+ continue;
+
+ if (hw->cap.per_pf_mbox_regs) {
+ bar4 = rvu_read64(rvu, BLKADDR_RVUM,
+ RVU_AF_PFX_BAR4_ADDR(region));
+ } else {
+ bar4 = rvu_read64(rvu, BLKADDR_RVUM,
+ RVU_AF_PF_BAR4_ADDR);
+ bar4 += region * MBOX_SIZE;
+ }
+ mbox_addr[region] = (void *)ioremap_wc(bar4, MBOX_SIZE);
+ if (!mbox_addr[region])
+ goto error;
+ }
+ return 0;
+
+error:
+ while (region--)
+ iounmap((void __iomem *)mbox_addr[region]);
+ return -ENOMEM;
+}
+
+static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
+ int type, int num,
+ void (mbox_handler)(struct work_struct *),
+ void (mbox_up_handler)(struct work_struct *))
+{
+ int err = -EINVAL, i, dir, dir_up;
+ void __iomem *reg_base;
+ struct rvu_work *mwork;
+ unsigned long *pf_bmap;
+ void **mbox_regions;
+ const char *name;
+ u64 cfg;
+
+ pf_bmap = bitmap_zalloc(num, GFP_KERNEL);
+ if (!pf_bmap)
+ return -ENOMEM;
+
+ /* RVU VFs */
+ if (type == TYPE_AFVF)
+ bitmap_set(pf_bmap, 0, num);
+
+ if (type == TYPE_AFPF) {
+ /* Mark enabled PFs in bitmap */
+ for (i = 0; i < num; i++) {
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(i));
+ if (cfg & BIT_ULL(20))
+ set_bit(i, pf_bmap);
+ }
+ }
+
+ mbox_regions = kcalloc(num, sizeof(void *), GFP_KERNEL);
+ if (!mbox_regions) {
+ err = -ENOMEM;
+ goto free_bitmap;
+ }
+
+ switch (type) {
+ case TYPE_AFPF:
+ name = "rvu_afpf_mailbox";
+ dir = MBOX_DIR_AFPF;
+ dir_up = MBOX_DIR_AFPF_UP;
+ reg_base = rvu->afreg_base;
+ err = rvu_get_mbox_regions(rvu, mbox_regions, num, TYPE_AFPF, pf_bmap);
+ if (err)
+ goto free_regions;
+ break;
+ case TYPE_AFVF:
+ name = "rvu_afvf_mailbox";
+ dir = MBOX_DIR_PFVF;
+ dir_up = MBOX_DIR_PFVF_UP;
+ reg_base = rvu->pfreg_base;
+ err = rvu_get_mbox_regions(rvu, mbox_regions, num, TYPE_AFVF, pf_bmap);
+ if (err)
+ goto free_regions;
+ break;
+ default:
+ goto free_regions;
+ }
+
+ mw->mbox_wq = alloc_workqueue(name,
+ WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
+ num);
+ if (!mw->mbox_wq) {
+ err = -ENOMEM;
+ goto unmap_regions;
+ }
+
+ mw->mbox_wrk = devm_kcalloc(rvu->dev, num,
+ sizeof(struct rvu_work), GFP_KERNEL);
+ if (!mw->mbox_wrk) {
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ mw->mbox_wrk_up = devm_kcalloc(rvu->dev, num,
+ sizeof(struct rvu_work), GFP_KERNEL);
+ if (!mw->mbox_wrk_up) {
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ err = otx2_mbox_regions_init(&mw->mbox, mbox_regions, rvu->pdev,
+ reg_base, dir, num, pf_bmap);
+ if (err)
+ goto exit;
+
+ err = otx2_mbox_regions_init(&mw->mbox_up, mbox_regions, rvu->pdev,
+ reg_base, dir_up, num, pf_bmap);
+ if (err)
+ goto exit;
+
+ for (i = 0; i < num; i++) {
+ if (!test_bit(i, pf_bmap))
+ continue;
+
+ mwork = &mw->mbox_wrk[i];
+ mwork->rvu = rvu;
+ INIT_WORK(&mwork->work, mbox_handler);
+
+ mwork = &mw->mbox_wrk_up[i];
+ mwork->rvu = rvu;
+ INIT_WORK(&mwork->work, mbox_up_handler);
+ }
+ goto free_regions;
+
+exit:
+ destroy_workqueue(mw->mbox_wq);
+unmap_regions:
+ while (num--)
+ iounmap((void __iomem *)mbox_regions[num]);
+free_regions:
+ kfree(mbox_regions);
+free_bitmap:
+ bitmap_free(pf_bmap);
+ return err;
+}
+
+static void rvu_mbox_destroy(struct mbox_wq_info *mw)
+{
+ struct otx2_mbox *mbox = &mw->mbox;
+ struct otx2_mbox_dev *mdev;
+ int devid;
+
+ if (mw->mbox_wq) {
+ destroy_workqueue(mw->mbox_wq);
+ mw->mbox_wq = NULL;
+ }
+
+ for (devid = 0; devid < mbox->ndevs; devid++) {
+ mdev = &mbox->dev[devid];
+ if (mdev->hwbase)
+ iounmap((void __iomem *)mdev->hwbase);
+ }
+
+ otx2_mbox_destroy(&mw->mbox);
+ otx2_mbox_destroy(&mw->mbox_up);
+}
+
+static void rvu_queue_work(struct mbox_wq_info *mw, int first,
+ int mdevs, u64 intr)
+{
+ struct otx2_mbox_dev *mdev;
+ struct otx2_mbox *mbox;
+ struct mbox_hdr *hdr;
+ int i;
+
+ for (i = first; i < mdevs; i++) {
+		/* Interrupt bits for this set of mbox devices start from 0 */
+ if (!(intr & BIT_ULL(i - first)))
+ continue;
+
+ mbox = &mw->mbox;
+ mdev = &mbox->dev[i];
+ hdr = mdev->mbase + mbox->rx_start;
+
+		/* The hdr->num_msgs is set to zero immediately in the interrupt
+		 * handler to ensure that it holds a correct value next time
+		 * the interrupt handler is called.
+		 * pf->mbox.num_msgs holds the data for use in pfaf_mbox_handler
+		 * and pf->mbox.up_num_msgs holds the data for use in
+		 * pfaf_mbox_up_handler.
+		 */
+
+ if (hdr->num_msgs) {
+ mw->mbox_wrk[i].num_msgs = hdr->num_msgs;
+ hdr->num_msgs = 0;
+ queue_work(mw->mbox_wq, &mw->mbox_wrk[i].work);
+ }
+ mbox = &mw->mbox_up;
+ mdev = &mbox->dev[i];
+ hdr = mdev->mbase + mbox->rx_start;
+ if (hdr->num_msgs) {
+ mw->mbox_wrk_up[i].up_num_msgs = hdr->num_msgs;
+ hdr->num_msgs = 0;
+ queue_work(mw->mbox_wq, &mw->mbox_wrk_up[i].work);
+ }
+ }
+}
+
+static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq)
+{
+ struct rvu *rvu = (struct rvu *)rvu_irq;
+ int vfs = rvu->vfs;
+ u64 intr;
+
+ intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT);
+ /* Clear interrupts */
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT, intr);
+ if (intr)
+ trace_otx2_msg_interrupt(rvu->pdev, "PF(s) to AF", intr);
+
+ /* Sync with mbox memory region */
+ rmb();
+
+ rvu_queue_work(&rvu->afpf_wq_info, 0, rvu->hw->total_pfs, intr);
+
+ /* Handle VF interrupts */
+ if (vfs > 64) {
+ intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(1));
+ rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(1), intr);
+
+ rvu_queue_work(&rvu->afvf_wq_info, 64, vfs, intr);
+ vfs -= 64;
+ }
+
+ intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(0));
+ rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(0), intr);
+ if (intr)
+ trace_otx2_msg_interrupt(rvu->pdev, "VF(s) to AF", intr);
+
+ rvu_queue_work(&rvu->afvf_wq_info, 0, vfs, intr);
+
+ return IRQ_HANDLED;
+}
+
+static void rvu_enable_mbox_intr(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+
+ /* Clear spurious irqs, if any */
+ rvu_write64(rvu, BLKADDR_RVUM,
+ RVU_AF_PFAF_MBOX_INT, INTR_MASK(hw->total_pfs));
+
+	/* Enable mailbox interrupt for all PFs except PF0, i.e. the AF itself */
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1S,
+ INTR_MASK(hw->total_pfs) & ~1ULL);
+}
+
+static void rvu_blklf_teardown(struct rvu *rvu, u16 pcifunc, u8 blkaddr)
+{
+ struct rvu_block *block;
+ int slot, lf, num_lfs;
+ int err;
+
+ block = &rvu->hw->block[blkaddr];
+ num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc),
+ block->addr);
+ if (!num_lfs)
+ return;
+ for (slot = 0; slot < num_lfs; slot++) {
+ lf = rvu_get_lf(rvu, block, pcifunc, slot);
+ if (lf < 0)
+ continue;
+
+ /* Cleanup LF and reset it */
+ if (block->addr == BLKADDR_NIX0 || block->addr == BLKADDR_NIX1)
+ rvu_nix_lf_teardown(rvu, pcifunc, block->addr, lf);
+ else if (block->addr == BLKADDR_NPA)
+ rvu_npa_lf_teardown(rvu, pcifunc, lf);
+ else if ((block->addr == BLKADDR_CPT0) ||
+ (block->addr == BLKADDR_CPT1))
+ rvu_cpt_lf_teardown(rvu, pcifunc, block->addr, lf,
+ slot);
+
+ err = rvu_lf_reset(rvu, block, lf);
+ if (err) {
+ dev_err(rvu->dev, "Failed to reset blkaddr %d LF%d\n",
+ block->addr, lf);
+ }
+ }
+}
+
+static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc)
+{
+ if (rvu_npc_exact_has_match_table(rvu))
+ rvu_npc_exact_reset(rvu, pcifunc);
+
+ mutex_lock(&rvu->flr_lock);
+ /* Reset order should reflect inter-block dependencies:
+ * 1. Reset any packet/work sources (NIX, CPT, TIM)
+ * 2. Flush and reset SSO/SSOW
+ * 3. Cleanup pools (NPA)
+ */
+ rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NIX0);
+ rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NIX1);
+ rvu_blklf_teardown(rvu, pcifunc, BLKADDR_CPT0);
+ rvu_blklf_teardown(rvu, pcifunc, BLKADDR_CPT1);
+ rvu_blklf_teardown(rvu, pcifunc, BLKADDR_TIM);
+ rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSOW);
+ rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSO);
+ rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NPA);
+ rvu_reset_lmt_map_tbl(rvu, pcifunc);
+ rvu_detach_rsrcs(rvu, NULL, pcifunc);
+	/* In scenarios where PF/VF drivers detach a NIXLF without freeing MCAM
+	 * entries, check and free the MCAM entries explicitly to avoid a leak.
+	 * Since the LF is already detached, pass -1 as the LF number.
+	 */
+ rvu_npc_free_mcam_entries(rvu, pcifunc, -1);
+ rvu_mac_reset(rvu, pcifunc);
+
+ if (rvu->mcs_blk_cnt)
+ rvu_mcs_flr_handler(rvu, pcifunc);
+
+ mutex_unlock(&rvu->flr_lock);
+}
+
+static void rvu_afvf_flr_handler(struct rvu *rvu, int vf)
+{
+ int reg = 0;
+
+ /* pcifunc = 0(PF0) | (vf + 1) */
+ __rvu_flr_handler(rvu, vf + 1);
+
+ if (vf >= 64) {
+ reg = 1;
+ vf = vf - 64;
+ }
+
+ /* Signal FLR finish and enable IRQ */
+ rvupf_write64(rvu, RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
+ rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf));
+}
+
+static void rvu_flr_handler(struct work_struct *work)
+{
+ struct rvu_work *flrwork = container_of(work, struct rvu_work, work);
+ struct rvu *rvu = flrwork->rvu;
+ u16 pcifunc, numvfs, vf;
+ u64 cfg;
+ int pf;
+
+ pf = flrwork - rvu->flr_wrk;
+ if (pf >= rvu->hw->total_pfs) {
+ rvu_afvf_flr_handler(rvu, pf - rvu->hw->total_pfs);
+ return;
+ }
+
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
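+	/* Bits 19:12 of RVU_PRIV_PFX_CFG hold the number of VFs of this PF */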
+ numvfs = (cfg >> 12) & 0xFF;
+ pcifunc = pf << RVU_PFVF_PF_SHIFT;
+
+ for (vf = 0; vf < numvfs; vf++)
+ __rvu_flr_handler(rvu, (pcifunc | (vf + 1)));
+
+ __rvu_flr_handler(rvu, pcifunc);
+
+ /* Signal FLR finish */
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFTRPEND, BIT_ULL(pf));
+
+ /* Enable interrupt */
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1S, BIT_ULL(pf));
+}
+
+static void rvu_afvf_queue_flr_work(struct rvu *rvu, int start_vf, int numvfs)
+{
+ int dev, vf, reg = 0;
+ u64 intr;
+
+ if (start_vf >= 64)
+ reg = 1;
+
+ intr = rvupf_read64(rvu, RVU_PF_VFFLR_INTX(reg));
+ if (!intr)
+ return;
+
+ for (vf = 0; vf < numvfs; vf++) {
+ if (!(intr & BIT_ULL(vf)))
+ continue;
+ /* Clear and disable the interrupt */
+ rvupf_write64(rvu, RVU_PF_VFFLR_INTX(reg), BIT_ULL(vf));
+ rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(reg), BIT_ULL(vf));
+
+ dev = vf + start_vf + rvu->hw->total_pfs;
+ queue_work(rvu->flr_wq, &rvu->flr_wrk[dev].work);
+ }
+}
+
+static irqreturn_t rvu_flr_intr_handler(int irq, void *rvu_irq)
+{
+ struct rvu *rvu = (struct rvu *)rvu_irq;
+ u64 intr;
+ u8 pf;
+
+ intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT);
+ if (!intr)
+ goto afvf_flr;
+
+ for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
+ if (intr & (1ULL << pf)) {
+ /* clear interrupt */
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT,
+ BIT_ULL(pf));
+ /* Disable the interrupt */
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1C,
+ BIT_ULL(pf));
+			/* PF is already dead, so only do AF related operations */
+ queue_work(rvu->flr_wq, &rvu->flr_wrk[pf].work);
+ }
+ }
+
+afvf_flr:
+ rvu_afvf_queue_flr_work(rvu, 0, 64);
+ if (rvu->vfs > 64)
+ rvu_afvf_queue_flr_work(rvu, 64, rvu->vfs - 64);
+
+ return IRQ_HANDLED;
+}
+
+static void rvu_me_handle_vfset(struct rvu *rvu, int idx, u64 intr)
+{
+ int vf;
+
+ /* Nothing to be done here other than clearing the
+ * TRPEND bit.
+ */
+ for (vf = 0; vf < 64; vf++) {
+ if (intr & (1ULL << vf)) {
+			/* clear the trpend due to ME (master enable) */
+ rvupf_write64(rvu, RVU_PF_VFTRPENDX(idx), BIT_ULL(vf));
+ /* clear interrupt */
+ rvupf_write64(rvu, RVU_PF_VFME_INTX(idx), BIT_ULL(vf));
+ }
+ }
+}
+
+/* Handles ME interrupts from VFs of AF */
+static irqreturn_t rvu_me_vf_intr_handler(int irq, void *rvu_irq)
+{
+ struct rvu *rvu = (struct rvu *)rvu_irq;
+ int vfset;
+ u64 intr;
+
+ intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT);
+
+ for (vfset = 0; vfset <= 1; vfset++) {
+ intr = rvupf_read64(rvu, RVU_PF_VFME_INTX(vfset));
+ if (intr)
+ rvu_me_handle_vfset(rvu, vfset, intr);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/* Handles ME interrupts from PFs */
+static irqreturn_t rvu_me_pf_intr_handler(int irq, void *rvu_irq)
+{
+ struct rvu *rvu = (struct rvu *)rvu_irq;
+ u64 intr;
+ u8 pf;
+
+ intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT);
+
+ /* Nothing to be done here other than clearing the
+ * TRPEND bit.
+ */
+ for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
+ if (intr & (1ULL << pf)) {
+			/* clear the trpend due to ME (master enable) */
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFTRPEND,
+ BIT_ULL(pf));
+ /* clear interrupt */
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT,
+ BIT_ULL(pf));
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void rvu_unregister_interrupts(struct rvu *rvu)
+{
+ int irq;
+
+ rvu_cpt_unregister_interrupts(rvu);
+
+ /* Disable the Mbox interrupt */
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1C,
+ INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
+
+ /* Disable the PF FLR interrupt */
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1C,
+ INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
+
+ /* Disable the PF ME interrupt */
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT_ENA_W1C,
+ INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
+
+ for (irq = 0; irq < rvu->num_vec; irq++) {
+ if (rvu->irq_allocated[irq]) {
+ free_irq(pci_irq_vector(rvu->pdev, irq), rvu);
+ rvu->irq_allocated[irq] = false;
+ }
+ }
+
+ pci_free_irq_vectors(rvu->pdev);
+ rvu->num_vec = 0;
+}
+
+static int rvu_afvf_msix_vectors_num_ok(struct rvu *rvu)
+{
+ struct rvu_pfvf *pfvf = &rvu->pf[0];
+ int offset;
+
+ offset = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_INT_CFG(0)) & 0x3ff;
+
+	/* Make sure there are enough MSIX vectors configured so that
+	 * VF interrupts can be handled. An offset equal to zero means
+	 * that PF vectors are not configured and would overlap with AF vectors.
+	 */
+ return (pfvf->msix.max >= RVU_AF_INT_VEC_CNT + RVU_PF_INT_VEC_CNT) &&
+ offset;
+}
+
+static int rvu_register_interrupts(struct rvu *rvu)
+{
+ int ret, offset, pf_vec_start;
+
+ rvu->num_vec = pci_msix_vec_count(rvu->pdev);
+
+ rvu->irq_name = devm_kmalloc_array(rvu->dev, rvu->num_vec,
+ NAME_SIZE, GFP_KERNEL);
+ if (!rvu->irq_name)
+ return -ENOMEM;
+
+ rvu->irq_allocated = devm_kcalloc(rvu->dev, rvu->num_vec,
+ sizeof(bool), GFP_KERNEL);
+ if (!rvu->irq_allocated)
+ return -ENOMEM;
+
+ /* Enable MSI-X */
+ ret = pci_alloc_irq_vectors(rvu->pdev, rvu->num_vec,
+ rvu->num_vec, PCI_IRQ_MSIX);
+ if (ret < 0) {
+ dev_err(rvu->dev,
+ "RVUAF: Request for %d msix vectors failed, ret %d\n",
+ rvu->num_vec, ret);
+ return ret;
+ }
+
+ /* Register mailbox interrupt handler */
+ sprintf(&rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], "RVUAF Mbox");
+ ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_MBOX),
+ rvu_mbox_intr_handler, 0,
+ &rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], rvu);
+ if (ret) {
+ dev_err(rvu->dev,
+ "RVUAF: IRQ registration failed for mbox irq\n");
+ goto fail;
+ }
+
+ rvu->irq_allocated[RVU_AF_INT_VEC_MBOX] = true;
+
+ /* Enable mailbox interrupts from all PFs */
+ rvu_enable_mbox_intr(rvu);
+
+ /* Register FLR interrupt handler */
+ sprintf(&rvu->irq_name[RVU_AF_INT_VEC_PFFLR * NAME_SIZE],
+ "RVUAF FLR");
+ ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_PFFLR),
+ rvu_flr_intr_handler, 0,
+ &rvu->irq_name[RVU_AF_INT_VEC_PFFLR * NAME_SIZE],
+ rvu);
+ if (ret) {
+ dev_err(rvu->dev,
+ "RVUAF: IRQ registration failed for FLR\n");
+ goto fail;
+ }
+ rvu->irq_allocated[RVU_AF_INT_VEC_PFFLR] = true;
+
+	/* Enable FLR interrupt for all PFs */
+ rvu_write64(rvu, BLKADDR_RVUM,
+ RVU_AF_PFFLR_INT, INTR_MASK(rvu->hw->total_pfs));
+
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1S,
+ INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
+
+ /* Register ME interrupt handler */
+ sprintf(&rvu->irq_name[RVU_AF_INT_VEC_PFME * NAME_SIZE],
+ "RVUAF ME");
+ ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_PFME),
+ rvu_me_pf_intr_handler, 0,
+ &rvu->irq_name[RVU_AF_INT_VEC_PFME * NAME_SIZE],
+ rvu);
+ if (ret) {
+ dev_err(rvu->dev,
+ "RVUAF: IRQ registration failed for ME\n");
+ }
+ rvu->irq_allocated[RVU_AF_INT_VEC_PFME] = true;
+
+ /* Clear TRPEND bit for all PF */
+ rvu_write64(rvu, BLKADDR_RVUM,
+ RVU_AF_PFTRPEND, INTR_MASK(rvu->hw->total_pfs));
+	/* Enable ME interrupt for all PFs */
+ rvu_write64(rvu, BLKADDR_RVUM,
+ RVU_AF_PFME_INT, INTR_MASK(rvu->hw->total_pfs));
+
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT_ENA_W1S,
+ INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
+
+ if (!rvu_afvf_msix_vectors_num_ok(rvu))
+ return 0;
+
+ /* Get PF MSIX vectors offset. */
+ pf_vec_start = rvu_read64(rvu, BLKADDR_RVUM,
+ RVU_PRIV_PFX_INT_CFG(0)) & 0x3ff;
+
+ /* Register MBOX0 interrupt. */
+ offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX0;
+ sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox0");
+ ret = request_irq(pci_irq_vector(rvu->pdev, offset),
+ rvu_mbox_intr_handler, 0,
+ &rvu->irq_name[offset * NAME_SIZE],
+ rvu);
+ if (ret)
+ dev_err(rvu->dev,
+ "RVUAF: IRQ registration failed for Mbox0\n");
+
+ rvu->irq_allocated[offset] = true;
+
+ /* Register MBOX1 interrupt. MBOX1 IRQ number follows MBOX0 so
+ * simply increment current offset by 1.
+ */
+ offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX1;
+ sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox1");
+ ret = request_irq(pci_irq_vector(rvu->pdev, offset),
+ rvu_mbox_intr_handler, 0,
+ &rvu->irq_name[offset * NAME_SIZE],
+ rvu);
+ if (ret)
+ dev_err(rvu->dev,
+ "RVUAF: IRQ registration failed for Mbox1\n");
+
+ rvu->irq_allocated[offset] = true;
+
+ /* Register FLR interrupt handler for AF's VFs */
+ offset = pf_vec_start + RVU_PF_INT_VEC_VFFLR0;
+ sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF FLR0");
+ ret = request_irq(pci_irq_vector(rvu->pdev, offset),
+ rvu_flr_intr_handler, 0,
+ &rvu->irq_name[offset * NAME_SIZE], rvu);
+ if (ret) {
+ dev_err(rvu->dev,
+ "RVUAF: IRQ registration failed for RVUAFVF FLR0\n");
+ goto fail;
+ }
+ rvu->irq_allocated[offset] = true;
+
+ offset = pf_vec_start + RVU_PF_INT_VEC_VFFLR1;
+ sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF FLR1");
+ ret = request_irq(pci_irq_vector(rvu->pdev, offset),
+ rvu_flr_intr_handler, 0,
+ &rvu->irq_name[offset * NAME_SIZE], rvu);
+ if (ret) {
+ dev_err(rvu->dev,
+ "RVUAF: IRQ registration failed for RVUAFVF FLR1\n");
+ goto fail;
+ }
+ rvu->irq_allocated[offset] = true;
+
+ /* Register ME interrupt handler for AF's VFs */
+ offset = pf_vec_start + RVU_PF_INT_VEC_VFME0;
+ sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF ME0");
+ ret = request_irq(pci_irq_vector(rvu->pdev, offset),
+ rvu_me_vf_intr_handler, 0,
+ &rvu->irq_name[offset * NAME_SIZE], rvu);
+ if (ret) {
+ dev_err(rvu->dev,
+ "RVUAF: IRQ registration failed for RVUAFVF ME0\n");
+ goto fail;
+ }
+ rvu->irq_allocated[offset] = true;
+
+ offset = pf_vec_start + RVU_PF_INT_VEC_VFME1;
+ sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF ME1");
+ ret = request_irq(pci_irq_vector(rvu->pdev, offset),
+ rvu_me_vf_intr_handler, 0,
+ &rvu->irq_name[offset * NAME_SIZE], rvu);
+ if (ret) {
+ dev_err(rvu->dev,
+ "RVUAF: IRQ registration failed for RVUAFVF ME1\n");
+ goto fail;
+ }
+ rvu->irq_allocated[offset] = true;
+
+ ret = rvu_cpt_register_interrupts(rvu);
+ if (ret)
+ goto fail;
+
+ return 0;
+
+fail:
+ rvu_unregister_interrupts(rvu);
+ return ret;
+}
+
+static void rvu_flr_wq_destroy(struct rvu *rvu)
+{
+ if (rvu->flr_wq) {
+ destroy_workqueue(rvu->flr_wq);
+ rvu->flr_wq = NULL;
+ }
+}
+
+static int rvu_flr_init(struct rvu *rvu)
+{
+ int dev, num_devs;
+ u64 cfg;
+ int pf;
+
+	/* Enable FLR for all PFs */
+ for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
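+		/* Setting BIT(22) in RVU_PRIV_PFX_CFG enables FLR for this PF */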
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf),
+ cfg | BIT_ULL(22));
+ }
+
+ rvu->flr_wq = alloc_ordered_workqueue("rvu_afpf_flr",
+ WQ_HIGHPRI | WQ_MEM_RECLAIM);
+ if (!rvu->flr_wq)
+ return -ENOMEM;
+
+ num_devs = rvu->hw->total_pfs + pci_sriov_get_totalvfs(rvu->pdev);
+ rvu->flr_wrk = devm_kcalloc(rvu->dev, num_devs,
+ sizeof(struct rvu_work), GFP_KERNEL);
+ if (!rvu->flr_wrk) {
+ destroy_workqueue(rvu->flr_wq);
+ return -ENOMEM;
+ }
+
+ for (dev = 0; dev < num_devs; dev++) {
+ rvu->flr_wrk[dev].rvu = rvu;
+ INIT_WORK(&rvu->flr_wrk[dev].work, rvu_flr_handler);
+ }
+
+ mutex_init(&rvu->flr_lock);
+
+ return 0;
+}
+
+static void rvu_disable_afvf_intr(struct rvu *rvu)
+{
+ int vfs = rvu->vfs;
+
+ rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), INTR_MASK(vfs));
+ rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(vfs));
+ rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(vfs));
+ if (vfs <= 64)
+ return;
+
+ rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1),
+ INTR_MASK(vfs - 64));
+ rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
+ rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
+}
+
+static void rvu_enable_afvf_intr(struct rvu *rvu)
+{
+ int vfs = rvu->vfs;
+
+ /* Clear any pending interrupts and enable AF VF interrupts for
+ * the first 64 VFs.
+ */
+ /* Mbox */
+ rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(0), INTR_MASK(vfs));
+ rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(0), INTR_MASK(vfs));
+
+ /* FLR */
+ rvupf_write64(rvu, RVU_PF_VFFLR_INTX(0), INTR_MASK(vfs));
+ rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(vfs));
+ rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1SX(0), INTR_MASK(vfs));
+
+ /* Same for remaining VFs, if any. */
+ if (vfs <= 64)
+ return;
+
+ rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(1), INTR_MASK(vfs - 64));
+ rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(1),
+ INTR_MASK(vfs - 64));
+
+ rvupf_write64(rvu, RVU_PF_VFFLR_INTX(1), INTR_MASK(vfs - 64));
+ rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));
+ rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));
+}
+
+int rvu_get_num_lbk_chans(void)
+{
+ struct pci_dev *pdev;
+ void __iomem *base;
+ int ret = -EIO;
+
+ pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_LBK,
+ NULL);
+ if (!pdev)
+ goto err;
+
+ base = pci_ioremap_bar(pdev, 0);
+ if (!base)
+ goto err_put;
+
+ /* Read number of available LBK channels from LBK(0)_CONST register. */
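+	/* The channel count sits in bits 47:32 of the CONST register at offset 0x10 */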
+ ret = (readq(base + 0x10) >> 32) & 0xffff;
+ iounmap(base);
+err_put:
+ pci_dev_put(pdev);
+err:
+ return ret;
+}
+
+static int rvu_enable_sriov(struct rvu *rvu)
+{
+ struct pci_dev *pdev = rvu->pdev;
+ int err, chans, vfs;
+
+ if (!rvu_afvf_msix_vectors_num_ok(rvu)) {
+ dev_warn(&pdev->dev,
+ "Skipping SRIOV enablement since not enough IRQs are available\n");
+ return 0;
+ }
+
+ chans = rvu_get_num_lbk_chans();
+ if (chans < 0)
+ return chans;
+
+ vfs = pci_sriov_get_totalvfs(pdev);
+
+ /* Limit VFs in case we have more VFs than LBK channels available. */
+ if (vfs > chans)
+ vfs = chans;
+
+ if (!vfs)
+ return 0;
+
+	/* LBK channel number 63 is used for switching packets between
+	 * CGX mapped VFs. Hence limit LBK pairs to 62 only.
+	 */
+ if (vfs > 62)
+ vfs = 62;
+
+	/* Save the number of VFs for reference in the VF interrupt handlers.
+	 * Since interrupts might start arriving during SR-IOV enablement,
+	 * the ordinary API cannot be used to get the number of enabled VFs.
+	 */
+ rvu->vfs = vfs;
+
+ err = rvu_mbox_init(rvu, &rvu->afvf_wq_info, TYPE_AFVF, vfs,
+ rvu_afvf_mbox_handler, rvu_afvf_mbox_up_handler);
+ if (err)
+ return err;
+
+ rvu_enable_afvf_intr(rvu);
+ /* Make sure IRQs are enabled before SRIOV. */
+ mb();
+
+ err = pci_enable_sriov(pdev, vfs);
+ if (err) {
+ rvu_disable_afvf_intr(rvu);
+ rvu_mbox_destroy(&rvu->afvf_wq_info);
+ return err;
+ }
+
+ return 0;
+}
+
+static void rvu_disable_sriov(struct rvu *rvu)
+{
+ rvu_disable_afvf_intr(rvu);
+ rvu_mbox_destroy(&rvu->afvf_wq_info);
+ pci_disable_sriov(rvu->pdev);
+}
+
+static void rvu_update_module_params(struct rvu *rvu)
+{
+ const char *default_pfl_name = "default";
+
+ strscpy(rvu->mkex_pfl_name,
+ mkex_profile ? mkex_profile : default_pfl_name, MKEX_NAME_LEN);
+ strscpy(rvu->kpu_pfl_name,
+ kpu_profile ? kpu_profile : default_pfl_name, KPU_NAME_LEN);
+}
+
+static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct device *dev = &pdev->dev;
+ struct rvu *rvu;
+ int err;
+
+ rvu = devm_kzalloc(dev, sizeof(*rvu), GFP_KERNEL);
+ if (!rvu)
+ return -ENOMEM;
+
+ rvu->hw = devm_kzalloc(dev, sizeof(struct rvu_hwinfo), GFP_KERNEL);
+ if (!rvu->hw) {
+ devm_kfree(dev, rvu);
+ return -ENOMEM;
+ }
+
+ pci_set_drvdata(pdev, rvu);
+ rvu->pdev = pdev;
+ rvu->dev = &pdev->dev;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(dev, "Failed to enable PCI device\n");
+ goto err_freemem;
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ dev_err(dev, "PCI request regions failed 0x%x\n", err);
+ goto err_disable_device;
+ }
+
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
+ if (err) {
+ dev_err(dev, "DMA mask config failed, abort\n");
+ goto err_release_regions;
+ }
+
+ pci_set_master(pdev);
+
+ rvu->ptp = ptp_get();
+ if (IS_ERR(rvu->ptp)) {
+ err = PTR_ERR(rvu->ptp);
+ if (err)
+ goto err_release_regions;
+ rvu->ptp = NULL;
+ }
+
+ /* Map Admin function CSRs */
+ rvu->afreg_base = pcim_iomap(pdev, PCI_AF_REG_BAR_NUM, 0);
+ rvu->pfreg_base = pcim_iomap(pdev, PCI_PF_REG_BAR_NUM, 0);
+ if (!rvu->afreg_base || !rvu->pfreg_base) {
+ dev_err(dev, "Unable to map admin function CSRs, aborting\n");
+ err = -ENOMEM;
+ goto err_put_ptp;
+ }
+
+ /* Store module params in rvu structure */
+ rvu_update_module_params(rvu);
+
+ /* Check which blocks the HW supports */
+ rvu_check_block_implemented(rvu);
+
+ rvu_reset_all_blocks(rvu);
+
+ rvu_setup_hw_capabilities(rvu);
+
+ err = rvu_setup_hw_resources(rvu);
+ if (err)
+ goto err_put_ptp;
+
+ /* Init mailbox btw AF and PFs */
+ err = rvu_mbox_init(rvu, &rvu->afpf_wq_info, TYPE_AFPF,
+ rvu->hw->total_pfs, rvu_afpf_mbox_handler,
+ rvu_afpf_mbox_up_handler);
+ if (err) {
+ dev_err(dev, "%s: Failed to initialize mbox\n", __func__);
+ goto err_hwsetup;
+ }
+
+ err = rvu_flr_init(rvu);
+ if (err) {
+ dev_err(dev, "%s: Failed to initialize flr\n", __func__);
+ goto err_mbox;
+ }
+
+ err = rvu_register_interrupts(rvu);
+ if (err) {
+ dev_err(dev, "%s: Failed to register interrupts\n", __func__);
+ goto err_flr;
+ }
+
+ err = rvu_register_dl(rvu);
+ if (err) {
+ dev_err(dev, "%s: Failed to register devlink\n", __func__);
+ goto err_irq;
+ }
+
+ rvu_setup_rvum_blk_revid(rvu);
+
+ /* Enable AF's VFs (if any) */
+ err = rvu_enable_sriov(rvu);
+ if (err) {
+ dev_err(dev, "%s: Failed to enable sriov\n", __func__);
+ goto err_dl;
+ }
+
+ /* Initialize debugfs */
+ rvu_dbg_init(rvu);
+
+ mutex_init(&rvu->rswitch.switch_lock);
+
+ if (rvu->fwdata)
+ ptp_start(rvu, rvu->fwdata->sclk, rvu->fwdata->ptp_ext_clk_rate,
+ rvu->fwdata->ptp_ext_tstamp);
+
+ return 0;
+err_dl:
+ rvu_unregister_dl(rvu);
+err_irq:
+ rvu_unregister_interrupts(rvu);
+err_flr:
+ rvu_flr_wq_destroy(rvu);
+err_mbox:
+ rvu_mbox_destroy(&rvu->afpf_wq_info);
+err_hwsetup:
+ rvu_cgx_exit(rvu);
+ rvu_fwdata_exit(rvu);
+ rvu_mcs_exit(rvu);
+ rvu_reset_all_blocks(rvu);
+ rvu_free_hw_resources(rvu);
+ rvu_clear_rvum_blk_revid(rvu);
+err_put_ptp:
+ ptp_put(rvu->ptp);
+err_release_regions:
+ pci_release_regions(pdev);
+err_disable_device:
+ pci_disable_device(pdev);
+err_freemem:
+ pci_set_drvdata(pdev, NULL);
+ devm_kfree(&pdev->dev, rvu->hw);
+ devm_kfree(dev, rvu);
+ return err;
+}
+
+static void rvu_remove(struct pci_dev *pdev)
+{
+ struct rvu *rvu = pci_get_drvdata(pdev);
+
+ rvu_dbg_exit(rvu);
+ rvu_unregister_dl(rvu);
+ rvu_unregister_interrupts(rvu);
+ rvu_flr_wq_destroy(rvu);
+ rvu_cgx_exit(rvu);
+ rvu_fwdata_exit(rvu);
+ rvu_mcs_exit(rvu);
+ rvu_mbox_destroy(&rvu->afpf_wq_info);
+ rvu_disable_sriov(rvu);
+ rvu_reset_all_blocks(rvu);
+ rvu_free_hw_resources(rvu);
+ rvu_clear_rvum_blk_revid(rvu);
+ ptp_put(rvu->ptp);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+
+ devm_kfree(&pdev->dev, rvu->hw);
+ devm_kfree(&pdev->dev, rvu);
+}
+
+static struct pci_driver rvu_driver = {
+ .name = DRV_NAME,
+ .id_table = rvu_id_table,
+ .probe = rvu_probe,
+ .remove = rvu_remove,
+};
+
+static int __init rvu_init_module(void)
+{
+ int err;
+
+ pr_info("%s: %s\n", DRV_NAME, DRV_STRING);
+
+ err = pci_register_driver(&cgx_driver);
+ if (err < 0)
+ return err;
+
+ err = pci_register_driver(&ptp_driver);
+ if (err < 0)
+ goto ptp_err;
+
+ err = pci_register_driver(&mcs_driver);
+ if (err < 0)
+ goto mcs_err;
+
+ err = pci_register_driver(&rvu_driver);
+ if (err < 0)
+ goto rvu_err;
+
+ return 0;
+rvu_err:
+ pci_unregister_driver(&mcs_driver);
+mcs_err:
+ pci_unregister_driver(&ptp_driver);
+ptp_err:
+ pci_unregister_driver(&cgx_driver);
+
+ return err;
+}
+
+static void __exit rvu_cleanup_module(void)
+{
+ pci_unregister_driver(&rvu_driver);
+ pci_unregister_driver(&mcs_driver);
+ pci_unregister_driver(&ptp_driver);
+ pci_unregister_driver(&cgx_driver);
+}
+
+module_init(rvu_init_module);
+module_exit(rvu_cleanup_module);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
new file mode 100644
index 000000000..8802961b8
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -0,0 +1,972 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell.
+ *
+ */
+
+#ifndef RVU_H
+#define RVU_H
+
+#include <linux/pci.h>
+#include <net/devlink.h>
+
+#include "rvu_struct.h"
+#include "rvu_devlink.h"
+#include "common.h"
+#include "mbox.h"
+#include "npc.h"
+#include "rvu_reg.h"
+#include "ptp.h"
+
+/* PCI device IDs */
+#define PCI_DEVID_OCTEONTX2_RVU_AF 0xA065
+#define PCI_DEVID_OCTEONTX2_LBK 0xA061
+
+/* Subsystem Device ID */
+#define PCI_SUBSYS_DEVID_98XX 0xB100
+#define PCI_SUBSYS_DEVID_96XX 0xB200
+#define PCI_SUBSYS_DEVID_CN10K_A 0xB900
+#define PCI_SUBSYS_DEVID_CNF10K_A 0xBA00
+#define PCI_SUBSYS_DEVID_CNF10K_B 0xBC00
+#define PCI_SUBSYS_DEVID_CN10K_B 0xBD00
+
+/* PCI BAR nos */
+#define PCI_AF_REG_BAR_NUM 0
+#define PCI_PF_REG_BAR_NUM 2
+#define PCI_MBOX_BAR_NUM 4
+
+#define NAME_SIZE 32
+#define MAX_NIX_BLKS 2
+#define MAX_CPT_BLKS 2
+
+/* PF_FUNC */
+#define RVU_PFVF_PF_SHIFT 10
+#define RVU_PFVF_PF_MASK 0x3F
+#define RVU_PFVF_FUNC_SHIFT 0
+#define RVU_PFVF_FUNC_MASK 0x3FF
+
+#ifdef CONFIG_DEBUG_FS
+struct dump_ctx {
+ int lf;
+ int id;
+ bool all;
+};
+
+struct cpt_ctx {
+ int blkaddr;
+ struct rvu *rvu;
+};
+
+struct rvu_debugfs {
+ struct dentry *root;
+ struct dentry *cgx_root;
+ struct dentry *cgx;
+ struct dentry *lmac;
+ struct dentry *npa;
+ struct dentry *nix;
+ struct dentry *npc;
+ struct dentry *cpt;
+ struct dentry *mcs_root;
+ struct dentry *mcs;
+ struct dentry *mcs_rx;
+ struct dentry *mcs_tx;
+ struct dump_ctx npa_aura_ctx;
+ struct dump_ctx npa_pool_ctx;
+ struct dump_ctx nix_cq_ctx;
+ struct dump_ctx nix_rq_ctx;
+ struct dump_ctx nix_sq_ctx;
+ struct cpt_ctx cpt_ctx[MAX_CPT_BLKS];
+ int npa_qsize_id;
+ int nix_qsize_id;
+};
+#endif
+
+struct rvu_work {
+ struct work_struct work;
+ struct rvu *rvu;
+ int num_msgs;
+ int up_num_msgs;
+};
+
+struct rsrc_bmap {
+ unsigned long *bmap; /* Pointer to resource bitmap */
+ u16 max; /* Max resource id or count */
+};
+
+struct rvu_block {
+ struct rsrc_bmap lf;
+ struct admin_queue *aq; /* NIX/NPA AQ */
+ u16 *fn_map; /* LF to pcifunc mapping */
+ bool multislot;
+ bool implemented;
+ u8 addr; /* RVU_BLOCK_ADDR_E */
+ u8 type; /* RVU_BLOCK_TYPE_E */
+ u8 lfshift;
+ u64 lookup_reg;
+ u64 pf_lfcnt_reg;
+ u64 vf_lfcnt_reg;
+ u64 lfcfg_reg;
+ u64 msixcfg_reg;
+ u64 lfreset_reg;
+ unsigned char name[NAME_SIZE];
+ struct rvu *rvu;
+ u64 cpt_flt_eng_map[3];
+ u64 cpt_rcvrd_eng_map[3];
+};
+
+struct nix_mcast {
+ struct qmem *mce_ctx;
+ struct qmem *mcast_buf;
+ int replay_pkind;
+ int next_free_mce;
+ struct mutex mce_lock; /* Serialize MCE updates */
+};
+
+struct nix_mce_list {
+ struct hlist_head head;
+ int count;
+ int max;
+};
+
+/* layer metadata to uniquely identify a packet header field */
+struct npc_layer_mdata {
+ u8 lid;
+ u8 ltype;
+ u8 hdr;
+ u8 key;
+ u8 len;
+};
+
+/* Structure to represent a field present in the
+ * generated key. A key field may be present anywhere and can
+ * be of any size in the generated key. Once this structure
+ * is populated for the fields of interest, each field's presence
+ * and location (if present) can be known.
+ */
+struct npc_key_field {
+ /* Masks where all set bits indicate position
+ * of a field in the key
+ */
+ u64 kw_mask[NPC_MAX_KWS_IN_KEY];
+ /* Number of words in the key a field spans. If a field is
+ * of 16 bytes and key offset is 4 then the field will use
+ * 4 bytes in KW0, 8 bytes in KW1 and 4 bytes in KW2 and
+	 * nr_kws will be 3 (KW0, KW1 and KW2).
+ */
+ int nr_kws;
+ /* used by packet header fields */
+ struct npc_layer_mdata layer_mdata;
+};
+
+struct npc_mcam {
+ struct rsrc_bmap counters;
+ struct mutex lock; /* MCAM entries and counters update lock */
+ unsigned long *bmap; /* bitmap, 0 => bmap_entries */
+ unsigned long *bmap_reverse; /* Reverse bitmap, bmap_entries => 0 */
+ u16 bmap_entries; /* Number of unreserved MCAM entries */
+ u16 bmap_fcnt; /* MCAM entries free count */
+ u16 *entry2pfvf_map;
+ u16 *entry2cntr_map;
+ u16 *cntr2pfvf_map;
+ u16 *cntr_refcnt;
+ u16 *entry2target_pffunc;
+ u8 keysize; /* MCAM keysize 112/224/448 bits */
+ u8 banks; /* Number of MCAM banks */
+	u8 banks_per_entry; /* Number of keywords in key */
+ u16 banksize; /* Number of MCAM entries in each bank */
+ u16 total_entries; /* Total number of MCAM entries */
+ u16 nixlf_offset; /* Offset of nixlf rsvd uncast entries */
+ u16 pf_offset; /* Offset of PF's rsvd bcast, promisc entries */
+ u16 lprio_count;
+ u16 lprio_start;
+ u16 hprio_count;
+ u16 hprio_end;
+ u16 rx_miss_act_cntr; /* Counter for RX MISS action */
+ /* fields present in the generated key */
+ struct npc_key_field tx_key_fields[NPC_KEY_FIELDS_MAX];
+ struct npc_key_field rx_key_fields[NPC_KEY_FIELDS_MAX];
+ u64 tx_features;
+ u64 rx_features;
+ struct list_head mcam_rules;
+};
+
+/* Structure for per RVU func info ie PF/VF */
+struct rvu_pfvf {
+ bool npalf; /* Only one NPALF per RVU_FUNC */
+ bool nixlf; /* Only one NIXLF per RVU_FUNC */
+ u16 sso;
+ u16 ssow;
+ u16 cptlfs;
+ u16 timlfs;
+ u16 cpt1_lfs;
+ u8 cgx_lmac;
+
+ /* Block LF's MSIX vector info */
+ struct rsrc_bmap msix; /* Bitmap for MSIX vector alloc */
+#define MSIX_BLKLF(blkaddr, lf) (((blkaddr) << 8) | ((lf) & 0xFF))
+ u16 *msix_lfmap; /* Vector to block LF mapping */
+
+ /* NPA contexts */
+ struct qmem *aura_ctx;
+ struct qmem *pool_ctx;
+ struct qmem *npa_qints_ctx;
+ unsigned long *aura_bmap;
+ unsigned long *pool_bmap;
+
+ /* NIX contexts */
+ struct qmem *rq_ctx;
+ struct qmem *sq_ctx;
+ struct qmem *cq_ctx;
+ struct qmem *rss_ctx;
+ struct qmem *cq_ints_ctx;
+ struct qmem *nix_qints_ctx;
+ unsigned long *sq_bmap;
+ unsigned long *rq_bmap;
+ unsigned long *cq_bmap;
+
+ u16 rx_chan_base;
+ u16 tx_chan_base;
+ u8 rx_chan_cnt; /* total number of RX channels */
+ u8 tx_chan_cnt; /* total number of TX channels */
+ u16 maxlen;
+ u16 minlen;
+
+ bool hw_rx_tstamp_en; /* Is rx_tstamp enabled */
+ u8 mac_addr[ETH_ALEN]; /* MAC address of this PF/VF */
+ u8 default_mac[ETH_ALEN]; /* MAC address from FWdata */
+
+ /* Broadcast/Multicast/Promisc pkt replication info */
+ u16 bcast_mce_idx;
+ u16 mcast_mce_idx;
+ u16 promisc_mce_idx;
+ struct nix_mce_list bcast_mce_list;
+ struct nix_mce_list mcast_mce_list;
+ struct nix_mce_list promisc_mce_list;
+ bool use_mce_list;
+
+ struct rvu_npc_mcam_rule *def_ucast_rule;
+
+ bool cgx_in_use; /* this PF/VF using CGX? */
+ int cgx_users; /* number of cgx users - used only by PFs */
+
+ int intf_mode;
+ u8 nix_blkaddr; /* BLKADDR_NIX0/1 assigned to this PF */
+ u8 nix_rx_intf; /* NIX0_RX/NIX1_RX interface to NPC */
+ u8 nix_tx_intf; /* NIX0_TX/NIX1_TX interface to NPC */
+ u8 lbkid; /* NIX0/1 lbk link ID */
+	u64 lmt_base_addr; /* Preserving the pcifunc's lmtst base addr */
+	u64 lmt_map_ent_w1; /* Preserving the word1 of lmtst map table entry */
+ unsigned long flags;
+ struct sdp_node_info *sdp_info;
+};
+
+enum rvu_pfvf_flags {
+ NIXLF_INITIALIZED = 0,
+ PF_SET_VF_MAC,
+ PF_SET_VF_CFG,
+ PF_SET_VF_TRUSTED,
+};
+
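+/* Mask to clear the PF_SET_VF_* permission flags from a pfvf's flags field */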
+#define RVU_CLEAR_VF_PERM ~GENMASK(PF_SET_VF_TRUSTED, PF_SET_VF_MAC)
+
+struct nix_txsch {
+ struct rsrc_bmap schq;
+ u8 lvl;
+#define NIX_TXSCHQ_FREE BIT_ULL(1)
+#define NIX_TXSCHQ_CFG_DONE BIT_ULL(0)
+#define TXSCH_MAP_FUNC(__pfvf_map) ((__pfvf_map) & 0xFFFF)
+#define TXSCH_MAP_FLAGS(__pfvf_map) ((__pfvf_map) >> 16)
+#define TXSCH_MAP(__func, __flags) (((__func) & 0xFFFF) | ((__flags) << 16))
+#define TXSCH_SET_FLAG(__pfvf_map, flag) ((__pfvf_map) | ((flag) << 16))
+ u32 *pfvf_map;
+};
+
+struct nix_mark_format {
+ u8 total;
+ u8 in_use;
+ u32 *cfg;
+};
+
+/* smq(flush) to tl1 cir/pir info */
+struct nix_smq_tree_ctx {
+ u64 cir_off;
+ u64 cir_val;
+ u64 pir_off;
+ u64 pir_val;
+};
+
+/* smq flush context */
+struct nix_smq_flush_ctx {
+ int smq;
+ u16 tl1_schq;
+ u16 tl2_schq;
+ struct nix_smq_tree_ctx smq_tree_ctx[NIX_TXSCH_LVL_CNT];
+};
+
+struct npc_pkind {
+ struct rsrc_bmap rsrc;
+ u32 *pfchan_map;
+};
+
+struct nix_flowkey {
+#define NIX_FLOW_KEY_ALG_MAX 32
+ u32 flowkey[NIX_FLOW_KEY_ALG_MAX];
+ int in_use;
+};
+
+struct nix_lso {
+ u8 total;
+ u8 in_use;
+};
+
+struct nix_txvlan {
+#define NIX_TX_VTAG_DEF_MAX 0x400
+ struct rsrc_bmap rsrc;
+ u16 *entry2pfvf_map;
+ struct mutex rsrc_lock; /* Serialize resource alloc/free */
+};
+
+struct nix_ipolicer {
+ struct rsrc_bmap band_prof;
+ u16 *pfvf_map;
+ u16 *match_id;
+ u16 *ref_count;
+};
+
+struct nix_hw {
+ int blkaddr;
+ struct rvu *rvu;
+ struct nix_txsch txsch[NIX_TXSCH_LVL_CNT]; /* Tx schedulers */
+ struct nix_mcast mcast;
+ struct nix_flowkey flowkey;
+ struct nix_mark_format mark_format;
+ struct nix_lso lso;
+ struct nix_txvlan txvlan;
+ struct nix_ipolicer *ipolicer;
+ u64 *tx_credits;
+ u8 cc_mcs_cnt;
+};
+
+/* RVU block's capabilities or functionality,
+ * which vary by silicon version/SKU.
+ */
+struct hw_cap {
+ /* Transmit side supported functionality */
+ u8 nix_tx_aggr_lvl; /* Tx link's traffic aggregation level */
+ u16 nix_txsch_per_cgx_lmac; /* Max Q's transmitting to CGX LMAC */
+ u16 nix_txsch_per_lbk_lmac; /* Max Q's transmitting to LBK LMAC */
+ u16 nix_txsch_per_sdp_lmac; /* Max Q's transmitting to SDP LMAC */
+ bool nix_fixed_txschq_mapping; /* Schq mapping fixed or flexible */
+ bool nix_shaping; /* Is shaping and coloring supported */
+ bool nix_shaper_toggle_wait; /* Shaping toggle needs poll/wait */
+ bool nix_tx_link_bp; /* Can link backpressure TL queues ? */
+ bool nix_rx_multicast; /* Rx packet replication support */
+ bool nix_common_dwrr_mtu; /* Common DWRR MTU for quantum config */
+ bool per_pf_mbox_regs; /* PF mbox specified in per PF registers ? */
+ bool programmable_chans; /* Channels programmable ? */
+ bool ipolicer;
+ bool nix_multiple_dwrr_mtu; /* Multiple DWRR_MTU to choose from */
+ bool npc_hash_extract; /* Hash extract enabled ? */
+ bool npc_exact_match_enabled; /* Exact match supported ? */
+};
+
+struct rvu_hwinfo {
+ u8 total_pfs; /* MAX RVU PFs HW supports */
+ u16 total_vfs; /* Max RVU VFs HW supports */
+ u16 max_vfs_per_pf; /* Max VFs that can be attached to a PF */
+ u8 cgx;
+ u8 lmac_per_cgx;
+ u16 cgx_chan_base; /* CGX base channel number */
+ u16 lbk_chan_base; /* LBK base channel number */
+ u16 sdp_chan_base; /* SDP base channel number */
+ u16 cpt_chan_base; /* CPT base channel number */
+ u8 cgx_links;
+ u8 lbk_links;
+ u8 sdp_links;
+ u8 cpt_links; /* Number of CPT links */
+ u8 npc_kpus; /* No of parser units */
+ u8 npc_pkinds; /* No of port kinds */
+ u8 npc_intfs; /* No of interfaces */
+ u8 npc_kpu_entries; /* No of KPU entries */
+ u16 npc_counters; /* No of match stats counters */
+ u32 lbk_bufsize; /* FIFO size supported by LBK */
+ bool npc_ext_set; /* Extended register set */
+ u64 npc_stat_ena; /* Match stats enable bit */
+
+ struct hw_cap cap;
+ struct rvu_block block[BLK_COUNT]; /* Block info */
+ struct nix_hw *nix;
+ struct rvu *rvu;
+ struct npc_pkind pkind;
+ struct npc_mcam mcam;
+ struct npc_exact_table *table;
+};
+
+struct mbox_wq_info {
+ struct otx2_mbox mbox;
+ struct rvu_work *mbox_wrk;
+
+ struct otx2_mbox mbox_up;
+ struct rvu_work *mbox_wrk_up;
+
+ struct workqueue_struct *mbox_wq;
+};
+
+struct rvu_fwdata {
+#define RVU_FWDATA_HEADER_MAGIC 0xCFDA /* Custom Firmware Data */
+#define RVU_FWDATA_VERSION 0x0001
+ u32 header_magic;
+ u32 version; /* version id */
+
+ /* MAC address */
+#define PF_MACNUM_MAX 32
+#define VF_MACNUM_MAX 256
+ u64 pf_macs[PF_MACNUM_MAX];
+ u64 vf_macs[VF_MACNUM_MAX];
+ u64 sclk;
+ u64 rclk;
+ u64 mcam_addr;
+ u64 mcam_sz;
+ u64 msixtr_base;
+ u32 ptp_ext_clk_rate;
+ u32 ptp_ext_tstamp;
+#define FWDATA_RESERVED_MEM 1022
+ u64 reserved[FWDATA_RESERVED_MEM];
+#define CGX_MAX 9
+#define CGX_LMACS_MAX 4
+#define CGX_LMACS_USX 8
+ union {
+ struct cgx_lmac_fwdata_s
+ cgx_fw_data[CGX_MAX][CGX_LMACS_MAX];
+ struct cgx_lmac_fwdata_s
+ cgx_fw_data_usx[CGX_MAX][CGX_LMACS_USX];
+ };
+ /* Do not add new fields below this line */
+};
+
+struct ptp;
+
+/* KPU profile adapter structure gathering all KPU configuration data and abstracting out the
+ * source where it came from.
+ */
+struct npc_kpu_profile_adapter {
+ const char *name;
+ u64 version;
+ const struct npc_lt_def_cfg *lt_def;
+ const struct npc_kpu_profile_action *ikpu; /* array[pkinds] */
+ const struct npc_kpu_profile *kpu; /* array[kpus] */
+ struct npc_mcam_kex *mkex;
+ struct npc_mcam_kex_hash *mkex_hash;
+ bool custom;
+ size_t pkinds;
+ size_t kpus;
+};
+
+#define RVU_SWITCH_LBK_CHAN 63
+
+struct rvu_switch {
+ struct mutex switch_lock; /* Serialize flow installation */
+ u32 used_entries;
+ u16 *entry2pcifunc;
+ u16 mode;
+ u16 start_entry;
+};
+
+struct rvu {
+ void __iomem *afreg_base;
+ void __iomem *pfreg_base;
+ struct pci_dev *pdev;
+ struct device *dev;
+ struct rvu_hwinfo *hw;
+ struct rvu_pfvf *pf;
+ struct rvu_pfvf *hwvf;
+ struct mutex rsrc_lock; /* Serialize resource alloc/free */
+ struct mutex alias_lock; /* Serialize bar2 alias access */
+ int vfs; /* Number of VFs attached to RVU */
+ int nix_blkaddr[MAX_NIX_BLKS];
+
+ /* Mbox */
+ struct mbox_wq_info afpf_wq_info;
+ struct mbox_wq_info afvf_wq_info;
+
+ /* PF FLR */
+ struct rvu_work *flr_wrk;
+ struct workqueue_struct *flr_wq;
+ struct mutex flr_lock; /* Serialize FLRs */
+
+ /* MSI-X */
+ u16 num_vec;
+ char *irq_name;
+ bool *irq_allocated;
+ dma_addr_t msix_base_iova;
+ u64 msixtr_base_phy; /* Register reset value */
+
+ /* CGX */
+#define PF_CGXMAP_BASE 1 /* PF 0 is reserved for the RVU AF */
+ u16 cgx_mapped_vfs; /* maximum CGX mapped VFs */
+ u8 cgx_mapped_pfs;
+ u8 cgx_cnt_max; /* CGX port count max */
+ u8 *pf2cgxlmac_map; /* pf to cgx_lmac map */
+ u64 *cgxlmac2pf_map; /* bitmap of mapped pfs for
+ * every cgx lmac port
+ */
+ unsigned long pf_notify_bmap; /* Flags for PF notification */
+ void **cgx_idmap; /* cgx id to cgx data map table */
+ struct work_struct cgx_evh_work;
+ struct workqueue_struct *cgx_evh_wq;
+ spinlock_t cgx_evq_lock; /* cgx event queue lock */
+ struct list_head cgx_evq_head; /* cgx event queue head */
+ struct mutex cgx_cfg_lock; /* serialize cgx configuration */
+
+ char mkex_pfl_name[MKEX_NAME_LEN]; /* Configured MKEX profile name */
+ char kpu_pfl_name[KPU_NAME_LEN]; /* Configured KPU profile name */
+
+ /* Firmware data */
+ struct rvu_fwdata *fwdata;
+ void *kpu_fwdata;
+ size_t kpu_fwdata_sz;
+ void __iomem *kpu_prfl_addr;
+
+ /* NPC KPU data */
+ struct npc_kpu_profile_adapter kpu;
+
+ struct ptp *ptp;
+
+ int mcs_blk_cnt;
+ int cpt_pf_num;
+
+#ifdef CONFIG_DEBUG_FS
+ struct rvu_debugfs rvu_dbg;
+#endif
+ struct rvu_devlink *rvu_dl;
+
+ /* RVU switch implementation over NPC with DMAC rules */
+ struct rvu_switch rswitch;
+
+ struct work_struct mcs_intr_work;
+ struct workqueue_struct *mcs_intr_wq;
+ struct list_head mcs_intrq_head;
+ /* mcs interrupt queue lock */
+ spinlock_t mcs_intrq_lock;
+ /* CPT interrupt lock */
+ spinlock_t cpt_intr_lock;
+};
+
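+/* AF CSR accessors: the RVU block address is encoded above the 28-bit
+ * per-block register offset (see the (block << 28) | offset computation).
+ */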
+static inline void rvu_write64(struct rvu *rvu, u64 block, u64 offset, u64 val)
+{
+ writeq(val, rvu->afreg_base + ((block << 28) | offset));
+}
+
+static inline u64 rvu_read64(struct rvu *rvu, u64 block, u64 offset)
+{
+ return readq(rvu->afreg_base + ((block << 28) | offset));
+}
+
+static inline void rvupf_write64(struct rvu *rvu, u64 offset, u64 val)
+{
+ writeq(val, rvu->pfreg_base + offset);
+}
+
+static inline u64 rvupf_read64(struct rvu *rvu, u64 offset)
+{
+ return readq(rvu->pfreg_base + offset);
+}
+
+static inline void rvu_bar2_sel_write64(struct rvu *rvu, u64 block, u64 offset, u64 val)
+{
+	/* HW requires a read back of the RVU_AF_BAR2_SEL register to ensure
+	 * completion of the write operation.
+	 */
+ rvu_write64(rvu, block, offset, val);
+ rvu_read64(rvu, block, offset);
+ /* Barrier to ensure read completes before accessing LF registers */
+ mb();
+}
+
+/* Silicon revisions */
+static inline bool is_rvu_pre_96xx_C0(struct rvu *rvu)
+{
+ struct pci_dev *pdev = rvu->pdev;
+ /* 96XX A0/B0, 95XX A0/A1/B0 chips */
+ return ((pdev->revision == 0x00) || (pdev->revision == 0x01) ||
+ (pdev->revision == 0x10) || (pdev->revision == 0x11) ||
+ (pdev->revision == 0x14));
+}
+
+static inline bool is_rvu_96xx_A0(struct rvu *rvu)
+{
+ struct pci_dev *pdev = rvu->pdev;
+
+ return (pdev->revision == 0x00);
+}
+
+static inline bool is_rvu_96xx_B0(struct rvu *rvu)
+{
+ struct pci_dev *pdev = rvu->pdev;
+
+ return (pdev->revision == 0x00) || (pdev->revision == 0x01);
+}
+
+static inline bool is_rvu_95xx_A0(struct rvu *rvu)
+{
+ struct pci_dev *pdev = rvu->pdev;
+
+ return (pdev->revision == 0x10) || (pdev->revision == 0x11);
+}
+
+/* REVID for PCIe devices.
+ * Bits 0..1: minor pass, bit 3..2: major pass
+ * bits 7..4: midr id
+ */
+#define PCI_REVISION_ID_96XX 0x00
+#define PCI_REVISION_ID_95XX 0x10
+#define PCI_REVISION_ID_95XXN 0x20
+#define PCI_REVISION_ID_98XX 0x30
+#define PCI_REVISION_ID_95XXMM 0x40
+#define PCI_REVISION_ID_95XXO 0xE0
+
+static inline bool is_rvu_otx2(struct rvu *rvu)
+{
+ struct pci_dev *pdev = rvu->pdev;
+
+ u8 midr = pdev->revision & 0xF0;
+
+ return (midr == PCI_REVISION_ID_96XX || midr == PCI_REVISION_ID_95XX ||
+ midr == PCI_REVISION_ID_95XXN || midr == PCI_REVISION_ID_98XX ||
+ midr == PCI_REVISION_ID_95XXMM || midr == PCI_REVISION_ID_95XXO);
+}
+
+static inline bool is_cnf10ka_a0(struct rvu *rvu)
+{
+ struct pci_dev *pdev = rvu->pdev;
+
+ if (pdev->subsystem_device == PCI_SUBSYS_DEVID_CNF10K_A &&
+ (pdev->revision & 0x0F) == 0x0)
+ return true;
+ return false;
+}
+
+static inline bool is_rvu_npc_hash_extract_en(struct rvu *rvu)
+{
+ u64 npc_const3;
+
+ npc_const3 = rvu_read64(rvu, BLKADDR_NPC, NPC_AF_CONST3);
+ if (!(npc_const3 & BIT_ULL(62)))
+ return false;
+
+ return true;
+}
+
+static inline u16 rvu_nix_chan_cgx(struct rvu *rvu, u8 cgxid,
+ u8 lmacid, u8 chan)
+{
+ u64 nix_const = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_CONST);
+ u16 cgx_chans = nix_const & 0xFFULL;
+ struct rvu_hwinfo *hw = rvu->hw;
+
+ if (!hw->cap.programmable_chans)
+ return NIX_CHAN_CGX_LMAC_CHX(cgxid, lmacid, chan);
+
+ return rvu->hw->cgx_chan_base +
+ (cgxid * hw->lmac_per_cgx + lmacid) * cgx_chans + chan;
+}
+
+static inline u16 rvu_nix_chan_lbk(struct rvu *rvu, u8 lbkid,
+ u8 chan)
+{
+ u64 nix_const = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_CONST);
+ u16 lbk_chans = (nix_const >> 16) & 0xFFULL;
+ struct rvu_hwinfo *hw = rvu->hw;
+
+ if (!hw->cap.programmable_chans)
+ return NIX_CHAN_LBK_CHX(lbkid, chan);
+
+ return rvu->hw->lbk_chan_base + lbkid * lbk_chans + chan;
+}
+
+static inline u16 rvu_nix_chan_sdp(struct rvu *rvu, u8 chan)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+
+ if (!hw->cap.programmable_chans)
+ return NIX_CHAN_SDP_CHX(chan);
+
+ return hw->sdp_chan_base + chan;
+}
+
+static inline u16 rvu_nix_chan_cpt(struct rvu *rvu, u8 chan)
+{
+ return rvu->hw->cpt_chan_base + chan;
+}
+
+static inline bool is_rvu_supports_nix1(struct rvu *rvu)
+{
+ struct pci_dev *pdev = rvu->pdev;
+
+ if (pdev->subsystem_device == PCI_SUBSYS_DEVID_98XX)
+ return true;
+
+ return false;
+}
+
+/* Function Prototypes
+ * RVU
+ */
+static inline bool is_afvf(u16 pcifunc)
+{
+ return !(pcifunc & ~RVU_PFVF_FUNC_MASK);
+}
+
+static inline bool is_vf(u16 pcifunc)
+{
+ return !!(pcifunc & RVU_PFVF_FUNC_MASK);
+}
+
+/* check if PF_FUNC is AF */
+static inline bool is_pffunc_af(u16 pcifunc)
+{
+ return !pcifunc;
+}
+
+static inline bool is_rvu_fwdata_valid(struct rvu *rvu)
+{
+ return (rvu->fwdata->header_magic == RVU_FWDATA_HEADER_MAGIC) &&
+ (rvu->fwdata->version == RVU_FWDATA_VERSION);
+}
+
+int rvu_alloc_bitmap(struct rsrc_bmap *rsrc);
+void rvu_free_bitmap(struct rsrc_bmap *rsrc);
+int rvu_alloc_rsrc(struct rsrc_bmap *rsrc);
+void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id);
+bool is_rsrc_free(struct rsrc_bmap *rsrc, int id);
+int rvu_rsrc_free_count(struct rsrc_bmap *rsrc);
+int rvu_alloc_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc);
+bool rvu_rsrc_check_contig(struct rsrc_bmap *rsrc, int nrsrc);
+u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blkaddr);
+int rvu_get_pf(u16 pcifunc);
+struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc);
+void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf);
+bool is_block_implemented(struct rvu_hwinfo *hw, int blkaddr);
+bool is_pffunc_map_valid(struct rvu *rvu, u16 pcifunc, int blktype);
+int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot);
+int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf);
+int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc);
+int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero);
+int rvu_get_num_lbk_chans(void);
+int rvu_get_blkaddr_from_slot(struct rvu *rvu, int blktype, u16 pcifunc,
+ u16 global_slot, u16 *slot_in_block);
+
+/* RVU HW reg validation */
+enum regmap_block {
+ TXSCHQ_HWREGMAP = 0,
+ MAX_HWREGMAP,
+};
+
+bool rvu_check_valid_reg(int regmap, int regblk, u64 reg);
+
+/* NPA/NIX AQ APIs */
+int rvu_aq_alloc(struct rvu *rvu, struct admin_queue **ad_queue,
+ int qsize, int inst_size, int res_size);
+void rvu_aq_free(struct rvu *rvu, struct admin_queue *aq);
+
+/* SDP APIs */
+int rvu_sdp_init(struct rvu *rvu);
+bool is_sdp_pfvf(u16 pcifunc);
+bool is_sdp_pf(u16 pcifunc);
+bool is_sdp_vf(u16 pcifunc);
+
+/* CGX APIs */
+static inline bool is_pf_cgxmapped(struct rvu *rvu, u8 pf)
+{
+ return (pf >= PF_CGXMAP_BASE && pf <= rvu->cgx_mapped_pfs) &&
+ !is_sdp_pf(pf << RVU_PFVF_PF_SHIFT);
+}
+
+static inline void rvu_get_cgx_lmac_id(u8 map, u8 *cgx_id, u8 *lmac_id)
+{
+ *cgx_id = (map >> 4) & 0xF;
+ *lmac_id = (map & 0xF);
+}
+
+static inline bool is_cgx_vf(struct rvu *rvu, u16 pcifunc)
+{
+ return ((pcifunc & RVU_PFVF_FUNC_MASK) &&
+ is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)));
+}
+
+#define M(_name, _id, fn_name, req, rsp) \
+int rvu_mbox_handler_ ## fn_name(struct rvu *, struct req *, struct rsp *);
+MBOX_MESSAGES
+#undef M
+
+int rvu_cgx_init(struct rvu *rvu);
+int rvu_cgx_exit(struct rvu *rvu);
+void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu);
+int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start);
+void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable);
+int rvu_cgx_start_stop_io(struct rvu *rvu, u16 pcifunc, bool start);
+int rvu_cgx_nix_cuml_stats(struct rvu *rvu, void *cgxd, int lmac_id, int index,
+ int rxtxflag, u64 *stat);
+void rvu_cgx_disable_dmac_entries(struct rvu *rvu, u16 pcifunc);
+
+/* NPA APIs */
+int rvu_npa_init(struct rvu *rvu);
+void rvu_npa_freemem(struct rvu *rvu);
+void rvu_npa_lf_teardown(struct rvu *rvu, u16 pcifunc, int npalf);
+int rvu_npa_aq_enq_inst(struct rvu *rvu, struct npa_aq_enq_req *req,
+ struct npa_aq_enq_rsp *rsp);
+
+/* NIX APIs */
+bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc);
+int rvu_nix_init(struct rvu *rvu);
+int rvu_nix_reserve_mark_format(struct rvu *rvu, struct nix_hw *nix_hw,
+ int blkaddr, u32 cfg);
+void rvu_nix_freemem(struct rvu *rvu);
+int rvu_get_nixlf_count(struct rvu *rvu);
+void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int npalf);
+int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr);
+int nix_update_mce_list(struct rvu *rvu, u16 pcifunc,
+ struct nix_mce_list *mce_list,
+ int mce_idx, int mcam_index, bool add);
+void nix_get_mce_list(struct rvu *rvu, u16 pcifunc, int type,
+ struct nix_mce_list **mce_list, int *mce_idx);
+struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr);
+int rvu_get_next_nix_blkaddr(struct rvu *rvu, int blkaddr);
+void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc);
+int nix_get_struct_ptrs(struct rvu *rvu, u16 pcifunc,
+ struct nix_hw **nix_hw, int *blkaddr);
+int rvu_nix_setup_ratelimit_aggr(struct rvu *rvu, u16 pcifunc,
+ u16 rq_idx, u16 match_id);
+int nix_aq_context_read(struct rvu *rvu, struct nix_hw *nix_hw,
+ struct nix_cn10k_aq_enq_req *aq_req,
+ struct nix_cn10k_aq_enq_rsp *aq_rsp,
+ u16 pcifunc, u8 ctype, u32 qidx);
+int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc);
+int nix_get_dwrr_mtu_reg(struct rvu_hwinfo *hw, int smq_link_type);
+u32 convert_dwrr_mtu_to_bytes(u8 dwrr_mtu);
+u32 convert_bytes_to_dwrr_mtu(u32 bytes);
+void rvu_nix_tx_tl2_cfg(struct rvu *rvu, int blkaddr, u16 pcifunc,
+ struct nix_txsch *txsch, bool enable);
+
+/* NPC APIs */
+void rvu_npc_freemem(struct rvu *rvu);
+int rvu_npc_get_pkind(struct rvu *rvu, u16 pf);
+void rvu_npc_set_pkind(struct rvu *rvu, int pkind, struct rvu_pfvf *pfvf);
+int npc_config_ts_kpuaction(struct rvu *rvu, int pf, u16 pcifunc, bool en);
+void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
+ int nixlf, u64 chan, u8 *mac_addr);
+void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
+ int nixlf, u64 chan, u8 chan_cnt);
+void rvu_npc_enable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
+ bool enable);
+void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
+ int nixlf, u64 chan);
+void rvu_npc_enable_bcast_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
+ bool enable);
+void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
+ u64 chan);
+void rvu_npc_enable_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
+ bool enable);
+
+void npc_enadis_default_mce_entry(struct rvu *rvu, u16 pcifunc,
+ int nixlf, int type, bool enable);
+void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
+bool rvu_npc_enable_mcam_by_entry_index(struct rvu *rvu, int entry, int intf, bool enable);
+void rvu_npc_free_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
+void rvu_npc_disable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
+void rvu_npc_enable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
+void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
+ int group, int alg_idx, int mcam_index);
+
+void rvu_npc_get_mcam_entry_alloc_info(struct rvu *rvu, u16 pcifunc,
+ int blkaddr, int *alloc_cnt,
+ int *enable_cnt);
+void rvu_npc_get_mcam_counter_alloc_info(struct rvu *rvu, u16 pcifunc,
+ int blkaddr, int *alloc_cnt,
+ int *enable_cnt);
+bool is_npc_intf_tx(u8 intf);
+bool is_npc_intf_rx(u8 intf);
+bool is_npc_interface_valid(struct rvu *rvu, u8 intf);
+int rvu_npc_get_tx_nibble_cfg(struct rvu *rvu, u64 nibble_ena);
+int npc_flow_steering_init(struct rvu *rvu, int blkaddr);
+const char *npc_get_field_name(u8 hdr);
+int npc_get_bank(struct npc_mcam *mcam, int index);
+void npc_mcam_enable_flows(struct rvu *rvu, u16 target);
+void npc_mcam_disable_flows(struct rvu *rvu, u16 target);
+void npc_enable_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, int index, bool enable);
+void npc_read_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, u16 src, struct mcam_entry *entry,
+ u8 *intf, u8 *ena);
+bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc);
+bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature);
+u32 rvu_cgx_get_fifolen(struct rvu *rvu);
+void *rvu_first_cgx_pdata(struct rvu *rvu);
+int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id);
+int rvu_cgx_config_tx(void *cgxd, int lmac_id, bool enable);
+int rvu_cgx_tx_enable(struct rvu *rvu, u16 pcifunc, bool enable);
+int rvu_cgx_prio_flow_ctrl_cfg(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause,
+ u16 pfc_en);
+int rvu_cgx_cfg_pause_frm(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause);
+void rvu_mac_reset(struct rvu *rvu, u16 pcifunc);
+u32 rvu_cgx_get_lmac_fifolen(struct rvu *rvu, int cgx, int lmac);
+int npc_get_nixlf_mcam_index(struct npc_mcam *mcam, u16 pcifunc, int nixlf,
+ int type);
+bool is_mcam_entry_enabled(struct rvu *rvu, struct npc_mcam *mcam, int blkaddr,
+ int index);
+int rvu_npc_init(struct rvu *rvu);
+int npc_install_mcam_drop_rule(struct rvu *rvu, int mcam_idx, u16 *counter_idx,
+ u64 chan_val, u64 chan_mask, u64 exact_val, u64 exact_mask,
+ u64 bcast_mcast_val, u64 bcast_mcast_mask);
+void npc_mcam_rsrcs_reserve(struct rvu *rvu, int blkaddr, int entry_idx);
+bool npc_is_feature_supported(struct rvu *rvu, u64 features, u8 intf);
+
+/* CPT APIs */
+int rvu_cpt_register_interrupts(struct rvu *rvu);
+void rvu_cpt_unregister_interrupts(struct rvu *rvu);
+int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int lf,
+ int slot);
+int rvu_cpt_ctx_flush(struct rvu *rvu, u16 pcifunc);
+int rvu_cpt_init(struct rvu *rvu);
+
+#define NDC_AF_BANK_MASK GENMASK_ULL(7, 0)
+#define NDC_AF_BANK_LINE_MASK GENMASK_ULL(31, 16)
+
+/* CN10K RVU */
+int rvu_set_channels_base(struct rvu *rvu);
+void rvu_program_channels(struct rvu *rvu);
+
+/* CN10K NIX */
+void rvu_nix_block_cn10k_init(struct rvu *rvu, struct nix_hw *nix_hw);
+
+/* CN10K RVU - LMT*/
+void rvu_reset_lmt_map_tbl(struct rvu *rvu, u16 pcifunc);
+
+#ifdef CONFIG_DEBUG_FS
+void rvu_dbg_init(struct rvu *rvu);
+void rvu_dbg_exit(struct rvu *rvu);
+#else
+static inline void rvu_dbg_init(struct rvu *rvu) {}
+static inline void rvu_dbg_exit(struct rvu *rvu) {}
+#endif
+
+int rvu_ndc_fix_locked_cacheline(struct rvu *rvu, int blkaddr);
+
+/* RVU Switch */
+void rvu_switch_enable(struct rvu *rvu);
+void rvu_switch_disable(struct rvu *rvu);
+void rvu_switch_update_rules(struct rvu *rvu, u16 pcifunc);
+
+int rvu_npc_set_parse_mode(struct rvu *rvu, u16 pcifunc, u64 mode, u8 dir,
+ u64 pkind, u8 var_len_off, u8 var_len_off_mask,
+ u8 shift_dir);
+int rvu_get_hwvf(struct rvu *rvu, int pcifunc);
+
+/* CN10K MCS */
+int rvu_mcs_init(struct rvu *rvu);
+int rvu_mcs_flr_handler(struct rvu *rvu, u16 pcifunc);
+void rvu_mcs_ptp_cfg(struct rvu *rvu, u8 rpm_id, u8 lmac_id, bool ena);
+void rvu_mcs_exit(struct rvu *rvu);
+
+#endif /* RVU_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
new file mode 100644
index 000000000..ce987ccd4
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -0,0 +1,1291 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "rvu.h"
+#include "cgx.h"
+#include "lmac_common.h"
+#include "rvu_reg.h"
+#include "rvu_trace.h"
+#include "rvu_npc_hash.h"
+
+struct cgx_evq_entry {
+ struct list_head evq_node;
+ struct cgx_link_event link_event;
+};
+
+#define M(_name, _id, _fn_name, _req_type, _rsp_type) \
+static struct _req_type __maybe_unused \
+*otx2_mbox_alloc_msg_ ## _fn_name(struct rvu *rvu, int devid) \
+{ \
+ struct _req_type *req; \
+ \
+ req = (struct _req_type *)otx2_mbox_alloc_msg_rsp( \
+ &rvu->afpf_wq_info.mbox_up, devid, sizeof(struct _req_type), \
+ sizeof(struct _rsp_type)); \
+ if (!req) \
+ return NULL; \
+ req->hdr.sig = OTX2_MBOX_REQ_SIG; \
+ req->hdr.id = _id; \
+ trace_otx2_msg_alloc(rvu->pdev, _id, sizeof(*req)); \
+ return req; \
+}
+
+MBOX_UP_CGX_MESSAGES
+#undef M
+
+bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature)
+{
+ u8 cgx_id, lmac_id;
+ void *cgxd;
+
+ if (!is_pf_cgxmapped(rvu, pf))
+		return false;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgxd = rvu_cgx_pdata(cgx_id, rvu);
+
+ return (cgx_features_get(cgxd) & feature);
+}
+
+#define CGX_OFFSET(x) ((x) * rvu->hw->lmac_per_cgx)
+/* Returns bitmap of mapped PFs */
+static u64 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id)
+{
+ return rvu->cgxlmac2pf_map[CGX_OFFSET(cgx_id) + lmac_id];
+}
+
+int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id)
+{
+ unsigned long pfmap;
+
+ pfmap = cgxlmac_to_pfmap(rvu, cgx_id, lmac_id);
+
+ /* Assumes only one pf mapped to a cgx lmac port */
+ if (!pfmap)
+ return -ENODEV;
+ else
+ return find_first_bit(&pfmap,
+ rvu->cgx_cnt_max * rvu->hw->lmac_per_cgx);
+}
+
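+/* Pack the CGX id into the upper nibble and the LMAC id into the lower nibble */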
+static u8 cgxlmac_id_to_bmap(u8 cgx_id, u8 lmac_id)
+{
+ return ((cgx_id & 0xF) << 4) | (lmac_id & 0xF);
+}
+
+void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu)
+{
+ if (cgx_id >= rvu->cgx_cnt_max)
+ return NULL;
+
+ return rvu->cgx_idmap[cgx_id];
+}
+
+/* Return the first enabled CGX instance; if none are enabled, return NULL */
+void *rvu_first_cgx_pdata(struct rvu *rvu)
+{
+ int first_enabled_cgx = 0;
+ void *cgxd = NULL;
+
+ for (; first_enabled_cgx < rvu->cgx_cnt_max; first_enabled_cgx++) {
+ cgxd = rvu_cgx_pdata(first_enabled_cgx, rvu);
+ if (cgxd)
+ break;
+ }
+
+ return cgxd;
+}
+
+/* Based on P2X connectivity find mapped NIX block for a PF */
+static void rvu_map_cgx_nix_block(struct rvu *rvu, int pf,
+ int cgx_id, int lmac_id)
+{
+ struct rvu_pfvf *pfvf = &rvu->pf[pf];
+ u8 p2x;
+
+ p2x = cgx_lmac_get_p2x(cgx_id, lmac_id);
+ /* Firmware sets P2X_SELECT as either NIX0 or NIX1 */
+ pfvf->nix_blkaddr = BLKADDR_NIX0;
+ if (is_rvu_supports_nix1(rvu) && p2x == CMR_P2X_SEL_NIX1)
+ pfvf->nix_blkaddr = BLKADDR_NIX1;
+}
+
+static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
+{
+ struct npc_pkind *pkind = &rvu->hw->pkind;
+ int cgx_cnt_max = rvu->cgx_cnt_max;
+ int pf = PF_CGXMAP_BASE;
+ unsigned long lmac_bmap;
+ int size, free_pkind;
+ int cgx, lmac, iter;
+ int numvfs, hwvfs;
+
+ if (!cgx_cnt_max)
+ return 0;
+
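+ /* cgx and lmac ids are packed into 4-bit fields of each map entry,
+ * so neither may exceed 0xF.
+ */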
+ if (cgx_cnt_max > 0xF || rvu->hw->lmac_per_cgx > 0xF)
+ return -EINVAL;
+
+ /* Alloc map table
+ * An additional entry is required since PF id starts from 1 and
+ * hence entry at offset 0 is invalid.
+ */
+ size = (cgx_cnt_max * rvu->hw->lmac_per_cgx + 1) * sizeof(u8);
+ rvu->pf2cgxlmac_map = devm_kmalloc(rvu->dev, size, GFP_KERNEL);
+ if (!rvu->pf2cgxlmac_map)
+ return -ENOMEM;
+
+ /* Initialize all entries with an invalid cgx and lmac id */
+ memset(rvu->pf2cgxlmac_map, 0xFF, size);
+
+ /* Reverse map table */
+ rvu->cgxlmac2pf_map =
+ devm_kzalloc(rvu->dev,
+ cgx_cnt_max * rvu->hw->lmac_per_cgx * sizeof(u64),
+ GFP_KERNEL);
+ if (!rvu->cgxlmac2pf_map)
+ return -ENOMEM;
+
+ rvu->cgx_mapped_pfs = 0;
+ for (cgx = 0; cgx < cgx_cnt_max; cgx++) {
+ if (!rvu_cgx_pdata(cgx, rvu))
+ continue;
+ lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu));
+ for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) {
+ lmac = cgx_get_lmacid(rvu_cgx_pdata(cgx, rvu),
+ iter);
+ rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac);
+ rvu->cgxlmac2pf_map[CGX_OFFSET(cgx) + lmac] = 1 << pf;
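+ /* Reserve a pkind for this LMAC and record the owning PF
+ * in bits 21:16 of its channel map entry.
+ */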
+ free_pkind = rvu_alloc_rsrc(&pkind->rsrc);
+ pkind->pfchan_map[free_pkind] = ((pf) & 0x3F) << 16;
+ rvu_map_cgx_nix_block(rvu, pf, cgx, lmac);
+ rvu->cgx_mapped_pfs++;
+ rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvfs);
+ rvu->cgx_mapped_vfs += numvfs;
+ pf++;
+ }
+ }
+ return 0;
+}
+
+static int rvu_cgx_send_link_info(int cgx_id, int lmac_id, struct rvu *rvu)
+{
+ struct cgx_evq_entry *qentry;
+ unsigned long flags;
+ int err;
+
+ qentry = kmalloc(sizeof(*qentry), GFP_KERNEL);
+ if (!qentry)
+ return -ENOMEM;
+
+ /* Lock the event queue before we read the local link status */
+ spin_lock_irqsave(&rvu->cgx_evq_lock, flags);
+ err = cgx_get_link_info(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
+ &qentry->link_event.link_uinfo);
+ qentry->link_event.cgx_id = cgx_id;
+ qentry->link_event.lmac_id = lmac_id;
+ if (err) {
+ kfree(qentry);
+ goto skip_add;
+ }
+ list_add_tail(&qentry->evq_node, &rvu->cgx_evq_head);
+skip_add:
+ spin_unlock_irqrestore(&rvu->cgx_evq_lock, flags);
+
+ /* start worker to process the events */
+ queue_work(rvu->cgx_evh_wq, &rvu->cgx_evh_work);
+
+ return 0;
+}
+
+/* This is called from interrupt context and is expected to be atomic */
+static int cgx_lmac_postevent(struct cgx_link_event *event, void *data)
+{
+ struct cgx_evq_entry *qentry;
+ struct rvu *rvu = data;
+
+ /* post event to the event queue */
+ qentry = kmalloc(sizeof(*qentry), GFP_ATOMIC);
+ if (!qentry)
+ return -ENOMEM;
+ qentry->link_event = *event;
+ spin_lock(&rvu->cgx_evq_lock);
+ list_add_tail(&qentry->evq_node, &rvu->cgx_evq_head);
+ spin_unlock(&rvu->cgx_evq_lock);
+
+ /* start worker to process the events */
+ queue_work(rvu->cgx_evh_wq, &rvu->cgx_evh_work);
+
+ return 0;
+}
+
+static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu)
+{
+ struct cgx_link_user_info *linfo;
+ struct cgx_link_info_msg *msg;
+ unsigned long pfmap;
+ int err, pfid;
+
+ linfo = &event->link_uinfo;
+ pfmap = cgxlmac_to_pfmap(rvu, event->cgx_id, event->lmac_id);
+ if (!pfmap) {
+ dev_err(rvu->dev, "CGX port%d:%d not mapped with PF\n",
+ event->cgx_id, event->lmac_id);
+ return;
+ }
+
+ do {
+ pfid = find_first_bit(&pfmap,
+ rvu->cgx_cnt_max * rvu->hw->lmac_per_cgx);
+ clear_bit(pfid, &pfmap);
+
+ /* check if notification is enabled */
+ if (!test_bit(pfid, &rvu->pf_notify_bmap)) {
+ dev_info(rvu->dev, "cgx %d: lmac %d Link status %s\n",
+ event->cgx_id, event->lmac_id,
+ linfo->link_up ? "UP" : "DOWN");
+ continue;
+ }
+
+ /* Send mbox message to PF */
+ msg = otx2_mbox_alloc_msg_cgx_link_event(rvu, pfid);
+ if (!msg)
+ continue;
+ msg->link_info = *linfo;
+ otx2_mbox_msg_send(&rvu->afpf_wq_info.mbox_up, pfid);
+ err = otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pfid);
+ if (err)
+ dev_warn(rvu->dev, "notification to pf %d failed\n",
+ pfid);
+ } while (pfmap);
+}
+
+static void cgx_evhandler_task(struct work_struct *work)
+{
+ struct rvu *rvu = container_of(work, struct rvu, cgx_evh_work);
+ struct cgx_evq_entry *qentry;
+ struct cgx_link_event *event;
+ unsigned long flags;
+
+ do {
+ /* Dequeue an event */
+ spin_lock_irqsave(&rvu->cgx_evq_lock, flags);
+ qentry = list_first_entry_or_null(&rvu->cgx_evq_head,
+ struct cgx_evq_entry,
+ evq_node);
+ if (qentry)
+ list_del(&qentry->evq_node);
+ spin_unlock_irqrestore(&rvu->cgx_evq_lock, flags);
+ if (!qentry)
+ break; /* nothing more to process */
+
+ event = &qentry->link_event;
+
+ /* process event */
+ cgx_notify_pfs(event, rvu);
+ kfree(qentry);
+ } while (1);
+}
+
+static int cgx_lmac_event_handler_init(struct rvu *rvu)
+{
+ unsigned long lmac_bmap;
+ struct cgx_event_cb cb;
+ int cgx, lmac, err;
+ void *cgxd;
+
+ spin_lock_init(&rvu->cgx_evq_lock);
+ INIT_LIST_HEAD(&rvu->cgx_evq_head);
+ INIT_WORK(&rvu->cgx_evh_work, cgx_evhandler_task);
+ rvu->cgx_evh_wq = alloc_workqueue("rvu_evh_wq", 0, 0);
+ if (!rvu->cgx_evh_wq) {
+ dev_err(rvu->dev, "alloc workqueue failed");
+ return -ENOMEM;
+ }
+
+ cb.notify_link_chg = cgx_lmac_postevent; /* link change call back */
+ cb.data = rvu;
+
+ for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
+ cgxd = rvu_cgx_pdata(cgx, rvu);
+ if (!cgxd)
+ continue;
+ lmac_bmap = cgx_get_lmac_bmap(cgxd);
+ for_each_set_bit(lmac, &lmac_bmap, rvu->hw->lmac_per_cgx) {
+ err = cgx_lmac_evh_register(&cb, cgxd, lmac);
+ if (err)
+ dev_err(rvu->dev,
+ "%d:%d handler register failed\n",
+ cgx, lmac);
+ }
+ }
+
+ return 0;
+}
+
+static void rvu_cgx_wq_destroy(struct rvu *rvu)
+{
+ if (rvu->cgx_evh_wq) {
+ destroy_workqueue(rvu->cgx_evh_wq);
+ rvu->cgx_evh_wq = NULL;
+ }
+}
+
+int rvu_cgx_init(struct rvu *rvu)
+{
+ int cgx, err;
+ void *cgxd;
+
+ /* CGX port ids start from 0 and are not necessarily contiguous.
+ * Hence we allocate resources based on the maximum port id value.
+ */
+ rvu->cgx_cnt_max = cgx_get_cgxcnt_max();
+ if (!rvu->cgx_cnt_max) {
+ dev_info(rvu->dev, "No CGX devices found!\n");
+ return 0;
+ }
+
+ rvu->cgx_idmap = devm_kzalloc(rvu->dev, rvu->cgx_cnt_max *
+ sizeof(void *), GFP_KERNEL);
+ if (!rvu->cgx_idmap)
+ return -ENOMEM;
+
+ /* Initialize the cgxdata table */
+ for (cgx = 0; cgx < rvu->cgx_cnt_max; cgx++)
+ rvu->cgx_idmap[cgx] = cgx_get_pdata(cgx);
+
+ /* Map CGX LMAC interfaces to RVU PFs */
+ err = rvu_map_cgx_lmac_pf(rvu);
+ if (err)
+ return err;
+
+ /* Register for CGX events */
+ err = cgx_lmac_event_handler_init(rvu);
+ if (err)
+ return err;
+
+ mutex_init(&rvu->cgx_cfg_lock);
+
+ /* Ensure event handler registration is completed, before
+ * we turn on the links
+ */
+ mb();
+
+ /* Do link up for all CGX ports */
+ for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
+ cgxd = rvu_cgx_pdata(cgx, rvu);
+ if (!cgxd)
+ continue;
+ err = cgx_lmac_linkup_start(cgxd);
+ if (err)
+ dev_err(rvu->dev,
+ "Link up process failed to start on cgx %d\n",
+ cgx);
+ }
+
+ return 0;
+}
+
+int rvu_cgx_exit(struct rvu *rvu)
+{
+ unsigned long lmac_bmap;
+ int cgx, lmac;
+ void *cgxd;
+
+ for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
+ cgxd = rvu_cgx_pdata(cgx, rvu);
+ if (!cgxd)
+ continue;
+ lmac_bmap = cgx_get_lmac_bmap(cgxd);
+ for_each_set_bit(lmac, &lmac_bmap, rvu->hw->lmac_per_cgx)
+ cgx_lmac_evh_unregister(cgxd, lmac);
+ }
+
+ /* Ensure event handler unregister is completed */
+ mb();
+
+ rvu_cgx_wq_destroy(rvu);
+ return 0;
+}
+
+/* Most of the CGX configuration is restricted to the mapped PF only;
+ * VFs of the mapped PF and other PFs are not allowed. This fn() checks
+ * whether a PFFUNC is permitted to do the config or not.
+ */
+inline bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc)
+{
+ if ((pcifunc & RVU_PFVF_FUNC_MASK) ||
+ !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)))
+ return false;
+ return true;
+}
+
+void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable)
+{
+ struct mac_ops *mac_ops;
+ u8 cgx_id, lmac_id;
+ void *cgxd;
+
+ if (!is_pf_cgxmapped(rvu, pf))
+ return;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgxd = rvu_cgx_pdata(cgx_id, rvu);
+
+ mac_ops = get_mac_ops(cgxd);
+ /* Set / clear CTL_BCK to control pause frame forwarding to NIX */
+ if (enable)
+ mac_ops->mac_enadis_rx_pause_fwding(cgxd, lmac_id, true);
+ else
+ mac_ops->mac_enadis_rx_pause_fwding(cgxd, lmac_id, false);
+}
+
+int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start)
+{
+ int pf = rvu_get_pf(pcifunc);
+ struct mac_ops *mac_ops;
+ u8 cgx_id, lmac_id;
+ void *cgxd;
+
+ if (!is_cgx_config_permitted(rvu, pcifunc))
+ return LMAC_AF_ERR_PERM_DENIED;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgxd = rvu_cgx_pdata(cgx_id, rvu);
+ mac_ops = get_mac_ops(cgxd);
+
+ return mac_ops->mac_rx_tx_enable(cgxd, lmac_id, start);
+}
+
+int rvu_cgx_tx_enable(struct rvu *rvu, u16 pcifunc, bool enable)
+{
+ int pf = rvu_get_pf(pcifunc);
+ struct mac_ops *mac_ops;
+ u8 cgx_id, lmac_id;
+ void *cgxd;
+
+ if (!is_cgx_config_permitted(rvu, pcifunc))
+ return LMAC_AF_ERR_PERM_DENIED;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgxd = rvu_cgx_pdata(cgx_id, rvu);
+ mac_ops = get_mac_ops(cgxd);
+
+ return mac_ops->mac_tx_enable(cgxd, lmac_id, enable);
+}
+
+int rvu_cgx_config_tx(void *cgxd, int lmac_id, bool enable)
+{
+ struct mac_ops *mac_ops;
+
+ mac_ops = get_mac_ops(cgxd);
+ return mac_ops->mac_tx_enable(cgxd, lmac_id, enable);
+}
+
+void rvu_cgx_disable_dmac_entries(struct rvu *rvu, u16 pcifunc)
+{
+ int pf = rvu_get_pf(pcifunc);
+ int i = 0, lmac_count = 0;
+ struct mac_ops *mac_ops;
+ u8 max_dmac_filters;
+ u8 cgx_id, lmac_id;
+ void *cgx_dev;
+
+ if (!is_cgx_config_permitted(rvu, pcifunc))
+ return;
+
+ if (rvu_npc_exact_has_match_table(rvu)) {
+ rvu_npc_exact_reset(rvu, pcifunc);
+ return;
+ }
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgx_dev = cgx_get_pdata(cgx_id);
+ lmac_count = cgx_get_lmac_cnt(cgx_dev);
+
+ mac_ops = get_mac_ops(cgx_dev);
+ if (!mac_ops)
+ return;
+
+ max_dmac_filters = mac_ops->dmac_filter_count / lmac_count;
+
+ for (i = 0; i < max_dmac_filters; i++)
+ cgx_lmac_addr_del(cgx_id, lmac_id, i);
+
+ /* cgx_lmac_addr_del() does not clear the entry at index 0,
+ * so it needs to be done explicitly
+ */
+ cgx_lmac_addr_reset(cgx_id, lmac_id);
+}
+
+int rvu_mbox_handler_cgx_start_rxtx(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, true);
+ return 0;
+}
+
+int rvu_mbox_handler_cgx_stop_rxtx(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, false);
+ return 0;
+}
+
+static int rvu_lmac_get_stats(struct rvu *rvu, struct msg_req *req,
+ void *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ struct mac_ops *mac_ops;
+ int stat = 0, err = 0;
+ u64 tx_stat, rx_stat;
+ u8 cgx_idx, lmac;
+ void *cgxd;
+
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return LMAC_AF_ERR_PERM_DENIED;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
+ cgxd = rvu_cgx_pdata(cgx_idx, rvu);
+ mac_ops = get_mac_ops(cgxd);
+
+ /* Rx stats */
+ while (stat < mac_ops->rx_stats_cnt) {
+ err = mac_ops->mac_get_rx_stats(cgxd, lmac, stat, &rx_stat);
+ if (err)
+ return err;
+ if (mac_ops->rx_stats_cnt == RPM_RX_STATS_COUNT)
+ ((struct rpm_stats_rsp *)rsp)->rx_stats[stat] = rx_stat;
+ else
+ ((struct cgx_stats_rsp *)rsp)->rx_stats[stat] = rx_stat;
+ stat++;
+ }
+
+ /* Tx stats */
+ stat = 0;
+ while (stat < mac_ops->tx_stats_cnt) {
+ err = mac_ops->mac_get_tx_stats(cgxd, lmac, stat, &tx_stat);
+ if (err)
+ return err;
+ if (mac_ops->tx_stats_cnt == RPM_TX_STATS_COUNT)
+ ((struct rpm_stats_rsp *)rsp)->tx_stats[stat] = tx_stat;
+ else
+ ((struct cgx_stats_rsp *)rsp)->tx_stats[stat] = tx_stat;
+ stat++;
+ }
+ return 0;
+}
+
+int rvu_mbox_handler_cgx_stats(struct rvu *rvu, struct msg_req *req,
+ struct cgx_stats_rsp *rsp)
+{
+ return rvu_lmac_get_stats(rvu, req, (void *)rsp);
+}
+
+int rvu_mbox_handler_rpm_stats(struct rvu *rvu, struct msg_req *req,
+ struct rpm_stats_rsp *rsp)
+{
+ return rvu_lmac_get_stats(rvu, req, (void *)rsp);
+}
+
+int rvu_mbox_handler_cgx_fec_stats(struct rvu *rvu,
+ struct msg_req *req,
+ struct cgx_fec_stats_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ struct mac_ops *mac_ops;
+ u8 cgx_idx, lmac;
+ void *cgxd;
+
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return LMAC_AF_ERR_PERM_DENIED;
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
+
+ cgxd = rvu_cgx_pdata(cgx_idx, rvu);
+ mac_ops = get_mac_ops(cgxd);
+ return mac_ops->get_fec_stats(cgxd, lmac, rsp);
+}
+
+int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu,
+ struct cgx_mac_addr_set_or_get *req,
+ struct cgx_mac_addr_set_or_get *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return -EPERM;
+
+ if (rvu_npc_exact_has_match_table(rvu))
+ return rvu_npc_exact_mac_addr_set(rvu, req, rsp);
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+ cgx_lmac_addr_set(cgx_id, lmac_id, req->mac_addr);
+
+ return 0;
+}
+
+int rvu_mbox_handler_cgx_mac_addr_add(struct rvu *rvu,
+ struct cgx_mac_addr_add_req *req,
+ struct cgx_mac_addr_add_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+ int rc = 0;
+
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return -EPERM;
+
+ if (rvu_npc_exact_has_match_table(rvu))
+ return rvu_npc_exact_mac_addr_add(rvu, req, rsp);
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ rc = cgx_lmac_addr_add(cgx_id, lmac_id, req->mac_addr);
+ if (rc >= 0) {
+ rsp->index = rc;
+ return 0;
+ }
+
+ return rc;
+}
+
+int rvu_mbox_handler_cgx_mac_addr_del(struct rvu *rvu,
+ struct cgx_mac_addr_del_req *req,
+ struct msg_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return -EPERM;
+
+ if (rvu_npc_exact_has_match_table(rvu))
+ return rvu_npc_exact_mac_addr_del(rvu, req, rsp);
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ return cgx_lmac_addr_del(cgx_id, lmac_id, req->index);
+}
+
+int rvu_mbox_handler_cgx_mac_max_entries_get(struct rvu *rvu,
+ struct msg_req *req,
+ struct cgx_max_dmac_entries_get_rsp
+ *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+
+ /* If the msg is received from PFs that are not mapped to CGX LMACs,
+ * or from a VF, then no entries are allocated for DMAC filters at the
+ * CGX level. So return zero.
+ */
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) {
+ rsp->max_dmac_filters = 0;
+ return 0;
+ }
+
+ if (rvu_npc_exact_has_match_table(rvu)) {
+ rsp->max_dmac_filters = rvu_npc_exact_get_max_entries(rvu);
+ return 0;
+ }
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ rsp->max_dmac_filters = cgx_lmac_addr_max_entries_get(cgx_id, lmac_id);
+ return 0;
+}
+
+int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu,
+ struct cgx_mac_addr_set_or_get *req,
+ struct cgx_mac_addr_set_or_get *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+ int rc = 0;
+ u64 cfg;
+
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return -EPERM;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+ rsp->hdr.rc = rc;
+ cfg = cgx_lmac_addr_get(cgx_id, lmac_id);
+ /* copy 48 bit mac address to req->mac_addr */
+ u64_to_ether_addr(cfg, rsp->mac_addr);
+ return 0;
+}
+
+int rvu_mbox_handler_cgx_promisc_enable(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ int pf = rvu_get_pf(pcifunc);
+ u8 cgx_id, lmac_id;
+
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return -EPERM;
+
+ /* Disable drop on non hit rule */
+ if (rvu_npc_exact_has_match_table(rvu))
+ return rvu_npc_exact_promisc_enable(rvu, req->hdr.pcifunc);
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+ cgx_lmac_promisc_config(cgx_id, lmac_id, true);
+ return 0;
+}
+
+int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return -EPERM;
+
+ /* Disable drop on non hit rule */
+ if (rvu_npc_exact_has_match_table(rvu))
+ return rvu_npc_exact_promisc_disable(rvu, req->hdr.pcifunc);
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+ cgx_lmac_promisc_config(cgx_id, lmac_id, false);
+ return 0;
+}
+
+static int rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ int pf = rvu_get_pf(pcifunc);
+ struct mac_ops *mac_ops;
+ u8 cgx_id, lmac_id;
+ void *cgxd;
+
+ if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP))
+ return 0;
+
+ /* This msg is expected only from PFs that are mapped to CGX LMACs;
+ * if received from any other PF/VF, simply ACK, nothing to do.
+ */
+ if ((pcifunc & RVU_PFVF_FUNC_MASK) ||
+ !is_pf_cgxmapped(rvu, pf))
+ return -ENODEV;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgxd = rvu_cgx_pdata(cgx_id, rvu);
+
+ mac_ops = get_mac_ops(cgxd);
+ mac_ops->mac_enadis_ptp_config(cgxd, lmac_id, enable);
+ /* If PTP is enabled then inform NPC that packets to be
+ * parsed by this PF will have their data shifted by 8 bytes
+ * and if PTP is disabled then no shift is required
+ */
+ if (npc_config_ts_kpuaction(rvu, pf, pcifunc, enable))
+ return -EINVAL;
+ /* This flag is required to clean up CGX conf if app gets killed */
+ pfvf->hw_rx_tstamp_en = enable;
+
+ /* Inform MCS about 8B RX header */
+ rvu_mcs_ptp_cfg(rvu, cgx_id, lmac_id, enable);
+ return 0;
+}
+
+int rvu_mbox_handler_cgx_ptp_rx_enable(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ if (!is_pf_cgxmapped(rvu, rvu_get_pf(req->hdr.pcifunc)))
+ return -EPERM;
+
+ return rvu_cgx_ptp_rx_cfg(rvu, req->hdr.pcifunc, true);
+}
+
+int rvu_mbox_handler_cgx_ptp_rx_disable(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ return rvu_cgx_ptp_rx_cfg(rvu, req->hdr.pcifunc, false);
+}
+
+static int rvu_cgx_config_linkevents(struct rvu *rvu, u16 pcifunc, bool en)
+{
+ int pf = rvu_get_pf(pcifunc);
+ u8 cgx_id, lmac_id;
+
+ if (!is_cgx_config_permitted(rvu, pcifunc))
+ return -EPERM;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+ if (en) {
+ set_bit(pf, &rvu->pf_notify_bmap);
+ /* Send the current link status to PF */
+ rvu_cgx_send_link_info(cgx_id, lmac_id, rvu);
+ } else {
+ clear_bit(pf, &rvu->pf_notify_bmap);
+ }
+
+ return 0;
+}
+
+int rvu_mbox_handler_cgx_start_linkevents(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, true);
+ return 0;
+}
+
+int rvu_mbox_handler_cgx_stop_linkevents(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, false);
+ return 0;
+}
+
+int rvu_mbox_handler_cgx_get_linkinfo(struct rvu *rvu, struct msg_req *req,
+ struct cgx_link_info_msg *rsp)
+{
+ u8 cgx_id, lmac_id;
+ int pf, err;
+
+ pf = rvu_get_pf(req->hdr.pcifunc);
+
+ if (!is_pf_cgxmapped(rvu, pf))
+ return -ENODEV;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+ err = cgx_get_link_info(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
+ &rsp->link_info);
+ return err;
+}
+
+int rvu_mbox_handler_cgx_features_get(struct rvu *rvu,
+ struct msg_req *req,
+ struct cgx_features_info_msg *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_idx, lmac;
+ void *cgxd;
+
+ if (!is_pf_cgxmapped(rvu, pf))
+ return 0;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
+ cgxd = rvu_cgx_pdata(cgx_idx, rvu);
+ rsp->lmac_features = cgx_features_get(cgxd);
+
+ return 0;
+}
+
+u32 rvu_cgx_get_fifolen(struct rvu *rvu)
+{
+ struct mac_ops *mac_ops;
+ u32 fifo_len;
+
+ mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
+ fifo_len = mac_ops ? mac_ops->fifo_len : 0;
+
+ return fifo_len;
+}
+
+u32 rvu_cgx_get_lmac_fifolen(struct rvu *rvu, int cgx, int lmac)
+{
+ struct mac_ops *mac_ops;
+ void *cgxd;
+
+ cgxd = rvu_cgx_pdata(cgx, rvu);
+ if (!cgxd)
+ return 0;
+
+ mac_ops = get_mac_ops(cgxd);
+ if (!mac_ops->lmac_fifo_len)
+ return 0;
+
+ return mac_ops->lmac_fifo_len(cgxd, lmac);
+}
+
+static int rvu_cgx_config_intlbk(struct rvu *rvu, u16 pcifunc, bool en)
+{
+ int pf = rvu_get_pf(pcifunc);
+ struct mac_ops *mac_ops;
+ u8 cgx_id, lmac_id;
+
+ if (!is_cgx_config_permitted(rvu, pcifunc))
+ return -EPERM;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ mac_ops = get_mac_ops(rvu_cgx_pdata(cgx_id, rvu));
+
+ return mac_ops->mac_lmac_intl_lbk(rvu_cgx_pdata(cgx_id, rvu),
+ lmac_id, en);
+}
+
+int rvu_mbox_handler_cgx_intlbk_enable(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, true);
+ return 0;
+}
+
+int rvu_mbox_handler_cgx_intlbk_disable(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, false);
+ return 0;
+}
+
+int rvu_cgx_cfg_pause_frm(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause)
+{
+ int pf = rvu_get_pf(pcifunc);
+ u8 rx_pfc = 0, tx_pfc = 0;
+ struct mac_ops *mac_ops;
+ u8 cgx_id, lmac_id;
+ void *cgxd;
+
+ if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_FC))
+ return 0;
+
+ /* This msg is expected only from PF/VFs that are mapped to CGX LMACs;
+ * if received from any other PF/VF, simply ACK, nothing to do.
+ */
+ if (!is_pf_cgxmapped(rvu, pf))
+ return LMAC_AF_ERR_PF_NOT_MAPPED;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgxd = rvu_cgx_pdata(cgx_id, rvu);
+ mac_ops = get_mac_ops(cgxd);
+
+ mac_ops->mac_get_pfc_frm_cfg(cgxd, lmac_id, &tx_pfc, &rx_pfc);
+ if (tx_pfc || rx_pfc) {
+ dev_warn(rvu->dev,
+ "Can not configure 802.3X flow control as PFC frames are enabled");
+ return LMAC_AF_ERR_8023PAUSE_ENADIS_PERM_DENIED;
+ }
+
+ mutex_lock(&rvu->rsrc_lock);
+ if (verify_lmac_fc_cfg(cgxd, lmac_id, tx_pause, rx_pause,
+ pcifunc & RVU_PFVF_FUNC_MASK)) {
+ mutex_unlock(&rvu->rsrc_lock);
+ return LMAC_AF_ERR_PERM_DENIED;
+ }
+ mutex_unlock(&rvu->rsrc_lock);
+
+ return mac_ops->mac_enadis_pause_frm(cgxd, lmac_id, tx_pause, rx_pause);
+}
+
+int rvu_mbox_handler_cgx_cfg_pause_frm(struct rvu *rvu,
+ struct cgx_pause_frm_cfg *req,
+ struct cgx_pause_frm_cfg *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ struct mac_ops *mac_ops;
+ u8 cgx_id, lmac_id;
+ int err = 0;
+ void *cgxd;
+
+ /* This msg is expected only from PF/VFs that are mapped to CGX LMACs;
+ * if received from any other PF/VF, simply ACK, nothing to do.
+ */
+ if (!is_pf_cgxmapped(rvu, pf))
+ return -ENODEV;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgxd = rvu_cgx_pdata(cgx_id, rvu);
+ mac_ops = get_mac_ops(cgxd);
+
+ if (req->set)
+ err = rvu_cgx_cfg_pause_frm(rvu, req->hdr.pcifunc, req->tx_pause, req->rx_pause);
+ else
+ mac_ops->mac_get_pause_frm_status(cgxd, lmac_id, &rsp->tx_pause, &rsp->rx_pause);
+
+ return err;
+}
+
+int rvu_mbox_handler_cgx_get_phy_fec_stats(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+
+ if (!is_pf_cgxmapped(rvu, pf))
+ return LMAC_AF_ERR_PF_NOT_MAPPED;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ return cgx_get_phy_fec_stats(rvu_cgx_pdata(cgx_id, rvu), lmac_id);
+}
+
+/* Finds the cumulative status of NIX rx/tx counters from the LFs of a PF and
+ * those of its VFs as well, i.e. NIX rx/tx counters at the CGX port level
+ */
+int rvu_cgx_nix_cuml_stats(struct rvu *rvu, void *cgxd, int lmac_id,
+ int index, int rxtxflag, u64 *stat)
+{
+ struct rvu_block *block;
+ int blkaddr;
+ u16 pcifunc;
+ int pf, lf;
+
+ *stat = 0;
+
+ if (!cgxd || !rvu)
+ return -EINVAL;
+
+ pf = cgxlmac_to_pf(rvu, cgx_get_cgxid(cgxd), lmac_id);
+ if (pf < 0)
+ return pf;
+
+ /* Assumes the LFs of a PF and all of its VFs belong to the same
+ * NIX block
+ */
+ pcifunc = pf << RVU_PFVF_PF_SHIFT;
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (blkaddr < 0)
+ return 0;
+ block = &rvu->hw->block[blkaddr];
+
+ for (lf = 0; lf < block->lf.max; lf++) {
+ /* Check if a lf is attached to this PF or one of its VFs */
+ if (!((block->fn_map[lf] & ~RVU_PFVF_FUNC_MASK) == (pcifunc &
+ ~RVU_PFVF_FUNC_MASK)))
+ continue;
+ if (rxtxflag == NIX_STATS_RX)
+ *stat += rvu_read64(rvu, blkaddr,
+ NIX_AF_LFX_RX_STATX(lf, index));
+ else
+ *stat += rvu_read64(rvu, blkaddr,
+ NIX_AF_LFX_TX_STATX(lf, index));
+ }
+
+ return 0;
+}
+
+int rvu_cgx_start_stop_io(struct rvu *rvu, u16 pcifunc, bool start)
+{
+ struct rvu_pfvf *parent_pf, *pfvf;
+ int cgx_users, err = 0;
+
+ if (!is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)))
+ return 0;
+
+ parent_pf = &rvu->pf[rvu_get_pf(pcifunc)];
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+
+ mutex_lock(&rvu->cgx_cfg_lock);
+
+ if (start && pfvf->cgx_in_use)
+ goto exit; /* CGX is already started hence nothing to do */
+ if (!start && !pfvf->cgx_in_use)
+ goto exit; /* CGX is already stopped hence nothing to do */
+
+ if (start) {
+ cgx_users = parent_pf->cgx_users;
+ parent_pf->cgx_users++;
+ } else {
+ parent_pf->cgx_users--;
+ cgx_users = parent_pf->cgx_users;
+ }
+
+ /* Start CGX when the first of all NIXLFs is started.
+ * Stop CGX when the last of all NIXLFs is stopped.
+ */
+ if (!cgx_users) {
+ err = rvu_cgx_config_rxtx(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK,
+ start);
+ if (err) {
+ dev_err(rvu->dev, "Unable to %s CGX\n",
+ start ? "start" : "stop");
+ /* Revert the usage count in case of error */
+ parent_pf->cgx_users = start ? parent_pf->cgx_users - 1
+ : parent_pf->cgx_users + 1;
+ goto exit;
+ }
+ }
+ pfvf->cgx_in_use = start;
+exit:
+ mutex_unlock(&rvu->cgx_cfg_lock);
+ return err;
+}
+
+int rvu_mbox_handler_cgx_set_fec_param(struct rvu *rvu,
+ struct fec_mode *req,
+ struct fec_mode *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+
+ if (!is_pf_cgxmapped(rvu, pf))
+ return -EPERM;
+
+ if (req->fec == OTX2_FEC_OFF)
+ req->fec = OTX2_FEC_NONE;
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ rsp->fec = cgx_set_fec(req->fec, cgx_id, lmac_id);
+ return 0;
+}
+
+int rvu_mbox_handler_cgx_get_aux_link_info(struct rvu *rvu, struct msg_req *req,
+ struct cgx_fw_data *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+
+ if (!rvu->fwdata)
+ return LMAC_AF_ERR_FIRMWARE_DATA_NOT_MAPPED;
+
+ if (!is_pf_cgxmapped(rvu, pf))
+ return -EPERM;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+ if (rvu->hw->lmac_per_cgx == CGX_LMACS_USX)
+ memcpy(&rsp->fwdata,
+ &rvu->fwdata->cgx_fw_data_usx[cgx_id][lmac_id],
+ sizeof(struct cgx_lmac_fwdata_s));
+ else
+ memcpy(&rsp->fwdata,
+ &rvu->fwdata->cgx_fw_data[cgx_id][lmac_id],
+ sizeof(struct cgx_lmac_fwdata_s));
+
+ return 0;
+}
+
+int rvu_mbox_handler_cgx_set_link_mode(struct rvu *rvu,
+ struct cgx_set_link_mode_req *req,
+ struct cgx_set_link_mode_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_idx, lmac;
+ void *cgxd;
+
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return -EPERM;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
+ cgxd = rvu_cgx_pdata(cgx_idx, rvu);
+ rsp->status = cgx_set_link_mode(cgxd, req->args, cgx_idx, lmac);
+ return 0;
+}
+
+int rvu_mbox_handler_cgx_mac_addr_reset(struct rvu *rvu, struct cgx_mac_addr_reset_req *req,
+ struct msg_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return LMAC_AF_ERR_PERM_DENIED;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+ if (rvu_npc_exact_has_match_table(rvu))
+ return rvu_npc_exact_mac_addr_reset(rvu, req, rsp);
+
+ return cgx_lmac_addr_reset(cgx_id, lmac_id);
+}
+
+int rvu_mbox_handler_cgx_mac_addr_update(struct rvu *rvu,
+ struct cgx_mac_addr_update_req *req,
+ struct cgx_mac_addr_update_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return LMAC_AF_ERR_PERM_DENIED;
+
+ if (rvu_npc_exact_has_match_table(rvu))
+ return rvu_npc_exact_mac_addr_update(rvu, req, rsp);
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ return cgx_lmac_addr_update(cgx_id, lmac_id, req->mac_addr, req->index);
+}
+
+int rvu_cgx_prio_flow_ctrl_cfg(struct rvu *rvu, u16 pcifunc, u8 tx_pause,
+ u8 rx_pause, u16 pfc_en)
+{
+ int pf = rvu_get_pf(pcifunc);
+ u8 rx_8023 = 0, tx_8023 = 0;
+ struct mac_ops *mac_ops;
+ u8 cgx_id, lmac_id;
+ void *cgxd;
+
+ /* This msg is expected only from PF/VFs that are mapped to CGX LMACs;
+ * if received from any other PF/VF, simply ACK, nothing to do.
+ */
+ if (!is_pf_cgxmapped(rvu, pf))
+ return -ENODEV;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgxd = rvu_cgx_pdata(cgx_id, rvu);
+ mac_ops = get_mac_ops(cgxd);
+
+ mac_ops->mac_get_pause_frm_status(cgxd, lmac_id, &tx_8023, &rx_8023);
+ if (tx_8023 || rx_8023) {
+ dev_warn(rvu->dev,
+ "Can not configure PFC as 802.3X pause frames are enabled");
+ return LMAC_AF_ERR_PFC_ENADIS_PERM_DENIED;
+ }
+
+ mutex_lock(&rvu->rsrc_lock);
+ if (verify_lmac_fc_cfg(cgxd, lmac_id, tx_pause, rx_pause,
+ pcifunc & RVU_PFVF_FUNC_MASK)) {
+ mutex_unlock(&rvu->rsrc_lock);
+ return LMAC_AF_ERR_PERM_DENIED;
+ }
+ mutex_unlock(&rvu->rsrc_lock);
+
+ return mac_ops->pfc_config(cgxd, lmac_id, tx_pause, rx_pause, pfc_en);
+}
+
+int rvu_mbox_handler_cgx_prio_flow_ctrl_cfg(struct rvu *rvu,
+ struct cgx_pfc_cfg *req,
+ struct cgx_pfc_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ struct mac_ops *mac_ops;
+ u8 cgx_id, lmac_id;
+ void *cgxd;
+ int err;
+
+ /* This msg is expected only from PF/VFs that are mapped to CGX LMACs;
+ * if received from any other PF/VF, simply ACK, nothing to do.
+ */
+ if (!is_pf_cgxmapped(rvu, pf))
+ return -ENODEV;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgxd = rvu_cgx_pdata(cgx_id, rvu);
+ mac_ops = get_mac_ops(cgxd);
+
+ err = rvu_cgx_prio_flow_ctrl_cfg(rvu, req->hdr.pcifunc, req->tx_pause,
+ req->rx_pause, req->pfc_en);
+
+ mac_ops->mac_get_pfc_frm_cfg(cgxd, lmac_id, &rsp->tx_pause, &rsp->rx_pause);
+ return err;
+}
+
+void rvu_mac_reset(struct rvu *rvu, u16 pcifunc)
+{
+ int pf = rvu_get_pf(pcifunc);
+ struct mac_ops *mac_ops;
+ struct cgx *cgxd;
+ u8 cgx, lmac;
+
+ if (!is_pf_cgxmapped(rvu, pf))
+ return;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx, &lmac);
+ cgxd = rvu_cgx_pdata(cgx, rvu);
+ mac_ops = get_mac_ops(cgxd);
+
+ if (mac_ops->mac_reset(cgxd, lmac, !is_vf(pcifunc)))
+ dev_err(rvu->dev, "Failed to reset MAC\n");
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
new file mode 100644
index 000000000..0e74c5a22
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
@@ -0,0 +1,561 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RPM CN10K driver
+ *
+ * Copyright (C) 2020 Marvell.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/pci.h>
+#include "rvu.h"
+#include "cgx.h"
+#include "rvu_reg.h"
+
+/* RVU LMTST */
+#define LMT_TBL_OP_READ 0
+#define LMT_TBL_OP_WRITE 1
+#define LMT_MAP_TABLE_SIZE (128 * 1024)
+#define LMT_MAPTBL_ENTRY_SIZE 16
+
+/* Function to perform operations (read/write) on lmtst map table */
+static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val,
+ int lmt_tbl_op)
+{
+ void __iomem *lmt_map_base;
+ u64 tbl_base;
+
+ tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE);
+
+ lmt_map_base = ioremap_wc(tbl_base, LMT_MAP_TABLE_SIZE);
+ if (!lmt_map_base) {
+ dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n");
+ return -ENOMEM;
+ }
+
+ if (lmt_tbl_op == LMT_TBL_OP_READ) {
+ *val = readq(lmt_map_base + index);
+ } else {
+ writeq((*val), (lmt_map_base + index));
+ /* Flush the AP interceptor cache to make the APR_LMT_MAP_ENTRY_S
+ * changes effective. Writing 1 triggers the flush, the read back
+ * acts as a barrier and sets up a data dependency, and writing 0
+ * after the write of 1 completes the flush.
+ */
+ rvu_write64(rvu, BLKADDR_APR, APR_AF_LMT_CTL, BIT_ULL(0));
+ rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_CTL);
+ rvu_write64(rvu, BLKADDR_APR, APR_AF_LMT_CTL, 0x00);
+ }
+
+ iounmap(lmt_map_base);
+ return 0;
+}
+
+#define LMT_MAP_TBL_W1_OFF 8
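+/* Each RVU PF/VF function owns one LMT_MAPTBL_ENTRY_SIZE byte entry;
+ * the table is indexed linearly as (PF index * total VFs) + VF function.
+ */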
+static u32 rvu_get_lmtst_tbl_index(struct rvu *rvu, u16 pcifunc)
+{
+ return ((rvu_get_pf(pcifunc) * rvu->hw->total_vfs) +
+ (pcifunc & RVU_PFVF_FUNC_MASK)) * LMT_MAPTBL_ENTRY_SIZE;
+}
+
+static int rvu_get_lmtaddr(struct rvu *rvu, u16 pcifunc,
+ u64 iova, u64 *lmt_addr)
+{
+ u64 pa, val, pf;
+ int err = 0;
+
+ if (!iova) {
+ dev_err(rvu->dev, "%s Requested Null address for transulation\n", __func__);
+ return -EINVAL;
+ }
+
+ mutex_lock(&rvu->rsrc_lock);
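+ /* Ask the RVU SMMU to translate the given IOVA on behalf of this
+ * pcifunc and poll for the translation response.
+ */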
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_REQ, iova);
+ pf = rvu_get_pf(pcifunc) & 0x1F;
+ val = BIT_ULL(63) | BIT_ULL(14) | BIT_ULL(13) | pf << 8 |
+ ((pcifunc & RVU_PFVF_FUNC_MASK) & 0xFF);
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_TXN_REQ, val);
+
+ err = rvu_poll_reg(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_RSP_STS, BIT_ULL(0), false);
+ if (err) {
+ dev_err(rvu->dev, "%s LMTLINE iova transulation failed\n", __func__);
+ goto exit;
+ }
+ val = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_RSP_STS);
+ if (val & ~0x1ULL) {
+ dev_err(rvu->dev, "%s LMTLINE iova transulation failed err:%llx\n", __func__, val);
+ err = -EIO;
+ goto exit;
+ }
+ /* PA[51:12] = RVU_AF_SMMU_TLN_FLIT0[57:18]
+ * PA[11:0] = IOVA[11:0]
+ */
+ pa = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_TLN_FLIT0) >> 18;
+ pa &= GENMASK_ULL(39, 0);
+ *lmt_addr = (pa << 12) | (iova & 0xFFF);
+exit:
+ mutex_unlock(&rvu->rsrc_lock);
+ return err;
+}
+
+static int rvu_update_lmtaddr(struct rvu *rvu, u16 pcifunc, u64 lmt_addr)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ u32 tbl_idx;
+ int err = 0;
+ u64 val;
+
+ /* Read the current lmt addr of pcifunc */
+ tbl_idx = rvu_get_lmtst_tbl_index(rvu, pcifunc);
+ err = lmtst_map_table_ops(rvu, tbl_idx, &val, LMT_TBL_OP_READ);
+ if (err) {
+ dev_err(rvu->dev,
+ "Failed to read LMT map table: index 0x%x err %d\n",
+ tbl_idx, err);
+ return err;
+ }
+
+ /* Store the secondary's lmt base address as this needs to be
+ * reverted in FLR. Also make sure this default value doesn't
+ * get overwritten on multiple calls to this mailbox.
+ */
+ if (!pfvf->lmt_base_addr)
+ pfvf->lmt_base_addr = val;
+
+ /* Update the LMT table with new addr */
+ err = lmtst_map_table_ops(rvu, tbl_idx, &lmt_addr, LMT_TBL_OP_WRITE);
+ if (err) {
+ dev_err(rvu->dev,
+ "Failed to update LMT map table: index 0x%x err %d\n",
+ tbl_idx, err);
+ return err;
+ }
+ return 0;
+}
+
+int rvu_mbox_handler_lmtst_tbl_setup(struct rvu *rvu,
+ struct lmtst_tbl_setup_req *req,
+ struct msg_rsp *rsp)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
+ u32 pri_tbl_idx, tbl_idx;
+ u64 lmt_addr;
+ int err = 0;
+ u64 val;
+
+ /* Check if the PF_FUNC wants to use its own local memory as the
+ * LMTLINE region; if so, convert that IOVA to a physical address
+ * and populate the LMT table with that address
+ */
+ if (req->use_local_lmt_region) {
+ err = rvu_get_lmtaddr(rvu, req->hdr.pcifunc,
+ req->lmt_iova, &lmt_addr);
+ if (err < 0)
+ return err;
+
+ /* Update the lmt addr for this PFFUNC in the LMT table */
+ err = rvu_update_lmtaddr(rvu, req->hdr.pcifunc, lmt_addr);
+ if (err)
+ return err;
+ }
+
+ /* Reconfigure the lmtst map table in lmt region shared mode, i.e. let
+ * multiple PF_FUNCs share an LMTLINE region. The primary/base
+ * pcifunc (which is passed as an argument to the mailbox) is the one
+ * whose lmt base address will be shared with the secondary
+ * pcifunc (the one which is calling this mailbox).
+ */
+ if (req->base_pcifunc) {
+ /* Calculating the LMT table index equivalent to primary
+ * pcifunc.
+ */
+ pri_tbl_idx = rvu_get_lmtst_tbl_index(rvu, req->base_pcifunc);
+
+ /* Read the base lmt addr of the primary pcifunc */
+ err = lmtst_map_table_ops(rvu, pri_tbl_idx, &val,
+ LMT_TBL_OP_READ);
+ if (err) {
+ dev_err(rvu->dev,
+ "Failed to read LMT map table: index 0x%x err %d\n",
+ pri_tbl_idx, err);
+ goto error;
+ }
+
+ /* Update the base lmt addr of secondary with primary's base
+ * lmt addr.
+ */
+ err = rvu_update_lmtaddr(rvu, req->hdr.pcifunc, val);
+ if (err)
+ return err;
+ }
+
+ /* This mailbox can also be used to update word1 of APR_LMT_MAP_ENTRY_S,
+ * e.g. to enable scheduled LMTST, disable LMTLINE prefetch or disable
+ * early completion for ordered LMTST.
+ */
+ if (req->sch_ena || req->dis_sched_early_comp || req->dis_line_pref) {
+ tbl_idx = rvu_get_lmtst_tbl_index(rvu, req->hdr.pcifunc);
+ err = lmtst_map_table_ops(rvu, tbl_idx + LMT_MAP_TBL_W1_OFF,
+ &val, LMT_TBL_OP_READ);
+ if (err) {
+ dev_err(rvu->dev,
+ "Failed to read LMT map table: index 0x%x err %d\n",
+ tbl_idx + LMT_MAP_TBL_W1_OFF, err);
+ goto error;
+ }
+
+ /* Store the lmt map table entry word1 default value as this needs
+ * to be reverted in FLR. Also make sure this default value
+ * doesn't get overwritten on multiple calls to this mailbox.
+ */
+ if (!pfvf->lmt_map_ent_w1)
+ pfvf->lmt_map_ent_w1 = val;
+
+ /* Disable early completion for Ordered LMTSTs. */
+ if (req->dis_sched_early_comp)
+ val |= (req->dis_sched_early_comp <<
+ APR_LMT_MAP_ENT_DIS_SCH_CMP_SHIFT);
+ /* Enable scheduled LMTST */
+ if (req->sch_ena)
+ val |= (req->sch_ena << APR_LMT_MAP_ENT_SCH_ENA_SHIFT) |
+ req->ssow_pf_func;
+ /* Disables LMTLINE prefetch before receiving store data. */
+ if (req->dis_line_pref)
+ val |= (req->dis_line_pref <<
+ APR_LMT_MAP_ENT_DIS_LINE_PREF_SHIFT);
+
+ err = lmtst_map_table_ops(rvu, tbl_idx + LMT_MAP_TBL_W1_OFF,
+ &val, LMT_TBL_OP_WRITE);
+ if (err) {
+ dev_err(rvu->dev,
+ "Failed to update LMT map table: index 0x%x err %d\n",
+ tbl_idx + LMT_MAP_TBL_W1_OFF, err);
+ goto error;
+ }
+ }
+
+error:
+ return err;
+}
+
+/* Resetting the lmtst map table to original base addresses */
+void rvu_reset_lmt_map_tbl(struct rvu *rvu, u16 pcifunc)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ u32 tbl_idx;
+ int err;
+
+ if (is_rvu_otx2(rvu))
+ return;
+
+ if (pfvf->lmt_base_addr || pfvf->lmt_map_ent_w1) {
+ /* This corresponds to lmt map table index */
+ tbl_idx = rvu_get_lmtst_tbl_index(rvu, pcifunc);
+ /* Revert to the original lmt base addr for the respective
+ * pcifunc.
+ */
+ if (pfvf->lmt_base_addr) {
+ err = lmtst_map_table_ops(rvu, tbl_idx,
+ &pfvf->lmt_base_addr,
+ LMT_TBL_OP_WRITE);
+ if (err)
+ dev_err(rvu->dev,
+ "Failed to update LMT map table: index 0x%x err %d\n",
+ tbl_idx, err);
+ pfvf->lmt_base_addr = 0;
+ }
+ /* Revert to the original word1 val of the lmtst map table entry
+ * which underwent changes.
+ */
+ if (pfvf->lmt_map_ent_w1) {
+ err = lmtst_map_table_ops(rvu,
+ tbl_idx + LMT_MAP_TBL_W1_OFF,
+ &pfvf->lmt_map_ent_w1,
+ LMT_TBL_OP_WRITE);
+ if (err)
+ dev_err(rvu->dev,
+ "Failed to update LMT map table: index 0x%x err %d\n",
+ tbl_idx + LMT_MAP_TBL_W1_OFF, err);
+ pfvf->lmt_map_ent_w1 = 0;
+ }
+ }
+}
+
+int rvu_set_channels_base(struct rvu *rvu)
+{
+ u16 nr_lbk_chans, nr_sdp_chans, nr_cgx_chans, nr_cpt_chans;
+ u16 sdp_chan_base, cgx_chan_base, cpt_chan_base;
+ struct rvu_hwinfo *hw = rvu->hw;
+ u64 nix_const, nix_const1;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ nix_const = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
+ nix_const1 = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
+
+ hw->cgx = (nix_const >> 12) & 0xFULL;
+ hw->lmac_per_cgx = (nix_const >> 8) & 0xFULL;
+ hw->cgx_links = hw->cgx * hw->lmac_per_cgx;
+ hw->lbk_links = (nix_const >> 24) & 0xFULL;
+ hw->cpt_links = (nix_const >> 44) & 0xFULL;
+ hw->sdp_links = 1;
+
+ hw->cgx_chan_base = NIX_CHAN_CGX_LMAC_CHX(0, 0, 0);
+ hw->lbk_chan_base = NIX_CHAN_LBK_CHX(0, 0);
+ hw->sdp_chan_base = NIX_CHAN_SDP_CH_START;
+
+ /* No Programmable channels */
+ if (!(nix_const & BIT_ULL(60)))
+ return 0;
+
+ hw->cap.programmable_chans = true;
+
+ /* If programmable channels are present then configure
+ * channels such that all channel numbers are contiguous,
+ * leaving no holes. This way the new CPT channels can be
+ * accommodated. The order of channel numbers assigned is
+ * LBK, SDP, CGX and CPT. Also the base channel number
+ * of a block must be a multiple of the number of channels
+ * of the block.
+ */
+ nr_lbk_chans = (nix_const >> 16) & 0xFFULL;
+ nr_sdp_chans = nix_const1 & 0xFFFULL;
+ nr_cgx_chans = nix_const & 0xFFULL;
+ nr_cpt_chans = (nix_const >> 32) & 0xFFFULL;
+
+ sdp_chan_base = hw->lbk_chan_base + hw->lbk_links * nr_lbk_chans;
+ /* Round up base channel to multiple of number of channels */
+ hw->sdp_chan_base = ALIGN(sdp_chan_base, nr_sdp_chans);
+
+ cgx_chan_base = hw->sdp_chan_base + hw->sdp_links * nr_sdp_chans;
+ hw->cgx_chan_base = ALIGN(cgx_chan_base, nr_cgx_chans);
+
+ cpt_chan_base = hw->cgx_chan_base + hw->cgx_links * nr_cgx_chans;
+ hw->cpt_chan_base = ALIGN(cpt_chan_base, nr_cpt_chans);
+
+ /* Out of 4096 channels, start CPT from 2048 so
+ * that the MSB for CPT channels is always set
+ */
+ if (cpt_chan_base <= NIX_CHAN_CPT_CH_START) {
+ hw->cpt_chan_base = NIX_CHAN_CPT_CH_START;
+ } else {
+ dev_err(rvu->dev,
+ "CPT channels could not fit in the range 2048-4095\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
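+/* The LBK_CONST SRC/DST fields encode the index of the NIX block that
+ * side of the LBK instance is connected to.
+ */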
+#define LBK_CONNECT_NIXX(a) (0x0 + (a))
+
+static void __rvu_lbk_set_chans(struct rvu *rvu, void __iomem *base,
+ u64 offset, int lbkid, u16 chans)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u64 cfg;
+
+ cfg = readq(base + offset);
+ cfg &= ~(LBK_LINK_CFG_RANGE_MASK |
+ LBK_LINK_CFG_ID_MASK | LBK_LINK_CFG_BASE_MASK);
+ cfg |= FIELD_PREP(LBK_LINK_CFG_RANGE_MASK, ilog2(chans));
+ cfg |= FIELD_PREP(LBK_LINK_CFG_ID_MASK, lbkid);
+ cfg |= FIELD_PREP(LBK_LINK_CFG_BASE_MASK, hw->lbk_chan_base);
+
+ writeq(cfg, base + offset);
+}
+
+static void rvu_lbk_set_channels(struct rvu *rvu)
+{
+ struct pci_dev *pdev = NULL;
+ void __iomem *base;
+ u64 lbk_const;
+ u8 src, dst;
+ u16 chans;
+
+ /* To loopback packets between multiple NIX blocks,
+ * multiple LBK blocks are needed. With two NIX blocks,
+ * four LBK blocks are needed and each LBK block's
+ * source and destination are as follows:
+ * LBK0 - source NIX0 and destination NIX0
+ * LBK1 - source NIX0 and destination NIX1
+ * LBK2 - source NIX1 and destination NIX0
+ * LBK3 - source NIX1 and destination NIX1
+ * As per the HRM channel numbers should be programmed as:
+ * P2X and X2P of LBK0 as same
+ * P2X and X2P of LBK3 as same
+ * P2X of LBK1 and X2P of LBK2 as same
+ * P2X of LBK2 and X2P of LBK1 as same
+ */
+ while (true) {
+ pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVID_OCTEONTX2_LBK, pdev);
+ if (!pdev)
+ return;
+
+ base = pci_ioremap_bar(pdev, 0);
+ if (!base)
+ goto err_put;
+
+ lbk_const = readq(base + LBK_CONST);
+ chans = FIELD_GET(LBK_CONST_CHANS, lbk_const);
+ dst = FIELD_GET(LBK_CONST_DST, lbk_const);
+ src = FIELD_GET(LBK_CONST_SRC, lbk_const);
+
+ if (src == dst) {
+ if (src == LBK_CONNECT_NIXX(0)) { /* LBK0 */
+ __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_X2P,
+ 0, chans);
+ __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_P2X,
+ 0, chans);
+ } else if (src == LBK_CONNECT_NIXX(1)) { /* LBK3 */
+ __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_X2P,
+ 1, chans);
+ __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_P2X,
+ 1, chans);
+ }
+ } else {
+ if (src == LBK_CONNECT_NIXX(0)) { /* LBK1 */
+ __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_X2P,
+ 0, chans);
+ __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_P2X,
+ 1, chans);
+ } else if (src == LBK_CONNECT_NIXX(1)) { /* LBK2 */
+ __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_X2P,
+ 1, chans);
+ __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_P2X,
+ 0, chans);
+ }
+ }
+ iounmap(base);
+ }
+err_put:
+ pci_dev_put(pdev);
+}
+
+static void __rvu_nix_set_channels(struct rvu *rvu, int blkaddr)
+{
+ u64 nix_const1 = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
+ u64 nix_const = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
+ u16 cgx_chans, lbk_chans, sdp_chans, cpt_chans;
+ struct rvu_hwinfo *hw = rvu->hw;
+ int link, nix_link = 0;
+ u16 start;
+ u64 cfg;
+
+ cgx_chans = nix_const & 0xFFULL;
+ lbk_chans = (nix_const >> 16) & 0xFFULL;
+ sdp_chans = nix_const1 & 0xFFFULL;
+ cpt_chans = (nix_const >> 32) & 0xFFFULL;
+
+ start = hw->cgx_chan_base;
+ for (link = 0; link < hw->cgx_links; link++, nix_link++) {
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link));
+ cfg &= ~(NIX_AF_LINKX_BASE_MASK | NIX_AF_LINKX_RANGE_MASK);
+ cfg |= FIELD_PREP(NIX_AF_LINKX_RANGE_MASK, ilog2(cgx_chans));
+ cfg |= FIELD_PREP(NIX_AF_LINKX_BASE_MASK, start);
+ rvu_write64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link), cfg);
+ start += cgx_chans;
+ }
+
+ start = hw->lbk_chan_base;
+ for (link = 0; link < hw->lbk_links; link++, nix_link++) {
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link));
+ cfg &= ~(NIX_AF_LINKX_BASE_MASK | NIX_AF_LINKX_RANGE_MASK);
+ cfg |= FIELD_PREP(NIX_AF_LINKX_RANGE_MASK, ilog2(lbk_chans));
+ cfg |= FIELD_PREP(NIX_AF_LINKX_BASE_MASK, start);
+ rvu_write64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link), cfg);
+ start += lbk_chans;
+ }
+
+ start = hw->sdp_chan_base;
+ for (link = 0; link < hw->sdp_links; link++, nix_link++) {
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link));
+ cfg &= ~(NIX_AF_LINKX_BASE_MASK | NIX_AF_LINKX_RANGE_MASK);
+ cfg |= FIELD_PREP(NIX_AF_LINKX_RANGE_MASK, ilog2(sdp_chans));
+ cfg |= FIELD_PREP(NIX_AF_LINKX_BASE_MASK, start);
+ rvu_write64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link), cfg);
+ start += sdp_chans;
+ }
+
+ start = hw->cpt_chan_base;
+ for (link = 0; link < hw->cpt_links; link++, nix_link++) {
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link));
+ cfg &= ~(NIX_AF_LINKX_BASE_MASK | NIX_AF_LINKX_RANGE_MASK);
+ cfg |= FIELD_PREP(NIX_AF_LINKX_RANGE_MASK, ilog2(cpt_chans));
+ cfg |= FIELD_PREP(NIX_AF_LINKX_BASE_MASK, start);
+ rvu_write64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link), cfg);
+ start += cpt_chans;
+ }
+}
+
+static void rvu_nix_set_channels(struct rvu *rvu)
+{
+ int blkaddr = 0;
+
+ blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
+ while (blkaddr) {
+ __rvu_nix_set_channels(rvu, blkaddr);
+ blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
+ }
+}
+
+static void __rvu_rpm_set_channels(int cgxid, int lmacid, u16 base)
+{
+ u64 cfg;
+
+ cfg = cgx_lmac_read(cgxid, lmacid, RPMX_CMRX_LINK_CFG);
+ cfg &= ~(RPMX_CMRX_LINK_BASE_MASK | RPMX_CMRX_LINK_RANGE_MASK);
+
+ /* There is no read-only constant register that reports
+ * the number of channels per LMAC; it is always 16.
+ */
+ cfg |= FIELD_PREP(RPMX_CMRX_LINK_RANGE_MASK, ilog2(16));
+ cfg |= FIELD_PREP(RPMX_CMRX_LINK_BASE_MASK, base);
+ cgx_lmac_write(cgxid, lmacid, RPMX_CMRX_LINK_CFG, cfg);
+}
+
+static void rvu_rpm_set_channels(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 base = hw->cgx_chan_base;
+ int cgx, lmac;
+
+ for (cgx = 0; cgx < rvu->cgx_cnt_max; cgx++) {
+ for (lmac = 0; lmac < hw->lmac_per_cgx; lmac++) {
+ __rvu_rpm_set_channels(cgx, lmac, base);
+ base += 16;
+ }
+ }
+}
+
+void rvu_program_channels(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+
+ if (!hw->cap.programmable_chans)
+ return;
+
+ rvu_nix_set_channels(rvu);
+ rvu_lbk_set_channels(rvu);
+ rvu_rpm_set_channels(rvu);
+}
+
+void rvu_nix_block_cn10k_init(struct rvu *rvu, struct nix_hw *nix_hw)
+{
+ int blkaddr = nix_hw->blkaddr;
+ u64 cfg;
+
+ /* Set AF vWQE timer interval to an LF configurable range of
+ * 6.4us to 1.632ms.
+ */
+ rvu_write64(rvu, blkaddr, NIX_AF_VWQE_TIMER, 0x3FULL);
+
+ /* Enable NIX RX stream and global conditional clock to
+ * avoid multiple frees of NPA buffers.
+ */
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_CFG);
+ cfg |= BIT_ULL(1) | BIT_ULL(2);
+ rvu_write64(rvu, blkaddr, NIX_AF_CFG, cfg);
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
new file mode 100644
index 000000000..f047185f3
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
@@ -0,0 +1,1221 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <linux/bitfield.h>
+#include <linux/pci.h>
+#include "rvu_struct.h"
+#include "rvu_reg.h"
+#include "mbox.h"
+#include "rvu.h"
+
+/* CPT PF device id */
+#define PCI_DEVID_OTX2_CPT_PF 0xA0FD
+#define PCI_DEVID_OTX2_CPT10K_PF 0xA0F2
+
+/* Length of initial context fetch in 128 byte words */
+#define CPT_CTX_ILEN 1ULL
+
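+/* Collect per-engine busy (bit 0) and free (bit 1) bits from
+ * CPT_AF_EXEX_STS for engines [e_min, e_max) of the given engine type.
+ */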
+#define cpt_get_eng_sts(e_min, e_max, rsp, etype) \
+({ \
+ u64 free_sts = 0, busy_sts = 0; \
+ typeof(rsp) _rsp = rsp; \
+ u32 e, i; \
+ \
+ for (e = (e_min), i = 0; e < (e_max); e++, i++) { \
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_STS(e)); \
+ if (reg & 0x1) \
+ busy_sts |= 1ULL << i; \
+ \
+ if (reg & 0x2) \
+ free_sts |= 1ULL << i; \
+ } \
+ (_rsp)->busy_sts_##etype = busy_sts; \
+ (_rsp)->free_sts_##etype = free_sts; \
+})
+
+static irqreturn_t cpt_af_flt_intr_handler(int vec, void *ptr)
+{
+ struct rvu_block *block = ptr;
+ struct rvu *rvu = block->rvu;
+ int blkaddr = block->addr;
+ u64 reg, val;
+ int i, eng;
+ u8 grp;
+
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(vec));
+ dev_err_ratelimited(rvu->dev, "Received CPTAF FLT%d irq : 0x%llx", vec, reg);
+
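+ /* Fault interrupt vectors cover engines in groups of 64; convert the
+ * bit position within this vector's register to a global engine number.
+ */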
+ i = -1;
+ while ((i = find_next_bit((unsigned long *)&reg, 64, i + 1)) < 64) {
+ switch (vec) {
+ case 0:
+ eng = i;
+ break;
+ case 1:
+ eng = i + 64;
+ break;
+ case 2:
+ eng = i + 128;
+ break;
+ }
+ grp = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL2(eng)) & 0xFF;
+ /* Disable and re-enable the engine which triggered the fault */
+ rvu_write64(rvu, blkaddr, CPT_AF_EXEX_CTL2(eng), 0x0);
+ val = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL(eng));
+ rvu_write64(rvu, blkaddr, CPT_AF_EXEX_CTL(eng), val & ~1ULL);
+
+ rvu_write64(rvu, blkaddr, CPT_AF_EXEX_CTL2(eng), grp);
+ rvu_write64(rvu, blkaddr, CPT_AF_EXEX_CTL(eng), val | 1ULL);
+
+ spin_lock(&rvu->cpt_intr_lock);
+ block->cpt_flt_eng_map[vec] |= BIT_ULL(i);
+ val = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_STS(eng));
+ val = val & 0x3;
+ if (val == 0x1 || val == 0x2)
+ block->cpt_rcvrd_eng_map[vec] |= BIT_ULL(i);
+ spin_unlock(&rvu->cpt_intr_lock);
+ }
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT(vec), reg);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rvu_cpt_af_flt0_intr_handler(int irq, void *ptr)
+{
+ return cpt_af_flt_intr_handler(CPT_AF_INT_VEC_FLT0, ptr);
+}
+
+static irqreturn_t rvu_cpt_af_flt1_intr_handler(int irq, void *ptr)
+{
+ return cpt_af_flt_intr_handler(CPT_AF_INT_VEC_FLT1, ptr);
+}
+
+static irqreturn_t rvu_cpt_af_flt2_intr_handler(int irq, void *ptr)
+{
+ return cpt_af_flt_intr_handler(CPT_10K_AF_INT_VEC_FLT2, ptr);
+}
+
+static irqreturn_t rvu_cpt_af_rvu_intr_handler(int irq, void *ptr)
+{
+ struct rvu_block *block = ptr;
+ struct rvu *rvu = block->rvu;
+ int blkaddr = block->addr;
+ u64 reg;
+
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_RVU_INT);
+ dev_err_ratelimited(rvu->dev, "Received CPTAF RVU irq : 0x%llx", reg);
+
+ rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT, reg);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rvu_cpt_af_ras_intr_handler(int irq, void *ptr)
+{
+ struct rvu_block *block = ptr;
+ struct rvu *rvu = block->rvu;
+ int blkaddr = block->addr;
+ u64 reg;
+
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_RAS_INT);
+ dev_err_ratelimited(rvu->dev, "Received CPTAF RAS irq : 0x%llx", reg);
+
+ rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT, reg);
+ return IRQ_HANDLED;
+}
+
+static int rvu_cpt_do_register_interrupt(struct rvu_block *block, int irq_offs,
+ irq_handler_t handler,
+ const char *name)
+{
+ struct rvu *rvu = block->rvu;
+ int ret;
+
+ ret = request_irq(pci_irq_vector(rvu->pdev, irq_offs), handler, 0,
+ name, block);
+ if (ret) {
+ dev_err(rvu->dev, "RVUAF: %s irq registration failed", name);
+ return ret;
+ }
+
+ WARN_ON(rvu->irq_allocated[irq_offs]);
+ rvu->irq_allocated[irq_offs] = true;
+ return 0;
+}
+
+static void cpt_10k_unregister_interrupts(struct rvu_block *block, int off)
+{
+ struct rvu *rvu = block->rvu;
+ int blkaddr = block->addr;
+ int i;
+
+ /* Disable all CPT AF interrupts */
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(0), ~0ULL);
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(1), ~0ULL);
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(2), 0xFFFF);
+
+ rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1C, 0x1);
+ rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT_ENA_W1C, 0x1);
+
+ for (i = 0; i < CPT_10K_AF_INT_VEC_CNT; i++)
+ if (rvu->irq_allocated[off + i]) {
+ free_irq(pci_irq_vector(rvu->pdev, off + i), block);
+ rvu->irq_allocated[off + i] = false;
+ }
+}
+
+static void cpt_unregister_interrupts(struct rvu *rvu, int blkaddr)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int i, offs;
+
+ if (!is_block_implemented(rvu->hw, blkaddr))
+ return;
+ offs = rvu_read64(rvu, blkaddr, CPT_PRIV_AF_INT_CFG) & 0x7FF;
+ if (!offs) {
+ dev_warn(rvu->dev,
+ "Failed to get CPT_AF_INT vector offsets\n");
+ return;
+ }
+ block = &hw->block[blkaddr];
+ if (!is_rvu_otx2(rvu))
+ return cpt_10k_unregister_interrupts(block, offs);
+
+ /* Disable all CPT AF interrupts */
+ for (i = 0; i < CPT_AF_INT_VEC_RVU; i++)
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(i), ~0ULL);
+ rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1C, 0x1);
+ rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT_ENA_W1C, 0x1);
+
+ for (i = 0; i < CPT_AF_INT_VEC_CNT; i++)
+ if (rvu->irq_allocated[offs + i]) {
+ free_irq(pci_irq_vector(rvu->pdev, offs + i), block);
+ rvu->irq_allocated[offs + i] = false;
+ }
+}
+
+void rvu_cpt_unregister_interrupts(struct rvu *rvu)
+{
+ cpt_unregister_interrupts(rvu, BLKADDR_CPT0);
+ cpt_unregister_interrupts(rvu, BLKADDR_CPT1);
+}
+
+static int cpt_10k_register_interrupts(struct rvu_block *block, int off)
+{
+ struct rvu *rvu = block->rvu;
+ int blkaddr = block->addr;
+ irq_handler_t flt_fn;
+ int i, ret;
+
+ for (i = CPT_10K_AF_INT_VEC_FLT0; i < CPT_10K_AF_INT_VEC_RVU; i++) {
+ sprintf(&rvu->irq_name[(off + i) * NAME_SIZE], "CPTAF FLT%d", i);
+
+ switch (i) {
+ case CPT_10K_AF_INT_VEC_FLT0:
+ flt_fn = rvu_cpt_af_flt0_intr_handler;
+ break;
+ case CPT_10K_AF_INT_VEC_FLT1:
+ flt_fn = rvu_cpt_af_flt1_intr_handler;
+ break;
+ case CPT_10K_AF_INT_VEC_FLT2:
+ flt_fn = rvu_cpt_af_flt2_intr_handler;
+ break;
+ }
+ ret = rvu_cpt_do_register_interrupt(block, off + i,
+ flt_fn, &rvu->irq_name[(off + i) * NAME_SIZE]);
+ if (ret)
+ goto err;
+ if (i == CPT_10K_AF_INT_VEC_FLT2)
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), 0xFFFF);
+ else
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), ~0ULL);
+ }
+
+ ret = rvu_cpt_do_register_interrupt(block, off + CPT_10K_AF_INT_VEC_RVU,
+ rvu_cpt_af_rvu_intr_handler,
+ "CPTAF RVU");
+ if (ret)
+ goto err;
+ rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1S, 0x1);
+
+ ret = rvu_cpt_do_register_interrupt(block, off + CPT_10K_AF_INT_VEC_RAS,
+ rvu_cpt_af_ras_intr_handler,
+ "CPTAF RAS");
+ if (ret)
+ goto err;
+ rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT_ENA_W1S, 0x1);
+
+ return 0;
+err:
+ rvu_cpt_unregister_interrupts(rvu);
+ return ret;
+}
+
+static int cpt_register_interrupts(struct rvu *rvu, int blkaddr)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ irq_handler_t flt_fn;
+ int i, offs, ret = 0;
+
+ if (!is_block_implemented(rvu->hw, blkaddr))
+ return 0;
+
+ block = &hw->block[blkaddr];
+ offs = rvu_read64(rvu, blkaddr, CPT_PRIV_AF_INT_CFG) & 0x7FF;
+ if (!offs) {
+ dev_warn(rvu->dev,
+ "Failed to get CPT_AF_INT vector offsets\n");
+ return 0;
+ }
+
+ if (!is_rvu_otx2(rvu))
+ return cpt_10k_register_interrupts(block, offs);
+
+ for (i = CPT_AF_INT_VEC_FLT0; i < CPT_AF_INT_VEC_RVU; i++) {
+ sprintf(&rvu->irq_name[(offs + i) * NAME_SIZE], "CPTAF FLT%d", i);
+ switch (i) {
+ case CPT_AF_INT_VEC_FLT0:
+ flt_fn = rvu_cpt_af_flt0_intr_handler;
+ break;
+ case CPT_AF_INT_VEC_FLT1:
+ flt_fn = rvu_cpt_af_flt1_intr_handler;
+ break;
+ }
+ ret = rvu_cpt_do_register_interrupt(block, offs + i,
+ flt_fn, &rvu->irq_name[(offs + i) * NAME_SIZE]);
+ if (ret)
+ goto err;
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), ~0ULL);
+ }
+
+ ret = rvu_cpt_do_register_interrupt(block, offs + CPT_AF_INT_VEC_RVU,
+ rvu_cpt_af_rvu_intr_handler,
+ "CPTAF RVU");
+ if (ret)
+ goto err;
+ rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1S, 0x1);
+
+ ret = rvu_cpt_do_register_interrupt(block, offs + CPT_AF_INT_VEC_RAS,
+ rvu_cpt_af_ras_intr_handler,
+ "CPTAF RAS");
+ if (ret)
+ goto err;
+ rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT_ENA_W1S, 0x1);
+
+ return 0;
+err:
+ rvu_cpt_unregister_interrupts(rvu);
+ return ret;
+}
+
+int rvu_cpt_register_interrupts(struct rvu *rvu)
+{
+ int ret;
+
+ ret = cpt_register_interrupts(rvu, BLKADDR_CPT0);
+ if (ret)
+ return ret;
+
+ return cpt_register_interrupts(rvu, BLKADDR_CPT1);
+}
+
+static int get_cpt_pf_num(struct rvu *rvu)
+{
+ int i, domain_nr, cpt_pf_num = -1;
+ struct pci_dev *pdev;
+
+ domain_nr = pci_domain_nr(rvu->pdev->bus);
+ for (i = 0; i < rvu->hw->total_pfs; i++) {
+ pdev = pci_get_domain_bus_and_slot(domain_nr, i + 1, 0);
+ if (!pdev)
+ continue;
+
+ if (pdev->device == PCI_DEVID_OTX2_CPT_PF ||
+ pdev->device == PCI_DEVID_OTX2_CPT10K_PF) {
+ cpt_pf_num = i;
+ put_device(&pdev->dev);
+ break;
+ }
+ put_device(&pdev->dev);
+ }
+ return cpt_pf_num;
+}
+
+static bool is_cpt_pf(struct rvu *rvu, u16 pcifunc)
+{
+ int cpt_pf_num = rvu->cpt_pf_num;
+
+ if (rvu_get_pf(pcifunc) != cpt_pf_num)
+ return false;
+ if (pcifunc & RVU_PFVF_FUNC_MASK)
+ return false;
+
+ return true;
+}
+
+static bool is_cpt_vf(struct rvu *rvu, u16 pcifunc)
+{
+ int cpt_pf_num = rvu->cpt_pf_num;
+
+ if (rvu_get_pf(pcifunc) != cpt_pf_num)
+ return false;
+ if (!(pcifunc & RVU_PFVF_FUNC_MASK))
+ return false;
+
+ return true;
+}
+
+static int validate_and_get_cpt_blkaddr(int req_blkaddr)
+{
+ int blkaddr;
+
+ blkaddr = req_blkaddr ? req_blkaddr : BLKADDR_CPT0;
+ if (blkaddr != BLKADDR_CPT0 && blkaddr != BLKADDR_CPT1)
+ return -EINVAL;
+
+ return blkaddr;
+}
+
+int rvu_mbox_handler_cpt_lf_alloc(struct rvu *rvu,
+ struct cpt_lf_alloc_req_msg *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_block *block;
+ int cptlf, blkaddr;
+ int num_lfs, slot;
+ u64 val;
+
+ blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ if (req->eng_grpmsk == 0x0)
+ return CPT_AF_ERR_GRP_INVALID;
+
+ block = &rvu->hw->block[blkaddr];
+ num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc),
+ block->addr);
+ if (!num_lfs)
+ return CPT_AF_ERR_LF_INVALID;
+
+ /* Check if requested 'CPTLF <=> NIXLF' mapping is valid */
+ if (req->nix_pf_func) {
+ /* If default, use 'this' CPTLF's PFFUNC */
+ if (req->nix_pf_func == RVU_DEFAULT_PF_FUNC)
+ req->nix_pf_func = pcifunc;
+ if (!is_pffunc_map_valid(rvu, req->nix_pf_func, BLKTYPE_NIX))
+ return CPT_AF_ERR_NIX_PF_FUNC_INVALID;
+ }
+
+ /* Check if requested 'CPTLF <=> SSOLF' mapping is valid */
+ if (req->sso_pf_func) {
+ /* If default, use 'this' CPTLF's PFFUNC */
+ if (req->sso_pf_func == RVU_DEFAULT_PF_FUNC)
+ req->sso_pf_func = pcifunc;
+ if (!is_pffunc_map_valid(rvu, req->sso_pf_func, BLKTYPE_SSO))
+ return CPT_AF_ERR_SSO_PF_FUNC_INVALID;
+ }
+
+ for (slot = 0; slot < num_lfs; slot++) {
+ cptlf = rvu_get_lf(rvu, block, pcifunc, slot);
+ if (cptlf < 0)
+ return CPT_AF_ERR_LF_INVALID;
+
+ /* Set CPT LF group and priority */
+ val = (u64)req->eng_grpmsk << 48 | 1;
+ if (!is_rvu_otx2(rvu)) {
+ if (req->ctx_ilen_valid)
+ val |= (req->ctx_ilen << 17);
+ else
+ val |= (CPT_CTX_ILEN << 17);
+ }
+
+ rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), val);
+
+ /* Set CPT LF NIX_PF_FUNC and SSO_PF_FUNC. EXE_LDWB is set
+ * on reset.
+ */
+ val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf));
+ val &= ~(GENMASK_ULL(63, 48) | GENMASK_ULL(47, 32));
+ val |= ((u64)req->nix_pf_func << 48 |
+ (u64)req->sso_pf_func << 32);
+ rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf), val);
+ }
+
+ return 0;
+}
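+
+/* A minimal sketch of the register packing performed above, using made-up
+ * values and the OcteonTx2 path (no ctx_ilen bits): eng_grpmsk = 0x3
+ * shifted to bit 48 together with the '| 1' priority bit yields a
+ * CPT_AF_LFX_CTL value of 0x0003000000000001, while nix_pf_func = 0x400
+ * and sso_pf_func = 0x800 land in bits 63:48 and 47:32 of CPT_AF_LFX_CTL2,
+ * i.e. 0x0400080000000000 ORed over the fields cleared just before the
+ * write.
+ */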
+
+static int cpt_lf_free(struct rvu *rvu, struct msg_req *req, int blkaddr)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ int num_lfs, cptlf, slot, err;
+ struct rvu_block *block;
+
+ block = &rvu->hw->block[blkaddr];
+ num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc),
+ block->addr);
+ if (!num_lfs)
+ return 0;
+
+ for (slot = 0; slot < num_lfs; slot++) {
+ cptlf = rvu_get_lf(rvu, block, pcifunc, slot);
+ if (cptlf < 0)
+ return CPT_AF_ERR_LF_INVALID;
+
+ /* Perform teardown */
+ rvu_cpt_lf_teardown(rvu, pcifunc, blkaddr, cptlf, slot);
+
+ /* Reset LF */
+ err = rvu_lf_reset(rvu, block, cptlf);
+ if (err) {
+ dev_err(rvu->dev, "Failed to reset blkaddr %d LF%d\n",
+ block->addr, cptlf);
+ }
+ }
+
+ return 0;
+}
+
+int rvu_mbox_handler_cpt_lf_free(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ int ret;
+
+ ret = cpt_lf_free(rvu, req, BLKADDR_CPT0);
+ if (ret)
+ return ret;
+
+ if (is_block_implemented(rvu->hw, BLKADDR_CPT1))
+ ret = cpt_lf_free(rvu, req, BLKADDR_CPT1);
+
+ return ret;
+}
+
+static int cpt_inline_ipsec_cfg_inbound(struct rvu *rvu, int blkaddr, u8 cptlf,
+ struct cpt_inline_ipsec_cfg_msg *req)
+{
+ u16 sso_pf_func = req->sso_pf_func;
+ u8 nix_sel;
+ u64 val;
+
+ val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf));
+ if (req->enable && (val & BIT_ULL(16))) {
+ /* The IPsec inline outbound path is already enabled for this CPT LF.
+ * The HRM states that the inline inbound and outbound paths must not be
+ * enabled at the same time for a given CPT LF.
+ */
+ return CPT_AF_ERR_INLINE_IPSEC_INB_ENA;
+ }
+ /* Check if requested 'CPTLF <=> SSOLF' mapping is valid */
+ if (sso_pf_func && !is_pffunc_map_valid(rvu, sso_pf_func, BLKTYPE_SSO))
+ return CPT_AF_ERR_SSO_PF_FUNC_INVALID;
+
+ nix_sel = (blkaddr == BLKADDR_CPT1) ? 1 : 0;
+ /* Enable CPT LF for IPsec inline inbound operations */
+ if (req->enable)
+ val |= BIT_ULL(9);
+ else
+ val &= ~BIT_ULL(9);
+
+ val |= (u64)nix_sel << 8;
+ rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), val);
+
+ if (sso_pf_func) {
+ /* Set SSO_PF_FUNC */
+ val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf));
+ val |= (u64)sso_pf_func << 32;
+ val |= (u64)req->nix_pf_func << 48;
+ rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf), val);
+ }
+ if (req->sso_pf_func_ovrd)
+ /* Set SSO_PF_FUNC_OVRD for inline IPSec */
+ rvu_write64(rvu, blkaddr, CPT_AF_ECO, 0x1);
+
+ /* Configure the X2P Link register with the cpt base channel number and
+ * range of channels it should propagate to X2P
+ */
+ if (!is_rvu_otx2(rvu)) {
+ val = (ilog2(NIX_CHAN_CPT_X2P_MASK + 1) << 16);
+ val |= (u64)rvu->hw->cpt_chan_base;
+
+ rvu_write64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(0), val);
+ rvu_write64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(1), val);
+ }
+
+ return 0;
+}
+
+static int cpt_inline_ipsec_cfg_outbound(struct rvu *rvu, int blkaddr, u8 cptlf,
+ struct cpt_inline_ipsec_cfg_msg *req)
+{
+ u16 nix_pf_func = req->nix_pf_func;
+ int nix_blkaddr;
+ u8 nix_sel;
+ u64 val;
+
+ val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf));
+ if (req->enable && (val & BIT_ULL(9))) {
+ /* The IPsec inline inbound path is already enabled for this CPT LF.
+ * The HRM states that the inline inbound and outbound paths must not be
+ * enabled at the same time for a given CPT LF.
+ */
+ return CPT_AF_ERR_INLINE_IPSEC_OUT_ENA;
+ }
+
+ /* Check if requested 'CPTLF <=> NIXLF' mapping is valid */
+ if (nix_pf_func && !is_pffunc_map_valid(rvu, nix_pf_func, BLKTYPE_NIX))
+ return CPT_AF_ERR_NIX_PF_FUNC_INVALID;
+
+ /* Enable CPT LF for IPsec inline outbound operations */
+ if (req->enable)
+ val |= BIT_ULL(16);
+ else
+ val &= ~BIT_ULL(16);
+ rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), val);
+
+ if (nix_pf_func) {
+ /* Set NIX_PF_FUNC */
+ val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf));
+ val |= (u64)nix_pf_func << 48;
+ rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf), val);
+
+ nix_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, nix_pf_func);
+ nix_sel = (nix_blkaddr == BLKADDR_NIX0) ? 0 : 1;
+
+ val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf));
+ val |= (u64)nix_sel << 8;
+ rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), val);
+ }
+
+ return 0;
+}
+
+int rvu_mbox_handler_cpt_inline_ipsec_cfg(struct rvu *rvu,
+ struct cpt_inline_ipsec_cfg_msg *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_block *block;
+ int cptlf, blkaddr, ret;
+ u16 actual_slot;
+
+ blkaddr = rvu_get_blkaddr_from_slot(rvu, BLKTYPE_CPT, pcifunc,
+ req->slot, &actual_slot);
+ if (blkaddr < 0)
+ return CPT_AF_ERR_LF_INVALID;
+
+ block = &rvu->hw->block[blkaddr];
+
+ cptlf = rvu_get_lf(rvu, block, pcifunc, actual_slot);
+ if (cptlf < 0)
+ return CPT_AF_ERR_LF_INVALID;
+
+ switch (req->dir) {
+ case CPT_INLINE_INBOUND:
+ ret = cpt_inline_ipsec_cfg_inbound(rvu, blkaddr, cptlf, req);
+ break;
+
+ case CPT_INLINE_OUTBOUND:
+ ret = cpt_inline_ipsec_cfg_outbound(rvu, blkaddr, cptlf, req);
+ break;
+
+ default:
+ return CPT_AF_ERR_PARAM;
+ }
+
+ return ret;
+}
+
+static bool is_valid_offset(struct rvu *rvu, struct cpt_rd_wr_reg_msg *req)
+{
+ u64 offset = req->reg_offset;
+ int blkaddr, num_lfs, lf;
+ struct rvu_block *block;
+ struct rvu_pfvf *pfvf;
+
+ blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr);
+ if (blkaddr < 0)
+ return false;
+
+ /* Registers that can be accessed from PF/VF */
+ if ((offset & 0xFF000) == CPT_AF_LFX_CTL(0) ||
+ (offset & 0xFF000) == CPT_AF_LFX_CTL2(0)) {
+ if (offset & 7)
+ return false;
+
+ lf = (offset & 0xFFF) >> 3;
+ block = &rvu->hw->block[blkaddr];
+ pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
+ num_lfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
+ if (lf >= num_lfs)
+ /* Slot is not valid for that PF/VF */
+ return false;
+
+ /* Translate local LF used by VFs to global CPT LF */
+ lf = rvu_get_lf(rvu, &rvu->hw->block[blkaddr],
+ req->hdr.pcifunc, lf);
+ if (lf < 0)
+ return false;
+
+ return true;
+ } else if (!(req->hdr.pcifunc & RVU_PFVF_FUNC_MASK)) {
+ /* Registers that can be accessed from PF */
+ switch (offset) {
+ case CPT_AF_DIAG:
+ case CPT_AF_CTL:
+ case CPT_AF_PF_FUNC:
+ case CPT_AF_BLK_RST:
+ case CPT_AF_CONSTANTS1:
+ case CPT_AF_CTX_FLUSH_TIMER:
+ return true;
+ }
+
+ switch (offset & 0xFF000) {
+ case CPT_AF_EXEX_STS(0):
+ case CPT_AF_EXEX_CTL(0):
+ case CPT_AF_EXEX_CTL2(0):
+ case CPT_AF_EXEX_UCODE_BASE(0):
+ if (offset & 7)
+ return false;
+ break;
+ default:
+ return false;
+ }
+ return true;
+ }
+ return false;
+}
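+
+/* A short worked sketch of the checks above, assuming (as the masks imply)
+ * that CPT_AF_LFX_CTL(0) is 4KB aligned and the per-LF copies sit at an
+ * 8-byte stride: an offset of CPT_AF_LFX_CTL(0) + 3 * 8 matches the
+ * (offset & 0xFF000) group test, passes the (offset & 7) alignment test,
+ * and (offset & 0xFFF) >> 3 recovers the local LF index 3, which is then
+ * bounds-checked against the requester's LF count and translated to a
+ * global LF via rvu_get_lf().
+ */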
+
+int rvu_mbox_handler_cpt_rd_wr_register(struct rvu *rvu,
+ struct cpt_rd_wr_reg_msg *req,
+ struct cpt_rd_wr_reg_msg *rsp)
+{
+ int blkaddr;
+
+ blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ /* This message is accepted only if sent from CPT PF/VF */
+ if (!is_cpt_pf(rvu, req->hdr.pcifunc) &&
+ !is_cpt_vf(rvu, req->hdr.pcifunc))
+ return CPT_AF_ERR_ACCESS_DENIED;
+
+ rsp->reg_offset = req->reg_offset;
+ rsp->ret_val = req->ret_val;
+ rsp->is_write = req->is_write;
+
+ if (!is_valid_offset(rvu, req))
+ return CPT_AF_ERR_ACCESS_DENIED;
+
+ if (req->is_write)
+ rvu_write64(rvu, blkaddr, req->reg_offset, req->val);
+ else
+ rsp->val = rvu_read64(rvu, blkaddr, req->reg_offset);
+
+ return 0;
+}
+
+static void get_ctx_pc(struct rvu *rvu, struct cpt_sts_rsp *rsp, int blkaddr)
+{
+ if (is_rvu_otx2(rvu))
+ return;
+
+ rsp->ctx_mis_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_MIS_PC);
+ rsp->ctx_hit_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_HIT_PC);
+ rsp->ctx_aop_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_AOP_PC);
+ rsp->ctx_aop_lat_pc = rvu_read64(rvu, blkaddr,
+ CPT_AF_CTX_AOP_LATENCY_PC);
+ rsp->ctx_ifetch_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_IFETCH_PC);
+ rsp->ctx_ifetch_lat_pc = rvu_read64(rvu, blkaddr,
+ CPT_AF_CTX_IFETCH_LATENCY_PC);
+ rsp->ctx_ffetch_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_FFETCH_PC);
+ rsp->ctx_ffetch_lat_pc = rvu_read64(rvu, blkaddr,
+ CPT_AF_CTX_FFETCH_LATENCY_PC);
+ rsp->ctx_wback_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_FFETCH_PC);
+ rsp->ctx_wback_lat_pc = rvu_read64(rvu, blkaddr,
+ CPT_AF_CTX_FFETCH_LATENCY_PC);
+ rsp->ctx_psh_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_FFETCH_PC);
+ rsp->ctx_psh_lat_pc = rvu_read64(rvu, blkaddr,
+ CPT_AF_CTX_FFETCH_LATENCY_PC);
+ rsp->ctx_err = rvu_read64(rvu, blkaddr, CPT_AF_CTX_ERR);
+ rsp->ctx_enc_id = rvu_read64(rvu, blkaddr, CPT_AF_CTX_ENC_ID);
+ rsp->ctx_flush_timer = rvu_read64(rvu, blkaddr, CPT_AF_CTX_FLUSH_TIMER);
+
+ rsp->rxc_time = rvu_read64(rvu, blkaddr, CPT_AF_RXC_TIME);
+ rsp->rxc_time_cfg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_TIME_CFG);
+ rsp->rxc_active_sts = rvu_read64(rvu, blkaddr, CPT_AF_RXC_ACTIVE_STS);
+ rsp->rxc_zombie_sts = rvu_read64(rvu, blkaddr, CPT_AF_RXC_ZOMBIE_STS);
+ rsp->rxc_dfrg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_DFRG);
+ rsp->x2p_link_cfg0 = rvu_read64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(0));
+ rsp->x2p_link_cfg1 = rvu_read64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(1));
+}
+
+static void get_eng_sts(struct rvu *rvu, struct cpt_sts_rsp *rsp, int blkaddr)
+{
+ u16 max_ses, max_ies, max_aes;
+ u32 e_min = 0, e_max = 0;
+ u64 reg;
+
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
+ max_ses = reg & 0xffff;
+ max_ies = (reg >> 16) & 0xffff;
+ max_aes = (reg >> 32) & 0xffff;
+
+ /* Get AE status */
+ e_min = max_ses + max_ies;
+ e_max = max_ses + max_ies + max_aes;
+ cpt_get_eng_sts(e_min, e_max, rsp, ae);
+ /* Get SE status */
+ e_min = 0;
+ e_max = max_ses;
+ cpt_get_eng_sts(e_min, e_max, rsp, se);
+ /* Get IE status */
+ e_min = max_ses;
+ e_max = max_ses + max_ies;
+ cpt_get_eng_sts(e_min, e_max, rsp, ie);
+}
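+
+/* Illustrative only, with made-up CPT_AF_CONSTANTS1 values: if the register
+ * reports max_ses = 28, max_ies = 20 and max_aes = 4, the (e_min, e_max)
+ * pairs handed to cpt_get_eng_sts() above are SE (0, 28), IE (28, 48) and
+ * AE (48, 52), i.e. SE engines occupy the lowest engine indices, IE engines
+ * the middle range and AE engines the top of the range.
+ */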
+
+int rvu_mbox_handler_cpt_sts(struct rvu *rvu, struct cpt_sts_req *req,
+ struct cpt_sts_rsp *rsp)
+{
+ int blkaddr;
+
+ blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ /* This message is accepted only if sent from CPT PF/VF */
+ if (!is_cpt_pf(rvu, req->hdr.pcifunc) &&
+ !is_cpt_vf(rvu, req->hdr.pcifunc))
+ return CPT_AF_ERR_ACCESS_DENIED;
+
+ get_ctx_pc(rvu, rsp, blkaddr);
+
+ /* Get CPT engines status */
+ get_eng_sts(rvu, rsp, blkaddr);
+
+ /* Read CPT instruction PC registers */
+ rsp->inst_req_pc = rvu_read64(rvu, blkaddr, CPT_AF_INST_REQ_PC);
+ rsp->inst_lat_pc = rvu_read64(rvu, blkaddr, CPT_AF_INST_LATENCY_PC);
+ rsp->rd_req_pc = rvu_read64(rvu, blkaddr, CPT_AF_RD_REQ_PC);
+ rsp->rd_lat_pc = rvu_read64(rvu, blkaddr, CPT_AF_RD_LATENCY_PC);
+ rsp->rd_uc_pc = rvu_read64(rvu, blkaddr, CPT_AF_RD_UC_PC);
+ rsp->active_cycles_pc = rvu_read64(rvu, blkaddr,
+ CPT_AF_ACTIVE_CYCLES_PC);
+ rsp->exe_err_info = rvu_read64(rvu, blkaddr, CPT_AF_EXE_ERR_INFO);
+ rsp->cptclk_cnt = rvu_read64(rvu, blkaddr, CPT_AF_CPTCLK_CNT);
+ rsp->diag = rvu_read64(rvu, blkaddr, CPT_AF_DIAG);
+
+ return 0;
+}
+
+#define RXC_ZOMBIE_THRES GENMASK_ULL(59, 48)
+#define RXC_ZOMBIE_LIMIT GENMASK_ULL(43, 32)
+#define RXC_ACTIVE_THRES GENMASK_ULL(27, 16)
+#define RXC_ACTIVE_LIMIT GENMASK_ULL(11, 0)
+#define RXC_ACTIVE_COUNT GENMASK_ULL(60, 48)
+#define RXC_ZOMBIE_COUNT GENMASK_ULL(60, 48)
+
+static void cpt_rxc_time_cfg(struct rvu *rvu, struct cpt_rxc_time_cfg_req *req,
+ int blkaddr, struct cpt_rxc_time_cfg_req *save)
+{
+ u64 dfrg_reg;
+
+ if (save) {
+ /* Save older config */
+ dfrg_reg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_DFRG);
+ save->zombie_thres = FIELD_GET(RXC_ZOMBIE_THRES, dfrg_reg);
+ save->zombie_limit = FIELD_GET(RXC_ZOMBIE_LIMIT, dfrg_reg);
+ save->active_thres = FIELD_GET(RXC_ACTIVE_THRES, dfrg_reg);
+ save->active_limit = FIELD_GET(RXC_ACTIVE_LIMIT, dfrg_reg);
+
+ save->step = rvu_read64(rvu, blkaddr, CPT_AF_RXC_TIME_CFG);
+ }
+
+ dfrg_reg = FIELD_PREP(RXC_ZOMBIE_THRES, req->zombie_thres);
+ dfrg_reg |= FIELD_PREP(RXC_ZOMBIE_LIMIT, req->zombie_limit);
+ dfrg_reg |= FIELD_PREP(RXC_ACTIVE_THRES, req->active_thres);
+ dfrg_reg |= FIELD_PREP(RXC_ACTIVE_LIMIT, req->active_limit);
+
+ rvu_write64(rvu, blkaddr, CPT_AF_RXC_TIME_CFG, req->step);
+ rvu_write64(rvu, blkaddr, CPT_AF_RXC_DFRG, dfrg_reg);
+}
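+
+/* A minimal sketch of the <linux/bitfield.h> helpers used above; the
+ * numeric values are made up for illustration only:
+ *
+ *	u64 dfrg = FIELD_PREP(RXC_ZOMBIE_THRES, 0x100) |
+ *		   FIELD_PREP(RXC_ZOMBIE_LIMIT, 0x80) |
+ *		   FIELD_PREP(RXC_ACTIVE_THRES, 0x100) |
+ *		   FIELD_PREP(RXC_ACTIVE_LIMIT, 0x80);
+ *
+ *	FIELD_GET(RXC_ZOMBIE_THRES, dfrg);	// 0x100, bits 59:48
+ *	FIELD_GET(RXC_ACTIVE_LIMIT, dfrg);	// 0x80, bits 11:0
+ *
+ * cpt_rxc_time_cfg() uses exactly this pack/unpack pattern to save the old
+ * CPT_AF_RXC_DFRG contents before programming the requested thresholds.
+ */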
+
+int rvu_mbox_handler_cpt_rxc_time_cfg(struct rvu *rvu,
+ struct cpt_rxc_time_cfg_req *req,
+ struct msg_rsp *rsp)
+{
+ int blkaddr;
+
+ blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ /* This message is accepted only if sent from CPT PF/VF */
+ if (!is_cpt_pf(rvu, req->hdr.pcifunc) &&
+ !is_cpt_vf(rvu, req->hdr.pcifunc))
+ return CPT_AF_ERR_ACCESS_DENIED;
+
+ cpt_rxc_time_cfg(rvu, req, blkaddr, NULL);
+
+ return 0;
+}
+
+int rvu_mbox_handler_cpt_ctx_cache_sync(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ return rvu_cpt_ctx_flush(rvu, req->hdr.pcifunc);
+}
+
+int rvu_mbox_handler_cpt_lf_reset(struct rvu *rvu, struct cpt_lf_rst_req *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_block *block;
+ int cptlf, blkaddr, ret;
+ u16 actual_slot;
+ u64 ctl, ctl2;
+
+ blkaddr = rvu_get_blkaddr_from_slot(rvu, BLKTYPE_CPT, pcifunc,
+ req->slot, &actual_slot);
+ if (blkaddr < 0)
+ return CPT_AF_ERR_LF_INVALID;
+
+ block = &rvu->hw->block[blkaddr];
+
+ cptlf = rvu_get_lf(rvu, block, pcifunc, actual_slot);
+ if (cptlf < 0)
+ return CPT_AF_ERR_LF_INVALID;
+ ctl = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf));
+ ctl2 = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf));
+
+ ret = rvu_lf_reset(rvu, block, cptlf);
+ if (ret)
+ dev_err(rvu->dev, "Failed to reset blkaddr %d LF%d\n",
+ block->addr, cptlf);
+
+ rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), ctl);
+ rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf), ctl2);
+
+ return 0;
+}
+
+int rvu_mbox_handler_cpt_flt_eng_info(struct rvu *rvu, struct cpt_flt_eng_info_req *req,
+ struct cpt_flt_eng_info_rsp *rsp)
+{
+ struct rvu_block *block;
+ unsigned long flags;
+ int blkaddr, vec;
+
+ blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ block = &rvu->hw->block[blkaddr];
+ for (vec = 0; vec < CPT_10K_AF_INT_VEC_RVU; vec++) {
+ spin_lock_irqsave(&rvu->cpt_intr_lock, flags);
+ rsp->flt_eng_map[vec] = block->cpt_flt_eng_map[vec];
+ rsp->rcvrd_eng_map[vec] = block->cpt_rcvrd_eng_map[vec];
+ if (req->reset) {
+ block->cpt_flt_eng_map[vec] = 0x0;
+ block->cpt_rcvrd_eng_map[vec] = 0x0;
+ }
+ spin_unlock_irqrestore(&rvu->cpt_intr_lock, flags);
+ }
+ return 0;
+}
+
+static void cpt_rxc_teardown(struct rvu *rvu, int blkaddr)
+{
+ struct cpt_rxc_time_cfg_req req, prev;
+ int timeout = 2000;
+ u64 reg;
+
+ if (is_rvu_otx2(rvu))
+ return;
+
+ /* Set time limit to minimum values, so that rxc entries will be
+ * flushed out quickly.
+ */
+ req.step = 1;
+ req.zombie_thres = 1;
+ req.zombie_limit = 1;
+ req.active_thres = 1;
+ req.active_limit = 1;
+
+ cpt_rxc_time_cfg(rvu, &req, blkaddr, &prev);
+
+ do {
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_ACTIVE_STS);
+ udelay(1);
+ if (FIELD_GET(RXC_ACTIVE_COUNT, reg))
+ timeout--;
+ else
+ break;
+ } while (timeout);
+
+ if (timeout == 0)
+ dev_warn(rvu->dev, "Poll for RXC active count hits hard loop counter\n");
+
+ timeout = 2000;
+ do {
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_ZOMBIE_STS);
+ udelay(1);
+ if (FIELD_GET(RXC_ZOMBIE_COUNT, reg))
+ timeout--;
+ else
+ break;
+ } while (timeout);
+
+ if (timeout == 0)
+ dev_warn(rvu->dev, "Poll for RXC zombie count hits hard loop counter\n");
+
+ /* Restore config */
+ cpt_rxc_time_cfg(rvu, &prev, blkaddr, NULL);
+}
+
+#define INFLIGHT GENMASK_ULL(8, 0)
+#define GRB_CNT GENMASK_ULL(39, 32)
+#define GWB_CNT GENMASK_ULL(47, 40)
+#define XQ_XOR GENMASK_ULL(63, 63)
+#define DQPTR GENMASK_ULL(19, 0)
+#define NQPTR GENMASK_ULL(51, 32)
+
+static void cpt_lf_disable_iqueue(struct rvu *rvu, int blkaddr, int slot)
+{
+ int timeout = 1000000;
+ u64 inprog, inst_ptr;
+ u64 qsize, pending;
+ int i = 0;
+
+ /* Disable instructions enqueuing */
+ rvu_write64(rvu, blkaddr, CPT_AF_BAR2_ALIASX(slot, CPT_LF_CTL), 0x0);
+
+ inprog = rvu_read64(rvu, blkaddr,
+ CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG));
+ inprog |= BIT_ULL(16);
+ rvu_write64(rvu, blkaddr,
+ CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG), inprog);
+
+ qsize = rvu_read64(rvu, blkaddr,
+ CPT_AF_BAR2_ALIASX(slot, CPT_LF_Q_SIZE)) & 0x7FFF;
+ do {
+ inst_ptr = rvu_read64(rvu, blkaddr,
+ CPT_AF_BAR2_ALIASX(slot, CPT_LF_Q_INST_PTR));
+ pending = (FIELD_GET(XQ_XOR, inst_ptr) * qsize * 40) +
+ FIELD_GET(NQPTR, inst_ptr) -
+ FIELD_GET(DQPTR, inst_ptr);
+ udelay(1);
+ timeout--;
+ } while ((pending != 0) && (timeout != 0));
+
+ if (timeout == 0)
+ dev_warn(rvu->dev, "TIMEOUT: CPT poll on pending instructions\n");
+
+ timeout = 1000000;
+ /* Wait for CPT queue to become execution-quiescent */
+ do {
+ inprog = rvu_read64(rvu, blkaddr,
+ CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG));
+
+ if ((FIELD_GET(INFLIGHT, inprog) == 0) &&
+ (FIELD_GET(GRB_CNT, inprog) == 0)) {
+ i++;
+ } else {
+ i = 0;
+ timeout--;
+ }
+ } while ((timeout != 0) && (i < 10));
+
+ if (timeout == 0)
+ dev_warn(rvu->dev, "TIMEOUT: CPT poll on inflight count\n");
+ /* Wait for 2 us to flush all queue writes to memory */
+ udelay(2);
+}
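+
+/* Worked example of the 'pending' computation above, assuming (as the
+ * '* 40' factor implies) that CPT_LF_Q_SIZE counts 40-instruction chunks:
+ * with qsize = 2 (an 80-instruction queue), NQPTR = 5, DQPTR = 70 and
+ * XQ_XOR = 1 (the enqueue pointer has wrapped while the dequeue pointer has
+ * not), pending = 1 * 2 * 40 + 5 - 70 = 15 instructions still queued. The
+ * loop exits once this count reaches zero or the 1000000-iteration budget
+ * is exhausted.
+ */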
+
+int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int lf, int slot)
+{
+ u64 reg;
+
+ if (is_cpt_pf(rvu, pcifunc) || is_cpt_vf(rvu, pcifunc))
+ cpt_rxc_teardown(rvu, blkaddr);
+
+ mutex_lock(&rvu->alias_lock);
+ /* Enable BAR2 ALIAS for this pcifunc. */
+ reg = BIT_ULL(16) | pcifunc;
+ rvu_bar2_sel_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, reg);
+
+ cpt_lf_disable_iqueue(rvu, blkaddr, slot);
+
+ rvu_bar2_sel_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, 0);
+ mutex_unlock(&rvu->alias_lock);
+
+ return 0;
+}
+
+#define CPT_RES_LEN 16
+#define CPT_SE_IE_EGRP 1ULL
+
+static int cpt_inline_inb_lf_cmd_send(struct rvu *rvu, int blkaddr,
+ int nix_blkaddr)
+{
+ int cpt_pf_num = rvu->cpt_pf_num;
+ struct cpt_inst_lmtst_req *req;
+ dma_addr_t res_daddr;
+ int timeout = 3000;
+ u8 cpt_idx;
+ u64 *inst;
+ u16 *res;
+ int rc;
+
+ res = kzalloc(CPT_RES_LEN, GFP_KERNEL);
+ if (!res)
+ return -ENOMEM;
+
+ res_daddr = dma_map_single(rvu->dev, res, CPT_RES_LEN,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(rvu->dev, res_daddr)) {
+ dev_err(rvu->dev, "DMA mapping failed for CPT result\n");
+ rc = -EFAULT;
+ goto res_free;
+ }
+ *res = 0xFFFF;
+
+ /* Send mbox message to CPT PF */
+ req = (struct cpt_inst_lmtst_req *)
+ otx2_mbox_alloc_msg_rsp(&rvu->afpf_wq_info.mbox_up,
+ cpt_pf_num, sizeof(*req),
+ sizeof(struct msg_rsp));
+ if (!req) {
+ rc = -ENOMEM;
+ goto res_daddr_unmap;
+ }
+ req->hdr.sig = OTX2_MBOX_REQ_SIG;
+ req->hdr.id = MBOX_MSG_CPT_INST_LMTST;
+
+ inst = req->inst;
+ /* Prepare CPT_INST_S */
+ inst[0] = 0;
+ inst[1] = res_daddr;
+ /* AF PF FUNC */
+ inst[2] = 0;
+ /* Set QORD */
+ inst[3] = 1;
+ inst[4] = 0;
+ inst[5] = 0;
+ inst[6] = 0;
+ /* Set EGRP */
+ inst[7] = CPT_SE_IE_EGRP << 61;
+
+ /* Subtract 1 from the NIX-CPT credit count to preserve
+ * credit counts.
+ */
+ cpt_idx = (blkaddr == BLKADDR_CPT0) ? 0 : 1;
+ rvu_write64(rvu, nix_blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx),
+ BIT_ULL(22) - 1);
+
+ otx2_mbox_msg_send(&rvu->afpf_wq_info.mbox_up, cpt_pf_num);
+ rc = otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, cpt_pf_num);
+ if (rc)
+ dev_warn(rvu->dev, "notification to pf %d failed\n",
+ cpt_pf_num);
+ /* Wait for CPT instruction to be completed */
+ do {
+ mdelay(1);
+ if (*res == 0xFFFF)
+ timeout--;
+ else
+ break;
+ } while (timeout);
+
+ if (timeout == 0)
+ dev_warn(rvu->dev, "Poll for result hits hard loop counter\n");
+
+res_daddr_unmap:
+ dma_unmap_single(rvu->dev, res_daddr, CPT_RES_LEN, DMA_BIDIRECTIONAL);
+res_free:
+ kfree(res);
+
+ return 0;
+}
+
+#define CTX_CAM_PF_FUNC GENMASK_ULL(61, 46)
+#define CTX_CAM_CPTR GENMASK_ULL(45, 0)
+
+int rvu_cpt_ctx_flush(struct rvu *rvu, u16 pcifunc)
+{
+ int nix_blkaddr, blkaddr;
+ u16 max_ctx_entries, i;
+ int slot = 0, num_lfs;
+ u64 reg, cam_data;
+ int rc;
+
+ nix_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (nix_blkaddr < 0)
+ return -EINVAL;
+
+ if (is_rvu_otx2(rvu))
+ return 0;
+
+ blkaddr = (nix_blkaddr == BLKADDR_NIX1) ? BLKADDR_CPT1 : BLKADDR_CPT0;
+
+ /* Submit CPT_INST_S to track when all packets have been
+ * flushed through for the NIX PF FUNC in inline inbound case.
+ */
+ rc = cpt_inline_inb_lf_cmd_send(rvu, blkaddr, nix_blkaddr);
+ if (rc)
+ return rc;
+
+ /* Wait for rxc entries to be flushed out */
+ cpt_rxc_teardown(rvu, blkaddr);
+
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS0);
+ max_ctx_entries = (reg >> 48) & 0xFFF;
+
+ mutex_lock(&rvu->rsrc_lock);
+
+ num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc),
+ blkaddr);
+ if (num_lfs == 0) {
+ dev_warn(rvu->dev, "CPT LF is not configured\n");
+ goto unlock;
+ }
+
+ /* Enable BAR2 ALIAS for this pcifunc. */
+ reg = BIT_ULL(16) | pcifunc;
+ rvu_bar2_sel_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, reg);
+
+ for (i = 0; i < max_ctx_entries; i++) {
+ cam_data = rvu_read64(rvu, blkaddr, CPT_AF_CTX_CAM_DATA(i));
+
+ if ((FIELD_GET(CTX_CAM_PF_FUNC, cam_data) == pcifunc) &&
+ FIELD_GET(CTX_CAM_CPTR, cam_data)) {
+ reg = BIT_ULL(46) | FIELD_GET(CTX_CAM_CPTR, cam_data);
+ rvu_write64(rvu, blkaddr,
+ CPT_AF_BAR2_ALIASX(slot, CPT_LF_CTX_FLUSH),
+ reg);
+ }
+ }
+ rvu_bar2_sel_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, 0);
+
+unlock:
+ mutex_unlock(&rvu->rsrc_lock);
+
+ return 0;
+}
+
+int rvu_cpt_init(struct rvu *rvu)
+{
+ /* Retrieve CPT PF number */
+ rvu->cpt_pf_num = get_cpt_pf_num(rvu);
+ spin_lock_init(&rvu->cpt_intr_lock);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
new file mode 100644
index 000000000..d30e84803
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@ -0,0 +1,3440 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2019 Marvell.
+ *
+ */
+
+#ifdef CONFIG_DEBUG_FS
+
+#include <linux/fs.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "rvu_struct.h"
+#include "rvu_reg.h"
+#include "rvu.h"
+#include "cgx.h"
+#include "lmac_common.h"
+#include "npc.h"
+#include "rvu_npc_hash.h"
+#include "mcs.h"
+
+#define DEBUGFS_DIR_NAME "octeontx2"
+
+enum {
+ CGX_STAT0,
+ CGX_STAT1,
+ CGX_STAT2,
+ CGX_STAT3,
+ CGX_STAT4,
+ CGX_STAT5,
+ CGX_STAT6,
+ CGX_STAT7,
+ CGX_STAT8,
+ CGX_STAT9,
+ CGX_STAT10,
+ CGX_STAT11,
+ CGX_STAT12,
+ CGX_STAT13,
+ CGX_STAT14,
+ CGX_STAT15,
+ CGX_STAT16,
+ CGX_STAT17,
+ CGX_STAT18,
+};
+
+/* NIX TX stats */
+enum nix_stat_lf_tx {
+ TX_UCAST = 0x0,
+ TX_BCAST = 0x1,
+ TX_MCAST = 0x2,
+ TX_DROP = 0x3,
+ TX_OCTS = 0x4,
+ TX_STATS_ENUM_LAST,
+};
+
+/* NIX RX stats */
+enum nix_stat_lf_rx {
+ RX_OCTS = 0x0,
+ RX_UCAST = 0x1,
+ RX_BCAST = 0x2,
+ RX_MCAST = 0x3,
+ RX_DROP = 0x4,
+ RX_DROP_OCTS = 0x5,
+ RX_FCS = 0x6,
+ RX_ERR = 0x7,
+ RX_DRP_BCAST = 0x8,
+ RX_DRP_MCAST = 0x9,
+ RX_DRP_L3BCAST = 0xa,
+ RX_DRP_L3MCAST = 0xb,
+ RX_STATS_ENUM_LAST,
+};
+
+static char *cgx_rx_stats_fields[] = {
+ [CGX_STAT0] = "Received packets",
+ [CGX_STAT1] = "Octets of received packets",
+ [CGX_STAT2] = "Received PAUSE packets",
+ [CGX_STAT3] = "Received PAUSE and control packets",
+ [CGX_STAT4] = "Filtered DMAC0 (NIX-bound) packets",
+ [CGX_STAT5] = "Filtered DMAC0 (NIX-bound) octets",
+ [CGX_STAT6] = "Packets dropped due to RX FIFO full",
+ [CGX_STAT7] = "Octets dropped due to RX FIFO full",
+ [CGX_STAT8] = "Error packets",
+ [CGX_STAT9] = "Filtered DMAC1 (NCSI-bound) packets",
+ [CGX_STAT10] = "Filtered DMAC1 (NCSI-bound) octets",
+ [CGX_STAT11] = "NCSI-bound packets dropped",
+ [CGX_STAT12] = "NCSI-bound octets dropped",
+};
+
+static char *cgx_tx_stats_fields[] = {
+ [CGX_STAT0] = "Packets dropped due to excessive collisions",
+ [CGX_STAT1] = "Packets dropped due to excessive deferral",
+ [CGX_STAT2] = "Multiple collisions before successful transmission",
+ [CGX_STAT3] = "Single collisions before successful transmission",
+ [CGX_STAT4] = "Total octets sent on the interface",
+ [CGX_STAT5] = "Total frames sent on the interface",
+ [CGX_STAT6] = "Packets sent with an octet count < 64",
+ [CGX_STAT7] = "Packets sent with an octet count == 64",
+ [CGX_STAT8] = "Packets sent with an octet count of 65-127",
+ [CGX_STAT9] = "Packets sent with an octet count of 128-255",
+ [CGX_STAT10] = "Packets sent with an octet count of 256-511",
+ [CGX_STAT11] = "Packets sent with an octet count of 512-1023",
+ [CGX_STAT12] = "Packets sent with an octet count of 1024-1518",
+ [CGX_STAT13] = "Packets sent with an octet count of > 1518",
+ [CGX_STAT14] = "Packets sent to a broadcast DMAC",
+ [CGX_STAT15] = "Packets sent to the multicast DMAC",
+ [CGX_STAT16] = "Transmit underflow and were truncated",
+ [CGX_STAT17] = "Control/PAUSE packets sent",
+};
+
+static char *rpm_rx_stats_fields[] = {
+ "Octets of received packets",
+ "Octets of received packets with out error",
+ "Received packets with alignment errors",
+ "Control/PAUSE packets received",
+ "Packets received with Frame too long Errors",
+ "Packets received with a1nrange length Errors",
+ "Received packets",
+ "Packets received with FrameCheckSequenceErrors",
+ "Packets received with VLAN header",
+ "Error packets",
+ "Packets received with unicast DMAC",
+ "Packets received with multicast DMAC",
+ "Packets received with broadcast DMAC",
+ "Dropped packets",
+ "Total frames received on interface",
+ "Packets received with an octet count < 64",
+ "Packets received with an octet count == 64",
+ "Packets received with an octet count of 65-127",
+ "Packets received with an octet count of 128-255",
+ "Packets received with an octet count of 256-511",
+ "Packets received with an octet count of 512-1023",
+ "Packets received with an octet count of 1024-1518",
+ "Packets received with an octet count of > 1518",
+ "Oversized Packets",
+ "Jabber Packets",
+ "Fragmented Packets",
+ "CBFC(class based flow control) pause frames received for class 0",
+ "CBFC pause frames received for class 1",
+ "CBFC pause frames received for class 2",
+ "CBFC pause frames received for class 3",
+ "CBFC pause frames received for class 4",
+ "CBFC pause frames received for class 5",
+ "CBFC pause frames received for class 6",
+ "CBFC pause frames received for class 7",
+ "CBFC pause frames received for class 8",
+ "CBFC pause frames received for class 9",
+ "CBFC pause frames received for class 10",
+ "CBFC pause frames received for class 11",
+ "CBFC pause frames received for class 12",
+ "CBFC pause frames received for class 13",
+ "CBFC pause frames received for class 14",
+ "CBFC pause frames received for class 15",
+ "MAC control packets received",
+};
+
+static char *rpm_tx_stats_fields[] = {
+ "Total octets sent on the interface",
+ "Total octets transmitted OK",
+ "Control/Pause frames sent",
+ "Total frames transmitted OK",
+ "Total frames sent with VLAN header",
+ "Error Packets",
+ "Packets sent to unicast DMAC",
+ "Packets sent to the multicast DMAC",
+ "Packets sent to a broadcast DMAC",
+ "Packets sent with an octet count == 64",
+ "Packets sent with an octet count of 65-127",
+ "Packets sent with an octet count of 128-255",
+ "Packets sent with an octet count of 256-511",
+ "Packets sent with an octet count of 512-1023",
+ "Packets sent with an octet count of 1024-1518",
+ "Packets sent with an octet count of > 1518",
+ "CBFC(class based flow control) pause frames transmitted for class 0",
+ "CBFC pause frames transmitted for class 1",
+ "CBFC pause frames transmitted for class 2",
+ "CBFC pause frames transmitted for class 3",
+ "CBFC pause frames transmitted for class 4",
+ "CBFC pause frames transmitted for class 5",
+ "CBFC pause frames transmitted for class 6",
+ "CBFC pause frames transmitted for class 7",
+ "CBFC pause frames transmitted for class 8",
+ "CBFC pause frames transmitted for class 9",
+ "CBFC pause frames transmitted for class 10",
+ "CBFC pause frames transmitted for class 11",
+ "CBFC pause frames transmitted for class 12",
+ "CBFC pause frames transmitted for class 13",
+ "CBFC pause frames transmitted for class 14",
+ "CBFC pause frames transmitted for class 15",
+ "MAC control packets sent",
+ "Total frames sent on the interface"
+};
+
+enum cpt_eng_type {
+ CPT_AE_TYPE = 1,
+ CPT_SE_TYPE = 2,
+ CPT_IE_TYPE = 3,
+};
+
+#define rvu_dbg_NULL NULL
+#define rvu_dbg_open_NULL NULL
+
+#define RVU_DEBUG_SEQ_FOPS(name, read_op, write_op) \
+static int rvu_dbg_open_##name(struct inode *inode, struct file *file) \
+{ \
+ return single_open(file, rvu_dbg_##read_op, inode->i_private); \
+} \
+static const struct file_operations rvu_dbg_##name##_fops = { \
+ .owner = THIS_MODULE, \
+ .open = rvu_dbg_open_##name, \
+ .read = seq_read, \
+ .write = rvu_dbg_##write_op, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+}
+
+#define RVU_DEBUG_FOPS(name, read_op, write_op) \
+static const struct file_operations rvu_dbg_##name##_fops = { \
+ .owner = THIS_MODULE, \
+ .open = simple_open, \
+ .read = rvu_dbg_##read_op, \
+ .write = rvu_dbg_##write_op \
+}
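+
+/* Expansion sketch of the helper macro above, using one of the instances
+ * below: RVU_DEBUG_SEQ_FOPS(mcs_rx_port_stats, mcs_rx_port_stats_display,
+ * NULL) defines rvu_dbg_open_mcs_rx_port_stats(), which calls
+ * single_open(file, rvu_dbg_mcs_rx_port_stats_display, inode->i_private),
+ * and a rvu_dbg_mcs_rx_port_stats_fops table whose .write resolves to
+ * rvu_dbg_NULL, i.e. NULL, so that debugfs node is effectively read-only.
+ */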
+
+static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf);
+
+static int rvu_dbg_mcs_port_stats_display(struct seq_file *filp, void *unused, int dir)
+{
+ struct mcs *mcs = filp->private;
+ struct mcs_port_stats stats;
+ int lmac;
+
+ seq_puts(filp, "\n port stats\n");
+ mutex_lock(&mcs->stats_lock);
+ for_each_set_bit(lmac, &mcs->hw->lmac_bmap, mcs->hw->lmac_cnt) {
+ mcs_get_port_stats(mcs, &stats, lmac, dir);
+ seq_printf(filp, "port%d: Tcam Miss: %lld\n", lmac, stats.tcam_miss_cnt);
+ seq_printf(filp, "port%d: Parser errors: %lld\n", lmac, stats.parser_err_cnt);
+
+ if (dir == MCS_RX && mcs->hw->mcs_blks > 1)
+ seq_printf(filp, "port%d: Preempt error: %lld\n", lmac,
+ stats.preempt_err_cnt);
+ if (dir == MCS_TX)
+ seq_printf(filp, "port%d: Sectag insert error: %lld\n", lmac,
+ stats.sectag_insert_err_cnt);
+ }
+ mutex_unlock(&mcs->stats_lock);
+ return 0;
+}
+
+static int rvu_dbg_mcs_rx_port_stats_display(struct seq_file *filp, void *unused)
+{
+ return rvu_dbg_mcs_port_stats_display(filp, unused, MCS_RX);
+}
+
+RVU_DEBUG_SEQ_FOPS(mcs_rx_port_stats, mcs_rx_port_stats_display, NULL);
+
+static int rvu_dbg_mcs_tx_port_stats_display(struct seq_file *filp, void *unused)
+{
+ return rvu_dbg_mcs_port_stats_display(filp, unused, MCS_TX);
+}
+
+RVU_DEBUG_SEQ_FOPS(mcs_tx_port_stats, mcs_tx_port_stats_display, NULL);
+
+static int rvu_dbg_mcs_sa_stats_display(struct seq_file *filp, void *unused, int dir)
+{
+ struct mcs *mcs = filp->private;
+ struct mcs_sa_stats stats;
+ struct rsrc_bmap *map;
+ int sa_id;
+
+ if (dir == MCS_TX) {
+ map = &mcs->tx.sa;
+ mutex_lock(&mcs->stats_lock);
+ for_each_set_bit(sa_id, map->bmap, mcs->hw->sa_entries) {
+ seq_puts(filp, "\n TX SA stats\n");
+ mcs_get_sa_stats(mcs, &stats, sa_id, MCS_TX);
+ seq_printf(filp, "sa%d: Pkts encrypted: %lld\n", sa_id,
+ stats.pkt_encrypt_cnt);
+
+ seq_printf(filp, "sa%d: Pkts protected: %lld\n", sa_id,
+ stats.pkt_protected_cnt);
+ }
+ mutex_unlock(&mcs->stats_lock);
+ return 0;
+ }
+
+ /* RX stats */
+ map = &mcs->rx.sa;
+ mutex_lock(&mcs->stats_lock);
+ for_each_set_bit(sa_id, map->bmap, mcs->hw->sa_entries) {
+ seq_puts(filp, "\n RX SA stats\n");
+ mcs_get_sa_stats(mcs, &stats, sa_id, MCS_RX);
+ seq_printf(filp, "sa%d: Invalid pkts: %lld\n", sa_id, stats.pkt_invalid_cnt);
+ seq_printf(filp, "sa%d: Pkts no sa error: %lld\n", sa_id, stats.pkt_nosaerror_cnt);
+ seq_printf(filp, "sa%d: Pkts not valid: %lld\n", sa_id, stats.pkt_notvalid_cnt);
+ seq_printf(filp, "sa%d: Pkts ok: %lld\n", sa_id, stats.pkt_ok_cnt);
+ seq_printf(filp, "sa%d: Pkts no sa: %lld\n", sa_id, stats.pkt_nosa_cnt);
+ }
+ mutex_unlock(&mcs->stats_lock);
+ return 0;
+}
+
+static int rvu_dbg_mcs_rx_sa_stats_display(struct seq_file *filp, void *unused)
+{
+ return rvu_dbg_mcs_sa_stats_display(filp, unused, MCS_RX);
+}
+
+RVU_DEBUG_SEQ_FOPS(mcs_rx_sa_stats, mcs_rx_sa_stats_display, NULL);
+
+static int rvu_dbg_mcs_tx_sa_stats_display(struct seq_file *filp, void *unused)
+{
+ return rvu_dbg_mcs_sa_stats_display(filp, unused, MCS_TX);
+}
+
+RVU_DEBUG_SEQ_FOPS(mcs_tx_sa_stats, mcs_tx_sa_stats_display, NULL);
+
+static int rvu_dbg_mcs_tx_sc_stats_display(struct seq_file *filp, void *unused)
+{
+ struct mcs *mcs = filp->private;
+ struct mcs_sc_stats stats;
+ struct rsrc_bmap *map;
+ int sc_id;
+
+ map = &mcs->tx.sc;
+ seq_puts(filp, "\n SC stats\n");
+
+ mutex_lock(&mcs->stats_lock);
+ for_each_set_bit(sc_id, map->bmap, mcs->hw->sc_entries) {
+ mcs_get_sc_stats(mcs, &stats, sc_id, MCS_TX);
+ seq_printf(filp, "\n=======sc%d======\n\n", sc_id);
+ seq_printf(filp, "sc%d: Pkts encrypted: %lld\n", sc_id, stats.pkt_encrypt_cnt);
+ seq_printf(filp, "sc%d: Pkts protected: %lld\n", sc_id, stats.pkt_protected_cnt);
+
+ if (mcs->hw->mcs_blks == 1) {
+ seq_printf(filp, "sc%d: Octets encrypted: %lld\n", sc_id,
+ stats.octet_encrypt_cnt);
+ seq_printf(filp, "sc%d: Octets protected: %lld\n", sc_id,
+ stats.octet_protected_cnt);
+ }
+ }
+ mutex_unlock(&mcs->stats_lock);
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(mcs_tx_sc_stats, mcs_tx_sc_stats_display, NULL);
+
+static int rvu_dbg_mcs_rx_sc_stats_display(struct seq_file *filp, void *unused)
+{
+ struct mcs *mcs = filp->private;
+ struct mcs_sc_stats stats;
+ struct rsrc_bmap *map;
+ int sc_id;
+
+ map = &mcs->rx.sc;
+ seq_puts(filp, "\n SC stats\n");
+
+ mutex_lock(&mcs->stats_lock);
+ for_each_set_bit(sc_id, map->bmap, mcs->hw->sc_entries) {
+ mcs_get_sc_stats(mcs, &stats, sc_id, MCS_RX);
+ seq_printf(filp, "\n=======sc%d======\n\n", sc_id);
+ seq_printf(filp, "sc%d: Cam hits: %lld\n", sc_id, stats.hit_cnt);
+ seq_printf(filp, "sc%d: Invalid pkts: %lld\n", sc_id, stats.pkt_invalid_cnt);
+ seq_printf(filp, "sc%d: Late pkts: %lld\n", sc_id, stats.pkt_late_cnt);
+ seq_printf(filp, "sc%d: Notvalid pkts: %lld\n", sc_id, stats.pkt_notvalid_cnt);
+ seq_printf(filp, "sc%d: Unchecked pkts: %lld\n", sc_id, stats.pkt_unchecked_cnt);
+
+ if (mcs->hw->mcs_blks > 1) {
+ seq_printf(filp, "sc%d: Delay pkts: %lld\n", sc_id, stats.pkt_delay_cnt);
+ seq_printf(filp, "sc%d: Pkts ok: %lld\n", sc_id, stats.pkt_ok_cnt);
+ }
+ if (mcs->hw->mcs_blks == 1) {
+ seq_printf(filp, "sc%d: Octets decrypted: %lld\n", sc_id,
+ stats.octet_decrypt_cnt);
+ seq_printf(filp, "sc%d: Octets validated: %lld\n", sc_id,
+ stats.octet_validate_cnt);
+ }
+ }
+ mutex_unlock(&mcs->stats_lock);
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(mcs_rx_sc_stats, mcs_rx_sc_stats_display, NULL);
+
+static int rvu_dbg_mcs_flowid_stats_display(struct seq_file *filp, void *unused, int dir)
+{
+ struct mcs *mcs = filp->private;
+ struct mcs_flowid_stats stats;
+ struct rsrc_bmap *map;
+ int flow_id;
+
+ seq_puts(filp, "\n Flowid stats\n");
+
+ if (dir == MCS_RX)
+ map = &mcs->rx.flow_ids;
+ else
+ map = &mcs->tx.flow_ids;
+
+ mutex_lock(&mcs->stats_lock);
+ for_each_set_bit(flow_id, map->bmap, mcs->hw->tcam_entries) {
+ mcs_get_flowid_stats(mcs, &stats, flow_id, dir);
+ seq_printf(filp, "Flowid%d: Hit:%lld\n", flow_id, stats.tcam_hit_cnt);
+ }
+ mutex_unlock(&mcs->stats_lock);
+ return 0;
+}
+
+static int rvu_dbg_mcs_tx_flowid_stats_display(struct seq_file *filp, void *unused)
+{
+ return rvu_dbg_mcs_flowid_stats_display(filp, unused, MCS_TX);
+}
+
+RVU_DEBUG_SEQ_FOPS(mcs_tx_flowid_stats, mcs_tx_flowid_stats_display, NULL);
+
+static int rvu_dbg_mcs_rx_flowid_stats_display(struct seq_file *filp, void *unused)
+{
+ return rvu_dbg_mcs_flowid_stats_display(filp, unused, MCS_RX);
+}
+
+RVU_DEBUG_SEQ_FOPS(mcs_rx_flowid_stats, mcs_rx_flowid_stats_display, NULL);
+
+static int rvu_dbg_mcs_tx_secy_stats_display(struct seq_file *filp, void *unused)
+{
+ struct mcs *mcs = filp->private;
+ struct mcs_secy_stats stats;
+ struct rsrc_bmap *map;
+ int secy_id;
+
+ map = &mcs->tx.secy;
+ seq_puts(filp, "\n MCS TX secy stats\n");
+
+ mutex_lock(&mcs->stats_lock);
+ for_each_set_bit(secy_id, map->bmap, mcs->hw->secy_entries) {
+ mcs_get_tx_secy_stats(mcs, &stats, secy_id);
+ seq_printf(filp, "\n=======Secy%d======\n\n", secy_id);
+ seq_printf(filp, "secy%d: Ctrl bcast pkts: %lld\n", secy_id,
+ stats.ctl_pkt_bcast_cnt);
+ seq_printf(filp, "secy%d: Ctrl Mcast pkts: %lld\n", secy_id,
+ stats.ctl_pkt_mcast_cnt);
+ seq_printf(filp, "secy%d: Ctrl ucast pkts: %lld\n", secy_id,
+ stats.ctl_pkt_ucast_cnt);
+ seq_printf(filp, "secy%d: Ctrl octets: %lld\n", secy_id, stats.ctl_octet_cnt);
+ seq_printf(filp, "secy%d: Unctrl bcast cnt: %lld\n", secy_id,
+ stats.unctl_pkt_bcast_cnt);
+ seq_printf(filp, "secy%d: Unctrl mcast pkts: %lld\n", secy_id,
+ stats.unctl_pkt_mcast_cnt);
+ seq_printf(filp, "secy%d: Unctrl ucast pkts: %lld\n", secy_id,
+ stats.unctl_pkt_ucast_cnt);
+ seq_printf(filp, "secy%d: Unctrl octets: %lld\n", secy_id, stats.unctl_octet_cnt);
+ seq_printf(filp, "secy%d: Octet encrypted: %lld\n", secy_id,
+ stats.octet_encrypted_cnt);
+ seq_printf(filp, "secy%d: octet protected: %lld\n", secy_id,
+ stats.octet_protected_cnt);
+ seq_printf(filp, "secy%d: Pkts on active sa: %lld\n", secy_id,
+ stats.pkt_noactivesa_cnt);
+ seq_printf(filp, "secy%d: Pkts too long: %lld\n", secy_id, stats.pkt_toolong_cnt);
+ seq_printf(filp, "secy%d: Pkts untagged: %lld\n", secy_id, stats.pkt_untagged_cnt);
+ }
+ mutex_unlock(&mcs->stats_lock);
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(mcs_tx_secy_stats, mcs_tx_secy_stats_display, NULL);
+
+static int rvu_dbg_mcs_rx_secy_stats_display(struct seq_file *filp, void *unused)
+{
+ struct mcs *mcs = filp->private;
+ struct mcs_secy_stats stats;
+ struct rsrc_bmap *map;
+ int secy_id;
+
+ map = &mcs->rx.secy;
+ seq_puts(filp, "\n MCS secy stats\n");
+
+ mutex_lock(&mcs->stats_lock);
+ for_each_set_bit(secy_id, map->bmap, mcs->hw->secy_entries) {
+ mcs_get_rx_secy_stats(mcs, &stats, secy_id);
+ seq_printf(filp, "\n=======Secy%d======\n\n", secy_id);
+ seq_printf(filp, "secy%d: Ctrl bcast pkts: %lld\n", secy_id,
+ stats.ctl_pkt_bcast_cnt);
+ seq_printf(filp, "secy%d: Ctrl Mcast pkts: %lld\n", secy_id,
+ stats.ctl_pkt_mcast_cnt);
+ seq_printf(filp, "secy%d: Ctrl ucast pkts: %lld\n", secy_id,
+ stats.ctl_pkt_ucast_cnt);
+ seq_printf(filp, "secy%d: Ctrl octets: %lld\n", secy_id, stats.ctl_octet_cnt);
+ seq_printf(filp, "secy%d: Unctrl bcast cnt: %lld\n", secy_id,
+ stats.unctl_pkt_bcast_cnt);
+ seq_printf(filp, "secy%d: Unctrl mcast pkts: %lld\n", secy_id,
+ stats.unctl_pkt_mcast_cnt);
+ seq_printf(filp, "secy%d: Unctrl ucast pkts: %lld\n", secy_id,
+ stats.unctl_pkt_ucast_cnt);
+ seq_printf(filp, "secy%d: Unctrl octets: %lld\n", secy_id, stats.unctl_octet_cnt);
+ seq_printf(filp, "secy%d: Octet decrypted: %lld\n", secy_id,
+ stats.octet_decrypted_cnt);
+ seq_printf(filp, "secy%d: octet validated: %lld\n", secy_id,
+ stats.octet_validated_cnt);
+ seq_printf(filp, "secy%d: Pkts on disable port: %lld\n", secy_id,
+ stats.pkt_port_disabled_cnt);
+ seq_printf(filp, "secy%d: Pkts with badtag: %lld\n", secy_id, stats.pkt_badtag_cnt);
+ seq_printf(filp, "secy%d: Pkts with no SA(sectag.tci.c=0): %lld\n", secy_id,
+ stats.pkt_nosa_cnt);
+ seq_printf(filp, "secy%d: Pkts with nosaerror: %lld\n", secy_id,
+ stats.pkt_nosaerror_cnt);
+ seq_printf(filp, "secy%d: Tagged ctrl pkts: %lld\n", secy_id,
+ stats.pkt_tagged_ctl_cnt);
+ seq_printf(filp, "secy%d: Untaged pkts: %lld\n", secy_id, stats.pkt_untaged_cnt);
+ seq_printf(filp, "secy%d: Ctrl pkts: %lld\n", secy_id, stats.pkt_ctl_cnt);
+ if (mcs->hw->mcs_blks > 1)
+ seq_printf(filp, "secy%d: pkts notag: %lld\n", secy_id,
+ stats.pkt_notag_cnt);
+ }
+ mutex_unlock(&mcs->stats_lock);
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(mcs_rx_secy_stats, mcs_rx_secy_stats_display, NULL);
+
+static void rvu_dbg_mcs_init(struct rvu *rvu)
+{
+ struct mcs *mcs;
+ char dname[10];
+ int i;
+
+ if (!rvu->mcs_blk_cnt)
+ return;
+
+ rvu->rvu_dbg.mcs_root = debugfs_create_dir("mcs", rvu->rvu_dbg.root);
+
+ for (i = 0; i < rvu->mcs_blk_cnt; i++) {
+ mcs = mcs_get_pdata(i);
+
+ sprintf(dname, "mcs%d", i);
+ rvu->rvu_dbg.mcs = debugfs_create_dir(dname,
+ rvu->rvu_dbg.mcs_root);
+
+ rvu->rvu_dbg.mcs_rx = debugfs_create_dir("rx_stats", rvu->rvu_dbg.mcs);
+
+ debugfs_create_file("flowid", 0600, rvu->rvu_dbg.mcs_rx, mcs,
+ &rvu_dbg_mcs_rx_flowid_stats_fops);
+
+ debugfs_create_file("secy", 0600, rvu->rvu_dbg.mcs_rx, mcs,
+ &rvu_dbg_mcs_rx_secy_stats_fops);
+
+ debugfs_create_file("sc", 0600, rvu->rvu_dbg.mcs_rx, mcs,
+ &rvu_dbg_mcs_rx_sc_stats_fops);
+
+ debugfs_create_file("sa", 0600, rvu->rvu_dbg.mcs_rx, mcs,
+ &rvu_dbg_mcs_rx_sa_stats_fops);
+
+ debugfs_create_file("port", 0600, rvu->rvu_dbg.mcs_rx, mcs,
+ &rvu_dbg_mcs_rx_port_stats_fops);
+
+ rvu->rvu_dbg.mcs_tx = debugfs_create_dir("tx_stats", rvu->rvu_dbg.mcs);
+
+ debugfs_create_file("flowid", 0600, rvu->rvu_dbg.mcs_tx, mcs,
+ &rvu_dbg_mcs_tx_flowid_stats_fops);
+
+ debugfs_create_file("secy", 0600, rvu->rvu_dbg.mcs_tx, mcs,
+ &rvu_dbg_mcs_tx_secy_stats_fops);
+
+ debugfs_create_file("sc", 0600, rvu->rvu_dbg.mcs_tx, mcs,
+ &rvu_dbg_mcs_tx_sc_stats_fops);
+
+ debugfs_create_file("sa", 0600, rvu->rvu_dbg.mcs_tx, mcs,
+ &rvu_dbg_mcs_tx_sa_stats_fops);
+
+ debugfs_create_file("port", 0600, rvu->rvu_dbg.mcs_tx, mcs,
+ &rvu_dbg_mcs_tx_port_stats_fops);
+ }
+}
+
+#define LMT_MAPTBL_ENTRY_SIZE 16
+/* Dump LMTST map table */
+static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp,
+ char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct rvu *rvu = filp->private_data;
+ u64 lmt_addr, val, tbl_base;
+ int pf, vf, num_vfs, hw_vfs;
+ void __iomem *lmt_map_base;
+ int buf_size = 10240;
+ size_t off = 0;
+ int index = 0;
+ char *buf;
+ int ret;
+
+ /* don't allow partial reads */
+ if (*ppos != 0)
+ return 0;
+
+ buf = kzalloc(buf_size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE);
+
+ lmt_map_base = ioremap_wc(tbl_base, 128 * 1024);
+ if (!lmt_map_base) {
+ dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n");
+ kfree(buf);
+ return false;
+ }
+
+ off += scnprintf(&buf[off], buf_size - 1 - off,
+ "\n\t\t\t\t\tLmtst Map Table Entries");
+ off += scnprintf(&buf[off], buf_size - 1 - off,
+ "\n\t\t\t\t\t=======================");
+ off += scnprintf(&buf[off], buf_size - 1 - off, "\nPcifunc\t\t\t");
+ off += scnprintf(&buf[off], buf_size - 1 - off, "Table Index\t\t");
+ off += scnprintf(&buf[off], buf_size - 1 - off,
+ "Lmtline Base (word 0)\t\t");
+ off += scnprintf(&buf[off], buf_size - 1 - off,
+ "Lmt Map Entry (word 1)");
+ off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
+ for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
+ off += scnprintf(&buf[off], buf_size - 1 - off, "PF%d \t\t\t",
+ pf);
+
+ index = pf * rvu->hw->total_vfs * LMT_MAPTBL_ENTRY_SIZE;
+ off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%llx\t\t",
+ (tbl_base + index));
+ lmt_addr = readq(lmt_map_base + index);
+ off += scnprintf(&buf[off], buf_size - 1 - off,
+ " 0x%016llx\t\t", lmt_addr);
+ index += 8;
+ val = readq(lmt_map_base + index);
+ off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%016llx\n",
+ val);
+ /* Reading num of VFs per PF */
+ rvu_get_pf_numvfs(rvu, pf, &num_vfs, &hw_vfs);
+ for (vf = 0; vf < num_vfs; vf++) {
+ index = (pf * rvu->hw->total_vfs * 16) +
+ ((vf + 1) * LMT_MAPTBL_ENTRY_SIZE);
+ off += scnprintf(&buf[off], buf_size - 1 - off,
+ "PF%d:VF%d \t\t", pf, vf);
+ off += scnprintf(&buf[off], buf_size - 1 - off,
+ " 0x%llx\t\t", (tbl_base + index));
+ lmt_addr = readq(lmt_map_base + index);
+ off += scnprintf(&buf[off], buf_size - 1 - off,
+ " 0x%016llx\t\t", lmt_addr);
+ index += 8;
+ val = readq(lmt_map_base + index);
+ off += scnprintf(&buf[off], buf_size - 1 - off,
+ " 0x%016llx\n", val);
+ }
+ }
+ off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
+
+ ret = min(off, count);
+ if (copy_to_user(buffer, buf, ret))
+ ret = -EFAULT;
+ kfree(buf);
+
+ iounmap(lmt_map_base);
+ if (ret < 0)
+ return ret;
+
+ *ppos = ret;
+ return ret;
+}
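+
+/* Layout sketch for the table walked above, with a hypothetical total_vfs
+ * of 256: each 16-byte entry holds the LMT line base in word 0 and the map
+ * entry in word 1 (read at offset and offset + 8). PF0's own entry sits at
+ * byte offset 0, PF0:VF0 at 16, PF0:VF1 at 32, and PF1's own entry at
+ * 256 * 16 = 4096, matching the index arithmetic used in the loop.
+ */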
+
+RVU_DEBUG_FOPS(lmtst_map_table, lmtst_map_table_display, NULL);
+
+static void get_lf_str_list(struct rvu_block block, int pcifunc,
+ char *lfs)
+{
+ int lf = 0, seq = 0, len = 0, prev_lf = block.lf.max;
+
+ for_each_set_bit(lf, block.lf.bmap, block.lf.max) {
+ if (lf >= block.lf.max)
+ break;
+
+ if (block.fn_map[lf] != pcifunc)
+ continue;
+
+ if (lf == prev_lf + 1) {
+ prev_lf = lf;
+ seq = 1;
+ continue;
+ }
+
+ if (seq)
+ len += sprintf(lfs + len, "-%d,%d", prev_lf, lf);
+ else
+ len += (len ? sprintf(lfs + len, ",%d", lf) :
+ sprintf(lfs + len, "%d", lf));
+
+ prev_lf = lf;
+ seq = 0;
+ }
+
+ if (seq)
+ len += sprintf(lfs + len, "-%d", prev_lf);
+
+ lfs[len] = '\0';
+}
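+
+/* Example of the string built above, for a hypothetical attachment: if LFs
+ * 0, 1, 2, 3, 7 and 8 of a block are mapped to the given pcifunc, the
+ * resulting 'lfs' string is "0-3,7-8"; a single attached LF 5 simply
+ * yields "5".
+ */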
+
+static int get_max_column_width(struct rvu *rvu)
+{
+ int index, pf, vf, lf_str_size = 12, buf_size = 256;
+ struct rvu_block block;
+ u16 pcifunc;
+ char *buf;
+
+ buf = kzalloc(buf_size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
+ for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
+ pcifunc = pf << 10 | vf;
+ if (!pcifunc)
+ continue;
+
+ for (index = 0; index < BLK_COUNT; index++) {
+ block = rvu->hw->block[index];
+ if (!strlen(block.name))
+ continue;
+
+ get_lf_str_list(block, pcifunc, buf);
+ if (lf_str_size <= strlen(buf))
+ lf_str_size = strlen(buf) + 1;
+ }
+ }
+ }
+
+ kfree(buf);
+ return lf_str_size;
+}
+
+/* Dumps current provisioning status of all RVU block LFs */
+static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
+ char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ int index, off = 0, flag = 0, len = 0, i = 0;
+ struct rvu *rvu = filp->private_data;
+ int bytes_not_copied = 0;
+ struct rvu_block block;
+ int pf, vf, pcifunc;
+ int buf_size = 2048;
+ int lf_str_size;
+ char *lfs;
+ char *buf;
+
+ /* don't allow partial reads */
+ if (*ppos != 0)
+ return 0;
+
+ buf = kzalloc(buf_size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ /* Get the maximum width of a column */
+ lf_str_size = get_max_column_width(rvu);
+
+ lfs = kzalloc(lf_str_size, GFP_KERNEL);
+ if (!lfs) {
+ kfree(buf);
+ return -ENOMEM;
+ }
+ off += scnprintf(&buf[off], buf_size - 1 - off, "%-*s", lf_str_size,
+ "pcifunc");
+ for (index = 0; index < BLK_COUNT; index++)
+ if (strlen(rvu->hw->block[index].name)) {
+ off += scnprintf(&buf[off], buf_size - 1 - off,
+ "%-*s", lf_str_size,
+ rvu->hw->block[index].name);
+ }
+
+ off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
+ bytes_not_copied = copy_to_user(buffer + (i * off), buf, off);
+ if (bytes_not_copied)
+ goto out;
+
+ i++;
+ *ppos += off;
+ for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
+ for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
+ off = 0;
+ flag = 0;
+ pcifunc = pf << 10 | vf;
+ if (!pcifunc)
+ continue;
+
+ if (vf) {
+ sprintf(lfs, "PF%d:VF%d", pf, vf - 1);
+ off = scnprintf(&buf[off],
+ buf_size - 1 - off,
+ "%-*s", lf_str_size, lfs);
+ } else {
+ sprintf(lfs, "PF%d", pf);
+ off = scnprintf(&buf[off],
+ buf_size - 1 - off,
+ "%-*s", lf_str_size, lfs);
+ }
+
+ for (index = 0; index < BLK_COUNT; index++) {
+ block = rvu->hw->block[index];
+ if (!strlen(block.name))
+ continue;
+ len = 0;
+ lfs[len] = '\0';
+ get_lf_str_list(block, pcifunc, lfs);
+ if (strlen(lfs))
+ flag = 1;
+
+ off += scnprintf(&buf[off], buf_size - 1 - off,
+ "%-*s", lf_str_size, lfs);
+ }
+ if (flag) {
+ off += scnprintf(&buf[off],
+ buf_size - 1 - off, "\n");
+ bytes_not_copied = copy_to_user(buffer +
+ (i * off),
+ buf, off);
+ if (bytes_not_copied)
+ goto out;
+
+ i++;
+ *ppos += off;
+ }
+ }
+ }
+
+out:
+ kfree(lfs);
+ kfree(buf);
+ if (bytes_not_copied)
+ return -EFAULT;
+
+ return *ppos;
+}
+
+RVU_DEBUG_FOPS(rsrc_status, rsrc_attach_status, NULL);
+
+static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused)
+{
+ struct rvu *rvu = filp->private;
+ struct pci_dev *pdev = NULL;
+ struct mac_ops *mac_ops;
+ char cgx[10], lmac[10];
+ struct rvu_pfvf *pfvf;
+ int pf, domain, blkid;
+ u8 cgx_id, lmac_id;
+ u16 pcifunc;
+
+ domain = 2;
+ mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
+ /* There can be no CGX devices at all */
+ if (!mac_ops)
+ return 0;
+ seq_printf(filp, "PCI dev\t\tRVU PF Func\tNIX block\t%s\tLMAC\n",
+ mac_ops->name);
+ for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
+ if (!is_pf_cgxmapped(rvu, pf))
+ continue;
+
+ pdev = pci_get_domain_bus_and_slot(domain, pf + 1, 0);
+ if (!pdev)
+ continue;
+
+ cgx[0] = 0;
+ lmac[0] = 0;
+ pcifunc = pf << 10;
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+
+ if (pfvf->nix_blkaddr == BLKADDR_NIX0)
+ blkid = 0;
+ else
+ blkid = 1;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id,
+ &lmac_id);
+ sprintf(cgx, "%s%d", mac_ops->name, cgx_id);
+ sprintf(lmac, "LMAC%d", lmac_id);
+ seq_printf(filp, "%s\t0x%x\t\tNIX%d\t\t%s\t%s\n",
+ dev_name(&pdev->dev), pcifunc, blkid, cgx, lmac);
+
+ pci_dev_put(pdev);
+ }
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(rvu_pf_cgx_map, rvu_pf_cgx_map_display, NULL);
+
+static bool rvu_dbg_is_valid_lf(struct rvu *rvu, int blkaddr, int lf,
+ u16 *pcifunc)
+{
+ struct rvu_block *block;
+ struct rvu_hwinfo *hw;
+
+ hw = rvu->hw;
+ block = &hw->block[blkaddr];
+
+ if (lf < 0 || lf >= block->lf.max) {
+ dev_warn(rvu->dev, "Invalid LF: valid range: 0-%d\n",
+ block->lf.max - 1);
+ return false;
+ }
+
+ *pcifunc = block->fn_map[lf];
+ if (!*pcifunc) {
+ dev_warn(rvu->dev,
+ "This LF is not attached to any RVU PFFUNC\n");
+ return false;
+ }
+ return true;
+}
+
+static void print_npa_qsize(struct seq_file *m, struct rvu_pfvf *pfvf)
+{
+ char *buf;
+
+ buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!buf)
+ return;
+
+ if (!pfvf->aura_ctx) {
+ seq_puts(m, "Aura context is not initialized\n");
+ } else {
+ bitmap_print_to_pagebuf(false, buf, pfvf->aura_bmap,
+ pfvf->aura_ctx->qsize);
+ seq_printf(m, "Aura count : %d\n", pfvf->aura_ctx->qsize);
+ seq_printf(m, "Aura context ena/dis bitmap : %s\n", buf);
+ }
+
+ if (!pfvf->pool_ctx) {
+ seq_puts(m, "Pool context is not initialized\n");
+ } else {
+ bitmap_print_to_pagebuf(false, buf, pfvf->pool_bmap,
+ pfvf->pool_ctx->qsize);
+ seq_printf(m, "Pool count : %d\n", pfvf->pool_ctx->qsize);
+ seq_printf(m, "Pool context ena/dis bitmap : %s\n", buf);
+ }
+ kfree(buf);
+}
+
+/* The 'qsize' entry dumps current Aura/Pool context Qsize
+ * and each context's current enable/disable status in a bitmap.
+ */
+static int rvu_dbg_qsize_display(struct seq_file *filp, void *unused,
+ int blktype)
+{
+ void (*print_qsize)(struct seq_file *filp,
+ struct rvu_pfvf *pfvf) = NULL;
+ struct dentry *current_dir;
+ struct rvu_pfvf *pfvf;
+ struct rvu *rvu;
+ int qsize_id;
+ u16 pcifunc;
+ int blkaddr;
+
+ rvu = filp->private;
+ switch (blktype) {
+ case BLKTYPE_NPA:
+ qsize_id = rvu->rvu_dbg.npa_qsize_id;
+ print_qsize = print_npa_qsize;
+ break;
+
+ case BLKTYPE_NIX:
+ qsize_id = rvu->rvu_dbg.nix_qsize_id;
+ print_qsize = print_nix_qsize;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (blktype == BLKTYPE_NPA) {
+ blkaddr = BLKADDR_NPA;
+ } else {
+ current_dir = filp->file->f_path.dentry->d_parent;
+ blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ?
+ BLKADDR_NIX1 : BLKADDR_NIX0);
+ }
+
+ if (!rvu_dbg_is_valid_lf(rvu, blkaddr, qsize_id, &pcifunc))
+ return -EINVAL;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ print_qsize(filp, pfvf);
+
+ return 0;
+}
+
+static ssize_t rvu_dbg_qsize_write(struct file *filp,
+ const char __user *buffer, size_t count,
+ loff_t *ppos, int blktype)
+{
+ char *blk_string = (blktype == BLKTYPE_NPA) ? "npa" : "nix";
+ struct seq_file *seqfile = filp->private_data;
+ char *cmd_buf, *cmd_buf_tmp, *subtoken;
+ struct rvu *rvu = seqfile->private;
+ struct dentry *current_dir;
+ int blkaddr;
+ u16 pcifunc;
+ int ret, lf;
+
+ cmd_buf = memdup_user(buffer, count + 1);
+ if (IS_ERR(cmd_buf))
+ return -ENOMEM;
+
+ cmd_buf[count] = '\0';
+
+ cmd_buf_tmp = strchr(cmd_buf, '\n');
+ if (cmd_buf_tmp) {
+ *cmd_buf_tmp = '\0';
+ count = cmd_buf_tmp - cmd_buf + 1;
+ }
+
+ cmd_buf_tmp = cmd_buf;
+ subtoken = strsep(&cmd_buf, " ");
+ ret = subtoken ? kstrtoint(subtoken, 10, &lf) : -EINVAL;
+ if (cmd_buf)
+ ret = -EINVAL;
+
+ if (ret < 0 || !strncmp(subtoken, "help", 4)) {
+ dev_info(rvu->dev, "Use echo <%s-lf > qsize\n", blk_string);
+ goto qsize_write_done;
+ }
+
+ if (blktype == BLKTYPE_NPA) {
+ blkaddr = BLKADDR_NPA;
+ } else {
+ current_dir = filp->f_path.dentry->d_parent;
+ blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ?
+ BLKADDR_NIX1 : BLKADDR_NIX0);
+ }
+
+ if (!rvu_dbg_is_valid_lf(rvu, blkaddr, lf, &pcifunc)) {
+ ret = -EINVAL;
+ goto qsize_write_done;
+ }
+ if (blktype == BLKTYPE_NPA)
+ rvu->rvu_dbg.npa_qsize_id = lf;
+ else
+ rvu->rvu_dbg.nix_qsize_id = lf;
+
+qsize_write_done:
+ kfree(cmd_buf_tmp);
+ return ret ? ret : count;
+}
+
+static ssize_t rvu_dbg_npa_qsize_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return rvu_dbg_qsize_write(filp, buffer, count, ppos,
+ BLKTYPE_NPA);
+}
+
+static int rvu_dbg_npa_qsize_display(struct seq_file *filp, void *unused)
+{
+ return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NPA);
+}
+
+RVU_DEBUG_SEQ_FOPS(npa_qsize, npa_qsize_display, npa_qsize_write);
+
+/* Dumps given NPA Aura's context */
+static void print_npa_aura_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
+{
+ struct npa_aura_s *aura = &rsp->aura;
+ struct rvu *rvu = m->private;
+
+ seq_printf(m, "W0: Pool addr\t\t%llx\n", aura->pool_addr);
+
+ seq_printf(m, "W1: ena\t\t\t%d\nW1: pool caching\t%d\n",
+ aura->ena, aura->pool_caching);
+ seq_printf(m, "W1: pool way mask\t%d\nW1: avg con\t\t%d\n",
+ aura->pool_way_mask, aura->avg_con);
+ seq_printf(m, "W1: pool drop ena\t%d\nW1: aura drop ena\t%d\n",
+ aura->pool_drop_ena, aura->aura_drop_ena);
+ seq_printf(m, "W1: bp_ena\t\t%d\nW1: aura drop\t\t%d\n",
+ aura->bp_ena, aura->aura_drop);
+ seq_printf(m, "W1: aura shift\t\t%d\nW1: avg_level\t\t%d\n",
+ aura->shift, aura->avg_level);
+
+ seq_printf(m, "W2: count\t\t%llu\nW2: nix0_bpid\t\t%d\nW2: nix1_bpid\t\t%d\n",
+ (u64)aura->count, aura->nix0_bpid, aura->nix1_bpid);
+
+ seq_printf(m, "W3: limit\t\t%llu\nW3: bp\t\t\t%d\nW3: fc_ena\t\t%d\n",
+ (u64)aura->limit, aura->bp, aura->fc_ena);
+
+ if (!is_rvu_otx2(rvu))
+ seq_printf(m, "W3: fc_be\t\t%d\n", aura->fc_be);
+ seq_printf(m, "W3: fc_up_crossing\t%d\nW3: fc_stype\t\t%d\n",
+ aura->fc_up_crossing, aura->fc_stype);
+ seq_printf(m, "W3: fc_hyst_bits\t%d\n", aura->fc_hyst_bits);
+
+ seq_printf(m, "W4: fc_addr\t\t%llx\n", aura->fc_addr);
+
+ seq_printf(m, "W5: pool_drop\t\t%d\nW5: update_time\t\t%d\n",
+ aura->pool_drop, aura->update_time);
+ seq_printf(m, "W5: err_int \t\t%d\nW5: err_int_ena\t\t%d\n",
+ aura->err_int, aura->err_int_ena);
+ seq_printf(m, "W5: thresh_int\t\t%d\nW5: thresh_int_ena \t%d\n",
+ aura->thresh_int, aura->thresh_int_ena);
+ seq_printf(m, "W5: thresh_up\t\t%d\nW5: thresh_qint_idx\t%d\n",
+ aura->thresh_up, aura->thresh_qint_idx);
+ seq_printf(m, "W5: err_qint_idx \t%d\n", aura->err_qint_idx);
+
+ seq_printf(m, "W6: thresh\t\t%llu\n", (u64)aura->thresh);
+ if (!is_rvu_otx2(rvu))
+ seq_printf(m, "W6: fc_msh_dst\t\t%d\n", aura->fc_msh_dst);
+}
+
+/* Dumps given NPA Pool's context */
+static void print_npa_pool_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
+{
+ struct npa_pool_s *pool = &rsp->pool;
+ struct rvu *rvu = m->private;
+
+ seq_printf(m, "W0: Stack base\t\t%llx\n", pool->stack_base);
+
+ seq_printf(m, "W1: ena \t\t%d\nW1: nat_align \t\t%d\n",
+ pool->ena, pool->nat_align);
+ seq_printf(m, "W1: stack_caching\t%d\nW1: stack_way_mask\t%d\n",
+ pool->stack_caching, pool->stack_way_mask);
+ seq_printf(m, "W1: buf_offset\t\t%d\nW1: buf_size\t\t%d\n",
+ pool->buf_offset, pool->buf_size);
+
+ seq_printf(m, "W2: stack_max_pages \t%d\nW2: stack_pages\t\t%d\n",
+ pool->stack_max_pages, pool->stack_pages);
+
+ seq_printf(m, "W3: op_pc \t\t%llu\n", (u64)pool->op_pc);
+
+ seq_printf(m, "W4: stack_offset\t%d\nW4: shift\t\t%d\nW4: avg_level\t\t%d\n",
+ pool->stack_offset, pool->shift, pool->avg_level);
+ seq_printf(m, "W4: avg_con \t\t%d\nW4: fc_ena\t\t%d\nW4: fc_stype\t\t%d\n",
+ pool->avg_con, pool->fc_ena, pool->fc_stype);
+ seq_printf(m, "W4: fc_hyst_bits\t%d\nW4: fc_up_crossing\t%d\n",
+ pool->fc_hyst_bits, pool->fc_up_crossing);
+ if (!is_rvu_otx2(rvu))
+ seq_printf(m, "W4: fc_be\t\t%d\n", pool->fc_be);
+ seq_printf(m, "W4: update_time\t\t%d\n", pool->update_time);
+
+ seq_printf(m, "W5: fc_addr\t\t%llx\n", pool->fc_addr);
+
+ seq_printf(m, "W6: ptr_start\t\t%llx\n", pool->ptr_start);
+
+ seq_printf(m, "W7: ptr_end\t\t%llx\n", pool->ptr_end);
+
+ seq_printf(m, "W8: err_int\t\t%d\nW8: err_int_ena\t\t%d\n",
+ pool->err_int, pool->err_int_ena);
+ seq_printf(m, "W8: thresh_int\t\t%d\n", pool->thresh_int);
+ seq_printf(m, "W8: thresh_int_ena\t%d\nW8: thresh_up\t\t%d\n",
+ pool->thresh_int_ena, pool->thresh_up);
+ seq_printf(m, "W8: thresh_qint_idx\t%d\nW8: err_qint_idx\t%d\n",
+ pool->thresh_qint_idx, pool->err_qint_idx);
+ if (!is_rvu_otx2(rvu))
+ seq_printf(m, "W8: fc_msh_dst\t\t%d\n", pool->fc_msh_dst);
+}
+
+/* Reads aura/pool's ctx from admin queue */
+static int rvu_dbg_npa_ctx_display(struct seq_file *m, void *unused, int ctype)
+{
+ void (*print_npa_ctx)(struct seq_file *m, struct npa_aq_enq_rsp *rsp);
+ struct npa_aq_enq_req aq_req;
+ struct npa_aq_enq_rsp rsp;
+ struct rvu_pfvf *pfvf;
+ int aura, rc, max_id;
+ int npalf, id, all;
+ struct rvu *rvu;
+ u16 pcifunc;
+
+ rvu = m->private;
+
+ switch (ctype) {
+ case NPA_AQ_CTYPE_AURA:
+ npalf = rvu->rvu_dbg.npa_aura_ctx.lf;
+ id = rvu->rvu_dbg.npa_aura_ctx.id;
+ all = rvu->rvu_dbg.npa_aura_ctx.all;
+ break;
+
+ case NPA_AQ_CTYPE_POOL:
+ npalf = rvu->rvu_dbg.npa_pool_ctx.lf;
+ id = rvu->rvu_dbg.npa_pool_ctx.id;
+ all = rvu->rvu_dbg.npa_pool_ctx.all;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc))
+ return -EINVAL;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ if (ctype == NPA_AQ_CTYPE_AURA && !pfvf->aura_ctx) {
+ seq_puts(m, "Aura context is not initialized\n");
+ return -EINVAL;
+ } else if (ctype == NPA_AQ_CTYPE_POOL && !pfvf->pool_ctx) {
+ seq_puts(m, "Pool context is not initialized\n");
+ return -EINVAL;
+ }
+
+ memset(&aq_req, 0, sizeof(struct npa_aq_enq_req));
+ aq_req.hdr.pcifunc = pcifunc;
+ aq_req.ctype = ctype;
+ aq_req.op = NPA_AQ_INSTOP_READ;
+ if (ctype == NPA_AQ_CTYPE_AURA) {
+ max_id = pfvf->aura_ctx->qsize;
+ print_npa_ctx = print_npa_aura_ctx;
+ } else {
+ max_id = pfvf->pool_ctx->qsize;
+ print_npa_ctx = print_npa_pool_ctx;
+ }
+
+ if (id < 0 || id >= max_id) {
+ seq_printf(m, "Invalid %s, valid range is 0-%d\n",
+ (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool",
+ max_id - 1);
+ return -EINVAL;
+ }
+
+ if (all)
+ id = 0;
+ else
+ max_id = id + 1;
+
+ for (aura = id; aura < max_id; aura++) {
+ aq_req.aura_id = aura;
+
+ /* Skip if queue is uninitialized */
+ if (ctype == NPA_AQ_CTYPE_POOL && !test_bit(aura, pfvf->pool_bmap))
+ continue;
+
+ seq_printf(m, "======%s : %d=======\n",
+ (ctype == NPA_AQ_CTYPE_AURA) ? "AURA" : "POOL",
+ aq_req.aura_id);
+ rc = rvu_npa_aq_enq_inst(rvu, &aq_req, &rsp);
+ if (rc) {
+ seq_puts(m, "Failed to read context\n");
+ return -EINVAL;
+ }
+ print_npa_ctx(m, &rsp);
+ }
+ return 0;
+}
+
+static int write_npa_ctx(struct rvu *rvu, bool all,
+ int npalf, int id, int ctype)
+{
+ struct rvu_pfvf *pfvf;
+ int max_id = 0;
+ u16 pcifunc;
+
+ if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc))
+ return -EINVAL;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+
+ if (ctype == NPA_AQ_CTYPE_AURA) {
+ if (!pfvf->aura_ctx) {
+ dev_warn(rvu->dev, "Aura context is not initialized\n");
+ return -EINVAL;
+ }
+ max_id = pfvf->aura_ctx->qsize;
+ } else if (ctype == NPA_AQ_CTYPE_POOL) {
+ if (!pfvf->pool_ctx) {
+ dev_warn(rvu->dev, "Pool context is not initialized\n");
+ return -EINVAL;
+ }
+ max_id = pfvf->pool_ctx->qsize;
+ }
+
+ if (id < 0 || id >= max_id) {
+ dev_warn(rvu->dev, "Invalid %s, valid range is 0-%d\n",
+ (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool",
+ max_id - 1);
+ return -EINVAL;
+ }
+
+ switch (ctype) {
+ case NPA_AQ_CTYPE_AURA:
+ rvu->rvu_dbg.npa_aura_ctx.lf = npalf;
+ rvu->rvu_dbg.npa_aura_ctx.id = id;
+ rvu->rvu_dbg.npa_aura_ctx.all = all;
+ break;
+
+ case NPA_AQ_CTYPE_POOL:
+ rvu->rvu_dbg.npa_pool_ctx.lf = npalf;
+ rvu->rvu_dbg.npa_pool_ctx.id = id;
+ rvu->rvu_dbg.npa_pool_ctx.all = all;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int parse_cmd_buffer_ctx(char *cmd_buf, size_t *count,
+ const char __user *buffer, int *npalf,
+ int *id, bool *all)
+{
+ int bytes_not_copied;
+ char *cmd_buf_tmp;
+ char *subtoken;
+ int ret;
+
+ bytes_not_copied = copy_from_user(cmd_buf, buffer, *count);
+ if (bytes_not_copied)
+ return -EFAULT;
+
+ cmd_buf[*count] = '\0';
+ cmd_buf_tmp = strchr(cmd_buf, '\n');
+
+ if (cmd_buf_tmp) {
+ *cmd_buf_tmp = '\0';
+ *count = cmd_buf_tmp - cmd_buf + 1;
+ }
+
+ subtoken = strsep(&cmd_buf, " ");
+ ret = subtoken ? kstrtoint(subtoken, 10, npalf) : -EINVAL;
+ if (ret < 0)
+ return ret;
+ subtoken = strsep(&cmd_buf, " ");
+ if (subtoken && strcmp(subtoken, "all") == 0) {
+ *all = true;
+ } else {
+ ret = subtoken ? kstrtoint(subtoken, 10, id) : -EINVAL;
+ if (ret < 0)
+ return ret;
+ }
+ if (cmd_buf)
+ return -EINVAL;
+ return ret;
+}
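+
+/* parse_cmd_buffer_ctx() accepts exactly one of the following forms
+ * (the trailing newline added by 'echo' is stripped and any extra
+ * token makes the write fail with -EINVAL):
+ *
+ *   "<lf> <id>"    - LF index plus a single aura/pool/queue index
+ *   "<lf> all"     - LF index, select every initialized context
+ */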
+
+static ssize_t rvu_dbg_npa_ctx_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos, int ctype)
+{
+ char *cmd_buf, *ctype_string = (ctype == NPA_AQ_CTYPE_AURA) ?
+ "aura" : "pool";
+ struct seq_file *seqfp = filp->private_data;
+ struct rvu *rvu = seqfp->private;
+ int npalf, id = 0, ret;
+ bool all = false;
+
+ if ((*ppos != 0) || !count)
+ return -EINVAL;
+
+ cmd_buf = kzalloc(count + 1, GFP_KERNEL);
+ if (!cmd_buf)
+ return -ENOMEM;
+ ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer,
+ &npalf, &id, &all);
+ if (ret < 0) {
+ dev_info(rvu->dev,
+ "Usage: echo <npalf> <%s number|all> > %s_ctx\n",
+ ctype_string, ctype_string);
+ goto done;
+ } else {
+ ret = write_npa_ctx(rvu, all, npalf, id, ctype);
+ }
+done:
+ kfree(cmd_buf);
+ return ret ? ret : count;
+}
+
+static ssize_t rvu_dbg_npa_aura_ctx_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos,
+ NPA_AQ_CTYPE_AURA);
+}
+
+static int rvu_dbg_npa_aura_ctx_display(struct seq_file *filp, void *unused)
+{
+ return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_AURA);
+}
+
+RVU_DEBUG_SEQ_FOPS(npa_aura_ctx, npa_aura_ctx_display, npa_aura_ctx_write);
+
+static ssize_t rvu_dbg_npa_pool_ctx_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos,
+ NPA_AQ_CTYPE_POOL);
+}
+
+static int rvu_dbg_npa_pool_ctx_display(struct seq_file *filp, void *unused)
+{
+ return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_POOL);
+}
+
+RVU_DEBUG_SEQ_FOPS(npa_pool_ctx, npa_pool_ctx_display, npa_pool_ctx_write);
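+
+/* Usage sketch for the NPA context files (both are created in
+ * rvu_dbg_npa_init() below; paths are relative to the debugfs root):
+ *
+ *   echo "0 all" > npa/aura_ctx   # NPA LF 0, every initialized aura
+ *   cat npa/aura_ctx
+ *   echo "0 3" > npa/pool_ctx     # NPA LF 0, pool 3 only
+ *   cat npa/pool_ctx
+ *
+ * Writes only record the selection; the admin queue READ is issued on
+ * read via rvu_dbg_npa_ctx_display().
+ */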
+
+static void ndc_cache_stats(struct seq_file *s, int blk_addr,
+ int ctype, int transaction)
+{
+ u64 req, out_req, lat, cant_alloc;
+ struct nix_hw *nix_hw;
+ struct rvu *rvu;
+ int port;
+
+ if (blk_addr == BLKADDR_NDC_NPA0) {
+ rvu = s->private;
+ } else {
+ nix_hw = s->private;
+ rvu = nix_hw->rvu;
+ }
+
+ for (port = 0; port < NDC_MAX_PORT; port++) {
+ req = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_REQ_PC
+ (port, ctype, transaction));
+ lat = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_LAT_PC
+ (port, ctype, transaction));
+ out_req = rvu_read64(rvu, blk_addr,
+ NDC_AF_PORTX_RTX_RWX_OSTDN_PC
+ (port, ctype, transaction));
+ cant_alloc = rvu_read64(rvu, blk_addr,
+ NDC_AF_PORTX_RTX_CANT_ALLOC_PC
+ (port, transaction));
+ seq_printf(s, "\nPort:%d\n", port);
+ seq_printf(s, "\tTotal Requests:\t\t%lld\n", req);
+ seq_printf(s, "\tTotal Time Taken:\t%lld cycles\n", lat);
+ seq_printf(s, "\tAvg Latency:\t\t%lld cycles\n",
+ req ? lat / req : 0);
+ seq_printf(s, "\tOutstanding Requests:\t%lld\n", out_req);
+ seq_printf(s, "\tCan't Alloc Requests:\t%lld\n", cant_alloc);
+ }
+}
+
+static int ndc_blk_cache_stats(struct seq_file *s, int idx, int blk_addr)
+{
+ seq_puts(s, "\n***** CACHE mode read stats *****\n");
+ ndc_cache_stats(s, blk_addr, CACHING, NDC_READ_TRANS);
+ seq_puts(s, "\n***** CACHE mode write stats *****\n");
+ ndc_cache_stats(s, blk_addr, CACHING, NDC_WRITE_TRANS);
+ seq_puts(s, "\n***** BY-PASS mode read stats *****\n");
+ ndc_cache_stats(s, blk_addr, BYPASS, NDC_READ_TRANS);
+ seq_puts(s, "\n***** BY-PASS mode write stats *****\n");
+ ndc_cache_stats(s, blk_addr, BYPASS, NDC_WRITE_TRANS);
+ return 0;
+}
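+
+/* Note: the per-port "Avg Latency" printed above is a derived value,
+ * not a hardware counter. It is the latency cycle counter
+ * (NDC_AF_PORTX_RTX_RWX_LAT_PC) divided by the request counter
+ * (NDC_AF_PORTX_RTX_RWX_REQ_PC) for the same port/ctype/transaction.
+ */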
+
+static int rvu_dbg_npa_ndc_cache_display(struct seq_file *filp, void *unused)
+{
+ return ndc_blk_cache_stats(filp, NPA0_U, BLKADDR_NDC_NPA0);
+}
+
+RVU_DEBUG_SEQ_FOPS(npa_ndc_cache, npa_ndc_cache_display, NULL);
+
+static int ndc_blk_hits_miss_stats(struct seq_file *s, int idx, int blk_addr)
+{
+ struct nix_hw *nix_hw;
+ struct rvu *rvu;
+ int bank, max_bank;
+ u64 ndc_af_const;
+
+ if (blk_addr == BLKADDR_NDC_NPA0) {
+ rvu = s->private;
+ } else {
+ nix_hw = s->private;
+ rvu = nix_hw->rvu;
+ }
+
+ ndc_af_const = rvu_read64(rvu, blk_addr, NDC_AF_CONST);
+ max_bank = FIELD_GET(NDC_AF_BANK_MASK, ndc_af_const);
+ for (bank = 0; bank < max_bank; bank++) {
+ seq_printf(s, "BANK:%d\n", bank);
+ seq_printf(s, "\tHits:\t%lld\n",
+ (u64)rvu_read64(rvu, blk_addr,
+ NDC_AF_BANKX_HIT_PC(bank)));
+ seq_printf(s, "\tMiss:\t%lld\n",
+ (u64)rvu_read64(rvu, blk_addr,
+ NDC_AF_BANKX_MISS_PC(bank)));
+ }
+ return 0;
+}
+
+static int rvu_dbg_nix_ndc_rx_cache_display(struct seq_file *filp, void *unused)
+{
+ struct nix_hw *nix_hw = filp->private;
+ int blkaddr = 0;
+ int ndc_idx = 0;
+
+ blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
+ BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX);
+ ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_RX : NIX0_RX);
+
+ return ndc_blk_cache_stats(filp, ndc_idx, blkaddr);
+}
+
+RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_cache, nix_ndc_rx_cache_display, NULL);
+
+static int rvu_dbg_nix_ndc_tx_cache_display(struct seq_file *filp, void *unused)
+{
+ struct nix_hw *nix_hw = filp->private;
+ int blkaddr = 0;
+ int ndc_idx = 0;
+
+ blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
+ BLKADDR_NDC_NIX1_TX : BLKADDR_NDC_NIX0_TX);
+ ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_TX : NIX0_TX);
+
+ return ndc_blk_cache_stats(filp, ndc_idx, blkaddr);
+}
+
+RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_cache, nix_ndc_tx_cache_display, NULL);
+
+static int rvu_dbg_npa_ndc_hits_miss_display(struct seq_file *filp,
+ void *unused)
+{
+ return ndc_blk_hits_miss_stats(filp, NPA0_U, BLKADDR_NDC_NPA0);
+}
+
+RVU_DEBUG_SEQ_FOPS(npa_ndc_hits_miss, npa_ndc_hits_miss_display, NULL);
+
+static int rvu_dbg_nix_ndc_rx_hits_miss_display(struct seq_file *filp,
+ void *unused)
+{
+ struct nix_hw *nix_hw = filp->private;
+ int ndc_idx = NPA0_U;
+ int blkaddr = 0;
+
+ blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
+ BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX);
+
+ return ndc_blk_hits_miss_stats(filp, ndc_idx, blkaddr);
+}
+
+RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_hits_miss, nix_ndc_rx_hits_miss_display, NULL);
+
+static int rvu_dbg_nix_ndc_tx_hits_miss_display(struct seq_file *filp,
+ void *unused)
+{
+ struct nix_hw *nix_hw = filp->private;
+ int ndc_idx = NPA0_U;
+ int blkaddr = 0;
+
+ blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
+ BLKADDR_NDC_NIX1_TX : BLKADDR_NDC_NIX0_TX);
+
+ return ndc_blk_hits_miss_stats(filp, ndc_idx, blkaddr);
+}
+
+RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_hits_miss, nix_ndc_tx_hits_miss_display, NULL);
+
+static void print_nix_cn10k_sq_ctx(struct seq_file *m,
+ struct nix_cn10k_sq_ctx_s *sq_ctx)
+{
+ seq_printf(m, "W0: ena \t\t\t%d\nW0: qint_idx \t\t\t%d\n",
+ sq_ctx->ena, sq_ctx->qint_idx);
+ seq_printf(m, "W0: substream \t\t\t0x%03x\nW0: sdp_mcast \t\t\t%d\n",
+ sq_ctx->substream, sq_ctx->sdp_mcast);
+ seq_printf(m, "W0: cq \t\t\t\t%d\nW0: sqe_way_mask \t\t%d\n\n",
+ sq_ctx->cq, sq_ctx->sqe_way_mask);
+
+ seq_printf(m, "W1: smq \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: xoff\t\t\t%d\n",
+ sq_ctx->smq, sq_ctx->cq_ena, sq_ctx->xoff);
+ seq_printf(m, "W1: sso_ena \t\t\t%d\nW1: smq_rr_weight\t\t%d\n",
+ sq_ctx->sso_ena, sq_ctx->smq_rr_weight);
+ seq_printf(m, "W1: default_chan\t\t%d\nW1: sqb_count\t\t\t%d\n\n",
+ sq_ctx->default_chan, sq_ctx->sqb_count);
+
+ seq_printf(m, "W2: smq_rr_count_lb \t\t%d\n", sq_ctx->smq_rr_count_lb);
+ seq_printf(m, "W2: smq_rr_count_ub \t\t%d\n", sq_ctx->smq_rr_count_ub);
+ seq_printf(m, "W2: sqb_aura \t\t\t%d\nW2: sq_int \t\t\t%d\n",
+ sq_ctx->sqb_aura, sq_ctx->sq_int);
+ seq_printf(m, "W2: sq_int_ena \t\t\t%d\nW2: sqe_stype \t\t\t%d\n",
+ sq_ctx->sq_int_ena, sq_ctx->sqe_stype);
+
+ seq_printf(m, "W3: max_sqe_size\t\t%d\nW3: cq_limit\t\t\t%d\n",
+ sq_ctx->max_sqe_size, sq_ctx->cq_limit);
+ seq_printf(m, "W3: lmt_dis \t\t\t%d\nW3: mnq_dis \t\t\t%d\n",
+ sq_ctx->lmt_dis, sq_ctx->mnq_dis);
+ seq_printf(m, "W3: smq_next_sq\t\t\t%d\nW3: smq_lso_segnum\t\t%d\n",
+ sq_ctx->smq_next_sq, sq_ctx->smq_lso_segnum);
+ seq_printf(m, "W3: tail_offset \t\t%d\nW3: smenq_offset\t\t%d\n",
+ sq_ctx->tail_offset, sq_ctx->smenq_offset);
+ seq_printf(m, "W3: head_offset\t\t\t%d\nW3: smenq_next_sqb_vld\t\t%d\n\n",
+ sq_ctx->head_offset, sq_ctx->smenq_next_sqb_vld);
+
+ seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n",
+ sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend);
+ seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
+ seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
+ seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
+ seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n",
+ sq_ctx->smenq_next_sqb);
+
+ seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb);
+
+ seq_printf(m, "W9: vfi_lso_total\t\t%d\n", sq_ctx->vfi_lso_total);
+ seq_printf(m, "W9: vfi_lso_sizem1\t\t%d\nW9: vfi_lso_sb\t\t\t%d\n",
+ sq_ctx->vfi_lso_sizem1, sq_ctx->vfi_lso_sb);
+ seq_printf(m, "W9: vfi_lso_mps\t\t\t%d\nW9: vfi_lso_vlan0_ins_ena\t%d\n",
+ sq_ctx->vfi_lso_mps, sq_ctx->vfi_lso_vlan0_ins_ena);
+ seq_printf(m, "W9: vfi_lso_vlan1_ins_ena\t%d\nW9: vfi_lso_vld \t\t%d\n\n",
+ sq_ctx->vfi_lso_vlan1_ins_ena, sq_ctx->vfi_lso_vld);
+
+ seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n",
+ (u64)sq_ctx->scm_lso_rem);
+ seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs);
+ seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts);
+ seq_printf(m, "W14: dropped_octs \t\t%llu\n\n",
+ (u64)sq_ctx->dropped_octs);
+ seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n",
+ (u64)sq_ctx->dropped_pkts);
+}
+
+/* Dumps given nix_sq's context */
+static void print_nix_sq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
+{
+ struct nix_sq_ctx_s *sq_ctx = &rsp->sq;
+ struct nix_hw *nix_hw = m->private;
+ struct rvu *rvu = nix_hw->rvu;
+
+ if (!is_rvu_otx2(rvu)) {
+ print_nix_cn10k_sq_ctx(m, (struct nix_cn10k_sq_ctx_s *)sq_ctx);
+ return;
+ }
+ seq_printf(m, "W0: sqe_way_mask \t\t%d\nW0: cq \t\t\t\t%d\n",
+ sq_ctx->sqe_way_mask, sq_ctx->cq);
+ seq_printf(m, "W0: sdp_mcast \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
+ sq_ctx->sdp_mcast, sq_ctx->substream);
+ seq_printf(m, "W0: qint_idx \t\t\t%d\nW0: ena \t\t\t%d\n\n",
+ sq_ctx->qint_idx, sq_ctx->ena);
+
+ seq_printf(m, "W1: sqb_count \t\t\t%d\nW1: default_chan \t\t%d\n",
+ sq_ctx->sqb_count, sq_ctx->default_chan);
+ seq_printf(m, "W1: smq_rr_quantum \t\t%d\nW1: sso_ena \t\t\t%d\n",
+ sq_ctx->smq_rr_quantum, sq_ctx->sso_ena);
+ seq_printf(m, "W1: xoff \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: smq\t\t\t\t%d\n\n",
+ sq_ctx->xoff, sq_ctx->cq_ena, sq_ctx->smq);
+
+ seq_printf(m, "W2: sqe_stype \t\t\t%d\nW2: sq_int_ena \t\t\t%d\n",
+ sq_ctx->sqe_stype, sq_ctx->sq_int_ena);
+ seq_printf(m, "W2: sq_int \t\t\t%d\nW2: sqb_aura \t\t\t%d\n",
+ sq_ctx->sq_int, sq_ctx->sqb_aura);
+ seq_printf(m, "W2: smq_rr_count \t\t%d\n\n", sq_ctx->smq_rr_count);
+
+ seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n",
+ sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend);
+ seq_printf(m, "W3: smenq_next_sqb_vld \t\t%d\nW3: head_offset\t\t\t%d\n",
+ sq_ctx->smenq_next_sqb_vld, sq_ctx->head_offset);
+ seq_printf(m, "W3: smenq_offset\t\t%d\nW3: tail_offset\t\t\t%d\n",
+ sq_ctx->smenq_offset, sq_ctx->tail_offset);
+ seq_printf(m, "W3: smq_lso_segnum \t\t%d\nW3: smq_next_sq\t\t\t%d\n",
+ sq_ctx->smq_lso_segnum, sq_ctx->smq_next_sq);
+ seq_printf(m, "W3: mnq_dis \t\t\t%d\nW3: lmt_dis \t\t\t%d\n",
+ sq_ctx->mnq_dis, sq_ctx->lmt_dis);
+ seq_printf(m, "W3: cq_limit\t\t\t%d\nW3: max_sqe_size\t\t%d\n\n",
+ sq_ctx->cq_limit, sq_ctx->max_sqe_size);
+
+ seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
+ seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
+ seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
+ seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n",
+ sq_ctx->smenq_next_sqb);
+
+ seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb);
+
+ seq_printf(m, "W9: vfi_lso_vld\t\t\t%d\nW9: vfi_lso_vlan1_ins_ena\t%d\n",
+ sq_ctx->vfi_lso_vld, sq_ctx->vfi_lso_vlan1_ins_ena);
+ seq_printf(m, "W9: vfi_lso_vlan0_ins_ena\t%d\nW9: vfi_lso_mps\t\t\t%d\n",
+ sq_ctx->vfi_lso_vlan0_ins_ena, sq_ctx->vfi_lso_mps);
+ seq_printf(m, "W9: vfi_lso_sb\t\t\t%d\nW9: vfi_lso_sizem1\t\t%d\n",
+ sq_ctx->vfi_lso_sb, sq_ctx->vfi_lso_sizem1);
+ seq_printf(m, "W9: vfi_lso_total\t\t%d\n\n", sq_ctx->vfi_lso_total);
+
+ seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n",
+ (u64)sq_ctx->scm_lso_rem);
+ seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs);
+ seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts);
+ seq_printf(m, "W14: dropped_octs \t\t%llu\n\n",
+ (u64)sq_ctx->dropped_octs);
+ seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n",
+ (u64)sq_ctx->dropped_pkts);
+}
+
+static void print_nix_cn10k_rq_ctx(struct seq_file *m,
+ struct nix_cn10k_rq_ctx_s *rq_ctx)
+{
+ seq_printf(m, "W0: ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n",
+ rq_ctx->ena, rq_ctx->sso_ena);
+ seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: ena_wqwd \t\t\t%d\n",
+ rq_ctx->ipsech_ena, rq_ctx->ena_wqwd);
+ seq_printf(m, "W0: cq \t\t\t\t%d\nW0: lenerr_dis \t\t\t%d\n",
+ rq_ctx->cq, rq_ctx->lenerr_dis);
+ seq_printf(m, "W0: csum_il4_dis \t\t%d\nW0: csum_ol4_dis \t\t%d\n",
+ rq_ctx->csum_il4_dis, rq_ctx->csum_ol4_dis);
+ seq_printf(m, "W0: len_il4_dis \t\t%d\nW0: len_il3_dis \t\t%d\n",
+ rq_ctx->len_il4_dis, rq_ctx->len_il3_dis);
+ seq_printf(m, "W0: len_ol4_dis \t\t%d\nW0: len_ol3_dis \t\t%d\n",
+ rq_ctx->len_ol4_dis, rq_ctx->len_ol3_dis);
+ seq_printf(m, "W0: wqe_aura \t\t\t%d\n\n", rq_ctx->wqe_aura);
+
+ seq_printf(m, "W1: spb_aura \t\t\t%d\nW1: lpb_aura \t\t\t%d\n",
+ rq_ctx->spb_aura, rq_ctx->lpb_aura);
+ seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: sso_tt \t\t\t%d\n",
+ rq_ctx->sso_grp, rq_ctx->sso_tt);
+ seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: wqe_caching \t\t%d\n",
+ rq_ctx->pb_caching, rq_ctx->wqe_caching);
+ seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n",
+ rq_ctx->xqe_drop_ena, rq_ctx->spb_drop_ena);
+ seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: pb_stashing \t\t%d\n",
+ rq_ctx->lpb_drop_ena, rq_ctx->pb_stashing);
+ seq_printf(m, "W1: ipsecd_drop_ena \t\t%d\nW1: chi_ena \t\t\t%d\n\n",
+ rq_ctx->ipsecd_drop_ena, rq_ctx->chi_ena);
+
+ seq_printf(m, "W2: band_prof_id \t\t%d\n", rq_ctx->band_prof_id);
+ seq_printf(m, "W2: policer_ena \t\t%d\n", rq_ctx->policer_ena);
+ seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n", rq_ctx->spb_sizem1);
+ seq_printf(m, "W2: wqe_skip \t\t\t%d\nW2: spb_ena \t\t\t%d\n",
+ rq_ctx->wqe_skip, rq_ctx->spb_ena);
+ seq_printf(m, "W2: lpb_sizem1 \t\t\t%d\nW2: first_skip \t\t\t%d\n",
+ rq_ctx->lpb_sizem1, rq_ctx->first_skip);
+ seq_printf(m, "W2: later_skip\t\t\t%d\nW2: xqe_imm_size\t\t%d\n",
+ rq_ctx->later_skip, rq_ctx->xqe_imm_size);
+ seq_printf(m, "W2: xqe_imm_copy \t\t%d\nW2: xqe_hdr_split \t\t%d\n\n",
+ rq_ctx->xqe_imm_copy, rq_ctx->xqe_hdr_split);
+
+ seq_printf(m, "W3: xqe_drop \t\t\t%d\nW3: xqe_pass \t\t\t%d\n",
+ rq_ctx->xqe_drop, rq_ctx->xqe_pass);
+ seq_printf(m, "W3: wqe_pool_drop \t\t%d\nW3: wqe_pool_pass \t\t%d\n",
+ rq_ctx->wqe_pool_drop, rq_ctx->wqe_pool_pass);
+ seq_printf(m, "W3: spb_pool_drop \t\t%d\nW3: spb_pool_pass \t\t%d\n",
+ rq_ctx->spb_pool_drop, rq_ctx->spb_pool_pass);
+ seq_printf(m, "W3: spb_aura_drop \t\t%d\nW3: spb_aura_pass \t\t%d\n\n",
+ rq_ctx->spb_aura_drop, rq_ctx->spb_aura_pass);
+
+ seq_printf(m, "W4: lpb_aura_drop \t\t%d\nW4: lpb_aura_pass \t\t%d\n",
+ rq_ctx->lpb_aura_drop, rq_ctx->lpb_aura_pass);
+ seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW4: lpb_pool_pass \t\t%d\n",
+ rq_ctx->lpb_pool_drop, rq_ctx->lpb_pool_pass);
+ seq_printf(m, "W4: rq_int \t\t\t%d\nW4: rq_int_ena\t\t\t%d\n",
+ rq_ctx->rq_int, rq_ctx->rq_int_ena);
+ seq_printf(m, "W4: qint_idx \t\t\t%d\n\n", rq_ctx->qint_idx);
+
+ seq_printf(m, "W5: ltag \t\t\t%d\nW5: good_utag \t\t\t%d\n",
+ rq_ctx->ltag, rq_ctx->good_utag);
+ seq_printf(m, "W5: bad_utag \t\t\t%d\nW5: flow_tagw \t\t\t%d\n",
+ rq_ctx->bad_utag, rq_ctx->flow_tagw);
+ seq_printf(m, "W5: ipsec_vwqe \t\t\t%d\nW5: vwqe_ena \t\t\t%d\n",
+ rq_ctx->ipsec_vwqe, rq_ctx->vwqe_ena);
+ seq_printf(m, "W5: vwqe_wait \t\t\t%d\nW5: max_vsize_exp\t\t%d\n",
+ rq_ctx->vwqe_wait, rq_ctx->max_vsize_exp);
+ seq_printf(m, "W5: vwqe_skip \t\t\t%d\n\n", rq_ctx->vwqe_skip);
+
+ seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs);
+ seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts);
+ seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs);
+ seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts);
+ seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts);
+}
+
+/* Dumps given nix_rq's context */
+static void print_nix_rq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
+{
+ struct nix_rq_ctx_s *rq_ctx = &rsp->rq;
+ struct nix_hw *nix_hw = m->private;
+ struct rvu *rvu = nix_hw->rvu;
+
+ if (!is_rvu_otx2(rvu)) {
+ print_nix_cn10k_rq_ctx(m, (struct nix_cn10k_rq_ctx_s *)rq_ctx);
+ return;
+ }
+
+ seq_printf(m, "W0: wqe_aura \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
+ rq_ctx->wqe_aura, rq_ctx->substream);
+ seq_printf(m, "W0: cq \t\t\t\t%d\nW0: ena_wqwd \t\t\t%d\n",
+ rq_ctx->cq, rq_ctx->ena_wqwd);
+ seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n",
+ rq_ctx->ipsech_ena, rq_ctx->sso_ena);
+ seq_printf(m, "W0: ena \t\t\t%d\n\n", rq_ctx->ena);
+
+ seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n",
+ rq_ctx->lpb_drop_ena, rq_ctx->spb_drop_ena);
+ seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: wqe_caching \t\t%d\n",
+ rq_ctx->xqe_drop_ena, rq_ctx->wqe_caching);
+ seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: sso_tt \t\t\t%d\n",
+ rq_ctx->pb_caching, rq_ctx->sso_tt);
+ seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: lpb_aura \t\t\t%d\n",
+ rq_ctx->sso_grp, rq_ctx->lpb_aura);
+ seq_printf(m, "W1: spb_aura \t\t\t%d\n\n", rq_ctx->spb_aura);
+
+ seq_printf(m, "W2: xqe_hdr_split \t\t%d\nW2: xqe_imm_copy \t\t%d\n",
+ rq_ctx->xqe_hdr_split, rq_ctx->xqe_imm_copy);
+ seq_printf(m, "W2: xqe_imm_size \t\t%d\nW2: later_skip \t\t\t%d\n",
+ rq_ctx->xqe_imm_size, rq_ctx->later_skip);
+ seq_printf(m, "W2: first_skip \t\t\t%d\nW2: lpb_sizem1 \t\t\t%d\n",
+ rq_ctx->first_skip, rq_ctx->lpb_sizem1);
+ seq_printf(m, "W2: spb_ena \t\t\t%d\nW2: wqe_skip \t\t\t%d\n",
+ rq_ctx->spb_ena, rq_ctx->wqe_skip);
+ seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n\n", rq_ctx->spb_sizem1);
+
+ seq_printf(m, "W3: spb_pool_pass \t\t%d\nW3: spb_pool_drop \t\t%d\n",
+ rq_ctx->spb_pool_pass, rq_ctx->spb_pool_drop);
+ seq_printf(m, "W3: spb_aura_pass \t\t%d\nW3: spb_aura_drop \t\t%d\n",
+ rq_ctx->spb_aura_pass, rq_ctx->spb_aura_drop);
+ seq_printf(m, "W3: wqe_pool_pass \t\t%d\nW3: wqe_pool_drop \t\t%d\n",
+ rq_ctx->wqe_pool_pass, rq_ctx->wqe_pool_drop);
+ seq_printf(m, "W3: xqe_pass \t\t\t%d\nW3: xqe_drop \t\t\t%d\n\n",
+ rq_ctx->xqe_pass, rq_ctx->xqe_drop);
+
+ seq_printf(m, "W4: qint_idx \t\t\t%d\nW4: rq_int_ena \t\t\t%d\n",
+ rq_ctx->qint_idx, rq_ctx->rq_int_ena);
+ seq_printf(m, "W4: rq_int \t\t\t%d\nW4: lpb_pool_pass \t\t%d\n",
+ rq_ctx->rq_int, rq_ctx->lpb_pool_pass);
+ seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW4: lpb_aura_pass \t\t%d\n",
+ rq_ctx->lpb_pool_drop, rq_ctx->lpb_aura_pass);
+ seq_printf(m, "W4: lpb_aura_drop \t\t%d\n\n", rq_ctx->lpb_aura_drop);
+
+ seq_printf(m, "W5: flow_tagw \t\t\t%d\nW5: bad_utag \t\t\t%d\n",
+ rq_ctx->flow_tagw, rq_ctx->bad_utag);
+ seq_printf(m, "W5: good_utag \t\t\t%d\nW5: ltag \t\t\t%d\n\n",
+ rq_ctx->good_utag, rq_ctx->ltag);
+
+ seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs);
+ seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts);
+ seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs);
+ seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts);
+ seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts);
+}
+
+/* Dumps given nix_cq's context */
+static void print_nix_cq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
+{
+ struct nix_cq_ctx_s *cq_ctx = &rsp->cq;
+
+ seq_printf(m, "W0: base \t\t\t%llx\n\n", cq_ctx->base);
+
+ seq_printf(m, "W1: wrptr \t\t\t%llx\n", (u64)cq_ctx->wrptr);
+ seq_printf(m, "W1: avg_con \t\t\t%d\nW1: cint_idx \t\t\t%d\n",
+ cq_ctx->avg_con, cq_ctx->cint_idx);
+ seq_printf(m, "W1: cq_err \t\t\t%d\nW1: qint_idx \t\t\t%d\n",
+ cq_ctx->cq_err, cq_ctx->qint_idx);
+ seq_printf(m, "W1: bpid \t\t\t%d\nW1: bp_ena \t\t\t%d\n\n",
+ cq_ctx->bpid, cq_ctx->bp_ena);
+
+ seq_printf(m, "W2: update_time \t\t%d\nW2: avg_level \t\t\t%d\n",
+ cq_ctx->update_time, cq_ctx->avg_level);
+ seq_printf(m, "W2: head \t\t\t%d\nW2: tail \t\t\t%d\n\n",
+ cq_ctx->head, cq_ctx->tail);
+
+ seq_printf(m, "W3: cq_err_int_ena \t\t%d\nW3: cq_err_int \t\t\t%d\n",
+ cq_ctx->cq_err_int_ena, cq_ctx->cq_err_int);
+ seq_printf(m, "W3: qsize \t\t\t%d\nW3: caching \t\t\t%d\n",
+ cq_ctx->qsize, cq_ctx->caching);
+ seq_printf(m, "W3: substream \t\t\t0x%03x\nW3: ena \t\t\t%d\n",
+ cq_ctx->substream, cq_ctx->ena);
+ seq_printf(m, "W3: drop_ena \t\t\t%d\nW3: drop \t\t\t%d\n",
+ cq_ctx->drop_ena, cq_ctx->drop);
+ seq_printf(m, "W3: bp \t\t\t\t%d\n\n", cq_ctx->bp);
+}
+
+static int rvu_dbg_nix_queue_ctx_display(struct seq_file *filp,
+ void *unused, int ctype)
+{
+ void (*print_nix_ctx)(struct seq_file *filp,
+ struct nix_aq_enq_rsp *rsp) = NULL;
+ struct nix_hw *nix_hw = filp->private;
+ struct rvu *rvu = nix_hw->rvu;
+ struct nix_aq_enq_req aq_req;
+ struct nix_aq_enq_rsp rsp;
+ char *ctype_string = NULL;
+ int qidx, rc, max_id = 0;
+ struct rvu_pfvf *pfvf;
+ int nixlf, id, all;
+ u16 pcifunc;
+
+ switch (ctype) {
+ case NIX_AQ_CTYPE_CQ:
+ nixlf = rvu->rvu_dbg.nix_cq_ctx.lf;
+ id = rvu->rvu_dbg.nix_cq_ctx.id;
+ all = rvu->rvu_dbg.nix_cq_ctx.all;
+ break;
+
+ case NIX_AQ_CTYPE_SQ:
+ nixlf = rvu->rvu_dbg.nix_sq_ctx.lf;
+ id = rvu->rvu_dbg.nix_sq_ctx.id;
+ all = rvu->rvu_dbg.nix_sq_ctx.all;
+ break;
+
+ case NIX_AQ_CTYPE_RQ:
+ nixlf = rvu->rvu_dbg.nix_rq_ctx.lf;
+ id = rvu->rvu_dbg.nix_rq_ctx.id;
+ all = rvu->rvu_dbg.nix_rq_ctx.all;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
+ return -EINVAL;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ if (ctype == NIX_AQ_CTYPE_SQ && !pfvf->sq_ctx) {
+ seq_puts(filp, "SQ context is not initialized\n");
+ return -EINVAL;
+ } else if (ctype == NIX_AQ_CTYPE_RQ && !pfvf->rq_ctx) {
+ seq_puts(filp, "RQ context is not initialized\n");
+ return -EINVAL;
+ } else if (ctype == NIX_AQ_CTYPE_CQ && !pfvf->cq_ctx) {
+ seq_puts(filp, "CQ context is not initialized\n");
+ return -EINVAL;
+ }
+
+ if (ctype == NIX_AQ_CTYPE_SQ) {
+ max_id = pfvf->sq_ctx->qsize;
+ ctype_string = "sq";
+ print_nix_ctx = print_nix_sq_ctx;
+ } else if (ctype == NIX_AQ_CTYPE_RQ) {
+ max_id = pfvf->rq_ctx->qsize;
+ ctype_string = "rq";
+ print_nix_ctx = print_nix_rq_ctx;
+ } else if (ctype == NIX_AQ_CTYPE_CQ) {
+ max_id = pfvf->cq_ctx->qsize;
+ ctype_string = "cq";
+ print_nix_ctx = print_nix_cq_ctx;
+ }
+
+ memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
+ aq_req.hdr.pcifunc = pcifunc;
+ aq_req.ctype = ctype;
+ aq_req.op = NIX_AQ_INSTOP_READ;
+ if (all)
+ id = 0;
+ else
+ max_id = id + 1;
+ for (qidx = id; qidx < max_id; qidx++) {
+ aq_req.qidx = qidx;
+ seq_printf(filp, "=====%s_ctx for nixlf:%d and qidx:%d is=====\n",
+ ctype_string, nixlf, aq_req.qidx);
+ rc = rvu_mbox_handler_nix_aq_enq(rvu, &aq_req, &rsp);
+ if (rc) {
+ seq_puts(filp, "Failed to read the context\n");
+ return -EINVAL;
+ }
+ print_nix_ctx(filp, &rsp);
+ }
+ return 0;
+}
+
+static int write_nix_queue_ctx(struct rvu *rvu, bool all, int nixlf,
+ int id, int ctype, char *ctype_string,
+ struct seq_file *m)
+{
+ struct nix_hw *nix_hw = m->private;
+ struct rvu_pfvf *pfvf;
+ int max_id = 0;
+ u16 pcifunc;
+
+ if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
+ return -EINVAL;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+
+ if (ctype == NIX_AQ_CTYPE_SQ) {
+ if (!pfvf->sq_ctx) {
+ dev_warn(rvu->dev, "SQ context is not initialized\n");
+ return -EINVAL;
+ }
+ max_id = pfvf->sq_ctx->qsize;
+ } else if (ctype == NIX_AQ_CTYPE_RQ) {
+ if (!pfvf->rq_ctx) {
+ dev_warn(rvu->dev, "RQ context is not initialized\n");
+ return -EINVAL;
+ }
+ max_id = pfvf->rq_ctx->qsize;
+ } else if (ctype == NIX_AQ_CTYPE_CQ) {
+ if (!pfvf->cq_ctx) {
+ dev_warn(rvu->dev, "CQ context is not initialized\n");
+ return -EINVAL;
+ }
+ max_id = pfvf->cq_ctx->qsize;
+ }
+
+ if (id < 0 || id >= max_id) {
+ dev_warn(rvu->dev, "Invalid %s_ctx, valid range is 0-%d\n",
+ ctype_string, max_id - 1);
+ return -EINVAL;
+ }
+ switch (ctype) {
+ case NIX_AQ_CTYPE_CQ:
+ rvu->rvu_dbg.nix_cq_ctx.lf = nixlf;
+ rvu->rvu_dbg.nix_cq_ctx.id = id;
+ rvu->rvu_dbg.nix_cq_ctx.all = all;
+ break;
+
+ case NIX_AQ_CTYPE_SQ:
+ rvu->rvu_dbg.nix_sq_ctx.lf = nixlf;
+ rvu->rvu_dbg.nix_sq_ctx.id = id;
+ rvu->rvu_dbg.nix_sq_ctx.all = all;
+ break;
+
+ case NIX_AQ_CTYPE_RQ:
+ rvu->rvu_dbg.nix_rq_ctx.lf = nixlf;
+ rvu->rvu_dbg.nix_rq_ctx.id = id;
+ rvu->rvu_dbg.nix_rq_ctx.all = all;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static ssize_t rvu_dbg_nix_queue_ctx_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos,
+ int ctype)
+{
+ struct seq_file *m = filp->private_data;
+ struct nix_hw *nix_hw = m->private;
+ struct rvu *rvu = nix_hw->rvu;
+ char *cmd_buf, *ctype_string;
+ int nixlf, id = 0, ret;
+ bool all = false;
+
+ if ((*ppos != 0) || !count)
+ return -EINVAL;
+
+ switch (ctype) {
+ case NIX_AQ_CTYPE_SQ:
+ ctype_string = "sq";
+ break;
+ case NIX_AQ_CTYPE_RQ:
+ ctype_string = "rq";
+ break;
+ case NIX_AQ_CTYPE_CQ:
+ ctype_string = "cq";
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ cmd_buf = kzalloc(count + 1, GFP_KERNEL);
+ if (!cmd_buf)
+ return -ENOMEM;
+
+ ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer,
+ &nixlf, &id, &all);
+ if (ret < 0) {
+ dev_info(rvu->dev,
+ "Usage: echo <nixlf> <%s number|all> > %s_ctx\n",
+ ctype_string, ctype_string);
+ goto done;
+ } else {
+ ret = write_nix_queue_ctx(rvu, all, nixlf, id, ctype,
+ ctype_string, m);
+ }
+done:
+ kfree(cmd_buf);
+ return ret ? ret : count;
+}
+
+static ssize_t rvu_dbg_nix_sq_ctx_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
+ NIX_AQ_CTYPE_SQ);
+}
+
+static int rvu_dbg_nix_sq_ctx_display(struct seq_file *filp, void *unused)
+{
+ return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_SQ);
+}
+
+RVU_DEBUG_SEQ_FOPS(nix_sq_ctx, nix_sq_ctx_display, nix_sq_ctx_write);
+
+static ssize_t rvu_dbg_nix_rq_ctx_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
+ NIX_AQ_CTYPE_RQ);
+}
+
+static int rvu_dbg_nix_rq_ctx_display(struct seq_file *filp, void *unused)
+{
+ return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_RQ);
+}
+
+RVU_DEBUG_SEQ_FOPS(nix_rq_ctx, nix_rq_ctx_display, nix_rq_ctx_write);
+
+static ssize_t rvu_dbg_nix_cq_ctx_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
+ NIX_AQ_CTYPE_CQ);
+}
+
+static int rvu_dbg_nix_cq_ctx_display(struct seq_file *filp, void *unused)
+{
+ return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_CQ);
+}
+
+RVU_DEBUG_SEQ_FOPS(nix_cq_ctx, nix_cq_ctx_display, nix_cq_ctx_write);
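+
+/* Usage sketch for the NIX queue context files (created per NIX block
+ * in rvu_dbg_nix_init(), i.e. under the "nix" or "nix1" directory of
+ * the debugfs root):
+ *
+ *   echo "0 all" > nix/sq_ctx     # NIX LF 0, every initialized SQ
+ *   cat nix/sq_ctx
+ *   echo "0 5" > nix/cq_ctx       # NIX LF 0, CQ 5 only
+ *   cat nix/cq_ctx
+ *
+ * The "<nixlf> <id|all>" grammar parsed by parse_cmd_buffer_ctx()
+ * applies to sq_ctx, rq_ctx and cq_ctx alike.
+ */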
+
+static void print_nix_qctx_qsize(struct seq_file *filp, int qsize,
+ unsigned long *bmap, char *qtype)
+{
+ char *buf;
+
+ buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!buf)
+ return;
+
+ bitmap_print_to_pagebuf(false, buf, bmap, qsize);
+ seq_printf(filp, "%s context count : %d\n", qtype, qsize);
+ seq_printf(filp, "%s context ena/dis bitmap : %s\n",
+ qtype, buf);
+ kfree(buf);
+}
+
+static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf)
+{
+ if (!pfvf->cq_ctx)
+ seq_puts(filp, "cq context is not initialized\n");
+ else
+ print_nix_qctx_qsize(filp, pfvf->cq_ctx->qsize, pfvf->cq_bmap,
+ "cq");
+
+ if (!pfvf->rq_ctx)
+ seq_puts(filp, "rq context is not initialized\n");
+ else
+ print_nix_qctx_qsize(filp, pfvf->rq_ctx->qsize, pfvf->rq_bmap,
+ "rq");
+
+ if (!pfvf->sq_ctx)
+ seq_puts(filp, "sq context is not initialized\n");
+ else
+ print_nix_qctx_qsize(filp, pfvf->sq_ctx->qsize, pfvf->sq_bmap,
+ "sq");
+}
+
+static ssize_t rvu_dbg_nix_qsize_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return rvu_dbg_qsize_write(filp, buffer, count, ppos,
+ BLKTYPE_NIX);
+}
+
+static int rvu_dbg_nix_qsize_display(struct seq_file *filp, void *unused)
+{
+ return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NIX);
+}
+
+RVU_DEBUG_SEQ_FOPS(nix_qsize, nix_qsize_display, nix_qsize_write);
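+
+/* The NIX 'qsize' file reuses rvu_dbg_qsize_write()/display(); the NIX
+ * block it targets is derived from the parent directory name ("nix"
+ * for NIX0, "nix1" for NIX1), for example:
+ *
+ *   echo <nix-lf> > nix1/qsize
+ *   cat nix1/qsize               # per-LF cq/rq/sq counts and bitmaps
+ */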
+
+static void print_band_prof_ctx(struct seq_file *m,
+ struct nix_bandprof_s *prof)
+{
+ char *str;
+
+ switch (prof->pc_mode) {
+ case NIX_RX_PC_MODE_VLAN:
+ str = "VLAN";
+ break;
+ case NIX_RX_PC_MODE_DSCP:
+ str = "DSCP";
+ break;
+ case NIX_RX_PC_MODE_GEN:
+ str = "Generic";
+ break;
+ case NIX_RX_PC_MODE_RSVD:
+ str = "Reserved";
+ break;
+ }
+ seq_printf(m, "W0: pc_mode\t\t%s\n", str);
+ str = (prof->icolor == 3) ? "Color blind" :
+ (prof->icolor == 0) ? "Green" :
+ (prof->icolor == 1) ? "Yellow" : "Red";
+ seq_printf(m, "W0: icolor\t\t%s\n", str);
+ seq_printf(m, "W0: tnl_ena\t\t%d\n", prof->tnl_ena);
+ seq_printf(m, "W0: peir_exponent\t%d\n", prof->peir_exponent);
+ seq_printf(m, "W0: pebs_exponent\t%d\n", prof->pebs_exponent);
+ seq_printf(m, "W0: cir_exponent\t%d\n", prof->cir_exponent);
+ seq_printf(m, "W0: cbs_exponent\t%d\n", prof->cbs_exponent);
+ seq_printf(m, "W0: peir_mantissa\t%d\n", prof->peir_mantissa);
+ seq_printf(m, "W0: pebs_mantissa\t%d\n", prof->pebs_mantissa);
+ seq_printf(m, "W0: cir_mantissa\t%d\n", prof->cir_mantissa);
+
+ seq_printf(m, "W1: cbs_mantissa\t%d\n", prof->cbs_mantissa);
+ str = (prof->lmode == 0) ? "byte" : "packet";
+ seq_printf(m, "W1: lmode\t\t%s\n", str);
+ seq_printf(m, "W1: l_select\t\t%d\n", prof->l_sellect);
+ seq_printf(m, "W1: rdiv\t\t%d\n", prof->rdiv);
+ seq_printf(m, "W1: adjust_exponent\t%d\n", prof->adjust_exponent);
+ seq_printf(m, "W1: adjust_mantissa\t%d\n", prof->adjust_mantissa);
+ str = (prof->gc_action == 0) ? "PASS" :
+ (prof->gc_action == 1) ? "DROP" : "RED";
+ seq_printf(m, "W1: gc_action\t\t%s\n", str);
+ str = (prof->yc_action == 0) ? "PASS" :
+ (prof->yc_action == 1) ? "DROP" : "RED";
+ seq_printf(m, "W1: yc_action\t\t%s\n", str);
+ str = (prof->rc_action == 0) ? "PASS" :
+ (prof->rc_action == 1) ? "DROP" : "RED";
+ seq_printf(m, "W1: rc_action\t\t%s\n", str);
+ seq_printf(m, "W1: meter_algo\t\t%d\n", prof->meter_algo);
+ seq_printf(m, "W1: band_prof_id\t%d\n", prof->band_prof_id);
+ seq_printf(m, "W1: hl_en\t\t%d\n", prof->hl_en);
+
+ seq_printf(m, "W2: ts\t\t\t%lld\n", (u64)prof->ts);
+ seq_printf(m, "W3: pe_accum\t\t%d\n", prof->pe_accum);
+ seq_printf(m, "W3: c_accum\t\t%d\n", prof->c_accum);
+ seq_printf(m, "W4: green_pkt_pass\t%lld\n",
+ (u64)prof->green_pkt_pass);
+ seq_printf(m, "W5: yellow_pkt_pass\t%lld\n",
+ (u64)prof->yellow_pkt_pass);
+ seq_printf(m, "W6: red_pkt_pass\t%lld\n", (u64)prof->red_pkt_pass);
+ seq_printf(m, "W7: green_octs_pass\t%lld\n",
+ (u64)prof->green_octs_pass);
+ seq_printf(m, "W8: yellow_octs_pass\t%lld\n",
+ (u64)prof->yellow_octs_pass);
+ seq_printf(m, "W9: red_octs_pass\t%lld\n", (u64)prof->red_octs_pass);
+ seq_printf(m, "W10: green_pkt_drop\t%lld\n",
+ (u64)prof->green_pkt_drop);
+ seq_printf(m, "W11: yellow_pkt_drop\t%lld\n",
+ (u64)prof->yellow_pkt_drop);
+ seq_printf(m, "W12: red_pkt_drop\t%lld\n", (u64)prof->red_pkt_drop);
+ seq_printf(m, "W13: green_octs_drop\t%lld\n",
+ (u64)prof->green_octs_drop);
+ seq_printf(m, "W14: yellow_octs_drop\t%lld\n",
+ (u64)prof->yellow_octs_drop);
+ seq_printf(m, "W15: red_octs_drop\t%lld\n", (u64)prof->red_octs_drop);
+ seq_puts(m, "==============================\n");
+}
+
+static int rvu_dbg_nix_band_prof_ctx_display(struct seq_file *m, void *unused)
+{
+ struct nix_hw *nix_hw = m->private;
+ struct nix_cn10k_aq_enq_req aq_req;
+ struct nix_cn10k_aq_enq_rsp aq_rsp;
+ struct rvu *rvu = nix_hw->rvu;
+ struct nix_ipolicer *ipolicer;
+ int layer, prof_idx, idx, rc;
+ u16 pcifunc;
+ char *str;
+
+ /* Ingress policers do not exist on all platforms */
+ if (!nix_hw->ipolicer)
+ return 0;
+
+ for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
+ if (layer == BAND_PROF_INVAL_LAYER)
+ continue;
+ str = (layer == BAND_PROF_LEAF_LAYER) ? "Leaf" :
+ (layer == BAND_PROF_MID_LAYER) ? "Mid" : "Top";
+
+ seq_printf(m, "\n%s bandwidth profiles\n", str);
+ seq_puts(m, "=======================\n");
+
+ ipolicer = &nix_hw->ipolicer[layer];
+
+ for (idx = 0; idx < ipolicer->band_prof.max; idx++) {
+ if (is_rsrc_free(&ipolicer->band_prof, idx))
+ continue;
+
+ prof_idx = (idx & 0x3FFF) | (layer << 14);
+ rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp,
+ 0x00, NIX_AQ_CTYPE_BANDPROF,
+ prof_idx);
+ if (rc) {
+ dev_err(rvu->dev,
+ "%s: Failed to fetch context of %s profile %d, err %d\n",
+ __func__, str, idx, rc);
+ return 0;
+ }
+ seq_printf(m, "\n%s bandwidth profile:: %d\n", str, idx);
+ pcifunc = ipolicer->pfvf_map[idx];
+ if (!(pcifunc & RVU_PFVF_FUNC_MASK))
+ seq_printf(m, "Allocated to :: PF %d\n",
+ rvu_get_pf(pcifunc));
+ else
+ seq_printf(m, "Allocated to :: PF %d VF %d\n",
+ rvu_get_pf(pcifunc),
+ (pcifunc & RVU_PFVF_FUNC_MASK) - 1);
+ print_band_prof_ctx(m, &aq_rsp.prof);
+ }
+ }
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(nix_band_prof_ctx, nix_band_prof_ctx_display, NULL);
+
+static int rvu_dbg_nix_band_prof_rsrc_display(struct seq_file *m, void *unused)
+{
+ struct nix_hw *nix_hw = m->private;
+ struct nix_ipolicer *ipolicer;
+ int layer;
+ char *str;
+
+ /* Ingress policers do not exist on all platforms */
+ if (!nix_hw->ipolicer)
+ return 0;
+
+ seq_puts(m, "\nBandwidth profile resource free count\n");
+ seq_puts(m, "=====================================\n");
+ for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
+ if (layer == BAND_PROF_INVAL_LAYER)
+ continue;
+ str = (layer == BAND_PROF_LEAF_LAYER) ? "Leaf" :
+ (layer == BAND_PROF_MID_LAYER) ? "Mid " : "Top ";
+
+ ipolicer = &nix_hw->ipolicer[layer];
+ seq_printf(m, "%s :: Max: %4d Free: %4d\n", str,
+ ipolicer->band_prof.max,
+ rvu_rsrc_free_count(&ipolicer->band_prof));
+ }
+ seq_puts(m, "=====================================\n");
+
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(nix_band_prof_rsrc, nix_band_prof_rsrc_display, NULL);
+
+static void rvu_dbg_nix_init(struct rvu *rvu, int blkaddr)
+{
+ struct nix_hw *nix_hw;
+
+ if (!is_block_implemented(rvu->hw, blkaddr))
+ return;
+
+ if (blkaddr == BLKADDR_NIX0) {
+ rvu->rvu_dbg.nix = debugfs_create_dir("nix", rvu->rvu_dbg.root);
+ nix_hw = &rvu->hw->nix[0];
+ } else {
+ rvu->rvu_dbg.nix = debugfs_create_dir("nix1",
+ rvu->rvu_dbg.root);
+ nix_hw = &rvu->hw->nix[1];
+ }
+
+ debugfs_create_file("sq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
+ &rvu_dbg_nix_sq_ctx_fops);
+ debugfs_create_file("rq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
+ &rvu_dbg_nix_rq_ctx_fops);
+ debugfs_create_file("cq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
+ &rvu_dbg_nix_cq_ctx_fops);
+ debugfs_create_file("ndc_tx_cache", 0600, rvu->rvu_dbg.nix, nix_hw,
+ &rvu_dbg_nix_ndc_tx_cache_fops);
+ debugfs_create_file("ndc_rx_cache", 0600, rvu->rvu_dbg.nix, nix_hw,
+ &rvu_dbg_nix_ndc_rx_cache_fops);
+ debugfs_create_file("ndc_tx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw,
+ &rvu_dbg_nix_ndc_tx_hits_miss_fops);
+ debugfs_create_file("ndc_rx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw,
+ &rvu_dbg_nix_ndc_rx_hits_miss_fops);
+ debugfs_create_file("qsize", 0600, rvu->rvu_dbg.nix, rvu,
+ &rvu_dbg_nix_qsize_fops);
+ debugfs_create_file("ingress_policer_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
+ &rvu_dbg_nix_band_prof_ctx_fops);
+ debugfs_create_file("ingress_policer_rsrc", 0600, rvu->rvu_dbg.nix, nix_hw,
+ &rvu_dbg_nix_band_prof_rsrc_fops);
+}
+
+static void rvu_dbg_npa_init(struct rvu *rvu)
+{
+ rvu->rvu_dbg.npa = debugfs_create_dir("npa", rvu->rvu_dbg.root);
+
+ debugfs_create_file("qsize", 0600, rvu->rvu_dbg.npa, rvu,
+ &rvu_dbg_npa_qsize_fops);
+ debugfs_create_file("aura_ctx", 0600, rvu->rvu_dbg.npa, rvu,
+ &rvu_dbg_npa_aura_ctx_fops);
+ debugfs_create_file("pool_ctx", 0600, rvu->rvu_dbg.npa, rvu,
+ &rvu_dbg_npa_pool_ctx_fops);
+ debugfs_create_file("ndc_cache", 0600, rvu->rvu_dbg.npa, rvu,
+ &rvu_dbg_npa_ndc_cache_fops);
+ debugfs_create_file("ndc_hits_miss", 0600, rvu->rvu_dbg.npa, rvu,
+ &rvu_dbg_npa_ndc_hits_miss_fops);
+}
+
+#define PRINT_CGX_CUML_NIXRX_STATUS(idx, name) \
+ ({ \
+ u64 cnt; \
+ err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx), \
+ NIX_STATS_RX, &(cnt)); \
+ if (!err) \
+ seq_printf(s, "%s: %llu\n", name, cnt); \
+ cnt; \
+ })
+
+#define PRINT_CGX_CUML_NIXTX_STATUS(idx, name) \
+ ({ \
+ u64 cnt; \
+ err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx), \
+ NIX_STATS_TX, &(cnt)); \
+ if (!err) \
+ seq_printf(s, "%s: %llu\n", name, cnt); \
+ cnt; \
+ })
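+
+/* Both macros above are statement expressions that rely on locals of
+ * the calling function: 's' (seq_file), 'rvu', 'cgxd', 'lmac_id' and
+ * 'err'. They evaluate to the cumulative counter value, so the caller
+ * can capture it as well, e.g.:
+ *
+ *   ucast = PRINT_CGX_CUML_NIXRX_STATUS(RX_UCAST, "rx_ucast_frames");
+ *   if (err)
+ *           return err;
+ */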
+
+static int cgx_print_stats(struct seq_file *s, int lmac_id)
+{
+ struct cgx_link_user_info linfo;
+ struct mac_ops *mac_ops;
+ void *cgxd = s->private;
+ u64 ucast, mcast, bcast;
+ int stat = 0, err = 0;
+ u64 tx_stat, rx_stat;
+ struct rvu *rvu;
+
+ rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
+ if (!rvu)
+ return -ENODEV;
+
+ mac_ops = get_mac_ops(cgxd);
+ /* There may be no CGX devices at all */
+ if (!mac_ops)
+ return 0;
+
+ /* Link status */
+ seq_puts(s, "\n=======Link Status======\n\n");
+ err = cgx_get_link_info(cgxd, lmac_id, &linfo);
+ if (err)
+ seq_puts(s, "Failed to read link status\n");
+ seq_printf(s, "\nLink is %s %d Mbps\n\n",
+ linfo.link_up ? "UP" : "DOWN", linfo.speed);
+
+ /* Rx stats */
+ seq_printf(s, "\n=======NIX RX_STATS(%s port level)======\n\n",
+ mac_ops->name);
+ ucast = PRINT_CGX_CUML_NIXRX_STATUS(RX_UCAST, "rx_ucast_frames");
+ if (err)
+ return err;
+ mcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_MCAST, "rx_mcast_frames");
+ if (err)
+ return err;
+ bcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_BCAST, "rx_bcast_frames");
+ if (err)
+ return err;
+ seq_printf(s, "rx_frames: %llu\n", ucast + mcast + bcast);
+ PRINT_CGX_CUML_NIXRX_STATUS(RX_OCTS, "rx_bytes");
+ if (err)
+ return err;
+ PRINT_CGX_CUML_NIXRX_STATUS(RX_DROP, "rx_drops");
+ if (err)
+ return err;
+ PRINT_CGX_CUML_NIXRX_STATUS(RX_ERR, "rx_errors");
+ if (err)
+ return err;
+
+ /* Tx stats */
+ seq_printf(s, "\n=======NIX TX_STATS(%s port level)======\n\n",
+ mac_ops->name);
+ ucast = PRINT_CGX_CUML_NIXTX_STATUS(TX_UCAST, "tx_ucast_frames");
+ if (err)
+ return err;
+ mcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_MCAST, "tx_mcast_frames");
+ if (err)
+ return err;
+ bcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_BCAST, "tx_bcast_frames");
+ if (err)
+ return err;
+ seq_printf(s, "tx_frames: %llu\n", ucast + mcast + bcast);
+ PRINT_CGX_CUML_NIXTX_STATUS(TX_OCTS, "tx_bytes");
+ if (err)
+ return err;
+ PRINT_CGX_CUML_NIXTX_STATUS(TX_DROP, "tx_drops");
+ if (err)
+ return err;
+
+ /* Rx stats */
+ seq_printf(s, "\n=======%s RX_STATS======\n\n", mac_ops->name);
+ while (stat < mac_ops->rx_stats_cnt) {
+ err = mac_ops->mac_get_rx_stats(cgxd, lmac_id, stat, &rx_stat);
+ if (err)
+ return err;
+ if (is_rvu_otx2(rvu))
+ seq_printf(s, "%s: %llu\n", cgx_rx_stats_fields[stat],
+ rx_stat);
+ else
+ seq_printf(s, "%s: %llu\n", rpm_rx_stats_fields[stat],
+ rx_stat);
+ stat++;
+ }
+
+ /* Tx stats */
+ stat = 0;
+ seq_printf(s, "\n=======%s TX_STATS======\n\n", mac_ops->name);
+ while (stat < mac_ops->tx_stats_cnt) {
+ err = mac_ops->mac_get_tx_stats(cgxd, lmac_id, stat, &tx_stat);
+ if (err)
+ return err;
+
+ if (is_rvu_otx2(rvu))
+ seq_printf(s, "%s: %llu\n", cgx_tx_stats_fields[stat],
+ tx_stat);
+ else
+ seq_printf(s, "%s: %llu\n", rpm_tx_stats_fields[stat],
+ tx_stat);
+ stat++;
+ }
+
+ return err;
+}
+
+static int rvu_dbg_derive_lmacid(struct seq_file *filp, int *lmac_id)
+{
+ struct dentry *current_dir;
+ char *buf;
+
+ current_dir = filp->file->f_path.dentry->d_parent;
+ buf = strrchr(current_dir->d_name.name, 'c');
+ if (!buf)
+ return -EINVAL;
+
+ return kstrtoint(buf + 1, 10, lmac_id);
+}
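+
+/* rvu_dbg_derive_lmacid() recovers the LMAC index from the name of the
+ * debugfs directory the file lives in. For example, for a file at
+ * ".../cgx0/lmac2/stats" the parent directory is "lmac2"; strrchr()
+ * finds the last 'c' and the trailing "2" is parsed into *lmac_id.
+ * (The directory names are the ones created by rvu_dbg_cgx_init()
+ * below; "cgx0" is just an illustrative CGX/RPM instance name.)
+ */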
+
+static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused)
+{
+ int lmac_id, err;
+
+ err = rvu_dbg_derive_lmacid(filp, &lmac_id);
+ if (!err)
+ return cgx_print_stats(filp, lmac_id);
+
+ return err;
+}
+
+RVU_DEBUG_SEQ_FOPS(cgx_stat, cgx_stat_display, NULL);
+
+static int cgx_print_dmac_flt(struct seq_file *s, int lmac_id)
+{
+ struct pci_dev *pdev = NULL;
+ void *cgxd = s->private;
+ char *bcast, *mcast;
+ u16 index, domain;
+ u8 dmac[ETH_ALEN];
+ struct rvu *rvu;
+ u64 cfg, mac;
+ int pf;
+
+ rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
+ if (!rvu)
+ return -ENODEV;
+
+ pf = cgxlmac_to_pf(rvu, cgx_get_cgxid(cgxd), lmac_id);
+ domain = 2;
+
+ pdev = pci_get_domain_bus_and_slot(domain, pf + 1, 0);
+ if (!pdev)
+ return 0;
+
+ cfg = cgx_read_dmac_ctrl(cgxd, lmac_id);
+ bcast = cfg & CGX_DMAC_BCAST_MODE ? "ACCEPT" : "REJECT";
+ mcast = cfg & CGX_DMAC_MCAST_MODE ? "ACCEPT" : "REJECT";
+
+ seq_puts(s,
+ "PCI dev RVUPF BROADCAST MULTICAST FILTER-MODE\n");
+ seq_printf(s, "%s PF%d %9s %9s",
+ dev_name(&pdev->dev), pf, bcast, mcast);
+ if (cfg & CGX_DMAC_CAM_ACCEPT)
+ seq_printf(s, "%12s\n\n", "UNICAST");
+ else
+ seq_printf(s, "%16s\n\n", "PROMISCUOUS");
+
+ seq_puts(s, "\nDMAC-INDEX ADDRESS\n");
+
+ for (index = 0 ; index < 32 ; index++) {
+ cfg = cgx_read_dmac_entry(cgxd, index);
+ /* Display enabled dmac entries associated with current lmac */
+ if (lmac_id == FIELD_GET(CGX_DMAC_CAM_ENTRY_LMACID, cfg) &&
+ FIELD_GET(CGX_DMAC_CAM_ADDR_ENABLE, cfg)) {
+ mac = FIELD_GET(CGX_RX_DMAC_ADR_MASK, cfg);
+ u64_to_ether_addr(mac, dmac);
+ seq_printf(s, "%7d %pM\n", index, dmac);
+ }
+ }
+
+ pci_dev_put(pdev);
+ return 0;
+}
+
+static int rvu_dbg_cgx_dmac_flt_display(struct seq_file *filp, void *unused)
+{
+ int err, lmac_id;
+
+ err = rvu_dbg_derive_lmacid(filp, &lmac_id);
+ if (!err)
+ return cgx_print_dmac_flt(filp, lmac_id);
+
+ return err;
+}
+
+RVU_DEBUG_SEQ_FOPS(cgx_dmac_flt, cgx_dmac_flt_display, NULL);
+
+static void rvu_dbg_cgx_init(struct rvu *rvu)
+{
+ struct mac_ops *mac_ops;
+ unsigned long lmac_bmap;
+ int i, lmac_id;
+ char dname[20];
+ void *cgx;
+
+ if (!cgx_get_cgxcnt_max())
+ return;
+
+ mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
+ if (!mac_ops)
+ return;
+
+ rvu->rvu_dbg.cgx_root = debugfs_create_dir(mac_ops->name,
+ rvu->rvu_dbg.root);
+
+ for (i = 0; i < cgx_get_cgxcnt_max(); i++) {
+ cgx = rvu_cgx_pdata(i, rvu);
+ if (!cgx)
+ continue;
+ lmac_bmap = cgx_get_lmac_bmap(cgx);
+ /* cgx debugfs dir */
+ sprintf(dname, "%s%d", mac_ops->name, i);
+ rvu->rvu_dbg.cgx = debugfs_create_dir(dname,
+ rvu->rvu_dbg.cgx_root);
+
+ for_each_set_bit(lmac_id, &lmac_bmap, rvu->hw->lmac_per_cgx) {
+ /* lmac debugfs dir */
+ sprintf(dname, "lmac%d", lmac_id);
+ rvu->rvu_dbg.lmac =
+ debugfs_create_dir(dname, rvu->rvu_dbg.cgx);
+
+ debugfs_create_file("stats", 0600, rvu->rvu_dbg.lmac,
+ cgx, &rvu_dbg_cgx_stat_fops);
+ debugfs_create_file("mac_filter", 0600,
+ rvu->rvu_dbg.lmac, cgx,
+ &rvu_dbg_cgx_dmac_flt_fops);
+ }
+ }
+}
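+
+/* Resulting debugfs layout for the MAC blocks, shown as an example for
+ * a two-LMAC instance (the "cgx" names depend on mac_ops->name, which
+ * is "cgx" on OcteonTx2 and "rpm" on CN10K silicon):
+ *
+ *   <root>/cgx/cgx0/lmac0/{stats,mac_filter}
+ *   <root>/cgx/cgx0/lmac1/{stats,mac_filter}
+ */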
+
+/* NPC debugfs APIs */
+static void rvu_print_npc_mcam_info(struct seq_file *s,
+ u16 pcifunc, int blkaddr)
+{
+ struct rvu *rvu = s->private;
+ int entry_acnt, entry_ecnt;
+ int cntr_acnt, cntr_ecnt;
+
+ rvu_npc_get_mcam_entry_alloc_info(rvu, pcifunc, blkaddr,
+ &entry_acnt, &entry_ecnt);
+ rvu_npc_get_mcam_counter_alloc_info(rvu, pcifunc, blkaddr,
+ &cntr_acnt, &cntr_ecnt);
+ if (!entry_acnt && !cntr_acnt)
+ return;
+
+ if (!(pcifunc & RVU_PFVF_FUNC_MASK))
+ seq_printf(s, "\n\t\t Device \t\t: PF%d\n",
+ rvu_get_pf(pcifunc));
+ else
+ seq_printf(s, "\n\t\t Device \t\t: PF%d VF%d\n",
+ rvu_get_pf(pcifunc),
+ (pcifunc & RVU_PFVF_FUNC_MASK) - 1);
+
+ if (entry_acnt) {
+ seq_printf(s, "\t\t Entries allocated \t: %d\n", entry_acnt);
+ seq_printf(s, "\t\t Entries enabled \t: %d\n", entry_ecnt);
+ }
+ if (cntr_acnt) {
+ seq_printf(s, "\t\t Counters allocated \t: %d\n", cntr_acnt);
+ seq_printf(s, "\t\t Counters enabled \t: %d\n", cntr_ecnt);
+ }
+}
+
+static int rvu_dbg_npc_mcam_info_display(struct seq_file *filp, void *unused)
+{
+ struct rvu *rvu = filp->private;
+ int pf, vf, numvfs, blkaddr;
+ struct npc_mcam *mcam;
+ u16 pcifunc, counters;
+ u64 cfg;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return -ENODEV;
+
+ mcam = &rvu->hw->mcam;
+ counters = rvu->hw->npc_counters;
+
+ seq_puts(filp, "\nNPC MCAM info:\n");
+ /* MCAM keywidth on receive and transmit sides */
+ cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX));
+ cfg = (cfg >> 32) & 0x07;
+ seq_printf(filp, "\t\t RX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ?
+ "112bits" : ((cfg == NPC_MCAM_KEY_X2) ?
+ "224bits" : "448bits"));
+ cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX));
+ cfg = (cfg >> 32) & 0x07;
+ seq_printf(filp, "\t\t TX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ?
+ "112bits" : ((cfg == NPC_MCAM_KEY_X2) ?
+ "224bits" : "448bits"));
+
+ mutex_lock(&mcam->lock);
+ /* MCAM entries */
+ seq_printf(filp, "\n\t\t MCAM entries \t: %d\n", mcam->total_entries);
+ seq_printf(filp, "\t\t Reserved \t: %d\n",
+ mcam->total_entries - mcam->bmap_entries);
+ seq_printf(filp, "\t\t Available \t: %d\n", mcam->bmap_fcnt);
+
+ /* MCAM counters */
+ seq_printf(filp, "\n\t\t MCAM counters \t: %d\n", counters);
+ seq_printf(filp, "\t\t Reserved \t: %d\n",
+ counters - mcam->counters.max);
+ seq_printf(filp, "\t\t Available \t: %d\n",
+ rvu_rsrc_free_count(&mcam->counters));
+
+ if (mcam->bmap_entries == mcam->bmap_fcnt) {
+ mutex_unlock(&mcam->lock);
+ return 0;
+ }
+
+ seq_puts(filp, "\n\t\t Current allocation\n");
+ seq_puts(filp, "\t\t====================\n");
+ for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
+ pcifunc = (pf << RVU_PFVF_PF_SHIFT);
+ rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);
+
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
+ numvfs = (cfg >> 12) & 0xFF;
+ for (vf = 0; vf < numvfs; vf++) {
+ pcifunc = (pf << RVU_PFVF_PF_SHIFT) | (vf + 1);
+ rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);
+ }
+ }
+
+ mutex_unlock(&mcam->lock);
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(npc_mcam_info, npc_mcam_info_display, NULL);
+
+static int rvu_dbg_npc_rx_miss_stats_display(struct seq_file *filp,
+ void *unused)
+{
+ struct rvu *rvu = filp->private;
+ struct npc_mcam *mcam;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return -ENODEV;
+
+ mcam = &rvu->hw->mcam;
+
+ seq_puts(filp, "\nNPC MCAM RX miss action stats\n");
+ seq_printf(filp, "\t\tStat %d: \t%lld\n", mcam->rx_miss_act_cntr,
+ rvu_read64(rvu, blkaddr,
+ NPC_AF_MATCH_STATX(mcam->rx_miss_act_cntr)));
+
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(npc_rx_miss_act, npc_rx_miss_stats_display, NULL);
+
+static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s,
+ struct rvu_npc_mcam_rule *rule)
+{
+ u8 bit;
+
+ for_each_set_bit(bit, (unsigned long *)&rule->features, 64) {
+ seq_printf(s, "\t%s ", npc_get_field_name(bit));
+ switch (bit) {
+ case NPC_LXMB:
+ if (rule->lxmb == 1)
+ seq_puts(s, "\tL2M nibble is set\n");
+ else
+ seq_puts(s, "\tL2B nibble is set\n");
+ break;
+ case NPC_DMAC:
+ seq_printf(s, "%pM ", rule->packet.dmac);
+ seq_printf(s, "mask %pM\n", rule->mask.dmac);
+ break;
+ case NPC_SMAC:
+ seq_printf(s, "%pM ", rule->packet.smac);
+ seq_printf(s, "mask %pM\n", rule->mask.smac);
+ break;
+ case NPC_ETYPE:
+ seq_printf(s, "0x%x ", ntohs(rule->packet.etype));
+ seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.etype));
+ break;
+ case NPC_OUTER_VID:
+ seq_printf(s, "0x%x ", ntohs(rule->packet.vlan_tci));
+ seq_printf(s, "mask 0x%x\n",
+ ntohs(rule->mask.vlan_tci));
+ break;
+ case NPC_INNER_VID:
+ seq_printf(s, "0x%x ", ntohs(rule->packet.vlan_itci));
+ seq_printf(s, "mask 0x%x\n",
+ ntohs(rule->mask.vlan_itci));
+ break;
+ case NPC_TOS:
+ seq_printf(s, "%d ", rule->packet.tos);
+ seq_printf(s, "mask 0x%x\n", rule->mask.tos);
+ break;
+ case NPC_SIP_IPV4:
+ seq_printf(s, "%pI4 ", &rule->packet.ip4src);
+ seq_printf(s, "mask %pI4\n", &rule->mask.ip4src);
+ break;
+ case NPC_DIP_IPV4:
+ seq_printf(s, "%pI4 ", &rule->packet.ip4dst);
+ seq_printf(s, "mask %pI4\n", &rule->mask.ip4dst);
+ break;
+ case NPC_SIP_IPV6:
+ seq_printf(s, "%pI6 ", rule->packet.ip6src);
+ seq_printf(s, "mask %pI6\n", rule->mask.ip6src);
+ break;
+ case NPC_DIP_IPV6:
+ seq_printf(s, "%pI6 ", rule->packet.ip6dst);
+ seq_printf(s, "mask %pI6\n", rule->mask.ip6dst);
+ break;
+ case NPC_IPFRAG_IPV6:
+ seq_printf(s, "0x%x ", rule->packet.next_header);
+ seq_printf(s, "mask 0x%x\n", rule->mask.next_header);
+ break;
+ case NPC_IPFRAG_IPV4:
+ seq_printf(s, "0x%x ", rule->packet.ip_flag);
+ seq_printf(s, "mask 0x%x\n", rule->mask.ip_flag);
+ break;
+ case NPC_SPORT_TCP:
+ case NPC_SPORT_UDP:
+ case NPC_SPORT_SCTP:
+ seq_printf(s, "%d ", ntohs(rule->packet.sport));
+ seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.sport));
+ break;
+ case NPC_DPORT_TCP:
+ case NPC_DPORT_UDP:
+ case NPC_DPORT_SCTP:
+ seq_printf(s, "%d ", ntohs(rule->packet.dport));
+ seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.dport));
+ break;
+ case NPC_IPSEC_SPI:
+ seq_printf(s, "0x%x ", ntohl(rule->packet.spi));
+ seq_printf(s, "mask 0x%x\n", ntohl(rule->mask.spi));
+ break;
+ default:
+ seq_puts(s, "\n");
+ break;
+ }
+ }
+}
+
+static void rvu_dbg_npc_mcam_show_action(struct seq_file *s,
+ struct rvu_npc_mcam_rule *rule)
+{
+ if (is_npc_intf_tx(rule->intf)) {
+ switch (rule->tx_action.op) {
+ case NIX_TX_ACTIONOP_DROP:
+ seq_puts(s, "\taction: Drop\n");
+ break;
+ case NIX_TX_ACTIONOP_UCAST_DEFAULT:
+ seq_puts(s, "\taction: Unicast to default channel\n");
+ break;
+ case NIX_TX_ACTIONOP_UCAST_CHAN:
+ seq_printf(s, "\taction: Unicast to channel %d\n",
+ rule->tx_action.index);
+ break;
+ case NIX_TX_ACTIONOP_MCAST:
+ seq_puts(s, "\taction: Multicast\n");
+ break;
+ case NIX_TX_ACTIONOP_DROP_VIOL:
+ seq_puts(s, "\taction: Lockdown Violation Drop\n");
+ break;
+ default:
+ break;
+ }
+ } else {
+ switch (rule->rx_action.op) {
+ case NIX_RX_ACTIONOP_DROP:
+ seq_puts(s, "\taction: Drop\n");
+ break;
+ case NIX_RX_ACTIONOP_UCAST:
+ seq_printf(s, "\taction: Direct to queue %d\n",
+ rule->rx_action.index);
+ break;
+ case NIX_RX_ACTIONOP_RSS:
+ seq_puts(s, "\taction: RSS\n");
+ break;
+ case NIX_RX_ACTIONOP_UCAST_IPSEC:
+ seq_puts(s, "\taction: Unicast ipsec\n");
+ break;
+ case NIX_RX_ACTIONOP_MCAST:
+ seq_puts(s, "\taction: Multicast\n");
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+static const char *rvu_dbg_get_intf_name(int intf)
+{
+ switch (intf) {
+ case NIX_INTFX_RX(0):
+ return "NIX0_RX";
+ case NIX_INTFX_RX(1):
+ return "NIX1_RX";
+ case NIX_INTFX_TX(0):
+ return "NIX0_TX";
+ case NIX_INTFX_TX(1):
+ return "NIX1_TX";
+ default:
+ break;
+ }
+
+ return "unknown";
+}
+
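+/* 'mcam_rules' debugfs handler: walk the software list of installed MCAM
+ * rules and dump owner, interface, match fields, action, enable state and,
+ * when a counter is attached, the rule hit count.
+ */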
+static int rvu_dbg_npc_mcam_show_rules(struct seq_file *s, void *unused)
+{
+ struct rvu_npc_mcam_rule *iter;
+ struct rvu *rvu = s->private;
+ struct npc_mcam *mcam;
+ int pf, vf = -1;
+ bool enabled;
+ int blkaddr;
+ u16 target;
+ u64 hits;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return 0;
+
+ mcam = &rvu->hw->mcam;
+
+ mutex_lock(&mcam->lock);
+ list_for_each_entry(iter, &mcam->mcam_rules, list) {
+ pf = (iter->owner >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
+ seq_printf(s, "\n\tInstalled by: PF%d ", pf);
+
+ if (iter->owner & RVU_PFVF_FUNC_MASK) {
+ vf = (iter->owner & RVU_PFVF_FUNC_MASK) - 1;
+ seq_printf(s, "VF%d", vf);
+ }
+ seq_puts(s, "\n");
+
+ seq_printf(s, "\tdirection: %s\n", is_npc_intf_rx(iter->intf) ?
+ "RX" : "TX");
+ seq_printf(s, "\tinterface: %s\n",
+ rvu_dbg_get_intf_name(iter->intf));
+ seq_printf(s, "\tmcam entry: %d\n", iter->entry);
+
+ rvu_dbg_npc_mcam_show_flows(s, iter);
+ if (is_npc_intf_rx(iter->intf)) {
+ target = iter->rx_action.pf_func;
+ pf = (target >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
+ seq_printf(s, "\tForward to: PF%d ", pf);
+
+ if (target & RVU_PFVF_FUNC_MASK) {
+ vf = (target & RVU_PFVF_FUNC_MASK) - 1;
+ seq_printf(s, "VF%d", vf);
+ }
+ seq_puts(s, "\n");
+ seq_printf(s, "\tchannel: 0x%x\n", iter->chan);
+ seq_printf(s, "\tchannel_mask: 0x%x\n", iter->chan_mask);
+ }
+
+ rvu_dbg_npc_mcam_show_action(s, iter);
+
+ enabled = is_mcam_entry_enabled(rvu, mcam, blkaddr, iter->entry);
+ seq_printf(s, "\tenabled: %s\n", enabled ? "yes" : "no");
+
+ if (!iter->has_cntr)
+ continue;
+ seq_printf(s, "\tcounter: %d\n", iter->cntr);
+
+ hits = rvu_read64(rvu, blkaddr, NPC_AF_MATCH_STATX(iter->cntr));
+ seq_printf(s, "\thits: %lld\n", hits);
+ }
+ mutex_unlock(&mcam->lock);
+
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(npc_mcam_rules, npc_mcam_show_rules, NULL);
+
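+/* 'exact_entries' debugfs handler: dump the exact match MEM table (per way
+ * and index) followed by the CAM table, printing channel and MAC for each
+ * valid entry.
+ */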
+static int rvu_dbg_npc_exact_show_entries(struct seq_file *s, void *unused)
+{
+ struct npc_exact_table_entry *mem_entry[NPC_EXACT_TBL_MAX_WAYS] = { 0 };
+ struct npc_exact_table_entry *cam_entry;
+ struct npc_exact_table *table;
+ struct rvu *rvu = s->private;
+ int i, j;
+ u8 bitmap = 0;
+
+ table = rvu->hw->table;
+
+ mutex_lock(&table->lock);
+
+ /* Check if there is at least one entry in mem table */
+ if (!table->mem_tbl_entry_cnt)
+ goto dump_cam_table;
+
+ /* Print table headers */
+ seq_puts(s, "\n\tExact Match MEM Table\n");
+ seq_puts(s, "Index\t");
+
+ for (i = 0; i < table->mem_table.ways; i++) {
+ mem_entry[i] = list_first_entry_or_null(&table->lhead_mem_tbl_entry[i],
+ struct npc_exact_table_entry, list);
+
+ seq_printf(s, "Way-%d\t\t\t\t\t", i);
+ }
+
+ seq_puts(s, "\n");
+ for (i = 0; i < table->mem_table.ways; i++)
+ seq_puts(s, "\tChan MAC \t");
+
+ seq_puts(s, "\n\n");
+
+ /* Print mem table entries */
+ for (i = 0; i < table->mem_table.depth; i++) {
+ bitmap = 0;
+ for (j = 0; j < table->mem_table.ways; j++) {
+ if (!mem_entry[j])
+ continue;
+
+ if (mem_entry[j]->index != i)
+ continue;
+
+ bitmap |= BIT(j);
+ }
+
+ /* No valid entries */
+ if (!bitmap)
+ continue;
+
+ seq_printf(s, "%d\t", i);
+ for (j = 0; j < table->mem_table.ways; j++) {
+ if (!(bitmap & BIT(j))) {
+ seq_puts(s, "nil\t\t\t\t\t");
+ continue;
+ }
+
+ seq_printf(s, "0x%x %pM\t\t\t", mem_entry[j]->chan,
+ mem_entry[j]->mac);
+ mem_entry[j] = list_next_entry(mem_entry[j], list);
+ }
+ seq_puts(s, "\n");
+ }
+
+dump_cam_table:
+
+ if (!table->cam_tbl_entry_cnt)
+ goto done;
+
+ seq_puts(s, "\n\tExact Match CAM Table\n");
+ seq_puts(s, "index\tchan\tMAC\n");
+
+ /* Traverse cam table entries */
+ list_for_each_entry(cam_entry, &table->lhead_cam_tbl_entry, list) {
+ seq_printf(s, "%d\t0x%x\t%pM\n", cam_entry->index, cam_entry->chan,
+ cam_entry->mac);
+ }
+
+done:
+ mutex_unlock(&table->lock);
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(npc_exact_entries, npc_exact_show_entries, NULL);
+
+static int rvu_dbg_npc_exact_show_info(struct seq_file *s, void *unused)
+{
+ struct npc_exact_table *table;
+ struct rvu *rvu = s->private;
+ int i;
+
+ table = rvu->hw->table;
+
+ seq_puts(s, "\n\tExact Table Info\n");
+ seq_printf(s, "Exact Match Feature : %s\n",
+ rvu->hw->cap.npc_exact_match_enabled ? "enabled" : "disabled");
+ if (!rvu->hw->cap.npc_exact_match_enabled)
+ return 0;
+
+ seq_puts(s, "\nMCAM Index\tMAC Filter Rules Count\n");
+ for (i = 0; i < table->num_drop_rules; i++)
+ seq_printf(s, "%d\t\t%d\n", i, table->cnt_cmd_rules[i]);
+
+ seq_puts(s, "\nMcam Index\tPromisc Mode Status\n");
+ for (i = 0; i < table->num_drop_rules; i++)
+ seq_printf(s, "%d\t\t%s\n", i, table->promisc_mode[i] ? "on" : "off");
+
+ seq_puts(s, "\n\tMEM Table Info\n");
+ seq_printf(s, "Ways : %d\n", table->mem_table.ways);
+ seq_printf(s, "Depth : %d\n", table->mem_table.depth);
+ seq_printf(s, "Mask : 0x%llx\n", table->mem_table.mask);
+ seq_printf(s, "Hash Mask : 0x%x\n", table->mem_table.hash_mask);
+ seq_printf(s, "Hash Offset : 0x%x\n", table->mem_table.hash_offset);
+
+ seq_puts(s, "\n\tCAM Table Info\n");
+ seq_printf(s, "Depth : %d\n", table->cam_table.depth);
+
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(npc_exact_info, npc_exact_show_info, NULL);
+
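+/* 'exact_drop_cnt' debugfs handler: for each exact match drop rule dump the
+ * owning PF_FUNC, hit counter, matched channel and whether the MCAM entry
+ * is enabled.
+ */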
+static int rvu_dbg_npc_exact_drop_cnt(struct seq_file *s, void *unused)
+{
+ struct npc_exact_table *table;
+ struct rvu *rvu = s->private;
+ struct npc_key_field *field;
+ u16 chan, pcifunc;
+ int blkaddr, i;
+ u64 cfg, cam1;
+ char *str;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ table = rvu->hw->table;
+
+ field = &rvu->hw->mcam.rx_key_fields[NPC_CHAN];
+
+ seq_puts(s, "\n\t Exact Hit on drop status\n");
+ seq_puts(s, "\npcifunc\tmcam_idx\tHits\tchan\tstatus\n");
+
+ for (i = 0; i < table->num_drop_rules; i++) {
+ pcifunc = rvu_npc_exact_drop_rule_to_pcifunc(rvu, i);
+ cfg = rvu_read64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_CFG(i, 0));
+
+ /* channel will be always in keyword 0 */
+ cam1 = rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_W0(i, 0, 1));
+ chan = field->kw_mask[0] & cam1;
+
+ str = (cfg & 1) ? "enabled" : "disabled";
+
+ seq_printf(s, "0x%x\t%d\t\t%llu\t0x%x\t%s\n", pcifunc, i,
+ rvu_read64(rvu, blkaddr,
+ NPC_AF_MATCH_STATX(table->counter_idx[i])),
+ chan, str);
+ }
+
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(npc_exact_drop_cnt, npc_exact_drop_cnt, NULL);
+
+static void rvu_dbg_npc_init(struct rvu *rvu)
+{
+ rvu->rvu_dbg.npc = debugfs_create_dir("npc", rvu->rvu_dbg.root);
+
+ debugfs_create_file("mcam_info", 0444, rvu->rvu_dbg.npc, rvu,
+ &rvu_dbg_npc_mcam_info_fops);
+ debugfs_create_file("mcam_rules", 0444, rvu->rvu_dbg.npc, rvu,
+ &rvu_dbg_npc_mcam_rules_fops);
+
+ debugfs_create_file("rx_miss_act_stats", 0444, rvu->rvu_dbg.npc, rvu,
+ &rvu_dbg_npc_rx_miss_act_fops);
+
+ if (!rvu->hw->cap.npc_exact_match_enabled)
+ return;
+
+ debugfs_create_file("exact_entries", 0444, rvu->rvu_dbg.npc, rvu,
+ &rvu_dbg_npc_exact_entries_fops);
+
+ debugfs_create_file("exact_info", 0444, rvu->rvu_dbg.npc, rvu,
+ &rvu_dbg_npc_exact_info_fops);
+
+ debugfs_create_file("exact_drop_cnt", 0444, rvu->rvu_dbg.npc, rvu,
+ &rvu_dbg_npc_exact_drop_cnt_fops);
+}
+
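+/* Engines are numbered SE [0, max_ses), IE [max_ses, max_ses + max_ies) and
+ * AE [max_ses + max_ies, max_ses + max_ies + max_aes); dump the per-engine
+ * BUSY and FREE status bits for the requested engine type.
+ */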
+static int cpt_eng_sts_display(struct seq_file *filp, u8 eng_type)
+{
+ struct cpt_ctx *ctx = filp->private;
+ u64 busy_sts = 0, free_sts = 0;
+ u32 e_min = 0, e_max = 0, e, i;
+ u16 max_ses, max_ies, max_aes;
+ struct rvu *rvu = ctx->rvu;
+ int blkaddr = ctx->blkaddr;
+ u64 reg;
+
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
+ max_ses = reg & 0xffff;
+ max_ies = (reg >> 16) & 0xffff;
+ max_aes = (reg >> 32) & 0xffff;
+
+ switch (eng_type) {
+ case CPT_AE_TYPE:
+ e_min = max_ses + max_ies;
+ e_max = max_ses + max_ies + max_aes;
+ break;
+ case CPT_SE_TYPE:
+ e_min = 0;
+ e_max = max_ses;
+ break;
+ case CPT_IE_TYPE:
+ e_min = max_ses;
+ e_max = max_ses + max_ies;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ for (e = e_min, i = 0; e < e_max; e++, i++) {
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_STS(e));
+ if (reg & 0x1)
+ busy_sts |= 1ULL << i;
+
+ if (reg & 0x2)
+ free_sts |= 1ULL << i;
+ }
+ seq_printf(filp, "FREE STS : 0x%016llx\n", free_sts);
+ seq_printf(filp, "BUSY STS : 0x%016llx\n", busy_sts);
+
+ return 0;
+}
+
+static int rvu_dbg_cpt_ae_sts_display(struct seq_file *filp, void *unused)
+{
+ return cpt_eng_sts_display(filp, CPT_AE_TYPE);
+}
+
+RVU_DEBUG_SEQ_FOPS(cpt_ae_sts, cpt_ae_sts_display, NULL);
+
+static int rvu_dbg_cpt_se_sts_display(struct seq_file *filp, void *unused)
+{
+ return cpt_eng_sts_display(filp, CPT_SE_TYPE);
+}
+
+RVU_DEBUG_SEQ_FOPS(cpt_se_sts, cpt_se_sts_display, NULL);
+
+static int rvu_dbg_cpt_ie_sts_display(struct seq_file *filp, void *unused)
+{
+ return cpt_eng_sts_display(filp, CPT_IE_TYPE);
+}
+
+RVU_DEBUG_SEQ_FOPS(cpt_ie_sts, cpt_ie_sts_display, NULL);
+
+static int rvu_dbg_cpt_engines_info_display(struct seq_file *filp, void *unused)
+{
+ struct cpt_ctx *ctx = filp->private;
+ u16 max_ses, max_ies, max_aes;
+ struct rvu *rvu = ctx->rvu;
+ int blkaddr = ctx->blkaddr;
+ u32 e_max, e;
+ u64 reg;
+
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
+ max_ses = reg & 0xffff;
+ max_ies = (reg >> 16) & 0xffff;
+ max_aes = (reg >> 32) & 0xffff;
+
+ e_max = max_ses + max_ies + max_aes;
+
+ seq_puts(filp, "===========================================\n");
+ for (e = 0; e < e_max; e++) {
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL2(e));
+ seq_printf(filp, "CPT Engine[%u] Group Enable 0x%02llx\n", e,
+ reg & 0xff);
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_ACTIVE(e));
+ seq_printf(filp, "CPT Engine[%u] Active Info 0x%llx\n", e,
+ reg);
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL(e));
+ seq_printf(filp, "CPT Engine[%u] Control 0x%llx\n", e,
+ reg);
+ seq_puts(filp, "===========================================\n");
+ }
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(cpt_engines_info, cpt_engines_info_display, NULL);
+
+static int rvu_dbg_cpt_lfs_info_display(struct seq_file *filp, void *unused)
+{
+ struct cpt_ctx *ctx = filp->private;
+ int blkaddr = ctx->blkaddr;
+ struct rvu *rvu = ctx->rvu;
+ struct rvu_block *block;
+ struct rvu_hwinfo *hw;
+ u64 reg;
+ u32 lf;
+
+ hw = rvu->hw;
+ block = &hw->block[blkaddr];
+ if (!block->lf.bmap)
+ return -ENODEV;
+
+ seq_puts(filp, "===========================================\n");
+ for (lf = 0; lf < block->lf.max; lf++) {
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(lf));
+ seq_printf(filp, "CPT Lf[%u] CTL 0x%llx\n", lf, reg);
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(lf));
+ seq_printf(filp, "CPT Lf[%u] CTL2 0x%llx\n", lf, reg);
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_PTR_CTL(lf));
+ seq_printf(filp, "CPT Lf[%u] PTR_CTL 0x%llx\n", lf, reg);
+ reg = rvu_read64(rvu, blkaddr, block->lfcfg_reg |
+ (lf << block->lfshift));
+ seq_printf(filp, "CPT Lf[%u] CFG 0x%llx\n", lf, reg);
+ seq_puts(filp, "===========================================\n");
+ }
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(cpt_lfs_info, cpt_lfs_info_display, NULL);
+
+static int rvu_dbg_cpt_err_info_display(struct seq_file *filp, void *unused)
+{
+ struct cpt_ctx *ctx = filp->private;
+ struct rvu *rvu = ctx->rvu;
+ int blkaddr = ctx->blkaddr;
+ u64 reg0, reg1;
+
+ reg0 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(0));
+ reg1 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(1));
+ seq_printf(filp, "CPT_AF_FLTX_INT: 0x%llx 0x%llx\n", reg0, reg1);
+ reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(0));
+ reg1 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(1));
+ seq_printf(filp, "CPT_AF_PSNX_EXE: 0x%llx 0x%llx\n", reg0, reg1);
+ reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_LF(0));
+ seq_printf(filp, "CPT_AF_PSNX_LF: 0x%llx\n", reg0);
+ reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RVU_INT);
+ seq_printf(filp, "CPT_AF_RVU_INT: 0x%llx\n", reg0);
+ reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RAS_INT);
+ seq_printf(filp, "CPT_AF_RAS_INT: 0x%llx\n", reg0);
+ reg0 = rvu_read64(rvu, blkaddr, CPT_AF_EXE_ERR_INFO);
+ seq_printf(filp, "CPT_AF_EXE_ERR_INFO: 0x%llx\n", reg0);
+
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(cpt_err_info, cpt_err_info_display, NULL);
+
+static int rvu_dbg_cpt_pc_display(struct seq_file *filp, void *unused)
+{
+ struct cpt_ctx *ctx = filp->private;
+ struct rvu *rvu = ctx->rvu;
+ int blkaddr = ctx->blkaddr;
+ u64 reg;
+
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_REQ_PC);
+ seq_printf(filp, "CPT instruction requests %llu\n", reg);
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_LATENCY_PC);
+ seq_printf(filp, "CPT instruction latency %llu\n", reg);
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_REQ_PC);
+ seq_printf(filp, "CPT NCB read requests %llu\n", reg);
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_LATENCY_PC);
+ seq_printf(filp, "CPT NCB read latency %llu\n", reg);
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_UC_PC);
+ seq_printf(filp, "CPT read requests caused by UC fills %llu\n", reg);
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_ACTIVE_CYCLES_PC);
+ seq_printf(filp, "CPT active cycles pc %llu\n", reg);
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_CPTCLK_CNT);
+ seq_printf(filp, "CPT clock count pc %llu\n", reg);
+
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(cpt_pc, cpt_pc_display, NULL);
+
+static void rvu_dbg_cpt_init(struct rvu *rvu, int blkaddr)
+{
+ struct cpt_ctx *ctx;
+
+ if (!is_block_implemented(rvu->hw, blkaddr))
+ return;
+
+ if (blkaddr == BLKADDR_CPT0) {
+ rvu->rvu_dbg.cpt = debugfs_create_dir("cpt", rvu->rvu_dbg.root);
+ ctx = &rvu->rvu_dbg.cpt_ctx[0];
+ ctx->blkaddr = BLKADDR_CPT0;
+ ctx->rvu = rvu;
+ } else {
+ rvu->rvu_dbg.cpt = debugfs_create_dir("cpt1",
+ rvu->rvu_dbg.root);
+ ctx = &rvu->rvu_dbg.cpt_ctx[1];
+ ctx->blkaddr = BLKADDR_CPT1;
+ ctx->rvu = rvu;
+ }
+
+ debugfs_create_file("cpt_pc", 0600, rvu->rvu_dbg.cpt, ctx,
+ &rvu_dbg_cpt_pc_fops);
+ debugfs_create_file("cpt_ae_sts", 0600, rvu->rvu_dbg.cpt, ctx,
+ &rvu_dbg_cpt_ae_sts_fops);
+ debugfs_create_file("cpt_se_sts", 0600, rvu->rvu_dbg.cpt, ctx,
+ &rvu_dbg_cpt_se_sts_fops);
+ debugfs_create_file("cpt_ie_sts", 0600, rvu->rvu_dbg.cpt, ctx,
+ &rvu_dbg_cpt_ie_sts_fops);
+ debugfs_create_file("cpt_engines_info", 0600, rvu->rvu_dbg.cpt, ctx,
+ &rvu_dbg_cpt_engines_info_fops);
+ debugfs_create_file("cpt_lfs_info", 0600, rvu->rvu_dbg.cpt, ctx,
+ &rvu_dbg_cpt_lfs_info_fops);
+ debugfs_create_file("cpt_err_info", 0600, rvu->rvu_dbg.cpt, ctx,
+ &rvu_dbg_cpt_err_info_fops);
+}
+
+static const char *rvu_get_dbg_dir_name(struct rvu *rvu)
+{
+ if (!is_rvu_otx2(rvu))
+ return "cn10k";
+ else
+ return "octeontx2";
+}
+
+void rvu_dbg_init(struct rvu *rvu)
+{
+ rvu->rvu_dbg.root = debugfs_create_dir(rvu_get_dbg_dir_name(rvu), NULL);
+
+ debugfs_create_file("rsrc_alloc", 0444, rvu->rvu_dbg.root, rvu,
+ &rvu_dbg_rsrc_status_fops);
+
+ if (!is_rvu_otx2(rvu))
+ debugfs_create_file("lmtst_map_table", 0444, rvu->rvu_dbg.root,
+ rvu, &rvu_dbg_lmtst_map_table_fops);
+
+ if (!cgx_get_cgxcnt_max())
+ goto create;
+
+ if (is_rvu_otx2(rvu))
+ debugfs_create_file("rvu_pf_cgx_map", 0444, rvu->rvu_dbg.root,
+ rvu, &rvu_dbg_rvu_pf_cgx_map_fops);
+ else
+ debugfs_create_file("rvu_pf_rpm_map", 0444, rvu->rvu_dbg.root,
+ rvu, &rvu_dbg_rvu_pf_cgx_map_fops);
+
+create:
+ rvu_dbg_npa_init(rvu);
+ rvu_dbg_nix_init(rvu, BLKADDR_NIX0);
+
+ rvu_dbg_nix_init(rvu, BLKADDR_NIX1);
+ rvu_dbg_cgx_init(rvu);
+ rvu_dbg_npc_init(rvu);
+ rvu_dbg_cpt_init(rvu, BLKADDR_CPT0);
+ rvu_dbg_cpt_init(rvu, BLKADDR_CPT1);
+ rvu_dbg_mcs_init(rvu);
+}
+
+void rvu_dbg_exit(struct rvu *rvu)
+{
+ debugfs_remove_recursive(rvu->rvu_dbg.root);
+}
+
+#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
new file mode 100644
index 000000000..bffe04e6d
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
@@ -0,0 +1,1695 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Admin Function Devlink
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <linux/bitfield.h>
+
+#include "rvu.h"
+#include "rvu_reg.h"
+#include "rvu_struct.h"
+#include "rvu_npc_hash.h"
+
+#define DRV_NAME "octeontx2-af"
+
+static int rvu_report_pair_start(struct devlink_fmsg *fmsg, const char *name)
+{
+ int err;
+
+ err = devlink_fmsg_pair_nest_start(fmsg, name);
+ if (err)
+ return err;
+
+ return devlink_fmsg_obj_nest_start(fmsg);
+}
+
+static int rvu_report_pair_end(struct devlink_fmsg *fmsg)
+{
+ int err;
+
+ err = devlink_fmsg_obj_nest_end(fmsg);
+ if (err)
+ return err;
+
+ return devlink_fmsg_pair_nest_end(fmsg);
+}
+
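+/* Request an AF MSI-X vector, name it and track the allocation so it can
+ * be freed on teardown; returns true on success.
+ */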
+static bool rvu_common_request_irq(struct rvu *rvu, int offset,
+ const char *name, irq_handler_t fn)
+{
+ struct rvu_devlink *rvu_dl = rvu->rvu_dl;
+ int rc;
+
+ sprintf(&rvu->irq_name[offset * NAME_SIZE], "%s", name);
+ rc = request_irq(pci_irq_vector(rvu->pdev, offset), fn, 0,
+ &rvu->irq_name[offset * NAME_SIZE], rvu_dl);
+ if (rc)
+ dev_warn(rvu->dev, "Failed to register %s irq\n", name);
+ else
+ rvu->irq_allocated[offset] = true;
+
+ return rvu->irq_allocated[offset];
+}
+
+static void rvu_nix_intr_work(struct work_struct *work)
+{
+ struct rvu_nix_health_reporters *rvu_nix_health_reporter;
+
+ rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, intr_work);
+ devlink_health_report(rvu_nix_health_reporter->rvu_hw_nix_intr_reporter,
+ "NIX_AF_RVU Error",
+ rvu_nix_health_reporter->nix_event_ctx);
+}
+
+static irqreturn_t rvu_nix_af_rvu_intr_handler(int irq, void *rvu_irq)
+{
+ struct rvu_nix_event_ctx *nix_event_context;
+ struct rvu_devlink *rvu_dl = rvu_irq;
+ struct rvu *rvu;
+ int blkaddr;
+ u64 intr;
+
+ rvu = rvu_dl->rvu;
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return IRQ_NONE;
+
+ nix_event_context = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
+ intr = rvu_read64(rvu, blkaddr, NIX_AF_RVU_INT);
+ nix_event_context->nix_af_rvu_int = intr;
+
+ /* Clear interrupts */
+ rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT, intr);
+ rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1C, ~0ULL);
+ queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_nix_health_reporter->intr_work);
+
+ return IRQ_HANDLED;
+}
+
+static void rvu_nix_gen_work(struct work_struct *work)
+{
+ struct rvu_nix_health_reporters *rvu_nix_health_reporter;
+
+ rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, gen_work);
+ devlink_health_report(rvu_nix_health_reporter->rvu_hw_nix_gen_reporter,
+ "NIX_AF_GEN Error",
+ rvu_nix_health_reporter->nix_event_ctx);
+}
+
+static irqreturn_t rvu_nix_af_rvu_gen_handler(int irq, void *rvu_irq)
+{
+ struct rvu_nix_event_ctx *nix_event_context;
+ struct rvu_devlink *rvu_dl = rvu_irq;
+ struct rvu *rvu;
+ int blkaddr;
+ u64 intr;
+
+ rvu = rvu_dl->rvu;
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return IRQ_NONE;
+
+ nix_event_context = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
+ intr = rvu_read64(rvu, blkaddr, NIX_AF_GEN_INT);
+ nix_event_context->nix_af_rvu_gen = intr;
+
+ /* Clear interrupts */
+ rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT, intr);
+ rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT_ENA_W1C, ~0ULL);
+ queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_nix_health_reporter->gen_work);
+
+ return IRQ_HANDLED;
+}
+
+static void rvu_nix_err_work(struct work_struct *work)
+{
+ struct rvu_nix_health_reporters *rvu_nix_health_reporter;
+
+ rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, err_work);
+ devlink_health_report(rvu_nix_health_reporter->rvu_hw_nix_err_reporter,
+ "NIX_AF_ERR Error",
+ rvu_nix_health_reporter->nix_event_ctx);
+}
+
+static irqreturn_t rvu_nix_af_rvu_err_handler(int irq, void *rvu_irq)
+{
+ struct rvu_nix_event_ctx *nix_event_context;
+ struct rvu_devlink *rvu_dl = rvu_irq;
+ struct rvu *rvu;
+ int blkaddr;
+ u64 intr;
+
+ rvu = rvu_dl->rvu;
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return IRQ_NONE;
+
+ nix_event_context = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
+ intr = rvu_read64(rvu, blkaddr, NIX_AF_ERR_INT);
+ nix_event_context->nix_af_rvu_err = intr;
+
+ /* Clear interrupts */
+ rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT, intr);
+ rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1C, ~0ULL);
+ queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_nix_health_reporter->err_work);
+
+ return IRQ_HANDLED;
+}
+
+static void rvu_nix_ras_work(struct work_struct *work)
+{
+ struct rvu_nix_health_reporters *rvu_nix_health_reporter;
+
+ rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, ras_work);
+ devlink_health_report(rvu_nix_health_reporter->rvu_hw_nix_ras_reporter,
+ "NIX_AF_RAS Error",
+ rvu_nix_health_reporter->nix_event_ctx);
+}
+
+static irqreturn_t rvu_nix_af_rvu_ras_handler(int irq, void *rvu_irq)
+{
+ struct rvu_nix_event_ctx *nix_event_context;
+ struct rvu_devlink *rvu_dl = rvu_irq;
+ struct rvu *rvu;
+ int blkaddr;
+ u64 intr;
+
+ rvu = rvu_dl->rvu;
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return IRQ_NONE;
+
+ nix_event_context = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
+ intr = rvu_read64(rvu, blkaddr, NIX_AF_RAS);
+ nix_event_context->nix_af_rvu_ras = intr;
+
+ /* Clear interrupts */
+ rvu_write64(rvu, blkaddr, NIX_AF_RAS, intr);
+ rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1C, ~0ULL);
+ queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_nix_health_reporter->ras_work);
+
+ return IRQ_HANDLED;
+}
+
+static void rvu_nix_unregister_interrupts(struct rvu *rvu)
+{
+ struct rvu_devlink *rvu_dl = rvu->rvu_dl;
+ int offs, i, blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return;
+
+ offs = rvu_read64(rvu, blkaddr, NIX_PRIV_AF_INT_CFG) & 0x3ff;
+ if (!offs)
+ return;
+
+ rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1C, ~0ULL);
+ rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT_ENA_W1C, ~0ULL);
+ rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1C, ~0ULL);
+ rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1C, ~0ULL);
+
+ if (rvu->irq_allocated[offs + NIX_AF_INT_VEC_RVU]) {
+ free_irq(pci_irq_vector(rvu->pdev, offs + NIX_AF_INT_VEC_RVU),
+ rvu_dl);
+ rvu->irq_allocated[offs + NIX_AF_INT_VEC_RVU] = false;
+ }
+
+ for (i = NIX_AF_INT_VEC_AF_ERR; i < NIX_AF_INT_VEC_CNT; i++)
+ if (rvu->irq_allocated[offs + i]) {
+ free_irq(pci_irq_vector(rvu->pdev, offs + i), rvu_dl);
+ rvu->irq_allocated[offs + i] = false;
+ }
+}
+
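+/* Register handlers for the NIX AF RVU, GEN, ERR and RAS interrupt vectors
+ * and enable them via the corresponding _ENA_W1S registers.
+ */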
+static int rvu_nix_register_interrupts(struct rvu *rvu)
+{
+ int blkaddr, base;
+ bool rc;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ /* Get NIX AF MSIX vectors offset. */
+ base = rvu_read64(rvu, blkaddr, NIX_PRIV_AF_INT_CFG) & 0x3ff;
+ if (!base) {
+ dev_warn(rvu->dev,
+ "Failed to get NIX%d NIX_AF_INT vector offsets\n",
+ blkaddr - BLKADDR_NIX0);
+ return 0;
+ }
+ /* Register and enable NIX_AF_RVU_INT interrupt */
+ rc = rvu_common_request_irq(rvu, base + NIX_AF_INT_VEC_RVU,
+ "NIX_AF_RVU_INT",
+ rvu_nix_af_rvu_intr_handler);
+ if (!rc)
+ goto err;
+ rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1S, ~0ULL);
+
+ /* Register and enable NIX_AF_GEN_INT interrupt */
+ rc = rvu_common_request_irq(rvu, base + NIX_AF_INT_VEC_GEN,
+ "NIX_AF_GEN_INT",
+ rvu_nix_af_rvu_gen_handler);
+ if (!rc)
+ goto err;
+ rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT_ENA_W1S, ~0ULL);
+
+ /* Register and enable NIX_AF_ERR_INT interrupt */
+ rc = rvu_common_request_irq(rvu, base + NIX_AF_INT_VEC_AF_ERR,
+ "NIX_AF_ERR_INT",
+ rvu_nix_af_rvu_err_handler);
+ if (!rc)
+ goto err;
+ rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1S, ~0ULL);
+
+ /* Register and enable NIX_AF_RAS interrupt */
+ rc = rvu_common_request_irq(rvu, base + NIX_AF_INT_VEC_POISON,
+ "NIX_AF_RAS",
+ rvu_nix_af_rvu_ras_handler);
+ if (!rc)
+ goto err;
+ rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1S, ~0ULL);
+
+ return 0;
+err:
+ rvu_nix_unregister_interrupts(rvu);
+ return rc;
+}
+
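+/* Decode the latched NIX AF interrupt bits into a devlink fmsg report for
+ * the requested health reporter type.
+ */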
+static int rvu_nix_report_show(struct devlink_fmsg *fmsg, void *ctx,
+ enum nix_af_rvu_health health_reporter)
+{
+ struct rvu_nix_event_ctx *nix_event_context;
+ u64 intr_val;
+ int err;
+
+ nix_event_context = ctx;
+ switch (health_reporter) {
+ case NIX_AF_RVU_INTR:
+ intr_val = nix_event_context->nix_af_rvu_int;
+ err = rvu_report_pair_start(fmsg, "NIX_AF_RVU");
+ if (err)
+ return err;
+ err = devlink_fmsg_u64_pair_put(fmsg, "\tNIX RVU Interrupt Reg ",
+ nix_event_context->nix_af_rvu_int);
+ if (err)
+ return err;
+ if (intr_val & BIT_ULL(0)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tUnmap Slot Error");
+ if (err)
+ return err;
+ }
+ err = rvu_report_pair_end(fmsg);
+ if (err)
+ return err;
+ break;
+ case NIX_AF_RVU_GEN:
+ intr_val = nix_event_context->nix_af_rvu_gen;
+ err = rvu_report_pair_start(fmsg, "NIX_AF_GENERAL");
+ if (err)
+ return err;
+ err = devlink_fmsg_u64_pair_put(fmsg, "\tNIX General Interrupt Reg ",
+ nix_event_context->nix_af_rvu_gen);
+ if (err)
+ return err;
+ if (intr_val & BIT_ULL(0)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tRx multicast pkt drop");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(1)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tRx mirror pkt drop");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(4)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tSMQ flush done");
+ if (err)
+ return err;
+ }
+ err = rvu_report_pair_end(fmsg);
+ if (err)
+ return err;
+ break;
+ case NIX_AF_RVU_ERR:
+ intr_val = nix_event_context->nix_af_rvu_err;
+ err = rvu_report_pair_start(fmsg, "NIX_AF_ERR");
+ if (err)
+ return err;
+ err = devlink_fmsg_u64_pair_put(fmsg, "\tNIX Error Interrupt Reg ",
+ nix_event_context->nix_af_rvu_err);
+ if (err)
+ return err;
+ if (intr_val & BIT_ULL(14)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tFault on NIX_AQ_INST_S read");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(13)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tFault on NIX_AQ_RES_S write");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(12)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tAQ Doorbell Error");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(6)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tRx on unmapped PF_FUNC");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(5)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tRx multicast replication error");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(4)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tFault on NIX_RX_MCE_S read");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(3)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tFault on multicast WQE read");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(2)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tFault on mirror WQE read");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(1)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tFault on mirror pkt write");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(0)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tFault on multicast pkt write");
+ if (err)
+ return err;
+ }
+ err = rvu_report_pair_end(fmsg);
+ if (err)
+ return err;
+ break;
+ case NIX_AF_RVU_RAS:
+ intr_val = nix_event_context->nix_af_rvu_ras;
+ err = rvu_report_pair_start(fmsg, "NIX_AF_RAS");
+ if (err)
+ return err;
+ err = devlink_fmsg_u64_pair_put(fmsg, "\tNIX RAS Interrupt Reg ",
+ nix_event_context->nix_af_rvu_ras);
+ if (err)
+ return err;
+ err = devlink_fmsg_string_put(fmsg, "\n\tPoison Data on:");
+ if (err)
+ return err;
+ if (intr_val & BIT_ULL(34)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tNIX_AQ_INST_S");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(33)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tNIX_AQ_RES_S");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(32)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tHW ctx");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(4)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tPacket from mirror buffer");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(3)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tPacket from multicast buffer");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(2)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tWQE read from mirror buffer");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(1)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tWQE read from multicast buffer");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(0)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tNIX_RX_MCE_S read");
+ if (err)
+ return err;
+ }
+ err = rvu_report_pair_end(fmsg);
+ if (err)
+ return err;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rvu_hw_nix_intr_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *ctx,
+ struct netlink_ext_ack *netlink_extack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_devlink *rvu_dl = rvu->rvu_dl;
+ struct rvu_nix_event_ctx *nix_ctx;
+
+ nix_ctx = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
+
+ return ctx ? rvu_nix_report_show(fmsg, ctx, NIX_AF_RVU_INTR) :
+ rvu_nix_report_show(fmsg, nix_ctx, NIX_AF_RVU_INTR);
+}
+
+static int rvu_hw_nix_intr_recover(struct devlink_health_reporter *reporter,
+ void *ctx, struct netlink_ext_ack *netlink_extack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_nix_event_ctx *nix_event_ctx = ctx;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ if (nix_event_ctx->nix_af_rvu_int)
+ rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1S, ~0ULL);
+
+ return 0;
+}
+
+static int rvu_hw_nix_gen_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *ctx,
+ struct netlink_ext_ack *netlink_extack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_devlink *rvu_dl = rvu->rvu_dl;
+ struct rvu_nix_event_ctx *nix_ctx;
+
+ nix_ctx = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
+
+ return ctx ? rvu_nix_report_show(fmsg, ctx, NIX_AF_RVU_GEN) :
+ rvu_nix_report_show(fmsg, nix_ctx, NIX_AF_RVU_GEN);
+}
+
+static int rvu_hw_nix_gen_recover(struct devlink_health_reporter *reporter,
+ void *ctx, struct netlink_ext_ack *netlink_extack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_nix_event_ctx *nix_event_ctx = ctx;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ if (nix_event_ctx->nix_af_rvu_gen)
+ rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT_ENA_W1S, ~0ULL);
+
+ return 0;
+}
+
+static int rvu_hw_nix_err_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *ctx,
+ struct netlink_ext_ack *netlink_extack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_devlink *rvu_dl = rvu->rvu_dl;
+ struct rvu_nix_event_ctx *nix_ctx;
+
+ nix_ctx = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
+
+ return ctx ? rvu_nix_report_show(fmsg, ctx, NIX_AF_RVU_ERR) :
+ rvu_nix_report_show(fmsg, nix_ctx, NIX_AF_RVU_ERR);
+}
+
+static int rvu_hw_nix_err_recover(struct devlink_health_reporter *reporter,
+ void *ctx, struct netlink_ext_ack *netlink_extack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_nix_event_ctx *nix_event_ctx = ctx;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ if (nix_event_ctx->nix_af_rvu_err)
+ rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1S, ~0ULL);
+
+ return 0;
+}
+
+static int rvu_hw_nix_ras_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *ctx,
+ struct netlink_ext_ack *netlink_extack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_devlink *rvu_dl = rvu->rvu_dl;
+ struct rvu_nix_event_ctx *nix_ctx;
+
+ nix_ctx = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
+
+ return ctx ? rvu_nix_report_show(fmsg, ctx, NIX_AF_RVU_RAS) :
+ rvu_nix_report_show(fmsg, nix_ctx, NIX_AF_RVU_RAS);
+}
+
+static int rvu_hw_nix_ras_recover(struct devlink_health_reporter *reporter,
+ void *ctx, struct netlink_ext_ack *netlink_extack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_nix_event_ctx *nix_event_ctx = ctx;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ if (nix_event_ctx->nix_af_rvu_ras)
+ rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1S, ~0ULL);
+
+ return 0;
+}
+
+RVU_REPORTERS(hw_nix_intr);
+RVU_REPORTERS(hw_nix_gen);
+RVU_REPORTERS(hw_nix_err);
+RVU_REPORTERS(hw_nix_ras);
+
+static void rvu_nix_health_reporters_destroy(struct rvu_devlink *rvu_dl);
+
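+/* Allocate the NIX event context, create the four NIX devlink health
+ * reporters and the workqueue the interrupt handlers use to defer event
+ * reporting.
+ */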
+static int rvu_nix_register_reporters(struct rvu_devlink *rvu_dl)
+{
+ struct rvu_nix_health_reporters *rvu_reporters;
+ struct rvu_nix_event_ctx *nix_event_context;
+ struct rvu *rvu = rvu_dl->rvu;
+
+ rvu_reporters = kzalloc(sizeof(*rvu_reporters), GFP_KERNEL);
+ if (!rvu_reporters)
+ return -ENOMEM;
+
+ rvu_dl->rvu_nix_health_reporter = rvu_reporters;
+ nix_event_context = kzalloc(sizeof(*nix_event_context), GFP_KERNEL);
+ if (!nix_event_context)
+ return -ENOMEM;
+
+ rvu_reporters->nix_event_ctx = nix_event_context;
+ rvu_reporters->rvu_hw_nix_intr_reporter =
+ devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_intr_reporter_ops, 0, rvu);
+ if (IS_ERR(rvu_reporters->rvu_hw_nix_intr_reporter)) {
+ dev_warn(rvu->dev, "Failed to create hw_nix_intr reporter, err=%ld\n",
+ PTR_ERR(rvu_reporters->rvu_hw_nix_intr_reporter));
+ return PTR_ERR(rvu_reporters->rvu_hw_nix_intr_reporter);
+ }
+
+ rvu_reporters->rvu_hw_nix_gen_reporter =
+ devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_gen_reporter_ops, 0, rvu);
+ if (IS_ERR(rvu_reporters->rvu_hw_nix_gen_reporter)) {
+ dev_warn(rvu->dev, "Failed to create hw_nix_gen reporter, err=%ld\n",
+ PTR_ERR(rvu_reporters->rvu_hw_nix_gen_reporter));
+ return PTR_ERR(rvu_reporters->rvu_hw_nix_gen_reporter);
+ }
+
+ rvu_reporters->rvu_hw_nix_err_reporter =
+ devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_err_reporter_ops, 0, rvu);
+ if (IS_ERR(rvu_reporters->rvu_hw_nix_err_reporter)) {
+ dev_warn(rvu->dev, "Failed to create hw_nix_err reporter, err=%ld\n",
+ PTR_ERR(rvu_reporters->rvu_hw_nix_err_reporter));
+ return PTR_ERR(rvu_reporters->rvu_hw_nix_err_reporter);
+ }
+
+ rvu_reporters->rvu_hw_nix_ras_reporter =
+ devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_ras_reporter_ops, 0, rvu);
+ if (IS_ERR(rvu_reporters->rvu_hw_nix_ras_reporter)) {
+ dev_warn(rvu->dev, "Failed to create hw_nix_ras reporter, err=%ld\n",
+ PTR_ERR(rvu_reporters->rvu_hw_nix_ras_reporter));
+ return PTR_ERR(rvu_reporters->rvu_hw_nix_ras_reporter);
+ }
+
+ rvu_dl->devlink_wq = create_workqueue("rvu_devlink_wq");
+ if (!rvu_dl->devlink_wq)
+ return -ENOMEM;
+
+ INIT_WORK(&rvu_reporters->intr_work, rvu_nix_intr_work);
+ INIT_WORK(&rvu_reporters->gen_work, rvu_nix_gen_work);
+ INIT_WORK(&rvu_reporters->err_work, rvu_nix_err_work);
+ INIT_WORK(&rvu_reporters->ras_work, rvu_nix_ras_work);
+
+ return 0;
+}
+
+static int rvu_nix_health_reporters_create(struct rvu_devlink *rvu_dl)
+{
+ struct rvu *rvu = rvu_dl->rvu;
+ int err;
+
+ err = rvu_nix_register_reporters(rvu_dl);
+ if (err) {
+ dev_warn(rvu->dev, "Failed to create nix reporter, err =%d\n",
+ err);
+ return err;
+ }
+ rvu_nix_register_interrupts(rvu);
+
+ return 0;
+}
+
+static void rvu_nix_health_reporters_destroy(struct rvu_devlink *rvu_dl)
+{
+ struct rvu_nix_health_reporters *nix_reporters;
+ struct rvu *rvu = rvu_dl->rvu;
+
+ nix_reporters = rvu_dl->rvu_nix_health_reporter;
+
+ if (!nix_reporters->rvu_hw_nix_ras_reporter)
+ return;
+ if (!IS_ERR_OR_NULL(nix_reporters->rvu_hw_nix_intr_reporter))
+ devlink_health_reporter_destroy(nix_reporters->rvu_hw_nix_intr_reporter);
+
+ if (!IS_ERR_OR_NULL(nix_reporters->rvu_hw_nix_gen_reporter))
+ devlink_health_reporter_destroy(nix_reporters->rvu_hw_nix_gen_reporter);
+
+ if (!IS_ERR_OR_NULL(nix_reporters->rvu_hw_nix_err_reporter))
+ devlink_health_reporter_destroy(nix_reporters->rvu_hw_nix_err_reporter);
+
+ if (!IS_ERR_OR_NULL(nix_reporters->rvu_hw_nix_ras_reporter))
+ devlink_health_reporter_destroy(nix_reporters->rvu_hw_nix_ras_reporter);
+
+ rvu_nix_unregister_interrupts(rvu);
+ kfree(rvu_dl->rvu_nix_health_reporter->nix_event_ctx);
+ kfree(rvu_dl->rvu_nix_health_reporter);
+}
+
+static void rvu_npa_intr_work(struct work_struct *work)
+{
+ struct rvu_npa_health_reporters *rvu_npa_health_reporter;
+
+ rvu_npa_health_reporter = container_of(work, struct rvu_npa_health_reporters, intr_work);
+ devlink_health_report(rvu_npa_health_reporter->rvu_hw_npa_intr_reporter,
+ "NPA_AF_RVU Error",
+ rvu_npa_health_reporter->npa_event_ctx);
+}
+
+static irqreturn_t rvu_npa_af_rvu_intr_handler(int irq, void *rvu_irq)
+{
+ struct rvu_npa_event_ctx *npa_event_context;
+ struct rvu_devlink *rvu_dl = rvu_irq;
+ struct rvu *rvu;
+ int blkaddr;
+ u64 intr;
+
+ rvu = rvu_dl->rvu;
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
+ if (blkaddr < 0)
+ return IRQ_NONE;
+
+ npa_event_context = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
+ intr = rvu_read64(rvu, blkaddr, NPA_AF_RVU_INT);
+ npa_event_context->npa_af_rvu_int = intr;
+
+ /* Clear interrupts */
+ rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT, intr);
+ rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT_ENA_W1C, ~0ULL);
+ queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_npa_health_reporter->intr_work);
+
+ return IRQ_HANDLED;
+}
+
+static void rvu_npa_gen_work(struct work_struct *work)
+{
+ struct rvu_npa_health_reporters *rvu_npa_health_reporter;
+
+ rvu_npa_health_reporter = container_of(work, struct rvu_npa_health_reporters, gen_work);
+ devlink_health_report(rvu_npa_health_reporter->rvu_hw_npa_gen_reporter,
+ "NPA_AF_GEN Error",
+ rvu_npa_health_reporter->npa_event_ctx);
+}
+
+static irqreturn_t rvu_npa_af_gen_intr_handler(int irq, void *rvu_irq)
+{
+ struct rvu_npa_event_ctx *npa_event_context;
+ struct rvu_devlink *rvu_dl = rvu_irq;
+ struct rvu *rvu;
+ int blkaddr;
+ u64 intr;
+
+ rvu = rvu_dl->rvu;
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
+ if (blkaddr < 0)
+ return IRQ_NONE;
+
+ npa_event_context = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
+ intr = rvu_read64(rvu, blkaddr, NPA_AF_GEN_INT);
+ npa_event_context->npa_af_rvu_gen = intr;
+
+ /* Clear interrupts */
+ rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT, intr);
+ rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT_ENA_W1C, ~0ULL);
+ queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_npa_health_reporter->gen_work);
+
+ return IRQ_HANDLED;
+}
+
+static void rvu_npa_err_work(struct work_struct *work)
+{
+ struct rvu_npa_health_reporters *rvu_npa_health_reporter;
+
+ rvu_npa_health_reporter = container_of(work, struct rvu_npa_health_reporters, err_work);
+ devlink_health_report(rvu_npa_health_reporter->rvu_hw_npa_err_reporter,
+ "NPA_AF_ERR Error",
+ rvu_npa_health_reporter->npa_event_ctx);
+}
+
+static irqreturn_t rvu_npa_af_err_intr_handler(int irq, void *rvu_irq)
+{
+ struct rvu_npa_event_ctx *npa_event_context;
+ struct rvu_devlink *rvu_dl = rvu_irq;
+ struct rvu *rvu;
+ int blkaddr;
+ u64 intr;
+
+ rvu = rvu_dl->rvu;
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
+ if (blkaddr < 0)
+ return IRQ_NONE;
+ npa_event_context = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
+ intr = rvu_read64(rvu, blkaddr, NPA_AF_ERR_INT);
+ npa_event_context->npa_af_rvu_err = intr;
+
+ /* Clear interrupts */
+ rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT, intr);
+ rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT_ENA_W1C, ~0ULL);
+ queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_npa_health_reporter->err_work);
+
+ return IRQ_HANDLED;
+}
+
+static void rvu_npa_ras_work(struct work_struct *work)
+{
+ struct rvu_npa_health_reporters *rvu_npa_health_reporter;
+
+ rvu_npa_health_reporter = container_of(work, struct rvu_npa_health_reporters, ras_work);
+ devlink_health_report(rvu_npa_health_reporter->rvu_hw_npa_ras_reporter,
+ "HW NPA_AF_RAS Error reported",
+ rvu_npa_health_reporter->npa_event_ctx);
+}
+
+static irqreturn_t rvu_npa_af_ras_intr_handler(int irq, void *rvu_irq)
+{
+ struct rvu_npa_event_ctx *npa_event_context;
+ struct rvu_devlink *rvu_dl = rvu_irq;
+ struct rvu *rvu;
+ int blkaddr;
+ u64 intr;
+
+ rvu = rvu_dl->rvu;
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
+ if (blkaddr < 0)
+ return IRQ_NONE;
+
+ npa_event_context = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
+ intr = rvu_read64(rvu, blkaddr, NPA_AF_RAS);
+ npa_event_context->npa_af_rvu_ras = intr;
+
+ /* Clear interrupts */
+ rvu_write64(rvu, blkaddr, NPA_AF_RAS, intr);
+ rvu_write64(rvu, blkaddr, NPA_AF_RAS_ENA_W1C, ~0ULL);
+ queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_npa_health_reporter->ras_work);
+
+ return IRQ_HANDLED;
+}
+
+static void rvu_npa_unregister_interrupts(struct rvu *rvu)
+{
+ struct rvu_devlink *rvu_dl = rvu->rvu_dl;
+ int i, offs, blkaddr;
+ u64 reg;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
+ if (blkaddr < 0)
+ return;
+
+ reg = rvu_read64(rvu, blkaddr, NPA_PRIV_AF_INT_CFG);
+ offs = reg & 0x3FF;
+
+ rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT_ENA_W1C, ~0ULL);
+ rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT_ENA_W1C, ~0ULL);
+ rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT_ENA_W1C, ~0ULL);
+ rvu_write64(rvu, blkaddr, NPA_AF_RAS_ENA_W1C, ~0ULL);
+
+ for (i = 0; i < NPA_AF_INT_VEC_CNT; i++)
+ if (rvu->irq_allocated[offs + i]) {
+ free_irq(pci_irq_vector(rvu->pdev, offs + i), rvu_dl);
+ rvu->irq_allocated[offs + i] = false;
+ }
+}
+
+static int rvu_npa_register_interrupts(struct rvu *rvu)
+{
+ int blkaddr, base;
+ bool rc;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ /* Get NPA AF MSIX vectors offset. */
+ base = rvu_read64(rvu, blkaddr, NPA_PRIV_AF_INT_CFG) & 0x3ff;
+ if (!base) {
+ dev_warn(rvu->dev,
+ "Failed to get NPA_AF_INT vector offsets\n");
+ return 0;
+ }
+
+ /* Register and enable NPA_AF_RVU_INT interrupt */
+ rc = rvu_common_request_irq(rvu, base + NPA_AF_INT_VEC_RVU,
+ "NPA_AF_RVU_INT",
+ rvu_npa_af_rvu_intr_handler);
+ if (!rc)
+ goto err;
+ rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT_ENA_W1S, ~0ULL);
+
+ /* Register and enable NPA_AF_GEN_INT interrupt */
+ rc = rvu_common_request_irq(rvu, base + NPA_AF_INT_VEC_GEN,
+ "NPA_AF_RVU_GEN",
+ rvu_npa_af_gen_intr_handler);
+ if (!rc)
+ goto err;
+ rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT_ENA_W1S, ~0ULL);
+
+ /* Register and enable NPA_AF_ERR_INT interrupt */
+ rc = rvu_common_request_irq(rvu, base + NPA_AF_INT_VEC_AF_ERR,
+ "NPA_AF_ERR_INT",
+ rvu_npa_af_err_intr_handler);
+ if (!rc)
+ goto err;
+ rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT_ENA_W1S, ~0ULL);
+
+ /* Register and enable NPA_AF_RAS interrupt */
+ rc = rvu_common_request_irq(rvu, base + NPA_AF_INT_VEC_POISON,
+ "NPA_AF_RAS",
+ rvu_npa_af_ras_intr_handler);
+ if (!rc)
+ goto err;
+ rvu_write64(rvu, blkaddr, NPA_AF_RAS_ENA_W1S, ~0ULL);
+
+ return 0;
+err:
+ rvu_npa_unregister_interrupts(rvu);
+ return rc;
+}
+
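+/* Decode the latched NPA AF interrupt bits into a devlink fmsg report for
+ * the requested health reporter type.
+ */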
+static int rvu_npa_report_show(struct devlink_fmsg *fmsg, void *ctx,
+ enum npa_af_rvu_health health_reporter)
+{
+ struct rvu_npa_event_ctx *npa_event_context;
+ unsigned int alloc_dis, free_dis;
+ u64 intr_val;
+ int err;
+
+ npa_event_context = ctx;
+ switch (health_reporter) {
+ case NPA_AF_RVU_GEN:
+ intr_val = npa_event_context->npa_af_rvu_gen;
+ err = rvu_report_pair_start(fmsg, "NPA_AF_GENERAL");
+ if (err)
+ return err;
+ err = devlink_fmsg_u64_pair_put(fmsg, "\tNPA General Interrupt Reg ",
+ npa_event_context->npa_af_rvu_gen);
+ if (err)
+ return err;
+ if (intr_val & BIT_ULL(32)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tUnmap PF Error");
+ if (err)
+ return err;
+ }
+
+ free_dis = FIELD_GET(GENMASK(15, 0), intr_val);
+ if (free_dis & BIT(NPA_INPQ_NIX0_RX)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tNIX0: free disabled RX");
+ if (err)
+ return err;
+ }
+ if (free_dis & BIT(NPA_INPQ_NIX0_TX)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tNIX0:free disabled TX");
+ if (err)
+ return err;
+ }
+ if (free_dis & BIT(NPA_INPQ_NIX1_RX)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tNIX1: free disabled RX");
+ if (err)
+ return err;
+ }
+ if (free_dis & BIT(NPA_INPQ_NIX1_TX)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tNIX1:free disabled TX");
+ if (err)
+ return err;
+ }
+ if (free_dis & BIT(NPA_INPQ_SSO)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for SSO");
+ if (err)
+ return err;
+ }
+ if (free_dis & BIT(NPA_INPQ_TIM)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for TIM");
+ if (err)
+ return err;
+ }
+ if (free_dis & BIT(NPA_INPQ_DPI)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for DPI");
+ if (err)
+ return err;
+ }
+ if (free_dis & BIT(NPA_INPQ_AURA_OP)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for AURA");
+ if (err)
+ return err;
+ }
+
+ alloc_dis = FIELD_GET(GENMASK(31, 16), intr_val);
+ if (alloc_dis & BIT(NPA_INPQ_NIX0_RX)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tNIX0: alloc disabled RX");
+ if (err)
+ return err;
+ }
+ if (alloc_dis & BIT(NPA_INPQ_NIX0_TX)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tNIX0:alloc disabled TX");
+ if (err)
+ return err;
+ }
+ if (alloc_dis & BIT(NPA_INPQ_NIX1_RX)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tNIX1: alloc disabled RX");
+ if (err)
+ return err;
+ }
+ if (alloc_dis & BIT(NPA_INPQ_NIX1_TX)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tNIX1:alloc disabled TX");
+ if (err)
+ return err;
+ }
+ if (alloc_dis & BIT(NPA_INPQ_SSO)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for SSO");
+ if (err)
+ return err;
+ }
+ if (alloc_dis & BIT(NPA_INPQ_TIM)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for TIM");
+ if (err)
+ return err;
+ }
+ if (alloc_dis & BIT(NPA_INPQ_DPI)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for DPI");
+ if (err)
+ return err;
+ }
+ if (alloc_dis & BIT(NPA_INPQ_AURA_OP)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for AURA");
+ if (err)
+ return err;
+ }
+ err = rvu_report_pair_end(fmsg);
+ if (err)
+ return err;
+ break;
+ case NPA_AF_RVU_ERR:
+ err = rvu_report_pair_start(fmsg, "NPA_AF_ERR");
+ if (err)
+ return err;
+ err = devlink_fmsg_u64_pair_put(fmsg, "\tNPA Error Interrupt Reg ",
+ npa_event_context->npa_af_rvu_err);
+ if (err)
+ return err;
+
+ if (npa_event_context->npa_af_rvu_err & BIT_ULL(14)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tFault on NPA_AQ_INST_S read");
+ if (err)
+ return err;
+ }
+ if (npa_event_context->npa_af_rvu_err & BIT_ULL(13)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tFault on NPA_AQ_RES_S write");
+ if (err)
+ return err;
+ }
+ if (npa_event_context->npa_af_rvu_err & BIT_ULL(12)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tAQ Doorbell Error");
+ if (err)
+ return err;
+ }
+ err = rvu_report_pair_end(fmsg);
+ if (err)
+ return err;
+ break;
+ case NPA_AF_RVU_RAS:
+ err = rvu_report_pair_start(fmsg, "NPA_AF_RVU_RAS");
+ if (err)
+ return err;
+ err = devlink_fmsg_u64_pair_put(fmsg, "\tNPA RAS Interrupt Reg ",
+ npa_event_context->npa_af_rvu_ras);
+ if (err)
+ return err;
+ if (npa_event_context->npa_af_rvu_ras & BIT_ULL(34)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tPoison data on NPA_AQ_INST_S");
+ if (err)
+ return err;
+ }
+ if (npa_event_context->npa_af_rvu_ras & BIT_ULL(33)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tPoison data on NPA_AQ_RES_S");
+ if (err)
+ return err;
+ }
+ if (npa_event_context->npa_af_rvu_ras & BIT_ULL(32)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tPoison data on HW context");
+ if (err)
+ return err;
+ }
+ err = rvu_report_pair_end(fmsg);
+ if (err)
+ return err;
+ break;
+ case NPA_AF_RVU_INTR:
+ err = rvu_report_pair_start(fmsg, "NPA_AF_RVU");
+ if (err)
+ return err;
+ err = devlink_fmsg_u64_pair_put(fmsg, "\tNPA RVU Interrupt Reg ",
+ npa_event_context->npa_af_rvu_int);
+ if (err)
+ return err;
+ if (npa_event_context->npa_af_rvu_int & BIT_ULL(0)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tUnmap Slot Error");
+ if (err)
+ return err;
+ }
+ return rvu_report_pair_end(fmsg);
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rvu_hw_npa_intr_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *ctx,
+ struct netlink_ext_ack *netlink_extack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_devlink *rvu_dl = rvu->rvu_dl;
+ struct rvu_npa_event_ctx *npa_ctx;
+
+ npa_ctx = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
+
+ return ctx ? rvu_npa_report_show(fmsg, ctx, NPA_AF_RVU_INTR) :
+ rvu_npa_report_show(fmsg, npa_ctx, NPA_AF_RVU_INTR);
+}
+
+static int rvu_hw_npa_intr_recover(struct devlink_health_reporter *reporter,
+ void *ctx, struct netlink_ext_ack *netlink_extack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_npa_event_ctx *npa_event_ctx = ctx;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ if (npa_event_ctx->npa_af_rvu_int)
+ rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT_ENA_W1S, ~0ULL);
+
+ return 0;
+}
+
+static int rvu_hw_npa_gen_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *ctx,
+ struct netlink_ext_ack *netlink_extack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_devlink *rvu_dl = rvu->rvu_dl;
+ struct rvu_npa_event_ctx *npa_ctx;
+
+ npa_ctx = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
+
+ return ctx ? rvu_npa_report_show(fmsg, ctx, NPA_AF_RVU_GEN) :
+ rvu_npa_report_show(fmsg, npa_ctx, NPA_AF_RVU_GEN);
+}
+
+static int rvu_hw_npa_gen_recover(struct devlink_health_reporter *reporter,
+ void *ctx, struct netlink_ext_ack *netlink_extack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_npa_event_ctx *npa_event_ctx = ctx;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ if (npa_event_ctx->npa_af_rvu_gen)
+ rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT_ENA_W1S, ~0ULL);
+
+ return 0;
+}
+
+static int rvu_hw_npa_err_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *ctx,
+ struct netlink_ext_ack *netlink_extack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_devlink *rvu_dl = rvu->rvu_dl;
+ struct rvu_npa_event_ctx *npa_ctx;
+
+ npa_ctx = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
+
+ return ctx ? rvu_npa_report_show(fmsg, ctx, NPA_AF_RVU_ERR) :
+ rvu_npa_report_show(fmsg, npa_ctx, NPA_AF_RVU_ERR);
+}
+
+static int rvu_hw_npa_err_recover(struct devlink_health_reporter *reporter,
+ void *ctx, struct netlink_ext_ack *netlink_extack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_npa_event_ctx *npa_event_ctx = ctx;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ if (npa_event_ctx->npa_af_rvu_err)
+ rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT_ENA_W1S, ~0ULL);
+
+ return 0;
+}
+
+static int rvu_hw_npa_ras_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *ctx,
+ struct netlink_ext_ack *netlink_extack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_devlink *rvu_dl = rvu->rvu_dl;
+ struct rvu_npa_event_ctx *npa_ctx;
+
+ npa_ctx = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
+
+ return ctx ? rvu_npa_report_show(fmsg, ctx, NPA_AF_RVU_RAS) :
+ rvu_npa_report_show(fmsg, npa_ctx, NPA_AF_RVU_RAS);
+}
+
+static int rvu_hw_npa_ras_recover(struct devlink_health_reporter *reporter,
+ void *ctx, struct netlink_ext_ack *netlink_extack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_npa_event_ctx *npa_event_ctx = ctx;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ if (npa_event_ctx->npa_af_rvu_ras)
+ rvu_write64(rvu, blkaddr, NPA_AF_RAS_ENA_W1S, ~0ULL);
+
+ return 0;
+}
+
+RVU_REPORTERS(hw_npa_intr);
+RVU_REPORTERS(hw_npa_gen);
+RVU_REPORTERS(hw_npa_err);
+RVU_REPORTERS(hw_npa_ras);
+
+static void rvu_npa_health_reporters_destroy(struct rvu_devlink *rvu_dl);
+
+static int rvu_npa_register_reporters(struct rvu_devlink *rvu_dl)
+{
+ struct rvu_npa_health_reporters *rvu_reporters;
+ struct rvu_npa_event_ctx *npa_event_context;
+ struct rvu *rvu = rvu_dl->rvu;
+
+ rvu_reporters = kzalloc(sizeof(*rvu_reporters), GFP_KERNEL);
+ if (!rvu_reporters)
+ return -ENOMEM;
+
+ rvu_dl->rvu_npa_health_reporter = rvu_reporters;
+ npa_event_context = kzalloc(sizeof(*npa_event_context), GFP_KERNEL);
+ if (!npa_event_context)
+ return -ENOMEM;
+
+ rvu_reporters->npa_event_ctx = npa_event_context;
+ rvu_reporters->rvu_hw_npa_intr_reporter =
+ devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_npa_intr_reporter_ops, 0, rvu);
+ if (IS_ERR(rvu_reporters->rvu_hw_npa_intr_reporter)) {
+ dev_warn(rvu->dev, "Failed to create hw_npa_intr reporter, err=%ld\n",
+ PTR_ERR(rvu_reporters->rvu_hw_npa_intr_reporter));
+ return PTR_ERR(rvu_reporters->rvu_hw_npa_intr_reporter);
+ }
+
+ rvu_reporters->rvu_hw_npa_gen_reporter =
+ devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_npa_gen_reporter_ops, 0, rvu);
+ if (IS_ERR(rvu_reporters->rvu_hw_npa_gen_reporter)) {
+ dev_warn(rvu->dev, "Failed to create hw_npa_gen reporter, err=%ld\n",
+ PTR_ERR(rvu_reporters->rvu_hw_npa_gen_reporter));
+ return PTR_ERR(rvu_reporters->rvu_hw_npa_gen_reporter);
+ }
+
+ rvu_reporters->rvu_hw_npa_err_reporter =
+ devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_npa_err_reporter_ops, 0, rvu);
+ if (IS_ERR(rvu_reporters->rvu_hw_npa_err_reporter)) {
+ dev_warn(rvu->dev, "Failed to create hw_npa_err reporter, err=%ld\n",
+ PTR_ERR(rvu_reporters->rvu_hw_npa_err_reporter));
+ return PTR_ERR(rvu_reporters->rvu_hw_npa_err_reporter);
+ }
+
+ rvu_reporters->rvu_hw_npa_ras_reporter =
+ devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_npa_ras_reporter_ops, 0, rvu);
+ if (IS_ERR(rvu_reporters->rvu_hw_npa_ras_reporter)) {
+ dev_warn(rvu->dev, "Failed to create hw_npa_ras reporter, err=%ld\n",
+ PTR_ERR(rvu_reporters->rvu_hw_npa_ras_reporter));
+ return PTR_ERR(rvu_reporters->rvu_hw_npa_ras_reporter);
+ }
+
+ rvu_dl->devlink_wq = create_workqueue("rvu_devlink_wq");
+ if (!rvu_dl->devlink_wq)
+ return -ENOMEM;
+
+ INIT_WORK(&rvu_reporters->intr_work, rvu_npa_intr_work);
+ INIT_WORK(&rvu_reporters->err_work, rvu_npa_err_work);
+ INIT_WORK(&rvu_reporters->gen_work, rvu_npa_gen_work);
+ INIT_WORK(&rvu_reporters->ras_work, rvu_npa_ras_work);
+
+ return 0;
+}
+
+static int rvu_npa_health_reporters_create(struct rvu_devlink *rvu_dl)
+{
+ struct rvu *rvu = rvu_dl->rvu;
+ int err;
+
+ err = rvu_npa_register_reporters(rvu_dl);
+ if (err) {
+ dev_warn(rvu->dev, "Failed to create npa reporter, err =%d\n",
+ err);
+ return err;
+ }
+ rvu_npa_register_interrupts(rvu);
+
+ return 0;
+}
+
+static void rvu_npa_health_reporters_destroy(struct rvu_devlink *rvu_dl)
+{
+ struct rvu_npa_health_reporters *npa_reporters;
+ struct rvu *rvu = rvu_dl->rvu;
+
+ npa_reporters = rvu_dl->rvu_npa_health_reporter;
+
+ if (!npa_reporters->rvu_hw_npa_ras_reporter)
+ return;
+ if (!IS_ERR_OR_NULL(npa_reporters->rvu_hw_npa_intr_reporter))
+ devlink_health_reporter_destroy(npa_reporters->rvu_hw_npa_intr_reporter);
+
+ if (!IS_ERR_OR_NULL(npa_reporters->rvu_hw_npa_gen_reporter))
+ devlink_health_reporter_destroy(npa_reporters->rvu_hw_npa_gen_reporter);
+
+ if (!IS_ERR_OR_NULL(npa_reporters->rvu_hw_npa_err_reporter))
+ devlink_health_reporter_destroy(npa_reporters->rvu_hw_npa_err_reporter);
+
+ if (!IS_ERR_OR_NULL(npa_reporters->rvu_hw_npa_ras_reporter))
+ devlink_health_reporter_destroy(npa_reporters->rvu_hw_npa_ras_reporter);
+
+ rvu_npa_unregister_interrupts(rvu);
+ kfree(rvu_dl->rvu_npa_health_reporter->npa_event_ctx);
+ kfree(rvu_dl->rvu_npa_health_reporter);
+}
+
+static int rvu_health_reporters_create(struct rvu *rvu)
+{
+ struct rvu_devlink *rvu_dl;
+ int err;
+
+ rvu_dl = rvu->rvu_dl;
+ err = rvu_npa_health_reporters_create(rvu_dl);
+ if (err)
+ return err;
+
+ return rvu_nix_health_reporters_create(rvu_dl);
+}
+
+static void rvu_health_reporters_destroy(struct rvu *rvu)
+{
+ struct rvu_devlink *rvu_dl;
+
+ if (!rvu->rvu_dl)
+ return;
+
+ rvu_dl = rvu->rvu_dl;
+ rvu_npa_health_reporters_destroy(rvu_dl);
+ rvu_nix_health_reporters_destroy(rvu_dl);
+}
+
+/* Devlink Params APIs */
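+/* DWRR MTU must be a power of two no larger than 64K, or one of the special
+ * values 9728/10240, and can only be changed while all SMQs are free.
+ */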
+static int rvu_af_dl_dwrr_mtu_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ struct rvu_devlink *rvu_dl = devlink_priv(devlink);
+ struct rvu *rvu = rvu_dl->rvu;
+ int dwrr_mtu = val.vu32;
+ struct nix_txsch *txsch;
+ struct nix_hw *nix_hw;
+
+ if (!rvu->hw->cap.nix_common_dwrr_mtu) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Setting DWRR_MTU is not supported on this silicon");
+ return -EOPNOTSUPP;
+ }
+
+ if ((dwrr_mtu > 65536 || !is_power_of_2(dwrr_mtu)) &&
+ (dwrr_mtu != 9728 && dwrr_mtu != 10240)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Invalid, supported MTUs are 0,2,4,8.16,32,64....4K,8K,32K,64K and 9728, 10240");
+ return -EINVAL;
+ }
+
+ nix_hw = get_nix_hw(rvu->hw, BLKADDR_NIX0);
+ if (!nix_hw)
+ return -ENODEV;
+
+ txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
+ if (rvu_rsrc_free_count(&txsch->schq) != txsch->schq.max) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Changing DWRR MTU is not supported when there are active NIXLFs");
+ NL_SET_ERR_MSG_MOD(extack,
+ "Make sure none of the PF/VF interfaces are initialized and retry");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int rvu_af_dl_dwrr_mtu_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct rvu_devlink *rvu_dl = devlink_priv(devlink);
+ struct rvu *rvu = rvu_dl->rvu;
+ u64 dwrr_mtu;
+
+ dwrr_mtu = convert_bytes_to_dwrr_mtu(ctx->val.vu32);
+ rvu_write64(rvu, BLKADDR_NIX0,
+ nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_RPM), dwrr_mtu);
+
+ return 0;
+}
+
+static int rvu_af_dl_dwrr_mtu_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct rvu_devlink *rvu_dl = devlink_priv(devlink);
+ struct rvu *rvu = rvu_dl->rvu;
+ u64 dwrr_mtu;
+
+ if (!rvu->hw->cap.nix_common_dwrr_mtu)
+ return -EOPNOTSUPP;
+
+ dwrr_mtu = rvu_read64(rvu, BLKADDR_NIX0,
+ nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_RPM));
+ ctx->val.vu32 = convert_dwrr_mtu_to_bytes(dwrr_mtu);
+
+ return 0;
+}
+
+enum rvu_af_dl_param_id {
+ RVU_AF_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
+ RVU_AF_DEVLINK_PARAM_ID_DWRR_MTU,
+ RVU_AF_DEVLINK_PARAM_ID_NPC_EXACT_FEATURE_DISABLE,
+ RVU_AF_DEVLINK_PARAM_ID_NPC_MCAM_ZONE_PERCENT,
+};
+
+static int rvu_af_npc_exact_feature_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct rvu_devlink *rvu_dl = devlink_priv(devlink);
+ struct rvu *rvu = rvu_dl->rvu;
+ bool enabled;
+
+ enabled = rvu_npc_exact_has_match_table(rvu);
+
+ snprintf(ctx->val.vstr, sizeof(ctx->val.vstr), "%s",
+ enabled ? "enabled" : "disabled");
+
+ return 0;
+}
+
+static int rvu_af_npc_exact_feature_disable(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct rvu_devlink *rvu_dl = devlink_priv(devlink);
+ struct rvu *rvu = rvu_dl->rvu;
+
+ rvu_npc_exact_disable_feature(rvu);
+
+ return 0;
+}
+
+static int rvu_af_npc_exact_feature_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ struct rvu_devlink *rvu_dl = devlink_priv(devlink);
+ struct rvu *rvu = rvu_dl->rvu;
+ u64 enable;
+
+ if (kstrtoull(val.vstr, 10, &enable)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only 1 value is supported");
+ return -EINVAL;
+ }
+
+ if (enable != 1) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only disabling exact match feature is supported");
+ return -EINVAL;
+ }
+
+ if (rvu_npc_exact_can_disable_feature(rvu))
+ return 0;
+
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can't disable exact match feature; Please try before any configuration");
+ return -EFAULT;
+}
+
+static int rvu_af_dl_npc_mcam_high_zone_percent_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct rvu_devlink *rvu_dl = devlink_priv(devlink);
+ struct rvu *rvu = rvu_dl->rvu;
+ struct npc_mcam *mcam;
+ u32 percent;
+
+ mcam = &rvu->hw->mcam;
+ percent = (mcam->hprio_count * 100) / mcam->bmap_entries;
+ ctx->val.vu8 = (u8)percent;
+
+ return 0;
+}
+
+static int rvu_af_dl_npc_mcam_high_zone_percent_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct rvu_devlink *rvu_dl = devlink_priv(devlink);
+ struct rvu *rvu = rvu_dl->rvu;
+ struct npc_mcam *mcam;
+ u32 percent;
+
+ percent = ctx->val.vu8;
+ mcam = &rvu->hw->mcam;
+ mcam->hprio_count = (mcam->bmap_entries * percent) / 100;
+ mcam->hprio_end = mcam->hprio_count;
+ mcam->lprio_count = (mcam->bmap_entries - mcam->hprio_count) / 2;
+ mcam->lprio_start = mcam->bmap_entries - mcam->lprio_count;
+
+ return 0;
+}
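+
+/* A worked example of the split done above (illustrative numbers, not taken
+ * from any particular silicon): with mcam->bmap_entries = 1024 and a
+ * requested high zone of 25%, the set handler yields
+ *
+ *   hprio_count = (1024 * 25) / 100 = 256   (hprio_end = 256)
+ *   lprio_count = (1024 - 256) / 2  = 384
+ *   lprio_start = 1024 - 384        = 640
+ *
+ * i.e. entries [0, 255] form the high priority zone, [640, 1023] the low
+ * priority zone and the remainder the middle zone.
+ */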
+
+static int rvu_af_dl_npc_mcam_high_zone_percent_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ struct rvu_devlink *rvu_dl = devlink_priv(devlink);
+ struct rvu *rvu = rvu_dl->rvu;
+ struct npc_mcam *mcam;
+
+ /* The percent of high prio zone must range from 12% to 100% of unreserved mcam space */
+ if (val.vu8 < 12 || val.vu8 > 100) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "mcam high zone percent must be between 12% to 100%");
+ return -EINVAL;
+ }
+
+ /* Do not allow user to modify the high priority zone entries while mcam entries
+ * have already been assigned.
+ */
+ mcam = &rvu->hw->mcam;
+ if (mcam->bmap_fcnt < mcam->bmap_entries) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "mcam entries have already been assigned, can't resize");
+ return -EPERM;
+ }
+
+ return 0;
+}
+
+static const struct devlink_param rvu_af_dl_params[] = {
+ DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_DWRR_MTU,
+ "dwrr_mtu", DEVLINK_PARAM_TYPE_U32,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ rvu_af_dl_dwrr_mtu_get, rvu_af_dl_dwrr_mtu_set,
+ rvu_af_dl_dwrr_mtu_validate),
+};
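+
+/* Minimal usage sketch for the runtime param above, assuming the AF device
+ * sits at the hypothetical PCI address 0002:01:00.0:
+ *
+ *   devlink dev param set pci/0002:01:00.0 name dwrr_mtu \
+ *           value 9728 cmode runtime
+ *   devlink dev param show pci/0002:01:00.0 name dwrr_mtu
+ *
+ * The validate callback accepts powers of two up to 64K plus the special
+ * values 9728 and 10240.
+ */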
+
+static const struct devlink_param rvu_af_dl_param_exact_match[] = {
+ DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_NPC_EXACT_FEATURE_DISABLE,
+ "npc_exact_feature_disable", DEVLINK_PARAM_TYPE_STRING,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ rvu_af_npc_exact_feature_get,
+ rvu_af_npc_exact_feature_disable,
+ rvu_af_npc_exact_feature_validate),
+ DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_NPC_MCAM_ZONE_PERCENT,
+ "npc_mcam_high_zone_percent", DEVLINK_PARAM_TYPE_U8,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ rvu_af_dl_npc_mcam_high_zone_percent_get,
+ rvu_af_dl_npc_mcam_high_zone_percent_set,
+ rvu_af_dl_npc_mcam_high_zone_percent_validate),
+};
+
+/* Devlink switch mode */
+static int rvu_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
+{
+ struct rvu_devlink *rvu_dl = devlink_priv(devlink);
+ struct rvu *rvu = rvu_dl->rvu;
+ struct rvu_switch *rswitch;
+
+ rswitch = &rvu->rswitch;
+ *mode = rswitch->mode;
+
+ return 0;
+}
+
+static int rvu_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
+ struct netlink_ext_ack *extack)
+{
+ struct rvu_devlink *rvu_dl = devlink_priv(devlink);
+ struct rvu *rvu = rvu_dl->rvu;
+ struct rvu_switch *rswitch;
+
+ rswitch = &rvu->rswitch;
+ switch (mode) {
+ case DEVLINK_ESWITCH_MODE_LEGACY:
+ case DEVLINK_ESWITCH_MODE_SWITCHDEV:
+ if (rswitch->mode == mode)
+ return 0;
+ rswitch->mode = mode;
+ if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
+ rvu_switch_enable(rvu);
+ else
+ rvu_switch_disable(rvu);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct devlink_ops rvu_devlink_ops = {
+ .eswitch_mode_get = rvu_devlink_eswitch_mode_get,
+ .eswitch_mode_set = rvu_devlink_eswitch_mode_set,
+};
+
+int rvu_register_dl(struct rvu *rvu)
+{
+ struct rvu_devlink *rvu_dl;
+ struct devlink *dl;
+ int err;
+
+ dl = devlink_alloc(&rvu_devlink_ops, sizeof(struct rvu_devlink),
+ rvu->dev);
+ if (!dl) {
+ dev_warn(rvu->dev, "devlink_alloc failed\n");
+ return -ENOMEM;
+ }
+
+ rvu_dl = devlink_priv(dl);
+ rvu_dl->dl = dl;
+ rvu_dl->rvu = rvu;
+ rvu->rvu_dl = rvu_dl;
+
+ err = rvu_health_reporters_create(rvu);
+ if (err) {
+ dev_err(rvu->dev,
+ "devlink health reporter creation failed with error %d\n", err);
+ goto err_dl_health;
+ }
+
+ err = devlink_params_register(dl, rvu_af_dl_params, ARRAY_SIZE(rvu_af_dl_params));
+ if (err) {
+ dev_err(rvu->dev,
+ "devlink params register failed with error %d", err);
+ goto err_dl_health;
+ }
+
+ /* Register exact match devlink only for CN10K-B */
+ if (!rvu_npc_exact_has_match_table(rvu))
+ goto done;
+
+ err = devlink_params_register(dl, rvu_af_dl_param_exact_match,
+ ARRAY_SIZE(rvu_af_dl_param_exact_match));
+ if (err) {
+ dev_err(rvu->dev,
+ "devlink exact match params register failed with error %d", err);
+ goto err_dl_exact_match;
+ }
+
+done:
+ devlink_register(dl);
+ return 0;
+
+err_dl_exact_match:
+ devlink_params_unregister(dl, rvu_af_dl_params, ARRAY_SIZE(rvu_af_dl_params));
+
+err_dl_health:
+ rvu_health_reporters_destroy(rvu);
+ devlink_free(dl);
+ return err;
+}
+
+void rvu_unregister_dl(struct rvu *rvu)
+{
+ struct rvu_devlink *rvu_dl = rvu->rvu_dl;
+ struct devlink *dl = rvu_dl->dl;
+
+ devlink_unregister(dl);
+
+ devlink_params_unregister(dl, rvu_af_dl_params, ARRAY_SIZE(rvu_af_dl_params));
+
+ /* Unregister exact match devlink only for CN10K-B */
+ if (rvu_npc_exact_has_match_table(rvu))
+ devlink_params_unregister(dl, rvu_af_dl_param_exact_match,
+ ARRAY_SIZE(rvu_af_dl_param_exact_match));
+
+ rvu_health_reporters_destroy(rvu);
+ devlink_free(dl);
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.h
new file mode 100644
index 000000000..51efe88dc
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell RVU Admin Function Devlink
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#ifndef RVU_DEVLINK_H
+#define RVU_DEVLINK_H
+
+#define RVU_REPORTERS(_name) \
+static const struct devlink_health_reporter_ops rvu_ ## _name ## _reporter_ops = { \
+ .name = #_name, \
+ .recover = rvu_ ## _name ## _recover, \
+ .dump = rvu_ ## _name ## _dump, \
+}
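+
+/* For reference, RVU_REPORTERS(hw_npa_intr) expands to:
+ *
+ *   static const struct devlink_health_reporter_ops
+ *   rvu_hw_npa_intr_reporter_ops = {
+ *           .name    = "hw_npa_intr",
+ *           .recover = rvu_hw_npa_intr_recover,
+ *           .dump    = rvu_hw_npa_intr_dump,
+ *   };
+ *
+ * i.e. each user of the macro only has to provide the matching
+ * rvu_<name>_recover() and rvu_<name>_dump() callbacks.
+ */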
+
+enum npa_af_rvu_health {
+ NPA_AF_RVU_INTR,
+ NPA_AF_RVU_GEN,
+ NPA_AF_RVU_ERR,
+ NPA_AF_RVU_RAS,
+};
+
+struct rvu_npa_event_ctx {
+ u64 npa_af_rvu_int;
+ u64 npa_af_rvu_gen;
+ u64 npa_af_rvu_err;
+ u64 npa_af_rvu_ras;
+};
+
+struct rvu_npa_health_reporters {
+ struct rvu_npa_event_ctx *npa_event_ctx;
+ struct devlink_health_reporter *rvu_hw_npa_intr_reporter;
+ struct work_struct intr_work;
+ struct devlink_health_reporter *rvu_hw_npa_gen_reporter;
+ struct work_struct gen_work;
+ struct devlink_health_reporter *rvu_hw_npa_err_reporter;
+ struct work_struct err_work;
+ struct devlink_health_reporter *rvu_hw_npa_ras_reporter;
+ struct work_struct ras_work;
+};
+
+enum nix_af_rvu_health {
+ NIX_AF_RVU_INTR,
+ NIX_AF_RVU_GEN,
+ NIX_AF_RVU_ERR,
+ NIX_AF_RVU_RAS,
+};
+
+struct rvu_nix_event_ctx {
+ u64 nix_af_rvu_int;
+ u64 nix_af_rvu_gen;
+ u64 nix_af_rvu_err;
+ u64 nix_af_rvu_ras;
+};
+
+struct rvu_nix_health_reporters {
+ struct rvu_nix_event_ctx *nix_event_ctx;
+ struct devlink_health_reporter *rvu_hw_nix_intr_reporter;
+ struct work_struct intr_work;
+ struct devlink_health_reporter *rvu_hw_nix_gen_reporter;
+ struct work_struct gen_work;
+ struct devlink_health_reporter *rvu_hw_nix_err_reporter;
+ struct work_struct err_work;
+ struct devlink_health_reporter *rvu_hw_nix_ras_reporter;
+ struct work_struct ras_work;
+};
+
+struct rvu_devlink {
+ struct devlink *dl;
+ struct rvu *rvu;
+ struct workqueue_struct *devlink_wq;
+ struct rvu_npa_health_reporters *rvu_npa_health_reporter;
+ struct rvu_nix_health_reporters *rvu_nix_health_reporter;
+};
+
+/* Devlink APIs */
+int rvu_register_dl(struct rvu *rvu);
+void rvu_unregister_dl(struct rvu *rvu);
+
+#endif /* RVU_DEVLINK_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
new file mode 100644
index 000000000..58744313f
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -0,0 +1,5709 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "rvu_struct.h"
+#include "rvu_reg.h"
+#include "rvu.h"
+#include "npc.h"
+#include "mcs.h"
+#include "cgx.h"
+#include "lmac_common.h"
+#include "rvu_npc_hash.h"
+
+static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc);
+static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
+ int type, int chan_id);
+static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
+ int type, bool add);
+static int nix_setup_ipolicers(struct rvu *rvu,
+ struct nix_hw *nix_hw, int blkaddr);
+static void nix_ipolicer_freemem(struct rvu *rvu, struct nix_hw *nix_hw);
+static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req,
+ struct nix_hw *nix_hw, u16 pcifunc);
+static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc);
+static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw,
+ u32 leaf_prof);
+static const char *nix_get_ctx_name(int ctype);
+
+enum mc_tbl_sz {
+ MC_TBL_SZ_256,
+ MC_TBL_SZ_512,
+ MC_TBL_SZ_1K,
+ MC_TBL_SZ_2K,
+ MC_TBL_SZ_4K,
+ MC_TBL_SZ_8K,
+ MC_TBL_SZ_16K,
+ MC_TBL_SZ_32K,
+ MC_TBL_SZ_64K,
+};
+
+enum mc_buf_cnt {
+ MC_BUF_CNT_8,
+ MC_BUF_CNT_16,
+ MC_BUF_CNT_32,
+ MC_BUF_CNT_64,
+ MC_BUF_CNT_128,
+ MC_BUF_CNT_256,
+ MC_BUF_CNT_512,
+ MC_BUF_CNT_1024,
+ MC_BUF_CNT_2048,
+};
+
+enum nix_makr_fmt_indexes {
+ NIX_MARK_CFG_IP_DSCP_RED,
+ NIX_MARK_CFG_IP_DSCP_YELLOW,
+ NIX_MARK_CFG_IP_DSCP_YELLOW_RED,
+ NIX_MARK_CFG_IP_ECN_RED,
+ NIX_MARK_CFG_IP_ECN_YELLOW,
+ NIX_MARK_CFG_IP_ECN_YELLOW_RED,
+ NIX_MARK_CFG_VLAN_DEI_RED,
+ NIX_MARK_CFG_VLAN_DEI_YELLOW,
+ NIX_MARK_CFG_VLAN_DEI_YELLOW_RED,
+ NIX_MARK_CFG_MAX,
+};
+
+/* For now considering MC resources needed for broadcast
+ * pkt replication only, i.e. 256 HWVFs + 12 PFs.
+ */
+#define MC_TBL_SIZE MC_TBL_SZ_512
+#define MC_BUF_CNT MC_BUF_CNT_128
+
+struct mce {
+ struct hlist_node node;
+ u16 pcifunc;
+};
+
+int rvu_get_next_nix_blkaddr(struct rvu *rvu, int blkaddr)
+{
+ int i = 0;
+
+ /* If blkaddr is 0, return the first nix block address */
+ if (blkaddr == 0)
+ return rvu->nix_blkaddr[blkaddr];
+
+ while (i + 1 < MAX_NIX_BLKS) {
+ if (rvu->nix_blkaddr[i] == blkaddr)
+ return rvu->nix_blkaddr[i + 1];
+ i++;
+ }
+
+ return 0;
+}
+
+bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (!pfvf->nixlf || blkaddr < 0)
+ return false;
+ return true;
+}
+
+int rvu_get_nixlf_count(struct rvu *rvu)
+{
+ int blkaddr = 0, max = 0;
+ struct rvu_block *block;
+
+ blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
+ while (blkaddr) {
+ block = &rvu->hw->block[blkaddr];
+ max += block->lf.max;
+ blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
+ }
+ return max;
+}
+
+int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ struct rvu_hwinfo *hw = rvu->hw;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (!pfvf->nixlf || blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ *nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
+ if (*nixlf < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ if (nix_blkaddr)
+ *nix_blkaddr = blkaddr;
+
+ return 0;
+}
+
+int nix_get_struct_ptrs(struct rvu *rvu, u16 pcifunc,
+ struct nix_hw **nix_hw, int *blkaddr)
+{
+ struct rvu_pfvf *pfvf;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ *blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (!pfvf->nixlf || *blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ *nix_hw = get_nix_hw(rvu->hw, *blkaddr);
+ if (!*nix_hw)
+ return NIX_AF_ERR_INVALID_NIXBLK;
+ return 0;
+}
+
+static void nix_mce_list_init(struct nix_mce_list *list, int max)
+{
+ INIT_HLIST_HEAD(&list->head);
+ list->count = 0;
+ list->max = max;
+}
+
+static u16 nix_alloc_mce_list(struct nix_mcast *mcast, int count)
+{
+ int idx;
+
+ if (!mcast)
+ return 0;
+
+ idx = mcast->next_free_mce;
+ mcast->next_free_mce += count;
+ return idx;
+}
+
+struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr)
+{
+ int nix_blkaddr = 0, i = 0;
+ struct rvu *rvu = hw->rvu;
+
+ nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr);
+ while (nix_blkaddr) {
+ if (blkaddr == nix_blkaddr && hw->nix)
+ return &hw->nix[i];
+ nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr);
+ i++;
+ }
+ return NULL;
+}
+
+int nix_get_dwrr_mtu_reg(struct rvu_hwinfo *hw, int smq_link_type)
+{
+ if (hw->cap.nix_multiple_dwrr_mtu)
+ return NIX_AF_DWRR_MTUX(smq_link_type);
+
+ if (smq_link_type == SMQ_LINK_TYPE_SDP)
+ return NIX_AF_DWRR_SDP_MTU;
+
+ /* Here it's the same reg for RPM and LBK */
+ return NIX_AF_DWRR_RPM_MTU;
+}
+
+u32 convert_dwrr_mtu_to_bytes(u8 dwrr_mtu)
+{
+ dwrr_mtu &= 0x1FULL;
+
+ /* MTU used for DWRR calculation is a power of 2, up to 64K bytes.
+ * Value of 4 is reserved for MTU value of 9728 bytes.
+ * Value of 5 is reserved for MTU value of 10240 bytes.
+ */
+ switch (dwrr_mtu) {
+ case 4:
+ return 9728;
+ case 5:
+ return 10240;
+ default:
+ return BIT_ULL(dwrr_mtu);
+ }
+
+ return 0;
+}
+
+u32 convert_bytes_to_dwrr_mtu(u32 bytes)
+{
+ /* MTU used for DWRR calculation is a power of 2, up to 64K bytes.
+ * Value of 4 is reserved for MTU value of 9728 bytes.
+ * Value of 5 is reserved for MTU value of 10240 bytes.
+ */
+ if (bytes > BIT_ULL(16))
+ return 0;
+
+ switch (bytes) {
+ case 9728:
+ return 4;
+ case 10240:
+ return 5;
+ default:
+ return ilog2(bytes);
+ }
+
+ return 0;
+}
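+
+/* A few worked values of the encoding above (derived directly from the two
+ * helpers, shown only for illustration):
+ *
+ *   convert_bytes_to_dwrr_mtu(9728)  = 4,  convert_dwrr_mtu_to_bytes(4)  = 9728
+ *   convert_bytes_to_dwrr_mtu(10240) = 5,  convert_dwrr_mtu_to_bytes(5)  = 10240
+ *   convert_bytes_to_dwrr_mtu(1024)  = 10, convert_dwrr_mtu_to_bytes(10) = 1024
+ *
+ * Non power-of-two byte counts round down, e.g. 1500 encodes to
+ * ilog2(1500) = 10 and decodes back to 1024 bytes.
+ */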
+
+static void nix_rx_sync(struct rvu *rvu, int blkaddr)
+{
+ int err;
+
+ /* Sync all in flight RX packets to LLC/DRAM */
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
+ err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
+ if (err)
+ dev_err(rvu->dev, "SYNC1: NIX RX software sync failed\n");
+
+ /* SW_SYNC ensures all existing transactions are finished and pkts
+ * are written to LLC/DRAM; queues should be torn down after a
+ * successful SW_SYNC. Due to a HW erratum, in some rare scenarios
+ * an existing transaction might end after SW_SYNC operation. To
+ * ensure operation is fully done, do the SW_SYNC twice.
+ */
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
+ err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
+ if (err)
+ dev_err(rvu->dev, "SYNC2: NIX RX software sync failed\n");
+}
+
+static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
+ int lvl, u16 pcifunc, u16 schq)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct nix_txsch *txsch;
+ struct nix_hw *nix_hw;
+ u16 map_func;
+
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return false;
+
+ txsch = &nix_hw->txsch[lvl];
+ /* Check out of bounds */
+ if (schq >= txsch->schq.max)
+ return false;
+
+ mutex_lock(&rvu->rsrc_lock);
+ map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]);
+ mutex_unlock(&rvu->rsrc_lock);
+
+ /* TLs aggregating traffic are shared across PF and VFs */
+ if (lvl >= hw->cap.nix_tx_aggr_lvl) {
+ if (rvu_get_pf(map_func) != rvu_get_pf(pcifunc))
+ return false;
+ else
+ return true;
+ }
+
+ if (map_func != pcifunc)
+ return false;
+
+ return true;
+}
+
+static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf,
+ struct nix_lf_alloc_rsp *rsp, bool loop)
+{
+ struct rvu_pfvf *parent_pf, *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ u16 req_chan_base, req_chan_end, req_chan_cnt;
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct sdp_node_info *sdp_info;
+ int pkind, pf, vf, lbkid, vfid;
+ u8 cgx_id, lmac_id;
+ bool from_vf;
+ int err;
+
+ pf = rvu_get_pf(pcifunc);
+ if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK &&
+ type != NIX_INTF_TYPE_SDP)
+ return 0;
+
+ switch (type) {
+ case NIX_INTF_TYPE_CGX:
+ pfvf->cgx_lmac = rvu->pf2cgxlmac_map[pf];
+ rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
+
+ pkind = rvu_npc_get_pkind(rvu, pf);
+ if (pkind < 0) {
+ dev_err(rvu->dev,
+ "PF_Func 0x%x: Invalid pkind\n", pcifunc);
+ return -EINVAL;
+ }
+ pfvf->rx_chan_base = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0);
+ pfvf->tx_chan_base = pfvf->rx_chan_base;
+ pfvf->rx_chan_cnt = 1;
+ pfvf->tx_chan_cnt = 1;
+ rsp->tx_link = cgx_id * hw->lmac_per_cgx + lmac_id;
+
+ cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind);
+ rvu_npc_set_pkind(rvu, pkind, pfvf);
+
+ break;
+ case NIX_INTF_TYPE_LBK:
+ vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
+
+ /* If the NIX1 block is present on the silicon then NIXes are
+ * assigned alternately to lbk interfaces. NIX0 should
+ * send packets on lbk link 1 channels and NIX1 should send
+ * on lbk link 0 channels for the communication between
+ * NIX0 and NIX1.
+ */
+ lbkid = 0;
+ if (rvu->hw->lbk_links > 1)
+ lbkid = vf & 0x1 ? 0 : 1;
+
+ /* By default NIX0 is configured to send a packet on lbk link 1
+ * (which corresponds to LBK1); the same packet will be received on
+ * NIX1 over lbk link 0. If NIX1 sends a packet on lbk link 0
+ * (which corresponds to LBK2) the packet will be received on NIX0
+ * lbk link 1.
+ * But if the lbk links for NIX0 and NIX1 are negated, i.e. NIX0
+ * transmits and receives on lbk link 0, which corresponds
+ * to the LBK1 block, back-to-back connectivity between NIX and
+ * LBK can be achieved (which is similar to 96xx)
+ *
+ * RX TX
+ * NIX0 lbk link 1 (LBK2) 1 (LBK1)
+ * NIX0 lbk link 0 (LBK0) 0 (LBK0)
+ * NIX1 lbk link 0 (LBK1) 0 (LBK2)
+ * NIX1 lbk link 1 (LBK3) 1 (LBK3)
+ */
+ if (loop)
+ lbkid = !lbkid;
+
+ /* Note that AF's VFs work in pairs and talk over consecutive
+ * loopback channels. Therefore, if an odd number of AF VFs is
+ * enabled, the last VF remains without a pair.
+ */
+ pfvf->rx_chan_base = rvu_nix_chan_lbk(rvu, lbkid, vf);
+ pfvf->tx_chan_base = vf & 0x1 ?
+ rvu_nix_chan_lbk(rvu, lbkid, vf - 1) :
+ rvu_nix_chan_lbk(rvu, lbkid, vf + 1);
+ pfvf->rx_chan_cnt = 1;
+ pfvf->tx_chan_cnt = 1;
+ rsp->tx_link = hw->cgx_links + lbkid;
+ pfvf->lbkid = lbkid;
+ rvu_npc_set_pkind(rvu, NPC_RX_LBK_PKIND, pfvf);
+ rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
+ pfvf->rx_chan_base,
+ pfvf->rx_chan_cnt);
+
+ break;
+ case NIX_INTF_TYPE_SDP:
+ from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK);
+ parent_pf = &rvu->pf[rvu_get_pf(pcifunc)];
+ sdp_info = parent_pf->sdp_info;
+ if (!sdp_info) {
+ dev_err(rvu->dev, "Invalid sdp_info pointer\n");
+ return -EINVAL;
+ }
+ if (from_vf) {
+ req_chan_base = rvu_nix_chan_sdp(rvu, 0) + sdp_info->pf_srn +
+ sdp_info->num_pf_rings;
+ vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
+ for (vfid = 0; vfid < vf; vfid++)
+ req_chan_base += sdp_info->vf_rings[vfid];
+ req_chan_cnt = sdp_info->vf_rings[vf];
+ req_chan_end = req_chan_base + req_chan_cnt - 1;
+ if (req_chan_base < rvu_nix_chan_sdp(rvu, 0) ||
+ req_chan_end > rvu_nix_chan_sdp(rvu, 255)) {
+ dev_err(rvu->dev,
+ "PF_Func 0x%x: Invalid channel base and count\n",
+ pcifunc);
+ return -EINVAL;
+ }
+ } else {
+ req_chan_base = rvu_nix_chan_sdp(rvu, 0) + sdp_info->pf_srn;
+ req_chan_cnt = sdp_info->num_pf_rings;
+ }
+
+ pfvf->rx_chan_base = req_chan_base;
+ pfvf->rx_chan_cnt = req_chan_cnt;
+ pfvf->tx_chan_base = pfvf->rx_chan_base;
+ pfvf->tx_chan_cnt = pfvf->rx_chan_cnt;
+
+ rsp->tx_link = hw->cgx_links + hw->lbk_links;
+ rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
+ pfvf->rx_chan_base,
+ pfvf->rx_chan_cnt);
+ break;
+ }
+
+ /* Add a UCAST forwarding rule in MCAM with the MAC address of the
+ * RVU PF/VF this NIXLF is attached to.
+ */
+ rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
+ pfvf->rx_chan_base, pfvf->mac_addr);
+
+ /* Add this PF_FUNC to bcast pkt replication list */
+ err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, true);
+ if (err) {
+ dev_err(rvu->dev,
+ "Bcast list, failed to enable PF_FUNC 0x%x\n",
+ pcifunc);
+ return err;
+ }
+ /* Install MCAM rule matching Ethernet broadcast mac address */
+ rvu_npc_install_bcast_match_entry(rvu, pcifunc,
+ nixlf, pfvf->rx_chan_base);
+
+ pfvf->maxlen = NIC_HW_MIN_FRS;
+ pfvf->minlen = NIC_HW_MIN_FRS;
+
+ return 0;
+}
+
+static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ int err;
+
+ pfvf->maxlen = 0;
+ pfvf->minlen = 0;
+
+ /* Remove this PF_FUNC from bcast pkt replication list */
+ err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, false);
+ if (err) {
+ dev_err(rvu->dev,
+ "Bcast list, failed to disable PF_FUNC 0x%x\n",
+ pcifunc);
+ }
+
+ /* Free and disable any MCAM entries used by this NIX LF */
+ rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
+
+ /* Disable DMAC filters used */
+ rvu_cgx_disable_dmac_entries(rvu, pcifunc);
+}
+
+int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
+ struct nix_bp_cfg_req *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_pfvf *pfvf;
+ int blkaddr, pf, type;
+ u16 chan_base, chan;
+ u64 cfg;
+
+ pf = rvu_get_pf(pcifunc);
+ type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
+ if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
+ return 0;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+
+ chan_base = pfvf->rx_chan_base + req->chan_base;
+ for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
+ cfg & ~BIT_ULL(16));
+ }
+ return 0;
+}
+
+static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
+ int type, int chan_id)
+{
+ int bpid, blkaddr, lmac_chan_cnt, sdp_chan_cnt;
+ u16 cgx_bpid_cnt, lbk_bpid_cnt, sdp_bpid_cnt;
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_pfvf *pfvf;
+ u8 cgx_id, lmac_id;
+ u64 cfg;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
+ lmac_chan_cnt = cfg & 0xFF;
+
+ cgx_bpid_cnt = hw->cgx_links * lmac_chan_cnt;
+ lbk_bpid_cnt = hw->lbk_links * ((cfg >> 16) & 0xFF);
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
+ sdp_chan_cnt = cfg & 0xFFF;
+ sdp_bpid_cnt = hw->sdp_links * sdp_chan_cnt;
+
+ pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
+
+ /* Backpressure IDs range division
+ * CGX channels are mapped to (0 - 191) BPIDs
+ * LBK channels are mapped to (192 - 255) BPIDs
+ * SDP channels are mapped to (256 - 511) BPIDs
+ *
+ * LMAC channels and BPIDs are mapped as follows
+ * cgx(0)_lmac(0)_chan(0 - 15) = bpid(0 - 15)
+ * cgx(0)_lmac(1)_chan(0 - 15) = bpid(16 - 31) ....
+ * cgx(1)_lmac(0)_chan(0 - 15) = bpid(64 - 79) ....
+ */
+ switch (type) {
+ case NIX_INTF_TYPE_CGX:
+ if ((req->chan_base + req->chan_cnt) > 16)
+ return -EINVAL;
+ rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
+ /* Assign bpid based on cgx, lmac and chan id */
+ bpid = (cgx_id * hw->lmac_per_cgx * lmac_chan_cnt) +
+ (lmac_id * lmac_chan_cnt) + req->chan_base;
+
+ if (req->bpid_per_chan)
+ bpid += chan_id;
+ if (bpid > cgx_bpid_cnt)
+ return -EINVAL;
+ break;
+
+ case NIX_INTF_TYPE_LBK:
+ if ((req->chan_base + req->chan_cnt) > 63)
+ return -EINVAL;
+ bpid = cgx_bpid_cnt + req->chan_base;
+ if (req->bpid_per_chan)
+ bpid += chan_id;
+ if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt))
+ return -EINVAL;
+ break;
+ case NIX_INTF_TYPE_SDP:
+ if ((req->chan_base + req->chan_cnt) > 255)
+ return -EINVAL;
+
+ bpid = sdp_bpid_cnt + req->chan_base;
+ if (req->bpid_per_chan)
+ bpid += chan_id;
+
+ if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt + sdp_bpid_cnt))
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return bpid;
+}
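+
+/* Worked example for the CGX case above, assuming NIX_AF_CONST reports 16
+ * channels per LMAC and the silicon has 4 LMACs per CGX (purely illustrative
+ * numbers): a request from cgx(1)/lmac(0) with chan_base 0 gets
+ *
+ *   bpid = (1 * 4 * 16) + (0 * 16) + 0 = 64
+ *
+ * which lands at the start of the cgx(1)_lmac(0) range (64 - 79) described
+ * in the mapping comment inside the function.
+ */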
+
+int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
+ struct nix_bp_cfg_req *req,
+ struct nix_bp_cfg_rsp *rsp)
+{
+ int blkaddr, pf, type, chan_id = 0;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_pfvf *pfvf;
+ u16 chan_base, chan;
+ s16 bpid, bpid_base;
+ u64 cfg;
+
+ pf = rvu_get_pf(pcifunc);
+ type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
+ if (is_sdp_pfvf(pcifunc))
+ type = NIX_INTF_TYPE_SDP;
+
+ /* Enable backpressure only for CGX mapped PFs and LBK/SDP interface */
+ if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK &&
+ type != NIX_INTF_TYPE_SDP)
+ return 0;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+
+ bpid_base = rvu_nix_get_bpid(rvu, req, type, chan_id);
+ chan_base = pfvf->rx_chan_base + req->chan_base;
+ bpid = bpid_base;
+
+ for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
+ if (bpid < 0) {
+ dev_warn(rvu->dev, "Fail to enable backpressure\n");
+ return -EINVAL;
+ }
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
+ cfg &= ~GENMASK_ULL(8, 0);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
+ cfg | (bpid & GENMASK_ULL(8, 0)) | BIT_ULL(16));
+ chan_id++;
+ bpid = rvu_nix_get_bpid(rvu, req, type, chan_id);
+ }
+
+ for (chan = 0; chan < req->chan_cnt; chan++) {
+ /* Map each channel to the bpid assigned to it */
+ rsp->chan_bpid[chan] = ((req->chan_base + chan) & 0x7F) << 10 |
+ (bpid_base & 0x3FF);
+ if (req->bpid_per_chan)
+ bpid_base++;
+ }
+ rsp->chan_cnt = req->chan_cnt;
+
+ return 0;
+}
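+
+/* The chan_bpid words returned above pack the channel in bits 16:10 and the
+ * BPID in bits 9:0. For instance (illustrative values), channel 2 paired
+ * with BPID 66 is reported as
+ *
+ *   ((2 & 0x7F) << 10) | (66 & 0x3FF) = 0x842
+ */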
+
+static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr,
+ u64 format, bool v4, u64 *fidx)
+{
+ struct nix_lso_format field = {0};
+
+ /* IP's Length field */
+ field.layer = NIX_TXLAYER_OL3;
+ /* In IPv4 the length field is at byte offset 2; for IPv6 it's 4 */
+ field.offset = v4 ? 2 : 4;
+ field.sizem1 = 1; /* i.e 2 bytes */
+ field.alg = NIX_LSOALG_ADD_PAYLEN;
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
+ *(u64 *)&field);
+
+ /* No ID field in IPv6 header */
+ if (!v4)
+ return;
+
+ /* IP's ID field */
+ field.layer = NIX_TXLAYER_OL3;
+ field.offset = 4;
+ field.sizem1 = 1; /* i.e 2 bytes */
+ field.alg = NIX_LSOALG_ADD_SEGNUM;
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
+ *(u64 *)&field);
+}
+
+static void nix_setup_lso_tso_l4(struct rvu *rvu, int blkaddr,
+ u64 format, u64 *fidx)
+{
+ struct nix_lso_format field = {0};
+
+ /* TCP's sequence number field */
+ field.layer = NIX_TXLAYER_OL4;
+ field.offset = 4;
+ field.sizem1 = 3; /* i.e 4 bytes */
+ field.alg = NIX_LSOALG_ADD_OFFSET;
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
+ *(u64 *)&field);
+
+ /* TCP's flags field */
+ field.layer = NIX_TXLAYER_OL4;
+ field.offset = 12;
+ field.sizem1 = 1; /* 2 bytes */
+ field.alg = NIX_LSOALG_TCP_FLAGS;
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
+ *(u64 *)&field);
+}
+
+static void nix_setup_lso(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
+{
+ u64 cfg, idx, fidx = 0;
+
+ /* Get max HW supported format indices */
+ cfg = (rvu_read64(rvu, blkaddr, NIX_AF_CONST1) >> 48) & 0xFF;
+ nix_hw->lso.total = cfg;
+
+ /* Enable LSO */
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_LSO_CFG);
+ /* For TSO, set first and middle segment flags to
+ * mask out PSH, RST & FIN flags in TCP packet
+ */
+ cfg &= ~((0xFFFFULL << 32) | (0xFFFFULL << 16));
+ cfg |= (0xFFF2ULL << 32) | (0xFFF2ULL << 16);
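+ /* 0xFFF2 clears FIN (bit 0), RST (bit 2) and PSH (bit 3) of the TCP
+ * flags word and leaves every other bit set, which is what the
+ * "mask out PSH, RST & FIN" note above refers to; the same value is
+ * programmed for both the first-segment and middle-segment fields.
+ */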
+ rvu_write64(rvu, blkaddr, NIX_AF_LSO_CFG, cfg | BIT_ULL(63));
+
+ /* Setup default static LSO formats
+ *
+ * Configure format fields for TCPv4 segmentation offload
+ */
+ idx = NIX_LSO_FORMAT_IDX_TSOV4;
+ nix_setup_lso_tso_l3(rvu, blkaddr, idx, true, &fidx);
+ nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
+
+ /* Set rest of the fields to NOP */
+ for (; fidx < 8; fidx++) {
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
+ }
+ nix_hw->lso.in_use++;
+
+ /* Configure format fields for TCPv6 segmentation offload */
+ idx = NIX_LSO_FORMAT_IDX_TSOV6;
+ fidx = 0;
+ nix_setup_lso_tso_l3(rvu, blkaddr, idx, false, &fidx);
+ nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
+
+ /* Set rest of the fields to NOP */
+ for (; fidx < 8; fidx++) {
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
+ }
+ nix_hw->lso.in_use++;
+}
+
+static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
+{
+ kfree(pfvf->rq_bmap);
+ kfree(pfvf->sq_bmap);
+ kfree(pfvf->cq_bmap);
+ if (pfvf->rq_ctx)
+ qmem_free(rvu->dev, pfvf->rq_ctx);
+ if (pfvf->sq_ctx)
+ qmem_free(rvu->dev, pfvf->sq_ctx);
+ if (pfvf->cq_ctx)
+ qmem_free(rvu->dev, pfvf->cq_ctx);
+ if (pfvf->rss_ctx)
+ qmem_free(rvu->dev, pfvf->rss_ctx);
+ if (pfvf->nix_qints_ctx)
+ qmem_free(rvu->dev, pfvf->nix_qints_ctx);
+ if (pfvf->cq_ints_ctx)
+ qmem_free(rvu->dev, pfvf->cq_ints_ctx);
+
+ pfvf->rq_bmap = NULL;
+ pfvf->cq_bmap = NULL;
+ pfvf->sq_bmap = NULL;
+ pfvf->rq_ctx = NULL;
+ pfvf->sq_ctx = NULL;
+ pfvf->cq_ctx = NULL;
+ pfvf->rss_ctx = NULL;
+ pfvf->nix_qints_ctx = NULL;
+ pfvf->cq_ints_ctx = NULL;
+}
+
+static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr,
+ struct rvu_pfvf *pfvf, int nixlf,
+ int rss_sz, int rss_grps, int hwctx_size,
+ u64 way_mask, bool tag_lsb_as_adder)
+{
+ int err, grp, num_indices;
+ u64 val;
+
+ /* RSS is not requested for this NIXLF */
+ if (!rss_sz)
+ return 0;
+ num_indices = rss_sz * rss_grps;
+
+ /* Alloc NIX RSS HW context memory and config the base */
+ err = qmem_alloc(rvu->dev, &pfvf->rss_ctx, num_indices, hwctx_size);
+ if (err)
+ return err;
+
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_BASE(nixlf),
+ (u64)pfvf->rss_ctx->iova);
+
+ /* Config full RSS table size, enable RSS and caching */
+ val = BIT_ULL(36) | BIT_ULL(4) | way_mask << 20 |
+ ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE);
+
+ if (tag_lsb_as_adder)
+ val |= BIT_ULL(5);
+
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf), val);
+ /* Config RSS group offset and sizes */
+ for (grp = 0; grp < rss_grps; grp++)
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp),
+ ((ilog2(rss_sz) - 1) << 16) | (rss_sz * grp));
+ return 0;
+}
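+
+/* Worked example of the RSS programming above, assuming
+ * MAX_RSS_INDIR_TBL_SIZE is 256 (illustrative): for rss_sz = 256 and
+ * rss_grps = 4, num_indices = 1024 and the size field folded into
+ * NIX_AF_LFX_RSS_CFG is ilog2(1024 / 256) = 2, while group 1 gets
+ *
+ *   NIX_AF_LFX_RSS_GRPX(nixlf, 1) = ((ilog2(256) - 1) << 16) | (256 * 1)
+ *                                 = (7 << 16) | 256 = 0x70100
+ */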
+
+static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
+ struct nix_aq_inst_s *inst)
+{
+ struct admin_queue *aq = block->aq;
+ struct nix_aq_res_s *result;
+ int timeout = 1000;
+ u64 reg, head;
+ int ret;
+
+ result = (struct nix_aq_res_s *)aq->res->base;
+
+ /* Get current head pointer where to append this instruction */
+ reg = rvu_read64(rvu, block->addr, NIX_AF_AQ_STATUS);
+ head = (reg >> 4) & AQ_PTR_MASK;
+
+ memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)),
+ (void *)inst, aq->inst->entry_sz);
+ memset(result, 0, sizeof(*result));
+ /* sync into memory */
+ wmb();
+
+ /* Ring the doorbell and wait for result */
+ rvu_write64(rvu, block->addr, NIX_AF_AQ_DOOR, 1);
+ while (result->compcode == NIX_AQ_COMP_NOTDONE) {
+ cpu_relax();
+ udelay(1);
+ timeout--;
+ if (!timeout)
+ return -EBUSY;
+ }
+
+ if (result->compcode != NIX_AQ_COMP_GOOD) {
+ /* TODO: Replace this with some error code */
+ if (result->compcode == NIX_AQ_COMP_CTX_FAULT ||
+ result->compcode == NIX_AQ_COMP_LOCKERR ||
+ result->compcode == NIX_AQ_COMP_CTX_POISON) {
+ ret = rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX0_RX);
+ ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX0_TX);
+ ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX1_RX);
+ ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX1_TX);
+ if (ret)
+ dev_err(rvu->dev,
+ "%s: Not able to unlock cachelines\n", __func__);
+ }
+
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static void nix_get_aq_req_smq(struct rvu *rvu, struct nix_aq_enq_req *req,
+ u16 *smq, u16 *smq_mask)
+{
+ struct nix_cn10k_aq_enq_req *aq_req;
+
+ if (!is_rvu_otx2(rvu)) {
+ aq_req = (struct nix_cn10k_aq_enq_req *)req;
+ *smq = aq_req->sq.smq;
+ *smq_mask = aq_req->sq_mask.smq;
+ } else {
+ *smq = req->sq.smq;
+ *smq_mask = req->sq_mask.smq;
+ }
+}
+
+static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
+ struct nix_aq_enq_req *req,
+ struct nix_aq_enq_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ int nixlf, blkaddr, rc = 0;
+ struct nix_aq_inst_s inst;
+ struct rvu_block *block;
+ struct admin_queue *aq;
+ struct rvu_pfvf *pfvf;
+ u16 smq, smq_mask;
+ void *ctx, *mask;
+ bool ena;
+ u64 cfg;
+
+ blkaddr = nix_hw->blkaddr;
+ block = &hw->block[blkaddr];
+ aq = block->aq;
+ if (!aq) {
+ dev_warn(rvu->dev, "%s: NIX AQ not initialized\n", __func__);
+ return NIX_AF_ERR_AQ_ENQUEUE;
+ }
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
+
+ /* Skip NIXLF check for broadcast MCE entry and bandwidth profile
+ * operations done by AF itself.
+ */
+ if (!((!rsp && req->ctype == NIX_AQ_CTYPE_MCE) ||
+ (req->ctype == NIX_AQ_CTYPE_BANDPROF && !pcifunc))) {
+ if (!pfvf->nixlf || nixlf < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+ }
+
+ switch (req->ctype) {
+ case NIX_AQ_CTYPE_RQ:
+ /* Check if index exceeds max no of queues */
+ if (!pfvf->rq_ctx || req->qidx >= pfvf->rq_ctx->qsize)
+ rc = NIX_AF_ERR_AQ_ENQUEUE;
+ break;
+ case NIX_AQ_CTYPE_SQ:
+ if (!pfvf->sq_ctx || req->qidx >= pfvf->sq_ctx->qsize)
+ rc = NIX_AF_ERR_AQ_ENQUEUE;
+ break;
+ case NIX_AQ_CTYPE_CQ:
+ if (!pfvf->cq_ctx || req->qidx >= pfvf->cq_ctx->qsize)
+ rc = NIX_AF_ERR_AQ_ENQUEUE;
+ break;
+ case NIX_AQ_CTYPE_RSS:
+ /* Check if RSS is enabled and qidx is within range */
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf));
+ if (!(cfg & BIT_ULL(4)) || !pfvf->rss_ctx ||
+ (req->qidx >= (256UL << (cfg & 0xF))))
+ rc = NIX_AF_ERR_AQ_ENQUEUE;
+ break;
+ case NIX_AQ_CTYPE_MCE:
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG);
+
+ /* Check if index exceeds MCE list length */
+ if (!nix_hw->mcast.mce_ctx ||
+ (req->qidx >= (256UL << (cfg & 0xF))))
+ rc = NIX_AF_ERR_AQ_ENQUEUE;
+
+ /* Adding multicast lists for requests from PF/VFs is not
+ * yet supported, so ignore this.
+ */
+ if (rsp)
+ rc = NIX_AF_ERR_AQ_ENQUEUE;
+ break;
+ case NIX_AQ_CTYPE_BANDPROF:
+ if (nix_verify_bandprof((struct nix_cn10k_aq_enq_req *)req,
+ nix_hw, pcifunc))
+ rc = NIX_AF_ERR_INVALID_BANDPROF;
+ break;
+ default:
+ rc = NIX_AF_ERR_AQ_ENQUEUE;
+ }
+
+ if (rc)
+ return rc;
+
+ nix_get_aq_req_smq(rvu, req, &smq, &smq_mask);
+ /* Check if SQ pointed SMQ belongs to this PF/VF or not */
+ if (req->ctype == NIX_AQ_CTYPE_SQ &&
+ ((req->op == NIX_AQ_INSTOP_INIT && req->sq.ena) ||
+ (req->op == NIX_AQ_INSTOP_WRITE &&
+ req->sq_mask.ena && req->sq.ena && smq_mask))) {
+ if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ,
+ pcifunc, smq))
+ return NIX_AF_ERR_AQ_ENQUEUE;
+ }
+
+ memset(&inst, 0, sizeof(struct nix_aq_inst_s));
+ inst.lf = nixlf;
+ inst.cindex = req->qidx;
+ inst.ctype = req->ctype;
+ inst.op = req->op;
+ /* Currently we are not supporting enqueuing multiple instructions,
+ * so always choose first entry in result memory.
+ */
+ inst.res_addr = (u64)aq->res->iova;
+
+ /* Hardware uses the same aq->res->base for updating the result of
+ * the previous instruction, hence wait here till it is done.
+ */
+ spin_lock(&aq->lock);
+
+ /* Clean result + context memory */
+ memset(aq->res->base, 0, aq->res->entry_sz);
+ /* Context needs to be written at RES_ADDR + 128 */
+ ctx = aq->res->base + 128;
+ /* Mask needs to be written at RES_ADDR + 256 */
+ mask = aq->res->base + 256;
+
+ switch (req->op) {
+ case NIX_AQ_INSTOP_WRITE:
+ if (req->ctype == NIX_AQ_CTYPE_RQ)
+ memcpy(mask, &req->rq_mask,
+ sizeof(struct nix_rq_ctx_s));
+ else if (req->ctype == NIX_AQ_CTYPE_SQ)
+ memcpy(mask, &req->sq_mask,
+ sizeof(struct nix_sq_ctx_s));
+ else if (req->ctype == NIX_AQ_CTYPE_CQ)
+ memcpy(mask, &req->cq_mask,
+ sizeof(struct nix_cq_ctx_s));
+ else if (req->ctype == NIX_AQ_CTYPE_RSS)
+ memcpy(mask, &req->rss_mask,
+ sizeof(struct nix_rsse_s));
+ else if (req->ctype == NIX_AQ_CTYPE_MCE)
+ memcpy(mask, &req->mce_mask,
+ sizeof(struct nix_rx_mce_s));
+ else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
+ memcpy(mask, &req->prof_mask,
+ sizeof(struct nix_bandprof_s));
+ fallthrough;
+ case NIX_AQ_INSTOP_INIT:
+ if (req->ctype == NIX_AQ_CTYPE_RQ)
+ memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s));
+ else if (req->ctype == NIX_AQ_CTYPE_SQ)
+ memcpy(ctx, &req->sq, sizeof(struct nix_sq_ctx_s));
+ else if (req->ctype == NIX_AQ_CTYPE_CQ)
+ memcpy(ctx, &req->cq, sizeof(struct nix_cq_ctx_s));
+ else if (req->ctype == NIX_AQ_CTYPE_RSS)
+ memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s));
+ else if (req->ctype == NIX_AQ_CTYPE_MCE)
+ memcpy(ctx, &req->mce, sizeof(struct nix_rx_mce_s));
+ else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
+ memcpy(ctx, &req->prof, sizeof(struct nix_bandprof_s));
+ break;
+ case NIX_AQ_INSTOP_NOP:
+ case NIX_AQ_INSTOP_READ:
+ case NIX_AQ_INSTOP_LOCK:
+ case NIX_AQ_INSTOP_UNLOCK:
+ break;
+ default:
+ rc = NIX_AF_ERR_AQ_ENQUEUE;
+ spin_unlock(&aq->lock);
+ return rc;
+ }
+
+ /* Submit the instruction to AQ */
+ rc = nix_aq_enqueue_wait(rvu, block, &inst);
+ if (rc) {
+ spin_unlock(&aq->lock);
+ return rc;
+ }
+
+ /* Set RQ/SQ/CQ bitmap if respective queue hw context is enabled */
+ if (req->op == NIX_AQ_INSTOP_INIT) {
+ if (req->ctype == NIX_AQ_CTYPE_RQ && req->rq.ena)
+ __set_bit(req->qidx, pfvf->rq_bmap);
+ if (req->ctype == NIX_AQ_CTYPE_SQ && req->sq.ena)
+ __set_bit(req->qidx, pfvf->sq_bmap);
+ if (req->ctype == NIX_AQ_CTYPE_CQ && req->cq.ena)
+ __set_bit(req->qidx, pfvf->cq_bmap);
+ }
+
+ if (req->op == NIX_AQ_INSTOP_WRITE) {
+ if (req->ctype == NIX_AQ_CTYPE_RQ) {
+ ena = (req->rq.ena & req->rq_mask.ena) |
+ (test_bit(req->qidx, pfvf->rq_bmap) &
+ ~req->rq_mask.ena);
+ if (ena)
+ __set_bit(req->qidx, pfvf->rq_bmap);
+ else
+ __clear_bit(req->qidx, pfvf->rq_bmap);
+ }
+ if (req->ctype == NIX_AQ_CTYPE_SQ) {
+ ena = (req->rq.ena & req->sq_mask.ena) |
+ (test_bit(req->qidx, pfvf->sq_bmap) &
+ ~req->sq_mask.ena);
+ if (ena)
+ __set_bit(req->qidx, pfvf->sq_bmap);
+ else
+ __clear_bit(req->qidx, pfvf->sq_bmap);
+ }
+ if (req->ctype == NIX_AQ_CTYPE_CQ) {
+ ena = (req->rq.ena & req->cq_mask.ena) |
+ (test_bit(req->qidx, pfvf->cq_bmap) &
+ ~req->cq_mask.ena);
+ if (ena)
+ __set_bit(req->qidx, pfvf->cq_bmap);
+ else
+ __clear_bit(req->qidx, pfvf->cq_bmap);
+ }
+ }
+
+ if (rsp) {
+ /* Copy read context into mailbox */
+ if (req->op == NIX_AQ_INSTOP_READ) {
+ if (req->ctype == NIX_AQ_CTYPE_RQ)
+ memcpy(&rsp->rq, ctx,
+ sizeof(struct nix_rq_ctx_s));
+ else if (req->ctype == NIX_AQ_CTYPE_SQ)
+ memcpy(&rsp->sq, ctx,
+ sizeof(struct nix_sq_ctx_s));
+ else if (req->ctype == NIX_AQ_CTYPE_CQ)
+ memcpy(&rsp->cq, ctx,
+ sizeof(struct nix_cq_ctx_s));
+ else if (req->ctype == NIX_AQ_CTYPE_RSS)
+ memcpy(&rsp->rss, ctx,
+ sizeof(struct nix_rsse_s));
+ else if (req->ctype == NIX_AQ_CTYPE_MCE)
+ memcpy(&rsp->mce, ctx,
+ sizeof(struct nix_rx_mce_s));
+ else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
+ memcpy(&rsp->prof, ctx,
+ sizeof(struct nix_bandprof_s));
+ }
+ }
+
+ spin_unlock(&aq->lock);
+ return 0;
+}
+
+static int rvu_nix_verify_aq_ctx(struct rvu *rvu, struct nix_hw *nix_hw,
+ struct nix_aq_enq_req *req, u8 ctype)
+{
+ struct nix_cn10k_aq_enq_req aq_req;
+ struct nix_cn10k_aq_enq_rsp aq_rsp;
+ int rc, word;
+
+ if (req->ctype != NIX_AQ_CTYPE_CQ)
+ return 0;
+
+ rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp,
+ req->hdr.pcifunc, ctype, req->qidx);
+ if (rc) {
+ dev_err(rvu->dev,
+ "%s: Failed to fetch %s%d context of PFFUNC 0x%x\n",
+ __func__, nix_get_ctx_name(ctype), req->qidx,
+ req->hdr.pcifunc);
+ return rc;
+ }
+
+ /* Make copy of original context & mask which are required
+ * for resubmission
+ */
+ memcpy(&aq_req.cq_mask, &req->cq_mask, sizeof(struct nix_cq_ctx_s));
+ memcpy(&aq_req.cq, &req->cq, sizeof(struct nix_cq_ctx_s));
+
+ /* exclude fields which HW can update */
+ aq_req.cq_mask.cq_err = 0;
+ aq_req.cq_mask.wrptr = 0;
+ aq_req.cq_mask.tail = 0;
+ aq_req.cq_mask.head = 0;
+ aq_req.cq_mask.avg_level = 0;
+ aq_req.cq_mask.update_time = 0;
+ aq_req.cq_mask.substream = 0;
+
+ /* Context mask (cq_mask) holds mask value of fields which
+ * are changed in AQ WRITE operation.
+ * for example cq.drop = 0xa;
+ * cq_mask.drop = 0xff;
+ * Below logic performs '&' between cq and cq_mask so that non
+ * updated fields are masked out for request and response
+ * comparison
+ */
+ for (word = 0; word < sizeof(struct nix_cq_ctx_s) / sizeof(u64);
+ word++) {
+ *(u64 *)((u8 *)&aq_rsp.cq + word * 8) &=
+ (*(u64 *)((u8 *)&aq_req.cq_mask + word * 8));
+ *(u64 *)((u8 *)&aq_req.cq + word * 8) &=
+ (*(u64 *)((u8 *)&aq_req.cq_mask + word * 8));
+ }
+
+ if (memcmp(&aq_req.cq, &aq_rsp.cq, sizeof(struct nix_cq_ctx_s)))
+ return NIX_AF_ERR_AQ_CTX_RETRY_WRITE;
+
+ return 0;
+}
+
+static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
+ struct nix_aq_enq_rsp *rsp)
+{
+ struct nix_hw *nix_hw;
+ int err, retries = 5;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
+ if (blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return NIX_AF_ERR_INVALID_NIXBLK;
+
+retry:
+ err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, req, rsp);
+
+ /* HW errata 'AQ Modification to CQ could be discarded on heavy traffic'
+ * As a workaround, perform a CQ context read after each AQ write. If the
+ * read shows the AQ write was not applied, perform the AQ write again.
+ */
+ if (!err && req->op == NIX_AQ_INSTOP_WRITE) {
+ err = rvu_nix_verify_aq_ctx(rvu, nix_hw, req, NIX_AQ_CTYPE_CQ);
+ if (err == NIX_AF_ERR_AQ_CTX_RETRY_WRITE) {
+ if (retries--)
+ goto retry;
+ else
+ return NIX_AF_ERR_CQ_CTX_WRITE_ERR;
+ }
+ }
+
+ return err;
+}
+
+static const char *nix_get_ctx_name(int ctype)
+{
+ switch (ctype) {
+ case NIX_AQ_CTYPE_CQ:
+ return "CQ";
+ case NIX_AQ_CTYPE_SQ:
+ return "SQ";
+ case NIX_AQ_CTYPE_RQ:
+ return "RQ";
+ case NIX_AQ_CTYPE_RSS:
+ return "RSS";
+ }
+ return "";
+}
+
+static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
+ struct nix_aq_enq_req aq_req;
+ unsigned long *bmap;
+ int qidx, q_cnt = 0;
+ int err = 0, rc;
+
+ if (!pfvf->cq_ctx || !pfvf->sq_ctx || !pfvf->rq_ctx)
+ return NIX_AF_ERR_AQ_ENQUEUE;
+
+ memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
+ aq_req.hdr.pcifunc = req->hdr.pcifunc;
+
+ if (req->ctype == NIX_AQ_CTYPE_CQ) {
+ aq_req.cq.ena = 0;
+ aq_req.cq_mask.ena = 1;
+ aq_req.cq.bp_ena = 0;
+ aq_req.cq_mask.bp_ena = 1;
+ q_cnt = pfvf->cq_ctx->qsize;
+ bmap = pfvf->cq_bmap;
+ }
+ if (req->ctype == NIX_AQ_CTYPE_SQ) {
+ aq_req.sq.ena = 0;
+ aq_req.sq_mask.ena = 1;
+ q_cnt = pfvf->sq_ctx->qsize;
+ bmap = pfvf->sq_bmap;
+ }
+ if (req->ctype == NIX_AQ_CTYPE_RQ) {
+ aq_req.rq.ena = 0;
+ aq_req.rq_mask.ena = 1;
+ q_cnt = pfvf->rq_ctx->qsize;
+ bmap = pfvf->rq_bmap;
+ }
+
+ aq_req.ctype = req->ctype;
+ aq_req.op = NIX_AQ_INSTOP_WRITE;
+
+ for (qidx = 0; qidx < q_cnt; qidx++) {
+ if (!test_bit(qidx, bmap))
+ continue;
+ aq_req.qidx = qidx;
+ rc = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL);
+ if (rc) {
+ err = rc;
+ dev_err(rvu->dev, "Failed to disable %s:%d context\n",
+ nix_get_ctx_name(req->ctype), qidx);
+ }
+ }
+
+ return err;
+}
+
+#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
+static int nix_lf_hwctx_lockdown(struct rvu *rvu, struct nix_aq_enq_req *req)
+{
+ struct nix_aq_enq_req lock_ctx_req;
+ int err;
+
+ if (req->op != NIX_AQ_INSTOP_INIT)
+ return 0;
+
+ if (req->ctype == NIX_AQ_CTYPE_MCE ||
+ req->ctype == NIX_AQ_CTYPE_DYNO)
+ return 0;
+
+ memset(&lock_ctx_req, 0, sizeof(struct nix_aq_enq_req));
+ lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc;
+ lock_ctx_req.ctype = req->ctype;
+ lock_ctx_req.op = NIX_AQ_INSTOP_LOCK;
+ lock_ctx_req.qidx = req->qidx;
+ err = rvu_nix_aq_enq_inst(rvu, &lock_ctx_req, NULL);
+ if (err)
+ dev_err(rvu->dev,
+ "PFUNC 0x%x: Failed to lock NIX %s:%d context\n",
+ req->hdr.pcifunc,
+ nix_get_ctx_name(req->ctype), req->qidx);
+ return err;
+}
+
+int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
+ struct nix_aq_enq_req *req,
+ struct nix_aq_enq_rsp *rsp)
+{
+ int err;
+
+ err = rvu_nix_aq_enq_inst(rvu, req, rsp);
+ if (!err)
+ err = nix_lf_hwctx_lockdown(rvu, req);
+ return err;
+}
+#else
+
+int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
+ struct nix_aq_enq_req *req,
+ struct nix_aq_enq_rsp *rsp)
+{
+ return rvu_nix_aq_enq_inst(rvu, req, rsp);
+}
+#endif
+/* CN10K mbox handler */
+int rvu_mbox_handler_nix_cn10k_aq_enq(struct rvu *rvu,
+ struct nix_cn10k_aq_enq_req *req,
+ struct nix_cn10k_aq_enq_rsp *rsp)
+{
+ return rvu_nix_aq_enq_inst(rvu, (struct nix_aq_enq_req *)req,
+ (struct nix_aq_enq_rsp *)rsp);
+}
+
+int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu,
+ struct hwctx_disable_req *req,
+ struct msg_rsp *rsp)
+{
+ return nix_lf_hwctx_disable(rvu, req);
+}
+
+int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
+ struct nix_lf_alloc_req *req,
+ struct nix_lf_alloc_rsp *rsp)
+{
+ int nixlf, qints, hwctx_size, intf, err, rc = 0;
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_block *block;
+ struct rvu_pfvf *pfvf;
+ u64 cfg, ctx_cfg;
+ int blkaddr;
+
+ if (!req->rq_cnt || !req->sq_cnt || !req->cq_cnt)
+ return NIX_AF_ERR_PARAM;
+
+ if (req->way_mask)
+ req->way_mask &= 0xFFFF;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (!pfvf->nixlf || blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ block = &hw->block[blkaddr];
+ nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
+ if (nixlf < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ /* Check if requested 'NIXLF <=> NPALF' mapping is valid */
+ if (req->npa_func) {
+ /* If default, use 'this' NIXLF's PFFUNC */
+ if (req->npa_func == RVU_DEFAULT_PF_FUNC)
+ req->npa_func = pcifunc;
+ if (!is_pffunc_map_valid(rvu, req->npa_func, BLKTYPE_NPA))
+ return NIX_AF_INVAL_NPA_PF_FUNC;
+ }
+
+ /* Check if requested 'NIXLF <=> SSOLF' mapping is valid */
+ if (req->sso_func) {
+ /* If default, use 'this' NIXLF's PFFUNC */
+ if (req->sso_func == RVU_DEFAULT_PF_FUNC)
+ req->sso_func = pcifunc;
+ if (!is_pffunc_map_valid(rvu, req->sso_func, BLKTYPE_SSO))
+ return NIX_AF_INVAL_SSO_PF_FUNC;
+ }
+
+ /* If RSS is being enabled, check if requested config is valid.
+ * RSS table size should be power of two, otherwise
+ * RSS_GRP::OFFSET + adder might go beyond that group or
+ * won't be able to use entire table.
+ */
+ if (req->rss_sz && (req->rss_sz > MAX_RSS_INDIR_TBL_SIZE ||
+ !is_power_of_2(req->rss_sz)))
+ return NIX_AF_ERR_RSS_SIZE_INVALID;
+
+ if (req->rss_sz &&
+ (!req->rss_grps || req->rss_grps > MAX_RSS_GROUPS))
+ return NIX_AF_ERR_RSS_GRPS_INVALID;
+
+ /* Reset this NIX LF */
+ err = rvu_lf_reset(rvu, block, nixlf);
+ if (err) {
+ dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
+ block->addr - BLKADDR_NIX0, nixlf);
+ return NIX_AF_ERR_LF_RESET;
+ }
+
+ ctx_cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST3);
+
+ /* Alloc NIX RQ HW context memory and config the base */
+ hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF);
+ err = qmem_alloc(rvu->dev, &pfvf->rq_ctx, req->rq_cnt, hwctx_size);
+ if (err)
+ goto free_mem;
+
+ pfvf->rq_bmap = kcalloc(req->rq_cnt, sizeof(long), GFP_KERNEL);
+ if (!pfvf->rq_bmap)
+ goto free_mem;
+
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_BASE(nixlf),
+ (u64)pfvf->rq_ctx->iova);
+
+ /* Set caching and queue count in HW */
+ cfg = BIT_ULL(36) | (req->rq_cnt - 1) | req->way_mask << 20;
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_CFG(nixlf), cfg);
+
+ /* Alloc NIX SQ HW context memory and config the base */
+ hwctx_size = 1UL << (ctx_cfg & 0xF);
+ err = qmem_alloc(rvu->dev, &pfvf->sq_ctx, req->sq_cnt, hwctx_size);
+ if (err)
+ goto free_mem;
+
+ pfvf->sq_bmap = kcalloc(req->sq_cnt, sizeof(long), GFP_KERNEL);
+ if (!pfvf->sq_bmap)
+ goto free_mem;
+
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_BASE(nixlf),
+ (u64)pfvf->sq_ctx->iova);
+
+ cfg = BIT_ULL(36) | (req->sq_cnt - 1) | req->way_mask << 20;
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_CFG(nixlf), cfg);
+
+ /* Alloc NIX CQ HW context memory and config the base */
+ hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF);
+ err = qmem_alloc(rvu->dev, &pfvf->cq_ctx, req->cq_cnt, hwctx_size);
+ if (err)
+ goto free_mem;
+
+ pfvf->cq_bmap = kcalloc(req->cq_cnt, sizeof(long), GFP_KERNEL);
+ if (!pfvf->cq_bmap)
+ goto free_mem;
+
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_BASE(nixlf),
+ (u64)pfvf->cq_ctx->iova);
+
+ cfg = BIT_ULL(36) | (req->cq_cnt - 1) | req->way_mask << 20;
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_CFG(nixlf), cfg);
+
+ /* Initialize receive side scaling (RSS) */
+ hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF);
+ err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf, req->rss_sz,
+ req->rss_grps, hwctx_size, req->way_mask,
+ !!(req->flags & NIX_LF_RSS_TAG_LSB_AS_ADDER));
+ if (err)
+ goto free_mem;
+
+ /* Alloc memory for CQINT's HW contexts */
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
+ qints = (cfg >> 24) & 0xFFF;
+ hwctx_size = 1UL << ((ctx_cfg >> 24) & 0xF);
+ err = qmem_alloc(rvu->dev, &pfvf->cq_ints_ctx, qints, hwctx_size);
+ if (err)
+ goto free_mem;
+
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf),
+ (u64)pfvf->cq_ints_ctx->iova);
+
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf),
+ BIT_ULL(36) | req->way_mask << 20);
+
+ /* Alloc memory for QINT's HW contexts */
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
+ qints = (cfg >> 12) & 0xFFF;
+ hwctx_size = 1UL << ((ctx_cfg >> 20) & 0xF);
+ err = qmem_alloc(rvu->dev, &pfvf->nix_qints_ctx, qints, hwctx_size);
+ if (err)
+ goto free_mem;
+
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf),
+ (u64)pfvf->nix_qints_ctx->iova);
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf),
+ BIT_ULL(36) | req->way_mask << 20);
+
+ /* Setup VLANX TPIDs.
+ * Use VLAN1 for 802.1Q
+ * and VLAN0 for 802.1AD.
+ */
+ cfg = (0x8100ULL << 16) | 0x88A8ULL;
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);
+
+ /* Enable LMTST for this NIX LF */
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG2(nixlf), BIT_ULL(0));
+
+ /* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC */
+ if (req->npa_func)
+ cfg = req->npa_func;
+ if (req->sso_func)
+ cfg |= (u64)req->sso_func << 16;
+
+ cfg |= (u64)req->xqe_sz << 33;
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_CFG(nixlf), cfg);
+
+ /* Config Rx pkt length, csum checks and apad enable / disable */
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg);
+
+ /* Configure pkind for TX parse config */
+ cfg = NPC_TX_DEF_PKIND;
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), cfg);
+
+ intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
+ if (is_sdp_pfvf(pcifunc))
+ intf = NIX_INTF_TYPE_SDP;
+
+ err = nix_interface_init(rvu, pcifunc, intf, nixlf, rsp,
+ !!(req->flags & NIX_LF_LBK_BLK_SEL));
+ if (err)
+ goto free_mem;
+
+ /* Disable NPC entries as NIXLF's contexts are not initialized yet */
+ rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
+
+ /* Configure RX VTAG Type 7 (strip) for vf vlan */
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, NIX_AF_LFX_RX_VTAG_TYPE7),
+ VTAGSIZE_T4 | VTAG_STRIP);
+
+ goto exit;
+
+free_mem:
+ nix_ctx_free(rvu, pfvf);
+ rc = -ENOMEM;
+
+exit:
+ /* Set macaddr of this PF/VF */
+ ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);
+
+ /* set SQB size info */
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQ_CONST);
+ rsp->sqb_size = (cfg >> 34) & 0xFFFF;
+ rsp->rx_chan_base = pfvf->rx_chan_base;
+ rsp->tx_chan_base = pfvf->tx_chan_base;
+ rsp->rx_chan_cnt = pfvf->rx_chan_cnt;
+ rsp->tx_chan_cnt = pfvf->tx_chan_cnt;
+ rsp->lso_tsov4_idx = NIX_LSO_FORMAT_IDX_TSOV4;
+ rsp->lso_tsov6_idx = NIX_LSO_FORMAT_IDX_TSOV6;
+ /* Get HW supported stat count */
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
+ rsp->lf_rx_stats = ((cfg >> 32) & 0xFF);
+ rsp->lf_tx_stats = ((cfg >> 24) & 0xFF);
+ /* Get count of CQ IRQs and error IRQs supported per LF */
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
+ rsp->qints = ((cfg >> 12) & 0xFFF);
+ rsp->cints = ((cfg >> 24) & 0xFFF);
+ rsp->cgx_links = hw->cgx_links;
+ rsp->lbk_links = hw->lbk_links;
+ rsp->sdp_links = hw->sdp_links;
+
+ return rc;
+}
+
+int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct nix_lf_free_req *req,
+ struct msg_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_block *block;
+ int blkaddr, nixlf, err;
+ struct rvu_pfvf *pfvf;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (!pfvf->nixlf || blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ block = &hw->block[blkaddr];
+ nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
+ if (nixlf < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ if (req->flags & NIX_LF_DISABLE_FLOWS)
+ rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
+ else
+ rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf);
+
+ /* Free any tx vtag def entries used by this NIX LF */
+ if (!(req->flags & NIX_LF_DONT_FREE_TX_VTAG))
+ nix_free_tx_vtag_entries(rvu, pcifunc);
+
+ nix_interface_deinit(rvu, pcifunc, nixlf);
+
+ /* Reset this NIX LF */
+ err = rvu_lf_reset(rvu, block, nixlf);
+ if (err) {
+ dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
+ block->addr - BLKADDR_NIX0, nixlf);
+ return NIX_AF_ERR_LF_RESET;
+ }
+
+ nix_ctx_free(rvu, pfvf);
+
+ return 0;
+}
+
+int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu,
+ struct nix_mark_format_cfg *req,
+ struct nix_mark_format_cfg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct nix_hw *nix_hw;
+ struct rvu_pfvf *pfvf;
+ int blkaddr, rc;
+ u32 cfg;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (!pfvf->nixlf || blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return NIX_AF_ERR_INVALID_NIXBLK;
+
+ cfg = (((u32)req->offset & 0x7) << 16) |
+ (((u32)req->y_mask & 0xF) << 12) |
+ (((u32)req->y_val & 0xF) << 8) |
+ (((u32)req->r_mask & 0xF) << 4) | ((u32)req->r_val & 0xF);
+
+ rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfg);
+ if (rc < 0) {
+ dev_err(rvu->dev, "No mark_format_ctl for (pf:%d, vf:%d)",
+ rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
+ return NIX_AF_ERR_MARK_CFG_FAIL;
+ }
+
+ rsp->mark_format_idx = rc;
+ return 0;
+}
+
+/* Handle shaper update specially for a few silicon revisions */
+static bool
+handle_txschq_shaper_update(struct rvu *rvu, int blkaddr, int nixlf,
+ int lvl, u64 reg, u64 regval)
+{
+ u64 regbase, oldval, sw_xoff = 0;
+ u64 dbgval, md_debug0 = 0;
+ unsigned long poll_tmo;
+	bool rate_reg = false;
+ u32 schq;
+
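+	/* 'reg' encodes the CSR offset in bits [15:0] and the scheduler
+	 * queue index in bits [25:16] (see NIX_TX_SCHQ_MASK).
+	 */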
+ regbase = reg & 0xFFFF;
+ schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
+
+ /* Check for rate register */
+ switch (lvl) {
+ case NIX_TXSCH_LVL_TL1:
+ md_debug0 = NIX_AF_TL1X_MD_DEBUG0(schq);
+ sw_xoff = NIX_AF_TL1X_SW_XOFF(schq);
+
+		rate_reg = (regbase == NIX_AF_TL1X_CIR(0));
+ break;
+ case NIX_TXSCH_LVL_TL2:
+ md_debug0 = NIX_AF_TL2X_MD_DEBUG0(schq);
+ sw_xoff = NIX_AF_TL2X_SW_XOFF(schq);
+
+ rate_reg = (regbase == NIX_AF_TL2X_CIR(0) ||
+ regbase == NIX_AF_TL2X_PIR(0));
+ break;
+ case NIX_TXSCH_LVL_TL3:
+ md_debug0 = NIX_AF_TL3X_MD_DEBUG0(schq);
+ sw_xoff = NIX_AF_TL3X_SW_XOFF(schq);
+
+ rate_reg = (regbase == NIX_AF_TL3X_CIR(0) ||
+ regbase == NIX_AF_TL3X_PIR(0));
+ break;
+ case NIX_TXSCH_LVL_TL4:
+ md_debug0 = NIX_AF_TL4X_MD_DEBUG0(schq);
+ sw_xoff = NIX_AF_TL4X_SW_XOFF(schq);
+
+ rate_reg = (regbase == NIX_AF_TL4X_CIR(0) ||
+ regbase == NIX_AF_TL4X_PIR(0));
+ break;
+ case NIX_TXSCH_LVL_MDQ:
+ sw_xoff = NIX_AF_MDQX_SW_XOFF(schq);
+ rate_reg = (regbase == NIX_AF_MDQX_CIR(0) ||
+ regbase == NIX_AF_MDQX_PIR(0));
+ break;
+ }
+
+ if (!rate_reg)
+ return false;
+
+ /* Nothing special to do when state is not toggled */
+ oldval = rvu_read64(rvu, blkaddr, reg);
+ if ((oldval & 0x1) == (regval & 0x1)) {
+ rvu_write64(rvu, blkaddr, reg, regval);
+ return true;
+ }
+
+ /* PIR/CIR disable */
+ if (!(regval & 0x1)) {
+ rvu_write64(rvu, blkaddr, sw_xoff, 1);
+ rvu_write64(rvu, blkaddr, reg, 0);
+ udelay(4);
+ rvu_write64(rvu, blkaddr, sw_xoff, 0);
+ return true;
+ }
+
+ /* PIR/CIR enable */
+ rvu_write64(rvu, blkaddr, sw_xoff, 1);
+ if (md_debug0) {
+ poll_tmo = jiffies + usecs_to_jiffies(10000);
+ /* Wait until VLD(bit32) == 1 or C_CON(bit48) == 0 */
+ do {
+ if (time_after(jiffies, poll_tmo)) {
+ dev_err(rvu->dev,
+ "NIXLF%d: TLX%u(lvl %u) CIR/PIR enable failed\n",
+ nixlf, schq, lvl);
+ goto exit;
+ }
+ usleep_range(1, 5);
+ dbgval = rvu_read64(rvu, blkaddr, md_debug0);
+ } while (!(dbgval & BIT_ULL(32)) && (dbgval & BIT_ULL(48)));
+ }
+ rvu_write64(rvu, blkaddr, reg, regval);
+exit:
+ rvu_write64(rvu, blkaddr, sw_xoff, 0);
+ return true;
+}
+
+static void nix_reset_tx_schedule(struct rvu *rvu, int blkaddr,
+ int lvl, int schq)
+{
+ u64 tlx_parent = 0, tlx_schedule = 0;
+
+ switch (lvl) {
+ case NIX_TXSCH_LVL_TL2:
+ tlx_parent = NIX_AF_TL2X_PARENT(schq);
+ tlx_schedule = NIX_AF_TL2X_SCHEDULE(schq);
+ break;
+ case NIX_TXSCH_LVL_TL3:
+ tlx_parent = NIX_AF_TL3X_PARENT(schq);
+ tlx_schedule = NIX_AF_TL3X_SCHEDULE(schq);
+ break;
+ case NIX_TXSCH_LVL_TL4:
+ tlx_parent = NIX_AF_TL4X_PARENT(schq);
+ tlx_schedule = NIX_AF_TL4X_SCHEDULE(schq);
+ break;
+ case NIX_TXSCH_LVL_MDQ:
+ /* no need to reset SMQ_CFG as HW clears this CSR
+ * on SMQ flush
+ */
+ tlx_parent = NIX_AF_MDQX_PARENT(schq);
+ tlx_schedule = NIX_AF_MDQX_SCHEDULE(schq);
+ break;
+ default:
+ return;
+ }
+
+ if (tlx_parent)
+ rvu_write64(rvu, blkaddr, tlx_parent, 0x0);
+
+ if (tlx_schedule)
+ rvu_write64(rvu, blkaddr, tlx_schedule, 0x0);
+}
+
+/* Disable shaping of pkts by a scheduler queue
+ * at a given scheduler level.
+ */
+static void nix_reset_tx_shaping(struct rvu *rvu, int blkaddr,
+ int nixlf, int lvl, int schq)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u64 cir_reg = 0, pir_reg = 0;
+ u64 cfg;
+
+ switch (lvl) {
+ case NIX_TXSCH_LVL_TL1:
+ cir_reg = NIX_AF_TL1X_CIR(schq);
+ pir_reg = 0; /* PIR not available at TL1 */
+ break;
+ case NIX_TXSCH_LVL_TL2:
+ cir_reg = NIX_AF_TL2X_CIR(schq);
+ pir_reg = NIX_AF_TL2X_PIR(schq);
+ break;
+ case NIX_TXSCH_LVL_TL3:
+ cir_reg = NIX_AF_TL3X_CIR(schq);
+ pir_reg = NIX_AF_TL3X_PIR(schq);
+ break;
+ case NIX_TXSCH_LVL_TL4:
+ cir_reg = NIX_AF_TL4X_CIR(schq);
+ pir_reg = NIX_AF_TL4X_PIR(schq);
+ break;
+ case NIX_TXSCH_LVL_MDQ:
+ cir_reg = NIX_AF_MDQX_CIR(schq);
+ pir_reg = NIX_AF_MDQX_PIR(schq);
+ break;
+ }
+
+ /* Shaper state toggle needs wait/poll */
+ if (hw->cap.nix_shaper_toggle_wait) {
+ if (cir_reg)
+ handle_txschq_shaper_update(rvu, blkaddr, nixlf,
+ lvl, cir_reg, 0);
+ if (pir_reg)
+ handle_txschq_shaper_update(rvu, blkaddr, nixlf,
+ lvl, pir_reg, 0);
+ return;
+ }
+
+ if (!cir_reg)
+ return;
+ cfg = rvu_read64(rvu, blkaddr, cir_reg);
+ rvu_write64(rvu, blkaddr, cir_reg, cfg & ~BIT_ULL(0));
+
+ if (!pir_reg)
+ return;
+ cfg = rvu_read64(rvu, blkaddr, pir_reg);
+ rvu_write64(rvu, blkaddr, pir_reg, cfg & ~BIT_ULL(0));
+}
+
+static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr,
+ int lvl, int schq)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ int link_level;
+ int link;
+
+ if (lvl >= hw->cap.nix_tx_aggr_lvl)
+ return;
+
+ /* Reset TL4's SDP link config */
+ if (lvl == NIX_TXSCH_LVL_TL4)
+ rvu_write64(rvu, blkaddr, NIX_AF_TL4X_SDP_LINK_CFG(schq), 0x00);
+
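+	/* NIX_AF_PSE_CHANNEL_LEVEL[0] selects whether link config
+	 * resides at TL3 or TL2.
+	 */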
+ link_level = rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
+ NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
+ if (lvl != link_level)
+ return;
+
+	/* Reset TL3's or TL2's CGX or LBK link config */
+ for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++)
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00);
+}
+
+static void nix_clear_tx_xoff(struct rvu *rvu, int blkaddr,
+ int lvl, int schq)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u64 reg;
+
+ /* Skip this if shaping is not supported */
+ if (!hw->cap.nix_shaping)
+ return;
+
+ /* Clear level specific SW_XOFF */
+ switch (lvl) {
+ case NIX_TXSCH_LVL_TL1:
+ reg = NIX_AF_TL1X_SW_XOFF(schq);
+ break;
+ case NIX_TXSCH_LVL_TL2:
+ reg = NIX_AF_TL2X_SW_XOFF(schq);
+ break;
+ case NIX_TXSCH_LVL_TL3:
+ reg = NIX_AF_TL3X_SW_XOFF(schq);
+ break;
+ case NIX_TXSCH_LVL_TL4:
+ reg = NIX_AF_TL4X_SW_XOFF(schq);
+ break;
+ case NIX_TXSCH_LVL_MDQ:
+ reg = NIX_AF_MDQX_SW_XOFF(schq);
+ break;
+ default:
+ return;
+ }
+
+ rvu_write64(rvu, blkaddr, reg, 0x0);
+}
+
+static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ int pf = rvu_get_pf(pcifunc);
+ u8 cgx_id = 0, lmac_id = 0;
+
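+	/* Link indices are laid out as CGX LMAC links first, then
+	 * LBK links, then the SDP link.
+	 */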
+	if (is_afvf(pcifunc)) { /* LBK links */
+ return hw->cgx_links;
+ } else if (is_pf_cgxmapped(rvu, pf)) {
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ return (cgx_id * hw->lmac_per_cgx) + lmac_id;
+ }
+
+ /* SDP link */
+ return hw->cgx_links + hw->lbk_links;
+}
+
+static void nix_get_txschq_range(struct rvu *rvu, u16 pcifunc,
+ int link, int *start, int *end)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ int pf = rvu_get_pf(pcifunc);
+
+ if (is_afvf(pcifunc)) { /* LBK links */
+ *start = hw->cap.nix_txsch_per_cgx_lmac * link;
+ *end = *start + hw->cap.nix_txsch_per_lbk_lmac;
+ } else if (is_pf_cgxmapped(rvu, pf)) { /* CGX links */
+ *start = hw->cap.nix_txsch_per_cgx_lmac * link;
+ *end = *start + hw->cap.nix_txsch_per_cgx_lmac;
+ } else { /* SDP link */
+ *start = (hw->cap.nix_txsch_per_cgx_lmac * hw->cgx_links) +
+ (hw->cap.nix_txsch_per_lbk_lmac * hw->lbk_links);
+ *end = *start + hw->cap.nix_txsch_per_sdp_lmac;
+ }
+}
+
+static int nix_check_txschq_alloc_req(struct rvu *rvu, int lvl, u16 pcifunc,
+ struct nix_hw *nix_hw,
+ struct nix_txsch_alloc_req *req)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ int schq, req_schq, free_cnt;
+ struct nix_txsch *txsch;
+ int link, start, end;
+
+ txsch = &nix_hw->txsch[lvl];
+ req_schq = req->schq_contig[lvl] + req->schq[lvl];
+
+ if (!req_schq)
+ return 0;
+
+ link = nix_get_tx_link(rvu, pcifunc);
+
+ /* For traffic aggregating scheduler level, one queue is enough */
+ if (lvl >= hw->cap.nix_tx_aggr_lvl) {
+ if (req_schq != 1)
+ return NIX_AF_ERR_TLX_ALLOC_FAIL;
+ return 0;
+ }
+
+	/* Get free SCHQ count and check if request can be accommodated */
+ if (hw->cap.nix_fixed_txschq_mapping) {
+ nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
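+		/* With fixed mapping each PF_FUNC owns exactly one queue
+		 * per level, at a fixed index within its link's range.
+		 */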
+ schq = start + (pcifunc & RVU_PFVF_FUNC_MASK);
+ if (end <= txsch->schq.max && schq < end &&
+ !test_bit(schq, txsch->schq.bmap))
+ free_cnt = 1;
+ else
+ free_cnt = 0;
+ } else {
+ free_cnt = rvu_rsrc_free_count(&txsch->schq);
+ }
+
+ if (free_cnt < req_schq || req->schq[lvl] > MAX_TXSCHQ_PER_FUNC ||
+ req->schq_contig[lvl] > MAX_TXSCHQ_PER_FUNC)
+ return NIX_AF_ERR_TLX_ALLOC_FAIL;
+
+ /* If contiguous queues are needed, check for availability */
+ if (!hw->cap.nix_fixed_txschq_mapping && req->schq_contig[lvl] &&
+ !rvu_rsrc_check_contig(&txsch->schq, req->schq_contig[lvl]))
+ return NIX_AF_ERR_TLX_ALLOC_FAIL;
+
+ return 0;
+}
+
+static void nix_txsch_alloc(struct rvu *rvu, struct nix_txsch *txsch,
+ struct nix_txsch_alloc_rsp *rsp,
+ int lvl, int start, int end)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = rsp->hdr.pcifunc;
+ int idx, schq;
+
+	/* For traffic aggregating levels, queue alloc is based
+	 * on the transmit link to which the PF_FUNC is mapped.
+ */
+ if (lvl >= hw->cap.nix_tx_aggr_lvl) {
+ /* A single TL queue is allocated */
+ if (rsp->schq_contig[lvl]) {
+ rsp->schq_contig[lvl] = 1;
+ rsp->schq_contig_list[lvl][0] = start;
+ }
+
+		/* Both contig and non-contig reqs don't make sense here */
+ if (rsp->schq_contig[lvl])
+ rsp->schq[lvl] = 0;
+
+ if (rsp->schq[lvl]) {
+ rsp->schq[lvl] = 1;
+ rsp->schq_list[lvl][0] = start;
+ }
+ return;
+ }
+
+	/* Adjust the queue request count if HW supports
+	 * only one queue per level (fixed txschq mapping).
+ */
+ if (hw->cap.nix_fixed_txschq_mapping) {
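+		/* Fixed mapping: the only candidate queue is start + func
+		 * index; drop the request if it's out of range or in use.
+		 */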
+ idx = pcifunc & RVU_PFVF_FUNC_MASK;
+ schq = start + idx;
+ if (idx >= (end - start) || test_bit(schq, txsch->schq.bmap)) {
+ rsp->schq_contig[lvl] = 0;
+ rsp->schq[lvl] = 0;
+ return;
+ }
+
+ if (rsp->schq_contig[lvl]) {
+ rsp->schq_contig[lvl] = 1;
+ set_bit(schq, txsch->schq.bmap);
+ rsp->schq_contig_list[lvl][0] = schq;
+ rsp->schq[lvl] = 0;
+ } else if (rsp->schq[lvl]) {
+ rsp->schq[lvl] = 1;
+ set_bit(schq, txsch->schq.bmap);
+ rsp->schq_list[lvl][0] = schq;
+ }
+ return;
+ }
+
+	/* Allocate the requested contiguous queue indices first */
+ if (rsp->schq_contig[lvl]) {
+ schq = bitmap_find_next_zero_area(txsch->schq.bmap,
+ txsch->schq.max, start,
+ rsp->schq_contig[lvl], 0);
+ if (schq >= end)
+ rsp->schq_contig[lvl] = 0;
+ for (idx = 0; idx < rsp->schq_contig[lvl]; idx++) {
+ set_bit(schq, txsch->schq.bmap);
+ rsp->schq_contig_list[lvl][idx] = schq;
+ schq++;
+ }
+ }
+
+ /* Allocate non-contiguous queue indices */
+ if (rsp->schq[lvl]) {
+ idx = 0;
+ for (schq = start; schq < end; schq++) {
+ if (!test_bit(schq, txsch->schq.bmap)) {
+ set_bit(schq, txsch->schq.bmap);
+ rsp->schq_list[lvl][idx++] = schq;
+ }
+ if (idx == rsp->schq[lvl])
+ break;
+ }
+ /* Update how many were allocated */
+ rsp->schq[lvl] = idx;
+ }
+}
+
+int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
+ struct nix_txsch_alloc_req *req,
+ struct nix_txsch_alloc_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ int link, blkaddr, rc = 0;
+ int lvl, idx, start, end;
+ struct nix_txsch *txsch;
+ struct nix_hw *nix_hw;
+ u32 *pfvf_map;
+ int nixlf;
+ u16 schq;
+
+ rc = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
+ if (rc)
+ return rc;
+
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return NIX_AF_ERR_INVALID_NIXBLK;
+
+ mutex_lock(&rvu->rsrc_lock);
+
+ /* Check if request is valid as per HW capabilities
+	 * and can be accommodated.
+ */
+ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
+ rc = nix_check_txschq_alloc_req(rvu, lvl, pcifunc, nix_hw, req);
+ if (rc)
+ goto err;
+ }
+
+ /* Allocate requested Tx scheduler queues */
+ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
+ txsch = &nix_hw->txsch[lvl];
+ pfvf_map = txsch->pfvf_map;
+
+ if (!req->schq[lvl] && !req->schq_contig[lvl])
+ continue;
+
+ rsp->schq[lvl] = req->schq[lvl];
+ rsp->schq_contig[lvl] = req->schq_contig[lvl];
+
+ link = nix_get_tx_link(rvu, pcifunc);
+
+ if (lvl >= hw->cap.nix_tx_aggr_lvl) {
+ start = link;
+ end = link;
+ } else if (hw->cap.nix_fixed_txschq_mapping) {
+ nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
+ } else {
+ start = 0;
+ end = txsch->schq.max;
+ }
+
+ nix_txsch_alloc(rvu, txsch, rsp, lvl, start, end);
+
+ /* Reset queue config */
+ for (idx = 0; idx < req->schq_contig[lvl]; idx++) {
+ schq = rsp->schq_contig_list[lvl][idx];
+ if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
+ NIX_TXSCHQ_CFG_DONE))
+ pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
+ nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
+ nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
+ nix_reset_tx_schedule(rvu, blkaddr, lvl, schq);
+ }
+
+ for (idx = 0; idx < req->schq[lvl]; idx++) {
+ schq = rsp->schq_list[lvl][idx];
+ if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
+ NIX_TXSCHQ_CFG_DONE))
+ pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
+ nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
+ nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
+ nix_reset_tx_schedule(rvu, blkaddr, lvl, schq);
+ }
+ }
+
+ rsp->aggr_level = hw->cap.nix_tx_aggr_lvl;
+ rsp->aggr_lvl_rr_prio = TXSCH_TL1_DFLT_RR_PRIO;
+ rsp->link_cfg_lvl = rvu_read64(rvu, blkaddr,
+ NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
+ NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
+ goto exit;
+err:
+ rc = NIX_AF_ERR_TLX_ALLOC_FAIL;
+exit:
+ mutex_unlock(&rvu->rsrc_lock);
+ return rc;
+}
+
+static void nix_smq_flush_fill_ctx(struct rvu *rvu, int blkaddr, int smq,
+ struct nix_smq_flush_ctx *smq_flush_ctx)
+{
+ struct nix_smq_tree_ctx *smq_tree_ctx;
+ u64 parent_off, regval;
+ u16 schq;
+ int lvl;
+
+ smq_flush_ctx->smq = smq;
+
+ schq = smq;
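+	/* Walk up the scheduler tree from the SMQ to TL1, saving each
+	 * level's CIR/PIR offsets and values and following the PARENT
+	 * registers to the next level's queue index.
+	 */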
+ for (lvl = NIX_TXSCH_LVL_SMQ; lvl <= NIX_TXSCH_LVL_TL1; lvl++) {
+ smq_tree_ctx = &smq_flush_ctx->smq_tree_ctx[lvl];
+ if (lvl == NIX_TXSCH_LVL_TL1) {
+ smq_flush_ctx->tl1_schq = schq;
+ smq_tree_ctx->cir_off = NIX_AF_TL1X_CIR(schq);
+ smq_tree_ctx->pir_off = 0;
+ smq_tree_ctx->pir_val = 0;
+ parent_off = 0;
+ } else if (lvl == NIX_TXSCH_LVL_TL2) {
+ smq_flush_ctx->tl2_schq = schq;
+ smq_tree_ctx->cir_off = NIX_AF_TL2X_CIR(schq);
+ smq_tree_ctx->pir_off = NIX_AF_TL2X_PIR(schq);
+ parent_off = NIX_AF_TL2X_PARENT(schq);
+ } else if (lvl == NIX_TXSCH_LVL_TL3) {
+ smq_tree_ctx->cir_off = NIX_AF_TL3X_CIR(schq);
+ smq_tree_ctx->pir_off = NIX_AF_TL3X_PIR(schq);
+ parent_off = NIX_AF_TL3X_PARENT(schq);
+ } else if (lvl == NIX_TXSCH_LVL_TL4) {
+ smq_tree_ctx->cir_off = NIX_AF_TL4X_CIR(schq);
+ smq_tree_ctx->pir_off = NIX_AF_TL4X_PIR(schq);
+ parent_off = NIX_AF_TL4X_PARENT(schq);
+ } else if (lvl == NIX_TXSCH_LVL_MDQ) {
+ smq_tree_ctx->cir_off = NIX_AF_MDQX_CIR(schq);
+ smq_tree_ctx->pir_off = NIX_AF_MDQX_PIR(schq);
+ parent_off = NIX_AF_MDQX_PARENT(schq);
+ }
+ /* save cir/pir register values */
+ smq_tree_ctx->cir_val = rvu_read64(rvu, blkaddr, smq_tree_ctx->cir_off);
+ if (smq_tree_ctx->pir_off)
+ smq_tree_ctx->pir_val = rvu_read64(rvu, blkaddr, smq_tree_ctx->pir_off);
+
+ /* get parent txsch node */
+ if (parent_off) {
+ regval = rvu_read64(rvu, blkaddr, parent_off);
+ schq = (regval >> 16) & 0x1FF;
+ }
+ }
+}
+
+static void nix_smq_flush_enadis_xoff(struct rvu *rvu, int blkaddr,
+ struct nix_smq_flush_ctx *smq_flush_ctx, bool enable)
+{
+ struct nix_txsch *txsch;
+ struct nix_hw *nix_hw;
+ u64 regoff;
+ int tl2;
+
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return;
+
+	/* loop through all TL2s belonging to the same PF */
+ txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2];
+ for (tl2 = 0; tl2 < txsch->schq.max; tl2++) {
+ /* skip the smq(flush) TL2 */
+ if (tl2 == smq_flush_ctx->tl2_schq)
+ continue;
+ /* skip unused TL2s */
+ if (TXSCH_MAP_FLAGS(txsch->pfvf_map[tl2]) & NIX_TXSCHQ_FREE)
+ continue;
+		/* skip if the PF (func bits masked off) doesn't match */
+ if ((TXSCH_MAP_FUNC(txsch->pfvf_map[tl2]) & ~RVU_PFVF_FUNC_MASK) !=
+ (TXSCH_MAP_FUNC(txsch->pfvf_map[smq_flush_ctx->tl2_schq] &
+ ~RVU_PFVF_FUNC_MASK)))
+ continue;
+ /* enable/disable XOFF */
+ regoff = NIX_AF_TL2X_SW_XOFF(tl2);
+ if (enable)
+ rvu_write64(rvu, blkaddr, regoff, 0x1);
+ else
+ rvu_write64(rvu, blkaddr, regoff, 0x0);
+ }
+}
+
+static void nix_smq_flush_enadis_rate(struct rvu *rvu, int blkaddr,
+ struct nix_smq_flush_ctx *smq_flush_ctx, bool enable)
+{
+ u64 cir_off, pir_off, cir_val, pir_val;
+ struct nix_smq_tree_ctx *smq_tree_ctx;
+ int lvl;
+
+ for (lvl = NIX_TXSCH_LVL_SMQ; lvl <= NIX_TXSCH_LVL_TL1; lvl++) {
+ smq_tree_ctx = &smq_flush_ctx->smq_tree_ctx[lvl];
+ cir_off = smq_tree_ctx->cir_off;
+ cir_val = smq_tree_ctx->cir_val;
+ pir_off = smq_tree_ctx->pir_off;
+ pir_val = smq_tree_ctx->pir_val;
+
+ if (enable) {
+ rvu_write64(rvu, blkaddr, cir_off, cir_val);
+ if (lvl != NIX_TXSCH_LVL_TL1)
+ rvu_write64(rvu, blkaddr, pir_off, pir_val);
+ } else {
+ rvu_write64(rvu, blkaddr, cir_off, 0x0);
+ if (lvl != NIX_TXSCH_LVL_TL1)
+ rvu_write64(rvu, blkaddr, pir_off, 0x0);
+ }
+ }
+}
+
+static int nix_smq_flush(struct rvu *rvu, int blkaddr,
+ int smq, u16 pcifunc, int nixlf)
+{
+ struct nix_smq_flush_ctx *smq_flush_ctx;
+ int pf = rvu_get_pf(pcifunc);
+ u8 cgx_id = 0, lmac_id = 0;
+ int err, restore_tx_en = 0;
+ u64 cfg;
+
+ if (!is_rvu_otx2(rvu)) {
+ /* Skip SMQ flush if pkt count is zero */
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_MDQX_IN_MD_COUNT(smq));
+ if (!cfg)
+ return 0;
+ }
+
+ /* enable cgx tx if disabled */
+ if (is_pf_cgxmapped(rvu, pf)) {
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ restore_tx_en = !rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu),
+ lmac_id, true);
+ }
+
+ /* XOFF all TL2s whose parent TL1 matches SMQ tree TL1 */
+ smq_flush_ctx = kzalloc(sizeof(*smq_flush_ctx), GFP_KERNEL);
+ if (!smq_flush_ctx)
+ return -ENOMEM;
+ nix_smq_flush_fill_ctx(rvu, blkaddr, smq, smq_flush_ctx);
+ nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, true);
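+	/* Zero the CIR/PIR values saved above so shaping cannot throttle
+	 * the flush; they are restored once the flush completes.
+	 */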
+ nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, false);
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
+ /* Do SMQ flush and set enqueue xoff */
+ cfg |= BIT_ULL(50) | BIT_ULL(49);
+ rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);
+
+ /* Disable backpressure from physical link,
+ * otherwise SMQ flush may stall.
+ */
+ rvu_cgx_enadis_rx_bp(rvu, pf, false);
+
+ /* Wait for flush to complete */
+ err = rvu_poll_reg(rvu, blkaddr,
+ NIX_AF_SMQX_CFG(smq), BIT_ULL(49), true);
+ if (err)
+ dev_info(rvu->dev,
+ "NIXLF%d: SMQ%d flush failed, txlink might be busy\n",
+ nixlf, smq);
+
+ /* clear XOFF on TL2s */
+ nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, true);
+ nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, false);
+ kfree(smq_flush_ctx);
+
+ rvu_cgx_enadis_rx_bp(rvu, pf, true);
+ /* restore cgx tx state */
+ if (restore_tx_en)
+ rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
+ return err;
+}
+
+static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
+{
+ int blkaddr, nixlf, lvl, schq, err;
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct nix_txsch *txsch;
+ struct nix_hw *nix_hw;
+ u16 map_func;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return NIX_AF_ERR_INVALID_NIXBLK;
+
+ nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
+ if (nixlf < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+	/* Disable TL2/3 queue links and all XOFFs before SMQ flush */
+ mutex_lock(&rvu->rsrc_lock);
+ for (lvl = NIX_TXSCH_LVL_MDQ; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
+ txsch = &nix_hw->txsch[lvl];
+
+ if (lvl >= hw->cap.nix_tx_aggr_lvl)
+ continue;
+
+ for (schq = 0; schq < txsch->schq.max; schq++) {
+ if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
+ continue;
+ nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
+ nix_clear_tx_xoff(rvu, blkaddr, lvl, schq);
+ nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
+ }
+ }
+ nix_clear_tx_xoff(rvu, blkaddr, NIX_TXSCH_LVL_TL1,
+ nix_get_tx_link(rvu, pcifunc));
+
+ /* On PF cleanup, clear cfg done flag as
+ * PF would have changed default config.
+ */
+ if (!(pcifunc & RVU_PFVF_FUNC_MASK)) {
+ txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL1];
+ schq = nix_get_tx_link(rvu, pcifunc);
+ /* Do not clear pcifunc in txsch->pfvf_map[schq] because
+ * VF might be using this TL1 queue
+ */
+ map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]);
+ txsch->pfvf_map[schq] = TXSCH_SET_FLAG(map_func, 0x0);
+ }
+
+ /* Flush SMQs */
+ txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
+ for (schq = 0; schq < txsch->schq.max; schq++) {
+ if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
+ continue;
+ nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
+ }
+
+ /* Now free scheduler queues to free pool */
+ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
+		/* TLs above the aggregation level are shared across a PF
+		 * and its VFs, hence skip freeing them.
+ */
+ if (lvl >= hw->cap.nix_tx_aggr_lvl)
+ continue;
+
+ txsch = &nix_hw->txsch[lvl];
+ for (schq = 0; schq < txsch->schq.max; schq++) {
+ if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
+ continue;
+ nix_reset_tx_schedule(rvu, blkaddr, lvl, schq);
+ rvu_free_rsrc(&txsch->schq, schq);
+ txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
+ }
+ }
+ mutex_unlock(&rvu->rsrc_lock);
+
+ /* Sync cached info for this LF in NDC-TX to LLC/DRAM */
+ rvu_write64(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12) | nixlf);
+ err = rvu_poll_reg(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12), true);
+ if (err)
+ dev_err(rvu->dev, "NDC-TX sync failed for NIXLF %d\n", nixlf);
+
+ return 0;
+}
+
+static int nix_txschq_free_one(struct rvu *rvu,
+ struct nix_txsch_free_req *req)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ int lvl, schq, nixlf, blkaddr;
+ struct nix_txsch *txsch;
+ struct nix_hw *nix_hw;
+ u32 *pfvf_map;
+ int rc;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return NIX_AF_ERR_INVALID_NIXBLK;
+
+ nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
+ if (nixlf < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ lvl = req->schq_lvl;
+ schq = req->schq;
+ txsch = &nix_hw->txsch[lvl];
+
+ if (lvl >= hw->cap.nix_tx_aggr_lvl || schq >= txsch->schq.max)
+ return 0;
+
+ pfvf_map = txsch->pfvf_map;
+ mutex_lock(&rvu->rsrc_lock);
+
+ if (TXSCH_MAP_FUNC(pfvf_map[schq]) != pcifunc) {
+ rc = NIX_AF_ERR_TLX_INVALID;
+ goto err;
+ }
+
+ /* Clear SW_XOFF of this resource only.
+	 * For SMQ level, the user must clear
+	 * all XOFFs along the path.
+ */
+ nix_clear_tx_xoff(rvu, blkaddr, lvl, schq);
+
+ nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
+ nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
+
+	/* Flush if it is an SMQ. The onus of disabling
+	 * TL2/3 queue links before the SMQ flush is on the user.
+ */
+ if (lvl == NIX_TXSCH_LVL_SMQ &&
+ nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf)) {
+ rc = NIX_AF_SMQ_FLUSH_FAILED;
+ goto err;
+ }
+
+ nix_reset_tx_schedule(rvu, blkaddr, lvl, schq);
+
+ /* Free the resource */
+ rvu_free_rsrc(&txsch->schq, schq);
+ txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
+ mutex_unlock(&rvu->rsrc_lock);
+ return 0;
+err:
+ mutex_unlock(&rvu->rsrc_lock);
+ return rc;
+}
+
+int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu,
+ struct nix_txsch_free_req *req,
+ struct msg_rsp *rsp)
+{
+ if (req->flags & TXSCHQ_FREE_ALL)
+ return nix_txschq_free(rvu, req->hdr.pcifunc);
+ else
+ return nix_txschq_free_one(rvu, req);
+}
+
+static bool is_txschq_hierarchy_valid(struct rvu *rvu, u16 pcifunc, int blkaddr,
+ int lvl, u64 reg, u64 regval)
+{
+ u64 regbase = reg & 0xFFFF;
+ u16 schq, parent;
+
+ if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, lvl, reg))
+ return false;
+
+ schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
+ /* Check if this schq belongs to this PF/VF or not */
+ if (!is_valid_txschq(rvu, blkaddr, lvl, pcifunc, schq))
+ return false;
+
+ parent = (regval >> 16) & 0x1FF;
+ /* Validate MDQ's TL4 parent */
+ if (regbase == NIX_AF_MDQX_PARENT(0) &&
+ !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL4, pcifunc, parent))
+ return false;
+
+ /* Validate TL4's TL3 parent */
+ if (regbase == NIX_AF_TL4X_PARENT(0) &&
+ !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL3, pcifunc, parent))
+ return false;
+
+ /* Validate TL3's TL2 parent */
+ if (regbase == NIX_AF_TL3X_PARENT(0) &&
+ !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL2, pcifunc, parent))
+ return false;
+
+ /* Validate TL2's TL1 parent */
+ if (regbase == NIX_AF_TL2X_PARENT(0) &&
+ !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL1, pcifunc, parent))
+ return false;
+
+ return true;
+}
+
+static bool is_txschq_shaping_valid(struct rvu_hwinfo *hw, int lvl, u64 reg)
+{
+ u64 regbase;
+
+ if (hw->cap.nix_shaping)
+ return true;
+
+	/* If shaping and coloring are not supported, then
+ * *_CIR and *_PIR registers should not be configured.
+ */
+ regbase = reg & 0xFFFF;
+
+ switch (lvl) {
+ case NIX_TXSCH_LVL_TL1:
+ if (regbase == NIX_AF_TL1X_CIR(0))
+ return false;
+ break;
+ case NIX_TXSCH_LVL_TL2:
+ if (regbase == NIX_AF_TL2X_CIR(0) ||
+ regbase == NIX_AF_TL2X_PIR(0))
+ return false;
+ break;
+ case NIX_TXSCH_LVL_TL3:
+ if (regbase == NIX_AF_TL3X_CIR(0) ||
+ regbase == NIX_AF_TL3X_PIR(0))
+ return false;
+ break;
+ case NIX_TXSCH_LVL_TL4:
+ if (regbase == NIX_AF_TL4X_CIR(0) ||
+ regbase == NIX_AF_TL4X_PIR(0))
+ return false;
+ break;
+ case NIX_TXSCH_LVL_MDQ:
+ if (regbase == NIX_AF_MDQX_CIR(0) ||
+ regbase == NIX_AF_MDQX_PIR(0))
+ return false;
+ break;
+ }
+ return true;
+}
+
+static void nix_tl1_default_cfg(struct rvu *rvu, struct nix_hw *nix_hw,
+ u16 pcifunc, int blkaddr)
+{
+ u32 *pfvf_map;
+ int schq;
+
+ schq = nix_get_tx_link(rvu, pcifunc);
+ pfvf_map = nix_hw->txsch[NIX_TXSCH_LVL_TL1].pfvf_map;
+ /* Skip if PF has already done the config */
+ if (TXSCH_MAP_FLAGS(pfvf_map[schq]) & NIX_TXSCHQ_CFG_DONE)
+ return;
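+	/* Program the default round-robin priority into TL1 TOPOLOGY */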
+ rvu_write64(rvu, blkaddr, NIX_AF_TL1X_TOPOLOGY(schq),
+ (TXSCH_TL1_DFLT_RR_PRIO << 1));
+
+	/* On OcteonTx2 the config was in bytes; on newer silicons
+	 * it's changed to a weight.
+ */
+ if (!rvu->hw->cap.nix_common_dwrr_mtu)
+ rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq),
+ TXSCH_TL1_DFLT_RR_QTM);
+ else
+ rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq),
+ CN10K_MAX_DWRR_WEIGHT);
+
+ rvu_write64(rvu, blkaddr, NIX_AF_TL1X_CIR(schq), 0x00);
+ pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], NIX_TXSCHQ_CFG_DONE);
+}
+
+/* Register offset - [15:0]
+ * Scheduler Queue number - [25:16]
+ */
+#define NIX_TX_SCHQ_MASK GENMASK_ULL(25, 0)
+
+static int nix_txschq_cfg_read(struct rvu *rvu, struct nix_hw *nix_hw,
+ int blkaddr, struct nix_txschq_config *req,
+ struct nix_txschq_config *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ int idx, schq;
+ u64 reg;
+
+ for (idx = 0; idx < req->num_regs; idx++) {
+ reg = req->reg[idx];
+ reg &= NIX_TX_SCHQ_MASK;
+ schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
+ if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, req->lvl, reg) ||
+ !is_valid_txschq(rvu, blkaddr, req->lvl, pcifunc, schq))
+ return NIX_AF_INVAL_TXSCHQ_CFG;
+ rsp->regval[idx] = rvu_read64(rvu, blkaddr, reg);
+ }
+ rsp->lvl = req->lvl;
+ rsp->num_regs = req->num_regs;
+ return 0;
+}
+
+void rvu_nix_tx_tl2_cfg(struct rvu *rvu, int blkaddr, u16 pcifunc,
+ struct nix_txsch *txsch, bool enable)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ int lbk_link_start, lbk_links;
+ u8 pf = rvu_get_pf(pcifunc);
+ int schq;
+ u64 cfg;
+
+ if (!is_pf_cgxmapped(rvu, pf))
+ return;
+
+ cfg = enable ? (BIT_ULL(12) | RVU_SWITCH_LBK_CHAN) : 0;
+ lbk_link_start = hw->cgx_links;
+
+ for (schq = 0; schq < txsch->schq.max; schq++) {
+ if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
+ continue;
+ /* Enable all LBK links with channel 63 by default so that
+		 * packets can be sent to LBK with an NPC TX MCAM rule
+ */
+ lbk_links = hw->lbk_links;
+ while (lbk_links--)
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_TL3_TL2X_LINKX_CFG(schq,
+ lbk_link_start +
+ lbk_links), cfg);
+ }
+}
+
+int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
+ struct nix_txschq_config *req,
+ struct nix_txschq_config *rsp)
+{
+ u64 reg, val, regval, schq_regbase, val_mask;
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct nix_txsch *txsch;
+ struct nix_hw *nix_hw;
+ int blkaddr, idx, err;
+ int nixlf, schq;
+ u32 *pfvf_map;
+
+ if (req->lvl >= NIX_TXSCH_LVL_CNT ||
+ req->num_regs > MAX_REGS_PER_MBOX_MSG)
+ return NIX_AF_INVAL_TXSCHQ_CFG;
+
+ err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
+ if (err)
+ return err;
+
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return NIX_AF_ERR_INVALID_NIXBLK;
+
+ if (req->read)
+ return nix_txschq_cfg_read(rvu, nix_hw, blkaddr, req, rsp);
+
+ txsch = &nix_hw->txsch[req->lvl];
+ pfvf_map = txsch->pfvf_map;
+
+ if (req->lvl >= hw->cap.nix_tx_aggr_lvl &&
+ pcifunc & RVU_PFVF_FUNC_MASK) {
+ mutex_lock(&rvu->rsrc_lock);
+ if (req->lvl == NIX_TXSCH_LVL_TL1)
+ nix_tl1_default_cfg(rvu, nix_hw, pcifunc, blkaddr);
+ mutex_unlock(&rvu->rsrc_lock);
+ return 0;
+ }
+
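+	/* Validate and apply each requested register write, with special
+	 * handling for shaper toggles, SMQ config and SMQ flush.
+	 */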
+ for (idx = 0; idx < req->num_regs; idx++) {
+ reg = req->reg[idx];
+ reg &= NIX_TX_SCHQ_MASK;
+ regval = req->regval[idx];
+ schq_regbase = reg & 0xFFFF;
+ val_mask = req->regval_mask[idx];
+
+ if (!is_txschq_hierarchy_valid(rvu, pcifunc, blkaddr,
+ txsch->lvl, reg, regval))
+ return NIX_AF_INVAL_TXSCHQ_CFG;
+
+		/* Check if shaping and coloring are supported */
+ if (!is_txschq_shaping_valid(hw, req->lvl, reg))
+ continue;
+
+ val = rvu_read64(rvu, blkaddr, reg);
+ regval = (val & val_mask) | (regval & ~val_mask);
+
+ /* Handle shaping state toggle specially */
+ if (hw->cap.nix_shaper_toggle_wait &&
+ handle_txschq_shaper_update(rvu, blkaddr, nixlf,
+ req->lvl, reg, regval))
+ continue;
+
+ /* Replace PF/VF visible NIXLF slot with HW NIXLF id */
+ if (schq_regbase == NIX_AF_SMQX_CFG(0)) {
+ nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
+ pcifunc, 0);
+ regval &= ~(0x7FULL << 24);
+ regval |= ((u64)nixlf << 24);
+ }
+
+ /* Clear 'BP_ENA' config, if it's not allowed */
+ if (!hw->cap.nix_tx_link_bp) {
+ if (schq_regbase == NIX_AF_TL4X_SDP_LINK_CFG(0) ||
+ (schq_regbase & 0xFF00) ==
+ NIX_AF_TL3_TL2X_LINKX_CFG(0, 0))
+ regval &= ~BIT_ULL(13);
+ }
+
+ /* Mark config as done for TL1 by PF */
+ if (schq_regbase >= NIX_AF_TL1X_SCHEDULE(0) &&
+ schq_regbase <= NIX_AF_TL1X_GREEN_BYTES(0)) {
+ schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
+ mutex_lock(&rvu->rsrc_lock);
+ pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq],
+ NIX_TXSCHQ_CFG_DONE);
+ mutex_unlock(&rvu->rsrc_lock);
+ }
+
+		/* SMQ flush is special, hence split the register write:
+		 * flush first and write the rest of the bits later.
+ */
+ if (schq_regbase == NIX_AF_SMQX_CFG(0) &&
+ (regval & BIT_ULL(49))) {
+ schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
+ nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
+ regval &= ~BIT_ULL(49);
+ }
+ rvu_write64(rvu, blkaddr, reg, regval);
+ }
+
+ return 0;
+}
+
+static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr,
+ struct nix_vtag_config *req)
+{
+ u64 regval = req->vtag_size;
+
+ if (req->rx.vtag_type > NIX_AF_LFX_RX_VTAG_TYPE7 ||
+ req->vtag_size > VTAGSIZE_T8)
+ return -EINVAL;
+
+ /* RX VTAG Type 7 reserved for vf vlan */
+ if (req->rx.vtag_type == NIX_AF_LFX_RX_VTAG_TYPE7)
+ return NIX_AF_ERR_RX_VTAG_INUSE;
+
+ if (req->rx.capture_vtag)
+ regval |= BIT_ULL(5);
+ if (req->rx.strip_vtag)
+ regval |= BIT_ULL(4);
+
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type), regval);
+ return 0;
+}
+
+static int nix_tx_vtag_free(struct rvu *rvu, int blkaddr,
+ u16 pcifunc, int index)
+{
+ struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ struct nix_txvlan *vlan;
+
+ if (!nix_hw)
+ return NIX_AF_ERR_INVALID_NIXBLK;
+
+ vlan = &nix_hw->txvlan;
+ if (vlan->entry2pfvf_map[index] != pcifunc)
+ return NIX_AF_ERR_PARAM;
+
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_TX_VTAG_DEFX_DATA(index), 0x0ull);
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_TX_VTAG_DEFX_CTL(index), 0x0ull);
+
+ vlan->entry2pfvf_map[index] = 0;
+ rvu_free_rsrc(&vlan->rsrc, index);
+
+ return 0;
+}
+
+static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc)
+{
+ struct nix_txvlan *vlan;
+ struct nix_hw *nix_hw;
+ int index, blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (blkaddr < 0)
+ return;
+
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return;
+
+ vlan = &nix_hw->txvlan;
+
+ mutex_lock(&vlan->rsrc_lock);
+ /* Scan all the entries and free the ones mapped to 'pcifunc' */
+ for (index = 0; index < vlan->rsrc.max; index++) {
+ if (vlan->entry2pfvf_map[index] == pcifunc)
+ nix_tx_vtag_free(rvu, blkaddr, pcifunc, index);
+ }
+ mutex_unlock(&vlan->rsrc_lock);
+}
+
+static int nix_tx_vtag_alloc(struct rvu *rvu, int blkaddr,
+ u64 vtag, u8 size)
+{
+ struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ struct nix_txvlan *vlan;
+ u64 regval;
+ int index;
+
+ if (!nix_hw)
+ return NIX_AF_ERR_INVALID_NIXBLK;
+
+ vlan = &nix_hw->txvlan;
+
+ mutex_lock(&vlan->rsrc_lock);
+
+ index = rvu_alloc_rsrc(&vlan->rsrc);
+ if (index < 0) {
+ mutex_unlock(&vlan->rsrc_lock);
+ return index;
+ }
+
+ mutex_unlock(&vlan->rsrc_lock);
+
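+	/* A 4-byte VTAG (size == 0) goes into the upper word of the
+	 * DATA register; an 8-byte VTAG uses the full 64 bits.
+	 */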
+ regval = size ? vtag : vtag << 32;
+
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_TX_VTAG_DEFX_DATA(index), regval);
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_TX_VTAG_DEFX_CTL(index), size);
+
+ return index;
+}
+
+static int nix_tx_vtag_decfg(struct rvu *rvu, int blkaddr,
+ struct nix_vtag_config *req)
+{
+ struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ u16 pcifunc = req->hdr.pcifunc;
+ int idx0 = req->tx.vtag0_idx;
+ int idx1 = req->tx.vtag1_idx;
+ struct nix_txvlan *vlan;
+ int err = 0;
+
+ if (!nix_hw)
+ return NIX_AF_ERR_INVALID_NIXBLK;
+
+ vlan = &nix_hw->txvlan;
+ if (req->tx.free_vtag0 && req->tx.free_vtag1)
+ if (vlan->entry2pfvf_map[idx0] != pcifunc ||
+ vlan->entry2pfvf_map[idx1] != pcifunc)
+ return NIX_AF_ERR_PARAM;
+
+ mutex_lock(&vlan->rsrc_lock);
+
+ if (req->tx.free_vtag0) {
+ err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx0);
+ if (err)
+ goto exit;
+ }
+
+ if (req->tx.free_vtag1)
+ err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx1);
+
+exit:
+ mutex_unlock(&vlan->rsrc_lock);
+ return err;
+}
+
+static int nix_tx_vtag_cfg(struct rvu *rvu, int blkaddr,
+ struct nix_vtag_config *req,
+ struct nix_vtag_config_rsp *rsp)
+{
+ struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ struct nix_txvlan *vlan;
+ u16 pcifunc = req->hdr.pcifunc;
+
+ if (!nix_hw)
+ return NIX_AF_ERR_INVALID_NIXBLK;
+
+ vlan = &nix_hw->txvlan;
+ if (req->tx.cfg_vtag0) {
+ rsp->vtag0_idx =
+ nix_tx_vtag_alloc(rvu, blkaddr,
+ req->tx.vtag0, req->vtag_size);
+
+ if (rsp->vtag0_idx < 0)
+ return NIX_AF_ERR_TX_VTAG_NOSPC;
+
+ vlan->entry2pfvf_map[rsp->vtag0_idx] = pcifunc;
+ }
+
+ if (req->tx.cfg_vtag1) {
+ rsp->vtag1_idx =
+ nix_tx_vtag_alloc(rvu, blkaddr,
+ req->tx.vtag1, req->vtag_size);
+
+ if (rsp->vtag1_idx < 0)
+ goto err_free;
+
+ vlan->entry2pfvf_map[rsp->vtag1_idx] = pcifunc;
+ }
+
+ return 0;
+
+err_free:
+ if (req->tx.cfg_vtag0)
+ nix_tx_vtag_free(rvu, blkaddr, pcifunc, rsp->vtag0_idx);
+
+ return NIX_AF_ERR_TX_VTAG_NOSPC;
+}
+
+int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu,
+ struct nix_vtag_config *req,
+ struct nix_vtag_config_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ int blkaddr, nixlf, err;
+
+ err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
+ if (err)
+ return err;
+
+ if (req->cfg_type) {
+ /* rx vtag configuration */
+ err = nix_rx_vtag_cfg(rvu, nixlf, blkaddr, req);
+ if (err)
+ return NIX_AF_ERR_PARAM;
+ } else {
+ /* tx vtag configuration */
+ if ((req->tx.cfg_vtag0 || req->tx.cfg_vtag1) &&
+ (req->tx.free_vtag0 || req->tx.free_vtag1))
+ return NIX_AF_ERR_PARAM;
+
+ if (req->tx.cfg_vtag0 || req->tx.cfg_vtag1)
+ return nix_tx_vtag_cfg(rvu, blkaddr, req, rsp);
+
+ if (req->tx.free_vtag0 || req->tx.free_vtag1)
+ return nix_tx_vtag_decfg(rvu, blkaddr, req);
+ }
+
+ return 0;
+}
+
+static int nix_blk_setup_mce(struct rvu *rvu, struct nix_hw *nix_hw,
+ int mce, u8 op, u16 pcifunc, int next, bool eol)
+{
+ struct nix_aq_enq_req aq_req;
+ int err;
+
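+	/* MCE entries are programmed via the NIX AQ on behalf of
+	 * the AF (hdr.pcifunc = 0).
+	 */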
+ aq_req.hdr.pcifunc = 0;
+ aq_req.ctype = NIX_AQ_CTYPE_MCE;
+ aq_req.op = op;
+ aq_req.qidx = mce;
+
+ /* Use RSS with RSS index 0 */
+ aq_req.mce.op = 1;
+ aq_req.mce.index = 0;
+ aq_req.mce.eol = eol;
+ aq_req.mce.pf_func = pcifunc;
+ aq_req.mce.next = next;
+
+ /* All fields valid */
+ *(u64 *)(&aq_req.mce_mask) = ~0ULL;
+
+ err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, &aq_req, NULL);
+ if (err) {
+ dev_err(rvu->dev, "Failed to setup Bcast MCE for PF%d:VF%d\n",
+ rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
+ return err;
+ }
+ return 0;
+}
+
+static int nix_update_mce_list_entry(struct nix_mce_list *mce_list,
+ u16 pcifunc, bool add)
+{
+ struct mce *mce, *tail = NULL;
+ bool delete = false;
+
+ /* Scan through the current list */
+ hlist_for_each_entry(mce, &mce_list->head, node) {
+ /* If already exists, then delete */
+ if (mce->pcifunc == pcifunc && !add) {
+ delete = true;
+ break;
+ } else if (mce->pcifunc == pcifunc && add) {
+ /* entry already exists */
+ return 0;
+ }
+ tail = mce;
+ }
+
+ if (delete) {
+ hlist_del(&mce->node);
+ kfree(mce);
+ mce_list->count--;
+ return 0;
+ }
+
+ if (!add)
+ return 0;
+
+ /* Add a new one to the list, at the tail */
+ mce = kzalloc(sizeof(*mce), GFP_KERNEL);
+ if (!mce)
+ return -ENOMEM;
+ mce->pcifunc = pcifunc;
+ if (!tail)
+ hlist_add_head(&mce->node, &mce_list->head);
+ else
+ hlist_add_behind(&mce->node, &tail->node);
+ mce_list->count++;
+ return 0;
+}
+
+int nix_update_mce_list(struct rvu *rvu, u16 pcifunc,
+ struct nix_mce_list *mce_list,
+ int mce_idx, int mcam_index, bool add)
+{
+ int err = 0, idx, next_idx, last_idx, blkaddr, npc_blkaddr;
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct nix_mcast *mcast;
+ struct nix_hw *nix_hw;
+ struct mce *mce;
+
+ if (!mce_list)
+ return -EINVAL;
+
+ /* Get this PF/VF func's MCE index */
+ idx = mce_idx + (pcifunc & RVU_PFVF_FUNC_MASK);
+
+ if (idx > (mce_idx + mce_list->max)) {
+ dev_err(rvu->dev,
+ "%s: Idx %d > max MCE idx %d, for PF%d bcast list\n",
+ __func__, idx, mce_list->max,
+ pcifunc >> RVU_PFVF_PF_SHIFT);
+ return -EINVAL;
+ }
+
+ err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
+ if (err)
+ return err;
+
+ mcast = &nix_hw->mcast;
+ mutex_lock(&mcast->mce_lock);
+
+ err = nix_update_mce_list_entry(mce_list, pcifunc, add);
+ if (err)
+ goto end;
+
+ /* Disable MCAM entry in NPC */
+ if (!mce_list->count) {
+ npc_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ npc_enable_mcam_entry(rvu, mcam, npc_blkaddr, mcam_index, false);
+ goto end;
+ }
+
+ /* Dump the updated list to HW */
+ idx = mce_idx;
+ last_idx = idx + mce_list->count - 1;
+ hlist_for_each_entry(mce, &mce_list->head, node) {
+ if (idx > last_idx)
+ break;
+
+ next_idx = idx + 1;
+ /* EOL should be set in last MCE */
+ err = nix_blk_setup_mce(rvu, nix_hw, idx, NIX_AQ_INSTOP_WRITE,
+ mce->pcifunc, next_idx,
+					  next_idx > last_idx);
+ if (err)
+ goto end;
+ idx++;
+ }
+
+end:
+ mutex_unlock(&mcast->mce_lock);
+ return err;
+}
+
+void nix_get_mce_list(struct rvu *rvu, u16 pcifunc, int type,
+ struct nix_mce_list **mce_list, int *mce_idx)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_pfvf *pfvf;
+
+ if (!hw->cap.nix_rx_multicast ||
+ !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc & ~RVU_PFVF_FUNC_MASK))) {
+ *mce_list = NULL;
+ *mce_idx = 0;
+ return;
+ }
+
+ /* Get this PF/VF func's MCE index */
+ pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
+
+ if (type == NIXLF_BCAST_ENTRY) {
+ *mce_list = &pfvf->bcast_mce_list;
+ *mce_idx = pfvf->bcast_mce_idx;
+ } else if (type == NIXLF_ALLMULTI_ENTRY) {
+ *mce_list = &pfvf->mcast_mce_list;
+ *mce_idx = pfvf->mcast_mce_idx;
+ } else if (type == NIXLF_PROMISC_ENTRY) {
+ *mce_list = &pfvf->promisc_mce_list;
+ *mce_idx = pfvf->promisc_mce_idx;
+ } else {
+ *mce_list = NULL;
+ *mce_idx = 0;
+ }
+}
+
+static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
+ int type, bool add)
+{
+ int err = 0, nixlf, blkaddr, mcam_index, mce_idx;
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct nix_mce_list *mce_list;
+ int pf;
+
+ /* skip multicast pkt replication for AF's VFs & SDP links */
+ if (is_afvf(pcifunc) || is_sdp_pfvf(pcifunc))
+ return 0;
+
+ if (!hw->cap.nix_rx_multicast)
+ return 0;
+
+ pf = rvu_get_pf(pcifunc);
+ if (!is_pf_cgxmapped(rvu, pf))
+ return 0;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (blkaddr < 0)
+ return -EINVAL;
+
+ nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
+ if (nixlf < 0)
+ return -EINVAL;
+
+ nix_get_mce_list(rvu, pcifunc, type, &mce_list, &mce_idx);
+
+ mcam_index = npc_get_nixlf_mcam_index(mcam,
+ pcifunc & ~RVU_PFVF_FUNC_MASK,
+ nixlf, type);
+ err = nix_update_mce_list(rvu, pcifunc, mce_list,
+ mce_idx, mcam_index, add);
+ return err;
+}
+
+static int nix_setup_mce_tables(struct rvu *rvu, struct nix_hw *nix_hw)
+{
+ struct nix_mcast *mcast = &nix_hw->mcast;
+ int err, pf, numvfs, idx;
+ struct rvu_pfvf *pfvf;
+ u16 pcifunc;
+ u64 cfg;
+
+	/* Skip PF0 (i.e. AF) */
+ for (pf = 1; pf < (rvu->cgx_mapped_pfs + 1); pf++) {
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
+ /* If PF is not enabled, nothing to do */
+ if (!((cfg >> 20) & 0x01))
+ continue;
+ /* Get numVFs attached to this PF */
+ numvfs = (cfg >> 12) & 0xFF;
+
+ pfvf = &rvu->pf[pf];
+
+		/* Is this NIX0/1 block mapped to this PF? */
+ if (pfvf->nix_blkaddr != nix_hw->blkaddr)
+ continue;
+
+ /* save start idx of broadcast mce list */
+ pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
+ nix_mce_list_init(&pfvf->bcast_mce_list, numvfs + 1);
+
+ /* save start idx of multicast mce list */
+ pfvf->mcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
+ nix_mce_list_init(&pfvf->mcast_mce_list, numvfs + 1);
+
+ /* save the start idx of promisc mce list */
+ pfvf->promisc_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
+ nix_mce_list_init(&pfvf->promisc_mce_list, numvfs + 1);
+
+ for (idx = 0; idx < (numvfs + 1); idx++) {
+ /* idx-0 is for PF, followed by VFs */
+ pcifunc = (pf << RVU_PFVF_PF_SHIFT);
+ pcifunc |= idx;
+			/* Add dummy entries now, so that we don't have to
+			 * check whether AQ_OP should be INIT/WRITE later on.
+ * Will be updated when a NIXLF is attached/detached to
+ * these PF/VFs.
+ */
+ err = nix_blk_setup_mce(rvu, nix_hw,
+ pfvf->bcast_mce_idx + idx,
+ NIX_AQ_INSTOP_INIT,
+ pcifunc, 0, true);
+ if (err)
+ return err;
+
+ /* add dummy entries to multicast mce list */
+ err = nix_blk_setup_mce(rvu, nix_hw,
+ pfvf->mcast_mce_idx + idx,
+ NIX_AQ_INSTOP_INIT,
+ pcifunc, 0, true);
+ if (err)
+ return err;
+
+ /* add dummy entries to promisc mce list */
+ err = nix_blk_setup_mce(rvu, nix_hw,
+ pfvf->promisc_mce_idx + idx,
+ NIX_AQ_INSTOP_INIT,
+ pcifunc, 0, true);
+ if (err)
+ return err;
+ }
+ }
+ return 0;
+}
+
+static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
+{
+ struct nix_mcast *mcast = &nix_hw->mcast;
+ struct rvu_hwinfo *hw = rvu->hw;
+ int err, size;
+
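+	/* MCE context size is advertised as log2 in NIX_AF_CONST3[19:16] */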
+ size = (rvu_read64(rvu, blkaddr, NIX_AF_CONST3) >> 16) & 0x0F;
+ size = (1ULL << size);
+
+ /* Alloc memory for multicast/mirror replication entries */
+ err = qmem_alloc(rvu->dev, &mcast->mce_ctx,
+ (256UL << MC_TBL_SIZE), size);
+ if (err)
+ return -ENOMEM;
+
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BASE,
+ (u64)mcast->mce_ctx->iova);
+
+ /* Set max list length equal to max no of VFs per PF + PF itself */
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG,
+ BIT_ULL(36) | (hw->max_vfs_per_pf << 4) | MC_TBL_SIZE);
+
+ /* Alloc memory for multicast replication buffers */
+ size = rvu_read64(rvu, blkaddr, NIX_AF_MC_MIRROR_CONST) & 0xFFFF;
+ err = qmem_alloc(rvu->dev, &mcast->mcast_buf,
+ (8UL << MC_BUF_CNT), size);
+ if (err)
+ return -ENOMEM;
+
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_BASE,
+ (u64)mcast->mcast_buf->iova);
+
+ /* Alloc pkind for NIX internal RX multicast/mirror replay */
+ mcast->replay_pkind = rvu_alloc_rsrc(&hw->pkind.rsrc);
+
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_CFG,
+ BIT_ULL(63) | (mcast->replay_pkind << 24) |
+ BIT_ULL(20) | MC_BUF_CNT);
+
+ mutex_init(&mcast->mce_lock);
+
+ return nix_setup_mce_tables(rvu, nix_hw);
+}
+
+static int nix_setup_txvlan(struct rvu *rvu, struct nix_hw *nix_hw)
+{
+ struct nix_txvlan *vlan = &nix_hw->txvlan;
+ int err;
+
+	/* Allocate resource bitmap for tx vtag def registers */
+ vlan->rsrc.max = NIX_TX_VTAG_DEF_MAX;
+ err = rvu_alloc_bitmap(&vlan->rsrc);
+ if (err)
+ return -ENOMEM;
+
+	/* Alloc memory for saving entry to RVU PF_FUNC allocation mapping */
+ vlan->entry2pfvf_map = devm_kcalloc(rvu->dev, vlan->rsrc.max,
+ sizeof(u16), GFP_KERNEL);
+ if (!vlan->entry2pfvf_map)
+ goto free_mem;
+
+ mutex_init(&vlan->rsrc_lock);
+ return 0;
+
+free_mem:
+ kfree(vlan->rsrc.bmap);
+ return -ENOMEM;
+}
+
+static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
+{
+ struct nix_txsch *txsch;
+ int err, lvl, schq;
+ u64 cfg, reg;
+
+ /* Get scheduler queue count of each type and alloc
+ * bitmap for each for alloc/free/attach operations.
+ */
+ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
+ txsch = &nix_hw->txsch[lvl];
+ txsch->lvl = lvl;
+ switch (lvl) {
+ case NIX_TXSCH_LVL_SMQ:
+ reg = NIX_AF_MDQ_CONST;
+ break;
+ case NIX_TXSCH_LVL_TL4:
+ reg = NIX_AF_TL4_CONST;
+ break;
+ case NIX_TXSCH_LVL_TL3:
+ reg = NIX_AF_TL3_CONST;
+ break;
+ case NIX_TXSCH_LVL_TL2:
+ reg = NIX_AF_TL2_CONST;
+ break;
+ case NIX_TXSCH_LVL_TL1:
+ reg = NIX_AF_TL1_CONST;
+ break;
+ }
+ cfg = rvu_read64(rvu, blkaddr, reg);
+ txsch->schq.max = cfg & 0xFFFF;
+ err = rvu_alloc_bitmap(&txsch->schq);
+ if (err)
+ return err;
+
+ /* Allocate memory for scheduler queues to
+ * PF/VF pcifunc mapping info.
+ */
+ txsch->pfvf_map = devm_kcalloc(rvu->dev, txsch->schq.max,
+ sizeof(u32), GFP_KERNEL);
+ if (!txsch->pfvf_map)
+ return -ENOMEM;
+ for (schq = 0; schq < txsch->schq.max; schq++)
+ txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
+ }
+
+	/* Set up a default DWRR MTU of 8192 bytes */
+ if (rvu->hw->cap.nix_common_dwrr_mtu ||
+ rvu->hw->cap.nix_multiple_dwrr_mtu) {
+ rvu_write64(rvu, blkaddr,
+ nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_RPM),
+ convert_bytes_to_dwrr_mtu(8192));
+ rvu_write64(rvu, blkaddr,
+ nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_LBK),
+ convert_bytes_to_dwrr_mtu(8192));
+ rvu_write64(rvu, blkaddr,
+ nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_SDP),
+ convert_bytes_to_dwrr_mtu(8192));
+ }
+
+ return 0;
+}
+
+int rvu_nix_reserve_mark_format(struct rvu *rvu, struct nix_hw *nix_hw,
+ int blkaddr, u32 cfg)
+{
+ int fmt_idx;
+
+ for (fmt_idx = 0; fmt_idx < nix_hw->mark_format.in_use; fmt_idx++) {
+ if (nix_hw->mark_format.cfg[fmt_idx] == cfg)
+ return fmt_idx;
+ }
+ if (fmt_idx >= nix_hw->mark_format.total)
+ return -ERANGE;
+
+ rvu_write64(rvu, blkaddr, NIX_AF_MARK_FORMATX_CTL(fmt_idx), cfg);
+ nix_hw->mark_format.cfg[fmt_idx] = cfg;
+ nix_hw->mark_format.in_use++;
+ return fmt_idx;
+}
+
+static int nix_af_mark_format_setup(struct rvu *rvu, struct nix_hw *nix_hw,
+ int blkaddr)
+{
+ u64 cfgs[] = {
+ [NIX_MARK_CFG_IP_DSCP_RED] = 0x10003,
+ [NIX_MARK_CFG_IP_DSCP_YELLOW] = 0x11200,
+ [NIX_MARK_CFG_IP_DSCP_YELLOW_RED] = 0x11203,
+ [NIX_MARK_CFG_IP_ECN_RED] = 0x6000c,
+ [NIX_MARK_CFG_IP_ECN_YELLOW] = 0x60c00,
+ [NIX_MARK_CFG_IP_ECN_YELLOW_RED] = 0x60c0c,
+ [NIX_MARK_CFG_VLAN_DEI_RED] = 0x30008,
+ [NIX_MARK_CFG_VLAN_DEI_YELLOW] = 0x30800,
+ [NIX_MARK_CFG_VLAN_DEI_YELLOW_RED] = 0x30808,
+ };
+ int i, rc;
+ u64 total;
+
+ total = (rvu_read64(rvu, blkaddr, NIX_AF_PSE_CONST) & 0xFF00) >> 8;
+ nix_hw->mark_format.total = (u8)total;
+ nix_hw->mark_format.cfg = devm_kcalloc(rvu->dev, total, sizeof(u32),
+ GFP_KERNEL);
+ if (!nix_hw->mark_format.cfg)
+ return -ENOMEM;
+ for (i = 0; i < NIX_MARK_CFG_MAX; i++) {
+ rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfgs[i]);
+ if (rc < 0)
+ dev_err(rvu->dev, "Err %d in setup mark format %d\n",
+ i, rc);
+ }
+
+ return 0;
+}
+
+static void rvu_get_lbk_link_max_frs(struct rvu *rvu, u16 *max_mtu)
+{
+ /* CN10K supports LBK FIFO size 72 KB */
+ if (rvu->hw->lbk_bufsize == 0x12000)
+ *max_mtu = CN10K_LBK_LINK_MAX_FRS;
+ else
+ *max_mtu = NIC_HW_MAX_FRS;
+}
+
+static void rvu_get_lmac_link_max_frs(struct rvu *rvu, u16 *max_mtu)
+{
+ int fifo_size = rvu_cgx_get_fifolen(rvu);
+
+ /* RPM supports FIFO len 128 KB and RPM2 supports double the
+	 * FIFO len to accommodate 8 LMACs.
+ */
+ if (fifo_size == 0x20000 || fifo_size == 0x40000)
+ *max_mtu = CN10K_LMAC_LINK_MAX_FRS;
+ else
+ *max_mtu = NIC_HW_MAX_FRS;
+}
+
+int rvu_mbox_handler_nix_get_hw_info(struct rvu *rvu, struct msg_req *req,
+ struct nix_hw_info *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ u64 dwrr_mtu;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ if (is_afvf(pcifunc))
+ rvu_get_lbk_link_max_frs(rvu, &rsp->max_mtu);
+ else
+ rvu_get_lmac_link_max_frs(rvu, &rsp->max_mtu);
+
+ rsp->min_mtu = NIC_HW_MIN_FRS;
+
+ if (!rvu->hw->cap.nix_common_dwrr_mtu &&
+ !rvu->hw->cap.nix_multiple_dwrr_mtu) {
+ /* Return '1' on OTx2 */
+ rsp->rpm_dwrr_mtu = 1;
+ rsp->sdp_dwrr_mtu = 1;
+ rsp->lbk_dwrr_mtu = 1;
+ return 0;
+ }
+
+ /* Return DWRR_MTU for TLx_SCHEDULE[RR_WEIGHT] config */
+ dwrr_mtu = rvu_read64(rvu, blkaddr,
+ nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_RPM));
+ rsp->rpm_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu);
+
+ dwrr_mtu = rvu_read64(rvu, blkaddr,
+ nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_SDP));
+ rsp->sdp_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu);
+
+ dwrr_mtu = rvu_read64(rvu, blkaddr,
+ nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_LBK));
+ rsp->lbk_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu);
+
+ return 0;
+}
+
+int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ int i, nixlf, blkaddr, err;
+ u64 stats;
+
+ err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
+ if (err)
+ return err;
+
+ /* Get stats count supported by HW */
+ stats = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
+
+ /* Reset tx stats */
+ for (i = 0; i < ((stats >> 24) & 0xFF); i++)
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_STATX(nixlf, i), 0);
+
+ /* Reset rx stats */
+ for (i = 0; i < ((stats >> 32) & 0xFF); i++)
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_STATX(nixlf, i), 0);
+
+ return 0;
+}
+
+/* Returns the ALG index to be set into NPC_RX_ACTION */
+static int get_flowkey_alg_idx(struct nix_hw *nix_hw, u32 flow_cfg)
+{
+ int i;
+
+	/* Scan over existing algo entries to find a match */
+ for (i = 0; i < nix_hw->flowkey.in_use; i++)
+ if (nix_hw->flowkey.flowkey[i] == flow_cfg)
+ return i;
+
+ return -ERANGE;
+}
+
+static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
+{
+ int idx, nr_field, key_off, field_marker, keyoff_marker;
+ int max_key_off, max_bit_pos, group_member;
+ struct nix_rx_flowkey_alg *field;
+ struct nix_rx_flowkey_alg tmp;
+ u32 key_type, valid_key;
+ u32 l3_l4_src_dst;
+ int l4_key_offset = 0;
+
+ if (!alg)
+ return -EINVAL;
+
+#define FIELDS_PER_ALG 5
+#define MAX_KEY_OFF 40
+ /* Clear all fields */
+ memset(alg, 0, sizeof(uint64_t) * FIELDS_PER_ALG);
+
+	/* Each of the 32 possible flow key algorithm definitions should
+	 * fall into the above incremental config (except ALG0). Otherwise a
+	 * single NPC MCAM entry is not sufficient for supporting RSS.
+	 *
+	 * If a different definition or combination is needed then the NPC
+	 * MCAM has to be programmed to filter such pkts and its action
+	 * should point to this definition to calculate the flowtag or hash.
+	 *
+	 * The `for loop` goes over _all_ protocol fields and the following
+	 * variables depict the state machine's forward progress logic.
+ *
+ * keyoff_marker - Enabled when hash byte length needs to be accounted
+ * in field->key_offset update.
+ * field_marker - Enabled when a new field needs to be selected.
+ * group_member - Enabled when protocol is part of a group.
+ */
+
+ /* Last 4 bits (31:28) are reserved to specify SRC, DST
+ * selection for L3, L4 i.e IPV[4,6]_SRC, IPV[4,6]_DST,
+ * [TCP,UDP,SCTP]_SRC, [TCP,UDP,SCTP]_DST
+ * 31 => L3_SRC, 30 => L3_DST, 29 => L4_SRC, 28 => L4_DST
+ */
+ l3_l4_src_dst = flow_cfg;
+	/* Reset these 4 bits, so that they won't be part of the key */
+ flow_cfg &= NIX_FLOW_KEY_TYPE_L3_L4_MASK;
+
+ keyoff_marker = 0; max_key_off = 0; group_member = 0;
+ nr_field = 0; key_off = 0; field_marker = 1;
+ field = &tmp; max_bit_pos = fls(flow_cfg);
+ for (idx = 0;
+ idx < max_bit_pos && nr_field < FIELDS_PER_ALG &&
+ key_off < MAX_KEY_OFF; idx++) {
+ key_type = BIT(idx);
+ valid_key = flow_cfg & key_type;
+ /* Found a field marker, reset the field values */
+ if (field_marker)
+ memset(&tmp, 0, sizeof(tmp));
+
+ field_marker = true;
+ keyoff_marker = true;
+ switch (key_type) {
+ case NIX_FLOW_KEY_TYPE_PORT:
+ field->sel_chan = true;
+ /* This should be set to 1, when SEL_CHAN is set */
+ field->bytesm1 = 1;
+ break;
+ case NIX_FLOW_KEY_TYPE_IPV4_PROTO:
+ field->lid = NPC_LID_LC;
+ field->hdr_offset = 9; /* offset */
+ field->bytesm1 = 0; /* 1 byte */
+ field->ltype_match = NPC_LT_LC_IP;
+ field->ltype_mask = 0xF;
+ break;
+ case NIX_FLOW_KEY_TYPE_IPV4:
+ case NIX_FLOW_KEY_TYPE_INNR_IPV4:
+ field->lid = NPC_LID_LC;
+ field->ltype_match = NPC_LT_LC_IP;
+ if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV4) {
+ field->lid = NPC_LID_LG;
+ field->ltype_match = NPC_LT_LG_TU_IP;
+ }
+ field->hdr_offset = 12; /* SIP offset */
+ field->bytesm1 = 7; /* SIP + DIP, 8 bytes */
+
+ /* Only SIP */
+ if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_SRC_ONLY)
+ field->bytesm1 = 3; /* SIP, 4 bytes */
+
+ if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_DST_ONLY) {
+ /* Both SIP + DIP */
+ if (field->bytesm1 == 3) {
+ field->bytesm1 = 7; /* SIP + DIP, 8B */
+ } else {
+ /* Only DIP */
+ field->hdr_offset = 16; /* DIP off */
+ field->bytesm1 = 3; /* DIP, 4 bytes */
+ }
+ }
+
+ field->ltype_mask = 0xF; /* Match only IPv4 */
+ keyoff_marker = false;
+ break;
+ case NIX_FLOW_KEY_TYPE_IPV6:
+ case NIX_FLOW_KEY_TYPE_INNR_IPV6:
+ field->lid = NPC_LID_LC;
+ field->ltype_match = NPC_LT_LC_IP6;
+ if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV6) {
+ field->lid = NPC_LID_LG;
+ field->ltype_match = NPC_LT_LG_TU_IP6;
+ }
+ field->hdr_offset = 8; /* SIP offset */
+ field->bytesm1 = 31; /* SIP + DIP, 32 bytes */
+
+ /* Only SIP */
+ if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_SRC_ONLY)
+ field->bytesm1 = 15; /* SIP, 16 bytes */
+
+ if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_DST_ONLY) {
+ /* Both SIP + DIP */
+ if (field->bytesm1 == 15) {
+ /* SIP + DIP, 32 bytes */
+ field->bytesm1 = 31;
+ } else {
+ /* Only DIP */
+ field->hdr_offset = 24; /* DIP off */
+ field->bytesm1 = 15; /* DIP,16 bytes */
+ }
+ }
+ field->ltype_mask = 0xF; /* Match only IPv6 */
+ break;
+ case NIX_FLOW_KEY_TYPE_TCP:
+ case NIX_FLOW_KEY_TYPE_UDP:
+ case NIX_FLOW_KEY_TYPE_SCTP:
+ case NIX_FLOW_KEY_TYPE_INNR_TCP:
+ case NIX_FLOW_KEY_TYPE_INNR_UDP:
+ case NIX_FLOW_KEY_TYPE_INNR_SCTP:
+ field->lid = NPC_LID_LD;
+ if (key_type == NIX_FLOW_KEY_TYPE_INNR_TCP ||
+ key_type == NIX_FLOW_KEY_TYPE_INNR_UDP ||
+ key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP)
+ field->lid = NPC_LID_LH;
+ field->bytesm1 = 3; /* Sport + Dport, 4 bytes */
+
+ if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L4_SRC_ONLY)
+ field->bytesm1 = 1; /* SRC, 2 bytes */
+
+ if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L4_DST_ONLY) {
+ /* Both SRC + DST */
+ if (field->bytesm1 == 1) {
+ /* SRC + DST, 4 bytes */
+ field->bytesm1 = 3;
+ } else {
+					/* Only DST port */
+ field->hdr_offset = 2; /* DST off */
+ field->bytesm1 = 1; /* DST, 2 bytes */
+ }
+ }
+
+			/* Enum values for NPC_LID_LD and NPC_LID_LH are same,
+ * so no need to change the ltype_match, just change
+ * the lid for inner protocols
+ */
+ BUILD_BUG_ON((int)NPC_LT_LD_TCP !=
+ (int)NPC_LT_LH_TU_TCP);
+ BUILD_BUG_ON((int)NPC_LT_LD_UDP !=
+ (int)NPC_LT_LH_TU_UDP);
+ BUILD_BUG_ON((int)NPC_LT_LD_SCTP !=
+ (int)NPC_LT_LH_TU_SCTP);
+
+ if ((key_type == NIX_FLOW_KEY_TYPE_TCP ||
+ key_type == NIX_FLOW_KEY_TYPE_INNR_TCP) &&
+ valid_key) {
+ field->ltype_match |= NPC_LT_LD_TCP;
+ group_member = true;
+ } else if ((key_type == NIX_FLOW_KEY_TYPE_UDP ||
+ key_type == NIX_FLOW_KEY_TYPE_INNR_UDP) &&
+ valid_key) {
+ field->ltype_match |= NPC_LT_LD_UDP;
+ group_member = true;
+ } else if ((key_type == NIX_FLOW_KEY_TYPE_SCTP ||
+ key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) &&
+ valid_key) {
+ field->ltype_match |= NPC_LT_LD_SCTP;
+ group_member = true;
+ }
+ field->ltype_mask = ~field->ltype_match;
+ if (key_type == NIX_FLOW_KEY_TYPE_SCTP ||
+ key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) {
+				/* Handle the case where any of the group items
+ * is enabled in the group but not the final one
+ */
+ if (group_member) {
+ valid_key = true;
+ group_member = false;
+ }
+ } else {
+ field_marker = false;
+ keyoff_marker = false;
+ }
+
+			/* TCP/UDP/SCTP and ESP/AH fall at the same offset, so
+			 * remember the TCP key offset in the 40-byte hash key.
+ */
+ if (key_type == NIX_FLOW_KEY_TYPE_TCP)
+ l4_key_offset = key_off;
+ break;
+ case NIX_FLOW_KEY_TYPE_NVGRE:
+ field->lid = NPC_LID_LD;
+ field->hdr_offset = 4; /* VSID offset */
+ field->bytesm1 = 2;
+ field->ltype_match = NPC_LT_LD_NVGRE;
+ field->ltype_mask = 0xF;
+ break;
+ case NIX_FLOW_KEY_TYPE_VXLAN:
+ case NIX_FLOW_KEY_TYPE_GENEVE:
+ field->lid = NPC_LID_LE;
+ field->bytesm1 = 2;
+ field->hdr_offset = 4;
+ field->ltype_mask = 0xF;
+ field_marker = false;
+ keyoff_marker = false;
+
+ if (key_type == NIX_FLOW_KEY_TYPE_VXLAN && valid_key) {
+ field->ltype_match |= NPC_LT_LE_VXLAN;
+ group_member = true;
+ }
+
+ if (key_type == NIX_FLOW_KEY_TYPE_GENEVE && valid_key) {
+ field->ltype_match |= NPC_LT_LE_GENEVE;
+ group_member = true;
+ }
+
+ if (key_type == NIX_FLOW_KEY_TYPE_GENEVE) {
+ if (group_member) {
+ field->ltype_mask = ~field->ltype_match;
+ field_marker = true;
+ keyoff_marker = true;
+ valid_key = true;
+ group_member = false;
+ }
+ }
+ break;
+ case NIX_FLOW_KEY_TYPE_ETH_DMAC:
+ case NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC:
+ field->lid = NPC_LID_LA;
+ field->ltype_match = NPC_LT_LA_ETHER;
+ if (key_type == NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC) {
+ field->lid = NPC_LID_LF;
+ field->ltype_match = NPC_LT_LF_TU_ETHER;
+ }
+ field->hdr_offset = 0;
+ field->bytesm1 = 5; /* DMAC 6 Byte */
+ field->ltype_mask = 0xF;
+ break;
+ case NIX_FLOW_KEY_TYPE_IPV6_EXT:
+ field->lid = NPC_LID_LC;
+ field->hdr_offset = 40; /* IPV6 hdr */
+ field->bytesm1 = 0; /* 1 byte ext hdr */
+ field->ltype_match = NPC_LT_LC_IP6_EXT;
+ field->ltype_mask = 0xF;
+ break;
+ case NIX_FLOW_KEY_TYPE_GTPU:
+ field->lid = NPC_LID_LE;
+ field->hdr_offset = 4;
+ field->bytesm1 = 3; /* 4 bytes TID */
+ field->ltype_match = NPC_LT_LE_GTPU;
+ field->ltype_mask = 0xF;
+ break;
+ case NIX_FLOW_KEY_TYPE_VLAN:
+ field->lid = NPC_LID_LB;
+ field->hdr_offset = 2; /* Skip TPID (2-bytes) */
+ field->bytesm1 = 1; /* 2 Bytes (Actually 12 bits) */
+ field->ltype_match = NPC_LT_LB_CTAG;
+ field->ltype_mask = 0xF;
+ field->fn_mask = 1; /* Mask out the first nibble */
+ break;
+ case NIX_FLOW_KEY_TYPE_AH:
+ case NIX_FLOW_KEY_TYPE_ESP:
+ field->hdr_offset = 0;
+ field->bytesm1 = 7; /* SPI + sequence number */
+ field->ltype_mask = 0xF;
+ field->lid = NPC_LID_LE;
+ field->ltype_match = NPC_LT_LE_ESP;
+ if (key_type == NIX_FLOW_KEY_TYPE_AH) {
+ field->lid = NPC_LID_LD;
+ field->ltype_match = NPC_LT_LD_AH;
+ field->hdr_offset = 4;
+ keyoff_marker = false;
+ }
+ break;
+ }
+ field->ena = 1;
+
+ /* Found a valid flow key type */
+ if (valid_key) {
+ /* Use the key offset of TCP/UDP/SCTP fields
+ * for ESP/AH fields.
+ */
+ if (key_type == NIX_FLOW_KEY_TYPE_ESP ||
+ key_type == NIX_FLOW_KEY_TYPE_AH)
+ key_off = l4_key_offset;
+ field->key_offset = key_off;
+ memcpy(&alg[nr_field], field, sizeof(*field));
+ max_key_off = max(max_key_off, field->bytesm1 + 1);
+
+ /* Found a field marker, get the next field */
+ if (field_marker)
+ nr_field++;
+ }
+
+ /* Found a keyoff marker, update the new key_off */
+ if (keyoff_marker) {
+ key_off += max_key_off;
+ max_key_off = 0;
+ }
+ }
+ /* Processed all the flow key types */
+ if (idx == max_bit_pos && key_off <= MAX_KEY_OFF)
+ return 0;
+ else
+ return NIX_AF_ERR_RSS_NOSPC_FIELD;
+}
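+
+/* For illustration only: a minimal, self-contained sketch (plain C, outside
+ * the driver) of the bookkeeping the function above performs -- each enabled
+ * field contributes 'bytesm1 + 1' bytes to the 40-byte RSS hash key and
+ * key_off advances as fields are placed. Struct and macro names here are
+ * hypothetical.
+ */
+#include <stdint.h>
+#include <stdio.h>
+
+#define DEMO_MAX_KEY_OFF 40
+
+struct demo_field {
+ uint8_t hdr_offset; /* byte offset inside the matched header */
+ uint8_t bytesm1; /* extracted bytes minus one */
+ uint8_t key_offset; /* placement inside the hash key */
+};
+
+int main(void)
+{
+ /* IPv4 SIP+DIP (offset 12, 8 bytes) then L4 ports (offset 0, 4 bytes) */
+ struct demo_field fields[] = {
+ { .hdr_offset = 12, .bytesm1 = 7 },
+ { .hdr_offset = 0, .bytesm1 = 3 },
+ };
+ unsigned int key_off = 0;
+ unsigned int i;
+
+ for (i = 0; i < 2; i++) {
+ fields[i].key_offset = key_off;
+ key_off += fields[i].bytesm1 + 1;
+ }
+
+ if (key_off > DEMO_MAX_KEY_OFF)
+ printf("no space left in the hash key\n");
+ else
+ printf("used %u of %u hash key bytes\n", key_off,
+ (unsigned int)DEMO_MAX_KEY_OFF);
+ return 0;
+}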
+
+static int reserve_flowkey_alg_idx(struct rvu *rvu, int blkaddr, u32 flow_cfg)
+{
+ u64 field[FIELDS_PER_ALG];
+ struct nix_hw *hw;
+ int fid, rc;
+
+ hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!hw)
+ return NIX_AF_ERR_INVALID_NIXBLK;
+
+ /* No room to add a new flow hash algorithm */
+ if (hw->flowkey.in_use >= NIX_FLOW_KEY_ALG_MAX)
+ return NIX_AF_ERR_RSS_NOSPC_ALGO;
+
+ /* Generate algo fields for the given flow_cfg */
+ rc = set_flowkey_fields((struct nix_rx_flowkey_alg *)field, flow_cfg);
+ if (rc)
+ return rc;
+
+ /* Update ALGX_FIELDX register with generated fields */
+ for (fid = 0; fid < FIELDS_PER_ALG; fid++)
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(hw->flowkey.in_use,
+ fid), field[fid]);
+
+ /* Store the flow_cfg for further lookup */
+ rc = hw->flowkey.in_use;
+ hw->flowkey.flowkey[rc] = flow_cfg;
+ hw->flowkey.in_use++;
+
+ return rc;
+}
+
+int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu,
+ struct nix_rss_flowkey_cfg *req,
+ struct nix_rss_flowkey_cfg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ int alg_idx, nixlf, blkaddr;
+ struct nix_hw *nix_hw;
+ int err;
+
+ err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
+ if (err)
+ return err;
+
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return NIX_AF_ERR_INVALID_NIXBLK;
+
+ alg_idx = get_flowkey_alg_idx(nix_hw, req->flowkey_cfg);
+ /* Failed to get an algo index from the existing list, reserve a new one */
+ if (alg_idx < 0) {
+ alg_idx = reserve_flowkey_alg_idx(rvu, blkaddr,
+ req->flowkey_cfg);
+ if (alg_idx < 0)
+ return alg_idx;
+ }
+ rsp->alg_idx = alg_idx;
+ rvu_npc_update_flowkey_alg_idx(rvu, pcifunc, nixlf, req->group,
+ alg_idx, req->mcam_index);
+ return 0;
+}
+
+static int nix_rx_flowkey_alg_cfg(struct rvu *rvu, int blkaddr)
+{
+ u32 flowkey_cfg, minkey_cfg;
+ int alg, fid, rc;
+
+ /* Disable all flow key algx fieldx */
+ for (alg = 0; alg < NIX_FLOW_KEY_ALG_MAX; alg++) {
+ for (fid = 0; fid < FIELDS_PER_ALG; fid++)
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(alg, fid),
+ 0);
+ }
+
+ /* IPv4/IPv6 SIP/DIPs */
+ flowkey_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6;
+ rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
+ if (rc < 0)
+ return rc;
+
+ /* TCPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
+ minkey_cfg = flowkey_cfg;
+ flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP;
+ rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
+ if (rc < 0)
+ return rc;
+
+ /* UDPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
+ flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP;
+ rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
+ if (rc < 0)
+ return rc;
+
+ /* SCTPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
+ flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_SCTP;
+ rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
+ if (rc < 0)
+ return rc;
+
+ /* TCP/UDP v4/v6 4-tuple, rest IP pkts 2-tuple */
+ flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
+ NIX_FLOW_KEY_TYPE_UDP;
+ rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
+ if (rc < 0)
+ return rc;
+
+ /* TCP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
+ flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
+ NIX_FLOW_KEY_TYPE_SCTP;
+ rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
+ if (rc < 0)
+ return rc;
+
+ /* UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
+ flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP |
+ NIX_FLOW_KEY_TYPE_SCTP;
+ rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
+ if (rc < 0)
+ return rc;
+
+ /* TCP/UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
+ flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
+ NIX_FLOW_KEY_TYPE_UDP | NIX_FLOW_KEY_TYPE_SCTP;
+ rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
+ if (rc < 0)
+ return rc;
+
+ return 0;
+}
+
+int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
+ struct nix_set_mac_addr *req,
+ struct msg_rsp *rsp)
+{
+ bool from_vf = req->hdr.pcifunc & RVU_PFVF_FUNC_MASK;
+ u16 pcifunc = req->hdr.pcifunc;
+ int blkaddr, nixlf, err;
+ struct rvu_pfvf *pfvf;
+
+ err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
+ if (err)
+ return err;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+
+ /* untrusted VF can't overwrite admin(PF) changes */
+ if (!test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) &&
+ (from_vf && test_bit(PF_SET_VF_MAC, &pfvf->flags))) {
+ dev_warn(rvu->dev,
+ "MAC address set by admin(PF) cannot be overwritten by untrusted VF");
+ return -EPERM;
+ }
+
+ ether_addr_copy(pfvf->mac_addr, req->mac_addr);
+
+ rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
+ pfvf->rx_chan_base, req->mac_addr);
+
+ if (test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && from_vf)
+ ether_addr_copy(pfvf->default_mac, req->mac_addr);
+
+ rvu_switch_update_rules(rvu, pcifunc);
+
+ return 0;
+}
+
+int rvu_mbox_handler_nix_get_mac_addr(struct rvu *rvu,
+ struct msg_req *req,
+ struct nix_get_mac_addr_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_pfvf *pfvf;
+
+ if (!is_nixlf_attached(rvu, pcifunc))
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+
+ ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);
+
+ return 0;
+}
+
+int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
+ struct msg_rsp *rsp)
+{
+ bool allmulti, promisc, nix_rx_multicast;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_pfvf *pfvf;
+ int nixlf, err;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ promisc = req->mode & NIX_RX_MODE_PROMISC ? true : false;
+ allmulti = req->mode & NIX_RX_MODE_ALLMULTI ? true : false;
+ pfvf->use_mce_list = req->mode & NIX_RX_MODE_USE_MCE ? true : false;
+
+ nix_rx_multicast = rvu->hw->cap.nix_rx_multicast & pfvf->use_mce_list;
+
+ if (is_vf(pcifunc) && !nix_rx_multicast &&
+ (promisc || allmulti)) {
+ dev_warn_ratelimited(rvu->dev,
+ "VF promisc/multicast not supported\n");
+ return 0;
+ }
+
+ /* untrusted VF can't configure promisc/allmulti */
+ if (is_vf(pcifunc) && !test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) &&
+ (promisc || allmulti))
+ return 0;
+
+ err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
+ if (err)
+ return err;
+
+ if (nix_rx_multicast) {
+ /* add/del this PF_FUNC to/from mcast pkt replication list */
+ err = nix_update_mce_rule(rvu, pcifunc, NIXLF_ALLMULTI_ENTRY,
+ allmulti);
+ if (err) {
+ dev_err(rvu->dev,
+ "Failed to update pcifunc 0x%x to multicast list\n",
+ pcifunc);
+ return err;
+ }
+
+ /* add/del this PF_FUNC to/from promisc pkt replication list */
+ err = nix_update_mce_rule(rvu, pcifunc, NIXLF_PROMISC_ENTRY,
+ promisc);
+ if (err) {
+ dev_err(rvu->dev,
+ "Failed to update pcifunc 0x%x to promisc list\n",
+ pcifunc);
+ return err;
+ }
+ }
+
+ /* install/uninstall allmulti entry */
+ if (allmulti) {
+ rvu_npc_install_allmulti_entry(rvu, pcifunc, nixlf,
+ pfvf->rx_chan_base);
+ } else {
+ if (!nix_rx_multicast)
+ rvu_npc_enable_allmulti_entry(rvu, pcifunc, nixlf, false);
+ }
+
+ /* install/uninstall promisc entry */
+ if (promisc)
+ rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
+ pfvf->rx_chan_base,
+ pfvf->rx_chan_cnt);
+ else
+ if (!nix_rx_multicast)
+ rvu_npc_enable_promisc_entry(rvu, pcifunc, nixlf, false);
+
+ return 0;
+}
+
+static void nix_find_link_frs(struct rvu *rvu,
+ struct nix_frs_cfg *req, u16 pcifunc)
+{
+ int pf = rvu_get_pf(pcifunc);
+ struct rvu_pfvf *pfvf;
+ int maxlen, minlen;
+ int numvfs, hwvf;
+ int vf;
+
+ /* Update with requester's min/max lengths */
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ pfvf->maxlen = req->maxlen;
+ if (req->update_minlen)
+ pfvf->minlen = req->minlen;
+
+ maxlen = req->maxlen;
+ minlen = req->update_minlen ? req->minlen : 0;
+
+ /* Get this PF's numVFs and starting hwvf */
+ rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
+
+ /* For each VF, compare requested max/minlen */
+ for (vf = 0; vf < numvfs; vf++) {
+ pfvf = &rvu->hwvf[hwvf + vf];
+ if (pfvf->maxlen > maxlen)
+ maxlen = pfvf->maxlen;
+ if (req->update_minlen &&
+ pfvf->minlen && pfvf->minlen < minlen)
+ minlen = pfvf->minlen;
+ }
+
+ /* Compare requested max/minlen with PF's max/minlen */
+ pfvf = &rvu->pf[pf];
+ if (pfvf->maxlen > maxlen)
+ maxlen = pfvf->maxlen;
+ if (req->update_minlen &&
+ pfvf->minlen && pfvf->minlen < minlen)
+ minlen = pfvf->minlen;
+
+ /* Update the request with the max/min across the PF and its VFs */
+ req->maxlen = maxlen;
+ if (req->update_minlen)
+ req->minlen = minlen;
+}
+
+int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
+ struct msg_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ int pf = rvu_get_pf(pcifunc);
+ int blkaddr, link = -1;
+ struct nix_hw *nix_hw;
+ struct rvu_pfvf *pfvf;
+ u8 cgx = 0, lmac = 0;
+ u16 max_mtu;
+ u64 cfg;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return NIX_AF_ERR_INVALID_NIXBLK;
+
+ if (is_afvf(pcifunc))
+ rvu_get_lbk_link_max_frs(rvu, &max_mtu);
+ else
+ rvu_get_lmac_link_max_frs(rvu, &max_mtu);
+
+ if (!req->sdp_link && req->maxlen > max_mtu)
+ return NIX_AF_ERR_FRS_INVALID;
+
+ if (req->update_minlen && req->minlen < NIC_HW_MIN_FRS)
+ return NIX_AF_ERR_FRS_INVALID;
+
+ /* Check if config is for SDP link */
+ if (req->sdp_link) {
+ if (!hw->sdp_links)
+ return NIX_AF_ERR_RX_LINK_INVALID;
+ link = hw->cgx_links + hw->lbk_links;
+ goto linkcfg;
+ }
+
+ /* Check if the request is from CGX mapped RVU PF */
+ if (is_pf_cgxmapped(rvu, pf)) {
+ /* Get CGX and LMAC to which this PF is mapped and find link */
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx, &lmac);
+ link = (cgx * hw->lmac_per_cgx) + lmac;
+ } else if (pf == 0) {
+ /* For VFs of PF0 ingress is LBK port, so config LBK link */
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ link = hw->cgx_links + pfvf->lbkid;
+ }
+
+ if (link < 0)
+ return NIX_AF_ERR_RX_LINK_INVALID;
+
+linkcfg:
+ nix_find_link_frs(rvu, req, pcifunc);
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link));
+ cfg = (cfg & ~(0xFFFFULL << 16)) | ((u64)req->maxlen << 16);
+ if (req->update_minlen)
+ cfg = (cfg & ~0xFFFFULL) | req->minlen;
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), cfg);
+
+ return 0;
+}
+
+int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req,
+ struct msg_rsp *rsp)
+{
+ int nixlf, blkaddr, err;
+ u64 cfg;
+
+ err = nix_get_nixlf(rvu, req->hdr.pcifunc, &nixlf, &blkaddr);
+ if (err)
+ return err;
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf));
+ /* Set the interface configuration */
+ if (req->len_verify & BIT(0))
+ cfg |= BIT_ULL(41);
+ else
+ cfg &= ~BIT_ULL(41);
+
+ if (req->len_verify & BIT(1))
+ cfg |= BIT_ULL(40);
+ else
+ cfg &= ~BIT_ULL(40);
+
+ if (req->len_verify & NIX_RX_DROP_RE)
+ cfg |= BIT_ULL(32);
+ else
+ cfg &= ~BIT_ULL(32);
+
+ if (req->csum_verify & BIT(0))
+ cfg |= BIT_ULL(37);
+ else
+ cfg &= ~BIT_ULL(37);
+
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), cfg);
+
+ return 0;
+}
+
+static u64 rvu_get_lbk_link_credits(struct rvu *rvu, u16 lbk_max_frs)
+{
+ return 1600; /* 16 * max LBK datarate = 16 * 100Gbps */
+}
+
+static void nix_link_config(struct rvu *rvu, int blkaddr,
+ struct nix_hw *nix_hw)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ int cgx, lmac_cnt, slink, link;
+ u16 lbk_max_frs, lmac_max_frs;
+ unsigned long lmac_bmap;
+ u64 tx_credits, cfg;
+ u64 lmac_fifo_len;
+ int iter;
+
+ rvu_get_lbk_link_max_frs(rvu, &lbk_max_frs);
+ rvu_get_lmac_link_max_frs(rvu, &lmac_max_frs);
+
+ /* Set default min/max packet lengths allowed on NIX Rx links.
+ *
+ * With the HW reset minlen value of 60 bytes, HW would treat ARP
+ * pkts as undersize and report them to SW as error pkts, hence
+ * set it to 40 bytes.
+ */
+ for (link = 0; link < hw->cgx_links; link++) {
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
+ ((u64)lmac_max_frs << 16) | NIC_HW_MIN_FRS);
+ }
+
+ for (link = hw->cgx_links; link < hw->lbk_links; link++) {
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
+ ((u64)lbk_max_frs << 16) | NIC_HW_MIN_FRS);
+ }
+ if (hw->sdp_links) {
+ link = hw->cgx_links + hw->lbk_links;
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
+ SDP_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS);
+ }
+
+ /* Get MCS external bypass status for CN10K-B */
+ if (mcs_get_blkcnt() == 1) {
+ /* Adjust for 2 credits when external bypass is disabled */
+ nix_hw->cc_mcs_cnt = is_mcs_bypass(0) ? 0 : 2;
+ }
+
+ /* Set credits for Tx links assuming max packet length allowed.
+ * This will be reconfigured based on MTU set for PF/VF.
+ */
+ for (cgx = 0; cgx < hw->cgx; cgx++) {
+ lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
+ /* Skip when cgx is not available or lmac cnt is zero */
+ if (lmac_cnt <= 0)
+ continue;
+ slink = cgx * hw->lmac_per_cgx;
+
+ /* Get LMAC id's from bitmap */
+ lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu));
+ for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) {
+ lmac_fifo_len = rvu_cgx_get_lmac_fifolen(rvu, cgx, iter);
+ if (!lmac_fifo_len) {
+ dev_err(rvu->dev,
+ "%s: Failed to get CGX/RPM%d:LMAC%d FIFO size\n",
+ __func__, cgx, iter);
+ continue;
+ }
+ tx_credits = (lmac_fifo_len - lmac_max_frs) / 16;
+ /* Enable credits and set credit pkt count to max allowed */
+ cfg = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
+ cfg |= FIELD_PREP(NIX_AF_LINKX_MCS_CNT_MASK, nix_hw->cc_mcs_cnt);
+
+ link = iter + slink;
+ nix_hw->tx_credits[link] = tx_credits;
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg);
+ }
+ }
+
+ /* Set Tx credits for LBK link */
+ slink = hw->cgx_links;
+ for (link = slink; link < (slink + hw->lbk_links); link++) {
+ tx_credits = rvu_get_lbk_link_credits(rvu, lbk_max_frs);
+ nix_hw->tx_credits[link] = tx_credits;
+ /* Enable credits and set credit pkt count to max allowed */
+ tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_TX_LINKX_NORM_CREDIT(link), tx_credits);
+ }
+}
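+
+/* For illustration only: a standalone sketch (plain C) of the Tx credit
+ * math used above -- credits are the LMAC FIFO length minus the max frame
+ * size, in 16-byte units, packed into the NORM_CREDIT value together with
+ * the max packet-count credits and the enable bit. The FIFO size and frame
+ * size below are example values, not hardware constants.
+ */
+#include <stdint.h>
+#include <stdio.h>
+
+static uint64_t demo_norm_credit(uint64_t fifo_len, uint16_t max_frs)
+{
+ uint64_t tx_credits = (fifo_len - max_frs) / 16;
+
+ /* bit 1: enable, bits [10:2]: pkt-count credits, bits 12 and up: credits */
+ return (tx_credits << 12) | (0x1FFULL << 2) | (1ULL << 1);
+}
+
+int main(void)
+{
+ /* e.g. a 48 KB LMAC FIFO and a 9212-byte max frame */
+ uint64_t cfg = demo_norm_credit(48 * 1024, 9212);
+
+ printf("credits=%llu cfg=0x%llx\n",
+ (unsigned long long)(cfg >> 12), (unsigned long long)cfg);
+ return 0;
+}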
+
+static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr)
+{
+ int idx, err;
+ u64 status;
+
+ /* Start X2P bus calibration */
+ rvu_write64(rvu, blkaddr, NIX_AF_CFG,
+ rvu_read64(rvu, blkaddr, NIX_AF_CFG) | BIT_ULL(9));
+ /* Wait for calibration to complete */
+ err = rvu_poll_reg(rvu, blkaddr,
+ NIX_AF_STATUS, BIT_ULL(10), false);
+ if (err) {
+ dev_err(rvu->dev, "NIX X2P bus calibration failed\n");
+ return err;
+ }
+
+ status = rvu_read64(rvu, blkaddr, NIX_AF_STATUS);
+ /* Check if CGX devices are ready */
+ for (idx = 0; idx < rvu->cgx_cnt_max; idx++) {
+ /* Skip when cgx port is not available */
+ if (!rvu_cgx_pdata(idx, rvu) ||
+ (status & (BIT_ULL(16 + idx))))
+ continue;
+ dev_err(rvu->dev,
+ "CGX%d didn't respond to NIX X2P calibration\n", idx);
+ err = -EBUSY;
+ }
+
+ /* Check if LBK is ready */
+ if (!(status & BIT_ULL(19))) {
+ dev_err(rvu->dev,
+ "LBK didn't respond to NIX X2P calibration\n");
+ err = -EBUSY;
+ }
+
+ /* Clear 'calibrate_x2p' bit */
+ rvu_write64(rvu, blkaddr, NIX_AF_CFG,
+ rvu_read64(rvu, blkaddr, NIX_AF_CFG) & ~BIT_ULL(9));
+ if (err || (status & 0x3FFULL))
+ dev_err(rvu->dev,
+ "NIX X2P calibration failed, status 0x%llx\n", status);
+ if (err)
+ return err;
+ return 0;
+}
+
+static int nix_aq_init(struct rvu *rvu, struct rvu_block *block)
+{
+ u64 cfg;
+ int err;
+
+ /* Set admin queue endianness */
+ cfg = rvu_read64(rvu, block->addr, NIX_AF_CFG);
+#ifdef __BIG_ENDIAN
+ cfg |= BIT_ULL(8);
+ rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
+#else
+ cfg &= ~BIT_ULL(8);
+ rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
+#endif
+
+ /* Do not bypass NDC cache */
+ cfg = rvu_read64(rvu, block->addr, NIX_AF_NDC_CFG);
+ cfg &= ~0x3FFEULL;
+#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
+ /* Disable caching of SQB aka SQEs */
+ cfg |= 0x04ULL;
+#endif
+ rvu_write64(rvu, block->addr, NIX_AF_NDC_CFG, cfg);
+
+ /* Result structure can be followed by RQ/SQ/CQ context at
+ * RES + 128 bytes and a write mask at RES + 256 bytes, depending on
+ * operation type. Alloc sufficient result memory for all operations.
+ */
+ err = rvu_aq_alloc(rvu, &block->aq,
+ Q_COUNT(AQ_SIZE), sizeof(struct nix_aq_inst_s),
+ ALIGN(sizeof(struct nix_aq_res_s), 128) + 256);
+ if (err)
+ return err;
+
+ rvu_write64(rvu, block->addr, NIX_AF_AQ_CFG, AQ_SIZE);
+ rvu_write64(rvu, block->addr,
+ NIX_AF_AQ_BASE, (u64)block->aq->inst->iova);
+ return 0;
+}
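+
+/* For illustration only: the AQ result-entry sizing used above, as plain C
+ * arithmetic -- the result structure sits at RES, a context may follow at
+ * RES + 128 and a write mask at RES + 256, so one entry needs
+ * ALIGN(sizeof(result), 128) + 256 bytes. The result size below is an
+ * assumed example, not the real structure size.
+ */
+#include <stddef.h>
+#include <stdio.h>
+
+#define DEMO_ALIGN(x, a) (((x) + (a) - 1) / (a) * (a))
+
+int main(void)
+{
+ size_t res_sz = 40; /* assumed result struct size */
+ size_t entry = DEMO_ALIGN(res_sz, 128) + 256;
+
+ printf("result @ +0, context @ +128, mask @ +256, entry = %zu bytes\n",
+ entry);
+ return 0;
+}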
+
+static void rvu_nix_setup_capabilities(struct rvu *rvu, int blkaddr)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u64 hw_const;
+
+ hw_const = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
+
+ /* On OcteonTx2 the DWRR quantum is configured directly into each
+ * of the transmit scheduler queues, and PF/VF drivers were free to
+ * configure any value up to 2^24.
+ * On CN10K the HW is modified: the quantum configuration at the
+ * scheduler queues is in terms of a weight, and SW needs to set up
+ * a base DWRR MTU at NIX_AF_DWRR_RPM_MTU / NIX_AF_DWRR_SDP_MTU.
+ * HW then computes 'DWRR MTU * weight' to get the quantum.
+ *
+ * Check if HW uses a common MTU for all DWRR quantum configs.
+ * On OcteonTx2 this register field is '0'.
+ */
+ if ((((hw_const >> 56) & 0x10) == 0x10) && !(hw_const & BIT_ULL(61)))
+ hw->cap.nix_common_dwrr_mtu = true;
+
+ if (hw_const & BIT_ULL(61))
+ hw->cap.nix_multiple_dwrr_mtu = true;
+}
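+
+/* For illustration only: the DWRR difference described in the comment above,
+ * as plain C arithmetic -- OcteonTx2 programs a quantum directly, while
+ * CN10K programs a weight and the effective quantum becomes
+ * 'base DWRR MTU * weight'. The numbers are examples, not register values.
+ */
+#include <stdint.h>
+#include <stdio.h>
+
+int main(void)
+{
+ uint32_t otx2_quantum = 1500; /* programmed directly per queue */
+ uint32_t cn10k_dwrr_mtu = 1500; /* base MTU, common to all queues */
+ uint32_t cn10k_weight = 4; /* per-queue weight */
+ uint32_t cn10k_quantum = cn10k_dwrr_mtu * cn10k_weight;
+
+ printf("otx2 quantum=%u, cn10k quantum=%u\n",
+ otx2_quantum, cn10k_quantum);
+ return 0;
+}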
+
+static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
+{
+ const struct npc_lt_def_cfg *ltdefs;
+ struct rvu_hwinfo *hw = rvu->hw;
+ int blkaddr = nix_hw->blkaddr;
+ struct rvu_block *block;
+ int err;
+ u64 cfg;
+
+ block = &hw->block[blkaddr];
+
+ if (is_rvu_96xx_B0(rvu)) {
+ /* As per a HW erratum in 96xx A0/B0 silicon, NIX may corrupt
+ * internal state when conditional clocks are turned off.
+ * Hence enable them.
+ */
+ rvu_write64(rvu, blkaddr, NIX_AF_CFG,
+ rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x40ULL);
+
+ /* Set chan/link to backpressure TL3 instead of TL2 */
+ rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01);
+
+ /* Disable SQ manager's sticky mode operation (set TM6 = 0).
+ * This sticky mode is known to cause SQ stalls when multiple
+ * SQs are mapped to the same SMQ and transmit pkts at the same time.
+ */
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS);
+ cfg &= ~BIT_ULL(15);
+ rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg);
+ }
+
+ ltdefs = rvu->kpu.lt_def;
+ /* Calibrate X2P bus to check if CGX/LBK links are fine */
+ err = nix_calibrate_x2p(rvu, blkaddr);
+ if (err)
+ return err;
+
+ /* Setup capabilities of the NIX block */
+ rvu_nix_setup_capabilities(rvu, blkaddr);
+
+ /* Initialize admin queue */
+ err = nix_aq_init(rvu, block);
+ if (err)
+ return err;
+
+ /* Restore CINT timer delay to HW reset values */
+ rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL);
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_SEB_CFG);
+
+ /* For better performance use NDC TX instead of NDC RX for SQ's SQEs" */
+ cfg |= 1ULL;
+ if (!is_rvu_otx2(rvu))
+ cfg |= NIX_PTP_1STEP_EN;
+
+ rvu_write64(rvu, blkaddr, NIX_AF_SEB_CFG, cfg);
+
+ if (!is_rvu_otx2(rvu))
+ rvu_nix_block_cn10k_init(rvu, nix_hw);
+
+ if (is_block_implemented(hw, blkaddr)) {
+ err = nix_setup_txschq(rvu, nix_hw, blkaddr);
+ if (err)
+ return err;
+
+ err = nix_setup_ipolicers(rvu, nix_hw, blkaddr);
+ if (err)
+ return err;
+
+ err = nix_af_mark_format_setup(rvu, nix_hw, blkaddr);
+ if (err)
+ return err;
+
+ err = nix_setup_mcast(rvu, nix_hw, blkaddr);
+ if (err)
+ return err;
+
+ err = nix_setup_txvlan(rvu, nix_hw);
+ if (err)
+ return err;
+
+ /* Configure segmentation offload formats */
+ nix_setup_lso(rvu, nix_hw, blkaddr);
+
+ /* Config Outer/Inner L2, IP, TCP, UDP and SCTP NPC layer info.
+ * This helps HW protocol checker to identify headers
+ * and validate length and checksums.
+ */
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OL2,
+ (ltdefs->rx_ol2.lid << 8) | (ltdefs->rx_ol2.ltype_match << 4) |
+ ltdefs->rx_ol2.ltype_mask);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4,
+ (ltdefs->rx_oip4.lid << 8) | (ltdefs->rx_oip4.ltype_match << 4) |
+ ltdefs->rx_oip4.ltype_mask);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4,
+ (ltdefs->rx_iip4.lid << 8) | (ltdefs->rx_iip4.ltype_match << 4) |
+ ltdefs->rx_iip4.ltype_mask);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6,
+ (ltdefs->rx_oip6.lid << 8) | (ltdefs->rx_oip6.ltype_match << 4) |
+ ltdefs->rx_oip6.ltype_mask);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6,
+ (ltdefs->rx_iip6.lid << 8) | (ltdefs->rx_iip6.ltype_match << 4) |
+ ltdefs->rx_iip6.ltype_mask);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP,
+ (ltdefs->rx_otcp.lid << 8) | (ltdefs->rx_otcp.ltype_match << 4) |
+ ltdefs->rx_otcp.ltype_mask);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ITCP,
+ (ltdefs->rx_itcp.lid << 8) | (ltdefs->rx_itcp.ltype_match << 4) |
+ ltdefs->rx_itcp.ltype_mask);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP,
+ (ltdefs->rx_oudp.lid << 8) | (ltdefs->rx_oudp.ltype_match << 4) |
+ ltdefs->rx_oudp.ltype_mask);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IUDP,
+ (ltdefs->rx_iudp.lid << 8) | (ltdefs->rx_iudp.ltype_match << 4) |
+ ltdefs->rx_iudp.ltype_mask);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OSCTP,
+ (ltdefs->rx_osctp.lid << 8) | (ltdefs->rx_osctp.ltype_match << 4) |
+ ltdefs->rx_osctp.ltype_mask);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ISCTP,
+ (ltdefs->rx_isctp.lid << 8) | (ltdefs->rx_isctp.ltype_match << 4) |
+ ltdefs->rx_isctp.ltype_mask);
+
+ if (!is_rvu_otx2(rvu)) {
+ /* Enable APAD calculation for other protocols
+ * matching APAD0 and APAD1 lt def registers.
+ */
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD0,
+ (ltdefs->rx_apad0.valid << 11) |
+ (ltdefs->rx_apad0.lid << 8) |
+ (ltdefs->rx_apad0.ltype_match << 4) |
+ ltdefs->rx_apad0.ltype_mask);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD1,
+ (ltdefs->rx_apad1.valid << 11) |
+ (ltdefs->rx_apad1.lid << 8) |
+ (ltdefs->rx_apad1.ltype_match << 4) |
+ ltdefs->rx_apad1.ltype_mask);
+
+ /* The receive ethertype definition register defines layer
+ * information in NPC_RESULT_S to identify the Ethertype
+ * location in the L2 header. Used for Ethertype overwriting
+ * in the inline IPsec flow.
+ */
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(0),
+ (ltdefs->rx_et[0].offset << 12) |
+ (ltdefs->rx_et[0].valid << 11) |
+ (ltdefs->rx_et[0].lid << 8) |
+ (ltdefs->rx_et[0].ltype_match << 4) |
+ ltdefs->rx_et[0].ltype_mask);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(1),
+ (ltdefs->rx_et[1].offset << 12) |
+ (ltdefs->rx_et[1].valid << 11) |
+ (ltdefs->rx_et[1].lid << 8) |
+ (ltdefs->rx_et[1].ltype_match << 4) |
+ ltdefs->rx_et[1].ltype_mask);
+ }
+
+ err = nix_rx_flowkey_alg_cfg(rvu, blkaddr);
+ if (err)
+ return err;
+
+ nix_hw->tx_credits = kcalloc(hw->cgx_links + hw->lbk_links,
+ sizeof(u64), GFP_KERNEL);
+ if (!nix_hw->tx_credits)
+ return -ENOMEM;
+
+ /* Initialize CGX/LBK/SDP link credits, min/max pkt lengths */
+ nix_link_config(rvu, blkaddr, nix_hw);
+
+ /* Enable Channel backpressure */
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_CFG, BIT_ULL(0));
+ }
+ return 0;
+}
+
+int rvu_nix_init(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct nix_hw *nix_hw;
+ int blkaddr = 0, err;
+ int i = 0;
+
+ hw->nix = devm_kcalloc(rvu->dev, MAX_NIX_BLKS, sizeof(struct nix_hw),
+ GFP_KERNEL);
+ if (!hw->nix)
+ return -ENOMEM;
+
+ blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
+ while (blkaddr) {
+ nix_hw = &hw->nix[i];
+ nix_hw->rvu = rvu;
+ nix_hw->blkaddr = blkaddr;
+ err = rvu_nix_block_init(rvu, nix_hw);
+ if (err)
+ return err;
+ blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
+ i++;
+ }
+
+ return 0;
+}
+
+static void rvu_nix_block_freemem(struct rvu *rvu, int blkaddr,
+ struct rvu_block *block)
+{
+ struct nix_txsch *txsch;
+ struct nix_mcast *mcast;
+ struct nix_txvlan *vlan;
+ struct nix_hw *nix_hw;
+ int lvl;
+
+ rvu_aq_free(rvu, block->aq);
+
+ if (is_block_implemented(rvu->hw, blkaddr)) {
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return;
+
+ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
+ txsch = &nix_hw->txsch[lvl];
+ kfree(txsch->schq.bmap);
+ }
+
+ kfree(nix_hw->tx_credits);
+
+ nix_ipolicer_freemem(rvu, nix_hw);
+
+ vlan = &nix_hw->txvlan;
+ kfree(vlan->rsrc.bmap);
+ mutex_destroy(&vlan->rsrc_lock);
+
+ mcast = &nix_hw->mcast;
+ qmem_free(rvu->dev, mcast->mce_ctx);
+ qmem_free(rvu->dev, mcast->mcast_buf);
+ mutex_destroy(&mcast->mce_lock);
+ }
+}
+
+void rvu_nix_freemem(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int blkaddr = 0;
+
+ blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
+ while (blkaddr) {
+ block = &hw->block[blkaddr];
+ rvu_nix_block_freemem(rvu, blkaddr, block);
+ blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
+ }
+}
+
+int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_pfvf *pfvf;
+ int nixlf, err;
+
+ err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
+ if (err)
+ return err;
+
+ rvu_npc_enable_default_entries(rvu, pcifunc, nixlf);
+
+ npc_mcam_enable_flows(rvu, pcifunc);
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ set_bit(NIXLF_INITIALIZED, &pfvf->flags);
+
+ rvu_switch_update_rules(rvu, pcifunc);
+
+ return rvu_cgx_start_stop_io(rvu, pcifunc, true);
+}
+
+int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_pfvf *pfvf;
+ int nixlf, err;
+
+ err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
+ if (err)
+ return err;
+
+ rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ clear_bit(NIXLF_INITIALIZED, &pfvf->flags);
+
+ err = rvu_cgx_start_stop_io(rvu, pcifunc, false);
+ if (err)
+ return err;
+
+ rvu_cgx_tx_enable(rvu, pcifunc, true);
+
+ return 0;
+}
+
+#define RX_SA_BASE GENMASK_ULL(52, 7)
+
+void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ struct hwctx_disable_req ctx_req;
+ int pf = rvu_get_pf(pcifunc);
+ struct mac_ops *mac_ops;
+ u8 cgx_id, lmac_id;
+ u64 sa_base;
+ void *cgxd;
+ int err;
+
+ ctx_req.hdr.pcifunc = pcifunc;
+
+ /* Cleanup NPC MCAM entries, free Tx scheduler queues being used */
+ rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
+ rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf);
+ nix_interface_deinit(rvu, pcifunc, nixlf);
+ nix_rx_sync(rvu, blkaddr);
+ nix_txschq_free(rvu, pcifunc);
+
+ clear_bit(NIXLF_INITIALIZED, &pfvf->flags);
+
+ rvu_cgx_start_stop_io(rvu, pcifunc, false);
+
+ if (pfvf->sq_ctx) {
+ ctx_req.ctype = NIX_AQ_CTYPE_SQ;
+ err = nix_lf_hwctx_disable(rvu, &ctx_req);
+ if (err)
+ dev_err(rvu->dev, "SQ ctx disable failed\n");
+ }
+
+ if (pfvf->rq_ctx) {
+ ctx_req.ctype = NIX_AQ_CTYPE_RQ;
+ err = nix_lf_hwctx_disable(rvu, &ctx_req);
+ if (err)
+ dev_err(rvu->dev, "RQ ctx disable failed\n");
+ }
+
+ if (pfvf->cq_ctx) {
+ ctx_req.ctype = NIX_AQ_CTYPE_CQ;
+ err = nix_lf_hwctx_disable(rvu, &ctx_req);
+ if (err)
+ dev_err(rvu->dev, "CQ ctx disable failed\n");
+ }
+
+ /* reset HW config done for Switch headers */
+ rvu_npc_set_parse_mode(rvu, pcifunc, OTX2_PRIV_FLAGS_DEFAULT,
+ (PKIND_TX | PKIND_RX), 0, 0, 0, 0);
+
+ /* Disabling CGX and NPC config done for PTP */
+ if (pfvf->hw_rx_tstamp_en) {
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgxd = rvu_cgx_pdata(cgx_id, rvu);
+ mac_ops = get_mac_ops(cgxd);
+ mac_ops->mac_enadis_ptp_config(cgxd, lmac_id, false);
+ /* Undo NPC config done for PTP */
+ if (npc_config_ts_kpuaction(rvu, pf, pcifunc, false))
+ dev_err(rvu->dev, "NPC config for PTP failed\n");
+ pfvf->hw_rx_tstamp_en = false;
+ }
+
+ /* reset priority flow control config */
+ rvu_cgx_prio_flow_ctrl_cfg(rvu, pcifunc, 0, 0, 0);
+
+ /* reset 802.3x flow control config */
+ rvu_cgx_cfg_pause_frm(rvu, pcifunc, 0, 0);
+
+ nix_ctx_free(rvu, pfvf);
+
+ nix_free_all_bandprof(rvu, pcifunc);
+
+ sa_base = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(nixlf));
+ if (FIELD_GET(RX_SA_BASE, sa_base)) {
+ err = rvu_cpt_ctx_flush(rvu, pcifunc);
+ if (err)
+ dev_err(rvu->dev,
+ "CPT ctx flush failed with error: %d\n", err);
+ }
+}
+
+#define NIX_AF_LFX_TX_CFG_PTP_EN BIT_ULL(32)
+
+static int rvu_nix_lf_ptp_tx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int blkaddr, pf;
+ int nixlf;
+ u64 cfg;
+
+ pf = rvu_get_pf(pcifunc);
+ if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP))
+ return 0;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ block = &hw->block[blkaddr];
+ nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
+ if (nixlf < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf));
+
+ if (enable)
+ cfg |= NIX_AF_LFX_TX_CFG_PTP_EN;
+ else
+ cfg &= ~NIX_AF_LFX_TX_CFG_PTP_EN;
+
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);
+
+ return 0;
+}
+
+int rvu_mbox_handler_nix_lf_ptp_tx_enable(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, true);
+}
+
+int rvu_mbox_handler_nix_lf_ptp_tx_disable(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, false);
+}
+
+int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu,
+ struct nix_lso_format_cfg *req,
+ struct nix_lso_format_cfg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct nix_hw *nix_hw;
+ struct rvu_pfvf *pfvf;
+ int blkaddr, idx, f;
+ u64 reg;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (!pfvf->nixlf || blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return NIX_AF_ERR_INVALID_NIXBLK;
+
+ /* Find existing matching LSO format, if any */
+ for (idx = 0; idx < nix_hw->lso.in_use; idx++) {
+ for (f = 0; f < NIX_LSO_FIELD_MAX; f++) {
+ reg = rvu_read64(rvu, blkaddr,
+ NIX_AF_LSO_FORMATX_FIELDX(idx, f));
+ if (req->fields[f] != (reg & req->field_mask))
+ break;
+ }
+
+ if (f == NIX_LSO_FIELD_MAX)
+ break;
+ }
+
+ if (idx < nix_hw->lso.in_use) {
+ /* Match found */
+ rsp->lso_format_idx = idx;
+ return 0;
+ }
+
+ if (nix_hw->lso.in_use == nix_hw->lso.total)
+ return NIX_AF_ERR_LSO_CFG_FAIL;
+
+ rsp->lso_format_idx = nix_hw->lso.in_use++;
+
+ for (f = 0; f < NIX_LSO_FIELD_MAX; f++)
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_LSO_FORMATX_FIELDX(rsp->lso_format_idx, f),
+ req->fields[f]);
+
+ return 0;
+}
+
+#define IPSEC_GEN_CFG_EGRP GENMASK_ULL(50, 48)
+#define IPSEC_GEN_CFG_OPCODE GENMASK_ULL(47, 32)
+#define IPSEC_GEN_CFG_PARAM1 GENMASK_ULL(31, 16)
+#define IPSEC_GEN_CFG_PARAM2 GENMASK_ULL(15, 0)
+
+#define CPT_INST_QSEL_BLOCK GENMASK_ULL(28, 24)
+#define CPT_INST_QSEL_PF_FUNC GENMASK_ULL(23, 8)
+#define CPT_INST_QSEL_SLOT GENMASK_ULL(7, 0)
+
+#define CPT_INST_CREDIT_TH GENMASK_ULL(53, 32)
+#define CPT_INST_CREDIT_BPID GENMASK_ULL(30, 22)
+#define CPT_INST_CREDIT_CNT GENMASK_ULL(21, 0)
+
+static void nix_inline_ipsec_cfg(struct rvu *rvu, struct nix_inline_ipsec_cfg *req,
+ int blkaddr)
+{
+ u8 cpt_idx, cpt_blkaddr;
+ u64 val;
+
+ cpt_idx = (blkaddr == BLKADDR_NIX0) ? 0 : 1;
+ if (req->enable) {
+ val = 0;
+ /* Enable context prefetching */
+ if (!is_rvu_otx2(rvu))
+ val |= BIT_ULL(51);
+
+ /* Set OPCODE and EGRP */
+ val |= FIELD_PREP(IPSEC_GEN_CFG_EGRP, req->gen_cfg.egrp);
+ val |= FIELD_PREP(IPSEC_GEN_CFG_OPCODE, req->gen_cfg.opcode);
+ val |= FIELD_PREP(IPSEC_GEN_CFG_PARAM1, req->gen_cfg.param1);
+ val |= FIELD_PREP(IPSEC_GEN_CFG_PARAM2, req->gen_cfg.param2);
+
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_IPSEC_GEN_CFG, val);
+
+ /* Set CPT queue for inline IPSec */
+ val = FIELD_PREP(CPT_INST_QSEL_SLOT, req->inst_qsel.cpt_slot);
+ val |= FIELD_PREP(CPT_INST_QSEL_PF_FUNC,
+ req->inst_qsel.cpt_pf_func);
+
+ if (!is_rvu_otx2(rvu)) {
+ cpt_blkaddr = (cpt_idx == 0) ? BLKADDR_CPT0 :
+ BLKADDR_CPT1;
+ val |= FIELD_PREP(CPT_INST_QSEL_BLOCK, cpt_blkaddr);
+ }
+
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_INST_QSEL(cpt_idx),
+ val);
+
+ /* Set CPT credit */
+ val = rvu_read64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx));
+ if ((val & 0x3FFFFF) != 0x3FFFFF)
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx),
+ 0x3FFFFF - val);
+
+ val = FIELD_PREP(CPT_INST_CREDIT_CNT, req->cpt_credit);
+ val |= FIELD_PREP(CPT_INST_CREDIT_BPID, req->bpid);
+ val |= FIELD_PREP(CPT_INST_CREDIT_TH, req->credit_th);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx), val);
+ } else {
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_IPSEC_GEN_CFG, 0x0);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_INST_QSEL(cpt_idx),
+ 0x0);
+ val = rvu_read64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx));
+ if ((val & 0x3FFFFF) != 0x3FFFFF)
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx),
+ 0x3FFFFF - val);
+ }
+}
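+
+/* For illustration only: a self-contained sketch of the FIELD_PREP-style
+ * packing used above, with simplified stand-ins for GENMASK_ULL/FIELD_PREP
+ * (the kernel versions live in <linux/bits.h> and <linux/bitfield.h>).
+ * The field positions mirror the IPSEC_GEN_CFG_* masks defined above; the
+ * values packed in are arbitrary examples.
+ */
+#include <stdint.h>
+#include <stdio.h>
+
+#define DEMO_GENMASK_ULL(h, l) \
+ (((~0ULL) >> (63 - (h))) & ((~0ULL) << (l)))
+#define DEMO_FIELD_PREP(mask, val) \
+ (((uint64_t)(val) << __builtin_ctzll(mask)) & (mask))
+
+#define DEMO_EGRP DEMO_GENMASK_ULL(50, 48)
+#define DEMO_OPCODE DEMO_GENMASK_ULL(47, 32)
+#define DEMO_PARAM1 DEMO_GENMASK_ULL(31, 16)
+#define DEMO_PARAM2 DEMO_GENMASK_ULL(15, 0)
+
+int main(void)
+{
+ uint64_t val = 0;
+
+ val |= DEMO_FIELD_PREP(DEMO_EGRP, 1); /* example engine group */
+ val |= DEMO_FIELD_PREP(DEMO_OPCODE, 0x2b); /* example opcode */
+ val |= DEMO_FIELD_PREP(DEMO_PARAM1, 0);
+ val |= DEMO_FIELD_PREP(DEMO_PARAM2, 0);
+
+ printf("IPSEC_GEN_CFG-style value: 0x%016llx\n",
+ (unsigned long long)val);
+ return 0;
+}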
+
+int rvu_mbox_handler_nix_inline_ipsec_cfg(struct rvu *rvu,
+ struct nix_inline_ipsec_cfg *req,
+ struct msg_rsp *rsp)
+{
+ if (!is_block_implemented(rvu->hw, BLKADDR_CPT0))
+ return 0;
+
+ nix_inline_ipsec_cfg(rvu, req, BLKADDR_NIX0);
+ if (is_block_implemented(rvu->hw, BLKADDR_CPT1))
+ nix_inline_ipsec_cfg(rvu, req, BLKADDR_NIX1);
+
+ return 0;
+}
+
+int rvu_mbox_handler_nix_read_inline_ipsec_cfg(struct rvu *rvu,
+ struct msg_req *req,
+ struct nix_inline_ipsec_cfg *rsp)
+
+{
+ u64 val;
+
+ if (!is_block_implemented(rvu->hw, BLKADDR_CPT0))
+ return 0;
+
+ val = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_RX_IPSEC_GEN_CFG);
+ rsp->gen_cfg.egrp = FIELD_GET(IPSEC_GEN_CFG_EGRP, val);
+ rsp->gen_cfg.opcode = FIELD_GET(IPSEC_GEN_CFG_OPCODE, val);
+ rsp->gen_cfg.param1 = FIELD_GET(IPSEC_GEN_CFG_PARAM1, val);
+ rsp->gen_cfg.param2 = FIELD_GET(IPSEC_GEN_CFG_PARAM2, val);
+
+ val = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_RX_CPTX_CREDIT(0));
+ rsp->cpt_credit = FIELD_GET(CPT_INST_CREDIT_CNT, val);
+ rsp->credit_th = FIELD_GET(CPT_INST_CREDIT_TH, val);
+ rsp->bpid = FIELD_GET(CPT_INST_CREDIT_BPID, val);
+
+ return 0;
+}
+
+int rvu_mbox_handler_nix_inline_ipsec_lf_cfg(struct rvu *rvu,
+ struct nix_inline_ipsec_lf_cfg *req,
+ struct msg_rsp *rsp)
+{
+ int lf, blkaddr, err;
+ u64 val;
+
+ if (!is_block_implemented(rvu->hw, BLKADDR_CPT0))
+ return 0;
+
+ err = nix_get_nixlf(rvu, req->hdr.pcifunc, &lf, &blkaddr);
+ if (err)
+ return err;
+
+ if (req->enable) {
+ /* Set TT, TAG_CONST, SA_POW2_SIZE and LENM1_MAX */
+ val = (u64)req->ipsec_cfg0.tt << 44 |
+ (u64)req->ipsec_cfg0.tag_const << 20 |
+ (u64)req->ipsec_cfg0.sa_pow2_size << 16 |
+ req->ipsec_cfg0.lenm1_max;
+
+ if (blkaddr == BLKADDR_NIX1)
+ val |= BIT_ULL(46);
+
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG0(lf), val);
+
+ /* Set SA_IDX_W and SA_IDX_MAX */
+ val = (u64)req->ipsec_cfg1.sa_idx_w << 32 |
+ req->ipsec_cfg1.sa_idx_max;
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(lf), val);
+
+ /* Set SA base address */
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(lf),
+ req->sa_base_addr);
+ } else {
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG0(lf), 0x0);
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(lf), 0x0);
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(lf),
+ 0x0);
+ }
+
+ return 0;
+}
+
+void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc)
+{
+ bool from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK);
+
+ /* overwrite vf mac address with default_mac */
+ if (from_vf)
+ ether_addr_copy(pfvf->mac_addr, pfvf->default_mac);
+}
+
+/* NIX ingress policers or bandwidth profiles APIs */
+static void nix_config_rx_pkt_policer_precolor(struct rvu *rvu, int blkaddr)
+{
+ struct npc_lt_def_cfg defs, *ltdefs;
+
+ ltdefs = &defs;
+ memcpy(ltdefs, rvu->kpu.lt_def, sizeof(struct npc_lt_def_cfg));
+
+ /* Extract the PCP and DEI fields from the outer VLAN at byte
+ * offset 2 from the start of LB_PTR (i.e. the TAG).
+ * VLAN0 is Outer VLAN and VLAN1 is Inner VLAN. Inner VLAN
+ * fields are considered when 'Tunnel enable' is set in profile.
+ */
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN0_PCP_DEI,
+ (2UL << 12) | (ltdefs->ovlan.lid << 8) |
+ (ltdefs->ovlan.ltype_match << 4) |
+ ltdefs->ovlan.ltype_mask);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN1_PCP_DEI,
+ (2UL << 12) | (ltdefs->ivlan.lid << 8) |
+ (ltdefs->ivlan.ltype_match << 4) |
+ ltdefs->ivlan.ltype_mask);
+
+ /* DSCP field in outer and tunneled IPv4 packets */
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4_DSCP,
+ (1UL << 12) | (ltdefs->rx_oip4.lid << 8) |
+ (ltdefs->rx_oip4.ltype_match << 4) |
+ ltdefs->rx_oip4.ltype_mask);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4_DSCP,
+ (1UL << 12) | (ltdefs->rx_iip4.lid << 8) |
+ (ltdefs->rx_iip4.ltype_match << 4) |
+ ltdefs->rx_iip4.ltype_mask);
+
+ /* DSCP field (traffic class) in outer and tunneled IPv6 packets */
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6_DSCP,
+ (1UL << 11) | (ltdefs->rx_oip6.lid << 8) |
+ (ltdefs->rx_oip6.ltype_match << 4) |
+ ltdefs->rx_oip6.ltype_mask);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6_DSCP,
+ (1UL << 11) | (ltdefs->rx_iip6.lid << 8) |
+ (ltdefs->rx_iip6.ltype_match << 4) |
+ ltdefs->rx_iip6.ltype_mask);
+}
+
+static int nix_init_policer_context(struct rvu *rvu, struct nix_hw *nix_hw,
+ int layer, int prof_idx)
+{
+ struct nix_cn10k_aq_enq_req aq_req;
+ int rc;
+
+ memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
+
+ aq_req.qidx = (prof_idx & 0x3FFF) | (layer << 14);
+ aq_req.ctype = NIX_AQ_CTYPE_BANDPROF;
+ aq_req.op = NIX_AQ_INSTOP_INIT;
+
+ /* Context is all zeros, submit to AQ */
+ rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
+ (struct nix_aq_enq_req *)&aq_req, NULL);
+ if (rc)
+ dev_err(rvu->dev, "Failed to INIT bandwidth profile layer %d profile %d\n",
+ layer, prof_idx);
+ return rc;
+}
+
+static int nix_setup_ipolicers(struct rvu *rvu,
+ struct nix_hw *nix_hw, int blkaddr)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct nix_ipolicer *ipolicer;
+ int err, layer, prof_idx;
+ u64 cfg;
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
+ if (!(cfg & BIT_ULL(61))) {
+ hw->cap.ipolicer = false;
+ return 0;
+ }
+
+ hw->cap.ipolicer = true;
+ nix_hw->ipolicer = devm_kcalloc(rvu->dev, BAND_PROF_NUM_LAYERS,
+ sizeof(*ipolicer), GFP_KERNEL);
+ if (!nix_hw->ipolicer)
+ return -ENOMEM;
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_PL_CONST);
+
+ for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
+ ipolicer = &nix_hw->ipolicer[layer];
+ switch (layer) {
+ case BAND_PROF_LEAF_LAYER:
+ ipolicer->band_prof.max = cfg & 0XFFFF;
+ break;
+ case BAND_PROF_MID_LAYER:
+ ipolicer->band_prof.max = (cfg >> 16) & 0XFFFF;
+ break;
+ case BAND_PROF_TOP_LAYER:
+ ipolicer->band_prof.max = (cfg >> 32) & 0XFFFF;
+ break;
+ }
+
+ if (!ipolicer->band_prof.max)
+ continue;
+
+ err = rvu_alloc_bitmap(&ipolicer->band_prof);
+ if (err)
+ return err;
+
+ ipolicer->pfvf_map = devm_kcalloc(rvu->dev,
+ ipolicer->band_prof.max,
+ sizeof(u16), GFP_KERNEL);
+ if (!ipolicer->pfvf_map)
+ return -ENOMEM;
+
+ ipolicer->match_id = devm_kcalloc(rvu->dev,
+ ipolicer->band_prof.max,
+ sizeof(u16), GFP_KERNEL);
+ if (!ipolicer->match_id)
+ return -ENOMEM;
+
+ for (prof_idx = 0;
+ prof_idx < ipolicer->band_prof.max; prof_idx++) {
+ /* Set AF as current owner for INIT ops to succeed */
+ ipolicer->pfvf_map[prof_idx] = 0x00;
+
+ /* There is no enable bit in the profile context, so
+ * contexts cannot be disabled. INIT them here so that
+ * a PF/VF later only has to do a WRITE to set up
+ * policer rates and config.
+ */
+ err = nix_init_policer_context(rvu, nix_hw,
+ layer, prof_idx);
+ if (err)
+ return err;
+ }
+
+ /* Allocate memory for maintaining ref_counts of MID level
+ * profiles; this is needed for aggregating leaf layer
+ * profiles.
+ */
+ if (layer != BAND_PROF_MID_LAYER)
+ continue;
+
+ ipolicer->ref_count = devm_kcalloc(rvu->dev,
+ ipolicer->band_prof.max,
+ sizeof(u16), GFP_KERNEL);
+ if (!ipolicer->ref_count)
+ return -ENOMEM;
+ }
+
+ /* Set policer timeunit to 2us ie (19 + 1) * 100 nsec = 2us */
+ rvu_write64(rvu, blkaddr, NIX_AF_PL_TS, 19);
+
+ nix_config_rx_pkt_policer_precolor(rvu, blkaddr);
+
+ return 0;
+}
+
+static void nix_ipolicer_freemem(struct rvu *rvu, struct nix_hw *nix_hw)
+{
+ struct nix_ipolicer *ipolicer;
+ int layer;
+
+ if (!rvu->hw->cap.ipolicer)
+ return;
+
+ for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
+ ipolicer = &nix_hw->ipolicer[layer];
+
+ if (!ipolicer->band_prof.max)
+ continue;
+
+ kfree(ipolicer->band_prof.bmap);
+ }
+}
+
+static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req,
+ struct nix_hw *nix_hw, u16 pcifunc)
+{
+ struct nix_ipolicer *ipolicer;
+ int layer, hi_layer, prof_idx;
+
+ /* Bits [15:14] in profile index represent layer */
+ layer = (req->qidx >> 14) & 0x03;
+ prof_idx = req->qidx & 0x3FFF;
+
+ ipolicer = &nix_hw->ipolicer[layer];
+ if (prof_idx >= ipolicer->band_prof.max)
+ return -EINVAL;
+
+ /* Check whether the profile is allocated to the requesting PCIFUNC,
+ * with the exception of the AF, which is allowed to read and update
+ * any context.
+ */
+ if (pcifunc && ipolicer->pfvf_map[prof_idx] != pcifunc)
+ return -EINVAL;
+
+ /* If this profile is linked to higher layer profile then check
+ * if that profile is also allocated to the requesting PCIFUNC
+ * or not.
+ */
+ if (!req->prof.hl_en)
+ return 0;
+
+ /* Leaf layer profile can link only to mid layer and
+ * mid layer to top layer.
+ */
+ if (layer == BAND_PROF_LEAF_LAYER)
+ hi_layer = BAND_PROF_MID_LAYER;
+ else if (layer == BAND_PROF_MID_LAYER)
+ hi_layer = BAND_PROF_TOP_LAYER;
+ else
+ return -EINVAL;
+
+ ipolicer = &nix_hw->ipolicer[hi_layer];
+ prof_idx = req->prof.band_prof_id;
+ if (prof_idx >= ipolicer->band_prof.max ||
+ ipolicer->pfvf_map[prof_idx] != pcifunc)
+ return -EINVAL;
+
+ return 0;
+}
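+
+/* For illustration only: the bandwidth-profile qidx encoding checked above,
+ * where bits [13:0] carry the profile index and bits [15:14] the layer.
+ * Plain C encode/decode sketch; the layer numbering used here is a
+ * hypothetical stand-in, not the driver's enum values.
+ */
+#include <stdint.h>
+#include <stdio.h>
+
+enum demo_layer { DEMO_LEAF = 0, DEMO_MID = 1, DEMO_TOP = 2 };
+
+static uint16_t demo_qidx(enum demo_layer layer, uint16_t prof_idx)
+{
+ return (prof_idx & 0x3FFF) | ((uint16_t)layer << 14);
+}
+
+int main(void)
+{
+ uint16_t qidx = demo_qidx(DEMO_MID, 5);
+
+ printf("qidx=0x%04x -> layer=%u prof=%u\n",
+ (unsigned int)qidx,
+ (unsigned int)((qidx >> 14) & 0x3),
+ (unsigned int)(qidx & 0x3FFF));
+ return 0;
+}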
+
+int rvu_mbox_handler_nix_bandprof_alloc(struct rvu *rvu,
+ struct nix_bandprof_alloc_req *req,
+ struct nix_bandprof_alloc_rsp *rsp)
+{
+ int blkaddr, layer, prof, idx, err;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct nix_ipolicer *ipolicer;
+ struct nix_hw *nix_hw;
+
+ if (!rvu->hw->cap.ipolicer)
+ return NIX_AF_ERR_IPOLICER_NOTSUPP;
+
+ err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
+ if (err)
+ return err;
+
+ mutex_lock(&rvu->rsrc_lock);
+ for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
+ if (layer == BAND_PROF_INVAL_LAYER)
+ continue;
+ if (!req->prof_count[layer])
+ continue;
+
+ ipolicer = &nix_hw->ipolicer[layer];
+ for (idx = 0; idx < req->prof_count[layer]; idx++) {
+ /* Allocate a max of 'MAX_BANDPROF_PER_PFFUNC' profiles */
+ if (idx == MAX_BANDPROF_PER_PFFUNC)
+ break;
+
+ prof = rvu_alloc_rsrc(&ipolicer->band_prof);
+ if (prof < 0)
+ break;
+ rsp->prof_count[layer]++;
+ rsp->prof_idx[layer][idx] = prof;
+ ipolicer->pfvf_map[prof] = pcifunc;
+ }
+ }
+ mutex_unlock(&rvu->rsrc_lock);
+ return 0;
+}
+
+static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc)
+{
+ int blkaddr, layer, prof_idx, err;
+ struct nix_ipolicer *ipolicer;
+ struct nix_hw *nix_hw;
+
+ if (!rvu->hw->cap.ipolicer)
+ return NIX_AF_ERR_IPOLICER_NOTSUPP;
+
+ err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
+ if (err)
+ return err;
+
+ mutex_lock(&rvu->rsrc_lock);
+ /* Free all the profiles allocated to the PCIFUNC */
+ for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
+ if (layer == BAND_PROF_INVAL_LAYER)
+ continue;
+ ipolicer = &nix_hw->ipolicer[layer];
+
+ for (prof_idx = 0; prof_idx < ipolicer->band_prof.max; prof_idx++) {
+ if (ipolicer->pfvf_map[prof_idx] != pcifunc)
+ continue;
+
+ /* Clear ratelimit aggregation, if any */
+ if (layer == BAND_PROF_LEAF_LAYER &&
+ ipolicer->match_id[prof_idx])
+ nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx);
+
+ ipolicer->pfvf_map[prof_idx] = 0x00;
+ ipolicer->match_id[prof_idx] = 0;
+ rvu_free_rsrc(&ipolicer->band_prof, prof_idx);
+ }
+ }
+ mutex_unlock(&rvu->rsrc_lock);
+ return 0;
+}
+
+int rvu_mbox_handler_nix_bandprof_free(struct rvu *rvu,
+ struct nix_bandprof_free_req *req,
+ struct msg_rsp *rsp)
+{
+ int blkaddr, layer, prof_idx, idx, err;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct nix_ipolicer *ipolicer;
+ struct nix_hw *nix_hw;
+
+ if (req->free_all)
+ return nix_free_all_bandprof(rvu, pcifunc);
+
+ if (!rvu->hw->cap.ipolicer)
+ return NIX_AF_ERR_IPOLICER_NOTSUPP;
+
+ err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
+ if (err)
+ return err;
+
+ mutex_lock(&rvu->rsrc_lock);
+ /* Free the requested profile indices */
+ for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
+ if (layer == BAND_PROF_INVAL_LAYER)
+ continue;
+ if (!req->prof_count[layer])
+ continue;
+
+ ipolicer = &nix_hw->ipolicer[layer];
+ for (idx = 0; idx < req->prof_count[layer]; idx++) {
+ if (idx == MAX_BANDPROF_PER_PFFUNC)
+ break;
+ prof_idx = req->prof_idx[layer][idx];
+ if (prof_idx >= ipolicer->band_prof.max ||
+ ipolicer->pfvf_map[prof_idx] != pcifunc)
+ continue;
+
+ /* Clear ratelimit aggregation, if any */
+ if (layer == BAND_PROF_LEAF_LAYER &&
+ ipolicer->match_id[prof_idx])
+ nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx);
+
+ ipolicer->pfvf_map[prof_idx] = 0x00;
+ ipolicer->match_id[prof_idx] = 0;
+ rvu_free_rsrc(&ipolicer->band_prof, prof_idx);
+ }
+ }
+ mutex_unlock(&rvu->rsrc_lock);
+ return 0;
+}
+
+int nix_aq_context_read(struct rvu *rvu, struct nix_hw *nix_hw,
+ struct nix_cn10k_aq_enq_req *aq_req,
+ struct nix_cn10k_aq_enq_rsp *aq_rsp,
+ u16 pcifunc, u8 ctype, u32 qidx)
+{
+ memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
+ aq_req->hdr.pcifunc = pcifunc;
+ aq_req->ctype = ctype;
+ aq_req->op = NIX_AQ_INSTOP_READ;
+ aq_req->qidx = qidx;
+
+ return rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
+ (struct nix_aq_enq_req *)aq_req,
+ (struct nix_aq_enq_rsp *)aq_rsp);
+}
+
+static int nix_ipolicer_map_leaf_midprofs(struct rvu *rvu,
+ struct nix_hw *nix_hw,
+ struct nix_cn10k_aq_enq_req *aq_req,
+ struct nix_cn10k_aq_enq_rsp *aq_rsp,
+ u32 leaf_prof, u16 mid_prof)
+{
+ memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
+ aq_req->hdr.pcifunc = 0x00;
+ aq_req->ctype = NIX_AQ_CTYPE_BANDPROF;
+ aq_req->op = NIX_AQ_INSTOP_WRITE;
+ aq_req->qidx = leaf_prof;
+
+ aq_req->prof.band_prof_id = mid_prof;
+ aq_req->prof_mask.band_prof_id = GENMASK(6, 0);
+ aq_req->prof.hl_en = 1;
+ aq_req->prof_mask.hl_en = 1;
+
+ return rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
+ (struct nix_aq_enq_req *)aq_req,
+ (struct nix_aq_enq_rsp *)aq_rsp);
+}
+
+int rvu_nix_setup_ratelimit_aggr(struct rvu *rvu, u16 pcifunc,
+ u16 rq_idx, u16 match_id)
+{
+ int leaf_prof, mid_prof, leaf_match;
+ struct nix_cn10k_aq_enq_req aq_req;
+ struct nix_cn10k_aq_enq_rsp aq_rsp;
+ struct nix_ipolicer *ipolicer;
+ struct nix_hw *nix_hw;
+ int blkaddr, idx, rc;
+
+ if (!rvu->hw->cap.ipolicer)
+ return 0;
+
+ rc = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
+ if (rc)
+ return rc;
+
+ /* Fetch the RQ's context to see if policing is enabled */
+ rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, pcifunc,
+ NIX_AQ_CTYPE_RQ, rq_idx);
+ if (rc) {
+ dev_err(rvu->dev,
+ "%s: Failed to fetch RQ%d context of PFFUNC 0x%x\n",
+ __func__, rq_idx, pcifunc);
+ return rc;
+ }
+
+ if (!aq_rsp.rq.policer_ena)
+ return 0;
+
+ /* Get the bandwidth profile ID mapped to this RQ */
+ leaf_prof = aq_rsp.rq.band_prof_id;
+
+ ipolicer = &nix_hw->ipolicer[BAND_PROF_LEAF_LAYER];
+ ipolicer->match_id[leaf_prof] = match_id;
+
+ /* Check if any other leaf profile is marked with same match_id */
+ for (idx = 0; idx < ipolicer->band_prof.max; idx++) {
+ if (idx == leaf_prof)
+ continue;
+ if (ipolicer->match_id[idx] != match_id)
+ continue;
+
+ leaf_match = idx;
+ break;
+ }
+
+ if (idx == ipolicer->band_prof.max)
+ return 0;
+
+ /* Fetch the matching profile's context to check if it's already
+ * mapped to a mid level profile.
+ */
+ rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
+ NIX_AQ_CTYPE_BANDPROF, leaf_match);
+ if (rc) {
+ dev_err(rvu->dev,
+ "%s: Failed to fetch context of leaf profile %d\n",
+ __func__, leaf_match);
+ return rc;
+ }
+
+ ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER];
+ if (aq_rsp.prof.hl_en) {
+ /* Get the mid layer profile index and map leaf_prof to it
+ * as well, so that flows steered to different RQs but
+ * marked with the same match_id are rate limited in an
+ * aggregate fashion.
+ */
+ mid_prof = aq_rsp.prof.band_prof_id;
+ rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
+ &aq_req, &aq_rsp,
+ leaf_prof, mid_prof);
+ if (rc) {
+ dev_err(rvu->dev,
+ "%s: Failed to map leaf(%d) and mid(%d) profiles\n",
+ __func__, leaf_prof, mid_prof);
+ goto exit;
+ }
+
+ mutex_lock(&rvu->rsrc_lock);
+ ipolicer->ref_count[mid_prof]++;
+ mutex_unlock(&rvu->rsrc_lock);
+ goto exit;
+ }
+
+ /* Allocate a mid layer profile and
+ * map both 'leaf_prof' and 'leaf_match' profiles to it.
+ */
+ mutex_lock(&rvu->rsrc_lock);
+ mid_prof = rvu_alloc_rsrc(&ipolicer->band_prof);
+ if (mid_prof < 0) {
+ dev_err(rvu->dev,
+ "%s: Unable to allocate mid layer profile\n", __func__);
+ mutex_unlock(&rvu->rsrc_lock);
+ goto exit;
+ }
+ mutex_unlock(&rvu->rsrc_lock);
+ ipolicer->pfvf_map[mid_prof] = 0x00;
+ ipolicer->ref_count[mid_prof] = 0;
+
+ /* Initialize mid layer profile same as 'leaf_prof' */
+ rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
+ NIX_AQ_CTYPE_BANDPROF, leaf_prof);
+ if (rc) {
+ dev_err(rvu->dev,
+ "%s: Failed to fetch context of leaf profile %d\n",
+ __func__, leaf_prof);
+ goto exit;
+ }
+
+ memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
+ aq_req.hdr.pcifunc = 0x00;
+ aq_req.qidx = (mid_prof & 0x3FFF) | (BAND_PROF_MID_LAYER << 14);
+ aq_req.ctype = NIX_AQ_CTYPE_BANDPROF;
+ aq_req.op = NIX_AQ_INSTOP_WRITE;
+ memcpy(&aq_req.prof, &aq_rsp.prof, sizeof(struct nix_bandprof_s));
+ memset((char *)&aq_req.prof_mask, 0xff, sizeof(struct nix_bandprof_s));
+ /* Clear higher layer enable bit in the mid profile, just in case */
+ aq_req.prof.hl_en = 0;
+ aq_req.prof_mask.hl_en = 1;
+
+ rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
+ (struct nix_aq_enq_req *)&aq_req, NULL);
+ if (rc) {
+ dev_err(rvu->dev,
+ "%s: Failed to INIT context of mid layer profile %d\n",
+ __func__, mid_prof);
+ goto exit;
+ }
+
+ /* Map both leaf profiles to this mid layer profile */
+ rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
+ &aq_req, &aq_rsp,
+ leaf_prof, mid_prof);
+ if (rc) {
+ dev_err(rvu->dev,
+ "%s: Failed to map leaf(%d) and mid(%d) profiles\n",
+ __func__, leaf_prof, mid_prof);
+ goto exit;
+ }
+
+ mutex_lock(&rvu->rsrc_lock);
+ ipolicer->ref_count[mid_prof]++;
+ mutex_unlock(&rvu->rsrc_lock);
+
+ rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
+ &aq_req, &aq_rsp,
+ leaf_match, mid_prof);
+ if (rc) {
+ dev_err(rvu->dev,
+ "%s: Failed to map leaf(%d) and mid(%d) profiles\n",
+ __func__, leaf_match, mid_prof);
+ ipolicer->ref_count[mid_prof]--;
+ goto exit;
+ }
+
+ mutex_lock(&rvu->rsrc_lock);
+ ipolicer->ref_count[mid_prof]++;
+ mutex_unlock(&rvu->rsrc_lock);
+
+exit:
+ return rc;
+}
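+
+/* For illustration only: the aggregation idea implemented above, modelled
+ * in plain C -- leaf profiles carrying the same match_id get linked to one
+ * shared mid-layer profile, whose ref_count tracks how many leaves point at
+ * it. Names, sizes and the match_id values are hypothetical.
+ */
+#include <stdio.h>
+
+#define DEMO_LEAFS 4
+
+struct demo_leaf {
+ int match_id;
+ int mid_prof; /* -1 when not linked */
+};
+
+int main(void)
+{
+ struct demo_leaf leafs[DEMO_LEAFS] = {
+ { .match_id = 7, .mid_prof = -1 },
+ { .match_id = 3, .mid_prof = -1 },
+ { .match_id = 7, .mid_prof = -1 },
+ { .match_id = 7, .mid_prof = -1 },
+ };
+ int mid_ref_count = 0, mid_prof = 42, i;
+
+ /* Link every leaf with match_id 7 to the shared mid profile */
+ for (i = 0; i < DEMO_LEAFS; i++) {
+ if (leafs[i].match_id != 7)
+ continue;
+ leafs[i].mid_prof = mid_prof;
+ mid_ref_count++;
+ }
+
+ printf("mid profile %d aggregates %d leaf profiles\n",
+ mid_prof, mid_ref_count);
+ return 0;
+}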
+
+/* Called with mutex rsrc_lock */
+static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw,
+ u32 leaf_prof)
+{
+ struct nix_cn10k_aq_enq_req aq_req;
+ struct nix_cn10k_aq_enq_rsp aq_rsp;
+ struct nix_ipolicer *ipolicer;
+ u16 mid_prof;
+ int rc;
+
+ mutex_unlock(&rvu->rsrc_lock);
+
+ rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
+ NIX_AQ_CTYPE_BANDPROF, leaf_prof);
+
+ mutex_lock(&rvu->rsrc_lock);
+ if (rc) {
+ dev_err(rvu->dev,
+ "%s: Failed to fetch context of leaf profile %d\n",
+ __func__, leaf_prof);
+ return;
+ }
+
+ if (!aq_rsp.prof.hl_en)
+ return;
+
+ mid_prof = aq_rsp.prof.band_prof_id;
+ ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER];
+ ipolicer->ref_count[mid_prof]--;
+ /* If ref_count is zero, free mid layer profile */
+ if (!ipolicer->ref_count[mid_prof]) {
+ ipolicer->pfvf_map[mid_prof] = 0x00;
+ rvu_free_rsrc(&ipolicer->band_prof, mid_prof);
+ }
+}
+
+int rvu_mbox_handler_nix_bandprof_get_hwinfo(struct rvu *rvu, struct msg_req *req,
+ struct nix_bandprof_get_hwinfo_rsp *rsp)
+{
+ struct nix_ipolicer *ipolicer;
+ int blkaddr, layer, err;
+ struct nix_hw *nix_hw;
+ u64 tu;
+
+ if (!rvu->hw->cap.ipolicer)
+ return NIX_AF_ERR_IPOLICER_NOTSUPP;
+
+ err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr);
+ if (err)
+ return err;
+
+ /* Return number of bandwidth profiles free at each layer */
+ mutex_lock(&rvu->rsrc_lock);
+ for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
+ if (layer == BAND_PROF_INVAL_LAYER)
+ continue;
+
+ ipolicer = &nix_hw->ipolicer[layer];
+ rsp->prof_count[layer] = rvu_rsrc_free_count(&ipolicer->band_prof);
+ }
+ mutex_unlock(&rvu->rsrc_lock);
+
+ /* Set the policer timeunit in nanosec */
+ tu = rvu_read64(rvu, blkaddr, NIX_AF_PL_TS) & GENMASK_ULL(9, 0);
+ rsp->policer_timeunit = (tu + 1) * 100;
+
+ return 0;
+}
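+
+/* For illustration only: the policer timeunit conversion used above --
+ * NIX_AF_PL_TS holds 'tu' and the timeunit is (tu + 1) * 100 ns, so the
+ * value 19 programmed at init corresponds to 2 us.
+ */
+#include <stdint.h>
+#include <stdio.h>
+
+int main(void)
+{
+ uint64_t tu = 19; /* value written to NIX_AF_PL_TS at init */
+
+ printf("policer timeunit = %llu ns\n",
+ (unsigned long long)((tu + 1) * 100));
+ return 0;
+}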
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
new file mode 100644
index 000000000..4f5ca5ab1
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
@@ -0,0 +1,601 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell.
+ *
+ */
+#include <linux/bitfield.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "rvu_struct.h"
+#include "rvu_reg.h"
+#include "rvu.h"
+
+static int npa_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
+ struct npa_aq_inst_s *inst)
+{
+ struct admin_queue *aq = block->aq;
+ struct npa_aq_res_s *result;
+ int timeout = 1000;
+ u64 reg, head;
+
+ result = (struct npa_aq_res_s *)aq->res->base;
+
+ /* Get current head pointer where to append this instruction */
+ reg = rvu_read64(rvu, block->addr, NPA_AF_AQ_STATUS);
+ head = (reg >> 4) & AQ_PTR_MASK;
+
+ memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)),
+ (void *)inst, aq->inst->entry_sz);
+ memset(result, 0, sizeof(*result));
+ /* sync into memory */
+ wmb();
+
+ /* Ring the doorbell and wait for result */
+ rvu_write64(rvu, block->addr, NPA_AF_AQ_DOOR, 1);
+ while (result->compcode == NPA_AQ_COMP_NOTDONE) {
+ cpu_relax();
+ udelay(1);
+ timeout--;
+ if (!timeout)
+ return -EBUSY;
+ }
+
+ if (result->compcode != NPA_AQ_COMP_GOOD) {
+ /* TODO: Replace this with some error code */
+ if (result->compcode == NPA_AQ_COMP_CTX_FAULT ||
+ result->compcode == NPA_AQ_COMP_LOCKERR ||
+ result->compcode == NPA_AQ_COMP_CTX_POISON) {
+ if (rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NPA0))
+ dev_err(rvu->dev,
+ "%s: Not able to unlock cachelines\n", __func__);
+ }
+
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+int rvu_npa_aq_enq_inst(struct rvu *rvu, struct npa_aq_enq_req *req,
+ struct npa_aq_enq_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ int blkaddr, npalf, rc = 0;
+ struct npa_aq_inst_s inst;
+ struct rvu_block *block;
+ struct admin_queue *aq;
+ struct rvu_pfvf *pfvf;
+ void *ctx, *mask;
+ bool ena;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ if (!pfvf->aura_ctx || req->aura_id >= pfvf->aura_ctx->qsize)
+ return NPA_AF_ERR_AQ_ENQUEUE;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, pcifunc);
+ if (!pfvf->npalf || blkaddr < 0)
+ return NPA_AF_ERR_AF_LF_INVALID;
+
+ block = &hw->block[blkaddr];
+ aq = block->aq;
+ if (!aq) {
+ dev_warn(rvu->dev, "%s: NPA AQ not initialized\n", __func__);
+ return NPA_AF_ERR_AQ_ENQUEUE;
+ }
+
+ npalf = rvu_get_lf(rvu, block, pcifunc, 0);
+ if (npalf < 0)
+ return NPA_AF_ERR_AF_LF_INVALID;
+
+ memset(&inst, 0, sizeof(struct npa_aq_inst_s));
+ inst.cindex = req->aura_id;
+ inst.lf = npalf;
+ inst.ctype = req->ctype;
+ inst.op = req->op;
+ /* Currently we are not supporting enqueuing multiple instructions,
+ * so always choose first entry in result memory.
+ */
+ inst.res_addr = (u64)aq->res->iova;
+
+	/* Hardware uses the same aq->res->base for updating the result of
+	 * the previous instruction, hence wait here until it is done.
+	 */
+ spin_lock(&aq->lock);
+
+ /* Clean result + context memory */
+ memset(aq->res->base, 0, aq->res->entry_sz);
+ /* Context needs to be written at RES_ADDR + 128 */
+ ctx = aq->res->base + 128;
+ /* Mask needs to be written at RES_ADDR + 256 */
+ mask = aq->res->base + 256;
+
+ switch (req->op) {
+ case NPA_AQ_INSTOP_WRITE:
+ /* Copy context and write mask */
+ if (req->ctype == NPA_AQ_CTYPE_AURA) {
+ memcpy(mask, &req->aura_mask,
+ sizeof(struct npa_aura_s));
+ memcpy(ctx, &req->aura, sizeof(struct npa_aura_s));
+ } else {
+ memcpy(mask, &req->pool_mask,
+ sizeof(struct npa_pool_s));
+ memcpy(ctx, &req->pool, sizeof(struct npa_pool_s));
+ }
+ break;
+ case NPA_AQ_INSTOP_INIT:
+ if (req->ctype == NPA_AQ_CTYPE_AURA) {
+ if (req->aura.pool_addr >= pfvf->pool_ctx->qsize) {
+ rc = NPA_AF_ERR_AQ_FULL;
+ break;
+ }
+ /* Set pool's context address */
+ req->aura.pool_addr = pfvf->pool_ctx->iova +
+ (req->aura.pool_addr * pfvf->pool_ctx->entry_sz);
+ memcpy(ctx, &req->aura, sizeof(struct npa_aura_s));
+ } else { /* POOL's context */
+ memcpy(ctx, &req->pool, sizeof(struct npa_pool_s));
+ }
+ break;
+ case NPA_AQ_INSTOP_NOP:
+ case NPA_AQ_INSTOP_READ:
+ case NPA_AQ_INSTOP_LOCK:
+ case NPA_AQ_INSTOP_UNLOCK:
+ break;
+ default:
+ rc = NPA_AF_ERR_AQ_FULL;
+ break;
+ }
+
+ if (rc) {
+ spin_unlock(&aq->lock);
+ return rc;
+ }
+
+ /* Submit the instruction to AQ */
+ rc = npa_aq_enqueue_wait(rvu, block, &inst);
+ if (rc) {
+ spin_unlock(&aq->lock);
+ return rc;
+ }
+
+ /* Set aura bitmap if aura hw context is enabled */
+ if (req->ctype == NPA_AQ_CTYPE_AURA) {
+ if (req->op == NPA_AQ_INSTOP_INIT && req->aura.ena)
+ __set_bit(req->aura_id, pfvf->aura_bmap);
+ if (req->op == NPA_AQ_INSTOP_WRITE) {
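+			/* New enable state: take the requested value where the
+			 * write mask selects it, else keep the current state
+			 * tracked in the bitmap.
+			 */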
+ ena = (req->aura.ena & req->aura_mask.ena) |
+ (test_bit(req->aura_id, pfvf->aura_bmap) &
+ ~req->aura_mask.ena);
+ if (ena)
+ __set_bit(req->aura_id, pfvf->aura_bmap);
+ else
+ __clear_bit(req->aura_id, pfvf->aura_bmap);
+ }
+ }
+
+ /* Set pool bitmap if pool hw context is enabled */
+ if (req->ctype == NPA_AQ_CTYPE_POOL) {
+ if (req->op == NPA_AQ_INSTOP_INIT && req->pool.ena)
+ __set_bit(req->aura_id, pfvf->pool_bmap);
+ if (req->op == NPA_AQ_INSTOP_WRITE) {
+ ena = (req->pool.ena & req->pool_mask.ena) |
+ (test_bit(req->aura_id, pfvf->pool_bmap) &
+ ~req->pool_mask.ena);
+ if (ena)
+ __set_bit(req->aura_id, pfvf->pool_bmap);
+ else
+ __clear_bit(req->aura_id, pfvf->pool_bmap);
+ }
+ }
+ spin_unlock(&aq->lock);
+
+ if (rsp) {
+ /* Copy read context into mailbox */
+ if (req->op == NPA_AQ_INSTOP_READ) {
+ if (req->ctype == NPA_AQ_CTYPE_AURA)
+ memcpy(&rsp->aura, ctx,
+ sizeof(struct npa_aura_s));
+ else
+ memcpy(&rsp->pool, ctx,
+ sizeof(struct npa_pool_s));
+ }
+ }
+
+ return 0;
+}
+
+static int npa_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
+ struct npa_aq_enq_req aq_req;
+ unsigned long *bmap;
+ int id, cnt = 0;
+ int err = 0, rc;
+
+ if (!pfvf->pool_ctx || !pfvf->aura_ctx)
+ return NPA_AF_ERR_AQ_ENQUEUE;
+
+ memset(&aq_req, 0, sizeof(struct npa_aq_enq_req));
+ aq_req.hdr.pcifunc = req->hdr.pcifunc;
+
+ if (req->ctype == NPA_AQ_CTYPE_POOL) {
+ aq_req.pool.ena = 0;
+ aq_req.pool_mask.ena = 1;
+ cnt = pfvf->pool_ctx->qsize;
+ bmap = pfvf->pool_bmap;
+ } else if (req->ctype == NPA_AQ_CTYPE_AURA) {
+ aq_req.aura.ena = 0;
+ aq_req.aura_mask.ena = 1;
+ aq_req.aura.bp_ena = 0;
+ aq_req.aura_mask.bp_ena = 1;
+ cnt = pfvf->aura_ctx->qsize;
+ bmap = pfvf->aura_bmap;
+ }
+
+ aq_req.ctype = req->ctype;
+ aq_req.op = NPA_AQ_INSTOP_WRITE;
+
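+	/* Issue a masked write to clear ENA on every context that is
+	 * currently marked enabled in the bitmap.
+	 */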
+ for (id = 0; id < cnt; id++) {
+ if (!test_bit(id, bmap))
+ continue;
+ aq_req.aura_id = id;
+ rc = rvu_npa_aq_enq_inst(rvu, &aq_req, NULL);
+ if (rc) {
+ err = rc;
+ dev_err(rvu->dev, "Failed to disable %s:%d context\n",
+ (req->ctype == NPA_AQ_CTYPE_AURA) ?
+ "Aura" : "Pool", id);
+ }
+ }
+
+ return err;
+}
+
+#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
+static int npa_lf_hwctx_lockdown(struct rvu *rvu, struct npa_aq_enq_req *req)
+{
+ struct npa_aq_enq_req lock_ctx_req;
+ int err;
+
+ if (req->op != NPA_AQ_INSTOP_INIT)
+ return 0;
+
+ memset(&lock_ctx_req, 0, sizeof(struct npa_aq_enq_req));
+ lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc;
+ lock_ctx_req.ctype = req->ctype;
+ lock_ctx_req.op = NPA_AQ_INSTOP_LOCK;
+ lock_ctx_req.aura_id = req->aura_id;
+ err = rvu_npa_aq_enq_inst(rvu, &lock_ctx_req, NULL);
+ if (err)
+ dev_err(rvu->dev,
+ "PFUNC 0x%x: Failed to lock NPA context %s:%d\n",
+ req->hdr.pcifunc,
+ (req->ctype == NPA_AQ_CTYPE_AURA) ?
+ "Aura" : "Pool", req->aura_id);
+ return err;
+}
+
+int rvu_mbox_handler_npa_aq_enq(struct rvu *rvu,
+ struct npa_aq_enq_req *req,
+ struct npa_aq_enq_rsp *rsp)
+{
+ int err;
+
+ err = rvu_npa_aq_enq_inst(rvu, req, rsp);
+ if (!err)
+ err = npa_lf_hwctx_lockdown(rvu, req);
+ return err;
+}
+#else
+
+int rvu_mbox_handler_npa_aq_enq(struct rvu *rvu,
+ struct npa_aq_enq_req *req,
+ struct npa_aq_enq_rsp *rsp)
+{
+ return rvu_npa_aq_enq_inst(rvu, req, rsp);
+}
+#endif
+
+int rvu_mbox_handler_npa_hwctx_disable(struct rvu *rvu,
+ struct hwctx_disable_req *req,
+ struct msg_rsp *rsp)
+{
+ return npa_lf_hwctx_disable(rvu, req);
+}
+
+static void npa_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
+{
+ kfree(pfvf->aura_bmap);
+ pfvf->aura_bmap = NULL;
+
+ qmem_free(rvu->dev, pfvf->aura_ctx);
+ pfvf->aura_ctx = NULL;
+
+ kfree(pfvf->pool_bmap);
+ pfvf->pool_bmap = NULL;
+
+ qmem_free(rvu->dev, pfvf->pool_ctx);
+ pfvf->pool_ctx = NULL;
+
+ qmem_free(rvu->dev, pfvf->npa_qints_ctx);
+ pfvf->npa_qints_ctx = NULL;
+}
+
+int rvu_mbox_handler_npa_lf_alloc(struct rvu *rvu,
+ struct npa_lf_alloc_req *req,
+ struct npa_lf_alloc_rsp *rsp)
+{
+ int npalf, qints, hwctx_size, err, rc = 0;
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_block *block;
+ struct rvu_pfvf *pfvf;
+ u64 cfg, ctx_cfg;
+ int blkaddr;
+
+ if (req->aura_sz > NPA_AURA_SZ_MAX ||
+ req->aura_sz == NPA_AURA_SZ_0 || !req->nr_pools)
+ return NPA_AF_ERR_PARAM;
+
+ if (req->way_mask)
+ req->way_mask &= 0xFFFF;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, pcifunc);
+ if (!pfvf->npalf || blkaddr < 0)
+ return NPA_AF_ERR_AF_LF_INVALID;
+
+ block = &hw->block[blkaddr];
+ npalf = rvu_get_lf(rvu, block, pcifunc, 0);
+ if (npalf < 0)
+ return NPA_AF_ERR_AF_LF_INVALID;
+
+ /* Reset this NPA LF */
+ err = rvu_lf_reset(rvu, block, npalf);
+ if (err) {
+ dev_err(rvu->dev, "Failed to reset NPALF%d\n", npalf);
+ return NPA_AF_ERR_LF_RESET;
+ }
+
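+	/* NPA_AF_CONST1 encodes the aura, pool and qint HW context sizes
+	 * as log2 values in successive 4-bit fields, decoded below.
+	 */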
+ ctx_cfg = rvu_read64(rvu, blkaddr, NPA_AF_CONST1);
+
+ /* Alloc memory for aura HW contexts */
+ hwctx_size = 1UL << (ctx_cfg & 0xF);
+ err = qmem_alloc(rvu->dev, &pfvf->aura_ctx,
+ NPA_AURA_COUNT(req->aura_sz), hwctx_size);
+ if (err)
+ goto free_mem;
+
+ pfvf->aura_bmap = kcalloc(NPA_AURA_COUNT(req->aura_sz), sizeof(long),
+ GFP_KERNEL);
+ if (!pfvf->aura_bmap)
+ goto free_mem;
+
+ /* Alloc memory for pool HW contexts */
+ hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF);
+ err = qmem_alloc(rvu->dev, &pfvf->pool_ctx, req->nr_pools, hwctx_size);
+ if (err)
+ goto free_mem;
+
+ pfvf->pool_bmap = kcalloc(NPA_AURA_COUNT(req->aura_sz), sizeof(long),
+ GFP_KERNEL);
+ if (!pfvf->pool_bmap)
+ goto free_mem;
+
+ /* Get no of queue interrupts supported */
+ cfg = rvu_read64(rvu, blkaddr, NPA_AF_CONST);
+ qints = (cfg >> 28) & 0xFFF;
+
+ /* Alloc memory for Qints HW contexts */
+ hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF);
+ err = qmem_alloc(rvu->dev, &pfvf->npa_qints_ctx, qints, hwctx_size);
+ if (err)
+ goto free_mem;
+
+ cfg = rvu_read64(rvu, blkaddr, NPA_AF_LFX_AURAS_CFG(npalf));
+ /* Clear way partition mask and set aura offset to '0' */
+ cfg &= ~(BIT_ULL(34) - 1);
+ /* Set aura size & enable caching of contexts */
+ cfg |= (req->aura_sz << 16) | BIT_ULL(34) | req->way_mask;
+
+ rvu_write64(rvu, blkaddr, NPA_AF_LFX_AURAS_CFG(npalf), cfg);
+
+ /* Configure aura HW context's base */
+ rvu_write64(rvu, blkaddr, NPA_AF_LFX_LOC_AURAS_BASE(npalf),
+ (u64)pfvf->aura_ctx->iova);
+
+ /* Enable caching of qints hw context */
+ rvu_write64(rvu, blkaddr, NPA_AF_LFX_QINTS_CFG(npalf),
+ BIT_ULL(36) | req->way_mask << 20);
+ rvu_write64(rvu, blkaddr, NPA_AF_LFX_QINTS_BASE(npalf),
+ (u64)pfvf->npa_qints_ctx->iova);
+
+ goto exit;
+
+free_mem:
+ npa_ctx_free(rvu, pfvf);
+ rc = -ENOMEM;
+
+exit:
+ /* set stack page info */
+ cfg = rvu_read64(rvu, blkaddr, NPA_AF_CONST);
+ rsp->stack_pg_ptrs = (cfg >> 8) & 0xFF;
+ rsp->stack_pg_bytes = cfg & 0xFF;
+ rsp->qints = (cfg >> 28) & 0xFFF;
+ if (!is_rvu_otx2(rvu)) {
+ cfg = rvu_read64(rvu, block->addr, NPA_AF_BATCH_CTL);
+ rsp->cache_lines = (cfg >> 1) & 0x3F;
+ }
+ return rc;
+}
+
+int rvu_mbox_handler_npa_lf_free(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_block *block;
+ struct rvu_pfvf *pfvf;
+ int npalf, err;
+ int blkaddr;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, pcifunc);
+ if (!pfvf->npalf || blkaddr < 0)
+ return NPA_AF_ERR_AF_LF_INVALID;
+
+ block = &hw->block[blkaddr];
+ npalf = rvu_get_lf(rvu, block, pcifunc, 0);
+ if (npalf < 0)
+ return NPA_AF_ERR_AF_LF_INVALID;
+
+ /* Reset this NPA LF */
+ err = rvu_lf_reset(rvu, block, npalf);
+ if (err) {
+ dev_err(rvu->dev, "Failed to reset NPALF%d\n", npalf);
+ return NPA_AF_ERR_LF_RESET;
+ }
+
+ npa_ctx_free(rvu, pfvf);
+
+ return 0;
+}
+
+static int npa_aq_init(struct rvu *rvu, struct rvu_block *block)
+{
+ u64 cfg;
+ int err;
+
+ /* Set admin queue endianness */
+ cfg = rvu_read64(rvu, block->addr, NPA_AF_GEN_CFG);
+#ifdef __BIG_ENDIAN
+ cfg |= BIT_ULL(1);
+ rvu_write64(rvu, block->addr, NPA_AF_GEN_CFG, cfg);
+#else
+ cfg &= ~BIT_ULL(1);
+ rvu_write64(rvu, block->addr, NPA_AF_GEN_CFG, cfg);
+#endif
+
+ /* Do not bypass NDC cache */
+ cfg = rvu_read64(rvu, block->addr, NPA_AF_NDC_CFG);
+ cfg &= ~0x03DULL;
+#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
+ /* Disable caching of stack pages */
+ cfg |= 0x10ULL;
+#endif
+ rvu_write64(rvu, block->addr, NPA_AF_NDC_CFG, cfg);
+
+ /* For CN10K NPA BATCH DMA set 35 cache lines */
+ if (!is_rvu_otx2(rvu)) {
+ cfg = rvu_read64(rvu, block->addr, NPA_AF_BATCH_CTL);
+ cfg &= ~0x7EULL;
+ cfg |= BIT_ULL(6) | BIT_ULL(2) | BIT_ULL(1);
+ rvu_write64(rvu, block->addr, NPA_AF_BATCH_CTL, cfg);
+ }
+ /* Result structure can be followed by Aura/Pool context at
+	 * RES + 128 bytes and a write mask at RES + 256 bytes, depending on
+ * operation type. Alloc sufficient result memory for all operations.
+ */
+ err = rvu_aq_alloc(rvu, &block->aq,
+ Q_COUNT(AQ_SIZE), sizeof(struct npa_aq_inst_s),
+ ALIGN(sizeof(struct npa_aq_res_s), 128) + 256);
+ if (err)
+ return err;
+
+ rvu_write64(rvu, block->addr, NPA_AF_AQ_CFG, AQ_SIZE);
+ rvu_write64(rvu, block->addr,
+ NPA_AF_AQ_BASE, (u64)block->aq->inst->iova);
+ return 0;
+}
+
+int rvu_npa_init(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
+ if (blkaddr < 0)
+ return 0;
+
+ /* Initialize admin queue */
+ return npa_aq_init(rvu, &hw->block[blkaddr]);
+}
+
+void rvu_npa_freemem(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
+ if (blkaddr < 0)
+ return;
+
+ block = &hw->block[blkaddr];
+ rvu_aq_free(rvu, block->aq);
+}
+
+void rvu_npa_lf_teardown(struct rvu *rvu, u16 pcifunc, int npalf)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ struct hwctx_disable_req ctx_req;
+
+ /* Disable all pools */
+ ctx_req.hdr.pcifunc = pcifunc;
+ ctx_req.ctype = NPA_AQ_CTYPE_POOL;
+ npa_lf_hwctx_disable(rvu, &ctx_req);
+
+ /* Disable all auras */
+ ctx_req.ctype = NPA_AQ_CTYPE_AURA;
+ npa_lf_hwctx_disable(rvu, &ctx_req);
+
+ npa_ctx_free(rvu, pfvf);
+}
+
+/* Due to a hardware erratum, in some corner cases, AQ context lock
+ * operations can result in an NDC way getting into the illegal state
+ * of being locked but not valid.
+ *
+ * This API solves the problem by clearing the lock bit of the NDC block.
+ * The operation needs to be done for each line of all the NDC banks.
+ */
+int rvu_ndc_fix_locked_cacheline(struct rvu *rvu, int blkaddr)
+{
+ int bank, max_bank, line, max_line, err;
+ u64 reg, ndc_af_const;
+
+ /* Set the ENABLE bit(63) to '0' */
+ reg = rvu_read64(rvu, blkaddr, NDC_AF_CAMS_RD_INTERVAL);
+ rvu_write64(rvu, blkaddr, NDC_AF_CAMS_RD_INTERVAL, reg & GENMASK_ULL(62, 0));
+
+ /* Poll until the BUSY bits(47:32) are set to '0' */
+ err = rvu_poll_reg(rvu, blkaddr, NDC_AF_CAMS_RD_INTERVAL, GENMASK_ULL(47, 32), true);
+ if (err) {
+ dev_err(rvu->dev, "Timed out while polling for NDC CAM busy bits.\n");
+ return err;
+ }
+
+ ndc_af_const = rvu_read64(rvu, blkaddr, NDC_AF_CONST);
+ max_bank = FIELD_GET(NDC_AF_BANK_MASK, ndc_af_const);
+ max_line = FIELD_GET(NDC_AF_BANK_LINE_MASK, ndc_af_const);
+ for (bank = 0; bank < max_bank; bank++) {
+ for (line = 0; line < max_line; line++) {
+ /* Check if 'cache line valid bit(63)' is not set
+ * but 'cache line lock bit(60)' is set and on
+ * success, reset the lock bit(60).
+ */
+ reg = rvu_read64(rvu, blkaddr,
+ NDC_AF_BANKX_LINEX_METADATA(bank, line));
+ if (!(reg & BIT_ULL(63)) && (reg & BIT_ULL(60))) {
+ rvu_write64(rvu, blkaddr,
+ NDC_AF_BANKX_LINEX_METADATA(bank, line),
+ reg & ~BIT_ULL(60));
+ }
+ }
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
new file mode 100644
index 000000000..0bcf3e559
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -0,0 +1,3463 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell.
+ *
+ */
+
+#include <linux/bitfield.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "rvu_struct.h"
+#include "rvu_reg.h"
+#include "rvu.h"
+#include "npc.h"
+#include "cgx.h"
+#include "npc_profile.h"
+#include "rvu_npc_hash.h"
+
+#define RSVD_MCAM_ENTRIES_PER_PF 3 /* Broadcast, Promisc and AllMulticast */
+#define RSVD_MCAM_ENTRIES_PER_NIXLF 1 /* Ucast for LFs */
+
+#define NPC_PARSE_RESULT_DMAC_OFFSET 8
+#define NPC_HW_TSTAMP_OFFSET 8ULL
+#define NPC_KEX_CHAN_MASK 0xFFFULL
+#define NPC_KEX_PF_FUNC_MASK 0xFFFFULL
+
+#define ALIGN_8B_CEIL(__a) (((__a) + 7) & (-8))
+
+static const char def_pfl_name[] = "default";
+
+static void npc_mcam_free_all_entries(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, u16 pcifunc);
+static void npc_mcam_free_all_counters(struct rvu *rvu, struct npc_mcam *mcam,
+ u16 pcifunc);
+
+bool is_npc_intf_tx(u8 intf)
+{
+ return !!(intf & 0x1);
+}
+
+bool is_npc_intf_rx(u8 intf)
+{
+ return !(intf & 0x1);
+}
+
+bool is_npc_interface_valid(struct rvu *rvu, u8 intf)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+
+ return intf < hw->npc_intfs;
+}
+
+int rvu_npc_get_tx_nibble_cfg(struct rvu *rvu, u64 nibble_ena)
+{
+ /* Due to a HW issue in these silicon versions, parse nibble enable
+ * configuration has to be identical for both Rx and Tx interfaces.
+ */
+ if (is_rvu_96xx_B0(rvu))
+ return nibble_ena;
+ return 0;
+}
+
+static int npc_mcam_verify_pf_func(struct rvu *rvu,
+ struct mcam_entry *entry_data, u8 intf,
+ u16 pcifunc)
+{
+ u16 pf_func, pf_func_mask;
+
+ if (is_npc_intf_rx(intf))
+ return 0;
+
+ pf_func_mask = (entry_data->kw_mask[0] >> 32) &
+ NPC_KEX_PF_FUNC_MASK;
+ pf_func = (entry_data->kw[0] >> 32) & NPC_KEX_PF_FUNC_MASK;
+
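+	/* PF_FUNC is stored in the TX key in big-endian byte order,
+	 * convert it before comparing against the requester.
+	 */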
+ pf_func = be16_to_cpu((__force __be16)pf_func);
+ if (pf_func_mask != NPC_KEX_PF_FUNC_MASK ||
+ ((pf_func & ~RVU_PFVF_FUNC_MASK) !=
+ (pcifunc & ~RVU_PFVF_FUNC_MASK)))
+ return -EINVAL;
+
+ return 0;
+}
+
+void rvu_npc_set_pkind(struct rvu *rvu, int pkind, struct rvu_pfvf *pfvf)
+{
+ int blkaddr;
+ u64 val = 0;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+ /* Config CPI base for the PKIND */
+ val = pkind | 1ULL << 62;
+ rvu_write64(rvu, blkaddr, NPC_AF_PKINDX_CPI_DEFX(pkind, 0), val);
+}
+
+int rvu_npc_get_pkind(struct rvu *rvu, u16 pf)
+{
+ struct npc_pkind *pkind = &rvu->hw->pkind;
+ u32 map;
+ int i;
+
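+	/* pfchan_map[] encodes the PF number in bits [21:16] of each entry */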
+ for (i = 0; i < pkind->rsrc.max; i++) {
+ map = pkind->pfchan_map[i];
+ if (((map >> 16) & 0x3F) == pf)
+ return i;
+ }
+ return -1;
+}
+
+#define NPC_AF_ACTION0_PTR_ADVANCE GENMASK_ULL(27, 20)
+
+int npc_config_ts_kpuaction(struct rvu *rvu, int pf, u16 pcifunc, bool enable)
+{
+ int pkind, blkaddr;
+ u64 val;
+
+ pkind = rvu_npc_get_pkind(rvu, pf);
+ if (pkind < 0) {
+ dev_err(rvu->dev, "%s: pkind not mapped\n", __func__);
+ return -EINVAL;
+ }
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, pcifunc);
+ if (blkaddr < 0) {
+ dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__);
+ return -EINVAL;
+ }
+
+ val = rvu_read64(rvu, blkaddr, NPC_AF_PKINDX_ACTION0(pkind));
+ val &= ~NPC_AF_ACTION0_PTR_ADVANCE;
+ /* If timestamp is enabled then configure NPC to shift 8 bytes */
+ if (enable)
+ val |= FIELD_PREP(NPC_AF_ACTION0_PTR_ADVANCE,
+ NPC_HW_TSTAMP_OFFSET);
+ rvu_write64(rvu, blkaddr, NPC_AF_PKINDX_ACTION0(pkind), val);
+
+ return 0;
+}
+
+static int npc_get_ucast_mcam_index(struct npc_mcam *mcam, u16 pcifunc,
+ int nixlf)
+{
+ struct rvu_hwinfo *hw = container_of(mcam, struct rvu_hwinfo, mcam);
+ struct rvu *rvu = hw->rvu;
+ int blkaddr = 0, max = 0;
+ struct rvu_block *block;
+ struct rvu_pfvf *pfvf;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ /* Given a PF/VF and NIX LF number calculate the unicast mcam
+ * entry index based on the NIX block assigned to the PF/VF.
+ */
+ blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
+ while (blkaddr) {
+ if (pfvf->nix_blkaddr == blkaddr)
+ break;
+ block = &rvu->hw->block[blkaddr];
+ max += block->lf.max;
+ blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
+ }
+
+ return mcam->nixlf_offset + (max + nixlf) * RSVD_MCAM_ENTRIES_PER_NIXLF;
+}
+
+int npc_get_nixlf_mcam_index(struct npc_mcam *mcam,
+ u16 pcifunc, int nixlf, int type)
+{
+ int pf = rvu_get_pf(pcifunc);
+ int index;
+
+ /* Check if this is for a PF */
+ if (pf && !(pcifunc & RVU_PFVF_FUNC_MASK)) {
+ /* Reserved entries exclude PF0 */
+ pf--;
+ index = mcam->pf_offset + (pf * RSVD_MCAM_ENTRIES_PER_PF);
+ /* Broadcast address matching entry should be first so
+ * that the packet can be replicated to all VFs.
+ */
+ if (type == NIXLF_BCAST_ENTRY)
+ return index;
+ else if (type == NIXLF_ALLMULTI_ENTRY)
+ return index + 1;
+ else if (type == NIXLF_PROMISC_ENTRY)
+ return index + 2;
+ }
+
+ return npc_get_ucast_mcam_index(mcam, pcifunc, nixlf);
+}
+
+int npc_get_bank(struct npc_mcam *mcam, int index)
+{
+ int bank = index / mcam->banksize;
+
+ /* 0,1 & 2,3 banks are combined for this keysize */
+ if (mcam->keysize == NPC_MCAM_KEY_X2)
+ return bank ? 2 : 0;
+
+ return bank;
+}
+
+bool is_mcam_entry_enabled(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, int index)
+{
+ int bank = npc_get_bank(mcam, index);
+ u64 cfg;
+
+ index &= (mcam->banksize - 1);
+ cfg = rvu_read64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_CFG(index, bank));
+ return (cfg & 1);
+}
+
+void npc_enable_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, int index, bool enable)
+{
+ int bank = npc_get_bank(mcam, index);
+ int actbank = bank;
+
+ index &= (mcam->banksize - 1);
+ for (; bank < (actbank + mcam->banks_per_entry); bank++) {
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CFG(index, bank),
+ enable ? 1 : 0);
+ }
+}
+
+static void npc_clear_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, int index)
+{
+ int bank = npc_get_bank(mcam, index);
+ int actbank = bank;
+
+ index &= (mcam->banksize - 1);
+ for (; bank < (actbank + mcam->banks_per_entry); bank++) {
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 1), 0);
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 0), 0);
+
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_W0(index, bank, 1), 0);
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_W0(index, bank, 0), 0);
+
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_W1(index, bank, 1), 0);
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_W1(index, bank, 0), 0);
+ }
+}
+
+static void npc_get_keyword(struct mcam_entry *entry, int idx,
+ u64 *cam0, u64 *cam1)
+{
+ u64 kw_mask = 0x00;
+
+#define CAM_MASK(n) (BIT_ULL(n) - 1)
+
+ /* 0, 2, 4, 6 indices refer to BANKX_CAMX_W0 and
+ * 1, 3, 5, 7 indices refer to BANKX_CAMX_W1.
+ *
+ * Also, only 48 bits of BANKX_CAMX_W1 are valid.
+ */
+ switch (idx) {
+ case 0:
+ /* BANK(X)_CAM_W0<63:0> = MCAM_KEY[KW0]<63:0> */
+ *cam1 = entry->kw[0];
+ kw_mask = entry->kw_mask[0];
+ break;
+ case 1:
+ /* BANK(X)_CAM_W1<47:0> = MCAM_KEY[KW1]<47:0> */
+ *cam1 = entry->kw[1] & CAM_MASK(48);
+ kw_mask = entry->kw_mask[1] & CAM_MASK(48);
+ break;
+ case 2:
+ /* BANK(X + 1)_CAM_W0<15:0> = MCAM_KEY[KW1]<63:48>
+ * BANK(X + 1)_CAM_W0<63:16> = MCAM_KEY[KW2]<47:0>
+ */
+ *cam1 = (entry->kw[1] >> 48) & CAM_MASK(16);
+ *cam1 |= ((entry->kw[2] & CAM_MASK(48)) << 16);
+ kw_mask = (entry->kw_mask[1] >> 48) & CAM_MASK(16);
+ kw_mask |= ((entry->kw_mask[2] & CAM_MASK(48)) << 16);
+ break;
+ case 3:
+ /* BANK(X + 1)_CAM_W1<15:0> = MCAM_KEY[KW2]<63:48>
+ * BANK(X + 1)_CAM_W1<47:16> = MCAM_KEY[KW3]<31:0>
+ */
+ *cam1 = (entry->kw[2] >> 48) & CAM_MASK(16);
+ *cam1 |= ((entry->kw[3] & CAM_MASK(32)) << 16);
+ kw_mask = (entry->kw_mask[2] >> 48) & CAM_MASK(16);
+ kw_mask |= ((entry->kw_mask[3] & CAM_MASK(32)) << 16);
+ break;
+ case 4:
+ /* BANK(X + 2)_CAM_W0<31:0> = MCAM_KEY[KW3]<63:32>
+ * BANK(X + 2)_CAM_W0<63:32> = MCAM_KEY[KW4]<31:0>
+ */
+ *cam1 = (entry->kw[3] >> 32) & CAM_MASK(32);
+ *cam1 |= ((entry->kw[4] & CAM_MASK(32)) << 32);
+ kw_mask = (entry->kw_mask[3] >> 32) & CAM_MASK(32);
+ kw_mask |= ((entry->kw_mask[4] & CAM_MASK(32)) << 32);
+ break;
+ case 5:
+ /* BANK(X + 2)_CAM_W1<31:0> = MCAM_KEY[KW4]<63:32>
+ * BANK(X + 2)_CAM_W1<47:32> = MCAM_KEY[KW5]<15:0>
+ */
+ *cam1 = (entry->kw[4] >> 32) & CAM_MASK(32);
+ *cam1 |= ((entry->kw[5] & CAM_MASK(16)) << 32);
+ kw_mask = (entry->kw_mask[4] >> 32) & CAM_MASK(32);
+ kw_mask |= ((entry->kw_mask[5] & CAM_MASK(16)) << 32);
+ break;
+ case 6:
+ /* BANK(X + 3)_CAM_W0<47:0> = MCAM_KEY[KW5]<63:16>
+ * BANK(X + 3)_CAM_W0<63:48> = MCAM_KEY[KW6]<15:0>
+ */
+ *cam1 = (entry->kw[5] >> 16) & CAM_MASK(48);
+ *cam1 |= ((entry->kw[6] & CAM_MASK(16)) << 48);
+ kw_mask = (entry->kw_mask[5] >> 16) & CAM_MASK(48);
+ kw_mask |= ((entry->kw_mask[6] & CAM_MASK(16)) << 48);
+ break;
+ case 7:
+ /* BANK(X + 3)_CAM_W1<47:0> = MCAM_KEY[KW6]<63:16> */
+ *cam1 = (entry->kw[6] >> 16) & CAM_MASK(48);
+ kw_mask = (entry->kw_mask[6] >> 16) & CAM_MASK(48);
+ break;
+ }
+
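+	/* CAM1 carries the bits to match as '1' and CAM0 the bits to match
+	 * as '0'; key bits outside kw_mask end up as don't care (both 0).
+	 */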
+ *cam1 &= kw_mask;
+ *cam0 = ~*cam1 & kw_mask;
+}
+
+static void npc_fill_entryword(struct mcam_entry *entry, int idx,
+ u64 cam0, u64 cam1)
+{
+ /* Similar to npc_get_keyword, but fills mcam_entry structure from
+ * CAM registers.
+ */
+ switch (idx) {
+ case 0:
+ entry->kw[0] = cam1;
+ entry->kw_mask[0] = cam1 ^ cam0;
+ break;
+ case 1:
+ entry->kw[1] = cam1;
+ entry->kw_mask[1] = cam1 ^ cam0;
+ break;
+ case 2:
+ entry->kw[1] |= (cam1 & CAM_MASK(16)) << 48;
+ entry->kw[2] = (cam1 >> 16) & CAM_MASK(48);
+ entry->kw_mask[1] |= ((cam1 ^ cam0) & CAM_MASK(16)) << 48;
+ entry->kw_mask[2] = ((cam1 ^ cam0) >> 16) & CAM_MASK(48);
+ break;
+ case 3:
+ entry->kw[2] |= (cam1 & CAM_MASK(16)) << 48;
+ entry->kw[3] = (cam1 >> 16) & CAM_MASK(32);
+ entry->kw_mask[2] |= ((cam1 ^ cam0) & CAM_MASK(16)) << 48;
+ entry->kw_mask[3] = ((cam1 ^ cam0) >> 16) & CAM_MASK(32);
+ break;
+ case 4:
+ entry->kw[3] |= (cam1 & CAM_MASK(32)) << 32;
+ entry->kw[4] = (cam1 >> 32) & CAM_MASK(32);
+ entry->kw_mask[3] |= ((cam1 ^ cam0) & CAM_MASK(32)) << 32;
+ entry->kw_mask[4] = ((cam1 ^ cam0) >> 32) & CAM_MASK(32);
+ break;
+ case 5:
+ entry->kw[4] |= (cam1 & CAM_MASK(32)) << 32;
+ entry->kw[5] = (cam1 >> 32) & CAM_MASK(16);
+ entry->kw_mask[4] |= ((cam1 ^ cam0) & CAM_MASK(32)) << 32;
+ entry->kw_mask[5] = ((cam1 ^ cam0) >> 32) & CAM_MASK(16);
+ break;
+ case 6:
+ entry->kw[5] |= (cam1 & CAM_MASK(48)) << 16;
+ entry->kw[6] = (cam1 >> 48) & CAM_MASK(16);
+ entry->kw_mask[5] |= ((cam1 ^ cam0) & CAM_MASK(48)) << 16;
+ entry->kw_mask[6] = ((cam1 ^ cam0) >> 48) & CAM_MASK(16);
+ break;
+ case 7:
+ entry->kw[6] |= (cam1 & CAM_MASK(48)) << 16;
+ entry->kw_mask[6] |= ((cam1 ^ cam0) & CAM_MASK(48)) << 16;
+ break;
+ }
+}
+
+static u64 npc_get_default_entry_action(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, u16 pf_func)
+{
+ int bank, nixlf, index;
+
+	/* Get the ucast rule entry index */
+ if (nix_get_nixlf(rvu, pf_func, &nixlf, NULL)) {
+ dev_err(rvu->dev, "%s: nixlf not attached to pcifunc:0x%x\n",
+ __func__, pf_func);
+ /* Action 0 is drop */
+ return 0;
+ }
+
+ index = npc_get_nixlf_mcam_index(mcam, pf_func, nixlf,
+ NIXLF_UCAST_ENTRY);
+ bank = npc_get_bank(mcam, index);
+ index &= (mcam->banksize - 1);
+
+ return rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_ACTION(index, bank));
+}
+
+static void npc_fixup_vf_rule(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, int index, struct mcam_entry *entry,
+ bool *enable)
+{
+ struct rvu_npc_mcam_rule *rule;
+ u16 owner, target_func;
+ struct rvu_pfvf *pfvf;
+ u64 rx_action;
+
+ owner = mcam->entry2pfvf_map[index];
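+	/* Bits [19:4] of the RX action hold the target PF_FUNC */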
+ target_func = (entry->action >> 4) & 0xffff;
+ /* do nothing when target is LBK/PF or owner is not PF */
+ if (is_pffunc_af(owner) || is_afvf(target_func) ||
+ (owner & RVU_PFVF_FUNC_MASK) ||
+ !(target_func & RVU_PFVF_FUNC_MASK))
+ return;
+
+ /* save entry2target_pffunc */
+ pfvf = rvu_get_pfvf(rvu, target_func);
+ mcam->entry2target_pffunc[index] = target_func;
+
+ /* don't enable rule when nixlf not attached or initialized */
+ if (!(is_nixlf_attached(rvu, target_func) &&
+ test_bit(NIXLF_INITIALIZED, &pfvf->flags)))
+ *enable = false;
+
+	/* Fixup is not needed for the rules added by user (ntuple filters) */
+ list_for_each_entry(rule, &mcam->mcam_rules, list) {
+ if (rule->entry == index)
+ return;
+ }
+
+ /* copy VF default entry action to the VF mcam entry */
+ rx_action = npc_get_default_entry_action(rvu, mcam, blkaddr,
+ target_func);
+ if (rx_action)
+ entry->action = rx_action;
+}
+
+static void npc_config_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, int index, u8 intf,
+ struct mcam_entry *entry, bool enable)
+{
+ int bank = npc_get_bank(mcam, index);
+ int kw = 0, actbank, actindex;
+ u8 tx_intf_mask = ~intf & 0x3;
+ u8 tx_intf = intf;
+ u64 cam0, cam1;
+
+ actbank = bank; /* Save bank id, to set action later on */
+ actindex = index;
+ index &= (mcam->banksize - 1);
+
+ /* Disable before mcam entry update */
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, actindex, false);
+
+ /* Clear mcam entry to avoid writes being suppressed by NPC */
+ npc_clear_mcam_entry(rvu, mcam, blkaddr, actindex);
+
+ /* CAM1 takes the comparison value and
+ * CAM0 specifies match for a bit in key being '0' or '1' or 'dontcare'.
+ * CAM1<n> = 0 & CAM0<n> = 1 => match if key<n> = 0
+ * CAM1<n> = 1 & CAM0<n> = 0 => match if key<n> = 1
+	 * CAM1<n> = 0 & CAM0<n> = 0 => always match, i.e. don't care.
+ */
+ for (; bank < (actbank + mcam->banks_per_entry); bank++, kw = kw + 2) {
+ /* Interface should be set in all banks */
+ if (is_npc_intf_tx(intf)) {
+ /* Last bit must be set and rest don't care
+ * for TX interfaces
+ */
+ tx_intf_mask = 0x1;
+ tx_intf = intf & tx_intf_mask;
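+	/* As with MCAM, CAM1 holds the bits to match as '1' and CAM0 the
+	 * bits to match as '0' within each field's mask.
+	 */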
+ tx_intf_mask = ~tx_intf & tx_intf_mask;
+ }
+
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 1),
+ tx_intf);
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 0),
+ tx_intf_mask);
+
+ /* Set the match key */
+ npc_get_keyword(entry, kw, &cam0, &cam1);
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_W0(index, bank, 1), cam1);
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_W0(index, bank, 0), cam0);
+
+ npc_get_keyword(entry, kw + 1, &cam0, &cam1);
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_W1(index, bank, 1), cam1);
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_W1(index, bank, 0), cam0);
+ }
+
+ /* PF installing VF rule */
+ if (is_npc_intf_rx(intf) && actindex < mcam->bmap_entries)
+ npc_fixup_vf_rule(rvu, mcam, blkaddr, actindex, entry, &enable);
+
+ /* Set 'action' */
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_ACTION(index, actbank), entry->action);
+
+ /* Set TAG 'action' */
+ rvu_write64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_TAG_ACT(index, actbank),
+ entry->vtag_action);
+
+ /* Enable the entry */
+ if (enable)
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, actindex, true);
+}
+
+void npc_read_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, u16 src,
+ struct mcam_entry *entry, u8 *intf, u8 *ena)
+{
+ int sbank = npc_get_bank(mcam, src);
+ int bank, kw = 0;
+ u64 cam0, cam1;
+
+ src &= (mcam->banksize - 1);
+ bank = sbank;
+
+ for (; bank < (sbank + mcam->banks_per_entry); bank++, kw = kw + 2) {
+ cam1 = rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_W0(src, bank, 1));
+ cam0 = rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_W0(src, bank, 0));
+ npc_fill_entryword(entry, kw, cam0, cam1);
+
+ cam1 = rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_W1(src, bank, 1));
+ cam0 = rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_W1(src, bank, 0));
+ npc_fill_entryword(entry, kw + 1, cam0, cam1);
+ }
+
+ entry->action = rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_ACTION(src, sbank));
+ entry->vtag_action =
+ rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_TAG_ACT(src, sbank));
+ *intf = rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_INTF(src, sbank, 1)) & 3;
+ *ena = rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CFG(src, sbank)) & 1;
+}
+
+static void npc_copy_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, u16 src, u16 dest)
+{
+ int dbank = npc_get_bank(mcam, dest);
+ int sbank = npc_get_bank(mcam, src);
+ u64 cfg, sreg, dreg;
+ int bank, i;
+
+ src &= (mcam->banksize - 1);
+ dest &= (mcam->banksize - 1);
+
+ /* Copy INTF's, W0's, W1's CAM0 and CAM1 configuration */
+ for (bank = 0; bank < mcam->banks_per_entry; bank++) {
+ sreg = NPC_AF_MCAMEX_BANKX_CAMX_INTF(src, sbank + bank, 0);
+ dreg = NPC_AF_MCAMEX_BANKX_CAMX_INTF(dest, dbank + bank, 0);
+ for (i = 0; i < 6; i++) {
+ cfg = rvu_read64(rvu, blkaddr, sreg + (i * 8));
+ rvu_write64(rvu, blkaddr, dreg + (i * 8), cfg);
+ }
+ }
+
+ /* Copy action */
+ cfg = rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_ACTION(src, sbank));
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_ACTION(dest, dbank), cfg);
+
+ /* Copy TAG action */
+ cfg = rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_TAG_ACT(src, sbank));
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_TAG_ACT(dest, dbank), cfg);
+
+ /* Enable or disable */
+ cfg = rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CFG(src, sbank));
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CFG(dest, dbank), cfg);
+}
+
+static u64 npc_get_mcam_action(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, int index)
+{
+ int bank = npc_get_bank(mcam, index);
+
+ index &= (mcam->banksize - 1);
+ return rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_ACTION(index, bank));
+}
+
+void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
+ int nixlf, u64 chan, u8 *mac_addr)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ struct npc_install_flow_req req = { 0 };
+ struct npc_install_flow_rsp rsp = { 0 };
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct nix_rx_action action = { 0 };
+ int blkaddr, index;
+
+ /* AF's and SDP VFs work in promiscuous mode */
+ if (is_afvf(pcifunc) || is_sdp_vf(pcifunc))
+ return;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+ /* Ucast rule should not be installed if DMAC
+ * extraction is not supported by the profile.
+ */
+ if (!npc_is_feature_supported(rvu, BIT_ULL(NPC_DMAC), pfvf->nix_rx_intf))
+ return;
+
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc,
+ nixlf, NIXLF_UCAST_ENTRY);
+
+ /* Don't change the action if entry is already enabled
+ * Otherwise RSS action may get overwritten.
+ */
+ if (is_mcam_entry_enabled(rvu, mcam, blkaddr, index)) {
+ *(u64 *)&action = npc_get_mcam_action(rvu, mcam,
+ blkaddr, index);
+ } else {
+ action.op = NIX_RX_ACTIONOP_UCAST;
+ action.pf_func = pcifunc;
+ }
+
+ req.default_rule = 1;
+ ether_addr_copy(req.packet.dmac, mac_addr);
+ eth_broadcast_addr((u8 *)&req.mask.dmac);
+ req.features = BIT_ULL(NPC_DMAC);
+ req.channel = chan;
+ req.chan_mask = 0xFFFU;
+ req.intf = pfvf->nix_rx_intf;
+ req.op = action.op;
+ req.hdr.pcifunc = 0; /* AF is requester */
+ req.vf = action.pf_func;
+ req.index = action.index;
+ req.match_id = action.match_id;
+ req.flow_key_alg = action.flow_key_alg;
+
+ rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
+}
+
+void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
+ int nixlf, u64 chan, u8 chan_cnt)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ struct npc_install_flow_req req = { 0 };
+ struct npc_install_flow_rsp rsp = { 0 };
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_hwinfo *hw = rvu->hw;
+ int blkaddr, ucast_idx, index;
+ struct nix_rx_action action = { 0 };
+ u64 relaxed_mask;
+ u8 flow_key_alg;
+
+ if (!hw->cap.nix_rx_multicast && is_cgx_vf(rvu, pcifunc))
+ return;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc,
+ nixlf, NIXLF_PROMISC_ENTRY);
+
+ if (is_cgx_vf(rvu, pcifunc))
+ index = npc_get_nixlf_mcam_index(mcam,
+ pcifunc & ~RVU_PFVF_FUNC_MASK,
+ nixlf, NIXLF_PROMISC_ENTRY);
+
+ /* If the corresponding PF's ucast action is RSS,
+ * use the same action for promisc also
+ */
+ ucast_idx = npc_get_nixlf_mcam_index(mcam, pcifunc,
+ nixlf, NIXLF_UCAST_ENTRY);
+ if (is_mcam_entry_enabled(rvu, mcam, blkaddr, ucast_idx))
+ *(u64 *)&action = npc_get_mcam_action(rvu, mcam,
+ blkaddr, ucast_idx);
+
+ if (action.op != NIX_RX_ACTIONOP_RSS) {
+ *(u64 *)&action = 0;
+ action.op = NIX_RX_ACTIONOP_UCAST;
+ }
+
+ flow_key_alg = action.flow_key_alg;
+
+ /* RX_ACTION set to MCAST for CGX PF's */
+ if (hw->cap.nix_rx_multicast && pfvf->use_mce_list &&
+ is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) {
+ *(u64 *)&action = 0;
+ action.op = NIX_RX_ACTIONOP_MCAST;
+ pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
+ action.index = pfvf->promisc_mce_idx;
+ }
+
+	/* For CN10K the upper two bits of the channel number are the
+	 * CPT channel number. By masking out these bits in the MCAM
+	 * entry, the same entry used for NIX will also allow packets
+	 * received from CPT to be parsed.
+	 */
+	if (!is_rvu_otx2(rvu))
+		req.chan_mask = NIX_CHAN_CPT_X2P_MASK;
+	else
+		req.chan_mask = 0xFFFU;
+
+ if (chan_cnt > 1) {
+ if (!is_power_of_2(chan_cnt)) {
+ dev_err(rvu->dev,
+ "%s: channel count more than 1, must be power of 2\n", __func__);
+ return;
+ }
+ relaxed_mask = GENMASK_ULL(BITS_PER_LONG_LONG - 1,
+ ilog2(chan_cnt));
+ req.chan_mask &= relaxed_mask;
+ }
+
+ req.channel = chan;
+ req.intf = pfvf->nix_rx_intf;
+ req.entry = index;
+ req.op = action.op;
+ req.hdr.pcifunc = 0; /* AF is requester */
+ req.vf = pcifunc;
+ req.index = action.index;
+ req.match_id = action.match_id;
+ req.flow_key_alg = flow_key_alg;
+
+ rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
+}
+
+void rvu_npc_enable_promisc_entry(struct rvu *rvu, u16 pcifunc,
+ int nixlf, bool enable)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ int blkaddr, index;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+ /* Get 'pcifunc' of PF device */
+ pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK;
+
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc,
+ nixlf, NIXLF_PROMISC_ENTRY);
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
+}
+
+void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
+ int nixlf, u64 chan)
+{
+ struct rvu_pfvf *pfvf;
+ struct npc_install_flow_req req = { 0 };
+ struct npc_install_flow_rsp rsp = { 0 };
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_hwinfo *hw = rvu->hw;
+ int blkaddr, index;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+ /* Skip LBK VFs */
+ if (is_afvf(pcifunc))
+ return;
+
+ /* If pkt replication is not supported,
+ * then only PF is allowed to add a bcast match entry.
+ */
+ if (!hw->cap.nix_rx_multicast && is_vf(pcifunc))
+ return;
+
+ /* Get 'pcifunc' of PF device */
+ pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK;
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+
+	/* Bcast rule should not be installed if neither DMAC
+	 * nor LXMB extraction is supported by the profile.
+ */
+ if (!npc_is_feature_supported(rvu, BIT_ULL(NPC_DMAC), pfvf->nix_rx_intf) &&
+ !npc_is_feature_supported(rvu, BIT_ULL(NPC_LXMB), pfvf->nix_rx_intf))
+ return;
+
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc,
+ nixlf, NIXLF_BCAST_ENTRY);
+
+ if (!hw->cap.nix_rx_multicast) {
+ /* Early silicon doesn't support pkt replication,
+ * so install entry with UCAST action, so that PF
+ * receives all broadcast packets.
+ */
+ req.op = NIX_RX_ACTIONOP_UCAST;
+ } else {
+ req.op = NIX_RX_ACTIONOP_MCAST;
+ req.index = pfvf->bcast_mce_idx;
+ }
+
+ eth_broadcast_addr((u8 *)&req.packet.dmac);
+ eth_broadcast_addr((u8 *)&req.mask.dmac);
+ req.features = BIT_ULL(NPC_DMAC);
+ req.channel = chan;
+ req.chan_mask = 0xFFFU;
+ req.intf = pfvf->nix_rx_intf;
+ req.entry = index;
+ req.hdr.pcifunc = 0; /* AF is requester */
+ req.vf = pcifunc;
+
+ rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
+}
+
+void rvu_npc_enable_bcast_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
+ bool enable)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ int blkaddr, index;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+ /* Get 'pcifunc' of PF device */
+ pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK;
+
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf,
+ NIXLF_BCAST_ENTRY);
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
+}
+
+void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
+ u64 chan)
+{
+ struct npc_install_flow_req req = { 0 };
+ struct npc_install_flow_rsp rsp = { 0 };
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_hwinfo *hw = rvu->hw;
+ int blkaddr, ucast_idx, index;
+ u8 mac_addr[ETH_ALEN] = { 0 };
+ struct nix_rx_action action = { 0 };
+ struct rvu_pfvf *pfvf;
+ u8 flow_key_alg;
+ u16 vf_func;
+
+ /* Only CGX PF/VF can add allmulticast entry */
+ if (is_afvf(pcifunc) && is_sdp_vf(pcifunc))
+ return;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+ /* Get 'pcifunc' of PF device */
+ vf_func = pcifunc & RVU_PFVF_FUNC_MASK;
+ pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK;
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+
+	/* Mcast rule should not be installed if neither DMAC
+	 * nor LXMB extraction is supported by the profile.
+ */
+ if (!npc_is_feature_supported(rvu, BIT_ULL(NPC_DMAC), pfvf->nix_rx_intf) &&
+ !npc_is_feature_supported(rvu, BIT_ULL(NPC_LXMB), pfvf->nix_rx_intf))
+ return;
+
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc,
+ nixlf, NIXLF_ALLMULTI_ENTRY);
+
+ /* If the corresponding PF's ucast action is RSS,
+ * use the same action for multicast entry also
+ */
+ ucast_idx = npc_get_nixlf_mcam_index(mcam, pcifunc,
+ nixlf, NIXLF_UCAST_ENTRY);
+ if (is_mcam_entry_enabled(rvu, mcam, blkaddr, ucast_idx))
+ *(u64 *)&action = npc_get_mcam_action(rvu, mcam,
+ blkaddr, ucast_idx);
+
+ flow_key_alg = action.flow_key_alg;
+ if (action.op != NIX_RX_ACTIONOP_RSS) {
+ *(u64 *)&action = 0;
+ action.op = NIX_RX_ACTIONOP_UCAST;
+ action.pf_func = pcifunc;
+ }
+
+ /* RX_ACTION set to MCAST for CGX PF's */
+ if (hw->cap.nix_rx_multicast && pfvf->use_mce_list) {
+ *(u64 *)&action = 0;
+ action.op = NIX_RX_ACTIONOP_MCAST;
+ action.index = pfvf->mcast_mce_idx;
+ }
+
+ mac_addr[0] = 0x01; /* LSB bit of 1st byte in DMAC */
+ ether_addr_copy(req.packet.dmac, mac_addr);
+ ether_addr_copy(req.mask.dmac, mac_addr);
+ req.features = BIT_ULL(NPC_DMAC);
+
+	/* For CN10K the upper two bits of the channel number are the
+	 * CPT channel number. By masking out these bits in the MCAM
+	 * entry, the same entry used for NIX will also allow packets
+	 * received from CPT to be parsed.
+	 */
+ if (!is_rvu_otx2(rvu))
+ req.chan_mask = NIX_CHAN_CPT_X2P_MASK;
+ else
+ req.chan_mask = 0xFFFU;
+
+ req.channel = chan;
+ req.intf = pfvf->nix_rx_intf;
+ req.entry = index;
+ req.op = action.op;
+ req.hdr.pcifunc = 0; /* AF is requester */
+ req.vf = pcifunc | vf_func;
+ req.index = action.index;
+ req.match_id = action.match_id;
+ req.flow_key_alg = flow_key_alg;
+
+ rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
+}
+
+void rvu_npc_enable_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
+ bool enable)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ int blkaddr, index;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+ /* Get 'pcifunc' of PF device */
+ pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK;
+
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf,
+ NIXLF_ALLMULTI_ENTRY);
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
+}
+
+static void npc_update_vf_flow_entry(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, u16 pcifunc, u64 rx_action)
+{
+ int actindex, index, bank, entry;
+ struct rvu_npc_mcam_rule *rule;
+ bool enable, update;
+
+ if (!(pcifunc & RVU_PFVF_FUNC_MASK))
+ return;
+
+ mutex_lock(&mcam->lock);
+ for (index = 0; index < mcam->bmap_entries; index++) {
+ if (mcam->entry2target_pffunc[index] == pcifunc) {
+ update = true;
+ /* update not needed for the rules added via ntuple filters */
+ list_for_each_entry(rule, &mcam->mcam_rules, list) {
+ if (rule->entry == index)
+ update = false;
+ }
+ if (!update)
+ continue;
+ bank = npc_get_bank(mcam, index);
+ actindex = index;
+ entry = index & (mcam->banksize - 1);
+
+ /* read vf flow entry enable status */
+ enable = is_mcam_entry_enabled(rvu, mcam, blkaddr,
+ actindex);
+ /* disable before mcam entry update */
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, actindex,
+ false);
+ /* update 'action' */
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_ACTION(entry, bank),
+ rx_action);
+ if (enable)
+ npc_enable_mcam_entry(rvu, mcam, blkaddr,
+ actindex, true);
+ }
+ }
+ mutex_unlock(&mcam->lock);
+}
+
+static void npc_update_rx_action_with_alg_idx(struct rvu *rvu, struct nix_rx_action action,
+ struct rvu_pfvf *pfvf, int mcam_index, int blkaddr,
+ int alg_idx)
+
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_hwinfo *hw = rvu->hw;
+ int bank, op_rss;
+
+ if (!is_mcam_entry_enabled(rvu, mcam, blkaddr, mcam_index))
+ return;
+
+ op_rss = (!hw->cap.nix_rx_multicast || !pfvf->use_mce_list);
+
+ bank = npc_get_bank(mcam, mcam_index);
+ mcam_index &= (mcam->banksize - 1);
+
+ /* If Rx action is MCAST update only RSS algorithm index */
+ if (!op_rss) {
+ *(u64 *)&action = rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_ACTION(mcam_index, bank));
+
+ action.flow_key_alg = alg_idx;
+ }
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_ACTION(mcam_index, bank), *(u64 *)&action);
+}
+
+void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
+ int group, int alg_idx, int mcam_index)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct nix_rx_action action;
+ int blkaddr, index, bank;
+ struct rvu_pfvf *pfvf;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+ /* Check if this is for reserved default entry */
+ if (mcam_index < 0) {
+ if (group != DEFAULT_RSS_CONTEXT_GROUP)
+ return;
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc,
+ nixlf, NIXLF_UCAST_ENTRY);
+ } else {
+ /* TODO: validate this mcam index */
+ index = mcam_index;
+ }
+
+ if (index >= mcam->total_entries)
+ return;
+
+ bank = npc_get_bank(mcam, index);
+ index &= (mcam->banksize - 1);
+
+ *(u64 *)&action = rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_ACTION(index, bank));
+ /* Ignore if no action was set earlier */
+ if (!*(u64 *)&action)
+ return;
+
+ action.op = NIX_RX_ACTIONOP_RSS;
+ action.pf_func = pcifunc;
+ action.index = group;
+ action.flow_key_alg = alg_idx;
+
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_ACTION(index, bank), *(u64 *)&action);
+
+ /* update the VF flow rule action with the VF default entry action */
+ if (mcam_index < 0)
+ npc_update_vf_flow_entry(rvu, mcam, blkaddr, pcifunc,
+ *(u64 *)&action);
+
+ /* update the action change in default rule */
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ if (pfvf->def_ucast_rule)
+ pfvf->def_ucast_rule->rx_action = action;
+
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc,
+ nixlf, NIXLF_PROMISC_ENTRY);
+
+ /* If PF's promiscuous entry is enabled,
+ * Set RSS action for that entry as well
+ */
+ npc_update_rx_action_with_alg_idx(rvu, action, pfvf, index, blkaddr,
+ alg_idx);
+
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc,
+ nixlf, NIXLF_ALLMULTI_ENTRY);
+ /* If PF's allmulti entry is enabled,
+ * Set RSS action for that entry as well
+ */
+ npc_update_rx_action_with_alg_idx(rvu, action, pfvf, index, blkaddr,
+ alg_idx);
+}
+
+void npc_enadis_default_mce_entry(struct rvu *rvu, u16 pcifunc,
+ int nixlf, int type, bool enable)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct nix_mce_list *mce_list;
+ int index, blkaddr, mce_idx;
+ struct rvu_pfvf *pfvf;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc & ~RVU_PFVF_FUNC_MASK,
+ nixlf, type);
+
+ /* disable MCAM entry when packet replication is not supported by hw */
+ if (!hw->cap.nix_rx_multicast && !is_vf(pcifunc)) {
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
+ return;
+ }
+
+	/* Return in case the MCE list is not enabled */
+ pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
+ if (hw->cap.nix_rx_multicast && is_vf(pcifunc) &&
+ type != NIXLF_BCAST_ENTRY && !pfvf->use_mce_list)
+ return;
+
+ nix_get_mce_list(rvu, pcifunc, type, &mce_list, &mce_idx);
+
+ nix_update_mce_list(rvu, pcifunc, mce_list,
+ mce_idx, index, enable);
+ if (enable)
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
+}
+
+static void npc_enadis_default_entries(struct rvu *rvu, u16 pcifunc,
+ int nixlf, bool enable)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ int index, blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+ /* Ucast MCAM match entry of this PF/VF */
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc,
+ nixlf, NIXLF_UCAST_ENTRY);
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
+
+ /* Nothing to do for VFs, on platforms where pkt replication
+ * is not supported
+ */
+ if ((pcifunc & RVU_PFVF_FUNC_MASK) && !rvu->hw->cap.nix_rx_multicast)
+ return;
+
+	/* Add/delete pf_func to/from the broadcast MCE list */
+ npc_enadis_default_mce_entry(rvu, pcifunc, nixlf,
+ NIXLF_BCAST_ENTRY, enable);
+}
+
+void rvu_npc_disable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
+{
+ if (nixlf < 0)
+ return;
+
+ npc_enadis_default_entries(rvu, pcifunc, nixlf, false);
+
+ /* Delete multicast and promisc MCAM entries */
+ npc_enadis_default_mce_entry(rvu, pcifunc, nixlf,
+ NIXLF_ALLMULTI_ENTRY, false);
+ npc_enadis_default_mce_entry(rvu, pcifunc, nixlf,
+ NIXLF_PROMISC_ENTRY, false);
+}
+
+bool rvu_npc_enable_mcam_by_entry_index(struct rvu *rvu, int entry, int intf, bool enable)
+{
+ int blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_npc_mcam_rule *rule, *tmp;
+
+ mutex_lock(&mcam->lock);
+
+ list_for_each_entry_safe(rule, tmp, &mcam->mcam_rules, list) {
+ if (rule->intf != intf)
+ continue;
+
+ if (rule->entry != entry)
+ continue;
+
+ rule->enable = enable;
+ mutex_unlock(&mcam->lock);
+
+ npc_enable_mcam_entry(rvu, mcam, blkaddr,
+ entry, enable);
+
+ return true;
+ }
+
+ mutex_unlock(&mcam->lock);
+ return false;
+}
+
+void rvu_npc_enable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
+{
+ if (nixlf < 0)
+ return;
+
+ /* Enables only broadcast match entry. Promisc/Allmulti are enabled
+ * in set_rx_mode mbox handler.
+ */
+ npc_enadis_default_entries(rvu, pcifunc, nixlf, true);
+}
+
+void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_npc_mcam_rule *rule, *tmp;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+ mutex_lock(&mcam->lock);
+
+ /* Disable MCAM entries directing traffic to this 'pcifunc' */
+ list_for_each_entry_safe(rule, tmp, &mcam->mcam_rules, list) {
+ if (is_npc_intf_rx(rule->intf) &&
+ rule->rx_action.pf_func == pcifunc &&
+ rule->rx_action.op != NIX_RX_ACTIONOP_MCAST) {
+ npc_enable_mcam_entry(rvu, mcam, blkaddr,
+ rule->entry, false);
+ rule->enable = false;
+ /* Indicate that default rule is disabled */
+ if (rule->default_rule) {
+ pfvf->def_ucast_rule = NULL;
+ list_del(&rule->list);
+ kfree(rule);
+ }
+ }
+ }
+
+ mutex_unlock(&mcam->lock);
+
+ npc_mcam_disable_flows(rvu, pcifunc);
+
+ rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
+}
+
+void rvu_npc_free_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_npc_mcam_rule *rule, *tmp;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+ mutex_lock(&mcam->lock);
+
+ /* Free all MCAM entries owned by this 'pcifunc' */
+ npc_mcam_free_all_entries(rvu, mcam, blkaddr, pcifunc);
+
+ /* Free all MCAM counters owned by this 'pcifunc' */
+ npc_mcam_free_all_counters(rvu, mcam, pcifunc);
+
+ /* Delete MCAM entries owned by this 'pcifunc' */
+ list_for_each_entry_safe(rule, tmp, &mcam->mcam_rules, list) {
+ if (rule->owner == pcifunc && !rule->default_rule) {
+ list_del(&rule->list);
+ kfree(rule);
+ }
+ }
+
+ mutex_unlock(&mcam->lock);
+
+ rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
+}
+
+static void npc_program_mkex_rx(struct rvu *rvu, int blkaddr,
+ struct npc_mcam_kex *mkex, u8 intf)
+{
+ int lid, lt, ld, fl;
+
+ if (is_npc_intf_tx(intf))
+ return;
+
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(intf),
+ mkex->keyx_cfg[NIX_INTF_RX]);
+
+ /* Program LDATA */
+ for (lid = 0; lid < NPC_MAX_LID; lid++) {
+ for (lt = 0; lt < NPC_MAX_LT; lt++) {
+ for (ld = 0; ld < NPC_MAX_LD; ld++)
+ SET_KEX_LD(intf, lid, lt, ld,
+ mkex->intf_lid_lt_ld[NIX_INTF_RX]
+ [lid][lt][ld]);
+ }
+ }
+ /* Program LFLAGS */
+ for (ld = 0; ld < NPC_MAX_LD; ld++) {
+ for (fl = 0; fl < NPC_MAX_LFL; fl++)
+ SET_KEX_LDFLAGS(intf, ld, fl,
+ mkex->intf_ld_flags[NIX_INTF_RX]
+ [ld][fl]);
+ }
+}
+
+static void npc_program_mkex_tx(struct rvu *rvu, int blkaddr,
+ struct npc_mcam_kex *mkex, u8 intf)
+{
+ int lid, lt, ld, fl;
+
+ if (is_npc_intf_rx(intf))
+ return;
+
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(intf),
+ mkex->keyx_cfg[NIX_INTF_TX]);
+
+ /* Program LDATA */
+ for (lid = 0; lid < NPC_MAX_LID; lid++) {
+ for (lt = 0; lt < NPC_MAX_LT; lt++) {
+ for (ld = 0; ld < NPC_MAX_LD; ld++)
+ SET_KEX_LD(intf, lid, lt, ld,
+ mkex->intf_lid_lt_ld[NIX_INTF_TX]
+ [lid][lt][ld]);
+ }
+ }
+ /* Program LFLAGS */
+ for (ld = 0; ld < NPC_MAX_LD; ld++) {
+ for (fl = 0; fl < NPC_MAX_LFL; fl++)
+ SET_KEX_LDFLAGS(intf, ld, fl,
+ mkex->intf_ld_flags[NIX_INTF_TX]
+ [ld][fl]);
+ }
+}
+
+static void npc_program_mkex_profile(struct rvu *rvu, int blkaddr,
+ struct npc_mcam_kex *mkex)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u8 intf;
+ int ld;
+
+ for (ld = 0; ld < NPC_MAX_LD; ld++)
+ rvu_write64(rvu, blkaddr, NPC_AF_KEX_LDATAX_FLAGS_CFG(ld),
+ mkex->kex_ld_flags[ld]);
+
+ for (intf = 0; intf < hw->npc_intfs; intf++) {
+ npc_program_mkex_rx(rvu, blkaddr, mkex, intf);
+ npc_program_mkex_tx(rvu, blkaddr, mkex, intf);
+ }
+
+	/* Program mkex hash profile */
+ npc_program_mkex_hash(rvu, blkaddr);
+}
+
+static int npc_fwdb_prfl_img_map(struct rvu *rvu, void __iomem **prfl_img_addr,
+ u64 *size)
+{
+ u64 prfl_addr, prfl_sz;
+
+ if (!rvu->fwdata)
+ return -EINVAL;
+
+ prfl_addr = rvu->fwdata->mcam_addr;
+ prfl_sz = rvu->fwdata->mcam_sz;
+
+ if (!prfl_addr || !prfl_sz)
+ return -EINVAL;
+
+ *prfl_img_addr = ioremap_wc(prfl_addr, prfl_sz);
+ if (!(*prfl_img_addr))
+ return -ENOMEM;
+
+ *size = prfl_sz;
+
+ return 0;
+}
+
+/* strtoull of "mkexprof" with base:36 */
+#define MKEX_END_SIGN 0xdeadbeef
+
+static void npc_load_mkex_profile(struct rvu *rvu, int blkaddr,
+ const char *mkex_profile)
+{
+ struct device *dev = &rvu->pdev->dev;
+ struct npc_mcam_kex *mcam_kex;
+ void __iomem *mkex_prfl_addr = NULL;
+ u64 prfl_sz;
+ int ret;
+
+	/* If the user has not selected an mkex profile */
+ if (rvu->kpu_fwdata_sz ||
+ !strncmp(mkex_profile, def_pfl_name, MKEX_NAME_LEN))
+ goto program_mkex;
+
+ /* Setting up the mapping for mkex profile image */
+ ret = npc_fwdb_prfl_img_map(rvu, &mkex_prfl_addr, &prfl_sz);
+ if (ret < 0)
+ goto program_mkex;
+
+ mcam_kex = (struct npc_mcam_kex __force *)mkex_prfl_addr;
+
+ while (((s64)prfl_sz > 0) && (mcam_kex->mkex_sign != MKEX_END_SIGN)) {
+ /* Compare with mkex mod_param name string */
+ if (mcam_kex->mkex_sign == MKEX_SIGN &&
+ !strncmp(mcam_kex->name, mkex_profile, MKEX_NAME_LEN)) {
+			/* Due to an erratum (35786) in A0/B0 pass silicon,
+ * parse nibble enable configuration has to be
+ * identical for both Rx and Tx interfaces.
+ */
+ if (!is_rvu_96xx_B0(rvu) ||
+ mcam_kex->keyx_cfg[NIX_INTF_RX] == mcam_kex->keyx_cfg[NIX_INTF_TX])
+ rvu->kpu.mkex = mcam_kex;
+ goto program_mkex;
+ }
+
+ mcam_kex++;
+ prfl_sz -= sizeof(struct npc_mcam_kex);
+ }
+ dev_warn(dev, "Failed to load requested profile: %s\n", mkex_profile);
+
+program_mkex:
+ dev_info(rvu->dev, "Using %s mkex profile\n", rvu->kpu.mkex->name);
+ /* Program selected mkex profile */
+ npc_program_mkex_profile(rvu, blkaddr, rvu->kpu.mkex);
+ if (mkex_prfl_addr)
+ iounmap(mkex_prfl_addr);
+}
+
+static void npc_config_kpuaction(struct rvu *rvu, int blkaddr,
+ const struct npc_kpu_profile_action *kpuaction,
+ int kpu, int entry, bool pkind)
+{
+ struct npc_kpu_action0 action0 = {0};
+ struct npc_kpu_action1 action1 = {0};
+ u64 reg;
+
+ action1.errlev = kpuaction->errlev;
+ action1.errcode = kpuaction->errcode;
+ action1.dp0_offset = kpuaction->dp0_offset;
+ action1.dp1_offset = kpuaction->dp1_offset;
+ action1.dp2_offset = kpuaction->dp2_offset;
+
+ if (pkind)
+ reg = NPC_AF_PKINDX_ACTION1(entry);
+ else
+ reg = NPC_AF_KPUX_ENTRYX_ACTION1(kpu, entry);
+
+ rvu_write64(rvu, blkaddr, reg, *(u64 *)&action1);
+
+ action0.byp_count = kpuaction->bypass_count;
+ action0.capture_ena = kpuaction->cap_ena;
+ action0.parse_done = kpuaction->parse_done;
+ action0.next_state = kpuaction->next_state;
+ action0.capture_lid = kpuaction->lid;
+ action0.capture_ltype = kpuaction->ltype;
+ action0.capture_flags = kpuaction->flags;
+ action0.ptr_advance = kpuaction->ptr_advance;
+ action0.var_len_offset = kpuaction->offset;
+ action0.var_len_mask = kpuaction->mask;
+ action0.var_len_right = kpuaction->right;
+ action0.var_len_shift = kpuaction->shift;
+
+ if (pkind)
+ reg = NPC_AF_PKINDX_ACTION0(entry);
+ else
+ reg = NPC_AF_KPUX_ENTRYX_ACTION0(kpu, entry);
+
+ rvu_write64(rvu, blkaddr, reg, *(u64 *)&action0);
+}
+
+static void npc_config_kpucam(struct rvu *rvu, int blkaddr,
+ const struct npc_kpu_profile_cam *kpucam,
+ int kpu, int entry)
+{
+ struct npc_kpu_cam cam0 = {0};
+ struct npc_kpu_cam cam1 = {0};
+
+ cam1.state = kpucam->state & kpucam->state_mask;
+ cam1.dp0_data = kpucam->dp0 & kpucam->dp0_mask;
+ cam1.dp1_data = kpucam->dp1 & kpucam->dp1_mask;
+ cam1.dp2_data = kpucam->dp2 & kpucam->dp2_mask;
+
+ cam0.state = ~kpucam->state & kpucam->state_mask;
+ cam0.dp0_data = ~kpucam->dp0 & kpucam->dp0_mask;
+ cam0.dp1_data = ~kpucam->dp1 & kpucam->dp1_mask;
+ cam0.dp2_data = ~kpucam->dp2 & kpucam->dp2_mask;
+
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_KPUX_ENTRYX_CAMX(kpu, entry, 0), *(u64 *)&cam0);
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_KPUX_ENTRYX_CAMX(kpu, entry, 1), *(u64 *)&cam1);
+}
+
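+/* Build the value written to NPC_AF_KPUX_ENTRY_DIS: bits [count-1:0]
+ * are cleared (those entries stay enabled) while all higher bits stay
+ * set (those entries stay disabled). For example, enable_mask(3) is
+ * ~0x7ULL; for count >= 64 the whole word is zero.
+ */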
+static inline u64 enable_mask(int count)
+{
+ return (((count) < 64) ? ~(BIT_ULL(count) - 1) : (0x00ULL));
+}
+
+static void npc_program_kpu_profile(struct rvu *rvu, int blkaddr, int kpu,
+ const struct npc_kpu_profile *profile)
+{
+ int entry, num_entries, max_entries;
+ u64 entry_mask;
+
+ if (profile->cam_entries != profile->action_entries) {
+ dev_err(rvu->dev,
+ "KPU%d: CAM and action entries [%d != %d] not equal\n",
+ kpu, profile->cam_entries, profile->action_entries);
+ }
+
+ max_entries = rvu->hw->npc_kpu_entries;
+
+ /* Program CAM match entries for previous KPU extracted data */
+ num_entries = min_t(int, profile->cam_entries, max_entries);
+ for (entry = 0; entry < num_entries; entry++)
+ npc_config_kpucam(rvu, blkaddr,
+ &profile->cam[entry], kpu, entry);
+
+ /* Program this KPU's actions */
+ num_entries = min_t(int, profile->action_entries, max_entries);
+ for (entry = 0; entry < num_entries; entry++)
+ npc_config_kpuaction(rvu, blkaddr, &profile->action[entry],
+ kpu, entry, false);
+
+ /* Enable all programmed entries */
+ num_entries = min_t(int, profile->action_entries, profile->cam_entries);
+ entry_mask = enable_mask(num_entries);
+ /* Disable first KPU_MAX_CST_ENT entries for built-in profile */
+ if (!rvu->kpu.custom)
+ entry_mask |= GENMASK_ULL(KPU_MAX_CST_ENT - 1, 0);
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_KPUX_ENTRY_DISX(kpu, 0), entry_mask);
+ if (num_entries > 64) {
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_KPUX_ENTRY_DISX(kpu, 1),
+ enable_mask(num_entries - 64));
+ }
+
+ /* Enable this KPU */
+ rvu_write64(rvu, blkaddr, NPC_AF_KPUX_CFG(kpu), 0x01);
+}
+
+static int npc_prepare_default_kpu(struct npc_kpu_profile_adapter *profile)
+{
+ profile->custom = 0;
+ profile->name = def_pfl_name;
+ profile->version = NPC_KPU_PROFILE_VER;
+ profile->ikpu = ikpu_action_entries;
+ profile->pkinds = ARRAY_SIZE(ikpu_action_entries);
+ profile->kpu = npc_kpu_profiles;
+ profile->kpus = ARRAY_SIZE(npc_kpu_profiles);
+ profile->lt_def = &npc_lt_defaults;
+ profile->mkex = &npc_mkex_default;
+ profile->mkex_hash = &npc_mkex_hash_default;
+
+ return 0;
+}
+
+static int npc_apply_custom_kpu(struct rvu *rvu,
+ struct npc_kpu_profile_adapter *profile)
+{
+ size_t hdr_sz = sizeof(struct npc_kpu_profile_fwdata), offset = 0;
+ struct npc_kpu_profile_fwdata *fw = rvu->kpu_fwdata;
+ struct npc_kpu_profile_action *action;
+ struct npc_kpu_profile_cam *cam;
+ struct npc_kpu_fwdata *fw_kpu;
+ int entries;
+ u16 kpu, entry;
+
+ if (rvu->kpu_fwdata_sz < hdr_sz) {
+ dev_warn(rvu->dev, "Invalid KPU profile size\n");
+ return -EINVAL;
+ }
+ if (le64_to_cpu(fw->signature) != KPU_SIGN) {
+ dev_warn(rvu->dev, "Invalid KPU profile signature %llx\n",
+ fw->signature);
+ return -EINVAL;
+ }
+ /* Verify that the profile uses a known structure */
+ if (NPC_KPU_VER_MAJ(profile->version) >
+ NPC_KPU_VER_MAJ(NPC_KPU_PROFILE_VER)) {
+ dev_warn(rvu->dev, "Not supported Major version: %d > %d\n",
+ NPC_KPU_VER_MAJ(profile->version),
+ NPC_KPU_VER_MAJ(NPC_KPU_PROFILE_VER));
+ return -EINVAL;
+ }
+ /* Verify if profile is aligned with the required kernel changes */
+ if (NPC_KPU_VER_MIN(profile->version) <
+ NPC_KPU_VER_MIN(NPC_KPU_PROFILE_VER)) {
+ dev_warn(rvu->dev,
+ "Invalid KPU profile version: %d.%d.%d expected version <= %d.%d.%d\n",
+ NPC_KPU_VER_MAJ(profile->version),
+ NPC_KPU_VER_MIN(profile->version),
+ NPC_KPU_VER_PATCH(profile->version),
+ NPC_KPU_VER_MAJ(NPC_KPU_PROFILE_VER),
+ NPC_KPU_VER_MIN(NPC_KPU_PROFILE_VER),
+ NPC_KPU_VER_PATCH(NPC_KPU_PROFILE_VER));
+ return -EINVAL;
+ }
+ /* Verify if profile fits the HW */
+ if (fw->kpus > profile->kpus) {
+ dev_warn(rvu->dev, "Not enough KPUs: %d > %ld\n", fw->kpus,
+ profile->kpus);
+ return -EINVAL;
+ }
+
+ profile->custom = 1;
+ profile->name = fw->name;
+ profile->version = le64_to_cpu(fw->version);
+ profile->mkex = &fw->mkex;
+ profile->lt_def = &fw->lt_def;
+
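+ /* fw->data packs one blob per KPU: a struct npc_kpu_fwdata header
+ * followed by 'entries' CAM entries and then 'entries' action
+ * entries; 'offset' below walks these blobs.
+ */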
+ for (kpu = 0; kpu < fw->kpus; kpu++) {
+ fw_kpu = (struct npc_kpu_fwdata *)(fw->data + offset);
+ if (fw_kpu->entries > KPU_MAX_CST_ENT)
+ dev_warn(rvu->dev,
+ "Too many custom entries on KPU%d: %d > %d\n",
+ kpu, fw_kpu->entries, KPU_MAX_CST_ENT);
+ entries = min(fw_kpu->entries, KPU_MAX_CST_ENT);
+ cam = (struct npc_kpu_profile_cam *)fw_kpu->data;
+ offset += sizeof(*fw_kpu) + fw_kpu->entries * sizeof(*cam);
+ action = (struct npc_kpu_profile_action *)(fw->data + offset);
+ offset += fw_kpu->entries * sizeof(*action);
+ if (rvu->kpu_fwdata_sz < hdr_sz + offset) {
+ dev_warn(rvu->dev,
+ "Profile size mismatch on KPU%i parsing.\n",
+ kpu + 1);
+ return -EINVAL;
+ }
+ for (entry = 0; entry < entries; entry++) {
+ profile->kpu[kpu].cam[entry] = cam[entry];
+ profile->kpu[kpu].action[entry] = action[entry];
+ }
+ }
+
+ return 0;
+}
+
+static int npc_load_kpu_prfl_img(struct rvu *rvu, void __iomem *prfl_addr,
+ u64 prfl_sz, const char *kpu_profile)
+{
+ struct npc_kpu_profile_fwdata *kpu_data = NULL;
+ int rc = -EINVAL;
+
+ kpu_data = (struct npc_kpu_profile_fwdata __force *)prfl_addr;
+ if (le64_to_cpu(kpu_data->signature) == KPU_SIGN &&
+ !strncmp(kpu_data->name, kpu_profile, KPU_NAME_LEN)) {
+ dev_info(rvu->dev, "Loading KPU profile from firmware db: %s\n",
+ kpu_profile);
+ rvu->kpu_fwdata = kpu_data;
+ rvu->kpu_fwdata_sz = prfl_sz;
+ rvu->kpu_prfl_addr = prfl_addr;
+ rc = 0;
+ }
+
+ return rc;
+}
+
+static int npc_fwdb_detect_load_prfl_img(struct rvu *rvu, uint64_t prfl_sz,
+ const char *kpu_profile)
+{
+ struct npc_coalesced_kpu_prfl *img_data = NULL;
+ int i = 0, rc = -EINVAL;
+ void __iomem *kpu_prfl_addr;
+ u16 offset;
+
+ img_data = (struct npc_coalesced_kpu_prfl __force *)rvu->kpu_prfl_addr;
+ if (le64_to_cpu(img_data->signature) == KPU_SIGN &&
+ !strncmp(img_data->name, kpu_profile, KPU_NAME_LEN)) {
+ /* Loaded profile is a single KPU profile. */
+ rc = npc_load_kpu_prfl_img(rvu, rvu->kpu_prfl_addr,
+ prfl_sz, kpu_profile);
+ goto done;
+ }
+
+ /* Loaded profile is a coalesced image; compute the offset of the first KPU profile. */
+ offset = offsetof(struct npc_coalesced_kpu_prfl, prfl_sz) +
+ (img_data->num_prfl * sizeof(uint16_t));
+ /* Walk the coalesced image looking for the requested profile. */
+ while (i < img_data->num_prfl) {
+ /* Profile image offsets are rounded up to the next multiple of 8. */
+ offset = ALIGN_8B_CEIL(offset);
+ kpu_prfl_addr = (void __iomem *)((uintptr_t)rvu->kpu_prfl_addr +
+ offset);
+ rc = npc_load_kpu_prfl_img(rvu, kpu_prfl_addr,
+ img_data->prfl_sz[i], kpu_profile);
+ if (!rc)
+ break;
+ /* Advance to the next profile image based on this profile's size. */
+ offset += img_data->prfl_sz[i];
+ i++;
+ }
+done:
+ return rc;
+}
+
+static int npc_load_kpu_profile_fwdb(struct rvu *rvu, const char *kpu_profile)
+{
+ int ret = -EINVAL;
+ u64 prfl_sz;
+
+ /* Setting up the mapping for NPC profile image */
+ ret = npc_fwdb_prfl_img_map(rvu, &rvu->kpu_prfl_addr, &prfl_sz);
+ if (ret < 0)
+ goto done;
+
+ /* Detect if profile is coalesced or single KPU profile and load */
+ ret = npc_fwdb_detect_load_prfl_img(rvu, prfl_sz, kpu_profile);
+ if (ret == 0)
+ goto done;
+
+ /* Cleaning up if KPU profile image from fwdata is not valid. */
+ if (rvu->kpu_prfl_addr) {
+ iounmap(rvu->kpu_prfl_addr);
+ rvu->kpu_prfl_addr = NULL;
+ rvu->kpu_fwdata_sz = 0;
+ rvu->kpu_fwdata = NULL;
+ }
+
+done:
+ return ret;
+}
+
+static void npc_load_kpu_profile(struct rvu *rvu)
+{
+ struct npc_kpu_profile_adapter *profile = &rvu->kpu;
+ const char *kpu_profile = rvu->kpu_pfl_name;
+ const struct firmware *fw = NULL;
+ bool retry_fwdb = false;
+
+ /* If the user has not requested profile customization */
+ if (!strncmp(kpu_profile, def_pfl_name, KPU_NAME_LEN))
+ goto revert_to_default;
+ /* First prepare default KPU, then we'll customize top entries. */
+ npc_prepare_default_kpu(profile);
+
+ /* Order of precedence for loading the NPC profile (high to low):
+ * Firmware binary in filesystem.
+ * Firmware database method.
+ * Default KPU profile.
+ */
+ if (!request_firmware_direct(&fw, kpu_profile, rvu->dev)) {
+ dev_info(rvu->dev, "Loading KPU profile from firmware: %s\n",
+ kpu_profile);
+ rvu->kpu_fwdata = kzalloc(fw->size, GFP_KERNEL);
+ if (rvu->kpu_fwdata) {
+ memcpy(rvu->kpu_fwdata, fw->data, fw->size);
+ rvu->kpu_fwdata_sz = fw->size;
+ }
+ release_firmware(fw);
+ retry_fwdb = true;
+ goto program_kpu;
+ }
+
+load_image_fwdb:
+ /* Loading the KPU profile using firmware database */
+ if (npc_load_kpu_profile_fwdb(rvu, kpu_profile))
+ goto revert_to_default;
+
+program_kpu:
+ /* Apply profile customization if firmware was loaded. */
+ if (!rvu->kpu_fwdata_sz || npc_apply_custom_kpu(rvu, profile)) {
+ /* If the image from the firmware filesystem fails to load or is
+ * invalid, retry with the firmware database method.
+ */
+ if (rvu->kpu_fwdata || rvu->kpu_fwdata_sz) {
+ /* Loading image from firmware database failed. */
+ if (rvu->kpu_prfl_addr) {
+ iounmap(rvu->kpu_prfl_addr);
+ rvu->kpu_prfl_addr = NULL;
+ } else {
+ kfree(rvu->kpu_fwdata);
+ }
+ rvu->kpu_fwdata = NULL;
+ rvu->kpu_fwdata_sz = 0;
+ if (retry_fwdb) {
+ retry_fwdb = false;
+ goto load_image_fwdb;
+ }
+ }
+
+ dev_warn(rvu->dev,
+ "Can't load KPU profile %s. Using default.\n",
+ kpu_profile);
+ kfree(rvu->kpu_fwdata);
+ rvu->kpu_fwdata = NULL;
+ goto revert_to_default;
+ }
+
+ dev_info(rvu->dev, "Using custom profile '%s', version %d.%d.%d\n",
+ profile->name, NPC_KPU_VER_MAJ(profile->version),
+ NPC_KPU_VER_MIN(profile->version),
+ NPC_KPU_VER_PATCH(profile->version));
+
+ return;
+
+revert_to_default:
+ npc_prepare_default_kpu(profile);
+}
+
+static void npc_parser_profile_init(struct rvu *rvu, int blkaddr)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ int num_pkinds, num_kpus, idx;
+
+ /* Disable all KPUs and their entries */
+ for (idx = 0; idx < hw->npc_kpus; idx++) {
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_KPUX_ENTRY_DISX(idx, 0), ~0ULL);
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_KPUX_ENTRY_DISX(idx, 1), ~0ULL);
+ rvu_write64(rvu, blkaddr, NPC_AF_KPUX_CFG(idx), 0x00);
+ }
+
+ /* Load and customize KPU profile. */
+ npc_load_kpu_profile(rvu);
+
+ /* First program the IKPU profile, i.e. the PKIND configs.
+ * Check HW max count to avoid configuring junk or
+ * writing to unsupported CSR addresses.
+ */
+ num_pkinds = rvu->kpu.pkinds;
+ num_pkinds = min_t(int, hw->npc_pkinds, num_pkinds);
+
+ for (idx = 0; idx < num_pkinds; idx++)
+ npc_config_kpuaction(rvu, blkaddr, &rvu->kpu.ikpu[idx], 0, idx, true);
+
+ /* Program KPU CAM and Action profiles */
+ num_kpus = rvu->kpu.kpus;
+ num_kpus = min_t(int, hw->npc_kpus, num_kpus);
+
+ for (idx = 0; idx < num_kpus; idx++)
+ npc_program_kpu_profile(rvu, blkaddr, idx, &rvu->kpu.kpu[idx]);
+}
+
+static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr)
+{
+ int nixlf_count = rvu_get_nixlf_count(rvu);
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ int rsvd, err;
+ u16 index;
+ int cntr;
+ u64 cfg;
+
+ /* Actual number of MCAM entries varies with the entry (key) size */
+ cfg = (rvu_read64(rvu, blkaddr,
+ NPC_AF_INTFX_KEX_CFG(0)) >> 32) & 0x07;
+ mcam->total_entries = (mcam->banks / BIT_ULL(cfg)) * mcam->banksize;
+ mcam->keysize = cfg;
+
+ /* Number of banks combined per MCAM entry */
+ if (cfg == NPC_MCAM_KEY_X4)
+ mcam->banks_per_entry = 4;
+ else if (cfg == NPC_MCAM_KEY_X2)
+ mcam->banks_per_entry = 2;
+ else
+ mcam->banks_per_entry = 1;
+
+ /* Reserve one MCAM entry for each NIX LF to guarantee space
+ * to install a default DMAC match rule.
+ * Also reserve 2 MCAM entries for each PF for default
+ * channel based matching or 'bcast & promisc' matching to
+ * support BCAST and PROMISC modes of operation for PFs.
+ * PF0 is excluded.
+ */
+ rsvd = (nixlf_count * RSVD_MCAM_ENTRIES_PER_NIXLF) +
+ ((rvu->hw->total_pfs - 1) * RSVD_MCAM_ENTRIES_PER_PF);
+ if (mcam->total_entries <= rsvd) {
+ dev_warn(rvu->dev,
+ "Insufficient NPC MCAM size %d for pkt I/O, exiting\n",
+ mcam->total_entries);
+ return -ENOMEM;
+ }
+
+ mcam->bmap_entries = mcam->total_entries - rsvd;
+ mcam->nixlf_offset = mcam->bmap_entries;
+ mcam->pf_offset = mcam->nixlf_offset + nixlf_count;
+
+ /* Allocate bitmaps for managing MCAM entries */
+ mcam->bmap = devm_kcalloc(rvu->dev, BITS_TO_LONGS(mcam->bmap_entries),
+ sizeof(long), GFP_KERNEL);
+ if (!mcam->bmap)
+ return -ENOMEM;
+
+ mcam->bmap_reverse = devm_kcalloc(rvu->dev,
+ BITS_TO_LONGS(mcam->bmap_entries),
+ sizeof(long), GFP_KERNEL);
+ if (!mcam->bmap_reverse)
+ return -ENOMEM;
+
+ mcam->bmap_fcnt = mcam->bmap_entries;
+
+ /* Alloc memory for saving entry to RVU PFFUNC allocation mapping */
+ mcam->entry2pfvf_map = devm_kcalloc(rvu->dev, mcam->bmap_entries,
+ sizeof(u16), GFP_KERNEL);
+ if (!mcam->entry2pfvf_map)
+ return -ENOMEM;
+
+ /* Reserve 1/8th of MCAM entries at the bottom for low priority
+ * allocations and another 1/8th at the top for high priority
+ * allocations.
+ */
+ mcam->lprio_count = mcam->bmap_entries / 8;
+ if (mcam->lprio_count > BITS_PER_LONG)
+ mcam->lprio_count = round_down(mcam->lprio_count,
+ BITS_PER_LONG);
+ mcam->lprio_start = mcam->bmap_entries - mcam->lprio_count;
+ mcam->hprio_count = mcam->lprio_count;
+ mcam->hprio_end = mcam->hprio_count;
+
+ /* Allocate bitmap for managing MCAM counters and memory
+ * for saving counter to RVU PFFUNC allocation mapping.
+ */
+ err = rvu_alloc_bitmap(&mcam->counters);
+ if (err)
+ return err;
+
+ mcam->cntr2pfvf_map = devm_kcalloc(rvu->dev, mcam->counters.max,
+ sizeof(u16), GFP_KERNEL);
+ if (!mcam->cntr2pfvf_map)
+ goto free_mem;
+
+ /* Alloc memory for MCAM entry to counter mapping and for tracking
+ * counter's reference count.
+ */
+ mcam->entry2cntr_map = devm_kcalloc(rvu->dev, mcam->bmap_entries,
+ sizeof(u16), GFP_KERNEL);
+ if (!mcam->entry2cntr_map)
+ goto free_mem;
+
+ mcam->cntr_refcnt = devm_kcalloc(rvu->dev, mcam->counters.max,
+ sizeof(u16), GFP_KERNEL);
+ if (!mcam->cntr_refcnt)
+ goto free_mem;
+
+ /* Alloc memory for saving target device of mcam rule */
+ mcam->entry2target_pffunc = devm_kcalloc(rvu->dev, mcam->total_entries,
+ sizeof(u16), GFP_KERNEL);
+ if (!mcam->entry2target_pffunc)
+ goto free_mem;
+
+ for (index = 0; index < mcam->bmap_entries; index++) {
+ mcam->entry2pfvf_map[index] = NPC_MCAM_INVALID_MAP;
+ mcam->entry2cntr_map[index] = NPC_MCAM_INVALID_MAP;
+ }
+
+ for (cntr = 0; cntr < mcam->counters.max; cntr++)
+ mcam->cntr2pfvf_map[cntr] = NPC_MCAM_INVALID_MAP;
+
+ mutex_init(&mcam->lock);
+
+ return 0;
+
+free_mem:
+ kfree(mcam->counters.bmap);
+ return -ENOMEM;
+}
+
+static void rvu_npc_hw_init(struct rvu *rvu, int blkaddr)
+{
+ struct npc_pkind *pkind = &rvu->hw->pkind;
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_hwinfo *hw = rvu->hw;
+ u64 npc_const, npc_const1;
+ u64 npc_const2 = 0;
+
+ npc_const = rvu_read64(rvu, blkaddr, NPC_AF_CONST);
+ npc_const1 = rvu_read64(rvu, blkaddr, NPC_AF_CONST1);
+ if (npc_const1 & BIT_ULL(63))
+ npc_const2 = rvu_read64(rvu, blkaddr, NPC_AF_CONST2);
+
+ pkind->rsrc.max = NPC_UNRESERVED_PKIND_COUNT;
+ hw->npc_pkinds = (npc_const1 >> 12) & 0xFFULL;
+ hw->npc_kpu_entries = npc_const1 & 0xFFFULL;
+ hw->npc_kpus = (npc_const >> 8) & 0x1FULL;
+ hw->npc_intfs = npc_const & 0xFULL;
+ hw->npc_counters = (npc_const >> 48) & 0xFFFFULL;
+
+ mcam->banks = (npc_const >> 44) & 0xFULL;
+ mcam->banksize = (npc_const >> 28) & 0xFFFFULL;
+ hw->npc_stat_ena = BIT_ULL(9);
+ /* Extended set */
+ if (npc_const2) {
+ hw->npc_ext_set = true;
+ /* 96xx supports only match_stats, with npc_counters
+ * reflected in the NPC_AF_CONST reg.
+ * STAT_SEL and ENA are at bit positions [0:8] and 9.
+ * 98xx has both match_stat and ext, with npc_counter
+ * reflected in NPC_AF_CONST2.
+ * STAT_SEL_EXT is added at bit positions [12:14].
+ * cn10k supports only ext, hence npc_counters in
+ * NPC_AF_CONST is 0 and npc_counters is reflected in NPC_AF_CONST2.
+ * STAT_SEL grows from [0:8] to [0:11] and the ENA bit moves to bit 63.
+ */
+ if (!hw->npc_counters)
+ hw->npc_stat_ena = BIT_ULL(63);
+ hw->npc_counters = (npc_const2 >> 16) & 0xFFFFULL;
+ mcam->banksize = npc_const2 & 0xFFFFULL;
+ }
+
+ mcam->counters.max = hw->npc_counters;
+}
+
+static void rvu_npc_setup_interfaces(struct rvu *rvu, int blkaddr)
+{
+ struct npc_mcam_kex *mkex = rvu->kpu.mkex;
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_hwinfo *hw = rvu->hw;
+ u64 nibble_ena, rx_kex, tx_kex;
+ u8 intf;
+
+ /* Reserve last counter for MCAM RX miss action which is set to
+ * drop packet. This way we will know how many pkts didn't match
+ * any MCAM entry.
+ */
+ mcam->counters.max--;
+ mcam->rx_miss_act_cntr = mcam->counters.max;
+
+ rx_kex = mkex->keyx_cfg[NIX_INTF_RX];
+ tx_kex = mkex->keyx_cfg[NIX_INTF_TX];
+ nibble_ena = FIELD_GET(NPC_PARSE_NIBBLE, rx_kex);
+
+ nibble_ena = rvu_npc_get_tx_nibble_cfg(rvu, nibble_ena);
+ if (nibble_ena) {
+ tx_kex &= ~NPC_PARSE_NIBBLE;
+ tx_kex |= FIELD_PREP(NPC_PARSE_NIBBLE, nibble_ena);
+ mkex->keyx_cfg[NIX_INTF_TX] = tx_kex;
+ }
+
+ /* Configure RX interfaces */
+ for (intf = 0; intf < hw->npc_intfs; intf++) {
+ if (is_npc_intf_tx(intf))
+ continue;
+
+ /* Set RX MCAM search key size. LA..LE (ltype only) + Channel */
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(intf),
+ rx_kex);
+
+ /* If MCAM lookup doesn't result in a match, drop the received
+ * packet. And map this action to a counter to count dropped
+ * packets.
+ */
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_INTFX_MISS_ACT(intf), NIX_RX_ACTIONOP_DROP);
+
+ /* NPC_AF_INTFX_MISS_STAT_ACT[14:12] - counter[11:9]
+ * NPC_AF_INTFX_MISS_STAT_ACT[8:0] - counter[8:0]
+ */
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_INTFX_MISS_STAT_ACT(intf),
+ ((mcam->rx_miss_act_cntr >> 9) << 12) |
+ hw->npc_stat_ena | mcam->rx_miss_act_cntr);
+ }
+
+ /* Configure TX interfaces */
+ for (intf = 0; intf < hw->npc_intfs; intf++) {
+ if (is_npc_intf_rx(intf))
+ continue;
+
+ /* Extract Ltypes LID_LA to LID_LE */
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(intf),
+ tx_kex);
+
+ /* Set TX miss action to UCAST_DEFAULT, i.e. transmit the
+ * packet on the NIX LF SQ's default channel.
+ */
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_INTFX_MISS_ACT(intf),
+ NIX_TX_ACTIONOP_UCAST_DEFAULT);
+ }
+}
+
+int rvu_npc_init(struct rvu *rvu)
+{
+ struct npc_kpu_profile_adapter *kpu = &rvu->kpu;
+ struct npc_pkind *pkind = &rvu->hw->pkind;
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ int blkaddr, entry, bank, err;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0) {
+ dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__);
+ return -ENODEV;
+ }
+
+ rvu_npc_hw_init(rvu, blkaddr);
+
+ /* First disable all MCAM entries, to stop traffic towards NIXLFs */
+ for (bank = 0; bank < mcam->banks; bank++) {
+ for (entry = 0; entry < mcam->banksize; entry++)
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CFG(entry, bank), 0);
+ }
+
+ err = rvu_alloc_bitmap(&pkind->rsrc);
+ if (err)
+ return err;
+ /* Reserve PKIND#0 for LBKs. The reset value of LBK_CH_PKIND is '0',
+ * so there is no need to configure PKIND for all LBKs separately.
+ */
+ rvu_alloc_rsrc(&pkind->rsrc);
+
+ /* Allocate mem for pkind to PF and channel mapping info */
+ pkind->pfchan_map = devm_kcalloc(rvu->dev, pkind->rsrc.max,
+ sizeof(u32), GFP_KERNEL);
+ if (!pkind->pfchan_map)
+ return -ENOMEM;
+
+ /* Configure KPU profile */
+ npc_parser_profile_init(rvu, blkaddr);
+
+ /* Config Outer L2, IPv4's NPC layer info */
+ rvu_write64(rvu, blkaddr, NPC_AF_PCK_DEF_OL2,
+ (kpu->lt_def->pck_ol2.lid << 8) | (kpu->lt_def->pck_ol2.ltype_match << 4) |
+ kpu->lt_def->pck_ol2.ltype_mask);
+ rvu_write64(rvu, blkaddr, NPC_AF_PCK_DEF_OIP4,
+ (kpu->lt_def->pck_oip4.lid << 8) | (kpu->lt_def->pck_oip4.ltype_match << 4) |
+ kpu->lt_def->pck_oip4.ltype_mask);
+
+ /* Config Inner IPV4 NPC layer info */
+ rvu_write64(rvu, blkaddr, NPC_AF_PCK_DEF_IIP4,
+ (kpu->lt_def->pck_iip4.lid << 8) | (kpu->lt_def->pck_iip4.ltype_match << 4) |
+ kpu->lt_def->pck_iip4.ltype_mask);
+
+ /* Enable below for Rx pkts.
+ * - Outer IPv4 header checksum validation.
+ * - Detect outer L2 broadcast address and set NPC_RESULT_S[L2B].
+ * - Detect outer L2 multicast address and set NPC_RESULT_S[L2M].
+ * - Inner IPv4 header checksum validation.
+ * - Set a non-zero checksum error code value.
+ */
+ rvu_write64(rvu, blkaddr, NPC_AF_PCK_CFG,
+ rvu_read64(rvu, blkaddr, NPC_AF_PCK_CFG) |
+ ((u64)NPC_EC_OIP4_CSUM << 32) | (NPC_EC_IIP4_CSUM << 24) |
+ BIT_ULL(7) | BIT_ULL(6) | BIT_ULL(2) | BIT_ULL(1));
+
+ rvu_npc_setup_interfaces(rvu, blkaddr);
+
+ npc_config_secret_key(rvu, blkaddr);
+ /* Configure MKEX profile */
+ npc_load_mkex_profile(rvu, blkaddr, rvu->mkex_pfl_name);
+
+ err = npc_mcam_rsrcs_init(rvu, blkaddr);
+ if (err)
+ return err;
+
+ err = npc_flow_steering_init(rvu, blkaddr);
+ if (err) {
+ dev_err(rvu->dev,
+ "Incorrect mkex profile loaded using default mkex\n");
+ npc_load_mkex_profile(rvu, blkaddr, def_pfl_name);
+ }
+
+ return 0;
+}
+
+void rvu_npc_freemem(struct rvu *rvu)
+{
+ struct npc_pkind *pkind = &rvu->hw->pkind;
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+
+ kfree(pkind->rsrc.bmap);
+ kfree(mcam->counters.bmap);
+ if (rvu->kpu_prfl_addr)
+ iounmap(rvu->kpu_prfl_addr);
+ else
+ kfree(rvu->kpu_fwdata);
+ mutex_destroy(&mcam->lock);
+}
+
+void rvu_npc_get_mcam_entry_alloc_info(struct rvu *rvu, u16 pcifunc,
+ int blkaddr, int *alloc_cnt,
+ int *enable_cnt)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ int entry;
+
+ *alloc_cnt = 0;
+ *enable_cnt = 0;
+
+ for (entry = 0; entry < mcam->bmap_entries; entry++) {
+ if (mcam->entry2pfvf_map[entry] == pcifunc) {
+ (*alloc_cnt)++;
+ if (is_mcam_entry_enabled(rvu, mcam, blkaddr, entry))
+ (*enable_cnt)++;
+ }
+ }
+}
+
+void rvu_npc_get_mcam_counter_alloc_info(struct rvu *rvu, u16 pcifunc,
+ int blkaddr, int *alloc_cnt,
+ int *enable_cnt)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ int cntr;
+
+ *alloc_cnt = 0;
+ *enable_cnt = 0;
+
+ for (cntr = 0; cntr < mcam->counters.max; cntr++) {
+ if (mcam->cntr2pfvf_map[cntr] == pcifunc) {
+ (*alloc_cnt)++;
+ if (mcam->cntr_refcnt[cntr])
+ (*enable_cnt)++;
+ }
+ }
+}
+
+static int npc_mcam_verify_entry(struct npc_mcam *mcam,
+ u16 pcifunc, int entry)
+{
+ /* verify AF installed entries */
+ if (is_pffunc_af(pcifunc))
+ return 0;
+ /* Verify if entry is valid and if it is indeed
+ * allocated to the requesting PFFUNC.
+ */
+ if (entry >= mcam->bmap_entries)
+ return NPC_MCAM_INVALID_REQ;
+
+ if (pcifunc != mcam->entry2pfvf_map[entry])
+ return NPC_MCAM_PERM_DENIED;
+
+ return 0;
+}
+
+static int npc_mcam_verify_counter(struct npc_mcam *mcam,
+ u16 pcifunc, int cntr)
+{
+ /* Verify if counter is valid and if it is indeed
+ * allocated to the requesting PFFUNC.
+ */
+ if (cntr >= mcam->counters.max)
+ return NPC_MCAM_INVALID_REQ;
+
+ if (pcifunc != mcam->cntr2pfvf_map[cntr])
+ return NPC_MCAM_PERM_DENIED;
+
+ return 0;
+}
+
+static void npc_map_mcam_entry_and_cntr(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, u16 entry, u16 cntr)
+{
+ u16 index = entry & (mcam->banksize - 1);
+ u32 bank = npc_get_bank(mcam, entry);
+ struct rvu_hwinfo *hw = rvu->hw;
+
+ /* Set mapping and increment counter's refcnt */
+ mcam->entry2cntr_map[entry] = cntr;
+ mcam->cntr_refcnt[cntr]++;
+ /* Enable stats */
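+ /* STAT_ACT layout: counter[8:0] at bits [8:0], counter[11:9] at
+ * bits [14:12], plus the silicon-specific enable bit taken from
+ * hw->npc_stat_ena.
+ */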
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_STAT_ACT(index, bank),
+ ((cntr >> 9) << 12) | hw->npc_stat_ena | cntr);
+}
+
+static void npc_unmap_mcam_entry_and_cntr(struct rvu *rvu,
+ struct npc_mcam *mcam,
+ int blkaddr, u16 entry, u16 cntr)
+{
+ u16 index = entry & (mcam->banksize - 1);
+ u32 bank = npc_get_bank(mcam, entry);
+
+ /* Remove mapping and reduce counter's refcnt */
+ mcam->entry2cntr_map[entry] = NPC_MCAM_INVALID_MAP;
+ mcam->cntr_refcnt[cntr]--;
+ /* Disable stats */
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_STAT_ACT(index, bank), 0x00);
+}
+
+/* Sets MCAM entry in bitmap as used. Update
+ * reverse bitmap too. Should be called with
+ * 'mcam->lock' held.
+ */
+static void npc_mcam_set_bit(struct npc_mcam *mcam, u16 index)
+{
+ u16 entry, rentry;
+
+ entry = index;
+ rentry = mcam->bmap_entries - index - 1;
+
+ __set_bit(entry, mcam->bmap);
+ __set_bit(rentry, mcam->bmap_reverse);
+ mcam->bmap_fcnt--;
+}
+
+/* Sets MCAM entry in bitmap as free. Update
+ * reverse bitmap too. Should be called with
+ * 'mcam->lock' held.
+ */
+static void npc_mcam_clear_bit(struct npc_mcam *mcam, u16 index)
+{
+ u16 entry, rentry;
+
+ entry = index;
+ rentry = mcam->bmap_entries - index - 1;
+
+ __clear_bit(entry, mcam->bmap);
+ __clear_bit(rentry, mcam->bmap_reverse);
+ mcam->bmap_fcnt++;
+}
+
+static void npc_mcam_free_all_entries(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, u16 pcifunc)
+{
+ u16 index, cntr;
+
+ /* Scan all MCAM entries and free the ones mapped to 'pcifunc' */
+ for (index = 0; index < mcam->bmap_entries; index++) {
+ if (mcam->entry2pfvf_map[index] == pcifunc) {
+ mcam->entry2pfvf_map[index] = NPC_MCAM_INVALID_MAP;
+ /* Free the entry in bitmap */
+ npc_mcam_clear_bit(mcam, index);
+ /* Disable the entry */
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, index, false);
+
+ /* Update entry2counter mapping */
+ cntr = mcam->entry2cntr_map[index];
+ if (cntr != NPC_MCAM_INVALID_MAP)
+ npc_unmap_mcam_entry_and_cntr(rvu, mcam,
+ blkaddr, index,
+ cntr);
+ mcam->entry2target_pffunc[index] = 0x0;
+ }
+ }
+}
+
+static void npc_mcam_free_all_counters(struct rvu *rvu, struct npc_mcam *mcam,
+ u16 pcifunc)
+{
+ u16 cntr;
+
+ /* Scan all MCAM counters and free the ones mapped to 'pcifunc' */
+ for (cntr = 0; cntr < mcam->counters.max; cntr++) {
+ if (mcam->cntr2pfvf_map[cntr] == pcifunc) {
+ mcam->cntr2pfvf_map[cntr] = NPC_MCAM_INVALID_MAP;
+ mcam->cntr_refcnt[cntr] = 0;
+ rvu_free_rsrc(&mcam->counters, cntr);
+ /* This API is expected to be called after freeing
+ * MCAM entries, which in turn removes the
+ * 'entry to counter' mapping, so there is no need
+ * to do it again here.
+ */
+ }
+ }
+}
+
+/* Find an area of 'nr' contiguous free entries.
+ * If no such area exists, the largest contiguous free area found is
+ * reported via 'max_area' and its start index is returned.
+ */
+static u16 npc_mcam_find_zero_area(unsigned long *map, u16 size, u16 start,
+ u16 nr, u16 *max_area)
+{
+ u16 max_area_start = 0;
+ u16 index, next, end;
+
+ *max_area = 0;
+
+again:
+ index = find_next_zero_bit(map, size, start);
+ if (index >= size)
+ return max_area_start;
+
+ end = ((index + nr) >= size) ? size : index + nr;
+ next = find_next_bit(map, end, index);
+ if (*max_area < (next - index)) {
+ *max_area = next - index;
+ max_area_start = index;
+ }
+
+ if (next < end) {
+ start = next + 1;
+ goto again;
+ }
+
+ return max_area_start;
+}
+
+/* Find the number of free MCAM entries available
+ * within the range ['start', 'end').
+ */
+static u16 npc_mcam_get_free_count(unsigned long *map, u16 start, u16 end)
+{
+ u16 index, next;
+ u16 fcnt = 0;
+
+again:
+ if (start >= end)
+ return fcnt;
+
+ index = find_next_zero_bit(map, end, start);
+ if (index >= end)
+ return fcnt;
+
+ next = find_next_bit(map, end, index);
+ if (next <= end) {
+ fcnt += next - index;
+ start = next + 1;
+ goto again;
+ }
+
+ fcnt += end - index;
+ return fcnt;
+}
+
+static void
+npc_get_mcam_search_range_priority(struct npc_mcam *mcam,
+ struct npc_mcam_alloc_entry_req *req,
+ u16 *start, u16 *end, bool *reverse)
+{
+ u16 fcnt;
+
+ if (req->priority == NPC_MCAM_HIGHER_PRIO)
+ goto hprio;
+
+ /* For a low priority entry allocation:
+ * - If the reference entry is not in the hprio zone then
+ * search range: ref_entry to end.
+ * - If the reference entry is in the hprio zone and the
+ * request can be accommodated in the non-hprio zone then
+ * search range: 'start of middle zone' to 'end'.
+ * - Else search in reverse, so that fewer hprio
+ * zone entries are allocated.
+ */
+
+ *reverse = false;
+ *start = req->ref_entry + 1;
+ *end = mcam->bmap_entries;
+
+ if (req->ref_entry >= mcam->hprio_end)
+ return;
+
+ fcnt = npc_mcam_get_free_count(mcam->bmap,
+ mcam->hprio_end, mcam->bmap_entries);
+ if (fcnt > req->count)
+ *start = mcam->hprio_end;
+ else
+ *reverse = true;
+ return;
+
+hprio:
+ /* For a high priority entry allocation, search is always
+ * in reverse to preserve hprio zone entries.
+ * - If reference entry is not in lprio zone then
+ * search range: 0 to ref_entry.
+ * - If the reference entry is in the lprio zone and the
+ * request can be accommodated in the middle zone then
+ * search range: 'hprio_end' to 'lprio_start'
+ */
+
+ *reverse = true;
+ *start = 0;
+ *end = req->ref_entry;
+
+ if (req->ref_entry <= mcam->lprio_start)
+ return;
+
+ fcnt = npc_mcam_get_free_count(mcam->bmap,
+ mcam->hprio_end, mcam->lprio_start);
+ if (fcnt < req->count)
+ return;
+ *start = mcam->hprio_end;
+ *end = mcam->lprio_start;
+}
+
+static int npc_mcam_alloc_entries(struct npc_mcam *mcam, u16 pcifunc,
+ struct npc_mcam_alloc_entry_req *req,
+ struct npc_mcam_alloc_entry_rsp *rsp)
+{
+ u16 entry_list[NPC_MAX_NONCONTIG_ENTRIES];
+ u16 fcnt, hp_fcnt, lp_fcnt;
+ u16 start, end, index;
+ int entry, next_start;
+ bool reverse = false;
+ unsigned long *bmap;
+ u16 max_contig;
+
+ mutex_lock(&mcam->lock);
+
+ /* Check if there are any free entries */
+ if (!mcam->bmap_fcnt) {
+ mutex_unlock(&mcam->lock);
+ return NPC_MCAM_ALLOC_FAILED;
+ }
+
+ /* MCAM entries are divided into high priority, middle and
+ * low priority zones. The idea is to avoid allocating the topmost
+ * and bottommost entries as much as possible, to increase the
+ * probability of honouring priority allocation requests.
+ *
+ * Two bitmaps are used for mcam entry management:
+ * mcam->bmap for forward search i.e. '0 to mcam->bmap_entries',
+ * mcam->bmap_reverse for reverse search i.e. 'mcam->bmap_entries to 0'.
+ *
+ * The reverse bitmap is used to allocate entries
+ * - when a higher priority entry is requested
+ * - when available free entries are few.
+ * Out of the available free entries, lower priority ones are always
+ * chosen when the 'high vs low' question arises.
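+ *
+ * Zone layout (entry indices increase downward):
+ * [0, hprio_end) - high priority zone
+ * [hprio_end, lprio_start) - middle zone
+ * [lprio_start, bmap_entries) - low priority zone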
+ */
+
+ /* Get the search range for priority allocation request */
+ if (req->priority) {
+ npc_get_mcam_search_range_priority(mcam, req,
+ &start, &end, &reverse);
+ goto alloc;
+ }
+
+ /* For a VF, the base MCAM match rule is set by its PF, and all
+ * further MCAM rules installed by the VF on its own are
+ * concatenated with that base rule. Hence PF entries should be
+ * at lower priority than VF entries; otherwise the base rule is
+ * always hit and rules installed by the VF are of no use.
+ * So if the request is from a PF and NOT a priority allocation
+ * request, allocate low priority entries.
+ */
+ if (!(pcifunc & RVU_PFVF_FUNC_MASK))
+ goto lprio_alloc;
+
+ /* Find out the search range for non-priority allocation request
+ *
+ * Get MCAM free entry count in middle zone.
+ */
+ lp_fcnt = npc_mcam_get_free_count(mcam->bmap,
+ mcam->lprio_start,
+ mcam->bmap_entries);
+ hp_fcnt = npc_mcam_get_free_count(mcam->bmap, 0, mcam->hprio_end);
+ fcnt = mcam->bmap_fcnt - lp_fcnt - hp_fcnt;
+
+ /* Check if the request can be accommodated in the middle zone */
+ if (fcnt > req->count) {
+ start = mcam->hprio_end;
+ end = mcam->lprio_start;
+ } else if ((fcnt + (hp_fcnt / 2) + (lp_fcnt / 2)) > req->count) {
+ /* Expand search zone from half of hprio zone to
+ * half of lprio zone.
+ */
+ start = mcam->hprio_end / 2;
+ end = mcam->bmap_entries - (mcam->lprio_count / 2);
+ reverse = true;
+ } else {
+ /* Not enough free entries, search all entries in reverse,
+ * so that low priority ones will get used up.
+ */
+lprio_alloc:
+ reverse = true;
+ start = 0;
+ end = mcam->bmap_entries;
+ }
+
+alloc:
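+ /* In the reverse bitmap entry 'i' lives at bit 'bmap_entries - i - 1',
+ * so translate the [start, end) window accordingly (and swap the two,
+ * since the order flips).
+ */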
+ if (reverse) {
+ bmap = mcam->bmap_reverse;
+ start = mcam->bmap_entries - start;
+ end = mcam->bmap_entries - end;
+ swap(start, end);
+ } else {
+ bmap = mcam->bmap;
+ }
+
+ if (req->contig) {
+ /* Allocate requested number of contiguous entries, if
+ * unsuccessful find max contiguous entries available.
+ */
+ index = npc_mcam_find_zero_area(bmap, end, start,
+ req->count, &max_contig);
+ rsp->count = max_contig;
+ if (reverse)
+ rsp->entry = mcam->bmap_entries - index - max_contig;
+ else
+ rsp->entry = index;
+ } else {
+ /* Allocate requested number of non-contiguous entries,
+ * if unsuccessful allocate as many as possible.
+ */
+ rsp->count = 0;
+ next_start = start;
+ for (entry = 0; entry < req->count; entry++) {
+ index = find_next_zero_bit(bmap, end, next_start);
+ if (index >= end)
+ break;
+
+ next_start = start + (index - start) + 1;
+
+ /* Save the entry's index */
+ if (reverse)
+ index = mcam->bmap_entries - index - 1;
+ entry_list[entry] = index;
+ rsp->count++;
+ }
+ }
+
+ /* If allocating the requested number of entries was unsuccessful,
+ * expand the search range to the full bitmap length and retry.
+ */
+ if (!req->priority && (rsp->count < req->count) &&
+ ((end - start) != mcam->bmap_entries)) {
+ reverse = true;
+ start = 0;
+ end = mcam->bmap_entries;
+ goto alloc;
+ }
+
+ /* For priority entry allocation requests, if allocation is
+ * failed then expand search to max possible range and retry.
+ */
+ if (req->priority && rsp->count < req->count) {
+ if (req->priority == NPC_MCAM_LOWER_PRIO &&
+ (start != (req->ref_entry + 1))) {
+ start = req->ref_entry + 1;
+ end = mcam->bmap_entries;
+ reverse = false;
+ goto alloc;
+ } else if ((req->priority == NPC_MCAM_HIGHER_PRIO) &&
+ ((end - start) != req->ref_entry)) {
+ start = 0;
+ end = req->ref_entry;
+ reverse = true;
+ goto alloc;
+ }
+ }
+
+ /* Copy MCAM entry indices into mbox response entry_list.
+ * Requester always expects indices in ascending order, so
+ * reverse the list if reverse bitmap is used for allocation.
+ */
+ if (!req->contig && rsp->count) {
+ index = 0;
+ for (entry = rsp->count - 1; entry >= 0; entry--) {
+ if (reverse)
+ rsp->entry_list[index++] = entry_list[entry];
+ else
+ rsp->entry_list[entry] = entry_list[entry];
+ }
+ }
+
+ /* Mark the allocated entries as used and set nixlf mapping */
+ for (entry = 0; entry < rsp->count; entry++) {
+ index = req->contig ?
+ (rsp->entry + entry) : rsp->entry_list[entry];
+ npc_mcam_set_bit(mcam, index);
+ mcam->entry2pfvf_map[index] = pcifunc;
+ mcam->entry2cntr_map[index] = NPC_MCAM_INVALID_MAP;
+ }
+
+ /* Update available free count in mbox response */
+ rsp->free_count = mcam->bmap_fcnt;
+
+ mutex_unlock(&mcam->lock);
+ return 0;
+}
+
+/* Mark the MCAM slot as reserved in the bitmaps */
+void npc_mcam_rsrcs_reserve(struct rvu *rvu, int blkaddr, int entry_idx)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+
+ npc_mcam_set_bit(mcam, entry_idx);
+}
+
+int rvu_mbox_handler_npc_mcam_alloc_entry(struct rvu *rvu,
+ struct npc_mcam_alloc_entry_req *req,
+ struct npc_mcam_alloc_entry_rsp *rsp)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u16 pcifunc = req->hdr.pcifunc;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return NPC_MCAM_INVALID_REQ;
+
+ rsp->entry = NPC_MCAM_ENTRY_INVALID;
+ rsp->free_count = 0;
+
+ /* Check if ref_entry is within range */
+ if (req->priority && req->ref_entry >= mcam->bmap_entries) {
+ dev_err(rvu->dev, "%s: reference entry %d is out of range\n",
+ __func__, req->ref_entry);
+ return NPC_MCAM_INVALID_REQ;
+ }
+
+ /* ref_entry can't be '0' if requested priority is high.
+ * Can't be last entry if requested priority is low.
+ */
+ if ((!req->ref_entry && req->priority == NPC_MCAM_HIGHER_PRIO) ||
+ ((req->ref_entry == (mcam->bmap_entries - 1)) &&
+ req->priority == NPC_MCAM_LOWER_PRIO))
+ return NPC_MCAM_INVALID_REQ;
+
+ /* Since list of allocated indices needs to be sent to requester,
+ * max number of non-contiguous entries per mbox msg is limited.
+ */
+ if (!req->contig && req->count > NPC_MAX_NONCONTIG_ENTRIES) {
+ dev_err(rvu->dev,
+ "%s: %d Non-contiguous MCAM entries requested is more than max (%d) allowed\n",
+ __func__, req->count, NPC_MAX_NONCONTIG_ENTRIES);
+ return NPC_MCAM_INVALID_REQ;
+ }
+
+ /* Alloc request from PFFUNC with no NIXLF attached should be denied */
+ if (!is_pffunc_af(pcifunc) && !is_nixlf_attached(rvu, pcifunc))
+ return NPC_MCAM_ALLOC_DENIED;
+
+ return npc_mcam_alloc_entries(mcam, pcifunc, req, rsp);
+}
+
+int rvu_mbox_handler_npc_mcam_free_entry(struct rvu *rvu,
+ struct npc_mcam_free_entry_req *req,
+ struct msg_rsp *rsp)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u16 pcifunc = req->hdr.pcifunc;
+ int blkaddr, rc = 0;
+ u16 cntr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return NPC_MCAM_INVALID_REQ;
+
+ /* Free request from PFFUNC with no NIXLF attached, ignore */
+ if (!is_pffunc_af(pcifunc) && !is_nixlf_attached(rvu, pcifunc))
+ return NPC_MCAM_INVALID_REQ;
+
+ mutex_lock(&mcam->lock);
+
+ if (req->all)
+ goto free_all;
+
+ rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry);
+ if (rc)
+ goto exit;
+
+ mcam->entry2pfvf_map[req->entry] = NPC_MCAM_INVALID_MAP;
+ mcam->entry2target_pffunc[req->entry] = 0x0;
+ npc_mcam_clear_bit(mcam, req->entry);
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, req->entry, false);
+
+ /* Update entry2counter mapping */
+ cntr = mcam->entry2cntr_map[req->entry];
+ if (cntr != NPC_MCAM_INVALID_MAP)
+ npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr,
+ req->entry, cntr);
+
+ goto exit;
+
+free_all:
+ /* Free up all entries allocated to requesting PFFUNC */
+ npc_mcam_free_all_entries(rvu, mcam, blkaddr, pcifunc);
+exit:
+ mutex_unlock(&mcam->lock);
+ return rc;
+}
+
+int rvu_mbox_handler_npc_mcam_read_entry(struct rvu *rvu,
+ struct npc_mcam_read_entry_req *req,
+ struct npc_mcam_read_entry_rsp *rsp)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u16 pcifunc = req->hdr.pcifunc;
+ int blkaddr, rc;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return NPC_MCAM_INVALID_REQ;
+
+ mutex_lock(&mcam->lock);
+ rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry);
+ if (!rc) {
+ npc_read_mcam_entry(rvu, mcam, blkaddr, req->entry,
+ &rsp->entry_data,
+ &rsp->intf, &rsp->enable);
+ }
+
+ mutex_unlock(&mcam->lock);
+ return rc;
+}
+
+int rvu_mbox_handler_npc_mcam_write_entry(struct rvu *rvu,
+ struct npc_mcam_write_entry_req *req,
+ struct msg_rsp *rsp)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u16 pcifunc = req->hdr.pcifunc;
+ int blkaddr, rc;
+ u8 nix_intf;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return NPC_MCAM_INVALID_REQ;
+
+ mutex_lock(&mcam->lock);
+ rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry);
+ if (rc)
+ goto exit;
+
+ if (req->set_cntr &&
+ npc_mcam_verify_counter(mcam, pcifunc, req->cntr)) {
+ rc = NPC_MCAM_INVALID_REQ;
+ goto exit;
+ }
+
+ if (!is_npc_interface_valid(rvu, req->intf)) {
+ rc = NPC_MCAM_INVALID_REQ;
+ goto exit;
+ }
+
+ if (is_npc_intf_tx(req->intf))
+ nix_intf = pfvf->nix_tx_intf;
+ else
+ nix_intf = pfvf->nix_rx_intf;
+
+ if (!is_pffunc_af(pcifunc) &&
+ npc_mcam_verify_pf_func(rvu, &req->entry_data, req->intf, pcifunc)) {
+ rc = NPC_MCAM_INVALID_REQ;
+ goto exit;
+ }
+
+ /* For AF installed rules, the nix_intf should be set to target NIX */
+ if (is_pffunc_af(req->hdr.pcifunc))
+ nix_intf = req->intf;
+
+ npc_config_mcam_entry(rvu, mcam, blkaddr, req->entry, nix_intf,
+ &req->entry_data, req->enable_entry);
+
+ if (req->set_cntr)
+ npc_map_mcam_entry_and_cntr(rvu, mcam, blkaddr,
+ req->entry, req->cntr);
+
+ rc = 0;
+exit:
+ mutex_unlock(&mcam->lock);
+ return rc;
+}
+
+int rvu_mbox_handler_npc_mcam_ena_entry(struct rvu *rvu,
+ struct npc_mcam_ena_dis_entry_req *req,
+ struct msg_rsp *rsp)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u16 pcifunc = req->hdr.pcifunc;
+ int blkaddr, rc;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return NPC_MCAM_INVALID_REQ;
+
+ mutex_lock(&mcam->lock);
+ rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry);
+ mutex_unlock(&mcam->lock);
+ if (rc)
+ return rc;
+
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, req->entry, true);
+
+ return 0;
+}
+
+int rvu_mbox_handler_npc_mcam_dis_entry(struct rvu *rvu,
+ struct npc_mcam_ena_dis_entry_req *req,
+ struct msg_rsp *rsp)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u16 pcifunc = req->hdr.pcifunc;
+ int blkaddr, rc;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return NPC_MCAM_INVALID_REQ;
+
+ mutex_lock(&mcam->lock);
+ rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry);
+ mutex_unlock(&mcam->lock);
+ if (rc)
+ return rc;
+
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, req->entry, false);
+
+ return 0;
+}
+
+int rvu_mbox_handler_npc_mcam_shift_entry(struct rvu *rvu,
+ struct npc_mcam_shift_entry_req *req,
+ struct npc_mcam_shift_entry_rsp *rsp)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u16 pcifunc = req->hdr.pcifunc;
+ u16 old_entry, new_entry;
+ int blkaddr, rc = 0;
+ u16 index, cntr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return NPC_MCAM_INVALID_REQ;
+
+ if (req->shift_count > NPC_MCAM_MAX_SHIFTS)
+ return NPC_MCAM_INVALID_REQ;
+
+ mutex_lock(&mcam->lock);
+ for (index = 0; index < req->shift_count; index++) {
+ old_entry = req->curr_entry[index];
+ new_entry = req->new_entry[index];
+
+ /* Check whether both the old and new entries are valid
+ * and belong to this PFFUNC.
+ */
+ rc = npc_mcam_verify_entry(mcam, pcifunc, old_entry);
+ if (rc)
+ break;
+
+ rc = npc_mcam_verify_entry(mcam, pcifunc, new_entry);
+ if (rc)
+ break;
+
+ /* new_entry should not have a counter mapped */
+ if (mcam->entry2cntr_map[new_entry] != NPC_MCAM_INVALID_MAP) {
+ rc = NPC_MCAM_PERM_DENIED;
+ break;
+ }
+
+ /* Disable the new_entry */
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, new_entry, false);
+
+ /* Copy rule from old entry to new entry */
+ npc_copy_mcam_entry(rvu, mcam, blkaddr, old_entry, new_entry);
+
+ /* Copy counter mapping, if any */
+ cntr = mcam->entry2cntr_map[old_entry];
+ if (cntr != NPC_MCAM_INVALID_MAP) {
+ npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr,
+ old_entry, cntr);
+ npc_map_mcam_entry_and_cntr(rvu, mcam, blkaddr,
+ new_entry, cntr);
+ }
+
+ /* Enable new_entry and disable old_entry */
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, new_entry, true);
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, old_entry, false);
+ }
+
+ /* If shift has failed then report the failed index */
+ if (index != req->shift_count) {
+ rc = NPC_MCAM_PERM_DENIED;
+ rsp->failed_entry_idx = index;
+ }
+
+ mutex_unlock(&mcam->lock);
+ return rc;
+}
+
+int rvu_mbox_handler_npc_mcam_alloc_counter(struct rvu *rvu,
+ struct npc_mcam_alloc_counter_req *req,
+ struct npc_mcam_alloc_counter_rsp *rsp)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u16 pcifunc = req->hdr.pcifunc;
+ u16 max_contig, cntr;
+ int blkaddr, index;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return NPC_MCAM_INVALID_REQ;
+
+ /* If the request is from a PFFUNC with no NIXLF attached, ignore */
+ if (!is_pffunc_af(pcifunc) && !is_nixlf_attached(rvu, pcifunc))
+ return NPC_MCAM_INVALID_REQ;
+
+ /* Since list of allocated counter IDs needs to be sent to requester,
+ * max number of non-contiguous counters per mbox msg is limited.
+ */
+ if (!req->contig && req->count > NPC_MAX_NONCONTIG_COUNTERS)
+ return NPC_MCAM_INVALID_REQ;
+
+ mutex_lock(&mcam->lock);
+
+ /* Check if unused counters are available or not */
+ if (!rvu_rsrc_free_count(&mcam->counters)) {
+ mutex_unlock(&mcam->lock);
+ return NPC_MCAM_ALLOC_FAILED;
+ }
+
+ rsp->count = 0;
+
+ if (req->contig) {
+ /* Allocate the requested number of contiguous counters; if
+ * unsuccessful, find the max contiguous counters available.
+ */
+ index = npc_mcam_find_zero_area(mcam->counters.bmap,
+ mcam->counters.max, 0,
+ req->count, &max_contig);
+ rsp->count = max_contig;
+ rsp->cntr = index;
+ for (cntr = index; cntr < (index + max_contig); cntr++) {
+ __set_bit(cntr, mcam->counters.bmap);
+ mcam->cntr2pfvf_map[cntr] = pcifunc;
+ }
+ } else {
+ /* Allocate requested number of non-contiguous counters,
+ * if unsuccessful allocate as many as possible.
+ */
+ for (cntr = 0; cntr < req->count; cntr++) {
+ index = rvu_alloc_rsrc(&mcam->counters);
+ if (index < 0)
+ break;
+ rsp->cntr_list[cntr] = index;
+ rsp->count++;
+ mcam->cntr2pfvf_map[index] = pcifunc;
+ }
+ }
+
+ mutex_unlock(&mcam->lock);
+ return 0;
+}
+
+int rvu_mbox_handler_npc_mcam_free_counter(struct rvu *rvu,
+ struct npc_mcam_oper_counter_req *req, struct msg_rsp *rsp)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u16 index, entry = 0;
+ int blkaddr, err;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return NPC_MCAM_INVALID_REQ;
+
+ mutex_lock(&mcam->lock);
+ err = npc_mcam_verify_counter(mcam, req->hdr.pcifunc, req->cntr);
+ if (err) {
+ mutex_unlock(&mcam->lock);
+ return err;
+ }
+
+ /* Mark counter as free/unused */
+ mcam->cntr2pfvf_map[req->cntr] = NPC_MCAM_INVALID_MAP;
+ rvu_free_rsrc(&mcam->counters, req->cntr);
+
+ /* Disable stats for all MCAM entries that are using this counter */
+ while (entry < mcam->bmap_entries) {
+ if (!mcam->cntr_refcnt[req->cntr])
+ break;
+
+ index = find_next_bit(mcam->bmap, mcam->bmap_entries, entry);
+ if (index >= mcam->bmap_entries)
+ break;
+ entry = index + 1;
+ if (mcam->entry2cntr_map[index] != req->cntr)
+ continue;
+
+ npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr,
+ index, req->cntr);
+ }
+
+ mutex_unlock(&mcam->lock);
+ return 0;
+}
+
+int rvu_mbox_handler_npc_mcam_unmap_counter(struct rvu *rvu,
+ struct npc_mcam_unmap_counter_req *req, struct msg_rsp *rsp)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u16 index, entry = 0;
+ int blkaddr, rc;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return NPC_MCAM_INVALID_REQ;
+
+ mutex_lock(&mcam->lock);
+ rc = npc_mcam_verify_counter(mcam, req->hdr.pcifunc, req->cntr);
+ if (rc)
+ goto exit;
+
+ /* Unmap the MCAM entry and counter */
+ if (!req->all) {
+ rc = npc_mcam_verify_entry(mcam, req->hdr.pcifunc, req->entry);
+ if (rc)
+ goto exit;
+ npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr,
+ req->entry, req->cntr);
+ goto exit;
+ }
+
+ /* Disable stats for all MCAM entries that are using this counter */
+ while (entry < mcam->bmap_entries) {
+ if (!mcam->cntr_refcnt[req->cntr])
+ break;
+
+ index = find_next_bit(mcam->bmap, mcam->bmap_entries, entry);
+ if (index >= mcam->bmap_entries)
+ break;
+ entry = index + 1;
+
+ if (mcam->entry2cntr_map[index] != req->cntr)
+ continue;
+
+ npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr,
+ index, req->cntr);
+ }
+exit:
+ mutex_unlock(&mcam->lock);
+ return rc;
+}
+
+int rvu_mbox_handler_npc_mcam_clear_counter(struct rvu *rvu,
+ struct npc_mcam_oper_counter_req *req, struct msg_rsp *rsp)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ int blkaddr, err;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return NPC_MCAM_INVALID_REQ;
+
+ mutex_lock(&mcam->lock);
+ err = npc_mcam_verify_counter(mcam, req->hdr.pcifunc, req->cntr);
+ mutex_unlock(&mcam->lock);
+ if (err)
+ return err;
+
+ rvu_write64(rvu, blkaddr, NPC_AF_MATCH_STATX(req->cntr), 0x00);
+
+ return 0;
+}
+
+int rvu_mbox_handler_npc_mcam_counter_stats(struct rvu *rvu,
+ struct npc_mcam_oper_counter_req *req,
+ struct npc_mcam_oper_counter_rsp *rsp)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ int blkaddr, err;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return NPC_MCAM_INVALID_REQ;
+
+ mutex_lock(&mcam->lock);
+ err = npc_mcam_verify_counter(mcam, req->hdr.pcifunc, req->cntr);
+ mutex_unlock(&mcam->lock);
+ if (err)
+ return err;
+
+ rsp->stat = rvu_read64(rvu, blkaddr, NPC_AF_MATCH_STATX(req->cntr));
+ rsp->stat &= BIT_ULL(48) - 1;
+
+ return 0;
+}
+
+int rvu_mbox_handler_npc_mcam_alloc_and_write_entry(struct rvu *rvu,
+ struct npc_mcam_alloc_and_write_entry_req *req,
+ struct npc_mcam_alloc_and_write_entry_rsp *rsp)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
+ struct npc_mcam_alloc_counter_req cntr_req;
+ struct npc_mcam_alloc_counter_rsp cntr_rsp;
+ struct npc_mcam_alloc_entry_req entry_req;
+ struct npc_mcam_alloc_entry_rsp entry_rsp;
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u16 entry = NPC_MCAM_ENTRY_INVALID;
+ u16 cntr = NPC_MCAM_ENTRY_INVALID;
+ int blkaddr, rc;
+ u8 nix_intf;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return NPC_MCAM_INVALID_REQ;
+
+ if (!is_npc_interface_valid(rvu, req->intf))
+ return NPC_MCAM_INVALID_REQ;
+
+ if (npc_mcam_verify_pf_func(rvu, &req->entry_data, req->intf,
+ req->hdr.pcifunc))
+ return NPC_MCAM_INVALID_REQ;
+
+ /* Try to allocate a MCAM entry */
+ entry_req.hdr.pcifunc = req->hdr.pcifunc;
+ entry_req.contig = true;
+ entry_req.priority = req->priority;
+ entry_req.ref_entry = req->ref_entry;
+ entry_req.count = 1;
+
+ rc = rvu_mbox_handler_npc_mcam_alloc_entry(rvu,
+ &entry_req, &entry_rsp);
+ if (rc)
+ return rc;
+
+ if (!entry_rsp.count)
+ return NPC_MCAM_ALLOC_FAILED;
+
+ entry = entry_rsp.entry;
+
+ if (!req->alloc_cntr)
+ goto write_entry;
+
+ /* Now allocate counter */
+ cntr_req.hdr.pcifunc = req->hdr.pcifunc;
+ cntr_req.contig = true;
+ cntr_req.count = 1;
+
+ rc = rvu_mbox_handler_npc_mcam_alloc_counter(rvu, &cntr_req, &cntr_rsp);
+ if (rc) {
+ /* Free allocated MCAM entry */
+ mutex_lock(&mcam->lock);
+ mcam->entry2pfvf_map[entry] = NPC_MCAM_INVALID_MAP;
+ npc_mcam_clear_bit(mcam, entry);
+ mutex_unlock(&mcam->lock);
+ return rc;
+ }
+
+ cntr = cntr_rsp.cntr;
+
+write_entry:
+ mutex_lock(&mcam->lock);
+
+ if (is_npc_intf_tx(req->intf))
+ nix_intf = pfvf->nix_tx_intf;
+ else
+ nix_intf = pfvf->nix_rx_intf;
+
+ npc_config_mcam_entry(rvu, mcam, blkaddr, entry, nix_intf,
+ &req->entry_data, req->enable_entry);
+
+ if (req->alloc_cntr)
+ npc_map_mcam_entry_and_cntr(rvu, mcam, blkaddr, entry, cntr);
+ mutex_unlock(&mcam->lock);
+
+ rsp->entry = entry;
+ rsp->cntr = cntr;
+
+ return 0;
+}
+
+#define GET_KEX_CFG(intf) \
+ rvu_read64(rvu, BLKADDR_NPC, NPC_AF_INTFX_KEX_CFG(intf))
+
+#define GET_KEX_FLAGS(ld) \
+ rvu_read64(rvu, BLKADDR_NPC, NPC_AF_KEX_LDATAX_FLAGS_CFG(ld))
+
+#define GET_KEX_LD(intf, lid, lt, ld) \
+ rvu_read64(rvu, BLKADDR_NPC, \
+ NPC_AF_INTFX_LIDX_LTX_LDX_CFG(intf, lid, lt, ld))
+
+#define GET_KEX_LDFLAGS(intf, ld, fl) \
+ rvu_read64(rvu, BLKADDR_NPC, \
+ NPC_AF_INTFX_LDATAX_FLAGSX_CFG(intf, ld, fl))
+
+int rvu_mbox_handler_npc_get_kex_cfg(struct rvu *rvu, struct msg_req *req,
+ struct npc_get_kex_cfg_rsp *rsp)
+{
+ int lid, lt, ld, fl;
+
+ rsp->rx_keyx_cfg = GET_KEX_CFG(NIX_INTF_RX);
+ rsp->tx_keyx_cfg = GET_KEX_CFG(NIX_INTF_TX);
+ for (lid = 0; lid < NPC_MAX_LID; lid++) {
+ for (lt = 0; lt < NPC_MAX_LT; lt++) {
+ for (ld = 0; ld < NPC_MAX_LD; ld++) {
+ rsp->intf_lid_lt_ld[NIX_INTF_RX][lid][lt][ld] =
+ GET_KEX_LD(NIX_INTF_RX, lid, lt, ld);
+ rsp->intf_lid_lt_ld[NIX_INTF_TX][lid][lt][ld] =
+ GET_KEX_LD(NIX_INTF_TX, lid, lt, ld);
+ }
+ }
+ }
+ for (ld = 0; ld < NPC_MAX_LD; ld++)
+ rsp->kex_ld_flags[ld] = GET_KEX_FLAGS(ld);
+
+ for (ld = 0; ld < NPC_MAX_LD; ld++) {
+ for (fl = 0; fl < NPC_MAX_LFL; fl++) {
+ rsp->intf_ld_flags[NIX_INTF_RX][ld][fl] =
+ GET_KEX_LDFLAGS(NIX_INTF_RX, ld, fl);
+ rsp->intf_ld_flags[NIX_INTF_TX][ld][fl] =
+ GET_KEX_LDFLAGS(NIX_INTF_TX, ld, fl);
+ }
+ }
+ memcpy(rsp->mkex_pfl_name, rvu->mkex_pfl_name, MKEX_NAME_LEN);
+ return 0;
+}
+
+static int
+npc_set_var_len_offset_pkind(struct rvu *rvu, u16 pcifunc, u64 pkind,
+ u8 var_len_off, u8 var_len_off_mask, u8 shift_dir)
+{
+ struct npc_kpu_action0 *act0;
+ u8 shift_count = 0;
+ int blkaddr;
+ u64 val;
+
+ if (!var_len_off_mask)
+ return -EINVAL;
+
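+ /* Derive the shift count from the mask unless the whole byte is used:
+ * e.g. for mask 0x3c the data is right-shifted by __ffs(0x3c) = 2, or
+ * left-shifted by 8 - __fls(0x3c) = 3, depending on 'shift_dir'.
+ */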
+ if (var_len_off_mask != 0xff) {
+ if (shift_dir)
+ shift_count = __ffs(var_len_off_mask);
+ else
+ shift_count = (8 - __fls(var_len_off_mask));
+ }
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, pcifunc);
+ if (blkaddr < 0) {
+ dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__);
+ return -EINVAL;
+ }
+ val = rvu_read64(rvu, blkaddr, NPC_AF_PKINDX_ACTION0(pkind));
+ act0 = (struct npc_kpu_action0 *)&val;
+ act0->var_len_shift = shift_count;
+ act0->var_len_right = shift_dir;
+ act0->var_len_mask = var_len_off_mask;
+ act0->var_len_offset = var_len_off;
+ rvu_write64(rvu, blkaddr, NPC_AF_PKINDX_ACTION0(pkind), val);
+ return 0;
+}
+
+int rvu_npc_set_parse_mode(struct rvu *rvu, u16 pcifunc, u64 mode, u8 dir,
+ u64 pkind, u8 var_len_off, u8 var_len_off_mask,
+ u8 shift_dir)
+
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ int blkaddr, nixlf, rc, intf_mode;
+ int pf = rvu_get_pf(pcifunc);
+ u64 rxpkind, txpkind;
+ u8 cgx_id, lmac_id;
+
+ /* use default pkind to disable edsa/higig */
+ rxpkind = rvu_npc_get_pkind(rvu, pf);
+ txpkind = NPC_TX_DEF_PKIND;
+ intf_mode = NPC_INTF_MODE_DEF;
+
+ if (mode & OTX2_PRIV_FLAGS_CUSTOM) {
+ if (pkind == NPC_RX_CUSTOM_PRE_L2_PKIND) {
+ rc = npc_set_var_len_offset_pkind(rvu, pcifunc, pkind,
+ var_len_off,
+ var_len_off_mask,
+ shift_dir);
+ if (rc)
+ return rc;
+ }
+ rxpkind = pkind;
+ txpkind = pkind;
+ }
+
+ if (dir & PKIND_RX) {
+ /* rx pkind set req valid only for cgx mapped PFs */
+ if (!is_cgx_config_permitted(rvu, pcifunc))
+ return 0;
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+ rc = cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
+ rxpkind);
+ if (rc)
+ return rc;
+ }
+
+ if (dir & PKIND_TX) {
+ /* Tx pkind set request valid if PCIFUNC has NIXLF attached */
+ rc = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
+ if (rc)
+ return rc;
+
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf),
+ txpkind);
+ }
+
+ pfvf->intf_mode = intf_mode;
+ return 0;
+}
+
+int rvu_mbox_handler_npc_set_pkind(struct rvu *rvu, struct npc_set_pkind *req,
+ struct msg_rsp *rsp)
+{
+ return rvu_npc_set_parse_mode(rvu, req->hdr.pcifunc, req->mode,
+ req->dir, req->pkind, req->var_len_off,
+ req->var_len_off_mask, req->shift_dir);
+}
+
+int rvu_mbox_handler_npc_read_base_steer_rule(struct rvu *rvu,
+ struct msg_req *req,
+ struct npc_mcam_read_base_rule_rsp *rsp)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ int index, blkaddr, nixlf, rc = 0;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_pfvf *pfvf;
+ u8 intf, enable;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return NPC_MCAM_INVALID_REQ;
+
+ /* Return the channel number in case of PF */
+ if (!(pcifunc & RVU_PFVF_FUNC_MASK)) {
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ rsp->entry.kw[0] = pfvf->rx_chan_base;
+ rsp->entry.kw_mask[0] = 0xFFFULL;
+ goto out;
+ }
+
+ /* Find the pkt steering rule installed by PF to this VF */
+ mutex_lock(&mcam->lock);
+ for (index = 0; index < mcam->bmap_entries; index++) {
+ if (mcam->entry2target_pffunc[index] == pcifunc)
+ goto read_entry;
+ }
+
+ rc = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
+ if (rc < 0) {
+ mutex_unlock(&mcam->lock);
+ goto out;
+ }
+ /* Read the default ucast entry if there is no pkt steering rule */
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf,
+ NIXLF_UCAST_ENTRY);
+read_entry:
+ /* Read the mcam entry */
+ npc_read_mcam_entry(rvu, mcam, blkaddr, index, &rsp->entry, &intf,
+ &enable);
+ mutex_unlock(&mcam->lock);
+out:
+ return rc;
+}
+
+int rvu_mbox_handler_npc_mcam_entry_stats(struct rvu *rvu,
+ struct npc_mcam_get_stats_req *req,
+ struct npc_mcam_get_stats_rsp *rsp)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u16 index, cntr;
+ int blkaddr;
+ u64 regval;
+ u32 bank;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return NPC_MCAM_INVALID_REQ;
+
+ mutex_lock(&mcam->lock);
+
+ index = req->entry & (mcam->banksize - 1);
+ bank = npc_get_bank(mcam, req->entry);
+
+ /* read MCAM entry STAT_ACT register */
+ regval = rvu_read64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_STAT_ACT(index, bank));
+
+ if (!(regval & rvu->hw->npc_stat_ena)) {
+ rsp->stat_ena = 0;
+ mutex_unlock(&mcam->lock);
+ return 0;
+ }
+
+ cntr = regval & 0x1FF;
+
+ rsp->stat_ena = 1;
+ rsp->stat = rvu_read64(rvu, blkaddr, NPC_AF_MATCH_STATX(cntr));
+ rsp->stat &= BIT_ULL(48) - 1;
+
+ mutex_unlock(&mcam->lock);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
new file mode 100644
index 000000000..237f82082
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
@@ -0,0 +1,1700 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2020 Marvell.
+ */
+
+#include <linux/bitfield.h>
+
+#include "rvu_struct.h"
+#include "rvu_reg.h"
+#include "rvu.h"
+#include "npc.h"
+#include "rvu_npc_fs.h"
+#include "rvu_npc_hash.h"
+
+static const char * const npc_flow_names[] = {
+ [NPC_DMAC] = "dmac",
+ [NPC_SMAC] = "smac",
+ [NPC_ETYPE] = "ether type",
+ [NPC_VLAN_ETYPE_CTAG] = "vlan ether type ctag",
+ [NPC_VLAN_ETYPE_STAG] = "vlan ether type stag",
+ [NPC_OUTER_VID] = "outer vlan id",
+ [NPC_INNER_VID] = "inner vlan id",
+ [NPC_TOS] = "tos",
+ [NPC_IPFRAG_IPV4] = "fragmented IPv4 header ",
+ [NPC_SIP_IPV4] = "ipv4 source ip",
+ [NPC_DIP_IPV4] = "ipv4 destination ip",
+ [NPC_IPFRAG_IPV6] = "fragmented IPv6 header ",
+ [NPC_SIP_IPV6] = "ipv6 source ip",
+ [NPC_DIP_IPV6] = "ipv6 destination ip",
+ [NPC_IPPROTO_TCP] = "ip proto tcp",
+ [NPC_IPPROTO_UDP] = "ip proto udp",
+ [NPC_IPPROTO_SCTP] = "ip proto sctp",
+ [NPC_IPPROTO_ICMP] = "ip proto icmp",
+ [NPC_IPPROTO_ICMP6] = "ip proto icmp6",
+ [NPC_IPPROTO_AH] = "ip proto AH",
+ [NPC_IPPROTO_ESP] = "ip proto ESP",
+ [NPC_SPORT_TCP] = "tcp source port",
+ [NPC_DPORT_TCP] = "tcp destination port",
+ [NPC_SPORT_UDP] = "udp source port",
+ [NPC_DPORT_UDP] = "udp destination port",
+ [NPC_SPORT_SCTP] = "sctp source port",
+ [NPC_DPORT_SCTP] = "sctp destination port",
+ [NPC_LXMB] = "Mcast/Bcast header ",
+ [NPC_IPSEC_SPI] = "SPI ",
+ [NPC_UNKNOWN] = "unknown",
+};
+
+bool npc_is_feature_supported(struct rvu *rvu, u64 features, u8 intf)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u64 mcam_features;
+ u64 unsupported;
+
+ mcam_features = is_npc_intf_tx(intf) ? mcam->tx_features : mcam->rx_features;
+ unsupported = (mcam_features ^ features) & ~mcam_features;
+
+ /* Return false if at least one of the input flows is not extracted */
+ return !unsupported;
+}
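
As a quick illustration of the bit arithmetic above (a hedged standalone sketch, not part of the patch; values invented): any feature bit that is requested but absent from the extractor's feature mask survives the (mcam_features ^ features) & ~mcam_features expression, so a non-zero result means at least one requested flow key field is not extracted.

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* hypothetical masks: extractor supports bits 0-3, caller asks for 0, 3 and 5 */
	uint64_t mcam_features = 0x0full;
	uint64_t features = (1ull << 0) | (1ull << 3) | (1ull << 5);
	uint64_t unsupported = (mcam_features ^ features) & ~mcam_features;

	assert(unsupported == (1ull << 5));	/* only bit 5 is not extracted */
	return 0;
}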
+
+const char *npc_get_field_name(u8 hdr)
+{
+ if (hdr >= ARRAY_SIZE(npc_flow_names))
+ return npc_flow_names[NPC_UNKNOWN];
+
+ return npc_flow_names[hdr];
+}
+
+/* Compute keyword masks and figure out the number of keywords a field
+ * spans in the key.
+ */
+static void npc_set_kw_masks(struct npc_mcam *mcam, u8 type,
+ u8 nr_bits, int start_kwi, int offset, u8 intf)
+{
+ struct npc_key_field *field = &mcam->rx_key_fields[type];
+ u8 bits_in_kw;
+ int max_kwi;
+
+ if (mcam->banks_per_entry == 1)
+ max_kwi = 1; /* NPC_MCAM_KEY_X1 */
+ else if (mcam->banks_per_entry == 2)
+ max_kwi = 3; /* NPC_MCAM_KEY_X2 */
+ else
+ max_kwi = 6; /* NPC_MCAM_KEY_X4 */
+
+ if (is_npc_intf_tx(intf))
+ field = &mcam->tx_key_fields[type];
+
+ if (offset + nr_bits <= 64) {
+ /* one KW only */
+ if (start_kwi > max_kwi)
+ return;
+ field->kw_mask[start_kwi] |= GENMASK_ULL(nr_bits - 1, 0)
+ << offset;
+ field->nr_kws = 1;
+ } else if (offset + nr_bits > 64 &&
+ offset + nr_bits <= 128) {
+ /* two KWs */
+ if (start_kwi + 1 > max_kwi)
+ return;
+ /* first KW mask */
+ bits_in_kw = 64 - offset;
+ field->kw_mask[start_kwi] |= GENMASK_ULL(bits_in_kw - 1, 0)
+ << offset;
+ /* second KW mask i.e. mask for rest of bits */
+ bits_in_kw = nr_bits + offset - 64;
+ field->kw_mask[start_kwi + 1] |= GENMASK_ULL(bits_in_kw - 1, 0);
+ field->nr_kws = 2;
+ } else {
+ /* three KWs */
+ if (start_kwi + 2 > max_kwi)
+ return;
+ /* first KW mask */
+ bits_in_kw = 64 - offset;
+ field->kw_mask[start_kwi] |= GENMASK_ULL(bits_in_kw - 1, 0)
+ << offset;
+ /* second KW mask */
+ field->kw_mask[start_kwi + 1] = ~0ULL;
+ /* third KW mask i.e. mask for rest of bits */
+ bits_in_kw = nr_bits + offset - 128;
+ field->kw_mask[start_kwi + 2] |= GENMASK_ULL(bits_in_kw - 1, 0);
+ field->nr_kws = 3;
+ }
+}
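
A worked example of the splitting above may help (illustration only, numbers invented): a 16-bit field that starts at bit 56 of one 64-bit key word leaves 8 bits in that word and spills the remaining 8 bits into the next, producing two keyword masks. A minimal standalone sketch of that two-keyword case:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int offset = 56, nr_bits = 16;	/* example field placement */
	unsigned int bits_in_kw = 64 - offset;	/* bits left in the first word */
	uint64_t kw_mask[2] = { 0, 0 };

	kw_mask[0] |= ((1ull << bits_in_kw) - 1) << offset;	/* first KW mask */
	kw_mask[1] |= (1ull << (nr_bits - bits_in_kw)) - 1;	/* rest of the bits */

	/* prints kw_mask[0]=0xff00000000000000 kw_mask[1]=0xff */
	printf("kw_mask[0]=%#llx kw_mask[1]=%#llx\n",
	       (unsigned long long)kw_mask[0], (unsigned long long)kw_mask[1]);
	return 0;
}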
+
+/* Helper function to figure out whether field exists in the key */
+static bool npc_is_field_present(struct rvu *rvu, enum key_fields type, u8 intf)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct npc_key_field *input;
+
+ input = &mcam->rx_key_fields[type];
+ if (is_npc_intf_tx(intf))
+ input = &mcam->tx_key_fields[type];
+
+ return input->nr_kws > 0;
+}
+
+static bool npc_is_same(struct npc_key_field *input,
+ struct npc_key_field *field)
+{
+ return memcmp(&input->layer_mdata, &field->layer_mdata,
+ sizeof(struct npc_layer_mdata)) == 0;
+}
+
+static void npc_set_layer_mdata(struct npc_mcam *mcam, enum key_fields type,
+ u64 cfg, u8 lid, u8 lt, u8 intf)
+{
+ struct npc_key_field *input = &mcam->rx_key_fields[type];
+
+ if (is_npc_intf_tx(intf))
+ input = &mcam->tx_key_fields[type];
+
+ input->layer_mdata.hdr = FIELD_GET(NPC_HDR_OFFSET, cfg);
+ input->layer_mdata.key = FIELD_GET(NPC_KEY_OFFSET, cfg);
+ input->layer_mdata.len = FIELD_GET(NPC_BYTESM, cfg) + 1;
+ input->layer_mdata.ltype = lt;
+ input->layer_mdata.lid = lid;
+}
+
+static bool npc_check_overlap_fields(struct npc_key_field *input1,
+ struct npc_key_field *input2)
+{
+ int kwi;
+
+ /* Fields with the same layer id but different ltypes are mutually
+ * exclusive, hence they are allowed to overlap in the key.
+ */
+ if (input1->layer_mdata.lid == input2->layer_mdata.lid &&
+ input1->layer_mdata.ltype != input2->layer_mdata.ltype)
+ return false;
+
+ for (kwi = 0; kwi < NPC_MAX_KWS_IN_KEY; kwi++) {
+ if (input1->kw_mask[kwi] & input2->kw_mask[kwi])
+ return true;
+ }
+
+ return false;
+}
+
+/* Helper function to check whether the given field overlaps with any other
+ * field in the key. Due to limitations on key size and the key extraction
+ * profile in use, higher layers can overwrite a lower layer's header fields.
+ * Hence overlap needs to be checked.
+ */
+static bool npc_check_overlap(struct rvu *rvu, int blkaddr,
+ enum key_fields type, u8 start_lid, u8 intf)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct npc_key_field *dummy, *input;
+ int start_kwi, offset;
+ u8 nr_bits, lid, lt, ld;
+ u64 cfg;
+
+ dummy = &mcam->rx_key_fields[NPC_UNKNOWN];
+ input = &mcam->rx_key_fields[type];
+
+ if (is_npc_intf_tx(intf)) {
+ dummy = &mcam->tx_key_fields[NPC_UNKNOWN];
+ input = &mcam->tx_key_fields[type];
+ }
+
+ for (lid = start_lid; lid < NPC_MAX_LID; lid++) {
+ for (lt = 0; lt < NPC_MAX_LT; lt++) {
+ for (ld = 0; ld < NPC_MAX_LD; ld++) {
+ cfg = rvu_read64(rvu, blkaddr,
+ NPC_AF_INTFX_LIDX_LTX_LDX_CFG
+ (intf, lid, lt, ld));
+ if (!FIELD_GET(NPC_LDATA_EN, cfg))
+ continue;
+ memset(dummy, 0, sizeof(struct npc_key_field));
+ npc_set_layer_mdata(mcam, NPC_UNKNOWN, cfg,
+ lid, lt, intf);
+ /* exclude input */
+ if (npc_is_same(input, dummy))
+ continue;
+ start_kwi = dummy->layer_mdata.key / 8;
+ offset = (dummy->layer_mdata.key * 8) % 64;
+ nr_bits = dummy->layer_mdata.len * 8;
+ /* form KW masks */
+ npc_set_kw_masks(mcam, NPC_UNKNOWN, nr_bits,
+ start_kwi, offset, intf);
+ /* check if any of the input field's bits fall within
+ * any other field's bits.
+ */
+ if (npc_check_overlap_fields(dummy, input))
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+static bool npc_check_field(struct rvu *rvu, int blkaddr, enum key_fields type,
+ u8 intf)
+{
+ if (!npc_is_field_present(rvu, type, intf) ||
+ npc_check_overlap(rvu, blkaddr, type, 0, intf))
+ return false;
+ return true;
+}
+
+static void npc_scan_exact_result(struct npc_mcam *mcam, u8 bit_number,
+ u8 key_nibble, u8 intf)
+{
+ u8 offset = (key_nibble * 4) % 64; /* offset within key word */
+ u8 kwi = (key_nibble * 4) / 64; /* which word in key */
+ u8 nr_bits = 4; /* bits in a nibble */
+ u8 type;
+
+ switch (bit_number) {
+ case 40 ... 43:
+ type = NPC_EXACT_RESULT;
+ break;
+
+ default:
+ return;
+ }
+ npc_set_kw_masks(mcam, type, nr_bits, kwi, offset, intf);
+}
+
+static void npc_scan_parse_result(struct npc_mcam *mcam, u8 bit_number,
+ u8 key_nibble, u8 intf)
+{
+ u8 offset = (key_nibble * 4) % 64; /* offset within key word */
+ u8 kwi = (key_nibble * 4) / 64; /* which word in key */
+ u8 nr_bits = 4; /* bits in a nibble */
+ u8 type;
+
+ switch (bit_number) {
+ case 0 ... 2:
+ type = NPC_CHAN;
+ break;
+ case 3:
+ type = NPC_ERRLEV;
+ break;
+ case 4 ... 5:
+ type = NPC_ERRCODE;
+ break;
+ case 6:
+ type = NPC_LXMB;
+ break;
+ /* check for LTYPE only as of now */
+ case 9:
+ type = NPC_LA;
+ break;
+ case 12:
+ type = NPC_LB;
+ break;
+ case 15:
+ type = NPC_LC;
+ break;
+ case 18:
+ type = NPC_LD;
+ break;
+ case 21:
+ type = NPC_LE;
+ break;
+ case 24:
+ type = NPC_LF;
+ break;
+ case 27:
+ type = NPC_LG;
+ break;
+ case 30:
+ type = NPC_LH;
+ break;
+ default:
+ return;
+ }
+
+ npc_set_kw_masks(mcam, type, nr_bits, kwi, offset, intf);
+}
+
+static void npc_handle_multi_layer_fields(struct rvu *rvu, int blkaddr, u8 intf)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct npc_key_field *key_fields;
+ /* Ether type can come from three layers
+ * (ethernet, single tagged, double tagged)
+ */
+ struct npc_key_field *etype_ether;
+ struct npc_key_field *etype_tag1;
+ struct npc_key_field *etype_tag2;
+ /* Outer VLAN TCI can come from two layers
+ * (single tagged, double tagged)
+ */
+ struct npc_key_field *vlan_tag1;
+ struct npc_key_field *vlan_tag2;
+ /* Inner VLAN TCI for double tagged frames */
+ struct npc_key_field *vlan_tag3;
+ u64 *features;
+ u8 start_lid;
+ int i;
+
+ key_fields = mcam->rx_key_fields;
+ features = &mcam->rx_features;
+
+ if (is_npc_intf_tx(intf)) {
+ key_fields = mcam->tx_key_fields;
+ features = &mcam->tx_features;
+ }
+
+ /* Handle header fields which can come from multiple layers, like
+ * etype and outer vlan tci. These fields should have the same position
+ * in the key; otherwise installing an mcam rule would need more than
+ * one entry, which complicates mcam space management.
+ */
+ etype_ether = &key_fields[NPC_ETYPE_ETHER];
+ etype_tag1 = &key_fields[NPC_ETYPE_TAG1];
+ etype_tag2 = &key_fields[NPC_ETYPE_TAG2];
+ vlan_tag1 = &key_fields[NPC_VLAN_TAG1];
+ vlan_tag2 = &key_fields[NPC_VLAN_TAG2];
+ vlan_tag3 = &key_fields[NPC_VLAN_TAG3];
+
+ /* if the programmed key profile does not extract Ethertype at all */
+ if (!etype_ether->nr_kws && !etype_tag1->nr_kws && !etype_tag2->nr_kws) {
+ dev_err(rvu->dev, "mkex: Ethertype is not extracted.\n");
+ goto vlan_tci;
+ }
+
+ /* if the programmed key profile extracts Ethertype from one layer */
+ if (etype_ether->nr_kws && !etype_tag1->nr_kws && !etype_tag2->nr_kws)
+ key_fields[NPC_ETYPE] = *etype_ether;
+ if (!etype_ether->nr_kws && etype_tag1->nr_kws && !etype_tag2->nr_kws)
+ key_fields[NPC_ETYPE] = *etype_tag1;
+ if (!etype_ether->nr_kws && !etype_tag1->nr_kws && etype_tag2->nr_kws)
+ key_fields[NPC_ETYPE] = *etype_tag2;
+
+ /* if the programmed key profile extracts Ethertype from multiple layers */
+ if (etype_ether->nr_kws && etype_tag1->nr_kws) {
+ for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
+ if (etype_ether->kw_mask[i] != etype_tag1->kw_mask[i]) {
+ dev_err(rvu->dev, "mkex: Etype pos is different for untagged and tagged pkts.\n");
+ goto vlan_tci;
+ }
+ }
+ key_fields[NPC_ETYPE] = *etype_tag1;
+ }
+ if (etype_ether->nr_kws && etype_tag2->nr_kws) {
+ for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
+ if (etype_ether->kw_mask[i] != etype_tag2->kw_mask[i]) {
+ dev_err(rvu->dev, "mkex: Etype pos is different for untagged and double tagged pkts.\n");
+ goto vlan_tci;
+ }
+ }
+ key_fields[NPC_ETYPE] = *etype_tag2;
+ }
+ if (etype_tag1->nr_kws && etype_tag2->nr_kws) {
+ for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
+ if (etype_tag1->kw_mask[i] != etype_tag2->kw_mask[i]) {
+ dev_err(rvu->dev, "mkex: Etype pos is different for tagged and double tagged pkts.\n");
+ goto vlan_tci;
+ }
+ }
+ key_fields[NPC_ETYPE] = *etype_tag2;
+ }
+
+ /* check none of higher layers overwrite Ethertype */
+ start_lid = key_fields[NPC_ETYPE].layer_mdata.lid + 1;
+ if (npc_check_overlap(rvu, blkaddr, NPC_ETYPE, start_lid, intf)) {
+ dev_err(rvu->dev, "mkex: Ethertype is overwritten by higher layers.\n");
+ goto vlan_tci;
+ }
+ *features |= BIT_ULL(NPC_ETYPE);
+vlan_tci:
+ /* if key profile does not extract outer vlan tci at all */
+ if (!vlan_tag1->nr_kws && !vlan_tag2->nr_kws) {
+ dev_err(rvu->dev, "mkex: Outer vlan tci is not extracted.\n");
+ goto done;
+ }
+
+ /* if key profile extracts outer vlan tci from one layer */
+ if (vlan_tag1->nr_kws && !vlan_tag2->nr_kws)
+ key_fields[NPC_OUTER_VID] = *vlan_tag1;
+ if (!vlan_tag1->nr_kws && vlan_tag2->nr_kws)
+ key_fields[NPC_OUTER_VID] = *vlan_tag2;
+
+ /* if key profile extracts outer vlan tci from multiple layers */
+ if (vlan_tag1->nr_kws && vlan_tag2->nr_kws) {
+ for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
+ if (vlan_tag1->kw_mask[i] != vlan_tag2->kw_mask[i]) {
+ dev_err(rvu->dev, "mkex: Out vlan tci pos is different for tagged and double tagged pkts.\n");
+ goto done;
+ }
+ }
+ key_fields[NPC_OUTER_VID] = *vlan_tag2;
+ }
+ /* check none of higher layers overwrite outer vlan tci */
+ start_lid = key_fields[NPC_OUTER_VID].layer_mdata.lid + 1;
+ if (npc_check_overlap(rvu, blkaddr, NPC_OUTER_VID, start_lid, intf)) {
+ dev_err(rvu->dev, "mkex: Outer vlan tci is overwritten by higher layers.\n");
+ goto done;
+ }
+ *features |= BIT_ULL(NPC_OUTER_VID);
+
+ /* If key profile extracts inner vlan tci */
+ if (vlan_tag3->nr_kws) {
+ key_fields[NPC_INNER_VID] = *vlan_tag3;
+ *features |= BIT_ULL(NPC_INNER_VID);
+ }
+done:
+ return;
+}
+
+static void npc_scan_ldata(struct rvu *rvu, int blkaddr, u8 lid,
+ u8 lt, u64 cfg, u8 intf)
+{
+ struct npc_mcam_kex_hash *mkex_hash = rvu->kpu.mkex_hash;
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u8 hdr, key, nr_bytes, bit_offset;
+ u8 la_ltype, la_start;
+ /* starting KW index and starting bit position */
+ int start_kwi, offset;
+
+ nr_bytes = FIELD_GET(NPC_BYTESM, cfg) + 1;
+ hdr = FIELD_GET(NPC_HDR_OFFSET, cfg);
+ key = FIELD_GET(NPC_KEY_OFFSET, cfg);
+
+ /* For Tx, Layer A has the 8-byte NIX_INST_HDR_S preceding the
+ * ethernet header, hence la_start is 8 below.
+ */
+ if (is_npc_intf_tx(intf)) {
+ la_ltype = NPC_LT_LA_IH_NIX_ETHER;
+ la_start = 8;
+ } else {
+ la_ltype = NPC_LT_LA_ETHER;
+ la_start = 0;
+ }
+
+#define NPC_SCAN_HDR(name, hlid, hlt, hstart, hlen) \
+do { \
+ start_kwi = key / 8; \
+ offset = (key * 8) % 64; \
+ if (lid == (hlid) && lt == (hlt)) { \
+ if ((hstart) >= hdr && \
+ ((hstart) + (hlen)) <= (hdr + nr_bytes)) { \
+ bit_offset = (hdr + nr_bytes - (hstart) - (hlen)) * 8; \
+ npc_set_layer_mdata(mcam, (name), cfg, lid, lt, intf); \
+ offset += bit_offset; \
+ start_kwi += offset / 64; \
+ offset %= 64; \
+ npc_set_kw_masks(mcam, (name), (hlen) * 8, \
+ start_kwi, offset, intf); \
+ } \
+ } \
+} while (0)
+
+ /* List the LID, LTYPE, start offset from the layer and length (in bytes)
+ * of the packet header fields below.
+ * Example: Source IP is 4 bytes and starts at the 12th byte of the IP header
+ */
+ NPC_SCAN_HDR(NPC_TOS, NPC_LID_LC, NPC_LT_LC_IP, 1, 1);
+ NPC_SCAN_HDR(NPC_IPFRAG_IPV4, NPC_LID_LC, NPC_LT_LC_IP, 6, 1);
+ NPC_SCAN_HDR(NPC_SIP_IPV4, NPC_LID_LC, NPC_LT_LC_IP, 12, 4);
+ NPC_SCAN_HDR(NPC_DIP_IPV4, NPC_LID_LC, NPC_LT_LC_IP, 16, 4);
+ NPC_SCAN_HDR(NPC_IPFRAG_IPV6, NPC_LID_LC, NPC_LT_LC_IP6_EXT, 6, 1);
+ if (rvu->hw->cap.npc_hash_extract) {
+ if (mkex_hash->lid_lt_ld_hash_en[intf][lid][lt][0])
+ NPC_SCAN_HDR(NPC_SIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 8, 4);
+ else
+ NPC_SCAN_HDR(NPC_SIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 8, 16);
+
+ if (mkex_hash->lid_lt_ld_hash_en[intf][lid][lt][1])
+ NPC_SCAN_HDR(NPC_DIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 24, 4);
+ else
+ NPC_SCAN_HDR(NPC_DIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 24, 16);
+ } else {
+ NPC_SCAN_HDR(NPC_SIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 8, 16);
+ NPC_SCAN_HDR(NPC_DIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 24, 16);
+ }
+
+ NPC_SCAN_HDR(NPC_SPORT_UDP, NPC_LID_LD, NPC_LT_LD_UDP, 0, 2);
+ NPC_SCAN_HDR(NPC_DPORT_UDP, NPC_LID_LD, NPC_LT_LD_UDP, 2, 2);
+ NPC_SCAN_HDR(NPC_SPORT_TCP, NPC_LID_LD, NPC_LT_LD_TCP, 0, 2);
+ NPC_SCAN_HDR(NPC_DPORT_TCP, NPC_LID_LD, NPC_LT_LD_TCP, 2, 2);
+ NPC_SCAN_HDR(NPC_SPORT_SCTP, NPC_LID_LD, NPC_LT_LD_SCTP, 0, 2);
+ NPC_SCAN_HDR(NPC_DPORT_SCTP, NPC_LID_LD, NPC_LT_LD_SCTP, 2, 2);
+ NPC_SCAN_HDR(NPC_ETYPE_ETHER, NPC_LID_LA, NPC_LT_LA_ETHER, 12, 2);
+ NPC_SCAN_HDR(NPC_ETYPE_TAG1, NPC_LID_LB, NPC_LT_LB_CTAG, 4, 2);
+ NPC_SCAN_HDR(NPC_ETYPE_TAG2, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 8, 2);
+ NPC_SCAN_HDR(NPC_VLAN_TAG1, NPC_LID_LB, NPC_LT_LB_CTAG, 2, 2);
+ NPC_SCAN_HDR(NPC_VLAN_TAG2, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 2, 2);
+ NPC_SCAN_HDR(NPC_VLAN_TAG3, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 6, 2);
+ NPC_SCAN_HDR(NPC_DMAC, NPC_LID_LA, la_ltype, la_start, 6);
+
+ NPC_SCAN_HDR(NPC_IPSEC_SPI, NPC_LID_LD, NPC_LT_LD_AH, 4, 4);
+ NPC_SCAN_HDR(NPC_IPSEC_SPI, NPC_LID_LE, NPC_LT_LE_ESP, 0, 4);
+
+ /* SMAC follows the DMAC (which is 6 bytes) */
+ NPC_SCAN_HDR(NPC_SMAC, NPC_LID_LA, la_ltype, la_start + 6, 6);
+ /* PF_FUNC is 2 bytes at 0th byte of NPC_LT_LA_IH_NIX_ETHER */
+ NPC_SCAN_HDR(NPC_PF_FUNC, NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER, 0, 2);
+}
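
To make the NPC_SCAN_HDR() offset math above concrete (a hedged sketch with invented values, not driver code): the extractor copies nr_bytes of a layer starting at key byte 'key', and the field of interest sits some bytes back from the end of that window, which is what the bit_offset term accounts for.

#include <stdio.h>

int main(void)
{
	/* hypothetical extractor window: 8 bytes of the layer land at key byte 18 */
	unsigned int key = 18, hdr = 12, nr_bytes = 8;
	/* hypothetical field of interest: 4 bytes starting at layer byte 12 */
	unsigned int hstart = 12, hlen = 4;
	unsigned int start_kwi = key / 8;
	unsigned int offset = (key * 8) % 64;
	unsigned int bit_offset = (hdr + nr_bytes - hstart - hlen) * 8;

	offset += bit_offset;
	start_kwi += offset / 64;
	offset %= 64;

	/* prints: field lands in key word 2 at bit offset 48 */
	printf("field lands in key word %u at bit offset %u\n", start_kwi, offset);
	return 0;
}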
+
+static void npc_set_features(struct rvu *rvu, int blkaddr, u8 intf)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u64 *features = &mcam->rx_features;
+ u64 tcp_udp_sctp;
+ int hdr;
+
+ if (is_npc_intf_tx(intf))
+ features = &mcam->tx_features;
+
+ for (hdr = NPC_DMAC; hdr < NPC_HEADER_FIELDS_MAX; hdr++) {
+ if (npc_check_field(rvu, blkaddr, hdr, intf))
+ *features |= BIT_ULL(hdr);
+ }
+
+ tcp_udp_sctp = BIT_ULL(NPC_SPORT_TCP) | BIT_ULL(NPC_SPORT_UDP) |
+ BIT_ULL(NPC_DPORT_TCP) | BIT_ULL(NPC_DPORT_UDP) |
+ BIT_ULL(NPC_SPORT_SCTP) | BIT_ULL(NPC_DPORT_SCTP);
+
+ /* for tcp/udp/sctp corresponding layer type should be in the key */
+ if (*features & tcp_udp_sctp) {
+ if (!npc_check_field(rvu, blkaddr, NPC_LD, intf))
+ *features &= ~tcp_udp_sctp;
+ else
+ *features |= BIT_ULL(NPC_IPPROTO_TCP) |
+ BIT_ULL(NPC_IPPROTO_UDP) |
+ BIT_ULL(NPC_IPPROTO_SCTP);
+ }
+
+ /* for AH/ICMP/ICMPv6, check if the corresponding layer type is present in the key */
+ if (npc_check_field(rvu, blkaddr, NPC_LD, intf)) {
+ *features |= BIT_ULL(NPC_IPPROTO_AH);
+ *features |= BIT_ULL(NPC_IPPROTO_ICMP);
+ *features |= BIT_ULL(NPC_IPPROTO_ICMP6);
+ }
+
+ /* for ESP, check if corresponding layer type is present in the key */
+ if (npc_check_field(rvu, blkaddr, NPC_LE, intf))
+ *features |= BIT_ULL(NPC_IPPROTO_ESP);
+
+ /* for vlan corresponding layer type should be in the key */
+ if (*features & BIT_ULL(NPC_OUTER_VID))
+ if (!npc_check_field(rvu, blkaddr, NPC_LB, intf))
+ *features &= ~BIT_ULL(NPC_OUTER_VID);
+
+ /* Set SPI flag only if AH/ESP and IPSEC_SPI are in the key */
+ if (npc_check_field(rvu, blkaddr, NPC_IPSEC_SPI, intf) &&
+ (*features & (BIT_ULL(NPC_IPPROTO_ESP) | BIT_ULL(NPC_IPPROTO_AH))))
+ *features |= BIT_ULL(NPC_IPSEC_SPI);
+
+ /* for vlan ethertypes corresponding layer type should be in the key */
+ if (npc_check_field(rvu, blkaddr, NPC_LB, intf))
+ *features |= BIT_ULL(NPC_VLAN_ETYPE_CTAG) |
+ BIT_ULL(NPC_VLAN_ETYPE_STAG);
+
+ /* for L2M/L2B/L3M/L3B, check if the type is present in the key */
+ if (npc_check_field(rvu, blkaddr, NPC_LXMB, intf))
+ *features |= BIT_ULL(NPC_LXMB);
+}
+
+/* Scan the key extraction profile and record how the fields of our interest
+ * fill the key structure. Also verify that Channel and DMAC exist in the
+ * key and are not overwritten by other header fields.
+ */
+static int npc_scan_kex(struct rvu *rvu, int blkaddr, u8 intf)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u8 lid, lt, ld, bitnr;
+ u64 cfg, masked_cfg;
+ u8 key_nibble = 0;
+
+ /* Scan and note how the parse result is going to be laid out in the key.
+ * A bit set in PARSE_NIBBLE_ENA corresponds to a nibble of the parse
+ * result in the key. The enabled nibbles from the parse result
+ * are concatenated into the key.
+ */
+ cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(intf));
+ masked_cfg = cfg & NPC_PARSE_NIBBLE;
+ for_each_set_bit(bitnr, (unsigned long *)&masked_cfg, 31) {
+ npc_scan_parse_result(mcam, bitnr, key_nibble, intf);
+ key_nibble++;
+ }
+
+ /* Ignore exact match bits for mcam entries except the first rule,
+ * which is drop on hit. This first rule is configured explicitly by
+ * the exact match code.
+ */
+ masked_cfg = cfg & NPC_EXACT_NIBBLE;
+ bitnr = NPC_EXACT_NIBBLE_START;
+ for_each_set_bit_from(bitnr, (unsigned long *)&masked_cfg, NPC_EXACT_NIBBLE_END + 1) {
+ npc_scan_exact_result(mcam, bitnr, key_nibble, intf);
+ key_nibble++;
+ }
+
+ /* Scan and note how layer data is going to be in key */
+ for (lid = 0; lid < NPC_MAX_LID; lid++) {
+ for (lt = 0; lt < NPC_MAX_LT; lt++) {
+ for (ld = 0; ld < NPC_MAX_LD; ld++) {
+ cfg = rvu_read64(rvu, blkaddr,
+ NPC_AF_INTFX_LIDX_LTX_LDX_CFG
+ (intf, lid, lt, ld));
+ if (!FIELD_GET(NPC_LDATA_EN, cfg))
+ continue;
+ npc_scan_ldata(rvu, blkaddr, lid, lt, cfg,
+ intf);
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int npc_scan_verify_kex(struct rvu *rvu, int blkaddr)
+{
+ int err;
+
+ err = npc_scan_kex(rvu, blkaddr, NIX_INTF_RX);
+ if (err)
+ return err;
+
+ err = npc_scan_kex(rvu, blkaddr, NIX_INTF_TX);
+ if (err)
+ return err;
+
+ /* Channel is mandatory */
+ if (!npc_is_field_present(rvu, NPC_CHAN, NIX_INTF_RX)) {
+ dev_err(rvu->dev, "Channel not present in Key\n");
+ return -EINVAL;
+ }
+ /* check that none of the fields overwrite channel */
+ if (npc_check_overlap(rvu, blkaddr, NPC_CHAN, 0, NIX_INTF_RX)) {
+ dev_err(rvu->dev, "Channel cannot be overwritten\n");
+ return -EINVAL;
+ }
+
+ npc_set_features(rvu, blkaddr, NIX_INTF_TX);
+ npc_set_features(rvu, blkaddr, NIX_INTF_RX);
+ npc_handle_multi_layer_fields(rvu, blkaddr, NIX_INTF_TX);
+ npc_handle_multi_layer_fields(rvu, blkaddr, NIX_INTF_RX);
+
+ return 0;
+}
+
+int npc_flow_steering_init(struct rvu *rvu, int blkaddr)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+
+ INIT_LIST_HEAD(&mcam->mcam_rules);
+
+ return npc_scan_verify_kex(rvu, blkaddr);
+}
+
+static int npc_check_unsupported_flows(struct rvu *rvu, u64 features, u8 intf)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u64 *mcam_features = &mcam->rx_features;
+ u64 unsupported;
+ u8 bit;
+
+ if (is_npc_intf_tx(intf))
+ mcam_features = &mcam->tx_features;
+
+ unsupported = (*mcam_features ^ features) & ~(*mcam_features);
+ if (unsupported) {
+ dev_warn(rvu->dev, "Unsupported flow(s):\n");
+ for_each_set_bit(bit, (unsigned long *)&unsupported, 64)
+ dev_warn(rvu->dev, "%s ", npc_get_field_name(bit));
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+/* npc_update_entry - Based on the masks generated during
+ * the key scanning, updates the given entry with value and
+ * masks for the field of interest. A maximum of 16 bytes of a packet
+ * header can be extracted by HW, hence lo and hi are sufficient.
+ * When the field is less than or equal to 8 bytes, hi should be
+ * 0 for both value and mask.
+ *
+ * If an exact match of the value is required then the mask should be all 1's.
+ * If any bits in the mask are 0 then the corresponding bits in the value
+ * are don't care.
+ */
+void npc_update_entry(struct rvu *rvu, enum key_fields type,
+ struct mcam_entry *entry, u64 val_lo,
+ u64 val_hi, u64 mask_lo, u64 mask_hi, u8 intf)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct mcam_entry dummy = { {0} };
+ struct npc_key_field *field;
+ u64 kw1, kw2, kw3;
+ u8 shift;
+ int i;
+
+ field = &mcam->rx_key_fields[type];
+ if (is_npc_intf_tx(intf))
+ field = &mcam->tx_key_fields[type];
+
+ if (!field->nr_kws)
+ return;
+
+ for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
+ if (!field->kw_mask[i])
+ continue;
+ /* place key value in kw[x] */
+ shift = __ffs64(field->kw_mask[i]);
+ /* update entry value */
+ kw1 = (val_lo << shift) & field->kw_mask[i];
+ dummy.kw[i] = kw1;
+ /* update entry mask */
+ kw1 = (mask_lo << shift) & field->kw_mask[i];
+ dummy.kw_mask[i] = kw1;
+
+ if (field->nr_kws == 1)
+ break;
+ /* place remaining bits of key value in kw[x + 1] */
+ if (field->nr_kws == 2) {
+ /* update entry value */
+ kw2 = shift ? val_lo >> (64 - shift) : 0;
+ kw2 |= (val_hi << shift);
+ kw2 &= field->kw_mask[i + 1];
+ dummy.kw[i + 1] = kw2;
+ /* update entry mask */
+ kw2 = shift ? mask_lo >> (64 - shift) : 0;
+ kw2 |= (mask_hi << shift);
+ kw2 &= field->kw_mask[i + 1];
+ dummy.kw_mask[i + 1] = kw2;
+ break;
+ }
+ /* place remaining bits of key value in kw[x + 1], kw[x + 2] */
+ if (field->nr_kws == 3) {
+ /* update entry value */
+ kw2 = shift ? val_lo >> (64 - shift) : 0;
+ kw2 |= (val_hi << shift);
+ kw2 &= field->kw_mask[i + 1];
+ kw3 = shift ? val_hi >> (64 - shift) : 0;
+ kw3 &= field->kw_mask[i + 2];
+ dummy.kw[i + 1] = kw2;
+ dummy.kw[i + 2] = kw3;
+ /* update entry mask */
+ kw2 = shift ? mask_lo >> (64 - shift) : 0;
+ kw2 |= (mask_hi << shift);
+ kw2 &= field->kw_mask[i + 1];
+ kw3 = shift ? mask_hi >> (64 - shift) : 0;
+ kw3 &= field->kw_mask[i + 2];
+ dummy.kw_mask[i + 1] = kw2;
+ dummy.kw_mask[i + 2] = kw3;
+ break;
+ }
+ }
+ /* dummy is now ready with the values and masks for the given key
+ * field; clear the input entry and update it with those
+ */
+ for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
+ if (!field->kw_mask[i])
+ continue;
+ entry->kw[i] &= ~field->kw_mask[i];
+ entry->kw_mask[i] &= ~field->kw_mask[i];
+
+ entry->kw[i] |= dummy.kw[i];
+ entry->kw_mask[i] |= dummy.kw_mask[i];
+ }
+}
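
The value placement above boils down to: shift the value up to the lowest set bit of the keyword mask, keep what fits, and spill the overflow into the next keyword. A standalone sketch of the two-keyword case (illustration only, not the driver function; ffs64() here is a hypothetical stand-in for __ffs64()):

#include <stdint.h>
#include <stdio.h>

static unsigned int ffs64(uint64_t x)
{
	return (unsigned int)__builtin_ctzll(x);	/* index of the lowest set bit */
}

int main(void)
{
	/* hypothetical field spanning two key words: bits 56-63 of kw[0], bits 0-7 of kw[1] */
	uint64_t kw_mask[2] = { 0xffull << 56, 0xffull };
	uint64_t val = 0xabcd;			/* 16-bit example value */
	uint64_t kw[2];
	unsigned int shift = ffs64(kw_mask[0]);	/* 56 */

	kw[0] = (val << shift) & kw_mask[0];		/* low 8 bits of val land here */
	kw[1] = (val >> (64 - shift)) & kw_mask[1];	/* remaining bits spill over */

	/* prints kw[0]=0xcd00000000000000 kw[1]=0xab */
	printf("kw[0]=%#llx kw[1]=%#llx\n",
	       (unsigned long long)kw[0], (unsigned long long)kw[1]);
	return 0;
}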
+
+static void npc_update_ipv6_flow(struct rvu *rvu, struct mcam_entry *entry,
+ u64 features, struct flow_msg *pkt,
+ struct flow_msg *mask,
+ struct rvu_npc_mcam_rule *output, u8 intf)
+{
+ u32 src_ip[IPV6_WORDS], src_ip_mask[IPV6_WORDS];
+ u32 dst_ip[IPV6_WORDS], dst_ip_mask[IPV6_WORDS];
+ struct flow_msg *opkt = &output->packet;
+ struct flow_msg *omask = &output->mask;
+ u64 mask_lo, mask_hi;
+ u64 val_lo, val_hi;
+
+ /* For an ipv6 address fe80::2c68:63ff:fe5e:2d0a the packet
+ * values to be programmed in MCAM should be as below:
+ * val_high: 0xfe80000000000000
+ * val_low: 0x2c6863fffe5e2d0a
+ */
+ if (features & BIT_ULL(NPC_SIP_IPV6)) {
+ be32_to_cpu_array(src_ip_mask, mask->ip6src, IPV6_WORDS);
+ be32_to_cpu_array(src_ip, pkt->ip6src, IPV6_WORDS);
+
+ mask_hi = (u64)src_ip_mask[0] << 32 | src_ip_mask[1];
+ mask_lo = (u64)src_ip_mask[2] << 32 | src_ip_mask[3];
+ val_hi = (u64)src_ip[0] << 32 | src_ip[1];
+ val_lo = (u64)src_ip[2] << 32 | src_ip[3];
+
+ npc_update_entry(rvu, NPC_SIP_IPV6, entry, val_lo, val_hi,
+ mask_lo, mask_hi, intf);
+ memcpy(opkt->ip6src, pkt->ip6src, sizeof(opkt->ip6src));
+ memcpy(omask->ip6src, mask->ip6src, sizeof(omask->ip6src));
+ }
+ if (features & BIT_ULL(NPC_DIP_IPV6)) {
+ be32_to_cpu_array(dst_ip_mask, mask->ip6dst, IPV6_WORDS);
+ be32_to_cpu_array(dst_ip, pkt->ip6dst, IPV6_WORDS);
+
+ mask_hi = (u64)dst_ip_mask[0] << 32 | dst_ip_mask[1];
+ mask_lo = (u64)dst_ip_mask[2] << 32 | dst_ip_mask[3];
+ val_hi = (u64)dst_ip[0] << 32 | dst_ip[1];
+ val_lo = (u64)dst_ip[2] << 32 | dst_ip[3];
+
+ npc_update_entry(rvu, NPC_DIP_IPV6, entry, val_lo, val_hi,
+ mask_lo, mask_hi, intf);
+ memcpy(opkt->ip6dst, pkt->ip6dst, sizeof(opkt->ip6dst));
+ memcpy(omask->ip6dst, mask->ip6dst, sizeof(omask->ip6dst));
+ }
+}
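
The packing described in the comment above can be checked with a few lines of standalone C (illustration only): after be32_to_cpu_array() the address fe80::2c68:63ff:fe5e:2d0a becomes four host-order 32-bit words, and the two 64-bit halves fall out of the same shift/OR used in the function.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* words as be32_to_cpu_array() would produce them for this address */
	uint32_t ip[4] = { 0xfe800000, 0x00000000, 0x2c6863ff, 0xfe5e2d0a };
	uint64_t val_hi = (uint64_t)ip[0] << 32 | ip[1];
	uint64_t val_lo = (uint64_t)ip[2] << 32 | ip[3];

	/* prints val_hi=0xfe80000000000000 val_lo=0x2c6863fffe5e2d0a */
	printf("val_hi=%#018llx val_lo=%#018llx\n",
	       (unsigned long long)val_hi, (unsigned long long)val_lo);
	return 0;
}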
+
+static void npc_update_vlan_features(struct rvu *rvu, struct mcam_entry *entry,
+ u64 features, u8 intf)
+{
+ bool ctag = !!(features & BIT_ULL(NPC_VLAN_ETYPE_CTAG));
+ bool stag = !!(features & BIT_ULL(NPC_VLAN_ETYPE_STAG));
+ bool vid = !!(features & BIT_ULL(NPC_OUTER_VID));
+
+ /* If only VLAN id is given then always match outer VLAN id */
+ if (vid && !ctag && !stag) {
+ npc_update_entry(rvu, NPC_LB, entry,
+ NPC_LT_LB_STAG_QINQ | NPC_LT_LB_CTAG, 0,
+ NPC_LT_LB_STAG_QINQ & NPC_LT_LB_CTAG, 0, intf);
+ return;
+ }
+ if (ctag)
+ npc_update_entry(rvu, NPC_LB, entry, NPC_LT_LB_CTAG, 0,
+ ~0ULL, 0, intf);
+ if (stag)
+ npc_update_entry(rvu, NPC_LB, entry, NPC_LT_LB_STAG_QINQ, 0,
+ ~0ULL, 0, intf);
+}
+
+static void npc_update_flow(struct rvu *rvu, struct mcam_entry *entry,
+ u64 features, struct flow_msg *pkt,
+ struct flow_msg *mask,
+ struct rvu_npc_mcam_rule *output, u8 intf,
+ int blkaddr)
+{
+ u64 dmac_mask = ether_addr_to_u64(mask->dmac);
+ u64 smac_mask = ether_addr_to_u64(mask->smac);
+ u64 dmac_val = ether_addr_to_u64(pkt->dmac);
+ u64 smac_val = ether_addr_to_u64(pkt->smac);
+ struct flow_msg *opkt = &output->packet;
+ struct flow_msg *omask = &output->mask;
+
+ if (!features)
+ return;
+
+ /* For tcp/udp/sctp LTYPE should be present in entry */
+ if (features & BIT_ULL(NPC_IPPROTO_TCP))
+ npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_TCP,
+ 0, ~0ULL, 0, intf);
+ if (features & BIT_ULL(NPC_IPPROTO_UDP))
+ npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_UDP,
+ 0, ~0ULL, 0, intf);
+ if (features & BIT_ULL(NPC_IPPROTO_SCTP))
+ npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_SCTP,
+ 0, ~0ULL, 0, intf);
+ if (features & BIT_ULL(NPC_IPPROTO_ICMP))
+ npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_ICMP,
+ 0, ~0ULL, 0, intf);
+ if (features & BIT_ULL(NPC_IPPROTO_ICMP6))
+ npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_ICMP6,
+ 0, ~0ULL, 0, intf);
+
+ /* For AH, LTYPE should be present in entry */
+ if (features & BIT_ULL(NPC_IPPROTO_AH))
+ npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_AH,
+ 0, ~0ULL, 0, intf);
+ /* For ESP, LTYPE should be present in entry */
+ if (features & BIT_ULL(NPC_IPPROTO_ESP))
+ npc_update_entry(rvu, NPC_LE, entry, NPC_LT_LE_ESP,
+ 0, ~0ULL, 0, intf);
+
+ if (features & BIT_ULL(NPC_LXMB)) {
+ output->lxmb = is_broadcast_ether_addr(pkt->dmac) ? 2 : 1;
+ npc_update_entry(rvu, NPC_LXMB, entry, output->lxmb, 0,
+ output->lxmb, 0, intf);
+ }
+#define NPC_WRITE_FLOW(field, member, val_lo, val_hi, mask_lo, mask_hi) \
+do { \
+ if (features & BIT_ULL((field))) { \
+ npc_update_entry(rvu, (field), entry, (val_lo), (val_hi), \
+ (mask_lo), (mask_hi), intf); \
+ memcpy(&opkt->member, &pkt->member, sizeof(pkt->member)); \
+ memcpy(&omask->member, &mask->member, sizeof(mask->member)); \
+ } \
+} while (0)
+
+ NPC_WRITE_FLOW(NPC_DMAC, dmac, dmac_val, 0, dmac_mask, 0);
+
+ NPC_WRITE_FLOW(NPC_SMAC, smac, smac_val, 0, smac_mask, 0);
+ NPC_WRITE_FLOW(NPC_ETYPE, etype, ntohs(pkt->etype), 0,
+ ntohs(mask->etype), 0);
+ NPC_WRITE_FLOW(NPC_TOS, tos, pkt->tos, 0, mask->tos, 0);
+ NPC_WRITE_FLOW(NPC_IPFRAG_IPV4, ip_flag, pkt->ip_flag, 0,
+ mask->ip_flag, 0);
+ NPC_WRITE_FLOW(NPC_SIP_IPV4, ip4src, ntohl(pkt->ip4src), 0,
+ ntohl(mask->ip4src), 0);
+ NPC_WRITE_FLOW(NPC_DIP_IPV4, ip4dst, ntohl(pkt->ip4dst), 0,
+ ntohl(mask->ip4dst), 0);
+ NPC_WRITE_FLOW(NPC_SPORT_TCP, sport, ntohs(pkt->sport), 0,
+ ntohs(mask->sport), 0);
+ NPC_WRITE_FLOW(NPC_SPORT_UDP, sport, ntohs(pkt->sport), 0,
+ ntohs(mask->sport), 0);
+ NPC_WRITE_FLOW(NPC_DPORT_TCP, dport, ntohs(pkt->dport), 0,
+ ntohs(mask->dport), 0);
+ NPC_WRITE_FLOW(NPC_DPORT_UDP, dport, ntohs(pkt->dport), 0,
+ ntohs(mask->dport), 0);
+ NPC_WRITE_FLOW(NPC_SPORT_SCTP, sport, ntohs(pkt->sport), 0,
+ ntohs(mask->sport), 0);
+ NPC_WRITE_FLOW(NPC_DPORT_SCTP, dport, ntohs(pkt->dport), 0,
+ ntohs(mask->dport), 0);
+
+ NPC_WRITE_FLOW(NPC_IPSEC_SPI, spi, ntohl(pkt->spi), 0,
+ ntohl(mask->spi), 0);
+
+ NPC_WRITE_FLOW(NPC_OUTER_VID, vlan_tci, ntohs(pkt->vlan_tci), 0,
+ ntohs(mask->vlan_tci), 0);
+ NPC_WRITE_FLOW(NPC_INNER_VID, vlan_itci, ntohs(pkt->vlan_itci), 0,
+ ntohs(mask->vlan_itci), 0);
+
+ NPC_WRITE_FLOW(NPC_IPFRAG_IPV6, next_header, pkt->next_header, 0,
+ mask->next_header, 0);
+ npc_update_ipv6_flow(rvu, entry, features, pkt, mask, output, intf);
+ npc_update_vlan_features(rvu, entry, features, intf);
+
+ npc_update_field_hash(rvu, intf, entry, blkaddr, features,
+ pkt, mask, opkt, omask);
+}
+
+static struct rvu_npc_mcam_rule *rvu_mcam_find_rule(struct npc_mcam *mcam, u16 entry)
+{
+ struct rvu_npc_mcam_rule *iter;
+
+ mutex_lock(&mcam->lock);
+ list_for_each_entry(iter, &mcam->mcam_rules, list) {
+ if (iter->entry == entry) {
+ mutex_unlock(&mcam->lock);
+ return iter;
+ }
+ }
+ mutex_unlock(&mcam->lock);
+
+ return NULL;
+}
+
+static void rvu_mcam_add_rule(struct npc_mcam *mcam,
+ struct rvu_npc_mcam_rule *rule)
+{
+ struct list_head *head = &mcam->mcam_rules;
+ struct rvu_npc_mcam_rule *iter;
+
+ mutex_lock(&mcam->lock);
+ list_for_each_entry(iter, &mcam->mcam_rules, list) {
+ if (iter->entry > rule->entry)
+ break;
+ head = &iter->list;
+ }
+
+ list_add(&rule->list, head);
+ mutex_unlock(&mcam->lock);
+}
+
+static void rvu_mcam_remove_counter_from_rule(struct rvu *rvu, u16 pcifunc,
+ struct rvu_npc_mcam_rule *rule)
+{
+ struct npc_mcam_oper_counter_req free_req = { 0 };
+ struct msg_rsp free_rsp;
+
+ if (!rule->has_cntr)
+ return;
+
+ free_req.hdr.pcifunc = pcifunc;
+ free_req.cntr = rule->cntr;
+
+ rvu_mbox_handler_npc_mcam_free_counter(rvu, &free_req, &free_rsp);
+ rule->has_cntr = false;
+}
+
+static void rvu_mcam_add_counter_to_rule(struct rvu *rvu, u16 pcifunc,
+ struct rvu_npc_mcam_rule *rule,
+ struct npc_install_flow_rsp *rsp)
+{
+ struct npc_mcam_alloc_counter_req cntr_req = { 0 };
+ struct npc_mcam_alloc_counter_rsp cntr_rsp = { 0 };
+ int err;
+
+ cntr_req.hdr.pcifunc = pcifunc;
+ cntr_req.contig = true;
+ cntr_req.count = 1;
+
+ /* we try to allocate a counter to track the stats of this
+ * rule. If a counter could not be allocated then proceed
+ * without one, because counters are more limited than entries.
+ */
+ err = rvu_mbox_handler_npc_mcam_alloc_counter(rvu, &cntr_req,
+ &cntr_rsp);
+ if (!err && cntr_rsp.count) {
+ rule->cntr = cntr_rsp.cntr;
+ rule->has_cntr = true;
+ rsp->counter = rule->cntr;
+ } else {
+ rsp->counter = err;
+ }
+}
+
+static void npc_update_rx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf,
+ struct mcam_entry *entry,
+ struct npc_install_flow_req *req,
+ u16 target, bool pf_set_vfs_mac)
+{
+ struct rvu_switch *rswitch = &rvu->rswitch;
+ struct nix_rx_action action;
+
+ if (rswitch->mode == DEVLINK_ESWITCH_MODE_SWITCHDEV && pf_set_vfs_mac)
+ req->chan_mask = 0x0; /* Do not care channel */
+
+ npc_update_entry(rvu, NPC_CHAN, entry, req->channel, 0, req->chan_mask,
+ 0, NIX_INTF_RX);
+
+ *(u64 *)&action = 0x00;
+ action.pf_func = target;
+ action.op = req->op;
+ action.index = req->index;
+ action.match_id = req->match_id;
+ action.flow_key_alg = req->flow_key_alg;
+
+ if (req->op == NIX_RX_ACTION_DEFAULT) {
+ if (pfvf->def_ucast_rule) {
+ action = pfvf->def_ucast_rule->rx_action;
+ } else {
+ /* For profiles which do not extract DMAC, the default
+ * unicast entry is unused. Hence modify the action for
+ * requests which use the same action as the default
+ * unicast entry.
+ */
+ *(u64 *)&action = 0;
+ action.pf_func = target;
+ action.op = NIX_RX_ACTIONOP_UCAST;
+ }
+ }
+
+ entry->action = *(u64 *)&action;
+
+ /* VTAG0 starts at 0th byte of LID_B.
+ * VTAG1 starts at 4th byte of LID_B.
+ */
+ entry->vtag_action = FIELD_PREP(RX_VTAG0_VALID_BIT, req->vtag0_valid) |
+ FIELD_PREP(RX_VTAG0_TYPE_MASK, req->vtag0_type) |
+ FIELD_PREP(RX_VTAG0_LID_MASK, NPC_LID_LB) |
+ FIELD_PREP(RX_VTAG0_RELPTR_MASK, 0) |
+ FIELD_PREP(RX_VTAG1_VALID_BIT, req->vtag1_valid) |
+ FIELD_PREP(RX_VTAG1_TYPE_MASK, req->vtag1_type) |
+ FIELD_PREP(RX_VTAG1_LID_MASK, NPC_LID_LB) |
+ FIELD_PREP(RX_VTAG1_RELPTR_MASK, 4);
+}
+
+static void npc_update_tx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf,
+ struct mcam_entry *entry,
+ struct npc_install_flow_req *req, u16 target)
+{
+ struct nix_tx_action action;
+ u64 mask = ~0ULL;
+
+ /* If AF is installing then don't match on the
+ * PF_FUNC in the Send Descriptor
+ */
+ if (is_pffunc_af(req->hdr.pcifunc))
+ mask = 0;
+
+ npc_update_entry(rvu, NPC_PF_FUNC, entry, (__force u16)htons(target),
+ 0, mask, 0, NIX_INTF_TX);
+
+ *(u64 *)&action = 0x00;
+ action.op = req->op;
+ action.index = req->index;
+ action.match_id = req->match_id;
+
+ entry->action = *(u64 *)&action;
+
+ /* VTAG0 starts at 0th byte of LID_B.
+ * VTAG1 starts at 4th byte of LID_B.
+ */
+ entry->vtag_action = FIELD_PREP(TX_VTAG0_DEF_MASK, req->vtag0_def) |
+ FIELD_PREP(TX_VTAG0_OP_MASK, req->vtag0_op) |
+ FIELD_PREP(TX_VTAG0_LID_MASK, NPC_LID_LA) |
+ FIELD_PREP(TX_VTAG0_RELPTR_MASK, 20) |
+ FIELD_PREP(TX_VTAG1_DEF_MASK, req->vtag1_def) |
+ FIELD_PREP(TX_VTAG1_OP_MASK, req->vtag1_op) |
+ FIELD_PREP(TX_VTAG1_LID_MASK, NPC_LID_LA) |
+ FIELD_PREP(TX_VTAG1_RELPTR_MASK, 24);
+}
+
+static int npc_install_flow(struct rvu *rvu, int blkaddr, u16 target,
+ int nixlf, struct rvu_pfvf *pfvf,
+ struct npc_install_flow_req *req,
+ struct npc_install_flow_rsp *rsp, bool enable,
+ bool pf_set_vfs_mac)
+{
+ struct rvu_npc_mcam_rule *def_ucast_rule = pfvf->def_ucast_rule;
+ u64 features, installed_features, missing_features = 0;
+ struct npc_mcam_write_entry_req write_req = { 0 };
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_npc_mcam_rule dummy = { 0 };
+ struct rvu_npc_mcam_rule *rule;
+ u16 owner = req->hdr.pcifunc;
+ struct msg_rsp write_rsp;
+ struct mcam_entry *entry;
+ bool new = false;
+ u16 entry_index;
+ int err;
+
+ installed_features = req->features;
+ features = req->features;
+ entry = &write_req.entry_data;
+ entry_index = req->entry;
+
+ npc_update_flow(rvu, entry, features, &req->packet, &req->mask, &dummy,
+ req->intf, blkaddr);
+
+ if (is_npc_intf_rx(req->intf))
+ npc_update_rx_entry(rvu, pfvf, entry, req, target, pf_set_vfs_mac);
+ else
+ npc_update_tx_entry(rvu, pfvf, entry, req, target);
+
+ /* Default unicast rules do not exist for TX */
+ if (is_npc_intf_tx(req->intf))
+ goto find_rule;
+
+ if (req->default_rule) {
+ entry_index = npc_get_nixlf_mcam_index(mcam, target, nixlf,
+ NIXLF_UCAST_ENTRY);
+ enable = is_mcam_entry_enabled(rvu, mcam, blkaddr, entry_index);
+ }
+
+ /* update mcam entry with default unicast rule attributes */
+ if (def_ucast_rule && (req->default_rule && req->append)) {
+ missing_features = (def_ucast_rule->features ^ features) &
+ def_ucast_rule->features;
+ if (missing_features)
+ npc_update_flow(rvu, entry, missing_features,
+ &def_ucast_rule->packet,
+ &def_ucast_rule->mask,
+ &dummy, req->intf,
+ blkaddr);
+ installed_features = req->features | missing_features;
+ }
+
+find_rule:
+ rule = rvu_mcam_find_rule(mcam, entry_index);
+ if (!rule) {
+ rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+ if (!rule)
+ return -ENOMEM;
+ new = true;
+ }
+
+ /* allocate new counter if rule has no counter */
+ if (!req->default_rule && req->set_cntr && !rule->has_cntr)
+ rvu_mcam_add_counter_to_rule(rvu, owner, rule, rsp);
+
+ /* if user wants to delete an existing counter for a rule then
+ * free the counter
+ */
+ if (!req->set_cntr && rule->has_cntr)
+ rvu_mcam_remove_counter_from_rule(rvu, owner, rule);
+
+ write_req.hdr.pcifunc = owner;
+
+ /* AF owns the default rules so change the owner just to relax
+ * the checks in rvu_mbox_handler_npc_mcam_write_entry
+ */
+ if (req->default_rule)
+ write_req.hdr.pcifunc = 0;
+
+ write_req.entry = entry_index;
+ write_req.intf = req->intf;
+ write_req.enable_entry = (u8)enable;
+ /* if counter is available then clear and use it */
+ if (req->set_cntr && rule->has_cntr) {
+ rvu_write64(rvu, blkaddr, NPC_AF_MATCH_STATX(rule->cntr), req->cntr_val);
+ write_req.set_cntr = 1;
+ write_req.cntr = rule->cntr;
+ }
+
+ /* update rule */
+ memcpy(&rule->packet, &dummy.packet, sizeof(rule->packet));
+ memcpy(&rule->mask, &dummy.mask, sizeof(rule->mask));
+ rule->entry = entry_index;
+ memcpy(&rule->rx_action, &entry->action, sizeof(struct nix_rx_action));
+ if (is_npc_intf_tx(req->intf))
+ memcpy(&rule->tx_action, &entry->action,
+ sizeof(struct nix_tx_action));
+ rule->vtag_action = entry->vtag_action;
+ rule->features = installed_features;
+ rule->default_rule = req->default_rule;
+ rule->owner = owner;
+ rule->enable = enable;
+ rule->chan_mask = write_req.entry_data.kw_mask[0] & NPC_KEX_CHAN_MASK;
+ rule->chan = write_req.entry_data.kw[0] & NPC_KEX_CHAN_MASK;
+ rule->chan &= rule->chan_mask;
+ rule->lxmb = dummy.lxmb;
+ if (is_npc_intf_tx(req->intf))
+ rule->intf = pfvf->nix_tx_intf;
+ else
+ rule->intf = pfvf->nix_rx_intf;
+
+ if (new)
+ rvu_mcam_add_rule(mcam, rule);
+ if (req->default_rule)
+ pfvf->def_ucast_rule = rule;
+
+ /* write to mcam entry registers */
+ err = rvu_mbox_handler_npc_mcam_write_entry(rvu, &write_req,
+ &write_rsp);
+ if (err) {
+ rvu_mcam_remove_counter_from_rule(rvu, owner, rule);
+ if (new) {
+ list_del(&rule->list);
+ kfree(rule);
+ }
+ return err;
+ }
+
+ /* VF's MAC address is being changed via PF */
+ if (pf_set_vfs_mac) {
+ ether_addr_copy(pfvf->default_mac, req->packet.dmac);
+ ether_addr_copy(pfvf->mac_addr, req->packet.dmac);
+ set_bit(PF_SET_VF_MAC, &pfvf->flags);
+ }
+
+ if (test_bit(PF_SET_VF_CFG, &pfvf->flags) &&
+ req->vtag0_type == NIX_AF_LFX_RX_VTAG_TYPE7)
+ rule->vfvlan_cfg = true;
+
+ if (is_npc_intf_rx(req->intf) && req->match_id &&
+ (req->op == NIX_RX_ACTIONOP_UCAST || req->op == NIX_RX_ACTIONOP_RSS))
+ return rvu_nix_setup_ratelimit_aggr(rvu, req->hdr.pcifunc,
+ req->index, req->match_id);
+
+ return 0;
+}
+
+int rvu_mbox_handler_npc_install_flow(struct rvu *rvu,
+ struct npc_install_flow_req *req,
+ struct npc_install_flow_rsp *rsp)
+{
+ bool from_vf = !!(req->hdr.pcifunc & RVU_PFVF_FUNC_MASK);
+ struct rvu_switch *rswitch = &rvu->rswitch;
+ int blkaddr, nixlf, err;
+ struct rvu_pfvf *pfvf;
+ bool pf_set_vfs_mac = false;
+ bool enable = true;
+ u16 target;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0) {
+ dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__);
+ return NPC_MCAM_INVALID_REQ;
+ }
+
+ if (!is_npc_interface_valid(rvu, req->intf))
+ return NPC_FLOW_INTF_INVALID;
+
+ /* If DMAC is not extracted in MKEX, rules installed by AF
+ * can rely on L2MB bit set by hardware protocol checker for
+ * broadcast and multicast addresses.
+ */
+ if (npc_check_field(rvu, blkaddr, NPC_DMAC, req->intf))
+ goto process_flow;
+
+ if (is_pffunc_af(req->hdr.pcifunc) &&
+ req->features & BIT_ULL(NPC_DMAC)) {
+ if (is_unicast_ether_addr(req->packet.dmac)) {
+ dev_warn(rvu->dev,
+ "%s: mkex profile does not support ucast flow\n",
+ __func__);
+ return NPC_FLOW_NOT_SUPPORTED;
+ }
+
+ if (!npc_is_field_present(rvu, NPC_LXMB, req->intf)) {
+ dev_warn(rvu->dev,
+ "%s: mkex profile does not support bcast/mcast flow",
+ __func__);
+ return NPC_FLOW_NOT_SUPPORTED;
+ }
+
+ /* Modify feature to use LXMB instead of DMAC */
+ req->features &= ~BIT_ULL(NPC_DMAC);
+ req->features |= BIT_ULL(NPC_LXMB);
+ }
+
+process_flow:
+ if (from_vf && req->default_rule)
+ return NPC_FLOW_VF_PERM_DENIED;
+
+ /* Info for each PF/VF is maintained in struct rvu_pfvf.
+ * The rvu_pfvf for the target PF/VF needs to be retrieved,
+ * hence modify pcifunc accordingly.
+ */
+
+ /* AF installing for a PF/VF */
+ if (!req->hdr.pcifunc)
+ target = req->vf;
+ /* PF installing for its VF */
+ else if (!from_vf && req->vf) {
+ target = (req->hdr.pcifunc & ~RVU_PFVF_FUNC_MASK) | req->vf;
+ pf_set_vfs_mac = req->default_rule &&
+ (req->features & BIT_ULL(NPC_DMAC));
+ }
+ /* msg received from PF/VF */
+ else
+ target = req->hdr.pcifunc;
+
+ /* ignore chan_mask in case pf func is not AF, revisit later */
+ if (!is_pffunc_af(req->hdr.pcifunc))
+ req->chan_mask = 0xFFF;
+
+ err = npc_check_unsupported_flows(rvu, req->features, req->intf);
+ if (err)
+ return NPC_FLOW_NOT_SUPPORTED;
+
+ pfvf = rvu_get_pfvf(rvu, target);
+
+ /* PF installing for its VF */
+ if (req->hdr.pcifunc && !from_vf && req->vf)
+ set_bit(PF_SET_VF_CFG, &pfvf->flags);
+
+ /* update req destination mac addr */
+ if ((req->features & BIT_ULL(NPC_DMAC)) && is_npc_intf_rx(req->intf) &&
+ is_zero_ether_addr(req->packet.dmac)) {
+ ether_addr_copy(req->packet.dmac, pfvf->mac_addr);
+ eth_broadcast_addr((u8 *)&req->mask.dmac);
+ }
+
+ /* For TX rules, proceed whether or not a NIXLF is attached */
+ err = nix_get_nixlf(rvu, target, &nixlf, NULL);
+ if (err && is_npc_intf_rx(req->intf) && !pf_set_vfs_mac)
+ return NPC_FLOW_NO_NIXLF;
+
+ /* don't enable rule when nixlf not attached or initialized */
+ if (!(is_nixlf_attached(rvu, target) &&
+ test_bit(NIXLF_INITIALIZED, &pfvf->flags)))
+ enable = false;
+
+ /* Packets reaching NPC in the Tx path imply that a
+ * NIXLF is properly set up and transmitting.
+ * Hence rules can be enabled for Tx.
+ */
+ if (is_npc_intf_tx(req->intf))
+ enable = true;
+
+ /* Do not allow requests from uninitialized VFs */
+ if (from_vf && !enable)
+ return NPC_FLOW_VF_NOT_INIT;
+
+ /* PF sets VF mac & VF NIXLF is not attached, update the mac addr */
+ if (pf_set_vfs_mac && !enable) {
+ ether_addr_copy(pfvf->default_mac, req->packet.dmac);
+ ether_addr_copy(pfvf->mac_addr, req->packet.dmac);
+ set_bit(PF_SET_VF_MAC, &pfvf->flags);
+ return 0;
+ }
+
+ mutex_lock(&rswitch->switch_lock);
+ err = npc_install_flow(rvu, blkaddr, target, nixlf, pfvf,
+ req, rsp, enable, pf_set_vfs_mac);
+ mutex_unlock(&rswitch->switch_lock);
+
+ return err;
+}
+
+static int npc_delete_flow(struct rvu *rvu, struct rvu_npc_mcam_rule *rule,
+ u16 pcifunc)
+{
+ struct npc_mcam_ena_dis_entry_req dis_req = { 0 };
+ struct msg_rsp dis_rsp;
+
+ if (rule->default_rule)
+ return 0;
+
+ if (rule->has_cntr)
+ rvu_mcam_remove_counter_from_rule(rvu, pcifunc, rule);
+
+ dis_req.hdr.pcifunc = pcifunc;
+ dis_req.entry = rule->entry;
+
+ list_del(&rule->list);
+ kfree(rule);
+
+ return rvu_mbox_handler_npc_mcam_dis_entry(rvu, &dis_req, &dis_rsp);
+}
+
+int rvu_mbox_handler_npc_delete_flow(struct rvu *rvu,
+ struct npc_delete_flow_req *req,
+ struct npc_delete_flow_rsp *rsp)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_npc_mcam_rule *iter, *tmp;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct list_head del_list;
+ int blkaddr;
+
+ INIT_LIST_HEAD(&del_list);
+
+ mutex_lock(&mcam->lock);
+ list_for_each_entry_safe(iter, tmp, &mcam->mcam_rules, list) {
+ if (iter->owner == pcifunc) {
+ /* All rules */
+ if (req->all) {
+ list_move_tail(&iter->list, &del_list);
+ /* Range of rules */
+ } else if (req->end && iter->entry >= req->start &&
+ iter->entry <= req->end) {
+ list_move_tail(&iter->list, &del_list);
+ /* single rule */
+ } else if (req->entry == iter->entry) {
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr)
+ rsp->cntr_val = rvu_read64(rvu, blkaddr,
+ NPC_AF_MATCH_STATX(iter->cntr));
+ list_move_tail(&iter->list, &del_list);
+ break;
+ }
+ }
+ }
+ mutex_unlock(&mcam->lock);
+
+ list_for_each_entry_safe(iter, tmp, &del_list, list) {
+ u16 entry = iter->entry;
+
+ /* clear the mcam entry target pcifunc */
+ mcam->entry2target_pffunc[entry] = 0x0;
+ if (npc_delete_flow(rvu, iter, pcifunc))
+ dev_err(rvu->dev, "rule deletion failed for entry:%u",
+ entry);
+ }
+
+ return 0;
+}
+
+static int npc_update_dmac_value(struct rvu *rvu, int npcblkaddr,
+ struct rvu_npc_mcam_rule *rule,
+ struct rvu_pfvf *pfvf)
+{
+ struct npc_mcam_write_entry_req write_req = { 0 };
+ struct mcam_entry *entry = &write_req.entry_data;
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct msg_rsp rsp;
+ u8 intf, enable;
+ int err;
+
+ ether_addr_copy(rule->packet.dmac, pfvf->mac_addr);
+
+ npc_read_mcam_entry(rvu, mcam, npcblkaddr, rule->entry,
+ entry, &intf, &enable);
+
+ npc_update_entry(rvu, NPC_DMAC, entry,
+ ether_addr_to_u64(pfvf->mac_addr), 0,
+ 0xffffffffffffull, 0, intf);
+
+ write_req.hdr.pcifunc = rule->owner;
+ write_req.entry = rule->entry;
+ write_req.intf = pfvf->nix_rx_intf;
+
+ mutex_unlock(&mcam->lock);
+ err = rvu_mbox_handler_npc_mcam_write_entry(rvu, &write_req, &rsp);
+ mutex_lock(&mcam->lock);
+
+ return err;
+}
+
+void npc_mcam_enable_flows(struct rvu *rvu, u16 target)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, target);
+ struct rvu_npc_mcam_rule *def_ucast_rule;
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_npc_mcam_rule *rule;
+ int blkaddr, bank, index;
+ u64 def_action;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+ def_ucast_rule = pfvf->def_ucast_rule;
+
+ mutex_lock(&mcam->lock);
+ list_for_each_entry(rule, &mcam->mcam_rules, list) {
+ if (is_npc_intf_rx(rule->intf) &&
+ rule->rx_action.pf_func == target && !rule->enable) {
+ if (rule->default_rule) {
+ npc_enable_mcam_entry(rvu, mcam, blkaddr,
+ rule->entry, true);
+ rule->enable = true;
+ continue;
+ }
+
+ if (rule->vfvlan_cfg)
+ npc_update_dmac_value(rvu, blkaddr, rule, pfvf);
+
+ if (rule->rx_action.op == NIX_RX_ACTION_DEFAULT) {
+ if (!def_ucast_rule)
+ continue;
+ /* Use default unicast entry action */
+ rule->rx_action = def_ucast_rule->rx_action;
+ def_action = *(u64 *)&def_ucast_rule->rx_action;
+ bank = npc_get_bank(mcam, rule->entry);
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_ACTION
+ (rule->entry, bank), def_action);
+ }
+
+ npc_enable_mcam_entry(rvu, mcam, blkaddr,
+ rule->entry, true);
+ rule->enable = true;
+ }
+ }
+
+ /* Enable MCAM entries installed by PF with target as VF pcifunc */
+ for (index = 0; index < mcam->bmap_entries; index++) {
+ if (mcam->entry2target_pffunc[index] == target)
+ npc_enable_mcam_entry(rvu, mcam, blkaddr,
+ index, true);
+ }
+ mutex_unlock(&mcam->lock);
+}
+
+void npc_mcam_disable_flows(struct rvu *rvu, u16 target)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ int blkaddr, index;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+ mutex_lock(&mcam->lock);
+ /* Disable MCAM entries installed by PF with target as VF pcifunc */
+ for (index = 0; index < mcam->bmap_entries; index++) {
+ if (mcam->entry2target_pffunc[index] == target)
+ npc_enable_mcam_entry(rvu, mcam, blkaddr,
+ index, false);
+ }
+ mutex_unlock(&mcam->lock);
+}
+
+/* Single drop on non hit rule starting from the 0th index. This is an
+ * extension to the RPM mac filter to support more rules.
+ */
+int npc_install_mcam_drop_rule(struct rvu *rvu, int mcam_idx, u16 *counter_idx,
+ u64 chan_val, u64 chan_mask, u64 exact_val, u64 exact_mask,
+ u64 bcast_mcast_val, u64 bcast_mcast_mask)
+{
+ struct npc_mcam_alloc_counter_req cntr_req = { 0 };
+ struct npc_mcam_alloc_counter_rsp cntr_rsp = { 0 };
+ struct npc_mcam_write_entry_req req = { 0 };
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_npc_mcam_rule *rule;
+ struct msg_rsp rsp;
+ bool enabled;
+ int blkaddr;
+ int err;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0) {
+ dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__);
+ return -ENODEV;
+ }
+
+ /* Bail out if no exact match support */
+ if (!rvu_npc_exact_has_match_table(rvu)) {
+ dev_info(rvu->dev, "%s: No support for exact match feature\n", __func__);
+ return -EINVAL;
+ }
+
+ /* If 0th entry is already used, return err */
+ enabled = is_mcam_entry_enabled(rvu, mcam, blkaddr, mcam_idx);
+ if (enabled) {
+ dev_err(rvu->dev, "%s: failed to add single drop on non hit rule at %d th index\n",
+ __func__, mcam_idx);
+ return -EINVAL;
+ }
+
+ /* Add this entry to mcam rules list */
+ rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+ if (!rule)
+ return -ENOMEM;
+
+ /* Disable rule by default. Enable rule when first dmac filter is
+ * installed
+ */
+ rule->enable = false;
+ rule->chan = chan_val;
+ rule->chan_mask = chan_mask;
+ rule->entry = mcam_idx;
+ rvu_mcam_add_rule(mcam, rule);
+
+ /* Reserve slot 0 */
+ npc_mcam_rsrcs_reserve(rvu, blkaddr, mcam_idx);
+
+ /* Allocate counter for this single drop on non hit rule */
+ cntr_req.hdr.pcifunc = 0; /* AF request */
+ cntr_req.contig = true;
+ cntr_req.count = 1;
+ err = rvu_mbox_handler_npc_mcam_alloc_counter(rvu, &cntr_req, &cntr_rsp);
+ if (err) {
+ dev_err(rvu->dev, "%s: Err to allocate cntr for drop rule (err=%d)\n",
+ __func__, err);
+ return -EFAULT;
+ }
+ *counter_idx = cntr_rsp.cntr;
+
+ /* Fill in fields for this mcam entry */
+ npc_update_entry(rvu, NPC_EXACT_RESULT, &req.entry_data, exact_val, 0,
+ exact_mask, 0, NIX_INTF_RX);
+ npc_update_entry(rvu, NPC_CHAN, &req.entry_data, chan_val, 0,
+ chan_mask, 0, NIX_INTF_RX);
+ npc_update_entry(rvu, NPC_LXMB, &req.entry_data, bcast_mcast_val, 0,
+ bcast_mcast_mask, 0, NIX_INTF_RX);
+
+ req.intf = NIX_INTF_RX;
+ req.set_cntr = true;
+ req.cntr = cntr_rsp.cntr;
+ req.entry = mcam_idx;
+
+ err = rvu_mbox_handler_npc_mcam_write_entry(rvu, &req, &rsp);
+ if (err) {
+ dev_err(rvu->dev, "%s: Installation of single drop on non hit rule at %d failed\n",
+ __func__, mcam_idx);
+ return err;
+ }
+
+ dev_err(rvu->dev, "%s: Installed single drop on non hit rule at %d, cntr=%d\n",
+ __func__, mcam_idx, req.cntr);
+
+ /* disable entry at Bank 0, index 0 */
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, mcam_idx, false);
+
+ return 0;
+}
+
+int rvu_mbox_handler_npc_get_field_status(struct rvu *rvu,
+ struct npc_get_field_status_req *req,
+ struct npc_get_field_status_rsp *rsp)
+{
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return NPC_MCAM_INVALID_REQ;
+
+ if (!is_npc_interface_valid(rvu, req->intf))
+ return NPC_FLOW_INTF_INVALID;
+
+ if (npc_check_field(rvu, blkaddr, req->field, req->intf))
+ rsp->enable = 1;
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.h
new file mode 100644
index 000000000..3f5c9042d
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2022 Marvell.
+ *
+ */
+
+#ifndef __RVU_NPC_FS_H
+#define __RVU_NPC_FS_H
+
+#define IPV6_WORDS 4
+#define NPC_BYTESM GENMASK_ULL(19, 16)
+#define NPC_HDR_OFFSET GENMASK_ULL(15, 8)
+#define NPC_KEY_OFFSET GENMASK_ULL(5, 0)
+#define NPC_LDATA_EN BIT_ULL(7)
+
+void npc_update_entry(struct rvu *rvu, enum key_fields type,
+ struct mcam_entry *entry, u64 val_lo,
+ u64 val_hi, u64 mask_lo, u64 mask_hi, u8 intf);
+
+#endif /* RVU_NPC_FS_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
new file mode 100644
index 000000000..d2661e7fa
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
@@ -0,0 +1,2036 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2022 Marvell.
+ *
+ */
+
+#include <linux/bitfield.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/firmware.h>
+#include <linux/stddef.h>
+#include <linux/debugfs.h>
+
+#include "rvu_struct.h"
+#include "rvu_reg.h"
+#include "rvu.h"
+#include "npc.h"
+#include "cgx.h"
+#include "rvu_npc_fs.h"
+#include "rvu_npc_hash.h"
+
+static u64 rvu_npc_wide_extract(const u64 input[], size_t start_bit,
+ size_t width_bits)
+{
+ const u64 mask = ~(u64)((~(__uint128_t)0) << width_bits);
+ const size_t msb = start_bit + width_bits - 1;
+ const size_t lword = start_bit >> 6;
+ const size_t uword = msb >> 6;
+ size_t lbits;
+ u64 hi, lo;
+
+ if (lword == uword)
+ return (input[lword] >> (start_bit & 63)) & mask;
+
+ lbits = 64 - (start_bit & 63);
+ hi = input[uword];
+ lo = (input[lword] >> (start_bit & 63));
+ return ((hi << lbits) | lo) & mask;
+}
+
+static void rvu_npc_lshift_key(u64 *key, size_t key_bit_len)
+{
+ u64 prev_orig_word = 0;
+ u64 cur_orig_word = 0;
+ size_t extra = key_bit_len % 64;
+ size_t max_idx = key_bit_len / 64;
+ size_t i;
+
+ if (extra)
+ max_idx++;
+
+ for (i = 0; i < max_idx; i++) {
+ cur_orig_word = key[i];
+ key[i] = key[i] << 1;
+ key[i] |= ((prev_orig_word >> 63) & 0x1);
+ prev_orig_word = cur_orig_word;
+ }
+}
+
+static u32 rvu_npc_toeplitz_hash(const u64 *data, u64 *key, size_t data_bit_len,
+ size_t key_bit_len)
+{
+ u32 hash_out = 0;
+ u64 temp_data = 0;
+ int i;
+
+ for (i = data_bit_len - 1; i >= 0; i--) {
+ temp_data = (data[i / 64]);
+ temp_data = temp_data >> (i % 64);
+ temp_data &= 0x1;
+ if (temp_data)
+ hash_out ^= (u32)(rvu_npc_wide_extract(key, key_bit_len - 32, 32));
+
+ rvu_npc_lshift_key(key, key_bit_len);
+ }
+
+ return hash_out;
+}
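
rvu_npc_toeplitz_hash() is a bit-serial Toeplitz hash: scanning the data from the most significant bit, each set bit XORs in the current top 32 bits of the (159-bit) key, and the key is then shifted left by one bit. A scaled-down standalone illustration of the same idea (8-bit data, 16-bit key, 8-bit hash; numbers invented):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t data = 0xb2;		/* example input */
	uint16_t key = 0x6d5a;		/* example hash key */
	uint8_t hash = 0;
	int i;

	for (i = 7; i >= 0; i--) {	/* walk data MSB first, as above */
		if (data & (1u << i))
			hash ^= (uint8_t)(key >> 8);	/* top 8 bits of the key */
		key <<= 1;		/* slide the key window */
	}

	printf("toeplitz(0x%02x) = 0x%02x\n", data, hash);
	return 0;
}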
+
+u32 npc_field_hash_calc(u64 *ldata, struct npc_get_field_hash_info_rsp rsp,
+ u8 intf, u8 hash_idx)
+{
+ u64 hash_key[3];
+ u64 data_padded[2];
+ u32 field_hash;
+
+ hash_key[0] = rsp.secret_key[1] << 31;
+ hash_key[0] |= rsp.secret_key[2];
+ hash_key[1] = rsp.secret_key[1] >> 33;
+ hash_key[1] |= rsp.secret_key[0] << 31;
+ hash_key[2] = rsp.secret_key[0] >> 33;
+
+ data_padded[0] = rsp.hash_mask[intf][hash_idx][0] & ldata[0];
+ data_padded[1] = rsp.hash_mask[intf][hash_idx][1] & ldata[1];
+ field_hash = rvu_npc_toeplitz_hash(data_padded, hash_key, 128, 159);
+
+ field_hash &= FIELD_GET(GENMASK(63, 32), rsp.hash_ctrl[intf][hash_idx]);
+ field_hash += FIELD_GET(GENMASK(31, 0), rsp.hash_ctrl[intf][hash_idx]);
+ return field_hash;
+}
+
+static u64 npc_update_use_hash(struct rvu *rvu, int blkaddr,
+ u8 intf, int lid, int lt, int ld)
+{
+ u8 hdr, key;
+ u64 cfg;
+
+ cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_LIDX_LTX_LDX_CFG(intf, lid, lt, ld));
+ hdr = FIELD_GET(NPC_HDR_OFFSET, cfg);
+ key = FIELD_GET(NPC_KEY_OFFSET, cfg);
+
+ /* Update use_hash (bit 20) to 'true' and
+ * bytesm1 (bits 16:19) to '0x3' in KEX_LD_CFG
+ */
+ cfg = KEX_LD_CFG_USE_HASH(0x1, 0x03,
+ hdr, 0x1, 0x0, key);
+
+ return cfg;
+}
+
+static void npc_program_mkex_hash_rx(struct rvu *rvu, int blkaddr,
+ u8 intf)
+{
+ struct npc_mcam_kex_hash *mkex_hash = rvu->kpu.mkex_hash;
+ int lid, lt, ld, hash_cnt = 0;
+
+ if (is_npc_intf_tx(intf))
+ return;
+
+ /* Program HASH_CFG */
+ for (lid = 0; lid < NPC_MAX_LID; lid++) {
+ for (lt = 0; lt < NPC_MAX_LT; lt++) {
+ for (ld = 0; ld < NPC_MAX_LD; ld++) {
+ if (mkex_hash->lid_lt_ld_hash_en[intf][lid][lt][ld]) {
+ u64 cfg;
+
+ if (hash_cnt == NPC_MAX_HASH)
+ return;
+
+ cfg = npc_update_use_hash(rvu, blkaddr,
+ intf, lid, lt, ld);
+ /* Set updated KEX configuration */
+ SET_KEX_LD(intf, lid, lt, ld, cfg);
+ /* Set HASH configuration */
+ SET_KEX_LD_HASH(intf, ld,
+ mkex_hash->hash[intf][ld]);
+ SET_KEX_LD_HASH_MASK(intf, ld, 0,
+ mkex_hash->hash_mask[intf][ld][0]);
+ SET_KEX_LD_HASH_MASK(intf, ld, 1,
+ mkex_hash->hash_mask[intf][ld][1]);
+ SET_KEX_LD_HASH_CTRL(intf, ld,
+ mkex_hash->hash_ctrl[intf][ld]);
+
+ hash_cnt++;
+ }
+ }
+ }
+ }
+}
+
+static void npc_program_mkex_hash_tx(struct rvu *rvu, int blkaddr,
+ u8 intf)
+{
+ struct npc_mcam_kex_hash *mkex_hash = rvu->kpu.mkex_hash;
+ int lid, lt, ld, hash_cnt = 0;
+
+ if (is_npc_intf_rx(intf))
+ return;
+
+ /* Program HASH_CFG */
+ for (lid = 0; lid < NPC_MAX_LID; lid++) {
+ for (lt = 0; lt < NPC_MAX_LT; lt++) {
+ for (ld = 0; ld < NPC_MAX_LD; ld++)
+ if (mkex_hash->lid_lt_ld_hash_en[intf][lid][lt][ld]) {
+ u64 cfg;
+
+ if (hash_cnt == NPC_MAX_HASH)
+ return;
+
+ cfg = npc_update_use_hash(rvu, blkaddr,
+ intf, lid, lt, ld);
+ /* Set updated KEX configuration */
+ SET_KEX_LD(intf, lid, lt, ld, cfg);
+ /* Set HASH configuration */
+ SET_KEX_LD_HASH(intf, ld,
+ mkex_hash->hash[intf][ld]);
+ SET_KEX_LD_HASH_MASK(intf, ld, 0,
+ mkex_hash->hash_mask[intf][ld][0]);
+ SET_KEX_LD_HASH_MASK(intf, ld, 1,
+ mkex_hash->hash_mask[intf][ld][1]);
+ SET_KEX_LD_HASH_CTRL(intf, ld,
+ mkex_hash->hash_ctrl[intf][ld]);
+ hash_cnt++;
+ }
+ }
+ }
+}
+
+void npc_config_secret_key(struct rvu *rvu, int blkaddr)
+{
+ struct hw_cap *hwcap = &rvu->hw->cap;
+ struct rvu_hwinfo *hw = rvu->hw;
+ u8 intf;
+
+ if (!hwcap->npc_hash_extract)
+ return;
+
+ for (intf = 0; intf < hw->npc_intfs; intf++) {
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY0(intf),
+ RVU_NPC_HASH_SECRET_KEY0);
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY1(intf),
+ RVU_NPC_HASH_SECRET_KEY1);
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY2(intf),
+ RVU_NPC_HASH_SECRET_KEY2);
+ }
+}
+
+void npc_program_mkex_hash(struct rvu *rvu, int blkaddr)
+{
+ struct npc_mcam_kex_hash *mh = rvu->kpu.mkex_hash;
+ struct hw_cap *hwcap = &rvu->hw->cap;
+ u8 intf, ld, hdr_offset, byte_len;
+ struct rvu_hwinfo *hw = rvu->hw;
+ u64 cfg;
+
+ /* Check if hardware supports hash extraction */
+ if (!hwcap->npc_hash_extract)
+ return;
+
+	/* Check if the IPv6 source/destination address
+	 * should be hash enabled.
+	 * Hashing reduces the 128-bit SIP/DIP fields to 32 bits
+	 * so that the 224-bit X2 key can be used for IPv6 based filters as well,
+	 * which in turn makes more MCAM entries available for use.
+	 *
+	 * Hashing of IPv6 SIP/DIP is enabled in the below scenarios
+	 * 1. If the silicon variant supports the hashing feature
+	 * 2. If the number of bytes of the IP address being extracted is 4,
+	 *    i.e. 32 bits. The assumption here is that if the user wants the
+	 *    8 LSB bytes of the IP address or the full 16 bytes, the intention
+	 *    is not to use the 32-bit hash.
+	 */
+ for (intf = 0; intf < hw->npc_intfs; intf++) {
+ for (ld = 0; ld < NPC_MAX_LD; ld++) {
+ cfg = rvu_read64(rvu, blkaddr,
+ NPC_AF_INTFX_LIDX_LTX_LDX_CFG(intf,
+ NPC_LID_LC,
+ NPC_LT_LC_IP6,
+ ld));
+ hdr_offset = FIELD_GET(NPC_HDR_OFFSET, cfg);
+ byte_len = FIELD_GET(NPC_BYTESM, cfg);
+			/* Hashing of the IPv6 source/destination address should
+			 * be enabled if
+			 * hdr_offset == 8 (offset of the source IPv6 address) or
+			 * hdr_offset == 24 (offset of the destination IPv6
+			 * address) and the number of bytes to be
+			 * extracted is 4. As per the hardware configuration,
+			 * byte_len holds the actual byte length - 1.
+			 * Hence byte_len is checked against 3, not 4.
+			 */
+ if ((hdr_offset == 8 || hdr_offset == 24) && byte_len == 3)
+ mh->lid_lt_ld_hash_en[intf][NPC_LID_LC][NPC_LT_LC_IP6][ld] = true;
+ }
+ }
+
+ /* Update hash configuration if the field is hash enabled */
+ for (intf = 0; intf < hw->npc_intfs; intf++) {
+ npc_program_mkex_hash_rx(rvu, blkaddr, intf);
+ npc_program_mkex_hash_tx(rvu, blkaddr, intf);
+ }
+}
+
+void npc_update_field_hash(struct rvu *rvu, u8 intf,
+ struct mcam_entry *entry,
+ int blkaddr,
+ u64 features,
+ struct flow_msg *pkt,
+ struct flow_msg *mask,
+ struct flow_msg *opkt,
+ struct flow_msg *omask)
+{
+ struct npc_mcam_kex_hash *mkex_hash = rvu->kpu.mkex_hash;
+ struct npc_get_field_hash_info_req req;
+ struct npc_get_field_hash_info_rsp rsp;
+ u64 ldata[2], cfg;
+ u32 field_hash;
+ u8 hash_idx;
+
+ if (!rvu->hw->cap.npc_hash_extract) {
+ dev_dbg(rvu->dev, "%s: Field hash extract feature is not supported\n", __func__);
+ return;
+ }
+
+ req.intf = intf;
+ rvu_mbox_handler_npc_get_field_hash_info(rvu, &req, &rsp);
+
+ for (hash_idx = 0; hash_idx < NPC_MAX_HASH; hash_idx++) {
+ cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_HASHX_CFG(intf, hash_idx));
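+		/* Hashing applies only when both LID_EN (bit 11) and
+		 * LT_EN (bit 12) are set in NPC_AF_INTFX_HASHX_CFG.
+		 */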
+ if ((cfg & BIT_ULL(11)) && (cfg & BIT_ULL(12))) {
+ u8 lid = (cfg & GENMASK_ULL(10, 8)) >> 8;
+ u8 ltype = (cfg & GENMASK_ULL(7, 4)) >> 4;
+ u8 ltype_mask = cfg & GENMASK_ULL(3, 0);
+
+ if (mkex_hash->lid_lt_ld_hash_en[intf][lid][ltype][hash_idx]) {
+ switch (ltype & ltype_mask) {
+				/* If hash extraction is enabled for IPv6, the
+				 * 128-bit IPv6 source and destination addresses
+				 * are each hashed to a 32-bit value.
+				 */
+ case NPC_LT_LC_IP6:
+ /* ld[0] == hash_idx[0] == Source IPv6
+ * ld[1] == hash_idx[1] == Destination IPv6
+ */
+ if ((features & BIT_ULL(NPC_SIP_IPV6)) && !hash_idx) {
+ u32 src_ip[IPV6_WORDS];
+
+ be32_to_cpu_array(src_ip, pkt->ip6src, IPV6_WORDS);
+ ldata[1] = (u64)src_ip[0] << 32 | src_ip[1];
+ ldata[0] = (u64)src_ip[2] << 32 | src_ip[3];
+ field_hash = npc_field_hash_calc(ldata,
+ rsp,
+ intf,
+ hash_idx);
+ npc_update_entry(rvu, NPC_SIP_IPV6, entry,
+ field_hash, 0,
+ GENMASK(31, 0), 0, intf);
+ memcpy(&opkt->ip6src, &pkt->ip6src,
+ sizeof(pkt->ip6src));
+ memcpy(&omask->ip6src, &mask->ip6src,
+ sizeof(mask->ip6src));
+ } else if ((features & BIT_ULL(NPC_DIP_IPV6)) && hash_idx) {
+ u32 dst_ip[IPV6_WORDS];
+
+ be32_to_cpu_array(dst_ip, pkt->ip6dst, IPV6_WORDS);
+ ldata[1] = (u64)dst_ip[0] << 32 | dst_ip[1];
+ ldata[0] = (u64)dst_ip[2] << 32 | dst_ip[3];
+ field_hash = npc_field_hash_calc(ldata,
+ rsp,
+ intf,
+ hash_idx);
+ npc_update_entry(rvu, NPC_DIP_IPV6, entry,
+ field_hash, 0,
+ GENMASK(31, 0), 0, intf);
+ memcpy(&opkt->ip6dst, &pkt->ip6dst,
+ sizeof(pkt->ip6dst));
+ memcpy(&omask->ip6dst, &mask->ip6dst,
+ sizeof(mask->ip6dst));
+ }
+
+ break;
+ }
+ }
+ }
+ }
+}
+
+int rvu_mbox_handler_npc_get_field_hash_info(struct rvu *rvu,
+ struct npc_get_field_hash_info_req *req,
+ struct npc_get_field_hash_info_rsp *rsp)
+{
+ u64 *secret_key = rsp->secret_key;
+ u8 intf = req->intf;
+ int i, j, blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0) {
+ dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__);
+ return -EINVAL;
+ }
+
+ secret_key[0] = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY0(intf));
+ secret_key[1] = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY1(intf));
+ secret_key[2] = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY2(intf));
+
+ for (i = 0; i < NPC_MAX_HASH; i++) {
+ for (j = 0; j < NPC_MAX_HASH_MASK; j++) {
+ rsp->hash_mask[NIX_INTF_RX][i][j] =
+ GET_KEX_LD_HASH_MASK(NIX_INTF_RX, i, j);
+ rsp->hash_mask[NIX_INTF_TX][i][j] =
+ GET_KEX_LD_HASH_MASK(NIX_INTF_TX, i, j);
+ }
+ }
+
+ for (i = 0; i < NPC_MAX_INTF; i++)
+ for (j = 0; j < NPC_MAX_HASH; j++)
+ rsp->hash_ctrl[i][j] = GET_KEX_LD_HASH_CTRL(i, j);
+
+ return 0;
+}
+
+/**
+ * rvu_exact_prepare_mdata - Make mdata for mcam entry
+ * @mac: MAC address
+ * @chan: Channel number.
+ * @ctype: Channel Type.
+ * @mask: LDATA mask.
+ * Return: Meta data
+ */
+static u64 rvu_exact_prepare_mdata(u8 *mac, u16 chan, u16 ctype, u64 mask)
+{
+ u64 ldata = ether_addr_to_u64(mac);
+
+	/* Note that the mask is 48 bits wide, which excludes chan and ctype.
+	 * Increase the mask width if they need to be included as well.
+	 */
+ ldata |= ((u64)chan << 48);
+ ldata |= ((u64)ctype << 60);
+ ldata &= mask;
+ ldata = ldata << 2;
+
+ return ldata;
+}
+
+/**
+ * rvu_exact_calculate_hash - calculate hash index to mem table.
+ * @rvu: resource virtualization unit.
+ * @chan: Channel number
+ * @ctype: Channel type.
+ * @mac: MAC address
+ * @mask: HASH mask.
+ * @table_depth: Depth of table.
+ * Return: Hash value
+ */
+static u32 rvu_exact_calculate_hash(struct rvu *rvu, u16 chan, u16 ctype, u8 *mac,
+ u64 mask, u32 table_depth)
+{
+ struct npc_exact_table *table = rvu->hw->table;
+ u64 hash_key[2];
+ u64 key_in[2];
+ u64 ldata;
+ u32 hash;
+
+ key_in[0] = RVU_NPC_HASH_SECRET_KEY0;
+ key_in[1] = RVU_NPC_HASH_SECRET_KEY2;
+
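+	/* Build the 95-bit key as SECRET_KEY0 (MSBs) followed by the low
+	 * 31 bits of SECRET_KEY2; SECRET_KEY1 is not used here.
+	 */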
+ hash_key[0] = key_in[0] << 31;
+ hash_key[0] |= key_in[1];
+ hash_key[1] = key_in[0] >> 33;
+
+ ldata = rvu_exact_prepare_mdata(mac, chan, ctype, mask);
+
+ dev_dbg(rvu->dev, "%s: ldata=0x%llx hash_key0=0x%llx hash_key2=0x%llx\n", __func__,
+ ldata, hash_key[1], hash_key[0]);
+ hash = rvu_npc_toeplitz_hash(&ldata, (u64 *)hash_key, 64, 95);
+
+ hash &= table->mem_table.hash_mask;
+ hash += table->mem_table.hash_offset;
+ dev_dbg(rvu->dev, "%s: hash=%x\n", __func__, hash);
+
+ return hash;
+}
+
+/**
+ * rvu_npc_exact_alloc_mem_table_entry - find free entry in 4 way table.
+ * @rvu: resource virtualization unit.
+ * @way: Indicate way to table.
+ * @index: Hash index to 4 way table.
+ * @hash: Hash value.
+ *
+ * Searches the 4 way table using the hash index.
+ * Return: 0 upon success.
+ */
+static int rvu_npc_exact_alloc_mem_table_entry(struct rvu *rvu, u8 *way,
+ u32 *index, unsigned int hash)
+{
+ struct npc_exact_table *table;
+ int depth, i;
+
+ table = rvu->hw->table;
+ depth = table->mem_table.depth;
+
+ /* Check all the 4 ways for a free slot. */
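+	/* mem_table.bmap gives each way a contiguous block of 'depth' bits,
+	 * so slot (way, hash) maps to bit hash + way * depth.
+	 */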
+ mutex_lock(&table->lock);
+ for (i = 0; i < table->mem_table.ways; i++) {
+ if (test_bit(hash + i * depth, table->mem_table.bmap))
+ continue;
+
+ set_bit(hash + i * depth, table->mem_table.bmap);
+ mutex_unlock(&table->lock);
+
+ dev_dbg(rvu->dev, "%s: mem table entry alloc success (way=%d index=%d)\n",
+ __func__, i, hash);
+
+ *way = i;
+ *index = hash;
+ return 0;
+ }
+ mutex_unlock(&table->lock);
+
+ dev_dbg(rvu->dev, "%s: No space in 4 way exact way, weight=%u\n", __func__,
+ bitmap_weight(table->mem_table.bmap, table->mem_table.depth));
+ return -ENOSPC;
+}
+
+/**
+ * rvu_npc_exact_free_id - Free seq id from bitmap.
+ * @rvu: Resource virtualization unit.
+ * @seq_id: Sequence identifier to be freed.
+ */
+static void rvu_npc_exact_free_id(struct rvu *rvu, u32 seq_id)
+{
+ struct npc_exact_table *table;
+
+ table = rvu->hw->table;
+ mutex_lock(&table->lock);
+ clear_bit(seq_id, table->id_bmap);
+ mutex_unlock(&table->lock);
+ dev_dbg(rvu->dev, "%s: freed id %d\n", __func__, seq_id);
+}
+
+/**
+ * rvu_npc_exact_alloc_id - Alloc seq id from bitmap.
+ * @rvu: Resource virtualization unit.
+ * @seq_id: Pointer to return the allocated sequence identifier.
+ * Return: True upon success, false otherwise.
+ */
+static bool rvu_npc_exact_alloc_id(struct rvu *rvu, u32 *seq_id)
+{
+ struct npc_exact_table *table;
+ u32 idx;
+
+ table = rvu->hw->table;
+
+ mutex_lock(&table->lock);
+ idx = find_first_zero_bit(table->id_bmap, table->tot_ids);
+ if (idx == table->tot_ids) {
+ mutex_unlock(&table->lock);
+ dev_err(rvu->dev, "%s: No space in id bitmap (%d)\n",
+ __func__, table->tot_ids);
+
+ return false;
+ }
+
+	/* Mark the bitmap to indicate that the slot is used. */
+ set_bit(idx, table->id_bmap);
+ mutex_unlock(&table->lock);
+
+ *seq_id = idx;
+ dev_dbg(rvu->dev, "%s: Allocated id (%d)\n", __func__, *seq_id);
+
+ return true;
+}
+
+/**
+ * rvu_npc_exact_alloc_cam_table_entry - find free slot in fully associative table.
+ * @rvu: resource virtualization unit.
+ * @index: Index to exact CAM table.
+ * Return: 0 upon success; else error number.
+ */
+static int rvu_npc_exact_alloc_cam_table_entry(struct rvu *rvu, int *index)
+{
+ struct npc_exact_table *table;
+ u32 idx;
+
+ table = rvu->hw->table;
+
+ mutex_lock(&table->lock);
+ idx = find_first_zero_bit(table->cam_table.bmap, table->cam_table.depth);
+ if (idx == table->cam_table.depth) {
+ mutex_unlock(&table->lock);
+ dev_info(rvu->dev, "%s: No space in exact cam table, weight=%u\n", __func__,
+ bitmap_weight(table->cam_table.bmap, table->cam_table.depth));
+ return -ENOSPC;
+ }
+
+	/* Mark the bitmap to indicate that the slot is used. */
+ set_bit(idx, table->cam_table.bmap);
+ mutex_unlock(&table->lock);
+
+ *index = idx;
+ dev_dbg(rvu->dev, "%s: cam table entry alloc success (index=%d)\n",
+ __func__, idx);
+ return 0;
+}
+
+/**
+ * rvu_exact_prepare_table_entry - Data for exact match table entry.
+ * @rvu: Resource virtualization unit.
+ * @enable: Enable/Disable entry
+ * @ctype: Software defined channel type. Currently set as 0.
+ * @chan: Channel number.
+ * @mac_addr: Destination mac address.
+ * Return: mdata for exact match table.
+ */
+static u64 rvu_exact_prepare_table_entry(struct rvu *rvu, bool enable,
+ u8 ctype, u16 chan, u8 *mac_addr)
+
+{
+ u64 ldata = ether_addr_to_u64(mac_addr);
+
+ /* Enable or disable */
+ u64 mdata = FIELD_PREP(GENMASK_ULL(63, 63), enable ? 1 : 0);
+
+ /* Set Ctype */
+ mdata |= FIELD_PREP(GENMASK_ULL(61, 60), ctype);
+
+ /* Set chan */
+ mdata |= FIELD_PREP(GENMASK_ULL(59, 48), chan);
+
+ /* MAC address */
+ mdata |= FIELD_PREP(GENMASK_ULL(47, 0), ldata);
+
+ return mdata;
+}
+
+/**
+ * rvu_exact_config_secret_key - Configure secret key.
+ * @rvu: Resource virtualization unit.
+ */
+static void rvu_exact_config_secret_key(struct rvu *rvu)
+{
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_SECRET0(NIX_INTF_RX),
+ RVU_NPC_HASH_SECRET_KEY0);
+
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_SECRET1(NIX_INTF_RX),
+ RVU_NPC_HASH_SECRET_KEY1);
+
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_SECRET2(NIX_INTF_RX),
+ RVU_NPC_HASH_SECRET_KEY2);
+}
+
+/**
+ * rvu_exact_config_search_key - Configure search key
+ * @rvu: Resource virtualization unit.
+ */
+static void rvu_exact_config_search_key(struct rvu *rvu)
+{
+ int blkaddr;
+ u64 reg_val;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+
+ /* HDR offset */
+ reg_val = FIELD_PREP(GENMASK_ULL(39, 32), 0);
+
+ /* BYTESM1, number of bytes - 1 */
+ reg_val |= FIELD_PREP(GENMASK_ULL(18, 16), ETH_ALEN - 1);
+
+ /* Enable LID and set LID to NPC_LID_LA */
+ reg_val |= FIELD_PREP(GENMASK_ULL(11, 11), 1);
+ reg_val |= FIELD_PREP(GENMASK_ULL(10, 8), NPC_LID_LA);
+
+ /* Clear layer type based extraction */
+
+ /* Disable LT_EN */
+ reg_val |= FIELD_PREP(GENMASK_ULL(12, 12), 0);
+
+ /* Set LTYPE_MATCH to 0 */
+ reg_val |= FIELD_PREP(GENMASK_ULL(7, 4), 0);
+
+ /* Set LTYPE_MASK to 0 */
+ reg_val |= FIELD_PREP(GENMASK_ULL(3, 0), 0);
+
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_CFG(NIX_INTF_RX), reg_val);
+}
+
+/**
+ * rvu_exact_config_result_ctrl - Set exact table hash control
+ * @rvu: Resource virtualization unit.
+ * @depth: Depth of Exact match table.
+ *
+ * Sets mask and offset for hash for mem table.
+ */
+static void rvu_exact_config_result_ctrl(struct rvu *rvu, uint32_t depth)
+{
+ int blkaddr;
+ u64 reg = 0;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+
+ /* Set mask. Note that depth is a power of 2 */
+ rvu->hw->table->mem_table.hash_mask = (depth - 1);
+ reg |= FIELD_PREP(GENMASK_ULL(42, 32), (depth - 1));
+
+ /* Set offset as 0 */
+ rvu->hw->table->mem_table.hash_offset = 0;
+ reg |= FIELD_PREP(GENMASK_ULL(10, 0), 0);
+
+ /* Set reg for RX */
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_RESULT_CTL(NIX_INTF_RX), reg);
+ /* Store hash mask and offset for s/w algorithm */
+}
+
+/**
+ * rvu_exact_config_table_mask - Set exact table mask.
+ * @rvu: Resource virtualization unit.
+ */
+static void rvu_exact_config_table_mask(struct rvu *rvu)
+{
+ int blkaddr;
+ u64 mask = 0;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+
+ /* Don't use Ctype */
+ mask |= FIELD_PREP(GENMASK_ULL(61, 60), 0);
+
+ /* Set chan */
+ mask |= GENMASK_ULL(59, 48);
+
+ /* Full ldata */
+ mask |= GENMASK_ULL(47, 0);
+
+ /* Store mask for s/w hash calcualtion */
+ rvu->hw->table->mem_table.mask = mask;
+
+ /* Set mask for RX.*/
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_MASK(NIX_INTF_RX), mask);
+}
+
+/**
+ * rvu_npc_exact_get_max_entries - Get total number of entries in table.
+ * @rvu: resource virtualization unit.
+ * Return: Maximum table entries possible.
+ */
+u32 rvu_npc_exact_get_max_entries(struct rvu *rvu)
+{
+ struct npc_exact_table *table;
+
+ table = rvu->hw->table;
+ return table->tot_ids;
+}
+
+/**
+ * rvu_npc_exact_has_match_table - Checks support for exact match.
+ * @rvu: resource virtualization unit.
+ * Return: True if exact match table is supported/enabled.
+ */
+bool rvu_npc_exact_has_match_table(struct rvu *rvu)
+{
+ return rvu->hw->cap.npc_exact_match_enabled;
+}
+
+/**
+ * __rvu_npc_exact_find_entry_by_seq_id - find entry by id
+ * @rvu: resource virtualization unit.
+ * @seq_id: Sequence identifier.
+ *
+ * Caller should acquire the lock.
+ * Return: Pointer to table entry.
+ */
+static struct npc_exact_table_entry *
+__rvu_npc_exact_find_entry_by_seq_id(struct rvu *rvu, u32 seq_id)
+{
+ struct npc_exact_table *table = rvu->hw->table;
+ struct npc_exact_table_entry *entry = NULL;
+ struct list_head *lhead;
+
+ lhead = &table->lhead_gbl;
+
+ /* traverse to find the matching entry */
+ list_for_each_entry(entry, lhead, glist) {
+ if (entry->seq_id != seq_id)
+ continue;
+
+ return entry;
+ }
+
+ return NULL;
+}
+
+/**
+ * rvu_npc_exact_add_to_list - Add entry to list
+ * @rvu: resource virtualization unit.
+ * @opc_type: OPCODE to select MEM/CAM table.
+ * @ways: MEM table ways.
+ * @index: Index in MEM/CAM table.
+ * @cgx_id: CGX identifier.
+ * @lmac_id: LMAC identifier.
+ * @mac_addr: MAC address.
+ * @chan: Channel number.
+ * @ctype: Channel Type.
+ * @seq_id: Sequence identifier
+ * @cmd: True if function is called by ethtool cmd
+ * @mcam_idx: NPC mcam index of DMAC entry in NPC mcam.
+ * @pcifunc: pci function
+ * Return: 0 upon success.
+ */
+static int rvu_npc_exact_add_to_list(struct rvu *rvu, enum npc_exact_opc_type opc_type, u8 ways,
+ u32 index, u8 cgx_id, u8 lmac_id, u8 *mac_addr, u16 chan,
+ u8 ctype, u32 *seq_id, bool cmd, u32 mcam_idx, u16 pcifunc)
+{
+ struct npc_exact_table_entry *entry, *tmp, *iter;
+ struct npc_exact_table *table = rvu->hw->table;
+ struct list_head *lhead, *pprev;
+
+ WARN_ON(ways >= NPC_EXACT_TBL_MAX_WAYS);
+
+ if (!rvu_npc_exact_alloc_id(rvu, seq_id)) {
+ dev_err(rvu->dev, "%s: Generate seq id failed\n", __func__);
+ return -EFAULT;
+ }
+
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ rvu_npc_exact_free_id(rvu, *seq_id);
+ dev_err(rvu->dev, "%s: Memory allocation failed\n", __func__);
+ return -ENOMEM;
+ }
+
+ mutex_lock(&table->lock);
+ switch (opc_type) {
+ case NPC_EXACT_OPC_CAM:
+ lhead = &table->lhead_cam_tbl_entry;
+ table->cam_tbl_entry_cnt++;
+ break;
+
+ case NPC_EXACT_OPC_MEM:
+ lhead = &table->lhead_mem_tbl_entry[ways];
+ table->mem_tbl_entry_cnt++;
+ break;
+
+ default:
+ mutex_unlock(&table->lock);
+ kfree(entry);
+ rvu_npc_exact_free_id(rvu, *seq_id);
+
+		dev_err(rvu->dev, "%s: Unknown opc type %d\n", __func__, opc_type);
+ return -EINVAL;
+ }
+
+ /* Add to global list */
+ INIT_LIST_HEAD(&entry->glist);
+ list_add_tail(&entry->glist, &table->lhead_gbl);
+ INIT_LIST_HEAD(&entry->list);
+ entry->index = index;
+ entry->ways = ways;
+ entry->opc_type = opc_type;
+
+ entry->pcifunc = pcifunc;
+
+ ether_addr_copy(entry->mac, mac_addr);
+ entry->chan = chan;
+ entry->ctype = ctype;
+ entry->cgx_id = cgx_id;
+ entry->lmac_id = lmac_id;
+
+ entry->seq_id = *seq_id;
+
+ entry->mcam_idx = mcam_idx;
+ entry->cmd = cmd;
+
+ pprev = lhead;
+
+ /* Insert entry in ascending order of index */
+ list_for_each_entry_safe(iter, tmp, lhead, list) {
+ if (index < iter->index)
+ break;
+
+ pprev = &iter->list;
+ }
+
+ /* Add to each table list */
+ list_add(&entry->list, pprev);
+ mutex_unlock(&table->lock);
+ return 0;
+}
+
+/**
+ * rvu_npc_exact_mem_table_write - Wrapper for register write
+ * @rvu: resource virtualization unit.
+ * @blkaddr: Block address
+ * @ways: ways for MEM table.
+ * @index: Index in MEM
+ * @mdata: Meta data to be written to register.
+ */
+static void rvu_npc_exact_mem_table_write(struct rvu *rvu, int blkaddr, u8 ways,
+ u32 index, u64 mdata)
+{
+ rvu_write64(rvu, blkaddr, NPC_AF_EXACT_MEM_ENTRY(ways, index), mdata);
+}
+
+/**
+ * rvu_npc_exact_cam_table_write - Wrapper for register write
+ * @rvu: resource virtualization unit.
+ * @blkaddr: Block address
+ * @index: Index in MEM
+ * @mdata: Meta data to be written to register.
+ */
+static void rvu_npc_exact_cam_table_write(struct rvu *rvu, int blkaddr,
+ u32 index, u64 mdata)
+{
+ rvu_write64(rvu, blkaddr, NPC_AF_EXACT_CAM_ENTRY(index), mdata);
+}
+
+/**
+ * rvu_npc_exact_dealloc_table_entry - dealloc table entry
+ * @rvu: resource virtualization unit.
+ * @opc_type: OPCODE for selection of table(MEM or CAM)
+ * @ways: ways if opc_type is MEM table.
+ * @index: Index of MEM or CAM table.
+ * Return: 0 upon success.
+ */
+static int rvu_npc_exact_dealloc_table_entry(struct rvu *rvu, enum npc_exact_opc_type opc_type,
+ u8 ways, u32 index)
+{
+ int blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ struct npc_exact_table *table;
+ u8 null_dmac[6] = { 0 };
+ int depth;
+
+ /* Prepare entry with all fields set to zero */
+ u64 null_mdata = rvu_exact_prepare_table_entry(rvu, false, 0, 0, null_dmac);
+
+ table = rvu->hw->table;
+ depth = table->mem_table.depth;
+
+ mutex_lock(&table->lock);
+
+ switch (opc_type) {
+ case NPC_EXACT_OPC_CAM:
+
+ /* Check whether entry is used already */
+ if (!test_bit(index, table->cam_table.bmap)) {
+ mutex_unlock(&table->lock);
+ dev_err(rvu->dev, "%s: Trying to free an unused entry ways=%d index=%d\n",
+ __func__, ways, index);
+ return -EINVAL;
+ }
+
+ rvu_npc_exact_cam_table_write(rvu, blkaddr, index, null_mdata);
+ clear_bit(index, table->cam_table.bmap);
+ break;
+
+ case NPC_EXACT_OPC_MEM:
+
+ /* Check whether entry is used already */
+ if (!test_bit(index + ways * depth, table->mem_table.bmap)) {
+ mutex_unlock(&table->lock);
+ dev_err(rvu->dev, "%s: Trying to free an unused entry index=%d\n",
+ __func__, index);
+ return -EINVAL;
+ }
+
+ rvu_npc_exact_mem_table_write(rvu, blkaddr, ways, index, null_mdata);
+ clear_bit(index + ways * depth, table->mem_table.bmap);
+ break;
+
+ default:
+ mutex_unlock(&table->lock);
+ dev_err(rvu->dev, "%s: invalid opc type %d", __func__, opc_type);
+ return -ENOSPC;
+ }
+
+ mutex_unlock(&table->lock);
+
+	dev_dbg(rvu->dev, "%s: Successfully deleted entry (index=%d, ways=%d opc_type=%d)\n",
+ __func__, index, ways, opc_type);
+
+ return 0;
+}
+
+/**
+ * rvu_npc_exact_alloc_table_entry - Allocate an entry
+ * @rvu: resource virtualization unit.
+ * @mac: MAC address.
+ * @chan: Channel number.
+ * @ctype: Channel Type.
+ * @index: Index of MEM table or CAM table.
+ * @ways: Ways. Only valid for MEM table.
+ * @opc_type: OPCODE to select table (MEM or CAM)
+ *
+ * Try allocating a slot in the MEM table. If all 4 way
+ * slots are full for a hash index, check availability in
+ * the 32-entry CAM table instead.
+ * Return: 0 upon success.
+ */
+static int rvu_npc_exact_alloc_table_entry(struct rvu *rvu, char *mac, u16 chan, u8 ctype,
+ u32 *index, u8 *ways, enum npc_exact_opc_type *opc_type)
+{
+ struct npc_exact_table *table;
+ unsigned int hash;
+ int err;
+
+ table = rvu->hw->table;
+
+	/* Check the 4-way mem table for a free slot */
+ hash = rvu_exact_calculate_hash(rvu, chan, ctype, mac, table->mem_table.mask,
+ table->mem_table.depth);
+ err = rvu_npc_exact_alloc_mem_table_entry(rvu, ways, index, hash);
+ if (!err) {
+ *opc_type = NPC_EXACT_OPC_MEM;
+ dev_dbg(rvu->dev, "%s: inserted in 4 ways hash table ways=%d, index=%d\n",
+ __func__, *ways, *index);
+ return 0;
+ }
+
+ dev_dbg(rvu->dev, "%s: failed to insert in 4 ways hash table\n", __func__);
+
+	/* ways is 0 for the cam table */
+ *ways = 0;
+ err = rvu_npc_exact_alloc_cam_table_entry(rvu, index);
+ if (!err) {
+ *opc_type = NPC_EXACT_OPC_CAM;
+ dev_dbg(rvu->dev, "%s: inserted in fully associative hash table index=%u\n",
+ __func__, *index);
+ return 0;
+ }
+
+ dev_err(rvu->dev, "%s: failed to insert in fully associative hash table\n", __func__);
+ return -ENOSPC;
+}
+
+/**
+ * rvu_npc_exact_save_drop_rule_chan_and_mask - Save drop rules info in data base.
+ * @rvu: resource virtualization unit.
+ * @drop_mcam_idx: Drop rule index in NPC mcam.
+ * @chan_val: Channel value.
+ * @chan_mask: Channel Mask.
+ * @pcifunc: pcifunc of interface.
+ * Return: True upon success.
+ */
+static bool rvu_npc_exact_save_drop_rule_chan_and_mask(struct rvu *rvu, int drop_mcam_idx,
+ u64 chan_val, u64 chan_mask, u16 pcifunc)
+{
+ struct npc_exact_table *table;
+ int i;
+
+ table = rvu->hw->table;
+
+ for (i = 0; i < NPC_MCAM_DROP_RULE_MAX; i++) {
+ if (!table->drop_rule_map[i].valid)
+ break;
+
+ if (table->drop_rule_map[i].chan_val != (u16)chan_val)
+ continue;
+
+ if (table->drop_rule_map[i].chan_mask != (u16)chan_mask)
+ continue;
+
+ return false;
+ }
+
+ if (i == NPC_MCAM_DROP_RULE_MAX)
+ return false;
+
+ table->drop_rule_map[i].drop_rule_idx = drop_mcam_idx;
+ table->drop_rule_map[i].chan_val = (u16)chan_val;
+ table->drop_rule_map[i].chan_mask = (u16)chan_mask;
+ table->drop_rule_map[i].pcifunc = pcifunc;
+ table->drop_rule_map[i].valid = true;
+ return true;
+}
+
+/**
+ * rvu_npc_exact_calc_drop_rule_chan_and_mask - Calculate Channel number and mask.
+ * @rvu: resource virtualization unit.
+ * @intf_type: Interface type (SDP, LBK or CGX)
+ * @cgx_id: CGX identifier.
+ * @lmac_id: LMAC identifier.
+ * @val: Channel number.
+ * @mask: Channel mask.
+ * Return: True upon success.
+ */
+static bool rvu_npc_exact_calc_drop_rule_chan_and_mask(struct rvu *rvu, u8 intf_type,
+ u8 cgx_id, u8 lmac_id,
+ u64 *val, u64 *mask)
+{
+ u16 chan_val, chan_mask;
+
+ /* No support for SDP and LBK */
+ if (intf_type != NIX_INTF_TYPE_CGX)
+ return false;
+
+ chan_val = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0);
+ chan_mask = 0xfff;
+
+ if (val)
+ *val = chan_val;
+
+ if (mask)
+ *mask = chan_mask;
+
+ return true;
+}
+
+/**
+ * rvu_npc_exact_drop_rule_to_pcifunc - Retrieve pcifunc
+ * @rvu: resource virtualization unit.
+ * @drop_rule_idx: Drop rule index in NPC mcam.
+ *
+ * Debugfs (exact_drop_cnt) entry displays the pcifunc for the interface
+ * by retrieving the pcifunc value from the database.
+ * Return: pcifunc mapped to the drop rule index.
+ */
+u16 rvu_npc_exact_drop_rule_to_pcifunc(struct rvu *rvu, u32 drop_rule_idx)
+{
+ struct npc_exact_table *table;
+ int i;
+
+ table = rvu->hw->table;
+
+ for (i = 0; i < NPC_MCAM_DROP_RULE_MAX; i++) {
+ if (!table->drop_rule_map[i].valid)
+ break;
+
+ if (table->drop_rule_map[i].drop_rule_idx != drop_rule_idx)
+ continue;
+
+ return table->drop_rule_map[i].pcifunc;
+ }
+
+ dev_err(rvu->dev, "%s: drop mcam rule index (%d) >= NPC_MCAM_DROP_RULE_MAX\n",
+ __func__, drop_rule_idx);
+ return -1;
+}
+
+/**
+ * rvu_npc_exact_get_drop_rule_info - Get drop rule information.
+ * @rvu: resource virtualization unit.
+ * @intf_type: Interface type (CGX, SDP or LBK)
+ * @cgx_id: CGX identifier.
+ * @lmac_id: LMAC identifier.
+ * @drop_mcam_idx: NPC mcam drop rule index.
+ * @val: Channel value.
+ * @mask: Channel mask.
+ * @pcifunc: pcifunc of interface corresponding to the drop rule.
+ * Return: True upon success.
+ */
+static bool rvu_npc_exact_get_drop_rule_info(struct rvu *rvu, u8 intf_type, u8 cgx_id,
+ u8 lmac_id, u32 *drop_mcam_idx, u64 *val,
+ u64 *mask, u16 *pcifunc)
+{
+ struct npc_exact_table *table;
+ u64 chan_val, chan_mask;
+ bool rc;
+ int i;
+
+ table = rvu->hw->table;
+
+ if (intf_type != NIX_INTF_TYPE_CGX) {
+ dev_err(rvu->dev, "%s: No drop rule for LBK/SDP mode\n", __func__);
+ return false;
+ }
+
+ rc = rvu_npc_exact_calc_drop_rule_chan_and_mask(rvu, intf_type, cgx_id,
+ lmac_id, &chan_val, &chan_mask);
+ if (!rc)
+ return false;
+
+ for (i = 0; i < NPC_MCAM_DROP_RULE_MAX; i++) {
+ if (!table->drop_rule_map[i].valid)
+ break;
+
+ if (table->drop_rule_map[i].chan_val != (u16)chan_val)
+ continue;
+
+ if (val)
+ *val = table->drop_rule_map[i].chan_val;
+ if (mask)
+ *mask = table->drop_rule_map[i].chan_mask;
+ if (pcifunc)
+ *pcifunc = table->drop_rule_map[i].pcifunc;
+
+ *drop_mcam_idx = i;
+ return true;
+ }
+
+ if (i == NPC_MCAM_DROP_RULE_MAX) {
+ dev_err(rvu->dev, "%s: drop mcam rule index (%d) >= NPC_MCAM_DROP_RULE_MAX\n",
+ __func__, *drop_mcam_idx);
+ return false;
+ }
+
+ dev_err(rvu->dev, "%s: Could not retrieve for cgx=%d, lmac=%d\n",
+ __func__, cgx_id, lmac_id);
+ return false;
+}
+
+/**
+ * __rvu_npc_exact_cmd_rules_cnt_update - Update the number of dmac rules against a drop rule.
+ * @rvu: resource virtualization unit.
+ * @drop_mcam_idx: NPC mcam drop rule index.
+ * @val: +1 or -1.
+ * @enable_or_disable_cam: Set to true when the drop rule needs to be enabled or disabled.
+ *
+ * When the first exact match entry against a drop rule is added, or the last
+ * exact match entry against a drop rule is deleted, enable_or_disable_cam is
+ * set to true so the caller can enable or disable the drop rule accordingly.
+ * Return: Number of rules
+ */
+static u16 __rvu_npc_exact_cmd_rules_cnt_update(struct rvu *rvu, int drop_mcam_idx,
+ int val, bool *enable_or_disable_cam)
+{
+ struct npc_exact_table *table;
+ u16 *cnt, old_cnt;
+ bool promisc;
+
+ table = rvu->hw->table;
+ promisc = table->promisc_mode[drop_mcam_idx];
+
+ cnt = &table->cnt_cmd_rules[drop_mcam_idx];
+ old_cnt = *cnt;
+
+ *cnt += val;
+
+ if (!enable_or_disable_cam)
+ goto done;
+
+ *enable_or_disable_cam = false;
+
+ if (promisc)
+ goto done;
+
+	/* If all rules are deleted and we are not already in promisc mode,
+	 * disable cam
+	 */
+ if (!*cnt && val < 0) {
+ *enable_or_disable_cam = true;
+ goto done;
+ }
+
+	/* If a rule got added and we are not already in promisc mode, enable cam */
+ if (!old_cnt && val > 0) {
+ *enable_or_disable_cam = true;
+ goto done;
+ }
+
+done:
+ return *cnt;
+}
+
+/**
+ * rvu_npc_exact_del_table_entry_by_id - Delete and free table entry.
+ * @rvu: resource virtualization unit.
+ * @seq_id: Sequence identifier of the entry.
+ *
+ * Deletes entry from linked lists and free up slot in HW MEM or CAM
+ * table.
+ * Return: 0 upon success.
+ */
+static int rvu_npc_exact_del_table_entry_by_id(struct rvu *rvu, u32 seq_id)
+{
+ struct npc_exact_table_entry *entry = NULL;
+ struct npc_exact_table *table;
+ bool disable_cam = false;
+ u32 drop_mcam_idx = -1;
+ int *cnt;
+ bool rc;
+
+ table = rvu->hw->table;
+
+ mutex_lock(&table->lock);
+
+ /* Lookup for entry which needs to be updated */
+ entry = __rvu_npc_exact_find_entry_by_seq_id(rvu, seq_id);
+ if (!entry) {
+ dev_dbg(rvu->dev, "%s: failed to find entry for id=%d\n", __func__, seq_id);
+ mutex_unlock(&table->lock);
+ return -ENODATA;
+ }
+
+ cnt = (entry->opc_type == NPC_EXACT_OPC_CAM) ? &table->cam_tbl_entry_cnt :
+ &table->mem_tbl_entry_cnt;
+
+ /* delete from lists */
+ list_del_init(&entry->list);
+ list_del_init(&entry->glist);
+
+ (*cnt)--;
+
+ rc = rvu_npc_exact_get_drop_rule_info(rvu, NIX_INTF_TYPE_CGX, entry->cgx_id,
+ entry->lmac_id, &drop_mcam_idx, NULL, NULL, NULL);
+ if (!rc) {
+ dev_dbg(rvu->dev, "%s: failed to retrieve drop info for id=0x%x\n",
+ __func__, seq_id);
+ mutex_unlock(&table->lock);
+ return -ENODATA;
+ }
+
+ if (entry->cmd)
+ __rvu_npc_exact_cmd_rules_cnt_update(rvu, drop_mcam_idx, -1, &disable_cam);
+
+ /* No dmac filter rules; disable drop on hit rule */
+ if (disable_cam) {
+ rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, false);
+ dev_dbg(rvu->dev, "%s: Disabling mcam idx %d\n",
+ __func__, drop_mcam_idx);
+ }
+
+ mutex_unlock(&table->lock);
+
+ rvu_npc_exact_dealloc_table_entry(rvu, entry->opc_type, entry->ways, entry->index);
+
+ rvu_npc_exact_free_id(rvu, seq_id);
+
+	dev_dbg(rvu->dev, "%s: delete entry success for id=0x%x, mac=%pM\n",
+ __func__, seq_id, entry->mac);
+ kfree(entry);
+
+ return 0;
+}
+
+/**
+ * rvu_npc_exact_add_table_entry - Adds a table entry
+ * @rvu: resource virtualization unit.
+ * @cgx_id: cgx identifier.
+ * @lmac_id: lmac identifier.
+ * @mac: MAC address.
+ * @chan: Channel number.
+ * @ctype: Channel Type.
+ * @seq_id: Sequence number.
+ * @cmd: Whether it is invoked by ethtool cmd.
+ * @mcam_idx: NPC mcam index corresponding to MAC
+ * @pcifunc: PCI func.
+ *
+ * Creates a new exact match table entry in either CAM or
+ * MEM table.
+ * Return: 0 upon success.
+ */
+static int rvu_npc_exact_add_table_entry(struct rvu *rvu, u8 cgx_id, u8 lmac_id, u8 *mac,
+ u16 chan, u8 ctype, u32 *seq_id, bool cmd,
+ u32 mcam_idx, u16 pcifunc)
+{
+ int blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ enum npc_exact_opc_type opc_type;
+ bool enable_cam = false;
+ u32 drop_mcam_idx;
+ u32 index;
+ u64 mdata;
+ bool rc;
+ int err;
+ u8 ways;
+
+ ctype = 0;
+
+ err = rvu_npc_exact_alloc_table_entry(rvu, mac, chan, ctype, &index, &ways, &opc_type);
+ if (err) {
+ dev_err(rvu->dev, "%s: Could not alloc in exact match table\n", __func__);
+ return err;
+ }
+
+ /* Write mdata to table */
+ mdata = rvu_exact_prepare_table_entry(rvu, true, ctype, chan, mac);
+
+ if (opc_type == NPC_EXACT_OPC_CAM)
+ rvu_npc_exact_cam_table_write(rvu, blkaddr, index, mdata);
+ else
+ rvu_npc_exact_mem_table_write(rvu, blkaddr, ways, index, mdata);
+
+ /* Insert entry to linked list */
+ err = rvu_npc_exact_add_to_list(rvu, opc_type, ways, index, cgx_id, lmac_id,
+ mac, chan, ctype, seq_id, cmd, mcam_idx, pcifunc);
+ if (err) {
+ rvu_npc_exact_dealloc_table_entry(rvu, opc_type, ways, index);
+ dev_err(rvu->dev, "%s: could not add to exact match table\n", __func__);
+ return err;
+ }
+
+ rc = rvu_npc_exact_get_drop_rule_info(rvu, NIX_INTF_TYPE_CGX, cgx_id, lmac_id,
+ &drop_mcam_idx, NULL, NULL, NULL);
+ if (!rc) {
+ rvu_npc_exact_dealloc_table_entry(rvu, opc_type, ways, index);
+ dev_dbg(rvu->dev, "%s: failed to get drop rule info cgx=%d lmac=%d\n",
+ __func__, cgx_id, lmac_id);
+ return -EINVAL;
+ }
+
+ if (cmd)
+ __rvu_npc_exact_cmd_rules_cnt_update(rvu, drop_mcam_idx, 1, &enable_cam);
+
+ /* First command rule; enable drop on hit rule */
+ if (enable_cam) {
+ rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, true);
+ dev_dbg(rvu->dev, "%s: Enabling mcam idx %d\n",
+ __func__, drop_mcam_idx);
+ }
+
+ dev_dbg(rvu->dev,
+		"%s: Successfully added entry (index=%d, dmac=%pM, ways=%d opc_type=%d)\n",
+ __func__, index, mac, ways, opc_type);
+
+ return 0;
+}
+
+/**
+ * rvu_npc_exact_update_table_entry - Update exact match table.
+ * @rvu: resource virtualization unit.
+ * @cgx_id: CGX identifier.
+ * @lmac_id: LMAC identifier.
+ * @old_mac: Existing MAC address entry.
+ * @new_mac: New MAC address entry.
+ * @seq_id: Sequence identifier of the entry.
+ *
+ * Updates MAC address of an entry. If entry is in MEM table, new
+ * hash value may not match with old one.
+ * Return: 0 upon success.
+ */
+static int rvu_npc_exact_update_table_entry(struct rvu *rvu, u8 cgx_id, u8 lmac_id,
+ u8 *old_mac, u8 *new_mac, u32 *seq_id)
+{
+ int blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ struct npc_exact_table_entry *entry;
+ struct npc_exact_table *table;
+ u32 hash_index;
+ u64 mdata;
+
+ table = rvu->hw->table;
+
+ mutex_lock(&table->lock);
+
+ /* Lookup for entry which needs to be updated */
+ entry = __rvu_npc_exact_find_entry_by_seq_id(rvu, *seq_id);
+ if (!entry) {
+ mutex_unlock(&table->lock);
+ dev_dbg(rvu->dev,
+ "%s: failed to find entry for cgx_id=%d lmac_id=%d old_mac=%pM\n",
+ __func__, cgx_id, lmac_id, old_mac);
+ return -ENODATA;
+ }
+
+ /* If entry is in mem table and new hash index is different than old
+ * hash index, we cannot update the entry. Fail in these scenarios.
+ */
+ if (entry->opc_type == NPC_EXACT_OPC_MEM) {
+ hash_index = rvu_exact_calculate_hash(rvu, entry->chan, entry->ctype,
+ new_mac, table->mem_table.mask,
+ table->mem_table.depth);
+ if (hash_index != entry->index) {
+ dev_dbg(rvu->dev,
+ "%s: Update failed due to index mismatch(new=0x%x, old=%x)\n",
+ __func__, hash_index, entry->index);
+ mutex_unlock(&table->lock);
+ return -EINVAL;
+ }
+ }
+
+ mdata = rvu_exact_prepare_table_entry(rvu, true, entry->ctype, entry->chan, new_mac);
+
+ if (entry->opc_type == NPC_EXACT_OPC_MEM)
+ rvu_npc_exact_mem_table_write(rvu, blkaddr, entry->ways, entry->index, mdata);
+ else
+ rvu_npc_exact_cam_table_write(rvu, blkaddr, entry->index, mdata);
+
+ /* Update entry fields */
+ ether_addr_copy(entry->mac, new_mac);
+ *seq_id = entry->seq_id;
+
+ dev_dbg(rvu->dev,
+		"%s: Successfully updated entry (index=%d, dmac=%pM, ways=%d opc_type=%d)\n",
+ __func__, entry->index, entry->mac, entry->ways, entry->opc_type);
+
+	dev_dbg(rvu->dev, "%s: Successfully updated entry (old mac=%pM new_mac=%pM)\n",
+ __func__, old_mac, new_mac);
+
+ mutex_unlock(&table->lock);
+ return 0;
+}
+
+/**
+ * rvu_npc_exact_promisc_disable - Disable promiscuous mode.
+ * @rvu: resource virtualization unit.
+ * @pcifunc: pcifunc
+ *
+ * A drop rule exists for each PF. We don't support DMAC filters for
+ * VFs.
+ * Return: 0 upon success
+ */
+
+int rvu_npc_exact_promisc_disable(struct rvu *rvu, u16 pcifunc)
+{
+ struct npc_exact_table *table;
+ int pf = rvu_get_pf(pcifunc);
+ u8 cgx_id, lmac_id;
+ u32 drop_mcam_idx;
+ bool *promisc;
+ bool rc;
+
+ table = rvu->hw->table;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ rc = rvu_npc_exact_get_drop_rule_info(rvu, NIX_INTF_TYPE_CGX, cgx_id, lmac_id,
+ &drop_mcam_idx, NULL, NULL, NULL);
+ if (!rc) {
+ dev_dbg(rvu->dev, "%s: failed to get drop rule info cgx=%d lmac=%d\n",
+ __func__, cgx_id, lmac_id);
+ return -EINVAL;
+ }
+
+ mutex_lock(&table->lock);
+ promisc = &table->promisc_mode[drop_mcam_idx];
+
+ if (!*promisc) {
+ mutex_unlock(&table->lock);
+ dev_dbg(rvu->dev, "%s: Err Already promisc mode disabled (cgx=%d lmac=%d)\n",
+ __func__, cgx_id, lmac_id);
+ return LMAC_AF_ERR_INVALID_PARAM;
+ }
+ *promisc = false;
+ mutex_unlock(&table->lock);
+
+ /* Enable drop rule */
+ rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX,
+ true);
+
+ dev_dbg(rvu->dev, "%s: disabled promisc mode (cgx=%d lmac=%d)\n",
+ __func__, cgx_id, lmac_id);
+ return 0;
+}
+
+/**
+ * rvu_npc_exact_promisc_enable - Enable promiscuous mode.
+ * @rvu: resource virtualization unit.
+ * @pcifunc: pcifunc.
+ * Return: 0 upon success
+ */
+int rvu_npc_exact_promisc_enable(struct rvu *rvu, u16 pcifunc)
+{
+ struct npc_exact_table *table;
+ int pf = rvu_get_pf(pcifunc);
+ u8 cgx_id, lmac_id;
+ u32 drop_mcam_idx;
+ bool *promisc;
+ bool rc;
+
+ table = rvu->hw->table;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ rc = rvu_npc_exact_get_drop_rule_info(rvu, NIX_INTF_TYPE_CGX, cgx_id, lmac_id,
+ &drop_mcam_idx, NULL, NULL, NULL);
+ if (!rc) {
+ dev_dbg(rvu->dev, "%s: failed to get drop rule info cgx=%d lmac=%d\n",
+ __func__, cgx_id, lmac_id);
+ return -EINVAL;
+ }
+
+ mutex_lock(&table->lock);
+ promisc = &table->promisc_mode[drop_mcam_idx];
+
+ if (*promisc) {
+ mutex_unlock(&table->lock);
+ dev_dbg(rvu->dev, "%s: Already in promisc mode (cgx=%d lmac=%d)\n",
+ __func__, cgx_id, lmac_id);
+ return LMAC_AF_ERR_INVALID_PARAM;
+ }
+ *promisc = true;
+ mutex_unlock(&table->lock);
+
+ /* disable drop rule */
+ rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX,
+ false);
+
+ dev_dbg(rvu->dev, "%s: Enabled promisc mode (cgx=%d lmac=%d)\n",
+ __func__, cgx_id, lmac_id);
+ return 0;
+}
+
+/**
+ * rvu_npc_exact_mac_addr_reset - Delete PF mac address.
+ * @rvu: resource virtualization unit.
+ * @req: Reset request
+ * @rsp: Reset response.
+ * Return: 0 upon success
+ */
+int rvu_npc_exact_mac_addr_reset(struct rvu *rvu, struct cgx_mac_addr_reset_req *req,
+ struct msg_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u32 seq_id = req->index;
+ struct rvu_pfvf *pfvf;
+ u8 cgx_id, lmac_id;
+ int rc;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+ pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
+
+ rc = rvu_npc_exact_del_table_entry_by_id(rvu, seq_id);
+ if (rc) {
+ /* TODO: how to handle this error case ? */
+ dev_err(rvu->dev, "%s MAC (%pM) del PF=%d failed\n", __func__, pfvf->mac_addr, pf);
+ return 0;
+ }
+
+ dev_dbg(rvu->dev, "%s MAC (%pM) del PF=%d success (seq_id=%u)\n",
+ __func__, pfvf->mac_addr, pf, seq_id);
+ return 0;
+}
+
+/**
+ * rvu_npc_exact_mac_addr_update - Update mac address field with new value.
+ * @rvu: resource virtualization unit.
+ * @req: Update request.
+ * @rsp: Update response.
+ * Return: 0 upon success
+ */
+int rvu_npc_exact_mac_addr_update(struct rvu *rvu,
+ struct cgx_mac_addr_update_req *req,
+ struct cgx_mac_addr_update_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ struct npc_exact_table_entry *entry;
+ struct npc_exact_table *table;
+ struct rvu_pfvf *pfvf;
+ u32 seq_id, mcam_idx;
+ u8 old_mac[ETH_ALEN];
+ u8 cgx_id, lmac_id;
+ int rc;
+
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return LMAC_AF_ERR_PERM_DENIED;
+
+ dev_dbg(rvu->dev, "%s: Update request for seq_id=%d, mac=%pM\n",
+ __func__, req->index, req->mac_addr);
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+ pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
+
+ table = rvu->hw->table;
+
+ mutex_lock(&table->lock);
+
+ /* Lookup for entry which needs to be updated */
+ entry = __rvu_npc_exact_find_entry_by_seq_id(rvu, req->index);
+ if (!entry) {
+ dev_err(rvu->dev, "%s: failed to find entry for id=0x%x\n", __func__, req->index);
+ mutex_unlock(&table->lock);
+ return LMAC_AF_ERR_EXACT_MATCH_TBL_LOOK_UP_FAILED;
+ }
+ ether_addr_copy(old_mac, entry->mac);
+ seq_id = entry->seq_id;
+ mcam_idx = entry->mcam_idx;
+ mutex_unlock(&table->lock);
+
+ rc = rvu_npc_exact_update_table_entry(rvu, cgx_id, lmac_id, old_mac,
+ req->mac_addr, &seq_id);
+ if (!rc) {
+ rsp->index = seq_id;
+ dev_dbg(rvu->dev, "%s mac:%pM (pfvf:%pM default:%pM) update to PF=%d success\n",
+ __func__, req->mac_addr, pfvf->mac_addr, pfvf->default_mac, pf);
+ ether_addr_copy(pfvf->mac_addr, req->mac_addr);
+ return 0;
+ }
+
+ /* Try deleting and adding it again */
+ rc = rvu_npc_exact_del_table_entry_by_id(rvu, req->index);
+ if (rc) {
+ /* This could be a new entry */
+ dev_dbg(rvu->dev, "%s MAC (%pM) del PF=%d failed\n", __func__,
+ pfvf->mac_addr, pf);
+ }
+
+ rc = rvu_npc_exact_add_table_entry(rvu, cgx_id, lmac_id, req->mac_addr,
+ pfvf->rx_chan_base, 0, &seq_id, true,
+ mcam_idx, req->hdr.pcifunc);
+ if (rc) {
+ dev_err(rvu->dev, "%s MAC (%pM) add PF=%d failed\n", __func__,
+ req->mac_addr, pf);
+ return LMAC_AF_ERR_EXACT_MATCH_TBL_ADD_FAILED;
+ }
+
+ rsp->index = seq_id;
+ dev_dbg(rvu->dev,
+ "%s MAC (new:%pM, old=%pM default:%pM) del and add to PF=%d success (seq_id=%u)\n",
+ __func__, req->mac_addr, pfvf->mac_addr, pfvf->default_mac, pf, seq_id);
+
+ ether_addr_copy(pfvf->mac_addr, req->mac_addr);
+ return 0;
+}
+
+/**
+ * rvu_npc_exact_mac_addr_add - Adds MAC address to exact match table.
+ * @rvu: resource virtualization unit.
+ * @req: Add request.
+ * @rsp: Add response.
+ * Return: 0 upon success
+ */
+int rvu_npc_exact_mac_addr_add(struct rvu *rvu,
+ struct cgx_mac_addr_add_req *req,
+ struct cgx_mac_addr_add_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ struct rvu_pfvf *pfvf;
+ u8 cgx_id, lmac_id;
+ int rc = 0;
+ u32 seq_id;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
+
+ rc = rvu_npc_exact_add_table_entry(rvu, cgx_id, lmac_id, req->mac_addr,
+ pfvf->rx_chan_base, 0, &seq_id,
+ true, -1, req->hdr.pcifunc);
+
+ if (!rc) {
+ rsp->index = seq_id;
+ dev_dbg(rvu->dev, "%s MAC (%pM) add to PF=%d success (seq_id=%u)\n",
+ __func__, req->mac_addr, pf, seq_id);
+ return 0;
+ }
+
+ dev_err(rvu->dev, "%s MAC (%pM) add to PF=%d failed\n", __func__,
+ req->mac_addr, pf);
+ return LMAC_AF_ERR_EXACT_MATCH_TBL_ADD_FAILED;
+}
+
+/**
+ * rvu_npc_exact_mac_addr_del - Delete DMAC filter
+ * @rvu: resource virtualization unit.
+ * @req: Delete request.
+ * @rsp: Delete response.
+ * Return: 0 upon success
+ */
+int rvu_npc_exact_mac_addr_del(struct rvu *rvu,
+ struct cgx_mac_addr_del_req *req,
+ struct msg_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ int rc;
+
+ rc = rvu_npc_exact_del_table_entry_by_id(rvu, req->index);
+ if (!rc) {
+ dev_dbg(rvu->dev, "%s del to PF=%d success (seq_id=%u)\n",
+ __func__, pf, req->index);
+ return 0;
+ }
+
+ dev_err(rvu->dev, "%s del to PF=%d failed (seq_id=%u)\n",
+ __func__, pf, req->index);
+ return LMAC_AF_ERR_EXACT_MATCH_TBL_DEL_FAILED;
+}
+
+/**
+ * rvu_npc_exact_mac_addr_set - Add PF mac address to dmac filter.
+ * @rvu: resource virtualization unit.
+ * @req: Set request.
+ * @rsp: Set response.
+ * Return: 0 upon success
+ */
+int rvu_npc_exact_mac_addr_set(struct rvu *rvu, struct cgx_mac_addr_set_or_get *req,
+ struct cgx_mac_addr_set_or_get *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u32 seq_id = req->index;
+ struct rvu_pfvf *pfvf;
+ u8 cgx_id, lmac_id;
+ u32 mcam_idx = -1;
+ int rc, nixlf;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+ pfvf = &rvu->pf[pf];
+
+	/* If the table does not have an entry, both the update entry and del
+	 * table entry APIs below fail. Those are not failure conditions.
+	 */
+ rc = rvu_npc_exact_update_table_entry(rvu, cgx_id, lmac_id, pfvf->mac_addr,
+ req->mac_addr, &seq_id);
+ if (!rc) {
+ rsp->index = seq_id;
+ ether_addr_copy(pfvf->mac_addr, req->mac_addr);
+ ether_addr_copy(rsp->mac_addr, req->mac_addr);
+ dev_dbg(rvu->dev, "%s MAC (%pM) update to PF=%d success\n",
+ __func__, req->mac_addr, pf);
+ return 0;
+ }
+
+ /* Try deleting and adding it again */
+ rc = rvu_npc_exact_del_table_entry_by_id(rvu, req->index);
+ if (rc) {
+ dev_dbg(rvu->dev, "%s MAC (%pM) del PF=%d failed\n",
+ __func__, pfvf->mac_addr, pf);
+ }
+
+ /* find mcam entry if exist */
+ rc = nix_get_nixlf(rvu, req->hdr.pcifunc, &nixlf, NULL);
+ if (!rc) {
+ mcam_idx = npc_get_nixlf_mcam_index(&rvu->hw->mcam, req->hdr.pcifunc,
+ nixlf, NIXLF_UCAST_ENTRY);
+ }
+
+ rc = rvu_npc_exact_add_table_entry(rvu, cgx_id, lmac_id, req->mac_addr,
+ pfvf->rx_chan_base, 0, &seq_id,
+ true, mcam_idx, req->hdr.pcifunc);
+ if (rc) {
+ dev_err(rvu->dev, "%s MAC (%pM) add PF=%d failed\n",
+ __func__, req->mac_addr, pf);
+ return LMAC_AF_ERR_EXACT_MATCH_TBL_ADD_FAILED;
+ }
+
+ rsp->index = seq_id;
+ ether_addr_copy(rsp->mac_addr, req->mac_addr);
+ ether_addr_copy(pfvf->mac_addr, req->mac_addr);
+ dev_dbg(rvu->dev,
+ "%s MAC (%pM) del and add to PF=%d success (seq_id=%u)\n",
+ __func__, req->mac_addr, pf, seq_id);
+ return 0;
+}
+
+/**
+ * rvu_npc_exact_can_disable_feature - Check if feature can be disabled.
+ * @rvu: resource virtualization unit.
+ * Return: True if exact match feature is supported.
+ */
+bool rvu_npc_exact_can_disable_feature(struct rvu *rvu)
+{
+ struct npc_exact_table *table = rvu->hw->table;
+ bool empty;
+
+ if (!rvu->hw->cap.npc_exact_match_enabled)
+ return false;
+
+ mutex_lock(&table->lock);
+ empty = list_empty(&table->lhead_gbl);
+ mutex_unlock(&table->lock);
+
+ return empty;
+}
+
+/**
+ * rvu_npc_exact_disable_feature - Disable feature.
+ * @rvu: resource virtualization unit.
+ */
+void rvu_npc_exact_disable_feature(struct rvu *rvu)
+{
+ rvu->hw->cap.npc_exact_match_enabled = false;
+}
+
+/**
+ * rvu_npc_exact_reset - Delete and free all entry which match pcifunc.
+ * @rvu: resource virtualization unit.
+ * @pcifunc: PCI func to match.
+ */
+void rvu_npc_exact_reset(struct rvu *rvu, u16 pcifunc)
+{
+ struct npc_exact_table *table = rvu->hw->table;
+ struct npc_exact_table_entry *tmp, *iter;
+ u32 seq_id;
+
+ mutex_lock(&table->lock);
+ list_for_each_entry_safe(iter, tmp, &table->lhead_gbl, glist) {
+ if (pcifunc != iter->pcifunc)
+ continue;
+
+ seq_id = iter->seq_id;
+		dev_dbg(rvu->dev, "%s: resetting pcifunc=%d seq_id=%u\n", __func__,
+ pcifunc, seq_id);
+
+ mutex_unlock(&table->lock);
+ rvu_npc_exact_del_table_entry_by_id(rvu, seq_id);
+ mutex_lock(&table->lock);
+ }
+ mutex_unlock(&table->lock);
+}
+
+/**
+ * rvu_npc_exact_init - initialize exact match table
+ * @rvu: resource virtualization unit.
+ *
+ * Initialize HW and SW resources to manage 4way-2K table and fully
+ * associative 32-entry mcam table.
+ * Return: 0 upon success.
+ */
+int rvu_npc_exact_init(struct rvu *rvu)
+{
+ u64 bcast_mcast_val, bcast_mcast_mask;
+ struct npc_exact_table *table;
+ u64 exact_val, exact_mask;
+ u64 chan_val, chan_mask;
+ u8 cgx_id, lmac_id;
+ u32 *drop_mcam_idx;
+ u16 max_lmac_cnt;
+ u64 npc_const3;
+ int table_size;
+ int blkaddr;
+ u16 pcifunc;
+ int err, i;
+ u64 cfg;
+ bool rc;
+
+	/* Read NPC_AF_CONST3 and check whether the exact
+	 * match functionality is present
+	 */
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0) {
+ dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Check exact match feature is supported */
+ npc_const3 = rvu_read64(rvu, blkaddr, NPC_AF_CONST3);
+ if (!(npc_const3 & BIT_ULL(62)))
+ return 0;
+
+ /* Check if kex profile has enabled EXACT match nibble */
+ cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX));
+ if (!(cfg & NPC_EXACT_NIBBLE_HIT))
+ return 0;
+
+ /* Set capability to true */
+ rvu->hw->cap.npc_exact_match_enabled = true;
+
+ table = kzalloc(sizeof(*table), GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
+
+ dev_dbg(rvu->dev, "%s: Memory allocation for table success\n", __func__);
+ rvu->hw->table = table;
+
+ /* Read table size, ways and depth */
+ table->mem_table.ways = FIELD_GET(GENMASK_ULL(19, 16), npc_const3);
+ table->mem_table.depth = FIELD_GET(GENMASK_ULL(15, 0), npc_const3);
+ table->cam_table.depth = FIELD_GET(GENMASK_ULL(31, 24), npc_const3);
+
+ dev_dbg(rvu->dev, "%s: NPC exact match 4way_2k table(ways=%d, depth=%d)\n",
+		__func__, table->mem_table.ways, table->mem_table.depth);
+
+	/* Fail if the depth of the table is not a power of 2.
+	 * TODO: why is __builtin_popcount() not working?
+	 */
+ if ((table->mem_table.depth & (table->mem_table.depth - 1)) != 0) {
+ dev_err(rvu->dev,
+			"%s: NPC exact match 4way_2k table depth(%d) is not a power of 2\n",
+ __func__, table->mem_table.depth);
+ return -EINVAL;
+ }
+
+ table_size = table->mem_table.depth * table->mem_table.ways;
+
+ /* Allocate bitmap for 4way 2K table */
+ table->mem_table.bmap = devm_bitmap_zalloc(rvu->dev, table_size,
+ GFP_KERNEL);
+ if (!table->mem_table.bmap)
+ return -ENOMEM;
+
+ dev_dbg(rvu->dev, "%s: Allocated bitmap for 4way 2K entry table\n", __func__);
+
+ /* Allocate bitmap for 32 entry mcam */
+ table->cam_table.bmap = devm_bitmap_zalloc(rvu->dev, 32, GFP_KERNEL);
+
+ if (!table->cam_table.bmap)
+ return -ENOMEM;
+
+ dev_dbg(rvu->dev, "%s: Allocated bitmap for 32 entry cam\n", __func__);
+
+ table->tot_ids = table_size + table->cam_table.depth;
+ table->id_bmap = devm_bitmap_zalloc(rvu->dev, table->tot_ids,
+ GFP_KERNEL);
+
+ if (!table->id_bmap)
+ return -ENOMEM;
+
+ dev_dbg(rvu->dev, "%s: Allocated bitmap for id map (total=%d)\n",
+ __func__, table->tot_ids);
+
+	/* Initialize list heads for npc_exact_table entries.
+	 * These lists are used by debugfs to show the entries in the
+	 * exact match table.
+	 */
+ for (i = 0; i < NPC_EXACT_TBL_MAX_WAYS; i++)
+ INIT_LIST_HEAD(&table->lhead_mem_tbl_entry[i]);
+
+ INIT_LIST_HEAD(&table->lhead_cam_tbl_entry);
+ INIT_LIST_HEAD(&table->lhead_gbl);
+
+ mutex_init(&table->lock);
+
+ rvu_exact_config_secret_key(rvu);
+ rvu_exact_config_search_key(rvu);
+
+ rvu_exact_config_table_mask(rvu);
+ rvu_exact_config_result_ctrl(rvu, table->mem_table.depth);
+
+ /* - No drop rule for LBK
+ * - Drop rules for SDP and each LMAC.
+ */
+ exact_val = !NPC_EXACT_RESULT_HIT;
+ exact_mask = NPC_EXACT_RESULT_HIT;
+
+ /* nibble - 3 2 1 0
+ * L3B L3M L2B L2M
+ */
+ bcast_mcast_val = 0b0000;
+ bcast_mcast_mask = 0b0011;
+
+ /* Install SDP drop rule */
+ drop_mcam_idx = &table->num_drop_rules;
+
+ max_lmac_cnt = rvu->cgx_cnt_max * rvu->hw->lmac_per_cgx +
+ PF_CGXMAP_BASE;
+
+ for (i = PF_CGXMAP_BASE; i < max_lmac_cnt; i++) {
+ if (rvu->pf2cgxlmac_map[i] == 0xFF)
+ continue;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[i], &cgx_id, &lmac_id);
+
+ rc = rvu_npc_exact_calc_drop_rule_chan_and_mask(rvu, NIX_INTF_TYPE_CGX, cgx_id,
+ lmac_id, &chan_val, &chan_mask);
+ if (!rc) {
+ dev_err(rvu->dev,
+ "%s: failed, info chan_val=0x%llx chan_mask=0x%llx rule_id=%d\n",
+ __func__, chan_val, chan_mask, *drop_mcam_idx);
+ return -EINVAL;
+ }
+
+ /* Filter rules are only for PF */
+ pcifunc = RVU_PFFUNC(i, 0);
+
+ dev_dbg(rvu->dev,
+			"%s: Drop rule cgx=%d lmac=%d chan(val=0x%llx, mask=0x%llx)\n",
+ __func__, cgx_id, lmac_id, chan_val, chan_mask);
+
+ rc = rvu_npc_exact_save_drop_rule_chan_and_mask(rvu, table->num_drop_rules,
+ chan_val, chan_mask, pcifunc);
+ if (!rc) {
+ dev_err(rvu->dev,
+ "%s: failed to set drop info for cgx=%d, lmac=%d, chan=%llx\n",
+ __func__, cgx_id, lmac_id, chan_val);
+ return -EINVAL;
+ }
+
+ err = npc_install_mcam_drop_rule(rvu, *drop_mcam_idx,
+ &table->counter_idx[*drop_mcam_idx],
+ chan_val, chan_mask,
+ exact_val, exact_mask,
+ bcast_mcast_val, bcast_mcast_mask);
+ if (err) {
+ dev_err(rvu->dev,
+ "failed to configure drop rule (cgx=%d lmac=%d)\n",
+ cgx_id, lmac_id);
+ return err;
+ }
+
+ (*drop_mcam_idx)++;
+ }
+
+ dev_info(rvu->dev, "initialized exact match table successfully\n");
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.h
new file mode 100644
index 000000000..57a09328d
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.h
@@ -0,0 +1,239 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2022 Marvell.
+ *
+ */
+
+#ifndef __RVU_NPC_HASH_H
+#define __RVU_NPC_HASH_H
+
+#define RVU_NPC_HASH_SECRET_KEY0 0xa9d5af4c9fbc76b1
+#define RVU_NPC_HASH_SECRET_KEY1 0xa9d5af4c9fbc87b4
+#define RVU_NPC_HASH_SECRET_KEY2 0x5954c9e7
+
+#define NPC_MAX_HASH 2
+#define NPC_MAX_HASH_MASK 2
+
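+/* KEX_LD_CFG_USE_HASH builds an NPC_AF_INTF(0..1)_LID(0..7)_LT(0..15)_LD(0..1)_CFG value:
+ * bit 20 = use_hash, bits 19:16 = bytesm1, bits 15:8 = header offset,
+ * bit 7 = enable, bit 6 = flags enable, bits 5:0 = key offset.
+ * KEX_LD_CFG_HASH builds an NPC_AF_INTF(0..1)_HASH(0..1)_CFG value:
+ * bits 39:32 = header offset, bits 19:16 = bytesm1, bit 12 = LT_EN,
+ * bit 11 = LID_EN, bits 10:8 = LID, bits 7:4 = LTYPE match, bits 3:0 = LTYPE mask.
+ */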
+#define KEX_LD_CFG_USE_HASH(use_hash, bytesm1, hdr_ofs, ena, flags_ena, key_ofs) \
+ ((use_hash) << 20 | ((bytesm1) << 16) | ((hdr_ofs) << 8) | \
+ ((ena) << 7) | ((flags_ena) << 6) | ((key_ofs) & 0x3F))
+#define KEX_LD_CFG_HASH(hdr_ofs, bytesm1, lt_en, lid_en, lid, ltype_match, ltype_mask) \
+ (((hdr_ofs) << 32) | ((bytesm1) << 16) | \
+ ((lt_en) << 12) | ((lid_en) << 11) | ((lid) << 8) | \
+ ((ltype_match) << 4) | ((ltype_mask) & 0xF))
+
+#define SET_KEX_LD_HASH(intf, ld, cfg) \
+ rvu_write64(rvu, blkaddr, \
+ NPC_AF_INTFX_HASHX_CFG(intf, ld), cfg)
+
+#define SET_KEX_LD_HASH_MASK(intf, ld, mask_idx, cfg) \
+ rvu_write64(rvu, blkaddr, \
+ NPC_AF_INTFX_HASHX_MASKX(intf, ld, mask_idx), cfg)
+
+#define GET_KEX_LD_HASH_CTRL(intf, ld) \
+ rvu_read64(rvu, blkaddr, NPC_AF_INTFX_HASHX_RESULT_CTRL(intf, ld))
+
+#define GET_KEX_LD_HASH_MASK(intf, ld, mask_idx) \
+ rvu_read64(rvu, blkaddr, NPC_AF_INTFX_HASHX_MASKX(intf, ld, mask_idx))
+
+#define SET_KEX_LD_HASH_CTRL(intf, ld, cfg) \
+ rvu_write64(rvu, blkaddr, \
+ NPC_AF_INTFX_HASHX_RESULT_CTRL(intf, ld), cfg)
+
+struct npc_mcam_kex_hash {
+ /* NPC_AF_INTF(0..1)_LID(0..7)_LT(0..15)_LD(0..1)_CFG */
+ bool lid_lt_ld_hash_en[NPC_MAX_INTF][NPC_MAX_LID][NPC_MAX_LT][NPC_MAX_LD];
+ /* NPC_AF_INTF(0..1)_HASH(0..1)_CFG */
+ u64 hash[NPC_MAX_INTF][NPC_MAX_HASH];
+ /* NPC_AF_INTF(0..1)_HASH(0..1)_MASK(0..1) */
+ u64 hash_mask[NPC_MAX_INTF][NPC_MAX_HASH][NPC_MAX_HASH_MASK];
+ /* NPC_AF_INTF(0..1)_HASH(0..1)_RESULT_CTRL */
+ u64 hash_ctrl[NPC_MAX_INTF][NPC_MAX_HASH];
+} __packed;
+
+void npc_update_field_hash(struct rvu *rvu, u8 intf,
+ struct mcam_entry *entry,
+ int blkaddr,
+ u64 features,
+ struct flow_msg *pkt,
+ struct flow_msg *mask,
+ struct flow_msg *opkt,
+ struct flow_msg *omask);
+void npc_config_secret_key(struct rvu *rvu, int blkaddr);
+void npc_program_mkex_hash(struct rvu *rvu, int blkaddr);
+u32 npc_field_hash_calc(u64 *ldata, struct npc_get_field_hash_info_rsp rsp,
+ u8 intf, u8 hash_idx);
+
+static struct npc_mcam_kex_hash npc_mkex_hash_default __maybe_unused = {
+ .lid_lt_ld_hash_en = {
+ [NIX_INTF_RX] = {
+ [NPC_LID_LC] = {
+ [NPC_LT_LC_IP6] = {
+ false,
+ false,
+ },
+ },
+ },
+
+ [NIX_INTF_TX] = {
+ [NPC_LID_LC] = {
+ [NPC_LT_LC_IP6] = {
+ false,
+ false,
+ },
+ },
+ },
+ },
+
+ .hash = {
+ [NIX_INTF_RX] = {
+ KEX_LD_CFG_HASH(0x8ULL, 0xf, 0x1, 0x1, NPC_LID_LC, NPC_LT_LC_IP6, 0xf),
+ KEX_LD_CFG_HASH(0x18ULL, 0xf, 0x1, 0x1, NPC_LID_LC, NPC_LT_LC_IP6, 0xf),
+ },
+
+ [NIX_INTF_TX] = {
+ KEX_LD_CFG_HASH(0x8ULL, 0xf, 0x1, 0x1, NPC_LID_LC, NPC_LT_LC_IP6, 0xf),
+ KEX_LD_CFG_HASH(0x18ULL, 0xf, 0x1, 0x1, NPC_LID_LC, NPC_LT_LC_IP6, 0xf),
+ },
+ },
+
+ .hash_mask = {
+ [NIX_INTF_RX] = {
+ [0] = {
+ GENMASK_ULL(63, 0),
+ GENMASK_ULL(63, 0),
+ },
+ [1] = {
+ GENMASK_ULL(63, 0),
+ GENMASK_ULL(63, 0),
+ },
+ },
+
+ [NIX_INTF_TX] = {
+ [0] = {
+ GENMASK_ULL(63, 0),
+ GENMASK_ULL(63, 0),
+ },
+ [1] = {
+ GENMASK_ULL(63, 0),
+ GENMASK_ULL(63, 0),
+ },
+ },
+ },
+
+ .hash_ctrl = {
+ [NIX_INTF_RX] = {
+ [0] = GENMASK_ULL(63, 32), /* MSB 32 bit is mask and LSB 32 bit is offset. */
+ [1] = GENMASK_ULL(63, 32), /* MSB 32 bit is mask and LSB 32 bit is offset. */
+ },
+
+ [NIX_INTF_TX] = {
+ [0] = GENMASK_ULL(63, 32), /* MSB 32 bit is mask and LSB 32 bit is offset. */
+ [1] = GENMASK_ULL(63, 32), /* MSB 32 bit is mask and LSB 32 bit is offset. */
+ },
+ },
+};
+
+/* If exact match table support is enabled, enable drop rules */
+#define NPC_MCAM_DROP_RULE_MAX 30
+#define NPC_MCAM_SDP_DROP_RULE_IDX 0
+
+#define RVU_PFFUNC(pf, func) \
+ ((((pf) & RVU_PFVF_PF_MASK) << RVU_PFVF_PF_SHIFT) | \
+ (((func) & RVU_PFVF_FUNC_MASK) << RVU_PFVF_FUNC_SHIFT))
+
+enum npc_exact_opc_type {
+ NPC_EXACT_OPC_MEM,
+ NPC_EXACT_OPC_CAM,
+};
+
+struct npc_exact_table_entry {
+ struct list_head list;
+ struct list_head glist;
+ u32 seq_id; /* Sequence number of entry */
+ u32 index; /* Mem table or cam table index */
+ u32 mcam_idx;
+ /* Mcam index. This is valid only if "cmd" field is false */
+ enum npc_exact_opc_type opc_type;
+ u16 chan;
+ u16 pcifunc;
+ u8 ways;
+ u8 mac[ETH_ALEN];
+ u8 ctype;
+ u8 cgx_id;
+ u8 lmac_id;
+ bool cmd; /* Is added by ethtool command ? */
+};
+
+struct npc_exact_table {
+ struct mutex lock; /* entries update lock */
+ unsigned long *id_bmap;
+ int num_drop_rules;
+ u32 tot_ids;
+ u16 cnt_cmd_rules[NPC_MCAM_DROP_RULE_MAX];
+ u16 counter_idx[NPC_MCAM_DROP_RULE_MAX];
+ bool promisc_mode[NPC_MCAM_DROP_RULE_MAX];
+ struct {
+ int ways;
+ int depth;
+ unsigned long *bmap;
+ u64 mask; /* Mask applied before hash calculation */
+ u16 hash_mask; /* 11-bit hash mask */
+ u16 hash_offset; /* 11-bit hash offset */
+ } mem_table;
+
+ struct {
+ int depth;
+ unsigned long *bmap;
+ } cam_table;
+
+ struct {
+ bool valid;
+ u16 chan_val;
+ u16 chan_mask;
+ u16 pcifunc;
+ u8 drop_rule_idx;
+ } drop_rule_map[NPC_MCAM_DROP_RULE_MAX];
+
+#define NPC_EXACT_TBL_MAX_WAYS 4
+
+ struct list_head lhead_mem_tbl_entry[NPC_EXACT_TBL_MAX_WAYS];
+ int mem_tbl_entry_cnt;
+
+ struct list_head lhead_cam_tbl_entry;
+ int cam_tbl_entry_cnt;
+
+ struct list_head lhead_gbl;
+};
+
+bool rvu_npc_exact_has_match_table(struct rvu *rvu);
+u32 rvu_npc_exact_get_max_entries(struct rvu *rvu);
+int rvu_npc_exact_init(struct rvu *rvu);
+int rvu_npc_exact_mac_addr_reset(struct rvu *rvu, struct cgx_mac_addr_reset_req *req,
+ struct msg_rsp *rsp);
+
+int rvu_npc_exact_mac_addr_update(struct rvu *rvu,
+ struct cgx_mac_addr_update_req *req,
+ struct cgx_mac_addr_update_rsp *rsp);
+
+int rvu_npc_exact_mac_addr_add(struct rvu *rvu,
+ struct cgx_mac_addr_add_req *req,
+ struct cgx_mac_addr_add_rsp *rsp);
+
+int rvu_npc_exact_mac_addr_del(struct rvu *rvu,
+ struct cgx_mac_addr_del_req *req,
+ struct msg_rsp *rsp);
+
+int rvu_npc_exact_mac_addr_set(struct rvu *rvu, struct cgx_mac_addr_set_or_get *req,
+ struct cgx_mac_addr_set_or_get *rsp);
+
+void rvu_npc_exact_reset(struct rvu *rvu, u16 pcifunc);
+
+bool rvu_npc_exact_can_disable_feature(struct rvu *rvu);
+void rvu_npc_exact_disable_feature(struct rvu *rvu);
+void rvu_npc_exact_reset(struct rvu *rvu, u16 pcifunc);
+u16 rvu_npc_exact_drop_rule_to_pcifunc(struct rvu *rvu, u32 drop_rule_idx);
+int rvu_npc_exact_promisc_disable(struct rvu *rvu, u16 pcifunc);
+int rvu_npc_exact_promisc_enable(struct rvu *rvu, u16 pcifunc);
+#endif /* RVU_NPC_HASH_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c
new file mode 100644
index 000000000..d46ac29ad
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "rvu_struct.h"
+#include "common.h"
+#include "mbox.h"
+#include "rvu.h"
+
+struct reg_range {
+ u64 start;
+ u64 end;
+};
+
+struct hw_reg_map {
+ u8 regblk;
+ u8 num_ranges;
+ u64 mask;
+#define MAX_REG_RANGES 8
+ struct reg_range range[MAX_REG_RANGES];
+};
+
+static struct hw_reg_map txsch_reg_map[NIX_TXSCH_LVL_CNT] = {
+ {NIX_TXSCH_LVL_SMQ, 2, 0xFFFF, {{0x0700, 0x0708}, {0x1400, 0x14C8} } },
+ {NIX_TXSCH_LVL_TL4, 3, 0xFFFF, {{0x0B00, 0x0B08}, {0x0B10, 0x0B18},
+ {0x1200, 0x12E0} } },
+ {NIX_TXSCH_LVL_TL3, 4, 0xFFFF, {{0x1000, 0x10E0}, {0x1600, 0x1608},
+ {0x1610, 0x1618}, {0x1700, 0x17C8} } },
+ {NIX_TXSCH_LVL_TL2, 2, 0xFFFF, {{0x0E00, 0x0EE0}, {0x1700, 0x17C8} } },
+ {NIX_TXSCH_LVL_TL1, 1, 0xFFFF, {{0x0C00, 0x0D98} } },
+};
+
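+/* Validate a register offset supplied by a PF/VF (e.g. via the TXSCHQ
+ * configuration mailbox) against the per-level ranges above, so the AF
+ * only accesses known scheduler registers on the requester's behalf.
+ */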
+bool rvu_check_valid_reg(int regmap, int regblk, u64 reg)
+{
+ int idx;
+ struct hw_reg_map *map;
+
+ /* Only 64bit offsets */
+ if (reg & 0x07)
+ return false;
+
+ if (regmap == TXSCHQ_HWREGMAP) {
+ if (regblk >= NIX_TXSCH_LVL_CNT)
+ return false;
+ map = &txsch_reg_map[regblk];
+ } else {
+ return false;
+ }
+
+ /* Should never happen */
+ if (map->regblk != regblk)
+ return false;
+
+ reg &= map->mask;
+
+ for (idx = 0; idx < map->num_ranges; idx++) {
+ if (reg >= map->range[idx].start &&
+ reg < map->range[idx].end)
+ return true;
+ }
+ return false;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
new file mode 100644
index 000000000..18c1c9f36
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
@@ -0,0 +1,738 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell.
+ *
+ */
+
+#ifndef RVU_REG_H
+#define RVU_REG_H
+
+/* Admin function registers */
+#define RVU_AF_MSIXTR_BASE (0x10)
+#define RVU_AF_ECO (0x20)
+#define RVU_AF_BLK_RST (0x30)
+#define RVU_AF_PF_BAR4_ADDR (0x40)
+#define RVU_AF_RAS (0x100)
+#define RVU_AF_RAS_W1S (0x108)
+#define RVU_AF_RAS_ENA_W1S (0x110)
+#define RVU_AF_RAS_ENA_W1C (0x118)
+#define RVU_AF_GEN_INT (0x120)
+#define RVU_AF_GEN_INT_W1S (0x128)
+#define RVU_AF_GEN_INT_ENA_W1S (0x130)
+#define RVU_AF_GEN_INT_ENA_W1C (0x138)
+#define RVU_AF_AFPF_MBOX0 (0x02000)
+#define RVU_AF_AFPF_MBOX1 (0x02008)
+#define RVU_AF_AFPFX_MBOXX(a, b) (0x2000 | (a) << 4 | (b) << 3)
+#define RVU_AF_PFME_STATUS (0x2800)
+#define RVU_AF_PFTRPEND (0x2810)
+#define RVU_AF_PFTRPEND_W1S (0x2820)
+#define RVU_AF_PF_RST (0x2840)
+#define RVU_AF_HWVF_RST (0x2850)
+#define RVU_AF_PFAF_MBOX_INT (0x2880)
+#define RVU_AF_PFAF_MBOX_INT_W1S (0x2888)
+#define RVU_AF_PFAF_MBOX_INT_ENA_W1S (0x2890)
+#define RVU_AF_PFAF_MBOX_INT_ENA_W1C (0x2898)
+#define RVU_AF_PFFLR_INT (0x28a0)
+#define RVU_AF_PFFLR_INT_W1S (0x28a8)
+#define RVU_AF_PFFLR_INT_ENA_W1S (0x28b0)
+#define RVU_AF_PFFLR_INT_ENA_W1C (0x28b8)
+#define RVU_AF_PFME_INT (0x28c0)
+#define RVU_AF_PFME_INT_W1S (0x28c8)
+#define RVU_AF_PFME_INT_ENA_W1S (0x28d0)
+#define RVU_AF_PFME_INT_ENA_W1C (0x28d8)
+#define RVU_AF_PFX_BAR4_ADDR(a) (0x5000 | (a) << 4)
+#define RVU_AF_PFX_BAR4_CFG(a) (0x5200 | (a) << 4)
+#define RVU_AF_PFX_VF_BAR4_ADDR(a) (0x5400 | (a) << 4)
+#define RVU_AF_PFX_VF_BAR4_CFG(a) (0x5600 | (a) << 4)
+#define RVU_AF_PFX_LMTLINE_ADDR(a) (0x5800 | (a) << 4)
+#define RVU_AF_SMMU_ADDR_REQ (0x6000)
+#define RVU_AF_SMMU_TXN_REQ (0x6008)
+#define RVU_AF_SMMU_ADDR_RSP_STS (0x6010)
+#define RVU_AF_SMMU_ADDR_TLN (0x6018)
+#define RVU_AF_SMMU_TLN_FLIT0 (0x6020)
+
+/* Admin function's privileged PF/VF registers */
+#define RVU_PRIV_CONST (0x8000000)
+#define RVU_PRIV_GEN_CFG (0x8000010)
+#define RVU_PRIV_CLK_CFG (0x8000020)
+#define RVU_PRIV_ACTIVE_PC (0x8000030)
+#define RVU_PRIV_PFX_CFG(a) (0x8000100 | (a) << 16)
+#define RVU_PRIV_PFX_MSIX_CFG(a) (0x8000110 | (a) << 16)
+#define RVU_PRIV_PFX_ID_CFG(a) (0x8000120 | (a) << 16)
+#define RVU_PRIV_PFX_INT_CFG(a) (0x8000200 | (a) << 16)
+#define RVU_PRIV_PFX_NIXX_CFG(a) (0x8000300 | (a) << 3)
+#define RVU_PRIV_PFX_NPA_CFG (0x8000310)
+#define RVU_PRIV_PFX_SSO_CFG (0x8000320)
+#define RVU_PRIV_PFX_SSOW_CFG (0x8000330)
+#define RVU_PRIV_PFX_TIM_CFG (0x8000340)
+#define RVU_PRIV_PFX_CPTX_CFG(a) (0x8000350 | (a) << 3)
+#define RVU_PRIV_BLOCK_TYPEX_REV(a) (0x8000400 | (a) << 3)
+#define RVU_PRIV_HWVFX_INT_CFG(a) (0x8001280 | (a) << 16)
+#define RVU_PRIV_HWVFX_NIXX_CFG(a) (0x8001300 | (a) << 3)
+#define RVU_PRIV_HWVFX_NPA_CFG (0x8001310)
+#define RVU_PRIV_HWVFX_SSO_CFG (0x8001320)
+#define RVU_PRIV_HWVFX_SSOW_CFG (0x8001330)
+#define RVU_PRIV_HWVFX_TIM_CFG (0x8001340)
+#define RVU_PRIV_HWVFX_CPTX_CFG(a) (0x8001350 | (a) << 3)
+
+/* RVU PF registers */
+#define RVU_PF_VFX_PFVF_MBOX0 (0x00000)
+#define RVU_PF_VFX_PFVF_MBOX1 (0x00008)
+#define RVU_PF_VFX_PFVF_MBOXX(a, b) (0x0 | (a) << 12 | (b) << 3)
+#define RVU_PF_VF_BAR4_ADDR (0x10)
+#define RVU_PF_BLOCK_ADDRX_DISC(a) (0x200 | (a) << 3)
+#define RVU_PF_VFME_STATUSX(a) (0x800 | (a) << 3)
+#define RVU_PF_VFTRPENDX(a) (0x820 | (a) << 3)
+#define RVU_PF_VFTRPEND_W1SX(a) (0x840 | (a) << 3)
+#define RVU_PF_VFPF_MBOX_INTX(a) (0x880 | (a) << 3)
+#define RVU_PF_VFPF_MBOX_INT_W1SX(a) (0x8A0 | (a) << 3)
+#define RVU_PF_VFPF_MBOX_INT_ENA_W1SX(a) (0x8C0 | (a) << 3)
+#define RVU_PF_VFPF_MBOX_INT_ENA_W1CX(a) (0x8E0 | (a) << 3)
+#define RVU_PF_VFFLR_INTX(a) (0x900 | (a) << 3)
+#define RVU_PF_VFFLR_INT_W1SX(a) (0x920 | (a) << 3)
+#define RVU_PF_VFFLR_INT_ENA_W1SX(a) (0x940 | (a) << 3)
+#define RVU_PF_VFFLR_INT_ENA_W1CX(a) (0x960 | (a) << 3)
+#define RVU_PF_VFME_INTX(a) (0x980 | (a) << 3)
+#define RVU_PF_VFME_INT_W1SX(a) (0x9A0 | (a) << 3)
+#define RVU_PF_VFME_INT_ENA_W1SX(a) (0x9C0 | (a) << 3)
+#define RVU_PF_VFME_INT_ENA_W1CX(a) (0x9E0 | (a) << 3)
+#define RVU_PF_PFAF_MBOX0 (0xC00)
+#define RVU_PF_PFAF_MBOX1 (0xC08)
+#define RVU_PF_PFAF_MBOXX(a) (0xC00 | (a) << 3)
+#define RVU_PF_INT (0xc20)
+#define RVU_PF_INT_W1S (0xc28)
+#define RVU_PF_INT_ENA_W1S (0xc30)
+#define RVU_PF_INT_ENA_W1C (0xc38)
+#define RVU_PF_MSIX_VECX_ADDR(a) (0x000 | (a) << 4)
+#define RVU_PF_MSIX_VECX_CTL(a) (0x008 | (a) << 4)
+#define RVU_PF_MSIX_PBAX(a) (0xF0000 | (a) << 3)
+#define RVU_PF_VF_MBOX_ADDR (0xC40)
+#define RVU_PF_LMTLINE_ADDR (0xC48)
+
+/* RVU VF registers */
+#define RVU_VF_VFPF_MBOX0 (0x00000)
+#define RVU_VF_VFPF_MBOX1 (0x00008)
+
+/* NPA block's admin function registers */
+#define NPA_AF_BLK_RST (0x0000)
+#define NPA_AF_CONST (0x0010)
+#define NPA_AF_CONST1 (0x0018)
+#define NPA_AF_LF_RST (0x0020)
+#define NPA_AF_GEN_CFG (0x0030)
+#define NPA_AF_NDC_CFG (0x0040)
+#define NPA_AF_INP_CTL (0x00D0)
+#define NPA_AF_ACTIVE_CYCLES_PC (0x00F0)
+#define NPA_AF_AVG_DELAY (0x0100)
+#define NPA_AF_GEN_INT (0x0140)
+#define NPA_AF_GEN_INT_W1S (0x0148)
+#define NPA_AF_GEN_INT_ENA_W1S (0x0150)
+#define NPA_AF_GEN_INT_ENA_W1C (0x0158)
+#define NPA_AF_RVU_INT (0x0160)
+#define NPA_AF_RVU_INT_W1S (0x0168)
+#define NPA_AF_RVU_INT_ENA_W1S (0x0170)
+#define NPA_AF_RVU_INT_ENA_W1C (0x0178)
+#define NPA_AF_ERR_INT (0x0180)
+#define NPA_AF_ERR_INT_W1S (0x0188)
+#define NPA_AF_ERR_INT_ENA_W1S (0x0190)
+#define NPA_AF_ERR_INT_ENA_W1C (0x0198)
+#define NPA_AF_RAS (0x01A0)
+#define NPA_AF_RAS_W1S (0x01A8)
+#define NPA_AF_RAS_ENA_W1S (0x01B0)
+#define NPA_AF_RAS_ENA_W1C (0x01B8)
+#define NPA_AF_BP_TEST (0x0200)
+#define NPA_AF_ECO (0x0300)
+#define NPA_AF_AQ_CFG (0x0600)
+#define NPA_AF_AQ_BASE (0x0610)
+#define NPA_AF_AQ_STATUS (0x0620)
+#define NPA_AF_AQ_DOOR (0x0630)
+#define NPA_AF_AQ_DONE_WAIT (0x0640)
+#define NPA_AF_AQ_DONE (0x0650)
+#define NPA_AF_AQ_DONE_ACK (0x0660)
+#define NPA_AF_AQ_DONE_INT (0x0680)
+#define NPA_AF_AQ_DONE_INT_W1S (0x0688)
+#define NPA_AF_AQ_DONE_ENA_W1S (0x0690)
+#define NPA_AF_AQ_DONE_ENA_W1C (0x0698)
+#define NPA_AF_BATCH_CTL (0x06a0)
+#define NPA_AF_LFX_AURAS_CFG(a) (0x4000 | (a) << 18)
+#define NPA_AF_LFX_LOC_AURAS_BASE(a) (0x4010 | (a) << 18)
+#define NPA_AF_LFX_QINTS_CFG(a) (0x4100 | (a) << 18)
+#define NPA_AF_LFX_QINTS_BASE(a) (0x4110 | (a) << 18)
+#define NPA_PRIV_AF_INT_CFG (0x10000)
+#define NPA_PRIV_LFX_CFG (0x10010)
+#define NPA_PRIV_LFX_INT_CFG (0x10020)
+#define NPA_AF_RVU_LF_CFG_DEBUG (0x10030)
+
+/* NIX block's admin function registers */
+#define NIX_AF_CFG (0x0000)
+#define NIX_AF_STATUS (0x0010)
+#define NIX_AF_NDC_CFG (0x0018)
+#define NIX_AF_CONST (0x0020)
+#define NIX_AF_CONST1 (0x0028)
+#define NIX_AF_CONST2 (0x0030)
+#define NIX_AF_CONST3 (0x0038)
+#define NIX_AF_SQ_CONST (0x0040)
+#define NIX_AF_CQ_CONST (0x0048)
+#define NIX_AF_RQ_CONST (0x0050)
+#define NIX_AF_PL_CONST (0x0058)
+#define NIX_AF_PSE_CONST (0x0060)
+#define NIX_AF_TL1_CONST (0x0070)
+#define NIX_AF_TL2_CONST (0x0078)
+#define NIX_AF_TL3_CONST (0x0080)
+#define NIX_AF_TL4_CONST (0x0088)
+#define NIX_AF_MDQ_CONST (0x0090)
+#define NIX_AF_MC_MIRROR_CONST (0x0098)
+#define NIX_AF_LSO_CFG (0x00A8)
+#define NIX_AF_BLK_RST (0x00B0)
+#define NIX_AF_TX_TSTMP_CFG (0x00C0)
+#define NIX_AF_PL_TS (0x00C8)
+#define NIX_AF_RX_CFG (0x00D0)
+#define NIX_AF_AVG_DELAY (0x00E0)
+#define NIX_AF_CINT_DELAY (0x00F0)
+#define NIX_AF_VWQE_TIMER (0x00F8)
+#define NIX_AF_RX_MCAST_BASE (0x0100)
+#define NIX_AF_RX_MCAST_CFG (0x0110)
+#define NIX_AF_RX_MCAST_BUF_BASE (0x0120)
+#define NIX_AF_RX_MCAST_BUF_CFG (0x0130)
+#define NIX_AF_RX_MIRROR_BUF_BASE (0x0140)
+#define NIX_AF_RX_MIRROR_BUF_CFG (0x0148)
+#define NIX_AF_LF_RST (0x0150)
+#define NIX_AF_GEN_INT (0x0160)
+#define NIX_AF_GEN_INT_W1S (0x0168)
+#define NIX_AF_GEN_INT_ENA_W1S (0x0170)
+#define NIX_AF_GEN_INT_ENA_W1C (0x0178)
+#define NIX_AF_ERR_INT (0x0180)
+#define NIX_AF_ERR_INT_W1S (0x0188)
+#define NIX_AF_ERR_INT_ENA_W1S (0x0190)
+#define NIX_AF_ERR_INT_ENA_W1C (0x0198)
+#define NIX_AF_RAS (0x01A0)
+#define NIX_AF_RAS_W1S (0x01A8)
+#define NIX_AF_RAS_ENA_W1S (0x01B0)
+#define NIX_AF_RAS_ENA_W1C (0x01B8)
+#define NIX_AF_RVU_INT (0x01C0)
+#define NIX_AF_RVU_INT_W1S (0x01C8)
+#define NIX_AF_RVU_INT_ENA_W1S (0x01D0)
+#define NIX_AF_RVU_INT_ENA_W1C (0x01D8)
+#define NIX_AF_TCP_TIMER (0x01E0)
+#define NIX_AF_RX_DEF_ET(a) (0x01F0ull | (uint64_t)(a) << 3)
+#define NIX_AF_RX_DEF_OL2 (0x0200)
+#define NIX_AF_RX_DEF_OIP4 (0x0210)
+#define NIX_AF_RX_DEF_IIP4 (0x0220)
+#define NIX_AF_RX_DEF_VLAN0_PCP_DEI (0x0228)
+#define NIX_AF_RX_DEF_OIP6 (0x0230)
+#define NIX_AF_RX_DEF_VLAN1_PCP_DEI (0x0238)
+#define NIX_AF_RX_DEF_IIP6 (0x0240)
+#define NIX_AF_RX_DEF_OTCP (0x0250)
+#define NIX_AF_RX_DEF_ITCP (0x0260)
+#define NIX_AF_RX_DEF_OUDP (0x0270)
+#define NIX_AF_RX_DEF_IUDP (0x0280)
+#define NIX_AF_RX_DEF_OSCTP (0x0290)
+#define NIX_AF_RX_DEF_CST_APAD0 (0x0298)
+#define NIX_AF_RX_DEF_ISCTP (0x02A0)
+#define NIX_AF_RX_DEF_IPSECX (0x02B0)
+#define NIX_AF_RX_DEF_CST_APAD1 (0x02A8)
+#define NIX_AF_RX_DEF_IIP4_DSCP (0x02E0)
+#define NIX_AF_RX_DEF_OIP4_DSCP (0x02E8)
+#define NIX_AF_RX_DEF_IIP6_DSCP (0x02F0)
+#define NIX_AF_RX_DEF_OIP6_DSCP (0x02F8)
+#define NIX_AF_RX_IPSEC_GEN_CFG (0x0300)
+#define NIX_AF_RX_CPTX_INST_ADDR (0x0310)
+#define NIX_AF_RX_CPTX_INST_QSEL(a) (0x0320ull | (uint64_t)(a) << 3)
+#define NIX_AF_RX_CPTX_CREDIT(a) (0x0360ull | (uint64_t)(a) << 3)
+#define NIX_AF_NDC_TX_SYNC (0x03F0)
+#define NIX_AF_AQ_CFG (0x0400)
+#define NIX_AF_AQ_BASE (0x0410)
+#define NIX_AF_AQ_STATUS (0x0420)
+#define NIX_AF_AQ_DOOR (0x0430)
+#define NIX_AF_AQ_DONE_WAIT (0x0440)
+#define NIX_AF_AQ_DONE (0x0450)
+#define NIX_AF_AQ_DONE_ACK (0x0460)
+#define NIX_AF_AQ_DONE_TIMER (0x0470)
+#define NIX_AF_AQ_DONE_INT (0x0480)
+#define NIX_AF_AQ_DONE_INT_W1S (0x0488)
+#define NIX_AF_AQ_DONE_ENA_W1S (0x0490)
+#define NIX_AF_AQ_DONE_ENA_W1C (0x0498)
+#define NIX_AF_RX_LINKX_SLX_SPKT_CNT (0x0500)
+#define NIX_AF_RX_LINKX_SLX_SXQE_CNT (0x0510)
+#define NIX_AF_RX_MCAST_JOBSX_SW_CNT (0x0520)
+#define NIX_AF_RX_MIRROR_JOBSX_SW_CNT (0x0530)
+#define NIX_AF_RX_LINKX_CFG(a) (0x0540 | (a) << 16)
+#define NIX_AF_RX_SW_SYNC (0x0550)
+#define NIX_AF_RX_SW_SYNC_DONE (0x0560)
+#define NIX_AF_SEB_ECO (0x0600)
+#define NIX_AF_SEB_TEST_BP (0x0610)
+#define NIX_AF_NORM_TX_FIFO_STATUS (0x0620)
+#define NIX_AF_EXPR_TX_FIFO_STATUS (0x0630)
+#define NIX_AF_SDP_TX_FIFO_STATUS (0x0640)
+#define NIX_AF_TX_NPC_CAPTURE_CONFIG (0x0660)
+#define NIX_AF_TX_NPC_CAPTURE_INFO (0x0670)
+#define NIX_AF_SEB_CFG (0x05F0)
+#define NIX_PTP_1STEP_EN BIT_ULL(2)
+
+#define NIX_AF_DEBUG_NPC_RESP_DATAX(a) (0x680 | (a) << 3)
+#define NIX_AF_SMQX_CFG(a) (0x700 | (a) << 16)
+#define NIX_AF_SQM_DBG_CTL_STATUS (0x750)
+#define NIX_AF_DWRR_SDP_MTU (0x790) /* All CN10K except CN10KB */
+#define NIX_AF_DWRR_MTUX(a) (0x790 | (a) << 16) /* Only for CN10KB */
+#define NIX_AF_DWRR_RPM_MTU (0x7A0)
+#define NIX_AF_PSE_CHANNEL_LEVEL (0x800)
+#define NIX_AF_PSE_SHAPER_CFG (0x810)
+#define NIX_AF_TX_EXPR_CREDIT (0x830)
+#define NIX_AF_MARK_FORMATX_CTL(a) (0x900 | (a) << 18)
+#define NIX_AF_TX_LINKX_NORM_CREDIT(a) (0xA00 | (a) << 16)
+#define NIX_AF_TX_LINKX_EXPR_CREDIT(a) (0xA10 | (a) << 16)
+#define NIX_AF_TX_LINKX_SW_XOFF(a) (0xA20 | (a) << 16)
+#define NIX_AF_TX_LINKX_HW_XOFF(a) (0xA30 | (a) << 16)
+#define NIX_AF_SDP_LINK_CREDIT (0xa40)
+#define NIX_AF_SDP_SW_XOFFX(a) (0xA60 | (a) << 3)
+#define NIX_AF_SDP_HW_XOFFX(a) (0xAC0 | (a) << 3)
+#define NIX_AF_TL4X_BP_STATUS(a) (0xB00 | (a) << 16)
+#define NIX_AF_TL4X_SDP_LINK_CFG(a) (0xB10 | (a) << 16)
+#define NIX_AF_TL1X_SCHEDULE(a) (0xC00 | (a) << 16)
+#define NIX_AF_TL1X_SHAPE(a) (0xC10 | (a) << 16)
+#define NIX_AF_TL1X_CIR(a) (0xC20 | (a) << 16)
+#define NIX_AF_TL1X_SHAPE_STATE(a) (0xC50 | (a) << 16)
+#define NIX_AF_TL1X_SW_XOFF(a) (0xC70 | (a) << 16)
+#define NIX_AF_TL1X_TOPOLOGY(a) (0xC80 | (a) << 16)
+#define NIX_AF_TL1X_GREEN(a) (0xC90 | (a) << 16)
+#define NIX_AF_TL1X_YELLOW(a) (0xCA0 | (a) << 16)
+#define NIX_AF_TL1X_RED(a) (0xCB0 | (a) << 16)
+#define NIX_AF_TL1X_MD_DEBUG0(a) (0xCC0 | (a) << 16)
+#define NIX_AF_TL1X_MD_DEBUG1(a) (0xCC8 | (a) << 16)
+#define NIX_AF_TL1X_MD_DEBUG2(a) (0xCD0 | (a) << 16)
+#define NIX_AF_TL1X_MD_DEBUG3(a) (0xCD8 | (a) << 16)
+#define NIX_AF_TL1A_DEBUG (0xce0)
+#define NIX_AF_TL1B_DEBUG (0xcf0)
+#define NIX_AF_TL1_DEBUG_GREEN (0xd00)
+#define NIX_AF_TL1_DEBUG_NODE (0xd10)
+#define NIX_AF_TL1X_DROPPED_PACKETS(a) (0xD20 | (a) << 16)
+#define NIX_AF_TL1X_DROPPED_BYTES(a) (0xD30 | (a) << 16)
+#define NIX_AF_TL1X_RED_PACKETS(a) (0xD40 | (a) << 16)
+#define NIX_AF_TL1X_RED_BYTES(a) (0xD50 | (a) << 16)
+#define NIX_AF_TL1X_YELLOW_PACKETS(a) (0xD60 | (a) << 16)
+#define NIX_AF_TL1X_YELLOW_BYTES(a) (0xD70 | (a) << 16)
+#define NIX_AF_TL1X_GREEN_PACKETS(a) (0xD80 | (a) << 16)
+#define NIX_AF_TL1X_GREEN_BYTES(a) (0xD90 | (a) << 16)
+#define NIX_AF_TL2X_SCHEDULE(a) (0xE00 | (a) << 16)
+#define NIX_AF_TL2X_SHAPE(a) (0xE10 | (a) << 16)
+#define NIX_AF_TL2X_CIR(a) (0xE20 | (a) << 16)
+#define NIX_AF_TL2X_PIR(a) (0xE30 | (a) << 16)
+#define NIX_AF_TL2X_SCHED_STATE(a) (0xE40 | (a) << 16)
+#define NIX_AF_TL2X_SHAPE_STATE(a) (0xE50 | (a) << 16)
+#define NIX_AF_TL2X_POINTERS(a) (0xE60 | (a) << 16)
+#define NIX_AF_TL2X_SW_XOFF(a) (0xE70 | (a) << 16)
+#define NIX_AF_TL2X_TOPOLOGY(a) (0xE80 | (a) << 16)
+#define NIX_AF_TL2X_PARENT(a) (0xE88 | (a) << 16)
+#define NIX_AF_TL2X_GREEN(a) (0xE90 | (a) << 16)
+#define NIX_AF_TL2X_YELLOW(a) (0xEA0 | (a) << 16)
+#define NIX_AF_TL2X_RED(a) (0xEB0 | (a) << 16)
+#define NIX_AF_TL2X_MD_DEBUG0(a) (0xEC0 | (a) << 16)
+#define NIX_AF_TL2X_MD_DEBUG1(a) (0xEC8 | (a) << 16)
+#define NIX_AF_TL2X_MD_DEBUG2(a) (0xED0 | (a) << 16)
+#define NIX_AF_TL2X_MD_DEBUG3(a) (0xED8 | (a) << 16)
+#define NIX_AF_TL2A_DEBUG (0xee0)
+#define NIX_AF_TL2B_DEBUG (0xef0)
+#define NIX_AF_TL3X_SCHEDULE(a) (0x1000 | (a) << 16)
+#define NIX_AF_TL3X_SHAPE(a) (0x1010 | (a) << 16)
+#define NIX_AF_TL3X_CIR(a) (0x1020 | (a) << 16)
+#define NIX_AF_TL3X_PIR(a) (0x1030 | (a) << 16)
+#define NIX_AF_TL3X_SCHED_STATE(a) (0x1040 | (a) << 16)
+#define NIX_AF_TL3X_SHAPE_STATE(a) (0x1050 | (a) << 16)
+#define NIX_AF_TL3X_POINTERS(a) (0x1060 | (a) << 16)
+#define NIX_AF_TL3X_SW_XOFF(a) (0x1070 | (a) << 16)
+#define NIX_AF_TL3X_TOPOLOGY(a) (0x1080 | (a) << 16)
+#define NIX_AF_TL3X_PARENT(a) (0x1088 | (a) << 16)
+#define NIX_AF_TL3X_GREEN(a) (0x1090 | (a) << 16)
+#define NIX_AF_TL3X_YELLOW(a) (0x10A0 | (a) << 16)
+#define NIX_AF_TL3X_RED(a) (0x10B0 | (a) << 16)
+#define NIX_AF_TL3X_MD_DEBUG0(a) (0x10C0 | (a) << 16)
+#define NIX_AF_TL3X_MD_DEBUG1(a) (0x10C8 | (a) << 16)
+#define NIX_AF_TL3X_MD_DEBUG2(a) (0x10D0 | (a) << 16)
+#define NIX_AF_TL3X_MD_DEBUG3(a) (0x10D8 | (a) << 16)
+#define NIX_AF_TL3A_DEBUG (0x10e0)
+#define NIX_AF_TL3B_DEBUG (0x10f0)
+#define NIX_AF_TL4X_SCHEDULE(a) (0x1200 | (a) << 16)
+#define NIX_AF_TL4X_SHAPE(a) (0x1210 | (a) << 16)
+#define NIX_AF_TL4X_CIR(a) (0x1220 | (a) << 16)
+#define NIX_AF_TL4X_PIR(a) (0x1230 | (a) << 16)
+#define NIX_AF_TL4X_SCHED_STATE(a) (0x1240 | (a) << 16)
+#define NIX_AF_TL4X_SHAPE_STATE(a) (0x1250 | (a) << 16)
+#define NIX_AF_TL4X_POINTERS(a) (0x1260 | (a) << 16)
+#define NIX_AF_TL4X_SW_XOFF(a) (0x1270 | (a) << 16)
+#define NIX_AF_TL4X_TOPOLOGY(a) (0x1280 | (a) << 16)
+#define NIX_AF_TL4X_PARENT(a) (0x1288 | (a) << 16)
+#define NIX_AF_TL4X_GREEN(a) (0x1290 | (a) << 16)
+#define NIX_AF_TL4X_YELLOW(a) (0x12A0 | (a) << 16)
+#define NIX_AF_TL4X_RED(a) (0x12B0 | (a) << 16)
+#define NIX_AF_TL4X_MD_DEBUG0(a) (0x12C0 | (a) << 16)
+#define NIX_AF_TL4X_MD_DEBUG1(a) (0x12C8 | (a) << 16)
+#define NIX_AF_TL4X_MD_DEBUG2(a) (0x12D0 | (a) << 16)
+#define NIX_AF_TL4X_MD_DEBUG3(a) (0x12D8 | (a) << 16)
+#define NIX_AF_TL4A_DEBUG (0x12e0)
+#define NIX_AF_TL4B_DEBUG (0x12f0)
+#define NIX_AF_MDQX_SCHEDULE(a) (0x1400 | (a) << 16)
+#define NIX_AF_MDQX_SHAPE(a) (0x1410 | (a) << 16)
+#define NIX_AF_MDQX_CIR(a) (0x1420 | (a) << 16)
+#define NIX_AF_MDQX_PIR(a) (0x1430 | (a) << 16)
+#define NIX_AF_MDQX_SCHED_STATE(a) (0x1440 | (a) << 16)
+#define NIX_AF_MDQX_SHAPE_STATE(a) (0x1450 | (a) << 16)
+#define NIX_AF_MDQX_POINTERS(a) (0x1460 | (a) << 16)
+#define NIX_AF_MDQX_SW_XOFF(a) (0x1470 | (a) << 16)
+#define NIX_AF_MDQX_PARENT(a) (0x1480 | (a) << 16)
+#define NIX_AF_MDQX_MD_DEBUG(a) (0x14C0 | (a) << 16)
+#define NIX_AF_MDQX_PTR_FIFO(a) (0x14D0 | (a) << 16)
+#define NIX_AF_MDQA_DEBUG (0x14e0)
+#define NIX_AF_MDQB_DEBUG (0x14f0)
+#define NIX_AF_TL3_TL2X_CFG(a) (0x1600 | (a) << 18)
+#define NIX_AF_TL3_TL2X_BP_STATUS(a) (0x1610 | (a) << 16)
+#define NIX_AF_TL3_TL2X_LINKX_CFG(a, b) (0x1700 | (a) << 16 | (b) << 3)
+#define NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(a, b) (0x1800 | (a) << 18 | (b) << 3)
+#define NIX_AF_TX_MCASTX(a) (0x1900 | (a) << 15)
+#define NIX_AF_TX_VTAG_DEFX_CTL(a) (0x1A00 | (a) << 16)
+#define NIX_AF_TX_VTAG_DEFX_DATA(a) (0x1A10 | (a) << 16)
+#define NIX_AF_RX_BPIDX_STATUS(a) (0x1A20 | (a) << 17)
+#define NIX_AF_RX_CHANX_CFG(a) (0x1A30 | (a) << 15)
+#define NIX_AF_CINT_TIMERX(a) (0x1A40 | (a) << 18)
+#define NIX_AF_LSO_FORMATX_FIELDX(a, b) (0x1B00 | (a) << 16 | (b) << 3)
+#define NIX_AF_LFX_CFG(a) (0x4000 | (a) << 17)
+#define NIX_AF_LFX_SQS_CFG(a) (0x4020 | (a) << 17)
+#define NIX_AF_LFX_TX_CFG2(a) (0x4028 | (a) << 17)
+#define NIX_AF_LFX_SQS_BASE(a) (0x4030 | (a) << 17)
+#define NIX_AF_LFX_RQS_CFG(a) (0x4040 | (a) << 17)
+#define NIX_AF_LFX_RQS_BASE(a) (0x4050 | (a) << 17)
+#define NIX_AF_LFX_CQS_CFG(a) (0x4060 | (a) << 17)
+#define NIX_AF_LFX_CQS_BASE(a) (0x4070 | (a) << 17)
+#define NIX_AF_LFX_TX_CFG(a) (0x4080 | (a) << 17)
+#define NIX_AF_LFX_TX_PARSE_CFG(a) (0x4090 | (a) << 17)
+#define NIX_AF_LFX_RX_CFG(a) (0x40A0 | (a) << 17)
+#define NIX_AF_LFX_RSS_CFG(a) (0x40C0 | (a) << 17)
+#define NIX_AF_LFX_RSS_BASE(a) (0x40D0 | (a) << 17)
+#define NIX_AF_LFX_QINTS_CFG(a) (0x4100 | (a) << 17)
+#define NIX_AF_LFX_QINTS_BASE(a) (0x4110 | (a) << 17)
+#define NIX_AF_LFX_CINTS_CFG(a) (0x4120 | (a) << 17)
+#define NIX_AF_LFX_CINTS_BASE(a) (0x4130 | (a) << 17)
+#define NIX_AF_LFX_RX_IPSEC_CFG0(a) (0x4140 | (a) << 17)
+#define NIX_AF_LFX_RX_IPSEC_CFG1(a) (0x4148 | (a) << 17)
+#define NIX_AF_LFX_RX_IPSEC_DYNO_CFG(a) (0x4150 | (a) << 17)
+#define NIX_AF_LFX_RX_IPSEC_DYNO_BASE(a) (0x4158 | (a) << 17)
+#define NIX_AF_LFX_RX_IPSEC_SA_BASE(a) (0x4170 | (a) << 17)
+#define NIX_AF_LFX_TX_STATUS(a) (0x4180 | (a) << 17)
+#define NIX_AF_LFX_RX_VTAG_TYPEX(a, b) (0x4200 | (a) << 17 | (b) << 3)
+#define NIX_AF_LFX_LOCKX(a, b) (0x4300 | (a) << 17 | (b) << 3)
+#define NIX_AF_LFX_TX_STATX(a, b) (0x4400 | (a) << 17 | (b) << 3)
+#define NIX_AF_LFX_RX_STATX(a, b) (0x4500 | (a) << 17 | (b) << 3)
+#define NIX_AF_LFX_RSS_GRPX(a, b) (0x4600 | (a) << 17 | (b) << 3)
+#define NIX_AF_RX_NPC_MC_RCV (0x4700)
+#define NIX_AF_RX_NPC_MC_DROP (0x4710)
+#define NIX_AF_RX_NPC_MIRROR_RCV (0x4720)
+#define NIX_AF_RX_NPC_MIRROR_DROP (0x4730)
+#define NIX_AF_RX_ACTIVE_CYCLES_PCX(a) (0x4800 | (a) << 16)
+#define NIX_AF_LINKX_CFG(a) (0x4010 | (a) << 17)
+#define NIX_AF_MDQX_IN_MD_COUNT(a) (0x14e0 | (a) << 16)
+
+#define NIX_PRIV_AF_INT_CFG (0x8000000)
+#define NIX_PRIV_LFX_CFG (0x8000010)
+#define NIX_PRIV_LFX_INT_CFG (0x8000020)
+#define NIX_AF_RVU_LF_CFG_DEBUG (0x8000030)
+
+#define NIX_AF_LINKX_BASE_MASK GENMASK_ULL(11, 0)
+#define NIX_AF_LINKX_RANGE_MASK GENMASK_ULL(19, 16)
+#define NIX_AF_LINKX_MCS_CNT_MASK GENMASK_ULL(33, 32)
+
+/* SSO */
+#define SSO_AF_CONST (0x1000)
+#define SSO_AF_CONST1 (0x1008)
+#define SSO_AF_BLK_RST (0x10f8)
+#define SSO_AF_LF_HWGRP_RST (0x10e0)
+#define SSO_AF_RVU_LF_CFG_DEBUG (0x3800)
+#define SSO_PRIV_LFX_HWGRP_CFG (0x10000)
+#define SSO_PRIV_LFX_HWGRP_INT_CFG (0x20000)
+
+/* SSOW */
+#define SSOW_AF_RVU_LF_HWS_CFG_DEBUG (0x0010)
+#define SSOW_AF_LF_HWS_RST (0x0030)
+#define SSOW_PRIV_LFX_HWS_CFG (0x1000)
+#define SSOW_PRIV_LFX_HWS_INT_CFG (0x2000)
+
+/* TIM */
+#define TIM_AF_CONST (0x90)
+#define TIM_PRIV_LFX_CFG (0x20000)
+#define TIM_PRIV_LFX_INT_CFG (0x24000)
+#define TIM_AF_RVU_LF_CFG_DEBUG (0x30000)
+#define TIM_AF_BLK_RST (0x10)
+#define TIM_AF_LF_RST (0x20)
+
+/* CPT */
+#define CPT_AF_CONSTANTS0 (0x0000)
+#define CPT_AF_CONSTANTS1 (0x1000)
+#define CPT_AF_DIAG (0x3000)
+#define CPT_AF_ECO (0x4000)
+#define CPT_AF_FLTX_INT(a) (0xa000ull | (u64)(a) << 3)
+#define CPT_AF_FLTX_INT_W1S(a) (0xb000ull | (u64)(a) << 3)
+#define CPT_AF_FLTX_INT_ENA_W1C(a) (0xc000ull | (u64)(a) << 3)
+#define CPT_AF_FLTX_INT_ENA_W1S(a) (0xd000ull | (u64)(a) << 3)
+#define CPT_AF_PSNX_EXE(a) (0xe000ull | (u64)(a) << 3)
+#define CPT_AF_PSNX_EXE_W1S(a) (0xf000ull | (u64)(a) << 3)
+#define CPT_AF_PSNX_LF(a) (0x10000ull | (u64)(a) << 3)
+#define CPT_AF_PSNX_LF_W1S(a) (0x11000ull | (u64)(a) << 3)
+#define CPT_AF_EXEX_CTL2(a) (0x12000ull | (u64)(a) << 3)
+#define CPT_AF_EXEX_STS(a) (0x13000ull | (u64)(a) << 3)
+#define CPT_AF_EXE_ERR_INFO (0x14000)
+#define CPT_AF_EXEX_ACTIVE(a) (0x16000ull | (u64)(a) << 3)
+#define CPT_AF_INST_REQ_PC (0x17000)
+#define CPT_AF_INST_LATENCY_PC (0x18000)
+#define CPT_AF_RD_REQ_PC (0x19000)
+#define CPT_AF_RD_LATENCY_PC (0x1a000)
+#define CPT_AF_RD_UC_PC (0x1b000)
+#define CPT_AF_ACTIVE_CYCLES_PC (0x1c000)
+#define CPT_AF_EXE_DBG_CTL (0x1d000)
+#define CPT_AF_EXE_DBG_DATA (0x1e000)
+#define CPT_AF_EXE_REQ_TIMER (0x1f000)
+#define CPT_AF_EXEX_CTL(a) (0x20000ull | (u64)(a) << 3)
+#define CPT_AF_EXE_PERF_CTL (0x21000)
+#define CPT_AF_EXE_DBG_CNTX(a) (0x22000ull | (u64)(a) << 3)
+#define CPT_AF_EXE_PERF_EVENT_CNT (0x23000)
+#define CPT_AF_EXE_EPCI_INBX_CNT(a) (0x24000ull | (u64)(a) << 3)
+#define CPT_AF_EXE_EPCI_OUTBX_CNT(a) (0x25000ull | (u64)(a) << 3)
+#define CPT_AF_EXEX_UCODE_BASE(a) (0x26000ull | (u64)(a) << 3)
+#define CPT_AF_LFX_CTL(a) (0x27000ull | (u64)(a) << 3)
+#define CPT_AF_LFX_CTL2(a) (0x29000ull | (u64)(a) << 3)
+#define CPT_AF_CPTCLK_CNT (0x2a000)
+#define CPT_AF_PF_FUNC (0x2b000)
+#define CPT_AF_LFX_PTR_CTL(a) (0x2c000ull | (u64)(a) << 3)
+#define CPT_AF_GRPX_THR(a) (0x2d000ull | (u64)(a) << 3)
+#define CPT_AF_CTL (0x2e000ull)
+#define CPT_AF_XEX_THR(a) (0x2f000ull | (u64)(a) << 3)
+#define CPT_PRIV_LFX_CFG (0x41000)
+#define CPT_PRIV_AF_INT_CFG (0x42000)
+#define CPT_PRIV_LFX_INT_CFG (0x43000)
+#define CPT_AF_LF_RST (0x44000)
+#define CPT_AF_RVU_LF_CFG_DEBUG (0x45000)
+#define CPT_AF_BLK_RST (0x46000)
+#define CPT_AF_RVU_INT (0x47000)
+#define CPT_AF_RVU_INT_W1S (0x47008)
+#define CPT_AF_RVU_INT_ENA_W1S (0x47010)
+#define CPT_AF_RVU_INT_ENA_W1C (0x47018)
+#define CPT_AF_RAS_INT (0x47020)
+#define CPT_AF_RAS_INT_W1S (0x47028)
+#define CPT_AF_RAS_INT_ENA_W1S (0x47030)
+#define CPT_AF_RAS_INT_ENA_W1C (0x47038)
+#define CPT_AF_CTX_FLUSH_TIMER (0x48000ull)
+#define CPT_AF_CTX_ERR (0x48008ull)
+#define CPT_AF_CTX_ENC_ID (0x48010ull)
+#define CPT_AF_CTX_MIS_PC (0x49400ull)
+#define CPT_AF_CTX_HIT_PC (0x49408ull)
+#define CPT_AF_CTX_AOP_PC (0x49410ull)
+#define CPT_AF_CTX_AOP_LATENCY_PC (0x49418ull)
+#define CPT_AF_CTX_IFETCH_PC (0x49420ull)
+#define CPT_AF_CTX_IFETCH_LATENCY_PC (0x49428ull)
+#define CPT_AF_CTX_FFETCH_PC (0x49430ull)
+#define CPT_AF_CTX_FFETCH_LATENCY_PC (0x49438ull)
+#define CPT_AF_CTX_WBACK_PC (0x49440ull)
+#define CPT_AF_CTX_WBACK_LATENCY_PC (0x49448ull)
+#define CPT_AF_CTX_PSH_PC (0x49450ull)
+#define CPT_AF_CTX_PSH_LATENCY_PC (0x49458ull)
+#define CPT_AF_CTX_CAM_DATA(a) (0x49800ull | (u64)(a) << 3)
+#define CPT_AF_RXC_TIME (0x50010ull)
+#define CPT_AF_RXC_TIME_CFG (0x50018ull)
+#define CPT_AF_RXC_DFRG (0x50020ull)
+#define CPT_AF_RXC_ACTIVE_STS (0x50028ull)
+#define CPT_AF_RXC_ZOMBIE_STS (0x50030ull)
+#define CPT_AF_X2PX_LINK_CFG(a) (0x51000ull | (u64)(a) << 3)
+
+#define AF_BAR2_ALIASX(a, b) (0x9100000ull | (a) << 12 | (b))
+#define CPT_AF_BAR2_SEL 0x9000000
+#define CPT_AF_BAR2_ALIASX(a, b) AF_BAR2_ALIASX(a, b)
+
+#define CPT_AF_LF_CTL2_SHIFT 3
+#define CPT_AF_LF_SSO_PF_FUNC_SHIFT 32
+
+#define CPT_LF_CTL 0x10
+#define CPT_LF_INPROG 0x40
+#define CPT_LF_Q_SIZE 0x100
+#define CPT_LF_Q_INST_PTR 0x110
+#define CPT_LF_Q_GRP_PTR 0x120
+#define CPT_LF_CTX_FLUSH 0x510
+
+#define NPC_AF_BLK_RST (0x00040)
+
+/* NPC */
+#define NPC_AF_CFG (0x00000)
+#define NPC_AF_ACTIVE_PC (0x00010)
+#define NPC_AF_CONST (0x00020)
+#define NPC_AF_CONST1 (0x00030)
+#define NPC_AF_BLK_RST (0x00040)
+#define NPC_AF_MCAM_SCRUB_CTL (0x000a0)
+#define NPC_AF_KCAM_SCRUB_CTL (0x000b0)
+#define NPC_AF_CONST2 (0x00100)
+#define NPC_AF_CONST3 (0x00110)
+#define NPC_AF_KPUX_CFG(a) (0x00500 | (a) << 3)
+#define NPC_AF_PCK_CFG (0x00600)
+#define NPC_AF_PCK_DEF_OL2 (0x00610)
+#define NPC_AF_PCK_DEF_OIP4 (0x00620)
+#define NPC_AF_PCK_DEF_OIP6 (0x00630)
+#define NPC_AF_PCK_DEF_IIP4 (0x00640)
+#define NPC_AF_INTFX_HASHX_RESULT_CTRL(a, b) (0x006c0 | (a) << 4 | (b) << 3)
+#define NPC_AF_INTFX_HASHX_MASKX(a, b, c) (0x00700 | (a) << 5 | (b) << 4 | (c) << 3)
+#define NPC_AF_KEX_LDATAX_FLAGS_CFG(a) (0x00800 | (a) << 3)
+#define NPC_AF_INTFX_HASHX_CFG(a, b) (0x00b00 | (a) << 6 | (b) << 4)
+#define NPC_AF_INTFX_SECRET_KEY0(a) (0x00e00 | (a) << 3)
+#define NPC_AF_INTFX_SECRET_KEY1(a) (0x00e20 | (a) << 3)
+#define NPC_AF_INTFX_SECRET_KEY2(a) (0x00e40 | (a) << 3)
+#define NPC_AF_INTFX_KEX_CFG(a) (0x01010 | (a) << 8)
+#define NPC_AF_PKINDX_ACTION0(a) (0x80000ull | (a) << 6)
+#define NPC_AF_PKINDX_ACTION1(a) (0x80008ull | (a) << 6)
+#define NPC_AF_PKINDX_CPI_DEFX(a, b) (0x80020ull | (a) << 6 | (b) << 3)
+#define NPC_AF_KPUX_ENTRYX_CAMX(a, b, c) \
+ (0x100000 | (a) << 14 | (b) << 6 | (c) << 3)
+#define NPC_AF_KPUX_ENTRYX_ACTION0(a, b) \
+ (0x100020 | (a) << 14 | (b) << 6)
+#define NPC_AF_KPUX_ENTRYX_ACTION1(a, b) \
+ (0x100028 | (a) << 14 | (b) << 6)
+#define NPC_AF_KPUX_ENTRY_DISX(a, b) (0x180000 | (a) << 6 | (b) << 3)
+#define NPC_AF_CPIX_CFG(a) (0x200000 | (a) << 3)
+#define NPC_AF_INTFX_LIDX_LTX_LDX_CFG(a, b, c, d) \
+ (0x900000 | (a) << 16 | (b) << 12 | (c) << 5 | (d) << 3)
+#define NPC_AF_INTFX_LDATAX_FLAGSX_CFG(a, b, c) \
+ (0x980000 | (a) << 16 | (b) << 12 | (c) << 3)
+#define NPC_AF_INTFX_MISS_STAT_ACT(a) (0x1880040 + (a) * 0x8)
+#define NPC_AF_INTFX_MISS_ACT(a) (0x1a00000 | (a) << 4)
+#define NPC_AF_INTFX_MISS_TAG_ACT(a) (0x1b00008 | (a) << 4)
+#define NPC_AF_MCAM_BANKX_HITX(a, b) (0x1c80000 | (a) << 8 | (b) << 4)
+#define NPC_AF_LKUP_CTL (0x2000000)
+#define NPC_AF_LKUP_DATAX(a) (0x2000200 | (a) << 4)
+#define NPC_AF_LKUP_RESULTX(a) (0x2000400 | (a) << 4)
+#define NPC_AF_INTFX_STAT(a) (0x2000800 | (a) << 4)
+#define NPC_AF_DBG_CTL (0x3000000)
+#define NPC_AF_DBG_STATUS (0x3000010)
+#define NPC_AF_KPUX_DBG(a) (0x3000020 | (a) << 8)
+#define NPC_AF_IKPU_ERR_CTL (0x3000080)
+#define NPC_AF_KPUX_ERR_CTL(a) (0x30000a0 | (a) << 8)
+#define NPC_AF_MCAM_DBG (0x3001000)
+#define NPC_AF_DBG_DATAX(a) (0x3001400 | (a) << 4)
+#define NPC_AF_DBG_RESULTX(a) (0x3001800 | (a) << 4)
+
+#define NPC_AF_EXACT_MEM_ENTRY(a, b) (0x300000 | (a) << 15 | (b) << 3)
+#define NPC_AF_EXACT_CAM_ENTRY(a) (0xC00 | (a) << 3)
+#define NPC_AF_INTFX_EXACT_MASK(a) (0x660 | (a) << 3)
+#define NPC_AF_INTFX_EXACT_RESULT_CTL(a) (0x680 | (a) << 3)
+#define NPC_AF_INTFX_EXACT_CFG(a) (0xA00 | (a) << 3)
+#define NPC_AF_INTFX_EXACT_SECRET0(a) (0xE00 | (a) << 3)
+#define NPC_AF_INTFX_EXACT_SECRET1(a) (0xE20 | (a) << 3)
+#define NPC_AF_INTFX_EXACT_SECRET2(a) (0xE40 | (a) << 3)
+
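+/* The NPC_AF_MCAMEX_* accessors below are statement expressions: they
+ * expect a local "struct rvu *rvu" in the calling scope and switch to
+ * the extended MCAM register layout when rvu->hw->npc_ext_set is set.
+ */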
+#define NPC_AF_MCAMEX_BANKX_CAMX_INTF(a, b, c) ({ \
+ u64 offset; \
+ \
+ offset = (0x1000000ull | (a) << 10 | (b) << 6 | (c) << 3); \
+ if (rvu->hw->npc_ext_set) \
+ offset = (0x8000000ull | (a) << 8 | (b) << 22 | (c) << 3); \
+ offset; })
+
+#define NPC_AF_MCAMEX_BANKX_CAMX_W0(a, b, c) ({ \
+ u64 offset; \
+ \
+ offset = (0x1000010ull | (a) << 10 | (b) << 6 | (c) << 3); \
+ if (rvu->hw->npc_ext_set) \
+ offset = (0x8000010ull | (a) << 8 | (b) << 22 | (c) << 3); \
+ offset; })
+
+#define NPC_AF_MCAMEX_BANKX_CAMX_W1(a, b, c) ({ \
+ u64 offset; \
+ \
+ offset = (0x1000020ull | (a) << 10 | (b) << 6 | (c) << 3); \
+ if (rvu->hw->npc_ext_set) \
+ offset = (0x8000020ull | (a) << 8 | (b) << 22 | (c) << 3); \
+ offset; })
+
+#define NPC_AF_MCAMEX_BANKX_CFG(a, b) ({ \
+ u64 offset; \
+ \
+ offset = (0x1800000ull | (a) << 8 | (b) << 4); \
+ if (rvu->hw->npc_ext_set) \
+ offset = (0x8000038ull | (a) << 8 | (b) << 22); \
+ offset; })
+
+#define NPC_AF_MCAMEX_BANKX_ACTION(a, b) ({ \
+ u64 offset; \
+ \
+ offset = (0x1900000ull | (a) << 8 | (b) << 4); \
+ if (rvu->hw->npc_ext_set) \
+ offset = (0x8000040ull | (a) << 8 | (b) << 22); \
+ offset; })
+
+#define NPC_AF_MCAMEX_BANKX_TAG_ACT(a, b) ({ \
+ u64 offset; \
+ \
+ offset = (0x1900008ull | (a) << 8 | (b) << 4); \
+ if (rvu->hw->npc_ext_set) \
+ offset = (0x8000048ull | (a) << 8 | (b) << 22); \
+ offset; })
+
+#define NPC_AF_MCAMEX_BANKX_STAT_ACT(a, b) ({ \
+ u64 offset; \
+ \
+ offset = (0x1880000ull | (a) << 8 | (b) << 4); \
+ if (rvu->hw->npc_ext_set) \
+ offset = (0x8000050ull | (a) << 8 | (b) << 22); \
+ offset; })
+
+#define NPC_AF_MATCH_STATX(a) ({ \
+ u64 offset; \
+ \
+ offset = (0x1880008ull | (a) << 8); \
+ if (rvu->hw->npc_ext_set) \
+ offset = (0x8000078ull | (a) << 8); \
+ offset; })
+
+/* NDC */
+#define NDC_AF_CONST (0x00000)
+#define NDC_AF_CLK_EN (0x00020)
+#define NDC_AF_CTL (0x00030)
+#define NDC_AF_BANK_CTL (0x00040)
+#define NDC_AF_BANK_CTL_DONE (0x00048)
+#define NDC_AF_INTR (0x00058)
+#define NDC_AF_INTR_W1S (0x00060)
+#define NDC_AF_INTR_ENA_W1S (0x00068)
+#define NDC_AF_INTR_ENA_W1C (0x00070)
+#define NDC_AF_ACTIVE_PC (0x00078)
+#define NDC_AF_CAMS_RD_INTERVAL (0x00080)
+#define NDC_AF_BP_TEST_ENABLE (0x001F8)
+#define NDC_AF_BP_TEST(a) (0x00200 | (a) << 3)
+#define NDC_AF_BLK_RST (0x002F0)
+#define NDC_PRIV_AF_INT_CFG (0x002F8)
+#define NDC_AF_HASHX(a) (0x00300 | (a) << 3)
+#define NDC_AF_PORTX_RTX_RWX_REQ_PC(a, b, c) \
+ (0x00C00 | (a) << 5 | (b) << 4 | (c) << 3)
+#define NDC_AF_PORTX_RTX_RWX_OSTDN_PC(a, b, c) \
+ (0x00D00 | (a) << 5 | (b) << 4 | (c) << 3)
+#define NDC_AF_PORTX_RTX_RWX_LAT_PC(a, b, c) \
+ (0x00E00 | (a) << 5 | (b) << 4 | (c) << 3)
+#define NDC_AF_PORTX_RTX_CANT_ALLOC_PC(a, b) \
+ (0x00F00 | (a) << 5 | (b) << 4)
+#define NDC_AF_BANKX_HIT_PC(a) (0x01000 | (a) << 3)
+#define NDC_AF_BANKX_MISS_PC(a) (0x01100 | (a) << 3)
+#define NDC_AF_BANKX_LINEX_METADATA(a, b) \
+ (0x10000 | (a) << 12 | (b) << 3)
+
+/* LBK */
+#define LBK_CONST (0x10ull)
+#define LBK_LINK_CFG_P2X (0x400ull)
+#define LBK_LINK_CFG_X2P (0x408ull)
+#define LBK_CONST_CHANS GENMASK_ULL(47, 32)
+#define LBK_CONST_DST GENMASK_ULL(31, 28)
+#define LBK_CONST_SRC GENMASK_ULL(27, 24)
+#define LBK_CONST_BUF_SIZE GENMASK_ULL(23, 0)
+#define LBK_LINK_CFG_RANGE_MASK GENMASK_ULL(19, 16)
+#define LBK_LINK_CFG_ID_MASK GENMASK_ULL(11, 6)
+#define LBK_LINK_CFG_BASE_MASK GENMASK_ULL(5, 0)
+
+/* APR */
+#define APR_AF_LMT_CFG (0x000ull)
+#define APR_AF_LMT_MAP_BASE (0x008ull)
+#define APR_AF_LMT_CTL (0x010ull)
+#define APR_LMT_MAP_ENT_DIS_SCH_CMP_SHIFT 23
+#define APR_LMT_MAP_ENT_SCH_ENA_SHIFT 22
+#define APR_LMT_MAP_ENT_DIS_LINE_PREF_SHIFT 21
+
+#endif /* RVU_REG_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c
new file mode 100644
index 000000000..ae50d5625
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c
@@ -0,0 +1,111 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2021 Marvell.
+ *
+ */
+
+#include <linux/pci.h>
+#include "rvu.h"
+
+/* SDP PF device id */
+#define PCI_DEVID_OTX2_SDP_PF 0xA0F6
+
+/* Maximum SDP blocks in a chip */
+#define MAX_SDP 2
+
+/* PF numbers of SDP PFs discovered in rvu_sdp_init(); -1 means unused */
+static int sdp_pf_num[MAX_SDP] = {-1, -1};
+
+bool is_sdp_pfvf(u16 pcifunc)
+{
+ u16 pf = rvu_get_pf(pcifunc);
+ u32 i;
+
+ for (i = 0; i < MAX_SDP; i++) {
+ if (pf == sdp_pf_num[i])
+ return true;
+ }
+
+ return false;
+}
+
+bool is_sdp_pf(u16 pcifunc)
+{
+ return (is_sdp_pfvf(pcifunc) &&
+ !(pcifunc & RVU_PFVF_FUNC_MASK));
+}
+
+bool is_sdp_vf(u16 pcifunc)
+{
+ return (is_sdp_pfvf(pcifunc) &&
+ !!(pcifunc & RVU_PFVF_FUNC_MASK));
+}
+
+int rvu_sdp_init(struct rvu *rvu)
+{
+ struct pci_dev *pdev = NULL;
+ struct rvu_pfvf *pfvf;
+ u32 i = 0;
+
+ while ((i < MAX_SDP) && (pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVID_OTX2_SDP_PF,
+ pdev)) != NULL) {
+ /* The RVU PF number is one less than bus number */
+ sdp_pf_num[i] = pdev->bus->number - 1;
+ pfvf = &rvu->pf[sdp_pf_num[i]];
+
+ pfvf->sdp_info = devm_kzalloc(rvu->dev,
+ sizeof(struct sdp_node_info),
+ GFP_KERNEL);
+ if (!pfvf->sdp_info) {
+ pci_dev_put(pdev);
+ return -ENOMEM;
+ }
+
+ dev_info(rvu->dev, "SDP PF number:%d\n", sdp_pf_num[i]);
+
+ i++;
+ }
+
+ pci_dev_put(pdev);
+
+ return 0;
+}
+
+int
+rvu_mbox_handler_set_sdp_chan_info(struct rvu *rvu,
+ struct sdp_chan_info_msg *req,
+ struct msg_rsp *rsp)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
+
+ memcpy(pfvf->sdp_info, &req->info, sizeof(struct sdp_node_info));
+ dev_info(rvu->dev, "AF: SDP%d max_vfs %d num_pf_rings %d pf_srn %d\n",
+ req->info.node_id, req->info.max_vfs, req->info.num_pf_rings,
+ req->info.pf_srn);
+ return 0;
+}
+
+int
+rvu_mbox_handler_get_sdp_chan_info(struct rvu *rvu, struct msg_req *req,
+ struct sdp_get_chan_info_msg *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ int blkaddr;
+
+ if (!hw->cap.programmable_chans) {
+ rsp->chan_base = NIX_CHAN_SDP_CH_START;
+ rsp->num_chan = NIX_CHAN_SDP_NUM_CHANS;
+ } else {
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ rsp->chan_base = hw->sdp_chan_base;
+ rsp->num_chan = rvu_read64(rvu, blkaddr, NIX_AF_CONST1) & 0xFFFUL;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
new file mode 100644
index 000000000..edc9367b1
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
@@ -0,0 +1,823 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell.
+ *
+ */
+
+#ifndef RVU_STRUCT_H
+#define RVU_STRUCT_H
+
+/* RVU Block revision IDs */
+#define RVU_BLK_RVUM_REVID 0x01
+
+#define RVU_MULTI_BLK_VER 0x7ULL
+
+/* RVU Block Address Enumeration */
+enum rvu_block_addr_e {
+ BLKADDR_RVUM = 0x0ULL,
+ BLKADDR_LMT = 0x1ULL,
+ BLKADDR_MSIX = 0x2ULL,
+ BLKADDR_NPA = 0x3ULL,
+ BLKADDR_NIX0 = 0x4ULL,
+ BLKADDR_NIX1 = 0x5ULL,
+ BLKADDR_NPC = 0x6ULL,
+ BLKADDR_SSO = 0x7ULL,
+ BLKADDR_SSOW = 0x8ULL,
+ BLKADDR_TIM = 0x9ULL,
+ BLKADDR_CPT0 = 0xaULL,
+ BLKADDR_CPT1 = 0xbULL,
+ BLKADDR_NDC_NIX0_RX = 0xcULL,
+ BLKADDR_NDC_NIX0_TX = 0xdULL,
+ BLKADDR_NDC_NPA0 = 0xeULL,
+ BLKADDR_NDC_NIX1_RX = 0x10ULL,
+ BLKADDR_NDC_NIX1_TX = 0x11ULL,
+ BLKADDR_APR = 0x16ULL,
+ BLK_COUNT = 0x17ULL,
+};
+
+/* RVU Block Type Enumeration */
+enum rvu_block_type_e {
+ BLKTYPE_RVUM = 0x0,
+ BLKTYPE_MSIX = 0x1,
+ BLKTYPE_LMT = 0x2,
+ BLKTYPE_NIX = 0x3,
+ BLKTYPE_NPA = 0x4,
+ BLKTYPE_NPC = 0x5,
+ BLKTYPE_SSO = 0x6,
+ BLKTYPE_SSOW = 0x7,
+ BLKTYPE_TIM = 0x8,
+ BLKTYPE_CPT = 0x9,
+ BLKTYPE_NDC = 0xa,
+ BLKTYPE_MAX = 0xa,
+};
+
+/* RVU Admin function Interrupt Vector Enumeration */
+enum rvu_af_int_vec_e {
+ RVU_AF_INT_VEC_POISON = 0x0,
+ RVU_AF_INT_VEC_PFFLR = 0x1,
+ RVU_AF_INT_VEC_PFME = 0x2,
+ RVU_AF_INT_VEC_GEN = 0x3,
+ RVU_AF_INT_VEC_MBOX = 0x4,
+ RVU_AF_INT_VEC_CNT = 0x5,
+};
+
+/* CPT Admin function Interrupt Vector Enumeration */
+enum cpt_af_int_vec_e {
+ CPT_AF_INT_VEC_FLT0 = 0x0,
+ CPT_AF_INT_VEC_FLT1 = 0x1,
+ CPT_AF_INT_VEC_RVU = 0x2,
+ CPT_AF_INT_VEC_RAS = 0x3,
+ CPT_AF_INT_VEC_CNT = 0x4,
+};
+
+enum cpt_10k_af_int_vec_e {
+ CPT_10K_AF_INT_VEC_FLT0 = 0x0,
+ CPT_10K_AF_INT_VEC_FLT1 = 0x1,
+ CPT_10K_AF_INT_VEC_FLT2 = 0x2,
+ CPT_10K_AF_INT_VEC_RVU = 0x3,
+ CPT_10K_AF_INT_VEC_RAS = 0x4,
+ CPT_10K_AF_INT_VEC_CNT = 0x5,
+};
+
+/* NPA Admin function Interrupt Vector Enumeration */
+enum npa_af_int_vec_e {
+ NPA_AF_INT_VEC_RVU = 0x0,
+ NPA_AF_INT_VEC_GEN = 0x1,
+ NPA_AF_INT_VEC_AQ_DONE = 0x2,
+ NPA_AF_INT_VEC_AF_ERR = 0x3,
+ NPA_AF_INT_VEC_POISON = 0x4,
+ NPA_AF_INT_VEC_CNT = 0x5,
+};
+
+/* NIX Admin function Interrupt Vector Enumeration */
+enum nix_af_int_vec_e {
+ NIX_AF_INT_VEC_RVU = 0x0,
+ NIX_AF_INT_VEC_GEN = 0x1,
+ NIX_AF_INT_VEC_AQ_DONE = 0x2,
+ NIX_AF_INT_VEC_AF_ERR = 0x3,
+ NIX_AF_INT_VEC_POISON = 0x4,
+ NIX_AF_INT_VEC_CNT = 0x5,
+};
+
+/* RVU PF Interrupt Vector Enumeration */
+enum rvu_pf_int_vec_e {
+ RVU_PF_INT_VEC_VFFLR0 = 0x0,
+ RVU_PF_INT_VEC_VFFLR1 = 0x1,
+ RVU_PF_INT_VEC_VFME0 = 0x2,
+ RVU_PF_INT_VEC_VFME1 = 0x3,
+ RVU_PF_INT_VEC_VFPF_MBOX0 = 0x4,
+ RVU_PF_INT_VEC_VFPF_MBOX1 = 0x5,
+ RVU_PF_INT_VEC_AFPF_MBOX = 0x6,
+ RVU_PF_INT_VEC_CNT = 0x7,
+};
+
+/* NPA admin queue completion enumeration */
+enum npa_aq_comp {
+ NPA_AQ_COMP_NOTDONE = 0x0,
+ NPA_AQ_COMP_GOOD = 0x1,
+ NPA_AQ_COMP_SWERR = 0x2,
+ NPA_AQ_COMP_CTX_POISON = 0x3,
+ NPA_AQ_COMP_CTX_FAULT = 0x4,
+ NPA_AQ_COMP_LOCKERR = 0x5,
+};
+
+/* NPA admin queue context types */
+enum npa_aq_ctype {
+ NPA_AQ_CTYPE_AURA = 0x0,
+ NPA_AQ_CTYPE_POOL = 0x1,
+};
+
+/* NPA admin queue instruction opcodes */
+enum npa_aq_instop {
+ NPA_AQ_INSTOP_NOP = 0x0,
+ NPA_AQ_INSTOP_INIT = 0x1,
+ NPA_AQ_INSTOP_WRITE = 0x2,
+ NPA_AQ_INSTOP_READ = 0x3,
+ NPA_AQ_INSTOP_LOCK = 0x4,
+ NPA_AQ_INSTOP_UNLOCK = 0x5,
+};
+
+/* ALLOC/FREE input queues Enumeration from coprocessors */
+enum npa_inpq {
+ NPA_INPQ_NIX0_RX = 0x0,
+ NPA_INPQ_NIX0_TX = 0x1,
+ NPA_INPQ_NIX1_RX = 0x2,
+ NPA_INPQ_NIX1_TX = 0x3,
+ NPA_INPQ_SSO = 0x4,
+ NPA_INPQ_TIM = 0x5,
+ NPA_INPQ_DPI = 0x6,
+ NPA_INPQ_AURA_OP = 0xe,
+ NPA_INPQ_INTERNAL_RSV = 0xf,
+};
+
+/* NPA admin queue instruction structure */
+struct npa_aq_inst_s {
+ u64 op : 4; /* W0 */
+ u64 ctype : 4;
+ u64 lf : 9;
+ u64 reserved_17_23 : 7;
+ u64 cindex : 20;
+ u64 reserved_44_62 : 19;
+ u64 doneint : 1;
+ u64 res_addr; /* W1 */
+};
+
+/* NPA admin queue result structure */
+struct npa_aq_res_s {
+ u64 op : 4; /* W0 */
+ u64 ctype : 4;
+ u64 compcode : 8;
+ u64 doneint : 1;
+ u64 reserved_17_63 : 47;
+ u64 reserved_64_127; /* W1 */
+};
+
+struct npa_aura_s {
+ u64 pool_addr; /* W0 */
+ u64 ena : 1; /* W1 */
+ u64 reserved_65 : 2;
+ u64 pool_caching : 1;
+ u64 pool_way_mask : 16;
+ u64 avg_con : 9;
+ u64 reserved_93 : 1;
+ u64 pool_drop_ena : 1;
+ u64 aura_drop_ena : 1;
+ u64 bp_ena : 2;
+ u64 reserved_98_103 : 6;
+ u64 aura_drop : 8;
+ u64 shift : 6;
+ u64 reserved_118_119 : 2;
+ u64 avg_level : 8;
+ u64 count : 36; /* W2 */
+ u64 reserved_164_167 : 4;
+ u64 nix0_bpid : 9;
+ u64 reserved_177_179 : 3;
+ u64 nix1_bpid : 9;
+ u64 reserved_189_191 : 3;
+ u64 limit : 36; /* W3 */
+ u64 reserved_228_231 : 4;
+ u64 bp : 8;
+ u64 reserved_241_243 : 3;
+ u64 fc_be : 1;
+ u64 fc_ena : 1;
+ u64 fc_up_crossing : 1;
+ u64 fc_stype : 2;
+ u64 fc_hyst_bits : 4;
+ u64 reserved_252_255 : 4;
+ u64 fc_addr; /* W4 */
+ u64 pool_drop : 8; /* W5 */
+ u64 update_time : 16;
+ u64 err_int : 8;
+ u64 err_int_ena : 8;
+ u64 thresh_int : 1;
+ u64 thresh_int_ena : 1;
+ u64 thresh_up : 1;
+ u64 reserved_363 : 1;
+ u64 thresh_qint_idx : 7;
+ u64 reserved_371 : 1;
+ u64 err_qint_idx : 7;
+ u64 reserved_379_383 : 5;
+ u64 thresh : 36; /* W6 */
+ u64 rsvd_423_420 : 4;
+ u64 fc_msh_dst : 11;
+ u64 reserved_435_447 : 13;
+ u64 reserved_448_511; /* W7 */
+};
+
+struct npa_pool_s {
+ u64 stack_base; /* W0 */
+ u64 ena : 1;
+ u64 nat_align : 1;
+ u64 reserved_66_67 : 2;
+ u64 stack_caching : 1;
+ u64 reserved_70_71 : 3;
+ u64 stack_way_mask : 16;
+ u64 buf_offset : 12;
+ u64 reserved_100_103 : 4;
+ u64 buf_size : 11;
+ u64 reserved_115_127 : 13;
+ u64 stack_max_pages : 32;
+ u64 stack_pages : 32;
+ u64 op_pc : 48;
+ u64 reserved_240_255 : 16;
+ u64 stack_offset : 4;
+ u64 reserved_260_263 : 4;
+ u64 shift : 6;
+ u64 reserved_270_271 : 2;
+ u64 avg_level : 8;
+ u64 avg_con : 9;
+ u64 fc_ena : 1;
+ u64 fc_stype : 2;
+ u64 fc_hyst_bits : 4;
+ u64 fc_up_crossing : 1;
+ u64 fc_be : 1;
+ u64 reserved_298_299 : 2;
+ u64 update_time : 16;
+ u64 reserved_316_319 : 4;
+ u64 fc_addr; /* W5 */
+ u64 ptr_start; /* W6 */
+ u64 ptr_end; /* W7 */
+ u64 reserved_512_535 : 24;
+ u64 err_int : 8;
+ u64 err_int_ena : 8;
+ u64 thresh_int : 1;
+ u64 thresh_int_ena : 1;
+ u64 thresh_up : 1;
+ u64 reserved_555 : 1;
+ u64 thresh_qint_idx : 7;
+ u64 reserved_563 : 1;
+ u64 err_qint_idx : 7;
+ u64 reserved_571_575 : 5;
+ u64 thresh : 36;
+ u64 rsvd_615_612 : 4;
+ u64 fc_msh_dst : 11;
+ u64 reserved_627_639 : 13;
+ u64 reserved_640_703; /* W10 */
+ u64 reserved_704_767; /* W11 */
+ u64 reserved_768_831; /* W12 */
+ u64 reserved_832_895; /* W13 */
+ u64 reserved_896_959; /* W14 */
+ u64 reserved_960_1023; /* W15 */
+};
+
+/* NIX admin queue completion status */
+enum nix_aq_comp {
+ NIX_AQ_COMP_NOTDONE = 0x0,
+ NIX_AQ_COMP_GOOD = 0x1,
+ NIX_AQ_COMP_SWERR = 0x2,
+ NIX_AQ_COMP_CTX_POISON = 0x3,
+ NIX_AQ_COMP_CTX_FAULT = 0x4,
+ NIX_AQ_COMP_LOCKERR = 0x5,
+ NIX_AQ_COMP_SQB_ALLOC_FAIL = 0x6,
+};
+
+/* NIX admin queue context types */
+enum nix_aq_ctype {
+ NIX_AQ_CTYPE_RQ = 0x0,
+ NIX_AQ_CTYPE_SQ = 0x1,
+ NIX_AQ_CTYPE_CQ = 0x2,
+ NIX_AQ_CTYPE_MCE = 0x3,
+ NIX_AQ_CTYPE_RSS = 0x4,
+ NIX_AQ_CTYPE_DYNO = 0x5,
+ NIX_AQ_CTYPE_BANDPROF = 0x6,
+};
+
+/* NIX admin queue instruction opcodes */
+enum nix_aq_instop {
+ NIX_AQ_INSTOP_NOP = 0x0,
+ NIX_AQ_INSTOP_INIT = 0x1,
+ NIX_AQ_INSTOP_WRITE = 0x2,
+ NIX_AQ_INSTOP_READ = 0x3,
+ NIX_AQ_INSTOP_LOCK = 0x4,
+ NIX_AQ_INSTOP_UNLOCK = 0x5,
+};
+
+/* NIX admin queue instruction structure */
+struct nix_aq_inst_s {
+ u64 op : 4;
+ u64 ctype : 4;
+ u64 lf : 9;
+ u64 reserved_17_23 : 7;
+ u64 cindex : 20;
+ u64 reserved_44_62 : 19;
+ u64 doneint : 1;
+ u64 res_addr; /* W1 */
+};
+
+/* NIX admin queue result structure */
+struct nix_aq_res_s {
+ u64 op : 4;
+ u64 ctype : 4;
+ u64 compcode : 8;
+ u64 doneint : 1;
+ u64 reserved_17_63 : 47;
+ u64 reserved_64_127; /* W1 */
+};
+
+/* NIX Completion queue context structure */
+struct nix_cq_ctx_s {
+ u64 base;
+ u64 rsvd_64_67 : 4;
+ u64 bp_ena : 1;
+ u64 rsvd_69_71 : 3;
+ u64 bpid : 9;
+ u64 rsvd_81_83 : 3;
+ u64 qint_idx : 7;
+ u64 cq_err : 1;
+ u64 cint_idx : 7;
+ u64 avg_con : 9;
+ u64 wrptr : 20;
+ u64 tail : 20;
+ u64 head : 20;
+ u64 avg_level : 8;
+ u64 update_time : 16;
+ u64 bp : 8;
+ u64 drop : 8;
+ u64 drop_ena : 1;
+ u64 ena : 1;
+ u64 rsvd_210_211 : 2;
+ u64 substream : 20;
+ u64 caching : 1;
+ u64 rsvd_233_235 : 3;
+ u64 qsize : 4;
+ u64 cq_err_int : 8;
+ u64 cq_err_int_ena : 8;
+};
+
+/* CN10K NIX Receive queue context structure */
+struct nix_cn10k_rq_ctx_s {
+ u64 ena : 1;
+ u64 sso_ena : 1;
+ u64 ipsech_ena : 1;
+ u64 ena_wqwd : 1;
+ u64 cq : 20;
+ u64 rsvd_36_24 : 13;
+ u64 lenerr_dis : 1;
+ u64 csum_il4_dis : 1;
+ u64 csum_ol4_dis : 1;
+ u64 len_il4_dis : 1;
+ u64 len_il3_dis : 1;
+ u64 len_ol4_dis : 1;
+ u64 len_ol3_dis : 1;
+ u64 wqe_aura : 20;
+ u64 spb_aura : 20;
+ u64 lpb_aura : 20;
+ u64 sso_grp : 10;
+ u64 sso_tt : 2;
+ u64 pb_caching : 2;
+ u64 wqe_caching : 1;
+ u64 xqe_drop_ena : 1;
+ u64 spb_drop_ena : 1;
+ u64 lpb_drop_ena : 1;
+ u64 pb_stashing : 1;
+ u64 ipsecd_drop_ena : 1;
+ u64 chi_ena : 1;
+ u64 rsvd_127_125 : 3;
+ u64 band_prof_id : 10; /* W2 */
+ u64 rsvd_138 : 1;
+ u64 policer_ena : 1;
+ u64 spb_sizem1 : 6;
+ u64 wqe_skip : 2;
+ u64 rsvd_150_148 : 3;
+ u64 spb_ena : 1;
+ u64 lpb_sizem1 : 12;
+ u64 first_skip : 7;
+ u64 rsvd_171 : 1;
+ u64 later_skip : 6;
+ u64 xqe_imm_size : 6;
+ u64 rsvd_189_184 : 6;
+ u64 xqe_imm_copy : 1;
+ u64 xqe_hdr_split : 1;
+ u64 xqe_drop : 8; /* W3 */
+ u64 xqe_pass : 8;
+ u64 wqe_pool_drop : 8;
+ u64 wqe_pool_pass : 8;
+ u64 spb_aura_drop : 8;
+ u64 spb_aura_pass : 8;
+ u64 spb_pool_drop : 8;
+ u64 spb_pool_pass : 8;
+ u64 lpb_aura_drop : 8; /* W4 */
+ u64 lpb_aura_pass : 8;
+ u64 lpb_pool_drop : 8;
+ u64 lpb_pool_pass : 8;
+ u64 rsvd_291_288 : 4;
+ u64 rq_int : 8;
+ u64 rq_int_ena : 8;
+ u64 qint_idx : 7;
+ u64 rsvd_319_315 : 5;
+ u64 ltag : 24; /* W5 */
+ u64 good_utag : 8;
+ u64 bad_utag : 8;
+ u64 flow_tagw : 6;
+ u64 ipsec_vwqe : 1;
+ u64 vwqe_ena : 1;
+ u64 vwqe_wait : 8;
+ u64 max_vsize_exp : 4;
+ u64 vwqe_skip : 2;
+ u64 rsvd_383_382 : 2;
+ u64 octs : 48; /* W6 */
+ u64 rsvd_447_432 : 16;
+ u64 pkts : 48; /* W7 */
+ u64 rsvd_511_496 : 16;
+ u64 drop_octs : 48; /* W8 */
+ u64 rsvd_575_560 : 16;
+ u64 drop_pkts : 48; /* W9 */
+ u64 rsvd_639_624 : 16;
+ u64 re_pkts : 48; /* W10 */
+ u64 rsvd_703_688 : 16;
+ u64 rsvd_767_704; /* W11 */
+ u64 rsvd_831_768; /* W12 */
+ u64 rsvd_895_832; /* W13 */
+ u64 rsvd_959_896; /* W14 */
+ u64 rsvd_1023_960; /* W15 */
+};
+
+/* CN10K NIX Send queue context structure */
+struct nix_cn10k_sq_ctx_s {
+ u64 ena : 1;
+ u64 qint_idx : 6;
+ u64 substream : 20;
+ u64 sdp_mcast : 1;
+ u64 cq : 20;
+ u64 sqe_way_mask : 16;
+ u64 smq : 10; /* W1 */
+ u64 cq_ena : 1;
+ u64 xoff : 1;
+ u64 sso_ena : 1;
+ u64 smq_rr_weight : 14;
+ u64 default_chan : 12;
+ u64 sqb_count : 16;
+ u64 rsvd_120_119 : 2;
+ u64 smq_rr_count_lb : 7;
+ u64 smq_rr_count_ub : 25; /* W2 */
+ u64 sqb_aura : 20;
+ u64 sq_int : 8;
+ u64 sq_int_ena : 8;
+ u64 sqe_stype : 2;
+ u64 rsvd_191 : 1;
+ u64 max_sqe_size : 2; /* W3 */
+ u64 cq_limit : 8;
+ u64 lmt_dis : 1;
+ u64 mnq_dis : 1;
+ u64 smq_next_sq : 20;
+ u64 smq_lso_segnum : 8;
+ u64 tail_offset : 6;
+ u64 smenq_offset : 6;
+ u64 head_offset : 6;
+ u64 smenq_next_sqb_vld : 1;
+ u64 smq_pend : 1;
+ u64 smq_next_sq_vld : 1;
+ u64 rsvd_255_253 : 3;
+ u64 next_sqb : 64; /* W4 */
+ u64 tail_sqb : 64; /* W5 */
+ u64 smenq_sqb : 64; /* W6 */
+ u64 smenq_next_sqb : 64; /* W7 */
+ u64 head_sqb : 64; /* W8 */
+ u64 rsvd_583_576 : 8; /* W9 */
+ u64 vfi_lso_total : 18;
+ u64 vfi_lso_sizem1 : 3;
+ u64 vfi_lso_sb : 8;
+ u64 vfi_lso_mps : 14;
+ u64 vfi_lso_vlan0_ins_ena : 1;
+ u64 vfi_lso_vlan1_ins_ena : 1;
+ u64 vfi_lso_vld : 1;
+ u64 rsvd_639_630 : 10;
+ u64 scm_lso_rem : 18; /* W10 */
+ u64 rsvd_703_658 : 46;
+ u64 octs : 48; /* W11 */
+ u64 rsvd_767_752 : 16;
+ u64 pkts : 48; /* W12 */
+ u64 rsvd_831_816 : 16;
+ u64 rsvd_895_832 : 64; /* W13 */
+ u64 dropped_octs : 48;
+ u64 rsvd_959_944 : 16;
+ u64 dropped_pkts : 48;
+ u64 rsvd_1023_1008 : 16;
+};
+
+/* NIX Receive queue context structure */
+struct nix_rq_ctx_s {
+ u64 ena : 1;
+ u64 sso_ena : 1;
+ u64 ipsech_ena : 1;
+ u64 ena_wqwd : 1;
+ u64 cq : 20;
+ u64 substream : 20;
+ u64 wqe_aura : 20;
+ u64 spb_aura : 20;
+ u64 lpb_aura : 20;
+ u64 sso_grp : 10;
+ u64 sso_tt : 2;
+ u64 pb_caching : 2;
+ u64 wqe_caching : 1;
+ u64 xqe_drop_ena : 1;
+ u64 spb_drop_ena : 1;
+ u64 lpb_drop_ena : 1;
+ u64 rsvd_127_122 : 6;
+ u64 rsvd_139_128 : 12; /* W2 */
+ u64 spb_sizem1 : 6;
+ u64 wqe_skip : 2;
+ u64 rsvd_150_148 : 3;
+ u64 spb_ena : 1;
+ u64 lpb_sizem1 : 12;
+ u64 first_skip : 7;
+ u64 rsvd_171 : 1;
+ u64 later_skip : 6;
+ u64 xqe_imm_size : 6;
+ u64 rsvd_189_184 : 6;
+ u64 xqe_imm_copy : 1;
+ u64 xqe_hdr_split : 1;
+ u64 xqe_drop : 8; /* W3 */
+ u64 xqe_pass : 8;
+ u64 wqe_pool_drop : 8;
+ u64 wqe_pool_pass : 8;
+ u64 spb_aura_drop : 8;
+ u64 spb_aura_pass : 8;
+ u64 spb_pool_drop : 8;
+ u64 spb_pool_pass : 8;
+ u64 lpb_aura_drop : 8; /* W4 */
+ u64 lpb_aura_pass : 8;
+ u64 lpb_pool_drop : 8;
+ u64 lpb_pool_pass : 8;
+ u64 rsvd_291_288 : 4;
+ u64 rq_int : 8;
+ u64 rq_int_ena : 8;
+ u64 qint_idx : 7;
+ u64 rsvd_319_315 : 5;
+ u64 ltag : 24; /* W5 */
+ u64 good_utag : 8;
+ u64 bad_utag : 8;
+ u64 flow_tagw : 6;
+ u64 rsvd_383_366 : 18;
+ u64 octs : 48; /* W6 */
+ u64 rsvd_447_432 : 16;
+ u64 pkts : 48; /* W7 */
+ u64 rsvd_511_496 : 16;
+ u64 drop_octs : 48; /* W8 */
+ u64 rsvd_575_560 : 16;
+ u64 drop_pkts : 48; /* W9 */
+ u64 rsvd_639_624 : 16;
+ u64 re_pkts : 48; /* W10 */
+ u64 rsvd_703_688 : 16;
+ u64 rsvd_767_704; /* W11 */
+ u64 rsvd_831_768; /* W12 */
+ u64 rsvd_895_832; /* W13 */
+ u64 rsvd_959_896; /* W14 */
+ u64 rsvd_1023_960; /* W15 */
+};
+
+/* NIX sqe sizes */
+enum nix_maxsqesz {
+ NIX_MAXSQESZ_W16 = 0x0,
+ NIX_MAXSQESZ_W8 = 0x1,
+};
+
+/* NIX SQB caching type */
+enum nix_stype {
+ NIX_STYPE_STF = 0x0,
+ NIX_STYPE_STT = 0x1,
+ NIX_STYPE_STP = 0x2,
+};
+
+/* NIX Send queue context structure */
+struct nix_sq_ctx_s {
+ u64 ena : 1;
+ u64 qint_idx : 6;
+ u64 substream : 20;
+ u64 sdp_mcast : 1;
+ u64 cq : 20;
+ u64 sqe_way_mask : 16;
+ u64 smq : 9;
+ u64 cq_ena : 1;
+ u64 xoff : 1;
+ u64 sso_ena : 1;
+ u64 smq_rr_quantum : 24;
+ u64 default_chan : 12;
+ u64 sqb_count : 16;
+ u64 smq_rr_count : 25;
+ u64 sqb_aura : 20;
+ u64 sq_int : 8;
+ u64 sq_int_ena : 8;
+ u64 sqe_stype : 2;
+ u64 rsvd_191 : 1;
+ u64 max_sqe_size : 2;
+ u64 cq_limit : 8;
+ u64 lmt_dis : 1;
+ u64 mnq_dis : 1;
+ u64 smq_next_sq : 20;
+ u64 smq_lso_segnum : 8;
+ u64 tail_offset : 6;
+ u64 smenq_offset : 6;
+ u64 head_offset : 6;
+ u64 smenq_next_sqb_vld : 1;
+ u64 smq_pend : 1;
+ u64 smq_next_sq_vld : 1;
+ u64 rsvd_255_253 : 3;
+ u64 next_sqb : 64;/* W4 */
+ u64 tail_sqb : 64;/* W5 */
+ u64 smenq_sqb : 64;/* W6 */
+ u64 smenq_next_sqb : 64;/* W7 */
+ u64 head_sqb : 64;/* W8 */
+ u64 rsvd_583_576 : 8;
+ u64 vfi_lso_total : 18;
+ u64 vfi_lso_sizem1 : 3;
+ u64 vfi_lso_sb : 8;
+ u64 vfi_lso_mps : 14;
+ u64 vfi_lso_vlan0_ins_ena : 1;
+ u64 vfi_lso_vlan1_ins_ena : 1;
+ u64 vfi_lso_vld : 1;
+ u64 rsvd_639_630 : 10;
+ u64 scm_lso_rem : 18;
+ u64 rsvd_703_658 : 46;
+ u64 octs : 48;
+ u64 rsvd_767_752 : 16;
+ u64 pkts : 48;
+ u64 rsvd_831_816 : 16;
+ u64 rsvd_895_832 : 64;/* W13 */
+ u64 dropped_octs : 48;
+ u64 rsvd_959_944 : 16;
+ u64 dropped_pkts : 48;
+ u64 rsvd_1023_1008 : 16;
+};
+
+/* NIX Receive side scaling entry structure */
+struct nix_rsse_s {
+ uint32_t rq : 20;
+ uint32_t reserved_20_31 : 12;
+};
+
+/* NIX receive multicast/mirror entry structure */
+struct nix_rx_mce_s {
+ uint64_t op : 2;
+ uint64_t rsvd_2 : 1;
+ uint64_t eol : 1;
+ uint64_t index : 20;
+ uint64_t rsvd_31_24 : 8;
+ uint64_t pf_func : 16;
+ uint64_t next : 16;
+};
+
+enum nix_band_prof_layers {
+ BAND_PROF_LEAF_LAYER = 0,
+ BAND_PROF_INVAL_LAYER = 1,
+ BAND_PROF_MID_LAYER = 2,
+ BAND_PROF_TOP_LAYER = 3,
+ BAND_PROF_NUM_LAYERS = 4,
+};
+
+enum NIX_RX_BAND_PROF_ACTIONRESULT_E {
+ NIX_RX_BAND_PROF_ACTIONRESULT_PASS = 0x0,
+ NIX_RX_BAND_PROF_ACTIONRESULT_DROP = 0x1,
+ NIX_RX_BAND_PROF_ACTIONRESULT_RED = 0x2,
+};
+
+enum nix_band_prof_pc_mode {
+ NIX_RX_PC_MODE_VLAN = 0,
+ NIX_RX_PC_MODE_DSCP = 1,
+ NIX_RX_PC_MODE_GEN = 2,
+ NIX_RX_PC_MODE_RSVD = 3,
+};
+
+/* NIX ingress policer bandwidth profile structure */
+struct nix_bandprof_s {
+ uint64_t pc_mode : 2; /* W0 */
+ uint64_t icolor : 2;
+ uint64_t tnl_ena : 1;
+ uint64_t reserved_5_7 : 3;
+ uint64_t peir_exponent : 5;
+ uint64_t reserved_13_15 : 3;
+ uint64_t pebs_exponent : 5;
+ uint64_t reserved_21_23 : 3;
+ uint64_t cir_exponent : 5;
+ uint64_t reserved_29_31 : 3;
+ uint64_t cbs_exponent : 5;
+ uint64_t reserved_37_39 : 3;
+ uint64_t peir_mantissa : 8;
+ uint64_t pebs_mantissa : 8;
+ uint64_t cir_mantissa : 8;
+ uint64_t cbs_mantissa : 8; /* W1 */
+ uint64_t lmode : 1;
+ uint64_t l_sellect : 3;
+ uint64_t rdiv : 4;
+ uint64_t adjust_exponent : 5;
+ uint64_t reserved_85_86 : 2;
+ uint64_t adjust_mantissa : 9;
+ uint64_t gc_action : 2;
+ uint64_t yc_action : 2;
+ uint64_t rc_action : 2;
+ uint64_t meter_algo : 2;
+ uint64_t band_prof_id : 7;
+ uint64_t reserved_111_118 : 8;
+ uint64_t hl_en : 1;
+ uint64_t reserved_120_127 : 8;
+ uint64_t ts : 48; /* W2 */
+ uint64_t reserved_176_191 : 16;
+ uint64_t pe_accum : 32; /* W3 */
+ uint64_t c_accum : 32;
+ uint64_t green_pkt_pass : 48; /* W4 */
+ uint64_t reserved_304_319 : 16;
+ uint64_t yellow_pkt_pass : 48; /* W5 */
+ uint64_t reserved_368_383 : 16;
+ uint64_t red_pkt_pass : 48; /* W6 */
+ uint64_t reserved_432_447 : 16;
+ uint64_t green_octs_pass : 48; /* W7 */
+ uint64_t reserved_496_511 : 16;
+ uint64_t yellow_octs_pass : 48; /* W8 */
+ uint64_t reserved_560_575 : 16;
+ uint64_t red_octs_pass : 48; /* W9 */
+ uint64_t reserved_624_639 : 16;
+ uint64_t green_pkt_drop : 48; /* W10 */
+ uint64_t reserved_688_703 : 16;
+ uint64_t yellow_pkt_drop : 48; /* W11 */
+ uint64_t reserved_752_767 : 16;
+ uint64_t red_pkt_drop : 48; /* W12 */
+ uint64_t reserved_816_831 : 16;
+ uint64_t green_octs_drop : 48; /* W13 */
+ uint64_t reserved_880_895 : 16;
+ uint64_t yellow_octs_drop : 48; /* W14 */
+ uint64_t reserved_944_959 : 16;
+ uint64_t red_octs_drop : 48; /* W15 */
+ uint64_t reserved_1008_1023 : 16;
+};
+
+enum nix_lsoalg {
+ NIX_LSOALG_NOP,
+ NIX_LSOALG_ADD_SEGNUM,
+ NIX_LSOALG_ADD_PAYLEN,
+ NIX_LSOALG_ADD_OFFSET,
+ NIX_LSOALG_TCP_FLAGS,
+};
+
+enum nix_txlayer {
+ NIX_TXLAYER_OL3,
+ NIX_TXLAYER_OL4,
+ NIX_TXLAYER_IL3,
+ NIX_TXLAYER_IL4,
+};
+
+struct nix_lso_format {
+ u64 offset : 8;
+ u64 layer : 2;
+ u64 rsvd_10_11 : 2;
+ u64 sizem1 : 2;
+ u64 rsvd_14_15 : 2;
+ u64 alg : 3;
+ u64 rsvd_19_63 : 45;
+};
+
+struct nix_rx_flowkey_alg {
+ u64 key_offset :6;
+ u64 ln_mask :1;
+ u64 fn_mask :1;
+ u64 hdr_offset :8;
+ u64 bytesm1 :5;
+ u64 lid :3;
+ u64 reserved_24_24 :1;
+ u64 ena :1;
+ u64 sel_chan :1;
+ u64 ltype_mask :4;
+ u64 ltype_match :4;
+ u64 reserved_35_63 :29;
+};
+
+/* NIX VTAG size */
+enum nix_vtag_size {
+ VTAGSIZE_T4 = 0x0,
+ VTAGSIZE_T8 = 0x1,
+};
+
+enum nix_tx_vtag_op {
+ NOP = 0x0,
+ VTAG_INSERT = 0x1,
+ VTAG_REPLACE = 0x2,
+};
+
+/* NIX RX VTAG actions */
+#define VTAG_STRIP BIT_ULL(4)
+#define VTAG_CAPTURE BIT_ULL(5)
+
+#endif /* RVU_STRUCT_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c
new file mode 100644
index 000000000..854045ed3
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c
@@ -0,0 +1,280 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2021 Marvell.
+ *
+ */
+
+#include <linux/bitfield.h>
+#include "rvu.h"
+
+static void rvu_switch_enable_lbk_link(struct rvu *rvu, u16 pcifunc, bool enable)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ struct nix_hw *nix_hw;
+
+ nix_hw = get_nix_hw(rvu->hw, pfvf->nix_blkaddr);
+ /* Enable LBK links with channel 63 for TX MCAM rule */
+ rvu_nix_tx_tl2_cfg(rvu, pfvf->nix_blkaddr, pcifunc,
+ &nix_hw->txsch[NIX_TXSCH_LVL_TL2], enable);
+}
+
+static int rvu_switch_install_rx_rule(struct rvu *rvu, u16 pcifunc,
+ u16 chan_mask)
+{
+ struct npc_install_flow_req req = { 0 };
+ struct npc_install_flow_rsp rsp = { 0 };
+ struct rvu_pfvf *pfvf;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+	/* If the pcifunc is not initialized then there is nothing to do.
+	 * This same function will be called again via rvu_switch_update_rules
+	 * after the pcifunc is initialized.
+	 */
+ if (!test_bit(NIXLF_INITIALIZED, &pfvf->flags))
+ return 0;
+
+ ether_addr_copy(req.packet.dmac, pfvf->mac_addr);
+ eth_broadcast_addr((u8 *)&req.mask.dmac);
+ req.hdr.pcifunc = 0; /* AF is requester */
+ req.vf = pcifunc;
+ req.features = BIT_ULL(NPC_DMAC);
+ req.channel = pfvf->rx_chan_base;
+ req.chan_mask = chan_mask;
+ req.intf = pfvf->nix_rx_intf;
+ req.op = NIX_RX_ACTION_DEFAULT;
+ req.default_rule = 1;
+
+ return rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
+}
+
+static int rvu_switch_install_tx_rule(struct rvu *rvu, u16 pcifunc, u16 entry)
+{
+ struct npc_install_flow_req req = { 0 };
+ struct npc_install_flow_rsp rsp = { 0 };
+ struct rvu_pfvf *pfvf;
+ u8 lbkid;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+	/* If the pcifunc is not initialized then there is nothing to do.
+	 * This same function will be called again via rvu_switch_update_rules
+	 * after the pcifunc is initialized.
+	 */
+ if (!test_bit(NIXLF_INITIALIZED, &pfvf->flags))
+ return 0;
+
+ rvu_switch_enable_lbk_link(rvu, pcifunc, true);
+
+ lbkid = pfvf->nix_blkaddr == BLKADDR_NIX0 ? 0 : 1;
+ ether_addr_copy(req.packet.dmac, pfvf->mac_addr);
+ eth_broadcast_addr((u8 *)&req.mask.dmac);
+ req.hdr.pcifunc = 0; /* AF is requester */
+ req.vf = pcifunc;
+ req.entry = entry;
+ req.features = BIT_ULL(NPC_DMAC);
+ req.intf = pfvf->nix_tx_intf;
+ req.op = NIX_TX_ACTIONOP_UCAST_CHAN;
+ req.index = (lbkid << 8) | RVU_SWITCH_LBK_CHAN;
+ req.set_cntr = 1;
+
+ return rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
+}
+
+static int rvu_switch_install_rules(struct rvu *rvu)
+{
+ struct rvu_switch *rswitch = &rvu->rswitch;
+ u16 start = rswitch->start_entry;
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc, entry = 0;
+ int pf, vf, numvfs;
+ int err;
+
+ for (pf = 1; pf < hw->total_pfs; pf++) {
+ if (!is_pf_cgxmapped(rvu, pf))
+ continue;
+
+ pcifunc = pf << 10;
+		/* rvu_get_nix_blkaddr sets up the corresponding NIX block
+		 * address and the NIX RX and TX interfaces for a pcifunc.
+		 * Generally it is called during the attach call of a pcifunc,
+		 * but it is called here since we are pre-installing rules
+		 * before the NIX LFs are attached.
+		 */
+ rvu_get_nix_blkaddr(rvu, pcifunc);
+
+		/* MCAM RX rules for a PF/VF already exist as the default
+		 * unicast rules installed by AF. Hence change the channel in
+		 * those rules to ignore the channel, so that packets with the
+		 * required DMAC received from LBK (i.e. from other PF/VFs in
+		 * the system) or from the external world (from the wire) are
+		 * accepted.
+		 */
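+		/* A chan_mask of 0x0 makes the channel a don't care in the
+		 * MCAM match here; rvu_switch_disable() later restores exact
+		 * channel matching by passing 0xFFF.
+		 */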
+ err = rvu_switch_install_rx_rule(rvu, pcifunc, 0x0);
+ if (err) {
+ dev_err(rvu->dev, "RX rule for PF%d failed(%d)\n",
+ pf, err);
+ return err;
+ }
+
+ err = rvu_switch_install_tx_rule(rvu, pcifunc, start + entry);
+ if (err) {
+ dev_err(rvu->dev, "TX rule for PF%d failed(%d)\n",
+ pf, err);
+ return err;
+ }
+
+ rswitch->entry2pcifunc[entry++] = pcifunc;
+
+ rvu_get_pf_numvfs(rvu, pf, &numvfs, NULL);
+ for (vf = 0; vf < numvfs; vf++) {
+ pcifunc = pf << 10 | ((vf + 1) & 0x3FF);
+ rvu_get_nix_blkaddr(rvu, pcifunc);
+
+ err = rvu_switch_install_rx_rule(rvu, pcifunc, 0x0);
+ if (err) {
+ dev_err(rvu->dev,
+ "RX rule for PF%dVF%d failed(%d)\n",
+ pf, vf, err);
+ return err;
+ }
+
+ err = rvu_switch_install_tx_rule(rvu, pcifunc,
+ start + entry);
+ if (err) {
+ dev_err(rvu->dev,
+ "TX rule for PF%dVF%d failed(%d)\n",
+ pf, vf, err);
+ return err;
+ }
+
+ rswitch->entry2pcifunc[entry++] = pcifunc;
+ }
+ }
+
+ return 0;
+}
+
+void rvu_switch_enable(struct rvu *rvu)
+{
+ struct npc_mcam_alloc_entry_req alloc_req = { 0 };
+ struct npc_mcam_alloc_entry_rsp alloc_rsp = { 0 };
+ struct npc_delete_flow_req uninstall_req = { 0 };
+ struct npc_delete_flow_rsp uninstall_rsp = { 0 };
+ struct npc_mcam_free_entry_req free_req = { 0 };
+ struct rvu_switch *rswitch = &rvu->rswitch;
+ struct msg_rsp rsp;
+ int ret;
+
+ alloc_req.contig = true;
+ alloc_req.count = rvu->cgx_mapped_pfs + rvu->cgx_mapped_vfs;
+ ret = rvu_mbox_handler_npc_mcam_alloc_entry(rvu, &alloc_req,
+ &alloc_rsp);
+ if (ret) {
+ dev_err(rvu->dev,
+ "Unable to allocate MCAM entries\n");
+ goto exit;
+ }
+
+ if (alloc_rsp.count != alloc_req.count) {
+ dev_err(rvu->dev,
+ "Unable to allocate %d MCAM entries, got %d\n",
+ alloc_req.count, alloc_rsp.count);
+ goto free_entries;
+ }
+
+ rswitch->entry2pcifunc = kcalloc(alloc_req.count, sizeof(u16),
+ GFP_KERNEL);
+ if (!rswitch->entry2pcifunc)
+ goto free_entries;
+
+ rswitch->used_entries = alloc_rsp.count;
+ rswitch->start_entry = alloc_rsp.entry;
+
+ ret = rvu_switch_install_rules(rvu);
+ if (ret)
+ goto uninstall_rules;
+
+ return;
+
+uninstall_rules:
+ uninstall_req.start = rswitch->start_entry;
+ uninstall_req.end = rswitch->start_entry + rswitch->used_entries - 1;
+ rvu_mbox_handler_npc_delete_flow(rvu, &uninstall_req, &uninstall_rsp);
+ kfree(rswitch->entry2pcifunc);
+free_entries:
+ free_req.all = 1;
+ rvu_mbox_handler_npc_mcam_free_entry(rvu, &free_req, &rsp);
+exit:
+ return;
+}
+
+void rvu_switch_disable(struct rvu *rvu)
+{
+ struct npc_delete_flow_req uninstall_req = { 0 };
+ struct npc_delete_flow_rsp uninstall_rsp = { 0 };
+ struct npc_mcam_free_entry_req free_req = { 0 };
+ struct rvu_switch *rswitch = &rvu->rswitch;
+ struct rvu_hwinfo *hw = rvu->hw;
+ int pf, vf, numvfs;
+ struct msg_rsp rsp;
+ u16 pcifunc;
+ int err;
+
+ if (!rswitch->used_entries)
+ return;
+
+ for (pf = 1; pf < hw->total_pfs; pf++) {
+ if (!is_pf_cgxmapped(rvu, pf))
+ continue;
+
+ pcifunc = pf << 10;
+ err = rvu_switch_install_rx_rule(rvu, pcifunc, 0xFFF);
+ if (err)
+ dev_err(rvu->dev,
+ "Reverting RX rule for PF%d failed(%d)\n",
+ pf, err);
+
+ /* Disable LBK link */
+ rvu_switch_enable_lbk_link(rvu, pcifunc, false);
+
+ rvu_get_pf_numvfs(rvu, pf, &numvfs, NULL);
+ for (vf = 0; vf < numvfs; vf++) {
+ pcifunc = pf << 10 | ((vf + 1) & 0x3FF);
+ err = rvu_switch_install_rx_rule(rvu, pcifunc, 0xFFF);
+ if (err)
+ dev_err(rvu->dev,
+ "Reverting RX rule for PF%dVF%d failed(%d)\n",
+ pf, vf, err);
+
+ rvu_switch_enable_lbk_link(rvu, pcifunc, false);
+ }
+ }
+
+ uninstall_req.start = rswitch->start_entry;
+ uninstall_req.end = rswitch->start_entry + rswitch->used_entries - 1;
+ free_req.all = 1;
+ rvu_mbox_handler_npc_delete_flow(rvu, &uninstall_req, &uninstall_rsp);
+ rvu_mbox_handler_npc_mcam_free_entry(rvu, &free_req, &rsp);
+ rswitch->used_entries = 0;
+ kfree(rswitch->entry2pcifunc);
+}
+
+void rvu_switch_update_rules(struct rvu *rvu, u16 pcifunc)
+{
+ struct rvu_switch *rswitch = &rvu->rswitch;
+ u32 max = rswitch->used_entries;
+ u16 entry;
+
+ if (!rswitch->used_entries)
+ return;
+
+ for (entry = 0; entry < max; entry++) {
+ if (rswitch->entry2pcifunc[entry] == pcifunc)
+ break;
+ }
+
+ if (entry >= max)
+ return;
+
+ rvu_switch_install_tx_rule(rvu, pcifunc, rswitch->start_entry + entry);
+ rvu_switch_install_rx_rule(rvu, pcifunc, 0x0);
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c
new file mode 100644
index 000000000..775fd4c35
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#define CREATE_TRACE_POINTS
+#include "rvu_trace.h"
+
+EXPORT_TRACEPOINT_SYMBOL(otx2_msg_alloc);
+EXPORT_TRACEPOINT_SYMBOL(otx2_msg_interrupt);
+EXPORT_TRACEPOINT_SYMBOL(otx2_msg_process);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h
new file mode 100644
index 000000000..28984d0e8
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rvu
+
+#if !defined(__RVU_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __RVU_TRACE_H
+
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+#include <linux/pci.h>
+
+#include "mbox.h"
+
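+/* Drivers emit these events via the trace_<event>() helpers that the
+ * TRACE_EVENT() macros below generate, e.g. trace_otx2_msg_alloc(pdev, id,
+ * size) (illustrative call site).
+ */
+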
+TRACE_EVENT(otx2_msg_alloc,
+ TP_PROTO(const struct pci_dev *pdev, u16 id, u64 size),
+ TP_ARGS(pdev, id, size),
+ TP_STRUCT__entry(__string(dev, pci_name(pdev))
+ __field(u16, id)
+ __field(u64, size)
+ ),
+ TP_fast_assign(__assign_str(dev, pci_name(pdev));
+ __entry->id = id;
+ __entry->size = size;
+ ),
+ TP_printk("[%s] msg:(%s) size:%lld\n", __get_str(dev),
+ otx2_mbox_id2name(__entry->id), __entry->size)
+);
+
+TRACE_EVENT(otx2_msg_send,
+ TP_PROTO(const struct pci_dev *pdev, u16 num_msgs, u64 msg_size),
+ TP_ARGS(pdev, num_msgs, msg_size),
+ TP_STRUCT__entry(__string(dev, pci_name(pdev))
+ __field(u16, num_msgs)
+ __field(u64, msg_size)
+ ),
+ TP_fast_assign(__assign_str(dev, pci_name(pdev));
+ __entry->num_msgs = num_msgs;
+ __entry->msg_size = msg_size;
+ ),
+ TP_printk("[%s] sent %d msg(s) of size:%lld\n", __get_str(dev),
+ __entry->num_msgs, __entry->msg_size)
+);
+
+TRACE_EVENT(otx2_msg_check,
+ TP_PROTO(const struct pci_dev *pdev, u16 reqid, u16 rspid, int rc),
+ TP_ARGS(pdev, reqid, rspid, rc),
+ TP_STRUCT__entry(__string(dev, pci_name(pdev))
+ __field(u16, reqid)
+ __field(u16, rspid)
+ __field(int, rc)
+ ),
+ TP_fast_assign(__assign_str(dev, pci_name(pdev));
+ __entry->reqid = reqid;
+ __entry->rspid = rspid;
+ __entry->rc = rc;
+ ),
+ TP_printk("[%s] req->id:0x%x rsp->id:0x%x resp_code:%d\n",
+ __get_str(dev), __entry->reqid,
+ __entry->rspid, __entry->rc)
+);
+
+TRACE_EVENT(otx2_msg_interrupt,
+ TP_PROTO(const struct pci_dev *pdev, const char *msg, u64 intr),
+ TP_ARGS(pdev, msg, intr),
+ TP_STRUCT__entry(__string(dev, pci_name(pdev))
+ __string(str, msg)
+ __field(u64, intr)
+ ),
+ TP_fast_assign(__assign_str(dev, pci_name(pdev));
+ __assign_str(str, msg);
+ __entry->intr = intr;
+ ),
+ TP_printk("[%s] mbox interrupt %s (0x%llx)\n", __get_str(dev),
+ __get_str(str), __entry->intr)
+);
+
+TRACE_EVENT(otx2_msg_process,
+ TP_PROTO(const struct pci_dev *pdev, u16 id, int err),
+ TP_ARGS(pdev, id, err),
+ TP_STRUCT__entry(__string(dev, pci_name(pdev))
+ __field(u16, id)
+ __field(int, err)
+ ),
+ TP_fast_assign(__assign_str(dev, pci_name(pdev));
+ __entry->id = id;
+ __entry->err = err;
+ ),
+ TP_printk("[%s] msg:(%s) error:%d\n", __get_str(dev),
+ otx2_mbox_id2name(__entry->id), __entry->err)
+);
+
+#endif /* __RVU_TRACE_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE rvu_trace
+
+#include <trace/define_trace.h>
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
new file mode 100644
index 000000000..5664f768c
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for Marvell's RVU Ethernet device drivers
+#
+
+obj-$(CONFIG_OCTEONTX2_PF) += rvu_nicpf.o otx2_ptp.o
+obj-$(CONFIG_OCTEONTX2_VF) += rvu_nicvf.o otx2_ptp.o
+
+rvu_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \
+ otx2_flows.o otx2_tc.o cn10k.o otx2_dmac_flt.o \
+ otx2_devlink.o qos_sq.o qos.o
+rvu_nicvf-y := otx2_vf.o otx2_devlink.o
+
+rvu_nicpf-$(CONFIG_DCB) += otx2_dcbnl.o
+rvu_nicvf-$(CONFIG_DCB) += otx2_dcbnl.o
+rvu_nicpf-$(CONFIG_MACSEC) += cn10k_macsec.o
+
+ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
new file mode 100644
index 000000000..c1c99d705
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
@@ -0,0 +1,487 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Ethernet driver
+ *
+ * Copyright (C) 2021 Marvell.
+ *
+ */
+
+#include "cn10k.h"
+#include "otx2_reg.h"
+#include "otx2_struct.h"
+
+static struct dev_hw_ops otx2_hw_ops = {
+ .sq_aq_init = otx2_sq_aq_init,
+ .sqe_flush = otx2_sqe_flush,
+ .aura_freeptr = otx2_aura_freeptr,
+ .refill_pool_ptrs = otx2_refill_pool_ptrs,
+};
+
+static struct dev_hw_ops cn10k_hw_ops = {
+ .sq_aq_init = cn10k_sq_aq_init,
+ .sqe_flush = cn10k_sqe_flush,
+ .aura_freeptr = cn10k_aura_freeptr,
+ .refill_pool_ptrs = cn10k_refill_pool_ptrs,
+};
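+
+/* cn10k_lmtst_init() below selects one of these ops tables based on the
+ * CN10K_LMTST capability flag.
+ */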
+
+int cn10k_lmtst_init(struct otx2_nic *pfvf)
+{
+ struct lmtst_tbl_setup_req *req;
+ struct otx2_lmt_info *lmt_info;
+ int err, cpu;
+
+ if (!test_bit(CN10K_LMTST, &pfvf->hw.cap_flag)) {
+ pfvf->hw_ops = &otx2_hw_ops;
+ return 0;
+ }
+
+ pfvf->hw_ops = &cn10k_hw_ops;
+	/* Total LMTLINES = num_online_cpus() * 32 (for burst flush). */
+ pfvf->tot_lmt_lines = (num_online_cpus() * LMT_BURST_SIZE);
+ pfvf->hw.lmt_info = alloc_percpu(struct otx2_lmt_info);
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_lmtst_tbl_setup(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ req->use_local_lmt_region = true;
+
+ err = qmem_alloc(pfvf->dev, &pfvf->dync_lmt, pfvf->tot_lmt_lines,
+ LMT_LINE_SIZE);
+ if (err) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
+ }
+ pfvf->hw.lmt_base = (u64 *)pfvf->dync_lmt->base;
+ req->lmt_iova = (u64)pfvf->dync_lmt->iova;
+
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
+
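+	/* Carve the LMT region into per-CPU chunks: each CPU owns
+	 * LMT_BURST_SIZE consecutive LMT lines and IDs, e.g. (illustrative)
+	 * with a burst size of 32, CPU 1 uses IDs 32..63 starting at
+	 * lmt_base + 32 * LMT_LINE_SIZE.
+	 */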
+ for_each_possible_cpu(cpu) {
+ lmt_info = per_cpu_ptr(pfvf->hw.lmt_info, cpu);
+ lmt_info->lmt_addr = ((u64)pfvf->hw.lmt_base +
+ (cpu * LMT_BURST_SIZE * LMT_LINE_SIZE));
+ lmt_info->lmt_id = cpu * LMT_BURST_SIZE;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(cn10k_lmtst_init);
+
+int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
+{
+ struct nix_cn10k_aq_enq_req *aq;
+ struct otx2_nic *pfvf = dev;
+
+ /* Get memory to put this msg */
+ aq = otx2_mbox_alloc_msg_nix_cn10k_aq_enq(&pfvf->mbox);
+ if (!aq)
+ return -ENOMEM;
+
+ aq->sq.cq = pfvf->hw.rx_queues + qidx;
+ aq->sq.max_sqe_size = NIX_MAXSQESZ_W16; /* 128 byte */
+ aq->sq.cq_ena = 1;
+ aq->sq.ena = 1;
+ aq->sq.smq = otx2_get_smq_idx(pfvf, qidx);
+ aq->sq.smq_rr_weight = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen);
+ aq->sq.default_chan = pfvf->hw.tx_chan_base;
+ aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */
+ aq->sq.sqb_aura = sqb_aura;
+ aq->sq.sq_int_ena = NIX_SQINT_BITS;
+ aq->sq.qint_idx = 0;
+	/* Due to the pipelining impact, a minimum of 2000 unused SQ CQEs
+	 * needs to be maintained to avoid CQ overflow.
+	 */
+ aq->sq.cq_limit = ((SEND_CQ_SKID * 256) / (pfvf->qset.sqe_cnt));
+
+ /* Fill AQ info */
+ aq->qidx = qidx;
+ aq->ctype = NIX_AQ_CTYPE_SQ;
+ aq->op = NIX_AQ_INSTOP_INIT;
+
+ return otx2_sync_mbox_msg(&pfvf->mbox);
+}
+
+#define NPA_MAX_BURST 16
+int cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
+{
+ struct otx2_nic *pfvf = dev;
+ int cnt = cq->pool_ptrs;
+ u64 ptrs[NPA_MAX_BURST];
+ dma_addr_t bufptr;
+ int num_ptrs = 1;
+
+ /* Refill pool with new buffers */
+ while (cq->pool_ptrs) {
+ if (otx2_alloc_buffer(pfvf, cq, &bufptr)) {
+ if (num_ptrs--)
+ __cn10k_aura_freeptr(pfvf, cq->cq_idx, ptrs,
+ num_ptrs);
+ break;
+ }
+ cq->pool_ptrs--;
+ ptrs[num_ptrs] = (u64)bufptr + OTX2_HEAD_ROOM;
+ num_ptrs++;
+ if (num_ptrs == NPA_MAX_BURST || cq->pool_ptrs == 0) {
+ __cn10k_aura_freeptr(pfvf, cq->cq_idx, ptrs,
+ num_ptrs);
+ num_ptrs = 1;
+ }
+ }
+ return cnt - cq->pool_ptrs;
+}
+
+void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx)
+{
+ struct otx2_lmt_info *lmt_info;
+ struct otx2_nic *pfvf = dev;
+ u64 val = 0, tar_addr = 0;
+
+ lmt_info = per_cpu_ptr(pfvf->hw.lmt_info, smp_processor_id());
+ /* FIXME: val[0:10] LMT_ID.
+ * [12:15] no of LMTST - 1 in the burst.
+ * [19:63] data size of each LMTST in the burst except first.
+ */
+ val = (lmt_info->lmt_id & 0x7FF);
+ /* Target address for LMTST flush tells HW how many 128bit
+ * words are present.
+ * tar_addr[6:4] size of first LMTST - 1 in units of 128b.
+ */
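+	/* Illustrative example: a single 128-byte LMTST gives
+	 * (size / 16) - 1 = 7, so bits [6:4] of tar_addr are set to 7 and
+	 * val carries only this CPU's LMT_ID.
+	 */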
+ tar_addr |= sq->io_addr | (((size / 16) - 1) & 0x7) << 4;
+ dma_wmb();
+ memcpy((u64 *)lmt_info->lmt_addr, sq->sqe_base, size);
+ cn10k_lmt_flush(val, tar_addr);
+
+ sq->head++;
+ sq->head &= (sq->sqe_cnt - 1);
+}
+
+int cn10k_free_all_ipolicers(struct otx2_nic *pfvf)
+{
+ struct nix_bandprof_free_req *req;
+ int rc;
+
+ if (is_dev_otx2(pfvf->pdev))
+ return 0;
+
+ mutex_lock(&pfvf->mbox.lock);
+
+ req = otx2_mbox_alloc_msg_nix_bandprof_free(&pfvf->mbox);
+ if (!req) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ /* Free all bandwidth profiles allocated */
+ req->free_all = true;
+
+ rc = otx2_sync_mbox_msg(&pfvf->mbox);
+out:
+ mutex_unlock(&pfvf->mbox.lock);
+ return rc;
+}
+
+int cn10k_alloc_leaf_profile(struct otx2_nic *pfvf, u16 *leaf)
+{
+ struct nix_bandprof_alloc_req *req;
+ struct nix_bandprof_alloc_rsp *rsp;
+ int rc;
+
+ req = otx2_mbox_alloc_msg_nix_bandprof_alloc(&pfvf->mbox);
+ if (!req)
+ return -ENOMEM;
+
+ req->prof_count[BAND_PROF_LEAF_LAYER] = 1;
+
+ rc = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (rc)
+ goto out;
+
+ rsp = (struct nix_bandprof_alloc_rsp *)
+ otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
+ if (!rsp->prof_count[BAND_PROF_LEAF_LAYER]) {
+ rc = -EIO;
+ goto out;
+ }
+
+ *leaf = rsp->prof_idx[BAND_PROF_LEAF_LAYER][0];
+out:
+ if (rc) {
+ dev_warn(pfvf->dev,
+ "Failed to allocate ingress bandwidth policer\n");
+ }
+
+ return rc;
+}
+
+int cn10k_alloc_matchall_ipolicer(struct otx2_nic *pfvf)
+{
+ struct otx2_hw *hw = &pfvf->hw;
+ int ret;
+
+ mutex_lock(&pfvf->mbox.lock);
+
+ ret = cn10k_alloc_leaf_profile(pfvf, &hw->matchall_ipolicer);
+
+ mutex_unlock(&pfvf->mbox.lock);
+
+ return ret;
+}
+
+#define POLICER_TIMESTAMP 1 /* 1 second */
+#define MAX_RATE_EXP 22 /* Valid rate exponent range: 0 - 22 */
+
+static void cn10k_get_ingress_burst_cfg(u32 burst, u32 *burst_exp,
+ u32 *burst_mantissa)
+{
+ int tmp;
+
+	/* Burst is calculated as
+	 * (1 + [BURST_MANTISSA]/256) * 2^[BURST_EXPONENT]
+	 * This is the upper limit on the number of tokens (bytes) that
+	 * can be accumulated in the bucket.
+	 */
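+	/* Illustrative example: burst = 3000 bytes gives
+	 * burst_exp = ilog2(3000) = 11, a remainder of 3000 - 2048 = 952 and
+	 * burst_mantissa = 952 / 2^(11 - 8) = 119, which reconstructs as
+	 * (1 + 119/256) * 2^11 = 3000 bytes.
+	 */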
+ *burst_exp = ilog2(burst);
+ if (burst < 256) {
+ /* No float: can't express mantissa in this case */
+ *burst_mantissa = 0;
+ return;
+ }
+
+ if (*burst_exp > MAX_RATE_EXP)
+ *burst_exp = MAX_RATE_EXP;
+
+	/* Calculate the mantissa:
+	 * find the remaining bytes 'burst - 2^burst_exp', then
+	 * mantissa = (remaining bytes) / 2^(burst_exp - 8)
+	 */
+ tmp = burst - rounddown_pow_of_two(burst);
+ *burst_mantissa = tmp / (1UL << (*burst_exp - 8));
+}
+
+static void cn10k_get_ingress_rate_cfg(u64 rate, u32 *rate_exp,
+ u32 *rate_mantissa, u32 *rdiv)
+{
+ u32 div = 0;
+ u32 exp = 0;
+ u64 tmp;
+
+	/* Figure out the mantissa, exponent and divider from the given max
+	 * packet rate.
+	 *
+	 * To achieve the desired rate, HW adds
+	 * (1 + [RATE_MANTISSA]/256) * 2^[RATE_EXPONENT] tokens (bytes) at every
+	 * policer timeunit * 2^rdiv, i.e. 2 * 2^rdiv usecs, to the token bucket.
+	 * Here the policer timeunit is 2 usecs and the rate is in bits per sec.
+	 * Since floating point cannot be used, the algorithm below uses a
+	 * 1000000 scale factor to support rates up to 100Gbps.
+	 */
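+	/* Illustrative example: rate = 1 Gbps gives tmp = 64000000000, which
+	 * is halved seven times down to 500000000, so rate_exp = 7, rdiv = 0
+	 * and rate_mantissa = (500000000 - 256000000) / 1000000 = 244; the
+	 * bucket then gains (1 + 244/256) * 2^7 = 250 bytes every 2 usecs,
+	 * i.e. 1 Gbps.
+	 */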
+ tmp = rate * 32 * 2;
+ if (tmp < 256000000) {
+ while (tmp < 256000000) {
+ tmp = tmp * 2;
+ div++;
+ }
+ } else {
+ for (exp = 0; tmp >= 512000000 && exp <= MAX_RATE_EXP; exp++)
+ tmp = tmp / 2;
+
+ if (exp > MAX_RATE_EXP)
+ exp = MAX_RATE_EXP;
+ }
+
+ *rate_mantissa = (tmp - 256000000) / 1000000;
+ *rate_exp = exp;
+ *rdiv = div;
+}
+
+int cn10k_map_unmap_rq_policer(struct otx2_nic *pfvf, int rq_idx,
+ u16 policer, bool map)
+{
+ struct nix_cn10k_aq_enq_req *aq;
+
+ aq = otx2_mbox_alloc_msg_nix_cn10k_aq_enq(&pfvf->mbox);
+ if (!aq)
+ return -ENOMEM;
+
+ /* Enable policing and set the bandwidth profile (policer) index */
+ if (map)
+ aq->rq.policer_ena = 1;
+ else
+ aq->rq.policer_ena = 0;
+ aq->rq_mask.policer_ena = 1;
+
+ aq->rq.band_prof_id = policer;
+ aq->rq_mask.band_prof_id = GENMASK(9, 0);
+
+ /* Fill AQ info */
+ aq->qidx = rq_idx;
+ aq->ctype = NIX_AQ_CTYPE_RQ;
+ aq->op = NIX_AQ_INSTOP_WRITE;
+
+ return otx2_sync_mbox_msg(&pfvf->mbox);
+}
+
+int cn10k_free_leaf_profile(struct otx2_nic *pfvf, u16 leaf)
+{
+ struct nix_bandprof_free_req *req;
+
+ req = otx2_mbox_alloc_msg_nix_bandprof_free(&pfvf->mbox);
+ if (!req)
+ return -ENOMEM;
+
+ req->prof_count[BAND_PROF_LEAF_LAYER] = 1;
+ req->prof_idx[BAND_PROF_LEAF_LAYER][0] = leaf;
+
+ return otx2_sync_mbox_msg(&pfvf->mbox);
+}
+
+int cn10k_free_matchall_ipolicer(struct otx2_nic *pfvf)
+{
+ struct otx2_hw *hw = &pfvf->hw;
+ int qidx, rc;
+
+ mutex_lock(&pfvf->mbox.lock);
+
+ /* Remove RQ's policer mapping */
+ for (qidx = 0; qidx < hw->rx_queues; qidx++)
+ cn10k_map_unmap_rq_policer(pfvf, qidx,
+ hw->matchall_ipolicer, false);
+
+ rc = cn10k_free_leaf_profile(pfvf, hw->matchall_ipolicer);
+
+ mutex_unlock(&pfvf->mbox.lock);
+ return rc;
+}
+
+int cn10k_set_ipolicer_rate(struct otx2_nic *pfvf, u16 profile,
+ u32 burst, u64 rate, bool pps)
+{
+ struct nix_cn10k_aq_enq_req *aq;
+ u32 burst_exp, burst_mantissa;
+ u32 rate_exp, rate_mantissa;
+ u32 rdiv;
+
+ /* Get exponent and mantissa values for the desired rate */
+ cn10k_get_ingress_burst_cfg(burst, &burst_exp, &burst_mantissa);
+ cn10k_get_ingress_rate_cfg(rate, &rate_exp, &rate_mantissa, &rdiv);
+
+ /* Init bandwidth profile */
+ aq = otx2_mbox_alloc_msg_nix_cn10k_aq_enq(&pfvf->mbox);
+ if (!aq)
+ return -ENOMEM;
+
+ /* Set initial color mode to blind */
+ aq->prof.icolor = 0x03;
+ aq->prof_mask.icolor = 0x03;
+
+ /* Set rate and burst values */
+ aq->prof.cir_exponent = rate_exp;
+ aq->prof_mask.cir_exponent = 0x1F;
+
+ aq->prof.cir_mantissa = rate_mantissa;
+ aq->prof_mask.cir_mantissa = 0xFF;
+
+ aq->prof.cbs_exponent = burst_exp;
+ aq->prof_mask.cbs_exponent = 0x1F;
+
+ aq->prof.cbs_mantissa = burst_mantissa;
+ aq->prof_mask.cbs_mantissa = 0xFF;
+
+ aq->prof.rdiv = rdiv;
+ aq->prof_mask.rdiv = 0xF;
+
+ if (pps) {
+		/* The number of tokens decremented is calculated according to
+		 * the following equation:
+		 * max([ LMODE ? 0 : (packet_length - LXPTR)] +
+		 *     ([ADJUST_MANTISSA]/256 - 1) * 2^[ADJUST_EXPONENT],
+		 *     1/256)
+		 * If LMODE is 1 then rate limiting is based on PPS,
+		 * otherwise on bps.
+		 * The aim of the ADJUST value is to specify a token cost per
+		 * packet, in contrast to the packet length which specifies a
+		 * cost per byte. To rate limit based on PPS, the adjust
+		 * mantissa is set to 384 and the exponent to 1 so that the
+		 * number of tokens decremented becomes 1, i.e. 1 token per
+		 * packet.
+		 */
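+		/* Illustrative check: with LMODE = 1 the length term is 0 and
+		 * (384/256 - 1) * 2^1 = 1, so exactly one token is consumed
+		 * per packet regardless of its size.
+		 */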
+ aq->prof.adjust_exponent = 1;
+ aq->prof_mask.adjust_exponent = 0x1F;
+
+ aq->prof.adjust_mantissa = 384;
+ aq->prof_mask.adjust_mantissa = 0x1FF;
+
+ aq->prof.lmode = 0x1;
+ aq->prof_mask.lmode = 0x1;
+ }
+
+ /* Two rate three color marker
+ * With PEIR/EIR set to zero, color will be either green or red
+ */
+ aq->prof.meter_algo = 2;
+ aq->prof_mask.meter_algo = 0x3;
+
+ aq->prof.rc_action = NIX_RX_BAND_PROF_ACTIONRESULT_DROP;
+ aq->prof_mask.rc_action = 0x3;
+
+ aq->prof.yc_action = NIX_RX_BAND_PROF_ACTIONRESULT_PASS;
+ aq->prof_mask.yc_action = 0x3;
+
+ aq->prof.gc_action = NIX_RX_BAND_PROF_ACTIONRESULT_PASS;
+ aq->prof_mask.gc_action = 0x3;
+
+	/* Setting the exponent to 24 and the mantissa to 0 configures
+	 * the bucket with zero values, leaving it unused. The Peak
+	 * Information Rate and Excess Information Rate buckets are
+	 * unused here.
+	 */
+ aq->prof.peir_exponent = 24;
+ aq->prof_mask.peir_exponent = 0x1F;
+
+ aq->prof.peir_mantissa = 0;
+ aq->prof_mask.peir_mantissa = 0xFF;
+
+ aq->prof.pebs_exponent = 24;
+ aq->prof_mask.pebs_exponent = 0x1F;
+
+ aq->prof.pebs_mantissa = 0;
+ aq->prof_mask.pebs_mantissa = 0xFF;
+
+ aq->prof.hl_en = 0;
+ aq->prof_mask.hl_en = 1;
+
+ /* Fill AQ info */
+ aq->qidx = profile;
+ aq->ctype = NIX_AQ_CTYPE_BANDPROF;
+ aq->op = NIX_AQ_INSTOP_WRITE;
+
+ return otx2_sync_mbox_msg(&pfvf->mbox);
+}
+
+int cn10k_set_matchall_ipolicer_rate(struct otx2_nic *pfvf,
+ u32 burst, u64 rate)
+{
+ struct otx2_hw *hw = &pfvf->hw;
+ int qidx, rc;
+
+ mutex_lock(&pfvf->mbox.lock);
+
+ rc = cn10k_set_ipolicer_rate(pfvf, hw->matchall_ipolicer, burst,
+ rate, false);
+ if (rc)
+ goto out;
+
+ for (qidx = 0; qidx < hw->rx_queues; qidx++) {
+ rc = cn10k_map_unmap_rq_policer(pfvf, qidx,
+ hw->matchall_ipolicer, true);
+ if (rc)
+ break;
+ }
+
+out:
+ mutex_unlock(&pfvf->mbox.lock);
+ return rc;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h
new file mode 100644
index 000000000..c1861f7de
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell RVU Ethernet driver
+ *
+ * Copyright (C) 2021 Marvell.
+ *
+ */
+
+#ifndef CN10K_H
+#define CN10K_H
+
+#include "otx2_common.h"
+
+static inline int mtu_to_dwrr_weight(struct otx2_nic *pfvf, int mtu)
+{
+ u32 weight;
+
+ /* On OTx2, since AF returns DWRR_MTU as '1', this logic
+ * will work on those silicons as well.
+ */
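+	/* e.g. (illustrative) dwrr_mtu = 8192 and mtu = 9600 give a weight of 2 */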
+ weight = mtu / pfvf->hw.dwrr_mtu;
+ if (mtu % pfvf->hw.dwrr_mtu)
+ weight += 1;
+
+ return weight;
+}
+
+int cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
+void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx);
+int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura);
+int cn10k_lmtst_init(struct otx2_nic *pfvf);
+int cn10k_free_all_ipolicers(struct otx2_nic *pfvf);
+int cn10k_alloc_matchall_ipolicer(struct otx2_nic *pfvf);
+int cn10k_free_matchall_ipolicer(struct otx2_nic *pfvf);
+int cn10k_set_matchall_ipolicer_rate(struct otx2_nic *pfvf,
+ u32 burst, u64 rate);
+int cn10k_map_unmap_rq_policer(struct otx2_nic *pfvf, int rq_idx,
+ u16 policer, bool map);
+int cn10k_alloc_leaf_profile(struct otx2_nic *pfvf, u16 *leaf);
+int cn10k_set_ipolicer_rate(struct otx2_nic *pfvf, u16 profile,
+ u32 burst, u64 rate, bool pps);
+int cn10k_free_leaf_profile(struct otx2_nic *pfvf, u16 leaf);
+#endif /* CN10K_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c
new file mode 100644
index 000000000..6cc7a7896
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c
@@ -0,0 +1,1816 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell MACSEC hardware offload driver
+ *
+ * Copyright (C) 2022 Marvell.
+ */
+
+#include <crypto/skcipher.h>
+#include <linux/rtnetlink.h>
+#include <linux/bitfield.h>
+#include "otx2_common.h"
+
+#define MCS_TCAM0_MAC_DA_MASK GENMASK_ULL(47, 0)
+#define MCS_TCAM0_MAC_SA_MASK GENMASK_ULL(63, 48)
+#define MCS_TCAM1_MAC_SA_MASK GENMASK_ULL(31, 0)
+#define MCS_TCAM1_ETYPE_MASK GENMASK_ULL(47, 32)
+
+#define MCS_SA_MAP_MEM_SA_USE BIT_ULL(9)
+
+#define MCS_RX_SECY_PLCY_RW_MASK GENMASK_ULL(49, 18)
+#define MCS_RX_SECY_PLCY_RP BIT_ULL(17)
+#define MCS_RX_SECY_PLCY_AUTH_ENA BIT_ULL(16)
+#define MCS_RX_SECY_PLCY_CIP GENMASK_ULL(8, 5)
+#define MCS_RX_SECY_PLCY_VAL GENMASK_ULL(2, 1)
+#define MCS_RX_SECY_PLCY_ENA BIT_ULL(0)
+
+#define MCS_TX_SECY_PLCY_MTU GENMASK_ULL(43, 28)
+#define MCS_TX_SECY_PLCY_ST_TCI GENMASK_ULL(27, 22)
+#define MCS_TX_SECY_PLCY_ST_OFFSET GENMASK_ULL(21, 15)
+#define MCS_TX_SECY_PLCY_INS_MODE BIT_ULL(14)
+#define MCS_TX_SECY_PLCY_AUTH_ENA BIT_ULL(13)
+#define MCS_TX_SECY_PLCY_CIP GENMASK_ULL(5, 2)
+#define MCS_TX_SECY_PLCY_PROTECT BIT_ULL(1)
+#define MCS_TX_SECY_PLCY_ENA BIT_ULL(0)
+
+#define MCS_GCM_AES_128 0
+#define MCS_GCM_AES_256 1
+#define MCS_GCM_AES_XPN_128 2
+#define MCS_GCM_AES_XPN_256 3
+
+#define MCS_TCI_ES 0x40 /* end station */
+#define MCS_TCI_SC 0x20 /* SCI present */
+#define MCS_TCI_SCB 0x10 /* epon */
+#define MCS_TCI_E 0x08 /* encryption */
+#define MCS_TCI_C 0x04 /* changed text */
+
+#define CN10K_MAX_HASH_LEN 16
+#define CN10K_MAX_SAK_LEN 32
+
+static int cn10k_ecb_aes_encrypt(struct otx2_nic *pfvf, u8 *sak,
+ u16 sak_len, u8 *hash)
+{
+ u8 data[CN10K_MAX_HASH_LEN] = { 0 };
+ struct skcipher_request *req = NULL;
+ struct scatterlist sg_src, sg_dst;
+ struct crypto_skcipher *tfm;
+ DECLARE_CRYPTO_WAIT(wait);
+ int err;
+
+ tfm = crypto_alloc_skcipher("ecb(aes)", 0, 0);
+ if (IS_ERR(tfm)) {
+ dev_err(pfvf->dev, "failed to allocate transform for ecb-aes\n");
+ return PTR_ERR(tfm);
+ }
+
+ req = skcipher_request_alloc(tfm, GFP_KERNEL);
+ if (!req) {
+ dev_err(pfvf->dev, "failed to allocate request for skcipher\n");
+ err = -ENOMEM;
+ goto free_tfm;
+ }
+
+ err = crypto_skcipher_setkey(tfm, sak, sak_len);
+ if (err) {
+ dev_err(pfvf->dev, "failed to set key for skcipher\n");
+ goto free_req;
+ }
+
+ /* build sg list */
+ sg_init_one(&sg_src, data, CN10K_MAX_HASH_LEN);
+ sg_init_one(&sg_dst, hash, CN10K_MAX_HASH_LEN);
+
+ skcipher_request_set_callback(req, 0, crypto_req_done, &wait);
+ skcipher_request_set_crypt(req, &sg_src, &sg_dst,
+ CN10K_MAX_HASH_LEN, NULL);
+
+ err = crypto_skcipher_encrypt(req);
+ err = crypto_wait_req(err, &wait);
+
+free_req:
+ skcipher_request_free(req);
+free_tfm:
+ crypto_free_skcipher(tfm);
+ return err;
+}
+
+static struct cn10k_mcs_txsc *cn10k_mcs_get_txsc(struct cn10k_mcs_cfg *cfg,
+ struct macsec_secy *secy)
+{
+ struct cn10k_mcs_txsc *txsc;
+
+ list_for_each_entry(txsc, &cfg->txsc_list, entry) {
+ if (txsc->sw_secy == secy)
+ return txsc;
+ }
+
+ return NULL;
+}
+
+static struct cn10k_mcs_rxsc *cn10k_mcs_get_rxsc(struct cn10k_mcs_cfg *cfg,
+ struct macsec_secy *secy,
+ struct macsec_rx_sc *rx_sc)
+{
+ struct cn10k_mcs_rxsc *rxsc;
+
+ list_for_each_entry(rxsc, &cfg->rxsc_list, entry) {
+ if (rxsc->sw_rxsc == rx_sc && rxsc->sw_secy == secy)
+ return rxsc;
+ }
+
+ return NULL;
+}
+
+static const char *rsrc_name(enum mcs_rsrc_type rsrc_type)
+{
+ switch (rsrc_type) {
+ case MCS_RSRC_TYPE_FLOWID:
+ return "FLOW";
+ case MCS_RSRC_TYPE_SC:
+ return "SC";
+ case MCS_RSRC_TYPE_SECY:
+ return "SECY";
+ case MCS_RSRC_TYPE_SA:
+ return "SA";
+ default:
+ return "Unknown";
+	}
+}
+
+static int cn10k_mcs_alloc_rsrc(struct otx2_nic *pfvf, enum mcs_direction dir,
+ enum mcs_rsrc_type type, u16 *rsrc_id)
+{
+ struct mbox *mbox = &pfvf->mbox;
+ struct mcs_alloc_rsrc_req *req;
+ struct mcs_alloc_rsrc_rsp *rsp;
+ int ret = -ENOMEM;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_alloc_resources(mbox);
+ if (!req)
+ goto fail;
+
+ req->rsrc_type = type;
+ req->rsrc_cnt = 1;
+ req->dir = dir;
+
+ ret = otx2_sync_mbox_msg(mbox);
+ if (ret)
+ goto fail;
+
+ rsp = (struct mcs_alloc_rsrc_rsp *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
+ 0, &req->hdr);
+ if (IS_ERR(rsp) || req->rsrc_cnt != rsp->rsrc_cnt ||
+ req->rsrc_type != rsp->rsrc_type || req->dir != rsp->dir) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ switch (rsp->rsrc_type) {
+ case MCS_RSRC_TYPE_FLOWID:
+ *rsrc_id = rsp->flow_ids[0];
+ break;
+ case MCS_RSRC_TYPE_SC:
+ *rsrc_id = rsp->sc_ids[0];
+ break;
+ case MCS_RSRC_TYPE_SECY:
+ *rsrc_id = rsp->secy_ids[0];
+ break;
+ case MCS_RSRC_TYPE_SA:
+ *rsrc_id = rsp->sa_ids[0];
+ break;
+ default:
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ mutex_unlock(&mbox->lock);
+
+ return 0;
+fail:
+ dev_err(pfvf->dev, "Failed to allocate %s %s resource\n",
+ dir == MCS_TX ? "TX" : "RX", rsrc_name(type));
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static void cn10k_mcs_free_rsrc(struct otx2_nic *pfvf, enum mcs_direction dir,
+ enum mcs_rsrc_type type, u16 hw_rsrc_id,
+ bool all)
+{
+ struct mcs_clear_stats *clear_req;
+ struct mbox *mbox = &pfvf->mbox;
+ struct mcs_free_rsrc_req *req;
+
+ mutex_lock(&mbox->lock);
+
+ clear_req = otx2_mbox_alloc_msg_mcs_clear_stats(mbox);
+ if (!clear_req)
+ goto fail;
+
+ clear_req->id = hw_rsrc_id;
+ clear_req->type = type;
+ clear_req->dir = dir;
+
+ req = otx2_mbox_alloc_msg_mcs_free_resources(mbox);
+ if (!req)
+ goto fail;
+
+ req->rsrc_id = hw_rsrc_id;
+ req->rsrc_type = type;
+ req->dir = dir;
+ if (all)
+ req->all = 1;
+
+ if (otx2_sync_mbox_msg(&pfvf->mbox))
+ goto fail;
+
+ mutex_unlock(&mbox->lock);
+
+ return;
+fail:
+ dev_err(pfvf->dev, "Failed to free %s %s resource\n",
+ dir == MCS_TX ? "TX" : "RX", rsrc_name(type));
+ mutex_unlock(&mbox->lock);
+}
+
+static int cn10k_mcs_alloc_txsa(struct otx2_nic *pfvf, u16 *hw_sa_id)
+{
+ return cn10k_mcs_alloc_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SA, hw_sa_id);
+}
+
+static int cn10k_mcs_alloc_rxsa(struct otx2_nic *pfvf, u16 *hw_sa_id)
+{
+ return cn10k_mcs_alloc_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SA, hw_sa_id);
+}
+
+static void cn10k_mcs_free_txsa(struct otx2_nic *pfvf, u16 hw_sa_id)
+{
+ cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SA, hw_sa_id, false);
+}
+
+static void cn10k_mcs_free_rxsa(struct otx2_nic *pfvf, u16 hw_sa_id)
+{
+ cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SA, hw_sa_id, false);
+}
+
+static int cn10k_mcs_write_rx_secy(struct otx2_nic *pfvf,
+ struct macsec_secy *secy, u8 hw_secy_id)
+{
+ struct mcs_secy_plcy_write_req *req;
+ struct mbox *mbox = &pfvf->mbox;
+ u64 policy;
+ u8 cipher;
+ int ret;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_secy_plcy_write(mbox);
+ if (!req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ policy = FIELD_PREP(MCS_RX_SECY_PLCY_RW_MASK, secy->replay_window);
+ if (secy->replay_protect)
+ policy |= MCS_RX_SECY_PLCY_RP;
+
+ policy |= MCS_RX_SECY_PLCY_AUTH_ENA;
+
+ switch (secy->key_len) {
+ case 16:
+ cipher = secy->xpn ? MCS_GCM_AES_XPN_128 : MCS_GCM_AES_128;
+ break;
+ case 32:
+ cipher = secy->xpn ? MCS_GCM_AES_XPN_256 : MCS_GCM_AES_256;
+ break;
+ default:
+ cipher = MCS_GCM_AES_128;
+ dev_warn(pfvf->dev, "Unsupported key length\n");
+ break;
+ }
+
+ policy |= FIELD_PREP(MCS_RX_SECY_PLCY_CIP, cipher);
+ policy |= FIELD_PREP(MCS_RX_SECY_PLCY_VAL, secy->validate_frames);
+
+ policy |= MCS_RX_SECY_PLCY_ENA;
+
+ req->plcy = policy;
+ req->secy_id = hw_secy_id;
+ req->dir = MCS_RX;
+
+ ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static int cn10k_mcs_write_rx_flowid(struct otx2_nic *pfvf,
+ struct cn10k_mcs_rxsc *rxsc, u8 hw_secy_id)
+{
+ struct macsec_rx_sc *sw_rx_sc = rxsc->sw_rxsc;
+ struct macsec_secy *secy = rxsc->sw_secy;
+ struct mcs_flowid_entry_write_req *req;
+ struct mbox *mbox = &pfvf->mbox;
+ u64 mac_da;
+ int ret;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_flowid_entry_write(mbox);
+ if (!req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ mac_da = ether_addr_to_u64(secy->netdev->dev_addr);
+
+ req->data[0] = FIELD_PREP(MCS_TCAM0_MAC_DA_MASK, mac_da);
+ req->mask[0] = ~0ULL;
+ req->mask[0] = ~MCS_TCAM0_MAC_DA_MASK;
+
+ req->data[1] = FIELD_PREP(MCS_TCAM1_ETYPE_MASK, ETH_P_MACSEC);
+ req->mask[1] = ~0ULL;
+ req->mask[1] &= ~MCS_TCAM1_ETYPE_MASK;
+
+ req->mask[2] = ~0ULL;
+ req->mask[3] = ~0ULL;
+
+ req->flow_id = rxsc->hw_flow_id;
+ req->secy_id = hw_secy_id;
+ req->sc_id = rxsc->hw_sc_id;
+ req->dir = MCS_RX;
+
+ if (sw_rx_sc->active)
+ req->ena = 1;
+
+ ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static int cn10k_mcs_write_sc_cam(struct otx2_nic *pfvf,
+ struct cn10k_mcs_rxsc *rxsc, u8 hw_secy_id)
+{
+ struct macsec_rx_sc *sw_rx_sc = rxsc->sw_rxsc;
+ struct mcs_rx_sc_cam_write_req *sc_req;
+ struct mbox *mbox = &pfvf->mbox;
+ int ret;
+
+ mutex_lock(&mbox->lock);
+
+ sc_req = otx2_mbox_alloc_msg_mcs_rx_sc_cam_write(mbox);
+ if (!sc_req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ sc_req->sci = (__force u64)cpu_to_be64((__force u64)sw_rx_sc->sci);
+ sc_req->sc_id = rxsc->hw_sc_id;
+ sc_req->secy_id = hw_secy_id;
+
+ ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static int cn10k_mcs_write_keys(struct otx2_nic *pfvf,
+ struct macsec_secy *secy,
+ struct mcs_sa_plcy_write_req *req,
+ u8 *sak, u8 *salt, ssci_t ssci)
+{
+ u8 hash_rev[CN10K_MAX_HASH_LEN];
+ u8 sak_rev[CN10K_MAX_SAK_LEN];
+ u8 salt_rev[MACSEC_SALT_LEN];
+ u8 hash[CN10K_MAX_HASH_LEN];
+ u32 ssci_63_32;
+ int err, i;
+
+ err = cn10k_ecb_aes_encrypt(pfvf, sak, secy->key_len, hash);
+ if (err) {
+ dev_err(pfvf->dev, "Generating hash using ECB(AES) failed\n");
+ return err;
+ }
+
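+	/* The SAK, hash and salt appear to be consumed byte-reversed by the
+	 * MCS hardware, hence the reversals below.
+	 */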
+ for (i = 0; i < secy->key_len; i++)
+ sak_rev[i] = sak[secy->key_len - 1 - i];
+
+ for (i = 0; i < CN10K_MAX_HASH_LEN; i++)
+ hash_rev[i] = hash[CN10K_MAX_HASH_LEN - 1 - i];
+
+ for (i = 0; i < MACSEC_SALT_LEN; i++)
+ salt_rev[i] = salt[MACSEC_SALT_LEN - 1 - i];
+
+ ssci_63_32 = (__force u32)cpu_to_be32((__force u32)ssci);
+
+ memcpy(&req->plcy[0][0], sak_rev, secy->key_len);
+ memcpy(&req->plcy[0][4], hash_rev, CN10K_MAX_HASH_LEN);
+ memcpy(&req->plcy[0][6], salt_rev, MACSEC_SALT_LEN);
+ req->plcy[0][7] |= (u64)ssci_63_32 << 32;
+
+ return 0;
+}
+
+static int cn10k_mcs_write_rx_sa_plcy(struct otx2_nic *pfvf,
+ struct macsec_secy *secy,
+ struct cn10k_mcs_rxsc *rxsc,
+ u8 assoc_num, bool sa_in_use)
+{
+ struct mcs_sa_plcy_write_req *plcy_req;
+ u8 *sak = rxsc->sa_key[assoc_num];
+ u8 *salt = rxsc->salt[assoc_num];
+ struct mcs_rx_sc_sa_map *map_req;
+ struct mbox *mbox = &pfvf->mbox;
+ int ret;
+
+ mutex_lock(&mbox->lock);
+
+ plcy_req = otx2_mbox_alloc_msg_mcs_sa_plcy_write(mbox);
+ if (!plcy_req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ map_req = otx2_mbox_alloc_msg_mcs_rx_sc_sa_map_write(mbox);
+ if (!map_req) {
+ otx2_mbox_reset(&mbox->mbox, 0);
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ ret = cn10k_mcs_write_keys(pfvf, secy, plcy_req, sak,
+ salt, rxsc->ssci[assoc_num]);
+ if (ret)
+ goto fail;
+
+ plcy_req->sa_index[0] = rxsc->hw_sa_id[assoc_num];
+ plcy_req->sa_cnt = 1;
+ plcy_req->dir = MCS_RX;
+
+ map_req->sa_index = rxsc->hw_sa_id[assoc_num];
+ map_req->sa_in_use = sa_in_use;
+ map_req->sc_id = rxsc->hw_sc_id;
+ map_req->an = assoc_num;
+
+ /* Send two messages together */
+ ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static int cn10k_mcs_write_rx_sa_pn(struct otx2_nic *pfvf,
+ struct cn10k_mcs_rxsc *rxsc,
+ u8 assoc_num, u64 next_pn)
+{
+ struct mcs_pn_table_write_req *req;
+ struct mbox *mbox = &pfvf->mbox;
+ int ret;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_pn_table_write(mbox);
+ if (!req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ req->pn_id = rxsc->hw_sa_id[assoc_num];
+ req->next_pn = next_pn;
+ req->dir = MCS_RX;
+
+ ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static int cn10k_mcs_write_tx_secy(struct otx2_nic *pfvf,
+ struct macsec_secy *secy,
+ struct cn10k_mcs_txsc *txsc)
+{
+ struct mcs_secy_plcy_write_req *req;
+ struct mbox *mbox = &pfvf->mbox;
+ struct macsec_tx_sc *sw_tx_sc;
+ u8 sectag_tci = 0;
+ u8 tag_offset;
+ u64 policy;
+ u8 cipher;
+ int ret;
+
+ /* Insert SecTag after 12 bytes (DA+SA) or 16 bytes
+ * if VLAN tag needs to be sent in clear text.
+ */
+ tag_offset = txsc->vlan_dev ? 16 : 12;
+ sw_tx_sc = &secy->tx_sc;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_secy_plcy_write(mbox);
+ if (!req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ if (sw_tx_sc->send_sci) {
+ sectag_tci |= MCS_TCI_SC;
+ } else {
+ if (sw_tx_sc->end_station)
+ sectag_tci |= MCS_TCI_ES;
+ if (sw_tx_sc->scb)
+ sectag_tci |= MCS_TCI_SCB;
+ }
+
+ if (sw_tx_sc->encrypt)
+ sectag_tci |= (MCS_TCI_E | MCS_TCI_C);
+
+ policy = FIELD_PREP(MCS_TX_SECY_PLCY_MTU, secy->netdev->mtu);
+	/* Write the SecTag excluding the AN bits (1..0) */
+ policy |= FIELD_PREP(MCS_TX_SECY_PLCY_ST_TCI, sectag_tci >> 2);
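+	/* e.g. (illustrative) with encryption and SCI present, sectag_tci is
+	 * MCS_TCI_SC | MCS_TCI_E | MCS_TCI_C = 0x2c and 0x0b is programmed.
+	 */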
+ policy |= FIELD_PREP(MCS_TX_SECY_PLCY_ST_OFFSET, tag_offset);
+ policy |= MCS_TX_SECY_PLCY_INS_MODE;
+ policy |= MCS_TX_SECY_PLCY_AUTH_ENA;
+
+ switch (secy->key_len) {
+ case 16:
+ cipher = secy->xpn ? MCS_GCM_AES_XPN_128 : MCS_GCM_AES_128;
+ break;
+ case 32:
+ cipher = secy->xpn ? MCS_GCM_AES_XPN_256 : MCS_GCM_AES_256;
+ break;
+ default:
+ cipher = MCS_GCM_AES_128;
+ dev_warn(pfvf->dev, "Unsupported key length\n");
+ break;
+ }
+
+ policy |= FIELD_PREP(MCS_TX_SECY_PLCY_CIP, cipher);
+
+ if (secy->protect_frames)
+ policy |= MCS_TX_SECY_PLCY_PROTECT;
+
+	/* If the encoding SA does not exist or is not active, and protect is
+	 * not set, then frames can be sent out as is. Hence enable the
+	 * policy irrespective of the secy operational state when !protect.
+	 */
+ if (!secy->protect_frames || secy->operational)
+ policy |= MCS_TX_SECY_PLCY_ENA;
+
+ req->plcy = policy;
+ req->secy_id = txsc->hw_secy_id_tx;
+ req->dir = MCS_TX;
+
+ ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static int cn10k_mcs_write_tx_flowid(struct otx2_nic *pfvf,
+ struct macsec_secy *secy,
+ struct cn10k_mcs_txsc *txsc)
+{
+ struct mcs_flowid_entry_write_req *req;
+ struct mbox *mbox = &pfvf->mbox;
+ u64 mac_sa;
+ int ret;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_flowid_entry_write(mbox);
+ if (!req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ mac_sa = ether_addr_to_u64(secy->netdev->dev_addr);
+
+ req->data[0] = FIELD_PREP(MCS_TCAM0_MAC_SA_MASK, mac_sa);
+ req->data[1] = FIELD_PREP(MCS_TCAM1_MAC_SA_MASK, mac_sa >> 16);
+
+ req->mask[0] = ~0ULL;
+ req->mask[0] &= ~MCS_TCAM0_MAC_SA_MASK;
+
+ req->mask[1] = ~0ULL;
+ req->mask[1] &= ~MCS_TCAM1_MAC_SA_MASK;
+
+ req->mask[2] = ~0ULL;
+ req->mask[3] = ~0ULL;
+
+ req->flow_id = txsc->hw_flow_id;
+ req->secy_id = txsc->hw_secy_id_tx;
+ req->sc_id = txsc->hw_sc_id;
+ req->sci = (__force u64)cpu_to_be64((__force u64)secy->sci);
+ req->dir = MCS_TX;
+ /* This can be enabled since stack xmits packets only when interface is up */
+ req->ena = 1;
+
+ ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static int cn10k_mcs_link_tx_sa2sc(struct otx2_nic *pfvf,
+ struct macsec_secy *secy,
+ struct cn10k_mcs_txsc *txsc,
+ u8 sa_num, bool sa_active)
+{
+ struct mcs_tx_sc_sa_map *map_req;
+ struct mbox *mbox = &pfvf->mbox;
+ int ret;
+
+ /* Link the encoding_sa only to SC out of all SAs */
+ if (txsc->encoding_sa != sa_num)
+ return 0;
+
+ mutex_lock(&mbox->lock);
+
+ map_req = otx2_mbox_alloc_msg_mcs_tx_sc_sa_map_write(mbox);
+ if (!map_req) {
+ otx2_mbox_reset(&mbox->mbox, 0);
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ map_req->sa_index0 = txsc->hw_sa_id[sa_num];
+ map_req->sa_index0_vld = sa_active;
+ map_req->sectag_sci = (__force u64)cpu_to_be64((__force u64)secy->sci);
+ map_req->sc_id = txsc->hw_sc_id;
+
+ ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static int cn10k_mcs_write_tx_sa_plcy(struct otx2_nic *pfvf,
+ struct macsec_secy *secy,
+ struct cn10k_mcs_txsc *txsc,
+ u8 assoc_num)
+{
+ struct mcs_sa_plcy_write_req *plcy_req;
+ u8 *sak = txsc->sa_key[assoc_num];
+ u8 *salt = txsc->salt[assoc_num];
+ struct mbox *mbox = &pfvf->mbox;
+ int ret;
+
+ mutex_lock(&mbox->lock);
+
+ plcy_req = otx2_mbox_alloc_msg_mcs_sa_plcy_write(mbox);
+ if (!plcy_req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ ret = cn10k_mcs_write_keys(pfvf, secy, plcy_req, sak,
+ salt, txsc->ssci[assoc_num]);
+ if (ret)
+ goto fail;
+
+ plcy_req->plcy[0][8] = assoc_num;
+ plcy_req->sa_index[0] = txsc->hw_sa_id[assoc_num];
+ plcy_req->sa_cnt = 1;
+ plcy_req->dir = MCS_TX;
+
+ ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static int cn10k_write_tx_sa_pn(struct otx2_nic *pfvf,
+ struct cn10k_mcs_txsc *txsc,
+ u8 assoc_num, u64 next_pn)
+{
+ struct mcs_pn_table_write_req *req;
+ struct mbox *mbox = &pfvf->mbox;
+ int ret;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_pn_table_write(mbox);
+ if (!req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ req->pn_id = txsc->hw_sa_id[assoc_num];
+ req->next_pn = next_pn;
+ req->dir = MCS_TX;
+
+ ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static int cn10k_mcs_ena_dis_flowid(struct otx2_nic *pfvf, u16 hw_flow_id,
+ bool enable, enum mcs_direction dir)
+{
+ struct mcs_flowid_ena_dis_entry *req;
+ struct mbox *mbox = &pfvf->mbox;
+ int ret;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_flowid_ena_entry(mbox);
+ if (!req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ req->flow_id = hw_flow_id;
+ req->ena = enable;
+ req->dir = dir;
+
+ ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static int cn10k_mcs_sa_stats(struct otx2_nic *pfvf, u8 hw_sa_id,
+ struct mcs_sa_stats *rsp_p,
+ enum mcs_direction dir, bool clear)
+{
+ struct mcs_clear_stats *clear_req;
+ struct mbox *mbox = &pfvf->mbox;
+ struct mcs_stats_req *req;
+ struct mcs_sa_stats *rsp;
+ int ret;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_get_sa_stats(mbox);
+ if (!req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ req->id = hw_sa_id;
+ req->dir = dir;
+
+ if (!clear)
+ goto send_msg;
+
+ clear_req = otx2_mbox_alloc_msg_mcs_clear_stats(mbox);
+ if (!clear_req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+ clear_req->id = hw_sa_id;
+ clear_req->dir = dir;
+ clear_req->type = MCS_RSRC_TYPE_SA;
+
+send_msg:
+ ret = otx2_sync_mbox_msg(mbox);
+ if (ret)
+ goto fail;
+
+ rsp = (struct mcs_sa_stats *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
+ 0, &req->hdr);
+ if (IS_ERR(rsp)) {
+ ret = PTR_ERR(rsp);
+ goto fail;
+ }
+
+ memcpy(rsp_p, rsp, sizeof(*rsp_p));
+
+ mutex_unlock(&mbox->lock);
+
+ return 0;
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static int cn10k_mcs_sc_stats(struct otx2_nic *pfvf, u8 hw_sc_id,
+ struct mcs_sc_stats *rsp_p,
+ enum mcs_direction dir, bool clear)
+{
+ struct mcs_clear_stats *clear_req;
+ struct mbox *mbox = &pfvf->mbox;
+ struct mcs_stats_req *req;
+ struct mcs_sc_stats *rsp;
+ int ret;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_get_sc_stats(mbox);
+ if (!req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ req->id = hw_sc_id;
+ req->dir = dir;
+
+ if (!clear)
+ goto send_msg;
+
+ clear_req = otx2_mbox_alloc_msg_mcs_clear_stats(mbox);
+ if (!clear_req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+ clear_req->id = hw_sc_id;
+ clear_req->dir = dir;
+ clear_req->type = MCS_RSRC_TYPE_SC;
+
+send_msg:
+ ret = otx2_sync_mbox_msg(mbox);
+ if (ret)
+ goto fail;
+
+ rsp = (struct mcs_sc_stats *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
+ 0, &req->hdr);
+ if (IS_ERR(rsp)) {
+ ret = PTR_ERR(rsp);
+ goto fail;
+ }
+
+ memcpy(rsp_p, rsp, sizeof(*rsp_p));
+
+ mutex_unlock(&mbox->lock);
+
+ return 0;
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static int cn10k_mcs_secy_stats(struct otx2_nic *pfvf, u8 hw_secy_id,
+ struct mcs_secy_stats *rsp_p,
+ enum mcs_direction dir, bool clear)
+{
+ struct mcs_clear_stats *clear_req;
+ struct mbox *mbox = &pfvf->mbox;
+ struct mcs_secy_stats *rsp;
+ struct mcs_stats_req *req;
+ int ret;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_get_secy_stats(mbox);
+ if (!req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ req->id = hw_secy_id;
+ req->dir = dir;
+
+ if (!clear)
+ goto send_msg;
+
+ clear_req = otx2_mbox_alloc_msg_mcs_clear_stats(mbox);
+ if (!clear_req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+ clear_req->id = hw_secy_id;
+ clear_req->dir = dir;
+ clear_req->type = MCS_RSRC_TYPE_SECY;
+
+send_msg:
+ ret = otx2_sync_mbox_msg(mbox);
+ if (ret)
+ goto fail;
+
+ rsp = (struct mcs_secy_stats *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
+ 0, &req->hdr);
+ if (IS_ERR(rsp)) {
+ ret = PTR_ERR(rsp);
+ goto fail;
+ }
+
+ memcpy(rsp_p, rsp, sizeof(*rsp_p));
+
+ mutex_unlock(&mbox->lock);
+
+ return 0;
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static struct cn10k_mcs_txsc *cn10k_mcs_create_txsc(struct otx2_nic *pfvf)
+{
+ struct cn10k_mcs_txsc *txsc;
+ int ret;
+
+ txsc = kzalloc(sizeof(*txsc), GFP_KERNEL);
+ if (!txsc)
+ return ERR_PTR(-ENOMEM);
+
+ ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_FLOWID,
+ &txsc->hw_flow_id);
+ if (ret)
+ goto fail;
+
+	/* For a SecY, one TX secy and one RX secy HW resource are needed */
+ ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SECY,
+ &txsc->hw_secy_id_tx);
+ if (ret)
+ goto free_flowid;
+
+ ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SECY,
+ &txsc->hw_secy_id_rx);
+ if (ret)
+ goto free_tx_secy;
+
+ ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SC,
+ &txsc->hw_sc_id);
+ if (ret)
+ goto free_rx_secy;
+
+ return txsc;
+free_rx_secy:
+ cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SECY,
+ txsc->hw_secy_id_rx, false);
+free_tx_secy:
+ cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SECY,
+ txsc->hw_secy_id_tx, false);
+free_flowid:
+ cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_FLOWID,
+ txsc->hw_flow_id, false);
+fail:
+ kfree(txsc);
+ return ERR_PTR(ret);
+}
+
+/* Free the Tx SC and the resources of its SAs (if any) back to AF
+ */
+static void cn10k_mcs_delete_txsc(struct otx2_nic *pfvf,
+ struct cn10k_mcs_txsc *txsc)
+{
+ u8 sa_bmap = txsc->sa_bmap;
+ u8 sa_num = 0;
+
+ while (sa_bmap) {
+ if (sa_bmap & 1) {
+ cn10k_mcs_write_tx_sa_plcy(pfvf, txsc->sw_secy,
+ txsc, sa_num);
+ cn10k_mcs_free_txsa(pfvf, txsc->hw_sa_id[sa_num]);
+ }
+ sa_num++;
+ sa_bmap >>= 1;
+ }
+
+ cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SC,
+ txsc->hw_sc_id, false);
+ cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SECY,
+ txsc->hw_secy_id_rx, false);
+ cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SECY,
+ txsc->hw_secy_id_tx, false);
+ cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_FLOWID,
+ txsc->hw_flow_id, false);
+}
+
+static struct cn10k_mcs_rxsc *cn10k_mcs_create_rxsc(struct otx2_nic *pfvf)
+{
+ struct cn10k_mcs_rxsc *rxsc;
+ int ret;
+
+ rxsc = kzalloc(sizeof(*rxsc), GFP_KERNEL);
+ if (!rxsc)
+ return ERR_PTR(-ENOMEM);
+
+ ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_FLOWID,
+ &rxsc->hw_flow_id);
+ if (ret)
+ goto fail;
+
+ ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SC,
+ &rxsc->hw_sc_id);
+ if (ret)
+ goto free_flowid;
+
+ return rxsc;
+free_flowid:
+ cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_FLOWID,
+ rxsc->hw_flow_id, false);
+fail:
+ kfree(rxsc);
+ return ERR_PTR(ret);
+}
+
+/* Free the Rx SC and the resources of its SAs (if any) back to AF
+ */
+static void cn10k_mcs_delete_rxsc(struct otx2_nic *pfvf,
+ struct cn10k_mcs_rxsc *rxsc)
+{
+ u8 sa_bmap = rxsc->sa_bmap;
+ u8 sa_num = 0;
+
+ while (sa_bmap) {
+ if (sa_bmap & 1) {
+ cn10k_mcs_write_rx_sa_plcy(pfvf, rxsc->sw_secy, rxsc,
+ sa_num, false);
+ cn10k_mcs_free_rxsa(pfvf, rxsc->hw_sa_id[sa_num]);
+ }
+ sa_num++;
+ sa_bmap >>= 1;
+ }
+
+ cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SC,
+ rxsc->hw_sc_id, false);
+ cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_FLOWID,
+ rxsc->hw_flow_id, false);
+}
+
+static int cn10k_mcs_secy_tx_cfg(struct otx2_nic *pfvf, struct macsec_secy *secy,
+ struct cn10k_mcs_txsc *txsc,
+ struct macsec_tx_sa *sw_tx_sa, u8 sa_num)
+{
+ if (sw_tx_sa) {
+ cn10k_mcs_write_tx_sa_plcy(pfvf, secy, txsc, sa_num);
+ cn10k_write_tx_sa_pn(pfvf, txsc, sa_num, sw_tx_sa->next_pn);
+ cn10k_mcs_link_tx_sa2sc(pfvf, secy, txsc, sa_num,
+ sw_tx_sa->active);
+ }
+
+ cn10k_mcs_write_tx_secy(pfvf, secy, txsc);
+ cn10k_mcs_write_tx_flowid(pfvf, secy, txsc);
+ /* When updating secy, change RX secy also */
+ cn10k_mcs_write_rx_secy(pfvf, secy, txsc->hw_secy_id_rx);
+
+ return 0;
+}
+
+static int cn10k_mcs_secy_rx_cfg(struct otx2_nic *pfvf,
+ struct macsec_secy *secy, u8 hw_secy_id)
+{
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct cn10k_mcs_rxsc *mcs_rx_sc;
+ struct macsec_rx_sc *sw_rx_sc;
+ struct macsec_rx_sa *sw_rx_sa;
+ u8 sa_num;
+
+ for (sw_rx_sc = rcu_dereference_bh(secy->rx_sc); sw_rx_sc && sw_rx_sc->active;
+ sw_rx_sc = rcu_dereference_bh(sw_rx_sc->next)) {
+ mcs_rx_sc = cn10k_mcs_get_rxsc(cfg, secy, sw_rx_sc);
+ if (unlikely(!mcs_rx_sc))
+ continue;
+
+ for (sa_num = 0; sa_num < CN10K_MCS_SA_PER_SC; sa_num++) {
+ sw_rx_sa = rcu_dereference_bh(sw_rx_sc->sa[sa_num]);
+ if (!sw_rx_sa)
+ continue;
+
+ cn10k_mcs_write_rx_sa_plcy(pfvf, secy, mcs_rx_sc,
+ sa_num, sw_rx_sa->active);
+ cn10k_mcs_write_rx_sa_pn(pfvf, mcs_rx_sc, sa_num,
+ sw_rx_sa->next_pn);
+ }
+
+ cn10k_mcs_write_rx_flowid(pfvf, mcs_rx_sc, hw_secy_id);
+ cn10k_mcs_write_sc_cam(pfvf, mcs_rx_sc, hw_secy_id);
+ }
+
+ return 0;
+}
+
+static int cn10k_mcs_disable_rxscs(struct otx2_nic *pfvf,
+ struct macsec_secy *secy,
+ bool delete)
+{
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct cn10k_mcs_rxsc *mcs_rx_sc;
+ struct macsec_rx_sc *sw_rx_sc;
+ int ret;
+
+ for (sw_rx_sc = rcu_dereference_bh(secy->rx_sc); sw_rx_sc && sw_rx_sc->active;
+ sw_rx_sc = rcu_dereference_bh(sw_rx_sc->next)) {
+ mcs_rx_sc = cn10k_mcs_get_rxsc(cfg, secy, sw_rx_sc);
+ if (unlikely(!mcs_rx_sc))
+ continue;
+
+ ret = cn10k_mcs_ena_dis_flowid(pfvf, mcs_rx_sc->hw_flow_id,
+ false, MCS_RX);
+ if (ret)
+ dev_err(pfvf->dev, "Failed to disable TCAM for SC %d\n",
+ mcs_rx_sc->hw_sc_id);
+ if (delete) {
+ cn10k_mcs_delete_rxsc(pfvf, mcs_rx_sc);
+ list_del(&mcs_rx_sc->entry);
+ kfree(mcs_rx_sc);
+ }
+ }
+
+ return 0;
+}
+
+static void cn10k_mcs_sync_stats(struct otx2_nic *pfvf, struct macsec_secy *secy,
+ struct cn10k_mcs_txsc *txsc)
+{
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct mcs_secy_stats rx_rsp = { 0 };
+ struct mcs_sc_stats sc_rsp = { 0 };
+ struct cn10k_mcs_rxsc *rxsc;
+
+	/* Because some stats share counters in the hardware, take a snapshot
+	 * of the current stats and reset them when updating the secy policy.
+	 * The stats below are the ones affected by the shared counters.
+	 */
+
+ /* Check if sync is really needed */
+ if (secy->validate_frames == txsc->last_validate_frames &&
+ secy->replay_protect == txsc->last_replay_protect)
+ return;
+
+ cn10k_mcs_secy_stats(pfvf, txsc->hw_secy_id_rx, &rx_rsp, MCS_RX, true);
+
+ txsc->stats.InPktsBadTag += rx_rsp.pkt_badtag_cnt;
+ txsc->stats.InPktsUnknownSCI += rx_rsp.pkt_nosa_cnt;
+ txsc->stats.InPktsNoSCI += rx_rsp.pkt_nosaerror_cnt;
+ if (txsc->last_validate_frames == MACSEC_VALIDATE_STRICT)
+ txsc->stats.InPktsNoTag += rx_rsp.pkt_untaged_cnt;
+ else
+ txsc->stats.InPktsUntagged += rx_rsp.pkt_untaged_cnt;
+
+ list_for_each_entry(rxsc, &cfg->rxsc_list, entry) {
+ cn10k_mcs_sc_stats(pfvf, rxsc->hw_sc_id, &sc_rsp, MCS_RX, true);
+
+ rxsc->stats.InOctetsValidated += sc_rsp.octet_validate_cnt;
+ rxsc->stats.InOctetsDecrypted += sc_rsp.octet_decrypt_cnt;
+
+ rxsc->stats.InPktsInvalid += sc_rsp.pkt_invalid_cnt;
+ rxsc->stats.InPktsNotValid += sc_rsp.pkt_notvalid_cnt;
+
+ if (txsc->last_replay_protect)
+ rxsc->stats.InPktsLate += sc_rsp.pkt_late_cnt;
+ else
+ rxsc->stats.InPktsDelayed += sc_rsp.pkt_late_cnt;
+
+ if (txsc->last_validate_frames == MACSEC_VALIDATE_DISABLED)
+ rxsc->stats.InPktsUnchecked += sc_rsp.pkt_unchecked_cnt;
+ else
+ rxsc->stats.InPktsOK += sc_rsp.pkt_unchecked_cnt;
+ }
+
+ txsc->last_validate_frames = secy->validate_frames;
+ txsc->last_replay_protect = secy->replay_protect;
+}
+
+static int cn10k_mdo_open(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct macsec_secy *secy = ctx->secy;
+ struct macsec_tx_sa *sw_tx_sa;
+ struct cn10k_mcs_txsc *txsc;
+ u8 sa_num;
+ int err;
+
+ txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
+ if (!txsc)
+ return -ENOENT;
+
+ sa_num = txsc->encoding_sa;
+ sw_tx_sa = rcu_dereference_bh(secy->tx_sc.sa[sa_num]);
+
+ err = cn10k_mcs_secy_tx_cfg(pfvf, secy, txsc, sw_tx_sa, sa_num);
+ if (err)
+ return err;
+
+ return cn10k_mcs_secy_rx_cfg(pfvf, secy, txsc->hw_secy_id_rx);
+}
+
+static int cn10k_mdo_stop(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct cn10k_mcs_txsc *txsc;
+ int err;
+
+ txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
+ if (!txsc)
+ return -ENOENT;
+
+ err = cn10k_mcs_ena_dis_flowid(pfvf, txsc->hw_flow_id, false, MCS_TX);
+ if (err)
+ return err;
+
+ return cn10k_mcs_disable_rxscs(pfvf, ctx->secy, false);
+}
+
+static int cn10k_mdo_add_secy(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct macsec_secy *secy = ctx->secy;
+ struct cn10k_mcs_txsc *txsc;
+
+ if (secy->icv_len != MACSEC_DEFAULT_ICV_LEN)
+ return -EOPNOTSUPP;
+
+ txsc = cn10k_mcs_create_txsc(pfvf);
+ if (IS_ERR(txsc))
+ return -ENOSPC;
+
+ txsc->sw_secy = secy;
+ txsc->encoding_sa = secy->tx_sc.encoding_sa;
+ txsc->last_validate_frames = secy->validate_frames;
+ txsc->last_replay_protect = secy->replay_protect;
+ txsc->vlan_dev = is_vlan_dev(ctx->netdev);
+
+ list_add(&txsc->entry, &cfg->txsc_list);
+
+ if (netif_running(secy->netdev))
+ return cn10k_mcs_secy_tx_cfg(pfvf, secy, txsc, NULL, 0);
+
+ return 0;
+}
+
+static int cn10k_mdo_upd_secy(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct macsec_secy *secy = ctx->secy;
+ struct macsec_tx_sa *sw_tx_sa;
+ struct cn10k_mcs_txsc *txsc;
+ bool active;
+ u8 sa_num;
+ int err;
+
+ txsc = cn10k_mcs_get_txsc(cfg, secy);
+ if (!txsc)
+ return -ENOENT;
+
+ /* Encoding SA got changed */
+ if (txsc->encoding_sa != secy->tx_sc.encoding_sa) {
+ txsc->encoding_sa = secy->tx_sc.encoding_sa;
+ sa_num = txsc->encoding_sa;
+ sw_tx_sa = rcu_dereference_bh(secy->tx_sc.sa[sa_num]);
+ active = sw_tx_sa ? sw_tx_sa->active : false;
+ cn10k_mcs_link_tx_sa2sc(pfvf, secy, txsc, sa_num, active);
+ }
+
+ if (netif_running(secy->netdev)) {
+ cn10k_mcs_sync_stats(pfvf, secy, txsc);
+
+ err = cn10k_mcs_secy_tx_cfg(pfvf, secy, txsc, NULL, 0);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int cn10k_mdo_del_secy(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct cn10k_mcs_txsc *txsc;
+
+ txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
+ if (!txsc)
+ return -ENOENT;
+
+ cn10k_mcs_ena_dis_flowid(pfvf, txsc->hw_flow_id, false, MCS_TX);
+ cn10k_mcs_disable_rxscs(pfvf, ctx->secy, true);
+ cn10k_mcs_delete_txsc(pfvf, txsc);
+ list_del(&txsc->entry);
+ kfree(txsc);
+
+ return 0;
+}
+
+static int cn10k_mdo_add_txsa(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
+ struct macsec_tx_sa *sw_tx_sa = ctx->sa.tx_sa;
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct macsec_secy *secy = ctx->secy;
+ u8 sa_num = ctx->sa.assoc_num;
+ struct cn10k_mcs_txsc *txsc;
+ int err;
+
+ txsc = cn10k_mcs_get_txsc(cfg, secy);
+ if (!txsc)
+ return -ENOENT;
+
+ if (sa_num >= CN10K_MCS_SA_PER_SC)
+ return -EOPNOTSUPP;
+
+ if (cn10k_mcs_alloc_txsa(pfvf, &txsc->hw_sa_id[sa_num]))
+ return -ENOSPC;
+
+ memcpy(&txsc->sa_key[sa_num], ctx->sa.key, secy->key_len);
+ memcpy(&txsc->salt[sa_num], sw_tx_sa->key.salt.bytes, MACSEC_SALT_LEN);
+ txsc->ssci[sa_num] = sw_tx_sa->ssci;
+
+ txsc->sa_bmap |= 1 << sa_num;
+
+ if (netif_running(secy->netdev)) {
+ err = cn10k_mcs_write_tx_sa_plcy(pfvf, secy, txsc, sa_num);
+ if (err)
+ return err;
+
+ err = cn10k_write_tx_sa_pn(pfvf, txsc, sa_num,
+ sw_tx_sa->next_pn);
+ if (err)
+ return err;
+
+ err = cn10k_mcs_link_tx_sa2sc(pfvf, secy, txsc,
+ sa_num, sw_tx_sa->active);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int cn10k_mdo_upd_txsa(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
+ struct macsec_tx_sa *sw_tx_sa = ctx->sa.tx_sa;
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct macsec_secy *secy = ctx->secy;
+ u8 sa_num = ctx->sa.assoc_num;
+ struct cn10k_mcs_txsc *txsc;
+ int err;
+
+ txsc = cn10k_mcs_get_txsc(cfg, secy);
+ if (!txsc)
+ return -ENOENT;
+
+ if (sa_num >= CN10K_MCS_SA_PER_SC)
+ return -EOPNOTSUPP;
+
+ if (netif_running(secy->netdev)) {
+ /* Keys cannot be changed after creation */
+ if (ctx->sa.update_pn) {
+ err = cn10k_write_tx_sa_pn(pfvf, txsc, sa_num,
+ sw_tx_sa->next_pn);
+ if (err)
+ return err;
+ }
+
+ err = cn10k_mcs_link_tx_sa2sc(pfvf, secy, txsc,
+ sa_num, sw_tx_sa->active);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int cn10k_mdo_del_txsa(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ u8 sa_num = ctx->sa.assoc_num;
+ struct cn10k_mcs_txsc *txsc;
+
+ txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
+ if (!txsc)
+ return -ENOENT;
+
+ if (sa_num >= CN10K_MCS_SA_PER_SC)
+ return -EOPNOTSUPP;
+
+ cn10k_mcs_free_txsa(pfvf, txsc->hw_sa_id[sa_num]);
+ txsc->sa_bmap &= ~(1 << sa_num);
+
+ return 0;
+}
+
+static int cn10k_mdo_add_rxsc(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct macsec_secy *secy = ctx->secy;
+ struct cn10k_mcs_rxsc *rxsc;
+ struct cn10k_mcs_txsc *txsc;
+ int err;
+
+ txsc = cn10k_mcs_get_txsc(cfg, secy);
+ if (!txsc)
+ return -ENOENT;
+
+ rxsc = cn10k_mcs_create_rxsc(pfvf);
+ if (IS_ERR(rxsc))
+ return -ENOSPC;
+
+ rxsc->sw_secy = ctx->secy;
+ rxsc->sw_rxsc = ctx->rx_sc;
+ list_add(&rxsc->entry, &cfg->rxsc_list);
+
+ if (netif_running(secy->netdev)) {
+ err = cn10k_mcs_write_rx_flowid(pfvf, rxsc, txsc->hw_secy_id_rx);
+ if (err)
+ return err;
+
+ err = cn10k_mcs_write_sc_cam(pfvf, rxsc, txsc->hw_secy_id_rx);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int cn10k_mdo_upd_rxsc(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct macsec_secy *secy = ctx->secy;
+ bool enable = ctx->rx_sc->active;
+ struct cn10k_mcs_rxsc *rxsc;
+
+ rxsc = cn10k_mcs_get_rxsc(cfg, secy, ctx->rx_sc);
+ if (!rxsc)
+ return -ENOENT;
+
+ if (netif_running(secy->netdev))
+ return cn10k_mcs_ena_dis_flowid(pfvf, rxsc->hw_flow_id,
+ enable, MCS_RX);
+
+ return 0;
+}
+
+static int cn10k_mdo_del_rxsc(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct cn10k_mcs_rxsc *rxsc;
+
+ rxsc = cn10k_mcs_get_rxsc(cfg, ctx->secy, ctx->rx_sc);
+ if (!rxsc)
+ return -ENOENT;
+
+ cn10k_mcs_ena_dis_flowid(pfvf, rxsc->hw_flow_id, false, MCS_RX);
+ cn10k_mcs_delete_rxsc(pfvf, rxsc);
+ list_del(&rxsc->entry);
+ kfree(rxsc);
+
+ return 0;
+}
+
+static int cn10k_mdo_add_rxsa(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
+ struct macsec_rx_sc *sw_rx_sc = ctx->sa.rx_sa->sc;
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct macsec_rx_sa *rx_sa = ctx->sa.rx_sa;
+ struct macsec_secy *secy = ctx->secy;
+ bool sa_in_use = rx_sa->active;
+ u8 sa_num = ctx->sa.assoc_num;
+ struct cn10k_mcs_rxsc *rxsc;
+ int err;
+
+ rxsc = cn10k_mcs_get_rxsc(cfg, secy, sw_rx_sc);
+ if (!rxsc)
+ return -ENOENT;
+
+ if (sa_num >= CN10K_MCS_SA_PER_SC)
+ return -EOPNOTSUPP;
+
+ if (cn10k_mcs_alloc_rxsa(pfvf, &rxsc->hw_sa_id[sa_num]))
+ return -ENOSPC;
+
+ memcpy(&rxsc->sa_key[sa_num], ctx->sa.key, ctx->secy->key_len);
+ memcpy(&rxsc->salt[sa_num], rx_sa->key.salt.bytes, MACSEC_SALT_LEN);
+ rxsc->ssci[sa_num] = rx_sa->ssci;
+
+ rxsc->sa_bmap |= 1 << sa_num;
+
+ if (netif_running(secy->netdev)) {
+ err = cn10k_mcs_write_rx_sa_plcy(pfvf, secy, rxsc,
+ sa_num, sa_in_use);
+ if (err)
+ return err;
+
+ err = cn10k_mcs_write_rx_sa_pn(pfvf, rxsc, sa_num,
+ rx_sa->next_pn);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int cn10k_mdo_upd_rxsa(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
+ struct macsec_rx_sc *sw_rx_sc = ctx->sa.rx_sa->sc;
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct macsec_rx_sa *rx_sa = ctx->sa.rx_sa;
+ struct macsec_secy *secy = ctx->secy;
+ bool sa_in_use = rx_sa->active;
+ u8 sa_num = ctx->sa.assoc_num;
+ struct cn10k_mcs_rxsc *rxsc;
+ int err;
+
+ rxsc = cn10k_mcs_get_rxsc(cfg, secy, sw_rx_sc);
+ if (!rxsc)
+ return -ENOENT;
+
+ if (sa_num >= CN10K_MCS_SA_PER_SC)
+ return -EOPNOTSUPP;
+
+ if (netif_running(secy->netdev)) {
+ err = cn10k_mcs_write_rx_sa_plcy(pfvf, secy, rxsc, sa_num, sa_in_use);
+ if (err)
+ return err;
+
+ if (!ctx->sa.update_pn)
+ return 0;
+
+ err = cn10k_mcs_write_rx_sa_pn(pfvf, rxsc, sa_num,
+ rx_sa->next_pn);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int cn10k_mdo_del_rxsa(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
+ struct macsec_rx_sc *sw_rx_sc = ctx->sa.rx_sa->sc;
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ u8 sa_num = ctx->sa.assoc_num;
+ struct cn10k_mcs_rxsc *rxsc;
+
+ rxsc = cn10k_mcs_get_rxsc(cfg, ctx->secy, sw_rx_sc);
+ if (!rxsc)
+ return -ENOENT;
+
+ if (sa_num >= CN10K_MCS_SA_PER_SC)
+ return -EOPNOTSUPP;
+
+ cn10k_mcs_write_rx_sa_plcy(pfvf, ctx->secy, rxsc, sa_num, false);
+ cn10k_mcs_free_rxsa(pfvf, rxsc->hw_sa_id[sa_num]);
+
+ rxsc->sa_bmap &= ~(1 << sa_num);
+
+ return 0;
+}
+
+static int cn10k_mdo_get_dev_stats(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
+ struct mcs_secy_stats tx_rsp = { 0 }, rx_rsp = { 0 };
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct macsec_secy *secy = ctx->secy;
+ struct cn10k_mcs_txsc *txsc;
+
+ txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
+ if (!txsc)
+ return -ENOENT;
+
+ cn10k_mcs_secy_stats(pfvf, txsc->hw_secy_id_tx, &tx_rsp, MCS_TX, false);
+ ctx->stats.dev_stats->OutPktsUntagged = tx_rsp.pkt_untagged_cnt;
+ ctx->stats.dev_stats->OutPktsTooLong = tx_rsp.pkt_toolong_cnt;
+
+ cn10k_mcs_secy_stats(pfvf, txsc->hw_secy_id_rx, &rx_rsp, MCS_RX, true);
+ txsc->stats.InPktsBadTag += rx_rsp.pkt_badtag_cnt;
+ txsc->stats.InPktsUnknownSCI += rx_rsp.pkt_nosa_cnt;
+ txsc->stats.InPktsNoSCI += rx_rsp.pkt_nosaerror_cnt;
+ if (secy->validate_frames == MACSEC_VALIDATE_STRICT)
+ txsc->stats.InPktsNoTag += rx_rsp.pkt_untaged_cnt;
+ else
+ txsc->stats.InPktsUntagged += rx_rsp.pkt_untaged_cnt;
+ txsc->stats.InPktsOverrun = 0;
+
+ ctx->stats.dev_stats->InPktsNoTag = txsc->stats.InPktsNoTag;
+ ctx->stats.dev_stats->InPktsUntagged = txsc->stats.InPktsUntagged;
+ ctx->stats.dev_stats->InPktsBadTag = txsc->stats.InPktsBadTag;
+ ctx->stats.dev_stats->InPktsUnknownSCI = txsc->stats.InPktsUnknownSCI;
+ ctx->stats.dev_stats->InPktsNoSCI = txsc->stats.InPktsNoSCI;
+ ctx->stats.dev_stats->InPktsOverrun = txsc->stats.InPktsOverrun;
+
+ return 0;
+}
+
+static int cn10k_mdo_get_tx_sc_stats(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct mcs_sc_stats rsp = { 0 };
+ struct cn10k_mcs_txsc *txsc;
+
+ txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
+ if (!txsc)
+ return -ENOENT;
+
+ cn10k_mcs_sc_stats(pfvf, txsc->hw_sc_id, &rsp, MCS_TX, false);
+
+ ctx->stats.tx_sc_stats->OutPktsProtected = rsp.pkt_protected_cnt;
+ ctx->stats.tx_sc_stats->OutPktsEncrypted = rsp.pkt_encrypt_cnt;
+ ctx->stats.tx_sc_stats->OutOctetsProtected = rsp.octet_protected_cnt;
+ ctx->stats.tx_sc_stats->OutOctetsEncrypted = rsp.octet_encrypt_cnt;
+
+ return 0;
+}
+
+static int cn10k_mdo_get_tx_sa_stats(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct mcs_sa_stats rsp = { 0 };
+ u8 sa_num = ctx->sa.assoc_num;
+ struct cn10k_mcs_txsc *txsc;
+
+ txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
+ if (!txsc)
+ return -ENOENT;
+
+ if (sa_num >= CN10K_MCS_SA_PER_SC)
+ return -EOPNOTSUPP;
+
+ cn10k_mcs_sa_stats(pfvf, txsc->hw_sa_id[sa_num], &rsp, MCS_TX, false);
+
+ ctx->stats.tx_sa_stats->OutPktsProtected = rsp.pkt_protected_cnt;
+ ctx->stats.tx_sa_stats->OutPktsEncrypted = rsp.pkt_encrypt_cnt;
+
+ return 0;
+}
+
+static int cn10k_mdo_get_rx_sc_stats(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct macsec_secy *secy = ctx->secy;
+ struct mcs_sc_stats rsp = { 0 };
+ struct cn10k_mcs_rxsc *rxsc;
+
+ rxsc = cn10k_mcs_get_rxsc(cfg, secy, ctx->rx_sc);
+ if (!rxsc)
+ return -ENOENT;
+
+ cn10k_mcs_sc_stats(pfvf, rxsc->hw_sc_id, &rsp, MCS_RX, true);
+
+ rxsc->stats.InOctetsValidated += rsp.octet_validate_cnt;
+ rxsc->stats.InOctetsDecrypted += rsp.octet_decrypt_cnt;
+
+ rxsc->stats.InPktsInvalid += rsp.pkt_invalid_cnt;
+ rxsc->stats.InPktsNotValid += rsp.pkt_notvalid_cnt;
+
+ if (secy->replay_protect)
+ rxsc->stats.InPktsLate += rsp.pkt_late_cnt;
+ else
+ rxsc->stats.InPktsDelayed += rsp.pkt_late_cnt;
+
+ if (secy->validate_frames == MACSEC_VALIDATE_DISABLED)
+ rxsc->stats.InPktsUnchecked += rsp.pkt_unchecked_cnt;
+ else
+ rxsc->stats.InPktsOK += rsp.pkt_unchecked_cnt;
+
+ ctx->stats.rx_sc_stats->InOctetsValidated = rxsc->stats.InOctetsValidated;
+ ctx->stats.rx_sc_stats->InOctetsDecrypted = rxsc->stats.InOctetsDecrypted;
+ ctx->stats.rx_sc_stats->InPktsInvalid = rxsc->stats.InPktsInvalid;
+ ctx->stats.rx_sc_stats->InPktsNotValid = rxsc->stats.InPktsNotValid;
+ ctx->stats.rx_sc_stats->InPktsLate = rxsc->stats.InPktsLate;
+ ctx->stats.rx_sc_stats->InPktsDelayed = rxsc->stats.InPktsDelayed;
+ ctx->stats.rx_sc_stats->InPktsUnchecked = rxsc->stats.InPktsUnchecked;
+ ctx->stats.rx_sc_stats->InPktsOK = rxsc->stats.InPktsOK;
+
+ return 0;
+}
+
+static int cn10k_mdo_get_rx_sa_stats(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
+ struct macsec_rx_sc *sw_rx_sc = ctx->sa.rx_sa->sc;
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct mcs_sa_stats rsp = { 0 };
+ u8 sa_num = ctx->sa.assoc_num;
+ struct cn10k_mcs_rxsc *rxsc;
+
+ rxsc = cn10k_mcs_get_rxsc(cfg, ctx->secy, sw_rx_sc);
+ if (!rxsc)
+ return -ENOENT;
+
+ if (sa_num >= CN10K_MCS_SA_PER_SC)
+ return -EOPNOTSUPP;
+
+ cn10k_mcs_sa_stats(pfvf, rxsc->hw_sa_id[sa_num], &rsp, MCS_RX, false);
+
+ ctx->stats.rx_sa_stats->InPktsOK = rsp.pkt_ok_cnt;
+ ctx->stats.rx_sa_stats->InPktsInvalid = rsp.pkt_invalid_cnt;
+ ctx->stats.rx_sa_stats->InPktsNotValid = rsp.pkt_notvalid_cnt;
+ ctx->stats.rx_sa_stats->InPktsNotUsingSA = rsp.pkt_nosaerror_cnt;
+ ctx->stats.rx_sa_stats->InPktsUnusedSA = rsp.pkt_nosa_cnt;
+
+ return 0;
+}
+
+static const struct macsec_ops cn10k_mcs_ops = {
+ .mdo_dev_open = cn10k_mdo_open,
+ .mdo_dev_stop = cn10k_mdo_stop,
+ .mdo_add_secy = cn10k_mdo_add_secy,
+ .mdo_upd_secy = cn10k_mdo_upd_secy,
+ .mdo_del_secy = cn10k_mdo_del_secy,
+ .mdo_add_rxsc = cn10k_mdo_add_rxsc,
+ .mdo_upd_rxsc = cn10k_mdo_upd_rxsc,
+ .mdo_del_rxsc = cn10k_mdo_del_rxsc,
+ .mdo_add_rxsa = cn10k_mdo_add_rxsa,
+ .mdo_upd_rxsa = cn10k_mdo_upd_rxsa,
+ .mdo_del_rxsa = cn10k_mdo_del_rxsa,
+ .mdo_add_txsa = cn10k_mdo_add_txsa,
+ .mdo_upd_txsa = cn10k_mdo_upd_txsa,
+ .mdo_del_txsa = cn10k_mdo_del_txsa,
+ .mdo_get_dev_stats = cn10k_mdo_get_dev_stats,
+ .mdo_get_tx_sc_stats = cn10k_mdo_get_tx_sc_stats,
+ .mdo_get_tx_sa_stats = cn10k_mdo_get_tx_sa_stats,
+ .mdo_get_rx_sc_stats = cn10k_mdo_get_rx_sc_stats,
+ .mdo_get_rx_sa_stats = cn10k_mdo_get_rx_sa_stats,
+};
+
+void cn10k_handle_mcs_event(struct otx2_nic *pfvf, struct mcs_intr_info *event)
+{
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct macsec_tx_sa *sw_tx_sa = NULL;
+ struct macsec_secy *secy = NULL;
+ struct cn10k_mcs_txsc *txsc;
+ u8 an;
+
+ if (!test_bit(CN10K_HW_MACSEC, &pfvf->hw.cap_flag))
+ return;
+
+ if (!(event->intr_mask & MCS_CPM_TX_PACKET_XPN_EQ0_INT))
+ return;
+
+ /* Find the SecY to which the expired hardware SA is mapped */
+ list_for_each_entry(txsc, &cfg->txsc_list, entry) {
+ for (an = 0; an < CN10K_MCS_SA_PER_SC; an++)
+ if (txsc->hw_sa_id[an] == event->sa_id) {
+ secy = txsc->sw_secy;
+ sw_tx_sa = rcu_dereference_bh(secy->tx_sc.sa[an]);
+ }
+ }
+
+ if (secy && sw_tx_sa)
+ macsec_pn_wrapped(secy, sw_tx_sa);
+}
+
+int cn10k_mcs_init(struct otx2_nic *pfvf)
+{
+ struct mbox *mbox = &pfvf->mbox;
+ struct cn10k_mcs_cfg *cfg;
+ struct mcs_intr_cfg *req;
+
+ if (!test_bit(CN10K_HW_MACSEC, &pfvf->hw.cap_flag))
+ return 0;
+
+ cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
+ if (!cfg)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&cfg->txsc_list);
+ INIT_LIST_HEAD(&cfg->rxsc_list);
+ pfvf->macsec_cfg = cfg;
+
+ pfvf->netdev->features |= NETIF_F_HW_MACSEC;
+ pfvf->netdev->macsec_ops = &cn10k_mcs_ops;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_intr_cfg(mbox);
+ if (!req)
+ goto fail;
+
+ req->intr_mask = MCS_CPM_TX_PACKET_XPN_EQ0_INT;
+
+ if (otx2_sync_mbox_msg(mbox))
+ goto fail;
+
+ mutex_unlock(&mbox->lock);
+
+ return 0;
+fail:
+ dev_err(pfvf->dev, "Cannot notify PN wrapped event\n");
+ mutex_unlock(&mbox->lock);
+ return 0;
+}
+
+void cn10k_mcs_free(struct otx2_nic *pfvf)
+{
+ if (!test_bit(CN10K_HW_MACSEC, &pfvf->hw.cap_flag))
+ return;
+
+ cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SECY, 0, true);
+ cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SECY, 0, true);
+ kfree(pfvf->macsec_cfg);
+ pfvf->macsec_cfg = NULL;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
new file mode 100644
index 000000000..629cf1659
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -0,0 +1,1903 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Ethernet driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <net/page_pool/helpers.h>
+#include <net/tso.h>
+#include <linux/bitfield.h>
+
+#include "otx2_reg.h"
+#include "otx2_common.h"
+#include "otx2_struct.h"
+#include "cn10k.h"
+
+static void otx2_nix_rq_op_stats(struct queue_stats *stats,
+ struct otx2_nic *pfvf, int qidx)
+{
+ u64 incr = (u64)qidx << 32;
+ u64 *ptr;
+
+ ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_RQ_OP_OCTS);
+ stats->bytes = otx2_atomic64_add(incr, ptr);
+
+ ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_RQ_OP_PKTS);
+ stats->pkts = otx2_atomic64_add(incr, ptr);
+}
+
+static void otx2_nix_sq_op_stats(struct queue_stats *stats,
+ struct otx2_nic *pfvf, int qidx)
+{
+ u64 incr = (u64)qidx << 32;
+ u64 *ptr;
+
+ ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_OCTS);
+ stats->bytes = otx2_atomic64_add(incr, ptr);
+
+ ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_PKTS);
+ stats->pkts = otx2_atomic64_add(incr, ptr);
+}
+
+void otx2_update_lmac_stats(struct otx2_nic *pfvf)
+{
+ struct msg_req *req;
+
+ if (!netif_running(pfvf->netdev))
+ return;
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_cgx_stats(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return;
+ }
+
+ otx2_sync_mbox_msg(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
+}
+
+void otx2_update_lmac_fec_stats(struct otx2_nic *pfvf)
+{
+ struct msg_req *req;
+
+ if (!netif_running(pfvf->netdev))
+ return;
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_cgx_fec_stats(&pfvf->mbox);
+ if (req)
+ otx2_sync_mbox_msg(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
+}
+
+int otx2_update_rq_stats(struct otx2_nic *pfvf, int qidx)
+{
+ struct otx2_rcv_queue *rq = &pfvf->qset.rq[qidx];
+
+ if (!pfvf->qset.rq)
+ return 0;
+
+ otx2_nix_rq_op_stats(&rq->stats, pfvf, qidx);
+ return 1;
+}
+
+int otx2_update_sq_stats(struct otx2_nic *pfvf, int qidx)
+{
+ struct otx2_snd_queue *sq = &pfvf->qset.sq[qidx];
+
+ if (!pfvf->qset.sq)
+ return 0;
+
+ if (qidx >= pfvf->hw.non_qos_queues) {
+ if (!test_bit(qidx - pfvf->hw.non_qos_queues, pfvf->qos.qos_sq_bmap))
+ return 0;
+ }
+
+ otx2_nix_sq_op_stats(&sq->stats, pfvf, qidx);
+ return 1;
+}
+
+void otx2_get_dev_stats(struct otx2_nic *pfvf)
+{
+ struct otx2_dev_stats *dev_stats = &pfvf->hw.dev_stats;
+
+ dev_stats->rx_bytes = OTX2_GET_RX_STATS(RX_OCTS);
+ dev_stats->rx_drops = OTX2_GET_RX_STATS(RX_DROP);
+ dev_stats->rx_bcast_frames = OTX2_GET_RX_STATS(RX_BCAST);
+ dev_stats->rx_mcast_frames = OTX2_GET_RX_STATS(RX_MCAST);
+ dev_stats->rx_ucast_frames = OTX2_GET_RX_STATS(RX_UCAST);
+ dev_stats->rx_frames = dev_stats->rx_bcast_frames +
+ dev_stats->rx_mcast_frames +
+ dev_stats->rx_ucast_frames;
+
+ dev_stats->tx_bytes = OTX2_GET_TX_STATS(TX_OCTS);
+ dev_stats->tx_drops = OTX2_GET_TX_STATS(TX_DROP);
+ dev_stats->tx_bcast_frames = OTX2_GET_TX_STATS(TX_BCAST);
+ dev_stats->tx_mcast_frames = OTX2_GET_TX_STATS(TX_MCAST);
+ dev_stats->tx_ucast_frames = OTX2_GET_TX_STATS(TX_UCAST);
+ dev_stats->tx_frames = dev_stats->tx_bcast_frames +
+ dev_stats->tx_mcast_frames +
+ dev_stats->tx_ucast_frames;
+}
+
+void otx2_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ struct otx2_dev_stats *dev_stats;
+
+ otx2_get_dev_stats(pfvf);
+
+ dev_stats = &pfvf->hw.dev_stats;
+ stats->rx_bytes = dev_stats->rx_bytes;
+ stats->rx_packets = dev_stats->rx_frames;
+ stats->rx_dropped = dev_stats->rx_drops;
+ stats->multicast = dev_stats->rx_mcast_frames;
+
+ stats->tx_bytes = dev_stats->tx_bytes;
+ stats->tx_packets = dev_stats->tx_frames;
+ stats->tx_dropped = dev_stats->tx_drops;
+}
+EXPORT_SYMBOL(otx2_get_stats64);
+
+/* Sync MAC address with RVU AF */
+static int otx2_hw_set_mac_addr(struct otx2_nic *pfvf, u8 *mac)
+{
+ struct nix_set_mac_addr *req;
+ int err;
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_nix_set_mac_addr(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ ether_addr_copy(req->mac_addr, mac);
+
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
+}
+
+static int otx2_hw_get_mac_addr(struct otx2_nic *pfvf,
+ struct net_device *netdev)
+{
+ struct nix_get_mac_addr_rsp *rsp;
+ struct mbox_msghdr *msghdr;
+ struct msg_req *req;
+ int err;
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_nix_get_mac_addr(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
+ }
+
+ msghdr = otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
+ if (IS_ERR(msghdr)) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return PTR_ERR(msghdr);
+ }
+ rsp = (struct nix_get_mac_addr_rsp *)msghdr;
+ eth_hw_addr_set(netdev, rsp->mac_addr);
+ mutex_unlock(&pfvf->mbox.lock);
+
+ return 0;
+}
+
+int otx2_set_mac_address(struct net_device *netdev, void *p)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ if (!otx2_hw_set_mac_addr(pfvf, addr->sa_data)) {
+ eth_hw_addr_set(netdev, addr->sa_data);
+ /* update dmac field in vlan offload rule */
+ if (netif_running(netdev) &&
+ pfvf->flags & OTX2_FLAG_RX_VLAN_SUPPORT)
+ otx2_install_rxvlan_offload_flow(pfvf);
+ /* update dmac address in ntuple and DMAC filter list */
+ if (pfvf->flags & OTX2_FLAG_DMACFLTR_SUPPORT)
+ otx2_dmacflt_update_pfmac_flow(pfvf);
+ } else {
+ return -EPERM;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(otx2_set_mac_address);
+
+int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu)
+{
+ struct nix_frs_cfg *req;
+ u16 maxlen;
+ int err;
+
+ maxlen = otx2_get_max_mtu(pfvf) + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN;
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_nix_set_hw_frs(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ req->maxlen = pfvf->netdev->mtu + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN;
+
+ /* Use max receive length supported by hardware for loopback devices */
+ if (is_otx2_lbkvf(pfvf->pdev))
+ req->maxlen = maxlen;
+
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
+}
+
+int otx2_config_pause_frm(struct otx2_nic *pfvf)
+{
+ struct cgx_pause_frm_cfg *req;
+ int err;
+
+ if (is_otx2_lbkvf(pfvf->pdev))
+ return 0;
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_cgx_cfg_pause_frm(&pfvf->mbox);
+ if (!req) {
+ err = -ENOMEM;
+ goto unlock;
+ }
+
+ req->rx_pause = !!(pfvf->flags & OTX2_FLAG_RX_PAUSE_ENABLED);
+ req->tx_pause = !!(pfvf->flags & OTX2_FLAG_TX_PAUSE_ENABLED);
+ req->set = 1;
+
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+unlock:
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
+}
+EXPORT_SYMBOL(otx2_config_pause_frm);
+
+int otx2_set_flowkey_cfg(struct otx2_nic *pfvf)
+{
+ struct otx2_rss_info *rss = &pfvf->hw.rss_info;
+ struct nix_rss_flowkey_cfg_rsp *rsp;
+ struct nix_rss_flowkey_cfg *req;
+ int err;
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_nix_rss_flowkey_cfg(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+ req->mcam_index = -1; /* Default or reserved index */
+ req->flowkey_cfg = rss->flowkey_cfg;
+ req->group = DEFAULT_RSS_CONTEXT_GROUP;
+
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err)
+ goto fail;
+
+ rsp = (struct nix_rss_flowkey_cfg_rsp *)
+ otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
+ if (IS_ERR(rsp)) {
+ err = PTR_ERR(rsp);
+ goto fail;
+ }
+
+ pfvf->hw.flowkey_alg_idx = rsp->alg_idx;
+fail:
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
+}
+
+int otx2_set_rss_table(struct otx2_nic *pfvf, int ctx_id)
+{
+ struct otx2_rss_info *rss = &pfvf->hw.rss_info;
+ const int index = rss->rss_size * ctx_id;
+ struct mbox *mbox = &pfvf->mbox;
+ struct otx2_rss_ctx *rss_ctx;
+ struct nix_aq_enq_req *aq;
+ int idx, err;
+
+ mutex_lock(&mbox->lock);
+ rss_ctx = rss->rss_ctx[ctx_id];
+ /* Get memory to put this msg */
+ for (idx = 0; idx < rss->rss_size; idx++) {
+ aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+ if (!aq) {
+ /* The shared memory buffer can be full.
+ * Flush it and retry
+ */
+ err = otx2_sync_mbox_msg(mbox);
+ if (err) {
+ mutex_unlock(&mbox->lock);
+ return err;
+ }
+ aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+ if (!aq) {
+ mutex_unlock(&mbox->lock);
+ return -ENOMEM;
+ }
+ }
+
+ aq->rss.rq = rss_ctx->ind_tbl[idx];
+
+ /* Fill AQ info */
+ aq->qidx = index + idx;
+ aq->ctype = NIX_AQ_CTYPE_RSS;
+ aq->op = NIX_AQ_INSTOP_INIT;
+ }
+ err = otx2_sync_mbox_msg(mbox);
+ mutex_unlock(&mbox->lock);
+ return err;
+}
+
+void otx2_set_rss_key(struct otx2_nic *pfvf)
+{
+ struct otx2_rss_info *rss = &pfvf->hw.rss_info;
+ u64 *key = (u64 *)&rss->key[4];
+ int idx;
+
+ /* The 352-bit (44-byte) key needs to be configured as below:
+ * NIX_LF_RX_SECRETX0 = key<351:288>
+ * NIX_LF_RX_SECRETX1 = key<287:224>
+ * NIX_LF_RX_SECRETX2 = key<223:160>
+ * NIX_LF_RX_SECRETX3 = key<159:96>
+ * NIX_LF_RX_SECRETX4 = key<95:32>
+ * NIX_LF_RX_SECRETX5<63:32> = key<31:0>
+ */
+ otx2_write64(pfvf, NIX_LF_RX_SECRETX(5),
+ (u64)(*((u32 *)&rss->key)) << 32);
+ idx = sizeof(rss->key) / sizeof(u64);
+ while (idx > 0) {
+ idx--;
+ otx2_write64(pfvf, NIX_LF_RX_SECRETX(idx), *key++);
+ }
+}
+
+int otx2_rss_init(struct otx2_nic *pfvf)
+{
+ struct otx2_rss_info *rss = &pfvf->hw.rss_info;
+ struct otx2_rss_ctx *rss_ctx;
+ int idx, ret = 0;
+
+ rss->rss_size = sizeof(*rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP]);
+
+ /* Init RSS key if it is not setup already */
+ if (!rss->enable)
+ netdev_rss_key_fill(rss->key, sizeof(rss->key));
+ otx2_set_rss_key(pfvf);
+
+ if (!netif_is_rxfh_configured(pfvf->netdev)) {
+ /* Set RSS group 0 as default indirection table */
+ rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP] = kzalloc(rss->rss_size,
+ GFP_KERNEL);
+ if (!rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP])
+ return -ENOMEM;
+
+ rss_ctx = rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP];
+ for (idx = 0; idx < rss->rss_size; idx++)
+ rss_ctx->ind_tbl[idx] =
+ ethtool_rxfh_indir_default(idx,
+ pfvf->hw.rx_queues);
+ }
+ ret = otx2_set_rss_table(pfvf, DEFAULT_RSS_CONTEXT_GROUP);
+ if (ret)
+ return ret;
+
+ /* Flowkey or hash config to be used for generating flow tag */
+ rss->flowkey_cfg = rss->enable ? rss->flowkey_cfg :
+ NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6 |
+ NIX_FLOW_KEY_TYPE_TCP | NIX_FLOW_KEY_TYPE_UDP |
+ NIX_FLOW_KEY_TYPE_SCTP | NIX_FLOW_KEY_TYPE_VLAN |
+ NIX_FLOW_KEY_TYPE_IPV4_PROTO;
+
+ ret = otx2_set_flowkey_cfg(pfvf);
+ if (ret)
+ return ret;
+
+ rss->enable = true;
+ return 0;
+}
+
+/* Setup UDP segmentation algorithm in HW */
+static void otx2_setup_udp_segmentation(struct nix_lso_format_cfg *lso, bool v4)
+{
+ struct nix_lso_format *field;
+
+ field = (struct nix_lso_format *)&lso->fields[0];
+ lso->field_mask = GENMASK(18, 0);
+
+ /* IP's Length field */
+ field->layer = NIX_TXLAYER_OL3;
+ /* In ipv4, length field is at offset 2 bytes, for ipv6 it's 4 */
+ field->offset = v4 ? 2 : 4;
+ field->sizem1 = 1; /* i.e 2 bytes */
+ field->alg = NIX_LSOALG_ADD_PAYLEN;
+ field++;
+
+ /* No ID field in IPv6 header */
+ if (v4) {
+ /* Increment IPID */
+ field->layer = NIX_TXLAYER_OL3;
+ field->offset = 4;
+ field->sizem1 = 1; /* i.e 2 bytes */
+ field->alg = NIX_LSOALG_ADD_SEGNUM;
+ field++;
+ }
+
+ /* Update length in UDP header */
+ field->layer = NIX_TXLAYER_OL4;
+ field->offset = 4;
+ field->sizem1 = 1;
+ field->alg = NIX_LSOALG_ADD_PAYLEN;
+}
+
+/* Setup segmentation algorithms in HW and retrieve algorithm index */
+void otx2_setup_segmentation(struct otx2_nic *pfvf)
+{
+ struct nix_lso_format_cfg_rsp *rsp;
+ struct nix_lso_format_cfg *lso;
+ struct otx2_hw *hw = &pfvf->hw;
+ int err;
+
+ mutex_lock(&pfvf->mbox.lock);
+
+ /* UDPv4 segmentation */
+ lso = otx2_mbox_alloc_msg_nix_lso_format_cfg(&pfvf->mbox);
+ if (!lso)
+ goto fail;
+
+ /* Setup UDP/IP header fields that HW should update per segment */
+ otx2_setup_udp_segmentation(lso, true);
+
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err)
+ goto fail;
+
+ rsp = (struct nix_lso_format_cfg_rsp *)
+ otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &lso->hdr);
+ if (IS_ERR(rsp))
+ goto fail;
+
+ hw->lso_udpv4_idx = rsp->lso_format_idx;
+
+ /* UDPv6 segmentation */
+ lso = otx2_mbox_alloc_msg_nix_lso_format_cfg(&pfvf->mbox);
+ if (!lso)
+ goto fail;
+
+ /* Setup UDP/IP header fields that HW should update per segment */
+ otx2_setup_udp_segmentation(lso, false);
+
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err)
+ goto fail;
+
+ rsp = (struct nix_lso_format_cfg_rsp *)
+ otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &lso->hdr);
+ if (IS_ERR(rsp))
+ goto fail;
+
+ hw->lso_udpv6_idx = rsp->lso_format_idx;
+ mutex_unlock(&pfvf->mbox.lock);
+ return;
+fail:
+ mutex_unlock(&pfvf->mbox.lock);
+ netdev_info(pfvf->netdev,
+ "Failed to get LSO index for UDP GSO offload, disabling\n");
+ pfvf->netdev->hw_features &= ~NETIF_F_GSO_UDP_L4;
+}
+
+void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx)
+{
+ /* Configure CQE interrupt coalescing parameters.
+ *
+ * HW triggers an irq when ECOUNT > cq_ecount_wait, hence
+ * program one less than cq_ecount_wait. cq_time_wait is in
+ * usecs; convert it to a count of 100 ns units.
+ */
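+ /* For example (illustrative values, not necessarily the driver's
+ * defaults): a cq_time_wait of 100 usecs is programmed as 1000 units
+ * of 100 ns, and a cq_ecount_wait of 16 is programmed as 15.
+ */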
+ otx2_write64(pfvf, NIX_LF_CINTX_WAIT(qidx),
+ ((u64)(pfvf->hw.cq_time_wait * 10) << 48) |
+ ((u64)pfvf->hw.cq_qcount_wait << 32) |
+ (pfvf->hw.cq_ecount_wait - 1));
+}
+
+static int otx2_alloc_pool_buf(struct otx2_nic *pfvf, struct otx2_pool *pool,
+ dma_addr_t *dma)
+{
+ unsigned int offset = 0;
+ struct page *page;
+ size_t sz;
+
+ sz = SKB_DATA_ALIGN(pool->rbsize);
+ sz = ALIGN(sz, OTX2_ALIGN);
+
+ page = page_pool_alloc_frag(pool->page_pool, &offset, sz, GFP_ATOMIC);
+ if (unlikely(!page))
+ return -ENOMEM;
+
+ *dma = page_pool_get_dma_addr(page) + offset;
+ return 0;
+}
+
+static int __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
+ dma_addr_t *dma)
+{
+ u8 *buf;
+
+ if (pool->page_pool)
+ return otx2_alloc_pool_buf(pfvf, pool, dma);
+
+ buf = napi_alloc_frag_align(pool->rbsize, OTX2_ALIGN);
+ if (unlikely(!buf))
+ return -ENOMEM;
+
+ *dma = dma_map_single_attrs(pfvf->dev, buf, pool->rbsize,
+ DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+ if (unlikely(dma_mapping_error(pfvf->dev, *dma))) {
+ page_frag_free(buf);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
+ dma_addr_t *dma)
+{
+ int ret;
+
+ local_bh_disable();
+ ret = __otx2_alloc_rbuf(pfvf, pool, dma);
+ local_bh_enable();
+ return ret;
+}
+
+int otx2_alloc_buffer(struct otx2_nic *pfvf, struct otx2_cq_queue *cq,
+ dma_addr_t *dma)
+{
+ if (unlikely(__otx2_alloc_rbuf(pfvf, cq->rbpool, dma)))
+ return -ENOMEM;
+ return 0;
+}
+
+void otx2_tx_timeout(struct net_device *netdev, unsigned int txq)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+
+ schedule_work(&pfvf->reset_task);
+}
+EXPORT_SYMBOL(otx2_tx_timeout);
+
+void otx2_get_mac_from_af(struct net_device *netdev)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ int err;
+
+ err = otx2_hw_get_mac_addr(pfvf, netdev);
+ if (err)
+ dev_warn(pfvf->dev, "Failed to read mac from hardware\n");
+
+ /* If AF doesn't provide a valid MAC, generate a random one */
+ if (!is_valid_ether_addr(netdev->dev_addr))
+ eth_hw_addr_random(netdev);
+}
+EXPORT_SYMBOL(otx2_get_mac_from_af);
+
+int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool txschq_for_pfc)
+{
+ u16 (*schq_list)[MAX_TXSCHQ_PER_FUNC];
+ struct otx2_hw *hw = &pfvf->hw;
+ struct nix_txschq_config *req;
+ u64 schq, parent;
+ u64 dwrr_val;
+
+ dwrr_val = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen);
+
+ req = otx2_mbox_alloc_msg_nix_txschq_cfg(&pfvf->mbox);
+ if (!req)
+ return -ENOMEM;
+
+ req->lvl = lvl;
+ req->num_regs = 1;
+
+ schq_list = hw->txschq_list;
+#ifdef CONFIG_DCB
+ if (txschq_for_pfc)
+ schq_list = pfvf->pfc_schq_list;
+#endif
+
+ schq = schq_list[lvl][prio];
+ /* Set topology and other configuration */
+ if (lvl == NIX_TXSCH_LVL_SMQ) {
+ req->reg[0] = NIX_AF_SMQX_CFG(schq);
+ req->regval[0] = ((u64)pfvf->tx_max_pktlen << 8) | OTX2_MIN_MTU;
+ req->regval[0] |= (0x20ULL << 51) | (0x80ULL << 39) |
+ (0x2ULL << 36);
+ /* Set link type for DWRR MTU selection on CN10K silicons */
+ if (!is_dev_otx2(pfvf->pdev))
+ req->regval[0] |= FIELD_PREP(GENMASK_ULL(58, 57),
+ (u64)hw->smq_link_type);
+ req->num_regs++;
+ /* MDQ config */
+ parent = schq_list[NIX_TXSCH_LVL_TL4][prio];
+ req->reg[1] = NIX_AF_MDQX_PARENT(schq);
+ req->regval[1] = parent << 16;
+ req->num_regs++;
+ /* Set DWRR quantum */
+ req->reg[2] = NIX_AF_MDQX_SCHEDULE(schq);
+ req->regval[2] = dwrr_val;
+ } else if (lvl == NIX_TXSCH_LVL_TL4) {
+ parent = schq_list[NIX_TXSCH_LVL_TL3][prio];
+ req->reg[0] = NIX_AF_TL4X_PARENT(schq);
+ req->regval[0] = parent << 16;
+ req->num_regs++;
+ req->reg[1] = NIX_AF_TL4X_SCHEDULE(schq);
+ req->regval[1] = dwrr_val;
+ } else if (lvl == NIX_TXSCH_LVL_TL3) {
+ parent = schq_list[NIX_TXSCH_LVL_TL2][prio];
+ req->reg[0] = NIX_AF_TL3X_PARENT(schq);
+ req->regval[0] = parent << 16;
+ req->num_regs++;
+ req->reg[1] = NIX_AF_TL3X_SCHEDULE(schq);
+ req->regval[1] = dwrr_val;
+ if (lvl == hw->txschq_link_cfg_lvl) {
+ req->num_regs++;
+ req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, hw->tx_link);
+ /* Enable this queue and backpressure
+ * and set relative channel
+ */
+ req->regval[2] = BIT_ULL(13) | BIT_ULL(12) | prio;
+ }
+ } else if (lvl == NIX_TXSCH_LVL_TL2) {
+ parent = schq_list[NIX_TXSCH_LVL_TL1][prio];
+ req->reg[0] = NIX_AF_TL2X_PARENT(schq);
+ req->regval[0] = parent << 16;
+
+ req->num_regs++;
+ req->reg[1] = NIX_AF_TL2X_SCHEDULE(schq);
+ req->regval[1] = TXSCH_TL1_DFLT_RR_PRIO << 24 | dwrr_val;
+
+ if (lvl == hw->txschq_link_cfg_lvl) {
+ req->num_regs++;
+ req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, hw->tx_link);
+ /* Enable this queue and backpressure
+ * and set relative channel
+ */
+ req->regval[2] = BIT_ULL(13) | BIT_ULL(12) | prio;
+ }
+ } else if (lvl == NIX_TXSCH_LVL_TL1) {
+ /* Default config for TL1.
+ * For VF this is always ignored.
+ */
+
+ /* On CN10K, if RR_WEIGHT is greater than 16384, HW will
+ * clip it to 16384, so configuring a 24bit max value
+ * will work on both OTx2 and CN10K.
+ */
+ req->reg[0] = NIX_AF_TL1X_SCHEDULE(schq);
+ req->regval[0] = TXSCH_TL1_DFLT_RR_QTM;
+
+ req->num_regs++;
+ req->reg[1] = NIX_AF_TL1X_TOPOLOGY(schq);
+ req->regval[1] = (TXSCH_TL1_DFLT_RR_PRIO << 1);
+
+ req->num_regs++;
+ req->reg[2] = NIX_AF_TL1X_CIR(schq);
+ req->regval[2] = 0;
+ }
+
+ return otx2_sync_mbox_msg(&pfvf->mbox);
+}
+EXPORT_SYMBOL(otx2_txschq_config);
+
+int otx2_smq_flush(struct otx2_nic *pfvf, int smq)
+{
+ struct nix_txschq_config *req;
+ int rc;
+
+ mutex_lock(&pfvf->mbox.lock);
+
+ req = otx2_mbox_alloc_msg_nix_txschq_cfg(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ req->lvl = NIX_TXSCH_LVL_SMQ;
+ req->reg[0] = NIX_AF_SMQX_CFG(smq);
+ req->regval[0] |= BIT_ULL(49);
+ req->num_regs++;
+
+ rc = otx2_sync_mbox_msg(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
+ return rc;
+}
+EXPORT_SYMBOL(otx2_smq_flush);
+
+int otx2_txsch_alloc(struct otx2_nic *pfvf)
+{
+ struct nix_txsch_alloc_req *req;
+ struct nix_txsch_alloc_rsp *rsp;
+ int lvl, schq, rc;
+
+ /* Get memory to put this msg */
+ req = otx2_mbox_alloc_msg_nix_txsch_alloc(&pfvf->mbox);
+ if (!req)
+ return -ENOMEM;
+
+ /* Request one schq per level */
+ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++)
+ req->schq[lvl] = 1;
+ rc = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (rc)
+ return rc;
+
+ rsp = (struct nix_txsch_alloc_rsp *)
+ otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
+ if (IS_ERR(rsp))
+ return PTR_ERR(rsp);
+
+ /* Setup transmit scheduler list */
+ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++)
+ for (schq = 0; schq < rsp->schq[lvl]; schq++)
+ pfvf->hw.txschq_list[lvl][schq] =
+ rsp->schq_list[lvl][schq];
+
+ pfvf->hw.txschq_link_cfg_lvl = rsp->link_cfg_lvl;
+ pfvf->hw.txschq_aggr_lvl_rr_prio = rsp->aggr_lvl_rr_prio;
+
+ return 0;
+}
+
+void otx2_txschq_free_one(struct otx2_nic *pfvf, u16 lvl, u16 schq)
+{
+ struct nix_txsch_free_req *free_req;
+ int err;
+
+ mutex_lock(&pfvf->mbox.lock);
+
+ free_req = otx2_mbox_alloc_msg_nix_txsch_free(&pfvf->mbox);
+ if (!free_req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ netdev_err(pfvf->netdev,
+ "Failed alloc txschq free req\n");
+ return;
+ }
+
+ free_req->schq_lvl = lvl;
+ free_req->schq = schq;
+
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err) {
+ netdev_err(pfvf->netdev,
+ "Failed stop txschq %d at level %d\n", schq, lvl);
+ }
+
+ mutex_unlock(&pfvf->mbox.lock);
+}
+EXPORT_SYMBOL(otx2_txschq_free_one);
+
+void otx2_txschq_stop(struct otx2_nic *pfvf)
+{
+ int lvl, schq;
+
+ /* Free non-QoS TLx nodes */
+ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++)
+ otx2_txschq_free_one(pfvf, lvl,
+ pfvf->hw.txschq_list[lvl][0]);
+
+ /* Clear the txschq list */
+ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
+ for (schq = 0; schq < MAX_TXSCHQ_PER_FUNC; schq++)
+ pfvf->hw.txschq_list[lvl][schq] = 0;
+ }
+
+}
+
+void otx2_sqb_flush(struct otx2_nic *pfvf)
+{
+ int qidx, sqe_tail, sqe_head;
+ struct otx2_snd_queue *sq;
+ u64 incr, *ptr, val;
+
+ ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS);
+ for (qidx = 0; qidx < otx2_get_total_tx_queues(pfvf); qidx++) {
+ sq = &pfvf->qset.sq[qidx];
+ if (!sq->sqb_ptrs)
+ continue;
+
+ incr = (u64)qidx << 32;
+ val = otx2_atomic64_add(incr, ptr);
+ sqe_head = (val >> 20) & 0x3F;
+ sqe_tail = (val >> 28) & 0x3F;
+ if (sqe_head != sqe_tail)
+ usleep_range(50, 60);
+ }
+}
+
+/* RED and drop levels of CQ on packet reception.
+ * For a CQ, the level is a measure of emptiness (0x0 = full, 255 = empty).
+ */
+#define RQ_PASS_LVL_CQ(skid, qsize) ((((skid) + 16) * 256) / (qsize))
+#define RQ_DROP_LVL_CQ(skid, qsize) (((skid) * 256) / (qsize))
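+
+/* Illustrative example with assumed values (not necessarily the driver
+ * defaults): for skid = 600 and qsize = 1024, the pass level is
+ * ((600 + 16) * 256) / 1024 = 154 and the drop level is
+ * (600 * 256) / 1024 = 150.
+ */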
+
+/* RED and drop levels of AURA for packet reception.
+ * For an AURA, the level is a measure of fullness (0x0 = empty, 255 = full).
+ * E.g.: for an RQ of length 1K with pass/drop levels of 204/230,
+ * RED accepts packets if free pointers are > 102 and <= 205, and
+ * packets are dropped if free pointers fall below 102.
+ */
+#define RQ_BP_LVL_AURA (255 - ((85 * 256) / 100)) /* BP when 85% is full */
+#define RQ_PASS_LVL_AURA (255 - ((95 * 256) / 100)) /* RED when 95% is full */
+#define RQ_DROP_LVL_AURA (255 - ((99 * 256) / 100)) /* Drop when 99% is full */
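+
+/* With the percentages above, these levels evaluate (integer arithmetic)
+ * to a BP level of 255 - 217 = 38, a RED pass level of 255 - 243 = 12
+ * and a drop level of 255 - 253 = 2.
+ */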
+
+static int otx2_rq_init(struct otx2_nic *pfvf, u16 qidx, u16 lpb_aura)
+{
+ struct otx2_qset *qset = &pfvf->qset;
+ struct nix_aq_enq_req *aq;
+
+ /* Get memory to put this msg */
+ aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
+ if (!aq)
+ return -ENOMEM;
+
+ aq->rq.cq = qidx;
+ aq->rq.ena = 1;
+ aq->rq.pb_caching = 1;
+ aq->rq.lpb_aura = lpb_aura; /* Use large packet buffer aura */
+ aq->rq.lpb_sizem1 = (DMA_BUFFER_LEN(pfvf->rbsize) / 8) - 1;
+ aq->rq.xqe_imm_size = 0; /* Copying of packet to CQE not needed */
+ aq->rq.flow_tagw = 32; /* Copy full 32bit flow_tag to CQE header */
+ aq->rq.qint_idx = 0;
+ aq->rq.lpb_drop_ena = 1; /* Enable RED dropping for AURA */
+ aq->rq.xqe_drop_ena = 1; /* Enable RED dropping for CQ/SSO */
+ aq->rq.xqe_pass = RQ_PASS_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt);
+ aq->rq.xqe_drop = RQ_DROP_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt);
+ aq->rq.lpb_aura_pass = RQ_PASS_LVL_AURA;
+ aq->rq.lpb_aura_drop = RQ_DROP_LVL_AURA;
+
+ /* Fill AQ info */
+ aq->qidx = qidx;
+ aq->ctype = NIX_AQ_CTYPE_RQ;
+ aq->op = NIX_AQ_INSTOP_INIT;
+
+ return otx2_sync_mbox_msg(&pfvf->mbox);
+}
+
+int otx2_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
+{
+ struct otx2_nic *pfvf = dev;
+ struct otx2_snd_queue *sq;
+ struct nix_aq_enq_req *aq;
+
+ sq = &pfvf->qset.sq[qidx];
+ sq->lmt_addr = (__force u64 *)(pfvf->reg_base + LMT_LF_LMTLINEX(qidx));
+ /* Get memory to put this msg */
+ aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
+ if (!aq)
+ return -ENOMEM;
+
+ aq->sq.cq = pfvf->hw.rx_queues + qidx;
+ aq->sq.max_sqe_size = NIX_MAXSQESZ_W16; /* 128 byte */
+ aq->sq.cq_ena = 1;
+ aq->sq.ena = 1;
+ aq->sq.smq = otx2_get_smq_idx(pfvf, qidx);
+ aq->sq.smq_rr_quantum = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen);
+ aq->sq.default_chan = pfvf->hw.tx_chan_base;
+ aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */
+ aq->sq.sqb_aura = sqb_aura;
+ aq->sq.sq_int_ena = NIX_SQINT_BITS;
+ aq->sq.qint_idx = 0;
+ /* Due to the pipelining impact, a minimum of 2000 unused SQ CQEs
+ * needs to be maintained to avoid CQ overflow.
+ */
+ aq->sq.cq_limit = ((SEND_CQ_SKID * 256) / (pfvf->qset.sqe_cnt));
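+ /* e.g. assuming SEND_CQ_SKID corresponds to the 2000-CQE skid mentioned
+ * above and an illustrative sqe_cnt of 4096, this gives
+ * cq_limit = (2000 * 256) / 4096 = 125.
+ */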
+
+ /* Fill AQ info */
+ aq->qidx = qidx;
+ aq->ctype = NIX_AQ_CTYPE_SQ;
+ aq->op = NIX_AQ_INSTOP_INIT;
+
+ return otx2_sync_mbox_msg(&pfvf->mbox);
+}
+
+int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
+{
+ struct otx2_qset *qset = &pfvf->qset;
+ struct otx2_snd_queue *sq;
+ struct otx2_pool *pool;
+ int err;
+
+ pool = &pfvf->qset.pool[sqb_aura];
+ sq = &qset->sq[qidx];
+ sq->sqe_size = NIX_SQESZ_W16 ? 64 : 128;
+ sq->sqe_cnt = qset->sqe_cnt;
+
+ err = qmem_alloc(pfvf->dev, &sq->sqe, 1, sq->sqe_size);
+ if (err)
+ return err;
+
+ if (qidx < pfvf->hw.tx_queues) {
+ err = qmem_alloc(pfvf->dev, &sq->tso_hdrs, qset->sqe_cnt,
+ TSO_HEADER_SIZE);
+ if (err)
+ return err;
+ }
+
+ sq->sqe_base = sq->sqe->base;
+ sq->sg = kcalloc(qset->sqe_cnt, sizeof(struct sg_list), GFP_KERNEL);
+ if (!sq->sg)
+ return -ENOMEM;
+
+ if (pfvf->ptp && qidx < pfvf->hw.tx_queues) {
+ err = qmem_alloc(pfvf->dev, &sq->timestamps, qset->sqe_cnt,
+ sizeof(*sq->timestamps));
+ if (err)
+ return err;
+ }
+
+ sq->head = 0;
+ sq->cons_head = 0;
+ sq->sqe_per_sqb = (pfvf->hw.sqb_size / sq->sqe_size) - 1;
+ sq->num_sqbs = (qset->sqe_cnt + sq->sqe_per_sqb) / sq->sqe_per_sqb;
+ /* Set SQE threshold to 10% of total SQEs */
+ sq->sqe_thresh = ((sq->num_sqbs * sq->sqe_per_sqb) * 10) / 100;
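+ /* Illustrative sizing (assumed values, not necessarily the defaults):
+ * with a 4K SQB and a 128-byte SQE, sqe_per_sqb = (4096 / 128) - 1 = 31;
+ * for sqe_cnt = 4096 this gives num_sqbs = (4096 + 31) / 31 = 133 and
+ * sqe_thresh = ((133 * 31) * 10) / 100 = 412.
+ */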
+ sq->aura_id = sqb_aura;
+ sq->aura_fc_addr = pool->fc_addr->base;
+ sq->io_addr = (__force u64)otx2_get_regaddr(pfvf, NIX_LF_OP_SENDX(0));
+
+ sq->stats.bytes = 0;
+ sq->stats.pkts = 0;
+
+ return pfvf->hw_ops->sq_aq_init(pfvf, qidx, sqb_aura);
+
+}
+
+static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx)
+{
+ struct otx2_qset *qset = &pfvf->qset;
+ int err, pool_id, non_xdp_queues;
+ struct nix_aq_enq_req *aq;
+ struct otx2_cq_queue *cq;
+
+ cq = &qset->cq[qidx];
+ cq->cq_idx = qidx;
+ non_xdp_queues = pfvf->hw.rx_queues + pfvf->hw.tx_queues;
+ if (qidx < pfvf->hw.rx_queues) {
+ cq->cq_type = CQ_RX;
+ cq->cint_idx = qidx;
+ cq->cqe_cnt = qset->rqe_cnt;
+ if (pfvf->xdp_prog)
+ xdp_rxq_info_reg(&cq->xdp_rxq, pfvf->netdev, qidx, 0);
+ } else if (qidx < non_xdp_queues) {
+ cq->cq_type = CQ_TX;
+ cq->cint_idx = qidx - pfvf->hw.rx_queues;
+ cq->cqe_cnt = qset->sqe_cnt;
+ } else {
+ if (pfvf->hw.xdp_queues &&
+ qidx < non_xdp_queues + pfvf->hw.xdp_queues) {
+ cq->cq_type = CQ_XDP;
+ cq->cint_idx = qidx - non_xdp_queues;
+ cq->cqe_cnt = qset->sqe_cnt;
+ } else {
+ cq->cq_type = CQ_QOS;
+ cq->cint_idx = qidx - non_xdp_queues -
+ pfvf->hw.xdp_queues;
+ cq->cqe_cnt = qset->sqe_cnt;
+ }
+ }
+ cq->cqe_size = pfvf->qset.xqe_size;
+
+ /* Allocate memory for CQEs */
+ err = qmem_alloc(pfvf->dev, &cq->cqe, cq->cqe_cnt, cq->cqe_size);
+ if (err)
+ return err;
+
+ /* Save CQE CPU base for faster reference */
+ cq->cqe_base = cq->cqe->base;
+ /* In case all RQ auras point to a single pool, the receive buffer
+ * pools of all CQs also point to that same pool.
+ */
+ pool_id = ((cq->cq_type == CQ_RX) &&
+ (pfvf->hw.rqpool_cnt != pfvf->hw.rx_queues)) ? 0 : qidx;
+ cq->rbpool = &qset->pool[pool_id];
+ cq->refill_task_sched = false;
+
+ /* Get memory to put this msg */
+ aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
+ if (!aq)
+ return -ENOMEM;
+
+ aq->cq.ena = 1;
+ aq->cq.qsize = Q_SIZE(cq->cqe_cnt, 4);
+ aq->cq.caching = 1;
+ aq->cq.base = cq->cqe->iova;
+ aq->cq.cint_idx = cq->cint_idx;
+ aq->cq.cq_err_int_ena = NIX_CQERRINT_BITS;
+ aq->cq.qint_idx = 0;
+ aq->cq.avg_level = 255;
+
+ if (qidx < pfvf->hw.rx_queues) {
+ aq->cq.drop = RQ_DROP_LVL_CQ(pfvf->hw.rq_skid, cq->cqe_cnt);
+ aq->cq.drop_ena = 1;
+
+ if (!is_otx2_lbkvf(pfvf->pdev)) {
+ /* Enable receive CQ backpressure */
+ aq->cq.bp_ena = 1;
+#ifdef CONFIG_DCB
+ aq->cq.bpid = pfvf->bpid[pfvf->queue_to_pfc_map[qidx]];
+#else
+ aq->cq.bpid = pfvf->bpid[0];
+#endif
+
+ /* Set the backpressure level to the same value as the CQ pass level */
+ aq->cq.bp = RQ_PASS_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt);
+ }
+ }
+
+ /* Fill AQ info */
+ aq->qidx = qidx;
+ aq->ctype = NIX_AQ_CTYPE_CQ;
+ aq->op = NIX_AQ_INSTOP_INIT;
+
+ return otx2_sync_mbox_msg(&pfvf->mbox);
+}
+
+static void otx2_pool_refill_task(struct work_struct *work)
+{
+ struct otx2_cq_queue *cq;
+ struct refill_work *wrk;
+ struct otx2_nic *pfvf;
+ int qidx;
+
+ wrk = container_of(work, struct refill_work, pool_refill_work.work);
+ pfvf = wrk->pf;
+ qidx = wrk - pfvf->refill_wrk;
+ cq = &pfvf->qset.cq[qidx];
+
+ cq->refill_task_sched = false;
+
+ local_bh_disable();
+ napi_schedule(wrk->napi);
+ local_bh_enable();
+}
+
+int otx2_config_nix_queues(struct otx2_nic *pfvf)
+{
+ int qidx, err;
+
+ /* Initialize RX queues */
+ for (qidx = 0; qidx < pfvf->hw.rx_queues; qidx++) {
+ u16 lpb_aura = otx2_get_pool_idx(pfvf, AURA_NIX_RQ, qidx);
+
+ err = otx2_rq_init(pfvf, qidx, lpb_aura);
+ if (err)
+ return err;
+ }
+
+ /* Initialize TX queues */
+ for (qidx = 0; qidx < pfvf->hw.non_qos_queues; qidx++) {
+ u16 sqb_aura = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
+
+ err = otx2_sq_init(pfvf, qidx, sqb_aura);
+ if (err)
+ return err;
+ }
+
+ /* Initialize completion queues */
+ for (qidx = 0; qidx < pfvf->qset.cq_cnt; qidx++) {
+ err = otx2_cq_init(pfvf, qidx);
+ if (err)
+ return err;
+ }
+
+ pfvf->cq_op_addr = (__force u64 *)otx2_get_regaddr(pfvf,
+ NIX_LF_CQ_OP_STATUS);
+
+ /* Initialize work queue for receive buffer refill */
+ pfvf->refill_wrk = devm_kcalloc(pfvf->dev, pfvf->qset.cq_cnt,
+ sizeof(struct refill_work), GFP_KERNEL);
+ if (!pfvf->refill_wrk)
+ return -ENOMEM;
+
+ for (qidx = 0; qidx < pfvf->qset.cq_cnt; qidx++) {
+ pfvf->refill_wrk[qidx].pf = pfvf;
+ INIT_DELAYED_WORK(&pfvf->refill_wrk[qidx].pool_refill_work,
+ otx2_pool_refill_task);
+ }
+ return 0;
+}
+
+int otx2_config_nix(struct otx2_nic *pfvf)
+{
+ struct nix_lf_alloc_req *nixlf;
+ struct nix_lf_alloc_rsp *rsp;
+ int err;
+
+ pfvf->qset.xqe_size = pfvf->hw.xqe_size;
+
+ /* Get memory to put this msg */
+ nixlf = otx2_mbox_alloc_msg_nix_lf_alloc(&pfvf->mbox);
+ if (!nixlf)
+ return -ENOMEM;
+
+ /* Set RQ/SQ/CQ counts */
+ nixlf->rq_cnt = pfvf->hw.rx_queues;
+ nixlf->sq_cnt = otx2_get_total_tx_queues(pfvf);
+ nixlf->cq_cnt = pfvf->qset.cq_cnt;
+ nixlf->rss_sz = MAX_RSS_INDIR_TBL_SIZE;
+ nixlf->rss_grps = MAX_RSS_GROUPS;
+ nixlf->xqe_sz = pfvf->hw.xqe_size == 128 ? NIX_XQESZ_W16 : NIX_XQESZ_W64;
+ /* We don't know the absolute NPA LF idx attached.
+ * AF will replace 'RVU_DEFAULT_PF_FUNC' with the
+ * NPA LF attached to this RVU PF/VF.
+ */
+ nixlf->npa_func = RVU_DEFAULT_PF_FUNC;
+ /* Disable alignment pad, enable L2 length check,
+ * enable L4 TCP/UDP checksum verification.
+ */
+ nixlf->rx_cfg = BIT_ULL(33) | BIT_ULL(35) | BIT_ULL(37);
+
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err)
+ return err;
+
+ rsp = (struct nix_lf_alloc_rsp *)otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0,
+ &nixlf->hdr);
+ if (IS_ERR(rsp))
+ return PTR_ERR(rsp);
+
+ if (rsp->qints < 1)
+ return -ENXIO;
+
+ return rsp->hdr.rc;
+}
+
+void otx2_sq_free_sqbs(struct otx2_nic *pfvf)
+{
+ struct otx2_qset *qset = &pfvf->qset;
+ struct otx2_hw *hw = &pfvf->hw;
+ struct otx2_snd_queue *sq;
+ int sqb, qidx;
+ u64 iova, pa;
+
+ for (qidx = 0; qidx < otx2_get_total_tx_queues(pfvf); qidx++) {
+ sq = &qset->sq[qidx];
+ if (!sq->sqb_ptrs)
+ continue;
+ for (sqb = 0; sqb < sq->sqb_count; sqb++) {
+ if (!sq->sqb_ptrs[sqb])
+ continue;
+ iova = sq->sqb_ptrs[sqb];
+ pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
+ dma_unmap_page_attrs(pfvf->dev, iova, hw->sqb_size,
+ DMA_FROM_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ put_page(virt_to_page(phys_to_virt(pa)));
+ }
+ sq->sqb_count = 0;
+ }
+}
+
+void otx2_free_bufs(struct otx2_nic *pfvf, struct otx2_pool *pool,
+ u64 iova, int size)
+{
+ struct page *page;
+ u64 pa;
+
+ pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
+ page = virt_to_head_page(phys_to_virt(pa));
+
+ if (pool->page_pool) {
+ page_pool_put_full_page(pool->page_pool, page, true);
+ } else {
+ dma_unmap_page_attrs(pfvf->dev, iova, size,
+ DMA_FROM_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
+
+ put_page(page);
+ }
+}
+
+void otx2_free_aura_ptr(struct otx2_nic *pfvf, int type)
+{
+ int pool_id, pool_start = 0, pool_end = 0, size = 0;
+ struct otx2_pool *pool;
+ u64 iova;
+
+ if (type == AURA_NIX_SQ) {
+ pool_start = otx2_get_pool_idx(pfvf, type, 0);
+ pool_end = pool_start + pfvf->hw.sqpool_cnt;
+ size = pfvf->hw.sqb_size;
+ }
+ if (type == AURA_NIX_RQ) {
+ pool_start = otx2_get_pool_idx(pfvf, type, 0);
+ pool_end = pfvf->hw.rqpool_cnt;
+ size = pfvf->rbsize;
+ }
+
+ /* Free SQB and RQB pointers from the aura pool */
+ for (pool_id = pool_start; pool_id < pool_end; pool_id++) {
+ iova = otx2_aura_allocptr(pfvf, pool_id);
+ pool = &pfvf->qset.pool[pool_id];
+ while (iova) {
+ if (type == AURA_NIX_RQ)
+ iova -= OTX2_HEAD_ROOM;
+
+ otx2_free_bufs(pfvf, pool, iova, size);
+
+ iova = otx2_aura_allocptr(pfvf, pool_id);
+ }
+ }
+}
+
+void otx2_aura_pool_free(struct otx2_nic *pfvf)
+{
+ struct otx2_pool *pool;
+ int pool_id;
+
+ if (!pfvf->qset.pool)
+ return;
+
+ for (pool_id = 0; pool_id < pfvf->hw.pool_cnt; pool_id++) {
+ pool = &pfvf->qset.pool[pool_id];
+ qmem_free(pfvf->dev, pool->stack);
+ qmem_free(pfvf->dev, pool->fc_addr);
+ page_pool_destroy(pool->page_pool);
+ pool->page_pool = NULL;
+ }
+ devm_kfree(pfvf->dev, pfvf->qset.pool);
+ pfvf->qset.pool = NULL;
+}
+
+int otx2_aura_init(struct otx2_nic *pfvf, int aura_id,
+ int pool_id, int numptrs)
+{
+ struct npa_aq_enq_req *aq;
+ struct otx2_pool *pool;
+ int err;
+
+ pool = &pfvf->qset.pool[pool_id];
+
+ /* Allocate memory for HW to update Aura count.
+ * Alloc one cache line, so that it fits all FC_STYPE modes.
+ */
+ if (!pool->fc_addr) {
+ err = qmem_alloc(pfvf->dev, &pool->fc_addr, 1, OTX2_ALIGN);
+ if (err)
+ return err;
+ }
+
+ /* Initialize this aura's context via AF */
+ aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
+ if (!aq) {
+ /* Shared mbox memory buffer is full, flush it and retry */
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err)
+ return err;
+ aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
+ if (!aq)
+ return -ENOMEM;
+ }
+
+ aq->aura_id = aura_id;
+ /* Will be filled by AF with correct pool context address */
+ aq->aura.pool_addr = pool_id;
+ aq->aura.pool_caching = 1;
+ aq->aura.shift = ilog2(numptrs) - 8;
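+ /* e.g. an illustrative numptrs of 1024 gives shift = ilog2(1024) - 8 = 2 */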
+ aq->aura.count = numptrs;
+ aq->aura.limit = numptrs;
+ aq->aura.avg_level = 255;
+ aq->aura.ena = 1;
+ aq->aura.fc_ena = 1;
+ aq->aura.fc_addr = pool->fc_addr->iova;
+ aq->aura.fc_hyst_bits = 0; /* Store count on all updates */
+
+ /* Enable backpressure for RQ aura */
+ if (aura_id < pfvf->hw.rqpool_cnt && !is_otx2_lbkvf(pfvf->pdev)) {
+ aq->aura.bp_ena = 0;
+ /* If NIX1 LF is attached then specify NIX1_RX.
+ *
+ * Below NPA_AURA_S[BP_ENA] is set according to the
+ * NPA_BPINTF_E enumeration given as:
+ * 0x0 + a*0x1 where 'a' is 0 for NIX0_RX and 1 for NIX1_RX so
+ * NIX0_RX is 0x0 + 0*0x1 = 0
+ * NIX1_RX is 0x0 + 1*0x1 = 1
+ * But in HRM it is given that
+ * "NPA_AURA_S[BP_ENA](w1[33:32]) - Enable aura backpressure to
+ * NIX-RX based on [BP] level. One bit per NIX-RX; index
+ * enumerated by NPA_BPINTF_E."
+ */
+ if (pfvf->nix_blkaddr == BLKADDR_NIX1)
+ aq->aura.bp_ena = 1;
+#ifdef CONFIG_DCB
+ aq->aura.nix0_bpid = pfvf->bpid[pfvf->queue_to_pfc_map[aura_id]];
+#else
+ aq->aura.nix0_bpid = pfvf->bpid[0];
+#endif
+
+ /* Set backpressure level for RQ's Aura */
+ aq->aura.bp = RQ_BP_LVL_AURA;
+ }
+
+ /* Fill AQ info */
+ aq->ctype = NPA_AQ_CTYPE_AURA;
+ aq->op = NPA_AQ_INSTOP_INIT;
+
+ return 0;
+}
+
+int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
+ int stack_pages, int numptrs, int buf_size, int type)
+{
+ struct page_pool_params pp_params = { 0 };
+ struct npa_aq_enq_req *aq;
+ struct otx2_pool *pool;
+ int err;
+
+ pool = &pfvf->qset.pool[pool_id];
+ /* Alloc memory for stack which is used to store buffer pointers */
+ err = qmem_alloc(pfvf->dev, &pool->stack,
+ stack_pages, pfvf->hw.stack_pg_bytes);
+ if (err)
+ return err;
+
+ pool->rbsize = buf_size;
+
+ /* Initialize this pool's context via AF */
+ aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
+ if (!aq) {
+ /* Shared mbox memory buffer is full, flush it and retry */
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err) {
+ qmem_free(pfvf->dev, pool->stack);
+ return err;
+ }
+ aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
+ if (!aq) {
+ qmem_free(pfvf->dev, pool->stack);
+ return -ENOMEM;
+ }
+ }
+
+ aq->aura_id = pool_id;
+ aq->pool.stack_base = pool->stack->iova;
+ aq->pool.stack_caching = 1;
+ aq->pool.ena = 1;
+ aq->pool.buf_size = buf_size / 128;
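+ /* The pool buffer size is programmed in units of 128 bytes (hence the
+ * division); e.g. an illustrative 2048-byte buffer is programmed as 16.
+ */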
+ aq->pool.stack_max_pages = stack_pages;
+ aq->pool.shift = ilog2(numptrs) - 8;
+ aq->pool.ptr_start = 0;
+ aq->pool.ptr_end = ~0ULL;
+
+ /* Fill AQ info */
+ aq->ctype = NPA_AQ_CTYPE_POOL;
+ aq->op = NPA_AQ_INSTOP_INIT;
+
+ if (type != AURA_NIX_RQ) {
+ pool->page_pool = NULL;
+ return 0;
+ }
+
+ pp_params.order = get_order(buf_size);
+ pp_params.flags = PP_FLAG_PAGE_FRAG | PP_FLAG_DMA_MAP;
+ pp_params.pool_size = min(OTX2_PAGE_POOL_SZ, numptrs);
+ pp_params.nid = NUMA_NO_NODE;
+ pp_params.dev = pfvf->dev;
+ pp_params.dma_dir = DMA_FROM_DEVICE;
+ pool->page_pool = page_pool_create(&pp_params);
+ if (IS_ERR(pool->page_pool)) {
+ netdev_err(pfvf->netdev, "Creation of page pool failed\n");
+ return PTR_ERR(pool->page_pool);
+ }
+
+ return 0;
+}
+
+int otx2_sq_aura_pool_init(struct otx2_nic *pfvf)
+{
+ int qidx, pool_id, stack_pages, num_sqbs;
+ struct otx2_qset *qset = &pfvf->qset;
+ struct otx2_hw *hw = &pfvf->hw;
+ struct otx2_snd_queue *sq;
+ struct otx2_pool *pool;
+ dma_addr_t bufptr;
+ int err, ptr;
+
+ /* Calculate the number of SQBs needed.
+ *
+ * For a 128-byte SQE and a 4K-sized SQB, 31 SQEs will fit in one SQB.
+ * The last SQE is used for pointing to the next SQB.
+ */
+ num_sqbs = (hw->sqb_size / 128) - 1;
+ num_sqbs = (qset->sqe_cnt + num_sqbs) / num_sqbs;
+
+ /* Get the number of stack pages needed */
+ stack_pages =
+ (num_sqbs + hw->stack_pg_ptrs - 1) / hw->stack_pg_ptrs;
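+ /* i.e. a ceiling division: enough stack pages to hold all num_sqbs
+ * pointers, with stack_pg_ptrs pointers per stack page.
+ */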
+
+ for (qidx = 0; qidx < hw->non_qos_queues; qidx++) {
+ pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
+ /* Initialize aura context */
+ err = otx2_aura_init(pfvf, pool_id, pool_id, num_sqbs);
+ if (err)
+ goto fail;
+
+ /* Initialize pool context */
+ err = otx2_pool_init(pfvf, pool_id, stack_pages,
+ num_sqbs, hw->sqb_size, AURA_NIX_SQ);
+ if (err)
+ goto fail;
+ }
+
+ /* Flush accumulated messages */
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err)
+ goto fail;
+
+ /* Allocate pointers and free them to aura/pool */
+ for (qidx = 0; qidx < hw->non_qos_queues; qidx++) {
+ pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
+ pool = &pfvf->qset.pool[pool_id];
+
+ sq = &qset->sq[qidx];
+ sq->sqb_count = 0;
+ sq->sqb_ptrs = kcalloc(num_sqbs, sizeof(*sq->sqb_ptrs), GFP_KERNEL);
+ if (!sq->sqb_ptrs) {
+ err = -ENOMEM;
+ goto err_mem;
+ }
+
+ for (ptr = 0; ptr < num_sqbs; ptr++) {
+ err = otx2_alloc_rbuf(pfvf, pool, &bufptr);
+ if (err)
+ goto err_mem;
+ pfvf->hw_ops->aura_freeptr(pfvf, pool_id, bufptr);
+ sq->sqb_ptrs[sq->sqb_count++] = (u64)bufptr;
+ }
+ }
+
+err_mem:
+ return err ? -ENOMEM : 0;
+
+fail:
+ otx2_mbox_reset(&pfvf->mbox.mbox, 0);
+ otx2_aura_pool_free(pfvf);
+ return err;
+}
+
+int otx2_rq_aura_pool_init(struct otx2_nic *pfvf)
+{
+ struct otx2_hw *hw = &pfvf->hw;
+ int stack_pages, pool_id, rq;
+ struct otx2_pool *pool;
+ int err, ptr, num_ptrs;
+ dma_addr_t bufptr;
+
+ num_ptrs = pfvf->qset.rqe_cnt;
+
+ stack_pages =
+ (num_ptrs + hw->stack_pg_ptrs - 1) / hw->stack_pg_ptrs;
+
+ for (rq = 0; rq < hw->rx_queues; rq++) {
+ pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_RQ, rq);
+ /* Initialize aura context */
+ err = otx2_aura_init(pfvf, pool_id, pool_id, num_ptrs);
+ if (err)
+ goto fail;
+ }
+ for (pool_id = 0; pool_id < hw->rqpool_cnt; pool_id++) {
+ err = otx2_pool_init(pfvf, pool_id, stack_pages,
+ num_ptrs, pfvf->rbsize, AURA_NIX_RQ);
+ if (err)
+ goto fail;
+ }
+
+ /* Flush accumulated messages */
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err)
+ goto fail;
+
+ /* Allocate pointers and free them to aura/pool */
+ for (pool_id = 0; pool_id < hw->rqpool_cnt; pool_id++) {
+ pool = &pfvf->qset.pool[pool_id];
+ for (ptr = 0; ptr < num_ptrs; ptr++) {
+ err = otx2_alloc_rbuf(pfvf, pool, &bufptr);
+ if (err)
+ return -ENOMEM;
+ pfvf->hw_ops->aura_freeptr(pfvf, pool_id,
+ bufptr + OTX2_HEAD_ROOM);
+ }
+ }
+ return 0;
+fail:
+ otx2_mbox_reset(&pfvf->mbox.mbox, 0);
+ otx2_aura_pool_free(pfvf);
+ return err;
+}
+
+int otx2_config_npa(struct otx2_nic *pfvf)
+{
+ struct otx2_qset *qset = &pfvf->qset;
+ struct npa_lf_alloc_req *npalf;
+ struct otx2_hw *hw = &pfvf->hw;
+ int aura_cnt;
+
+ /* Pool - Stack of free buffer pointers
+ * Aura - Allocates/frees pointers from/to the pool for NIX DMA.
+ */
+
+ if (!hw->pool_cnt)
+ return -EINVAL;
+
+ qset->pool = devm_kcalloc(pfvf->dev, hw->pool_cnt,
+ sizeof(struct otx2_pool), GFP_KERNEL);
+ if (!qset->pool)
+ return -ENOMEM;
+
+ /* Get memory to put this msg */
+ npalf = otx2_mbox_alloc_msg_npa_lf_alloc(&pfvf->mbox);
+ if (!npalf)
+ return -ENOMEM;
+
+ /* Set aura and pool counts */
+ npalf->nr_pools = hw->pool_cnt;
+ aura_cnt = ilog2(roundup_pow_of_two(hw->pool_cnt));
+ npalf->aura_sz = (aura_cnt >= ilog2(128)) ? (aura_cnt - 6) : 1;
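+ /* e.g. for 136 pools: roundup_pow_of_two(136) = 256, ilog2(256) = 8,
+ * which is >= ilog2(128) = 7, so aura_sz = 8 - 6 = 2.
+ */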
+
+ return otx2_sync_mbox_msg(&pfvf->mbox);
+}
+
+int otx2_detach_resources(struct mbox *mbox)
+{
+ struct rsrc_detach *detach;
+
+ mutex_lock(&mbox->lock);
+ detach = otx2_mbox_alloc_msg_detach_resources(mbox);
+ if (!detach) {
+ mutex_unlock(&mbox->lock);
+ return -ENOMEM;
+ }
+
+ /* detach all */
+ detach->partial = false;
+
+ /* Send detach request to AF */
+ otx2_mbox_msg_send(&mbox->mbox, 0);
+ mutex_unlock(&mbox->lock);
+ return 0;
+}
+EXPORT_SYMBOL(otx2_detach_resources);
+
+int otx2_attach_npa_nix(struct otx2_nic *pfvf)
+{
+ struct rsrc_attach *attach;
+ struct msg_req *msix;
+ int err;
+
+ mutex_lock(&pfvf->mbox.lock);
+ /* Get memory to put this msg */
+ attach = otx2_mbox_alloc_msg_attach_resources(&pfvf->mbox);
+ if (!attach) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ attach->npalf = true;
+ attach->nixlf = true;
+
+ /* Send attach request to AF */
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
+ }
+
+ pfvf->nix_blkaddr = BLKADDR_NIX0;
+
+ /* If the platform has two NIX blocks then LF may be
+ * allocated from NIX1.
+ */
+ if (otx2_read64(pfvf, RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_NIX1)) & 0x1FFULL)
+ pfvf->nix_blkaddr = BLKADDR_NIX1;
+
+ /* Get NPA and NIX MSIX vector offsets */
+ msix = otx2_mbox_alloc_msg_msix_offset(&pfvf->mbox);
+ if (!msix) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
+ }
+ mutex_unlock(&pfvf->mbox.lock);
+
+ if (pfvf->hw.npa_msixoff == MSIX_VECTOR_INVALID ||
+ pfvf->hw.nix_msixoff == MSIX_VECTOR_INVALID) {
+ dev_err(pfvf->dev,
+ "RVUPF: Invalid MSIX vector offset for NPA/NIX\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(otx2_attach_npa_nix);
+
+void otx2_ctx_disable(struct mbox *mbox, int type, bool npa)
+{
+ struct hwctx_disable_req *req;
+
+ mutex_lock(&mbox->lock);
+ /* Request AQ to disable this context */
+ if (npa)
+ req = otx2_mbox_alloc_msg_npa_hwctx_disable(mbox);
+ else
+ req = otx2_mbox_alloc_msg_nix_hwctx_disable(mbox);
+
+ if (!req) {
+ mutex_unlock(&mbox->lock);
+ return;
+ }
+
+ req->ctype = type;
+
+ if (otx2_sync_mbox_msg(mbox))
+ dev_err(mbox->pfvf->dev, "%s failed to disable context\n",
+ __func__);
+
+ mutex_unlock(&mbox->lock);
+}
+
+int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable)
+{
+ struct nix_bp_cfg_req *req;
+
+ if (enable)
+ req = otx2_mbox_alloc_msg_nix_bp_enable(&pfvf->mbox);
+ else
+ req = otx2_mbox_alloc_msg_nix_bp_disable(&pfvf->mbox);
+
+ if (!req)
+ return -ENOMEM;
+
+ req->chan_base = 0;
+#ifdef CONFIG_DCB
+ req->chan_cnt = pfvf->pfc_en ? IEEE_8021QAZ_MAX_TCS : 1;
+ req->bpid_per_chan = pfvf->pfc_en ? 1 : 0;
+#else
+ req->chan_cnt = 1;
+ req->bpid_per_chan = 0;
+#endif
+
+ return otx2_sync_mbox_msg(&pfvf->mbox);
+}
+EXPORT_SYMBOL(otx2_nix_config_bp);
+
+/* Mbox message handlers */
+void mbox_handler_cgx_stats(struct otx2_nic *pfvf,
+ struct cgx_stats_rsp *rsp)
+{
+ int id;
+
+ for (id = 0; id < CGX_RX_STATS_COUNT; id++)
+ pfvf->hw.cgx_rx_stats[id] = rsp->rx_stats[id];
+ for (id = 0; id < CGX_TX_STATS_COUNT; id++)
+ pfvf->hw.cgx_tx_stats[id] = rsp->tx_stats[id];
+}
+
+void mbox_handler_cgx_fec_stats(struct otx2_nic *pfvf,
+ struct cgx_fec_stats_rsp *rsp)
+{
+ pfvf->hw.cgx_fec_corr_blks += rsp->fec_corr_blks;
+ pfvf->hw.cgx_fec_uncorr_blks += rsp->fec_uncorr_blks;
+}
+
+void mbox_handler_npa_lf_alloc(struct otx2_nic *pfvf,
+ struct npa_lf_alloc_rsp *rsp)
+{
+ pfvf->hw.stack_pg_ptrs = rsp->stack_pg_ptrs;
+ pfvf->hw.stack_pg_bytes = rsp->stack_pg_bytes;
+}
+EXPORT_SYMBOL(mbox_handler_npa_lf_alloc);
+
+void mbox_handler_nix_lf_alloc(struct otx2_nic *pfvf,
+ struct nix_lf_alloc_rsp *rsp)
+{
+ pfvf->hw.sqb_size = rsp->sqb_size;
+ pfvf->hw.rx_chan_base = rsp->rx_chan_base;
+ pfvf->hw.tx_chan_base = rsp->tx_chan_base;
+ pfvf->hw.lso_tsov4_idx = rsp->lso_tsov4_idx;
+ pfvf->hw.lso_tsov6_idx = rsp->lso_tsov6_idx;
+ pfvf->hw.cgx_links = rsp->cgx_links;
+ pfvf->hw.lbk_links = rsp->lbk_links;
+ pfvf->hw.tx_link = rsp->tx_link;
+}
+EXPORT_SYMBOL(mbox_handler_nix_lf_alloc);
+
+void mbox_handler_msix_offset(struct otx2_nic *pfvf,
+ struct msix_offset_rsp *rsp)
+{
+ pfvf->hw.npa_msixoff = rsp->npa_msixoff;
+ pfvf->hw.nix_msixoff = rsp->nix_msixoff;
+}
+EXPORT_SYMBOL(mbox_handler_msix_offset);
+
+void mbox_handler_nix_bp_enable(struct otx2_nic *pfvf,
+ struct nix_bp_cfg_rsp *rsp)
+{
+ int chan, chan_id;
+
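+ /* Each rsp->chan_bpid[] entry packs the channel number in bits
+ * [16:10] and the backpressure ID (BPID) in bits [9:0].
+ */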
+ for (chan = 0; chan < rsp->chan_cnt; chan++) {
+ chan_id = ((rsp->chan_bpid[chan] >> 10) & 0x7F);
+ pfvf->bpid[chan_id] = rsp->chan_bpid[chan] & 0x3FF;
+ }
+}
+EXPORT_SYMBOL(mbox_handler_nix_bp_enable);
+
+void otx2_free_cints(struct otx2_nic *pfvf, int n)
+{
+ struct otx2_qset *qset = &pfvf->qset;
+ struct otx2_hw *hw = &pfvf->hw;
+ int irq, qidx;
+
+ for (qidx = 0, irq = hw->nix_msixoff + NIX_LF_CINT_VEC_START;
+ qidx < n;
+ qidx++, irq++) {
+ int vector = pci_irq_vector(pfvf->pdev, irq);
+
+ irq_set_affinity_hint(vector, NULL);
+ free_cpumask_var(hw->affinity_mask[irq]);
+ free_irq(vector, &qset->napi[qidx]);
+ }
+}
+
+void otx2_set_cints_affinity(struct otx2_nic *pfvf)
+{
+ struct otx2_hw *hw = &pfvf->hw;
+ int vec, cpu, irq, cint;
+
+ vec = hw->nix_msixoff + NIX_LF_CINT_VEC_START;
+ cpu = cpumask_first(cpu_online_mask);
+
+ /* CQ interrupts */
+ for (cint = 0; cint < pfvf->hw.cint_cnt; cint++, vec++) {
+ if (!alloc_cpumask_var(&hw->affinity_mask[vec], GFP_KERNEL))
+ return;
+
+ cpumask_set_cpu(cpu, hw->affinity_mask[vec]);
+
+ irq = pci_irq_vector(pfvf->pdev, vec);
+ irq_set_affinity_hint(irq, hw->affinity_mask[vec]);
+
+ cpu = cpumask_next(cpu, cpu_online_mask);
+ if (unlikely(cpu >= nr_cpu_ids))
+ cpu = 0;
+ }
+}
+
+static u32 get_dwrr_mtu(struct otx2_nic *pfvf, struct nix_hw_info *hw)
+{
+ if (is_otx2_lbkvf(pfvf->pdev)) {
+ pfvf->hw.smq_link_type = SMQ_LINK_TYPE_LBK;
+ return hw->lbk_dwrr_mtu;
+ }
+
+ pfvf->hw.smq_link_type = SMQ_LINK_TYPE_RPM;
+ return hw->rpm_dwrr_mtu;
+}
+
+u16 otx2_get_max_mtu(struct otx2_nic *pfvf)
+{
+ struct nix_hw_info *rsp;
+ struct msg_req *req;
+ u16 max_mtu;
+ int rc;
+
+ mutex_lock(&pfvf->mbox.lock);
+
+ req = otx2_mbox_alloc_msg_nix_get_hw_info(&pfvf->mbox);
+ if (!req) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ rc = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (!rc) {
+ rsp = (struct nix_hw_info *)
+ otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
+
+ /* HW counts VLAN insertion bytes (8 for double tag)
+ * irrespective of whether the SQE requests VLAN insertion
+ * in the packet or not. Hence these 8 bytes have to be
+ * discounted from the max packet size, otherwise HW will
+ * throw SMQ errors.
+ */
+ max_mtu = rsp->max_mtu - 8 - OTX2_ETH_HLEN;
+
+ /* Also save DWRR MTU, needed for DWRR weight calculation */
+ pfvf->hw.dwrr_mtu = get_dwrr_mtu(pfvf, rsp);
+ if (!pfvf->hw.dwrr_mtu)
+ pfvf->hw.dwrr_mtu = 1;
+ }
+
+out:
+ mutex_unlock(&pfvf->mbox.lock);
+ if (rc) {
+ dev_warn(pfvf->dev,
+ "Failed to get MTU from hardware setting default value(1500)\n");
+ max_mtu = 1500;
+ }
+ return max_mtu;
+}
+EXPORT_SYMBOL(otx2_get_max_mtu);
+
+int otx2_handle_ntuple_tc_features(struct net_device *netdev, netdev_features_t features)
+{
+ netdev_features_t changed = features ^ netdev->features;
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ bool ntuple = !!(features & NETIF_F_NTUPLE);
+ bool tc = !!(features & NETIF_F_HW_TC);
+
+ if ((changed & NETIF_F_NTUPLE) && !ntuple)
+ otx2_destroy_ntuple_flows(pfvf);
+
+ if ((changed & NETIF_F_NTUPLE) && ntuple) {
+ if (!pfvf->flow_cfg->max_flows) {
+ netdev_err(netdev,
+ "Can't enable NTUPLE, MCAM entries not allocated\n");
+ return -EINVAL;
+ }
+ }
+
+ if ((changed & NETIF_F_HW_TC) && !tc &&
+ otx2_tc_flower_rule_cnt(pfvf)) {
+ netdev_err(netdev, "Can't disable TC hardware offload while flows are active\n");
+ return -EBUSY;
+ }
+
+ if ((changed & NETIF_F_NTUPLE) && ntuple &&
+ otx2_tc_flower_rule_cnt(pfvf) && !(changed & NETIF_F_HW_TC)) {
+ netdev_err(netdev,
+ "Can't enable NTUPLE when TC flower offload is active, disable TC rules and retry\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(otx2_handle_ntuple_tc_features);
+
+#define M(_name, _id, _fn_name, _req_type, _rsp_type) \
+int __weak \
+otx2_mbox_up_handler_ ## _fn_name(struct otx2_nic *pfvf, \
+ struct _req_type *req, \
+ struct _rsp_type *rsp) \
+{ \
+ /* Nothing to do here */ \
+ return 0; \
+} \
+EXPORT_SYMBOL(otx2_mbox_up_handler_ ## _fn_name);
+MBOX_UP_CGX_MESSAGES
+MBOX_UP_MCS_MESSAGES
+#undef M
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
new file mode 100644
index 000000000..069103070
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -0,0 +1,1128 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell RVU Ethernet driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#ifndef OTX2_COMMON_H
+#define OTX2_COMMON_H
+
+#include <linux/ethtool.h>
+#include <linux/pci.h>
+#include <linux/iommu.h>
+#include <linux/net_tstamp.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/timecounter.h>
+#include <linux/soc/marvell/octeontx2/asm.h>
+#include <net/macsec.h>
+#include <net/pkt_cls.h>
+#include <net/devlink.h>
+#include <linux/time64.h>
+#include <linux/dim.h>
+#include <uapi/linux/if_macsec.h>
+
+#include <mbox.h>
+#include <npc.h>
+#include "otx2_reg.h"
+#include "otx2_txrx.h"
+#include "otx2_devlink.h"
+#include <rvu_trace.h>
+#include "qos.h"
+
+/* IPv4 flag more fragment bit */
+#define IPV4_FLAG_MORE 0x20
+
+/* PCI device IDs */
+#define PCI_DEVID_OCTEONTX2_RVU_PF 0xA063
+#define PCI_DEVID_OCTEONTX2_RVU_VF 0xA064
+#define PCI_DEVID_OCTEONTX2_RVU_AFVF 0xA0F8
+
+#define PCI_SUBSYS_DEVID_96XX_RVU_PFVF 0xB200
+#define PCI_SUBSYS_DEVID_CN10K_B_RVU_PFVF 0xBD00
+
+/* PCI BAR nos */
+#define PCI_CFG_REG_BAR_NUM 2
+#define PCI_MBOX_BAR_NUM 4
+
+#define NAME_SIZE 32
+
+#ifdef CONFIG_DCB
+/* Max priority supported for PFC */
+#define NIX_PF_PFC_PRIO_MAX 8
+#endif
+
+enum arua_mapped_qtypes {
+ AURA_NIX_RQ,
+ AURA_NIX_SQ,
+};
+
+/* NIX LF interrupt vector range */
+#define NIX_LF_QINT_VEC_START 0x00
+#define NIX_LF_CINT_VEC_START 0x40
+#define NIX_LF_GINT_VEC 0x80
+#define NIX_LF_ERR_VEC 0x81
+#define NIX_LF_POISON_VEC 0x82
+
+/* Send skid of 2000 packets required for CQ size of 4K CQEs. */
+#define SEND_CQ_SKID 2000
+
+#define OTX2_GET_RX_STATS(reg) \
+ otx2_read64(pfvf, NIX_LF_RX_STATX(reg))
+#define OTX2_GET_TX_STATS(reg) \
+ otx2_read64(pfvf, NIX_LF_TX_STATX(reg))
+
+struct otx2_lmt_info {
+ u64 lmt_addr;
+ u16 lmt_id;
+};
+/* RSS configuration */
+struct otx2_rss_ctx {
+ u8 ind_tbl[MAX_RSS_INDIR_TBL_SIZE];
+};
+
+struct otx2_rss_info {
+ u8 enable;
+ u32 flowkey_cfg;
+ u16 rss_size;
+#define RSS_HASH_KEY_SIZE 44 /* 352 bit key */
+ u8 key[RSS_HASH_KEY_SIZE];
+ struct otx2_rss_ctx *rss_ctx[MAX_RSS_GROUPS];
+};
+
+/* NIX (or NPC) RX errors */
+enum otx2_errlvl {
+ NPC_ERRLVL_RE,
+ NPC_ERRLVL_LID_LA,
+ NPC_ERRLVL_LID_LB,
+ NPC_ERRLVL_LID_LC,
+ NPC_ERRLVL_LID_LD,
+ NPC_ERRLVL_LID_LE,
+ NPC_ERRLVL_LID_LF,
+ NPC_ERRLVL_LID_LG,
+ NPC_ERRLVL_LID_LH,
+ NPC_ERRLVL_NIX = 0x0F,
+};
+
+enum otx2_errcodes_re {
+ /* NPC_ERRLVL_RE errcodes */
+ ERRCODE_FCS = 0x7,
+ ERRCODE_FCS_RCV = 0x8,
+ ERRCODE_UNDERSIZE = 0x10,
+ ERRCODE_OVERSIZE = 0x11,
+ ERRCODE_OL2_LEN_MISMATCH = 0x12,
+ /* NPC_ERRLVL_NIX errcodes */
+ ERRCODE_OL3_LEN = 0x10,
+ ERRCODE_OL4_LEN = 0x11,
+ ERRCODE_OL4_CSUM = 0x12,
+ ERRCODE_IL3_LEN = 0x20,
+ ERRCODE_IL4_LEN = 0x21,
+ ERRCODE_IL4_CSUM = 0x22,
+};
+
+/* NIX TX stats */
+enum nix_stat_lf_tx {
+ TX_UCAST = 0x0,
+ TX_BCAST = 0x1,
+ TX_MCAST = 0x2,
+ TX_DROP = 0x3,
+ TX_OCTS = 0x4,
+ TX_STATS_ENUM_LAST,
+};
+
+/* NIX RX stats */
+enum nix_stat_lf_rx {
+ RX_OCTS = 0x0,
+ RX_UCAST = 0x1,
+ RX_BCAST = 0x2,
+ RX_MCAST = 0x3,
+ RX_DROP = 0x4,
+ RX_DROP_OCTS = 0x5,
+ RX_FCS = 0x6,
+ RX_ERR = 0x7,
+ RX_DRP_BCAST = 0x8,
+ RX_DRP_MCAST = 0x9,
+ RX_DRP_L3BCAST = 0xa,
+ RX_DRP_L3MCAST = 0xb,
+ RX_STATS_ENUM_LAST,
+};
+
+struct otx2_dev_stats {
+ u64 rx_bytes;
+ u64 rx_frames;
+ u64 rx_ucast_frames;
+ u64 rx_bcast_frames;
+ u64 rx_mcast_frames;
+ u64 rx_drops;
+
+ u64 tx_bytes;
+ u64 tx_frames;
+ u64 tx_ucast_frames;
+ u64 tx_bcast_frames;
+ u64 tx_mcast_frames;
+ u64 tx_drops;
+};
+
+/* Driver counted stats */
+struct otx2_drv_stats {
+ atomic_t rx_fcs_errs;
+ atomic_t rx_oversize_errs;
+ atomic_t rx_undersize_errs;
+ atomic_t rx_csum_errs;
+ atomic_t rx_len_errs;
+ atomic_t rx_other_errs;
+};
+
+struct mbox {
+ struct otx2_mbox mbox;
+ struct work_struct mbox_wrk;
+ struct otx2_mbox mbox_up;
+ struct work_struct mbox_up_wrk;
+ struct otx2_nic *pfvf;
+ void *bbuf_base; /* Bounce buffer for mbox memory */
+ struct mutex lock; /* serialize mailbox access */
+ int num_msgs; /* mbox number of messages */
+ int up_num_msgs; /* mbox_up number of messages */
+};
+
+/* Egress rate limiting definitions */
+#define MAX_BURST_EXPONENT 0x0FULL
+#define MAX_BURST_MANTISSA 0xFFULL
+#define MAX_BURST_SIZE 130816ULL
+#define MAX_RATE_DIVIDER_EXPONENT 12ULL
+#define MAX_RATE_EXPONENT 0x0FULL
+#define MAX_RATE_MANTISSA 0xFFULL
+
+/* Bitfields in NIX_TLX_PIR register */
+#define TLX_RATE_MANTISSA GENMASK_ULL(8, 1)
+#define TLX_RATE_EXPONENT GENMASK_ULL(12, 9)
+#define TLX_RATE_DIVIDER_EXPONENT GENMASK_ULL(16, 13)
+#define TLX_BURST_MANTISSA GENMASK_ULL(36, 29)
+#define TLX_BURST_EXPONENT GENMASK_ULL(40, 37)
+
+struct otx2_hw {
+ struct pci_dev *pdev;
+ struct otx2_rss_info rss_info;
+ u16 rx_queues;
+ u16 tx_queues;
+ u16 xdp_queues;
+ u16 tc_tx_queues;
+ u16 non_qos_queues; /* tx queues plus xdp queues */
+ u16 max_queues;
+ u16 pool_cnt;
+ u16 rqpool_cnt;
+ u16 sqpool_cnt;
+
+#define OTX2_DEFAULT_RBUF_LEN 2048
+ u16 rbuf_len;
+ u32 xqe_size;
+
+ /* NPA */
+ u32 stack_pg_ptrs; /* No of ptrs per stack page */
+ u32 stack_pg_bytes; /* Size of stack page */
+ u16 sqb_size;
+
+ /* NIX */
+ u8 txschq_link_cfg_lvl;
+ u8 txschq_aggr_lvl_rr_prio;
+ u16 txschq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
+ u16 matchall_ipolicer;
+ u32 dwrr_mtu;
+ u8 smq_link_type;
+
+ /* HW settings, coalescing etc */
+ u16 rx_chan_base;
+ u16 tx_chan_base;
+ u16 cq_qcount_wait;
+ u16 cq_ecount_wait;
+ u16 rq_skid;
+ u8 cq_time_wait;
+
+ /* Segmentation */
+ u8 lso_tsov4_idx;
+ u8 lso_tsov6_idx;
+ u8 lso_udpv4_idx;
+ u8 lso_udpv6_idx;
+
+ /* RSS */
+ u8 flowkey_alg_idx;
+
+ /* MSI-X */
+ u8 cint_cnt; /* CQ interrupt count */
+ u16 npa_msixoff; /* Offset of NPA vectors */
+ u16 nix_msixoff; /* Offset of NIX vectors */
+ char *irq_name;
+ cpumask_var_t *affinity_mask;
+
+ /* Stats */
+ struct otx2_dev_stats dev_stats;
+ struct otx2_drv_stats drv_stats;
+ u64 cgx_rx_stats[CGX_RX_STATS_COUNT];
+ u64 cgx_tx_stats[CGX_TX_STATS_COUNT];
+ u64 cgx_fec_corr_blks;
+ u64 cgx_fec_uncorr_blks;
+ u8 cgx_links; /* No. of CGX links present in HW */
+ u8 lbk_links; /* No. of LBK links present in HW */
+ u8 tx_link; /* Transmit channel link number */
+#define HW_TSO 0
+#define CN10K_MBOX 1
+#define CN10K_LMTST 2
+#define CN10K_RPM 3
+#define CN10K_PTP_ONESTEP 4
+#define CN10K_HW_MACSEC 5
+#define QOS_CIR_PIR_SUPPORT 6
+ unsigned long cap_flag;
+
+#define LMT_LINE_SIZE 128
+#define LMT_BURST_SIZE 32 /* 32 LMTST lines for burst SQE flush */
+ u64 *lmt_base;
+ struct otx2_lmt_info __percpu *lmt_info;
+};
+
+enum vfperm {
+ OTX2_RESET_VF_PERM,
+ OTX2_TRUSTED_VF,
+};
+
+struct otx2_vf_config {
+ struct otx2_nic *pf;
+ struct delayed_work link_event_work;
+ bool intf_down; /* whether the interface has been configured or not */
+ u8 mac[ETH_ALEN];
+ u16 vlan;
+ int tx_vtag_idx;
+ bool trusted;
+};
+
+struct flr_work {
+ struct work_struct work;
+ struct otx2_nic *pf;
+};
+
+struct refill_work {
+ struct delayed_work pool_refill_work;
+ struct otx2_nic *pf;
+ struct napi_struct *napi;
+};
+
+/* PTPv2 originTimestamp structure */
+struct ptpv2_tstamp {
+ __be16 seconds_msb; /* 16 bits + */
+ __be32 seconds_lsb; /* 32 bits = 48 bits*/
+ __be32 nanoseconds;
+} __packed;
+
+struct otx2_ptp {
+ struct ptp_clock_info ptp_info;
+ struct ptp_clock *ptp_clock;
+ struct otx2_nic *nic;
+
+ struct cyclecounter cycle_counter;
+ struct timecounter time_counter;
+
+ struct delayed_work extts_work;
+ u64 last_extts;
+ u64 thresh;
+
+ struct ptp_pin_desc extts_config;
+ u64 (*convert_rx_ptp_tstmp)(u64 timestamp);
+ u64 (*convert_tx_ptp_tstmp)(u64 timestamp);
+ u64 (*ptp_tstamp2nsec)(const struct timecounter *time_counter, u64 timestamp);
+ struct delayed_work synctstamp_work;
+ u64 tstamp;
+ u32 base_ns;
+};
+
+#define OTX2_HW_TIMESTAMP_LEN 8
+
+struct otx2_mac_table {
+ u8 addr[ETH_ALEN];
+ u16 mcam_entry;
+ bool inuse;
+};
+
+struct otx2_flow_config {
+ u16 *flow_ent;
+ u16 *def_ent;
+ u16 nr_flows;
+#define OTX2_DEFAULT_FLOWCOUNT 16
+#define OTX2_MAX_UNICAST_FLOWS 8
+#define OTX2_MAX_VLAN_FLOWS 1
+#define OTX2_MAX_TC_FLOWS OTX2_DEFAULT_FLOWCOUNT
+#define OTX2_MCAM_COUNT (OTX2_DEFAULT_FLOWCOUNT + \
+ OTX2_MAX_UNICAST_FLOWS + \
+ OTX2_MAX_VLAN_FLOWS)
+ u16 unicast_offset;
+ u16 rx_vlan_offset;
+ u16 vf_vlan_offset;
+#define OTX2_PER_VF_VLAN_FLOWS 2 /* Rx + Tx per VF */
+#define OTX2_VF_VLAN_RX_INDEX 0
+#define OTX2_VF_VLAN_TX_INDEX 1
+ u32 *bmap_to_dmacindex;
+ unsigned long *dmacflt_bmap;
+ struct list_head flow_list;
+ u32 dmacflt_max_flows;
+ u16 max_flows;
+ struct list_head flow_list_tc;
+ bool ntuple;
+};
+
+struct dev_hw_ops {
+ int (*sq_aq_init)(void *dev, u16 qidx, u16 sqb_aura);
+ void (*sqe_flush)(void *dev, struct otx2_snd_queue *sq,
+ int size, int qidx);
+ int (*refill_pool_ptrs)(void *dev, struct otx2_cq_queue *cq);
+ void (*aura_freeptr)(void *dev, int aura, u64 buf);
+};
+
+#define CN10K_MCS_SA_PER_SC 4
+
+/* Stats which need to be accumulated in software because
+ * of shared counters in hardware.
+ */
+struct cn10k_txsc_stats {
+ u64 InPktsUntagged;
+ u64 InPktsNoTag;
+ u64 InPktsBadTag;
+ u64 InPktsUnknownSCI;
+ u64 InPktsNoSCI;
+ u64 InPktsOverrun;
+};
+
+struct cn10k_rxsc_stats {
+ u64 InOctetsValidated;
+ u64 InOctetsDecrypted;
+ u64 InPktsUnchecked;
+ u64 InPktsDelayed;
+ u64 InPktsOK;
+ u64 InPktsInvalid;
+ u64 InPktsLate;
+ u64 InPktsNotValid;
+ u64 InPktsNotUsingSA;
+ u64 InPktsUnusedSA;
+};
+
+struct cn10k_mcs_txsc {
+ struct macsec_secy *sw_secy;
+ struct cn10k_txsc_stats stats;
+ struct list_head entry;
+ enum macsec_validation_type last_validate_frames;
+ bool last_replay_protect;
+ u16 hw_secy_id_tx;
+ u16 hw_secy_id_rx;
+ u16 hw_flow_id;
+ u16 hw_sc_id;
+ u16 hw_sa_id[CN10K_MCS_SA_PER_SC];
+ u8 sa_bmap;
+ u8 sa_key[CN10K_MCS_SA_PER_SC][MACSEC_MAX_KEY_LEN];
+ u8 encoding_sa;
+ u8 salt[CN10K_MCS_SA_PER_SC][MACSEC_SALT_LEN];
+ ssci_t ssci[CN10K_MCS_SA_PER_SC];
+ bool vlan_dev; /* macsec running on VLAN ? */
+};
+
+struct cn10k_mcs_rxsc {
+ struct macsec_secy *sw_secy;
+ struct macsec_rx_sc *sw_rxsc;
+ struct cn10k_rxsc_stats stats;
+ struct list_head entry;
+ u16 hw_flow_id;
+ u16 hw_sc_id;
+ u16 hw_sa_id[CN10K_MCS_SA_PER_SC];
+ u8 sa_bmap;
+ u8 sa_key[CN10K_MCS_SA_PER_SC][MACSEC_MAX_KEY_LEN];
+ u8 salt[CN10K_MCS_SA_PER_SC][MACSEC_SALT_LEN];
+ ssci_t ssci[CN10K_MCS_SA_PER_SC];
+};
+
+struct cn10k_mcs_cfg {
+ struct list_head txsc_list;
+ struct list_head rxsc_list;
+};
+
+struct otx2_nic {
+ void __iomem *reg_base;
+ struct net_device *netdev;
+ struct dev_hw_ops *hw_ops;
+ void *iommu_domain;
+ u16 tx_max_pktlen;
+ u16 rbsize; /* Receive buffer size */
+
+#define OTX2_FLAG_RX_TSTAMP_ENABLED BIT_ULL(0)
+#define OTX2_FLAG_TX_TSTAMP_ENABLED BIT_ULL(1)
+#define OTX2_FLAG_INTF_DOWN BIT_ULL(2)
+#define OTX2_FLAG_MCAM_ENTRIES_ALLOC BIT_ULL(3)
+#define OTX2_FLAG_NTUPLE_SUPPORT BIT_ULL(4)
+#define OTX2_FLAG_UCAST_FLTR_SUPPORT BIT_ULL(5)
+#define OTX2_FLAG_RX_VLAN_SUPPORT BIT_ULL(6)
+#define OTX2_FLAG_VF_VLAN_SUPPORT BIT_ULL(7)
+#define OTX2_FLAG_PF_SHUTDOWN BIT_ULL(8)
+#define OTX2_FLAG_RX_PAUSE_ENABLED BIT_ULL(9)
+#define OTX2_FLAG_TX_PAUSE_ENABLED BIT_ULL(10)
+#define OTX2_FLAG_TC_FLOWER_SUPPORT BIT_ULL(11)
+#define OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED BIT_ULL(12)
+#define OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED BIT_ULL(13)
+#define OTX2_FLAG_DMACFLTR_SUPPORT BIT_ULL(14)
+#define OTX2_FLAG_PTP_ONESTEP_SYNC BIT_ULL(15)
+#define OTX2_FLAG_ADPTV_INT_COAL_ENABLED BIT_ULL(16)
+ u64 flags;
+ u64 *cq_op_addr;
+
+ struct bpf_prog *xdp_prog;
+ struct otx2_qset qset;
+ struct otx2_hw hw;
+ struct pci_dev *pdev;
+ struct device *dev;
+
+ /* Mbox */
+ struct mbox mbox;
+ struct mbox *mbox_pfvf;
+ struct workqueue_struct *mbox_wq;
+ struct workqueue_struct *mbox_pfvf_wq;
+
+ u8 total_vfs;
+ u16 pcifunc; /* RVU PF_FUNC */
+ u16 bpid[NIX_MAX_BPID_CHAN];
+ struct otx2_vf_config *vf_configs;
+ struct cgx_link_user_info linfo;
+
+ /* NPC MCAM */
+ struct otx2_flow_config *flow_cfg;
+ struct otx2_mac_table *mac_table;
+
+ u64 reset_count;
+ struct work_struct reset_task;
+ struct workqueue_struct *flr_wq;
+ struct flr_work *flr_wrk;
+ struct refill_work *refill_wrk;
+ struct workqueue_struct *otx2_wq;
+ struct work_struct rx_mode_work;
+
+ /* Ethtool stuff */
+ u32 msg_enable;
+
+ /* Block address of NIX either BLKADDR_NIX0 or BLKADDR_NIX1 */
+ int nix_blkaddr;
+ /* LMTST Lines info */
+ struct qmem *dync_lmt;
+ u16 tot_lmt_lines;
+ u16 npa_lmt_lines;
+ u32 nix_lmt_size;
+
+ struct otx2_ptp *ptp;
+ struct hwtstamp_config tstamp;
+
+ unsigned long rq_bmap;
+
+ /* Devlink */
+ struct otx2_devlink *dl;
+#ifdef CONFIG_DCB
+ /* PFC */
+ u8 pfc_en;
+ u8 *queue_to_pfc_map;
+ u16 pfc_schq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
+ bool pfc_alloc_status[NIX_PF_PFC_PRIO_MAX];
+#endif
+ /* qos */
+ struct otx2_qos qos;
+
+ /* napi event count. It is needed for adaptive irq coalescing. */
+ u32 napi_events;
+
+#if IS_ENABLED(CONFIG_MACSEC)
+ struct cn10k_mcs_cfg *macsec_cfg;
+#endif
+};
+
+static inline bool is_otx2_lbkvf(struct pci_dev *pdev)
+{
+ return pdev->device == PCI_DEVID_OCTEONTX2_RVU_AFVF;
+}
+
+static inline bool is_96xx_A0(struct pci_dev *pdev)
+{
+ return (pdev->revision == 0x00) &&
+ (pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX_RVU_PFVF);
+}
+
+static inline bool is_96xx_B0(struct pci_dev *pdev)
+{
+ return (pdev->revision == 0x01) &&
+ (pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX_RVU_PFVF);
+}
+
+/* REVID for PCIe devices.
+ * Bits 0..1: minor pass, bits 3..2: major pass
+ * bits 7..4: midr id
+ */
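+/* e.g. a revision value of 0x36 decodes as midr 0x30 (98XX silicon),
+ * major pass 1 (bits 3..2 = 0b01) and minor pass 2 (bits 1..0 = 0b10).
+ */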
+#define PCI_REVISION_ID_96XX 0x00
+#define PCI_REVISION_ID_95XX 0x10
+#define PCI_REVISION_ID_95XXN 0x20
+#define PCI_REVISION_ID_98XX 0x30
+#define PCI_REVISION_ID_95XXMM 0x40
+#define PCI_REVISION_ID_95XXO 0xE0
+
+static inline bool is_dev_otx2(struct pci_dev *pdev)
+{
+ u8 midr = pdev->revision & 0xF0;
+
+ return (midr == PCI_REVISION_ID_96XX || midr == PCI_REVISION_ID_95XX ||
+ midr == PCI_REVISION_ID_95XXN || midr == PCI_REVISION_ID_98XX ||
+ midr == PCI_REVISION_ID_95XXMM || midr == PCI_REVISION_ID_95XXO);
+}
+
+static inline bool is_dev_cn10kb(struct pci_dev *pdev)
+{
+ return pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_B_RVU_PFVF;
+}
+
+static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf)
+{
+ struct otx2_hw *hw = &pfvf->hw;
+
+ pfvf->hw.cq_time_wait = CQ_TIMER_THRESH_DEFAULT;
+ pfvf->hw.cq_ecount_wait = CQ_CQE_THRESH_DEFAULT;
+ pfvf->hw.cq_qcount_wait = CQ_QCOUNT_DEFAULT;
+
+ __set_bit(HW_TSO, &hw->cap_flag);
+
+ if (is_96xx_A0(pfvf->pdev)) {
+ __clear_bit(HW_TSO, &hw->cap_flag);
+
+ /* Time based irq coalescing is not supported */
+ pfvf->hw.cq_qcount_wait = 0x0;
+
+ /* Due to a HW issue, previous silicons required a minimum
+ * of 600 unused CQEs to avoid CQ overflow.
+ */
+ pfvf->hw.rq_skid = 600;
+ pfvf->qset.rqe_cnt = Q_COUNT(Q_SIZE_1K);
+ }
+ if (is_96xx_B0(pfvf->pdev))
+ __clear_bit(HW_TSO, &hw->cap_flag);
+
+ if (!is_dev_otx2(pfvf->pdev)) {
+ __set_bit(CN10K_MBOX, &hw->cap_flag);
+ __set_bit(CN10K_LMTST, &hw->cap_flag);
+ __set_bit(CN10K_RPM, &hw->cap_flag);
+ __set_bit(CN10K_PTP_ONESTEP, &hw->cap_flag);
+ __set_bit(QOS_CIR_PIR_SUPPORT, &hw->cap_flag);
+ }
+
+ if (is_dev_cn10kb(pfvf->pdev))
+ __set_bit(CN10K_HW_MACSEC, &hw->cap_flag);
+}
+
+/* Register read/write APIs */
+static inline void __iomem *otx2_get_regaddr(struct otx2_nic *nic, u64 offset)
+{
+ u64 blkaddr;
+
+ switch ((offset >> RVU_FUNC_BLKADDR_SHIFT) & RVU_FUNC_BLKADDR_MASK) {
+ case BLKTYPE_NIX:
+ blkaddr = nic->nix_blkaddr;
+ break;
+ case BLKTYPE_NPA:
+ blkaddr = BLKADDR_NPA;
+ break;
+ default:
+ blkaddr = BLKADDR_RVUM;
+ break;
+ }
+
+ offset &= ~(RVU_FUNC_BLKADDR_MASK << RVU_FUNC_BLKADDR_SHIFT);
+ offset |= (blkaddr << RVU_FUNC_BLKADDR_SHIFT);
+
+ return nic->reg_base + offset;
+}
+
+static inline void otx2_write64(struct otx2_nic *nic, u64 offset, u64 val)
+{
+ void __iomem *addr = otx2_get_regaddr(nic, offset);
+
+ writeq(val, addr);
+}
+
+static inline u64 otx2_read64(struct otx2_nic *nic, u64 offset)
+{
+ void __iomem *addr = otx2_get_regaddr(nic, offset);
+
+ return readq(addr);
+}
+
+/* Mbox bounce buffer APIs */
+static inline int otx2_mbox_bbuf_init(struct mbox *mbox, struct pci_dev *pdev)
+{
+ struct otx2_mbox *otx2_mbox;
+ struct otx2_mbox_dev *mdev;
+
+ mbox->bbuf_base = devm_kmalloc(&pdev->dev, MBOX_SIZE, GFP_KERNEL);
+ if (!mbox->bbuf_base)
+ return -ENOMEM;
+
+ /* Overwrite mbox mbase to point to the bounce buffer, so that the
+ * PF/VF prepares all mbox messages in the bounce buffer instead of
+ * directly in hw mbox memory.
+ */
+ otx2_mbox = &mbox->mbox;
+ mdev = &otx2_mbox->dev[0];
+ mdev->mbase = mbox->bbuf_base;
+
+ otx2_mbox = &mbox->mbox_up;
+ mdev = &otx2_mbox->dev[0];
+ mdev->mbase = mbox->bbuf_base;
+ return 0;
+}
+
+static inline void otx2_sync_mbox_bbuf(struct otx2_mbox *mbox, int devid)
+{
+ u16 msgs_offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
+ void *hw_mbase = mbox->hwbase + (devid * MBOX_SIZE);
+ struct otx2_mbox_dev *mdev = &mbox->dev[devid];
+ struct mbox_hdr *hdr;
+ u64 msg_size;
+
+ if (mdev->mbase == hw_mbase)
+ return;
+
+ hdr = hw_mbase + mbox->rx_start;
+ msg_size = hdr->msg_size;
+
+ if (msg_size > mbox->rx_size - msgs_offset)
+ msg_size = mbox->rx_size - msgs_offset;
+
+ /* Copy mbox messages from mbox memory to bounce buffer */
+ memcpy(mdev->mbase + mbox->rx_start,
+ hw_mbase + mbox->rx_start, msg_size + msgs_offset);
+}
+
+/* In the absence of an API for 128-bit IO memory access on arm64,
+ * implement the required operations in place.
+ */
+#if defined(CONFIG_ARM64)
+static inline void otx2_write128(u64 lo, u64 hi, void __iomem *addr)
+{
+ __asm__ volatile("stp %x[x0], %x[x1], [%x[p1],#0]!"
+ ::[x0]"r"(lo), [x1]"r"(hi), [p1]"r"(addr));
+}
+
+static inline u64 otx2_atomic64_add(u64 incr, u64 *ptr)
+{
+ u64 result;
+
+ __asm__ volatile(".cpu generic+lse\n"
+ "ldadd %x[i], %x[r], [%[b]]"
+ : [r]"=r"(result), "+m"(*ptr)
+ : [i]"r"(incr), [b]"r"(ptr)
+ : "memory");
+ return result;
+}
+
+#else
+#define otx2_write128(lo, hi, addr) writeq((hi) | (lo), addr)
+#define otx2_atomic64_add(incr, ptr) ({ *ptr += incr; })
+#endif
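+/* otx2_write128() is used below to post an (address, aura) pair to the
+ * NPA free-pointer register in one shot, and otx2_atomic64_add() performs
+ * the atomic add-and-read used by the NPA alloc operation.
+ */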
+
+static inline void __cn10k_aura_freeptr(struct otx2_nic *pfvf, u64 aura,
+ u64 *ptrs, u64 num_ptrs)
+{
+ struct otx2_lmt_info *lmt_info;
+ u64 size = 0, count_eot = 0;
+ u64 tar_addr, val = 0;
+
+ lmt_info = per_cpu_ptr(pfvf->hw.lmt_info, smp_processor_id());
+ tar_addr = (__force u64)otx2_get_regaddr(pfvf, NPA_LF_AURA_BATCH_FREE0);
+ /* LMTID is same as AURA Id */
+ val = (lmt_info->lmt_id & 0x7FF) | BIT_ULL(63);
+ /* Set if [127:64] of last 128bit word has a valid pointer */
+ count_eot = (num_ptrs % 2) ? 0ULL : 1ULL;
+ /* Set AURA ID to free pointer */
+ ptrs[0] = (count_eot << 32) | (aura & 0xFFFFF);
+ /* Target address for LMTST flush tells HW how many 128bit
+ * words are valid from NPA_LF_AURA_BATCH_FREE0.
+ *
+ * tar_addr[6:4] is LMTST size-1 in units of 128b.
+ */
+ if (num_ptrs > 2) {
+ size = (sizeof(u64) * num_ptrs) / 16;
+ if (!count_eot)
+ size++;
+ tar_addr |= ((size - 1) & 0x7) << 4;
+ }
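+ /* e.g. freeing 5 pointers (40 bytes) spans three 128-bit LMT
+ * words, so bits [6:4] of tar_addr are set to 2 (size - 1).
+ */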
+ dma_wmb();
+ memcpy((u64 *)lmt_info->lmt_addr, ptrs, sizeof(u64) * num_ptrs);
+ /* Perform LMTST flush */
+ cn10k_lmt_flush(val, tar_addr);
+}
+
+static inline void cn10k_aura_freeptr(void *dev, int aura, u64 buf)
+{
+ struct otx2_nic *pfvf = dev;
+ u64 ptrs[2];
+
+ ptrs[1] = buf;
+ get_cpu();
+ /* Free only one buffer at a time during init and teardown */
+ __cn10k_aura_freeptr(pfvf, aura, ptrs, 2);
+ put_cpu();
+}
+
+/* Alloc pointer from pool/aura */
+static inline u64 otx2_aura_allocptr(struct otx2_nic *pfvf, int aura)
+{
+ u64 *ptr = (__force u64 *)otx2_get_regaddr(pfvf, NPA_LF_AURA_OP_ALLOCX(0));
+ u64 incr = (u64)aura | BIT_ULL(63);
+
+ return otx2_atomic64_add(incr, ptr);
+}
+
+/* Free pointer to a pool/aura */
+static inline void otx2_aura_freeptr(void *dev, int aura, u64 buf)
+{
+ struct otx2_nic *pfvf = dev;
+ void __iomem *addr = otx2_get_regaddr(pfvf, NPA_LF_AURA_OP_FREE0);
+
+ otx2_write128(buf, (u64)aura | BIT_ULL(63), addr);
+}
+
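+/* Pools/auras are laid out with all RQ pools first, followed by the
+ * SQ pools, so an SQ's pool index is rqpool_cnt + queue index.
+ */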
+static inline int otx2_get_pool_idx(struct otx2_nic *pfvf, int type, int idx)
+{
+ if (type == AURA_NIX_SQ)
+ return pfvf->hw.rqpool_cnt + idx;
+
+ /* AURA_NIX_RQ */
+ return idx;
+}
+
+/* Mbox APIs */
+static inline int otx2_sync_mbox_msg(struct mbox *mbox)
+{
+ int err;
+
+ if (!otx2_mbox_nonempty(&mbox->mbox, 0))
+ return 0;
+ otx2_mbox_msg_send(&mbox->mbox, 0);
+ err = otx2_mbox_wait_for_rsp(&mbox->mbox, 0);
+ if (err)
+ return err;
+
+ return otx2_mbox_check_rsp_msgs(&mbox->mbox, 0);
+}
+
+static inline int otx2_sync_mbox_up_msg(struct mbox *mbox, int devid)
+{
+ int err;
+
+ if (!otx2_mbox_nonempty(&mbox->mbox_up, devid))
+ return 0;
+ otx2_mbox_msg_send(&mbox->mbox_up, devid);
+ err = otx2_mbox_wait_for_rsp(&mbox->mbox_up, devid);
+ if (err)
+ return err;
+
+ return otx2_mbox_check_rsp_msgs(&mbox->mbox_up, devid);
+}
+
+/* Use this API to send mbox msgs in atomic context
+ * where sleeping is not allowed
+ */
+static inline int otx2_sync_mbox_msg_busy_poll(struct mbox *mbox)
+{
+ int err;
+
+ if (!otx2_mbox_nonempty(&mbox->mbox, 0))
+ return 0;
+ otx2_mbox_msg_send(&mbox->mbox, 0);
+ err = otx2_mbox_busy_poll_for_rsp(&mbox->mbox, 0);
+ if (err)
+ return err;
+
+ return otx2_mbox_check_rsp_msgs(&mbox->mbox, 0);
+}
+
+#define M(_name, _id, _fn_name, _req_type, _rsp_type) \
+static struct _req_type __maybe_unused \
+*otx2_mbox_alloc_msg_ ## _fn_name(struct mbox *mbox) \
+{ \
+ struct _req_type *req; \
+ \
+ req = (struct _req_type *)otx2_mbox_alloc_msg_rsp( \
+ &mbox->mbox, 0, sizeof(struct _req_type), \
+ sizeof(struct _rsp_type)); \
+ if (!req) \
+ return NULL; \
+ req->hdr.sig = OTX2_MBOX_REQ_SIG; \
+ req->hdr.id = _id; \
+ trace_otx2_msg_alloc(mbox->mbox.pdev, _id, sizeof(*req)); \
+ return req; \
+}
+
+MBOX_MESSAGES
+#undef M
+
+#define M(_name, _id, _fn_name, _req_type, _rsp_type) \
+int \
+otx2_mbox_up_handler_ ## _fn_name(struct otx2_nic *pfvf, \
+ struct _req_type *req, \
+ struct _rsp_type *rsp); \
+
+MBOX_UP_CGX_MESSAGES
+MBOX_UP_MCS_MESSAGES
+#undef M
+
+/* Time to wait before the watchdog kicks in */
+#define OTX2_TX_TIMEOUT (100 * HZ)
+
+#define RVU_PFVF_PF_SHIFT 10
+#define RVU_PFVF_PF_MASK 0x3F
+#define RVU_PFVF_FUNC_SHIFT 0
+#define RVU_PFVF_FUNC_MASK 0x3FF
+
+static inline bool is_otx2_vf(u16 pcifunc)
+{
+ return !!(pcifunc & RVU_PFVF_FUNC_MASK);
+}
+
+static inline int rvu_get_pf(u16 pcifunc)
+{
+ return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
+}
+
+static inline dma_addr_t otx2_dma_map_page(struct otx2_nic *pfvf,
+ struct page *page,
+ size_t offset, size_t size,
+ enum dma_data_direction dir)
+{
+ dma_addr_t iova;
+
+ iova = dma_map_page_attrs(pfvf->dev, page,
+ offset, size, dir, DMA_ATTR_SKIP_CPU_SYNC);
+ if (unlikely(dma_mapping_error(pfvf->dev, iova)))
+ return (dma_addr_t)NULL;
+ return iova;
+}
+
+static inline void otx2_dma_unmap_page(struct otx2_nic *pfvf,
+ dma_addr_t addr, size_t size,
+ enum dma_data_direction dir)
+{
+ dma_unmap_page_attrs(pfvf->dev, addr, size,
+ dir, DMA_ATTR_SKIP_CPU_SYNC);
+}
+
+static inline u16 otx2_get_smq_idx(struct otx2_nic *pfvf, u16 qidx)
+{
+ u16 smq;
+#ifdef CONFIG_DCB
+ if (qidx < NIX_PF_PFC_PRIO_MAX && pfvf->pfc_alloc_status[qidx])
+ return pfvf->pfc_schq_list[NIX_TXSCH_LVL_SMQ][qidx];
+#endif
+ /* check if qidx falls under QOS queues */
+ if (qidx >= pfvf->hw.non_qos_queues)
+ smq = pfvf->qos.qid_to_sqmap[qidx - pfvf->hw.non_qos_queues];
+ else
+ smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0];
+
+ return smq;
+}
+
+static inline u16 otx2_get_total_tx_queues(struct otx2_nic *pfvf)
+{
+ return pfvf->hw.non_qos_queues + pfvf->hw.tc_tx_queues;
+}
+
+static inline u64 otx2_convert_rate(u64 rate)
+{
+ u64 converted_rate;
+
+ /* Convert bytes per second to Mbps */
+ converted_rate = rate * 8;
+ converted_rate = max_t(u64, converted_rate / 1000000, 1);
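+ /* e.g. 1,250,000 bytes/sec -> 10,000,000 bits/sec -> 10 Mbps */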
+
+ return converted_rate;
+}
+
+static inline int otx2_tc_flower_rule_cnt(struct otx2_nic *pfvf)
+{
+ /* return here if MCAM entries not allocated */
+ if (!pfvf->flow_cfg)
+ return 0;
+
+ return pfvf->flow_cfg->nr_flows;
+}
+
+/* MSI-X APIs */
+void otx2_free_cints(struct otx2_nic *pfvf, int n);
+void otx2_set_cints_affinity(struct otx2_nic *pfvf);
+int otx2_set_mac_address(struct net_device *netdev, void *p);
+int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu);
+void otx2_tx_timeout(struct net_device *netdev, unsigned int txq);
+void otx2_get_mac_from_af(struct net_device *netdev);
+void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx);
+int otx2_config_pause_frm(struct otx2_nic *pfvf);
+void otx2_setup_segmentation(struct otx2_nic *pfvf);
+
+/* RVU block related APIs */
+int otx2_attach_npa_nix(struct otx2_nic *pfvf);
+int otx2_detach_resources(struct mbox *mbox);
+int otx2_config_npa(struct otx2_nic *pfvf);
+int otx2_sq_aura_pool_init(struct otx2_nic *pfvf);
+int otx2_rq_aura_pool_init(struct otx2_nic *pfvf);
+void otx2_aura_pool_free(struct otx2_nic *pfvf);
+void otx2_free_aura_ptr(struct otx2_nic *pfvf, int type);
+void otx2_sq_free_sqbs(struct otx2_nic *pfvf);
+int otx2_config_nix(struct otx2_nic *pfvf);
+int otx2_config_nix_queues(struct otx2_nic *pfvf);
+int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool pfc_en);
+int otx2_txsch_alloc(struct otx2_nic *pfvf);
+void otx2_txschq_stop(struct otx2_nic *pfvf);
+void otx2_txschq_free_one(struct otx2_nic *pfvf, u16 lvl, u16 schq);
+void otx2_free_pending_sqe(struct otx2_nic *pfvf);
+void otx2_sqb_flush(struct otx2_nic *pfvf);
+int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
+ dma_addr_t *dma);
+int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable);
+void otx2_ctx_disable(struct mbox *mbox, int type, bool npa);
+int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable);
+void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, int qidx);
+void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq);
+int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura);
+int otx2_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura);
+int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura);
+int otx2_alloc_buffer(struct otx2_nic *pfvf, struct otx2_cq_queue *cq,
+ dma_addr_t *dma);
+int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
+ int stack_pages, int numptrs, int buf_size, int type);
+int otx2_aura_init(struct otx2_nic *pfvf, int aura_id,
+ int pool_id, int numptrs);
+
+/* RSS configuration APIs*/
+int otx2_rss_init(struct otx2_nic *pfvf);
+int otx2_set_flowkey_cfg(struct otx2_nic *pfvf);
+void otx2_set_rss_key(struct otx2_nic *pfvf);
+int otx2_set_rss_table(struct otx2_nic *pfvf, int ctx_id);
+
+/* Mbox handlers */
+void mbox_handler_msix_offset(struct otx2_nic *pfvf,
+ struct msix_offset_rsp *rsp);
+void mbox_handler_npa_lf_alloc(struct otx2_nic *pfvf,
+ struct npa_lf_alloc_rsp *rsp);
+void mbox_handler_nix_lf_alloc(struct otx2_nic *pfvf,
+ struct nix_lf_alloc_rsp *rsp);
+void mbox_handler_nix_txsch_alloc(struct otx2_nic *pf,
+ struct nix_txsch_alloc_rsp *rsp);
+void mbox_handler_cgx_stats(struct otx2_nic *pfvf,
+ struct cgx_stats_rsp *rsp);
+void mbox_handler_cgx_fec_stats(struct otx2_nic *pfvf,
+ struct cgx_fec_stats_rsp *rsp);
+void otx2_set_fec_stats_count(struct otx2_nic *pfvf);
+void mbox_handler_nix_bp_enable(struct otx2_nic *pfvf,
+ struct nix_bp_cfg_rsp *rsp);
+
+/* Device stats APIs */
+void otx2_get_dev_stats(struct otx2_nic *pfvf);
+void otx2_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats);
+void otx2_update_lmac_stats(struct otx2_nic *pfvf);
+void otx2_update_lmac_fec_stats(struct otx2_nic *pfvf);
+int otx2_update_rq_stats(struct otx2_nic *pfvf, int qidx);
+int otx2_update_sq_stats(struct otx2_nic *pfvf, int qidx);
+void otx2_set_ethtool_ops(struct net_device *netdev);
+void otx2vf_set_ethtool_ops(struct net_device *netdev);
+
+int otx2_open(struct net_device *netdev);
+int otx2_stop(struct net_device *netdev);
+int otx2_set_real_num_queues(struct net_device *netdev,
+ int tx_queues, int rx_queues);
+int otx2_ioctl(struct net_device *netdev, struct ifreq *req, int cmd);
+int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr);
+
+/* MCAM filter related APIs */
+int otx2_mcam_flow_init(struct otx2_nic *pf);
+int otx2vf_mcam_flow_init(struct otx2_nic *pfvf);
+int otx2_alloc_mcam_entries(struct otx2_nic *pfvf, u16 count);
+void otx2_mcam_flow_del(struct otx2_nic *pf);
+int otx2_destroy_ntuple_flows(struct otx2_nic *pf);
+int otx2_destroy_mcam_flows(struct otx2_nic *pfvf);
+int otx2_get_flow(struct otx2_nic *pfvf,
+ struct ethtool_rxnfc *nfc, u32 location);
+int otx2_get_all_flows(struct otx2_nic *pfvf,
+ struct ethtool_rxnfc *nfc, u32 *rule_locs);
+int otx2_add_flow(struct otx2_nic *pfvf,
+ struct ethtool_rxnfc *nfc);
+int otx2_remove_flow(struct otx2_nic *pfvf, u32 location);
+int otx2_get_maxflows(struct otx2_flow_config *flow_cfg);
+void otx2_rss_ctx_flow_del(struct otx2_nic *pfvf, int ctx_id);
+int otx2_del_macfilter(struct net_device *netdev, const u8 *mac);
+int otx2_add_macfilter(struct net_device *netdev, const u8 *mac);
+int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable);
+int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf);
+bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx);
+u16 otx2_get_max_mtu(struct otx2_nic *pfvf);
+int otx2_handle_ntuple_tc_features(struct net_device *netdev,
+ netdev_features_t features);
+int otx2_smq_flush(struct otx2_nic *pfvf, int smq);
+void otx2_free_bufs(struct otx2_nic *pfvf, struct otx2_pool *pool,
+ u64 iova, int size);
+
+/* tc support */
+int otx2_init_tc(struct otx2_nic *nic);
+void otx2_shutdown_tc(struct otx2_nic *nic);
+int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type,
+ void *type_data);
+void otx2_tc_apply_ingress_police_rules(struct otx2_nic *nic);
+
+/* CGX/RPM DMAC filters support */
+int otx2_dmacflt_get_max_cnt(struct otx2_nic *pf);
+int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u32 bit_pos);
+int otx2_dmacflt_remove(struct otx2_nic *pf, const u8 *mac, u32 bit_pos);
+int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u32 bit_pos);
+void otx2_dmacflt_reinstall_flows(struct otx2_nic *pf);
+void otx2_dmacflt_update_pfmac_flow(struct otx2_nic *pfvf);
+
+#ifdef CONFIG_DCB
+/* DCB support*/
+void otx2_update_bpid_in_rqctx(struct otx2_nic *pfvf, int vlan_prio, int qidx, bool pfc_enable);
+int otx2_config_priority_flow_ctrl(struct otx2_nic *pfvf);
+int otx2_dcbnl_set_ops(struct net_device *dev);
+/* PFC support */
+int otx2_pfc_txschq_config(struct otx2_nic *pfvf);
+int otx2_pfc_txschq_alloc(struct otx2_nic *pfvf);
+int otx2_pfc_txschq_update(struct otx2_nic *pfvf);
+int otx2_pfc_txschq_stop(struct otx2_nic *pfvf);
+#endif
+
+#if IS_ENABLED(CONFIG_MACSEC)
+/* MACSEC offload support */
+int cn10k_mcs_init(struct otx2_nic *pfvf);
+void cn10k_mcs_free(struct otx2_nic *pfvf);
+void cn10k_handle_mcs_event(struct otx2_nic *pfvf, struct mcs_intr_info *event);
+#else
+static inline int cn10k_mcs_init(struct otx2_nic *pfvf) { return 0; }
+static inline void cn10k_mcs_free(struct otx2_nic *pfvf) {}
+static inline void cn10k_handle_mcs_event(struct otx2_nic *pfvf,
+ struct mcs_intr_info *event)
+{}
+#endif /* CONFIG_MACSEC */
+
+/* qos support */
+static inline void otx2_qos_init(struct otx2_nic *pfvf, int qos_txqs)
+{
+ struct otx2_hw *hw = &pfvf->hw;
+
+ hw->tc_tx_queues = qos_txqs;
+ INIT_LIST_HEAD(&pfvf->qos.qos_tree);
+ mutex_init(&pfvf->qos.qos_lock);
+}
+
+static inline void otx2_shutdown_qos(struct otx2_nic *pfvf)
+{
+ mutex_destroy(&pfvf->qos.qos_lock);
+}
+
+u16 otx2_select_queue(struct net_device *netdev, struct sk_buff *skb,
+ struct net_device *sb_dev);
+int otx2_get_txq_by_classid(struct otx2_nic *pfvf, u16 classid);
+void otx2_qos_config_txschq(struct otx2_nic *pfvf);
+void otx2_clean_qos_queues(struct otx2_nic *pfvf);
+#endif /* OTX2_COMMON_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
new file mode 100644
index 000000000..28fb643d2
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
@@ -0,0 +1,474 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Ethernet driver
+ *
+ * Copyright (C) 2021 Marvell.
+ *
+ */
+
+#include "otx2_common.h"
+
+static int otx2_check_pfc_config(struct otx2_nic *pfvf)
+{
+ u8 tx_queues = pfvf->hw.tx_queues, prio;
+ u8 pfc_en = pfvf->pfc_en;
+
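+ /* Each enabled PFC priority 'prio' is serviced by tx queue 'prio',
+ * so at least prio + 1 tx queues must be configured.
+ */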
+ for (prio = 0; prio < NIX_PF_PFC_PRIO_MAX; prio++) {
+ if ((pfc_en & (1 << prio)) &&
+ prio > tx_queues - 1) {
+ dev_warn(pfvf->dev,
+ "Increase number of tx queues from %d to %d to support PFC.\n",
+ tx_queues, prio + 1);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+int otx2_pfc_txschq_config(struct otx2_nic *pfvf)
+{
+ u8 pfc_en, pfc_bit_set;
+ int prio, lvl, err;
+
+ pfc_en = pfvf->pfc_en;
+ for (prio = 0; prio < NIX_PF_PFC_PRIO_MAX; prio++) {
+ pfc_bit_set = pfc_en & (1 << prio);
+
+ /* Either PFC bit is not set
+ * or tx scheduler is not allocated for the priority
+ */
+ if (!pfc_bit_set || !pfvf->pfc_alloc_status[prio])
+ continue;
+
+ /* Configure the schedulers for the TLs */
+ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
+ err = otx2_txschq_config(pfvf, lvl, prio, true);
+ if (err) {
+ dev_err(pfvf->dev,
+ "%s configure PFC tx schq for lvl:%d, prio:%d failed!\n",
+ __func__, lvl, prio);
+ return err;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int otx2_pfc_txschq_alloc_one(struct otx2_nic *pfvf, u8 prio)
+{
+ struct nix_txsch_alloc_req *req;
+ struct nix_txsch_alloc_rsp *rsp;
+ int lvl, rc;
+
+ /* Get memory to put this msg */
+ req = otx2_mbox_alloc_msg_nix_txsch_alloc(&pfvf->mbox);
+ if (!req)
+ return -ENOMEM;
+
+ /* Request one schq per level up to the configured link
+ * config level. The rest of the schedulers can be the
+ * same as hw.txschq_list.
+ */
+ for (lvl = 0; lvl <= pfvf->hw.txschq_link_cfg_lvl; lvl++)
+ req->schq[lvl] = 1;
+
+ rc = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (rc)
+ return rc;
+
+ rsp = (struct nix_txsch_alloc_rsp *)
+ otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
+ if (IS_ERR(rsp))
+ return PTR_ERR(rsp);
+
+ /* Setup transmit scheduler list */
+ for (lvl = 0; lvl <= pfvf->hw.txschq_link_cfg_lvl; lvl++) {
+ if (!rsp->schq[lvl])
+ return -ENOSPC;
+
+ pfvf->pfc_schq_list[lvl][prio] = rsp->schq_list[lvl][0];
+ }
+
+ /* Set the Tx schedulers for the rest of the levels to the
+ * same values as hw.txschq_list, as those are common for all.
+ */
+ for (; lvl < NIX_TXSCH_LVL_CNT; lvl++)
+ pfvf->pfc_schq_list[lvl][prio] = pfvf->hw.txschq_list[lvl][0];
+
+ pfvf->pfc_alloc_status[prio] = true;
+ return 0;
+}
+
+int otx2_pfc_txschq_alloc(struct otx2_nic *pfvf)
+{
+ u8 pfc_en = pfvf->pfc_en;
+ u8 pfc_bit_set;
+ int err, prio;
+
+ for (prio = 0; prio < NIX_PF_PFC_PRIO_MAX; prio++) {
+ pfc_bit_set = pfc_en & (1 << prio);
+
+ if (!pfc_bit_set || pfvf->pfc_alloc_status[prio])
+ continue;
+
+ /* Add new scheduler to the priority */
+ err = otx2_pfc_txschq_alloc_one(pfvf, prio);
+ if (err) {
+ dev_err(pfvf->dev, "%s failed to allocate PFC TX schedulers\n", __func__);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int otx2_pfc_txschq_stop_one(struct otx2_nic *pfvf, u8 prio)
+{
+ int lvl;
+
+ /* free PFC TLx nodes */
+ for (lvl = 0; lvl <= pfvf->hw.txschq_link_cfg_lvl; lvl++)
+ otx2_txschq_free_one(pfvf, lvl,
+ pfvf->pfc_schq_list[lvl][prio]);
+
+ pfvf->pfc_alloc_status[prio] = false;
+ return 0;
+}
+
+static int otx2_pfc_update_sq_smq_mapping(struct otx2_nic *pfvf, int prio)
+{
+ struct nix_cn10k_aq_enq_req *cn10k_sq_aq;
+ struct net_device *dev = pfvf->netdev;
+ bool if_up = netif_running(dev);
+ struct nix_aq_enq_req *sq_aq;
+
+ if (if_up) {
+ if (pfvf->pfc_alloc_status[prio])
+ netif_tx_stop_all_queues(pfvf->netdev);
+ else
+ netif_tx_stop_queue(netdev_get_tx_queue(dev, prio));
+ }
+
+ if (test_bit(CN10K_LMTST, &pfvf->hw.cap_flag)) {
+ cn10k_sq_aq = otx2_mbox_alloc_msg_nix_cn10k_aq_enq(&pfvf->mbox);
+ if (!cn10k_sq_aq)
+ return -ENOMEM;
+
+ /* Fill AQ info */
+ cn10k_sq_aq->qidx = prio;
+ cn10k_sq_aq->ctype = NIX_AQ_CTYPE_SQ;
+ cn10k_sq_aq->op = NIX_AQ_INSTOP_WRITE;
+
+ /* Fill fields to update */
+ cn10k_sq_aq->sq.ena = 1;
+ cn10k_sq_aq->sq_mask.ena = 1;
+ cn10k_sq_aq->sq_mask.smq = GENMASK(9, 0);
+ cn10k_sq_aq->sq.smq = otx2_get_smq_idx(pfvf, prio);
+ } else {
+ sq_aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
+ if (!sq_aq)
+ return -ENOMEM;
+
+ /* Fill AQ info */
+ sq_aq->qidx = prio;
+ sq_aq->ctype = NIX_AQ_CTYPE_SQ;
+ sq_aq->op = NIX_AQ_INSTOP_WRITE;
+
+ /* Fill fields to update */
+ sq_aq->sq.ena = 1;
+ sq_aq->sq_mask.ena = 1;
+ sq_aq->sq_mask.smq = GENMASK(8, 0);
+ sq_aq->sq.smq = otx2_get_smq_idx(pfvf, prio);
+ }
+
+ otx2_sync_mbox_msg(&pfvf->mbox);
+
+ if (if_up) {
+ if (pfvf->pfc_alloc_status[prio])
+ netif_tx_start_all_queues(pfvf->netdev);
+ else
+ netif_tx_start_queue(netdev_get_tx_queue(dev, prio));
+ }
+
+ return 0;
+}
+
+int otx2_pfc_txschq_update(struct otx2_nic *pfvf)
+{
+ bool if_up = netif_running(pfvf->netdev);
+ u8 pfc_en = pfvf->pfc_en, pfc_bit_set;
+ struct mbox *mbox = &pfvf->mbox;
+ int err, prio;
+
+ mutex_lock(&mbox->lock);
+ for (prio = 0; prio < NIX_PF_PFC_PRIO_MAX; prio++) {
+ pfc_bit_set = pfc_en & (1 << prio);
+
+ /* tx scheduler was created but user wants to disable now */
+ if (!pfc_bit_set && pfvf->pfc_alloc_status[prio]) {
+ mutex_unlock(&mbox->lock);
+ if (if_up)
+ netif_tx_stop_all_queues(pfvf->netdev);
+
+ otx2_smq_flush(pfvf, pfvf->pfc_schq_list[NIX_TXSCH_LVL_SMQ][prio]);
+ if (if_up)
+ netif_tx_start_all_queues(pfvf->netdev);
+
+ /* delete the schq */
+ err = otx2_pfc_txschq_stop_one(pfvf, prio);
+ if (err) {
+ dev_err(pfvf->dev,
+ "%s failed to stop PFC tx schedulers for priority: %d\n",
+ __func__, prio);
+ return err;
+ }
+
+ mutex_lock(&mbox->lock);
+ goto update_sq_smq_map;
+ }
+
+ /* Either PFC bit is not set
+ * or Tx scheduler is already mapped for the priority
+ */
+ if (!pfc_bit_set || pfvf->pfc_alloc_status[prio])
+ continue;
+
+ /* Add new scheduler to the priority */
+ err = otx2_pfc_txschq_alloc_one(pfvf, prio);
+ if (err) {
+ mutex_unlock(&mbox->lock);
+ dev_err(pfvf->dev,
+ "%s failed to allocate PFC tx schedulers for priority: %d\n",
+ __func__, prio);
+ return err;
+ }
+
+update_sq_smq_map:
+ err = otx2_pfc_update_sq_smq_mapping(pfvf, prio);
+ if (err) {
+ mutex_unlock(&mbox->lock);
+ dev_err(pfvf->dev, "%s failed PFC Tx schq sq:%d mapping", __func__, prio);
+ return err;
+ }
+ }
+
+ err = otx2_pfc_txschq_config(pfvf);
+ mutex_unlock(&mbox->lock);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+int otx2_pfc_txschq_stop(struct otx2_nic *pfvf)
+{
+ u8 pfc_en, pfc_bit_set;
+ int prio, err;
+
+ pfc_en = pfvf->pfc_en;
+ for (prio = 0; prio < NIX_PF_PFC_PRIO_MAX; prio++) {
+ pfc_bit_set = pfc_en & (1 << prio);
+ if (!pfc_bit_set || !pfvf->pfc_alloc_status[prio])
+ continue;
+
+ /* Delete the existing scheduler */
+ err = otx2_pfc_txschq_stop_one(pfvf, prio);
+ if (err) {
+ dev_err(pfvf->dev, "%s failed to stop PFC TX schedulers\n", __func__);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+int otx2_config_priority_flow_ctrl(struct otx2_nic *pfvf)
+{
+ struct cgx_pfc_cfg *req;
+ struct cgx_pfc_rsp *rsp;
+ int err = 0;
+
+ if (is_otx2_lbkvf(pfvf->pdev))
+ return 0;
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_cgx_prio_flow_ctrl_cfg(&pfvf->mbox);
+ if (!req) {
+ err = -ENOMEM;
+ goto unlock;
+ }
+
+ if (pfvf->pfc_en) {
+ req->rx_pause = true;
+ req->tx_pause = true;
+ } else {
+ req->rx_pause = false;
+ req->tx_pause = false;
+ }
+ req->pfc_en = pfvf->pfc_en;
+
+ if (!otx2_sync_mbox_msg(&pfvf->mbox)) {
+ rsp = (struct cgx_pfc_rsp *)
+ otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
+ if (req->rx_pause != rsp->rx_pause || req->tx_pause != rsp->tx_pause) {
+ dev_warn(pfvf->dev,
+ "Failed to config PFC\n");
+ err = -EPERM;
+ }
+ }
+unlock:
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
+}
+
+void otx2_update_bpid_in_rqctx(struct otx2_nic *pfvf, int vlan_prio, int qidx,
+ bool pfc_enable)
+{
+ bool if_up = netif_running(pfvf->netdev);
+ struct npa_aq_enq_req *npa_aq;
+ struct nix_aq_enq_req *aq;
+ int err = 0;
+
+ if (pfvf->queue_to_pfc_map[qidx] && pfc_enable) {
+ dev_warn(pfvf->dev,
+ "PFC enable not permitted as Priority %d already mapped to Queue %d\n",
+ pfvf->queue_to_pfc_map[qidx], qidx);
+ return;
+ }
+
+ if (if_up) {
+ netif_tx_stop_all_queues(pfvf->netdev);
+ netif_carrier_off(pfvf->netdev);
+ }
+
+ pfvf->queue_to_pfc_map[qidx] = vlan_prio;
+
+ aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
+ if (!aq) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ aq->cq.bpid = pfvf->bpid[vlan_prio];
+ aq->cq_mask.bpid = GENMASK(8, 0);
+
+ /* Fill AQ info */
+ aq->qidx = qidx;
+ aq->ctype = NIX_AQ_CTYPE_CQ;
+ aq->op = NIX_AQ_INSTOP_WRITE;
+
+ otx2_sync_mbox_msg(&pfvf->mbox);
+
+ npa_aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
+ if (!npa_aq) {
+ err = -ENOMEM;
+ goto out;
+ }
+ npa_aq->aura.nix0_bpid = pfvf->bpid[vlan_prio];
+ npa_aq->aura_mask.nix0_bpid = GENMASK(8, 0);
+
+ /* Fill NPA AQ info */
+ npa_aq->aura_id = qidx;
+ npa_aq->ctype = NPA_AQ_CTYPE_AURA;
+ npa_aq->op = NPA_AQ_INSTOP_WRITE;
+ otx2_sync_mbox_msg(&pfvf->mbox);
+
+out:
+ if (if_up) {
+ netif_carrier_on(pfvf->netdev);
+ netif_tx_start_all_queues(pfvf->netdev);
+ }
+
+ if (err)
+ dev_warn(pfvf->dev,
+ "Updating BPIDs in CQ and Aura contexts of RQ%d failed with err %d\n",
+ qidx, err);
+}
+
+static int otx2_dcbnl_ieee_getpfc(struct net_device *dev, struct ieee_pfc *pfc)
+{
+ struct otx2_nic *pfvf = netdev_priv(dev);
+
+ pfc->pfc_cap = IEEE_8021QAZ_MAX_TCS;
+ pfc->pfc_en = pfvf->pfc_en;
+
+ return 0;
+}
+
+static int otx2_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc)
+{
+ struct otx2_nic *pfvf = netdev_priv(dev);
+ u8 old_pfc_en;
+ int err;
+
+ old_pfc_en = pfvf->pfc_en;
+ pfvf->pfc_en = pfc->pfc_en;
+
+ if (pfvf->hw.tx_queues >= NIX_PF_PFC_PRIO_MAX)
+ goto process_pfc;
+
+ /* Check if the PFC configuration can be
+ * supported by the tx queue configuration
+ */
+ err = otx2_check_pfc_config(pfvf);
+ if (err) {
+ pfvf->pfc_en = old_pfc_en;
+ return err;
+ }
+
+process_pfc:
+ err = otx2_config_priority_flow_ctrl(pfvf);
+ if (err) {
+ pfvf->pfc_en = old_pfc_en;
+ return err;
+ }
+
+ /* Request Per channel Bpids */
+ if (pfc->pfc_en)
+ otx2_nix_config_bp(pfvf, true);
+
+ err = otx2_pfc_txschq_update(pfvf);
+ if (err) {
+ if (pfc->pfc_en)
+ otx2_nix_config_bp(pfvf, false);
+
+ otx2_pfc_txschq_stop(pfvf);
+ pfvf->pfc_en = old_pfc_en;
+ otx2_config_priority_flow_ctrl(pfvf);
+ dev_err(pfvf->dev, "%s failed to update TX schedulers\n", __func__);
+ return err;
+ }
+
+ return 0;
+}
+
+static u8 otx2_dcbnl_getdcbx(struct net_device __always_unused *dev)
+{
+ return DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
+}
+
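+/* Only host-managed IEEE DCBX is supported; any other mode is rejected. */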
+static u8 otx2_dcbnl_setdcbx(struct net_device __always_unused *dev, u8 mode)
+{
+ return (mode != (DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE)) ? 1 : 0;
+}
+
+static const struct dcbnl_rtnl_ops otx2_dcbnl_ops = {
+ .ieee_getpfc = otx2_dcbnl_ieee_getpfc,
+ .ieee_setpfc = otx2_dcbnl_ieee_setpfc,
+ .getdcbx = otx2_dcbnl_getdcbx,
+ .setdcbx = otx2_dcbnl_setdcbx,
+};
+
+int otx2_dcbnl_set_ops(struct net_device *dev)
+{
+ struct otx2_nic *pfvf = netdev_priv(dev);
+
+ pfvf->queue_to_pfc_map = devm_kzalloc(pfvf->dev, pfvf->hw.rx_queues,
+ GFP_KERNEL);
+ if (!pfvf->queue_to_pfc_map)
+ return -ENOMEM;
+ dev->dcbnl_ops = &otx2_dcbnl_ops;
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
new file mode 100644
index 000000000..4e1130496
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU PF/VF Netdev Devlink
+ *
+ * Copyright (C) 2021 Marvell.
+ */
+
+#include "otx2_common.h"
+
+/* Devlink Params APIs */
+static int otx2_dl_mcam_count_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ struct otx2_devlink *otx2_dl = devlink_priv(devlink);
+ struct otx2_nic *pfvf = otx2_dl->pfvf;
+ struct otx2_flow_config *flow_cfg;
+
+ if (!pfvf->flow_cfg) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "pfvf->flow_cfg not initialized");
+ return -EINVAL;
+ }
+
+ flow_cfg = pfvf->flow_cfg;
+ if (flow_cfg && flow_cfg->nr_flows) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot modify count when there are active rules");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int otx2_dl_mcam_count_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct otx2_devlink *otx2_dl = devlink_priv(devlink);
+ struct otx2_nic *pfvf = otx2_dl->pfvf;
+
+ if (!pfvf->flow_cfg)
+ return 0;
+
+ otx2_alloc_mcam_entries(pfvf, ctx->val.vu16);
+
+ return 0;
+}
+
+static int otx2_dl_mcam_count_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct otx2_devlink *otx2_dl = devlink_priv(devlink);
+ struct otx2_nic *pfvf = otx2_dl->pfvf;
+ struct otx2_flow_config *flow_cfg;
+
+ if (!pfvf->flow_cfg) {
+ ctx->val.vu16 = 0;
+ return 0;
+ }
+
+ flow_cfg = pfvf->flow_cfg;
+ ctx->val.vu16 = flow_cfg->max_flows;
+
+ return 0;
+}
+
+enum otx2_dl_param_id {
+ OTX2_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
+ OTX2_DEVLINK_PARAM_ID_MCAM_COUNT,
+};
+
+static const struct devlink_param otx2_dl_params[] = {
+ DEVLINK_PARAM_DRIVER(OTX2_DEVLINK_PARAM_ID_MCAM_COUNT,
+ "mcam_count", DEVLINK_PARAM_TYPE_U16,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ otx2_dl_mcam_count_get, otx2_dl_mcam_count_set,
+ otx2_dl_mcam_count_validate),
+};
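+/* The "mcam_count" parameter is runtime-configurable via devlink, e.g.
+ * (the device address shown is only illustrative):
+ *   devlink dev param set pci/0002:02:00.0 name mcam_count \
+ *           value 64 cmode runtime
+ */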
+
+static const struct devlink_ops otx2_devlink_ops = {
+};
+
+int otx2_register_dl(struct otx2_nic *pfvf)
+{
+ struct otx2_devlink *otx2_dl;
+ struct devlink *dl;
+ int err;
+
+ dl = devlink_alloc(&otx2_devlink_ops,
+ sizeof(struct otx2_devlink), pfvf->dev);
+ if (!dl) {
+ dev_warn(pfvf->dev, "devlink_alloc failed\n");
+ return -ENOMEM;
+ }
+
+ otx2_dl = devlink_priv(dl);
+ otx2_dl->dl = dl;
+ otx2_dl->pfvf = pfvf;
+ pfvf->dl = otx2_dl;
+
+ err = devlink_params_register(dl, otx2_dl_params,
+ ARRAY_SIZE(otx2_dl_params));
+ if (err) {
+ dev_err(pfvf->dev,
+ "devlink params register failed with error %d", err);
+ goto err_dl;
+ }
+
+ devlink_register(dl);
+ return 0;
+
+err_dl:
+ devlink_free(dl);
+ return err;
+}
+
+void otx2_unregister_dl(struct otx2_nic *pfvf)
+{
+ struct otx2_devlink *otx2_dl = pfvf->dl;
+ struct devlink *dl = otx2_dl->dl;
+
+ devlink_unregister(dl);
+ devlink_params_unregister(dl, otx2_dl_params,
+ ARRAY_SIZE(otx2_dl_params));
+ devlink_free(dl);
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.h
new file mode 100644
index 000000000..c7bd4f3c6
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell RVU PF/VF Netdev Devlink
+ *
+ * Copyright (C) 2021 Marvell.
+ *
+ */
+
+#ifndef OTX2_DEVLINK_H
+#define OTX2_DEVLINK_H
+
+struct otx2_devlink {
+ struct devlink *dl;
+ struct otx2_nic *pfvf;
+};
+
+/* Devlink APIs */
+int otx2_register_dl(struct otx2_nic *pfvf);
+void otx2_unregister_dl(struct otx2_nic *pfvf);
+
+#endif /* OTX2_DEVLINK_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c
new file mode 100644
index 000000000..80d853b34
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c
@@ -0,0 +1,209 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Ethernet driver
+ *
+ * Copyright (C) 2021 Marvell.
+ *
+ */
+
+#include "otx2_common.h"
+
+static int otx2_dmacflt_do_add(struct otx2_nic *pf, const u8 *mac,
+ u32 *dmac_index)
+{
+ struct cgx_mac_addr_add_req *req;
+ struct cgx_mac_addr_add_rsp *rsp;
+ int err;
+
+ mutex_lock(&pf->mbox.lock);
+
+ req = otx2_mbox_alloc_msg_cgx_mac_addr_add(&pf->mbox);
+ if (!req) {
+ mutex_unlock(&pf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ ether_addr_copy(req->mac_addr, mac);
+ err = otx2_sync_mbox_msg(&pf->mbox);
+
+ if (!err) {
+ rsp = (struct cgx_mac_addr_add_rsp *)
+ otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr);
+ *dmac_index = rsp->index;
+ }
+
+ mutex_unlock(&pf->mbox.lock);
+ return err;
+}
+
+static int otx2_dmacflt_add_pfmac(struct otx2_nic *pf, u32 *dmac_index)
+{
+ struct cgx_mac_addr_set_or_get *req;
+ struct cgx_mac_addr_set_or_get *rsp;
+ int err;
+
+ mutex_lock(&pf->mbox.lock);
+
+ req = otx2_mbox_alloc_msg_cgx_mac_addr_set(&pf->mbox);
+ if (!req) {
+ mutex_unlock(&pf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ req->index = *dmac_index;
+
+ ether_addr_copy(req->mac_addr, pf->netdev->dev_addr);
+ err = otx2_sync_mbox_msg(&pf->mbox);
+
+ if (err)
+ goto out;
+
+ rsp = (struct cgx_mac_addr_set_or_get *)
+ otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr);
+
+ if (IS_ERR_OR_NULL(rsp)) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ *dmac_index = rsp->index;
+out:
+ mutex_unlock(&pf->mbox.lock);
+ return err;
+}
+
+int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u32 bit_pos)
+{
+ u32 *dmacindex;
+
+ /* Store dmacindex returned by CGX/RPM driver which will
+ * be used for macaddr update/remove
+ */
+ dmacindex = &pf->flow_cfg->bmap_to_dmacindex[bit_pos];
+
+ if (ether_addr_equal(mac, pf->netdev->dev_addr))
+ return otx2_dmacflt_add_pfmac(pf, dmacindex);
+ else
+ return otx2_dmacflt_do_add(pf, mac, dmacindex);
+}
+
+static int otx2_dmacflt_do_remove(struct otx2_nic *pfvf, const u8 *mac,
+ u32 dmac_index)
+{
+ struct cgx_mac_addr_del_req *req;
+ int err;
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_cgx_mac_addr_del(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ req->index = dmac_index;
+
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
+
+ return err;
+}
+
+static int otx2_dmacflt_remove_pfmac(struct otx2_nic *pf, u32 dmac_index)
+{
+ struct cgx_mac_addr_reset_req *req;
+ int err;
+
+ mutex_lock(&pf->mbox.lock);
+ req = otx2_mbox_alloc_msg_cgx_mac_addr_reset(&pf->mbox);
+ if (!req) {
+ mutex_unlock(&pf->mbox.lock);
+ return -ENOMEM;
+ }
+ req->index = dmac_index;
+
+ err = otx2_sync_mbox_msg(&pf->mbox);
+
+ mutex_unlock(&pf->mbox.lock);
+ return err;
+}
+
+int otx2_dmacflt_remove(struct otx2_nic *pf, const u8 *mac,
+ u32 bit_pos)
+{
+ u32 dmacindex = pf->flow_cfg->bmap_to_dmacindex[bit_pos];
+
+ if (ether_addr_equal(mac, pf->netdev->dev_addr))
+ return otx2_dmacflt_remove_pfmac(pf, dmacindex);
+ else
+ return otx2_dmacflt_do_remove(pf, mac, dmacindex);
+}
+
+/* CGX/RPM blocks support a maximum of 32 unicast DMAC entries.
+ * In a typical configuration a MAC block is associated with
+ * 4 LMACs, so each LMAC gets 8 DMAC entries.
+ */
+int otx2_dmacflt_get_max_cnt(struct otx2_nic *pf)
+{
+ struct cgx_max_dmac_entries_get_rsp *rsp;
+ struct msg_req *msg;
+ int err;
+
+ mutex_lock(&pf->mbox.lock);
+ msg = otx2_mbox_alloc_msg_cgx_mac_max_entries_get(&pf->mbox);
+
+ if (!msg) {
+ mutex_unlock(&pf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ err = otx2_sync_mbox_msg(&pf->mbox);
+ if (err)
+ goto out;
+
+ rsp = (struct cgx_max_dmac_entries_get_rsp *)
+ otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &msg->hdr);
+
+ if (IS_ERR_OR_NULL(rsp)) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ pf->flow_cfg->dmacflt_max_flows = rsp->max_dmac_filters;
+
+out:
+ mutex_unlock(&pf->mbox.lock);
+ return err;
+}
+
+int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u32 bit_pos)
+{
+ struct cgx_mac_addr_update_req *req;
+ struct cgx_mac_addr_update_rsp *rsp;
+ int rc;
+
+ mutex_lock(&pf->mbox.lock);
+
+ req = otx2_mbox_alloc_msg_cgx_mac_addr_update(&pf->mbox);
+
+ if (!req) {
+ mutex_unlock(&pf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ ether_addr_copy(req->mac_addr, mac);
+ req->index = pf->flow_cfg->bmap_to_dmacindex[bit_pos];
+
+	/* On success, update the stored DMAC index from the response */
+
+ rc = otx2_sync_mbox_msg(&pf->mbox);
+ if (rc)
+ goto out;
+
+ rsp = (struct cgx_mac_addr_update_rsp *)
+ otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr);
+
+ pf->flow_cfg->bmap_to_dmacindex[bit_pos] = rsp->index;
+
+out:
+ mutex_unlock(&pf->mbox.lock);
+ return rc;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
new file mode 100644
index 000000000..53f6258a9
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -0,0 +1,1480 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Ethernet driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/ethtool.h>
+#include <linux/stddef.h>
+#include <linux/etherdevice.h>
+#include <linux/log2.h>
+#include <linux/net_tstamp.h>
+#include <linux/linkmode.h>
+
+#include "otx2_common.h"
+#include "otx2_ptp.h"
+
+#define DRV_NAME "rvu-nicpf"
+#define DRV_VF_NAME "rvu-nicvf"
+
+struct otx2_stat {
+ char name[ETH_GSTRING_LEN];
+ unsigned int index;
+};
+
+/* HW device stats */
+#define OTX2_DEV_STAT(stat) { \
+ .name = #stat, \
+ .index = offsetof(struct otx2_dev_stats, stat) / sizeof(u64), \
+}
+
+enum link_mode {
+ OTX2_MODE_SUPPORTED,
+ OTX2_MODE_ADVERTISED
+};
+
+static const struct otx2_stat otx2_dev_stats[] = {
+ OTX2_DEV_STAT(rx_ucast_frames),
+ OTX2_DEV_STAT(rx_bcast_frames),
+ OTX2_DEV_STAT(rx_mcast_frames),
+
+ OTX2_DEV_STAT(tx_ucast_frames),
+ OTX2_DEV_STAT(tx_bcast_frames),
+ OTX2_DEV_STAT(tx_mcast_frames),
+};
+
+/* Driver level stats */
+#define OTX2_DRV_STAT(stat) { \
+ .name = #stat, \
+ .index = offsetof(struct otx2_drv_stats, stat) / sizeof(atomic_t), \
+}
+
+static const struct otx2_stat otx2_drv_stats[] = {
+ OTX2_DRV_STAT(rx_fcs_errs),
+ OTX2_DRV_STAT(rx_oversize_errs),
+ OTX2_DRV_STAT(rx_undersize_errs),
+ OTX2_DRV_STAT(rx_csum_errs),
+ OTX2_DRV_STAT(rx_len_errs),
+ OTX2_DRV_STAT(rx_other_errs),
+};
+
+static const struct otx2_stat otx2_queue_stats[] = {
+ { "bytes", 0 },
+ { "frames", 1 },
+};
+
+static const unsigned int otx2_n_dev_stats = ARRAY_SIZE(otx2_dev_stats);
+static const unsigned int otx2_n_drv_stats = ARRAY_SIZE(otx2_drv_stats);
+static const unsigned int otx2_n_queue_stats = ARRAY_SIZE(otx2_queue_stats);
+
+static struct cgx_fw_data *otx2_get_fwdata(struct otx2_nic *pfvf);
+
+static void otx2_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *info)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(pfvf->pdev), sizeof(info->bus_info));
+}
+
+static void otx2_get_qset_strings(struct otx2_nic *pfvf, u8 **data, int qset)
+{
+ int start_qidx = qset * pfvf->hw.rx_queues;
+ int qidx, stats;
+
+ for (qidx = 0; qidx < pfvf->hw.rx_queues; qidx++) {
+ for (stats = 0; stats < otx2_n_queue_stats; stats++) {
+ sprintf(*data, "rxq%d: %s", qidx + start_qidx,
+ otx2_queue_stats[stats].name);
+ *data += ETH_GSTRING_LEN;
+ }
+ }
+
+ for (qidx = 0; qidx < otx2_get_total_tx_queues(pfvf); qidx++) {
+ for (stats = 0; stats < otx2_n_queue_stats; stats++) {
+ if (qidx >= pfvf->hw.non_qos_queues)
+ sprintf(*data, "txq_qos%d: %s",
+ qidx + start_qidx - pfvf->hw.non_qos_queues,
+ otx2_queue_stats[stats].name);
+ else
+ sprintf(*data, "txq%d: %s", qidx + start_qidx,
+ otx2_queue_stats[stats].name);
+ *data += ETH_GSTRING_LEN;
+ }
+ }
+}
+
+static void otx2_get_strings(struct net_device *netdev, u32 sset, u8 *data)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ int stats;
+
+ if (sset != ETH_SS_STATS)
+ return;
+
+ for (stats = 0; stats < otx2_n_dev_stats; stats++) {
+ memcpy(data, otx2_dev_stats[stats].name, ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+
+ for (stats = 0; stats < otx2_n_drv_stats; stats++) {
+ memcpy(data, otx2_drv_stats[stats].name, ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+
+ otx2_get_qset_strings(pfvf, &data, 0);
+
+ if (!test_bit(CN10K_RPM, &pfvf->hw.cap_flag)) {
+ for (stats = 0; stats < CGX_RX_STATS_COUNT; stats++) {
+ sprintf(data, "cgx_rxstat%d: ", stats);
+ data += ETH_GSTRING_LEN;
+ }
+
+ for (stats = 0; stats < CGX_TX_STATS_COUNT; stats++) {
+ sprintf(data, "cgx_txstat%d: ", stats);
+ data += ETH_GSTRING_LEN;
+ }
+ }
+
+ strcpy(data, "reset_count");
+ data += ETH_GSTRING_LEN;
+ sprintf(data, "Fec Corrected Errors: ");
+ data += ETH_GSTRING_LEN;
+ sprintf(data, "Fec Uncorrected Errors: ");
+ data += ETH_GSTRING_LEN;
+}
+
+static void otx2_get_qset_stats(struct otx2_nic *pfvf,
+ struct ethtool_stats *stats, u64 **data)
+{
+ int stat, qidx;
+
+ if (!pfvf)
+ return;
+ for (qidx = 0; qidx < pfvf->hw.rx_queues; qidx++) {
+ if (!otx2_update_rq_stats(pfvf, qidx)) {
+ for (stat = 0; stat < otx2_n_queue_stats; stat++)
+ *((*data)++) = 0;
+ continue;
+ }
+ for (stat = 0; stat < otx2_n_queue_stats; stat++)
+ *((*data)++) = ((u64 *)&pfvf->qset.rq[qidx].stats)
+ [otx2_queue_stats[stat].index];
+ }
+
+ for (qidx = 0; qidx < otx2_get_total_tx_queues(pfvf); qidx++) {
+ if (!otx2_update_sq_stats(pfvf, qidx)) {
+ for (stat = 0; stat < otx2_n_queue_stats; stat++)
+ *((*data)++) = 0;
+ continue;
+ }
+ for (stat = 0; stat < otx2_n_queue_stats; stat++)
+ *((*data)++) = ((u64 *)&pfvf->qset.sq[qidx].stats)
+ [otx2_queue_stats[stat].index];
+ }
+}
+
+static int otx2_get_phy_fec_stats(struct otx2_nic *pfvf)
+{
+ struct msg_req *req;
+ int rc = -ENOMEM;
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_cgx_get_phy_fec_stats(&pfvf->mbox);
+ if (!req)
+ goto end;
+
+ if (!otx2_sync_mbox_msg(&pfvf->mbox))
+ rc = 0;
+end:
+ mutex_unlock(&pfvf->mbox.lock);
+ return rc;
+}
+
+/* Get device and per queue statistics */
+static void otx2_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ u64 fec_corr_blks, fec_uncorr_blks;
+ struct cgx_fw_data *rsp;
+ int stat;
+
+ otx2_get_dev_stats(pfvf);
+ for (stat = 0; stat < otx2_n_dev_stats; stat++)
+ *(data++) = ((u64 *)&pfvf->hw.dev_stats)
+ [otx2_dev_stats[stat].index];
+
+ for (stat = 0; stat < otx2_n_drv_stats; stat++)
+ *(data++) = atomic_read(&((atomic_t *)&pfvf->hw.drv_stats)
+ [otx2_drv_stats[stat].index]);
+
+ otx2_get_qset_stats(pfvf, stats, &data);
+
+ if (!test_bit(CN10K_RPM, &pfvf->hw.cap_flag)) {
+ otx2_update_lmac_stats(pfvf);
+ for (stat = 0; stat < CGX_RX_STATS_COUNT; stat++)
+ *(data++) = pfvf->hw.cgx_rx_stats[stat];
+ for (stat = 0; stat < CGX_TX_STATS_COUNT; stat++)
+ *(data++) = pfvf->hw.cgx_tx_stats[stat];
+ }
+
+ *(data++) = pfvf->reset_count;
+
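+	/* Default to MAC (CGX/RPM) FEC block counters; they are overridden
+	 * below with PHY FEC stats when the firmware provides them.
+	 */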
+ fec_corr_blks = pfvf->hw.cgx_fec_corr_blks;
+ fec_uncorr_blks = pfvf->hw.cgx_fec_uncorr_blks;
+
+ rsp = otx2_get_fwdata(pfvf);
+ if (!IS_ERR(rsp) && rsp->fwdata.phy.misc.has_fec_stats &&
+ !otx2_get_phy_fec_stats(pfvf)) {
+		/* Fetch fwdata again because it has just been populated
+		 * with the latest PHY FEC stats.
+		 */
+ rsp = otx2_get_fwdata(pfvf);
+ if (!IS_ERR(rsp)) {
+ struct fec_stats_s *p = &rsp->fwdata.phy.fec_stats;
+
+ if (pfvf->linfo.fec == OTX2_FEC_BASER) {
+ fec_corr_blks = p->brfec_corr_blks;
+ fec_uncorr_blks = p->brfec_uncorr_blks;
+ } else {
+ fec_corr_blks = p->rsfec_corr_cws;
+ fec_uncorr_blks = p->rsfec_uncorr_cws;
+ }
+ }
+ }
+
+ *(data++) = fec_corr_blks;
+ *(data++) = fec_uncorr_blks;
+}
+
+static int otx2_get_sset_count(struct net_device *netdev, int sset)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ int qstats_count, mac_stats = 0;
+
+ if (sset != ETH_SS_STATS)
+ return -EINVAL;
+
+ qstats_count = otx2_n_queue_stats *
+ (pfvf->hw.rx_queues + otx2_get_total_tx_queues(pfvf));
+ if (!test_bit(CN10K_RPM, &pfvf->hw.cap_flag))
+ mac_stats = CGX_RX_STATS_COUNT + CGX_TX_STATS_COUNT;
+ otx2_update_lmac_fec_stats(pfvf);
+
+ return otx2_n_dev_stats + otx2_n_drv_stats + qstats_count +
+ mac_stats + OTX2_FEC_STATS_CNT + 1;
+}
+
+/* Get no of queues device supports and current queue count */
+static void otx2_get_channels(struct net_device *dev,
+ struct ethtool_channels *channel)
+{
+ struct otx2_nic *pfvf = netdev_priv(dev);
+
+ channel->max_rx = pfvf->hw.max_queues;
+ channel->max_tx = pfvf->hw.max_queues;
+
+ channel->rx_count = pfvf->hw.rx_queues;
+ channel->tx_count = pfvf->hw.tx_queues;
+}
+
+/* Set no of Tx, Rx queues to be used */
+static int otx2_set_channels(struct net_device *dev,
+ struct ethtool_channels *channel)
+{
+ struct otx2_nic *pfvf = netdev_priv(dev);
+ bool if_up = netif_running(dev);
+ int err, qos_txqs;
+
+ if (!channel->rx_count || !channel->tx_count)
+ return -EINVAL;
+
+ if (bitmap_weight(&pfvf->rq_bmap, pfvf->hw.rx_queues) > 1) {
+ netdev_err(dev,
+ "Receive queues are in use by TC police action\n");
+ return -EINVAL;
+ }
+
+ if (if_up)
+ dev->netdev_ops->ndo_stop(dev);
+
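+	/* Include active QoS send queues in addition to the requested Tx queue count */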
+ qos_txqs = bitmap_weight(pfvf->qos.qos_sq_bmap,
+ OTX2_QOS_MAX_LEAF_NODES);
+
+ err = otx2_set_real_num_queues(dev, channel->tx_count + qos_txqs,
+ channel->rx_count);
+ if (err)
+ return err;
+
+ pfvf->hw.rx_queues = channel->rx_count;
+ pfvf->hw.tx_queues = channel->tx_count;
+ if (pfvf->xdp_prog)
+ pfvf->hw.xdp_queues = channel->rx_count;
+ pfvf->hw.non_qos_queues = pfvf->hw.tx_queues + pfvf->hw.xdp_queues;
+
+ if (if_up)
+ err = dev->netdev_ops->ndo_open(dev);
+
+ netdev_info(dev, "Setting num Tx rings to %d, Rx rings to %d success\n",
+ pfvf->hw.tx_queues, pfvf->hw.rx_queues);
+
+ return err;
+}
+
+static void otx2_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ struct cgx_pause_frm_cfg *req, *rsp;
+
+ if (is_otx2_lbkvf(pfvf->pdev))
+ return;
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_cgx_cfg_pause_frm(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return;
+ }
+
+ if (!otx2_sync_mbox_msg(&pfvf->mbox)) {
+ rsp = (struct cgx_pause_frm_cfg *)
+ otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
+ pause->rx_pause = rsp->rx_pause;
+ pause->tx_pause = rsp->tx_pause;
+ }
+ mutex_unlock(&pfvf->mbox.lock);
+}
+
+static int otx2_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+
+ if (pause->autoneg)
+ return -EOPNOTSUPP;
+
+ if (is_otx2_lbkvf(pfvf->pdev))
+ return -EOPNOTSUPP;
+
+ if (pause->rx_pause)
+ pfvf->flags |= OTX2_FLAG_RX_PAUSE_ENABLED;
+ else
+ pfvf->flags &= ~OTX2_FLAG_RX_PAUSE_ENABLED;
+
+ if (pause->tx_pause)
+ pfvf->flags |= OTX2_FLAG_TX_PAUSE_ENABLED;
+ else
+ pfvf->flags &= ~OTX2_FLAG_TX_PAUSE_ENABLED;
+
+ return otx2_config_pause_frm(pfvf);
+}
+
+static void otx2_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ struct otx2_qset *qs = &pfvf->qset;
+
+ ring->rx_max_pending = Q_COUNT(Q_SIZE_MAX);
+ ring->rx_pending = qs->rqe_cnt ? qs->rqe_cnt : Q_COUNT(Q_SIZE_256);
+ ring->tx_max_pending = Q_COUNT(Q_SIZE_MAX);
+ ring->tx_pending = qs->sqe_cnt ? qs->sqe_cnt : Q_COUNT(Q_SIZE_4K);
+ kernel_ring->rx_buf_len = pfvf->hw.rbuf_len;
+ kernel_ring->cqe_size = pfvf->hw.xqe_size;
+}
+
+static int otx2_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ u32 rx_buf_len = kernel_ring->rx_buf_len;
+ u32 old_rx_buf_len = pfvf->hw.rbuf_len;
+ u32 xqe_size = kernel_ring->cqe_size;
+ bool if_up = netif_running(netdev);
+ struct otx2_qset *qs = &pfvf->qset;
+ u32 rx_count, tx_count;
+
+ if (ring->rx_mini_pending || ring->rx_jumbo_pending)
+ return -EINVAL;
+
+ /* Hardware supports max size of 32k for a receive buffer
+ * and 1536 is typical ethernet frame size.
+ */
+ if (rx_buf_len && (rx_buf_len < 1536 || rx_buf_len > 32768)) {
+ netdev_err(netdev,
+ "Receive buffer range is 1536 - 32768");
+ return -EINVAL;
+ }
+
+ if (xqe_size != 128 && xqe_size != 512) {
+ netdev_err(netdev,
+ "Completion event size must be 128 or 512");
+ return -EINVAL;
+ }
+
+	/* Permitted lengths are 16, 64, 256, 1K, 4K, 16K, 64K, 256K and 1M */
+ rx_count = ring->rx_pending;
+ /* On some silicon variants a skid or reserved CQEs are
+ * needed to avoid CQ overflow.
+ */
+ if (rx_count < pfvf->hw.rq_skid)
+ rx_count = pfvf->hw.rq_skid;
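+	/* Snap the requested Rx count to a supported ring size */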
+ rx_count = Q_COUNT(Q_SIZE(rx_count, 3));
+
+	/* Due to pipelining, a minimum of 2000 unused SQ CQEs
+	 * must be maintained to avoid CQ overflow, hence the
+	 * minimum 4K size.
+	 */
+ tx_count = clamp_t(u32, ring->tx_pending,
+ Q_COUNT(Q_SIZE_4K), Q_COUNT(Q_SIZE_MAX));
+ tx_count = Q_COUNT(Q_SIZE(tx_count, 3));
+
+ if (tx_count == qs->sqe_cnt && rx_count == qs->rqe_cnt &&
+ rx_buf_len == old_rx_buf_len && xqe_size == pfvf->hw.xqe_size)
+ return 0;
+
+ if (if_up)
+ netdev->netdev_ops->ndo_stop(netdev);
+
+	/* Counts were snapped above to the nearest supported ring sizes. */
+ qs->sqe_cnt = tx_count;
+ qs->rqe_cnt = rx_count;
+
+ pfvf->hw.rbuf_len = rx_buf_len;
+ pfvf->hw.xqe_size = xqe_size;
+
+ if (if_up)
+ return netdev->netdev_ops->ndo_open(netdev);
+
+ return 0;
+}
+
+static int otx2_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *cmd,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ struct otx2_hw *hw = &pfvf->hw;
+
+ cmd->rx_coalesce_usecs = hw->cq_time_wait;
+ cmd->rx_max_coalesced_frames = hw->cq_ecount_wait;
+ cmd->tx_coalesce_usecs = hw->cq_time_wait;
+ cmd->tx_max_coalesced_frames = hw->cq_ecount_wait;
+ if ((pfvf->flags & OTX2_FLAG_ADPTV_INT_COAL_ENABLED) ==
+ OTX2_FLAG_ADPTV_INT_COAL_ENABLED) {
+ cmd->use_adaptive_rx_coalesce = 1;
+ cmd->use_adaptive_tx_coalesce = 1;
+ } else {
+ cmd->use_adaptive_rx_coalesce = 0;
+ cmd->use_adaptive_tx_coalesce = 0;
+ }
+
+ return 0;
+}
+
+static int otx2_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ struct otx2_hw *hw = &pfvf->hw;
+ u8 priv_coalesce_status;
+ int qidx;
+
+ if (!ec->rx_max_coalesced_frames || !ec->tx_max_coalesced_frames)
+ return 0;
+
+ if (ec->use_adaptive_rx_coalesce != ec->use_adaptive_tx_coalesce) {
+ netdev_err(netdev,
+ "adaptive-rx should be same as adaptive-tx");
+ return -EINVAL;
+ }
+
+ /* Check and update coalesce status */
+ if ((pfvf->flags & OTX2_FLAG_ADPTV_INT_COAL_ENABLED) ==
+ OTX2_FLAG_ADPTV_INT_COAL_ENABLED) {
+ priv_coalesce_status = 1;
+ if (!ec->use_adaptive_rx_coalesce)
+ pfvf->flags &= ~OTX2_FLAG_ADPTV_INT_COAL_ENABLED;
+ } else {
+ priv_coalesce_status = 0;
+ if (ec->use_adaptive_rx_coalesce)
+ pfvf->flags |= OTX2_FLAG_ADPTV_INT_COAL_ENABLED;
+ }
+
+	/* 'cq_time_wait' is an 8-bit value in multiples of 100ns,
+	 * so clamp the user-given value to the range of 1 to 25 usec.
+	 */
+ ec->rx_coalesce_usecs = clamp_t(u32, ec->rx_coalesce_usecs,
+ 1, CQ_TIMER_THRESH_MAX);
+ ec->tx_coalesce_usecs = clamp_t(u32, ec->tx_coalesce_usecs,
+ 1, CQ_TIMER_THRESH_MAX);
+
+ /* Rx and Tx are mapped to same CQ, check which one
+ * is changed, if both then choose the min.
+ */
+ if (hw->cq_time_wait == ec->rx_coalesce_usecs)
+ hw->cq_time_wait = ec->tx_coalesce_usecs;
+ else if (hw->cq_time_wait == ec->tx_coalesce_usecs)
+ hw->cq_time_wait = ec->rx_coalesce_usecs;
+ else
+ hw->cq_time_wait = min_t(u8, ec->rx_coalesce_usecs,
+ ec->tx_coalesce_usecs);
+
+	/* Hardware supports a 16-bit ecount_wait, but clamp the
+	 * user-given value to the range of 1 to NAPI_POLL_WEIGHT.
+	 */
+ ec->rx_max_coalesced_frames = clamp_t(u32, ec->rx_max_coalesced_frames,
+ 1, NAPI_POLL_WEIGHT);
+ ec->tx_max_coalesced_frames = clamp_t(u32, ec->tx_max_coalesced_frames,
+ 1, NAPI_POLL_WEIGHT);
+
+ /* Rx and Tx are mapped to same CQ, check which one
+ * is changed, if both then choose the min.
+ */
+ if (hw->cq_ecount_wait == ec->rx_max_coalesced_frames)
+ hw->cq_ecount_wait = ec->tx_max_coalesced_frames;
+ else if (hw->cq_ecount_wait == ec->tx_max_coalesced_frames)
+ hw->cq_ecount_wait = ec->rx_max_coalesced_frames;
+ else
+ hw->cq_ecount_wait = min_t(u16, ec->rx_max_coalesced_frames,
+ ec->tx_max_coalesced_frames);
+
+ /* Reset 'cq_time_wait' and 'cq_ecount_wait' to
+ * default values if coalesce status changed from
+ * 'on' to 'off'.
+ */
+ if (priv_coalesce_status &&
+ ((pfvf->flags & OTX2_FLAG_ADPTV_INT_COAL_ENABLED) !=
+ OTX2_FLAG_ADPTV_INT_COAL_ENABLED)) {
+ hw->cq_time_wait = CQ_TIMER_THRESH_DEFAULT;
+ hw->cq_ecount_wait = CQ_CQE_THRESH_DEFAULT;
+ }
+
+ if (netif_running(netdev)) {
+ for (qidx = 0; qidx < pfvf->hw.cint_cnt; qidx++)
+ otx2_config_irq_coalescing(pfvf, qidx);
+ }
+
+ return 0;
+}
+
+static int otx2_get_rss_hash_opts(struct otx2_nic *pfvf,
+ struct ethtool_rxnfc *nfc)
+{
+ struct otx2_rss_info *rss = &pfvf->hw.rss_info;
+
+ if (!(rss->flowkey_cfg &
+ (NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6)))
+ return 0;
+
+	/* Minimum is IPv4 and IPv6, SIP/DIP */
+ nfc->data = RXH_IP_SRC | RXH_IP_DST;
+ if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_VLAN)
+ nfc->data |= RXH_VLAN;
+
+ switch (nfc->flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_TCP)
+ nfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ break;
+ case UDP_V4_FLOW:
+ case UDP_V6_FLOW:
+ if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_UDP)
+ nfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ break;
+ case SCTP_V4_FLOW:
+ case SCTP_V6_FLOW:
+ if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_SCTP)
+ nfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ break;
+ case AH_ESP_V4_FLOW:
+ case AH_ESP_V6_FLOW:
+ if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_ESP)
+ nfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ break;
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ case IPV4_FLOW:
+ break;
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ case IPV6_FLOW:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int otx2_set_rss_hash_opts(struct otx2_nic *pfvf,
+ struct ethtool_rxnfc *nfc)
+{
+ struct otx2_rss_info *rss = &pfvf->hw.rss_info;
+ u32 rxh_l4 = RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ u32 rss_cfg = rss->flowkey_cfg;
+
+ if (!rss->enable) {
+ netdev_err(pfvf->netdev,
+ "RSS is disabled, cannot change settings\n");
+ return -EIO;
+ }
+
+	/* Minimum is IPv4 and IPv6, SIP/DIP */
+ if (!(nfc->data & RXH_IP_SRC) || !(nfc->data & RXH_IP_DST))
+ return -EINVAL;
+
+ if (nfc->data & RXH_VLAN)
+ rss_cfg |= NIX_FLOW_KEY_TYPE_VLAN;
+ else
+ rss_cfg &= ~NIX_FLOW_KEY_TYPE_VLAN;
+
+ switch (nfc->flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ /* Different config for v4 and v6 is not supported.
+ * Both of them have to be either 4-tuple or 2-tuple.
+ */
+ switch (nfc->data & rxh_l4) {
+ case 0:
+ rss_cfg &= ~NIX_FLOW_KEY_TYPE_TCP;
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ rss_cfg |= NIX_FLOW_KEY_TYPE_TCP;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case UDP_V4_FLOW:
+ case UDP_V6_FLOW:
+ switch (nfc->data & rxh_l4) {
+ case 0:
+ rss_cfg &= ~NIX_FLOW_KEY_TYPE_UDP;
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ rss_cfg |= NIX_FLOW_KEY_TYPE_UDP;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case SCTP_V4_FLOW:
+ case SCTP_V6_FLOW:
+ switch (nfc->data & rxh_l4) {
+ case 0:
+ rss_cfg &= ~NIX_FLOW_KEY_TYPE_SCTP;
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ rss_cfg |= NIX_FLOW_KEY_TYPE_SCTP;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case AH_ESP_V4_FLOW:
+ case AH_ESP_V6_FLOW:
+ switch (nfc->data & rxh_l4) {
+ case 0:
+ rss_cfg &= ~(NIX_FLOW_KEY_TYPE_ESP |
+ NIX_FLOW_KEY_TYPE_AH);
+ rss_cfg |= NIX_FLOW_KEY_TYPE_VLAN |
+ NIX_FLOW_KEY_TYPE_IPV4_PROTO;
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+			/* If VLAN hashing is also requested for ESP then do not
+			 * allow it, because of the hardware's 40-byte flow key limit.
+			 */
+ if (rss_cfg & NIX_FLOW_KEY_TYPE_VLAN) {
+ netdev_err(pfvf->netdev,
+ "RSS hash of ESP or AH with VLAN is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ rss_cfg |= NIX_FLOW_KEY_TYPE_ESP | NIX_FLOW_KEY_TYPE_AH;
+			/* Disable IPv4 proto hashing since IPv6 SA+DA (32 bytes)
+			 * and ESP SPI+sequence (8 bytes) already use up the
+			 * hardware's 40-byte flow key limit.
+			 */
+ rss_cfg &= ~NIX_FLOW_KEY_TYPE_IPV4_PROTO;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case IPV4_FLOW:
+ case IPV6_FLOW:
+ rss_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ rss->flowkey_cfg = rss_cfg;
+ otx2_set_flowkey_cfg(pfvf);
+ return 0;
+}
+
+static int otx2_get_rxnfc(struct net_device *dev,
+ struct ethtool_rxnfc *nfc, u32 *rules)
+{
+ bool ntuple = !!(dev->features & NETIF_F_NTUPLE);
+ struct otx2_nic *pfvf = netdev_priv(dev);
+ int ret = -EOPNOTSUPP;
+
+ switch (nfc->cmd) {
+ case ETHTOOL_GRXRINGS:
+ nfc->data = pfvf->hw.rx_queues;
+ ret = 0;
+ break;
+ case ETHTOOL_GRXCLSRLCNT:
+ if (netif_running(dev) && ntuple) {
+ nfc->rule_cnt = pfvf->flow_cfg->nr_flows;
+ ret = 0;
+ }
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ if (netif_running(dev) && ntuple)
+ ret = otx2_get_flow(pfvf, nfc, nfc->fs.location);
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ if (netif_running(dev) && ntuple)
+ ret = otx2_get_all_flows(pfvf, nfc, rules);
+ break;
+ case ETHTOOL_GRXFH:
+ return otx2_get_rss_hash_opts(pfvf, nfc);
+ default:
+ break;
+ }
+ return ret;
+}
+
+static int otx2_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *nfc)
+{
+ bool ntuple = !!(dev->features & NETIF_F_NTUPLE);
+ struct otx2_nic *pfvf = netdev_priv(dev);
+ int ret = -EOPNOTSUPP;
+
+ pfvf->flow_cfg->ntuple = ntuple;
+ switch (nfc->cmd) {
+ case ETHTOOL_SRXFH:
+ ret = otx2_set_rss_hash_opts(pfvf, nfc);
+ break;
+ case ETHTOOL_SRXCLSRLINS:
+ if (netif_running(dev) && ntuple)
+ ret = otx2_add_flow(pfvf, nfc);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ if (netif_running(dev) && ntuple)
+ ret = otx2_remove_flow(pfvf, nfc->fs.location);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static u32 otx2_get_rxfh_key_size(struct net_device *netdev)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ struct otx2_rss_info *rss;
+
+ rss = &pfvf->hw.rss_info;
+
+ return sizeof(rss->key);
+}
+
+static u32 otx2_get_rxfh_indir_size(struct net_device *dev)
+{
+ return MAX_RSS_INDIR_TBL_SIZE;
+}
+
+static int otx2_rss_ctx_delete(struct otx2_nic *pfvf, int ctx_id)
+{
+ struct otx2_rss_info *rss = &pfvf->hw.rss_info;
+
+ otx2_rss_ctx_flow_del(pfvf, ctx_id);
+ kfree(rss->rss_ctx[ctx_id]);
+ rss->rss_ctx[ctx_id] = NULL;
+
+ return 0;
+}
+
+static int otx2_rss_ctx_create(struct otx2_nic *pfvf,
+ u32 *rss_context)
+{
+ struct otx2_rss_info *rss = &pfvf->hw.rss_info;
+ u8 ctx;
+
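+	/* Find the first free RSS group slot */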
+ for (ctx = 0; ctx < MAX_RSS_GROUPS; ctx++) {
+ if (!rss->rss_ctx[ctx])
+ break;
+ }
+ if (ctx == MAX_RSS_GROUPS)
+ return -EINVAL;
+
+ rss->rss_ctx[ctx] = kzalloc(sizeof(*rss->rss_ctx[ctx]), GFP_KERNEL);
+ if (!rss->rss_ctx[ctx])
+ return -ENOMEM;
+ *rss_context = ctx;
+
+ return 0;
+}
+
+/* RSS context configuration */
+static int otx2_set_rxfh_context(struct net_device *dev, const u32 *indir,
+ const u8 *hkey, const u8 hfunc,
+ u32 *rss_context, bool delete)
+{
+ struct otx2_nic *pfvf = netdev_priv(dev);
+ struct otx2_rss_ctx *rss_ctx;
+ struct otx2_rss_info *rss;
+ int ret, idx;
+
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ return -EOPNOTSUPP;
+
+ if (*rss_context != ETH_RXFH_CONTEXT_ALLOC &&
+ *rss_context >= MAX_RSS_GROUPS)
+ return -EINVAL;
+
+ rss = &pfvf->hw.rss_info;
+
+ if (!rss->enable) {
+ netdev_err(dev, "RSS is disabled, cannot change settings\n");
+ return -EIO;
+ }
+
+ if (hkey) {
+ memcpy(rss->key, hkey, sizeof(rss->key));
+ otx2_set_rss_key(pfvf);
+ }
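+	/* A delete request tears down the context and its associated flows */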
+ if (delete)
+ return otx2_rss_ctx_delete(pfvf, *rss_context);
+
+ if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) {
+ ret = otx2_rss_ctx_create(pfvf, rss_context);
+ if (ret)
+ return ret;
+ }
+ if (indir) {
+ rss_ctx = rss->rss_ctx[*rss_context];
+ for (idx = 0; idx < rss->rss_size; idx++)
+ rss_ctx->ind_tbl[idx] = indir[idx];
+ }
+ otx2_set_rss_table(pfvf, *rss_context);
+
+ return 0;
+}
+
+static int otx2_get_rxfh_context(struct net_device *dev, u32 *indir,
+ u8 *hkey, u8 *hfunc, u32 rss_context)
+{
+ struct otx2_nic *pfvf = netdev_priv(dev);
+ struct otx2_rss_ctx *rss_ctx;
+ struct otx2_rss_info *rss;
+ int idx, rx_queues;
+
+ rss = &pfvf->hw.rss_info;
+
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+
+ if (!indir)
+ return 0;
+
+ if (!rss->enable && rss_context == DEFAULT_RSS_CONTEXT_GROUP) {
+ rx_queues = pfvf->hw.rx_queues;
+ for (idx = 0; idx < MAX_RSS_INDIR_TBL_SIZE; idx++)
+ indir[idx] = ethtool_rxfh_indir_default(idx, rx_queues);
+ return 0;
+ }
+ if (rss_context >= MAX_RSS_GROUPS)
+ return -ENOENT;
+
+ rss_ctx = rss->rss_ctx[rss_context];
+ if (!rss_ctx)
+ return -ENOENT;
+
+ if (indir) {
+ for (idx = 0; idx < rss->rss_size; idx++)
+ indir[idx] = rss_ctx->ind_tbl[idx];
+ }
+ if (hkey)
+ memcpy(hkey, rss->key, sizeof(rss->key));
+
+ return 0;
+}
+
+/* Get RSS configuration */
+static int otx2_get_rxfh(struct net_device *dev, u32 *indir,
+ u8 *hkey, u8 *hfunc)
+{
+ return otx2_get_rxfh_context(dev, indir, hkey, hfunc,
+ DEFAULT_RSS_CONTEXT_GROUP);
+}
+
+/* Configure RSS table and hash key */
+static int otx2_set_rxfh(struct net_device *dev, const u32 *indir,
+ const u8 *hkey, const u8 hfunc)
+{
+
+ u32 rss_context = DEFAULT_RSS_CONTEXT_GROUP;
+
+ return otx2_set_rxfh_context(dev, indir, hkey, hfunc, &rss_context, 0);
+}
+
+static u32 otx2_get_msglevel(struct net_device *netdev)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+
+ return pfvf->msg_enable;
+}
+
+static void otx2_set_msglevel(struct net_device *netdev, u32 val)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+
+ pfvf->msg_enable = val;
+}
+
+static u32 otx2_get_link(struct net_device *netdev)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+
+ /* LBK link is internal and always UP */
+ if (is_otx2_lbkvf(pfvf->pdev))
+ return 1;
+ return pfvf->linfo.link_up;
+}
+
+static int otx2_get_ts_info(struct net_device *netdev,
+ struct ethtool_ts_info *info)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+
+ if (!pfvf->ptp)
+ return ethtool_op_get_ts_info(netdev, info);
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ info->phc_index = otx2_ptp_clock_index(pfvf);
+
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
+ if (test_bit(CN10K_PTP_ONESTEP, &pfvf->hw.cap_flag))
+ info->tx_types |= BIT(HWTSTAMP_TX_ONESTEP_SYNC);
+
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_ALL);
+
+ return 0;
+}
+
+static struct cgx_fw_data *otx2_get_fwdata(struct otx2_nic *pfvf)
+{
+ struct cgx_fw_data *rsp = NULL;
+ struct msg_req *req;
+ int err = 0;
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_cgx_get_aux_link_info(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (!err) {
+ rsp = (struct cgx_fw_data *)
+ otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
+ } else {
+ rsp = ERR_PTR(err);
+ }
+
+ mutex_unlock(&pfvf->mbox.lock);
+ return rsp;
+}
+
+static int otx2_get_fecparam(struct net_device *netdev,
+ struct ethtool_fecparam *fecparam)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ struct cgx_fw_data *rsp;
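+	/* fec[] is indexed by the driver's FEC mode value: OFF, BASER, RS, BASER|RS */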
+ const int fec[] = {
+ ETHTOOL_FEC_OFF,
+ ETHTOOL_FEC_BASER,
+ ETHTOOL_FEC_RS,
+ ETHTOOL_FEC_BASER | ETHTOOL_FEC_RS};
+#define FEC_MAX_INDEX 4
+ if (pfvf->linfo.fec < FEC_MAX_INDEX)
+ fecparam->active_fec = fec[pfvf->linfo.fec];
+
+ rsp = otx2_get_fwdata(pfvf);
+ if (IS_ERR(rsp))
+ return PTR_ERR(rsp);
+
+ if (rsp->fwdata.supported_fec < FEC_MAX_INDEX) {
+ if (!rsp->fwdata.supported_fec)
+ fecparam->fec = ETHTOOL_FEC_NONE;
+ else
+ fecparam->fec = fec[rsp->fwdata.supported_fec];
+ }
+ return 0;
+}
+
+static int otx2_set_fecparam(struct net_device *netdev,
+ struct ethtool_fecparam *fecparam)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ struct mbox *mbox = &pfvf->mbox;
+ struct fec_mode *req, *rsp;
+ int err = 0, fec = 0;
+
+ switch (fecparam->fec) {
+	/* Firmware does not support AUTO mode; treat it as FEC_OFF */
+ case ETHTOOL_FEC_OFF:
+ case ETHTOOL_FEC_AUTO:
+ fec = OTX2_FEC_OFF;
+ break;
+ case ETHTOOL_FEC_RS:
+ fec = OTX2_FEC_RS;
+ break;
+ case ETHTOOL_FEC_BASER:
+ fec = OTX2_FEC_BASER;
+ break;
+ default:
+ netdev_warn(pfvf->netdev, "Unsupported FEC mode: %d",
+ fecparam->fec);
+ return -EINVAL;
+ }
+
+ if (fec == pfvf->linfo.fec)
+ return 0;
+
+ mutex_lock(&mbox->lock);
+ req = otx2_mbox_alloc_msg_cgx_set_fec_param(&pfvf->mbox);
+ if (!req) {
+ err = -ENOMEM;
+ goto end;
+ }
+ req->fec = fec;
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err)
+ goto end;
+
+ rsp = (struct fec_mode *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
+ 0, &req->hdr);
+ if (rsp->fec >= 0)
+ pfvf->linfo.fec = rsp->fec;
+ else
+ err = rsp->fec;
+end:
+ mutex_unlock(&mbox->lock);
+ return err;
+}
+
+static void otx2_get_fec_info(u64 index, int req_mode,
+ struct ethtool_link_ksettings *link_ksettings)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(otx2_fec_modes) = { 0, };
+
+ switch (index) {
+ case OTX2_FEC_NONE:
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
+ otx2_fec_modes);
+ break;
+ case OTX2_FEC_BASER:
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
+ otx2_fec_modes);
+ break;
+ case OTX2_FEC_RS:
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
+ otx2_fec_modes);
+ break;
+ case OTX2_FEC_BASER | OTX2_FEC_RS:
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
+ otx2_fec_modes);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
+ otx2_fec_modes);
+ break;
+ }
+
+ /* Add fec modes to existing modes */
+ if (req_mode == OTX2_MODE_ADVERTISED)
+ linkmode_or(link_ksettings->link_modes.advertising,
+ link_ksettings->link_modes.advertising,
+ otx2_fec_modes);
+ else
+ linkmode_or(link_ksettings->link_modes.supported,
+ link_ksettings->link_modes.supported,
+ otx2_fec_modes);
+}
+
+static void otx2_get_link_mode_info(u64 link_mode_bmap,
+ bool req_mode,
+ struct ethtool_link_ksettings
+ *link_ksettings)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(otx2_link_modes) = { 0, };
+ const int otx2_sgmii_features[6] = {
+ ETHTOOL_LINK_MODE_10baseT_Half_BIT,
+ ETHTOOL_LINK_MODE_10baseT_Full_BIT,
+ ETHTOOL_LINK_MODE_100baseT_Half_BIT,
+ ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+ ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ };
+ /* CGX link modes to Ethtool link mode mapping */
+ const int cgx_link_mode[27] = {
+ 0, /* SGMII Mode */
+ ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+ 0,
+ ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
+ 0,
+ 0,
+ ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
+ ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
+ ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
+ ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
+ ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
+ ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
+ 0,
+ ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
+ 0,
+ ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
+ 0,
+ ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT
+ };
+ u8 bit;
+
+ for_each_set_bit(bit, (unsigned long *)&link_mode_bmap, 27) {
+ /* SGMII mode is set */
+ if (bit == 0)
+ linkmode_set_bit_array(otx2_sgmii_features,
+ ARRAY_SIZE(otx2_sgmii_features),
+ otx2_link_modes);
+ else
+ linkmode_set_bit(cgx_link_mode[bit], otx2_link_modes);
+ }
+
+ if (req_mode == OTX2_MODE_ADVERTISED)
+ linkmode_copy(link_ksettings->link_modes.advertising,
+ otx2_link_modes);
+ else
+ linkmode_copy(link_ksettings->link_modes.supported,
+ otx2_link_modes);
+}
+
+static int otx2_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ struct cgx_fw_data *rsp = NULL;
+
+ cmd->base.duplex = pfvf->linfo.full_duplex;
+ cmd->base.speed = pfvf->linfo.speed;
+ cmd->base.autoneg = pfvf->linfo.an;
+
+ rsp = otx2_get_fwdata(pfvf);
+ if (IS_ERR(rsp))
+ return PTR_ERR(rsp);
+
+ if (rsp->fwdata.supported_an)
+ ethtool_link_ksettings_add_link_mode(cmd,
+ supported,
+ Autoneg);
+
+ otx2_get_link_mode_info(rsp->fwdata.advertised_link_modes,
+ OTX2_MODE_ADVERTISED, cmd);
+ otx2_get_fec_info(rsp->fwdata.advertised_fec,
+ OTX2_MODE_ADVERTISED, cmd);
+ otx2_get_link_mode_info(rsp->fwdata.supported_link_modes,
+ OTX2_MODE_SUPPORTED, cmd);
+ otx2_get_fec_info(rsp->fwdata.supported_fec,
+ OTX2_MODE_SUPPORTED, cmd);
+ return 0;
+}
+
+static void otx2_get_advertised_mode(const struct ethtool_link_ksettings *cmd,
+ u64 *mode)
+{
+ u32 bit_pos;
+
+	/* Firmware does not support requesting multiple advertised modes,
+	 * so return the first set bit.
+	 */
+ bit_pos = find_first_bit(cmd->link_modes.advertising,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+ if (bit_pos != __ETHTOOL_LINK_MODE_MASK_NBITS)
+ *mode = bit_pos;
+}
+
+static int otx2_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+ struct ethtool_link_ksettings cur_ks;
+ struct cgx_set_link_mode_req *req;
+ struct mbox *mbox = &pf->mbox;
+ int err = 0;
+
+ memset(&cur_ks, 0, sizeof(struct ethtool_link_ksettings));
+
+ if (!ethtool_validate_speed(cmd->base.speed) ||
+ !ethtool_validate_duplex(cmd->base.duplex))
+ return -EINVAL;
+
+ if (cmd->base.autoneg != AUTONEG_ENABLE &&
+ cmd->base.autoneg != AUTONEG_DISABLE)
+ return -EINVAL;
+
+ otx2_get_link_ksettings(netdev, &cur_ks);
+
+ /* Check requested modes against supported modes by hardware */
+ if (!linkmode_subset(cmd->link_modes.advertising,
+ cur_ks.link_modes.supported))
+ return -EINVAL;
+
+ mutex_lock(&mbox->lock);
+ req = otx2_mbox_alloc_msg_cgx_set_link_mode(&pf->mbox);
+ if (!req) {
+ err = -ENOMEM;
+ goto end;
+ }
+
+ req->args.speed = cmd->base.speed;
+	/* Firmware expects 1 for half duplex and 0 for full duplex,
+	 * hence the inversion.
+	 */
+ req->args.duplex = cmd->base.duplex ^ 0x1;
+ req->args.an = cmd->base.autoneg;
+ otx2_get_advertised_mode(cmd, &req->args.mode);
+
+ err = otx2_sync_mbox_msg(&pf->mbox);
+end:
+ mutex_unlock(&mbox->lock);
+ return err;
+}
+
+static void otx2_get_fec_stats(struct net_device *netdev,
+ struct ethtool_fec_stats *fec_stats)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ struct cgx_fw_data *rsp;
+
+ otx2_update_lmac_fec_stats(pfvf);
+
+ /* Report MAC FEC stats */
+ fec_stats->corrected_blocks.total = pfvf->hw.cgx_fec_corr_blks;
+ fec_stats->uncorrectable_blocks.total = pfvf->hw.cgx_fec_uncorr_blks;
+
+ rsp = otx2_get_fwdata(pfvf);
+ if (!IS_ERR(rsp) && rsp->fwdata.phy.misc.has_fec_stats &&
+ !otx2_get_phy_fec_stats(pfvf)) {
+		/* Fetch fwdata again because it has just been populated
+		 * with the latest PHY FEC stats.
+		 */
+ rsp = otx2_get_fwdata(pfvf);
+ if (!IS_ERR(rsp)) {
+ struct fec_stats_s *p = &rsp->fwdata.phy.fec_stats;
+
+ if (pfvf->linfo.fec == OTX2_FEC_BASER) {
+ fec_stats->corrected_blocks.total = p->brfec_corr_blks;
+ fec_stats->uncorrectable_blocks.total = p->brfec_uncorr_blks;
+ } else {
+ fec_stats->corrected_blocks.total = p->rsfec_corr_cws;
+ fec_stats->uncorrectable_blocks.total = p->rsfec_uncorr_cws;
+ }
+ }
+ }
+}
+
+static const struct ethtool_ops otx2_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES |
+ ETHTOOL_COALESCE_USE_ADAPTIVE,
+ .supported_ring_params = ETHTOOL_RING_USE_RX_BUF_LEN |
+ ETHTOOL_RING_USE_CQE_SIZE,
+ .get_link = otx2_get_link,
+ .get_drvinfo = otx2_get_drvinfo,
+ .get_strings = otx2_get_strings,
+ .get_ethtool_stats = otx2_get_ethtool_stats,
+ .get_sset_count = otx2_get_sset_count,
+ .set_channels = otx2_set_channels,
+ .get_channels = otx2_get_channels,
+ .get_ringparam = otx2_get_ringparam,
+ .set_ringparam = otx2_set_ringparam,
+ .get_coalesce = otx2_get_coalesce,
+ .set_coalesce = otx2_set_coalesce,
+ .get_rxnfc = otx2_get_rxnfc,
+ .set_rxnfc = otx2_set_rxnfc,
+ .get_rxfh_key_size = otx2_get_rxfh_key_size,
+ .get_rxfh_indir_size = otx2_get_rxfh_indir_size,
+ .get_rxfh = otx2_get_rxfh,
+ .set_rxfh = otx2_set_rxfh,
+ .get_rxfh_context = otx2_get_rxfh_context,
+ .set_rxfh_context = otx2_set_rxfh_context,
+ .get_msglevel = otx2_get_msglevel,
+ .set_msglevel = otx2_set_msglevel,
+ .get_pauseparam = otx2_get_pauseparam,
+ .set_pauseparam = otx2_set_pauseparam,
+ .get_ts_info = otx2_get_ts_info,
+ .get_fec_stats = otx2_get_fec_stats,
+ .get_fecparam = otx2_get_fecparam,
+ .set_fecparam = otx2_set_fecparam,
+ .get_link_ksettings = otx2_get_link_ksettings,
+ .set_link_ksettings = otx2_set_link_ksettings,
+};
+
+void otx2_set_ethtool_ops(struct net_device *netdev)
+{
+ netdev->ethtool_ops = &otx2_ethtool_ops;
+}
+
+/* VF's ethtool APIs */
+static void otx2vf_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *info)
+{
+ struct otx2_nic *vf = netdev_priv(netdev);
+
+ strscpy(info->driver, DRV_VF_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(vf->pdev), sizeof(info->bus_info));
+}
+
+static void otx2vf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
+{
+ struct otx2_nic *vf = netdev_priv(netdev);
+ int stats;
+
+ if (sset != ETH_SS_STATS)
+ return;
+
+ for (stats = 0; stats < otx2_n_dev_stats; stats++) {
+ memcpy(data, otx2_dev_stats[stats].name, ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+
+ for (stats = 0; stats < otx2_n_drv_stats; stats++) {
+ memcpy(data, otx2_drv_stats[stats].name, ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+
+ otx2_get_qset_strings(vf, &data, 0);
+
+ strcpy(data, "reset_count");
+ data += ETH_GSTRING_LEN;
+}
+
+static void otx2vf_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct otx2_nic *vf = netdev_priv(netdev);
+ int stat;
+
+ otx2_get_dev_stats(vf);
+ for (stat = 0; stat < otx2_n_dev_stats; stat++)
+ *(data++) = ((u64 *)&vf->hw.dev_stats)
+ [otx2_dev_stats[stat].index];
+
+ for (stat = 0; stat < otx2_n_drv_stats; stat++)
+ *(data++) = atomic_read(&((atomic_t *)&vf->hw.drv_stats)
+ [otx2_drv_stats[stat].index]);
+
+ otx2_get_qset_stats(vf, stats, &data);
+ *(data++) = vf->reset_count;
+}
+
+static int otx2vf_get_sset_count(struct net_device *netdev, int sset)
+{
+ struct otx2_nic *vf = netdev_priv(netdev);
+ int qstats_count;
+
+ if (sset != ETH_SS_STATS)
+ return -EINVAL;
+
+ qstats_count = otx2_n_queue_stats *
+ (vf->hw.rx_queues + otx2_get_total_tx_queues(vf));
+
+ return otx2_n_dev_stats + otx2_n_drv_stats + qstats_count + 1;
+}
+
+static int otx2vf_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+
+ if (is_otx2_lbkvf(pfvf->pdev)) {
+ cmd->base.duplex = DUPLEX_FULL;
+ cmd->base.speed = SPEED_100000;
+ } else {
+ return otx2_get_link_ksettings(netdev, cmd);
+ }
+ return 0;
+}
+
+static const struct ethtool_ops otx2vf_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES |
+ ETHTOOL_COALESCE_USE_ADAPTIVE,
+ .supported_ring_params = ETHTOOL_RING_USE_RX_BUF_LEN |
+ ETHTOOL_RING_USE_CQE_SIZE,
+ .get_link = otx2_get_link,
+ .get_drvinfo = otx2vf_get_drvinfo,
+ .get_strings = otx2vf_get_strings,
+ .get_ethtool_stats = otx2vf_get_ethtool_stats,
+ .get_sset_count = otx2vf_get_sset_count,
+ .set_channels = otx2_set_channels,
+ .get_channels = otx2_get_channels,
+ .get_rxnfc = otx2_get_rxnfc,
+ .set_rxnfc = otx2_set_rxnfc,
+ .get_rxfh_key_size = otx2_get_rxfh_key_size,
+ .get_rxfh_indir_size = otx2_get_rxfh_indir_size,
+ .get_rxfh = otx2_get_rxfh,
+ .set_rxfh = otx2_set_rxfh,
+ .get_rxfh_context = otx2_get_rxfh_context,
+ .set_rxfh_context = otx2_set_rxfh_context,
+ .get_ringparam = otx2_get_ringparam,
+ .set_ringparam = otx2_set_ringparam,
+ .get_coalesce = otx2_get_coalesce,
+ .set_coalesce = otx2_set_coalesce,
+ .get_msglevel = otx2_get_msglevel,
+ .set_msglevel = otx2_set_msglevel,
+ .get_pauseparam = otx2_get_pauseparam,
+ .set_pauseparam = otx2_set_pauseparam,
+ .get_link_ksettings = otx2vf_get_link_ksettings,
+ .get_ts_info = otx2_get_ts_info,
+};
+
+void otx2vf_set_ethtool_ops(struct net_device *netdev)
+{
+ netdev->ethtool_ops = &otx2vf_ethtool_ops;
+}
+EXPORT_SYMBOL(otx2vf_set_ethtool_ops);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
new file mode 100644
index 000000000..97a71e9b8
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
@@ -0,0 +1,1517 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Ethernet driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <net/ipv6.h>
+#include <linux/sort.h>
+
+#include "otx2_common.h"
+
+#define OTX2_DEFAULT_ACTION 0x1
+
+static int otx2_mcam_entry_init(struct otx2_nic *pfvf);
+
+struct otx2_flow {
+ struct ethtool_rx_flow_spec flow_spec;
+ struct list_head list;
+ u32 location;
+ u32 entry;
+ bool is_vf;
+ u8 rss_ctx_id;
+#define DMAC_FILTER_RULE BIT(0)
+#define PFC_FLOWCTRL_RULE BIT(1)
+ u16 rule_type;
+ int vf;
+};
+
+enum dmac_req {
+ DMAC_ADDR_UPDATE,
+ DMAC_ADDR_DEL
+};
+
+static void otx2_clear_ntuple_flow_info(struct otx2_nic *pfvf, struct otx2_flow_config *flow_cfg)
+{
+ devm_kfree(pfvf->dev, flow_cfg->flow_ent);
+ flow_cfg->flow_ent = NULL;
+ flow_cfg->max_flows = 0;
+}
+
+static int otx2_free_ntuple_mcam_entries(struct otx2_nic *pfvf)
+{
+ struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
+ struct npc_mcam_free_entry_req *req;
+ int ent, err;
+
+ if (!flow_cfg->max_flows)
+ return 0;
+
+ mutex_lock(&pfvf->mbox.lock);
+ for (ent = 0; ent < flow_cfg->max_flows; ent++) {
+ req = otx2_mbox_alloc_msg_npc_mcam_free_entry(&pfvf->mbox);
+ if (!req)
+ break;
+
+ req->entry = flow_cfg->flow_ent[ent];
+
+ /* Send message to AF to free MCAM entries */
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err)
+ break;
+ }
+ mutex_unlock(&pfvf->mbox.lock);
+ otx2_clear_ntuple_flow_info(pfvf, flow_cfg);
+ return 0;
+}
+
+static int mcam_entry_cmp(const void *a, const void *b)
+{
+ return *(u16 *)a - *(u16 *)b;
+}
+
+int otx2_alloc_mcam_entries(struct otx2_nic *pfvf, u16 count)
+{
+ struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
+ struct npc_mcam_alloc_entry_req *req;
+ struct npc_mcam_alloc_entry_rsp *rsp;
+ int ent, allocated = 0;
+
+ /* Free current ones and allocate new ones with requested count */
+ otx2_free_ntuple_mcam_entries(pfvf);
+
+ if (!count)
+ return 0;
+
+ flow_cfg->flow_ent = devm_kmalloc_array(pfvf->dev, count,
+ sizeof(u16), GFP_KERNEL);
+ if (!flow_cfg->flow_ent) {
+ netdev_err(pfvf->netdev,
+ "%s: Unable to allocate memory for flow entries\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ mutex_lock(&pfvf->mbox.lock);
+
+	/* At most NPC_MAX_NONCONTIG_ENTRIES MCAM entries can be
+	 * allocated in a single request.
+	 */
+ while (allocated < count) {
+ req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(&pfvf->mbox);
+ if (!req)
+ goto exit;
+
+ req->contig = false;
+ req->count = (count - allocated) > NPC_MAX_NONCONTIG_ENTRIES ?
+ NPC_MAX_NONCONTIG_ENTRIES : count - allocated;
+
+		/* Allocate higher priority entries for PFs, so that VF entries
+		 * will be on top of PF entries.
+		 */
+ if (!is_otx2_vf(pfvf->pcifunc)) {
+ req->priority = NPC_MCAM_HIGHER_PRIO;
+ req->ref_entry = flow_cfg->def_ent[0];
+ }
+
+ /* Send message to AF */
+ if (otx2_sync_mbox_msg(&pfvf->mbox))
+ goto exit;
+
+ rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp
+ (&pfvf->mbox.mbox, 0, &req->hdr);
+
+ for (ent = 0; ent < rsp->count; ent++)
+ flow_cfg->flow_ent[ent + allocated] = rsp->entry_list[ent];
+
+ allocated += rsp->count;
+
+ /* If this request is not fulfilled, no need to send
+ * further requests.
+ */
+ if (rsp->count != req->count)
+ break;
+ }
+
+	/* Multiple MCAM entry alloc requests could result in non-sequential
+	 * MCAM entries in the flow_ent[] array. Sort them in ascending order,
+	 * otherwise the user-installed ntuple filter index and the MCAM entry
+	 * index will not be in sync.
+	 */
+ if (allocated)
+ sort(&flow_cfg->flow_ent[0], allocated,
+ sizeof(flow_cfg->flow_ent[0]), mcam_entry_cmp, NULL);
+
+exit:
+ mutex_unlock(&pfvf->mbox.lock);
+
+ flow_cfg->max_flows = allocated;
+
+ if (allocated) {
+ pfvf->flags |= OTX2_FLAG_MCAM_ENTRIES_ALLOC;
+ pfvf->flags |= OTX2_FLAG_NTUPLE_SUPPORT;
+ }
+
+ if (allocated != count)
+ netdev_info(pfvf->netdev,
+ "Unable to allocate %d MCAM entries, got only %d\n",
+ count, allocated);
+ return allocated;
+}
+EXPORT_SYMBOL(otx2_alloc_mcam_entries);
+
+static int otx2_mcam_entry_init(struct otx2_nic *pfvf)
+{
+ struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
+ struct npc_get_field_status_req *freq;
+ struct npc_get_field_status_rsp *frsp;
+ struct npc_mcam_alloc_entry_req *req;
+ struct npc_mcam_alloc_entry_rsp *rsp;
+ int vf_vlan_max_flows;
+ int ent, count;
+
+ vf_vlan_max_flows = pfvf->total_vfs * OTX2_PER_VF_VLAN_FLOWS;
+ count = OTX2_MAX_UNICAST_FLOWS +
+ OTX2_MAX_VLAN_FLOWS + vf_vlan_max_flows;
+
+ flow_cfg->def_ent = devm_kmalloc_array(pfvf->dev, count,
+ sizeof(u16), GFP_KERNEL);
+ if (!flow_cfg->def_ent)
+ return -ENOMEM;
+
+ mutex_lock(&pfvf->mbox.lock);
+
+ req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ req->contig = false;
+ req->count = count;
+
+ /* Send message to AF */
+ if (otx2_sync_mbox_msg(&pfvf->mbox)) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -EINVAL;
+ }
+
+ rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp
+ (&pfvf->mbox.mbox, 0, &req->hdr);
+
+ if (rsp->count != req->count) {
+ netdev_info(pfvf->netdev,
+ "Unable to allocate MCAM entries for ucast, vlan and vf_vlan\n");
+ mutex_unlock(&pfvf->mbox.lock);
+ devm_kfree(pfvf->dev, flow_cfg->def_ent);
+ return 0;
+ }
+
+ for (ent = 0; ent < rsp->count; ent++)
+ flow_cfg->def_ent[ent] = rsp->entry_list[ent];
+
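+	/* def_ent[] layout: VF VLAN entries first, then unicast, then Rx VLAN */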
+ flow_cfg->vf_vlan_offset = 0;
+ flow_cfg->unicast_offset = vf_vlan_max_flows;
+ flow_cfg->rx_vlan_offset = flow_cfg->unicast_offset +
+ OTX2_MAX_UNICAST_FLOWS;
+ pfvf->flags |= OTX2_FLAG_UCAST_FLTR_SUPPORT;
+
+ /* Check if NPC_DMAC field is supported
+ * by the mkex profile before setting VLAN support flag.
+ */
+ freq = otx2_mbox_alloc_msg_npc_get_field_status(&pfvf->mbox);
+ if (!freq) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ freq->field = NPC_DMAC;
+ if (otx2_sync_mbox_msg(&pfvf->mbox)) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -EINVAL;
+ }
+
+ frsp = (struct npc_get_field_status_rsp *)otx2_mbox_get_rsp
+ (&pfvf->mbox.mbox, 0, &freq->hdr);
+
+ if (frsp->enable) {
+ pfvf->flags |= OTX2_FLAG_RX_VLAN_SUPPORT;
+ pfvf->flags |= OTX2_FLAG_VF_VLAN_SUPPORT;
+ }
+
+ pfvf->flags |= OTX2_FLAG_MCAM_ENTRIES_ALLOC;
+ mutex_unlock(&pfvf->mbox.lock);
+
+ /* Allocate entries for Ntuple filters */
+ count = otx2_alloc_mcam_entries(pfvf, OTX2_DEFAULT_FLOWCOUNT);
+ if (count <= 0) {
+ otx2_clear_ntuple_flow_info(pfvf, flow_cfg);
+ return 0;
+ }
+
+ pfvf->flags |= OTX2_FLAG_TC_FLOWER_SUPPORT;
+
+ return 0;
+}
+
+/* TODO : revisit on size */
+#define OTX2_DMAC_FLTR_BITMAP_SZ (4 * 2048 + 32)
+
+int otx2vf_mcam_flow_init(struct otx2_nic *pfvf)
+{
+ struct otx2_flow_config *flow_cfg;
+
+ pfvf->flow_cfg = devm_kzalloc(pfvf->dev,
+ sizeof(struct otx2_flow_config),
+ GFP_KERNEL);
+ if (!pfvf->flow_cfg)
+ return -ENOMEM;
+
+ pfvf->flow_cfg->dmacflt_bmap = devm_kcalloc(pfvf->dev,
+ BITS_TO_LONGS(OTX2_DMAC_FLTR_BITMAP_SZ),
+ sizeof(long), GFP_KERNEL);
+ if (!pfvf->flow_cfg->dmacflt_bmap)
+ return -ENOMEM;
+
+ flow_cfg = pfvf->flow_cfg;
+ INIT_LIST_HEAD(&flow_cfg->flow_list);
+ INIT_LIST_HEAD(&flow_cfg->flow_list_tc);
+ flow_cfg->max_flows = 0;
+
+ return 0;
+}
+EXPORT_SYMBOL(otx2vf_mcam_flow_init);
+
+int otx2_mcam_flow_init(struct otx2_nic *pf)
+{
+ int err;
+
+ pf->flow_cfg = devm_kzalloc(pf->dev, sizeof(struct otx2_flow_config),
+ GFP_KERNEL);
+ if (!pf->flow_cfg)
+ return -ENOMEM;
+
+ pf->flow_cfg->dmacflt_bmap = devm_kcalloc(pf->dev,
+ BITS_TO_LONGS(OTX2_DMAC_FLTR_BITMAP_SZ),
+ sizeof(long), GFP_KERNEL);
+ if (!pf->flow_cfg->dmacflt_bmap)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&pf->flow_cfg->flow_list);
+ INIT_LIST_HEAD(&pf->flow_cfg->flow_list_tc);
+
+ /* Allocate bare minimum number of MCAM entries needed for
+ * unicast and ntuple filters.
+ */
+ err = otx2_mcam_entry_init(pf);
+ if (err)
+ return err;
+
+	/* Check whether MCAM entries were allocated */
+ if (!(pf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT))
+ return 0;
+
+ pf->mac_table = devm_kzalloc(pf->dev, sizeof(struct otx2_mac_table)
+ * OTX2_MAX_UNICAST_FLOWS, GFP_KERNEL);
+ if (!pf->mac_table)
+ return -ENOMEM;
+
+ otx2_dmacflt_get_max_cnt(pf);
+
+	/* No DMAC filter entries available, nothing more to set up */
+ if (!pf->flow_cfg->dmacflt_max_flows)
+ return 0;
+
+ pf->flow_cfg->bmap_to_dmacindex =
+ devm_kzalloc(pf->dev, sizeof(u32) *
+ pf->flow_cfg->dmacflt_max_flows,
+ GFP_KERNEL);
+
+ if (!pf->flow_cfg->bmap_to_dmacindex)
+ return -ENOMEM;
+
+ pf->flags |= OTX2_FLAG_DMACFLTR_SUPPORT;
+
+ return 0;
+}
+
+void otx2_mcam_flow_del(struct otx2_nic *pf)
+{
+ otx2_destroy_mcam_flows(pf);
+}
+EXPORT_SYMBOL(otx2_mcam_flow_del);
+
+/* On success adds an MCAM entry.
+ * On failure enable promiscuous mode.
+ */
+static int otx2_do_add_macfilter(struct otx2_nic *pf, const u8 *mac)
+{
+ struct otx2_flow_config *flow_cfg = pf->flow_cfg;
+ struct npc_install_flow_req *req;
+ int err, i;
+
+ if (!(pf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT))
+ return -ENOMEM;
+
+	/* Don't have free MCAM entries or UC list is greater than allotted */
+ if (netdev_uc_count(pf->netdev) > OTX2_MAX_UNICAST_FLOWS)
+ return -ENOMEM;
+
+ mutex_lock(&pf->mbox.lock);
+ req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
+ if (!req) {
+ mutex_unlock(&pf->mbox.lock);
+ return -ENOMEM;
+ }
+
+	/* Unicast offset starts at 32; entries 0..31 are for ntuple */
+ for (i = 0; i < OTX2_MAX_UNICAST_FLOWS; i++) {
+ if (pf->mac_table[i].inuse)
+ continue;
+ ether_addr_copy(pf->mac_table[i].addr, mac);
+ pf->mac_table[i].inuse = true;
+ pf->mac_table[i].mcam_entry =
+ flow_cfg->def_ent[i + flow_cfg->unicast_offset];
+ req->entry = pf->mac_table[i].mcam_entry;
+ break;
+ }
+
+ ether_addr_copy(req->packet.dmac, mac);
+ eth_broadcast_addr((u8 *)&req->mask.dmac);
+ req->features = BIT_ULL(NPC_DMAC);
+ req->channel = pf->hw.rx_chan_base;
+ req->intf = NIX_INTF_RX;
+ req->op = NIX_RX_ACTION_DEFAULT;
+ req->set_cntr = 1;
+
+ err = otx2_sync_mbox_msg(&pf->mbox);
+ mutex_unlock(&pf->mbox.lock);
+
+ return err;
+}
+
+int otx2_add_macfilter(struct net_device *netdev, const u8 *mac)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+
+ if (!bitmap_empty(pf->flow_cfg->dmacflt_bmap,
+ pf->flow_cfg->dmacflt_max_flows))
+ netdev_warn(netdev,
+ "Add %pM to CGX/RPM DMAC filters list as well\n",
+ mac);
+
+ return otx2_do_add_macfilter(pf, mac);
+}
+
+static bool otx2_get_mcamentry_for_mac(struct otx2_nic *pf, const u8 *mac,
+ int *mcam_entry)
+{
+ int i;
+
+ for (i = 0; i < OTX2_MAX_UNICAST_FLOWS; i++) {
+ if (!pf->mac_table[i].inuse)
+ continue;
+
+ if (ether_addr_equal(pf->mac_table[i].addr, mac)) {
+ *mcam_entry = pf->mac_table[i].mcam_entry;
+ pf->mac_table[i].inuse = false;
+ return true;
+ }
+ }
+ return false;
+}
+
+int otx2_del_macfilter(struct net_device *netdev, const u8 *mac)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+ struct npc_delete_flow_req *req;
+ int err, mcam_entry;
+
+	/* Check whether an MCAM entry exists for the given MAC */
+ if (!otx2_get_mcamentry_for_mac(pf, mac, &mcam_entry))
+ return 0;
+
+ mutex_lock(&pf->mbox.lock);
+ req = otx2_mbox_alloc_msg_npc_delete_flow(&pf->mbox);
+ if (!req) {
+ mutex_unlock(&pf->mbox.lock);
+ return -ENOMEM;
+ }
+ req->entry = mcam_entry;
+ /* Send message to AF */
+ err = otx2_sync_mbox_msg(&pf->mbox);
+ mutex_unlock(&pf->mbox.lock);
+
+ return err;
+}
+
+static struct otx2_flow *otx2_find_flow(struct otx2_nic *pfvf, u32 location)
+{
+ struct otx2_flow *iter;
+
+ list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
+ if (iter->location == location)
+ return iter;
+ }
+
+ return NULL;
+}
+
+static void otx2_add_flow_to_list(struct otx2_nic *pfvf, struct otx2_flow *flow)
+{
+ struct list_head *head = &pfvf->flow_cfg->flow_list;
+ struct otx2_flow *iter;
+
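+	/* Keep flow_list sorted by rule location in ascending order */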
+ list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
+ if (iter->location > flow->location)
+ break;
+ head = &iter->list;
+ }
+
+ list_add(&flow->list, head);
+}
+
+int otx2_get_maxflows(struct otx2_flow_config *flow_cfg)
+{
+ if (!flow_cfg)
+ return 0;
+
+ if (flow_cfg->nr_flows == flow_cfg->max_flows ||
+ !bitmap_empty(flow_cfg->dmacflt_bmap,
+ flow_cfg->dmacflt_max_flows))
+ return flow_cfg->max_flows + flow_cfg->dmacflt_max_flows;
+ else
+ return flow_cfg->max_flows;
+}
+EXPORT_SYMBOL(otx2_get_maxflows);
+
+int otx2_get_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
+ u32 location)
+{
+ struct otx2_flow *iter;
+
+ if (location >= otx2_get_maxflows(pfvf->flow_cfg))
+ return -EINVAL;
+
+ list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
+ if (iter->location == location) {
+ nfc->fs = iter->flow_spec;
+ nfc->rss_context = iter->rss_ctx_id;
+ return 0;
+ }
+ }
+
+ return -ENOENT;
+}
+
+int otx2_get_all_flows(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
+ u32 *rule_locs)
+{
+ u32 rule_cnt = nfc->rule_cnt;
+ u32 location = 0;
+ int idx = 0;
+ int err = 0;
+
+ nfc->data = otx2_get_maxflows(pfvf->flow_cfg);
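+	/* Scan every possible location; -ENOENT just means an empty slot */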
+ while ((!err || err == -ENOENT) && idx < rule_cnt) {
+ err = otx2_get_flow(pfvf, nfc, location);
+ if (!err)
+ rule_locs[idx++] = location;
+ location++;
+ }
+ nfc->rule_cnt = rule_cnt;
+
+ return err;
+}
+
+static int otx2_prepare_ipv4_flow(struct ethtool_rx_flow_spec *fsp,
+ struct npc_install_flow_req *req,
+ u32 flow_type)
+{
+ struct ethtool_usrip4_spec *ipv4_usr_mask = &fsp->m_u.usr_ip4_spec;
+ struct ethtool_usrip4_spec *ipv4_usr_hdr = &fsp->h_u.usr_ip4_spec;
+ struct ethtool_tcpip4_spec *ipv4_l4_mask = &fsp->m_u.tcp_ip4_spec;
+ struct ethtool_tcpip4_spec *ipv4_l4_hdr = &fsp->h_u.tcp_ip4_spec;
+ struct ethtool_ah_espip4_spec *ah_esp_hdr = &fsp->h_u.ah_ip4_spec;
+ struct ethtool_ah_espip4_spec *ah_esp_mask = &fsp->m_u.ah_ip4_spec;
+ struct flow_msg *pmask = &req->mask;
+ struct flow_msg *pkt = &req->packet;
+
+ switch (flow_type) {
+ case IP_USER_FLOW:
+ if (ipv4_usr_mask->ip4src) {
+ memcpy(&pkt->ip4src, &ipv4_usr_hdr->ip4src,
+ sizeof(pkt->ip4src));
+ memcpy(&pmask->ip4src, &ipv4_usr_mask->ip4src,
+ sizeof(pmask->ip4src));
+ req->features |= BIT_ULL(NPC_SIP_IPV4);
+ }
+ if (ipv4_usr_mask->ip4dst) {
+ memcpy(&pkt->ip4dst, &ipv4_usr_hdr->ip4dst,
+ sizeof(pkt->ip4dst));
+ memcpy(&pmask->ip4dst, &ipv4_usr_mask->ip4dst,
+ sizeof(pmask->ip4dst));
+ req->features |= BIT_ULL(NPC_DIP_IPV4);
+ }
+ if (ipv4_usr_mask->tos) {
+ pkt->tos = ipv4_usr_hdr->tos;
+ pmask->tos = ipv4_usr_mask->tos;
+ req->features |= BIT_ULL(NPC_TOS);
+ }
+ if (ipv4_usr_mask->proto) {
+ switch (ipv4_usr_hdr->proto) {
+ case IPPROTO_ICMP:
+ req->features |= BIT_ULL(NPC_IPPROTO_ICMP);
+ break;
+ case IPPROTO_TCP:
+ req->features |= BIT_ULL(NPC_IPPROTO_TCP);
+ break;
+ case IPPROTO_UDP:
+ req->features |= BIT_ULL(NPC_IPPROTO_UDP);
+ break;
+ case IPPROTO_SCTP:
+ req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
+ break;
+ case IPPROTO_AH:
+ req->features |= BIT_ULL(NPC_IPPROTO_AH);
+ break;
+ case IPPROTO_ESP:
+ req->features |= BIT_ULL(NPC_IPPROTO_ESP);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ }
+ pkt->etype = cpu_to_be16(ETH_P_IP);
+ pmask->etype = cpu_to_be16(0xFFFF);
+ req->features |= BIT_ULL(NPC_ETYPE);
+ break;
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ pkt->etype = cpu_to_be16(ETH_P_IP);
+ pmask->etype = cpu_to_be16(0xFFFF);
+ req->features |= BIT_ULL(NPC_ETYPE);
+ if (ipv4_l4_mask->ip4src) {
+ memcpy(&pkt->ip4src, &ipv4_l4_hdr->ip4src,
+ sizeof(pkt->ip4src));
+ memcpy(&pmask->ip4src, &ipv4_l4_mask->ip4src,
+ sizeof(pmask->ip4src));
+ req->features |= BIT_ULL(NPC_SIP_IPV4);
+ }
+ if (ipv4_l4_mask->ip4dst) {
+ memcpy(&pkt->ip4dst, &ipv4_l4_hdr->ip4dst,
+ sizeof(pkt->ip4dst));
+ memcpy(&pmask->ip4dst, &ipv4_l4_mask->ip4dst,
+ sizeof(pmask->ip4dst));
+ req->features |= BIT_ULL(NPC_DIP_IPV4);
+ }
+ if (ipv4_l4_mask->tos) {
+ pkt->tos = ipv4_l4_hdr->tos;
+ pmask->tos = ipv4_l4_mask->tos;
+ req->features |= BIT_ULL(NPC_TOS);
+ }
+ if (ipv4_l4_mask->psrc) {
+ memcpy(&pkt->sport, &ipv4_l4_hdr->psrc,
+ sizeof(pkt->sport));
+ memcpy(&pmask->sport, &ipv4_l4_mask->psrc,
+ sizeof(pmask->sport));
+ if (flow_type == UDP_V4_FLOW)
+ req->features |= BIT_ULL(NPC_SPORT_UDP);
+ else if (flow_type == TCP_V4_FLOW)
+ req->features |= BIT_ULL(NPC_SPORT_TCP);
+ else
+ req->features |= BIT_ULL(NPC_SPORT_SCTP);
+ }
+ if (ipv4_l4_mask->pdst) {
+ memcpy(&pkt->dport, &ipv4_l4_hdr->pdst,
+ sizeof(pkt->dport));
+ memcpy(&pmask->dport, &ipv4_l4_mask->pdst,
+ sizeof(pmask->dport));
+ if (flow_type == UDP_V4_FLOW)
+ req->features |= BIT_ULL(NPC_DPORT_UDP);
+ else if (flow_type == TCP_V4_FLOW)
+ req->features |= BIT_ULL(NPC_DPORT_TCP);
+ else
+ req->features |= BIT_ULL(NPC_DPORT_SCTP);
+ }
+ if (flow_type == UDP_V4_FLOW)
+ req->features |= BIT_ULL(NPC_IPPROTO_UDP);
+ else if (flow_type == TCP_V4_FLOW)
+ req->features |= BIT_ULL(NPC_IPPROTO_TCP);
+ else
+ req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
+ break;
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ pkt->etype = cpu_to_be16(ETH_P_IP);
+ pmask->etype = cpu_to_be16(0xFFFF);
+ req->features |= BIT_ULL(NPC_ETYPE);
+ if (ah_esp_mask->ip4src) {
+ memcpy(&pkt->ip4src, &ah_esp_hdr->ip4src,
+ sizeof(pkt->ip4src));
+ memcpy(&pmask->ip4src, &ah_esp_mask->ip4src,
+ sizeof(pmask->ip4src));
+ req->features |= BIT_ULL(NPC_SIP_IPV4);
+ }
+ if (ah_esp_mask->ip4dst) {
+ memcpy(&pkt->ip4dst, &ah_esp_hdr->ip4dst,
+ sizeof(pkt->ip4dst));
+ memcpy(&pmask->ip4dst, &ah_esp_mask->ip4dst,
+ sizeof(pmask->ip4dst));
+ req->features |= BIT_ULL(NPC_DIP_IPV4);
+ }
+ if (ah_esp_mask->tos) {
+ pkt->tos = ah_esp_hdr->tos;
+ pmask->tos = ah_esp_mask->tos;
+ req->features |= BIT_ULL(NPC_TOS);
+ }
+
+ /* NPC profile doesn't extract AH/ESP header fields */
+ if (ah_esp_mask->spi & ah_esp_hdr->spi)
+ return -EOPNOTSUPP;
+
+ if (flow_type == AH_V4_FLOW)
+ req->features |= BIT_ULL(NPC_IPPROTO_AH);
+ else
+ req->features |= BIT_ULL(NPC_IPPROTO_ESP);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int otx2_prepare_ipv6_flow(struct ethtool_rx_flow_spec *fsp,
+ struct npc_install_flow_req *req,
+ u32 flow_type)
+{
+ struct ethtool_usrip6_spec *ipv6_usr_mask = &fsp->m_u.usr_ip6_spec;
+ struct ethtool_usrip6_spec *ipv6_usr_hdr = &fsp->h_u.usr_ip6_spec;
+ struct ethtool_tcpip6_spec *ipv6_l4_mask = &fsp->m_u.tcp_ip6_spec;
+ struct ethtool_tcpip6_spec *ipv6_l4_hdr = &fsp->h_u.tcp_ip6_spec;
+ struct ethtool_ah_espip6_spec *ah_esp_hdr = &fsp->h_u.ah_ip6_spec;
+ struct ethtool_ah_espip6_spec *ah_esp_mask = &fsp->m_u.ah_ip6_spec;
+ struct flow_msg *pmask = &req->mask;
+ struct flow_msg *pkt = &req->packet;
+
+ switch (flow_type) {
+ case IPV6_USER_FLOW:
+ if (!ipv6_addr_any((struct in6_addr *)ipv6_usr_mask->ip6src)) {
+ memcpy(&pkt->ip6src, &ipv6_usr_hdr->ip6src,
+ sizeof(pkt->ip6src));
+ memcpy(&pmask->ip6src, &ipv6_usr_mask->ip6src,
+ sizeof(pmask->ip6src));
+ req->features |= BIT_ULL(NPC_SIP_IPV6);
+ }
+ if (!ipv6_addr_any((struct in6_addr *)ipv6_usr_mask->ip6dst)) {
+ memcpy(&pkt->ip6dst, &ipv6_usr_hdr->ip6dst,
+ sizeof(pkt->ip6dst));
+ memcpy(&pmask->ip6dst, &ipv6_usr_mask->ip6dst,
+ sizeof(pmask->ip6dst));
+ req->features |= BIT_ULL(NPC_DIP_IPV6);
+ }
+ if (ipv6_usr_hdr->l4_proto == IPPROTO_FRAGMENT) {
+ pkt->next_header = ipv6_usr_hdr->l4_proto;
+ pmask->next_header = ipv6_usr_mask->l4_proto;
+ req->features |= BIT_ULL(NPC_IPFRAG_IPV6);
+ }
+ pkt->etype = cpu_to_be16(ETH_P_IPV6);
+ pmask->etype = cpu_to_be16(0xFFFF);
+ req->features |= BIT_ULL(NPC_ETYPE);
+ break;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ pkt->etype = cpu_to_be16(ETH_P_IPV6);
+ pmask->etype = cpu_to_be16(0xFFFF);
+ req->features |= BIT_ULL(NPC_ETYPE);
+ if (!ipv6_addr_any((struct in6_addr *)ipv6_l4_mask->ip6src)) {
+ memcpy(&pkt->ip6src, &ipv6_l4_hdr->ip6src,
+ sizeof(pkt->ip6src));
+ memcpy(&pmask->ip6src, &ipv6_l4_mask->ip6src,
+ sizeof(pmask->ip6src));
+ req->features |= BIT_ULL(NPC_SIP_IPV6);
+ }
+ if (!ipv6_addr_any((struct in6_addr *)ipv6_l4_mask->ip6dst)) {
+ memcpy(&pkt->ip6dst, &ipv6_l4_hdr->ip6dst,
+ sizeof(pkt->ip6dst));
+ memcpy(&pmask->ip6dst, &ipv6_l4_mask->ip6dst,
+ sizeof(pmask->ip6dst));
+ req->features |= BIT_ULL(NPC_DIP_IPV6);
+ }
+ if (ipv6_l4_mask->psrc) {
+ memcpy(&pkt->sport, &ipv6_l4_hdr->psrc,
+ sizeof(pkt->sport));
+ memcpy(&pmask->sport, &ipv6_l4_mask->psrc,
+ sizeof(pmask->sport));
+ if (flow_type == UDP_V6_FLOW)
+ req->features |= BIT_ULL(NPC_SPORT_UDP);
+ else if (flow_type == TCP_V6_FLOW)
+ req->features |= BIT_ULL(NPC_SPORT_TCP);
+ else
+ req->features |= BIT_ULL(NPC_SPORT_SCTP);
+ }
+ if (ipv6_l4_mask->pdst) {
+ memcpy(&pkt->dport, &ipv6_l4_hdr->pdst,
+ sizeof(pkt->dport));
+ memcpy(&pmask->dport, &ipv6_l4_mask->pdst,
+ sizeof(pmask->dport));
+ if (flow_type == UDP_V6_FLOW)
+ req->features |= BIT_ULL(NPC_DPORT_UDP);
+ else if (flow_type == TCP_V6_FLOW)
+ req->features |= BIT_ULL(NPC_DPORT_TCP);
+ else
+ req->features |= BIT_ULL(NPC_DPORT_SCTP);
+ }
+ if (flow_type == UDP_V6_FLOW)
+ req->features |= BIT_ULL(NPC_IPPROTO_UDP);
+ else if (flow_type == TCP_V6_FLOW)
+ req->features |= BIT_ULL(NPC_IPPROTO_TCP);
+ else
+ req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
+ break;
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ pkt->etype = cpu_to_be16(ETH_P_IPV6);
+ pmask->etype = cpu_to_be16(0xFFFF);
+ req->features |= BIT_ULL(NPC_ETYPE);
+ if (!ipv6_addr_any((struct in6_addr *)ah_esp_hdr->ip6src)) {
+ memcpy(&pkt->ip6src, &ah_esp_hdr->ip6src,
+ sizeof(pkt->ip6src));
+ memcpy(&pmask->ip6src, &ah_esp_mask->ip6src,
+ sizeof(pmask->ip6src));
+ req->features |= BIT_ULL(NPC_SIP_IPV6);
+ }
+ if (!ipv6_addr_any((struct in6_addr *)ah_esp_hdr->ip6dst)) {
+ memcpy(&pkt->ip6dst, &ah_esp_hdr->ip6dst,
+ sizeof(pkt->ip6dst));
+ memcpy(&pmask->ip6dst, &ah_esp_mask->ip6dst,
+ sizeof(pmask->ip6dst));
+ req->features |= BIT_ULL(NPC_DIP_IPV6);
+ }
+
+ /* NPC profile doesn't extract AH/ESP header fields */
+ if ((ah_esp_mask->spi & ah_esp_hdr->spi) ||
+ (ah_esp_mask->tclass & ah_esp_hdr->tclass))
+ return -EOPNOTSUPP;
+
+ if (flow_type == AH_V6_FLOW)
+ req->features |= BIT_ULL(NPC_IPPROTO_AH);
+ else
+ req->features |= BIT_ULL(NPC_IPPROTO_ESP);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp,
+ struct npc_install_flow_req *req)
+{
+ struct ethhdr *eth_mask = &fsp->m_u.ether_spec;
+ struct ethhdr *eth_hdr = &fsp->h_u.ether_spec;
+ struct flow_msg *pmask = &req->mask;
+ struct flow_msg *pkt = &req->packet;
+ u32 flow_type;
+ int ret;
+
+ flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
+ switch (flow_type) {
+	/* Bits not set in the mask are don't-care */
+ case ETHER_FLOW:
+ if (!is_zero_ether_addr(eth_mask->h_source)) {
+ ether_addr_copy(pkt->smac, eth_hdr->h_source);
+ ether_addr_copy(pmask->smac, eth_mask->h_source);
+ req->features |= BIT_ULL(NPC_SMAC);
+ }
+ if (!is_zero_ether_addr(eth_mask->h_dest)) {
+ ether_addr_copy(pkt->dmac, eth_hdr->h_dest);
+ ether_addr_copy(pmask->dmac, eth_mask->h_dest);
+ req->features |= BIT_ULL(NPC_DMAC);
+ }
+ if (eth_hdr->h_proto) {
+ memcpy(&pkt->etype, &eth_hdr->h_proto,
+ sizeof(pkt->etype));
+ memcpy(&pmask->etype, &eth_mask->h_proto,
+ sizeof(pmask->etype));
+ req->features |= BIT_ULL(NPC_ETYPE);
+ }
+ break;
+ case IP_USER_FLOW:
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ ret = otx2_prepare_ipv4_flow(fsp, req, flow_type);
+ if (ret)
+ return ret;
+ break;
+ case IPV6_USER_FLOW:
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ ret = otx2_prepare_ipv6_flow(fsp, req, flow_type);
+ if (ret)
+ return ret;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ if (fsp->flow_type & FLOW_EXT) {
+ u16 vlan_etype;
+
+ if (fsp->m_ext.vlan_etype) {
+ /* Partial masks not supported */
+ if (be16_to_cpu(fsp->m_ext.vlan_etype) != 0xFFFF)
+ return -EINVAL;
+
+ vlan_etype = be16_to_cpu(fsp->h_ext.vlan_etype);
+
+ /* Drop rule with vlan_etype == 802.1Q
+ * and vlan_id == 0 is not supported
+ */
+ if (vlan_etype == ETH_P_8021Q && !fsp->m_ext.vlan_tci &&
+ fsp->ring_cookie == RX_CLS_FLOW_DISC)
+ return -EINVAL;
+
+			/* Only ETH_P_8021Q and ETH_P_8021AD types supported */
+ if (vlan_etype != ETH_P_8021Q &&
+ vlan_etype != ETH_P_8021AD)
+ return -EINVAL;
+
+ memcpy(&pkt->vlan_etype, &fsp->h_ext.vlan_etype,
+ sizeof(pkt->vlan_etype));
+ memcpy(&pmask->vlan_etype, &fsp->m_ext.vlan_etype,
+ sizeof(pmask->vlan_etype));
+
+ if (vlan_etype == ETH_P_8021Q)
+ req->features |= BIT_ULL(NPC_VLAN_ETYPE_CTAG);
+ else
+ req->features |= BIT_ULL(NPC_VLAN_ETYPE_STAG);
+ }
+
+ if (fsp->m_ext.vlan_tci) {
+ memcpy(&pkt->vlan_tci, &fsp->h_ext.vlan_tci,
+ sizeof(pkt->vlan_tci));
+ memcpy(&pmask->vlan_tci, &fsp->m_ext.vlan_tci,
+ sizeof(pmask->vlan_tci));
+ req->features |= BIT_ULL(NPC_OUTER_VID);
+ }
+
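+		/* h_ext.data[1] carries either the IPv4 'more fragments' flag
+		 * to match on or a request to use the default entry's action.
+		 */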
+ if (fsp->m_ext.data[1]) {
+ if (flow_type == IP_USER_FLOW) {
+ if (be32_to_cpu(fsp->h_ext.data[1]) != IPV4_FLAG_MORE)
+ return -EINVAL;
+
+ pkt->ip_flag = be32_to_cpu(fsp->h_ext.data[1]);
+ pmask->ip_flag = be32_to_cpu(fsp->m_ext.data[1]);
+ req->features |= BIT_ULL(NPC_IPFRAG_IPV4);
+ } else if (fsp->h_ext.data[1] ==
+ cpu_to_be32(OTX2_DEFAULT_ACTION)) {
+ /* Not Drop/Direct to queue but use action
+ * in default entry
+ */
+ req->op = NIX_RX_ACTION_DEFAULT;
+ }
+ }
+ }
+
+ if (fsp->flow_type & FLOW_MAC_EXT &&
+ !is_zero_ether_addr(fsp->m_ext.h_dest)) {
+ ether_addr_copy(pkt->dmac, fsp->h_ext.h_dest);
+ ether_addr_copy(pmask->dmac, fsp->m_ext.h_dest);
+ req->features |= BIT_ULL(NPC_DMAC);
+ }
+
+ if (!req->features)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static int otx2_is_flow_rule_dmacfilter(struct otx2_nic *pfvf,
+ struct ethtool_rx_flow_spec *fsp)
+{
+ struct ethhdr *eth_mask = &fsp->m_u.ether_spec;
+ struct ethhdr *eth_hdr = &fsp->h_u.ether_spec;
+ u64 ring_cookie = fsp->ring_cookie;
+ u32 flow_type;
+
+ if (!(pfvf->flags & OTX2_FLAG_DMACFLTR_SUPPORT))
+ return false;
+
+ flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
+
+	/* CGX/RPM block DMAC filtering is configured for white listing,
+	 * so check for an action other than DROP.
+	 */
+ if (flow_type == ETHER_FLOW && ring_cookie != RX_CLS_FLOW_DISC &&
+ !ethtool_get_flow_spec_ring_vf(ring_cookie)) {
+ if (is_zero_ether_addr(eth_mask->h_dest) &&
+ is_valid_ether_addr(eth_hdr->h_dest))
+ return true;
+ }
+
+ return false;
+}
+
+static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow)
+{
+ u64 ring_cookie = flow->flow_spec.ring_cookie;
+#ifdef CONFIG_DCB
+ int vlan_prio, qidx, pfc_rule = 0;
+#endif
+ struct npc_install_flow_req *req;
+ int err, vf = 0;
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_npc_install_flow(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ err = otx2_prepare_flow_request(&flow->flow_spec, req);
+ if (err) {
+ /* free the allocated msg above */
+ otx2_mbox_reset(&pfvf->mbox.mbox, 0);
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
+ }
+
+ req->entry = flow->entry;
+ req->intf = NIX_INTF_RX;
+ req->set_cntr = 1;
+ req->channel = pfvf->hw.rx_chan_base;
+ if (ring_cookie == RX_CLS_FLOW_DISC) {
+ req->op = NIX_RX_ACTIONOP_DROP;
+ } else {
+ /* change to unicast only if action of default entry is not
+ * requested by user
+ */
+ if (flow->flow_spec.flow_type & FLOW_RSS) {
+ req->op = NIX_RX_ACTIONOP_RSS;
+ req->index = flow->rss_ctx_id;
+ req->flow_key_alg = pfvf->hw.flowkey_alg_idx;
+ } else {
+ req->op = NIX_RX_ACTIONOP_UCAST;
+ req->index = ethtool_get_flow_spec_ring(ring_cookie);
+ }
+ vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
+ if (vf > pci_num_vf(pfvf->pdev)) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -EINVAL;
+ }
+
+#ifdef CONFIG_DCB
+ /* Identify PFC rule if PFC enabled and ntuple rule is vlan */
+ if (!vf && (req->features & BIT_ULL(NPC_OUTER_VID)) &&
+ pfvf->pfc_en && req->op != NIX_RX_ACTIONOP_RSS) {
+ vlan_prio = ntohs(req->packet.vlan_tci) &
+ ntohs(req->mask.vlan_tci);
+
+ /* Get the priority */
+ vlan_prio >>= 13;
+ flow->rule_type |= PFC_FLOWCTRL_RULE;
+ /* Check if PFC enabled for this priority */
+ if (pfvf->pfc_en & BIT(vlan_prio)) {
+ pfc_rule = true;
+ qidx = req->index;
+ }
+ }
+#endif
+ }
+
+	/* ethtool ring_cookie encodes (VF index + 1) for VF-directed rules */
+ if (vf) {
+ req->vf = vf;
+ flow->is_vf = true;
+ flow->vf = vf;
+ }
+
+ /* Send message to AF */
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+
+#ifdef CONFIG_DCB
+ if (!err && pfc_rule)
+ otx2_update_bpid_in_rqctx(pfvf, vlan_prio, qidx, true);
+#endif
+
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
+}
+
+static int otx2_add_flow_with_pfmac(struct otx2_nic *pfvf,
+ struct otx2_flow *flow)
+{
+ struct otx2_flow *pf_mac;
+ struct ethhdr *eth_hdr;
+
+ pf_mac = kzalloc(sizeof(*pf_mac), GFP_KERNEL);
+ if (!pf_mac)
+ return -ENOMEM;
+
+ pf_mac->entry = 0;
+ pf_mac->rule_type |= DMAC_FILTER_RULE;
+ pf_mac->location = pfvf->flow_cfg->max_flows;
+ memcpy(&pf_mac->flow_spec, &flow->flow_spec,
+ sizeof(struct ethtool_rx_flow_spec));
+ pf_mac->flow_spec.location = pf_mac->location;
+
+ /* Copy PF mac address */
+ eth_hdr = &pf_mac->flow_spec.h_u.ether_spec;
+ ether_addr_copy(eth_hdr->h_dest, pfvf->netdev->dev_addr);
+
+ /* Install DMAC filter with PF mac address */
+ otx2_dmacflt_add(pfvf, eth_hdr->h_dest, 0);
+
+ otx2_add_flow_to_list(pfvf, pf_mac);
+ pfvf->flow_cfg->nr_flows++;
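+	/* Bit 0 of the DMAC filter bitmap is reserved for the PF's own MAC */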
+ set_bit(0, pfvf->flow_cfg->dmacflt_bmap);
+
+ return 0;
+}
+
+int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
+{
+ struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
+ struct ethtool_rx_flow_spec *fsp = &nfc->fs;
+ struct otx2_flow *flow;
+ struct ethhdr *eth_hdr;
+ bool new = false;
+ int err = 0;
+ u64 vf_num;
+ u32 ring;
+
+ if (!flow_cfg->max_flows) {
+ netdev_err(pfvf->netdev,
+ "Ntuple rule count is 0, allocate and retry\n");
+ return -EINVAL;
+ }
+
+ ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
+ if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT))
+ return -ENOMEM;
+
+	/* The number of queues on a VF can be greater or less than the
+	 * PF's queue count, so there is no need to check the queue count
+	 * when the PF is installing a rule for one of its VFs. Below is
+	 * the expected vf_num value based on the ethtool command used.
+	 *
+	 * e.g.
+	 * 1. ethtool -U <netdev> ... action -1  ==> vf_num:255
+	 * 2. ethtool -U <netdev> ... action <queue_num>  ==> vf_num:0
+	 * 3. ethtool -U <netdev> ... vf <vf_idx> queue <queue_num>  ==>
+	 *    vf_num:vf_idx+1
+	 */
+ vf_num = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie);
+ if (!is_otx2_vf(pfvf->pcifunc) && !vf_num &&
+ ring >= pfvf->hw.rx_queues && fsp->ring_cookie != RX_CLS_FLOW_DISC)
+ return -EINVAL;
+
+ if (fsp->location >= otx2_get_maxflows(flow_cfg))
+ return -EINVAL;
+
+ flow = otx2_find_flow(pfvf, fsp->location);
+ if (!flow) {
+ flow = kzalloc(sizeof(*flow), GFP_KERNEL);
+ if (!flow)
+ return -ENOMEM;
+ flow->location = fsp->location;
+ flow->entry = flow_cfg->flow_ent[flow->location];
+ new = true;
+ }
+ /* struct copy */
+ flow->flow_spec = *fsp;
+
+ if (fsp->flow_type & FLOW_RSS)
+ flow->rss_ctx_id = nfc->rss_context;
+
+ if (otx2_is_flow_rule_dmacfilter(pfvf, &flow->flow_spec)) {
+ eth_hdr = &flow->flow_spec.h_u.ether_spec;
+
+ /* Sync dmac filter table with updated fields */
+ if (flow->rule_type & DMAC_FILTER_RULE)
+ return otx2_dmacflt_update(pfvf, eth_hdr->h_dest,
+ flow->entry);
+
+ if (bitmap_full(flow_cfg->dmacflt_bmap,
+ flow_cfg->dmacflt_max_flows)) {
+ netdev_warn(pfvf->netdev,
+ "Can't insert the rule %d as max allowed dmac filters are %d\n",
+ flow->location +
+ flow_cfg->dmacflt_max_flows,
+ flow_cfg->dmacflt_max_flows);
+ err = -EINVAL;
+ if (new)
+ kfree(flow);
+ return err;
+ }
+
+ /* Install PF mac address to DMAC filter list */
+ if (!test_bit(0, flow_cfg->dmacflt_bmap))
+ otx2_add_flow_with_pfmac(pfvf, flow);
+
+ flow->rule_type |= DMAC_FILTER_RULE;
+ flow->entry = find_first_zero_bit(flow_cfg->dmacflt_bmap,
+ flow_cfg->dmacflt_max_flows);
+ fsp->location = flow_cfg->max_flows + flow->entry;
+ flow->flow_spec.location = fsp->location;
+ flow->location = fsp->location;
+
+ set_bit(flow->entry, flow_cfg->dmacflt_bmap);
+ otx2_dmacflt_add(pfvf, eth_hdr->h_dest, flow->entry);
+
+ } else {
+ if (flow->location >= pfvf->flow_cfg->max_flows) {
+ netdev_warn(pfvf->netdev,
+				    "Can't insert non-DMAC ntuple rule at %d, allowed range is 0-%d\n",
+ flow->location,
+ flow_cfg->max_flows - 1);
+ err = -EINVAL;
+ } else {
+ err = otx2_add_flow_msg(pfvf, flow);
+ }
+ }
+
+ if (err) {
+ if (err == MBOX_MSG_INVALID)
+ err = -EINVAL;
+ if (new)
+ kfree(flow);
+ return err;
+ }
+
+ /* add the new flow installed to list */
+ if (new) {
+ otx2_add_flow_to_list(pfvf, flow);
+ flow_cfg->nr_flows++;
+ }
+
+ if (flow->is_vf)
+ netdev_info(pfvf->netdev,
+ "Make sure that VF's queue number is within its queue limit\n");
+ return 0;
+}
+
+static int otx2_remove_flow_msg(struct otx2_nic *pfvf, u16 entry, bool all)
+{
+ struct npc_delete_flow_req *req;
+ int err;
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ req->entry = entry;
+ if (all)
+ req->all = 1;
+
+ /* Send message to AF */
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
+}
+
+static void otx2_update_rem_pfmac(struct otx2_nic *pfvf, int req)
+{
+ struct otx2_flow *iter;
+ struct ethhdr *eth_hdr;
+ bool found = false;
+
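+	/* The PF MAC's DMAC filter always occupies entry 0 */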
+ list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
+ if ((iter->rule_type & DMAC_FILTER_RULE) && iter->entry == 0) {
+ eth_hdr = &iter->flow_spec.h_u.ether_spec;
+ if (req == DMAC_ADDR_DEL) {
+ otx2_dmacflt_remove(pfvf, eth_hdr->h_dest,
+ 0);
+ clear_bit(0, pfvf->flow_cfg->dmacflt_bmap);
+ found = true;
+ } else {
+ ether_addr_copy(eth_hdr->h_dest,
+ pfvf->netdev->dev_addr);
+
+ otx2_dmacflt_update(pfvf, eth_hdr->h_dest, 0);
+ }
+ break;
+ }
+ }
+
+ if (found) {
+ list_del(&iter->list);
+ kfree(iter);
+ pfvf->flow_cfg->nr_flows--;
+ }
+}
+
+int otx2_remove_flow(struct otx2_nic *pfvf, u32 location)
+{
+ struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
+ struct otx2_flow *flow;
+ int err;
+
+ if (location >= otx2_get_maxflows(flow_cfg))
+ return -EINVAL;
+
+ flow = otx2_find_flow(pfvf, location);
+ if (!flow)
+ return -ENOENT;
+
+ if (flow->rule_type & DMAC_FILTER_RULE) {
+ struct ethhdr *eth_hdr = &flow->flow_spec.h_u.ether_spec;
+
+		/* The user is not allowed to remove the DMAC filter for the interface MAC */
+ if (ether_addr_equal(pfvf->netdev->dev_addr, eth_hdr->h_dest))
+ return -EPERM;
+
+ err = otx2_dmacflt_remove(pfvf, eth_hdr->h_dest,
+ flow->entry);
+ clear_bit(flow->entry, flow_cfg->dmacflt_bmap);
+		/* If all DMAC filters are removed, delete the MAC filter with
+		 * the interface MAC address and configure the CGX/RPM block
+		 * in promiscuous mode.
+		 */
+ if (bitmap_weight(flow_cfg->dmacflt_bmap,
+ flow_cfg->dmacflt_max_flows) == 1)
+ otx2_update_rem_pfmac(pfvf, DMAC_ADDR_DEL);
+ } else {
+#ifdef CONFIG_DCB
+ if (flow->rule_type & PFC_FLOWCTRL_RULE)
+ otx2_update_bpid_in_rqctx(pfvf, 0,
+ flow->flow_spec.ring_cookie,
+ false);
+#endif
+
+ err = otx2_remove_flow_msg(pfvf, flow->entry, false);
+ }
+
+ if (err)
+ return err;
+
+ list_del(&flow->list);
+ kfree(flow);
+ flow_cfg->nr_flows--;
+
+ return 0;
+}
+
+void otx2_rss_ctx_flow_del(struct otx2_nic *pfvf, int ctx_id)
+{
+ struct otx2_flow *flow, *tmp;
+ int err;
+
+ list_for_each_entry_safe(flow, tmp, &pfvf->flow_cfg->flow_list, list) {
+ if (flow->rss_ctx_id != ctx_id)
+ continue;
+ err = otx2_remove_flow(pfvf, flow->location);
+ if (err)
+ netdev_warn(pfvf->netdev,
+ "Can't delete the rule %d associated with this rss group err:%d",
+ flow->location, err);
+ }
+}
+
+int otx2_destroy_ntuple_flows(struct otx2_nic *pfvf)
+{
+ struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
+ struct npc_delete_flow_req *req;
+ struct otx2_flow *iter, *tmp;
+ int err;
+
+ if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT))
+ return 0;
+
+ if (!flow_cfg->max_flows)
+ return 0;
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
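+	/* Delete the contiguous range of allocated ntuple MCAM entries in a single request */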
+ req->start = flow_cfg->flow_ent[0];
+ req->end = flow_cfg->flow_ent[flow_cfg->max_flows - 1];
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
+
+ list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list, list) {
+ list_del(&iter->list);
+ kfree(iter);
+ flow_cfg->nr_flows--;
+ }
+ return err;
+}
+
+int otx2_destroy_mcam_flows(struct otx2_nic *pfvf)
+{
+ struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
+ struct npc_mcam_free_entry_req *req;
+ struct otx2_flow *iter, *tmp;
+ int err;
+
+ if (!(pfvf->flags & OTX2_FLAG_MCAM_ENTRIES_ALLOC))
+ return 0;
+
+ /* remove all flows */
+ err = otx2_remove_flow_msg(pfvf, 0, true);
+ if (err)
+ return err;
+
+ list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list, list) {
+ list_del(&iter->list);
+ kfree(iter);
+ flow_cfg->nr_flows--;
+ }
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_npc_mcam_free_entry(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ req->all = 1;
+ /* Send message to AF to free MCAM entries */
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
+ }
+
+ pfvf->flags &= ~OTX2_FLAG_MCAM_ENTRIES_ALLOC;
+ mutex_unlock(&pfvf->mbox.lock);
+
+ return 0;
+}
+
+int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf)
+{
+ struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
+ struct npc_install_flow_req *req;
+ int err;
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_npc_install_flow(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ req->entry = flow_cfg->def_ent[flow_cfg->rx_vlan_offset];
+ req->intf = NIX_INTF_RX;
+ ether_addr_copy(req->packet.dmac, pfvf->netdev->dev_addr);
+ eth_broadcast_addr((u8 *)&req->mask.dmac);
+ req->channel = pfvf->hw.rx_chan_base;
+ req->op = NIX_RX_ACTION_DEFAULT;
+ req->features = BIT_ULL(NPC_OUTER_VID) | BIT_ULL(NPC_DMAC);
+ req->vtag0_valid = true;
+ req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE0;
+
+ /* Send message to AF */
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
+}
+
+static int otx2_delete_rxvlan_offload_flow(struct otx2_nic *pfvf)
+{
+ struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
+ struct npc_delete_flow_req *req;
+ int err;
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ req->entry = flow_cfg->def_ent[flow_cfg->rx_vlan_offset];
+ /* Send message to AF */
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
+}
+
+int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable)
+{
+ struct nix_vtag_config *req;
+ struct mbox_msghdr *rsp_hdr;
+ int err;
+
+	/* Don't have enough MCAM entries */
+ if (!(pf->flags & OTX2_FLAG_RX_VLAN_SUPPORT))
+ return -ENOMEM;
+
+ if (enable) {
+ err = otx2_install_rxvlan_offload_flow(pf);
+ if (err)
+ return err;
+ } else {
+ err = otx2_delete_rxvlan_offload_flow(pf);
+ if (err)
+ return err;
+ }
+
+ mutex_lock(&pf->mbox.lock);
+ req = otx2_mbox_alloc_msg_nix_vtag_cfg(&pf->mbox);
+ if (!req) {
+ mutex_unlock(&pf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ /* config strip, capture and size */
+ req->vtag_size = VTAGSIZE_T4;
+ req->cfg_type = 1; /* rx vlan cfg */
+ req->rx.vtag_type = NIX_AF_LFX_RX_VTAG_TYPE0;
+ req->rx.strip_vtag = enable;
+ req->rx.capture_vtag = enable;
+
+ err = otx2_sync_mbox_msg(&pf->mbox);
+ if (err) {
+ mutex_unlock(&pf->mbox.lock);
+ return err;
+ }
+
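+	/* Propagate the AF's response code for the VTAG configuration */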
+ rsp_hdr = otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr);
+ if (IS_ERR(rsp_hdr)) {
+ mutex_unlock(&pf->mbox.lock);
+ return PTR_ERR(rsp_hdr);
+ }
+
+ mutex_unlock(&pf->mbox.lock);
+ return rsp_hdr->rc;
+}
+
+void otx2_dmacflt_reinstall_flows(struct otx2_nic *pf)
+{
+ struct otx2_flow *iter;
+ struct ethhdr *eth_hdr;
+
+ list_for_each_entry(iter, &pf->flow_cfg->flow_list, list) {
+ if (iter->rule_type & DMAC_FILTER_RULE) {
+ eth_hdr = &iter->flow_spec.h_u.ether_spec;
+ otx2_dmacflt_add(pf, eth_hdr->h_dest,
+ iter->entry);
+ }
+ }
+}
+
+void otx2_dmacflt_update_pfmac_flow(struct otx2_nic *pfvf)
+{
+ otx2_update_rem_pfmac(pfvf, DMAC_ADDR_UPDATE);
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
new file mode 100644
index 000000000..a57455aeb
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -0,0 +1,3264 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Physical Function ethernet driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/etherdevice.h>
+#include <linux/of.h>
+#include <linux/if_vlan.h>
+#include <linux/iommu.h>
+#include <net/ip.h>
+#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
+#include <linux/bitfield.h>
+#include <net/page_pool/types.h>
+
+#include "otx2_reg.h"
+#include "otx2_common.h"
+#include "otx2_txrx.h"
+#include "otx2_struct.h"
+#include "otx2_ptp.h"
+#include "cn10k.h"
+#include "qos.h"
+#include <rvu_trace.h>
+
+#define DRV_NAME "rvu_nicpf"
+#define DRV_STRING "Marvell RVU NIC Physical Function Driver"
+
+/* Supported devices */
+static const struct pci_device_id otx2_pf_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_PF) },
+ { 0, } /* end of table */
+};
+
+MODULE_AUTHOR("Sunil Goutham <sgoutham@marvell.com>");
+MODULE_DESCRIPTION(DRV_STRING);
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(pci, otx2_pf_id_table);
+
+static void otx2_vf_link_event_task(struct work_struct *work);
+
+enum {
+ TYPE_PFAF,
+ TYPE_PFVF,
+};
+
+static int otx2_config_hw_tx_tstamp(struct otx2_nic *pfvf, bool enable);
+static int otx2_config_hw_rx_tstamp(struct otx2_nic *pfvf, bool enable);
+
+static int otx2_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+ bool if_up = netif_running(netdev);
+ int err = 0;
+
+ if (pf->xdp_prog && new_mtu > MAX_XDP_MTU) {
+ netdev_warn(netdev, "Jumbo frames not yet supported with XDP, current MTU %d.\n",
+ netdev->mtu);
+ return -EINVAL;
+ }
+ if (if_up)
+ otx2_stop(netdev);
+
+ netdev_info(netdev, "Changing MTU from %d to %d\n",
+ netdev->mtu, new_mtu);
+ netdev->mtu = new_mtu;
+
+ if (if_up)
+ err = otx2_open(netdev);
+
+ return err;
+}
+
+static void otx2_disable_flr_me_intr(struct otx2_nic *pf)
+{
+ int irq, vfs = pf->total_vfs;
+
+ /* Disable VFs ME interrupts */
+ otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(vfs));
+ irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFME0);
+ free_irq(irq, pf);
+
+ /* Disable VFs FLR interrupts */
+ otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(vfs));
+ irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFFLR0);
+ free_irq(irq, pf);
+
+ if (vfs <= 64)
+ return;
+
+ otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
+ irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFME1);
+ free_irq(irq, pf);
+
+ otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
+ irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFFLR1);
+ free_irq(irq, pf);
+}
+
+static void otx2_flr_wq_destroy(struct otx2_nic *pf)
+{
+ if (!pf->flr_wq)
+ return;
+ destroy_workqueue(pf->flr_wq);
+ pf->flr_wq = NULL;
+ devm_kfree(pf->dev, pf->flr_wrk);
+}
+
+static void otx2_flr_handler(struct work_struct *work)
+{
+ struct flr_work *flrwork = container_of(work, struct flr_work, work);
+ struct otx2_nic *pf = flrwork->pf;
+ struct mbox *mbox = &pf->mbox;
+ struct msg_req *req;
+ int vf, reg = 0;
+
+ vf = flrwork - pf->flr_wrk;
+
+ mutex_lock(&mbox->lock);
+ req = otx2_mbox_alloc_msg_vf_flr(mbox);
+ if (!req) {
+ mutex_unlock(&mbox->lock);
+ return;
+ }
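+	/* Encode the VF's function number so the AF performs FLR for that VF */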
+ req->hdr.pcifunc &= RVU_PFVF_FUNC_MASK;
+ req->hdr.pcifunc |= (vf + 1) & RVU_PFVF_FUNC_MASK;
+
+ if (!otx2_sync_mbox_msg(&pf->mbox)) {
+ if (vf >= 64) {
+ reg = 1;
+ vf = vf - 64;
+ }
+		/* Clear transaction pending bit */
+ otx2_write64(pf, RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
+ otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf));
+ }
+
+ mutex_unlock(&mbox->lock);
+}
+
+static irqreturn_t otx2_pf_flr_intr_handler(int irq, void *pf_irq)
+{
+ struct otx2_nic *pf = (struct otx2_nic *)pf_irq;
+ int reg, dev, vf, start_vf, num_reg = 1;
+ u64 intr;
+
+ if (pf->total_vfs > 64)
+ num_reg = 2;
+
+ for (reg = 0; reg < num_reg; reg++) {
+ intr = otx2_read64(pf, RVU_PF_VFFLR_INTX(reg));
+ if (!intr)
+ continue;
+ start_vf = 64 * reg;
+ for (vf = 0; vf < 64; vf++) {
+ if (!(intr & BIT_ULL(vf)))
+ continue;
+ dev = vf + start_vf;
+ queue_work(pf->flr_wq, &pf->flr_wrk[dev].work);
+ /* Clear interrupt */
+ otx2_write64(pf, RVU_PF_VFFLR_INTX(reg), BIT_ULL(vf));
+ /* Disable the interrupt */
+ otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1CX(reg),
+ BIT_ULL(vf));
+ }
+ }
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t otx2_pf_me_intr_handler(int irq, void *pf_irq)
+{
+ struct otx2_nic *pf = (struct otx2_nic *)pf_irq;
+ int vf, reg, num_reg = 1;
+ u64 intr;
+
+ if (pf->total_vfs > 64)
+ num_reg = 2;
+
+ for (reg = 0; reg < num_reg; reg++) {
+ intr = otx2_read64(pf, RVU_PF_VFME_INTX(reg));
+ if (!intr)
+ continue;
+ for (vf = 0; vf < 64; vf++) {
+ if (!(intr & BIT_ULL(vf)))
+ continue;
+			/* Clear transaction pending (trpend) bit */
+ otx2_write64(pf, RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
+ /* clear interrupt */
+ otx2_write64(pf, RVU_PF_VFME_INTX(reg), BIT_ULL(vf));
+ }
+ }
+ return IRQ_HANDLED;
+}
+
+static int otx2_register_flr_me_intr(struct otx2_nic *pf, int numvfs)
+{
+ struct otx2_hw *hw = &pf->hw;
+ char *irq_name;
+ int ret;
+
+	/* Register ME interrupt handler */
+ irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFME0 * NAME_SIZE];
+ snprintf(irq_name, NAME_SIZE, "RVUPF%d_ME0", rvu_get_pf(pf->pcifunc));
+ ret = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFME0),
+ otx2_pf_me_intr_handler, 0, irq_name, pf);
+ if (ret) {
+ dev_err(pf->dev,
+ "RVUPF: IRQ registration failed for ME0\n");
+ }
+
+ /* Register FLR interrupt handler */
+ irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFFLR0 * NAME_SIZE];
+ snprintf(irq_name, NAME_SIZE, "RVUPF%d_FLR0", rvu_get_pf(pf->pcifunc));
+ ret = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFFLR0),
+ otx2_pf_flr_intr_handler, 0, irq_name, pf);
+ if (ret) {
+ dev_err(pf->dev,
+ "RVUPF: IRQ registration failed for FLR0\n");
+ return ret;
+ }
+
+ if (numvfs > 64) {
+ irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFME1 * NAME_SIZE];
+ snprintf(irq_name, NAME_SIZE, "RVUPF%d_ME1",
+ rvu_get_pf(pf->pcifunc));
+ ret = request_irq(pci_irq_vector
+ (pf->pdev, RVU_PF_INT_VEC_VFME1),
+ otx2_pf_me_intr_handler, 0, irq_name, pf);
+ if (ret) {
+ dev_err(pf->dev,
+ "RVUPF: IRQ registration failed for ME1\n");
+ }
+ irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFFLR1 * NAME_SIZE];
+ snprintf(irq_name, NAME_SIZE, "RVUPF%d_FLR1",
+ rvu_get_pf(pf->pcifunc));
+ ret = request_irq(pci_irq_vector
+ (pf->pdev, RVU_PF_INT_VEC_VFFLR1),
+ otx2_pf_flr_intr_handler, 0, irq_name, pf);
+ if (ret) {
+ dev_err(pf->dev,
+ "RVUPF: IRQ registration failed for FLR1\n");
+ return ret;
+ }
+ }
+
+	/* Enable ME interrupt for all VFs */
+ otx2_write64(pf, RVU_PF_VFME_INTX(0), INTR_MASK(numvfs));
+ otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1SX(0), INTR_MASK(numvfs));
+
+	/* Enable FLR interrupt for all VFs */
+ otx2_write64(pf, RVU_PF_VFFLR_INTX(0), INTR_MASK(numvfs));
+ otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(numvfs));
+
+ if (numvfs > 64) {
+ numvfs -= 64;
+
+ otx2_write64(pf, RVU_PF_VFME_INTX(1), INTR_MASK(numvfs));
+ otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1SX(1),
+ INTR_MASK(numvfs));
+
+ otx2_write64(pf, RVU_PF_VFFLR_INTX(1), INTR_MASK(numvfs));
+ otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1SX(1),
+ INTR_MASK(numvfs));
+ }
+ return 0;
+}
+
+static int otx2_pf_flr_init(struct otx2_nic *pf, int num_vfs)
+{
+ int vf;
+
+ pf->flr_wq = alloc_ordered_workqueue("otx2_pf_flr_wq", WQ_HIGHPRI);
+ if (!pf->flr_wq)
+ return -ENOMEM;
+
+ pf->flr_wrk = devm_kcalloc(pf->dev, num_vfs,
+ sizeof(struct flr_work), GFP_KERNEL);
+ if (!pf->flr_wrk) {
+ destroy_workqueue(pf->flr_wq);
+ return -ENOMEM;
+ }
+
+ for (vf = 0; vf < num_vfs; vf++) {
+ pf->flr_wrk[vf].pf = pf;
+ INIT_WORK(&pf->flr_wrk[vf].work, otx2_flr_handler);
+ }
+
+ return 0;
+}
+
+static void otx2_queue_work(struct mbox *mw, struct workqueue_struct *mbox_wq,
+ int first, int mdevs, u64 intr, int type)
+{
+ struct otx2_mbox_dev *mdev;
+ struct otx2_mbox *mbox;
+ struct mbox_hdr *hdr;
+ int i;
+
+ for (i = first; i < mdevs; i++) {
+		/* Interrupt bit positions are relative to 'first', so start from 0 */
+ if (!(intr & BIT_ULL(i - first)))
+ continue;
+
+ mbox = &mw->mbox;
+ mdev = &mbox->dev[i];
+ if (type == TYPE_PFAF)
+ otx2_sync_mbox_bbuf(mbox, i);
+ hdr = mdev->mbase + mbox->rx_start;
+		/* The hdr->num_msgs is set to zero immediately in the interrupt
+		 * handler to ensure that it holds a correct value next time
+		 * when the interrupt handler is called.
+		 * pf->mbox.num_msgs holds the data for use in
+		 * pfaf_mbox_handler and pf->mbox.up_num_msgs holds the data
+		 * for use in pfaf_mbox_up_handler.
+		 */
+ if (hdr->num_msgs) {
+ mw[i].num_msgs = hdr->num_msgs;
+ hdr->num_msgs = 0;
+ if (type == TYPE_PFAF)
+ memset(mbox->hwbase + mbox->rx_start, 0,
+ ALIGN(sizeof(struct mbox_hdr),
+ sizeof(u64)));
+
+ queue_work(mbox_wq, &mw[i].mbox_wrk);
+ }
+
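+		/* Repeat the same check for the 'up' (notification) mailbox */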
+ mbox = &mw->mbox_up;
+ mdev = &mbox->dev[i];
+ if (type == TYPE_PFAF)
+ otx2_sync_mbox_bbuf(mbox, i);
+ hdr = mdev->mbase + mbox->rx_start;
+ if (hdr->num_msgs) {
+ mw[i].up_num_msgs = hdr->num_msgs;
+ hdr->num_msgs = 0;
+ if (type == TYPE_PFAF)
+ memset(mbox->hwbase + mbox->rx_start, 0,
+ ALIGN(sizeof(struct mbox_hdr),
+ sizeof(u64)));
+
+ queue_work(mbox_wq, &mw[i].mbox_up_wrk);
+ }
+ }
+}
+
+static void otx2_forward_msg_pfvf(struct otx2_mbox_dev *mdev,
+ struct otx2_mbox *pfvf_mbox, void *bbuf_base,
+ int devid)
+{
+ struct otx2_mbox_dev *src_mdev = mdev;
+ int offset;
+
+ /* Msgs are already copied, trigger VF's mbox irq */
+ smp_wmb();
+
+ offset = pfvf_mbox->trigger | (devid << pfvf_mbox->tr_shift);
+ writeq(1, (void __iomem *)pfvf_mbox->reg_base + offset);
+
+ /* Restore VF's mbox bounce buffer region address */
+ src_mdev->mbase = bbuf_base;
+}
+
+static int otx2_forward_vf_mbox_msgs(struct otx2_nic *pf,
+ struct otx2_mbox *src_mbox,
+ int dir, int vf, int num_msgs)
+{
+ struct otx2_mbox_dev *src_mdev, *dst_mdev;
+ struct mbox_hdr *mbox_hdr;
+ struct mbox_hdr *req_hdr;
+ struct mbox *dst_mbox;
+ int dst_size, err;
+
+ if (dir == MBOX_DIR_PFAF) {
+ /* Set VF's mailbox memory as PF's bounce buffer memory, so
+ * that explicit copying of VF's msgs to PF=>AF mbox region
+ * and AF=>PF responses to VF's mbox region can be avoided.
+ */
+ src_mdev = &src_mbox->dev[vf];
+ mbox_hdr = src_mbox->hwbase +
+ src_mbox->rx_start + (vf * MBOX_SIZE);
+
+ dst_mbox = &pf->mbox;
+ dst_size = dst_mbox->mbox.tx_size -
+ ALIGN(sizeof(*mbox_hdr), MBOX_MSG_ALIGN);
+ /* Check if msgs fit into destination area and has valid size */
+ if (mbox_hdr->msg_size > dst_size || !mbox_hdr->msg_size)
+ return -EINVAL;
+
+ dst_mdev = &dst_mbox->mbox.dev[0];
+
+ mutex_lock(&pf->mbox.lock);
+ dst_mdev->mbase = src_mdev->mbase;
+ dst_mdev->msg_size = mbox_hdr->msg_size;
+ dst_mdev->num_msgs = num_msgs;
+ err = otx2_sync_mbox_msg(dst_mbox);
+		/* Error code -EIO indicates there is a communication failure
+		 * to the AF. The rest of the error codes indicate that the AF
+		 * processed the VF messages and set the error codes in the
+		 * response messages (if any), so simply forward the responses
+		 * to the VF.
+		 */
+ if (err == -EIO) {
+ dev_warn(pf->dev,
+ "AF not responding to VF%d messages\n", vf);
+ /* restore PF mbase and exit */
+ dst_mdev->mbase = pf->mbox.bbuf_base;
+ mutex_unlock(&pf->mbox.lock);
+ return err;
+ }
+		/* At this point, all the VF messages sent to the AF are acked
+		 * with proper responses and the responses are copied to the
+		 * VF mailbox, hence raise an interrupt to the VF.
+		 */
+ req_hdr = (struct mbox_hdr *)(dst_mdev->mbase +
+ dst_mbox->mbox.rx_start);
+ req_hdr->num_msgs = num_msgs;
+
+ otx2_forward_msg_pfvf(dst_mdev, &pf->mbox_pfvf[0].mbox,
+ pf->mbox.bbuf_base, vf);
+ mutex_unlock(&pf->mbox.lock);
+ } else if (dir == MBOX_DIR_PFVF_UP) {
+ src_mdev = &src_mbox->dev[0];
+ mbox_hdr = src_mbox->hwbase + src_mbox->rx_start;
+ req_hdr = (struct mbox_hdr *)(src_mdev->mbase +
+ src_mbox->rx_start);
+ req_hdr->num_msgs = num_msgs;
+
+ dst_mbox = &pf->mbox_pfvf[0];
+ dst_size = dst_mbox->mbox_up.tx_size -
+ ALIGN(sizeof(*mbox_hdr), MBOX_MSG_ALIGN);
+ /* Check if msgs fit into destination area */
+ if (mbox_hdr->msg_size > dst_size)
+ return -EINVAL;
+
+ dst_mdev = &dst_mbox->mbox_up.dev[vf];
+ dst_mdev->mbase = src_mdev->mbase;
+ dst_mdev->msg_size = mbox_hdr->msg_size;
+ dst_mdev->num_msgs = mbox_hdr->num_msgs;
+ err = otx2_sync_mbox_up_msg(dst_mbox, vf);
+ if (err) {
+ dev_warn(pf->dev,
+ "VF%d is not responding to mailbox\n", vf);
+ return err;
+ }
+ } else if (dir == MBOX_DIR_VFPF_UP) {
+ req_hdr = (struct mbox_hdr *)(src_mbox->dev[0].mbase +
+ src_mbox->rx_start);
+ req_hdr->num_msgs = num_msgs;
+ otx2_forward_msg_pfvf(&pf->mbox_pfvf->mbox_up.dev[vf],
+ &pf->mbox.mbox_up,
+ pf->mbox_pfvf[vf].bbuf_base,
+ 0);
+ }
+
+ return 0;
+}
+
+static void otx2_pfvf_mbox_handler(struct work_struct *work)
+{
+ struct mbox_msghdr *msg = NULL;
+ int offset, vf_idx, id, err;
+ struct otx2_mbox_dev *mdev;
+ struct mbox_hdr *req_hdr;
+ struct otx2_mbox *mbox;
+ struct mbox *vf_mbox;
+ struct otx2_nic *pf;
+
+ vf_mbox = container_of(work, struct mbox, mbox_wrk);
+ pf = vf_mbox->pfvf;
+ vf_idx = vf_mbox - pf->mbox_pfvf;
+
+ mbox = &pf->mbox_pfvf[0].mbox;
+ mdev = &mbox->dev[vf_idx];
+ req_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+
+ offset = ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
+
+ for (id = 0; id < vf_mbox->num_msgs; id++) {
+ msg = (struct mbox_msghdr *)(mdev->mbase + mbox->rx_start +
+ offset);
+
+ if (msg->sig != OTX2_MBOX_REQ_SIG)
+ goto inval_msg;
+
+ /* Set VF's number in each of the msg */
+ msg->pcifunc &= RVU_PFVF_FUNC_MASK;
+ msg->pcifunc |= (vf_idx + 1) & RVU_PFVF_FUNC_MASK;
+ offset = msg->next_msgoff;
+ }
+ err = otx2_forward_vf_mbox_msgs(pf, mbox, MBOX_DIR_PFAF, vf_idx,
+ vf_mbox->num_msgs);
+ if (err)
+ goto inval_msg;
+ return;
+
+inval_msg:
+ otx2_reply_invalid_msg(mbox, vf_idx, 0, msg->id);
+ otx2_mbox_msg_send(mbox, vf_idx);
+}
+
+static void otx2_pfvf_mbox_up_handler(struct work_struct *work)
+{
+ struct mbox *vf_mbox = container_of(work, struct mbox, mbox_up_wrk);
+ struct otx2_nic *pf = vf_mbox->pfvf;
+ struct otx2_mbox_dev *mdev;
+ int offset, id, vf_idx = 0;
+ struct mbox_hdr *rsp_hdr;
+ struct mbox_msghdr *msg;
+ struct otx2_mbox *mbox;
+
+ vf_idx = vf_mbox - pf->mbox_pfvf;
+ mbox = &pf->mbox_pfvf[0].mbox_up;
+ mdev = &mbox->dev[vf_idx];
+
+ rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
+
+ for (id = 0; id < vf_mbox->up_num_msgs; id++) {
+ msg = mdev->mbase + offset;
+
+ if (msg->id >= MBOX_MSG_MAX) {
+ dev_err(pf->dev,
+ "Mbox msg with unknown ID 0x%x\n", msg->id);
+ goto end;
+ }
+
+ if (msg->sig != OTX2_MBOX_RSP_SIG) {
+ dev_err(pf->dev,
+ "Mbox msg with wrong signature %x, ID 0x%x\n",
+ msg->sig, msg->id);
+ goto end;
+ }
+
+ switch (msg->id) {
+ case MBOX_MSG_CGX_LINK_EVENT:
+ break;
+ default:
+ if (msg->rc)
+ dev_err(pf->dev,
+ "Mbox msg response has err %d, ID 0x%x\n",
+ msg->rc, msg->id);
+ break;
+ }
+
+end:
+ offset = mbox->rx_start + msg->next_msgoff;
+ if (mdev->msgs_acked == (vf_mbox->up_num_msgs - 1))
+ __otx2_mbox_reset(mbox, 0);
+ mdev->msgs_acked++;
+ }
+}
+
+static irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq)
+{
+ struct otx2_nic *pf = (struct otx2_nic *)(pf_irq);
+ int vfs = pf->total_vfs;
+ struct mbox *mbox;
+ u64 intr;
+
+ mbox = pf->mbox_pfvf;
+ /* Handle VF interrupts */
+ if (vfs > 64) {
+ intr = otx2_read64(pf, RVU_PF_VFPF_MBOX_INTX(1));
+ otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), intr);
+ otx2_queue_work(mbox, pf->mbox_pfvf_wq, 64, vfs, intr,
+ TYPE_PFVF);
+ if (intr)
+ trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr);
+ vfs = 64;
+ }
+
+ intr = otx2_read64(pf, RVU_PF_VFPF_MBOX_INTX(0));
+ otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(0), intr);
+
+ otx2_queue_work(mbox, pf->mbox_pfvf_wq, 0, vfs, intr, TYPE_PFVF);
+
+ if (intr)
+ trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr);
+
+ return IRQ_HANDLED;
+}
+
+static int otx2_pfvf_mbox_init(struct otx2_nic *pf, int numvfs)
+{
+ void __iomem *hwbase;
+ struct mbox *mbox;
+ int err, vf;
+ u64 base;
+
+ if (!numvfs)
+ return -EINVAL;
+
+ pf->mbox_pfvf = devm_kcalloc(&pf->pdev->dev, numvfs,
+ sizeof(struct mbox), GFP_KERNEL);
+ if (!pf->mbox_pfvf)
+ return -ENOMEM;
+
+ pf->mbox_pfvf_wq = alloc_ordered_workqueue("otx2_pfvf_mailbox",
+ WQ_HIGHPRI | WQ_MEM_RECLAIM);
+ if (!pf->mbox_pfvf_wq)
+ return -ENOMEM;
+
+ /* On CN10K platform, PF <-> VF mailbox region follows after
+ * PF <-> AF mailbox region.
+ */
+ if (test_bit(CN10K_MBOX, &pf->hw.cap_flag))
+ base = pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM) +
+ MBOX_SIZE;
+ else
+ base = readq((void __iomem *)((u64)pf->reg_base +
+ RVU_PF_VF_BAR4_ADDR));
+
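+	/* Map the PF <-> VF mailbox memory for all possible VFs of this PF */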
+ hwbase = ioremap_wc(base, MBOX_SIZE * pf->total_vfs);
+ if (!hwbase) {
+ err = -ENOMEM;
+ goto free_wq;
+ }
+
+ mbox = &pf->mbox_pfvf[0];
+ err = otx2_mbox_init(&mbox->mbox, hwbase, pf->pdev, pf->reg_base,
+ MBOX_DIR_PFVF, numvfs);
+ if (err)
+ goto free_iomem;
+
+ err = otx2_mbox_init(&mbox->mbox_up, hwbase, pf->pdev, pf->reg_base,
+ MBOX_DIR_PFVF_UP, numvfs);
+ if (err)
+ goto free_iomem;
+
+ for (vf = 0; vf < numvfs; vf++) {
+ mbox->pfvf = pf;
+ INIT_WORK(&mbox->mbox_wrk, otx2_pfvf_mbox_handler);
+ INIT_WORK(&mbox->mbox_up_wrk, otx2_pfvf_mbox_up_handler);
+ mbox++;
+ }
+
+ return 0;
+
+free_iomem:
+ if (hwbase)
+ iounmap(hwbase);
+free_wq:
+ destroy_workqueue(pf->mbox_pfvf_wq);
+ return err;
+}
+
+static void otx2_pfvf_mbox_destroy(struct otx2_nic *pf)
+{
+ struct mbox *mbox = &pf->mbox_pfvf[0];
+
+ if (!mbox)
+ return;
+
+ if (pf->mbox_pfvf_wq) {
+ destroy_workqueue(pf->mbox_pfvf_wq);
+ pf->mbox_pfvf_wq = NULL;
+ }
+
+ if (mbox->mbox.hwbase)
+ iounmap(mbox->mbox.hwbase);
+
+ otx2_mbox_destroy(&mbox->mbox);
+}
+
+static void otx2_enable_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
+{
+ /* Clear PF <=> VF mailbox IRQ */
+ otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(0), ~0ull);
+ otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), ~0ull);
+
+ /* Enable PF <=> VF mailbox IRQ */
+ otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(0), INTR_MASK(numvfs));
+ if (numvfs > 64) {
+ numvfs -= 64;
+ otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(1),
+ INTR_MASK(numvfs));
+ }
+}
+
+static void otx2_disable_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
+{
+ int vector;
+
+ /* Disable PF <=> VF mailbox IRQ */
+ otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), ~0ull);
+ otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1), ~0ull);
+
+ otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(0), ~0ull);
+ vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFPF_MBOX0);
+ free_irq(vector, pf);
+
+ if (numvfs > 64) {
+ otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), ~0ull);
+ vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFPF_MBOX1);
+ free_irq(vector, pf);
+ }
+}
+
+static int otx2_register_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
+{
+ struct otx2_hw *hw = &pf->hw;
+ char *irq_name;
+ int err;
+
+ /* Register MBOX0 interrupt handler */
+ irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFPF_MBOX0 * NAME_SIZE];
+ if (pf->pcifunc)
+ snprintf(irq_name, NAME_SIZE,
+ "RVUPF%d_VF Mbox0", rvu_get_pf(pf->pcifunc));
+ else
+ snprintf(irq_name, NAME_SIZE, "RVUPF_VF Mbox0");
+ err = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFPF_MBOX0),
+ otx2_pfvf_mbox_intr_handler, 0, irq_name, pf);
+ if (err) {
+ dev_err(pf->dev,
+ "RVUPF: IRQ registration failed for PFVF mbox0 irq\n");
+ return err;
+ }
+
+ if (numvfs > 64) {
+ /* Register MBOX1 interrupt handler */
+ irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFPF_MBOX1 * NAME_SIZE];
+ if (pf->pcifunc)
+ snprintf(irq_name, NAME_SIZE,
+ "RVUPF%d_VF Mbox1", rvu_get_pf(pf->pcifunc));
+ else
+ snprintf(irq_name, NAME_SIZE, "RVUPF_VF Mbox1");
+ err = request_irq(pci_irq_vector(pf->pdev,
+ RVU_PF_INT_VEC_VFPF_MBOX1),
+ otx2_pfvf_mbox_intr_handler,
+ 0, irq_name, pf);
+ if (err) {
+ dev_err(pf->dev,
+ "RVUPF: IRQ registration failed for PFVF mbox1 irq\n");
+ return err;
+ }
+ }
+
+ otx2_enable_pfvf_mbox_intr(pf, numvfs);
+
+ return 0;
+}
+
+static void otx2_process_pfaf_mbox_msg(struct otx2_nic *pf,
+ struct mbox_msghdr *msg)
+{
+ int devid;
+
+ if (msg->id >= MBOX_MSG_MAX) {
+ dev_err(pf->dev,
+ "Mbox msg with unknown ID 0x%x\n", msg->id);
+ return;
+ }
+
+ if (msg->sig != OTX2_MBOX_RSP_SIG) {
+ dev_err(pf->dev,
+ "Mbox msg with wrong signature %x, ID 0x%x\n",
+ msg->sig, msg->id);
+ return;
+ }
+
+	/* Message response is destined for a VF */
+ devid = msg->pcifunc & RVU_PFVF_FUNC_MASK;
+ if (devid) {
+ struct otx2_vf_config *config = &pf->vf_configs[devid - 1];
+ struct delayed_work *dwork;
+
+ switch (msg->id) {
+ case MBOX_MSG_NIX_LF_START_RX:
+ config->intf_down = false;
+ dwork = &config->link_event_work;
+ schedule_delayed_work(dwork, msecs_to_jiffies(100));
+ break;
+ case MBOX_MSG_NIX_LF_STOP_RX:
+ config->intf_down = true;
+ break;
+ }
+
+ return;
+ }
+
+ switch (msg->id) {
+ case MBOX_MSG_READY:
+ pf->pcifunc = msg->pcifunc;
+ break;
+ case MBOX_MSG_MSIX_OFFSET:
+ mbox_handler_msix_offset(pf, (struct msix_offset_rsp *)msg);
+ break;
+ case MBOX_MSG_NPA_LF_ALLOC:
+ mbox_handler_npa_lf_alloc(pf, (struct npa_lf_alloc_rsp *)msg);
+ break;
+ case MBOX_MSG_NIX_LF_ALLOC:
+ mbox_handler_nix_lf_alloc(pf, (struct nix_lf_alloc_rsp *)msg);
+ break;
+ case MBOX_MSG_NIX_BP_ENABLE:
+ mbox_handler_nix_bp_enable(pf, (struct nix_bp_cfg_rsp *)msg);
+ break;
+ case MBOX_MSG_CGX_STATS:
+ mbox_handler_cgx_stats(pf, (struct cgx_stats_rsp *)msg);
+ break;
+ case MBOX_MSG_CGX_FEC_STATS:
+ mbox_handler_cgx_fec_stats(pf, (struct cgx_fec_stats_rsp *)msg);
+ break;
+ default:
+ if (msg->rc)
+ dev_err(pf->dev,
+ "Mbox msg response has err %d, ID 0x%x\n",
+ msg->rc, msg->id);
+ break;
+ }
+}
+
+static void otx2_pfaf_mbox_handler(struct work_struct *work)
+{
+ struct otx2_mbox_dev *mdev;
+ struct mbox_hdr *rsp_hdr;
+ struct mbox_msghdr *msg;
+ struct otx2_mbox *mbox;
+ struct mbox *af_mbox;
+ struct otx2_nic *pf;
+ int offset, id;
+
+ af_mbox = container_of(work, struct mbox, mbox_wrk);
+ mbox = &af_mbox->mbox;
+ mdev = &mbox->dev[0];
+ rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+
+ offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
+ pf = af_mbox->pfvf;
+
+ for (id = 0; id < af_mbox->num_msgs; id++) {
+ msg = (struct mbox_msghdr *)(mdev->mbase + offset);
+ otx2_process_pfaf_mbox_msg(pf, msg);
+ offset = mbox->rx_start + msg->next_msgoff;
+ if (mdev->msgs_acked == (af_mbox->num_msgs - 1))
+ __otx2_mbox_reset(mbox, 0);
+ mdev->msgs_acked++;
+ }
+
+}
+
+static void otx2_handle_link_event(struct otx2_nic *pf)
+{
+ struct cgx_link_user_info *linfo = &pf->linfo;
+ struct net_device *netdev = pf->netdev;
+
+ pr_info("%s NIC Link is %s %d Mbps %s duplex\n", netdev->name,
+ linfo->link_up ? "UP" : "DOWN", linfo->speed,
+ linfo->full_duplex ? "Full" : "Half");
+ if (linfo->link_up) {
+ netif_carrier_on(netdev);
+ netif_tx_start_all_queues(netdev);
+ } else {
+ netif_tx_stop_all_queues(netdev);
+ netif_carrier_off(netdev);
+ }
+}
+
+int otx2_mbox_up_handler_mcs_intr_notify(struct otx2_nic *pf,
+ struct mcs_intr_info *event,
+ struct msg_rsp *rsp)
+{
+ cn10k_handle_mcs_event(pf, event);
+
+ return 0;
+}
+
+int otx2_mbox_up_handler_cgx_link_event(struct otx2_nic *pf,
+ struct cgx_link_info_msg *msg,
+ struct msg_rsp *rsp)
+{
+ int i;
+
+ /* Copy the link info sent by AF */
+ pf->linfo = msg->link_info;
+
+ /* notify VFs about link event */
+ for (i = 0; i < pci_num_vf(pf->pdev); i++) {
+ struct otx2_vf_config *config = &pf->vf_configs[i];
+ struct delayed_work *dwork = &config->link_event_work;
+
+ if (config->intf_down)
+ continue;
+
+ schedule_delayed_work(dwork, msecs_to_jiffies(100));
+ }
+
+ /* interface has not been fully configured yet */
+ if (pf->flags & OTX2_FLAG_INTF_DOWN)
+ return 0;
+
+ otx2_handle_link_event(pf);
+ return 0;
+}
+
+static int otx2_process_mbox_msg_up(struct otx2_nic *pf,
+ struct mbox_msghdr *req)
+{
+	/* Check if valid; if not, reply with an invalid msg */
+ if (req->sig != OTX2_MBOX_REQ_SIG) {
+ otx2_reply_invalid_msg(&pf->mbox.mbox_up, 0, 0, req->id);
+ return -ENODEV;
+ }
+
+ switch (req->id) {
+#define M(_name, _id, _fn_name, _req_type, _rsp_type) \
+ case _id: { \
+ struct _rsp_type *rsp; \
+ int err; \
+ \
+ rsp = (struct _rsp_type *)otx2_mbox_alloc_msg( \
+ &pf->mbox.mbox_up, 0, \
+ sizeof(struct _rsp_type)); \
+ if (!rsp) \
+ return -ENOMEM; \
+ \
+ rsp->hdr.id = _id; \
+ rsp->hdr.sig = OTX2_MBOX_RSP_SIG; \
+ rsp->hdr.pcifunc = 0; \
+ rsp->hdr.rc = 0; \
+ \
+ err = otx2_mbox_up_handler_ ## _fn_name( \
+ pf, (struct _req_type *)req, rsp); \
+ return err; \
+ }
+MBOX_UP_CGX_MESSAGES
+MBOX_UP_MCS_MESSAGES
+#undef M
+ break;
+ default:
+ otx2_reply_invalid_msg(&pf->mbox.mbox_up, 0, 0, req->id);
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static void otx2_pfaf_mbox_up_handler(struct work_struct *work)
+{
+ struct mbox *af_mbox = container_of(work, struct mbox, mbox_up_wrk);
+ struct otx2_mbox *mbox = &af_mbox->mbox_up;
+ struct otx2_mbox_dev *mdev = &mbox->dev[0];
+ struct otx2_nic *pf = af_mbox->pfvf;
+ int offset, id, devid = 0;
+ struct mbox_hdr *rsp_hdr;
+ struct mbox_msghdr *msg;
+
+ rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+
+ offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
+
+ for (id = 0; id < af_mbox->up_num_msgs; id++) {
+ msg = (struct mbox_msghdr *)(mdev->mbase + offset);
+
+ devid = msg->pcifunc & RVU_PFVF_FUNC_MASK;
+ /* Skip processing VF's messages */
+ if (!devid)
+ otx2_process_mbox_msg_up(pf, msg);
+ offset = mbox->rx_start + msg->next_msgoff;
+ }
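+	/* Forward VF-directed notifications to the corresponding VF's mailbox */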
+ if (devid) {
+ otx2_forward_vf_mbox_msgs(pf, &pf->mbox.mbox_up,
+ MBOX_DIR_PFVF_UP, devid - 1,
+ af_mbox->up_num_msgs);
+ return;
+ }
+
+ otx2_mbox_msg_send(mbox, 0);
+}
+
+static irqreturn_t otx2_pfaf_mbox_intr_handler(int irq, void *pf_irq)
+{
+ struct otx2_nic *pf = (struct otx2_nic *)pf_irq;
+ struct mbox *mbox;
+
+ /* Clear the IRQ */
+ otx2_write64(pf, RVU_PF_INT, BIT_ULL(0));
+
+ mbox = &pf->mbox;
+
+ trace_otx2_msg_interrupt(mbox->mbox.pdev, "AF to PF", BIT_ULL(0));
+
+ otx2_queue_work(mbox, pf->mbox_wq, 0, 1, 1, TYPE_PFAF);
+
+ return IRQ_HANDLED;
+}
+
+static void otx2_disable_mbox_intr(struct otx2_nic *pf)
+{
+ int vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_AFPF_MBOX);
+
+ /* Disable AF => PF mailbox IRQ */
+ otx2_write64(pf, RVU_PF_INT_ENA_W1C, BIT_ULL(0));
+ free_irq(vector, pf);
+}
+
+static int otx2_register_mbox_intr(struct otx2_nic *pf, bool probe_af)
+{
+ struct otx2_hw *hw = &pf->hw;
+ struct msg_req *req;
+ char *irq_name;
+ int err;
+
+ /* Register mailbox interrupt handler */
+ irq_name = &hw->irq_name[RVU_PF_INT_VEC_AFPF_MBOX * NAME_SIZE];
+ snprintf(irq_name, NAME_SIZE, "RVUPFAF Mbox");
+ err = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_AFPF_MBOX),
+ otx2_pfaf_mbox_intr_handler, 0, irq_name, pf);
+ if (err) {
+ dev_err(pf->dev,
+ "RVUPF: IRQ registration failed for PFAF mbox irq\n");
+ return err;
+ }
+
+ /* Enable mailbox interrupt for msgs coming from AF.
+ * First clear to avoid spurious interrupts, if any.
+ */
+ otx2_write64(pf, RVU_PF_INT, BIT_ULL(0));
+ otx2_write64(pf, RVU_PF_INT_ENA_W1S, BIT_ULL(0));
+
+ if (!probe_af)
+ return 0;
+
+ /* Check mailbox communication with AF */
+ req = otx2_mbox_alloc_msg_ready(&pf->mbox);
+ if (!req) {
+ otx2_disable_mbox_intr(pf);
+ return -ENOMEM;
+ }
+ err = otx2_sync_mbox_msg(&pf->mbox);
+ if (err) {
+ dev_warn(pf->dev,
+ "AF not responding to mailbox, deferring probe\n");
+ otx2_disable_mbox_intr(pf);
+ return -EPROBE_DEFER;
+ }
+
+ return 0;
+}
+
+static void otx2_pfaf_mbox_destroy(struct otx2_nic *pf)
+{
+ struct mbox *mbox = &pf->mbox;
+
+ if (pf->mbox_wq) {
+ destroy_workqueue(pf->mbox_wq);
+ pf->mbox_wq = NULL;
+ }
+
+ if (mbox->mbox.hwbase)
+ iounmap((void __iomem *)mbox->mbox.hwbase);
+
+ otx2_mbox_destroy(&mbox->mbox);
+ otx2_mbox_destroy(&mbox->mbox_up);
+}
+
+static int otx2_pfaf_mbox_init(struct otx2_nic *pf)
+{
+ struct mbox *mbox = &pf->mbox;
+ void __iomem *hwbase;
+ int err;
+
+ mbox->pfvf = pf;
+ pf->mbox_wq = alloc_ordered_workqueue("otx2_pfaf_mailbox",
+ WQ_HIGHPRI | WQ_MEM_RECLAIM);
+ if (!pf->mbox_wq)
+ return -ENOMEM;
+
+	/* Mailbox is a reserved memory (in RAM) region shared between
+	 * the admin function (i.e. AF) and this PF; it shouldn't be mapped
+	 * as device memory, so that unaligned accesses are allowed.
+	 */
+ hwbase = ioremap_wc(pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM),
+ MBOX_SIZE);
+ if (!hwbase) {
+ dev_err(pf->dev, "Unable to map PFAF mailbox region\n");
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ err = otx2_mbox_init(&mbox->mbox, hwbase, pf->pdev, pf->reg_base,
+ MBOX_DIR_PFAF, 1);
+ if (err)
+ goto exit;
+
+ err = otx2_mbox_init(&mbox->mbox_up, hwbase, pf->pdev, pf->reg_base,
+ MBOX_DIR_PFAF_UP, 1);
+ if (err)
+ goto exit;
+
+ err = otx2_mbox_bbuf_init(mbox, pf->pdev);
+ if (err)
+ goto exit;
+
+ INIT_WORK(&mbox->mbox_wrk, otx2_pfaf_mbox_handler);
+ INIT_WORK(&mbox->mbox_up_wrk, otx2_pfaf_mbox_up_handler);
+ mutex_init(&mbox->lock);
+
+ return 0;
+exit:
+ otx2_pfaf_mbox_destroy(pf);
+ return err;
+}
+
+static int otx2_cgx_config_linkevents(struct otx2_nic *pf, bool enable)
+{
+ struct msg_req *msg;
+ int err;
+
+ mutex_lock(&pf->mbox.lock);
+ if (enable)
+ msg = otx2_mbox_alloc_msg_cgx_start_linkevents(&pf->mbox);
+ else
+ msg = otx2_mbox_alloc_msg_cgx_stop_linkevents(&pf->mbox);
+
+ if (!msg) {
+ mutex_unlock(&pf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ err = otx2_sync_mbox_msg(&pf->mbox);
+ mutex_unlock(&pf->mbox.lock);
+ return err;
+}
+
+static int otx2_cgx_config_loopback(struct otx2_nic *pf, bool enable)
+{
+ struct msg_req *msg;
+ int err;
+
+ if (enable && !bitmap_empty(pf->flow_cfg->dmacflt_bmap,
+ pf->flow_cfg->dmacflt_max_flows))
+ netdev_warn(pf->netdev,
+ "CGX/RPM internal loopback might not work as DMAC filters are active\n");
+
+ mutex_lock(&pf->mbox.lock);
+ if (enable)
+ msg = otx2_mbox_alloc_msg_cgx_intlbk_enable(&pf->mbox);
+ else
+ msg = otx2_mbox_alloc_msg_cgx_intlbk_disable(&pf->mbox);
+
+ if (!msg) {
+ mutex_unlock(&pf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ err = otx2_sync_mbox_msg(&pf->mbox);
+ mutex_unlock(&pf->mbox.lock);
+ return err;
+}
+
+int otx2_set_real_num_queues(struct net_device *netdev,
+ int tx_queues, int rx_queues)
+{
+ int err;
+
+ err = netif_set_real_num_tx_queues(netdev, tx_queues);
+ if (err) {
+ netdev_err(netdev,
+ "Failed to set no of Tx queues: %d\n", tx_queues);
+ return err;
+ }
+
+ err = netif_set_real_num_rx_queues(netdev, rx_queues);
+ if (err)
+ netdev_err(netdev,
+ "Failed to set no of Rx queues: %d\n", rx_queues);
+ return err;
+}
+EXPORT_SYMBOL(otx2_set_real_num_queues);
+
+static char *nix_sqoperr_e_str[NIX_SQOPERR_MAX] = {
+ "NIX_SQOPERR_OOR",
+ "NIX_SQOPERR_CTX_FAULT",
+ "NIX_SQOPERR_CTX_POISON",
+ "NIX_SQOPERR_DISABLED",
+ "NIX_SQOPERR_SIZE_ERR",
+ "NIX_SQOPERR_OFLOW",
+ "NIX_SQOPERR_SQB_NULL",
+ "NIX_SQOPERR_SQB_FAULT",
+ "NIX_SQOPERR_SQE_SZ_ZERO",
+};
+
+static char *nix_mnqerr_e_str[NIX_MNQERR_MAX] = {
+ "NIX_MNQERR_SQ_CTX_FAULT",
+ "NIX_MNQERR_SQ_CTX_POISON",
+ "NIX_MNQERR_SQB_FAULT",
+ "NIX_MNQERR_SQB_POISON",
+ "NIX_MNQERR_TOTAL_ERR",
+ "NIX_MNQERR_LSO_ERR",
+ "NIX_MNQERR_CQ_QUERY_ERR",
+ "NIX_MNQERR_MAX_SQE_SIZE_ERR",
+ "NIX_MNQERR_MAXLEN_ERR",
+ "NIX_MNQERR_SQE_SIZEM1_ZERO",
+};
+
+static char *nix_snd_status_e_str[NIX_SND_STATUS_MAX] = {
+ [NIX_SND_STATUS_GOOD] = "NIX_SND_STATUS_GOOD",
+ [NIX_SND_STATUS_SQ_CTX_FAULT] = "NIX_SND_STATUS_SQ_CTX_FAULT",
+ [NIX_SND_STATUS_SQ_CTX_POISON] = "NIX_SND_STATUS_SQ_CTX_POISON",
+ [NIX_SND_STATUS_SQB_FAULT] = "NIX_SND_STATUS_SQB_FAULT",
+ [NIX_SND_STATUS_SQB_POISON] = "NIX_SND_STATUS_SQB_POISON",
+ [NIX_SND_STATUS_HDR_ERR] = "NIX_SND_STATUS_HDR_ERR",
+ [NIX_SND_STATUS_EXT_ERR] = "NIX_SND_STATUS_EXT_ERR",
+ [NIX_SND_STATUS_JUMP_FAULT] = "NIX_SND_STATUS_JUMP_FAULT",
+ [NIX_SND_STATUS_JUMP_POISON] = "NIX_SND_STATUS_JUMP_POISON",
+ [NIX_SND_STATUS_CRC_ERR] = "NIX_SND_STATUS_CRC_ERR",
+ [NIX_SND_STATUS_IMM_ERR] = "NIX_SND_STATUS_IMM_ERR",
+ [NIX_SND_STATUS_SG_ERR] = "NIX_SND_STATUS_SG_ERR",
+ [NIX_SND_STATUS_MEM_ERR] = "NIX_SND_STATUS_MEM_ERR",
+ [NIX_SND_STATUS_INVALID_SUBDC] = "NIX_SND_STATUS_INVALID_SUBDC",
+ [NIX_SND_STATUS_SUBDC_ORDER_ERR] = "NIX_SND_STATUS_SUBDC_ORDER_ERR",
+ [NIX_SND_STATUS_DATA_FAULT] = "NIX_SND_STATUS_DATA_FAULT",
+ [NIX_SND_STATUS_DATA_POISON] = "NIX_SND_STATUS_DATA_POISON",
+ [NIX_SND_STATUS_NPC_DROP_ACTION] = "NIX_SND_STATUS_NPC_DROP_ACTION",
+ [NIX_SND_STATUS_LOCK_VIOL] = "NIX_SND_STATUS_LOCK_VIOL",
+ [NIX_SND_STATUS_NPC_UCAST_CHAN_ERR] = "NIX_SND_STAT_NPC_UCAST_CHAN_ERR",
+ [NIX_SND_STATUS_NPC_MCAST_CHAN_ERR] = "NIX_SND_STAT_NPC_MCAST_CHAN_ERR",
+ [NIX_SND_STATUS_NPC_MCAST_ABORT] = "NIX_SND_STATUS_NPC_MCAST_ABORT",
+ [NIX_SND_STATUS_NPC_VTAG_PTR_ERR] = "NIX_SND_STATUS_NPC_VTAG_PTR_ERR",
+ [NIX_SND_STATUS_NPC_VTAG_SIZE_ERR] = "NIX_SND_STATUS_NPC_VTAG_SIZE_ERR",
+ [NIX_SND_STATUS_SEND_MEM_FAULT] = "NIX_SND_STATUS_SEND_MEM_FAULT",
+ [NIX_SND_STATUS_SEND_STATS_ERR] = "NIX_SND_STATUS_SEND_STATS_ERR",
+};
+
+static irqreturn_t otx2_q_intr_handler(int irq, void *data)
+{
+ struct otx2_nic *pf = data;
+ struct otx2_snd_queue *sq;
+ u64 val, *ptr;
+ u64 qidx = 0;
+
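+ /* Per-queue interrupt status is read by writing the queue index
+ * (shifted into bits <63:44>) to the *_OP_INT register via an atomic
+ * add; the returned value carries that queue's interrupt bits, which
+ * the handler then writes back to acknowledge them.
+ */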
+ /* CQ */
+ for (qidx = 0; qidx < pf->qset.cq_cnt; qidx++) {
+ ptr = otx2_get_regaddr(pf, NIX_LF_CQ_OP_INT);
+ val = otx2_atomic64_add((qidx << 44), ptr);
+
+ otx2_write64(pf, NIX_LF_CQ_OP_INT, (qidx << 44) |
+ (val & NIX_CQERRINT_BITS));
+ if (!(val & (NIX_CQERRINT_BITS | BIT_ULL(42))))
+ continue;
+
+ if (val & BIT_ULL(42)) {
+ netdev_err(pf->netdev,
+ "CQ%lld: error reading NIX_LF_CQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n",
+ qidx, otx2_read64(pf, NIX_LF_ERR_INT));
+ } else {
+ if (val & BIT_ULL(NIX_CQERRINT_DOOR_ERR))
+ netdev_err(pf->netdev, "CQ%lld: Doorbell error",
+ qidx);
+ if (val & BIT_ULL(NIX_CQERRINT_CQE_FAULT))
+ netdev_err(pf->netdev,
+ "CQ%lld: Memory fault on CQE write to LLC/DRAM",
+ qidx);
+ }
+
+ schedule_work(&pf->reset_task);
+ }
+
+ /* SQ */
+ for (qidx = 0; qidx < otx2_get_total_tx_queues(pf); qidx++) {
+ u64 sq_op_err_dbg, mnq_err_dbg, snd_err_dbg;
+ u8 sq_op_err_code, mnq_err_code, snd_err_code;
+
+ sq = &pf->qset.sq[qidx];
+ if (!sq->sqb_ptrs)
+ continue;
+
+ /* The debug registers below capture the first error reported in
+ * each of them. We don't have to check against the SQ qid as
+ * these are fatal errors.
+ */
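+ /* In each *_ERR_DBG register checked below, bit 44 flags a valid
+ * capture and bits <7:0> carry the error code; the handler writes
+ * bit 44 back to acknowledge the captured error.
+ */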
+
+ ptr = otx2_get_regaddr(pf, NIX_LF_SQ_OP_INT);
+ val = otx2_atomic64_add((qidx << 44), ptr);
+ otx2_write64(pf, NIX_LF_SQ_OP_INT, (qidx << 44) |
+ (val & NIX_SQINT_BITS));
+
+ if (val & BIT_ULL(42)) {
+ netdev_err(pf->netdev,
+ "SQ%lld: error reading NIX_LF_SQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n",
+ qidx, otx2_read64(pf, NIX_LF_ERR_INT));
+ goto done;
+ }
+
+ sq_op_err_dbg = otx2_read64(pf, NIX_LF_SQ_OP_ERR_DBG);
+ if (!(sq_op_err_dbg & BIT(44)))
+ goto chk_mnq_err_dbg;
+
+ sq_op_err_code = FIELD_GET(GENMASK(7, 0), sq_op_err_dbg);
+ netdev_err(pf->netdev,
+ "SQ%lld: NIX_LF_SQ_OP_ERR_DBG(0x%llx) err=%s(%#x)\n",
+ qidx, sq_op_err_dbg,
+ nix_sqoperr_e_str[sq_op_err_code],
+ sq_op_err_code);
+
+ otx2_write64(pf, NIX_LF_SQ_OP_ERR_DBG, BIT_ULL(44));
+
+ if (sq_op_err_code == NIX_SQOPERR_SQB_NULL)
+ goto chk_mnq_err_dbg;
+
+ /* The error is not NIX_SQOPERR_SQB_NULL; ideally the AQ should be
+ * queried here to read the SQ context.
+ * TODO: we are in IRQ context and the mbox functions sleep, so they
+ * cannot be called from here.
+ */
+
+chk_mnq_err_dbg:
+ mnq_err_dbg = otx2_read64(pf, NIX_LF_MNQ_ERR_DBG);
+ if (!(mnq_err_dbg & BIT(44)))
+ goto chk_snd_err_dbg;
+
+ mnq_err_code = FIELD_GET(GENMASK(7, 0), mnq_err_dbg);
+ netdev_err(pf->netdev,
+ "SQ%lld: NIX_LF_MNQ_ERR_DBG(0x%llx) err=%s(%#x)\n",
+ qidx, mnq_err_dbg, nix_mnqerr_e_str[mnq_err_code],
+ mnq_err_code);
+ otx2_write64(pf, NIX_LF_MNQ_ERR_DBG, BIT_ULL(44));
+
+chk_snd_err_dbg:
+ snd_err_dbg = otx2_read64(pf, NIX_LF_SEND_ERR_DBG);
+ if (snd_err_dbg & BIT(44)) {
+ snd_err_code = FIELD_GET(GENMASK(7, 0), snd_err_dbg);
+ netdev_err(pf->netdev,
+ "SQ%lld: NIX_LF_SND_ERR_DBG:0x%llx err=%s(%#x)\n",
+ qidx, snd_err_dbg,
+ nix_snd_status_e_str[snd_err_code],
+ snd_err_code);
+ otx2_write64(pf, NIX_LF_SEND_ERR_DBG, BIT_ULL(44));
+ }
+
+done:
+ /* Print values and reset */
+ if (val & BIT_ULL(NIX_SQINT_SQB_ALLOC_FAIL))
+ netdev_err(pf->netdev, "SQ%lld: SQB allocation failed",
+ qidx);
+
+ schedule_work(&pf->reset_task);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t otx2_cq_intr_handler(int irq, void *cq_irq)
+{
+ struct otx2_cq_poll *cq_poll = (struct otx2_cq_poll *)cq_irq;
+ struct otx2_nic *pf = (struct otx2_nic *)cq_poll->dev;
+ int qidx = cq_poll->cint_idx;
+
+ /* Disable interrupts.
+ *
+ * Completion interrupts are level triggered, hence they must be
+ * cleared only after they are serviced.
+ */
+ otx2_write64(pf, NIX_LF_CINTX_ENA_W1C(qidx), BIT_ULL(0));
+
+ /* Schedule NAPI */
+ pf->napi_events++;
+ napi_schedule_irqoff(&cq_poll->napi);
+
+ return IRQ_HANDLED;
+}
+
+static void otx2_disable_napi(struct otx2_nic *pf)
+{
+ struct otx2_qset *qset = &pf->qset;
+ struct otx2_cq_poll *cq_poll;
+ int qidx;
+
+ for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
+ cq_poll = &qset->napi[qidx];
+ cancel_work_sync(&cq_poll->dim.work);
+ napi_disable(&cq_poll->napi);
+ netif_napi_del(&cq_poll->napi);
+ }
+}
+
+static void otx2_free_cq_res(struct otx2_nic *pf)
+{
+ struct otx2_qset *qset = &pf->qset;
+ struct otx2_cq_queue *cq;
+ int qidx;
+
+ /* Disable CQs */
+ otx2_ctx_disable(&pf->mbox, NIX_AQ_CTYPE_CQ, false);
+ for (qidx = 0; qidx < qset->cq_cnt; qidx++) {
+ cq = &qset->cq[qidx];
+ qmem_free(pf->dev, cq->cqe);
+ }
+}
+
+static void otx2_free_sq_res(struct otx2_nic *pf)
+{
+ struct otx2_qset *qset = &pf->qset;
+ struct otx2_snd_queue *sq;
+ int qidx;
+
+ /* Disable SQs */
+ otx2_ctx_disable(&pf->mbox, NIX_AQ_CTYPE_SQ, false);
+ /* Free SQB pointers */
+ otx2_sq_free_sqbs(pf);
+ for (qidx = 0; qidx < otx2_get_total_tx_queues(pf); qidx++) {
+ sq = &qset->sq[qidx];
+ /* Skip freeing Qos queues if they are not initialized */
+ if (!sq->sqe)
+ continue;
+ qmem_free(pf->dev, sq->sqe);
+ qmem_free(pf->dev, sq->tso_hdrs);
+ kfree(sq->sg);
+ kfree(sq->sqb_ptrs);
+ }
+}
+
+static int otx2_get_rbuf_size(struct otx2_nic *pf, int mtu)
+{
+ int frame_size;
+ int total_size;
+ int rbuf_size;
+
+ if (pf->hw.rbuf_len)
+ return ALIGN(pf->hw.rbuf_len, OTX2_ALIGN) + OTX2_HEAD_ROOM;
+
+ /* The data transferred by NIX to memory consists of the actual packet
+ * plus additional data which has timestamp and/or EDSA/HIGIG2
+ * headers if the interface is configured in the corresponding modes.
+ * NIX transfers the entire data using 6 segments/buffers and writes
+ * a CQE_RX descriptor with those segment addresses. The first segment
+ * has the additional data prepended to the packet. Software also
+ * reserves a headroom of 128 bytes in each segment. Hence the total
+ * size of memory needed to receive a packet with 'mtu' is:
+ * frame size = mtu + additional data;
+ * memory = frame_size + headroom * 6;
+ * each receive buffer size = memory / 6;
+ */
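+ /* Rough illustration (exact values depend on the OTX2_ETH_HLEN and
+ * OTX2_HW_TIMESTAMP_LEN definitions): with a 1500 byte MTU and about
+ * 30 bytes of additional data, memory = ~1530 + 6 * 128 = ~2298
+ * bytes, so each of the six buffers is ~383 bytes and rounds up to
+ * 2048 bytes after alignment.
+ */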
+ frame_size = mtu + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN;
+ total_size = frame_size + OTX2_HEAD_ROOM * 6;
+ rbuf_size = total_size / 6;
+
+ return ALIGN(rbuf_size, 2048);
+}
+
+static int otx2_init_hw_resources(struct otx2_nic *pf)
+{
+ struct nix_lf_free_req *free_req;
+ struct mbox *mbox = &pf->mbox;
+ struct otx2_hw *hw = &pf->hw;
+ struct msg_req *req;
+ int err = 0, lvl;
+
+ /* Set required NPA LF's pool counts
+ * Auras and Pools are used in a 1:1 mapping,
+ * so, aura count = pool count.
+ */
+ hw->rqpool_cnt = hw->rx_queues;
+ hw->sqpool_cnt = otx2_get_total_tx_queues(pf);
+ hw->pool_cnt = hw->rqpool_cnt + hw->sqpool_cnt;
+
+ /* Maximum hardware supported transmit length */
+ pf->tx_max_pktlen = pf->netdev->max_mtu + OTX2_ETH_HLEN;
+
+ pf->rbsize = otx2_get_rbuf_size(pf, pf->netdev->mtu);
+
+ mutex_lock(&mbox->lock);
+ /* NPA init */
+ err = otx2_config_npa(pf);
+ if (err)
+ goto exit;
+
+ /* NIX init */
+ err = otx2_config_nix(pf);
+ if (err)
+ goto err_free_npa_lf;
+
+ /* Enable backpressure for CGX mapped PF/VFs */
+ if (!is_otx2_lbkvf(pf->pdev))
+ otx2_nix_config_bp(pf, true);
+
+ /* Init Auras and pools used by NIX RQ, for free buffer ptrs */
+ err = otx2_rq_aura_pool_init(pf);
+ if (err) {
+ mutex_unlock(&mbox->lock);
+ goto err_free_nix_lf;
+ }
+ /* Init Auras and pools used by NIX SQ, for queueing SQEs */
+ err = otx2_sq_aura_pool_init(pf);
+ if (err) {
+ mutex_unlock(&mbox->lock);
+ goto err_free_rq_ptrs;
+ }
+
+ err = otx2_txsch_alloc(pf);
+ if (err) {
+ mutex_unlock(&mbox->lock);
+ goto err_free_sq_ptrs;
+ }
+
+#ifdef CONFIG_DCB
+ if (pf->pfc_en) {
+ err = otx2_pfc_txschq_alloc(pf);
+ if (err) {
+ mutex_unlock(&mbox->lock);
+ goto err_free_sq_ptrs;
+ }
+ }
+#endif
+
+ err = otx2_config_nix_queues(pf);
+ if (err) {
+ mutex_unlock(&mbox->lock);
+ goto err_free_txsch;
+ }
+
+ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
+ err = otx2_txschq_config(pf, lvl, 0, false);
+ if (err) {
+ mutex_unlock(&mbox->lock);
+ goto err_free_nix_queues;
+ }
+ }
+
+#ifdef CONFIG_DCB
+ if (pf->pfc_en) {
+ err = otx2_pfc_txschq_config(pf);
+ if (err) {
+ mutex_unlock(&mbox->lock);
+ goto err_free_nix_queues;
+ }
+ }
+#endif
+
+ mutex_unlock(&mbox->lock);
+ return err;
+
+err_free_nix_queues:
+ otx2_free_sq_res(pf);
+ otx2_free_cq_res(pf);
+ otx2_ctx_disable(mbox, NIX_AQ_CTYPE_RQ, false);
+err_free_txsch:
+ otx2_txschq_stop(pf);
+err_free_sq_ptrs:
+ otx2_sq_free_sqbs(pf);
+err_free_rq_ptrs:
+ otx2_free_aura_ptr(pf, AURA_NIX_RQ);
+ otx2_ctx_disable(mbox, NPA_AQ_CTYPE_POOL, true);
+ otx2_ctx_disable(mbox, NPA_AQ_CTYPE_AURA, true);
+ otx2_aura_pool_free(pf);
+err_free_nix_lf:
+ mutex_lock(&mbox->lock);
+ free_req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
+ if (free_req) {
+ free_req->flags = NIX_LF_DISABLE_FLOWS;
+ if (otx2_sync_mbox_msg(mbox))
+ dev_err(pf->dev, "%s failed to free nixlf\n", __func__);
+ }
+err_free_npa_lf:
+ /* Reset NPA LF */
+ req = otx2_mbox_alloc_msg_npa_lf_free(mbox);
+ if (req) {
+ if (otx2_sync_mbox_msg(mbox))
+ dev_err(pf->dev, "%s failed to free npalf\n", __func__);
+ }
+exit:
+ mutex_unlock(&mbox->lock);
+ return err;
+}
+
+static void otx2_free_hw_resources(struct otx2_nic *pf)
+{
+ struct otx2_qset *qset = &pf->qset;
+ struct nix_lf_free_req *free_req;
+ struct mbox *mbox = &pf->mbox;
+ struct otx2_cq_queue *cq;
+ struct otx2_pool *pool;
+ struct msg_req *req;
+ int pool_id;
+ int qidx;
+
+ /* Ensure all SQEs are processed */
+ otx2_sqb_flush(pf);
+
+ /* Stop transmission */
+ otx2_txschq_stop(pf);
+
+#ifdef CONFIG_DCB
+ if (pf->pfc_en)
+ otx2_pfc_txschq_stop(pf);
+#endif
+
+ otx2_clean_qos_queues(pf);
+
+ mutex_lock(&mbox->lock);
+ /* Disable backpressure */
+ if (!(pf->pcifunc & RVU_PFVF_FUNC_MASK))
+ otx2_nix_config_bp(pf, false);
+ mutex_unlock(&mbox->lock);
+
+ /* Disable RQs */
+ otx2_ctx_disable(mbox, NIX_AQ_CTYPE_RQ, false);
+
+ /* Dequeue all CQEs */
+ for (qidx = 0; qidx < qset->cq_cnt; qidx++) {
+ cq = &qset->cq[qidx];
+ if (cq->cq_type == CQ_RX)
+ otx2_cleanup_rx_cqes(pf, cq, qidx);
+ else
+ otx2_cleanup_tx_cqes(pf, cq);
+ }
+ otx2_free_pending_sqe(pf);
+
+ otx2_free_sq_res(pf);
+
+ /* Free RQ buffer pointers */
+ otx2_free_aura_ptr(pf, AURA_NIX_RQ);
+
+ for (qidx = 0; qidx < pf->hw.rx_queues; qidx++) {
+ pool_id = otx2_get_pool_idx(pf, AURA_NIX_RQ, qidx);
+ pool = &pf->qset.pool[pool_id];
+ page_pool_destroy(pool->page_pool);
+ pool->page_pool = NULL;
+ }
+
+ otx2_free_cq_res(pf);
+
+ /* Free all ingress bandwidth profiles allocated */
+ cn10k_free_all_ipolicers(pf);
+
+ mutex_lock(&mbox->lock);
+ /* Reset NIX LF */
+ free_req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
+ if (free_req) {
+ free_req->flags = NIX_LF_DISABLE_FLOWS;
+ if (!(pf->flags & OTX2_FLAG_PF_SHUTDOWN))
+ free_req->flags |= NIX_LF_DONT_FREE_TX_VTAG;
+ if (otx2_sync_mbox_msg(mbox))
+ dev_err(pf->dev, "%s failed to free nixlf\n", __func__);
+ }
+ mutex_unlock(&mbox->lock);
+
+ /* Disable NPA Pool and Aura hw context */
+ otx2_ctx_disable(mbox, NPA_AQ_CTYPE_POOL, true);
+ otx2_ctx_disable(mbox, NPA_AQ_CTYPE_AURA, true);
+ otx2_aura_pool_free(pf);
+
+ mutex_lock(&mbox->lock);
+ /* Reset NPA LF */
+ req = otx2_mbox_alloc_msg_npa_lf_free(mbox);
+ if (req) {
+ if (otx2_sync_mbox_msg(mbox))
+ dev_err(pf->dev, "%s failed to free npalf\n", __func__);
+ }
+ mutex_unlock(&mbox->lock);
+}
+
+static bool otx2_promisc_use_mce_list(struct otx2_nic *pfvf)
+{
+ int vf;
+
+ /* The AF driver will determine whether to allow the VF netdev or not */
+ if (is_otx2_vf(pfvf->pcifunc))
+ return true;
+
+ /* check if there are any trusted VFs associated with the PF netdev */
+ for (vf = 0; vf < pci_num_vf(pfvf->pdev); vf++)
+ if (pfvf->vf_configs[vf].trusted)
+ return true;
+ return false;
+}
+
+static void otx2_do_set_rx_mode(struct otx2_nic *pf)
+{
+ struct net_device *netdev = pf->netdev;
+ struct nix_rx_mode *req;
+ bool promisc = false;
+
+ if (!(netdev->flags & IFF_UP))
+ return;
+
+ if ((netdev->flags & IFF_PROMISC) ||
+ (netdev_uc_count(netdev) > OTX2_MAX_UNICAST_FLOWS)) {
+ promisc = true;
+ }
+
+ /* Write unicast address to mcam entries or del from mcam */
+ if (!promisc && netdev->priv_flags & IFF_UNICAST_FLT)
+ __dev_uc_sync(netdev, otx2_add_macfilter, otx2_del_macfilter);
+
+ mutex_lock(&pf->mbox.lock);
+ req = otx2_mbox_alloc_msg_nix_set_rx_mode(&pf->mbox);
+ if (!req) {
+ mutex_unlock(&pf->mbox.lock);
+ return;
+ }
+
+ req->mode = NIX_RX_MODE_UCAST;
+
+ if (promisc)
+ req->mode |= NIX_RX_MODE_PROMISC;
+ if (netdev->flags & (IFF_ALLMULTI | IFF_MULTICAST))
+ req->mode |= NIX_RX_MODE_ALLMULTI;
+
+ if (otx2_promisc_use_mce_list(pf))
+ req->mode |= NIX_RX_MODE_USE_MCE;
+
+ otx2_sync_mbox_msg(&pf->mbox);
+ mutex_unlock(&pf->mbox.lock);
+}
+
+static void otx2_set_irq_coalesce(struct otx2_nic *pfvf)
+{
+ int cint;
+
+ for (cint = 0; cint < pfvf->hw.cint_cnt; cint++)
+ otx2_config_irq_coalescing(pfvf, cint);
+}
+
+static void otx2_dim_work(struct work_struct *w)
+{
+ struct dim_cq_moder cur_moder;
+ struct otx2_cq_poll *cq_poll;
+ struct otx2_nic *pfvf;
+ struct dim *dim;
+
+ dim = container_of(w, struct dim, work);
+ cur_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
+ cq_poll = container_of(dim, struct otx2_cq_poll, dim);
+ pfvf = (struct otx2_nic *)cq_poll->dev;
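+ /* Clamp the DIM-suggested moderation values: the CQ timer wait is
+ * capped at CQ_TIMER_THRESH_MAX and the packet-count wait at
+ * NAPI_POLL_WEIGHT, then IRQ coalescing is reprogrammed.
+ */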
+ pfvf->hw.cq_time_wait = (cur_moder.usec > CQ_TIMER_THRESH_MAX) ?
+ CQ_TIMER_THRESH_MAX : cur_moder.usec;
+ pfvf->hw.cq_ecount_wait = (cur_moder.pkts > NAPI_POLL_WEIGHT) ?
+ NAPI_POLL_WEIGHT : cur_moder.pkts;
+ otx2_set_irq_coalesce(pfvf);
+ dim->state = DIM_START_MEASURE;
+}
+
+int otx2_open(struct net_device *netdev)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+ struct otx2_cq_poll *cq_poll = NULL;
+ struct otx2_qset *qset = &pf->qset;
+ int err = 0, qidx, vec;
+ char *irq_name;
+
+ netif_carrier_off(netdev);
+
+ /* RQs and SQs are mapped to different CQs,
+ * so find out the max number of CQ IRQs (i.e. CINTs) needed.
+ */
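+ /* For example, with 8 Rx and 8 Tx queues and no XDP/QOS queues,
+ * 8 CINTs are needed and CINTn services both RQn's and SQn's CQ
+ * (see the cq_ids mapping set up below).
+ */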
+ pf->hw.cint_cnt = max3(pf->hw.rx_queues, pf->hw.tx_queues,
+ pf->hw.tc_tx_queues);
+
+ pf->qset.cq_cnt = pf->hw.rx_queues + otx2_get_total_tx_queues(pf);
+
+ qset->napi = kcalloc(pf->hw.cint_cnt, sizeof(*cq_poll), GFP_KERNEL);
+ if (!qset->napi)
+ return -ENOMEM;
+
+ /* CQ size of RQ */
+ qset->rqe_cnt = qset->rqe_cnt ? qset->rqe_cnt : Q_COUNT(Q_SIZE_256);
+ /* CQ size of SQ */
+ qset->sqe_cnt = qset->sqe_cnt ? qset->sqe_cnt : Q_COUNT(Q_SIZE_4K);
+
+ err = -ENOMEM;
+ qset->cq = kcalloc(pf->qset.cq_cnt,
+ sizeof(struct otx2_cq_queue), GFP_KERNEL);
+ if (!qset->cq)
+ goto err_free_mem;
+
+ qset->sq = kcalloc(otx2_get_total_tx_queues(pf),
+ sizeof(struct otx2_snd_queue), GFP_KERNEL);
+ if (!qset->sq)
+ goto err_free_mem;
+
+ qset->rq = kcalloc(pf->hw.rx_queues,
+ sizeof(struct otx2_rcv_queue), GFP_KERNEL);
+ if (!qset->rq)
+ goto err_free_mem;
+
+ err = otx2_init_hw_resources(pf);
+ if (err)
+ goto err_free_mem;
+
+ /* Register NAPI handler */
+ for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
+ cq_poll = &qset->napi[qidx];
+ cq_poll->cint_idx = qidx;
+ /* RQ0 & SQ0 are mapped to CINT0 and so on.
+ * 'cq_ids[0]' points to the RQ's CQ,
+ * 'cq_ids[1]' points to the SQ's CQ,
+ * 'cq_ids[2]' points to the XDP SQ's CQ and
+ * 'cq_ids[3]' points to the QOS SQ's CQ.
+ */
+ cq_poll->cq_ids[CQ_RX] =
+ (qidx < pf->hw.rx_queues) ? qidx : CINT_INVALID_CQ;
+ cq_poll->cq_ids[CQ_TX] = (qidx < pf->hw.tx_queues) ?
+ qidx + pf->hw.rx_queues : CINT_INVALID_CQ;
+ if (pf->xdp_prog)
+ cq_poll->cq_ids[CQ_XDP] = (qidx < pf->hw.xdp_queues) ?
+ (qidx + pf->hw.rx_queues +
+ pf->hw.tx_queues) :
+ CINT_INVALID_CQ;
+ else
+ cq_poll->cq_ids[CQ_XDP] = CINT_INVALID_CQ;
+
+ cq_poll->cq_ids[CQ_QOS] = (qidx < pf->hw.tc_tx_queues) ?
+ (qidx + pf->hw.rx_queues +
+ pf->hw.non_qos_queues) :
+ CINT_INVALID_CQ;
+
+ cq_poll->dev = (void *)pf;
+ cq_poll->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_CQE;
+ INIT_WORK(&cq_poll->dim.work, otx2_dim_work);
+ netif_napi_add(netdev, &cq_poll->napi, otx2_napi_handler);
+ napi_enable(&cq_poll->napi);
+ }
+
+ /* Set maximum frame size allowed in HW */
+ err = otx2_hw_set_mtu(pf, netdev->mtu);
+ if (err)
+ goto err_disable_napi;
+
+ /* Set up segmentation algorithms; if that fails, clear the offload capability */
+ otx2_setup_segmentation(pf);
+
+ /* Initialize RSS */
+ err = otx2_rss_init(pf);
+ if (err)
+ goto err_disable_napi;
+
+ /* Register Queue IRQ handlers */
+ vec = pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START;
+ irq_name = &pf->hw.irq_name[vec * NAME_SIZE];
+
+ snprintf(irq_name, NAME_SIZE, "%s-qerr", pf->netdev->name);
+
+ err = request_irq(pci_irq_vector(pf->pdev, vec),
+ otx2_q_intr_handler, 0, irq_name, pf);
+ if (err) {
+ dev_err(pf->dev,
+ "RVUPF%d: IRQ registration failed for QERR\n",
+ rvu_get_pf(pf->pcifunc));
+ goto err_disable_napi;
+ }
+
+ /* Enable QINT IRQ */
+ otx2_write64(pf, NIX_LF_QINTX_ENA_W1S(0), BIT_ULL(0));
+
+ /* Register CQ IRQ handlers */
+ vec = pf->hw.nix_msixoff + NIX_LF_CINT_VEC_START;
+ for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
+ irq_name = &pf->hw.irq_name[vec * NAME_SIZE];
+
+ snprintf(irq_name, NAME_SIZE, "%s-rxtx-%d", pf->netdev->name,
+ qidx);
+
+ err = request_irq(pci_irq_vector(pf->pdev, vec),
+ otx2_cq_intr_handler, 0, irq_name,
+ &qset->napi[qidx]);
+ if (err) {
+ dev_err(pf->dev,
+ "RVUPF%d: IRQ registration failed for CQ%d\n",
+ rvu_get_pf(pf->pcifunc), qidx);
+ goto err_free_cints;
+ }
+ vec++;
+
+ otx2_config_irq_coalescing(pf, qidx);
+
+ /* Enable CQ IRQ */
+ otx2_write64(pf, NIX_LF_CINTX_INT(qidx), BIT_ULL(0));
+ otx2_write64(pf, NIX_LF_CINTX_ENA_W1S(qidx), BIT_ULL(0));
+ }
+
+ otx2_set_cints_affinity(pf);
+
+ if (pf->flags & OTX2_FLAG_RX_VLAN_SUPPORT)
+ otx2_enable_rxvlan(pf, true);
+
+ /* When reinitializing, re-enable timestamping if it was enabled before */
+ if (pf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED) {
+ pf->flags &= ~OTX2_FLAG_TX_TSTAMP_ENABLED;
+ otx2_config_hw_tx_tstamp(pf, true);
+ }
+ if (pf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED) {
+ pf->flags &= ~OTX2_FLAG_RX_TSTAMP_ENABLED;
+ otx2_config_hw_rx_tstamp(pf, true);
+ }
+
+ pf->flags &= ~OTX2_FLAG_INTF_DOWN;
+ /* 'intf_down' may be checked on any cpu */
+ smp_wmb();
+
+ /* Enable QoS configuration before starting tx queues */
+ otx2_qos_config_txschq(pf);
+
+ /* we have already received link status notification */
+ if (pf->linfo.link_up && !(pf->pcifunc & RVU_PFVF_FUNC_MASK))
+ otx2_handle_link_event(pf);
+
+ /* Install DMAC Filters */
+ if (pf->flags & OTX2_FLAG_DMACFLTR_SUPPORT)
+ otx2_dmacflt_reinstall_flows(pf);
+
+ otx2_tc_apply_ingress_police_rules(pf);
+
+ err = otx2_rxtx_enable(pf, true);
+ /* If a mbox communication error happens at this point then the
+ * interface will end up down while the hardware MCAM entries are
+ * still enabled to receive packets. Hence disable packet I/O.
+ */
+ if (err == EIO)
+ goto err_disable_rxtx;
+ else if (err)
+ goto err_tx_stop_queues;
+
+ otx2_do_set_rx_mode(pf);
+
+ return 0;
+
+err_disable_rxtx:
+ otx2_rxtx_enable(pf, false);
+err_tx_stop_queues:
+ netif_tx_stop_all_queues(netdev);
+ netif_carrier_off(netdev);
+ pf->flags |= OTX2_FLAG_INTF_DOWN;
+err_free_cints:
+ otx2_free_cints(pf, qidx);
+ vec = pci_irq_vector(pf->pdev,
+ pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START);
+ otx2_write64(pf, NIX_LF_QINTX_ENA_W1C(0), BIT_ULL(0));
+ free_irq(vec, pf);
+err_disable_napi:
+ otx2_disable_napi(pf);
+ otx2_free_hw_resources(pf);
+err_free_mem:
+ kfree(qset->sq);
+ kfree(qset->cq);
+ kfree(qset->rq);
+ kfree(qset->napi);
+ return err;
+}
+EXPORT_SYMBOL(otx2_open);
+
+int otx2_stop(struct net_device *netdev)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+ struct otx2_cq_poll *cq_poll = NULL;
+ struct otx2_qset *qset = &pf->qset;
+ struct otx2_rss_info *rss;
+ int qidx, vec, wrk;
+
+ /* If the DOWN flag is set resources are already freed */
+ if (pf->flags & OTX2_FLAG_INTF_DOWN)
+ return 0;
+
+ netif_carrier_off(netdev);
+ netif_tx_stop_all_queues(netdev);
+
+ pf->flags |= OTX2_FLAG_INTF_DOWN;
+ /* 'intf_down' may be checked on any cpu */
+ smp_wmb();
+
+ /* First stop packet Rx/Tx */
+ otx2_rxtx_enable(pf, false);
+
+ /* Clear RSS enable flag */
+ rss = &pf->hw.rss_info;
+ rss->enable = false;
+ if (!netif_is_rxfh_configured(netdev))
+ kfree(rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP]);
+
+ /* Cleanup Queue IRQ */
+ vec = pci_irq_vector(pf->pdev,
+ pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START);
+ otx2_write64(pf, NIX_LF_QINTX_ENA_W1C(0), BIT_ULL(0));
+ free_irq(vec, pf);
+
+ /* Cleanup CQ NAPI and IRQ */
+ vec = pf->hw.nix_msixoff + NIX_LF_CINT_VEC_START;
+ for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
+ /* Disable interrupt */
+ otx2_write64(pf, NIX_LF_CINTX_ENA_W1C(qidx), BIT_ULL(0));
+
+ synchronize_irq(pci_irq_vector(pf->pdev, vec));
+
+ cq_poll = &qset->napi[qidx];
+ napi_synchronize(&cq_poll->napi);
+ vec++;
+ }
+
+ netif_tx_disable(netdev);
+
+ for (wrk = 0; wrk < pf->qset.cq_cnt; wrk++)
+ cancel_delayed_work_sync(&pf->refill_wrk[wrk].pool_refill_work);
+ devm_kfree(pf->dev, pf->refill_wrk);
+
+ otx2_free_hw_resources(pf);
+ otx2_free_cints(pf, pf->hw.cint_cnt);
+ otx2_disable_napi(pf);
+
+ for (qidx = 0; qidx < netdev->num_tx_queues; qidx++)
+ netdev_tx_reset_queue(netdev_get_tx_queue(netdev, qidx));
+
+
+ kfree(qset->sq);
+ kfree(qset->cq);
+ kfree(qset->rq);
+ kfree(qset->napi);
+ /* Do not clear RQ/SQ ringsize settings */
+ memset_startat(qset, 0, sqe_cnt);
+ return 0;
+}
+EXPORT_SYMBOL(otx2_stop);
+
+static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+ int qidx = skb_get_queue_mapping(skb);
+ struct otx2_snd_queue *sq;
+ struct netdev_queue *txq;
+ int sq_idx;
+
+ /* XDP SQs are not mapped to netdev TXQs;
+ * advance the qid past them to derive the correct SQ for QOS queues.
+ */
+ sq_idx = (qidx >= pf->hw.tx_queues) ? (qidx + pf->hw.xdp_queues) : qidx;
+
+ /* Check for minimum and maximum packet length */
+ if (skb->len <= ETH_HLEN ||
+ (!skb_shinfo(skb)->gso_size && skb->len > pf->tx_max_pktlen)) {
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ sq = &pf->qset.sq[sq_idx];
+ txq = netdev_get_tx_queue(netdev, qidx);
+
+ if (!otx2_sq_append_skb(netdev, sq, skb, qidx)) {
+ netif_tx_stop_queue(txq);
+
+ /* Check again, in case SQBs got freed up */
+ smp_mb();
+ if (((sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb)
+ > sq->sqe_thresh)
+ netif_tx_wake_queue(txq);
+
+ return NETDEV_TX_BUSY;
+ }
+
+ return NETDEV_TX_OK;
+}
+
+static int otx2_qos_select_htb_queue(struct otx2_nic *pf, struct sk_buff *skb,
+ u16 htb_maj_id)
+{
+ u16 classid;
+
+ if ((TC_H_MAJ(skb->priority) >> 16) == htb_maj_id)
+ classid = TC_H_MIN(skb->priority);
+ else
+ classid = READ_ONCE(pf->qos.defcls);
+
+ if (!classid)
+ return 0;
+
+ return otx2_get_txq_by_classid(pf, classid);
+}
+
+u16 otx2_select_queue(struct net_device *netdev, struct sk_buff *skb,
+ struct net_device *sb_dev)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+ bool qos_enabled;
+#ifdef CONFIG_DCB
+ u8 vlan_prio;
+#endif
+ int txq;
+
+ qos_enabled = netdev->real_num_tx_queues > pf->hw.tx_queues;
+ if (unlikely(qos_enabled)) {
+ /* This smp_load_acquire() pairs with smp_store_release() in
+ * otx2_qos_root_add() called from htb offload root creation
+ */
+ u16 htb_maj_id = smp_load_acquire(&pf->qos.maj_id);
+
+ if (unlikely(htb_maj_id)) {
+ txq = otx2_qos_select_htb_queue(pf, skb, htb_maj_id);
+ if (txq > 0)
+ return txq;
+ goto process_pfc;
+ }
+ }
+
+process_pfc:
+#ifdef CONFIG_DCB
+ if (!skb_vlan_tag_present(skb))
+ goto pick_tx;
+
+ vlan_prio = skb->vlan_tci >> 13;
+ if ((vlan_prio > pf->hw.tx_queues - 1) ||
+ !pf->pfc_alloc_status[vlan_prio])
+ goto pick_tx;
+
+ return vlan_prio;
+
+pick_tx:
+#endif
+ txq = netdev_pick_tx(netdev, skb, NULL);
+ if (unlikely(qos_enabled))
+ return txq % pf->hw.tx_queues;
+
+ return txq;
+}
+EXPORT_SYMBOL(otx2_select_queue);
+
+static netdev_features_t otx2_fix_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ if (features & NETIF_F_HW_VLAN_CTAG_RX)
+ features |= NETIF_F_HW_VLAN_STAG_RX;
+ else
+ features &= ~NETIF_F_HW_VLAN_STAG_RX;
+
+ return features;
+}
+
+static void otx2_set_rx_mode(struct net_device *netdev)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+
+ queue_work(pf->otx2_wq, &pf->rx_mode_work);
+}
+
+static void otx2_rx_mode_wrk_handler(struct work_struct *work)
+{
+ struct otx2_nic *pf = container_of(work, struct otx2_nic, rx_mode_work);
+
+ otx2_do_set_rx_mode(pf);
+}
+
+static int otx2_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ netdev_features_t changed = features ^ netdev->features;
+ struct otx2_nic *pf = netdev_priv(netdev);
+
+ if ((changed & NETIF_F_LOOPBACK) && netif_running(netdev))
+ return otx2_cgx_config_loopback(pf,
+ features & NETIF_F_LOOPBACK);
+
+ if ((changed & NETIF_F_HW_VLAN_CTAG_RX) && netif_running(netdev))
+ return otx2_enable_rxvlan(pf,
+ features & NETIF_F_HW_VLAN_CTAG_RX);
+
+ return otx2_handle_ntuple_tc_features(netdev, features);
+}
+
+static void otx2_reset_task(struct work_struct *work)
+{
+ struct otx2_nic *pf = container_of(work, struct otx2_nic, reset_task);
+
+ if (!netif_running(pf->netdev))
+ return;
+
+ rtnl_lock();
+ otx2_stop(pf->netdev);
+ pf->reset_count++;
+ otx2_open(pf->netdev);
+ netif_trans_update(pf->netdev);
+ rtnl_unlock();
+}
+
+static int otx2_config_hw_rx_tstamp(struct otx2_nic *pfvf, bool enable)
+{
+ struct msg_req *req;
+ int err;
+
+ if (pfvf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED && enable)
+ return 0;
+
+ mutex_lock(&pfvf->mbox.lock);
+ if (enable)
+ req = otx2_mbox_alloc_msg_cgx_ptp_rx_enable(&pfvf->mbox);
+ else
+ req = otx2_mbox_alloc_msg_cgx_ptp_rx_disable(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
+ }
+
+ mutex_unlock(&pfvf->mbox.lock);
+ if (enable)
+ pfvf->flags |= OTX2_FLAG_RX_TSTAMP_ENABLED;
+ else
+ pfvf->flags &= ~OTX2_FLAG_RX_TSTAMP_ENABLED;
+ return 0;
+}
+
+static int otx2_config_hw_tx_tstamp(struct otx2_nic *pfvf, bool enable)
+{
+ struct msg_req *req;
+ int err;
+
+ if (pfvf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED && enable)
+ return 0;
+
+ mutex_lock(&pfvf->mbox.lock);
+ if (enable)
+ req = otx2_mbox_alloc_msg_nix_lf_ptp_tx_enable(&pfvf->mbox);
+ else
+ req = otx2_mbox_alloc_msg_nix_lf_ptp_tx_disable(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
+ }
+
+ mutex_unlock(&pfvf->mbox.lock);
+ if (enable)
+ pfvf->flags |= OTX2_FLAG_TX_TSTAMP_ENABLED;
+ else
+ pfvf->flags &= ~OTX2_FLAG_TX_TSTAMP_ENABLED;
+ return 0;
+}
+
+int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ struct hwtstamp_config config;
+
+ if (!pfvf->ptp)
+ return -ENODEV;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ if (pfvf->flags & OTX2_FLAG_PTP_ONESTEP_SYNC)
+ pfvf->flags &= ~OTX2_FLAG_PTP_ONESTEP_SYNC;
+
+ cancel_delayed_work(&pfvf->ptp->synctstamp_work);
+ otx2_config_hw_tx_tstamp(pfvf, false);
+ break;
+ case HWTSTAMP_TX_ONESTEP_SYNC:
+ if (!test_bit(CN10K_PTP_ONESTEP, &pfvf->hw.cap_flag))
+ return -ERANGE;
+ pfvf->flags |= OTX2_FLAG_PTP_ONESTEP_SYNC;
+ schedule_delayed_work(&pfvf->ptp->synctstamp_work,
+ msecs_to_jiffies(500));
+ fallthrough;
+ case HWTSTAMP_TX_ON:
+ otx2_config_hw_tx_tstamp(pfvf, true);
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ otx2_config_hw_rx_tstamp(pfvf, false);
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_SOME:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ otx2_config_hw_rx_tstamp(pfvf, true);
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ memcpy(&pfvf->tstamp, &config, sizeof(config));
+
+ return copy_to_user(ifr->ifr_data, &config,
+ sizeof(config)) ? -EFAULT : 0;
+}
+EXPORT_SYMBOL(otx2_config_hwtstamp);
+
+int otx2_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ struct hwtstamp_config *cfg = &pfvf->tstamp;
+
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ return otx2_config_hwtstamp(netdev, req);
+ case SIOCGHWTSTAMP:
+ return copy_to_user(req->ifr_data, cfg,
+ sizeof(*cfg)) ? -EFAULT : 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+EXPORT_SYMBOL(otx2_ioctl);
+
+static int otx2_do_set_vf_mac(struct otx2_nic *pf, int vf, const u8 *mac)
+{
+ struct npc_install_flow_req *req;
+ int err;
+
+ mutex_lock(&pf->mbox.lock);
+ req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
+ if (!req) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ ether_addr_copy(req->packet.dmac, mac);
+ eth_broadcast_addr((u8 *)&req->mask.dmac);
+ req->features = BIT_ULL(NPC_DMAC);
+ req->channel = pf->hw.rx_chan_base;
+ req->intf = NIX_INTF_RX;
+ req->default_rule = 1;
+ req->append = 1;
+ req->vf = vf + 1;
+ req->op = NIX_RX_ACTION_DEFAULT;
+
+ err = otx2_sync_mbox_msg(&pf->mbox);
+out:
+ mutex_unlock(&pf->mbox.lock);
+ return err;
+}
+
+static int otx2_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+ struct pci_dev *pdev = pf->pdev;
+ struct otx2_vf_config *config;
+ int ret;
+
+ if (!netif_running(netdev))
+ return -EAGAIN;
+
+ if (vf >= pf->total_vfs)
+ return -EINVAL;
+
+ if (!is_valid_ether_addr(mac))
+ return -EINVAL;
+
+ config = &pf->vf_configs[vf];
+ ether_addr_copy(config->mac, mac);
+
+ ret = otx2_do_set_vf_mac(pf, vf, mac);
+ if (ret == 0)
+ dev_info(&pdev->dev,
+ "Load/Reload VF driver\n");
+
+ return ret;
+}
+
+static int otx2_do_set_vf_vlan(struct otx2_nic *pf, int vf, u16 vlan, u8 qos,
+ __be16 proto)
+{
+ struct otx2_flow_config *flow_cfg = pf->flow_cfg;
+ struct nix_vtag_config_rsp *vtag_rsp;
+ struct npc_delete_flow_req *del_req;
+ struct nix_vtag_config *vtag_req;
+ struct npc_install_flow_req *req;
+ struct otx2_vf_config *config;
+ int err = 0;
+ u32 idx;
+
+ config = &pf->vf_configs[vf];
+
+ /* Nothing to do: no VLAN requested and none currently configured.
+ * The mbox lock is not held yet, so return directly rather than
+ * going through the 'out' unlock path.
+ */
+ if (!vlan && !config->vlan)
+ return 0;
+
+ mutex_lock(&pf->mbox.lock);
+
+ /* free old tx vtag entry */
+ if (config->vlan) {
+ vtag_req = otx2_mbox_alloc_msg_nix_vtag_cfg(&pf->mbox);
+ if (!vtag_req) {
+ err = -ENOMEM;
+ goto out;
+ }
+ vtag_req->cfg_type = 0;
+ vtag_req->tx.free_vtag0 = 1;
+ vtag_req->tx.vtag0_idx = config->tx_vtag_idx;
+
+ err = otx2_sync_mbox_msg(&pf->mbox);
+ if (err)
+ goto out;
+ }
+
+ if (!vlan && config->vlan) {
+ /* rx */
+ del_req = otx2_mbox_alloc_msg_npc_delete_flow(&pf->mbox);
+ if (!del_req) {
+ err = -ENOMEM;
+ goto out;
+ }
+ idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_RX_INDEX);
+ del_req->entry =
+ flow_cfg->def_ent[flow_cfg->vf_vlan_offset + idx];
+ err = otx2_sync_mbox_msg(&pf->mbox);
+ if (err)
+ goto out;
+
+ /* tx */
+ del_req = otx2_mbox_alloc_msg_npc_delete_flow(&pf->mbox);
+ if (!del_req) {
+ err = -ENOMEM;
+ goto out;
+ }
+ idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_TX_INDEX);
+ del_req->entry =
+ flow_cfg->def_ent[flow_cfg->vf_vlan_offset + idx];
+ err = otx2_sync_mbox_msg(&pf->mbox);
+
+ goto out;
+ }
+
+ /* rx */
+ req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
+ if (!req) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_RX_INDEX);
+ req->entry = flow_cfg->def_ent[flow_cfg->vf_vlan_offset + idx];
+ req->packet.vlan_tci = htons(vlan);
+ req->mask.vlan_tci = htons(VLAN_VID_MASK);
+ /* af fills the destination mac addr */
+ eth_broadcast_addr((u8 *)&req->mask.dmac);
+ req->features = BIT_ULL(NPC_OUTER_VID) | BIT_ULL(NPC_DMAC);
+ req->channel = pf->hw.rx_chan_base;
+ req->intf = NIX_INTF_RX;
+ req->vf = vf + 1;
+ req->op = NIX_RX_ACTION_DEFAULT;
+ req->vtag0_valid = true;
+ req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE7;
+ req->set_cntr = 1;
+
+ err = otx2_sync_mbox_msg(&pf->mbox);
+ if (err)
+ goto out;
+
+ /* tx */
+ vtag_req = otx2_mbox_alloc_msg_nix_vtag_cfg(&pf->mbox);
+ if (!vtag_req) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ /* configure tx vtag params */
+ vtag_req->vtag_size = VTAGSIZE_T4;
+ vtag_req->cfg_type = 0; /* tx vlan cfg */
+ vtag_req->tx.cfg_vtag0 = 1;
+ vtag_req->tx.vtag0 = ((u64)ntohs(proto) << 16) | vlan;
+
+ err = otx2_sync_mbox_msg(&pf->mbox);
+ if (err)
+ goto out;
+
+ vtag_rsp = (struct nix_vtag_config_rsp *)otx2_mbox_get_rsp
+ (&pf->mbox.mbox, 0, &vtag_req->hdr);
+ if (IS_ERR(vtag_rsp)) {
+ err = PTR_ERR(vtag_rsp);
+ goto out;
+ }
+ config->tx_vtag_idx = vtag_rsp->vtag0_idx;
+
+ req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
+ if (!req) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ eth_zero_addr((u8 *)&req->mask.dmac);
+ idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_TX_INDEX);
+ req->entry = flow_cfg->def_ent[flow_cfg->vf_vlan_offset + idx];
+ req->features = BIT_ULL(NPC_DMAC);
+ req->channel = pf->hw.tx_chan_base;
+ req->intf = NIX_INTF_TX;
+ req->vf = vf + 1;
+ req->op = NIX_TX_ACTIONOP_UCAST_DEFAULT;
+ req->vtag0_def = vtag_rsp->vtag0_idx;
+ req->vtag0_op = VTAG_INSERT;
+ req->set_cntr = 1;
+
+ err = otx2_sync_mbox_msg(&pf->mbox);
+out:
+ config->vlan = vlan;
+ mutex_unlock(&pf->mbox.lock);
+ return err;
+}
+
+static int otx2_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
+ __be16 proto)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+ struct pci_dev *pdev = pf->pdev;
+
+ if (!netif_running(netdev))
+ return -EAGAIN;
+
+ if (vf >= pci_num_vf(pdev))
+ return -EINVAL;
+
+ /* qos is currently unsupported */
+ if (vlan >= VLAN_N_VID || qos)
+ return -EINVAL;
+
+ if (proto != htons(ETH_P_8021Q))
+ return -EPROTONOSUPPORT;
+
+ if (!(pf->flags & OTX2_FLAG_VF_VLAN_SUPPORT))
+ return -EOPNOTSUPP;
+
+ return otx2_do_set_vf_vlan(pf, vf, vlan, qos, proto);
+}
+
+static int otx2_get_vf_config(struct net_device *netdev, int vf,
+ struct ifla_vf_info *ivi)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+ struct pci_dev *pdev = pf->pdev;
+ struct otx2_vf_config *config;
+
+ if (!netif_running(netdev))
+ return -EAGAIN;
+
+ if (vf >= pci_num_vf(pdev))
+ return -EINVAL;
+
+ config = &pf->vf_configs[vf];
+ ivi->vf = vf;
+ ether_addr_copy(ivi->mac, config->mac);
+ ivi->vlan = config->vlan;
+ ivi->trusted = config->trusted;
+
+ return 0;
+}
+
+static int otx2_xdp_xmit_tx(struct otx2_nic *pf, struct xdp_frame *xdpf,
+ int qidx)
+{
+ struct page *page;
+ u64 dma_addr;
+ int err = 0;
+
+ dma_addr = otx2_dma_map_page(pf, virt_to_page(xdpf->data),
+ offset_in_page(xdpf->data), xdpf->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(pf->dev, dma_addr))
+ return -ENOMEM;
+
+ err = otx2_xdp_sq_append_pkt(pf, dma_addr, xdpf->len, qidx);
+ if (!err) {
+ otx2_dma_unmap_page(pf, dma_addr, xdpf->len, DMA_TO_DEVICE);
+ page = virt_to_page(xdpf->data);
+ put_page(page);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static int otx2_xdp_xmit(struct net_device *netdev, int n,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+ int qidx = smp_processor_id();
+ struct otx2_snd_queue *sq;
+ int drops = 0, i;
+
+ if (!netif_running(netdev))
+ return -ENETDOWN;
+
+ qidx += pf->hw.tx_queues;
+ sq = pf->xdp_prog ? &pf->qset.sq[qidx] : NULL;
+
+ /* Abort xmit if the XDP queue is not set up */
+ if (unlikely(!sq))
+ return -ENXIO;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ for (i = 0; i < n; i++) {
+ struct xdp_frame *xdpf = frames[i];
+ int err;
+
+ err = otx2_xdp_xmit_tx(pf, xdpf, qidx);
+ if (err)
+ drops++;
+ }
+ return n - drops;
+}
+
+static int otx2_xdp_setup(struct otx2_nic *pf, struct bpf_prog *prog)
+{
+ struct net_device *dev = pf->netdev;
+ bool if_up = netif_running(pf->netdev);
+ struct bpf_prog *old_prog;
+
+ if (prog && dev->mtu > MAX_XDP_MTU) {
+ netdev_warn(dev, "Jumbo frames not yet supported with XDP\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (if_up)
+ otx2_stop(pf->netdev);
+
+ old_prog = xchg(&pf->xdp_prog, prog);
+
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ if (pf->xdp_prog)
+ bpf_prog_add(pf->xdp_prog, pf->hw.rx_queues - 1);
+
+ /* The network stack and XDP share the same Rx queues.
+ * Use separate Tx queues for XDP and the network stack.
+ */
+ if (pf->xdp_prog) {
+ pf->hw.xdp_queues = pf->hw.rx_queues;
+ xdp_features_set_redirect_target(dev, false);
+ } else {
+ pf->hw.xdp_queues = 0;
+ xdp_features_clear_redirect_target(dev);
+ }
+
+ pf->hw.non_qos_queues += pf->hw.xdp_queues;
+
+ if (if_up)
+ otx2_open(pf->netdev);
+
+ return 0;
+}
+
+static int otx2_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return otx2_xdp_setup(pf, xdp->prog);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int otx2_set_vf_permissions(struct otx2_nic *pf, int vf,
+ int req_perm)
+{
+ struct set_vf_perm *req;
+ int rc;
+
+ mutex_lock(&pf->mbox.lock);
+ req = otx2_mbox_alloc_msg_set_vf_perm(&pf->mbox);
+ if (!req) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ /* Let AF reset VF permissions as sriov is disabled */
+ if (req_perm == OTX2_RESET_VF_PERM) {
+ req->flags |= RESET_VF_PERM;
+ } else if (req_perm == OTX2_TRUSTED_VF) {
+ if (pf->vf_configs[vf].trusted)
+ req->flags |= VF_TRUSTED;
+ }
+
+ req->vf = vf;
+ rc = otx2_sync_mbox_msg(&pf->mbox);
+out:
+ mutex_unlock(&pf->mbox.lock);
+ return rc;
+}
+
+static int otx2_ndo_set_vf_trust(struct net_device *netdev, int vf,
+ bool enable)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+ struct pci_dev *pdev = pf->pdev;
+ int rc;
+
+ if (vf >= pci_num_vf(pdev))
+ return -EINVAL;
+
+ if (pf->vf_configs[vf].trusted == enable)
+ return 0;
+
+ pf->vf_configs[vf].trusted = enable;
+ rc = otx2_set_vf_permissions(pf, vf, OTX2_TRUSTED_VF);
+
+ if (rc) {
+ pf->vf_configs[vf].trusted = !enable;
+ } else {
+ netdev_info(pf->netdev, "VF %d is %strusted\n",
+ vf, enable ? "" : "not ");
+ otx2_set_rx_mode(netdev);
+ }
+
+ return rc;
+}
+
+static const struct net_device_ops otx2_netdev_ops = {
+ .ndo_open = otx2_open,
+ .ndo_stop = otx2_stop,
+ .ndo_start_xmit = otx2_xmit,
+ .ndo_select_queue = otx2_select_queue,
+ .ndo_fix_features = otx2_fix_features,
+ .ndo_set_mac_address = otx2_set_mac_address,
+ .ndo_change_mtu = otx2_change_mtu,
+ .ndo_set_rx_mode = otx2_set_rx_mode,
+ .ndo_set_features = otx2_set_features,
+ .ndo_tx_timeout = otx2_tx_timeout,
+ .ndo_get_stats64 = otx2_get_stats64,
+ .ndo_eth_ioctl = otx2_ioctl,
+ .ndo_set_vf_mac = otx2_set_vf_mac,
+ .ndo_set_vf_vlan = otx2_set_vf_vlan,
+ .ndo_get_vf_config = otx2_get_vf_config,
+ .ndo_bpf = otx2_xdp,
+ .ndo_xdp_xmit = otx2_xdp_xmit,
+ .ndo_setup_tc = otx2_setup_tc,
+ .ndo_set_vf_trust = otx2_ndo_set_vf_trust,
+};
+
+static int otx2_wq_init(struct otx2_nic *pf)
+{
+ pf->otx2_wq = create_singlethread_workqueue("otx2_wq");
+ if (!pf->otx2_wq)
+ return -ENOMEM;
+
+ INIT_WORK(&pf->rx_mode_work, otx2_rx_mode_wrk_handler);
+ INIT_WORK(&pf->reset_task, otx2_reset_task);
+ return 0;
+}
+
+static int otx2_check_pf_usable(struct otx2_nic *nic)
+{
+ u64 rev;
+
+ rev = otx2_read64(nic, RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_RVUM));
+ rev = (rev >> 12) & 0xFF;
+ /* Check if the AF has set up the revision for the RVUM block;
+ * otherwise this driver's probe should be deferred
+ * until the AF driver comes up.
+ */
+ if (!rev) {
+ dev_warn(nic->dev,
+ "AF is not initialized, deferring probe\n");
+ return -EPROBE_DEFER;
+ }
+ return 0;
+}
+
+static int otx2_realloc_msix_vectors(struct otx2_nic *pf)
+{
+ struct otx2_hw *hw = &pf->hw;
+ int num_vec, err;
+
+ /* NPA interrupts are not registered, so allocate only
+ * up to the NIX vector offset.
+ */
+ num_vec = hw->nix_msixoff;
+ num_vec += NIX_LF_CINT_VEC_START + hw->max_queues;
+
+ otx2_disable_mbox_intr(pf);
+ pci_free_irq_vectors(hw->pdev);
+ err = pci_alloc_irq_vectors(hw->pdev, num_vec, num_vec, PCI_IRQ_MSIX);
+ if (err < 0) {
+ dev_err(pf->dev, "%s: Failed to realloc %d IRQ vectors\n",
+ __func__, num_vec);
+ return err;
+ }
+
+ return otx2_register_mbox_intr(pf, false);
+}
+
+static int otx2_sriov_vfcfg_init(struct otx2_nic *pf)
+{
+ int i;
+
+ pf->vf_configs = devm_kcalloc(pf->dev, pf->total_vfs,
+ sizeof(struct otx2_vf_config),
+ GFP_KERNEL);
+ if (!pf->vf_configs)
+ return -ENOMEM;
+
+ for (i = 0; i < pf->total_vfs; i++) {
+ pf->vf_configs[i].pf = pf;
+ pf->vf_configs[i].intf_down = true;
+ pf->vf_configs[i].trusted = false;
+ INIT_DELAYED_WORK(&pf->vf_configs[i].link_event_work,
+ otx2_vf_link_event_task);
+ }
+
+ return 0;
+}
+
+static void otx2_sriov_vfcfg_cleanup(struct otx2_nic *pf)
+{
+ int i;
+
+ if (!pf->vf_configs)
+ return;
+
+ for (i = 0; i < pf->total_vfs; i++) {
+ cancel_delayed_work_sync(&pf->vf_configs[i].link_event_work);
+ otx2_set_vf_permissions(pf, i, OTX2_RESET_VF_PERM);
+ }
+}
+
+static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct device *dev = &pdev->dev;
+ int err, qcount, qos_txqs;
+ struct net_device *netdev;
+ struct otx2_nic *pf;
+ struct otx2_hw *hw;
+ int num_vec;
+
+ err = pcim_enable_device(pdev);
+ if (err) {
+ dev_err(dev, "Failed to enable PCI device\n");
+ return err;
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ dev_err(dev, "PCI request regions failed 0x%x\n", err);
+ return err;
+ }
+
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
+ if (err) {
+ dev_err(dev, "DMA mask config failed, abort\n");
+ goto err_release_regions;
+ }
+
+ pci_set_master(pdev);
+
+ /* Set number of queues */
+ qcount = min_t(int, num_online_cpus(), OTX2_MAX_CQ_CNT);
+ qos_txqs = min_t(int, qcount, OTX2_QOS_MAX_LEAF_NODES);
+
+ netdev = alloc_etherdev_mqs(sizeof(*pf), qcount + qos_txqs, qcount);
+ if (!netdev) {
+ err = -ENOMEM;
+ goto err_release_regions;
+ }
+
+ pci_set_drvdata(pdev, netdev);
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+ pf = netdev_priv(netdev);
+ pf->netdev = netdev;
+ pf->pdev = pdev;
+ pf->dev = dev;
+ pf->total_vfs = pci_sriov_get_totalvfs(pdev);
+ pf->flags |= OTX2_FLAG_INTF_DOWN;
+
+ hw = &pf->hw;
+ hw->pdev = pdev;
+ hw->rx_queues = qcount;
+ hw->tx_queues = qcount;
+ hw->non_qos_queues = qcount;
+ hw->max_queues = qcount;
+ hw->rbuf_len = OTX2_DEFAULT_RBUF_LEN;
+ /* Use CQE of 128 byte descriptor size by default */
+ hw->xqe_size = 128;
+
+ num_vec = pci_msix_vec_count(pdev);
+ hw->irq_name = devm_kmalloc_array(&hw->pdev->dev, num_vec, NAME_SIZE,
+ GFP_KERNEL);
+ if (!hw->irq_name) {
+ err = -ENOMEM;
+ goto err_free_netdev;
+ }
+
+ hw->affinity_mask = devm_kcalloc(&hw->pdev->dev, num_vec,
+ sizeof(cpumask_var_t), GFP_KERNEL);
+ if (!hw->affinity_mask) {
+ err = -ENOMEM;
+ goto err_free_netdev;
+ }
+
+ /* Map CSRs */
+ pf->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
+ if (!pf->reg_base) {
+ dev_err(dev, "Unable to map physical function CSRs, aborting\n");
+ err = -ENOMEM;
+ goto err_free_netdev;
+ }
+
+ err = otx2_check_pf_usable(pf);
+ if (err)
+ goto err_free_netdev;
+
+ err = pci_alloc_irq_vectors(hw->pdev, RVU_PF_INT_VEC_CNT,
+ RVU_PF_INT_VEC_CNT, PCI_IRQ_MSIX);
+ if (err < 0) {
+ dev_err(dev, "%s: Failed to alloc %d IRQ vectors\n",
+ __func__, num_vec);
+ goto err_free_netdev;
+ }
+
+ otx2_setup_dev_hw_settings(pf);
+
+ /* Init PF <=> AF mailbox stuff */
+ err = otx2_pfaf_mbox_init(pf);
+ if (err)
+ goto err_free_irq_vectors;
+
+ /* Register mailbox interrupt */
+ err = otx2_register_mbox_intr(pf, true);
+ if (err)
+ goto err_mbox_destroy;
+
+ /* Request AF to attach NPA and NIX LFs to this PF.
+ * NIX and NPA LFs are needed for this PF to function as a NIC.
+ */
+ err = otx2_attach_npa_nix(pf);
+ if (err)
+ goto err_disable_mbox_intr;
+
+ err = otx2_realloc_msix_vectors(pf);
+ if (err)
+ goto err_detach_rsrc;
+
+ err = otx2_set_real_num_queues(netdev, hw->tx_queues, hw->rx_queues);
+ if (err)
+ goto err_detach_rsrc;
+
+ err = cn10k_lmtst_init(pf);
+ if (err)
+ goto err_detach_rsrc;
+
+ /* Assign default mac address */
+ otx2_get_mac_from_af(netdev);
+
+ /* Don't check for error. Proceed without ptp */
+ otx2_ptp_init(pf);
+
+ /* NPA's pool is a stack to which SW frees buffer pointers via Aura.
+ * HW allocates buffer pointer from stack and uses it for DMA'ing
+ * ingress packet. In some scenarios HW can free back allocated buffer
+ * pointers to pool. This makes it impossible for SW to maintain a
+ * parallel list where physical addresses of buffer pointers (IOVAs)
+ * given to HW can be saved for later reference.
+ *
+ * So the only way to convert Rx packet's buffer address is to use
+ * IOMMU's iova_to_phys() handler which translates the address by
+ * walking through the translation tables.
+ */
+ pf->iommu_domain = iommu_get_domain_for_dev(dev);
+
+ netdev->hw_features = (NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM | NETIF_F_RXHASH |
+ NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_GSO_UDP_L4);
+ netdev->features |= netdev->hw_features;
+
+ err = otx2_mcam_flow_init(pf);
+ if (err)
+ goto err_ptp_destroy;
+
+ err = cn10k_mcs_init(pf);
+ if (err)
+ goto err_del_mcam_entries;
+
+ if (pf->flags & OTX2_FLAG_NTUPLE_SUPPORT)
+ netdev->hw_features |= NETIF_F_NTUPLE;
+
+ if (pf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT)
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+
+ /* Support TSO on tag interface */
+ netdev->vlan_features |= netdev->features;
+ netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_STAG_TX;
+ if (pf->flags & OTX2_FLAG_RX_VLAN_SUPPORT)
+ netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_STAG_RX;
+ netdev->features |= netdev->hw_features;
+
+ /* HW supports tc offload, but it is mutually exclusive with n-tuple filters */
+ if (pf->flags & OTX2_FLAG_TC_FLOWER_SUPPORT)
+ netdev->hw_features |= NETIF_F_HW_TC;
+
+ netdev->hw_features |= NETIF_F_LOOPBACK | NETIF_F_RXALL;
+
+ netif_set_tso_max_segs(netdev, OTX2_MAX_GSO_SEGS);
+ netdev->watchdog_timeo = OTX2_TX_TIMEOUT;
+
+ netdev->netdev_ops = &otx2_netdev_ops;
+ netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT;
+
+ netdev->min_mtu = OTX2_MIN_MTU;
+ netdev->max_mtu = otx2_get_max_mtu(pf);
+
+ err = register_netdev(netdev);
+ if (err) {
+ dev_err(dev, "Failed to register netdevice\n");
+ goto err_mcs_free;
+ }
+
+ err = otx2_wq_init(pf);
+ if (err)
+ goto err_unreg_netdev;
+
+ otx2_set_ethtool_ops(netdev);
+
+ err = otx2_init_tc(pf);
+ if (err)
+ goto err_mcam_flow_del;
+
+ err = otx2_register_dl(pf);
+ if (err)
+ goto err_mcam_flow_del;
+
+ /* Initialize SR-IOV resources */
+ err = otx2_sriov_vfcfg_init(pf);
+ if (err)
+ goto err_pf_sriov_init;
+
+ /* Enable link notifications */
+ otx2_cgx_config_linkevents(pf, true);
+
+#ifdef CONFIG_DCB
+ err = otx2_dcbnl_set_ops(netdev);
+ if (err)
+ goto err_pf_sriov_init;
+#endif
+
+ otx2_qos_init(pf, qos_txqs);
+
+ return 0;
+
+err_pf_sriov_init:
+ otx2_shutdown_tc(pf);
+err_mcam_flow_del:
+ otx2_mcam_flow_del(pf);
+err_unreg_netdev:
+ unregister_netdev(netdev);
+err_mcs_free:
+ cn10k_mcs_free(pf);
+err_del_mcam_entries:
+ otx2_mcam_flow_del(pf);
+err_ptp_destroy:
+ otx2_ptp_destroy(pf);
+err_detach_rsrc:
+ if (pf->hw.lmt_info)
+ free_percpu(pf->hw.lmt_info);
+ if (test_bit(CN10K_LMTST, &pf->hw.cap_flag))
+ qmem_free(pf->dev, pf->dync_lmt);
+ otx2_detach_resources(&pf->mbox);
+err_disable_mbox_intr:
+ otx2_disable_mbox_intr(pf);
+err_mbox_destroy:
+ otx2_pfaf_mbox_destroy(pf);
+err_free_irq_vectors:
+ pci_free_irq_vectors(hw->pdev);
+err_free_netdev:
+ pci_set_drvdata(pdev, NULL);
+ free_netdev(netdev);
+err_release_regions:
+ pci_release_regions(pdev);
+ return err;
+}
+
+static void otx2_vf_link_event_task(struct work_struct *work)
+{
+ struct otx2_vf_config *config;
+ struct cgx_link_info_msg *req;
+ struct mbox_msghdr *msghdr;
+ struct otx2_nic *pf;
+ int vf_idx;
+
+ config = container_of(work, struct otx2_vf_config,
+ link_event_work.work);
+ vf_idx = config - config->pf->vf_configs;
+ pf = config->pf;
+
+ msghdr = otx2_mbox_alloc_msg_rsp(&pf->mbox_pfvf[0].mbox_up, vf_idx,
+ sizeof(*req), sizeof(struct msg_rsp));
+ if (!msghdr) {
+ dev_err(pf->dev, "Failed to create VF%d link event\n", vf_idx);
+ return;
+ }
+
+ req = (struct cgx_link_info_msg *)msghdr;
+ req->hdr.id = MBOX_MSG_CGX_LINK_EVENT;
+ req->hdr.sig = OTX2_MBOX_REQ_SIG;
+ memcpy(&req->link_info, &pf->linfo, sizeof(req->link_info));
+
+ otx2_sync_mbox_up_msg(&pf->mbox_pfvf[0], vf_idx);
+}
+
+static int otx2_sriov_enable(struct pci_dev *pdev, int numvfs)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct otx2_nic *pf = netdev_priv(netdev);
+ int ret;
+
+ /* Init PF <=> VF mailbox stuff */
+ ret = otx2_pfvf_mbox_init(pf, numvfs);
+ if (ret)
+ return ret;
+
+ ret = otx2_register_pfvf_mbox_intr(pf, numvfs);
+ if (ret)
+ goto free_mbox;
+
+ ret = otx2_pf_flr_init(pf, numvfs);
+ if (ret)
+ goto free_intr;
+
+ ret = otx2_register_flr_me_intr(pf, numvfs);
+ if (ret)
+ goto free_flr;
+
+ ret = pci_enable_sriov(pdev, numvfs);
+ if (ret)
+ goto free_flr_intr;
+
+ return numvfs;
+free_flr_intr:
+ otx2_disable_flr_me_intr(pf);
+free_flr:
+ otx2_flr_wq_destroy(pf);
+free_intr:
+ otx2_disable_pfvf_mbox_intr(pf, numvfs);
+free_mbox:
+ otx2_pfvf_mbox_destroy(pf);
+ return ret;
+}
+
+static int otx2_sriov_disable(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct otx2_nic *pf = netdev_priv(netdev);
+ int numvfs = pci_num_vf(pdev);
+
+ if (!numvfs)
+ return 0;
+
+ pci_disable_sriov(pdev);
+
+ otx2_disable_flr_me_intr(pf);
+ otx2_flr_wq_destroy(pf);
+ otx2_disable_pfvf_mbox_intr(pf, numvfs);
+ otx2_pfvf_mbox_destroy(pf);
+
+ return 0;
+}
+
+static int otx2_sriov_configure(struct pci_dev *pdev, int numvfs)
+{
+ if (numvfs == 0)
+ return otx2_sriov_disable(pdev);
+ else
+ return otx2_sriov_enable(pdev, numvfs);
+}
+
+static void otx2_remove(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct otx2_nic *pf;
+
+ if (!netdev)
+ return;
+
+ pf = netdev_priv(netdev);
+
+ pf->flags |= OTX2_FLAG_PF_SHUTDOWN;
+
+ if (pf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED)
+ otx2_config_hw_tx_tstamp(pf, false);
+ if (pf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED)
+ otx2_config_hw_rx_tstamp(pf, false);
+
+ /* Disable 802.3x pause frames */
+ if (pf->flags & OTX2_FLAG_RX_PAUSE_ENABLED ||
+ (pf->flags & OTX2_FLAG_TX_PAUSE_ENABLED)) {
+ pf->flags &= ~OTX2_FLAG_RX_PAUSE_ENABLED;
+ pf->flags &= ~OTX2_FLAG_TX_PAUSE_ENABLED;
+ otx2_config_pause_frm(pf);
+ }
+
+#ifdef CONFIG_DCB
+ /* Disable PFC config */
+ if (pf->pfc_en) {
+ pf->pfc_en = 0;
+ otx2_config_priority_flow_ctrl(pf);
+ }
+#endif
+ cancel_work_sync(&pf->reset_task);
+ /* Disable link notifications */
+ otx2_cgx_config_linkevents(pf, false);
+
+ otx2_unregister_dl(pf);
+ unregister_netdev(netdev);
+ cn10k_mcs_free(pf);
+ otx2_sriov_disable(pf->pdev);
+ otx2_sriov_vfcfg_cleanup(pf);
+ if (pf->otx2_wq)
+ destroy_workqueue(pf->otx2_wq);
+
+ otx2_ptp_destroy(pf);
+ otx2_mcam_flow_del(pf);
+ otx2_shutdown_tc(pf);
+ otx2_shutdown_qos(pf);
+ otx2_detach_resources(&pf->mbox);
+ if (pf->hw.lmt_info)
+ free_percpu(pf->hw.lmt_info);
+ if (test_bit(CN10K_LMTST, &pf->hw.cap_flag))
+ qmem_free(pf->dev, pf->dync_lmt);
+ otx2_disable_mbox_intr(pf);
+ otx2_pfaf_mbox_destroy(pf);
+ pci_free_irq_vectors(pf->pdev);
+ pci_set_drvdata(pdev, NULL);
+ free_netdev(netdev);
+
+ pci_release_regions(pdev);
+}
+
+static struct pci_driver otx2_pf_driver = {
+ .name = DRV_NAME,
+ .id_table = otx2_pf_id_table,
+ .probe = otx2_probe,
+ .shutdown = otx2_remove,
+ .remove = otx2_remove,
+ .sriov_configure = otx2_sriov_configure
+};
+
+static int __init otx2_rvupf_init_module(void)
+{
+ pr_info("%s: %s\n", DRV_NAME, DRV_STRING);
+
+ return pci_register_driver(&otx2_pf_driver);
+}
+
+static void __exit otx2_rvupf_cleanup_module(void)
+{
+ pci_unregister_driver(&otx2_pf_driver);
+}
+
+module_init(otx2_rvupf_init_module);
+module_exit(otx2_rvupf_cleanup_module);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
new file mode 100644
index 000000000..3a72b0793
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
@@ -0,0 +1,509 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Ethernet driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <linux/module.h>
+
+#include "otx2_common.h"
+#include "otx2_ptp.h"
+
+static bool is_tstmp_atomic_update_supported(struct otx2_ptp *ptp)
+{
+ struct ptp_get_cap_rsp *rsp;
+ struct msg_req *req;
+ int err;
+
+ if (!ptp->nic)
+ return false;
+
+ mutex_lock(&ptp->nic->mbox.lock);
+ req = otx2_mbox_alloc_msg_ptp_get_cap(&ptp->nic->mbox);
+ if (!req) {
+ mutex_unlock(&ptp->nic->mbox.lock);
+ return false;
+ }
+
+ err = otx2_sync_mbox_msg(&ptp->nic->mbox);
+ if (err) {
+ mutex_unlock(&ptp->nic->mbox.lock);
+ return false;
+ }
+ rsp = (struct ptp_get_cap_rsp *)otx2_mbox_get_rsp(&ptp->nic->mbox.mbox, 0,
+ &req->hdr);
+ mutex_unlock(&ptp->nic->mbox.lock);
+
+ if (IS_ERR(rsp))
+ return false;
+
+ if (rsp->cap & PTP_CAP_HW_ATOMIC_UPDATE)
+ return true;
+
+ return false;
+}
+
+static int otx2_ptp_hw_adjtime(struct ptp_clock_info *ptp_info, s64 delta)
+{
+ struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
+ ptp_info);
+ struct otx2_nic *pfvf = ptp->nic;
+ struct ptp_req *req;
+ int rc;
+
+ if (!ptp->nic)
+ return -ENODEV;
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_ptp_op(&ptp->nic->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+ req->op = PTP_OP_ADJTIME;
+ req->delta = delta;
+ rc = otx2_sync_mbox_msg(&ptp->nic->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
+
+ return rc;
+}
+
+static u64 otx2_ptp_get_clock(struct otx2_ptp *ptp)
+{
+ struct ptp_req *req;
+ struct ptp_rsp *rsp;
+ int err;
+
+ if (!ptp->nic)
+ return 0;
+
+ req = otx2_mbox_alloc_msg_ptp_op(&ptp->nic->mbox);
+ if (!req)
+ return 0;
+
+ req->op = PTP_OP_GET_CLOCK;
+
+ err = otx2_sync_mbox_msg(&ptp->nic->mbox);
+ if (err)
+ return 0;
+
+ rsp = (struct ptp_rsp *)otx2_mbox_get_rsp(&ptp->nic->mbox.mbox, 0,
+ &req->hdr);
+ if (IS_ERR(rsp))
+ return 0;
+
+ return rsp->clk;
+}
+
+static int otx2_ptp_hw_gettime(struct ptp_clock_info *ptp_info,
+ struct timespec64 *ts)
+{
+ struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
+ ptp_info);
+ u64 tstamp;
+
+ tstamp = otx2_ptp_get_clock(ptp);
+
+ *ts = ns_to_timespec64(tstamp);
+ return 0;
+}
+
+static int otx2_ptp_hw_settime(struct ptp_clock_info *ptp_info,
+ const struct timespec64 *ts)
+{
+ struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
+ ptp_info);
+ struct otx2_nic *pfvf = ptp->nic;
+ struct ptp_req *req;
+ u64 nsec;
+ int rc;
+
+ if (!ptp->nic)
+ return -ENODEV;
+
+ nsec = timespec64_to_ns(ts);
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_ptp_op(&ptp->nic->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ req->op = PTP_OP_SET_CLOCK;
+ req->clk = nsec;
+ rc = otx2_sync_mbox_msg(&ptp->nic->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
+
+ return rc;
+}
+
+static int otx2_ptp_adjfine(struct ptp_clock_info *ptp_info, long scaled_ppm)
+{
+ struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
+ ptp_info);
+ struct ptp_req *req;
+
+ if (!ptp->nic)
+ return -ENODEV;
+
+ req = otx2_mbox_alloc_msg_ptp_op(&ptp->nic->mbox);
+ if (!req)
+ return -ENOMEM;
+
+ req->op = PTP_OP_ADJFINE;
+ req->scaled_ppm = scaled_ppm;
+
+ return otx2_sync_mbox_msg(&ptp->nic->mbox);
+}
+
+static int ptp_set_thresh(struct otx2_ptp *ptp, u64 thresh)
+{
+ struct ptp_req *req;
+
+ if (!ptp->nic)
+ return -ENODEV;
+
+ req = otx2_mbox_alloc_msg_ptp_op(&ptp->nic->mbox);
+ if (!req)
+ return -ENOMEM;
+
+ req->op = PTP_OP_SET_THRESH;
+ req->thresh = thresh;
+
+ return otx2_sync_mbox_msg(&ptp->nic->mbox);
+}
+
+static int ptp_extts_on(struct otx2_ptp *ptp, int on)
+{
+ struct ptp_req *req;
+
+ if (!ptp->nic)
+ return -ENODEV;
+
+ req = otx2_mbox_alloc_msg_ptp_op(&ptp->nic->mbox);
+ if (!req)
+ return -ENOMEM;
+
+ req->op = PTP_OP_EXTTS_ON;
+ req->extts_on = on;
+
+ return otx2_sync_mbox_msg(&ptp->nic->mbox);
+}
+
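+/* cyclecounter read hook: returns the raw PTP clock value read via the AF */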
+static u64 ptp_cc_read(const struct cyclecounter *cc)
+{
+ struct otx2_ptp *ptp = container_of(cc, struct otx2_ptp, cycle_counter);
+
+ return otx2_ptp_get_clock(ptp);
+}
+
+static u64 ptp_tstmp_read(struct otx2_ptp *ptp)
+{
+ struct ptp_req *req;
+ struct ptp_rsp *rsp;
+ int err;
+
+ if (!ptp->nic)
+ return 0;
+
+ req = otx2_mbox_alloc_msg_ptp_op(&ptp->nic->mbox);
+ if (!req)
+ return 0;
+
+ req->op = PTP_OP_GET_TSTMP;
+
+ err = otx2_sync_mbox_msg(&ptp->nic->mbox);
+ if (err)
+ return 0;
+
+ rsp = (struct ptp_rsp *)otx2_mbox_get_rsp(&ptp->nic->mbox.mbox, 0,
+ &req->hdr);
+ if (IS_ERR(rsp))
+ return 0;
+
+ return rsp->clk;
+}
+
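+/* Clock callbacks used when atomic updates are not supported; time is
+ * maintained in a software timecounter layered over the raw clock.
+ */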
+static int otx2_ptp_tc_adjtime(struct ptp_clock_info *ptp_info, s64 delta)
+{
+ struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
+ ptp_info);
+ struct otx2_nic *pfvf = ptp->nic;
+
+ mutex_lock(&pfvf->mbox.lock);
+ timecounter_adjtime(&ptp->time_counter, delta);
+ mutex_unlock(&pfvf->mbox.lock);
+
+ return 0;
+}
+
+static int otx2_ptp_tc_gettime(struct ptp_clock_info *ptp_info,
+ struct timespec64 *ts)
+{
+ struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
+ ptp_info);
+ u64 tstamp;
+
+ mutex_lock(&ptp->nic->mbox.lock);
+ tstamp = timecounter_read(&ptp->time_counter);
+ mutex_unlock(&ptp->nic->mbox.lock);
+ *ts = ns_to_timespec64(tstamp);
+
+ return 0;
+}
+
+static int otx2_ptp_tc_settime(struct ptp_clock_info *ptp_info,
+ const struct timespec64 *ts)
+{
+ struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
+ ptp_info);
+ u64 nsec;
+
+ nsec = timespec64_to_ns(ts);
+
+ mutex_lock(&ptp->nic->mbox.lock);
+ timecounter_init(&ptp->time_counter, &ptp->cycle_counter, nsec);
+ mutex_unlock(&ptp->nic->mbox.lock);
+
+ return 0;
+}
+
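+/* Only the EXTTS function (or none) is supported on the single PTP pin */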
+static int otx2_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin,
+ enum ptp_pin_function func, unsigned int chan)
+{
+ switch (func) {
+ case PTP_PF_NONE:
+ case PTP_PF_EXTTS:
+ break;
+ case PTP_PF_PEROUT:
+ case PTP_PF_PHYSYNC:
+ return -1;
+ }
+ return 0;
+}
+
+static u64 otx2_ptp_hw_tstamp2time(const struct timecounter *time_counter, u64 tstamp)
+{
+ /* On HW which supports atomic updates, timecounter is not initialized */
+ return tstamp;
+}
+
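+/* Polling worker: report a newly captured external timestamp as a
+ * PTP_CLOCK_EXTTS event and refresh the capture threshold.
+ */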
+static void otx2_ptp_extts_check(struct work_struct *work)
+{
+ struct otx2_ptp *ptp = container_of(work, struct otx2_ptp,
+ extts_work.work);
+ struct ptp_clock_event event;
+ u64 tstmp, new_thresh;
+
+ mutex_lock(&ptp->nic->mbox.lock);
+ tstmp = ptp_tstmp_read(ptp);
+ mutex_unlock(&ptp->nic->mbox.lock);
+
+ if (tstmp != ptp->last_extts) {
+ event.type = PTP_CLOCK_EXTTS;
+ event.index = 0;
+ event.timestamp = ptp->ptp_tstamp2nsec(&ptp->time_counter, tstmp);
+ ptp_clock_event(ptp->ptp_clock, &event);
+ new_thresh = tstmp % 500000000;
+ if (ptp->thresh != new_thresh) {
+ mutex_lock(&ptp->nic->mbox.lock);
+ ptp_set_thresh(ptp, new_thresh);
+ mutex_unlock(&ptp->nic->mbox.lock);
+ ptp->thresh = new_thresh;
+ }
+ ptp->last_extts = tstmp;
+ }
+ schedule_delayed_work(&ptp->extts_work, msecs_to_jiffies(200));
+}
+
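+/* Periodic worker: snapshot the current PTP time (ptp->tstamp) and its
+ * nanosecond remainder (ptp->base_ns).
+ */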
+static void otx2_sync_tstamp(struct work_struct *work)
+{
+ struct otx2_ptp *ptp = container_of(work, struct otx2_ptp,
+ synctstamp_work.work);
+ struct otx2_nic *pfvf = ptp->nic;
+ u64 tstamp;
+
+ mutex_lock(&pfvf->mbox.lock);
+ tstamp = otx2_ptp_get_clock(ptp);
+ mutex_unlock(&pfvf->mbox.lock);
+
+ ptp->tstamp = ptp->ptp_tstamp2nsec(&ptp->time_counter, tstamp);
+ ptp->base_ns = tstamp % NSEC_PER_SEC;
+
+ schedule_delayed_work(&ptp->synctstamp_work, msecs_to_jiffies(250));
+}
+
+static int otx2_ptp_enable(struct ptp_clock_info *ptp_info,
+ struct ptp_clock_request *rq, int on)
+{
+ struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
+ ptp_info);
+ int pin;
+
+ if (!ptp->nic)
+ return -ENODEV;
+
+ switch (rq->type) {
+ case PTP_CLK_REQ_EXTTS:
+ pin = ptp_find_pin(ptp->ptp_clock, PTP_PF_EXTTS,
+ rq->extts.index);
+ if (pin < 0)
+ return -EBUSY;
+ if (on) {
+ ptp_extts_on(ptp, on);
+ schedule_delayed_work(&ptp->extts_work, msecs_to_jiffies(200));
+ } else {
+ ptp_extts_on(ptp, on);
+ cancel_delayed_work_sync(&ptp->extts_work);
+ }
+ return 0;
+ default:
+ break;
+ }
+ return -EOPNOTSUPP;
+}
+
+int otx2_ptp_init(struct otx2_nic *pfvf)
+{
+ struct otx2_ptp *ptp_ptr;
+ struct cyclecounter *cc;
+ struct ptp_req *req;
+ int err;
+
+ if (is_otx2_lbkvf(pfvf->pdev)) {
+ pfvf->ptp = NULL;
+ return 0;
+ }
+
+ mutex_lock(&pfvf->mbox.lock);
+ /* check if PTP block is available */
+ req = otx2_mbox_alloc_msg_ptp_op(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ req->op = PTP_OP_GET_CLOCK;
+
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
+ }
+ mutex_unlock(&pfvf->mbox.lock);
+
+ ptp_ptr = kzalloc(sizeof(*ptp_ptr), GFP_KERNEL);
+ if (!ptp_ptr) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ ptp_ptr->nic = pfvf;
+
+ snprintf(ptp_ptr->extts_config.name, sizeof(ptp_ptr->extts_config.name), "TSTAMP");
+ ptp_ptr->extts_config.index = 0;
+ ptp_ptr->extts_config.func = PTP_PF_NONE;
+
+ ptp_ptr->ptp_info = (struct ptp_clock_info) {
+ .owner = THIS_MODULE,
+ .name = "OcteonTX2 PTP",
+ .max_adj = 1000000000ull,
+ .n_ext_ts = 1,
+ .n_pins = 1,
+ .pps = 0,
+ .pin_config = &ptp_ptr->extts_config,
+ .adjfine = otx2_ptp_adjfine,
+ .enable = otx2_ptp_enable,
+ .verify = otx2_ptp_verify_pin,
+ };
+
+ /* Check whether hardware supports atomic updates to timestamp */
+ if (is_tstmp_atomic_update_supported(ptp_ptr)) {
+ ptp_ptr->ptp_info.adjtime = otx2_ptp_hw_adjtime;
+ ptp_ptr->ptp_info.gettime64 = otx2_ptp_hw_gettime;
+ ptp_ptr->ptp_info.settime64 = otx2_ptp_hw_settime;
+
+ ptp_ptr->ptp_tstamp2nsec = otx2_ptp_hw_tstamp2time;
+ } else {
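+ /* No atomic update support: fall back to a software timecounter */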
+ ptp_ptr->ptp_info.adjtime = otx2_ptp_tc_adjtime;
+ ptp_ptr->ptp_info.gettime64 = otx2_ptp_tc_gettime;
+ ptp_ptr->ptp_info.settime64 = otx2_ptp_tc_settime;
+
+ cc = &ptp_ptr->cycle_counter;
+ cc->read = ptp_cc_read;
+ cc->mask = CYCLECOUNTER_MASK(64);
+ cc->mult = 1;
+ cc->shift = 0;
+ ptp_ptr->ptp_tstamp2nsec = timecounter_cyc2time;
+
+ timecounter_init(&ptp_ptr->time_counter, &ptp_ptr->cycle_counter,
+ ktime_to_ns(ktime_get_real()));
+ }
+
+ INIT_DELAYED_WORK(&ptp_ptr->extts_work, otx2_ptp_extts_check);
+
+ ptp_ptr->ptp_clock = ptp_clock_register(&ptp_ptr->ptp_info, pfvf->dev);
+ if (IS_ERR_OR_NULL(ptp_ptr->ptp_clock)) {
+ err = ptp_ptr->ptp_clock ?
+ PTR_ERR(ptp_ptr->ptp_clock) : -ENODEV;
+ kfree(ptp_ptr);
+ goto error;
+ }
+
+ if (is_dev_otx2(pfvf->pdev)) {
+ ptp_ptr->convert_rx_ptp_tstmp = &otx2_ptp_convert_rx_timestamp;
+ ptp_ptr->convert_tx_ptp_tstmp = &otx2_ptp_convert_tx_timestamp;
+ } else {
+ ptp_ptr->convert_rx_ptp_tstmp = &cn10k_ptp_convert_timestamp;
+ ptp_ptr->convert_tx_ptp_tstmp = &cn10k_ptp_convert_timestamp;
+ }
+
+ INIT_DELAYED_WORK(&ptp_ptr->synctstamp_work, otx2_sync_tstamp);
+
+ pfvf->ptp = ptp_ptr;
+
+error:
+ return err;
+}
+EXPORT_SYMBOL_GPL(otx2_ptp_init);
+
+void otx2_ptp_destroy(struct otx2_nic *pfvf)
+{
+ struct otx2_ptp *ptp = pfvf->ptp;
+
+ if (!ptp)
+ return;
+
+ cancel_delayed_work(&pfvf->ptp->synctstamp_work);
+
+ ptp_clock_unregister(ptp->ptp_clock);
+ kfree(ptp);
+ pfvf->ptp = NULL;
+}
+EXPORT_SYMBOL_GPL(otx2_ptp_destroy);
+
+int otx2_ptp_clock_index(struct otx2_nic *pfvf)
+{
+ if (!pfvf->ptp)
+ return -ENODEV;
+
+ return ptp_clock_index(pfvf->ptp->ptp_clock);
+}
+EXPORT_SYMBOL_GPL(otx2_ptp_clock_index);
+
+int otx2_ptp_tstamp2time(struct otx2_nic *pfvf, u64 tstamp, u64 *tsns)
+{
+ if (!pfvf->ptp)
+ return -ENODEV;
+
+ *tsns = pfvf->ptp->ptp_tstamp2nsec(&pfvf->ptp->time_counter, tstamp);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(otx2_ptp_tstamp2time);
+
+MODULE_AUTHOR("Sunil Goutham <sgoutham@marvell.com>");
+MODULE_DESCRIPTION("Marvell RVU NIC PTP Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.h
new file mode 100644
index 000000000..7ff41927c
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell RVU Ethernet driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#ifndef OTX2_PTP_H
+#define OTX2_PTP_H
+
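+/* OcteonTx2 Rx timestamps are stored big-endian and must be byte-swapped */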
+static inline u64 otx2_ptp_convert_rx_timestamp(u64 timestamp)
+{
+ return be64_to_cpu(*(__be64 *)&timestamp);
+}
+
+static inline u64 otx2_ptp_convert_tx_timestamp(u64 timestamp)
+{
+ return timestamp;
+}
+
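+/* CN10K timestamps carry seconds in the upper 32 bits and nanoseconds in
+ * the lower 32 bits; convert to a plain nanosecond count.
+ */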
+static inline u64 cn10k_ptp_convert_timestamp(u64 timestamp)
+{
+ return ((timestamp >> 32) * NSEC_PER_SEC) + (timestamp & 0xFFFFFFFFUL);
+}
+
+int otx2_ptp_init(struct otx2_nic *pfvf);
+void otx2_ptp_destroy(struct otx2_nic *pfvf);
+
+int otx2_ptp_clock_index(struct otx2_nic *pfvf);
+int otx2_ptp_tstamp2time(struct otx2_nic *pfvf, u64 tstamp, u64 *tsns);
+
+#endif
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h
new file mode 100644
index 000000000..45a32e4b4
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h
@@ -0,0 +1,175 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell RVU Ethernet driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#ifndef OTX2_REG_H
+#define OTX2_REG_H
+
+#include <rvu_struct.h>
+
+/* RVU PF registers */
+#define RVU_PF_VFX_PFVF_MBOX0 (0x00000)
+#define RVU_PF_VFX_PFVF_MBOX1 (0x00008)
+#define RVU_PF_VFX_PFVF_MBOXX(a, b) (0x0 | (a) << 12 | (b) << 3)
+#define RVU_PF_VF_BAR4_ADDR (0x10)
+#define RVU_PF_BLOCK_ADDRX_DISC(a) (0x200 | (a) << 3)
+#define RVU_PF_VFME_STATUSX(a) (0x800 | (a) << 3)
+#define RVU_PF_VFTRPENDX(a) (0x820 | (a) << 3)
+#define RVU_PF_VFTRPEND_W1SX(a) (0x840 | (a) << 3)
+#define RVU_PF_VFPF_MBOX_INTX(a) (0x880 | (a) << 3)
+#define RVU_PF_VFPF_MBOX_INT_W1SX(a) (0x8A0 | (a) << 3)
+#define RVU_PF_VFPF_MBOX_INT_ENA_W1SX(a) (0x8C0 | (a) << 3)
+#define RVU_PF_VFPF_MBOX_INT_ENA_W1CX(a) (0x8E0 | (a) << 3)
+#define RVU_PF_VFFLR_INTX(a) (0x900 | (a) << 3)
+#define RVU_PF_VFFLR_INT_W1SX(a) (0x920 | (a) << 3)
+#define RVU_PF_VFFLR_INT_ENA_W1SX(a) (0x940 | (a) << 3)
+#define RVU_PF_VFFLR_INT_ENA_W1CX(a) (0x960 | (a) << 3)
+#define RVU_PF_VFME_INTX(a) (0x980 | (a) << 3)
+#define RVU_PF_VFME_INT_W1SX(a) (0x9A0 | (a) << 3)
+#define RVU_PF_VFME_INT_ENA_W1SX(a) (0x9C0 | (a) << 3)
+#define RVU_PF_VFME_INT_ENA_W1CX(a) (0x9E0 | (a) << 3)
+#define RVU_PF_PFAF_MBOX0 (0xC00)
+#define RVU_PF_PFAF_MBOX1 (0xC08)
+#define RVU_PF_PFAF_MBOXX(a) (0xC00 | (a) << 3)
+#define RVU_PF_INT (0xc20)
+#define RVU_PF_INT_W1S (0xc28)
+#define RVU_PF_INT_ENA_W1S (0xc30)
+#define RVU_PF_INT_ENA_W1C (0xc38)
+#define RVU_PF_MSIX_VECX_ADDR(a) (0x000 | (a) << 4)
+#define RVU_PF_MSIX_VECX_CTL(a) (0x008 | (a) << 4)
+#define RVU_PF_MSIX_PBAX(a) (0xF0000 | (a) << 3)
+#define RVU_PF_VF_MBOX_ADDR (0xC40)
+#define RVU_PF_LMTLINE_ADDR (0xC48)
+
+/* RVU VF registers */
+#define RVU_VF_VFPF_MBOX0 (0x00000)
+#define RVU_VF_VFPF_MBOX1 (0x00008)
+#define RVU_VF_VFPF_MBOXX(a) (0x00 | (a) << 3)
+#define RVU_VF_INT (0x20)
+#define RVU_VF_INT_W1S (0x28)
+#define RVU_VF_INT_ENA_W1S (0x30)
+#define RVU_VF_INT_ENA_W1C (0x38)
+#define RVU_VF_BLOCK_ADDRX_DISC(a) (0x200 | (a) << 3)
+#define RVU_VF_MSIX_VECX_ADDR(a) (0x000 | (a) << 4)
+#define RVU_VF_MSIX_VECX_CTL(a) (0x008 | (a) << 4)
+#define RVU_VF_MSIX_PBAX(a) (0xF0000 | (a) << 3)
+#define RVU_VF_MBOX_REGION (0xC0000)
+
+#define RVU_FUNC_BLKADDR_SHIFT 20
+#define RVU_FUNC_BLKADDR_MASK 0x1FULL
+
+/* NPA LF registers */
+#define NPA_LFBASE (BLKTYPE_NPA << RVU_FUNC_BLKADDR_SHIFT)
+#define NPA_LF_AURA_OP_ALLOCX(a) (NPA_LFBASE | 0x10 | (a) << 3)
+#define NPA_LF_AURA_OP_FREE0 (NPA_LFBASE | 0x20)
+#define NPA_LF_AURA_OP_FREE1 (NPA_LFBASE | 0x28)
+#define NPA_LF_AURA_OP_CNT (NPA_LFBASE | 0x30)
+#define NPA_LF_AURA_OP_LIMIT (NPA_LFBASE | 0x50)
+#define NPA_LF_AURA_OP_INT (NPA_LFBASE | 0x60)
+#define NPA_LF_AURA_OP_THRESH (NPA_LFBASE | 0x70)
+#define NPA_LF_POOL_OP_PC (NPA_LFBASE | 0x100)
+#define NPA_LF_POOL_OP_AVAILABLE (NPA_LFBASE | 0x110)
+#define NPA_LF_POOL_OP_PTR_START0 (NPA_LFBASE | 0x120)
+#define NPA_LF_POOL_OP_PTR_START1 (NPA_LFBASE | 0x128)
+#define NPA_LF_POOL_OP_PTR_END0 (NPA_LFBASE | 0x130)
+#define NPA_LF_POOL_OP_PTR_END1 (NPA_LFBASE | 0x138)
+#define NPA_LF_POOL_OP_INT (NPA_LFBASE | 0x160)
+#define NPA_LF_POOL_OP_THRESH (NPA_LFBASE | 0x170)
+#define NPA_LF_ERR_INT (NPA_LFBASE | 0x200)
+#define NPA_LF_ERR_INT_W1S (NPA_LFBASE | 0x208)
+#define NPA_LF_ERR_INT_ENA_W1C (NPA_LFBASE | 0x210)
+#define NPA_LF_ERR_INT_ENA_W1S (NPA_LFBASE | 0x218)
+#define NPA_LF_RAS (NPA_LFBASE | 0x220)
+#define NPA_LF_RAS_W1S (NPA_LFBASE | 0x228)
+#define NPA_LF_RAS_ENA_W1C (NPA_LFBASE | 0x230)
+#define NPA_LF_RAS_ENA_W1S (NPA_LFBASE | 0x238)
+#define NPA_LF_QINTX_CNT(a) (NPA_LFBASE | 0x300 | (a) << 12)
+#define NPA_LF_QINTX_INT(a) (NPA_LFBASE | 0x310 | (a) << 12)
+#define NPA_LF_QINTX_INT_W1S(a) (NPA_LFBASE | 0x318 | (a) << 12)
+#define NPA_LF_QINTX_ENA_W1S(a) (NPA_LFBASE | 0x320 | (a) << 12)
+#define NPA_LF_QINTX_ENA_W1C(a) (NPA_LFBASE | 0x330 | (a) << 12)
+#define NPA_LF_AURA_BATCH_FREE0 (NPA_LFBASE | 0x400)
+
+/* NIX LF registers */
+#define NIX_LFBASE (BLKTYPE_NIX << RVU_FUNC_BLKADDR_SHIFT)
+#define NIX_LF_RX_SECRETX(a) (NIX_LFBASE | 0x0 | (a) << 3)
+#define NIX_LF_CFG (NIX_LFBASE | 0x100)
+#define NIX_LF_GINT (NIX_LFBASE | 0x200)
+#define NIX_LF_GINT_W1S (NIX_LFBASE | 0x208)
+#define NIX_LF_GINT_ENA_W1C (NIX_LFBASE | 0x210)
+#define NIX_LF_GINT_ENA_W1S (NIX_LFBASE | 0x218)
+#define NIX_LF_ERR_INT (NIX_LFBASE | 0x220)
+#define NIX_LF_ERR_INT_W1S (NIX_LFBASE | 0x228)
+#define NIX_LF_ERR_INT_ENA_W1C (NIX_LFBASE | 0x230)
+#define NIX_LF_ERR_INT_ENA_W1S (NIX_LFBASE | 0x238)
+#define NIX_LF_RAS (NIX_LFBASE | 0x240)
+#define NIX_LF_RAS_W1S (NIX_LFBASE | 0x248)
+#define NIX_LF_RAS_ENA_W1C (NIX_LFBASE | 0x250)
+#define NIX_LF_RAS_ENA_W1S (NIX_LFBASE | 0x258)
+#define NIX_LF_SQ_OP_ERR_DBG (NIX_LFBASE | 0x260)
+#define NIX_LF_MNQ_ERR_DBG (NIX_LFBASE | 0x270)
+#define NIX_LF_SEND_ERR_DBG (NIX_LFBASE | 0x280)
+#define NIX_LF_TX_STATX(a) (NIX_LFBASE | 0x300 | (a) << 3)
+#define NIX_LF_RX_STATX(a) (NIX_LFBASE | 0x400 | (a) << 3)
+#define NIX_LF_OP_SENDX(a) (NIX_LFBASE | 0x800 | (a) << 3)
+#define NIX_LF_RQ_OP_INT (NIX_LFBASE | 0x900)
+#define NIX_LF_RQ_OP_OCTS (NIX_LFBASE | 0x910)
+#define NIX_LF_RQ_OP_PKTS (NIX_LFBASE | 0x920)
+#define NIX_LF_OP_IPSEC_DYNO_CN (NIX_LFBASE | 0x980)
+#define NIX_LF_SQ_OP_INT (NIX_LFBASE | 0xa00)
+#define NIX_LF_SQ_OP_OCTS (NIX_LFBASE | 0xa10)
+#define NIX_LF_SQ_OP_PKTS (NIX_LFBASE | 0xa20)
+#define NIX_LF_SQ_OP_STATUS (NIX_LFBASE | 0xa30)
+#define NIX_LF_CQ_OP_INT (NIX_LFBASE | 0xb00)
+#define NIX_LF_CQ_OP_DOOR (NIX_LFBASE | 0xb30)
+#define NIX_LF_CQ_OP_STATUS (NIX_LFBASE | 0xb40)
+#define NIX_LF_QINTX_CNT(a) (NIX_LFBASE | 0xC00 | (a) << 12)
+#define NIX_LF_QINTX_INT(a) (NIX_LFBASE | 0xC10 | (a) << 12)
+#define NIX_LF_QINTX_INT_W1S(a) (NIX_LFBASE | 0xC18 | (a) << 12)
+#define NIX_LF_QINTX_ENA_W1S(a) (NIX_LFBASE | 0xC20 | (a) << 12)
+#define NIX_LF_QINTX_ENA_W1C(a) (NIX_LFBASE | 0xC30 | (a) << 12)
+#define NIX_LF_CINTX_CNT(a) (NIX_LFBASE | 0xD00 | (a) << 12)
+#define NIX_LF_CINTX_WAIT(a) (NIX_LFBASE | 0xD10 | (a) << 12)
+#define NIX_LF_CINTX_INT(a) (NIX_LFBASE | 0xD20 | (a) << 12)
+#define NIX_LF_CINTX_INT_W1S(a) (NIX_LFBASE | 0xD30 | (a) << 12)
+#define NIX_LF_CINTX_ENA_W1S(a) (NIX_LFBASE | 0xD40 | (a) << 12)
+#define NIX_LF_CINTX_ENA_W1C(a) (NIX_LFBASE | 0xD50 | (a) << 12)
+
+/* NIX AF transmit scheduler registers */
+#define NIX_AF_SMQX_CFG(a) (0x700 | (a) << 16)
+#define NIX_AF_TL1X_SCHEDULE(a) (0xC00 | (a) << 16)
+#define NIX_AF_TL1X_CIR(a) (0xC20 | (a) << 16)
+#define NIX_AF_TL1X_TOPOLOGY(a) (0xC80 | (a) << 16)
+#define NIX_AF_TL2X_PARENT(a) (0xE88 | (a) << 16)
+#define NIX_AF_TL2X_SCHEDULE(a) (0xE00 | (a) << 16)
+#define NIX_AF_TL2X_TOPOLOGY(a) (0xE80 | (a) << 16)
+#define NIX_AF_TL2X_CIR(a) (0xE20 | (a) << 16)
+#define NIX_AF_TL2X_PIR(a) (0xE30 | (a) << 16)
+#define NIX_AF_TL3X_PARENT(a) (0x1088 | (a) << 16)
+#define NIX_AF_TL3X_SCHEDULE(a) (0x1000 | (a) << 16)
+#define NIX_AF_TL3X_SHAPE(a) (0x1010 | (a) << 16)
+#define NIX_AF_TL3X_CIR(a) (0x1020 | (a) << 16)
+#define NIX_AF_TL3X_PIR(a) (0x1030 | (a) << 16)
+#define NIX_AF_TL3X_TOPOLOGY(a) (0x1080 | (a) << 16)
+#define NIX_AF_TL4X_PARENT(a) (0x1288 | (a) << 16)
+#define NIX_AF_TL4X_SCHEDULE(a) (0x1200 | (a) << 16)
+#define NIX_AF_TL4X_SHAPE(a) (0x1210 | (a) << 16)
+#define NIX_AF_TL4X_CIR(a) (0x1220 | (a) << 16)
+#define NIX_AF_TL4X_PIR(a) (0x1230 | (a) << 16)
+#define NIX_AF_TL4X_TOPOLOGY(a) (0x1280 | (a) << 16)
+#define NIX_AF_MDQX_SCHEDULE(a) (0x1400 | (a) << 16)
+#define NIX_AF_MDQX_SHAPE(a) (0x1410 | (a) << 16)
+#define NIX_AF_MDQX_CIR(a) (0x1420 | (a) << 16)
+#define NIX_AF_MDQX_PIR(a) (0x1430 | (a) << 16)
+#define NIX_AF_MDQX_PARENT(a) (0x1480 | (a) << 16)
+#define NIX_AF_TL3_TL2X_LINKX_CFG(a, b) (0x1700 | (a) << 16 | (b) << 3)
+
+/* LMT LF registers */
+#define LMT_LFBASE BIT_ULL(RVU_FUNC_BLKADDR_SHIFT)
+#define LMT_LF_LMTLINEX(a) (LMT_LFBASE | 0x000 | (a) << 12)
+#define LMT_LF_LMTCANCEL (LMT_LFBASE | 0x400)
+
+#endif /* OTX2_REG_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h
new file mode 100644
index 000000000..4e5899d8f
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h
@@ -0,0 +1,341 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell RVU Ethernet driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#ifndef OTX2_STRUCT_H
+#define OTX2_STRUCT_H
+
+/* NIX WQE/CQE size 128 byte or 512 byte */
+enum nix_cqesz_e {
+ NIX_XQESZ_W64 = 0x0,
+ NIX_XQESZ_W16 = 0x1,
+};
+
+enum nix_sqes_e {
+ NIX_SQESZ_W16 = 0x0,
+ NIX_SQESZ_W8 = 0x1,
+};
+
+enum nix_send_ldtype {
+ NIX_SEND_LDTYPE_LDD = 0x0,
+ NIX_SEND_LDTYPE_LDT = 0x1,
+ NIX_SEND_LDTYPE_LDWB = 0x2,
+};
+
+/* CSUM offload */
+enum nix_sendl3type {
+ NIX_SENDL3TYPE_NONE = 0x0,
+ NIX_SENDL3TYPE_IP4 = 0x2,
+ NIX_SENDL3TYPE_IP4_CKSUM = 0x3,
+ NIX_SENDL3TYPE_IP6 = 0x4,
+};
+
+enum nix_sendl4type {
+ NIX_SENDL4TYPE_NONE,
+ NIX_SENDL4TYPE_TCP_CKSUM,
+ NIX_SENDL4TYPE_SCTP_CKSUM,
+ NIX_SENDL4TYPE_UDP_CKSUM,
+};
+
+/* NIX wqe/cqe types */
+enum nix_xqe_type {
+ NIX_XQE_TYPE_INVALID = 0x0,
+ NIX_XQE_TYPE_RX = 0x1,
+ NIX_XQE_TYPE_RX_IPSECS = 0x2,
+ NIX_XQE_TYPE_RX_IPSECH = 0x3,
+ NIX_XQE_TYPE_RX_IPSECD = 0x4,
+ NIX_XQE_TYPE_SEND = 0x8,
+};
+
+/* NIX CQE/SQE subdescriptor types */
+enum nix_subdc {
+ NIX_SUBDC_NOP = 0x0,
+ NIX_SUBDC_EXT = 0x1,
+ NIX_SUBDC_CRC = 0x2,
+ NIX_SUBDC_IMM = 0x3,
+ NIX_SUBDC_SG = 0x4,
+ NIX_SUBDC_MEM = 0x5,
+ NIX_SUBDC_JUMP = 0x6,
+ NIX_SUBDC_WORK = 0x7,
+ NIX_SUBDC_SOD = 0xf,
+};
+
+/* Algorithm for nix_sqe_mem_s header (value of the `alg` field) */
+enum nix_sendmemalg {
+ NIX_SENDMEMALG_E_SET = 0x0,
+ NIX_SENDMEMALG_E_SETTSTMP = 0x1,
+ NIX_SENDMEMALG_E_SETRSLT = 0x2,
+ NIX_SENDMEMALG_E_ADD = 0x8,
+ NIX_SENDMEMALG_E_SUB = 0x9,
+ NIX_SENDMEMALG_E_ADDLEN = 0xa,
+ NIX_SENDMEMALG_E_SUBLEN = 0xb,
+ NIX_SENDMEMALG_E_ADDMBUF = 0xc,
+ NIX_SENDMEMALG_E_SUBMBUF = 0xd,
+ NIX_SENDMEMALG_E_ENUM_LAST = 0xe,
+};
+
+/* NIX CQE header structure */
+struct nix_cqe_hdr_s {
+ u64 flow_tag : 32;
+ u64 q : 20;
+ u64 reserved_52_57 : 6;
+ u64 node : 2;
+ u64 cqe_type : 4;
+};
+
+/* NIX CQE RX parse structure */
+struct nix_rx_parse_s {
+ u64 chan : 12;
+ u64 desc_sizem1 : 5;
+ u64 rsvd_17 : 1;
+ u64 express : 1;
+ u64 wqwd : 1;
+ u64 errlev : 4;
+ u64 errcode : 8;
+ u64 latype : 4;
+ u64 lbtype : 4;
+ u64 lctype : 4;
+ u64 ldtype : 4;
+ u64 letype : 4;
+ u64 lftype : 4;
+ u64 lgtype : 4;
+ u64 lhtype : 4;
+ u64 pkt_lenm1 : 16; /* W1 */
+ u64 l2m : 1;
+ u64 l2b : 1;
+ u64 l3m : 1;
+ u64 l3b : 1;
+ u64 vtag0_valid : 1;
+ u64 vtag0_gone : 1;
+ u64 vtag1_valid : 1;
+ u64 vtag1_gone : 1;
+ u64 pkind : 6;
+ u64 rsvd_95_94 : 2;
+ u64 vtag0_tci : 16;
+ u64 vtag1_tci : 16;
+ u64 laflags : 8; /* W2 */
+ u64 lbflags : 8;
+ u64 lcflags : 8;
+ u64 ldflags : 8;
+ u64 leflags : 8;
+ u64 lfflags : 8;
+ u64 lgflags : 8;
+ u64 lhflags : 8;
+ u64 eoh_ptr : 8; /* W3 */
+ u64 wqe_aura : 20;
+ u64 pb_aura : 20;
+ u64 match_id : 16;
+ u64 laptr : 8; /* W4 */
+ u64 lbptr : 8;
+ u64 lcptr : 8;
+ u64 ldptr : 8;
+ u64 leptr : 8;
+ u64 lfptr : 8;
+ u64 lgptr : 8;
+ u64 lhptr : 8;
+ u64 vtag0_ptr : 8; /* W5 */
+ u64 vtag1_ptr : 8;
+ u64 flow_key_alg : 5;
+ u64 rsvd_359_341 : 19;
+ u64 color : 2;
+ u64 rsvd_383_362 : 22;
+ u64 rsvd_447_384; /* W6 */
+};
+
+/* NIX CQE RX scatter/gather subdescriptor structure */
+struct nix_rx_sg_s {
+ u64 seg_size : 16; /* W0 */
+ u64 seg2_size : 16;
+ u64 seg3_size : 16;
+ u64 segs : 2;
+ u64 rsvd_59_50 : 10;
+ u64 subdc : 4;
+ u64 seg_addr;
+ u64 seg2_addr;
+ u64 seg3_addr;
+};
+
+struct nix_send_comp_s {
+ u64 status : 8;
+ u64 sqe_id : 16;
+ u64 rsvd_24_63 : 40;
+};
+
+struct nix_cqe_rx_s {
+ struct nix_cqe_hdr_s hdr;
+ struct nix_rx_parse_s parse;
+ struct nix_rx_sg_s sg;
+};
+
+struct nix_cqe_tx_s {
+ struct nix_cqe_hdr_s hdr;
+ struct nix_send_comp_s comp;
+};
+
+/* NIX SQE header structure */
+struct nix_sqe_hdr_s {
+ u64 total : 18; /* W0 */
+ u64 reserved_18 : 1;
+ u64 df : 1;
+ u64 aura : 20;
+ u64 sizem1 : 3;
+ u64 pnc : 1;
+ u64 sq : 20;
+ u64 ol3ptr : 8; /* W1 */
+ u64 ol4ptr : 8;
+ u64 il3ptr : 8;
+ u64 il4ptr : 8;
+ u64 ol3type : 4;
+ u64 ol4type : 4;
+ u64 il3type : 4;
+ u64 il4type : 4;
+ u64 sqe_id : 16;
+
+};
+
+/* NIX send extended header subdescriptor structure */
+struct nix_sqe_ext_s {
+ u64 lso_mps : 14; /* W0 */
+ u64 lso : 1;
+ u64 tstmp : 1;
+ u64 lso_sb : 8;
+ u64 lso_format : 5;
+ u64 rsvd_31_29 : 3;
+ u64 shp_chg : 9;
+ u64 shp_dis : 1;
+ u64 shp_ra : 2;
+ u64 markptr : 8;
+ u64 markform : 7;
+ u64 mark_en : 1;
+ u64 subdc : 4;
+ u64 vlan0_ins_ptr : 8; /* W1 */
+ u64 vlan0_ins_tci : 16;
+ u64 vlan1_ins_ptr : 8;
+ u64 vlan1_ins_tci : 16;
+ u64 vlan0_ins_ena : 1;
+ u64 vlan1_ins_ena : 1;
+ u64 init_color : 2;
+ u64 rsvd_127_116 : 12;
+};
+
+struct nix_sqe_sg_s {
+ u64 seg1_size : 16;
+ u64 seg2_size : 16;
+ u64 seg3_size : 16;
+ u64 segs : 2;
+ u64 rsvd_54_50 : 5;
+ u64 i1 : 1;
+ u64 i2 : 1;
+ u64 i3 : 1;
+ u64 ld_type : 2;
+ u64 subdc : 4;
+};
+
+/* NIX send memory subdescriptor structure */
+struct nix_sqe_mem_s {
+ u64 start_offset : 8;
+ u64 rsvd_11_8 : 4;
+ u64 rsvd_12 : 1;
+ u64 udp_csum_crt : 1;
+ u64 update64 : 1;
+ u64 rsvd_15_16 : 1;
+ u64 base_ns : 32;
+ u64 step_type : 1;
+ u64 rsvd_51_49 : 3;
+ u64 per_lso_seg : 1;
+ u64 wmem : 1;
+ u64 dsz : 2;
+ u64 alg : 4;
+ u64 subdc : 4;
+ u64 addr; /* W1 */
+};
+
+enum nix_cqerrint_e {
+ NIX_CQERRINT_DOOR_ERR = 0,
+ NIX_CQERRINT_WR_FULL = 1,
+ NIX_CQERRINT_CQE_FAULT = 2,
+};
+
+#define NIX_CQERRINT_BITS (BIT_ULL(NIX_CQERRINT_DOOR_ERR) | \
+ BIT_ULL(NIX_CQERRINT_CQE_FAULT))
+
+enum nix_rqint_e {
+ NIX_RQINT_DROP = 0,
+ NIX_RQINT_RED = 1,
+};
+
+#define NIX_RQINT_BITS (BIT_ULL(NIX_RQINT_DROP) | BIT_ULL(NIX_RQINT_RED))
+
+enum nix_sqint_e {
+ NIX_SQINT_LMT_ERR = 0,
+ NIX_SQINT_MNQ_ERR = 1,
+ NIX_SQINT_SEND_ERR = 2,
+ NIX_SQINT_SQB_ALLOC_FAIL = 3,
+};
+
+#define NIX_SQINT_BITS (BIT_ULL(NIX_SQINT_LMT_ERR) | \
+ BIT_ULL(NIX_SQINT_MNQ_ERR) | \
+ BIT_ULL(NIX_SQINT_SEND_ERR) | \
+ BIT_ULL(NIX_SQINT_SQB_ALLOC_FAIL))
+
+enum nix_sqoperr_e {
+ NIX_SQOPERR_OOR = 0,
+ NIX_SQOPERR_CTX_FAULT = 1,
+ NIX_SQOPERR_CTX_POISON = 2,
+ NIX_SQOPERR_DISABLED = 3,
+ NIX_SQOPERR_SIZE_ERR = 4,
+ NIX_SQOPERR_OFLOW = 5,
+ NIX_SQOPERR_SQB_NULL = 6,
+ NIX_SQOPERR_SQB_FAULT = 7,
+ NIX_SQOPERR_SQE_SZ_ZERO = 8,
+ NIX_SQOPERR_MAX,
+};
+
+enum nix_mnqerr_e {
+ NIX_MNQERR_SQ_CTX_FAULT = 0,
+ NIX_MNQERR_SQ_CTX_POISON = 1,
+ NIX_MNQERR_SQB_FAULT = 2,
+ NIX_MNQERR_SQB_POISON = 3,
+ NIX_MNQERR_TOTAL_ERR = 4,
+ NIX_MNQERR_LSO_ERR = 5,
+ NIX_MNQERR_CQ_QUERY_ERR = 6,
+ NIX_MNQERR_MAX_SQE_SIZE_ERR = 7,
+ NIX_MNQERR_MAXLEN_ERR = 8,
+ NIX_MNQERR_SQE_SIZEM1_ZERO = 9,
+ NIX_MNQERR_MAX,
+};
+
+enum nix_snd_status_e {
+ NIX_SND_STATUS_GOOD = 0x0,
+ NIX_SND_STATUS_SQ_CTX_FAULT = 0x1,
+ NIX_SND_STATUS_SQ_CTX_POISON = 0x2,
+ NIX_SND_STATUS_SQB_FAULT = 0x3,
+ NIX_SND_STATUS_SQB_POISON = 0x4,
+ NIX_SND_STATUS_HDR_ERR = 0x5,
+ NIX_SND_STATUS_EXT_ERR = 0x6,
+ NIX_SND_STATUS_JUMP_FAULT = 0x7,
+ NIX_SND_STATUS_JUMP_POISON = 0x8,
+ NIX_SND_STATUS_CRC_ERR = 0x10,
+ NIX_SND_STATUS_IMM_ERR = 0x11,
+ NIX_SND_STATUS_SG_ERR = 0x12,
+ NIX_SND_STATUS_MEM_ERR = 0x13,
+ NIX_SND_STATUS_INVALID_SUBDC = 0x14,
+ NIX_SND_STATUS_SUBDC_ORDER_ERR = 0x15,
+ NIX_SND_STATUS_DATA_FAULT = 0x16,
+ NIX_SND_STATUS_DATA_POISON = 0x17,
+ NIX_SND_STATUS_NPC_DROP_ACTION = 0x20,
+ NIX_SND_STATUS_LOCK_VIOL = 0x21,
+ NIX_SND_STATUS_NPC_UCAST_CHAN_ERR = 0x22,
+ NIX_SND_STATUS_NPC_MCAST_CHAN_ERR = 0x23,
+ NIX_SND_STATUS_NPC_MCAST_ABORT = 0x24,
+ NIX_SND_STATUS_NPC_VTAG_PTR_ERR = 0x25,
+ NIX_SND_STATUS_NPC_VTAG_SIZE_ERR = 0x26,
+ NIX_SND_STATUS_SEND_MEM_FAULT = 0x27,
+ NIX_SND_STATUS_SEND_STATS_ERR = 0x28,
+ NIX_SND_STATUS_MAX,
+};
+
+#endif /* OTX2_STRUCT_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
new file mode 100644
index 000000000..423ce54ea
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
@@ -0,0 +1,1456 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Ethernet driver
+ *
+ * Copyright (C) 2021 Marvell.
+ *
+ */
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/inetdevice.h>
+#include <linux/rhashtable.h>
+#include <linux/bitfield.h>
+#include <net/flow_dissector.h>
+#include <net/pkt_cls.h>
+#include <net/tc_act/tc_gact.h>
+#include <net/tc_act/tc_mirred.h>
+#include <net/tc_act/tc_vlan.h>
+#include <net/ipv6.h>
+
+#include "cn10k.h"
+#include "otx2_common.h"
+#include "qos.h"
+
+#define CN10K_MAX_BURST_MANTISSA 0x7FFFULL
+#define CN10K_MAX_BURST_SIZE 8453888ULL
+
+#define CN10K_TLX_BURST_MANTISSA GENMASK_ULL(43, 29)
+#define CN10K_TLX_BURST_EXPONENT GENMASK_ULL(47, 44)
+
+struct otx2_tc_flow_stats {
+ u64 bytes;
+ u64 pkts;
+ u64 used;
+};
+
+struct otx2_tc_flow {
+ struct list_head list;
+ unsigned long cookie;
+ struct rcu_head rcu;
+ struct otx2_tc_flow_stats stats;
+ spinlock_t lock; /* lock for stats */
+ u16 rq;
+ u16 entry;
+ u16 leaf_profile;
+ bool is_act_police;
+ u32 prio;
+ struct npc_install_flow_req req;
+ u64 rate;
+ u32 burst;
+ bool is_pps;
+};
+
+static void otx2_get_egress_burst_cfg(struct otx2_nic *nic, u32 burst,
+ u32 *burst_exp, u32 *burst_mantissa)
+{
+ int max_burst, max_mantissa;
+ unsigned int tmp;
+
+ if (is_dev_otx2(nic->pdev)) {
+ max_burst = MAX_BURST_SIZE;
+ max_mantissa = MAX_BURST_MANTISSA;
+ } else {
+ max_burst = CN10K_MAX_BURST_SIZE;
+ max_mantissa = CN10K_MAX_BURST_MANTISSA;
+ }
+
+ /* Burst is calculated as
+ * ((256 + BURST_MANTISSA) << (1 + BURST_EXPONENT)) / 256
+ * Max supported burst size is 130,816 bytes.
+ */
+ burst = min_t(u32, burst, max_burst);
+ if (burst) {
+ *burst_exp = ilog2(burst) ? ilog2(burst) - 1 : 0;
+ tmp = burst - rounddown_pow_of_two(burst);
+ if (burst < max_mantissa)
+ *burst_mantissa = tmp * 2;
+ else
+ *burst_mantissa = tmp / (1ULL << (*burst_exp - 7));
+ } else {
+ *burst_exp = MAX_BURST_EXPONENT;
+ *burst_mantissa = max_mantissa;
+ }
+}
+
+static void otx2_get_egress_rate_cfg(u64 maxrate, u32 *exp,
+ u32 *mantissa, u32 *div_exp)
+{
+ u64 tmp;
+
+ /* Rate calculation by hardware
+ *
+ * PIR_ADD = ((256 + mantissa) << exp) / 256
+ * rate = (2 * PIR_ADD) / (1 << div_exp)
+ * The resultant rate is in Mbps.
+ */
+
+ /* 2Mbps to 100Gbps can be expressed with div_exp = 0.
+ * Setting this to '0' will ease the calculation of
+ * exponent and mantissa.
+ */
+ *div_exp = 0;
+
+ if (maxrate) {
+ *exp = ilog2(maxrate) ? ilog2(maxrate) - 1 : 0;
+ tmp = maxrate - rounddown_pow_of_two(maxrate);
+ if (maxrate < MAX_RATE_MANTISSA)
+ *mantissa = tmp * 2;
+ else
+ *mantissa = tmp / (1ULL << (*exp - 7));
+ } else {
+ /* Instead of disabling rate limiting, set all values to max */
+ *exp = MAX_RATE_EXPONENT;
+ *mantissa = MAX_RATE_MANTISSA;
+ }
+}
+
+u64 otx2_get_txschq_rate_regval(struct otx2_nic *nic,
+ u64 maxrate, u32 burst)
+{
+ u32 burst_exp, burst_mantissa;
+ u32 exp, mantissa, div_exp;
+ u64 regval = 0;
+
+ /* Get exponent and mantissa values from the desired rate */
+ otx2_get_egress_burst_cfg(nic, burst, &burst_exp, &burst_mantissa);
+ otx2_get_egress_rate_cfg(maxrate, &exp, &mantissa, &div_exp);
+
+ if (is_dev_otx2(nic->pdev)) {
+ regval = FIELD_PREP(TLX_BURST_EXPONENT, (u64)burst_exp) |
+ FIELD_PREP(TLX_BURST_MANTISSA, (u64)burst_mantissa) |
+ FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) |
+ FIELD_PREP(TLX_RATE_EXPONENT, exp) |
+ FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0);
+ } else {
+ regval = FIELD_PREP(CN10K_TLX_BURST_EXPONENT, (u64)burst_exp) |
+ FIELD_PREP(CN10K_TLX_BURST_MANTISSA, (u64)burst_mantissa) |
+ FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) |
+ FIELD_PREP(TLX_RATE_EXPONENT, exp) |
+ FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0);
+ }
+
+ return regval;
+}
+
+static int otx2_set_matchall_egress_rate(struct otx2_nic *nic,
+ u32 burst, u64 maxrate)
+{
+ struct otx2_hw *hw = &nic->hw;
+ struct nix_txschq_config *req;
+ int txschq, err;
+
+ /* All SQs share the same TL4, so pick the first scheduler */
+ txschq = hw->txschq_list[NIX_TXSCH_LVL_TL4][0];
+
+ mutex_lock(&nic->mbox.lock);
+ req = otx2_mbox_alloc_msg_nix_txschq_cfg(&nic->mbox);
+ if (!req) {
+ mutex_unlock(&nic->mbox.lock);
+ return -ENOMEM;
+ }
+
+ req->lvl = NIX_TXSCH_LVL_TL4;
+ req->num_regs = 1;
+ req->reg[0] = NIX_AF_TL4X_PIR(txschq);
+ req->regval[0] = otx2_get_txschq_rate_regval(nic, maxrate, burst);
+
+ err = otx2_sync_mbox_msg(&nic->mbox);
+ mutex_unlock(&nic->mbox.lock);
+ return err;
+}
+
+static int otx2_tc_validate_flow(struct otx2_nic *nic,
+ struct flow_action *actions,
+ struct netlink_ext_ack *extack)
+{
+ if (nic->flags & OTX2_FLAG_INTF_DOWN) {
+ NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
+ return -EINVAL;
+ }
+
+ if (!flow_action_has_entries(actions)) {
+ NL_SET_ERR_MSG_MOD(extack, "MATCHALL offload called with no action");
+ return -EINVAL;
+ }
+
+ if (!flow_offload_has_one_action(actions)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Egress MATCHALL offload supports only 1 policing action");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int otx2_policer_validate(const struct flow_action *action,
+ const struct flow_action_entry *act,
+ struct netlink_ext_ack *extack)
+{
+ if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when exceed action is not drop");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
+ act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is not pipe or ok");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
+ !flow_action_is_last_entry(action, act)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is ok, but action is not last");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.peakrate_bytes_ps ||
+ act->police.avrate || act->police.overhead) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when peakrate/avrate/overhead is configured");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int otx2_tc_egress_matchall_install(struct otx2_nic *nic,
+ struct tc_cls_matchall_offload *cls)
+{
+ struct netlink_ext_ack *extack = cls->common.extack;
+ struct flow_action *actions = &cls->rule->action;
+ struct flow_action_entry *entry;
+ int err;
+
+ err = otx2_tc_validate_flow(nic, actions, extack);
+ if (err)
+ return err;
+
+ if (nic->flags & OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only one Egress MATCHALL ratelimiter can be offloaded");
+ return -ENOMEM;
+ }
+
+ entry = &cls->rule->action.entries[0];
+ switch (entry->id) {
+ case FLOW_ACTION_POLICE:
+ err = otx2_policer_validate(&cls->rule->action, entry, extack);
+ if (err)
+ return err;
+
+ if (entry->police.rate_pkt_ps) {
+ NL_SET_ERR_MSG_MOD(extack, "QoS offload not support packets per second");
+ return -EOPNOTSUPP;
+ }
+ err = otx2_set_matchall_egress_rate(nic, entry->police.burst,
+ otx2_convert_rate(entry->police.rate_bytes_ps));
+ if (err)
+ return err;
+ nic->flags |= OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED;
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only police action is supported with Egress MATCHALL offload");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int otx2_tc_egress_matchall_delete(struct otx2_nic *nic,
+ struct tc_cls_matchall_offload *cls)
+{
+ struct netlink_ext_ack *extack = cls->common.extack;
+ int err;
+
+ if (nic->flags & OTX2_FLAG_INTF_DOWN) {
+ NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
+ return -EINVAL;
+ }
+
+ err = otx2_set_matchall_egress_rate(nic, 0, 0);
+ nic->flags &= ~OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED;
+ return err;
+}
+
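+/* Allocate a leaf bandwidth profile, program its rate/burst and map it to
+ * the flow's receive queue.
+ */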
+static int otx2_tc_act_set_hw_police(struct otx2_nic *nic,
+ struct otx2_tc_flow *node)
+{
+ int rc;
+
+ mutex_lock(&nic->mbox.lock);
+
+ rc = cn10k_alloc_leaf_profile(nic, &node->leaf_profile);
+ if (rc) {
+ mutex_unlock(&nic->mbox.lock);
+ return rc;
+ }
+
+ rc = cn10k_set_ipolicer_rate(nic, node->leaf_profile,
+ node->burst, node->rate, node->is_pps);
+ if (rc)
+ goto free_leaf;
+
+ rc = cn10k_map_unmap_rq_policer(nic, node->rq, node->leaf_profile, true);
+ if (rc)
+ goto free_leaf;
+
+ mutex_unlock(&nic->mbox.lock);
+
+ return 0;
+
+free_leaf:
+ if (cn10k_free_leaf_profile(nic, node->leaf_profile))
+ netdev_err(nic->netdev,
+ "Unable to free leaf bandwidth profile(%d)\n",
+ node->leaf_profile);
+ mutex_unlock(&nic->mbox.lock);
+ return rc;
+}
+
+static int otx2_tc_act_set_police(struct otx2_nic *nic,
+ struct otx2_tc_flow *node,
+ struct flow_cls_offload *f,
+ u64 rate, u32 burst, u32 mark,
+ struct npc_install_flow_req *req, bool pps)
+{
+ struct netlink_ext_ack *extack = f->common.extack;
+ struct otx2_hw *hw = &nic->hw;
+ int rq_idx, rc;
+
+ rq_idx = find_first_zero_bit(&nic->rq_bmap, hw->rx_queues);
+ if (rq_idx >= hw->rx_queues) {
+ NL_SET_ERR_MSG_MOD(extack, "Police action rules exceeded");
+ return -EINVAL;
+ }
+
+ req->match_id = mark & 0xFFFFULL;
+ req->index = rq_idx;
+ req->op = NIX_RX_ACTIONOP_UCAST;
+
+ node->is_act_police = true;
+ node->rq = rq_idx;
+ node->burst = burst;
+ node->rate = rate;
+ node->is_pps = pps;
+
+ rc = otx2_tc_act_set_hw_police(nic, node);
+ if (!rc)
+ set_bit(rq_idx, &nic->rq_bmap);
+
+ return rc;
+}
+
+static int otx2_tc_parse_actions(struct otx2_nic *nic,
+ struct flow_action *flow_action,
+ struct npc_install_flow_req *req,
+ struct flow_cls_offload *f,
+ struct otx2_tc_flow *node)
+{
+ struct netlink_ext_ack *extack = f->common.extack;
+ struct flow_action_entry *act;
+ struct net_device *target;
+ struct otx2_nic *priv;
+ u32 burst, mark = 0;
+ u8 nr_police = 0;
+ bool pps = false;
+ u64 rate;
+ int err;
+ int i;
+
+ if (!flow_action_has_entries(flow_action)) {
+ NL_SET_ERR_MSG_MOD(extack, "no tc actions specified");
+ return -EINVAL;
+ }
+
+ flow_action_for_each(i, act, flow_action) {
+ switch (act->id) {
+ case FLOW_ACTION_DROP:
+ req->op = NIX_RX_ACTIONOP_DROP;
+ return 0;
+ case FLOW_ACTION_ACCEPT:
+ req->op = NIX_RX_ACTION_DEFAULT;
+ return 0;
+ case FLOW_ACTION_REDIRECT_INGRESS:
+ target = act->dev;
+ priv = netdev_priv(target);
+ /* npc_install_flow_req doesn't support passing a target pcifunc */
+ if (rvu_get_pf(nic->pcifunc) != rvu_get_pf(priv->pcifunc)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "can't redirect to other pf/vf");
+ return -EOPNOTSUPP;
+ }
+ req->vf = priv->pcifunc & RVU_PFVF_FUNC_MASK;
+
+ /* If op is already set, avoid overwriting it */
+ if (!req->op)
+ req->op = NIX_RX_ACTION_DEFAULT;
+ break;
+
+ case FLOW_ACTION_VLAN_POP:
+ req->vtag0_valid = true;
+ /* use RX_VTAG_TYPE7 which is initialized to strip vlan tag */
+ req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE7;
+ break;
+ case FLOW_ACTION_POLICE:
+ /* Ingress ratelimiting is not supported on OcteonTx2 */
+ if (is_dev_otx2(nic->pdev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Ingress policing not supported on this platform");
+ return -EOPNOTSUPP;
+ }
+
+ err = otx2_policer_validate(flow_action, act, extack);
+ if (err)
+ return err;
+
+ if (act->police.rate_bytes_ps > 0) {
+ rate = act->police.rate_bytes_ps * 8;
+ burst = act->police.burst;
+ } else if (act->police.rate_pkt_ps > 0) {
+ /* The algorithm used to calculate rate
+ * mantissa, exponent values for a given token
+ * rate (token can be byte or packet) requires
+ * token rate to be multiplied by 8.
+ */
+ rate = act->police.rate_pkt_ps * 8;
+ burst = act->police.burst_pkt;
+ pps = true;
+ }
+ nr_police++;
+ break;
+ case FLOW_ACTION_MARK:
+ mark = act->mark;
+ break;
+
+ case FLOW_ACTION_RX_QUEUE_MAPPING:
+ req->op = NIX_RX_ACTIONOP_UCAST;
+ req->index = act->rx_queue;
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+ }
+
+ if (nr_police > 1) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "rate limit police offload requires a single action");
+ return -EOPNOTSUPP;
+ }
+
+ if (nr_police)
+ return otx2_tc_act_set_police(nic, node, f, rate, burst,
+ mark, req, pps);
+
+ return 0;
+}
+
+static int otx2_tc_process_vlan(struct otx2_nic *nic, struct flow_msg *flow_spec,
+ struct flow_msg *flow_mask, struct flow_rule *rule,
+ struct npc_install_flow_req *req, bool is_inner)
+{
+ struct flow_match_vlan match;
+ u16 vlan_tci, vlan_tci_mask;
+
+ if (is_inner)
+ flow_rule_match_cvlan(rule, &match);
+ else
+ flow_rule_match_vlan(rule, &match);
+
+ if (!eth_type_vlan(match.key->vlan_tpid)) {
+ netdev_err(nic->netdev, "vlan tpid 0x%x not supported\n",
+ ntohs(match.key->vlan_tpid));
+ return -EOPNOTSUPP;
+ }
+
+ if (!match.mask->vlan_id) {
+ struct flow_action_entry *act;
+ int i;
+
+ flow_action_for_each(i, act, &rule->action) {
+ if (act->id == FLOW_ACTION_DROP) {
+ netdev_err(nic->netdev,
+ "vlan tpid 0x%x with vlan_id %d is not supported for DROP rule.\n",
+ ntohs(match.key->vlan_tpid), match.key->vlan_id);
+ return -EOPNOTSUPP;
+ }
+ }
+ }
+
+ if (match.mask->vlan_id ||
+ match.mask->vlan_dei ||
+ match.mask->vlan_priority) {
+ vlan_tci = match.key->vlan_id |
+ match.key->vlan_dei << 12 |
+ match.key->vlan_priority << 13;
+
+ vlan_tci_mask = match.mask->vlan_id |
+ match.mask->vlan_dei << 12 |
+ match.mask->vlan_priority << 13;
+ if (is_inner) {
+ flow_spec->vlan_itci = htons(vlan_tci);
+ flow_mask->vlan_itci = htons(vlan_tci_mask);
+ req->features |= BIT_ULL(NPC_INNER_VID);
+ } else {
+ flow_spec->vlan_tci = htons(vlan_tci);
+ flow_mask->vlan_tci = htons(vlan_tci_mask);
+ req->features |= BIT_ULL(NPC_OUTER_VID);
+ }
+ }
+
+ return 0;
+}
+
+static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
+ struct flow_cls_offload *f,
+ struct npc_install_flow_req *req)
+{
+ struct netlink_ext_ack *extack = f->common.extack;
+ struct flow_msg *flow_spec = &req->packet;
+ struct flow_msg *flow_mask = &req->mask;
+ struct flow_dissector *dissector;
+ struct flow_rule *rule;
+ u8 ip_proto = 0;
+
+ rule = flow_cls_offload_flow_rule(f);
+ dissector = rule->match.dissector;
+
+ if ((dissector->used_keys &
+ ~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) |
+ BIT(FLOW_DISSECTOR_KEY_CVLAN) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) |
+ BIT(FLOW_DISSECTOR_KEY_IPSEC) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_IP)))) {
+ netdev_info(nic->netdev, "unsupported flow used key 0x%llx",
+ dissector->used_keys);
+ return -EOPNOTSUPP;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_match_basic match;
+
+ flow_rule_match_basic(rule, &match);
+
+ /* All EtherTypes can be matched, no hw limitation */
+ flow_spec->etype = match.key->n_proto;
+ flow_mask->etype = match.mask->n_proto;
+ req->features |= BIT_ULL(NPC_ETYPE);
+
+ if (match.mask->ip_proto &&
+ (match.key->ip_proto != IPPROTO_TCP &&
+ match.key->ip_proto != IPPROTO_UDP &&
+ match.key->ip_proto != IPPROTO_SCTP &&
+ match.key->ip_proto != IPPROTO_ICMP &&
+ match.key->ip_proto != IPPROTO_ESP &&
+ match.key->ip_proto != IPPROTO_AH &&
+ match.key->ip_proto != IPPROTO_ICMPV6)) {
+ netdev_info(nic->netdev,
+ "ip_proto=0x%x not supported\n",
+ match.key->ip_proto);
+ return -EOPNOTSUPP;
+ }
+ if (match.mask->ip_proto)
+ ip_proto = match.key->ip_proto;
+
+ if (ip_proto == IPPROTO_UDP)
+ req->features |= BIT_ULL(NPC_IPPROTO_UDP);
+ else if (ip_proto == IPPROTO_TCP)
+ req->features |= BIT_ULL(NPC_IPPROTO_TCP);
+ else if (ip_proto == IPPROTO_SCTP)
+ req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
+ else if (ip_proto == IPPROTO_ICMP)
+ req->features |= BIT_ULL(NPC_IPPROTO_ICMP);
+ else if (ip_proto == IPPROTO_ICMPV6)
+ req->features |= BIT_ULL(NPC_IPPROTO_ICMP6);
+ else if (ip_proto == IPPROTO_ESP)
+ req->features |= BIT_ULL(NPC_IPPROTO_ESP);
+ else if (ip_proto == IPPROTO_AH)
+ req->features |= BIT_ULL(NPC_IPPROTO_AH);
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
+ struct flow_match_control match;
+
+ flow_rule_match_control(rule, &match);
+ if (match.mask->flags & FLOW_DIS_FIRST_FRAG) {
+ NL_SET_ERR_MSG_MOD(extack, "HW doesn't support frag first/later");
+ return -EOPNOTSUPP;
+ }
+
+ if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) {
+ if (ntohs(flow_spec->etype) == ETH_P_IP) {
+ flow_spec->ip_flag = IPV4_FLAG_MORE;
+ flow_mask->ip_flag = IPV4_FLAG_MORE;
+ req->features |= BIT_ULL(NPC_IPFRAG_IPV4);
+ } else if (ntohs(flow_spec->etype) == ETH_P_IPV6) {
+ flow_spec->next_header = IPPROTO_FRAGMENT;
+ flow_mask->next_header = 0xff;
+ req->features |= BIT_ULL(NPC_IPFRAG_IPV6);
+ } else {
+ NL_SET_ERR_MSG_MOD(extack, "flow-type should be either IPv4 and IPv6");
+ return -EOPNOTSUPP;
+ }
+ }
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ struct flow_match_eth_addrs match;
+
+ flow_rule_match_eth_addrs(rule, &match);
+ if (!is_zero_ether_addr(match.mask->src)) {
+ NL_SET_ERR_MSG_MOD(extack, "src mac match not supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (!is_zero_ether_addr(match.mask->dst)) {
+ ether_addr_copy(flow_spec->dmac, (u8 *)&match.key->dst);
+ ether_addr_copy(flow_mask->dmac,
+ (u8 *)&match.mask->dst);
+ req->features |= BIT_ULL(NPC_DMAC);
+ }
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPSEC)) {
+ struct flow_match_ipsec match;
+
+ flow_rule_match_ipsec(rule, &match);
+ if (!match.mask->spi) {
+ NL_SET_ERR_MSG_MOD(extack, "spi index not specified");
+ return -EOPNOTSUPP;
+ }
+ if (ip_proto != IPPROTO_ESP &&
+ ip_proto != IPPROTO_AH) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "SPI index is valid only for ESP/AH proto");
+ return -EOPNOTSUPP;
+ }
+
+ flow_spec->spi = match.key->spi;
+ flow_mask->spi = match.mask->spi;
+ req->features |= BIT_ULL(NPC_IPSEC_SPI);
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
+ struct flow_match_ip match;
+
+ flow_rule_match_ip(rule, &match);
+ if ((ntohs(flow_spec->etype) != ETH_P_IP) &&
+ match.mask->tos) {
+ NL_SET_ERR_MSG_MOD(extack, "tos not supported");
+ return -EOPNOTSUPP;
+ }
+ if (match.mask->ttl) {
+ NL_SET_ERR_MSG_MOD(extack, "ttl not supported");
+ return -EOPNOTSUPP;
+ }
+ flow_spec->tos = match.key->tos;
+ flow_mask->tos = match.mask->tos;
+ req->features |= BIT_ULL(NPC_TOS);
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+ int ret;
+
+ ret = otx2_tc_process_vlan(nic, flow_spec, flow_mask, rule, req, false);
+ if (ret)
+ return ret;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
+ int ret;
+
+ ret = otx2_tc_process_vlan(nic, flow_spec, flow_mask, rule, req, true);
+ if (ret)
+ return ret;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
+ struct flow_match_ipv4_addrs match;
+
+ flow_rule_match_ipv4_addrs(rule, &match);
+
+ flow_spec->ip4dst = match.key->dst;
+ flow_mask->ip4dst = match.mask->dst;
+ req->features |= BIT_ULL(NPC_DIP_IPV4);
+
+ flow_spec->ip4src = match.key->src;
+ flow_mask->ip4src = match.mask->src;
+ req->features |= BIT_ULL(NPC_SIP_IPV4);
+ } else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
+ struct flow_match_ipv6_addrs match;
+
+ flow_rule_match_ipv6_addrs(rule, &match);
+
+ if (ipv6_addr_loopback(&match.key->dst) ||
+ ipv6_addr_loopback(&match.key->src)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Flow matching IPv6 loopback addr not supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (!ipv6_addr_any(&match.mask->dst)) {
+ memcpy(&flow_spec->ip6dst,
+ (struct in6_addr *)&match.key->dst,
+ sizeof(flow_spec->ip6dst));
+ memcpy(&flow_mask->ip6dst,
+ (struct in6_addr *)&match.mask->dst,
+ sizeof(flow_spec->ip6dst));
+ req->features |= BIT_ULL(NPC_DIP_IPV6);
+ }
+
+ if (!ipv6_addr_any(&match.mask->src)) {
+ memcpy(&flow_spec->ip6src,
+ (struct in6_addr *)&match.key->src,
+ sizeof(flow_spec->ip6src));
+ memcpy(&flow_mask->ip6src,
+ (struct in6_addr *)&match.mask->src,
+ sizeof(flow_spec->ip6src));
+ req->features |= BIT_ULL(NPC_SIP_IPV6);
+ }
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
+ struct flow_match_ports match;
+
+ flow_rule_match_ports(rule, &match);
+
+ flow_spec->dport = match.key->dst;
+ flow_mask->dport = match.mask->dst;
+
+ if (flow_mask->dport) {
+ if (ip_proto == IPPROTO_UDP)
+ req->features |= BIT_ULL(NPC_DPORT_UDP);
+ else if (ip_proto == IPPROTO_TCP)
+ req->features |= BIT_ULL(NPC_DPORT_TCP);
+ else if (ip_proto == IPPROTO_SCTP)
+ req->features |= BIT_ULL(NPC_DPORT_SCTP);
+ }
+
+ flow_spec->sport = match.key->src;
+ flow_mask->sport = match.mask->src;
+
+ if (flow_mask->sport) {
+ if (ip_proto == IPPROTO_UDP)
+ req->features |= BIT_ULL(NPC_SPORT_UDP);
+ else if (ip_proto == IPPROTO_TCP)
+ req->features |= BIT_ULL(NPC_SPORT_TCP);
+ else if (ip_proto == IPPROTO_SCTP)
+ req->features |= BIT_ULL(NPC_SPORT_SCTP);
+ }
+ }
+
+ return otx2_tc_parse_actions(nic, &rule->action, req, f, node);
+}
+
+static void otx2_destroy_tc_flow_list(struct otx2_nic *pfvf)
+{
+ struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
+ struct otx2_tc_flow *iter, *tmp;
+
+ if (!(pfvf->flags & OTX2_FLAG_MCAM_ENTRIES_ALLOC))
+ return;
+
+ list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list_tc, list) {
+ list_del(&iter->list);
+ kfree(iter);
+ flow_cfg->nr_flows--;
+ }
+}
+
+static struct otx2_tc_flow *otx2_tc_get_entry_by_cookie(struct otx2_flow_config *flow_cfg,
+ unsigned long cookie)
+{
+ struct otx2_tc_flow *tmp;
+
+ list_for_each_entry(tmp, &flow_cfg->flow_list_tc, list) {
+ if (tmp->cookie == cookie)
+ return tmp;
+ }
+
+ return NULL;
+}
+
+static struct otx2_tc_flow *otx2_tc_get_entry_by_index(struct otx2_flow_config *flow_cfg,
+ int index)
+{
+ struct otx2_tc_flow *tmp;
+ int i = 0;
+
+ list_for_each_entry(tmp, &flow_cfg->flow_list_tc, list) {
+ if (i == index)
+ return tmp;
+ i++;
+ }
+
+ return NULL;
+}
+
+static void otx2_tc_del_from_flow_list(struct otx2_flow_config *flow_cfg,
+ struct otx2_tc_flow *node)
+{
+ struct list_head *pos, *n;
+ struct otx2_tc_flow *tmp;
+
+ list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) {
+ tmp = list_entry(pos, struct otx2_tc_flow, list);
+ if (node == tmp) {
+ list_del(&node->list);
+ return;
+ }
+ }
+}
+
+static int otx2_tc_add_to_flow_list(struct otx2_flow_config *flow_cfg,
+ struct otx2_tc_flow *node)
+{
+ struct list_head *pos, *n;
+ struct otx2_tc_flow *tmp;
+ int index = 0;
+
+ /* If the flow list is empty then add the new node */
+ if (list_empty(&flow_cfg->flow_list_tc)) {
+ list_add(&node->list, &flow_cfg->flow_list_tc);
+ return index;
+ }
+
+ list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) {
+ tmp = list_entry(pos, struct otx2_tc_flow, list);
+ if (node->prio < tmp->prio)
+ break;
+ index++;
+ }
+
+ list_add(&node->list, pos->prev);
+ return index;
+}
+
+static int otx2_add_mcam_flow_entry(struct otx2_nic *nic, struct npc_install_flow_req *req)
+{
+ struct npc_install_flow_req *tmp_req;
+ int err;
+
+ mutex_lock(&nic->mbox.lock);
+ tmp_req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox);
+ if (!tmp_req) {
+ mutex_unlock(&nic->mbox.lock);
+ return -ENOMEM;
+ }
+
+ memcpy(tmp_req, req, sizeof(struct npc_install_flow_req));
+ /* Send message to AF */
+ err = otx2_sync_mbox_msg(&nic->mbox);
+ if (err) {
+ netdev_err(nic->netdev, "Failed to install MCAM flow entry %d\n",
+ req->entry);
+ mutex_unlock(&nic->mbox.lock);
+ return -EFAULT;
+ }
+
+ mutex_unlock(&nic->mbox.lock);
+ return 0;
+}
+
+static int otx2_del_mcam_flow_entry(struct otx2_nic *nic, u16 entry, u16 *cntr_val)
+{
+ struct npc_delete_flow_rsp *rsp;
+ struct npc_delete_flow_req *req;
+ int err;
+
+ mutex_lock(&nic->mbox.lock);
+ req = otx2_mbox_alloc_msg_npc_delete_flow(&nic->mbox);
+ if (!req) {
+ mutex_unlock(&nic->mbox.lock);
+ return -ENOMEM;
+ }
+
+ req->entry = entry;
+
+ /* Send message to AF */
+ err = otx2_sync_mbox_msg(&nic->mbox);
+ if (err) {
+ netdev_err(nic->netdev, "Failed to delete MCAM flow entry %d\n",
+ entry);
+ mutex_unlock(&nic->mbox.lock);
+ return -EFAULT;
+ }
+
+ if (cntr_val) {
+ rsp = (struct npc_delete_flow_rsp *)otx2_mbox_get_rsp(&nic->mbox.mbox,
+ 0, &req->hdr);
+ if (IS_ERR(rsp)) {
+ netdev_err(nic->netdev, "Failed to get MCAM delete response for entry %d\n",
+ entry);
+ mutex_unlock(&nic->mbox.lock);
+ return -EFAULT;
+ }
+
+ *cntr_val = rsp->cntr_val;
+ }
+
+ mutex_unlock(&nic->mbox.lock);
+ return 0;
+}
+
+static int otx2_tc_update_mcam_table_del_req(struct otx2_nic *nic,
+ struct otx2_flow_config *flow_cfg,
+ struct otx2_tc_flow *node)
+{
+ struct list_head *pos, *n;
+ struct otx2_tc_flow *tmp;
+ int i = 0, index = 0;
+ u16 cntr_val = 0;
+
+ /* Find and delete the entry from the list, then re-install
+ * all entries from the beginning of the list up to the
+ * deleted entry's index at higher MCAM indexes.
+ */
+ list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) {
+ tmp = list_entry(pos, struct otx2_tc_flow, list);
+ if (node == tmp) {
+ list_del(&tmp->list);
+ break;
+ }
+
+ otx2_del_mcam_flow_entry(nic, tmp->entry, &cntr_val);
+ tmp->entry++;
+ tmp->req.entry = tmp->entry;
+ tmp->req.cntr_val = cntr_val;
+ index++;
+ }
+
+ list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) {
+ if (i == index)
+ break;
+
+ tmp = list_entry(pos, struct otx2_tc_flow, list);
+ otx2_add_mcam_flow_entry(nic, &tmp->req);
+ i++;
+ }
+
+ return 0;
+}
+
+static int otx2_tc_update_mcam_table_add_req(struct otx2_nic *nic,
+ struct otx2_flow_config *flow_cfg,
+ struct otx2_tc_flow *node)
+{
+ int mcam_idx = flow_cfg->max_flows - flow_cfg->nr_flows - 1;
+ struct otx2_tc_flow *tmp;
+ int list_idx, i;
+ u16 cntr_val = 0;
+
+ /* Find the index (list_idx) of the first entry whose priority
+ * is greater than that of the new entry, then re-install all
+ * entries from the beginning of the list up to list_idx at
+ * higher MCAM indexes.
+ */
+ list_idx = otx2_tc_add_to_flow_list(flow_cfg, node);
+ for (i = 0; i < list_idx; i++) {
+ tmp = otx2_tc_get_entry_by_index(flow_cfg, i);
+ if (!tmp)
+ return -ENOMEM;
+
+ otx2_del_mcam_flow_entry(nic, tmp->entry, &cntr_val);
+ tmp->entry = flow_cfg->flow_ent[mcam_idx];
+ tmp->req.entry = tmp->entry;
+ tmp->req.cntr_val = cntr_val;
+ otx2_add_mcam_flow_entry(nic, &tmp->req);
+ mcam_idx++;
+ }
+
+ return mcam_idx;
+}
+
+static int otx2_tc_update_mcam_table(struct otx2_nic *nic,
+ struct otx2_flow_config *flow_cfg,
+ struct otx2_tc_flow *node,
+ bool add_req)
+{
+ if (add_req)
+ return otx2_tc_update_mcam_table_add_req(nic, flow_cfg, node);
+
+ return otx2_tc_update_mcam_table_del_req(nic, flow_cfg, node);
+}
+
+static int otx2_tc_del_flow(struct otx2_nic *nic,
+ struct flow_cls_offload *tc_flow_cmd)
+{
+ struct otx2_flow_config *flow_cfg = nic->flow_cfg;
+ struct otx2_tc_flow *flow_node;
+ int err;
+
+ flow_node = otx2_tc_get_entry_by_cookie(flow_cfg, tc_flow_cmd->cookie);
+ if (!flow_node) {
+ netdev_err(nic->netdev, "tc flow not found for cookie 0x%lx\n",
+ tc_flow_cmd->cookie);
+ return -EINVAL;
+ }
+
+ if (flow_node->is_act_police) {
+ __clear_bit(flow_node->rq, &nic->rq_bmap);
+
+ if (nic->flags & OTX2_FLAG_INTF_DOWN)
+ goto free_mcam_flow;
+
+ mutex_lock(&nic->mbox.lock);
+
+ err = cn10k_map_unmap_rq_policer(nic, flow_node->rq,
+ flow_node->leaf_profile, false);
+ if (err)
+ netdev_err(nic->netdev,
+ "Unmapping RQ %d & profile %d failed\n",
+ flow_node->rq, flow_node->leaf_profile);
+
+ err = cn10k_free_leaf_profile(nic, flow_node->leaf_profile);
+ if (err)
+ netdev_err(nic->netdev,
+ "Unable to free leaf bandwidth profile(%d)\n",
+ flow_node->leaf_profile);
+
+ mutex_unlock(&nic->mbox.lock);
+ }
+
+free_mcam_flow:
+ otx2_del_mcam_flow_entry(nic, flow_node->entry, NULL);
+ otx2_tc_update_mcam_table(nic, flow_cfg, flow_node, false);
+ kfree_rcu(flow_node, rcu);
+ flow_cfg->nr_flows--;
+ return 0;
+}
+
+static int otx2_tc_add_flow(struct otx2_nic *nic,
+ struct flow_cls_offload *tc_flow_cmd)
+{
+ struct netlink_ext_ack *extack = tc_flow_cmd->common.extack;
+ struct otx2_flow_config *flow_cfg = nic->flow_cfg;
+ struct otx2_tc_flow *new_node, *old_node;
+ struct npc_install_flow_req *req, dummy;
+ int rc, err, mcam_idx;
+
+ if (!(nic->flags & OTX2_FLAG_TC_FLOWER_SUPPORT))
+ return -ENOMEM;
+
+ if (nic->flags & OTX2_FLAG_INTF_DOWN) {
+ NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
+ return -EINVAL;
+ }
+
+ if (flow_cfg->nr_flows == flow_cfg->max_flows) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Free MCAM entry not available to add the flow");
+ return -ENOMEM;
+ }
+
+ /* Allocate memory for the new flow and its node */
+ new_node = kzalloc(sizeof(*new_node), GFP_KERNEL);
+ if (!new_node)
+ return -ENOMEM;
+ spin_lock_init(&new_node->lock);
+ new_node->cookie = tc_flow_cmd->cookie;
+ new_node->prio = tc_flow_cmd->common.prio;
+
+ memset(&dummy, 0, sizeof(struct npc_install_flow_req));
+
+ rc = otx2_tc_prepare_flow(nic, new_node, tc_flow_cmd, &dummy);
+ if (rc) {
+ kfree_rcu(new_node, rcu);
+ return rc;
+ }
+
+ /* If a flow exists with the same cookie, delete it */
+ old_node = otx2_tc_get_entry_by_cookie(flow_cfg, tc_flow_cmd->cookie);
+ if (old_node)
+ otx2_tc_del_flow(nic, tc_flow_cmd);
+
+ mcam_idx = otx2_tc_update_mcam_table(nic, flow_cfg, new_node, true);
+ mutex_lock(&nic->mbox.lock);
+ req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox);
+ if (!req) {
+ mutex_unlock(&nic->mbox.lock);
+ rc = -ENOMEM;
+ goto free_leaf;
+ }
+
+ memcpy(&dummy.hdr, &req->hdr, sizeof(struct mbox_msghdr));
+ memcpy(req, &dummy, sizeof(struct npc_install_flow_req));
+ req->channel = nic->hw.rx_chan_base;
+ req->entry = flow_cfg->flow_ent[mcam_idx];
+ req->intf = NIX_INTF_RX;
+ req->set_cntr = 1;
+ new_node->entry = req->entry;
+
+ /* Send message to AF */
+ rc = otx2_sync_mbox_msg(&nic->mbox);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to install MCAM flow entry");
+ mutex_unlock(&nic->mbox.lock);
+ goto free_leaf;
+ }
+
+ mutex_unlock(&nic->mbox.lock);
+ memcpy(&new_node->req, req, sizeof(struct npc_install_flow_req));
+
+ flow_cfg->nr_flows++;
+ return 0;
+
+free_leaf:
+ otx2_tc_del_from_flow_list(flow_cfg, new_node);
+	if (new_node->is_act_police) {
+		mutex_lock(&nic->mbox.lock);
+
+		err = cn10k_map_unmap_rq_policer(nic, new_node->rq,
+						 new_node->leaf_profile, false);
+		if (err)
+			netdev_err(nic->netdev,
+				   "Unmapping RQ %d & profile %d failed\n",
+				   new_node->rq, new_node->leaf_profile);
+		err = cn10k_free_leaf_profile(nic, new_node->leaf_profile);
+		if (err)
+			netdev_err(nic->netdev,
+				   "Unable to free leaf bandwidth profile(%d)\n",
+				   new_node->leaf_profile);
+
+		__clear_bit(new_node->rq, &nic->rq_bmap);
+
+		mutex_unlock(&nic->mbox.lock);
+	}
+	/* Free the node only after all its fields have been consumed above;
+	 * once kfree_rcu() is queued the node must not be dereferenced again.
+	 */
+	kfree_rcu(new_node, rcu);
+
+ return rc;
+}
+
+static int otx2_tc_get_flow_stats(struct otx2_nic *nic,
+ struct flow_cls_offload *tc_flow_cmd)
+{
+ struct npc_mcam_get_stats_req *req;
+ struct npc_mcam_get_stats_rsp *rsp;
+ struct otx2_tc_flow_stats *stats;
+ struct otx2_tc_flow *flow_node;
+ int err;
+
+ flow_node = otx2_tc_get_entry_by_cookie(nic->flow_cfg, tc_flow_cmd->cookie);
+ if (!flow_node) {
+ netdev_info(nic->netdev, "tc flow not found for cookie %lx",
+ tc_flow_cmd->cookie);
+ return -EINVAL;
+ }
+
+ mutex_lock(&nic->mbox.lock);
+
+ req = otx2_mbox_alloc_msg_npc_mcam_entry_stats(&nic->mbox);
+ if (!req) {
+ mutex_unlock(&nic->mbox.lock);
+ return -ENOMEM;
+ }
+
+ req->entry = flow_node->entry;
+
+ err = otx2_sync_mbox_msg(&nic->mbox);
+ if (err) {
+ netdev_err(nic->netdev, "Failed to get stats for MCAM flow entry %d\n",
+ req->entry);
+ mutex_unlock(&nic->mbox.lock);
+ return -EFAULT;
+ }
+
+ rsp = (struct npc_mcam_get_stats_rsp *)otx2_mbox_get_rsp
+ (&nic->mbox.mbox, 0, &req->hdr);
+ if (IS_ERR(rsp)) {
+ mutex_unlock(&nic->mbox.lock);
+ return PTR_ERR(rsp);
+ }
+
+ mutex_unlock(&nic->mbox.lock);
+
+ if (!rsp->stat_ena)
+ return -EINVAL;
+
+ stats = &flow_node->stats;
+
+ spin_lock(&flow_node->lock);
+ flow_stats_update(&tc_flow_cmd->stats, 0x0, rsp->stat - stats->pkts, 0x0, 0x0,
+ FLOW_ACTION_HW_STATS_IMMEDIATE);
+ stats->pkts = rsp->stat;
+ spin_unlock(&flow_node->lock);
+
+ return 0;
+}
+
+static int otx2_setup_tc_cls_flower(struct otx2_nic *nic,
+ struct flow_cls_offload *cls_flower)
+{
+ switch (cls_flower->command) {
+ case FLOW_CLS_REPLACE:
+ return otx2_tc_add_flow(nic, cls_flower);
+ case FLOW_CLS_DESTROY:
+ return otx2_tc_del_flow(nic, cls_flower);
+ case FLOW_CLS_STATS:
+ return otx2_tc_get_flow_stats(nic, cls_flower);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int otx2_tc_ingress_matchall_install(struct otx2_nic *nic,
+ struct tc_cls_matchall_offload *cls)
+{
+ struct netlink_ext_ack *extack = cls->common.extack;
+ struct flow_action *actions = &cls->rule->action;
+ struct flow_action_entry *entry;
+ u64 rate;
+ int err;
+
+ err = otx2_tc_validate_flow(nic, actions, extack);
+ if (err)
+ return err;
+
+ if (nic->flags & OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED) {
+ NL_SET_ERR_MSG_MOD(extack,
+				   "Only one ingress MATCHALL ratelimiter can be offloaded");
+ return -ENOMEM;
+ }
+
+ entry = &cls->rule->action.entries[0];
+ switch (entry->id) {
+ case FLOW_ACTION_POLICE:
+ /* Ingress ratelimiting is not supported on OcteonTx2 */
+ if (is_dev_otx2(nic->pdev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Ingress policing not supported on this platform");
+ return -EOPNOTSUPP;
+ }
+
+ err = cn10k_alloc_matchall_ipolicer(nic);
+ if (err)
+ return err;
+
+ /* Convert to bits per second */
+ rate = entry->police.rate_bytes_ps * 8;
+ err = cn10k_set_matchall_ipolicer_rate(nic, entry->police.burst, rate);
+ if (err)
+ return err;
+ nic->flags |= OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED;
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only police action supported with Ingress MATCHALL offload");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int otx2_tc_ingress_matchall_delete(struct otx2_nic *nic,
+ struct tc_cls_matchall_offload *cls)
+{
+ struct netlink_ext_ack *extack = cls->common.extack;
+ int err;
+
+ if (nic->flags & OTX2_FLAG_INTF_DOWN) {
+ NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
+ return -EINVAL;
+ }
+
+ err = cn10k_free_matchall_ipolicer(nic);
+ nic->flags &= ~OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED;
+ return err;
+}
+
+static int otx2_setup_tc_ingress_matchall(struct otx2_nic *nic,
+ struct tc_cls_matchall_offload *cls_matchall)
+{
+ switch (cls_matchall->command) {
+ case TC_CLSMATCHALL_REPLACE:
+ return otx2_tc_ingress_matchall_install(nic, cls_matchall);
+ case TC_CLSMATCHALL_DESTROY:
+ return otx2_tc_ingress_matchall_delete(nic, cls_matchall);
+ case TC_CLSMATCHALL_STATS:
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int otx2_setup_tc_block_ingress_cb(enum tc_setup_type type,
+ void *type_data, void *cb_priv)
+{
+ struct otx2_nic *nic = cb_priv;
+ bool ntuple;
+
+ if (!tc_cls_can_offload_and_chain0(nic->netdev, type_data))
+ return -EOPNOTSUPP;
+
+ ntuple = nic->netdev->features & NETIF_F_NTUPLE;
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ if (ntuple) {
+ netdev_warn(nic->netdev,
+ "Can't install TC flower offload rule when NTUPLE is active");
+ return -EOPNOTSUPP;
+ }
+
+ return otx2_setup_tc_cls_flower(nic, type_data);
+ case TC_SETUP_CLSMATCHALL:
+ return otx2_setup_tc_ingress_matchall(nic, type_data);
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int otx2_setup_tc_egress_matchall(struct otx2_nic *nic,
+ struct tc_cls_matchall_offload *cls_matchall)
+{
+ switch (cls_matchall->command) {
+ case TC_CLSMATCHALL_REPLACE:
+ return otx2_tc_egress_matchall_install(nic, cls_matchall);
+ case TC_CLSMATCHALL_DESTROY:
+ return otx2_tc_egress_matchall_delete(nic, cls_matchall);
+ case TC_CLSMATCHALL_STATS:
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int otx2_setup_tc_block_egress_cb(enum tc_setup_type type,
+ void *type_data, void *cb_priv)
+{
+ struct otx2_nic *nic = cb_priv;
+
+ if (!tc_cls_can_offload_and_chain0(nic->netdev, type_data))
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_CLSMATCHALL:
+ return otx2_setup_tc_egress_matchall(nic, type_data);
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static LIST_HEAD(otx2_block_cb_list);
+
+static int otx2_setup_tc_block(struct net_device *netdev,
+ struct flow_block_offload *f)
+{
+ struct otx2_nic *nic = netdev_priv(netdev);
+ flow_setup_cb_t *cb;
+ bool ingress;
+
+ if (f->block_shared)
+ return -EOPNOTSUPP;
+
+ if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) {
+ cb = otx2_setup_tc_block_ingress_cb;
+ ingress = true;
+ } else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
+ cb = otx2_setup_tc_block_egress_cb;
+ ingress = false;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ return flow_block_cb_setup_simple(f, &otx2_block_cb_list, cb,
+ nic, nic, ingress);
+}
+
+int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type,
+ void *type_data)
+{
+ switch (type) {
+ case TC_SETUP_BLOCK:
+ return otx2_setup_tc_block(netdev, type_data);
+ case TC_SETUP_QDISC_HTB:
+ return otx2_setup_tc_htb(netdev, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+EXPORT_SYMBOL(otx2_setup_tc);
+
+int otx2_init_tc(struct otx2_nic *nic)
+{
+	/* Exclude receive queue 0 from being used for the police action */
+ set_bit(0, &nic->rq_bmap);
+
+ if (!nic->flow_cfg) {
+ netdev_err(nic->netdev,
+ "Can't init TC, nic->flow_cfg is not setup\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(otx2_init_tc);
+
+void otx2_shutdown_tc(struct otx2_nic *nic)
+{
+ otx2_destroy_tc_flow_list(nic);
+}
+EXPORT_SYMBOL(otx2_shutdown_tc);
+
+static void otx2_tc_config_ingress_rule(struct otx2_nic *nic,
+ struct otx2_tc_flow *node)
+{
+ struct npc_install_flow_req *req;
+
+ if (otx2_tc_act_set_hw_police(nic, node))
+ return;
+
+ mutex_lock(&nic->mbox.lock);
+
+ req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox);
+ if (!req)
+ goto err;
+
+ memcpy(req, &node->req, sizeof(struct npc_install_flow_req));
+
+ if (otx2_sync_mbox_msg(&nic->mbox))
+ netdev_err(nic->netdev,
+ "Failed to install MCAM flow entry for ingress rule");
+err:
+ mutex_unlock(&nic->mbox.lock);
+}
+
+void otx2_tc_apply_ingress_police_rules(struct otx2_nic *nic)
+{
+ struct otx2_flow_config *flow_cfg = nic->flow_cfg;
+ struct otx2_tc_flow *node;
+
+ /* If any ingress policer rules exist for the interface then
+ * apply those rules. Ingress policer rules depend on bandwidth
+ * profiles linked to the receive queues. Since no receive queues
+ * exist when interface is down, ingress policer rules are stored
+ * and configured in hardware after all receive queues are allocated
+ * in otx2_open.
+ */
+ list_for_each_entry(node, &flow_cfg->flow_list_tc, list) {
+ if (node->is_act_police)
+ otx2_tc_config_ingress_rule(nic, node);
+ }
+}
+EXPORT_SYMBOL(otx2_tc_apply_ingress_police_rules);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
new file mode 100644
index 000000000..4d519ea83
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -0,0 +1,1461 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Ethernet driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <linux/etherdevice.h>
+#include <net/ip.h>
+#include <net/tso.h>
+#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
+#include <net/ip6_checksum.h>
+
+#include "otx2_reg.h"
+#include "otx2_common.h"
+#include "otx2_struct.h"
+#include "otx2_txrx.h"
+#include "otx2_ptp.h"
+#include "cn10k.h"
+
+#define CQE_ADDR(CQ, idx) ((CQ)->cqe_base + ((CQ)->cqe_size * (idx)))
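+/* UDP port 319 (0x13F), used by PTP event messages such as Sync */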
+#define PTP_PORT 0x13F
+/* The PTPv2 header's Original Timestamp starts at byte offset 34 and
+ * contains a 6-byte seconds field followed by a 4-byte nanoseconds field.
+ */
+#define PTP_SYNC_SEC_OFFSET 34
+
+static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
+ struct bpf_prog *prog,
+ struct nix_cqe_rx_s *cqe,
+ struct otx2_cq_queue *cq,
+ bool *need_xdp_flush);
+
+static int otx2_nix_cq_op_status(struct otx2_nic *pfvf,
+ struct otx2_cq_queue *cq)
+{
+ u64 incr = (u64)(cq->cq_idx) << 32;
+ u64 status;
+
+ status = otx2_atomic64_fetch_add(incr, pfvf->cq_op_addr);
+
+ if (unlikely(status & BIT_ULL(CQ_OP_STAT_OP_ERR) ||
+ status & BIT_ULL(CQ_OP_STAT_CQ_ERR))) {
+ dev_err(pfvf->dev, "CQ stopped due to error");
+ return -EINVAL;
+ }
+
+ cq->cq_tail = status & 0xFFFFF;
+ cq->cq_head = (status >> 20) & 0xFFFFF;
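+	/* Number of pending CQEs, accounting for ring wrap-around: e.g. with
+	 * cqe_cnt = 1024, cq_head = 1000 and cq_tail = 10 there are
+	 * (1024 - 1000) + 10 = 34 CQEs pending.
+	 */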
+ if (cq->cq_tail < cq->cq_head)
+ cq->pend_cqe = (cq->cqe_cnt - cq->cq_head) +
+ cq->cq_tail;
+ else
+ cq->pend_cqe = cq->cq_tail - cq->cq_head;
+
+ return 0;
+}
+
+static struct nix_cqe_hdr_s *otx2_get_next_cqe(struct otx2_cq_queue *cq)
+{
+ struct nix_cqe_hdr_s *cqe_hdr;
+
+ cqe_hdr = (struct nix_cqe_hdr_s *)CQE_ADDR(cq, cq->cq_head);
+ if (cqe_hdr->cqe_type == NIX_XQE_TYPE_INVALID)
+ return NULL;
+
+ cq->cq_head++;
+ cq->cq_head &= (cq->cqe_cnt - 1);
+
+ return cqe_hdr;
+}
+
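+/* The 16-bit segment length slots of an SG subdescriptor are lanes of a
+ * 64-bit word, so on big-endian hosts the index within each group of four
+ * must be mirrored, e.g. frag_num(0) = 3 and frag_num(2) = 1.
+ * Little-endian hosts use the index as-is.
+ */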
+static unsigned int frag_num(unsigned int i)
+{
+#ifdef __BIG_ENDIAN
+ return (i & ~3) + 3 - (i & 3);
+#else
+ return i;
+#endif
+}
+
+static dma_addr_t otx2_dma_map_skb_frag(struct otx2_nic *pfvf,
+ struct sk_buff *skb, int seg, int *len)
+{
+ const skb_frag_t *frag;
+ struct page *page;
+ int offset;
+
+ /* First segment is always skb->data */
+ if (!seg) {
+ page = virt_to_page(skb->data);
+ offset = offset_in_page(skb->data);
+ *len = skb_headlen(skb);
+ } else {
+ frag = &skb_shinfo(skb)->frags[seg - 1];
+ page = skb_frag_page(frag);
+ offset = skb_frag_off(frag);
+ *len = skb_frag_size(frag);
+ }
+ return otx2_dma_map_page(pfvf, page, offset, *len, DMA_TO_DEVICE);
+}
+
+static void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg)
+{
+ int seg;
+
+ for (seg = 0; seg < sg->num_segs; seg++) {
+ otx2_dma_unmap_page(pfvf, sg->dma_addr[seg],
+ sg->size[seg], DMA_TO_DEVICE);
+ }
+ sg->num_segs = 0;
+}
+
+static void otx2_xdp_snd_pkt_handler(struct otx2_nic *pfvf,
+ struct otx2_snd_queue *sq,
+ struct nix_cqe_tx_s *cqe)
+{
+ struct nix_send_comp_s *snd_comp = &cqe->comp;
+ struct sg_list *sg;
+ struct page *page;
+ u64 pa;
+
+ sg = &sq->sg[snd_comp->sqe_id];
+
+ pa = otx2_iova_to_phys(pfvf->iommu_domain, sg->dma_addr[0]);
+ otx2_dma_unmap_page(pfvf, sg->dma_addr[0],
+ sg->size[0], DMA_TO_DEVICE);
+ page = virt_to_page(phys_to_virt(pa));
+ put_page(page);
+}
+
+static void otx2_snd_pkt_handler(struct otx2_nic *pfvf,
+ struct otx2_cq_queue *cq,
+ struct otx2_snd_queue *sq,
+ struct nix_cqe_tx_s *cqe,
+ int budget, int *tx_pkts, int *tx_bytes)
+{
+ struct nix_send_comp_s *snd_comp = &cqe->comp;
+ struct skb_shared_hwtstamps ts;
+ struct sk_buff *skb = NULL;
+ u64 timestamp, tsns;
+ struct sg_list *sg;
+ int err;
+
+ if (unlikely(snd_comp->status) && netif_msg_tx_err(pfvf))
+ net_err_ratelimited("%s: TX%d: Error in send CQ status:%x\n",
+ pfvf->netdev->name, cq->cint_idx,
+ snd_comp->status);
+
+ sg = &sq->sg[snd_comp->sqe_id];
+ skb = (struct sk_buff *)sg->skb;
+ if (unlikely(!skb))
+ return;
+
+ if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) {
+ timestamp = ((u64 *)sq->timestamps->base)[snd_comp->sqe_id];
+ if (timestamp != 1) {
+ timestamp = pfvf->ptp->convert_tx_ptp_tstmp(timestamp);
+ err = otx2_ptp_tstamp2time(pfvf, timestamp, &tsns);
+ if (!err) {
+ memset(&ts, 0, sizeof(ts));
+ ts.hwtstamp = ns_to_ktime(tsns);
+ skb_tstamp_tx(skb, &ts);
+ }
+ }
+ }
+
+ *tx_bytes += skb->len;
+ (*tx_pkts)++;
+ otx2_dma_unmap_skb_frags(pfvf, sg);
+ napi_consume_skb(skb, budget);
+ sg->skb = (u64)NULL;
+}
+
+static void otx2_set_rxtstamp(struct otx2_nic *pfvf,
+ struct sk_buff *skb, void *data)
+{
+ u64 timestamp, tsns;
+ int err;
+
+ if (!(pfvf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED))
+ return;
+
+ timestamp = pfvf->ptp->convert_rx_ptp_tstmp(*(u64 *)data);
+ /* The first 8 bytes is the timestamp */
+ err = otx2_ptp_tstamp2time(pfvf, timestamp, &tsns);
+ if (err)
+ return;
+
+ skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(tsns);
+}
+
+static bool otx2_skb_add_frag(struct otx2_nic *pfvf, struct sk_buff *skb,
+ u64 iova, int len, struct nix_rx_parse_s *parse,
+ int qidx)
+{
+ struct page *page;
+ int off = 0;
+ void *va;
+
+ va = phys_to_virt(otx2_iova_to_phys(pfvf->iommu_domain, iova));
+
+ if (likely(!skb_shinfo(skb)->nr_frags)) {
+ /* Check if data starts at some nonzero offset
+ * from the start of the buffer. For now the
+ * only possible offset is 8 bytes in the case
+ * where packet is prepended by a timestamp.
+ */
+ if (parse->laptr) {
+ otx2_set_rxtstamp(pfvf, skb, va);
+ off = OTX2_HW_TIMESTAMP_LEN;
+ }
+ }
+
+ page = virt_to_page(va);
+ if (likely(skb_shinfo(skb)->nr_frags < MAX_SKB_FRAGS)) {
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+ va - page_address(page) + off,
+ len - off, pfvf->rbsize);
+ return true;
+ }
+
+ /* If more than MAX_SKB_FRAGS fragments are received then
+ * give back those buffer pointers to hardware for reuse.
+ */
+ pfvf->hw_ops->aura_freeptr(pfvf, qidx, iova & ~0x07ULL);
+
+ return false;
+}
+
+static void otx2_set_rxhash(struct otx2_nic *pfvf,
+ struct nix_cqe_rx_s *cqe, struct sk_buff *skb)
+{
+ enum pkt_hash_types hash_type = PKT_HASH_TYPE_NONE;
+ struct otx2_rss_info *rss;
+ u32 hash = 0;
+
+ if (!(pfvf->netdev->features & NETIF_F_RXHASH))
+ return;
+
+ rss = &pfvf->hw.rss_info;
+ if (rss->flowkey_cfg) {
+ if (rss->flowkey_cfg &
+ ~(NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6))
+ hash_type = PKT_HASH_TYPE_L4;
+ else
+ hash_type = PKT_HASH_TYPE_L3;
+ hash = cqe->hdr.flow_tag;
+ }
+ skb_set_hash(skb, hash, hash_type);
+}
+
+static void otx2_free_rcv_seg(struct otx2_nic *pfvf, struct nix_cqe_rx_s *cqe,
+ int qidx)
+{
+ struct nix_rx_sg_s *sg = &cqe->sg;
+ void *end, *start;
+ u64 *seg_addr;
+ int seg;
+
+ start = (void *)sg;
+ end = start + ((cqe->parse.desc_sizem1 + 1) * 16);
+ while (start < end) {
+ sg = (struct nix_rx_sg_s *)start;
+ seg_addr = &sg->seg_addr;
+ for (seg = 0; seg < sg->segs; seg++, seg_addr++)
+ pfvf->hw_ops->aura_freeptr(pfvf, qidx,
+ *seg_addr & ~0x07ULL);
+ start += sizeof(*sg);
+ }
+}
+
+static bool otx2_check_rcv_errors(struct otx2_nic *pfvf,
+ struct nix_cqe_rx_s *cqe, int qidx)
+{
+ struct otx2_drv_stats *stats = &pfvf->hw.drv_stats;
+ struct nix_rx_parse_s *parse = &cqe->parse;
+
+ if (netif_msg_rx_err(pfvf))
+ netdev_err(pfvf->netdev,
+ "RQ%d: Error pkt with errlev:0x%x errcode:0x%x\n",
+ qidx, parse->errlev, parse->errcode);
+
+ if (parse->errlev == NPC_ERRLVL_RE) {
+ switch (parse->errcode) {
+ case ERRCODE_FCS:
+ case ERRCODE_FCS_RCV:
+ atomic_inc(&stats->rx_fcs_errs);
+ break;
+ case ERRCODE_UNDERSIZE:
+ atomic_inc(&stats->rx_undersize_errs);
+ break;
+ case ERRCODE_OVERSIZE:
+ atomic_inc(&stats->rx_oversize_errs);
+ break;
+ case ERRCODE_OL2_LEN_MISMATCH:
+ atomic_inc(&stats->rx_len_errs);
+ break;
+ default:
+ atomic_inc(&stats->rx_other_errs);
+ break;
+ }
+ } else if (parse->errlev == NPC_ERRLVL_NIX) {
+ switch (parse->errcode) {
+ case ERRCODE_OL3_LEN:
+ case ERRCODE_OL4_LEN:
+ case ERRCODE_IL3_LEN:
+ case ERRCODE_IL4_LEN:
+ atomic_inc(&stats->rx_len_errs);
+ break;
+ case ERRCODE_OL4_CSUM:
+ case ERRCODE_IL4_CSUM:
+ atomic_inc(&stats->rx_csum_errs);
+ break;
+ default:
+ atomic_inc(&stats->rx_other_errs);
+ break;
+ }
+ } else {
+ atomic_inc(&stats->rx_other_errs);
+ /* For now ignore all the NPC parser errors and
+ * pass the packets to stack.
+ */
+ return false;
+ }
+
+ /* If RXALL is enabled pass on packets to stack. */
+ if (pfvf->netdev->features & NETIF_F_RXALL)
+ return false;
+
+ /* Free buffer back to pool */
+ if (cqe->sg.segs)
+ otx2_free_rcv_seg(pfvf, cqe, qidx);
+ return true;
+}
+
+static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
+ struct napi_struct *napi,
+ struct otx2_cq_queue *cq,
+ struct nix_cqe_rx_s *cqe, bool *need_xdp_flush)
+{
+ struct nix_rx_parse_s *parse = &cqe->parse;
+ struct nix_rx_sg_s *sg = &cqe->sg;
+ struct sk_buff *skb = NULL;
+ void *end, *start;
+ u64 *seg_addr;
+ u16 *seg_size;
+ int seg;
+
+ if (unlikely(parse->errlev || parse->errcode)) {
+ if (otx2_check_rcv_errors(pfvf, cqe, cq->cq_idx))
+ return;
+ }
+
+ if (pfvf->xdp_prog)
+ if (otx2_xdp_rcv_pkt_handler(pfvf, pfvf->xdp_prog, cqe, cq, need_xdp_flush))
+ return;
+
+ skb = napi_get_frags(napi);
+ if (unlikely(!skb))
+ return;
+
+ start = (void *)sg;
+ end = start + ((cqe->parse.desc_sizem1 + 1) * 16);
+ while (start < end) {
+ sg = (struct nix_rx_sg_s *)start;
+ seg_addr = &sg->seg_addr;
+ seg_size = (void *)sg;
+ for (seg = 0; seg < sg->segs; seg++, seg_addr++) {
+ if (otx2_skb_add_frag(pfvf, skb, *seg_addr,
+ seg_size[seg], parse, cq->cq_idx))
+ cq->pool_ptrs++;
+ }
+ start += sizeof(*sg);
+ }
+ otx2_set_rxhash(pfvf, cqe, skb);
+
+ skb_record_rx_queue(skb, cq->cq_idx);
+ if (pfvf->netdev->features & NETIF_F_RXCSUM)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ skb_mark_for_recycle(skb);
+
+ napi_gro_frags(napi);
+}
+
+static int otx2_rx_napi_handler(struct otx2_nic *pfvf,
+ struct napi_struct *napi,
+ struct otx2_cq_queue *cq, int budget)
+{
+ bool need_xdp_flush = false;
+ struct nix_cqe_rx_s *cqe;
+ int processed_cqe = 0;
+
+ if (cq->pend_cqe >= budget)
+ goto process_cqe;
+
+ if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
+ return 0;
+
+process_cqe:
+ while (likely(processed_cqe < budget) && cq->pend_cqe) {
+ cqe = (struct nix_cqe_rx_s *)CQE_ADDR(cq, cq->cq_head);
+ if (cqe->hdr.cqe_type == NIX_XQE_TYPE_INVALID ||
+ !cqe->sg.seg_addr) {
+ if (!processed_cqe)
+ return 0;
+ break;
+ }
+ cq->cq_head++;
+ cq->cq_head &= (cq->cqe_cnt - 1);
+
+ otx2_rcv_pkt_handler(pfvf, napi, cq, cqe, &need_xdp_flush);
+
+ cqe->hdr.cqe_type = NIX_XQE_TYPE_INVALID;
+ cqe->sg.seg_addr = 0x00;
+ processed_cqe++;
+ cq->pend_cqe--;
+ }
+ if (need_xdp_flush)
+ xdp_do_flush();
+
+ /* Free CQEs to HW */
+ otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
+ ((u64)cq->cq_idx << 32) | processed_cqe);
+
+ return processed_cqe;
+}
+
+int otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
+{
+ struct otx2_nic *pfvf = dev;
+ int cnt = cq->pool_ptrs;
+ dma_addr_t bufptr;
+
+ while (cq->pool_ptrs) {
+ if (otx2_alloc_buffer(pfvf, cq, &bufptr))
+ break;
+ otx2_aura_freeptr(pfvf, cq->cq_idx, bufptr + OTX2_HEAD_ROOM);
+ cq->pool_ptrs--;
+ }
+
+ return cnt - cq->pool_ptrs;
+}
+
+static int otx2_tx_napi_handler(struct otx2_nic *pfvf,
+ struct otx2_cq_queue *cq, int budget)
+{
+ int tx_pkts = 0, tx_bytes = 0, qidx;
+ struct otx2_snd_queue *sq;
+ struct nix_cqe_tx_s *cqe;
+ int processed_cqe = 0;
+
+ if (cq->pend_cqe >= budget)
+ goto process_cqe;
+
+ if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
+ return 0;
+
+process_cqe:
+ qidx = cq->cq_idx - pfvf->hw.rx_queues;
+ sq = &pfvf->qset.sq[qidx];
+
+ while (likely(processed_cqe < budget) && cq->pend_cqe) {
+ cqe = (struct nix_cqe_tx_s *)otx2_get_next_cqe(cq);
+ if (unlikely(!cqe)) {
+ if (!processed_cqe)
+ return 0;
+ break;
+ }
+
+ qidx = cq->cq_idx - pfvf->hw.rx_queues;
+
+ if (cq->cq_type == CQ_XDP)
+ otx2_xdp_snd_pkt_handler(pfvf, sq, cqe);
+ else
+ otx2_snd_pkt_handler(pfvf, cq, &pfvf->qset.sq[qidx],
+ cqe, budget, &tx_pkts, &tx_bytes);
+
+ cqe->hdr.cqe_type = NIX_XQE_TYPE_INVALID;
+ processed_cqe++;
+ cq->pend_cqe--;
+
+ sq->cons_head++;
+ sq->cons_head &= (sq->sqe_cnt - 1);
+ }
+
+ /* Free CQEs to HW */
+ otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
+ ((u64)cq->cq_idx << 32) | processed_cqe);
+
+ if (likely(tx_pkts)) {
+ struct netdev_queue *txq;
+
+ qidx = cq->cq_idx - pfvf->hw.rx_queues;
+
+ if (qidx >= pfvf->hw.tx_queues)
+ qidx -= pfvf->hw.xdp_queues;
+ txq = netdev_get_tx_queue(pfvf->netdev, qidx);
+ netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);
+ /* Check if queue was stopped earlier due to ring full */
+ smp_mb();
+ if (netif_tx_queue_stopped(txq) &&
+ netif_carrier_ok(pfvf->netdev))
+ netif_tx_wake_queue(txq);
+ }
+ return 0;
+}
+
+static void otx2_adjust_adaptive_coalese(struct otx2_nic *pfvf, struct otx2_cq_poll *cq_poll)
+{
+ struct dim_sample dim_sample;
+ u64 rx_frames, rx_bytes;
+ u64 tx_frames, tx_bytes;
+
+ rx_frames = OTX2_GET_RX_STATS(RX_BCAST) + OTX2_GET_RX_STATS(RX_MCAST) +
+ OTX2_GET_RX_STATS(RX_UCAST);
+ rx_bytes = OTX2_GET_RX_STATS(RX_OCTS);
+ tx_bytes = OTX2_GET_TX_STATS(TX_OCTS);
+ tx_frames = OTX2_GET_TX_STATS(TX_UCAST);
+
+ dim_update_sample(pfvf->napi_events,
+ rx_frames + tx_frames,
+ rx_bytes + tx_bytes,
+ &dim_sample);
+ net_dim(&cq_poll->dim, dim_sample);
+}
+
+int otx2_napi_handler(struct napi_struct *napi, int budget)
+{
+ struct otx2_cq_queue *rx_cq = NULL;
+ struct otx2_cq_poll *cq_poll;
+ int workdone = 0, cq_idx, i;
+ struct otx2_cq_queue *cq;
+ struct otx2_qset *qset;
+ struct otx2_nic *pfvf;
+ int filled_cnt = -1;
+
+ cq_poll = container_of(napi, struct otx2_cq_poll, napi);
+ pfvf = (struct otx2_nic *)cq_poll->dev;
+ qset = &pfvf->qset;
+
+ for (i = 0; i < CQS_PER_CINT; i++) {
+ cq_idx = cq_poll->cq_ids[i];
+ if (unlikely(cq_idx == CINT_INVALID_CQ))
+ continue;
+ cq = &qset->cq[cq_idx];
+ if (cq->cq_type == CQ_RX) {
+ rx_cq = cq;
+ workdone += otx2_rx_napi_handler(pfvf, napi,
+ cq, budget);
+ } else {
+ workdone += otx2_tx_napi_handler(pfvf, cq, budget);
+ }
+ }
+
+ if (rx_cq && rx_cq->pool_ptrs)
+ filled_cnt = pfvf->hw_ops->refill_pool_ptrs(pfvf, rx_cq);
+ /* Clear the IRQ */
+ otx2_write64(pfvf, NIX_LF_CINTX_INT(cq_poll->cint_idx), BIT_ULL(0));
+
+ if (workdone < budget && napi_complete_done(napi, workdone)) {
+ /* If interface is going down, don't re-enable IRQ */
+ if (pfvf->flags & OTX2_FLAG_INTF_DOWN)
+ return workdone;
+
+		/* Adjust IRQ coalescing using net_dim */
+ if (pfvf->flags & OTX2_FLAG_ADPTV_INT_COAL_ENABLED)
+ otx2_adjust_adaptive_coalese(pfvf, cq_poll);
+
+ if (unlikely(!filled_cnt)) {
+ struct refill_work *work;
+ struct delayed_work *dwork;
+
+ work = &pfvf->refill_wrk[cq->cq_idx];
+ dwork = &work->pool_refill_work;
+ /* Schedule a task if no other task is running */
+ if (!cq->refill_task_sched) {
+ work->napi = napi;
+ cq->refill_task_sched = true;
+ schedule_delayed_work(dwork,
+ msecs_to_jiffies(100));
+ }
+ } else {
+ /* Re-enable interrupts */
+ otx2_write64(pfvf,
+ NIX_LF_CINTX_ENA_W1S(cq_poll->cint_idx),
+ BIT_ULL(0));
+ }
+ }
+ return workdone;
+}
+
+void otx2_sqe_flush(void *dev, struct otx2_snd_queue *sq,
+ int size, int qidx)
+{
+ u64 status;
+
+ /* Packet data stores should finish before SQE is flushed to HW */
+ dma_wmb();
+
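+	/* otx2_lmt_flush() returns 0 if the LMTST was aborted (e.g. the LMT
+	 * line was invalidated by an interrupt between the copy and the
+	 * flush), so retry until the SQE is accepted by HW.
+	 */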
+ do {
+ memcpy(sq->lmt_addr, sq->sqe_base, size);
+ status = otx2_lmt_flush(sq->io_addr);
+ } while (status == 0);
+
+ sq->head++;
+ sq->head &= (sq->sqe_cnt - 1);
+}
+
+#define MAX_SEGS_PER_SG 3
+/* Add SQE scatter/gather subdescriptor structure */
+static bool otx2_sqe_add_sg(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
+ struct sk_buff *skb, int num_segs, int *offset)
+{
+ struct nix_sqe_sg_s *sg = NULL;
+ u64 dma_addr, *iova = NULL;
+ u16 *sg_lens = NULL;
+ int seg, len;
+
+ sq->sg[sq->head].num_segs = 0;
+
+ for (seg = 0; seg < num_segs; seg++) {
+ if ((seg % MAX_SEGS_PER_SG) == 0) {
+ sg = (struct nix_sqe_sg_s *)(sq->sqe_base + *offset);
+ sg->ld_type = NIX_SEND_LDTYPE_LDD;
+ sg->subdc = NIX_SUBDC_SG;
+ sg->segs = 0;
+ sg_lens = (void *)sg;
+ iova = (void *)sg + sizeof(*sg);
+			/* The next subdescriptor always starts at a 16-byte
+			 * boundary, so when this SG subdesc will hold 2 or 3
+			 * segments reserve room for all three IOVAs, i.e.
+			 * 16 bytes more than the single-segment case.
+			 */
+ if ((num_segs - seg) >= (MAX_SEGS_PER_SG - 1))
+ *offset += sizeof(*sg) + (3 * sizeof(u64));
+ else
+ *offset += sizeof(*sg) + sizeof(u64);
+ }
+ dma_addr = otx2_dma_map_skb_frag(pfvf, skb, seg, &len);
+ if (dma_mapping_error(pfvf->dev, dma_addr))
+ return false;
+
+ sg_lens[frag_num(seg % MAX_SEGS_PER_SG)] = len;
+ sg->segs++;
+ *iova++ = dma_addr;
+
+ /* Save DMA mapping info for later unmapping */
+ sq->sg[sq->head].dma_addr[seg] = dma_addr;
+ sq->sg[sq->head].size[seg] = len;
+ sq->sg[sq->head].num_segs++;
+ }
+
+ sq->sg[sq->head].skb = (u64)skb;
+ return true;
+}
+
+/* Add SQE extended header subdescriptor */
+static void otx2_sqe_add_ext(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
+ struct sk_buff *skb, int *offset)
+{
+ struct nix_sqe_ext_s *ext;
+
+ ext = (struct nix_sqe_ext_s *)(sq->sqe_base + *offset);
+ ext->subdc = NIX_SUBDC_EXT;
+ if (skb_shinfo(skb)->gso_size) {
+ ext->lso = 1;
+ ext->lso_sb = skb_tcp_all_headers(skb);
+ ext->lso_mps = skb_shinfo(skb)->gso_size;
+
+ /* Only TSOv4 and TSOv6 GSO offloads are supported */
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
+ ext->lso_format = pfvf->hw.lso_tsov4_idx;
+
+ /* HW adds payload size to 'ip_hdr->tot_len' while
+ * sending TSO segment, hence set payload length
+ * in IP header of the packet to just header length.
+ */
+ ip_hdr(skb)->tot_len =
+ htons(ext->lso_sb - skb_network_offset(skb));
+ } else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
+ ext->lso_format = pfvf->hw.lso_tsov6_idx;
+ ipv6_hdr(skb)->payload_len = htons(tcp_hdrlen(skb));
+ } else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
+ __be16 l3_proto = vlan_get_protocol(skb);
+ struct udphdr *udph = udp_hdr(skb);
+ u16 iplen;
+
+ ext->lso_sb = skb_transport_offset(skb) +
+ sizeof(struct udphdr);
+
+ /* HW adds payload size to length fields in IP and
+ * UDP headers while segmentation, hence adjust the
+ * lengths to just header sizes.
+ */
+ iplen = htons(ext->lso_sb - skb_network_offset(skb));
+ if (l3_proto == htons(ETH_P_IP)) {
+ ip_hdr(skb)->tot_len = iplen;
+ ext->lso_format = pfvf->hw.lso_udpv4_idx;
+ } else {
+ ipv6_hdr(skb)->payload_len = iplen;
+ ext->lso_format = pfvf->hw.lso_udpv6_idx;
+ }
+
+ udph->len = htons(sizeof(struct udphdr));
+ }
+ } else if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
+ ext->tstmp = 1;
+ }
+
+#define OTX2_VLAN_PTR_OFFSET (ETH_HLEN - ETH_TLEN)
+ if (skb_vlan_tag_present(skb)) {
+ if (skb->vlan_proto == htons(ETH_P_8021Q)) {
+ ext->vlan1_ins_ena = 1;
+ ext->vlan1_ins_ptr = OTX2_VLAN_PTR_OFFSET;
+ ext->vlan1_ins_tci = skb_vlan_tag_get(skb);
+ } else if (skb->vlan_proto == htons(ETH_P_8021AD)) {
+ ext->vlan0_ins_ena = 1;
+ ext->vlan0_ins_ptr = OTX2_VLAN_PTR_OFFSET;
+ ext->vlan0_ins_tci = skb_vlan_tag_get(skb);
+ }
+ }
+
+ *offset += sizeof(*ext);
+}
+
+static void otx2_sqe_add_mem(struct otx2_snd_queue *sq, int *offset,
+ int alg, u64 iova, int ptp_offset,
+ u64 base_ns, bool udp_csum_crt)
+{
+ struct nix_sqe_mem_s *mem;
+
+ mem = (struct nix_sqe_mem_s *)(sq->sqe_base + *offset);
+ mem->subdc = NIX_SUBDC_MEM;
+ mem->alg = alg;
+ mem->wmem = 1; /* wait for the memory operation */
+ mem->addr = iova;
+
+ if (ptp_offset) {
+ mem->start_offset = ptp_offset;
+ mem->udp_csum_crt = !!udp_csum_crt;
+ mem->base_ns = base_ns;
+ mem->step_type = 1;
+ }
+
+ *offset += sizeof(*mem);
+}
+
+/* Add SQE header subdescriptor structure */
+static void otx2_sqe_add_hdr(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
+ struct nix_sqe_hdr_s *sqe_hdr,
+ struct sk_buff *skb, u16 qidx)
+{
+ int proto = 0;
+
+	/* If an SQE was framed here before, these constant fields are
+	 * already set and need not be written again.
+	 */
+ if (!sqe_hdr->total) {
+ /* Don't free Tx buffers to Aura */
+ sqe_hdr->df = 1;
+ sqe_hdr->aura = sq->aura_id;
+ /* Post a CQE Tx after pkt transmission */
+ sqe_hdr->pnc = 1;
+ sqe_hdr->sq = (qidx >= pfvf->hw.tx_queues) ?
+ qidx + pfvf->hw.xdp_queues : qidx;
+ }
+ sqe_hdr->total = skb->len;
+ /* Set SQE identifier which will be used later for freeing SKB */
+ sqe_hdr->sqe_id = sq->head;
+
+ /* Offload TCP/UDP checksum to HW */
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ sqe_hdr->ol3ptr = skb_network_offset(skb);
+ sqe_hdr->ol4ptr = skb_transport_offset(skb);
+ /* get vlan protocol Ethertype */
+ if (eth_type_vlan(skb->protocol))
+ skb->protocol = vlan_get_protocol(skb);
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ proto = ip_hdr(skb)->protocol;
+ /* In case of TSO, HW needs this to be explicitly set.
+ * So set this always, instead of adding a check.
+ */
+ sqe_hdr->ol3type = NIX_SENDL3TYPE_IP4_CKSUM;
+ } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ proto = ipv6_hdr(skb)->nexthdr;
+ sqe_hdr->ol3type = NIX_SENDL3TYPE_IP6;
+ }
+
+ if (proto == IPPROTO_TCP)
+ sqe_hdr->ol4type = NIX_SENDL4TYPE_TCP_CKSUM;
+ else if (proto == IPPROTO_UDP)
+ sqe_hdr->ol4type = NIX_SENDL4TYPE_UDP_CKSUM;
+ }
+}
+
+static int otx2_dma_map_tso_skb(struct otx2_nic *pfvf,
+ struct otx2_snd_queue *sq,
+ struct sk_buff *skb, int sqe, int hdr_len)
+{
+ int num_segs = skb_shinfo(skb)->nr_frags + 1;
+ struct sg_list *sg = &sq->sg[sqe];
+ u64 dma_addr;
+ int seg, len;
+
+ sg->num_segs = 0;
+
+ /* Get payload length at skb->data */
+ len = skb_headlen(skb) - hdr_len;
+
+ for (seg = 0; seg < num_segs; seg++) {
+ /* Skip skb->data, if there is no payload */
+ if (!seg && !len)
+ continue;
+ dma_addr = otx2_dma_map_skb_frag(pfvf, skb, seg, &len);
+ if (dma_mapping_error(pfvf->dev, dma_addr))
+ goto unmap;
+
+ /* Save DMA mapping info for later unmapping */
+ sg->dma_addr[sg->num_segs] = dma_addr;
+ sg->size[sg->num_segs] = len;
+ sg->num_segs++;
+ }
+ return 0;
+unmap:
+ otx2_dma_unmap_skb_frags(pfvf, sg);
+ return -EINVAL;
+}
+
+static u64 otx2_tso_frag_dma_addr(struct otx2_snd_queue *sq,
+ struct sk_buff *skb, int seg,
+ u64 seg_addr, int hdr_len, int sqe)
+{
+ struct sg_list *sg = &sq->sg[sqe];
+ const skb_frag_t *frag;
+ int offset;
+
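+	/* A negative seg index means the TSO cursor is still within the
+	 * skb linear data area, which was mapped as the first DMA segment.
+	 */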
+ if (seg < 0)
+ return sg->dma_addr[0] + (seg_addr - (u64)skb->data);
+
+ frag = &skb_shinfo(skb)->frags[seg];
+ offset = seg_addr - (u64)skb_frag_address(frag);
+ if (skb_headlen(skb) - hdr_len)
+ seg++;
+ return sg->dma_addr[seg] + offset;
+}
+
+static void otx2_sqe_tso_add_sg(struct otx2_snd_queue *sq,
+ struct sg_list *list, int *offset)
+{
+ struct nix_sqe_sg_s *sg = NULL;
+ u16 *sg_lens = NULL;
+ u64 *iova = NULL;
+ int seg;
+
+ /* Add SG descriptors with buffer addresses */
+ for (seg = 0; seg < list->num_segs; seg++) {
+ if ((seg % MAX_SEGS_PER_SG) == 0) {
+ sg = (struct nix_sqe_sg_s *)(sq->sqe_base + *offset);
+ sg->ld_type = NIX_SEND_LDTYPE_LDD;
+ sg->subdc = NIX_SUBDC_SG;
+ sg->segs = 0;
+ sg_lens = (void *)sg;
+ iova = (void *)sg + sizeof(*sg);
+			/* The next subdescriptor always starts at a 16-byte
+			 * boundary, so when this SG subdesc will hold 2 or 3
+			 * segments reserve room for all three IOVAs, i.e.
+			 * 16 bytes more than the single-segment case.
+			 */
+ if ((list->num_segs - seg) >= (MAX_SEGS_PER_SG - 1))
+ *offset += sizeof(*sg) + (3 * sizeof(u64));
+ else
+ *offset += sizeof(*sg) + sizeof(u64);
+ }
+ sg_lens[frag_num(seg % MAX_SEGS_PER_SG)] = list->size[seg];
+ *iova++ = list->dma_addr[seg];
+ sg->segs++;
+ }
+}
+
+static void otx2_sq_append_tso(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
+ struct sk_buff *skb, u16 qidx)
+{
+ struct netdev_queue *txq = netdev_get_tx_queue(pfvf->netdev, qidx);
+ int hdr_len, tcp_data, seg_len, pkt_len, offset;
+ struct nix_sqe_hdr_s *sqe_hdr;
+ int first_sqe = sq->head;
+ struct sg_list list;
+ struct tso_t tso;
+
+ hdr_len = tso_start(skb, &tso);
+
+ /* Map SKB's fragments to DMA.
+ * It's done here to avoid mapping for every TSO segment's packet.
+ */
+ if (otx2_dma_map_tso_skb(pfvf, sq, skb, first_sqe, hdr_len)) {
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ netdev_tx_sent_queue(txq, skb->len);
+
+ tcp_data = skb->len - hdr_len;
+ while (tcp_data > 0) {
+ char *hdr;
+
+ seg_len = min_t(int, skb_shinfo(skb)->gso_size, tcp_data);
+ tcp_data -= seg_len;
+
+ /* Set SQE's SEND_HDR */
+ memset(sq->sqe_base, 0, sq->sqe_size);
+ sqe_hdr = (struct nix_sqe_hdr_s *)(sq->sqe_base);
+ otx2_sqe_add_hdr(pfvf, sq, sqe_hdr, skb, qidx);
+ offset = sizeof(*sqe_hdr);
+
+ /* Add TSO segment's pkt header */
+ hdr = sq->tso_hdrs->base + (sq->head * TSO_HEADER_SIZE);
+ tso_build_hdr(skb, hdr, &tso, seg_len, tcp_data == 0);
+ list.dma_addr[0] =
+ sq->tso_hdrs->iova + (sq->head * TSO_HEADER_SIZE);
+ list.size[0] = hdr_len;
+ list.num_segs = 1;
+
+ /* Add TSO segment's payload data fragments */
+ pkt_len = hdr_len;
+ while (seg_len > 0) {
+ int size;
+
+ size = min_t(int, tso.size, seg_len);
+
+ list.size[list.num_segs] = size;
+ list.dma_addr[list.num_segs] =
+ otx2_tso_frag_dma_addr(sq, skb,
+ tso.next_frag_idx - 1,
+ (u64)tso.data, hdr_len,
+ first_sqe);
+ list.num_segs++;
+ pkt_len += size;
+ seg_len -= size;
+ tso_build_data(skb, &tso, size);
+ }
+ sqe_hdr->total = pkt_len;
+ otx2_sqe_tso_add_sg(sq, &list, &offset);
+
+ /* DMA mappings and skb needs to be freed only after last
+ * TSO segment is transmitted out. So set 'PNC' only for
+ * last segment. Also point last segment's sqe_id to first
+ * segment's SQE index where skb address and DMA mappings
+ * are saved.
+ */
+ if (!tcp_data) {
+ sqe_hdr->pnc = 1;
+ sqe_hdr->sqe_id = first_sqe;
+ sq->sg[first_sqe].skb = (u64)skb;
+ } else {
+ sqe_hdr->pnc = 0;
+ }
+
+ sqe_hdr->sizem1 = (offset / 16) - 1;
+
+ /* Flush SQE to HW */
+ pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx);
+ }
+}
+
+static bool is_hw_tso_supported(struct otx2_nic *pfvf,
+ struct sk_buff *skb)
+{
+ int payload_len, last_seg_size;
+
+ if (test_bit(HW_TSO, &pfvf->hw.cap_flag))
+ return true;
+
+ /* On 96xx A0, HW TSO not supported */
+ if (!is_96xx_B0(pfvf->pdev))
+ return false;
+
+	/* HW has an issue where, if the payload of the last LSO segment is
+	 * shorter than 16 bytes, some header fields may not be modified
+	 * correctly; hence don't offload such TSO segments.
+	 */
+
+ payload_len = skb->len - skb_tcp_all_headers(skb);
+ last_seg_size = payload_len % skb_shinfo(skb)->gso_size;
+ if (last_seg_size && last_seg_size < 16)
+ return false;
+
+ return true;
+}
+
+static int otx2_get_sqe_count(struct otx2_nic *pfvf, struct sk_buff *skb)
+{
+ if (!skb_shinfo(skb)->gso_size)
+ return 1;
+
+ /* HW TSO */
+ if (is_hw_tso_supported(pfvf, skb))
+ return 1;
+
+ /* SW TSO */
+ return skb_shinfo(skb)->gso_segs;
+}
+
+static bool otx2_validate_network_transport(struct sk_buff *skb)
+{
+ if ((ip_hdr(skb)->protocol == IPPROTO_UDP) ||
+ (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)) {
+ struct udphdr *udph = udp_hdr(skb);
+
+ if (udph->source == htons(PTP_PORT) &&
+ udph->dest == htons(PTP_PORT))
+ return true;
+ }
+
+ return false;
+}
+
+static bool otx2_ptp_is_sync(struct sk_buff *skb, int *offset, bool *udp_csum_crt)
+{
+ struct ethhdr *eth = (struct ethhdr *)(skb->data);
+ u16 nix_offload_hlen = 0, inner_vhlen = 0;
+ bool udp_hdr_present = false, is_sync;
+ u8 *data = skb->data, *msgtype;
+ __be16 proto = eth->h_proto;
+ int network_depth = 0;
+
+	/* NIX is programmed to offload the outer VLAN header. With a single
+	 * VLAN, the protocol field holds the network header type (IP/IPv6);
+	 * with stacked VLANs it holds the inner VLAN type (0x8100).
+	 */
+ if (skb->dev->features & NETIF_F_HW_VLAN_CTAG_TX &&
+ skb->dev->features & NETIF_F_HW_VLAN_STAG_TX) {
+ if (skb->vlan_proto == htons(ETH_P_8021AD)) {
+ /* Get vlan protocol */
+ proto = __vlan_get_protocol(skb, eth->h_proto, NULL);
+ /* SKB APIs like skb_transport_offset does not include
+ * offloaded vlan header length. Need to explicitly add
+ * the length
+ */
+ nix_offload_hlen = VLAN_HLEN;
+ inner_vhlen = VLAN_HLEN;
+ } else if (skb->vlan_proto == htons(ETH_P_8021Q)) {
+ nix_offload_hlen = VLAN_HLEN;
+ }
+ } else if (eth_type_vlan(eth->h_proto)) {
+ proto = __vlan_get_protocol(skb, eth->h_proto, &network_depth);
+ }
+
+ switch (ntohs(proto)) {
+ case ETH_P_1588:
+ if (network_depth)
+ *offset = network_depth;
+ else
+ *offset = ETH_HLEN + nix_offload_hlen +
+ inner_vhlen;
+ break;
+ case ETH_P_IP:
+ case ETH_P_IPV6:
+ if (!otx2_validate_network_transport(skb))
+ return false;
+
+ *offset = nix_offload_hlen + skb_transport_offset(skb) +
+ sizeof(struct udphdr);
+ udp_hdr_present = true;
+
+ }
+
+ msgtype = data + *offset;
+	/* PTP messageType is the low nibble of the first header byte;
+	 * a value of 0 indicates a Sync message.
+	 */
+ is_sync = !(*msgtype & 0xf);
+ if (is_sync)
+ *udp_csum_crt = udp_hdr_present;
+ else
+ *offset = 0;
+
+ return is_sync;
+}
+
+static void otx2_set_txtstamp(struct otx2_nic *pfvf, struct sk_buff *skb,
+ struct otx2_snd_queue *sq, int *offset)
+{
+ struct ethhdr *eth = (struct ethhdr *)(skb->data);
+ struct ptpv2_tstamp *origin_tstamp;
+ bool udp_csum_crt = false;
+ unsigned int udphoff;
+ struct timespec64 ts;
+ int ptp_offset = 0;
+ __wsum skb_csum;
+ u64 iova;
+
+ if (unlikely(!skb_shinfo(skb)->gso_size &&
+ (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))) {
+ if (unlikely(pfvf->flags & OTX2_FLAG_PTP_ONESTEP_SYNC &&
+ otx2_ptp_is_sync(skb, &ptp_offset, &udp_csum_crt))) {
+ origin_tstamp = (struct ptpv2_tstamp *)
+ ((u8 *)skb->data + ptp_offset +
+ PTP_SYNC_SEC_OFFSET);
+ ts = ns_to_timespec64(pfvf->ptp->tstamp);
+ origin_tstamp->seconds_msb = htons((ts.tv_sec >> 32) & 0xffff);
+ origin_tstamp->seconds_lsb = htonl(ts.tv_sec & 0xffffffff);
+ origin_tstamp->nanoseconds = htonl(ts.tv_nsec);
+ /* Point to correction field in PTP packet */
+ ptp_offset += 8;
+
+ /* When user disables hw checksum, stack calculates the csum,
+ * but it does not cover ptp timestamp which is added later.
+ * Recalculate the checksum manually considering the timestamp.
+ */
+ if (udp_csum_crt) {
+ struct udphdr *uh = udp_hdr(skb);
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL && uh->check != 0) {
+ udphoff = skb_transport_offset(skb);
+ uh->check = 0;
+ skb_csum = skb_checksum(skb, udphoff, skb->len - udphoff,
+ 0);
+ if (ntohs(eth->h_proto) == ETH_P_IPV6)
+ uh->check = csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+ &ipv6_hdr(skb)->daddr,
+ skb->len - udphoff,
+ ipv6_hdr(skb)->nexthdr,
+ skb_csum);
+ else
+ uh->check = csum_tcpudp_magic(ip_hdr(skb)->saddr,
+ ip_hdr(skb)->daddr,
+ skb->len - udphoff,
+ IPPROTO_UDP,
+ skb_csum);
+ }
+ }
+ } else {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ }
+ iova = sq->timestamps->iova + (sq->head * sizeof(u64));
+ otx2_sqe_add_mem(sq, offset, NIX_SENDMEMALG_E_SETTSTMP, iova,
+ ptp_offset, pfvf->ptp->base_ns, udp_csum_crt);
+ } else {
+ skb_tx_timestamp(skb);
+ }
+}
+
+bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
+ struct sk_buff *skb, u16 qidx)
+{
+ struct netdev_queue *txq = netdev_get_tx_queue(netdev, qidx);
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ int offset, num_segs, free_desc;
+ struct nix_sqe_hdr_s *sqe_hdr;
+
+ /* Check if there is enough room between producer
+ * and consumer index.
+ */
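+	/* The SQE ring size is a power of two, so the masked subtraction
+	 * handles wrap-around: e.g. sqe_cnt = 1024, head = 10, cons_head = 5
+	 * gives free_desc = 1018.
+	 */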
+ free_desc = (sq->cons_head - sq->head - 1 + sq->sqe_cnt) & (sq->sqe_cnt - 1);
+ if (free_desc < sq->sqe_thresh)
+ return false;
+
+ if (free_desc < otx2_get_sqe_count(pfvf, skb))
+ return false;
+
+ num_segs = skb_shinfo(skb)->nr_frags + 1;
+
+ /* If SKB doesn't fit in a single SQE, linearize it.
+ * TODO: Consider adding JUMP descriptor instead.
+ */
+ if (unlikely(num_segs > OTX2_MAX_FRAGS_IN_SQE)) {
+ if (__skb_linearize(skb)) {
+ dev_kfree_skb_any(skb);
+ return true;
+ }
+ num_segs = skb_shinfo(skb)->nr_frags + 1;
+ }
+
+ if (skb_shinfo(skb)->gso_size && !is_hw_tso_supported(pfvf, skb)) {
+ /* Insert vlan tag before giving pkt to tso */
+ if (skb_vlan_tag_present(skb))
+ skb = __vlan_hwaccel_push_inside(skb);
+ otx2_sq_append_tso(pfvf, sq, skb, qidx);
+ return true;
+ }
+
+ /* Set SQE's SEND_HDR.
+ * Do not clear the first 64bit as it contains constant info.
+ */
+ memset(sq->sqe_base + 8, 0, sq->sqe_size - 8);
+ sqe_hdr = (struct nix_sqe_hdr_s *)(sq->sqe_base);
+ otx2_sqe_add_hdr(pfvf, sq, sqe_hdr, skb, qidx);
+ offset = sizeof(*sqe_hdr);
+
+ /* Add extended header if needed */
+ otx2_sqe_add_ext(pfvf, sq, skb, &offset);
+
+ /* Add SG subdesc with data frags */
+ if (!otx2_sqe_add_sg(pfvf, sq, skb, num_segs, &offset)) {
+ otx2_dma_unmap_skb_frags(pfvf, &sq->sg[sq->head]);
+ return false;
+ }
+
+ otx2_set_txtstamp(pfvf, skb, sq, &offset);
+
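+	/* SQE size is encoded in 16-byte units, minus one: e.g. a 48-byte SQE
+	 * (SEND_HDR + extension + one single-segment SG subdesc) encodes as
+	 * sizem1 = 2.
+	 */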
+ sqe_hdr->sizem1 = (offset / 16) - 1;
+
+ netdev_tx_sent_queue(txq, skb->len);
+
+ /* Flush SQE to HW */
+ pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx);
+
+ return true;
+}
+EXPORT_SYMBOL(otx2_sq_append_skb);
+
+void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, int qidx)
+{
+ struct nix_cqe_rx_s *cqe;
+ struct otx2_pool *pool;
+ int processed_cqe = 0;
+ u16 pool_id;
+ u64 iova;
+
+ if (pfvf->xdp_prog)
+ xdp_rxq_info_unreg(&cq->xdp_rxq);
+
+ if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
+ return;
+
+ pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_RQ, qidx);
+ pool = &pfvf->qset.pool[pool_id];
+
+ while (cq->pend_cqe) {
+ cqe = (struct nix_cqe_rx_s *)otx2_get_next_cqe(cq);
+ processed_cqe++;
+ cq->pend_cqe--;
+
+ if (!cqe)
+ continue;
+ if (cqe->sg.segs > 1) {
+ otx2_free_rcv_seg(pfvf, cqe, cq->cq_idx);
+ continue;
+ }
+ iova = cqe->sg.seg_addr - OTX2_HEAD_ROOM;
+
+ otx2_free_bufs(pfvf, pool, iova, pfvf->rbsize);
+ }
+
+ /* Free CQEs to HW */
+ otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
+ ((u64)cq->cq_idx << 32) | processed_cqe);
+}
+
+void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
+{
+ int tx_pkts = 0, tx_bytes = 0;
+ struct sk_buff *skb = NULL;
+ struct otx2_snd_queue *sq;
+ struct nix_cqe_tx_s *cqe;
+ struct netdev_queue *txq;
+ int processed_cqe = 0;
+ struct sg_list *sg;
+ int qidx;
+
+ qidx = cq->cq_idx - pfvf->hw.rx_queues;
+ sq = &pfvf->qset.sq[qidx];
+
+ if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
+ return;
+
+ while (cq->pend_cqe) {
+ cqe = (struct nix_cqe_tx_s *)otx2_get_next_cqe(cq);
+ processed_cqe++;
+ cq->pend_cqe--;
+
+ if (!cqe)
+ continue;
+ sg = &sq->sg[cqe->comp.sqe_id];
+ skb = (struct sk_buff *)sg->skb;
+ if (skb) {
+ tx_bytes += skb->len;
+ tx_pkts++;
+ otx2_dma_unmap_skb_frags(pfvf, sg);
+ dev_kfree_skb_any(skb);
+ sg->skb = (u64)NULL;
+ }
+ }
+
+ if (likely(tx_pkts)) {
+ if (qidx >= pfvf->hw.tx_queues)
+ qidx -= pfvf->hw.xdp_queues;
+ txq = netdev_get_tx_queue(pfvf->netdev, qidx);
+ netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);
+ }
+ /* Free CQEs to HW */
+ otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
+ ((u64)cq->cq_idx << 32) | processed_cqe);
+}
+
+int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable)
+{
+ struct msg_req *msg;
+ int err;
+
+ mutex_lock(&pfvf->mbox.lock);
+ if (enable)
+ msg = otx2_mbox_alloc_msg_nix_lf_start_rx(&pfvf->mbox);
+ else
+ msg = otx2_mbox_alloc_msg_nix_lf_stop_rx(&pfvf->mbox);
+
+ if (!msg) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
+}
+
+void otx2_free_pending_sqe(struct otx2_nic *pfvf)
+{
+ int tx_pkts = 0, tx_bytes = 0;
+ struct sk_buff *skb = NULL;
+ struct otx2_snd_queue *sq;
+ struct netdev_queue *txq;
+ struct sg_list *sg;
+ int sq_idx, sqe;
+
+ for (sq_idx = 0; sq_idx < pfvf->hw.tx_queues; sq_idx++) {
+ sq = &pfvf->qset.sq[sq_idx];
+ for (sqe = 0; sqe < sq->sqe_cnt; sqe++) {
+ sg = &sq->sg[sqe];
+ skb = (struct sk_buff *)sg->skb;
+ if (skb) {
+ tx_bytes += skb->len;
+ tx_pkts++;
+ otx2_dma_unmap_skb_frags(pfvf, sg);
+ dev_kfree_skb_any(skb);
+ sg->skb = (u64)NULL;
+ }
+ }
+
+ if (!tx_pkts)
+ continue;
+ txq = netdev_get_tx_queue(pfvf->netdev, sq_idx);
+ netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);
+ tx_pkts = 0;
+ tx_bytes = 0;
+ }
+}
+
+static void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq, u64 dma_addr,
+ int len, int *offset)
+{
+ struct nix_sqe_sg_s *sg = NULL;
+ u64 *iova = NULL;
+
+ sg = (struct nix_sqe_sg_s *)(sq->sqe_base + *offset);
+ sg->ld_type = NIX_SEND_LDTYPE_LDD;
+ sg->subdc = NIX_SUBDC_SG;
+ sg->segs = 1;
+ sg->seg1_size = len;
+ iova = (void *)sg + sizeof(*sg);
+ *iova = dma_addr;
+ *offset += sizeof(*sg) + sizeof(u64);
+
+ sq->sg[sq->head].dma_addr[0] = dma_addr;
+ sq->sg[sq->head].size[0] = len;
+ sq->sg[sq->head].num_segs = 1;
+}
+
+bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx)
+{
+ struct nix_sqe_hdr_s *sqe_hdr;
+ struct otx2_snd_queue *sq;
+ int offset, free_sqe;
+
+ sq = &pfvf->qset.sq[qidx];
+ free_sqe = (sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb;
+ if (free_sqe < sq->sqe_thresh)
+ return false;
+
+ memset(sq->sqe_base + 8, 0, sq->sqe_size - 8);
+
+ sqe_hdr = (struct nix_sqe_hdr_s *)(sq->sqe_base);
+
+ if (!sqe_hdr->total) {
+ sqe_hdr->aura = sq->aura_id;
+ sqe_hdr->df = 1;
+ sqe_hdr->sq = qidx;
+ sqe_hdr->pnc = 1;
+ }
+ sqe_hdr->total = len;
+ sqe_hdr->sqe_id = sq->head;
+
+ offset = sizeof(*sqe_hdr);
+
+ otx2_xdp_sqe_add_sg(sq, iova, len, &offset);
+ sqe_hdr->sizem1 = (offset / 16) - 1;
+ pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx);
+
+ return true;
+}
+
+static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
+ struct bpf_prog *prog,
+ struct nix_cqe_rx_s *cqe,
+ struct otx2_cq_queue *cq,
+ bool *need_xdp_flush)
+{
+ unsigned char *hard_start, *data;
+ int qidx = cq->cq_idx;
+ struct xdp_buff xdp;
+ struct page *page;
+ u64 iova, pa;
+ u32 act;
+ int err;
+
+ iova = cqe->sg.seg_addr - OTX2_HEAD_ROOM;
+ pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
+ page = virt_to_page(phys_to_virt(pa));
+
+ xdp_init_buff(&xdp, pfvf->rbsize, &cq->xdp_rxq);
+
+ data = (unsigned char *)phys_to_virt(pa);
+ hard_start = page_address(page);
+ xdp_prepare_buff(&xdp, hard_start, data - hard_start,
+ cqe->sg.seg_size, false);
+
+ act = bpf_prog_run_xdp(prog, &xdp);
+
+ switch (act) {
+ case XDP_PASS:
+ break;
+ case XDP_TX:
+ qidx += pfvf->hw.tx_queues;
+ cq->pool_ptrs++;
+ return otx2_xdp_sq_append_pkt(pfvf, iova,
+ cqe->sg.seg_size, qidx);
+ case XDP_REDIRECT:
+ cq->pool_ptrs++;
+ err = xdp_do_redirect(pfvf->netdev, &xdp, prog);
+
+ otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize,
+ DMA_FROM_DEVICE);
+ if (!err) {
+ *need_xdp_flush = true;
+ return true;
+ }
+ put_page(page);
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(pfvf->netdev, prog, act);
+ break;
+ case XDP_ABORTED:
+ trace_xdp_exception(pfvf->netdev, prog, act);
+ break;
+ case XDP_DROP:
+ otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize,
+ DMA_FROM_DEVICE);
+ put_page(page);
+ cq->pool_ptrs++;
+ return true;
+ }
+ return false;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
new file mode 100644
index 000000000..a82ffca8c
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
@@ -0,0 +1,175 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell RVU Ethernet driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#ifndef OTX2_TXRX_H
+#define OTX2_TXRX_H
+
+#include <linux/etherdevice.h>
+#include <linux/iommu.h>
+#include <linux/if_vlan.h>
+#include <net/xdp.h>
+
+#define LBK_CHAN_BASE 0x000
+#define SDP_CHAN_BASE 0x700
+#define CGX_CHAN_BASE 0x800
+
+#define OTX2_DATA_ALIGN(X) ALIGN(X, OTX2_ALIGN)
+#define OTX2_HEAD_ROOM OTX2_ALIGN
+
+#define OTX2_ETH_HLEN (VLAN_ETH_HLEN + VLAN_HLEN)
+#define OTX2_MIN_MTU 60
+
+#define OTX2_PAGE_POOL_SZ 2048
+
+#define OTX2_MAX_GSO_SEGS 255
+#define OTX2_MAX_FRAGS_IN_SQE 9
+
+#define MAX_XDP_MTU (1530 - OTX2_ETH_HLEN)
+
+/* Rx buffer size should be in multiples of 128 bytes */
+#define RCV_FRAG_LEN1(x) \
+ ((OTX2_HEAD_ROOM + OTX2_DATA_ALIGN(x)) + \
+ OTX2_DATA_ALIGN(sizeof(struct skb_shared_info)))
+
+/* Prefer 2048 byte buffers for better last level cache
+ * utilization or data distribution across regions.
+ */
+#define RCV_FRAG_LEN(x) \
+ ((RCV_FRAG_LEN1(x) < 2048) ? 2048 : RCV_FRAG_LEN1(x))
+
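+/* Portion of a receive buffer that is handed to HW for packet DMA: the
+ * fragment length minus the reserved headroom.
+ */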
+#define DMA_BUFFER_LEN(x) ((x) - OTX2_HEAD_ROOM)
+
+/* IRQ triggered when NIX_LF_CINTX_CNT[ECOUNT]
+ * is equal to this value.
+ */
+#define CQ_CQE_THRESH_DEFAULT 10
+
+/* IRQ triggered when NIX_LF_CINTX_CNT[ECOUNT]
+ * is nonzero and this much time elapses after that.
+ */
+#define CQ_TIMER_THRESH_DEFAULT 1 /* 1 usec */
+#define CQ_TIMER_THRESH_MAX 25 /* 25 usec */
+
+/* Min number of CQs (of the ones mapped to this CINT)
+ * with valid CQEs.
+ */
+#define CQ_QCOUNT_DEFAULT 1
+
+#define CQ_OP_STAT_OP_ERR 63
+#define CQ_OP_STAT_CQ_ERR 46
+
+struct queue_stats {
+ u64 bytes;
+ u64 pkts;
+};
+
+struct otx2_rcv_queue {
+ struct queue_stats stats;
+};
+
+struct sg_list {
+ u16 num_segs;
+ u64 skb;
+ u64 size[OTX2_MAX_FRAGS_IN_SQE];
+ u64 dma_addr[OTX2_MAX_FRAGS_IN_SQE];
+};
+
+struct otx2_snd_queue {
+ u8 aura_id;
+ u16 head;
+ u16 cons_head;
+ u16 sqe_size;
+ u32 sqe_cnt;
+ u16 num_sqbs;
+ u16 sqe_thresh;
+ u8 sqe_per_sqb;
+ u64 io_addr;
+ u64 *aura_fc_addr;
+ u64 *lmt_addr;
+ void *sqe_base;
+ struct qmem *sqe;
+ struct qmem *tso_hdrs;
+ struct sg_list *sg;
+ struct qmem *timestamps;
+ struct queue_stats stats;
+ u16 sqb_count;
+ u64 *sqb_ptrs;
+} ____cacheline_aligned_in_smp;
+
+enum cq_type {
+ CQ_RX,
+ CQ_TX,
+ CQ_XDP,
+ CQ_QOS,
+ CQS_PER_CINT = 4, /* RQ + SQ + XDP + QOS_SQ */
+};
+
+struct otx2_cq_poll {
+ void *dev;
+#define CINT_INVALID_CQ 255
+ u8 cint_idx;
+ u8 cq_ids[CQS_PER_CINT];
+ struct dim dim;
+ struct napi_struct napi;
+};
+
+struct otx2_pool {
+ struct qmem *stack;
+ struct qmem *fc_addr;
+ struct page_pool *page_pool;
+ u16 rbsize;
+};
+
+struct otx2_cq_queue {
+ u8 cq_idx;
+ u8 cq_type;
+ u8 cint_idx; /* CQ interrupt id */
+ u8 refill_task_sched;
+ u16 cqe_size;
+ u16 pool_ptrs;
+ u32 cqe_cnt;
+ u32 cq_head;
+ u32 cq_tail;
+ u32 pend_cqe;
+ void *cqe_base;
+ struct qmem *cqe;
+ struct otx2_pool *rbpool;
+ struct xdp_rxq_info xdp_rxq;
+} ____cacheline_aligned_in_smp;
+
+struct otx2_qset {
+ u32 rqe_cnt;
+ u32 sqe_cnt; /* Keep these two at top */
+#define OTX2_MAX_CQ_CNT 64
+ u16 cq_cnt;
+ u16 xqe_size;
+ struct otx2_pool *pool;
+ struct otx2_cq_poll *napi;
+ struct otx2_cq_queue *cq;
+ struct otx2_snd_queue *sq;
+ struct otx2_rcv_queue *rq;
+};
+
+/* Translate IOVA to physical address */
+static inline u64 otx2_iova_to_phys(void *iommu_domain, dma_addr_t dma_addr)
+{
+ /* Translation is installed only when IOMMU is present */
+ if (likely(iommu_domain))
+ return iommu_iova_to_phys(iommu_domain, dma_addr);
+ return dma_addr;
+}
+
+int otx2_napi_handler(struct napi_struct *napi, int budget);
+bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
+ struct sk_buff *skb, u16 qidx);
+void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq,
+ int size, int qidx);
+void otx2_sqe_flush(void *dev, struct otx2_snd_queue *sq,
+ int size, int qidx);
+int otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
+int cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
+#endif /* OTX2_TXRX_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
new file mode 100644
index 000000000..35e060483
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
@@ -0,0 +1,797 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Virtual Function ethernet driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/net_tstamp.h>
+
+#include "otx2_common.h"
+#include "otx2_reg.h"
+#include "otx2_ptp.h"
+#include "cn10k.h"
+
+#define DRV_NAME "rvu_nicvf"
+#define DRV_STRING "Marvell RVU NIC Virtual Function Driver"
+
+static const struct pci_device_id otx2_vf_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_AFVF) },
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_VF) },
+ { }
+};
+
+MODULE_AUTHOR("Sunil Goutham <sgoutham@marvell.com>");
+MODULE_DESCRIPTION(DRV_STRING);
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(pci, otx2_vf_id_table);
+
+/* RVU VF Interrupt Vector Enumeration */
+enum {
+ RVU_VF_INT_VEC_MBOX = 0x0,
+};
+
+static void otx2vf_process_vfaf_mbox_msg(struct otx2_nic *vf,
+ struct mbox_msghdr *msg)
+{
+ if (msg->id >= MBOX_MSG_MAX) {
+ dev_err(vf->dev,
+ "Mbox msg with unknown ID %d\n", msg->id);
+ return;
+ }
+
+ if (msg->sig != OTX2_MBOX_RSP_SIG) {
+ dev_err(vf->dev,
+ "Mbox msg with wrong signature %x, ID %d\n",
+ msg->sig, msg->id);
+ return;
+ }
+
+ if (msg->rc == MBOX_MSG_INVALID) {
+ dev_err(vf->dev,
+ "PF/AF says the sent msg(s) %d were invalid\n",
+ msg->id);
+ return;
+ }
+
+ switch (msg->id) {
+ case MBOX_MSG_READY:
+ vf->pcifunc = msg->pcifunc;
+ break;
+ case MBOX_MSG_MSIX_OFFSET:
+ mbox_handler_msix_offset(vf, (struct msix_offset_rsp *)msg);
+ break;
+ case MBOX_MSG_NPA_LF_ALLOC:
+ mbox_handler_npa_lf_alloc(vf, (struct npa_lf_alloc_rsp *)msg);
+ break;
+ case MBOX_MSG_NIX_LF_ALLOC:
+ mbox_handler_nix_lf_alloc(vf, (struct nix_lf_alloc_rsp *)msg);
+ break;
+ case MBOX_MSG_NIX_BP_ENABLE:
+ mbox_handler_nix_bp_enable(vf, (struct nix_bp_cfg_rsp *)msg);
+ break;
+ default:
+ if (msg->rc)
+ dev_err(vf->dev,
+ "Mbox msg response has err %d, ID %d\n",
+ msg->rc, msg->id);
+ }
+}
+
+static void otx2vf_vfaf_mbox_handler(struct work_struct *work)
+{
+ struct otx2_mbox_dev *mdev;
+ struct mbox_hdr *rsp_hdr;
+ struct mbox_msghdr *msg;
+ struct otx2_mbox *mbox;
+ struct mbox *af_mbox;
+ int offset, id;
+
+ af_mbox = container_of(work, struct mbox, mbox_wrk);
+ mbox = &af_mbox->mbox;
+ mdev = &mbox->dev[0];
+ rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ if (af_mbox->num_msgs == 0)
+ return;
+ offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
+
+ for (id = 0; id < af_mbox->num_msgs; id++) {
+ msg = (struct mbox_msghdr *)(mdev->mbase + offset);
+ otx2vf_process_vfaf_mbox_msg(af_mbox->pfvf, msg);
+ offset = mbox->rx_start + msg->next_msgoff;
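+		/* Reset the mbox region once the final response of this
+		 * batch has been processed.
+		 */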
+ if (mdev->msgs_acked == (af_mbox->num_msgs - 1))
+ __otx2_mbox_reset(mbox, 0);
+ mdev->msgs_acked++;
+ }
+}
+
+static int otx2vf_process_mbox_msg_up(struct otx2_nic *vf,
+ struct mbox_msghdr *req)
+{
+ struct msg_rsp *rsp;
+ int err;
+
+	/* Check if valid; if not, reply with an invalid msg */
+ if (req->sig != OTX2_MBOX_REQ_SIG) {
+ otx2_reply_invalid_msg(&vf->mbox.mbox_up, 0, 0, req->id);
+ return -ENODEV;
+ }
+
+ switch (req->id) {
+ case MBOX_MSG_CGX_LINK_EVENT:
+ rsp = (struct msg_rsp *)otx2_mbox_alloc_msg(
+ &vf->mbox.mbox_up, 0,
+ sizeof(struct msg_rsp));
+ if (!rsp)
+ return -ENOMEM;
+
+ rsp->hdr.id = MBOX_MSG_CGX_LINK_EVENT;
+ rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
+ rsp->hdr.pcifunc = 0;
+ rsp->hdr.rc = 0;
+ err = otx2_mbox_up_handler_cgx_link_event(
+ vf, (struct cgx_link_info_msg *)req, rsp);
+ return err;
+ default:
+ otx2_reply_invalid_msg(&vf->mbox.mbox_up, 0, 0, req->id);
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static void otx2vf_vfaf_mbox_up_handler(struct work_struct *work)
+{
+ struct otx2_mbox_dev *mdev;
+ struct mbox_hdr *rsp_hdr;
+ struct mbox_msghdr *msg;
+ struct otx2_mbox *mbox;
+ struct mbox *vf_mbox;
+ struct otx2_nic *vf;
+ int offset, id;
+
+ vf_mbox = container_of(work, struct mbox, mbox_up_wrk);
+ vf = vf_mbox->pfvf;
+ mbox = &vf_mbox->mbox_up;
+ mdev = &mbox->dev[0];
+
+ rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ if (vf_mbox->up_num_msgs == 0)
+ return;
+
+ offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
+
+ for (id = 0; id < vf_mbox->up_num_msgs; id++) {
+ msg = (struct mbox_msghdr *)(mdev->mbase + offset);
+ otx2vf_process_mbox_msg_up(vf, msg);
+ offset = mbox->rx_start + msg->next_msgoff;
+ }
+
+ otx2_mbox_msg_send(mbox, 0);
+}
+
+static irqreturn_t otx2vf_vfaf_mbox_intr_handler(int irq, void *vf_irq)
+{
+ struct otx2_nic *vf = (struct otx2_nic *)vf_irq;
+ struct otx2_mbox_dev *mdev;
+ struct otx2_mbox *mbox;
+ struct mbox_hdr *hdr;
+
+ /* Clear the IRQ */
+ otx2_write64(vf, RVU_VF_INT, BIT_ULL(0));
+
+ /* Read latest mbox data */
+ smp_rmb();
+
+ /* Check for PF => VF response messages */
+ mbox = &vf->mbox.mbox;
+ mdev = &mbox->dev[0];
+ otx2_sync_mbox_bbuf(mbox, 0);
+
+ trace_otx2_msg_interrupt(mbox->pdev, "PF to VF", BIT_ULL(0));
+
+ hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ if (hdr->num_msgs) {
+ vf->mbox.num_msgs = hdr->num_msgs;
+ hdr->num_msgs = 0;
+ memset(mbox->hwbase + mbox->rx_start, 0,
+ ALIGN(sizeof(struct mbox_hdr), sizeof(u64)));
+ queue_work(vf->mbox_wq, &vf->mbox.mbox_wrk);
+ }
+ /* Check for PF => VF notification messages */
+ mbox = &vf->mbox.mbox_up;
+ mdev = &mbox->dev[0];
+ otx2_sync_mbox_bbuf(mbox, 0);
+
+ hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ if (hdr->num_msgs) {
+ vf->mbox.up_num_msgs = hdr->num_msgs;
+ hdr->num_msgs = 0;
+ memset(mbox->hwbase + mbox->rx_start, 0,
+ ALIGN(sizeof(struct mbox_hdr), sizeof(u64)));
+ queue_work(vf->mbox_wq, &vf->mbox.mbox_up_wrk);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void otx2vf_disable_mbox_intr(struct otx2_nic *vf)
+{
+ int vector = pci_irq_vector(vf->pdev, RVU_VF_INT_VEC_MBOX);
+
+ /* Disable VF => PF mailbox IRQ */
+ otx2_write64(vf, RVU_VF_INT_ENA_W1C, BIT_ULL(0));
+ free_irq(vector, vf);
+}
+
+static int otx2vf_register_mbox_intr(struct otx2_nic *vf, bool probe_pf)
+{
+ struct otx2_hw *hw = &vf->hw;
+ struct msg_req *req;
+ char *irq_name;
+ int err;
+
+ /* Register mailbox interrupt handler */
+ irq_name = &hw->irq_name[RVU_VF_INT_VEC_MBOX * NAME_SIZE];
+ snprintf(irq_name, NAME_SIZE, "RVUVFAF Mbox");
+ err = request_irq(pci_irq_vector(vf->pdev, RVU_VF_INT_VEC_MBOX),
+ otx2vf_vfaf_mbox_intr_handler, 0, irq_name, vf);
+ if (err) {
+ dev_err(vf->dev,
+ "RVUPF: IRQ registration failed for VFAF mbox irq\n");
+ return err;
+ }
+
+ /* Enable mailbox interrupt for msgs coming from PF.
+ * First clear to avoid spurious interrupts, if any.
+ */
+ otx2_write64(vf, RVU_VF_INT, BIT_ULL(0));
+ otx2_write64(vf, RVU_VF_INT_ENA_W1S, BIT_ULL(0));
+
+ if (!probe_pf)
+ return 0;
+
+ /* Check mailbox communication with PF */
+ req = otx2_mbox_alloc_msg_ready(&vf->mbox);
+ if (!req) {
+ otx2vf_disable_mbox_intr(vf);
+ return -ENOMEM;
+ }
+
+ err = otx2_sync_mbox_msg(&vf->mbox);
+ if (err) {
+ dev_warn(vf->dev,
+ "AF not responding to mailbox, deferring probe\n");
+ otx2vf_disable_mbox_intr(vf);
+ return -EPROBE_DEFER;
+ }
+ return 0;
+}
+
+static void otx2vf_vfaf_mbox_destroy(struct otx2_nic *vf)
+{
+ struct mbox *mbox = &vf->mbox;
+
+ if (vf->mbox_wq) {
+ destroy_workqueue(vf->mbox_wq);
+ vf->mbox_wq = NULL;
+ }
+
+ if (mbox->mbox.hwbase && !test_bit(CN10K_MBOX, &vf->hw.cap_flag))
+ iounmap((void __iomem *)mbox->mbox.hwbase);
+
+ otx2_mbox_destroy(&mbox->mbox);
+ otx2_mbox_destroy(&mbox->mbox_up);
+}
+
+static int otx2vf_vfaf_mbox_init(struct otx2_nic *vf)
+{
+ struct mbox *mbox = &vf->mbox;
+ void __iomem *hwbase;
+ int err;
+
+ mbox->pfvf = vf;
+ vf->mbox_wq = alloc_ordered_workqueue("otx2_vfaf_mailbox",
+ WQ_HIGHPRI | WQ_MEM_RECLAIM);
+ if (!vf->mbox_wq)
+ return -ENOMEM;
+
+ if (test_bit(CN10K_MBOX, &vf->hw.cap_flag)) {
+ /* For cn10k platform, VF mailbox region is in its BAR2
+ * register space
+ */
+ hwbase = vf->reg_base + RVU_VF_MBOX_REGION;
+ } else {
+ /* Mailbox is a reserved memory (in RAM) region shared between
+	 * admin function (i.e. PF0) and this VF, shouldn't be mapped as
+ * device memory to allow unaligned accesses.
+ */
+ hwbase = ioremap_wc(pci_resource_start(vf->pdev,
+ PCI_MBOX_BAR_NUM),
+ pci_resource_len(vf->pdev,
+ PCI_MBOX_BAR_NUM));
+ if (!hwbase) {
+ dev_err(vf->dev, "Unable to map VFAF mailbox region\n");
+ err = -ENOMEM;
+ goto exit;
+ }
+ }
+
+ err = otx2_mbox_init(&mbox->mbox, hwbase, vf->pdev, vf->reg_base,
+ MBOX_DIR_VFPF, 1);
+ if (err)
+ goto exit;
+
+ err = otx2_mbox_init(&mbox->mbox_up, hwbase, vf->pdev, vf->reg_base,
+ MBOX_DIR_VFPF_UP, 1);
+ if (err)
+ goto exit;
+
+ err = otx2_mbox_bbuf_init(mbox, vf->pdev);
+ if (err)
+ goto exit;
+
+ INIT_WORK(&mbox->mbox_wrk, otx2vf_vfaf_mbox_handler);
+ INIT_WORK(&mbox->mbox_up_wrk, otx2vf_vfaf_mbox_up_handler);
+ mutex_init(&mbox->lock);
+
+ return 0;
+exit:
+ if (hwbase && !test_bit(CN10K_MBOX, &vf->hw.cap_flag))
+ iounmap(hwbase);
+ destroy_workqueue(vf->mbox_wq);
+ return err;
+}
+
+static int otx2vf_open(struct net_device *netdev)
+{
+ struct otx2_nic *vf;
+ int err;
+
+ err = otx2_open(netdev);
+ if (err)
+ return err;
+
+ /* LBKs do not receive link events so tell everyone we are up here */
+ vf = netdev_priv(netdev);
+ if (is_otx2_lbkvf(vf->pdev)) {
+ pr_info("%s NIC Link is UP\n", netdev->name);
+ netif_carrier_on(netdev);
+ netif_tx_start_all_queues(netdev);
+ }
+
+ return 0;
+}
+
+static int otx2vf_stop(struct net_device *netdev)
+{
+ return otx2_stop(netdev);
+}
+
+static netdev_tx_t otx2vf_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct otx2_nic *vf = netdev_priv(netdev);
+ int qidx = skb_get_queue_mapping(skb);
+ struct otx2_snd_queue *sq;
+ struct netdev_queue *txq;
+
+ sq = &vf->qset.sq[qidx];
+ txq = netdev_get_tx_queue(netdev, qidx);
+
+ if (!otx2_sq_append_skb(netdev, sq, skb, qidx)) {
+ netif_tx_stop_queue(txq);
+
+		/* Check again, in case SQBs got freed up */
+ smp_mb();
+ if (((sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb)
+ > sq->sqe_thresh)
+ netif_tx_wake_queue(txq);
+
+ return NETDEV_TX_BUSY;
+ }
+
+ return NETDEV_TX_OK;
+}
+
+static void otx2vf_set_rx_mode(struct net_device *netdev)
+{
+ struct otx2_nic *vf = netdev_priv(netdev);
+
+ queue_work(vf->otx2_wq, &vf->rx_mode_work);
+}
+
+static void otx2vf_do_set_rx_mode(struct work_struct *work)
+{
+ struct otx2_nic *vf = container_of(work, struct otx2_nic, rx_mode_work);
+ struct net_device *netdev = vf->netdev;
+ unsigned int flags = netdev->flags;
+ struct nix_rx_mode *req;
+
+ mutex_lock(&vf->mbox.lock);
+
+ req = otx2_mbox_alloc_msg_nix_set_rx_mode(&vf->mbox);
+ if (!req) {
+ mutex_unlock(&vf->mbox.lock);
+ return;
+ }
+
+ req->mode = NIX_RX_MODE_UCAST;
+
+ if (flags & IFF_PROMISC)
+ req->mode |= NIX_RX_MODE_PROMISC;
+ if (flags & (IFF_ALLMULTI | IFF_MULTICAST))
+ req->mode |= NIX_RX_MODE_ALLMULTI;
+
+ req->mode |= NIX_RX_MODE_USE_MCE;
+
+ otx2_sync_mbox_msg(&vf->mbox);
+
+ mutex_unlock(&vf->mbox.lock);
+}
+
+static int otx2vf_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ bool if_up = netif_running(netdev);
+ int err = 0;
+
+ if (if_up)
+ otx2vf_stop(netdev);
+
+ netdev_info(netdev, "Changing MTU from %d to %d\n",
+ netdev->mtu, new_mtu);
+ netdev->mtu = new_mtu;
+
+ if (if_up)
+ err = otx2vf_open(netdev);
+
+ return err;
+}
+
+static void otx2vf_reset_task(struct work_struct *work)
+{
+ struct otx2_nic *vf = container_of(work, struct otx2_nic, reset_task);
+
+ rtnl_lock();
+
+ if (netif_running(vf->netdev)) {
+ otx2vf_stop(vf->netdev);
+ vf->reset_count++;
+ otx2vf_open(vf->netdev);
+ }
+
+ rtnl_unlock();
+}
+
+static int otx2vf_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ return otx2_handle_ntuple_tc_features(netdev, features);
+}
+
+static const struct net_device_ops otx2vf_netdev_ops = {
+ .ndo_open = otx2vf_open,
+ .ndo_stop = otx2vf_stop,
+ .ndo_start_xmit = otx2vf_xmit,
+ .ndo_select_queue = otx2_select_queue,
+ .ndo_set_rx_mode = otx2vf_set_rx_mode,
+ .ndo_set_mac_address = otx2_set_mac_address,
+ .ndo_change_mtu = otx2vf_change_mtu,
+ .ndo_set_features = otx2vf_set_features,
+ .ndo_get_stats64 = otx2_get_stats64,
+ .ndo_tx_timeout = otx2_tx_timeout,
+ .ndo_eth_ioctl = otx2_ioctl,
+ .ndo_setup_tc = otx2_setup_tc,
+};
+
+static int otx2_wq_init(struct otx2_nic *vf)
+{
+ vf->otx2_wq = create_singlethread_workqueue("otx2vf_wq");
+ if (!vf->otx2_wq)
+ return -ENOMEM;
+
+ INIT_WORK(&vf->rx_mode_work, otx2vf_do_set_rx_mode);
+ INIT_WORK(&vf->reset_task, otx2vf_reset_task);
+ return 0;
+}
+
+static int otx2vf_realloc_msix_vectors(struct otx2_nic *vf)
+{
+ struct otx2_hw *hw = &vf->hw;
+ int num_vec, err;
+
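+	/* hw->nix_msixoff is known only after NIX LF attach, so recompute
+	 * the required vector count and re-allocate the MSIX vectors.
+	 */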
+ num_vec = hw->nix_msixoff;
+ num_vec += NIX_LF_CINT_VEC_START + hw->max_queues;
+
+ otx2vf_disable_mbox_intr(vf);
+ pci_free_irq_vectors(hw->pdev);
+ err = pci_alloc_irq_vectors(hw->pdev, num_vec, num_vec, PCI_IRQ_MSIX);
+ if (err < 0) {
+ dev_err(vf->dev, "%s: Failed to realloc %d IRQ vectors\n",
+ __func__, num_vec);
+ return err;
+ }
+
+ return otx2vf_register_mbox_intr(vf, false);
+}
+
+static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ int num_vec = pci_msix_vec_count(pdev);
+ struct device *dev = &pdev->dev;
+ int err, qcount, qos_txqs;
+ struct net_device *netdev;
+ struct otx2_nic *vf;
+ struct otx2_hw *hw;
+
+ err = pcim_enable_device(pdev);
+ if (err) {
+ dev_err(dev, "Failed to enable PCI device\n");
+ return err;
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ dev_err(dev, "PCI request regions failed 0x%x\n", err);
+ return err;
+ }
+
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
+ if (err) {
+ dev_err(dev, "DMA mask config failed, abort\n");
+ goto err_release_regions;
+ }
+
+ pci_set_master(pdev);
+
+ qcount = num_online_cpus();
+ qos_txqs = min_t(int, qcount, OTX2_QOS_MAX_LEAF_NODES);
+ netdev = alloc_etherdev_mqs(sizeof(*vf), qcount + qos_txqs, qcount);
+ if (!netdev) {
+ err = -ENOMEM;
+ goto err_release_regions;
+ }
+
+ pci_set_drvdata(pdev, netdev);
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+ vf = netdev_priv(netdev);
+ vf->netdev = netdev;
+ vf->pdev = pdev;
+ vf->dev = dev;
+ vf->iommu_domain = iommu_get_domain_for_dev(dev);
+
+ vf->flags |= OTX2_FLAG_INTF_DOWN;
+ hw = &vf->hw;
+ hw->pdev = vf->pdev;
+ hw->rx_queues = qcount;
+ hw->tx_queues = qcount;
+ hw->max_queues = qcount;
+ hw->non_qos_queues = qcount;
+ hw->rbuf_len = OTX2_DEFAULT_RBUF_LEN;
+	/* Use 128-byte CQE descriptors by default */
+ hw->xqe_size = 128;
+
+ hw->irq_name = devm_kmalloc_array(&hw->pdev->dev, num_vec, NAME_SIZE,
+ GFP_KERNEL);
+ if (!hw->irq_name) {
+ err = -ENOMEM;
+ goto err_free_netdev;
+ }
+
+ hw->affinity_mask = devm_kcalloc(&hw->pdev->dev, num_vec,
+ sizeof(cpumask_var_t), GFP_KERNEL);
+ if (!hw->affinity_mask) {
+ err = -ENOMEM;
+ goto err_free_netdev;
+ }
+
+ err = pci_alloc_irq_vectors(hw->pdev, num_vec, num_vec, PCI_IRQ_MSIX);
+ if (err < 0) {
+ dev_err(dev, "%s: Failed to alloc %d IRQ vectors\n",
+ __func__, num_vec);
+ goto err_free_netdev;
+ }
+
+ vf->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
+ if (!vf->reg_base) {
+ dev_err(dev, "Unable to map physical function CSRs, aborting\n");
+ err = -ENOMEM;
+ goto err_free_irq_vectors;
+ }
+
+ otx2_setup_dev_hw_settings(vf);
+ /* Init VF <=> PF mailbox stuff */
+ err = otx2vf_vfaf_mbox_init(vf);
+ if (err)
+ goto err_free_irq_vectors;
+
+ /* Register mailbox interrupt */
+ err = otx2vf_register_mbox_intr(vf, true);
+ if (err)
+ goto err_mbox_destroy;
+
+	/* Request AF to attach NPA and NIX LFs to this VF */
+ err = otx2_attach_npa_nix(vf);
+ if (err)
+ goto err_disable_mbox_intr;
+
+ err = otx2vf_realloc_msix_vectors(vf);
+ if (err)
+ goto err_detach_rsrc;
+
+ err = otx2_set_real_num_queues(netdev, qcount, qcount);
+ if (err)
+ goto err_detach_rsrc;
+
+ err = cn10k_lmtst_init(vf);
+ if (err)
+ goto err_detach_rsrc;
+
+ /* Don't check for error. Proceed without ptp */
+ otx2_ptp_init(vf);
+
+ /* Assign default mac address */
+ otx2_get_mac_from_af(netdev);
+
+ netdev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM | NETIF_F_RXHASH |
+ NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_GSO_UDP_L4;
+ netdev->features = netdev->hw_features;
+ /* Support TSO on tag interface */
+ netdev->vlan_features |= netdev->features;
+ netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_STAG_TX;
+ netdev->features |= netdev->hw_features;
+
+ netdev->hw_features |= NETIF_F_NTUPLE;
+ netdev->hw_features |= NETIF_F_RXALL;
+ netdev->hw_features |= NETIF_F_HW_TC;
+
+ netif_set_tso_max_segs(netdev, OTX2_MAX_GSO_SEGS);
+ netdev->watchdog_timeo = OTX2_TX_TIMEOUT;
+
+ netdev->netdev_ops = &otx2vf_netdev_ops;
+
+ netdev->min_mtu = OTX2_MIN_MTU;
+ netdev->max_mtu = otx2_get_max_mtu(vf);
+
+	/* Set the netdev name explicitly for LBK VFs so they can be distinguished */
+ if (is_otx2_lbkvf(vf->pdev)) {
+ int n;
+
+ n = (vf->pcifunc >> RVU_PFVF_FUNC_SHIFT) & RVU_PFVF_FUNC_MASK;
+ /* Need to subtract 1 to get proper VF number */
+ n -= 1;
+ snprintf(netdev->name, sizeof(netdev->name), "lbk%d", n);
+ }
+
+ err = register_netdev(netdev);
+ if (err) {
+ dev_err(dev, "Failed to register netdevice\n");
+ goto err_ptp_destroy;
+ }
+
+ err = otx2_wq_init(vf);
+ if (err)
+ goto err_unreg_netdev;
+
+ otx2vf_set_ethtool_ops(netdev);
+
+ err = otx2vf_mcam_flow_init(vf);
+ if (err)
+ goto err_unreg_netdev;
+
+ err = otx2_init_tc(vf);
+ if (err)
+ goto err_unreg_netdev;
+
+ err = otx2_register_dl(vf);
+ if (err)
+ goto err_shutdown_tc;
+
+#ifdef CONFIG_DCB
+ err = otx2_dcbnl_set_ops(netdev);
+ if (err)
+ goto err_shutdown_tc;
+#endif
+ otx2_qos_init(vf, qos_txqs);
+
+ return 0;
+
+err_shutdown_tc:
+ otx2_shutdown_tc(vf);
+err_unreg_netdev:
+ unregister_netdev(netdev);
+err_ptp_destroy:
+ otx2_ptp_destroy(vf);
+err_detach_rsrc:
+ free_percpu(vf->hw.lmt_info);
+ if (test_bit(CN10K_LMTST, &vf->hw.cap_flag))
+ qmem_free(vf->dev, vf->dync_lmt);
+ otx2_detach_resources(&vf->mbox);
+err_disable_mbox_intr:
+ otx2vf_disable_mbox_intr(vf);
+err_mbox_destroy:
+ otx2vf_vfaf_mbox_destroy(vf);
+err_free_irq_vectors:
+ pci_free_irq_vectors(hw->pdev);
+err_free_netdev:
+ pci_set_drvdata(pdev, NULL);
+ free_netdev(netdev);
+err_release_regions:
+ pci_release_regions(pdev);
+ return err;
+}
+
+static void otx2vf_remove(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct otx2_nic *vf;
+
+ if (!netdev)
+ return;
+
+ vf = netdev_priv(netdev);
+
+ /* Disable 802.3x pause frames */
+ if (vf->flags & OTX2_FLAG_RX_PAUSE_ENABLED ||
+ (vf->flags & OTX2_FLAG_TX_PAUSE_ENABLED)) {
+ vf->flags &= ~OTX2_FLAG_RX_PAUSE_ENABLED;
+ vf->flags &= ~OTX2_FLAG_TX_PAUSE_ENABLED;
+ otx2_config_pause_frm(vf);
+ }
+
+#ifdef CONFIG_DCB
+ /* Disable PFC config */
+ if (vf->pfc_en) {
+ vf->pfc_en = 0;
+ otx2_config_priority_flow_ctrl(vf);
+ }
+#endif
+
+ cancel_work_sync(&vf->reset_task);
+ otx2_unregister_dl(vf);
+ unregister_netdev(netdev);
+ if (vf->otx2_wq)
+ destroy_workqueue(vf->otx2_wq);
+ otx2_ptp_destroy(vf);
+ otx2_mcam_flow_del(vf);
+ otx2_shutdown_tc(vf);
+ otx2_shutdown_qos(vf);
+ otx2vf_disable_mbox_intr(vf);
+ otx2_detach_resources(&vf->mbox);
+ free_percpu(vf->hw.lmt_info);
+ if (test_bit(CN10K_LMTST, &vf->hw.cap_flag))
+ qmem_free(vf->dev, vf->dync_lmt);
+ otx2vf_vfaf_mbox_destroy(vf);
+ pci_free_irq_vectors(vf->pdev);
+ pci_set_drvdata(pdev, NULL);
+ free_netdev(netdev);
+
+ pci_release_regions(pdev);
+}
+
+static struct pci_driver otx2vf_driver = {
+ .name = DRV_NAME,
+ .id_table = otx2_vf_id_table,
+ .probe = otx2vf_probe,
+ .remove = otx2vf_remove,
+ .shutdown = otx2vf_remove,
+};
+
+static int __init otx2vf_init_module(void)
+{
+ pr_info("%s: %s\n", DRV_NAME, DRV_STRING);
+
+ return pci_register_driver(&otx2vf_driver);
+}
+
+static void __exit otx2vf_cleanup_module(void)
+{
+ pci_unregister_driver(&otx2vf_driver);
+}
+
+module_init(otx2vf_init_module);
+module_exit(otx2vf_cleanup_module);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/qos.c b/drivers/net/ethernet/marvell/octeontx2/nic/qos.c
new file mode 100644
index 000000000..1e77bbf5d
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/qos.c
@@ -0,0 +1,1689 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Ethernet driver
+ *
+ * Copyright (C) 2023 Marvell.
+ *
+ */
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/inetdevice.h>
+#include <linux/bitfield.h>
+
+#include "otx2_common.h"
+#include "cn10k.h"
+#include "qos.h"
+
+#define OTX2_QOS_QID_INNER 0xFFFFU
+#define OTX2_QOS_QID_NONE 0xFFFEU
+#define OTX2_QOS_ROOT_CLASSID 0xFFFFFFFF
+#define OTX2_QOS_CLASS_NONE 0
+#define OTX2_QOS_DEFAULT_PRIO 0xF
+#define OTX2_QOS_INVALID_SQ 0xFFFF
+#define OTX2_QOS_INVALID_TXSCHQ_IDX 0xFFFF
+#define CN10K_MAX_RR_WEIGHT GENMASK_ULL(13, 0)
+#define OTX2_MAX_RR_QUANTUM GENMASK_ULL(23, 0)
+
+static void otx2_qos_update_tx_netdev_queues(struct otx2_nic *pfvf)
+{
+ struct otx2_hw *hw = &pfvf->hw;
+ int tx_queues, qos_txqs, err;
+
+ qos_txqs = bitmap_weight(pfvf->qos.qos_sq_bmap,
+ OTX2_QOS_MAX_LEAF_NODES);
+
+ tx_queues = hw->tx_queues + qos_txqs;
+
+ err = netif_set_real_num_tx_queues(pfvf->netdev, tx_queues);
+ if (err) {
+ netdev_err(pfvf->netdev,
+			   "Failed to set number of Tx queues: %d\n", tx_queues);
+ return;
+ }
+}
+
+static void otx2_qos_get_regaddr(struct otx2_qos_node *node,
+ struct nix_txschq_config *cfg,
+ int index)
+{
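+	/* Each level programs four registers: PARENT, SCHEDULE, PIR and CIR */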
+ if (node->level == NIX_TXSCH_LVL_SMQ) {
+ cfg->reg[index++] = NIX_AF_MDQX_PARENT(node->schq);
+ cfg->reg[index++] = NIX_AF_MDQX_SCHEDULE(node->schq);
+ cfg->reg[index++] = NIX_AF_MDQX_PIR(node->schq);
+ cfg->reg[index] = NIX_AF_MDQX_CIR(node->schq);
+ } else if (node->level == NIX_TXSCH_LVL_TL4) {
+ cfg->reg[index++] = NIX_AF_TL4X_PARENT(node->schq);
+ cfg->reg[index++] = NIX_AF_TL4X_SCHEDULE(node->schq);
+ cfg->reg[index++] = NIX_AF_TL4X_PIR(node->schq);
+ cfg->reg[index] = NIX_AF_TL4X_CIR(node->schq);
+ } else if (node->level == NIX_TXSCH_LVL_TL3) {
+ cfg->reg[index++] = NIX_AF_TL3X_PARENT(node->schq);
+ cfg->reg[index++] = NIX_AF_TL3X_SCHEDULE(node->schq);
+ cfg->reg[index++] = NIX_AF_TL3X_PIR(node->schq);
+ cfg->reg[index] = NIX_AF_TL3X_CIR(node->schq);
+ } else if (node->level == NIX_TXSCH_LVL_TL2) {
+ cfg->reg[index++] = NIX_AF_TL2X_PARENT(node->schq);
+ cfg->reg[index++] = NIX_AF_TL2X_SCHEDULE(node->schq);
+ cfg->reg[index++] = NIX_AF_TL2X_PIR(node->schq);
+ cfg->reg[index] = NIX_AF_TL2X_CIR(node->schq);
+ }
+}
+
+static int otx2_qos_quantum_to_dwrr_weight(struct otx2_nic *pfvf, u32 quantum)
+{
+ u32 weight;
+
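+	/* DWRR weight is the quantum expressed in dwrr_mtu units, rounded up */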
+ weight = quantum / pfvf->hw.dwrr_mtu;
+ if (quantum % pfvf->hw.dwrr_mtu)
+ weight += 1;
+
+ return weight;
+}
+
+static void otx2_config_sched_shaping(struct otx2_nic *pfvf,
+ struct otx2_qos_node *node,
+ struct nix_txschq_config *cfg,
+ int *num_regs)
+{
+ u32 rr_weight;
+ u32 quantum;
+ u64 maxrate;
+
+ otx2_qos_get_regaddr(node, cfg, *num_regs);
+
+ /* configure parent txschq */
+ cfg->regval[*num_regs] = node->parent->schq << 16;
+ (*num_regs)++;
+
+ /* configure prio/quantum */
+ if (node->qid == OTX2_QOS_QID_NONE) {
+ cfg->regval[*num_regs] = node->prio << 24 |
+ mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen);
+ (*num_regs)++;
+ return;
+ }
+
+ /* configure priority/quantum */
+ if (node->is_static) {
+ cfg->regval[*num_regs] =
+ (node->schq - node->parent->prio_anchor) << 24;
+ } else {
+ quantum = node->quantum ?
+ node->quantum : pfvf->tx_max_pktlen;
+ rr_weight = otx2_qos_quantum_to_dwrr_weight(pfvf, quantum);
+ cfg->regval[*num_regs] = node->parent->child_dwrr_prio << 24 |
+ rr_weight;
+ }
+ (*num_regs)++;
+
+ /* configure PIR */
+ maxrate = (node->rate > node->ceil) ? node->rate : node->ceil;
+
+ cfg->regval[*num_regs] =
+ otx2_get_txschq_rate_regval(pfvf, maxrate, 65536);
+ (*num_regs)++;
+
+	/* Don't configure CIR when CIR and PIR together are not supported.
+	 * On 96xx, CIR + PIR + RED_ALGO=STALL causes a deadlock.
+ */
+ if (!test_bit(QOS_CIR_PIR_SUPPORT, &pfvf->hw.cap_flag))
+ return;
+
+ cfg->regval[*num_regs] =
+ otx2_get_txschq_rate_regval(pfvf, node->rate, 65536);
+ (*num_regs)++;
+}
+
+static void __otx2_qos_txschq_cfg(struct otx2_nic *pfvf,
+ struct otx2_qos_node *node,
+ struct nix_txschq_config *cfg)
+{
+ struct otx2_hw *hw = &pfvf->hw;
+ int num_regs = 0;
+ u8 level;
+
+ level = node->level;
+
+ /* program txschq registers */
+ if (level == NIX_TXSCH_LVL_SMQ) {
+ cfg->reg[num_regs] = NIX_AF_SMQX_CFG(node->schq);
+ cfg->regval[num_regs] = ((u64)pfvf->tx_max_pktlen << 8) |
+ OTX2_MIN_MTU;
+ cfg->regval[num_regs] |= (0x20ULL << 51) | (0x80ULL << 39) |
+ (0x2ULL << 36);
+ num_regs++;
+
+ otx2_config_sched_shaping(pfvf, node, cfg, &num_regs);
+
+ } else if (level == NIX_TXSCH_LVL_TL4) {
+ otx2_config_sched_shaping(pfvf, node, cfg, &num_regs);
+ } else if (level == NIX_TXSCH_LVL_TL3) {
+ /* configure link cfg */
+ if (level == pfvf->qos.link_cfg_lvl) {
+ cfg->reg[num_regs] = NIX_AF_TL3_TL2X_LINKX_CFG(node->schq, hw->tx_link);
+ cfg->regval[num_regs] = BIT_ULL(13) | BIT_ULL(12);
+ num_regs++;
+ }
+
+ otx2_config_sched_shaping(pfvf, node, cfg, &num_regs);
+ } else if (level == NIX_TXSCH_LVL_TL2) {
+ /* configure link cfg */
+ if (level == pfvf->qos.link_cfg_lvl) {
+ cfg->reg[num_regs] = NIX_AF_TL3_TL2X_LINKX_CFG(node->schq, hw->tx_link);
+ cfg->regval[num_regs] = BIT_ULL(13) | BIT_ULL(12);
+ num_regs++;
+ }
+
+ /* check if node is root */
+ if (node->qid == OTX2_QOS_QID_INNER && !node->parent) {
+ cfg->reg[num_regs] = NIX_AF_TL2X_SCHEDULE(node->schq);
+ cfg->regval[num_regs] = TXSCH_TL1_DFLT_RR_PRIO << 24 |
+ mtu_to_dwrr_weight(pfvf,
+ pfvf->tx_max_pktlen);
+ num_regs++;
+ goto txschq_cfg_out;
+ }
+
+ otx2_config_sched_shaping(pfvf, node, cfg, &num_regs);
+ }
+
+txschq_cfg_out:
+ cfg->num_regs = num_regs;
+}
+
+static int otx2_qos_txschq_set_parent_topology(struct otx2_nic *pfvf,
+ struct otx2_qos_node *parent)
+{
+ struct mbox *mbox = &pfvf->mbox;
+ struct nix_txschq_config *cfg;
+ int rc;
+
+ if (parent->level == NIX_TXSCH_LVL_MDQ)
+ return 0;
+
+ mutex_lock(&mbox->lock);
+
+ cfg = otx2_mbox_alloc_msg_nix_txschq_cfg(&pfvf->mbox);
+ if (!cfg) {
+ mutex_unlock(&mbox->lock);
+ return -ENOMEM;
+ }
+
+ cfg->lvl = parent->level;
+
+ if (parent->level == NIX_TXSCH_LVL_TL4)
+ cfg->reg[0] = NIX_AF_TL4X_TOPOLOGY(parent->schq);
+ else if (parent->level == NIX_TXSCH_LVL_TL3)
+ cfg->reg[0] = NIX_AF_TL3X_TOPOLOGY(parent->schq);
+ else if (parent->level == NIX_TXSCH_LVL_TL2)
+ cfg->reg[0] = NIX_AF_TL2X_TOPOLOGY(parent->schq);
+ else if (parent->level == NIX_TXSCH_LVL_TL1)
+ cfg->reg[0] = NIX_AF_TL1X_TOPOLOGY(parent->schq);
+
+ cfg->regval[0] = (u64)parent->prio_anchor << 32;
+ cfg->regval[0] |= ((parent->child_dwrr_prio != OTX2_QOS_DEFAULT_PRIO) ?
+ parent->child_dwrr_prio : 0) << 1;
+ cfg->num_regs++;
+
+ rc = otx2_sync_mbox_msg(&pfvf->mbox);
+
+ mutex_unlock(&mbox->lock);
+
+ return rc;
+}
+
+static void otx2_qos_free_hw_node_schq(struct otx2_nic *pfvf,
+ struct otx2_qos_node *parent)
+{
+ struct otx2_qos_node *node;
+
+ list_for_each_entry_reverse(node, &parent->child_schq_list, list)
+ otx2_txschq_free_one(pfvf, node->level, node->schq);
+}
+
+static void otx2_qos_free_hw_node(struct otx2_nic *pfvf,
+ struct otx2_qos_node *parent)
+{
+ struct otx2_qos_node *node, *tmp;
+
+ list_for_each_entry_safe(node, tmp, &parent->child_list, list) {
+ otx2_qos_free_hw_node(pfvf, node);
+ otx2_qos_free_hw_node_schq(pfvf, node);
+ otx2_txschq_free_one(pfvf, node->level, node->schq);
+ }
+}
+
+static void otx2_qos_free_hw_cfg(struct otx2_nic *pfvf,
+ struct otx2_qos_node *node)
+{
+ mutex_lock(&pfvf->qos.qos_lock);
+
+ /* free child node hw mappings */
+ otx2_qos_free_hw_node(pfvf, node);
+ otx2_qos_free_hw_node_schq(pfvf, node);
+
+ /* free node hw mappings */
+ otx2_txschq_free_one(pfvf, node->level, node->schq);
+
+ mutex_unlock(&pfvf->qos.qos_lock);
+}
+
+static void otx2_qos_sw_node_delete(struct otx2_nic *pfvf,
+ struct otx2_qos_node *node)
+{
+ hash_del_rcu(&node->hlist);
+
+ if (node->qid != OTX2_QOS_QID_INNER && node->qid != OTX2_QOS_QID_NONE) {
+ __clear_bit(node->qid, pfvf->qos.qos_sq_bmap);
+ otx2_qos_update_tx_netdev_queues(pfvf);
+ }
+
+ list_del(&node->list);
+ kfree(node);
+}
+
+static void otx2_qos_free_sw_node_schq(struct otx2_nic *pfvf,
+ struct otx2_qos_node *parent)
+{
+ struct otx2_qos_node *node, *tmp;
+
+ list_for_each_entry_safe(node, tmp, &parent->child_schq_list, list) {
+ list_del(&node->list);
+ kfree(node);
+ }
+}
+
+static void __otx2_qos_free_sw_node(struct otx2_nic *pfvf,
+ struct otx2_qos_node *parent)
+{
+ struct otx2_qos_node *node, *tmp;
+
+ list_for_each_entry_safe(node, tmp, &parent->child_list, list) {
+ __otx2_qos_free_sw_node(pfvf, node);
+ otx2_qos_free_sw_node_schq(pfvf, node);
+ otx2_qos_sw_node_delete(pfvf, node);
+ }
+}
+
+static void otx2_qos_free_sw_node(struct otx2_nic *pfvf,
+ struct otx2_qos_node *node)
+{
+ mutex_lock(&pfvf->qos.qos_lock);
+
+ __otx2_qos_free_sw_node(pfvf, node);
+ otx2_qos_free_sw_node_schq(pfvf, node);
+ otx2_qos_sw_node_delete(pfvf, node);
+
+ mutex_unlock(&pfvf->qos.qos_lock);
+}
+
+static void otx2_qos_destroy_node(struct otx2_nic *pfvf,
+ struct otx2_qos_node *node)
+{
+ otx2_qos_free_hw_cfg(pfvf, node);
+ otx2_qos_free_sw_node(pfvf, node);
+}
+
+static void otx2_qos_fill_cfg_schq(struct otx2_qos_node *parent,
+ struct otx2_qos_cfg *cfg)
+{
+ struct otx2_qos_node *node;
+
+ list_for_each_entry(node, &parent->child_schq_list, list)
+ cfg->schq[node->level]++;
+}
+
+static void otx2_qos_fill_cfg_tl(struct otx2_qos_node *parent,
+ struct otx2_qos_cfg *cfg)
+{
+ struct otx2_qos_node *node;
+
+ list_for_each_entry(node, &parent->child_list, list) {
+ otx2_qos_fill_cfg_tl(node, cfg);
+ otx2_qos_fill_cfg_schq(node, cfg);
+ }
+
+	/* Assign the required number of transmit scheduler queues under the
+ * given class
+ */
+ cfg->schq_contig[parent->level - 1] += parent->child_dwrr_cnt +
+ parent->max_static_prio + 1;
+}
+
+static void otx2_qos_prepare_txschq_cfg(struct otx2_nic *pfvf,
+ struct otx2_qos_node *parent,
+ struct otx2_qos_cfg *cfg)
+{
+ mutex_lock(&pfvf->qos.qos_lock);
+ otx2_qos_fill_cfg_tl(parent, cfg);
+ mutex_unlock(&pfvf->qos.qos_lock);
+}
+
+static void otx2_qos_read_txschq_cfg_schq(struct otx2_qos_node *parent,
+ struct otx2_qos_cfg *cfg)
+{
+ struct otx2_qos_node *node;
+ int cnt;
+
+ list_for_each_entry(node, &parent->child_schq_list, list) {
+ cnt = cfg->dwrr_node_pos[node->level];
+ cfg->schq_list[node->level][cnt] = node->schq;
+ cfg->schq[node->level]++;
+ cfg->dwrr_node_pos[node->level]++;
+ }
+}
+
+static void otx2_qos_read_txschq_cfg_tl(struct otx2_qos_node *parent,
+ struct otx2_qos_cfg *cfg)
+{
+ struct otx2_qos_node *node;
+ int cnt;
+
+ list_for_each_entry(node, &parent->child_list, list) {
+ otx2_qos_read_txschq_cfg_tl(node, cfg);
+ cnt = cfg->static_node_pos[node->level];
+ cfg->schq_contig_list[node->level][cnt] = node->schq;
+ cfg->schq_contig[node->level]++;
+ cfg->static_node_pos[node->level]++;
+ otx2_qos_read_txschq_cfg_schq(node, cfg);
+ }
+}
+
+static void otx2_qos_read_txschq_cfg(struct otx2_nic *pfvf,
+ struct otx2_qos_node *node,
+ struct otx2_qos_cfg *cfg)
+{
+ mutex_lock(&pfvf->qos.qos_lock);
+ otx2_qos_read_txschq_cfg_tl(node, cfg);
+ mutex_unlock(&pfvf->qos.qos_lock);
+}
+
+static struct otx2_qos_node *
+otx2_qos_alloc_root(struct otx2_nic *pfvf)
+{
+ struct otx2_qos_node *node;
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return ERR_PTR(-ENOMEM);
+
+ node->parent = NULL;
+ if (!is_otx2_vf(pfvf->pcifunc)) {
+ node->level = NIX_TXSCH_LVL_TL1;
+ } else {
+ node->level = NIX_TXSCH_LVL_TL2;
+ node->child_dwrr_prio = OTX2_QOS_DEFAULT_PRIO;
+ }
+
+ WRITE_ONCE(node->qid, OTX2_QOS_QID_INNER);
+ node->classid = OTX2_QOS_ROOT_CLASSID;
+
+ hash_add_rcu(pfvf->qos.qos_hlist, &node->hlist, node->classid);
+ list_add_tail(&node->list, &pfvf->qos.qos_tree);
+ INIT_LIST_HEAD(&node->child_list);
+ INIT_LIST_HEAD(&node->child_schq_list);
+
+ return node;
+}
+
+static int otx2_qos_add_child_node(struct otx2_qos_node *parent,
+ struct otx2_qos_node *node)
+{
+ struct list_head *head = &parent->child_list;
+ struct otx2_qos_node *tmp_node;
+ struct list_head *tmp;
+
+ if (node->prio > parent->max_static_prio)
+ parent->max_static_prio = node->prio;
+
+ for (tmp = head->next; tmp != head; tmp = tmp->next) {
+ tmp_node = list_entry(tmp, struct otx2_qos_node, list);
+ if (tmp_node->prio == node->prio &&
+ tmp_node->is_static)
+ return -EEXIST;
+ if (tmp_node->prio > node->prio) {
+ list_add_tail(&node->list, tmp);
+ return 0;
+ }
+ }
+
+ list_add_tail(&node->list, head);
+ return 0;
+}
+
+static int otx2_qos_alloc_txschq_node(struct otx2_nic *pfvf,
+ struct otx2_qos_node *node)
+{
+ struct otx2_qos_node *txschq_node, *parent, *tmp;
+ int lvl;
+
+ parent = node;
+ for (lvl = node->level - 1; lvl >= NIX_TXSCH_LVL_MDQ; lvl--) {
+ txschq_node = kzalloc(sizeof(*txschq_node), GFP_KERNEL);
+ if (!txschq_node)
+ goto err_out;
+
+ txschq_node->parent = parent;
+ txschq_node->level = lvl;
+ txschq_node->classid = OTX2_QOS_CLASS_NONE;
+ WRITE_ONCE(txschq_node->qid, OTX2_QOS_QID_NONE);
+ txschq_node->rate = 0;
+ txschq_node->ceil = 0;
+ txschq_node->prio = 0;
+ txschq_node->quantum = 0;
+ txschq_node->is_static = true;
+ txschq_node->child_dwrr_prio = OTX2_QOS_DEFAULT_PRIO;
+ txschq_node->txschq_idx = OTX2_QOS_INVALID_TXSCHQ_IDX;
+
+ mutex_lock(&pfvf->qos.qos_lock);
+ list_add_tail(&txschq_node->list, &node->child_schq_list);
+ mutex_unlock(&pfvf->qos.qos_lock);
+
+ INIT_LIST_HEAD(&txschq_node->child_list);
+ INIT_LIST_HEAD(&txschq_node->child_schq_list);
+ parent = txschq_node;
+ }
+
+ return 0;
+
+err_out:
+ list_for_each_entry_safe(txschq_node, tmp, &node->child_schq_list,
+ list) {
+ list_del(&txschq_node->list);
+ kfree(txschq_node);
+ }
+ return -ENOMEM;
+}
+
+static struct otx2_qos_node *
+otx2_qos_sw_create_leaf_node(struct otx2_nic *pfvf,
+ struct otx2_qos_node *parent,
+ u16 classid, u32 prio, u64 rate, u64 ceil,
+ u32 quantum, u16 qid, bool static_cfg)
+{
+ struct otx2_qos_node *node;
+ int err;
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return ERR_PTR(-ENOMEM);
+
+ node->parent = parent;
+ node->level = parent->level - 1;
+ node->classid = classid;
+ WRITE_ONCE(node->qid, qid);
+
+ node->rate = otx2_convert_rate(rate);
+ node->ceil = otx2_convert_rate(ceil);
+ node->prio = prio;
+ node->quantum = quantum;
+ node->is_static = static_cfg;
+ node->child_dwrr_prio = OTX2_QOS_DEFAULT_PRIO;
+ node->txschq_idx = OTX2_QOS_INVALID_TXSCHQ_IDX;
+
+ __set_bit(qid, pfvf->qos.qos_sq_bmap);
+
+ hash_add_rcu(pfvf->qos.qos_hlist, &node->hlist, classid);
+
+ mutex_lock(&pfvf->qos.qos_lock);
+ err = otx2_qos_add_child_node(parent, node);
+ if (err) {
+ mutex_unlock(&pfvf->qos.qos_lock);
+ return ERR_PTR(err);
+ }
+ mutex_unlock(&pfvf->qos.qos_lock);
+
+ INIT_LIST_HEAD(&node->child_list);
+ INIT_LIST_HEAD(&node->child_schq_list);
+
+ err = otx2_qos_alloc_txschq_node(pfvf, node);
+ if (err) {
+ otx2_qos_sw_node_delete(pfvf, node);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ return node;
+}
+
+static struct otx2_qos_node *
+otx2_sw_node_find(struct otx2_nic *pfvf, u32 classid)
+{
+ struct otx2_qos_node *node = NULL;
+
+ hash_for_each_possible(pfvf->qos.qos_hlist, node, hlist, classid) {
+ if (node->classid == classid)
+ break;
+ }
+
+ return node;
+}
+
+static struct otx2_qos_node *
+otx2_sw_node_find_rcu(struct otx2_nic *pfvf, u32 classid)
+{
+ struct otx2_qos_node *node = NULL;
+
+ hash_for_each_possible_rcu(pfvf->qos.qos_hlist, node, hlist, classid) {
+ if (node->classid == classid)
+ break;
+ }
+
+ return node;
+}
+
+int otx2_get_txq_by_classid(struct otx2_nic *pfvf, u16 classid)
+{
+ struct otx2_qos_node *node;
+ u16 qid;
+ int res;
+
+ node = otx2_sw_node_find_rcu(pfvf, classid);
+ if (!node) {
+ res = -ENOENT;
+ goto out;
+ }
+ qid = READ_ONCE(node->qid);
+ if (qid == OTX2_QOS_QID_INNER) {
+ res = -EINVAL;
+ goto out;
+ }
+ res = pfvf->hw.tx_queues + qid;
+out:
+ return res;
+}
+
+static int
+otx2_qos_txschq_config(struct otx2_nic *pfvf, struct otx2_qos_node *node)
+{
+ struct mbox *mbox = &pfvf->mbox;
+ struct nix_txschq_config *req;
+ int rc;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_nix_txschq_cfg(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&mbox->lock);
+ return -ENOMEM;
+ }
+
+ req->lvl = node->level;
+ __otx2_qos_txschq_cfg(pfvf, node, req);
+
+ rc = otx2_sync_mbox_msg(&pfvf->mbox);
+
+ mutex_unlock(&mbox->lock);
+
+ return rc;
+}
+
+static int otx2_qos_txschq_alloc(struct otx2_nic *pfvf,
+ struct otx2_qos_cfg *cfg)
+{
+ struct nix_txsch_alloc_req *req;
+ struct nix_txsch_alloc_rsp *rsp;
+ struct mbox *mbox = &pfvf->mbox;
+ int lvl, rc, schq;
+
+ mutex_lock(&mbox->lock);
+ req = otx2_mbox_alloc_msg_nix_txsch_alloc(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&mbox->lock);
+ return -ENOMEM;
+ }
+
+ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
+ req->schq[lvl] = cfg->schq[lvl];
+ req->schq_contig[lvl] = cfg->schq_contig[lvl];
+ }
+
+ rc = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (rc) {
+ mutex_unlock(&mbox->lock);
+ return rc;
+ }
+
+ rsp = (struct nix_txsch_alloc_rsp *)
+ otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
+
+ if (IS_ERR(rsp)) {
+ rc = PTR_ERR(rsp);
+ goto out;
+ }
+
+ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
+ for (schq = 0; schq < rsp->schq_contig[lvl]; schq++) {
+ cfg->schq_contig_list[lvl][schq] =
+ rsp->schq_contig_list[lvl][schq];
+ }
+ }
+
+ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
+ for (schq = 0; schq < rsp->schq[lvl]; schq++) {
+ cfg->schq_list[lvl][schq] =
+ rsp->schq_list[lvl][schq];
+ }
+ }
+
+ pfvf->qos.link_cfg_lvl = rsp->link_cfg_lvl;
+ pfvf->hw.txschq_aggr_lvl_rr_prio = rsp->aggr_lvl_rr_prio;
+
+out:
+ mutex_unlock(&mbox->lock);
+ return rc;
+}
+
+static void otx2_qos_free_unused_txschq(struct otx2_nic *pfvf,
+ struct otx2_qos_cfg *cfg)
+{
+ int lvl, idx, schq;
+
+ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
+ for (idx = 0; idx < cfg->schq_contig[lvl]; idx++) {
+ if (!cfg->schq_index_used[lvl][idx]) {
+ schq = cfg->schq_contig_list[lvl][idx];
+ otx2_txschq_free_one(pfvf, lvl, schq);
+ }
+ }
+ }
+}
+
+static void otx2_qos_txschq_fill_cfg_schq(struct otx2_nic *pfvf,
+ struct otx2_qos_node *node,
+ struct otx2_qos_cfg *cfg)
+{
+ struct otx2_qos_node *tmp;
+ int cnt;
+
+ list_for_each_entry(tmp, &node->child_schq_list, list) {
+ cnt = cfg->dwrr_node_pos[tmp->level];
+ tmp->schq = cfg->schq_list[tmp->level][cnt];
+ cfg->dwrr_node_pos[tmp->level]++;
+ }
+}
+
+static void otx2_qos_txschq_fill_cfg_tl(struct otx2_nic *pfvf,
+ struct otx2_qos_node *node,
+ struct otx2_qos_cfg *cfg)
+{
+ struct otx2_qos_node *tmp;
+ int cnt;
+
+ list_for_each_entry(tmp, &node->child_list, list) {
+ otx2_qos_txschq_fill_cfg_tl(pfvf, tmp, cfg);
+ cnt = cfg->static_node_pos[tmp->level];
+ tmp->schq = cfg->schq_contig_list[tmp->level][tmp->txschq_idx];
+ cfg->schq_index_used[tmp->level][tmp->txschq_idx] = true;
+ if (cnt == 0)
+ node->prio_anchor =
+ cfg->schq_contig_list[tmp->level][0];
+ cfg->static_node_pos[tmp->level]++;
+ otx2_qos_txschq_fill_cfg_schq(pfvf, tmp, cfg);
+ }
+}
+
+static void otx2_qos_txschq_fill_cfg(struct otx2_nic *pfvf,
+ struct otx2_qos_node *node,
+ struct otx2_qos_cfg *cfg)
+{
+ mutex_lock(&pfvf->qos.qos_lock);
+ otx2_qos_txschq_fill_cfg_tl(pfvf, node, cfg);
+ otx2_qos_txschq_fill_cfg_schq(pfvf, node, cfg);
+ otx2_qos_free_unused_txschq(pfvf, cfg);
+ mutex_unlock(&pfvf->qos.qos_lock);
+}
+
+static void __otx2_qos_assign_base_idx_tl(struct otx2_nic *pfvf,
+ struct otx2_qos_node *tmp,
+ unsigned long *child_idx_bmap,
+ int child_cnt)
+{
+ int idx;
+
+ if (tmp->txschq_idx != OTX2_QOS_INVALID_TXSCHQ_IDX)
+ return;
+
+ /* assign static nodes 1:1 prio mapping first, then remaining nodes */
+ for (idx = 0; idx < child_cnt; idx++) {
+ if (tmp->is_static && tmp->prio == idx &&
+ !test_bit(idx, child_idx_bmap)) {
+ tmp->txschq_idx = idx;
+ set_bit(idx, child_idx_bmap);
+ return;
+ } else if (!tmp->is_static && idx >= tmp->prio &&
+ !test_bit(idx, child_idx_bmap)) {
+ tmp->txschq_idx = idx;
+ set_bit(idx, child_idx_bmap);
+ return;
+ }
+ }
+}
+
+static int otx2_qos_assign_base_idx_tl(struct otx2_nic *pfvf,
+ struct otx2_qos_node *node)
+{
+ unsigned long *child_idx_bmap;
+ struct otx2_qos_node *tmp;
+ int child_cnt;
+
+ list_for_each_entry(tmp, &node->child_list, list)
+ tmp->txschq_idx = OTX2_QOS_INVALID_TXSCHQ_IDX;
+
+ /* allocate child index array */
+ child_cnt = node->child_dwrr_cnt + node->max_static_prio + 1;
+ child_idx_bmap = kcalloc(BITS_TO_LONGS(child_cnt),
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ if (!child_idx_bmap)
+ return -ENOMEM;
+
+ list_for_each_entry(tmp, &node->child_list, list)
+ otx2_qos_assign_base_idx_tl(pfvf, tmp);
+
+ /* assign base index of static priority children first */
+ list_for_each_entry(tmp, &node->child_list, list) {
+ if (!tmp->is_static)
+ continue;
+ __otx2_qos_assign_base_idx_tl(pfvf, tmp, child_idx_bmap,
+ child_cnt);
+ }
+
+ /* assign base index of dwrr priority children */
+ list_for_each_entry(tmp, &node->child_list, list)
+ __otx2_qos_assign_base_idx_tl(pfvf, tmp, child_idx_bmap,
+ child_cnt);
+
+ kfree(child_idx_bmap);
+
+ return 0;
+}
+
+static int otx2_qos_assign_base_idx(struct otx2_nic *pfvf,
+ struct otx2_qos_node *node)
+{
+ int ret = 0;
+
+ mutex_lock(&pfvf->qos.qos_lock);
+ ret = otx2_qos_assign_base_idx_tl(pfvf, node);
+ mutex_unlock(&pfvf->qos.qos_lock);
+
+ return ret;
+}
+
+static int otx2_qos_txschq_push_cfg_schq(struct otx2_nic *pfvf,
+ struct otx2_qos_node *node,
+ struct otx2_qos_cfg *cfg)
+{
+ struct otx2_qos_node *tmp;
+ int ret;
+
+ list_for_each_entry(tmp, &node->child_schq_list, list) {
+ ret = otx2_qos_txschq_config(pfvf, tmp);
+ if (ret)
+ return -EIO;
+ ret = otx2_qos_txschq_set_parent_topology(pfvf, tmp->parent);
+ if (ret)
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int otx2_qos_txschq_push_cfg_tl(struct otx2_nic *pfvf,
+ struct otx2_qos_node *node,
+ struct otx2_qos_cfg *cfg)
+{
+ struct otx2_qos_node *tmp;
+ int ret;
+
+ list_for_each_entry(tmp, &node->child_list, list) {
+ ret = otx2_qos_txschq_push_cfg_tl(pfvf, tmp, cfg);
+ if (ret)
+ return -EIO;
+ ret = otx2_qos_txschq_config(pfvf, tmp);
+ if (ret)
+ return -EIO;
+ ret = otx2_qos_txschq_push_cfg_schq(pfvf, tmp, cfg);
+ if (ret)
+ return -EIO;
+ }
+
+ ret = otx2_qos_txschq_set_parent_topology(pfvf, node);
+ if (ret)
+ return -EIO;
+
+ return 0;
+}
+
+static int otx2_qos_txschq_push_cfg(struct otx2_nic *pfvf,
+ struct otx2_qos_node *node,
+ struct otx2_qos_cfg *cfg)
+{
+ int ret;
+
+ mutex_lock(&pfvf->qos.qos_lock);
+ ret = otx2_qos_txschq_push_cfg_tl(pfvf, node, cfg);
+ if (ret)
+ goto out;
+ ret = otx2_qos_txschq_push_cfg_schq(pfvf, node, cfg);
+out:
+ mutex_unlock(&pfvf->qos.qos_lock);
+ return ret;
+}
+
+static int otx2_qos_txschq_update_config(struct otx2_nic *pfvf,
+ struct otx2_qos_node *node,
+ struct otx2_qos_cfg *cfg)
+{
+ otx2_qos_txschq_fill_cfg(pfvf, node, cfg);
+
+ return otx2_qos_txschq_push_cfg(pfvf, node, cfg);
+}
+
+static int otx2_qos_txschq_update_root_cfg(struct otx2_nic *pfvf,
+ struct otx2_qos_node *root,
+ struct otx2_qos_cfg *cfg)
+{
+ root->schq = cfg->schq_list[root->level][0];
+ return otx2_qos_txschq_config(pfvf, root);
+}
+
+static void otx2_qos_free_cfg(struct otx2_nic *pfvf, struct otx2_qos_cfg *cfg)
+{
+ int lvl, idx, schq;
+
+ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
+ for (idx = 0; idx < cfg->schq[lvl]; idx++) {
+ schq = cfg->schq_list[lvl][idx];
+ otx2_txschq_free_one(pfvf, lvl, schq);
+ }
+ }
+
+ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
+ for (idx = 0; idx < cfg->schq_contig[lvl]; idx++) {
+ if (cfg->schq_index_used[lvl][idx]) {
+ schq = cfg->schq_contig_list[lvl][idx];
+ otx2_txschq_free_one(pfvf, lvl, schq);
+ }
+ }
+ }
+}
+
+static void otx2_qos_enadis_sq(struct otx2_nic *pfvf,
+ struct otx2_qos_node *node,
+ u16 qid)
+{
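+	/* Drop any stale qid->SMQ mapping, then map the qid to this
+	 * node's SMQ and enable the send queue.
+	 */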
+ if (pfvf->qos.qid_to_sqmap[qid] != OTX2_QOS_INVALID_SQ)
+ otx2_qos_disable_sq(pfvf, qid);
+
+ pfvf->qos.qid_to_sqmap[qid] = node->schq;
+ otx2_qos_enable_sq(pfvf, qid);
+}
+
+static void otx2_qos_update_smq_schq(struct otx2_nic *pfvf,
+ struct otx2_qos_node *node,
+ bool action)
+{
+ struct otx2_qos_node *tmp;
+
+ if (node->qid == OTX2_QOS_QID_INNER)
+ return;
+
+ list_for_each_entry(tmp, &node->child_schq_list, list) {
+ if (tmp->level == NIX_TXSCH_LVL_MDQ) {
+ if (action == QOS_SMQ_FLUSH)
+ otx2_smq_flush(pfvf, tmp->schq);
+ else
+ otx2_qos_enadis_sq(pfvf, tmp, node->qid);
+ }
+ }
+}
+
+static void __otx2_qos_update_smq(struct otx2_nic *pfvf,
+ struct otx2_qos_node *node,
+ bool action)
+{
+ struct otx2_qos_node *tmp;
+
+ list_for_each_entry(tmp, &node->child_list, list) {
+ __otx2_qos_update_smq(pfvf, tmp, action);
+ if (tmp->qid == OTX2_QOS_QID_INNER)
+ continue;
+ if (tmp->level == NIX_TXSCH_LVL_MDQ) {
+ if (action == QOS_SMQ_FLUSH)
+ otx2_smq_flush(pfvf, tmp->schq);
+ else
+ otx2_qos_enadis_sq(pfvf, tmp, tmp->qid);
+ } else {
+ otx2_qos_update_smq_schq(pfvf, tmp, action);
+ }
+ }
+}
+
+static void otx2_qos_update_smq(struct otx2_nic *pfvf,
+ struct otx2_qos_node *node,
+ bool action)
+{
+ mutex_lock(&pfvf->qos.qos_lock);
+ __otx2_qos_update_smq(pfvf, node, action);
+ otx2_qos_update_smq_schq(pfvf, node, action);
+ mutex_unlock(&pfvf->qos.qos_lock);
+}
+
+static int otx2_qos_push_txschq_cfg(struct otx2_nic *pfvf,
+ struct otx2_qos_node *node,
+ struct otx2_qos_cfg *cfg)
+{
+ int ret;
+
+ ret = otx2_qos_txschq_alloc(pfvf, cfg);
+ if (ret)
+ return -ENOSPC;
+
+ ret = otx2_qos_assign_base_idx(pfvf, node);
+ if (ret)
+ return -ENOMEM;
+
+ if (!(pfvf->netdev->flags & IFF_UP)) {
+ otx2_qos_txschq_fill_cfg(pfvf, node, cfg);
+ return 0;
+ }
+
+ ret = otx2_qos_txschq_update_config(pfvf, node, cfg);
+ if (ret) {
+ otx2_qos_free_cfg(pfvf, cfg);
+ return -EIO;
+ }
+
+ otx2_qos_update_smq(pfvf, node, QOS_CFG_SQ);
+
+ return 0;
+}
+
+static int otx2_qos_update_tree(struct otx2_nic *pfvf,
+ struct otx2_qos_node *node,
+ struct otx2_qos_cfg *cfg)
+{
+ otx2_qos_prepare_txschq_cfg(pfvf, node->parent, cfg);
+ return otx2_qos_push_txschq_cfg(pfvf, node->parent, cfg);
+}
+
+static int otx2_qos_root_add(struct otx2_nic *pfvf, u16 htb_maj_id, u16 htb_defcls,
+ struct netlink_ext_ack *extack)
+{
+ struct otx2_qos_cfg *new_cfg;
+ struct otx2_qos_node *root;
+ int err;
+
+ netdev_dbg(pfvf->netdev,
+ "TC_HTB_CREATE: handle=0x%x defcls=0x%x\n",
+ htb_maj_id, htb_defcls);
+
+ root = otx2_qos_alloc_root(pfvf);
+ if (IS_ERR(root)) {
+ err = PTR_ERR(root);
+ return err;
+ }
+
+	/* allocate txschq config */
+ new_cfg = kzalloc(sizeof(*new_cfg), GFP_KERNEL);
+ if (!new_cfg) {
+ NL_SET_ERR_MSG_MOD(extack, "Memory allocation error");
+ err = -ENOMEM;
+ goto free_root_node;
+ }
+	/* request one hw txschq for the htb root node */
+ new_cfg->schq[root->level] = 1;
+ err = otx2_qos_txschq_alloc(pfvf, new_cfg);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Error allocating txschq");
+ goto free_root_node;
+ }
+
+ /* Update TL1 RR PRIO */
+ if (root->level == NIX_TXSCH_LVL_TL1) {
+ root->child_dwrr_prio = pfvf->hw.txschq_aggr_lvl_rr_prio;
+ netdev_dbg(pfvf->netdev,
+ "TL1 DWRR Priority %d\n", root->child_dwrr_prio);
+ }
+
+ if (!(pfvf->netdev->flags & IFF_UP) ||
+ root->level == NIX_TXSCH_LVL_TL1) {
+ root->schq = new_cfg->schq_list[root->level][0];
+ goto out;
+ }
+
+ /* update the txschq configuration in hw */
+ err = otx2_qos_txschq_update_root_cfg(pfvf, root, new_cfg);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Error updating txschq configuration");
+ goto txschq_free;
+ }
+
+out:
+ WRITE_ONCE(pfvf->qos.defcls, htb_defcls);
+ /* Pairs with smp_load_acquire() in ndo_select_queue */
+ smp_store_release(&pfvf->qos.maj_id, htb_maj_id);
+ kfree(new_cfg);
+ return 0;
+
+txschq_free:
+ otx2_qos_free_cfg(pfvf, new_cfg);
+free_root_node:
+ kfree(new_cfg);
+ otx2_qos_sw_node_delete(pfvf, root);
+ return err;
+}
+
+static int otx2_qos_root_destroy(struct otx2_nic *pfvf)
+{
+ struct otx2_qos_node *root;
+
+ netdev_dbg(pfvf->netdev, "TC_HTB_DESTROY\n");
+
+ /* find root node */
+ root = otx2_sw_node_find(pfvf, OTX2_QOS_ROOT_CLASSID);
+ if (!root)
+ return -ENOENT;
+
+ /* free the hw mappings */
+ otx2_qos_destroy_node(pfvf, root);
+
+ return 0;
+}
+
+static int otx2_qos_validate_quantum(struct otx2_nic *pfvf, u32 quantum)
+{
+ u32 rr_weight = otx2_qos_quantum_to_dwrr_weight(pfvf, quantum);
+ int err = 0;
+
+	/* The maximum round-robin weight supported by octeontx2 and CN10K
+	 * differs. Validate accordingly.
+ */
+ if (is_dev_otx2(pfvf->pdev))
+ err = (rr_weight > OTX2_MAX_RR_QUANTUM) ? -EINVAL : 0;
+ else if (rr_weight > CN10K_MAX_RR_WEIGHT)
+ err = -EINVAL;
+
+ return err;
+}
+
+static int otx2_qos_validate_dwrr_cfg(struct otx2_qos_node *parent,
+ struct netlink_ext_ack *extack,
+ struct otx2_nic *pfvf,
+ u64 prio, u64 quantum)
+{
+ int err;
+
+ err = otx2_qos_validate_quantum(pfvf, quantum);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported quantum value");
+ return err;
+ }
+
+ if (parent->child_dwrr_prio == OTX2_QOS_DEFAULT_PRIO) {
+ parent->child_dwrr_prio = prio;
+ } else if (prio != parent->child_dwrr_prio) {
+ NL_SET_ERR_MSG_MOD(extack, "Only one DWRR group is allowed");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int otx2_qos_validate_configuration(struct otx2_qos_node *parent,
+ struct netlink_ext_ack *extack,
+ struct otx2_nic *pfvf,
+ u64 prio, bool static_cfg)
+{
+ if (prio == parent->child_dwrr_prio && static_cfg) {
+ NL_SET_ERR_MSG_MOD(extack, "DWRR child group with same priority exists");
+ return -EEXIST;
+ }
+
+ if (static_cfg && test_bit(prio, parent->prio_bmap)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Static priority child with same priority exists");
+ return -EEXIST;
+ }
+
+ return 0;
+}
+
+static void otx2_reset_dwrr_prio(struct otx2_qos_node *parent, u64 prio)
+{
+ /* For PF, root node dwrr priority is static */
+ if (parent->level == NIX_TXSCH_LVL_TL1)
+ return;
+
+ if (parent->child_dwrr_prio != OTX2_QOS_DEFAULT_PRIO) {
+ parent->child_dwrr_prio = OTX2_QOS_DEFAULT_PRIO;
+ clear_bit(prio, parent->prio_bmap);
+ }
+}
+
+static bool is_qos_node_dwrr(struct otx2_qos_node *parent,
+ struct otx2_nic *pfvf,
+ u64 prio)
+{
+ struct otx2_qos_node *node;
+ bool ret = false;
+
+ if (parent->child_dwrr_prio == prio)
+ return true;
+
+ mutex_lock(&pfvf->qos.qos_lock);
+ list_for_each_entry(node, &parent->child_list, list) {
+ if (prio == node->prio) {
+ if (parent->child_dwrr_prio != OTX2_QOS_DEFAULT_PRIO &&
+ parent->child_dwrr_prio != prio)
+ continue;
+
+ if (otx2_qos_validate_quantum(pfvf, node->quantum)) {
+ netdev_err(pfvf->netdev,
+ "Unsupported quantum value for existing classid=0x%x quantum=%d prio=%d",
+ node->classid, node->quantum,
+ node->prio);
+ break;
+ }
+ /* mark old node as dwrr */
+ node->is_static = false;
+ parent->child_dwrr_cnt++;
+ parent->child_static_cnt--;
+ ret = true;
+ break;
+ }
+ }
+ mutex_unlock(&pfvf->qos.qos_lock);
+
+ return ret;
+}
+
+static int otx2_qos_leaf_alloc_queue(struct otx2_nic *pfvf, u16 classid,
+ u32 parent_classid, u64 rate, u64 ceil,
+ u64 prio, u32 quantum,
+ struct netlink_ext_ack *extack)
+{
+ struct otx2_qos_cfg *old_cfg, *new_cfg;
+ struct otx2_qos_node *node, *parent;
+ int qid, ret, err;
+ bool static_cfg;
+
+ netdev_dbg(pfvf->netdev,
+ "TC_HTB_LEAF_ALLOC_QUEUE: classid=0x%x parent_classid=0x%x rate=%lld ceil=%lld prio=%lld quantum=%d\n",
+ classid, parent_classid, rate, ceil, prio, quantum);
+
+ if (prio > OTX2_QOS_MAX_PRIO) {
+ NL_SET_ERR_MSG_MOD(extack, "Valid priority range 0 to 7");
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ if (!quantum || quantum > INT_MAX) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid quantum, range 1 - 2147483647 bytes");
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ /* get parent node */
+ parent = otx2_sw_node_find(pfvf, parent_classid);
+ if (!parent) {
+ NL_SET_ERR_MSG_MOD(extack, "parent node not found");
+ ret = -ENOENT;
+ goto out;
+ }
+ if (parent->level == NIX_TXSCH_LVL_MDQ) {
+ NL_SET_ERR_MSG_MOD(extack, "HTB qos max levels reached");
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ static_cfg = !is_qos_node_dwrr(parent, pfvf, prio);
+ ret = otx2_qos_validate_configuration(parent, extack, pfvf, prio,
+ static_cfg);
+ if (ret)
+ goto out;
+
+ if (!static_cfg) {
+ ret = otx2_qos_validate_dwrr_cfg(parent, extack, pfvf, prio,
+ quantum);
+ if (ret)
+ goto out;
+ }
+
+ if (static_cfg)
+ parent->child_static_cnt++;
+ else
+ parent->child_dwrr_cnt++;
+
+ set_bit(prio, parent->prio_bmap);
+
+ /* read current txschq configuration */
+ old_cfg = kzalloc(sizeof(*old_cfg), GFP_KERNEL);
+ if (!old_cfg) {
+ NL_SET_ERR_MSG_MOD(extack, "Memory allocation error");
+ ret = -ENOMEM;
+ goto reset_prio;
+ }
+ otx2_qos_read_txschq_cfg(pfvf, parent, old_cfg);
+
+ /* allocate a new sq */
+ qid = otx2_qos_get_qid(pfvf);
+ if (qid < 0) {
+		NL_SET_ERR_MSG_MOD(extack, "Reached max supported QOS SQs");
+ ret = -ENOMEM;
+ goto free_old_cfg;
+ }
+
+ /* Actual SQ mapping will be updated after SMQ alloc */
+ pfvf->qos.qid_to_sqmap[qid] = OTX2_QOS_INVALID_SQ;
+
+ /* allocate and initialize a new child node */
+ node = otx2_qos_sw_create_leaf_node(pfvf, parent, classid, prio, rate,
+ ceil, quantum, qid, static_cfg);
+ if (IS_ERR(node)) {
+ NL_SET_ERR_MSG_MOD(extack, "Unable to allocate leaf node");
+ ret = PTR_ERR(node);
+ goto free_old_cfg;
+ }
+
+ /* push new txschq config to hw */
+ new_cfg = kzalloc(sizeof(*new_cfg), GFP_KERNEL);
+ if (!new_cfg) {
+ NL_SET_ERR_MSG_MOD(extack, "Memory allocation error");
+ ret = -ENOMEM;
+ goto free_node;
+ }
+ ret = otx2_qos_update_tree(pfvf, node, new_cfg);
+ if (ret) {
+ NL_SET_ERR_MSG_MOD(extack, "HTB HW configuration error");
+ kfree(new_cfg);
+ otx2_qos_sw_node_delete(pfvf, node);
+ /* restore the old qos tree */
+ err = otx2_qos_txschq_update_config(pfvf, parent, old_cfg);
+ if (err) {
+ netdev_err(pfvf->netdev,
+				   "Failed to restore txschq configuration");
+ goto free_old_cfg;
+ }
+
+ otx2_qos_update_smq(pfvf, parent, QOS_CFG_SQ);
+ goto free_old_cfg;
+ }
+
+ /* update tx_real_queues */
+ otx2_qos_update_tx_netdev_queues(pfvf);
+
+ /* free new txschq config */
+ kfree(new_cfg);
+
+ /* free old txschq config */
+ otx2_qos_free_cfg(pfvf, old_cfg);
+ kfree(old_cfg);
+
+ return pfvf->hw.tx_queues + qid;
+
+free_node:
+ otx2_qos_sw_node_delete(pfvf, node);
+free_old_cfg:
+ kfree(old_cfg);
+reset_prio:
+ if (static_cfg)
+ parent->child_static_cnt--;
+ else
+ parent->child_dwrr_cnt--;
+
+ clear_bit(prio, parent->prio_bmap);
+out:
+ return ret;
+}
+
+static int otx2_qos_leaf_to_inner(struct otx2_nic *pfvf, u16 classid,
+ u16 child_classid, u64 rate, u64 ceil, u64 prio,
+ u32 quantum, struct netlink_ext_ack *extack)
+{
+ struct otx2_qos_cfg *old_cfg, *new_cfg;
+ struct otx2_qos_node *node, *child;
+ bool static_cfg;
+ int ret, err;
+ u16 qid;
+
+ netdev_dbg(pfvf->netdev,
+ "TC_HTB_LEAF_TO_INNER classid %04x, child %04x, rate %llu, ceil %llu\n",
+ classid, child_classid, rate, ceil);
+
+ if (prio > OTX2_QOS_MAX_PRIO) {
+ NL_SET_ERR_MSG_MOD(extack, "Valid priority range 0 to 7");
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ if (!quantum || quantum > INT_MAX) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid quantum, range 1 - 2147483647 bytes");
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ /* find node related to classid */
+ node = otx2_sw_node_find(pfvf, classid);
+ if (!node) {
+ NL_SET_ERR_MSG_MOD(extack, "HTB node not found");
+ ret = -ENOENT;
+ goto out;
+ }
+ /* check max qos txschq level */
+ if (node->level == NIX_TXSCH_LVL_MDQ) {
+ NL_SET_ERR_MSG_MOD(extack, "HTB qos level not supported");
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ static_cfg = !is_qos_node_dwrr(node, pfvf, prio);
+ if (!static_cfg) {
+ ret = otx2_qos_validate_dwrr_cfg(node, extack, pfvf, prio,
+ quantum);
+ if (ret)
+ goto out;
+ }
+
+ if (static_cfg)
+ node->child_static_cnt++;
+ else
+ node->child_dwrr_cnt++;
+
+ set_bit(prio, node->prio_bmap);
+
+ /* store the qid to assign to leaf node */
+ qid = node->qid;
+
+ /* read current txschq configuration */
+ old_cfg = kzalloc(sizeof(*old_cfg), GFP_KERNEL);
+ if (!old_cfg) {
+ NL_SET_ERR_MSG_MOD(extack, "Memory allocation error");
+ ret = -ENOMEM;
+ goto reset_prio;
+ }
+ otx2_qos_read_txschq_cfg(pfvf, node, old_cfg);
+
+ /* delete the txschq nodes allocated for this node */
+ otx2_qos_free_sw_node_schq(pfvf, node);
+
+ /* mark this node as htb inner node */
+ WRITE_ONCE(node->qid, OTX2_QOS_QID_INNER);
+
+ /* allocate and initialize a new child node */
+ child = otx2_qos_sw_create_leaf_node(pfvf, node, child_classid,
+ prio, rate, ceil, quantum,
+ qid, static_cfg);
+ if (IS_ERR(child)) {
+ NL_SET_ERR_MSG_MOD(extack, "Unable to allocate leaf node");
+ ret = PTR_ERR(child);
+ goto free_old_cfg;
+ }
+
+ /* push new txschq config to hw */
+ new_cfg = kzalloc(sizeof(*new_cfg), GFP_KERNEL);
+ if (!new_cfg) {
+ NL_SET_ERR_MSG_MOD(extack, "Memory allocation error");
+ ret = -ENOMEM;
+ goto free_node;
+ }
+ ret = otx2_qos_update_tree(pfvf, child, new_cfg);
+ if (ret) {
+ NL_SET_ERR_MSG_MOD(extack, "HTB HW configuration error");
+ kfree(new_cfg);
+ otx2_qos_sw_node_delete(pfvf, child);
+ /* restore the old qos tree */
+ WRITE_ONCE(node->qid, qid);
+ err = otx2_qos_alloc_txschq_node(pfvf, node);
+ if (err) {
+ netdev_err(pfvf->netdev,
+ "Failed to restore old leaf node");
+ goto free_old_cfg;
+ }
+ err = otx2_qos_txschq_update_config(pfvf, node, old_cfg);
+ if (err) {
+ netdev_err(pfvf->netdev,
+				   "Failed to restore txschq configuration");
+ goto free_old_cfg;
+ }
+ otx2_qos_update_smq(pfvf, node, QOS_CFG_SQ);
+ goto free_old_cfg;
+ }
+
+ /* free new txschq config */
+ kfree(new_cfg);
+
+ /* free old txschq config */
+ otx2_qos_free_cfg(pfvf, old_cfg);
+ kfree(old_cfg);
+
+ return 0;
+
+free_node:
+ otx2_qos_sw_node_delete(pfvf, child);
+free_old_cfg:
+ kfree(old_cfg);
+reset_prio:
+ if (static_cfg)
+ node->child_static_cnt--;
+ else
+ node->child_dwrr_cnt--;
+ clear_bit(prio, node->prio_bmap);
+out:
+ return ret;
+}
+
+static int otx2_qos_leaf_del(struct otx2_nic *pfvf, u16 *classid,
+ struct netlink_ext_ack *extack)
+{
+ struct otx2_qos_node *node, *parent;
+ int dwrr_del_node = false;
+ u64 prio;
+ u16 qid;
+
+ netdev_dbg(pfvf->netdev, "TC_HTB_LEAF_DEL classid %04x\n", *classid);
+
+ /* find node related to classid */
+ node = otx2_sw_node_find(pfvf, *classid);
+ if (!node) {
+ NL_SET_ERR_MSG_MOD(extack, "HTB node not found");
+ return -ENOENT;
+ }
+ parent = node->parent;
+ prio = node->prio;
+ qid = node->qid;
+
+ if (!node->is_static)
+ dwrr_del_node = true;
+
+ otx2_qos_disable_sq(pfvf, node->qid);
+
+ otx2_qos_destroy_node(pfvf, node);
+ pfvf->qos.qid_to_sqmap[qid] = OTX2_QOS_INVALID_SQ;
+
+ if (dwrr_del_node) {
+ parent->child_dwrr_cnt--;
+ } else {
+ parent->child_static_cnt--;
+ clear_bit(prio, parent->prio_bmap);
+ }
+
+ /* Reset DWRR priority if all dwrr nodes are deleted */
+ if (!parent->child_dwrr_cnt)
+ otx2_reset_dwrr_prio(parent, prio);
+
+ if (!parent->child_static_cnt)
+ parent->max_static_prio = 0;
+
+ return 0;
+}
+
+static int otx2_qos_leaf_del_last(struct otx2_nic *pfvf, u16 classid, bool force,
+ struct netlink_ext_ack *extack)
+{
+ struct otx2_qos_node *node, *parent;
+ struct otx2_qos_cfg *new_cfg;
+ int dwrr_del_node = false;
+ u64 prio;
+ int err;
+ u16 qid;
+
+ netdev_dbg(pfvf->netdev,
+ "TC_HTB_LEAF_DEL_LAST classid %04x\n", classid);
+
+ /* find node related to classid */
+ node = otx2_sw_node_find(pfvf, classid);
+ if (!node) {
+ NL_SET_ERR_MSG_MOD(extack, "HTB node not found");
+ return -ENOENT;
+ }
+
+ /* save qid for use by parent */
+ qid = node->qid;
+ prio = node->prio;
+
+ parent = otx2_sw_node_find(pfvf, node->parent->classid);
+ if (!parent) {
+ NL_SET_ERR_MSG_MOD(extack, "parent node not found");
+ return -ENOENT;
+ }
+
+ if (!node->is_static)
+ dwrr_del_node = true;
+
+ /* destroy the leaf node */
+ otx2_qos_destroy_node(pfvf, node);
+ pfvf->qos.qid_to_sqmap[qid] = OTX2_QOS_INVALID_SQ;
+
+ if (dwrr_del_node) {
+ parent->child_dwrr_cnt--;
+ } else {
+ parent->child_static_cnt--;
+ clear_bit(prio, parent->prio_bmap);
+ }
+
+ /* Reset DWRR priority if all dwrr nodes are deleted */
+ if (!parent->child_dwrr_cnt)
+ otx2_reset_dwrr_prio(parent, prio);
+
+ if (!parent->child_static_cnt)
+ parent->max_static_prio = 0;
+
+ /* create downstream txschq entries to parent */
+ err = otx2_qos_alloc_txschq_node(pfvf, parent);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "HTB failed to create txsch configuration");
+ return err;
+ }
+ WRITE_ONCE(parent->qid, qid);
+ __set_bit(qid, pfvf->qos.qos_sq_bmap);
+
+ /* push new txschq config to hw */
+ new_cfg = kzalloc(sizeof(*new_cfg), GFP_KERNEL);
+ if (!new_cfg) {
+ NL_SET_ERR_MSG_MOD(extack, "Memory allocation error");
+ return -ENOMEM;
+ }
+ /* fill txschq cfg and push txschq cfg to hw */
+ otx2_qos_fill_cfg_schq(parent, new_cfg);
+ err = otx2_qos_push_txschq_cfg(pfvf, parent, new_cfg);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "HTB HW configuration error");
+ kfree(new_cfg);
+ return err;
+ }
+ kfree(new_cfg);
+
+ /* update tx_real_queues */
+ otx2_qos_update_tx_netdev_queues(pfvf);
+
+ return 0;
+}
+
+void otx2_clean_qos_queues(struct otx2_nic *pfvf)
+{
+ struct otx2_qos_node *root;
+
+ root = otx2_sw_node_find(pfvf, OTX2_QOS_ROOT_CLASSID);
+ if (!root)
+ return;
+
+ otx2_qos_update_smq(pfvf, root, QOS_SMQ_FLUSH);
+}
+
+void otx2_qos_config_txschq(struct otx2_nic *pfvf)
+{
+ struct otx2_qos_node *root;
+ int err;
+
+ root = otx2_sw_node_find(pfvf, OTX2_QOS_ROOT_CLASSID);
+ if (!root)
+ return;
+
+ if (root->level != NIX_TXSCH_LVL_TL1) {
+ err = otx2_qos_txschq_config(pfvf, root);
+ if (err) {
+			netdev_err(pfvf->netdev, "Error updating txschq configuration\n");
+ goto root_destroy;
+ }
+ }
+
+ err = otx2_qos_txschq_push_cfg_tl(pfvf, root, NULL);
+ if (err) {
+		netdev_err(pfvf->netdev, "Error updating txschq configuration\n");
+ goto root_destroy;
+ }
+
+ otx2_qos_update_smq(pfvf, root, QOS_CFG_SQ);
+ return;
+
+root_destroy:
+ netdev_err(pfvf->netdev, "Failed to update Scheduler/Shaping config in Hardware\n");
+ /* Free resources allocated */
+ otx2_qos_root_destroy(pfvf);
+}
+
+int otx2_setup_tc_htb(struct net_device *ndev, struct tc_htb_qopt_offload *htb)
+{
+ struct otx2_nic *pfvf = netdev_priv(ndev);
+ int res;
+
+ switch (htb->command) {
+ case TC_HTB_CREATE:
+ return otx2_qos_root_add(pfvf, htb->parent_classid,
+ htb->classid, htb->extack);
+ case TC_HTB_DESTROY:
+ return otx2_qos_root_destroy(pfvf);
+ case TC_HTB_LEAF_ALLOC_QUEUE:
+ res = otx2_qos_leaf_alloc_queue(pfvf, htb->classid,
+ htb->parent_classid,
+ htb->rate, htb->ceil,
+ htb->prio, htb->quantum,
+ htb->extack);
+ if (res < 0)
+ return res;
+ htb->qid = res;
+ return 0;
+ case TC_HTB_LEAF_TO_INNER:
+ return otx2_qos_leaf_to_inner(pfvf, htb->parent_classid,
+ htb->classid, htb->rate,
+ htb->ceil, htb->prio,
+ htb->quantum, htb->extack);
+ case TC_HTB_LEAF_DEL:
+ return otx2_qos_leaf_del(pfvf, &htb->classid, htb->extack);
+ case TC_HTB_LEAF_DEL_LAST:
+ case TC_HTB_LEAF_DEL_LAST_FORCE:
+ return otx2_qos_leaf_del_last(pfvf, htb->classid,
+ htb->command == TC_HTB_LEAF_DEL_LAST_FORCE,
+ htb->extack);
+ case TC_HTB_LEAF_QUERY_QUEUE:
+ res = otx2_get_txq_by_classid(pfvf, htb->classid);
+ htb->qid = res;
+ return 0;
+ case TC_HTB_NODE_MODIFY:
+ fallthrough;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/qos.h b/drivers/net/ethernet/marvell/octeontx2/nic/qos.h
new file mode 100644
index 000000000..221bd0438
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/qos.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell RVU Ethernet driver
+ *
+ * Copyright (C) 2023 Marvell.
+ *
+ */
+#ifndef OTX2_QOS_H
+#define OTX2_QOS_H
+
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/rhashtable.h>
+
+#define OTX2_QOS_MAX_LVL 4
+#define OTX2_QOS_MAX_PRIO 7
+#define OTX2_QOS_MAX_LEAF_NODES 16
+
+enum qos_smq_operations {
+ QOS_CFG_SQ,
+ QOS_SMQ_FLUSH,
+};
+
+u64 otx2_get_txschq_rate_regval(struct otx2_nic *nic, u64 maxrate, u32 burst);
+
+int otx2_setup_tc_htb(struct net_device *ndev, struct tc_htb_qopt_offload *htb);
+int otx2_qos_get_qid(struct otx2_nic *pfvf);
+void otx2_qos_free_qid(struct otx2_nic *pfvf, int qidx);
+int otx2_qos_enable_sq(struct otx2_nic *pfvf, int qidx);
+void otx2_qos_disable_sq(struct otx2_nic *pfvf, int qidx);
+
+struct otx2_qos_cfg {
+ u16 schq[NIX_TXSCH_LVL_CNT];
+ u16 schq_contig[NIX_TXSCH_LVL_CNT];
+ int static_node_pos[NIX_TXSCH_LVL_CNT];
+ int dwrr_node_pos[NIX_TXSCH_LVL_CNT];
+ u16 schq_contig_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
+ u16 schq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
+ bool schq_index_used[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
+};
+
+struct otx2_qos {
+ DECLARE_HASHTABLE(qos_hlist, order_base_2(OTX2_QOS_MAX_LEAF_NODES));
+ struct mutex qos_lock; /* child list lock */
+ u16 qid_to_sqmap[OTX2_QOS_MAX_LEAF_NODES];
+ struct list_head qos_tree;
+ DECLARE_BITMAP(qos_sq_bmap, OTX2_QOS_MAX_LEAF_NODES);
+ u16 maj_id;
+ u16 defcls;
+ u8 link_cfg_lvl; /* LINKX_CFG CSRs mapped to TL3 or TL2's index ? */
+};
+
+struct otx2_qos_node {
+ struct list_head list; /* list management */
+ struct list_head child_list;
+ struct list_head child_schq_list;
+ struct hlist_node hlist;
+ DECLARE_BITMAP(prio_bmap, OTX2_QOS_MAX_PRIO + 1);
+ struct otx2_qos_node *parent; /* parent qos node */
+ u64 rate; /* htb params */
+ u64 ceil;
+ u32 classid;
+ u32 prio;
+ u32 quantum;
+ /* hw txschq */
+ u16 schq;
+ u16 qid;
+ u16 prio_anchor;
+ u16 max_static_prio;
+ u16 child_dwrr_cnt;
+ u16 child_static_cnt;
+ u16 child_dwrr_prio;
+ u16 txschq_idx; /* txschq allocation index */
+ u8 level;
+ bool is_static;
+};
+
+#endif
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c b/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c
new file mode 100644
index 000000000..9d887bfc3
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c
@@ -0,0 +1,296 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Physical Function ethernet driver
+ *
+ * Copyright (C) 2023 Marvell.
+ *
+ */
+
+#include <linux/netdevice.h>
+#include <net/tso.h>
+
+#include "cn10k.h"
+#include "otx2_reg.h"
+#include "otx2_common.h"
+#include "otx2_txrx.h"
+#include "otx2_struct.h"
+
+#define OTX2_QOS_MAX_LEAF_NODES 16
+
+static void otx2_qos_aura_pool_free(struct otx2_nic *pfvf, int pool_id)
+{
+ struct otx2_pool *pool;
+
+ if (!pfvf->qset.pool)
+ return;
+
+ pool = &pfvf->qset.pool[pool_id];
+ qmem_free(pfvf->dev, pool->stack);
+ qmem_free(pfvf->dev, pool->fc_addr);
+ pool->stack = NULL;
+ pool->fc_addr = NULL;
+}
+
+static int otx2_qos_sq_aura_pool_init(struct otx2_nic *pfvf, int qidx)
+{
+ struct otx2_qset *qset = &pfvf->qset;
+ int pool_id, stack_pages, num_sqbs;
+ struct otx2_hw *hw = &pfvf->hw;
+ struct otx2_snd_queue *sq;
+ struct otx2_pool *pool;
+ dma_addr_t bufptr;
+ int err, ptr;
+ u64 iova, pa;
+
+ /* Calculate number of SQBs needed.
+ *
+ * For a 128byte SQE, and 4K size SQB, 31 SQEs will fit in one SQB.
+ * Last SQE is used for pointing to next SQB.
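+	 * e.g. for sqe_cnt = 1024 this works out to (1024 + 30) / 31 = 34 SQBs.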
+ */
+ num_sqbs = (hw->sqb_size / 128) - 1;
+ num_sqbs = (qset->sqe_cnt + num_sqbs) / num_sqbs;
+
+ /* Get no of stack pages needed */
+ stack_pages =
+ (num_sqbs + hw->stack_pg_ptrs - 1) / hw->stack_pg_ptrs;
+
+ pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
+ pool = &pfvf->qset.pool[pool_id];
+
+ /* Initialize aura context */
+ err = otx2_aura_init(pfvf, pool_id, pool_id, num_sqbs);
+ if (err)
+ return err;
+
+ /* Initialize pool context */
+ err = otx2_pool_init(pfvf, pool_id, stack_pages,
+ num_sqbs, hw->sqb_size, AURA_NIX_SQ);
+ if (err)
+ goto aura_free;
+
+ /* Flush accumulated messages */
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err)
+ goto pool_free;
+
+ /* Allocate pointers and free them to aura/pool */
+ sq = &qset->sq[qidx];
+ sq->sqb_count = 0;
+ sq->sqb_ptrs = kcalloc(num_sqbs, sizeof(*sq->sqb_ptrs), GFP_KERNEL);
+ if (!sq->sqb_ptrs) {
+ err = -ENOMEM;
+ goto pool_free;
+ }
+
+ for (ptr = 0; ptr < num_sqbs; ptr++) {
+ err = otx2_alloc_rbuf(pfvf, pool, &bufptr);
+ if (err)
+ goto sqb_free;
+ pfvf->hw_ops->aura_freeptr(pfvf, pool_id, bufptr);
+ sq->sqb_ptrs[sq->sqb_count++] = (u64)bufptr;
+ }
+
+ return 0;
+
+sqb_free:
+ while (ptr--) {
+ if (!sq->sqb_ptrs[ptr])
+ continue;
+ iova = sq->sqb_ptrs[ptr];
+ pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
+ dma_unmap_page_attrs(pfvf->dev, iova, hw->sqb_size,
+ DMA_FROM_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ put_page(virt_to_page(phys_to_virt(pa)));
+ otx2_aura_allocptr(pfvf, pool_id);
+ }
+ sq->sqb_count = 0;
+ kfree(sq->sqb_ptrs);
+pool_free:
+ qmem_free(pfvf->dev, pool->stack);
+aura_free:
+ qmem_free(pfvf->dev, pool->fc_addr);
+ otx2_mbox_reset(&pfvf->mbox.mbox, 0);
+ return err;
+}
+
+static void otx2_qos_sq_free_sqbs(struct otx2_nic *pfvf, int qidx)
+{
+ struct otx2_qset *qset = &pfvf->qset;
+ struct otx2_hw *hw = &pfvf->hw;
+ struct otx2_snd_queue *sq;
+ u64 iova, pa;
+ int sqb;
+
+ sq = &qset->sq[qidx];
+ if (!sq->sqb_ptrs)
+ return;
+ for (sqb = 0; sqb < sq->sqb_count; sqb++) {
+ if (!sq->sqb_ptrs[sqb])
+ continue;
+ iova = sq->sqb_ptrs[sqb];
+ pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
+ dma_unmap_page_attrs(pfvf->dev, iova, hw->sqb_size,
+ DMA_FROM_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ put_page(virt_to_page(phys_to_virt(pa)));
+ }
+
+ sq->sqb_count = 0;
+
+ sq = &qset->sq[qidx];
+ qmem_free(pfvf->dev, sq->sqe);
+ qmem_free(pfvf->dev, sq->tso_hdrs);
+ kfree(sq->sg);
+ kfree(sq->sqb_ptrs);
+ qmem_free(pfvf->dev, sq->timestamps);
+
+ memset((void *)sq, 0, sizeof(*sq));
+}
+
+/* send queue id */
+static void otx2_qos_sqb_flush(struct otx2_nic *pfvf, int qidx)
+{
+ int sqe_tail, sqe_head;
+ u64 incr, *ptr, val;
+
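+	/* qidx is encoded in the upper 32 bits of the atomic op; the returned
+	 * status word carries the SQE head and tail pointers
+	 */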
+ ptr = (__force u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS);
+ incr = (u64)qidx << 32;
+ val = otx2_atomic64_add(incr, ptr);
+ sqe_head = (val >> 20) & 0x3F;
+ sqe_tail = (val >> 28) & 0x3F;
+ if (sqe_head != sqe_tail)
+ usleep_range(50, 60);
+}
+
+static int otx2_qos_ctx_disable(struct otx2_nic *pfvf, u16 qidx, int aura_id)
+{
+ struct nix_cn10k_aq_enq_req *cn10k_sq_aq;
+ struct npa_aq_enq_req *aura_aq;
+ struct npa_aq_enq_req *pool_aq;
+ struct nix_aq_enq_req *sq_aq;
+
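+	/* disable the SQ context (CN10K parts use a different AQ request
+	 * format), then the backing aura and pool contexts, all in one
+	 * mailbox batch
+	 */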
+ if (test_bit(CN10K_LMTST, &pfvf->hw.cap_flag)) {
+ cn10k_sq_aq = otx2_mbox_alloc_msg_nix_cn10k_aq_enq(&pfvf->mbox);
+ if (!cn10k_sq_aq)
+ return -ENOMEM;
+ cn10k_sq_aq->qidx = qidx;
+ cn10k_sq_aq->sq.ena = 0;
+ cn10k_sq_aq->sq_mask.ena = 1;
+ cn10k_sq_aq->ctype = NIX_AQ_CTYPE_SQ;
+ cn10k_sq_aq->op = NIX_AQ_INSTOP_WRITE;
+ } else {
+ sq_aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
+ if (!sq_aq)
+ return -ENOMEM;
+ sq_aq->qidx = qidx;
+ sq_aq->sq.ena = 0;
+ sq_aq->sq_mask.ena = 1;
+ sq_aq->ctype = NIX_AQ_CTYPE_SQ;
+ sq_aq->op = NIX_AQ_INSTOP_WRITE;
+ }
+
+ aura_aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
+ if (!aura_aq) {
+ otx2_mbox_reset(&pfvf->mbox.mbox, 0);
+ return -ENOMEM;
+ }
+
+ aura_aq->aura_id = aura_id;
+ aura_aq->aura.ena = 0;
+ aura_aq->aura_mask.ena = 1;
+ aura_aq->ctype = NPA_AQ_CTYPE_AURA;
+ aura_aq->op = NPA_AQ_INSTOP_WRITE;
+
+ pool_aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
+ if (!pool_aq) {
+ otx2_mbox_reset(&pfvf->mbox.mbox, 0);
+ return -ENOMEM;
+ }
+
+ pool_aq->aura_id = aura_id;
+ pool_aq->pool.ena = 0;
+ pool_aq->pool_mask.ena = 1;
+
+ pool_aq->ctype = NPA_AQ_CTYPE_POOL;
+ pool_aq->op = NPA_AQ_INSTOP_WRITE;
+
+ return otx2_sync_mbox_msg(&pfvf->mbox);
+}
+
+int otx2_qos_get_qid(struct otx2_nic *pfvf)
+{
+ int qidx;
+
+ qidx = find_first_zero_bit(pfvf->qos.qos_sq_bmap,
+ pfvf->hw.tc_tx_queues);
+
+ return qidx == pfvf->hw.tc_tx_queues ? -ENOSPC : qidx;
+}
+
+void otx2_qos_free_qid(struct otx2_nic *pfvf, int qidx)
+{
+ clear_bit(qidx, pfvf->qos.qos_sq_bmap);
+}
+
+int otx2_qos_enable_sq(struct otx2_nic *pfvf, int qidx)
+{
+ struct otx2_hw *hw = &pfvf->hw;
+ int pool_id, sq_idx, err;
+
+ if (pfvf->flags & OTX2_FLAG_INTF_DOWN)
+ return -EPERM;
+
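+	/* QoS send queues are indexed after the regular (non-QoS) queues */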
+ sq_idx = hw->non_qos_queues + qidx;
+
+ mutex_lock(&pfvf->mbox.lock);
+ err = otx2_qos_sq_aura_pool_init(pfvf, sq_idx);
+ if (err)
+ goto out;
+
+ pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, sq_idx);
+ err = otx2_sq_init(pfvf, sq_idx, pool_id);
+ if (err)
+ goto out;
+out:
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
+}
+
+void otx2_qos_disable_sq(struct otx2_nic *pfvf, int qidx)
+{
+ struct otx2_qset *qset = &pfvf->qset;
+ struct otx2_hw *hw = &pfvf->hw;
+ struct otx2_snd_queue *sq;
+ struct otx2_cq_queue *cq;
+ int pool_id, sq_idx;
+
+ sq_idx = hw->non_qos_queues + qidx;
+
+ /* If the DOWN flag is set SQs are already freed */
+ if (pfvf->flags & OTX2_FLAG_INTF_DOWN)
+ return;
+
+ sq = &pfvf->qset.sq[sq_idx];
+ if (!sq->sqb_ptrs)
+ return;
+
+ if (sq_idx < hw->non_qos_queues ||
+ sq_idx >= otx2_get_total_tx_queues(pfvf)) {
+ netdev_err(pfvf->netdev, "Send Queue is not a QoS queue\n");
+ return;
+ }
+
+ cq = &qset->cq[pfvf->hw.rx_queues + sq_idx];
+ pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, sq_idx);
+
+ otx2_qos_sqb_flush(pfvf, sq_idx);
+ otx2_smq_flush(pfvf, otx2_get_smq_idx(pfvf, sq_idx));
+ otx2_cleanup_tx_cqes(pfvf, cq);
+
+ mutex_lock(&pfvf->mbox.lock);
+ otx2_qos_ctx_disable(pfvf, sq_idx, pool_id);
+ mutex_unlock(&pfvf->mbox.lock);
+
+ otx2_qos_sq_free_sqbs(pfvf, sq_idx);
+ otx2_qos_aura_pool_free(pfvf, pool_id);
+}
diff --git a/drivers/net/ethernet/marvell/prestera/Kconfig b/drivers/net/ethernet/marvell/prestera/Kconfig
new file mode 100644
index 000000000..f2f7663c3
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/Kconfig
@@ -0,0 +1,27 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Marvell Prestera drivers configuration
+#
+
+config PRESTERA
+ tristate "Marvell Prestera Switch ASICs support"
+ depends on NET_SWITCHDEV && VLAN_8021Q
+ depends on BRIDGE || BRIDGE=n
+ select NET_DEVLINK
+ select PHYLINK
+ help
+	  This driver supports the Marvell Prestera Switch ASICs family.
+
+ To compile this driver as a module, choose M here: the
+ module will be called prestera.
+
+config PRESTERA_PCI
+ tristate "PCI interface driver for Marvell Prestera Switch ASICs family"
+ depends on PCI && HAS_IOMEM && PRESTERA
+ default PRESTERA
+ help
+	  This is the implementation of PCI interface support for the Marvell
+	  Prestera Switch ASICs family.
+
+ To compile this driver as a module, choose M here: the
+ module will be called prestera_pci.
diff --git a/drivers/net/ethernet/marvell/prestera/Makefile b/drivers/net/ethernet/marvell/prestera/Makefile
new file mode 100644
index 000000000..df14cee80
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_PRESTERA) += prestera.o
+prestera-objs := prestera_main.o prestera_hw.o prestera_dsa.o \
+ prestera_rxtx.o prestera_devlink.o prestera_ethtool.o \
+ prestera_switchdev.o prestera_acl.o prestera_flow.o \
+ prestera_flower.o prestera_span.o prestera_counter.o \
+ prestera_router.o prestera_router_hw.o prestera_matchall.o
+
+obj-$(CONFIG_PRESTERA_PCI) += prestera_pci.o
diff --git a/drivers/net/ethernet/marvell/prestera/prestera.h b/drivers/net/ethernet/marvell/prestera/prestera.h
new file mode 100644
index 000000000..35554ee80
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera.h
@@ -0,0 +1,417 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved. */
+
+#ifndef _PRESTERA_H_
+#define _PRESTERA_H_
+
+#include <linux/notifier.h>
+#include <linux/skbuff.h>
+#include <linux/workqueue.h>
+#include <linux/phylink.h>
+#include <net/devlink.h>
+#include <uapi/linux/if_ether.h>
+
+#define PRESTERA_DRV_NAME "prestera"
+
+#define PRESTERA_DEFAULT_VID 1
+
+struct prestera_fw_rev {
+ u16 maj;
+ u16 min;
+ u16 sub;
+};
+
+struct prestera_flood_domain {
+ struct prestera_switch *sw;
+ struct list_head flood_domain_port_list;
+ u32 idx;
+};
+
+struct prestera_mdb_entry {
+ struct prestera_switch *sw;
+ struct prestera_flood_domain *flood_domain;
+ unsigned char addr[ETH_ALEN];
+ u16 vid;
+};
+
+struct prestera_flood_domain_port {
+ struct prestera_flood_domain *flood_domain;
+ struct net_device *dev;
+ struct list_head flood_domain_port_node;
+ u16 vid;
+};
+
+struct prestera_port_stats {
+ u64 good_octets_received;
+ u64 bad_octets_received;
+ u64 mac_trans_error;
+ u64 broadcast_frames_received;
+ u64 multicast_frames_received;
+ u64 frames_64_octets;
+ u64 frames_65_to_127_octets;
+ u64 frames_128_to_255_octets;
+ u64 frames_256_to_511_octets;
+ u64 frames_512_to_1023_octets;
+ u64 frames_1024_to_max_octets;
+ u64 excessive_collision;
+ u64 multicast_frames_sent;
+ u64 broadcast_frames_sent;
+ u64 fc_sent;
+ u64 fc_received;
+ u64 buffer_overrun;
+ u64 undersize;
+ u64 fragments;
+ u64 oversize;
+ u64 jabber;
+ u64 rx_error_frame_received;
+ u64 bad_crc;
+ u64 collisions;
+ u64 late_collision;
+ u64 unicast_frames_received;
+ u64 unicast_frames_sent;
+ u64 sent_multiple;
+ u64 sent_deferred;
+ u64 good_octets_sent;
+};
+
+#define PRESTERA_AP_PORT_MAX (10)
+
+struct prestera_port_caps {
+ u64 supp_link_modes;
+ u8 supp_fec;
+ u8 type;
+ u8 transceiver;
+};
+
+struct prestera_lag {
+ struct net_device *dev;
+ struct list_head members;
+ u16 member_count;
+ u16 lag_id;
+};
+
+struct prestera_flow_block;
+
+struct prestera_port_mac_state {
+ bool valid;
+ u32 mode;
+ u32 speed;
+ bool oper;
+ u8 duplex;
+ u8 fc;
+ u8 fec;
+};
+
+struct prestera_port_phy_state {
+ u64 lmode_bmap;
+ struct {
+ bool pause;
+ bool asym_pause;
+ } remote_fc;
+ u8 mdix;
+};
+
+struct prestera_port_mac_config {
+ u32 mode;
+ u32 speed;
+ bool admin;
+ u8 inband;
+ u8 duplex;
+ u8 fec;
+};
+
+struct prestera_port_phy_config {
+ u32 mode;
+ bool admin;
+ u8 mdix;
+};
+
+struct prestera_port {
+ struct net_device *dev;
+ struct prestera_switch *sw;
+ struct prestera_flow_block *ingress_flow_block;
+ struct prestera_flow_block *egress_flow_block;
+ struct devlink_port dl_port;
+ struct list_head lag_member;
+ struct prestera_lag *lag;
+ u32 id;
+ u32 hw_id;
+ u32 dev_id;
+ u16 fp_id;
+ u16 pvid;
+ bool autoneg;
+ u64 adver_link_modes;
+ u8 adver_fec;
+ struct prestera_port_caps caps;
+ struct list_head list;
+ struct list_head vlans_list;
+ struct {
+ struct prestera_port_stats stats;
+ struct delayed_work caching_dw;
+ } cached_hw_stats;
+ struct prestera_port_mac_config cfg_mac;
+ struct prestera_port_phy_config cfg_phy;
+ struct prestera_port_mac_state state_mac;
+ struct prestera_port_phy_state state_phy;
+
+ struct phylink_config phy_config;
+ struct phylink *phy_link;
+ struct phylink_pcs phylink_pcs;
+
+ /* protects state_mac */
+ spinlock_t state_mac_lock;
+};
+
+struct prestera_device {
+ struct device *dev;
+ u8 __iomem *ctl_regs;
+ u8 __iomem *pp_regs;
+ struct prestera_fw_rev fw_rev;
+ void *priv;
+
+ /* called by device driver to handle received packets */
+ void (*recv_pkt)(struct prestera_device *dev);
+
+ /* called by device driver to pass event up to the higher layer */
+ int (*recv_msg)(struct prestera_device *dev, void *msg, size_t size);
+
+ /* called by higher layer to send request to the firmware */
+ int (*send_req)(struct prestera_device *dev, int qid, void *in_msg,
+ size_t in_size, void *out_msg, size_t out_size,
+ unsigned int wait);
+};
+
+enum prestera_event_type {
+ PRESTERA_EVENT_TYPE_UNSPEC,
+
+ PRESTERA_EVENT_TYPE_PORT,
+ PRESTERA_EVENT_TYPE_FDB,
+ PRESTERA_EVENT_TYPE_RXTX,
+
+ PRESTERA_EVENT_TYPE_MAX
+};
+
+enum prestera_rxtx_event_id {
+ PRESTERA_RXTX_EVENT_UNSPEC,
+ PRESTERA_RXTX_EVENT_RCV_PKT,
+};
+
+enum prestera_port_event_id {
+ PRESTERA_PORT_EVENT_UNSPEC,
+ PRESTERA_PORT_EVENT_MAC_STATE_CHANGED,
+};
+
+struct prestera_port_event {
+ u32 port_id;
+ union {
+ struct {
+ u32 mode;
+ u32 speed;
+ u8 oper;
+ u8 duplex;
+ u8 fc;
+ u8 fec;
+ } mac;
+ struct {
+ u64 lmode_bmap;
+ struct {
+ bool pause;
+ bool asym_pause;
+ } remote_fc;
+ u8 mdix;
+ } phy;
+ } data;
+};
+
+enum prestera_fdb_entry_type {
+ PRESTERA_FDB_ENTRY_TYPE_REG_PORT,
+ PRESTERA_FDB_ENTRY_TYPE_LAG,
+ PRESTERA_FDB_ENTRY_TYPE_MAX
+};
+
+enum prestera_fdb_event_id {
+ PRESTERA_FDB_EVENT_UNSPEC,
+ PRESTERA_FDB_EVENT_LEARNED,
+ PRESTERA_FDB_EVENT_AGED,
+};
+
+struct prestera_fdb_event {
+ enum prestera_fdb_entry_type type;
+ union {
+ u32 port_id;
+ u16 lag_id;
+ } dest;
+ u32 vid;
+ union {
+ u8 mac[ETH_ALEN];
+ } data;
+};
+
+struct prestera_event {
+ u16 id;
+ union {
+ struct prestera_port_event port_evt;
+ struct prestera_fdb_event fdb_evt;
+ };
+};
+
+enum prestera_if_type {
+ /* the interface is of port type (dev,port) */
+ PRESTERA_IF_PORT_E = 0,
+
+ /* the interface is of lag type (lag-id) */
+ PRESTERA_IF_LAG_E = 1,
+
+ /* the interface is of Vid type (vlan-id) */
+ PRESTERA_IF_VID_E = 3,
+};
+
+struct prestera_iface {
+ enum prestera_if_type type;
+ struct {
+ u32 hw_dev_num;
+ u32 port_num;
+ } dev_port;
+ u32 hw_dev_num;
+ u16 vr_id;
+ u16 lag_id;
+ u16 vlan_id;
+};
+
+struct prestera_switchdev;
+struct prestera_span;
+struct prestera_rxtx;
+struct prestera_trap_data;
+struct prestera_acl;
+
+struct prestera_switch {
+ struct prestera_device *dev;
+ struct prestera_switchdev *swdev;
+ struct prestera_rxtx *rxtx;
+ struct prestera_acl *acl;
+ struct prestera_span *span;
+ struct list_head event_handlers;
+ struct notifier_block netdev_nb;
+ struct prestera_trap_data *trap_data;
+ char base_mac[ETH_ALEN];
+ struct list_head port_list;
+ rwlock_t port_list_lock;
+ u32 port_count;
+ u32 mtu_min;
+ u32 mtu_max;
+ u8 id;
+ struct device_node *np;
+ struct prestera_router *router;
+ struct prestera_lag *lags;
+ struct prestera_counter *counter;
+ u8 lag_member_max;
+ u8 lag_max;
+ u32 size_tbl_router_nexthop;
+};
+
+struct prestera_router {
+ struct prestera_switch *sw;
+ struct list_head vr_list;
+ struct list_head rif_entry_list;
+ struct rhashtable nh_neigh_ht;
+ struct rhashtable nexthop_group_ht;
+ struct rhashtable fib_ht;
+ struct rhashtable kern_neigh_cache_ht;
+ struct rhashtable kern_fib_cache_ht;
+ struct notifier_block inetaddr_nb;
+ struct notifier_block inetaddr_valid_nb;
+ struct notifier_block fib_nb;
+ struct notifier_block netevent_nb;
+ u8 *nhgrp_hw_state_cache; /* Bitmap cached hw state of nhs */
+ unsigned long nhgrp_hw_cache_kick; /* jiffies */
+ struct {
+ struct delayed_work dw;
+ } neighs_update;
+};
+
+struct prestera_rxtx_params {
+ bool use_sdma;
+ u32 map_addr;
+};
+
+#define prestera_dev(sw) ((sw)->dev->dev)
+
+static inline void prestera_write(const struct prestera_switch *sw,
+ unsigned int reg, u32 val)
+{
+ writel(val, sw->dev->pp_regs + reg);
+}
+
+static inline u32 prestera_read(const struct prestera_switch *sw,
+ unsigned int reg)
+{
+ return readl(sw->dev->pp_regs + reg);
+}
+
+int prestera_device_register(struct prestera_device *dev);
+void prestera_device_unregister(struct prestera_device *dev);
+
+struct prestera_port *prestera_port_find_by_hwid(struct prestera_switch *sw,
+ u32 dev_id, u32 hw_id);
+
+int prestera_port_autoneg_set(struct prestera_port *port, u64 link_modes);
+
+int prestera_router_init(struct prestera_switch *sw);
+void prestera_router_fini(struct prestera_switch *sw);
+
+struct prestera_port *prestera_find_port(struct prestera_switch *sw, u32 id);
+
+struct prestera_switch *prestera_switch_get(struct net_device *dev);
+
+int prestera_port_cfg_mac_read(struct prestera_port *port,
+ struct prestera_port_mac_config *cfg);
+
+int prestera_port_cfg_mac_write(struct prestera_port *port,
+ struct prestera_port_mac_config *cfg);
+
+struct prestera_port *prestera_port_dev_lower_find(struct net_device *dev);
+
+void prestera_queue_work(struct work_struct *work);
+void prestera_queue_delayed_work(struct delayed_work *work, unsigned long delay);
+void prestera_queue_drain(void);
+
+int prestera_port_learning_set(struct prestera_port *port, bool learn_enable);
+int prestera_port_uc_flood_set(struct prestera_port *port, bool flood);
+int prestera_port_mc_flood_set(struct prestera_port *port, bool flood);
+
+int prestera_port_br_locked_set(struct prestera_port *port, bool br_locked);
+
+int prestera_port_pvid_set(struct prestera_port *port, u16 vid);
+
+bool prestera_netdev_check(const struct net_device *dev);
+
+int prestera_is_valid_mac_addr(struct prestera_port *port, const u8 *addr);
+
+bool prestera_port_is_lag_member(const struct prestera_port *port);
+int prestera_lag_id(struct prestera_switch *sw,
+ struct net_device *lag_dev, u16 *lag_id);
+
+struct prestera_lag *prestera_lag_by_id(struct prestera_switch *sw, u16 id);
+
+u16 prestera_port_lag_id(const struct prestera_port *port);
+
+struct prestera_mdb_entry *
+prestera_mdb_entry_create(struct prestera_switch *sw,
+ const unsigned char *addr, u16 vid);
+void prestera_mdb_entry_destroy(struct prestera_mdb_entry *mdb_entry);
+
+struct prestera_flood_domain *
+prestera_flood_domain_create(struct prestera_switch *sw);
+void prestera_flood_domain_destroy(struct prestera_flood_domain *flood_domain);
+
+int
+prestera_flood_domain_port_create(struct prestera_flood_domain *flood_domain,
+ struct net_device *dev,
+ u16 vid);
+void
+prestera_flood_domain_port_destroy(struct prestera_flood_domain_port *port);
+struct prestera_flood_domain_port *
+prestera_flood_domain_port_find(struct prestera_flood_domain *flood_domain,
+ struct net_device *dev, u16 vid);
+
+#endif /* _PRESTERA_H_ */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_acl.c b/drivers/net/ethernet/marvell/prestera/prestera_acl.c
new file mode 100644
index 000000000..cba89fda5
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_acl.c
@@ -0,0 +1,927 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2020-2021 Marvell International Ltd. All rights reserved */
+
+#include <linux/rhashtable.h>
+
+#include "prestera_acl.h"
+#include "prestera_flow.h"
+#include "prestera_hw.h"
+#include "prestera.h"
+
+#define ACL_KEYMASK_SIZE \
+ (sizeof(__be32) * __PRESTERA_ACL_RULE_MATCH_TYPE_MAX)
+
+struct prestera_acl {
+ struct prestera_switch *sw;
+ struct list_head vtcam_list;
+ struct list_head rules;
+ struct rhashtable ruleset_ht;
+ struct rhashtable acl_rule_entry_ht;
+ struct idr uid;
+};
+
+struct prestera_acl_ruleset_ht_key {
+ struct prestera_flow_block *block;
+ u32 chain_index;
+};
+
+struct prestera_acl_rule_entry {
+ struct rhash_head ht_node;
+ struct prestera_acl_rule_entry_key key;
+ u32 hw_id;
+ u32 vtcam_id;
+ struct {
+ struct {
+ u8 valid:1;
+ } accept, drop, trap;
+ struct {
+ u8 valid:1;
+ struct prestera_acl_action_police i;
+ } police;
+ struct {
+ struct prestera_acl_action_jump i;
+ u8 valid:1;
+ } jump;
+ struct {
+ u32 id;
+ struct prestera_counter_block *block;
+ } counter;
+ };
+};
+
+struct prestera_acl_ruleset {
+ struct rhash_head ht_node; /* Member of acl HT */
+ struct prestera_acl_ruleset_ht_key ht_key;
+ struct rhashtable rule_ht;
+ struct prestera_acl *acl;
+ struct {
+ u32 min;
+ u32 max;
+ } prio;
+ unsigned long rule_count;
+ refcount_t refcount;
+ void *keymask;
+ u32 vtcam_id;
+ u32 index;
+ u16 pcl_id;
+ bool offload;
+ bool ingress;
+};
+
+struct prestera_acl_vtcam {
+ struct list_head list;
+ __be32 keymask[__PRESTERA_ACL_RULE_MATCH_TYPE_MAX];
+ refcount_t refcount;
+ u32 id;
+ bool is_keymask_set;
+ u8 lookup;
+ u8 direction;
+};
+
+static const struct rhashtable_params prestera_acl_ruleset_ht_params = {
+ .key_len = sizeof(struct prestera_acl_ruleset_ht_key),
+ .key_offset = offsetof(struct prestera_acl_ruleset, ht_key),
+ .head_offset = offsetof(struct prestera_acl_ruleset, ht_node),
+ .automatic_shrinking = true,
+};
+
+static const struct rhashtable_params prestera_acl_rule_ht_params = {
+ .key_len = sizeof(unsigned long),
+ .key_offset = offsetof(struct prestera_acl_rule, cookie),
+ .head_offset = offsetof(struct prestera_acl_rule, ht_node),
+ .automatic_shrinking = true,
+};
+
+static const struct rhashtable_params __prestera_acl_rule_entry_ht_params = {
+ .key_offset = offsetof(struct prestera_acl_rule_entry, key),
+ .head_offset = offsetof(struct prestera_acl_rule_entry, ht_node),
+ .key_len = sizeof(struct prestera_acl_rule_entry_key),
+ .automatic_shrinking = true,
+};
+
+int prestera_acl_chain_to_client(u32 chain_index, bool ingress, u32 *client)
+{
+ static const u32 ingress_client_map[] = {
+ PRESTERA_HW_COUNTER_CLIENT_INGRESS_LOOKUP_0,
+ PRESTERA_HW_COUNTER_CLIENT_INGRESS_LOOKUP_1,
+ PRESTERA_HW_COUNTER_CLIENT_INGRESS_LOOKUP_2
+ };
+
+ if (!ingress) {
+ /* prestera supports only one chain on egress */
+ if (chain_index > 0)
+ return -EINVAL;
+
+ *client = PRESTERA_HW_COUNTER_CLIENT_EGRESS_LOOKUP;
+ return 0;
+ }
+
+ if (chain_index >= ARRAY_SIZE(ingress_client_map))
+ return -EINVAL;
+
+ *client = ingress_client_map[chain_index];
+ return 0;
+}
+
+static bool prestera_acl_chain_is_supported(u32 chain_index, bool ingress)
+{
+ if (!ingress)
+ /* prestera supports only one chain on egress */
+ return chain_index == 0;
+
+ return (chain_index & ~PRESTERA_ACL_CHAIN_MASK) == 0;
+}
+
+static struct prestera_acl_ruleset *
+prestera_acl_ruleset_create(struct prestera_acl *acl,
+ struct prestera_flow_block *block,
+ u32 chain_index)
+{
+ struct prestera_acl_ruleset *ruleset;
+ u32 uid = 0;
+ int err;
+
+ if (!prestera_acl_chain_is_supported(chain_index, block->ingress))
+ return ERR_PTR(-EINVAL);
+
+ ruleset = kzalloc(sizeof(*ruleset), GFP_KERNEL);
+ if (!ruleset)
+ return ERR_PTR(-ENOMEM);
+
+ ruleset->acl = acl;
+ ruleset->ingress = block->ingress;
+ ruleset->ht_key.block = block;
+ ruleset->ht_key.chain_index = chain_index;
+ refcount_set(&ruleset->refcount, 1);
+
+ err = rhashtable_init(&ruleset->rule_ht, &prestera_acl_rule_ht_params);
+ if (err)
+ goto err_rhashtable_init;
+
+ err = idr_alloc_u32(&acl->uid, NULL, &uid, U8_MAX, GFP_KERNEL);
+ if (err)
+ goto err_ruleset_create;
+
+ /* make pcl-id based on uid */
+ ruleset->pcl_id = PRESTERA_ACL_PCL_ID_MAKE((u8)uid, chain_index);
+ ruleset->index = uid;
+
+ ruleset->prio.min = UINT_MAX;
+ ruleset->prio.max = 0;
+
+ err = rhashtable_insert_fast(&acl->ruleset_ht, &ruleset->ht_node,
+ prestera_acl_ruleset_ht_params);
+ if (err)
+ goto err_ruleset_ht_insert;
+
+ return ruleset;
+
+err_ruleset_ht_insert:
+ idr_remove(&acl->uid, uid);
+err_ruleset_create:
+ rhashtable_destroy(&ruleset->rule_ht);
+err_rhashtable_init:
+ kfree(ruleset);
+ return ERR_PTR(err);
+}
+
+int prestera_acl_ruleset_keymask_set(struct prestera_acl_ruleset *ruleset,
+ void *keymask)
+{
+ ruleset->keymask = kmemdup(keymask, ACL_KEYMASK_SIZE, GFP_KERNEL);
+ if (!ruleset->keymask)
+ return -ENOMEM;
+
+ return 0;
+}
+
+int prestera_acl_ruleset_offload(struct prestera_acl_ruleset *ruleset)
+{
+ struct prestera_acl_iface iface;
+ u32 vtcam_id;
+ int dir;
+ int err;
+
+ dir = ruleset->ingress ?
+ PRESTERA_HW_VTCAM_DIR_INGRESS : PRESTERA_HW_VTCAM_DIR_EGRESS;
+
+ if (ruleset->offload)
+ return -EEXIST;
+
+ err = prestera_acl_vtcam_id_get(ruleset->acl,
+ ruleset->ht_key.chain_index,
+ dir,
+ ruleset->keymask, &vtcam_id);
+ if (err)
+ goto err_vtcam_create;
+
+ if (ruleset->ht_key.chain_index) {
+ /* for chain > 0, bind iface index to pcl-id to be able
+ * to jump from any other ruleset to this one using the index.
+ */
+ iface.index = ruleset->index;
+ iface.type = PRESTERA_ACL_IFACE_TYPE_INDEX;
+ err = prestera_hw_vtcam_iface_bind(ruleset->acl->sw, &iface,
+ vtcam_id, ruleset->pcl_id);
+ if (err)
+ goto err_ruleset_bind;
+ }
+
+ ruleset->vtcam_id = vtcam_id;
+ ruleset->offload = true;
+ return 0;
+
+err_ruleset_bind:
+ prestera_acl_vtcam_id_put(ruleset->acl, ruleset->vtcam_id);
+err_vtcam_create:
+ return err;
+}
+
+static void prestera_acl_ruleset_destroy(struct prestera_acl_ruleset *ruleset)
+{
+ struct prestera_acl *acl = ruleset->acl;
+ u8 uid = ruleset->pcl_id & PRESTERA_ACL_KEYMASK_PCL_ID_USER;
+ int err;
+
+ rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node,
+ prestera_acl_ruleset_ht_params);
+
+ if (ruleset->offload) {
+ if (ruleset->ht_key.chain_index) {
+ struct prestera_acl_iface iface = {
+ .type = PRESTERA_ACL_IFACE_TYPE_INDEX,
+ .index = ruleset->index
+ };
+ err = prestera_hw_vtcam_iface_unbind(acl->sw, &iface,
+ ruleset->vtcam_id);
+ WARN_ON(err);
+ }
+ WARN_ON(prestera_acl_vtcam_id_put(acl, ruleset->vtcam_id));
+ }
+
+ idr_remove(&acl->uid, uid);
+ rhashtable_destroy(&ruleset->rule_ht);
+ kfree(ruleset->keymask);
+ kfree(ruleset);
+}
+
+static struct prestera_acl_ruleset *
+__prestera_acl_ruleset_lookup(struct prestera_acl *acl,
+ struct prestera_flow_block *block,
+ u32 chain_index)
+{
+ struct prestera_acl_ruleset_ht_key ht_key;
+
+ memset(&ht_key, 0, sizeof(ht_key));
+ ht_key.block = block;
+ ht_key.chain_index = chain_index;
+ return rhashtable_lookup_fast(&acl->ruleset_ht, &ht_key,
+ prestera_acl_ruleset_ht_params);
+}
+
+struct prestera_acl_ruleset *
+prestera_acl_ruleset_lookup(struct prestera_acl *acl,
+ struct prestera_flow_block *block,
+ u32 chain_index)
+{
+ struct prestera_acl_ruleset *ruleset;
+
+ ruleset = __prestera_acl_ruleset_lookup(acl, block, chain_index);
+ if (!ruleset)
+ return ERR_PTR(-ENOENT);
+
+ refcount_inc(&ruleset->refcount);
+ return ruleset;
+}
+
+struct prestera_acl_ruleset *
+prestera_acl_ruleset_get(struct prestera_acl *acl,
+ struct prestera_flow_block *block,
+ u32 chain_index)
+{
+ struct prestera_acl_ruleset *ruleset;
+
+ ruleset = __prestera_acl_ruleset_lookup(acl, block, chain_index);
+ if (ruleset) {
+ refcount_inc(&ruleset->refcount);
+ return ruleset;
+ }
+
+ return prestera_acl_ruleset_create(acl, block, chain_index);
+}
+
+void prestera_acl_ruleset_put(struct prestera_acl_ruleset *ruleset)
+{
+ if (!refcount_dec_and_test(&ruleset->refcount))
+ return;
+
+ prestera_acl_ruleset_destroy(ruleset);
+}
+
+int prestera_acl_ruleset_bind(struct prestera_acl_ruleset *ruleset,
+ struct prestera_port *port)
+{
+ struct prestera_acl_iface iface = {
+ .type = PRESTERA_ACL_IFACE_TYPE_PORT,
+ .port = port
+ };
+
+ return prestera_hw_vtcam_iface_bind(port->sw, &iface, ruleset->vtcam_id,
+ ruleset->pcl_id);
+}
+
+int prestera_acl_ruleset_unbind(struct prestera_acl_ruleset *ruleset,
+ struct prestera_port *port)
+{
+ struct prestera_acl_iface iface = {
+ .type = PRESTERA_ACL_IFACE_TYPE_PORT,
+ .port = port
+ };
+
+ return prestera_hw_vtcam_iface_unbind(port->sw, &iface,
+ ruleset->vtcam_id);
+}
+
+static int prestera_acl_ruleset_block_bind(struct prestera_acl_ruleset *ruleset,
+ struct prestera_flow_block *block)
+{
+ struct prestera_flow_block_binding *binding;
+ int err;
+
+ block->ruleset_zero = ruleset;
+ list_for_each_entry(binding, &block->binding_list, list) {
+ err = prestera_acl_ruleset_bind(ruleset, binding->port);
+ if (err)
+ goto rollback;
+ }
+ return 0;
+
+rollback:
+ list_for_each_entry_continue_reverse(binding, &block->binding_list,
+ list)
+ err = prestera_acl_ruleset_unbind(ruleset, binding->port);
+ block->ruleset_zero = NULL;
+
+ return err;
+}
+
+static void
+prestera_acl_ruleset_block_unbind(struct prestera_acl_ruleset *ruleset,
+ struct prestera_flow_block *block)
+{
+ struct prestera_flow_block_binding *binding;
+
+ list_for_each_entry(binding, &block->binding_list, list)
+ prestera_acl_ruleset_unbind(ruleset, binding->port);
+ block->ruleset_zero = NULL;
+}
+
+static void
+prestera_acl_ruleset_prio_refresh(struct prestera_acl *acl,
+ struct prestera_acl_ruleset *ruleset)
+{
+ struct prestera_acl_rule *rule;
+
+ ruleset->prio.min = UINT_MAX;
+ ruleset->prio.max = 0;
+
+ list_for_each_entry(rule, &acl->rules, list) {
+ if (ruleset->ingress != rule->ruleset->ingress)
+ continue;
+ if (ruleset->ht_key.chain_index != rule->chain_index)
+ continue;
+
+ ruleset->prio.min = min(ruleset->prio.min, rule->priority);
+ ruleset->prio.max = max(ruleset->prio.max, rule->priority);
+ }
+}
+
+void
+prestera_acl_rule_keymask_pcl_id_set(struct prestera_acl_rule *rule, u16 pcl_id)
+{
+ struct prestera_acl_match *r_match = &rule->re_key.match;
+ __be16 pcl_id_mask = htons(PRESTERA_ACL_KEYMASK_PCL_ID);
+ __be16 pcl_id_key = htons(pcl_id);
+
+ rule_match_set(r_match->key, PCL_ID, pcl_id_key);
+ rule_match_set(r_match->mask, PCL_ID, pcl_id_mask);
+}
+
+struct prestera_acl_rule *
+prestera_acl_rule_lookup(struct prestera_acl_ruleset *ruleset,
+ unsigned long cookie)
+{
+ return rhashtable_lookup_fast(&ruleset->rule_ht, &cookie,
+ prestera_acl_rule_ht_params);
+}
+
+u32 prestera_acl_ruleset_index_get(const struct prestera_acl_ruleset *ruleset)
+{
+ return ruleset->index;
+}
+
+void prestera_acl_ruleset_prio_get(struct prestera_acl_ruleset *ruleset,
+ u32 *prio_min, u32 *prio_max)
+{
+ *prio_min = ruleset->prio.min;
+ *prio_max = ruleset->prio.max;
+}
+
+bool prestera_acl_ruleset_is_offload(struct prestera_acl_ruleset *ruleset)
+{
+ return ruleset->offload;
+}
+
+struct prestera_acl_rule *
+prestera_acl_rule_create(struct prestera_acl_ruleset *ruleset,
+ unsigned long cookie, u32 chain_index)
+{
+ struct prestera_acl_rule *rule;
+
+ rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+ if (!rule)
+ return ERR_PTR(-ENOMEM);
+
+ rule->ruleset = ruleset;
+ rule->cookie = cookie;
+ rule->chain_index = chain_index;
+
+ refcount_inc(&ruleset->refcount);
+
+ return rule;
+}
+
+void prestera_acl_rule_priority_set(struct prestera_acl_rule *rule,
+ u32 priority)
+{
+ rule->priority = priority;
+}
+
+void prestera_acl_rule_destroy(struct prestera_acl_rule *rule)
+{
+ if (rule->jump_ruleset)
+ /* release ruleset kept by jump action */
+ prestera_acl_ruleset_put(rule->jump_ruleset);
+
+ prestera_acl_ruleset_put(rule->ruleset);
+ kfree(rule);
+}
+
+static void prestera_acl_ruleset_prio_update(struct prestera_acl_ruleset *ruleset,
+ u32 prio)
+{
+ ruleset->prio.min = min(ruleset->prio.min, prio);
+ ruleset->prio.max = max(ruleset->prio.max, prio);
+}
+
+int prestera_acl_rule_add(struct prestera_switch *sw,
+ struct prestera_acl_rule *rule)
+{
+ int err;
+ struct prestera_acl_ruleset *ruleset = rule->ruleset;
+ struct prestera_flow_block *block = ruleset->ht_key.block;
+
+ /* try to add rule to hash table first */
+ err = rhashtable_insert_fast(&ruleset->rule_ht, &rule->ht_node,
+ prestera_acl_rule_ht_params);
+ if (err)
+ goto err_ht_insert;
+
+ prestera_acl_rule_keymask_pcl_id_set(rule, ruleset->pcl_id);
+ rule->re_arg.vtcam_id = ruleset->vtcam_id;
+ rule->re_key.prio = rule->priority;
+
+ rule->re = prestera_acl_rule_entry_find(sw->acl, &rule->re_key);
+ err = WARN_ON(rule->re) ? -EEXIST : 0;
+ if (err)
+ goto err_rule_add;
+
+ rule->re = prestera_acl_rule_entry_create(sw->acl, &rule->re_key,
+ &rule->re_arg);
+ err = !rule->re ? -EINVAL : 0;
+ if (err)
+ goto err_rule_add;
+
+ /* bind the block (all ports) to chain index 0, rest of
+ * the chains are bound to goto action
+ */
+ if (!ruleset->ht_key.chain_index && !ruleset->rule_count) {
+ err = prestera_acl_ruleset_block_bind(ruleset, block);
+ if (err)
+ goto err_acl_block_bind;
+ }
+
+ list_add_tail(&rule->list, &sw->acl->rules);
+ ruleset->rule_count++;
+ prestera_acl_ruleset_prio_update(ruleset, rule->priority);
+ return 0;
+
+err_acl_block_bind:
+ prestera_acl_rule_entry_destroy(sw->acl, rule->re);
+err_rule_add:
+ rule->re = NULL;
+ rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node,
+ prestera_acl_rule_ht_params);
+err_ht_insert:
+ return err;
+}
+
+void prestera_acl_rule_del(struct prestera_switch *sw,
+ struct prestera_acl_rule *rule)
+{
+ struct prestera_acl_ruleset *ruleset = rule->ruleset;
+ struct prestera_flow_block *block = ruleset->ht_key.block;
+
+ rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node,
+ prestera_acl_rule_ht_params);
+ ruleset->rule_count--;
+ list_del(&rule->list);
+
+ prestera_acl_rule_entry_destroy(sw->acl, rule->re);
+ prestera_acl_ruleset_prio_refresh(sw->acl, ruleset);
+
+ /* unbind block (all ports) */
+ if (!ruleset->ht_key.chain_index && !ruleset->rule_count)
+ prestera_acl_ruleset_block_unbind(ruleset, block);
+}
+
+int prestera_acl_rule_get_stats(struct prestera_acl *acl,
+ struct prestera_acl_rule *rule,
+ u64 *packets, u64 *bytes, u64 *last_use)
+{
+ u64 current_packets;
+ u64 current_bytes;
+ int err;
+
+ err = prestera_counter_stats_get(acl->sw->counter,
+ rule->re->counter.block,
+ rule->re->counter.id,
+ &current_packets, &current_bytes);
+ if (err)
+ return err;
+
+ *packets = current_packets;
+ *bytes = current_bytes;
+ *last_use = jiffies;
+
+ return 0;
+}
+
+struct prestera_acl_rule_entry *
+prestera_acl_rule_entry_find(struct prestera_acl *acl,
+ struct prestera_acl_rule_entry_key *key)
+{
+ return rhashtable_lookup_fast(&acl->acl_rule_entry_ht, key,
+ __prestera_acl_rule_entry_ht_params);
+}
+
+static int __prestera_acl_rule_entry2hw_del(struct prestera_switch *sw,
+ struct prestera_acl_rule_entry *e)
+{
+ return prestera_hw_vtcam_rule_del(sw, e->vtcam_id, e->hw_id);
+}
+
+static int __prestera_acl_rule_entry2hw_add(struct prestera_switch *sw,
+ struct prestera_acl_rule_entry *e)
+{
+ struct prestera_acl_hw_action_info act_hw[PRESTERA_ACL_RULE_ACTION_MAX];
+ int act_num;
+
+ memset(&act_hw, 0, sizeof(act_hw));
+ act_num = 0;
+
+ /* accept */
+ if (e->accept.valid) {
+ act_hw[act_num].id = PRESTERA_ACL_RULE_ACTION_ACCEPT;
+ act_num++;
+ }
+ /* drop */
+ if (e->drop.valid) {
+ act_hw[act_num].id = PRESTERA_ACL_RULE_ACTION_DROP;
+ act_num++;
+ }
+ /* trap */
+ if (e->trap.valid) {
+ act_hw[act_num].id = PRESTERA_ACL_RULE_ACTION_TRAP;
+ act_num++;
+ }
+ /* police */
+ if (e->police.valid) {
+ act_hw[act_num].id = PRESTERA_ACL_RULE_ACTION_POLICE;
+ act_hw[act_num].police = e->police.i;
+ act_num++;
+ }
+ /* jump */
+ if (e->jump.valid) {
+ act_hw[act_num].id = PRESTERA_ACL_RULE_ACTION_JUMP;
+ act_hw[act_num].jump = e->jump.i;
+ act_num++;
+ }
+ /* counter */
+ if (e->counter.block) {
+ act_hw[act_num].id = PRESTERA_ACL_RULE_ACTION_COUNT;
+ act_hw[act_num].count.id = e->counter.id;
+ act_num++;
+ }
+
+ return prestera_hw_vtcam_rule_add(sw, e->vtcam_id, e->key.prio,
+ e->key.match.key, e->key.match.mask,
+ act_hw, act_num, &e->hw_id);
+}
+
+static void
+__prestera_acl_rule_entry_act_destruct(struct prestera_switch *sw,
+ struct prestera_acl_rule_entry *e)
+{
+ /* counter */
+ prestera_counter_put(sw->counter, e->counter.block, e->counter.id);
+ /* police */
+ if (e->police.valid)
+ prestera_hw_policer_release(sw, e->police.i.id);
+}
+
+void prestera_acl_rule_entry_destroy(struct prestera_acl *acl,
+ struct prestera_acl_rule_entry *e)
+{
+ int ret;
+
+ rhashtable_remove_fast(&acl->acl_rule_entry_ht, &e->ht_node,
+ __prestera_acl_rule_entry_ht_params);
+
+ ret = __prestera_acl_rule_entry2hw_del(acl->sw, e);
+ WARN_ON(ret && ret != -ENODEV);
+
+ __prestera_acl_rule_entry_act_destruct(acl->sw, e);
+ kfree(e);
+}
+
+static int
+__prestera_acl_rule_entry_act_construct(struct prestera_switch *sw,
+ struct prestera_acl_rule_entry *e,
+ struct prestera_acl_rule_entry_arg *arg)
+{
+ int err;
+
+ /* accept */
+ e->accept.valid = arg->accept.valid;
+ /* drop */
+ e->drop.valid = arg->drop.valid;
+ /* trap */
+ e->trap.valid = arg->trap.valid;
+ /* jump */
+ e->jump.valid = arg->jump.valid;
+ e->jump.i = arg->jump.i;
+ /* police */
+ if (arg->police.valid) {
+ u8 type = arg->police.ingress ? PRESTERA_POLICER_TYPE_INGRESS :
+ PRESTERA_POLICER_TYPE_EGRESS;
+
+ err = prestera_hw_policer_create(sw, type, &e->police.i.id);
+ if (err)
+ goto err_out;
+
+ err = prestera_hw_policer_sr_tcm_set(sw, e->police.i.id,
+ arg->police.rate,
+ arg->police.burst);
+ if (err) {
+ prestera_hw_policer_release(sw, e->police.i.id);
+ goto err_out;
+ }
+ e->police.valid = arg->police.valid;
+ }
+ /* counter */
+ if (arg->count.valid) {
+ err = prestera_counter_get(sw->counter, arg->count.client,
+ &e->counter.block,
+ &e->counter.id);
+ if (err)
+ goto err_out;
+ }
+
+ return 0;
+
+err_out:
+ __prestera_acl_rule_entry_act_destruct(sw, e);
+ return -EINVAL;
+}
+
+struct prestera_acl_rule_entry *
+prestera_acl_rule_entry_create(struct prestera_acl *acl,
+ struct prestera_acl_rule_entry_key *key,
+ struct prestera_acl_rule_entry_arg *arg)
+{
+ struct prestera_acl_rule_entry *e;
+ int err;
+
+ e = kzalloc(sizeof(*e), GFP_KERNEL);
+ if (!e)
+ goto err_kzalloc;
+
+ memcpy(&e->key, key, sizeof(*key));
+ e->vtcam_id = arg->vtcam_id;
+ err = __prestera_acl_rule_entry_act_construct(acl->sw, e, arg);
+ if (err)
+ goto err_act_construct;
+
+ err = __prestera_acl_rule_entry2hw_add(acl->sw, e);
+ if (err)
+ goto err_hw_add;
+
+ err = rhashtable_insert_fast(&acl->acl_rule_entry_ht, &e->ht_node,
+ __prestera_acl_rule_entry_ht_params);
+ if (err)
+ goto err_ht_insert;
+
+ return e;
+
+err_ht_insert:
+ WARN_ON(__prestera_acl_rule_entry2hw_del(acl->sw, e));
+err_hw_add:
+ __prestera_acl_rule_entry_act_destruct(acl->sw, e);
+err_act_construct:
+ kfree(e);
+err_kzalloc:
+ return NULL;
+}
+
+static int __prestera_acl_vtcam_id_try_fit(struct prestera_acl *acl, u8 lookup,
+ void *keymask, u32 *vtcam_id)
+{
+ struct prestera_acl_vtcam *vtcam;
+ int i;
+
+ list_for_each_entry(vtcam, &acl->vtcam_list, list) {
+ if (lookup != vtcam->lookup)
+ continue;
+
+ if (!keymask && !vtcam->is_keymask_set)
+ goto vtcam_found;
+
+ if (!(keymask && vtcam->is_keymask_set))
+ continue;
+
+ /* try to fit with vtcam keymask */
+ for (i = 0; i < __PRESTERA_ACL_RULE_MATCH_TYPE_MAX; i++) {
+ __be32 __keymask = ((__be32 *)keymask)[i];
+
+ if (!__keymask)
+				/* the requested keymask does not use this field */
+ continue;
+
+ if (__keymask & ~vtcam->keymask[i])
+ /* keymask does not fit the vtcam keymask */
+ break;
+ }
+
+ if (i == __PRESTERA_ACL_RULE_MATCH_TYPE_MAX)
+ /* keymask fits vtcam keymask, return it */
+ goto vtcam_found;
+ }
+
+ /* nothing is found */
+ return -ENOENT;
+
+vtcam_found:
+ refcount_inc(&vtcam->refcount);
+ *vtcam_id = vtcam->id;
+ return 0;
+}
+
+int prestera_acl_vtcam_id_get(struct prestera_acl *acl, u8 lookup, u8 dir,
+ void *keymask, u32 *vtcam_id)
+{
+ struct prestera_acl_vtcam *vtcam;
+ u32 new_vtcam_id;
+ int err;
+
+	/* find a vtcam that suits the keymask. We do not expect to have
+	 * a big number of vtcams, so a plain list is fine for now.
+	 */
+ list_for_each_entry(vtcam, &acl->vtcam_list, list) {
+ if (lookup != vtcam->lookup ||
+ dir != vtcam->direction)
+ continue;
+
+ if (!keymask && !vtcam->is_keymask_set) {
+ refcount_inc(&vtcam->refcount);
+ goto vtcam_found;
+ }
+
+ if (keymask && vtcam->is_keymask_set &&
+ !memcmp(keymask, vtcam->keymask, sizeof(vtcam->keymask))) {
+ refcount_inc(&vtcam->refcount);
+ goto vtcam_found;
+ }
+ }
+
+ /* vtcam not found, try to create new one */
+ vtcam = kzalloc(sizeof(*vtcam), GFP_KERNEL);
+ if (!vtcam)
+ return -ENOMEM;
+
+ err = prestera_hw_vtcam_create(acl->sw, lookup, keymask, &new_vtcam_id,
+ dir);
+ if (err) {
+ kfree(vtcam);
+
+ /* cannot create new, try to fit into existing vtcam */
+ if (__prestera_acl_vtcam_id_try_fit(acl, lookup,
+ keymask, &new_vtcam_id))
+ return err;
+
+ *vtcam_id = new_vtcam_id;
+ return 0;
+ }
+
+ vtcam->direction = dir;
+ vtcam->id = new_vtcam_id;
+ vtcam->lookup = lookup;
+ if (keymask) {
+ memcpy(vtcam->keymask, keymask, sizeof(vtcam->keymask));
+ vtcam->is_keymask_set = true;
+ }
+ refcount_set(&vtcam->refcount, 1);
+ list_add_rcu(&vtcam->list, &acl->vtcam_list);
+
+vtcam_found:
+ *vtcam_id = vtcam->id;
+ return 0;
+}
+
+int prestera_acl_vtcam_id_put(struct prestera_acl *acl, u32 vtcam_id)
+{
+ struct prestera_acl_vtcam *vtcam;
+ int err;
+
+ list_for_each_entry(vtcam, &acl->vtcam_list, list) {
+ if (vtcam_id != vtcam->id)
+ continue;
+
+ if (!refcount_dec_and_test(&vtcam->refcount))
+ return 0;
+
+ err = prestera_hw_vtcam_destroy(acl->sw, vtcam->id);
+ if (err && err != -ENODEV) {
+ refcount_set(&vtcam->refcount, 1);
+ return err;
+ }
+
+ list_del(&vtcam->list);
+ kfree(vtcam);
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
+int prestera_acl_init(struct prestera_switch *sw)
+{
+ struct prestera_acl *acl;
+ int err;
+
+ acl = kzalloc(sizeof(*acl), GFP_KERNEL);
+ if (!acl)
+ return -ENOMEM;
+
+ acl->sw = sw;
+ INIT_LIST_HEAD(&acl->rules);
+ INIT_LIST_HEAD(&acl->vtcam_list);
+ idr_init(&acl->uid);
+
+ err = rhashtable_init(&acl->acl_rule_entry_ht,
+ &__prestera_acl_rule_entry_ht_params);
+ if (err)
+ goto err_acl_rule_entry_ht_init;
+
+ err = rhashtable_init(&acl->ruleset_ht,
+ &prestera_acl_ruleset_ht_params);
+ if (err)
+ goto err_ruleset_ht_init;
+
+ sw->acl = acl;
+
+ return 0;
+
+err_ruleset_ht_init:
+ rhashtable_destroy(&acl->acl_rule_entry_ht);
+err_acl_rule_entry_ht_init:
+ kfree(acl);
+ return err;
+}
+
+void prestera_acl_fini(struct prestera_switch *sw)
+{
+ struct prestera_acl *acl = sw->acl;
+
+ WARN_ON(!idr_is_empty(&acl->uid));
+ idr_destroy(&acl->uid);
+
+ WARN_ON(!list_empty(&acl->vtcam_list));
+ WARN_ON(!list_empty(&acl->rules));
+
+ rhashtable_destroy(&acl->ruleset_ht);
+ rhashtable_destroy(&acl->acl_rule_entry_ht);
+
+ kfree(acl);
+}
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_acl.h b/drivers/net/ethernet/marvell/prestera/prestera_acl.h
new file mode 100644
index 000000000..a35cc0609
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_acl.h
@@ -0,0 +1,209 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2020-2021 Marvell International Ltd. All rights reserved. */
+
+#ifndef _PRESTERA_ACL_H_
+#define _PRESTERA_ACL_H_
+
+#include <linux/types.h>
+#include "prestera_counter.h"
+
+#define PRESTERA_ACL_KEYMASK_PCL_ID 0x3FF
+#define PRESTERA_ACL_KEYMASK_PCL_ID_USER \
+ (PRESTERA_ACL_KEYMASK_PCL_ID & 0x00FF)
+#define PRESTERA_ACL_KEYMASK_PCL_ID_CHAIN \
+ (PRESTERA_ACL_KEYMASK_PCL_ID & 0xFF00)
+#define PRESTERA_ACL_CHAIN_MASK \
+ (PRESTERA_ACL_KEYMASK_PCL_ID >> 8)
+
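+/* pcl-id layout: ruleset uid in bits [7:0], chain index in bits [9:8] */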
+#define PRESTERA_ACL_PCL_ID_MAKE(uid, chain_id) \
+ (((uid) & PRESTERA_ACL_KEYMASK_PCL_ID_USER) | \
+ (((chain_id) << 8) & PRESTERA_ACL_KEYMASK_PCL_ID_CHAIN))
+
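+/* copy a value into the key/mask slot selected by the match type */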
+#define rule_match_set_n(match_p, type, val_p, size) \
+ memcpy(&(match_p)[PRESTERA_ACL_RULE_MATCH_TYPE_##type], \
+ val_p, size)
+#define rule_match_set(match_p, type, val) \
+ memcpy(&(match_p)[PRESTERA_ACL_RULE_MATCH_TYPE_##type], \
+ &(val), sizeof(val))
+
+enum prestera_acl_match_type {
+ PRESTERA_ACL_RULE_MATCH_TYPE_PCL_ID,
+ PRESTERA_ACL_RULE_MATCH_TYPE_ETH_TYPE,
+ PRESTERA_ACL_RULE_MATCH_TYPE_ETH_DMAC_0,
+ PRESTERA_ACL_RULE_MATCH_TYPE_ETH_DMAC_1,
+ PRESTERA_ACL_RULE_MATCH_TYPE_ETH_SMAC_0,
+ PRESTERA_ACL_RULE_MATCH_TYPE_ETH_SMAC_1,
+ PRESTERA_ACL_RULE_MATCH_TYPE_IP_PROTO,
+ PRESTERA_ACL_RULE_MATCH_TYPE_SYS_PORT,
+ PRESTERA_ACL_RULE_MATCH_TYPE_SYS_DEV,
+ PRESTERA_ACL_RULE_MATCH_TYPE_IP_SRC,
+ PRESTERA_ACL_RULE_MATCH_TYPE_IP_DST,
+ PRESTERA_ACL_RULE_MATCH_TYPE_L4_PORT_SRC,
+ PRESTERA_ACL_RULE_MATCH_TYPE_L4_PORT_DST,
+ PRESTERA_ACL_RULE_MATCH_TYPE_L4_PORT_RANGE_SRC,
+ PRESTERA_ACL_RULE_MATCH_TYPE_L4_PORT_RANGE_DST,
+ PRESTERA_ACL_RULE_MATCH_TYPE_VLAN_ID,
+ PRESTERA_ACL_RULE_MATCH_TYPE_VLAN_TPID,
+ PRESTERA_ACL_RULE_MATCH_TYPE_ICMP_TYPE,
+ PRESTERA_ACL_RULE_MATCH_TYPE_ICMP_CODE,
+
+ __PRESTERA_ACL_RULE_MATCH_TYPE_MAX
+};
+
+enum prestera_acl_rule_action {
+ PRESTERA_ACL_RULE_ACTION_ACCEPT = 0,
+ PRESTERA_ACL_RULE_ACTION_DROP = 1,
+ PRESTERA_ACL_RULE_ACTION_TRAP = 2,
+ PRESTERA_ACL_RULE_ACTION_JUMP = 5,
+ PRESTERA_ACL_RULE_ACTION_COUNT = 7,
+ PRESTERA_ACL_RULE_ACTION_POLICE = 8,
+
+ PRESTERA_ACL_RULE_ACTION_MAX
+};
+
+enum {
+ PRESTERA_ACL_IFACE_TYPE_PORT,
+ PRESTERA_ACL_IFACE_TYPE_INDEX
+};
+
+struct prestera_acl_match {
+ __be32 key[__PRESTERA_ACL_RULE_MATCH_TYPE_MAX];
+ __be32 mask[__PRESTERA_ACL_RULE_MATCH_TYPE_MAX];
+};
+
+struct prestera_acl_action_jump {
+ u32 index;
+};
+
+struct prestera_acl_action_police {
+ u32 id;
+};
+
+struct prestera_acl_action_count {
+ u32 id;
+};
+
+struct prestera_acl_rule_entry_key {
+ u32 prio;
+ struct prestera_acl_match match;
+};
+
+struct prestera_acl_hw_action_info {
+ enum prestera_acl_rule_action id;
+ union {
+ struct prestera_acl_action_police police;
+ struct prestera_acl_action_count count;
+ struct prestera_acl_action_jump jump;
+ };
+};
+
+/* This struct (arg) is used only to pass parameters to
+ * acl_rule_entry_create. It must be flat. It can contain object keys, which
+ * are resolved to object links before being saved in the acl_rule_entry
+ * struct.
+ */
+struct prestera_acl_rule_entry_arg {
+ u32 vtcam_id;
+ struct {
+ struct {
+ u8 valid:1;
+ } accept, drop, trap;
+ struct {
+ struct prestera_acl_action_jump i;
+ u8 valid:1;
+ } jump;
+ struct {
+ u8 valid:1;
+ u64 rate;
+ u64 burst;
+ bool ingress;
+ } police;
+ struct {
+ u8 valid:1;
+ u32 client;
+ } count;
+ };
+};
+
+struct prestera_acl_rule {
+ struct rhash_head ht_node; /* Member of acl HT */
+ struct list_head list;
+ struct prestera_acl_ruleset *ruleset;
+ struct prestera_acl_ruleset *jump_ruleset;
+ unsigned long cookie;
+ u32 chain_index;
+ u32 priority;
+ struct prestera_acl_rule_entry_key re_key;
+ struct prestera_acl_rule_entry_arg re_arg;
+ struct prestera_acl_rule_entry *re;
+};
+
+struct prestera_acl_iface {
+ union {
+ struct prestera_port *port;
+ u32 index;
+ };
+ u8 type;
+};
+
+struct prestera_acl;
+struct prestera_switch;
+struct prestera_flow_block;
+
+int prestera_acl_init(struct prestera_switch *sw);
+void prestera_acl_fini(struct prestera_switch *sw);
+
+struct prestera_acl_rule *
+prestera_acl_rule_create(struct prestera_acl_ruleset *ruleset,
+ unsigned long cookie, u32 chain_index);
+void prestera_acl_rule_priority_set(struct prestera_acl_rule *rule,
+ u32 priority);
+void prestera_acl_rule_destroy(struct prestera_acl_rule *rule);
+struct prestera_acl_rule *
+prestera_acl_rule_lookup(struct prestera_acl_ruleset *ruleset,
+ unsigned long cookie);
+int prestera_acl_rule_add(struct prestera_switch *sw,
+ struct prestera_acl_rule *rule);
+void prestera_acl_rule_del(struct prestera_switch *sw,
+ struct prestera_acl_rule *rule);
+int prestera_acl_rule_get_stats(struct prestera_acl *acl,
+ struct prestera_acl_rule *rule,
+ u64 *packets, u64 *bytes, u64 *last_use);
+struct prestera_acl_rule_entry *
+prestera_acl_rule_entry_find(struct prestera_acl *acl,
+ struct prestera_acl_rule_entry_key *key);
+void prestera_acl_rule_entry_destroy(struct prestera_acl *acl,
+ struct prestera_acl_rule_entry *e);
+struct prestera_acl_rule_entry *
+prestera_acl_rule_entry_create(struct prestera_acl *acl,
+ struct prestera_acl_rule_entry_key *key,
+ struct prestera_acl_rule_entry_arg *arg);
+struct prestera_acl_ruleset *
+prestera_acl_ruleset_get(struct prestera_acl *acl,
+ struct prestera_flow_block *block,
+ u32 chain_index);
+struct prestera_acl_ruleset *
+prestera_acl_ruleset_lookup(struct prestera_acl *acl,
+ struct prestera_flow_block *block,
+ u32 chain_index);
+int prestera_acl_ruleset_keymask_set(struct prestera_acl_ruleset *ruleset,
+ void *keymask);
+bool prestera_acl_ruleset_is_offload(struct prestera_acl_ruleset *ruleset);
+int prestera_acl_ruleset_offload(struct prestera_acl_ruleset *ruleset);
+void prestera_acl_ruleset_put(struct prestera_acl_ruleset *ruleset);
+int prestera_acl_ruleset_bind(struct prestera_acl_ruleset *ruleset,
+ struct prestera_port *port);
+int prestera_acl_ruleset_unbind(struct prestera_acl_ruleset *ruleset,
+ struct prestera_port *port);
+u32 prestera_acl_ruleset_index_get(const struct prestera_acl_ruleset *ruleset);
+void prestera_acl_ruleset_prio_get(struct prestera_acl_ruleset *ruleset,
+ u32 *prio_min, u32 *prio_max);
+void
+prestera_acl_rule_keymask_pcl_id_set(struct prestera_acl_rule *rule,
+ u16 pcl_id);
+
+int prestera_acl_vtcam_id_get(struct prestera_acl *acl, u8 lookup, u8 dir,
+ void *keymask, u32 *vtcam_id);
+int prestera_acl_vtcam_id_put(struct prestera_acl *acl, u32 vtcam_id);
+int prestera_acl_chain_to_client(u32 chain_index, bool ingress, u32 *client);
+
+#endif /* _PRESTERA_ACL_H_ */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_counter.c b/drivers/net/ethernet/marvell/prestera/prestera_counter.c
new file mode 100644
index 000000000..4cd53a2da
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_counter.c
@@ -0,0 +1,475 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2021 Marvell International Ltd. All rights reserved */
+
+#include "prestera.h"
+#include "prestera_hw.h"
+#include "prestera_acl.h"
+#include "prestera_counter.h"
+
+#define COUNTER_POLL_TIME (msecs_to_jiffies(1000))
+#define COUNTER_RESCHED_TIME (msecs_to_jiffies(50))
+#define COUNTER_BULK_SIZE (256)
+
+struct prestera_counter {
+ struct prestera_switch *sw;
+ struct delayed_work stats_dw;
+ struct mutex mtx; /* protect block_list */
+ struct prestera_counter_block **block_list;
+ u32 total_read;
+ u32 block_list_len;
+ u32 curr_idx;
+ bool is_fetching;
+};
+
+struct prestera_counter_block {
+ struct list_head list;
+ u32 id;
+ u32 offset;
+ u32 num_counters;
+ u32 client;
+ struct idr counter_idr;
+ refcount_t refcnt;
+ struct mutex mtx; /* protect stats and counter_idr */
+ struct prestera_counter_stats *stats;
+ u8 *counter_flag;
+ bool is_updating;
+ bool full;
+};
+
+enum {
+ COUNTER_FLAG_READY = 0,
+ COUNTER_FLAG_INVALID = 1
+};
+
+static bool
+prestera_counter_is_ready(struct prestera_counter_block *block, u32 id)
+{
+ return block->counter_flag[id - block->offset] == COUNTER_FLAG_READY;
+}
+
+static void prestera_counter_lock(struct prestera_counter *counter)
+{
+ mutex_lock(&counter->mtx);
+}
+
+static void prestera_counter_unlock(struct prestera_counter *counter)
+{
+ mutex_unlock(&counter->mtx);
+}
+
+static void prestera_counter_block_lock(struct prestera_counter_block *block)
+{
+ mutex_lock(&block->mtx);
+}
+
+static void prestera_counter_block_unlock(struct prestera_counter_block *block)
+{
+ mutex_unlock(&block->mtx);
+}
+
+static bool prestera_counter_block_incref(struct prestera_counter_block *block)
+{
+ return refcount_inc_not_zero(&block->refcnt);
+}
+
+static bool prestera_counter_block_decref(struct prestera_counter_block *block)
+{
+ return refcount_dec_and_test(&block->refcnt);
+}
+
+/* must be called under prestera_counter_block_lock() */
+static void prestera_counter_stats_clear(struct prestera_counter_block *block,
+ u32 counter_id)
+{
+ memset(&block->stats[counter_id - block->offset], 0,
+ sizeof(*block->stats));
+}
+
+static struct prestera_counter_block *
+prestera_counter_block_lookup_not_full(struct prestera_counter *counter,
+ u32 client)
+{
+ u32 i;
+
+ prestera_counter_lock(counter);
+ for (i = 0; i < counter->block_list_len; i++) {
+ if (counter->block_list[i] &&
+ counter->block_list[i]->client == client &&
+ !counter->block_list[i]->full &&
+ prestera_counter_block_incref(counter->block_list[i])) {
+ prestera_counter_unlock(counter);
+ return counter->block_list[i];
+ }
+ }
+ prestera_counter_unlock(counter);
+
+ return NULL;
+}
+
+static int prestera_counter_block_list_add(struct prestera_counter *counter,
+ struct prestera_counter_block *block)
+{
+ struct prestera_counter_block **arr;
+ u32 i;
+
+ prestera_counter_lock(counter);
+
+ for (i = 0; i < counter->block_list_len; i++) {
+ if (counter->block_list[i])
+ continue;
+
+ counter->block_list[i] = block;
+ prestera_counter_unlock(counter);
+ return 0;
+ }
+
+ arr = krealloc(counter->block_list, (counter->block_list_len + 1) *
+ sizeof(*counter->block_list), GFP_KERNEL);
+ if (!arr) {
+ prestera_counter_unlock(counter);
+ return -ENOMEM;
+ }
+
+ counter->block_list = arr;
+ counter->block_list[counter->block_list_len] = block;
+ counter->block_list_len++;
+ prestera_counter_unlock(counter);
+ return 0;
+}
+
+static struct prestera_counter_block *
+prestera_counter_block_get(struct prestera_counter *counter, u32 client)
+{
+ struct prestera_counter_block *block;
+ int err;
+
+ block = prestera_counter_block_lookup_not_full(counter, client);
+ if (block)
+ return block;
+
+ block = kzalloc(sizeof(*block), GFP_KERNEL);
+ if (!block)
+ return ERR_PTR(-ENOMEM);
+
+ err = prestera_hw_counter_block_get(counter->sw, client,
+ &block->id, &block->offset,
+ &block->num_counters);
+ if (err)
+ goto err_block;
+
+ block->stats = kcalloc(block->num_counters,
+ sizeof(*block->stats), GFP_KERNEL);
+ if (!block->stats) {
+ err = -ENOMEM;
+ goto err_stats;
+ }
+
+ block->counter_flag = kcalloc(block->num_counters,
+ sizeof(*block->counter_flag),
+ GFP_KERNEL);
+ if (!block->counter_flag) {
+ err = -ENOMEM;
+ goto err_flag;
+ }
+
+ block->client = client;
+ mutex_init(&block->mtx);
+ refcount_set(&block->refcnt, 1);
+ idr_init_base(&block->counter_idr, block->offset);
+
+ err = prestera_counter_block_list_add(counter, block);
+ if (err)
+ goto err_list_add;
+
+ return block;
+
+err_list_add:
+ idr_destroy(&block->counter_idr);
+ mutex_destroy(&block->mtx);
+ kfree(block->counter_flag);
+err_flag:
+ kfree(block->stats);
+err_stats:
+ prestera_hw_counter_block_release(counter->sw, block->id);
+err_block:
+ kfree(block);
+ return ERR_PTR(err);
+}
+
+static void prestera_counter_block_put(struct prestera_counter *counter,
+ struct prestera_counter_block *block)
+{
+ u32 i;
+
+ if (!prestera_counter_block_decref(block))
+ return;
+
+ prestera_counter_lock(counter);
+ for (i = 0; i < counter->block_list_len; i++) {
+ if (counter->block_list[i] &&
+ counter->block_list[i]->id == block->id) {
+ counter->block_list[i] = NULL;
+ break;
+ }
+ }
+ prestera_counter_unlock(counter);
+
+ WARN_ON(!idr_is_empty(&block->counter_idr));
+
+ prestera_hw_counter_block_release(counter->sw, block->id);
+ idr_destroy(&block->counter_idr);
+ mutex_destroy(&block->mtx);
+ kfree(block->stats);
+ kfree(block);
+}
+
+static int prestera_counter_get_vacant(struct prestera_counter_block *block,
+ u32 *id)
+{
+ int free_id;
+
+ if (block->full)
+ return -ENOSPC;
+
+ prestera_counter_block_lock(block);
+ free_id = idr_alloc_cyclic(&block->counter_idr, NULL, block->offset,
+ block->offset + block->num_counters,
+ GFP_KERNEL);
+ if (free_id < 0) {
+ if (free_id == -ENOSPC)
+ block->full = true;
+
+ prestera_counter_block_unlock(block);
+ return free_id;
+ }
+ *id = free_id;
+ prestera_counter_block_unlock(block);
+
+ return 0;
+}
+
+int prestera_counter_get(struct prestera_counter *counter, u32 client,
+ struct prestera_counter_block **bl, u32 *counter_id)
+{
+ struct prestera_counter_block *block;
+ int err;
+ u32 id;
+
+get_next_block:
+ block = prestera_counter_block_get(counter, client);
+ if (IS_ERR(block))
+ return PTR_ERR(block);
+
+ err = prestera_counter_get_vacant(block, &id);
+ if (err) {
+ prestera_counter_block_put(counter, block);
+
+ if (err == -ENOSPC)
+ goto get_next_block;
+
+ return err;
+ }
+
+ prestera_counter_block_lock(block);
+ if (block->is_updating)
+ block->counter_flag[id - block->offset] = COUNTER_FLAG_INVALID;
+ prestera_counter_block_unlock(block);
+
+ *counter_id = id;
+ *bl = block;
+
+ return 0;
+}
+
+void prestera_counter_put(struct prestera_counter *counter,
+ struct prestera_counter_block *block, u32 counter_id)
+{
+ if (!block)
+ return;
+
+ prestera_counter_block_lock(block);
+ idr_remove(&block->counter_idr, counter_id);
+ block->full = false;
+ prestera_counter_stats_clear(block, counter_id);
+ prestera_counter_block_unlock(block);
+
+ prestera_hw_counter_clear(counter->sw, block->id, counter_id);
+ prestera_counter_block_put(counter, block);
+}
+
+static u32 prestera_counter_block_idx_next(struct prestera_counter *counter,
+ u32 curr_idx)
+{
+ u32 idx, i, start = curr_idx + 1;
+
+ prestera_counter_lock(counter);
+ for (i = 0; i < counter->block_list_len; i++) {
+ idx = (start + i) % counter->block_list_len;
+ if (!counter->block_list[idx])
+ continue;
+
+ prestera_counter_unlock(counter);
+ return idx;
+ }
+ prestera_counter_unlock(counter);
+
+ return 0;
+}
+
+static struct prestera_counter_block *
+prestera_counter_block_get_by_idx(struct prestera_counter *counter, u32 idx)
+{
+ if (idx >= counter->block_list_len)
+ return NULL;
+
+ prestera_counter_lock(counter);
+
+ if (!counter->block_list[idx] ||
+ !prestera_counter_block_incref(counter->block_list[idx])) {
+ prestera_counter_unlock(counter);
+ return NULL;
+ }
+
+ prestera_counter_unlock(counter);
+ return counter->block_list[idx];
+}
+
+static void prestera_counter_stats_work(struct work_struct *work)
+{
+ struct delayed_work *dl_work =
+ container_of(work, struct delayed_work, work);
+ struct prestera_counter *counter =
+ container_of(dl_work, struct prestera_counter, stats_dw);
+ struct prestera_counter_block *block;
+ u32 resched_time = COUNTER_POLL_TIME;
+ u32 count = COUNTER_BULK_SIZE;
+ bool done = false;
+ int err;
+ u32 i;
+
+ block = prestera_counter_block_get_by_idx(counter, counter->curr_idx);
+ if (!block) {
+ if (counter->is_fetching)
+ goto abort;
+
+ goto next;
+ }
+
+ if (!counter->is_fetching) {
+ err = prestera_hw_counter_trigger(counter->sw, block->id);
+ if (err)
+ goto abort;
+
+ prestera_counter_block_lock(block);
+ block->is_updating = true;
+ prestera_counter_block_unlock(block);
+
+ counter->is_fetching = true;
+ counter->total_read = 0;
+ resched_time = COUNTER_RESCHED_TIME;
+ goto resched;
+ }
+
+ prestera_counter_block_lock(block);
+ err = prestera_hw_counters_get(counter->sw, counter->total_read,
+ &count, &done,
+ &block->stats[counter->total_read]);
+ prestera_counter_block_unlock(block);
+ if (err)
+ goto abort;
+
+ counter->total_read += count;
+ if (!done || counter->total_read < block->num_counters) {
+ resched_time = COUNTER_RESCHED_TIME;
+ goto resched;
+ }
+
+ for (i = 0; i < block->num_counters; i++) {
+ if (block->counter_flag[i] == COUNTER_FLAG_INVALID) {
+ prestera_counter_block_lock(block);
+ block->counter_flag[i] = COUNTER_FLAG_READY;
+ memset(&block->stats[i], 0, sizeof(*block->stats));
+ prestera_counter_block_unlock(block);
+ }
+ }
+
+ prestera_counter_block_lock(block);
+ block->is_updating = false;
+ prestera_counter_block_unlock(block);
+
+ goto next;
+abort:
+ prestera_hw_counter_abort(counter->sw);
+next:
+ counter->is_fetching = false;
+ counter->curr_idx =
+ prestera_counter_block_idx_next(counter, counter->curr_idx);
+resched:
+ if (block)
+ prestera_counter_block_put(counter, block);
+
+ schedule_delayed_work(&counter->stats_dw, resched_time);
+}
+
+/* This function may be called without rtnl_lock() held,
+ * so take care when changing anything here.
+ */
+int prestera_counter_stats_get(struct prestera_counter *counter,
+ struct prestera_counter_block *block,
+ u32 counter_id, u64 *packets, u64 *bytes)
+{
+ if (!block || !prestera_counter_is_ready(block, counter_id)) {
+ *packets = 0;
+ *bytes = 0;
+ return 0;
+ }
+
+ prestera_counter_block_lock(block);
+ *packets = block->stats[counter_id - block->offset].packets;
+ *bytes = block->stats[counter_id - block->offset].bytes;
+
+ prestera_counter_stats_clear(block, counter_id);
+ prestera_counter_block_unlock(block);
+
+ return 0;
+}
+
+int prestera_counter_init(struct prestera_switch *sw)
+{
+ struct prestera_counter *counter;
+
+ counter = kzalloc(sizeof(*counter), GFP_KERNEL);
+ if (!counter)
+ return -ENOMEM;
+
+ counter->block_list = kzalloc(sizeof(*counter->block_list), GFP_KERNEL);
+ if (!counter->block_list) {
+ kfree(counter);
+ return -ENOMEM;
+ }
+
+ mutex_init(&counter->mtx);
+ counter->block_list_len = 1;
+ counter->sw = sw;
+ sw->counter = counter;
+
+ INIT_DELAYED_WORK(&counter->stats_dw, prestera_counter_stats_work);
+ schedule_delayed_work(&counter->stats_dw, COUNTER_POLL_TIME);
+
+ return 0;
+}
+
+void prestera_counter_fini(struct prestera_switch *sw)
+{
+ struct prestera_counter *counter = sw->counter;
+ u32 i;
+
+ cancel_delayed_work_sync(&counter->stats_dw);
+
+ for (i = 0; i < counter->block_list_len; i++)
+ WARN_ON(counter->block_list[i]);
+
+ mutex_destroy(&counter->mtx);
+ kfree(counter->block_list);
+ kfree(counter);
+}
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_counter.h b/drivers/net/ethernet/marvell/prestera/prestera_counter.h
new file mode 100644
index 000000000..ad6b73907
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_counter.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2021 Marvell International Ltd. All rights reserved. */
+
+#ifndef _PRESTERA_COUNTER_H_
+#define _PRESTERA_COUNTER_H_
+
+#include <linux/types.h>
+
+struct prestera_counter_stats {
+ u64 packets;
+ u64 bytes;
+};
+
+struct prestera_switch;
+struct prestera_counter;
+struct prestera_counter_block;
+
+int prestera_counter_init(struct prestera_switch *sw);
+void prestera_counter_fini(struct prestera_switch *sw);
+
+int prestera_counter_get(struct prestera_counter *counter, u32 client,
+ struct prestera_counter_block **block,
+ u32 *counter_id);
+void prestera_counter_put(struct prestera_counter *counter,
+ struct prestera_counter_block *block, u32 counter_id);
+int prestera_counter_stats_get(struct prestera_counter *counter,
+ struct prestera_counter_block *block,
+ u32 counter_id, u64 *packets, u64 *bytes);
+
+#endif /* _PRESTERA_COUNTER_H_ */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_devlink.c b/drivers/net/ethernet/marvell/prestera/prestera_devlink.c
new file mode 100644
index 000000000..2a4c9df4e
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_devlink.c
@@ -0,0 +1,598 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved */
+
+#include <net/devlink.h>
+
+#include "prestera_devlink.h"
+#include "prestera_hw.h"
+
+/* All driver-specific traps must be documented in
+ * Documentation/networking/devlink/prestera.rst
+ */
+enum {
+ DEVLINK_PRESTERA_TRAP_ID_BASE = DEVLINK_TRAP_GENERIC_ID_MAX,
+ DEVLINK_PRESTERA_TRAP_ID_ARP_BC,
+ DEVLINK_PRESTERA_TRAP_ID_IS_IS,
+ DEVLINK_PRESTERA_TRAP_ID_OSPF,
+ DEVLINK_PRESTERA_TRAP_ID_IP_BC_MAC,
+ DEVLINK_PRESTERA_TRAP_ID_ROUTER_MC,
+ DEVLINK_PRESTERA_TRAP_ID_VRRP,
+ DEVLINK_PRESTERA_TRAP_ID_DHCP,
+ DEVLINK_PRESTERA_TRAP_ID_MAC_TO_ME,
+ DEVLINK_PRESTERA_TRAP_ID_IPV4_OPTIONS,
+ DEVLINK_PRESTERA_TRAP_ID_IP_DEFAULT_ROUTE,
+ DEVLINK_PRESTERA_TRAP_ID_IP_TO_ME,
+ DEVLINK_PRESTERA_TRAP_ID_IPV4_ICMP_REDIRECT,
+ DEVLINK_PRESTERA_TRAP_ID_ACL_CODE_0,
+ DEVLINK_PRESTERA_TRAP_ID_ACL_CODE_1,
+ DEVLINK_PRESTERA_TRAP_ID_ACL_CODE_2,
+ DEVLINK_PRESTERA_TRAP_ID_ACL_CODE_3,
+ DEVLINK_PRESTERA_TRAP_ID_ACL_CODE_4,
+ DEVLINK_PRESTERA_TRAP_ID_ACL_CODE_5,
+ DEVLINK_PRESTERA_TRAP_ID_ACL_CODE_6,
+ DEVLINK_PRESTERA_TRAP_ID_ACL_CODE_7,
+ DEVLINK_PRESTERA_TRAP_ID_BGP,
+ DEVLINK_PRESTERA_TRAP_ID_SSH,
+ DEVLINK_PRESTERA_TRAP_ID_TELNET,
+ DEVLINK_PRESTERA_TRAP_ID_ICMP,
+ DEVLINK_PRESTERA_TRAP_ID_MET_RED,
+ DEVLINK_PRESTERA_TRAP_ID_IP_SIP_IS_ZERO,
+ DEVLINK_PRESTERA_TRAP_ID_IP_UC_DIP_DA_MISMATCH,
+ DEVLINK_PRESTERA_TRAP_ID_ILLEGAL_IPV4_HDR,
+ DEVLINK_PRESTERA_TRAP_ID_ILLEGAL_IP_ADDR,
+ DEVLINK_PRESTERA_TRAP_ID_INVALID_SA,
+ DEVLINK_PRESTERA_TRAP_ID_LOCAL_PORT,
+ DEVLINK_PRESTERA_TRAP_ID_PORT_NO_VLAN,
+ DEVLINK_PRESTERA_TRAP_ID_RXDMA_DROP,
+};
+
+#define DEVLINK_PRESTERA_TRAP_NAME_ARP_BC \
+ "arp_bc"
+#define DEVLINK_PRESTERA_TRAP_NAME_IS_IS \
+ "is_is"
+#define DEVLINK_PRESTERA_TRAP_NAME_OSPF \
+ "ospf"
+#define DEVLINK_PRESTERA_TRAP_NAME_IP_BC_MAC \
+ "ip_bc_mac"
+#define DEVLINK_PRESTERA_TRAP_NAME_ROUTER_MC \
+ "router_mc"
+#define DEVLINK_PRESTERA_TRAP_NAME_VRRP \
+ "vrrp"
+#define DEVLINK_PRESTERA_TRAP_NAME_DHCP \
+ "dhcp"
+#define DEVLINK_PRESTERA_TRAP_NAME_MAC_TO_ME \
+ "mac_to_me"
+#define DEVLINK_PRESTERA_TRAP_NAME_IPV4_OPTIONS \
+ "ipv4_options"
+#define DEVLINK_PRESTERA_TRAP_NAME_IP_DEFAULT_ROUTE \
+ "ip_default_route"
+#define DEVLINK_PRESTERA_TRAP_NAME_IP_TO_ME \
+ "ip_to_me"
+#define DEVLINK_PRESTERA_TRAP_NAME_IPV4_ICMP_REDIRECT \
+ "ipv4_icmp_redirect"
+#define DEVLINK_PRESTERA_TRAP_NAME_ACL_CODE_0 \
+ "acl_code_0"
+#define DEVLINK_PRESTERA_TRAP_NAME_ACL_CODE_1 \
+ "acl_code_1"
+#define DEVLINK_PRESTERA_TRAP_NAME_ACL_CODE_2 \
+ "acl_code_2"
+#define DEVLINK_PRESTERA_TRAP_NAME_ACL_CODE_3 \
+ "acl_code_3"
+#define DEVLINK_PRESTERA_TRAP_NAME_ACL_CODE_4 \
+ "acl_code_4"
+#define DEVLINK_PRESTERA_TRAP_NAME_ACL_CODE_5 \
+ "acl_code_5"
+#define DEVLINK_PRESTERA_TRAP_NAME_ACL_CODE_6 \
+ "acl_code_6"
+#define DEVLINK_PRESTERA_TRAP_NAME_ACL_CODE_7 \
+ "acl_code_7"
+#define DEVLINK_PRESTERA_TRAP_NAME_BGP \
+ "bgp"
+#define DEVLINK_PRESTERA_TRAP_NAME_SSH \
+ "ssh"
+#define DEVLINK_PRESTERA_TRAP_NAME_TELNET \
+ "telnet"
+#define DEVLINK_PRESTERA_TRAP_NAME_ICMP \
+ "icmp"
+#define DEVLINK_PRESTERA_TRAP_NAME_RXDMA_DROP \
+ "rxdma_drop"
+#define DEVLINK_PRESTERA_TRAP_NAME_PORT_NO_VLAN \
+ "port_no_vlan"
+#define DEVLINK_PRESTERA_TRAP_NAME_LOCAL_PORT \
+ "local_port"
+#define DEVLINK_PRESTERA_TRAP_NAME_INVALID_SA \
+ "invalid_sa"
+#define DEVLINK_PRESTERA_TRAP_NAME_ILLEGAL_IP_ADDR \
+ "illegal_ip_addr"
+#define DEVLINK_PRESTERA_TRAP_NAME_ILLEGAL_IPV4_HDR \
+ "illegal_ipv4_hdr"
+#define DEVLINK_PRESTERA_TRAP_NAME_IP_UC_DIP_DA_MISMATCH \
+ "ip_uc_dip_da_mismatch"
+#define DEVLINK_PRESTERA_TRAP_NAME_IP_SIP_IS_ZERO \
+ "ip_sip_is_zero"
+#define DEVLINK_PRESTERA_TRAP_NAME_MET_RED \
+ "met_red"
+
+struct prestera_trap {
+ struct devlink_trap trap;
+ u8 cpu_code;
+};
+
+struct prestera_trap_item {
+ enum devlink_trap_action action;
+ void *trap_ctx;
+};
+
+struct prestera_trap_data {
+ struct prestera_switch *sw;
+ struct prestera_trap_item *trap_items_arr;
+ u32 traps_count;
+};
+
+#define PRESTERA_TRAP_METADATA DEVLINK_TRAP_METADATA_TYPE_F_IN_PORT
+
+#define PRESTERA_TRAP_CONTROL(_id, _group_id, _action) \
+ DEVLINK_TRAP_GENERIC(CONTROL, _action, _id, \
+ DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \
+ PRESTERA_TRAP_METADATA)
+
+#define PRESTERA_TRAP_DRIVER_CONTROL(_id, _group_id) \
+ DEVLINK_TRAP_DRIVER(CONTROL, TRAP, DEVLINK_PRESTERA_TRAP_ID_##_id, \
+ DEVLINK_PRESTERA_TRAP_NAME_##_id, \
+ DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \
+ PRESTERA_TRAP_METADATA)
+
+#define PRESTERA_TRAP_EXCEPTION(_id, _group_id) \
+ DEVLINK_TRAP_GENERIC(EXCEPTION, TRAP, _id, \
+ DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \
+ PRESTERA_TRAP_METADATA)
+
+#define PRESTERA_TRAP_DRIVER_EXCEPTION(_id, _group_id) \
+ DEVLINK_TRAP_DRIVER(EXCEPTION, TRAP, DEVLINK_PRESTERA_TRAP_ID_##_id, \
+ DEVLINK_PRESTERA_TRAP_NAME_##_id, \
+ DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \
+ PRESTERA_TRAP_METADATA)
+
+#define PRESTERA_TRAP_DRIVER_DROP(_id, _group_id) \
+ DEVLINK_TRAP_DRIVER(DROP, DROP, DEVLINK_PRESTERA_TRAP_ID_##_id, \
+ DEVLINK_PRESTERA_TRAP_NAME_##_id, \
+ DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \
+ PRESTERA_TRAP_METADATA)
+
+static const struct devlink_trap_group prestera_trap_groups_arr[] = {
+ /* No policer is associated with the following groups (policerid == 0). */
+ DEVLINK_TRAP_GROUP_GENERIC(L2_DROPS, 0),
+ DEVLINK_TRAP_GROUP_GENERIC(L3_DROPS, 0),
+ DEVLINK_TRAP_GROUP_GENERIC(L3_EXCEPTIONS, 0),
+ DEVLINK_TRAP_GROUP_GENERIC(NEIGH_DISCOVERY, 0),
+ DEVLINK_TRAP_GROUP_GENERIC(ACL_TRAP, 0),
+ DEVLINK_TRAP_GROUP_GENERIC(ACL_DROPS, 0),
+ DEVLINK_TRAP_GROUP_GENERIC(ACL_SAMPLE, 0),
+ DEVLINK_TRAP_GROUP_GENERIC(OSPF, 0),
+ DEVLINK_TRAP_GROUP_GENERIC(STP, 0),
+ DEVLINK_TRAP_GROUP_GENERIC(LACP, 0),
+ DEVLINK_TRAP_GROUP_GENERIC(LLDP, 0),
+ DEVLINK_TRAP_GROUP_GENERIC(VRRP, 0),
+ DEVLINK_TRAP_GROUP_GENERIC(DHCP, 0),
+ DEVLINK_TRAP_GROUP_GENERIC(BGP, 0),
+ DEVLINK_TRAP_GROUP_GENERIC(LOCAL_DELIVERY, 0),
+ DEVLINK_TRAP_GROUP_GENERIC(BUFFER_DROPS, 0),
+};
+
+/* Initialize the trap list and associate each trap with its CPU code. */
+static struct prestera_trap prestera_trap_items_arr[] = {
+ {
+ .trap = PRESTERA_TRAP_DRIVER_CONTROL(ARP_BC, NEIGH_DISCOVERY),
+ .cpu_code = 5,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_CONTROL(IS_IS, LOCAL_DELIVERY),
+ .cpu_code = 13,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_CONTROL(OSPF, OSPF),
+ .cpu_code = 16,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_CONTROL(IP_BC_MAC, LOCAL_DELIVERY),
+ .cpu_code = 19,
+ },
+ {
+ .trap = PRESTERA_TRAP_CONTROL(STP, STP, TRAP),
+ .cpu_code = 26,
+ },
+ {
+ .trap = PRESTERA_TRAP_CONTROL(LACP, LACP, TRAP),
+ .cpu_code = 27,
+ },
+ {
+ .trap = PRESTERA_TRAP_CONTROL(LLDP, LLDP, TRAP),
+ .cpu_code = 28,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_CONTROL(ROUTER_MC, LOCAL_DELIVERY),
+ .cpu_code = 29,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_CONTROL(VRRP, VRRP),
+ .cpu_code = 30,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_CONTROL(DHCP, DHCP),
+ .cpu_code = 33,
+ },
+ {
+ .trap = PRESTERA_TRAP_EXCEPTION(MTU_ERROR, L3_EXCEPTIONS),
+ .cpu_code = 63,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_CONTROL(MAC_TO_ME, LOCAL_DELIVERY),
+ .cpu_code = 65,
+ },
+ {
+ .trap = PRESTERA_TRAP_EXCEPTION(TTL_ERROR, L3_EXCEPTIONS),
+ .cpu_code = 133,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_EXCEPTION(IPV4_OPTIONS,
+ L3_EXCEPTIONS),
+ .cpu_code = 141,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_CONTROL(IP_DEFAULT_ROUTE,
+ LOCAL_DELIVERY),
+ .cpu_code = 160,
+ },
+ {
+ .trap = PRESTERA_TRAP_CONTROL(LOCAL_ROUTE, LOCAL_DELIVERY,
+ TRAP),
+ .cpu_code = 161,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_EXCEPTION(IPV4_ICMP_REDIRECT,
+ L3_EXCEPTIONS),
+ .cpu_code = 180,
+ },
+ {
+ .trap = PRESTERA_TRAP_CONTROL(ARP_RESPONSE, NEIGH_DISCOVERY,
+ TRAP),
+ .cpu_code = 188,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_CONTROL(ACL_CODE_0, ACL_TRAP),
+ .cpu_code = 192,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_CONTROL(ACL_CODE_1, ACL_TRAP),
+ .cpu_code = 193,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_CONTROL(ACL_CODE_2, ACL_TRAP),
+ .cpu_code = 194,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_CONTROL(ACL_CODE_3, ACL_TRAP),
+ .cpu_code = 195,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_CONTROL(ACL_CODE_4, ACL_TRAP),
+ .cpu_code = 196,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_CONTROL(ACL_CODE_5, ACL_TRAP),
+ .cpu_code = 197,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_CONTROL(ACL_CODE_6, ACL_TRAP),
+ .cpu_code = 198,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_CONTROL(ACL_CODE_7, ACL_TRAP),
+ .cpu_code = 199,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_CONTROL(BGP, BGP),
+ .cpu_code = 206,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_CONTROL(SSH, LOCAL_DELIVERY),
+ .cpu_code = 207,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_CONTROL(TELNET, LOCAL_DELIVERY),
+ .cpu_code = 208,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_CONTROL(ICMP, LOCAL_DELIVERY),
+ .cpu_code = 209,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_DROP(RXDMA_DROP, BUFFER_DROPS),
+ .cpu_code = 37,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_DROP(PORT_NO_VLAN, L2_DROPS),
+ .cpu_code = 39,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_DROP(LOCAL_PORT, L2_DROPS),
+ .cpu_code = 56,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_DROP(INVALID_SA, L2_DROPS),
+ .cpu_code = 60,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_DROP(ILLEGAL_IP_ADDR, L3_DROPS),
+ .cpu_code = 136,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_DROP(ILLEGAL_IPV4_HDR, L3_DROPS),
+ .cpu_code = 137,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_DROP(IP_UC_DIP_DA_MISMATCH,
+ L3_DROPS),
+ .cpu_code = 138,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_DROP(IP_SIP_IS_ZERO, L3_DROPS),
+ .cpu_code = 145,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_DROP(MET_RED, BUFFER_DROPS),
+ .cpu_code = 185,
+ },
+};
+
+static int prestera_drop_counter_get(struct devlink *devlink,
+ const struct devlink_trap *trap,
+ u64 *p_drops);
+
+static int prestera_dl_info_get(struct devlink *dl,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ struct prestera_switch *sw = devlink_priv(dl);
+ char buf[16];
+
+ snprintf(buf, sizeof(buf), "%d.%d.%d",
+ sw->dev->fw_rev.maj,
+ sw->dev->fw_rev.min,
+ sw->dev->fw_rev.sub);
+
+ return devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW,
+ buf);
+}
+
+static int prestera_trap_init(struct devlink *devlink,
+ const struct devlink_trap *trap, void *trap_ctx);
+
+static int prestera_trap_action_set(struct devlink *devlink,
+ const struct devlink_trap *trap,
+ enum devlink_trap_action action,
+ struct netlink_ext_ack *extack);
+
+static const struct devlink_ops prestera_dl_ops = {
+ .info_get = prestera_dl_info_get,
+ .trap_init = prestera_trap_init,
+ .trap_action_set = prestera_trap_action_set,
+ .trap_drop_counter_get = prestera_drop_counter_get,
+};
+
+struct prestera_switch *prestera_devlink_alloc(struct prestera_device *dev)
+{
+ struct devlink *dl;
+
+ dl = devlink_alloc(&prestera_dl_ops, sizeof(struct prestera_switch),
+ dev->dev);
+
+ return devlink_priv(dl);
+}
+
+void prestera_devlink_free(struct prestera_switch *sw)
+{
+ struct devlink *dl = priv_to_devlink(sw);
+
+ devlink_free(dl);
+}
+
+void prestera_devlink_register(struct prestera_switch *sw)
+{
+ struct devlink *dl = priv_to_devlink(sw);
+
+ devlink_register(dl);
+}
+
+void prestera_devlink_unregister(struct prestera_switch *sw)
+{
+ struct devlink *dl = priv_to_devlink(sw);
+
+ devlink_unregister(dl);
+}
+
+int prestera_devlink_port_register(struct prestera_port *port)
+{
+ struct prestera_switch *sw = port->sw;
+ struct devlink *dl = priv_to_devlink(sw);
+ struct devlink_port_attrs attrs = {};
+ int err;
+
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
+ attrs.phys.port_number = port->fp_id;
+ attrs.switch_id.id_len = sizeof(sw->id);
+ memcpy(attrs.switch_id.id, &sw->id, attrs.switch_id.id_len);
+
+ devlink_port_attrs_set(&port->dl_port, &attrs);
+
+ err = devlink_port_register(dl, &port->dl_port, port->fp_id);
+ if (err) {
+ dev_err(prestera_dev(sw), "devlink_port_register failed: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+void prestera_devlink_port_unregister(struct prestera_port *port)
+{
+ devlink_port_unregister(&port->dl_port);
+}
+
+int prestera_devlink_traps_register(struct prestera_switch *sw)
+{
+ const u32 groups_count = ARRAY_SIZE(prestera_trap_groups_arr);
+ const u32 traps_count = ARRAY_SIZE(prestera_trap_items_arr);
+ struct devlink *devlink = priv_to_devlink(sw);
+ struct prestera_trap_data *trap_data;
+ struct prestera_trap *prestera_trap;
+ int err, i;
+
+ trap_data = kzalloc(sizeof(*trap_data), GFP_KERNEL);
+ if (!trap_data)
+ return -ENOMEM;
+
+ trap_data->trap_items_arr = kcalloc(traps_count,
+ sizeof(struct prestera_trap_item),
+ GFP_KERNEL);
+ if (!trap_data->trap_items_arr) {
+ err = -ENOMEM;
+ goto err_trap_items_alloc;
+ }
+
+ trap_data->sw = sw;
+ trap_data->traps_count = traps_count;
+ sw->trap_data = trap_data;
+
+ err = devlink_trap_groups_register(devlink, prestera_trap_groups_arr,
+ groups_count);
+ if (err)
+ goto err_groups_register;
+
+ for (i = 0; i < traps_count; i++) {
+ prestera_trap = &prestera_trap_items_arr[i];
+ err = devlink_traps_register(devlink, &prestera_trap->trap, 1,
+ sw);
+ if (err)
+ goto err_trap_register;
+ }
+
+ return 0;
+
+err_trap_register:
+ for (i--; i >= 0; i--) {
+ prestera_trap = &prestera_trap_items_arr[i];
+ devlink_traps_unregister(devlink, &prestera_trap->trap, 1);
+ }
+ devlink_trap_groups_unregister(devlink, prestera_trap_groups_arr,
+ groups_count);
+err_groups_register:
+ kfree(trap_data->trap_items_arr);
+err_trap_items_alloc:
+ kfree(trap_data);
+ return err;
+}
+
+static struct prestera_trap_item *
+prestera_get_trap_item_by_cpu_code(struct prestera_switch *sw, u8 cpu_code)
+{
+ struct prestera_trap_data *trap_data = sw->trap_data;
+ struct prestera_trap *prestera_trap;
+ int i;
+
+ for (i = 0; i < trap_data->traps_count; i++) {
+ prestera_trap = &prestera_trap_items_arr[i];
+ if (cpu_code == prestera_trap->cpu_code)
+ return &trap_data->trap_items_arr[i];
+ }
+
+ return NULL;
+}
+
+void prestera_devlink_trap_report(struct prestera_port *port,
+ struct sk_buff *skb, u8 cpu_code)
+{
+ struct prestera_trap_item *trap_item;
+ struct devlink *devlink;
+
+ devlink = port->dl_port.devlink;
+
+ trap_item = prestera_get_trap_item_by_cpu_code(port->sw, cpu_code);
+ if (unlikely(!trap_item))
+ return;
+
+ devlink_trap_report(devlink, skb, trap_item->trap_ctx,
+ &port->dl_port, NULL);
+}
+
+static struct prestera_trap_item *
+prestera_devlink_trap_item_lookup(struct prestera_switch *sw, u16 trap_id)
+{
+ struct prestera_trap_data *trap_data = sw->trap_data;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(prestera_trap_items_arr); i++) {
+ if (prestera_trap_items_arr[i].trap.id == trap_id)
+ return &trap_data->trap_items_arr[i];
+ }
+
+ return NULL;
+}
+
+static int prestera_trap_init(struct devlink *devlink,
+ const struct devlink_trap *trap, void *trap_ctx)
+{
+ struct prestera_switch *sw = devlink_priv(devlink);
+ struct prestera_trap_item *trap_item;
+
+ trap_item = prestera_devlink_trap_item_lookup(sw, trap->id);
+ if (WARN_ON(!trap_item))
+ return -EINVAL;
+
+ trap_item->trap_ctx = trap_ctx;
+ trap_item->action = trap->init_action;
+
+ return 0;
+}
+
+static int prestera_trap_action_set(struct devlink *devlink,
+ const struct devlink_trap *trap,
+ enum devlink_trap_action action,
+ struct netlink_ext_ack *extack)
+{
+ /* Currently, driver does not support trap action altering */
+ return -EOPNOTSUPP;
+}
+
+static int prestera_drop_counter_get(struct devlink *devlink,
+ const struct devlink_trap *trap,
+ u64 *p_drops)
+{
+ struct prestera_switch *sw = devlink_priv(devlink);
+ enum prestera_hw_cpu_code_cnt_t cpu_code_type =
+ PRESTERA_HW_CPU_CODE_CNT_TYPE_DROP;
+ struct prestera_trap *prestera_trap =
+ container_of(trap, struct prestera_trap, trap);
+
+ return prestera_hw_cpu_code_counters_get(sw, prestera_trap->cpu_code,
+ cpu_code_type, p_drops);
+}
+
+void prestera_devlink_traps_unregister(struct prestera_switch *sw)
+{
+ struct prestera_trap_data *trap_data = sw->trap_data;
+ struct devlink *dl = priv_to_devlink(sw);
+ const struct devlink_trap *trap;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(prestera_trap_items_arr); ++i) {
+ trap = &prestera_trap_items_arr[i].trap;
+ devlink_traps_unregister(dl, trap, 1);
+ }
+
+ devlink_trap_groups_unregister(dl, prestera_trap_groups_arr,
+ ARRAY_SIZE(prestera_trap_groups_arr));
+ kfree(trap_data->trap_items_arr);
+ kfree(trap_data);
+}
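
The trap plumbing above is meant to be driven from the RX path once the CPU code has been recovered from the packet's DSA tag (see prestera_dsa.c later in this patch). A hedged sketch of such a caller; the function name and the surrounding RX framing are hypothetical, and the driver's real RX handling lives in a file not shown in this hunk:

    static void example_rx_deliver(struct prestera_port *port,
                                   struct sk_buff *skb, u8 cpu_code)
    {
            /* Account the packet against the matching devlink trap (if any
             * trap item is registered for this CPU code) before delivery.
             */
            prestera_devlink_trap_report(port, skb, cpu_code);

            skb->protocol = eth_type_trans(skb, port->dev);
            netif_receive_skb(skb);
    }
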
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_devlink.h b/drivers/net/ethernet/marvell/prestera/prestera_devlink.h
new file mode 100644
index 000000000..bf84ad6fd
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_devlink.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved. */
+
+#ifndef _PRESTERA_DEVLINK_H_
+#define _PRESTERA_DEVLINK_H_
+
+#include "prestera.h"
+
+struct prestera_switch *prestera_devlink_alloc(struct prestera_device *dev);
+void prestera_devlink_free(struct prestera_switch *sw);
+
+void prestera_devlink_register(struct prestera_switch *sw);
+void prestera_devlink_unregister(struct prestera_switch *sw);
+
+int prestera_devlink_port_register(struct prestera_port *port);
+void prestera_devlink_port_unregister(struct prestera_port *port);
+
+void prestera_devlink_trap_report(struct prestera_port *port,
+ struct sk_buff *skb, u8 cpu_code);
+int prestera_devlink_traps_register(struct prestera_switch *sw);
+void prestera_devlink_traps_unregister(struct prestera_switch *sw);
+
+#endif /* _PRESTERA_DEVLINK_H_ */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_dsa.c b/drivers/net/ethernet/marvell/prestera/prestera_dsa.c
new file mode 100644
index 000000000..b7e89c0ca
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_dsa.c
@@ -0,0 +1,107 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2020 Marvell International Ltd. All rights reserved */
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+
+#include "prestera_dsa.h"
+
+#define PRESTERA_DSA_W0_CMD GENMASK(31, 30)
+#define PRESTERA_DSA_W0_IS_TAGGED BIT(29)
+#define PRESTERA_DSA_W0_DEV_NUM GENMASK(28, 24)
+#define PRESTERA_DSA_W0_PORT_NUM GENMASK(23, 19)
+#define PRESTERA_DSA_W0_VPT GENMASK(15, 13)
+#define PRESTERA_DSA_W0_EXT_BIT BIT(12)
+#define PRESTERA_DSA_W0_VID GENMASK(11, 0)
+
+#define PRESTERA_DSA_W1_EXT_BIT BIT(31)
+#define PRESTERA_DSA_W1_CFI_BIT BIT(30)
+#define PRESTERA_DSA_W1_PORT_NUM GENMASK(11, 10)
+#define PRESTERA_DSA_W1_MASK_CPU_CODE GENMASK(7, 0)
+
+#define PRESTERA_DSA_W2_EXT_BIT BIT(31)
+#define PRESTERA_DSA_W2_PORT_NUM BIT(20)
+
+#define PRESTERA_DSA_W3_VID GENMASK(30, 27)
+#define PRESTERA_DSA_W3_DST_EPORT GENMASK(23, 7)
+#define PRESTERA_DSA_W3_DEV_NUM GENMASK(6, 0)
+
+#define PRESTERA_DSA_VID GENMASK(15, 12)
+#define PRESTERA_DSA_DEV_NUM GENMASK(11, 5)
+
+int prestera_dsa_parse(struct prestera_dsa *dsa, const u8 *dsa_buf)
+{
+ __be32 *dsa_words = (__be32 *)dsa_buf;
+ enum prestera_dsa_cmd cmd;
+ u32 words[4];
+ u32 field;
+
+ words[0] = ntohl(dsa_words[0]);
+ words[1] = ntohl(dsa_words[1]);
+ words[2] = ntohl(dsa_words[2]);
+ words[3] = ntohl(dsa_words[3]);
+
+ /* set the common parameters */
+ cmd = (enum prestera_dsa_cmd)FIELD_GET(PRESTERA_DSA_W0_CMD, words[0]);
+
+ /* only the "To CPU" command is supported */
+ if (unlikely(cmd != PRESTERA_DSA_CMD_TO_CPU))
+ return -EINVAL;
+
+ if (FIELD_GET(PRESTERA_DSA_W0_EXT_BIT, words[0]) == 0)
+ return -EINVAL;
+ if (FIELD_GET(PRESTERA_DSA_W1_EXT_BIT, words[1]) == 0)
+ return -EINVAL;
+ if (FIELD_GET(PRESTERA_DSA_W2_EXT_BIT, words[2]) == 0)
+ return -EINVAL;
+
+ field = FIELD_GET(PRESTERA_DSA_W3_VID, words[3]);
+
+ dsa->vlan.is_tagged = FIELD_GET(PRESTERA_DSA_W0_IS_TAGGED, words[0]);
+ dsa->vlan.cfi_bit = FIELD_GET(PRESTERA_DSA_W1_CFI_BIT, words[1]);
+ dsa->vlan.vpt = FIELD_GET(PRESTERA_DSA_W0_VPT, words[0]);
+ dsa->vlan.vid = FIELD_GET(PRESTERA_DSA_W0_VID, words[0]);
+ dsa->vlan.vid &= ~PRESTERA_DSA_VID;
+ dsa->vlan.vid |= FIELD_PREP(PRESTERA_DSA_VID, field);
+
+ field = FIELD_GET(PRESTERA_DSA_W3_DEV_NUM, words[3]);
+
+ dsa->hw_dev_num = FIELD_GET(PRESTERA_DSA_W0_DEV_NUM, words[0]);
+ dsa->hw_dev_num |= FIELD_PREP(PRESTERA_DSA_DEV_NUM, field);
+
+ dsa->port_num = (FIELD_GET(PRESTERA_DSA_W0_PORT_NUM, words[0]) << 0) |
+ (FIELD_GET(PRESTERA_DSA_W1_PORT_NUM, words[1]) << 5) |
+ (FIELD_GET(PRESTERA_DSA_W2_PORT_NUM, words[2]) << 7);
+
+ dsa->cpu_code = FIELD_GET(PRESTERA_DSA_W1_MASK_CPU_CODE, words[1]);
+
+ return 0;
+}
+
+int prestera_dsa_build(const struct prestera_dsa *dsa, u8 *dsa_buf)
+{
+ __be32 *dsa_words = (__be32 *)dsa_buf;
+ u32 dev_num = dsa->hw_dev_num;
+ u32 words[4] = { 0 };
+
+ words[0] |= FIELD_PREP(PRESTERA_DSA_W0_CMD, PRESTERA_DSA_CMD_FROM_CPU);
+
+ words[0] |= FIELD_PREP(PRESTERA_DSA_W0_DEV_NUM, dev_num);
+ dev_num = FIELD_GET(PRESTERA_DSA_DEV_NUM, dev_num);
+ words[3] |= FIELD_PREP(PRESTERA_DSA_W3_DEV_NUM, dev_num);
+
+ words[3] |= FIELD_PREP(PRESTERA_DSA_W3_DST_EPORT, dsa->port_num);
+
+ words[0] |= FIELD_PREP(PRESTERA_DSA_W0_EXT_BIT, 1);
+ words[1] |= FIELD_PREP(PRESTERA_DSA_W1_EXT_BIT, 1);
+ words[2] |= FIELD_PREP(PRESTERA_DSA_W2_EXT_BIT, 1);
+
+ dsa_words[0] = htonl(words[0]);
+ dsa_words[1] = htonl(words[1]);
+ dsa_words[2] = htonl(words[2]);
+ dsa_words[3] = htonl(words[3]);
+
+ return 0;
+}
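
Both directions of the tag are used symmetrically: RX feeds the 16-byte (PRESTERA_DSA_HLEN) tag to prestera_dsa_parse() to recover the ingress port, CPU code and VLAN state, while TX fills a prestera_dsa and serializes it with prestera_dsa_build(). A hypothetical sketch, assuming the caller already points at the tag inside the frame (the actual tag placement is handled by the RX/TX code, which is not part of this hunk):

    /* RX: recover switching metadata from a received tag. */
    static int example_parse_rx_tag(const u8 *tag_buf, struct prestera_dsa *dsa)
    {
            /* Only "To CPU" tags are accepted; anything else is -EINVAL. */
            return prestera_dsa_parse(dsa, tag_buf);
    }

    /* TX: build a "From CPU" tag targeting a specific device/port pair. */
    static int example_build_tx_tag(u32 hw_dev_num, u32 port_num, u8 *tag_buf)
    {
            struct prestera_dsa dsa = {
                    .hw_dev_num = hw_dev_num,
                    .port_num = port_num,
            };

            /* Writes PRESTERA_DSA_HLEN bytes into tag_buf. */
            return prestera_dsa_build(&dsa, tag_buf);
    }
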
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_dsa.h b/drivers/net/ethernet/marvell/prestera/prestera_dsa.h
new file mode 100644
index 000000000..c99342f47
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_dsa.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2020 Marvell International Ltd. All rights reserved. */
+
+#ifndef __PRESTERA_DSA_H_
+#define __PRESTERA_DSA_H_
+
+#include <linux/types.h>
+
+#define PRESTERA_DSA_HLEN 16
+
+enum prestera_dsa_cmd {
+ /* DSA command is "To CPU" */
+ PRESTERA_DSA_CMD_TO_CPU = 0,
+
+ /* DSA command is "From CPU" */
+ PRESTERA_DSA_CMD_FROM_CPU,
+};
+
+struct prestera_dsa_vlan {
+ u16 vid;
+ u8 vpt;
+ u8 cfi_bit;
+ bool is_tagged;
+};
+
+struct prestera_dsa {
+ struct prestera_dsa_vlan vlan;
+ u32 hw_dev_num;
+ u32 port_num;
+ u8 cpu_code;
+};
+
+int prestera_dsa_parse(struct prestera_dsa *dsa, const u8 *dsa_buf);
+int prestera_dsa_build(const struct prestera_dsa *dsa, u8 *dsa_buf);
+
+#endif /* __PRESTERA_DSA_H_ */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_ethtool.c b/drivers/net/ethernet/marvell/prestera/prestera_ethtool.c
new file mode 100644
index 000000000..2f52daba5
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_ethtool.c
@@ -0,0 +1,802 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved */
+
+#include <linux/ethtool.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+
+#include "prestera_ethtool.h"
+#include "prestera.h"
+#include "prestera_hw.h"
+
+#define PRESTERA_STATS_CNT \
+ (sizeof(struct prestera_port_stats) / sizeof(u64))
+#define PRESTERA_STATS_IDX(name) \
+ (offsetof(struct prestera_port_stats, name) / sizeof(u64))
+#define PRESTERA_STATS_FIELD(name) \
+ [PRESTERA_STATS_IDX(name)] = __stringify(name)
+
+static const char driver_kind[] = "prestera";
+
+static const struct prestera_link_mode {
+ enum ethtool_link_mode_bit_indices eth_mode;
+ u32 speed;
+ u64 pr_mask;
+ u8 duplex;
+ u8 port_type;
+} port_link_modes[PRESTERA_LINK_MODE_MAX] = {
+ [PRESTERA_LINK_MODE_10baseT_Half] = {
+ .eth_mode = ETHTOOL_LINK_MODE_10baseT_Half_BIT,
+ .speed = 10,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_10baseT_Half,
+ .duplex = PRESTERA_PORT_DUPLEX_HALF,
+ .port_type = PRESTERA_PORT_TYPE_TP,
+ },
+ [PRESTERA_LINK_MODE_10baseT_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_10baseT_Full_BIT,
+ .speed = 10,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_10baseT_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_TP,
+ },
+ [PRESTERA_LINK_MODE_100baseT_Half] = {
+ .eth_mode = ETHTOOL_LINK_MODE_100baseT_Half_BIT,
+ .speed = 100,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_100baseT_Half,
+ .duplex = PRESTERA_PORT_DUPLEX_HALF,
+ .port_type = PRESTERA_PORT_TYPE_TP,
+ },
+ [PRESTERA_LINK_MODE_100baseT_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ .speed = 100,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_100baseT_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_TP,
+ },
+ [PRESTERA_LINK_MODE_1000baseT_Half] = {
+ .eth_mode = ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+ .speed = 1000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_1000baseT_Half,
+ .duplex = PRESTERA_PORT_DUPLEX_HALF,
+ .port_type = PRESTERA_PORT_TYPE_TP,
+ },
+ [PRESTERA_LINK_MODE_1000baseT_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ .speed = 1000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_1000baseT_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_TP,
+ },
+ [PRESTERA_LINK_MODE_1000baseX_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
+ .speed = 1000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_1000baseX_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_FIBRE,
+ },
+ [PRESTERA_LINK_MODE_1000baseKX_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
+ .speed = 1000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_1000baseKX_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_TP,
+ },
+ [PRESTERA_LINK_MODE_2500baseX_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_2500baseX_Full_BIT,
+ .speed = 2500,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_2500baseX_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ },
+ [PRESTERA_LINK_MODE_10GbaseKR_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+ .speed = 10000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_10GbaseKR_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_TP,
+ },
+ [PRESTERA_LINK_MODE_10GbaseSR_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
+ .speed = 10000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_10GbaseSR_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_FIBRE,
+ },
+ [PRESTERA_LINK_MODE_10GbaseLR_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
+ .speed = 10000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_10GbaseLR_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_FIBRE,
+ },
+ [PRESTERA_LINK_MODE_20GbaseKR2_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
+ .speed = 20000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_20GbaseKR2_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_TP,
+ },
+ [PRESTERA_LINK_MODE_25GbaseCR_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
+ .speed = 25000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_25GbaseCR_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_DA,
+ },
+ [PRESTERA_LINK_MODE_25GbaseKR_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
+ .speed = 25000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_25GbaseKR_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_TP,
+ },
+ [PRESTERA_LINK_MODE_25GbaseSR_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
+ .speed = 25000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_25GbaseSR_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_FIBRE,
+ },
+ [PRESTERA_LINK_MODE_40GbaseKR4_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
+ .speed = 40000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_40GbaseKR4_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_TP,
+ },
+ [PRESTERA_LINK_MODE_40GbaseCR4_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
+ .speed = 40000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_40GbaseCR4_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_DA,
+ },
+ [PRESTERA_LINK_MODE_40GbaseSR4_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
+ .speed = 40000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_40GbaseSR4_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_FIBRE,
+ },
+ [PRESTERA_LINK_MODE_50GbaseCR2_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
+ .speed = 50000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_50GbaseCR2_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_DA,
+ },
+ [PRESTERA_LINK_MODE_50GbaseKR2_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
+ .speed = 50000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_50GbaseKR2_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_TP,
+ },
+ [PRESTERA_LINK_MODE_50GbaseSR2_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
+ .speed = 50000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_50GbaseSR2_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_FIBRE,
+ },
+ [PRESTERA_LINK_MODE_100GbaseKR4_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
+ .speed = 100000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_100GbaseKR4_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_TP,
+ },
+ [PRESTERA_LINK_MODE_100GbaseSR4_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
+ .speed = 100000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_100GbaseSR4_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_FIBRE,
+ },
+ [PRESTERA_LINK_MODE_100GbaseCR4_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
+ .speed = 100000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_100GbaseCR4_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_DA,
+ }
+};
+
+static const struct prestera_fec {
+ u32 eth_fec;
+ enum ethtool_link_mode_bit_indices eth_mode;
+ u8 pr_fec;
+} port_fec_caps[PRESTERA_PORT_FEC_MAX] = {
+ [PRESTERA_PORT_FEC_OFF] = {
+ .eth_fec = ETHTOOL_FEC_OFF,
+ .eth_mode = ETHTOOL_LINK_MODE_FEC_NONE_BIT,
+ .pr_fec = 1 << PRESTERA_PORT_FEC_OFF,
+ },
+ [PRESTERA_PORT_FEC_BASER] = {
+ .eth_fec = ETHTOOL_FEC_BASER,
+ .eth_mode = ETHTOOL_LINK_MODE_FEC_BASER_BIT,
+ .pr_fec = 1 << PRESTERA_PORT_FEC_BASER,
+ },
+ [PRESTERA_PORT_FEC_RS] = {
+ .eth_fec = ETHTOOL_FEC_RS,
+ .eth_mode = ETHTOOL_LINK_MODE_FEC_RS_BIT,
+ .pr_fec = 1 << PRESTERA_PORT_FEC_RS,
+ }
+};
+
+static const struct prestera_port_type {
+ enum ethtool_link_mode_bit_indices eth_mode;
+ u8 eth_type;
+} port_types[PRESTERA_PORT_TYPE_MAX] = {
+ [PRESTERA_PORT_TYPE_NONE] = {
+ .eth_mode = __ETHTOOL_LINK_MODE_MASK_NBITS,
+ .eth_type = PORT_NONE,
+ },
+ [PRESTERA_PORT_TYPE_TP] = {
+ .eth_mode = ETHTOOL_LINK_MODE_TP_BIT,
+ .eth_type = PORT_TP,
+ },
+ [PRESTERA_PORT_TYPE_AUI] = {
+ .eth_mode = ETHTOOL_LINK_MODE_AUI_BIT,
+ .eth_type = PORT_AUI,
+ },
+ [PRESTERA_PORT_TYPE_MII] = {
+ .eth_mode = ETHTOOL_LINK_MODE_MII_BIT,
+ .eth_type = PORT_MII,
+ },
+ [PRESTERA_PORT_TYPE_FIBRE] = {
+ .eth_mode = ETHTOOL_LINK_MODE_FIBRE_BIT,
+ .eth_type = PORT_FIBRE,
+ },
+ [PRESTERA_PORT_TYPE_BNC] = {
+ .eth_mode = ETHTOOL_LINK_MODE_BNC_BIT,
+ .eth_type = PORT_BNC,
+ },
+ [PRESTERA_PORT_TYPE_DA] = {
+ .eth_mode = ETHTOOL_LINK_MODE_TP_BIT,
+ .eth_type = PORT_TP,
+ },
+ [PRESTERA_PORT_TYPE_OTHER] = {
+ .eth_mode = __ETHTOOL_LINK_MODE_MASK_NBITS,
+ .eth_type = PORT_OTHER,
+ }
+};
+
+static const char prestera_cnt_name[PRESTERA_STATS_CNT][ETH_GSTRING_LEN] = {
+ PRESTERA_STATS_FIELD(good_octets_received),
+ PRESTERA_STATS_FIELD(bad_octets_received),
+ PRESTERA_STATS_FIELD(mac_trans_error),
+ PRESTERA_STATS_FIELD(broadcast_frames_received),
+ PRESTERA_STATS_FIELD(multicast_frames_received),
+ PRESTERA_STATS_FIELD(frames_64_octets),
+ PRESTERA_STATS_FIELD(frames_65_to_127_octets),
+ PRESTERA_STATS_FIELD(frames_128_to_255_octets),
+ PRESTERA_STATS_FIELD(frames_256_to_511_octets),
+ PRESTERA_STATS_FIELD(frames_512_to_1023_octets),
+ PRESTERA_STATS_FIELD(frames_1024_to_max_octets),
+ PRESTERA_STATS_FIELD(excessive_collision),
+ PRESTERA_STATS_FIELD(multicast_frames_sent),
+ PRESTERA_STATS_FIELD(broadcast_frames_sent),
+ PRESTERA_STATS_FIELD(fc_sent),
+ PRESTERA_STATS_FIELD(fc_received),
+ PRESTERA_STATS_FIELD(buffer_overrun),
+ PRESTERA_STATS_FIELD(undersize),
+ PRESTERA_STATS_FIELD(fragments),
+ PRESTERA_STATS_FIELD(oversize),
+ PRESTERA_STATS_FIELD(jabber),
+ PRESTERA_STATS_FIELD(rx_error_frame_received),
+ PRESTERA_STATS_FIELD(bad_crc),
+ PRESTERA_STATS_FIELD(collisions),
+ PRESTERA_STATS_FIELD(late_collision),
+ PRESTERA_STATS_FIELD(unicast_frames_received),
+ PRESTERA_STATS_FIELD(unicast_frames_sent),
+ PRESTERA_STATS_FIELD(sent_multiple),
+ PRESTERA_STATS_FIELD(sent_deferred),
+ PRESTERA_STATS_FIELD(good_octets_sent),
+};
+
+static void prestera_ethtool_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct prestera_port *port = netdev_priv(dev);
+ struct prestera_switch *sw = port->sw;
+
+ strscpy(drvinfo->driver, driver_kind, sizeof(drvinfo->driver));
+ strscpy(drvinfo->bus_info, dev_name(prestera_dev(sw)),
+ sizeof(drvinfo->bus_info));
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%d.%d.%d",
+ sw->dev->fw_rev.maj,
+ sw->dev->fw_rev.min,
+ sw->dev->fw_rev.sub);
+}
+
+static u8 prestera_port_type_get(struct prestera_port *port)
+{
+ if (port->caps.type < PRESTERA_PORT_TYPE_MAX)
+ return port_types[port->caps.type].eth_type;
+
+ return PORT_OTHER;
+}
+
+static int prestera_port_type_set(const struct ethtool_link_ksettings *ecmd,
+ struct prestera_port *port)
+{
+ u32 new_mode = PRESTERA_LINK_MODE_MAX;
+ u32 type, mode;
+
+ for (type = 0; type < PRESTERA_PORT_TYPE_MAX; type++) {
+ if (port_types[type].eth_type == ecmd->base.port &&
+ test_bit(port_types[type].eth_mode,
+ ecmd->link_modes.supported)) {
+ break;
+ }
+ }
+
+ if (type == port->caps.type)
+ return 0;
+ if (type != port->caps.type && ecmd->base.autoneg == AUTONEG_ENABLE)
+ return -EINVAL;
+ if (type == PRESTERA_PORT_TYPE_MAX)
+ return -EOPNOTSUPP;
+
+ for (mode = 0; mode < PRESTERA_LINK_MODE_MAX; mode++) {
+ if ((port_link_modes[mode].pr_mask &
+ port->caps.supp_link_modes) &&
+ type == port_link_modes[mode].port_type) {
+ new_mode = mode;
+ }
+ }
+
+ if (new_mode >= PRESTERA_LINK_MODE_MAX)
+ return -EINVAL;
+
+ port->caps.type = type;
+ port->autoneg = false;
+
+ return 0;
+}
+
+static void prestera_modes_to_eth(unsigned long *eth_modes, u64 link_modes,
+ u8 fec, u8 type)
+{
+ u32 mode;
+
+ for (mode = 0; mode < PRESTERA_LINK_MODE_MAX; mode++) {
+ if ((port_link_modes[mode].pr_mask & link_modes) == 0)
+ continue;
+
+ if (type != PRESTERA_PORT_TYPE_NONE &&
+ port_link_modes[mode].port_type != type)
+ continue;
+
+ __set_bit(port_link_modes[mode].eth_mode, eth_modes);
+ }
+
+ for (mode = 0; mode < PRESTERA_PORT_FEC_MAX; mode++) {
+ if ((port_fec_caps[mode].pr_fec & fec) == 0)
+ continue;
+
+ __set_bit(port_fec_caps[mode].eth_mode, eth_modes);
+ }
+}
+
+static void prestera_modes_from_eth(const unsigned long *eth_modes,
+ u64 *link_modes, u8 *fec, u8 type)
+{
+ u64 adver_modes = 0;
+ u32 fec_modes = 0;
+ u32 mode;
+
+ for (mode = 0; mode < PRESTERA_LINK_MODE_MAX; mode++) {
+ if (!test_bit(port_link_modes[mode].eth_mode, eth_modes))
+ continue;
+
+ if (port_link_modes[mode].port_type != type)
+ continue;
+
+ adver_modes |= port_link_modes[mode].pr_mask;
+ }
+
+ for (mode = 0; mode < PRESTERA_PORT_FEC_MAX; mode++) {
+ if (!test_bit(port_fec_caps[mode].eth_mode, eth_modes))
+ continue;
+
+ fec_modes |= port_fec_caps[mode].pr_fec;
+ }
+
+ *link_modes = adver_modes;
+ *fec = fec_modes;
+}
+
+static void prestera_port_supp_types_get(struct ethtool_link_ksettings *ecmd,
+ struct prestera_port *port)
+{
+ u32 mode;
+ u8 ptype;
+
+ for (mode = 0; mode < PRESTERA_LINK_MODE_MAX; mode++) {
+ if ((port_link_modes[mode].pr_mask &
+ port->caps.supp_link_modes) == 0)
+ continue;
+
+ ptype = port_link_modes[mode].port_type;
+ __set_bit(port_types[ptype].eth_mode,
+ ecmd->link_modes.supported);
+ }
+}
+
+static void prestera_port_remote_cap_get(struct ethtool_link_ksettings *ecmd,
+ struct prestera_port *port)
+{
+ struct prestera_port_phy_state *state = &port->state_phy;
+ bool asym_pause;
+ bool pause;
+ u64 bitmap;
+ int err;
+
+ err = prestera_hw_port_phy_mode_get(port, NULL, &state->lmode_bmap,
+ &state->remote_fc.pause,
+ &state->remote_fc.asym_pause);
+ if (err)
+ netdev_warn(port->dev, "Remote link caps get failed %d",
+ port->caps.transceiver);
+
+ bitmap = state->lmode_bmap;
+
+ prestera_modes_to_eth(ecmd->link_modes.lp_advertising,
+ bitmap, 0, PRESTERA_PORT_TYPE_NONE);
+
+ if (!bitmap_empty(ecmd->link_modes.lp_advertising,
+ __ETHTOOL_LINK_MODE_MASK_NBITS)) {
+ ethtool_link_ksettings_add_link_mode(ecmd,
+ lp_advertising,
+ Autoneg);
+ }
+
+ pause = state->remote_fc.pause;
+ asym_pause = state->remote_fc.asym_pause;
+
+ if (pause)
+ ethtool_link_ksettings_add_link_mode(ecmd,
+ lp_advertising,
+ Pause);
+ if (asym_pause)
+ ethtool_link_ksettings_add_link_mode(ecmd,
+ lp_advertising,
+ Asym_Pause);
+}
+
+static void prestera_port_link_mode_get(struct ethtool_link_ksettings *ecmd,
+ struct prestera_port *port)
+{
+ struct prestera_port_mac_state *state = &port->state_mac;
+ u32 speed;
+ u8 duplex;
+ int err;
+
+ if (!port->state_mac.oper)
+ return;
+
+ if (state->speed == SPEED_UNKNOWN || state->duplex == DUPLEX_UNKNOWN) {
+ err = prestera_hw_port_mac_mode_get(port, NULL, &speed,
+ &duplex, NULL);
+ if (err) {
+ state->speed = SPEED_UNKNOWN;
+ state->duplex = DUPLEX_UNKNOWN;
+ } else {
+ state->speed = speed;
+ state->duplex = duplex == PRESTERA_PORT_DUPLEX_FULL ?
+ DUPLEX_FULL : DUPLEX_HALF;
+ }
+ }
+
+ ecmd->base.speed = port->state_mac.speed;
+ ecmd->base.duplex = port->state_mac.duplex;
+}
+
+static void prestera_port_mdix_get(struct ethtool_link_ksettings *ecmd,
+ struct prestera_port *port)
+{
+ struct prestera_port_phy_state *state = &port->state_phy;
+
+ if (prestera_hw_port_phy_mode_get(port,
+ &state->mdix, NULL, NULL, NULL)) {
+ netdev_warn(port->dev, "MDIX params get failed");
+ state->mdix = ETH_TP_MDI_INVALID;
+ }
+
+ ecmd->base.eth_tp_mdix = port->state_phy.mdix;
+ ecmd->base.eth_tp_mdix_ctrl = port->cfg_phy.mdix;
+}
+
+static int
+prestera_ethtool_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *ecmd)
+{
+ struct prestera_port *port = netdev_priv(dev);
+
+ ethtool_link_ksettings_zero_link_mode(ecmd, supported);
+ ethtool_link_ksettings_zero_link_mode(ecmd, advertising);
+ ethtool_link_ksettings_zero_link_mode(ecmd, lp_advertising);
+ ecmd->base.speed = SPEED_UNKNOWN;
+ ecmd->base.duplex = DUPLEX_UNKNOWN;
+
+ if (port->phy_link)
+ return phylink_ethtool_ksettings_get(port->phy_link, ecmd);
+
+ ecmd->base.autoneg = port->autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
+
+ if (port->caps.type == PRESTERA_PORT_TYPE_TP) {
+ ethtool_link_ksettings_add_link_mode(ecmd, supported, Autoneg);
+
+ if (netif_running(dev) &&
+ (port->autoneg ||
+ port->caps.transceiver == PRESTERA_PORT_TCVR_COPPER))
+ ethtool_link_ksettings_add_link_mode(ecmd, advertising,
+ Autoneg);
+ }
+
+ prestera_modes_to_eth(ecmd->link_modes.supported,
+ port->caps.supp_link_modes,
+ port->caps.supp_fec,
+ port->caps.type);
+
+ prestera_port_supp_types_get(ecmd, port);
+
+ if (netif_carrier_ok(dev))
+ prestera_port_link_mode_get(ecmd, port);
+
+ ecmd->base.port = prestera_port_type_get(port);
+
+ if (port->autoneg) {
+ if (netif_running(dev))
+ prestera_modes_to_eth(ecmd->link_modes.advertising,
+ port->adver_link_modes,
+ port->adver_fec,
+ port->caps.type);
+
+ if (netif_carrier_ok(dev) &&
+ port->caps.transceiver == PRESTERA_PORT_TCVR_COPPER)
+ prestera_port_remote_cap_get(ecmd, port);
+ }
+
+ if (port->caps.type == PRESTERA_PORT_TYPE_TP &&
+ port->caps.transceiver == PRESTERA_PORT_TCVR_COPPER)
+ prestera_port_mdix_get(ecmd, port);
+
+ return 0;
+}
+
+static int prestera_port_mdix_set(const struct ethtool_link_ksettings *ecmd,
+ struct prestera_port *port)
+{
+ if (ecmd->base.eth_tp_mdix_ctrl != ETH_TP_MDI_INVALID &&
+ port->caps.transceiver == PRESTERA_PORT_TCVR_COPPER &&
+ port->caps.type == PRESTERA_PORT_TYPE_TP) {
+ port->cfg_phy.mdix = ecmd->base.eth_tp_mdix_ctrl;
+ return prestera_hw_port_phy_mode_set(port, port->cfg_phy.admin,
+ port->autoneg,
+ port->cfg_phy.mode,
+ port->adver_link_modes,
+ port->cfg_phy.mdix);
+ }
+ return 0;
+}
+
+static int prestera_port_link_mode_set(struct prestera_port *port,
+ u32 speed, u8 duplex, u8 type)
+{
+ u32 new_mode = PRESTERA_LINK_MODE_MAX;
+ u32 mode;
+ int err;
+
+ for (mode = 0; mode < PRESTERA_LINK_MODE_MAX; mode++) {
+ if (speed != SPEED_UNKNOWN &&
+ speed != port_link_modes[mode].speed)
+ continue;
+
+ if (duplex != DUPLEX_UNKNOWN &&
+ duplex != port_link_modes[mode].duplex)
+ continue;
+
+ if (!(port_link_modes[mode].pr_mask &
+ port->caps.supp_link_modes))
+ continue;
+
+ if (type != port_link_modes[mode].port_type)
+ continue;
+
+ new_mode = mode;
+ break;
+ }
+
+ if (new_mode == PRESTERA_LINK_MODE_MAX)
+ return -EOPNOTSUPP;
+
+ err = prestera_hw_port_phy_mode_set(port, port->cfg_phy.admin,
+ false, new_mode, 0,
+ port->cfg_phy.mdix);
+ if (err)
+ return err;
+
+ port->adver_fec = BIT(PRESTERA_PORT_FEC_OFF);
+ port->adver_link_modes = 0;
+ port->cfg_phy.mode = new_mode;
+ port->autoneg = false;
+
+ return 0;
+}
+
+static int
+prestera_port_speed_duplex_set(const struct ethtool_link_ksettings *ecmd,
+ struct prestera_port *port)
+{
+ u8 duplex = DUPLEX_UNKNOWN;
+
+ if (ecmd->base.duplex != DUPLEX_UNKNOWN)
+ duplex = ecmd->base.duplex == DUPLEX_FULL ?
+ PRESTERA_PORT_DUPLEX_FULL : PRESTERA_PORT_DUPLEX_HALF;
+
+ return prestera_port_link_mode_set(port, ecmd->base.speed, duplex,
+ port->caps.type);
+}
+
+static int
+prestera_ethtool_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *ecmd)
+{
+ struct prestera_port *port = netdev_priv(dev);
+ u64 adver_modes;
+ u8 adver_fec;
+ int err;
+
+ if (port->phy_link)
+ return phylink_ethtool_ksettings_set(port->phy_link, ecmd);
+
+ err = prestera_port_type_set(ecmd, port);
+ if (err)
+ return err;
+
+ if (port->caps.transceiver == PRESTERA_PORT_TCVR_COPPER) {
+ err = prestera_port_mdix_set(ecmd, port);
+ if (err)
+ return err;
+ }
+
+ prestera_modes_from_eth(ecmd->link_modes.advertising, &adver_modes,
+ &adver_fec, port->caps.type);
+
+ if (ecmd->base.autoneg == AUTONEG_ENABLE)
+ err = prestera_port_autoneg_set(port, adver_modes);
+ else
+ err = prestera_port_speed_duplex_set(ecmd, port);
+
+ return err;
+}
+
+static int prestera_ethtool_get_fecparam(struct net_device *dev,
+ struct ethtool_fecparam *fecparam)
+{
+ struct prestera_port *port = netdev_priv(dev);
+ u8 active;
+ u32 mode;
+ int err;
+
+ err = prestera_hw_port_mac_mode_get(port, NULL, NULL, NULL, &active);
+ if (err)
+ return err;
+
+ fecparam->fec = 0;
+
+ for (mode = 0; mode < PRESTERA_PORT_FEC_MAX; mode++) {
+ if ((port_fec_caps[mode].pr_fec & port->caps.supp_fec) == 0)
+ continue;
+
+ fecparam->fec |= port_fec_caps[mode].eth_fec;
+ }
+
+ if (active < PRESTERA_PORT_FEC_MAX)
+ fecparam->active_fec = port_fec_caps[active].eth_fec;
+ else
+ fecparam->active_fec = ETHTOOL_FEC_AUTO;
+
+ return 0;
+}
+
+static int prestera_ethtool_set_fecparam(struct net_device *dev,
+ struct ethtool_fecparam *fecparam)
+{
+ struct prestera_port *port = netdev_priv(dev);
+ struct prestera_port_mac_config cfg_mac;
+ u32 mode;
+ u8 fec;
+
+ if (port->autoneg) {
+ netdev_err(dev, "FEC set is not allowed while autoneg is on\n");
+ return -EINVAL;
+ }
+
+ if (port->caps.transceiver == PRESTERA_PORT_TCVR_SFP) {
+ netdev_err(dev, "FEC set is not allowed on SFP ports\n");
+ return -EINVAL;
+ }
+
+ fec = PRESTERA_PORT_FEC_MAX;
+ for (mode = 0; mode < PRESTERA_PORT_FEC_MAX; mode++) {
+ if ((port_fec_caps[mode].eth_fec & fecparam->fec) &&
+ (port_fec_caps[mode].pr_fec & port->caps.supp_fec)) {
+ fec = mode;
+ break;
+ }
+ }
+
+ prestera_port_cfg_mac_read(port, &cfg_mac);
+
+ if (fec == cfg_mac.fec)
+ return 0;
+
+ if (fec == PRESTERA_PORT_FEC_MAX) {
+ netdev_err(dev, "Unsupported FEC requested");
+ return -EINVAL;
+ }
+
+ cfg_mac.fec = fec;
+
+ return prestera_port_cfg_mac_write(port, &cfg_mac);
+}
+
+static int prestera_ethtool_get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return PRESTERA_STATS_CNT;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void prestera_ethtool_get_strings(struct net_device *dev,
+ u32 stringset, u8 *data)
+{
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ memcpy(data, prestera_cnt_name, sizeof(prestera_cnt_name));
+}
+
+static void prestera_ethtool_get_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct prestera_port *port = netdev_priv(dev);
+ struct prestera_port_stats *port_stats;
+
+ port_stats = &port->cached_hw_stats.stats;
+
+ memcpy(data, port_stats, sizeof(*port_stats));
+}
+
+static int prestera_ethtool_nway_reset(struct net_device *dev)
+{
+ struct prestera_port *port = netdev_priv(dev);
+
+ if (netif_running(dev) &&
+ port->caps.transceiver == PRESTERA_PORT_TCVR_COPPER &&
+ port->caps.type == PRESTERA_PORT_TYPE_TP)
+ return prestera_hw_port_autoneg_restart(port);
+
+ return -EINVAL;
+}
+
+const struct ethtool_ops prestera_ethtool_ops = {
+ .get_drvinfo = prestera_ethtool_get_drvinfo,
+ .get_link_ksettings = prestera_ethtool_get_link_ksettings,
+ .set_link_ksettings = prestera_ethtool_set_link_ksettings,
+ .get_fecparam = prestera_ethtool_get_fecparam,
+ .set_fecparam = prestera_ethtool_set_fecparam,
+ .get_sset_count = prestera_ethtool_get_sset_count,
+ .get_strings = prestera_ethtool_get_strings,
+ .get_ethtool_stats = prestera_ethtool_get_stats,
+ .get_link = ethtool_op_get_link,
+ .nway_reset = prestera_ethtool_nway_reset
+};
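
The statistics path above works only because the string table stays index-aligned with struct prestera_port_stats, which the PRESTERA_STATS_* macros guarantee by construction. As a worked example (a sketch of the preprocessor expansion, not new code in the patch), PRESTERA_STATS_FIELD(good_octets_received) expands to roughly:

    [offsetof(struct prestera_port_stats, good_octets_received) / sizeof(u64)] =
            "good_octets_received",

so every u64 field lands at the same index as its name string, PRESTERA_STATS_CNT is simply the number of u64 fields, and prestera_ethtool_get_stats() can copy the cached prestera_port_stats structure into the output array verbatim.
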
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_ethtool.h b/drivers/net/ethernet/marvell/prestera/prestera_ethtool.h
new file mode 100644
index 000000000..bd5600886
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_ethtool.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved. */
+
+#ifndef __PRESTERA_ETHTOOL_H_
+#define __PRESTERA_ETHTOOL_H_
+
+#include <linux/ethtool.h>
+
+struct prestera_port_event;
+struct prestera_port;
+
+extern const struct ethtool_ops prestera_ethtool_ops;
+
+#endif /* __PRESTERA_ETHTOOL_H_ */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flow.c b/drivers/net/ethernet/marvell/prestera/prestera_flow.c
new file mode 100644
index 000000000..9f4267f32
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_flow.c
@@ -0,0 +1,315 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2020 Marvell International Ltd. All rights reserved */
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+
+#include "prestera.h"
+#include "prestera_acl.h"
+#include "prestera_flow.h"
+#include "prestera_flower.h"
+#include "prestera_matchall.h"
+#include "prestera_span.h"
+
+static LIST_HEAD(prestera_block_cb_list);
+
+static int prestera_flow_block_mall_cb(struct prestera_flow_block *block,
+ struct tc_cls_matchall_offload *f)
+{
+ switch (f->command) {
+ case TC_CLSMATCHALL_REPLACE:
+ return prestera_mall_replace(block, f);
+ case TC_CLSMATCHALL_DESTROY:
+ prestera_mall_destroy(block);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int prestera_flow_block_flower_cb(struct prestera_flow_block *block,
+ struct flow_cls_offload *f)
+{
+ switch (f->command) {
+ case FLOW_CLS_REPLACE:
+ return prestera_flower_replace(block, f);
+ case FLOW_CLS_DESTROY:
+ prestera_flower_destroy(block, f);
+ return 0;
+ case FLOW_CLS_STATS:
+ return prestera_flower_stats(block, f);
+ case FLOW_CLS_TMPLT_CREATE:
+ return prestera_flower_tmplt_create(block, f);
+ case FLOW_CLS_TMPLT_DESTROY:
+ prestera_flower_tmplt_destroy(block, f);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int prestera_flow_block_cb(enum tc_setup_type type,
+ void *type_data, void *cb_priv)
+{
+ struct prestera_flow_block *block = cb_priv;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return prestera_flow_block_flower_cb(block, type_data);
+ case TC_SETUP_CLSMATCHALL:
+ return prestera_flow_block_mall_cb(block, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void prestera_flow_block_destroy(void *cb_priv)
+{
+ struct prestera_flow_block *block = cb_priv;
+
+ prestera_flower_template_cleanup(block);
+
+ WARN_ON(!list_empty(&block->template_list));
+ WARN_ON(!list_empty(&block->binding_list));
+
+ kfree(block);
+}
+
+static struct prestera_flow_block *
+prestera_flow_block_create(struct prestera_switch *sw,
+ struct net *net,
+ bool ingress)
+{
+ struct prestera_flow_block *block;
+
+ block = kzalloc(sizeof(*block), GFP_KERNEL);
+ if (!block)
+ return NULL;
+
+ INIT_LIST_HEAD(&block->binding_list);
+ INIT_LIST_HEAD(&block->template_list);
+ block->net = net;
+ block->sw = sw;
+ block->mall.prio_min = UINT_MAX;
+ block->mall.prio_max = 0;
+ block->mall.bound = false;
+ block->ingress = ingress;
+
+ return block;
+}
+
+static void prestera_flow_block_release(void *cb_priv)
+{
+ struct prestera_flow_block *block = cb_priv;
+
+ prestera_flow_block_destroy(block);
+}
+
+static bool
+prestera_flow_block_is_bound(const struct prestera_flow_block *block)
+{
+ return block->ruleset_zero;
+}
+
+static struct prestera_flow_block_binding *
+prestera_flow_block_lookup(struct prestera_flow_block *block,
+ struct prestera_port *port)
+{
+ struct prestera_flow_block_binding *binding;
+
+ list_for_each_entry(binding, &block->binding_list, list)
+ if (binding->port == port)
+ return binding;
+
+ return NULL;
+}
+
+static int prestera_flow_block_bind(struct prestera_flow_block *block,
+ struct prestera_port *port)
+{
+ struct prestera_flow_block_binding *binding;
+ int err;
+
+ binding = kzalloc(sizeof(*binding), GFP_KERNEL);
+ if (!binding)
+ return -ENOMEM;
+
+ binding->span_id = PRESTERA_SPAN_INVALID_ID;
+ binding->port = port;
+
+ if (prestera_flow_block_is_bound(block)) {
+ err = prestera_acl_ruleset_bind(block->ruleset_zero, port);
+ if (err)
+ goto err_ruleset_bind;
+ }
+
+ list_add(&binding->list, &block->binding_list);
+ return 0;
+
+err_ruleset_bind:
+ kfree(binding);
+ return err;
+}
+
+static int prestera_flow_block_unbind(struct prestera_flow_block *block,
+ struct prestera_port *port)
+{
+ struct prestera_flow_block_binding *binding;
+
+ binding = prestera_flow_block_lookup(block, port);
+ if (!binding)
+ return -ENOENT;
+
+ list_del(&binding->list);
+
+ if (prestera_flow_block_is_bound(block))
+ prestera_acl_ruleset_unbind(block->ruleset_zero, port);
+
+ kfree(binding);
+ return 0;
+}
+
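+/* Look up the flow_block callback already registered for this switch or
+ * allocate a new one. Each successful call takes a reference on the
+ * callback, which is dropped either by prestera_flow_block_put() or on
+ * the unbind path.
+ */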
+static struct prestera_flow_block *
+prestera_flow_block_get(struct prestera_switch *sw,
+ struct flow_block_offload *f,
+ bool *register_block,
+ bool ingress)
+{
+ struct prestera_flow_block *block;
+ struct flow_block_cb *block_cb;
+
+ block_cb = flow_block_cb_lookup(f->block,
+ prestera_flow_block_cb, sw);
+ if (!block_cb) {
+ block = prestera_flow_block_create(sw, f->net, ingress);
+ if (!block)
+ return ERR_PTR(-ENOMEM);
+
+ block_cb = flow_block_cb_alloc(prestera_flow_block_cb,
+ sw, block,
+ prestera_flow_block_release);
+ if (IS_ERR(block_cb)) {
+ prestera_flow_block_destroy(block);
+ return ERR_CAST(block_cb);
+ }
+
+ block->block_cb = block_cb;
+ *register_block = true;
+ } else {
+ block = flow_block_cb_priv(block_cb);
+ *register_block = false;
+ }
+
+ flow_block_cb_incref(block_cb);
+
+ return block;
+}
+
+static void prestera_flow_block_put(struct prestera_flow_block *block)
+{
+ struct flow_block_cb *block_cb = block->block_cb;
+
+ if (flow_block_cb_decref(block_cb))
+ return;
+
+ flow_block_cb_free(block_cb);
+ prestera_flow_block_destroy(block);
+}
+
+static int prestera_setup_flow_block_bind(struct prestera_port *port,
+ struct flow_block_offload *f, bool ingress)
+{
+ struct prestera_switch *sw = port->sw;
+ struct prestera_flow_block *block;
+ struct flow_block_cb *block_cb;
+ bool register_block;
+ int err;
+
+ block = prestera_flow_block_get(sw, f, &register_block, ingress);
+ if (IS_ERR(block))
+ return PTR_ERR(block);
+
+ block_cb = block->block_cb;
+
+ err = prestera_flow_block_bind(block, port);
+ if (err)
+ goto err_block_bind;
+
+ if (register_block) {
+ flow_block_cb_add(block_cb, f);
+ list_add_tail(&block_cb->driver_list, &prestera_block_cb_list);
+ }
+
+ if (ingress)
+ port->ingress_flow_block = block;
+ else
+ port->egress_flow_block = block;
+
+ return 0;
+
+err_block_bind:
+ prestera_flow_block_put(block);
+
+ return err;
+}
+
+static void prestera_setup_flow_block_unbind(struct prestera_port *port,
+ struct flow_block_offload *f, bool ingress)
+{
+ struct prestera_switch *sw = port->sw;
+ struct prestera_flow_block *block;
+ struct flow_block_cb *block_cb;
+ int err;
+
+ block_cb = flow_block_cb_lookup(f->block, prestera_flow_block_cb, sw);
+ if (!block_cb)
+ return;
+
+ block = flow_block_cb_priv(block_cb);
+
+ prestera_mall_destroy(block);
+
+ err = prestera_flow_block_unbind(block, port);
+ if (err)
+ goto error;
+
+ if (!flow_block_cb_decref(block_cb)) {
+ flow_block_cb_remove(block_cb, f);
+ list_del(&block_cb->driver_list);
+ }
+error:
+ if (ingress)
+ port->ingress_flow_block = NULL;
+ else
+ port->egress_flow_block = NULL;
+}
+
+static int prestera_setup_flow_block_clsact(struct prestera_port *port,
+ struct flow_block_offload *f,
+ bool ingress)
+{
+ f->driver_block_list = &prestera_block_cb_list;
+
+ switch (f->command) {
+ case FLOW_BLOCK_BIND:
+ return prestera_setup_flow_block_bind(port, f, ingress);
+ case FLOW_BLOCK_UNBIND:
+ prestera_setup_flow_block_unbind(port, f, ingress);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+int prestera_flow_block_setup(struct prestera_port *port,
+ struct flow_block_offload *f)
+{
+ switch (f->binder_type) {
+ case FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS:
+ return prestera_setup_flow_block_clsact(port, f, true);
+ case FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS:
+ return prestera_setup_flow_block_clsact(port, f, false);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flow.h b/drivers/net/ethernet/marvell/prestera/prestera_flow.h
new file mode 100644
index 000000000..a85a3eb40
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_flow.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2020 Marvell International Ltd. All rights reserved. */
+
+#ifndef _PRESTERA_FLOW_H_
+#define _PRESTERA_FLOW_H_
+
+#include <net/flow_offload.h>
+
+struct prestera_port;
+struct prestera_switch;
+
+struct prestera_flow_block_binding {
+ struct list_head list;
+ struct prestera_port *port;
+ int span_id;
+};
+
+struct prestera_flow_block {
+ struct list_head binding_list;
+ struct prestera_switch *sw;
+ struct net *net;
+ struct prestera_acl_ruleset *ruleset_zero;
+ struct flow_block_cb *block_cb;
+ struct list_head template_list;
+ struct {
+ u32 prio_min;
+ u32 prio_max;
+ bool bound;
+ } mall;
+ unsigned int rule_count;
+ bool ingress;
+};
+
+int prestera_flow_block_setup(struct prestera_port *port,
+ struct flow_block_offload *f);
+
+#endif /* _PRESTERA_FLOW_H_ */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flower.c b/drivers/net/ethernet/marvell/prestera/prestera_flower.c
new file mode 100644
index 000000000..8b9455d8a
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_flower.c
@@ -0,0 +1,581 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2020 Marvell International Ltd. All rights reserved */
+
+#include "prestera.h"
+#include "prestera_acl.h"
+#include "prestera_flow.h"
+#include "prestera_flower.h"
+#include "prestera_matchall.h"
+
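+/* Per-chain flower template: holds the reference to the ruleset whose
+ * keymask was set when the tc chain template was created.
+ */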
+struct prestera_flower_template {
+ struct prestera_acl_ruleset *ruleset;
+ struct list_head list;
+ u32 chain_index;
+};
+
+static void
+prestera_flower_template_free(struct prestera_flower_template *template)
+{
+ prestera_acl_ruleset_put(template->ruleset);
+ list_del(&template->list);
+ kfree(template);
+}
+
+void prestera_flower_template_cleanup(struct prestera_flow_block *block)
+{
+ struct prestera_flower_template *template, *tmp;
+
+	/* put the references to all rulesets taken in template create */
+ list_for_each_entry_safe(template, tmp, &block->template_list, list)
+ prestera_flower_template_free(template);
+}
+
+static int
+prestera_flower_parse_goto_action(struct prestera_flow_block *block,
+ struct prestera_acl_rule *rule,
+ u32 chain_index,
+ const struct flow_action_entry *act)
+{
+ struct prestera_acl_ruleset *ruleset;
+
+ if (act->chain_index <= chain_index)
+ /* we can jump only forward */
+ return -EINVAL;
+
+ if (rule->re_arg.jump.valid)
+ return -EEXIST;
+
+ ruleset = prestera_acl_ruleset_get(block->sw->acl, block,
+ act->chain_index);
+ if (IS_ERR(ruleset))
+ return PTR_ERR(ruleset);
+
+ rule->re_arg.jump.valid = 1;
+ rule->re_arg.jump.i.index = prestera_acl_ruleset_index_get(ruleset);
+
+ rule->jump_ruleset = ruleset;
+
+ return 0;
+}
+
+static int prestera_flower_parse_actions(struct prestera_flow_block *block,
+ struct prestera_acl_rule *rule,
+ struct flow_action *flow_action,
+ u32 chain_index,
+ struct netlink_ext_ack *extack)
+{
+ const struct flow_action_entry *act;
+ int err, i;
+
+	/* whole struct (rule->re_arg) must be zero-initialized */
+ if (!flow_action_has_entries(flow_action))
+ return 0;
+
+ if (!flow_action_mixed_hw_stats_check(flow_action, extack))
+ return -EOPNOTSUPP;
+
+ act = flow_action_first_entry_get(flow_action);
+ if (act->hw_stats & FLOW_ACTION_HW_STATS_DISABLED) {
+ /* Nothing to do */
+ } else if (act->hw_stats & FLOW_ACTION_HW_STATS_DELAYED) {
+		/* set up the counter first */
+ rule->re_arg.count.valid = true;
+ err = prestera_acl_chain_to_client(chain_index, block->ingress,
+ &rule->re_arg.count.client);
+ if (err)
+ return err;
+ } else {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported action HW stats type");
+ return -EOPNOTSUPP;
+ }
+
+ flow_action_for_each(i, act, flow_action) {
+ switch (act->id) {
+ case FLOW_ACTION_ACCEPT:
+ if (rule->re_arg.accept.valid)
+ return -EEXIST;
+
+ rule->re_arg.accept.valid = 1;
+ break;
+ case FLOW_ACTION_DROP:
+ if (rule->re_arg.drop.valid)
+ return -EEXIST;
+
+ rule->re_arg.drop.valid = 1;
+ break;
+ case FLOW_ACTION_TRAP:
+ if (rule->re_arg.trap.valid)
+ return -EEXIST;
+
+ rule->re_arg.trap.valid = 1;
+ break;
+ case FLOW_ACTION_POLICE:
+ if (rule->re_arg.police.valid)
+ return -EEXIST;
+
+ rule->re_arg.police.valid = 1;
+ rule->re_arg.police.rate =
+ act->police.rate_bytes_ps;
+ rule->re_arg.police.burst = act->police.burst;
+ rule->re_arg.police.ingress = block->ingress;
+ break;
+ case FLOW_ACTION_GOTO:
+ err = prestera_flower_parse_goto_action(block, rule,
+ chain_index,
+ act);
+ if (err)
+ return err;
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported action");
+ pr_err("Unsupported action\n");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ return 0;
+}
+
+static int prestera_flower_parse_meta(struct prestera_acl_rule *rule,
+ struct flow_cls_offload *f,
+ struct prestera_flow_block *block)
+{
+ struct flow_rule *f_rule = flow_cls_offload_flow_rule(f);
+ struct prestera_acl_match *r_match = &rule->re_key.match;
+ struct prestera_port *port;
+ struct net_device *ingress_dev;
+ struct flow_match_meta match;
+ __be16 key, mask;
+
+ flow_rule_match_meta(f_rule, &match);
+
+ if (match.mask->l2_miss) {
+ NL_SET_ERR_MSG_MOD(f->common.extack, "Can't match on \"l2_miss\"");
+ return -EOPNOTSUPP;
+ }
+
+ if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "Unsupported ingress ifindex mask");
+ return -EINVAL;
+ }
+
+ ingress_dev = __dev_get_by_index(block->net,
+ match.key->ingress_ifindex);
+ if (!ingress_dev) {
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "Can't find specified ingress port to match on");
+ return -EINVAL;
+ }
+
+ if (!prestera_netdev_check(ingress_dev)) {
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "Can't match on switchdev ingress port");
+ return -EINVAL;
+ }
+ port = netdev_priv(ingress_dev);
+
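+	/* Encode the ingress source for the HW match: hw_id occupies
+	 * bits 15:3 of the 16-bit SYS_PORT field and dev_id the low
+	 * 10 bits of SYS_DEV, both in network byte order.
+	 */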
+ mask = htons(0x1FFF << 3);
+ key = htons(port->hw_id << 3);
+ rule_match_set(r_match->key, SYS_PORT, key);
+ rule_match_set(r_match->mask, SYS_PORT, mask);
+
+ mask = htons(0x3FF);
+ key = htons(port->dev_id);
+ rule_match_set(r_match->key, SYS_DEV, key);
+ rule_match_set(r_match->mask, SYS_DEV, mask);
+
+ return 0;
+}
+
+static int prestera_flower_parse(struct prestera_flow_block *block,
+ struct prestera_acl_rule *rule,
+ struct flow_cls_offload *f)
+{
+ struct flow_rule *f_rule = flow_cls_offload_flow_rule(f);
+ struct flow_dissector *dissector = f_rule->match.dissector;
+ struct prestera_acl_match *r_match = &rule->re_key.match;
+ __be16 n_proto_mask = 0;
+ __be16 n_proto_key = 0;
+ u16 addr_type = 0;
+ u8 ip_proto = 0;
+ int err;
+
+ if (dissector->used_keys &
+ ~(BIT_ULL(FLOW_DISSECTOR_KEY_META) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ICMP) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_PORTS_RANGE) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_VLAN))) {
+ NL_SET_ERR_MSG_MOD(f->common.extack, "Unsupported key");
+ return -EOPNOTSUPP;
+ }
+
+ prestera_acl_rule_priority_set(rule, f->common.prio);
+
+ if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_META)) {
+ err = prestera_flower_parse_meta(rule, f, block);
+ if (err)
+ return err;
+ }
+
+ if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_CONTROL)) {
+ struct flow_match_control match;
+
+ flow_rule_match_control(f_rule, &match);
+ addr_type = match.key->addr_type;
+ }
+
+ if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_match_basic match;
+
+ flow_rule_match_basic(f_rule, &match);
+ n_proto_key = match.key->n_proto;
+ n_proto_mask = match.mask->n_proto;
+
+ if (ntohs(match.key->n_proto) == ETH_P_ALL) {
+ n_proto_key = 0;
+ n_proto_mask = 0;
+ }
+
+ rule_match_set(r_match->key, ETH_TYPE, n_proto_key);
+ rule_match_set(r_match->mask, ETH_TYPE, n_proto_mask);
+
+ rule_match_set(r_match->key, IP_PROTO, match.key->ip_proto);
+ rule_match_set(r_match->mask, IP_PROTO, match.mask->ip_proto);
+ ip_proto = match.key->ip_proto;
+ }
+
+ if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ struct flow_match_eth_addrs match;
+
+ flow_rule_match_eth_addrs(f_rule, &match);
+
+ /* DA key, mask */
+ rule_match_set_n(r_match->key,
+ ETH_DMAC_0, &match.key->dst[0], 4);
+ rule_match_set_n(r_match->key,
+ ETH_DMAC_1, &match.key->dst[4], 2);
+
+ rule_match_set_n(r_match->mask,
+ ETH_DMAC_0, &match.mask->dst[0], 4);
+ rule_match_set_n(r_match->mask,
+ ETH_DMAC_1, &match.mask->dst[4], 2);
+
+ /* SA key, mask */
+ rule_match_set_n(r_match->key,
+ ETH_SMAC_0, &match.key->src[0], 4);
+ rule_match_set_n(r_match->key,
+ ETH_SMAC_1, &match.key->src[4], 2);
+
+ rule_match_set_n(r_match->mask,
+ ETH_SMAC_0, &match.mask->src[0], 4);
+ rule_match_set_n(r_match->mask,
+ ETH_SMAC_1, &match.mask->src[4], 2);
+ }
+
+ if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+ struct flow_match_ipv4_addrs match;
+
+ flow_rule_match_ipv4_addrs(f_rule, &match);
+
+ rule_match_set(r_match->key, IP_SRC, match.key->src);
+ rule_match_set(r_match->mask, IP_SRC, match.mask->src);
+
+ rule_match_set(r_match->key, IP_DST, match.key->dst);
+ rule_match_set(r_match->mask, IP_DST, match.mask->dst);
+ }
+
+ if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_PORTS)) {
+ struct flow_match_ports match;
+
+ if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) {
+ NL_SET_ERR_MSG_MOD
+ (f->common.extack,
+ "Only UDP and TCP keys are supported");
+ return -EINVAL;
+ }
+
+ flow_rule_match_ports(f_rule, &match);
+
+ rule_match_set(r_match->key, L4_PORT_SRC, match.key->src);
+ rule_match_set(r_match->mask, L4_PORT_SRC, match.mask->src);
+
+ rule_match_set(r_match->key, L4_PORT_DST, match.key->dst);
+ rule_match_set(r_match->mask, L4_PORT_DST, match.mask->dst);
+ }
+
+ if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_PORTS_RANGE)) {
+ struct flow_match_ports_range match;
+ __be32 tp_key, tp_mask;
+
+ flow_rule_match_ports_range(f_rule, &match);
+
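+	/* Each range is packed into one 32-bit BE word for the HW key:
+	 * min port in the low 16 bits, max port in the high 16 bits.
+	 */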
+ /* src port range (min, max) */
+ tp_key = htonl(ntohs(match.key->tp_min.src) |
+ (ntohs(match.key->tp_max.src) << 16));
+ tp_mask = htonl(ntohs(match.mask->tp_min.src) |
+ (ntohs(match.mask->tp_max.src) << 16));
+ rule_match_set(r_match->key, L4_PORT_RANGE_SRC, tp_key);
+ rule_match_set(r_match->mask, L4_PORT_RANGE_SRC, tp_mask);
+
+ /* dst port range (min, max) */
+ tp_key = htonl(ntohs(match.key->tp_min.dst) |
+ (ntohs(match.key->tp_max.dst) << 16));
+ tp_mask = htonl(ntohs(match.mask->tp_min.dst) |
+ (ntohs(match.mask->tp_max.dst) << 16));
+ rule_match_set(r_match->key, L4_PORT_RANGE_DST, tp_key);
+ rule_match_set(r_match->mask, L4_PORT_RANGE_DST, tp_mask);
+ }
+
+ if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_match_vlan match;
+
+ flow_rule_match_vlan(f_rule, &match);
+
+ if (match.mask->vlan_id != 0) {
+ __be16 key = cpu_to_be16(match.key->vlan_id);
+ __be16 mask = cpu_to_be16(match.mask->vlan_id);
+
+ rule_match_set(r_match->key, VLAN_ID, key);
+ rule_match_set(r_match->mask, VLAN_ID, mask);
+ }
+
+ rule_match_set(r_match->key, VLAN_TPID, match.key->vlan_tpid);
+ rule_match_set(r_match->mask, VLAN_TPID, match.mask->vlan_tpid);
+ }
+
+ if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_ICMP)) {
+ struct flow_match_icmp match;
+
+ flow_rule_match_icmp(f_rule, &match);
+
+ rule_match_set(r_match->key, ICMP_TYPE, match.key->type);
+ rule_match_set(r_match->mask, ICMP_TYPE, match.mask->type);
+
+ rule_match_set(r_match->key, ICMP_CODE, match.key->code);
+ rule_match_set(r_match->mask, ICMP_CODE, match.mask->code);
+ }
+
+ return prestera_flower_parse_actions(block, rule, &f->rule->action,
+ f->common.chain_index,
+ f->common.extack);
+}
+
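+/* Flower rules may not be interleaved with matchall rules: on ingress a
+ * flower rule must use a prio value above every matchall rule, on egress
+ * a prio value below them.
+ */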
+static int prestera_flower_prio_check(struct prestera_flow_block *block,
+ struct flow_cls_offload *f)
+{
+ u32 mall_prio_min;
+ u32 mall_prio_max;
+ int err;
+
+ err = prestera_mall_prio_get(block, &mall_prio_min, &mall_prio_max);
+ if (err == -ENOENT)
+ /* No matchall filters installed on this chain. */
+ return 0;
+
+ if (err) {
+ NL_SET_ERR_MSG(f->common.extack, "Failed to get matchall priorities");
+ return err;
+ }
+
+ if (f->common.prio <= mall_prio_max && block->ingress) {
+ NL_SET_ERR_MSG(f->common.extack,
+ "Failed to add in front of existing matchall rules");
+ return -EOPNOTSUPP;
+ }
+ if (f->common.prio >= mall_prio_min && !block->ingress) {
+ NL_SET_ERR_MSG(f->common.extack, "Failed to add behind of existing matchall rules");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+int prestera_flower_prio_get(struct prestera_flow_block *block, u32 chain_index,
+ u32 *prio_min, u32 *prio_max)
+{
+ struct prestera_acl_ruleset *ruleset;
+
+ ruleset = prestera_acl_ruleset_lookup(block->sw->acl, block, chain_index);
+ if (IS_ERR(ruleset))
+ return PTR_ERR(ruleset);
+
+ prestera_acl_ruleset_prio_get(ruleset, prio_min, prio_max);
+ return 0;
+}
+
+int prestera_flower_replace(struct prestera_flow_block *block,
+ struct flow_cls_offload *f)
+{
+ struct prestera_acl_ruleset *ruleset;
+ struct prestera_acl *acl = block->sw->acl;
+ struct prestera_acl_rule *rule;
+ int err;
+
+ err = prestera_flower_prio_check(block, f);
+ if (err)
+ return err;
+
+ ruleset = prestera_acl_ruleset_get(acl, block, f->common.chain_index);
+ if (IS_ERR(ruleset))
+ return PTR_ERR(ruleset);
+
+ /* increments the ruleset reference */
+ rule = prestera_acl_rule_create(ruleset, f->cookie,
+ f->common.chain_index);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ goto err_rule_create;
+ }
+
+ err = prestera_flower_parse(block, rule, f);
+ if (err)
+ goto err_rule_add;
+
+ if (!prestera_acl_ruleset_is_offload(ruleset)) {
+ err = prestera_acl_ruleset_offload(ruleset);
+ if (err)
+ goto err_ruleset_offload;
+ }
+
+ err = prestera_acl_rule_add(block->sw, rule);
+ if (err)
+ goto err_rule_add;
+
+ prestera_acl_ruleset_put(ruleset);
+ return 0;
+
+err_ruleset_offload:
+err_rule_add:
+ prestera_acl_rule_destroy(rule);
+err_rule_create:
+ prestera_acl_ruleset_put(ruleset);
+ return err;
+}
+
+void prestera_flower_destroy(struct prestera_flow_block *block,
+ struct flow_cls_offload *f)
+{
+ struct prestera_acl_ruleset *ruleset;
+ struct prestera_acl_rule *rule;
+
+ ruleset = prestera_acl_ruleset_lookup(block->sw->acl, block,
+ f->common.chain_index);
+ if (IS_ERR(ruleset))
+ return;
+
+ rule = prestera_acl_rule_lookup(ruleset, f->cookie);
+ if (rule) {
+ prestera_acl_rule_del(block->sw, rule);
+ prestera_acl_rule_destroy(rule);
+ }
+ prestera_acl_ruleset_put(ruleset);
+}
+
+int prestera_flower_tmplt_create(struct prestera_flow_block *block,
+ struct flow_cls_offload *f)
+{
+ struct prestera_flower_template *template;
+ struct prestera_acl_ruleset *ruleset;
+ struct prestera_acl_rule rule;
+ int err;
+
+ memset(&rule, 0, sizeof(rule));
+ err = prestera_flower_parse(block, &rule, f);
+ if (err)
+ return err;
+
+ template = kmalloc(sizeof(*template), GFP_KERNEL);
+ if (!template) {
+ err = -ENOMEM;
+ goto err_malloc;
+ }
+
+ prestera_acl_rule_keymask_pcl_id_set(&rule, 0);
+ ruleset = prestera_acl_ruleset_get(block->sw->acl, block,
+ f->common.chain_index);
+ if (IS_ERR_OR_NULL(ruleset)) {
+ err = -EINVAL;
+ goto err_ruleset_get;
+ }
+
+	/* attach the keymask/template to this ruleset */
+ err = prestera_acl_ruleset_keymask_set(ruleset, rule.re_key.match.mask);
+ if (err)
+ goto err_ruleset_keymask_set;
+
+	/* Skip the error, as it is not possible to reject a template
+	 * operation, so keep the reference to the ruleset for rules to be
+	 * added to that ruleset later. If offload fails here, the ruleset
+	 * will be offloaded again when a new rule is added. Also, it is
+	 * unlikely that the ruleset is already offloaded at this stage.
+	 */
+ prestera_acl_ruleset_offload(ruleset);
+
+ /* keep the reference to the ruleset */
+ template->ruleset = ruleset;
+ template->chain_index = f->common.chain_index;
+ list_add_rcu(&template->list, &block->template_list);
+ return 0;
+
+err_ruleset_keymask_set:
+ prestera_acl_ruleset_put(ruleset);
+err_ruleset_get:
+ kfree(template);
+err_malloc:
+ NL_SET_ERR_MSG_MOD(f->common.extack, "Create chain template failed");
+ return err;
+}
+
+void prestera_flower_tmplt_destroy(struct prestera_flow_block *block,
+ struct flow_cls_offload *f)
+{
+ struct prestera_flower_template *template, *tmp;
+
+ list_for_each_entry_safe(template, tmp, &block->template_list, list)
+ if (template->chain_index == f->common.chain_index) {
+ /* put the reference to the ruleset kept in create */
+ prestera_flower_template_free(template);
+ return;
+ }
+}
+
+int prestera_flower_stats(struct prestera_flow_block *block,
+ struct flow_cls_offload *f)
+{
+ struct prestera_acl_ruleset *ruleset;
+ struct prestera_acl_rule *rule;
+ u64 packets;
+ u64 lastuse;
+ u64 bytes;
+ int err;
+
+ ruleset = prestera_acl_ruleset_lookup(block->sw->acl, block,
+ f->common.chain_index);
+ if (IS_ERR(ruleset))
+ return PTR_ERR(ruleset);
+
+ rule = prestera_acl_rule_lookup(ruleset, f->cookie);
+ if (!rule) {
+ err = -EINVAL;
+ goto err_rule_get_stats;
+ }
+
+ err = prestera_acl_rule_get_stats(block->sw->acl, rule, &packets,
+ &bytes, &lastuse);
+ if (err)
+ goto err_rule_get_stats;
+
+ flow_stats_update(&f->stats, bytes, packets, 0, lastuse,
+ FLOW_ACTION_HW_STATS_DELAYED);
+
+err_rule_get_stats:
+ prestera_acl_ruleset_put(ruleset);
+ return err;
+}
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flower.h b/drivers/net/ethernet/marvell/prestera/prestera_flower.h
new file mode 100644
index 000000000..1181115fe
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_flower.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2020-2021 Marvell International Ltd. All rights reserved. */
+
+#ifndef _PRESTERA_FLOWER_H_
+#define _PRESTERA_FLOWER_H_
+
+#include <net/pkt_cls.h>
+
+struct prestera_flow_block;
+
+int prestera_flower_replace(struct prestera_flow_block *block,
+ struct flow_cls_offload *f);
+void prestera_flower_destroy(struct prestera_flow_block *block,
+ struct flow_cls_offload *f);
+int prestera_flower_stats(struct prestera_flow_block *block,
+ struct flow_cls_offload *f);
+int prestera_flower_tmplt_create(struct prestera_flow_block *block,
+ struct flow_cls_offload *f);
+void prestera_flower_tmplt_destroy(struct prestera_flow_block *block,
+ struct flow_cls_offload *f);
+void prestera_flower_template_cleanup(struct prestera_flow_block *block);
+int prestera_flower_prio_get(struct prestera_flow_block *block, u32 chain_index,
+ u32 *prio_min, u32 *prio_max);
+
+#endif /* _PRESTERA_FLOWER_H_ */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_hw.c b/drivers/net/ethernet/marvell/prestera/prestera_hw.c
new file mode 100644
index 000000000..fc6f7d274
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_hw.c
@@ -0,0 +1,2561 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved */
+
+#include <linux/etherdevice.h>
+#include <linux/if_bridge.h>
+#include <linux/ethtool.h>
+#include <linux/list.h>
+
+#include "prestera.h"
+#include "prestera_hw.h"
+#include "prestera_acl.h"
+#include "prestera_counter.h"
+#include "prestera_router_hw.h"
+
+#define PRESTERA_SWITCH_INIT_TIMEOUT_MS (30 * 1000)
+
+#define PRESTERA_MIN_MTU 64
+
+#define PRESTERA_MSG_CHUNK_SIZE 1024
+
+enum prestera_cmd_type_t {
+ PRESTERA_CMD_TYPE_SWITCH_INIT = 0x1,
+ PRESTERA_CMD_TYPE_SWITCH_ATTR_SET = 0x2,
+
+ PRESTERA_CMD_TYPE_PORT_ATTR_SET = 0x100,
+ PRESTERA_CMD_TYPE_PORT_ATTR_GET = 0x101,
+ PRESTERA_CMD_TYPE_PORT_INFO_GET = 0x110,
+
+ PRESTERA_CMD_TYPE_VLAN_CREATE = 0x200,
+ PRESTERA_CMD_TYPE_VLAN_DELETE = 0x201,
+ PRESTERA_CMD_TYPE_VLAN_PORT_SET = 0x202,
+ PRESTERA_CMD_TYPE_VLAN_PVID_SET = 0x203,
+
+ PRESTERA_CMD_TYPE_FDB_ADD = 0x300,
+ PRESTERA_CMD_TYPE_FDB_DELETE = 0x301,
+ PRESTERA_CMD_TYPE_FDB_FLUSH_PORT = 0x310,
+ PRESTERA_CMD_TYPE_FDB_FLUSH_VLAN = 0x311,
+ PRESTERA_CMD_TYPE_FDB_FLUSH_PORT_VLAN = 0x312,
+
+ PRESTERA_CMD_TYPE_BRIDGE_CREATE = 0x400,
+ PRESTERA_CMD_TYPE_BRIDGE_DELETE = 0x401,
+ PRESTERA_CMD_TYPE_BRIDGE_PORT_ADD = 0x402,
+ PRESTERA_CMD_TYPE_BRIDGE_PORT_DELETE = 0x403,
+
+ PRESTERA_CMD_TYPE_COUNTER_GET = 0x510,
+ PRESTERA_CMD_TYPE_COUNTER_ABORT = 0x511,
+ PRESTERA_CMD_TYPE_COUNTER_TRIGGER = 0x512,
+ PRESTERA_CMD_TYPE_COUNTER_BLOCK_GET = 0x513,
+ PRESTERA_CMD_TYPE_COUNTER_BLOCK_RELEASE = 0x514,
+ PRESTERA_CMD_TYPE_COUNTER_CLEAR = 0x515,
+
+ PRESTERA_CMD_TYPE_VTCAM_CREATE = 0x540,
+ PRESTERA_CMD_TYPE_VTCAM_DESTROY = 0x541,
+ PRESTERA_CMD_TYPE_VTCAM_RULE_ADD = 0x550,
+ PRESTERA_CMD_TYPE_VTCAM_RULE_DELETE = 0x551,
+ PRESTERA_CMD_TYPE_VTCAM_IFACE_BIND = 0x560,
+ PRESTERA_CMD_TYPE_VTCAM_IFACE_UNBIND = 0x561,
+
+ PRESTERA_CMD_TYPE_ROUTER_RIF_CREATE = 0x600,
+ PRESTERA_CMD_TYPE_ROUTER_RIF_DELETE = 0x601,
+ PRESTERA_CMD_TYPE_ROUTER_LPM_ADD = 0x610,
+ PRESTERA_CMD_TYPE_ROUTER_LPM_DELETE = 0x611,
+ PRESTERA_CMD_TYPE_ROUTER_NH_GRP_SET = 0x622,
+ PRESTERA_CMD_TYPE_ROUTER_NH_GRP_BLK_GET = 0x645,
+ PRESTERA_CMD_TYPE_ROUTER_NH_GRP_ADD = 0x623,
+ PRESTERA_CMD_TYPE_ROUTER_NH_GRP_DELETE = 0x624,
+ PRESTERA_CMD_TYPE_ROUTER_VR_CREATE = 0x630,
+ PRESTERA_CMD_TYPE_ROUTER_VR_DELETE = 0x631,
+
+ PRESTERA_CMD_TYPE_FLOOD_DOMAIN_CREATE = 0x700,
+ PRESTERA_CMD_TYPE_FLOOD_DOMAIN_DESTROY = 0x701,
+ PRESTERA_CMD_TYPE_FLOOD_DOMAIN_PORTS_SET = 0x702,
+ PRESTERA_CMD_TYPE_FLOOD_DOMAIN_PORTS_RESET = 0x703,
+
+ PRESTERA_CMD_TYPE_MDB_CREATE = 0x704,
+ PRESTERA_CMD_TYPE_MDB_DESTROY = 0x705,
+
+ PRESTERA_CMD_TYPE_RXTX_INIT = 0x800,
+
+ PRESTERA_CMD_TYPE_LAG_MEMBER_ADD = 0x900,
+ PRESTERA_CMD_TYPE_LAG_MEMBER_DELETE = 0x901,
+ PRESTERA_CMD_TYPE_LAG_MEMBER_ENABLE = 0x902,
+ PRESTERA_CMD_TYPE_LAG_MEMBER_DISABLE = 0x903,
+
+ PRESTERA_CMD_TYPE_STP_PORT_SET = 0x1000,
+
+ PRESTERA_CMD_TYPE_SPAN_GET = 0x1100,
+ PRESTERA_CMD_TYPE_SPAN_INGRESS_BIND = 0x1101,
+ PRESTERA_CMD_TYPE_SPAN_INGRESS_UNBIND = 0x1102,
+ PRESTERA_CMD_TYPE_SPAN_RELEASE = 0x1103,
+ PRESTERA_CMD_TYPE_SPAN_EGRESS_BIND = 0x1104,
+ PRESTERA_CMD_TYPE_SPAN_EGRESS_UNBIND = 0x1105,
+
+ PRESTERA_CMD_TYPE_POLICER_CREATE = 0x1500,
+ PRESTERA_CMD_TYPE_POLICER_RELEASE = 0x1501,
+ PRESTERA_CMD_TYPE_POLICER_SET = 0x1502,
+
+ PRESTERA_CMD_TYPE_CPU_CODE_COUNTERS_GET = 0x2000,
+
+ PRESTERA_CMD_TYPE_ACK = 0x10000,
+ PRESTERA_CMD_TYPE_MAX
+};
+
+enum {
+ PRESTERA_CMD_PORT_ATTR_ADMIN_STATE = 1,
+ PRESTERA_CMD_PORT_ATTR_MTU = 3,
+ PRESTERA_CMD_PORT_ATTR_MAC = 4,
+ PRESTERA_CMD_PORT_ATTR_SPEED = 5,
+ PRESTERA_CMD_PORT_ATTR_ACCEPT_FRAME_TYPE = 6,
+ PRESTERA_CMD_PORT_ATTR_LEARNING = 7,
+ PRESTERA_CMD_PORT_ATTR_FLOOD = 8,
+ PRESTERA_CMD_PORT_ATTR_CAPABILITY = 9,
+ PRESTERA_CMD_PORT_ATTR_LOCKED = 10,
+ PRESTERA_CMD_PORT_ATTR_PHY_MODE = 12,
+ PRESTERA_CMD_PORT_ATTR_TYPE = 13,
+ PRESTERA_CMD_PORT_ATTR_STATS = 17,
+ PRESTERA_CMD_PORT_ATTR_MAC_AUTONEG_RESTART = 18,
+ PRESTERA_CMD_PORT_ATTR_PHY_AUTONEG_RESTART = 19,
+ PRESTERA_CMD_PORT_ATTR_MAC_MODE = 22,
+};
+
+enum {
+ PRESTERA_CMD_SWITCH_ATTR_MAC = 1,
+ PRESTERA_CMD_SWITCH_ATTR_AGEING = 2,
+};
+
+enum {
+ PRESTERA_CMD_ACK_OK,
+ PRESTERA_CMD_ACK_FAILED,
+
+ PRESTERA_CMD_ACK_MAX
+};
+
+enum {
+ PRESTERA_PORT_TP_NA,
+ PRESTERA_PORT_TP_MDI,
+ PRESTERA_PORT_TP_MDIX,
+ PRESTERA_PORT_TP_AUTO,
+};
+
+enum {
+ PRESTERA_PORT_FLOOD_TYPE_UC = 0,
+ PRESTERA_PORT_FLOOD_TYPE_MC = 1,
+};
+
+enum {
+ PRESTERA_PORT_GOOD_OCTETS_RCV_CNT,
+ PRESTERA_PORT_BAD_OCTETS_RCV_CNT,
+ PRESTERA_PORT_MAC_TRANSMIT_ERR_CNT,
+ PRESTERA_PORT_BRDC_PKTS_RCV_CNT,
+ PRESTERA_PORT_MC_PKTS_RCV_CNT,
+ PRESTERA_PORT_PKTS_64L_CNT,
+ PRESTERA_PORT_PKTS_65TO127L_CNT,
+ PRESTERA_PORT_PKTS_128TO255L_CNT,
+ PRESTERA_PORT_PKTS_256TO511L_CNT,
+ PRESTERA_PORT_PKTS_512TO1023L_CNT,
+ PRESTERA_PORT_PKTS_1024TOMAXL_CNT,
+ PRESTERA_PORT_EXCESSIVE_COLLISIONS_CNT,
+ PRESTERA_PORT_MC_PKTS_SENT_CNT,
+ PRESTERA_PORT_BRDC_PKTS_SENT_CNT,
+ PRESTERA_PORT_FC_SENT_CNT,
+ PRESTERA_PORT_GOOD_FC_RCV_CNT,
+ PRESTERA_PORT_DROP_EVENTS_CNT,
+ PRESTERA_PORT_UNDERSIZE_PKTS_CNT,
+ PRESTERA_PORT_FRAGMENTS_PKTS_CNT,
+ PRESTERA_PORT_OVERSIZE_PKTS_CNT,
+ PRESTERA_PORT_JABBER_PKTS_CNT,
+ PRESTERA_PORT_MAC_RCV_ERROR_CNT,
+ PRESTERA_PORT_BAD_CRC_CNT,
+ PRESTERA_PORT_COLLISIONS_CNT,
+ PRESTERA_PORT_LATE_COLLISIONS_CNT,
+ PRESTERA_PORT_GOOD_UC_PKTS_RCV_CNT,
+ PRESTERA_PORT_GOOD_UC_PKTS_SENT_CNT,
+ PRESTERA_PORT_MULTIPLE_PKTS_SENT_CNT,
+ PRESTERA_PORT_DEFERRED_PKTS_SENT_CNT,
+ PRESTERA_PORT_GOOD_OCTETS_SENT_CNT,
+
+ PRESTERA_PORT_CNT_MAX
+};
+
+enum {
+ PRESTERA_FC_NONE,
+ PRESTERA_FC_SYMMETRIC,
+ PRESTERA_FC_ASYMMETRIC,
+ PRESTERA_FC_SYMM_ASYMM,
+};
+
+enum {
+ PRESTERA_POLICER_MODE_SR_TCM
+};
+
+enum {
+ PRESTERA_HW_FDB_ENTRY_TYPE_REG_PORT = 0,
+ PRESTERA_HW_FDB_ENTRY_TYPE_LAG = 1,
+ PRESTERA_HW_FDB_ENTRY_TYPE_MAX = 2,
+};
+
+struct prestera_fw_event_handler {
+ struct list_head list;
+ struct rcu_head rcu;
+ enum prestera_event_type type;
+ prestera_event_cb_t func;
+ void *arg;
+};
+
+enum {
+ PRESTERA_HW_FLOOD_DOMAIN_PORT_TYPE_REG_PORT = 0,
+ PRESTERA_HW_FLOOD_DOMAIN_PORT_TYPE_LAG = 1,
+ PRESTERA_HW_FLOOD_DOMAIN_PORT_TYPE_MAX = 2,
+};
+
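+/* Every request sent to the firmware starts with a prestera_msg_cmd
+ * header, and every reply starts with a prestera_msg_ret carrying the
+ * command status.
+ */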
+struct prestera_msg_cmd {
+ __le32 type;
+};
+
+struct prestera_msg_ret {
+ struct prestera_msg_cmd cmd;
+ __le32 status;
+};
+
+struct prestera_msg_common_req {
+ struct prestera_msg_cmd cmd;
+};
+
+struct prestera_msg_common_resp {
+ struct prestera_msg_ret ret;
+};
+
+struct prestera_msg_switch_attr_req {
+ struct prestera_msg_cmd cmd;
+ __le32 attr;
+ union {
+ __le32 ageing_timeout_ms;
+ struct {
+ u8 mac[ETH_ALEN];
+ u8 __pad[2];
+ };
+ } param;
+};
+
+struct prestera_msg_switch_init_resp {
+ struct prestera_msg_ret ret;
+ __le32 port_count;
+ __le32 mtu_max;
+ __le32 size_tbl_router_nexthop;
+ u8 switch_id;
+ u8 lag_max;
+ u8 lag_member_max;
+};
+
+struct prestera_msg_event_port_param {
+ union {
+ struct {
+ __le32 mode;
+ __le32 speed;
+ u8 oper;
+ u8 duplex;
+ u8 fc;
+ u8 fec;
+ } mac;
+ struct {
+ __le64 lmode_bmap;
+ u8 mdix;
+ u8 fc;
+ u8 __pad[2];
+		} __packed phy; /* must always be 12 bytes in size */
+ };
+};
+
+struct prestera_msg_port_cap_param {
+ __le64 link_mode;
+ u8 type;
+ u8 fec;
+ u8 fc;
+ u8 transceiver;
+};
+
+struct prestera_msg_port_flood_param {
+ u8 type;
+ u8 enable;
+ u8 __pad[2];
+};
+
+union prestera_msg_port_param {
+ __le32 mtu;
+ __le32 speed;
+ __le32 link_mode;
+ u8 admin_state;
+ u8 oper_state;
+ u8 mac[ETH_ALEN];
+ u8 accept_frm_type;
+ u8 learning;
+ u8 flood;
+ u8 type;
+ u8 duplex;
+ u8 fec;
+ u8 fc;
+ u8 br_locked;
+ union {
+ struct {
+ u8 admin;
+ u8 fc;
+ u8 ap_enable;
+ u8 __reserved[5];
+ union {
+ struct {
+ __le32 mode;
+ __le32 speed;
+ u8 inband;
+ u8 duplex;
+ u8 fec;
+ u8 fec_supp;
+ } reg_mode;
+ struct {
+ __le32 mode;
+ __le32 speed;
+ u8 fec;
+ u8 fec_supp;
+ u8 __pad[2];
+ } ap_modes[PRESTERA_AP_PORT_MAX];
+ };
+ } mac;
+ struct {
+ __le64 modes;
+ __le32 mode;
+ u8 admin;
+ u8 adv_enable;
+ u8 mdix;
+ u8 __pad;
+ } phy;
+ } link;
+
+ struct prestera_msg_port_cap_param cap;
+ struct prestera_msg_port_flood_param flood_ext;
+ struct prestera_msg_event_port_param link_evt;
+};
+
+struct prestera_msg_port_attr_req {
+ struct prestera_msg_cmd cmd;
+ __le32 attr;
+ __le32 port;
+ __le32 dev;
+ union prestera_msg_port_param param;
+};
+
+struct prestera_msg_port_attr_resp {
+ struct prestera_msg_ret ret;
+ union prestera_msg_port_param param;
+};
+
+struct prestera_msg_port_stats_resp {
+ struct prestera_msg_ret ret;
+ __le64 stats[PRESTERA_PORT_CNT_MAX];
+};
+
+struct prestera_msg_port_info_req {
+ struct prestera_msg_cmd cmd;
+ __le32 port;
+};
+
+struct prestera_msg_port_info_resp {
+ struct prestera_msg_ret ret;
+ __le32 hw_id;
+ __le32 dev_id;
+ __le16 fp_id;
+ u8 pad[2];
+};
+
+struct prestera_msg_vlan_req {
+ struct prestera_msg_cmd cmd;
+ __le32 port;
+ __le32 dev;
+ __le16 vid;
+ u8 is_member;
+ u8 is_tagged;
+};
+
+struct prestera_msg_fdb_req {
+ struct prestera_msg_cmd cmd;
+ __le32 flush_mode;
+ union {
+ struct {
+ __le32 port;
+ __le32 dev;
+ };
+ __le16 lag_id;
+ } dest;
+ __le16 vid;
+ u8 dest_type;
+ u8 dynamic;
+ u8 mac[ETH_ALEN];
+ u8 __pad[2];
+};
+
+struct prestera_msg_bridge_req {
+ struct prestera_msg_cmd cmd;
+ __le32 port;
+ __le32 dev;
+ __le16 bridge;
+ u8 pad[2];
+};
+
+struct prestera_msg_bridge_resp {
+ struct prestera_msg_ret ret;
+ __le16 bridge;
+ u8 pad[2];
+};
+
+struct prestera_msg_vtcam_create_req {
+ struct prestera_msg_cmd cmd;
+ __le32 keymask[__PRESTERA_ACL_RULE_MATCH_TYPE_MAX];
+ u8 direction;
+ u8 lookup;
+ u8 pad[2];
+};
+
+struct prestera_msg_vtcam_destroy_req {
+ struct prestera_msg_cmd cmd;
+ __le32 vtcam_id;
+};
+
+struct prestera_msg_vtcam_rule_add_req {
+ struct prestera_msg_cmd cmd;
+ __le32 key[__PRESTERA_ACL_RULE_MATCH_TYPE_MAX];
+ __le32 keymask[__PRESTERA_ACL_RULE_MATCH_TYPE_MAX];
+ __le32 vtcam_id;
+ __le32 prio;
+ __le32 n_act;
+};
+
+struct prestera_msg_vtcam_rule_del_req {
+ struct prestera_msg_cmd cmd;
+ __le32 vtcam_id;
+ __le32 id;
+};
+
+struct prestera_msg_vtcam_bind_req {
+ struct prestera_msg_cmd cmd;
+ union {
+ struct {
+ __le32 hw_id;
+ __le32 dev_id;
+ } port;
+ __le32 index;
+ };
+ __le32 vtcam_id;
+ __le16 pcl_id;
+ __le16 type;
+};
+
+struct prestera_msg_vtcam_resp {
+ struct prestera_msg_ret ret;
+ __le32 vtcam_id;
+ __le32 rule_id;
+};
+
+struct prestera_msg_acl_action {
+ __le32 id;
+ __le32 __reserved;
+ union {
+ struct {
+ __le32 index;
+ } jump;
+ struct {
+ __le32 id;
+ } police;
+ struct {
+ __le32 id;
+ } count;
+ __le32 reserved[6];
+ };
+};
+
+struct prestera_msg_counter_req {
+ struct prestera_msg_cmd cmd;
+ __le32 client;
+ __le32 block_id;
+ __le32 num_counters;
+};
+
+struct prestera_msg_counter_stats {
+ __le64 packets;
+ __le64 bytes;
+};
+
+struct prestera_msg_counter_resp {
+ struct prestera_msg_ret ret;
+ __le32 block_id;
+ __le32 offset;
+ __le32 num_counters;
+ __le32 done;
+ struct prestera_msg_counter_stats stats[];
+};
+
+struct prestera_msg_span_req {
+ struct prestera_msg_cmd cmd;
+ __le32 port;
+ __le32 dev;
+ u8 id;
+ u8 pad[3];
+};
+
+struct prestera_msg_span_resp {
+ struct prestera_msg_ret ret;
+ u8 id;
+ u8 pad[3];
+};
+
+struct prestera_msg_stp_req {
+ struct prestera_msg_cmd cmd;
+ __le32 port;
+ __le32 dev;
+ __le16 vid;
+ u8 state;
+ u8 __pad;
+};
+
+struct prestera_msg_rxtx_req {
+ struct prestera_msg_cmd cmd;
+ u8 use_sdma;
+ u8 pad[3];
+};
+
+struct prestera_msg_rxtx_resp {
+ struct prestera_msg_ret ret;
+ __le32 map_addr;
+};
+
+struct prestera_msg_iface {
+ union {
+ struct {
+ __le32 dev;
+ __le32 port;
+ };
+ __le16 lag_id;
+ };
+ __le16 vr_id;
+ __le16 vid;
+ u8 type;
+ u8 __pad[3];
+};
+
+struct prestera_msg_ip_addr {
+ union {
+ __be32 ipv4;
+ __be32 ipv6[4];
+ } u;
+ u8 v; /* e.g. PRESTERA_IPV4 */
+ u8 __pad[3];
+};
+
+struct prestera_msg_nh {
+ struct prestera_msg_iface oif;
+ __le32 hw_id;
+ u8 mac[ETH_ALEN];
+ u8 is_active;
+ u8 pad;
+};
+
+struct prestera_msg_rif_req {
+ struct prestera_msg_cmd cmd;
+ struct prestera_msg_iface iif;
+ __le32 mtu;
+ __le16 rif_id;
+ __le16 __reserved;
+ u8 mac[ETH_ALEN];
+ u8 __pad[2];
+};
+
+struct prestera_msg_rif_resp {
+ struct prestera_msg_ret ret;
+ __le16 rif_id;
+ u8 __pad[2];
+};
+
+struct prestera_msg_lpm_req {
+ struct prestera_msg_cmd cmd;
+ struct prestera_msg_ip_addr dst;
+ __le32 grp_id;
+ __le32 dst_len;
+ __le16 vr_id;
+ u8 __pad[2];
+};
+
+struct prestera_msg_nh_req {
+ struct prestera_msg_cmd cmd;
+ struct prestera_msg_nh nh[PRESTERA_NHGR_SIZE_MAX];
+ __le32 size;
+ __le32 grp_id;
+};
+
+struct prestera_msg_nh_chunk_req {
+ struct prestera_msg_cmd cmd;
+ __le32 offset;
+};
+
+struct prestera_msg_nh_chunk_resp {
+ struct prestera_msg_ret ret;
+ u8 hw_state[PRESTERA_MSG_CHUNK_SIZE];
+};
+
+struct prestera_msg_nh_grp_req {
+ struct prestera_msg_cmd cmd;
+ __le32 grp_id;
+ __le32 size;
+};
+
+struct prestera_msg_nh_grp_resp {
+ struct prestera_msg_ret ret;
+ __le32 grp_id;
+};
+
+struct prestera_msg_vr_req {
+ struct prestera_msg_cmd cmd;
+ __le16 vr_id;
+ u8 __pad[2];
+};
+
+struct prestera_msg_vr_resp {
+ struct prestera_msg_ret ret;
+ __le16 vr_id;
+ u8 __pad[2];
+};
+
+struct prestera_msg_lag_req {
+ struct prestera_msg_cmd cmd;
+ __le32 port;
+ __le32 dev;
+ __le16 lag_id;
+ u8 pad[2];
+};
+
+struct prestera_msg_cpu_code_counter_req {
+ struct prestera_msg_cmd cmd;
+ u8 counter_type;
+ u8 code;
+ u8 pad[2];
+};
+
+struct mvsw_msg_cpu_code_counter_ret {
+ struct prestera_msg_ret ret;
+ __le64 packet_count;
+};
+
+struct prestera_msg_policer_req {
+ struct prestera_msg_cmd cmd;
+ __le32 id;
+ union {
+ struct {
+ __le64 cir;
+ __le32 cbs;
+		} __packed sr_tcm; /* must always be 12 bytes in size */
+ __le32 reserved[6];
+ };
+ u8 mode;
+ u8 type;
+ u8 pad[2];
+};
+
+struct prestera_msg_policer_resp {
+ struct prestera_msg_ret ret;
+ __le32 id;
+};
+
+struct prestera_msg_event {
+ __le16 type;
+ __le16 id;
+};
+
+struct prestera_msg_event_port {
+ struct prestera_msg_event id;
+ __le32 port_id;
+ struct prestera_msg_event_port_param param;
+};
+
+union prestera_msg_event_fdb_param {
+ u8 mac[ETH_ALEN];
+};
+
+struct prestera_msg_event_fdb {
+ struct prestera_msg_event id;
+ __le32 vid;
+ union {
+ __le32 port_id;
+ __le16 lag_id;
+ } dest;
+ union prestera_msg_event_fdb_param param;
+ u8 dest_type;
+};
+
+struct prestera_msg_flood_domain_create_req {
+ struct prestera_msg_cmd cmd;
+};
+
+struct prestera_msg_flood_domain_create_resp {
+ struct prestera_msg_ret ret;
+ __le32 flood_domain_idx;
+};
+
+struct prestera_msg_flood_domain_destroy_req {
+ struct prestera_msg_cmd cmd;
+ __le32 flood_domain_idx;
+};
+
+struct prestera_msg_flood_domain_ports_set_req {
+ struct prestera_msg_cmd cmd;
+ __le32 flood_domain_idx;
+ __le32 ports_num;
+};
+
+struct prestera_msg_flood_domain_ports_reset_req {
+ struct prestera_msg_cmd cmd;
+ __le32 flood_domain_idx;
+};
+
+struct prestera_msg_flood_domain_port {
+ union {
+ struct {
+ __le32 port_num;
+ __le32 dev_num;
+ };
+ __le16 lag_id;
+ };
+ __le16 vid;
+ __le16 port_type;
+};
+
+struct prestera_msg_mdb_create_req {
+ struct prestera_msg_cmd cmd;
+ __le32 flood_domain_idx;
+ __le16 vid;
+ u8 mac[ETH_ALEN];
+};
+
+struct prestera_msg_mdb_destroy_req {
+ struct prestera_msg_cmd cmd;
+ __le32 flood_domain_idx;
+ __le16 vid;
+ u8 mac[ETH_ALEN];
+};
+
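+/* Compile-time checks that the request, response and event message
+ * structures keep the exact sizes the firmware wire format expects.
+ */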
+static void prestera_hw_build_tests(void)
+{
+ /* check requests */
+ BUILD_BUG_ON(sizeof(struct prestera_msg_common_req) != 4);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_switch_attr_req) != 16);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_port_attr_req) != 144);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_port_info_req) != 8);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_vlan_req) != 16);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_fdb_req) != 28);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_bridge_req) != 16);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_span_req) != 16);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_stp_req) != 16);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_rxtx_req) != 8);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_lag_req) != 16);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_cpu_code_counter_req) != 8);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_vtcam_create_req) != 84);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_vtcam_destroy_req) != 8);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_vtcam_rule_add_req) != 168);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_vtcam_rule_del_req) != 12);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_vtcam_bind_req) != 20);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_acl_action) != 32);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_counter_req) != 16);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_counter_stats) != 16);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_rif_req) != 36);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_vr_req) != 8);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_lpm_req) != 36);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_policer_req) != 36);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_flood_domain_create_req) != 4);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_flood_domain_destroy_req) != 8);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_flood_domain_ports_set_req) != 12);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_flood_domain_ports_reset_req) != 8);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_mdb_create_req) != 16);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_mdb_destroy_req) != 16);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_nh_req) != 124);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_nh_chunk_req) != 8);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_nh_grp_req) != 12);
+
+	/* structures that are part of req/resp fw messages */
+ BUILD_BUG_ON(sizeof(struct prestera_msg_iface) != 16);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_ip_addr) != 20);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_flood_domain_port) != 12);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_nh) != 28);
+
+ /* check responses */
+ BUILD_BUG_ON(sizeof(struct prestera_msg_common_resp) != 8);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_switch_init_resp) != 24);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_port_attr_resp) != 136);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_port_stats_resp) != 248);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_port_info_resp) != 20);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_bridge_resp) != 12);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_span_resp) != 12);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_rxtx_resp) != 12);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_vtcam_resp) != 16);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_counter_resp) != 24);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_rif_resp) != 12);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_vr_resp) != 12);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_policer_resp) != 12);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_flood_domain_create_resp) != 12);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_nh_chunk_resp) != 1032);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_nh_grp_resp) != 12);
+
+ /* check events */
+ BUILD_BUG_ON(sizeof(struct prestera_msg_event_port) != 20);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_event_fdb) != 20);
+}
+
+static u8 prestera_hw_mdix_to_eth(u8 mode);
+static void prestera_hw_remote_fc_to_eth(u8 fc, bool *pause, bool *asym_pause);
+
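+/* Send a typed command to the firmware and validate the reply: anything
+ * other than an ACK with PRESTERA_CMD_ACK_OK status is treated as an
+ * error.
+ */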
+static int __prestera_cmd_ret(struct prestera_switch *sw,
+ enum prestera_cmd_type_t type,
+ struct prestera_msg_cmd *cmd, size_t clen,
+ struct prestera_msg_ret *ret, size_t rlen,
+ int waitms)
+{
+ struct prestera_device *dev = sw->dev;
+ int err;
+
+ cmd->type = __cpu_to_le32(type);
+
+ err = dev->send_req(dev, 0, cmd, clen, ret, rlen, waitms);
+ if (err)
+ return err;
+
+ if (ret->cmd.type != __cpu_to_le32(PRESTERA_CMD_TYPE_ACK))
+ return -EBADE;
+ if (ret->status != __cpu_to_le32(PRESTERA_CMD_ACK_OK))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int prestera_cmd_ret(struct prestera_switch *sw,
+ enum prestera_cmd_type_t type,
+ struct prestera_msg_cmd *cmd, size_t clen,
+ struct prestera_msg_ret *ret, size_t rlen)
+{
+ return __prestera_cmd_ret(sw, type, cmd, clen, ret, rlen, 0);
+}
+
+static int prestera_cmd_ret_wait(struct prestera_switch *sw,
+ enum prestera_cmd_type_t type,
+ struct prestera_msg_cmd *cmd, size_t clen,
+ struct prestera_msg_ret *ret, size_t rlen,
+ int waitms)
+{
+ return __prestera_cmd_ret(sw, type, cmd, clen, ret, rlen, waitms);
+}
+
+static int prestera_cmd(struct prestera_switch *sw,
+ enum prestera_cmd_type_t type,
+ struct prestera_msg_cmd *cmd, size_t clen)
+{
+ struct prestera_msg_common_resp resp;
+
+ return prestera_cmd_ret(sw, type, cmd, clen, &resp.ret, sizeof(resp));
+}
+
+static int prestera_fw_parse_port_evt(void *msg, struct prestera_event *evt)
+{
+ struct prestera_msg_event_port *hw_evt;
+
+ hw_evt = (struct prestera_msg_event_port *)msg;
+
+ evt->port_evt.port_id = __le32_to_cpu(hw_evt->port_id);
+
+ if (evt->id == PRESTERA_PORT_EVENT_MAC_STATE_CHANGED) {
+ evt->port_evt.data.mac.oper = hw_evt->param.mac.oper;
+ evt->port_evt.data.mac.mode =
+ __le32_to_cpu(hw_evt->param.mac.mode);
+ evt->port_evt.data.mac.speed =
+ __le32_to_cpu(hw_evt->param.mac.speed);
+ evt->port_evt.data.mac.duplex = hw_evt->param.mac.duplex;
+ evt->port_evt.data.mac.fc = hw_evt->param.mac.fc;
+ evt->port_evt.data.mac.fec = hw_evt->param.mac.fec;
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int prestera_fw_parse_fdb_evt(void *msg, struct prestera_event *evt)
+{
+ struct prestera_msg_event_fdb *hw_evt = msg;
+
+ switch (hw_evt->dest_type) {
+ case PRESTERA_HW_FDB_ENTRY_TYPE_REG_PORT:
+ evt->fdb_evt.type = PRESTERA_FDB_ENTRY_TYPE_REG_PORT;
+ evt->fdb_evt.dest.port_id = __le32_to_cpu(hw_evt->dest.port_id);
+ break;
+ case PRESTERA_HW_FDB_ENTRY_TYPE_LAG:
+ evt->fdb_evt.type = PRESTERA_FDB_ENTRY_TYPE_LAG;
+ evt->fdb_evt.dest.lag_id = __le16_to_cpu(hw_evt->dest.lag_id);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ evt->fdb_evt.vid = __le32_to_cpu(hw_evt->vid);
+
+ ether_addr_copy(evt->fdb_evt.data.mac, hw_evt->param.mac);
+
+ return 0;
+}
+
+static struct prestera_fw_evt_parser {
+ int (*func)(void *msg, struct prestera_event *evt);
+} fw_event_parsers[PRESTERA_EVENT_TYPE_MAX] = {
+ [PRESTERA_EVENT_TYPE_PORT] = { .func = prestera_fw_parse_port_evt },
+ [PRESTERA_EVENT_TYPE_FDB] = { .func = prestera_fw_parse_fdb_evt },
+};
+
+static struct prestera_fw_event_handler *
+__find_event_handler(const struct prestera_switch *sw,
+ enum prestera_event_type type)
+{
+ struct prestera_fw_event_handler *eh;
+
+ list_for_each_entry_rcu(eh, &sw->event_handlers, list) {
+ if (eh->type == type)
+ return eh;
+ }
+
+ return NULL;
+}
+
+static int prestera_find_event_handler(const struct prestera_switch *sw,
+ enum prestera_event_type type,
+ struct prestera_fw_event_handler *eh)
+{
+ struct prestera_fw_event_handler *tmp;
+ int err = 0;
+
+ rcu_read_lock();
+ tmp = __find_event_handler(sw, type);
+ if (tmp)
+ *eh = *tmp;
+ else
+ err = -ENOENT;
+ rcu_read_unlock();
+
+ return err;
+}
+
+static int prestera_evt_recv(struct prestera_device *dev, void *buf, size_t size)
+{
+ struct prestera_switch *sw = dev->priv;
+ struct prestera_msg_event *msg = buf;
+ struct prestera_fw_event_handler eh;
+ struct prestera_event evt;
+ u16 msg_type;
+ int err;
+
+ msg_type = __le16_to_cpu(msg->type);
+ if (msg_type >= PRESTERA_EVENT_TYPE_MAX)
+ return -EINVAL;
+ if (!fw_event_parsers[msg_type].func)
+ return -ENOENT;
+
+ err = prestera_find_event_handler(sw, msg_type, &eh);
+ if (err)
+ return err;
+
+ evt.id = __le16_to_cpu(msg->id);
+
+ err = fw_event_parsers[msg_type].func(buf, &evt);
+ if (err)
+ return err;
+
+ eh.func(sw, &evt, eh.arg);
+
+ return 0;
+}
+
+static void prestera_pkt_recv(struct prestera_device *dev)
+{
+ struct prestera_switch *sw = dev->priv;
+ struct prestera_fw_event_handler eh;
+ struct prestera_event ev;
+ int err;
+
+ ev.id = PRESTERA_RXTX_EVENT_RCV_PKT;
+
+ err = prestera_find_event_handler(sw, PRESTERA_EVENT_TYPE_RXTX, &eh);
+ if (err)
+ return;
+
+ eh.func(sw, &ev, eh.arg);
+}
+
+static u8 prestera_hw_mdix_to_eth(u8 mode)
+{
+ switch (mode) {
+ case PRESTERA_PORT_TP_MDI:
+ return ETH_TP_MDI;
+ case PRESTERA_PORT_TP_MDIX:
+ return ETH_TP_MDI_X;
+ case PRESTERA_PORT_TP_AUTO:
+ return ETH_TP_MDI_AUTO;
+ default:
+ return ETH_TP_MDI_INVALID;
+ }
+}
+
+static u8 prestera_hw_mdix_from_eth(u8 mode)
+{
+ switch (mode) {
+ case ETH_TP_MDI:
+ return PRESTERA_PORT_TP_MDI;
+ case ETH_TP_MDI_X:
+ return PRESTERA_PORT_TP_MDIX;
+ case ETH_TP_MDI_AUTO:
+ return PRESTERA_PORT_TP_AUTO;
+ default:
+ return PRESTERA_PORT_TP_NA;
+ }
+}
+
+int prestera_hw_port_info_get(const struct prestera_port *port,
+ u32 *dev_id, u32 *hw_id, u16 *fp_id)
+{
+ struct prestera_msg_port_info_req req = {
+ .port = __cpu_to_le32(port->id),
+ };
+ struct prestera_msg_port_info_resp resp;
+ int err;
+
+ err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_INFO_GET,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ *dev_id = __le32_to_cpu(resp.dev_id);
+ *hw_id = __le32_to_cpu(resp.hw_id);
+ *fp_id = __le16_to_cpu(resp.fp_id);
+
+ return 0;
+}
+
+int prestera_hw_switch_mac_set(struct prestera_switch *sw, const char *mac)
+{
+ struct prestera_msg_switch_attr_req req = {
+ .attr = __cpu_to_le32(PRESTERA_CMD_SWITCH_ATTR_MAC),
+ };
+
+ ether_addr_copy(req.param.mac, mac);
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_SWITCH_ATTR_SET,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_switch_init(struct prestera_switch *sw)
+{
+ struct prestera_msg_switch_init_resp resp;
+ struct prestera_msg_common_req req;
+ int err;
+
+ INIT_LIST_HEAD(&sw->event_handlers);
+
+ prestera_hw_build_tests();
+
+ err = prestera_cmd_ret_wait(sw, PRESTERA_CMD_TYPE_SWITCH_INIT,
+ &req.cmd, sizeof(req),
+ &resp.ret, sizeof(resp),
+ PRESTERA_SWITCH_INIT_TIMEOUT_MS);
+ if (err)
+ return err;
+
+ sw->dev->recv_msg = prestera_evt_recv;
+ sw->dev->recv_pkt = prestera_pkt_recv;
+ sw->port_count = __le32_to_cpu(resp.port_count);
+ sw->mtu_min = PRESTERA_MIN_MTU;
+ sw->mtu_max = __le32_to_cpu(resp.mtu_max);
+ sw->id = resp.switch_id;
+ sw->lag_member_max = resp.lag_member_max;
+ sw->lag_max = resp.lag_max;
+ sw->size_tbl_router_nexthop =
+ __le32_to_cpu(resp.size_tbl_router_nexthop);
+
+ return 0;
+}
+
+void prestera_hw_switch_fini(struct prestera_switch *sw)
+{
+ WARN_ON(!list_empty(&sw->event_handlers));
+}
+
+int prestera_hw_switch_ageing_set(struct prestera_switch *sw, u32 ageing_ms)
+{
+ struct prestera_msg_switch_attr_req req = {
+ .attr = __cpu_to_le32(PRESTERA_CMD_SWITCH_ATTR_AGEING),
+ .param = {
+ .ageing_timeout_ms = __cpu_to_le32(ageing_ms),
+ },
+ };
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_SWITCH_ATTR_SET,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_port_mac_mode_get(const struct prestera_port *port,
+ u32 *mode, u32 *speed, u8 *duplex, u8 *fec)
+{
+ struct prestera_msg_port_attr_resp resp;
+ struct prestera_msg_port_attr_req req = {
+ .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_MAC_MODE),
+ .port = __cpu_to_le32(port->hw_id),
+ .dev = __cpu_to_le32(port->dev_id)
+ };
+ int err;
+
+ err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ if (mode)
+ *mode = __le32_to_cpu(resp.param.link_evt.mac.mode);
+
+ if (speed)
+ *speed = __le32_to_cpu(resp.param.link_evt.mac.speed);
+
+ if (duplex)
+ *duplex = resp.param.link_evt.mac.duplex;
+
+ if (fec)
+ *fec = resp.param.link_evt.mac.fec;
+
+ return err;
+}
+
+int prestera_hw_port_mac_mode_set(const struct prestera_port *port,
+ bool admin, u32 mode, u8 inband,
+ u32 speed, u8 duplex, u8 fec)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_MAC_MODE),
+ .port = __cpu_to_le32(port->hw_id),
+ .dev = __cpu_to_le32(port->dev_id),
+ .param = {
+ .link = {
+ .mac = {
+ .admin = admin,
+ .reg_mode.mode = __cpu_to_le32(mode),
+ .reg_mode.inband = inband,
+ .reg_mode.speed = __cpu_to_le32(speed),
+ .reg_mode.duplex = duplex,
+ .reg_mode.fec = fec
+ }
+ }
+ }
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_port_phy_mode_get(const struct prestera_port *port,
+ u8 *mdix, u64 *lmode_bmap,
+ bool *fc_pause, bool *fc_asym)
+{
+ struct prestera_msg_port_attr_resp resp;
+ struct prestera_msg_port_attr_req req = {
+ .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_PHY_MODE),
+ .port = __cpu_to_le32(port->hw_id),
+ .dev = __cpu_to_le32(port->dev_id)
+ };
+ int err;
+
+ err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ if (mdix)
+ *mdix = prestera_hw_mdix_to_eth(resp.param.link_evt.phy.mdix);
+
+ if (lmode_bmap)
+ *lmode_bmap = __le64_to_cpu(resp.param.link_evt.phy.lmode_bmap);
+
+ if (fc_pause && fc_asym)
+ prestera_hw_remote_fc_to_eth(resp.param.link_evt.phy.fc,
+ fc_pause, fc_asym);
+
+ return err;
+}
+
+int prestera_hw_port_phy_mode_set(const struct prestera_port *port,
+ bool admin, bool adv, u32 mode, u64 modes,
+ u8 mdix)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_PHY_MODE),
+ .port = __cpu_to_le32(port->hw_id),
+ .dev = __cpu_to_le32(port->dev_id),
+ .param = {
+ .link = {
+ .phy = {
+ .admin = admin,
+ .adv_enable = adv ? 1 : 0,
+ .mode = __cpu_to_le32(mode),
+ .modes = __cpu_to_le64(modes),
+ }
+ }
+ }
+ };
+
+ req.param.link.phy.mdix = prestera_hw_mdix_from_eth(mdix);
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_port_mtu_set(const struct prestera_port *port, u32 mtu)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_MTU),
+ .port = __cpu_to_le32(port->hw_id),
+ .dev = __cpu_to_le32(port->dev_id),
+ .param = {
+ .mtu = __cpu_to_le32(mtu),
+ }
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_port_mac_set(const struct prestera_port *port, const char *mac)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_MAC),
+ .port = __cpu_to_le32(port->hw_id),
+ .dev = __cpu_to_le32(port->dev_id),
+ };
+
+ ether_addr_copy(req.param.mac, mac);
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_port_accept_frm_type(struct prestera_port *port,
+ enum prestera_accept_frm_type type)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_ACCEPT_FRAME_TYPE),
+ .port = __cpu_to_le32(port->hw_id),
+ .dev = __cpu_to_le32(port->dev_id),
+ .param = {
+ .accept_frm_type = type,
+ }
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_port_cap_get(const struct prestera_port *port,
+ struct prestera_port_caps *caps)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_CAPABILITY),
+ .port = __cpu_to_le32(port->hw_id),
+ .dev = __cpu_to_le32(port->dev_id),
+ };
+ struct prestera_msg_port_attr_resp resp;
+ int err;
+
+ err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ caps->supp_link_modes = __le64_to_cpu(resp.param.cap.link_mode);
+ caps->transceiver = resp.param.cap.transceiver;
+ caps->supp_fec = resp.param.cap.fec;
+ caps->type = resp.param.cap.type;
+
+ return err;
+}
+
+static void prestera_hw_remote_fc_to_eth(u8 fc, bool *pause, bool *asym_pause)
+{
+ switch (fc) {
+ case PRESTERA_FC_SYMMETRIC:
+ *pause = true;
+ *asym_pause = false;
+ break;
+ case PRESTERA_FC_ASYMMETRIC:
+ *pause = false;
+ *asym_pause = true;
+ break;
+ case PRESTERA_FC_SYMM_ASYMM:
+ *pause = true;
+ *asym_pause = true;
+ break;
+ default:
+ *pause = false;
+ *asym_pause = false;
+ }
+}
+
+int prestera_hw_vtcam_create(struct prestera_switch *sw,
+ u8 lookup, const u32 *keymask, u32 *vtcam_id,
+ enum prestera_hw_vtcam_direction_t dir)
+{
+ int err;
+ struct prestera_msg_vtcam_resp resp;
+ struct prestera_msg_vtcam_create_req req = {
+ .lookup = lookup,
+ .direction = dir,
+ };
+
+ if (keymask)
+ memcpy(req.keymask, keymask, sizeof(req.keymask));
+ else
+ memset(req.keymask, 0, sizeof(req.keymask));
+
+ err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_VTCAM_CREATE,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ *vtcam_id = __le32_to_cpu(resp.vtcam_id);
+ return 0;
+}
+
+int prestera_hw_vtcam_destroy(struct prestera_switch *sw, u32 vtcam_id)
+{
+ struct prestera_msg_vtcam_destroy_req req = {
+ .vtcam_id = __cpu_to_le32(vtcam_id),
+ };
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_VTCAM_DESTROY,
+ &req.cmd, sizeof(req));
+}
+
+static int
+prestera_acl_rule_add_put_action(struct prestera_msg_acl_action *action,
+ struct prestera_acl_hw_action_info *info)
+{
+ action->id = __cpu_to_le32(info->id);
+
+ switch (info->id) {
+ case PRESTERA_ACL_RULE_ACTION_ACCEPT:
+ case PRESTERA_ACL_RULE_ACTION_DROP:
+ case PRESTERA_ACL_RULE_ACTION_TRAP:
+ /* just rule action id, no specific data */
+ break;
+ case PRESTERA_ACL_RULE_ACTION_JUMP:
+ action->jump.index = __cpu_to_le32(info->jump.index);
+ break;
+ case PRESTERA_ACL_RULE_ACTION_POLICE:
+ action->police.id = __cpu_to_le32(info->police.id);
+ break;
+ case PRESTERA_ACL_RULE_ACTION_COUNT:
+ action->count.id = __cpu_to_le32(info->count.id);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int prestera_hw_vtcam_rule_add(struct prestera_switch *sw,
+ u32 vtcam_id, u32 prio, void *key, void *keymask,
+ struct prestera_acl_hw_action_info *act,
+ u8 n_act, u32 *rule_id)
+{
+ struct prestera_msg_acl_action *actions_msg;
+ struct prestera_msg_vtcam_rule_add_req *req;
+ struct prestera_msg_vtcam_resp resp;
+ void *buff;
+ u32 size;
+ int err;
+ u8 i;
+
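+ /* The request is variable-length: a fixed header followed by n_act action entries. */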
+ size = sizeof(*req) + sizeof(*actions_msg) * n_act;
+
+ buff = kzalloc(size, GFP_KERNEL);
+ if (!buff)
+ return -ENOMEM;
+
+ req = buff;
+ req->n_act = __cpu_to_le32(n_act);
+ actions_msg = buff + sizeof(*req);
+
+ /* put acl matches into the message */
+ memcpy(req->key, key, sizeof(req->key));
+ memcpy(req->keymask, keymask, sizeof(req->keymask));
+
+ /* put acl actions into the message */
+ for (i = 0; i < n_act; i++) {
+ err = prestera_acl_rule_add_put_action(&actions_msg[i],
+ &act[i]);
+ if (err)
+ goto free_buff;
+ }
+
+ req->vtcam_id = __cpu_to_le32(vtcam_id);
+ req->prio = __cpu_to_le32(prio);
+
+ err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_VTCAM_RULE_ADD,
+ &req->cmd, size, &resp.ret, sizeof(resp));
+ if (err)
+ goto free_buff;
+
+ *rule_id = __le32_to_cpu(resp.rule_id);
+free_buff:
+ kfree(buff);
+ return err;
+}
+
+int prestera_hw_vtcam_rule_del(struct prestera_switch *sw,
+ u32 vtcam_id, u32 rule_id)
+{
+ struct prestera_msg_vtcam_rule_del_req req = {
+ .vtcam_id = __cpu_to_le32(vtcam_id),
+ .id = __cpu_to_le32(rule_id)
+ };
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_VTCAM_RULE_DELETE,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_vtcam_iface_bind(struct prestera_switch *sw,
+ struct prestera_acl_iface *iface,
+ u32 vtcam_id, u16 pcl_id)
+{
+ struct prestera_msg_vtcam_bind_req req = {
+ .vtcam_id = __cpu_to_le32(vtcam_id),
+ .type = __cpu_to_le16(iface->type),
+ .pcl_id = __cpu_to_le16(pcl_id)
+ };
+
+ if (iface->type == PRESTERA_ACL_IFACE_TYPE_PORT) {
+ req.port.dev_id = __cpu_to_le32(iface->port->dev_id);
+ req.port.hw_id = __cpu_to_le32(iface->port->hw_id);
+ } else {
+ req.index = __cpu_to_le32(iface->index);
+ }
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_VTCAM_IFACE_BIND,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_vtcam_iface_unbind(struct prestera_switch *sw,
+ struct prestera_acl_iface *iface,
+ u32 vtcam_id)
+{
+ struct prestera_msg_vtcam_bind_req req = {
+ .vtcam_id = __cpu_to_le32(vtcam_id),
+ .type = __cpu_to_le16(iface->type)
+ };
+
+ if (iface->type == PRESTERA_ACL_IFACE_TYPE_PORT) {
+ req.port.dev_id = __cpu_to_le32(iface->port->dev_id);
+ req.port.hw_id = __cpu_to_le32(iface->port->hw_id);
+ } else {
+ req.index = __cpu_to_le32(iface->index);
+ }
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_VTCAM_IFACE_UNBIND,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_span_get(const struct prestera_port *port, u8 *span_id)
+{
+ struct prestera_msg_span_resp resp;
+ struct prestera_msg_span_req req = {
+ .port = __cpu_to_le32(port->hw_id),
+ .dev = __cpu_to_le32(port->dev_id),
+ };
+ int err;
+
+ err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_SPAN_GET,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ *span_id = resp.id;
+
+ return 0;
+}
+
+int prestera_hw_span_bind(const struct prestera_port *port, u8 span_id,
+ bool ingress)
+{
+ struct prestera_msg_span_req req = {
+ .port = __cpu_to_le32(port->hw_id),
+ .dev = __cpu_to_le32(port->dev_id),
+ .id = span_id,
+ };
+ enum prestera_cmd_type_t cmd_type;
+
+ if (ingress)
+ cmd_type = PRESTERA_CMD_TYPE_SPAN_INGRESS_BIND;
+ else
+ cmd_type = PRESTERA_CMD_TYPE_SPAN_EGRESS_BIND;
+
+ return prestera_cmd(port->sw, cmd_type, &req.cmd, sizeof(req));
+}
+
+int prestera_hw_span_unbind(const struct prestera_port *port, bool ingress)
+{
+ struct prestera_msg_span_req req = {
+ .port = __cpu_to_le32(port->hw_id),
+ .dev = __cpu_to_le32(port->dev_id),
+ };
+ enum prestera_cmd_type_t cmd_type;
+
+ if (ingress)
+ cmd_type = PRESTERA_CMD_TYPE_SPAN_INGRESS_UNBIND;
+ else
+ cmd_type = PRESTERA_CMD_TYPE_SPAN_EGRESS_UNBIND;
+
+ return prestera_cmd(port->sw, cmd_type, &req.cmd, sizeof(req));
+}
+
+int prestera_hw_span_release(struct prestera_switch *sw, u8 span_id)
+{
+ struct prestera_msg_span_req req = {
+ .id = span_id
+ };
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_SPAN_RELEASE,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_port_type_get(const struct prestera_port *port, u8 *type)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_TYPE),
+ .port = __cpu_to_le32(port->hw_id),
+ .dev = __cpu_to_le32(port->dev_id),
+ };
+ struct prestera_msg_port_attr_resp resp;
+ int err;
+
+ err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ *type = resp.param.type;
+
+ return 0;
+}
+
+int prestera_hw_port_speed_get(const struct prestera_port *port, u32 *speed)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_SPEED),
+ .port = __cpu_to_le32(port->hw_id),
+ .dev = __cpu_to_le32(port->dev_id),
+ };
+ struct prestera_msg_port_attr_resp resp;
+ int err;
+
+ err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ *speed = __le32_to_cpu(resp.param.speed);
+
+ return 0;
+}
+
+int prestera_hw_port_autoneg_restart(struct prestera_port *port)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr =
+ __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_PHY_AUTONEG_RESTART),
+ .port = __cpu_to_le32(port->hw_id),
+ .dev = __cpu_to_le32(port->dev_id),
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_port_stats_get(const struct prestera_port *port,
+ struct prestera_port_stats *st)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_STATS),
+ .port = __cpu_to_le32(port->hw_id),
+ .dev = __cpu_to_le32(port->dev_id),
+ };
+ struct prestera_msg_port_stats_resp resp;
+ __le64 *hw = resp.stats;
+ int err;
+
+ err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
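+ /* Convert the little-endian firmware counters into host-order port statistics. */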
+ st->good_octets_received =
+ __le64_to_cpu(hw[PRESTERA_PORT_GOOD_OCTETS_RCV_CNT]);
+ st->bad_octets_received =
+ __le64_to_cpu(hw[PRESTERA_PORT_BAD_OCTETS_RCV_CNT]);
+ st->mac_trans_error =
+ __le64_to_cpu(hw[PRESTERA_PORT_MAC_TRANSMIT_ERR_CNT]);
+ st->broadcast_frames_received =
+ __le64_to_cpu(hw[PRESTERA_PORT_BRDC_PKTS_RCV_CNT]);
+ st->multicast_frames_received =
+ __le64_to_cpu(hw[PRESTERA_PORT_MC_PKTS_RCV_CNT]);
+ st->frames_64_octets = __le64_to_cpu(hw[PRESTERA_PORT_PKTS_64L_CNT]);
+ st->frames_65_to_127_octets =
+ __le64_to_cpu(hw[PRESTERA_PORT_PKTS_65TO127L_CNT]);
+ st->frames_128_to_255_octets =
+ __le64_to_cpu(hw[PRESTERA_PORT_PKTS_128TO255L_CNT]);
+ st->frames_256_to_511_octets =
+ __le64_to_cpu(hw[PRESTERA_PORT_PKTS_256TO511L_CNT]);
+ st->frames_512_to_1023_octets =
+ __le64_to_cpu(hw[PRESTERA_PORT_PKTS_512TO1023L_CNT]);
+ st->frames_1024_to_max_octets =
+ __le64_to_cpu(hw[PRESTERA_PORT_PKTS_1024TOMAXL_CNT]);
+ st->excessive_collision =
+ __le64_to_cpu(hw[PRESTERA_PORT_EXCESSIVE_COLLISIONS_CNT]);
+ st->multicast_frames_sent =
+ __le64_to_cpu(hw[PRESTERA_PORT_MC_PKTS_SENT_CNT]);
+ st->broadcast_frames_sent =
+ __le64_to_cpu(hw[PRESTERA_PORT_BRDC_PKTS_SENT_CNT]);
+ st->fc_sent = __le64_to_cpu(hw[PRESTERA_PORT_FC_SENT_CNT]);
+ st->fc_received = __le64_to_cpu(hw[PRESTERA_PORT_GOOD_FC_RCV_CNT]);
+ st->buffer_overrun = __le64_to_cpu(hw[PRESTERA_PORT_DROP_EVENTS_CNT]);
+ st->undersize = __le64_to_cpu(hw[PRESTERA_PORT_UNDERSIZE_PKTS_CNT]);
+ st->fragments = __le64_to_cpu(hw[PRESTERA_PORT_FRAGMENTS_PKTS_CNT]);
+ st->oversize = __le64_to_cpu(hw[PRESTERA_PORT_OVERSIZE_PKTS_CNT]);
+ st->jabber = __le64_to_cpu(hw[PRESTERA_PORT_JABBER_PKTS_CNT]);
+ st->rx_error_frame_received =
+ __le64_to_cpu(hw[PRESTERA_PORT_MAC_RCV_ERROR_CNT]);
+ st->bad_crc = __le64_to_cpu(hw[PRESTERA_PORT_BAD_CRC_CNT]);
+ st->collisions = __le64_to_cpu(hw[PRESTERA_PORT_COLLISIONS_CNT]);
+ st->late_collision =
+ __le64_to_cpu(hw[PRESTERA_PORT_LATE_COLLISIONS_CNT]);
+ st->unicast_frames_received =
+ __le64_to_cpu(hw[PRESTERA_PORT_GOOD_UC_PKTS_RCV_CNT]);
+ st->unicast_frames_sent =
+ __le64_to_cpu(hw[PRESTERA_PORT_GOOD_UC_PKTS_SENT_CNT]);
+ st->sent_multiple =
+ __le64_to_cpu(hw[PRESTERA_PORT_MULTIPLE_PKTS_SENT_CNT]);
+ st->sent_deferred =
+ __le64_to_cpu(hw[PRESTERA_PORT_DEFERRED_PKTS_SENT_CNT]);
+ st->good_octets_sent =
+ __le64_to_cpu(hw[PRESTERA_PORT_GOOD_OCTETS_SENT_CNT]);
+
+ return 0;
+}
+
+int prestera_hw_port_learning_set(struct prestera_port *port, bool enable)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_LEARNING),
+ .port = __cpu_to_le32(port->hw_id),
+ .dev = __cpu_to_le32(port->dev_id),
+ .param = {
+ .learning = enable,
+ }
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_port_uc_flood_set(const struct prestera_port *port, bool flood)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_FLOOD),
+ .port = __cpu_to_le32(port->hw_id),
+ .dev = __cpu_to_le32(port->dev_id),
+ .param = {
+ .flood_ext = {
+ .type = PRESTERA_PORT_FLOOD_TYPE_UC,
+ .enable = flood,
+ }
+ }
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_port_mc_flood_set(const struct prestera_port *port, bool flood)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_FLOOD),
+ .port = __cpu_to_le32(port->hw_id),
+ .dev = __cpu_to_le32(port->dev_id),
+ .param = {
+ .flood_ext = {
+ .type = PRESTERA_PORT_FLOOD_TYPE_MC,
+ .enable = flood,
+ }
+ }
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_port_br_locked_set(const struct prestera_port *port,
+ bool br_locked)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_LOCKED),
+ .port = __cpu_to_le32(port->hw_id),
+ .dev = __cpu_to_le32(port->dev_id),
+ .param = {
+ .br_locked = br_locked,
+ }
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_vlan_create(struct prestera_switch *sw, u16 vid)
+{
+ struct prestera_msg_vlan_req req = {
+ .vid = __cpu_to_le16(vid),
+ };
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_VLAN_CREATE,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_vlan_delete(struct prestera_switch *sw, u16 vid)
+{
+ struct prestera_msg_vlan_req req = {
+ .vid = __cpu_to_le16(vid),
+ };
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_VLAN_DELETE,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_vlan_port_set(struct prestera_port *port, u16 vid,
+ bool is_member, bool untagged)
+{
+ struct prestera_msg_vlan_req req = {
+ .port = __cpu_to_le32(port->hw_id),
+ .dev = __cpu_to_le32(port->dev_id),
+ .vid = __cpu_to_le16(vid),
+ .is_member = is_member,
+ .is_tagged = !untagged,
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_VLAN_PORT_SET,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_vlan_port_vid_set(struct prestera_port *port, u16 vid)
+{
+ struct prestera_msg_vlan_req req = {
+ .port = __cpu_to_le32(port->hw_id),
+ .dev = __cpu_to_le32(port->dev_id),
+ .vid = __cpu_to_le16(vid),
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_VLAN_PVID_SET,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_vlan_port_stp_set(struct prestera_port *port, u16 vid, u8 state)
+{
+ struct prestera_msg_stp_req req = {
+ .port = __cpu_to_le32(port->hw_id),
+ .dev = __cpu_to_le32(port->dev_id),
+ .vid = __cpu_to_le16(vid),
+ .state = state,
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_STP_PORT_SET,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_fdb_add(struct prestera_port *port, const unsigned char *mac,
+ u16 vid, bool dynamic)
+{
+ struct prestera_msg_fdb_req req = {
+ .dest = {
+ .dev = __cpu_to_le32(port->dev_id),
+ .port = __cpu_to_le32(port->hw_id),
+ },
+ .vid = __cpu_to_le16(vid),
+ .dynamic = dynamic,
+ };
+
+ ether_addr_copy(req.mac, mac);
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_FDB_ADD,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_fdb_del(struct prestera_port *port, const unsigned char *mac,
+ u16 vid)
+{
+ struct prestera_msg_fdb_req req = {
+ .dest = {
+ .dev = __cpu_to_le32(port->dev_id),
+ .port = __cpu_to_le32(port->hw_id),
+ },
+ .vid = __cpu_to_le16(vid),
+ };
+
+ ether_addr_copy(req.mac, mac);
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_FDB_DELETE,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_lag_fdb_add(struct prestera_switch *sw, u16 lag_id,
+ const unsigned char *mac, u16 vid, bool dynamic)
+{
+ struct prestera_msg_fdb_req req = {
+ .dest_type = PRESTERA_HW_FDB_ENTRY_TYPE_LAG,
+ .dest = {
+ .lag_id = __cpu_to_le16(lag_id),
+ },
+ .vid = __cpu_to_le16(vid),
+ .dynamic = dynamic,
+ };
+
+ ether_addr_copy(req.mac, mac);
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_FDB_ADD,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_lag_fdb_del(struct prestera_switch *sw, u16 lag_id,
+ const unsigned char *mac, u16 vid)
+{
+ struct prestera_msg_fdb_req req = {
+ .dest_type = PRESTERA_HW_FDB_ENTRY_TYPE_LAG,
+ .dest = {
+ .lag_id = __cpu_to_le16(lag_id),
+ },
+ .vid = __cpu_to_le16(vid),
+ };
+
+ ether_addr_copy(req.mac, mac);
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_FDB_DELETE,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_fdb_flush_port(struct prestera_port *port, u32 mode)
+{
+ struct prestera_msg_fdb_req req = {
+ .dest = {
+ .dev = __cpu_to_le32(port->dev_id),
+ .port = __cpu_to_le32(port->hw_id),
+ },
+ .flush_mode = __cpu_to_le32(mode),
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_FDB_FLUSH_PORT,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_fdb_flush_vlan(struct prestera_switch *sw, u16 vid, u32 mode)
+{
+ struct prestera_msg_fdb_req req = {
+ .vid = __cpu_to_le16(vid),
+ .flush_mode = __cpu_to_le32(mode),
+ };
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_FDB_FLUSH_VLAN,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_fdb_flush_port_vlan(struct prestera_port *port, u16 vid,
+ u32 mode)
+{
+ struct prestera_msg_fdb_req req = {
+ .dest = {
+ .dev = __cpu_to_le32(port->dev_id),
+ .port = __cpu_to_le32(port->hw_id),
+ },
+ .vid = __cpu_to_le16(vid),
+ .flush_mode = __cpu_to_le32(mode),
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_FDB_FLUSH_PORT_VLAN,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_fdb_flush_lag(struct prestera_switch *sw, u16 lag_id,
+ u32 mode)
+{
+ struct prestera_msg_fdb_req req = {
+ .dest_type = PRESTERA_HW_FDB_ENTRY_TYPE_LAG,
+ .dest = {
+ .lag_id = __cpu_to_le16(lag_id),
+ },
+ .flush_mode = __cpu_to_le32(mode),
+ };
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_FDB_FLUSH_PORT,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_fdb_flush_lag_vlan(struct prestera_switch *sw,
+ u16 lag_id, u16 vid, u32 mode)
+{
+ struct prestera_msg_fdb_req req = {
+ .dest_type = PRESTERA_HW_FDB_ENTRY_TYPE_LAG,
+ .dest = {
+ .lag_id = __cpu_to_le16(lag_id),
+ },
+ .vid = __cpu_to_le16(vid),
+ .flush_mode = __cpu_to_le32(mode),
+ };
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_FDB_FLUSH_PORT_VLAN,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_bridge_create(struct prestera_switch *sw, u16 *bridge_id)
+{
+ struct prestera_msg_bridge_resp resp;
+ struct prestera_msg_bridge_req req;
+ int err;
+
+ err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_BRIDGE_CREATE,
+ &req.cmd, sizeof(req),
+ &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ *bridge_id = __le16_to_cpu(resp.bridge);
+
+ return 0;
+}
+
+int prestera_hw_bridge_delete(struct prestera_switch *sw, u16 bridge_id)
+{
+ struct prestera_msg_bridge_req req = {
+ .bridge = __cpu_to_le16(bridge_id),
+ };
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_BRIDGE_DELETE,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_bridge_port_add(struct prestera_port *port, u16 bridge_id)
+{
+ struct prestera_msg_bridge_req req = {
+ .bridge = __cpu_to_le16(bridge_id),
+ .port = __cpu_to_le32(port->hw_id),
+ .dev = __cpu_to_le32(port->dev_id),
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_BRIDGE_PORT_ADD,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_bridge_port_delete(struct prestera_port *port, u16 bridge_id)
+{
+ struct prestera_msg_bridge_req req = {
+ .bridge = __cpu_to_le16(bridge_id),
+ .port = __cpu_to_le32(port->hw_id),
+ .dev = __cpu_to_le32(port->dev_id),
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_BRIDGE_PORT_DELETE,
+ &req.cmd, sizeof(req));
+}
+
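+/* Encode a router interface (port, VLAN or LAG) into its firmware message form. */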
+static int prestera_iface_to_msg(struct prestera_iface *iface,
+ struct prestera_msg_iface *msg_if)
+{
+ switch (iface->type) {
+ case PRESTERA_IF_PORT_E:
+ case PRESTERA_IF_VID_E:
+ msg_if->port = __cpu_to_le32(iface->dev_port.port_num);
+ msg_if->dev = __cpu_to_le32(iface->dev_port.hw_dev_num);
+ break;
+ case PRESTERA_IF_LAG_E:
+ msg_if->lag_id = __cpu_to_le16(iface->lag_id);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ msg_if->vr_id = __cpu_to_le16(iface->vr_id);
+ msg_if->vid = __cpu_to_le16(iface->vlan_id);
+ msg_if->type = iface->type;
+ return 0;
+}
+
+int prestera_hw_rif_create(struct prestera_switch *sw,
+ struct prestera_iface *iif, u8 *mac, u16 *rif_id)
+{
+ struct prestera_msg_rif_resp resp;
+ struct prestera_msg_rif_req req;
+ int err;
+
+ memcpy(req.mac, mac, ETH_ALEN);
+
+ err = prestera_iface_to_msg(iif, &req.iif);
+ if (err)
+ return err;
+
+ err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_ROUTER_RIF_CREATE,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ *rif_id = __le16_to_cpu(resp.rif_id);
+ return err;
+}
+
+int prestera_hw_rif_delete(struct prestera_switch *sw, u16 rif_id,
+ struct prestera_iface *iif)
+{
+ struct prestera_msg_rif_req req = {
+ .rif_id = __cpu_to_le16(rif_id),
+ };
+ int err;
+
+ err = prestera_iface_to_msg(iif, &req.iif);
+ if (err)
+ return err;
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_ROUTER_RIF_DELETE, &req.cmd,
+ sizeof(req));
+}
+
+int prestera_hw_vr_create(struct prestera_switch *sw, u16 *vr_id)
+{
+ struct prestera_msg_vr_resp resp;
+ struct prestera_msg_vr_req req;
+ int err;
+
+ err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_ROUTER_VR_CREATE,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ *vr_id = __le16_to_cpu(resp.vr_id);
+ return err;
+}
+
+int prestera_hw_vr_delete(struct prestera_switch *sw, u16 vr_id)
+{
+ struct prestera_msg_vr_req req = {
+ .vr_id = __cpu_to_le16(vr_id),
+ };
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_ROUTER_VR_DELETE, &req.cmd,
+ sizeof(req));
+}
+
+int prestera_hw_lpm_add(struct prestera_switch *sw, u16 vr_id,
+ __be32 dst, u32 dst_len, u32 grp_id)
+{
+ struct prestera_msg_lpm_req req = {
+ .dst_len = __cpu_to_le32(dst_len),
+ .vr_id = __cpu_to_le16(vr_id),
+ .grp_id = __cpu_to_le32(grp_id),
+ .dst.u.ipv4 = dst
+ };
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_ROUTER_LPM_ADD, &req.cmd,
+ sizeof(req));
+}
+
+int prestera_hw_lpm_del(struct prestera_switch *sw, u16 vr_id,
+ __be32 dst, u32 dst_len)
+{
+ struct prestera_msg_lpm_req req = {
+ .dst_len = __cpu_to_le32(dst_len),
+ .vr_id = __cpu_to_le16(vr_id),
+ .dst.u.ipv4 = dst
+ };
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_ROUTER_LPM_DELETE, &req.cmd,
+ sizeof(req));
+}
+
+int prestera_hw_nh_entries_set(struct prestera_switch *sw, int count,
+ struct prestera_neigh_info *nhs, u32 grp_id)
+{
+ struct prestera_msg_nh_req req = { .size = __cpu_to_le32((u32)count),
+ .grp_id = __cpu_to_le32(grp_id) };
+ int i, err;
+
+ for (i = 0; i < count; i++) {
+ req.nh[i].is_active = nhs[i].connected;
+ memcpy(&req.nh[i].mac, nhs[i].ha, ETH_ALEN);
+ err = prestera_iface_to_msg(&nhs[i].iface, &req.nh[i].oif);
+ if (err)
+ return err;
+ }
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_ROUTER_NH_GRP_SET, &req.cmd,
+ sizeof(req));
+}
+
+int prestera_hw_nhgrp_blk_get(struct prestera_switch *sw,
+ u8 *hw_state, u32 buf_size /* Buffer in bytes */)
+{
+ static struct prestera_msg_nh_chunk_resp resp;
+ struct prestera_msg_nh_chunk_req req;
+ u32 buf_offset;
+ int err;
+
+ memset(&hw_state[0], 0, buf_size);
+ buf_offset = 0;
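+ /* Read the nexthop group hardware state in PRESTERA_MSG_CHUNK_SIZE byte chunks. */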
+ while (1) {
+ if (buf_offset >= buf_size)
+ break;
+
+ memset(&req, 0, sizeof(req));
+ req.offset = __cpu_to_le32(buf_offset * 8); /* 8 bits in u8 */
+ err = prestera_cmd_ret(sw,
+ PRESTERA_CMD_TYPE_ROUTER_NH_GRP_BLK_GET,
+ &req.cmd, sizeof(req), &resp.ret,
+ sizeof(resp));
+ if (err)
+ return err;
+
+ memcpy(&hw_state[buf_offset], &resp.hw_state[0],
+ buf_offset + PRESTERA_MSG_CHUNK_SIZE > buf_size ?
+ buf_size - buf_offset : PRESTERA_MSG_CHUNK_SIZE);
+ buf_offset += PRESTERA_MSG_CHUNK_SIZE;
+ }
+
+ return 0;
+}
+
+int prestera_hw_nh_group_create(struct prestera_switch *sw, u16 nh_count,
+ u32 *grp_id)
+{
+ struct prestera_msg_nh_grp_req req = { .size = __cpu_to_le32((u32)nh_count) };
+ struct prestera_msg_nh_grp_resp resp;
+ int err;
+
+ err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_ROUTER_NH_GRP_ADD,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ *grp_id = __le32_to_cpu(resp.grp_id);
+ return err;
+}
+
+int prestera_hw_nh_group_delete(struct prestera_switch *sw, u16 nh_count,
+ u32 grp_id)
+{
+ struct prestera_msg_nh_grp_req req = {
+ .grp_id = __cpu_to_le32(grp_id),
+ .size = __cpu_to_le32(nh_count)
+ };
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_ROUTER_NH_GRP_DELETE,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_rxtx_init(struct prestera_switch *sw,
+ struct prestera_rxtx_params *params)
+{
+ struct prestera_msg_rxtx_resp resp;
+ struct prestera_msg_rxtx_req req;
+ int err;
+
+ req.use_sdma = params->use_sdma;
+
+ err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_RXTX_INIT,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ params->map_addr = __le32_to_cpu(resp.map_addr);
+
+ return 0;
+}
+
+int prestera_hw_lag_member_add(struct prestera_port *port, u16 lag_id)
+{
+ struct prestera_msg_lag_req req = {
+ .port = __cpu_to_le32(port->hw_id),
+ .dev = __cpu_to_le32(port->dev_id),
+ .lag_id = __cpu_to_le16(lag_id),
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_LAG_MEMBER_ADD,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_lag_member_del(struct prestera_port *port, u16 lag_id)
+{
+ struct prestera_msg_lag_req req = {
+ .port = __cpu_to_le32(port->hw_id),
+ .dev = __cpu_to_le32(port->dev_id),
+ .lag_id = __cpu_to_le16(lag_id),
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_LAG_MEMBER_DELETE,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_lag_member_enable(struct prestera_port *port, u16 lag_id,
+ bool enable)
+{
+ struct prestera_msg_lag_req req = {
+ .port = __cpu_to_le32(port->hw_id),
+ .dev = __cpu_to_le32(port->dev_id),
+ .lag_id = __cpu_to_le16(lag_id),
+ };
+ u32 cmd;
+
+ cmd = enable ? PRESTERA_CMD_TYPE_LAG_MEMBER_ENABLE :
+ PRESTERA_CMD_TYPE_LAG_MEMBER_DISABLE;
+
+ return prestera_cmd(port->sw, cmd, &req.cmd, sizeof(req));
+}
+
+int
+prestera_hw_cpu_code_counters_get(struct prestera_switch *sw, u8 code,
+ enum prestera_hw_cpu_code_cnt_t counter_type,
+ u64 *packet_count)
+{
+ struct prestera_msg_cpu_code_counter_req req = {
+ .counter_type = counter_type,
+ .code = code,
+ };
+ struct mvsw_msg_cpu_code_counter_ret resp;
+ int err;
+
+ err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_CPU_CODE_COUNTERS_GET,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ *packet_count = __le64_to_cpu(resp.packet_count);
+
+ return 0;
+}
+
+int prestera_hw_event_handler_register(struct prestera_switch *sw,
+ enum prestera_event_type type,
+ prestera_event_cb_t fn,
+ void *arg)
+{
+ struct prestera_fw_event_handler *eh;
+
+ eh = __find_event_handler(sw, type);
+ if (eh)
+ return -EEXIST;
+
+ eh = kmalloc(sizeof(*eh), GFP_KERNEL);
+ if (!eh)
+ return -ENOMEM;
+
+ eh->type = type;
+ eh->func = fn;
+ eh->arg = arg;
+
+ INIT_LIST_HEAD(&eh->list);
+
+ list_add_rcu(&eh->list, &sw->event_handlers);
+
+ return 0;
+}
+
+void prestera_hw_event_handler_unregister(struct prestera_switch *sw,
+ enum prestera_event_type type,
+ prestera_event_cb_t fn)
+{
+ struct prestera_fw_event_handler *eh;
+
+ eh = __find_event_handler(sw, type);
+ if (!eh)
+ return;
+
+ list_del_rcu(&eh->list);
+ kfree_rcu(eh, rcu);
+}
+
+int prestera_hw_counter_trigger(struct prestera_switch *sw, u32 block_id)
+{
+ struct prestera_msg_counter_req req = {
+ .block_id = __cpu_to_le32(block_id)
+ };
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_COUNTER_TRIGGER,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_counter_abort(struct prestera_switch *sw)
+{
+ struct prestera_msg_counter_req req;
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_COUNTER_ABORT,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_counters_get(struct prestera_switch *sw, u32 idx,
+ u32 *len, bool *done,
+ struct prestera_counter_stats *stats)
+{
+ struct prestera_msg_counter_resp *resp;
+ struct prestera_msg_counter_req req = {
+ .block_id = __cpu_to_le32(idx),
+ .num_counters = __cpu_to_le32(*len),
+ };
+ size_t size = struct_size(resp, stats, *len);
+ int err, i;
+
+ resp = kmalloc(size, GFP_KERNEL);
+ if (!resp)
+ return -ENOMEM;
+
+ err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_COUNTER_GET,
+ &req.cmd, sizeof(req), &resp->ret, size);
+ if (err)
+ goto free_buff;
+
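+ /* Accumulate the returned counters; the response reports how many entries were read. */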
+ for (i = 0; i < __le32_to_cpu(resp->num_counters); i++) {
+ stats[i].packets += __le64_to_cpu(resp->stats[i].packets);
+ stats[i].bytes += __le64_to_cpu(resp->stats[i].bytes);
+ }
+
+ *len = __le32_to_cpu(resp->num_counters);
+ *done = __le32_to_cpu(resp->done);
+
+free_buff:
+ kfree(resp);
+ return err;
+}
+
+int prestera_hw_counter_block_get(struct prestera_switch *sw,
+ u32 client, u32 *block_id, u32 *offset,
+ u32 *num_counters)
+{
+ struct prestera_msg_counter_resp resp;
+ struct prestera_msg_counter_req req = {
+ .client = __cpu_to_le32(client)
+ };
+ int err;
+
+ err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_COUNTER_BLOCK_GET,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ *block_id = __le32_to_cpu(resp.block_id);
+ *offset = __le32_to_cpu(resp.offset);
+ *num_counters = __le32_to_cpu(resp.num_counters);
+
+ return 0;
+}
+
+int prestera_hw_counter_block_release(struct prestera_switch *sw,
+ u32 block_id)
+{
+ struct prestera_msg_counter_req req = {
+ .block_id = __cpu_to_le32(block_id)
+ };
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_COUNTER_BLOCK_RELEASE,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_counter_clear(struct prestera_switch *sw, u32 block_id,
+ u32 counter_id)
+{
+ struct prestera_msg_counter_req req = {
+ .block_id = __cpu_to_le32(block_id),
+ .num_counters = __cpu_to_le32(counter_id)
+ };
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_COUNTER_CLEAR,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_policer_create(struct prestera_switch *sw, u8 type,
+ u32 *policer_id)
+{
+ struct prestera_msg_policer_resp resp;
+ struct prestera_msg_policer_req req = {
+ .type = type
+ };
+ int err;
+
+ err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_POLICER_CREATE,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ *policer_id = __le32_to_cpu(resp.id);
+ return 0;
+}
+
+int prestera_hw_policer_release(struct prestera_switch *sw,
+ u32 policer_id)
+{
+ struct prestera_msg_policer_req req = {
+ .id = __cpu_to_le32(policer_id)
+ };
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_POLICER_RELEASE,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_policer_sr_tcm_set(struct prestera_switch *sw,
+ u32 policer_id, u64 cir, u32 cbs)
+{
+ struct prestera_msg_policer_req req = {
+ .mode = PRESTERA_POLICER_MODE_SR_TCM,
+ .id = __cpu_to_le32(policer_id),
+ .sr_tcm = {
+ .cir = __cpu_to_le64(cir),
+ .cbs = __cpu_to_le32(cbs)
+ }
+ };
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_POLICER_SET,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_flood_domain_create(struct prestera_flood_domain *domain)
+{
+ struct prestera_msg_flood_domain_create_resp resp;
+ struct prestera_msg_flood_domain_create_req req;
+ int err;
+
+ err = prestera_cmd_ret(domain->sw,
+ PRESTERA_CMD_TYPE_FLOOD_DOMAIN_CREATE, &req.cmd,
+ sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ domain->idx = __le32_to_cpu(resp.flood_domain_idx);
+
+ return 0;
+}
+
+int prestera_hw_flood_domain_destroy(struct prestera_flood_domain *domain)
+{
+ struct prestera_msg_flood_domain_destroy_req req = {
+ .flood_domain_idx = __cpu_to_le32(domain->idx),
+ };
+
+ return prestera_cmd(domain->sw, PRESTERA_CMD_TYPE_FLOOD_DOMAIN_DESTROY,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_flood_domain_ports_set(struct prestera_flood_domain *domain)
+{
+ struct prestera_flood_domain_port *flood_domain_port;
+ struct prestera_msg_flood_domain_ports_set_req *req;
+ struct prestera_msg_flood_domain_port *ports;
+ struct prestera_switch *sw = domain->sw;
+ struct prestera_port *port;
+ u32 ports_num = 0;
+ int buf_size;
+ void *buff;
+ u16 lag_id;
+ int err;
+
+ list_for_each_entry(flood_domain_port, &domain->flood_domain_port_list,
+ flood_domain_port_node)
+ ports_num++;
+
+ if (!ports_num)
+ return -EINVAL;
+
+ buf_size = sizeof(*req) + sizeof(*ports) * ports_num;
+
+ buff = kmalloc(buf_size, GFP_KERNEL);
+ if (!buff)
+ return -ENOMEM;
+
+ req = buff;
+ ports = buff + sizeof(*req);
+
+ req->flood_domain_idx = __cpu_to_le32(domain->idx);
+ req->ports_num = __cpu_to_le32(ports_num);
+
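+ /* Fill one entry per member: LAG netdevs are referenced by lag_id, ports by dev/hw id. */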
+ list_for_each_entry(flood_domain_port, &domain->flood_domain_port_list,
+ flood_domain_port_node) {
+ if (netif_is_lag_master(flood_domain_port->dev)) {
+ if (prestera_lag_id(sw, flood_domain_port->dev,
+ &lag_id)) {
+ kfree(buff);
+ return -EINVAL;
+ }
+
+ ports->port_type =
+ __cpu_to_le16(PRESTERA_HW_FLOOD_DOMAIN_PORT_TYPE_LAG);
+ ports->lag_id = __cpu_to_le16(lag_id);
+ } else {
+ port = prestera_port_dev_lower_find(flood_domain_port->dev);
+
+ ports->port_type =
+ __cpu_to_le16(PRESTERA_HW_FDB_ENTRY_TYPE_REG_PORT);
+ ports->dev_num = __cpu_to_le32(port->dev_id);
+ ports->port_num = __cpu_to_le32(port->hw_id);
+ }
+
+ ports->vid = __cpu_to_le16(flood_domain_port->vid);
+
+ ports++;
+ }
+
+ err = prestera_cmd(sw, PRESTERA_CMD_TYPE_FLOOD_DOMAIN_PORTS_SET,
+ &req->cmd, buf_size);
+
+ kfree(buff);
+
+ return err;
+}
+
+int prestera_hw_flood_domain_ports_reset(struct prestera_flood_domain *domain)
+{
+ struct prestera_msg_flood_domain_ports_reset_req req = {
+ .flood_domain_idx = __cpu_to_le32(domain->idx),
+ };
+
+ return prestera_cmd(domain->sw,
+ PRESTERA_CMD_TYPE_FLOOD_DOMAIN_PORTS_RESET, &req.cmd,
+ sizeof(req));
+}
+
+int prestera_hw_mdb_create(struct prestera_mdb_entry *mdb)
+{
+ struct prestera_msg_mdb_create_req req = {
+ .flood_domain_idx = __cpu_to_le32(mdb->flood_domain->idx),
+ .vid = __cpu_to_le16(mdb->vid),
+ };
+
+ memcpy(req.mac, mdb->addr, ETH_ALEN);
+
+ return prestera_cmd(mdb->sw, PRESTERA_CMD_TYPE_MDB_CREATE, &req.cmd,
+ sizeof(req));
+}
+
+int prestera_hw_mdb_destroy(struct prestera_mdb_entry *mdb)
+{
+ struct prestera_msg_mdb_destroy_req req = {
+ .flood_domain_idx = __cpu_to_le32(mdb->flood_domain->idx),
+ .vid = __cpu_to_le16(mdb->vid),
+ };
+
+ memcpy(req.mac, mdb->addr, ETH_ALEN);
+
+ return prestera_cmd(mdb->sw, PRESTERA_CMD_TYPE_MDB_DESTROY, &req.cmd,
+ sizeof(req));
+}
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_hw.h b/drivers/net/ethernet/marvell/prestera/prestera_hw.h
new file mode 100644
index 000000000..0a929279e
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_hw.h
@@ -0,0 +1,330 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved. */
+
+#ifndef _PRESTERA_HW_H_
+#define _PRESTERA_HW_H_
+
+#include <linux/types.h>
+#include "prestera_acl.h"
+
+enum prestera_accept_frm_type {
+ PRESTERA_ACCEPT_FRAME_TYPE_TAGGED,
+ PRESTERA_ACCEPT_FRAME_TYPE_UNTAGGED,
+ PRESTERA_ACCEPT_FRAME_TYPE_ALL,
+};
+
+enum prestera_fdb_flush_mode {
+ PRESTERA_FDB_FLUSH_MODE_DYNAMIC = BIT(0),
+ PRESTERA_FDB_FLUSH_MODE_STATIC = BIT(1),
+ PRESTERA_FDB_FLUSH_MODE_ALL = PRESTERA_FDB_FLUSH_MODE_DYNAMIC
+ | PRESTERA_FDB_FLUSH_MODE_STATIC,
+};
+
+enum {
+ PRESTERA_MAC_MODE_INTERNAL,
+ PRESTERA_MAC_MODE_SGMII,
+ PRESTERA_MAC_MODE_1000BASE_X,
+ PRESTERA_MAC_MODE_KR,
+ PRESTERA_MAC_MODE_KR2,
+ PRESTERA_MAC_MODE_KR4,
+ PRESTERA_MAC_MODE_CR,
+ PRESTERA_MAC_MODE_CR2,
+ PRESTERA_MAC_MODE_CR4,
+ PRESTERA_MAC_MODE_SR_LR,
+ PRESTERA_MAC_MODE_SR_LR2,
+ PRESTERA_MAC_MODE_SR_LR4,
+
+ PRESTERA_MAC_MODE_MAX
+};
+
+enum {
+ PRESTERA_LINK_MODE_10baseT_Half,
+ PRESTERA_LINK_MODE_10baseT_Full,
+ PRESTERA_LINK_MODE_100baseT_Half,
+ PRESTERA_LINK_MODE_100baseT_Full,
+ PRESTERA_LINK_MODE_1000baseT_Half,
+ PRESTERA_LINK_MODE_1000baseT_Full,
+ PRESTERA_LINK_MODE_1000baseX_Full,
+ PRESTERA_LINK_MODE_1000baseKX_Full,
+ PRESTERA_LINK_MODE_2500baseX_Full,
+ PRESTERA_LINK_MODE_10GbaseKR_Full,
+ PRESTERA_LINK_MODE_10GbaseSR_Full,
+ PRESTERA_LINK_MODE_10GbaseLR_Full,
+ PRESTERA_LINK_MODE_20GbaseKR2_Full,
+ PRESTERA_LINK_MODE_25GbaseCR_Full,
+ PRESTERA_LINK_MODE_25GbaseKR_Full,
+ PRESTERA_LINK_MODE_25GbaseSR_Full,
+ PRESTERA_LINK_MODE_40GbaseKR4_Full,
+ PRESTERA_LINK_MODE_40GbaseCR4_Full,
+ PRESTERA_LINK_MODE_40GbaseSR4_Full,
+ PRESTERA_LINK_MODE_50GbaseCR2_Full,
+ PRESTERA_LINK_MODE_50GbaseKR2_Full,
+ PRESTERA_LINK_MODE_50GbaseSR2_Full,
+ PRESTERA_LINK_MODE_100GbaseKR4_Full,
+ PRESTERA_LINK_MODE_100GbaseSR4_Full,
+ PRESTERA_LINK_MODE_100GbaseCR4_Full,
+
+ PRESTERA_LINK_MODE_MAX
+};
+
+enum {
+ PRESTERA_PORT_TYPE_NONE,
+ PRESTERA_PORT_TYPE_TP,
+ PRESTERA_PORT_TYPE_AUI,
+ PRESTERA_PORT_TYPE_MII,
+ PRESTERA_PORT_TYPE_FIBRE,
+ PRESTERA_PORT_TYPE_BNC,
+ PRESTERA_PORT_TYPE_DA,
+ PRESTERA_PORT_TYPE_OTHER,
+
+ PRESTERA_PORT_TYPE_MAX
+};
+
+enum {
+ PRESTERA_PORT_TCVR_COPPER,
+ PRESTERA_PORT_TCVR_SFP,
+
+ PRESTERA_PORT_TCVR_MAX
+};
+
+enum {
+ PRESTERA_PORT_FEC_OFF,
+ PRESTERA_PORT_FEC_BASER,
+ PRESTERA_PORT_FEC_RS,
+
+ PRESTERA_PORT_FEC_MAX
+};
+
+enum {
+ PRESTERA_PORT_DUPLEX_HALF,
+ PRESTERA_PORT_DUPLEX_FULL,
+};
+
+enum {
+ PRESTERA_STP_DISABLED,
+ PRESTERA_STP_BLOCK_LISTEN,
+ PRESTERA_STP_LEARN,
+ PRESTERA_STP_FORWARD,
+};
+
+enum {
+ PRESTERA_POLICER_TYPE_INGRESS,
+ PRESTERA_POLICER_TYPE_EGRESS
+};
+
+enum prestera_hw_cpu_code_cnt_t {
+ PRESTERA_HW_CPU_CODE_CNT_TYPE_DROP = 0,
+ PRESTERA_HW_CPU_CODE_CNT_TYPE_TRAP = 1,
+};
+
+enum prestera_hw_vtcam_direction_t {
+ PRESTERA_HW_VTCAM_DIR_INGRESS = 0,
+ PRESTERA_HW_VTCAM_DIR_EGRESS = 1,
+};
+
+enum {
+ PRESTERA_HW_COUNTER_CLIENT_INGRESS_LOOKUP_0 = 0,
+ PRESTERA_HW_COUNTER_CLIENT_INGRESS_LOOKUP_1 = 1,
+ PRESTERA_HW_COUNTER_CLIENT_INGRESS_LOOKUP_2 = 2,
+ PRESTERA_HW_COUNTER_CLIENT_EGRESS_LOOKUP = 3,
+};
+
+struct prestera_switch;
+struct prestera_port;
+struct prestera_port_stats;
+struct prestera_port_caps;
+enum prestera_event_type;
+struct prestera_event;
+
+typedef void (*prestera_event_cb_t)
+ (struct prestera_switch *sw, struct prestera_event *evt, void *arg);
+
+struct prestera_rxtx_params;
+struct prestera_acl_hw_action_info;
+struct prestera_acl_iface;
+struct prestera_counter_stats;
+struct prestera_iface;
+struct prestera_flood_domain;
+struct prestera_mdb_entry;
+struct prestera_neigh_info;
+
+/* Switch API */
+int prestera_hw_switch_init(struct prestera_switch *sw);
+void prestera_hw_switch_fini(struct prestera_switch *sw);
+int prestera_hw_switch_ageing_set(struct prestera_switch *sw, u32 ageing_ms);
+int prestera_hw_switch_mac_set(struct prestera_switch *sw, const char *mac);
+
+/* Port API */
+int prestera_hw_port_info_get(const struct prestera_port *port,
+ u32 *dev_id, u32 *hw_id, u16 *fp_id);
+
+int prestera_hw_port_mac_mode_get(const struct prestera_port *port,
+ u32 *mode, u32 *speed, u8 *duplex, u8 *fec);
+int prestera_hw_port_mac_mode_set(const struct prestera_port *port,
+ bool admin, u32 mode, u8 inband,
+ u32 speed, u8 duplex, u8 fec);
+int prestera_hw_port_phy_mode_get(const struct prestera_port *port,
+ u8 *mdix, u64 *lmode_bmap,
+ bool *fc_pause, bool *fc_asym);
+int prestera_hw_port_phy_mode_set(const struct prestera_port *port,
+ bool admin, bool adv, u32 mode, u64 modes,
+ u8 mdix);
+
+int prestera_hw_port_mtu_set(const struct prestera_port *port, u32 mtu);
+int prestera_hw_port_mtu_get(const struct prestera_port *port, u32 *mtu);
+int prestera_hw_port_mac_set(const struct prestera_port *port, const char *mac);
+int prestera_hw_port_mac_get(const struct prestera_port *port, char *mac);
+int prestera_hw_port_cap_get(const struct prestera_port *port,
+ struct prestera_port_caps *caps);
+int prestera_hw_port_type_get(const struct prestera_port *port, u8 *type);
+int prestera_hw_port_autoneg_restart(struct prestera_port *port);
+int prestera_hw_port_stats_get(const struct prestera_port *port,
+ struct prestera_port_stats *stats);
+int prestera_hw_port_speed_get(const struct prestera_port *port, u32 *speed);
+int prestera_hw_port_learning_set(struct prestera_port *port, bool enable);
+int prestera_hw_port_uc_flood_set(const struct prestera_port *port, bool flood);
+int prestera_hw_port_mc_flood_set(const struct prestera_port *port, bool flood);
+int prestera_hw_port_br_locked_set(const struct prestera_port *port,
+ bool br_locked);
+int prestera_hw_port_accept_frm_type(struct prestera_port *port,
+ enum prestera_accept_frm_type type);
+/* VLAN API */
+int prestera_hw_vlan_create(struct prestera_switch *sw, u16 vid);
+int prestera_hw_vlan_delete(struct prestera_switch *sw, u16 vid);
+int prestera_hw_vlan_port_set(struct prestera_port *port, u16 vid,
+ bool is_member, bool untagged);
+int prestera_hw_vlan_port_vid_set(struct prestera_port *port, u16 vid);
+int prestera_hw_vlan_port_stp_set(struct prestera_port *port, u16 vid, u8 state);
+
+/* FDB API */
+int prestera_hw_fdb_add(struct prestera_port *port, const unsigned char *mac,
+ u16 vid, bool dynamic);
+int prestera_hw_fdb_del(struct prestera_port *port, const unsigned char *mac,
+ u16 vid);
+int prestera_hw_fdb_flush_port(struct prestera_port *port, u32 mode);
+int prestera_hw_fdb_flush_vlan(struct prestera_switch *sw, u16 vid, u32 mode);
+int prestera_hw_fdb_flush_port_vlan(struct prestera_port *port, u16 vid,
+ u32 mode);
+
+/* Bridge API */
+int prestera_hw_bridge_create(struct prestera_switch *sw, u16 *bridge_id);
+int prestera_hw_bridge_delete(struct prestera_switch *sw, u16 bridge_id);
+int prestera_hw_bridge_port_add(struct prestera_port *port, u16 bridge_id);
+int prestera_hw_bridge_port_delete(struct prestera_port *port, u16 bridge_id);
+
+/* vTCAM API */
+int prestera_hw_vtcam_create(struct prestera_switch *sw,
+ u8 lookup, const u32 *keymask, u32 *vtcam_id,
+ enum prestera_hw_vtcam_direction_t direction);
+int prestera_hw_vtcam_rule_add(struct prestera_switch *sw, u32 vtcam_id,
+ u32 prio, void *key, void *keymask,
+ struct prestera_acl_hw_action_info *act,
+ u8 n_act, u32 *rule_id);
+int prestera_hw_vtcam_rule_del(struct prestera_switch *sw,
+ u32 vtcam_id, u32 rule_id);
+int prestera_hw_vtcam_destroy(struct prestera_switch *sw, u32 vtcam_id);
+int prestera_hw_vtcam_iface_bind(struct prestera_switch *sw,
+ struct prestera_acl_iface *iface,
+ u32 vtcam_id, u16 pcl_id);
+int prestera_hw_vtcam_iface_unbind(struct prestera_switch *sw,
+ struct prestera_acl_iface *iface,
+ u32 vtcam_id);
+
+/* Counter API */
+int prestera_hw_counter_trigger(struct prestera_switch *sw, u32 block_id);
+int prestera_hw_counter_abort(struct prestera_switch *sw);
+int prestera_hw_counters_get(struct prestera_switch *sw, u32 idx,
+ u32 *len, bool *done,
+ struct prestera_counter_stats *stats);
+int prestera_hw_counter_block_get(struct prestera_switch *sw,
+ u32 client, u32 *block_id, u32 *offset,
+ u32 *num_counters);
+int prestera_hw_counter_block_release(struct prestera_switch *sw,
+ u32 block_id);
+int prestera_hw_counter_clear(struct prestera_switch *sw, u32 block_id,
+ u32 counter_id);
+
+/* SPAN API */
+int prestera_hw_span_get(const struct prestera_port *port, u8 *span_id);
+int prestera_hw_span_bind(const struct prestera_port *port, u8 span_id,
+ bool ingress);
+int prestera_hw_span_unbind(const struct prestera_port *port, bool ingress);
+int prestera_hw_span_release(struct prestera_switch *sw, u8 span_id);
+
+/* Router API */
+int prestera_hw_rif_create(struct prestera_switch *sw,
+ struct prestera_iface *iif, u8 *mac, u16 *rif_id);
+int prestera_hw_rif_delete(struct prestera_switch *sw, u16 rif_id,
+ struct prestera_iface *iif);
+
+/* Virtual Router API */
+int prestera_hw_vr_create(struct prestera_switch *sw, u16 *vr_id);
+int prestera_hw_vr_delete(struct prestera_switch *sw, u16 vr_id);
+
+/* LPM API */
+int prestera_hw_lpm_add(struct prestera_switch *sw, u16 vr_id,
+ __be32 dst, u32 dst_len, u32 grp_id);
+int prestera_hw_lpm_del(struct prestera_switch *sw, u16 vr_id,
+ __be32 dst, u32 dst_len);
+
+/* NH API */
+int prestera_hw_nh_entries_set(struct prestera_switch *sw, int count,
+ struct prestera_neigh_info *nhs, u32 grp_id);
+int prestera_hw_nhgrp_blk_get(struct prestera_switch *sw,
+ u8 *hw_state, u32 buf_size /* Buffer in bytes */);
+int prestera_hw_nh_group_create(struct prestera_switch *sw, u16 nh_count,
+ u32 *grp_id);
+int prestera_hw_nh_group_delete(struct prestera_switch *sw, u16 nh_count,
+ u32 grp_id);
+
+/* Event handlers */
+int prestera_hw_event_handler_register(struct prestera_switch *sw,
+ enum prestera_event_type type,
+ prestera_event_cb_t fn,
+ void *arg);
+void prestera_hw_event_handler_unregister(struct prestera_switch *sw,
+ enum prestera_event_type type,
+ prestera_event_cb_t fn);
+
+/* RX/TX */
+int prestera_hw_rxtx_init(struct prestera_switch *sw,
+ struct prestera_rxtx_params *params);
+
+/* LAG API */
+int prestera_hw_lag_member_add(struct prestera_port *port, u16 lag_id);
+int prestera_hw_lag_member_del(struct prestera_port *port, u16 lag_id);
+int prestera_hw_lag_member_enable(struct prestera_port *port, u16 lag_id,
+ bool enable);
+int prestera_hw_lag_fdb_add(struct prestera_switch *sw, u16 lag_id,
+ const unsigned char *mac, u16 vid, bool dynamic);
+int prestera_hw_lag_fdb_del(struct prestera_switch *sw, u16 lag_id,
+ const unsigned char *mac, u16 vid);
+int prestera_hw_fdb_flush_lag(struct prestera_switch *sw, u16 lag_id,
+ u32 mode);
+int prestera_hw_fdb_flush_lag_vlan(struct prestera_switch *sw,
+ u16 lag_id, u16 vid, u32 mode);
+
+/* HW trap/drop counters API */
+int
+prestera_hw_cpu_code_counters_get(struct prestera_switch *sw, u8 code,
+ enum prestera_hw_cpu_code_cnt_t counter_type,
+ u64 *packet_count);
+
+/* Policer API */
+int prestera_hw_policer_create(struct prestera_switch *sw, u8 type,
+ u32 *policer_id);
+int prestera_hw_policer_release(struct prestera_switch *sw,
+ u32 policer_id);
+int prestera_hw_policer_sr_tcm_set(struct prestera_switch *sw,
+ u32 policer_id, u64 cir, u32 cbs);
+
+/* Flood domain / MDB API */
+int prestera_hw_flood_domain_create(struct prestera_flood_domain *domain);
+int prestera_hw_flood_domain_destroy(struct prestera_flood_domain *domain);
+int prestera_hw_flood_domain_ports_set(struct prestera_flood_domain *domain);
+int prestera_hw_flood_domain_ports_reset(struct prestera_flood_domain *domain);
+
+int prestera_hw_mdb_create(struct prestera_mdb_entry *mdb);
+int prestera_hw_mdb_destroy(struct prestera_mdb_entry *mdb);
+
+#endif /* _PRESTERA_HW_H_ */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_main.c b/drivers/net/ethernet/marvell/prestera/prestera_main.c
new file mode 100644
index 000000000..4fb886c57
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c
@@ -0,0 +1,1525 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved */
+
+#include <linux/etherdevice.h>
+#include <linux/jiffies.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/netdev_features.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/if_vlan.h>
+#include <linux/phylink.h>
+
+#include "prestera.h"
+#include "prestera_hw.h"
+#include "prestera_acl.h"
+#include "prestera_flow.h"
+#include "prestera_span.h"
+#include "prestera_rxtx.h"
+#include "prestera_devlink.h"
+#include "prestera_ethtool.h"
+#include "prestera_counter.h"
+#include "prestera_switchdev.h"
+
+#define PRESTERA_MTU_DEFAULT 1536
+
+#define PRESTERA_STATS_DELAY_MS 1000
+
+#define PRESTERA_MAC_ADDR_NUM_MAX 255
+
+static struct workqueue_struct *prestera_wq;
+static struct workqueue_struct *prestera_owq;
+
+void prestera_queue_work(struct work_struct *work)
+{
+ queue_work(prestera_owq, work);
+}
+
+void prestera_queue_delayed_work(struct delayed_work *work, unsigned long delay)
+{
+ queue_delayed_work(prestera_wq, work, delay);
+}
+
+void prestera_queue_drain(void)
+{
+ drain_workqueue(prestera_wq);
+ drain_workqueue(prestera_owq);
+}
+
+int prestera_port_learning_set(struct prestera_port *port, bool learn)
+{
+ return prestera_hw_port_learning_set(port, learn);
+}
+
+int prestera_port_uc_flood_set(struct prestera_port *port, bool flood)
+{
+ return prestera_hw_port_uc_flood_set(port, flood);
+}
+
+int prestera_port_mc_flood_set(struct prestera_port *port, bool flood)
+{
+ return prestera_hw_port_mc_flood_set(port, flood);
+}
+
+int prestera_port_br_locked_set(struct prestera_port *port, bool br_locked)
+{
+ return prestera_hw_port_br_locked_set(port, br_locked);
+}
+
+int prestera_port_pvid_set(struct prestera_port *port, u16 vid)
+{
+ enum prestera_accept_frm_type frm_type;
+ int err;
+
+ frm_type = PRESTERA_ACCEPT_FRAME_TYPE_TAGGED;
+
+ if (vid) {
+ err = prestera_hw_vlan_port_vid_set(port, vid);
+ if (err)
+ return err;
+
+ frm_type = PRESTERA_ACCEPT_FRAME_TYPE_ALL;
+ }
+
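+ /* Roll back the hardware PVID if the accepted frame type cannot be changed. */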
+ err = prestera_hw_port_accept_frm_type(port, frm_type);
+ if (err && frm_type == PRESTERA_ACCEPT_FRAME_TYPE_ALL)
+ prestera_hw_vlan_port_vid_set(port, port->pvid);
+
+ port->pvid = vid;
+ return 0;
+}
+
+struct prestera_port *prestera_port_find_by_hwid(struct prestera_switch *sw,
+ u32 dev_id, u32 hw_id)
+{
+ struct prestera_port *port = NULL, *tmp;
+
+ read_lock(&sw->port_list_lock);
+ list_for_each_entry(tmp, &sw->port_list, list) {
+ if (tmp->dev_id == dev_id && tmp->hw_id == hw_id) {
+ port = tmp;
+ break;
+ }
+ }
+ read_unlock(&sw->port_list_lock);
+
+ return port;
+}
+
+struct prestera_port *prestera_find_port(struct prestera_switch *sw, u32 id)
+{
+ struct prestera_port *port = NULL, *tmp;
+
+ read_lock(&sw->port_list_lock);
+ list_for_each_entry(tmp, &sw->port_list, list) {
+ if (tmp->id == id) {
+ port = tmp;
+ break;
+ }
+ }
+ read_unlock(&sw->port_list_lock);
+
+ return port;
+}
+
+struct prestera_switch *prestera_switch_get(struct net_device *dev)
+{
+ struct prestera_port *port;
+
+ port = prestera_port_dev_lower_find(dev);
+ return port ? port->sw : NULL;
+}
+
+int prestera_port_cfg_mac_read(struct prestera_port *port,
+ struct prestera_port_mac_config *cfg)
+{
+ *cfg = port->cfg_mac;
+ return 0;
+}
+
+int prestera_port_cfg_mac_write(struct prestera_port *port,
+ struct prestera_port_mac_config *cfg)
+{
+ int err;
+
+ err = prestera_hw_port_mac_mode_set(port, cfg->admin,
+ cfg->mode, cfg->inband, cfg->speed,
+ cfg->duplex, cfg->fec);
+ if (err)
+ return err;
+
+ port->cfg_mac = *cfg;
+ return 0;
+}
+
+static int prestera_port_open(struct net_device *dev)
+{
+ struct prestera_port *port = netdev_priv(dev);
+ struct prestera_port_mac_config cfg_mac;
+ int err = 0;
+
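+ /* Ports bound to phylink are started through it; otherwise enable the MAC (SFP) or PHY directly. */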
+ if (port->phy_link) {
+ phylink_start(port->phy_link);
+ } else {
+ if (port->caps.transceiver == PRESTERA_PORT_TCVR_SFP) {
+ err = prestera_port_cfg_mac_read(port, &cfg_mac);
+ if (!err) {
+ cfg_mac.admin = true;
+ err = prestera_port_cfg_mac_write(port,
+ &cfg_mac);
+ }
+ } else {
+ port->cfg_phy.admin = true;
+ err = prestera_hw_port_phy_mode_set(port, true,
+ port->autoneg,
+ port->cfg_phy.mode,
+ port->adver_link_modes,
+ port->cfg_phy.mdix);
+ }
+ }
+
+ netif_start_queue(dev);
+
+ return err;
+}
+
+static int prestera_port_close(struct net_device *dev)
+{
+ struct prestera_port *port = netdev_priv(dev);
+ struct prestera_port_mac_config cfg_mac;
+ int err = 0;
+
+ netif_stop_queue(dev);
+
+ if (port->phy_link) {
+ phylink_stop(port->phy_link);
+ phylink_disconnect_phy(port->phy_link);
+ err = prestera_port_cfg_mac_read(port, &cfg_mac);
+ if (!err) {
+ cfg_mac.admin = false;
+ prestera_port_cfg_mac_write(port, &cfg_mac);
+ }
+ } else {
+ if (port->caps.transceiver == PRESTERA_PORT_TCVR_SFP) {
+ err = prestera_port_cfg_mac_read(port, &cfg_mac);
+ if (!err) {
+ cfg_mac.admin = false;
+ prestera_port_cfg_mac_write(port, &cfg_mac);
+ }
+ } else {
+ port->cfg_phy.admin = false;
+ err = prestera_hw_port_phy_mode_set(port, false, port->autoneg,
+ port->cfg_phy.mode,
+ port->adver_link_modes,
+ port->cfg_phy.mdix);
+ }
+ }
+
+ return err;
+}
+
+static void
+prestera_port_mac_state_cache_read(struct prestera_port *port,
+ struct prestera_port_mac_state *state)
+{
+ spin_lock(&port->state_mac_lock);
+ *state = port->state_mac;
+ spin_unlock(&port->state_mac_lock);
+}
+
+static void
+prestera_port_mac_state_cache_write(struct prestera_port *port,
+ struct prestera_port_mac_state *state)
+{
+ spin_lock(&port->state_mac_lock);
+ port->state_mac = *state;
+ spin_unlock(&port->state_mac_lock);
+}
+
+static struct prestera_port *prestera_pcs_to_port(struct phylink_pcs *pcs)
+{
+ return container_of(pcs, struct prestera_port, phylink_pcs);
+}
+
+static void prestera_mac_config(struct phylink_config *config,
+ unsigned int an_mode,
+ const struct phylink_link_state *state)
+{
+}
+
+static void prestera_mac_link_down(struct phylink_config *config,
+ unsigned int mode, phy_interface_t interface)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct prestera_port *port = netdev_priv(ndev);
+ struct prestera_port_mac_state state_mac;
+
+ /* Invalidate the cached MAC state; parameters will be refreshed on the next link event. */
+ memset(&state_mac, 0, sizeof(state_mac));
+ state_mac.valid = false;
+ prestera_port_mac_state_cache_write(port, &state_mac);
+}
+
+static void prestera_mac_link_up(struct phylink_config *config,
+ struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+}
+
+static struct phylink_pcs *
+prestera_mac_select_pcs(struct phylink_config *config,
+ phy_interface_t interface)
+{
+ struct net_device *dev = to_net_dev(config->dev);
+ struct prestera_port *port = netdev_priv(dev);
+
+ return &port->phylink_pcs;
+}
+
+static void prestera_pcs_get_state(struct phylink_pcs *pcs,
+ struct phylink_link_state *state)
+{
+ struct prestera_port *port = container_of(pcs, struct prestera_port,
+ phylink_pcs);
+ struct prestera_port_mac_state smac;
+
+ prestera_port_mac_state_cache_read(port, &smac);
+
+ if (smac.valid) {
+ state->link = smac.oper ? 1 : 0;
+ /* AN is considered complete when the port is up */
+ state->an_complete = (smac.oper && port->autoneg) ? 1 : 0;
+ state->speed = smac.speed;
+ state->duplex = smac.duplex;
+ } else {
+ state->link = 0;
+ state->an_complete = 0;
+ }
+}
+
+static int prestera_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
+ phy_interface_t interface,
+ const unsigned long *advertising,
+ bool permit_pause_to_mac)
+{
+ struct prestera_port *port = prestera_pcs_to_port(pcs);
+ struct prestera_port_mac_config cfg_mac;
+ int err;
+
+ err = prestera_port_cfg_mac_read(port, &cfg_mac);
+ if (err)
+ return err;
+
+ cfg_mac.admin = true;
+ cfg_mac.fec = PRESTERA_PORT_FEC_OFF;
+ cfg_mac.inband = neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED;
+
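+ /* Map the PHY interface mode to the corresponding MAC mode and speed. */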
+ switch (interface) {
+ case PHY_INTERFACE_MODE_10GBASER:
+ cfg_mac.speed = SPEED_10000;
+ cfg_mac.mode = PRESTERA_MAC_MODE_SR_LR;
+ break;
+ case PHY_INTERFACE_MODE_2500BASEX:
+ cfg_mac.speed = SPEED_2500;
+ cfg_mac.duplex = DUPLEX_FULL;
+ cfg_mac.mode = PRESTERA_MAC_MODE_SGMII;
+ break;
+ case PHY_INTERFACE_MODE_SGMII:
+ cfg_mac.mode = PRESTERA_MAC_MODE_SGMII;
+ break;
+ case PHY_INTERFACE_MODE_1000BASEX:
+ default:
+ cfg_mac.speed = SPEED_1000;
+ cfg_mac.duplex = DUPLEX_FULL;
+ cfg_mac.mode = PRESTERA_MAC_MODE_1000BASE_X;
+ break;
+ }
+
+ err = prestera_port_cfg_mac_write(port, &cfg_mac);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static void prestera_pcs_an_restart(struct phylink_pcs *pcs)
+{
+ /* TODO: add 1000baseX AN restart support.
+ * The firmware does not support 1000baseX AN restart yet; once it
+ * does, this callback can be implemented. For now it is left empty.
+ */
+}
+
+static const struct phylink_mac_ops prestera_mac_ops = {
+ .mac_select_pcs = prestera_mac_select_pcs,
+ .mac_config = prestera_mac_config,
+ .mac_link_down = prestera_mac_link_down,
+ .mac_link_up = prestera_mac_link_up,
+};
+
+static const struct phylink_pcs_ops prestera_pcs_ops = {
+ .pcs_get_state = prestera_pcs_get_state,
+ .pcs_config = prestera_pcs_config,
+ .pcs_an_restart = prestera_pcs_an_restart,
+};
+
+static int prestera_port_sfp_bind(struct prestera_port *port)
+{
+ struct prestera_switch *sw = port->sw;
+ struct device_node *ports, *node;
+ struct fwnode_handle *fwnode;
+ struct phylink *phy_link;
+ int err = 0;
+
+ if (!sw->np)
+ return 0;
+
+ of_node_get(sw->np);
+ ports = of_find_node_by_name(sw->np, "ports");
+
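+ /* Find the child node whose "prestera,port-num" matches this port and create phylink for it. */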
+ for_each_child_of_node(ports, node) {
+ int num;
+
+ err = of_property_read_u32(node, "prestera,port-num", &num);
+ if (err) {
+ dev_err(sw->dev->dev,
+ "device node %pOF has no valid reg property: %d\n",
+ node, err);
+ goto out;
+ }
+
+ if (port->fp_id != num)
+ continue;
+
+ port->phylink_pcs.ops = &prestera_pcs_ops;
+ port->phylink_pcs.neg_mode = true;
+
+ port->phy_config.dev = &port->dev->dev;
+ port->phy_config.type = PHYLINK_NETDEV;
+
+ fwnode = of_fwnode_handle(node);
+
+ __set_bit(PHY_INTERFACE_MODE_10GBASER,
+ port->phy_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX,
+ port->phy_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ port->phy_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ port->phy_config.supported_interfaces);
+
+ port->phy_config.mac_capabilities =
+ MAC_1000 | MAC_2500FD | MAC_10000FD;
+
+ phy_link = phylink_create(&port->phy_config, fwnode,
+ PHY_INTERFACE_MODE_INTERNAL,
+ &prestera_mac_ops);
+ if (IS_ERR(phy_link)) {
+ netdev_err(port->dev, "failed to create phylink\n");
+ err = PTR_ERR(phy_link);
+ goto out;
+ }
+
+ port->phy_link = phy_link;
+ break;
+ }
+
+out:
+ of_node_put(node);
+ of_node_put(ports);
+ return err;
+}
+
+static int prestera_port_sfp_unbind(struct prestera_port *port)
+{
+ if (port->phy_link)
+ phylink_destroy(port->phy_link);
+
+ return 0;
+}
+
+static netdev_tx_t prestera_port_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ return prestera_rxtx_xmit(netdev_priv(dev), skb);
+}
+
+int prestera_is_valid_mac_addr(struct prestera_port *port, const u8 *addr)
+{
+ if (!is_valid_ether_addr(addr))
+ return -EADDRNOTAVAIL;
+
+ /* The firmware requires the first 5 bytes of the port MAC address
+ * to match the switch base MAC address.
+ */
+ if (memcmp(port->sw->base_mac, addr, ETH_ALEN - 1))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int prestera_port_set_mac_address(struct net_device *dev, void *p)
+{
+ struct prestera_port *port = netdev_priv(dev);
+ struct sockaddr *addr = p;
+ int err;
+
+ err = prestera_is_valid_mac_addr(port, addr->sa_data);
+ if (err)
+ return err;
+
+ err = prestera_hw_port_mac_set(port, addr->sa_data);
+ if (err)
+ return err;
+
+ eth_hw_addr_set(dev, addr->sa_data);
+
+ return 0;
+}
+
+static int prestera_port_change_mtu(struct net_device *dev, int mtu)
+{
+ struct prestera_port *port = netdev_priv(dev);
+ int err;
+
+ err = prestera_hw_port_mtu_set(port, mtu);
+ if (err)
+ return err;
+
+ dev->mtu = mtu;
+
+ return 0;
+}
+
+static void prestera_port_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct prestera_port *port = netdev_priv(dev);
+ struct prestera_port_stats *port_stats = &port->cached_hw_stats.stats;
+
+ stats->rx_packets = port_stats->broadcast_frames_received +
+ port_stats->multicast_frames_received +
+ port_stats->unicast_frames_received;
+
+ stats->tx_packets = port_stats->broadcast_frames_sent +
+ port_stats->multicast_frames_sent +
+ port_stats->unicast_frames_sent;
+
+ stats->rx_bytes = port_stats->good_octets_received;
+
+ stats->tx_bytes = port_stats->good_octets_sent;
+
+ stats->rx_errors = port_stats->rx_error_frame_received;
+ stats->tx_errors = port_stats->mac_trans_error;
+
+ stats->rx_dropped = port_stats->buffer_overrun;
+ stats->tx_dropped = 0;
+
+ stats->multicast = port_stats->multicast_frames_received;
+ stats->collisions = port_stats->excessive_collision;
+
+ stats->rx_crc_errors = port_stats->bad_crc;
+}
+
+static void prestera_port_get_hw_stats(struct prestera_port *port)
+{
+ prestera_hw_port_stats_get(port, &port->cached_hw_stats.stats);
+}
+
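+/* Periodic HW stats caching: this delayed work refreshes the cached counters
+ * and re-arms itself every PRESTERA_STATS_DELAY_MS; it is started and stopped
+ * from the port MAC-state event handler depending on the operational state.
+ */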
+static void prestera_port_stats_update(struct work_struct *work)
+{
+ struct prestera_port *port =
+ container_of(work, struct prestera_port,
+ cached_hw_stats.caching_dw.work);
+
+ prestera_port_get_hw_stats(port);
+
+ queue_delayed_work(prestera_wq, &port->cached_hw_stats.caching_dw,
+ msecs_to_jiffies(PRESTERA_STATS_DELAY_MS));
+}
+
+static int prestera_port_setup_tc(struct net_device *dev,
+ enum tc_setup_type type,
+ void *type_data)
+{
+ struct prestera_port *port = netdev_priv(dev);
+
+ switch (type) {
+ case TC_SETUP_BLOCK:
+ return prestera_flow_block_setup(port, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static const struct net_device_ops prestera_netdev_ops = {
+ .ndo_open = prestera_port_open,
+ .ndo_stop = prestera_port_close,
+ .ndo_start_xmit = prestera_port_xmit,
+ .ndo_setup_tc = prestera_port_setup_tc,
+ .ndo_change_mtu = prestera_port_change_mtu,
+ .ndo_get_stats64 = prestera_port_get_stats64,
+ .ndo_set_mac_address = prestera_port_set_mac_address,
+};
+
+int prestera_port_autoneg_set(struct prestera_port *port, u64 link_modes)
+{
+ int err;
+
+ if (port->autoneg && port->adver_link_modes == link_modes)
+ return 0;
+
+ err = prestera_hw_port_phy_mode_set(port, port->cfg_phy.admin,
+ true, 0, link_modes,
+ port->cfg_phy.mdix);
+ if (err)
+ return err;
+
+ port->adver_fec = BIT(PRESTERA_PORT_FEC_OFF);
+ port->adver_link_modes = link_modes;
+ port->cfg_phy.mode = 0;
+ port->autoneg = true;
+
+ return 0;
+}
+
+static void prestera_port_list_add(struct prestera_port *port)
+{
+ write_lock(&port->sw->port_list_lock);
+ list_add(&port->list, &port->sw->port_list);
+ write_unlock(&port->sw->port_list_lock);
+}
+
+static void prestera_port_list_del(struct prestera_port *port)
+{
+ write_lock(&port->sw->port_list_lock);
+ list_del(&port->list);
+ write_unlock(&port->sw->port_list_lock);
+}
+
+static int prestera_port_create(struct prestera_switch *sw, u32 id)
+{
+ struct prestera_port_mac_config cfg_mac;
+ struct prestera_port *port;
+ struct net_device *dev;
+ int err;
+
+ dev = alloc_etherdev(sizeof(*port));
+ if (!dev)
+ return -ENOMEM;
+
+ port = netdev_priv(dev);
+
+ INIT_LIST_HEAD(&port->vlans_list);
+ port->pvid = PRESTERA_DEFAULT_VID;
+ port->lag = NULL;
+ port->dev = dev;
+ port->id = id;
+ port->sw = sw;
+
+ spin_lock_init(&port->state_mac_lock);
+
+ err = prestera_hw_port_info_get(port, &port->dev_id, &port->hw_id,
+ &port->fp_id);
+ if (err) {
+ dev_err(prestera_dev(sw), "Failed to get port(%u) info\n", id);
+ goto err_port_info_get;
+ }
+
+ err = prestera_devlink_port_register(port);
+ if (err)
+ goto err_dl_port_register;
+
+ dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_HW_TC;
+ dev->netdev_ops = &prestera_netdev_ops;
+ dev->ethtool_ops = &prestera_ethtool_ops;
+ SET_NETDEV_DEV(dev, sw->dev->dev);
+ SET_NETDEV_DEVLINK_PORT(dev, &port->dl_port);
+
+ if (port->caps.transceiver != PRESTERA_PORT_TCVR_SFP)
+ netif_carrier_off(dev);
+
+ dev->mtu = min_t(unsigned int, sw->mtu_max, PRESTERA_MTU_DEFAULT);
+ dev->min_mtu = sw->mtu_min;
+ dev->max_mtu = sw->mtu_max;
+
+ err = prestera_hw_port_mtu_set(port, dev->mtu);
+ if (err) {
+ dev_err(prestera_dev(sw), "Failed to set port(%u) mtu(%d)\n",
+ id, dev->mtu);
+ goto err_port_init;
+ }
+
+ if (port->fp_id >= PRESTERA_MAC_ADDR_NUM_MAX) {
+ err = -EINVAL;
+ goto err_port_init;
+ }
+
+ eth_hw_addr_gen(dev, sw->base_mac, port->fp_id);
+	/* the firmware requires that the port's MAC address start with the
+	 * first 5 bytes of the base MAC address
+	 */
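+	/* e.g. with base MAC 00:11:22:33:44:fe and fp_id 3, eth_hw_addr_gen()
+	 * carries into the 5th byte (00:11:22:33:45:01); the check below then
+	 * restores the base prefix so only the last byte stays per-port.
+	 */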
+ if (memcmp(dev->dev_addr, sw->base_mac, ETH_ALEN - 1)) {
+ dev_warn(prestera_dev(sw), "Port MAC address wraps for port(%u)\n", id);
+ dev_addr_mod(dev, 0, sw->base_mac, ETH_ALEN - 1);
+ }
+
+ err = prestera_hw_port_mac_set(port, dev->dev_addr);
+ if (err) {
+ dev_err(prestera_dev(sw), "Failed to set port(%u) mac addr\n", id);
+ goto err_port_init;
+ }
+
+ err = prestera_hw_port_cap_get(port, &port->caps);
+ if (err) {
+ dev_err(prestera_dev(sw), "Failed to get port(%u) caps\n", id);
+ goto err_port_init;
+ }
+
+ port->adver_link_modes = port->caps.supp_link_modes;
+ port->adver_fec = 0;
+ port->autoneg = true;
+
+	/* initialize the MAC configuration */
+ if (port->caps.transceiver != PRESTERA_PORT_TCVR_SFP) {
+ cfg_mac.admin = true;
+ cfg_mac.mode = PRESTERA_MAC_MODE_INTERNAL;
+ } else {
+ cfg_mac.admin = false;
+ cfg_mac.mode = PRESTERA_MAC_MODE_MAX;
+ }
+ cfg_mac.inband = 0;
+ cfg_mac.speed = 0;
+ cfg_mac.duplex = DUPLEX_UNKNOWN;
+ cfg_mac.fec = PRESTERA_PORT_FEC_OFF;
+
+ err = prestera_port_cfg_mac_write(port, &cfg_mac);
+ if (err) {
+ dev_err(prestera_dev(sw),
+ "Failed to set port(%u) mac mode\n", id);
+ goto err_port_init;
+ }
+
+	/* initialize the PHY configuration (only for ports with an integrated
+	 * PHY, i.e. non-SFP transceivers)
+	 */
+ if (port->caps.transceiver != PRESTERA_PORT_TCVR_SFP) {
+ port->cfg_phy.mdix = ETH_TP_MDI_AUTO;
+ port->cfg_phy.admin = false;
+ err = prestera_hw_port_phy_mode_set(port,
+ port->cfg_phy.admin,
+ false, 0, 0,
+ port->cfg_phy.mdix);
+ if (err) {
+ dev_err(prestera_dev(sw),
+ "Failed to set port(%u) phy mode\n", id);
+ goto err_port_init;
+ }
+ }
+
+ err = prestera_rxtx_port_init(port);
+ if (err)
+ goto err_port_init;
+
+ INIT_DELAYED_WORK(&port->cached_hw_stats.caching_dw,
+ &prestera_port_stats_update);
+
+ prestera_port_list_add(port);
+
+ err = register_netdev(dev);
+ if (err)
+ goto err_register_netdev;
+
+ err = prestera_port_sfp_bind(port);
+ if (err)
+ goto err_sfp_bind;
+
+ return 0;
+
+err_sfp_bind:
+ unregister_netdev(dev);
+err_register_netdev:
+ prestera_port_list_del(port);
+err_port_init:
+ prestera_devlink_port_unregister(port);
+err_dl_port_register:
+err_port_info_get:
+ free_netdev(dev);
+ return err;
+}
+
+static void prestera_port_destroy(struct prestera_port *port)
+{
+ struct net_device *dev = port->dev;
+
+ cancel_delayed_work_sync(&port->cached_hw_stats.caching_dw);
+ unregister_netdev(dev);
+ prestera_port_list_del(port);
+ prestera_devlink_port_unregister(port);
+ free_netdev(dev);
+}
+
+static void prestera_destroy_ports(struct prestera_switch *sw)
+{
+ struct prestera_port *port, *tmp;
+
+ list_for_each_entry_safe(port, tmp, &sw->port_list, list)
+ prestera_port_destroy(port);
+}
+
+static int prestera_create_ports(struct prestera_switch *sw)
+{
+ struct prestera_port *port, *tmp;
+ u32 port_idx;
+ int err;
+
+ for (port_idx = 0; port_idx < sw->port_count; port_idx++) {
+ err = prestera_port_create(sw, port_idx);
+ if (err)
+ goto err_port_create;
+ }
+
+ return 0;
+
+err_port_create:
+ list_for_each_entry_safe(port, tmp, &sw->port_list, list) {
+ prestera_port_sfp_unbind(port);
+ prestera_port_destroy(port);
+ }
+
+ return err;
+}
+
+static void prestera_port_handle_event(struct prestera_switch *sw,
+ struct prestera_event *evt, void *arg)
+{
+ struct prestera_port_mac_state smac;
+ struct prestera_port_event *pevt;
+ struct delayed_work *caching_dw;
+ struct prestera_port *port;
+
+ if (evt->id == PRESTERA_PORT_EVENT_MAC_STATE_CHANGED) {
+ pevt = &evt->port_evt;
+ port = prestera_find_port(sw, pevt->port_id);
+ if (!port || !port->dev)
+ return;
+
+ caching_dw = &port->cached_hw_stats.caching_dw;
+
+ memset(&smac, 0, sizeof(smac));
+ smac.valid = true;
+ smac.oper = pevt->data.mac.oper;
+ if (smac.oper) {
+ smac.mode = pevt->data.mac.mode;
+ smac.speed = pevt->data.mac.speed;
+ smac.duplex = pevt->data.mac.duplex;
+ smac.fc = pevt->data.mac.fc;
+ smac.fec = pevt->data.mac.fec;
+ }
+ prestera_port_mac_state_cache_write(port, &smac);
+
+ if (port->state_mac.oper) {
+ if (port->phy_link)
+ phylink_mac_change(port->phy_link, true);
+ else
+ netif_carrier_on(port->dev);
+
+ if (!delayed_work_pending(caching_dw))
+ queue_delayed_work(prestera_wq, caching_dw, 0);
+ } else {
+ if (port->phy_link)
+ phylink_mac_change(port->phy_link, false);
+ else if (netif_running(port->dev) && netif_carrier_ok(port->dev))
+ netif_carrier_off(port->dev);
+
+ if (delayed_work_pending(caching_dw))
+ cancel_delayed_work(caching_dw);
+ }
+ }
+}
+
+static int prestera_event_handlers_register(struct prestera_switch *sw)
+{
+ return prestera_hw_event_handler_register(sw, PRESTERA_EVENT_TYPE_PORT,
+ prestera_port_handle_event,
+ NULL);
+}
+
+static void prestera_event_handlers_unregister(struct prestera_switch *sw)
+{
+ prestera_hw_event_handler_unregister(sw, PRESTERA_EVENT_TYPE_PORT,
+ prestera_port_handle_event);
+}
+
+static int prestera_switch_set_base_mac_addr(struct prestera_switch *sw)
+{
+	int ret = 0;
+
+ if (sw->np)
+ ret = of_get_mac_address(sw->np, sw->base_mac);
+ if (!is_valid_ether_addr(sw->base_mac) || ret) {
+ eth_random_addr(sw->base_mac);
+ dev_info(prestera_dev(sw), "using random base mac address\n");
+ }
+
+ return prestera_hw_switch_mac_set(sw, sw->base_mac);
+}
+
+struct prestera_lag *prestera_lag_by_id(struct prestera_switch *sw, u16 id)
+{
+ return id < sw->lag_max ? &sw->lags[id] : NULL;
+}
+
+static struct prestera_lag *prestera_lag_by_dev(struct prestera_switch *sw,
+ struct net_device *dev)
+{
+ struct prestera_lag *lag;
+ u16 id;
+
+ for (id = 0; id < sw->lag_max; id++) {
+ lag = &sw->lags[id];
+ if (lag->dev == dev)
+ return lag;
+ }
+
+ return NULL;
+}
+
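+/* Resolve a LAG id for @lag_dev: reuse the id of the LAG it is already bound
+ * to, otherwise return the first free slot; -ENOSPC when all sw->lag_max
+ * slots are in use.
+ */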
+int prestera_lag_id(struct prestera_switch *sw,
+ struct net_device *lag_dev, u16 *lag_id)
+{
+ struct prestera_lag *lag;
+ int free_id = -1;
+ int id;
+
+ for (id = 0; id < sw->lag_max; id++) {
+ lag = prestera_lag_by_id(sw, id);
+ if (lag->member_count) {
+ if (lag->dev == lag_dev) {
+ *lag_id = id;
+ return 0;
+ }
+ } else if (free_id < 0) {
+ free_id = id;
+ }
+ }
+ if (free_id < 0)
+ return -ENOSPC;
+ *lag_id = free_id;
+ return 0;
+}
+
+static struct prestera_lag *prestera_lag_create(struct prestera_switch *sw,
+ struct net_device *lag_dev)
+{
+ struct prestera_lag *lag = NULL;
+ u16 id;
+
+ for (id = 0; id < sw->lag_max; id++) {
+ lag = &sw->lags[id];
+ if (!lag->dev)
+ break;
+ }
+ if (lag) {
+ INIT_LIST_HEAD(&lag->members);
+ lag->dev = lag_dev;
+ }
+
+ return lag;
+}
+
+static void prestera_lag_destroy(struct prestera_switch *sw,
+ struct prestera_lag *lag)
+{
+ WARN_ON(!list_empty(&lag->members));
+ lag->member_count = 0;
+ lag->dev = NULL;
+}
+
+static int prestera_lag_port_add(struct prestera_port *port,
+ struct net_device *lag_dev)
+{
+ struct prestera_switch *sw = port->sw;
+ struct prestera_lag *lag;
+ int err;
+
+ lag = prestera_lag_by_dev(sw, lag_dev);
+ if (!lag) {
+ lag = prestera_lag_create(sw, lag_dev);
+ if (!lag)
+ return -ENOSPC;
+ }
+
+ if (lag->member_count >= sw->lag_member_max)
+ return -ENOSPC;
+
+ err = prestera_hw_lag_member_add(port, lag->lag_id);
+ if (err) {
+ if (!lag->member_count)
+ prestera_lag_destroy(sw, lag);
+ return err;
+ }
+
+ list_add(&port->lag_member, &lag->members);
+ lag->member_count++;
+ port->lag = lag;
+
+ return 0;
+}
+
+static int prestera_lag_port_del(struct prestera_port *port)
+{
+ struct prestera_switch *sw = port->sw;
+ struct prestera_lag *lag = port->lag;
+ int err;
+
+ if (!lag || !lag->member_count)
+ return -EINVAL;
+
+ err = prestera_hw_lag_member_del(port, lag->lag_id);
+ if (err)
+ return err;
+
+ list_del(&port->lag_member);
+ lag->member_count--;
+ port->lag = NULL;
+
+ if (netif_is_bridge_port(lag->dev)) {
+ struct net_device *br_dev;
+
+ br_dev = netdev_master_upper_dev_get(lag->dev);
+
+ prestera_bridge_port_leave(br_dev, port);
+ }
+
+ if (!lag->member_count)
+ prestera_lag_destroy(sw, lag);
+
+ return 0;
+}
+
+bool prestera_port_is_lag_member(const struct prestera_port *port)
+{
+ return !!port->lag;
+}
+
+u16 prestera_port_lag_id(const struct prestera_port *port)
+{
+ return port->lag->lag_id;
+}
+
+static int prestera_lag_init(struct prestera_switch *sw)
+{
+ u16 id;
+
+ sw->lags = kcalloc(sw->lag_max, sizeof(*sw->lags), GFP_KERNEL);
+ if (!sw->lags)
+ return -ENOMEM;
+
+ for (id = 0; id < sw->lag_max; id++)
+ sw->lags[id].lag_id = id;
+
+ return 0;
+}
+
+static void prestera_lag_fini(struct prestera_switch *sw)
+{
+ u8 idx;
+
+ for (idx = 0; idx < sw->lag_max; idx++)
+ WARN_ON(sw->lags[idx].member_count);
+
+ kfree(sw->lags);
+}
+
+bool prestera_netdev_check(const struct net_device *dev)
+{
+ return dev->netdev_ops == &prestera_netdev_ops;
+}
+
+static int prestera_lower_dev_walk(struct net_device *dev,
+ struct netdev_nested_priv *priv)
+{
+ struct prestera_port **pport = (struct prestera_port **)priv->data;
+
+ if (prestera_netdev_check(dev)) {
+ *pport = netdev_priv(dev);
+ return 1;
+ }
+
+ return 0;
+}
+
+struct prestera_port *prestera_port_dev_lower_find(struct net_device *dev)
+{
+ struct prestera_port *port = NULL;
+ struct netdev_nested_priv priv = {
+ .data = (void *)&port,
+ };
+
+ if (prestera_netdev_check(dev))
+ return netdev_priv(dev);
+
+ netdev_walk_all_lower_dev(dev, prestera_lower_dev_walk, &priv);
+
+ return port;
+}
+
+static int prestera_netdev_port_lower_event(struct net_device *dev,
+ unsigned long event, void *ptr)
+{
+ struct netdev_notifier_changelowerstate_info *info = ptr;
+ struct netdev_lag_lower_state_info *lower_state_info;
+ struct prestera_port *port = netdev_priv(dev);
+ bool enabled;
+
+ if (!netif_is_lag_port(dev))
+ return 0;
+ if (!prestera_port_is_lag_member(port))
+ return 0;
+
+ lower_state_info = info->lower_state_info;
+ enabled = lower_state_info->link_up && lower_state_info->tx_enabled;
+
+ return prestera_hw_lag_member_enable(port, port->lag->lag_id, enabled);
+}
+
+static bool prestera_lag_master_check(struct net_device *lag_dev,
+ struct netdev_lag_upper_info *info,
+ struct netlink_ext_ack *ext_ack)
+{
+ if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
+ NL_SET_ERR_MSG_MOD(ext_ack, "Unsupported LAG Tx type");
+ return false;
+ }
+
+ return true;
+}
+
+static int prestera_netdev_port_event(struct net_device *lower,
+ struct net_device *dev,
+ unsigned long event, void *ptr)
+{
+ struct netdev_notifier_info *info = ptr;
+ struct netdev_notifier_changeupper_info *cu_info;
+ struct prestera_port *port = netdev_priv(dev);
+ struct netlink_ext_ack *extack;
+ struct net_device *upper;
+
+ extack = netdev_notifier_info_to_extack(info);
+ cu_info = container_of(info,
+ struct netdev_notifier_changeupper_info,
+ info);
+
+ switch (event) {
+ case NETDEV_PRECHANGEUPPER:
+ upper = cu_info->upper_dev;
+ if (!netif_is_bridge_master(upper) &&
+ !netif_is_lag_master(upper)) {
+ NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
+ return -EINVAL;
+ }
+
+ if (!cu_info->linking)
+ break;
+
+ if (netdev_has_any_upper_dev(upper)) {
+ NL_SET_ERR_MSG_MOD(extack, "Upper device is already enslaved");
+ return -EINVAL;
+ }
+
+ if (netif_is_lag_master(upper) &&
+ !prestera_lag_master_check(upper, cu_info->upper_info, extack))
+ return -EOPNOTSUPP;
+ if (netif_is_lag_master(upper) && vlan_uses_dev(dev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Master device is a LAG master and port has a VLAN");
+ return -EINVAL;
+ }
+ if (netif_is_lag_port(dev) && is_vlan_dev(upper) &&
+ !netif_is_lag_master(vlan_dev_real_dev(upper))) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can not put a VLAN on a LAG port");
+ return -EINVAL;
+ }
+ break;
+
+ case NETDEV_CHANGEUPPER:
+ upper = cu_info->upper_dev;
+ if (netif_is_bridge_master(upper)) {
+ if (cu_info->linking)
+ return prestera_bridge_port_join(upper, port,
+ extack);
+ else
+ prestera_bridge_port_leave(upper, port);
+ } else if (netif_is_lag_master(upper)) {
+ if (cu_info->linking)
+ return prestera_lag_port_add(port, upper);
+ else
+ prestera_lag_port_del(port);
+ }
+ break;
+
+ case NETDEV_CHANGELOWERSTATE:
+ return prestera_netdev_port_lower_event(dev, event, ptr);
+ }
+
+ return 0;
+}
+
+static int prestera_netdevice_lag_event(struct net_device *lag_dev,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev;
+ struct list_head *iter;
+ int err;
+
+ netdev_for_each_lower_dev(lag_dev, dev, iter) {
+ if (prestera_netdev_check(dev)) {
+ err = prestera_netdev_port_event(lag_dev, dev, event,
+ ptr);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int prestera_netdev_event_handler(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ int err = 0;
+
+ if (prestera_netdev_check(dev))
+ err = prestera_netdev_port_event(dev, dev, event, ptr);
+ else if (netif_is_lag_master(dev))
+ err = prestera_netdevice_lag_event(dev, event, ptr);
+
+ return notifier_from_errno(err);
+}
+
+struct prestera_mdb_entry *
+prestera_mdb_entry_create(struct prestera_switch *sw,
+ const unsigned char *addr, u16 vid)
+{
+ struct prestera_flood_domain *flood_domain;
+ struct prestera_mdb_entry *mdb_entry;
+
+ mdb_entry = kzalloc(sizeof(*mdb_entry), GFP_KERNEL);
+ if (!mdb_entry)
+ goto err_mdb_alloc;
+
+ flood_domain = prestera_flood_domain_create(sw);
+ if (!flood_domain)
+ goto err_flood_domain_create;
+
+ mdb_entry->sw = sw;
+ mdb_entry->vid = vid;
+ mdb_entry->flood_domain = flood_domain;
+ ether_addr_copy(mdb_entry->addr, addr);
+
+ if (prestera_hw_mdb_create(mdb_entry))
+ goto err_mdb_hw_create;
+
+ return mdb_entry;
+
+err_mdb_hw_create:
+ prestera_flood_domain_destroy(flood_domain);
+err_flood_domain_create:
+ kfree(mdb_entry);
+err_mdb_alloc:
+ return NULL;
+}
+
+void prestera_mdb_entry_destroy(struct prestera_mdb_entry *mdb_entry)
+{
+ prestera_hw_mdb_destroy(mdb_entry);
+ prestera_flood_domain_destroy(mdb_entry->flood_domain);
+ kfree(mdb_entry);
+}
+
+struct prestera_flood_domain *
+prestera_flood_domain_create(struct prestera_switch *sw)
+{
+ struct prestera_flood_domain *domain;
+
+ domain = kzalloc(sizeof(*domain), GFP_KERNEL);
+ if (!domain)
+ return NULL;
+
+ domain->sw = sw;
+
+ if (prestera_hw_flood_domain_create(domain)) {
+ kfree(domain);
+ return NULL;
+ }
+
+ INIT_LIST_HEAD(&domain->flood_domain_port_list);
+
+ return domain;
+}
+
+void prestera_flood_domain_destroy(struct prestera_flood_domain *flood_domain)
+{
+ WARN_ON(!list_empty(&flood_domain->flood_domain_port_list));
+ WARN_ON_ONCE(prestera_hw_flood_domain_destroy(flood_domain));
+ kfree(flood_domain);
+}
+
+int
+prestera_flood_domain_port_create(struct prestera_flood_domain *flood_domain,
+ struct net_device *dev,
+ u16 vid)
+{
+ struct prestera_flood_domain_port *flood_domain_port;
+ bool is_first_port_in_list = false;
+ int err;
+
+ flood_domain_port = kzalloc(sizeof(*flood_domain_port), GFP_KERNEL);
+ if (!flood_domain_port) {
+ err = -ENOMEM;
+ goto err_port_alloc;
+ }
+
+ flood_domain_port->vid = vid;
+
+ if (list_empty(&flood_domain->flood_domain_port_list))
+ is_first_port_in_list = true;
+
+ list_add(&flood_domain_port->flood_domain_port_node,
+ &flood_domain->flood_domain_port_list);
+
+ flood_domain_port->flood_domain = flood_domain;
+ flood_domain_port->dev = dev;
+
+ if (!is_first_port_in_list) {
+ err = prestera_hw_flood_domain_ports_reset(flood_domain);
+ if (err)
+ goto err_prestera_mdb_port_create_hw;
+ }
+
+ err = prestera_hw_flood_domain_ports_set(flood_domain);
+ if (err)
+ goto err_prestera_mdb_port_create_hw;
+
+ return 0;
+
+err_prestera_mdb_port_create_hw:
+ list_del(&flood_domain_port->flood_domain_port_node);
+ kfree(flood_domain_port);
+err_port_alloc:
+ return err;
+}
+
+void
+prestera_flood_domain_port_destroy(struct prestera_flood_domain_port *port)
+{
+ struct prestera_flood_domain *flood_domain = port->flood_domain;
+
+ list_del(&port->flood_domain_port_node);
+
+ WARN_ON_ONCE(prestera_hw_flood_domain_ports_reset(flood_domain));
+
+ if (!list_empty(&flood_domain->flood_domain_port_list))
+ WARN_ON_ONCE(prestera_hw_flood_domain_ports_set(flood_domain));
+
+ kfree(port);
+}
+
+struct prestera_flood_domain_port *
+prestera_flood_domain_port_find(struct prestera_flood_domain *flood_domain,
+ struct net_device *dev, u16 vid)
+{
+ struct prestera_flood_domain_port *flood_domain_port;
+
+ list_for_each_entry(flood_domain_port,
+ &flood_domain->flood_domain_port_list,
+ flood_domain_port_node)
+ if (flood_domain_port->dev == dev &&
+ vid == flood_domain_port->vid)
+ return flood_domain_port;
+
+ return NULL;
+}
+
+static int prestera_netdev_event_handler_register(struct prestera_switch *sw)
+{
+ sw->netdev_nb.notifier_call = prestera_netdev_event_handler;
+
+ return register_netdevice_notifier(&sw->netdev_nb);
+}
+
+static void prestera_netdev_event_handler_unregister(struct prestera_switch *sw)
+{
+ unregister_netdevice_notifier(&sw->netdev_nb);
+}
+
+static int prestera_switch_init(struct prestera_switch *sw)
+{
+ int err;
+
+ sw->np = sw->dev->dev->of_node;
+
+ err = prestera_hw_switch_init(sw);
+ if (err) {
+ dev_err(prestera_dev(sw), "Failed to init Switch device\n");
+ return err;
+ }
+
+ rwlock_init(&sw->port_list_lock);
+ INIT_LIST_HEAD(&sw->port_list);
+
+ err = prestera_switch_set_base_mac_addr(sw);
+ if (err)
+ return err;
+
+ err = prestera_netdev_event_handler_register(sw);
+ if (err)
+ return err;
+
+ err = prestera_router_init(sw);
+ if (err)
+ goto err_router_init;
+
+ err = prestera_switchdev_init(sw);
+ if (err)
+ goto err_swdev_register;
+
+ err = prestera_rxtx_switch_init(sw);
+ if (err)
+ goto err_rxtx_register;
+
+ err = prestera_event_handlers_register(sw);
+ if (err)
+ goto err_handlers_register;
+
+ err = prestera_counter_init(sw);
+ if (err)
+ goto err_counter_init;
+
+ err = prestera_acl_init(sw);
+ if (err)
+ goto err_acl_init;
+
+ err = prestera_span_init(sw);
+ if (err)
+ goto err_span_init;
+
+ err = prestera_devlink_traps_register(sw);
+ if (err)
+ goto err_dl_register;
+
+ err = prestera_lag_init(sw);
+ if (err)
+ goto err_lag_init;
+
+ err = prestera_create_ports(sw);
+ if (err)
+ goto err_ports_create;
+
+ prestera_devlink_register(sw);
+ return 0;
+
+err_ports_create:
+ prestera_lag_fini(sw);
+err_lag_init:
+ prestera_devlink_traps_unregister(sw);
+err_dl_register:
+ prestera_span_fini(sw);
+err_span_init:
+ prestera_acl_fini(sw);
+err_acl_init:
+ prestera_counter_fini(sw);
+err_counter_init:
+ prestera_event_handlers_unregister(sw);
+err_handlers_register:
+ prestera_rxtx_switch_fini(sw);
+err_rxtx_register:
+ prestera_switchdev_fini(sw);
+err_swdev_register:
+ prestera_router_fini(sw);
+err_router_init:
+ prestera_netdev_event_handler_unregister(sw);
+ prestera_hw_switch_fini(sw);
+
+ return err;
+}
+
+static void prestera_switch_fini(struct prestera_switch *sw)
+{
+ prestera_devlink_unregister(sw);
+ prestera_destroy_ports(sw);
+ prestera_lag_fini(sw);
+ prestera_devlink_traps_unregister(sw);
+ prestera_span_fini(sw);
+ prestera_acl_fini(sw);
+ prestera_counter_fini(sw);
+ prestera_event_handlers_unregister(sw);
+ prestera_rxtx_switch_fini(sw);
+ prestera_switchdev_fini(sw);
+ prestera_router_fini(sw);
+ prestera_netdev_event_handler_unregister(sw);
+ prestera_hw_switch_fini(sw);
+ of_node_put(sw->np);
+}
+
+int prestera_device_register(struct prestera_device *dev)
+{
+ struct prestera_switch *sw;
+ int err;
+
+ sw = prestera_devlink_alloc(dev);
+ if (!sw)
+ return -ENOMEM;
+
+ dev->priv = sw;
+ sw->dev = dev;
+
+ err = prestera_switch_init(sw);
+ if (err) {
+ prestera_devlink_free(sw);
+ return err;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(prestera_device_register);
+
+void prestera_device_unregister(struct prestera_device *dev)
+{
+ struct prestera_switch *sw = dev->priv;
+
+ prestera_switch_fini(sw);
+ prestera_devlink_free(sw);
+}
+EXPORT_SYMBOL(prestera_device_unregister);
+
+static int __init prestera_module_init(void)
+{
+ prestera_wq = alloc_workqueue("prestera", 0, 0);
+ if (!prestera_wq)
+ return -ENOMEM;
+
+ prestera_owq = alloc_ordered_workqueue("prestera_ordered", 0);
+ if (!prestera_owq) {
+ destroy_workqueue(prestera_wq);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void __exit prestera_module_exit(void)
+{
+ destroy_workqueue(prestera_wq);
+ destroy_workqueue(prestera_owq);
+}
+
+module_init(prestera_module_init);
+module_exit(prestera_module_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("Marvell Prestera switch driver");
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_matchall.c b/drivers/net/ethernet/marvell/prestera/prestera_matchall.c
new file mode 100644
index 000000000..1da9c1bc1
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_matchall.c
@@ -0,0 +1,127 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2019-2022 Marvell International Ltd. All rights reserved */
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+
+#include "prestera.h"
+#include "prestera_hw.h"
+#include "prestera_flow.h"
+#include "prestera_flower.h"
+#include "prestera_matchall.h"
+#include "prestera_span.h"
+
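+/* Check matchall vs. flower ordering on this block: an ingress mirror rule
+ * must be installed in front of (lower prio than) every flower rule, an
+ * egress mirror rule behind them; otherwise the offload is rejected.
+ */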
+static int prestera_mall_prio_check(struct prestera_flow_block *block,
+ struct tc_cls_matchall_offload *f)
+{
+ u32 flower_prio_min;
+ u32 flower_prio_max;
+ int err;
+
+ err = prestera_flower_prio_get(block, f->common.chain_index,
+ &flower_prio_min, &flower_prio_max);
+ if (err == -ENOENT)
+ /* No flower filters installed on this chain. */
+ return 0;
+
+ if (err) {
+ NL_SET_ERR_MSG(f->common.extack, "Failed to get flower priorities");
+ return err;
+ }
+
+ if (f->common.prio <= flower_prio_max && !block->ingress) {
+ NL_SET_ERR_MSG(f->common.extack, "Failed to add in front of existing flower rules");
+ return -EOPNOTSUPP;
+ }
+ if (f->common.prio >= flower_prio_min && block->ingress) {
+		NL_SET_ERR_MSG(f->common.extack, "Failed to add behind existing flower rules");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+int prestera_mall_prio_get(struct prestera_flow_block *block,
+ u32 *prio_min, u32 *prio_max)
+{
+ if (!block->mall.bound)
+ return -ENOENT;
+
+ *prio_min = block->mall.prio_min;
+ *prio_max = block->mall.prio_max;
+ return 0;
+}
+
+static void prestera_mall_prio_update(struct prestera_flow_block *block,
+ struct tc_cls_matchall_offload *f)
+{
+ block->mall.prio_min = min(block->mall.prio_min, f->common.prio);
+ block->mall.prio_max = max(block->mall.prio_max, f->common.prio);
+}
+
+int prestera_mall_replace(struct prestera_flow_block *block,
+ struct tc_cls_matchall_offload *f)
+{
+ struct prestera_flow_block_binding *binding;
+ __be16 protocol = f->common.protocol;
+ struct flow_action_entry *act;
+ struct prestera_port *port;
+ int err;
+
+ if (!flow_offload_has_one_action(&f->rule->action)) {
+ NL_SET_ERR_MSG(f->common.extack,
+ "Only singular actions are supported");
+ return -EOPNOTSUPP;
+ }
+
+ act = &f->rule->action.entries[0];
+
+ if (!prestera_netdev_check(act->dev)) {
+ NL_SET_ERR_MSG(f->common.extack,
+ "Only Marvell Prestera port is supported");
+ return -EINVAL;
+ }
+ if (!tc_cls_can_offload_and_chain0(act->dev, &f->common))
+ return -EOPNOTSUPP;
+ if (act->id != FLOW_ACTION_MIRRED)
+ return -EOPNOTSUPP;
+ if (protocol != htons(ETH_P_ALL))
+ return -EOPNOTSUPP;
+
+ err = prestera_mall_prio_check(block, f);
+ if (err)
+ return err;
+
+ port = netdev_priv(act->dev);
+
+ list_for_each_entry(binding, &block->binding_list, list) {
+ err = prestera_span_rule_add(binding, port, block->ingress);
+ if (err == -EEXIST)
+ return err;
+ if (err)
+ goto rollback;
+ }
+
+ prestera_mall_prio_update(block, f);
+
+ block->mall.bound = true;
+ return 0;
+
+rollback:
+ list_for_each_entry_continue_reverse(binding,
+ &block->binding_list, list)
+ prestera_span_rule_del(binding, block->ingress);
+ return err;
+}
+
+void prestera_mall_destroy(struct prestera_flow_block *block)
+{
+ struct prestera_flow_block_binding *binding;
+
+ list_for_each_entry(binding, &block->binding_list, list)
+ prestera_span_rule_del(binding, block->ingress);
+
+ block->mall.prio_min = UINT_MAX;
+ block->mall.prio_max = 0;
+ block->mall.bound = false;
+}
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_matchall.h b/drivers/net/ethernet/marvell/prestera/prestera_matchall.h
new file mode 100644
index 000000000..fed08be80
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_matchall.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2022 Marvell International Ltd. All rights reserved. */
+
+#ifndef _PRESTERA_MATCHALL_H_
+#define _PRESTERA_MATCHALL_H_
+
+#include <net/pkt_cls.h>
+
+struct prestera_flow_block;
+
+int prestera_mall_replace(struct prestera_flow_block *block,
+ struct tc_cls_matchall_offload *f);
+void prestera_mall_destroy(struct prestera_flow_block *block);
+int prestera_mall_prio_get(struct prestera_flow_block *block,
+ u32 *prio_min, u32 *prio_max);
+
+#endif /* _PRESTERA_MATCHALL_H_ */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_pci.c b/drivers/net/ethernet/marvell/prestera/prestera_pci.c
new file mode 100644
index 000000000..35857dc19
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_pci.c
@@ -0,0 +1,981 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved */
+
+#include <linux/bitfield.h>
+#include <linux/circ_buf.h>
+#include <linux/device.h>
+#include <linux/firmware.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "prestera.h"
+
+#define PRESTERA_MSG_MAX_SIZE 1500
+
+#define PRESTERA_SUPP_FW_MAJ_VER 4
+#define PRESTERA_SUPP_FW_MIN_VER 1
+
+#define PRESTERA_PREV_FW_MAJ_VER 4
+#define PRESTERA_PREV_FW_MIN_VER 0
+
+#define PRESTERA_FW_PATH_FMT "mrvl/prestera/mvsw_prestera_fw-v%u.%u.img"
+#define PRESTERA_FW_ARM64_PATH_FMT "mrvl/prestera/mvsw_prestera_fw_arm64-v%u.%u.img"
+
+#define PRESTERA_FW_HDR_MAGIC 0x351D9D06
+#define PRESTERA_FW_DL_TIMEOUT_MS 50000
+#define PRESTERA_FW_BLK_SZ 1024
+
+#define PRESTERA_FW_VER_MAJ_MUL 1000000
+#define PRESTERA_FW_VER_MIN_MUL 1000
+
+#define PRESTERA_FW_VER_MAJ(v) ((v) / PRESTERA_FW_VER_MAJ_MUL)
+
+#define PRESTERA_FW_VER_MIN(v) \
+ (((v) - (PRESTERA_FW_VER_MAJ(v) * PRESTERA_FW_VER_MAJ_MUL)) / \
+ PRESTERA_FW_VER_MIN_MUL)
+
+#define PRESTERA_FW_VER_PATCH(v) \
+ ((v) - (PRESTERA_FW_VER_MAJ(v) * PRESTERA_FW_VER_MAJ_MUL) - \
+ (PRESTERA_FW_VER_MIN(v) * PRESTERA_FW_VER_MIN_MUL))
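+
+/* The firmware encodes its version as maj * 1000000 + min * 1000 + patch,
+ * e.g. 4001002 decodes to 4.1.2 with the helpers above.
+ */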
+
+enum prestera_pci_bar_t {
+ PRESTERA_PCI_BAR_FW = 2,
+ PRESTERA_PCI_BAR_PP = 4,
+};
+
+struct prestera_fw_header {
+ __be32 magic_number;
+ __be32 version_value;
+ u8 reserved[8];
+};
+
+struct prestera_ldr_regs {
+ u32 ldr_ready;
+ u32 pad1;
+
+ u32 ldr_img_size;
+ u32 ldr_ctl_flags;
+
+ u32 ldr_buf_offs;
+ u32 ldr_buf_size;
+
+ u32 ldr_buf_rd;
+ u32 pad2;
+ u32 ldr_buf_wr;
+
+ u32 ldr_status;
+};
+
+#define PRESTERA_LDR_REG_OFFSET(f) offsetof(struct prestera_ldr_regs, f)
+
+#define PRESTERA_LDR_READY_MAGIC 0xf00dfeed
+
+#define PRESTERA_LDR_STATUS_IMG_DL BIT(0)
+#define PRESTERA_LDR_STATUS_START_FW BIT(1)
+#define PRESTERA_LDR_STATUS_INVALID_IMG BIT(2)
+#define PRESTERA_LDR_STATUS_NOMEM BIT(3)
+
+#define PRESTERA_LDR_REG_BASE(fw) ((fw)->ldr_regs)
+#define PRESTERA_LDR_REG_ADDR(fw, reg) (PRESTERA_LDR_REG_BASE(fw) + (reg))
+
+/* fw loader registers */
+#define PRESTERA_LDR_READY_REG PRESTERA_LDR_REG_OFFSET(ldr_ready)
+#define PRESTERA_LDR_IMG_SIZE_REG PRESTERA_LDR_REG_OFFSET(ldr_img_size)
+#define PRESTERA_LDR_CTL_REG PRESTERA_LDR_REG_OFFSET(ldr_ctl_flags)
+#define PRESTERA_LDR_BUF_SIZE_REG PRESTERA_LDR_REG_OFFSET(ldr_buf_size)
+#define PRESTERA_LDR_BUF_OFFS_REG PRESTERA_LDR_REG_OFFSET(ldr_buf_offs)
+#define PRESTERA_LDR_BUF_RD_REG PRESTERA_LDR_REG_OFFSET(ldr_buf_rd)
+#define PRESTERA_LDR_BUF_WR_REG PRESTERA_LDR_REG_OFFSET(ldr_buf_wr)
+#define PRESTERA_LDR_STATUS_REG PRESTERA_LDR_REG_OFFSET(ldr_status)
+
+#define PRESTERA_LDR_CTL_DL_START BIT(0)
+
+#define PRESTERA_EVT_QNUM_MAX 4
+
+struct prestera_fw_evtq_regs {
+ u32 rd_idx;
+ u32 pad1;
+ u32 wr_idx;
+ u32 pad2;
+ u32 offs;
+ u32 len;
+};
+
+#define PRESTERA_CMD_QNUM_MAX 4
+
+struct prestera_fw_cmdq_regs {
+ u32 req_ctl;
+ u32 req_len;
+ u32 rcv_ctl;
+ u32 rcv_len;
+ u32 offs;
+ u32 len;
+};
+
+struct prestera_fw_regs {
+ u32 fw_ready;
+ u32 cmd_offs;
+ u32 cmd_len;
+ u32 cmd_qnum;
+ u32 evt_offs;
+ u32 evt_qnum;
+
+ u32 fw_status;
+ u32 rx_status;
+
+	struct prestera_fw_cmdq_regs cmdq_list[PRESTERA_CMD_QNUM_MAX];
+	struct prestera_fw_evtq_regs evtq_list[PRESTERA_EVT_QNUM_MAX];
+};
+
+#define PRESTERA_FW_REG_OFFSET(f) offsetof(struct prestera_fw_regs, f)
+
+#define PRESTERA_FW_READY_MAGIC 0xcafebabe
+
+/* fw registers */
+#define PRESTERA_FW_READY_REG PRESTERA_FW_REG_OFFSET(fw_ready)
+
+#define PRESTERA_CMD_BUF_OFFS_REG PRESTERA_FW_REG_OFFSET(cmd_offs)
+#define PRESTERA_CMD_BUF_LEN_REG PRESTERA_FW_REG_OFFSET(cmd_len)
+#define PRESTERA_CMD_QNUM_REG PRESTERA_FW_REG_OFFSET(cmd_qnum)
+#define PRESTERA_EVT_BUF_OFFS_REG PRESTERA_FW_REG_OFFSET(evt_offs)
+#define PRESTERA_EVT_QNUM_REG PRESTERA_FW_REG_OFFSET(evt_qnum)
+
+#define PRESTERA_CMDQ_REG_OFFSET(q, f) \
+ (PRESTERA_FW_REG_OFFSET(cmdq_list) + \
+ (q) * sizeof(struct prestera_fw_cmdq_regs) + \
+ offsetof(struct prestera_fw_cmdq_regs, f))
+
+#define PRESTERA_CMDQ_REQ_CTL_REG(q) PRESTERA_CMDQ_REG_OFFSET(q, req_ctl)
+#define PRESTERA_CMDQ_REQ_LEN_REG(q) PRESTERA_CMDQ_REG_OFFSET(q, req_len)
+#define PRESTERA_CMDQ_RCV_CTL_REG(q) PRESTERA_CMDQ_REG_OFFSET(q, rcv_ctl)
+#define PRESTERA_CMDQ_RCV_LEN_REG(q) PRESTERA_CMDQ_REG_OFFSET(q, rcv_len)
+#define PRESTERA_CMDQ_OFFS_REG(q) PRESTERA_CMDQ_REG_OFFSET(q, offs)
+#define PRESTERA_CMDQ_LEN_REG(q) PRESTERA_CMDQ_REG_OFFSET(q, len)
+
+#define PRESTERA_FW_STATUS_REG PRESTERA_FW_REG_OFFSET(fw_status)
+#define PRESTERA_RX_STATUS_REG PRESTERA_FW_REG_OFFSET(rx_status)
+
+/* PRESTERA_CMDQ_REQ_CTL_REG(q) flags */
+#define PRESTERA_CMD_F_REQ_SENT BIT(0)
+#define PRESTERA_CMD_F_REPL_RCVD BIT(1)
+
+/* PRESTERA_CMDQ_RCV_CTL_REG(q) flags */
+#define PRESTERA_CMD_F_REPL_SENT BIT(0)
+
+#define PRESTERA_FW_EVT_CTL_STATUS_MASK GENMASK(1, 0)
+
+#define PRESTERA_FW_EVT_CTL_STATUS_ON 0
+#define PRESTERA_FW_EVT_CTL_STATUS_OFF 1
+
+#define PRESTERA_EVTQ_REG_OFFSET(q, f) \
+ (PRESTERA_FW_REG_OFFSET(evtq_list) + \
+ (q) * sizeof(struct prestera_fw_evtq_regs) + \
+ offsetof(struct prestera_fw_evtq_regs, f))
+
+#define PRESTERA_EVTQ_RD_IDX_REG(q) PRESTERA_EVTQ_REG_OFFSET(q, rd_idx)
+#define PRESTERA_EVTQ_WR_IDX_REG(q) PRESTERA_EVTQ_REG_OFFSET(q, wr_idx)
+#define PRESTERA_EVTQ_OFFS_REG(q) PRESTERA_EVTQ_REG_OFFSET(q, offs)
+#define PRESTERA_EVTQ_LEN_REG(q) PRESTERA_EVTQ_REG_OFFSET(q, len)
+
+#define PRESTERA_FW_REG_BASE(fw) ((fw)->dev.ctl_regs)
+#define PRESTERA_FW_REG_ADDR(fw, reg)	(PRESTERA_FW_REG_BASE(fw) + (reg))
+
+#define PRESTERA_FW_CMD_DEFAULT_WAIT_MS 30000
+#define PRESTERA_FW_READY_WAIT_MS 20000
+
+#define PRESTERA_DEV_ID_AC3X_98DX_55 0xC804
+#define PRESTERA_DEV_ID_AC3X_98DX_65 0xC80C
+#define PRESTERA_DEV_ID_ALDRIN2 0xCC1E
+#define PRESTERA_DEV_ID_98DX7312M 0x981F
+#define PRESTERA_DEV_ID_98DX3500 0x9820
+#define PRESTERA_DEV_ID_98DX3501 0x9826
+#define PRESTERA_DEV_ID_98DX3510 0x9821
+#define PRESTERA_DEV_ID_98DX3520 0x9822
+
+struct prestera_fw_evtq {
+ u8 __iomem *addr;
+ size_t len;
+};
+
+struct prestera_fw_cmdq {
+ /* serialize access to dev->send_req */
+ struct mutex cmd_mtx;
+ u8 __iomem *addr;
+ size_t len;
+};
+
+struct prestera_fw {
+ struct prestera_fw_rev rev_supp;
+ const struct firmware *bin;
+ struct workqueue_struct *wq;
+ struct prestera_device dev;
+ struct pci_dev *pci_dev;
+ u8 __iomem *ldr_regs;
+ u8 __iomem *ldr_ring_buf;
+ u32 ldr_buf_len;
+ u32 ldr_wr_idx;
+ size_t cmd_mbox_len;
+ u8 __iomem *cmd_mbox;
+ struct prestera_fw_cmdq cmd_queue[PRESTERA_CMD_QNUM_MAX];
+ u8 cmd_qnum;
+ struct prestera_fw_evtq evt_queue[PRESTERA_EVT_QNUM_MAX];
+ u8 evt_qnum;
+ struct work_struct evt_work;
+ u8 __iomem *evt_buf;
+ u8 *evt_msg;
+};
+
+static int prestera_fw_load(struct prestera_fw *fw);
+
+static void prestera_fw_write(struct prestera_fw *fw, u32 reg, u32 val)
+{
+ writel(val, PRESTERA_FW_REG_ADDR(fw, reg));
+}
+
+static u32 prestera_fw_read(struct prestera_fw *fw, u32 reg)
+{
+ return readl(PRESTERA_FW_REG_ADDR(fw, reg));
+}
+
+static u32 prestera_fw_evtq_len(struct prestera_fw *fw, u8 qid)
+{
+ return fw->evt_queue[qid].len;
+}
+
+static u32 prestera_fw_evtq_avail(struct prestera_fw *fw, u8 qid)
+{
+ u32 wr_idx = prestera_fw_read(fw, PRESTERA_EVTQ_WR_IDX_REG(qid));
+ u32 rd_idx = prestera_fw_read(fw, PRESTERA_EVTQ_RD_IDX_REG(qid));
+
+ return CIRC_CNT(wr_idx, rd_idx, prestera_fw_evtq_len(fw, qid));
+}
+
+static void prestera_fw_evtq_rd_set(struct prestera_fw *fw,
+ u8 qid, u32 idx)
+{
+ u32 rd_idx = idx & (prestera_fw_evtq_len(fw, qid) - 1);
+
+ prestera_fw_write(fw, PRESTERA_EVTQ_RD_IDX_REG(qid), rd_idx);
+}
+
+static u8 __iomem *prestera_fw_evtq_buf(struct prestera_fw *fw, u8 qid)
+{
+ return fw->evt_queue[qid].addr;
+}
+
+static u32 prestera_fw_evtq_read32(struct prestera_fw *fw, u8 qid)
+{
+ u32 rd_idx = prestera_fw_read(fw, PRESTERA_EVTQ_RD_IDX_REG(qid));
+ u32 val;
+
+ val = readl(prestera_fw_evtq_buf(fw, qid) + rd_idx);
+ prestera_fw_evtq_rd_set(fw, qid, rd_idx + 4);
+ return val;
+}
+
+static ssize_t prestera_fw_evtq_read_buf(struct prestera_fw *fw,
+ u8 qid, void *buf, size_t len)
+{
+ u32 idx = prestera_fw_read(fw, PRESTERA_EVTQ_RD_IDX_REG(qid));
+ u8 __iomem *evtq_addr = prestera_fw_evtq_buf(fw, qid);
+ u32 *buf32 = buf;
+ int i;
+
+ for (i = 0; i < len / 4; buf32++, i++) {
+ *buf32 = readl_relaxed(evtq_addr + idx);
+ idx = (idx + 4) & (prestera_fw_evtq_len(fw, qid) - 1);
+ }
+
+ prestera_fw_evtq_rd_set(fw, qid, idx);
+
+ return i;
+}
+
+static u8 prestera_fw_evtq_pick(struct prestera_fw *fw)
+{
+ int qid;
+
+ for (qid = 0; qid < fw->evt_qnum; qid++) {
+ if (prestera_fw_evtq_avail(fw, qid) >= 4)
+ return qid;
+ }
+
+ return PRESTERA_EVT_QNUM_MAX;
+}
+
+static void prestera_fw_evt_ctl_status_set(struct prestera_fw *fw, u32 val)
+{
+ u32 status = prestera_fw_read(fw, PRESTERA_FW_STATUS_REG);
+
+ u32p_replace_bits(&status, val, PRESTERA_FW_EVT_CTL_STATUS_MASK);
+
+ prestera_fw_write(fw, PRESTERA_FW_STATUS_REG, status);
+}
+
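+/* Drain the event queues: each event is framed as a 32-bit length word
+ * followed by the payload, held in a circular buffer (assumed power-of-two
+ * sized, given the index masking) whose read/write positions live in the
+ * rd_idx/wr_idx registers. Event signalling is switched off while draining
+ * and re-enabled afterwards.
+ */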
+static void prestera_fw_evt_work_fn(struct work_struct *work)
+{
+ struct prestera_fw *fw;
+ void *msg;
+ u8 qid;
+
+ fw = container_of(work, struct prestera_fw, evt_work);
+ msg = fw->evt_msg;
+
+ prestera_fw_evt_ctl_status_set(fw, PRESTERA_FW_EVT_CTL_STATUS_OFF);
+
+ while ((qid = prestera_fw_evtq_pick(fw)) < PRESTERA_EVT_QNUM_MAX) {
+ u32 idx;
+ u32 len;
+
+ len = prestera_fw_evtq_read32(fw, qid);
+ idx = prestera_fw_read(fw, PRESTERA_EVTQ_RD_IDX_REG(qid));
+
+ WARN_ON(prestera_fw_evtq_avail(fw, qid) < len);
+
+ if (WARN_ON(len > PRESTERA_MSG_MAX_SIZE)) {
+ prestera_fw_evtq_rd_set(fw, qid, idx + len);
+ continue;
+ }
+
+ prestera_fw_evtq_read_buf(fw, qid, msg, len);
+
+ if (fw->dev.recv_msg)
+ fw->dev.recv_msg(&fw->dev, msg, len);
+ }
+
+ prestera_fw_evt_ctl_status_set(fw, PRESTERA_FW_EVT_CTL_STATUS_ON);
+}
+
+static int prestera_fw_wait_reg32(struct prestera_fw *fw, u32 reg, u32 cmp,
+ unsigned int waitms)
+{
+ u8 __iomem *addr = PRESTERA_FW_REG_ADDR(fw, reg);
+ u32 val;
+
+ return readl_poll_timeout(addr, val, cmp == val,
+ 1 * USEC_PER_MSEC, waitms * USEC_PER_MSEC);
+}
+
+static void prestera_fw_cmdq_lock(struct prestera_fw *fw, u8 qid)
+{
+ mutex_lock(&fw->cmd_queue[qid].cmd_mtx);
+}
+
+static void prestera_fw_cmdq_unlock(struct prestera_fw *fw, u8 qid)
+{
+ mutex_unlock(&fw->cmd_queue[qid].cmd_mtx);
+}
+
+static u32 prestera_fw_cmdq_len(struct prestera_fw *fw, u8 qid)
+{
+ return fw->cmd_queue[qid].len;
+}
+
+static u8 __iomem *prestera_fw_cmdq_buf(struct prestera_fw *fw, u8 qid)
+{
+ return fw->cmd_queue[qid].addr;
+}
+
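+/* Command request/reply handshake with the firmware over a shared mailbox:
+ * wait until RCV_CTL is clear, copy the request into the queue buffer and
+ * raise REQ_SENT; the firmware writes its reply right behind the request in
+ * the same buffer and raises REPL_SENT; the driver copies the reply out and
+ * acknowledges it with REPL_RCVD.
+ */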
+static int prestera_fw_cmd_send(struct prestera_fw *fw, int qid,
+ void *in_msg, size_t in_size,
+ void *out_msg, size_t out_size,
+ unsigned int waitms)
+{
+ u32 ret_size;
+ int err;
+
+ if (!waitms)
+ waitms = PRESTERA_FW_CMD_DEFAULT_WAIT_MS;
+
+ if (ALIGN(in_size, 4) > prestera_fw_cmdq_len(fw, qid))
+ return -EMSGSIZE;
+
+	/* wait until the FW has finished with the previous reply */
+ err = prestera_fw_wait_reg32(fw, PRESTERA_CMDQ_RCV_CTL_REG(qid), 0, 30);
+ if (err) {
+		dev_err(fw->dev.dev, "timed out waiting for FW to finish previous reply\n");
+ return err;
+ }
+
+ prestera_fw_write(fw, PRESTERA_CMDQ_REQ_LEN_REG(qid), in_size);
+
+ memcpy_toio(prestera_fw_cmdq_buf(fw, qid), in_msg, in_size);
+
+ prestera_fw_write(fw, PRESTERA_CMDQ_REQ_CTL_REG(qid),
+ PRESTERA_CMD_F_REQ_SENT);
+
+ /* wait for reply from FW */
+ err = prestera_fw_wait_reg32(fw, PRESTERA_CMDQ_RCV_CTL_REG(qid),
+ PRESTERA_CMD_F_REPL_SENT, waitms);
+ if (err) {
+ dev_err(fw->dev.dev, "reply from FW is timed out\n");
+ goto cmd_exit;
+ }
+
+ ret_size = prestera_fw_read(fw, PRESTERA_CMDQ_RCV_LEN_REG(qid));
+ if (ret_size > out_size) {
+ dev_err(fw->dev.dev, "ret_size (%u) > out_len(%zu)\n",
+ ret_size, out_size);
+ err = -EMSGSIZE;
+ goto cmd_exit;
+ }
+
+ memcpy_fromio(out_msg,
+ prestera_fw_cmdq_buf(fw, qid) + in_size, ret_size);
+
+cmd_exit:
+ prestera_fw_write(fw, PRESTERA_CMDQ_REQ_CTL_REG(qid),
+ PRESTERA_CMD_F_REPL_RCVD);
+ return err;
+}
+
+static int prestera_fw_send_req(struct prestera_device *dev, int qid,
+ void *in_msg, size_t in_size, void *out_msg,
+ size_t out_size, unsigned int waitms)
+{
+ struct prestera_fw *fw;
+ ssize_t ret;
+
+ fw = container_of(dev, struct prestera_fw, dev);
+
+ prestera_fw_cmdq_lock(fw, qid);
+ ret = prestera_fw_cmd_send(fw, qid, in_msg, in_size, out_msg, out_size,
+ waitms);
+ prestera_fw_cmdq_unlock(fw, qid);
+
+ return ret;
+}
+
+static int prestera_fw_init(struct prestera_fw *fw)
+{
+ u8 __iomem *base;
+ int err;
+ u8 qid;
+
+ fw->dev.send_req = prestera_fw_send_req;
+ fw->ldr_regs = fw->dev.ctl_regs;
+
+ err = prestera_fw_load(fw);
+ if (err)
+ return err;
+
+ err = prestera_fw_wait_reg32(fw, PRESTERA_FW_READY_REG,
+ PRESTERA_FW_READY_MAGIC,
+ PRESTERA_FW_READY_WAIT_MS);
+ if (err) {
+ dev_err(fw->dev.dev, "FW failed to start\n");
+ return err;
+ }
+
+ base = fw->dev.ctl_regs;
+
+ fw->cmd_mbox = base + prestera_fw_read(fw, PRESTERA_CMD_BUF_OFFS_REG);
+ fw->cmd_mbox_len = prestera_fw_read(fw, PRESTERA_CMD_BUF_LEN_REG);
+ fw->cmd_qnum = prestera_fw_read(fw, PRESTERA_CMD_QNUM_REG);
+
+ for (qid = 0; qid < fw->cmd_qnum; qid++) {
+ u32 offs = prestera_fw_read(fw, PRESTERA_CMDQ_OFFS_REG(qid));
+ struct prestera_fw_cmdq *cmdq = &fw->cmd_queue[qid];
+
+ cmdq->len = prestera_fw_read(fw, PRESTERA_CMDQ_LEN_REG(qid));
+ cmdq->addr = fw->cmd_mbox + offs;
+ mutex_init(&cmdq->cmd_mtx);
+ }
+
+ fw->evt_buf = base + prestera_fw_read(fw, PRESTERA_EVT_BUF_OFFS_REG);
+ fw->evt_qnum = prestera_fw_read(fw, PRESTERA_EVT_QNUM_REG);
+ fw->evt_msg = kmalloc(PRESTERA_MSG_MAX_SIZE, GFP_KERNEL);
+ if (!fw->evt_msg)
+ return -ENOMEM;
+
+ for (qid = 0; qid < fw->evt_qnum; qid++) {
+ u32 offs = prestera_fw_read(fw, PRESTERA_EVTQ_OFFS_REG(qid));
+ struct prestera_fw_evtq *evtq = &fw->evt_queue[qid];
+
+ evtq->len = prestera_fw_read(fw, PRESTERA_EVTQ_LEN_REG(qid));
+ evtq->addr = fw->evt_buf + offs;
+ }
+
+ return 0;
+}
+
+static void prestera_fw_uninit(struct prestera_fw *fw)
+{
+ kfree(fw->evt_msg);
+}
+
+static irqreturn_t prestera_pci_irq_handler(int irq, void *dev_id)
+{
+ struct prestera_fw *fw = dev_id;
+
+ if (prestera_fw_read(fw, PRESTERA_RX_STATUS_REG)) {
+ prestera_fw_write(fw, PRESTERA_RX_STATUS_REG, 0);
+
+ if (fw->dev.recv_pkt)
+ fw->dev.recv_pkt(&fw->dev);
+ }
+
+ queue_work(fw->wq, &fw->evt_work);
+
+ return IRQ_HANDLED;
+}
+
+static void prestera_ldr_write(struct prestera_fw *fw, u32 reg, u32 val)
+{
+ writel(val, PRESTERA_LDR_REG_ADDR(fw, reg));
+}
+
+static u32 prestera_ldr_read(struct prestera_fw *fw, u32 reg)
+{
+ return readl(PRESTERA_LDR_REG_ADDR(fw, reg));
+}
+
+static int prestera_ldr_wait_reg32(struct prestera_fw *fw,
+ u32 reg, u32 cmp, unsigned int waitms)
+{
+ u8 __iomem *addr = PRESTERA_LDR_REG_ADDR(fw, reg);
+ u32 val;
+
+ return readl_poll_timeout(addr, val, cmp == val,
+ 10 * USEC_PER_MSEC, waitms * USEC_PER_MSEC);
+}
+
+static u32 prestera_ldr_wait_buf(struct prestera_fw *fw, size_t len)
+{
+ u8 __iomem *addr = PRESTERA_LDR_REG_ADDR(fw, PRESTERA_LDR_BUF_RD_REG);
+ u32 buf_len = fw->ldr_buf_len;
+ u32 wr_idx = fw->ldr_wr_idx;
+ u32 rd_idx;
+
+ return readl_poll_timeout(addr, rd_idx,
+ CIRC_SPACE(wr_idx, rd_idx, buf_len) >= len,
+ 1 * USEC_PER_MSEC, 100 * USEC_PER_MSEC);
+}
+
+static int prestera_ldr_wait_dl_finish(struct prestera_fw *fw)
+{
+ u8 __iomem *addr = PRESTERA_LDR_REG_ADDR(fw, PRESTERA_LDR_STATUS_REG);
+ unsigned long mask = ~(PRESTERA_LDR_STATUS_IMG_DL);
+ u32 val;
+ int err;
+
+ err = readl_poll_timeout(addr, val, val & mask, 10 * USEC_PER_MSEC,
+ PRESTERA_FW_DL_TIMEOUT_MS * USEC_PER_MSEC);
+ if (err) {
+ dev_err(fw->dev.dev, "Timeout to load FW img [state=%d]",
+ prestera_ldr_read(fw, PRESTERA_LDR_STATUS_REG));
+ return err;
+ }
+
+ return 0;
+}
+
+static void prestera_ldr_wr_idx_move(struct prestera_fw *fw, unsigned int n)
+{
+ fw->ldr_wr_idx = (fw->ldr_wr_idx + (n)) & (fw->ldr_buf_len - 1);
+}
+
+static void prestera_ldr_wr_idx_commit(struct prestera_fw *fw)
+{
+ prestera_ldr_write(fw, PRESTERA_LDR_BUF_WR_REG, fw->ldr_wr_idx);
+}
+
+static u8 __iomem *prestera_ldr_wr_ptr(struct prestera_fw *fw)
+{
+ return fw->ldr_ring_buf + fw->ldr_wr_idx;
+}
+
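+/* Push @len bytes of the image into the loader's circular buffer in 32-bit
+ * words: wait for enough CIRC_SPACE(), write the words at the current write
+ * index and then publish the new index via PRESTERA_LDR_BUF_WR_REG.
+ */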
+static int prestera_ldr_send(struct prestera_fw *fw, const u8 *buf, size_t len)
+{
+ int err;
+ int i;
+
+ err = prestera_ldr_wait_buf(fw, len);
+ if (err) {
+ dev_err(fw->dev.dev, "failed wait for sending firmware\n");
+ return err;
+ }
+
+ for (i = 0; i < len; i += 4) {
+ writel_relaxed(*(u32 *)(buf + i), prestera_ldr_wr_ptr(fw));
+ prestera_ldr_wr_idx_move(fw, 4);
+ }
+
+ prestera_ldr_wr_idx_commit(fw);
+ return 0;
+}
+
+static int prestera_ldr_fw_send(struct prestera_fw *fw,
+ const char *img, u32 fw_size)
+{
+ u32 status;
+ u32 pos;
+ int err;
+
+ err = prestera_ldr_wait_reg32(fw, PRESTERA_LDR_STATUS_REG,
+ PRESTERA_LDR_STATUS_IMG_DL,
+ 5 * MSEC_PER_SEC);
+ if (err) {
+ dev_err(fw->dev.dev, "Loader is not ready to load image\n");
+ return err;
+ }
+
+ for (pos = 0; pos < fw_size; pos += PRESTERA_FW_BLK_SZ) {
+ if (pos + PRESTERA_FW_BLK_SZ > fw_size)
+ break;
+
+ err = prestera_ldr_send(fw, img + pos, PRESTERA_FW_BLK_SZ);
+ if (err)
+ return err;
+ }
+
+ if (pos < fw_size) {
+ err = prestera_ldr_send(fw, img + pos, fw_size - pos);
+ if (err)
+ return err;
+ }
+
+ err = prestera_ldr_wait_dl_finish(fw);
+ if (err)
+ return err;
+
+ status = prestera_ldr_read(fw, PRESTERA_LDR_STATUS_REG);
+
+ switch (status) {
+ case PRESTERA_LDR_STATUS_INVALID_IMG:
+ dev_err(fw->dev.dev, "FW img has bad CRC\n");
+ return -EINVAL;
+ case PRESTERA_LDR_STATUS_NOMEM:
+		dev_err(fw->dev.dev, "Loader does not have enough memory\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void prestera_fw_rev_parse(const struct prestera_fw_header *hdr,
+ struct prestera_fw_rev *rev)
+{
+ u32 version = be32_to_cpu(hdr->version_value);
+
+ rev->maj = PRESTERA_FW_VER_MAJ(version);
+ rev->min = PRESTERA_FW_VER_MIN(version);
+ rev->sub = PRESTERA_FW_VER_PATCH(version);
+}
+
+static int prestera_fw_rev_check(struct prestera_fw *fw)
+{
+ struct prestera_fw_rev *rev = &fw->dev.fw_rev;
+
+ if (rev->maj == fw->rev_supp.maj && rev->min >= fw->rev_supp.min)
+ return 0;
+
+ dev_err(fw->dev.dev, "Driver supports FW version only '%u.%u.x'",
+ fw->rev_supp.maj, fw->rev_supp.min);
+
+ return -EINVAL;
+}
+
+static int prestera_fw_hdr_parse(struct prestera_fw *fw)
+{
+ struct prestera_fw_rev *rev = &fw->dev.fw_rev;
+ struct prestera_fw_header *hdr;
+ u32 magic;
+
+ hdr = (struct prestera_fw_header *)fw->bin->data;
+
+ magic = be32_to_cpu(hdr->magic_number);
+ if (magic != PRESTERA_FW_HDR_MAGIC) {
+ dev_err(fw->dev.dev, "FW img hdr magic is invalid");
+ return -EINVAL;
+ }
+
+ prestera_fw_rev_parse(hdr, rev);
+
+ dev_info(fw->dev.dev, "FW version '%u.%u.%u'\n",
+ rev->maj, rev->min, rev->sub);
+
+ return prestera_fw_rev_check(fw);
+}
+
+static const char *prestera_fw_path_fmt_get(struct prestera_fw *fw)
+{
+ switch (fw->pci_dev->device) {
+ case PRESTERA_DEV_ID_98DX3500:
+ case PRESTERA_DEV_ID_98DX3501:
+ case PRESTERA_DEV_ID_98DX3510:
+ case PRESTERA_DEV_ID_98DX3520:
+ return PRESTERA_FW_ARM64_PATH_FMT;
+
+ default:
+ return PRESTERA_FW_PATH_FMT;
+ }
+}
+
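+/* Request the newest supported firmware image (e.g.
+ * "mrvl/prestera/mvsw_prestera_fw-v4.1.img" for the non-ARM64 parts) and, if
+ * it is missing, fall back once to the previous 4.0 release before giving up.
+ */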
+static int prestera_fw_get(struct prestera_fw *fw)
+{
+ int ver_maj = PRESTERA_SUPP_FW_MAJ_VER;
+ int ver_min = PRESTERA_SUPP_FW_MIN_VER;
+ char fw_path[128];
+ int err;
+
+pick_fw_ver:
+ snprintf(fw_path, sizeof(fw_path), prestera_fw_path_fmt_get(fw),
+ ver_maj, ver_min);
+
+ err = request_firmware_direct(&fw->bin, fw_path, fw->dev.dev);
+ if (err) {
+ if (ver_maj != PRESTERA_PREV_FW_MAJ_VER ||
+ ver_min != PRESTERA_PREV_FW_MIN_VER) {
+ ver_maj = PRESTERA_PREV_FW_MAJ_VER;
+ ver_min = PRESTERA_PREV_FW_MIN_VER;
+
+ dev_warn(fw->dev.dev,
+ "missing latest %s firmware, fall-back to previous %u.%u version\n",
+ fw_path, ver_maj, ver_min);
+
+ goto pick_fw_ver;
+ } else {
+ dev_err(fw->dev.dev, "failed to request previous firmware: %s\n",
+ fw_path);
+ return err;
+ }
+ }
+
+ dev_info(fw->dev.dev, "Loading %s ...", fw_path);
+
+ fw->rev_supp.maj = ver_maj;
+ fw->rev_supp.min = ver_min;
+ fw->rev_supp.sub = 0;
+
+ return 0;
+}
+
+static void prestera_fw_put(struct prestera_fw *fw)
+{
+ release_firmware(fw->bin);
+}
+
+static int prestera_fw_load(struct prestera_fw *fw)
+{
+ size_t hlen = sizeof(struct prestera_fw_header);
+ int err;
+
+ err = prestera_ldr_wait_reg32(fw, PRESTERA_LDR_READY_REG,
+ PRESTERA_LDR_READY_MAGIC,
+ 5 * MSEC_PER_SEC);
+ if (err) {
+ dev_err(fw->dev.dev, "waiting for FW loader is timed out");
+ return err;
+ }
+
+ fw->ldr_ring_buf = fw->ldr_regs +
+ prestera_ldr_read(fw, PRESTERA_LDR_BUF_OFFS_REG);
+
+ fw->ldr_buf_len =
+ prestera_ldr_read(fw, PRESTERA_LDR_BUF_SIZE_REG);
+
+ fw->ldr_wr_idx = 0;
+
+ err = prestera_fw_get(fw);
+ if (err)
+ return err;
+
+ err = prestera_fw_hdr_parse(fw);
+ if (err) {
+ dev_err(fw->dev.dev, "FW image header is invalid\n");
+ goto out_release;
+ }
+
+ prestera_ldr_write(fw, PRESTERA_LDR_IMG_SIZE_REG, fw->bin->size - hlen);
+ prestera_ldr_write(fw, PRESTERA_LDR_CTL_REG, PRESTERA_LDR_CTL_DL_START);
+
+ err = prestera_ldr_fw_send(fw, fw->bin->data + hlen,
+ fw->bin->size - hlen);
+
+out_release:
+ prestera_fw_put(fw);
+ return err;
+}
+
+static bool prestera_pci_pp_use_bar2(struct pci_dev *pdev)
+{
+ switch (pdev->device) {
+ case PRESTERA_DEV_ID_98DX7312M:
+ case PRESTERA_DEV_ID_98DX3500:
+ case PRESTERA_DEV_ID_98DX3501:
+ case PRESTERA_DEV_ID_98DX3510:
+ case PRESTERA_DEV_ID_98DX3520:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
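+/* BAR2 layout for the devices above: a 16 MiB BAR2 keeps the packet-processor
+ * registers at offset 0x0 and the firmware control registers at 0x400000;
+ * larger BAR2 variants place the firmware block at 0x0 and the packet
+ * processor in the upper half.
+ */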
+static u32 prestera_pci_pp_bar2_offs(struct pci_dev *pdev)
+{
+ if (pci_resource_len(pdev, 2) == 0x1000000)
+ return 0x0;
+ else
+ return (pci_resource_len(pdev, 2) / 2);
+}
+
+static u32 prestera_pci_fw_bar2_offs(struct pci_dev *pdev)
+{
+ if (pci_resource_len(pdev, 2) == 0x1000000)
+ return 0x400000;
+ else
+ return 0x0;
+}
+
+static int prestera_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ const char *driver_name = dev_driver_string(&pdev->dev);
+ u8 __iomem *mem_addr, *pp_addr = NULL;
+ struct prestera_fw *fw;
+ int err;
+
+ err = pcim_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "pci_enable_device failed\n");
+ goto err_pci_enable_device;
+ }
+
+ err = pci_request_regions(pdev, driver_name);
+ if (err) {
+ dev_err(&pdev->dev, "pci_request_regions failed\n");
+ goto err_pci_request_regions;
+ }
+
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(30));
+ if (err) {
+ dev_err(&pdev->dev, "fail to set DMA mask\n");
+ goto err_dma_mask;
+ }
+
+ mem_addr = pcim_iomap(pdev, 2, 0);
+ if (!mem_addr) {
+ dev_err(&pdev->dev, "pci mem ioremap failed\n");
+ err = -EIO;
+ goto err_mem_ioremap;
+ }
+
+	/* AC5X-family devices expose both the PP and FW register blocks via BAR2 */
+ if (prestera_pci_pp_use_bar2(pdev)) {
+ pp_addr = mem_addr + prestera_pci_pp_bar2_offs(pdev);
+ mem_addr = mem_addr + prestera_pci_fw_bar2_offs(pdev);
+ } else {
+ pp_addr = pcim_iomap(pdev, 4, 0);
+ if (!pp_addr) {
+ dev_err(&pdev->dev, "pp regs ioremap failed\n");
+ err = -EIO;
+ goto err_pp_ioremap;
+ }
+ }
+
+ pci_set_master(pdev);
+
+ fw = devm_kzalloc(&pdev->dev, sizeof(*fw), GFP_KERNEL);
+ if (!fw) {
+ err = -ENOMEM;
+ goto err_pci_dev_alloc;
+ }
+
+ fw->pci_dev = pdev;
+ fw->dev.ctl_regs = mem_addr;
+ fw->dev.pp_regs = pp_addr;
+ fw->dev.dev = &pdev->dev;
+
+ pci_set_drvdata(pdev, fw);
+
+ err = prestera_fw_init(fw);
+ if (err)
+ goto err_prestera_fw_init;
+
+ dev_info(fw->dev.dev, "Prestera FW is ready\n");
+
+ fw->wq = alloc_workqueue("prestera_fw_wq", WQ_HIGHPRI, 1);
+ if (!fw->wq) {
+ err = -ENOMEM;
+ goto err_wq_alloc;
+ }
+
+ INIT_WORK(&fw->evt_work, prestera_fw_evt_work_fn);
+
+ err = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
+ if (err < 0) {
+ dev_err(&pdev->dev, "MSI IRQ init failed\n");
+ goto err_irq_alloc;
+ }
+
+ err = request_irq(pci_irq_vector(pdev, 0), prestera_pci_irq_handler,
+ 0, driver_name, fw);
+ if (err) {
+ dev_err(&pdev->dev, "fail to request IRQ\n");
+ goto err_request_irq;
+ }
+
+ err = prestera_device_register(&fw->dev);
+ if (err)
+ goto err_prestera_dev_register;
+
+ return 0;
+
+err_prestera_dev_register:
+ free_irq(pci_irq_vector(pdev, 0), fw);
+err_request_irq:
+ pci_free_irq_vectors(pdev);
+err_irq_alloc:
+ destroy_workqueue(fw->wq);
+err_wq_alloc:
+ prestera_fw_uninit(fw);
+err_prestera_fw_init:
+err_pci_dev_alloc:
+err_pp_ioremap:
+err_mem_ioremap:
+err_dma_mask:
+ pci_release_regions(pdev);
+err_pci_request_regions:
+err_pci_enable_device:
+ return err;
+}
+
+static void prestera_pci_remove(struct pci_dev *pdev)
+{
+ struct prestera_fw *fw = pci_get_drvdata(pdev);
+
+ prestera_device_unregister(&fw->dev);
+ free_irq(pci_irq_vector(pdev, 0), fw);
+ pci_free_irq_vectors(pdev);
+ destroy_workqueue(fw->wq);
+ prestera_fw_uninit(fw);
+ pci_release_regions(pdev);
+}
+
+static const struct pci_device_id prestera_pci_devices[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, PRESTERA_DEV_ID_AC3X_98DX_55) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, PRESTERA_DEV_ID_AC3X_98DX_65) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, PRESTERA_DEV_ID_ALDRIN2) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, PRESTERA_DEV_ID_98DX7312M) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, PRESTERA_DEV_ID_98DX3500) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, PRESTERA_DEV_ID_98DX3501) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, PRESTERA_DEV_ID_98DX3510) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, PRESTERA_DEV_ID_98DX3520) },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, prestera_pci_devices);
+
+static struct pci_driver prestera_pci_driver = {
+ .name = "Prestera DX",
+ .id_table = prestera_pci_devices,
+ .probe = prestera_pci_probe,
+ .remove = prestera_pci_remove,
+};
+module_pci_driver(prestera_pci_driver);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("Marvell Prestera switch PCI interface");
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_router.c b/drivers/net/ethernet/marvell/prestera/prestera_router.c
new file mode 100644
index 000000000..de317179a
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_router.c
@@ -0,0 +1,1645 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2019-2021 Marvell International Ltd. All rights reserved */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/inetdevice.h>
+#include <net/inet_dscp.h>
+#include <net/switchdev.h>
+#include <linux/rhashtable.h>
+#include <net/nexthop.h>
+#include <net/arp.h>
+#include <linux/if_vlan.h>
+#include <linux/if_macvlan.h>
+#include <net/netevent.h>
+
+#include "prestera.h"
+#include "prestera_router_hw.h"
+
+#define PRESTERA_IMPLICITY_RESOLVE_DEAD_NEIGH
+#define PRESTERA_NH_PROBE_INTERVAL 5000 /* ms */
+
+struct prestera_kern_neigh_cache_key {
+ struct prestera_ip_addr addr;
+ struct net_device *dev;
+};
+
+struct prestera_kern_neigh_cache {
+ struct prestera_kern_neigh_cache_key key;
+ struct rhash_head ht_node;
+ struct list_head kern_fib_cache_list;
+	/* Holds the prepared nh_neigh info while the neighbour is in_kernel */
+	struct prestera_neigh_info nh_neigh_info;
+	/* Indicates whether the neighbour is reachable via a direct route */
+	bool reachable;
+	/* Locks the cache entry while the neighbour is present in the kernel */
+	bool in_kernel;
+};
+
+struct prestera_kern_fib_cache_key {
+ struct prestera_ip_addr addr;
+ u32 prefix_len;
+	u32 kern_tb_id; /* tb_id from kernel (before prestera_fix_tb_id()) */
+};
+
+/* Cached kernel FIB entry; also tracks the kernel neighbours used as its
+ * nexthops
+ */
+struct prestera_kern_fib_cache {
+ struct prestera_kern_fib_cache_key key;
+ struct {
+ struct prestera_fib_key fib_key;
+ enum prestera_fib_type fib_type;
+ struct prestera_nexthop_group_key nh_grp_key;
+ } lpm_info; /* hold prepared lpm info */
+	struct rhash_head ht_node; /* node of prestera_router */
+ struct prestera_kern_neigh_cache_head {
+ struct prestera_kern_fib_cache *this;
+ struct list_head head;
+ struct prestera_kern_neigh_cache *n_cache;
+ } kern_neigh_cache_head[PRESTERA_NHGR_SIZE_MAX];
+ union {
+ struct fib_notifier_info info; /* point to any of 4/6 */
+ struct fib_entry_notifier_info fen4_info;
+ };
+	/* Indicate if route is not overlapped by another table */
+	bool reachable;
+};
+
+static const struct rhashtable_params __prestera_kern_neigh_cache_ht_params = {
+ .key_offset = offsetof(struct prestera_kern_neigh_cache, key),
+ .head_offset = offsetof(struct prestera_kern_neigh_cache, ht_node),
+ .key_len = sizeof(struct prestera_kern_neigh_cache_key),
+ .automatic_shrinking = true,
+};
+
+static const struct rhashtable_params __prestera_kern_fib_cache_ht_params = {
+ .key_offset = offsetof(struct prestera_kern_fib_cache, key),
+ .head_offset = offsetof(struct prestera_kern_fib_cache, ht_node),
+ .key_len = sizeof(struct prestera_kern_fib_cache_key),
+ .automatic_shrinking = true,
+};
+
+/* Utility to convert kernel table ids that are handled by the default vr
+ * into the hw vr table id
+ */
+static u32 prestera_fix_tb_id(u32 tb_id)
+{
+ if (tb_id == RT_TABLE_UNSPEC ||
+ tb_id == RT_TABLE_LOCAL ||
+ tb_id == RT_TABLE_DEFAULT)
+ tb_id = RT_TABLE_MAIN;
+
+ return tb_id;
+}
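+
+/* Example (illustrative): with the mapping above, a route the kernel installs
+ * in RT_TABLE_LOCAL or RT_TABLE_DEFAULT ends up in the same hw VR as
+ * RT_TABLE_MAIN, while a user-defined table id is passed through unchanged:
+ *
+ *   prestera_fix_tb_id(RT_TABLE_LOCAL)   -> RT_TABLE_MAIN
+ *   prestera_fix_tb_id(RT_TABLE_DEFAULT) -> RT_TABLE_MAIN
+ *   prestera_fix_tb_id(100)              -> 100  (arbitrary user table id)
+ */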
+
+static void
+prestera_util_fen_info2fib_cache_key(struct fib_notifier_info *info,
+ struct prestera_kern_fib_cache_key *key)
+{
+ struct fib_entry_notifier_info *fen_info =
+ container_of(info, struct fib_entry_notifier_info, info);
+
+ memset(key, 0, sizeof(*key));
+ key->addr.v = PRESTERA_IPV4;
+ key->addr.u.ipv4 = cpu_to_be32(fen_info->dst);
+ key->prefix_len = fen_info->dst_len;
+ key->kern_tb_id = fen_info->tb_id;
+}
+
+static int prestera_util_nhc2nc_key(struct prestera_switch *sw,
+ struct fib_nh_common *nhc,
+ struct prestera_kern_neigh_cache_key *nk)
+{
+ memset(nk, 0, sizeof(*nk));
+ if (nhc->nhc_gw_family == AF_INET) {
+ nk->addr.v = PRESTERA_IPV4;
+ nk->addr.u.ipv4 = nhc->nhc_gw.ipv4;
+ } else {
+ nk->addr.v = PRESTERA_IPV6;
+ nk->addr.u.ipv6 = nhc->nhc_gw.ipv6;
+ }
+
+ nk->dev = nhc->nhc_dev;
+ return 0;
+}
+
+static void
+prestera_util_nc_key2nh_key(struct prestera_kern_neigh_cache_key *ck,
+ struct prestera_nh_neigh_key *nk)
+{
+ memset(nk, 0, sizeof(*nk));
+ nk->addr = ck->addr;
+ nk->rif = (void *)ck->dev;
+}
+
+static bool
+prestera_util_nhc_eq_n_cache_key(struct prestera_switch *sw,
+ struct fib_nh_common *nhc,
+ struct prestera_kern_neigh_cache_key *nk)
+{
+ struct prestera_kern_neigh_cache_key tk;
+ int err;
+
+ err = prestera_util_nhc2nc_key(sw, nhc, &tk);
+ if (err)
+ return false;
+
+ if (memcmp(&tk, nk, sizeof(tk)))
+ return false;
+
+ return true;
+}
+
+static int
+prestera_util_neigh2nc_key(struct prestera_switch *sw, struct neighbour *n,
+ struct prestera_kern_neigh_cache_key *key)
+{
+ memset(key, 0, sizeof(*key));
+ if (n->tbl->family == AF_INET) {
+ key->addr.v = PRESTERA_IPV4;
+ key->addr.u.ipv4 = *(__be32 *)n->primary_key;
+ } else {
+ return -ENOENT;
+ }
+
+ key->dev = n->dev;
+
+ return 0;
+}
+
+static bool __prestera_fi_is_direct(struct fib_info *fi)
+{
+ struct fib_nh_common *fib_nhc;
+
+ if (fib_info_num_path(fi) == 1) {
+ fib_nhc = fib_info_nhc(fi, 0);
+ if (fib_nhc->nhc_gw_family == AF_UNSPEC)
+ return true;
+ }
+
+ return false;
+}
+
+static bool prestera_fi_is_direct(struct fib_info *fi)
+{
+ if (fi->fib_type != RTN_UNICAST)
+ return false;
+
+ return __prestera_fi_is_direct(fi);
+}
+
+static bool prestera_fi_is_nh(struct fib_info *fi)
+{
+ if (fi->fib_type != RTN_UNICAST)
+ return false;
+
+ return !__prestera_fi_is_direct(fi);
+}
+
+static bool __prestera_fi6_is_direct(struct fib6_info *fi)
+{
+ if (!fi->fib6_nh->nh_common.nhc_gw_family)
+ return true;
+
+ return false;
+}
+
+static bool prestera_fi6_is_direct(struct fib6_info *fi)
+{
+ if (fi->fib6_type != RTN_UNICAST)
+ return false;
+
+ return __prestera_fi6_is_direct(fi);
+}
+
+static bool prestera_fi6_is_nh(struct fib6_info *fi)
+{
+ if (fi->fib6_type != RTN_UNICAST)
+ return false;
+
+ return !__prestera_fi6_is_direct(fi);
+}
+
+static bool prestera_fib_info_is_direct(struct fib_notifier_info *info)
+{
+ struct fib6_entry_notifier_info *fen6_info =
+ container_of(info, struct fib6_entry_notifier_info, info);
+ struct fib_entry_notifier_info *fen_info =
+ container_of(info, struct fib_entry_notifier_info, info);
+
+ if (info->family == AF_INET)
+ return prestera_fi_is_direct(fen_info->fi);
+ else
+ return prestera_fi6_is_direct(fen6_info->rt);
+}
+
+static bool prestera_fib_info_is_nh(struct fib_notifier_info *info)
+{
+ struct fib6_entry_notifier_info *fen6_info =
+ container_of(info, struct fib6_entry_notifier_info, info);
+ struct fib_entry_notifier_info *fen_info =
+ container_of(info, struct fib_entry_notifier_info, info);
+
+ if (info->family == AF_INET)
+ return prestera_fi_is_nh(fen_info->fi);
+ else
+ return prestera_fi6_is_nh(fen6_info->rt);
+}
+
+/* must be called with rcu_read_lock() */
+static int prestera_util_kern_get_route(struct fib_result *res, u32 tb_id,
+ __be32 *addr)
+{
+ struct flowi4 fl4;
+
+ /* TODO: walk through the appropriate kernel tables to know whether
+ * the same prefix exists in several tables
+ */
+ memset(&fl4, 0, sizeof(fl4));
+ fl4.daddr = *addr;
+ return fib_lookup(&init_net, &fl4, res, 0 /* FIB_LOOKUP_NOREF */);
+}
+
+static bool
+__prestera_util_kern_n_is_reachable_v4(u32 tb_id, __be32 *addr,
+ struct net_device *dev)
+{
+ struct fib_nh_common *fib_nhc;
+ struct fib_result res;
+ bool reachable;
+
+ reachable = false;
+
+ if (!prestera_util_kern_get_route(&res, tb_id, addr))
+ if (prestera_fi_is_direct(res.fi)) {
+ fib_nhc = fib_info_nhc(res.fi, 0);
+ if (dev == fib_nhc->nhc_dev)
+ reachable = true;
+ }
+
+ return reachable;
+}
+
+/* Check if neigh route is reachable */
+static bool
+prestera_util_kern_n_is_reachable(u32 tb_id,
+ struct prestera_ip_addr *addr,
+ struct net_device *dev)
+{
+ if (addr->v == PRESTERA_IPV4)
+ return __prestera_util_kern_n_is_reachable_v4(tb_id,
+ &addr->u.ipv4,
+ dev);
+ else
+ return false;
+}
+
+static void prestera_util_kern_set_neigh_offload(struct neighbour *n,
+ bool offloaded)
+{
+ if (offloaded)
+ n->flags |= NTF_OFFLOADED;
+ else
+ n->flags &= ~NTF_OFFLOADED;
+}
+
+static void
+prestera_util_kern_set_nh_offload(struct fib_nh_common *nhc, bool offloaded, bool trap)
+{
+ if (offloaded)
+ nhc->nhc_flags |= RTNH_F_OFFLOAD;
+ else
+ nhc->nhc_flags &= ~RTNH_F_OFFLOAD;
+
+ if (trap)
+ nhc->nhc_flags |= RTNH_F_TRAP;
+ else
+ nhc->nhc_flags &= ~RTNH_F_TRAP;
+}
+
+static struct fib_nh_common *
+prestera_kern_fib_info_nhc(struct fib_notifier_info *info, int n)
+{
+ struct fib6_entry_notifier_info *fen6_info;
+ struct fib_entry_notifier_info *fen4_info;
+ struct fib6_info *iter;
+
+ if (info->family == AF_INET) {
+ fen4_info = container_of(info, struct fib_entry_notifier_info,
+ info);
+ return fib_info_nhc(fen4_info->fi, n);
+ } else if (info->family == AF_INET6) {
+ fen6_info = container_of(info, struct fib6_entry_notifier_info,
+ info);
+ if (!n)
+ return &fen6_info->rt->fib6_nh->nh_common;
+
+ list_for_each_entry(iter, &fen6_info->rt->fib6_siblings,
+ fib6_siblings) {
+ if (!--n)
+ return &iter->fib6_nh->nh_common;
+ }
+ }
+
+ /* If the family is incorrect, the calling code has a bug.
+ * If the requested index is not found, that is also a bug, because a
+ * valid index must be produced by nhs, which checks the list length.
+ */
+ WARN(1, "Invalid parameters passed to %s n=%d i=%p",
+ __func__, n, info);
+ return NULL;
+}
+
+static int prestera_kern_fib_info_nhs(struct fib_notifier_info *info)
+{
+ struct fib6_entry_notifier_info *fen6_info;
+ struct fib_entry_notifier_info *fen4_info;
+
+ if (info->family == AF_INET) {
+ fen4_info = container_of(info, struct fib_entry_notifier_info,
+ info);
+ return fib_info_num_path(fen4_info->fi);
+ } else if (info->family == AF_INET6) {
+ fen6_info = container_of(info, struct fib6_entry_notifier_info,
+ info);
+ return fen6_info->rt->fib6_nsiblings + 1;
+ }
+
+ return 0;
+}
+
+static unsigned char
+prestera_kern_fib_info_type(struct fib_notifier_info *info)
+{
+ struct fib6_entry_notifier_info *fen6_info;
+ struct fib_entry_notifier_info *fen4_info;
+
+ if (info->family == AF_INET) {
+ fen4_info = container_of(info, struct fib_entry_notifier_info,
+ info);
+ return fen4_info->fi->fib_type;
+ } else if (info->family == AF_INET6) {
+ fen6_info = container_of(info, struct fib6_entry_notifier_info,
+ info);
+ /* TODO: ECMP in IPv6 is represented as several routes,
+ * each with a single nh.
+ */
+ return fen6_info->rt->fib6_type;
+ }
+
+ return RTN_UNSPEC;
+}
+
+/* A uc_nh route whose key equals its nexthop is treated as a neighbour route */
+static bool
+prestera_fib_node_util_is_neighbour(struct prestera_fib_node *fib_node)
+{
+ if (fib_node->info.type != PRESTERA_FIB_TYPE_UC_NH)
+ return false;
+
+ if (fib_node->info.nh_grp->nh_neigh_head[1].neigh)
+ return false;
+
+ if (!fib_node->info.nh_grp->nh_neigh_head[0].neigh)
+ return false;
+
+ if (memcmp(&fib_node->info.nh_grp->nh_neigh_head[0].neigh->key.addr,
+ &fib_node->key.addr, sizeof(struct prestera_ip_addr)))
+ return false;
+
+ return true;
+}
+
+static int prestera_dev_if_type(const struct net_device *dev)
+{
+ struct macvlan_dev *vlan;
+
+ if (is_vlan_dev(dev) &&
+ netif_is_bridge_master(vlan_dev_real_dev(dev))) {
+ return PRESTERA_IF_VID_E;
+ } else if (netif_is_bridge_master(dev)) {
+ return PRESTERA_IF_VID_E;
+ } else if (netif_is_lag_master(dev)) {
+ return PRESTERA_IF_LAG_E;
+ } else if (netif_is_macvlan(dev)) {
+ vlan = netdev_priv(dev);
+ return prestera_dev_if_type(vlan->lowerdev);
+ } else {
+ return PRESTERA_IF_PORT_E;
+ }
+}
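+
+/* Example (illustrative): a VLAN upper of a bridge and the bridge itself both
+ * map to PRESTERA_IF_VID_E, a LAG master maps to PRESTERA_IF_LAG_E, a macvlan
+ * is classified by its lower device, and anything else is treated as a
+ * front-panel port (PRESTERA_IF_PORT_E).
+ */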
+
+static int
+prestera_neigh_iface_init(struct prestera_switch *sw,
+ struct prestera_iface *iface,
+ struct neighbour *n)
+{
+ struct prestera_port *port;
+
+ iface->vlan_id = 0; /* TODO: vlan egress */
+ iface->type = prestera_dev_if_type(n->dev);
+ if (iface->type != PRESTERA_IF_PORT_E)
+ return -EINVAL;
+
+ if (!prestera_netdev_check(n->dev))
+ return -EINVAL;
+
+ port = netdev_priv(n->dev);
+ iface->dev_port.hw_dev_num = port->dev_id;
+ iface->dev_port.port_num = port->hw_id;
+
+ return 0;
+}
+
+static struct prestera_kern_neigh_cache *
+prestera_kern_neigh_cache_find(struct prestera_switch *sw,
+ struct prestera_kern_neigh_cache_key *key)
+{
+ struct prestera_kern_neigh_cache *n_cache;
+
+ n_cache =
+ rhashtable_lookup_fast(&sw->router->kern_neigh_cache_ht, key,
+ __prestera_kern_neigh_cache_ht_params);
+ return n_cache;
+}
+
+static void
+__prestera_kern_neigh_cache_destruct(struct prestera_switch *sw,
+ struct prestera_kern_neigh_cache *n_cache)
+{
+ dev_put(n_cache->key.dev);
+}
+
+static void
+__prestera_kern_neigh_cache_destroy(struct prestera_switch *sw,
+ struct prestera_kern_neigh_cache *n_cache)
+{
+ rhashtable_remove_fast(&sw->router->kern_neigh_cache_ht,
+ &n_cache->ht_node,
+ __prestera_kern_neigh_cache_ht_params);
+ __prestera_kern_neigh_cache_destruct(sw, n_cache);
+ kfree(n_cache);
+}
+
+static struct prestera_kern_neigh_cache *
+__prestera_kern_neigh_cache_create(struct prestera_switch *sw,
+ struct prestera_kern_neigh_cache_key *key)
+{
+ struct prestera_kern_neigh_cache *n_cache;
+ int err;
+
+ n_cache = kzalloc(sizeof(*n_cache), GFP_KERNEL);
+ if (!n_cache)
+ goto err_kzalloc;
+
+ memcpy(&n_cache->key, key, sizeof(*key));
+ dev_hold(n_cache->key.dev);
+
+ INIT_LIST_HEAD(&n_cache->kern_fib_cache_list);
+ err = rhashtable_insert_fast(&sw->router->kern_neigh_cache_ht,
+ &n_cache->ht_node,
+ __prestera_kern_neigh_cache_ht_params);
+ if (err)
+ goto err_ht_insert;
+
+ return n_cache;
+
+err_ht_insert:
+ dev_put(n_cache->key.dev);
+ kfree(n_cache);
+err_kzalloc:
+ return NULL;
+}
+
+static struct prestera_kern_neigh_cache *
+prestera_kern_neigh_cache_get(struct prestera_switch *sw,
+ struct prestera_kern_neigh_cache_key *key)
+{
+ struct prestera_kern_neigh_cache *n_cache;
+
+ n_cache = prestera_kern_neigh_cache_find(sw, key);
+ if (!n_cache)
+ n_cache = __prestera_kern_neigh_cache_create(sw, key);
+
+ return n_cache;
+}
+
+static struct prestera_kern_neigh_cache *
+prestera_kern_neigh_cache_put(struct prestera_switch *sw,
+ struct prestera_kern_neigh_cache *n_cache)
+{
+ if (!n_cache->in_kernel &&
+ list_empty(&n_cache->kern_fib_cache_list)) {
+ __prestera_kern_neigh_cache_destroy(sw, n_cache);
+ return NULL;
+ }
+
+ return n_cache;
+}
+
+static struct prestera_kern_fib_cache *
+prestera_kern_fib_cache_find(struct prestera_switch *sw,
+ struct prestera_kern_fib_cache_key *key)
+{
+ struct prestera_kern_fib_cache *fib_cache;
+
+ fib_cache =
+ rhashtable_lookup_fast(&sw->router->kern_fib_cache_ht, key,
+ __prestera_kern_fib_cache_ht_params);
+ return fib_cache;
+}
+
+static void
+__prestera_kern_fib_cache_destruct(struct prestera_switch *sw,
+ struct prestera_kern_fib_cache *fib_cache)
+{
+ struct prestera_kern_neigh_cache *n_cache;
+ int i;
+
+ for (i = 0; i < PRESTERA_NHGR_SIZE_MAX; i++) {
+ n_cache = fib_cache->kern_neigh_cache_head[i].n_cache;
+ if (n_cache) {
+ list_del(&fib_cache->kern_neigh_cache_head[i].head);
+ prestera_kern_neigh_cache_put(sw, n_cache);
+ }
+ }
+
+ fib_info_put(fib_cache->fen4_info.fi);
+}
+
+static void
+prestera_kern_fib_cache_destroy(struct prestera_switch *sw,
+ struct prestera_kern_fib_cache *fib_cache)
+{
+ rhashtable_remove_fast(&sw->router->kern_fib_cache_ht,
+ &fib_cache->ht_node,
+ __prestera_kern_fib_cache_ht_params);
+ __prestera_kern_fib_cache_destruct(sw, fib_cache);
+ kfree(fib_cache);
+}
+
+static int
+__prestera_kern_fib_cache_create_nhs(struct prestera_switch *sw,
+ struct prestera_kern_fib_cache *fc)
+{
+ struct prestera_kern_neigh_cache_key nc_key;
+ struct prestera_kern_neigh_cache *n_cache;
+ struct fib_nh_common *nhc;
+ int i, nhs, err;
+
+ if (!prestera_fib_info_is_nh(&fc->info))
+ return 0;
+
+ nhs = prestera_kern_fib_info_nhs(&fc->info);
+ if (nhs > PRESTERA_NHGR_SIZE_MAX)
+ return 0;
+
+ for (i = 0; i < nhs; i++) {
+ nhc = prestera_kern_fib_info_nhc(&fc->fen4_info.info, i);
+ err = prestera_util_nhc2nc_key(sw, nhc, &nc_key);
+ if (err)
+ return 0;
+
+ n_cache = prestera_kern_neigh_cache_get(sw, &nc_key);
+ if (!n_cache)
+ return 0;
+
+ fc->kern_neigh_cache_head[i].this = fc;
+ fc->kern_neigh_cache_head[i].n_cache = n_cache;
+ list_add(&fc->kern_neigh_cache_head[i].head,
+ &n_cache->kern_fib_cache_list);
+ }
+
+ return 0;
+}
+
+/* Operations on fi (offload, etc) must be wrapped in utils.
+ * This function just creates storage.
+ */
+static struct prestera_kern_fib_cache *
+prestera_kern_fib_cache_create(struct prestera_switch *sw,
+ struct prestera_kern_fib_cache_key *key,
+ struct fib_notifier_info *info)
+{
+ struct fib_entry_notifier_info *fen_info =
+ container_of(info, struct fib_entry_notifier_info, info);
+ struct prestera_kern_fib_cache *fib_cache;
+ int err;
+
+ fib_cache = kzalloc(sizeof(*fib_cache), GFP_KERNEL);
+ if (!fib_cache)
+ goto err_kzalloc;
+
+ memcpy(&fib_cache->key, key, sizeof(*key));
+ fib_info_hold(fen_info->fi);
+ memcpy(&fib_cache->fen4_info, fen_info, sizeof(*fen_info));
+
+ err = rhashtable_insert_fast(&sw->router->kern_fib_cache_ht,
+ &fib_cache->ht_node,
+ __prestera_kern_fib_cache_ht_params);
+ if (err)
+ goto err_ht_insert;
+
+ /* Handle nexthops */
+ err = __prestera_kern_fib_cache_create_nhs(sw, fib_cache);
+ if (err)
+ goto out; /* Not critical */
+
+out:
+ return fib_cache;
+
+err_ht_insert:
+ fib_info_put(fen_info->fi);
+ kfree(fib_cache);
+err_kzalloc:
+ return NULL;
+}
+
+static void
+__prestera_k_arb_fib_nh_offload_set(struct prestera_switch *sw,
+ struct prestera_kern_fib_cache *fibc,
+ struct prestera_kern_neigh_cache *nc,
+ bool offloaded, bool trap)
+{
+ struct fib_nh_common *nhc;
+ int i, nhs;
+
+ nhs = prestera_kern_fib_info_nhs(&fibc->info);
+ for (i = 0; i < nhs; i++) {
+ nhc = prestera_kern_fib_info_nhc(&fibc->info, i);
+ if (!nc) {
+ prestera_util_kern_set_nh_offload(nhc, offloaded, trap);
+ continue;
+ }
+
+ if (prestera_util_nhc_eq_n_cache_key(sw, nhc, &nc->key)) {
+ prestera_util_kern_set_nh_offload(nhc, offloaded, trap);
+ break;
+ }
+ }
+}
+
+static void
+__prestera_k_arb_n_offload_set(struct prestera_switch *sw,
+ struct prestera_kern_neigh_cache *nc,
+ bool offloaded)
+{
+ struct neighbour *n;
+
+ n = neigh_lookup(&arp_tbl, &nc->key.addr.u.ipv4,
+ nc->key.dev);
+ if (!n)
+ return;
+
+ prestera_util_kern_set_neigh_offload(n, offloaded);
+ neigh_release(n);
+}
+
+static void
+__prestera_k_arb_fib_lpm_offload_set(struct prestera_switch *sw,
+ struct prestera_kern_fib_cache *fc,
+ bool fail, bool offload, bool trap)
+{
+ struct fib_rt_info fri;
+
+ switch (fc->key.addr.v) {
+ case PRESTERA_IPV4:
+ fri.fi = fc->fen4_info.fi;
+ fri.tb_id = fc->key.kern_tb_id;
+ fri.dst = fc->key.addr.u.ipv4;
+ fri.dst_len = fc->key.prefix_len;
+ fri.dscp = fc->fen4_info.dscp;
+ fri.type = fc->fen4_info.type;
+ /* flags begin */
+ fri.offload = offload;
+ fri.trap = trap;
+ fri.offload_failed = fail;
+ /* flags end */
+ fib_alias_hw_flags_set(&init_net, &fri);
+ return;
+ case PRESTERA_IPV6:
+ /* TODO */
+ return;
+ }
+}
+
+static void
+__prestera_k_arb_n_lpm_set(struct prestera_switch *sw,
+ struct prestera_kern_neigh_cache *n_cache,
+ bool enabled)
+{
+ struct prestera_nexthop_group_key nh_grp_key;
+ struct prestera_kern_fib_cache_key fc_key;
+ struct prestera_kern_fib_cache *fib_cache;
+ struct prestera_fib_node *fib_node;
+ struct prestera_fib_key fib_key;
+
+ /* Exception for fc with prefix 32: LPM entry is already used by fib */
+ memset(&fc_key, 0, sizeof(fc_key));
+ fc_key.addr = n_cache->key.addr;
+ fc_key.prefix_len = PRESTERA_IP_ADDR_PLEN(n_cache->key.addr.v);
+ /* It would be better to use the tb_id of the route that points to this
+ * neighbour. We take it from the rif instead, which is inconsistent:
+ * in_rif and out_rif should be kept separate.
+ * Also note: each fib pointing to this neigh should get a separate
+ * neigh lpm entry (one per ingress vr).
+ */
+ fc_key.kern_tb_id = l3mdev_fib_table(n_cache->key.dev);
+ fib_cache = prestera_kern_fib_cache_find(sw, &fc_key);
+ memset(&fib_key, 0, sizeof(fib_key));
+ fib_key.addr = n_cache->key.addr;
+ fib_key.prefix_len = PRESTERA_IP_ADDR_PLEN(n_cache->key.addr.v);
+ fib_key.tb_id = prestera_fix_tb_id(fc_key.kern_tb_id);
+ fib_node = prestera_fib_node_find(sw, &fib_key);
+ if (!fib_cache || !fib_cache->reachable) {
+ if (!enabled && fib_node) {
+ if (prestera_fib_node_util_is_neighbour(fib_node))
+ prestera_fib_node_destroy(sw, fib_node);
+ return;
+ }
+ }
+
+ if (enabled && !fib_node) {
+ memset(&nh_grp_key, 0, sizeof(nh_grp_key));
+ prestera_util_nc_key2nh_key(&n_cache->key,
+ &nh_grp_key.neigh[0]);
+ fib_node = prestera_fib_node_create(sw, &fib_key,
+ PRESTERA_FIB_TYPE_UC_NH,
+ &nh_grp_key);
+ if (!fib_node)
+ pr_err("%s failed ip=%pI4n", "prestera_fib_node_create",
+ &fib_key.addr.u.ipv4);
+ return;
+ }
+}
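+
+/* Example (illustrative): for an arbitrary neighbour 10.0.0.2 that is both
+ * reachable through a directly connected route and present in the kernel ARP
+ * table, the function above installs a /32 UC_NH fib node keyed by 10.0.0.2
+ * with that neighbour as its single nexthop, so traffic to the host is
+ * forwarded in hw; once the neighbour disappears, the same node is torn down.
+ */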
+
+static void
+__prestera_k_arb_nc_kern_fib_fetch(struct prestera_switch *sw,
+ struct prestera_kern_neigh_cache *nc)
+{
+ if (prestera_util_kern_n_is_reachable(l3mdev_fib_table(nc->key.dev),
+ &nc->key.addr, nc->key.dev))
+ nc->reachable = true;
+ else
+ nc->reachable = false;
+}
+
+/* Kernel neighbour -> neigh_cache info */
+static void
+__prestera_k_arb_nc_kern_n_fetch(struct prestera_switch *sw,
+ struct prestera_kern_neigh_cache *nc)
+{
+ struct neighbour *n;
+ int err;
+
+ memset(&nc->nh_neigh_info, 0, sizeof(nc->nh_neigh_info));
+ n = neigh_lookup(&arp_tbl, &nc->key.addr.u.ipv4, nc->key.dev);
+ if (!n)
+ goto out;
+
+ read_lock_bh(&n->lock);
+ if (n->nud_state & NUD_VALID && !n->dead) {
+ err = prestera_neigh_iface_init(sw, &nc->nh_neigh_info.iface,
+ n);
+ if (err)
+ goto n_read_out;
+
+ memcpy(&nc->nh_neigh_info.ha[0], &n->ha[0], ETH_ALEN);
+ nc->nh_neigh_info.connected = true;
+ }
+n_read_out:
+ read_unlock_bh(&n->lock);
+out:
+ nc->in_kernel = nc->nh_neigh_info.connected;
+ if (n)
+ neigh_release(n);
+}
+
+/* neigh_cache info -> lpm update */
+static void
+__prestera_k_arb_nc_apply(struct prestera_switch *sw,
+ struct prestera_kern_neigh_cache *nc)
+{
+ struct prestera_kern_neigh_cache_head *nhead;
+ struct prestera_nh_neigh_key nh_key;
+ struct prestera_nh_neigh *nh_neigh;
+ int err;
+
+ __prestera_k_arb_n_lpm_set(sw, nc, nc->reachable && nc->in_kernel);
+ __prestera_k_arb_n_offload_set(sw, nc, nc->reachable && nc->in_kernel);
+
+ prestera_util_nc_key2nh_key(&nc->key, &nh_key);
+ nh_neigh = prestera_nh_neigh_find(sw, &nh_key);
+ if (!nh_neigh)
+ goto out;
+
+ /* Do hw update only if something changed to prevent nh flap */
+ if (memcmp(&nc->nh_neigh_info, &nh_neigh->info,
+ sizeof(nh_neigh->info))) {
+ memcpy(&nh_neigh->info, &nc->nh_neigh_info,
+ sizeof(nh_neigh->info));
+ err = prestera_nh_neigh_set(sw, nh_neigh);
+ if (err) {
+ pr_err("%s failed with err=%d ip=%pI4n mac=%pM",
+ "prestera_nh_neigh_set", err,
+ &nh_neigh->key.addr.u.ipv4,
+ &nh_neigh->info.ha[0]);
+ goto out;
+ }
+ }
+
+out:
+ list_for_each_entry(nhead, &nc->kern_fib_cache_list, head) {
+ __prestera_k_arb_fib_nh_offload_set(sw, nhead->this, nc,
+ nc->in_kernel,
+ !nc->in_kernel);
+ }
+}
+
+static int
+__prestera_pr_k_arb_fc_lpm_info_calc(struct prestera_switch *sw,
+ struct prestera_kern_fib_cache *fc)
+{
+ struct fib_nh_common *nhc;
+ int nh_cnt;
+
+ memset(&fc->lpm_info, 0, sizeof(fc->lpm_info));
+
+ switch (prestera_kern_fib_info_type(&fc->info)) {
+ case RTN_UNICAST:
+ if (prestera_fib_info_is_direct(&fc->info) &&
+ fc->key.prefix_len ==
+ PRESTERA_IP_ADDR_PLEN(fc->key.addr.v)) {
+ /* This is a special case: when the prefix is 32 there would be
+ * a conflict in the lpm for a direct route - once a TRAP is
+ * added, there is no place left for the neighbour entry. So
+ * represent a direct route with prefix 32 as NH, so that the
+ * neighbour is resolved as the nexthop of this route.
+ */
+ nhc = prestera_kern_fib_info_nhc(&fc->info, 0);
+ fc->lpm_info.fib_type = PRESTERA_FIB_TYPE_UC_NH;
+ fc->lpm_info.nh_grp_key.neigh[0].addr =
+ fc->key.addr;
+ fc->lpm_info.nh_grp_key.neigh[0].rif =
+ nhc->nhc_dev;
+
+ break;
+ }
+
+ /* We could also get nh_grp_key from fi. That would be more
+ * correct, because the cache does not always represent what is
+ * actually written to the lpm. But for now we use the nh cache
+ * for this case as well.
+ */
+ for (nh_cnt = 0; nh_cnt < PRESTERA_NHGR_SIZE_MAX; nh_cnt++) {
+ if (!fc->kern_neigh_cache_head[nh_cnt].n_cache)
+ break;
+
+ fc->lpm_info.nh_grp_key.neigh[nh_cnt].addr =
+ fc->kern_neigh_cache_head[nh_cnt].n_cache->key.addr;
+ fc->lpm_info.nh_grp_key.neigh[nh_cnt].rif =
+ fc->kern_neigh_cache_head[nh_cnt].n_cache->key.dev;
+ }
+
+ fc->lpm_info.fib_type = nh_cnt ?
+ PRESTERA_FIB_TYPE_UC_NH :
+ PRESTERA_FIB_TYPE_TRAP;
+ break;
+ /* Unsupported. Leave it for kernel: */
+ case RTN_BROADCAST:
+ case RTN_MULTICAST:
+ /* Routes we must trap by design: */
+ case RTN_LOCAL:
+ case RTN_UNREACHABLE:
+ case RTN_PROHIBIT:
+ fc->lpm_info.fib_type = PRESTERA_FIB_TYPE_TRAP;
+ break;
+ case RTN_BLACKHOLE:
+ fc->lpm_info.fib_type = PRESTERA_FIB_TYPE_DROP;
+ break;
+ default:
+ dev_err(sw->dev->dev, "Unsupported fib_type");
+ return -EOPNOTSUPP;
+ }
+
+ fc->lpm_info.fib_key.addr = fc->key.addr;
+ fc->lpm_info.fib_key.prefix_len = fc->key.prefix_len;
+ fc->lpm_info.fib_key.tb_id = prestera_fix_tb_id(fc->key.kern_tb_id);
+
+ return 0;
+}
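+
+/* Example (illustrative): a directly connected host route such as
+ * 192.168.0.1/32 (an arbitrary address) is not installed as a TRAP entry; per
+ * the special case above it becomes a UC_NH entry whose single nexthop is
+ * 192.168.0.1 itself, so the ARP-resolved neighbour later supplies the MAC
+ * without competing for the same /32 LPM slot.
+ */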
+
+static int __prestera_k_arb_f_lpm_set(struct prestera_switch *sw,
+ struct prestera_kern_fib_cache *fc,
+ bool enabled)
+{
+ struct prestera_fib_node *fib_node;
+
+ fib_node = prestera_fib_node_find(sw, &fc->lpm_info.fib_key);
+ if (fib_node)
+ prestera_fib_node_destroy(sw, fib_node);
+
+ if (!enabled)
+ return 0;
+
+ fib_node = prestera_fib_node_create(sw, &fc->lpm_info.fib_key,
+ fc->lpm_info.fib_type,
+ &fc->lpm_info.nh_grp_key);
+
+ if (!fib_node) {
+ dev_err(sw->dev->dev, "fib_node=NULL %pI4n/%d kern_tb_id = %d",
+ &fc->key.addr.u.ipv4, fc->key.prefix_len,
+ fc->key.kern_tb_id);
+ return -ENOENT;
+ }
+
+ return 0;
+}
+
+static int __prestera_k_arb_fc_apply(struct prestera_switch *sw,
+ struct prestera_kern_fib_cache *fc)
+{
+ int err;
+
+ err = __prestera_pr_k_arb_fc_lpm_info_calc(sw, fc);
+ if (err)
+ return err;
+
+ err = __prestera_k_arb_f_lpm_set(sw, fc, fc->reachable);
+ if (err) {
+ __prestera_k_arb_fib_lpm_offload_set(sw, fc,
+ true, false, false);
+ return err;
+ }
+
+ switch (fc->lpm_info.fib_type) {
+ case PRESTERA_FIB_TYPE_UC_NH:
+ __prestera_k_arb_fib_lpm_offload_set(sw, fc, false,
+ fc->reachable, false);
+ break;
+ case PRESTERA_FIB_TYPE_TRAP:
+ __prestera_k_arb_fib_lpm_offload_set(sw, fc, false,
+ false, fc->reachable);
+ break;
+ case PRESTERA_FIB_TYPE_DROP:
+ __prestera_k_arb_fib_lpm_offload_set(sw, fc, false, true,
+ fc->reachable);
+ break;
+ case PRESTERA_FIB_TYPE_INVALID:
+ break;
+ }
+
+ return 0;
+}
+
+static struct prestera_kern_fib_cache *
+__prestera_k_arb_util_fib_overlaps(struct prestera_switch *sw,
+ struct prestera_kern_fib_cache *fc)
+{
+ struct prestera_kern_fib_cache_key fc_key;
+ struct prestera_kern_fib_cache *rfc;
+
+ /* TODO: parse kernel rules */
+ rfc = NULL;
+ if (fc->key.kern_tb_id == RT_TABLE_LOCAL) {
+ memcpy(&fc_key, &fc->key, sizeof(fc_key));
+ fc_key.kern_tb_id = RT_TABLE_MAIN;
+ rfc = prestera_kern_fib_cache_find(sw, &fc_key);
+ }
+
+ return rfc;
+}
+
+static struct prestera_kern_fib_cache *
+__prestera_k_arb_util_fib_overlapped(struct prestera_switch *sw,
+ struct prestera_kern_fib_cache *fc)
+{
+ struct prestera_kern_fib_cache_key fc_key;
+ struct prestera_kern_fib_cache *rfc;
+
+ /* TODO: parse kernel rules */
+ rfc = NULL;
+ if (fc->key.kern_tb_id == RT_TABLE_MAIN) {
+ memcpy(&fc_key, &fc->key, sizeof(fc_key));
+ fc_key.kern_tb_id = RT_TABLE_LOCAL;
+ rfc = prestera_kern_fib_cache_find(sw, &fc_key);
+ }
+
+ return rfc;
+}
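+
+/* Example (illustrative): the kernel typically installs a local address such
+ * as 192.168.0.1/32 (arbitrary) in RT_TABLE_LOCAL while the same prefix may
+ * also exist in RT_TABLE_MAIN. For a LOCAL entry,
+ * __prestera_k_arb_util_fib_overlaps() returns the MAIN entry it shadows; for
+ * a MAIN entry, __prestera_k_arb_util_fib_overlapped() returns the LOCAL
+ * entry shadowing it. prestera_k_arb_fib_evt() then marks only the unshadowed
+ * entry as reachable.
+ */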
+
+static void __prestera_k_arb_hw_state_upd(struct prestera_switch *sw,
+ struct prestera_kern_neigh_cache *nc)
+{
+ struct prestera_nh_neigh_key nh_key;
+ struct prestera_nh_neigh *nh_neigh;
+ struct neighbour *n;
+ bool hw_active;
+
+ prestera_util_nc_key2nh_key(&nc->key, &nh_key);
+ nh_neigh = prestera_nh_neigh_find(sw, &nh_key);
+ if (!nh_neigh) {
+ pr_err("Cannot find nh_neigh for cached %pI4n",
+ &nc->key.addr.u.ipv4);
+ return;
+ }
+
+ hw_active = prestera_nh_neigh_util_hw_state(sw, nh_neigh);
+
+#ifdef PRESTERA_IMPLICITY_RESOLVE_DEAD_NEIGH
+ if (!hw_active && nc->in_kernel)
+ goto out;
+#else /* PRESTERA_IMPLICITY_RESOLVE_DEAD_NEIGH */
+ if (!hw_active)
+ goto out;
+#endif /* PRESTERA_IMPLICITY_RESOLVE_DEAD_NEIGH */
+
+ if (nc->key.addr.v == PRESTERA_IPV4) {
+ n = neigh_lookup(&arp_tbl, &nc->key.addr.u.ipv4,
+ nc->key.dev);
+ if (!n)
+ n = neigh_create(&arp_tbl, &nc->key.addr.u.ipv4,
+ nc->key.dev);
+ } else {
+ n = NULL;
+ }
+
+ if (!IS_ERR(n) && n) {
+ neigh_event_send(n, NULL);
+ neigh_release(n);
+ } else {
+ pr_err("Cannot create neighbour %pI4n", &nc->key.addr.u.ipv4);
+ }
+
+out:
+ return;
+}
+
+/* Propagate hw state to kernel */
+static void prestera_k_arb_hw_evt(struct prestera_switch *sw)
+{
+ struct prestera_kern_neigh_cache *n_cache;
+ struct rhashtable_iter iter;
+
+ rhashtable_walk_enter(&sw->router->kern_neigh_cache_ht, &iter);
+ rhashtable_walk_start(&iter);
+ while (1) {
+ n_cache = rhashtable_walk_next(&iter);
+
+ if (!n_cache)
+ break;
+
+ if (IS_ERR(n_cache))
+ continue;
+
+ rhashtable_walk_stop(&iter);
+ __prestera_k_arb_hw_state_upd(sw, n_cache);
+ rhashtable_walk_start(&iter);
+ }
+ rhashtable_walk_stop(&iter);
+ rhashtable_walk_exit(&iter);
+}
+
+/* Propagate kernel event to hw */
+static void prestera_k_arb_n_evt(struct prestera_switch *sw,
+ struct neighbour *n)
+{
+ struct prestera_kern_neigh_cache_key n_key;
+ struct prestera_kern_neigh_cache *n_cache;
+ int err;
+
+ err = prestera_util_neigh2nc_key(sw, n, &n_key);
+ if (err)
+ return;
+
+ n_cache = prestera_kern_neigh_cache_find(sw, &n_key);
+ if (!n_cache) {
+ n_cache = prestera_kern_neigh_cache_get(sw, &n_key);
+ if (!n_cache)
+ return;
+ __prestera_k_arb_nc_kern_fib_fetch(sw, n_cache);
+ }
+
+ __prestera_k_arb_nc_kern_n_fetch(sw, n_cache);
+ __prestera_k_arb_nc_apply(sw, n_cache);
+
+ prestera_kern_neigh_cache_put(sw, n_cache);
+}
+
+static void __prestera_k_arb_fib_evt2nc(struct prestera_switch *sw)
+{
+ struct prestera_kern_neigh_cache *n_cache;
+ struct rhashtable_iter iter;
+
+ rhashtable_walk_enter(&sw->router->kern_neigh_cache_ht, &iter);
+ rhashtable_walk_start(&iter);
+ while (1) {
+ n_cache = rhashtable_walk_next(&iter);
+
+ if (!n_cache)
+ break;
+
+ if (IS_ERR(n_cache))
+ continue;
+
+ rhashtable_walk_stop(&iter);
+ __prestera_k_arb_nc_kern_fib_fetch(sw, n_cache);
+ __prestera_k_arb_nc_apply(sw, n_cache);
+ rhashtable_walk_start(&iter);
+ }
+ rhashtable_walk_stop(&iter);
+ rhashtable_walk_exit(&iter);
+}
+
+static int
+prestera_k_arb_fib_evt(struct prestera_switch *sw,
+ bool replace, /* replace or del */
+ struct fib_notifier_info *info)
+{
+ struct prestera_kern_fib_cache *tfib_cache, *bfib_cache; /* top/btm */
+ struct prestera_kern_fib_cache_key fc_key;
+ struct prestera_kern_fib_cache *fib_cache;
+ int err;
+
+ prestera_util_fen_info2fib_cache_key(info, &fc_key);
+ fib_cache = prestera_kern_fib_cache_find(sw, &fc_key);
+ if (fib_cache) {
+ fib_cache->reachable = false;
+ err = __prestera_k_arb_fc_apply(sw, fib_cache);
+ if (err)
+ dev_err(sw->dev->dev,
+ "Applying destroyed fib_cache failed");
+
+ bfib_cache = __prestera_k_arb_util_fib_overlaps(sw, fib_cache);
+ tfib_cache = __prestera_k_arb_util_fib_overlapped(sw, fib_cache);
+ if (!tfib_cache && bfib_cache) {
+ bfib_cache->reachable = true;
+ err = __prestera_k_arb_fc_apply(sw, bfib_cache);
+ if (err)
+ dev_err(sw->dev->dev,
+ "Applying fib_cache btm failed");
+ }
+
+ prestera_kern_fib_cache_destroy(sw, fib_cache);
+ }
+
+ if (replace) {
+ fib_cache = prestera_kern_fib_cache_create(sw, &fc_key, info);
+ if (!fib_cache) {
+ dev_err(sw->dev->dev, "fib_cache == NULL");
+ return -ENOENT;
+ }
+
+ bfib_cache = __prestera_k_arb_util_fib_overlaps(sw, fib_cache);
+ tfib_cache = __prestera_k_arb_util_fib_overlapped(sw, fib_cache);
+ if (!tfib_cache)
+ fib_cache->reachable = true;
+
+ if (bfib_cache) {
+ bfib_cache->reachable = false;
+ err = __prestera_k_arb_fc_apply(sw, bfib_cache);
+ if (err)
+ dev_err(sw->dev->dev,
+ "Applying fib_cache btm failed");
+ }
+
+ err = __prestera_k_arb_fc_apply(sw, fib_cache);
+ if (err)
+ dev_err(sw->dev->dev, "Applying fib_cache failed");
+ }
+
+ /* Update all neighs to resolve overlapped and apply related */
+ __prestera_k_arb_fib_evt2nc(sw);
+
+ return 0;
+}
+
+static void __prestera_k_arb_abort_neigh_ht_cb(void *ptr, void *arg)
+{
+ struct prestera_kern_neigh_cache *n_cache = ptr;
+ struct prestera_switch *sw = arg;
+
+ if (!list_empty(&n_cache->kern_fib_cache_list)) {
+ WARN_ON(1); /* BUG */
+ return;
+ }
+ __prestera_k_arb_n_offload_set(sw, n_cache, false);
+ n_cache->in_kernel = false;
+ /* No need to destroy the lpm entry here; it is torn down when the
+ * hw fib ht itself is destroyed.
+ */
+ __prestera_kern_neigh_cache_destruct(sw, n_cache);
+ kfree(n_cache);
+}
+
+static void __prestera_k_arb_abort_fib_ht_cb(void *ptr, void *arg)
+{
+ struct prestera_kern_fib_cache *fib_cache = ptr;
+ struct prestera_switch *sw = arg;
+
+ __prestera_k_arb_fib_lpm_offload_set(sw, fib_cache,
+ false, false,
+ false);
+ __prestera_k_arb_fib_nh_offload_set(sw, fib_cache, NULL,
+ false, false);
+ /* No need to destroy the lpm entry here; it is torn down when the
+ * hw fib ht itself is destroyed.
+ */
+ __prestera_kern_fib_cache_destruct(sw, fib_cache);
+ kfree(fib_cache);
+}
+
+static void prestera_k_arb_abort(struct prestera_switch *sw)
+{
+ /* Remove all arbiter entries and related hw objects. */
+ /* Sequence:
+ * 1) Clear arbiter tables, but don't touch hw
+ * 2) Clear hw
+ * This approach is used because an arbiter object is not directly
+ * mapped to hw, so deleting one arbiter object may even lead to the
+ * creation of a hw object (e.g. in the case of overlapped routes).
+ */
+ rhashtable_free_and_destroy(&sw->router->kern_fib_cache_ht,
+ __prestera_k_arb_abort_fib_ht_cb,
+ sw);
+ rhashtable_free_and_destroy(&sw->router->kern_neigh_cache_ht,
+ __prestera_k_arb_abort_neigh_ht_cb,
+ sw);
+}
+
+static int __prestera_inetaddr_port_event(struct net_device *port_dev,
+ unsigned long event,
+ struct netlink_ext_ack *extack)
+{
+ struct prestera_port *port = netdev_priv(port_dev);
+ struct prestera_rif_entry_key re_key = {};
+ struct prestera_rif_entry *re;
+ u32 kern_tb_id;
+ int err;
+
+ err = prestera_is_valid_mac_addr(port, port_dev->dev_addr);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "RIF MAC must have the same prefix");
+ return err;
+ }
+
+ kern_tb_id = l3mdev_fib_table(port_dev);
+ re_key.iface.type = PRESTERA_IF_PORT_E;
+ re_key.iface.dev_port.hw_dev_num = port->dev_id;
+ re_key.iface.dev_port.port_num = port->hw_id;
+ re = prestera_rif_entry_find(port->sw, &re_key);
+
+ switch (event) {
+ case NETDEV_UP:
+ if (re) {
+ NL_SET_ERR_MSG_MOD(extack, "RIF already exists");
+ return -EEXIST;
+ }
+ re = prestera_rif_entry_create(port->sw, &re_key,
+ prestera_fix_tb_id(kern_tb_id),
+ port_dev->dev_addr);
+ if (!re) {
+ NL_SET_ERR_MSG_MOD(extack, "Can't create RIF");
+ return -EINVAL;
+ }
+ dev_hold(port_dev);
+ break;
+ case NETDEV_DOWN:
+ if (!re) {
+ NL_SET_ERR_MSG_MOD(extack, "Can't find RIF");
+ return -EEXIST;
+ }
+ prestera_rif_entry_destroy(port->sw, re);
+ dev_put(port_dev);
+ break;
+ }
+
+ return 0;
+}
+
+static int __prestera_inetaddr_event(struct prestera_switch *sw,
+ struct net_device *dev,
+ unsigned long event,
+ struct netlink_ext_ack *extack)
+{
+ if (!prestera_netdev_check(dev) || netif_is_any_bridge_port(dev) ||
+ netif_is_lag_port(dev))
+ return 0;
+
+ return __prestera_inetaddr_port_event(dev, event, extack);
+}
+
+static int __prestera_inetaddr_cb(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
+ struct net_device *dev = ifa->ifa_dev->dev;
+ struct prestera_router *router = container_of(nb,
+ struct prestera_router,
+ inetaddr_nb);
+ struct in_device *idev;
+ int err = 0;
+
+ if (event != NETDEV_DOWN)
+ goto out;
+
+ /* Ignore if this is not the last address */
+ idev = __in_dev_get_rtnl(dev);
+ if (idev && idev->ifa_list)
+ goto out;
+
+ err = __prestera_inetaddr_event(router->sw, dev, event, NULL);
+out:
+ return notifier_from_errno(err);
+}
+
+static int __prestera_inetaddr_valid_cb(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct in_validator_info *ivi = (struct in_validator_info *)ptr;
+ struct net_device *dev = ivi->ivi_dev->dev;
+ struct prestera_router *router = container_of(nb,
+ struct prestera_router,
+ inetaddr_valid_nb);
+ struct in_device *idev;
+ int err = 0;
+
+ if (event != NETDEV_UP)
+ goto out;
+
+ /* Ignore if this is not the first address */
+ idev = __in_dev_get_rtnl(dev);
+ if (idev && idev->ifa_list)
+ goto out;
+
+ if (ipv4_is_multicast(ivi->ivi_addr)) {
+ NL_SET_ERR_MSG_MOD(ivi->extack,
+ "Multicast addr on RIF is not supported");
+ err = -EINVAL;
+ goto out;
+ }
+
+ err = __prestera_inetaddr_event(router->sw, dev, event, ivi->extack);
+out:
+ return notifier_from_errno(err);
+}
+
+struct prestera_fib_event_work {
+ struct work_struct work;
+ struct prestera_switch *sw;
+ struct fib_entry_notifier_info fen_info;
+ unsigned long event;
+};
+
+static void __prestera_router_fib_event_work(struct work_struct *work)
+{
+ struct prestera_fib_event_work *fib_work =
+ container_of(work, struct prestera_fib_event_work, work);
+ struct prestera_switch *sw = fib_work->sw;
+ int err;
+
+ rtnl_lock();
+
+ switch (fib_work->event) {
+ case FIB_EVENT_ENTRY_REPLACE:
+ err = prestera_k_arb_fib_evt(sw, true,
+ &fib_work->fen_info.info);
+ if (err)
+ goto err_out;
+
+ break;
+ case FIB_EVENT_ENTRY_DEL:
+ err = prestera_k_arb_fib_evt(sw, false,
+ &fib_work->fen_info.info);
+ if (err)
+ goto err_out;
+
+ break;
+ }
+
+ goto out;
+
+err_out:
+ dev_err(sw->dev->dev, "Error when processing %pI4h/%d",
+ &fib_work->fen_info.dst,
+ fib_work->fen_info.dst_len);
+out:
+ fib_info_put(fib_work->fen_info.fi);
+ rtnl_unlock();
+ kfree(fib_work);
+}
+
+/* Called with rcu_read_lock() */
+static int __prestera_router_fib_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct prestera_fib_event_work *fib_work;
+ struct fib_entry_notifier_info *fen_info;
+ struct fib_notifier_info *info = ptr;
+ struct prestera_router *router;
+
+ if (info->family != AF_INET)
+ return NOTIFY_DONE;
+
+ router = container_of(nb, struct prestera_router, fib_nb);
+
+ switch (event) {
+ case FIB_EVENT_ENTRY_REPLACE:
+ case FIB_EVENT_ENTRY_DEL:
+ fen_info = container_of(info, struct fib_entry_notifier_info,
+ info);
+ if (!fen_info->fi)
+ return NOTIFY_DONE;
+
+ fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
+ if (WARN_ON(!fib_work))
+ return NOTIFY_BAD;
+
+ fib_info_hold(fen_info->fi);
+ fib_work->fen_info = *fen_info;
+ fib_work->event = event;
+ fib_work->sw = router->sw;
+ INIT_WORK(&fib_work->work, __prestera_router_fib_event_work);
+ prestera_queue_work(&fib_work->work);
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+
+ return NOTIFY_DONE;
+}
+
+struct prestera_netevent_work {
+ struct work_struct work;
+ struct prestera_switch *sw;
+ struct neighbour *n;
+};
+
+static void prestera_router_neigh_event_work(struct work_struct *work)
+{
+ struct prestera_netevent_work *net_work =
+ container_of(work, struct prestera_netevent_work, work);
+ struct prestera_switch *sw = net_work->sw;
+ struct neighbour *n = net_work->n;
+
+ /* A neigh is not a hw-related object; it is stored only in the kernel,
+ * hence the rtnl lock.
+ */
+ rtnl_lock();
+
+ prestera_k_arb_n_evt(sw, n);
+
+ neigh_release(n);
+ rtnl_unlock();
+ kfree(net_work);
+}
+
+static int prestera_router_netevent_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct prestera_netevent_work *net_work;
+ struct prestera_router *router;
+ struct neighbour *n = ptr;
+
+ router = container_of(nb, struct prestera_router, netevent_nb);
+
+ switch (event) {
+ case NETEVENT_NEIGH_UPDATE:
+ if (n->tbl->family != AF_INET)
+ return NOTIFY_DONE;
+
+ net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
+ if (WARN_ON(!net_work))
+ return NOTIFY_BAD;
+
+ neigh_clone(n);
+ net_work->n = n;
+ net_work->sw = router->sw;
+ INIT_WORK(&net_work->work, prestera_router_neigh_event_work);
+ prestera_queue_work(&net_work->work);
+ }
+
+ return NOTIFY_DONE;
+}
+
+static void prestera_router_update_neighs_work(struct work_struct *work)
+{
+ struct prestera_router *router;
+
+ router = container_of(work, struct prestera_router,
+ neighs_update.dw.work);
+ rtnl_lock();
+
+ prestera_k_arb_hw_evt(router->sw);
+
+ rtnl_unlock();
+ prestera_queue_delayed_work(&router->neighs_update.dw,
+ msecs_to_jiffies(PRESTERA_NH_PROBE_INTERVAL));
+}
+
+static int prestera_neigh_work_init(struct prestera_switch *sw)
+{
+ INIT_DELAYED_WORK(&sw->router->neighs_update.dw,
+ prestera_router_update_neighs_work);
+ prestera_queue_delayed_work(&sw->router->neighs_update.dw, 0);
+ return 0;
+}
+
+static void prestera_neigh_work_fini(struct prestera_switch *sw)
+{
+ cancel_delayed_work_sync(&sw->router->neighs_update.dw);
+}
+
+int prestera_router_init(struct prestera_switch *sw)
+{
+ struct prestera_router *router;
+ int err, nhgrp_cache_bytes;
+
+ router = kzalloc(sizeof(*sw->router), GFP_KERNEL);
+ if (!router)
+ return -ENOMEM;
+
+ sw->router = router;
+ router->sw = sw;
+
+ err = prestera_router_hw_init(sw);
+ if (err)
+ goto err_router_lib_init;
+
+ err = rhashtable_init(&router->kern_fib_cache_ht,
+ &__prestera_kern_fib_cache_ht_params);
+ if (err)
+ goto err_kern_fib_cache_ht_init;
+
+ err = rhashtable_init(&router->kern_neigh_cache_ht,
+ &__prestera_kern_neigh_cache_ht_params);
+ if (err)
+ goto err_kern_neigh_cache_ht_init;
+
+ nhgrp_cache_bytes = sw->size_tbl_router_nexthop / 8 + 1;
+ router->nhgrp_hw_state_cache = kzalloc(nhgrp_cache_bytes, GFP_KERNEL);
+ if (!router->nhgrp_hw_state_cache) {
+ err = -ENOMEM;
+ goto err_nh_state_cache_alloc;
+ }
+
+ err = prestera_neigh_work_init(sw);
+ if (err)
+ goto err_neigh_work_init;
+
+ router->inetaddr_valid_nb.notifier_call = __prestera_inetaddr_valid_cb;
+ err = register_inetaddr_validator_notifier(&router->inetaddr_valid_nb);
+ if (err)
+ goto err_register_inetaddr_validator_notifier;
+
+ router->inetaddr_nb.notifier_call = __prestera_inetaddr_cb;
+ err = register_inetaddr_notifier(&router->inetaddr_nb);
+ if (err)
+ goto err_register_inetaddr_notifier;
+
+ router->netevent_nb.notifier_call = prestera_router_netevent_event;
+ err = register_netevent_notifier(&router->netevent_nb);
+ if (err)
+ goto err_register_netevent_notifier;
+
+ router->fib_nb.notifier_call = __prestera_router_fib_event;
+ err = register_fib_notifier(&init_net, &router->fib_nb,
+ /* TODO: flush fib entries */ NULL, NULL);
+ if (err)
+ goto err_register_fib_notifier;
+
+ return 0;
+
+err_register_fib_notifier:
+ unregister_netevent_notifier(&router->netevent_nb);
+err_register_netevent_notifier:
+ unregister_inetaddr_notifier(&router->inetaddr_nb);
+err_register_inetaddr_notifier:
+ unregister_inetaddr_validator_notifier(&router->inetaddr_valid_nb);
+err_register_inetaddr_validator_notifier:
+ prestera_neigh_work_fini(sw);
+err_neigh_work_init:
+ kfree(router->nhgrp_hw_state_cache);
+err_nh_state_cache_alloc:
+ rhashtable_destroy(&router->kern_neigh_cache_ht);
+err_kern_neigh_cache_ht_init:
+ rhashtable_destroy(&router->kern_fib_cache_ht);
+err_kern_fib_cache_ht_init:
+ prestera_router_hw_fini(sw);
+err_router_lib_init:
+ kfree(sw->router);
+ return err;
+}
+
+void prestera_router_fini(struct prestera_switch *sw)
+{
+ unregister_fib_notifier(&init_net, &sw->router->fib_nb);
+ unregister_netevent_notifier(&sw->router->netevent_nb);
+ unregister_inetaddr_notifier(&sw->router->inetaddr_nb);
+ unregister_inetaddr_validator_notifier(&sw->router->inetaddr_valid_nb);
+ prestera_neigh_work_fini(sw);
+ prestera_queue_drain();
+
+ prestera_k_arb_abort(sw);
+
+ kfree(sw->router->nhgrp_hw_state_cache);
+ rhashtable_destroy(&sw->router->kern_fib_cache_ht);
+ prestera_router_hw_fini(sw);
+ kfree(sw->router);
+ sw->router = NULL;
+}
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_router_hw.c b/drivers/net/ethernet/marvell/prestera/prestera_router_hw.c
new file mode 100644
index 000000000..02faaea2a
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_router_hw.c
@@ -0,0 +1,688 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2019-2021 Marvell International Ltd. All rights reserved */
+
+#include <linux/rhashtable.h>
+
+#include "prestera.h"
+#include "prestera_hw.h"
+#include "prestera_router_hw.h"
+#include "prestera_acl.h"
+
+/* Nexthop is pointed
+ * to port (not rif)
+ * +-------+
+ * +>|nexthop|
+ * | +-------+
+ * |
+ * +--+ +-----++
+ * +------->|vr|<-+ +>|nh_grp|
+ * | +--+ | | +------+
+ * | | |
+ * +-+-------+ +--+---+-+
+ * |rif_entry| |fib_node|
+ * +---------+ +--------+
+ * Rif is Fib - is exit point
+ * used as
+ * entry point
+ * for vr in hw
+ */
+
+#define PRESTERA_NHGR_UNUSED (0)
+#define PRESTERA_NHGR_DROP (0xFFFFFFFF)
+/* Need to merge it with router_manager */
+#define PRESTERA_NH_ACTIVE_JIFFER_FILTER 3000 /* ms */
+
+static const struct rhashtable_params __prestera_fib_ht_params = {
+ .key_offset = offsetof(struct prestera_fib_node, key),
+ .head_offset = offsetof(struct prestera_fib_node, ht_node),
+ .key_len = sizeof(struct prestera_fib_key),
+ .automatic_shrinking = true,
+};
+
+static const struct rhashtable_params __prestera_nh_neigh_ht_params = {
+ .key_offset = offsetof(struct prestera_nh_neigh, key),
+ .key_len = sizeof(struct prestera_nh_neigh_key),
+ .head_offset = offsetof(struct prestera_nh_neigh, ht_node),
+};
+
+static const struct rhashtable_params __prestera_nexthop_group_ht_params = {
+ .key_offset = offsetof(struct prestera_nexthop_group, key),
+ .key_len = sizeof(struct prestera_nexthop_group_key),
+ .head_offset = offsetof(struct prestera_nexthop_group, ht_node),
+};
+
+static int prestera_nexthop_group_set(struct prestera_switch *sw,
+ struct prestera_nexthop_group *nh_grp);
+static bool
+prestera_nexthop_group_util_hw_state(struct prestera_switch *sw,
+ struct prestera_nexthop_group *nh_grp);
+static void prestera_fib_node_destroy_ht_cb(void *ptr, void *arg);
+
+/* TODO: move to router.h as macros */
+static bool prestera_nh_neigh_key_is_valid(struct prestera_nh_neigh_key *key)
+{
+ return memchr_inv(key, 0, sizeof(*key)) ? true : false;
+}
+
+int prestera_router_hw_init(struct prestera_switch *sw)
+{
+ int err;
+
+ err = rhashtable_init(&sw->router->nh_neigh_ht,
+ &__prestera_nh_neigh_ht_params);
+ if (err)
+ goto err_nh_neigh_ht_init;
+
+ err = rhashtable_init(&sw->router->nexthop_group_ht,
+ &__prestera_nexthop_group_ht_params);
+ if (err)
+ goto err_nexthop_grp_ht_init;
+
+ err = rhashtable_init(&sw->router->fib_ht,
+ &__prestera_fib_ht_params);
+ if (err)
+ goto err_fib_ht_init;
+
+ INIT_LIST_HEAD(&sw->router->vr_list);
+ INIT_LIST_HEAD(&sw->router->rif_entry_list);
+
+ return 0;
+
+err_fib_ht_init:
+ rhashtable_destroy(&sw->router->nexthop_group_ht);
+err_nexthop_grp_ht_init:
+ rhashtable_destroy(&sw->router->nh_neigh_ht);
+err_nh_neigh_ht_init:
+ return err;
+}
+
+void prestera_router_hw_fini(struct prestera_switch *sw)
+{
+ rhashtable_free_and_destroy(&sw->router->fib_ht,
+ prestera_fib_node_destroy_ht_cb, sw);
+ WARN_ON(!list_empty(&sw->router->vr_list));
+ WARN_ON(!list_empty(&sw->router->rif_entry_list));
+ rhashtable_destroy(&sw->router->fib_ht);
+ rhashtable_destroy(&sw->router->nexthop_group_ht);
+ rhashtable_destroy(&sw->router->nh_neigh_ht);
+}
+
+static struct prestera_vr *__prestera_vr_find(struct prestera_switch *sw,
+ u32 tb_id)
+{
+ struct prestera_vr *vr;
+
+ list_for_each_entry(vr, &sw->router->vr_list, router_node) {
+ if (vr->tb_id == tb_id)
+ return vr;
+ }
+
+ return NULL;
+}
+
+static struct prestera_vr *__prestera_vr_create(struct prestera_switch *sw,
+ u32 tb_id,
+ struct netlink_ext_ack *extack)
+{
+ struct prestera_vr *vr;
+ int err;
+
+ vr = kzalloc(sizeof(*vr), GFP_KERNEL);
+ if (!vr) {
+ err = -ENOMEM;
+ goto err_alloc_vr;
+ }
+
+ vr->tb_id = tb_id;
+
+ err = prestera_hw_vr_create(sw, &vr->hw_vr_id);
+ if (err)
+ goto err_hw_create;
+
+ list_add(&vr->router_node, &sw->router->vr_list);
+
+ return vr;
+
+err_hw_create:
+ kfree(vr);
+err_alloc_vr:
+ return ERR_PTR(err);
+}
+
+static void __prestera_vr_destroy(struct prestera_switch *sw,
+ struct prestera_vr *vr)
+{
+ list_del(&vr->router_node);
+ prestera_hw_vr_delete(sw, vr->hw_vr_id);
+ kfree(vr);
+}
+
+static struct prestera_vr *prestera_vr_get(struct prestera_switch *sw, u32 tb_id,
+ struct netlink_ext_ack *extack)
+{
+ struct prestera_vr *vr;
+
+ vr = __prestera_vr_find(sw, tb_id);
+ if (vr) {
+ refcount_inc(&vr->refcount);
+ } else {
+ vr = __prestera_vr_create(sw, tb_id, extack);
+ if (IS_ERR(vr))
+ return ERR_CAST(vr);
+
+ refcount_set(&vr->refcount, 1);
+ }
+
+ return vr;
+}
+
+static void prestera_vr_put(struct prestera_switch *sw, struct prestera_vr *vr)
+{
+ if (refcount_dec_and_test(&vr->refcount))
+ __prestera_vr_destroy(sw, vr);
+}
+
+/* iface carries more than is needed here; vr_id could also be removed. */
+static int
+__prestera_rif_entry_key_copy(const struct prestera_rif_entry_key *in,
+ struct prestera_rif_entry_key *out)
+{
+ memset(out, 0, sizeof(*out));
+
+ switch (in->iface.type) {
+ case PRESTERA_IF_PORT_E:
+ out->iface.dev_port.hw_dev_num = in->iface.dev_port.hw_dev_num;
+ out->iface.dev_port.port_num = in->iface.dev_port.port_num;
+ break;
+ case PRESTERA_IF_LAG_E:
+ out->iface.lag_id = in->iface.lag_id;
+ break;
+ case PRESTERA_IF_VID_E:
+ out->iface.vlan_id = in->iface.vlan_id;
+ break;
+ default:
+ WARN(1, "Unsupported iface type");
+ return -EINVAL;
+ }
+
+ out->iface.type = in->iface.type;
+ return 0;
+}
+
+struct prestera_rif_entry *
+prestera_rif_entry_find(const struct prestera_switch *sw,
+ const struct prestera_rif_entry_key *k)
+{
+ struct prestera_rif_entry *rif_entry;
+ struct prestera_rif_entry_key lk; /* lookup key */
+
+ if (__prestera_rif_entry_key_copy(k, &lk))
+ return NULL;
+
+ list_for_each_entry(rif_entry, &sw->router->rif_entry_list,
+ router_node) {
+ if (!memcmp(k, &rif_entry->key, sizeof(*k)))
+ return rif_entry;
+ }
+
+ return NULL;
+}
+
+void prestera_rif_entry_destroy(struct prestera_switch *sw,
+ struct prestera_rif_entry *e)
+{
+ struct prestera_iface iface;
+
+ list_del(&e->router_node);
+
+ memcpy(&iface, &e->key.iface, sizeof(iface));
+ iface.vr_id = e->vr->hw_vr_id;
+ prestera_hw_rif_delete(sw, e->hw_id, &iface);
+
+ prestera_vr_put(sw, e->vr);
+ kfree(e);
+}
+
+struct prestera_rif_entry *
+prestera_rif_entry_create(struct prestera_switch *sw,
+ struct prestera_rif_entry_key *k,
+ u32 tb_id, const unsigned char *addr)
+{
+ int err;
+ struct prestera_rif_entry *e;
+ struct prestera_iface iface;
+
+ e = kzalloc(sizeof(*e), GFP_KERNEL);
+ if (!e)
+ goto err_kzalloc;
+
+ if (__prestera_rif_entry_key_copy(k, &e->key))
+ goto err_key_copy;
+
+ e->vr = prestera_vr_get(sw, tb_id, NULL);
+ if (IS_ERR(e->vr))
+ goto err_vr_get;
+
+ memcpy(&e->addr, addr, sizeof(e->addr));
+
+ /* HW */
+ memcpy(&iface, &e->key.iface, sizeof(iface));
+ iface.vr_id = e->vr->hw_vr_id;
+ err = prestera_hw_rif_create(sw, &iface, e->addr, &e->hw_id);
+ if (err)
+ goto err_hw_create;
+
+ list_add(&e->router_node, &sw->router->rif_entry_list);
+
+ return e;
+
+err_hw_create:
+ prestera_vr_put(sw, e->vr);
+err_vr_get:
+err_key_copy:
+ kfree(e);
+err_kzalloc:
+ return NULL;
+}
+
+static void __prestera_nh_neigh_destroy(struct prestera_switch *sw,
+ struct prestera_nh_neigh *neigh)
+{
+ rhashtable_remove_fast(&sw->router->nh_neigh_ht,
+ &neigh->ht_node,
+ __prestera_nh_neigh_ht_params);
+ kfree(neigh);
+}
+
+static struct prestera_nh_neigh *
+__prestera_nh_neigh_create(struct prestera_switch *sw,
+ struct prestera_nh_neigh_key *key)
+{
+ struct prestera_nh_neigh *neigh;
+ int err;
+
+ neigh = kzalloc(sizeof(*neigh), GFP_KERNEL);
+ if (!neigh)
+ goto err_kzalloc;
+
+ memcpy(&neigh->key, key, sizeof(*key));
+ neigh->info.connected = false;
+ INIT_LIST_HEAD(&neigh->nexthop_group_list);
+ err = rhashtable_insert_fast(&sw->router->nh_neigh_ht,
+ &neigh->ht_node,
+ __prestera_nh_neigh_ht_params);
+ if (err)
+ goto err_rhashtable_insert;
+
+ return neigh;
+
+err_rhashtable_insert:
+ kfree(neigh);
+err_kzalloc:
+ return NULL;
+}
+
+struct prestera_nh_neigh *
+prestera_nh_neigh_find(struct prestera_switch *sw,
+ struct prestera_nh_neigh_key *key)
+{
+ struct prestera_nh_neigh *nh_neigh;
+
+ nh_neigh = rhashtable_lookup_fast(&sw->router->nh_neigh_ht,
+ key, __prestera_nh_neigh_ht_params);
+ return nh_neigh;
+}
+
+struct prestera_nh_neigh *
+prestera_nh_neigh_get(struct prestera_switch *sw,
+ struct prestera_nh_neigh_key *key)
+{
+ struct prestera_nh_neigh *neigh;
+
+ neigh = prestera_nh_neigh_find(sw, key);
+ if (!neigh)
+ return __prestera_nh_neigh_create(sw, key);
+
+ return neigh;
+}
+
+void prestera_nh_neigh_put(struct prestera_switch *sw,
+ struct prestera_nh_neigh *neigh)
+{
+ if (list_empty(&neigh->nexthop_group_list))
+ __prestera_nh_neigh_destroy(sw, neigh);
+}
+
+/* Propagates the updated prestera_neigh_info to every nexthop group using this neigh */
+int prestera_nh_neigh_set(struct prestera_switch *sw,
+ struct prestera_nh_neigh *neigh)
+{
+ struct prestera_nh_neigh_head *nh_head;
+ struct prestera_nexthop_group *nh_grp;
+ int err;
+
+ list_for_each_entry(nh_head, &neigh->nexthop_group_list, head) {
+ nh_grp = nh_head->this;
+ err = prestera_nexthop_group_set(sw, nh_grp);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+bool prestera_nh_neigh_util_hw_state(struct prestera_switch *sw,
+ struct prestera_nh_neigh *nh_neigh)
+{
+ bool state;
+ struct prestera_nh_neigh_head *nh_head, *tmp;
+
+ state = false;
+ list_for_each_entry_safe(nh_head, tmp,
+ &nh_neigh->nexthop_group_list, head) {
+ state = prestera_nexthop_group_util_hw_state(sw, nh_head->this);
+ if (state)
+ goto out;
+ }
+
+out:
+ return state;
+}
+
+static struct prestera_nexthop_group *
+__prestera_nexthop_group_create(struct prestera_switch *sw,
+ struct prestera_nexthop_group_key *key)
+{
+ struct prestera_nexthop_group *nh_grp;
+ struct prestera_nh_neigh *nh_neigh;
+ int nh_cnt, err, gid;
+
+ nh_grp = kzalloc(sizeof(*nh_grp), GFP_KERNEL);
+ if (!nh_grp)
+ goto err_kzalloc;
+
+ memcpy(&nh_grp->key, key, sizeof(*key));
+ for (nh_cnt = 0; nh_cnt < PRESTERA_NHGR_SIZE_MAX; nh_cnt++) {
+ if (!prestera_nh_neigh_key_is_valid(&nh_grp->key.neigh[nh_cnt]))
+ break;
+
+ nh_neigh = prestera_nh_neigh_get(sw,
+ &nh_grp->key.neigh[nh_cnt]);
+ if (!nh_neigh)
+ goto err_nh_neigh_get;
+
+ nh_grp->nh_neigh_head[nh_cnt].neigh = nh_neigh;
+ nh_grp->nh_neigh_head[nh_cnt].this = nh_grp;
+ list_add(&nh_grp->nh_neigh_head[nh_cnt].head,
+ &nh_neigh->nexthop_group_list);
+ }
+
+ err = prestera_hw_nh_group_create(sw, nh_cnt, &nh_grp->grp_id);
+ if (err)
+ goto err_nh_group_create;
+
+ err = prestera_nexthop_group_set(sw, nh_grp);
+ if (err)
+ goto err_nexthop_group_set;
+
+ err = rhashtable_insert_fast(&sw->router->nexthop_group_ht,
+ &nh_grp->ht_node,
+ __prestera_nexthop_group_ht_params);
+ if (err)
+ goto err_ht_insert;
+
+ /* reset cache for created group */
+ gid = nh_grp->grp_id;
+ sw->router->nhgrp_hw_state_cache[gid / 8] &= ~BIT(gid % 8);
+
+ return nh_grp;
+
+err_ht_insert:
+err_nexthop_group_set:
+ prestera_hw_nh_group_delete(sw, nh_cnt, nh_grp->grp_id);
+err_nh_group_create:
+err_nh_neigh_get:
+ for (nh_cnt--; nh_cnt >= 0; nh_cnt--) {
+ list_del(&nh_grp->nh_neigh_head[nh_cnt].head);
+ prestera_nh_neigh_put(sw, nh_grp->nh_neigh_head[nh_cnt].neigh);
+ }
+
+ kfree(nh_grp);
+err_kzalloc:
+ return NULL;
+}
+
+static void
+__prestera_nexthop_group_destroy(struct prestera_switch *sw,
+ struct prestera_nexthop_group *nh_grp)
+{
+ struct prestera_nh_neigh *nh_neigh;
+ int nh_cnt;
+
+ rhashtable_remove_fast(&sw->router->nexthop_group_ht,
+ &nh_grp->ht_node,
+ __prestera_nexthop_group_ht_params);
+
+ for (nh_cnt = 0; nh_cnt < PRESTERA_NHGR_SIZE_MAX; nh_cnt++) {
+ nh_neigh = nh_grp->nh_neigh_head[nh_cnt].neigh;
+ if (!nh_neigh)
+ break;
+
+ list_del(&nh_grp->nh_neigh_head[nh_cnt].head);
+ prestera_nh_neigh_put(sw, nh_neigh);
+ }
+
+ prestera_hw_nh_group_delete(sw, nh_cnt, nh_grp->grp_id);
+ kfree(nh_grp);
+}
+
+static struct prestera_nexthop_group *
+__prestera_nexthop_group_find(struct prestera_switch *sw,
+ struct prestera_nexthop_group_key *key)
+{
+ struct prestera_nexthop_group *nh_grp;
+
+ nh_grp = rhashtable_lookup_fast(&sw->router->nexthop_group_ht,
+ key, __prestera_nexthop_group_ht_params);
+ return nh_grp;
+}
+
+static struct prestera_nexthop_group *
+prestera_nexthop_group_get(struct prestera_switch *sw,
+ struct prestera_nexthop_group_key *key)
+{
+ struct prestera_nexthop_group *nh_grp;
+
+ nh_grp = __prestera_nexthop_group_find(sw, key);
+ if (nh_grp) {
+ refcount_inc(&nh_grp->refcount);
+ } else {
+ nh_grp = __prestera_nexthop_group_create(sw, key);
+ if (!nh_grp)
+ return ERR_PTR(-ENOMEM);
+
+ refcount_set(&nh_grp->refcount, 1);
+ }
+
+ return nh_grp;
+}
+
+static void prestera_nexthop_group_put(struct prestera_switch *sw,
+ struct prestera_nexthop_group *nh_grp)
+{
+ if (refcount_dec_and_test(&nh_grp->refcount))
+ __prestera_nexthop_group_destroy(sw, nh_grp);
+}
+
+/* Updates the hw group with the current nh_neigh info */
+static int prestera_nexthop_group_set(struct prestera_switch *sw,
+ struct prestera_nexthop_group *nh_grp)
+{
+ struct prestera_neigh_info info[PRESTERA_NHGR_SIZE_MAX];
+ struct prestera_nh_neigh *neigh;
+ int nh_cnt;
+
+ memset(&info[0], 0, sizeof(info));
+ for (nh_cnt = 0; nh_cnt < PRESTERA_NHGR_SIZE_MAX; nh_cnt++) {
+ neigh = nh_grp->nh_neigh_head[nh_cnt].neigh;
+ if (!neigh)
+ break;
+
+ memcpy(&info[nh_cnt], &neigh->info, sizeof(neigh->info));
+ }
+
+ return prestera_hw_nh_entries_set(sw, nh_cnt, &info[0], nh_grp->grp_id);
+}
+
+static bool
+prestera_nexthop_group_util_hw_state(struct prestera_switch *sw,
+ struct prestera_nexthop_group *nh_grp)
+{
+ int err;
+ u32 buf_size = sw->size_tbl_router_nexthop / 8 + 1;
+ u32 gid = nh_grp->grp_id;
+ u8 *cache = sw->router->nhgrp_hw_state_cache;
+
+ /* Antijitter
+ * Prevent the situation where the state of an nh_grp is read twice in
+ * a short time and the state bit is already cleared on the second
+ * call. So keep the active state sticky for
+ * PRESTERA_NH_ACTIVE_JIFFER_FILTER ms after the last read.
+ */
+ if (!time_before(jiffies, sw->router->nhgrp_hw_cache_kick +
+ msecs_to_jiffies(PRESTERA_NH_ACTIVE_JIFFER_FILTER))) {
+ err = prestera_hw_nhgrp_blk_get(sw, cache, buf_size);
+ if (err) {
+ pr_err("Failed to get nh_grp hw state");
+ return false;
+ }
+
+ sw->router->nhgrp_hw_cache_kick = jiffies;
+ }
+
+ if (cache[gid / 8] & BIT(gid % 8))
+ return true;
+
+ return false;
+}
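+
+/* Example (illustrative): the driver treats the nh_grp activity bits as
+ * clear-on-read, so two reads within PRESTERA_NH_ACTIVE_JIFFER_FILTER ms
+ * could make an active group look idle. With the cache above, the second
+ * caller reuses the bitmap fetched by the first one and still sees the group
+ * as active.
+ */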
+
+struct prestera_fib_node *
+prestera_fib_node_find(struct prestera_switch *sw, struct prestera_fib_key *key)
+{
+ struct prestera_fib_node *fib_node;
+
+ fib_node = rhashtable_lookup_fast(&sw->router->fib_ht, key,
+ __prestera_fib_ht_params);
+ return fib_node;
+}
+
+static void __prestera_fib_node_destruct(struct prestera_switch *sw,
+ struct prestera_fib_node *fib_node)
+{
+ struct prestera_vr *vr;
+
+ vr = fib_node->info.vr;
+ prestera_hw_lpm_del(sw, vr->hw_vr_id, fib_node->key.addr.u.ipv4,
+ fib_node->key.prefix_len);
+ switch (fib_node->info.type) {
+ case PRESTERA_FIB_TYPE_UC_NH:
+ prestera_nexthop_group_put(sw, fib_node->info.nh_grp);
+ break;
+ case PRESTERA_FIB_TYPE_TRAP:
+ break;
+ case PRESTERA_FIB_TYPE_DROP:
+ break;
+ default:
+ pr_err("Unknown fib_node->info.type = %d",
+ fib_node->info.type);
+ }
+
+ prestera_vr_put(sw, vr);
+}
+
+void prestera_fib_node_destroy(struct prestera_switch *sw,
+ struct prestera_fib_node *fib_node)
+{
+ __prestera_fib_node_destruct(sw, fib_node);
+ rhashtable_remove_fast(&sw->router->fib_ht, &fib_node->ht_node,
+ __prestera_fib_ht_params);
+ kfree(fib_node);
+}
+
+static void prestera_fib_node_destroy_ht_cb(void *ptr, void *arg)
+{
+ struct prestera_fib_node *node = ptr;
+ struct prestera_switch *sw = arg;
+
+ __prestera_fib_node_destruct(sw, node);
+ kfree(node);
+}
+
+struct prestera_fib_node *
+prestera_fib_node_create(struct prestera_switch *sw,
+ struct prestera_fib_key *key,
+ enum prestera_fib_type fib_type,
+ struct prestera_nexthop_group_key *nh_grp_key)
+{
+ struct prestera_fib_node *fib_node;
+ u32 grp_id;
+ struct prestera_vr *vr;
+ int err;
+
+ fib_node = kzalloc(sizeof(*fib_node), GFP_KERNEL);
+ if (!fib_node)
+ goto err_kzalloc;
+
+ memcpy(&fib_node->key, key, sizeof(*key));
+ fib_node->info.type = fib_type;
+
+ vr = prestera_vr_get(sw, key->tb_id, NULL);
+ if (IS_ERR(vr))
+ goto err_vr_get;
+
+ fib_node->info.vr = vr;
+
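+ /* Resolve the HW nexthop group id for this route type: TRAP and DROP
+ * use reserved group ids, UC_NH takes a reference on a real nexthop
+ * group.
+ */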
+ switch (fib_type) {
+ case PRESTERA_FIB_TYPE_TRAP:
+ grp_id = PRESTERA_NHGR_UNUSED;
+ break;
+ case PRESTERA_FIB_TYPE_DROP:
+ grp_id = PRESTERA_NHGR_DROP;
+ break;
+ case PRESTERA_FIB_TYPE_UC_NH:
+ fib_node->info.nh_grp = prestera_nexthop_group_get(sw,
+ nh_grp_key);
+ if (IS_ERR(fib_node->info.nh_grp))
+ goto err_nh_grp_get;
+
+ grp_id = fib_node->info.nh_grp->grp_id;
+ break;
+ default:
+ pr_err("Unsupported fib_type %d", fib_type);
+ goto err_nh_grp_get;
+ }
+
+ err = prestera_hw_lpm_add(sw, vr->hw_vr_id, key->addr.u.ipv4,
+ key->prefix_len, grp_id);
+ if (err)
+ goto err_lpm_add;
+
+ err = rhashtable_insert_fast(&sw->router->fib_ht, &fib_node->ht_node,
+ __prestera_fib_ht_params);
+ if (err)
+ goto err_ht_insert;
+
+ return fib_node;
+
+err_ht_insert:
+ prestera_hw_lpm_del(sw, vr->hw_vr_id, key->addr.u.ipv4,
+ key->prefix_len);
+err_lpm_add:
+ if (fib_type == PRESTERA_FIB_TYPE_UC_NH)
+ prestera_nexthop_group_put(sw, fib_node->info.nh_grp);
+err_nh_grp_get:
+ prestera_vr_put(sw, vr);
+err_vr_get:
+ kfree(fib_node);
+err_kzalloc:
+ return NULL;
+}
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_router_hw.h b/drivers/net/ethernet/marvell/prestera/prestera_router_hw.h
new file mode 100644
index 000000000..9ca97919c
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_router_hw.h
@@ -0,0 +1,155 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2019-2021 Marvell International Ltd. All rights reserved. */
+
+#ifndef _PRESTERA_ROUTER_HW_H_
+#define _PRESTERA_ROUTER_HW_H_
+
+struct prestera_vr {
+ struct list_head router_node;
+ refcount_t refcount;
+ u32 tb_id; /* key (kernel fib table id) */
+ u16 hw_vr_id; /* virtual router ID */
+ u8 __pad[2];
+};
+
+struct prestera_rif_entry {
+ struct prestera_rif_entry_key {
+ struct prestera_iface iface;
+ } key;
+ struct prestera_vr *vr;
+ unsigned char addr[ETH_ALEN];
+ u16 hw_id; /* rif_id */
+ struct list_head router_node; /* ht */
+};
+
+struct prestera_ip_addr {
+ union {
+ __be32 ipv4;
+ struct in6_addr ipv6;
+ } u;
+ enum {
+ PRESTERA_IPV4 = 0,
+ PRESTERA_IPV6
+ } v;
+#define PRESTERA_IP_ADDR_PLEN(V) ((V) == PRESTERA_IPV4 ? 32 : \
+ /* (V) == PRESTERA_IPV6 ? */ 128 /* : 0 */)
+};
+
+struct prestera_nh_neigh_key {
+ struct prestera_ip_addr addr;
+ /* It seems rif is obsolete, because there is an iface in info?
+ * A key can contain functional fields, or fields which are only used
+ * to filter out duplicate objects on the logical level (before they
+ * are passed to HW)... a key can also be used to cover hardware
+ * restrictions.
+ * In our case rif is a logical interface (it can even be a VLAN),
+ * which is used in combination with the IP address (also not related
+ * to the hardware nexthop) to provide logical compression of the
+ * created nexthops.
+ * You can even think of rif+IPaddr as just a cookie.
+ */
+ /* struct prestera_rif *rif; */
+ /* Used just as a cookie to divide ARP domains (together with addr) */
+ void *rif;
+};
+
+/* Used for hw call */
+struct prestera_neigh_info {
+ struct prestera_iface iface;
+ unsigned char ha[ETH_ALEN];
+ u8 connected; /* bool: indicates whether mac/oif are valid */
+ u8 __pad[1];
+};
+
+/* Used to notify nh about neigh change */
+struct prestera_nh_neigh {
+ struct prestera_nh_neigh_key key;
+ struct prestera_neigh_info info;
+ struct rhash_head ht_node; /* node of prestera_vr */
+ struct list_head nexthop_group_list;
+};
+
+#define PRESTERA_NHGR_SIZE_MAX 4
+
+struct prestera_nexthop_group {
+ struct prestera_nexthop_group_key {
+ struct prestera_nh_neigh_key neigh[PRESTERA_NHGR_SIZE_MAX];
+ } key;
+ /* Store the intermediate object here.
+ * This avoids the overhead of an extra kzalloc call.
+ */
+ /* nh_neigh is used only to notify nexthop_group */
+ struct prestera_nh_neigh_head {
+ struct prestera_nexthop_group *this;
+ struct list_head head;
+ /* The ptr to neigh is not strictly necessary.
+ * It is used to avoid a lookup of nh_neigh by key (n) on destroy.
+ */
+ struct prestera_nh_neigh *neigh;
+ } nh_neigh_head[PRESTERA_NHGR_SIZE_MAX];
+ struct rhash_head ht_node; /* node of the router's nexthop_group_ht */
+ refcount_t refcount;
+ u32 grp_id; /* hw */
+};
+
+struct prestera_fib_key {
+ struct prestera_ip_addr addr;
+ u32 prefix_len;
+ u32 tb_id;
+};
+
+struct prestera_fib_info {
+ struct prestera_vr *vr;
+ struct list_head vr_node;
+ enum prestera_fib_type {
+ PRESTERA_FIB_TYPE_INVALID = 0,
+ /* must be pointer to nh_grp id */
+ PRESTERA_FIB_TYPE_UC_NH,
+ /* It can be connected route
+ * and will be overlapped with neighbours
+ */
+ PRESTERA_FIB_TYPE_TRAP,
+ PRESTERA_FIB_TYPE_DROP
+ } type;
+ /* Valid only if type == UC_NH */
+ struct prestera_nexthop_group *nh_grp;
+};
+
+struct prestera_fib_node {
+ struct rhash_head ht_node; /* node of the router's fib_ht */
+ struct prestera_fib_key key;
+ struct prestera_fib_info info; /* action related info */
+};
+
+struct prestera_rif_entry *
+prestera_rif_entry_find(const struct prestera_switch *sw,
+ const struct prestera_rif_entry_key *k);
+void prestera_rif_entry_destroy(struct prestera_switch *sw,
+ struct prestera_rif_entry *e);
+struct prestera_rif_entry *
+prestera_rif_entry_create(struct prestera_switch *sw,
+ struct prestera_rif_entry_key *k,
+ u32 tb_id, const unsigned char *addr);
+struct prestera_nh_neigh *
+prestera_nh_neigh_find(struct prestera_switch *sw,
+ struct prestera_nh_neigh_key *key);
+struct prestera_nh_neigh *
+prestera_nh_neigh_get(struct prestera_switch *sw,
+ struct prestera_nh_neigh_key *key);
+void prestera_nh_neigh_put(struct prestera_switch *sw,
+ struct prestera_nh_neigh *neigh);
+int prestera_nh_neigh_set(struct prestera_switch *sw,
+ struct prestera_nh_neigh *neigh);
+bool prestera_nh_neigh_util_hw_state(struct prestera_switch *sw,
+ struct prestera_nh_neigh *nh_neigh);
+struct prestera_fib_node *prestera_fib_node_find(struct prestera_switch *sw,
+ struct prestera_fib_key *key);
+void prestera_fib_node_destroy(struct prestera_switch *sw,
+ struct prestera_fib_node *fib_node);
+struct prestera_fib_node *
+prestera_fib_node_create(struct prestera_switch *sw,
+ struct prestera_fib_key *key,
+ enum prestera_fib_type fib_type,
+ struct prestera_nexthop_group_key *nh_grp_key);
+int prestera_router_hw_init(struct prestera_switch *sw);
+void prestera_router_hw_fini(struct prestera_switch *sw);
+
+#endif /* _PRESTERA_ROUTER_HW_H_ */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c b/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c
new file mode 100644
index 000000000..cc2a9ae79
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c
@@ -0,0 +1,820 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved */
+
+#include <linux/bitfield.h>
+#include <linux/dmapool.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/platform_device.h>
+
+#include "prestera_dsa.h"
+#include "prestera.h"
+#include "prestera_hw.h"
+#include "prestera_rxtx.h"
+#include "prestera_devlink.h"
+
+#define PRESTERA_SDMA_WAIT_MUL 10
+
+struct prestera_sdma_desc {
+ __le32 word1;
+ __le32 word2;
+ __le32 buff;
+ __le32 next;
+} __packed __aligned(16);
+
+#define PRESTERA_SDMA_BUFF_SIZE_MAX 1544
+
+#define PRESTERA_SDMA_RX_DESC_PKT_LEN(desc) \
+ ((le32_to_cpu((desc)->word2) >> 16) & GENMASK(13, 0))
+
+#define PRESTERA_SDMA_RX_DESC_OWNER(desc) \
+ ((le32_to_cpu((desc)->word1) & BIT(31)) >> 31)
+
+#define PRESTERA_SDMA_RX_DESC_IS_RCVD(desc) \
+ (PRESTERA_SDMA_RX_DESC_OWNER(desc) == PRESTERA_SDMA_RX_DESC_CPU_OWN)
+
+#define PRESTERA_SDMA_RX_DESC_CPU_OWN 0
+#define PRESTERA_SDMA_RX_DESC_DMA_OWN 1
+
+#define PRESTERA_SDMA_RX_QUEUE_NUM 8
+
+#define PRESTERA_SDMA_RX_DESC_PER_Q 1000
+
+#define PRESTERA_SDMA_TX_DESC_PER_Q 1000
+#define PRESTERA_SDMA_TX_MAX_BURST 64
+
+#define PRESTERA_SDMA_TX_DESC_OWNER(desc) \
+ ((le32_to_cpu((desc)->word1) & BIT(31)) >> 31)
+
+#define PRESTERA_SDMA_TX_DESC_CPU_OWN 0
+#define PRESTERA_SDMA_TX_DESC_DMA_OWN 1U
+
+#define PRESTERA_SDMA_TX_DESC_IS_SENT(desc) \
+ (PRESTERA_SDMA_TX_DESC_OWNER(desc) == PRESTERA_SDMA_TX_DESC_CPU_OWN)
+
+#define PRESTERA_SDMA_TX_DESC_LAST BIT(20)
+#define PRESTERA_SDMA_TX_DESC_FIRST BIT(21)
+#define PRESTERA_SDMA_TX_DESC_CALC_CRC BIT(12)
+
+#define PRESTERA_SDMA_TX_DESC_SINGLE \
+ (PRESTERA_SDMA_TX_DESC_FIRST | PRESTERA_SDMA_TX_DESC_LAST)
+
+#define PRESTERA_SDMA_TX_DESC_INIT \
+ (PRESTERA_SDMA_TX_DESC_SINGLE | PRESTERA_SDMA_TX_DESC_CALC_CRC)
+
+#define PRESTERA_SDMA_RX_INTR_MASK_REG 0x2814
+#define PRESTERA_SDMA_RX_QUEUE_STATUS_REG 0x2680
+#define PRESTERA_SDMA_RX_QUEUE_DESC_REG(n) (0x260C + (n) * 16)
+
+#define PRESTERA_SDMA_TX_QUEUE_DESC_REG 0x26C0
+#define PRESTERA_SDMA_TX_QUEUE_START_REG 0x2868
+
+struct prestera_sdma_buf {
+ struct prestera_sdma_desc *desc;
+ dma_addr_t desc_dma;
+ struct sk_buff *skb;
+ dma_addr_t buf_dma;
+ bool is_used;
+};
+
+struct prestera_rx_ring {
+ struct prestera_sdma_buf *bufs;
+ int next_rx;
+};
+
+struct prestera_tx_ring {
+ struct prestera_sdma_buf *bufs;
+ int next_tx;
+ int max_burst;
+ int burst;
+};
+
+struct prestera_sdma {
+ struct prestera_rx_ring rx_ring[PRESTERA_SDMA_RX_QUEUE_NUM];
+ struct prestera_tx_ring tx_ring;
+ struct prestera_switch *sw;
+ struct dma_pool *desc_pool;
+ struct work_struct tx_work;
+ struct napi_struct rx_napi;
+ struct net_device napi_dev;
+ u32 map_addr;
+ u64 dma_mask;
+ /* protect SDMA against concurrent access from multiple CPUs */
+ spinlock_t tx_lock;
+};
+
+struct prestera_rxtx {
+ struct prestera_sdma sdma;
+};
+
+static int prestera_sdma_buf_init(struct prestera_sdma *sdma,
+ struct prestera_sdma_buf *buf)
+{
+ struct prestera_sdma_desc *desc;
+ dma_addr_t dma;
+
+ desc = dma_pool_alloc(sdma->desc_pool, GFP_DMA | GFP_KERNEL, &dma);
+ if (!desc)
+ return -ENOMEM;
+
+ buf->buf_dma = DMA_MAPPING_ERROR;
+ buf->desc_dma = dma;
+ buf->desc = desc;
+ buf->skb = NULL;
+
+ return 0;
+}
+
+static u32 prestera_sdma_map(struct prestera_sdma *sdma, dma_addr_t pa)
+{
+ return sdma->map_addr + pa;
+}
+
+static void prestera_sdma_rx_desc_init(struct prestera_sdma *sdma,
+ struct prestera_sdma_desc *desc,
+ dma_addr_t buf)
+{
+ u32 word = le32_to_cpu(desc->word2);
+
+ u32p_replace_bits(&word, PRESTERA_SDMA_BUFF_SIZE_MAX, GENMASK(15, 0));
+ desc->word2 = cpu_to_le32(word);
+
+ desc->buff = cpu_to_le32(prestera_sdma_map(sdma, buf));
+
+ /* make sure the buffer is set before resetting the descriptor */
+ wmb();
+
+ desc->word1 = cpu_to_le32(0xA0000000);
+}
+
+static void prestera_sdma_rx_desc_set_next(struct prestera_sdma *sdma,
+ struct prestera_sdma_desc *desc,
+ dma_addr_t next)
+{
+ desc->next = cpu_to_le32(prestera_sdma_map(sdma, next));
+}
+
+static int prestera_sdma_rx_skb_alloc(struct prestera_sdma *sdma,
+ struct prestera_sdma_buf *buf)
+{
+ struct device *dev = sdma->sw->dev->dev;
+ struct sk_buff *skb;
+ dma_addr_t dma;
+
+ skb = alloc_skb(PRESTERA_SDMA_BUFF_SIZE_MAX, GFP_DMA | GFP_ATOMIC);
+ if (!skb)
+ return -ENOMEM;
+
+ dma = dma_map_single(dev, skb->data, skb->len, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, dma))
+ goto err_dma_map;
+
+ if (buf->skb)
+ dma_unmap_single(dev, buf->buf_dma, buf->skb->len,
+ DMA_FROM_DEVICE);
+
+ buf->buf_dma = dma;
+ buf->skb = skb;
+
+ return 0;
+
+err_dma_map:
+ kfree_skb(skb);
+
+ return -ENOMEM;
+}
+
+static struct sk_buff *prestera_sdma_rx_skb_get(struct prestera_sdma *sdma,
+ struct prestera_sdma_buf *buf)
+{
+ dma_addr_t buf_dma = buf->buf_dma;
+ struct sk_buff *skb = buf->skb;
+ u32 len = skb->len;
+ int err;
+
+ err = prestera_sdma_rx_skb_alloc(sdma, buf);
+ if (err) {
+ buf->buf_dma = buf_dma;
+ buf->skb = skb;
+
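+ /* Refill failed: copy the received data into a new skb so the
+ * original DMA buffer can be reposted to the ring.
+ */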
+ skb = alloc_skb(skb->len, GFP_ATOMIC);
+ if (skb) {
+ skb_put(skb, len);
+ skb_copy_from_linear_data(buf->skb, skb->data, len);
+ }
+ }
+
+ prestera_sdma_rx_desc_init(sdma, buf->desc, buf->buf_dma);
+
+ return skb;
+}
+
+static int prestera_rxtx_process_skb(struct prestera_sdma *sdma,
+ struct sk_buff *skb)
+{
+ struct prestera_port *port;
+ struct prestera_dsa dsa;
+ u32 hw_port, dev_id;
+ u8 cpu_code;
+ int err;
+
+ skb_pull(skb, ETH_HLEN);
+
+ /* ethertype field is part of the dsa header */
+ err = prestera_dsa_parse(&dsa, skb->data - ETH_TLEN);
+ if (err)
+ return err;
+
+ dev_id = dsa.hw_dev_num;
+ hw_port = dsa.port_num;
+
+ port = prestera_port_find_by_hwid(sdma->sw, dev_id, hw_port);
+ if (unlikely(!port)) {
+ dev_warn_ratelimited(prestera_dev(sdma->sw), "received pkt for non-existent port(%u, %u)\n",
+ dev_id, hw_port);
+ return -ENOENT;
+ }
+
+ if (unlikely(!pskb_may_pull(skb, PRESTERA_DSA_HLEN)))
+ return -EINVAL;
+
+ /* remove DSA tag and update checksum */
+ skb_pull_rcsum(skb, PRESTERA_DSA_HLEN);
+
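+ /* Move the dst/src MAC addresses next to the original EtherType
+ * now that the DSA tag has been removed.
+ */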
+ memmove(skb->data - ETH_HLEN, skb->data - ETH_HLEN - PRESTERA_DSA_HLEN,
+ ETH_ALEN * 2);
+
+ skb_push(skb, ETH_HLEN);
+
+ skb->protocol = eth_type_trans(skb, port->dev);
+
+ if (dsa.vlan.is_tagged) {
+ u16 tci = dsa.vlan.vid & VLAN_VID_MASK;
+
+ tci |= dsa.vlan.vpt << VLAN_PRIO_SHIFT;
+ if (dsa.vlan.cfi_bit)
+ tci |= VLAN_CFI_MASK;
+
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tci);
+ }
+
+ cpu_code = dsa.cpu_code;
+ prestera_devlink_trap_report(port, skb, cpu_code);
+
+ return 0;
+}
+
+static int prestera_sdma_next_rx_buf_idx(int buf_idx)
+{
+ return (buf_idx + 1) % PRESTERA_SDMA_RX_DESC_PER_Q;
+}
+
+static int prestera_sdma_rx_poll(struct napi_struct *napi, int budget)
+{
+ int qnum = PRESTERA_SDMA_RX_QUEUE_NUM;
+ unsigned int rxq_done_map = 0;
+ struct prestera_sdma *sdma;
+ struct list_head rx_list;
+ unsigned int qmask;
+ int pkts_done = 0;
+ int q;
+
+ qnum = PRESTERA_SDMA_RX_QUEUE_NUM;
+ qmask = GENMASK(qnum - 1, 0);
+
+ INIT_LIST_HEAD(&rx_list);
+
+ sdma = container_of(napi, struct prestera_sdma, rx_napi);
+
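+ /* Service all RX queues round-robin until either the NAPI budget is
+ * spent or every queue reports no more received descriptors.
+ */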
+ while (pkts_done < budget && rxq_done_map != qmask) {
+ for (q = 0; q < qnum && pkts_done < budget; q++) {
+ struct prestera_rx_ring *ring = &sdma->rx_ring[q];
+ struct prestera_sdma_desc *desc;
+ struct prestera_sdma_buf *buf;
+ int buf_idx = ring->next_rx;
+ struct sk_buff *skb;
+
+ buf = &ring->bufs[buf_idx];
+ desc = buf->desc;
+
+ if (PRESTERA_SDMA_RX_DESC_IS_RCVD(desc)) {
+ rxq_done_map &= ~BIT(q);
+ } else {
+ rxq_done_map |= BIT(q);
+ continue;
+ }
+
+ pkts_done++;
+
+ __skb_trim(buf->skb, PRESTERA_SDMA_RX_DESC_PKT_LEN(desc));
+
+ skb = prestera_sdma_rx_skb_get(sdma, buf);
+ if (!skb)
+ goto rx_next_buf;
+
+ if (unlikely(prestera_rxtx_process_skb(sdma, skb)))
+ goto rx_next_buf;
+
+ list_add_tail(&skb->list, &rx_list);
+rx_next_buf:
+ ring->next_rx = prestera_sdma_next_rx_buf_idx(buf_idx);
+ }
+ }
+
+ if (pkts_done < budget && napi_complete_done(napi, pkts_done))
+ prestera_write(sdma->sw, PRESTERA_SDMA_RX_INTR_MASK_REG,
+ GENMASK(9, 2));
+
+ netif_receive_skb_list(&rx_list);
+
+ return pkts_done;
+}
+
+static void prestera_sdma_rx_fini(struct prestera_sdma *sdma)
+{
+ int qnum = PRESTERA_SDMA_RX_QUEUE_NUM;
+ int q, b;
+
+ /* disable all rx queues */
+ prestera_write(sdma->sw, PRESTERA_SDMA_RX_QUEUE_STATUS_REG,
+ GENMASK(15, 8));
+
+ for (q = 0; q < qnum; q++) {
+ struct prestera_rx_ring *ring = &sdma->rx_ring[q];
+
+ if (!ring->bufs)
+ break;
+
+ for (b = 0; b < PRESTERA_SDMA_RX_DESC_PER_Q; b++) {
+ struct prestera_sdma_buf *buf = &ring->bufs[b];
+
+ if (buf->desc_dma)
+ dma_pool_free(sdma->desc_pool, buf->desc,
+ buf->desc_dma);
+
+ if (!buf->skb)
+ continue;
+
+ if (buf->buf_dma != DMA_MAPPING_ERROR)
+ dma_unmap_single(sdma->sw->dev->dev,
+ buf->buf_dma, buf->skb->len,
+ DMA_FROM_DEVICE);
+ kfree_skb(buf->skb);
+ }
+ }
+}
+
+static int prestera_sdma_rx_init(struct prestera_sdma *sdma)
+{
+ int bnum = PRESTERA_SDMA_RX_DESC_PER_Q;
+ int qnum = PRESTERA_SDMA_RX_QUEUE_NUM;
+ int err;
+ int q;
+
+ /* disable all rx queues */
+ prestera_write(sdma->sw, PRESTERA_SDMA_RX_QUEUE_STATUS_REG,
+ GENMASK(15, 8));
+
+ for (q = 0; q < qnum; q++) {
+ struct prestera_sdma_buf *head, *tail, *next, *prev;
+ struct prestera_rx_ring *ring = &sdma->rx_ring[q];
+
+ ring->bufs = kmalloc_array(bnum, sizeof(*head), GFP_KERNEL);
+ if (!ring->bufs)
+ return -ENOMEM;
+
+ ring->next_rx = 0;
+
+ tail = &ring->bufs[bnum - 1];
+ head = &ring->bufs[0];
+ next = head;
+ prev = next;
+
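+ /* Allocate a descriptor and an RX skb for every ring entry and
+ * chain the descriptors together.
+ */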
+ do {
+ err = prestera_sdma_buf_init(sdma, next);
+ if (err)
+ return err;
+
+ err = prestera_sdma_rx_skb_alloc(sdma, next);
+ if (err)
+ return err;
+
+ prestera_sdma_rx_desc_init(sdma, next->desc,
+ next->buf_dma);
+
+ prestera_sdma_rx_desc_set_next(sdma, prev->desc,
+ next->desc_dma);
+
+ prev = next;
+ next++;
+ } while (prev != tail);
+
+ /* join tail with head to make a circular list */
+ prestera_sdma_rx_desc_set_next(sdma, tail->desc, head->desc_dma);
+
+ prestera_write(sdma->sw, PRESTERA_SDMA_RX_QUEUE_DESC_REG(q),
+ prestera_sdma_map(sdma, head->desc_dma));
+ }
+
+ /* make sure all rx descs are filled before enabling all rx queues */
+ wmb();
+
+ prestera_write(sdma->sw, PRESTERA_SDMA_RX_QUEUE_STATUS_REG,
+ GENMASK(7, 0));
+
+ return 0;
+}
+
+static void prestera_sdma_tx_desc_init(struct prestera_sdma *sdma,
+ struct prestera_sdma_desc *desc)
+{
+ desc->word1 = cpu_to_le32(PRESTERA_SDMA_TX_DESC_INIT);
+ desc->word2 = 0;
+}
+
+static void prestera_sdma_tx_desc_set_next(struct prestera_sdma *sdma,
+ struct prestera_sdma_desc *desc,
+ dma_addr_t next)
+{
+ desc->next = cpu_to_le32(prestera_sdma_map(sdma, next));
+}
+
+static void prestera_sdma_tx_desc_set_buf(struct prestera_sdma *sdma,
+ struct prestera_sdma_desc *desc,
+ dma_addr_t buf, size_t len)
+{
+ u32 word = le32_to_cpu(desc->word2);
+
+ u32p_replace_bits(&word, len + ETH_FCS_LEN, GENMASK(30, 16));
+
+ desc->buff = cpu_to_le32(prestera_sdma_map(sdma, buf));
+ desc->word2 = cpu_to_le32(word);
+}
+
+static void prestera_sdma_tx_desc_xmit(struct prestera_sdma_desc *desc)
+{
+ u32 word = le32_to_cpu(desc->word1);
+
+ word |= PRESTERA_SDMA_TX_DESC_DMA_OWN << 31;
+
+ /* make sure everything is written before enabling xmit */
+ wmb();
+
+ desc->word1 = cpu_to_le32(word);
+}
+
+static int prestera_sdma_tx_buf_map(struct prestera_sdma *sdma,
+ struct prestera_sdma_buf *buf,
+ struct sk_buff *skb)
+{
+ struct device *dma_dev = sdma->sw->dev->dev;
+ dma_addr_t dma;
+
+ dma = dma_map_single(dma_dev, skb->data, skb->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(dma_dev, dma))
+ return -ENOMEM;
+
+ buf->buf_dma = dma;
+ buf->skb = skb;
+
+ return 0;
+}
+
+static void prestera_sdma_tx_buf_unmap(struct prestera_sdma *sdma,
+ struct prestera_sdma_buf *buf)
+{
+ struct device *dma_dev = sdma->sw->dev->dev;
+
+ dma_unmap_single(dma_dev, buf->buf_dma, buf->skb->len, DMA_TO_DEVICE);
+}
+
+static void prestera_sdma_tx_recycle_work_fn(struct work_struct *work)
+{
+ int bnum = PRESTERA_SDMA_TX_DESC_PER_Q;
+ struct prestera_tx_ring *tx_ring;
+ struct prestera_sdma *sdma;
+ int b;
+
+ sdma = container_of(work, struct prestera_sdma, tx_work);
+
+ tx_ring = &sdma->tx_ring;
+
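+ /* Walk the TX ring and release buffers whose descriptors the hardware
+ * has handed back to the CPU: unmap DMA, free the skb and mark the
+ * slot reusable.
+ */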
+ for (b = 0; b < bnum; b++) {
+ struct prestera_sdma_buf *buf = &tx_ring->bufs[b];
+
+ if (!buf->is_used)
+ continue;
+
+ if (!PRESTERA_SDMA_TX_DESC_IS_SENT(buf->desc))
+ continue;
+
+ prestera_sdma_tx_buf_unmap(sdma, buf);
+ dev_consume_skb_any(buf->skb);
+ buf->skb = NULL;
+
+ /* make sure everything is cleaned up */
+ wmb();
+
+ buf->is_used = false;
+ }
+}
+
+static int prestera_sdma_tx_init(struct prestera_sdma *sdma)
+{
+ struct prestera_sdma_buf *head, *tail, *next, *prev;
+ struct prestera_tx_ring *tx_ring = &sdma->tx_ring;
+ int bnum = PRESTERA_SDMA_TX_DESC_PER_Q;
+ int err;
+
+ INIT_WORK(&sdma->tx_work, prestera_sdma_tx_recycle_work_fn);
+ spin_lock_init(&sdma->tx_lock);
+
+ tx_ring->bufs = kmalloc_array(bnum, sizeof(*head), GFP_KERNEL);
+ if (!tx_ring->bufs)
+ return -ENOMEM;
+
+ tail = &tx_ring->bufs[bnum - 1];
+ head = &tx_ring->bufs[0];
+ next = head;
+ prev = next;
+
+ tx_ring->max_burst = PRESTERA_SDMA_TX_MAX_BURST;
+ tx_ring->burst = tx_ring->max_burst;
+ tx_ring->next_tx = 0;
+
+ do {
+ err = prestera_sdma_buf_init(sdma, next);
+ if (err)
+ return err;
+
+ next->is_used = false;
+
+ prestera_sdma_tx_desc_init(sdma, next->desc);
+
+ prestera_sdma_tx_desc_set_next(sdma, prev->desc,
+ next->desc_dma);
+
+ prev = next;
+ next++;
+ } while (prev != tail);
+
+ /* join tail with head to make a circular list */
+ prestera_sdma_tx_desc_set_next(sdma, tail->desc, head->desc_dma);
+
+ /* make sure descriptors are written */
+ wmb();
+
+ prestera_write(sdma->sw, PRESTERA_SDMA_TX_QUEUE_DESC_REG,
+ prestera_sdma_map(sdma, head->desc_dma));
+
+ return 0;
+}
+
+static void prestera_sdma_tx_fini(struct prestera_sdma *sdma)
+{
+ struct prestera_tx_ring *ring = &sdma->tx_ring;
+ int bnum = PRESTERA_SDMA_TX_DESC_PER_Q;
+ int b;
+
+ cancel_work_sync(&sdma->tx_work);
+
+ if (!ring->bufs)
+ return;
+
+ for (b = 0; b < bnum; b++) {
+ struct prestera_sdma_buf *buf = &ring->bufs[b];
+
+ if (buf->desc)
+ dma_pool_free(sdma->desc_pool, buf->desc,
+ buf->desc_dma);
+
+ if (!buf->skb)
+ continue;
+
+ dma_unmap_single(sdma->sw->dev->dev, buf->buf_dma,
+ buf->skb->len, DMA_TO_DEVICE);
+
+ dev_consume_skb_any(buf->skb);
+ }
+}
+
+static void prestera_rxtx_handle_event(struct prestera_switch *sw,
+ struct prestera_event *evt,
+ void *arg)
+{
+ struct prestera_sdma *sdma = arg;
+
+ if (evt->id != PRESTERA_RXTX_EVENT_RCV_PKT)
+ return;
+
+ prestera_write(sdma->sw, PRESTERA_SDMA_RX_INTR_MASK_REG, 0);
+ napi_schedule(&sdma->rx_napi);
+}
+
+static int prestera_sdma_switch_init(struct prestera_switch *sw)
+{
+ struct prestera_sdma *sdma = &sw->rxtx->sdma;
+ struct device *dev = sw->dev->dev;
+ struct prestera_rxtx_params p;
+ int err;
+
+ p.use_sdma = true;
+
+ err = prestera_hw_rxtx_init(sw, &p);
+ if (err) {
+ dev_err(dev, "failed to init rxtx by hw\n");
+ return err;
+ }
+
+ sdma->dma_mask = dma_get_mask(dev);
+ sdma->map_addr = p.map_addr;
+ sdma->sw = sw;
+
+ sdma->desc_pool = dma_pool_create("desc_pool", dev,
+ sizeof(struct prestera_sdma_desc),
+ 16, 0);
+ if (!sdma->desc_pool)
+ return -ENOMEM;
+
+ err = prestera_sdma_rx_init(sdma);
+ if (err) {
+ dev_err(dev, "failed to init rx ring\n");
+ goto err_rx_init;
+ }
+
+ err = prestera_sdma_tx_init(sdma);
+ if (err) {
+ dev_err(dev, "failed to init tx ring\n");
+ goto err_tx_init;
+ }
+
+ err = prestera_hw_event_handler_register(sw, PRESTERA_EVENT_TYPE_RXTX,
+ prestera_rxtx_handle_event,
+ sdma);
+ if (err)
+ goto err_evt_register;
+
+ init_dummy_netdev(&sdma->napi_dev);
+
+ netif_napi_add(&sdma->napi_dev, &sdma->rx_napi, prestera_sdma_rx_poll);
+ napi_enable(&sdma->rx_napi);
+
+ return 0;
+
+err_evt_register:
+err_tx_init:
+ prestera_sdma_tx_fini(sdma);
+err_rx_init:
+ prestera_sdma_rx_fini(sdma);
+
+ dma_pool_destroy(sdma->desc_pool);
+ return err;
+}
+
+static void prestera_sdma_switch_fini(struct prestera_switch *sw)
+{
+ struct prestera_sdma *sdma = &sw->rxtx->sdma;
+
+ napi_disable(&sdma->rx_napi);
+ netif_napi_del(&sdma->rx_napi);
+ prestera_hw_event_handler_unregister(sw, PRESTERA_EVENT_TYPE_RXTX,
+ prestera_rxtx_handle_event);
+ prestera_sdma_tx_fini(sdma);
+ prestera_sdma_rx_fini(sdma);
+ dma_pool_destroy(sdma->desc_pool);
+}
+
+static bool prestera_sdma_is_ready(struct prestera_sdma *sdma)
+{
+ return !(prestera_read(sdma->sw, PRESTERA_SDMA_TX_QUEUE_START_REG) & 1);
+}
+
+static int prestera_sdma_tx_wait(struct prestera_sdma *sdma,
+ struct prestera_tx_ring *tx_ring)
+{
+ int tx_wait_num = PRESTERA_SDMA_WAIT_MUL * tx_ring->max_burst;
+
+ do {
+ if (prestera_sdma_is_ready(sdma))
+ return 0;
+
+ udelay(1);
+ } while (--tx_wait_num);
+
+ return -EBUSY;
+}
+
+static void prestera_sdma_tx_start(struct prestera_sdma *sdma)
+{
+ prestera_write(sdma->sw, PRESTERA_SDMA_TX_QUEUE_START_REG, 1);
+ schedule_work(&sdma->tx_work);
+}
+
+static netdev_tx_t prestera_sdma_xmit(struct prestera_sdma *sdma,
+ struct sk_buff *skb)
+{
+ struct device *dma_dev = sdma->sw->dev->dev;
+ struct net_device *dev = skb->dev;
+ struct prestera_tx_ring *tx_ring;
+ struct prestera_sdma_buf *buf;
+ int err;
+
+ spin_lock(&sdma->tx_lock);
+
+ tx_ring = &sdma->tx_ring;
+
+ buf = &tx_ring->bufs[tx_ring->next_tx];
+ if (buf->is_used) {
+ schedule_work(&sdma->tx_work);
+ goto drop_skb;
+ }
+
+ if (unlikely(eth_skb_pad(skb)))
+ goto drop_skb_nofree;
+
+ err = prestera_sdma_tx_buf_map(sdma, buf, skb);
+ if (err)
+ goto drop_skb;
+
+ prestera_sdma_tx_desc_set_buf(sdma, buf->desc, buf->buf_dma, skb->len);
+
+ dma_sync_single_for_device(dma_dev, buf->buf_dma, skb->len,
+ DMA_TO_DEVICE);
+
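+ /* Throttle: after max_burst frames in a row, wait for the SDMA TX
+ * engine to go idle before reusing descriptors.
+ */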
+ if (tx_ring->burst) {
+ tx_ring->burst--;
+ } else {
+ tx_ring->burst = tx_ring->max_burst;
+
+ err = prestera_sdma_tx_wait(sdma, tx_ring);
+ if (err)
+ goto drop_skb_unmap;
+ }
+
+ tx_ring->next_tx = (tx_ring->next_tx + 1) % PRESTERA_SDMA_TX_DESC_PER_Q;
+ prestera_sdma_tx_desc_xmit(buf->desc);
+ buf->is_used = true;
+
+ prestera_sdma_tx_start(sdma);
+
+ goto tx_done;
+
+drop_skb_unmap:
+ prestera_sdma_tx_buf_unmap(sdma, buf);
+drop_skb:
+ dev_consume_skb_any(skb);
+drop_skb_nofree:
+ dev->stats.tx_dropped++;
+tx_done:
+ spin_unlock(&sdma->tx_lock);
+ return NETDEV_TX_OK;
+}
+
+int prestera_rxtx_switch_init(struct prestera_switch *sw)
+{
+ struct prestera_rxtx *rxtx;
+ int err;
+
+ rxtx = kzalloc(sizeof(*rxtx), GFP_KERNEL);
+ if (!rxtx)
+ return -ENOMEM;
+
+ sw->rxtx = rxtx;
+
+ err = prestera_sdma_switch_init(sw);
+ if (err)
+ kfree(rxtx);
+
+ return err;
+}
+
+void prestera_rxtx_switch_fini(struct prestera_switch *sw)
+{
+ prestera_sdma_switch_fini(sw);
+ kfree(sw->rxtx);
+}
+
+int prestera_rxtx_port_init(struct prestera_port *port)
+{
+ port->dev->needed_headroom = PRESTERA_DSA_HLEN;
+ return 0;
+}
+
+netdev_tx_t prestera_rxtx_xmit(struct prestera_port *port, struct sk_buff *skb)
+{
+ struct prestera_dsa dsa;
+
+ dsa.hw_dev_num = port->dev_id;
+ dsa.port_num = port->hw_id;
+
+ if (skb_cow_head(skb, PRESTERA_DSA_HLEN) < 0)
+ return NET_XMIT_DROP;
+
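+ /* Open a PRESTERA_DSA_HLEN gap after the dst/src MAC addresses and
+ * build the DSA tag in it.
+ */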
+ skb_push(skb, PRESTERA_DSA_HLEN);
+ memmove(skb->data, skb->data + PRESTERA_DSA_HLEN, 2 * ETH_ALEN);
+
+ if (prestera_dsa_build(&dsa, skb->data + 2 * ETH_ALEN) != 0)
+ return NET_XMIT_DROP;
+
+ return prestera_sdma_xmit(&port->sw->rxtx->sdma, skb);
+}
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_rxtx.h b/drivers/net/ethernet/marvell/prestera/prestera_rxtx.h
new file mode 100644
index 000000000..882a1225c
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_rxtx.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved. */
+
+#ifndef _PRESTERA_RXTX_H_
+#define _PRESTERA_RXTX_H_
+
+#include <linux/netdevice.h>
+
+struct prestera_switch;
+struct prestera_port;
+
+int prestera_rxtx_switch_init(struct prestera_switch *sw);
+void prestera_rxtx_switch_fini(struct prestera_switch *sw);
+
+int prestera_rxtx_port_init(struct prestera_port *port);
+
+netdev_tx_t prestera_rxtx_xmit(struct prestera_port *port, struct sk_buff *skb);
+
+#endif /* _PRESTERA_RXTX_H_ */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_span.c b/drivers/net/ethernet/marvell/prestera/prestera_span.c
new file mode 100644
index 000000000..1005182ce
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_span.c
@@ -0,0 +1,191 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2020 Marvell International Ltd. All rights reserved */
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+
+#include "prestera.h"
+#include "prestera_hw.h"
+#include "prestera_acl.h"
+#include "prestera_flow.h"
+#include "prestera_span.h"
+
+struct prestera_span_entry {
+ struct list_head list;
+ struct prestera_port *port;
+ refcount_t ref_count;
+ u8 id;
+};
+
+struct prestera_span {
+ struct prestera_switch *sw;
+ struct list_head entries;
+};
+
+static struct prestera_span_entry *
+prestera_span_entry_create(struct prestera_port *port, u8 span_id)
+{
+ struct prestera_span_entry *entry;
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return ERR_PTR(-ENOMEM);
+
+ refcount_set(&entry->ref_count, 1);
+ entry->port = port;
+ entry->id = span_id;
+ list_add_tail(&entry->list, &port->sw->span->entries);
+
+ return entry;
+}
+
+static void prestera_span_entry_del(struct prestera_span_entry *entry)
+{
+ list_del(&entry->list);
+ kfree(entry);
+}
+
+static struct prestera_span_entry *
+prestera_span_entry_find_by_id(struct prestera_span *span, u8 span_id)
+{
+ struct prestera_span_entry *entry;
+
+ list_for_each_entry(entry, &span->entries, list) {
+ if (entry->id == span_id)
+ return entry;
+ }
+
+ return NULL;
+}
+
+static struct prestera_span_entry *
+prestera_span_entry_find_by_port(struct prestera_span *span,
+ struct prestera_port *port)
+{
+ struct prestera_span_entry *entry;
+
+ list_for_each_entry(entry, &span->entries, list) {
+ if (entry->port == port)
+ return entry;
+ }
+
+ return NULL;
+}
+
+static int prestera_span_get(struct prestera_port *port, u8 *span_id)
+{
+ u8 new_span_id;
+ struct prestera_switch *sw = port->sw;
+ struct prestera_span_entry *entry;
+ int err;
+
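+ /* Reuse an existing SPAN session for this port if one exists;
+ * otherwise allocate a new session id from hardware.
+ */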
+ entry = prestera_span_entry_find_by_port(sw->span, port);
+ if (entry) {
+ refcount_inc(&entry->ref_count);
+ *span_id = entry->id;
+ return 0;
+ }
+
+ err = prestera_hw_span_get(port, &new_span_id);
+ if (err)
+ return err;
+
+ entry = prestera_span_entry_create(port, new_span_id);
+ if (IS_ERR(entry)) {
+ prestera_hw_span_release(sw, new_span_id);
+ return PTR_ERR(entry);
+ }
+
+ *span_id = new_span_id;
+ return 0;
+}
+
+static int prestera_span_put(struct prestera_switch *sw, u8 span_id)
+{
+ struct prestera_span_entry *entry;
+ int err;
+
+ entry = prestera_span_entry_find_by_id(sw->span, span_id);
+ if (!entry)
+ return -ENOENT;
+
+ if (!refcount_dec_and_test(&entry->ref_count))
+ return 0;
+
+ err = prestera_hw_span_release(sw, span_id);
+ if (err)
+ return err;
+
+ prestera_span_entry_del(entry);
+ return 0;
+}
+
+int prestera_span_rule_add(struct prestera_flow_block_binding *binding,
+ struct prestera_port *to_port,
+ bool ingress)
+{
+ struct prestera_switch *sw = binding->port->sw;
+ u8 span_id;
+ int err;
+
+ if (binding->span_id != PRESTERA_SPAN_INVALID_ID)
+ /* port is already being mirrored */
+ return -EEXIST;
+
+ err = prestera_span_get(to_port, &span_id);
+ if (err)
+ return err;
+
+ err = prestera_hw_span_bind(binding->port, span_id, ingress);
+ if (err) {
+ prestera_span_put(sw, span_id);
+ return err;
+ }
+
+ binding->span_id = span_id;
+ return 0;
+}
+
+int prestera_span_rule_del(struct prestera_flow_block_binding *binding,
+ bool ingress)
+{
+ int err;
+
+ if (binding->span_id == PRESTERA_SPAN_INVALID_ID)
+ return -ENOENT;
+
+ err = prestera_hw_span_unbind(binding->port, ingress);
+ if (err)
+ return err;
+
+ err = prestera_span_put(binding->port->sw, binding->span_id);
+ if (err)
+ return err;
+
+ binding->span_id = PRESTERA_SPAN_INVALID_ID;
+ return 0;
+}
+
+int prestera_span_init(struct prestera_switch *sw)
+{
+ struct prestera_span *span;
+
+ span = kzalloc(sizeof(*span), GFP_KERNEL);
+ if (!span)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&span->entries);
+
+ sw->span = span;
+ span->sw = sw;
+
+ return 0;
+}
+
+void prestera_span_fini(struct prestera_switch *sw)
+{
+ struct prestera_span *span = sw->span;
+
+ WARN_ON(!list_empty(&span->entries));
+ kfree(span);
+}
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_span.h b/drivers/net/ethernet/marvell/prestera/prestera_span.h
new file mode 100644
index 000000000..493b68524
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_span.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved. */
+
+#ifndef _PRESTERA_SPAN_H_
+#define _PRESTERA_SPAN_H_
+
+#include <net/pkt_cls.h>
+
+#define PRESTERA_SPAN_INVALID_ID -1
+
+struct prestera_port;
+struct prestera_switch;
+struct prestera_flow_block_binding;
+
+int prestera_span_init(struct prestera_switch *sw);
+void prestera_span_fini(struct prestera_switch *sw);
+
+int prestera_span_rule_add(struct prestera_flow_block_binding *binding,
+ struct prestera_port *to_port,
+ bool ingress);
+int prestera_span_rule_del(struct prestera_flow_block_binding *binding,
+ bool ingress);
+
+#endif /* _PRESTERA_SPAN_H_ */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c
new file mode 100644
index 000000000..e548cd325
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c
@@ -0,0 +1,1918 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved */
+
+#include <linux/if_bridge.h>
+#include <linux/if_vlan.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <net/netevent.h>
+#include <net/switchdev.h>
+
+#include "prestera.h"
+#include "prestera_hw.h"
+#include "prestera_switchdev.h"
+
+#define PRESTERA_VID_ALL (0xffff)
+
+#define PRESTERA_DEFAULT_AGEING_TIME_MS 300000
+#define PRESTERA_MAX_AGEING_TIME_MS 1000000000
+#define PRESTERA_MIN_AGEING_TIME_MS 32000
+
+struct prestera_fdb_event_work {
+ struct work_struct work;
+ struct switchdev_notifier_fdb_info fdb_info;
+ struct net_device *dev;
+ unsigned long event;
+};
+
+struct prestera_switchdev {
+ struct prestera_switch *sw;
+ struct list_head bridge_list;
+ bool bridge_8021q_exists;
+ struct notifier_block swdev_nb_blk;
+ struct notifier_block swdev_nb;
+};
+
+struct prestera_bridge {
+ struct list_head head;
+ struct net_device *dev;
+ struct prestera_switchdev *swdev;
+ struct list_head port_list;
+ struct list_head br_mdb_entry_list;
+ bool mrouter_exist;
+ bool vlan_enabled;
+ bool multicast_enabled;
+ u16 bridge_id;
+};
+
+struct prestera_bridge_port {
+ struct list_head head;
+ struct net_device *dev;
+ struct prestera_bridge *bridge;
+ struct list_head vlan_list;
+ struct list_head br_mdb_port_list;
+ refcount_t ref_count;
+ unsigned long flags;
+ bool mrouter;
+ u8 stp_state;
+};
+
+struct prestera_bridge_vlan {
+ struct list_head head;
+ struct list_head port_vlan_list;
+ u16 vid;
+};
+
+struct prestera_port_vlan {
+ struct list_head br_vlan_head;
+ struct list_head port_head;
+ struct prestera_port *port;
+ struct prestera_bridge_port *br_port;
+ u16 vid;
+};
+
+struct prestera_br_mdb_port {
+ struct prestera_bridge_port *br_port;
+ struct list_head br_mdb_port_node;
+};
+
+/* Software representation of MDB table. */
+struct prestera_br_mdb_entry {
+ struct prestera_bridge *bridge;
+ struct prestera_mdb_entry *mdb;
+ struct list_head br_mdb_port_list;
+ struct list_head br_mdb_entry_node;
+ bool enabled;
+};
+
+static struct workqueue_struct *swdev_wq;
+
+static void prestera_bridge_port_put(struct prestera_bridge_port *br_port);
+
+static int prestera_port_vid_stp_set(struct prestera_port *port, u16 vid,
+ u8 state);
+
+static struct prestera_bridge *
+prestera_bridge_find(const struct prestera_switch *sw,
+ const struct net_device *br_dev)
+{
+ struct prestera_bridge *bridge;
+
+ list_for_each_entry(bridge, &sw->swdev->bridge_list, head)
+ if (bridge->dev == br_dev)
+ return bridge;
+
+ return NULL;
+}
+
+static struct prestera_bridge_port *
+__prestera_bridge_port_find(const struct prestera_bridge *bridge,
+ const struct net_device *brport_dev)
+{
+ struct prestera_bridge_port *br_port;
+
+ list_for_each_entry(br_port, &bridge->port_list, head)
+ if (br_port->dev == brport_dev)
+ return br_port;
+
+ return NULL;
+}
+
+static struct prestera_bridge_port *
+prestera_bridge_port_find(struct prestera_switch *sw,
+ struct net_device *brport_dev)
+{
+ struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev);
+ struct prestera_bridge *bridge;
+
+ if (!br_dev)
+ return NULL;
+
+ bridge = prestera_bridge_find(sw, br_dev);
+ if (!bridge)
+ return NULL;
+
+ return __prestera_bridge_port_find(bridge, brport_dev);
+}
+
+static void
+prestera_br_port_flags_reset(struct prestera_bridge_port *br_port,
+ struct prestera_port *port)
+{
+ prestera_port_uc_flood_set(port, false);
+ prestera_port_mc_flood_set(port, false);
+ prestera_port_learning_set(port, false);
+ prestera_port_br_locked_set(port, false);
+}
+
+static int prestera_br_port_flags_set(struct prestera_bridge_port *br_port,
+ struct prestera_port *port)
+{
+ int err;
+
+ err = prestera_port_uc_flood_set(port, br_port->flags & BR_FLOOD);
+ if (err)
+ goto err_out;
+
+ err = prestera_port_mc_flood_set(port, br_port->flags & BR_MCAST_FLOOD);
+ if (err)
+ goto err_out;
+
+ err = prestera_port_learning_set(port, br_port->flags & BR_LEARNING);
+ if (err)
+ goto err_out;
+
+ err = prestera_port_br_locked_set(port,
+ br_port->flags & BR_PORT_LOCKED);
+ if (err)
+ goto err_out;
+
+ return 0;
+
+err_out:
+ prestera_br_port_flags_reset(br_port, port);
+ return err;
+}
+
+static struct prestera_bridge_vlan *
+prestera_bridge_vlan_create(struct prestera_bridge_port *br_port, u16 vid)
+{
+ struct prestera_bridge_vlan *br_vlan;
+
+ br_vlan = kzalloc(sizeof(*br_vlan), GFP_KERNEL);
+ if (!br_vlan)
+ return NULL;
+
+ INIT_LIST_HEAD(&br_vlan->port_vlan_list);
+ br_vlan->vid = vid;
+ list_add(&br_vlan->head, &br_port->vlan_list);
+
+ return br_vlan;
+}
+
+static void prestera_bridge_vlan_destroy(struct prestera_bridge_vlan *br_vlan)
+{
+ list_del(&br_vlan->head);
+ WARN_ON(!list_empty(&br_vlan->port_vlan_list));
+ kfree(br_vlan);
+}
+
+static struct prestera_bridge_vlan *
+prestera_bridge_vlan_by_vid(struct prestera_bridge_port *br_port, u16 vid)
+{
+ struct prestera_bridge_vlan *br_vlan;
+
+ list_for_each_entry(br_vlan, &br_port->vlan_list, head) {
+ if (br_vlan->vid == vid)
+ return br_vlan;
+ }
+
+ return NULL;
+}
+
+static int prestera_bridge_vlan_port_count(struct prestera_bridge *bridge,
+ u16 vid)
+{
+ struct prestera_bridge_port *br_port;
+ struct prestera_bridge_vlan *br_vlan;
+ int count = 0;
+
+ list_for_each_entry(br_port, &bridge->port_list, head) {
+ list_for_each_entry(br_vlan, &br_port->vlan_list, head) {
+ if (br_vlan->vid == vid) {
+ count += 1;
+ break;
+ }
+ }
+ }
+
+ return count;
+}
+
+static void prestera_bridge_vlan_put(struct prestera_bridge_vlan *br_vlan)
+{
+ if (list_empty(&br_vlan->port_vlan_list))
+ prestera_bridge_vlan_destroy(br_vlan);
+}
+
+static struct prestera_port_vlan *
+prestera_port_vlan_by_vid(struct prestera_port *port, u16 vid)
+{
+ struct prestera_port_vlan *port_vlan;
+
+ list_for_each_entry(port_vlan, &port->vlans_list, port_head) {
+ if (port_vlan->vid == vid)
+ return port_vlan;
+ }
+
+ return NULL;
+}
+
+static struct prestera_port_vlan *
+prestera_port_vlan_create(struct prestera_port *port, u16 vid, bool untagged)
+{
+ struct prestera_port_vlan *port_vlan;
+ int err;
+
+ port_vlan = prestera_port_vlan_by_vid(port, vid);
+ if (port_vlan)
+ return ERR_PTR(-EEXIST);
+
+ err = prestera_hw_vlan_port_set(port, vid, true, untagged);
+ if (err)
+ return ERR_PTR(err);
+
+ port_vlan = kzalloc(sizeof(*port_vlan), GFP_KERNEL);
+ if (!port_vlan) {
+ err = -ENOMEM;
+ goto err_port_vlan_alloc;
+ }
+
+ port_vlan->port = port;
+ port_vlan->vid = vid;
+
+ list_add(&port_vlan->port_head, &port->vlans_list);
+
+ return port_vlan;
+
+err_port_vlan_alloc:
+ prestera_hw_vlan_port_set(port, vid, false, false);
+ return ERR_PTR(err);
+}
+
+static int prestera_fdb_add(struct prestera_port *port,
+ const unsigned char *mac, u16 vid, bool dynamic)
+{
+ if (prestera_port_is_lag_member(port))
+ return prestera_hw_lag_fdb_add(port->sw, prestera_port_lag_id(port),
+ mac, vid, dynamic);
+
+ return prestera_hw_fdb_add(port, mac, vid, dynamic);
+}
+
+static int prestera_fdb_del(struct prestera_port *port,
+ const unsigned char *mac, u16 vid)
+{
+ if (prestera_port_is_lag_member(port))
+ return prestera_hw_lag_fdb_del(port->sw, prestera_port_lag_id(port),
+ mac, vid);
+ else
+ return prestera_hw_fdb_del(port, mac, vid);
+}
+
+static int prestera_fdb_flush_port_vlan(struct prestera_port *port, u16 vid,
+ u32 mode)
+{
+ if (prestera_port_is_lag_member(port))
+ return prestera_hw_fdb_flush_lag_vlan(port->sw, prestera_port_lag_id(port),
+ vid, mode);
+ else
+ return prestera_hw_fdb_flush_port_vlan(port, vid, mode);
+}
+
+static int prestera_fdb_flush_port(struct prestera_port *port, u32 mode)
+{
+ if (prestera_port_is_lag_member(port))
+ return prestera_hw_fdb_flush_lag(port->sw, prestera_port_lag_id(port),
+ mode);
+ else
+ return prestera_hw_fdb_flush_port(port, mode);
+}
+
+static void
+prestera_mdb_port_del(struct prestera_mdb_entry *mdb,
+ struct net_device *orig_dev)
+{
+ struct prestera_flood_domain *fl_domain = mdb->flood_domain;
+ struct prestera_flood_domain_port *flood_domain_port;
+
+ flood_domain_port = prestera_flood_domain_port_find(fl_domain,
+ orig_dev,
+ mdb->vid);
+ if (flood_domain_port)
+ prestera_flood_domain_port_destroy(flood_domain_port);
+}
+
+static void
+prestera_br_mdb_entry_put(struct prestera_br_mdb_entry *br_mdb)
+{
+ struct prestera_bridge_port *br_port;
+
+ if (list_empty(&br_mdb->br_mdb_port_list)) {
+ list_for_each_entry(br_port, &br_mdb->bridge->port_list, head)
+ prestera_mdb_port_del(br_mdb->mdb, br_port->dev);
+
+ prestera_mdb_entry_destroy(br_mdb->mdb);
+ list_del(&br_mdb->br_mdb_entry_node);
+ kfree(br_mdb);
+ }
+}
+
+static void
+prestera_br_mdb_port_del(struct prestera_br_mdb_entry *br_mdb,
+ struct prestera_bridge_port *br_port)
+{
+ struct prestera_br_mdb_port *br_mdb_port, *tmp;
+
+ list_for_each_entry_safe(br_mdb_port, tmp, &br_mdb->br_mdb_port_list,
+ br_mdb_port_node) {
+ if (br_mdb_port->br_port == br_port) {
+ list_del(&br_mdb_port->br_mdb_port_node);
+ kfree(br_mdb_port);
+ }
+ }
+}
+
+static void
+prestera_mdb_flush_bridge_port(struct prestera_bridge_port *br_port)
+{
+ struct prestera_br_mdb_port *br_mdb_port, *tmp_port;
+ struct prestera_br_mdb_entry *br_mdb, *br_mdb_tmp;
+ struct prestera_bridge *br_dev = br_port->bridge;
+
+ list_for_each_entry_safe(br_mdb, br_mdb_tmp, &br_dev->br_mdb_entry_list,
+ br_mdb_entry_node) {
+ list_for_each_entry_safe(br_mdb_port, tmp_port,
+ &br_mdb->br_mdb_port_list,
+ br_mdb_port_node) {
+ prestera_mdb_port_del(br_mdb->mdb,
+ br_mdb_port->br_port->dev);
+ prestera_br_mdb_port_del(br_mdb, br_mdb_port->br_port);
+ }
+ prestera_br_mdb_entry_put(br_mdb);
+ }
+}
+
+static void
+prestera_port_vlan_bridge_leave(struct prestera_port_vlan *port_vlan)
+{
+ u32 fdb_flush_mode = PRESTERA_FDB_FLUSH_MODE_DYNAMIC;
+ struct prestera_port *port = port_vlan->port;
+ struct prestera_bridge_vlan *br_vlan;
+ struct prestera_bridge_port *br_port;
+ bool last_port, last_vlan;
+ u16 vid = port_vlan->vid;
+ int port_count;
+
+ br_port = port_vlan->br_port;
+ port_count = prestera_bridge_vlan_port_count(br_port->bridge, vid);
+ br_vlan = prestera_bridge_vlan_by_vid(br_port, vid);
+
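+ /* Pick the narrowest FDB flush that still covers everything: the
+ * whole port if this was its last VLAN, the whole VLAN if this was
+ * its last port, otherwise just this port/VLAN pair.
+ */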
+ last_vlan = list_is_singular(&br_port->vlan_list);
+ last_port = port_count == 1;
+
+ if (last_vlan)
+ prestera_fdb_flush_port(port, fdb_flush_mode);
+ else if (last_port)
+ prestera_hw_fdb_flush_vlan(port->sw, vid, fdb_flush_mode);
+ else
+ prestera_fdb_flush_port_vlan(port, vid, fdb_flush_mode);
+
+ prestera_mdb_flush_bridge_port(br_port);
+
+ list_del(&port_vlan->br_vlan_head);
+ prestera_bridge_vlan_put(br_vlan);
+ prestera_bridge_port_put(br_port);
+ port_vlan->br_port = NULL;
+}
+
+static void prestera_port_vlan_destroy(struct prestera_port_vlan *port_vlan)
+{
+ struct prestera_port *port = port_vlan->port;
+ u16 vid = port_vlan->vid;
+
+ if (port_vlan->br_port)
+ prestera_port_vlan_bridge_leave(port_vlan);
+
+ prestera_hw_vlan_port_set(port, vid, false, false);
+ list_del(&port_vlan->port_head);
+ kfree(port_vlan);
+}
+
+static struct prestera_bridge *
+prestera_bridge_create(struct prestera_switchdev *swdev, struct net_device *dev)
+{
+ bool vlan_enabled = br_vlan_enabled(dev);
+ struct prestera_bridge *bridge;
+ u16 bridge_id;
+ int err;
+
+ if (vlan_enabled && swdev->bridge_8021q_exists) {
+ netdev_err(dev, "Only one VLAN-aware bridge is supported\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
+ if (!bridge)
+ return ERR_PTR(-ENOMEM);
+
+ if (vlan_enabled) {
+ swdev->bridge_8021q_exists = true;
+ } else {
+ err = prestera_hw_bridge_create(swdev->sw, &bridge_id);
+ if (err) {
+ kfree(bridge);
+ return ERR_PTR(err);
+ }
+
+ bridge->bridge_id = bridge_id;
+ }
+
+ bridge->vlan_enabled = vlan_enabled;
+ bridge->swdev = swdev;
+ bridge->dev = dev;
+ bridge->multicast_enabled = br_multicast_enabled(dev);
+
+ INIT_LIST_HEAD(&bridge->port_list);
+ INIT_LIST_HEAD(&bridge->br_mdb_entry_list);
+
+ list_add(&bridge->head, &swdev->bridge_list);
+
+ return bridge;
+}
+
+static void prestera_bridge_destroy(struct prestera_bridge *bridge)
+{
+ struct prestera_switchdev *swdev = bridge->swdev;
+
+ list_del(&bridge->head);
+
+ if (bridge->vlan_enabled)
+ swdev->bridge_8021q_exists = false;
+ else
+ prestera_hw_bridge_delete(swdev->sw, bridge->bridge_id);
+
+ WARN_ON(!list_empty(&bridge->br_mdb_entry_list));
+ WARN_ON(!list_empty(&bridge->port_list));
+ kfree(bridge);
+}
+
+static void prestera_bridge_put(struct prestera_bridge *bridge)
+{
+ if (list_empty(&bridge->port_list))
+ prestera_bridge_destroy(bridge);
+}
+
+static
+struct prestera_bridge *prestera_bridge_by_dev(struct prestera_switchdev *swdev,
+ const struct net_device *dev)
+{
+ struct prestera_bridge *bridge;
+
+ list_for_each_entry(bridge, &swdev->bridge_list, head)
+ if (bridge->dev == dev)
+ return bridge;
+
+ return NULL;
+}
+
+static struct prestera_bridge_port *
+__prestera_bridge_port_by_dev(struct prestera_bridge *bridge,
+ struct net_device *dev)
+{
+ struct prestera_bridge_port *br_port;
+
+ list_for_each_entry(br_port, &bridge->port_list, head) {
+ if (br_port->dev == dev)
+ return br_port;
+ }
+
+ return NULL;
+}
+
+static int prestera_match_upper_bridge_dev(struct net_device *dev,
+ struct netdev_nested_priv *priv)
+{
+ if (netif_is_bridge_master(dev))
+ priv->data = dev;
+
+ return 0;
+}
+
+static struct net_device *prestera_get_upper_bridge_dev(struct net_device *dev)
+{
+ struct netdev_nested_priv priv = { };
+
+ netdev_walk_all_upper_dev_rcu(dev, prestera_match_upper_bridge_dev,
+ &priv);
+ return priv.data;
+}
+
+static struct prestera_bridge_port *
+prestera_bridge_port_by_dev(struct prestera_switchdev *swdev,
+ struct net_device *dev)
+{
+ struct net_device *br_dev = prestera_get_upper_bridge_dev(dev);
+ struct prestera_bridge *bridge;
+
+ if (!br_dev)
+ return NULL;
+
+ bridge = prestera_bridge_by_dev(swdev, br_dev);
+ if (!bridge)
+ return NULL;
+
+ return __prestera_bridge_port_by_dev(bridge, dev);
+}
+
+static struct prestera_bridge_port *
+prestera_bridge_port_create(struct prestera_bridge *bridge,
+ struct net_device *dev)
+{
+ struct prestera_bridge_port *br_port;
+
+ br_port = kzalloc(sizeof(*br_port), GFP_KERNEL);
+ if (!br_port)
+ return NULL;
+
+ br_port->flags = BR_LEARNING | BR_FLOOD | BR_LEARNING_SYNC |
+ BR_MCAST_FLOOD;
+ br_port->stp_state = BR_STATE_DISABLED;
+ refcount_set(&br_port->ref_count, 1);
+ br_port->bridge = bridge;
+ br_port->dev = dev;
+
+ INIT_LIST_HEAD(&br_port->vlan_list);
+ list_add(&br_port->head, &bridge->port_list);
+ INIT_LIST_HEAD(&br_port->br_mdb_port_list);
+
+ return br_port;
+}
+
+static void
+prestera_bridge_port_destroy(struct prestera_bridge_port *br_port)
+{
+ list_del(&br_port->head);
+ WARN_ON(!list_empty(&br_port->vlan_list));
+ WARN_ON(!list_empty(&br_port->br_mdb_port_list));
+ kfree(br_port);
+}
+
+static void prestera_bridge_port_get(struct prestera_bridge_port *br_port)
+{
+ refcount_inc(&br_port->ref_count);
+}
+
+static void prestera_bridge_port_put(struct prestera_bridge_port *br_port)
+{
+ struct prestera_bridge *bridge = br_port->bridge;
+
+ if (refcount_dec_and_test(&br_port->ref_count)) {
+ prestera_bridge_port_destroy(br_port);
+ prestera_bridge_put(bridge);
+ }
+}
+
+static struct prestera_bridge_port *
+prestera_bridge_port_add(struct prestera_bridge *bridge, struct net_device *dev)
+{
+ struct prestera_bridge_port *br_port;
+
+ br_port = __prestera_bridge_port_by_dev(bridge, dev);
+ if (br_port) {
+ prestera_bridge_port_get(br_port);
+ return br_port;
+ }
+
+ br_port = prestera_bridge_port_create(bridge, dev);
+ if (!br_port)
+ return ERR_PTR(-ENOMEM);
+
+ return br_port;
+}
+
+static int
+prestera_bridge_1d_port_join(struct prestera_bridge_port *br_port)
+{
+ struct prestera_port *port = netdev_priv(br_port->dev);
+ struct prestera_bridge *bridge = br_port->bridge;
+ int err;
+
+ err = prestera_hw_bridge_port_add(port, bridge->bridge_id);
+ if (err)
+ return err;
+
+ err = prestera_br_port_flags_set(br_port, port);
+ if (err)
+ goto err_flags2port_set;
+
+ return 0;
+
+err_flags2port_set:
+ prestera_hw_bridge_port_delete(port, bridge->bridge_id);
+
+ return err;
+}
+
+int prestera_bridge_port_join(struct net_device *br_dev,
+ struct prestera_port *port,
+ struct netlink_ext_ack *extack)
+{
+ struct prestera_switchdev *swdev = port->sw->swdev;
+ struct prestera_bridge_port *br_port;
+ struct prestera_bridge *bridge;
+ int err;
+
+ bridge = prestera_bridge_by_dev(swdev, br_dev);
+ if (!bridge) {
+ bridge = prestera_bridge_create(swdev, br_dev);
+ if (IS_ERR(bridge))
+ return PTR_ERR(bridge);
+ }
+
+ br_port = prestera_bridge_port_add(bridge, port->dev);
+ if (IS_ERR(br_port)) {
+ prestera_bridge_put(bridge);
+ return PTR_ERR(br_port);
+ }
+
+ err = switchdev_bridge_port_offload(br_port->dev, port->dev, NULL,
+ NULL, NULL, false, extack);
+ if (err)
+ goto err_switchdev_offload;
+
+ if (bridge->vlan_enabled)
+ return 0;
+
+ err = prestera_bridge_1d_port_join(br_port);
+ if (err)
+ goto err_port_join;
+
+ return 0;
+
+err_port_join:
+ switchdev_bridge_port_unoffload(br_port->dev, NULL, NULL, NULL);
+err_switchdev_offload:
+ prestera_bridge_port_put(br_port);
+ return err;
+}
+
+static void prestera_bridge_1q_port_leave(struct prestera_bridge_port *br_port)
+{
+ struct prestera_port *port = netdev_priv(br_port->dev);
+
+ prestera_hw_fdb_flush_port(port, PRESTERA_FDB_FLUSH_MODE_ALL);
+ prestera_port_pvid_set(port, PRESTERA_DEFAULT_VID);
+}
+
+static void prestera_bridge_1d_port_leave(struct prestera_bridge_port *br_port)
+{
+ struct prestera_port *port = netdev_priv(br_port->dev);
+
+ prestera_hw_fdb_flush_port(port, PRESTERA_FDB_FLUSH_MODE_ALL);
+ prestera_hw_bridge_port_delete(port, br_port->bridge->bridge_id);
+}
+
+static int prestera_port_vid_stp_set(struct prestera_port *port, u16 vid,
+ u8 state)
+{
+ u8 hw_state = state;
+
+ switch (state) {
+ case BR_STATE_DISABLED:
+ hw_state = PRESTERA_STP_DISABLED;
+ break;
+
+ case BR_STATE_BLOCKING:
+ case BR_STATE_LISTENING:
+ hw_state = PRESTERA_STP_BLOCK_LISTEN;
+ break;
+
+ case BR_STATE_LEARNING:
+ hw_state = PRESTERA_STP_LEARN;
+ break;
+
+ case BR_STATE_FORWARDING:
+ hw_state = PRESTERA_STP_FORWARD;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return prestera_hw_vlan_port_stp_set(port, vid, hw_state);
+}
+
+void prestera_bridge_port_leave(struct net_device *br_dev,
+ struct prestera_port *port)
+{
+ struct prestera_switchdev *swdev = port->sw->swdev;
+ struct prestera_bridge_port *br_port;
+ struct prestera_bridge *bridge;
+
+ bridge = prestera_bridge_by_dev(swdev, br_dev);
+ if (!bridge)
+ return;
+
+ br_port = __prestera_bridge_port_by_dev(bridge, port->dev);
+ if (!br_port)
+ return;
+
+ bridge = br_port->bridge;
+
+ if (bridge->vlan_enabled)
+ prestera_bridge_1q_port_leave(br_port);
+ else
+ prestera_bridge_1d_port_leave(br_port);
+
+ switchdev_bridge_port_unoffload(br_port->dev, NULL, NULL, NULL);
+
+ prestera_mdb_flush_bridge_port(br_port);
+
+ prestera_br_port_flags_reset(br_port, port);
+ prestera_port_vid_stp_set(port, PRESTERA_VID_ALL, BR_STATE_FORWARDING);
+ prestera_bridge_port_put(br_port);
+}
+
+static int prestera_port_attr_br_flags_set(struct prestera_port *port,
+ struct net_device *dev,
+ struct switchdev_brport_flags flags)
+{
+ struct prestera_bridge_port *br_port;
+
+ br_port = prestera_bridge_port_by_dev(port->sw->swdev, dev);
+ if (!br_port)
+ return 0;
+
+ br_port->flags &= ~flags.mask;
+ br_port->flags |= flags.val & flags.mask;
+ return prestera_br_port_flags_set(br_port, port);
+}
+
+static int prestera_port_attr_br_ageing_set(struct prestera_port *port,
+ unsigned long ageing_clock_t)
+{
+ unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
+ u32 ageing_time_ms = jiffies_to_msecs(ageing_jiffies);
+ struct prestera_switch *sw = port->sw;
+
+ if (ageing_time_ms < PRESTERA_MIN_AGEING_TIME_MS ||
+ ageing_time_ms > PRESTERA_MAX_AGEING_TIME_MS)
+ return -ERANGE;
+
+ return prestera_hw_switch_ageing_set(sw, ageing_time_ms);
+}
+
+static int prestera_port_attr_br_vlan_set(struct prestera_port *port,
+ struct net_device *dev,
+ bool vlan_enabled)
+{
+ struct prestera_switch *sw = port->sw;
+ struct prestera_bridge *bridge;
+
+ bridge = prestera_bridge_by_dev(sw->swdev, dev);
+ if (WARN_ON(!bridge))
+ return -EINVAL;
+
+ if (bridge->vlan_enabled == vlan_enabled)
+ return 0;
+
+ netdev_err(bridge->dev, "VLAN filtering can't be changed for existing bridge\n");
+
+ return -EINVAL;
+}
+
+static int prestera_port_bridge_vlan_stp_set(struct prestera_port *port,
+ struct prestera_bridge_vlan *br_vlan,
+ u8 state)
+{
+ struct prestera_port_vlan *port_vlan;
+
+ list_for_each_entry(port_vlan, &br_vlan->port_vlan_list, br_vlan_head) {
+ if (port_vlan->port != port)
+ continue;
+
+ return prestera_port_vid_stp_set(port, br_vlan->vid, state);
+ }
+
+ return 0;
+}
+
+static int prestera_port_attr_stp_state_set(struct prestera_port *port,
+ struct net_device *dev,
+ u8 state)
+{
+ struct prestera_bridge_port *br_port;
+ struct prestera_bridge_vlan *br_vlan;
+ int err;
+ u16 vid;
+
+ br_port = prestera_bridge_port_by_dev(port->sw->swdev, dev);
+ if (!br_port)
+ return 0;
+
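+ /* For a VLAN-unaware bridge the bridge_id is used as the VID; for a
+ * VLAN-aware bridge the state is applied per configured VLAN.
+ */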
+ if (!br_port->bridge->vlan_enabled) {
+ vid = br_port->bridge->bridge_id;
+ err = prestera_port_vid_stp_set(port, vid, state);
+ if (err)
+ goto err_port_stp_set;
+ } else {
+ list_for_each_entry(br_vlan, &br_port->vlan_list, head) {
+ err = prestera_port_bridge_vlan_stp_set(port, br_vlan,
+ state);
+ if (err)
+ goto err_port_vlan_stp_set;
+ }
+ }
+
+ br_port->stp_state = state;
+
+ return 0;
+
+err_port_vlan_stp_set:
+ list_for_each_entry_continue_reverse(br_vlan, &br_port->vlan_list, head)
+ prestera_port_bridge_vlan_stp_set(port, br_vlan, br_port->stp_state);
+ return err;
+
+err_port_stp_set:
+ prestera_port_vid_stp_set(port, vid, br_port->stp_state);
+
+ return err;
+}
+
+static int
+prestera_br_port_lag_mdb_mc_enable_sync(struct prestera_bridge_port *br_port,
+ bool enabled)
+{
+ struct prestera_port *pr_port;
+ struct prestera_switch *sw;
+ u16 lag_id;
+ int err;
+
+ pr_port = prestera_port_dev_lower_find(br_port->dev);
+ if (!pr_port)
+ return 0;
+
+ sw = pr_port->sw;
+ err = prestera_lag_id(sw, br_port->dev, &lag_id);
+ if (err)
+ return err;
+
+ list_for_each_entry(pr_port, &sw->port_list, list) {
+ if (pr_port->lag->lag_id == lag_id) {
+ err = prestera_port_mc_flood_set(pr_port, enabled);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int prestera_br_mdb_mc_enable_sync(struct prestera_bridge *br_dev)
+{
+ struct prestera_bridge_port *br_port;
+ struct prestera_port *port;
+ bool enabled;
+ int err;
+
+ /* If an mrouter exists:
+ * - make sure every mrouter receives unreg mcast traffic;
+ * if no mrouter exists:
+ * - make sure every port receives unreg mcast traffic;
+ */
+ list_for_each_entry(br_port, &br_dev->port_list, head) {
+ if (br_dev->multicast_enabled && br_dev->mrouter_exist)
+ enabled = br_port->mrouter;
+ else
+ enabled = br_port->flags & BR_MCAST_FLOOD;
+
+ if (netif_is_lag_master(br_port->dev)) {
+ err = prestera_br_port_lag_mdb_mc_enable_sync(br_port,
+ enabled);
+ if (err)
+ return err;
+ continue;
+ }
+
+ port = prestera_port_dev_lower_find(br_port->dev);
+ if (!port)
+ continue;
+
+ err = prestera_port_mc_flood_set(port, enabled);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static bool
+prestera_br_mdb_port_is_member(struct prestera_br_mdb_entry *br_mdb,
+ struct net_device *orig_dev)
+{
+ struct prestera_br_mdb_port *tmp_port;
+
+ list_for_each_entry(tmp_port, &br_mdb->br_mdb_port_list,
+ br_mdb_port_node)
+ if (tmp_port->br_port->dev == orig_dev)
+ return true;
+
+ return false;
+}
+
+static int
+prestera_mdb_port_add(struct prestera_mdb_entry *mdb,
+ struct net_device *orig_dev,
+ const unsigned char addr[ETH_ALEN], u16 vid)
+{
+ struct prestera_flood_domain *flood_domain = mdb->flood_domain;
+ int err;
+
+ if (!prestera_flood_domain_port_find(flood_domain,
+ orig_dev, vid)) {
+ err = prestera_flood_domain_port_create(flood_domain, orig_dev,
+ vid);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/* Sync bridge mdb (software table) with HW table (if MC is enabled). */
+static int prestera_br_mdb_sync(struct prestera_bridge *br_dev)
+{
+ struct prestera_br_mdb_port *br_mdb_port;
+ struct prestera_bridge_port *br_port;
+ struct prestera_br_mdb_entry *br_mdb;
+ struct prestera_mdb_entry *mdb;
+ struct prestera_port *pr_port;
+ int err = 0;
+
+ if (!br_dev->multicast_enabled)
+ return 0;
+
+ list_for_each_entry(br_mdb, &br_dev->br_mdb_entry_list,
+ br_mdb_entry_node) {
+ mdb = br_mdb->mdb;
+ /* Make sure every port that has been explicitly added to the mdb
+ * joins the specified group.
+ */
+ list_for_each_entry(br_mdb_port, &br_mdb->br_mdb_port_list,
+ br_mdb_port_node) {
+ br_port = br_mdb_port->br_port;
+ pr_port = prestera_port_dev_lower_find(br_port->dev);
+
+ /* Match only mdb and br_mdb ports that belong to the
+ * same broadcast domain.
+ */
+ if (br_dev->vlan_enabled &&
+ !prestera_port_vlan_by_vid(pr_port,
+ mdb->vid))
+ continue;
+
+ /* If the port is not in the MDB or there is no mrouter,
+ * clear the HW mdb entry.
+ */
+ if (prestera_br_mdb_port_is_member(br_mdb,
+ br_mdb_port->br_port->dev) &&
+ br_dev->mrouter_exist)
+ err = prestera_mdb_port_add(mdb, br_port->dev,
+ mdb->addr,
+ mdb->vid);
+ else
+ prestera_mdb_port_del(mdb, br_port->dev);
+
+ if (err)
+ return err;
+ }
+
+ /* Make sure that every mrouter port joins every MC group in its
+ * broadcast domain. If it is not an mrouter, it should leave the group.
+ */
+ list_for_each_entry(br_port, &br_dev->port_list, head) {
+ pr_port = prestera_port_dev_lower_find(br_port->dev);
+
+ /* Make sure the mrouter won't receive traffic from
+ * another broadcast domain (e.g. from a VLAN the
+ * mrouter port is not a member of).
+ */
+ if (br_dev->vlan_enabled &&
+ !prestera_port_vlan_by_vid(pr_port,
+ mdb->vid))
+ continue;
+
+ if (br_port->mrouter) {
+ err = prestera_mdb_port_add(mdb, br_port->dev,
+ mdb->addr,
+ mdb->vid);
+ if (err)
+ return err;
+ } else if (!br_port->mrouter &&
+ !prestera_br_mdb_port_is_member
+ (br_mdb, br_port->dev)) {
+ prestera_mdb_port_del(mdb, br_port->dev);
+ }
+ }
+ }
+
+ return 0;
+}
+
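+/* Propagate the desired enable state to the HW MDB entry: create it in
+ * HW when enabling, destroy it when disabling, and remember the state.
+ */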
+static int
+prestera_mdb_enable_set(struct prestera_br_mdb_entry *br_mdb, bool enable)
+{
+ int err;
+
+ if (enable != br_mdb->enabled) {
+ if (enable)
+ err = prestera_hw_mdb_create(br_mdb->mdb);
+ else
+ err = prestera_hw_mdb_destroy(br_mdb->mdb);
+
+ if (err)
+ return err;
+
+ br_mdb->enabled = enable;
+ }
+
+ return 0;
+}
+
+static int
+prestera_br_mdb_enable_set(struct prestera_bridge *br_dev, bool enable)
+{
+ struct prestera_br_mdb_entry *br_mdb;
+ int err;
+
+ list_for_each_entry(br_mdb, &br_dev->br_mdb_entry_list,
+ br_mdb_entry_node) {
+ err = prestera_mdb_enable_set(br_mdb, enable);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int prestera_port_attr_br_mc_disabled_set(struct prestera_port *port,
+ struct net_device *orig_dev,
+ bool mc_disabled)
+{
+ struct prestera_switch *sw = port->sw;
+ struct prestera_bridge *br_dev;
+
+ br_dev = prestera_bridge_find(sw, orig_dev);
+ if (!br_dev)
+ return 0;
+
+ br_dev->multicast_enabled = !mc_disabled;
+
+ /* There is no point in re-enabling the mdb if the mrouter is missing. */
+ WARN_ON(prestera_br_mdb_enable_set(br_dev, br_dev->multicast_enabled &&
+ br_dev->mrouter_exist));
+
+ WARN_ON(prestera_br_mdb_sync(br_dev));
+
+ WARN_ON(prestera_br_mdb_mc_enable_sync(br_dev));
+
+ return 0;
+}
+
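+/* Return true if at least one port of the bridge is marked as an mrouter. */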
+static bool
+prestera_bridge_mdb_mc_mrouter_exists(struct prestera_bridge *br_dev)
+{
+ struct prestera_bridge_port *br_port;
+
+ list_for_each_entry(br_port, &br_dev->port_list, head)
+ if (br_port->mrouter)
+ return true;
+
+ return false;
+}
+
+static int
+prestera_port_attr_mrouter_set(struct prestera_port *port,
+ struct net_device *orig_dev,
+ bool is_port_mrouter)
+{
+ struct prestera_bridge_port *br_port;
+ struct prestera_bridge *br_dev;
+
+ br_port = prestera_bridge_port_find(port->sw, orig_dev);
+ if (!br_port)
+ return 0;
+
+ br_dev = br_port->bridge;
+ br_port->mrouter = is_port_mrouter;
+
+ br_dev->mrouter_exist = prestera_bridge_mdb_mc_mrouter_exists(br_dev);
+
+ /* Enable MDB processing only if an mrouter exists and MC is enabled.
+ * If MC is enabled but there is no mrouter, the device floods
+ * all multicast traffic (even if the MDB table is not empty) using
+ * the bridge's flood capabilities (without the use of flood_domain).
+ */
+ WARN_ON(prestera_br_mdb_enable_set(br_dev, br_dev->multicast_enabled &&
+ br_dev->mrouter_exist));
+
+ WARN_ON(prestera_br_mdb_sync(br_dev));
+
+ WARN_ON(prestera_br_mdb_mc_enable_sync(br_dev));
+
+ return 0;
+}
+
+static int prestera_port_obj_attr_set(struct net_device *dev, const void *ctx,
+ const struct switchdev_attr *attr,
+ struct netlink_ext_ack *extack)
+{
+ struct prestera_port *port = netdev_priv(dev);
+ int err = 0;
+
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
+ err = prestera_port_attr_stp_state_set(port, attr->orig_dev,
+ attr->u.stp_state);
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
+ if (attr->u.brport_flags.mask &
+ ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_PORT_LOCKED))
+ err = -EINVAL;
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
+ err = prestera_port_attr_br_flags_set(port, attr->orig_dev,
+ attr->u.brport_flags);
+ break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
+ err = prestera_port_attr_br_ageing_set(port,
+ attr->u.ageing_time);
+ break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
+ err = prestera_port_attr_br_vlan_set(port, attr->orig_dev,
+ attr->u.vlan_filtering);
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_MROUTER:
+ err = prestera_port_attr_mrouter_set(port, attr->orig_dev,
+ attr->u.mrouter);
+ break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
+ err = prestera_port_attr_br_mc_disabled_set(port, attr->orig_dev,
+ attr->u.mc_disabled);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ }
+
+ return err;
+}
+
+static void
+prestera_fdb_offload_notify(struct prestera_port *port,
+ struct switchdev_notifier_fdb_info *info)
+{
+ struct switchdev_notifier_fdb_info send_info = {};
+
+ send_info.addr = info->addr;
+ send_info.vid = info->vid;
+ send_info.offloaded = true;
+
+ call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED, port->dev,
+ &send_info.info, NULL);
+}
+
+static int prestera_port_fdb_set(struct prestera_port *port,
+ struct switchdev_notifier_fdb_info *fdb_info,
+ bool adding)
+{
+ struct prestera_switch *sw = port->sw;
+ struct prestera_bridge_port *br_port;
+ struct prestera_bridge *bridge;
+ int err;
+ u16 vid;
+
+ br_port = prestera_bridge_port_by_dev(sw->swdev, port->dev);
+ if (!br_port)
+ return -EINVAL;
+
+ bridge = br_port->bridge;
+
+ if (bridge->vlan_enabled)
+ vid = fdb_info->vid;
+ else
+ vid = bridge->bridge_id;
+
+ if (adding)
+ err = prestera_fdb_add(port, fdb_info->addr, vid, false);
+ else
+ err = prestera_fdb_del(port, fdb_info->addr, vid);
+
+ return err;
+}
+
+static void prestera_fdb_event_work(struct work_struct *work)
+{
+ struct switchdev_notifier_fdb_info *fdb_info;
+ struct prestera_fdb_event_work *swdev_work;
+ struct prestera_port *port;
+ struct net_device *dev;
+ int err;
+
+ swdev_work = container_of(work, struct prestera_fdb_event_work, work);
+ dev = swdev_work->dev;
+
+ rtnl_lock();
+
+ port = prestera_port_dev_lower_find(dev);
+ if (!port)
+ goto out_unlock;
+
+ switch (swdev_work->event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ fdb_info = &swdev_work->fdb_info;
+ if (!fdb_info->added_by_user || fdb_info->is_local)
+ break;
+
+ err = prestera_port_fdb_set(port, fdb_info, true);
+ if (err)
+ break;
+
+ prestera_fdb_offload_notify(port, fdb_info);
+ break;
+
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ fdb_info = &swdev_work->fdb_info;
+ prestera_port_fdb_set(port, fdb_info, false);
+ break;
+ }
+
+out_unlock:
+ rtnl_unlock();
+
+ kfree(swdev_work->fdb_info.addr);
+ kfree(swdev_work);
+ dev_put(dev);
+}
+
+static int prestera_switchdev_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+ struct switchdev_notifier_fdb_info *fdb_info;
+ struct switchdev_notifier_info *info = ptr;
+ struct prestera_fdb_event_work *swdev_work;
+ struct net_device *upper;
+ int err;
+
+ if (event == SWITCHDEV_PORT_ATTR_SET) {
+ err = switchdev_handle_port_attr_set(dev, ptr,
+ prestera_netdev_check,
+ prestera_port_obj_attr_set);
+ return notifier_from_errno(err);
+ }
+
+ if (!prestera_netdev_check(dev))
+ return NOTIFY_DONE;
+
+ upper = netdev_master_upper_dev_get_rcu(dev);
+ if (!upper)
+ return NOTIFY_DONE;
+
+ if (!netif_is_bridge_master(upper))
+ return NOTIFY_DONE;
+
+ swdev_work = kzalloc(sizeof(*swdev_work), GFP_ATOMIC);
+ if (!swdev_work)
+ return NOTIFY_BAD;
+
+ swdev_work->event = event;
+ swdev_work->dev = dev;
+
+ switch (event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ fdb_info = container_of(info,
+ struct switchdev_notifier_fdb_info,
+ info);
+
+ INIT_WORK(&swdev_work->work, prestera_fdb_event_work);
+ memcpy(&swdev_work->fdb_info, ptr,
+ sizeof(swdev_work->fdb_info));
+
+ swdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
+ if (!swdev_work->fdb_info.addr)
+ goto out_bad;
+
+ ether_addr_copy((u8 *)swdev_work->fdb_info.addr,
+ fdb_info->addr);
+ dev_hold(dev);
+ break;
+
+ default:
+ kfree(swdev_work);
+ return NOTIFY_DONE;
+ }
+
+ queue_work(swdev_wq, &swdev_work->work);
+ return NOTIFY_DONE;
+
+out_bad:
+ kfree(swdev_work);
+ return NOTIFY_BAD;
+}
+
+static int
+prestera_port_vlan_bridge_join(struct prestera_port_vlan *port_vlan,
+ struct prestera_bridge_port *br_port)
+{
+ struct prestera_port *port = port_vlan->port;
+ struct prestera_bridge_vlan *br_vlan;
+ u16 vid = port_vlan->vid;
+ int err;
+
+ if (port_vlan->br_port)
+ return 0;
+
+ err = prestera_br_port_flags_set(br_port, port);
+ if (err)
+ goto err_flags2port_set;
+
+ err = prestera_port_vid_stp_set(port, vid, br_port->stp_state);
+ if (err)
+ goto err_port_vid_stp_set;
+
+ br_vlan = prestera_bridge_vlan_by_vid(br_port, vid);
+ if (!br_vlan) {
+ br_vlan = prestera_bridge_vlan_create(br_port, vid);
+ if (!br_vlan) {
+ err = -ENOMEM;
+ goto err_bridge_vlan_get;
+ }
+ }
+
+ list_add(&port_vlan->br_vlan_head, &br_vlan->port_vlan_list);
+
+ prestera_bridge_port_get(br_port);
+ port_vlan->br_port = br_port;
+
+ return 0;
+
+err_bridge_vlan_get:
+ prestera_port_vid_stp_set(port, vid, BR_STATE_FORWARDING);
+err_port_vid_stp_set:
+ prestera_br_port_flags_reset(br_port, port);
+err_flags2port_set:
+ return err;
+}
+
+static int
+prestera_bridge_port_vlan_add(struct prestera_port *port,
+ struct prestera_bridge_port *br_port,
+ u16 vid, bool is_untagged, bool is_pvid,
+ struct netlink_ext_ack *extack)
+{
+ struct prestera_port_vlan *port_vlan;
+ u16 old_pvid = port->pvid;
+ u16 pvid;
+ int err;
+
+ if (is_pvid)
+ pvid = vid;
+ else
+ pvid = port->pvid == vid ? 0 : port->pvid;
+
+ port_vlan = prestera_port_vlan_by_vid(port, vid);
+ if (port_vlan && port_vlan->br_port != br_port)
+ return -EEXIST;
+
+ if (!port_vlan) {
+ port_vlan = prestera_port_vlan_create(port, vid, is_untagged);
+ if (IS_ERR(port_vlan))
+ return PTR_ERR(port_vlan);
+ } else {
+ err = prestera_hw_vlan_port_set(port, vid, true, is_untagged);
+ if (err)
+ goto err_port_vlan_set;
+ }
+
+ err = prestera_port_pvid_set(port, pvid);
+ if (err)
+ goto err_port_pvid_set;
+
+ err = prestera_port_vlan_bridge_join(port_vlan, br_port);
+ if (err)
+ goto err_port_vlan_bridge_join;
+
+ return 0;
+
+err_port_vlan_bridge_join:
+ prestera_port_pvid_set(port, old_pvid);
+err_port_pvid_set:
+ prestera_hw_vlan_port_set(port, vid, false, false);
+err_port_vlan_set:
+ prestera_port_vlan_destroy(port_vlan);
+
+ return err;
+}
+
+static void
+prestera_bridge_port_vlan_del(struct prestera_port *port,
+ struct prestera_bridge_port *br_port, u16 vid)
+{
+ u16 pvid = port->pvid == vid ? 0 : port->pvid;
+ struct prestera_port_vlan *port_vlan;
+
+ port_vlan = prestera_port_vlan_by_vid(port, vid);
+ if (WARN_ON(!port_vlan))
+ return;
+
+ prestera_port_vlan_bridge_leave(port_vlan);
+ prestera_port_pvid_set(port, pvid);
+ prestera_port_vlan_destroy(port_vlan);
+}
+
+static int prestera_port_vlans_add(struct prestera_port *port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack)
+{
+ bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+ struct net_device *orig_dev = vlan->obj.orig_dev;
+ struct prestera_bridge_port *br_port;
+ struct prestera_switch *sw = port->sw;
+ struct prestera_bridge *bridge;
+
+ if (netif_is_bridge_master(orig_dev))
+ return 0;
+
+ br_port = prestera_bridge_port_by_dev(sw->swdev, port->dev);
+ if (WARN_ON(!br_port))
+ return -EINVAL;
+
+ bridge = br_port->bridge;
+ if (!bridge->vlan_enabled)
+ return 0;
+
+ return prestera_bridge_port_vlan_add(port, br_port,
+ vlan->vid, flag_untagged,
+ flag_pvid, extack);
+}
+
+static struct prestera_br_mdb_entry *
+prestera_br_mdb_entry_create(struct prestera_switch *sw,
+ struct prestera_bridge *br_dev,
+ const unsigned char *addr, u16 vid)
+{
+ struct prestera_br_mdb_entry *br_mdb_entry;
+ struct prestera_mdb_entry *mdb_entry;
+
+ br_mdb_entry = kzalloc(sizeof(*br_mdb_entry), GFP_KERNEL);
+ if (!br_mdb_entry)
+ return NULL;
+
+ mdb_entry = prestera_mdb_entry_create(sw, addr, vid);
+ if (!mdb_entry)
+ goto err_mdb_alloc;
+
+ br_mdb_entry->mdb = mdb_entry;
+ br_mdb_entry->bridge = br_dev;
+ br_mdb_entry->enabled = true;
+ INIT_LIST_HEAD(&br_mdb_entry->br_mdb_port_list);
+
+ list_add(&br_mdb_entry->br_mdb_entry_node, &br_dev->br_mdb_entry_list);
+
+ return br_mdb_entry;
+
+err_mdb_alloc:
+ kfree(br_mdb_entry);
+ return NULL;
+}
+
+static int prestera_br_mdb_port_add(struct prestera_br_mdb_entry *br_mdb,
+ struct prestera_bridge_port *br_port)
+{
+ struct prestera_br_mdb_port *br_mdb_port;
+
+ list_for_each_entry(br_mdb_port, &br_mdb->br_mdb_port_list,
+ br_mdb_port_node)
+ if (br_mdb_port->br_port == br_port)
+ return 0;
+
+ br_mdb_port = kzalloc(sizeof(*br_mdb_port), GFP_KERNEL);
+ if (!br_mdb_port)
+ return -ENOMEM;
+
+ br_mdb_port->br_port = br_port;
+ list_add(&br_mdb_port->br_mdb_port_node,
+ &br_mdb->br_mdb_port_list);
+
+ return 0;
+}
+
+static struct prestera_br_mdb_entry *
+prestera_br_mdb_entry_find(struct prestera_bridge *br_dev,
+ const unsigned char *addr, u16 vid)
+{
+ struct prestera_br_mdb_entry *br_mdb;
+
+ list_for_each_entry(br_mdb, &br_dev->br_mdb_entry_list,
+ br_mdb_entry_node)
+ if (ether_addr_equal(&br_mdb->mdb->addr[0], addr) &&
+ vid == br_mdb->mdb->vid)
+ return br_mdb;
+
+ return NULL;
+}
+
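+/* Look up the software MDB entry for @addr/@vid, creating it if it does
+ * not exist yet.
+ */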
+static struct prestera_br_mdb_entry *
+prestera_br_mdb_entry_get(struct prestera_switch *sw,
+ struct prestera_bridge *br_dev,
+ const unsigned char *addr, u16 vid)
+{
+ struct prestera_br_mdb_entry *br_mdb;
+
+ br_mdb = prestera_br_mdb_entry_find(br_dev, addr, vid);
+ if (br_mdb)
+ return br_mdb;
+
+ return prestera_br_mdb_entry_create(sw, br_dev, addr, vid);
+}
+
+static int
+prestera_mdb_port_addr_obj_add(const struct switchdev_obj_port_mdb *mdb)
+{
+ struct prestera_br_mdb_entry *br_mdb;
+ struct prestera_bridge_port *br_port;
+ struct prestera_bridge *br_dev;
+ struct prestera_switch *sw;
+ struct prestera_port *port;
+ int err;
+
+ sw = prestera_switch_get(mdb->obj.orig_dev);
+ port = prestera_port_dev_lower_find(mdb->obj.orig_dev);
+
+ br_port = prestera_bridge_port_find(sw, mdb->obj.orig_dev);
+ if (!br_port)
+ return 0;
+
+ br_dev = br_port->bridge;
+
+ if (mdb->vid && !prestera_port_vlan_by_vid(port, mdb->vid))
+ return 0;
+
+ if (mdb->vid)
+ br_mdb = prestera_br_mdb_entry_get(sw, br_dev, &mdb->addr[0],
+ mdb->vid);
+ else
+ br_mdb = prestera_br_mdb_entry_get(sw, br_dev, &mdb->addr[0],
+ br_dev->bridge_id);
+
+ if (!br_mdb)
+ return -ENOMEM;
+
+ /* Make sure a newly allocated MDB entry gets disabled if either MC is
+ * disabled or no mrouter exists.
+ */
+ WARN_ON(prestera_mdb_enable_set(br_mdb, br_dev->multicast_enabled &&
+ br_dev->mrouter_exist));
+
+ err = prestera_br_mdb_port_add(br_mdb, br_port);
+ if (err) {
+ prestera_br_mdb_entry_put(br_mdb);
+ return err;
+ }
+
+ err = prestera_br_mdb_sync(br_dev);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int prestera_port_obj_add(struct net_device *dev, const void *ctx,
+ const struct switchdev_obj *obj,
+ struct netlink_ext_ack *extack)
+{
+ struct prestera_port *port = netdev_priv(dev);
+ const struct switchdev_obj_port_vlan *vlan;
+ const struct switchdev_obj_port_mdb *mdb;
+ int err = 0;
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
+ return prestera_port_vlans_add(port, vlan, extack);
+ case SWITCHDEV_OBJ_ID_PORT_MDB:
+ mdb = SWITCHDEV_OBJ_PORT_MDB(obj);
+ err = prestera_mdb_port_addr_obj_add(mdb);
+ break;
+ case SWITCHDEV_OBJ_ID_HOST_MDB:
+ fallthrough;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static int prestera_port_vlans_del(struct prestera_port *port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct net_device *orig_dev = vlan->obj.orig_dev;
+ struct prestera_bridge_port *br_port;
+ struct prestera_switch *sw = port->sw;
+
+ if (netif_is_bridge_master(orig_dev))
+ return -EOPNOTSUPP;
+
+ br_port = prestera_bridge_port_by_dev(sw->swdev, port->dev);
+ if (WARN_ON(!br_port))
+ return -EINVAL;
+
+ if (!br_port->bridge->vlan_enabled)
+ return 0;
+
+ prestera_bridge_port_vlan_del(port, br_port, vlan->vid);
+
+ return 0;
+}
+
+static int
+prestera_mdb_port_addr_obj_del(struct prestera_port *port,
+ const struct switchdev_obj_port_mdb *mdb)
+{
+ struct prestera_br_mdb_entry *br_mdb;
+ struct prestera_bridge_port *br_port;
+ struct prestera_bridge *br_dev;
+ int err;
+
+ /* The bridge port no longer exists, and neither does this MDB entry. */
+ br_port = prestera_bridge_port_find(port->sw, mdb->obj.orig_dev);
+ if (!br_port)
+ return 0;
+
+ /* Removing an MDB entry for a non-existing VLAN is not supported. */
+ if (mdb->vid && !prestera_port_vlan_by_vid(port, mdb->vid))
+ return 0;
+
+ br_dev = br_port->bridge;
+
+ if (br_port->bridge->vlan_enabled)
+ br_mdb = prestera_br_mdb_entry_find(br_dev, &mdb->addr[0],
+ mdb->vid);
+ else
+ br_mdb = prestera_br_mdb_entry_find(br_dev, &mdb->addr[0],
+ br_port->bridge->bridge_id);
+
+ if (!br_mdb)
+ return 0;
+
+ /* This port might have been the last one in the MDB group, so
+ * remove it from both the software and HW MDB, sync the MDB table,
+ * and then destroy the software MDB entry (if needed).
+ */
+ prestera_br_mdb_port_del(br_mdb, br_port);
+
+ prestera_br_mdb_entry_put(br_mdb);
+
+ err = prestera_br_mdb_sync(br_dev);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int prestera_port_obj_del(struct net_device *dev, const void *ctx,
+ const struct switchdev_obj *obj)
+{
+ struct prestera_port *port = netdev_priv(dev);
+ const struct switchdev_obj_port_mdb *mdb;
+ int err = 0;
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ return prestera_port_vlans_del(port, SWITCHDEV_OBJ_PORT_VLAN(obj));
+ case SWITCHDEV_OBJ_ID_PORT_MDB:
+ mdb = SWITCHDEV_OBJ_PORT_MDB(obj);
+ err = prestera_mdb_port_addr_obj_del(port, mdb);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static int prestera_switchdev_blk_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+ int err;
+
+ switch (event) {
+ case SWITCHDEV_PORT_OBJ_ADD:
+ err = switchdev_handle_port_obj_add(dev, ptr,
+ prestera_netdev_check,
+ prestera_port_obj_add);
+ break;
+ case SWITCHDEV_PORT_OBJ_DEL:
+ err = switchdev_handle_port_obj_del(dev, ptr,
+ prestera_netdev_check,
+ prestera_port_obj_del);
+ break;
+ case SWITCHDEV_PORT_ATTR_SET:
+ err = switchdev_handle_port_attr_set(dev, ptr,
+ prestera_netdev_check,
+ prestera_port_obj_attr_set);
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+
+ return notifier_from_errno(err);
+}
+
+static void prestera_fdb_event(struct prestera_switch *sw,
+ struct prestera_event *evt, void *arg)
+{
+ struct switchdev_notifier_fdb_info info = {};
+ struct net_device *dev = NULL;
+ struct prestera_port *port;
+ struct prestera_lag *lag;
+
+ switch (evt->fdb_evt.type) {
+ case PRESTERA_FDB_ENTRY_TYPE_REG_PORT:
+ port = prestera_find_port(sw, evt->fdb_evt.dest.port_id);
+ if (port)
+ dev = port->dev;
+ break;
+ case PRESTERA_FDB_ENTRY_TYPE_LAG:
+ lag = prestera_lag_by_id(sw, evt->fdb_evt.dest.lag_id);
+ if (lag)
+ dev = lag->dev;
+ break;
+ default:
+ return;
+ }
+
+ if (!dev)
+ return;
+
+ info.addr = evt->fdb_evt.data.mac;
+ info.vid = evt->fdb_evt.vid;
+ info.offloaded = true;
+
+ rtnl_lock();
+
+ switch (evt->id) {
+ case PRESTERA_FDB_EVENT_LEARNED:
+ call_switchdev_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE,
+ dev, &info.info, NULL);
+ break;
+ case PRESTERA_FDB_EVENT_AGED:
+ call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE,
+ dev, &info.info, NULL);
+ break;
+ }
+
+ rtnl_unlock();
+}
+
+static int prestera_fdb_init(struct prestera_switch *sw)
+{
+ int err;
+
+ err = prestera_hw_event_handler_register(sw, PRESTERA_EVENT_TYPE_FDB,
+ prestera_fdb_event, NULL);
+ if (err)
+ return err;
+
+ err = prestera_hw_switch_ageing_set(sw, PRESTERA_DEFAULT_AGEING_TIME_MS);
+ if (err)
+ goto err_ageing_set;
+
+ return 0;
+
+err_ageing_set:
+ prestera_hw_event_handler_unregister(sw, PRESTERA_EVENT_TYPE_FDB,
+ prestera_fdb_event);
+ return err;
+}
+
+static void prestera_fdb_fini(struct prestera_switch *sw)
+{
+ prestera_hw_event_handler_unregister(sw, PRESTERA_EVENT_TYPE_FDB,
+ prestera_fdb_event);
+}
+
+static int prestera_switchdev_handler_init(struct prestera_switchdev *swdev)
+{
+ int err;
+
+ swdev->swdev_nb.notifier_call = prestera_switchdev_event;
+ err = register_switchdev_notifier(&swdev->swdev_nb);
+ if (err)
+ goto err_register_swdev_notifier;
+
+ swdev->swdev_nb_blk.notifier_call = prestera_switchdev_blk_event;
+ err = register_switchdev_blocking_notifier(&swdev->swdev_nb_blk);
+ if (err)
+ goto err_register_blk_swdev_notifier;
+
+ return 0;
+
+err_register_blk_swdev_notifier:
+ unregister_switchdev_notifier(&swdev->swdev_nb);
+err_register_swdev_notifier:
+ destroy_workqueue(swdev_wq);
+ return err;
+}
+
+static void prestera_switchdev_handler_fini(struct prestera_switchdev *swdev)
+{
+ unregister_switchdev_blocking_notifier(&swdev->swdev_nb_blk);
+ unregister_switchdev_notifier(&swdev->swdev_nb);
+}
+
+int prestera_switchdev_init(struct prestera_switch *sw)
+{
+ struct prestera_switchdev *swdev;
+ int err;
+
+ swdev = kzalloc(sizeof(*swdev), GFP_KERNEL);
+ if (!swdev)
+ return -ENOMEM;
+
+ sw->swdev = swdev;
+ swdev->sw = sw;
+
+ INIT_LIST_HEAD(&swdev->bridge_list);
+
+ swdev_wq = alloc_ordered_workqueue("%s_ordered", 0, "prestera_br");
+ if (!swdev_wq) {
+ err = -ENOMEM;
+ goto err_alloc_wq;
+ }
+
+ err = prestera_switchdev_handler_init(swdev);
+ if (err)
+ goto err_swdev_init;
+
+ err = prestera_fdb_init(sw);
+ if (err)
+ goto err_fdb_init;
+
+ return 0;
+
+err_fdb_init:
+err_swdev_init:
+ destroy_workqueue(swdev_wq);
+err_alloc_wq:
+ kfree(swdev);
+
+ return err;
+}
+
+void prestera_switchdev_fini(struct prestera_switch *sw)
+{
+ struct prestera_switchdev *swdev = sw->swdev;
+
+ prestera_fdb_fini(sw);
+ prestera_switchdev_handler_fini(swdev);
+ destroy_workqueue(swdev_wq);
+ kfree(swdev);
+}
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.h b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.h
new file mode 100644
index 000000000..0e93fda3d
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved. */
+
+#ifndef _PRESTERA_SWITCHDEV_H_
+#define _PRESTERA_SWITCHDEV_H_
+
+int prestera_switchdev_init(struct prestera_switch *sw);
+void prestera_switchdev_fini(struct prestera_switch *sw);
+
+int prestera_bridge_port_join(struct net_device *br_dev,
+ struct prestera_port *port,
+ struct netlink_ext_ack *extack);
+
+void prestera_bridge_port_leave(struct net_device *br_dev,
+ struct prestera_port *port);
+
+#endif /* _PRESTERA_SWITCHDEV_H_ */
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
new file mode 100644
index 000000000..d5691b6a2
--- /dev/null
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -0,0 +1,1597 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PXA168 ethernet driver.
+ * Most of the code is derived from mv643xx ethernet driver.
+ *
+ * Copyright (C) 2010 Marvell International Ltd.
+ * Sachin Sanap <ssanap@marvell.com>
+ * Zhangfei Gao <zgao6@marvell.com>
+ * Philip Rakity <prakity@marvell.com>
+ * Mark Brown <markb@marvell.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/in.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ip.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/pxa168_eth.h>
+#include <linux/tcp.h>
+#include <linux/types.h>
+#include <linux/udp.h>
+#include <linux/workqueue.h>
+#include <linux/pgtable.h>
+
+#include <asm/cacheflush.h>
+
+#define DRIVER_NAME "pxa168-eth"
+#define DRIVER_VERSION "0.3"
+
+/*
+ * Registers
+ */
+
+#define PHY_ADDRESS 0x0000
+#define SMI 0x0010
+#define PORT_CONFIG 0x0400
+#define PORT_CONFIG_EXT 0x0408
+#define PORT_COMMAND 0x0410
+#define PORT_STATUS 0x0418
+#define HTPR 0x0428
+#define MAC_ADDR_LOW 0x0430
+#define MAC_ADDR_HIGH 0x0438
+#define SDMA_CONFIG 0x0440
+#define SDMA_CMD 0x0448
+#define INT_CAUSE 0x0450
+#define INT_W_CLEAR 0x0454
+#define INT_MASK 0x0458
+#define ETH_F_RX_DESC_0 0x0480
+#define ETH_C_RX_DESC_0 0x04A0
+#define ETH_C_TX_DESC_1 0x04E4
+
+/* smi register */
+#define SMI_BUSY (1 << 28) /* 0 - Write, 1 - Read */
+#define SMI_R_VALID (1 << 27) /* 0 - Write, 1 - Read */
+#define SMI_OP_W (0 << 26) /* Write operation */
+#define SMI_OP_R (1 << 26) /* Read operation */
+
+#define PHY_WAIT_ITERATIONS 10
+
+#define PXA168_ETH_PHY_ADDR_DEFAULT 0
+/* RX & TX descriptor command */
+#define BUF_OWNED_BY_DMA (1 << 31)
+
+/* RX descriptor status */
+#define RX_EN_INT (1 << 23)
+#define RX_FIRST_DESC (1 << 17)
+#define RX_LAST_DESC (1 << 16)
+#define RX_ERROR (1 << 15)
+
+/* TX descriptor command */
+#define TX_EN_INT (1 << 23)
+#define TX_GEN_CRC (1 << 22)
+#define TX_ZERO_PADDING (1 << 18)
+#define TX_FIRST_DESC (1 << 17)
+#define TX_LAST_DESC (1 << 16)
+#define TX_ERROR (1 << 15)
+
+/* SDMA_CMD */
+#define SDMA_CMD_AT (1 << 31)
+#define SDMA_CMD_TXDL (1 << 24)
+#define SDMA_CMD_TXDH (1 << 23)
+#define SDMA_CMD_AR (1 << 15)
+#define SDMA_CMD_ERD (1 << 7)
+
+/* Bit definitions of the Port Config Reg */
+#define PCR_DUPLEX_FULL (1 << 15)
+#define PCR_HS (1 << 12)
+#define PCR_EN (1 << 7)
+#define PCR_PM (1 << 0)
+
+/* Bit definitions of the Port Config Extend Reg */
+#define PCXR_2BSM (1 << 28)
+#define PCXR_DSCP_EN (1 << 21)
+#define PCXR_RMII_EN (1 << 20)
+#define PCXR_AN_SPEED_DIS (1 << 19)
+#define PCXR_SPEED_100 (1 << 18)
+#define PCXR_MFL_1518 (0 << 14)
+#define PCXR_MFL_1536 (1 << 14)
+#define PCXR_MFL_2048 (2 << 14)
+#define PCXR_MFL_64K (3 << 14)
+#define PCXR_FLOWCTL_DIS (1 << 12)
+#define PCXR_FLP (1 << 11)
+#define PCXR_AN_FLOWCTL_DIS (1 << 10)
+#define PCXR_AN_DUPLEX_DIS (1 << 9)
+#define PCXR_PRIO_TX_OFF 3
+#define PCXR_TX_HIGH_PRI (7 << PCXR_PRIO_TX_OFF)
+
+/* Bit definitions of the SDMA Config Reg */
+#define SDCR_BSZ_OFF 12
+#define SDCR_BSZ8 (3 << SDCR_BSZ_OFF)
+#define SDCR_BSZ4 (2 << SDCR_BSZ_OFF)
+#define SDCR_BSZ2 (1 << SDCR_BSZ_OFF)
+#define SDCR_BSZ1 (0 << SDCR_BSZ_OFF)
+#define SDCR_BLMR (1 << 6)
+#define SDCR_BLMT (1 << 7)
+#define SDCR_RIFB (1 << 9)
+#define SDCR_RC_OFF 2
+#define SDCR_RC_MAX_RETRANS (0xf << SDCR_RC_OFF)
+
+/*
+ * Bit definitions of the Interrupt Cause Reg
+ * and the Interrupt Mask Reg are the same
+ */
+#define ICR_RXBUF (1 << 0)
+#define ICR_TXBUF_H (1 << 2)
+#define ICR_TXBUF_L (1 << 3)
+#define ICR_TXEND_H (1 << 6)
+#define ICR_TXEND_L (1 << 7)
+#define ICR_RXERR (1 << 8)
+#define ICR_TXERR_H (1 << 10)
+#define ICR_TXERR_L (1 << 11)
+#define ICR_TX_UDR (1 << 13)
+#define ICR_MII_CH (1 << 28)
+
+#define ALL_INTS (ICR_TXBUF_H | ICR_TXBUF_L | ICR_TX_UDR |\
+ ICR_TXERR_H | ICR_TXERR_L |\
+ ICR_TXEND_H | ICR_TXEND_L |\
+ ICR_RXBUF | ICR_RXERR | ICR_MII_CH)
+
+#define ETH_HW_IP_ALIGN 2 /* hw aligns IP header */
+
+#define NUM_RX_DESCS 64
+#define NUM_TX_DESCS 64
+
+#define HASH_ADD 0
+#define HASH_DELETE 1
+#define HASH_ADDR_TABLE_SIZE 0x4000 /* 16K (1/2K address - PCR_HS == 1) */
+#define HOP_NUMBER 12
+
+/* Bit definitions for Port status */
+#define PORT_SPEED_100 (1 << 0)
+#define FULL_DUPLEX (1 << 1)
+#define FLOW_CONTROL_DISABLED (1 << 2)
+#define LINK_UP (1 << 3)
+
+/* Bit definitions for work to be done */
+#define WORK_TX_DONE (1 << 1)
+
+/*
+ * Misc definitions.
+ */
+#define SKB_DMA_REALIGN ((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES)
+
+struct rx_desc {
+ u32 cmd_sts; /* Descriptor command status */
+ u16 byte_cnt; /* Descriptor buffer byte count */
+ u16 buf_size; /* Buffer size */
+ u32 buf_ptr; /* Descriptor buffer pointer */
+ u32 next_desc_ptr; /* Next descriptor pointer */
+};
+
+struct tx_desc {
+ u32 cmd_sts; /* Command/status field */
+ u16 reserved;
+ u16 byte_cnt; /* buffer byte count */
+ u32 buf_ptr; /* pointer to buffer for this descriptor */
+ u32 next_desc_ptr; /* Pointer to next descriptor */
+};
+
+struct pxa168_eth_private {
+ struct platform_device *pdev;
+ int port_num; /* User Ethernet port number */
+ int phy_addr;
+ int phy_speed;
+ int phy_duplex;
+ phy_interface_t phy_intf;
+
+ int rx_resource_err; /* Rx ring resource error flag */
+
+ /* Next available and first returning Rx resource */
+ int rx_curr_desc_q, rx_used_desc_q;
+
+ /* Next available and first returning Tx resource */
+ int tx_curr_desc_q, tx_used_desc_q;
+
+ struct rx_desc *p_rx_desc_area;
+ dma_addr_t rx_desc_dma;
+ int rx_desc_area_size;
+ struct sk_buff **rx_skb;
+
+ struct tx_desc *p_tx_desc_area;
+ dma_addr_t tx_desc_dma;
+ int tx_desc_area_size;
+ struct sk_buff **tx_skb;
+
+ struct work_struct tx_timeout_task;
+
+ struct net_device *dev;
+ struct napi_struct napi;
+ u8 work_todo;
+ int skb_size;
+
+ /* Size of Tx Ring per queue */
+ int tx_ring_size;
+ /* Number of tx descriptors in use */
+ int tx_desc_count;
+ /* Size of Rx Ring per queue */
+ int rx_ring_size;
+ /* Number of rx descriptors in use */
+ int rx_desc_count;
+
+ /*
+ * Used in case the RX ring is empty, which can occur when
+ * the system does not have resources (skbs)
+ */
+ struct timer_list timeout;
+ struct mii_bus *smi_bus;
+
+ /* clock */
+ struct clk *clk;
+ struct pxa168_eth_platform_data *pd;
+ /*
+ * Ethernet controller base address.
+ */
+ void __iomem *base;
+
+ /* Pointer to the hardware address filter table */
+ void *htpr;
+ dma_addr_t htpr_dma;
+};
+
+struct addr_table_entry {
+ __le32 lo;
+ __le32 hi;
+};
+
+/* Bit fields of a Hash Table Entry */
+enum hash_table_entry {
+ HASH_ENTRY_VALID = 1,
+ SKIP = 2,
+ HASH_ENTRY_RECEIVE_DISCARD = 4,
+ HASH_ENTRY_RECEIVE_DISCARD_BIT = 2
+};
+
+static int pxa168_init_hw(struct pxa168_eth_private *pep);
+static int pxa168_init_phy(struct net_device *dev);
+static void eth_port_reset(struct net_device *dev);
+static void eth_port_start(struct net_device *dev);
+static int pxa168_eth_open(struct net_device *dev);
+static int pxa168_eth_stop(struct net_device *dev);
+
+static inline u32 rdl(struct pxa168_eth_private *pep, int offset)
+{
+ return readl_relaxed(pep->base + offset);
+}
+
+static inline void wrl(struct pxa168_eth_private *pep, int offset, u32 data)
+{
+ writel_relaxed(data, pep->base + offset);
+}
+
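+/* Issue an SDMA abort and poll until the abort bits clear; retry the
+ * command a bounded number of times before reporting a stuck DMA engine.
+ */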
+static void abort_dma(struct pxa168_eth_private *pep)
+{
+ int delay;
+ int max_retries = 40;
+
+ do {
+ wrl(pep, SDMA_CMD, SDMA_CMD_AR | SDMA_CMD_AT);
+ udelay(100);
+
+ delay = 10;
+ while ((rdl(pep, SDMA_CMD) & (SDMA_CMD_AR | SDMA_CMD_AT))
+ && delay-- > 0) {
+ udelay(10);
+ }
+ } while (max_retries-- > 0 && delay <= 0);
+
+ if (max_retries <= 0)
+ netdev_err(pep->dev, "%s : DMA Stuck\n", __func__);
+}
+
+static void rxq_refill(struct net_device *dev)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ struct sk_buff *skb;
+ struct rx_desc *p_used_rx_desc;
+ int used_rx_desc;
+
+ while (pep->rx_desc_count < pep->rx_ring_size) {
+ int size;
+
+ skb = netdev_alloc_skb(dev, pep->skb_size);
+ if (!skb)
+ break;
+ if (SKB_DMA_REALIGN)
+ skb_reserve(skb, SKB_DMA_REALIGN);
+ pep->rx_desc_count++;
+ /* Get 'used' Rx descriptor */
+ used_rx_desc = pep->rx_used_desc_q;
+ p_used_rx_desc = &pep->p_rx_desc_area[used_rx_desc];
+ size = skb_end_pointer(skb) - skb->data;
+ p_used_rx_desc->buf_ptr = dma_map_single(&pep->pdev->dev,
+ skb->data,
+ size,
+ DMA_FROM_DEVICE);
+ p_used_rx_desc->buf_size = size;
+ pep->rx_skb[used_rx_desc] = skb;
+
+ /* Return the descriptor to DMA ownership */
+ dma_wmb();
+ p_used_rx_desc->cmd_sts = BUF_OWNED_BY_DMA | RX_EN_INT;
+ dma_wmb();
+
+ /* Move the used descriptor pointer to the next descriptor */
+ pep->rx_used_desc_q = (used_rx_desc + 1) % pep->rx_ring_size;
+
+ /* Any Rx return cancels the Rx resource error status */
+ pep->rx_resource_err = 0;
+
+ skb_reserve(skb, ETH_HW_IP_ALIGN);
+ }
+
+ /*
+ * If the RX ring has no skbs at all, set a timer to try
+ * allocating again at a later time.
+ */
+ if (pep->rx_desc_count == 0) {
+ pep->timeout.expires = jiffies + (HZ / 10);
+ add_timer(&pep->timeout);
+ }
+}
+
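+/* Timer callback armed by rxq_refill() when no skbs could be allocated;
+ * schedule NAPI so the RX ring gets refilled from the poll path.
+ */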
+static inline void rxq_refill_timer_wrapper(struct timer_list *t)
+{
+ struct pxa168_eth_private *pep = from_timer(pep, t, timeout);
+ napi_schedule(&pep->napi);
+}
+
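+/* Reverse the bit order within each nibble of x. */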
+static inline u8 flip_8_bits(u8 x)
+{
+ return (((x) & 0x01) << 3) | (((x) & 0x02) << 1)
+ | (((x) & 0x04) >> 1) | (((x) & 0x08) >> 3)
+ | (((x) & 0x10) << 3) | (((x) & 0x20) << 1)
+ | (((x) & 0x40) >> 1) | (((x) & 0x80) >> 3);
+}
+
+static void nibble_swap_every_byte(unsigned char *mac_addr)
+{
+ int i;
+ for (i = 0; i < ETH_ALEN; i++) {
+ mac_addr[i] = ((mac_addr[i] & 0x0f) << 4) |
+ ((mac_addr[i] & 0xf0) >> 4);
+ }
+}
+
+static void inverse_every_nibble(unsigned char *mac_addr)
+{
+ int i;
+ for (i = 0; i < ETH_ALEN; i++)
+ mac_addr[i] = flip_8_bits(mac_addr[i]);
+}
+
+/*
+ * ----------------------------------------------------------------------------
+ * This function calculates the hash of the MAC address.
+ * Inputs
+ * mac_addr_orig - MAC address.
+ * Outputs
+ * returns the calculated hash table index.
+ */
+static u32 hash_function(const unsigned char *mac_addr_orig)
+{
+ u32 hash_result;
+ u32 addr0;
+ u32 addr1;
+ u32 addr2;
+ u32 addr3;
+ unsigned char mac_addr[ETH_ALEN];
+
+ /* Make a copy of the MAC address since we are going to perform bit
+ * operations on it
+ */
+ memcpy(mac_addr, mac_addr_orig, ETH_ALEN);
+
+ nibble_swap_every_byte(mac_addr);
+ inverse_every_nibble(mac_addr);
+
+ addr0 = (mac_addr[5] >> 2) & 0x3f;
+ addr1 = (mac_addr[5] & 0x03) | (((mac_addr[4] & 0x7f)) << 2);
+ addr2 = ((mac_addr[4] & 0x80) >> 7) | mac_addr[3] << 1;
+ addr3 = (mac_addr[2] & 0xff) | ((mac_addr[1] & 1) << 8);
+
+ hash_result = (addr0 << 9) | (addr1 ^ addr2 ^ addr3);
+ hash_result = hash_result & 0x07ff;
+ return hash_result;
+}
+
+/*
+ * ----------------------------------------------------------------------------
+ * This function adds or deletes an entry in the address table.
+ * Inputs
+ * pep - ethernet private data.
+ * mac_addr - MAC address.
+ * skip - if 1, skip this address. Used when deleting an entry that is part
+ *        of a chain in the hash table. We can't just delete the entry since
+ *        that would break the chain; instead the tables need to be
+ *        defragmented from time to time.
+ * rd - 0 Discard packet upon match.
+ *    - 1 Receive packet upon match.
+ * Outputs
+ * an address table entry is added or deleted.
+ * 0 if success.
+ * -ENOSPC if the table is full.
+ */
+static int add_del_hash_entry(struct pxa168_eth_private *pep,
+ const unsigned char *mac_addr,
+ u32 rd, u32 skip, int del)
+{
+ struct addr_table_entry *entry, *start;
+ u32 new_high;
+ u32 new_low;
+ u32 i;
+
+ new_low = (((mac_addr[1] >> 4) & 0xf) << 15)
+ | (((mac_addr[1] >> 0) & 0xf) << 11)
+ | (((mac_addr[0] >> 4) & 0xf) << 7)
+ | (((mac_addr[0] >> 0) & 0xf) << 3)
+ | (((mac_addr[3] >> 4) & 0x1) << 31)
+ | (((mac_addr[3] >> 0) & 0xf) << 27)
+ | (((mac_addr[2] >> 4) & 0xf) << 23)
+ | (((mac_addr[2] >> 0) & 0xf) << 19)
+ | (skip << SKIP) | (rd << HASH_ENTRY_RECEIVE_DISCARD_BIT)
+ | HASH_ENTRY_VALID;
+
+ new_high = (((mac_addr[5] >> 4) & 0xf) << 15)
+ | (((mac_addr[5] >> 0) & 0xf) << 11)
+ | (((mac_addr[4] >> 4) & 0xf) << 7)
+ | (((mac_addr[4] >> 0) & 0xf) << 3)
+ | (((mac_addr[3] >> 5) & 0x7) << 0);
+
+ /*
+ * Pick the appropriate table, start scanning for free/reusable
+ * entries at the index obtained by hashing the specified MAC address
+ */
+ start = pep->htpr;
+ entry = start + hash_function(mac_addr);
+ for (i = 0; i < HOP_NUMBER; i++) {
+ if (!(le32_to_cpu(entry->lo) & HASH_ENTRY_VALID)) {
+ break;
+ } else {
+ /* if same address put in same position */
+ if (((le32_to_cpu(entry->lo) & 0xfffffff8) ==
+ (new_low & 0xfffffff8)) &&
+ (le32_to_cpu(entry->hi) == new_high)) {
+ break;
+ }
+ }
+ if (entry == start + 0x7ff)
+ entry = start;
+ else
+ entry++;
+ }
+
+ if (((le32_to_cpu(entry->lo) & 0xfffffff8) != (new_low & 0xfffffff8)) &&
+ (le32_to_cpu(entry->hi) != new_high) && del)
+ return 0;
+
+ if (i == HOP_NUMBER) {
+ if (!del) {
+ netdev_info(pep->dev,
+ "%s: table section is full, need to "
+ "move to 16kB implementation?\n",
+ __FILE__);
+ return -ENOSPC;
+ } else
+ return 0;
+ }
+
+ /*
+ * Update the selected entry
+ */
+ if (del) {
+ entry->hi = 0;
+ entry->lo = 0;
+ } else {
+ entry->hi = cpu_to_le32(new_high);
+ entry->lo = cpu_to_le32(new_low);
+ }
+
+ return 0;
+}
+
+/*
+ * ----------------------------------------------------------------------------
+ * Create an address table entry from the MAC address info
+ * found in the specified net_device struct
+ *
+ * Input : pointer to ethernet interface network device structure
+ * Output : N/A
+ */
+static void update_hash_table_mac_address(struct pxa168_eth_private *pep,
+ unsigned char *oaddr,
+ const unsigned char *addr)
+{
+ /* Delete old entry */
+ if (oaddr)
+ add_del_hash_entry(pep, oaddr, 1, 0, HASH_DELETE);
+ /* Add new entry */
+ add_del_hash_entry(pep, addr, 1, 0, HASH_ADD);
+}
+
+static int init_hash_table(struct pxa168_eth_private *pep)
+{
+ /*
+ * The hardware expects the CPU to build a hash table based on a
+ * predefined hash function and populate it based on the hardware
+ * address. The location of the hash table is identified by a 32-bit
+ * pointer stored in the HTPR internal register. Two sizes exist for
+ * the hash table: 8kB (256kB of DRAM required (4 x 64 kB banks)) and
+ * 1/2kB (16kB of DRAM required (4 x 4 kB banks)). We currently only
+ * support the 1/2kB table.
+ */
+ /* TODO: Add support for the 8kB hash table and the alternative hash
+ * function. The driver could dynamically switch to them if the 1/2kB
+ * hash table is full.
+ */
+ if (!pep->htpr) {
+ pep->htpr = dma_alloc_coherent(pep->dev->dev.parent,
+ HASH_ADDR_TABLE_SIZE,
+ &pep->htpr_dma, GFP_KERNEL);
+ if (!pep->htpr)
+ return -ENOMEM;
+ } else {
+ memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE);
+ }
+ wrl(pep, HTPR, pep->htpr_dma);
+ return 0;
+}
+
+static void pxa168_eth_set_rx_mode(struct net_device *dev)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ struct netdev_hw_addr *ha;
+ u32 val;
+
+ val = rdl(pep, PORT_CONFIG);
+ if (dev->flags & IFF_PROMISC)
+ val |= PCR_PM;
+ else
+ val &= ~PCR_PM;
+ wrl(pep, PORT_CONFIG, val);
+
+ /*
+ * Remove the old list of MAC addresses and add dev->dev_addr
+ * and the multicast addresses.
+ */
+ memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE);
+ update_hash_table_mac_address(pep, NULL, dev->dev_addr);
+
+ netdev_for_each_mc_addr(ha, dev)
+ update_hash_table_mac_address(pep, NULL, ha->addr);
+}
+
+static void pxa168_eth_get_mac_address(struct net_device *dev,
+ unsigned char *addr)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ unsigned int mac_h = rdl(pep, MAC_ADDR_HIGH);
+ unsigned int mac_l = rdl(pep, MAC_ADDR_LOW);
+
+ addr[0] = (mac_h >> 24) & 0xff;
+ addr[1] = (mac_h >> 16) & 0xff;
+ addr[2] = (mac_h >> 8) & 0xff;
+ addr[3] = mac_h & 0xff;
+ addr[4] = (mac_l >> 8) & 0xff;
+ addr[5] = mac_l & 0xff;
+}
+
+static int pxa168_eth_set_mac_address(struct net_device *dev, void *addr)
+{
+ struct sockaddr *sa = addr;
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ unsigned char oldMac[ETH_ALEN];
+ u32 mac_h, mac_l;
+
+ if (!is_valid_ether_addr(sa->sa_data))
+ return -EADDRNOTAVAIL;
+ memcpy(oldMac, dev->dev_addr, ETH_ALEN);
+ eth_hw_addr_set(dev, sa->sa_data);
+
+ mac_h = dev->dev_addr[0] << 24;
+ mac_h |= dev->dev_addr[1] << 16;
+ mac_h |= dev->dev_addr[2] << 8;
+ mac_h |= dev->dev_addr[3];
+ mac_l = dev->dev_addr[4] << 8;
+ mac_l |= dev->dev_addr[5];
+ wrl(pep, MAC_ADDR_HIGH, mac_h);
+ wrl(pep, MAC_ADDR_LOW, mac_l);
+
+ netif_addr_lock_bh(dev);
+ update_hash_table_mac_address(pep, oldMac, dev->dev_addr);
+ netif_addr_unlock_bh(dev);
+ return 0;
+}
+
+static void eth_port_start(struct net_device *dev)
+{
+ unsigned int val = 0;
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ int tx_curr_desc, rx_curr_desc;
+
+ phy_start(dev->phydev);
+
+ /* Assignment of Tx CTRP of given queue */
+ tx_curr_desc = pep->tx_curr_desc_q;
+ wrl(pep, ETH_C_TX_DESC_1,
+ (u32) (pep->tx_desc_dma + tx_curr_desc * sizeof(struct tx_desc)));
+
+ /* Assignment of Rx CRDP of given queue */
+ rx_curr_desc = pep->rx_curr_desc_q;
+ wrl(pep, ETH_C_RX_DESC_0,
+ (u32) (pep->rx_desc_dma + rx_curr_desc * sizeof(struct rx_desc)));
+
+ wrl(pep, ETH_F_RX_DESC_0,
+ (u32) (pep->rx_desc_dma + rx_curr_desc * sizeof(struct rx_desc)));
+
+ /* Clear all interrupts */
+ wrl(pep, INT_CAUSE, 0);
+
+ /* Enable all interrupts for receive, transmit and error. */
+ wrl(pep, INT_MASK, ALL_INTS);
+
+ val = rdl(pep, PORT_CONFIG);
+ val |= PCR_EN;
+ wrl(pep, PORT_CONFIG, val);
+
+ /* Start RX DMA engine */
+ val = rdl(pep, SDMA_CMD);
+ val |= SDMA_CMD_ERD;
+ wrl(pep, SDMA_CMD, val);
+}
+
+static void eth_port_reset(struct net_device *dev)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ unsigned int val = 0;
+
+ /* Stop all interrupts for receive, transmit and error. */
+ wrl(pep, INT_MASK, 0);
+
+ /* Clear all interrupts */
+ wrl(pep, INT_CAUSE, 0);
+
+ /* Stop RX DMA */
+ val = rdl(pep, SDMA_CMD);
+ val &= ~SDMA_CMD_ERD; /* abort dma command */
+
+ /* Abort any transmit and receive operations and put DMA
+ * in idle state.
+ */
+ abort_dma(pep);
+
+ /* Disable port */
+ val = rdl(pep, PORT_CONFIG);
+ val &= ~PCR_EN;
+ wrl(pep, PORT_CONFIG, val);
+
+ phy_stop(dev->phydev);
+}
+
+/*
+ * txq_reclaim - Free the tx desc data for completed descriptors.
+ * If force is non-zero, free uncompleted descriptors as well.
+ */
+static int txq_reclaim(struct net_device *dev, int force)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ struct tx_desc *desc;
+ u32 cmd_sts;
+ struct sk_buff *skb;
+ int tx_index;
+ dma_addr_t addr;
+ int count;
+ int released = 0;
+
+ netif_tx_lock(dev);
+
+ pep->work_todo &= ~WORK_TX_DONE;
+ while (pep->tx_desc_count > 0) {
+ tx_index = pep->tx_used_desc_q;
+ desc = &pep->p_tx_desc_area[tx_index];
+ cmd_sts = desc->cmd_sts;
+ if (!force && (cmd_sts & BUF_OWNED_BY_DMA)) {
+ if (released > 0) {
+ goto txq_reclaim_end;
+ } else {
+ released = -1;
+ goto txq_reclaim_end;
+ }
+ }
+ pep->tx_used_desc_q = (tx_index + 1) % pep->tx_ring_size;
+ pep->tx_desc_count--;
+ addr = desc->buf_ptr;
+ count = desc->byte_cnt;
+ skb = pep->tx_skb[tx_index];
+ if (skb)
+ pep->tx_skb[tx_index] = NULL;
+
+ if (cmd_sts & TX_ERROR) {
+ if (net_ratelimit())
+ netdev_err(dev, "Error in TX\n");
+ dev->stats.tx_errors++;
+ }
+ dma_unmap_single(&pep->pdev->dev, addr, count, DMA_TO_DEVICE);
+ if (skb)
+ dev_kfree_skb_irq(skb);
+ released++;
+ }
+txq_reclaim_end:
+ netif_tx_unlock(dev);
+ return released;
+}
+
+static void pxa168_eth_tx_timeout(struct net_device *dev, unsigned int txqueue)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+
+ netdev_info(dev, "TX timeout desc_count %d\n", pep->tx_desc_count);
+
+ schedule_work(&pep->tx_timeout_task);
+}
+
+static void pxa168_eth_tx_timeout_task(struct work_struct *work)
+{
+ struct pxa168_eth_private *pep = container_of(work,
+ struct pxa168_eth_private,
+ tx_timeout_task);
+ struct net_device *dev = pep->dev;
+ pxa168_eth_stop(dev);
+ pxa168_eth_open(dev);
+}
+
+static int rxq_process(struct net_device *dev, int budget)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ struct net_device_stats *stats = &dev->stats;
+ unsigned int received_packets = 0;
+ struct sk_buff *skb;
+
+ while (budget-- > 0) {
+ int rx_next_curr_desc, rx_curr_desc, rx_used_desc;
+ struct rx_desc *rx_desc;
+ unsigned int cmd_sts;
+
+ /* Do not process Rx ring in case of Rx ring resource error */
+ if (pep->rx_resource_err)
+ break;
+ rx_curr_desc = pep->rx_curr_desc_q;
+ rx_used_desc = pep->rx_used_desc_q;
+ rx_desc = &pep->p_rx_desc_area[rx_curr_desc];
+ cmd_sts = rx_desc->cmd_sts;
+ dma_rmb();
+ if (cmd_sts & (BUF_OWNED_BY_DMA))
+ break;
+ skb = pep->rx_skb[rx_curr_desc];
+ pep->rx_skb[rx_curr_desc] = NULL;
+
+ rx_next_curr_desc = (rx_curr_desc + 1) % pep->rx_ring_size;
+ pep->rx_curr_desc_q = rx_next_curr_desc;
+
+ /* Rx descriptors exhausted. */
+ /* Set the Rx ring resource error flag */
+ if (rx_next_curr_desc == rx_used_desc)
+ pep->rx_resource_err = 1;
+ pep->rx_desc_count--;
+ dma_unmap_single(&pep->pdev->dev, rx_desc->buf_ptr,
+ rx_desc->buf_size,
+ DMA_FROM_DEVICE);
+ received_packets++;
+ /*
+ * Update statistics.
+ * Note byte count includes 4 byte CRC count
+ */
+ stats->rx_packets++;
+ stats->rx_bytes += rx_desc->byte_cnt;
+ /*
+ * If a packet is received without both the first and last
+ * descriptor bits set, or with the error summary bit set,
+ * the packet needs to be dropped.
+ */
+ if (((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
+ (RX_FIRST_DESC | RX_LAST_DESC))
+ || (cmd_sts & RX_ERROR)) {
+
+ stats->rx_dropped++;
+ if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
+ (RX_FIRST_DESC | RX_LAST_DESC)) {
+ if (net_ratelimit())
+ netdev_err(dev,
+ "Rx pkt on multiple desc\n");
+ }
+ if (cmd_sts & RX_ERROR)
+ stats->rx_errors++;
+ dev_kfree_skb_irq(skb);
+ } else {
+ /*
+ * The -4 is for the CRC in the trailer of the
+ * received packet
+ */
+ skb_put(skb, rx_desc->byte_cnt - 4);
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_receive_skb(skb);
+ }
+ }
+ /* Fill RX ring with skb's */
+ rxq_refill(dev);
+ return received_packets;
+}
+
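+/* Read and acknowledge the interrupt cause register; returns non-zero
+ * when there is TX-done or RX work pending.
+ */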
+static int pxa168_eth_collect_events(struct pxa168_eth_private *pep,
+ struct net_device *dev)
+{
+ u32 icr;
+ int ret = 0;
+
+ icr = rdl(pep, INT_CAUSE);
+ if (icr == 0)
+ return IRQ_NONE;
+
+ wrl(pep, INT_CAUSE, ~icr);
+ if (icr & (ICR_TXBUF_H | ICR_TXBUF_L)) {
+ pep->work_todo |= WORK_TX_DONE;
+ ret = 1;
+ }
+ if (icr & ICR_RXBUF)
+ ret = 1;
+ return ret;
+}
+
+static irqreturn_t pxa168_eth_int_handler(int irq, void *dev_id)
+{
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+
+ if (unlikely(!pxa168_eth_collect_events(pep, dev)))
+ return IRQ_NONE;
+ /* Disable interrupts */
+ wrl(pep, INT_MASK, 0);
+ napi_schedule(&pep->napi);
+ return IRQ_HANDLED;
+}
+
+static void pxa168_eth_recalc_skb_size(struct pxa168_eth_private *pep)
+{
+ int skb_size;
+
+ /*
+ * Reserve 2+14 bytes for an ethernet header (the hardware
+ * automatically prepends 2 bytes of dummy data to each
+ * received packet), 16 bytes for up to four VLAN tags, and
+ * 4 bytes for the trailing FCS -- 36 bytes total.
+ */
+ skb_size = pep->dev->mtu + 36;
+
+ /*
+ * Make sure that the skb size is a multiple of 8 bytes, as
+ * the lower three bits of the receive descriptor's buffer
+ * size field are ignored by the hardware.
+ */
+ pep->skb_size = (skb_size + 7) & ~7;
+
+ /*
+ * If NET_SKB_PAD is smaller than a cache line,
+ * netdev_alloc_skb() will cause skb->data to be misaligned
+ * to a cache line boundary. If this is the case, include
+ * some extra space to allow re-aligning the data area.
+ */
+ pep->skb_size += SKB_DMA_REALIGN;
+
+}
+
+static int set_port_config_ext(struct pxa168_eth_private *pep)
+{
+ int skb_size;
+
+ pxa168_eth_recalc_skb_size(pep);
+ if (pep->skb_size <= 1518)
+ skb_size = PCXR_MFL_1518;
+ else if (pep->skb_size <= 1536)
+ skb_size = PCXR_MFL_1536;
+ else if (pep->skb_size <= 2048)
+ skb_size = PCXR_MFL_2048;
+ else
+ skb_size = PCXR_MFL_64K;
+
+ /* Extended Port Configuration */
+ wrl(pep, PORT_CONFIG_EXT,
+ PCXR_AN_SPEED_DIS | /* Disable HW AN */
+ PCXR_AN_DUPLEX_DIS |
+ PCXR_AN_FLOWCTL_DIS |
+ PCXR_2BSM | /* Two byte prefix aligns IP hdr */
+ PCXR_DSCP_EN | /* Enable DSCP in IP */
+ skb_size | PCXR_FLP | /* do not force link pass */
+ PCXR_TX_HIGH_PRI); /* Transmit - high priority queue */
+
+ return 0;
+}
+
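+/* phylib link change callback: rebuild PORT_CONFIG/PORT_CONFIG_EXT from
+ * the negotiated speed, duplex and pause settings, and write them back
+ * only when something actually changed.
+ */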
+static void pxa168_eth_adjust_link(struct net_device *dev)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ struct phy_device *phy = dev->phydev;
+ u32 cfg, cfg_o = rdl(pep, PORT_CONFIG);
+ u32 cfgext, cfgext_o = rdl(pep, PORT_CONFIG_EXT);
+
+ cfg = cfg_o & ~PCR_DUPLEX_FULL;
+ cfgext = cfgext_o & ~(PCXR_SPEED_100 | PCXR_FLOWCTL_DIS | PCXR_RMII_EN);
+
+ if (phy->interface == PHY_INTERFACE_MODE_RMII)
+ cfgext |= PCXR_RMII_EN;
+ if (phy->speed == SPEED_100)
+ cfgext |= PCXR_SPEED_100;
+ if (phy->duplex)
+ cfg |= PCR_DUPLEX_FULL;
+ if (!phy->pause)
+ cfgext |= PCXR_FLOWCTL_DIS;
+
+ /* Bail out if nothing has changed */
+ if (cfg == cfg_o && cfgext == cfgext_o)
+ return;
+
+ wrl(pep, PORT_CONFIG, cfg);
+ wrl(pep, PORT_CONFIG_EXT, cfgext);
+
+ phy_print_status(phy);
+}
+
+static int pxa168_init_phy(struct net_device *dev)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ struct ethtool_link_ksettings cmd;
+ struct phy_device *phy = NULL;
+ int err;
+
+ if (dev->phydev)
+ return 0;
+
+ phy = mdiobus_scan_c22(pep->smi_bus, pep->phy_addr);
+ if (IS_ERR(phy))
+ return PTR_ERR(phy);
+
+ err = phy_connect_direct(dev, phy, pxa168_eth_adjust_link,
+ pep->phy_intf);
+ if (err)
+ return err;
+
+ cmd.base.phy_address = pep->phy_addr;
+ cmd.base.speed = pep->phy_speed;
+ cmd.base.duplex = pep->phy_duplex;
+ linkmode_copy(cmd.link_modes.advertising, PHY_BASIC_FEATURES);
+ cmd.base.autoneg = AUTONEG_ENABLE;
+
+ if (cmd.base.speed != 0)
+ cmd.base.autoneg = AUTONEG_DISABLE;
+
+ return phy_ethtool_set_link_ksettings(dev, &cmd);
+}
+
+static int pxa168_init_hw(struct pxa168_eth_private *pep)
+{
+ int err = 0;
+
+ /* Disable interrupts */
+ wrl(pep, INT_MASK, 0);
+ wrl(pep, INT_CAUSE, 0);
+ /* Write to ICR to clear interrupts. */
+ wrl(pep, INT_W_CLEAR, 0);
+ /* Abort any transmit and receive operations and put DMA
+ * in idle state.
+ */
+ abort_dma(pep);
+ /* Initialize address hash table */
+ err = init_hash_table(pep);
+ if (err)
+ return err;
+ /* SDMA configuration */
+ wrl(pep, SDMA_CONFIG, SDCR_BSZ8 | /* Burst size = 32 bytes */
+ SDCR_RIFB | /* Rx interrupt on frame */
+ SDCR_BLMT | /* Little endian transmit */
+ SDCR_BLMR | /* Little endian receive */
+ SDCR_RC_MAX_RETRANS); /* Max retransmit count */
+ /* Port Configuration */
+ wrl(pep, PORT_CONFIG, PCR_HS); /* Hash size is 1/2kb */
+ set_port_config_ext(pep);
+
+ return err;
+}
+
+static int rxq_init(struct net_device *dev)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ struct rx_desc *p_rx_desc;
+ int size = 0, i = 0;
+ int rx_desc_num = pep->rx_ring_size;
+
+ /* Allocate RX skb rings */
+ pep->rx_skb = kcalloc(rx_desc_num, sizeof(*pep->rx_skb), GFP_KERNEL);
+ if (!pep->rx_skb)
+ return -ENOMEM;
+
+ /* Allocate RX ring */
+ pep->rx_desc_count = 0;
+ size = pep->rx_ring_size * sizeof(struct rx_desc);
+ pep->rx_desc_area_size = size;
+ pep->p_rx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
+ &pep->rx_desc_dma,
+ GFP_KERNEL);
+ if (!pep->p_rx_desc_area)
+ goto out;
+
+ /* initialize the next_desc_ptr links in the Rx descriptors ring */
+ p_rx_desc = pep->p_rx_desc_area;
+ for (i = 0; i < rx_desc_num; i++) {
+ p_rx_desc[i].next_desc_ptr = pep->rx_desc_dma +
+ ((i + 1) % rx_desc_num) * sizeof(struct rx_desc);
+ }
+ /* Save Rx desc pointer to driver struct. */
+ pep->rx_curr_desc_q = 0;
+ pep->rx_used_desc_q = 0;
+ pep->rx_desc_area_size = rx_desc_num * sizeof(struct rx_desc);
+ return 0;
+out:
+ kfree(pep->rx_skb);
+ return -ENOMEM;
+}
+
+static void rxq_deinit(struct net_device *dev)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ int curr;
+
+ /* Free preallocated skb's on RX rings */
+ for (curr = 0; pep->rx_desc_count && curr < pep->rx_ring_size; curr++) {
+ if (pep->rx_skb[curr]) {
+ dev_kfree_skb(pep->rx_skb[curr]);
+ pep->rx_desc_count--;
+ }
+ }
+ if (pep->rx_desc_count)
+ netdev_err(dev, "Error in freeing Rx Ring. %d skb's still\n",
+ pep->rx_desc_count);
+ /* Free RX ring */
+ if (pep->p_rx_desc_area)
+ dma_free_coherent(pep->dev->dev.parent, pep->rx_desc_area_size,
+ pep->p_rx_desc_area, pep->rx_desc_dma);
+ kfree(pep->rx_skb);
+}
+
+static int txq_init(struct net_device *dev)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ struct tx_desc *p_tx_desc;
+ int size = 0, i = 0;
+ int tx_desc_num = pep->tx_ring_size;
+
+ pep->tx_skb = kcalloc(tx_desc_num, sizeof(*pep->tx_skb), GFP_KERNEL);
+ if (!pep->tx_skb)
+ return -ENOMEM;
+
+ /* Allocate TX ring */
+ pep->tx_desc_count = 0;
+ size = pep->tx_ring_size * sizeof(struct tx_desc);
+ pep->tx_desc_area_size = size;
+ pep->p_tx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
+ &pep->tx_desc_dma,
+ GFP_KERNEL);
+ if (!pep->p_tx_desc_area)
+ goto out;
+ /* Initialize the next_desc_ptr links in the Tx descriptors ring */
+ p_tx_desc = pep->p_tx_desc_area;
+ for (i = 0; i < tx_desc_num; i++) {
+ p_tx_desc[i].next_desc_ptr = pep->tx_desc_dma +
+ ((i + 1) % tx_desc_num) * sizeof(struct tx_desc);
+ }
+ pep->tx_curr_desc_q = 0;
+ pep->tx_used_desc_q = 0;
+ pep->tx_desc_area_size = tx_desc_num * sizeof(struct tx_desc);
+ return 0;
+out:
+ kfree(pep->tx_skb);
+ return -ENOMEM;
+}
+
+static void txq_deinit(struct net_device *dev)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+
+ /* Free outstanding skb's on TX ring */
+ txq_reclaim(dev, 1);
+ BUG_ON(pep->tx_used_desc_q != pep->tx_curr_desc_q);
+ /* Free TX ring */
+ if (pep->p_tx_desc_area)
+ dma_free_coherent(pep->dev->dev.parent, pep->tx_desc_area_size,
+ pep->p_tx_desc_area, pep->tx_desc_dma);
+ kfree(pep->tx_skb);
+}
+
+static int pxa168_eth_open(struct net_device *dev)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ int err;
+
+ err = pxa168_init_phy(dev);
+ if (err)
+ return err;
+
+ err = request_irq(dev->irq, pxa168_eth_int_handler, 0, dev->name, dev);
+ if (err) {
+ dev_err(&dev->dev, "can't assign irq\n");
+ return -EAGAIN;
+ }
+ pep->rx_resource_err = 0;
+ err = rxq_init(dev);
+ if (err != 0)
+ goto out_free_irq;
+ err = txq_init(dev);
+ if (err != 0)
+ goto out_free_rx_skb;
+ pep->rx_used_desc_q = 0;
+ pep->rx_curr_desc_q = 0;
+
+ /* Fill RX ring with skb's */
+ rxq_refill(dev);
+ pep->rx_used_desc_q = 0;
+ pep->rx_curr_desc_q = 0;
+ netif_carrier_off(dev);
+ napi_enable(&pep->napi);
+ eth_port_start(dev);
+ return 0;
+out_free_rx_skb:
+ rxq_deinit(dev);
+out_free_irq:
+ free_irq(dev->irq, dev);
+ return err;
+}
+
+static int pxa168_eth_stop(struct net_device *dev)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ eth_port_reset(dev);
+
+ /* Disable interrupts */
+ wrl(pep, INT_MASK, 0);
+ wrl(pep, INT_CAUSE, 0);
+ /* Write to ICR to clear interrupts. */
+ wrl(pep, INT_W_CLEAR, 0);
+ napi_disable(&pep->napi);
+ del_timer_sync(&pep->timeout);
+ netif_carrier_off(dev);
+ free_irq(dev->irq, dev);
+ rxq_deinit(dev);
+ txq_deinit(dev);
+
+ return 0;
+}
+
+static int pxa168_eth_change_mtu(struct net_device *dev, int mtu)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+
+ dev->mtu = mtu;
+ set_port_config_ext(pep);
+
+ if (!netif_running(dev))
+ return 0;
+
+ /*
+ * Stop and then re-open the interface. This will allocate RX
+ * skbs sized for the new MTU.
+ * There is a risk that the open will not succeed if the
+ * system is out of memory.
+ */
+ pxa168_eth_stop(dev);
+ if (pxa168_eth_open(dev)) {
+ dev_err(&dev->dev,
+ "fatal error on re-opening device after MTU change\n");
+ }
+
+ return 0;
+}
+
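+/* Claim the next free TX descriptor index and advance the current
+ * descriptor pointer; the ring must not be full (enforced by the BUG_ON).
+ */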
+static int eth_alloc_tx_desc_index(struct pxa168_eth_private *pep)
+{
+ int tx_desc_curr;
+
+ tx_desc_curr = pep->tx_curr_desc_q;
+ pep->tx_curr_desc_q = (tx_desc_curr + 1) % pep->tx_ring_size;
+ BUG_ON(pep->tx_curr_desc_q == pep->tx_used_desc_q);
+ pep->tx_desc_count++;
+
+ return tx_desc_curr;
+}
+
+static int pxa168_rx_poll(struct napi_struct *napi, int budget)
+{
+ struct pxa168_eth_private *pep =
+ container_of(napi, struct pxa168_eth_private, napi);
+ struct net_device *dev = pep->dev;
+ int work_done = 0;
+
+ /*
+ * We call txq_reclaim on every poll, since interrupts are disabled
+ * during NAPI and we would otherwise miss the TX_DONE interrupt,
+ * which is not reflected in the interrupt status register.
+ */
+ txq_reclaim(dev, 0);
+ if (netif_queue_stopped(dev)
+ && pep->tx_ring_size - pep->tx_desc_count > 1) {
+ netif_wake_queue(dev);
+ }
+ work_done = rxq_process(dev, budget);
+ if (work_done < budget) {
+ napi_complete_done(napi, work_done);
+ wrl(pep, INT_MASK, ALL_INTS);
+ }
+
+ return work_done;
+}
+
+static netdev_tx_t
+pxa168_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ struct net_device_stats *stats = &dev->stats;
+ struct tx_desc *desc;
+ int tx_index;
+ int length;
+
+ tx_index = eth_alloc_tx_desc_index(pep);
+ desc = &pep->p_tx_desc_area[tx_index];
+ length = skb->len;
+ pep->tx_skb[tx_index] = skb;
+ desc->byte_cnt = length;
+ desc->buf_ptr = dma_map_single(&pep->pdev->dev, skb->data, length,
+ DMA_TO_DEVICE);
+
+ skb_tx_timestamp(skb);
+
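+ /* Make sure the descriptor fields above are written out before
+ * ownership of the descriptor is handed to the DMA engine.
+ */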
+ dma_wmb();
+ desc->cmd_sts = BUF_OWNED_BY_DMA | TX_GEN_CRC | TX_FIRST_DESC |
+ TX_ZERO_PADDING | TX_LAST_DESC | TX_EN_INT;
+ wmb();
+ wrl(pep, SDMA_CMD, SDMA_CMD_TXDH | SDMA_CMD_ERD);
+
+ stats->tx_bytes += length;
+ stats->tx_packets++;
+ netif_trans_update(dev);
+ if (pep->tx_ring_size - pep->tx_desc_count <= 1) {
+ /* We handled the current skb, but now we are out of space. */
+ netif_stop_queue(dev);
+ }
+
+ return NETDEV_TX_OK;
+}
+
+static int smi_wait_ready(struct pxa168_eth_private *pep)
+{
+ int i = 0;
+
+ /* wait for the SMI register to become available */
+ for (i = 0; rdl(pep, SMI) & SMI_BUSY; i++) {
+ if (i == PHY_WAIT_ITERATIONS)
+ return -ETIMEDOUT;
+ msleep(10);
+ }
+
+ return 0;
+}
+
+static int pxa168_smi_read(struct mii_bus *bus, int phy_addr, int regnum)
+{
+ struct pxa168_eth_private *pep = bus->priv;
+ int i = 0;
+ int val;
+
+ if (smi_wait_ready(pep)) {
+ netdev_warn(pep->dev, "pxa168_eth: SMI bus busy timeout\n");
+ return -ETIMEDOUT;
+ }
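+ /* Issue the read: PHY address shifted to bit 16, register number
+ * shifted to bit 21, plus the read opcode.
+ */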
+ wrl(pep, SMI, (phy_addr << 16) | (regnum << 21) | SMI_OP_R);
+ /* now wait for the data to be valid */
+ for (i = 0; !((val = rdl(pep, SMI)) & SMI_R_VALID); i++) {
+ if (i == PHY_WAIT_ITERATIONS) {
+ netdev_warn(pep->dev,
+ "pxa168_eth: SMI bus read not valid\n");
+ return -ENODEV;
+ }
+ msleep(10);
+ }
+
+ return val & 0xffff;
+}
+
+static int pxa168_smi_write(struct mii_bus *bus, int phy_addr, int regnum,
+ u16 value)
+{
+ struct pxa168_eth_private *pep = bus->priv;
+
+ if (smi_wait_ready(pep)) {
+ netdev_warn(pep->dev, "pxa168_eth: SMI bus busy timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ wrl(pep, SMI, (phy_addr << 16) | (regnum << 21) |
+ SMI_OP_W | (value & 0xffff));
+
+ if (smi_wait_ready(pep)) {
+ netdev_err(pep->dev, "pxa168_eth: SMI bus busy timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void pxa168_eth_netpoll(struct net_device *dev)
+{
+ disable_irq(dev->irq);
+ pxa168_eth_int_handler(dev->irq, dev);
+ enable_irq(dev->irq);
+}
+#endif
+
+static void pxa168_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strscpy(info->driver, DRIVER_NAME, sizeof(info->driver));
+ strscpy(info->version, DRIVER_VERSION, sizeof(info->version));
+ strscpy(info->fw_version, "N/A", sizeof(info->fw_version));
+ strscpy(info->bus_info, "N/A", sizeof(info->bus_info));
+}
+
+static const struct ethtool_ops pxa168_ethtool_ops = {
+ .get_drvinfo = pxa168_get_drvinfo,
+ .nway_reset = phy_ethtool_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_ts_info = ethtool_op_get_ts_info,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
+};
+
+static const struct net_device_ops pxa168_eth_netdev_ops = {
+ .ndo_open = pxa168_eth_open,
+ .ndo_stop = pxa168_eth_stop,
+ .ndo_start_xmit = pxa168_eth_start_xmit,
+ .ndo_set_rx_mode = pxa168_eth_set_rx_mode,
+ .ndo_set_mac_address = pxa168_eth_set_mac_address,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_eth_ioctl = phy_do_ioctl,
+ .ndo_change_mtu = pxa168_eth_change_mtu,
+ .ndo_tx_timeout = pxa168_eth_tx_timeout,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = pxa168_eth_netpoll,
+#endif
+};
+
+static int pxa168_eth_probe(struct platform_device *pdev)
+{
+ struct pxa168_eth_private *pep = NULL;
+ struct net_device *dev = NULL;
+ struct clk *clk;
+ struct device_node *np;
+ int err;
+
+ printk(KERN_NOTICE "PXA168 10/100 Ethernet Driver\n");
+
+ clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "Fast Ethernet failed to get clock\n");
+ return -ENODEV;
+ }
+ clk_prepare_enable(clk);
+
+ dev = alloc_etherdev(sizeof(struct pxa168_eth_private));
+ if (!dev) {
+ err = -ENOMEM;
+ goto err_clk;
+ }
+
+ platform_set_drvdata(pdev, dev);
+ pep = netdev_priv(dev);
+ pep->dev = dev;
+ pep->clk = clk;
+
+ pep->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(pep->base)) {
+ err = PTR_ERR(pep->base);
+ goto err_netdev;
+ }
+
+ err = platform_get_irq(pdev, 0);
+ if (err == -EPROBE_DEFER)
+ goto err_netdev;
+ BUG_ON(err < 0);
+ dev->irq = err;
+ dev->netdev_ops = &pxa168_eth_netdev_ops;
+ dev->watchdog_timeo = 2 * HZ;
+ dev->base_addr = 0;
+ dev->ethtool_ops = &pxa168_ethtool_ops;
+
+ /* MTU range: 68 - 9500 */
+ dev->min_mtu = ETH_MIN_MTU;
+ dev->max_mtu = 9500;
+
+ INIT_WORK(&pep->tx_timeout_task, pxa168_eth_tx_timeout_task);
+
+ err = of_get_ethdev_address(pdev->dev.of_node, dev);
+ if (err) {
+ u8 addr[ETH_ALEN];
+
+ /* try reading the MAC address, if set by the bootloader */
+ pxa168_eth_get_mac_address(dev, addr);
+ if (is_valid_ether_addr(addr)) {
+ eth_hw_addr_set(dev, addr);
+ } else {
+ dev_info(&pdev->dev, "Using random mac address\n");
+ eth_hw_addr_random(dev);
+ }
+ }
+
+ pep->rx_ring_size = NUM_RX_DESCS;
+ pep->tx_ring_size = NUM_TX_DESCS;
+
+ pep->pd = dev_get_platdata(&pdev->dev);
+ if (pep->pd) {
+ if (pep->pd->rx_queue_size)
+ pep->rx_ring_size = pep->pd->rx_queue_size;
+
+ if (pep->pd->tx_queue_size)
+ pep->tx_ring_size = pep->pd->tx_queue_size;
+
+ pep->port_num = pep->pd->port_number;
+ pep->phy_addr = pep->pd->phy_addr;
+ pep->phy_speed = pep->pd->speed;
+ pep->phy_duplex = pep->pd->duplex;
+ pep->phy_intf = pep->pd->intf;
+
+ if (pep->pd->init)
+ pep->pd->init();
+ } else if (pdev->dev.of_node) {
+ of_property_read_u32(pdev->dev.of_node, "port-id",
+ &pep->port_num);
+
+ np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
+ if (!np) {
+ dev_err(&pdev->dev, "missing phy-handle\n");
+ err = -EINVAL;
+ goto err_netdev;
+ }
+ of_property_read_u32(np, "reg", &pep->phy_addr);
+ of_node_put(np);
+ err = of_get_phy_mode(pdev->dev.of_node, &pep->phy_intf);
+ if (err && err != -ENODEV)
+ goto err_netdev;
+ }
+
+ /* Hardware supports only 3 ports */
+ BUG_ON(pep->port_num > 2);
+ netif_napi_add_weight(dev, &pep->napi, pxa168_rx_poll,
+ pep->rx_ring_size);
+
+ memset(&pep->timeout, 0, sizeof(struct timer_list));
+ timer_setup(&pep->timeout, rxq_refill_timer_wrapper, 0);
+
+ pep->smi_bus = mdiobus_alloc();
+ if (!pep->smi_bus) {
+ err = -ENOMEM;
+ goto err_netdev;
+ }
+ pep->smi_bus->priv = pep;
+ pep->smi_bus->name = "pxa168_eth smi";
+ pep->smi_bus->read = pxa168_smi_read;
+ pep->smi_bus->write = pxa168_smi_write;
+ snprintf(pep->smi_bus->id, MII_BUS_ID_SIZE, "%s-%d",
+ pdev->name, pdev->id);
+ pep->smi_bus->parent = &pdev->dev;
+ pep->smi_bus->phy_mask = 0xffffffff;
+ err = mdiobus_register(pep->smi_bus);
+ if (err)
+ goto err_free_mdio;
+
+ pep->pdev = pdev;
+ SET_NETDEV_DEV(dev, &pdev->dev);
+ pxa168_init_hw(pep);
+ err = register_netdev(dev);
+ if (err)
+ goto err_mdiobus;
+ return 0;
+
+err_mdiobus:
+ mdiobus_unregister(pep->smi_bus);
+err_free_mdio:
+ mdiobus_free(pep->smi_bus);
+err_netdev:
+ free_netdev(dev);
+err_clk:
+ clk_disable_unprepare(clk);
+ return err;
+}
+
+static int pxa168_eth_remove(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+
+ cancel_work_sync(&pep->tx_timeout_task);
+ if (pep->htpr) {
+ dma_free_coherent(pep->dev->dev.parent, HASH_ADDR_TABLE_SIZE,
+ pep->htpr, pep->htpr_dma);
+ pep->htpr = NULL;
+ }
+ if (dev->phydev)
+ phy_disconnect(dev->phydev);
+
+ clk_disable_unprepare(pep->clk);
+ mdiobus_unregister(pep->smi_bus);
+ mdiobus_free(pep->smi_bus);
+ unregister_netdev(dev);
+ free_netdev(dev);
+ return 0;
+}
+
+static void pxa168_eth_shutdown(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ eth_port_reset(dev);
+}
+
+#ifdef CONFIG_PM
+static int pxa168_eth_resume(struct platform_device *pdev)
+{
+ return -ENOSYS;
+}
+
+static int pxa168_eth_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ return -ENOSYS;
+}
+
+#else
+#define pxa168_eth_resume NULL
+#define pxa168_eth_suspend NULL
+#endif
+
+static const struct of_device_id pxa168_eth_of_match[] = {
+ { .compatible = "marvell,pxa168-eth" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, pxa168_eth_of_match);
+
+static struct platform_driver pxa168_eth_driver = {
+ .probe = pxa168_eth_probe,
+ .remove = pxa168_eth_remove,
+ .shutdown = pxa168_eth_shutdown,
+ .resume = pxa168_eth_resume,
+ .suspend = pxa168_eth_suspend,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = pxa168_eth_of_match,
+ },
+};
+
+module_platform_driver(pxa168_eth_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Ethernet driver for Marvell PXA168");
+MODULE_ALIAS("platform:pxa168_eth");
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
new file mode 100644
index 000000000..1b43704ba
--- /dev/null
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -0,0 +1,4195 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * New driver for Marvell Yukon chipset and SysKonnect Gigabit
+ * Ethernet adapters. Based on earlier sk98lin, e100 and
+ * FreeBSD if_sk drivers.
+ *
+ * This driver intentionally does not support all the features
+ * of the original driver such as link fail-over and link management because
+ * those should be done at higher levels.
+ *
+ * Copyright (C) 2004, 2005 Stephen Hemminger <shemminger@osdl.org>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/in.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/pci.h>
+#include <linux/if_vlan.h>
+#include <linux/ip.h>
+#include <linux/delay.h>
+#include <linux/crc32.h>
+#include <linux/dma-mapping.h>
+#include <linux/debugfs.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/mii.h>
+#include <linux/slab.h>
+#include <linux/dmi.h>
+#include <linux/prefetch.h>
+#include <asm/irq.h>
+
+#include "skge.h"
+
+#define DRV_NAME "skge"
+#define DRV_VERSION "1.14"
+
+#define DEFAULT_TX_RING_SIZE 128
+#define DEFAULT_RX_RING_SIZE 512
+#define MAX_TX_RING_SIZE 1024
+#define TX_LOW_WATER (MAX_SKB_FRAGS + 1)
+#define MAX_RX_RING_SIZE 4096
+#define RX_COPY_THRESHOLD 128
+#define RX_BUF_SIZE 1536
+#define PHY_RETRIES 1000
+#define ETH_JUMBO_MTU 9000
+#define TX_WATCHDOG (5 * HZ)
+#define BLINK_MS 250
+#define LINK_HZ HZ
+
+#define SKGE_EEPROM_MAGIC 0x9933aabb
+
+
+MODULE_DESCRIPTION("SysKonnect Gigabit Ethernet driver");
+MODULE_AUTHOR("Stephen Hemminger <shemminger@linux-foundation.org>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+static const u32 default_msg = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
+ NETIF_MSG_LINK | NETIF_MSG_IFUP |
+ NETIF_MSG_IFDOWN);
+
+static int debug = -1; /* defaults above */
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+
+static const struct pci_device_id skge_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_3COM, 0x1700) }, /* 3Com 3C940 */
+ { PCI_DEVICE(PCI_VENDOR_ID_3COM, 0x80EB) }, /* 3Com 3C940B */
+#ifdef CONFIG_SKGE_GENESIS
+ { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x4300) }, /* SK-9xx */
+#endif
+ { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x4320) }, /* SK-98xx V2.0 */
+ { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b01) }, /* D-Link DGE-530T (rev.B) */
+ { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4c00) }, /* D-Link DGE-530T */
+ { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4302) }, /* D-Link DGE-530T Rev C1 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4320) }, /* Marvell Yukon 88E8001/8003/8010 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5005) }, /* Belkin */
+ { PCI_DEVICE(PCI_VENDOR_ID_CNET, 0x434E) }, /* CNet PowerG-2000 */
+ { PCI_DEVICE(PCI_VENDOR_ID_LINKSYS, 0x1064) }, /* Linksys EG1064 v2 */
+ { PCI_VENDOR_ID_LINKSYS, 0x1032, PCI_ANY_ID, 0x0015 }, /* Linksys EG1032 v2 */
+ { 0 }
+};
+MODULE_DEVICE_TABLE(pci, skge_id_table);
+
+static int skge_up(struct net_device *dev);
+static int skge_down(struct net_device *dev);
+static void skge_phy_reset(struct skge_port *skge);
+static void skge_tx_clean(struct net_device *dev);
+static int xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val);
+static int gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val);
+static void genesis_get_stats(struct skge_port *skge, u64 *data);
+static void yukon_get_stats(struct skge_port *skge, u64 *data);
+static void yukon_init(struct skge_hw *hw, int port);
+static void genesis_mac_init(struct skge_hw *hw, int port);
+static void genesis_link_up(struct skge_port *skge);
+static void skge_set_multicast(struct net_device *dev);
+static irqreturn_t skge_intr(int irq, void *dev_id);
+
+/* Avoid conditionals by using array */
+static const int txqaddr[] = { Q_XA1, Q_XA2 };
+static const int rxqaddr[] = { Q_R1, Q_R2 };
+static const u32 rxirqmask[] = { IS_R1_F, IS_R2_F };
+static const u32 txirqmask[] = { IS_XA1_F, IS_XA2_F };
+static const u32 napimask[] = { IS_R1_F|IS_XA1_F, IS_R2_F|IS_XA2_F };
+static const u32 portmask[] = { IS_PORT_1, IS_PORT_2 };
+
+static inline bool is_genesis(const struct skge_hw *hw)
+{
+#ifdef CONFIG_SKGE_GENESIS
+ return hw->chip_id == CHIP_ID_GENESIS;
+#else
+ return false;
+#endif
+}
+
+static int skge_get_regs_len(struct net_device *dev)
+{
+ return 0x4000;
+}
+
+/*
+ * Returns copy of whole control register region
+ * Note: skip RAM address register because accessing it will
+ * cause bus hangs!
+ */
+static void skge_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+ void *p)
+{
+ const struct skge_port *skge = netdev_priv(dev);
+ const void __iomem *io = skge->hw->regs;
+
+ regs->version = 1;
+ memset(p, 0, regs->len);
+ memcpy_fromio(p, io, B3_RAM_ADDR);
+
+ if (regs->len > B3_RI_WTO_R1) {
+ memcpy_fromio(p + B3_RI_WTO_R1, io + B3_RI_WTO_R1,
+ regs->len - B3_RI_WTO_R1);
+ }
+}
+
+/* Wake-on-LAN is only supported on Yukon chips with rev 1 or above */
+static u32 wol_supported(const struct skge_hw *hw)
+{
+ if (is_genesis(hw))
+ return 0;
+
+ if (hw->chip_id == CHIP_ID_YUKON && hw->chip_rev == 0)
+ return 0;
+
+ return WAKE_MAGIC | WAKE_PHY;
+}
+
+static void skge_wol_init(struct skge_port *skge)
+{
+ struct skge_hw *hw = skge->hw;
+ int port = skge->port;
+ u16 ctrl;
+
+ skge_write16(hw, B0_CTST, CS_RST_CLR);
+ skge_write16(hw, SK_REG(port, GMAC_LINK_CTRL), GMLC_RST_CLR);
+
+ /* Turn on Vaux */
+ skge_write8(hw, B0_POWER_CTRL,
+ PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_ON | PC_VCC_OFF);
+
+ /* Workaround for COMA mode -- clear PHY reset */
+ if (hw->chip_id == CHIP_ID_YUKON_LITE &&
+ hw->chip_rev >= CHIP_REV_YU_LITE_A3) {
+ u32 reg = skge_read32(hw, B2_GP_IO);
+ reg |= GP_DIR_9;
+ reg &= ~GP_IO_9;
+ skge_write32(hw, B2_GP_IO, reg);
+ }
+
+ skge_write32(hw, SK_REG(port, GPHY_CTRL),
+ GPC_DIS_SLEEP |
+ GPC_HWCFG_M_3 | GPC_HWCFG_M_2 | GPC_HWCFG_M_1 | GPC_HWCFG_M_0 |
+ GPC_ANEG_1 | GPC_RST_SET);
+
+ skge_write32(hw, SK_REG(port, GPHY_CTRL),
+ GPC_DIS_SLEEP |
+ GPC_HWCFG_M_3 | GPC_HWCFG_M_2 | GPC_HWCFG_M_1 | GPC_HWCFG_M_0 |
+ GPC_ANEG_1 | GPC_RST_CLR);
+
+ skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR);
+
+ /* Force to 10/100; skge_reset will re-enable on resume */
+ gm_phy_write(hw, port, PHY_MARV_AUNE_ADV,
+ (PHY_AN_100FULL | PHY_AN_100HALF |
+ PHY_AN_10FULL | PHY_AN_10HALF | PHY_AN_CSMA));
+ /* no 1000 HD/FD */
+ gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, 0);
+ gm_phy_write(hw, port, PHY_MARV_CTRL,
+ PHY_CT_RESET | PHY_CT_SPS_LSB | PHY_CT_ANE |
+ PHY_CT_RE_CFG | PHY_CT_DUP_MD);
+
+
+ /* Set GMAC to no flow control and auto update for speed/duplex */
+ gma_write16(hw, port, GM_GP_CTRL,
+ GM_GPCR_FC_TX_DIS|GM_GPCR_TX_ENA|GM_GPCR_RX_ENA|
+ GM_GPCR_DUP_FULL|GM_GPCR_FC_RX_DIS|GM_GPCR_AU_FCT_DIS);
+
+ /* Set WOL address */
+ memcpy_toio(hw->regs + WOL_REGS(port, WOL_MAC_ADDR),
+ skge->netdev->dev_addr, ETH_ALEN);
+
+ /* Turn on appropriate WOL control bits */
+ skge_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), WOL_CTL_CLEAR_RESULT);
+ ctrl = 0;
+ if (skge->wol & WAKE_PHY)
+ ctrl |= WOL_CTL_ENA_PME_ON_LINK_CHG|WOL_CTL_ENA_LINK_CHG_UNIT;
+ else
+ ctrl |= WOL_CTL_DIS_PME_ON_LINK_CHG|WOL_CTL_DIS_LINK_CHG_UNIT;
+
+ if (skge->wol & WAKE_MAGIC)
+ ctrl |= WOL_CTL_ENA_PME_ON_MAGIC_PKT|WOL_CTL_ENA_MAGIC_PKT_UNIT;
+ else
+ ctrl |= WOL_CTL_DIS_PME_ON_MAGIC_PKT|WOL_CTL_DIS_MAGIC_PKT_UNIT;
+
+ ctrl |= WOL_CTL_DIS_PME_ON_PATTERN|WOL_CTL_DIS_PATTERN_UNIT;
+ skge_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), ctrl);
+
+ /* block receiver */
+ skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
+}
+
+static void skge_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct skge_port *skge = netdev_priv(dev);
+
+ wol->supported = wol_supported(skge->hw);
+ wol->wolopts = skge->wol;
+}
+
+static int skge_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct skge_port *skge = netdev_priv(dev);
+ struct skge_hw *hw = skge->hw;
+
+ if ((wol->wolopts & ~wol_supported(hw)) ||
+ !device_can_wakeup(&hw->pdev->dev))
+ return -EOPNOTSUPP;
+
+ skge->wol = wol->wolopts;
+
+ device_set_wakeup_enable(&hw->pdev->dev, skge->wol);
+
+ return 0;
+}
+
+/* Determine supported/advertised modes based on hardware.
+ * Note: ethtool ADVERTISED_xxx == SUPPORTED_xxx
+ */
+static u32 skge_supported_modes(const struct skge_hw *hw)
+{
+ u32 supported;
+
+ if (hw->copper) {
+ supported = (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Half |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_Autoneg |
+ SUPPORTED_TP);
+
+ if (is_genesis(hw))
+ supported &= ~(SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full);
+
+ else if (hw->chip_id == CHIP_ID_YUKON)
+ supported &= ~SUPPORTED_1000baseT_Half;
+ } else
+ supported = (SUPPORTED_1000baseT_Full |
+ SUPPORTED_1000baseT_Half |
+ SUPPORTED_FIBRE |
+ SUPPORTED_Autoneg);
+
+ return supported;
+}
+
+static int skge_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct skge_port *skge = netdev_priv(dev);
+ struct skge_hw *hw = skge->hw;
+ u32 supported, advertising;
+
+ supported = skge_supported_modes(hw);
+
+ if (hw->copper) {
+ cmd->base.port = PORT_TP;
+ cmd->base.phy_address = hw->phy_addr;
+ } else
+ cmd->base.port = PORT_FIBRE;
+
+ advertising = skge->advertising;
+ cmd->base.autoneg = skge->autoneg;
+ cmd->base.speed = skge->speed;
+ cmd->base.duplex = skge->duplex;
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
+
+ return 0;
+}
+
+static int skge_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct skge_port *skge = netdev_priv(dev);
+ const struct skge_hw *hw = skge->hw;
+ u32 supported = skge_supported_modes(hw);
+ int err = 0;
+ u32 advertising;
+
+ ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ cmd->link_modes.advertising);
+
+ if (cmd->base.autoneg == AUTONEG_ENABLE) {
+ advertising = supported;
+ skge->duplex = -1;
+ skge->speed = -1;
+ } else {
+ u32 setting;
+ u32 speed = cmd->base.speed;
+
+ switch (speed) {
+ case SPEED_1000:
+ if (cmd->base.duplex == DUPLEX_FULL)
+ setting = SUPPORTED_1000baseT_Full;
+ else if (cmd->base.duplex == DUPLEX_HALF)
+ setting = SUPPORTED_1000baseT_Half;
+ else
+ return -EINVAL;
+ break;
+ case SPEED_100:
+ if (cmd->base.duplex == DUPLEX_FULL)
+ setting = SUPPORTED_100baseT_Full;
+ else if (cmd->base.duplex == DUPLEX_HALF)
+ setting = SUPPORTED_100baseT_Half;
+ else
+ return -EINVAL;
+ break;
+
+ case SPEED_10:
+ if (cmd->base.duplex == DUPLEX_FULL)
+ setting = SUPPORTED_10baseT_Full;
+ else if (cmd->base.duplex == DUPLEX_HALF)
+ setting = SUPPORTED_10baseT_Half;
+ else
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if ((setting & supported) == 0)
+ return -EINVAL;
+
+ skge->speed = speed;
+ skge->duplex = cmd->base.duplex;
+ }
+
+ skge->autoneg = cmd->base.autoneg;
+ skge->advertising = advertising;
+
+ if (netif_running(dev)) {
+ skge_down(dev);
+ err = skge_up(dev);
+ if (err) {
+ dev_close(dev);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static void skge_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ struct skge_port *skge = netdev_priv(dev);
+
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, pci_name(skge->hw->pdev),
+ sizeof(info->bus_info));
+}
+
+static const struct skge_stat {
+ char name[ETH_GSTRING_LEN];
+ u16 xmac_offset;
+ u16 gma_offset;
+} skge_stats[] = {
+ { "tx_bytes", XM_TXO_OK_HI, GM_TXO_OK_HI },
+ { "rx_bytes", XM_RXO_OK_HI, GM_RXO_OK_HI },
+
+ { "tx_broadcast", XM_TXF_BC_OK, GM_TXF_BC_OK },
+ { "rx_broadcast", XM_RXF_BC_OK, GM_RXF_BC_OK },
+ { "tx_multicast", XM_TXF_MC_OK, GM_TXF_MC_OK },
+ { "rx_multicast", XM_RXF_MC_OK, GM_RXF_MC_OK },
+ { "tx_unicast", XM_TXF_UC_OK, GM_TXF_UC_OK },
+ { "rx_unicast", XM_RXF_UC_OK, GM_RXF_UC_OK },
+ { "tx_mac_pause", XM_TXF_MPAUSE, GM_TXF_MPAUSE },
+ { "rx_mac_pause", XM_RXF_MPAUSE, GM_RXF_MPAUSE },
+
+ { "collisions", XM_TXF_SNG_COL, GM_TXF_SNG_COL },
+ { "multi_collisions", XM_TXF_MUL_COL, GM_TXF_MUL_COL },
+ { "aborted", XM_TXF_ABO_COL, GM_TXF_ABO_COL },
+ { "late_collision", XM_TXF_LAT_COL, GM_TXF_LAT_COL },
+ { "fifo_underrun", XM_TXE_FIFO_UR, GM_TXE_FIFO_UR },
+ { "fifo_overflow", XM_RXE_FIFO_OV, GM_RXE_FIFO_OV },
+
+ { "rx_toolong", XM_RXF_LNG_ERR, GM_RXF_LNG_ERR },
+ { "rx_jabber", XM_RXF_JAB_PKT, GM_RXF_JAB_PKT },
+ { "rx_runt", XM_RXE_RUNT, GM_RXE_FRAG },
+ { "rx_too_long", XM_RXF_LNG_ERR, GM_RXF_LNG_ERR },
+ { "rx_fcs_error", XM_RXF_FCS_ERR, GM_RXF_FCS_ERR },
+};
+
+static int skge_get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(skge_stats);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void skge_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct skge_port *skge = netdev_priv(dev);
+
+ if (is_genesis(skge->hw))
+ genesis_get_stats(skge, data);
+ else
+ yukon_get_stats(skge, data);
+}
+
+/* Use the hardware MIB counters for critical-path statistics and for
+ * transmit feedback that is not reported at interrupt time.
+ * Other errors are accounted for in the interrupt handler.
+ */
+static struct net_device_stats *skge_get_stats(struct net_device *dev)
+{
+ struct skge_port *skge = netdev_priv(dev);
+ u64 data[ARRAY_SIZE(skge_stats)];
+
+ if (is_genesis(skge->hw))
+ genesis_get_stats(skge, data);
+ else
+ yukon_get_stats(skge, data);
+
+ dev->stats.tx_bytes = data[0];
+ dev->stats.rx_bytes = data[1];
+ dev->stats.tx_packets = data[2] + data[4] + data[6];
+ dev->stats.rx_packets = data[3] + data[5] + data[7];
+ dev->stats.multicast = data[3] + data[5];
+ dev->stats.collisions = data[10];
+ dev->stats.tx_aborted_errors = data[12];
+
+ return &dev->stats;
+}
+
+static void skge_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < ARRAY_SIZE(skge_stats); i++)
+ memcpy(data + i * ETH_GSTRING_LEN,
+ skge_stats[i].name, ETH_GSTRING_LEN);
+ break;
+ }
+}
+
+static void skge_get_ring_param(struct net_device *dev,
+ struct ethtool_ringparam *p,
+ struct kernel_ethtool_ringparam *kernel_p,
+ struct netlink_ext_ack *extack)
+{
+ struct skge_port *skge = netdev_priv(dev);
+
+ p->rx_max_pending = MAX_RX_RING_SIZE;
+ p->tx_max_pending = MAX_TX_RING_SIZE;
+
+ p->rx_pending = skge->rx_ring.count;
+ p->tx_pending = skge->tx_ring.count;
+}
+
+static int skge_set_ring_param(struct net_device *dev,
+ struct ethtool_ringparam *p,
+ struct kernel_ethtool_ringparam *kernel_p,
+ struct netlink_ext_ack *extack)
+{
+ struct skge_port *skge = netdev_priv(dev);
+ int err = 0;
+
+ if (p->rx_pending == 0 || p->rx_pending > MAX_RX_RING_SIZE ||
+ p->tx_pending < TX_LOW_WATER || p->tx_pending > MAX_TX_RING_SIZE)
+ return -EINVAL;
+
+ skge->rx_ring.count = p->rx_pending;
+ skge->tx_ring.count = p->tx_pending;
+
+ if (netif_running(dev)) {
+ skge_down(dev);
+ err = skge_up(dev);
+ if (err)
+ dev_close(dev);
+ }
+
+ return err;
+}
+
+static u32 skge_get_msglevel(struct net_device *netdev)
+{
+ struct skge_port *skge = netdev_priv(netdev);
+ return skge->msg_enable;
+}
+
+static void skge_set_msglevel(struct net_device *netdev, u32 value)
+{
+ struct skge_port *skge = netdev_priv(netdev);
+ skge->msg_enable = value;
+}
+
+static int skge_nway_reset(struct net_device *dev)
+{
+ struct skge_port *skge = netdev_priv(dev);
+
+ if (skge->autoneg != AUTONEG_ENABLE || !netif_running(dev))
+ return -EINVAL;
+
+ skge_phy_reset(skge);
+ return 0;
+}
+
+static void skge_get_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *ecmd)
+{
+ struct skge_port *skge = netdev_priv(dev);
+
+ ecmd->rx_pause = ((skge->flow_control == FLOW_MODE_SYMMETRIC) ||
+ (skge->flow_control == FLOW_MODE_SYM_OR_REM));
+ ecmd->tx_pause = (ecmd->rx_pause ||
+ (skge->flow_control == FLOW_MODE_LOC_SEND));
+
+ ecmd->autoneg = ecmd->rx_pause || ecmd->tx_pause;
+}
+
+static int skge_set_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *ecmd)
+{
+ struct skge_port *skge = netdev_priv(dev);
+ struct ethtool_pauseparam old;
+ int err = 0;
+
+ skge_get_pauseparam(dev, &old);
+
+ if (ecmd->autoneg != old.autoneg)
+ skge->flow_control = ecmd->autoneg ? FLOW_MODE_NONE : FLOW_MODE_SYMMETRIC;
+ else {
+ if (ecmd->rx_pause && ecmd->tx_pause)
+ skge->flow_control = FLOW_MODE_SYMMETRIC;
+ else if (ecmd->rx_pause && !ecmd->tx_pause)
+ skge->flow_control = FLOW_MODE_SYM_OR_REM;
+ else if (!ecmd->rx_pause && ecmd->tx_pause)
+ skge->flow_control = FLOW_MODE_LOC_SEND;
+ else
+ skge->flow_control = FLOW_MODE_NONE;
+ }
+
+ if (netif_running(dev)) {
+ skge_down(dev);
+ err = skge_up(dev);
+ if (err) {
+ dev_close(dev);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+/* Chip internal frequency for clock calculations */
+static inline u32 hwkhz(const struct skge_hw *hw)
+{
+ return is_genesis(hw) ? 53125 : 78125;
+}
+
+/* Chip HZ to microseconds */
+static inline u32 skge_clk2usec(const struct skge_hw *hw, u32 ticks)
+{
+ return (ticks * 1000) / hwkhz(hw);
+}
+
+/* Microseconds to chip HZ */
+static inline u32 skge_usecs2clk(const struct skge_hw *hw, u32 usec)
+{
+ return hwkhz(hw) * usec / 1000;
+}
+
+static int skge_get_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *ecmd,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct skge_port *skge = netdev_priv(dev);
+ struct skge_hw *hw = skge->hw;
+ int port = skge->port;
+
+ ecmd->rx_coalesce_usecs = 0;
+ ecmd->tx_coalesce_usecs = 0;
+
+ if (skge_read32(hw, B2_IRQM_CTRL) & TIM_START) {
+ u32 delay = skge_clk2usec(hw, skge_read32(hw, B2_IRQM_INI));
+ u32 msk = skge_read32(hw, B2_IRQM_MSK);
+
+ if (msk & rxirqmask[port])
+ ecmd->rx_coalesce_usecs = delay;
+ if (msk & txirqmask[port])
+ ecmd->tx_coalesce_usecs = delay;
+ }
+
+ return 0;
+}
+
+/* Note: the interrupt timer is per board, but it can be turned on/off per port */
+static int skge_set_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *ecmd,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct skge_port *skge = netdev_priv(dev);
+ struct skge_hw *hw = skge->hw;
+ int port = skge->port;
+ u32 msk = skge_read32(hw, B2_IRQM_MSK);
+ u32 delay = 25;
+
+ if (ecmd->rx_coalesce_usecs == 0)
+ msk &= ~rxirqmask[port];
+ else if (ecmd->rx_coalesce_usecs < 25 ||
+ ecmd->rx_coalesce_usecs > 33333)
+ return -EINVAL;
+ else {
+ msk |= rxirqmask[port];
+ delay = ecmd->rx_coalesce_usecs;
+ }
+
+ if (ecmd->tx_coalesce_usecs == 0)
+ msk &= ~txirqmask[port];
+ else if (ecmd->tx_coalesce_usecs < 25 ||
+ ecmd->tx_coalesce_usecs > 33333)
+ return -EINVAL;
+ else {
+ msk |= txirqmask[port];
+ delay = min(delay, ecmd->rx_coalesce_usecs);
+ }
+
+ skge_write32(hw, B2_IRQM_MSK, msk);
+ if (msk == 0)
+ skge_write32(hw, B2_IRQM_CTRL, TIM_STOP);
+ else {
+ skge_write32(hw, B2_IRQM_INI, skge_usecs2clk(hw, delay));
+ skge_write32(hw, B2_IRQM_CTRL, TIM_START);
+ }
+ return 0;
+}
+
+enum led_mode { LED_MODE_OFF, LED_MODE_ON, LED_MODE_TST };
+static void skge_led(struct skge_port *skge, enum led_mode mode)
+{
+ struct skge_hw *hw = skge->hw;
+ int port = skge->port;
+
+ spin_lock_bh(&hw->phy_lock);
+ if (is_genesis(hw)) {
+ switch (mode) {
+ case LED_MODE_OFF:
+ if (hw->phy_type == SK_PHY_BCOM)
+ xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, PHY_B_PEC_LED_OFF);
+ else {
+ skge_write32(hw, SK_REG(port, TX_LED_VAL), 0);
+ skge_write8(hw, SK_REG(port, TX_LED_CTRL), LED_T_OFF);
+ }
+ skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_OFF);
+ skge_write32(hw, SK_REG(port, RX_LED_VAL), 0);
+ skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_T_OFF);
+ break;
+
+ case LED_MODE_ON:
+ skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_ON);
+ skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_LINKSYNC_ON);
+
+ skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_START);
+ skge_write8(hw, SK_REG(port, TX_LED_CTRL), LED_START);
+
+ break;
+
+ case LED_MODE_TST:
+ skge_write8(hw, SK_REG(port, RX_LED_TST), LED_T_ON);
+ skge_write32(hw, SK_REG(port, RX_LED_VAL), 100);
+ skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_START);
+
+ if (hw->phy_type == SK_PHY_BCOM)
+ xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, PHY_B_PEC_LED_ON);
+ else {
+ skge_write8(hw, SK_REG(port, TX_LED_TST), LED_T_ON);
+ skge_write32(hw, SK_REG(port, TX_LED_VAL), 100);
+ skge_write8(hw, SK_REG(port, TX_LED_CTRL), LED_START);
+ }
+
+ }
+ } else {
+ switch (mode) {
+ case LED_MODE_OFF:
+ gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 0);
+ gm_phy_write(hw, port, PHY_MARV_LED_OVER,
+ PHY_M_LED_MO_DUP(MO_LED_OFF) |
+ PHY_M_LED_MO_10(MO_LED_OFF) |
+ PHY_M_LED_MO_100(MO_LED_OFF) |
+ PHY_M_LED_MO_1000(MO_LED_OFF) |
+ PHY_M_LED_MO_RX(MO_LED_OFF));
+ break;
+ case LED_MODE_ON:
+ gm_phy_write(hw, port, PHY_MARV_LED_CTRL,
+ PHY_M_LED_PULS_DUR(PULS_170MS) |
+ PHY_M_LED_BLINK_RT(BLINK_84MS) |
+ PHY_M_LEDC_TX_CTRL |
+ PHY_M_LEDC_DP_CTRL);
+
+ gm_phy_write(hw, port, PHY_MARV_LED_OVER,
+ PHY_M_LED_MO_RX(MO_LED_OFF) |
+ (skge->speed == SPEED_100 ?
+ PHY_M_LED_MO_100(MO_LED_ON) : 0));
+ break;
+ case LED_MODE_TST:
+ gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 0);
+ gm_phy_write(hw, port, PHY_MARV_LED_OVER,
+ PHY_M_LED_MO_DUP(MO_LED_ON) |
+ PHY_M_LED_MO_10(MO_LED_ON) |
+ PHY_M_LED_MO_100(MO_LED_ON) |
+ PHY_M_LED_MO_1000(MO_LED_ON) |
+ PHY_M_LED_MO_RX(MO_LED_ON));
+ }
+ }
+ spin_unlock_bh(&hw->phy_lock);
+}
+
+/* blink LEDs to locate the board */
+static int skge_set_phys_id(struct net_device *dev,
+ enum ethtool_phys_id_state state)
+{
+ struct skge_port *skge = netdev_priv(dev);
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ return 2; /* cycle on/off twice per second */
+
+ case ETHTOOL_ID_ON:
+ skge_led(skge, LED_MODE_TST);
+ break;
+
+ case ETHTOOL_ID_OFF:
+ skge_led(skge, LED_MODE_OFF);
+ break;
+
+ case ETHTOOL_ID_INACTIVE:
+ /* back to regular LED state */
+ skge_led(skge, netif_running(dev) ? LED_MODE_ON : LED_MODE_OFF);
+ }
+
+ return 0;
+}
+
+static int skge_get_eeprom_len(struct net_device *dev)
+{
+ struct skge_port *skge = netdev_priv(dev);
+ u32 reg2;
+
+ pci_read_config_dword(skge->hw->pdev, PCI_DEV_REG2, &reg2);
+ return 1 << (((reg2 & PCI_VPD_ROM_SZ) >> 14) + 8);
+}
+
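+/* Read one 32-bit word of VPD data: write the address, poll the address
+ * register until the completion flag is set, then read the data register.
+ */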
+static u32 skge_vpd_read(struct pci_dev *pdev, int cap, u16 offset)
+{
+ u32 val;
+
+ pci_write_config_word(pdev, cap + PCI_VPD_ADDR, offset);
+
+ do {
+ pci_read_config_word(pdev, cap + PCI_VPD_ADDR, &offset);
+ } while (!(offset & PCI_VPD_ADDR_F));
+
+ pci_read_config_dword(pdev, cap + PCI_VPD_DATA, &val);
+ return val;
+}
+
+static void skge_vpd_write(struct pci_dev *pdev, int cap, u16 offset, u32 val)
+{
+ pci_write_config_dword(pdev, cap + PCI_VPD_DATA, val);
+ pci_write_config_word(pdev, cap + PCI_VPD_ADDR,
+ offset | PCI_VPD_ADDR_F);
+
+ do {
+ pci_read_config_word(pdev, cap + PCI_VPD_ADDR, &offset);
+ } while (offset & PCI_VPD_ADDR_F);
+}
+
+static int skge_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
+ u8 *data)
+{
+ struct skge_port *skge = netdev_priv(dev);
+ struct pci_dev *pdev = skge->hw->pdev;
+ int cap = pci_find_capability(pdev, PCI_CAP_ID_VPD);
+ int length = eeprom->len;
+ u16 offset = eeprom->offset;
+
+ if (!cap)
+ return -EINVAL;
+
+ eeprom->magic = SKGE_EEPROM_MAGIC;
+
+ while (length > 0) {
+ u32 val = skge_vpd_read(pdev, cap, offset);
+ int n = min_t(int, length, sizeof(val));
+
+ memcpy(data, &val, n);
+ length -= n;
+ data += n;
+ offset += n;
+ }
+ return 0;
+}
+
+static int skge_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
+ u8 *data)
+{
+ struct skge_port *skge = netdev_priv(dev);
+ struct pci_dev *pdev = skge->hw->pdev;
+ int cap = pci_find_capability(pdev, PCI_CAP_ID_VPD);
+ int length = eeprom->len;
+ u16 offset = eeprom->offset;
+
+ if (!cap)
+ return -EINVAL;
+
+ if (eeprom->magic != SKGE_EEPROM_MAGIC)
+ return -EINVAL;
+
+ while (length > 0) {
+ u32 val;
+ int n = min_t(int, length, sizeof(val));
+
+ if (n < sizeof(val))
+ val = skge_vpd_read(pdev, cap, offset);
+ memcpy(&val, data, n);
+
+ skge_vpd_write(pdev, cap, offset, val);
+
+ length -= n;
+ data += n;
+ offset += n;
+ }
+ return 0;
+}
+
+static const struct ethtool_ops skge_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS,
+ .get_drvinfo = skge_get_drvinfo,
+ .get_regs_len = skge_get_regs_len,
+ .get_regs = skge_get_regs,
+ .get_wol = skge_get_wol,
+ .set_wol = skge_set_wol,
+ .get_msglevel = skge_get_msglevel,
+ .set_msglevel = skge_set_msglevel,
+ .nway_reset = skge_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_eeprom_len = skge_get_eeprom_len,
+ .get_eeprom = skge_get_eeprom,
+ .set_eeprom = skge_set_eeprom,
+ .get_ringparam = skge_get_ring_param,
+ .set_ringparam = skge_set_ring_param,
+ .get_pauseparam = skge_get_pauseparam,
+ .set_pauseparam = skge_set_pauseparam,
+ .get_coalesce = skge_get_coalesce,
+ .set_coalesce = skge_set_coalesce,
+ .get_strings = skge_get_strings,
+ .set_phys_id = skge_set_phys_id,
+ .get_sset_count = skge_get_sset_count,
+ .get_ethtool_stats = skge_get_ethtool_stats,
+ .get_link_ksettings = skge_get_link_ksettings,
+ .set_link_ksettings = skge_set_link_ksettings,
+};
+
+/*
+ * Allocate ring elements and chain them together
+ * One-to-one association of board descriptors with ring elements
+ */
+static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u32 base)
+{
+ struct skge_tx_desc *d;
+ struct skge_element *e;
+ int i;
+
+ ring->start = kcalloc(ring->count, sizeof(*e), GFP_KERNEL);
+ if (!ring->start)
+ return -ENOMEM;
+
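+ /* Point each software element at its hardware descriptor and chain
+ * the descriptors into a circular list via next_offset.
+ */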
+ for (i = 0, e = ring->start, d = vaddr; i < ring->count; i++, e++, d++) {
+ e->desc = d;
+ if (i == ring->count - 1) {
+ e->next = ring->start;
+ d->next_offset = base;
+ } else {
+ e->next = e + 1;
+ d->next_offset = base + (i+1) * sizeof(*d);
+ }
+ }
+ ring->to_use = ring->to_clean = ring->start;
+
+ return 0;
+}
+
+/* Allocate and setup a new buffer for receiving */
+static int skge_rx_setup(struct skge_port *skge, struct skge_element *e,
+ struct sk_buff *skb, unsigned int bufsize)
+{
+ struct skge_rx_desc *rd = e->desc;
+ dma_addr_t map;
+
+ map = dma_map_single(&skge->hw->pdev->dev, skb->data, bufsize,
+ DMA_FROM_DEVICE);
+
+ if (dma_mapping_error(&skge->hw->pdev->dev, map))
+ return -1;
+
+ rd->dma_lo = lower_32_bits(map);
+ rd->dma_hi = upper_32_bits(map);
+ e->skb = skb;
+ rd->csum1_start = ETH_HLEN;
+ rd->csum2_start = ETH_HLEN;
+ rd->csum1 = 0;
+ rd->csum2 = 0;
+
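+ /* Descriptor must be fully written before ownership is given to the BMU */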
+ wmb();
+
+ rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | bufsize;
+ dma_unmap_addr_set(e, mapaddr, map);
+ dma_unmap_len_set(e, maplen, bufsize);
+ return 0;
+}
+
+/* Resume receiving using the existing skb.
+ * Note: the DMA address is not changed by the chip, and the MTU
+ * is not changed while the receiver is active.
+ */
+static inline void skge_rx_reuse(struct skge_element *e, unsigned int size)
+{
+ struct skge_rx_desc *rd = e->desc;
+
+ rd->csum2 = 0;
+ rd->csum2_start = ETH_HLEN;
+
+ wmb();
+
+ rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | size;
+}
+
+
+/* Free all buffers in receive ring, assumes receiver stopped */
+static void skge_rx_clean(struct skge_port *skge)
+{
+ struct skge_hw *hw = skge->hw;
+ struct skge_ring *ring = &skge->rx_ring;
+ struct skge_element *e;
+
+ e = ring->start;
+ do {
+ struct skge_rx_desc *rd = e->desc;
+ rd->control = 0;
+ if (e->skb) {
+ dma_unmap_single(&hw->pdev->dev,
+ dma_unmap_addr(e, mapaddr),
+ dma_unmap_len(e, maplen),
+ DMA_FROM_DEVICE);
+ dev_kfree_skb(e->skb);
+ e->skb = NULL;
+ }
+ } while ((e = e->next) != ring->start);
+}
+
+
+/* Allocate buffers for the receive ring.
+ * On the receive side, to_clean points at the next frame to be received.
+ */
+static int skge_rx_fill(struct net_device *dev)
+{
+ struct skge_port *skge = netdev_priv(dev);
+ struct skge_ring *ring = &skge->rx_ring;
+ struct skge_element *e;
+
+ e = ring->start;
+ do {
+ struct sk_buff *skb;
+
+ skb = __netdev_alloc_skb(dev, skge->rx_buf_size + NET_IP_ALIGN,
+ GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_reserve(skb, NET_IP_ALIGN);
+ if (skge_rx_setup(skge, e, skb, skge->rx_buf_size) < 0) {
+ dev_kfree_skb(skb);
+ return -EIO;
+ }
+ } while ((e = e->next) != ring->start);
+
+ ring->to_clean = ring->start;
+ return 0;
+}
+
+static const char *skge_pause(enum pause_status status)
+{
+ switch (status) {
+ case FLOW_STAT_NONE:
+ return "none";
+ case FLOW_STAT_REM_SEND:
+ return "rx only";
+ case FLOW_STAT_LOC_SEND:
+ return "tx only";
+ case FLOW_STAT_SYMMETRIC: /* Both stations may send PAUSE */
+ return "both";
+ default:
+ return "indeterminate";
+ }
+}
+
+
+static void skge_link_up(struct skge_port *skge)
+{
+ skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG),
+ LED_BLK_OFF|LED_SYNC_OFF|LED_REG_ON);
+
+ netif_carrier_on(skge->netdev);
+ netif_wake_queue(skge->netdev);
+
+ netif_info(skge, link, skge->netdev,
+ "Link is up at %d Mbps, %s duplex, flow control %s\n",
+ skge->speed,
+ skge->duplex == DUPLEX_FULL ? "full" : "half",
+ skge_pause(skge->flow_status));
+}
+
+static void skge_link_down(struct skge_port *skge)
+{
+ skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), LED_REG_OFF);
+ netif_carrier_off(skge->netdev);
+ netif_stop_queue(skge->netdev);
+
+ netif_info(skge, link, skge->netdev, "Link is down\n");
+}
+
+static void xm_link_down(struct skge_hw *hw, int port)
+{
+ struct net_device *dev = hw->dev[port];
+ struct skge_port *skge = netdev_priv(dev);
+
+ xm_write16(hw, port, XM_IMSK, XM_IMSK_DISABLE);
+
+ if (netif_carrier_ok(dev))
+ skge_link_down(skge);
+}
+
+static int __xm_phy_read(struct skge_hw *hw, int port, u16 reg, u16 *val)
+{
+ int i;
+
+ xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr);
+ *val = xm_read16(hw, port, XM_PHY_DATA);
+
+ if (hw->phy_type == SK_PHY_XMAC)
+ goto ready;
+
+ for (i = 0; i < PHY_RETRIES; i++) {
+ if (xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_RDY)
+ goto ready;
+ udelay(1);
+ }
+
+ return -ETIMEDOUT;
+ ready:
+ *val = xm_read16(hw, port, XM_PHY_DATA);
+
+ return 0;
+}
+
+static u16 xm_phy_read(struct skge_hw *hw, int port, u16 reg)
+{
+ u16 v = 0;
+ if (__xm_phy_read(hw, port, reg, &v))
+ pr_warn("%s: phy read timed out\n", hw->dev[port]->name);
+ return v;
+}
+
+static int xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val)
+{
+ int i;
+
+ xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr);
+ for (i = 0; i < PHY_RETRIES; i++) {
+ if (!(xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_BUSY))
+ goto ready;
+ udelay(1);
+ }
+ return -EIO;
+
+ ready:
+ xm_write16(hw, port, XM_PHY_DATA, val);
+ for (i = 0; i < PHY_RETRIES; i++) {
+ if (!(xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_BUSY))
+ return 0;
+ udelay(1);
+ }
+ return -ETIMEDOUT;
+}
+
+static void genesis_init(struct skge_hw *hw)
+{
+ /* set blink source counter */
+ skge_write32(hw, B2_BSC_INI, (SK_BLK_DUR * SK_FACT_53) / 100);
+ skge_write8(hw, B2_BSC_CTRL, BSC_START);
+
+ /* configure mac arbiter */
+ skge_write16(hw, B3_MA_TO_CTRL, MA_RST_CLR);
+
+ /* configure mac arbiter timeout values */
+ skge_write8(hw, B3_MA_TOINI_RX1, SK_MAC_TO_53);
+ skge_write8(hw, B3_MA_TOINI_RX2, SK_MAC_TO_53);
+ skge_write8(hw, B3_MA_TOINI_TX1, SK_MAC_TO_53);
+ skge_write8(hw, B3_MA_TOINI_TX2, SK_MAC_TO_53);
+
+ skge_write8(hw, B3_MA_RCINI_RX1, 0);
+ skge_write8(hw, B3_MA_RCINI_RX2, 0);
+ skge_write8(hw, B3_MA_RCINI_TX1, 0);
+ skge_write8(hw, B3_MA_RCINI_TX2, 0);
+
+ /* configure packet arbiter timeout */
+ skge_write16(hw, B3_PA_CTRL, PA_RST_CLR);
+ skge_write16(hw, B3_PA_TOINI_RX1, SK_PKT_TO_MAX);
+ skge_write16(hw, B3_PA_TOINI_TX1, SK_PKT_TO_MAX);
+ skge_write16(hw, B3_PA_TOINI_RX2, SK_PKT_TO_MAX);
+ skge_write16(hw, B3_PA_TOINI_TX2, SK_PKT_TO_MAX);
+}
+
+static void genesis_reset(struct skge_hw *hw, int port)
+{
+ static const u8 zero[8] = { 0 };
+ u32 reg;
+
+ skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0);
+
+ /* reset the statistics module */
+ xm_write32(hw, port, XM_GP_PORT, XM_GP_RES_STAT);
+ xm_write16(hw, port, XM_IMSK, XM_IMSK_DISABLE);
+ xm_write32(hw, port, XM_MODE, 0); /* clear Mode Reg */
+ xm_write16(hw, port, XM_TX_CMD, 0); /* reset TX CMD Reg */
+ xm_write16(hw, port, XM_RX_CMD, 0); /* reset RX CMD Reg */
+
+ /* disable Broadcom PHY IRQ */
+ if (hw->phy_type == SK_PHY_BCOM)
+ xm_write16(hw, port, PHY_BCOM_INT_MASK, 0xffff);
+
+ xm_outhash(hw, port, XM_HSM, zero);
+
+ /* Flush TX and RX fifo */
+ reg = xm_read32(hw, port, XM_MODE);
+ xm_write32(hw, port, XM_MODE, reg | XM_MD_FTF);
+ xm_write32(hw, port, XM_MODE, reg | XM_MD_FRF);
+}
+
+/* Convert mode to MII values */
+static const u16 phy_pause_map[] = {
+ [FLOW_MODE_NONE] = 0,
+ [FLOW_MODE_LOC_SEND] = PHY_AN_PAUSE_ASYM,
+ [FLOW_MODE_SYMMETRIC] = PHY_AN_PAUSE_CAP,
+ [FLOW_MODE_SYM_OR_REM] = PHY_AN_PAUSE_CAP | PHY_AN_PAUSE_ASYM,
+};
+
+/* special defines for FIBER (88E1011S only) */
+static const u16 fiber_pause_map[] = {
+ [FLOW_MODE_NONE] = PHY_X_P_NO_PAUSE,
+ [FLOW_MODE_LOC_SEND] = PHY_X_P_ASYM_MD,
+ [FLOW_MODE_SYMMETRIC] = PHY_X_P_SYM_MD,
+ [FLOW_MODE_SYM_OR_REM] = PHY_X_P_BOTH_MD,
+};
+
+
+/* Check status of Broadcom phy link */
+static void bcom_check_link(struct skge_hw *hw, int port)
+{
+ struct net_device *dev = hw->dev[port];
+ struct skge_port *skge = netdev_priv(dev);
+ u16 status;
+
+ /* read twice because of latch */
+ xm_phy_read(hw, port, PHY_BCOM_STAT);
+ status = xm_phy_read(hw, port, PHY_BCOM_STAT);
+
+ if ((status & PHY_ST_LSYNC) == 0) {
+ xm_link_down(hw, port);
+ return;
+ }
+
+ if (skge->autoneg == AUTONEG_ENABLE) {
+ u16 lpa, aux;
+
+ if (!(status & PHY_ST_AN_OVER))
+ return;
+
+ lpa = xm_phy_read(hw, port, PHY_XMAC_AUNE_LP);
+ if (lpa & PHY_B_AN_RF) {
+ netdev_notice(dev, "remote fault\n");
+ return;
+ }
+
+ aux = xm_phy_read(hw, port, PHY_BCOM_AUX_STAT);
+
+ /* Check Duplex mismatch */
+ switch (aux & PHY_B_AS_AN_RES_MSK) {
+ case PHY_B_RES_1000FD:
+ skge->duplex = DUPLEX_FULL;
+ break;
+ case PHY_B_RES_1000HD:
+ skge->duplex = DUPLEX_HALF;
+ break;
+ default:
+ netdev_notice(dev, "duplex mismatch\n");
+ return;
+ }
+
+ /* We are using IEEE 802.3z/D5.0 Table 37-4 */
+ switch (aux & PHY_B_AS_PAUSE_MSK) {
+ case PHY_B_AS_PAUSE_MSK:
+ skge->flow_status = FLOW_STAT_SYMMETRIC;
+ break;
+ case PHY_B_AS_PRR:
+ skge->flow_status = FLOW_STAT_REM_SEND;
+ break;
+ case PHY_B_AS_PRT:
+ skge->flow_status = FLOW_STAT_LOC_SEND;
+ break;
+ default:
+ skge->flow_status = FLOW_STAT_NONE;
+ }
+ skge->speed = SPEED_1000;
+ }
+
+ if (!netif_carrier_ok(dev))
+ genesis_link_up(skge);
+}
+
+/* The Broadcom 5400 only supports gigabit! SysKonnect did not put an
+ * additional PHY on for 100 or 10 Mbit operation.
+ */
+static void bcom_phy_init(struct skge_port *skge)
+{
+ struct skge_hw *hw = skge->hw;
+ int port = skge->port;
+ int i;
+ u16 id1, r, ext, ctl;
+
+ /* magic workaround patterns for Broadcom */
+ static const struct {
+ u16 reg;
+ u16 val;
+ } A1hack[] = {
+ { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 },
+ { 0x17, 0x0013 }, { 0x15, 0x0404 }, { 0x17, 0x8006 },
+ { 0x15, 0x0132 }, { 0x17, 0x8006 }, { 0x15, 0x0232 },
+ { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 },
+ }, C0hack[] = {
+ { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1204 },
+ { 0x17, 0x0013 }, { 0x15, 0x0A04 }, { 0x18, 0x0420 },
+ };
+
+ /* read Id from external PHY (all have the same address) */
+ id1 = xm_phy_read(hw, port, PHY_XMAC_ID1);
+
+ /* Optimize MDIO transfer by suppressing preamble. */
+ r = xm_read16(hw, port, XM_MMU_CMD);
+ r |= XM_MMU_NO_PRE;
+ xm_write16(hw, port, XM_MMU_CMD, r);
+
+ switch (id1) {
+ case PHY_BCOM_ID1_C0:
+ /*
+ * Workaround BCOM Errata for the C0 type.
+ * Write magic patterns to reserved registers.
+ */
+ for (i = 0; i < ARRAY_SIZE(C0hack); i++)
+ xm_phy_write(hw, port,
+ C0hack[i].reg, C0hack[i].val);
+
+ break;
+ case PHY_BCOM_ID1_A1:
+ /*
+ * Workaround BCOM Errata for the A1 type.
+ * Write magic patterns to reserved registers.
+ */
+ for (i = 0; i < ARRAY_SIZE(A1hack); i++)
+ xm_phy_write(hw, port,
+ A1hack[i].reg, A1hack[i].val);
+ break;
+ }
+
+ /*
+ * Workaround BCOM Errata (#10523) for all BCom PHYs.
+ * Disable Power Management after reset.
+ */
+ r = xm_phy_read(hw, port, PHY_BCOM_AUX_CTRL);
+ r |= PHY_B_AC_DIS_PM;
+ xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL, r);
+
+ /* Dummy read */
+ xm_read16(hw, port, XM_ISRC);
+
+ ext = PHY_B_PEC_EN_LTR; /* enable tx led */
+ ctl = PHY_CT_SP1000; /* always 1000mbit */
+
+ if (skge->autoneg == AUTONEG_ENABLE) {
+ /*
+ * Workaround BCOM Errata #1 for the C5 type.
+ * 1000Base-T Link Acquisition Failure in Slave Mode
+ * Set Repeater/DTE bit 10 of the 1000Base-T Control Register
+ */
+ u16 adv = PHY_B_1000C_RD;
+ if (skge->advertising & ADVERTISED_1000baseT_Half)
+ adv |= PHY_B_1000C_AHD;
+ if (skge->advertising & ADVERTISED_1000baseT_Full)
+ adv |= PHY_B_1000C_AFD;
+ xm_phy_write(hw, port, PHY_BCOM_1000T_CTRL, adv);
+
+ ctl |= PHY_CT_ANE | PHY_CT_RE_CFG;
+ } else {
+ if (skge->duplex == DUPLEX_FULL)
+ ctl |= PHY_CT_DUP_MD;
+ /* Force to slave */
+ xm_phy_write(hw, port, PHY_BCOM_1000T_CTRL, PHY_B_1000C_MSE);
+ }
+
+ /* Set autonegotiation pause parameters */
+ xm_phy_write(hw, port, PHY_BCOM_AUNE_ADV,
+ phy_pause_map[skge->flow_control] | PHY_AN_CSMA);
+
+ /* Handle Jumbo frames */
+ if (hw->dev[port]->mtu > ETH_DATA_LEN) {
+ xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL,
+ PHY_B_AC_TX_TST | PHY_B_AC_LONG_PACK);
+
+ ext |= PHY_B_PEC_HIGH_LA;
+
+ }
+
+ xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, ext);
+ xm_phy_write(hw, port, PHY_BCOM_CTRL, ctl);
+
+ /* Use link status change interrupt */
+ xm_phy_write(hw, port, PHY_BCOM_INT_MASK, PHY_B_DEF_MSK);
+}
+
+static void xm_phy_init(struct skge_port *skge)
+{
+ struct skge_hw *hw = skge->hw;
+ int port = skge->port;
+ u16 ctrl = 0;
+
+ if (skge->autoneg == AUTONEG_ENABLE) {
+ if (skge->advertising & ADVERTISED_1000baseT_Half)
+ ctrl |= PHY_X_AN_HD;
+ if (skge->advertising & ADVERTISED_1000baseT_Full)
+ ctrl |= PHY_X_AN_FD;
+
+ ctrl |= fiber_pause_map[skge->flow_control];
+
+ xm_phy_write(hw, port, PHY_XMAC_AUNE_ADV, ctrl);
+
+ /* Restart Auto-negotiation */
+ ctrl = PHY_CT_ANE | PHY_CT_RE_CFG;
+ } else {
+ /* Set DuplexMode in Config register */
+ if (skge->duplex == DUPLEX_FULL)
+ ctrl |= PHY_CT_DUP_MD;
+ /*
+ * Do NOT enable Auto-negotiation here. This would hold
+ * the link down because no IDLEs are transmitted
+ */
+ }
+
+ xm_phy_write(hw, port, PHY_XMAC_CTRL, ctrl);
+
+ /* Poll PHY for status changes */
+ mod_timer(&skge->link_timer, jiffies + LINK_HZ);
+}
+
+static int xm_check_link(struct net_device *dev)
+{
+ struct skge_port *skge = netdev_priv(dev);
+ struct skge_hw *hw = skge->hw;
+ int port = skge->port;
+ u16 status;
+
+ /* read twice because of latch */
+ xm_phy_read(hw, port, PHY_XMAC_STAT);
+ status = xm_phy_read(hw, port, PHY_XMAC_STAT);
+
+ if ((status & PHY_ST_LSYNC) == 0) {
+ xm_link_down(hw, port);
+ return 0;
+ }
+
+ if (skge->autoneg == AUTONEG_ENABLE) {
+ u16 lpa, res;
+
+ if (!(status & PHY_ST_AN_OVER))
+ return 0;
+
+ lpa = xm_phy_read(hw, port, PHY_XMAC_AUNE_LP);
+ if (lpa & PHY_B_AN_RF) {
+ netdev_notice(dev, "remote fault\n");
+ return 0;
+ }
+
+ res = xm_phy_read(hw, port, PHY_XMAC_RES_ABI);
+
+ /* Check Duplex mismatch */
+ switch (res & (PHY_X_RS_HD | PHY_X_RS_FD)) {
+ case PHY_X_RS_FD:
+ skge->duplex = DUPLEX_FULL;
+ break;
+ case PHY_X_RS_HD:
+ skge->duplex = DUPLEX_HALF;
+ break;
+ default:
+ netdev_notice(dev, "duplex mismatch\n");
+ return 0;
+ }
+
+ /* We are using IEEE 802.3z/D5.0 Table 37-4 */
+ if ((skge->flow_control == FLOW_MODE_SYMMETRIC ||
+ skge->flow_control == FLOW_MODE_SYM_OR_REM) &&
+ (lpa & PHY_X_P_SYM_MD))
+ skge->flow_status = FLOW_STAT_SYMMETRIC;
+ else if (skge->flow_control == FLOW_MODE_SYM_OR_REM &&
+ (lpa & PHY_X_RS_PAUSE) == PHY_X_P_ASYM_MD)
+ /* Enable PAUSE receive, disable PAUSE transmit */
+ skge->flow_status = FLOW_STAT_REM_SEND;
+ else if (skge->flow_control == FLOW_MODE_LOC_SEND &&
+ (lpa & PHY_X_RS_PAUSE) == PHY_X_P_BOTH_MD)
+ /* Disable PAUSE receive, enable PAUSE transmit */
+ skge->flow_status = FLOW_STAT_LOC_SEND;
+ else
+ skge->flow_status = FLOW_STAT_NONE;
+
+ skge->speed = SPEED_1000;
+ }
+
+ if (!netif_carrier_ok(dev))
+ genesis_link_up(skge);
+ return 1;
+}
+
+/* Poll to check for the link coming up.
+ *
+ * Since the internal PHY is wired to a level-triggered pin, we can't
+ * get an interrupt when the carrier is detected, so we have to poll
+ * for the link coming up.
+ */
+static void xm_link_timer(struct timer_list *t)
+{
+ struct skge_port *skge = from_timer(skge, t, link_timer);
+ struct net_device *dev = skge->netdev;
+ struct skge_hw *hw = skge->hw;
+ int port = skge->port;
+ int i;
+ unsigned long flags;
+
+ if (!netif_running(dev))
+ return;
+
+ spin_lock_irqsave(&hw->phy_lock, flags);
+
+ /*
+ * Verify the link by checking the GPIO register three times;
+ * this pin has the signal from the link_sync pin connected to it.
+ */
+ for (i = 0; i < 3; i++) {
+ if (xm_read16(hw, port, XM_GP_PORT) & XM_GP_INP_ASS)
+ goto link_down;
+ }
+
+ /* Re-enable interrupt to detect link down */
+ if (xm_check_link(dev)) {
+ u16 msk = xm_read16(hw, port, XM_IMSK);
+ msk &= ~XM_IS_INP_ASS;
+ xm_write16(hw, port, XM_IMSK, msk);
+ xm_read16(hw, port, XM_ISRC);
+ } else {
+link_down:
+ mod_timer(&skge->link_timer,
+ round_jiffies(jiffies + LINK_HZ));
+ }
+ spin_unlock_irqrestore(&hw->phy_lock, flags);
+}
+
+static void genesis_mac_init(struct skge_hw *hw, int port)
+{
+ struct net_device *dev = hw->dev[port];
+ struct skge_port *skge = netdev_priv(dev);
+ int jumbo = hw->dev[port]->mtu > ETH_DATA_LEN;
+ int i;
+ u32 r;
+ static const u8 zero[6] = { 0 };
+
+ for (i = 0; i < 10; i++) {
+ skge_write16(hw, SK_REG(port, TX_MFF_CTRL1),
+ MFF_SET_MAC_RST);
+ if (skge_read16(hw, SK_REG(port, TX_MFF_CTRL1)) & MFF_SET_MAC_RST)
+ goto reset_ok;
+ udelay(1);
+ }
+
+ netdev_warn(dev, "genesis reset failed\n");
+
+ reset_ok:
+ /* Unreset the XMAC. */
+ skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_CLR_MAC_RST);
+
+ /*
+ * Perform additional initialization for external PHYs,
+ * namely for the 1000baseTX cards that use the XMAC's
+ * GMII mode.
+ */
+ if (hw->phy_type != SK_PHY_XMAC) {
+ /* Take external Phy out of reset */
+ r = skge_read32(hw, B2_GP_IO);
+ if (port == 0)
+ r |= GP_DIR_0|GP_IO_0;
+ else
+ r |= GP_DIR_2|GP_IO_2;
+
+ skge_write32(hw, B2_GP_IO, r);
+
+ /* Enable GMII interface */
+ xm_write16(hw, port, XM_HW_CFG, XM_HW_GMII_MD);
+ }
+
+
+ switch (hw->phy_type) {
+ case SK_PHY_XMAC:
+ xm_phy_init(skge);
+ break;
+ case SK_PHY_BCOM:
+ bcom_phy_init(skge);
+ bcom_check_link(hw, port);
+ }
+
+ /* Set Station Address */
+ xm_outaddr(hw, port, XM_SA, dev->dev_addr);
+
+ /* We don't use match addresses so clear */
+ for (i = 1; i < 16; i++)
+ xm_outaddr(hw, port, XM_EXM(i), zero);
+
+ /* Clear MIB counters */
+ xm_write16(hw, port, XM_STAT_CMD,
+ XM_SC_CLR_RXC | XM_SC_CLR_TXC);
+ /* Clear twice according to Errata #3 */
+ xm_write16(hw, port, XM_STAT_CMD,
+ XM_SC_CLR_RXC | XM_SC_CLR_TXC);
+
+ /* configure Rx High Water Mark (XM_RX_HI_WM) */
+ xm_write16(hw, port, XM_RX_HI_WM, 1450);
+
+ /* We don't need the FCS appended to the packet. */
+ r = XM_RX_LENERR_OK | XM_RX_STRIP_FCS;
+ if (jumbo)
+ r |= XM_RX_BIG_PK_OK;
+
+ if (skge->duplex == DUPLEX_HALF) {
+ /*
+ * In manual half-duplex mode the other side might be running
+ * full duplex, so ignore a missing carrier extension on
+ * received frames.
+ */
+ r |= XM_RX_DIS_CEXT;
+ }
+ xm_write16(hw, port, XM_RX_CMD, r);
+
+ /* We want short frames padded to 60 bytes. */
+ xm_write16(hw, port, XM_TX_CMD, XM_TX_AUTO_PAD);
+
+ /* Increase threshold for jumbo frames on dual port */
+ if (hw->ports > 1 && jumbo)
+ xm_write16(hw, port, XM_TX_THR, 1020);
+ else
+ xm_write16(hw, port, XM_TX_THR, 512);
+
+ /*
+ * Enable the reception of all error frames. This is
+ * a necessary evil due to the design of the XMAC. The
+ * XMAC's receive FIFO is only 8K in size, however jumbo
+ * frames can be up to 9000 bytes in length. When bad
+ * frame filtering is enabled, the XMAC's RX FIFO operates
+ * in 'store and forward' mode. For this to work, the
+ * entire frame has to fit into the FIFO, but that means
+ * that jumbo frames larger than 8192 bytes will be
+ * truncated. Disabling all bad frame filtering causes
+ * the RX FIFO to operate in streaming mode, in which
+ * case the XMAC will start transferring frames out of the
+ * RX FIFO as soon as the FIFO threshold is reached.
+ */
+ xm_write32(hw, port, XM_MODE, XM_DEF_MODE);
+
+
+ /*
+ * Initialize the Receive Counter Event Mask (XM_RX_EV_MSK)
+ * - Enable all bits excepting 'Octets Rx OK Low CntOv'
+ * and 'Octets Rx OK Hi Cnt Ov'.
+ */
+ xm_write32(hw, port, XM_RX_EV_MSK, XMR_DEF_MSK);
+
+ /*
+ * Initialize the Transmit Counter Event Mask (XM_TX_EV_MSK)
+ * - Enable all bits excepting 'Octets Tx OK Low CntOv'
+ * and 'Octets Tx OK Hi Cnt Ov'.
+ */
+ xm_write32(hw, port, XM_TX_EV_MSK, XMT_DEF_MSK);
+
+ /* Configure MAC arbiter */
+ skge_write16(hw, B3_MA_TO_CTRL, MA_RST_CLR);
+
+ /* configure timeout values */
+ skge_write8(hw, B3_MA_TOINI_RX1, 72);
+ skge_write8(hw, B3_MA_TOINI_RX2, 72);
+ skge_write8(hw, B3_MA_TOINI_TX1, 72);
+ skge_write8(hw, B3_MA_TOINI_TX2, 72);
+
+ skge_write8(hw, B3_MA_RCINI_RX1, 0);
+ skge_write8(hw, B3_MA_RCINI_RX2, 0);
+ skge_write8(hw, B3_MA_RCINI_TX1, 0);
+ skge_write8(hw, B3_MA_RCINI_TX2, 0);
+
+ /* Configure Rx MAC FIFO */
+ skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_RST_CLR);
+ skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_ENA_TIM_PAT);
+ skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_ENA_OP_MD);
+
+ /* Configure Tx MAC FIFO */
+ skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_RST_CLR);
+ skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_TX_CTRL_DEF);
+ skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_ENA_OP_MD);
+
+ if (jumbo) {
+ /* Enable frame flushing if jumbo frames used */
+ skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_ENA_FLUSH);
+ } else {
+ /* enable timeout timers if normal frames */
+ skge_write16(hw, B3_PA_CTRL,
+ (port == 0) ? PA_ENA_TO_TX1 : PA_ENA_TO_TX2);
+ }
+}
+
+static void genesis_stop(struct skge_port *skge)
+{
+ struct skge_hw *hw = skge->hw;
+ int port = skge->port;
+ unsigned retries = 1000;
+ u16 cmd;
+
+ /* Disable Tx and Rx */
+ cmd = xm_read16(hw, port, XM_MMU_CMD);
+ cmd &= ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX);
+ xm_write16(hw, port, XM_MMU_CMD, cmd);
+
+ genesis_reset(hw, port);
+
+ /* Clear Tx packet arbiter timeout IRQ */
+ skge_write16(hw, B3_PA_CTRL,
+ port == 0 ? PA_CLR_TO_TX1 : PA_CLR_TO_TX2);
+
+ /* Reset the MAC */
+ skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_CLR_MAC_RST);
+ do {
+ skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_SET_MAC_RST);
+ if (!(skge_read16(hw, SK_REG(port, TX_MFF_CTRL1)) & MFF_SET_MAC_RST))
+ break;
+ } while (--retries > 0);
+
+ /* For external PHYs there must be special handling */
+ if (hw->phy_type != SK_PHY_XMAC) {
+ u32 reg = skge_read32(hw, B2_GP_IO);
+ if (port == 0) {
+ reg |= GP_DIR_0;
+ reg &= ~GP_IO_0;
+ } else {
+ reg |= GP_DIR_2;
+ reg &= ~GP_IO_2;
+ }
+ skge_write32(hw, B2_GP_IO, reg);
+ skge_read32(hw, B2_GP_IO);
+ }
+
+ xm_write16(hw, port, XM_MMU_CMD,
+ xm_read16(hw, port, XM_MMU_CMD)
+ & ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX));
+
+ xm_read16(hw, port, XM_MMU_CMD);
+}
+
+
+static void genesis_get_stats(struct skge_port *skge, u64 *data)
+{
+ struct skge_hw *hw = skge->hw;
+ int port = skge->port;
+ int i;
+ unsigned long timeout = jiffies + HZ;
+
+ xm_write16(hw, port,
+ XM_STAT_CMD, XM_SC_SNP_TXC | XM_SC_SNP_RXC);
+
+ /* wait for update to complete */
+ while (xm_read16(hw, port, XM_STAT_CMD)
+ & (XM_SC_SNP_TXC | XM_SC_SNP_RXC)) {
+ if (time_after(jiffies, timeout))
+ break;
+ udelay(10);
+ }
+
+ /* special case for 64 bit octet counter */
+ data[0] = (u64) xm_read32(hw, port, XM_TXO_OK_HI) << 32
+ | xm_read32(hw, port, XM_TXO_OK_LO);
+ data[1] = (u64) xm_read32(hw, port, XM_RXO_OK_HI) << 32
+ | xm_read32(hw, port, XM_RXO_OK_LO);
+
+ for (i = 2; i < ARRAY_SIZE(skge_stats); i++)
+ data[i] = xm_read32(hw, port, skge_stats[i].xmac_offset);
+}
+
+static void genesis_mac_intr(struct skge_hw *hw, int port)
+{
+ struct net_device *dev = hw->dev[port];
+ struct skge_port *skge = netdev_priv(dev);
+ u16 status = xm_read16(hw, port, XM_ISRC);
+
+ netif_printk(skge, intr, KERN_DEBUG, skge->netdev,
+ "mac interrupt status 0x%x\n", status);
+
+ if (hw->phy_type == SK_PHY_XMAC && (status & XM_IS_INP_ASS)) {
+ xm_link_down(hw, port);
+ mod_timer(&skge->link_timer, jiffies + 1);
+ }
+
+ if (status & XM_IS_TXF_UR) {
+ xm_write32(hw, port, XM_MODE, XM_MD_FTF);
+ ++dev->stats.tx_fifo_errors;
+ }
+}
+
+static void genesis_link_up(struct skge_port *skge)
+{
+ struct skge_hw *hw = skge->hw;
+ int port = skge->port;
+ u16 cmd, msk;
+ u32 mode;
+
+ cmd = xm_read16(hw, port, XM_MMU_CMD);
+
+ /*
+	 * Enabling pause frame reception is required for 1000BT
+	 * because the XMAC is not reset when the link goes down.
+ */
+ if (skge->flow_status == FLOW_STAT_NONE ||
+ skge->flow_status == FLOW_STAT_LOC_SEND)
+ /* Disable Pause Frame Reception */
+ cmd |= XM_MMU_IGN_PF;
+ else
+ /* Enable Pause Frame Reception */
+ cmd &= ~XM_MMU_IGN_PF;
+
+ xm_write16(hw, port, XM_MMU_CMD, cmd);
+
+ mode = xm_read32(hw, port, XM_MODE);
+ if (skge->flow_status == FLOW_STAT_SYMMETRIC ||
+ skge->flow_status == FLOW_STAT_LOC_SEND) {
+ /*
+ * Configure Pause Frame Generation
+ * Use internal and external Pause Frame Generation.
+ * Sending pause frames is edge triggered.
+ * Send a Pause frame with the maximum pause time if
+		 * an internal or external FIFO full condition occurs.
+ * Send a zero pause time frame to re-start transmission.
+ */
+ /* XM_PAUSE_DA = '010000C28001' (default) */
+ /* XM_MAC_PTIME = 0xffff (maximum) */
+ /* remember this value is defined in big endian (!) */
+ xm_write16(hw, port, XM_MAC_PTIME, 0xffff);
+
+ mode |= XM_PAUSE_MODE;
+ skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_ENA_PAUSE);
+ } else {
+ /*
+		 * Disabling pause frame generation is required for 1000BT
+		 * because the XMAC is not reset when the link goes down.
+ */
+ /* Disable Pause Mode in Mode Register */
+ mode &= ~XM_PAUSE_MODE;
+
+ skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_DIS_PAUSE);
+ }
+
+ xm_write32(hw, port, XM_MODE, mode);
+
+ /* Turn on detection of Tx underrun */
+ msk = xm_read16(hw, port, XM_IMSK);
+ msk &= ~XM_IS_TXF_UR;
+ xm_write16(hw, port, XM_IMSK, msk);
+
+ xm_read16(hw, port, XM_ISRC);
+
+ /* get MMU Command Reg. */
+ cmd = xm_read16(hw, port, XM_MMU_CMD);
+ if (hw->phy_type != SK_PHY_XMAC && skge->duplex == DUPLEX_FULL)
+ cmd |= XM_MMU_GMII_FD;
+
+ /*
+ * Workaround BCOM Errata (#10523) for all BCom Phys
+ * Enable Power Management after link up
+ */
+ if (hw->phy_type == SK_PHY_BCOM) {
+ xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL,
+ xm_phy_read(hw, port, PHY_BCOM_AUX_CTRL)
+ & ~PHY_B_AC_DIS_PM);
+ xm_phy_write(hw, port, PHY_BCOM_INT_MASK, PHY_B_DEF_MSK);
+ }
+
+ /* enable Rx/Tx */
+ xm_write16(hw, port, XM_MMU_CMD,
+ cmd | XM_MMU_ENA_RX | XM_MMU_ENA_TX);
+ skge_link_up(skge);
+}
+
+
+static inline void bcom_phy_intr(struct skge_port *skge)
+{
+ struct skge_hw *hw = skge->hw;
+ int port = skge->port;
+ u16 isrc;
+
+ isrc = xm_phy_read(hw, port, PHY_BCOM_INT_STAT);
+ netif_printk(skge, intr, KERN_DEBUG, skge->netdev,
+ "phy interrupt status 0x%x\n", isrc);
+
+ if (isrc & PHY_B_IS_PSE)
+ pr_err("%s: uncorrectable pair swap error\n",
+ hw->dev[port]->name);
+
+ /* Workaround BCom Errata:
+ * enable and disable loopback mode if "NO HCD" occurs.
+ */
+ if (isrc & PHY_B_IS_NO_HDCL) {
+ u16 ctrl = xm_phy_read(hw, port, PHY_BCOM_CTRL);
+ xm_phy_write(hw, port, PHY_BCOM_CTRL,
+ ctrl | PHY_CT_LOOP);
+ xm_phy_write(hw, port, PHY_BCOM_CTRL,
+ ctrl & ~PHY_CT_LOOP);
+ }
+
+ if (isrc & (PHY_B_IS_AN_PR | PHY_B_IS_LST_CHANGE))
+ bcom_check_link(hw, port);
+
+}
+
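+/*
+ * Write a GMAC PHY register through the SMI interface: load the data
+ * register, issue the command, then busy-poll until the controller
+ * clears GM_SMI_CT_BUSY (giving up after PHY_RETRIES). The read side
+ * (__gm_phy_read) works the same way but polls for GM_SMI_CT_RD_VAL.
+ */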
+static int gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val)
+{
+ int i;
+
+ gma_write16(hw, port, GM_SMI_DATA, val);
+ gma_write16(hw, port, GM_SMI_CTRL,
+ GM_SMI_CT_PHY_AD(hw->phy_addr) | GM_SMI_CT_REG_AD(reg));
+ for (i = 0; i < PHY_RETRIES; i++) {
+ udelay(1);
+
+ if (!(gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_BUSY))
+ return 0;
+ }
+
+ pr_warn("%s: phy write timeout\n", hw->dev[port]->name);
+ return -EIO;
+}
+
+static int __gm_phy_read(struct skge_hw *hw, int port, u16 reg, u16 *val)
+{
+ int i;
+
+ gma_write16(hw, port, GM_SMI_CTRL,
+ GM_SMI_CT_PHY_AD(hw->phy_addr)
+ | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);
+
+ for (i = 0; i < PHY_RETRIES; i++) {
+ udelay(1);
+ if (gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_RD_VAL)
+ goto ready;
+ }
+
+ return -ETIMEDOUT;
+ ready:
+ *val = gma_read16(hw, port, GM_SMI_DATA);
+ return 0;
+}
+
+static u16 gm_phy_read(struct skge_hw *hw, int port, u16 reg)
+{
+ u16 v = 0;
+ if (__gm_phy_read(hw, port, reg, &v))
+ pr_warn("%s: phy read timeout\n", hw->dev[port]->name);
+ return v;
+}
+
+/* Marvell Phy Initialization */
+static void yukon_init(struct skge_hw *hw, int port)
+{
+ struct skge_port *skge = netdev_priv(hw->dev[port]);
+ u16 ctrl, ct1000, adv;
+
+ if (skge->autoneg == AUTONEG_ENABLE) {
+ u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL);
+
+ ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK |
+ PHY_M_EC_MAC_S_MSK);
+ ectrl |= PHY_M_EC_MAC_S(MAC_TX_CLK_25_MHZ);
+
+ ectrl |= PHY_M_EC_M_DSC(0) | PHY_M_EC_S_DSC(1);
+
+ gm_phy_write(hw, port, PHY_MARV_EXT_CTRL, ectrl);
+ }
+
+ ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL);
+ if (skge->autoneg == AUTONEG_DISABLE)
+ ctrl &= ~PHY_CT_ANE;
+
+ ctrl |= PHY_CT_RESET;
+ gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);
+
+ ctrl = 0;
+ ct1000 = 0;
+ adv = PHY_AN_CSMA;
+
+ if (skge->autoneg == AUTONEG_ENABLE) {
+ if (hw->copper) {
+ if (skge->advertising & ADVERTISED_1000baseT_Full)
+ ct1000 |= PHY_M_1000C_AFD;
+ if (skge->advertising & ADVERTISED_1000baseT_Half)
+ ct1000 |= PHY_M_1000C_AHD;
+ if (skge->advertising & ADVERTISED_100baseT_Full)
+ adv |= PHY_M_AN_100_FD;
+ if (skge->advertising & ADVERTISED_100baseT_Half)
+ adv |= PHY_M_AN_100_HD;
+ if (skge->advertising & ADVERTISED_10baseT_Full)
+ adv |= PHY_M_AN_10_FD;
+ if (skge->advertising & ADVERTISED_10baseT_Half)
+ adv |= PHY_M_AN_10_HD;
+
+ /* Set Flow-control capabilities */
+ adv |= phy_pause_map[skge->flow_control];
+ } else {
+ if (skge->advertising & ADVERTISED_1000baseT_Full)
+ adv |= PHY_M_AN_1000X_AFD;
+ if (skge->advertising & ADVERTISED_1000baseT_Half)
+ adv |= PHY_M_AN_1000X_AHD;
+
+ adv |= fiber_pause_map[skge->flow_control];
+ }
+
+ /* Restart Auto-negotiation */
+ ctrl |= PHY_CT_ANE | PHY_CT_RE_CFG;
+ } else {
+ /* forced speed/duplex settings */
+ ct1000 = PHY_M_1000C_MSE;
+
+ if (skge->duplex == DUPLEX_FULL)
+ ctrl |= PHY_CT_DUP_MD;
+
+ switch (skge->speed) {
+ case SPEED_1000:
+ ctrl |= PHY_CT_SP1000;
+ break;
+ case SPEED_100:
+ ctrl |= PHY_CT_SP100;
+ break;
+ }
+
+ ctrl |= PHY_CT_RESET;
+ }
+
+ gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, ct1000);
+
+ gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, adv);
+ gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);
+
+ /* Enable phy interrupt on autonegotiation complete (or link up) */
+ if (skge->autoneg == AUTONEG_ENABLE)
+ gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_AN_MSK);
+ else
+ gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_DEF_MSK);
+}
+
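+/*
+ * Quiesce the Marvell PHY/MAC state: mask PHY interrupts, clear the
+ * multicast hash filter, and re-enable plain unicast/multicast reception.
+ */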
+static void yukon_reset(struct skge_hw *hw, int port)
+{
+ gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0);/* disable PHY IRQs */
+ gma_write16(hw, port, GM_MC_ADDR_H1, 0); /* clear MC hash */
+ gma_write16(hw, port, GM_MC_ADDR_H2, 0);
+ gma_write16(hw, port, GM_MC_ADDR_H3, 0);
+ gma_write16(hw, port, GM_MC_ADDR_H4, 0);
+
+ gma_write16(hw, port, GM_RX_CTRL,
+ gma_read16(hw, port, GM_RX_CTRL)
+ | GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
+}
+
+/* Apparently, early versions of Yukon-Lite had wrong chip_id? */
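+/*
+ * The check below writes a test pattern to the top byte of the Flash
+ * Address Register (B2_FAR) and treats a non-zero read-back as
+ * identifying the A0 revision; the original value is restored afterwards.
+ */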
+static int is_yukon_lite_a0(struct skge_hw *hw)
+{
+ u32 reg;
+ int ret;
+
+ if (hw->chip_id != CHIP_ID_YUKON)
+ return 0;
+
+ reg = skge_read32(hw, B2_FAR);
+ skge_write8(hw, B2_FAR + 3, 0xff);
+ ret = (skge_read8(hw, B2_FAR + 3) != 0);
+ skge_write32(hw, B2_FAR, reg);
+ return ret;
+}
+
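+/*
+ * Full GMAC bring-up: apply the COMA-mode workaround, hard-reset the
+ * PHY and MAC, program speed/duplex/flow control (forced or via
+ * autonegotiation through yukon_init()), clear the MIB counters, set
+ * the station addresses used for pause frames, and set up the Rx/Tx
+ * MAC FIFOs.
+ */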
+static void yukon_mac_init(struct skge_hw *hw, int port)
+{
+ struct skge_port *skge = netdev_priv(hw->dev[port]);
+ int i;
+ u32 reg;
+ const u8 *addr = hw->dev[port]->dev_addr;
+
+	/* Workaround for COMA mode -- set PHY reset */
+ if (hw->chip_id == CHIP_ID_YUKON_LITE &&
+ hw->chip_rev >= CHIP_REV_YU_LITE_A3) {
+ reg = skge_read32(hw, B2_GP_IO);
+ reg |= GP_DIR_9 | GP_IO_9;
+ skge_write32(hw, B2_GP_IO, reg);
+ }
+
+ /* hard reset */
+ skge_write32(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
+ skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET);
+
+	/* Workaround for COMA mode -- clear PHY reset */
+ if (hw->chip_id == CHIP_ID_YUKON_LITE &&
+ hw->chip_rev >= CHIP_REV_YU_LITE_A3) {
+ reg = skge_read32(hw, B2_GP_IO);
+ reg |= GP_DIR_9;
+ reg &= ~GP_IO_9;
+ skge_write32(hw, B2_GP_IO, reg);
+ }
+
+ /* Set hardware config mode */
+ reg = GPC_INT_POL_HI | GPC_DIS_FC | GPC_DIS_SLEEP |
+ GPC_ENA_XC | GPC_ANEG_ADV_ALL_M | GPC_ENA_PAUSE;
+ reg |= hw->copper ? GPC_HWCFG_GMII_COP : GPC_HWCFG_GMII_FIB;
+
+ /* Clear GMC reset */
+ skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_SET);
+ skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_CLR);
+ skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON | GMC_RST_CLR);
+
+ if (skge->autoneg == AUTONEG_DISABLE) {
+ reg = GM_GPCR_AU_ALL_DIS;
+ gma_write16(hw, port, GM_GP_CTRL,
+ gma_read16(hw, port, GM_GP_CTRL) | reg);
+
+ switch (skge->speed) {
+ case SPEED_1000:
+ reg &= ~GM_GPCR_SPEED_100;
+ reg |= GM_GPCR_SPEED_1000;
+ break;
+ case SPEED_100:
+ reg &= ~GM_GPCR_SPEED_1000;
+ reg |= GM_GPCR_SPEED_100;
+ break;
+ case SPEED_10:
+ reg &= ~(GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100);
+ break;
+ }
+
+ if (skge->duplex == DUPLEX_FULL)
+ reg |= GM_GPCR_DUP_FULL;
+ } else
+ reg = GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100 | GM_GPCR_DUP_FULL;
+
+ switch (skge->flow_control) {
+ case FLOW_MODE_NONE:
+ skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
+ reg |= GM_GPCR_FC_TX_DIS | GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS;
+ break;
+ case FLOW_MODE_LOC_SEND:
+ /* disable Rx flow-control */
+ reg |= GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS;
+ break;
+ case FLOW_MODE_SYMMETRIC:
+ case FLOW_MODE_SYM_OR_REM:
+ /* enable Tx & Rx flow-control */
+ break;
+ }
+
+ gma_write16(hw, port, GM_GP_CTRL, reg);
+ skge_read16(hw, SK_REG(port, GMAC_IRQ_SRC));
+
+ yukon_init(hw, port);
+
+ /* MIB clear */
+ reg = gma_read16(hw, port, GM_PHY_ADDR);
+ gma_write16(hw, port, GM_PHY_ADDR, reg | GM_PAR_MIB_CLR);
+
+ for (i = 0; i < GM_MIB_CNT_SIZE; i++)
+ gma_read16(hw, port, GM_MIB_CNT_BASE + 8*i);
+ gma_write16(hw, port, GM_PHY_ADDR, reg);
+
+ /* transmit control */
+ gma_write16(hw, port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF));
+
+ /* receive control reg: unicast + multicast + no FCS */
+ gma_write16(hw, port, GM_RX_CTRL,
+ GM_RXCR_UCF_ENA | GM_RXCR_CRC_DIS | GM_RXCR_MCF_ENA);
+
+ /* transmit flow control */
+ gma_write16(hw, port, GM_TX_FLOW_CTRL, 0xffff);
+
+ /* transmit parameter */
+ gma_write16(hw, port, GM_TX_PARAM,
+ TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) |
+ TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
+ TX_IPG_JAM_DATA(TX_IPG_JAM_DEF));
+
+ /* configure the Serial Mode Register */
+ reg = DATA_BLIND_VAL(DATA_BLIND_DEF)
+ | GM_SMOD_VLAN_ENA
+ | IPG_DATA_VAL(IPG_DATA_DEF);
+
+ if (hw->dev[port]->mtu > ETH_DATA_LEN)
+ reg |= GM_SMOD_JUMBO_ENA;
+
+ gma_write16(hw, port, GM_SERIAL_MODE, reg);
+
+ /* physical address: used for pause frames */
+ gma_set_addr(hw, port, GM_SRC_ADDR_1L, addr);
+ /* virtual address for data */
+ gma_set_addr(hw, port, GM_SRC_ADDR_2L, addr);
+
+ /* enable interrupt mask for counter overflows */
+ gma_write16(hw, port, GM_TX_IRQ_MSK, 0);
+ gma_write16(hw, port, GM_RX_IRQ_MSK, 0);
+ gma_write16(hw, port, GM_TR_IRQ_MSK, 0);
+
+ /* Initialize Mac Fifo */
+
+ /* Configure Rx MAC FIFO */
+ skge_write16(hw, SK_REG(port, RX_GMF_FL_MSK), RX_FF_FL_DEF_MSK);
+ reg = GMF_OPER_ON | GMF_RX_F_FL_ON;
+
+ /* disable Rx GMAC FIFO Flush for YUKON-Lite Rev. A0 only */
+ if (is_yukon_lite_a0(hw))
+ reg &= ~GMF_RX_F_FL_ON;
+
+ skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR);
+ skge_write16(hw, SK_REG(port, RX_GMF_CTRL_T), reg);
+ /*
+	 * Because pause packet truncation in the GMAC is not working,
+	 * we have to increase the Flush Threshold to 64 bytes in order
+	 * to flush pause packets out of the Rx FIFO on Yukon-1.
+ */
+ skge_write16(hw, SK_REG(port, RX_GMF_FL_THR), RX_GMF_FL_THR_DEF+1);
+
+ /* Configure Tx MAC FIFO */
+ skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR);
+ skge_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON);
+}
+
+/* Go into power down mode */
+static void yukon_suspend(struct skge_hw *hw, int port)
+{
+ u16 ctrl;
+
+ ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
+ ctrl |= PHY_M_PC_POL_R_DIS;
+ gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
+
+ ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL);
+ ctrl |= PHY_CT_RESET;
+ gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);
+
+ /* switch IEEE compatible power down mode on */
+ ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL);
+ ctrl |= PHY_CT_PDOWN;
+ gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);
+}
+
+static void yukon_stop(struct skge_port *skge)
+{
+ struct skge_hw *hw = skge->hw;
+ int port = skge->port;
+
+ skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0);
+ yukon_reset(hw, port);
+
+ gma_write16(hw, port, GM_GP_CTRL,
+ gma_read16(hw, port, GM_GP_CTRL)
+ & ~(GM_GPCR_TX_ENA|GM_GPCR_RX_ENA));
+ gma_read16(hw, port, GM_GP_CTRL);
+
+ yukon_suspend(hw, port);
+
+ /* set GPHY Control reset */
+ skge_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
+ skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET);
+}
+
+static void yukon_get_stats(struct skge_port *skge, u64 *data)
+{
+ struct skge_hw *hw = skge->hw;
+ int port = skge->port;
+ int i;
+
+ data[0] = (u64) gma_read32(hw, port, GM_TXO_OK_HI) << 32
+ | gma_read32(hw, port, GM_TXO_OK_LO);
+ data[1] = (u64) gma_read32(hw, port, GM_RXO_OK_HI) << 32
+ | gma_read32(hw, port, GM_RXO_OK_LO);
+
+ for (i = 2; i < ARRAY_SIZE(skge_stats); i++)
+ data[i] = gma_read32(hw, port,
+ skge_stats[i].gma_offset);
+}
+
+static void yukon_mac_intr(struct skge_hw *hw, int port)
+{
+ struct net_device *dev = hw->dev[port];
+ struct skge_port *skge = netdev_priv(dev);
+ u8 status = skge_read8(hw, SK_REG(port, GMAC_IRQ_SRC));
+
+ netif_printk(skge, intr, KERN_DEBUG, skge->netdev,
+ "mac interrupt status 0x%x\n", status);
+
+ if (status & GM_IS_RX_FF_OR) {
+ ++dev->stats.rx_fifo_errors;
+ skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_CLI_RX_FO);
+ }
+
+ if (status & GM_IS_TX_FF_UR) {
+ ++dev->stats.tx_fifo_errors;
+ skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_FU);
+ }
+
+}
+
+static u16 yukon_speed(const struct skge_hw *hw, u16 aux)
+{
+ switch (aux & PHY_M_PS_SPEED_MSK) {
+ case PHY_M_PS_SPEED_1000:
+ return SPEED_1000;
+ case PHY_M_PS_SPEED_100:
+ return SPEED_100;
+ default:
+ return SPEED_10;
+ }
+}
+
+static void yukon_link_up(struct skge_port *skge)
+{
+ struct skge_hw *hw = skge->hw;
+ int port = skge->port;
+ u16 reg;
+
+ /* Enable Transmit FIFO Underrun */
+ skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), GMAC_DEF_MSK);
+
+ reg = gma_read16(hw, port, GM_GP_CTRL);
+ if (skge->duplex == DUPLEX_FULL || skge->autoneg == AUTONEG_ENABLE)
+ reg |= GM_GPCR_DUP_FULL;
+
+ /* enable Rx/Tx */
+ reg |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
+ gma_write16(hw, port, GM_GP_CTRL, reg);
+
+ gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_DEF_MSK);
+ skge_link_up(skge);
+}
+
+static void yukon_link_down(struct skge_port *skge)
+{
+ struct skge_hw *hw = skge->hw;
+ int port = skge->port;
+ u16 ctrl;
+
+ ctrl = gma_read16(hw, port, GM_GP_CTRL);
+ ctrl &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
+ gma_write16(hw, port, GM_GP_CTRL, ctrl);
+
+ if (skge->flow_status == FLOW_STAT_REM_SEND) {
+ ctrl = gm_phy_read(hw, port, PHY_MARV_AUNE_ADV);
+ ctrl |= PHY_M_AN_ASP;
+ /* restore Asymmetric Pause bit */
+ gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, ctrl);
+ }
+
+ skge_link_down(skge);
+
+ yukon_init(hw, port);
+}
+
+static void yukon_phy_intr(struct skge_port *skge)
+{
+ struct skge_hw *hw = skge->hw;
+ int port = skge->port;
+ const char *reason = NULL;
+ u16 istatus, phystat;
+
+ istatus = gm_phy_read(hw, port, PHY_MARV_INT_STAT);
+ phystat = gm_phy_read(hw, port, PHY_MARV_PHY_STAT);
+
+ netif_printk(skge, intr, KERN_DEBUG, skge->netdev,
+ "phy interrupt status 0x%x 0x%x\n", istatus, phystat);
+
+ if (istatus & PHY_M_IS_AN_COMPL) {
+ if (gm_phy_read(hw, port, PHY_MARV_AUNE_LP)
+ & PHY_M_AN_RF) {
+ reason = "remote fault";
+ goto failed;
+ }
+
+ if (gm_phy_read(hw, port, PHY_MARV_1000T_STAT) & PHY_B_1000S_MSF) {
+ reason = "master/slave fault";
+ goto failed;
+ }
+
+ if (!(phystat & PHY_M_PS_SPDUP_RES)) {
+ reason = "speed/duplex";
+ goto failed;
+ }
+
+ skge->duplex = (phystat & PHY_M_PS_FULL_DUP)
+ ? DUPLEX_FULL : DUPLEX_HALF;
+ skge->speed = yukon_speed(hw, phystat);
+
+ /* We are using IEEE 802.3z/D5.0 Table 37-4 */
+ switch (phystat & PHY_M_PS_PAUSE_MSK) {
+ case PHY_M_PS_PAUSE_MSK:
+ skge->flow_status = FLOW_STAT_SYMMETRIC;
+ break;
+ case PHY_M_PS_RX_P_EN:
+ skge->flow_status = FLOW_STAT_REM_SEND;
+ break;
+ case PHY_M_PS_TX_P_EN:
+ skge->flow_status = FLOW_STAT_LOC_SEND;
+ break;
+ default:
+ skge->flow_status = FLOW_STAT_NONE;
+ }
+
+ if (skge->flow_status == FLOW_STAT_NONE ||
+ (skge->speed < SPEED_1000 && skge->duplex == DUPLEX_HALF))
+ skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
+ else
+ skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON);
+ yukon_link_up(skge);
+ return;
+ }
+
+ if (istatus & PHY_M_IS_LSP_CHANGE)
+ skge->speed = yukon_speed(hw, phystat);
+
+ if (istatus & PHY_M_IS_DUP_CHANGE)
+ skge->duplex = (phystat & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF;
+ if (istatus & PHY_M_IS_LST_CHANGE) {
+ if (phystat & PHY_M_PS_LINK_UP)
+ yukon_link_up(skge);
+ else
+ yukon_link_down(skge);
+ }
+ return;
+ failed:
+ pr_err("%s: autonegotiation failed (%s)\n", skge->netdev->name, reason);
+
+ /* XXX restart autonegotiation? */
+}
+
+static void skge_phy_reset(struct skge_port *skge)
+{
+ struct skge_hw *hw = skge->hw;
+ int port = skge->port;
+ struct net_device *dev = hw->dev[port];
+
+ netif_stop_queue(skge->netdev);
+ netif_carrier_off(skge->netdev);
+
+ spin_lock_bh(&hw->phy_lock);
+ if (is_genesis(hw)) {
+ genesis_reset(hw, port);
+ genesis_mac_init(hw, port);
+ } else {
+ yukon_reset(hw, port);
+ yukon_init(hw, port);
+ }
+ spin_unlock_bh(&hw->phy_lock);
+
+ skge_set_multicast(dev);
+}
+
+/* Basic MII support */
+static int skge_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct mii_ioctl_data *data = if_mii(ifr);
+ struct skge_port *skge = netdev_priv(dev);
+ struct skge_hw *hw = skge->hw;
+ int err = -EOPNOTSUPP;
+
+ if (!netif_running(dev))
+ return -ENODEV; /* Phy still in reset */
+
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ data->phy_id = hw->phy_addr;
+
+ fallthrough;
+ case SIOCGMIIREG: {
+ u16 val = 0;
+ spin_lock_bh(&hw->phy_lock);
+
+ if (is_genesis(hw))
+ err = __xm_phy_read(hw, skge->port, data->reg_num & 0x1f, &val);
+ else
+ err = __gm_phy_read(hw, skge->port, data->reg_num & 0x1f, &val);
+ spin_unlock_bh(&hw->phy_lock);
+ data->val_out = val;
+ break;
+ }
+
+ case SIOCSMIIREG:
+ spin_lock_bh(&hw->phy_lock);
+ if (is_genesis(hw))
+ err = xm_phy_write(hw, skge->port, data->reg_num & 0x1f,
+ data->val_in);
+ else
+ err = gm_phy_write(hw, skge->port, data->reg_num & 0x1f,
+ data->val_in);
+ spin_unlock_bh(&hw->phy_lock);
+ break;
+ }
+ return err;
+}
+
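+/*
+ * Carve out a region of the on-chip RAM buffer for queue 'q'.
+ * The RAM buffer registers are programmed in units of 8-byte words,
+ * hence the divisions by 8. Receive queues also get flow-control
+ * thresholds at 2/3 (upper) and 1/3 (lower) of the region.
+ */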
+static void skge_ramset(struct skge_hw *hw, u16 q, u32 start, size_t len)
+{
+ u32 end;
+
+ start /= 8;
+ len /= 8;
+ end = start + len - 1;
+
+ skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_RST_CLR);
+ skge_write32(hw, RB_ADDR(q, RB_START), start);
+ skge_write32(hw, RB_ADDR(q, RB_WP), start);
+ skge_write32(hw, RB_ADDR(q, RB_RP), start);
+ skge_write32(hw, RB_ADDR(q, RB_END), end);
+
+ if (q == Q_R1 || q == Q_R2) {
+		/* Set thresholds on receive queues */
+ skge_write32(hw, RB_ADDR(q, RB_RX_UTPP),
+ start + (2*len)/3);
+ skge_write32(hw, RB_ADDR(q, RB_RX_LTPP),
+ start + (len/3));
+ } else {
+		/* Enable store & forward on Tx queues because
+ * Tx FIFO is only 4K on Genesis and 1K on Yukon
+ */
+ skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_STFWD);
+ }
+
+ skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_OP_MD);
+}
+
+/* Setup Bus Memory Interface */
+static void skge_qset(struct skge_port *skge, u16 q,
+ const struct skge_element *e)
+{
+ struct skge_hw *hw = skge->hw;
+ u32 watermark = 0x600;
+ u64 base = skge->dma + (e->desc - skge->mem);
+
+ /* optimization to reduce window on 32bit/33mhz */
+ if ((skge_read16(hw, B0_CTST) & (CS_BUS_CLOCK | CS_BUS_SLOT_SZ)) == 0)
+ watermark /= 2;
+
+ skge_write32(hw, Q_ADDR(q, Q_CSR), CSR_CLR_RESET);
+ skge_write32(hw, Q_ADDR(q, Q_F), watermark);
+ skge_write32(hw, Q_ADDR(q, Q_DA_H), (u32)(base >> 32));
+ skge_write32(hw, Q_ADDR(q, Q_DA_L), (u32)base);
+}
+
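+/*
+ * Bring a port up: allocate one coherent DMA region holding both
+ * descriptor rings, fill the receive ring, initialize the MAC, split
+ * the on-chip RAM buffer between Rx and Tx, then start the receive
+ * BMU and enable NAPI and this port's interrupt sources.
+ */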
+static int skge_up(struct net_device *dev)
+{
+ struct skge_port *skge = netdev_priv(dev);
+ struct skge_hw *hw = skge->hw;
+ int port = skge->port;
+ u32 chunk, ram_addr;
+ size_t rx_size, tx_size;
+ int err;
+
+ if (!is_valid_ether_addr(dev->dev_addr))
+ return -EINVAL;
+
+ netif_info(skge, ifup, skge->netdev, "enabling interface\n");
+
+ if (dev->mtu > RX_BUF_SIZE)
+ skge->rx_buf_size = dev->mtu + ETH_HLEN;
+ else
+ skge->rx_buf_size = RX_BUF_SIZE;
+
+
+ rx_size = skge->rx_ring.count * sizeof(struct skge_rx_desc);
+ tx_size = skge->tx_ring.count * sizeof(struct skge_tx_desc);
+ skge->mem_size = tx_size + rx_size;
+ skge->mem = dma_alloc_coherent(&hw->pdev->dev, skge->mem_size,
+ &skge->dma, GFP_KERNEL);
+ if (!skge->mem)
+ return -ENOMEM;
+
+ BUG_ON(skge->dma & 7);
+
+ if (upper_32_bits(skge->dma) != upper_32_bits(skge->dma + skge->mem_size)) {
+ dev_err(&hw->pdev->dev, "dma_alloc_coherent region crosses 4G boundary\n");
+ err = -EINVAL;
+ goto free_pci_mem;
+ }
+
+ err = skge_ring_alloc(&skge->rx_ring, skge->mem, skge->dma);
+ if (err)
+ goto free_pci_mem;
+
+ err = skge_rx_fill(dev);
+ if (err)
+ goto free_rx_ring;
+
+ err = skge_ring_alloc(&skge->tx_ring, skge->mem + rx_size,
+ skge->dma + rx_size);
+ if (err)
+ goto free_rx_ring;
+
+ if (hw->ports == 1) {
+ err = request_irq(hw->pdev->irq, skge_intr, IRQF_SHARED,
+ dev->name, hw);
+ if (err) {
+ netdev_err(dev, "Unable to allocate interrupt %d error: %d\n",
+ hw->pdev->irq, err);
+ goto free_tx_ring;
+ }
+ }
+
+ /* Initialize MAC */
+ netif_carrier_off(dev);
+ spin_lock_bh(&hw->phy_lock);
+ if (is_genesis(hw))
+ genesis_mac_init(hw, port);
+ else
+ yukon_mac_init(hw, port);
+ spin_unlock_bh(&hw->phy_lock);
+
+	/* Configure RAM buffers - split equally between ports and Tx/Rx */
+ chunk = (hw->ram_size - hw->ram_offset) / (hw->ports * 2);
+ ram_addr = hw->ram_offset + 2 * chunk * port;
+
+ skge_ramset(hw, rxqaddr[port], ram_addr, chunk);
+ skge_qset(skge, rxqaddr[port], skge->rx_ring.to_clean);
+
+ BUG_ON(skge->tx_ring.to_use != skge->tx_ring.to_clean);
+ skge_ramset(hw, txqaddr[port], ram_addr+chunk, chunk);
+ skge_qset(skge, txqaddr[port], skge->tx_ring.to_use);
+
+ /* Start receiver BMU */
+ wmb();
+ skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_START | CSR_IRQ_CL_F);
+ skge_led(skge, LED_MODE_ON);
+
+ spin_lock_irq(&hw->hw_lock);
+ hw->intr_mask |= portmask[port];
+ skge_write32(hw, B0_IMSK, hw->intr_mask);
+ skge_read32(hw, B0_IMSK);
+ spin_unlock_irq(&hw->hw_lock);
+
+ napi_enable(&skge->napi);
+
+ skge_set_multicast(dev);
+
+ return 0;
+
+ free_tx_ring:
+ kfree(skge->tx_ring.start);
+ free_rx_ring:
+ skge_rx_clean(skge);
+ kfree(skge->rx_ring.start);
+ free_pci_mem:
+ dma_free_coherent(&hw->pdev->dev, skge->mem_size, skge->mem,
+ skge->dma);
+ skge->mem = NULL;
+
+ return err;
+}
+
+/* stop receiver */
+static void skge_rx_stop(struct skge_hw *hw, int port)
+{
+ skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_STOP);
+ skge_write32(hw, RB_ADDR(port ? Q_R2 : Q_R1, RB_CTRL),
+ RB_RST_SET|RB_DIS_OP_MD);
+ skge_write32(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_SET_RESET);
+}
+
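+/*
+ * Tear a port down: mask its interrupt sources, stop the transmitter
+ * and receiver, reset the MAC FIFOs and RAM buffer queues, free all
+ * pending Tx/Rx buffers, and release the descriptor ring memory.
+ */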
+static int skge_down(struct net_device *dev)
+{
+ struct skge_port *skge = netdev_priv(dev);
+ struct skge_hw *hw = skge->hw;
+ int port = skge->port;
+
+ if (!skge->mem)
+ return 0;
+
+ netif_info(skge, ifdown, skge->netdev, "disabling interface\n");
+
+ netif_tx_disable(dev);
+
+ if (is_genesis(hw) && hw->phy_type == SK_PHY_XMAC)
+ del_timer_sync(&skge->link_timer);
+
+ napi_disable(&skge->napi);
+ netif_carrier_off(dev);
+
+ spin_lock_irq(&hw->hw_lock);
+ hw->intr_mask &= ~portmask[port];
+ skge_write32(hw, B0_IMSK, (hw->ports == 1) ? 0 : hw->intr_mask);
+ skge_read32(hw, B0_IMSK);
+ spin_unlock_irq(&hw->hw_lock);
+
+ if (hw->ports == 1)
+ free_irq(hw->pdev->irq, hw);
+
+ skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), LED_REG_OFF);
+ if (is_genesis(hw))
+ genesis_stop(skge);
+ else
+ yukon_stop(skge);
+
+ /* Stop transmitter */
+ skge_write8(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_STOP);
+ skge_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL),
+ RB_RST_SET|RB_DIS_OP_MD);
+
+
+ /* Disable Force Sync bit and Enable Alloc bit */
+ skge_write8(hw, SK_REG(port, TXA_CTRL),
+ TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC);
+
+ /* Stop Interval Timer and Limit Counter of Tx Arbiter */
+ skge_write32(hw, SK_REG(port, TXA_ITI_INI), 0L);
+ skge_write32(hw, SK_REG(port, TXA_LIM_INI), 0L);
+
+ /* Reset PCI FIFO */
+ skge_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_SET_RESET);
+ skge_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), RB_RST_SET);
+
+ /* Reset the RAM Buffer async Tx queue */
+ skge_write8(hw, RB_ADDR(port == 0 ? Q_XA1 : Q_XA2, RB_CTRL), RB_RST_SET);
+
+ skge_rx_stop(hw, port);
+
+ if (is_genesis(hw)) {
+ skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_RST_SET);
+ skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_RST_SET);
+ } else {
+ skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
+ skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET);
+ }
+
+ skge_led(skge, LED_MODE_OFF);
+
+ netif_tx_lock_bh(dev);
+ skge_tx_clean(dev);
+ netif_tx_unlock_bh(dev);
+
+ skge_rx_clean(skge);
+
+ kfree(skge->rx_ring.start);
+ kfree(skge->tx_ring.start);
+ dma_free_coherent(&hw->pdev->dev, skge->mem_size, skge->mem,
+ skge->dma);
+ skge->mem = NULL;
+ return 0;
+}
+
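+/*
+ * Number of free transmit descriptors. One slot is always left
+ * unused so that to_use == to_clean unambiguously means "empty".
+ */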
+static inline int skge_avail(const struct skge_ring *ring)
+{
+ smp_mb();
+ return ((ring->to_clean > ring->to_use) ? 0 : ring->count)
+ + (ring->to_clean - ring->to_use) - 1;
+}
+
+static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct skge_port *skge = netdev_priv(dev);
+ struct skge_hw *hw = skge->hw;
+ struct skge_element *e;
+ struct skge_tx_desc *td;
+ int i;
+ u32 control, len;
+ dma_addr_t map;
+
+ if (skb_padto(skb, ETH_ZLEN))
+ return NETDEV_TX_OK;
+
+ if (unlikely(skge_avail(&skge->tx_ring) < skb_shinfo(skb)->nr_frags + 1))
+ return NETDEV_TX_BUSY;
+
+ e = skge->tx_ring.to_use;
+ td = e->desc;
+ BUG_ON(td->control & BMU_OWN);
+ e->skb = skb;
+ len = skb_headlen(skb);
+ map = dma_map_single(&hw->pdev->dev, skb->data, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(&hw->pdev->dev, map))
+ goto mapping_error;
+
+ dma_unmap_addr_set(e, mapaddr, map);
+ dma_unmap_len_set(e, maplen, len);
+
+ td->dma_lo = lower_32_bits(map);
+ td->dma_hi = upper_32_bits(map);
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ const int offset = skb_checksum_start_offset(skb);
+
+		/* This seems backwards, but it is what the vendor sk98lin
+		 * driver does; the hardware checksum-type bits appear to be swapped.
+ */
+ if (ipip_hdr(skb)->protocol == IPPROTO_UDP &&
+ hw->chip_rev == 0 && hw->chip_id == CHIP_ID_YUKON)
+ control = BMU_TCP_CHECK;
+ else
+ control = BMU_UDP_CHECK;
+
+ td->csum_offs = 0;
+ td->csum_start = offset;
+ td->csum_write = offset + skb->csum_offset;
+ } else
+ control = BMU_CHECK;
+
+ if (!skb_shinfo(skb)->nr_frags) /* single buffer i.e. no fragments */
+ control |= BMU_EOF | BMU_IRQ_EOF;
+ else {
+ struct skge_tx_desc *tf = td;
+
+ control |= BMU_STFWD;
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ map = skb_frag_dma_map(&hw->pdev->dev, frag, 0,
+ skb_frag_size(frag), DMA_TO_DEVICE);
+ if (dma_mapping_error(&hw->pdev->dev, map))
+ goto mapping_unwind;
+
+ e = e->next;
+ e->skb = skb;
+ tf = e->desc;
+ BUG_ON(tf->control & BMU_OWN);
+
+ tf->dma_lo = lower_32_bits(map);
+ tf->dma_hi = upper_32_bits(map);
+ dma_unmap_addr_set(e, mapaddr, map);
+ dma_unmap_len_set(e, maplen, skb_frag_size(frag));
+
+ tf->control = BMU_OWN | BMU_SW | control | skb_frag_size(frag);
+ }
+ tf->control |= BMU_EOF | BMU_IRQ_EOF;
+ }
+	/* Make sure all the descriptors are written */
+ wmb();
+ td->control = BMU_OWN | BMU_SW | BMU_STF | control | len;
+ wmb();
+
+ netdev_sent_queue(dev, skb->len);
+
+ skge_write8(hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_START);
+
+ netif_printk(skge, tx_queued, KERN_DEBUG, skge->netdev,
+ "tx queued, slot %td, len %d\n",
+ e - skge->tx_ring.start, skb->len);
+
+ skge->tx_ring.to_use = e->next;
+ smp_wmb();
+
+ if (skge_avail(&skge->tx_ring) <= TX_LOW_WATER) {
+ netdev_dbg(dev, "transmit queue full\n");
+ netif_stop_queue(dev);
+ }
+
+ return NETDEV_TX_OK;
+
+mapping_unwind:
+ e = skge->tx_ring.to_use;
+ dma_unmap_single(&hw->pdev->dev, dma_unmap_addr(e, mapaddr),
+ dma_unmap_len(e, maplen), DMA_TO_DEVICE);
+ while (i-- > 0) {
+ e = e->next;
+ dma_unmap_page(&hw->pdev->dev, dma_unmap_addr(e, mapaddr),
+ dma_unmap_len(e, maplen), DMA_TO_DEVICE);
+ }
+
+mapping_error:
+ if (net_ratelimit())
+ dev_warn(&hw->pdev->dev, "%s: tx mapping error\n", dev->name);
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+
+
+/* Free resources associated with this ring element */
+static inline void skge_tx_unmap(struct pci_dev *pdev, struct skge_element *e,
+ u32 control)
+{
+ /* skb header vs. fragment */
+ if (control & BMU_STF)
+ dma_unmap_single(&pdev->dev, dma_unmap_addr(e, mapaddr),
+ dma_unmap_len(e, maplen), DMA_TO_DEVICE);
+ else
+ dma_unmap_page(&pdev->dev, dma_unmap_addr(e, mapaddr),
+ dma_unmap_len(e, maplen), DMA_TO_DEVICE);
+}
+
+/* Free all buffers in transmit ring */
+static void skge_tx_clean(struct net_device *dev)
+{
+ struct skge_port *skge = netdev_priv(dev);
+ struct skge_element *e;
+
+ for (e = skge->tx_ring.to_clean; e != skge->tx_ring.to_use; e = e->next) {
+ struct skge_tx_desc *td = e->desc;
+
+ skge_tx_unmap(skge->hw->pdev, e, td->control);
+
+ if (td->control & BMU_EOF)
+ dev_kfree_skb(e->skb);
+ td->control = 0;
+ }
+
+ netdev_reset_queue(dev);
+ skge->tx_ring.to_clean = e;
+}
+
+static void skge_tx_timeout(struct net_device *dev, unsigned int txqueue)
+{
+ struct skge_port *skge = netdev_priv(dev);
+
+ netif_printk(skge, timer, KERN_DEBUG, skge->netdev, "tx timeout\n");
+
+ skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_STOP);
+ skge_tx_clean(dev);
+ netif_wake_queue(dev);
+}
+
+static int skge_change_mtu(struct net_device *dev, int new_mtu)
+{
+ int err;
+
+ if (!netif_running(dev)) {
+ dev->mtu = new_mtu;
+ return 0;
+ }
+
+ skge_down(dev);
+
+ dev->mtu = new_mtu;
+
+ err = skge_up(dev);
+ if (err)
+ dev_close(dev);
+
+ return err;
+}
+
+static const u8 pause_mc_addr[ETH_ALEN] = { 0x1, 0x80, 0xc2, 0x0, 0x0, 0x1 };
+
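+/*
+ * XMAC multicast hash filter: the low six bits of the bitwise-inverted
+ * little-endian CRC of the address select one of the 64 filter bits.
+ */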
+static void genesis_add_filter(u8 filter[8], const u8 *addr)
+{
+ u32 crc, bit;
+
+ crc = ether_crc_le(ETH_ALEN, addr);
+ bit = ~crc & 0x3f;
+ filter[bit/8] |= 1 << (bit%8);
+}
+
+static void genesis_set_multicast(struct net_device *dev)
+{
+ struct skge_port *skge = netdev_priv(dev);
+ struct skge_hw *hw = skge->hw;
+ int port = skge->port;
+ struct netdev_hw_addr *ha;
+ u32 mode;
+ u8 filter[8];
+
+ mode = xm_read32(hw, port, XM_MODE);
+ mode |= XM_MD_ENA_HASH;
+ if (dev->flags & IFF_PROMISC)
+ mode |= XM_MD_ENA_PROM;
+ else
+ mode &= ~XM_MD_ENA_PROM;
+
+ if (dev->flags & IFF_ALLMULTI)
+ memset(filter, 0xff, sizeof(filter));
+ else {
+ memset(filter, 0, sizeof(filter));
+
+ if (skge->flow_status == FLOW_STAT_REM_SEND ||
+ skge->flow_status == FLOW_STAT_SYMMETRIC)
+ genesis_add_filter(filter, pause_mc_addr);
+
+ netdev_for_each_mc_addr(ha, dev)
+ genesis_add_filter(filter, ha->addr);
+ }
+
+ xm_write32(hw, port, XM_MODE, mode);
+ xm_outhash(hw, port, XM_HSM, filter);
+}
+
+static void yukon_add_filter(u8 filter[8], const u8 *addr)
+{
+ u32 bit = ether_crc(ETH_ALEN, addr) & 0x3f;
+
+ filter[bit / 8] |= 1 << (bit % 8);
+}
+
+static void yukon_set_multicast(struct net_device *dev)
+{
+ struct skge_port *skge = netdev_priv(dev);
+ struct skge_hw *hw = skge->hw;
+ int port = skge->port;
+ struct netdev_hw_addr *ha;
+ int rx_pause = (skge->flow_status == FLOW_STAT_REM_SEND ||
+ skge->flow_status == FLOW_STAT_SYMMETRIC);
+ u16 reg;
+ u8 filter[8];
+
+ memset(filter, 0, sizeof(filter));
+
+ reg = gma_read16(hw, port, GM_RX_CTRL);
+ reg |= GM_RXCR_UCF_ENA;
+
+ if (dev->flags & IFF_PROMISC) /* promiscuous */
+ reg &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
+ else if (dev->flags & IFF_ALLMULTI) /* all multicast */
+ memset(filter, 0xff, sizeof(filter));
+ else if (netdev_mc_empty(dev) && !rx_pause)/* no multicast */
+ reg &= ~GM_RXCR_MCF_ENA;
+ else {
+ reg |= GM_RXCR_MCF_ENA;
+
+ if (rx_pause)
+ yukon_add_filter(filter, pause_mc_addr);
+
+ netdev_for_each_mc_addr(ha, dev)
+ yukon_add_filter(filter, ha->addr);
+ }
+
+
+ gma_write16(hw, port, GM_MC_ADDR_H1,
+ (u16)filter[0] | ((u16)filter[1] << 8));
+ gma_write16(hw, port, GM_MC_ADDR_H2,
+ (u16)filter[2] | ((u16)filter[3] << 8));
+ gma_write16(hw, port, GM_MC_ADDR_H3,
+ (u16)filter[4] | ((u16)filter[5] << 8));
+ gma_write16(hw, port, GM_MC_ADDR_H4,
+ (u16)filter[6] | ((u16)filter[7] << 8));
+
+ gma_write16(hw, port, GM_RX_CTRL, reg);
+}
+
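+/*
+ * Helpers to decode the hardware receive status word; the bit layouts
+ * differ between Genesis (XMAC) and Yukon (GMAC).
+ */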
+static inline u16 phy_length(const struct skge_hw *hw, u32 status)
+{
+ if (is_genesis(hw))
+ return status >> XMR_FS_LEN_SHIFT;
+ else
+ return status >> GMR_FS_LEN_SHIFT;
+}
+
+static inline int bad_phy_status(const struct skge_hw *hw, u32 status)
+{
+ if (is_genesis(hw))
+ return (status & (XMR_FS_ERR | XMR_FS_2L_VLAN)) != 0;
+ else
+ return (status & GMR_FS_ANY_ERR) ||
+ (status & GMR_FS_RX_OK) == 0;
+}
+
+static void skge_set_multicast(struct net_device *dev)
+{
+ struct skge_port *skge = netdev_priv(dev);
+
+ if (is_genesis(skge->hw))
+ genesis_set_multicast(dev);
+ else
+ yukon_set_multicast(dev);
+
+}
+
+
+/* Get receive buffer from descriptor.
+ * Handles copy of small buffers and reallocation failures
+ */
+static struct sk_buff *skge_rx_get(struct net_device *dev,
+ struct skge_element *e,
+ u32 control, u32 status, u16 csum)
+{
+ struct skge_port *skge = netdev_priv(dev);
+ struct sk_buff *skb;
+ u16 len = control & BMU_BBC;
+
+ netif_printk(skge, rx_status, KERN_DEBUG, skge->netdev,
+ "rx slot %td status 0x%x len %d\n",
+ e - skge->rx_ring.start, status, len);
+
+ if (len > skge->rx_buf_size)
+ goto error;
+
+ if ((control & (BMU_EOF|BMU_STF)) != (BMU_STF|BMU_EOF))
+ goto error;
+
+ if (bad_phy_status(skge->hw, status))
+ goto error;
+
+ if (phy_length(skge->hw, status) != len)
+ goto error;
+
+ if (len < RX_COPY_THRESHOLD) {
+ skb = netdev_alloc_skb_ip_align(dev, len);
+ if (!skb)
+ goto resubmit;
+
+ dma_sync_single_for_cpu(&skge->hw->pdev->dev,
+ dma_unmap_addr(e, mapaddr),
+ dma_unmap_len(e, maplen),
+ DMA_FROM_DEVICE);
+ skb_copy_from_linear_data(e->skb, skb->data, len);
+ dma_sync_single_for_device(&skge->hw->pdev->dev,
+ dma_unmap_addr(e, mapaddr),
+ dma_unmap_len(e, maplen),
+ DMA_FROM_DEVICE);
+ skge_rx_reuse(e, skge->rx_buf_size);
+ } else {
+ struct skge_element ee;
+ struct sk_buff *nskb;
+
+ nskb = netdev_alloc_skb_ip_align(dev, skge->rx_buf_size);
+ if (!nskb)
+ goto resubmit;
+
+ ee = *e;
+
+ skb = ee.skb;
+ prefetch(skb->data);
+
+ if (skge_rx_setup(skge, e, nskb, skge->rx_buf_size) < 0) {
+ dev_kfree_skb(nskb);
+ goto resubmit;
+ }
+
+ dma_unmap_single(&skge->hw->pdev->dev,
+ dma_unmap_addr(&ee, mapaddr),
+ dma_unmap_len(&ee, maplen), DMA_FROM_DEVICE);
+ }
+
+ skb_put(skb, len);
+
+ if (dev->features & NETIF_F_RXCSUM) {
+ skb->csum = le16_to_cpu(csum);
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ }
+
+ skb->protocol = eth_type_trans(skb, dev);
+
+ return skb;
+error:
+
+ netif_printk(skge, rx_err, KERN_DEBUG, skge->netdev,
+ "rx err, slot %td control 0x%x status 0x%x\n",
+ e - skge->rx_ring.start, control, status);
+
+ if (is_genesis(skge->hw)) {
+ if (status & (XMR_FS_RUNT|XMR_FS_LNG_ERR))
+ dev->stats.rx_length_errors++;
+ if (status & XMR_FS_FRA_ERR)
+ dev->stats.rx_frame_errors++;
+ if (status & XMR_FS_FCS_ERR)
+ dev->stats.rx_crc_errors++;
+ } else {
+ if (status & (GMR_FS_LONG_ERR|GMR_FS_UN_SIZE))
+ dev->stats.rx_length_errors++;
+ if (status & GMR_FS_FRAGMENT)
+ dev->stats.rx_frame_errors++;
+ if (status & GMR_FS_CRC_ERR)
+ dev->stats.rx_crc_errors++;
+ }
+
+resubmit:
+ skge_rx_reuse(e, skge->rx_buf_size);
+ return NULL;
+}
+
+/* Free all buffers in Tx ring which are no longer owned by device */
+static void skge_tx_done(struct net_device *dev)
+{
+ struct skge_port *skge = netdev_priv(dev);
+ struct skge_ring *ring = &skge->tx_ring;
+ struct skge_element *e;
+ unsigned int bytes_compl = 0, pkts_compl = 0;
+
+ skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);
+
+ for (e = ring->to_clean; e != ring->to_use; e = e->next) {
+ u32 control = ((const struct skge_tx_desc *) e->desc)->control;
+
+ if (control & BMU_OWN)
+ break;
+
+ skge_tx_unmap(skge->hw->pdev, e, control);
+
+ if (control & BMU_EOF) {
+ netif_printk(skge, tx_done, KERN_DEBUG, skge->netdev,
+ "tx done slot %td\n",
+ e - skge->tx_ring.start);
+
+ pkts_compl++;
+ bytes_compl += e->skb->len;
+
+ dev_consume_skb_any(e->skb);
+ }
+ }
+ netdev_completed_queue(dev, pkts_compl, bytes_compl);
+ skge->tx_ring.to_clean = e;
+
+ /* Can run lockless until we need to synchronize to restart queue. */
+ smp_mb();
+
+ if (unlikely(netif_queue_stopped(dev) &&
+ skge_avail(&skge->tx_ring) > TX_LOW_WATER)) {
+ netif_tx_lock(dev);
+ if (unlikely(netif_queue_stopped(dev) &&
+ skge_avail(&skge->tx_ring) > TX_LOW_WATER)) {
+ netif_wake_queue(dev);
+
+ }
+ netif_tx_unlock(dev);
+ }
+}
+
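+/*
+ * NAPI poll: reap completed transmits first, then hand received frames
+ * to the stack up to 'budget', restart the receiver, and re-enable this
+ * port's interrupt sources once the ring is drained.
+ */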
+static int skge_poll(struct napi_struct *napi, int budget)
+{
+ struct skge_port *skge = container_of(napi, struct skge_port, napi);
+ struct net_device *dev = skge->netdev;
+ struct skge_hw *hw = skge->hw;
+ struct skge_ring *ring = &skge->rx_ring;
+ struct skge_element *e;
+ int work_done = 0;
+
+ skge_tx_done(dev);
+
+ skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);
+
+ for (e = ring->to_clean; prefetch(e->next), work_done < budget; e = e->next) {
+ struct skge_rx_desc *rd = e->desc;
+ struct sk_buff *skb;
+ u32 control;
+
+ rmb();
+ control = rd->control;
+ if (control & BMU_OWN)
+ break;
+
+ skb = skge_rx_get(dev, e, control, rd->status, rd->csum2);
+ if (likely(skb)) {
+ napi_gro_receive(napi, skb);
+ ++work_done;
+ }
+ }
+ ring->to_clean = e;
+
+ /* restart receiver */
+ wmb();
+ skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_START);
+
+ if (work_done < budget && napi_complete_done(napi, work_done)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&hw->hw_lock, flags);
+ hw->intr_mask |= napimask[skge->port];
+ skge_write32(hw, B0_IMSK, hw->intr_mask);
+ skge_read32(hw, B0_IMSK);
+ spin_unlock_irqrestore(&hw->hw_lock, flags);
+ }
+
+ return work_done;
+}
+
+/* Parity errors seem to happen when Genesis is connected to a switch
+ * with no other ports present. Heartbeat error??
+ */
+static void skge_mac_parity(struct skge_hw *hw, int port)
+{
+ struct net_device *dev = hw->dev[port];
+
+ ++dev->stats.tx_heartbeat_errors;
+
+ if (is_genesis(hw))
+ skge_write16(hw, SK_REG(port, TX_MFF_CTRL1),
+ MFF_CLR_PERR);
+ else
+ /* HW-Bug #8: cleared by GMF_CLI_TX_FC instead of GMF_CLI_TX_PE */
+ skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T),
+ (hw->chip_id == CHIP_ID_YUKON && hw->chip_rev == 0)
+ ? GMF_CLI_TX_FC : GMF_CLI_TX_PE);
+}
+
+static void skge_mac_intr(struct skge_hw *hw, int port)
+{
+ if (is_genesis(hw))
+ genesis_mac_intr(hw, port);
+ else
+ yukon_mac_intr(hw, port);
+}
+
+/* Handle device specific framing and timeout interrupts */
+static void skge_error_irq(struct skge_hw *hw)
+{
+ struct pci_dev *pdev = hw->pdev;
+ u32 hwstatus = skge_read32(hw, B0_HWE_ISRC);
+
+ if (is_genesis(hw)) {
+ /* clear xmac errors */
+ if (hwstatus & (IS_NO_STAT_M1|IS_NO_TIST_M1))
+ skge_write16(hw, RX_MFF_CTRL1, MFF_CLR_INSTAT);
+ if (hwstatus & (IS_NO_STAT_M2|IS_NO_TIST_M2))
+ skge_write16(hw, RX_MFF_CTRL2, MFF_CLR_INSTAT);
+ } else {
+ /* Timestamp (unused) overflow */
+ if (hwstatus & IS_IRQ_TIST_OV)
+ skge_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
+ }
+
+ if (hwstatus & IS_RAM_RD_PAR) {
+ dev_err(&pdev->dev, "Ram read data parity error\n");
+ skge_write16(hw, B3_RI_CTRL, RI_CLR_RD_PERR);
+ }
+
+ if (hwstatus & IS_RAM_WR_PAR) {
+ dev_err(&pdev->dev, "Ram write data parity error\n");
+ skge_write16(hw, B3_RI_CTRL, RI_CLR_WR_PERR);
+ }
+
+ if (hwstatus & IS_M1_PAR_ERR)
+ skge_mac_parity(hw, 0);
+
+ if (hwstatus & IS_M2_PAR_ERR)
+ skge_mac_parity(hw, 1);
+
+ if (hwstatus & IS_R1_PAR_ERR) {
+ dev_err(&pdev->dev, "%s: receive queue parity error\n",
+ hw->dev[0]->name);
+ skge_write32(hw, B0_R1_CSR, CSR_IRQ_CL_P);
+ }
+
+ if (hwstatus & IS_R2_PAR_ERR) {
+ dev_err(&pdev->dev, "%s: receive queue parity error\n",
+ hw->dev[1]->name);
+ skge_write32(hw, B0_R2_CSR, CSR_IRQ_CL_P);
+ }
+
+ if (hwstatus & (IS_IRQ_MST_ERR|IS_IRQ_STAT)) {
+ u16 pci_status, pci_cmd;
+
+ pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
+ pci_read_config_word(pdev, PCI_STATUS, &pci_status);
+
+ dev_err(&pdev->dev, "PCI error cmd=%#x status=%#x\n",
+ pci_cmd, pci_status);
+
+ /* Write the error bits back to clear them. */
+ pci_status &= PCI_STATUS_ERROR_BITS;
+ skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
+ pci_write_config_word(pdev, PCI_COMMAND,
+ pci_cmd | PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
+ pci_write_config_word(pdev, PCI_STATUS, pci_status);
+ skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
+
+ /* if error still set then just ignore it */
+ hwstatus = skge_read32(hw, B0_HWE_ISRC);
+ if (hwstatus & IS_IRQ_STAT) {
+ dev_warn(&hw->pdev->dev, "unable to clear error (so ignoring them)\n");
+ hw->intr_mask &= ~IS_HW_ERR;
+ }
+ }
+}
+
+/*
+ * Interrupts from the PHY are handled in a tasklet (softirq)
+ * because accessing PHY registers requires spin waiting, which might
+ * cause excess interrupt latency.
+ */
+static void skge_extirq(struct tasklet_struct *t)
+{
+ struct skge_hw *hw = from_tasklet(hw, t, phy_task);
+ int port;
+
+ for (port = 0; port < hw->ports; port++) {
+ struct net_device *dev = hw->dev[port];
+
+ if (netif_running(dev)) {
+ struct skge_port *skge = netdev_priv(dev);
+
+ spin_lock(&hw->phy_lock);
+ if (!is_genesis(hw))
+ yukon_phy_intr(skge);
+ else if (hw->phy_type == SK_PHY_BCOM)
+ bcom_phy_intr(skge);
+ spin_unlock(&hw->phy_lock);
+ }
+ }
+
+ spin_lock_irq(&hw->hw_lock);
+ hw->intr_mask |= IS_EXT_REG;
+ skge_write32(hw, B0_IMSK, hw->intr_mask);
+ skge_read32(hw, B0_IMSK);
+ spin_unlock_irq(&hw->hw_lock);
+}
+
+static irqreturn_t skge_intr(int irq, void *dev_id)
+{
+ struct skge_hw *hw = dev_id;
+ u32 status;
+ int handled = 0;
+
+ spin_lock(&hw->hw_lock);
+ /* Reading this register masks IRQ */
+ status = skge_read32(hw, B0_SP_ISRC);
+ if (status == 0 || status == ~0)
+ goto out;
+
+ handled = 1;
+ status &= hw->intr_mask;
+ if (status & IS_EXT_REG) {
+ hw->intr_mask &= ~IS_EXT_REG;
+ tasklet_schedule(&hw->phy_task);
+ }
+
+ if (status & (IS_XA1_F|IS_R1_F)) {
+ struct skge_port *skge = netdev_priv(hw->dev[0]);
+ hw->intr_mask &= ~(IS_XA1_F|IS_R1_F);
+ napi_schedule(&skge->napi);
+ }
+
+ if (status & IS_PA_TO_TX1)
+ skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_TX1);
+
+ if (status & IS_PA_TO_RX1) {
+ ++hw->dev[0]->stats.rx_over_errors;
+ skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX1);
+ }
+
+
+ if (status & IS_MAC1)
+ skge_mac_intr(hw, 0);
+
+ if (hw->dev[1]) {
+ struct skge_port *skge = netdev_priv(hw->dev[1]);
+
+ if (status & (IS_XA2_F|IS_R2_F)) {
+ hw->intr_mask &= ~(IS_XA2_F|IS_R2_F);
+ napi_schedule(&skge->napi);
+ }
+
+ if (status & IS_PA_TO_RX2) {
+ ++hw->dev[1]->stats.rx_over_errors;
+ skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX2);
+ }
+
+ if (status & IS_PA_TO_TX2)
+ skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_TX2);
+
+ if (status & IS_MAC2)
+ skge_mac_intr(hw, 1);
+ }
+
+ if (status & IS_HW_ERR)
+ skge_error_irq(hw);
+out:
+ skge_write32(hw, B0_IMSK, hw->intr_mask);
+ skge_read32(hw, B0_IMSK);
+ spin_unlock(&hw->hw_lock);
+
+ return IRQ_RETVAL(handled);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void skge_netpoll(struct net_device *dev)
+{
+ struct skge_port *skge = netdev_priv(dev);
+
+ disable_irq(dev->irq);
+ skge_intr(dev->irq, skge->hw);
+ enable_irq(dev->irq);
+}
+#endif
+
+static int skge_set_mac_address(struct net_device *dev, void *p)
+{
+ struct skge_port *skge = netdev_priv(dev);
+ struct skge_hw *hw = skge->hw;
+ unsigned port = skge->port;
+ const struct sockaddr *addr = p;
+ u16 ctrl;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ eth_hw_addr_set(dev, addr->sa_data);
+
+ if (!netif_running(dev)) {
+ memcpy_toio(hw->regs + B2_MAC_1 + port*8, dev->dev_addr, ETH_ALEN);
+ memcpy_toio(hw->regs + B2_MAC_2 + port*8, dev->dev_addr, ETH_ALEN);
+ } else {
+ /* disable Rx */
+ spin_lock_bh(&hw->phy_lock);
+ ctrl = gma_read16(hw, port, GM_GP_CTRL);
+ gma_write16(hw, port, GM_GP_CTRL, ctrl & ~GM_GPCR_RX_ENA);
+
+ memcpy_toio(hw->regs + B2_MAC_1 + port*8, dev->dev_addr, ETH_ALEN);
+ memcpy_toio(hw->regs + B2_MAC_2 + port*8, dev->dev_addr, ETH_ALEN);
+
+ if (is_genesis(hw))
+ xm_outaddr(hw, port, XM_SA, dev->dev_addr);
+ else {
+ gma_set_addr(hw, port, GM_SRC_ADDR_1L, dev->dev_addr);
+ gma_set_addr(hw, port, GM_SRC_ADDR_2L, dev->dev_addr);
+ }
+
+ gma_write16(hw, port, GM_GP_CTRL, ctrl);
+ spin_unlock_bh(&hw->phy_lock);
+ }
+
+ return 0;
+}
+
+static const struct {
+ u8 id;
+ const char *name;
+} skge_chips[] = {
+ { CHIP_ID_GENESIS, "Genesis" },
+ { CHIP_ID_YUKON, "Yukon" },
+ { CHIP_ID_YUKON_LITE, "Yukon-Lite"},
+ { CHIP_ID_YUKON_LP, "Yukon-LP"},
+};
+
+static const char *skge_board_name(const struct skge_hw *hw)
+{
+ int i;
+ static char buf[16];
+
+ for (i = 0; i < ARRAY_SIZE(skge_chips); i++)
+ if (skge_chips[i].id == hw->chip_id)
+ return skge_chips[i].name;
+
+ snprintf(buf, sizeof(buf), "chipid 0x%x", hw->chip_id);
+ return buf;
+}
+
+
+/*
+ * Setup the board data structure, but don't bring up
+ * the port(s)
+ */
+static int skge_reset(struct skge_hw *hw)
+{
+ u32 reg;
+ u16 ctst, pci_status;
+ u8 t8, mac_cfg, pmd_type;
+ int i;
+
+ ctst = skge_read16(hw, B0_CTST);
+
+ /* do a SW reset */
+ skge_write8(hw, B0_CTST, CS_RST_SET);
+ skge_write8(hw, B0_CTST, CS_RST_CLR);
+
+ /* clear PCI errors, if any */
+ skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
+ skge_write8(hw, B2_TST_CTRL2, 0);
+
+ pci_read_config_word(hw->pdev, PCI_STATUS, &pci_status);
+ pci_write_config_word(hw->pdev, PCI_STATUS,
+ pci_status | PCI_STATUS_ERROR_BITS);
+ skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
+ skge_write8(hw, B0_CTST, CS_MRST_CLR);
+
+ /* restore CLK_RUN bits (for Yukon-Lite) */
+ skge_write16(hw, B0_CTST,
+ ctst & (CS_CLK_RUN_HOT|CS_CLK_RUN_RST|CS_CLK_RUN_ENA));
+
+ hw->chip_id = skge_read8(hw, B2_CHIP_ID);
+ hw->phy_type = skge_read8(hw, B2_E_1) & 0xf;
+ pmd_type = skge_read8(hw, B2_PMD_TYP);
+ hw->copper = (pmd_type == 'T' || pmd_type == '1');
+
+ switch (hw->chip_id) {
+ case CHIP_ID_GENESIS:
+#ifdef CONFIG_SKGE_GENESIS
+ switch (hw->phy_type) {
+ case SK_PHY_XMAC:
+ hw->phy_addr = PHY_ADDR_XMAC;
+ break;
+ case SK_PHY_BCOM:
+ hw->phy_addr = PHY_ADDR_BCOM;
+ break;
+ default:
+ dev_err(&hw->pdev->dev, "unsupported phy type 0x%x\n",
+ hw->phy_type);
+ return -EOPNOTSUPP;
+ }
+ break;
+#else
+ dev_err(&hw->pdev->dev, "Genesis chip detected but not configured\n");
+ return -EOPNOTSUPP;
+#endif
+
+ case CHIP_ID_YUKON:
+ case CHIP_ID_YUKON_LITE:
+ case CHIP_ID_YUKON_LP:
+ if (hw->phy_type < SK_PHY_MARV_COPPER && pmd_type != 'S')
+ hw->copper = 1;
+
+ hw->phy_addr = PHY_ADDR_MARV;
+ break;
+
+ default:
+ dev_err(&hw->pdev->dev, "unsupported chip type 0x%x\n",
+ hw->chip_id);
+ return -EOPNOTSUPP;
+ }
+
+ mac_cfg = skge_read8(hw, B2_MAC_CFG);
+ hw->ports = (mac_cfg & CFG_SNG_MAC) ? 1 : 2;
+ hw->chip_rev = (mac_cfg & CFG_CHIP_R_MSK) >> 4;
+
+	/* read the adapter's RAM size */
+ t8 = skge_read8(hw, B2_E_0);
+ if (is_genesis(hw)) {
+ if (t8 == 3) {
+ /* special case: 4 x 64k x 36, offset = 0x80000 */
+ hw->ram_size = 0x100000;
+ hw->ram_offset = 0x80000;
+ } else
+ hw->ram_size = t8 * 512;
+ } else if (t8 == 0)
+ hw->ram_size = 0x20000;
+ else
+ hw->ram_size = t8 * 4096;
+
+ hw->intr_mask = IS_HW_ERR;
+
+	/* Use PHY IRQ for all but fiber-based Genesis boards */
+ if (!(is_genesis(hw) && hw->phy_type == SK_PHY_XMAC))
+ hw->intr_mask |= IS_EXT_REG;
+
+ if (is_genesis(hw))
+ genesis_init(hw);
+ else {
+ /* switch power to VCC (WA for VAUX problem) */
+ skge_write8(hw, B0_POWER_CTRL,
+ PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);
+
+ /* avoid boards with stuck Hardware error bits */
+ if ((skge_read32(hw, B0_ISRC) & IS_HW_ERR) &&
+ (skge_read32(hw, B0_HWE_ISRC) & IS_IRQ_SENSOR)) {
+ dev_warn(&hw->pdev->dev, "stuck hardware sensor bit\n");
+ hw->intr_mask &= ~IS_HW_ERR;
+ }
+
+ /* Clear PHY COMA */
+ skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
+ pci_read_config_dword(hw->pdev, PCI_DEV_REG1, &reg);
+ reg &= ~PCI_PHY_COMA;
+ pci_write_config_dword(hw->pdev, PCI_DEV_REG1, reg);
+ skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
+
+
+ for (i = 0; i < hw->ports; i++) {
+ skge_write16(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET);
+ skge_write16(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_CLR);
+ }
+ }
+
+ /* turn off hardware timer (unused) */
+ skge_write8(hw, B2_TI_CTRL, TIM_STOP);
+ skge_write8(hw, B2_TI_CTRL, TIM_CLR_IRQ);
+ skge_write8(hw, B0_LED, LED_STAT_ON);
+
+ /* enable the Tx Arbiters */
+ for (i = 0; i < hw->ports; i++)
+ skge_write8(hw, SK_REG(i, TXA_CTRL), TXA_ENA_ARB);
+
+ /* Initialize ram interface */
+ skge_write16(hw, B3_RI_CTRL, RI_RST_CLR);
+
+ skge_write8(hw, B3_RI_WTO_R1, SK_RI_TO_53);
+ skge_write8(hw, B3_RI_WTO_XA1, SK_RI_TO_53);
+ skge_write8(hw, B3_RI_WTO_XS1, SK_RI_TO_53);
+ skge_write8(hw, B3_RI_RTO_R1, SK_RI_TO_53);
+ skge_write8(hw, B3_RI_RTO_XA1, SK_RI_TO_53);
+ skge_write8(hw, B3_RI_RTO_XS1, SK_RI_TO_53);
+ skge_write8(hw, B3_RI_WTO_R2, SK_RI_TO_53);
+ skge_write8(hw, B3_RI_WTO_XA2, SK_RI_TO_53);
+ skge_write8(hw, B3_RI_WTO_XS2, SK_RI_TO_53);
+ skge_write8(hw, B3_RI_RTO_R2, SK_RI_TO_53);
+ skge_write8(hw, B3_RI_RTO_XA2, SK_RI_TO_53);
+ skge_write8(hw, B3_RI_RTO_XS2, SK_RI_TO_53);
+
+ skge_write32(hw, B0_HWE_IMSK, IS_ERR_MSK);
+
+	/* Set interrupt moderation for transmit only;
+	 * receive interrupts are avoided by NAPI
+ */
+ skge_write32(hw, B2_IRQM_MSK, IS_XA1_F|IS_XA2_F);
+ skge_write32(hw, B2_IRQM_INI, skge_usecs2clk(hw, 100));
+ skge_write32(hw, B2_IRQM_CTRL, TIM_START);
+
+ /* Leave irq disabled until first port is brought up. */
+ skge_write32(hw, B0_IMSK, 0);
+
+ for (i = 0; i < hw->ports; i++) {
+ if (is_genesis(hw))
+ genesis_reset(hw, i);
+ else
+ yukon_reset(hw, i);
+ }
+
+ return 0;
+}
+
+
+#ifdef CONFIG_SKGE_DEBUG
+
+static struct dentry *skge_debug;
+
+static int skge_debug_show(struct seq_file *seq, void *v)
+{
+ struct net_device *dev = seq->private;
+ const struct skge_port *skge = netdev_priv(dev);
+ const struct skge_hw *hw = skge->hw;
+ const struct skge_element *e;
+
+ if (!netif_running(dev))
+ return -ENETDOWN;
+
+ seq_printf(seq, "IRQ src=%x mask=%x\n", skge_read32(hw, B0_ISRC),
+ skge_read32(hw, B0_IMSK));
+
+ seq_printf(seq, "Tx Ring: (%d)\n", skge_avail(&skge->tx_ring));
+ for (e = skge->tx_ring.to_clean; e != skge->tx_ring.to_use; e = e->next) {
+ const struct skge_tx_desc *t = e->desc;
+ seq_printf(seq, "%#x dma=%#x%08x %#x csum=%#x/%x/%x\n",
+ t->control, t->dma_hi, t->dma_lo, t->status,
+ t->csum_offs, t->csum_write, t->csum_start);
+ }
+
+ seq_puts(seq, "\nRx Ring:\n");
+ for (e = skge->rx_ring.to_clean; ; e = e->next) {
+ const struct skge_rx_desc *r = e->desc;
+
+ if (r->control & BMU_OWN)
+ break;
+
+ seq_printf(seq, "%#x dma=%#x%08x %#x %#x csum=%#x/%x\n",
+ r->control, r->dma_hi, r->dma_lo, r->status,
+ r->timestamp, r->csum1, r->csum1_start);
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(skge_debug);
+
+/*
+ * Use network device events to create/remove/rename
+ * debugfs file entries
+ */
+static int skge_device_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct skge_port *skge;
+
+ if (dev->netdev_ops->ndo_open != &skge_up || !skge_debug)
+ goto done;
+
+ skge = netdev_priv(dev);
+ switch (event) {
+ case NETDEV_CHANGENAME:
+ if (skge->debugfs)
+ skge->debugfs = debugfs_rename(skge_debug,
+ skge->debugfs,
+ skge_debug, dev->name);
+ break;
+
+ case NETDEV_GOING_DOWN:
+ debugfs_remove(skge->debugfs);
+ skge->debugfs = NULL;
+ break;
+
+ case NETDEV_UP:
+ skge->debugfs = debugfs_create_file(dev->name, 0444, skge_debug,
+ dev, &skge_debug_fops);
+ break;
+ }
+
+done:
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block skge_notifier = {
+ .notifier_call = skge_device_event,
+};
+
+
+static __init void skge_debug_init(void)
+{
+ skge_debug = debugfs_create_dir("skge", NULL);
+
+ register_netdevice_notifier(&skge_notifier);
+}
+
+static __exit void skge_debug_cleanup(void)
+{
+ if (skge_debug) {
+ unregister_netdevice_notifier(&skge_notifier);
+ debugfs_remove(skge_debug);
+ skge_debug = NULL;
+ }
+}
+
+#else
+#define skge_debug_init()
+#define skge_debug_cleanup()
+#endif
+
+static const struct net_device_ops skge_netdev_ops = {
+ .ndo_open = skge_up,
+ .ndo_stop = skge_down,
+ .ndo_start_xmit = skge_xmit_frame,
+ .ndo_eth_ioctl = skge_ioctl,
+ .ndo_get_stats = skge_get_stats,
+ .ndo_tx_timeout = skge_tx_timeout,
+ .ndo_change_mtu = skge_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_rx_mode = skge_set_multicast,
+ .ndo_set_mac_address = skge_set_mac_address,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = skge_netpoll,
+#endif
+};
+
+
+/* Initialize network device */
+static struct net_device *skge_devinit(struct skge_hw *hw, int port,
+ int highmem)
+{
+ struct skge_port *skge;
+ struct net_device *dev = alloc_etherdev(sizeof(*skge));
+ u8 addr[ETH_ALEN];
+
+ if (!dev)
+ return NULL;
+
+ SET_NETDEV_DEV(dev, &hw->pdev->dev);
+ dev->netdev_ops = &skge_netdev_ops;
+ dev->ethtool_ops = &skge_ethtool_ops;
+ dev->watchdog_timeo = TX_WATCHDOG;
+ dev->irq = hw->pdev->irq;
+
+ /* MTU range: 60 - 9000 */
+ dev->min_mtu = ETH_ZLEN;
+ dev->max_mtu = ETH_JUMBO_MTU;
+
+ if (highmem)
+ dev->features |= NETIF_F_HIGHDMA;
+
+ skge = netdev_priv(dev);
+ netif_napi_add(dev, &skge->napi, skge_poll);
+ skge->netdev = dev;
+ skge->hw = hw;
+ skge->msg_enable = netif_msg_init(debug, default_msg);
+
+ skge->tx_ring.count = DEFAULT_TX_RING_SIZE;
+ skge->rx_ring.count = DEFAULT_RX_RING_SIZE;
+
+ /* Auto speed and flow control */
+ skge->autoneg = AUTONEG_ENABLE;
+ skge->flow_control = FLOW_MODE_SYM_OR_REM;
+ skge->duplex = -1;
+ skge->speed = -1;
+ skge->advertising = skge_supported_modes(hw);
+
+ if (device_can_wakeup(&hw->pdev->dev)) {
+ skge->wol = wol_supported(hw) & WAKE_MAGIC;
+ device_set_wakeup_enable(&hw->pdev->dev, skge->wol);
+ }
+
+ hw->dev[port] = dev;
+
+ skge->port = port;
+
+ /* Only used for Genesis XMAC */
+ if (is_genesis(hw))
+ timer_setup(&skge->link_timer, xm_link_timer, 0);
+ else {
+ dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
+ NETIF_F_RXCSUM;
+ dev->features |= dev->hw_features;
+ }
+
+ /* read the mac address */
+ memcpy_fromio(addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN);
+ eth_hw_addr_set(dev, addr);
+
+ return dev;
+}
+
+static void skge_show_addr(struct net_device *dev)
+{
+ const struct skge_port *skge = netdev_priv(dev);
+
+ netif_info(skge, probe, skge->netdev, "addr %pM\n", dev->dev_addr);
+}
+
+static int only_32bit_dma;
+
+static int skge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct net_device *dev, *dev1;
+ struct skge_hw *hw;
+ int err, using_dac = 0;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "cannot enable PCI device\n");
+ goto err_out;
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ dev_err(&pdev->dev, "cannot obtain PCI resources\n");
+ goto err_out_disable_pdev;
+ }
+
+ pci_set_master(pdev);
+
+ if (!only_32bit_dma && !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+ using_dac = 1;
+ err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+ } else if (!(err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))) {
+ using_dac = 0;
+ err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ }
+
+ if (err) {
+ dev_err(&pdev->dev, "no usable DMA configuration\n");
+ goto err_out_free_regions;
+ }
+
+#ifdef __BIG_ENDIAN
+ /* byte swap descriptors in hardware */
+ {
+ u32 reg;
+
+ pci_read_config_dword(pdev, PCI_DEV_REG2, &reg);
+ reg |= PCI_REV_DESC;
+ pci_write_config_dword(pdev, PCI_DEV_REG2, reg);
+ }
+#endif
+
+ err = -ENOMEM;
+ /* space for skge@pci:0000:04:00.0 */
+ hw = kzalloc(sizeof(*hw) + strlen(DRV_NAME "@pci:")
+ + strlen(pci_name(pdev)) + 1, GFP_KERNEL);
+ if (!hw)
+ goto err_out_free_regions;
+
+ sprintf(hw->irq_name, DRV_NAME "@pci:%s", pci_name(pdev));
+
+ hw->pdev = pdev;
+ spin_lock_init(&hw->hw_lock);
+ spin_lock_init(&hw->phy_lock);
+ tasklet_setup(&hw->phy_task, skge_extirq);
+
+ hw->regs = ioremap(pci_resource_start(pdev, 0), 0x4000);
+ if (!hw->regs) {
+ dev_err(&pdev->dev, "cannot map device registers\n");
+ goto err_out_free_hw;
+ }
+
+ err = skge_reset(hw);
+ if (err)
+ goto err_out_iounmap;
+
+ pr_info("%s addr 0x%llx irq %d chip %s rev %d\n",
+ DRV_VERSION,
+ (unsigned long long)pci_resource_start(pdev, 0), pdev->irq,
+ skge_board_name(hw), hw->chip_rev);
+
+ dev = skge_devinit(hw, 0, using_dac);
+ if (!dev) {
+ err = -ENOMEM;
+ goto err_out_led_off;
+ }
+
+	/* Some motherboards are broken and have zero in ROM. */
+ if (!is_valid_ether_addr(dev->dev_addr))
+ dev_warn(&pdev->dev, "bad (zero?) ethernet address in rom\n");
+
+ err = register_netdev(dev);
+ if (err) {
+ dev_err(&pdev->dev, "cannot register net device\n");
+ goto err_out_free_netdev;
+ }
+
+ skge_show_addr(dev);
+
+ if (hw->ports > 1) {
+ dev1 = skge_devinit(hw, 1, using_dac);
+ if (!dev1) {
+ err = -ENOMEM;
+ goto err_out_unregister;
+ }
+
+ err = register_netdev(dev1);
+ if (err) {
+ dev_err(&pdev->dev, "cannot register second net device\n");
+ goto err_out_free_dev1;
+ }
+
+ err = request_irq(pdev->irq, skge_intr, IRQF_SHARED,
+ hw->irq_name, hw);
+ if (err) {
+ dev_err(&pdev->dev, "cannot assign irq %d\n",
+ pdev->irq);
+ goto err_out_unregister_dev1;
+ }
+
+ skge_show_addr(dev1);
+ }
+ pci_set_drvdata(pdev, hw);
+
+ return 0;
+
+err_out_unregister_dev1:
+ unregister_netdev(dev1);
+err_out_free_dev1:
+ free_netdev(dev1);
+err_out_unregister:
+ unregister_netdev(dev);
+err_out_free_netdev:
+ free_netdev(dev);
+err_out_led_off:
+ skge_write16(hw, B0_LED, LED_STAT_OFF);
+err_out_iounmap:
+ iounmap(hw->regs);
+err_out_free_hw:
+ kfree(hw);
+err_out_free_regions:
+ pci_release_regions(pdev);
+err_out_disable_pdev:
+ pci_disable_device(pdev);
+err_out:
+ return err;
+}
+
+static void skge_remove(struct pci_dev *pdev)
+{
+ struct skge_hw *hw = pci_get_drvdata(pdev);
+ struct net_device *dev0, *dev1;
+
+ if (!hw)
+ return;
+
+ dev1 = hw->dev[1];
+ if (dev1)
+ unregister_netdev(dev1);
+ dev0 = hw->dev[0];
+ unregister_netdev(dev0);
+
+ tasklet_kill(&hw->phy_task);
+
+ spin_lock_irq(&hw->hw_lock);
+ hw->intr_mask = 0;
+
+ if (hw->ports > 1) {
+ skge_write32(hw, B0_IMSK, 0);
+ skge_read32(hw, B0_IMSK);
+ }
+ spin_unlock_irq(&hw->hw_lock);
+
+ skge_write16(hw, B0_LED, LED_STAT_OFF);
+ skge_write8(hw, B0_CTST, CS_RST_SET);
+
+ if (hw->ports > 1)
+ free_irq(pdev->irq, hw);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ if (dev1)
+ free_netdev(dev1);
+ free_netdev(dev0);
+
+ iounmap(hw->regs);
+ kfree(hw);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int skge_suspend(struct device *dev)
+{
+ struct skge_hw *hw = dev_get_drvdata(dev);
+ int i;
+
+ if (!hw)
+ return 0;
+
+ for (i = 0; i < hw->ports; i++) {
+ struct net_device *dev = hw->dev[i];
+ struct skge_port *skge = netdev_priv(dev);
+
+ if (netif_running(dev))
+ skge_down(dev);
+
+ if (skge->wol)
+ skge_wol_init(skge);
+ }
+
+ skge_write32(hw, B0_IMSK, 0);
+
+ return 0;
+}
+
+static int skge_resume(struct device *dev)
+{
+ struct skge_hw *hw = dev_get_drvdata(dev);
+ int i, err;
+
+ if (!hw)
+ return 0;
+
+ err = skge_reset(hw);
+ if (err)
+ goto out;
+
+ for (i = 0; i < hw->ports; i++) {
+ struct net_device *dev = hw->dev[i];
+
+ if (netif_running(dev)) {
+ err = skge_up(dev);
+
+ if (err) {
+ netdev_err(dev, "could not up: %d\n", err);
+ dev_close(dev);
+ goto out;
+ }
+ }
+ }
+out:
+ return err;
+}
+
+static SIMPLE_DEV_PM_OPS(skge_pm_ops, skge_suspend, skge_resume);
+#define SKGE_PM_OPS (&skge_pm_ops)
+
+#else
+
+#define SKGE_PM_OPS NULL
+#endif /* CONFIG_PM_SLEEP */
+
+static void skge_shutdown(struct pci_dev *pdev)
+{
+ struct skge_hw *hw = pci_get_drvdata(pdev);
+ int i;
+
+ if (!hw)
+ return;
+
+ for (i = 0; i < hw->ports; i++) {
+ struct net_device *dev = hw->dev[i];
+ struct skge_port *skge = netdev_priv(dev);
+
+ if (skge->wol)
+ skge_wol_init(skge);
+ }
+
+ pci_wake_from_d3(pdev, device_may_wakeup(&pdev->dev));
+ pci_set_power_state(pdev, PCI_D3hot);
+}
+
+static struct pci_driver skge_driver = {
+ .name = DRV_NAME,
+ .id_table = skge_id_table,
+ .probe = skge_probe,
+ .remove = skge_remove,
+ .shutdown = skge_shutdown,
+ .driver.pm = SKGE_PM_OPS,
+};
+
+static const struct dmi_system_id skge_32bit_dma_boards[] = {
+ {
+ .ident = "Gigabyte nForce boards",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co"),
+ DMI_MATCH(DMI_BOARD_NAME, "nForce"),
+ },
+ },
+ {
+ .ident = "ASUS P5NSLI",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "P5NSLI")
+ },
+ },
+ {
+ .ident = "FUJITSU SIEMENS A8NE-FM",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTek Computer INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "A8NE-FM")
+ },
+ },
+ {}
+};
+
+static int __init skge_init_module(void)
+{
+ if (dmi_check_system(skge_32bit_dma_boards))
+ only_32bit_dma = 1;
+ skge_debug_init();
+ return pci_register_driver(&skge_driver);
+}
+
+static void __exit skge_cleanup_module(void)
+{
+ pci_unregister_driver(&skge_driver);
+ skge_debug_cleanup();
+}
+
+module_init(skge_init_module);
+module_exit(skge_cleanup_module);
diff --git a/drivers/net/ethernet/marvell/skge.h b/drivers/net/ethernet/marvell/skge.h
new file mode 100644
index 000000000..f72217348
--- /dev/null
+++ b/drivers/net/ethernet/marvell/skge.h
@@ -0,0 +1,2579 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Definitions for the new Marvell Yukon / SysKonnect driver.
+ */
+#ifndef _SKGE_H
+#define _SKGE_H
+#include <linux/interrupt.h>
+
+/* PCI config registers */
+#define PCI_DEV_REG1 0x40
+#define PCI_PHY_COMA 0x8000000
+#define PCI_VIO 0x2000000
+
+#define PCI_DEV_REG2 0x44
+#define  PCI_VPD_ROM_SZ	(7L<<14)	/* VPD ROM size 0=256, 1=512, ... */
+#define  PCI_REV_DESC		(1<<2)		/* Reverse Descriptor bytes */
+
+enum csr_regs {
+ B0_RAP = 0x0000,
+ B0_CTST = 0x0004,
+ B0_LED = 0x0006,
+ B0_POWER_CTRL = 0x0007,
+ B0_ISRC = 0x0008,
+ B0_IMSK = 0x000c,
+ B0_HWE_ISRC = 0x0010,
+ B0_HWE_IMSK = 0x0014,
+ B0_SP_ISRC = 0x0018,
+ B0_XM1_IMSK = 0x0020,
+ B0_XM1_ISRC = 0x0028,
+ B0_XM1_PHY_ADDR = 0x0030,
+ B0_XM1_PHY_DATA = 0x0034,
+ B0_XM2_IMSK = 0x0040,
+ B0_XM2_ISRC = 0x0048,
+ B0_XM2_PHY_ADDR = 0x0050,
+ B0_XM2_PHY_DATA = 0x0054,
+ B0_R1_CSR = 0x0060,
+ B0_R2_CSR = 0x0064,
+ B0_XS1_CSR = 0x0068,
+ B0_XA1_CSR = 0x006c,
+ B0_XS2_CSR = 0x0070,
+ B0_XA2_CSR = 0x0074,
+
+ B2_MAC_1 = 0x0100,
+ B2_MAC_2 = 0x0108,
+ B2_MAC_3 = 0x0110,
+ B2_CONN_TYP = 0x0118,
+ B2_PMD_TYP = 0x0119,
+ B2_MAC_CFG = 0x011a,
+ B2_CHIP_ID = 0x011b,
+ B2_E_0 = 0x011c,
+ B2_E_1 = 0x011d,
+ B2_E_2 = 0x011e,
+ B2_E_3 = 0x011f,
+ B2_FAR = 0x0120,
+ B2_FDP = 0x0124,
+ B2_LD_CTRL = 0x0128,
+ B2_LD_TEST = 0x0129,
+ B2_TI_INI = 0x0130,
+ B2_TI_VAL = 0x0134,
+ B2_TI_CTRL = 0x0138,
+ B2_TI_TEST = 0x0139,
+ B2_IRQM_INI = 0x0140,
+ B2_IRQM_VAL = 0x0144,
+ B2_IRQM_CTRL = 0x0148,
+ B2_IRQM_TEST = 0x0149,
+ B2_IRQM_MSK = 0x014c,
+ B2_IRQM_HWE_MSK = 0x0150,
+ B2_TST_CTRL1 = 0x0158,
+ B2_TST_CTRL2 = 0x0159,
+ B2_GP_IO = 0x015c,
+ B2_I2C_CTRL = 0x0160,
+ B2_I2C_DATA = 0x0164,
+ B2_I2C_IRQ = 0x0168,
+ B2_I2C_SW = 0x016c,
+ B2_BSC_INI = 0x0170,
+ B2_BSC_VAL = 0x0174,
+ B2_BSC_CTRL = 0x0178,
+ B2_BSC_STAT = 0x0179,
+ B2_BSC_TST = 0x017a,
+
+ B3_RAM_ADDR = 0x0180,
+ B3_RAM_DATA_LO = 0x0184,
+ B3_RAM_DATA_HI = 0x0188,
+ B3_RI_WTO_R1 = 0x0190,
+ B3_RI_WTO_XA1 = 0x0191,
+ B3_RI_WTO_XS1 = 0x0192,
+ B3_RI_RTO_R1 = 0x0193,
+ B3_RI_RTO_XA1 = 0x0194,
+ B3_RI_RTO_XS1 = 0x0195,
+ B3_RI_WTO_R2 = 0x0196,
+ B3_RI_WTO_XA2 = 0x0197,
+ B3_RI_WTO_XS2 = 0x0198,
+ B3_RI_RTO_R2 = 0x0199,
+ B3_RI_RTO_XA2 = 0x019a,
+ B3_RI_RTO_XS2 = 0x019b,
+ B3_RI_TO_VAL = 0x019c,
+ B3_RI_CTRL = 0x01a0,
+ B3_RI_TEST = 0x01a2,
+ B3_MA_TOINI_RX1 = 0x01b0,
+ B3_MA_TOINI_RX2 = 0x01b1,
+ B3_MA_TOINI_TX1 = 0x01b2,
+ B3_MA_TOINI_TX2 = 0x01b3,
+ B3_MA_TOVAL_RX1 = 0x01b4,
+ B3_MA_TOVAL_RX2 = 0x01b5,
+ B3_MA_TOVAL_TX1 = 0x01b6,
+ B3_MA_TOVAL_TX2 = 0x01b7,
+ B3_MA_TO_CTRL = 0x01b8,
+ B3_MA_TO_TEST = 0x01ba,
+ B3_MA_RCINI_RX1 = 0x01c0,
+ B3_MA_RCINI_RX2 = 0x01c1,
+ B3_MA_RCINI_TX1 = 0x01c2,
+ B3_MA_RCINI_TX2 = 0x01c3,
+ B3_MA_RCVAL_RX1 = 0x01c4,
+ B3_MA_RCVAL_RX2 = 0x01c5,
+ B3_MA_RCVAL_TX1 = 0x01c6,
+ B3_MA_RCVAL_TX2 = 0x01c7,
+ B3_MA_RC_CTRL = 0x01c8,
+ B3_MA_RC_TEST = 0x01ca,
+ B3_PA_TOINI_RX1 = 0x01d0,
+ B3_PA_TOINI_RX2 = 0x01d4,
+ B3_PA_TOINI_TX1 = 0x01d8,
+ B3_PA_TOINI_TX2 = 0x01dc,
+ B3_PA_TOVAL_RX1 = 0x01e0,
+ B3_PA_TOVAL_RX2 = 0x01e4,
+ B3_PA_TOVAL_TX1 = 0x01e8,
+ B3_PA_TOVAL_TX2 = 0x01ec,
+ B3_PA_CTRL = 0x01f0,
+ B3_PA_TEST = 0x01f2,
+};
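+
+/*
+ * The station address registers are spaced 8 bytes apart (B2_MAC_1 =
+ * 0x0100, B2_MAC_2 = 0x0108), which is why skge_devinit() reads the
+ * per-port address from B2_MAC_1 + port * 8.
+ */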
+
+/* B0_CTST 16 bit Control/Status register */
+enum {
+ CS_CLK_RUN_HOT = 1<<13,/* CLK_RUN hot m. (YUKON-Lite only) */
+ CS_CLK_RUN_RST = 1<<12,/* CLK_RUN reset (YUKON-Lite only) */
+ CS_CLK_RUN_ENA = 1<<11,/* CLK_RUN enable (YUKON-Lite only) */
+ CS_VAUX_AVAIL = 1<<10,/* VAUX available (YUKON only) */
+ CS_BUS_CLOCK = 1<<9, /* Bus Clock 0/1 = 33/66 MHz */
+ CS_BUS_SLOT_SZ = 1<<8, /* Slot Size 0/1 = 32/64 bit slot */
+ CS_ST_SW_IRQ = 1<<7, /* Set IRQ SW Request */
+ CS_CL_SW_IRQ = 1<<6, /* Clear IRQ SW Request */
+ CS_STOP_DONE = 1<<5, /* Stop Master is finished */
+ CS_STOP_MAST = 1<<4, /* Command Bit to stop the master */
+ CS_MRST_CLR = 1<<3, /* Clear Master reset */
+ CS_MRST_SET = 1<<2, /* Set Master reset */
+ CS_RST_CLR = 1<<1, /* Clear Software reset */
+ CS_RST_SET = 1, /* Set Software reset */
+
+/* B0_LED 8 Bit LED register */
+/* Bit 7.. 2: reserved */
+ LED_STAT_ON = 1<<1, /* Status LED on */
+ LED_STAT_OFF = 1, /* Status LED off */
+
+/* B0_POWER_CTRL 8 Bit Power Control reg (YUKON only) */
+ PC_VAUX_ENA = 1<<7, /* Switch VAUX Enable */
+ PC_VAUX_DIS = 1<<6, /* Switch VAUX Disable */
+ PC_VCC_ENA = 1<<5, /* Switch VCC Enable */
+ PC_VCC_DIS = 1<<4, /* Switch VCC Disable */
+ PC_VAUX_ON = 1<<3, /* Switch VAUX On */
+ PC_VAUX_OFF = 1<<2, /* Switch VAUX Off */
+ PC_VCC_ON = 1<<1, /* Switch VCC On */
+ PC_VCC_OFF = 1<<0, /* Switch VCC Off */
+};
+
+/* B2_IRQM_MSK 32 bit IRQ Moderation Mask */
+enum {
+ IS_ALL_MSK = 0xbffffffful, /* All Interrupt bits */
+ IS_HW_ERR = 1<<31, /* Interrupt HW Error */
+ /* Bit 30: reserved */
+ IS_PA_TO_RX1 = 1<<29, /* Packet Arb Timeout Rx1 */
+ IS_PA_TO_RX2 = 1<<28, /* Packet Arb Timeout Rx2 */
+ IS_PA_TO_TX1 = 1<<27, /* Packet Arb Timeout Tx1 */
+ IS_PA_TO_TX2 = 1<<26, /* Packet Arb Timeout Tx2 */
+ IS_I2C_READY = 1<<25, /* IRQ on end of I2C Tx */
+ IS_IRQ_SW = 1<<24, /* SW forced IRQ */
+ IS_EXT_REG = 1<<23, /* IRQ from LM80 or PHY (GENESIS only) */
+ /* IRQ from PHY (YUKON only) */
+ IS_TIMINT = 1<<22, /* IRQ from Timer */
+ IS_MAC1 = 1<<21, /* IRQ from MAC 1 */
+ IS_LNK_SYNC_M1 = 1<<20, /* Link Sync Cnt wrap MAC 1 */
+ IS_MAC2 = 1<<19, /* IRQ from MAC 2 */
+ IS_LNK_SYNC_M2 = 1<<18, /* Link Sync Cnt wrap MAC 2 */
+/* Receive Queue 1 */
+ IS_R1_B = 1<<17, /* Q_R1 End of Buffer */
+ IS_R1_F = 1<<16, /* Q_R1 End of Frame */
+ IS_R1_C = 1<<15, /* Q_R1 Encoding Error */
+/* Receive Queue 2 */
+ IS_R2_B = 1<<14, /* Q_R2 End of Buffer */
+ IS_R2_F = 1<<13, /* Q_R2 End of Frame */
+ IS_R2_C = 1<<12, /* Q_R2 Encoding Error */
+/* Synchronous Transmit Queue 1 */
+ IS_XS1_B = 1<<11, /* Q_XS1 End of Buffer */
+ IS_XS1_F = 1<<10, /* Q_XS1 End of Frame */
+ IS_XS1_C = 1<<9, /* Q_XS1 Encoding Error */
+/* Asynchronous Transmit Queue 1 */
+ IS_XA1_B = 1<<8, /* Q_XA1 End of Buffer */
+ IS_XA1_F = 1<<7, /* Q_XA1 End of Frame */
+ IS_XA1_C = 1<<6, /* Q_XA1 Encoding Error */
+/* Synchronous Transmit Queue 2 */
+ IS_XS2_B = 1<<5, /* Q_XS2 End of Buffer */
+ IS_XS2_F = 1<<4, /* Q_XS2 End of Frame */
+ IS_XS2_C = 1<<3, /* Q_XS2 Encoding Error */
+/* Asynchronous Transmit Queue 2 */
+ IS_XA2_B = 1<<2, /* Q_XA2 End of Buffer */
+ IS_XA2_F = 1<<1, /* Q_XA2 End of Frame */
+ IS_XA2_C = 1<<0, /* Q_XA2 Encoding Error */
+
+ IS_TO_PORT1 = IS_PA_TO_RX1 | IS_PA_TO_TX1,
+ IS_TO_PORT2 = IS_PA_TO_RX2 | IS_PA_TO_TX2,
+
+ IS_PORT_1 = IS_XA1_F| IS_R1_F | IS_TO_PORT1 | IS_MAC1,
+ IS_PORT_2 = IS_XA2_F| IS_R2_F | IS_TO_PORT2 | IS_MAC2,
+};
+
+
+/* B2_IRQM_HWE_MSK 32 bit IRQ Moderation HW Error Mask */
+enum {
+ IS_IRQ_TIST_OV = 1<<13, /* Time Stamp Timer Overflow (YUKON only) */
+ IS_IRQ_SENSOR = 1<<12, /* IRQ from Sensor (YUKON only) */
+ IS_IRQ_MST_ERR = 1<<11, /* IRQ master error detected */
+ IS_IRQ_STAT = 1<<10, /* IRQ status exception */
+ IS_NO_STAT_M1 = 1<<9, /* No Rx Status from MAC 1 */
+ IS_NO_STAT_M2 = 1<<8, /* No Rx Status from MAC 2 */
+ IS_NO_TIST_M1 = 1<<7, /* No Time Stamp from MAC 1 */
+ IS_NO_TIST_M2 = 1<<6, /* No Time Stamp from MAC 2 */
+ IS_RAM_RD_PAR = 1<<5, /* RAM Read Parity Error */
+ IS_RAM_WR_PAR = 1<<4, /* RAM Write Parity Error */
+ IS_M1_PAR_ERR = 1<<3, /* MAC 1 Parity Error */
+ IS_M2_PAR_ERR = 1<<2, /* MAC 2 Parity Error */
+ IS_R1_PAR_ERR = 1<<1, /* Queue R1 Parity Error */
+ IS_R2_PAR_ERR = 1<<0, /* Queue R2 Parity Error */
+
+ IS_ERR_MSK = IS_IRQ_MST_ERR | IS_IRQ_STAT
+ | IS_RAM_RD_PAR | IS_RAM_WR_PAR
+ | IS_M1_PAR_ERR | IS_M2_PAR_ERR
+ | IS_R1_PAR_ERR | IS_R2_PAR_ERR,
+};
+
+/* B2_TST_CTRL1 8 bit Test Control Register 1 */
+enum {
+ TST_FRC_DPERR_MR = 1<<7, /* force DATAPERR on MST RD */
+ TST_FRC_DPERR_MW = 1<<6, /* force DATAPERR on MST WR */
+ TST_FRC_DPERR_TR = 1<<5, /* force DATAPERR on TRG RD */
+ TST_FRC_DPERR_TW = 1<<4, /* force DATAPERR on TRG WR */
+ TST_FRC_APERR_M = 1<<3, /* force ADDRPERR on MST */
+ TST_FRC_APERR_T = 1<<2, /* force ADDRPERR on TRG */
+ TST_CFG_WRITE_ON = 1<<1, /* Enable Config Reg WR */
+ TST_CFG_WRITE_OFF= 1<<0, /* Disable Config Reg WR */
+};
+
+/* B2_MAC_CFG 8 bit MAC Configuration / Chip Revision */
+enum {
+ CFG_CHIP_R_MSK = 0xf<<4, /* Bit 7.. 4: Chip Revision */
+ /* Bit 3.. 2: reserved */
+ CFG_DIS_M2_CLK = 1<<1, /* Disable Clock for 2nd MAC */
+ CFG_SNG_MAC = 1<<0, /* MAC Config: 0=2 MACs / 1=1 MAC*/
+};
+
+/* B2_CHIP_ID 8 bit Chip Identification Number */
+enum {
+ CHIP_ID_GENESIS = 0x0a, /* Chip ID for GENESIS */
+ CHIP_ID_YUKON = 0xb0, /* Chip ID for YUKON */
+ CHIP_ID_YUKON_LITE = 0xb1, /* Chip ID for YUKON-Lite (Rev. A1-A3) */
+ CHIP_ID_YUKON_LP = 0xb2, /* Chip ID for YUKON-LP */
+ CHIP_ID_YUKON_XL = 0xb3, /* Chip ID for YUKON-2 XL */
+ CHIP_ID_YUKON_EC = 0xb6, /* Chip ID for YUKON-2 EC */
+ CHIP_ID_YUKON_FE = 0xb7, /* Chip ID for YUKON-2 FE */
+
+ CHIP_REV_YU_LITE_A1 = 3, /* Chip Rev. for YUKON-Lite A1,A2 */
+ CHIP_REV_YU_LITE_A3 = 7, /* Chip Rev. for YUKON-Lite A3 */
+};
+
+/* B2_TI_CTRL 8 bit Timer control */
+/* B2_IRQM_CTRL 8 bit IRQ Moderation Timer Control */
+enum {
+ TIM_START = 1<<2, /* Start Timer */
+ TIM_STOP = 1<<1, /* Stop Timer */
+ TIM_CLR_IRQ = 1<<0, /* Clear Timer IRQ (!IRQM) */
+};
+
+/* B2_TI_TEST 8 Bit Timer Test */
+/* B2_IRQM_TEST 8 bit IRQ Moderation Timer Test */
+/* B28_DPT_TST 8 bit Descriptor Poll Timer Test Reg */
+enum {
+ TIM_T_ON = 1<<2, /* Test mode on */
+ TIM_T_OFF = 1<<1, /* Test mode off */
+ TIM_T_STEP = 1<<0, /* Test step */
+};
+
+/* B2_GP_IO 32 bit General Purpose I/O Register */
+enum {
+ GP_DIR_9 = 1<<25, /* IO_9 direct, 0=In/1=Out */
+ GP_DIR_8 = 1<<24, /* IO_8 direct, 0=In/1=Out */
+ GP_DIR_7 = 1<<23, /* IO_7 direct, 0=In/1=Out */
+ GP_DIR_6 = 1<<22, /* IO_6 direct, 0=In/1=Out */
+ GP_DIR_5 = 1<<21, /* IO_5 direct, 0=In/1=Out */
+ GP_DIR_4 = 1<<20, /* IO_4 direct, 0=In/1=Out */
+ GP_DIR_3 = 1<<19, /* IO_3 direct, 0=In/1=Out */
+ GP_DIR_2 = 1<<18, /* IO_2 direct, 0=In/1=Out */
+ GP_DIR_1 = 1<<17, /* IO_1 direct, 0=In/1=Out */
+ GP_DIR_0 = 1<<16, /* IO_0 direct, 0=In/1=Out */
+
+ GP_IO_9 = 1<<9, /* IO_9 pin */
+ GP_IO_8 = 1<<8, /* IO_8 pin */
+ GP_IO_7 = 1<<7, /* IO_7 pin */
+ GP_IO_6 = 1<<6, /* IO_6 pin */
+ GP_IO_5 = 1<<5, /* IO_5 pin */
+ GP_IO_4 = 1<<4, /* IO_4 pin */
+ GP_IO_3 = 1<<3, /* IO_3 pin */
+ GP_IO_2 = 1<<2, /* IO_2 pin */
+ GP_IO_1 = 1<<1, /* IO_1 pin */
+ GP_IO_0 = 1<<0, /* IO_0 pin */
+};
+
+/* Descriptor Bit Definition */
+/* TxCtrl Transmit Buffer Control Field */
+/* RxCtrl Receive Buffer Control Field */
+enum {
+ BMU_OWN = 1<<31, /* OWN bit: 0=host/1=BMU */
+ BMU_STF = 1<<30, /* Start of Frame */
+ BMU_EOF = 1<<29, /* End of Frame */
+ BMU_IRQ_EOB = 1<<28, /* Req "End of Buffer" IRQ */
+ BMU_IRQ_EOF = 1<<27, /* Req "End of Frame" IRQ */
+ /* TxCtrl specific bits */
+ BMU_STFWD = 1<<26, /* (Tx) Store & Forward Frame */
+ BMU_NO_FCS = 1<<25, /* (Tx) Disable MAC FCS (CRC) generation */
+ BMU_SW = 1<<24, /* (Tx) 1 bit res. for SW use */
+ /* RxCtrl specific bits */
+ BMU_DEV_0 = 1<<26, /* (Rx) Transfer data to Dev0 */
+ BMU_STAT_VAL = 1<<25, /* (Rx) Rx Status Valid */
+ BMU_TIST_VAL = 1<<24, /* (Rx) Rx TimeStamp Valid */
+ /* Bit 23..16: BMU Check Opcodes */
+ BMU_CHECK = 0x55<<16, /* Default BMU check */
+ BMU_TCP_CHECK = 0x56<<16, /* Descr with TCP ext */
+ BMU_UDP_CHECK = 0x57<<16, /* Descr with UDP ext (YUKON only) */
+ BMU_BBC = 0xffffL, /* Bit 15.. 0: Buffer Byte Counter */
+};
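+
+/*
+ * Illustrative composition (an assumption about typical usage, not a
+ * definition used elsewhere): a single-buffer Tx control word would
+ * combine the bits above along the lines of
+ * BMU_OWN | BMU_STF | BMU_EOF | BMU_IRQ_EOF | BMU_CHECK | len,
+ * with the buffer length occupying the low 16 bits (BMU_BBC).
+ */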
+
+/* B2_BSC_CTRL 8 bit Blink Source Counter Control */
+enum {
+ BSC_START = 1<<1, /* Start Blink Source Counter */
+ BSC_STOP = 1<<0, /* Stop Blink Source Counter */
+};
+
+/* B2_BSC_STAT 8 bit Blink Source Counter Status */
+enum {
+ BSC_SRC = 1<<0, /* Blink Source, 0=Off / 1=On */
+};
+
+/* B2_BSC_TST 16 bit Blink Source Counter Test Reg */
+enum {
+ BSC_T_ON = 1<<2, /* Test mode on */
+ BSC_T_OFF = 1<<1, /* Test mode off */
+ BSC_T_STEP = 1<<0, /* Test step */
+};
+
+/* B3_RAM_ADDR 32 bit RAM Address, to read or write */
+ /* Bit 31..19: reserved */
+#define RAM_ADR_RAN 0x0007ffffL /* Bit 18.. 0: RAM Address Range */
+/* RAM Interface Registers */
+
+/* B3_RI_CTRL 16 bit RAM Iface Control Register */
+enum {
+ RI_CLR_RD_PERR = 1<<9, /* Clear IRQ RAM Read Parity Err */
+ RI_CLR_WR_PERR = 1<<8, /* Clear IRQ RAM Write Parity Err*/
+
+ RI_RST_CLR = 1<<1, /* Clear RAM Interface Reset */
+ RI_RST_SET = 1<<0, /* Set RAM Interface Reset */
+};
+
+/* MAC Arbiter Registers */
+/* B3_MA_TO_CTRL 16 bit MAC Arbiter Timeout Ctrl Reg */
+enum {
+ MA_FOE_ON = 1<<3, /* XMAC Fast Output Enable ON */
+ MA_FOE_OFF = 1<<2, /* XMAC Fast Output Enable OFF */
+ MA_RST_CLR = 1<<1, /* Clear MAC Arbiter Reset */
+ MA_RST_SET = 1<<0, /* Set MAC Arbiter Reset */
+
+};
+
+/* Timeout values */
+#define SK_MAC_TO_53 72 /* MAC arbiter timeout */
+#define SK_PKT_TO_53 0x2000 /* Packet arbiter timeout */
+#define SK_PKT_TO_MAX 0xffff /* Maximum value */
+#define SK_RI_TO_53 36 /* RAM interface timeout */
+
+/* Packet Arbiter Registers */
+/* B3_PA_CTRL 16 bit Packet Arbiter Ctrl Register */
+enum {
+ PA_CLR_TO_TX2 = 1<<13,/* Clear IRQ Packet Timeout TX2 */
+ PA_CLR_TO_TX1 = 1<<12,/* Clear IRQ Packet Timeout TX1 */
+ PA_CLR_TO_RX2 = 1<<11,/* Clear IRQ Packet Timeout RX2 */
+ PA_CLR_TO_RX1 = 1<<10,/* Clear IRQ Packet Timeout RX1 */
+ PA_ENA_TO_TX2 = 1<<9, /* Enable Timeout Timer TX2 */
+ PA_DIS_TO_TX2 = 1<<8, /* Disable Timeout Timer TX2 */
+ PA_ENA_TO_TX1 = 1<<7, /* Enable Timeout Timer TX1 */
+ PA_DIS_TO_TX1 = 1<<6, /* Disable Timeout Timer TX1 */
+ PA_ENA_TO_RX2 = 1<<5, /* Enable Timeout Timer RX2 */
+ PA_DIS_TO_RX2 = 1<<4, /* Disable Timeout Timer RX2 */
+ PA_ENA_TO_RX1 = 1<<3, /* Enable Timeout Timer RX1 */
+ PA_DIS_TO_RX1 = 1<<2, /* Disable Timeout Timer RX1 */
+ PA_RST_CLR = 1<<1, /* Clear MAC Arbiter Reset */
+ PA_RST_SET = 1<<0, /* Set MAC Arbiter Reset */
+};
+
+#define PA_ENA_TO_ALL (PA_ENA_TO_RX1 | PA_ENA_TO_RX2 |\
+ PA_ENA_TO_TX1 | PA_ENA_TO_TX2)
+
+
+/* Transmit Arbiter Registers MAC 1 and 2, use SK_REG() to access */
+/* TXA_ITI_INI 32 bit Tx Arb Interval Timer Init Val */
+/* TXA_ITI_VAL 32 bit Tx Arb Interval Timer Value */
+/* TXA_LIM_INI 32 bit Tx Arb Limit Counter Init Val */
+/* TXA_LIM_VAL 32 bit Tx Arb Limit Counter Value */
+
+#define TXA_MAX_VAL 0x00ffffffUL /* Bit 23.. 0: Max TXA Timer/Cnt Val */
+
+/* TXA_CTRL 8 bit Tx Arbiter Control Register */
+enum {
+ TXA_ENA_FSYNC = 1<<7, /* Enable force of sync Tx queue */
+ TXA_DIS_FSYNC = 1<<6, /* Disable force of sync Tx queue */
+ TXA_ENA_ALLOC = 1<<5, /* Enable alloc of free bandwidth */
+ TXA_DIS_ALLOC = 1<<4, /* Disable alloc of free bandwidth */
+ TXA_START_RC = 1<<3, /* Start sync Rate Control */
+ TXA_STOP_RC = 1<<2, /* Stop sync Rate Control */
+ TXA_ENA_ARB = 1<<1, /* Enable Tx Arbiter */
+ TXA_DIS_ARB = 1<<0, /* Disable Tx Arbiter */
+};
+
+/*
+ * Bank 4 - 5
+ */
+/* Transmit Arbiter Registers MAC 1 and 2, use SK_REG() to access */
+enum {
+ TXA_ITI_INI = 0x0200,/* 32 bit Tx Arb Interval Timer Init Val*/
+ TXA_ITI_VAL = 0x0204,/* 32 bit Tx Arb Interval Timer Value */
+ TXA_LIM_INI = 0x0208,/* 32 bit Tx Arb Limit Counter Init Val */
+ TXA_LIM_VAL = 0x020c,/* 32 bit Tx Arb Limit Counter Value */
+ TXA_CTRL = 0x0210,/* 8 bit Tx Arbiter Control Register */
+ TXA_TEST = 0x0211,/* 8 bit Tx Arbiter Test Register */
+ TXA_STAT = 0x0212,/* 8 bit Tx Arbiter Status Register */
+};
+
+
+enum {
+ B6_EXT_REG = 0x0300,/* External registers (GENESIS only) */
+ B7_CFG_SPC = 0x0380,/* copy of the Configuration register */
+ B8_RQ1_REGS = 0x0400,/* Receive Queue 1 */
+ B8_RQ2_REGS = 0x0480,/* Receive Queue 2 */
+ B8_TS1_REGS = 0x0600,/* Transmit sync queue 1 */
+ B8_TA1_REGS = 0x0680,/* Transmit async queue 1 */
+ B8_TS2_REGS = 0x0700,/* Transmit sync queue 2 */
+	B8_TA2_REGS	= 0x0780,/* Transmit async queue 2 */
+ B16_RAM_REGS = 0x0800,/* RAM Buffer Registers */
+};
+
+/* Queue Register Offsets, use Q_ADDR() to access */
+enum {
+ B8_Q_REGS = 0x0400, /* base of Queue registers */
+ Q_D = 0x00, /* 8*32 bit Current Descriptor */
+ Q_DA_L = 0x20, /* 32 bit Current Descriptor Address Low dWord */
+ Q_DA_H = 0x24, /* 32 bit Current Descriptor Address High dWord */
+ Q_AC_L = 0x28, /* 32 bit Current Address Counter Low dWord */
+ Q_AC_H = 0x2c, /* 32 bit Current Address Counter High dWord */
+ Q_BC = 0x30, /* 32 bit Current Byte Counter */
+ Q_CSR = 0x34, /* 32 bit BMU Control/Status Register */
+ Q_F = 0x38, /* 32 bit Flag Register */
+ Q_T1 = 0x3c, /* 32 bit Test Register 1 */
+ Q_T1_TR = 0x3c, /* 8 bit Test Register 1 Transfer SM */
+ Q_T1_WR = 0x3d, /* 8 bit Test Register 1 Write Descriptor SM */
+ Q_T1_RD = 0x3e, /* 8 bit Test Register 1 Read Descriptor SM */
+ Q_T1_SV = 0x3f, /* 8 bit Test Register 1 Supervisor SM */
+ Q_T2 = 0x40, /* 32 bit Test Register 2 */
+ Q_T3 = 0x44, /* 32 bit Test Register 3 */
+
+};
+#define Q_ADDR(reg, offs) (B8_Q_REGS + (reg) + (offs))
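+/*
+ * Worked example: Q_ADDR(Q_XA1, Q_CSR) = 0x0400 + 0x0280 + 0x34 = 0x06b4,
+ * the BMU control/status register of asynchronous Tx queue 1.
+ */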
+
+/* RAM Buffer Register Offsets */
+enum {
+
+ RB_START= 0x00,/* 32 bit RAM Buffer Start Address */
+ RB_END = 0x04,/* 32 bit RAM Buffer End Address */
+ RB_WP = 0x08,/* 32 bit RAM Buffer Write Pointer */
+ RB_RP = 0x0c,/* 32 bit RAM Buffer Read Pointer */
+ RB_RX_UTPP= 0x10,/* 32 bit Rx Upper Threshold, Pause Packet */
+ RB_RX_LTPP= 0x14,/* 32 bit Rx Lower Threshold, Pause Packet */
+ RB_RX_UTHP= 0x18,/* 32 bit Rx Upper Threshold, High Prio */
+ RB_RX_LTHP= 0x1c,/* 32 bit Rx Lower Threshold, High Prio */
+	/* 0x10 - 0x1f: reserved in the Tx RAM Buffer registers */
+ RB_PC = 0x20,/* 32 bit RAM Buffer Packet Counter */
+ RB_LEV = 0x24,/* 32 bit RAM Buffer Level Register */
+ RB_CTRL = 0x28,/* 32 bit RAM Buffer Control Register */
+ RB_TST1 = 0x29,/* 8 bit RAM Buffer Test Register 1 */
+ RB_TST2 = 0x2a,/* 8 bit RAM Buffer Test Register 2 */
+};
+
+/* Receive and Transmit Queues */
+enum {
+ Q_R1 = 0x0000, /* Receive Queue 1 */
+ Q_R2 = 0x0080, /* Receive Queue 2 */
+ Q_XS1 = 0x0200, /* Synchronous Transmit Queue 1 */
+ Q_XA1 = 0x0280, /* Asynchronous Transmit Queue 1 */
+ Q_XS2 = 0x0300, /* Synchronous Transmit Queue 2 */
+ Q_XA2 = 0x0380, /* Asynchronous Transmit Queue 2 */
+};
+
+/* Different MAC Types */
+enum {
+ SK_MAC_XMAC = 0, /* Xaqti XMAC II */
+ SK_MAC_GMAC = 1, /* Marvell GMAC */
+};
+
+/* Different PHY Types */
+enum {
+ SK_PHY_XMAC = 0,/* integrated in XMAC II */
+ SK_PHY_BCOM = 1,/* Broadcom BCM5400 */
+ SK_PHY_LONE = 2,/* Level One LXT1000 [not supported]*/
+ SK_PHY_NAT = 3,/* National DP83891 [not supported] */
+ SK_PHY_MARV_COPPER= 4,/* Marvell 88E1011S */
+ SK_PHY_MARV_FIBER = 5,/* Marvell 88E1011S working on fiber */
+};
+
+/* PHY addresses (bits 12..8 of PHY address reg) */
+enum {
+ PHY_ADDR_XMAC = 0<<8,
+ PHY_ADDR_BCOM = 1<<8,
+
+/* GPHY address (bits 15..11 of SMI control reg) */
+ PHY_ADDR_MARV = 0,
+};
+
+#define RB_ADDR(offs, queue) ((u16)B16_RAM_REGS + (u16)(queue) + (offs))
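+/*
+ * Worked example: RB_ADDR(RB_CTRL, Q_R1) = 0x0800 + 0x0000 + 0x28 = 0x0828.
+ * Because the macro only adds its arguments, the (offs, queue) order is
+ * effectively interchangeable.
+ */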
+
+/* Receive MAC FIFO, Receive LED, and Link_Sync regs (GENESIS only) */
+enum {
+ RX_MFF_EA = 0x0c00,/* 32 bit Receive MAC FIFO End Address */
+ RX_MFF_WP = 0x0c04,/* 32 bit Receive MAC FIFO Write Pointer */
+
+ RX_MFF_RP = 0x0c0c,/* 32 bit Receive MAC FIFO Read Pointer */
+ RX_MFF_PC = 0x0c10,/* 32 bit Receive MAC FIFO Packet Cnt */
+ RX_MFF_LEV = 0x0c14,/* 32 bit Receive MAC FIFO Level */
+ RX_MFF_CTRL1 = 0x0c18,/* 16 bit Receive MAC FIFO Control Reg 1*/
+ RX_MFF_STAT_TO = 0x0c1a,/* 8 bit Receive MAC Status Timeout */
+ RX_MFF_TIST_TO = 0x0c1b,/* 8 bit Receive MAC Time Stamp Timeout */
+ RX_MFF_CTRL2 = 0x0c1c,/* 8 bit Receive MAC FIFO Control Reg 2*/
+ RX_MFF_TST1 = 0x0c1d,/* 8 bit Receive MAC FIFO Test Reg 1 */
+ RX_MFF_TST2 = 0x0c1e,/* 8 bit Receive MAC FIFO Test Reg 2 */
+
+ RX_LED_INI = 0x0c20,/* 32 bit Receive LED Cnt Init Value */
+ RX_LED_VAL = 0x0c24,/* 32 bit Receive LED Cnt Current Value */
+ RX_LED_CTRL = 0x0c28,/* 8 bit Receive LED Cnt Control Reg */
+ RX_LED_TST = 0x0c29,/* 8 bit Receive LED Cnt Test Register */
+
+ LNK_SYNC_INI = 0x0c30,/* 32 bit Link Sync Cnt Init Value */
+ LNK_SYNC_VAL = 0x0c34,/* 32 bit Link Sync Cnt Current Value */
+ LNK_SYNC_CTRL = 0x0c38,/* 8 bit Link Sync Cnt Control Register */
+ LNK_SYNC_TST = 0x0c39,/* 8 bit Link Sync Cnt Test Register */
+ LNK_LED_REG = 0x0c3c,/* 8 bit Link LED Register */
+};
+
+/* Receive and Transmit MAC FIFO Registers (GENESIS only) */
+/* RX_MFF_CTRL1 16 bit Receive MAC FIFO Control Reg 1 */
+enum {
+ MFF_ENA_RDY_PAT = 1<<13, /* Enable Ready Patch */
+ MFF_DIS_RDY_PAT = 1<<12, /* Disable Ready Patch */
+ MFF_ENA_TIM_PAT = 1<<11, /* Enable Timing Patch */
+ MFF_DIS_TIM_PAT = 1<<10, /* Disable Timing Patch */
+ MFF_ENA_ALM_FUL = 1<<9, /* Enable AlmostFull Sign */
+ MFF_DIS_ALM_FUL = 1<<8, /* Disable AlmostFull Sign */
+ MFF_ENA_PAUSE = 1<<7, /* Enable Pause Signaling */
+ MFF_DIS_PAUSE = 1<<6, /* Disable Pause Signaling */
+ MFF_ENA_FLUSH = 1<<5, /* Enable Frame Flushing */
+ MFF_DIS_FLUSH = 1<<4, /* Disable Frame Flushing */
+ MFF_ENA_TIST = 1<<3, /* Enable Time Stamp Gener */
+ MFF_DIS_TIST = 1<<2, /* Disable Time Stamp Gener */
+ MFF_CLR_INTIST = 1<<1, /* Clear IRQ No Time Stamp */
+ MFF_CLR_INSTAT = 1<<0, /* Clear IRQ No Status */
+ MFF_RX_CTRL_DEF = MFF_ENA_TIM_PAT,
+};
+
+/* TX_MFF_CTRL1 16 bit Transmit MAC FIFO Control Reg 1 */
+enum {
+ MFF_CLR_PERR = 1<<15, /* Clear Parity Error IRQ */
+
+ MFF_ENA_PKT_REC = 1<<13, /* Enable Packet Recovery */
+ MFF_DIS_PKT_REC = 1<<12, /* Disable Packet Recovery */
+
+ MFF_ENA_W4E = 1<<7, /* Enable Wait for Empty */
+ MFF_DIS_W4E = 1<<6, /* Disable Wait for Empty */
+
+ MFF_ENA_LOOPB = 1<<3, /* Enable Loopback */
+ MFF_DIS_LOOPB = 1<<2, /* Disable Loopback */
+ MFF_CLR_MAC_RST = 1<<1, /* Clear XMAC Reset */
+ MFF_SET_MAC_RST = 1<<0, /* Set XMAC Reset */
+
+ MFF_TX_CTRL_DEF = MFF_ENA_PKT_REC | (u16) MFF_ENA_TIM_PAT | MFF_ENA_FLUSH,
+};
+
+
+/* RX_MFF_TST2 8 bit Receive MAC FIFO Test Register 2 */
+/* TX_MFF_TST2 8 bit Transmit MAC FIFO Test Register 2 */
+enum {
+ MFF_WSP_T_ON = 1<<6, /* Tx: Write Shadow Ptr TestOn */
+ MFF_WSP_T_OFF = 1<<5, /* Tx: Write Shadow Ptr TstOff */
+ MFF_WSP_INC = 1<<4, /* Tx: Write Shadow Ptr Increment */
+ MFF_PC_DEC = 1<<3, /* Packet Counter Decrement */
+ MFF_PC_T_ON = 1<<2, /* Packet Counter Test On */
+ MFF_PC_T_OFF = 1<<1, /* Packet Counter Test Off */
+ MFF_PC_INC = 1<<0, /* Packet Counter Increment */
+};
+
+/* RX_MFF_TST1 8 bit Receive MAC FIFO Test Register 1 */
+/* TX_MFF_TST1 8 bit Transmit MAC FIFO Test Register 1 */
+enum {
+ MFF_WP_T_ON = 1<<6, /* Write Pointer Test On */
+ MFF_WP_T_OFF = 1<<5, /* Write Pointer Test Off */
+ MFF_WP_INC = 1<<4, /* Write Pointer Increm */
+
+ MFF_RP_T_ON = 1<<2, /* Read Pointer Test On */
+ MFF_RP_T_OFF = 1<<1, /* Read Pointer Test Off */
+ MFF_RP_DEC = 1<<0, /* Read Pointer Decrement */
+};
+
+/* RX_MFF_CTRL2 8 bit Receive MAC FIFO Control Reg 2 */
+/* TX_MFF_CTRL2 8 bit Transmit MAC FIFO Control Reg 2 */
+enum {
+ MFF_ENA_OP_MD = 1<<3, /* Enable Operation Mode */
+ MFF_DIS_OP_MD = 1<<2, /* Disable Operation Mode */
+ MFF_RST_CLR = 1<<1, /* Clear MAC FIFO Reset */
+ MFF_RST_SET = 1<<0, /* Set MAC FIFO Reset */
+};
+
+
+/* Link LED Counter Registers (GENESIS only) */
+
+/* RX_LED_CTRL 8 bit Receive LED Cnt Control Reg */
+/* TX_LED_CTRL 8 bit Transmit LED Cnt Control Reg */
+/* LNK_SYNC_CTRL 8 bit Link Sync Cnt Control Register */
+enum {
+ LED_START = 1<<2, /* Start Timer */
+ LED_STOP = 1<<1, /* Stop Timer */
+ LED_STATE = 1<<0, /* Rx/Tx: LED State, 1=LED on */
+};
+
+/* RX_LED_TST 8 bit Receive LED Cnt Test Register */
+/* TX_LED_TST 8 bit Transmit LED Cnt Test Register */
+/* LNK_SYNC_TST 8 bit Link Sync Cnt Test Register */
+enum {
+ LED_T_ON = 1<<2, /* LED Counter Test mode On */
+ LED_T_OFF = 1<<1, /* LED Counter Test mode Off */
+ LED_T_STEP = 1<<0, /* LED Counter Step */
+};
+
+/* LNK_LED_REG 8 bit Link LED Register */
+enum {
+ LED_BLK_ON = 1<<5, /* Link LED Blinking On */
+ LED_BLK_OFF = 1<<4, /* Link LED Blinking Off */
+ LED_SYNC_ON = 1<<3, /* Use Sync Wire to switch LED */
+ LED_SYNC_OFF = 1<<2, /* Disable Sync Wire Input */
+ LED_REG_ON = 1<<1, /* switch LED on */
+ LED_REG_OFF = 1<<0, /* switch LED off */
+};
+
+/* Receive GMAC FIFO (YUKON) */
+enum {
+ RX_GMF_EA = 0x0c40,/* 32 bit Rx GMAC FIFO End Address */
+ RX_GMF_AF_THR = 0x0c44,/* 32 bit Rx GMAC FIFO Almost Full Thresh. */
+ RX_GMF_CTRL_T = 0x0c48,/* 32 bit Rx GMAC FIFO Control/Test */
+ RX_GMF_FL_MSK = 0x0c4c,/* 32 bit Rx GMAC FIFO Flush Mask */
+ RX_GMF_FL_THR = 0x0c50,/* 32 bit Rx GMAC FIFO Flush Threshold */
+ RX_GMF_WP = 0x0c60,/* 32 bit Rx GMAC FIFO Write Pointer */
+ RX_GMF_WLEV = 0x0c68,/* 32 bit Rx GMAC FIFO Write Level */
+ RX_GMF_RP = 0x0c70,/* 32 bit Rx GMAC FIFO Read Pointer */
+ RX_GMF_RLEV = 0x0c78,/* 32 bit Rx GMAC FIFO Read Level */
+};
+
+
+/* TXA_TEST 8 bit Tx Arbiter Test Register */
+enum {
+ TXA_INT_T_ON = 1<<5, /* Tx Arb Interval Timer Test On */
+ TXA_INT_T_OFF = 1<<4, /* Tx Arb Interval Timer Test Off */
+ TXA_INT_T_STEP = 1<<3, /* Tx Arb Interval Timer Step */
+ TXA_LIM_T_ON = 1<<2, /* Tx Arb Limit Timer Test On */
+ TXA_LIM_T_OFF = 1<<1, /* Tx Arb Limit Timer Test Off */
+ TXA_LIM_T_STEP = 1<<0, /* Tx Arb Limit Timer Step */
+};
+
+/* TXA_STAT 8 bit Tx Arbiter Status Register */
+enum {
+ TXA_PRIO_XS = 1<<0, /* sync queue has prio to send */
+};
+
+
+/* Q_BC 32 bit Current Byte Counter */
+
+/* BMU Control Status Registers */
+/* B0_R1_CSR 32 bit BMU Ctrl/Stat Rx Queue 1 */
+/* B0_R2_CSR 32 bit BMU Ctrl/Stat Rx Queue 2 */
+/*	B0_XA1_CSR	32 bit BMU Ctrl/Stat Async Tx Queue 1 */
+/*	B0_XS1_CSR	32 bit BMU Ctrl/Stat Sync Tx Queue 1 */
+/*	B0_XA2_CSR	32 bit BMU Ctrl/Stat Async Tx Queue 2 */
+/*	B0_XS2_CSR	32 bit BMU Ctrl/Stat Sync Tx Queue 2 */
+/* Q_CSR 32 bit BMU Control/Status Register */
+
+enum {
+ CSR_SV_IDLE = 1<<24, /* BMU SM Idle */
+
+ CSR_DESC_CLR = 1<<21, /* Clear Reset for Descr */
+ CSR_DESC_SET = 1<<20, /* Set Reset for Descr */
+ CSR_FIFO_CLR = 1<<19, /* Clear Reset for FIFO */
+ CSR_FIFO_SET = 1<<18, /* Set Reset for FIFO */
+ CSR_HPI_RUN = 1<<17, /* Release HPI SM */
+ CSR_HPI_RST = 1<<16, /* Reset HPI SM to Idle */
+ CSR_SV_RUN = 1<<15, /* Release Supervisor SM */
+ CSR_SV_RST = 1<<14, /* Reset Supervisor SM */
+ CSR_DREAD_RUN = 1<<13, /* Release Descr Read SM */
+ CSR_DREAD_RST = 1<<12, /* Reset Descr Read SM */
+ CSR_DWRITE_RUN = 1<<11, /* Release Descr Write SM */
+ CSR_DWRITE_RST = 1<<10, /* Reset Descr Write SM */
+ CSR_TRANS_RUN = 1<<9, /* Release Transfer SM */
+ CSR_TRANS_RST = 1<<8, /* Reset Transfer SM */
+ CSR_ENA_POL = 1<<7, /* Enable Descr Polling */
+ CSR_DIS_POL = 1<<6, /* Disable Descr Polling */
+ CSR_STOP = 1<<5, /* Stop Rx/Tx Queue */
+ CSR_START = 1<<4, /* Start Rx/Tx Queue */
+ CSR_IRQ_CL_P = 1<<3, /* (Rx) Clear Parity IRQ */
+ CSR_IRQ_CL_B = 1<<2, /* Clear EOB IRQ */
+ CSR_IRQ_CL_F = 1<<1, /* Clear EOF IRQ */
+ CSR_IRQ_CL_C = 1<<0, /* Clear ERR IRQ */
+};
+
+#define CSR_SET_RESET (CSR_DESC_SET | CSR_FIFO_SET | CSR_HPI_RST |\
+ CSR_SV_RST | CSR_DREAD_RST | CSR_DWRITE_RST |\
+ CSR_TRANS_RST)
+#define CSR_CLR_RESET (CSR_DESC_CLR | CSR_FIFO_CLR | CSR_HPI_RUN |\
+ CSR_SV_RUN | CSR_DREAD_RUN | CSR_DWRITE_RUN |\
+ CSR_TRANS_RUN)
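+/*
+ * As the bit names indicate, CSR_SET_RESET ORs together the *_SET/*_RST
+ * bits that hold a queue's descriptor, FIFO and state-machine units in
+ * reset, while CSR_CLR_RESET combines the matching *_CLR/*_RUN bits that
+ * release them again.
+ */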
+
+/* Q_F 32 bit Flag Register */
+enum {
+ F_ALM_FULL = 1<<27, /* Rx FIFO: almost full */
+ F_EMPTY = 1<<27, /* Tx FIFO: empty flag */
+ F_FIFO_EOF = 1<<26, /* Tag (EOF Flag) bit in FIFO */
+ F_WM_REACHED = 1<<25, /* Watermark reached */
+
+ F_FIFO_LEVEL = 0x1fL<<16, /* Bit 23..16: # of Qwords in FIFO */
+ F_WATER_MARK = 0x0007ffL, /* Bit 10.. 0: Watermark */
+};
+
+/* RAM Buffer Register Offsets, use RB_ADDR(Queue, Offs) to access */
+/* RB_START 32 bit RAM Buffer Start Address */
+/* RB_END 32 bit RAM Buffer End Address */
+/* RB_WP 32 bit RAM Buffer Write Pointer */
+/* RB_RP 32 bit RAM Buffer Read Pointer */
+/* RB_RX_UTPP 32 bit Rx Upper Threshold, Pause Pack */
+/* RB_RX_LTPP 32 bit Rx Lower Threshold, Pause Pack */
+/* RB_RX_UTHP 32 bit Rx Upper Threshold, High Prio */
+/* RB_RX_LTHP 32 bit Rx Lower Threshold, High Prio */
+/* RB_PC 32 bit RAM Buffer Packet Counter */
+/* RB_LEV 32 bit RAM Buffer Level Register */
+
+#define RB_MSK 0x0007ffff /* Bit 18.. 0: RAM Buffer Pointer Bits */
+/* RB_TST2 8 bit RAM Buffer Test Register 2 */
+/* RB_TST1 8 bit RAM Buffer Test Register 1 */
+
+/* RB_CTRL 8 bit RAM Buffer Control Register */
+enum {
+ RB_ENA_STFWD = 1<<5, /* Enable Store & Forward */
+ RB_DIS_STFWD = 1<<4, /* Disable Store & Forward */
+ RB_ENA_OP_MD = 1<<3, /* Enable Operation Mode */
+ RB_DIS_OP_MD = 1<<2, /* Disable Operation Mode */
+ RB_RST_CLR = 1<<1, /* Clear RAM Buf STM Reset */
+ RB_RST_SET = 1<<0, /* Set RAM Buf STM Reset */
+};
+
+/* Transmit MAC FIFO and Transmit LED Registers (GENESIS only), */
+enum {
+ TX_MFF_EA = 0x0d00,/* 32 bit Transmit MAC FIFO End Address */
+ TX_MFF_WP = 0x0d04,/* 32 bit Transmit MAC FIFO WR Pointer */
+ TX_MFF_WSP = 0x0d08,/* 32 bit Transmit MAC FIFO WR Shadow Ptr */
+ TX_MFF_RP = 0x0d0c,/* 32 bit Transmit MAC FIFO RD Pointer */
+ TX_MFF_PC = 0x0d10,/* 32 bit Transmit MAC FIFO Packet Cnt */
+ TX_MFF_LEV = 0x0d14,/* 32 bit Transmit MAC FIFO Level */
+ TX_MFF_CTRL1 = 0x0d18,/* 16 bit Transmit MAC FIFO Ctrl Reg 1 */
+ TX_MFF_WAF = 0x0d1a,/* 8 bit Transmit MAC Wait after flush */
+
+ TX_MFF_CTRL2 = 0x0d1c,/* 8 bit Transmit MAC FIFO Ctrl Reg 2 */
+ TX_MFF_TST1 = 0x0d1d,/* 8 bit Transmit MAC FIFO Test Reg 1 */
+ TX_MFF_TST2 = 0x0d1e,/* 8 bit Transmit MAC FIFO Test Reg 2 */
+
+ TX_LED_INI = 0x0d20,/* 32 bit Transmit LED Cnt Init Value */
+ TX_LED_VAL = 0x0d24,/* 32 bit Transmit LED Cnt Current Val */
+ TX_LED_CTRL = 0x0d28,/* 8 bit Transmit LED Cnt Control Reg */
+ TX_LED_TST = 0x0d29,/* 8 bit Transmit LED Cnt Test Reg */
+};
+
+/* Counter and Timer constants, for a host clock of 62.5 MHz */
+#define SK_XMIT_DUR 0x002faf08UL /* 50 ms */
+#define SK_BLK_DUR 0x01dcd650UL /* 500 ms */
+
+#define SK_DPOLL_DEF 0x00ee6b28UL /* 250 ms at 62.5 MHz */
+
+#define SK_DPOLL_MAX 0x00ffffffUL /* 268 ms at 62.5 MHz */
+ /* 215 ms at 78.12 MHz */
+
+#define SK_FACT_62 100 /* is given in percent */
+#define SK_FACT_53 85 /* on GENESIS: 53.12 MHz */
+#define SK_FACT_78 125 /* on YUKON: 78.12 MHz */
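+/*
+ * The counts above are simply duration * clock, e.g. 50 ms * 62.5 MHz =
+ * 3,125,000 = 0x002faf08. The SK_FACT_* values scale such constants for
+ * the other host clocks: 53.12 / 62.5 ~= 85% and 78.12 / 62.5 ~= 125%.
+ */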
+
+
+/* Transmit GMAC FIFO (YUKON only) */
+enum {
+ TX_GMF_EA = 0x0d40,/* 32 bit Tx GMAC FIFO End Address */
+ TX_GMF_AE_THR = 0x0d44,/* 32 bit Tx GMAC FIFO Almost Empty Thresh.*/
+ TX_GMF_CTRL_T = 0x0d48,/* 32 bit Tx GMAC FIFO Control/Test */
+
+ TX_GMF_WP = 0x0d60,/* 32 bit Tx GMAC FIFO Write Pointer */
+ TX_GMF_WSP = 0x0d64,/* 32 bit Tx GMAC FIFO Write Shadow Ptr. */
+ TX_GMF_WLEV = 0x0d68,/* 32 bit Tx GMAC FIFO Write Level */
+
+ TX_GMF_RP = 0x0d70,/* 32 bit Tx GMAC FIFO Read Pointer */
+ TX_GMF_RSTP = 0x0d74,/* 32 bit Tx GMAC FIFO Restart Pointer */
+ TX_GMF_RLEV = 0x0d78,/* 32 bit Tx GMAC FIFO Read Level */
+
+ /* Descriptor Poll Timer Registers */
+ B28_DPT_INI = 0x0e00,/* 24 bit Descriptor Poll Timer Init Val */
+ B28_DPT_VAL = 0x0e04,/* 24 bit Descriptor Poll Timer Curr Val */
+ B28_DPT_CTRL = 0x0e08,/* 8 bit Descriptor Poll Timer Ctrl Reg */
+
+ B28_DPT_TST = 0x0e0a,/* 8 bit Descriptor Poll Timer Test Reg */
+
+ /* Time Stamp Timer Registers (YUKON only) */
+ GMAC_TI_ST_VAL = 0x0e14,/* 32 bit Time Stamp Timer Curr Val */
+ GMAC_TI_ST_CTRL = 0x0e18,/* 8 bit Time Stamp Timer Ctrl Reg */
+ GMAC_TI_ST_TST = 0x0e1a,/* 8 bit Time Stamp Timer Test Reg */
+};
+
+
+enum {
+ LINKLED_OFF = 0x01,
+ LINKLED_ON = 0x02,
+ LINKLED_LINKSYNC_OFF = 0x04,
+ LINKLED_LINKSYNC_ON = 0x08,
+ LINKLED_BLINK_OFF = 0x10,
+ LINKLED_BLINK_ON = 0x20,
+};
+
+/* GMAC and GPHY Control Registers (YUKON only) */
+enum {
+ GMAC_CTRL = 0x0f00,/* 32 bit GMAC Control Reg */
+ GPHY_CTRL = 0x0f04,/* 32 bit GPHY Control Reg */
+ GMAC_IRQ_SRC = 0x0f08,/* 8 bit GMAC Interrupt Source Reg */
+ GMAC_IRQ_MSK = 0x0f0c,/* 8 bit GMAC Interrupt Mask Reg */
+ GMAC_LINK_CTRL = 0x0f10,/* 16 bit Link Control Reg */
+
+/* Wake-up Frame Pattern Match Control Registers (YUKON only) */
+
+	WOL_REG_OFFS	= 0x20,/* HW bug: addresses are offset by +0x20 from the spec */
+
+ WOL_CTRL_STAT = 0x0f20,/* 16 bit WOL Control/Status Reg */
+ WOL_MATCH_CTL = 0x0f22,/* 8 bit WOL Match Control Reg */
+ WOL_MATCH_RES = 0x0f23,/* 8 bit WOL Match Result Reg */
+ WOL_MAC_ADDR = 0x0f24,/* 32 bit WOL MAC Address */
+ WOL_PATT_RPTR = 0x0f2c,/* 8 bit WOL Pattern Read Pointer */
+
+/* WOL Pattern Length Registers (YUKON only) */
+
+ WOL_PATT_LEN_LO = 0x0f30,/* 32 bit WOL Pattern Length 3..0 */
+ WOL_PATT_LEN_HI = 0x0f34,/* 24 bit WOL Pattern Length 6..4 */
+
+/* WOL Pattern Counter Registers (YUKON only) */
+
+ WOL_PATT_CNT_0 = 0x0f38,/* 32 bit WOL Pattern Counter 3..0 */
+ WOL_PATT_CNT_4 = 0x0f3c,/* 24 bit WOL Pattern Counter 6..4 */
+};
+#define WOL_REGS(port, x) (x + (port)*0x80)
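+/*
+ * Example: WOL_REGS(1, WOL_CTRL_STAT) = 0x0f20 + 0x80 = 0x0fa0, i.e. the
+ * WOL control/status register of the second port.
+ */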
+
+enum {
+ WOL_PATT_RAM_1 = 0x1000,/* WOL Pattern RAM Link 1 */
+ WOL_PATT_RAM_2 = 0x1400,/* WOL Pattern RAM Link 2 */
+};
+#define WOL_PATT_RAM_BASE(port) (WOL_PATT_RAM_1 + (port)*0x400)
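+/*
+ * Example: WOL_PATT_RAM_BASE(1) = 0x1000 + 0x400 = 0x1400 (WOL_PATT_RAM_2).
+ */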
+
+enum {
+ BASE_XMAC_1 = 0x2000,/* XMAC 1 registers */
+ BASE_GMAC_1 = 0x2800,/* GMAC 1 registers */
+ BASE_XMAC_2 = 0x3000,/* XMAC 2 registers */
+ BASE_GMAC_2 = 0x3800,/* GMAC 2 registers */
+};
+
+/*
+ * Receive Frame Status Encoding
+ */
+enum {
+ XMR_FS_LEN = 0x3fff<<18, /* Bit 31..18: Rx Frame Length */
+ XMR_FS_LEN_SHIFT = 18,
+	XMR_FS_2L_VLAN	= 1<<17, /* Bit 17:	tagged with 2-level VLAN ID */
+	XMR_FS_1_VLAN	= 1<<16, /* Bit 16:	tagged with 1-level VLAN ID */
+ XMR_FS_BC = 1<<15, /* Bit 15: Broadcast Frame */
+ XMR_FS_MC = 1<<14, /* Bit 14: Multicast Frame */
+ XMR_FS_UC = 1<<13, /* Bit 13: Unicast Frame */
+
+ XMR_FS_BURST = 1<<11, /* Bit 11: Burst Mode */
+ XMR_FS_CEX_ERR = 1<<10, /* Bit 10: Carrier Ext. Error */
+ XMR_FS_802_3 = 1<<9, /* Bit 9: 802.3 Frame */
+ XMR_FS_COL_ERR = 1<<8, /* Bit 8: Collision Error */
+ XMR_FS_CAR_ERR = 1<<7, /* Bit 7: Carrier Event Error */
+ XMR_FS_LEN_ERR = 1<<6, /* Bit 6: In-Range Length Error */
+ XMR_FS_FRA_ERR = 1<<5, /* Bit 5: Framing Error */
+ XMR_FS_RUNT = 1<<4, /* Bit 4: Runt Frame */
+ XMR_FS_LNG_ERR = 1<<3, /* Bit 3: Giant (Jumbo) Frame */
+ XMR_FS_FCS_ERR = 1<<2, /* Bit 2: Frame Check Sequ Err */
+ XMR_FS_ERR = 1<<1, /* Bit 1: Frame Error */
+ XMR_FS_MCTRL = 1<<0, /* Bit 0: MAC Control Packet */
+
+/*
+ * XMR_FS_ERR will be set if
+ * XMR_FS_FCS_ERR, XMR_FS_LNG_ERR, XMR_FS_RUNT,
+ * XMR_FS_FRA_ERR, XMR_FS_LEN_ERR, or XMR_FS_CEX_ERR
+ * is set. XMR_FS_LNG_ERR and XMR_FS_LEN_ERR will issue
+ * XMR_FS_ERR unless the corresponding bit in the Receive Command
+ * Register is set.
+ */
+};
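+
+/*
+ * Illustrative only: the received frame length is recovered from a status
+ * word as (status & XMR_FS_LEN) >> XMR_FS_LEN_SHIFT, using the mask and
+ * shift defined above.
+ */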
+
+/*
+ * XMAC-PHY Registers, indirectly addressed over the XMAC
+ */
+enum {
+ PHY_XMAC_CTRL = 0x00,/* 16 bit r/w PHY Control Register */
+ PHY_XMAC_STAT = 0x01,/* 16 bit r/w PHY Status Register */
+ PHY_XMAC_ID0 = 0x02,/* 16 bit r/o PHY ID0 Register */
+ PHY_XMAC_ID1 = 0x03,/* 16 bit r/o PHY ID1 Register */
+ PHY_XMAC_AUNE_ADV = 0x04,/* 16 bit r/w Auto-Neg. Advertisement */
+ PHY_XMAC_AUNE_LP = 0x05,/* 16 bit r/o Link Partner Abi Reg */
+ PHY_XMAC_AUNE_EXP = 0x06,/* 16 bit r/o Auto-Neg. Expansion Reg */
+ PHY_XMAC_NEPG = 0x07,/* 16 bit r/w Next Page Register */
+ PHY_XMAC_NEPG_LP = 0x08,/* 16 bit r/o Next Page Link Partner */
+
+ PHY_XMAC_EXT_STAT = 0x0f,/* 16 bit r/o Ext Status Register */
+ PHY_XMAC_RES_ABI = 0x10,/* 16 bit r/o PHY Resolved Ability */
+};
+/*
+ * Broadcom-PHY Registers, indirect addressed over XMAC
+ */
+enum {
+ PHY_BCOM_CTRL = 0x00,/* 16 bit r/w PHY Control Register */
+ PHY_BCOM_STAT = 0x01,/* 16 bit r/o PHY Status Register */
+ PHY_BCOM_ID0 = 0x02,/* 16 bit r/o PHY ID0 Register */
+ PHY_BCOM_ID1 = 0x03,/* 16 bit r/o PHY ID1 Register */
+ PHY_BCOM_AUNE_ADV = 0x04,/* 16 bit r/w Auto-Neg. Advertisement */
+ PHY_BCOM_AUNE_LP = 0x05,/* 16 bit r/o Link Part Ability Reg */
+ PHY_BCOM_AUNE_EXP = 0x06,/* 16 bit r/o Auto-Neg. Expansion Reg */
+ PHY_BCOM_NEPG = 0x07,/* 16 bit r/w Next Page Register */
+ PHY_BCOM_NEPG_LP = 0x08,/* 16 bit r/o Next Page Link Partner */
+ /* Broadcom-specific registers */
+ PHY_BCOM_1000T_CTRL = 0x09,/* 16 bit r/w 1000Base-T Control Reg */
+ PHY_BCOM_1000T_STAT = 0x0a,/* 16 bit r/o 1000Base-T Status Reg */
+ PHY_BCOM_EXT_STAT = 0x0f,/* 16 bit r/o Extended Status Reg */
+ PHY_BCOM_P_EXT_CTRL = 0x10,/* 16 bit r/w PHY Extended Ctrl Reg */
+ PHY_BCOM_P_EXT_STAT = 0x11,/* 16 bit r/o PHY Extended Stat Reg */
+ PHY_BCOM_RE_CTR = 0x12,/* 16 bit r/w Receive Error Counter */
+ PHY_BCOM_FC_CTR = 0x13,/* 16 bit r/w False Carrier Sense Cnt */
+ PHY_BCOM_RNO_CTR = 0x14,/* 16 bit r/w Receiver NOT_OK Cnt */
+
+ PHY_BCOM_AUX_CTRL = 0x18,/* 16 bit r/w Auxiliary Control Reg */
+ PHY_BCOM_AUX_STAT = 0x19,/* 16 bit r/o Auxiliary Stat Summary */
+ PHY_BCOM_INT_STAT = 0x1a,/* 16 bit r/o Interrupt Status Reg */
+ PHY_BCOM_INT_MASK = 0x1b,/* 16 bit r/w Interrupt Mask Reg */
+};
+
+/*
+ * Marvell-PHY Registers, indirectly addressed over the GMAC
+ */
+enum {
+ PHY_MARV_CTRL = 0x00,/* 16 bit r/w PHY Control Register */
+ PHY_MARV_STAT = 0x01,/* 16 bit r/o PHY Status Register */
+ PHY_MARV_ID0 = 0x02,/* 16 bit r/o PHY ID0 Register */
+ PHY_MARV_ID1 = 0x03,/* 16 bit r/o PHY ID1 Register */
+ PHY_MARV_AUNE_ADV = 0x04,/* 16 bit r/w Auto-Neg. Advertisement */
+ PHY_MARV_AUNE_LP = 0x05,/* 16 bit r/o Link Part Ability Reg */
+ PHY_MARV_AUNE_EXP = 0x06,/* 16 bit r/o Auto-Neg. Expansion Reg */
+ PHY_MARV_NEPG = 0x07,/* 16 bit r/w Next Page Register */
+ PHY_MARV_NEPG_LP = 0x08,/* 16 bit r/o Next Page Link Partner */
+	/* Marvell-specific registers */
+ PHY_MARV_1000T_CTRL = 0x09,/* 16 bit r/w 1000Base-T Control Reg */
+ PHY_MARV_1000T_STAT = 0x0a,/* 16 bit r/o 1000Base-T Status Reg */
+ PHY_MARV_EXT_STAT = 0x0f,/* 16 bit r/o Extended Status Reg */
+ PHY_MARV_PHY_CTRL = 0x10,/* 16 bit r/w PHY Specific Ctrl Reg */
+ PHY_MARV_PHY_STAT = 0x11,/* 16 bit r/o PHY Specific Stat Reg */
+ PHY_MARV_INT_MASK = 0x12,/* 16 bit r/w Interrupt Mask Reg */
+ PHY_MARV_INT_STAT = 0x13,/* 16 bit r/o Interrupt Status Reg */
+ PHY_MARV_EXT_CTRL = 0x14,/* 16 bit r/w Ext. PHY Specific Ctrl */
+ PHY_MARV_RXE_CNT = 0x15,/* 16 bit r/w Receive Error Counter */
+ PHY_MARV_EXT_ADR = 0x16,/* 16 bit r/w Ext. Ad. for Cable Diag. */
+ PHY_MARV_PORT_IRQ = 0x17,/* 16 bit r/o Port 0 IRQ (88E1111 only) */
+ PHY_MARV_LED_CTRL = 0x18,/* 16 bit r/w LED Control Reg */
+ PHY_MARV_LED_OVER = 0x19,/* 16 bit r/w Manual LED Override Reg */
+ PHY_MARV_EXT_CTRL_2 = 0x1a,/* 16 bit r/w Ext. PHY Specific Ctrl 2 */
+ PHY_MARV_EXT_P_STAT = 0x1b,/* 16 bit r/w Ext. PHY Spec. Stat Reg */
+ PHY_MARV_CABLE_DIAG = 0x1c,/* 16 bit r/o Cable Diagnostic Reg */
+ PHY_MARV_PAGE_ADDR = 0x1d,/* 16 bit r/w Extended Page Address Reg */
+ PHY_MARV_PAGE_DATA = 0x1e,/* 16 bit r/w Extended Page Data Reg */
+
+/* for 10/100 Fast Ethernet PHY (88E3082 only) */
+ PHY_MARV_FE_LED_PAR = 0x16,/* 16 bit r/w LED Parallel Select Reg. */
+ PHY_MARV_FE_LED_SER = 0x17,/* 16 bit r/w LED Stream Select S. LED */
+ PHY_MARV_FE_VCT_TX = 0x1a,/* 16 bit r/w VCT Reg. for TXP/N Pins */
+ PHY_MARV_FE_VCT_RX = 0x1b,/* 16 bit r/o VCT Reg. for RXP/N Pins */
+ PHY_MARV_FE_SPEC_2 = 0x1c,/* 16 bit r/w Specific Control Reg. 2 */
+};
+
+enum {
+ PHY_CT_RESET = 1<<15, /* Bit 15: (sc) clear all PHY related regs */
+ PHY_CT_LOOP = 1<<14, /* Bit 14: enable Loopback over PHY */
+ PHY_CT_SPS_LSB = 1<<13, /* Bit 13: Speed select, lower bit */
+ PHY_CT_ANE = 1<<12, /* Bit 12: Auto-Negotiation Enabled */
+ PHY_CT_PDOWN = 1<<11, /* Bit 11: Power Down Mode */
+ PHY_CT_ISOL = 1<<10, /* Bit 10: Isolate Mode */
+ PHY_CT_RE_CFG = 1<<9, /* Bit 9: (sc) Restart Auto-Negotiation */
+ PHY_CT_DUP_MD = 1<<8, /* Bit 8: Duplex Mode */
+ PHY_CT_COL_TST = 1<<7, /* Bit 7: Collision Test enabled */
+ PHY_CT_SPS_MSB = 1<<6, /* Bit 6: Speed select, upper bit */
+};
+
+enum {
+ PHY_CT_SP1000 = PHY_CT_SPS_MSB, /* enable speed of 1000 Mbps */
+ PHY_CT_SP100 = PHY_CT_SPS_LSB, /* enable speed of 100 Mbps */
+ PHY_CT_SP10 = 0, /* enable speed of 10 Mbps */
+};
+
+enum {
+ PHY_ST_EXT_ST = 1<<8, /* Bit 8: Extended Status Present */
+
+ PHY_ST_PRE_SUP = 1<<6, /* Bit 6: Preamble Suppression */
+ PHY_ST_AN_OVER = 1<<5, /* Bit 5: Auto-Negotiation Over */
+ PHY_ST_REM_FLT = 1<<4, /* Bit 4: Remote Fault Condition Occurred */
+ PHY_ST_AN_CAP = 1<<3, /* Bit 3: Auto-Negotiation Capability */
+ PHY_ST_LSYNC = 1<<2, /* Bit 2: Link Synchronized */
+ PHY_ST_JAB_DET = 1<<1, /* Bit 1: Jabber Detected */
+ PHY_ST_EXT_REG = 1<<0, /* Bit 0: Extended Register available */
+};
+
+enum {
+ PHY_I1_OUI_MSK = 0x3f<<10, /* Bit 15..10: Organization Unique ID */
+ PHY_I1_MOD_NUM = 0x3f<<4, /* Bit 9.. 4: Model Number */
+ PHY_I1_REV_MSK = 0xf, /* Bit 3.. 0: Revision Number */
+};
+
+/* different Broadcom PHY Ids */
+enum {
+ PHY_BCOM_ID1_A1 = 0x6041,
+ PHY_BCOM_ID1_B2 = 0x6043,
+ PHY_BCOM_ID1_C0 = 0x6044,
+ PHY_BCOM_ID1_C5 = 0x6047,
+};
+
+/* different Marvell PHY Ids */
+enum {
+ PHY_MARV_ID0_VAL= 0x0141, /* Marvell Unique Identifier */
+ PHY_MARV_ID1_B0 = 0x0C23, /* Yukon (PHY 88E1011) */
+ PHY_MARV_ID1_B2 = 0x0C25, /* Yukon-Plus (PHY 88E1011) */
+ PHY_MARV_ID1_C2 = 0x0CC2, /* Yukon-EC (PHY 88E1111) */
+ PHY_MARV_ID1_Y2 = 0x0C91, /* Yukon-2 (PHY 88E1112) */
+};
+
+/* Advertisement register bits */
+enum {
+ PHY_AN_NXT_PG = 1<<15, /* Bit 15: Request Next Page */
+ PHY_AN_ACK = 1<<14, /* Bit 14: (ro) Acknowledge Received */
+ PHY_AN_RF = 1<<13, /* Bit 13: Remote Fault Bits */
+
+ PHY_AN_PAUSE_ASYM = 1<<11,/* Bit 11: Try for asymmetric */
+ PHY_AN_PAUSE_CAP = 1<<10, /* Bit 10: Try for pause */
+	PHY_AN_100BASE4	= 1<<9, /* Bit 9: Try for 100Base-T4 */
+ PHY_AN_100FULL = 1<<8, /* Bit 8: Try for 100mbps full-duplex */
+ PHY_AN_100HALF = 1<<7, /* Bit 7: Try for 100mbps half-duplex */
+ PHY_AN_10FULL = 1<<6, /* Bit 6: Try for 10mbps full-duplex */
+ PHY_AN_10HALF = 1<<5, /* Bit 5: Try for 10mbps half-duplex */
+ PHY_AN_CSMA = 1<<0, /* Bit 0: Only selector supported */
+ PHY_AN_SEL = 0x1f, /* Bit 4..0: Selector Field, 00001=Ethernet*/
+ PHY_AN_FULL = PHY_AN_100FULL | PHY_AN_10FULL | PHY_AN_CSMA,
+ PHY_AN_ALL = PHY_AN_10HALF | PHY_AN_10FULL |
+ PHY_AN_100HALF | PHY_AN_100FULL,
+};
+
+/* Xmac Specific */
+enum {
+ PHY_X_AN_NXT_PG = 1<<15, /* Bit 15: Request Next Page */
+ PHY_X_AN_ACK = 1<<14, /* Bit 14: (ro) Acknowledge Received */
+ PHY_X_AN_RFB = 3<<12,/* Bit 13..12: Remote Fault Bits */
+
+ PHY_X_AN_PAUSE = 3<<7,/* Bit 8.. 7: Pause Bits */
+ PHY_X_AN_HD = 1<<6, /* Bit 6: Half Duplex */
+ PHY_X_AN_FD = 1<<5, /* Bit 5: Full Duplex */
+};
+
+/* Pause Bits (PHY_X_AN_PAUSE and PHY_X_RS_PAUSE) encoding */
+enum {
+ PHY_X_P_NO_PAUSE= 0<<7,/* Bit 8..7: no Pause Mode */
+ PHY_X_P_SYM_MD = 1<<7, /* Bit 8..7: symmetric Pause Mode */
+ PHY_X_P_ASYM_MD = 2<<7,/* Bit 8..7: asymmetric Pause Mode */
+ PHY_X_P_BOTH_MD = 3<<7,/* Bit 8..7: both Pause Mode */
+};
+
+
+/***** PHY_XMAC_EXT_STAT 16 bit r/w Extended Status Register *****/
+enum {
+ PHY_X_EX_FD = 1<<15, /* Bit 15: Device Supports Full Duplex */
+ PHY_X_EX_HD = 1<<14, /* Bit 14: Device Supports Half Duplex */
+};
+
+/***** PHY_XMAC_RES_ABI 16 bit r/o PHY Resolved Ability *****/
+enum {
+ PHY_X_RS_PAUSE = 3<<7, /* Bit 8..7: selected Pause Mode */
+ PHY_X_RS_HD = 1<<6, /* Bit 6: Half Duplex Mode selected */
+ PHY_X_RS_FD = 1<<5, /* Bit 5: Full Duplex Mode selected */
+ PHY_X_RS_ABLMIS = 1<<4, /* Bit 4: duplex or pause cap mismatch */
+ PHY_X_RS_PAUMIS = 1<<3, /* Bit 3: pause capability mismatch */
+};
+
+/* Remote Fault Bits (PHY_X_AN_RFB) encoding */
+enum {
+ X_RFB_OK = 0<<12,/* Bit 13..12 No errors, Link OK */
+ X_RFB_LF = 1<<12,/* Bit 13..12 Link Failure */
+ X_RFB_OFF = 2<<12,/* Bit 13..12 Offline */
+ X_RFB_AN_ERR = 3<<12,/* Bit 13..12 Auto-Negotiation Error */
+};
+
+/* Broadcom-Specific */
+/***** PHY_BCOM_1000T_CTRL 16 bit r/w 1000Base-T Control Reg *****/
+enum {
+ PHY_B_1000C_TEST = 7<<13,/* Bit 15..13: Test Modes */
+ PHY_B_1000C_MSE = 1<<12, /* Bit 12: Master/Slave Enable */
+ PHY_B_1000C_MSC = 1<<11, /* Bit 11: M/S Configuration */
+ PHY_B_1000C_RD = 1<<10, /* Bit 10: Repeater/DTE */
+ PHY_B_1000C_AFD = 1<<9, /* Bit 9: Advertise Full Duplex */
+ PHY_B_1000C_AHD = 1<<8, /* Bit 8: Advertise Half Duplex */
+};
+
+/***** PHY_BCOM_1000T_STAT 16 bit r/o 1000Base-T Status Reg *****/
+/***** PHY_MARV_1000T_STAT 16 bit r/o 1000Base-T Status Reg *****/
+enum {
+ PHY_B_1000S_MSF = 1<<15, /* Bit 15: Master/Slave Fault */
+ PHY_B_1000S_MSR = 1<<14, /* Bit 14: Master/Slave Result */
+ PHY_B_1000S_LRS = 1<<13, /* Bit 13: Local Receiver Status */
+ PHY_B_1000S_RRS = 1<<12, /* Bit 12: Remote Receiver Status */
+ PHY_B_1000S_LP_FD = 1<<11, /* Bit 11: Link Partner can FD */
+ PHY_B_1000S_LP_HD = 1<<10, /* Bit 10: Link Partner can HD */
+ /* Bit 9..8: reserved */
+ PHY_B_1000S_IEC = 0xff, /* Bit 7..0: Idle Error Count */
+};
+
+/***** PHY_BCOM_EXT_STAT 16 bit r/o Extended Status Register *****/
+enum {
+ PHY_B_ES_X_FD_CAP = 1<<15, /* Bit 15: 1000Base-X FD capable */
+ PHY_B_ES_X_HD_CAP = 1<<14, /* Bit 14: 1000Base-X HD capable */
+ PHY_B_ES_T_FD_CAP = 1<<13, /* Bit 13: 1000Base-T FD capable */
+ PHY_B_ES_T_HD_CAP = 1<<12, /* Bit 12: 1000Base-T HD capable */
+};
+
+/***** PHY_BCOM_P_EXT_CTRL 16 bit r/w PHY Extended Control Reg *****/
+enum {
+ PHY_B_PEC_MAC_PHY = 1<<15, /* Bit 15: 10BIT/GMI-Interface */
+ PHY_B_PEC_DIS_CROSS = 1<<14, /* Bit 14: Disable MDI Crossover */
+ PHY_B_PEC_TX_DIS = 1<<13, /* Bit 13: Tx output Disabled */
+ PHY_B_PEC_INT_DIS = 1<<12, /* Bit 12: Interrupts Disabled */
+ PHY_B_PEC_F_INT = 1<<11, /* Bit 11: Force Interrupt */
+ PHY_B_PEC_BY_45 = 1<<10, /* Bit 10: Bypass 4B5B-Decoder */
+ PHY_B_PEC_BY_SCR = 1<<9, /* Bit 9: Bypass Scrambler */
+ PHY_B_PEC_BY_MLT3 = 1<<8, /* Bit 8: Bypass MLT3 Encoder */
+ PHY_B_PEC_BY_RXA = 1<<7, /* Bit 7: Bypass Rx Alignm. */
+ PHY_B_PEC_RES_SCR = 1<<6, /* Bit 6: Reset Scrambler */
+ PHY_B_PEC_EN_LTR = 1<<5, /* Bit 5: Ena LED Traffic Mode */
+ PHY_B_PEC_LED_ON = 1<<4, /* Bit 4: Force LED's on */
+ PHY_B_PEC_LED_OFF = 1<<3, /* Bit 3: Force LED's off */
+ PHY_B_PEC_EX_IPG = 1<<2, /* Bit 2: Extend Tx IPG Mode */
+ PHY_B_PEC_3_LED = 1<<1, /* Bit 1: Three Link LED mode */
+	PHY_B_PEC_HIGH_LA	= 1<<0, /* Bit 0: GMII FIFO Elasticity */
+};
+
+/***** PHY_BCOM_P_EXT_STAT 16 bit r/o PHY Extended Status Reg *****/
+enum {
+ PHY_B_PES_CROSS_STAT = 1<<13, /* Bit 13: MDI Crossover Status */
+ PHY_B_PES_INT_STAT = 1<<12, /* Bit 12: Interrupt Status */
+ PHY_B_PES_RRS = 1<<11, /* Bit 11: Remote Receiver Stat. */
+ PHY_B_PES_LRS = 1<<10, /* Bit 10: Local Receiver Stat. */
+ PHY_B_PES_LOCKED = 1<<9, /* Bit 9: Locked */
+ PHY_B_PES_LS = 1<<8, /* Bit 8: Link Status */
+ PHY_B_PES_RF = 1<<7, /* Bit 7: Remote Fault */
+ PHY_B_PES_CE_ER = 1<<6, /* Bit 6: Carrier Ext Error */
+ PHY_B_PES_BAD_SSD = 1<<5, /* Bit 5: Bad SSD */
+ PHY_B_PES_BAD_ESD = 1<<4, /* Bit 4: Bad ESD */
+ PHY_B_PES_RX_ER = 1<<3, /* Bit 3: Receive Error */
+ PHY_B_PES_TX_ER = 1<<2, /* Bit 2: Transmit Error */
+ PHY_B_PES_LOCK_ER = 1<<1, /* Bit 1: Lock Error */
+ PHY_B_PES_MLT3_ER = 1<<0, /* Bit 0: MLT3 code Error */
+};
+
+/* PHY_BCOM_AUNE_ADV 16 bit r/w Auto-Negotiation Advertisement *****/
+/* PHY_BCOM_AUNE_LP 16 bit r/o Link Partner Ability Reg *****/
+enum {
+ PHY_B_AN_RF = 1<<13, /* Bit 13: Remote Fault */
+
+ PHY_B_AN_ASP = 1<<11, /* Bit 11: Asymmetric Pause */
+ PHY_B_AN_PC = 1<<10, /* Bit 10: Pause Capable */
+};
+
+
+/***** PHY_BCOM_FC_CTR 16 bit r/w False Carrier Counter *****/
+enum {
+ PHY_B_FC_CTR = 0xff, /* Bit 7..0: False Carrier Counter */
+
+/***** PHY_BCOM_RNO_CTR 16 bit r/w Receive NOT_OK Counter *****/
+ PHY_B_RC_LOC_MSK = 0xff00, /* Bit 15..8: Local Rx NOT_OK cnt */
+ PHY_B_RC_REM_MSK = 0x00ff, /* Bit 7..0: Remote Rx NOT_OK cnt */
+
+/***** PHY_BCOM_AUX_CTRL 16 bit r/w Auxiliary Control Reg *****/
+ PHY_B_AC_L_SQE = 1<<15, /* Bit 15: Low Squelch */
+ PHY_B_AC_LONG_PACK = 1<<14, /* Bit 14: Rx Long Packets */
+ PHY_B_AC_ER_CTRL = 3<<12,/* Bit 13..12: Edgerate Control */
+ /* Bit 11: reserved */
+ PHY_B_AC_TX_TST = 1<<10, /* Bit 10: Tx test bit, always 1 */
+ /* Bit 9.. 8: reserved */
+ PHY_B_AC_DIS_PRF = 1<<7, /* Bit 7: dis part resp filter */
+ /* Bit 6: reserved */
+ PHY_B_AC_DIS_PM = 1<<5, /* Bit 5: dis power management */
+ /* Bit 4: reserved */
+ PHY_B_AC_DIAG = 1<<3, /* Bit 3: Diagnostic Mode */
+};
+
+/***** PHY_BCOM_AUX_STAT 16 bit r/o Auxiliary Status Reg *****/
+enum {
+ PHY_B_AS_AN_C = 1<<15, /* Bit 15: AutoNeg complete */
+ PHY_B_AS_AN_CA = 1<<14, /* Bit 14: AN Complete Ack */
+ PHY_B_AS_ANACK_D = 1<<13, /* Bit 13: AN Ack Detect */
+ PHY_B_AS_ANAB_D = 1<<12, /* Bit 12: AN Ability Detect */
+ PHY_B_AS_NPW = 1<<11, /* Bit 11: AN Next Page Wait */
+ PHY_B_AS_AN_RES_MSK = 7<<8,/* Bit 10..8: AN HDC */
+ PHY_B_AS_PDF = 1<<7, /* Bit 7: Parallel Detect. Fault */
+ PHY_B_AS_RF = 1<<6, /* Bit 6: Remote Fault */
+ PHY_B_AS_ANP_R = 1<<5, /* Bit 5: AN Page Received */
+ PHY_B_AS_LP_ANAB = 1<<4, /* Bit 4: LP AN Ability */
+ PHY_B_AS_LP_NPAB = 1<<3, /* Bit 3: LP Next Page Ability */
+ PHY_B_AS_LS = 1<<2, /* Bit 2: Link Status */
+ PHY_B_AS_PRR = 1<<1, /* Bit 1: Pause Resolution-Rx */
+ PHY_B_AS_PRT = 1<<0, /* Bit 0: Pause Resolution-Tx */
+};
+#define PHY_B_AS_PAUSE_MSK (PHY_B_AS_PRR | PHY_B_AS_PRT)
+
+/***** PHY_BCOM_INT_STAT 16 bit r/o Interrupt Status Reg *****/
+/***** PHY_BCOM_INT_MASK 16 bit r/w Interrupt Mask Reg *****/
+enum {
+ PHY_B_IS_PSE = 1<<14, /* Bit 14: Pair Swap Error */
+ PHY_B_IS_MDXI_SC = 1<<13, /* Bit 13: MDIX Status Change */
+ PHY_B_IS_HCT = 1<<12, /* Bit 12: counter above 32k */
+ PHY_B_IS_LCT = 1<<11, /* Bit 11: counter above 128 */
+ PHY_B_IS_AN_PR = 1<<10, /* Bit 10: Page Received */
+ PHY_B_IS_NO_HDCL = 1<<9, /* Bit 9: No HCD Link */
+ PHY_B_IS_NO_HDC = 1<<8, /* Bit 8: No HCD */
+ PHY_B_IS_NEG_USHDC = 1<<7, /* Bit 7: Negotiated Unsup. HCD */
+ PHY_B_IS_SCR_S_ER = 1<<6, /* Bit 6: Scrambler Sync Error */
+ PHY_B_IS_RRS_CHANGE = 1<<5, /* Bit 5: Remote Rx Stat Change */
+ PHY_B_IS_LRS_CHANGE = 1<<4, /* Bit 4: Local Rx Stat Change */
+ PHY_B_IS_DUP_CHANGE = 1<<3, /* Bit 3: Duplex Mode Change */
+ PHY_B_IS_LSP_CHANGE = 1<<2, /* Bit 2: Link Speed Change */
+ PHY_B_IS_LST_CHANGE = 1<<1, /* Bit 1: Link Status Changed */
+ PHY_B_IS_CRC_ER = 1<<0, /* Bit 0: CRC Error */
+};
+#define PHY_B_DEF_MSK \
+ (~(PHY_B_IS_PSE | PHY_B_IS_AN_PR | PHY_B_IS_DUP_CHANGE | \
+ PHY_B_IS_LSP_CHANGE | PHY_B_IS_LST_CHANGE))
+
+/* Pause Bits (PHY_B_AN_ASP and PHY_B_AN_PC) encoding */
+enum {
+ PHY_B_P_NO_PAUSE = 0<<10,/* Bit 11..10: no Pause Mode */
+ PHY_B_P_SYM_MD = 1<<10, /* Bit 11..10: symmetric Pause Mode */
+ PHY_B_P_ASYM_MD = 2<<10,/* Bit 11..10: asymmetric Pause Mode */
+ PHY_B_P_BOTH_MD = 3<<10,/* Bit 11..10: both Pause Mode */
+};
+/*
+ * Resolved Duplex mode and Capabilities (Aux Status Summary Reg)
+ */
+enum {
+ PHY_B_RES_1000FD = 7<<8,/* Bit 10..8: 1000Base-T Full Dup. */
+ PHY_B_RES_1000HD = 6<<8,/* Bit 10..8: 1000Base-T Half Dup. */
+};
+
+/* Marvell-Specific */
+enum {
+ PHY_M_AN_NXT_PG = 1<<15, /* Request Next Page */
+ PHY_M_AN_ACK = 1<<14, /* (ro) Acknowledge Received */
+ PHY_M_AN_RF = 1<<13, /* Remote Fault */
+
+ PHY_M_AN_ASP = 1<<11, /* Asymmetric Pause */
+ PHY_M_AN_PC = 1<<10, /* MAC Pause implemented */
+ PHY_M_AN_100_T4 = 1<<9, /* Not cap. 100Base-T4 (always 0) */
+ PHY_M_AN_100_FD = 1<<8, /* Advertise 100Base-TX Full Duplex */
+ PHY_M_AN_100_HD = 1<<7, /* Advertise 100Base-TX Half Duplex */
+ PHY_M_AN_10_FD = 1<<6, /* Advertise 10Base-TX Full Duplex */
+ PHY_M_AN_10_HD = 1<<5, /* Advertise 10Base-TX Half Duplex */
+ PHY_M_AN_SEL_MSK =0x1f<<4, /* Bit 4.. 0: Selector Field Mask */
+};
+
+/* special defines for FIBER (88E1011S only) */
+enum {
+ PHY_M_AN_ASP_X = 1<<8, /* Asymmetric Pause */
+ PHY_M_AN_PC_X = 1<<7, /* MAC Pause implemented */
+	PHY_M_AN_1000X_AHD	= 1<<6, /* Advertise 1000Base-X Half Duplex */
+	PHY_M_AN_1000X_AFD	= 1<<5, /* Advertise 1000Base-X Full Duplex */
+};
+
+/* Pause Bits (PHY_M_AN_ASP_X and PHY_M_AN_PC_X) encoding */
+enum {
+ PHY_M_P_NO_PAUSE_X = 0<<7,/* Bit 8.. 7: no Pause Mode */
+ PHY_M_P_SYM_MD_X = 1<<7, /* Bit 8.. 7: symmetric Pause Mode */
+ PHY_M_P_ASYM_MD_X = 2<<7,/* Bit 8.. 7: asymmetric Pause Mode */
+ PHY_M_P_BOTH_MD_X = 3<<7,/* Bit 8.. 7: both Pause Mode */
+};
+
+/***** PHY_MARV_1000T_CTRL 16 bit r/w 1000Base-T Control Reg *****/
+enum {
+ PHY_M_1000C_TEST= 7<<13,/* Bit 15..13: Test Modes */
+ PHY_M_1000C_MSE = 1<<12, /* Manual Master/Slave Enable */
+ PHY_M_1000C_MSC = 1<<11, /* M/S Configuration (1=Master) */
+ PHY_M_1000C_MPD = 1<<10, /* Multi-Port Device */
+ PHY_M_1000C_AFD = 1<<9, /* Advertise Full Duplex */
+ PHY_M_1000C_AHD = 1<<8, /* Advertise Half Duplex */
+};
+
+/***** PHY_MARV_PHY_CTRL 16 bit r/w PHY Specific Ctrl Reg *****/
+enum {
+ PHY_M_PC_TX_FFD_MSK = 3<<14,/* Bit 15..14: Tx FIFO Depth Mask */
+ PHY_M_PC_RX_FFD_MSK = 3<<12,/* Bit 13..12: Rx FIFO Depth Mask */
+ PHY_M_PC_ASS_CRS_TX = 1<<11, /* Assert CRS on Transmit */
+ PHY_M_PC_FL_GOOD = 1<<10, /* Force Link Good */
+ PHY_M_PC_EN_DET_MSK = 3<<8,/* Bit 9.. 8: Energy Detect Mask */
+ PHY_M_PC_ENA_EXT_D = 1<<7, /* Enable Ext. Distance (10BT) */
+ PHY_M_PC_MDIX_MSK = 3<<5,/* Bit 6.. 5: MDI/MDIX Config. Mask */
+ PHY_M_PC_DIS_125CLK = 1<<4, /* Disable 125 CLK */
+ PHY_M_PC_MAC_POW_UP = 1<<3, /* MAC Power up */
+ PHY_M_PC_SQE_T_ENA = 1<<2, /* SQE Test Enabled */
+ PHY_M_PC_POL_R_DIS = 1<<1, /* Polarity Reversal Disabled */
+ PHY_M_PC_DIS_JABBER = 1<<0, /* Disable Jabber */
+};
+
+enum {
+ PHY_M_PC_EN_DET = 2<<8, /* Energy Detect (Mode 1) */
+ PHY_M_PC_EN_DET_PLUS = 3<<8, /* Energy Detect Plus (Mode 2) */
+};
+
+enum {
+ PHY_M_PC_MAN_MDI = 0, /* 00 = Manual MDI configuration */
+ PHY_M_PC_MAN_MDIX = 1, /* 01 = Manual MDIX configuration */
+ PHY_M_PC_ENA_AUTO = 3, /* 11 = Enable Automatic Crossover */
+};
+
+/* for 10/100 Fast Ethernet PHY (88E3082 only) */
+enum {
+ PHY_M_PC_ENA_DTE_DT = 1<<15, /* Enable Data Terminal Equ. (DTE) Detect */
+ PHY_M_PC_ENA_ENE_DT = 1<<14, /* Enable Energy Detect (sense & pulse) */
+	PHY_M_PC_DIS_NLP_CK	= 1<<13, /* Disable Normal Link Pulse (NLP) Check */
+	PHY_M_PC_ENA_LIP_NP	= 1<<12, /* Enable Link Partner Next Page Reg. */
+	PHY_M_PC_DIS_NLP_GN	= 1<<11, /* Disable Normal Link Pulse Generation */
+
+ PHY_M_PC_DIS_SCRAMB = 1<<9, /* Disable Scrambler */
+ PHY_M_PC_DIS_FEFI = 1<<8, /* Disable Far End Fault Indic. (FEFI) */
+
+ PHY_M_PC_SH_TP_SEL = 1<<6, /* Shielded Twisted Pair Select */
+ PHY_M_PC_RX_FD_MSK = 3<<2,/* Bit 3.. 2: Rx FIFO Depth Mask */
+};
+
+/***** PHY_MARV_PHY_STAT 16 bit r/o PHY Specific Status Reg *****/
+enum {
+ PHY_M_PS_SPEED_MSK = 3<<14, /* Bit 15..14: Speed Mask */
+ PHY_M_PS_SPEED_1000 = 1<<15, /* 10 = 1000 Mbps */
+ PHY_M_PS_SPEED_100 = 1<<14, /* 01 = 100 Mbps */
+ PHY_M_PS_SPEED_10 = 0, /* 00 = 10 Mbps */
+ PHY_M_PS_FULL_DUP = 1<<13, /* Full Duplex */
+ PHY_M_PS_PAGE_REC = 1<<12, /* Page Received */
+ PHY_M_PS_SPDUP_RES = 1<<11, /* Speed & Duplex Resolved */
+ PHY_M_PS_LINK_UP = 1<<10, /* Link Up */
+ PHY_M_PS_CABLE_MSK = 7<<7, /* Bit 9.. 7: Cable Length Mask */
+ PHY_M_PS_MDI_X_STAT = 1<<6, /* MDI Crossover Stat (1=MDIX) */
+ PHY_M_PS_DOWNS_STAT = 1<<5, /* Downshift Status (1=downsh.) */
+ PHY_M_PS_ENDET_STAT = 1<<4, /* Energy Detect Status (1=act) */
+ PHY_M_PS_TX_P_EN = 1<<3, /* Tx Pause Enabled */
+ PHY_M_PS_RX_P_EN = 1<<2, /* Rx Pause Enabled */
+ PHY_M_PS_POL_REV = 1<<1, /* Polarity Reversed */
+ PHY_M_PS_JABBER = 1<<0, /* Jabber */
+};
+
+#define PHY_M_PS_PAUSE_MSK (PHY_M_PS_TX_P_EN | PHY_M_PS_RX_P_EN)
+
+/* for 10/100 Fast Ethernet PHY (88E3082 only) */
+enum {
+ PHY_M_PS_DTE_DETECT = 1<<15, /* Data Terminal Equipment (DTE) Detected */
+	PHY_M_PS_RES_SPEED	= 1<<14, /* Resolved Speed (1=100 Mbps, 0=10 Mbps) */
+};
+
+enum {
+ PHY_M_IS_AN_ERROR = 1<<15, /* Auto-Negotiation Error */
+ PHY_M_IS_LSP_CHANGE = 1<<14, /* Link Speed Changed */
+ PHY_M_IS_DUP_CHANGE = 1<<13, /* Duplex Mode Changed */
+ PHY_M_IS_AN_PR = 1<<12, /* Page Received */
+ PHY_M_IS_AN_COMPL = 1<<11, /* Auto-Negotiation Completed */
+ PHY_M_IS_LST_CHANGE = 1<<10, /* Link Status Changed */
+ PHY_M_IS_SYMB_ERROR = 1<<9, /* Symbol Error */
+ PHY_M_IS_FALSE_CARR = 1<<8, /* False Carrier */
+ PHY_M_IS_FIFO_ERROR = 1<<7, /* FIFO Overflow/Underrun Error */
+ PHY_M_IS_MDI_CHANGE = 1<<6, /* MDI Crossover Changed */
+ PHY_M_IS_DOWNSH_DET = 1<<5, /* Downshift Detected */
+ PHY_M_IS_END_CHANGE = 1<<4, /* Energy Detect Changed */
+
+ PHY_M_IS_DTE_CHANGE = 1<<2, /* DTE Power Det. Status Changed */
+ PHY_M_IS_POL_CHANGE = 1<<1, /* Polarity Changed */
+ PHY_M_IS_JABBER = 1<<0, /* Jabber */
+
+ PHY_M_IS_DEF_MSK = PHY_M_IS_AN_ERROR | PHY_M_IS_LSP_CHANGE |
+ PHY_M_IS_LST_CHANGE | PHY_M_IS_FIFO_ERROR,
+
+ PHY_M_IS_AN_MSK = PHY_M_IS_AN_ERROR | PHY_M_IS_AN_COMPL,
+};
+
+/***** PHY_MARV_EXT_CTRL 16 bit r/w Ext. PHY Specific Ctrl *****/
+enum {
+ PHY_M_EC_ENA_BC_EXT = 1<<15, /* Enable Block Carr. Ext. (88E1111 only) */
+ PHY_M_EC_ENA_LIN_LB = 1<<14, /* Enable Line Loopback (88E1111 only) */
+
+ PHY_M_EC_DIS_LINK_P = 1<<12, /* Disable Link Pulses (88E1111 only) */
+ PHY_M_EC_M_DSC_MSK = 3<<10, /* Bit 11..10: Master Downshift Counter */
+ /* (88E1011 only) */
+ PHY_M_EC_S_DSC_MSK = 3<<8, /* Bit 9.. 8: Slave Downshift Counter */
+ /* (88E1011 only) */
+ PHY_M_EC_M_DSC_MSK2 = 7<<9, /* Bit 11.. 9: Master Downshift Counter */
+ /* (88E1111 only) */
+ PHY_M_EC_DOWN_S_ENA = 1<<8, /* Downshift Enable (88E1111 only) */
+ /* !!! Errata in spec. (1 = disable) */
+ PHY_M_EC_RX_TIM_CT = 1<<7, /* RGMII Rx Timing Control*/
+ PHY_M_EC_MAC_S_MSK = 7<<4, /* Bit 6.. 4: Def. MAC interface speed */
+ PHY_M_EC_FIB_AN_ENA = 1<<3, /* Fiber Auto-Neg. Enable (88E1011S only) */
+ PHY_M_EC_DTE_D_ENA = 1<<2, /* DTE Detect Enable (88E1111 only) */
+ PHY_M_EC_TX_TIM_CT = 1<<1, /* RGMII Tx Timing Control */
+	PHY_M_EC_TRANS_DIS	= 1<<0, /* Transmitter Disable (88E1111 only) */
+};
+
+#define PHY_M_EC_M_DSC(x) ((u16)(x)<<10) /* 00=1x; 01=2x; 10=3x; 11=4x */
+#define PHY_M_EC_S_DSC(x) ((u16)(x)<<8) /* 00=dis; 01=1x; 10=2x; 11=3x */
+#define PHY_M_EC_MAC_S(x) ((u16)(x)<<4) /* 01X=0; 110=2.5; 111=25 (MHz) */
+
+#define PHY_M_EC_M_DSC_2(x) ((u16)(x)<<9) /* 000=1x; 001=2x; 010=3x; 011=4x */
+ /* 100=5x; 101=6x; 110=7x; 111=8x */
+enum {
+ MAC_TX_CLK_0_MHZ = 2,
+ MAC_TX_CLK_2_5_MHZ = 6,
+ MAC_TX_CLK_25_MHZ = 7,
+};
+
+/***** PHY_MARV_LED_CTRL 16 bit r/w LED Control Reg *****/
+enum {
+ PHY_M_LEDC_DIS_LED = 1<<15, /* Disable LED */
+ PHY_M_LEDC_PULS_MSK = 7<<12,/* Bit 14..12: Pulse Stretch Mask */
+ PHY_M_LEDC_F_INT = 1<<11, /* Force Interrupt */
+ PHY_M_LEDC_BL_R_MSK = 7<<8,/* Bit 10.. 8: Blink Rate Mask */
+ PHY_M_LEDC_DP_C_LSB = 1<<7, /* Duplex Control (LSB, 88E1111 only) */
+ PHY_M_LEDC_TX_C_LSB = 1<<6, /* Tx Control (LSB, 88E1111 only) */
+ PHY_M_LEDC_LK_C_MSK = 7<<3,/* Bit 5.. 3: Link Control Mask */
+ /* (88E1111 only) */
+};
+#define PHY_M_LED_PULS_DUR(x) (((u16)(x)<<12) & PHY_M_LEDC_PULS_MSK)
+#define PHY_M_LED_BLINK_RT(x) (((u16)(x)<<8) & PHY_M_LEDC_BL_R_MSK)
+
+enum {
+ PHY_M_LEDC_LINK_MSK = 3<<3, /* Bit 4.. 3: Link Control Mask */
+ /* (88E1011 only) */
+ PHY_M_LEDC_DP_CTRL = 1<<2, /* Duplex Control */
+ PHY_M_LEDC_DP_C_MSB = 1<<2, /* Duplex Control (MSB, 88E1111 only) */
+ PHY_M_LEDC_RX_CTRL = 1<<1, /* Rx Activity / Link */
+ PHY_M_LEDC_TX_CTRL = 1<<0, /* Tx Activity / Link */
+ PHY_M_LEDC_TX_C_MSB = 1<<0, /* Tx Control (MSB, 88E1111 only) */
+};
+
+enum {
+ PULS_NO_STR = 0, /* no pulse stretching */
+ PULS_21MS = 1, /* 21 ms to 42 ms */
+ PULS_42MS = 2, /* 42 ms to 84 ms */
+ PULS_84MS = 3, /* 84 ms to 170 ms */
+ PULS_170MS = 4, /* 170 ms to 340 ms */
+ PULS_340MS = 5, /* 340 ms to 670 ms */
+ PULS_670MS = 6, /* 670 ms to 1.3 s */
+ PULS_1300MS = 7, /* 1.3 s to 2.7 s */
+};
+
+
+enum {
+ BLINK_42MS = 0, /* 42 ms */
+ BLINK_84MS = 1, /* 84 ms */
+ BLINK_170MS = 2, /* 170 ms */
+ BLINK_340MS = 3, /* 340 ms */
+ BLINK_670MS = 4, /* 670 ms */
+};
+
+/***** PHY_MARV_LED_OVER 16 bit r/w Manual LED Override Reg *****/
+#define PHY_M_LED_MO_SGMII(x) ((x)<<14) /* Bit 15..14: SGMII AN Timer */
+ /* Bit 13..12: reserved */
+#define PHY_M_LED_MO_DUP(x) ((x)<<10) /* Bit 11..10: Duplex */
+#define PHY_M_LED_MO_10(x) ((x)<<8) /* Bit 9.. 8: Link 10 */
+#define PHY_M_LED_MO_100(x) ((x)<<6) /* Bit 7.. 6: Link 100 */
+#define PHY_M_LED_MO_1000(x) ((x)<<4) /* Bit 5.. 4: Link 1000 */
+#define PHY_M_LED_MO_RX(x) ((x)<<2) /* Bit 3.. 2: Rx */
+#define PHY_M_LED_MO_TX(x) ((x)<<0) /* Bit 1.. 0: Tx */
+
+enum {
+ MO_LED_NORM = 0,
+ MO_LED_BLINK = 1,
+ MO_LED_OFF = 2,
+ MO_LED_ON = 3,
+};
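+
+/*
+ * Example (illustrative only, not lifted from the driver): the override
+ * fields above are combined per LED, e.g. writing
+ *
+ *	PHY_M_LED_MO_RX(MO_LED_ON) | PHY_M_LED_MO_TX(MO_LED_ON)
+ *
+ * to PHY_MARV_LED_OVER forces the Rx and Tx LEDs on, while MO_LED_NORM
+ * in every field returns the LEDs to normal hardware-driven operation.
+ */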
+
+/***** PHY_MARV_EXT_CTRL_2 16 bit r/w Ext. PHY Specific Ctrl 2 *****/
+enum {
+ PHY_M_EC2_FI_IMPED = 1<<6, /* Fiber Input Impedance */
+ PHY_M_EC2_FO_IMPED = 1<<5, /* Fiber Output Impedance */
+ PHY_M_EC2_FO_M_CLK = 1<<4, /* Fiber Mode Clock Enable */
+ PHY_M_EC2_FO_BOOST = 1<<3, /* Fiber Output Boost */
+ PHY_M_EC2_FO_AM_MSK = 7, /* Bit 2.. 0: Fiber Output Amplitude */
+};
+
+/***** PHY_MARV_EXT_P_STAT 16 bit r/w Ext. PHY Specific Status *****/
+enum {
+ PHY_M_FC_AUTO_SEL = 1<<15, /* Fiber/Copper Auto Sel. Dis. */
+ PHY_M_FC_AN_REG_ACC = 1<<14, /* Fiber/Copper AN Reg. Access */
+ PHY_M_FC_RESOLUTION = 1<<13, /* Fiber/Copper Resolution */
+ PHY_M_SER_IF_AN_BP = 1<<12, /* Ser. IF AN Bypass Enable */
+ PHY_M_SER_IF_BP_ST = 1<<11, /* Ser. IF AN Bypass Status */
+ PHY_M_IRQ_POLARITY = 1<<10, /* IRQ polarity */
+ PHY_M_DIS_AUT_MED = 1<<9, /* Disable Aut. Medium Reg. Selection */
+ /* (88E1111 only) */
+ /* Bit 9.. 4: reserved (88E1011 only) */
+ PHY_M_UNDOC1 = 1<<7, /* undocumented bit !! */
+ PHY_M_DTE_POW_STAT = 1<<4, /* DTE Power Status (88E1111 only) */
+ PHY_M_MODE_MASK = 0xf, /* Bit 3.. 0: copy of HWCFG MODE[3:0] */
+};
+
+/***** PHY_MARV_CABLE_DIAG 16 bit r/o Cable Diagnostic Reg *****/
+enum {
+ PHY_M_CABD_ENA_TEST = 1<<15, /* Enable Test (Page 0) */
+ PHY_M_CABD_DIS_WAIT = 1<<15, /* Disable Waiting Period (Page 1) */
+ /* (88E1111 only) */
+ PHY_M_CABD_STAT_MSK = 3<<13, /* Bit 14..13: Status Mask */
+ PHY_M_CABD_AMPL_MSK = 0x1f<<8, /* Bit 12.. 8: Amplitude Mask */
+ /* (88E1111 only) */
+ PHY_M_CABD_DIST_MSK = 0xff, /* Bit 7.. 0: Distance Mask */
+};
+
+/* values for Cable Diagnostic Status (11=fail; 00=OK; 10=open; 01=short) */
+enum {
+ CABD_STAT_NORMAL= 0,
+ CABD_STAT_SHORT = 1,
+ CABD_STAT_OPEN = 2,
+ CABD_STAT_FAIL = 3,
+};
+
+/* for 10/100 Fast Ethernet PHY (88E3082 only) */
+/***** PHY_MARV_FE_LED_PAR 16 bit r/w LED Parallel Select Reg. *****/
+ /* Bit 15..12: reserved (used internally) */
+enum {
+ PHY_M_FELP_LED2_MSK = 0xf<<8, /* Bit 11.. 8: LED2 Mask (LINK) */
+ PHY_M_FELP_LED1_MSK = 0xf<<4, /* Bit 7.. 4: LED1 Mask (ACT) */
+ PHY_M_FELP_LED0_MSK = 0xf, /* Bit 3.. 0: LED0 Mask (SPEED) */
+};
+
+#define PHY_M_FELP_LED2_CTRL(x) (((x)<<8) & PHY_M_FELP_LED2_MSK)
+#define PHY_M_FELP_LED1_CTRL(x) (((x)<<4) & PHY_M_FELP_LED1_MSK)
+#define PHY_M_FELP_LED0_CTRL(x) (((x)<<0) & PHY_M_FELP_LED0_MSK)
+
+enum {
+ LED_PAR_CTRL_COLX = 0x00,
+ LED_PAR_CTRL_ERROR = 0x01,
+ LED_PAR_CTRL_DUPLEX = 0x02,
+ LED_PAR_CTRL_DP_COL = 0x03,
+ LED_PAR_CTRL_SPEED = 0x04,
+ LED_PAR_CTRL_LINK = 0x05,
+ LED_PAR_CTRL_TX = 0x06,
+ LED_PAR_CTRL_RX = 0x07,
+ LED_PAR_CTRL_ACT = 0x08,
+ LED_PAR_CTRL_LNK_RX = 0x09,
+ LED_PAR_CTRL_LNK_AC = 0x0a,
+ LED_PAR_CTRL_ACT_BL = 0x0b,
+ LED_PAR_CTRL_TX_BL = 0x0c,
+ LED_PAR_CTRL_RX_BL = 0x0d,
+ LED_PAR_CTRL_COL_BL = 0x0e,
+ LED_PAR_CTRL_INACT = 0x0f
+};
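+
+/*
+ * Illustrative composition (not copied from the driver): each 4-bit field
+ * of PHY_MARV_FE_LED_PAR selects one of the LED_PAR_CTRL_* functions for
+ * its LED, e.g.
+ *
+ *	PHY_M_FELP_LED2_CTRL(LED_PAR_CTRL_LINK) |
+ *	PHY_M_FELP_LED1_CTRL(LED_PAR_CTRL_ACT)  |
+ *	PHY_M_FELP_LED0_CTRL(LED_PAR_CTRL_SPEED)
+ *
+ * keeps the assignment noted in the masks above (LED2 = link,
+ * LED1 = activity, LED0 = speed).
+ */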
+
+/***** PHY_MARV_FE_SPEC_2	16 bit r/w	Specific Control Reg. 2 *****/
+enum {
+ PHY_M_FESC_DIS_WAIT = 1<<2, /* Disable TDR Waiting Period */
+ PHY_M_FESC_ENA_MCLK = 1<<1, /* Enable MAC Rx Clock in sleep mode */
+ PHY_M_FESC_SEL_CL_A = 1<<0, /* Select Class A driver (100B-TX) */
+};
+
+
+/***** PHY_MARV_PHY_CTRL (page 3) 16 bit r/w LED Control Reg. *****/
+enum {
+ PHY_M_LEDC_LOS_MSK = 0xf<<12, /* Bit 15..12: LOS LED Ctrl. Mask */
+ PHY_M_LEDC_INIT_MSK = 0xf<<8, /* Bit 11.. 8: INIT LED Ctrl. Mask */
+ PHY_M_LEDC_STA1_MSK = 0xf<<4, /* Bit 7.. 4: STAT1 LED Ctrl. Mask */
+ PHY_M_LEDC_STA0_MSK = 0xf, /* Bit 3.. 0: STAT0 LED Ctrl. Mask */
+};
+
+#define PHY_M_LEDC_LOS_CTRL(x) (((x)<<12) & PHY_M_LEDC_LOS_MSK)
+#define PHY_M_LEDC_INIT_CTRL(x) (((x)<<8) & PHY_M_LEDC_INIT_MSK)
+#define PHY_M_LEDC_STA1_CTRL(x) (((x)<<4) & PHY_M_LEDC_STA1_MSK)
+#define PHY_M_LEDC_STA0_CTRL(x) (((x)<<0) & PHY_M_LEDC_STA0_MSK)
+
+/* GMAC registers */
+/* Port Registers */
+enum {
+ GM_GP_STAT = 0x0000, /* 16 bit r/o General Purpose Status */
+ GM_GP_CTRL = 0x0004, /* 16 bit r/w General Purpose Control */
+ GM_TX_CTRL = 0x0008, /* 16 bit r/w Transmit Control Reg. */
+ GM_RX_CTRL = 0x000c, /* 16 bit r/w Receive Control Reg. */
+ GM_TX_FLOW_CTRL = 0x0010, /* 16 bit r/w Transmit Flow-Control */
+ GM_TX_PARAM = 0x0014, /* 16 bit r/w Transmit Parameter Reg. */
+ GM_SERIAL_MODE = 0x0018, /* 16 bit r/w Serial Mode Register */
+/* Source Address Registers */
+ GM_SRC_ADDR_1L = 0x001c, /* 16 bit r/w Source Address 1 (low) */
+ GM_SRC_ADDR_1M = 0x0020, /* 16 bit r/w Source Address 1 (middle) */
+ GM_SRC_ADDR_1H = 0x0024, /* 16 bit r/w Source Address 1 (high) */
+ GM_SRC_ADDR_2L = 0x0028, /* 16 bit r/w Source Address 2 (low) */
+ GM_SRC_ADDR_2M = 0x002c, /* 16 bit r/w Source Address 2 (middle) */
+ GM_SRC_ADDR_2H = 0x0030, /* 16 bit r/w Source Address 2 (high) */
+
+/* Multicast Address Hash Registers */
+ GM_MC_ADDR_H1 = 0x0034, /* 16 bit r/w Multicast Address Hash 1 */
+ GM_MC_ADDR_H2 = 0x0038, /* 16 bit r/w Multicast Address Hash 2 */
+ GM_MC_ADDR_H3 = 0x003c, /* 16 bit r/w Multicast Address Hash 3 */
+ GM_MC_ADDR_H4 = 0x0040, /* 16 bit r/w Multicast Address Hash 4 */
+
+/* Interrupt Source Registers */
+ GM_TX_IRQ_SRC = 0x0044, /* 16 bit r/o Tx Overflow IRQ Source */
+ GM_RX_IRQ_SRC = 0x0048, /* 16 bit r/o Rx Overflow IRQ Source */
+ GM_TR_IRQ_SRC = 0x004c, /* 16 bit r/o Tx/Rx Over. IRQ Source */
+
+/* Interrupt Mask Registers */
+ GM_TX_IRQ_MSK = 0x0050, /* 16 bit r/w Tx Overflow IRQ Mask */
+ GM_RX_IRQ_MSK = 0x0054, /* 16 bit r/w Rx Overflow IRQ Mask */
+ GM_TR_IRQ_MSK = 0x0058, /* 16 bit r/w Tx/Rx Over. IRQ Mask */
+
+/* Serial Management Interface (SMI) Registers */
+ GM_SMI_CTRL = 0x0080, /* 16 bit r/w SMI Control Register */
+ GM_SMI_DATA = 0x0084, /* 16 bit r/w SMI Data Register */
+ GM_PHY_ADDR = 0x0088, /* 16 bit r/w GPHY Address Register */
+};
+
+/* MIB Counters */
+#define GM_MIB_CNT_BASE 0x0100 /* Base Address of MIB Counters */
+#define GM_MIB_CNT_SIZE 44 /* Number of MIB Counters */
+
+/*
+ * MIB Counters base address definitions (low word) -
+ * use offset 4 for access to high word (32 bit r/o)
+ */
+enum {
+ GM_RXF_UC_OK = GM_MIB_CNT_BASE + 0, /* Unicast Frames Received OK */
+ GM_RXF_BC_OK = GM_MIB_CNT_BASE + 8, /* Broadcast Frames Received OK */
+ GM_RXF_MPAUSE = GM_MIB_CNT_BASE + 16, /* Pause MAC Ctrl Frames Received */
+ GM_RXF_MC_OK = GM_MIB_CNT_BASE + 24, /* Multicast Frames Received OK */
+ GM_RXF_FCS_ERR = GM_MIB_CNT_BASE + 32, /* Rx Frame Check Seq. Error */
+ /* GM_MIB_CNT_BASE + 40: reserved */
+ GM_RXO_OK_LO = GM_MIB_CNT_BASE + 48, /* Octets Received OK Low */
+ GM_RXO_OK_HI = GM_MIB_CNT_BASE + 56, /* Octets Received OK High */
+ GM_RXO_ERR_LO = GM_MIB_CNT_BASE + 64, /* Octets Received Invalid Low */
+ GM_RXO_ERR_HI = GM_MIB_CNT_BASE + 72, /* Octets Received Invalid High */
+ GM_RXF_SHT = GM_MIB_CNT_BASE + 80, /* Frames <64 Byte Received OK */
+ GM_RXE_FRAG = GM_MIB_CNT_BASE + 88, /* Frames <64 Byte Received with FCS Err */
+ GM_RXF_64B = GM_MIB_CNT_BASE + 96, /* 64 Byte Rx Frame */
+ GM_RXF_127B = GM_MIB_CNT_BASE + 104, /* 65-127 Byte Rx Frame */
+ GM_RXF_255B = GM_MIB_CNT_BASE + 112, /* 128-255 Byte Rx Frame */
+ GM_RXF_511B = GM_MIB_CNT_BASE + 120, /* 256-511 Byte Rx Frame */
+ GM_RXF_1023B = GM_MIB_CNT_BASE + 128, /* 512-1023 Byte Rx Frame */
+ GM_RXF_1518B = GM_MIB_CNT_BASE + 136, /* 1024-1518 Byte Rx Frame */
+ GM_RXF_MAX_SZ = GM_MIB_CNT_BASE + 144, /* 1519-MaxSize Byte Rx Frame */
+ GM_RXF_LNG_ERR = GM_MIB_CNT_BASE + 152, /* Rx Frame too Long Error */
+ GM_RXF_JAB_PKT = GM_MIB_CNT_BASE + 160, /* Rx Jabber Packet Frame */
+ /* GM_MIB_CNT_BASE + 168: reserved */
+ GM_RXE_FIFO_OV = GM_MIB_CNT_BASE + 176, /* Rx FIFO overflow Event */
+ /* GM_MIB_CNT_BASE + 184: reserved */
+ GM_TXF_UC_OK = GM_MIB_CNT_BASE + 192, /* Unicast Frames Xmitted OK */
+ GM_TXF_BC_OK = GM_MIB_CNT_BASE + 200, /* Broadcast Frames Xmitted OK */
+ GM_TXF_MPAUSE = GM_MIB_CNT_BASE + 208, /* Pause MAC Ctrl Frames Xmitted */
+ GM_TXF_MC_OK = GM_MIB_CNT_BASE + 216, /* Multicast Frames Xmitted OK */
+ GM_TXO_OK_LO = GM_MIB_CNT_BASE + 224, /* Octets Transmitted OK Low */
+ GM_TXO_OK_HI = GM_MIB_CNT_BASE + 232, /* Octets Transmitted OK High */
+ GM_TXF_64B = GM_MIB_CNT_BASE + 240, /* 64 Byte Tx Frame */
+ GM_TXF_127B = GM_MIB_CNT_BASE + 248, /* 65-127 Byte Tx Frame */
+ GM_TXF_255B = GM_MIB_CNT_BASE + 256, /* 128-255 Byte Tx Frame */
+ GM_TXF_511B = GM_MIB_CNT_BASE + 264, /* 256-511 Byte Tx Frame */
+ GM_TXF_1023B = GM_MIB_CNT_BASE + 272, /* 512-1023 Byte Tx Frame */
+ GM_TXF_1518B = GM_MIB_CNT_BASE + 280, /* 1024-1518 Byte Tx Frame */
+ GM_TXF_MAX_SZ = GM_MIB_CNT_BASE + 288, /* 1519-MaxSize Byte Tx Frame */
+
+ GM_TXF_COL = GM_MIB_CNT_BASE + 304, /* Tx Collision */
+ GM_TXF_LAT_COL = GM_MIB_CNT_BASE + 312, /* Tx Late Collision */
+ GM_TXF_ABO_COL = GM_MIB_CNT_BASE + 320, /* Tx aborted due to Exces. Col. */
+ GM_TXF_MUL_COL = GM_MIB_CNT_BASE + 328, /* Tx Multiple Collision */
+ GM_TXF_SNG_COL = GM_MIB_CNT_BASE + 336, /* Tx Single Collision */
+ GM_TXE_FIFO_UR = GM_MIB_CNT_BASE + 344, /* Tx FIFO Underrun Event */
+};
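+
+/*
+ * Each counter above is built from two consecutive 16-bit GMAC registers:
+ * the low half at the listed offset and the high half at offset + 4, as
+ * noted in the comment above.  A sketch of reading one counter with the
+ * gma_read32() helper defined near the end of this file:
+ *
+ *	u32 rx_ucast_ok = gma_read32(hw, port, GM_RXF_UC_OK);
+ */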
+
+/* GMAC Bit Definitions */
+/* GM_GP_STAT 16 bit r/o General Purpose Status Register */
+enum {
+ GM_GPSR_SPEED = 1<<15, /* Bit 15: Port Speed (1 = 100 Mbps) */
+ GM_GPSR_DUPLEX = 1<<14, /* Bit 14: Duplex Mode (1 = Full) */
+ GM_GPSR_FC_TX_DIS = 1<<13, /* Bit 13: Tx Flow-Control Mode Disabled */
+ GM_GPSR_LINK_UP = 1<<12, /* Bit 12: Link Up Status */
+ GM_GPSR_PAUSE = 1<<11, /* Bit 11: Pause State */
+ GM_GPSR_TX_ACTIVE = 1<<10, /* Bit 10: Tx in Progress */
+ GM_GPSR_EXC_COL = 1<<9, /* Bit 9: Excessive Collisions Occurred */
+ GM_GPSR_LAT_COL = 1<<8, /* Bit 8: Late Collisions Occurred */
+
+ GM_GPSR_PHY_ST_CH = 1<<5, /* Bit 5: PHY Status Change */
+ GM_GPSR_GIG_SPEED = 1<<4, /* Bit 4: Gigabit Speed (1 = 1000 Mbps) */
+ GM_GPSR_PART_MODE = 1<<3, /* Bit 3: Partition mode */
+ GM_GPSR_FC_RX_DIS = 1<<2, /* Bit 2: Rx Flow-Control Mode Disabled */
+ GM_GPSR_PROM_EN = 1<<1, /* Bit 1: Promiscuous Mode Enabled */
+};
+
+/* GM_GP_CTRL 16 bit r/w General Purpose Control Register */
+enum {
+ GM_GPCR_PROM_ENA = 1<<14, /* Bit 14: Enable Promiscuous Mode */
+ GM_GPCR_FC_TX_DIS = 1<<13, /* Bit 13: Disable Tx Flow-Control Mode */
+ GM_GPCR_TX_ENA = 1<<12, /* Bit 12: Enable Transmit */
+ GM_GPCR_RX_ENA = 1<<11, /* Bit 11: Enable Receive */
+ GM_GPCR_BURST_ENA = 1<<10, /* Bit 10: Enable Burst Mode */
+ GM_GPCR_LOOP_ENA = 1<<9, /* Bit 9: Enable MAC Loopback Mode */
+ GM_GPCR_PART_ENA = 1<<8, /* Bit 8: Enable Partition Mode */
+ GM_GPCR_GIGS_ENA = 1<<7, /* Bit 7: Gigabit Speed (1000 Mbps) */
+ GM_GPCR_FL_PASS = 1<<6, /* Bit 6: Force Link Pass */
+ GM_GPCR_DUP_FULL = 1<<5, /* Bit 5: Full Duplex Mode */
+ GM_GPCR_FC_RX_DIS = 1<<4, /* Bit 4: Disable Rx Flow-Control Mode */
+ GM_GPCR_SPEED_100 = 1<<3, /* Bit 3: Port Speed 100 Mbps */
+ GM_GPCR_AU_DUP_DIS = 1<<2, /* Bit 2: Disable Auto-Update Duplex */
+ GM_GPCR_AU_FCT_DIS = 1<<1, /* Bit 1: Disable Auto-Update Flow-C. */
+ GM_GPCR_AU_SPD_DIS = 1<<0, /* Bit 0: Disable Auto-Update Speed */
+};
+
+#define GM_GPCR_SPEED_1000 (GM_GPCR_GIGS_ENA | GM_GPCR_SPEED_100)
+#define GM_GPCR_AU_ALL_DIS (GM_GPCR_AU_DUP_DIS | GM_GPCR_AU_FCT_DIS | GM_GPCR_AU_SPD_DIS)
+
+/* GM_TX_CTRL 16 bit r/w Transmit Control Register */
+enum {
+ GM_TXCR_FORCE_JAM = 1<<15, /* Bit 15: Force Jam / Flow-Control */
+ GM_TXCR_CRC_DIS = 1<<14, /* Bit 14: Disable insertion of CRC */
+ GM_TXCR_PAD_DIS = 1<<13, /* Bit 13: Disable padding of packets */
+ GM_TXCR_COL_THR_MSK = 7<<10, /* Bit 12..10: Collision Threshold */
+};
+
+#define TX_COL_THR(x) (((x)<<10) & GM_TXCR_COL_THR_MSK)
+#define TX_COL_DEF 0x04 /* late collision after 64 byte */
+
+/* GM_RX_CTRL 16 bit r/w Receive Control Register */
+enum {
+ GM_RXCR_UCF_ENA = 1<<15, /* Bit 15: Enable Unicast filtering */
+ GM_RXCR_MCF_ENA = 1<<14, /* Bit 14: Enable Multicast filtering */
+ GM_RXCR_CRC_DIS = 1<<13, /* Bit 13: Remove 4-byte CRC */
+ GM_RXCR_PASS_FC = 1<<12, /* Bit 12: Pass FC packets to FIFO */
+};
+
+/* GM_TX_PARAM 16 bit r/w Transmit Parameter Register */
+enum {
+ GM_TXPA_JAMLEN_MSK = 0x03<<14, /* Bit 15..14: Jam Length */
+ GM_TXPA_JAMIPG_MSK = 0x1f<<9, /* Bit 13..9: Jam IPG */
+ GM_TXPA_JAMDAT_MSK = 0x1f<<4, /* Bit 8..4: IPG Jam to Data */
+
+ TX_JAM_LEN_DEF = 0x03,
+ TX_JAM_IPG_DEF = 0x0b,
+ TX_IPG_JAM_DEF = 0x1c,
+};
+
+#define TX_JAM_LEN_VAL(x) (((x)<<14) & GM_TXPA_JAMLEN_MSK)
+#define TX_JAM_IPG_VAL(x) (((x)<<9) & GM_TXPA_JAMIPG_MSK)
+#define TX_IPG_JAM_DATA(x) (((x)<<4) & GM_TXPA_JAMDAT_MSK)
+
+
+/* GM_SERIAL_MODE 16 bit r/w Serial Mode Register */
+enum {
+ GM_SMOD_DATABL_MSK = 0x1f<<11, /* Bit 15..11: Data Blinder (r/o) */
+ GM_SMOD_LIMIT_4 = 1<<10, /* Bit 10: 4 consecutive Tx trials */
+ GM_SMOD_VLAN_ENA = 1<<9, /* Bit 9: Enable VLAN (Max. Frame Len) */
+ GM_SMOD_JUMBO_ENA = 1<<8, /* Bit 8: Enable Jumbo (Max. Frame Len) */
+ GM_SMOD_IPG_MSK = 0x1f /* Bit 4..0: Inter-Packet Gap (IPG) */
+};
+
+#define DATA_BLIND_VAL(x) (((x)<<11) & GM_SMOD_DATABL_MSK)
+#define DATA_BLIND_DEF 0x04
+
+#define IPG_DATA_VAL(x) (x & GM_SMOD_IPG_MSK)
+#define IPG_DATA_DEF 0x1e
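+
+/*
+ * Illustrative GM_SERIAL_MODE setup combining the defaults above (the
+ * skge/sky2 MAC init paths use a sequence along these lines):
+ *
+ *	gma_write16(hw, port, GM_SERIAL_MODE,
+ *		    DATA_BLIND_VAL(DATA_BLIND_DEF) |
+ *		    GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF));
+ */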
+
+/* GM_SMI_CTRL 16 bit r/w SMI Control Register */
+enum {
+ GM_SMI_CT_PHY_A_MSK = 0x1f<<11, /* Bit 15..11: PHY Device Address */
+ GM_SMI_CT_REG_A_MSK = 0x1f<<6, /* Bit 10.. 6: PHY Register Address */
+ GM_SMI_CT_OP_RD = 1<<5, /* Bit 5: OpCode Read (0=Write)*/
+ GM_SMI_CT_RD_VAL = 1<<4, /* Bit 4: Read Valid (Read completed) */
+ GM_SMI_CT_BUSY = 1<<3, /* Bit 3: Busy (Operation in progress) */
+};
+
+#define GM_SMI_CT_PHY_AD(x) (((x)<<11) & GM_SMI_CT_PHY_A_MSK)
+#define GM_SMI_CT_REG_AD(x) (((x)<<6) & GM_SMI_CT_REG_A_MSK)
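+
+/*
+ * Illustrative SMI read sequence (the pattern used by the
+ * gm_phy_read()/gm_phy_write() helpers in the skge/sky2 drivers): write
+ * GM_SMI_CTRL with the PHY and register addresses plus GM_SMI_CT_OP_RD,
+ * poll until GM_SMI_CT_RD_VAL is set, then fetch the result from
+ * GM_SMI_DATA:
+ *
+ *	gma_write16(hw, port, GM_SMI_CTRL,
+ *		    GM_SMI_CT_PHY_AD(phy_addr) |
+ *		    GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);
+ *	while (!(gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_RD_VAL))
+ *		udelay(10);
+ *	val = gma_read16(hw, port, GM_SMI_DATA);
+ *
+ * Real callers also bound the polling loop and check for 0xffff
+ * (device gone), as __gm_phy_read() in sky2.c does.
+ */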
+
+/* GM_PHY_ADDR 16 bit r/w GPHY Address Register */
+enum {
+ GM_PAR_MIB_CLR = 1<<5, /* Bit 5: Set MIB Clear Counter Mode */
+ GM_PAR_MIB_TST = 1<<4, /* Bit 4: MIB Load Counter (Test Mode) */
+};
+
+/* Receive Frame Status Encoding */
+enum {
+ GMR_FS_LEN = 0xffff<<16, /* Bit 31..16: Rx Frame Length */
+ GMR_FS_LEN_SHIFT = 16,
+ GMR_FS_VLAN = 1<<13, /* Bit 13: VLAN Packet */
+ GMR_FS_JABBER = 1<<12, /* Bit 12: Jabber Packet */
+ GMR_FS_UN_SIZE = 1<<11, /* Bit 11: Undersize Packet */
+ GMR_FS_MC = 1<<10, /* Bit 10: Multicast Packet */
+ GMR_FS_BC = 1<<9, /* Bit 9: Broadcast Packet */
+ GMR_FS_RX_OK = 1<<8, /* Bit 8: Receive OK (Good Packet) */
+ GMR_FS_GOOD_FC = 1<<7, /* Bit 7: Good Flow-Control Packet */
+ GMR_FS_BAD_FC = 1<<6, /* Bit 6: Bad Flow-Control Packet */
+ GMR_FS_MII_ERR = 1<<5, /* Bit 5: MII Error */
+ GMR_FS_LONG_ERR = 1<<4, /* Bit 4: Too Long Packet */
+ GMR_FS_FRAGMENT = 1<<3, /* Bit 3: Fragment */
+
+ GMR_FS_CRC_ERR = 1<<1, /* Bit 1: CRC Error */
+ GMR_FS_RX_FF_OV = 1<<0, /* Bit 0: Rx FIFO Overflow */
+
+/*
+ * GMR_FS_ANY_ERR (analogous to XMR_FS_ANY_ERR)
+ */
+ GMR_FS_ANY_ERR = GMR_FS_CRC_ERR | GMR_FS_LONG_ERR |
+ GMR_FS_MII_ERR | GMR_FS_BAD_FC | GMR_FS_GOOD_FC |
+ GMR_FS_JABBER,
+/* Rx GMAC FIFO Flush Mask (default) */
+	RX_FF_FL_DEF_MSK = GMR_FS_CRC_ERR | GMR_FS_RX_FF_OV | GMR_FS_MII_ERR |
+ GMR_FS_BAD_FC | GMR_FS_UN_SIZE | GMR_FS_JABBER,
+};
+
+/* RX_GMF_CTRL_T 32 bit Rx GMAC FIFO Control/Test */
+enum {
+ GMF_WP_TST_ON = 1<<14, /* Write Pointer Test On */
+ GMF_WP_TST_OFF = 1<<13, /* Write Pointer Test Off */
+ GMF_WP_STEP = 1<<12, /* Write Pointer Step/Increment */
+
+ GMF_RP_TST_ON = 1<<10, /* Read Pointer Test On */
+ GMF_RP_TST_OFF = 1<<9, /* Read Pointer Test Off */
+ GMF_RP_STEP = 1<<8, /* Read Pointer Step/Increment */
+ GMF_RX_F_FL_ON = 1<<7, /* Rx FIFO Flush Mode On */
+ GMF_RX_F_FL_OFF = 1<<6, /* Rx FIFO Flush Mode Off */
+ GMF_CLI_RX_FO = 1<<5, /* Clear IRQ Rx FIFO Overrun */
+ GMF_CLI_RX_FC = 1<<4, /* Clear IRQ Rx Frame Complete */
+ GMF_OPER_ON = 1<<3, /* Operational Mode On */
+ GMF_OPER_OFF = 1<<2, /* Operational Mode Off */
+ GMF_RST_CLR = 1<<1, /* Clear GMAC FIFO Reset */
+ GMF_RST_SET = 1<<0, /* Set GMAC FIFO Reset */
+
+ RX_GMF_FL_THR_DEF = 0xa, /* flush threshold (default) */
+};
+
+
+/* TX_GMF_CTRL_T 32 bit Tx GMAC FIFO Control/Test */
+enum {
+ GMF_WSP_TST_ON = 1<<18, /* Write Shadow Pointer Test On */
+ GMF_WSP_TST_OFF = 1<<17, /* Write Shadow Pointer Test Off */
+ GMF_WSP_STEP = 1<<16, /* Write Shadow Pointer Step/Increment */
+
+ GMF_CLI_TX_FU = 1<<6, /* Clear IRQ Tx FIFO Underrun */
+ GMF_CLI_TX_FC = 1<<5, /* Clear IRQ Tx Frame Complete */
+ GMF_CLI_TX_PE = 1<<4, /* Clear IRQ Tx Parity Error */
+};
+
+/* GMAC_TI_ST_CTRL 8 bit Time Stamp Timer Ctrl Reg (YUKON only) */
+enum {
+ GMT_ST_START = 1<<2, /* Start Time Stamp Timer */
+ GMT_ST_STOP = 1<<1, /* Stop Time Stamp Timer */
+ GMT_ST_CLR_IRQ = 1<<0, /* Clear Time Stamp Timer IRQ */
+};
+
+/* GMAC_CTRL 32 bit GMAC Control Reg (YUKON only) */
+enum {
+ GMC_H_BURST_ON = 1<<7, /* Half Duplex Burst Mode On */
+ GMC_H_BURST_OFF = 1<<6, /* Half Duplex Burst Mode Off */
+ GMC_F_LOOPB_ON = 1<<5, /* FIFO Loopback On */
+ GMC_F_LOOPB_OFF = 1<<4, /* FIFO Loopback Off */
+ GMC_PAUSE_ON = 1<<3, /* Pause On */
+ GMC_PAUSE_OFF = 1<<2, /* Pause Off */
+ GMC_RST_CLR = 1<<1, /* Clear GMAC Reset */
+ GMC_RST_SET = 1<<0, /* Set GMAC Reset */
+};
+
+/* GPHY_CTRL 32 bit GPHY Control Reg (YUKON only) */
+enum {
+ GPC_SEL_BDT = 1<<28, /* Select Bi-Dir. Transfer for MDC/MDIO */
+ GPC_INT_POL_HI = 1<<27, /* IRQ Polarity is Active HIGH */
+ GPC_75_OHM = 1<<26, /* Use 75 Ohm Termination instead of 50 */
+ GPC_DIS_FC = 1<<25, /* Disable Automatic Fiber/Copper Detection */
+ GPC_DIS_SLEEP = 1<<24, /* Disable Energy Detect */
+ GPC_HWCFG_M_3 = 1<<23, /* HWCFG_MODE[3] */
+ GPC_HWCFG_M_2 = 1<<22, /* HWCFG_MODE[2] */
+ GPC_HWCFG_M_1 = 1<<21, /* HWCFG_MODE[1] */
+ GPC_HWCFG_M_0 = 1<<20, /* HWCFG_MODE[0] */
+ GPC_ANEG_0 = 1<<19, /* ANEG[0] */
+ GPC_ENA_XC = 1<<18, /* Enable MDI crossover */
+ GPC_DIS_125 = 1<<17, /* Disable 125 MHz clock */
+ GPC_ANEG_3 = 1<<16, /* ANEG[3] */
+ GPC_ANEG_2 = 1<<15, /* ANEG[2] */
+ GPC_ANEG_1 = 1<<14, /* ANEG[1] */
+ GPC_ENA_PAUSE = 1<<13, /* Enable Pause (SYM_OR_REM) */
+ GPC_PHYADDR_4 = 1<<12, /* Bit 4 of Phy Addr */
+ GPC_PHYADDR_3 = 1<<11, /* Bit 3 of Phy Addr */
+ GPC_PHYADDR_2 = 1<<10, /* Bit 2 of Phy Addr */
+ GPC_PHYADDR_1 = 1<<9, /* Bit 1 of Phy Addr */
+ GPC_PHYADDR_0 = 1<<8, /* Bit 0 of Phy Addr */
+ /* Bits 7..2: reserved */
+ GPC_RST_CLR = 1<<1, /* Clear GPHY Reset */
+ GPC_RST_SET = 1<<0, /* Set GPHY Reset */
+};
+
+#define GPC_HWCFG_GMII_COP (GPC_HWCFG_M_3 | GPC_HWCFG_M_2 | GPC_HWCFG_M_1 | GPC_HWCFG_M_0)
+#define GPC_HWCFG_GMII_FIB (GPC_HWCFG_M_2 | GPC_HWCFG_M_1 | GPC_HWCFG_M_0)
+#define GPC_ANEG_ADV_ALL_M (GPC_ANEG_3 | GPC_ANEG_2 | GPC_ANEG_1 | GPC_ANEG_0)
+
+/* forced speed and duplex mode (don't mix with other ANEG bits) */
+#define GPC_FRC10MBIT_HALF 0
+#define GPC_FRC10MBIT_FULL GPC_ANEG_0
+#define GPC_FRC100MBIT_HALF GPC_ANEG_1
+#define GPC_FRC100MBIT_FULL (GPC_ANEG_0 | GPC_ANEG_1)
+
+/* auto-negotiation with limited advertised speeds */
+/* mix only with master/slave settings (for copper) */
+#define GPC_ADV_1000_HALF GPC_ANEG_2
+#define GPC_ADV_1000_FULL GPC_ANEG_3
+#define GPC_ADV_ALL (GPC_ANEG_2 | GPC_ANEG_3)
+
+/* master/slave settings */
+/* only for copper with 1000 Mbps */
+#define GPC_FORCE_MASTER 0
+#define GPC_FORCE_SLAVE GPC_ANEG_0
+#define GPC_PREF_MASTER GPC_ANEG_1
+#define GPC_PREF_SLAVE (GPC_ANEG_1 | GPC_ANEG_0)
+
+/* GMAC_IRQ_SRC 8 bit GMAC Interrupt Source Reg (YUKON only) */
+/* GMAC_IRQ_MSK 8 bit GMAC Interrupt Mask Reg (YUKON only) */
+enum {
+ GM_IS_TX_CO_OV = 1<<5, /* Transmit Counter Overflow IRQ */
+ GM_IS_RX_CO_OV = 1<<4, /* Receive Counter Overflow IRQ */
+ GM_IS_TX_FF_UR = 1<<3, /* Transmit FIFO Underrun */
+ GM_IS_TX_COMPL = 1<<2, /* Frame Transmission Complete */
+ GM_IS_RX_FF_OR = 1<<1, /* Receive FIFO Overrun */
+ GM_IS_RX_COMPL = 1<<0, /* Frame Reception Complete */
+
+#define GMAC_DEF_MSK (GM_IS_RX_FF_OR | GM_IS_TX_FF_UR)
+
+/* GMAC_LINK_CTRL 16 bit GMAC Link Control Reg (YUKON only) */
+ /* Bits 15.. 2: reserved */
+ GMLC_RST_CLR = 1<<1, /* Clear GMAC Link Reset */
+ GMLC_RST_SET = 1<<0, /* Set GMAC Link Reset */
+
+
+/* WOL_CTRL_STAT 16 bit WOL Control/Status Reg */
+ WOL_CTL_LINK_CHG_OCC = 1<<15,
+ WOL_CTL_MAGIC_PKT_OCC = 1<<14,
+ WOL_CTL_PATTERN_OCC = 1<<13,
+ WOL_CTL_CLEAR_RESULT = 1<<12,
+ WOL_CTL_ENA_PME_ON_LINK_CHG = 1<<11,
+ WOL_CTL_DIS_PME_ON_LINK_CHG = 1<<10,
+ WOL_CTL_ENA_PME_ON_MAGIC_PKT = 1<<9,
+ WOL_CTL_DIS_PME_ON_MAGIC_PKT = 1<<8,
+ WOL_CTL_ENA_PME_ON_PATTERN = 1<<7,
+ WOL_CTL_DIS_PME_ON_PATTERN = 1<<6,
+ WOL_CTL_ENA_LINK_CHG_UNIT = 1<<5,
+ WOL_CTL_DIS_LINK_CHG_UNIT = 1<<4,
+ WOL_CTL_ENA_MAGIC_PKT_UNIT = 1<<3,
+ WOL_CTL_DIS_MAGIC_PKT_UNIT = 1<<2,
+ WOL_CTL_ENA_PATTERN_UNIT = 1<<1,
+ WOL_CTL_DIS_PATTERN_UNIT = 1<<0,
+};
+
+#define WOL_CTL_DEFAULT \
+ (WOL_CTL_DIS_PME_ON_LINK_CHG | \
+ WOL_CTL_DIS_PME_ON_PATTERN | \
+ WOL_CTL_DIS_PME_ON_MAGIC_PKT | \
+ WOL_CTL_DIS_LINK_CHG_UNIT | \
+ WOL_CTL_DIS_PATTERN_UNIT | \
+ WOL_CTL_DIS_MAGIC_PKT_UNIT)
+
+/* WOL_MATCH_CTL 8 bit WOL Match Control Reg */
+#define WOL_CTL_PATT_ENA(x) (1 << (x))
+
+
+/* XMAC II registers */
+enum {
+ XM_MMU_CMD = 0x0000, /* 16 bit r/w MMU Command Register */
+ XM_POFF = 0x0008, /* 32 bit r/w Packet Offset Register */
+ XM_BURST = 0x000c, /* 32 bit r/w Burst Register for half duplex*/
+ XM_1L_VLAN_TAG = 0x0010, /* 16 bit r/w One Level VLAN Tag ID */
+ XM_2L_VLAN_TAG = 0x0014, /* 16 bit r/w Two Level VLAN Tag ID */
+ XM_TX_CMD = 0x0020, /* 16 bit r/w Transmit Command Register */
+ XM_TX_RT_LIM = 0x0024, /* 16 bit r/w Transmit Retry Limit Register */
+ XM_TX_STIME = 0x0028, /* 16 bit r/w Transmit Slottime Register */
+ XM_TX_IPG = 0x002c, /* 16 bit r/w Transmit Inter Packet Gap */
+ XM_RX_CMD = 0x0030, /* 16 bit r/w Receive Command Register */
+ XM_PHY_ADDR = 0x0034, /* 16 bit r/w PHY Address Register */
+ XM_PHY_DATA = 0x0038, /* 16 bit r/w PHY Data Register */
+ XM_GP_PORT = 0x0040, /* 32 bit r/w General Purpose Port Register */
+ XM_IMSK = 0x0044, /* 16 bit r/w Interrupt Mask Register */
+ XM_ISRC = 0x0048, /* 16 bit r/o Interrupt Status Register */
+ XM_HW_CFG = 0x004c, /* 16 bit r/w Hardware Config Register */
+ XM_TX_LO_WM = 0x0060, /* 16 bit r/w Tx FIFO Low Water Mark */
+ XM_TX_HI_WM = 0x0062, /* 16 bit r/w Tx FIFO High Water Mark */
+ XM_TX_THR = 0x0064, /* 16 bit r/w Tx Request Threshold */
+ XM_HT_THR = 0x0066, /* 16 bit r/w Host Request Threshold */
+ XM_PAUSE_DA = 0x0068, /* NA reg r/w Pause Destination Address */
+ XM_CTL_PARA = 0x0070, /* 32 bit r/w Control Parameter Register */
+ XM_MAC_OPCODE = 0x0074, /* 16 bit r/w Opcode for MAC control frames */
+ XM_MAC_PTIME = 0x0076, /* 16 bit r/w Pause time for MAC ctrl frames*/
+ XM_TX_STAT = 0x0078, /* 32 bit r/o Tx Status LIFO Register */
+
+ XM_EXM_START = 0x0080, /* r/w Start Address of the EXM Regs */
+#define XM_EXM(reg) (XM_EXM_START + ((reg) << 3))
+};
+
+enum {
+ XM_SRC_CHK = 0x0100, /* NA reg r/w Source Check Address Register */
+ XM_SA = 0x0108, /* NA reg r/w Station Address Register */
+ XM_HSM = 0x0110, /* 64 bit r/w Hash Match Address Registers */
+ XM_RX_LO_WM = 0x0118, /* 16 bit r/w Receive Low Water Mark */
+ XM_RX_HI_WM = 0x011a, /* 16 bit r/w Receive High Water Mark */
+ XM_RX_THR = 0x011c, /* 32 bit r/w Receive Request Threshold */
+ XM_DEV_ID = 0x0120, /* 32 bit r/o Device ID Register */
+ XM_MODE = 0x0124, /* 32 bit r/w Mode Register */
+ XM_LSA = 0x0128, /* NA reg r/o Last Source Register */
+ XM_TS_READ = 0x0130, /* 32 bit r/o Time Stamp Read Register */
+ XM_TS_LOAD = 0x0134, /* 32 bit r/o Time Stamp Load Value */
+ XM_STAT_CMD = 0x0200, /* 16 bit r/w Statistics Command Register */
+ XM_RX_CNT_EV = 0x0204, /* 32 bit r/o Rx Counter Event Register */
+ XM_TX_CNT_EV = 0x0208, /* 32 bit r/o Tx Counter Event Register */
+ XM_RX_EV_MSK = 0x020c, /* 32 bit r/w Rx Counter Event Mask */
+ XM_TX_EV_MSK = 0x0210, /* 32 bit r/w Tx Counter Event Mask */
+	XM_TXF_OK	= 0x0280, /* 32 bit r/o Frames Transmitted OK Counter */
+ XM_TXO_OK_HI = 0x0284, /* 32 bit r/o Octets Transmitted OK High Cnt*/
+ XM_TXO_OK_LO = 0x0288, /* 32 bit r/o Octets Transmitted OK Low Cnt */
+ XM_TXF_BC_OK = 0x028c, /* 32 bit r/o Broadcast Frames Xmitted OK */
+ XM_TXF_MC_OK = 0x0290, /* 32 bit r/o Multicast Frames Xmitted OK */
+ XM_TXF_UC_OK = 0x0294, /* 32 bit r/o Unicast Frames Xmitted OK */
+ XM_TXF_LONG = 0x0298, /* 32 bit r/o Tx Long Frame Counter */
+ XM_TXE_BURST = 0x029c, /* 32 bit r/o Tx Burst Event Counter */
+ XM_TXF_MPAUSE = 0x02a0, /* 32 bit r/o Tx Pause MAC Ctrl Frame Cnt */
+ XM_TXF_MCTRL = 0x02a4, /* 32 bit r/o Tx MAC Ctrl Frame Counter */
+ XM_TXF_SNG_COL = 0x02a8, /* 32 bit r/o Tx Single Collision Counter */
+ XM_TXF_MUL_COL = 0x02ac, /* 32 bit r/o Tx Multiple Collision Counter */
+ XM_TXF_ABO_COL = 0x02b0, /* 32 bit r/o Tx aborted due to Exces. Col. */
+ XM_TXF_LAT_COL = 0x02b4, /* 32 bit r/o Tx Late Collision Counter */
+ XM_TXF_DEF = 0x02b8, /* 32 bit r/o Tx Deferred Frame Counter */
+	XM_TXF_EX_DEF	= 0x02bc, /* 32 bit r/o Tx Excessive Deferral Counter */
+ XM_TXE_FIFO_UR = 0x02c0, /* 32 bit r/o Tx FIFO Underrun Event Cnt */
+ XM_TXE_CS_ERR = 0x02c4, /* 32 bit r/o Tx Carrier Sense Error Cnt */
+ XM_TXP_UTIL = 0x02c8, /* 32 bit r/o Tx Utilization in % */
+ XM_TXF_64B = 0x02d0, /* 32 bit r/o 64 Byte Tx Frame Counter */
+ XM_TXF_127B = 0x02d4, /* 32 bit r/o 65-127 Byte Tx Frame Counter */
+ XM_TXF_255B = 0x02d8, /* 32 bit r/o 128-255 Byte Tx Frame Counter */
+ XM_TXF_511B = 0x02dc, /* 32 bit r/o 256-511 Byte Tx Frame Counter */
+ XM_TXF_1023B = 0x02e0, /* 32 bit r/o 512-1023 Byte Tx Frame Counter*/
+ XM_TXF_MAX_SZ = 0x02e4, /* 32 bit r/o 1024-MaxSize Byte Tx Frame Cnt*/
+ XM_RXF_OK = 0x0300, /* 32 bit r/o Frames Received OK */
+ XM_RXO_OK_HI = 0x0304, /* 32 bit r/o Octets Received OK High Cnt */
+ XM_RXO_OK_LO = 0x0308, /* 32 bit r/o Octets Received OK Low Counter*/
+ XM_RXF_BC_OK = 0x030c, /* 32 bit r/o Broadcast Frames Received OK */
+ XM_RXF_MC_OK = 0x0310, /* 32 bit r/o Multicast Frames Received OK */
+ XM_RXF_UC_OK = 0x0314, /* 32 bit r/o Unicast Frames Received OK */
+ XM_RXF_MPAUSE = 0x0318, /* 32 bit r/o Rx Pause MAC Ctrl Frame Cnt */
+ XM_RXF_MCTRL = 0x031c, /* 32 bit r/o Rx MAC Ctrl Frame Counter */
+ XM_RXF_INV_MP = 0x0320, /* 32 bit r/o Rx invalid Pause Frame Cnt */
+ XM_RXF_INV_MOC = 0x0324, /* 32 bit r/o Rx Frames with inv. MAC Opcode*/
+ XM_RXE_BURST = 0x0328, /* 32 bit r/o Rx Burst Event Counter */
+ XM_RXE_FMISS = 0x032c, /* 32 bit r/o Rx Missed Frames Event Cnt */
+ XM_RXF_FRA_ERR = 0x0330, /* 32 bit r/o Rx Framing Error Counter */
+ XM_RXE_FIFO_OV = 0x0334, /* 32 bit r/o Rx FIFO overflow Event Cnt */
+ XM_RXF_JAB_PKT = 0x0338, /* 32 bit r/o Rx Jabber Packet Frame Cnt */
+ XM_RXE_CAR_ERR = 0x033c, /* 32 bit r/o Rx Carrier Event Error Cnt */
+ XM_RXF_LEN_ERR = 0x0340, /* 32 bit r/o Rx in Range Length Error */
+ XM_RXE_SYM_ERR = 0x0344, /* 32 bit r/o Rx Symbol Error Counter */
+ XM_RXE_SHT_ERR = 0x0348, /* 32 bit r/o Rx Short Event Error Cnt */
+ XM_RXE_RUNT = 0x034c, /* 32 bit r/o Rx Runt Event Counter */
+ XM_RXF_LNG_ERR = 0x0350, /* 32 bit r/o Rx Frame too Long Error Cnt */
+ XM_RXF_FCS_ERR = 0x0354, /* 32 bit r/o Rx Frame Check Seq. Error Cnt */
+ XM_RXF_CEX_ERR = 0x035c, /* 32 bit r/o Rx Carrier Ext Error Frame Cnt*/
+ XM_RXP_UTIL = 0x0360, /* 32 bit r/o Rx Utilization in % */
+ XM_RXF_64B = 0x0368, /* 32 bit r/o 64 Byte Rx Frame Counter */
+ XM_RXF_127B = 0x036c, /* 32 bit r/o 65-127 Byte Rx Frame Counter */
+ XM_RXF_255B = 0x0370, /* 32 bit r/o 128-255 Byte Rx Frame Counter */
+ XM_RXF_511B = 0x0374, /* 32 bit r/o 256-511 Byte Rx Frame Counter */
+ XM_RXF_1023B = 0x0378, /* 32 bit r/o 512-1023 Byte Rx Frame Counter*/
+ XM_RXF_MAX_SZ = 0x037c, /* 32 bit r/o 1024-MaxSize Byte Rx Frame Cnt*/
+};
+
+/* XM_MMU_CMD 16 bit r/w MMU Command Register */
+enum {
+ XM_MMU_PHY_RDY = 1<<12, /* Bit 12: PHY Read Ready */
+ XM_MMU_PHY_BUSY = 1<<11, /* Bit 11: PHY Busy */
+ XM_MMU_IGN_PF = 1<<10, /* Bit 10: Ignore Pause Frame */
+ XM_MMU_MAC_LB = 1<<9, /* Bit 9: Enable MAC Loopback */
+ XM_MMU_FRC_COL = 1<<7, /* Bit 7: Force Collision */
+ XM_MMU_SIM_COL = 1<<6, /* Bit 6: Simulate Collision */
+ XM_MMU_NO_PRE = 1<<5, /* Bit 5: No MDIO Preamble */
+ XM_MMU_GMII_FD = 1<<4, /* Bit 4: GMII uses Full Duplex */
+ XM_MMU_RAT_CTRL = 1<<3, /* Bit 3: Enable Rate Control */
+ XM_MMU_GMII_LOOP= 1<<2, /* Bit 2: PHY is in Loopback Mode */
+ XM_MMU_ENA_RX = 1<<1, /* Bit 1: Enable Receiver */
+ XM_MMU_ENA_TX = 1<<0, /* Bit 0: Enable Transmitter */
+};
+
+
+/* XM_TX_CMD 16 bit r/w Transmit Command Register */
+enum {
+	XM_TX_BK2BK	= 1<<6,	/* Bit 6: Ignore Carrier Sense (Tx Bk2Bk) */
+ XM_TX_ENC_BYP = 1<<5, /* Bit 5: Set Encoder in Bypass Mode */
+ XM_TX_SAM_LINE = 1<<4, /* Bit 4: (sc) Start utilization calculation */
+ XM_TX_NO_GIG_MD = 1<<3, /* Bit 3: Disable Carrier Extension */
+ XM_TX_NO_PRE = 1<<2, /* Bit 2: Disable Preamble Generation */
+ XM_TX_NO_CRC = 1<<1, /* Bit 1: Disable CRC Generation */
+ XM_TX_AUTO_PAD = 1<<0, /* Bit 0: Enable Automatic Padding */
+};
+
+/* XM_TX_RT_LIM 16 bit r/w Transmit Retry Limit Register */
+#define XM_RT_LIM_MSK 0x1f /* Bit 4..0: Tx Retry Limit */
+
+
+/* XM_TX_STIME 16 bit r/w Transmit Slottime Register */
+#define XM_STIME_MSK 0x7f /* Bit 6..0: Tx Slottime bits */
+
+
+/* XM_TX_IPG 16 bit r/w Transmit Inter Packet Gap */
+#define XM_IPG_MSK 0xff /* Bit 7..0: IPG value bits */
+
+
+/* XM_RX_CMD 16 bit r/w Receive Command Register */
+enum {
+	XM_RX_LENERR_OK = 1<<8,	/* Bit 8: don't set Rx Err bit for */
+				/* in-range length error packets */
+	XM_RX_BIG_PK_OK	= 1<<7,	/* Bit 7: don't set Rx Err bit for */
+				/* jumbo packets */
+ XM_RX_IPG_CAP = 1<<6, /* Bit 6 repl. type field with IPG */
+ XM_RX_TP_MD = 1<<5, /* Bit 5: Enable transparent Mode */
+ XM_RX_STRIP_FCS = 1<<4, /* Bit 4: Enable FCS Stripping */
+ XM_RX_SELF_RX = 1<<3, /* Bit 3: Enable Rx of own packets */
+ XM_RX_SAM_LINE = 1<<2, /* Bit 2: (sc) Start utilization calculation */
+ XM_RX_STRIP_PAD = 1<<1, /* Bit 1: Strip pad bytes of Rx frames */
+ XM_RX_DIS_CEXT = 1<<0, /* Bit 0: Disable carrier ext. check */
+};
+
+
+/* XM_GP_PORT 32 bit r/w General Purpose Port Register */
+enum {
+ XM_GP_ANIP = 1<<6, /* Bit 6: (ro) Auto-Neg. in progress */
+ XM_GP_FRC_INT = 1<<5, /* Bit 5: (sc) Force Interrupt */
+ XM_GP_RES_MAC = 1<<3, /* Bit 3: (sc) Reset MAC and FIFOs */
+ XM_GP_RES_STAT = 1<<2, /* Bit 2: (sc) Reset the statistics module */
+ XM_GP_INP_ASS = 1<<0, /* Bit 0: (ro) GP Input Pin asserted */
+};
+
+
+/* XM_IMSK 16 bit r/w Interrupt Mask Register */
+/* XM_ISRC 16 bit r/o Interrupt Status Register */
+enum {
+ XM_IS_LNK_AE = 1<<14, /* Bit 14: Link Asynchronous Event */
+ XM_IS_TX_ABORT = 1<<13, /* Bit 13: Transmit Abort, late Col. etc */
+ XM_IS_FRC_INT = 1<<12, /* Bit 12: Force INT bit set in GP */
+ XM_IS_INP_ASS = 1<<11, /* Bit 11: Input Asserted, GP bit 0 set */
+ XM_IS_LIPA_RC = 1<<10, /* Bit 10: Link Partner requests config */
+ XM_IS_RX_PAGE = 1<<9, /* Bit 9: Page Received */
+ XM_IS_TX_PAGE = 1<<8, /* Bit 8: Next Page Loaded for Transmit */
+ XM_IS_AND = 1<<7, /* Bit 7: Auto-Negotiation Done */
+ XM_IS_TSC_OV = 1<<6, /* Bit 6: Time Stamp Counter Overflow */
+ XM_IS_RXC_OV = 1<<5, /* Bit 5: Rx Counter Event Overflow */
+ XM_IS_TXC_OV = 1<<4, /* Bit 4: Tx Counter Event Overflow */
+ XM_IS_RXF_OV = 1<<3, /* Bit 3: Receive FIFO Overflow */
+ XM_IS_TXF_UR = 1<<2, /* Bit 2: Transmit FIFO Underrun */
+ XM_IS_TX_COMP = 1<<1, /* Bit 1: Frame Tx Complete */
+ XM_IS_RX_COMP = 1<<0, /* Bit 0: Frame Rx Complete */
+
+ XM_IMSK_DISABLE = 0xffff,
+};
+
+/* XM_HW_CFG 16 bit r/w Hardware Config Register */
+enum {
+ XM_HW_GEN_EOP = 1<<3, /* Bit 3: generate End of Packet pulse */
+ XM_HW_COM4SIG = 1<<2, /* Bit 2: use Comma Detect for Sig. Det.*/
+ XM_HW_GMII_MD = 1<<0, /* Bit 0: GMII Interface selected */
+};
+
+
+/* XM_TX_LO_WM 16 bit r/w Tx FIFO Low Water Mark */
+/* XM_TX_HI_WM 16 bit r/w Tx FIFO High Water Mark */
+#define XM_TX_WM_MSK 0x01ff /* Bit 9.. 0 Tx FIFO Watermark bits */
+
+/* XM_TX_THR 16 bit r/w Tx Request Threshold */
+/* XM_HT_THR 16 bit r/w Host Request Threshold */
+/* XM_RX_THR 16 bit r/w Rx Request Threshold */
+#define XM_THR_MSK 0x03ff /* Bit 10.. 0 Rx/Tx Request Threshold bits */
+
+
+/* XM_TX_STAT 32 bit r/o Tx Status LIFO Register */
+enum {
+ XM_ST_VALID = (1UL<<31), /* Bit 31: Status Valid */
+ XM_ST_BYTE_CNT = (0x3fffL<<17), /* Bit 30..17: Tx frame Length */
+ XM_ST_RETRY_CNT = (0x1fL<<12), /* Bit 16..12: Retry Count */
+ XM_ST_EX_COL = 1<<11, /* Bit 11: Excessive Collisions */
+ XM_ST_EX_DEF = 1<<10, /* Bit 10: Excessive Deferral */
+ XM_ST_BURST = 1<<9, /* Bit 9: p. xmitted in burst md*/
+	XM_ST_DEFER	= 1<<8,	/* Bit 8: packet was deferred */
+ XM_ST_BC = 1<<7, /* Bit 7: Broadcast packet */
+ XM_ST_MC = 1<<6, /* Bit 6: Multicast packet */
+ XM_ST_UC = 1<<5, /* Bit 5: Unicast packet */
+ XM_ST_TX_UR = 1<<4, /* Bit 4: FIFO Underrun occurred */
+ XM_ST_CS_ERR = 1<<3, /* Bit 3: Carrier Sense Error */
+ XM_ST_LAT_COL = 1<<2, /* Bit 2: Late Collision Error */
+ XM_ST_MUL_COL = 1<<1, /* Bit 1: Multiple Collisions */
+ XM_ST_SGN_COL = 1<<0, /* Bit 0: Single Collision */
+};
+
+/* XM_RX_LO_WM 16 bit r/w Receive Low Water Mark */
+/* XM_RX_HI_WM 16 bit r/w Receive High Water Mark */
+#define XM_RX_WM_MSK 0x03ff /* Bit 11.. 0: Rx FIFO Watermark bits */
+
+
+/* XM_DEV_ID 32 bit r/o Device ID Register */
+#define XM_DEV_OUI (0x00ffffffUL<<8) /* Bit 31..8: Device OUI */
+#define XM_DEV_REV (0x07L << 5) /* Bit 7..5: Chip Rev Num */
+
+
+/* XM_MODE 32 bit r/w Mode Register */
+enum {
+ XM_MD_ENA_REJ = 1<<26, /* Bit 26: Enable Frame Reject */
+ XM_MD_SPOE_E = 1<<25, /* Bit 25: Send Pause on Edge */
+ /* extern generated */
+ XM_MD_TX_REP = 1<<24, /* Bit 24: Transmit Repeater Mode */
+ XM_MD_SPOFF_I = 1<<23, /* Bit 23: Send Pause on FIFO full */
+ /* intern generated */
+ XM_MD_LE_STW = 1<<22, /* Bit 22: Rx Stat Word in Little Endian */
+ XM_MD_TX_CONT = 1<<21, /* Bit 21: Send Continuous */
+ XM_MD_TX_PAUSE = 1<<20, /* Bit 20: (sc) Send Pause Frame */
+ XM_MD_ATS = 1<<19, /* Bit 19: Append Time Stamp */
+ XM_MD_SPOL_I = 1<<18, /* Bit 18: Send Pause on Low */
+ /* intern generated */
+ XM_MD_SPOH_I = 1<<17, /* Bit 17: Send Pause on High */
+ /* intern generated */
+ XM_MD_CAP = 1<<16, /* Bit 16: Check Address Pair */
+ XM_MD_ENA_HASH = 1<<15, /* Bit 15: Enable Hashing */
+ XM_MD_CSA = 1<<14, /* Bit 14: Check Station Address */
+ XM_MD_CAA = 1<<13, /* Bit 13: Check Address Array */
+ XM_MD_RX_MCTRL = 1<<12, /* Bit 12: Rx MAC Control Frame */
+ XM_MD_RX_RUNT = 1<<11, /* Bit 11: Rx Runt Frames */
+ XM_MD_RX_IRLE = 1<<10, /* Bit 10: Rx in Range Len Err Frame */
+ XM_MD_RX_LONG = 1<<9, /* Bit 9: Rx Long Frame */
+ XM_MD_RX_CRCE = 1<<8, /* Bit 8: Rx CRC Error Frame */
+ XM_MD_RX_ERR = 1<<7, /* Bit 7: Rx Error Frame */
+ XM_MD_DIS_UC = 1<<6, /* Bit 6: Disable Rx Unicast */
+ XM_MD_DIS_MC = 1<<5, /* Bit 5: Disable Rx Multicast */
+ XM_MD_DIS_BC = 1<<4, /* Bit 4: Disable Rx Broadcast */
+ XM_MD_ENA_PROM = 1<<3, /* Bit 3: Enable Promiscuous */
+ XM_MD_ENA_BE = 1<<2, /* Bit 2: Enable Big Endian */
+ XM_MD_FTF = 1<<1, /* Bit 1: (sc) Flush Tx FIFO */
+ XM_MD_FRF = 1<<0, /* Bit 0: (sc) Flush Rx FIFO */
+};
+
+#define XM_PAUSE_MODE (XM_MD_SPOE_E | XM_MD_SPOL_I | XM_MD_SPOH_I)
+#define XM_DEF_MODE (XM_MD_RX_RUNT | XM_MD_RX_IRLE | XM_MD_RX_LONG |\
+ XM_MD_RX_CRCE | XM_MD_RX_ERR | XM_MD_CSA)
+
+/* XM_STAT_CMD 16 bit r/w Statistics Command Register */
+enum {
+ XM_SC_SNP_RXC = 1<<5, /* Bit 5: (sc) Snap Rx Counters */
+ XM_SC_SNP_TXC = 1<<4, /* Bit 4: (sc) Snap Tx Counters */
+ XM_SC_CP_RXC = 1<<3, /* Bit 3: Copy Rx Counters Continuously */
+ XM_SC_CP_TXC = 1<<2, /* Bit 2: Copy Tx Counters Continuously */
+ XM_SC_CLR_RXC = 1<<1, /* Bit 1: (sc) Clear Rx Counters */
+ XM_SC_CLR_TXC = 1<<0, /* Bit 0: (sc) Clear Tx Counters */
+};
+
+
+/* XM_RX_CNT_EV 32 bit r/o Rx Counter Event Register */
+/* XM_RX_EV_MSK 32 bit r/w Rx Counter Event Mask */
+enum {
+ XMR_MAX_SZ_OV = 1<<31, /* Bit 31: 1024-MaxSize Rx Cnt Ov*/
+ XMR_1023B_OV = 1<<30, /* Bit 30: 512-1023Byte Rx Cnt Ov*/
+ XMR_511B_OV = 1<<29, /* Bit 29: 256-511 Byte Rx Cnt Ov*/
+ XMR_255B_OV = 1<<28, /* Bit 28: 128-255 Byte Rx Cnt Ov*/
+ XMR_127B_OV = 1<<27, /* Bit 27: 65-127 Byte Rx Cnt Ov */
+ XMR_64B_OV = 1<<26, /* Bit 26: 64 Byte Rx Cnt Ov */
+ XMR_UTIL_OV = 1<<25, /* Bit 25: Rx Util Cnt Overflow */
+ XMR_UTIL_UR = 1<<24, /* Bit 24: Rx Util Cnt Underrun */
+ XMR_CEX_ERR_OV = 1<<23, /* Bit 23: CEXT Err Cnt Ov */
+ XMR_FCS_ERR_OV = 1<<21, /* Bit 21: Rx FCS Error Cnt Ov */
+ XMR_LNG_ERR_OV = 1<<20, /* Bit 20: Rx too Long Err Cnt Ov*/
+ XMR_RUNT_OV = 1<<19, /* Bit 19: Runt Event Cnt Ov */
+ XMR_SHT_ERR_OV = 1<<18, /* Bit 18: Rx Short Ev Err Cnt Ov*/
+ XMR_SYM_ERR_OV = 1<<17, /* Bit 17: Rx Sym Err Cnt Ov */
+ XMR_CAR_ERR_OV = 1<<15, /* Bit 15: Rx Carr Ev Err Cnt Ov */
+ XMR_JAB_PKT_OV = 1<<14, /* Bit 14: Rx Jabb Packet Cnt Ov */
+ XMR_FIFO_OV = 1<<13, /* Bit 13: Rx FIFO Ov Ev Cnt Ov */
+ XMR_FRA_ERR_OV = 1<<12, /* Bit 12: Rx Framing Err Cnt Ov */
+ XMR_FMISS_OV = 1<<11, /* Bit 11: Rx Missed Ev Cnt Ov */
+ XMR_BURST = 1<<10, /* Bit 10: Rx Burst Event Cnt Ov */
+ XMR_INV_MOC = 1<<9, /* Bit 9: Rx with inv. MAC OC Ov*/
+ XMR_INV_MP = 1<<8, /* Bit 8: Rx inv Pause Frame Ov */
+ XMR_MCTRL_OV = 1<<7, /* Bit 7: Rx MAC Ctrl-F Cnt Ov */
+ XMR_MPAUSE_OV = 1<<6, /* Bit 6: Rx Pause MAC Ctrl-F Ov*/
+ XMR_UC_OK_OV = 1<<5, /* Bit 5: Rx Unicast Frame CntOv*/
+ XMR_MC_OK_OV = 1<<4, /* Bit 4: Rx Multicast Cnt Ov */
+ XMR_BC_OK_OV = 1<<3, /* Bit 3: Rx Broadcast Cnt Ov */
+ XMR_OK_LO_OV = 1<<2, /* Bit 2: Octets Rx OK Low CntOv*/
+ XMR_OK_HI_OV = 1<<1, /* Bit 1: Octets Rx OK Hi Cnt Ov*/
+ XMR_OK_OV = 1<<0, /* Bit 0: Frames Received Ok Ov */
+};
+
+#define XMR_DEF_MSK (XMR_OK_LO_OV | XMR_OK_HI_OV)
+
+/* XM_TX_CNT_EV 32 bit r/o Tx Counter Event Register */
+/* XM_TX_EV_MSK 32 bit r/w Tx Counter Event Mask */
+enum {
+ XMT_MAX_SZ_OV = 1<<25, /* Bit 25: 1024-MaxSize Tx Cnt Ov*/
+ XMT_1023B_OV = 1<<24, /* Bit 24: 512-1023Byte Tx Cnt Ov*/
+ XMT_511B_OV = 1<<23, /* Bit 23: 256-511 Byte Tx Cnt Ov*/
+ XMT_255B_OV = 1<<22, /* Bit 22: 128-255 Byte Tx Cnt Ov*/
+ XMT_127B_OV = 1<<21, /* Bit 21: 65-127 Byte Tx Cnt Ov */
+ XMT_64B_OV = 1<<20, /* Bit 20: 64 Byte Tx Cnt Ov */
+ XMT_UTIL_OV = 1<<19, /* Bit 19: Tx Util Cnt Overflow */
+ XMT_UTIL_UR = 1<<18, /* Bit 18: Tx Util Cnt Underrun */
+ XMT_CS_ERR_OV = 1<<17, /* Bit 17: Tx Carr Sen Err Cnt Ov*/
+ XMT_FIFO_UR_OV = 1<<16, /* Bit 16: Tx FIFO Ur Ev Cnt Ov */
+	XMT_EX_DEF_OV	= 1<<15, /* Bit 15: Tx Ex Deferral Cnt Ov */
+ XMT_DEF = 1<<14, /* Bit 14: Tx Deferred Cnt Ov */
+ XMT_LAT_COL_OV = 1<<13, /* Bit 13: Tx Late Col Cnt Ov */
+	XMT_ABO_COL_OV	= 1<<12, /* Bit 12: Tx abort due to Ex Col Ov */
+ XMT_MUL_COL_OV = 1<<11, /* Bit 11: Tx Mult Col Cnt Ov */
+ XMT_SNG_COL = 1<<10, /* Bit 10: Tx Single Col Cnt Ov */
+ XMT_MCTRL_OV = 1<<9, /* Bit 9: Tx MAC Ctrl Counter Ov*/
+ XMT_MPAUSE = 1<<8, /* Bit 8: Tx Pause MAC Ctrl-F Ov*/
+ XMT_BURST = 1<<7, /* Bit 7: Tx Burst Event Cnt Ov */
+ XMT_LONG = 1<<6, /* Bit 6: Tx Long Frame Cnt Ov */
+ XMT_UC_OK_OV = 1<<5, /* Bit 5: Tx Unicast Cnt Ov */
+ XMT_MC_OK_OV = 1<<4, /* Bit 4: Tx Multicast Cnt Ov */
+ XMT_BC_OK_OV = 1<<3, /* Bit 3: Tx Broadcast Cnt Ov */
+ XMT_OK_LO_OV = 1<<2, /* Bit 2: Octets Tx OK Low CntOv*/
+ XMT_OK_HI_OV = 1<<1, /* Bit 1: Octets Tx OK Hi Cnt Ov*/
+ XMT_OK_OV = 1<<0, /* Bit 0: Frames Tx Ok Ov */
+};
+
+#define XMT_DEF_MSK (XMT_OK_LO_OV | XMT_OK_HI_OV)
+
+struct skge_rx_desc {
+ u32 control;
+ u32 next_offset;
+ u32 dma_lo;
+ u32 dma_hi;
+ u32 status;
+ u32 timestamp;
+ u16 csum2;
+ u16 csum1;
+ u16 csum2_start;
+ u16 csum1_start;
+};
+
+struct skge_tx_desc {
+ u32 control;
+ u32 next_offset;
+ u32 dma_lo;
+ u32 dma_hi;
+ u32 status;
+ u32 csum_offs;
+ u16 csum_write;
+ u16 csum_start;
+ u32 rsvd;
+};
+
+struct skge_element {
+ struct skge_element *next;
+ void *desc;
+ struct sk_buff *skb;
+ DEFINE_DMA_UNMAP_ADDR(mapaddr);
+ DEFINE_DMA_UNMAP_LEN(maplen);
+};
+
+struct skge_ring {
+ struct skge_element *to_clean;
+ struct skge_element *to_use;
+ struct skge_element *start;
+ unsigned long count;
+};
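+
+/*
+ * The elements of a ring are linked into a circular list; roughly,
+ * to_use points at the next element to hand to the hardware and
+ * to_clean at the next element to reclaim once the hardware has
+ * finished with it (see the ring handling in skge.c).
+ */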
+
+
+struct skge_hw {
+ void __iomem *regs;
+ struct pci_dev *pdev;
+ spinlock_t hw_lock;
+ u32 intr_mask;
+ struct net_device *dev[2];
+
+ u8 chip_id;
+ u8 chip_rev;
+ u8 copper;
+ u8 ports;
+ u8 phy_type;
+
+ u32 ram_size;
+ u32 ram_offset;
+ u16 phy_addr;
+ spinlock_t phy_lock;
+ struct tasklet_struct phy_task;
+
+	char irq_name[]; /* skge@pci:0000:04:00.0 */
+};
+
+enum pause_control {
+ FLOW_MODE_NONE = 1, /* No Flow-Control */
+ FLOW_MODE_LOC_SEND = 2, /* Local station sends PAUSE */
+ FLOW_MODE_SYMMETRIC = 3, /* Both stations may send PAUSE */
+ FLOW_MODE_SYM_OR_REM = 4, /* Both stations may send PAUSE or
+ * just the remote station may send PAUSE
+ */
+};
+
+enum pause_status {
+	FLOW_STAT_INDETERMINATED = 0,	/* indeterminate */
+ FLOW_STAT_NONE, /* No Flow Control */
+ FLOW_STAT_REM_SEND, /* Remote Station sends PAUSE */
+ FLOW_STAT_LOC_SEND, /* Local station sends PAUSE */
+	FLOW_STAT_SYMMETRIC,		/* Both stations may send PAUSE */
+};
+
+
+struct skge_port {
+ struct skge_hw *hw;
+ struct net_device *netdev;
+ struct napi_struct napi;
+ int port;
+ u32 msg_enable;
+
+ struct skge_ring tx_ring;
+
+ struct skge_ring rx_ring ____cacheline_aligned_in_smp;
+ unsigned int rx_buf_size;
+
+ struct timer_list link_timer;
+ enum pause_control flow_control;
+ enum pause_status flow_status;
+ u8 blink_on;
+ u8 wol;
+ u8 autoneg; /* AUTONEG_ENABLE, AUTONEG_DISABLE */
+ u8 duplex; /* DUPLEX_HALF, DUPLEX_FULL */
+ u16 speed; /* SPEED_1000, SPEED_100, ... */
+ u32 advertising;
+
+ void *mem; /* PCI memory for rings */
+ dma_addr_t dma;
+ unsigned long mem_size;
+#ifdef CONFIG_SKGE_DEBUG
+ struct dentry *debugfs;
+#endif
+};
+
+
+/* Register accessor for memory mapped device */
+static inline u32 skge_read32(const struct skge_hw *hw, int reg)
+{
+ return readl(hw->regs + reg);
+}
+
+static inline u16 skge_read16(const struct skge_hw *hw, int reg)
+{
+ return readw(hw->regs + reg);
+}
+
+static inline u8 skge_read8(const struct skge_hw *hw, int reg)
+{
+ return readb(hw->regs + reg);
+}
+
+static inline void skge_write32(const struct skge_hw *hw, int reg, u32 val)
+{
+ writel(val, hw->regs + reg);
+}
+
+static inline void skge_write16(const struct skge_hw *hw, int reg, u16 val)
+{
+ writew(val, hw->regs + reg);
+}
+
+static inline void skge_write8(const struct skge_hw *hw, int reg, u8 val)
+{
+ writeb(val, hw->regs + reg);
+}
+
+/* MAC Related Registers inside the device. */
+#define SK_REG(port,reg) (((port)<<7)+(u16)(reg))
+#define SK_XMAC_REG(port, reg) \
+ ((BASE_XMAC_1 + (port) * (BASE_XMAC_2 - BASE_XMAC_1)) | (reg) << 1)
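+
+/*
+ * SK_REG() picks the per-port copy of a MAC related register; by
+ * construction the two ports' register blocks are 0x80 bytes apart.
+ * The XMAC is only reachable 16 bits at a time, hence the << 1 in
+ * SK_XMAC_REG() and the pairs of 16-bit accesses used by xm_read32()
+ * and xm_write32() below.
+ */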
+
+static inline u32 xm_read32(const struct skge_hw *hw, int port, int reg)
+{
+ u32 v;
+ v = skge_read16(hw, SK_XMAC_REG(port, reg));
+ v |= (u32)skge_read16(hw, SK_XMAC_REG(port, reg+2)) << 16;
+ return v;
+}
+
+static inline u16 xm_read16(const struct skge_hw *hw, int port, int reg)
+{
+ return skge_read16(hw, SK_XMAC_REG(port,reg));
+}
+
+static inline void xm_write32(const struct skge_hw *hw, int port, int r, u32 v)
+{
+ skge_write16(hw, SK_XMAC_REG(port,r), v & 0xffff);
+ skge_write16(hw, SK_XMAC_REG(port,r+2), v >> 16);
+}
+
+static inline void xm_write16(const struct skge_hw *hw, int port, int r, u16 v)
+{
+ skge_write16(hw, SK_XMAC_REG(port,r), v);
+}
+
+static inline void xm_outhash(const struct skge_hw *hw, int port, int reg,
+ const u8 *hash)
+{
+ xm_write16(hw, port, reg, (u16)hash[0] | ((u16)hash[1] << 8));
+ xm_write16(hw, port, reg+2, (u16)hash[2] | ((u16)hash[3] << 8));
+ xm_write16(hw, port, reg+4, (u16)hash[4] | ((u16)hash[5] << 8));
+ xm_write16(hw, port, reg+6, (u16)hash[6] | ((u16)hash[7] << 8));
+}
+
+static inline void xm_outaddr(const struct skge_hw *hw, int port, int reg,
+ const u8 *addr)
+{
+ xm_write16(hw, port, reg, (u16)addr[0] | ((u16)addr[1] << 8));
+ xm_write16(hw, port, reg+2, (u16)addr[2] | ((u16)addr[3] << 8));
+ xm_write16(hw, port, reg+4, (u16)addr[4] | ((u16)addr[5] << 8));
+}
+
+#define SK_GMAC_REG(port,reg) \
+ (BASE_GMAC_1 + (port) * (BASE_GMAC_2-BASE_GMAC_1) + (reg))
+
+static inline u16 gma_read16(const struct skge_hw *hw, int port, int reg)
+{
+ return skge_read16(hw, SK_GMAC_REG(port,reg));
+}
+
+static inline u32 gma_read32(const struct skge_hw *hw, int port, int reg)
+{
+ return (u32) skge_read16(hw, SK_GMAC_REG(port,reg))
+ | ((u32)skge_read16(hw, SK_GMAC_REG(port,reg+4)) << 16);
+}
+
+static inline void gma_write16(const struct skge_hw *hw, int port, int r, u16 v)
+{
+ skge_write16(hw, SK_GMAC_REG(port,r), v);
+}
+
+static inline void gma_set_addr(struct skge_hw *hw, int port, int reg,
+ const u8 *addr)
+{
+ gma_write16(hw, port, reg, (u16) addr[0] | ((u16) addr[1] << 8));
+ gma_write16(hw, port, reg+4,(u16) addr[2] | ((u16) addr[3] << 8));
+ gma_write16(hw, port, reg+8,(u16) addr[4] | ((u16) addr[5] << 8));
+}
+
+#endif
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
new file mode 100644
index 000000000..07720841a
--- /dev/null
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -0,0 +1,5164 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * New driver for Marvell Yukon 2 chipset.
+ * Based on earlier sk98lin, and skge driver.
+ *
+ * This driver intentionally does not support all the features
+ * of the original driver such as link fail-over and link management because
+ * those should be done at higher levels.
+ *
+ * Copyright (C) 2005 Stephen Hemminger <shemminger@osdl.org>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/crc32.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/ip.h>
+#include <linux/slab.h>
+#include <net/ip.h>
+#include <linux/tcp.h>
+#include <linux/in.h>
+#include <linux/delay.h>
+#include <linux/workqueue.h>
+#include <linux/if_vlan.h>
+#include <linux/prefetch.h>
+#include <linux/debugfs.h>
+#include <linux/mii.h>
+#include <linux/of_net.h>
+#include <linux/dmi.h>
+
+#include <asm/irq.h>
+
+#include "sky2.h"
+
+#define DRV_NAME "sky2"
+#define DRV_VERSION "1.30"
+
+/*
+ * The Yukon II chipset takes 64 bit command blocks (called list elements)
+ * that are organized into three different rings (receive, transmit and
+ * status), similar to Tigon3.
+ */
+
+#define RX_LE_SIZE 1024
+#define RX_LE_BYTES (RX_LE_SIZE*sizeof(struct sky2_rx_le))
+#define RX_MAX_PENDING (RX_LE_SIZE/6 - 2)
+#define RX_DEF_PENDING RX_MAX_PENDING
+
+/* This is the worst case number of transmit list elements for a single skb:
+ * VLAN:GSO + CKSUM + Data + skb_frags * DMA
+ */
+#define MAX_SKB_TX_LE (2 + (sizeof(dma_addr_t)/sizeof(u32))*(MAX_SKB_FRAGS+1))
+#define TX_MIN_PENDING (MAX_SKB_TX_LE+1)
+#define TX_MAX_PENDING 1024
+#define TX_DEF_PENDING 63
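+
+/* For example, with a 64-bit dma_addr_t and the usual MAX_SKB_FRAGS of 17,
+ * MAX_SKB_TX_LE above works out to 2 + 2 * (17 + 1) = 38 list elements per
+ * skb (the exact figure depends on the kernel configuration).
+ */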
+
+#define TX_WATCHDOG (5 * HZ)
+#define PHY_RETRIES 1000
+
+#define SKY2_EEPROM_MAGIC 0x9955aabb
+
+#define RING_NEXT(x, s) (((x)+1) & ((s)-1))
+
+static const u32 default_msg =
+ NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
+ | NETIF_MSG_TIMER | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR
+ | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;
+
+static int debug = -1; /* defaults above */
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+
+static int copybreak __read_mostly = 128;
+module_param(copybreak, int, 0);
+MODULE_PARM_DESC(copybreak, "Receive copy threshold");
+
+static int disable_msi = -1;
+module_param(disable_msi, int, 0);
+MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
+
+static int legacy_pme = 0;
+module_param(legacy_pme, int, 0);
+MODULE_PARM_DESC(legacy_pme, "Legacy power management");
+
+static const struct pci_device_id sky2_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) }, /* SK-9Sxx */
+ { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) }, /* SK-9Exx */
+ { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E01) }, /* SK-9E21M */
+ { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b00) }, /* DGE-560T */
+ { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4001) }, /* DGE-550SX */
+ { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4B02) }, /* DGE-560SX */
+ { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4B03) }, /* DGE-550T */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4340) }, /* 88E8021 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4341) }, /* 88E8022 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4342) }, /* 88E8061 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4343) }, /* 88E8062 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4344) }, /* 88E8021 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4345) }, /* 88E8022 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4346) }, /* 88E8061 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4347) }, /* 88E8062 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4350) }, /* 88E8035 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4351) }, /* 88E8036 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4352) }, /* 88E8038 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4353) }, /* 88E8039 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4354) }, /* 88E8040 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4355) }, /* 88E8040T */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4356) }, /* 88EC033 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4357) }, /* 88E8042 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x435A) }, /* 88E8048 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4360) }, /* 88E8052 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4361) }, /* 88E8050 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4362) }, /* 88E8053 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4363) }, /* 88E8055 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4364) }, /* 88E8056 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4365) }, /* 88E8070 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4366) }, /* 88EC036 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4367) }, /* 88EC032 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4368) }, /* 88EC034 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4369) }, /* 88EC042 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436A) }, /* 88E8058 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436B) }, /* 88E8071 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436C) }, /* 88E8072 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436D) }, /* 88E8055 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4370) }, /* 88E8075 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4380) }, /* 88E8057 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4381) }, /* 88E8059 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4382) }, /* 88E8079 */
+ { 0 }
+};
+
+MODULE_DEVICE_TABLE(pci, sky2_id_table);
+
+/* Avoid conditionals by using array */
+static const unsigned txqaddr[] = { Q_XA1, Q_XA2 };
+static const unsigned rxqaddr[] = { Q_R1, Q_R2 };
+static const u32 portirq_msk[] = { Y2_IS_PORT_1, Y2_IS_PORT_2 };
+
+static void sky2_set_multicast(struct net_device *dev);
+static irqreturn_t sky2_intr(int irq, void *dev_id);
+
+/* Access to PHY via serial interconnect */
+static int gm_phy_write(struct sky2_hw *hw, unsigned port, u16 reg, u16 val)
+{
+ int i;
+
+ gma_write16(hw, port, GM_SMI_DATA, val);
+ gma_write16(hw, port, GM_SMI_CTRL,
+ GM_SMI_CT_PHY_AD(PHY_ADDR_MARV) | GM_SMI_CT_REG_AD(reg));
+
+ for (i = 0; i < PHY_RETRIES; i++) {
+ u16 ctrl = gma_read16(hw, port, GM_SMI_CTRL);
+ if (ctrl == 0xffff)
+ goto io_error;
+
+ if (!(ctrl & GM_SMI_CT_BUSY))
+ return 0;
+
+ udelay(10);
+ }
+
+ dev_warn(&hw->pdev->dev, "%s: phy write timeout\n", hw->dev[port]->name);
+ return -ETIMEDOUT;
+
+io_error:
+ dev_err(&hw->pdev->dev, "%s: phy I/O error\n", hw->dev[port]->name);
+ return -EIO;
+}
+
+static int __gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg, u16 *val)
+{
+ int i;
+
+ gma_write16(hw, port, GM_SMI_CTRL, GM_SMI_CT_PHY_AD(PHY_ADDR_MARV)
+ | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);
+
+ for (i = 0; i < PHY_RETRIES; i++) {
+ u16 ctrl = gma_read16(hw, port, GM_SMI_CTRL);
+ if (ctrl == 0xffff)
+ goto io_error;
+
+ if (ctrl & GM_SMI_CT_RD_VAL) {
+ *val = gma_read16(hw, port, GM_SMI_DATA);
+ return 0;
+ }
+
+ udelay(10);
+ }
+
+ dev_warn(&hw->pdev->dev, "%s: phy read timeout\n", hw->dev[port]->name);
+ return -ETIMEDOUT;
+io_error:
+ dev_err(&hw->pdev->dev, "%s: phy I/O error\n", hw->dev[port]->name);
+ return -EIO;
+}
+
+static inline u16 gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg)
+{
+ u16 v = 0;
+ __gm_phy_read(hw, port, reg, &v);
+ return v;
+}
+
+
+static void sky2_power_on(struct sky2_hw *hw)
+{
+ /* switch power to VCC (WA for VAUX problem) */
+ sky2_write8(hw, B0_POWER_CTRL,
+ PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);
+
+	/* disable Core Clock Division */
+ sky2_write32(hw, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS);
+
+ if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > CHIP_REV_YU_XL_A1)
+ /* enable bits are inverted */
+ sky2_write8(hw, B2_Y2_CLK_GATE,
+ Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
+ Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
+ Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS);
+ else
+ sky2_write8(hw, B2_Y2_CLK_GATE, 0);
+
+ if (hw->flags & SKY2_HW_ADV_POWER_CTL) {
+ u32 reg;
+
+ sky2_pci_write32(hw, PCI_DEV_REG3, 0);
+
+ reg = sky2_pci_read32(hw, PCI_DEV_REG4);
+ /* set all bits to 0 except bits 15..12 and 8 */
+ reg &= P_ASPM_CONTROL_MSK;
+ sky2_pci_write32(hw, PCI_DEV_REG4, reg);
+
+ reg = sky2_pci_read32(hw, PCI_DEV_REG5);
+ /* set all bits to 0 except bits 28 & 27 */
+ reg &= P_CTL_TIM_VMAIN_AV_MSK;
+ sky2_pci_write32(hw, PCI_DEV_REG5, reg);
+
+ sky2_pci_write32(hw, PCI_CFG_REG_1, 0);
+
+ sky2_write16(hw, B0_CTST, Y2_HW_WOL_ON);
+
+ /* Enable workaround for dev 4.107 on Yukon-Ultra & Extreme */
+ reg = sky2_read32(hw, B2_GP_IO);
+ reg |= GLB_GPIO_STAT_RACE_DIS;
+ sky2_write32(hw, B2_GP_IO, reg);
+
+ sky2_read32(hw, B2_GP_IO);
+ }
+
+ /* Turn on "driver loaded" LED */
+ sky2_write16(hw, B0_CTST, Y2_LED_STAT_ON);
+}
+
+static void sky2_power_aux(struct sky2_hw *hw)
+{
+ if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > CHIP_REV_YU_XL_A1)
+ sky2_write8(hw, B2_Y2_CLK_GATE, 0);
+ else
+ /* enable bits are inverted */
+ sky2_write8(hw, B2_Y2_CLK_GATE,
+ Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
+ Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
+ Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS);
+
+ /* switch power to VAUX if supported and PME from D3cold */
+ if ( (sky2_read32(hw, B0_CTST) & Y2_VAUX_AVAIL) &&
+ pci_pme_capable(hw->pdev, PCI_D3cold))
+ sky2_write8(hw, B0_POWER_CTRL,
+ (PC_VAUX_ENA | PC_VCC_ENA |
+ PC_VAUX_ON | PC_VCC_OFF));
+
+	/* turn off "driver loaded" LED */
+ sky2_write16(hw, B0_CTST, Y2_LED_STAT_OFF);
+}
+
+static void sky2_gmac_reset(struct sky2_hw *hw, unsigned port)
+{
+ u16 reg;
+
+ /* disable all GMAC IRQ's */
+ sky2_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0);
+
+ gma_write16(hw, port, GM_MC_ADDR_H1, 0); /* clear MC hash */
+ gma_write16(hw, port, GM_MC_ADDR_H2, 0);
+ gma_write16(hw, port, GM_MC_ADDR_H3, 0);
+ gma_write16(hw, port, GM_MC_ADDR_H4, 0);
+
+ reg = gma_read16(hw, port, GM_RX_CTRL);
+ reg |= GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA;
+ gma_write16(hw, port, GM_RX_CTRL, reg);
+}
+
+/* flow control to advertise bits */
+static const u16 copper_fc_adv[] = {
+ [FC_NONE] = 0,
+ [FC_TX] = PHY_M_AN_ASP,
+ [FC_RX] = PHY_M_AN_PC,
+ [FC_BOTH] = PHY_M_AN_PC | PHY_M_AN_ASP,
+};
+
+/* flow control to advertise bits when using 1000BaseX */
+static const u16 fiber_fc_adv[] = {
+ [FC_NONE] = PHY_M_P_NO_PAUSE_X,
+ [FC_TX] = PHY_M_P_ASYM_MD_X,
+ [FC_RX] = PHY_M_P_SYM_MD_X,
+ [FC_BOTH] = PHY_M_P_BOTH_MD_X,
+};
+
+/* flow control to GMA disable bits */
+static const u16 gm_fc_disable[] = {
+ [FC_NONE] = GM_GPCR_FC_RX_DIS | GM_GPCR_FC_TX_DIS,
+ [FC_TX] = GM_GPCR_FC_RX_DIS,
+ [FC_RX] = GM_GPCR_FC_TX_DIS,
+ [FC_BOTH] = 0,
+};
+
+
+static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
+{
+ struct sky2_port *sky2 = netdev_priv(hw->dev[port]);
+ u16 ctrl, ct1000, adv, pg, ledctrl, ledover, reg;
+
+ if ( (sky2->flags & SKY2_FLAG_AUTO_SPEED) &&
+ !(hw->flags & SKY2_HW_NEWER_PHY)) {
+ u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL);
+
+ ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK |
+ PHY_M_EC_MAC_S_MSK);
+ ectrl |= PHY_M_EC_MAC_S(MAC_TX_CLK_25_MHZ);
+
+ /* on PHY 88E1040 Rev.D0 (and newer) downshift control changed */
+ if (hw->chip_id == CHIP_ID_YUKON_EC)
+ /* set downshift counter to 3x and enable downshift */
+ ectrl |= PHY_M_EC_DSC_2(2) | PHY_M_EC_DOWN_S_ENA;
+ else
+ /* set master & slave downshift counter to 1x */
+ ectrl |= PHY_M_EC_M_DSC(0) | PHY_M_EC_S_DSC(1);
+
+ gm_phy_write(hw, port, PHY_MARV_EXT_CTRL, ectrl);
+ }
+
+ ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
+ if (sky2_is_copper(hw)) {
+ if (!(hw->flags & SKY2_HW_GIGABIT)) {
+ /* enable automatic crossover */
+ ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO) >> 1;
+
+ if (hw->chip_id == CHIP_ID_YUKON_FE_P &&
+ hw->chip_rev == CHIP_REV_YU_FE2_A0) {
+ u16 spec;
+
+ /* Enable Class A driver for FE+ A0 */
+ spec = gm_phy_read(hw, port, PHY_MARV_FE_SPEC_2);
+ spec |= PHY_M_FESC_SEL_CL_A;
+ gm_phy_write(hw, port, PHY_MARV_FE_SPEC_2, spec);
+ }
+ } else {
+ /* disable energy detect */
+ ctrl &= ~PHY_M_PC_EN_DET_MSK;
+
+ /* enable automatic crossover */
+ ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO);
+
+ /* downshift on PHY 88E1112 and 88E1149 is changed */
+ if ( (sky2->flags & SKY2_FLAG_AUTO_SPEED) &&
+ (hw->flags & SKY2_HW_NEWER_PHY)) {
+ /* set downshift counter to 3x and enable downshift */
+ ctrl &= ~PHY_M_PC_DSC_MSK;
+ ctrl |= PHY_M_PC_DSC(2) | PHY_M_PC_DOWN_S_ENA;
+ }
+ }
+ } else {
+ /* workaround for deviation #4.88 (CRC errors) */
+ /* disable Automatic Crossover */
+
+ ctrl &= ~PHY_M_PC_MDIX_MSK;
+ }
+
+ gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
+
+ /* special setup for PHY 88E1112 Fiber */
+ if (hw->chip_id == CHIP_ID_YUKON_XL && (hw->flags & SKY2_HW_FIBRE_PHY)) {
+ pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
+
+ /* Fiber: select 1000BASE-X only mode MAC Specific Ctrl Reg. */
+ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 2);
+ ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
+ ctrl &= ~PHY_M_MAC_MD_MSK;
+ ctrl |= PHY_M_MAC_MODE_SEL(PHY_M_MAC_MD_1000BX);
+ gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
+
+ if (hw->pmd_type == 'P') {
+ /* select page 1 to access Fiber registers */
+ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 1);
+
+ /* for SFP-module set SIGDET polarity to low */
+ ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
+ ctrl |= PHY_M_FIB_SIGD_POL;
+ gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
+ }
+
+ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
+ }
+
+ ctrl = PHY_CT_RESET;
+ ct1000 = 0;
+ adv = PHY_AN_CSMA;
+ reg = 0;
+
+ if (sky2->flags & SKY2_FLAG_AUTO_SPEED) {
+ if (sky2_is_copper(hw)) {
+ if (sky2->advertising & ADVERTISED_1000baseT_Full)
+ ct1000 |= PHY_M_1000C_AFD;
+ if (sky2->advertising & ADVERTISED_1000baseT_Half)
+ ct1000 |= PHY_M_1000C_AHD;
+ if (sky2->advertising & ADVERTISED_100baseT_Full)
+ adv |= PHY_M_AN_100_FD;
+ if (sky2->advertising & ADVERTISED_100baseT_Half)
+ adv |= PHY_M_AN_100_HD;
+ if (sky2->advertising & ADVERTISED_10baseT_Full)
+ adv |= PHY_M_AN_10_FD;
+ if (sky2->advertising & ADVERTISED_10baseT_Half)
+ adv |= PHY_M_AN_10_HD;
+
+ } else { /* special defines for FIBER (88E1040S only) */
+ if (sky2->advertising & ADVERTISED_1000baseT_Full)
+ adv |= PHY_M_AN_1000X_AFD;
+ if (sky2->advertising & ADVERTISED_1000baseT_Half)
+ adv |= PHY_M_AN_1000X_AHD;
+ }
+
+ /* Restart Auto-negotiation */
+ ctrl |= PHY_CT_ANE | PHY_CT_RE_CFG;
+ } else {
+ /* forced speed/duplex settings */
+ ct1000 = PHY_M_1000C_MSE;
+
+ /* Disable auto update for duplex flow control and duplex */
+ reg |= GM_GPCR_AU_DUP_DIS | GM_GPCR_AU_SPD_DIS;
+
+ switch (sky2->speed) {
+ case SPEED_1000:
+ ctrl |= PHY_CT_SP1000;
+ reg |= GM_GPCR_SPEED_1000;
+ break;
+ case SPEED_100:
+ ctrl |= PHY_CT_SP100;
+ reg |= GM_GPCR_SPEED_100;
+ break;
+ }
+
+ if (sky2->duplex == DUPLEX_FULL) {
+ reg |= GM_GPCR_DUP_FULL;
+ ctrl |= PHY_CT_DUP_MD;
+ } else if (sky2->speed < SPEED_1000)
+ sky2->flow_mode = FC_NONE;
+ }
+
+ if (sky2->flags & SKY2_FLAG_AUTO_PAUSE) {
+ if (sky2_is_copper(hw))
+ adv |= copper_fc_adv[sky2->flow_mode];
+ else
+ adv |= fiber_fc_adv[sky2->flow_mode];
+ } else {
+ reg |= GM_GPCR_AU_FCT_DIS;
+ reg |= gm_fc_disable[sky2->flow_mode];
+
+ /* Forward pause packets to GMAC? */
+ if (sky2->flow_mode & FC_RX)
+ sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON);
+ else
+ sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
+ }
+
+ gma_write16(hw, port, GM_GP_CTRL, reg);
+
+ if (hw->flags & SKY2_HW_GIGABIT)
+ gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, ct1000);
+
+ gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, adv);
+ gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);
+
+ /* Setup Phy LED's */
+ ledctrl = PHY_M_LED_PULS_DUR(PULS_170MS);
+ ledover = 0;
+
+ switch (hw->chip_id) {
+ case CHIP_ID_YUKON_FE:
+ /* on 88E3082 these bits are at 11..9 (shifted left) */
+ ledctrl |= PHY_M_LED_BLINK_RT(BLINK_84MS) << 1;
+
+ ctrl = gm_phy_read(hw, port, PHY_MARV_FE_LED_PAR);
+
+ /* delete ACT LED control bits */
+ ctrl &= ~PHY_M_FELP_LED1_MSK;
+ /* change ACT LED control to blink mode */
+ ctrl |= PHY_M_FELP_LED1_CTRL(LED_PAR_CTRL_ACT_BL);
+ gm_phy_write(hw, port, PHY_MARV_FE_LED_PAR, ctrl);
+ break;
+
+ case CHIP_ID_YUKON_FE_P:
+ /* Enable Link Partner Next Page */
+ ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
+ ctrl |= PHY_M_PC_ENA_LIP_NP;
+
+ /* disable Energy Detect and enable scrambler */
+ ctrl &= ~(PHY_M_PC_ENA_ENE_DT | PHY_M_PC_DIS_SCRAMB);
+ gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
+
+ /* set LED2 -> ACT, LED1 -> LINK, LED0 -> SPEED */
+ ctrl = PHY_M_FELP_LED2_CTRL(LED_PAR_CTRL_ACT_BL) |
+ PHY_M_FELP_LED1_CTRL(LED_PAR_CTRL_LINK) |
+ PHY_M_FELP_LED0_CTRL(LED_PAR_CTRL_SPEED);
+
+ gm_phy_write(hw, port, PHY_MARV_FE_LED_PAR, ctrl);
+ break;
+
+ case CHIP_ID_YUKON_XL:
+ pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
+
+ /* select page 3 to access LED control register */
+ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
+
+ /* set LED Function Control register */
+ gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
+ (PHY_M_LEDC_LOS_CTRL(1) | /* LINK/ACT */
+ PHY_M_LEDC_INIT_CTRL(7) | /* 10 Mbps */
+ PHY_M_LEDC_STA1_CTRL(7) | /* 100 Mbps */
+ PHY_M_LEDC_STA0_CTRL(7))); /* 1000 Mbps */
+
+ /* set Polarity Control register */
+ gm_phy_write(hw, port, PHY_MARV_PHY_STAT,
+ (PHY_M_POLC_LS1_P_MIX(4) |
+ PHY_M_POLC_IS0_P_MIX(4) |
+ PHY_M_POLC_LOS_CTRL(2) |
+ PHY_M_POLC_INIT_CTRL(2) |
+ PHY_M_POLC_STA1_CTRL(2) |
+ PHY_M_POLC_STA0_CTRL(2)));
+
+ /* restore page register */
+ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
+ break;
+
+ case CHIP_ID_YUKON_EC_U:
+ case CHIP_ID_YUKON_EX:
+ case CHIP_ID_YUKON_SUPR:
+ pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
+
+ /* select page 3 to access LED control register */
+ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
+
+ /* set LED Function Control register */
+ gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
+ (PHY_M_LEDC_LOS_CTRL(1) | /* LINK/ACT */
+ PHY_M_LEDC_INIT_CTRL(8) | /* 10 Mbps */
+ PHY_M_LEDC_STA1_CTRL(7) | /* 100 Mbps */
+ PHY_M_LEDC_STA0_CTRL(7)));/* 1000 Mbps */
+
+ /* set Blink Rate in LED Timer Control Register */
+ gm_phy_write(hw, port, PHY_MARV_INT_MASK,
+ ledctrl | PHY_M_LED_BLINK_RT(BLINK_84MS));
+ /* restore page register */
+ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
+ break;
+
+ default:
+ /* set Tx LED (LED_TX) to blink mode on Rx OR Tx activity */
+ ledctrl |= PHY_M_LED_BLINK_RT(BLINK_84MS) | PHY_M_LEDC_TX_CTRL;
+
+ /* turn off the Rx LED (LED_RX) */
+ ledover |= PHY_M_LED_MO_RX(MO_LED_OFF);
+ }
+
+ if (hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_UL_2) {
+ /* apply fixes in PHY AFE */
+ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 255);
+
+ /* increase differential signal amplitude in 10BASE-T */
+ gm_phy_write(hw, port, 0x18, 0xaa99);
+ gm_phy_write(hw, port, 0x17, 0x2011);
+
+ if (hw->chip_id == CHIP_ID_YUKON_EC_U) {
+ /* fix for IEEE A/B Symmetry failure in 1000BASE-T */
+ gm_phy_write(hw, port, 0x18, 0xa204);
+ gm_phy_write(hw, port, 0x17, 0x2002);
+ }
+
+ /* set page register to 0 */
+ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0);
+ } else if (hw->chip_id == CHIP_ID_YUKON_FE_P &&
+ hw->chip_rev == CHIP_REV_YU_FE2_A0) {
+ /* apply workaround for integrated resistors calibration */
+ gm_phy_write(hw, port, PHY_MARV_PAGE_ADDR, 17);
+ gm_phy_write(hw, port, PHY_MARV_PAGE_DATA, 0x3f60);
+ } else if (hw->chip_id == CHIP_ID_YUKON_OPT && hw->chip_rev == 0) {
+ /* apply fixes in PHY AFE */
+ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0x00ff);
+
+ /* apply RDAC termination workaround */
+ gm_phy_write(hw, port, 24, 0x2800);
+ gm_phy_write(hw, port, 23, 0x2001);
+
+ /* set page register back to 0 */
+ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0);
+ } else if (hw->chip_id != CHIP_ID_YUKON_EX &&
+ hw->chip_id < CHIP_ID_YUKON_SUPR) {
+ /* no effect on Yukon-XL */
+ gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl);
+
+ if (!(sky2->flags & SKY2_FLAG_AUTO_SPEED) ||
+ sky2->speed == SPEED_100) {
+ /* turn on 100 Mbps LED (LED_LINK100) */
+ ledover |= PHY_M_LED_MO_100(MO_LED_ON);
+ }
+
+ if (ledover)
+ gm_phy_write(hw, port, PHY_MARV_LED_OVER, ledover);
+
+ } else if (hw->chip_id == CHIP_ID_YUKON_PRM &&
+ (sky2_read8(hw, B2_MAC_CFG) & 0xf) == 0x7) {
+ int i;
+		/* This is a PHY register setup workaround copied from the vendor driver. */
+ static const struct {
+ u16 reg, val;
+ } eee_afe[] = {
+ { 0x156, 0x58ce },
+ { 0x153, 0x99eb },
+ { 0x141, 0x8064 },
+ /* { 0x155, 0x130b },*/
+ { 0x000, 0x0000 },
+ { 0x151, 0x8433 },
+ { 0x14b, 0x8c44 },
+ { 0x14c, 0x0f90 },
+ { 0x14f, 0x39aa },
+ /* { 0x154, 0x2f39 },*/
+ { 0x14d, 0xba33 },
+ { 0x144, 0x0048 },
+ { 0x152, 0x2010 },
+ /* { 0x158, 0x1223 },*/
+ { 0x140, 0x4444 },
+ { 0x154, 0x2f3b },
+ { 0x158, 0xb203 },
+ { 0x157, 0x2029 },
+ };
+
+ /* Start Workaround for OptimaEEE Rev.Z0 */
+ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0x00fb);
+
+ gm_phy_write(hw, port, 1, 0x4099);
+ gm_phy_write(hw, port, 3, 0x1120);
+ gm_phy_write(hw, port, 11, 0x113c);
+ gm_phy_write(hw, port, 14, 0x8100);
+ gm_phy_write(hw, port, 15, 0x112a);
+ gm_phy_write(hw, port, 17, 0x1008);
+
+ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0x00fc);
+ gm_phy_write(hw, port, 1, 0x20b0);
+
+ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0x00ff);
+
+ for (i = 0; i < ARRAY_SIZE(eee_afe); i++) {
+ /* apply AFE settings */
+ gm_phy_write(hw, port, 17, eee_afe[i].val);
+ gm_phy_write(hw, port, 16, eee_afe[i].reg | 1u<<13);
+ }
+
+ /* End Workaround for OptimaEEE */
+ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0);
+
+ /* Enable 10Base-Te (EEE) */
+ if (hw->chip_id >= CHIP_ID_YUKON_PRM) {
+ reg = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL);
+ gm_phy_write(hw, port, PHY_MARV_EXT_CTRL,
+ reg | PHY_M_10B_TE_ENABLE);
+ }
+ }
+
+ /* Enable phy interrupt on auto-negotiation complete (or link up) */
+ if (sky2->flags & SKY2_FLAG_AUTO_SPEED)
+ gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_AN_COMPL);
+ else
+ gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK);
+}
+
+static const u32 phy_power[] = { PCI_Y2_PHY1_POWD, PCI_Y2_PHY2_POWD };
+static const u32 coma_mode[] = { PCI_Y2_PHY1_COMA, PCI_Y2_PHY2_COMA };
+
+static void sky2_phy_power_up(struct sky2_hw *hw, unsigned port)
+{
+ u32 reg1;
+
+ sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
+ reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
+ reg1 &= ~phy_power[port];
+
+ if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > CHIP_REV_YU_XL_A1)
+ reg1 |= coma_mode[port];
+
+ sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
+ sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
+ sky2_pci_read32(hw, PCI_DEV_REG1);
+
+ if (hw->chip_id == CHIP_ID_YUKON_FE)
+ gm_phy_write(hw, port, PHY_MARV_CTRL, PHY_CT_ANE);
+ else if (hw->flags & SKY2_HW_ADV_POWER_CTL)
+ sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR);
+}
+
+static void sky2_phy_power_down(struct sky2_hw *hw, unsigned port)
+{
+ u32 reg1;
+ u16 ctrl;
+
+ /* release GPHY Control reset */
+ sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR);
+
+ /* release GMAC reset */
+ sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR);
+
+ if (hw->flags & SKY2_HW_NEWER_PHY) {
+ /* select page 2 to access MAC control register */
+ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 2);
+
+ ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
+ /* allow GMII Power Down */
+ ctrl &= ~PHY_M_MAC_GMIF_PUP;
+ gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
+
+ /* set page register back to 0 */
+ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0);
+ }
+
+ /* setup General Purpose Control Register */
+ gma_write16(hw, port, GM_GP_CTRL,
+ GM_GPCR_FL_PASS | GM_GPCR_SPEED_100 |
+ GM_GPCR_AU_DUP_DIS | GM_GPCR_AU_FCT_DIS |
+ GM_GPCR_AU_SPD_DIS);
+
+ if (hw->chip_id != CHIP_ID_YUKON_EC) {
+ if (hw->chip_id == CHIP_ID_YUKON_EC_U) {
+ /* select page 2 to access MAC control register */
+ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 2);
+
+ ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
+ /* enable Power Down */
+ ctrl |= PHY_M_PC_POW_D_ENA;
+ gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
+
+ /* set page register back to 0 */
+ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0);
+ }
+
+ /* set IEEE compatible Power Down Mode (dev. #4.99) */
+ gm_phy_write(hw, port, PHY_MARV_CTRL, PHY_CT_PDOWN);
+ }
+
+ sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
+ reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
+ reg1 |= phy_power[port]; /* set PHY to PowerDown/COMA Mode */
+ sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
+ sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
+}
+
+/* configure IPG according to used link speed */
+static void sky2_set_ipg(struct sky2_port *sky2)
+{
+ u16 reg;
+
+ reg = gma_read16(sky2->hw, sky2->port, GM_SERIAL_MODE);
+ reg &= ~GM_SMOD_IPG_MSK;
+ if (sky2->speed > SPEED_100)
+ reg |= IPG_DATA_VAL(IPG_DATA_DEF_1000);
+ else
+ reg |= IPG_DATA_VAL(IPG_DATA_DEF_10_100);
+ gma_write16(sky2->hw, sky2->port, GM_SERIAL_MODE, reg);
+}
+
+/* Enable Rx/Tx */
+static void sky2_enable_rx_tx(struct sky2_port *sky2)
+{
+ struct sky2_hw *hw = sky2->hw;
+ unsigned port = sky2->port;
+ u16 reg;
+
+ reg = gma_read16(hw, port, GM_GP_CTRL);
+ reg |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
+ gma_write16(hw, port, GM_GP_CTRL, reg);
+}
+
+/* Force a renegotiation */
+static void sky2_phy_reinit(struct sky2_port *sky2)
+{
+ spin_lock_bh(&sky2->phy_lock);
+ sky2_phy_init(sky2->hw, sky2->port);
+ sky2_enable_rx_tx(sky2);
+ spin_unlock_bh(&sky2->phy_lock);
+}
+
+/* Put device in state to listen for Wake On Lan */
+static void sky2_wol_init(struct sky2_port *sky2)
+{
+ struct sky2_hw *hw = sky2->hw;
+ unsigned port = sky2->port;
+ enum flow_control save_mode;
+ u16 ctrl;
+
+ /* Bring hardware out of reset */
+ sky2_write16(hw, B0_CTST, CS_RST_CLR);
+ sky2_write16(hw, SK_REG(port, GMAC_LINK_CTRL), GMLC_RST_CLR);
+
+ sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR);
+ sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR);
+
+ /* Force to 10/100
+ * sky2_reset will re-enable on resume
+ */
+ save_mode = sky2->flow_mode;
+ ctrl = sky2->advertising;
+
+ sky2->advertising &= ~(ADVERTISED_1000baseT_Half|ADVERTISED_1000baseT_Full);
+ sky2->flow_mode = FC_NONE;
+
+ spin_lock_bh(&sky2->phy_lock);
+ sky2_phy_power_up(hw, port);
+ sky2_phy_init(hw, port);
+ spin_unlock_bh(&sky2->phy_lock);
+
+ sky2->flow_mode = save_mode;
+ sky2->advertising = ctrl;
+
+ /* Set GMAC to no flow control and auto update for speed/duplex */
+ gma_write16(hw, port, GM_GP_CTRL,
+ GM_GPCR_FC_TX_DIS|GM_GPCR_TX_ENA|GM_GPCR_RX_ENA|
+ GM_GPCR_DUP_FULL|GM_GPCR_FC_RX_DIS|GM_GPCR_AU_FCT_DIS);
+
+ /* Set WOL address */
+ memcpy_toio(hw->regs + WOL_REGS(port, WOL_MAC_ADDR),
+ sky2->netdev->dev_addr, ETH_ALEN);
+
+ /* Turn on appropriate WOL control bits */
+ sky2_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), WOL_CTL_CLEAR_RESULT);
+ ctrl = 0;
+ if (sky2->wol & WAKE_PHY)
+ ctrl |= WOL_CTL_ENA_PME_ON_LINK_CHG|WOL_CTL_ENA_LINK_CHG_UNIT;
+ else
+ ctrl |= WOL_CTL_DIS_PME_ON_LINK_CHG|WOL_CTL_DIS_LINK_CHG_UNIT;
+
+ if (sky2->wol & WAKE_MAGIC)
+ ctrl |= WOL_CTL_ENA_PME_ON_MAGIC_PKT|WOL_CTL_ENA_MAGIC_PKT_UNIT;
+ else
+ ctrl |= WOL_CTL_DIS_PME_ON_MAGIC_PKT|WOL_CTL_DIS_MAGIC_PKT_UNIT;
+
+ ctrl |= WOL_CTL_DIS_PME_ON_PATTERN|WOL_CTL_DIS_PATTERN_UNIT;
+ sky2_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), ctrl);
+
+ /* Disable PiG firmware */
+ sky2_write16(hw, B0_CTST, Y2_HW_WOL_OFF);
+
+ /* Needed by some broken BIOSes, use PCI rather than PCI-e for WOL */
+ if (legacy_pme) {
+ u32 reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
+ reg1 |= PCI_Y2_PME_LEGACY;
+ sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
+ }
+
+ /* block receiver */
+ sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
+ sky2_read32(hw, B0_CTST);
+}
+
+static void sky2_set_tx_stfwd(struct sky2_hw *hw, unsigned port)
+{
+ struct net_device *dev = hw->dev[port];
+
+ if ( (hw->chip_id == CHIP_ID_YUKON_EX &&
+ hw->chip_rev != CHIP_REV_YU_EX_A0) ||
+ hw->chip_id >= CHIP_ID_YUKON_FE_P) {
+ /* Yukon-Extreme B0 and further Extreme devices */
+ sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_STFW_ENA);
+ } else if (dev->mtu > ETH_DATA_LEN) {
+ /* set Tx GMAC FIFO Almost Empty Threshold */
+ sky2_write32(hw, SK_REG(port, TX_GMF_AE_THR),
+ (ECU_JUMBO_WM << 16) | ECU_AE_THR);
+
+ sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_STFW_DIS);
+ } else
+ sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_STFW_ENA);
+}
+
+static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
+{
+ struct sky2_port *sky2 = netdev_priv(hw->dev[port]);
+ u16 reg;
+ u32 rx_reg;
+ int i;
+ const u8 *addr = hw->dev[port]->dev_addr;
+
+ sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
+ sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR);
+
+ sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR);
+
+ if (hw->chip_id == CHIP_ID_YUKON_XL &&
+ hw->chip_rev == CHIP_REV_YU_XL_A0 &&
+ port == 1) {
+ /* WA DEV_472 -- looks like crossed wires on port 2 */
+ /* clear GMAC 1 Control reset */
+ sky2_write8(hw, SK_REG(0, GMAC_CTRL), GMC_RST_CLR);
+ do {
+ sky2_write8(hw, SK_REG(1, GMAC_CTRL), GMC_RST_SET);
+ sky2_write8(hw, SK_REG(1, GMAC_CTRL), GMC_RST_CLR);
+ } while (gm_phy_read(hw, 1, PHY_MARV_ID0) != PHY_MARV_ID0_VAL ||
+ gm_phy_read(hw, 1, PHY_MARV_ID1) != PHY_MARV_ID1_Y2 ||
+ gm_phy_read(hw, 1, PHY_MARV_INT_MASK) != 0);
+ }
+
+ sky2_read16(hw, SK_REG(port, GMAC_IRQ_SRC));
+
+ /* Enable Transmit FIFO Underrun */
+ sky2_write8(hw, SK_REG(port, GMAC_IRQ_MSK), GMAC_DEF_MSK);
+
+ spin_lock_bh(&sky2->phy_lock);
+ sky2_phy_power_up(hw, port);
+ sky2_phy_init(hw, port);
+ spin_unlock_bh(&sky2->phy_lock);
+
+ /* MIB clear */
+ reg = gma_read16(hw, port, GM_PHY_ADDR);
+ gma_write16(hw, port, GM_PHY_ADDR, reg | GM_PAR_MIB_CLR);
+
+ for (i = GM_MIB_CNT_BASE; i <= GM_MIB_CNT_END; i += 4)
+ gma_read16(hw, port, i);
+ gma_write16(hw, port, GM_PHY_ADDR, reg);
+
+ /* transmit control */
+ gma_write16(hw, port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF));
+
+ /* receive control reg: unicast + multicast + no FCS */
+ gma_write16(hw, port, GM_RX_CTRL,
+ GM_RXCR_UCF_ENA | GM_RXCR_CRC_DIS | GM_RXCR_MCF_ENA);
+
+ /* transmit flow control */
+ gma_write16(hw, port, GM_TX_FLOW_CTRL, 0xffff);
+
+ /* transmit parameter */
+ gma_write16(hw, port, GM_TX_PARAM,
+ TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) |
+ TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
+ TX_IPG_JAM_DATA(TX_IPG_JAM_DEF) |
+ TX_BACK_OFF_LIM(TX_BOF_LIM_DEF));
+
+ /* serial mode register */
+ reg = DATA_BLIND_VAL(DATA_BLIND_DEF) |
+ GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF_1000);
+
+ if (hw->dev[port]->mtu > ETH_DATA_LEN)
+ reg |= GM_SMOD_JUMBO_ENA;
+
+ if (hw->chip_id == CHIP_ID_YUKON_EC_U &&
+ hw->chip_rev == CHIP_REV_YU_EC_U_B1)
+ reg |= GM_NEW_FLOW_CTRL;
+
+ gma_write16(hw, port, GM_SERIAL_MODE, reg);
+
+ /* virtual address for data */
+ gma_set_addr(hw, port, GM_SRC_ADDR_2L, addr);
+
+ /* physical address: used for pause frames */
+ gma_set_addr(hw, port, GM_SRC_ADDR_1L, addr);
+
+ /* ignore counter overflows */
+ gma_write16(hw, port, GM_TX_IRQ_MSK, 0);
+ gma_write16(hw, port, GM_RX_IRQ_MSK, 0);
+ gma_write16(hw, port, GM_TR_IRQ_MSK, 0);
+
+ /* Configure Rx MAC FIFO */
+ sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR);
+ rx_reg = GMF_OPER_ON | GMF_RX_F_FL_ON;
+ if (hw->chip_id == CHIP_ID_YUKON_EX ||
+ hw->chip_id == CHIP_ID_YUKON_FE_P)
+ rx_reg |= GMF_RX_OVER_ON;
+
+ sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), rx_reg);
+
+ if (hw->chip_id == CHIP_ID_YUKON_XL) {
+ /* Hardware errata - clear flush mask */
+ sky2_write16(hw, SK_REG(port, RX_GMF_FL_MSK), 0);
+ } else {
+ /* Flush Rx MAC FIFO on any flow control or error */
+ sky2_write16(hw, SK_REG(port, RX_GMF_FL_MSK), GMR_FS_ANY_ERR);
+ }
+
+	/* Set threshold to 0xa (64 bytes) + 1 to work around the pause bug */
+ reg = RX_GMF_FL_THR_DEF + 1;
+ /* Another magic mystery workaround from sk98lin */
+ if (hw->chip_id == CHIP_ID_YUKON_FE_P &&
+ hw->chip_rev == CHIP_REV_YU_FE2_A0)
+ reg = 0x178;
+ sky2_write16(hw, SK_REG(port, RX_GMF_FL_THR), reg);
+
+ /* Configure Tx MAC FIFO */
+ sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR);
+ sky2_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON);
+
+	/* On chips without a RAM buffer, pause is controlled at the MAC level */
+ if (!(hw->flags & SKY2_HW_RAM_BUFFER)) {
+ /* Pause threshold is scaled by 8 in bytes */
+ if (hw->chip_id == CHIP_ID_YUKON_FE_P &&
+ hw->chip_rev == CHIP_REV_YU_FE2_A0)
+ reg = 1568 / 8;
+ else
+ reg = 1024 / 8;
+ sky2_write16(hw, SK_REG(port, RX_GMF_UP_THR), reg);
+ sky2_write16(hw, SK_REG(port, RX_GMF_LP_THR), 768 / 8);
+
+ sky2_set_tx_stfwd(hw, port);
+ }
+
+ if (hw->chip_id == CHIP_ID_YUKON_FE_P &&
+ hw->chip_rev == CHIP_REV_YU_FE2_A0) {
+ /* disable dynamic watermark */
+ reg = sky2_read16(hw, SK_REG(port, TX_GMF_EA));
+ reg &= ~TX_DYN_WM_ENA;
+ sky2_write16(hw, SK_REG(port, TX_GMF_EA), reg);
+ }
+}
+
+/* Assign Ram Buffer allocation to queue */
+static void sky2_ramset(struct sky2_hw *hw, u16 q, u32 start, u32 space)
+{
+ u32 end;
+
+ /* convert from K bytes to qwords used for hw register */
+ start *= 1024/8;
+ space *= 1024/8;
+ end = start + space - 1;
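+	/* Editor's illustration (hypothetical figures, not from the original
+	 * code): a 24K receive area starting at offset 0 becomes qwords
+	 * 0..3071, since 24 * 1024 / 8 == 3072.
+	 */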
+
+ sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_RST_CLR);
+ sky2_write32(hw, RB_ADDR(q, RB_START), start);
+ sky2_write32(hw, RB_ADDR(q, RB_END), end);
+ sky2_write32(hw, RB_ADDR(q, RB_WP), start);
+ sky2_write32(hw, RB_ADDR(q, RB_RP), start);
+
+ if (q == Q_R1 || q == Q_R2) {
+ u32 tp = space - space/4;
+
+		/* On receive queues, set the thresholds:
+		 * give the receiver priority when > 3/4 full,
+		 * send pause when down to 2K.
+		 */
+ sky2_write32(hw, RB_ADDR(q, RB_RX_UTHP), tp);
+ sky2_write32(hw, RB_ADDR(q, RB_RX_LTHP), space/2);
+
+ tp = space - 8192/8;
+ sky2_write32(hw, RB_ADDR(q, RB_RX_UTPP), tp);
+ sky2_write32(hw, RB_ADDR(q, RB_RX_LTPP), space/4);
+ } else {
+		/* Enable store & forward on Tx queues because
+		 * the Tx FIFO is only 1K on Yukon.
+		 */
+ sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_STFWD);
+ }
+
+ sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_OP_MD);
+ sky2_read8(hw, RB_ADDR(q, RB_CTRL));
+}
+
+/* Setup Bus Memory Interface */
+static void sky2_qset(struct sky2_hw *hw, u16 q)
+{
+ sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_CLR_RESET);
+ sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_OPER_INIT);
+ sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_FIFO_OP_ON);
+ sky2_write32(hw, Q_ADDR(q, Q_WM), BMU_WM_DEFAULT);
+}
+
+/* Setup prefetch unit registers. This is the interface between
+ * hardware and driver list elements
+ */
+static void sky2_prefetch_init(struct sky2_hw *hw, u32 qaddr,
+ dma_addr_t addr, u32 last)
+{
+ sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_RST_SET);
+ sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_RST_CLR);
+ sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_ADDR_HI), upper_32_bits(addr));
+ sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_ADDR_LO), lower_32_bits(addr));
+ sky2_write16(hw, Y2_QADDR(qaddr, PREF_UNIT_LAST_IDX), last);
+ sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_OP_ON);
+
+ sky2_read32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL));
+}
+
+static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2, u16 *slot)
+{
+ struct sky2_tx_le *le = sky2->tx_le + *slot;
+
+ *slot = RING_NEXT(*slot, sky2->tx_ring_size);
+ le->ctrl = 0;
+ return le;
+}
+
+static void tx_init(struct sky2_port *sky2)
+{
+ struct sky2_tx_le *le;
+
+ sky2->tx_prod = sky2->tx_cons = 0;
+ sky2->tx_tcpsum = 0;
+ sky2->tx_last_mss = 0;
+ netdev_reset_queue(sky2->netdev);
+
+ le = get_tx_le(sky2, &sky2->tx_prod);
+ le->addr = 0;
+ le->opcode = OP_ADDR64 | HW_OWNER;
+ sky2->tx_last_upper = 0;
+}
+
+/* Update chip's next pointer */
+static inline void sky2_put_idx(struct sky2_hw *hw, unsigned q, u16 idx)
+{
+	/* Make sure writes to descriptors are complete before we tell the hardware */
+ wmb();
+ sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), idx);
+}
+
+
+static inline struct sky2_rx_le *sky2_next_rx(struct sky2_port *sky2)
+{
+ struct sky2_rx_le *le = sky2->rx_le + sky2->rx_put;
+ sky2->rx_put = RING_NEXT(sky2->rx_put, RX_LE_SIZE);
+ le->ctrl = 0;
+ return le;
+}
+
+static unsigned sky2_get_rx_threshold(struct sky2_port *sky2)
+{
+ unsigned size;
+
+ /* Space needed for frame data + headers rounded up */
+ size = roundup(sky2->netdev->mtu + ETH_HLEN + VLAN_HLEN, 8);
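+	/* Editor's example (illustrative only): with a 1500 byte MTU this is
+	 * roundup(1500 + 14 + 4, 8) == 1520, giving a truncation threshold of
+	 * (1520 - 8) / 4 == 378 32-bit words below.
+	 */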
+
+ /* Stopping point for hardware truncation */
+ return (size - 8) / sizeof(u32);
+}
+
+static unsigned sky2_get_rx_data_size(struct sky2_port *sky2)
+{
+ struct rx_ring_info *re;
+ unsigned size;
+
+ /* Space needed for frame data + headers rounded up */
+ size = roundup(sky2->netdev->mtu + ETH_HLEN + VLAN_HLEN, 8);
+
+ sky2->rx_nfrags = size >> PAGE_SHIFT;
+ BUG_ON(sky2->rx_nfrags > ARRAY_SIZE(re->frag_addr));
+
+ /* Compute residue after pages */
+ size -= sky2->rx_nfrags << PAGE_SHIFT;
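+	/* Editor's example (assumes 4K pages, not part of the original code):
+	 * a 9000 byte MTU rounds up to 9024 bytes, i.e. two full page
+	 * fragments plus an 832 byte linear residue.
+	 */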
+
+ /* Optimize to handle small packets and headers */
+ if (size < copybreak)
+ size = copybreak;
+ if (size < ETH_HLEN)
+ size = ETH_HLEN;
+
+ return size;
+}
+
+/* Build description to hardware for one receive segment */
+static void sky2_rx_add(struct sky2_port *sky2, u8 op,
+ dma_addr_t map, unsigned len)
+{
+ struct sky2_rx_le *le;
+
+ if (sizeof(dma_addr_t) > sizeof(u32)) {
+ le = sky2_next_rx(sky2);
+ le->addr = cpu_to_le32(upper_32_bits(map));
+ le->opcode = OP_ADDR64 | HW_OWNER;
+ }
+
+ le = sky2_next_rx(sky2);
+ le->addr = cpu_to_le32(lower_32_bits(map));
+ le->length = cpu_to_le16(len);
+ le->opcode = op | HW_OWNER;
+}
+
+/* Build description to hardware for one possibly fragmented skb */
+static void sky2_rx_submit(struct sky2_port *sky2,
+ const struct rx_ring_info *re)
+{
+ int i;
+
+ sky2_rx_add(sky2, OP_PACKET, re->data_addr, sky2->rx_data_size);
+
+ for (i = 0; i < skb_shinfo(re->skb)->nr_frags; i++)
+ sky2_rx_add(sky2, OP_BUFFER, re->frag_addr[i], PAGE_SIZE);
+}
+
+
+static int sky2_rx_map_skb(struct pci_dev *pdev, struct rx_ring_info *re,
+ unsigned size)
+{
+ struct sk_buff *skb = re->skb;
+ int i;
+
+ re->data_addr = dma_map_single(&pdev->dev, skb->data, size,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pdev->dev, re->data_addr))
+ goto mapping_error;
+
+ dma_unmap_len_set(re, data_size, size);
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ re->frag_addr[i] = skb_frag_dma_map(&pdev->dev, frag, 0,
+ skb_frag_size(frag),
+ DMA_FROM_DEVICE);
+
+ if (dma_mapping_error(&pdev->dev, re->frag_addr[i]))
+ goto map_page_error;
+ }
+ return 0;
+
+map_page_error:
+ while (--i >= 0) {
+ dma_unmap_page(&pdev->dev, re->frag_addr[i],
+ skb_frag_size(&skb_shinfo(skb)->frags[i]),
+ DMA_FROM_DEVICE);
+ }
+
+ dma_unmap_single(&pdev->dev, re->data_addr,
+ dma_unmap_len(re, data_size), DMA_FROM_DEVICE);
+
+mapping_error:
+ if (net_ratelimit())
+ dev_warn(&pdev->dev, "%s: rx mapping error\n",
+ skb->dev->name);
+ return -EIO;
+}
+
+static void sky2_rx_unmap_skb(struct pci_dev *pdev, struct rx_ring_info *re)
+{
+ struct sk_buff *skb = re->skb;
+ int i;
+
+ dma_unmap_single(&pdev->dev, re->data_addr,
+ dma_unmap_len(re, data_size), DMA_FROM_DEVICE);
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
+ dma_unmap_page(&pdev->dev, re->frag_addr[i],
+ skb_frag_size(&skb_shinfo(skb)->frags[i]),
+ DMA_FROM_DEVICE);
+}
+
+/* Tell the chip where to start the receive checksum.
+ * The hardware actually has two checksums, but set both the same to avoid
+ * possible byte order problems.
+ */
+static void rx_set_checksum(struct sky2_port *sky2)
+{
+ struct sky2_rx_le *le = sky2_next_rx(sky2);
+
+ le->addr = cpu_to_le32((ETH_HLEN << 16) | ETH_HLEN);
+ le->ctrl = 0;
+ le->opcode = OP_TCPSTART | HW_OWNER;
+
+ sky2_write32(sky2->hw,
+ Q_ADDR(rxqaddr[sky2->port], Q_CSR),
+ (sky2->netdev->features & NETIF_F_RXCSUM)
+ ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
+}
+
+/* Enable/disable receive hash calculation (RSS) */
+static void rx_set_rss(struct net_device *dev, netdev_features_t features)
+{
+ struct sky2_port *sky2 = netdev_priv(dev);
+ struct sky2_hw *hw = sky2->hw;
+ int i, nkeys = 4;
+
+ /* Supports IPv6 and other modes */
+ if (hw->flags & SKY2_HW_NEW_LE) {
+ nkeys = 10;
+ sky2_write32(hw, SK_REG(sky2->port, RSS_CFG), HASH_ALL);
+ }
+
+ /* Program RSS initial values */
+ if (features & NETIF_F_RXHASH) {
+ u32 rss_key[10];
+
+ netdev_rss_key_fill(rss_key, sizeof(rss_key));
+ for (i = 0; i < nkeys; i++)
+ sky2_write32(hw, SK_REG(sky2->port, RSS_KEY + i * 4),
+ rss_key[i]);
+
+ /* Need to turn on (undocumented) flag to make hashing work */
+ sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T),
+ RX_STFW_ENA);
+
+ sky2_write32(hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR),
+ BMU_ENA_RX_RSS_HASH);
+ } else
+ sky2_write32(hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR),
+ BMU_DIS_RX_RSS_HASH);
+}
+
+/*
+ * The RX Stop command will not work for Yukon-2 if the BMU does not
+ * reach the end of a packet, and since we can't be sure that we have
+ * incoming data, we must reset the BMU while it is not doing a DMA
+ * transfer. Since it is possible that the RX path is still active,
+ * the RX RAM buffer will be stopped first, so any possible incoming
+ * data will not trigger a DMA. After the RAM buffer is stopped, the
+ * BMU is polled until any DMA in progress is ended, and only then
+ * will it be reset.
+ */
+static void sky2_rx_stop(struct sky2_port *sky2)
+{
+ struct sky2_hw *hw = sky2->hw;
+ unsigned rxq = rxqaddr[sky2->port];
+ int i;
+
+ /* disable the RAM Buffer receive queue */
+ sky2_write8(hw, RB_ADDR(rxq, RB_CTRL), RB_DIS_OP_MD);
+
+ for (i = 0; i < 0xffff; i++)
+ if (sky2_read8(hw, RB_ADDR(rxq, Q_RSL))
+ == sky2_read8(hw, RB_ADDR(rxq, Q_RL)))
+ goto stopped;
+
+ netdev_warn(sky2->netdev, "receiver stop failed\n");
+stopped:
+ sky2_write32(hw, Q_ADDR(rxq, Q_CSR), BMU_RST_SET | BMU_FIFO_RST);
+
+ /* reset the Rx prefetch unit */
+ sky2_write32(hw, Y2_QADDR(rxq, PREF_UNIT_CTRL), PREF_UNIT_RST_SET);
+}
+
+/* Clean out receive buffer area, assumes receiver hardware stopped */
+static void sky2_rx_clean(struct sky2_port *sky2)
+{
+ unsigned i;
+
+ if (sky2->rx_le)
+ memset(sky2->rx_le, 0, RX_LE_BYTES);
+
+ for (i = 0; i < sky2->rx_pending; i++) {
+ struct rx_ring_info *re = sky2->rx_ring + i;
+
+ if (re->skb) {
+ sky2_rx_unmap_skb(sky2->hw->pdev, re);
+ kfree_skb(re->skb);
+ re->skb = NULL;
+ }
+ }
+}
+
+/* Basic MII support */
+static int sky2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct mii_ioctl_data *data = if_mii(ifr);
+ struct sky2_port *sky2 = netdev_priv(dev);
+ struct sky2_hw *hw = sky2->hw;
+ int err = -EOPNOTSUPP;
+
+ if (!netif_running(dev))
+ return -ENODEV; /* Phy still in reset */
+
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ data->phy_id = PHY_ADDR_MARV;
+
+ fallthrough;
+ case SIOCGMIIREG: {
+ u16 val = 0;
+
+ spin_lock_bh(&sky2->phy_lock);
+ err = __gm_phy_read(hw, sky2->port, data->reg_num & 0x1f, &val);
+ spin_unlock_bh(&sky2->phy_lock);
+
+ data->val_out = val;
+ break;
+ }
+
+ case SIOCSMIIREG:
+ spin_lock_bh(&sky2->phy_lock);
+ err = gm_phy_write(hw, sky2->port, data->reg_num & 0x1f,
+ data->val_in);
+ spin_unlock_bh(&sky2->phy_lock);
+ break;
+ }
+ return err;
+}
+
+#define SKY2_VLAN_OFFLOADS (NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO)
+
+static void sky2_vlan_mode(struct net_device *dev, netdev_features_t features)
+{
+ struct sky2_port *sky2 = netdev_priv(dev);
+ struct sky2_hw *hw = sky2->hw;
+ u16 port = sky2->port;
+
+ if (features & NETIF_F_HW_VLAN_CTAG_RX)
+ sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T),
+ RX_VLAN_STRIP_ON);
+ else
+ sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T),
+ RX_VLAN_STRIP_OFF);
+
+ if (features & NETIF_F_HW_VLAN_CTAG_TX) {
+ sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
+ TX_VLAN_TAG_ON);
+
+ dev->vlan_features |= SKY2_VLAN_OFFLOADS;
+ } else {
+ sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
+ TX_VLAN_TAG_OFF);
+
+ /* Can't do transmit offload of vlan without hw vlan */
+ dev->vlan_features &= ~SKY2_VLAN_OFFLOADS;
+ }
+}
+
+/* Worst-case amount of padding required in the rx buffer */
+static inline unsigned sky2_rx_pad(const struct sky2_hw *hw)
+{
+ return (hw->flags & SKY2_HW_RAM_BUFFER) ? 8 : 2;
+}
+
+/*
+ * Allocate an skb for receiving. If the MTU is large enough
+ * make the skb non-linear with a fragment list of pages.
+ */
+static struct sk_buff *sky2_rx_alloc(struct sky2_port *sky2, gfp_t gfp)
+{
+ struct sk_buff *skb;
+ int i;
+
+ skb = __netdev_alloc_skb(sky2->netdev,
+ sky2->rx_data_size + sky2_rx_pad(sky2->hw),
+ gfp);
+ if (!skb)
+ goto nomem;
+
+ if (sky2->hw->flags & SKY2_HW_RAM_BUFFER) {
+ unsigned char *start;
+ /*
+		 * Workaround for a FIFO bug that causes a hang
+		 * if the receive buffer is not 64 byte aligned.
+		 * The buffer returned from netdev_alloc_skb is
+		 * aligned except when slab debugging is enabled.
+ */
+ start = PTR_ALIGN(skb->data, 8);
+ skb_reserve(skb, start - skb->data);
+ } else
+ skb_reserve(skb, NET_IP_ALIGN);
+
+ for (i = 0; i < sky2->rx_nfrags; i++) {
+ struct page *page = alloc_page(gfp);
+
+ if (!page)
+ goto free_partial;
+ skb_fill_page_desc(skb, i, page, 0, PAGE_SIZE);
+ }
+
+ return skb;
+free_partial:
+ kfree_skb(skb);
+nomem:
+ return NULL;
+}
+
+static inline void sky2_rx_update(struct sky2_port *sky2, unsigned rxq)
+{
+ sky2_put_idx(sky2->hw, rxq, sky2->rx_put);
+}
+
+static int sky2_alloc_rx_skbs(struct sky2_port *sky2)
+{
+ struct sky2_hw *hw = sky2->hw;
+ unsigned i;
+
+ sky2->rx_data_size = sky2_get_rx_data_size(sky2);
+
+ /* Fill Rx ring */
+ for (i = 0; i < sky2->rx_pending; i++) {
+ struct rx_ring_info *re = sky2->rx_ring + i;
+
+ re->skb = sky2_rx_alloc(sky2, GFP_KERNEL);
+ if (!re->skb)
+ return -ENOMEM;
+
+ if (sky2_rx_map_skb(hw->pdev, re, sky2->rx_data_size)) {
+ dev_kfree_skb(re->skb);
+ re->skb = NULL;
+ return -ENOMEM;
+ }
+ }
+ return 0;
+}
+
+/*
+ * Setup receiver buffer pool.
+ * In the normal case this ends up creating one list element per skb
+ * in the receive ring. In the worst case, with a large MTU where each
+ * allocation falls in a different 64 bit region, it results
+ * in 6 list elements per ring entry.
+ * One element is used for checksum enable/disable, and one
+ * extra to avoid wrap.
+ */
+static void sky2_rx_start(struct sky2_port *sky2)
+{
+ struct sky2_hw *hw = sky2->hw;
+ struct rx_ring_info *re;
+ unsigned rxq = rxqaddr[sky2->port];
+ unsigned i, thresh;
+
+ sky2->rx_put = sky2->rx_next = 0;
+ sky2_qset(hw, rxq);
+
+ /* On PCI express lowering the watermark gives better performance */
+ if (pci_is_pcie(hw->pdev))
+ sky2_write32(hw, Q_ADDR(rxq, Q_WM), BMU_WM_PEX);
+
+	/* These chips have no RAM buffer;
+	 * MAC Rx RAM Read is controlled by hardware.
+ */
+ if (hw->chip_id == CHIP_ID_YUKON_EC_U &&
+ hw->chip_rev > CHIP_REV_YU_EC_U_A0)
+ sky2_write32(hw, Q_ADDR(rxq, Q_TEST), F_M_RX_RAM_DIS);
+
+ sky2_prefetch_init(hw, rxq, sky2->rx_le_map, RX_LE_SIZE - 1);
+
+ if (!(hw->flags & SKY2_HW_NEW_LE))
+ rx_set_checksum(sky2);
+
+ if (!(hw->flags & SKY2_HW_RSS_BROKEN))
+ rx_set_rss(sky2->netdev, sky2->netdev->features);
+
+ /* submit Rx ring */
+ for (i = 0; i < sky2->rx_pending; i++) {
+ re = sky2->rx_ring + i;
+ sky2_rx_submit(sky2, re);
+ }
+
+ /*
+ * The receiver hangs if it receives frames larger than the
+ * packet buffer. As a workaround, truncate oversize frames, but
+ * the register is limited to 9 bits, so if you do frames > 2052
+	 * you had better get the MTU right!
+ */
+ thresh = sky2_get_rx_threshold(sky2);
+ if (thresh > 0x1ff)
+ sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), RX_TRUNC_OFF);
+ else {
+ sky2_write16(hw, SK_REG(sky2->port, RX_GMF_TR_THR), thresh);
+ sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), RX_TRUNC_ON);
+ }
+
+ /* Tell chip about available buffers */
+ sky2_rx_update(sky2, rxq);
+
+ if (hw->chip_id == CHIP_ID_YUKON_EX ||
+ hw->chip_id == CHIP_ID_YUKON_SUPR) {
+ /*
+		 * Disable flushing of non-ASF packets;
+ * must be done after initializing the BMUs;
+ * drivers without ASF support should do this too, otherwise
+ * it may happen that they cannot run on ASF devices;
+ * remember that the MAC FIFO isn't reset during initialization.
+ */
+ sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), RX_MACSEC_FLUSH_OFF);
+ }
+
+ if (hw->chip_id >= CHIP_ID_YUKON_SUPR) {
+ /* Enable RX Home Address & Routing Header checksum fix */
+ sky2_write16(hw, SK_REG(sky2->port, RX_GMF_FL_CTRL),
+ RX_IPV6_SA_MOB_ENA | RX_IPV6_DA_MOB_ENA);
+
+ /* Enable TX Home Address & Routing Header checksum fix */
+ sky2_write32(hw, Q_ADDR(txqaddr[sky2->port], Q_TEST),
+ TBMU_TEST_HOME_ADD_FIX_EN | TBMU_TEST_ROUTING_ADD_FIX_EN);
+ }
+}
+
+static int sky2_alloc_buffers(struct sky2_port *sky2)
+{
+ struct sky2_hw *hw = sky2->hw;
+
+ /* must be power of 2 */
+ sky2->tx_le = dma_alloc_coherent(&hw->pdev->dev,
+ sky2->tx_ring_size * sizeof(struct sky2_tx_le),
+ &sky2->tx_le_map, GFP_KERNEL);
+ if (!sky2->tx_le)
+ goto nomem;
+
+ sky2->tx_ring = kcalloc(sky2->tx_ring_size, sizeof(struct tx_ring_info),
+ GFP_KERNEL);
+ if (!sky2->tx_ring)
+ goto nomem;
+
+ sky2->rx_le = dma_alloc_coherent(&hw->pdev->dev, RX_LE_BYTES,
+ &sky2->rx_le_map, GFP_KERNEL);
+ if (!sky2->rx_le)
+ goto nomem;
+
+ sky2->rx_ring = kcalloc(sky2->rx_pending, sizeof(struct rx_ring_info),
+ GFP_KERNEL);
+ if (!sky2->rx_ring)
+ goto nomem;
+
+ return sky2_alloc_rx_skbs(sky2);
+nomem:
+ return -ENOMEM;
+}
+
+static void sky2_free_buffers(struct sky2_port *sky2)
+{
+ struct sky2_hw *hw = sky2->hw;
+
+ sky2_rx_clean(sky2);
+
+ if (sky2->rx_le) {
+ dma_free_coherent(&hw->pdev->dev, RX_LE_BYTES, sky2->rx_le,
+ sky2->rx_le_map);
+ sky2->rx_le = NULL;
+ }
+ if (sky2->tx_le) {
+ dma_free_coherent(&hw->pdev->dev,
+ sky2->tx_ring_size * sizeof(struct sky2_tx_le),
+ sky2->tx_le, sky2->tx_le_map);
+ sky2->tx_le = NULL;
+ }
+ kfree(sky2->tx_ring);
+ kfree(sky2->rx_ring);
+
+ sky2->tx_ring = NULL;
+ sky2->rx_ring = NULL;
+}
+
+static void sky2_hw_up(struct sky2_port *sky2)
+{
+ struct sky2_hw *hw = sky2->hw;
+ unsigned port = sky2->port;
+ u32 ramsize;
+ int cap;
+ struct net_device *otherdev = hw->dev[sky2->port^1];
+
+ tx_init(sky2);
+
+ /*
+	 * On a dual port PCI-X card, there is a problem where status
+	 * can be received out of order due to split transactions.
+ */
+ if (otherdev && netif_running(otherdev) &&
+ (cap = pci_find_capability(hw->pdev, PCI_CAP_ID_PCIX))) {
+ u16 cmd;
+
+ cmd = sky2_pci_read16(hw, cap + PCI_X_CMD);
+ cmd &= ~PCI_X_CMD_MAX_SPLIT;
+ sky2_pci_write16(hw, cap + PCI_X_CMD, cmd);
+ }
+
+ sky2_mac_init(hw, port);
+
+	/* The register holds the number of 4K blocks in the internal RAM buffer. */
+ ramsize = sky2_read8(hw, B2_E_0) * 4;
+ if (ramsize > 0) {
+ u32 rxspace;
+
+ netdev_dbg(sky2->netdev, "ram buffer %dK\n", ramsize);
+ if (ramsize < 16)
+ rxspace = ramsize / 2;
+ else
+ rxspace = 8 + (2*(ramsize - 16))/3;
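+		/* Editor's example (hypothetical figures): a 48K RAM buffer
+		 * gives rxspace = 8 + (2 * 32) / 3 == 29K for receive and the
+		 * remaining 19K for transmit (integer arithmetic).
+		 */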
+
+ sky2_ramset(hw, rxqaddr[port], 0, rxspace);
+ sky2_ramset(hw, txqaddr[port], rxspace, ramsize - rxspace);
+
+ /* Make sure SyncQ is disabled */
+ sky2_write8(hw, RB_ADDR(port == 0 ? Q_XS1 : Q_XS2, RB_CTRL),
+ RB_RST_SET);
+ }
+
+ sky2_qset(hw, txqaddr[port]);
+
+	/* This is copied from sk98lin 10.0.5.3; no one tells me about errata */
+ if (hw->chip_id == CHIP_ID_YUKON_EX && hw->chip_rev == CHIP_REV_YU_EX_B0)
+ sky2_write32(hw, Q_ADDR(txqaddr[port], Q_TEST), F_TX_CHK_AUTO_OFF);
+
+ /* Set almost empty threshold */
+ if (hw->chip_id == CHIP_ID_YUKON_EC_U &&
+ hw->chip_rev == CHIP_REV_YU_EC_U_A0)
+ sky2_write16(hw, Q_ADDR(txqaddr[port], Q_AL), ECU_TXFF_LEV);
+
+ sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map,
+ sky2->tx_ring_size - 1);
+
+ sky2_vlan_mode(sky2->netdev, sky2->netdev->features);
+ netdev_update_features(sky2->netdev);
+
+ sky2_rx_start(sky2);
+}
+
+/* Setup device IRQ and enable napi to process */
+static int sky2_setup_irq(struct sky2_hw *hw, const char *name)
+{
+ struct pci_dev *pdev = hw->pdev;
+ int err;
+
+ err = request_irq(pdev->irq, sky2_intr,
+ (hw->flags & SKY2_HW_USE_MSI) ? 0 : IRQF_SHARED,
+ name, hw);
+ if (err)
+ dev_err(&pdev->dev, "cannot assign irq %d\n", pdev->irq);
+ else {
+ hw->flags |= SKY2_HW_IRQ_SETUP;
+
+ napi_enable(&hw->napi);
+ sky2_write32(hw, B0_IMSK, Y2_IS_BASE);
+ sky2_read32(hw, B0_IMSK);
+ }
+
+ return err;
+}
+
+
+/* Bring up network interface. */
+static int sky2_open(struct net_device *dev)
+{
+ struct sky2_port *sky2 = netdev_priv(dev);
+ struct sky2_hw *hw = sky2->hw;
+ unsigned port = sky2->port;
+ u32 imask;
+ int err;
+
+ netif_carrier_off(dev);
+
+ err = sky2_alloc_buffers(sky2);
+ if (err)
+ goto err_out;
+
+	/* With a single port, the IRQ is set up when the device is brought up */
+ if (hw->ports == 1 && (err = sky2_setup_irq(hw, dev->name)))
+ goto err_out;
+
+ sky2_hw_up(sky2);
+
+ /* Enable interrupts from phy/mac for port */
+ imask = sky2_read32(hw, B0_IMSK);
+
+ if (hw->chip_id == CHIP_ID_YUKON_OPT ||
+ hw->chip_id == CHIP_ID_YUKON_PRM ||
+ hw->chip_id == CHIP_ID_YUKON_OP_2)
+ imask |= Y2_IS_PHY_QLNK; /* enable PHY Quick Link */
+
+ imask |= portirq_msk[port];
+ sky2_write32(hw, B0_IMSK, imask);
+ sky2_read32(hw, B0_IMSK);
+
+ netif_info(sky2, ifup, dev, "enabling interface\n");
+
+ return 0;
+
+err_out:
+ sky2_free_buffers(sky2);
+ return err;
+}
+
+/* Modular subtraction in ring */
+static inline int tx_inuse(const struct sky2_port *sky2)
+{
+ return (sky2->tx_prod - sky2->tx_cons) & (sky2->tx_ring_size - 1);
+}
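+
+/* Editor's note: the mask handles wraparound of the power-of-two ring,
+ * e.g. with a 512-element ring, tx_prod == 5 and tx_cons == 510 gives
+ * (5 - 510) & 511 == 7 list elements in use (hypothetical figures).
+ */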
+
+/* Number of list elements available for next tx */
+static inline int tx_avail(const struct sky2_port *sky2)
+{
+ return sky2->tx_pending - tx_inuse(sky2);
+}
+
+/* Estimate of number of transmit list elements required */
+static unsigned tx_le_req(const struct sk_buff *skb)
+{
+ unsigned count;
+
+ count = (skb_shinfo(skb)->nr_frags + 1)
+ * (sizeof(dma_addr_t) / sizeof(u32));
+
+ if (skb_is_gso(skb))
+ ++count;
+ else if (sizeof(dma_addr_t) == sizeof(u32))
+ ++count; /* possible vlan */
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ ++count;
+
+ return count;
+}
+
+static void sky2_tx_unmap(struct pci_dev *pdev, struct tx_ring_info *re)
+{
+ if (re->flags & TX_MAP_SINGLE)
+ dma_unmap_single(&pdev->dev, dma_unmap_addr(re, mapaddr),
+ dma_unmap_len(re, maplen), DMA_TO_DEVICE);
+ else if (re->flags & TX_MAP_PAGE)
+ dma_unmap_page(&pdev->dev, dma_unmap_addr(re, mapaddr),
+ dma_unmap_len(re, maplen), DMA_TO_DEVICE);
+ re->flags = 0;
+}
+
+/*
+ * Put one packet in the ring for transmit.
+ * A single packet can generate multiple list elements, so
+ * the number of packets that fit in the ring will be less than the
+ * number of list elements.
+ */
+static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct sky2_port *sky2 = netdev_priv(dev);
+ struct sky2_hw *hw = sky2->hw;
+ struct sky2_tx_le *le = NULL;
+ struct tx_ring_info *re;
+ unsigned i, len;
+ dma_addr_t mapping;
+ u32 upper;
+ u16 slot;
+ u16 mss;
+ u8 ctrl;
+
+ if (unlikely(tx_avail(sky2) < tx_le_req(skb)))
+ return NETDEV_TX_BUSY;
+
+ len = skb_headlen(skb);
+ mapping = dma_map_single(&hw->pdev->dev, skb->data, len,
+ DMA_TO_DEVICE);
+
+ if (dma_mapping_error(&hw->pdev->dev, mapping))
+ goto mapping_error;
+
+ slot = sky2->tx_prod;
+ netif_printk(sky2, tx_queued, KERN_DEBUG, dev,
+ "tx queued, slot %u, len %d\n", slot, skb->len);
+
+ /* Send high bits if needed */
+ upper = upper_32_bits(mapping);
+ if (upper != sky2->tx_last_upper) {
+ le = get_tx_le(sky2, &slot);
+ le->addr = cpu_to_le32(upper);
+ sky2->tx_last_upper = upper;
+ le->opcode = OP_ADDR64 | HW_OWNER;
+ }
+
+ /* Check for TCP Segmentation Offload */
+ mss = skb_shinfo(skb)->gso_size;
+ if (mss != 0) {
+
+ if (!(hw->flags & SKY2_HW_NEW_LE))
+ mss += skb_tcp_all_headers(skb);
+
+ if (mss != sky2->tx_last_mss) {
+ le = get_tx_le(sky2, &slot);
+ le->addr = cpu_to_le32(mss);
+
+ if (hw->flags & SKY2_HW_NEW_LE)
+ le->opcode = OP_MSS | HW_OWNER;
+ else
+ le->opcode = OP_LRGLEN | HW_OWNER;
+ sky2->tx_last_mss = mss;
+ }
+ }
+
+ ctrl = 0;
+
+ /* Add VLAN tag, can piggyback on LRGLEN or ADDR64 */
+ if (skb_vlan_tag_present(skb)) {
+ if (!le) {
+ le = get_tx_le(sky2, &slot);
+ le->addr = 0;
+ le->opcode = OP_VLAN|HW_OWNER;
+ } else
+ le->opcode |= OP_VLAN;
+ le->length = cpu_to_be16(skb_vlan_tag_get(skb));
+ ctrl |= INS_VLAN;
+ }
+
+ /* Handle TCP checksum offload */
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ /* On Yukon EX (some versions) encoding change. */
+ if (hw->flags & SKY2_HW_AUTO_TX_SUM)
+ ctrl |= CALSUM; /* auto checksum */
+ else {
+ const unsigned offset = skb_transport_offset(skb);
+ u32 tcpsum;
+
+ tcpsum = offset << 16; /* sum start */
+ tcpsum |= offset + skb->csum_offset; /* sum write */
+
+ ctrl |= CALSUM | WR_SUM | INIT_SUM | LOCK_SUM;
+ if (ip_hdr(skb)->protocol == IPPROTO_UDP)
+ ctrl |= UDPTCP;
+
+ if (tcpsum != sky2->tx_tcpsum) {
+ sky2->tx_tcpsum = tcpsum;
+
+ le = get_tx_le(sky2, &slot);
+ le->addr = cpu_to_le32(tcpsum);
+ le->length = 0; /* initial checksum value */
+ le->ctrl = 1; /* one packet */
+ le->opcode = OP_TCPLISW | HW_OWNER;
+ }
+ }
+ }
+
+ re = sky2->tx_ring + slot;
+ re->flags = TX_MAP_SINGLE;
+ dma_unmap_addr_set(re, mapaddr, mapping);
+ dma_unmap_len_set(re, maplen, len);
+
+ le = get_tx_le(sky2, &slot);
+ le->addr = cpu_to_le32(lower_32_bits(mapping));
+ le->length = cpu_to_le16(len);
+ le->ctrl = ctrl;
+ le->opcode = mss ? (OP_LARGESEND | HW_OWNER) : (OP_PACKET | HW_OWNER);
+
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ mapping = skb_frag_dma_map(&hw->pdev->dev, frag, 0,
+ skb_frag_size(frag), DMA_TO_DEVICE);
+
+ if (dma_mapping_error(&hw->pdev->dev, mapping))
+ goto mapping_unwind;
+
+ upper = upper_32_bits(mapping);
+ if (upper != sky2->tx_last_upper) {
+ le = get_tx_le(sky2, &slot);
+ le->addr = cpu_to_le32(upper);
+ sky2->tx_last_upper = upper;
+ le->opcode = OP_ADDR64 | HW_OWNER;
+ }
+
+ re = sky2->tx_ring + slot;
+ re->flags = TX_MAP_PAGE;
+ dma_unmap_addr_set(re, mapaddr, mapping);
+ dma_unmap_len_set(re, maplen, skb_frag_size(frag));
+
+ le = get_tx_le(sky2, &slot);
+ le->addr = cpu_to_le32(lower_32_bits(mapping));
+ le->length = cpu_to_le16(skb_frag_size(frag));
+ le->ctrl = ctrl;
+ le->opcode = OP_BUFFER | HW_OWNER;
+ }
+
+ re->skb = skb;
+ le->ctrl |= EOP;
+
+ sky2->tx_prod = slot;
+
+ if (tx_avail(sky2) <= MAX_SKB_TX_LE)
+ netif_stop_queue(dev);
+
+ netdev_sent_queue(dev, skb->len);
+ sky2_put_idx(hw, txqaddr[sky2->port], sky2->tx_prod);
+
+ return NETDEV_TX_OK;
+
+mapping_unwind:
+ for (i = sky2->tx_prod; i != slot; i = RING_NEXT(i, sky2->tx_ring_size)) {
+ re = sky2->tx_ring + i;
+
+ sky2_tx_unmap(hw->pdev, re);
+ }
+
+mapping_error:
+ if (net_ratelimit())
+ dev_warn(&hw->pdev->dev, "%s: tx mapping error\n", dev->name);
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+
+/*
+ * Free ring elements starting at tx_cons until "done"
+ *
+ * NB:
+ *  1. The hardware will tell us about partial completion of multi-part
+ *     buffers so make sure not to free the skb too early.
+ *  2. This may run in parallel with start_xmit because it only
+ *     looks at the tail of the queue (tx_cons), not
+ *     the head (tx_prod)
+ */
+static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
+{
+ struct net_device *dev = sky2->netdev;
+ u16 idx;
+ unsigned int bytes_compl = 0, pkts_compl = 0;
+
+ BUG_ON(done >= sky2->tx_ring_size);
+
+ for (idx = sky2->tx_cons; idx != done;
+ idx = RING_NEXT(idx, sky2->tx_ring_size)) {
+ struct tx_ring_info *re = sky2->tx_ring + idx;
+ struct sk_buff *skb = re->skb;
+
+ sky2_tx_unmap(sky2->hw->pdev, re);
+
+ if (skb) {
+ netif_printk(sky2, tx_done, KERN_DEBUG, dev,
+ "tx done %u\n", idx);
+
+ pkts_compl++;
+ bytes_compl += skb->len;
+
+ re->skb = NULL;
+ dev_kfree_skb_any(skb);
+
+ sky2->tx_next = RING_NEXT(idx, sky2->tx_ring_size);
+ }
+ }
+
+ sky2->tx_cons = idx;
+ smp_mb();
+
+ netdev_completed_queue(dev, pkts_compl, bytes_compl);
+
+ u64_stats_update_begin(&sky2->tx_stats.syncp);
+ sky2->tx_stats.packets += pkts_compl;
+ sky2->tx_stats.bytes += bytes_compl;
+ u64_stats_update_end(&sky2->tx_stats.syncp);
+}
+
+static void sky2_tx_reset(struct sky2_hw *hw, unsigned port)
+{
+ /* Disable Force Sync bit and Enable Alloc bit */
+ sky2_write8(hw, SK_REG(port, TXA_CTRL),
+ TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC);
+
+ /* Stop Interval Timer and Limit Counter of Tx Arbiter */
+ sky2_write32(hw, SK_REG(port, TXA_ITI_INI), 0L);
+ sky2_write32(hw, SK_REG(port, TXA_LIM_INI), 0L);
+
+ /* Reset the PCI FIFO of the async Tx queue */
+ sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR),
+ BMU_RST_SET | BMU_FIFO_RST);
+
+ /* Reset the Tx prefetch units */
+ sky2_write32(hw, Y2_QADDR(txqaddr[port], PREF_UNIT_CTRL),
+ PREF_UNIT_RST_SET);
+
+ sky2_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), RB_RST_SET);
+ sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET);
+
+ sky2_read32(hw, B0_CTST);
+}
+
+static void sky2_hw_down(struct sky2_port *sky2)
+{
+ struct sky2_hw *hw = sky2->hw;
+ unsigned port = sky2->port;
+ u16 ctrl;
+
+ /* Force flow control off */
+ sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
+
+ /* Stop transmitter */
+ sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), BMU_STOP);
+ sky2_read32(hw, Q_ADDR(txqaddr[port], Q_CSR));
+
+ sky2_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL),
+ RB_RST_SET | RB_DIS_OP_MD);
+
+ ctrl = gma_read16(hw, port, GM_GP_CTRL);
+ ctrl &= ~(GM_GPCR_TX_ENA | GM_GPCR_RX_ENA);
+ gma_write16(hw, port, GM_GP_CTRL, ctrl);
+
+ sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
+
+ /* Workaround shared GMAC reset */
+ if (!(hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0 &&
+ port == 0 && hw->dev[1] && netif_running(hw->dev[1])))
+ sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET);
+
+ sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
+
+ /* Force any delayed status interrupt and NAPI */
+ sky2_write32(hw, STAT_LEV_TIMER_CNT, 0);
+ sky2_write32(hw, STAT_TX_TIMER_CNT, 0);
+ sky2_write32(hw, STAT_ISR_TIMER_CNT, 0);
+ sky2_read8(hw, STAT_ISR_TIMER_CTRL);
+
+ sky2_rx_stop(sky2);
+
+ spin_lock_bh(&sky2->phy_lock);
+ sky2_phy_power_down(hw, port);
+ spin_unlock_bh(&sky2->phy_lock);
+
+ sky2_tx_reset(hw, port);
+
+ /* Free any pending frames stuck in HW queue */
+ sky2_tx_complete(sky2, sky2->tx_prod);
+}
+
+/* Network shutdown */
+static int sky2_close(struct net_device *dev)
+{
+ struct sky2_port *sky2 = netdev_priv(dev);
+ struct sky2_hw *hw = sky2->hw;
+
+ /* Never really got started! */
+ if (!sky2->tx_le)
+ return 0;
+
+ netif_info(sky2, ifdown, dev, "disabling interface\n");
+
+ if (hw->ports == 1) {
+ sky2_write32(hw, B0_IMSK, 0);
+ sky2_read32(hw, B0_IMSK);
+
+ napi_disable(&hw->napi);
+ free_irq(hw->pdev->irq, hw);
+ hw->flags &= ~SKY2_HW_IRQ_SETUP;
+ } else {
+ u32 imask;
+
+ /* Disable port IRQ */
+ imask = sky2_read32(hw, B0_IMSK);
+ imask &= ~portirq_msk[sky2->port];
+ sky2_write32(hw, B0_IMSK, imask);
+ sky2_read32(hw, B0_IMSK);
+
+ synchronize_irq(hw->pdev->irq);
+ napi_synchronize(&hw->napi);
+ }
+
+ sky2_hw_down(sky2);
+
+ sky2_free_buffers(sky2);
+
+ return 0;
+}
+
+static u16 sky2_phy_speed(const struct sky2_hw *hw, u16 aux)
+{
+ if (hw->flags & SKY2_HW_FIBRE_PHY)
+ return SPEED_1000;
+
+ if (!(hw->flags & SKY2_HW_GIGABIT)) {
+ if (aux & PHY_M_PS_SPEED_100)
+ return SPEED_100;
+ else
+ return SPEED_10;
+ }
+
+ switch (aux & PHY_M_PS_SPEED_MSK) {
+ case PHY_M_PS_SPEED_1000:
+ return SPEED_1000;
+ case PHY_M_PS_SPEED_100:
+ return SPEED_100;
+ default:
+ return SPEED_10;
+ }
+}
+
+static void sky2_link_up(struct sky2_port *sky2)
+{
+ struct sky2_hw *hw = sky2->hw;
+ unsigned port = sky2->port;
+ static const char *fc_name[] = {
+ [FC_NONE] = "none",
+ [FC_TX] = "tx",
+ [FC_RX] = "rx",
+ [FC_BOTH] = "both",
+ };
+
+ sky2_set_ipg(sky2);
+
+ sky2_enable_rx_tx(sky2);
+
+ gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK);
+
+ netif_carrier_on(sky2->netdev);
+
+ mod_timer(&hw->watchdog_timer, jiffies + 1);
+
+ /* Turn on link LED */
+ sky2_write8(hw, SK_REG(port, LNK_LED_REG),
+ LINKLED_ON | LINKLED_BLINK_OFF | LINKLED_LINKSYNC_OFF);
+
+ netif_info(sky2, link, sky2->netdev,
+ "Link is up at %d Mbps, %s duplex, flow control %s\n",
+ sky2->speed,
+ sky2->duplex == DUPLEX_FULL ? "full" : "half",
+ fc_name[sky2->flow_status]);
+}
+
+static void sky2_link_down(struct sky2_port *sky2)
+{
+ struct sky2_hw *hw = sky2->hw;
+ unsigned port = sky2->port;
+ u16 reg;
+
+ gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0);
+
+ reg = gma_read16(hw, port, GM_GP_CTRL);
+ reg &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
+ gma_write16(hw, port, GM_GP_CTRL, reg);
+
+ netif_carrier_off(sky2->netdev);
+
+ /* Turn off link LED */
+ sky2_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_OFF);
+
+ netif_info(sky2, link, sky2->netdev, "Link is down\n");
+
+ sky2_phy_init(hw, port);
+}
+
+static enum flow_control sky2_flow(int rx, int tx)
+{
+ if (rx)
+ return tx ? FC_BOTH : FC_RX;
+ else
+ return tx ? FC_TX : FC_NONE;
+}
+
+static int sky2_autoneg_done(struct sky2_port *sky2, u16 aux)
+{
+ struct sky2_hw *hw = sky2->hw;
+ unsigned port = sky2->port;
+ u16 advert, lpa;
+
+ advert = gm_phy_read(hw, port, PHY_MARV_AUNE_ADV);
+ lpa = gm_phy_read(hw, port, PHY_MARV_AUNE_LP);
+ if (lpa & PHY_M_AN_RF) {
+ netdev_err(sky2->netdev, "remote fault\n");
+ return -1;
+ }
+
+ if (!(aux & PHY_M_PS_SPDUP_RES)) {
+ netdev_err(sky2->netdev, "speed/duplex mismatch\n");
+ return -1;
+ }
+
+ sky2->speed = sky2_phy_speed(hw, aux);
+ sky2->duplex = (aux & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF;
+
+ /* Since the pause result bits seem to be in different positions on
+ * different chips, look at the registers.
+ */
+ if (hw->flags & SKY2_HW_FIBRE_PHY) {
+ /* Shift for bits in fiber PHY */
+ advert &= ~(ADVERTISE_PAUSE_CAP|ADVERTISE_PAUSE_ASYM);
+ lpa &= ~(LPA_PAUSE_CAP|LPA_PAUSE_ASYM);
+
+ if (advert & ADVERTISE_1000XPAUSE)
+ advert |= ADVERTISE_PAUSE_CAP;
+ if (advert & ADVERTISE_1000XPSE_ASYM)
+ advert |= ADVERTISE_PAUSE_ASYM;
+ if (lpa & LPA_1000XPAUSE)
+ lpa |= LPA_PAUSE_CAP;
+ if (lpa & LPA_1000XPAUSE_ASYM)
+ lpa |= LPA_PAUSE_ASYM;
+ }
+
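+ /* The resolution below mirrors the advertised/partner pause bits:
+ * e.g. local PAUSE with partner PAUSE gives FC_BOTH, local PAUSE+ASYM
+ * with a partner lacking PAUSE gives FC_RX, and local ASYM only with
+ * partner PAUSE+ASYM gives FC_TX; everything else is FC_NONE.
+ */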
+ sky2->flow_status = FC_NONE;
+ if (advert & ADVERTISE_PAUSE_CAP) {
+ if (lpa & LPA_PAUSE_CAP)
+ sky2->flow_status = FC_BOTH;
+ else if (advert & ADVERTISE_PAUSE_ASYM)
+ sky2->flow_status = FC_RX;
+ } else if (advert & ADVERTISE_PAUSE_ASYM) {
+ if ((lpa & LPA_PAUSE_CAP) && (lpa & LPA_PAUSE_ASYM))
+ sky2->flow_status = FC_TX;
+ }
+
+ if (sky2->duplex == DUPLEX_HALF && sky2->speed < SPEED_1000 &&
+ !(hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_EX))
+ sky2->flow_status = FC_NONE;
+
+ if (sky2->flow_status & FC_TX)
+ sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON);
+ else
+ sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
+
+ return 0;
+}
+
+/* Interrupt from PHY */
+static void sky2_phy_intr(struct sky2_hw *hw, unsigned port)
+{
+ struct net_device *dev = hw->dev[port];
+ struct sky2_port *sky2 = netdev_priv(dev);
+ u16 istatus, phystat;
+
+ if (!netif_running(dev))
+ return;
+
+ spin_lock(&sky2->phy_lock);
+ istatus = gm_phy_read(hw, port, PHY_MARV_INT_STAT);
+ phystat = gm_phy_read(hw, port, PHY_MARV_PHY_STAT);
+
+ netif_info(sky2, intr, sky2->netdev, "phy interrupt status 0x%x 0x%x\n",
+ istatus, phystat);
+
+ if (istatus & PHY_M_IS_AN_COMPL) {
+ if (sky2_autoneg_done(sky2, phystat) == 0 &&
+ !netif_carrier_ok(dev))
+ sky2_link_up(sky2);
+ goto out;
+ }
+
+ if (istatus & PHY_M_IS_LSP_CHANGE)
+ sky2->speed = sky2_phy_speed(hw, phystat);
+
+ if (istatus & PHY_M_IS_DUP_CHANGE)
+ sky2->duplex =
+ (phystat & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF;
+
+ if (istatus & PHY_M_IS_LST_CHANGE) {
+ if (phystat & PHY_M_PS_LINK_UP)
+ sky2_link_up(sky2);
+ else
+ sky2_link_down(sky2);
+ }
+out:
+ spin_unlock(&sky2->phy_lock);
+}
+
+/* Special quick link interrupt (Yukon-2 Optima only) */
+static void sky2_qlink_intr(struct sky2_hw *hw)
+{
+ struct sky2_port *sky2 = netdev_priv(hw->dev[0]);
+ u32 imask;
+ u16 phy;
+
+ /* disable irq */
+ imask = sky2_read32(hw, B0_IMSK);
+ imask &= ~Y2_IS_PHY_QLNK;
+ sky2_write32(hw, B0_IMSK, imask);
+
+ /* reset PHY Link Detect */
+ phy = sky2_pci_read16(hw, PSM_CONFIG_REG4);
+ sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
+ sky2_pci_write16(hw, PSM_CONFIG_REG4, phy | 1);
+ sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
+
+ sky2_link_up(sky2);
+}
+
+/* Transmit timeout is only called if we are running, carrier is up
+ * and tx queue is full (stopped).
+ */
+static void sky2_tx_timeout(struct net_device *dev, unsigned int txqueue)
+{
+ struct sky2_port *sky2 = netdev_priv(dev);
+ struct sky2_hw *hw = sky2->hw;
+
+ netif_err(sky2, timer, dev, "tx timeout\n");
+
+ netdev_printk(KERN_DEBUG, dev, "transmit ring %u .. %u report=%u done=%u\n",
+ sky2->tx_cons, sky2->tx_prod,
+ sky2_read16(hw, sky2->port == 0 ? STAT_TXA1_RIDX : STAT_TXA2_RIDX),
+ sky2_read16(hw, Q_ADDR(txqaddr[sky2->port], Q_DONE)));
+
+ /* can't restart safely under softirq */
+ schedule_work(&hw->restart_work);
+}
+
+static int sky2_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct sky2_port *sky2 = netdev_priv(dev);
+ struct sky2_hw *hw = sky2->hw;
+ unsigned port = sky2->port;
+ int err;
+ u16 ctl, mode;
+ u32 imask;
+
+ if (!netif_running(dev)) {
+ dev->mtu = new_mtu;
+ netdev_update_features(dev);
+ return 0;
+ }
+
+ imask = sky2_read32(hw, B0_IMSK);
+ sky2_write32(hw, B0_IMSK, 0);
+ sky2_read32(hw, B0_IMSK);
+
+ netif_trans_update(dev); /* prevent tx timeout */
+ napi_disable(&hw->napi);
+ netif_tx_disable(dev);
+
+ synchronize_irq(hw->pdev->irq);
+
+ if (!(hw->flags & SKY2_HW_RAM_BUFFER))
+ sky2_set_tx_stfwd(hw, port);
+
+ ctl = gma_read16(hw, port, GM_GP_CTRL);
+ gma_write16(hw, port, GM_GP_CTRL, ctl & ~GM_GPCR_RX_ENA);
+ sky2_rx_stop(sky2);
+ sky2_rx_clean(sky2);
+
+ dev->mtu = new_mtu;
+ netdev_update_features(dev);
+
+ mode = DATA_BLIND_VAL(DATA_BLIND_DEF) | GM_SMOD_VLAN_ENA;
+ if (sky2->speed > SPEED_100)
+ mode |= IPG_DATA_VAL(IPG_DATA_DEF_1000);
+ else
+ mode |= IPG_DATA_VAL(IPG_DATA_DEF_10_100);
+
+ if (dev->mtu > ETH_DATA_LEN)
+ mode |= GM_SMOD_JUMBO_ENA;
+
+ gma_write16(hw, port, GM_SERIAL_MODE, mode);
+
+ sky2_write8(hw, RB_ADDR(rxqaddr[port], RB_CTRL), RB_ENA_OP_MD);
+
+ err = sky2_alloc_rx_skbs(sky2);
+ if (!err)
+ sky2_rx_start(sky2);
+ else
+ sky2_rx_clean(sky2);
+ sky2_write32(hw, B0_IMSK, imask);
+
+ sky2_read32(hw, B0_Y2_SP_LISR);
+ napi_enable(&hw->napi);
+
+ if (err)
+ dev_close(dev);
+ else {
+ gma_write16(hw, port, GM_GP_CTRL, ctl);
+
+ netif_wake_queue(dev);
+ }
+
+ return err;
+}
+
+static inline bool needs_copy(const struct rx_ring_info *re,
+ unsigned length)
+{
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ /* Some architectures need the IP header to be aligned */
+ if (!IS_ALIGNED(re->data_addr + ETH_HLEN, sizeof(u32)))
+ return true;
+#endif
+ return length < copybreak;
+}
+
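+/* A note on the copy/swap split: frames below the copybreak threshold
+ * (defined earlier in this driver) are copied into a freshly allocated
+ * skb so the original DMA buffer stays mapped for reuse, while larger
+ * frames instead swap in a new buffer via receive_new() below.
+ */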
+/* For small packets, copy into a new skb and reuse the existing one for the next receive */
+static struct sk_buff *receive_copy(struct sky2_port *sky2,
+ const struct rx_ring_info *re,
+ unsigned length)
+{
+ struct sk_buff *skb;
+
+ skb = netdev_alloc_skb_ip_align(sky2->netdev, length);
+ if (likely(skb)) {
+ dma_sync_single_for_cpu(&sky2->hw->pdev->dev, re->data_addr,
+ length, DMA_FROM_DEVICE);
+ skb_copy_from_linear_data(re->skb, skb->data, length);
+ skb->ip_summed = re->skb->ip_summed;
+ skb->csum = re->skb->csum;
+ skb_copy_hash(skb, re->skb);
+ __vlan_hwaccel_copy_tag(skb, re->skb);
+
+ dma_sync_single_for_device(&sky2->hw->pdev->dev,
+ re->data_addr, length,
+ DMA_FROM_DEVICE);
+ __vlan_hwaccel_clear_tag(re->skb);
+ skb_clear_hash(re->skb);
+ re->skb->ip_summed = CHECKSUM_NONE;
+ skb_put(skb, length);
+ }
+ return skb;
+}
+
+/* Adjust length of skb with fragments to match received data */
+static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
+ unsigned int length)
+{
+ int i, num_frags;
+ unsigned int size;
+
+ /* put header into skb */
+ size = min(length, hdr_space);
+ skb->tail += size;
+ skb->len += size;
+ length -= size;
+
+ num_frags = skb_shinfo(skb)->nr_frags;
+ for (i = 0; i < num_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ if (length == 0) {
+ /* don't need this page */
+ __skb_frag_unref(frag, false);
+ --skb_shinfo(skb)->nr_frags;
+ } else {
+ size = min(length, (unsigned) PAGE_SIZE);
+
+ skb_frag_size_set(frag, size);
+ skb->data_len += size;
+ skb->truesize += PAGE_SIZE;
+ skb->len += size;
+ length -= size;
+ }
+ }
+}
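+/* skb_put_frags() example: with hdr_space of 128 and a 5000-byte frame
+ * on 4 KiB pages, 128 bytes stay in the linear header and the remaining
+ * 4872 bytes land in the first two page fragments (4096 + 776); unused
+ * trailing fragments are released.
+ */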
+
+/* Normal packet - take the skb from the ring element and install a new one in its place */
+static struct sk_buff *receive_new(struct sky2_port *sky2,
+ struct rx_ring_info *re,
+ unsigned int length)
+{
+ struct sk_buff *skb;
+ struct rx_ring_info nre;
+ unsigned hdr_space = sky2->rx_data_size;
+
+ nre.skb = sky2_rx_alloc(sky2, GFP_ATOMIC);
+ if (unlikely(!nre.skb))
+ goto nobuf;
+
+ if (sky2_rx_map_skb(sky2->hw->pdev, &nre, hdr_space))
+ goto nomap;
+
+ skb = re->skb;
+ sky2_rx_unmap_skb(sky2->hw->pdev, re);
+ prefetch(skb->data);
+ *re = nre;
+
+ if (skb_shinfo(skb)->nr_frags)
+ skb_put_frags(skb, hdr_space, length);
+ else
+ skb_put(skb, length);
+ return skb;
+
+nomap:
+ dev_kfree_skb(nre.skb);
+nobuf:
+ return NULL;
+}
+
+/*
+ * Receive one packet.
+ * For larger packets, a new buffer is allocated and swapped in.
+ */
+static struct sk_buff *sky2_receive(struct net_device *dev,
+ u16 length, u32 status)
+{
+ struct sky2_port *sky2 = netdev_priv(dev);
+ struct rx_ring_info *re = sky2->rx_ring + sky2->rx_next;
+ struct sk_buff *skb = NULL;
+ u16 count = (status & GMR_FS_LEN) >> 16;
+
+ netif_printk(sky2, rx_status, KERN_DEBUG, dev,
+ "rx slot %u status 0x%x len %d\n",
+ sky2->rx_next, status, length);
+
+ sky2->rx_next = (sky2->rx_next + 1) % sky2->rx_pending;
+ prefetch(sky2->rx_ring + sky2->rx_next);
+
+ if (skb_vlan_tag_present(re->skb))
+ count -= VLAN_HLEN; /* Account for vlan tag */
+
+ /* This chip has hardware problems that generate bogus status.
+ * So do only marginal checking and expect higher-level protocols
+ * to handle crap frames.
+ */
+ if (sky2->hw->chip_id == CHIP_ID_YUKON_FE_P &&
+ sky2->hw->chip_rev == CHIP_REV_YU_FE2_A0 &&
+ length != count)
+ goto okay;
+
+ if (status & GMR_FS_ANY_ERR)
+ goto error;
+
+ if (!(status & GMR_FS_RX_OK))
+ goto resubmit;
+
+ /* if length reported by DMA does not match PHY, packet was truncated */
+ if (length != count)
+ goto error;
+
+okay:
+ if (needs_copy(re, length))
+ skb = receive_copy(sky2, re, length);
+ else
+ skb = receive_new(sky2, re, length);
+
+ dev->stats.rx_dropped += (skb == NULL);
+
+resubmit:
+ sky2_rx_submit(sky2, re);
+
+ return skb;
+
+error:
+ ++dev->stats.rx_errors;
+
+ if (net_ratelimit())
+ netif_info(sky2, rx_err, dev,
+ "rx error, status 0x%x length %d\n", status, length);
+
+ goto resubmit;
+}
+
+/* Transmit complete */
+static inline void sky2_tx_done(struct net_device *dev, u16 last)
+{
+ struct sky2_port *sky2 = netdev_priv(dev);
+
+ if (netif_running(dev)) {
+ sky2_tx_complete(sky2, last);
+
+ /* Wake unless it's detached, and called e.g. from sky2_close() */
+ if (tx_avail(sky2) > MAX_SKB_TX_LE + 4)
+ netif_wake_queue(dev);
+ }
+}
+
+static inline void sky2_skb_rx(const struct sky2_port *sky2,
+ struct sk_buff *skb)
+{
+ if (skb->ip_summed == CHECKSUM_NONE)
+ netif_receive_skb(skb);
+ else
+ napi_gro_receive(&sky2->hw->napi, skb);
+}
+
+static inline void sky2_rx_done(struct sky2_hw *hw, unsigned port,
+ unsigned packets, unsigned bytes)
+{
+ struct net_device *dev = hw->dev[port];
+ struct sky2_port *sky2 = netdev_priv(dev);
+
+ if (packets == 0)
+ return;
+
+ u64_stats_update_begin(&sky2->rx_stats.syncp);
+ sky2->rx_stats.packets += packets;
+ sky2->rx_stats.bytes += bytes;
+ u64_stats_update_end(&sky2->rx_stats.syncp);
+
+ sky2->last_rx = jiffies;
+ sky2_rx_update(netdev_priv(dev), rxqaddr[port]);
+}
+
+static void sky2_rx_checksum(struct sky2_port *sky2, u32 status)
+{
+ /* If this happens then the driver is assuming the wrong format for this chip type */
+ BUG_ON(sky2->hw->flags & SKY2_HW_NEW_LE);
+
+ /* Both checksum counters are programmed to start at
+ * the same offset, so unless there is a problem they
+ * should match. This failure is an early indication that
+ * hardware receive checksumming won't work.
+ */
+ if (likely((u16)(status >> 16) == (u16)status)) {
+ struct sk_buff *skb = sky2->rx_ring[sky2->rx_next].skb;
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ skb->csum = le16_to_cpu(status);
+ } else {
+ dev_notice(&sky2->hw->pdev->dev,
+ "%s: receive checksum problem (status = %#x)\n",
+ sky2->netdev->name, status);
+
+ /* Disable checksum offload.
+ * It will be re-enabled on the next ndo_set_features, but if it's
+ * really broken, it will get disabled again.
+ */
+ sky2->netdev->features &= ~NETIF_F_RXCSUM;
+ sky2_write32(sky2->hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR),
+ BMU_DIS_RX_CHKSUM);
+ }
+}
+
+static void sky2_rx_tag(struct sky2_port *sky2, u16 length)
+{
+ struct sk_buff *skb;
+
+ skb = sky2->rx_ring[sky2->rx_next].skb;
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(length));
+}
+
+static void sky2_rx_hash(struct sky2_port *sky2, u32 status)
+{
+ struct sk_buff *skb;
+
+ skb = sky2->rx_ring[sky2->rx_next].skb;
+ skb_set_hash(skb, le32_to_cpu(status), PKT_HASH_TYPE_L3);
+}
+
+/* Process status response ring */
+static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
+{
+ int work_done = 0;
+ unsigned int total_bytes[2] = { 0 };
+ unsigned int total_packets[2] = { 0 };
+
+ if (to_do <= 0)
+ return work_done;
+
+ rmb();
+ do {
+ struct sky2_port *sky2;
+ struct sky2_status_le *le = hw->st_le + hw->st_idx;
+ unsigned port;
+ struct net_device *dev;
+ struct sk_buff *skb;
+ u32 status;
+ u16 length;
+ u8 opcode = le->opcode;
+
+ if (!(opcode & HW_OWNER))
+ break;
+
+ hw->st_idx = RING_NEXT(hw->st_idx, hw->st_size);
+
+ port = le->css & CSS_LINK_BIT;
+ dev = hw->dev[port];
+ sky2 = netdev_priv(dev);
+ length = le16_to_cpu(le->length);
+ status = le32_to_cpu(le->status);
+
+ le->opcode = 0;
+ switch (opcode & ~HW_OWNER) {
+ case OP_RXSTAT:
+ total_packets[port]++;
+ total_bytes[port] += length;
+
+ skb = sky2_receive(dev, length, status);
+ if (!skb)
+ break;
+
+ /* This chip reports checksum status differently */
+ if (hw->flags & SKY2_HW_NEW_LE) {
+ if ((dev->features & NETIF_F_RXCSUM) &&
+ (le->css & (CSS_ISIPV4 | CSS_ISIPV6)) &&
+ (le->css & CSS_TCPUDPCSOK))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else
+ skb->ip_summed = CHECKSUM_NONE;
+ }
+
+ skb->protocol = eth_type_trans(skb, dev);
+ sky2_skb_rx(sky2, skb);
+
+ /* Stop after net poll weight */
+ if (++work_done >= to_do)
+ goto exit_loop;
+ break;
+
+ case OP_RXVLAN:
+ sky2_rx_tag(sky2, length);
+ break;
+
+ case OP_RXCHKSVLAN:
+ sky2_rx_tag(sky2, length);
+ fallthrough;
+ case OP_RXCHKS:
+ if (likely(dev->features & NETIF_F_RXCSUM))
+ sky2_rx_checksum(sky2, status);
+ break;
+
+ case OP_RSS_HASH:
+ sky2_rx_hash(sky2, status);
+ break;
+
+ case OP_TXINDEXLE:
+ /* TX index reports status for both ports */
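+ /* e.g. port 0's done index is status[11:0]; port 1's is
+ * status[31:24] for the low byte plus length[3:0] for bits 11:8,
+ * exactly as decoded below.
+ */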
+ sky2_tx_done(hw->dev[0], status & 0xfff);
+ if (hw->dev[1])
+ sky2_tx_done(hw->dev[1],
+ ((status >> 24) & 0xff)
+ | (u16)(length & 0xf) << 8);
+ break;
+
+ default:
+ if (net_ratelimit())
+ pr_warn("unknown status opcode 0x%x\n", opcode);
+ }
+ } while (hw->st_idx != idx);
+
+ /* Fully processed status ring so clear irq */
+ sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ);
+
+exit_loop:
+ sky2_rx_done(hw, 0, total_packets[0], total_bytes[0]);
+ sky2_rx_done(hw, 1, total_packets[1], total_bytes[1]);
+
+ return work_done;
+}
+
+static void sky2_hw_error(struct sky2_hw *hw, unsigned port, u32 status)
+{
+ struct net_device *dev = hw->dev[port];
+
+ if (net_ratelimit())
+ netdev_info(dev, "hw error interrupt status 0x%x\n", status);
+
+ if (status & Y2_IS_PAR_RD1) {
+ if (net_ratelimit())
+ netdev_err(dev, "ram data read parity error\n");
+ /* Clear IRQ */
+ sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_RD_PERR);
+ }
+
+ if (status & Y2_IS_PAR_WR1) {
+ if (net_ratelimit())
+ netdev_err(dev, "ram data write parity error\n");
+
+ sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_WR_PERR);
+ }
+
+ if (status & Y2_IS_PAR_MAC1) {
+ if (net_ratelimit())
+ netdev_err(dev, "MAC parity error\n");
+ sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_PE);
+ }
+
+ if (status & Y2_IS_PAR_RX1) {
+ if (net_ratelimit())
+ netdev_err(dev, "RX parity error\n");
+ sky2_write32(hw, Q_ADDR(rxqaddr[port], Q_CSR), BMU_CLR_IRQ_PAR);
+ }
+
+ if (status & Y2_IS_TCP_TXA1) {
+ if (net_ratelimit())
+ netdev_err(dev, "TCP segmentation error\n");
+ sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), BMU_CLR_IRQ_TCP);
+ }
+}
+
+static void sky2_hw_intr(struct sky2_hw *hw)
+{
+ struct pci_dev *pdev = hw->pdev;
+ u32 status = sky2_read32(hw, B0_HWE_ISRC);
+ u32 hwmsk = sky2_read32(hw, B0_HWE_IMSK);
+
+ status &= hwmsk;
+
+ if (status & Y2_IS_TIST_OV)
+ sky2_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
+
+ if (status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) {
+ u16 pci_err;
+
+ sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
+ pci_err = sky2_pci_read16(hw, PCI_STATUS);
+ if (net_ratelimit())
+ dev_err(&pdev->dev, "PCI hardware error (0x%x)\n",
+ pci_err);
+
+ sky2_pci_write16(hw, PCI_STATUS,
+ pci_err | PCI_STATUS_ERROR_BITS);
+ sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
+ }
+
+ if (status & Y2_IS_PCI_EXP) {
+ /* PCI-Express uncorrectable Error occurred */
+ u32 err;
+
+ sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
+ err = sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS);
+ sky2_write32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS,
+ 0xfffffffful);
+ if (net_ratelimit())
+ dev_err(&pdev->dev, "PCI Express error (0x%x)\n", err);
+
+ sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS);
+ sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
+ }
+
+ if (status & Y2_HWE_L1_MASK)
+ sky2_hw_error(hw, 0, status);
+ status >>= 8;
+ if (status & Y2_HWE_L1_MASK)
+ sky2_hw_error(hw, 1, status);
+}
+
+static void sky2_mac_intr(struct sky2_hw *hw, unsigned port)
+{
+ struct net_device *dev = hw->dev[port];
+ struct sky2_port *sky2 = netdev_priv(dev);
+ u8 status = sky2_read8(hw, SK_REG(port, GMAC_IRQ_SRC));
+
+ netif_info(sky2, intr, dev, "mac interrupt status 0x%x\n", status);
+
+ if (status & GM_IS_RX_CO_OV)
+ gma_read16(hw, port, GM_RX_IRQ_SRC);
+
+ if (status & GM_IS_TX_CO_OV)
+ gma_read16(hw, port, GM_TX_IRQ_SRC);
+
+ if (status & GM_IS_RX_FF_OR) {
+ ++dev->stats.rx_fifo_errors;
+ sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_CLI_RX_FO);
+ }
+
+ if (status & GM_IS_TX_FF_UR) {
+ ++dev->stats.tx_fifo_errors;
+ sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_FU);
+ }
+}
+
+/* This should never happen; it is a bug. */
+static void sky2_le_error(struct sky2_hw *hw, unsigned port, u16 q)
+{
+ struct net_device *dev = hw->dev[port];
+ u16 idx = sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_GET_IDX));
+
+ dev_err(&hw->pdev->dev, "%s: descriptor error q=%#x get=%u put=%u\n",
+ dev->name, (unsigned) q, (unsigned) idx,
+ (unsigned) sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX)));
+
+ sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_CLR_IRQ_CHK);
+}
+
+static int sky2_rx_hung(struct net_device *dev)
+{
+ struct sky2_port *sky2 = netdev_priv(dev);
+ struct sky2_hw *hw = sky2->hw;
+ unsigned port = sky2->port;
+ unsigned rxq = rxqaddr[port];
+ u32 mac_rp = sky2_read32(hw, SK_REG(port, RX_GMF_RP));
+ u8 mac_lev = sky2_read8(hw, SK_REG(port, RX_GMF_RLEV));
+ u8 fifo_rp = sky2_read8(hw, Q_ADDR(rxq, Q_RP));
+ u8 fifo_lev = sky2_read8(hw, Q_ADDR(rxq, Q_RL));
+
+ /* If idle and MAC or PCI is stuck */
+ if (sky2->check.last == sky2->last_rx &&
+ ((mac_rp == sky2->check.mac_rp &&
+ mac_lev != 0 && mac_lev >= sky2->check.mac_lev) ||
+ /* Check if the PCI RX FIFO is hung */
+ (fifo_rp == sky2->check.fifo_rp &&
+ fifo_lev != 0 && fifo_lev >= sky2->check.fifo_lev))) {
+ netdev_printk(KERN_DEBUG, dev,
+ "hung mac %d:%d fifo %d (%d:%d)\n",
+ mac_lev, mac_rp, fifo_lev,
+ fifo_rp, sky2_read8(hw, Q_ADDR(rxq, Q_WP)));
+ return 1;
+ } else {
+ sky2->check.last = sky2->last_rx;
+ sky2->check.mac_rp = mac_rp;
+ sky2->check.mac_lev = mac_lev;
+ sky2->check.fifo_rp = fifo_rp;
+ sky2->check.fifo_lev = fifo_lev;
+ return 0;
+ }
+}
+
+static void sky2_watchdog(struct timer_list *t)
+{
+ struct sky2_hw *hw = from_timer(hw, t, watchdog_timer);
+
+ /* Check for lost IRQ once a second */
+ if (sky2_read32(hw, B0_ISRC)) {
+ napi_schedule(&hw->napi);
+ } else {
+ int i, active = 0;
+
+ for (i = 0; i < hw->ports; i++) {
+ struct net_device *dev = hw->dev[i];
+ if (!netif_running(dev))
+ continue;
+ ++active;
+
+ /* For chips with Rx FIFO, check if stuck */
+ if ((hw->flags & SKY2_HW_RAM_BUFFER) &&
+ sky2_rx_hung(dev)) {
+ netdev_info(dev, "receiver hang detected\n");
+ schedule_work(&hw->restart_work);
+ return;
+ }
+ }
+
+ if (active == 0)
+ return;
+ }
+
+ mod_timer(&hw->watchdog_timer, round_jiffies(jiffies + HZ));
+}
+
+/* Hardware/software error handling */
+static void sky2_err_intr(struct sky2_hw *hw, u32 status)
+{
+ if (net_ratelimit())
+ dev_warn(&hw->pdev->dev, "error interrupt status=%#x\n", status);
+
+ if (status & Y2_IS_HW_ERR)
+ sky2_hw_intr(hw);
+
+ if (status & Y2_IS_IRQ_MAC1)
+ sky2_mac_intr(hw, 0);
+
+ if (status & Y2_IS_IRQ_MAC2)
+ sky2_mac_intr(hw, 1);
+
+ if (status & Y2_IS_CHK_RX1)
+ sky2_le_error(hw, 0, Q_R1);
+
+ if (status & Y2_IS_CHK_RX2)
+ sky2_le_error(hw, 1, Q_R2);
+
+ if (status & Y2_IS_CHK_TXA1)
+ sky2_le_error(hw, 0, Q_XA1);
+
+ if (status & Y2_IS_CHK_TXA2)
+ sky2_le_error(hw, 1, Q_XA2);
+}
+
+static int sky2_poll(struct napi_struct *napi, int work_limit)
+{
+ struct sky2_hw *hw = container_of(napi, struct sky2_hw, napi);
+ u32 status = sky2_read32(hw, B0_Y2_SP_EISR);
+ int work_done = 0;
+ u16 idx;
+
+ if (unlikely(status & Y2_IS_ERROR))
+ sky2_err_intr(hw, status);
+
+ if (status & Y2_IS_IRQ_PHY1)
+ sky2_phy_intr(hw, 0);
+
+ if (status & Y2_IS_IRQ_PHY2)
+ sky2_phy_intr(hw, 1);
+
+ if (status & Y2_IS_PHY_QLNK)
+ sky2_qlink_intr(hw);
+
+ while ((idx = sky2_read16(hw, STAT_PUT_IDX)) != hw->st_idx) {
+ work_done += sky2_status_intr(hw, work_limit - work_done, idx);
+
+ if (work_done >= work_limit)
+ goto done;
+ }
+
+ napi_complete_done(napi, work_done);
+ sky2_read32(hw, B0_Y2_SP_LISR);
+done:
+
+ return work_done;
+}
+
+static irqreturn_t sky2_intr(int irq, void *dev_id)
+{
+ struct sky2_hw *hw = dev_id;
+ u32 status;
+
+ /* Reading this register masks interrupts as a side effect */
+ status = sky2_read32(hw, B0_Y2_SP_ISRC2);
+ if (status == 0 || status == ~0) {
+ sky2_write32(hw, B0_Y2_SP_ICR, 2);
+ return IRQ_NONE;
+ }
+
+ prefetch(&hw->st_le[hw->st_idx]);
+
+ napi_schedule(&hw->napi);
+
+ return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void sky2_netpoll(struct net_device *dev)
+{
+ struct sky2_port *sky2 = netdev_priv(dev);
+
+ napi_schedule(&sky2->hw->napi);
+}
+#endif
+
+/* Chip internal frequency for clock calculations */
+static u32 sky2_mhz(const struct sky2_hw *hw)
+{
+ switch (hw->chip_id) {
+ case CHIP_ID_YUKON_EC:
+ case CHIP_ID_YUKON_EC_U:
+ case CHIP_ID_YUKON_EX:
+ case CHIP_ID_YUKON_SUPR:
+ case CHIP_ID_YUKON_UL_2:
+ case CHIP_ID_YUKON_OPT:
+ case CHIP_ID_YUKON_PRM:
+ case CHIP_ID_YUKON_OP_2:
+ return 125;
+
+ case CHIP_ID_YUKON_FE:
+ return 100;
+
+ case CHIP_ID_YUKON_FE_P:
+ return 50;
+
+ case CHIP_ID_YUKON_XL:
+ return 156;
+
+ default:
+ BUG();
+ }
+}
+
+static inline u32 sky2_us2clk(const struct sky2_hw *hw, u32 us)
+{
+ return sky2_mhz(hw) * us;
+}
+
+static inline u32 sky2_clk2us(const struct sky2_hw *hw, u32 clk)
+{
+ return clk / sky2_mhz(hw);
+}
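+/* For example, on a 125 MHz part sky2_us2clk(hw, 20) yields 2500 clock
+ * ticks and sky2_clk2us(hw, 2500) gives back 20 us.
+ */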
+
+
+static int sky2_init(struct sky2_hw *hw)
+{
+ u8 t8;
+
+ /* Enable all clocks and check for bad PCI access */
+ sky2_pci_write32(hw, PCI_DEV_REG3, 0);
+
+ sky2_write8(hw, B0_CTST, CS_RST_CLR);
+
+ hw->chip_id = sky2_read8(hw, B2_CHIP_ID);
+ hw->chip_rev = (sky2_read8(hw, B2_MAC_CFG) & CFG_CHIP_R_MSK) >> 4;
+
+ switch (hw->chip_id) {
+ case CHIP_ID_YUKON_XL:
+ hw->flags = SKY2_HW_GIGABIT | SKY2_HW_NEWER_PHY;
+ if (hw->chip_rev < CHIP_REV_YU_XL_A2)
+ hw->flags |= SKY2_HW_RSS_BROKEN;
+ break;
+
+ case CHIP_ID_YUKON_EC_U:
+ hw->flags = SKY2_HW_GIGABIT
+ | SKY2_HW_NEWER_PHY
+ | SKY2_HW_ADV_POWER_CTL;
+ break;
+
+ case CHIP_ID_YUKON_EX:
+ hw->flags = SKY2_HW_GIGABIT
+ | SKY2_HW_NEWER_PHY
+ | SKY2_HW_NEW_LE
+ | SKY2_HW_ADV_POWER_CTL
+ | SKY2_HW_RSS_CHKSUM;
+
+ /* New transmit checksum */
+ if (hw->chip_rev != CHIP_REV_YU_EX_B0)
+ hw->flags |= SKY2_HW_AUTO_TX_SUM;
+ break;
+
+ case CHIP_ID_YUKON_EC:
+ /* This rev is really old, and requires untested workarounds */
+ if (hw->chip_rev == CHIP_REV_YU_EC_A1) {
+ dev_err(&hw->pdev->dev, "unsupported revision Yukon-EC rev A1\n");
+ return -EOPNOTSUPP;
+ }
+ hw->flags = SKY2_HW_GIGABIT | SKY2_HW_RSS_BROKEN;
+ break;
+
+ case CHIP_ID_YUKON_FE:
+ hw->flags = SKY2_HW_RSS_BROKEN;
+ break;
+
+ case CHIP_ID_YUKON_FE_P:
+ hw->flags = SKY2_HW_NEWER_PHY
+ | SKY2_HW_NEW_LE
+ | SKY2_HW_AUTO_TX_SUM
+ | SKY2_HW_ADV_POWER_CTL;
+
+ /* The workaround for bogus status conflicts with VLAN tag detection. */
+ if (hw->chip_rev == CHIP_REV_YU_FE2_A0)
+ hw->flags |= SKY2_HW_VLAN_BROKEN | SKY2_HW_RSS_CHKSUM;
+ break;
+
+ case CHIP_ID_YUKON_SUPR:
+ hw->flags = SKY2_HW_GIGABIT
+ | SKY2_HW_NEWER_PHY
+ | SKY2_HW_NEW_LE
+ | SKY2_HW_AUTO_TX_SUM
+ | SKY2_HW_ADV_POWER_CTL;
+
+ if (hw->chip_rev == CHIP_REV_YU_SU_A0)
+ hw->flags |= SKY2_HW_RSS_CHKSUM;
+ break;
+
+ case CHIP_ID_YUKON_UL_2:
+ hw->flags = SKY2_HW_GIGABIT
+ | SKY2_HW_ADV_POWER_CTL;
+ break;
+
+ case CHIP_ID_YUKON_OPT:
+ case CHIP_ID_YUKON_PRM:
+ case CHIP_ID_YUKON_OP_2:
+ hw->flags = SKY2_HW_GIGABIT
+ | SKY2_HW_NEW_LE
+ | SKY2_HW_ADV_POWER_CTL;
+ break;
+
+ default:
+ dev_err(&hw->pdev->dev, "unsupported chip type 0x%x\n",
+ hw->chip_id);
+ return -EOPNOTSUPP;
+ }
+
+ hw->pmd_type = sky2_read8(hw, B2_PMD_TYP);
+ if (hw->pmd_type == 'L' || hw->pmd_type == 'S' || hw->pmd_type == 'P')
+ hw->flags |= SKY2_HW_FIBRE_PHY;
+
+ hw->ports = 1;
+ t8 = sky2_read8(hw, B2_Y2_HW_RES);
+ if ((t8 & CFG_DUAL_MAC_MSK) == CFG_DUAL_MAC_MSK) {
+ if (!(sky2_read8(hw, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC))
+ ++hw->ports;
+ }
+
+ if (sky2_read8(hw, B2_E_0))
+ hw->flags |= SKY2_HW_RAM_BUFFER;
+
+ return 0;
+}
+
+static void sky2_reset(struct sky2_hw *hw)
+{
+ struct pci_dev *pdev = hw->pdev;
+ u16 status;
+ int i;
+ u32 hwe_mask = Y2_HWE_ALL_MASK;
+
+ /* disable ASF */
+ if (hw->chip_id == CHIP_ID_YUKON_EX
+ || hw->chip_id == CHIP_ID_YUKON_SUPR) {
+ sky2_write32(hw, CPU_WDOG, 0);
+ status = sky2_read16(hw, HCU_CCSR);
+ status &= ~(HCU_CCSR_AHB_RST | HCU_CCSR_CPU_RST_MODE |
+ HCU_CCSR_UC_STATE_MSK);
+ /*
+ * CPU clock divider shouldn't be used because
+ * - ASF firmware may malfunction
+ * - Yukon-Supreme: Parallel FLASH doesn't support divided clocks
+ */
+ status &= ~HCU_CCSR_CPU_CLK_DIVIDE_MSK;
+ sky2_write16(hw, HCU_CCSR, status);
+ sky2_write32(hw, CPU_WDOG, 0);
+ } else
+ sky2_write8(hw, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
+ sky2_write16(hw, B0_CTST, Y2_ASF_DISABLE);
+
+ /* do a SW reset */
+ sky2_write8(hw, B0_CTST, CS_RST_SET);
+ sky2_write8(hw, B0_CTST, CS_RST_CLR);
+
+ /* allow writes to PCI config */
+ sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
+
+ /* clear PCI errors, if any */
+ status = sky2_pci_read16(hw, PCI_STATUS);
+ status |= PCI_STATUS_ERROR_BITS;
+ sky2_pci_write16(hw, PCI_STATUS, status);
+
+ sky2_write8(hw, B0_CTST, CS_MRST_CLR);
+
+ if (pci_is_pcie(pdev)) {
+ sky2_write32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS,
+ 0xfffffffful);
+
+ /* If the error bit is stuck on, ignore it */
+ if (sky2_read32(hw, B0_HWE_ISRC) & Y2_IS_PCI_EXP)
+ dev_info(&pdev->dev, "ignoring stuck error report bit\n");
+ else
+ hwe_mask |= Y2_IS_PCI_EXP;
+ }
+
+ sky2_power_on(hw);
+ sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
+
+ for (i = 0; i < hw->ports; i++) {
+ sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET);
+ sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_CLR);
+
+ if (hw->chip_id == CHIP_ID_YUKON_EX ||
+ hw->chip_id == CHIP_ID_YUKON_SUPR)
+ sky2_write16(hw, SK_REG(i, GMAC_CTRL),
+ GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON
+ | GMC_BYP_RETR_ON);
+
+ }
+
+ if (hw->chip_id == CHIP_ID_YUKON_SUPR && hw->chip_rev > CHIP_REV_YU_SU_B0) {
+ /* enable MACSec clock gating */
+ sky2_pci_write32(hw, PCI_DEV_REG3, P_CLK_MACSEC_DIS);
+ }
+
+ if (hw->chip_id == CHIP_ID_YUKON_OPT ||
+ hw->chip_id == CHIP_ID_YUKON_PRM ||
+ hw->chip_id == CHIP_ID_YUKON_OP_2) {
+ u16 reg;
+
+ if (hw->chip_id == CHIP_ID_YUKON_OPT && hw->chip_rev == 0) {
+ /* disable PCI-E PHY power down (set PHY reg 0x80, bit 7) */
+ sky2_write32(hw, Y2_PEX_PHY_DATA, (0x80UL << 16) | (1 << 7));
+
+ /* set PHY Link Detect Timer to 1.1 second (11x 100ms) */
+ reg = 10;
+
+ /* re-enable PEX PM in PEX PHY debug reg. 8 (clear bit 12) */
+ sky2_write32(hw, Y2_PEX_PHY_DATA, PEX_DB_ACCESS | (0x08UL << 16));
+ } else {
+ /* set PHY Link Detect Timer to 0.4 second (4x 100ms) */
+ reg = 3;
+ }
+
+ reg <<= PSM_CONFIG_REG4_TIMER_PHY_LINK_DETECT_BASE;
+ reg |= PSM_CONFIG_REG4_RST_PHY_LINK_DETECT;
+
+ /* reset PHY Link Detect */
+ sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
+ sky2_pci_write16(hw, PSM_CONFIG_REG4, reg);
+
+ /* check if PSMv2 was running before */
+ reg = sky2_pci_read16(hw, PSM_CONFIG_REG3);
+ if (reg & PCI_EXP_LNKCTL_ASPMC)
+ /* restore the PCIe Link Control register */
+ sky2_pci_write16(hw, pdev->pcie_cap + PCI_EXP_LNKCTL,
+ reg);
+
+ if (hw->chip_id == CHIP_ID_YUKON_PRM &&
+ hw->chip_rev == CHIP_REV_YU_PRM_A0) {
+ /* change PHY Interrupt polarity to low active */
+ reg = sky2_read16(hw, GPHY_CTRL);
+ sky2_write16(hw, GPHY_CTRL, reg | GPC_INTPOL);
+
+ /* adapt HW for low active PHY Interrupt */
+ reg = sky2_read16(hw, Y2_CFG_SPC + PCI_LDO_CTRL);
+ sky2_write16(hw, Y2_CFG_SPC + PCI_LDO_CTRL, reg | PHY_M_UNDOC1);
+ }
+
+ sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
+
+ /* re-enable PEX PM in PEX PHY debug reg. 8 (clear bit 12) */
+ sky2_write32(hw, Y2_PEX_PHY_DATA, PEX_DB_ACCESS | (0x08UL << 16));
+ }
+
+ /* Clear I2C IRQ noise */
+ sky2_write32(hw, B2_I2C_IRQ, 1);
+
+ /* turn off hardware timer (unused) */
+ sky2_write8(hw, B2_TI_CTRL, TIM_STOP);
+ sky2_write8(hw, B2_TI_CTRL, TIM_CLR_IRQ);
+
+ /* Turn off descriptor polling */
+ sky2_write32(hw, B28_DPT_CTRL, DPT_STOP);
+
+ /* Turn off receive timestamp */
+ sky2_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_STOP);
+ sky2_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
+
+ /* enable the Tx Arbiters */
+ for (i = 0; i < hw->ports; i++)
+ sky2_write8(hw, SK_REG(i, TXA_CTRL), TXA_ENA_ARB);
+
+ /* Initialize ram interface */
+ for (i = 0; i < hw->ports; i++) {
+ sky2_write8(hw, RAM_BUFFER(i, B3_RI_CTRL), RI_RST_CLR);
+
+ sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_R1), SK_RI_TO_53);
+ sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XA1), SK_RI_TO_53);
+ sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XS1), SK_RI_TO_53);
+ sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_R1), SK_RI_TO_53);
+ sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XA1), SK_RI_TO_53);
+ sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XS1), SK_RI_TO_53);
+ sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_R2), SK_RI_TO_53);
+ sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XA2), SK_RI_TO_53);
+ sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XS2), SK_RI_TO_53);
+ sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_R2), SK_RI_TO_53);
+ sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XA2), SK_RI_TO_53);
+ sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XS2), SK_RI_TO_53);
+ }
+
+ sky2_write32(hw, B0_HWE_IMSK, hwe_mask);
+
+ for (i = 0; i < hw->ports; i++)
+ sky2_gmac_reset(hw, i);
+
+ memset(hw->st_le, 0, hw->st_size * sizeof(struct sky2_status_le));
+ hw->st_idx = 0;
+
+ sky2_write32(hw, STAT_CTRL, SC_STAT_RST_SET);
+ sky2_write32(hw, STAT_CTRL, SC_STAT_RST_CLR);
+
+ sky2_write32(hw, STAT_LIST_ADDR_LO, hw->st_dma);
+ sky2_write32(hw, STAT_LIST_ADDR_HI, (u64) hw->st_dma >> 32);
+
+ /* Set the list last index */
+ sky2_write16(hw, STAT_LAST_IDX, hw->st_size - 1);
+
+ sky2_write16(hw, STAT_TX_IDX_TH, 10);
+ sky2_write8(hw, STAT_FIFO_WM, 16);
+
+ /* set Status-FIFO ISR watermark */
+ if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0)
+ sky2_write8(hw, STAT_FIFO_ISR_WM, 4);
+ else
+ sky2_write8(hw, STAT_FIFO_ISR_WM, 16);
+
+ sky2_write32(hw, STAT_TX_TIMER_INI, sky2_us2clk(hw, 1000));
+ sky2_write32(hw, STAT_ISR_TIMER_INI, sky2_us2clk(hw, 20));
+ sky2_write32(hw, STAT_LEV_TIMER_INI, sky2_us2clk(hw, 100));
+
+ /* enable status unit */
+ sky2_write32(hw, STAT_CTRL, SC_STAT_OP_ON);
+
+ sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START);
+ sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_START);
+ sky2_write8(hw, STAT_ISR_TIMER_CTRL, TIM_START);
+}
+
+/* Take device down (offline).
+ * Equivalent to doing dev_stop() but this does not
+ * inform upper layers of the transition.
+ */
+static void sky2_detach(struct net_device *dev)
+{
+ if (netif_running(dev)) {
+ netif_tx_lock(dev);
+ netif_device_detach(dev); /* stop txq */
+ netif_tx_unlock(dev);
+ sky2_close(dev);
+ }
+}
+
+/* Bring device back after doing sky2_detach */
+static int sky2_reattach(struct net_device *dev)
+{
+ int err = 0;
+
+ if (netif_running(dev)) {
+ err = sky2_open(dev);
+ if (err) {
+ netdev_info(dev, "could not restart %d\n", err);
+ dev_close(dev);
+ } else {
+ netif_device_attach(dev);
+ sky2_set_multicast(dev);
+ }
+ }
+
+ return err;
+}
+
+static void sky2_all_down(struct sky2_hw *hw)
+{
+ int i;
+
+ if (hw->flags & SKY2_HW_IRQ_SETUP) {
+ sky2_write32(hw, B0_IMSK, 0);
+ sky2_read32(hw, B0_IMSK);
+
+ synchronize_irq(hw->pdev->irq);
+ napi_disable(&hw->napi);
+ }
+
+ for (i = 0; i < hw->ports; i++) {
+ struct net_device *dev = hw->dev[i];
+ struct sky2_port *sky2 = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ continue;
+
+ netif_carrier_off(dev);
+ netif_tx_disable(dev);
+ sky2_hw_down(sky2);
+ }
+}
+
+static void sky2_all_up(struct sky2_hw *hw)
+{
+ u32 imask = Y2_IS_BASE;
+ int i;
+
+ for (i = 0; i < hw->ports; i++) {
+ struct net_device *dev = hw->dev[i];
+ struct sky2_port *sky2 = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ continue;
+
+ sky2_hw_up(sky2);
+ sky2_set_multicast(dev);
+ imask |= portirq_msk[i];
+ netif_wake_queue(dev);
+ }
+
+ if (hw->flags & SKY2_HW_IRQ_SETUP) {
+ sky2_write32(hw, B0_IMSK, imask);
+ sky2_read32(hw, B0_IMSK);
+ sky2_read32(hw, B0_Y2_SP_LISR);
+ napi_enable(&hw->napi);
+ }
+}
+
+static void sky2_restart(struct work_struct *work)
+{
+ struct sky2_hw *hw = container_of(work, struct sky2_hw, restart_work);
+
+ rtnl_lock();
+
+ sky2_all_down(hw);
+ sky2_reset(hw);
+ sky2_all_up(hw);
+
+ rtnl_unlock();
+}
+
+static inline u8 sky2_wol_supported(const struct sky2_hw *hw)
+{
+ return sky2_is_copper(hw) ? (WAKE_PHY | WAKE_MAGIC) : 0;
+}
+
+static void sky2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ const struct sky2_port *sky2 = netdev_priv(dev);
+
+ wol->supported = sky2_wol_supported(sky2->hw);
+ wol->wolopts = sky2->wol;
+}
+
+static int sky2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct sky2_port *sky2 = netdev_priv(dev);
+ struct sky2_hw *hw = sky2->hw;
+ bool enable_wakeup = false;
+ int i;
+
+ if ((wol->wolopts & ~sky2_wol_supported(sky2->hw)) ||
+ !device_can_wakeup(&hw->pdev->dev))
+ return -EOPNOTSUPP;
+
+ sky2->wol = wol->wolopts;
+
+ for (i = 0; i < hw->ports; i++) {
+ struct net_device *dev = hw->dev[i];
+ struct sky2_port *sky2 = netdev_priv(dev);
+
+ if (sky2->wol)
+ enable_wakeup = true;
+ }
+ device_set_wakeup_enable(&hw->pdev->dev, enable_wakeup);
+
+ return 0;
+}
+
+static u32 sky2_supported_modes(const struct sky2_hw *hw)
+{
+ if (sky2_is_copper(hw)) {
+ u32 modes = SUPPORTED_10baseT_Half
+ | SUPPORTED_10baseT_Full
+ | SUPPORTED_100baseT_Half
+ | SUPPORTED_100baseT_Full;
+
+ if (hw->flags & SKY2_HW_GIGABIT)
+ modes |= SUPPORTED_1000baseT_Half
+ | SUPPORTED_1000baseT_Full;
+ return modes;
+ } else
+ return SUPPORTED_1000baseT_Half
+ | SUPPORTED_1000baseT_Full;
+}
+
+static int sky2_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct sky2_port *sky2 = netdev_priv(dev);
+ struct sky2_hw *hw = sky2->hw;
+ u32 supported, advertising;
+
+ supported = sky2_supported_modes(hw);
+ cmd->base.phy_address = PHY_ADDR_MARV;
+ if (sky2_is_copper(hw)) {
+ cmd->base.port = PORT_TP;
+ cmd->base.speed = sky2->speed;
+ supported |= SUPPORTED_Autoneg | SUPPORTED_TP;
+ } else {
+ cmd->base.speed = SPEED_1000;
+ cmd->base.port = PORT_FIBRE;
+ supported |= SUPPORTED_Autoneg | SUPPORTED_FIBRE;
+ }
+
+ advertising = sky2->advertising;
+ cmd->base.autoneg = (sky2->flags & SKY2_FLAG_AUTO_SPEED)
+ ? AUTONEG_ENABLE : AUTONEG_DISABLE;
+ cmd->base.duplex = sky2->duplex;
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
+
+ return 0;
+}
+
+static int sky2_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct sky2_port *sky2 = netdev_priv(dev);
+ const struct sky2_hw *hw = sky2->hw;
+ u32 supported = sky2_supported_modes(hw);
+ u32 new_advertising;
+
+ ethtool_convert_link_mode_to_legacy_u32(&new_advertising,
+ cmd->link_modes.advertising);
+
+ if (cmd->base.autoneg == AUTONEG_ENABLE) {
+ if (new_advertising & ~supported)
+ return -EINVAL;
+
+ if (sky2_is_copper(hw))
+ sky2->advertising = new_advertising |
+ ADVERTISED_TP |
+ ADVERTISED_Autoneg;
+ else
+ sky2->advertising = new_advertising |
+ ADVERTISED_FIBRE |
+ ADVERTISED_Autoneg;
+
+ sky2->flags |= SKY2_FLAG_AUTO_SPEED;
+ sky2->duplex = -1;
+ sky2->speed = -1;
+ } else {
+ u32 setting;
+ u32 speed = cmd->base.speed;
+
+ switch (speed) {
+ case SPEED_1000:
+ if (cmd->base.duplex == DUPLEX_FULL)
+ setting = SUPPORTED_1000baseT_Full;
+ else if (cmd->base.duplex == DUPLEX_HALF)
+ setting = SUPPORTED_1000baseT_Half;
+ else
+ return -EINVAL;
+ break;
+ case SPEED_100:
+ if (cmd->base.duplex == DUPLEX_FULL)
+ setting = SUPPORTED_100baseT_Full;
+ else if (cmd->base.duplex == DUPLEX_HALF)
+ setting = SUPPORTED_100baseT_Half;
+ else
+ return -EINVAL;
+ break;
+
+ case SPEED_10:
+ if (cmd->base.duplex == DUPLEX_FULL)
+ setting = SUPPORTED_10baseT_Full;
+ else if (cmd->base.duplex == DUPLEX_HALF)
+ setting = SUPPORTED_10baseT_Half;
+ else
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if ((setting & supported) == 0)
+ return -EINVAL;
+
+ sky2->speed = speed;
+ sky2->duplex = cmd->base.duplex;
+ sky2->flags &= ~SKY2_FLAG_AUTO_SPEED;
+ }
+
+ if (netif_running(dev)) {
+ sky2_phy_reinit(sky2);
+ sky2_set_multicast(dev);
+ }
+
+ return 0;
+}
+
+static void sky2_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ struct sky2_port *sky2 = netdev_priv(dev);
+
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, pci_name(sky2->hw->pdev),
+ sizeof(info->bus_info));
+}
+
+static const struct sky2_stat {
+ char name[ETH_GSTRING_LEN];
+ u16 offset;
+} sky2_stats[] = {
+ { "tx_bytes", GM_TXO_OK_HI },
+ { "rx_bytes", GM_RXO_OK_HI },
+ { "tx_broadcast", GM_TXF_BC_OK },
+ { "rx_broadcast", GM_RXF_BC_OK },
+ { "tx_multicast", GM_TXF_MC_OK },
+ { "rx_multicast", GM_RXF_MC_OK },
+ { "tx_unicast", GM_TXF_UC_OK },
+ { "rx_unicast", GM_RXF_UC_OK },
+ { "tx_mac_pause", GM_TXF_MPAUSE },
+ { "rx_mac_pause", GM_RXF_MPAUSE },
+ { "collisions", GM_TXF_COL },
+ { "late_collision",GM_TXF_LAT_COL },
+ { "aborted", GM_TXF_ABO_COL },
+ { "single_collisions", GM_TXF_SNG_COL },
+ { "multi_collisions", GM_TXF_MUL_COL },
+
+ { "rx_short", GM_RXF_SHT },
+ { "rx_runt", GM_RXE_FRAG },
+ { "rx_64_byte_packets", GM_RXF_64B },
+ { "rx_65_to_127_byte_packets", GM_RXF_127B },
+ { "rx_128_to_255_byte_packets", GM_RXF_255B },
+ { "rx_256_to_511_byte_packets", GM_RXF_511B },
+ { "rx_512_to_1023_byte_packets", GM_RXF_1023B },
+ { "rx_1024_to_1518_byte_packets", GM_RXF_1518B },
+ { "rx_1518_to_max_byte_packets", GM_RXF_MAX_SZ },
+ { "rx_too_long", GM_RXF_LNG_ERR },
+ { "rx_fifo_overflow", GM_RXE_FIFO_OV },
+ { "rx_jabber", GM_RXF_JAB_PKT },
+ { "rx_fcs_error", GM_RXF_FCS_ERR },
+
+ { "tx_64_byte_packets", GM_TXF_64B },
+ { "tx_65_to_127_byte_packets", GM_TXF_127B },
+ { "tx_128_to_255_byte_packets", GM_TXF_255B },
+ { "tx_256_to_511_byte_packets", GM_TXF_511B },
+ { "tx_512_to_1023_byte_packets", GM_TXF_1023B },
+ { "tx_1024_to_1518_byte_packets", GM_TXF_1518B },
+ { "tx_1519_to_max_byte_packets", GM_TXF_MAX_SZ },
+ { "tx_fifo_underrun", GM_TXE_FIFO_UR },
+};
+
+static u32 sky2_get_msglevel(struct net_device *netdev)
+{
+ struct sky2_port *sky2 = netdev_priv(netdev);
+ return sky2->msg_enable;
+}
+
+static int sky2_nway_reset(struct net_device *dev)
+{
+ struct sky2_port *sky2 = netdev_priv(dev);
+
+ if (!netif_running(dev) || !(sky2->flags & SKY2_FLAG_AUTO_SPEED))
+ return -EINVAL;
+
+ sky2_phy_reinit(sky2);
+ sky2_set_multicast(dev);
+
+ return 0;
+}
+
+static void sky2_phy_stats(struct sky2_port *sky2, u64 * data, unsigned count)
+{
+ struct sky2_hw *hw = sky2->hw;
+ unsigned port = sky2->port;
+ int i;
+
+ data[0] = get_stats64(hw, port, GM_TXO_OK_LO);
+ data[1] = get_stats64(hw, port, GM_RXO_OK_LO);
+
+ for (i = 2; i < count; i++)
+ data[i] = get_stats32(hw, port, sky2_stats[i].offset);
+}
+
+static void sky2_set_msglevel(struct net_device *netdev, u32 value)
+{
+ struct sky2_port *sky2 = netdev_priv(netdev);
+ sky2->msg_enable = value;
+}
+
+static int sky2_get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(sky2_stats);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void sky2_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 * data)
+{
+ struct sky2_port *sky2 = netdev_priv(dev);
+
+ sky2_phy_stats(sky2, data, ARRAY_SIZE(sky2_stats));
+}
+
+static void sky2_get_strings(struct net_device *dev, u32 stringset, u8 * data)
+{
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < ARRAY_SIZE(sky2_stats); i++)
+ memcpy(data + i * ETH_GSTRING_LEN,
+ sky2_stats[i].name, ETH_GSTRING_LEN);
+ break;
+ }
+}
+
+static int sky2_set_mac_address(struct net_device *dev, void *p)
+{
+ struct sky2_port *sky2 = netdev_priv(dev);
+ struct sky2_hw *hw = sky2->hw;
+ unsigned port = sky2->port;
+ const struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ eth_hw_addr_set(dev, addr->sa_data);
+ memcpy_toio(hw->regs + B2_MAC_1 + port * 8,
+ dev->dev_addr, ETH_ALEN);
+ memcpy_toio(hw->regs + B2_MAC_2 + port * 8,
+ dev->dev_addr, ETH_ALEN);
+
+ /* virtual address for data */
+ gma_set_addr(hw, port, GM_SRC_ADDR_2L, dev->dev_addr);
+
+ /* physical address: used for pause frames */
+ gma_set_addr(hw, port, GM_SRC_ADDR_1L, dev->dev_addr);
+
+ return 0;
+}
+
+static inline void sky2_add_filter(u8 filter[8], const u8 *addr)
+{
+ u32 bit;
+
+ bit = ether_crc(ETH_ALEN, addr) & 63;
+ filter[bit >> 3] |= 1 << (bit & 7);
+}
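+/* For example, a hash value of 37 sets bit 5 (37 & 7) of filter[4]
+ * (37 >> 3); the GMAC matches multicast frames against this 64-bit map.
+ */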
+
+static void sky2_set_multicast(struct net_device *dev)
+{
+ struct sky2_port *sky2 = netdev_priv(dev);
+ struct sky2_hw *hw = sky2->hw;
+ unsigned port = sky2->port;
+ struct netdev_hw_addr *ha;
+ u16 reg;
+ u8 filter[8];
+ int rx_pause;
+ static const u8 pause_mc_addr[ETH_ALEN] = { 0x1, 0x80, 0xc2, 0x0, 0x0, 0x1 };
+
+ rx_pause = (sky2->flow_status == FC_RX || sky2->flow_status == FC_BOTH);
+ memset(filter, 0, sizeof(filter));
+
+ reg = gma_read16(hw, port, GM_RX_CTRL);
+ reg |= GM_RXCR_UCF_ENA;
+
+ if (dev->flags & IFF_PROMISC) /* promiscuous */
+ reg &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
+ else if (dev->flags & IFF_ALLMULTI)
+ memset(filter, 0xff, sizeof(filter));
+ else if (netdev_mc_empty(dev) && !rx_pause)
+ reg &= ~GM_RXCR_MCF_ENA;
+ else {
+ reg |= GM_RXCR_MCF_ENA;
+
+ if (rx_pause)
+ sky2_add_filter(filter, pause_mc_addr);
+
+ netdev_for_each_mc_addr(ha, dev)
+ sky2_add_filter(filter, ha->addr);
+ }
+
+ gma_write16(hw, port, GM_MC_ADDR_H1,
+ (u16) filter[0] | ((u16) filter[1] << 8));
+ gma_write16(hw, port, GM_MC_ADDR_H2,
+ (u16) filter[2] | ((u16) filter[3] << 8));
+ gma_write16(hw, port, GM_MC_ADDR_H3,
+ (u16) filter[4] | ((u16) filter[5] << 8));
+ gma_write16(hw, port, GM_MC_ADDR_H4,
+ (u16) filter[6] | ((u16) filter[7] << 8));
+
+ gma_write16(hw, port, GM_RX_CTRL, reg);
+}
+
+static void sky2_get_stats(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct sky2_port *sky2 = netdev_priv(dev);
+ struct sky2_hw *hw = sky2->hw;
+ unsigned port = sky2->port;
+ unsigned int start;
+ u64 _bytes, _packets;
+
+ do {
+ start = u64_stats_fetch_begin(&sky2->rx_stats.syncp);
+ _bytes = sky2->rx_stats.bytes;
+ _packets = sky2->rx_stats.packets;
+ } while (u64_stats_fetch_retry(&sky2->rx_stats.syncp, start));
+
+ stats->rx_packets = _packets;
+ stats->rx_bytes = _bytes;
+
+ do {
+ start = u64_stats_fetch_begin(&sky2->tx_stats.syncp);
+ _bytes = sky2->tx_stats.bytes;
+ _packets = sky2->tx_stats.packets;
+ } while (u64_stats_fetch_retry(&sky2->tx_stats.syncp, start));
+
+ stats->tx_packets = _packets;
+ stats->tx_bytes = _bytes;
+
+ stats->multicast = get_stats32(hw, port, GM_RXF_MC_OK)
+ + get_stats32(hw, port, GM_RXF_BC_OK);
+
+ stats->collisions = get_stats32(hw, port, GM_TXF_COL);
+
+ stats->rx_length_errors = get_stats32(hw, port, GM_RXF_LNG_ERR);
+ stats->rx_crc_errors = get_stats32(hw, port, GM_RXF_FCS_ERR);
+ stats->rx_frame_errors = get_stats32(hw, port, GM_RXF_SHT)
+ + get_stats32(hw, port, GM_RXE_FRAG);
+ stats->rx_over_errors = get_stats32(hw, port, GM_RXE_FIFO_OV);
+
+ stats->rx_dropped = dev->stats.rx_dropped;
+ stats->rx_fifo_errors = dev->stats.rx_fifo_errors;
+ stats->tx_fifo_errors = dev->stats.tx_fifo_errors;
+}
+
+/* A single global state is enough because blinking is controlled by
+ * ethtool, which always runs under the RTNL mutex.
+ */
+static void sky2_led(struct sky2_port *sky2, enum led_mode mode)
+{
+ struct sky2_hw *hw = sky2->hw;
+ unsigned port = sky2->port;
+
+ spin_lock_bh(&sky2->phy_lock);
+ if (hw->chip_id == CHIP_ID_YUKON_EC_U ||
+ hw->chip_id == CHIP_ID_YUKON_EX ||
+ hw->chip_id == CHIP_ID_YUKON_SUPR) {
+ u16 pg;
+ pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
+ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
+
+ switch (mode) {
+ case MO_LED_OFF:
+ gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
+ PHY_M_LEDC_LOS_CTRL(8) |
+ PHY_M_LEDC_INIT_CTRL(8) |
+ PHY_M_LEDC_STA1_CTRL(8) |
+ PHY_M_LEDC_STA0_CTRL(8));
+ break;
+ case MO_LED_ON:
+ gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
+ PHY_M_LEDC_LOS_CTRL(9) |
+ PHY_M_LEDC_INIT_CTRL(9) |
+ PHY_M_LEDC_STA1_CTRL(9) |
+ PHY_M_LEDC_STA0_CTRL(9));
+ break;
+ case MO_LED_BLINK:
+ gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
+ PHY_M_LEDC_LOS_CTRL(0xa) |
+ PHY_M_LEDC_INIT_CTRL(0xa) |
+ PHY_M_LEDC_STA1_CTRL(0xa) |
+ PHY_M_LEDC_STA0_CTRL(0xa));
+ break;
+ case MO_LED_NORM:
+ gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
+ PHY_M_LEDC_LOS_CTRL(1) |
+ PHY_M_LEDC_INIT_CTRL(8) |
+ PHY_M_LEDC_STA1_CTRL(7) |
+ PHY_M_LEDC_STA0_CTRL(7));
+ }
+
+ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
+ } else
+ gm_phy_write(hw, port, PHY_MARV_LED_OVER,
+ PHY_M_LED_MO_DUP(mode) |
+ PHY_M_LED_MO_10(mode) |
+ PHY_M_LED_MO_100(mode) |
+ PHY_M_LED_MO_1000(mode) |
+ PHY_M_LED_MO_RX(mode) |
+ PHY_M_LED_MO_TX(mode));
+
+ spin_unlock_bh(&sky2->phy_lock);
+}
+
+/* blink LEDs to identify the board */
+static int sky2_set_phys_id(struct net_device *dev,
+ enum ethtool_phys_id_state state)
+{
+ struct sky2_port *sky2 = netdev_priv(dev);
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ return 1; /* cycle on/off once per second */
+ case ETHTOOL_ID_INACTIVE:
+ sky2_led(sky2, MO_LED_NORM);
+ break;
+ case ETHTOOL_ID_ON:
+ sky2_led(sky2, MO_LED_ON);
+ break;
+ case ETHTOOL_ID_OFF:
+ sky2_led(sky2, MO_LED_OFF);
+ break;
+ }
+
+ return 0;
+}
+
+static void sky2_get_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *ecmd)
+{
+ struct sky2_port *sky2 = netdev_priv(dev);
+
+ switch (sky2->flow_mode) {
+ case FC_NONE:
+ ecmd->tx_pause = ecmd->rx_pause = 0;
+ break;
+ case FC_TX:
+ ecmd->tx_pause = 1, ecmd->rx_pause = 0;
+ break;
+ case FC_RX:
+ ecmd->tx_pause = 0, ecmd->rx_pause = 1;
+ break;
+ case FC_BOTH:
+ ecmd->tx_pause = ecmd->rx_pause = 1;
+ }
+
+ ecmd->autoneg = (sky2->flags & SKY2_FLAG_AUTO_PAUSE)
+ ? AUTONEG_ENABLE : AUTONEG_DISABLE;
+}
+
+static int sky2_set_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *ecmd)
+{
+ struct sky2_port *sky2 = netdev_priv(dev);
+
+ if (ecmd->autoneg == AUTONEG_ENABLE)
+ sky2->flags |= SKY2_FLAG_AUTO_PAUSE;
+ else
+ sky2->flags &= ~SKY2_FLAG_AUTO_PAUSE;
+
+ sky2->flow_mode = sky2_flow(ecmd->rx_pause, ecmd->tx_pause);
+
+ if (netif_running(dev))
+ sky2_phy_reinit(sky2);
+
+ return 0;
+}
+
+static int sky2_get_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *ecmd,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct sky2_port *sky2 = netdev_priv(dev);
+ struct sky2_hw *hw = sky2->hw;
+
+ if (sky2_read8(hw, STAT_TX_TIMER_CTRL) == TIM_STOP)
+ ecmd->tx_coalesce_usecs = 0;
+ else {
+ u32 clks = sky2_read32(hw, STAT_TX_TIMER_INI);
+ ecmd->tx_coalesce_usecs = sky2_clk2us(hw, clks);
+ }
+ ecmd->tx_max_coalesced_frames = sky2_read16(hw, STAT_TX_IDX_TH);
+
+ if (sky2_read8(hw, STAT_LEV_TIMER_CTRL) == TIM_STOP)
+ ecmd->rx_coalesce_usecs = 0;
+ else {
+ u32 clks = sky2_read32(hw, STAT_LEV_TIMER_INI);
+ ecmd->rx_coalesce_usecs = sky2_clk2us(hw, clks);
+ }
+ ecmd->rx_max_coalesced_frames = sky2_read8(hw, STAT_FIFO_WM);
+
+ if (sky2_read8(hw, STAT_ISR_TIMER_CTRL) == TIM_STOP)
+ ecmd->rx_coalesce_usecs_irq = 0;
+ else {
+ u32 clks = sky2_read32(hw, STAT_ISR_TIMER_INI);
+ ecmd->rx_coalesce_usecs_irq = sky2_clk2us(hw, clks);
+ }
+
+ ecmd->rx_max_coalesced_frames_irq = sky2_read8(hw, STAT_FIFO_ISR_WM);
+
+ return 0;
+}
+
+/* Note: this affects both ports */
+static int sky2_set_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *ecmd,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct sky2_port *sky2 = netdev_priv(dev);
+ struct sky2_hw *hw = sky2->hw;
+ const u32 tmax = sky2_clk2us(hw, 0x0ffffff);
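+ /* e.g. at 125 MHz, tmax works out to roughly 134000 us (~134 ms) */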
+
+ if (ecmd->tx_coalesce_usecs > tmax ||
+ ecmd->rx_coalesce_usecs > tmax ||
+ ecmd->rx_coalesce_usecs_irq > tmax)
+ return -EINVAL;
+
+ if (ecmd->tx_max_coalesced_frames >= sky2->tx_ring_size-1)
+ return -EINVAL;
+ if (ecmd->rx_max_coalesced_frames > RX_MAX_PENDING)
+ return -EINVAL;
+ if (ecmd->rx_max_coalesced_frames_irq > RX_MAX_PENDING)
+ return -EINVAL;
+
+ if (ecmd->tx_coalesce_usecs == 0)
+ sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_STOP);
+ else {
+ sky2_write32(hw, STAT_TX_TIMER_INI,
+ sky2_us2clk(hw, ecmd->tx_coalesce_usecs));
+ sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START);
+ }
+ sky2_write16(hw, STAT_TX_IDX_TH, ecmd->tx_max_coalesced_frames);
+
+ if (ecmd->rx_coalesce_usecs == 0)
+ sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_STOP);
+ else {
+ sky2_write32(hw, STAT_LEV_TIMER_INI,
+ sky2_us2clk(hw, ecmd->rx_coalesce_usecs));
+ sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_START);
+ }
+ sky2_write8(hw, STAT_FIFO_WM, ecmd->rx_max_coalesced_frames);
+
+ if (ecmd->rx_coalesce_usecs_irq == 0)
+ sky2_write8(hw, STAT_ISR_TIMER_CTRL, TIM_STOP);
+ else {
+ sky2_write32(hw, STAT_ISR_TIMER_INI,
+ sky2_us2clk(hw, ecmd->rx_coalesce_usecs_irq));
+ sky2_write8(hw, STAT_ISR_TIMER_CTRL, TIM_START);
+ }
+ sky2_write8(hw, STAT_FIFO_ISR_WM, ecmd->rx_max_coalesced_frames_irq);
+ return 0;
+}
+
+/*
+ * Hardware ring size is limited to a minimum of 128 and a maximum of 2048
+ * entries, and is rounded up to the next power of two to avoid a
+ * division in the modulus calculation.
+ */
+static unsigned long roundup_ring_size(unsigned long pending)
+{
+ return max(128ul, roundup_pow_of_two(pending+1));
+}
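+/* e.g. a requested pending count of 63 rounds to 128 ring entries,
+ * 255 to 256, and 300 to 512.
+ */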
+
+static void sky2_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
+{
+ struct sky2_port *sky2 = netdev_priv(dev);
+
+ ering->rx_max_pending = RX_MAX_PENDING;
+ ering->tx_max_pending = TX_MAX_PENDING;
+
+ ering->rx_pending = sky2->rx_pending;
+ ering->tx_pending = sky2->tx_pending;
+}
+
+static int sky2_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
+{
+ struct sky2_port *sky2 = netdev_priv(dev);
+
+ if (ering->rx_pending > RX_MAX_PENDING ||
+ ering->rx_pending < 8 ||
+ ering->tx_pending < TX_MIN_PENDING ||
+ ering->tx_pending > TX_MAX_PENDING)
+ return -EINVAL;
+
+ sky2_detach(dev);
+
+ sky2->rx_pending = ering->rx_pending;
+ sky2->tx_pending = ering->tx_pending;
+ sky2->tx_ring_size = roundup_ring_size(sky2->tx_pending);
+
+ return sky2_reattach(dev);
+}
+
+static int sky2_get_regs_len(struct net_device *dev)
+{
+ return 0x4000;
+}
+
+static int sky2_reg_access_ok(struct sky2_hw *hw, unsigned int b)
+{
+ /* This complicated switch statement makes sure we only
+ * access regions that are not reserved.
+ * Some blocks are only valid on dual-port cards.
+ */
+ switch (b) {
+ /* second port */
+ case 5: /* Tx Arbiter 2 */
+ case 9: /* RX2 */
+ case 14 ... 15: /* TX2 */
+ case 17: case 19: /* Ram Buffer 2 */
+ case 22 ... 23: /* Tx Ram Buffer 2 */
+ case 25: /* Rx MAC Fifo 1 */
+ case 27: /* Tx MAC Fifo 2 */
+ case 31: /* GPHY 2 */
+ case 40 ... 47: /* Pattern Ram 2 */
+ case 52: case 54: /* TCP Segmentation 2 */
+ case 112 ... 116: /* GMAC 2 */
+ return hw->ports > 1;
+
+ case 0: /* Control */
+ case 2: /* Mac address */
+ case 4: /* Tx Arbiter 1 */
+ case 7: /* PCI express reg */
+ case 8: /* RX1 */
+ case 12 ... 13: /* TX1 */
+ case 16: case 18:/* Rx Ram Buffer 1 */
+ case 20 ... 21: /* Tx Ram Buffer 1 */
+ case 24: /* Rx MAC Fifo 1 */
+ case 26: /* Tx MAC Fifo 1 */
+ case 28 ... 29: /* Descriptor and status unit */
+ case 30: /* GPHY 1*/
+ case 32 ... 39: /* Pattern Ram 1 */
+ case 48: case 50: /* TCP Segmentation 1 */
+ case 56 ... 60: /* PCI space */
+ case 80 ... 84: /* GMAC 1 */
+ return 1;
+
+ default:
+ return 0;
+ }
+}
+
+/*
+ * Returns a copy of the control register region.
+ * Note: ethtool_get_regs always provides a full-size (16k) buffer.
+ */
+static void sky2_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+ void *p)
+{
+ const struct sky2_port *sky2 = netdev_priv(dev);
+ const void __iomem *io = sky2->hw->regs;
+ unsigned int b;
+
+ regs->version = 1;
+
+ for (b = 0; b < 128; b++) {
+ /* skip poisonous diagnostic ram region in block 3 */
+ if (b == 3)
+ memcpy_fromio(p + 0x10, io + 0x10, 128 - 0x10);
+ else if (sky2_reg_access_ok(sky2->hw, b))
+ memcpy_fromio(p, io, 128);
+ else
+ memset(p, 0, 128);
+
+ p += 128;
+ io += 128;
+ }
+}
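+/* Each block index above covers 128 bytes of register space at offset
+ * b * 128, so e.g. block 2 (MAC address) maps to offsets 0x100-0x17f.
+ */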
+
+static int sky2_get_eeprom_len(struct net_device *dev)
+{
+ struct sky2_port *sky2 = netdev_priv(dev);
+ struct sky2_hw *hw = sky2->hw;
+ u16 reg2;
+
+ reg2 = sky2_pci_read16(hw, PCI_DEV_REG2);
+ return 1 << ( ((reg2 & PCI_VPD_ROM_SZ) >> 14) + 8);
+}
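+/* e.g. a VPD ROM size field of 0 means 256 bytes, 1 means 512 bytes,
+ * and 3 means 2 kB of EEPROM.
+ */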
+
+static int sky2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
+ u8 *data)
+{
+ struct sky2_port *sky2 = netdev_priv(dev);
+ int rc;
+
+ eeprom->magic = SKY2_EEPROM_MAGIC;
+ rc = pci_read_vpd_any(sky2->hw->pdev, eeprom->offset, eeprom->len,
+ data);
+ if (rc < 0)
+ return rc;
+
+ eeprom->len = rc;
+
+ return 0;
+}
+
+static int sky2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
+ u8 *data)
+{
+ struct sky2_port *sky2 = netdev_priv(dev);
+ int rc;
+
+ if (eeprom->magic != SKY2_EEPROM_MAGIC)
+ return -EINVAL;
+
+ rc = pci_write_vpd_any(sky2->hw->pdev, eeprom->offset, eeprom->len,
+ data);
+
+ return rc < 0 ? rc : 0;
+}
+
+static netdev_features_t sky2_fix_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ const struct sky2_port *sky2 = netdev_priv(dev);
+ const struct sky2_hw *hw = sky2->hw;
+
+	/* In order to do jumbo packets on these chips, we need to turn off the
+	 * transmit store/forward, so checksum offload won't work either.
+	 */
+ if (dev->mtu > ETH_DATA_LEN && hw->chip_id == CHIP_ID_YUKON_EC_U) {
+ netdev_info(dev, "checksum offload not possible with jumbo frames\n");
+ features &= ~(NETIF_F_TSO | NETIF_F_SG | NETIF_F_CSUM_MASK);
+ }
+
+ /* Some hardware requires receive checksum for RSS to work. */
+	if ((features & NETIF_F_RXHASH) &&
+ !(features & NETIF_F_RXCSUM) &&
+ (sky2->hw->flags & SKY2_HW_RSS_CHKSUM)) {
+ netdev_info(dev, "receive hashing forces receive checksum\n");
+ features |= NETIF_F_RXCSUM;
+ }
+
+ return features;
+}
+
+static int sky2_set_features(struct net_device *dev, netdev_features_t features)
+{
+ struct sky2_port *sky2 = netdev_priv(dev);
+ netdev_features_t changed = dev->features ^ features;
+
+ if ((changed & NETIF_F_RXCSUM) &&
+ !(sky2->hw->flags & SKY2_HW_NEW_LE)) {
+ sky2_write32(sky2->hw,
+ Q_ADDR(rxqaddr[sky2->port], Q_CSR),
+ (features & NETIF_F_RXCSUM)
+ ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
+ }
+
+ if (changed & NETIF_F_RXHASH)
+ rx_set_rss(dev, features);
+
+ if (changed & (NETIF_F_HW_VLAN_CTAG_TX|NETIF_F_HW_VLAN_CTAG_RX))
+ sky2_vlan_mode(dev, features);
+
+ return 0;
+}
+
+static const struct ethtool_ops sky2_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES |
+ ETHTOOL_COALESCE_RX_USECS_IRQ |
+ ETHTOOL_COALESCE_RX_MAX_FRAMES_IRQ,
+ .get_drvinfo = sky2_get_drvinfo,
+ .get_wol = sky2_get_wol,
+ .set_wol = sky2_set_wol,
+ .get_msglevel = sky2_get_msglevel,
+ .set_msglevel = sky2_set_msglevel,
+ .nway_reset = sky2_nway_reset,
+ .get_regs_len = sky2_get_regs_len,
+ .get_regs = sky2_get_regs,
+ .get_link = ethtool_op_get_link,
+ .get_eeprom_len = sky2_get_eeprom_len,
+ .get_eeprom = sky2_get_eeprom,
+ .set_eeprom = sky2_set_eeprom,
+ .get_strings = sky2_get_strings,
+ .get_coalesce = sky2_get_coalesce,
+ .set_coalesce = sky2_set_coalesce,
+ .get_ringparam = sky2_get_ringparam,
+ .set_ringparam = sky2_set_ringparam,
+ .get_pauseparam = sky2_get_pauseparam,
+ .set_pauseparam = sky2_set_pauseparam,
+ .set_phys_id = sky2_set_phys_id,
+ .get_sset_count = sky2_get_sset_count,
+ .get_ethtool_stats = sky2_get_ethtool_stats,
+ .get_link_ksettings = sky2_get_link_ksettings,
+ .set_link_ksettings = sky2_set_link_ksettings,
+};
+
+#ifdef CONFIG_SKY2_DEBUG
+
+static struct dentry *sky2_debug;
+
+static int sky2_debug_show(struct seq_file *seq, void *v)
+{
+ struct net_device *dev = seq->private;
+ const struct sky2_port *sky2 = netdev_priv(dev);
+ struct sky2_hw *hw = sky2->hw;
+ unsigned port = sky2->port;
+ unsigned idx, last;
+ int sop;
+
+ seq_printf(seq, "IRQ src=%x mask=%x control=%x\n",
+ sky2_read32(hw, B0_ISRC),
+ sky2_read32(hw, B0_IMSK),
+ sky2_read32(hw, B0_Y2_SP_ICR));
+
+ if (!netif_running(dev)) {
+ seq_puts(seq, "network not running\n");
+ return 0;
+ }
+
+ napi_disable(&hw->napi);
+ last = sky2_read16(hw, STAT_PUT_IDX);
+
+ seq_printf(seq, "Status ring %u\n", hw->st_size);
+ if (hw->st_idx == last)
+ seq_puts(seq, "Status ring (empty)\n");
+ else {
+ seq_puts(seq, "Status ring\n");
+ for (idx = hw->st_idx; idx != last && idx < hw->st_size;
+ idx = RING_NEXT(idx, hw->st_size)) {
+ const struct sky2_status_le *le = hw->st_le + idx;
+ seq_printf(seq, "[%d] %#x %d %#x\n",
+ idx, le->opcode, le->length, le->status);
+ }
+ seq_puts(seq, "\n");
+ }
+
+ seq_printf(seq, "Tx ring pending=%u...%u report=%d done=%d\n",
+ sky2->tx_cons, sky2->tx_prod,
+ sky2_read16(hw, port == 0 ? STAT_TXA1_RIDX : STAT_TXA2_RIDX),
+ sky2_read16(hw, Q_ADDR(txqaddr[port], Q_DONE)));
+
+ /* Dump contents of tx ring */
+ sop = 1;
+ for (idx = sky2->tx_next; idx != sky2->tx_prod && idx < sky2->tx_ring_size;
+ idx = RING_NEXT(idx, sky2->tx_ring_size)) {
+ const struct sky2_tx_le *le = sky2->tx_le + idx;
+ u32 a = le32_to_cpu(le->addr);
+
+ if (sop)
+ seq_printf(seq, "%u:", idx);
+ sop = 0;
+
+ switch (le->opcode & ~HW_OWNER) {
+ case OP_ADDR64:
+ seq_printf(seq, " %#x:", a);
+ break;
+ case OP_LRGLEN:
+ seq_printf(seq, " mtu=%d", a);
+ break;
+ case OP_VLAN:
+ seq_printf(seq, " vlan=%d", be16_to_cpu(le->length));
+ break;
+ case OP_TCPLISW:
+ seq_printf(seq, " csum=%#x", a);
+ break;
+ case OP_LARGESEND:
+ seq_printf(seq, " tso=%#x(%d)", a, le16_to_cpu(le->length));
+ break;
+ case OP_PACKET:
+ seq_printf(seq, " %#x(%d)", a, le16_to_cpu(le->length));
+ break;
+ case OP_BUFFER:
+ seq_printf(seq, " frag=%#x(%d)", a, le16_to_cpu(le->length));
+ break;
+ default:
+ seq_printf(seq, " op=%#x,%#x(%d)", le->opcode,
+ a, le16_to_cpu(le->length));
+ }
+
+ if (le->ctrl & EOP) {
+ seq_putc(seq, '\n');
+ sop = 1;
+ }
+ }
+
+ seq_printf(seq, "\nRx ring hw get=%d put=%d last=%d\n",
+ sky2_read16(hw, Y2_QADDR(rxqaddr[port], PREF_UNIT_GET_IDX)),
+ sky2_read16(hw, Y2_QADDR(rxqaddr[port], PREF_UNIT_PUT_IDX)),
+ sky2_read16(hw, Y2_QADDR(rxqaddr[port], PREF_UNIT_LAST_IDX)));
+
+ sky2_read32(hw, B0_Y2_SP_LISR);
+ napi_enable(&hw->napi);
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(sky2_debug);
+
+/*
+ * Use network device events to create/remove/rename
+ * debugfs file entries
+ */
+static int sky2_device_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct sky2_port *sky2 = netdev_priv(dev);
+
+ if (dev->netdev_ops->ndo_open != sky2_open || !sky2_debug)
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case NETDEV_CHANGENAME:
+ if (sky2->debugfs) {
+ sky2->debugfs = debugfs_rename(sky2_debug, sky2->debugfs,
+ sky2_debug, dev->name);
+ }
+ break;
+
+ case NETDEV_GOING_DOWN:
+ if (sky2->debugfs) {
+ netdev_printk(KERN_DEBUG, dev, "remove debugfs\n");
+ debugfs_remove(sky2->debugfs);
+ sky2->debugfs = NULL;
+ }
+ break;
+
+ case NETDEV_UP:
+ sky2->debugfs = debugfs_create_file(dev->name, 0444,
+ sky2_debug, dev,
+ &sky2_debug_fops);
+ if (IS_ERR(sky2->debugfs))
+ sky2->debugfs = NULL;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block sky2_notifier = {
+ .notifier_call = sky2_device_event,
+};
+
+
+static __init void sky2_debug_init(void)
+{
+ struct dentry *ent;
+
+ ent = debugfs_create_dir("sky2", NULL);
+ if (IS_ERR(ent))
+ return;
+
+ sky2_debug = ent;
+ register_netdevice_notifier(&sky2_notifier);
+}
+
+static __exit void sky2_debug_cleanup(void)
+{
+ if (sky2_debug) {
+ unregister_netdevice_notifier(&sky2_notifier);
+ debugfs_remove(sky2_debug);
+ sky2_debug = NULL;
+ }
+}
+
+#else
+#define sky2_debug_init()
+#define sky2_debug_cleanup()
+#endif
+
+/* Two copies of the network device operations, to handle the special case
+ * of not allowing netpoll on the second port.
+ */
+static const struct net_device_ops sky2_netdev_ops[2] = {
+ {
+ .ndo_open = sky2_open,
+ .ndo_stop = sky2_close,
+ .ndo_start_xmit = sky2_xmit_frame,
+ .ndo_eth_ioctl = sky2_ioctl,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = sky2_set_mac_address,
+ .ndo_set_rx_mode = sky2_set_multicast,
+ .ndo_change_mtu = sky2_change_mtu,
+ .ndo_fix_features = sky2_fix_features,
+ .ndo_set_features = sky2_set_features,
+ .ndo_tx_timeout = sky2_tx_timeout,
+ .ndo_get_stats64 = sky2_get_stats,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = sky2_netpoll,
+#endif
+ },
+ {
+ .ndo_open = sky2_open,
+ .ndo_stop = sky2_close,
+ .ndo_start_xmit = sky2_xmit_frame,
+ .ndo_eth_ioctl = sky2_ioctl,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = sky2_set_mac_address,
+ .ndo_set_rx_mode = sky2_set_multicast,
+ .ndo_change_mtu = sky2_change_mtu,
+ .ndo_fix_features = sky2_fix_features,
+ .ndo_set_features = sky2_set_features,
+ .ndo_tx_timeout = sky2_tx_timeout,
+ .ndo_get_stats64 = sky2_get_stats,
+ },
+};
+
+/* Initialize network device */
+static struct net_device *sky2_init_netdev(struct sky2_hw *hw, unsigned port,
+ int highmem, int wol)
+{
+ struct sky2_port *sky2;
+ struct net_device *dev = alloc_etherdev(sizeof(*sky2));
+ int ret;
+
+ if (!dev)
+ return NULL;
+
+ SET_NETDEV_DEV(dev, &hw->pdev->dev);
+ dev->irq = hw->pdev->irq;
+ dev->ethtool_ops = &sky2_ethtool_ops;
+ dev->watchdog_timeo = TX_WATCHDOG;
+ dev->netdev_ops = &sky2_netdev_ops[port];
+
+ sky2 = netdev_priv(dev);
+ sky2->netdev = dev;
+ sky2->hw = hw;
+ sky2->msg_enable = netif_msg_init(debug, default_msg);
+
+ u64_stats_init(&sky2->tx_stats.syncp);
+ u64_stats_init(&sky2->rx_stats.syncp);
+
+ /* Auto speed and flow control */
+ sky2->flags = SKY2_FLAG_AUTO_SPEED | SKY2_FLAG_AUTO_PAUSE;
+ if (hw->chip_id != CHIP_ID_YUKON_XL)
+ dev->hw_features |= NETIF_F_RXCSUM;
+
+ sky2->flow_mode = FC_BOTH;
+
+ sky2->duplex = -1;
+ sky2->speed = -1;
+ sky2->advertising = sky2_supported_modes(hw);
+ sky2->wol = wol;
+
+ spin_lock_init(&sky2->phy_lock);
+
+ sky2->tx_pending = TX_DEF_PENDING;
+ sky2->tx_ring_size = roundup_ring_size(TX_DEF_PENDING);
+ sky2->rx_pending = RX_DEF_PENDING;
+
+ hw->dev[port] = dev;
+
+ sky2->port = port;
+
+ dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO;
+
+ if (highmem)
+ dev->features |= NETIF_F_HIGHDMA;
+
+	/* Enable receive hashing unless the hardware is known to be broken */
+ if (!(hw->flags & SKY2_HW_RSS_BROKEN))
+ dev->hw_features |= NETIF_F_RXHASH;
+
+ if (!(hw->flags & SKY2_HW_VLAN_BROKEN)) {
+ dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX;
+ dev->vlan_features |= SKY2_VLAN_OFFLOADS;
+ }
+
+ dev->features |= dev->hw_features;
+
+ /* MTU range: 60 - 1500 or 9000 */
+ dev->min_mtu = ETH_ZLEN;
+ if (hw->chip_id == CHIP_ID_YUKON_FE ||
+ hw->chip_id == CHIP_ID_YUKON_FE_P)
+ dev->max_mtu = ETH_DATA_LEN;
+ else
+ dev->max_mtu = ETH_JUMBO_MTU;
+
+	/* Try to get the MAC address in the following order:
+	 * 1) from device tree data
+	 * 2) from internal registers set by the bootloader
+	 */
+ ret = of_get_ethdev_address(hw->pdev->dev.of_node, dev);
+ if (ret) {
+ u8 addr[ETH_ALEN];
+
+ memcpy_fromio(addr, hw->regs + B2_MAC_1 + port * 8, ETH_ALEN);
+ eth_hw_addr_set(dev, addr);
+ }
+
+ /* if the address is invalid, use a random value */
+ if (!is_valid_ether_addr(dev->dev_addr)) {
+ struct sockaddr sa = { AF_UNSPEC };
+
+ dev_warn(&hw->pdev->dev, "Invalid MAC address, defaulting to random\n");
+ eth_hw_addr_random(dev);
+ memcpy(sa.sa_data, dev->dev_addr, ETH_ALEN);
+ if (sky2_set_mac_address(dev, &sa))
+ dev_warn(&hw->pdev->dev, "Failed to set MAC address.\n");
+ }
+
+ return dev;
+}
+
+static void sky2_show_addr(struct net_device *dev)
+{
+ const struct sky2_port *sky2 = netdev_priv(dev);
+
+ netif_info(sky2, probe, dev, "addr %pM\n", dev->dev_addr);
+}
+
+/* Handle software interrupt used during MSI test */
+static irqreturn_t sky2_test_intr(int irq, void *dev_id)
+{
+ struct sky2_hw *hw = dev_id;
+ u32 status = sky2_read32(hw, B0_Y2_SP_ISRC2);
+
+ if (status == 0)
+ return IRQ_NONE;
+
+ if (status & Y2_IS_IRQ_SW) {
+ hw->flags |= SKY2_HW_USE_MSI;
+ wake_up(&hw->msi_wait);
+ sky2_write8(hw, B0_CTST, CS_CL_SW_IRQ);
+ }
+ sky2_write32(hw, B0_Y2_SP_ICR, 2);
+
+ return IRQ_HANDLED;
+}
+
+/* Test interrupt path by forcing a software IRQ */
+static int sky2_test_msi(struct sky2_hw *hw)
+{
+ struct pci_dev *pdev = hw->pdev;
+ int err;
+
+ init_waitqueue_head(&hw->msi_wait);
+
+ err = request_irq(pdev->irq, sky2_test_intr, 0, DRV_NAME, hw);
+ if (err) {
+ dev_err(&pdev->dev, "cannot assign irq %d\n", pdev->irq);
+ return err;
+ }
+
+ sky2_write32(hw, B0_IMSK, Y2_IS_IRQ_SW);
+
+ sky2_write8(hw, B0_CTST, CS_ST_SW_IRQ);
+ sky2_read8(hw, B0_CTST);
+
+ wait_event_timeout(hw->msi_wait, (hw->flags & SKY2_HW_USE_MSI), HZ/10);
+
+ if (!(hw->flags & SKY2_HW_USE_MSI)) {
+ /* MSI test failed, go back to INTx mode */
+ dev_info(&pdev->dev, "No interrupt generated using MSI, "
+ "switching to INTx mode.\n");
+
+ err = -EOPNOTSUPP;
+ sky2_write8(hw, B0_CTST, CS_CL_SW_IRQ);
+ }
+
+ sky2_write32(hw, B0_IMSK, 0);
+ sky2_read32(hw, B0_IMSK);
+
+ free_irq(pdev->irq, hw);
+
+ return err;
+}
+
+/* This driver supports the Yukon-2 chipset only */
+static const char *sky2_name(u8 chipid, char *buf, int sz)
+{
+ static const char *const name[] = {
+ "XL", /* 0xb3 */
+ "EC Ultra", /* 0xb4 */
+ "Extreme", /* 0xb5 */
+ "EC", /* 0xb6 */
+ "FE", /* 0xb7 */
+ "FE+", /* 0xb8 */
+ "Supreme", /* 0xb9 */
+ "UL 2", /* 0xba */
+ "Unknown", /* 0xbb */
+ "Optima", /* 0xbc */
+ "OptimaEEE", /* 0xbd */
+ "Optima 2", /* 0xbe */
+ };
+
+ if (chipid >= CHIP_ID_YUKON_XL && chipid <= CHIP_ID_YUKON_OP_2)
+ snprintf(buf, sz, "%s", name[chipid - CHIP_ID_YUKON_XL]);
+ else
+ snprintf(buf, sz, "(chip %#x)", chipid);
+ return buf;
+}
+
+static const struct dmi_system_id msi_blacklist[] = {
+ {
+ .ident = "Dell Inspiron 1545",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1545"),
+ },
+ },
+ {
+ .ident = "Gateway P-79",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Gateway"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "P-79"),
+ },
+ },
+ {
+ .ident = "ASUS P5W DH Deluxe",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTEK COMPUTER INC"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "P5W DH Deluxe"),
+ },
+ },
+ {
+ .ident = "ASUS P6T",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "P6T"),
+ },
+ },
+ {
+ .ident = "ASUS P6X",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "P6X"),
+ },
+ },
+ {}
+};
+
+static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct net_device *dev, *dev1;
+ struct sky2_hw *hw;
+ int err, using_dac = 0, wol_default;
+ u32 reg;
+ char buf1[16];
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "cannot enable PCI device\n");
+ goto err_out;
+ }
+
+	/* Get configuration information.
+	 * Note: regular PCI config space is accessed only once, to test for HW
+	 * issues; all other PCI access goes through shared memory, for speed
+	 * and to avoid MMCONFIG problems.
+	 */
+ err = pci_read_config_dword(pdev, PCI_DEV_REG2, &reg);
+ if (err) {
+ dev_err(&pdev->dev, "PCI read config failed\n");
+ goto err_out_disable;
+ }
+
+ if (~reg == 0) {
+ dev_err(&pdev->dev, "PCI configuration read error\n");
+ err = -EIO;
+ goto err_out_disable;
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ dev_err(&pdev->dev, "cannot obtain PCI resources\n");
+ goto err_out_disable;
+ }
+
+ pci_set_master(pdev);
+
+ if (sizeof(dma_addr_t) > sizeof(u32) &&
+ !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+ using_dac = 1;
+ err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+ if (err < 0) {
+ dev_err(&pdev->dev, "unable to obtain 64 bit DMA "
+ "for consistent allocations\n");
+ goto err_out_free_regions;
+ }
+ } else {
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&pdev->dev, "no usable DMA configuration\n");
+ goto err_out_free_regions;
+ }
+ }
+
+
+#ifdef __BIG_ENDIAN
+ /* The sk98lin vendor driver uses hardware byte swapping but
+ * this driver uses software swapping.
+ */
+ reg &= ~PCI_REV_DESC;
+ err = pci_write_config_dword(pdev, PCI_DEV_REG2, reg);
+ if (err) {
+ dev_err(&pdev->dev, "PCI write config failed\n");
+ goto err_out_free_regions;
+ }
+#endif
+
+ wol_default = device_may_wakeup(&pdev->dev) ? WAKE_MAGIC : 0;
+
+ err = -ENOMEM;
+
+ hw = kzalloc(sizeof(*hw) + strlen(DRV_NAME "@pci:")
+ + strlen(pci_name(pdev)) + 1, GFP_KERNEL);
+ if (!hw)
+ goto err_out_free_regions;
+
+ hw->pdev = pdev;
+ sprintf(hw->irq_name, DRV_NAME "@pci:%s", pci_name(pdev));
+
+ hw->regs = ioremap(pci_resource_start(pdev, 0), 0x4000);
+ if (!hw->regs) {
+ dev_err(&pdev->dev, "cannot map device registers\n");
+ goto err_out_free_hw;
+ }
+
+ err = sky2_init(hw);
+ if (err)
+ goto err_out_iounmap;
+
+ /* ring for status responses */
+ hw->st_size = hw->ports * roundup_pow_of_two(3*RX_MAX_PENDING + TX_MAX_PENDING);
+ hw->st_le = dma_alloc_coherent(&pdev->dev,
+ hw->st_size * sizeof(struct sky2_status_le),
+ &hw->st_dma, GFP_KERNEL);
+ if (!hw->st_le) {
+ err = -ENOMEM;
+ goto err_out_reset;
+ }
+
+ dev_info(&pdev->dev, "Yukon-2 %s chip revision %d\n",
+ sky2_name(hw->chip_id, buf1, sizeof(buf1)), hw->chip_rev);
+
+ sky2_reset(hw);
+
+ dev = sky2_init_netdev(hw, 0, using_dac, wol_default);
+ if (!dev) {
+ err = -ENOMEM;
+ goto err_out_free_pci;
+ }
+
+ if (disable_msi == -1)
+ disable_msi = !!dmi_check_system(msi_blacklist);
+
+ if (!disable_msi && pci_enable_msi(pdev) == 0) {
+ err = sky2_test_msi(hw);
+ if (err) {
+ pci_disable_msi(pdev);
+ if (err != -EOPNOTSUPP)
+ goto err_out_free_netdev;
+ }
+ }
+
+ netif_napi_add(dev, &hw->napi, sky2_poll);
+
+ err = register_netdev(dev);
+ if (err) {
+ dev_err(&pdev->dev, "cannot register net device\n");
+ goto err_out_free_netdev;
+ }
+
+ netif_carrier_off(dev);
+
+ sky2_show_addr(dev);
+
+ if (hw->ports > 1) {
+ dev1 = sky2_init_netdev(hw, 1, using_dac, wol_default);
+ if (!dev1) {
+ err = -ENOMEM;
+ goto err_out_unregister;
+ }
+
+ err = register_netdev(dev1);
+ if (err) {
+ dev_err(&pdev->dev, "cannot register second net device\n");
+ goto err_out_free_dev1;
+ }
+
+ err = sky2_setup_irq(hw, hw->irq_name);
+ if (err)
+ goto err_out_unregister_dev1;
+
+ sky2_show_addr(dev1);
+ }
+
+ timer_setup(&hw->watchdog_timer, sky2_watchdog, 0);
+ INIT_WORK(&hw->restart_work, sky2_restart);
+
+ pci_set_drvdata(pdev, hw);
+ pdev->d3hot_delay = 300;
+
+ return 0;
+
+err_out_unregister_dev1:
+ unregister_netdev(dev1);
+err_out_free_dev1:
+ free_netdev(dev1);
+err_out_unregister:
+ unregister_netdev(dev);
+err_out_free_netdev:
+ if (hw->flags & SKY2_HW_USE_MSI)
+ pci_disable_msi(pdev);
+ free_netdev(dev);
+err_out_free_pci:
+ dma_free_coherent(&pdev->dev,
+ hw->st_size * sizeof(struct sky2_status_le),
+ hw->st_le, hw->st_dma);
+err_out_reset:
+ sky2_write8(hw, B0_CTST, CS_RST_SET);
+err_out_iounmap:
+ iounmap(hw->regs);
+err_out_free_hw:
+ kfree(hw);
+err_out_free_regions:
+ pci_release_regions(pdev);
+err_out_disable:
+ pci_disable_device(pdev);
+err_out:
+ return err;
+}
+
+static void sky2_remove(struct pci_dev *pdev)
+{
+ struct sky2_hw *hw = pci_get_drvdata(pdev);
+ int i;
+
+ if (!hw)
+ return;
+
+ timer_shutdown_sync(&hw->watchdog_timer);
+ cancel_work_sync(&hw->restart_work);
+
+ for (i = hw->ports-1; i >= 0; --i)
+ unregister_netdev(hw->dev[i]);
+
+ sky2_write32(hw, B0_IMSK, 0);
+ sky2_read32(hw, B0_IMSK);
+
+ sky2_power_aux(hw);
+
+ sky2_write8(hw, B0_CTST, CS_RST_SET);
+ sky2_read8(hw, B0_CTST);
+
+ if (hw->ports > 1) {
+ napi_disable(&hw->napi);
+ free_irq(pdev->irq, hw);
+ }
+
+ if (hw->flags & SKY2_HW_USE_MSI)
+ pci_disable_msi(pdev);
+ dma_free_coherent(&pdev->dev,
+ hw->st_size * sizeof(struct sky2_status_le),
+ hw->st_le, hw->st_dma);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+
+ for (i = hw->ports-1; i >= 0; --i)
+ free_netdev(hw->dev[i]);
+
+ iounmap(hw->regs);
+ kfree(hw);
+}
+
+static int sky2_suspend(struct device *dev)
+{
+ struct sky2_hw *hw = dev_get_drvdata(dev);
+ int i;
+
+ if (!hw)
+ return 0;
+
+ del_timer_sync(&hw->watchdog_timer);
+ cancel_work_sync(&hw->restart_work);
+
+ rtnl_lock();
+
+ sky2_all_down(hw);
+ for (i = 0; i < hw->ports; i++) {
+ struct net_device *dev = hw->dev[i];
+ struct sky2_port *sky2 = netdev_priv(dev);
+
+ if (sky2->wol)
+ sky2_wol_init(sky2);
+ }
+
+ sky2_power_aux(hw);
+ rtnl_unlock();
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int sky2_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct sky2_hw *hw = pci_get_drvdata(pdev);
+ int err;
+
+ if (!hw)
+ return 0;
+
+ /* Re-enable all clocks */
+ err = pci_write_config_dword(pdev, PCI_DEV_REG3, 0);
+ if (err) {
+ dev_err(&pdev->dev, "PCI write config failed\n");
+ goto out;
+ }
+
+ rtnl_lock();
+ sky2_reset(hw);
+ sky2_all_up(hw);
+ rtnl_unlock();
+
+ return 0;
+out:
+
+ dev_err(&pdev->dev, "resume failed (%d)\n", err);
+ pci_disable_device(pdev);
+ return err;
+}
+
+static SIMPLE_DEV_PM_OPS(sky2_pm_ops, sky2_suspend, sky2_resume);
+#define SKY2_PM_OPS (&sky2_pm_ops)
+
+#else
+
+#define SKY2_PM_OPS NULL
+#endif
+
+static void sky2_shutdown(struct pci_dev *pdev)
+{
+ struct sky2_hw *hw = pci_get_drvdata(pdev);
+ int port;
+
+ for (port = 0; port < hw->ports; port++) {
+ struct net_device *ndev = hw->dev[port];
+
+ rtnl_lock();
+ if (netif_running(ndev)) {
+ dev_close(ndev);
+ netif_device_detach(ndev);
+ }
+ rtnl_unlock();
+ }
+ sky2_suspend(&pdev->dev);
+ pci_wake_from_d3(pdev, device_may_wakeup(&pdev->dev));
+ pci_set_power_state(pdev, PCI_D3hot);
+}
+
+static struct pci_driver sky2_driver = {
+ .name = DRV_NAME,
+ .id_table = sky2_id_table,
+ .probe = sky2_probe,
+ .remove = sky2_remove,
+ .shutdown = sky2_shutdown,
+ .driver.pm = SKY2_PM_OPS,
+};
+
+static int __init sky2_init_module(void)
+{
+ pr_info("driver version " DRV_VERSION "\n");
+
+ sky2_debug_init();
+ return pci_register_driver(&sky2_driver);
+}
+
+static void __exit sky2_cleanup_module(void)
+{
+ pci_unregister_driver(&sky2_driver);
+ sky2_debug_cleanup();
+}
+
+module_init(sky2_init_module);
+module_exit(sky2_cleanup_module);
+
+MODULE_DESCRIPTION("Marvell Yukon 2 Gigabit Ethernet driver");
+MODULE_AUTHOR("Stephen Hemminger <shemminger@linux-foundation.org>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/net/ethernet/marvell/sky2.h b/drivers/net/ethernet/marvell/sky2.h
new file mode 100644
index 000000000..8d0bacf4e
--- /dev/null
+++ b/drivers/net/ethernet/marvell/sky2.h
@@ -0,0 +1,2428 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Definitions for the new Marvell Yukon 2 driver.
+ */
+#ifndef _SKY2_H
+#define _SKY2_H
+
+#define ETH_JUMBO_MTU 9000 /* Maximum MTU supported */
+
+/* PCI config registers */
+enum {
+ PCI_DEV_REG1 = 0x40,
+ PCI_DEV_REG2 = 0x44,
+ PCI_DEV_STATUS = 0x7c,
+ PCI_DEV_REG3 = 0x80,
+ PCI_DEV_REG4 = 0x84,
+ PCI_DEV_REG5 = 0x88,
+ PCI_CFG_REG_0 = 0x90,
+ PCI_CFG_REG_1 = 0x94,
+
+ PSM_CONFIG_REG0 = 0x98,
+ PSM_CONFIG_REG1 = 0x9C,
+ PSM_CONFIG_REG2 = 0x160,
+ PSM_CONFIG_REG3 = 0x164,
+ PSM_CONFIG_REG4 = 0x168,
+
+ PCI_LDO_CTRL = 0xbc,
+};
+
+/* Yukon-2 */
+enum pci_dev_reg_1 {
+ PCI_Y2_PIG_ENA = 1<<31, /* Enable Plug-in-Go (YUKON-2) */
+ PCI_Y2_DLL_DIS = 1<<30, /* Disable PCI DLL (YUKON-2) */
+ PCI_SW_PWR_ON_RST= 1<<30, /* SW Power on Reset (Yukon-EX) */
+ PCI_Y2_PHY2_COMA = 1<<29, /* Set PHY 2 to Coma Mode (YUKON-2) */
+ PCI_Y2_PHY1_COMA = 1<<28, /* Set PHY 1 to Coma Mode (YUKON-2) */
+ PCI_Y2_PHY2_POWD = 1<<27, /* Set PHY 2 to Power Down (YUKON-2) */
+ PCI_Y2_PHY1_POWD = 1<<26, /* Set PHY 1 to Power Down (YUKON-2) */
+ PCI_Y2_PME_LEGACY= 1<<15, /* PCI Express legacy power management mode */
+
+ PCI_PHY_LNK_TIM_MSK= 3L<<8,/* Bit 9.. 8: GPHY Link Trigger Timer */
+ PCI_ENA_L1_EVENT = 1<<7, /* Enable PEX L1 Event */
+ PCI_ENA_GPHY_LNK = 1<<6, /* Enable PEX L1 on GPHY Link down */
+ PCI_FORCE_PEX_L1 = 1<<5, /* Force to PEX L1 */
+};
+
+enum pci_dev_reg_2 {
+ PCI_VPD_WR_THR = 0xffL<<24, /* Bit 31..24: VPD Write Threshold */
+ PCI_DEV_SEL = 0x7fL<<17, /* Bit 23..17: EEPROM Device Select */
+ PCI_VPD_ROM_SZ = 7L<<14, /* Bit 16..14: VPD ROM Size */
+
+ PCI_PATCH_DIR = 0xfL<<8, /* Bit 11.. 8: Ext Patches dir 3..0 */
+ PCI_EXT_PATCHS = 0xfL<<4, /* Bit 7.. 4: Extended Patches 3..0 */
+ PCI_EN_DUMMY_RD = 1<<3, /* Enable Dummy Read */
+ PCI_REV_DESC = 1<<2, /* Reverse Desc. Bytes */
+
+ PCI_USEDATA64 = 1<<0, /* Use 64Bit Data bus ext */
+};
+
+/* PCI_OUR_REG_3 32 bit Our Register 3 (Yukon-ECU only) */
+enum pci_dev_reg_3 {
+ P_CLK_ASF_REGS_DIS = 1<<18,/* Disable Clock ASF (Yukon-Ext.) */
+ P_CLK_COR_REGS_D0_DIS = 1<<17,/* Disable Clock Core Regs D0 */
+ P_CLK_MACSEC_DIS = 1<<17,/* Disable Clock MACSec (Yukon-Ext.) */
+ P_CLK_PCI_REGS_D0_DIS = 1<<16,/* Disable Clock PCI Regs D0 */
+ P_CLK_COR_YTB_ARB_DIS = 1<<15,/* Disable Clock YTB Arbiter */
+ P_CLK_MAC_LNK1_D3_DIS = 1<<14,/* Disable Clock MAC Link1 D3 */
+ P_CLK_COR_LNK1_D0_DIS = 1<<13,/* Disable Clock Core Link1 D0 */
+ P_CLK_MAC_LNK1_D0_DIS = 1<<12,/* Disable Clock MAC Link1 D0 */
+ P_CLK_COR_LNK1_D3_DIS = 1<<11,/* Disable Clock Core Link1 D3 */
+ P_CLK_PCI_MST_ARB_DIS = 1<<10,/* Disable Clock PCI Master Arb. */
+ P_CLK_COR_REGS_D3_DIS = 1<<9, /* Disable Clock Core Regs D3 */
+ P_CLK_PCI_REGS_D3_DIS = 1<<8, /* Disable Clock PCI Regs D3 */
+ P_CLK_REF_LNK1_GM_DIS = 1<<7, /* Disable Clock Ref. Link1 GMAC */
+ P_CLK_COR_LNK1_GM_DIS = 1<<6, /* Disable Clock Core Link1 GMAC */
+ P_CLK_PCI_COMMON_DIS = 1<<5, /* Disable Clock PCI Common */
+ P_CLK_COR_COMMON_DIS = 1<<4, /* Disable Clock Core Common */
+ P_CLK_PCI_LNK1_BMU_DIS = 1<<3, /* Disable Clock PCI Link1 BMU */
+ P_CLK_COR_LNK1_BMU_DIS = 1<<2, /* Disable Clock Core Link1 BMU */
+ P_CLK_PCI_LNK1_BIU_DIS = 1<<1, /* Disable Clock PCI Link1 BIU */
+ P_CLK_COR_LNK1_BIU_DIS = 1<<0, /* Disable Clock Core Link1 BIU */
+ PCIE_OUR3_WOL_D3_COLD_SET = P_CLK_ASF_REGS_DIS |
+ P_CLK_COR_REGS_D0_DIS |
+ P_CLK_COR_LNK1_D0_DIS |
+ P_CLK_MAC_LNK1_D0_DIS |
+ P_CLK_PCI_MST_ARB_DIS |
+ P_CLK_COR_COMMON_DIS |
+ P_CLK_COR_LNK1_BMU_DIS,
+};
+
+/* PCI_OUR_REG_4 32 bit Our Register 4 (Yukon-ECU only) */
+enum pci_dev_reg_4 {
+ /* (Link Training & Status State Machine) */
+ P_PEX_LTSSM_STAT_MSK = 0x7fL<<25, /* Bit 31..25: PEX LTSSM Mask */
+#define P_PEX_LTSSM_STAT(x) ((x << 25) & P_PEX_LTSSM_STAT_MSK)
+ P_PEX_LTSSM_L1_STAT = 0x34,
+ P_PEX_LTSSM_DET_STAT = 0x01,
+ P_TIMER_VALUE_MSK = 0xffL<<16, /* Bit 23..16: Timer Value Mask */
+ /* (Active State Power Management) */
+ P_FORCE_ASPM_REQUEST = 1<<15, /* Force ASPM Request (A1 only) */
+ P_ASPM_GPHY_LINK_DOWN = 1<<14, /* GPHY Link Down (A1 only) */
+ P_ASPM_INT_FIFO_EMPTY = 1<<13, /* Internal FIFO Empty (A1 only) */
+ P_ASPM_CLKRUN_REQUEST = 1<<12, /* CLKRUN Request (A1 only) */
+
+ P_ASPM_FORCE_CLKREQ_ENA = 1<<4, /* Force CLKREQ Enable (A1b only) */
+ P_ASPM_CLKREQ_PAD_CTL = 1<<3, /* CLKREQ PAD Control (A1 only) */
+ P_ASPM_A1_MODE_SELECT = 1<<2, /* A1 Mode Select (A1 only) */
+ P_CLK_GATE_PEX_UNIT_ENA = 1<<1, /* Enable Gate PEX Unit Clock */
+ P_CLK_GATE_ROOT_COR_ENA = 1<<0, /* Enable Gate Root Core Clock */
+ P_ASPM_CONTROL_MSK = P_FORCE_ASPM_REQUEST | P_ASPM_GPHY_LINK_DOWN
+ | P_ASPM_CLKRUN_REQUEST | P_ASPM_INT_FIFO_EMPTY,
+};
+
+/* PCI_OUR_REG_5 32 bit Our Register 5 (Yukon-ECU only) */
+enum pci_dev_reg_5 {
+ /* Bit 31..27: for A3 & later */
+ P_CTL_DIV_CORE_CLK_ENA = 1<<31, /* Divide Core Clock Enable */
+ P_CTL_SRESET_VMAIN_AV = 1<<30, /* Soft Reset for Vmain_av De-Glitch */
+ P_CTL_BYPASS_VMAIN_AV = 1<<29, /* Bypass En. for Vmain_av De-Glitch */
+ P_CTL_TIM_VMAIN_AV_MSK = 3<<27, /* Bit 28..27: Timer Vmain_av Mask */
+ /* Bit 26..16: Release Clock on Event */
+ P_REL_PCIE_RST_DE_ASS = 1<<26, /* PCIe Reset De-Asserted */
+ P_REL_GPHY_REC_PACKET = 1<<25, /* GPHY Received Packet */
+ P_REL_INT_FIFO_N_EMPTY = 1<<24, /* Internal FIFO Not Empty */
+ P_REL_MAIN_PWR_AVAIL = 1<<23, /* Main Power Available */
+ P_REL_CLKRUN_REQ_REL = 1<<22, /* CLKRUN Request Release */
+ P_REL_PCIE_RESET_ASS = 1<<21, /* PCIe Reset Asserted */
+ P_REL_PME_ASSERTED = 1<<20, /* PME Asserted */
+ P_REL_PCIE_EXIT_L1_ST = 1<<19, /* PCIe Exit L1 State */
+ P_REL_LOADER_NOT_FIN = 1<<18, /* EPROM Loader Not Finished */
+ P_REL_PCIE_RX_EX_IDLE = 1<<17, /* PCIe Rx Exit Electrical Idle State */
+ P_REL_GPHY_LINK_UP = 1<<16, /* GPHY Link Up */
+
+ /* Bit 10.. 0: Mask for Gate Clock */
+ P_GAT_PCIE_RST_ASSERTED = 1<<10,/* PCIe Reset Asserted */
+ P_GAT_GPHY_N_REC_PACKET = 1<<9, /* GPHY Not Received Packet */
+ P_GAT_INT_FIFO_EMPTY = 1<<8, /* Internal FIFO Empty */
+ P_GAT_MAIN_PWR_N_AVAIL = 1<<7, /* Main Power Not Available */
+ P_GAT_CLKRUN_REQ_REL = 1<<6, /* CLKRUN Not Requested */
+ P_GAT_PCIE_RESET_ASS = 1<<5, /* PCIe Reset Asserted */
+ P_GAT_PME_DE_ASSERTED = 1<<4, /* PME De-Asserted */
+ P_GAT_PCIE_ENTER_L1_ST = 1<<3, /* PCIe Enter L1 State */
+ P_GAT_LOADER_FINISHED = 1<<2, /* EPROM Loader Finished */
+ P_GAT_PCIE_RX_EL_IDLE = 1<<1, /* PCIe Rx Electrical Idle State */
+ P_GAT_GPHY_LINK_DOWN = 1<<0, /* GPHY Link Down */
+
+ PCIE_OUR5_EVENT_CLK_D3_SET = P_REL_GPHY_REC_PACKET |
+ P_REL_INT_FIFO_N_EMPTY |
+ P_REL_PCIE_EXIT_L1_ST |
+ P_REL_PCIE_RX_EX_IDLE |
+ P_GAT_GPHY_N_REC_PACKET |
+ P_GAT_INT_FIFO_EMPTY |
+ P_GAT_PCIE_ENTER_L1_ST |
+ P_GAT_PCIE_RX_EL_IDLE,
+};
+
+/* PCI_CFG_REG_1 32 bit Config Register 1 (Yukon-Ext only) */
+enum pci_cfg_reg1 {
+ P_CF1_DIS_REL_EVT_RST = 1<<24, /* Dis. Rel. Event during PCIE reset */
+ /* Bit 23..21: Release Clock on Event */
+ P_CF1_REL_LDR_NOT_FIN = 1<<23, /* EEPROM Loader Not Finished */
+ P_CF1_REL_VMAIN_AVLBL = 1<<22, /* Vmain available */
+ P_CF1_REL_PCIE_RESET = 1<<21, /* PCI-E reset */
+ /* Bit 20..18: Gate Clock on Event */
+ P_CF1_GAT_LDR_NOT_FIN = 1<<20, /* EEPROM Loader Finished */
+ P_CF1_GAT_PCIE_RX_IDLE = 1<<19, /* PCI-E Rx Electrical idle */
+ P_CF1_GAT_PCIE_RESET = 1<<18, /* PCI-E Reset */
+ P_CF1_PRST_PHY_CLKREQ = 1<<17, /* Enable PCI-E rst & PM2PHY gen. CLKREQ */
+ P_CF1_PCIE_RST_CLKREQ = 1<<16, /* Enable PCI-E rst generate CLKREQ */
+
+ P_CF1_ENA_CFG_LDR_DONE = 1<<8, /* Enable core level Config loader done */
+
+ P_CF1_ENA_TXBMU_RD_IDLE = 1<<1, /* Enable TX BMU Read IDLE for ASPM */
+ P_CF1_ENA_TXBMU_WR_IDLE = 1<<0, /* Enable TX BMU Write IDLE for ASPM */
+
+ PCIE_CFG1_EVENT_CLK_D3_SET = P_CF1_DIS_REL_EVT_RST |
+ P_CF1_REL_LDR_NOT_FIN |
+ P_CF1_REL_VMAIN_AVLBL |
+ P_CF1_REL_PCIE_RESET |
+ P_CF1_GAT_LDR_NOT_FIN |
+ P_CF1_GAT_PCIE_RESET |
+ P_CF1_PRST_PHY_CLKREQ |
+ P_CF1_ENA_CFG_LDR_DONE |
+ P_CF1_ENA_TXBMU_RD_IDLE |
+ P_CF1_ENA_TXBMU_WR_IDLE,
+};
+
+/* Yukon-Optima */
+enum {
+ PSM_CONFIG_REG1_AC_PRESENT_STATUS = 1<<31, /* AC Present Status */
+
+ PSM_CONFIG_REG1_PTP_CLK_SEL = 1<<29, /* PTP Clock Select */
+ PSM_CONFIG_REG1_PTP_MODE = 1<<28, /* PTP Mode */
+
+ PSM_CONFIG_REG1_MUX_PHY_LINK = 1<<27, /* PHY Energy Detect Event */
+
+ PSM_CONFIG_REG1_EN_PIN63_AC_PRESENT = 1<<26, /* Enable LED_DUPLEX for ac_present */
+ PSM_CONFIG_REG1_EN_PCIE_TIMER = 1<<25, /* Enable PCIe Timer */
+ PSM_CONFIG_REG1_EN_SPU_TIMER = 1<<24, /* Enable SPU Timer */
+ PSM_CONFIG_REG1_POLARITY_AC_PRESENT = 1<<23, /* AC Present Polarity */
+
+ PSM_CONFIG_REG1_EN_AC_PRESENT = 1<<21, /* Enable AC Present */
+
+ PSM_CONFIG_REG1_EN_GPHY_INT_PSM = 1<<20, /* Enable GPHY INT for PSM */
+ PSM_CONFIG_REG1_DIS_PSM_TIMER = 1<<19, /* Disable PSM Timer */
+};
+
+/* Yukon-Supreme */
+enum {
+ PSM_CONFIG_REG1_GPHY_ENERGY_STS = 1<<31, /* GPHY Energy Detect Status */
+
+ PSM_CONFIG_REG1_UART_MODE_MSK = 3<<29, /* UART_Mode */
+ PSM_CONFIG_REG1_CLK_RUN_ASF = 1<<28, /* Enable Clock Free Running for ASF Subsystem */
+ PSM_CONFIG_REG1_UART_CLK_DISABLE= 1<<27, /* Disable UART clock */
+ PSM_CONFIG_REG1_VAUX_ONE = 1<<26, /* Tie internal Vaux to 1'b1 */
+ PSM_CONFIG_REG1_UART_FC_RI_VAL = 1<<25, /* Default value for UART_RI_n */
+ PSM_CONFIG_REG1_UART_FC_DCD_VAL = 1<<24, /* Default value for UART_DCD_n */
+ PSM_CONFIG_REG1_UART_FC_DSR_VAL = 1<<23, /* Default value for UART_DSR_n */
+ PSM_CONFIG_REG1_UART_FC_CTS_VAL = 1<<22, /* Default value for UART_CTS_n */
+ PSM_CONFIG_REG1_LATCH_VAUX = 1<<21, /* Enable Latch current Vaux_avlbl */
+ PSM_CONFIG_REG1_FORCE_TESTMODE_INPUT= 1<<20, /* Force Testmode pin as input PAD */
+ PSM_CONFIG_REG1_UART_RST = 1<<19, /* UART_RST */
+ PSM_CONFIG_REG1_PSM_PCIE_L1_POL = 1<<18, /* PCIE L1 Event Polarity for PSM */
+ PSM_CONFIG_REG1_TIMER_STAT = 1<<17, /* PSM Timer Status */
+ PSM_CONFIG_REG1_GPHY_INT = 1<<16, /* GPHY INT Status */
+ PSM_CONFIG_REG1_FORCE_TESTMODE_ZERO= 1<<15, /* Force internal Testmode as 1'b0 */
+ PSM_CONFIG_REG1_EN_INT_ASPM_CLKREQ = 1<<14, /* ENABLE INT for CLKRUN on ASPM and CLKREQ */
+ PSM_CONFIG_REG1_EN_SND_TASK_ASPM_CLKREQ = 1<<13, /* ENABLE Snd_task for CLKRUN on ASPM and CLKREQ */
+ PSM_CONFIG_REG1_DIS_CLK_GATE_SND_TASK = 1<<12, /* Disable CLK_GATE control snd_task */
+ PSM_CONFIG_REG1_DIS_FF_CHIAN_SND_INTA = 1<<11, /* Disable flip-flop chain for sndmsg_inta */
+
+ PSM_CONFIG_REG1_DIS_LOADER = 1<<9, /* Disable Loader SM after PSM Goes back to IDLE */
+ PSM_CONFIG_REG1_DO_PWDN = 1<<8, /* Do Power Down, Start PSM Scheme */
+ PSM_CONFIG_REG1_DIS_PIG = 1<<7, /* Disable Plug-in-Go SM after PSM Goes back to IDLE */
+ PSM_CONFIG_REG1_DIS_PERST = 1<<6, /* Disable Internal PCIe Reset after PSM Goes back to IDLE */
+ PSM_CONFIG_REG1_EN_REG18_PD = 1<<5, /* Enable REG18 Power Down for PSM */
+ PSM_CONFIG_REG1_EN_PSM_LOAD = 1<<4, /* Disable EEPROM Loader after PSM Goes back to IDLE */
+ PSM_CONFIG_REG1_EN_PSM_HOT_RST = 1<<3, /* Enable PCIe Hot Reset for PSM */
+ PSM_CONFIG_REG1_EN_PSM_PERST = 1<<2, /* Enable PCIe Reset Event for PSM */
+ PSM_CONFIG_REG1_EN_PSM_PCIE_L1 = 1<<1, /* Enable PCIe L1 Event for PSM */
+ PSM_CONFIG_REG1_EN_PSM = 1<<0, /* Enable PSM Scheme */
+};
+
+/* PSM_CONFIG_REG4 0x0168 PSM Config Register 4 */
+enum {
+ /* PHY Link Detect Timer */
+ PSM_CONFIG_REG4_TIMER_PHY_LINK_DETECT_MSK = 0xf<<4,
+ PSM_CONFIG_REG4_TIMER_PHY_LINK_DETECT_BASE = 4,
+
+ PSM_CONFIG_REG4_DEBUG_TIMER = 1<<1, /* Debug Timer */
+ PSM_CONFIG_REG4_RST_PHY_LINK_DETECT = 1<<0, /* Reset GPHY Link Detect */
+};
+
+
+enum csr_regs {
+ B0_RAP = 0x0000,
+ B0_CTST = 0x0004,
+
+ B0_POWER_CTRL = 0x0007,
+ B0_ISRC = 0x0008,
+ B0_IMSK = 0x000c,
+ B0_HWE_ISRC = 0x0010,
+ B0_HWE_IMSK = 0x0014,
+
+ /* Special ISR registers (Yukon-2 only) */
+ B0_Y2_SP_ISRC2 = 0x001c,
+ B0_Y2_SP_ISRC3 = 0x0020,
+ B0_Y2_SP_EISR = 0x0024,
+ B0_Y2_SP_LISR = 0x0028,
+ B0_Y2_SP_ICR = 0x002c,
+
+ B2_MAC_1 = 0x0100,
+ B2_MAC_2 = 0x0108,
+ B2_MAC_3 = 0x0110,
+ B2_CONN_TYP = 0x0118,
+ B2_PMD_TYP = 0x0119,
+ B2_MAC_CFG = 0x011a,
+ B2_CHIP_ID = 0x011b,
+ B2_E_0 = 0x011c,
+
+ B2_Y2_CLK_GATE = 0x011d,
+ B2_Y2_HW_RES = 0x011e,
+ B2_E_3 = 0x011f,
+ B2_Y2_CLK_CTRL = 0x0120,
+
+ B2_TI_INI = 0x0130,
+ B2_TI_VAL = 0x0134,
+ B2_TI_CTRL = 0x0138,
+ B2_TI_TEST = 0x0139,
+
+ B2_TST_CTRL1 = 0x0158,
+ B2_TST_CTRL2 = 0x0159,
+ B2_GP_IO = 0x015c,
+
+ B2_I2C_CTRL = 0x0160,
+ B2_I2C_DATA = 0x0164,
+ B2_I2C_IRQ = 0x0168,
+ B2_I2C_SW = 0x016c,
+
+ Y2_PEX_PHY_DATA = 0x0170,
+ Y2_PEX_PHY_ADDR = 0x0172,
+
+ B3_RAM_ADDR = 0x0180,
+ B3_RAM_DATA_LO = 0x0184,
+ B3_RAM_DATA_HI = 0x0188,
+
+/* RAM Interface Registers */
+/* Yukon-2: use RAM_BUFFER() to access the RAM buffer */
+/*
+ * The HW spec calls these registers Timeout Value 0..11, but those names
+ * are not usable in SW. Note that these are NOT real timeouts; they are
+ * the number of qWords transferred continuously.
+ */
+#define RAM_BUFFER(port, reg) (reg | (port <<6))
+
+ B3_RI_WTO_R1 = 0x0190,
+ B3_RI_WTO_XA1 = 0x0191,
+ B3_RI_WTO_XS1 = 0x0192,
+ B3_RI_RTO_R1 = 0x0193,
+ B3_RI_RTO_XA1 = 0x0194,
+ B3_RI_RTO_XS1 = 0x0195,
+ B3_RI_WTO_R2 = 0x0196,
+ B3_RI_WTO_XA2 = 0x0197,
+ B3_RI_WTO_XS2 = 0x0198,
+ B3_RI_RTO_R2 = 0x0199,
+ B3_RI_RTO_XA2 = 0x019a,
+ B3_RI_RTO_XS2 = 0x019b,
+ B3_RI_TO_VAL = 0x019c,
+ B3_RI_CTRL = 0x01a0,
+ B3_RI_TEST = 0x01a2,
+ B3_MA_TOINI_RX1 = 0x01b0,
+ B3_MA_TOINI_RX2 = 0x01b1,
+ B3_MA_TOINI_TX1 = 0x01b2,
+ B3_MA_TOINI_TX2 = 0x01b3,
+ B3_MA_TOVAL_RX1 = 0x01b4,
+ B3_MA_TOVAL_RX2 = 0x01b5,
+ B3_MA_TOVAL_TX1 = 0x01b6,
+ B3_MA_TOVAL_TX2 = 0x01b7,
+ B3_MA_TO_CTRL = 0x01b8,
+ B3_MA_TO_TEST = 0x01ba,
+ B3_MA_RCINI_RX1 = 0x01c0,
+ B3_MA_RCINI_RX2 = 0x01c1,
+ B3_MA_RCINI_TX1 = 0x01c2,
+ B3_MA_RCINI_TX2 = 0x01c3,
+ B3_MA_RCVAL_RX1 = 0x01c4,
+ B3_MA_RCVAL_RX2 = 0x01c5,
+ B3_MA_RCVAL_TX1 = 0x01c6,
+ B3_MA_RCVAL_TX2 = 0x01c7,
+ B3_MA_RC_CTRL = 0x01c8,
+ B3_MA_RC_TEST = 0x01ca,
+ B3_PA_TOINI_RX1 = 0x01d0,
+ B3_PA_TOINI_RX2 = 0x01d4,
+ B3_PA_TOINI_TX1 = 0x01d8,
+ B3_PA_TOINI_TX2 = 0x01dc,
+ B3_PA_TOVAL_RX1 = 0x01e0,
+ B3_PA_TOVAL_RX2 = 0x01e4,
+ B3_PA_TOVAL_TX1 = 0x01e8,
+ B3_PA_TOVAL_TX2 = 0x01ec,
+ B3_PA_CTRL = 0x01f0,
+ B3_PA_TEST = 0x01f2,
+
+ Y2_CFG_SPC = 0x1c00, /* PCI config space region */
+ Y2_CFG_AER = 0x1d00, /* PCI Advanced Error Report region */
+};
+
+/* B0_CTST 24 bit Control/Status register */
+enum {
+ Y2_VMAIN_AVAIL = 1<<17,/* VMAIN available (YUKON-2 only) */
+ Y2_VAUX_AVAIL = 1<<16,/* VAUX available (YUKON-2 only) */
+ Y2_HW_WOL_ON = 1<<15,/* HW WOL On (Yukon-EC Ultra A1 only) */
+	Y2_HW_WOL_OFF	= 1<<14,/* HW WOL Off (Yukon-EC Ultra A1 only) */
+ Y2_ASF_ENABLE = 1<<13,/* ASF Unit Enable (YUKON-2 only) */
+ Y2_ASF_DISABLE = 1<<12,/* ASF Unit Disable (YUKON-2 only) */
+ Y2_CLK_RUN_ENA = 1<<11,/* CLK_RUN Enable (YUKON-2 only) */
+ Y2_CLK_RUN_DIS = 1<<10,/* CLK_RUN Disable (YUKON-2 only) */
+ Y2_LED_STAT_ON = 1<<9, /* Status LED On (YUKON-2 only) */
+ Y2_LED_STAT_OFF = 1<<8, /* Status LED Off (YUKON-2 only) */
+
+ CS_ST_SW_IRQ = 1<<7, /* Set IRQ SW Request */
+ CS_CL_SW_IRQ = 1<<6, /* Clear IRQ SW Request */
+ CS_STOP_DONE = 1<<5, /* Stop Master is finished */
+ CS_STOP_MAST = 1<<4, /* Command Bit to stop the master */
+ CS_MRST_CLR = 1<<3, /* Clear Master reset */
+ CS_MRST_SET = 1<<2, /* Set Master reset */
+ CS_RST_CLR = 1<<1, /* Clear Software reset */
+ CS_RST_SET = 1, /* Set Software reset */
+};
+
+/* B0_POWER_CTRL 8 Bit Power Control reg (YUKON only) */
+enum {
+ PC_VAUX_ENA = 1<<7, /* Switch VAUX Enable */
+ PC_VAUX_DIS = 1<<6, /* Switch VAUX Disable */
+ PC_VCC_ENA = 1<<5, /* Switch VCC Enable */
+ PC_VCC_DIS = 1<<4, /* Switch VCC Disable */
+ PC_VAUX_ON = 1<<3, /* Switch VAUX On */
+ PC_VAUX_OFF = 1<<2, /* Switch VAUX Off */
+ PC_VCC_ON = 1<<1, /* Switch VCC On */
+ PC_VCC_OFF = 1<<0, /* Switch VCC Off */
+};
+
+/* B2_IRQM_MSK 32 bit IRQ Moderation Mask */
+
+/* B0_Y2_SP_ISRC2 32 bit Special Interrupt Source Reg 2 */
+/* B0_Y2_SP_ISRC3 32 bit Special Interrupt Source Reg 3 */
+/* B0_Y2_SP_EISR 32 bit Enter ISR Reg */
+/* B0_Y2_SP_LISR 32 bit Leave ISR Reg */
+enum {
+ Y2_IS_HW_ERR = 1<<31, /* Interrupt HW Error */
+ Y2_IS_STAT_BMU = 1<<30, /* Status BMU Interrupt */
+ Y2_IS_ASF = 1<<29, /* ASF subsystem Interrupt */
+ Y2_IS_CPU_TO = 1<<28, /* CPU Timeout */
+ Y2_IS_POLL_CHK = 1<<27, /* Check IRQ from polling unit */
+ Y2_IS_TWSI_RDY = 1<<26, /* IRQ on end of TWSI Tx */
+ Y2_IS_IRQ_SW = 1<<25, /* SW forced IRQ */
+ Y2_IS_TIMINT = 1<<24, /* IRQ from Timer */
+
+ Y2_IS_IRQ_PHY2 = 1<<12, /* Interrupt from PHY 2 */
+ Y2_IS_IRQ_MAC2 = 1<<11, /* Interrupt from MAC 2 */
+ Y2_IS_CHK_RX2 = 1<<10, /* Descriptor error Rx 2 */
+ Y2_IS_CHK_TXS2 = 1<<9, /* Descriptor error TXS 2 */
+ Y2_IS_CHK_TXA2 = 1<<8, /* Descriptor error TXA 2 */
+
+ Y2_IS_PSM_ACK = 1<<7, /* PSM Acknowledge (Yukon-Optima only) */
+ Y2_IS_PTP_TIST = 1<<6, /* PTP Time Stamp (Yukon-Optima only) */
+ Y2_IS_PHY_QLNK = 1<<5, /* PHY Quick Link (Yukon-Optima only) */
+
+ Y2_IS_IRQ_PHY1 = 1<<4, /* Interrupt from PHY 1 */
+ Y2_IS_IRQ_MAC1 = 1<<3, /* Interrupt from MAC 1 */
+ Y2_IS_CHK_RX1 = 1<<2, /* Descriptor error Rx 1 */
+ Y2_IS_CHK_TXS1 = 1<<1, /* Descriptor error TXS 1 */
+ Y2_IS_CHK_TXA1 = 1<<0, /* Descriptor error TXA 1 */
+
+ Y2_IS_BASE = Y2_IS_HW_ERR | Y2_IS_STAT_BMU,
+ Y2_IS_PORT_1 = Y2_IS_IRQ_PHY1 | Y2_IS_IRQ_MAC1
+ | Y2_IS_CHK_TXA1 | Y2_IS_CHK_RX1,
+ Y2_IS_PORT_2 = Y2_IS_IRQ_PHY2 | Y2_IS_IRQ_MAC2
+ | Y2_IS_CHK_TXA2 | Y2_IS_CHK_RX2,
+ Y2_IS_ERROR = Y2_IS_HW_ERR |
+ Y2_IS_IRQ_MAC1 | Y2_IS_CHK_TXA1 | Y2_IS_CHK_RX1 |
+ Y2_IS_IRQ_MAC2 | Y2_IS_CHK_TXA2 | Y2_IS_CHK_RX2,
+};
+
+/* B2_IRQM_HWE_MSK 32 bit IRQ Moderation HW Error Mask */
+enum {
+ IS_ERR_MSK = 0x00003fff,/* All Error bits */
+
+ IS_IRQ_TIST_OV = 1<<13, /* Time Stamp Timer Overflow (YUKON only) */
+ IS_IRQ_SENSOR = 1<<12, /* IRQ from Sensor (YUKON only) */
+ IS_IRQ_MST_ERR = 1<<11, /* IRQ master error detected */
+ IS_IRQ_STAT = 1<<10, /* IRQ status exception */
+ IS_NO_STAT_M1 = 1<<9, /* No Rx Status from MAC 1 */
+ IS_NO_STAT_M2 = 1<<8, /* No Rx Status from MAC 2 */
+ IS_NO_TIST_M1 = 1<<7, /* No Time Stamp from MAC 1 */
+ IS_NO_TIST_M2 = 1<<6, /* No Time Stamp from MAC 2 */
+ IS_RAM_RD_PAR = 1<<5, /* RAM Read Parity Error */
+ IS_RAM_WR_PAR = 1<<4, /* RAM Write Parity Error */
+ IS_M1_PAR_ERR = 1<<3, /* MAC 1 Parity Error */
+ IS_M2_PAR_ERR = 1<<2, /* MAC 2 Parity Error */
+ IS_R1_PAR_ERR = 1<<1, /* Queue R1 Parity Error */
+ IS_R2_PAR_ERR = 1<<0, /* Queue R2 Parity Error */
+};
+
+/* Hardware error interrupt mask for Yukon 2 */
+enum {
+ Y2_IS_TIST_OV = 1<<29,/* Time Stamp Timer overflow interrupt */
+ Y2_IS_SENSOR = 1<<28, /* Sensor interrupt */
+ Y2_IS_MST_ERR = 1<<27, /* Master error interrupt */
+ Y2_IS_IRQ_STAT = 1<<26, /* Status exception interrupt */
+ Y2_IS_PCI_EXP = 1<<25, /* PCI-Express interrupt */
+ Y2_IS_PCI_NEXP = 1<<24, /* PCI-Express error similar to PCI error */
+ /* Link 2 */
+ Y2_IS_PAR_RD2 = 1<<13, /* Read RAM parity error interrupt */
+ Y2_IS_PAR_WR2 = 1<<12, /* Write RAM parity error interrupt */
+ Y2_IS_PAR_MAC2 = 1<<11, /* MAC hardware fault interrupt */
+ Y2_IS_PAR_RX2 = 1<<10, /* Parity Error Rx Queue 2 */
+ Y2_IS_TCP_TXS2 = 1<<9, /* TCP length mismatch sync Tx queue IRQ */
+ Y2_IS_TCP_TXA2 = 1<<8, /* TCP length mismatch async Tx queue IRQ */
+ /* Link 1 */
+ Y2_IS_PAR_RD1 = 1<<5, /* Read RAM parity error interrupt */
+ Y2_IS_PAR_WR1 = 1<<4, /* Write RAM parity error interrupt */
+ Y2_IS_PAR_MAC1 = 1<<3, /* MAC hardware fault interrupt */
+ Y2_IS_PAR_RX1 = 1<<2, /* Parity Error Rx Queue 1 */
+ Y2_IS_TCP_TXS1 = 1<<1, /* TCP length mismatch sync Tx queue IRQ */
+ Y2_IS_TCP_TXA1 = 1<<0, /* TCP length mismatch async Tx queue IRQ */
+
+ Y2_HWE_L1_MASK = Y2_IS_PAR_RD1 | Y2_IS_PAR_WR1 | Y2_IS_PAR_MAC1 |
+ Y2_IS_PAR_RX1 | Y2_IS_TCP_TXS1| Y2_IS_TCP_TXA1,
+ Y2_HWE_L2_MASK = Y2_IS_PAR_RD2 | Y2_IS_PAR_WR2 | Y2_IS_PAR_MAC2 |
+ Y2_IS_PAR_RX2 | Y2_IS_TCP_TXS2| Y2_IS_TCP_TXA2,
+
+ Y2_HWE_ALL_MASK = Y2_IS_TIST_OV | Y2_IS_MST_ERR | Y2_IS_IRQ_STAT |
+ Y2_HWE_L1_MASK | Y2_HWE_L2_MASK,
+};
+
+/* B28_DPT_CTRL 8 bit Descriptor Poll Timer Ctrl Reg */
+enum {
+ DPT_START = 1<<1,
+ DPT_STOP = 1<<0,
+};
+
+/* B2_TST_CTRL1 8 bit Test Control Register 1 */
+enum {
+ TST_FRC_DPERR_MR = 1<<7, /* force DATAPERR on MST RD */
+ TST_FRC_DPERR_MW = 1<<6, /* force DATAPERR on MST WR */
+ TST_FRC_DPERR_TR = 1<<5, /* force DATAPERR on TRG RD */
+ TST_FRC_DPERR_TW = 1<<4, /* force DATAPERR on TRG WR */
+ TST_FRC_APERR_M = 1<<3, /* force ADDRPERR on MST */
+ TST_FRC_APERR_T = 1<<2, /* force ADDRPERR on TRG */
+ TST_CFG_WRITE_ON = 1<<1, /* Enable Config Reg WR */
+ TST_CFG_WRITE_OFF= 1<<0, /* Disable Config Reg WR */
+};
+
+/* B2_GPIO */
+enum {
+ GLB_GPIO_CLK_DEB_ENA = 1<<31, /* Clock Debug Enable */
+ GLB_GPIO_CLK_DBG_MSK = 0xf<<26, /* Clock Debug */
+
+ GLB_GPIO_INT_RST_D3_DIS = 1<<15, /* Disable Internal Reset After D3 to D0 */
+ GLB_GPIO_LED_PAD_SPEED_UP = 1<<14, /* LED PAD Speed Up */
+ GLB_GPIO_STAT_RACE_DIS = 1<<13, /* Status Race Disable */
+ GLB_GPIO_TEST_SEL_MSK = 3<<11, /* Testmode Select */
+ GLB_GPIO_TEST_SEL_BASE = 1<<11,
+ GLB_GPIO_RAND_ENA = 1<<10, /* Random Enable */
+ GLB_GPIO_RAND_BIT_1 = 1<<9, /* Random Bit 1 */
+};
+
+/* B2_MAC_CFG 8 bit MAC Configuration / Chip Revision */
+enum {
+ CFG_CHIP_R_MSK = 0xf<<4, /* Bit 7.. 4: Chip Revision */
+ /* Bit 3.. 2: reserved */
+ CFG_DIS_M2_CLK = 1<<1, /* Disable Clock for 2nd MAC */
+	CFG_SNG_MAC	= 1<<0,	/* MAC Config: 0=2 MACs / 1=1 MAC */
+};
+
+/* B2_CHIP_ID 8 bit Chip Identification Number */
+enum {
+ CHIP_ID_YUKON_XL = 0xb3, /* YUKON-2 XL */
+ CHIP_ID_YUKON_EC_U = 0xb4, /* YUKON-2 EC Ultra */
+ CHIP_ID_YUKON_EX = 0xb5, /* YUKON-2 Extreme */
+ CHIP_ID_YUKON_EC = 0xb6, /* YUKON-2 EC */
+ CHIP_ID_YUKON_FE = 0xb7, /* YUKON-2 FE */
+ CHIP_ID_YUKON_FE_P = 0xb8, /* YUKON-2 FE+ */
+ CHIP_ID_YUKON_SUPR = 0xb9, /* YUKON-2 Supreme */
+ CHIP_ID_YUKON_UL_2 = 0xba, /* YUKON-2 Ultra 2 */
+ CHIP_ID_YUKON_OPT = 0xbc, /* YUKON-2 Optima */
+ CHIP_ID_YUKON_PRM = 0xbd, /* YUKON-2 Optima Prime */
+ CHIP_ID_YUKON_OP_2 = 0xbe, /* YUKON-2 Optima 2 */
+};
+
+enum yukon_xl_rev {
+ CHIP_REV_YU_XL_A0 = 0,
+ CHIP_REV_YU_XL_A1 = 1,
+ CHIP_REV_YU_XL_A2 = 2,
+ CHIP_REV_YU_XL_A3 = 3,
+};
+
+enum yukon_ec_rev {
+ CHIP_REV_YU_EC_A1 = 0, /* Chip Rev. for Yukon-EC A1/A0 */
+ CHIP_REV_YU_EC_A2 = 1, /* Chip Rev. for Yukon-EC A2 */
+ CHIP_REV_YU_EC_A3 = 2, /* Chip Rev. for Yukon-EC A3 */
+};
+enum yukon_ec_u_rev {
+ CHIP_REV_YU_EC_U_A0 = 1,
+ CHIP_REV_YU_EC_U_A1 = 2,
+ CHIP_REV_YU_EC_U_B0 = 3,
+ CHIP_REV_YU_EC_U_B1 = 5,
+};
+enum yukon_fe_rev {
+ CHIP_REV_YU_FE_A1 = 1,
+ CHIP_REV_YU_FE_A2 = 2,
+};
+enum yukon_fe_p_rev {
+ CHIP_REV_YU_FE2_A0 = 0,
+};
+enum yukon_ex_rev {
+ CHIP_REV_YU_EX_A0 = 1,
+ CHIP_REV_YU_EX_B0 = 2,
+};
+enum yukon_supr_rev {
+ CHIP_REV_YU_SU_A0 = 0,
+ CHIP_REV_YU_SU_B0 = 1,
+ CHIP_REV_YU_SU_B1 = 3,
+};
+
+enum yukon_prm_rev {
+ CHIP_REV_YU_PRM_Z1 = 1,
+ CHIP_REV_YU_PRM_A0 = 2,
+};
+
+/* B2_Y2_CLK_GATE 8 bit Clock Gating (Yukon-2 only) */
+enum {
+ Y2_STATUS_LNK2_INAC = 1<<7, /* Status Link 2 inactive (0 = active) */
+ Y2_CLK_GAT_LNK2_DIS = 1<<6, /* Disable clock gating Link 2 */
+ Y2_COR_CLK_LNK2_DIS = 1<<5, /* Disable Core clock Link 2 */
+ Y2_PCI_CLK_LNK2_DIS = 1<<4, /* Disable PCI clock Link 2 */
+ Y2_STATUS_LNK1_INAC = 1<<3, /* Status Link 1 inactive (0 = active) */
+ Y2_CLK_GAT_LNK1_DIS = 1<<2, /* Disable clock gating Link 1 */
+ Y2_COR_CLK_LNK1_DIS = 1<<1, /* Disable Core clock Link 1 */
+ Y2_PCI_CLK_LNK1_DIS = 1<<0, /* Disable PCI clock Link 1 */
+};
+
+/* B2_Y2_HW_RES 8 bit HW Resources (Yukon-2 only) */
+enum {
+ CFG_LED_MODE_MSK = 7<<2, /* Bit 4.. 2: LED Mode Mask */
+ CFG_LINK_2_AVAIL = 1<<1, /* Link 2 available */
+ CFG_LINK_1_AVAIL = 1<<0, /* Link 1 available */
+};
+#define CFG_LED_MODE(x) (((x) & CFG_LED_MODE_MSK) >> 2)
+#define CFG_DUAL_MAC_MSK (CFG_LINK_2_AVAIL | CFG_LINK_1_AVAIL)
+
+
+/* B2_Y2_CLK_CTRL 32 bit Clock Frequency Control Register (Yukon-2/EC) */
+enum {
+ Y2_CLK_DIV_VAL_MSK = 0xff<<16,/* Bit 23..16: Clock Divisor Value */
+#define Y2_CLK_DIV_VAL(x) (((x)<<16) & Y2_CLK_DIV_VAL_MSK)
+ Y2_CLK_DIV_VAL2_MSK = 7<<21, /* Bit 23..21: Clock Divisor Value */
+ Y2_CLK_SELECT2_MSK = 0x1f<<16,/* Bit 20..16: Clock Select */
+#define Y2_CLK_DIV_VAL_2(x) (((x)<<21) & Y2_CLK_DIV_VAL2_MSK)
+#define Y2_CLK_SEL_VAL_2(x) (((x)<<16) & Y2_CLK_SELECT2_MSK)
+ Y2_CLK_DIV_ENA = 1<<1, /* Enable Core Clock Division */
+ Y2_CLK_DIV_DIS = 1<<0, /* Disable Core Clock Division */
+};
+
+/* B2_TI_CTRL 8 bit Timer control */
+/* B2_IRQM_CTRL 8 bit IRQ Moderation Timer Control */
+enum {
+ TIM_START = 1<<2, /* Start Timer */
+ TIM_STOP = 1<<1, /* Stop Timer */
+ TIM_CLR_IRQ = 1<<0, /* Clear Timer IRQ (!IRQM) */
+};
+
+/* B2_TI_TEST 8 Bit Timer Test */
+/* B2_IRQM_TEST 8 bit IRQ Moderation Timer Test */
+/* B28_DPT_TST 8 bit Descriptor Poll Timer Test Reg */
+enum {
+ TIM_T_ON = 1<<2, /* Test mode on */
+ TIM_T_OFF = 1<<1, /* Test mode off */
+ TIM_T_STEP = 1<<0, /* Test step */
+};
+
+/* Y2_PEX_PHY_ADDR/DATA PEX PHY address and data reg (Yukon-2 only) */
+enum {
+ PEX_RD_ACCESS = 1<<31, /* Access Mode Read = 1, Write = 0 */
+ PEX_DB_ACCESS = 1<<30, /* Access to debug register */
+};
+
+/* B3_RAM_ADDR 32 bit RAM Address, to read or write */
+ /* Bit 31..19: reserved */
+#define RAM_ADR_RAN 0x0007ffffL /* Bit 18.. 0: RAM Address Range */
+/* RAM Interface Registers */
+
+/* B3_RI_CTRL 16 bit RAM Interface Control Register */
+enum {
+ RI_CLR_RD_PERR = 1<<9, /* Clear IRQ RAM Read Parity Err */
+ RI_CLR_WR_PERR = 1<<8, /* Clear IRQ RAM Write Parity Err*/
+
+ RI_RST_CLR = 1<<1, /* Clear RAM Interface Reset */
+ RI_RST_SET = 1<<0, /* Set RAM Interface Reset */
+};
+
+#define SK_RI_TO_53 36 /* RAM interface timeout */
+
+
+/* Port related registers FIFO, and Arbiter */
+#define SK_REG(port,reg) (((port)<<7)+(reg))
+
+/* Transmit Arbiter Registers MAC 1 and 2, use SK_REG() to access */
+/* TXA_ITI_INI 32 bit Tx Arb Interval Timer Init Val */
+/* TXA_ITI_VAL 32 bit Tx Arb Interval Timer Value */
+/* TXA_LIM_INI 32 bit Tx Arb Limit Counter Init Val */
+/* TXA_LIM_VAL 32 bit Tx Arb Limit Counter Value */
+
+#define TXA_MAX_VAL 0x00ffffffUL /* Bit 23.. 0: Max TXA Timer/Cnt Val */
+
+/* TXA_CTRL 8 bit Tx Arbiter Control Register */
+enum {
+ TXA_ENA_FSYNC = 1<<7, /* Enable force of sync Tx queue */
+ TXA_DIS_FSYNC = 1<<6, /* Disable force of sync Tx queue */
+ TXA_ENA_ALLOC = 1<<5, /* Enable alloc of free bandwidth */
+ TXA_DIS_ALLOC = 1<<4, /* Disable alloc of free bandwidth */
+ TXA_START_RC = 1<<3, /* Start sync Rate Control */
+ TXA_STOP_RC = 1<<2, /* Stop sync Rate Control */
+ TXA_ENA_ARB = 1<<1, /* Enable Tx Arbiter */
+ TXA_DIS_ARB = 1<<0, /* Disable Tx Arbiter */
+};
+
+/*
+ * Bank 4 - 5
+ */
+/* Transmit Arbiter Registers MAC 1 and 2, use SK_REG() to access */
+enum {
+ TXA_ITI_INI = 0x0200,/* 32 bit Tx Arb Interval Timer Init Val*/
+ TXA_ITI_VAL = 0x0204,/* 32 bit Tx Arb Interval Timer Value */
+ TXA_LIM_INI = 0x0208,/* 32 bit Tx Arb Limit Counter Init Val */
+ TXA_LIM_VAL = 0x020c,/* 32 bit Tx Arb Limit Counter Value */
+ TXA_CTRL = 0x0210,/* 8 bit Tx Arbiter Control Register */
+ TXA_TEST = 0x0211,/* 8 bit Tx Arbiter Test Register */
+ TXA_STAT = 0x0212,/* 8 bit Tx Arbiter Status Register */
+
+ RSS_KEY = 0x0220, /* RSS Key setup */
+ RSS_CFG = 0x0248, /* RSS Configuration */
+};
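+
+/* e.g. SK_REG(1, TXA_CTRL) = 0x0080 + 0x0210 = 0x0290 addresses the second
+ * link's Tx Arbiter Control register (register bank 5), while port 0 uses
+ * 0x0210 directly in bank 4.
+ */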
+
+enum {
+ HASH_TCP_IPV6_EX_CTRL = 1<<5,
+ HASH_IPV6_EX_CTRL = 1<<4,
+ HASH_TCP_IPV6_CTRL = 1<<3,
+ HASH_IPV6_CTRL = 1<<2,
+ HASH_TCP_IPV4_CTRL = 1<<1,
+ HASH_IPV4_CTRL = 1<<0,
+
+ HASH_ALL = 0x3f,
+};
+
+enum {
+ B6_EXT_REG = 0x0300,/* External registers (GENESIS only) */
+ B7_CFG_SPC = 0x0380,/* copy of the Configuration register */
+ B8_RQ1_REGS = 0x0400,/* Receive Queue 1 */
+ B8_RQ2_REGS = 0x0480,/* Receive Queue 2 */
+ B8_TS1_REGS = 0x0600,/* Transmit sync queue 1 */
+ B8_TA1_REGS = 0x0680,/* Transmit async queue 1 */
+ B8_TS2_REGS = 0x0700,/* Transmit sync queue 2 */
+	B8_TA2_REGS	= 0x0780,/* Transmit async queue 2 */
+ B16_RAM_REGS = 0x0800,/* RAM Buffer Registers */
+};
+
+/* Queue Register Offsets, use Q_ADDR() to access */
+enum {
+ B8_Q_REGS = 0x0400, /* base of Queue registers */
+ Q_D = 0x00, /* 8*32 bit Current Descriptor */
+ Q_VLAN = 0x20, /* 16 bit Current VLAN Tag */
+ Q_DONE = 0x24, /* 16 bit Done Index */
+ Q_AC_L = 0x28, /* 32 bit Current Address Counter Low dWord */
+ Q_AC_H = 0x2c, /* 32 bit Current Address Counter High dWord */
+ Q_BC = 0x30, /* 32 bit Current Byte Counter */
+ Q_CSR = 0x34, /* 32 bit BMU Control/Status Register */
+ Q_TEST = 0x38, /* 32 bit Test/Control Register */
+
+/* Yukon-2 */
+ Q_WM = 0x40, /* 16 bit FIFO Watermark */
+ Q_AL = 0x42, /* 8 bit FIFO Alignment */
+ Q_RSP = 0x44, /* 16 bit FIFO Read Shadow Pointer */
+ Q_RSL = 0x46, /* 8 bit FIFO Read Shadow Level */
+ Q_RP = 0x48, /* 8 bit FIFO Read Pointer */
+ Q_RL = 0x4a, /* 8 bit FIFO Read Level */
+ Q_WP = 0x4c, /* 8 bit FIFO Write Pointer */
+ Q_WSP = 0x4d, /* 8 bit FIFO Write Shadow Pointer */
+ Q_WL = 0x4e, /* 8 bit FIFO Write Level */
+ Q_WSL = 0x4f, /* 8 bit FIFO Write Shadow Level */
+};
+#define Q_ADDR(reg, offs) (B8_Q_REGS + (reg) + (offs))
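+/* e.g. Q_ADDR(Q_R1, Q_CSR) = 0x0400 + 0x0000 + 0x34 = 0x0434 is the BMU
+ * control/status register of Receive Queue 1; for Receive Queue 2 the same
+ * register is at 0x0400 + 0x0080 + 0x34 = 0x04b4.
+ */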
+
+/* Q_TEST 32 bit Test Register */
+enum {
+ /* Transmit */
+ F_TX_CHK_AUTO_OFF = 1<<31, /* Tx checksum auto calc off (Yukon EX) */
+	F_TX_CHK_AUTO_ON = 1<<30, /* Tx checksum auto calc on (Yukon EX) */
+
+ /* Receive */
+ F_M_RX_RAM_DIS = 1<<24, /* MAC Rx RAM Read Port disable */
+
+ /* Hardware testbits not used */
+};
+
+/* Queue Prefetch Unit Offsets, use Y2_QADDR() to address (Yukon-2 only)*/
+enum {
+ Y2_B8_PREF_REGS = 0x0450,
+
+ PREF_UNIT_CTRL = 0x00, /* 32 bit Control register */
+ PREF_UNIT_LAST_IDX = 0x04, /* 16 bit Last Index */
+ PREF_UNIT_ADDR_LO = 0x08, /* 32 bit List start addr, low part */
+ PREF_UNIT_ADDR_HI = 0x0c, /* 32 bit List start addr, high part*/
+ PREF_UNIT_GET_IDX = 0x10, /* 16 bit Get Index */
+ PREF_UNIT_PUT_IDX = 0x14, /* 16 bit Put Index */
+ PREF_UNIT_FIFO_WP = 0x20, /* 8 bit FIFO write pointer */
+ PREF_UNIT_FIFO_RP = 0x24, /* 8 bit FIFO read pointer */
+ PREF_UNIT_FIFO_WM = 0x28, /* 8 bit FIFO watermark */
+ PREF_UNIT_FIFO_LEV = 0x2c, /* 8 bit FIFO level */
+
+ PREF_UNIT_MASK_IDX = 0x0fff,
+};
+#define Y2_QADDR(q,reg) (Y2_B8_PREF_REGS + (q) + (reg))
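+/* e.g. Y2_QADDR(Q_R1, PREF_UNIT_GET_IDX) = 0x0450 + 0x0000 + 0x10 = 0x0460
+ * reads the prefetch unit Get Index of Receive Queue 1.
+ */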
+
+/* RAM Buffer Register Offsets */
+enum {
+
+ RB_START = 0x00,/* 32 bit RAM Buffer Start Address */
+ RB_END = 0x04,/* 32 bit RAM Buffer End Address */
+ RB_WP = 0x08,/* 32 bit RAM Buffer Write Pointer */
+ RB_RP = 0x0c,/* 32 bit RAM Buffer Read Pointer */
+ RB_RX_UTPP = 0x10,/* 32 bit Rx Upper Threshold, Pause Packet */
+ RB_RX_LTPP = 0x14,/* 32 bit Rx Lower Threshold, Pause Packet */
+ RB_RX_UTHP = 0x18,/* 32 bit Rx Upper Threshold, High Prio */
+ RB_RX_LTHP = 0x1c,/* 32 bit Rx Lower Threshold, High Prio */
+ /* 0x10 - 0x1f: reserved at Tx RAM Buffer Registers */
+ RB_PC = 0x20,/* 32 bit RAM Buffer Packet Counter */
+ RB_LEV = 0x24,/* 32 bit RAM Buffer Level Register */
+ RB_CTRL = 0x28,/* 32 bit RAM Buffer Control Register */
+ RB_TST1 = 0x29,/* 8 bit RAM Buffer Test Register 1 */
+ RB_TST2 = 0x2a,/* 8 bit RAM Buffer Test Register 2 */
+};
+
+/* Receive and Transmit Queues */
+enum {
+ Q_R1 = 0x0000, /* Receive Queue 1 */
+ Q_R2 = 0x0080, /* Receive Queue 2 */
+ Q_XS1 = 0x0200, /* Synchronous Transmit Queue 1 */
+ Q_XA1 = 0x0280, /* Asynchronous Transmit Queue 1 */
+ Q_XS2 = 0x0300, /* Synchronous Transmit Queue 2 */
+ Q_XA2 = 0x0380, /* Asynchronous Transmit Queue 2 */
+};
+
+/* Different PHY Types */
+enum {
+ PHY_ADDR_MARV = 0,
+};
+
+#define RB_ADDR(offs, queue) ((u16) B16_RAM_REGS + (queue) + (offs))
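+/* e.g. for Receive Queue 1 the RAM Buffer Control register sits at
+ * B16_RAM_REGS + Q_R1 + RB_CTRL = 0x0800 + 0x0000 + 0x28 = 0x0828.
+ */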
+
+
+enum {
+ LNK_SYNC_INI = 0x0c30,/* 32 bit Link Sync Cnt Init Value */
+ LNK_SYNC_VAL = 0x0c34,/* 32 bit Link Sync Cnt Current Value */
+ LNK_SYNC_CTRL = 0x0c38,/* 8 bit Link Sync Cnt Control Register */
+ LNK_SYNC_TST = 0x0c39,/* 8 bit Link Sync Cnt Test Register */
+
+ LNK_LED_REG = 0x0c3c,/* 8 bit Link LED Register */
+
+/* Receive GMAC FIFO (YUKON and Yukon-2) */
+
+ RX_GMF_EA = 0x0c40,/* 32 bit Rx GMAC FIFO End Address */
+ RX_GMF_AF_THR = 0x0c44,/* 32 bit Rx GMAC FIFO Almost Full Thresh. */
+ RX_GMF_CTRL_T = 0x0c48,/* 32 bit Rx GMAC FIFO Control/Test */
+ RX_GMF_FL_MSK = 0x0c4c,/* 32 bit Rx GMAC FIFO Flush Mask */
+ RX_GMF_FL_THR = 0x0c50,/* 16 bit Rx GMAC FIFO Flush Threshold */
+ RX_GMF_FL_CTRL = 0x0c52,/* 16 bit Rx GMAC FIFO Flush Control */
+ RX_GMF_TR_THR = 0x0c54,/* 32 bit Rx Truncation Threshold (Yukon-2) */
+ RX_GMF_UP_THR = 0x0c58,/* 16 bit Rx Upper Pause Thr (Yukon-EC_U) */
+ RX_GMF_LP_THR = 0x0c5a,/* 16 bit Rx Lower Pause Thr (Yukon-EC_U) */
+ RX_GMF_VLAN = 0x0c5c,/* 32 bit Rx VLAN Type Register (Yukon-2) */
+ RX_GMF_WP = 0x0c60,/* 32 bit Rx GMAC FIFO Write Pointer */
+
+ RX_GMF_WLEV = 0x0c68,/* 32 bit Rx GMAC FIFO Write Level */
+
+ RX_GMF_RP = 0x0c70,/* 32 bit Rx GMAC FIFO Read Pointer */
+
+ RX_GMF_RLEV = 0x0c78,/* 32 bit Rx GMAC FIFO Read Level */
+};
+
+
+/* Q_BC 32 bit Current Byte Counter */
+
+/* BMU Control Status Registers */
+/* B0_R1_CSR 32 bit BMU Ctrl/Stat Rx Queue 1 */
+/* B0_R2_CSR 32 bit BMU Ctrl/Stat Rx Queue 2 */
+/* B0_XA1_CSR 32 bit BMU Ctrl/Stat Sync Tx Queue 1 */
+/* B0_XS1_CSR 32 bit BMU Ctrl/Stat Async Tx Queue 1 */
+/* B0_XA2_CSR 32 bit BMU Ctrl/Stat Sync Tx Queue 2 */
+/* B0_XS2_CSR 32 bit BMU Ctrl/Stat Async Tx Queue 2 */
+/* Q_CSR 32 bit BMU Control/Status Register */
+
+/* Rx BMU Control / Status Registers (Yukon-2) */
+enum {
+ BMU_IDLE = 1<<31, /* BMU Idle State */
+ BMU_RX_TCP_PKT = 1<<30, /* Rx TCP Packet (when RSS Hash enabled) */
+ BMU_RX_IP_PKT = 1<<29, /* Rx IP Packet (when RSS Hash enabled) */
+
+ BMU_ENA_RX_RSS_HASH = 1<<15, /* Enable Rx RSS Hash */
+ BMU_DIS_RX_RSS_HASH = 1<<14, /* Disable Rx RSS Hash */
+ BMU_ENA_RX_CHKSUM = 1<<13, /* Enable Rx TCP/IP Checksum Check */
+ BMU_DIS_RX_CHKSUM = 1<<12, /* Disable Rx TCP/IP Checksum Check */
+ BMU_CLR_IRQ_PAR = 1<<11, /* Clear IRQ on Parity errors (Rx) */
+ BMU_CLR_IRQ_TCP = 1<<11, /* Clear IRQ on TCP segment. error (Tx) */
+ BMU_CLR_IRQ_CHK = 1<<10, /* Clear IRQ Check */
+ BMU_STOP = 1<<9, /* Stop Rx/Tx Queue */
+ BMU_START = 1<<8, /* Start Rx/Tx Queue */
+ BMU_FIFO_OP_ON = 1<<7, /* FIFO Operational On */
+ BMU_FIFO_OP_OFF = 1<<6, /* FIFO Operational Off */
+ BMU_FIFO_ENA = 1<<5, /* Enable FIFO */
+ BMU_FIFO_RST = 1<<4, /* Reset FIFO */
+ BMU_OP_ON = 1<<3, /* BMU Operational On */
+ BMU_OP_OFF = 1<<2, /* BMU Operational Off */
+ BMU_RST_CLR = 1<<1, /* Clear BMU Reset (Enable) */
+ BMU_RST_SET = 1<<0, /* Set BMU Reset */
+
+ BMU_CLR_RESET = BMU_FIFO_RST | BMU_OP_OFF | BMU_RST_CLR,
+ BMU_OPER_INIT = BMU_CLR_IRQ_PAR | BMU_CLR_IRQ_CHK | BMU_START |
+ BMU_FIFO_ENA | BMU_OP_ON,
+
+ BMU_WM_DEFAULT = 0x600,
+ BMU_WM_PEX = 0x80,
+};
+
+/* Tx BMU Control / Status Registers (Yukon-2) */
+ /* Bit 31: same as for Rx */
+enum {
+ BMU_TX_IPIDINCR_ON = 1<<13, /* Enable IP ID Increment */
+ BMU_TX_IPIDINCR_OFF = 1<<12, /* Disable IP ID Increment */
+ BMU_TX_CLR_IRQ_TCP = 1<<11, /* Clear IRQ on TCP segment length mismatch */
+};
+
+/* TBMU_TEST 0x06B8 Transmit BMU Test Register */
+enum {
+ TBMU_TEST_BMU_TX_CHK_AUTO_OFF = 1<<31, /* BMU Tx Checksum Auto Calculation Disable */
+ TBMU_TEST_BMU_TX_CHK_AUTO_ON = 1<<30, /* BMU Tx Checksum Auto Calculation Enable */
+	TBMU_TEST_HOME_ADD_PAD_FIX1_EN		= 1<<29, /* Home Address Padding FIX1 Enable */
+	TBMU_TEST_HOME_ADD_PAD_FIX1_DIS		= 1<<28, /* Home Address Padding FIX1 Disable */
+ TBMU_TEST_ROUTING_ADD_FIX_EN = 1<<27, /* Routing Address Fix Enable */
+ TBMU_TEST_ROUTING_ADD_FIX_DIS = 1<<26, /* Routing Address Fix Disable */
+ TBMU_TEST_HOME_ADD_FIX_EN = 1<<25, /* Home address checksum fix enable */
+ TBMU_TEST_HOME_ADD_FIX_DIS = 1<<24, /* Home address checksum fix disable */
+
+ TBMU_TEST_TEST_RSPTR_ON = 1<<22, /* Testmode Shadow Read Ptr On */
+ TBMU_TEST_TEST_RSPTR_OFF = 1<<21, /* Testmode Shadow Read Ptr Off */
+ TBMU_TEST_TESTSTEP_RSPTR = 1<<20, /* Teststep Shadow Read Ptr */
+
+ TBMU_TEST_TEST_RPTR_ON = 1<<18, /* Testmode Read Ptr On */
+ TBMU_TEST_TEST_RPTR_OFF = 1<<17, /* Testmode Read Ptr Off */
+ TBMU_TEST_TESTSTEP_RPTR = 1<<16, /* Teststep Read Ptr */
+
+ TBMU_TEST_TEST_WSPTR_ON = 1<<14, /* Testmode Shadow Write Ptr On */
+ TBMU_TEST_TEST_WSPTR_OFF = 1<<13, /* Testmode Shadow Write Ptr Off */
+ TBMU_TEST_TESTSTEP_WSPTR = 1<<12, /* Teststep Shadow Write Ptr */
+
+ TBMU_TEST_TEST_WPTR_ON = 1<<10, /* Testmode Write Ptr On */
+ TBMU_TEST_TEST_WPTR_OFF = 1<<9, /* Testmode Write Ptr Off */
+ TBMU_TEST_TESTSTEP_WPTR = 1<<8, /* Teststep Write Ptr */
+
+ TBMU_TEST_TEST_REQ_NB_ON = 1<<6, /* Testmode Req Nbytes/Addr On */
+ TBMU_TEST_TEST_REQ_NB_OFF = 1<<5, /* Testmode Req Nbytes/Addr Off */
+ TBMU_TEST_TESTSTEP_REQ_NB = 1<<4, /* Teststep Req Nbytes/Addr */
+
+ TBMU_TEST_TEST_DONE_IDX_ON = 1<<2, /* Testmode Done Index On */
+ TBMU_TEST_TEST_DONE_IDX_OFF = 1<<1, /* Testmode Done Index Off */
+ TBMU_TEST_TESTSTEP_DONE_IDX = 1<<0, /* Teststep Done Index */
+};
+
+/* Queue Prefetch Unit Offsets, use Y2_QADDR() to address (Yukon-2 only)*/
+/* PREF_UNIT_CTRL 32 bit Prefetch Control register */
+enum {
+ PREF_UNIT_OP_ON = 1<<3, /* prefetch unit operational */
+ PREF_UNIT_OP_OFF = 1<<2, /* prefetch unit not operational */
+ PREF_UNIT_RST_CLR = 1<<1, /* Clear Prefetch Unit Reset */
+ PREF_UNIT_RST_SET = 1<<0, /* Set Prefetch Unit Reset */
+};
+
+/* RAM Buffer Register Offsets, use RB_ADDR(Queue, Offs) to access */
+/* RB_START 32 bit RAM Buffer Start Address */
+/* RB_END 32 bit RAM Buffer End Address */
+/* RB_WP 32 bit RAM Buffer Write Pointer */
+/* RB_RP 32 bit RAM Buffer Read Pointer */
+/* RB_RX_UTPP 32 bit Rx Upper Threshold, Pause Pack */
+/* RB_RX_LTPP 32 bit Rx Lower Threshold, Pause Pack */
+/* RB_RX_UTHP 32 bit Rx Upper Threshold, High Prio */
+/* RB_RX_LTHP 32 bit Rx Lower Threshold, High Prio */
+/* RB_PC 32 bit RAM Buffer Packet Counter */
+/* RB_LEV 32 bit RAM Buffer Level Register */
+
+#define RB_MSK 0x0007ffff /* Bit 18.. 0: RAM Buffer Pointer Bits */
+/* RB_TST2 8 bit RAM Buffer Test Register 2 */
+/* RB_TST1 8 bit RAM Buffer Test Register 1 */
+
+/* RB_CTRL 8 bit RAM Buffer Control Register */
+enum {
+ RB_ENA_STFWD = 1<<5, /* Enable Store & Forward */
+ RB_DIS_STFWD = 1<<4, /* Disable Store & Forward */
+ RB_ENA_OP_MD = 1<<3, /* Enable Operation Mode */
+ RB_DIS_OP_MD = 1<<2, /* Disable Operation Mode */
+ RB_RST_CLR = 1<<1, /* Clear RAM Buf STM Reset */
+ RB_RST_SET = 1<<0, /* Set RAM Buf STM Reset */
+};
+
+
+/* Transmit GMAC FIFO (YUKON only) */
+enum {
+ TX_GMF_EA = 0x0d40,/* 32 bit Tx GMAC FIFO End Address */
+ TX_GMF_AE_THR = 0x0d44,/* 32 bit Tx GMAC FIFO Almost Empty Thresh.*/
+ TX_GMF_CTRL_T = 0x0d48,/* 32 bit Tx GMAC FIFO Control/Test */
+
+ TX_GMF_WP = 0x0d60,/* 32 bit Tx GMAC FIFO Write Pointer */
+ TX_GMF_WSP = 0x0d64,/* 32 bit Tx GMAC FIFO Write Shadow Ptr. */
+ TX_GMF_WLEV = 0x0d68,/* 32 bit Tx GMAC FIFO Write Level */
+
+ TX_GMF_RP = 0x0d70,/* 32 bit Tx GMAC FIFO Read Pointer */
+ TX_GMF_RSTP = 0x0d74,/* 32 bit Tx GMAC FIFO Restart Pointer */
+ TX_GMF_RLEV = 0x0d78,/* 32 bit Tx GMAC FIFO Read Level */
+
+ /* Threshold values for Yukon-EC Ultra and Extreme */
+ ECU_AE_THR = 0x0070, /* Almost Empty Threshold */
+ ECU_TXFF_LEV = 0x01a0, /* Tx BMU FIFO Level */
+ ECU_JUMBO_WM = 0x0080, /* Jumbo Mode Watermark */
+};
+
+/* Descriptor Poll Timer Registers */
+enum {
+ B28_DPT_INI = 0x0e00,/* 24 bit Descriptor Poll Timer Init Val */
+ B28_DPT_VAL = 0x0e04,/* 24 bit Descriptor Poll Timer Curr Val */
+ B28_DPT_CTRL = 0x0e08,/* 8 bit Descriptor Poll Timer Ctrl Reg */
+
+ B28_DPT_TST = 0x0e0a,/* 8 bit Descriptor Poll Timer Test Reg */
+};
+
+/* Time Stamp Timer Registers (YUKON only) */
+enum {
+ GMAC_TI_ST_VAL = 0x0e14,/* 32 bit Time Stamp Timer Curr Val */
+ GMAC_TI_ST_CTRL = 0x0e18,/* 8 bit Time Stamp Timer Ctrl Reg */
+ GMAC_TI_ST_TST = 0x0e1a,/* 8 bit Time Stamp Timer Test Reg */
+};
+
+/* Polling Unit Registers (Yukon-2 only) */
+enum {
+ POLL_CTRL = 0x0e20, /* 32 bit Polling Unit Control Reg */
+ POLL_LAST_IDX = 0x0e24,/* 16 bit Polling Unit List Last Index */
+
+ POLL_LIST_ADDR_LO= 0x0e28,/* 32 bit Poll. List Start Addr (low) */
+ POLL_LIST_ADDR_HI= 0x0e2c,/* 32 bit Poll. List Start Addr (high) */
+};
+
+enum {
+ SMB_CFG = 0x0e40, /* 32 bit SMBus Config Register */
+ SMB_CSR = 0x0e44, /* 32 bit SMBus Control/Status Register */
+};
+
+enum {
+ CPU_WDOG = 0x0e48, /* 32 bit Watchdog Register */
+ CPU_CNTR = 0x0e4C, /* 32 bit Counter Register */
+ CPU_TIM = 0x0e50,/* 32 bit Timer Compare Register */
+ CPU_AHB_ADDR = 0x0e54, /* 32 bit CPU AHB Debug Register */
+ CPU_AHB_WDATA = 0x0e58, /* 32 bit CPU AHB Debug Register */
+ CPU_AHB_RDATA = 0x0e5C, /* 32 bit CPU AHB Debug Register */
+ HCU_MAP_BASE = 0x0e60, /* 32 bit Reset Mapping Base */
+ CPU_AHB_CTRL = 0x0e64, /* 32 bit CPU AHB Debug Register */
+ HCU_CCSR = 0x0e68, /* 32 bit CPU Control and Status Register */
+ HCU_HCSR = 0x0e6C, /* 32 bit Host Control and Status Register */
+};
+
+/* ASF Subsystem Registers (Yukon-2 only) */
+enum {
+ B28_Y2_SMB_CONFIG = 0x0e40,/* 32 bit ASF SMBus Config Register */
+ B28_Y2_SMB_CSD_REG = 0x0e44,/* 32 bit ASF SMB Control/Status/Data */
+ B28_Y2_ASF_IRQ_V_BASE=0x0e60,/* 32 bit ASF IRQ Vector Base */
+
+ B28_Y2_ASF_STAT_CMD= 0x0e68,/* 32 bit ASF Status and Command Reg */
+ B28_Y2_ASF_HOST_COM= 0x0e6c,/* 32 bit ASF Host Communication Reg */
+ B28_Y2_DATA_REG_1 = 0x0e70,/* 32 bit ASF/Host Data Register 1 */
+ B28_Y2_DATA_REG_2 = 0x0e74,/* 32 bit ASF/Host Data Register 2 */
+ B28_Y2_DATA_REG_3 = 0x0e78,/* 32 bit ASF/Host Data Register 3 */
+ B28_Y2_DATA_REG_4 = 0x0e7c,/* 32 bit ASF/Host Data Register 4 */
+};
+
+/* Status BMU Registers (Yukon-2 only)*/
+enum {
+ STAT_CTRL = 0x0e80,/* 32 bit Status BMU Control Reg */
+ STAT_LAST_IDX = 0x0e84,/* 16 bit Status BMU Last Index */
+
+ STAT_LIST_ADDR_LO= 0x0e88,/* 32 bit Status List Start Addr (low) */
+ STAT_LIST_ADDR_HI= 0x0e8c,/* 32 bit Status List Start Addr (high) */
+ STAT_TXA1_RIDX = 0x0e90,/* 16 bit Status TxA1 Report Index Reg */
+ STAT_TXS1_RIDX = 0x0e92,/* 16 bit Status TxS1 Report Index Reg */
+ STAT_TXA2_RIDX = 0x0e94,/* 16 bit Status TxA2 Report Index Reg */
+ STAT_TXS2_RIDX = 0x0e96,/* 16 bit Status TxS2 Report Index Reg */
+ STAT_TX_IDX_TH = 0x0e98,/* 16 bit Status Tx Index Threshold Reg */
+ STAT_PUT_IDX = 0x0e9c,/* 16 bit Status Put Index Reg */
+
+/* FIFO Control/Status Registers (Yukon-2 only)*/
+ STAT_FIFO_WP = 0x0ea0,/* 8 bit Status FIFO Write Pointer Reg */
+ STAT_FIFO_RP = 0x0ea4,/* 8 bit Status FIFO Read Pointer Reg */
+ STAT_FIFO_RSP = 0x0ea6,/* 8 bit Status FIFO Read Shadow Ptr */
+ STAT_FIFO_LEVEL = 0x0ea8,/* 8 bit Status FIFO Level Reg */
+ STAT_FIFO_SHLVL = 0x0eaa,/* 8 bit Status FIFO Shadow Level Reg */
+ STAT_FIFO_WM = 0x0eac,/* 8 bit Status FIFO Watermark Reg */
+ STAT_FIFO_ISR_WM= 0x0ead,/* 8 bit Status FIFO ISR Watermark Reg */
+
+/* Level and ISR Timer Registers (Yukon-2 only)*/
+ STAT_LEV_TIMER_INI= 0x0eb0,/* 32 bit Level Timer Init. Value Reg */
+ STAT_LEV_TIMER_CNT= 0x0eb4,/* 32 bit Level Timer Counter Reg */
+ STAT_LEV_TIMER_CTRL= 0x0eb8,/* 8 bit Level Timer Control Reg */
+ STAT_LEV_TIMER_TEST= 0x0eb9,/* 8 bit Level Timer Test Reg */
+ STAT_TX_TIMER_INI = 0x0ec0,/* 32 bit Tx Timer Init. Value Reg */
+ STAT_TX_TIMER_CNT = 0x0ec4,/* 32 bit Tx Timer Counter Reg */
+ STAT_TX_TIMER_CTRL = 0x0ec8,/* 8 bit Tx Timer Control Reg */
+ STAT_TX_TIMER_TEST = 0x0ec9,/* 8 bit Tx Timer Test Reg */
+ STAT_ISR_TIMER_INI = 0x0ed0,/* 32 bit ISR Timer Init. Value Reg */
+ STAT_ISR_TIMER_CNT = 0x0ed4,/* 32 bit ISR Timer Counter Reg */
+ STAT_ISR_TIMER_CTRL= 0x0ed8,/* 8 bit ISR Timer Control Reg */
+ STAT_ISR_TIMER_TEST= 0x0ed9,/* 8 bit ISR Timer Test Reg */
+};
+
+enum {
+ LINKLED_OFF = 0x01,
+ LINKLED_ON = 0x02,
+ LINKLED_LINKSYNC_OFF = 0x04,
+ LINKLED_LINKSYNC_ON = 0x08,
+ LINKLED_BLINK_OFF = 0x10,
+ LINKLED_BLINK_ON = 0x20,
+};
+
+/* GMAC and GPHY Control Registers (YUKON only) */
+enum {
+ GMAC_CTRL = 0x0f00,/* 32 bit GMAC Control Reg */
+ GPHY_CTRL = 0x0f04,/* 32 bit GPHY Control Reg */
+ GMAC_IRQ_SRC = 0x0f08,/* 8 bit GMAC Interrupt Source Reg */
+ GMAC_IRQ_MSK = 0x0f0c,/* 8 bit GMAC Interrupt Mask Reg */
+ GMAC_LINK_CTRL = 0x0f10,/* 16 bit Link Control Reg */
+
+/* Wake-up Frame Pattern Match Control Registers (YUKON only) */
+ WOL_CTRL_STAT = 0x0f20,/* 16 bit WOL Control/Status Reg */
+ WOL_MATCH_CTL = 0x0f22,/* 8 bit WOL Match Control Reg */
+ WOL_MATCH_RES = 0x0f23,/* 8 bit WOL Match Result Reg */
+ WOL_MAC_ADDR = 0x0f24,/* 32 bit WOL MAC Address */
+ WOL_PATT_RPTR = 0x0f2c,/* 8 bit WOL Pattern Read Pointer */
+
+/* WOL Pattern Length Registers (YUKON only) */
+ WOL_PATT_LEN_LO = 0x0f30,/* 32 bit WOL Pattern Length 3..0 */
+ WOL_PATT_LEN_HI = 0x0f34,/* 24 bit WOL Pattern Length 6..4 */
+
+/* WOL Pattern Counter Registers (YUKON only) */
+ WOL_PATT_CNT_0 = 0x0f38,/* 32 bit WOL Pattern Counter 3..0 */
+ WOL_PATT_CNT_4 = 0x0f3c,/* 24 bit WOL Pattern Counter 6..4 */
+};
+#define WOL_REGS(port, x) (x + (port)*0x80)
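+/*
+ * For illustration: WOL_REGS() adds a 0x80 per-port stride, so
+ * WOL_REGS(0, WOL_CTRL_STAT) is 0x0f20 and WOL_REGS(1, WOL_CTRL_STAT)
+ * is 0x0fa0.
+ */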
+
+enum {
+ WOL_PATT_RAM_1 = 0x1000,/* WOL Pattern RAM Link 1 */
+ WOL_PATT_RAM_2 = 0x1400,/* WOL Pattern RAM Link 2 */
+};
+#define WOL_PATT_RAM_BASE(port) (WOL_PATT_RAM_1 + (port)*0x400)
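+/*
+ * For illustration: the pattern RAM stride is 0x400, so
+ * WOL_PATT_RAM_BASE(1) == WOL_PATT_RAM_1 + 0x400 == WOL_PATT_RAM_2 (0x1400).
+ */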
+
+enum {
+ BASE_GMAC_1 = 0x2800,/* GMAC 1 registers */
+ BASE_GMAC_2 = 0x3800,/* GMAC 2 registers */
+};
+
+/*
+ * Marvell PHY registers, indirectly addressed through the GMAC
+ */
+enum {
+ PHY_MARV_CTRL = 0x00,/* 16 bit r/w PHY Control Register */
+ PHY_MARV_STAT = 0x01,/* 16 bit r/o PHY Status Register */
+ PHY_MARV_ID0 = 0x02,/* 16 bit r/o PHY ID0 Register */
+ PHY_MARV_ID1 = 0x03,/* 16 bit r/o PHY ID1 Register */
+ PHY_MARV_AUNE_ADV = 0x04,/* 16 bit r/w Auto-Neg. Advertisement */
+ PHY_MARV_AUNE_LP = 0x05,/* 16 bit r/o Link Partner Ability Reg */
+ PHY_MARV_AUNE_EXP = 0x06,/* 16 bit r/o Auto-Neg. Expansion Reg */
+ PHY_MARV_NEPG = 0x07,/* 16 bit r/w Next Page Register */
+ PHY_MARV_NEPG_LP = 0x08,/* 16 bit r/o Next Page Link Partner */
+ /* Marvell-specific registers */
+ PHY_MARV_1000T_CTRL = 0x09,/* 16 bit r/w 1000Base-T Control Reg */
+ PHY_MARV_1000T_STAT = 0x0a,/* 16 bit r/o 1000Base-T Status Reg */
+ PHY_MARV_EXT_STAT = 0x0f,/* 16 bit r/o Extended Status Reg */
+ PHY_MARV_PHY_CTRL = 0x10,/* 16 bit r/w PHY Specific Ctrl Reg */
+ PHY_MARV_PHY_STAT = 0x11,/* 16 bit r/o PHY Specific Stat Reg */
+ PHY_MARV_INT_MASK = 0x12,/* 16 bit r/w Interrupt Mask Reg */
+ PHY_MARV_INT_STAT = 0x13,/* 16 bit r/o Interrupt Status Reg */
+ PHY_MARV_EXT_CTRL = 0x14,/* 16 bit r/w Ext. PHY Specific Ctrl */
+ PHY_MARV_RXE_CNT = 0x15,/* 16 bit r/w Receive Error Counter */
+ PHY_MARV_EXT_ADR = 0x16,/* 16 bit r/w Ext. Ad. for Cable Diag. */
+ PHY_MARV_PORT_IRQ = 0x17,/* 16 bit r/o Port 0 IRQ (88E1111 only) */
+ PHY_MARV_LED_CTRL = 0x18,/* 16 bit r/w LED Control Reg */
+ PHY_MARV_LED_OVER = 0x19,/* 16 bit r/w Manual LED Override Reg */
+ PHY_MARV_EXT_CTRL_2 = 0x1a,/* 16 bit r/w Ext. PHY Specific Ctrl 2 */
+ PHY_MARV_EXT_P_STAT = 0x1b,/* 16 bit r/w Ext. PHY Spec. Stat Reg */
+ PHY_MARV_CABLE_DIAG = 0x1c,/* 16 bit r/o Cable Diagnostic Reg */
+ PHY_MARV_PAGE_ADDR = 0x1d,/* 16 bit r/w Extended Page Address Reg */
+ PHY_MARV_PAGE_DATA = 0x1e,/* 16 bit r/w Extended Page Data Reg */
+
+/* for 10/100 Fast Ethernet PHY (88E3082 only) */
+ PHY_MARV_FE_LED_PAR = 0x16,/* 16 bit r/w LED Parallel Select Reg. */
+ PHY_MARV_FE_LED_SER = 0x17,/* 16 bit r/w LED Stream Select S. LED */
+ PHY_MARV_FE_VCT_TX = 0x1a,/* 16 bit r/w VCT Reg. for TXP/N Pins */
+ PHY_MARV_FE_VCT_RX = 0x1b,/* 16 bit r/o VCT Reg. for RXP/N Pins */
+ PHY_MARV_FE_SPEC_2 = 0x1c,/* 16 bit r/w Specific Control Reg. 2 */
+};
+
+enum {
+ PHY_CT_RESET = 1<<15, /* Bit 15: (sc) clear all PHY related regs */
+ PHY_CT_LOOP = 1<<14, /* Bit 14: enable Loopback over PHY */
+ PHY_CT_SPS_LSB = 1<<13, /* Bit 13: Speed select, lower bit */
+ PHY_CT_ANE = 1<<12, /* Bit 12: Auto-Negotiation Enabled */
+ PHY_CT_PDOWN = 1<<11, /* Bit 11: Power Down Mode */
+ PHY_CT_ISOL = 1<<10, /* Bit 10: Isolate Mode */
+ PHY_CT_RE_CFG = 1<<9, /* Bit 9: (sc) Restart Auto-Negotiation */
+ PHY_CT_DUP_MD = 1<<8, /* Bit 8: Duplex Mode */
+ PHY_CT_COL_TST = 1<<7, /* Bit 7: Collision Test enabled */
+ PHY_CT_SPS_MSB = 1<<6, /* Bit 6: Speed select, upper bit */
+};
+
+enum {
+ PHY_CT_SP1000 = PHY_CT_SPS_MSB, /* enable speed of 1000 Mbps */
+ PHY_CT_SP100 = PHY_CT_SPS_LSB, /* enable speed of 100 Mbps */
+ PHY_CT_SP10 = 0, /* enable speed of 10 Mbps */
+};
+
+enum {
+ PHY_ST_EXT_ST = 1<<8, /* Bit 8: Extended Status Present */
+
+ PHY_ST_PRE_SUP = 1<<6, /* Bit 6: Preamble Suppression */
+ PHY_ST_AN_OVER = 1<<5, /* Bit 5: Auto-Negotiation Over */
+ PHY_ST_REM_FLT = 1<<4, /* Bit 4: Remote Fault Condition Occurred */
+ PHY_ST_AN_CAP = 1<<3, /* Bit 3: Auto-Negotiation Capability */
+ PHY_ST_LSYNC = 1<<2, /* Bit 2: Link Synchronized */
+ PHY_ST_JAB_DET = 1<<1, /* Bit 1: Jabber Detected */
+ PHY_ST_EXT_REG = 1<<0, /* Bit 0: Extended Register available */
+};
+
+enum {
+ PHY_I1_OUI_MSK = 0x3f<<10, /* Bit 15..10: Organization Unique ID */
+ PHY_I1_MOD_NUM = 0x3f<<4, /* Bit 9.. 4: Model Number */
+ PHY_I1_REV_MSK = 0xf, /* Bit 3.. 0: Revision Number */
+};
+
+/* different Marvell PHY Ids */
+enum {
+ PHY_MARV_ID0_VAL= 0x0141, /* Marvell Unique Identifier */
+
+ PHY_BCOM_ID1_A1 = 0x6041,
+ PHY_BCOM_ID1_B2 = 0x6043,
+ PHY_BCOM_ID1_C0 = 0x6044,
+ PHY_BCOM_ID1_C5 = 0x6047,
+
+ PHY_MARV_ID1_B0 = 0x0C23, /* Yukon (PHY 88E1011) */
+ PHY_MARV_ID1_B2 = 0x0C25, /* Yukon-Plus (PHY 88E1011) */
+ PHY_MARV_ID1_C2 = 0x0CC2, /* Yukon-EC (PHY 88E1111) */
+ PHY_MARV_ID1_Y2 = 0x0C91, /* Yukon-2 (PHY 88E1112) */
+ PHY_MARV_ID1_FE = 0x0C83, /* Yukon-FE (PHY 88E3082 Rev.A1) */
+ PHY_MARV_ID1_ECU= 0x0CB0, /* Yukon-ECU (PHY 88E1149 Rev.B2?) */
+};
+
+/* Advertisement register bits */
+enum {
+ PHY_AN_NXT_PG = 1<<15, /* Bit 15: Request Next Page */
+ PHY_AN_ACK = 1<<14, /* Bit 14: (ro) Acknowledge Received */
+ PHY_AN_RF = 1<<13, /* Bit 13: Remote Fault Bits */
+
+ PHY_AN_PAUSE_ASYM = 1<<11,/* Bit 11: Try for asymmetric */
+ PHY_AN_PAUSE_CAP = 1<<10, /* Bit 10: Try for pause */
+ PHY_AN_100BASE4 = 1<<9, /* Bit 9: Try for 100Base-T4 */
+ PHY_AN_100FULL = 1<<8, /* Bit 8: Try for 100mbps full-duplex */
+ PHY_AN_100HALF = 1<<7, /* Bit 7: Try for 100mbps half-duplex */
+ PHY_AN_10FULL = 1<<6, /* Bit 6: Try for 10mbps full-duplex */
+ PHY_AN_10HALF = 1<<5, /* Bit 5: Try for 10mbps half-duplex */
+ PHY_AN_CSMA = 1<<0, /* Bit 0: Only selector supported */
+ PHY_AN_SEL = 0x1f, /* Bit 4..0: Selector Field, 00001=Ethernet*/
+ PHY_AN_FULL = PHY_AN_100FULL | PHY_AN_10FULL | PHY_AN_CSMA,
+ PHY_AN_ALL = PHY_AN_10HALF | PHY_AN_10FULL |
+ PHY_AN_100HALF | PHY_AN_100FULL,
+};
+
+/***** PHY_BCOM_1000T_STAT 16 bit r/o 1000Base-T Status Reg *****/
+/***** PHY_MARV_1000T_STAT 16 bit r/o 1000Base-T Status Reg *****/
+enum {
+ PHY_B_1000S_MSF = 1<<15, /* Bit 15: Master/Slave Fault */
+ PHY_B_1000S_MSR = 1<<14, /* Bit 14: Master/Slave Result */
+ PHY_B_1000S_LRS = 1<<13, /* Bit 13: Local Receiver Status */
+ PHY_B_1000S_RRS = 1<<12, /* Bit 12: Remote Receiver Status */
+ PHY_B_1000S_LP_FD = 1<<11, /* Bit 11: Link Partner can FD */
+ PHY_B_1000S_LP_HD = 1<<10, /* Bit 10: Link Partner can HD */
+ /* Bit 9..8: reserved */
+ PHY_B_1000S_IEC = 0xff, /* Bit 7..0: Idle Error Count */
+};
+
+/** Marvell-Specific */
+enum {
+ PHY_M_AN_NXT_PG = 1<<15, /* Request Next Page */
+ PHY_M_AN_ACK = 1<<14, /* (ro) Acknowledge Received */
+ PHY_M_AN_RF = 1<<13, /* Remote Fault */
+
+ PHY_M_AN_ASP = 1<<11, /* Asymmetric Pause */
+ PHY_M_AN_PC = 1<<10, /* MAC Pause implemented */
+ PHY_M_AN_100_T4 = 1<<9, /* Not cap. 100Base-T4 (always 0) */
+ PHY_M_AN_100_FD = 1<<8, /* Advertise 100Base-TX Full Duplex */
+ PHY_M_AN_100_HD = 1<<7, /* Advertise 100Base-TX Half Duplex */
+ PHY_M_AN_10_FD = 1<<6, /* Advertise 10Base-TX Full Duplex */
+ PHY_M_AN_10_HD = 1<<5, /* Advertise 10Base-TX Half Duplex */
+ PHY_M_AN_SEL_MSK =0x1f<<4, /* Bit 4.. 0: Selector Field Mask */
+};
+
+/* special defines for FIBER (88E1011S only) */
+enum {
+ PHY_M_AN_ASP_X = 1<<8, /* Asymmetric Pause */
+ PHY_M_AN_PC_X = 1<<7, /* MAC Pause implemented */
+ PHY_M_AN_1000X_AHD = 1<<6, /* Advertise 1000Base-X Half Duplex */
+ PHY_M_AN_1000X_AFD = 1<<5, /* Advertise 1000Base-X Full Duplex */
+};
+
+/* Pause Bits (PHY_M_AN_ASP_X and PHY_M_AN_PC_X) encoding */
+enum {
+ PHY_M_P_NO_PAUSE_X = 0<<7,/* Bit 8.. 7: no Pause Mode */
+ PHY_M_P_SYM_MD_X = 1<<7, /* Bit 8.. 7: symmetric Pause Mode */
+ PHY_M_P_ASYM_MD_X = 2<<7,/* Bit 8.. 7: asymmetric Pause Mode */
+ PHY_M_P_BOTH_MD_X = 3<<7,/* Bit 8.. 7: both Pause Mode */
+};
+
+/***** PHY_MARV_1000T_CTRL 16 bit r/w 1000Base-T Control Reg *****/
+enum {
+ PHY_M_1000C_TEST = 7<<13,/* Bit 15..13: Test Modes */
+ PHY_M_1000C_MSE = 1<<12, /* Manual Master/Slave Enable */
+ PHY_M_1000C_MSC = 1<<11, /* M/S Configuration (1=Master) */
+ PHY_M_1000C_MPD = 1<<10, /* Multi-Port Device */
+ PHY_M_1000C_AFD = 1<<9, /* Advertise Full Duplex */
+ PHY_M_1000C_AHD = 1<<8, /* Advertise Half Duplex */
+};
+
+/***** PHY_MARV_PHY_CTRL 16 bit r/w PHY Specific Ctrl Reg *****/
+enum {
+ PHY_M_PC_TX_FFD_MSK = 3<<14,/* Bit 15..14: Tx FIFO Depth Mask */
+ PHY_M_PC_RX_FFD_MSK = 3<<12,/* Bit 13..12: Rx FIFO Depth Mask */
+ PHY_M_PC_ASS_CRS_TX = 1<<11, /* Assert CRS on Transmit */
+ PHY_M_PC_FL_GOOD = 1<<10, /* Force Link Good */
+ PHY_M_PC_EN_DET_MSK = 3<<8,/* Bit 9.. 8: Energy Detect Mask */
+ PHY_M_PC_ENA_EXT_D = 1<<7, /* Enable Ext. Distance (10BT) */
+ PHY_M_PC_MDIX_MSK = 3<<5,/* Bit 6.. 5: MDI/MDIX Config. Mask */
+ PHY_M_PC_DIS_125CLK = 1<<4, /* Disable 125 CLK */
+ PHY_M_PC_MAC_POW_UP = 1<<3, /* MAC Power up */
+ PHY_M_PC_SQE_T_ENA = 1<<2, /* SQE Test Enabled */
+ PHY_M_PC_POL_R_DIS = 1<<1, /* Polarity Reversal Disabled */
+ PHY_M_PC_DIS_JABBER = 1<<0, /* Disable Jabber */
+};
+
+enum {
+ PHY_M_PC_EN_DET = 2<<8, /* Energy Detect (Mode 1) */
+ PHY_M_PC_EN_DET_PLUS = 3<<8, /* Energy Detect Plus (Mode 2) */
+};
+
+#define PHY_M_PC_MDI_XMODE(x) (((u16)(x)<<5) & PHY_M_PC_MDIX_MSK)
+
+enum {
+ PHY_M_PC_MAN_MDI = 0, /* 00 = Manual MDI configuration */
+ PHY_M_PC_MAN_MDIX = 1, /* 01 = Manual MDIX configuration */
+ PHY_M_PC_ENA_AUTO = 3, /* 11 = Enable Automatic Crossover */
+};
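+
+/*
+ * For illustration: automatic crossover would be selected by writing
+ * PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO), i.e. 3 << 5 == 0x0060, into the
+ * MDI/MDIX field (PHY_M_PC_MDIX_MSK) of PHY_MARV_PHY_CTRL.
+ */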
+
+/* for Yukon-EC Ultra Gigabit Ethernet PHY (88E1149 only) */
+enum {
+ PHY_M_PC_COP_TX_DIS = 1<<3, /* Copper Transmitter Disable */
+ PHY_M_PC_POW_D_ENA = 1<<2, /* Power Down Enable */
+};
+
+/* for 10/100 Fast Ethernet PHY (88E3082 only) */
+enum {
+ PHY_M_PC_ENA_DTE_DT = 1<<15, /* Enable Data Terminal Equ. (DTE) Detect */
+ PHY_M_PC_ENA_ENE_DT = 1<<14, /* Enable Energy Detect (sense & pulse) */
+ PHY_M_PC_DIS_NLP_CK = 1<<13, /* Disable Normal Link Pulse (NLP) Check */
+ PHY_M_PC_ENA_LIP_NP = 1<<12, /* Enable Link Partner Next Page Reg. */
+ PHY_M_PC_DIS_NLP_GN = 1<<11, /* Disable Normal Link Pulse Generation */
+
+ PHY_M_PC_DIS_SCRAMB = 1<<9, /* Disable Scrambler */
+ PHY_M_PC_DIS_FEFI = 1<<8, /* Disable Far End Fault Indic. (FEFI) */
+
+ PHY_M_PC_SH_TP_SEL = 1<<6, /* Shielded Twisted Pair Select */
+ PHY_M_PC_RX_FD_MSK = 3<<2,/* Bit 3.. 2: Rx FIFO Depth Mask */
+};
+
+/***** PHY_MARV_PHY_STAT 16 bit r/o PHY Specific Status Reg *****/
+enum {
+ PHY_M_PS_SPEED_MSK = 3<<14, /* Bit 15..14: Speed Mask */
+ PHY_M_PS_SPEED_1000 = 1<<15, /* 10 = 1000 Mbps */
+ PHY_M_PS_SPEED_100 = 1<<14, /* 01 = 100 Mbps */
+ PHY_M_PS_SPEED_10 = 0, /* 00 = 10 Mbps */
+ PHY_M_PS_FULL_DUP = 1<<13, /* Full Duplex */
+ PHY_M_PS_PAGE_REC = 1<<12, /* Page Received */
+ PHY_M_PS_SPDUP_RES = 1<<11, /* Speed & Duplex Resolved */
+ PHY_M_PS_LINK_UP = 1<<10, /* Link Up */
+ PHY_M_PS_CABLE_MSK = 7<<7, /* Bit 9.. 7: Cable Length Mask */
+ PHY_M_PS_MDI_X_STAT = 1<<6, /* MDI Crossover Stat (1=MDIX) */
+ PHY_M_PS_DOWNS_STAT = 1<<5, /* Downshift Status (1=downsh.) */
+ PHY_M_PS_ENDET_STAT = 1<<4, /* Energy Detect Status (1=act) */
+ PHY_M_PS_TX_P_EN = 1<<3, /* Tx Pause Enabled */
+ PHY_M_PS_RX_P_EN = 1<<2, /* Rx Pause Enabled */
+ PHY_M_PS_POL_REV = 1<<1, /* Polarity Reversed */
+ PHY_M_PS_JABBER = 1<<0, /* Jabber */
+};
+
+#define PHY_M_PS_PAUSE_MSK (PHY_M_PS_TX_P_EN | PHY_M_PS_RX_P_EN)
+
+/* for 10/100 Fast Ethernet PHY (88E3082 only) */
+enum {
+ PHY_M_PS_DTE_DETECT = 1<<15, /* Data Terminal Equipment (DTE) Detected */
+ PHY_M_PS_RES_SPEED = 1<<14, /* Resolved Speed (1=100 Mbps, 0=10 Mbps) */
+};
+
+enum {
+ PHY_M_IS_AN_ERROR = 1<<15, /* Auto-Negotiation Error */
+ PHY_M_IS_LSP_CHANGE = 1<<14, /* Link Speed Changed */
+ PHY_M_IS_DUP_CHANGE = 1<<13, /* Duplex Mode Changed */
+ PHY_M_IS_AN_PR = 1<<12, /* Page Received */
+ PHY_M_IS_AN_COMPL = 1<<11, /* Auto-Negotiation Completed */
+ PHY_M_IS_LST_CHANGE = 1<<10, /* Link Status Changed */
+ PHY_M_IS_SYMB_ERROR = 1<<9, /* Symbol Error */
+ PHY_M_IS_FALSE_CARR = 1<<8, /* False Carrier */
+ PHY_M_IS_FIFO_ERROR = 1<<7, /* FIFO Overflow/Underrun Error */
+ PHY_M_IS_MDI_CHANGE = 1<<6, /* MDI Crossover Changed */
+ PHY_M_IS_DOWNSH_DET = 1<<5, /* Downshift Detected */
+ PHY_M_IS_END_CHANGE = 1<<4, /* Energy Detect Changed */
+
+ PHY_M_IS_DTE_CHANGE = 1<<2, /* DTE Power Det. Status Changed */
+ PHY_M_IS_POL_CHANGE = 1<<1, /* Polarity Changed */
+ PHY_M_IS_JABBER = 1<<0, /* Jabber */
+
+ PHY_M_DEF_MSK = PHY_M_IS_LSP_CHANGE | PHY_M_IS_LST_CHANGE
+ | PHY_M_IS_DUP_CHANGE,
+ PHY_M_AN_MSK = PHY_M_IS_AN_ERROR | PHY_M_IS_AN_COMPL,
+};
+
+
+/***** PHY_MARV_EXT_CTRL 16 bit r/w Ext. PHY Specific Ctrl *****/
+enum {
+ PHY_M_EC_ENA_BC_EXT = 1<<15, /* Enable Block Carr. Ext. (88E1111 only) */
+ PHY_M_EC_ENA_LIN_LB = 1<<14, /* Enable Line Loopback (88E1111 only) */
+
+ PHY_M_EC_DIS_LINK_P = 1<<12, /* Disable Link Pulses (88E1111 only) */
+ PHY_M_EC_M_DSC_MSK = 3<<10, /* Bit 11..10: Master Downshift Counter */
+ /* (88E1011 only) */
+ PHY_M_EC_S_DSC_MSK = 3<<8,/* Bit 9.. 8: Slave Downshift Counter */
+ /* (88E1011 only) */
+ PHY_M_EC_M_DSC_MSK2 = 7<<9,/* Bit 11.. 9: Master Downshift Counter */
+ /* (88E1111 only) */
+ PHY_M_EC_DOWN_S_ENA = 1<<8, /* Downshift Enable (88E1111 only) */
+ /* !!! Errata in spec. (1 = disable) */
+ PHY_M_EC_RX_TIM_CT = 1<<7, /* RGMII Rx Timing Control*/
+ PHY_M_EC_MAC_S_MSK = 7<<4,/* Bit 6.. 4: Def. MAC interface speed */
+ PHY_M_EC_FIB_AN_ENA = 1<<3, /* Fiber Auto-Neg. Enable (88E1011S only) */
+ PHY_M_EC_DTE_D_ENA = 1<<2, /* DTE Detect Enable (88E1111 only) */
+ PHY_M_EC_TX_TIM_CT = 1<<1, /* RGMII Tx Timing Control */
+ PHY_M_EC_TRANS_DIS = 1<<0, /* Transmitter Disable (88E1111 only) */
+
+ PHY_M_10B_TE_ENABLE = 1<<7, /* 10Base-Te Enable (88E8079 and above) */
+};
+#define PHY_M_EC_M_DSC(x) ((u16)(x)<<10 & PHY_M_EC_M_DSC_MSK)
+ /* 00=1x; 01=2x; 10=3x; 11=4x */
+#define PHY_M_EC_S_DSC(x) ((u16)(x)<<8 & PHY_M_EC_S_DSC_MSK)
+ /* 00=dis; 01=1x; 10=2x; 11=3x */
+#define PHY_M_EC_DSC_2(x) ((u16)(x)<<9 & PHY_M_EC_M_DSC_MSK2)
+ /* 000=1x; 001=2x; 010=3x; 011=4x */
+#define PHY_M_EC_MAC_S(x) ((u16)(x)<<4 & PHY_M_EC_MAC_S_MSK)
+ /* 01X=0; 110=2.5; 111=25 (MHz) */
+
+/* for Yukon-2 Gigabit Ethernet PHY (88E1112 only) */
+enum {
+ PHY_M_PC_DIS_LINK_Pa = 1<<15,/* Disable Link Pulses */
+ PHY_M_PC_DSC_MSK = 7<<12,/* Bit 14..12: Downshift Counter */
+ PHY_M_PC_DOWN_S_ENA = 1<<11,/* Downshift Enable */
+};
+/* !!! Errata in spec. (1 = disable) */
+
+#define PHY_M_PC_DSC(x) (((u16)(x)<<12) & PHY_M_PC_DSC_MSK)
+ /* 100=5x; 101=6x; 110=7x; 111=8x */
+enum {
+ MAC_TX_CLK_0_MHZ = 2,
+ MAC_TX_CLK_2_5_MHZ = 6,
+ MAC_TX_CLK_25_MHZ = 7,
+};
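+
+/*
+ * For illustration: PHY_M_EC_MAC_S(MAC_TX_CLK_25_MHZ) places the value 7 in
+ * bits 6..4 (0x0070), selecting the 25 MHz default MAC interface speed in
+ * PHY_MARV_EXT_CTRL.
+ */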
+
+/***** PHY_MARV_LED_CTRL 16 bit r/w LED Control Reg *****/
+enum {
+ PHY_M_LEDC_DIS_LED = 1<<15, /* Disable LED */
+ PHY_M_LEDC_PULS_MSK = 7<<12,/* Bit 14..12: Pulse Stretch Mask */
+ PHY_M_LEDC_F_INT = 1<<11, /* Force Interrupt */
+ PHY_M_LEDC_BL_R_MSK = 7<<8,/* Bit 10.. 8: Blink Rate Mask */
+ PHY_M_LEDC_DP_C_LSB = 1<<7, /* Duplex Control (LSB, 88E1111 only) */
+ PHY_M_LEDC_TX_C_LSB = 1<<6, /* Tx Control (LSB, 88E1111 only) */
+ PHY_M_LEDC_LK_C_MSK = 7<<3,/* Bit 5.. 3: Link Control Mask */
+ /* (88E1111 only) */
+};
+
+enum {
+ PHY_M_LEDC_LINK_MSK = 3<<3,/* Bit 4.. 3: Link Control Mask */
+ /* (88E1011 only) */
+ PHY_M_LEDC_DP_CTRL = 1<<2, /* Duplex Control */
+ PHY_M_LEDC_DP_C_MSB = 1<<2, /* Duplex Control (MSB, 88E1111 only) */
+ PHY_M_LEDC_RX_CTRL = 1<<1, /* Rx Activity / Link */
+ PHY_M_LEDC_TX_CTRL = 1<<0, /* Tx Activity / Link */
+ PHY_M_LEDC_TX_C_MSB = 1<<0, /* Tx Control (MSB, 88E1111 only) */
+};
+
+#define PHY_M_LED_PULS_DUR(x) (((u16)(x)<<12) & PHY_M_LEDC_PULS_MSK)
+
+/***** PHY_MARV_PHY_STAT (page 3)16 bit r/w Polarity Control Reg. *****/
+enum {
+ PHY_M_POLC_LS1M_MSK = 0xf<<12, /* Bit 15..12: LOS,STAT1 Mix % Mask */
+ PHY_M_POLC_IS0M_MSK = 0xf<<8, /* Bit 11.. 8: INIT,STAT0 Mix % Mask */
+ PHY_M_POLC_LOS_MSK = 0x3<<6, /* Bit 7.. 6: LOS Pol. Ctrl. Mask */
+ PHY_M_POLC_INIT_MSK = 0x3<<4, /* Bit 5.. 4: INIT Pol. Ctrl. Mask */
+ PHY_M_POLC_STA1_MSK = 0x3<<2, /* Bit 3.. 2: STAT1 Pol. Ctrl. Mask */
+ PHY_M_POLC_STA0_MSK = 0x3, /* Bit 1.. 0: STAT0 Pol. Ctrl. Mask */
+};
+
+#define PHY_M_POLC_LS1_P_MIX(x) (((x)<<12) & PHY_M_POLC_LS1M_MSK)
+#define PHY_M_POLC_IS0_P_MIX(x) (((x)<<8) & PHY_M_POLC_IS0M_MSK)
+#define PHY_M_POLC_LOS_CTRL(x) (((x)<<6) & PHY_M_POLC_LOS_MSK)
+#define PHY_M_POLC_INIT_CTRL(x) (((x)<<4) & PHY_M_POLC_INIT_MSK)
+#define PHY_M_POLC_STA1_CTRL(x) (((x)<<2) & PHY_M_POLC_STA1_MSK)
+#define PHY_M_POLC_STA0_CTRL(x) (((x)<<0) & PHY_M_POLC_STA0_MSK)
+
+enum {
+ PULS_NO_STR = 0,/* no pulse stretching */
+ PULS_21MS = 1,/* 21 ms to 42 ms */
+ PULS_42MS = 2,/* 42 ms to 84 ms */
+ PULS_84MS = 3,/* 84 ms to 170 ms */
+ PULS_170MS = 4,/* 170 ms to 340 ms */
+ PULS_340MS = 5,/* 340 ms to 670 ms */
+ PULS_670MS = 6,/* 670 ms to 1.3 s */
+ PULS_1300MS = 7,/* 1.3 s to 2.7 s */
+};
+
+#define PHY_M_LED_BLINK_RT(x) (((u16)(x)<<8) & PHY_M_LEDC_BL_R_MSK)
+
+enum {
+ BLINK_42MS = 0,/* 42 ms */
+ BLINK_84MS = 1,/* 84 ms */
+ BLINK_170MS = 2,/* 170 ms */
+ BLINK_340MS = 3,/* 340 ms */
+ BLINK_670MS = 4,/* 670 ms */
+};
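+
+/*
+ * For illustration: an LED control value combining a 170 ms pulse stretch
+ * with an 84 ms blink rate can be built from the two shift helpers above:
+ *
+ *	u16 ledc = PHY_M_LED_PULS_DUR(PULS_170MS) | PHY_M_LED_BLINK_RT(BLINK_84MS);
+ */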
+
+/***** PHY_MARV_LED_OVER 16 bit r/w Manual LED Override Reg *****/
+#define PHY_M_LED_MO_SGMII(x) ((x)<<14) /* Bit 15..14: SGMII AN Timer */
+
+#define PHY_M_LED_MO_DUP(x) ((x)<<10) /* Bit 11..10: Duplex */
+#define PHY_M_LED_MO_10(x) ((x)<<8) /* Bit 9.. 8: Link 10 */
+#define PHY_M_LED_MO_100(x) ((x)<<6) /* Bit 7.. 6: Link 100 */
+#define PHY_M_LED_MO_1000(x) ((x)<<4) /* Bit 5.. 4: Link 1000 */
+#define PHY_M_LED_MO_RX(x) ((x)<<2) /* Bit 3.. 2: Rx */
+#define PHY_M_LED_MO_TX(x) ((x)<<0) /* Bit 1.. 0: Tx */
+
+enum led_mode {
+ MO_LED_NORM = 0,
+ MO_LED_BLINK = 1,
+ MO_LED_OFF = 2,
+ MO_LED_ON = 3,
+};
+
+/***** PHY_MARV_EXT_CTRL_2 16 bit r/w Ext. PHY Specific Ctrl 2 *****/
+enum {
+ PHY_M_EC2_FI_IMPED = 1<<6, /* Fiber Input Impedance */
+ PHY_M_EC2_FO_IMPED = 1<<5, /* Fiber Output Impedance */
+ PHY_M_EC2_FO_M_CLK = 1<<4, /* Fiber Mode Clock Enable */
+ PHY_M_EC2_FO_BOOST = 1<<3, /* Fiber Output Boost */
+ PHY_M_EC2_FO_AM_MSK = 7,/* Bit 2.. 0: Fiber Output Amplitude */
+};
+
+/***** PHY_MARV_EXT_P_STAT 16 bit r/w Ext. PHY Specific Status *****/
+enum {
+ PHY_M_FC_AUTO_SEL = 1<<15, /* Fiber/Copper Auto Sel. Dis. */
+ PHY_M_FC_AN_REG_ACC = 1<<14, /* Fiber/Copper AN Reg. Access */
+ PHY_M_FC_RESOLUTION = 1<<13, /* Fiber/Copper Resolution */
+ PHY_M_SER_IF_AN_BP = 1<<12, /* Ser. IF AN Bypass Enable */
+ PHY_M_SER_IF_BP_ST = 1<<11, /* Ser. IF AN Bypass Status */
+ PHY_M_IRQ_POLARITY = 1<<10, /* IRQ polarity */
+ PHY_M_DIS_AUT_MED = 1<<9, /* Disable Aut. Medium Reg. Selection */
+ /* (88E1111 only) */
+
+ PHY_M_UNDOC1 = 1<<7, /* undocumented bit !! */
+ PHY_M_DTE_POW_STAT = 1<<4, /* DTE Power Status (88E1111 only) */
+ PHY_M_MODE_MASK = 0xf, /* Bit 3.. 0: copy of HWCFG MODE[3:0] */
+};
+
+/* for 10/100 Fast Ethernet PHY (88E3082 only) */
+/***** PHY_MARV_FE_LED_PAR 16 bit r/w LED Parallel Select Reg. *****/
+ /* Bit 15..12: reserved (used internally) */
+enum {
+ PHY_M_FELP_LED2_MSK = 0xf<<8, /* Bit 11.. 8: LED2 Mask (LINK) */
+ PHY_M_FELP_LED1_MSK = 0xf<<4, /* Bit 7.. 4: LED1 Mask (ACT) */
+ PHY_M_FELP_LED0_MSK = 0xf, /* Bit 3.. 0: LED0 Mask (SPEED) */
+};
+
+#define PHY_M_FELP_LED2_CTRL(x) (((u16)(x)<<8) & PHY_M_FELP_LED2_MSK)
+#define PHY_M_FELP_LED1_CTRL(x) (((u16)(x)<<4) & PHY_M_FELP_LED1_MSK)
+#define PHY_M_FELP_LED0_CTRL(x) (((u16)(x)<<0) & PHY_M_FELP_LED0_MSK)
+
+enum {
+ LED_PAR_CTRL_COLX = 0x00,
+ LED_PAR_CTRL_ERROR = 0x01,
+ LED_PAR_CTRL_DUPLEX = 0x02,
+ LED_PAR_CTRL_DP_COL = 0x03,
+ LED_PAR_CTRL_SPEED = 0x04,
+ LED_PAR_CTRL_LINK = 0x05,
+ LED_PAR_CTRL_TX = 0x06,
+ LED_PAR_CTRL_RX = 0x07,
+ LED_PAR_CTRL_ACT = 0x08,
+ LED_PAR_CTRL_LNK_RX = 0x09,
+ LED_PAR_CTRL_LNK_AC = 0x0a,
+ LED_PAR_CTRL_ACT_BL = 0x0b,
+ LED_PAR_CTRL_TX_BL = 0x0c,
+ LED_PAR_CTRL_RX_BL = 0x0d,
+ LED_PAR_CTRL_COL_BL = 0x0e,
+ LED_PAR_CTRL_INACT = 0x0f
+};
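+
+/*
+ * For illustration: a parallel LED selection for the 88E3082 might map LED2
+ * to link, LED1 to activity and LED0 to speed:
+ *
+ *	u16 par = PHY_M_FELP_LED2_CTRL(LED_PAR_CTRL_LINK) |
+ *		  PHY_M_FELP_LED1_CTRL(LED_PAR_CTRL_ACT) |
+ *		  PHY_M_FELP_LED0_CTRL(LED_PAR_CTRL_SPEED);
+ */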
+
+/***** PHY_MARV_FE_SPEC_2 16 bit r/w Specific Control Reg. 2 *****/
+enum {
+ PHY_M_FESC_DIS_WAIT = 1<<2, /* Disable TDR Waiting Period */
+ PHY_M_FESC_ENA_MCLK = 1<<1, /* Enable MAC Rx Clock in sleep mode */
+ PHY_M_FESC_SEL_CL_A = 1<<0, /* Select Class A driver (100B-TX) */
+};
+
+/* for Yukon-2 Gigabit Ethernet PHY (88E1112 only) */
+/***** PHY_MARV_PHY_CTRL (page 1) 16 bit r/w Fiber Specific Ctrl *****/
+enum {
+ PHY_M_FIB_FORCE_LNK = 1<<10,/* Force Link Good */
+ PHY_M_FIB_SIGD_POL = 1<<9, /* SIGDET Polarity */
+ PHY_M_FIB_TX_DIS = 1<<3, /* Transmitter Disable */
+};
+
+/* for Yukon-2 Gigabit Ethernet PHY (88E1112 only) */
+/***** PHY_MARV_PHY_CTRL (page 2) 16 bit r/w MAC Specific Ctrl *****/
+enum {
+ PHY_M_MAC_MD_MSK = 7<<7, /* Bit 9.. 7: Mode Select Mask */
+ PHY_M_MAC_GMIF_PUP = 1<<3, /* GMII Power Up (88E1149 only) */
+ PHY_M_MAC_MD_AUTO = 3,/* Auto Copper/1000Base-X */
+ PHY_M_MAC_MD_COPPER = 5,/* Copper only */
+ PHY_M_MAC_MD_1000BX = 7,/* 1000Base-X only */
+};
+#define PHY_M_MAC_MODE_SEL(x) (((x)<<7) & PHY_M_MAC_MD_MSK)
+
+/***** PHY_MARV_PHY_CTRL (page 3) 16 bit r/w LED Control Reg. *****/
+enum {
+ PHY_M_LEDC_LOS_MSK = 0xf<<12,/* Bit 15..12: LOS LED Ctrl. Mask */
+ PHY_M_LEDC_INIT_MSK = 0xf<<8, /* Bit 11.. 8: INIT LED Ctrl. Mask */
+ PHY_M_LEDC_STA1_MSK = 0xf<<4,/* Bit 7.. 4: STAT1 LED Ctrl. Mask */
+ PHY_M_LEDC_STA0_MSK = 0xf, /* Bit 3.. 0: STAT0 LED Ctrl. Mask */
+};
+
+#define PHY_M_LEDC_LOS_CTRL(x) (((x)<<12) & PHY_M_LEDC_LOS_MSK)
+#define PHY_M_LEDC_INIT_CTRL(x) (((x)<<8) & PHY_M_LEDC_INIT_MSK)
+#define PHY_M_LEDC_STA1_CTRL(x) (((x)<<4) & PHY_M_LEDC_STA1_MSK)
+#define PHY_M_LEDC_STA0_CTRL(x) (((x)<<0) & PHY_M_LEDC_STA0_MSK)
+
+/* GMAC registers */
+/* Port Registers */
+enum {
+ GM_GP_STAT = 0x0000, /* 16 bit r/o General Purpose Status */
+ GM_GP_CTRL = 0x0004, /* 16 bit r/w General Purpose Control */
+ GM_TX_CTRL = 0x0008, /* 16 bit r/w Transmit Control Reg. */
+ GM_RX_CTRL = 0x000c, /* 16 bit r/w Receive Control Reg. */
+ GM_TX_FLOW_CTRL = 0x0010, /* 16 bit r/w Transmit Flow-Control */
+ GM_TX_PARAM = 0x0014, /* 16 bit r/w Transmit Parameter Reg. */
+ GM_SERIAL_MODE = 0x0018, /* 16 bit r/w Serial Mode Register */
+/* Source Address Registers */
+ GM_SRC_ADDR_1L = 0x001c, /* 16 bit r/w Source Address 1 (low) */
+ GM_SRC_ADDR_1M = 0x0020, /* 16 bit r/w Source Address 1 (middle) */
+ GM_SRC_ADDR_1H = 0x0024, /* 16 bit r/w Source Address 1 (high) */
+ GM_SRC_ADDR_2L = 0x0028, /* 16 bit r/w Source Address 2 (low) */
+ GM_SRC_ADDR_2M = 0x002c, /* 16 bit r/w Source Address 2 (middle) */
+ GM_SRC_ADDR_2H = 0x0030, /* 16 bit r/w Source Address 2 (high) */
+
+/* Multicast Address Hash Registers */
+ GM_MC_ADDR_H1 = 0x0034, /* 16 bit r/w Multicast Address Hash 1 */
+ GM_MC_ADDR_H2 = 0x0038, /* 16 bit r/w Multicast Address Hash 2 */
+ GM_MC_ADDR_H3 = 0x003c, /* 16 bit r/w Multicast Address Hash 3 */
+ GM_MC_ADDR_H4 = 0x0040, /* 16 bit r/w Multicast Address Hash 4 */
+
+/* Interrupt Source Registers */
+ GM_TX_IRQ_SRC = 0x0044, /* 16 bit r/o Tx Overflow IRQ Source */
+ GM_RX_IRQ_SRC = 0x0048, /* 16 bit r/o Rx Overflow IRQ Source */
+ GM_TR_IRQ_SRC = 0x004c, /* 16 bit r/o Tx/Rx Over. IRQ Source */
+
+/* Interrupt Mask Registers */
+ GM_TX_IRQ_MSK = 0x0050, /* 16 bit r/w Tx Overflow IRQ Mask */
+ GM_RX_IRQ_MSK = 0x0054, /* 16 bit r/w Rx Overflow IRQ Mask */
+ GM_TR_IRQ_MSK = 0x0058, /* 16 bit r/w Tx/Rx Over. IRQ Mask */
+
+/* Serial Management Interface (SMI) Registers */
+ GM_SMI_CTRL = 0x0080, /* 16 bit r/w SMI Control Register */
+ GM_SMI_DATA = 0x0084, /* 16 bit r/w SMI Data Register */
+ GM_PHY_ADDR = 0x0088, /* 16 bit r/w GPHY Address Register */
+/* MIB Counters */
+ GM_MIB_CNT_BASE = 0x0100, /* Base Address of MIB Counters */
+ GM_MIB_CNT_END = 0x025C, /* Last MIB counter */
+};
+
+
+/*
+ * MIB Counters base address definitions (low word) -
+ * use offset 4 for access to high word (32 bit r/o)
+ */
+enum {
+ GM_RXF_UC_OK = GM_MIB_CNT_BASE + 0, /* Unicast Frames Received OK */
+ GM_RXF_BC_OK = GM_MIB_CNT_BASE + 8, /* Broadcast Frames Received OK */
+ GM_RXF_MPAUSE = GM_MIB_CNT_BASE + 16, /* Pause MAC Ctrl Frames Received */
+ GM_RXF_MC_OK = GM_MIB_CNT_BASE + 24, /* Multicast Frames Received OK */
+ GM_RXF_FCS_ERR = GM_MIB_CNT_BASE + 32, /* Rx Frame Check Seq. Error */
+
+ GM_RXO_OK_LO = GM_MIB_CNT_BASE + 48, /* Octets Received OK Low */
+ GM_RXO_OK_HI = GM_MIB_CNT_BASE + 56, /* Octets Received OK High */
+ GM_RXO_ERR_LO = GM_MIB_CNT_BASE + 64, /* Octets Received Invalid Low */
+ GM_RXO_ERR_HI = GM_MIB_CNT_BASE + 72, /* Octets Received Invalid High */
+ GM_RXF_SHT = GM_MIB_CNT_BASE + 80, /* Frames <64 Byte Received OK */
+ GM_RXE_FRAG = GM_MIB_CNT_BASE + 88, /* Frames <64 Byte Received with FCS Err */
+ GM_RXF_64B = GM_MIB_CNT_BASE + 96, /* 64 Byte Rx Frame */
+ GM_RXF_127B = GM_MIB_CNT_BASE + 104,/* 65-127 Byte Rx Frame */
+ GM_RXF_255B = GM_MIB_CNT_BASE + 112,/* 128-255 Byte Rx Frame */
+ GM_RXF_511B = GM_MIB_CNT_BASE + 120,/* 256-511 Byte Rx Frame */
+ GM_RXF_1023B = GM_MIB_CNT_BASE + 128,/* 512-1023 Byte Rx Frame */
+ GM_RXF_1518B = GM_MIB_CNT_BASE + 136,/* 1024-1518 Byte Rx Frame */
+ GM_RXF_MAX_SZ = GM_MIB_CNT_BASE + 144,/* 1519-MaxSize Byte Rx Frame */
+ GM_RXF_LNG_ERR = GM_MIB_CNT_BASE + 152,/* Rx Frame too Long Error */
+ GM_RXF_JAB_PKT = GM_MIB_CNT_BASE + 160,/* Rx Jabber Packet Frame */
+
+ GM_RXE_FIFO_OV = GM_MIB_CNT_BASE + 176,/* Rx FIFO overflow Event */
+ GM_TXF_UC_OK = GM_MIB_CNT_BASE + 192,/* Unicast Frames Xmitted OK */
+ GM_TXF_BC_OK = GM_MIB_CNT_BASE + 200,/* Broadcast Frames Xmitted OK */
+ GM_TXF_MPAUSE = GM_MIB_CNT_BASE + 208,/* Pause MAC Ctrl Frames Xmitted */
+ GM_TXF_MC_OK = GM_MIB_CNT_BASE + 216,/* Multicast Frames Xmitted OK */
+ GM_TXO_OK_LO = GM_MIB_CNT_BASE + 224,/* Octets Transmitted OK Low */
+ GM_TXO_OK_HI = GM_MIB_CNT_BASE + 232,/* Octets Transmitted OK High */
+ GM_TXF_64B = GM_MIB_CNT_BASE + 240,/* 64 Byte Tx Frame */
+ GM_TXF_127B = GM_MIB_CNT_BASE + 248,/* 65-127 Byte Tx Frame */
+ GM_TXF_255B = GM_MIB_CNT_BASE + 256,/* 128-255 Byte Tx Frame */
+ GM_TXF_511B = GM_MIB_CNT_BASE + 264,/* 256-511 Byte Tx Frame */
+ GM_TXF_1023B = GM_MIB_CNT_BASE + 272,/* 512-1023 Byte Tx Frame */
+ GM_TXF_1518B = GM_MIB_CNT_BASE + 280,/* 1024-1518 Byte Tx Frame */
+ GM_TXF_MAX_SZ = GM_MIB_CNT_BASE + 288,/* 1519-MaxSize Byte Tx Frame */
+
+ GM_TXF_COL = GM_MIB_CNT_BASE + 304,/* Tx Collision */
+ GM_TXF_LAT_COL = GM_MIB_CNT_BASE + 312,/* Tx Late Collision */
+ GM_TXF_ABO_COL = GM_MIB_CNT_BASE + 320,/* Tx aborted due to Exces. Col. */
+ GM_TXF_MUL_COL = GM_MIB_CNT_BASE + 328,/* Tx Multiple Collision */
+ GM_TXF_SNG_COL = GM_MIB_CNT_BASE + 336,/* Tx Single Collision */
+ GM_TXE_FIFO_UR = GM_MIB_CNT_BASE + 344,/* Tx FIFO Underrun Event */
+};
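+
+/*
+ * For illustration: each MIB counter above names its low 16-bit word; per
+ * the note preceding this block, the high word sits 4 bytes further on, so
+ * a full 32-bit value is read as two 16-bit accesses at, e.g., GM_RXF_UC_OK
+ * and GM_RXF_UC_OK + 4 (see gma_read32()/get_stats32() below).
+ */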
+
+/* GMAC Bit Definitions */
+/* GM_GP_STAT 16 bit r/o General Purpose Status Register */
+enum {
+ GM_GPSR_SPEED = 1<<15, /* Bit 15: Port Speed (1 = 100 Mbps) */
+ GM_GPSR_DUPLEX = 1<<14, /* Bit 14: Duplex Mode (1 = Full) */
+ GM_GPSR_FC_TX_DIS = 1<<13, /* Bit 13: Tx Flow-Control Mode Disabled */
+ GM_GPSR_LINK_UP = 1<<12, /* Bit 12: Link Up Status */
+ GM_GPSR_PAUSE = 1<<11, /* Bit 11: Pause State */
+ GM_GPSR_TX_ACTIVE = 1<<10, /* Bit 10: Tx in Progress */
+ GM_GPSR_EXC_COL = 1<<9, /* Bit 9: Excessive Collisions Occurred */
+ GM_GPSR_LAT_COL = 1<<8, /* Bit 8: Late Collisions Occurred */
+
+ GM_GPSR_PHY_ST_CH = 1<<5, /* Bit 5: PHY Status Change */
+ GM_GPSR_GIG_SPEED = 1<<4, /* Bit 4: Gigabit Speed (1 = 1000 Mbps) */
+ GM_GPSR_PART_MODE = 1<<3, /* Bit 3: Partition mode */
+ GM_GPSR_FC_RX_DIS = 1<<2, /* Bit 2: Rx Flow-Control Mode Disabled */
+ GM_GPSR_PROM_EN = 1<<1, /* Bit 1: Promiscuous Mode Enabled */
+};
+
+/* GM_GP_CTRL 16 bit r/w General Purpose Control Register */
+enum {
+ GM_GPCR_PROM_ENA = 1<<14, /* Bit 14: Enable Promiscuous Mode */
+ GM_GPCR_FC_TX_DIS = 1<<13, /* Bit 13: Disable Tx Flow-Control Mode */
+ GM_GPCR_TX_ENA = 1<<12, /* Bit 12: Enable Transmit */
+ GM_GPCR_RX_ENA = 1<<11, /* Bit 11: Enable Receive */
+ GM_GPCR_BURST_ENA = 1<<10, /* Bit 10: Enable Burst Mode */
+ GM_GPCR_LOOP_ENA = 1<<9, /* Bit 9: Enable MAC Loopback Mode */
+ GM_GPCR_PART_ENA = 1<<8, /* Bit 8: Enable Partition Mode */
+ GM_GPCR_GIGS_ENA = 1<<7, /* Bit 7: Gigabit Speed (1000 Mbps) */
+ GM_GPCR_FL_PASS = 1<<6, /* Bit 6: Force Link Pass */
+ GM_GPCR_DUP_FULL = 1<<5, /* Bit 5: Full Duplex Mode */
+ GM_GPCR_FC_RX_DIS = 1<<4, /* Bit 4: Disable Rx Flow-Control Mode */
+ GM_GPCR_SPEED_100 = 1<<3, /* Bit 3: Port Speed 100 Mbps */
+ GM_GPCR_AU_DUP_DIS = 1<<2, /* Bit 2: Disable Auto-Update Duplex */
+ GM_GPCR_AU_FCT_DIS = 1<<1, /* Bit 1: Disable Auto-Update Flow-C. */
+ GM_GPCR_AU_SPD_DIS = 1<<0, /* Bit 0: Disable Auto-Update Speed */
+};
+
+#define GM_GPCR_SPEED_1000 (GM_GPCR_GIGS_ENA | GM_GPCR_SPEED_100)
+
+/* GM_TX_CTRL 16 bit r/w Transmit Control Register */
+enum {
+ GM_TXCR_FORCE_JAM = 1<<15, /* Bit 15: Force Jam / Flow-Control */
+ GM_TXCR_CRC_DIS = 1<<14, /* Bit 14: Disable insertion of CRC */
+ GM_TXCR_PAD_DIS = 1<<13, /* Bit 13: Disable padding of packets */
+ GM_TXCR_COL_THR_MSK = 7<<10, /* Bit 12..10: Collision Threshold */
+};
+
+#define TX_COL_THR(x) (((x)<<10) & GM_TXCR_COL_THR_MSK)
+#define TX_COL_DEF 0x04
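+
+/*
+ * For illustration: TX_COL_THR(TX_COL_DEF) evaluates to 4 << 10 == 0x1000,
+ * the default collision threshold field value for GM_TX_CTRL.
+ */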
+
+/* GM_RX_CTRL 16 bit r/w Receive Control Register */
+enum {
+ GM_RXCR_UCF_ENA = 1<<15, /* Bit 15: Enable Unicast filtering */
+ GM_RXCR_MCF_ENA = 1<<14, /* Bit 14: Enable Multicast filtering */
+ GM_RXCR_CRC_DIS = 1<<13, /* Bit 13: Remove 4-byte CRC */
+ GM_RXCR_PASS_FC = 1<<12, /* Bit 12: Pass FC packets to FIFO */
+};
+
+/* GM_TX_PARAM 16 bit r/w Transmit Parameter Register */
+enum {
+ GM_TXPA_JAMLEN_MSK = 0x03<<14, /* Bit 15..14: Jam Length */
+ GM_TXPA_JAMIPG_MSK = 0x1f<<9, /* Bit 13..9: Jam IPG */
+ GM_TXPA_JAMDAT_MSK = 0x1f<<4, /* Bit 8..4: IPG Jam to Data */
+ GM_TXPA_BO_LIM_MSK = 0x0f, /* Bit 3.. 0: Backoff Limit Mask */
+
+ TX_JAM_LEN_DEF = 0x03,
+ TX_JAM_IPG_DEF = 0x0b,
+ TX_IPG_JAM_DEF = 0x1c,
+ TX_BOF_LIM_DEF = 0x04,
+};
+
+#define TX_JAM_LEN_VAL(x) (((x)<<14) & GM_TXPA_JAMLEN_MSK)
+#define TX_JAM_IPG_VAL(x) (((x)<<9) & GM_TXPA_JAMIPG_MSK)
+#define TX_IPG_JAM_DATA(x) (((x)<<4) & GM_TXPA_JAMDAT_MSK)
+#define TX_BACK_OFF_LIM(x) ((x) & GM_TXPA_BO_LIM_MSK)
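+
+/*
+ * For illustration: a GM_TX_PARAM value built from the defaults above would
+ * be composed as
+ *
+ *	u16 txpa = TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) |
+ *		   TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
+ *		   TX_IPG_JAM_DATA(TX_IPG_JAM_DEF) |
+ *		   TX_BACK_OFF_LIM(TX_BOF_LIM_DEF);
+ */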
+
+
+/* GM_SERIAL_MODE 16 bit r/w Serial Mode Register */
+enum {
+ GM_SMOD_DATABL_MSK = 0x1f<<11, /* Bit 15..11: Data Blinder (r/o) */
+ GM_SMOD_LIMIT_4 = 1<<10, /* 4 consecutive Tx trials */
+ GM_SMOD_VLAN_ENA = 1<<9, /* Enable VLAN (Max. Frame Len) */
+ GM_SMOD_JUMBO_ENA = 1<<8, /* Enable Jumbo (Max. Frame Len) */
+
+ GM_NEW_FLOW_CTRL = 1<<6, /* Enable New Flow-Control */
+
+ GM_SMOD_IPG_MSK = 0x1f /* Bit 4..0: Inter-Packet Gap (IPG) */
+};
+
+#define DATA_BLIND_VAL(x) (((x)<<11) & GM_SMOD_DATABL_MSK)
+#define IPG_DATA_VAL(x) (x & GM_SMOD_IPG_MSK)
+
+#define DATA_BLIND_DEF 0x04
+#define IPG_DATA_DEF_1000 0x1e
+#define IPG_DATA_DEF_10_100 0x18
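+
+/*
+ * For illustration: a serial mode word for gigabit operation might combine
+ * the data blinder default, the VLAN frame-length extension and the gigabit
+ * IPG default:
+ *
+ *	u16 smod = DATA_BLIND_VAL(DATA_BLIND_DEF) | GM_SMOD_VLAN_ENA |
+ *		   IPG_DATA_VAL(IPG_DATA_DEF_1000);
+ */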
+
+/* GM_SMI_CTRL 16 bit r/w SMI Control Register */
+enum {
+ GM_SMI_CT_PHY_A_MSK = 0x1f<<11,/* Bit 15..11: PHY Device Address */
+ GM_SMI_CT_REG_A_MSK = 0x1f<<6,/* Bit 10.. 6: PHY Register Address */
+ GM_SMI_CT_OP_RD = 1<<5, /* Bit 5: OpCode Read (0=Write)*/
+ GM_SMI_CT_RD_VAL = 1<<4, /* Bit 4: Read Valid (Read completed) */
+ GM_SMI_CT_BUSY = 1<<3, /* Bit 3: Busy (Operation in progress) */
+};
+
+#define GM_SMI_CT_PHY_AD(x) (((u16)(x)<<11) & GM_SMI_CT_PHY_A_MSK)
+#define GM_SMI_CT_REG_AD(x) (((u16)(x)<<6) & GM_SMI_CT_REG_A_MSK)
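+
+/*
+ * For illustration (the PHY address 0 below is only a placeholder): an SMI
+ * read command for PHY register r would be composed as
+ *
+ *	u16 cmd = GM_SMI_CT_PHY_AD(0) | GM_SMI_CT_REG_AD(r) | GM_SMI_CT_OP_RD;
+ *
+ * written to GM_SMI_CTRL, with completion indicated by GM_SMI_CT_RD_VAL.
+ */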
+
+/* GM_PHY_ADDR 16 bit r/w GPHY Address Register */
+enum {
+ GM_PAR_MIB_CLR = 1<<5, /* Bit 5: Set MIB Clear Counter Mode */
+ GM_PAR_MIB_TST = 1<<4, /* Bit 4: MIB Load Counter (Test Mode) */
+};
+
+/* Receive Frame Status Encoding */
+enum {
+ GMR_FS_LEN = 0x7fff<<16, /* Bit 30..16: Rx Frame Length */
+ GMR_FS_VLAN = 1<<13, /* VLAN Packet */
+ GMR_FS_JABBER = 1<<12, /* Jabber Packet */
+ GMR_FS_UN_SIZE = 1<<11, /* Undersize Packet */
+ GMR_FS_MC = 1<<10, /* Multicast Packet */
+ GMR_FS_BC = 1<<9, /* Broadcast Packet */
+ GMR_FS_RX_OK = 1<<8, /* Receive OK (Good Packet) */
+ GMR_FS_GOOD_FC = 1<<7, /* Good Flow-Control Packet */
+ GMR_FS_BAD_FC = 1<<6, /* Bad Flow-Control Packet */
+ GMR_FS_MII_ERR = 1<<5, /* MII Error */
+ GMR_FS_LONG_ERR = 1<<4, /* Too Long Packet */
+ GMR_FS_FRAGMENT = 1<<3, /* Fragment */
+
+ GMR_FS_CRC_ERR = 1<<1, /* CRC Error */
+ GMR_FS_RX_FF_OV = 1<<0, /* Rx FIFO Overflow */
+
+ GMR_FS_ANY_ERR = GMR_FS_RX_FF_OV | GMR_FS_CRC_ERR |
+ GMR_FS_FRAGMENT | GMR_FS_LONG_ERR |
+ GMR_FS_MII_ERR | GMR_FS_BAD_FC |
+ GMR_FS_UN_SIZE | GMR_FS_JABBER,
+};
+
+/* RX_GMF_CTRL_T 32 bit Rx GMAC FIFO Control/Test */
+enum {
+ RX_GCLKMAC_ENA = 1<<31, /* RX MAC Clock Gating Enable */
+ RX_GCLKMAC_OFF = 1<<30,
+
+ RX_STFW_DIS = 1<<29, /* RX Store and Forward Disable */
+ RX_STFW_ENA = 1<<28, /* RX Store and Forward Enable */
+
+ RX_TRUNC_ON = 1<<27, /* enable packet truncation */
+ RX_TRUNC_OFF = 1<<26, /* disable packet truncation */
+ RX_VLAN_STRIP_ON = 1<<25, /* enable VLAN stripping */
+ RX_VLAN_STRIP_OFF = 1<<24, /* disable VLAN stripping */
+
+ RX_MACSEC_FLUSH_ON = 1<<23,
+ RX_MACSEC_FLUSH_OFF = 1<<22,
+ RX_MACSEC_ASF_FLUSH_ON = 1<<21,
+ RX_MACSEC_ASF_FLUSH_OFF = 1<<20,
+
+ GMF_RX_OVER_ON = 1<<19, /* enable flushing on receive overrun */
+ GMF_RX_OVER_OFF = 1<<18, /* disable flushing on receive overrun */
+ GMF_ASF_RX_OVER_ON = 1<<17, /* enable flushing of ASF when overrun */
+ GMF_ASF_RX_OVER_OFF = 1<<16, /* disable flushing of ASF when overrun */
+
+ GMF_WP_TST_ON = 1<<14, /* Write Pointer Test On */
+ GMF_WP_TST_OFF = 1<<13, /* Write Pointer Test Off */
+ GMF_WP_STEP = 1<<12, /* Write Pointer Step/Increment */
+
+ GMF_RP_TST_ON = 1<<10, /* Read Pointer Test On */
+ GMF_RP_TST_OFF = 1<<9, /* Read Pointer Test Off */
+ GMF_RP_STEP = 1<<8, /* Read Pointer Step/Increment */
+ GMF_RX_F_FL_ON = 1<<7, /* Rx FIFO Flush Mode On */
+ GMF_RX_F_FL_OFF = 1<<6, /* Rx FIFO Flush Mode Off */
+ GMF_CLI_RX_FO = 1<<5, /* Clear IRQ Rx FIFO Overrun */
+ GMF_CLI_RX_C = 1<<4, /* Clear IRQ Rx Frame Complete */
+
+ GMF_OPER_ON = 1<<3, /* Operational Mode On */
+ GMF_OPER_OFF = 1<<2, /* Operational Mode Off */
+ GMF_RST_CLR = 1<<1, /* Clear GMAC FIFO Reset */
+ GMF_RST_SET = 1<<0, /* Set GMAC FIFO Reset */
+
+ RX_GMF_FL_THR_DEF = 0xa, /* flush threshold (default) */
+
+ GMF_RX_CTRL_DEF = GMF_OPER_ON | GMF_RX_F_FL_ON,
+};
+
+/* RX_GMF_FL_CTRL 16 bit Rx GMAC FIFO Flush Control (Yukon-Supreme) */
+enum {
+ RX_IPV6_SA_MOB_ENA = 1<<9, /* IPv6 SA Mobility Support Enable */
+ RX_IPV6_SA_MOB_DIS = 1<<8, /* IPv6 SA Mobility Support Disable */
+ RX_IPV6_DA_MOB_ENA = 1<<7, /* IPv6 DA Mobility Support Enable */
+ RX_IPV6_DA_MOB_DIS = 1<<6, /* IPv6 DA Mobility Support Disable */
+ RX_PTR_SYNCDLY_ENA = 1<<5, /* Pointers Delay Synch Enable */
+ RX_PTR_SYNCDLY_DIS = 1<<4, /* Pointers Delay Synch Disable */
+ RX_ASF_NEWFLAG_ENA = 1<<3, /* RX ASF Flag New Logic Enable */
+ RX_ASF_NEWFLAG_DIS = 1<<2, /* RX ASF Flag New Logic Disable */
+ RX_FLSH_MISSPKT_ENA = 1<<1, /* RX Flush Miss-Packet Enable */
+ RX_FLSH_MISSPKT_DIS = 1<<0, /* RX Flush Miss-Packet Disable */
+};
+
+/* TX_GMF_EA 32 bit Tx GMAC FIFO End Address */
+enum {
+ TX_DYN_WM_ENA = 3, /* Yukon-FE+ specific */
+};
+
+/* TX_GMF_CTRL_T 32 bit Tx GMAC FIFO Control/Test */
+enum {
+ TX_STFW_DIS = 1<<31,/* Disable Store & Forward */
+ TX_STFW_ENA = 1<<30,/* Enable Store & Forward */
+
+ TX_VLAN_TAG_ON = 1<<25,/* enable VLAN tagging */
+ TX_VLAN_TAG_OFF = 1<<24,/* disable VLAN tagging */
+
+ TX_PCI_JUM_ENA = 1<<23,/* PCI Jumbo Mode enable */
+ TX_PCI_JUM_DIS = 1<<22,/* PCI Jumbo Mode disable */
+
+ GMF_WSP_TST_ON = 1<<18,/* Write Shadow Pointer Test On */
+ GMF_WSP_TST_OFF = 1<<17,/* Write Shadow Pointer Test Off */
+ GMF_WSP_STEP = 1<<16,/* Write Shadow Pointer Step/Increment */
+
+ GMF_CLI_TX_FU = 1<<6, /* Clear IRQ Tx FIFO Underrun */
+ GMF_CLI_TX_FC = 1<<5, /* Clear IRQ Tx Frame Complete */
+ GMF_CLI_TX_PE = 1<<4, /* Clear IRQ Tx Parity Error */
+};
+
+/* GMAC_TI_ST_CTRL 8 bit Time Stamp Timer Ctrl Reg (YUKON only) */
+enum {
+ GMT_ST_START = 1<<2, /* Start Time Stamp Timer */
+ GMT_ST_STOP = 1<<1, /* Stop Time Stamp Timer */
+ GMT_ST_CLR_IRQ = 1<<0, /* Clear Time Stamp Timer IRQ */
+};
+
+/* B28_Y2_ASF_STAT_CMD 32 bit ASF Status and Command Reg */
+enum {
+ Y2_ASF_OS_PRES = 1<<4, /* ASF operating system present */
+ Y2_ASF_RESET = 1<<3, /* ASF system in reset state */
+ Y2_ASF_RUNNING = 1<<2, /* ASF system operational */
+ Y2_ASF_CLR_HSTI = 1<<1, /* Clear ASF IRQ */
+ Y2_ASF_IRQ = 1<<0, /* Issue an IRQ to ASF system */
+
+ Y2_ASF_UC_STATE = 3<<2, /* ASF uC State */
+ Y2_ASF_CLK_HALT = 0, /* ASF system clock stopped */
+};
+
+/* B28_Y2_ASF_HOST_COM 32 bit ASF Host Communication Reg */
+enum {
+ Y2_ASF_CLR_ASFI = 1<<1, /* Clear host IRQ */
+ Y2_ASF_HOST_IRQ = 1<<0, /* Issue an IRQ to HOST system */
+};
+/* HCU_CCSR CPU Control and Status Register */
+enum {
+ HCU_CCSR_SMBALERT_MONITOR= 1<<27, /* SMBALERT pin monitor */
+ HCU_CCSR_CPU_SLEEP = 1<<26, /* CPU sleep status */
+ /* Clock Stretching Timeout */
+ HCU_CCSR_CS_TO = 1<<25,
+ HCU_CCSR_WDOG = 1<<24, /* Watchdog Reset */
+
+ HCU_CCSR_CLR_IRQ_HOST = 1<<17, /* Clear IRQ_HOST */
+ HCU_CCSR_SET_IRQ_HCU = 1<<16, /* Set IRQ_HCU */
+
+ HCU_CCSR_AHB_RST = 1<<9, /* Reset AHB bridge */
+ HCU_CCSR_CPU_RST_MODE = 1<<8, /* CPU Reset Mode */
+
+ HCU_CCSR_SET_SYNC_CPU = 1<<5,
+ HCU_CCSR_CPU_CLK_DIVIDE_MSK = 3<<3,/* CPU Clock Divide */
+ HCU_CCSR_CPU_CLK_DIVIDE_BASE= 1<<3,
+ HCU_CCSR_OS_PRSNT = 1<<2, /* ASF OS Present */
+/* Microcontroller State */
+ HCU_CCSR_UC_STATE_MSK = 3,
+ HCU_CCSR_UC_STATE_BASE = 1<<0,
+ HCU_CCSR_ASF_RESET = 0,
+ HCU_CCSR_ASF_HALTED = 1<<1,
+ HCU_CCSR_ASF_RUNNING = 1<<0,
+};
+
+/* HCU_HCSR Host Control and Status Register */
+enum {
+ HCU_HCSR_SET_IRQ_CPU = 1<<16, /* Set IRQ_CPU */
+
+ HCU_HCSR_CLR_IRQ_HCU = 1<<1, /* Clear IRQ_HCU */
+ HCU_HCSR_SET_IRQ_HOST = 1<<0, /* Set IRQ_HOST */
+};
+
+/* STAT_CTRL 32 bit Status BMU control register (Yukon-2 only) */
+enum {
+ SC_STAT_CLR_IRQ = 1<<4, /* Status Burst IRQ clear */
+ SC_STAT_OP_ON = 1<<3, /* Operational Mode On */
+ SC_STAT_OP_OFF = 1<<2, /* Operational Mode Off */
+ SC_STAT_RST_CLR = 1<<1, /* Clear Status Unit Reset (Enable) */
+ SC_STAT_RST_SET = 1<<0, /* Set Status Unit Reset */
+};
+
+/* GMAC_CTRL 32 bit GMAC Control Reg (YUKON only) */
+enum {
+ GMC_SET_RST = 1<<15,/* Set MAC SEC Reset */
+ GMC_SEC_RST_OFF = 1<<14,/* MAC SEC Reset Off */
+ GMC_BYP_MACSECRX_ON = 1<<13,/* Bypass macsec RX */
+ GMC_BYP_MACSECRX_OFF= 1<<12,/* Bypass macsec RX off */
+ GMC_BYP_MACSECTX_ON = 1<<11,/* Bypass macsec TX */
+ GMC_BYP_MACSECTX_OFF= 1<<10,/* Bypass macsec TX off */
+ GMC_BYP_RETR_ON = 1<<9, /* Bypass retransmit FIFO On */
+ GMC_BYP_RETR_OFF= 1<<8, /* Bypass retransmit FIFO Off */
+
+ GMC_H_BURST_ON = 1<<7, /* Half Duplex Burst Mode On */
+ GMC_H_BURST_OFF = 1<<6, /* Half Duplex Burst Mode Off */
+ GMC_F_LOOPB_ON = 1<<5, /* FIFO Loopback On */
+ GMC_F_LOOPB_OFF = 1<<4, /* FIFO Loopback Off */
+ GMC_PAUSE_ON = 1<<3, /* Pause On */
+ GMC_PAUSE_OFF = 1<<2, /* Pause Off */
+ GMC_RST_CLR = 1<<1, /* Clear GMAC Reset */
+ GMC_RST_SET = 1<<0, /* Set GMAC Reset */
+};
+
+/* GPHY_CTRL 32 bit GPHY Control Reg (YUKON only) */
+enum {
+ GPC_TX_PAUSE = 1<<30, /* Tx pause enabled (ro) */
+ GPC_RX_PAUSE = 1<<29, /* Rx pause enabled (ro) */
+ GPC_SPEED = 3<<27, /* PHY speed (ro) */
+ GPC_LINK = 1<<26, /* Link up (ro) */
+ GPC_DUPLEX = 1<<25, /* Duplex (ro) */
+ GPC_CLOCK = 1<<24, /* 125 MHz clock stable (ro) */
+
+ GPC_PDOWN = 1<<23, /* Internal regulator 2.5 power down */
+ GPC_TSTMODE = 1<<22, /* Test mode */
+ GPC_REG18 = 1<<21, /* Reg18 Power down */
+ GPC_REG12SEL = 3<<19, /* Reg12 power setting */
+ GPC_REG18SEL = 3<<17, /* Reg18 power setting */
+ GPC_SPILOCK = 1<<16, /* SPI lock (ASF) */
+
+ GPC_LEDMUX = 3<<14, /* LED Mux */
+ GPC_INTPOL = 1<<13, /* Interrupt polarity */
+ GPC_DETECT = 1<<12, /* Energy detect */
+ GPC_1000HD = 1<<11, /* Enable 1000Mbit HD */
+ GPC_SLAVE = 1<<10, /* Slave mode */
+ GPC_PAUSE = 1<<9, /* Pause enable */
+ GPC_LEDCTL = 3<<6, /* GPHY Leds */
+
+ GPC_RST_CLR = 1<<1, /* Clear GPHY Reset */
+ GPC_RST_SET = 1<<0, /* Set GPHY Reset */
+};
+
+/* GMAC_IRQ_SRC 8 bit GMAC Interrupt Source Reg (YUKON only) */
+/* GMAC_IRQ_MSK 8 bit GMAC Interrupt Mask Reg (YUKON only) */
+enum {
+ GM_IS_TX_CO_OV = 1<<5, /* Transmit Counter Overflow IRQ */
+ GM_IS_RX_CO_OV = 1<<4, /* Receive Counter Overflow IRQ */
+ GM_IS_TX_FF_UR = 1<<3, /* Transmit FIFO Underrun */
+ GM_IS_TX_COMPL = 1<<2, /* Frame Transmission Complete */
+ GM_IS_RX_FF_OR = 1<<1, /* Receive FIFO Overrun */
+ GM_IS_RX_COMPL = 1<<0, /* Frame Reception Complete */
+
+#define GMAC_DEF_MSK (GM_IS_TX_FF_UR | GM_IS_RX_FF_OR)
+};
+
+/* GMAC_LINK_CTRL 16 bit GMAC Link Control Reg (YUKON only) */
+enum { /* Bits 15.. 2: reserved */
+ GMLC_RST_CLR = 1<<1, /* Clear GMAC Link Reset */
+ GMLC_RST_SET = 1<<0, /* Set GMAC Link Reset */
+};
+
+
+/* WOL_CTRL_STAT 16 bit WOL Control/Status Reg */
+enum {
+ WOL_CTL_LINK_CHG_OCC = 1<<15,
+ WOL_CTL_MAGIC_PKT_OCC = 1<<14,
+ WOL_CTL_PATTERN_OCC = 1<<13,
+ WOL_CTL_CLEAR_RESULT = 1<<12,
+ WOL_CTL_ENA_PME_ON_LINK_CHG = 1<<11,
+ WOL_CTL_DIS_PME_ON_LINK_CHG = 1<<10,
+ WOL_CTL_ENA_PME_ON_MAGIC_PKT = 1<<9,
+ WOL_CTL_DIS_PME_ON_MAGIC_PKT = 1<<8,
+ WOL_CTL_ENA_PME_ON_PATTERN = 1<<7,
+ WOL_CTL_DIS_PME_ON_PATTERN = 1<<6,
+ WOL_CTL_ENA_LINK_CHG_UNIT = 1<<5,
+ WOL_CTL_DIS_LINK_CHG_UNIT = 1<<4,
+ WOL_CTL_ENA_MAGIC_PKT_UNIT = 1<<3,
+ WOL_CTL_DIS_MAGIC_PKT_UNIT = 1<<2,
+ WOL_CTL_ENA_PATTERN_UNIT = 1<<1,
+ WOL_CTL_DIS_PATTERN_UNIT = 1<<0,
+};
+
+
+/* Control flags */
+enum {
+ UDPTCP = 1<<0,
+ CALSUM = 1<<1,
+ WR_SUM = 1<<2,
+ INIT_SUM= 1<<3,
+ LOCK_SUM= 1<<4,
+ INS_VLAN= 1<<5,
+ EOP = 1<<7,
+};
+
+enum {
+ HW_OWNER = 1<<7,
+ OP_TCPWRITE = 0x11,
+ OP_TCPSTART = 0x12,
+ OP_TCPINIT = 0x14,
+ OP_TCPLCK = 0x18,
+ OP_TCPCHKSUM = OP_TCPSTART,
+ OP_TCPIS = OP_TCPINIT | OP_TCPSTART,
+ OP_TCPLW = OP_TCPLCK | OP_TCPWRITE,
+ OP_TCPLSW = OP_TCPLCK | OP_TCPSTART | OP_TCPWRITE,
+ OP_TCPLISW = OP_TCPLCK | OP_TCPINIT | OP_TCPSTART | OP_TCPWRITE,
+
+ OP_ADDR64 = 0x21,
+ OP_VLAN = 0x22,
+ OP_ADDR64VLAN = OP_ADDR64 | OP_VLAN,
+ OP_LRGLEN = 0x24,
+ OP_LRGLENVLAN = OP_LRGLEN | OP_VLAN,
+ OP_MSS = 0x28,
+ OP_MSSVLAN = OP_MSS | OP_VLAN,
+
+ OP_BUFFER = 0x40,
+ OP_PACKET = 0x41,
+ OP_LARGESEND = 0x43,
+ OP_LSOV2 = 0x45,
+
+/* YUKON-2 STATUS opcodes defines */
+ OP_RXSTAT = 0x60,
+ OP_RXTIMESTAMP = 0x61,
+ OP_RXVLAN = 0x62,
+ OP_RXCHKS = 0x64,
+ OP_RXCHKSVLAN = OP_RXCHKS | OP_RXVLAN,
+ OP_RXTIMEVLAN = OP_RXTIMESTAMP | OP_RXVLAN,
+ OP_RSS_HASH = 0x65,
+ OP_TXINDEXLE = 0x68,
+ OP_MACSEC = 0x6c,
+ OP_PUTIDX = 0x70,
+};
+
+enum status_css {
+ CSS_TCPUDPCSOK = 1<<7, /* TCP / UDP checksum is ok */
+ CSS_ISUDP = 1<<6, /* packet is a UDP packet */
+ CSS_ISTCP = 1<<5, /* packet is a TCP packet */
+ CSS_ISIPFRAG = 1<<4, /* packet is a TCP/UDP frag, CS calc not done */
+ CSS_ISIPV6 = 1<<3, /* packet is a IPv6 packet */
+ CSS_IPV4CSUMOK = 1<<2, /* IP v4: TCP header checksum is ok */
+ CSS_ISIPV4 = 1<<1, /* packet is a IPv4 packet */
+ CSS_LINK_BIT = 1<<0, /* port number (legacy) */
+};
+
+/* Yukon 2 hardware interface */
+struct sky2_tx_le {
+ __le32 addr;
+ __le16 length; /* also vlan tag or checksum start */
+ u8 ctrl;
+ u8 opcode;
+} __packed;
+
+struct sky2_rx_le {
+ __le32 addr;
+ __le16 length;
+ u8 ctrl;
+ u8 opcode;
+} __packed;
+
+struct sky2_status_le {
+ __le32 status; /* also checksum */
+ __le16 length; /* also vlan tag */
+ u8 css;
+ u8 opcode;
+} __packed;
+
+struct tx_ring_info {
+ struct sk_buff *skb;
+ unsigned long flags;
+#define TX_MAP_SINGLE 0x0001
+#define TX_MAP_PAGE 0x0002
+ DEFINE_DMA_UNMAP_ADDR(mapaddr);
+ DEFINE_DMA_UNMAP_LEN(maplen);
+};
+
+struct rx_ring_info {
+ struct sk_buff *skb;
+ dma_addr_t data_addr;
+ DEFINE_DMA_UNMAP_LEN(data_size);
+ dma_addr_t frag_addr[ETH_JUMBO_MTU >> PAGE_SHIFT ?: 1];
+};
+
+enum flow_control {
+ FC_NONE = 0,
+ FC_TX = 1,
+ FC_RX = 2,
+ FC_BOTH = 3,
+};
+
+struct sky2_stats {
+ struct u64_stats_sync syncp;
+ u64 packets;
+ u64 bytes;
+};
+
+struct sky2_port {
+ struct sky2_hw *hw;
+ struct net_device *netdev;
+ unsigned port;
+ u32 msg_enable;
+ spinlock_t phy_lock;
+
+ struct tx_ring_info *tx_ring;
+ struct sky2_tx_le *tx_le;
+ struct sky2_stats tx_stats;
+
+ u16 tx_ring_size;
+ u16 tx_cons; /* next le to check */
+ u16 tx_prod; /* next le to use */
+ u16 tx_next; /* debug only */
+
+ u16 tx_pending;
+ u16 tx_last_mss;
+ u32 tx_last_upper;
+ u32 tx_tcpsum;
+
+ struct rx_ring_info *rx_ring ____cacheline_aligned_in_smp;
+ struct sky2_rx_le *rx_le;
+ struct sky2_stats rx_stats;
+
+ u16 rx_next; /* next re to check */
+ u16 rx_put; /* next le index to use */
+ u16 rx_pending;
+ u16 rx_data_size;
+ u16 rx_nfrags;
+
+ unsigned long last_rx;
+ struct {
+ unsigned long last;
+ u32 mac_rp;
+ u8 mac_lev;
+ u8 fifo_rp;
+ u8 fifo_lev;
+ } check;
+
+ dma_addr_t rx_le_map;
+ dma_addr_t tx_le_map;
+
+ u16 advertising; /* ADVERTISED_ bits */
+ u16 speed; /* SPEED_1000, SPEED_100, ... */
+ u8 wol; /* WAKE_ bits */
+ u8 duplex; /* DUPLEX_HALF, DUPLEX_FULL */
+ u16 flags;
+#define SKY2_FLAG_AUTO_SPEED 0x0002
+#define SKY2_FLAG_AUTO_PAUSE 0x0004
+
+ enum flow_control flow_mode;
+ enum flow_control flow_status;
+
+#ifdef CONFIG_SKY2_DEBUG
+ struct dentry *debugfs;
+#endif
+};
+
+struct sky2_hw {
+ void __iomem *regs;
+ struct pci_dev *pdev;
+ struct napi_struct napi;
+ struct net_device *dev[2];
+ unsigned long flags;
+#define SKY2_HW_USE_MSI 0x00000001
+#define SKY2_HW_FIBRE_PHY 0x00000002
+#define SKY2_HW_GIGABIT 0x00000004
+#define SKY2_HW_NEWER_PHY 0x00000008
+#define SKY2_HW_RAM_BUFFER 0x00000010
+#define SKY2_HW_NEW_LE 0x00000020 /* new LSOv2 format */
+#define SKY2_HW_AUTO_TX_SUM 0x00000040 /* new IP decode for Tx */
+#define SKY2_HW_ADV_POWER_CTL 0x00000080 /* additional PHY power regs */
+#define SKY2_HW_RSS_BROKEN 0x00000100
+#define SKY2_HW_VLAN_BROKEN 0x00000200
+#define SKY2_HW_RSS_CHKSUM 0x00000400 /* RSS requires chksum */
+#define SKY2_HW_IRQ_SETUP 0x00000800
+
+ u8 chip_id;
+ u8 chip_rev;
+ u8 pmd_type;
+ u8 ports;
+
+ struct sky2_status_le *st_le;
+ u32 st_size;
+ u32 st_idx;
+ dma_addr_t st_dma;
+
+ struct timer_list watchdog_timer;
+ struct work_struct restart_work;
+ wait_queue_head_t msi_wait;
+
+ char irq_name[];
+};
+
+static inline int sky2_is_copper(const struct sky2_hw *hw)
+{
+ return !(hw->flags & SKY2_HW_FIBRE_PHY);
+}
+
+/* Register accessor for memory mapped device */
+static inline u32 sky2_read32(const struct sky2_hw *hw, unsigned reg)
+{
+ return readl(hw->regs + reg);
+}
+
+static inline u16 sky2_read16(const struct sky2_hw *hw, unsigned reg)
+{
+ return readw(hw->regs + reg);
+}
+
+static inline u8 sky2_read8(const struct sky2_hw *hw, unsigned reg)
+{
+ return readb(hw->regs + reg);
+}
+
+static inline void sky2_write32(const struct sky2_hw *hw, unsigned reg, u32 val)
+{
+ writel(val, hw->regs + reg);
+}
+
+static inline void sky2_write16(const struct sky2_hw *hw, unsigned reg, u16 val)
+{
+ writew(val, hw->regs + reg);
+}
+
+static inline void sky2_write8(const struct sky2_hw *hw, unsigned reg, u8 val)
+{
+ writeb(val, hw->regs + reg);
+}
+
+/* Yukon PHY related registers */
+#define SK_GMAC_REG(port,reg) \
+ (BASE_GMAC_1 + (port) * (BASE_GMAC_2-BASE_GMAC_1) + (reg))
+#define GM_PHY_RETRIES 100
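+
+/*
+ * For illustration: SK_GMAC_REG() maps a per-port GMAC register into the
+ * global register space, e.g. SK_GMAC_REG(1, GM_GP_CTRL) resolves to
+ * BASE_GMAC_2 + 0x0004 == 0x3804.
+ */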
+
+static inline u16 gma_read16(const struct sky2_hw *hw, unsigned port, unsigned reg)
+{
+ return sky2_read16(hw, SK_GMAC_REG(port,reg));
+}
+
+static inline u32 gma_read32(struct sky2_hw *hw, unsigned port, unsigned reg)
+{
+ unsigned base = SK_GMAC_REG(port, reg);
+ return (u32) sky2_read16(hw, base)
+ | (u32) sky2_read16(hw, base+4) << 16;
+}
+
+static inline u64 gma_read64(struct sky2_hw *hw, unsigned port, unsigned reg)
+{
+ unsigned base = SK_GMAC_REG(port, reg);
+
+ return (u64) sky2_read16(hw, base)
+ | (u64) sky2_read16(hw, base+4) << 16
+ | (u64) sky2_read16(hw, base+8) << 32
+ | (u64) sky2_read16(hw, base+12) << 48;
+}
+
+/* There is no way to atomically read 32 bit values from PHY, so retry */
+static inline u32 get_stats32(struct sky2_hw *hw, unsigned port, unsigned reg)
+{
+ u32 val;
+
+ do {
+ val = gma_read32(hw, port, reg);
+ } while (gma_read32(hw, port, reg) != val);
+
+ return val;
+}
+
+static inline u64 get_stats64(struct sky2_hw *hw, unsigned port, unsigned reg)
+{
+ u64 val;
+
+ do {
+ val = gma_read64(hw, port, reg);
+ } while (gma_read64(hw, port, reg) != val);
+
+ return val;
+}
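+
+/*
+ * For illustration: the 64-bit octet counters are read through the retry
+ * helpers above, e.g. get_stats64(hw, port, GM_RXO_OK_LO) returns the full
+ * "Octets Received OK" count spanning GM_RXO_OK_LO/GM_RXO_OK_HI.
+ */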
+
+static inline void gma_write16(const struct sky2_hw *hw, unsigned port, int r, u16 v)
+{
+ sky2_write16(hw, SK_GMAC_REG(port,r), v);
+}
+
+static inline void gma_set_addr(struct sky2_hw *hw, unsigned port, unsigned reg,
+ const u8 *addr)
+{
+ gma_write16(hw, port, reg, (u16) addr[0] | ((u16) addr[1] << 8));
+ gma_write16(hw, port, reg+4,(u16) addr[2] | ((u16) addr[3] << 8));
+ gma_write16(hw, port, reg+8,(u16) addr[4] | ((u16) addr[5] << 8));
+}
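+
+/*
+ * For illustration: gma_set_addr() splits a 6-byte Ethernet address across
+ * the three 16-bit source address registers, so
+ *
+ *	gma_set_addr(hw, port, GM_SRC_ADDR_1L, netdev->dev_addr);
+ *
+ * would fill GM_SRC_ADDR_1L/_1M/_1H for the given port (netdev here is a
+ * hypothetical struct net_device pointer).
+ */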
+
+/* PCI config space access */
+static inline u32 sky2_pci_read32(const struct sky2_hw *hw, unsigned reg)
+{
+ return sky2_read32(hw, Y2_CFG_SPC + reg);
+}
+
+static inline u16 sky2_pci_read16(const struct sky2_hw *hw, unsigned reg)
+{
+ return sky2_read16(hw, Y2_CFG_SPC + reg);
+}
+
+static inline void sky2_pci_write32(struct sky2_hw *hw, unsigned reg, u32 val)
+{
+ sky2_write32(hw, Y2_CFG_SPC + reg, val);
+}
+
+static inline void sky2_pci_write16(struct sky2_hw *hw, unsigned reg, u16 val)
+{
+ sky2_write16(hw, Y2_CFG_SPC + reg, val);
+}
+#endif